/*
 * Copyright (c) 2025 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/ztest.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/app_memory/app_memdomain.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/barrier.h>
#include <zephyr/sys/libc-hooks.h> /* for z_libc_partition */

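/*
 * Memory domain switching tests: several threads, each attached to one of
 * two memory domains, repeatedly bump a counter in their own domain's
 * partition and yield to each other. Afterwards the tests verify that
 * every thread completed all of its iterations and that the counters in
 * the other domain's partition were never touched.
 */
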
#define NUM_THREADS     3
#define TIMES_SWITCHING 10
#define STACKSIZE       (256 + CONFIG_TEST_EXTRA_STACK_SIZE)

extern void clear_fault(void);

#ifdef CONFIG_USERSPACE_SWITCHING_TESTS
/*
 * Even-numbered threads use domain_a.
 * Odd-numbered threads use domain_b.
 */

struct k_mem_domain domain_a;
K_APPMEM_PARTITION_DEFINE(partition_a);
K_APP_BMEM(partition_a) volatile unsigned int part_a_loops[NUM_THREADS];

struct k_mem_domain domain_b;
K_APPMEM_PARTITION_DEFINE(partition_b);
K_APP_BMEM(partition_b) volatile unsigned int part_b_loops[NUM_THREADS];

static struct k_thread threads[NUM_THREADS];
static K_THREAD_STACK_ARRAY_DEFINE(threads_stacks, NUM_THREADS, STACKSIZE);

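/*
 * Shared semaphore taken and given by every thread on each iteration to
 * verify that kernel objects remain usable across domain switches.
 */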
static K_SEM_DEFINE(sem_switching, 1, 1);

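/*
 * Thread entry point: picks the loop counter matching its domain
 * (even thread ID -> partition_a, odd -> partition_b), then increments
 * it TIMES_SWITCHING times, yielding after each increment so another
 * thread, and hence another memory domain, gets scheduled in between.
 */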
static void switch_thread_fn(void *arg1, void *arg2, void *arg3)
{
	volatile unsigned int *loop_ptr;
	const uintptr_t thread_id = (uintptr_t)arg1;

	if ((thread_id % 2) == 0) {
		loop_ptr = &part_a_loops[thread_id];
	} else {
		loop_ptr = &part_b_loops[thread_id];
	}

	for (int i = 0; i < TIMES_SWITCHING; i++) {
#ifdef CONFIG_DEBUG
		TC_PRINT("Thread %lu (%u)\n", (unsigned long)thread_id, *loop_ptr);
#endif

		*loop_ptr += 1;
		compiler_barrier();

		/* Make sure this thread can still use kernel objects. */
		k_sem_take(&sem_switching, K_FOREVER);
		k_sem_give(&sem_switching);

		k_yield();
	}
}

#endif /* CONFIG_USERSPACE_SWITCHING_TESTS */

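/*
 * Create NUM_THREADS threads, the first num_kernel_threads of them as
 * kernel threads and the rest as user threads attached to memory domains,
 * then run them all to completion and check the loop counters.
 */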
static void run_switching(int num_kernel_threads)
{
#ifdef CONFIG_USERSPACE_SWITCHING_TESTS
	unsigned int i;
	int remaining_kernel_threads = num_kernel_threads;

	/* Not expecting any errors. */
	clear_fault();

	for (i = 0; i < NUM_THREADS; i++) {
		uint32_t perms;
		bool is_kernel_thread = remaining_kernel_threads > 0;

		if (is_kernel_thread) {
			perms = K_INHERIT_PERMS;

			remaining_kernel_threads--;
		} else {
			perms = K_INHERIT_PERMS | K_USER;
		}

		/* Clear loop counters. */
		part_a_loops[i] = 0;
		part_b_loops[i] = 0;

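		/*
		 * Threads run at cooperative priority -1, so the k_yield()
		 * in the loop body is what hands execution to the next
		 * thread and forces a domain switch.
		 */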
		/* Delay thread start so that memory domains can be applied first. */
		k_thread_create(&threads[i], threads_stacks[i], STACKSIZE, switch_thread_fn,
				(void *)(uintptr_t)i, NULL, NULL, -1, perms, K_FOREVER);

#ifdef CONFIG_SCHED_CPU_MASK
		/*
		 * Make sure all created threads run on the same CPU
		 * so that memory domain switching is actually exercised.
		 */
		(void)k_thread_cpu_pin(&threads[i], 0);
#endif /* CONFIG_SCHED_CPU_MASK */

		k_thread_access_grant(&threads[i], &sem_switching);

		/*
		 * Kernel threads have access to all memory by default,
		 * so there is no need to put them into memory domains.
		 */
		if (!is_kernel_thread) {
			/* Remember EVEN -> A, ODD -> B. */
			if ((i % 2) == 0) {
				k_mem_domain_add_thread(&domain_a, &threads[i]);
			} else {
				k_mem_domain_add_thread(&domain_b, &threads[i]);
			}
		}
	}

	/* Start the thread loops. */
	for (i = 0; i < NUM_THREADS; i++) {
		k_thread_start(&threads[i]);
	}

	/* Wait for all threads to finish. */
	for (i = 0; i < NUM_THREADS; i++) {
		k_thread_join(&threads[i], K_FOREVER);
	}

	/* Check that all threads have looped enough times. */
	for (i = 0; i < NUM_THREADS; i++) {
		unsigned int loops;

		/*
		 * A thread should never have access to the loop counters
		 * in the other partition; accessing them should generate
		 * a fault. Still, check that the other counter stayed at
		 * zero, just in case.
		 */
		if ((i % 2) == 0) {
			loops = part_a_loops[i];

			zassert_equal(part_b_loops[i], 0,
				      "part_b_loops[%i] should be zero but is not", i);
		} else {
			loops = part_b_loops[i];

			zassert_equal(part_a_loops[i], 0,
				      "part_a_loops[%i] should be zero but is not", i);
		}

		zassert_equal(loops, TIMES_SWITCHING,
			      "thread %u has not done enough loops (%u != %u)", i, loops,
			      TIMES_SWITCHING);
	}
#else  /* CONFIG_USERSPACE_SWITCHING_TESTS */
	ztest_test_skip();
#endif /* CONFIG_USERSPACE_SWITCHING_TESTS */
}

ZTEST(userspace_domain_switching, test_kernel_only_switching)
{
	/*
	 * Run with all kernel threads.
	 *
	 * This should work as kernel threads by default have access to
	 * all memory, without having to attach them to memory domains.
	 * This serves as a baseline check.
	 */
	run_switching(NUM_THREADS);
}

ZTEST(userspace_domain_switching, test_user_only_switching)
{
	/* Run with all user threads. */
	run_switching(0);
}

ZTEST(userspace_domain_switching, test_kernel_user_mix_switching)
{
	/* Run with one kernel thread while all the others are user threads. */
	run_switching(1);
}

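/*
 * Suite setup: build the two memory domains once, each containing its own
 * test partition plus the partitions user threads always need.
 */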
void *switching_setup(void)
{
#ifdef CONFIG_USERSPACE_SWITCHING_TESTS
	static bool already_inited;

	if (already_inited) {
		return NULL;
	}

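	/*
	 * User threads must still be able to reach libc and ztest data,
	 * so those partitions are included in both domains alongside the
	 * per-domain test partition.
	 */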
	struct k_mem_partition *parts_a[] = {
#if Z_LIBC_PARTITION_EXISTS
		&z_libc_partition,
#endif
		&ztest_mem_partition, &partition_a
	};

	struct k_mem_partition *parts_b[] = {
#if Z_LIBC_PARTITION_EXISTS
		&z_libc_partition,
#endif
		&ztest_mem_partition, &partition_b
	};

	zassert_equal(k_mem_domain_init(&domain_a, ARRAY_SIZE(parts_a), parts_a), 0,
		      "failed to initialize memory domain A");

	zassert_equal(k_mem_domain_init(&domain_b, ARRAY_SIZE(parts_b), parts_b), 0,
		      "failed to initialize memory domain B");

	already_inited = true;
#endif /* CONFIG_USERSPACE_SWITCHING_TESTS */

	return NULL;
}

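/*
 * Per-test setup: grant the running test thread access to the worker
 * thread objects so it can create, start and join them even when it
 * does not run with full kernel privileges.
 */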
void switching_before(void *fixture)
{
#ifdef CONFIG_USERSPACE_SWITCHING_TESTS
	int i;

	for (i = 0; i < NUM_THREADS; i++) {
		k_thread_access_grant(k_current_get(), &threads[i]);
	}
#endif /* CONFIG_USERSPACE_SWITCHING_TESTS */
}

ZTEST_SUITE(userspace_domain_switching, NULL, switching_setup, switching_before, NULL, NULL);