/*
 * Copyright (c) 2024 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/tc_util.h>
#include <zephyr/ztest.h>
#include <zephyr/kernel.h>
#include <ksched.h>
#include <ipi.h>
#include <zephyr/kernel_structs.h>

#define STACK_SIZE (1024 + CONFIG_TEST_EXTRA_STACK_SIZE)

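/* One "busy" thread is created for each CPU other than the one running the test */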
#define NUM_THREADS (CONFIG_MP_MAX_NUM_CPUS - 1)

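/* Busy-wait time (microseconds, as used with k_busy_wait()) to allow IPIs to be delivered and processed */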
#define DELAY_FOR_IPIS 200

static struct k_thread thread[NUM_THREADS];
static struct k_thread alt_thread;

static bool alt_thread_created;

static K_THREAD_STACK_ARRAY_DEFINE(stack, NUM_THREADS, STACK_SIZE);
static K_THREAD_STACK_DEFINE(alt_stack, STACK_SIZE);

static uint32_t ipi_count[CONFIG_MP_MAX_NUM_CPUS];
static struct k_spinlock ipilock;
static atomic_t busy_started;
static volatile bool alt_thread_done;

static K_SEM_DEFINE(sem, 0, 1);

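/*
 * Test hook invoked on a CPU when it processes a scheduler IPI. The per-CPU
 * counts recorded here let the tests verify which CPUs received IPIs.
 */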
void z_trace_sched_ipi(void)
{
	k_spinlock_key_t  key;

	key = k_spin_lock(&ipilock);
	ipi_count[_current_cpu->id]++;
	k_spin_unlock(&ipilock, key);
}

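/* Reset all per-CPU IPI counts */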
static void clear_ipi_counts(void)
{
	k_spinlock_key_t  key;

	key = k_spin_lock(&ipilock);
	memset(ipi_count, 0, sizeof(ipi_count));
	k_spin_unlock(&ipilock, key);
}

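/* Snapshot the first @n_elem per-CPU IPI counts into @set */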
static void get_ipi_counts(uint32_t *set, size_t n_elem)
{
	k_spinlock_key_t  key;

	key = k_spin_lock(&ipilock);
	memcpy(set, ipi_count, n_elem * sizeof(*set));
	k_spin_unlock(&ipilock, key);
}

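/*
 * Entry point for the "busy" threads. Each one records the CPU on which it
 * started and then spins forever, keeping that CPU occupied.
 */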
static void busy_thread_entry(void *p1, void *p2, void *p3)
{
	int  key;
	uint32_t id;

	key = arch_irq_lock();
	id = _current_cpu->id;
	arch_irq_unlock(key);

	atomic_or(&busy_started, BIT(id));

	while (1) {
	}
}

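/*
 * Poll until every CPU other than @id reports that its busy thread has
 * started. Returns true if they all started within the polling window.
 */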
static bool wait_until_busy_threads_ready(uint32_t id)
{
	uint32_t  all;
	uint32_t  value;
	unsigned int i;

	all = IPI_ALL_CPUS_MASK ^ BIT(id);
	for (i = 0; i < 10; i++) {
		k_busy_wait(1000);

		value = (uint32_t)atomic_get(&busy_started);
		if (value == all) {
			break;
		}
	}

	return (i < 10);
}

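/*
 * Entry point for alt_thread. It pends on the semaphore and, once woken,
 * spins until the test marks it as done.
 */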
static void pending_thread_entry(void *p1, void *p2, void *p3)
{
	int  key;

	k_sem_take(&sem, K_FOREVER);

	while (!alt_thread_done) {
		key = arch_irq_lock();
		arch_spin_relax();
		arch_irq_unlock(key);
	}
}

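/* Create alt_thread at @priority and verify that it pends on the semaphore */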
static void alt_thread_create(int priority, const char *desc)
{
	k_thread_create(&alt_thread, alt_stack, STACK_SIZE,
			pending_thread_entry, NULL, NULL, NULL,
			priority, 0, K_NO_WAIT);
	alt_thread_created = true;

	/* Verify alt_thread is pending */

	k_busy_wait(10000);
	zassert_true(z_is_thread_pending(&alt_thread),
		     "%s priority thread has not pended.\n", desc);
}

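/*
 * Create one busy thread at @priority for each CPU other than the current
 * one and wait until they are all running. Returns the current CPU's ID.
 */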
uint32_t busy_threads_create(int priority)
{
	unsigned int  i;
	uint32_t      id;
	int           key;

	atomic_clear(&busy_started);

	for (i = 0; i < NUM_THREADS; i++) {
		k_thread_create(&thread[i], stack[i], STACK_SIZE,
				busy_thread_entry, NULL, NULL, NULL,
				priority, 0, K_NO_WAIT);
	}

	/* Align to tick boundary to minimize probability of timer ISRs */

	k_sleep(K_TICKS(1));
	key = arch_irq_lock();
	id = _current_cpu->id;
	arch_irq_unlock(key);

	/*
	 * Spin until all busy threads are ready. Since this thread and the
	 * busy threads are cooperative, it is assumed that none of them will
	 * be rescheduled to execute on a different CPU.
	 */

	zassert_true(wait_until_busy_threads_ready(id),
		     "1 or more 'busy threads' not ready.\n");

	return id;
}

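/* Reassign the busy threads' priorities: thread[i] is set to @priority + (i * @delta) */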
void busy_threads_priority_set(int priority, int delta)
{
	unsigned int  i;

	for (i = 0; i < NUM_THREADS; i++) {
		k_thread_priority_set(&thread[i], priority);
		priority += delta;
	}
}

/**
 * Verify that arch_sched_broadcast_ipi() broadcasts IPIs as expected.
 */
ZTEST(ipi, test_arch_sched_broadcast_ipi)
{
	uint32_t  set[CONFIG_MP_MAX_NUM_CPUS];
	uint32_t  id;
	int priority;
	unsigned int j;

	priority = k_thread_priority_get(k_current_get());

	id = busy_threads_create(priority - 1);

	/* Broadcast the IPI. All other CPUs ought to receive and process it */

	clear_ipi_counts();
	arch_sched_broadcast_ipi();
	k_busy_wait(DELAY_FOR_IPIS);
	get_ipi_counts(set, CONFIG_MP_MAX_NUM_CPUS);

	for (j = 0; j < CONFIG_MP_MAX_NUM_CPUS; j++) {
		if (id == j) {
			zassert_true(set[j] == 0,
				     "Broadcast-Expected 0, got %u\n",
				     set[j]);
		} else {
			zassert_true(set[j] == 1,
				     "Broadcast-Expected 1, got %u\n",
				     set[j]);
		}
	}
}

#ifdef CONFIG_ARCH_HAS_DIRECTED_IPIS
/**
 * Verify that arch_sched_directed_ipi() directs IPIs as expected.
 */
ZTEST(ipi, test_arch_sched_directed_ipi)
{
	uint32_t  set[CONFIG_MP_MAX_NUM_CPUS];
	uint32_t  id;
	int priority;
	unsigned int j;

	priority = k_thread_priority_get(k_current_get());

	id = busy_threads_create(priority - 1);

	/*
	 * Send an IPI to each CPU, one at a time. Verify that only the
	 * targeted CPU received the IPI.
	 */
	for (unsigned int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
		if (i == id) {
			continue;
		}

		clear_ipi_counts();
		arch_sched_directed_ipi(BIT(i));
		k_busy_wait(DELAY_FOR_IPIS);
		get_ipi_counts(set, CONFIG_MP_MAX_NUM_CPUS);

		for (j = 0; j < CONFIG_MP_MAX_NUM_CPUS; j++) {
			if (i == j) {
				zassert_true(set[j] == 1,
					     "Direct-Expected 1, got %u\n",
					     set[j]);
			} else {
				zassert_true(set[j] == 0,
					     "Direct-Expected 0, got %u\n",
					     set[j]);
			}
		}
	}
}
#endif

/**
 * Verify that waking a thread whose priority is lower than any other
 * currently executing thread does not result in any IPIs being sent.
 */
ZTEST(ipi, test_low_thread_wakes_no_ipis)
{
	uint32_t  set[CONFIG_MP_MAX_NUM_CPUS];
	uint32_t  id;
	int priority;
	unsigned int i;

	priority = k_thread_priority_get(k_current_get());
	atomic_clear(&busy_started);

	alt_thread_create(5, "Low");

	id = busy_threads_create(priority - 1);

	/*
	 * Lower the priority of the busy threads now that we know that they
	 * have started. As this is expected to generate IPIs, busy wait for
	 * some small amount of time to give them time to be processed.
	 */

	busy_threads_priority_set(0, 0);
	k_busy_wait(DELAY_FOR_IPIS);

	/*
	 * Low priority thread is pended. Current thread is cooperative.
	 * Other CPUs are executing preemptible threads @ priority 0.
	 */

	clear_ipi_counts();
	k_sem_give(&sem);
	k_busy_wait(DELAY_FOR_IPIS);
	get_ipi_counts(set, CONFIG_MP_MAX_NUM_CPUS);

	zassert_true(z_is_thread_ready(&alt_thread),
		     "Low priority thread is not ready.\n");

	alt_thread_done = true;

	for (i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
		zassert_true(set[i] == 0,
			     "CPU %u unexpectedly received IPI.\n", i);
	}
}

/**
 * Verify that waking a thread whose priority is higher than all currently
 * executing threads results in the proper IPIs being sent and processed.
 */
ZTEST(ipi, test_high_thread_wakes_some_ipis)
{
	uint32_t  set[CONFIG_MP_MAX_NUM_CPUS];
	uint32_t  id;
	int priority;
	unsigned int i;

	priority = k_thread_priority_get(k_current_get());
	atomic_clear(&busy_started);

	alt_thread_create(priority - 1 - NUM_THREADS, "High");

	id = busy_threads_create(priority - 1);

	/*
	 * Lower the priority of the busy threads now that we know that they
	 * have started and are busy waiting. As this is expected to generate
	 * IPIs, busy wait for some small amount of time to give them time to
	 * be processed.
	 */

	busy_threads_priority_set(0, 1);
	k_busy_wait(DELAY_FOR_IPIS);

	/*
	 * High priority thread is pended. Current thread is cooperative.
	 * Other CPUs are executing preemptible threads.
	 */

	clear_ipi_counts();
	k_sem_give(&sem);
	k_busy_wait(DELAY_FOR_IPIS);
	get_ipi_counts(set, CONFIG_MP_MAX_NUM_CPUS);

	zassert_true(z_is_thread_ready(&alt_thread),
		     "High priority thread is not ready.\n");

	alt_thread_done = true;

	for (i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
		if (i == id) {
			continue;
		}

		zassert_true(set[i] == 1, "CPU%u got %u IPIs", i, set[i]);
	}

	zassert_true(set[id] == 0, "Current CPU got %u IPI(s).\n", set[id]);
}

/**
 * Verify that lowering the priority of an active thread results in an IPI.
 * If directed IPIs are enabled, then only the CPU executing that active
 * thread ought to receive the IPI. Otherwise, if IPIs are broadcast, then
 * every CPU except the current one ought to receive an IPI.
 */
ZTEST(ipi, test_thread_priority_set_lower)
{
	uint32_t  set[CONFIG_MP_MAX_NUM_CPUS];
	uint32_t  id;
	int priority;
	unsigned int i;

	priority = k_thread_priority_get(k_current_get());

	id = busy_threads_create(priority - 1);

	clear_ipi_counts();
	k_thread_priority_set(&thread[0], priority);
	k_busy_wait(DELAY_FOR_IPIS);
	get_ipi_counts(set, CONFIG_MP_MAX_NUM_CPUS);

	for (i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
		if (i == id) {
			continue;
		}

#ifdef CONFIG_ARCH_HAS_DIRECTED_IPIS
		unsigned int j;

		for (j = 0; j < NUM_THREADS; j++) {
			if (_kernel.cpus[i].current == &thread[j]) {
				break;
			}
		}

		zassert_true(j < NUM_THREADS,
			     "CPU%u not executing expected thread\n", i);

		if (j == 0) {
			zassert_true(set[i] == 1, "CPU%u got %u IPIs.\n",
				     i, set[i]);
		} else {
			zassert_true(set[i] == 0, "CPU%u got %u IPI(s).\n",
				     i, set[i]);
		}
#else
		zassert_true(set[i] == 1, "CPU%u got %u IPIs", i, set[i]);
#endif
	}

	zassert_true(set[id] == 0, "Current CPU got %u IPI(s).\n", set[id]);
}

/**
 * Verify that IPIs are not sent to CPUs that are executing cooperative
 * threads.
 */
ZTEST(ipi, test_thread_coop_no_ipis)
{
	uint32_t  set[CONFIG_MP_MAX_NUM_CPUS];
	uint32_t  id;
	int priority;
	unsigned int i;

	priority = k_thread_priority_get(k_current_get());
	atomic_clear(&busy_started);

	alt_thread_create(priority - 1 - NUM_THREADS, "High");

	id = busy_threads_create(priority - 1);

	/*
	 * High priority thread is pended. Current thread is cooperative.
	 * Other CPUs are executing lower priority cooperative threads.
	 */

	clear_ipi_counts();
	k_sem_give(&sem);
	k_busy_wait(DELAY_FOR_IPIS);
	get_ipi_counts(set, CONFIG_MP_MAX_NUM_CPUS);

	zassert_true(z_is_thread_ready(&alt_thread),
		     "High priority thread is not ready.\n");

	alt_thread_done = true;

	for (i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
		zassert_true(set[i] == 0, "CPU%u got %u IPIs", i, set[i]);
	}
}

static void *ipi_tests_setup(void)
{
	/*
	 * Sleep a bit to guarantee that all CPUs enter an idle thread
	 * from which they can exit correctly to run the test.
	 */

	k_sleep(K_MSEC(20));

	return NULL;
}

static void cleanup_threads(void *fixture)
{
	unsigned int  i;

	ARG_UNUSED(fixture);

	/*
	 * Ensure that spawned busy threads are aborted before
	 * proceeding to the next test.
	 */

	for (i = 0; i < NUM_THREADS; i++) {
		k_thread_abort(&thread[i]);
	}

	/* Ensure that alt_thread, if it was created, also gets aborted */

	if (alt_thread_created) {
		k_thread_abort(&alt_thread);
	}
	alt_thread_created = false;

	alt_thread_done = false;
}

ZTEST_SUITE(ipi, NULL, ipi_tests_setup, NULL, cleanup_threads, NULL);