1 /*
2 * Copyright (c) 2018 Intel Corporation.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/tc_util.h>
8 #include <zephyr/ztest.h>
9 #include <zephyr/kernel.h>
10 #include <ksched.h>
11 #include <zephyr/kernel_structs.h>
12
13 #if CONFIG_MP_MAX_NUM_CPUS < 2
14 #error SMP test requires at least two CPUs!
15 #endif
16
17 #define RUN_FACTOR (CONFIG_SMP_TEST_RUN_FACTOR / 100.0)
18
19 #define T2_STACK_SIZE (2048 + CONFIG_TEST_EXTRA_STACK_SIZE)
20 #define STACK_SIZE (384 + CONFIG_TEST_EXTRA_STACK_SIZE)
21 #define DELAY_US 50000
22 #define TIMEOUT 1000
23 #define EQUAL_PRIORITY 1
24 #define TIME_SLICE_MS 500
25 #define THREAD_DELAY 1
26 #define SLEEP_MS_LONG ((int)(15000 * RUN_FACTOR))
27
28 struct k_thread t2;
29 K_THREAD_STACK_DEFINE(t2_stack, T2_STACK_SIZE);
30
31 volatile int t2_count;
32 volatile int sync_count = -1;
33
34 static int main_thread_id;
35 static int child_thread_id;
36 volatile int rv;
37
38 K_SEM_DEFINE(cpuid_sema, 0, 1);
39 K_SEM_DEFINE(sema, 0, 1);
40 static struct k_mutex smutex;
41 static struct k_sem smp_sem;
42
43 #define MAX_NUM_THREADS CONFIG_MP_MAX_NUM_CPUS
44
45 struct thread_info {
46 k_tid_t tid;
47 int executed;
48 int priority;
49 int cpu_id;
50 };
51 static ZTEST_BMEM volatile struct thread_info tinfo[MAX_NUM_THREADS];
52 static struct k_thread tthread[MAX_NUM_THREADS];
53 static K_THREAD_STACK_ARRAY_DEFINE(tstack, MAX_NUM_THREADS, STACK_SIZE);
54
55 static volatile int thread_started[MAX_NUM_THREADS - 1];
56
57 static struct k_poll_signal tsignal[MAX_NUM_THREADS];
58 static struct k_poll_event tevent[MAX_NUM_THREADS];
59
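/* Read the current CPU id with interrupts locked so this thread
 * cannot migrate to another core in the middle of the read.
 */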
60 static int curr_cpu(void)
61 {
62 unsigned int k = arch_irq_lock();
63 int ret = arch_curr_cpu()->id;
64
65 arch_irq_unlock(k);
66 return ret;
67 }
68
69 /**
70 * @brief SMP
71 * @defgroup kernel_smp_tests SMP Tests
72 * @ingroup all_tests
73 * @{
74 * @}
75 */
76
77 /**
78 * @defgroup kernel_smp_integration_tests SMP Integration Tests
79 * @ingroup kernel_smp_tests
80 * @{
81 * @}
82 */
83
84 /**
85 * @defgroup kernel_smp_module_tests SMP Module Tests
86 * @ingroup kernel_smp_tests
87 * @{
88 * @}
89 */
90
91 static void t2_fn(void *a, void *b, void *c)
92 {
93 ARG_UNUSED(a);
94 ARG_UNUSED(b);
95 ARG_UNUSED(c);
96
97 t2_count = 0;
98
99 /* This thread simply increments a counter while spinning on
100 * the CPU. The idea is that it will always be iterating
101 * faster than the other thread so long as it is fairly
102 * scheduled (and it's designed to NOT be fairly schedulable
103 * without a separate CPU!), so the main thread can always
104 * check its progress.
105 */
106 while (1) {
107 k_busy_wait(DELAY_US);
108 t2_count++;
109 }
110 }
111
112 /**
113 * @brief Verify SMP with 2 cooperative threads
114 *
115 * @ingroup kernel_smp_tests
116 *
117 * @details Multiprocessing is verified by checking whether
118 * 2 cooperative threads run simultaneously on different cores
119 */
120 ZTEST(smp, test_smp_coop_threads)
121 {
122 int i, ok = 1;
123
124 if (!IS_ENABLED(CONFIG_SCHED_IPI_SUPPORTED)) {
125 /* The spawned thread enters an infinite loop, so it can't be
126 * successfully aborted via an IPI. Just skip in that
127 * configuration.
128 */
129 ztest_test_skip();
130 }
131
132 k_tid_t tid = k_thread_create(&t2, t2_stack, T2_STACK_SIZE, t2_fn,
133 NULL, NULL, NULL,
134 K_PRIO_COOP(2), 0, K_NO_WAIT);
135
136 /* Wait for the other thread (on a separate CPU) to actually
137 * start running. We want synchrony to be as perfect as
138 * possible.
139 */
140 t2_count = -1;
141 while (t2_count == -1) {
142 }
143
144 for (i = 0; i < 10; i++) {
145 /* Wait slightly longer than the other thread so our
146 * count will always be lower
147 */
148 k_busy_wait(DELAY_US + (DELAY_US / 8));
149
150 if (t2_count <= i) {
151 ok = 0;
152 break;
153 }
154 }
155
156 k_thread_abort(tid);
157 k_thread_join(tid, K_FOREVER);
158 zassert_true(ok, "SMP test failed");
159 }
160
161 static void child_fn(void *p1, void *p2, void *p3)
162 {
163 ARG_UNUSED(p2);
164 ARG_UNUSED(p3);
165 int parent_cpu_id = POINTER_TO_INT(p1);
166
167 zassert_true(parent_cpu_id != curr_cpu(),
168 "Parent isn't on other core");
169
170 sync_count++;
171 k_sem_give(&cpuid_sema);
172 }
173
174 /**
175 * @brief Verify CPU IDs of threads in SMP
176 *
177 * @ingroup kernel_smp_tests
178 *
179 * @details From the child thread, verify that the parent
180 * thread is running on a different core
181 */
182 ZTEST(smp, test_cpu_id_threads)
183 {
184 /* Make sure idle thread runs on each core */
185 k_sleep(K_MSEC(1000));
186
187 int parent_cpu_id = curr_cpu();
188
189 k_tid_t tid = k_thread_create(&t2, t2_stack, T2_STACK_SIZE, child_fn,
190 INT_TO_POINTER(parent_cpu_id), NULL,
191 NULL, K_PRIO_PREEMPT(2), 0, K_NO_WAIT);
192
193 while (sync_count == -1) {
194 }
195 k_sem_take(&cpuid_sema, K_FOREVER);
196
197 k_thread_abort(tid);
198 k_thread_join(tid, K_FOREVER);
199 }
200
201 static void thread_entry_fn(void *p1, void *p2, void *p3)
202 {
203 ARG_UNUSED(p2);
204 ARG_UNUSED(p3);
205 int thread_num = POINTER_TO_INT(p1);
206 int count = 0;
207
208 tinfo[thread_num].executed = 1;
209 tinfo[thread_num].cpu_id = curr_cpu();
210
211 while (count++ < 5) {
212 k_busy_wait(DELAY_US);
213 }
214 }
215
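/* Busy-wait until every spawned thread except the last one has
 * terminated, then allow a short settling delay.
 */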
216 static void spin_for_threads_exit(void)
217 {
218 unsigned int num_threads = arch_num_cpus();
219
220 for (int i = 0; i < num_threads - 1; i++) {
221 volatile uint8_t *p = &tinfo[i].tid->base.thread_state;
222
223 while (!(*p & _THREAD_DEAD)) {
224 }
225 }
226 k_busy_wait(DELAY_US);
227 }
228
229 static void spawn_threads(int prio, int thread_num, int equal_prio,
230 k_thread_entry_t thread_entry, int delay)
231 {
232 int i;
233
234 /* Spawn threads of priority higher than
235 * the previously created thread
236 */
237 for (i = 0; i < thread_num; i++) {
238 if (equal_prio) {
239 tinfo[i].priority = prio;
240 } else {
241 /* Increase priority for each thread */
242 tinfo[i].priority = prio - 1;
243 prio = tinfo[i].priority;
244 }
245 tinfo[i].tid = k_thread_create(&tthread[i], tstack[i],
246 STACK_SIZE, thread_entry,
247 INT_TO_POINTER(i), NULL, NULL,
248 tinfo[i].priority, 0,
249 K_MSEC(delay));
250 if (delay) {
251 /* Increase delay for each thread */
252 delay = delay + 10;
253 }
254 }
255 }
256
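/* Abort all spawned threads, then join each one so they are fully
 * cleaned up before the next test step.
 */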
257 static void abort_threads(int num)
258 {
259 for (int i = 0; i < num; i++) {
260 k_thread_abort(tinfo[i].tid);
261 }
262
263 for (int i = 0; i < num; i++) {
264 k_thread_join(tinfo[i].tid, K_FOREVER);
265 }
266 }
267
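/* Reset the per-thread bookkeeping used by the tests above. */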
268 static void cleanup_resources(void)
269 {
270 unsigned int num_threads = arch_num_cpus();
271
272 for (int i = 0; i < num_threads; i++) {
273 tinfo[i].tid = 0;
274 tinfo[i].executed = 0;
275 tinfo[i].priority = 0;
276 }
277 }
278
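/* Spin forever: this thread keeps its CPU busy until the test
 * aborts it.
 */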
279 static void __no_optimization thread_ab_entry(void *p1, void *p2, void *p3)
280 {
281 ARG_UNUSED(p1);
282 ARG_UNUSED(p2);
283 ARG_UNUSED(p3);
284
285 while (true) {
286 }
287 }
288
289 #define SPAWN_AB_PRIO K_PRIO_COOP(10)
290
291 /**
292 * @brief Verify the code path when a context switch happens in k_thread_abort() on an SMP system
293 *
294 * @ingroup kernel_smp_tests
295 *
296 * @details test logic:
297 * - The ztest thread has cooperative priority.
298 * - From ztest thread we spawn N number of cooperative threads, where N = number of CPUs.
299 * - The spawned cooperative threads execute an infinite loop (so they occupy a CPU core until
300 * they are aborted).
301 * - (Number of CPUs - 1) of the spawned threads run their infinite loops, as the current CPU
302 * is occupied by the ztest cooperative thread. Because of that, the last spawned thread is
303 * ready but not executing.
304 * - We abort the spawned threads one-by-one from the ztest thread.
305 * - At the first k_thread_abort() call the ztest thread will be preempted by the remaining
306 * spawned thread, which has a higher priority than the ztest thread.
307 * But the k_thread_abort() call should have destroyed one of the spawned threads, so the ztest
308 * thread should have a CPU available to run on.
309 * - We expect that all spawned threads will be aborted successfully.
310 *
311 * This is the test case for the zephyrproject-rtos/zephyr#58040 issue, where this test caused a
312 * system hang.
313 */
314
315 ZTEST(smp, test_coop_switch_in_abort)
316 {
317 k_tid_t tid[MAX_NUM_THREADS];
318 unsigned int num_threads = arch_num_cpus();
319 unsigned int i;
320
321 zassert_true(arch_current_thread()->base.prio < 0,
322 "test case relies on the ztest thread being cooperative");
323 zassert_true(arch_current_thread()->base.prio > SPAWN_AB_PRIO,
324 "spawned threads need to have higher priority than the ztest thread");
325
326 /* Spawn N number of cooperative threads, where N = number of CPUs */
327 for (i = 0; i < num_threads; i++) {
328 tid[i] = k_thread_create(&tthread[i], tstack[i],
329 STACK_SIZE, thread_ab_entry,
330 NULL, NULL, NULL,
331 SPAWN_AB_PRIO, 0, K_NO_WAIT);
332 }
333
334 /* Wait for some time to let the spawned threads on other cores run and start executing their
335 * infinite loops.
336 */
337 k_busy_wait(DELAY_US * 4);
338
339 /* At this point (number of CPUs - 1) spawned threads are running their infinite loops
340 * on the other CPU cores, as the current CPU is occupied by this ztest cooperative thread.
341 * Because of that, the last spawned thread is ready but not executing.
342 */
343
344 /* Abort all spawned threads one-by-one. At the first k_thread_abort call the context
345 * switch will happen and the last 'spawned' thread will start.
346 * We should successfully abort all threads.
347 */
348 for (i = 0; i < num_threads; i++) {
349 k_thread_abort(tid[i]);
350 }
351
352 /* Cleanup */
353 for (i = 0; i < num_threads; i++) {
354 zassert_equal(k_thread_join(tid[i], K_FOREVER), 0);
355 }
356 }
357
358 /**
359 * @brief Test cooperative threads non-preemption
360 *
361 * @ingroup kernel_smp_tests
362 *
363 * @details Spawn cooperative threads equal to number of cores
364 * supported. Main thread will already be running on 1 core.
365 * Check if the last thread created preempts any threads
366 * already running.
367 */
368 ZTEST(smp, test_coop_resched_threads)
369 {
370 unsigned int num_threads = arch_num_cpus();
371
372 /* Spawn threads equal to number of cores,
373 * since we don't give up current CPU, last thread
374 * will not get scheduled
375 */
376 spawn_threads(K_PRIO_COOP(10), num_threads, !EQUAL_PRIORITY,
377 &thread_entry_fn, THREAD_DELAY);
378
379 /* Wait for some time to let other core's thread run */
380 k_busy_wait(DELAY_US);
381
382
383 /* Ensure that the cooperative threads are not preempted
384 * by checking the last thread's execution
385 * status. We know that all threads got scheduled on
386 * other cores except the last one
387 */
388 for (int i = 0; i < num_threads - 1; i++) {
389 zassert_true(tinfo[i].executed == 1,
390 "cooperative thread %d didn't run", i);
391 }
392 zassert_true(tinfo[num_threads - 1].executed == 0,
393 "cooperative thread is preempted");
394
395 /* Abort threads created */
396 abort_threads(num_threads);
397 cleanup_resources();
398 }
399
400 /**
401 * @brief Test preemptibility of a preemptible thread
402 *
403 * @ingroup kernel_smp_tests
404 *
405 * @details Create a preemptible thread, let it run
406 * on another core, and verify that it gets preempted
407 * when another thread of higher priority is spawned
408 */
409 ZTEST(smp, test_preempt_resched_threads)
410 {
411 unsigned int num_threads = arch_num_cpus();
412
413 /* Spawn threads equal to number of cores,
414 * lower priority thread should
415 * be preempted by higher ones
416 */
417 spawn_threads(K_PRIO_PREEMPT(10), num_threads, !EQUAL_PRIORITY,
418 &thread_entry_fn, THREAD_DELAY);
419
420 spin_for_threads_exit();
421
422 for (int i = 0; i < num_threads; i++) {
423 zassert_true(tinfo[i].executed == 1,
424 "preemptive thread %d didn't run", i);
425 }
426
427 /* Abort threads created */
428 abort_threads(num_threads);
429 cleanup_resources();
430 }
431
432 /**
433 * @brief Validate behavior of thread when it yields
434 *
435 * @ingroup kernel_smp_tests
436 *
437 * @details Spawn cooperative threads equal to the number
438 * of cores, so the last thread is pending, then call
439 * k_yield() from the main thread. After that, all threads
440 * must have executed
441 */
442 ZTEST(smp, test_yield_threads)
443 {
444 unsigned int num_threads = arch_num_cpus();
445
446 /* Spawn threads equal to the number
447 * of cores, so the last thread would be
448 * pending.
449 */
450 spawn_threads(K_PRIO_COOP(10), num_threads, !EQUAL_PRIORITY,
451 &thread_entry_fn, !THREAD_DELAY);
452
453 k_yield();
454 k_busy_wait(DELAY_US);
455
456 for (int i = 0; i < num_threads; i++) {
457 zassert_true(tinfo[i].executed == 1,
458 "thread %d did not execute", i);
459
460 }
461
462 abort_threads(num_threads);
463 cleanup_resources();
464 }
465
466 /**
467 * @brief Test behavior of thread when it sleeps
468 *
469 * @ingroup kernel_smp_tests
470 *
471 * @details Spawn cooperative threads and call
472 * sleep() from the main thread. After the timeout, all
473 * threads have to have been scheduled.
474 */
475 ZTEST(smp, test_sleep_threads)
476 {
477 unsigned int num_threads = arch_num_cpus();
478
479 spawn_threads(K_PRIO_COOP(10), num_threads, !EQUAL_PRIORITY,
480 &thread_entry_fn, !THREAD_DELAY);
481
482 k_msleep(TIMEOUT);
483
484 for (int i = 0; i < num_threads; i++) {
485 zassert_true(tinfo[i].executed == 1,
486 "thread %d did not execute", i);
487 }
488
489 abort_threads(num_threads);
490 cleanup_resources();
491 }
492
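/* Flag that the thread has started, then sleep far longer than the
 * test runs; the parent is expected to wake it with k_wakeup().
 */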
493 static void thread_wakeup_entry(void *p1, void *p2, void *p3)
494 {
495 ARG_UNUSED(p2);
496 ARG_UNUSED(p3);
497 int thread_num = POINTER_TO_INT(p1);
498
499 thread_started[thread_num] = 1;
500
501 k_msleep(DELAY_US * 1000);
502
503 tinfo[thread_num].executed = 1;
504 }
505
506 static void wakeup_on_start_thread(int tnum)
507 {
508 int threads_started = 0, i;
509
510 /* For each thread, spin waiting for it to first flag that
511 * it's going to sleep, and then that it's actually blocked
512 */
513 for (i = 0; i < tnum; i++) {
514 while (thread_started[i] == 0) {
515 }
516 while (!z_is_thread_prevented_from_running(tinfo[i].tid)) {
517 }
518 }
519
520 for (i = 0; i < tnum; i++) {
521 if (thread_started[i] == 1 && threads_started <= tnum) {
522 threads_started++;
523 k_wakeup(tinfo[i].tid);
524 }
525 }
526 zassert_equal(threads_started, tnum,
527 "Not all threads have started");
528 }
529
530 static void check_wokeup_threads(int tnum)
531 {
532 int threads_woke_up = 0, i;
533
534 /* k_wakeup() isn't synchronous, give the other CPU time to
535 * schedule them
536 */
537 k_busy_wait(200000);
538
539 for (i = 0; i < tnum; i++) {
540 if (tinfo[i].executed == 1 && threads_woke_up <= tnum) {
541 threads_woke_up++;
542 }
543 }
544 zassert_equal(threads_woke_up, tnum, "Threads did not wake up");
545 }
546
547 /**
548 * @brief Test behavior of wakeup() in SMP case
549 *
550 * @ingroup kernel_smp_tests
551 *
552 * @details Spawn a number of threads equal to the number of
553 * remaining cores and let them sleep for a while. Call
554 * wakeup() on those threads from the parent thread and check
555 * that they are all running
556 */
557 ZTEST(smp, test_wakeup_threads)
558 {
559 unsigned int num_threads = arch_num_cpus();
560
561 /* Spawn threads to run on all remaining cores */
562 spawn_threads(K_PRIO_COOP(10), num_threads - 1, !EQUAL_PRIORITY,
563 &thread_wakeup_entry, !THREAD_DELAY);
564
565 /* Check if all the threads have started, then call wakeup */
566 wakeup_on_start_thread(num_threads - 1);
567
568 /* Count threads which are woken up */
569 check_wokeup_threads(num_threads - 1);
570
571 /* Abort all threads and cleanup */
572 abort_threads(num_threads - 1);
573 cleanup_resources();
574 }
575
576 /* a thread for testing getting the current cpu */
577 static void thread_get_cpu_entry(void *p1, void *p2, void *p3)
578 {
579 ARG_UNUSED(p1);
580 ARG_UNUSED(p2);
581 ARG_UNUSED(p3);
582
583 int bsp_id = *(int *)p1;
584 int cpu_id = -1;
585
586 /* get current cpu number for running thread */
587 _cpu_t *curr_cpu = arch_curr_cpu();
588
589 /**TESTPOINT: call arch_curr_cpu() to get cpu struct */
590 zassert_true(curr_cpu != NULL,
591 "test failed to get current cpu.");
592
593 cpu_id = curr_cpu->id;
594
595 zassert_true(bsp_id != cpu_id,
596 "should not be the same as our BSP");
597
598 /* loop forever to ensure running on this CPU */
599 while (1) {
600 k_busy_wait(DELAY_US);
601 }
602 }
603
604 /**
605 * @brief Test getting a pointer to the current CPU
606 *
607 * @ingroup kernel_smp_module_tests
608 *
609 * @details
610 * Test Objective:
611 * - To verify the architecture layer provides a mechanism to return a pointer to the
612 * current kernel CPU record of the running CPU.
613 * We call arch_curr_cpu() and get its member, both in the main and the spawned thread
614 * separately, and compare them. They shall be different in an SMP environment.
615 *
616 * Testing techniques:
617 * - Interface testing, function and black box testing,
618 * dynamic analysis and testing,
619 *
620 * Prerequisite Conditions:
621 * - CONFIG_SMP=y, and the HW platform must support SMP.
622 *
623 * Input Specifications:
624 * - N/A
625 *
626 * Test Procedure:
627 * -# In the main thread, call arch_curr_cpu() to get its member "id", then store it
628 * into a variable thread_id.
629 * -# Spawn a thread t2, pass the stored thread_id to it, then call
630 * k_busy_wait() for 50us so the thread runs and won't be swapped out.
631 * -# In thread t2, call arch_curr_cpu() to get a pointer to the current cpu data. Then
632 * check that it is not NULL.
633 * -# Store the member id, accessed via the pointer to the current cpu data, in the variable cpu_id.
634 * -# Check that cpu_id is not equal to the bsp_id that we passed into the thread.
635 * -# Call k_busy_wait() and loop forever.
636 * -# In the main thread, terminate the thread t2 before exit.
637 *
638 * Expected Test Result:
639 * - The pointer of current cpu data that we got from function call is correct.
640 *
641 * Pass/Fail Criteria:
642 * - Successful if the checks of steps 3 and 5 all pass.
643 * - Failure if any check of steps 3 and 5 fails.
644 *
645 * Assumptions and Constraints:
646 * - This test is for platforms that support SMP; in our current scenario,
647 * only x86_64, arc and xtensa are supported.
648 *
649 * @see arch_curr_cpu()
650 */
651 static int _cpu_id;
652 ZTEST(smp, test_get_cpu)
653 {
654 k_tid_t thread_id;
655
656 if (!IS_ENABLED(CONFIG_SCHED_IPI_SUPPORTED)) {
657 /* The spawned thread enters an infinite loop, so it can't be
658 * successfully aborted via an IPI. Just skip in that
659 * configuration.
660 */
661 ztest_test_skip();
662 }
663
664 /* get current cpu number */
665 _cpu_id = arch_curr_cpu()->id;
666
667 thread_id = k_thread_create(&t2, t2_stack, T2_STACK_SIZE,
668 thread_get_cpu_entry,
669 &_cpu_id, NULL, NULL,
670 K_PRIO_COOP(2),
671 K_INHERIT_PERMS, K_NO_WAIT);
672
673 k_busy_wait(DELAY_US);
674
675 k_thread_abort(thread_id);
676 k_thread_join(thread_id, K_FOREVER);
677 }
678
679 #ifdef CONFIG_TRACE_SCHED_IPI
680 /* global variable for testing send IPI */
681 static volatile int sched_ipi_has_called;
682
683 void z_trace_sched_ipi(void)
684 {
685 sched_ipi_has_called++;
686 }
687 #endif
688
689 /**
690 * @brief Test interprocessor interrupt
691 *
692 * @ingroup kernel_smp_integration_tests
693 *
694 * @details
695 * Test Objective:
696 * - To verify the architecture layer provides a mechanism to issue an interprocessor
697 * interrupt to all other CPUs in the system that calls the scheduler IPI.
698 * We simply add a hook in z_sched_ipi(), in order to check that it has been
699 * called at least once on a CPU other than the caller, when arch_sched_broadcast_ipi()
700 * is called.
701 *
702 * Testing techniques:
703 * - Interface testing, function and black box testing,
704 * dynamic analysis and testing
705 *
706 * Prerequisite Conditions:
707 * - CONFIG_SMP=y, and the HW platform must support SMP.
708 * - CONFIG_TRACE_SCHED_IPI=y was set.
709 *
710 * Input Specifications:
711 * - N/A
712 *
713 * Test Procedure:
714 * -# In the main thread, set the global variable sched_ipi_has_called to zero.
715 * -# Call arch_sched_broadcast_ipi() then sleep for 100ms.
716 * -# In the z_sched_ipi() handler, increment sched_ipi_has_called.
717 * -# In the main thread, check that sched_ipi_has_called is not equal to zero.
718 * -# Repeat steps 1 to 4 three times.
719 *
720 * Expected Test Result:
721 * - The scheduler IPI handler is invoked on a CPU other than the caller.
722 *
723 * Pass/Fail Criteria:
724 * - Successful if the check of step 4 passes in every iteration.
725 * - Failure if the check of step 4 fails in any iteration.
726 *
727 * Assumptions and Constraints:
728 * - This test is for platforms that support SMP; in our current scenario,
729 * only x86_64 and arc are supported.
730 *
731 * @see arch_sched_broadcast_ipi()
732 */
733 #ifdef CONFIG_SCHED_IPI_SUPPORTED
734 ZTEST(smp, test_smp_ipi)
735 {
736 #ifndef CONFIG_TRACE_SCHED_IPI
737 ztest_test_skip();
738 #else
739
740 TC_PRINT("cpu num=%d", arch_num_cpus());
741
742 for (int i = 0; i < 3 ; i++) {
743 /* issue a sched ipi to tell other CPU to run thread */
744 sched_ipi_has_called = 0;
745 arch_sched_broadcast_ipi();
746
747 /* Need to wait longer than we think, loaded CI
748 * systems need to wait for host scheduling to run the
749 * other CPU's thread.
750 */
751 k_msleep(100);
752
753 /**TESTPOINT: check that we entered our IPI interrupt handler */
754 zassert_true(sched_ipi_has_called != 0,
755 "did not receive IPI.(%d)",
756 sched_ipi_has_called);
757 }
758 #endif
759 }
760 #endif
761
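/* Custom fatal error handler: record which CPU each expected
 * k_oops() landed on, and fail and halt for any unexpected error reason.
 */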
762 void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *esf)
763 {
764 static int trigger;
765
766 if (reason != K_ERR_KERNEL_OOPS) {
767 printk("wrong error reason\n");
768 TC_END_REPORT(TC_FAIL);
769 k_fatal_halt(reason);
770 }
771
772 if (trigger == 0) {
773 child_thread_id = curr_cpu();
774 trigger++;
775 } else {
776 main_thread_id = curr_cpu();
777
778 /* Verify the fatal error happened on a different core */
779 zassert_true(main_thread_id != child_thread_id,
780 "fatal on the same core");
781 }
782 }
783
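/* Deliberately trigger a kernel oops; execution must never reach
 * the line after k_oops().
 */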
784 void entry_oops(void *p1, void *p2, void *p3)
785 {
786 k_oops();
787 TC_ERROR("SHOULD NEVER SEE THIS\n");
788 }
789
790 /**
791 * @brief Test that a fatal error can be triggered on a different core
792 *
793 * @details When CONFIG_SMP is enabled, on some multiprocessor
794 * platforms, exceptions can be triggered on different cores at
795 * the same time.
796 *
797 * @ingroup kernel_common_tests
798 */
799 ZTEST(smp, test_fatal_on_smp)
800 {
801 /* Create a child thread and trigger a crash */
802 k_thread_create(&t2, t2_stack, T2_STACK_SIZE, entry_oops,
803 NULL, NULL, NULL,
804 K_PRIO_PREEMPT(2), 0, K_NO_WAIT);
805
806 /* hold the CPU and wait for the child thread to trigger the exception and be terminated */
807 k_busy_wait(5 * DELAY_US);
808
809 /* Verify that the child thread is no longer running. We can't simply use k_thread_join here
810 * as we don't want to introduce a reschedule point here.
811 */
812 zassert_true(z_is_thread_state_set(&t2, _THREAD_DEAD));
813
814 /* Manually trigger the crash in the main thread */
815 entry_oops(NULL, NULL, NULL);
816
817 /* should not be here */
818 ztest_test_fail();
819 }
820
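/* Record which CPU the system workqueue ran this work item on. */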
821 static void workq_handler(struct k_work *work)
822 {
823 child_thread_id = curr_cpu();
824 }
825
826 /**
827 * @brief Test that the system workq runs on a different core
828 *
829 * @details When the macro CONFIG_SMP is enabled, the workq can run
830 * on a different core.
831 *
832 * @ingroup kernel_common_tests
833 */
834 ZTEST(smp, test_workq_on_smp)
835 {
836 static struct k_work work;
837
838 k_work_init(&work, workq_handler);
839
840 /* submit work item on system workq */
841 k_work_submit(&work);
842
843 /* Wait for some time to let other core's thread run */
844 k_busy_wait(DELAY_US);
845
846 /* check the work item has finished */
847 zassert_equal(k_work_busy_get(&work), 0);
848
849 main_thread_id = curr_cpu();
850
851 /* Verify the ztest thread and the system workq run on different cores */
852 zassert_true(main_thread_id != child_thread_id,
853 "system workq run on the same core");
854 }
855
856 static void t1_mutex_lock(void *p1, void *p2, void *p3)
857 {
858 ARG_UNUSED(p2);
859 ARG_UNUSED(p3);
860
861 /* t1 will get mutex first */
862 k_mutex_lock((struct k_mutex *)p1, K_FOREVER);
863
864 k_msleep(2);
865
866 k_mutex_unlock((struct k_mutex *)p1);
867 }
868
869 static void t2_mutex_lock(void *p1, void *p2, void *p3)
870 {
871 ARG_UNUSED(p2);
872 ARG_UNUSED(p3);
873
874 zassert_equal(arch_current_thread()->base.global_lock_count, 0,
875 "thread global lock cnt %d is incorrect",
876 arch_current_thread()->base.global_lock_count);
877
878 k_mutex_lock((struct k_mutex *)p1, K_FOREVER);
879
880 zassert_equal(arch_current_thread()->base.global_lock_count, 0,
881 "thread global lock cnt %d is incorrect",
882 arch_current_thread()->base.global_lock_count);
883
884 k_mutex_unlock((struct k_mutex *)p1);
885
886 /**TESTPOINT: z_smp_release_global_lock() has been called during the
887 * context switch but global_lock_count has not been decreased
888 * because no irq_lock() was called.
889 */
890 zassert_equal(arch_current_thread()->base.global_lock_count, 0,
891 "thread global lock cnt %d is incorrect",
892 arch_current_thread()->base.global_lock_count);
893 }
894
895 /**
896 * @brief Test scenario that a thread release the global lock
897 *
898 * @ingroup kernel_smp_tests
899 *
900 * @details Validate the scenario that makes the internal SMP API
901 * z_smp_release_global_lock() be called.
902 */
903 ZTEST(smp, test_smp_release_global_lock)
904 {
905 k_mutex_init(&smutex);
906
907 tinfo[0].tid =
908 k_thread_create(&tthread[0], tstack[0], STACK_SIZE,
909 t1_mutex_lock,
910 &smutex, NULL, NULL,
911 K_PRIO_PREEMPT(5),
912 K_INHERIT_PERMS, K_NO_WAIT);
913
914 tinfo[1].tid =
915 k_thread_create(&tthread[1], tstack[1], STACK_SIZE,
916 t2_mutex_lock,
917 &smutex, NULL, NULL,
918 K_PRIO_PREEMPT(3),
919 K_INHERIT_PERMS, K_MSEC(1));
920
921 /* Hold one of the CPUs to ensure the context switch we want
922 * can happen on another CPU.
923 */
924 k_busy_wait(20000);
925
926 k_thread_join(tinfo[1].tid, K_FOREVER);
927 k_thread_join(tinfo[0].tid, K_FOREVER);
928 cleanup_resources();
929 }
930
931 #define LOOP_COUNT ((int)(20000 * RUN_FACTOR))
932
933 enum sync_t {
934 LOCK_IRQ,
935 LOCK_SEM,
936 LOCK_MUTEX
937 };
938
939 static int global_cnt;
940 static struct k_mutex smp_mutex;
941
942 static void (*sync_lock)(void *);
943 static void (*sync_unlock)(void *);
944
945 static void sync_lock_dummy(void *k)
946 {
947 /* no sync lock used */
948 }
949
950 static void sync_lock_irq(void *k)
951 {
952 *((unsigned int *)k) = irq_lock();
953 }
954
955 static void sync_unlock_irq(void *k)
956 {
957 irq_unlock(*(unsigned int *)k);
958 }
959
960 static void sync_lock_sem(void *k)
961 {
962 k_sem_take(&smp_sem, K_FOREVER);
963 }
964
965 static void sync_unlock_sem(void *k)
966 {
967 k_sem_give(&smp_sem);
968 }
969
970 static void sync_lock_mutex(void *k)
971 {
972 k_mutex_lock(&smp_mutex, K_FOREVER);
973 }
974
975 static void sync_unlock_mutex(void *k)
976 {
977 k_mutex_unlock(&smp_mutex);
978 }
979
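/* Select the synchronization primitive (irq lock, semaphore, mutex
 * or none) that inc_global_cnt() will use.
 */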
980 static void sync_init(int lock_type)
981 {
982 switch (lock_type) {
983 case LOCK_IRQ:
984 sync_lock = sync_lock_irq;
985 sync_unlock = sync_unlock_irq;
986 break;
987 case LOCK_SEM:
988 sync_lock = sync_lock_sem;
989 sync_unlock = sync_unlock_sem;
990 k_sem_init(&smp_sem, 1, 3);
991 break;
992 case LOCK_MUTEX:
993 sync_lock = sync_lock_mutex;
994 sync_unlock = sync_unlock_mutex;
995 k_mutex_init(&smp_mutex);
996 break;
997
998 default:
999 sync_lock = sync_unlock = sync_lock_dummy;
1000 }
1001 }
1002
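/* Under the selected lock, update the shared counter with a net
 * effect of +1 per loop iteration.
 */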
1003 static void inc_global_cnt(void *a, void *b, void *c)
1004 {
1005 int key;
1006
1007 for (int i = 0; i < LOOP_COUNT; i++) {
1008
1009 sync_lock(&key);
1010
1011 global_cnt++;
1012 global_cnt--;
1013 global_cnt++;
1014
1015 sync_unlock(&key);
1016 }
1017 }
1018
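/* Run three counting threads concurrently with the selected lock
 * type and return whether the final count matches the expected total.
 */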
1019 static int run_concurrency(void *p1, void *p2, void *p3)
1020 {
1021 ARG_UNUSED(p3);
1022
1023 int type = POINTER_TO_INT(p1);
1024 k_thread_entry_t func = p2;
1025 uint32_t start_t, end_t;
1026
1027 sync_init(type);
1028 global_cnt = 0;
1029 start_t = k_cycle_get_32();
1030
1031 tinfo[0].tid =
1032 k_thread_create(&tthread[0], tstack[0], STACK_SIZE,
1033 func,
1034 NULL, NULL, NULL,
1035 K_PRIO_PREEMPT(1),
1036 K_INHERIT_PERMS, K_NO_WAIT);
1037
1038 tinfo[1].tid =
1039 k_thread_create(&tthread[1], tstack[1], STACK_SIZE,
1040 func,
1041 NULL, NULL, NULL,
1042 K_PRIO_PREEMPT(1),
1043 K_INHERIT_PERMS, K_NO_WAIT);
1044
1045 k_tid_t tid =
1046 k_thread_create(&t2, t2_stack, T2_STACK_SIZE,
1047 func,
1048 NULL, NULL, NULL,
1049 K_PRIO_PREEMPT(1),
1050 K_INHERIT_PERMS, K_NO_WAIT);
1051
1052 k_thread_join(tinfo[0].tid, K_FOREVER);
1053 k_thread_join(tinfo[1].tid, K_FOREVER);
1054 k_thread_join(tid, K_FOREVER);
1055 cleanup_resources();
1056
1057 end_t = k_cycle_get_32();
1058
1059 printk("type %d: cnt %d, spent %u ms\n", type, global_cnt,
1060 k_cyc_to_ms_ceil32(end_t - start_t));
1061
1062 return global_cnt == (LOOP_COUNT * 3);
1063 }
1064
1065 /**
1066 * @brief Test if the concurrency of SMP works or not
1067 *
1068 * @ingroup kernel_smp_tests
1069 *
1070 * @details Validate that the global lock and unlock APIs of SMP are thread-safe.
1071 * We create 3 threads to increase the global count on different cpus and
1072 * they each do locking then unlocking for LOOP_COUNT times. There shall be no
1073 * deadlock and the total global count shall be 3 * LOOP_COUNT.
1074 *
1075 * We show the 4 kinds of scenarios:
1076 * - No lock used
1077 * - Use global irq lock
1078 * - Use semaphore
1079 * - Use mutex
1080 */
1081 ZTEST(smp, test_inc_concurrency)
1082 {
1083 /* increasing global var with irq lock */
1084 zassert_true(run_concurrency(INT_TO_POINTER(LOCK_IRQ), inc_global_cnt, NULL),
1085 "total count %d is wrong(i)", global_cnt);
1086
1087 /* increasing global var with semaphore */
1088 zassert_true(run_concurrency(INT_TO_POINTER(LOCK_SEM), inc_global_cnt, NULL),
1089 "total count %d is wrong(s)", global_cnt);
1090
1091 /* increasing global var with mutex */
1092 zassert_true(run_concurrency(INT_TO_POINTER(LOCK_MUTEX), inc_global_cnt, NULL),
1093 "total count %d is wrong(M)", global_cnt);
1094 }
1095
1096 /**
1097 * @brief Torture test for context switching code
1098 *
1099 * @ingroup kernel_smp_tests
1100 *
1101 * @details Leverage the polling API to stress test the context switching code.
1102 * This test will hammer all the CPUs with thread swapping requests.
1103 */
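/* Consumer thread: block on its poll event, check the signal result,
 * then reset the event and signal for the next round.
 */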
1104 static void process_events(void *arg0, void *arg1, void *arg2)
1105 {
1106 ARG_UNUSED(arg1);
1107 ARG_UNUSED(arg2);
1108
1109 uintptr_t id = (uintptr_t) arg0;
1110
1111 while (1) {
1112 k_poll(&tevent[id], 1, K_FOREVER);
1113
1114 if (tevent[id].signal->result != 0x55) {
1115 ztest_test_fail();
1116 }
1117
1118 tevent[id].signal->signaled = 0;
1119 tevent[id].state = K_POLL_STATE_NOT_READY;
1120
1121 k_poll_signal_reset(&tsignal[id]);
1122 }
1123 }
1124
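/* Producer thread: continuously raise every poll signal, keeping all
 * CPUs busy waking up the consumer threads.
 */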
1125 static void signal_raise(void *arg0, void *arg1, void *arg2)
1126 {
1127 unsigned int num_threads = arch_num_cpus();
1128
1129 while (1) {
1130 for (uintptr_t i = 0; i < num_threads; i++) {
1131 k_poll_signal_raise(&tsignal[i], 0x55);
1132 }
1133 }
1134 }
1135
1136 ZTEST(smp, test_smp_switch_torture)
1137 {
1138 unsigned int num_threads = arch_num_cpus();
1139
1140 if (CONFIG_SMP_TEST_RUN_FACTOR == 0) {
1141 /* If CONFIG_SMP_TEST_RUN_FACTOR is zero,
1142 * the switch torture test is effectively
1143 * not doing anything as the k_sleep()
1144 * below is not going to sleep at all,
1145 * and all created threads are being
1146 * terminated (almost) immediately after
1147 * creation. So if run factor is zero,
1148 * mark the test as skipped.
1149 */
1150 ztest_test_skip();
1151 }
1152
1153 for (uintptr_t i = 0; i < num_threads; i++) {
1154 k_poll_signal_init(&tsignal[i]);
1155 k_poll_event_init(&tevent[i], K_POLL_TYPE_SIGNAL,
1156 K_POLL_MODE_NOTIFY_ONLY, &tsignal[i]);
1157
1158 k_thread_create(&tthread[i], tstack[i], STACK_SIZE,
1159 process_events,
1160 (void *) i, NULL, NULL, K_PRIO_PREEMPT(i + 1),
1161 K_INHERIT_PERMS, K_NO_WAIT);
1162 }
1163
1164 k_thread_create(&t2, t2_stack, T2_STACK_SIZE, signal_raise,
1165 NULL, NULL, NULL, K_PRIO_COOP(2), 0, K_NO_WAIT);
1166
1167 k_sleep(K_MSEC(SLEEP_MS_LONG));
1168
1169 k_thread_abort(&t2);
1170 k_thread_join(&t2, K_FOREVER);
1171 for (uintptr_t i = 0; i < num_threads; i++) {
1172 k_thread_abort(&tthread[i]);
1173 k_thread_join(&tthread[i], K_FOREVER);
1174 }
1175 }
1176
1177 /**
1178 * @brief Torture test for cpu affinity code
1179 *
1180 * @ingroup kernel_smp_tests
1181 *
1182 * @details Pin each thread to a specific cpu. Once a thread gets the cpu, check
1183 * that the cpu id is correct and then the thread gives up the cpu.
1184 */
1185 #ifdef CONFIG_SCHED_CPU_MASK
1186 static void check_affinity(void *arg0, void *arg1, void *arg2)
1187 {
1188 ARG_UNUSED(arg1);
1189 ARG_UNUSED(arg2);
1190
1191 int affinity = POINTER_TO_INT(arg0);
1192 int counter = 30;
1193
1194 while (counter != 0) {
1195 zassert_equal(affinity, curr_cpu(), "Affinity test failed.");
1196 counter--;
1197 k_yield();
1198 }
1199 }
1200
1201 ZTEST(smp, test_smp_affinity)
1202 {
1203 int num_threads = arch_num_cpus();
1204
1205 for (int i = 0; i < num_threads; ++i) {
1206 k_thread_create(&tthread[i], tstack[i],
1207 STACK_SIZE, check_affinity,
1208 INT_TO_POINTER(i), NULL, NULL,
1209 0, 0, K_FOREVER);
1210
1211 k_thread_cpu_pin(&tthread[i], i);
1212 k_thread_start(&tthread[i]);
1213 }
1214
1215 for (int i = 0; i < num_threads; i++) {
1216 k_thread_join(&tthread[i], K_FOREVER);
1217 }
1218 }
1219 #endif
1220
1221 static void *smp_tests_setup(void)
1222 {
1223 /* Sleep a bit to guarantee that both CPUs enter an idle
1224 * thread from which they can exit correctly to run the main
1225 * test.
1226 */
1227 k_sleep(K_MSEC(10));
1228
1229 return NULL;
1230 }
1231
1232 ZTEST_SUITE(smp, NULL, smp_tests_setup, NULL, NULL, NULL);
1233