/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr.h>
#include <tc_util.h>
#include <ztest.h>
#include <kernel.h>
#include <ksched.h>
#include <kernel_structs.h>

#if CONFIG_MP_NUM_CPUS < 2
#error SMP test requires at least two CPUs!
#endif

#define T2_STACK_SIZE (2048 + CONFIG_TEST_EXTRA_STACKSIZE)
#define STACK_SIZE (384 + CONFIG_TEST_EXTRA_STACKSIZE)
#define DELAY_US 50000
#define TIMEOUT 1000
#define EQUAL_PRIORITY 1
#define TIME_SLICE_MS 500
#define THREAD_DELAY 1
#define SLEEP_MS_LONG 15000

struct k_thread t2;
K_THREAD_STACK_DEFINE(t2_stack, T2_STACK_SIZE);

volatile int t2_count;
volatile int sync_count = -1;

static int main_thread_id;
static int child_thread_id;
volatile int rv;

K_SEM_DEFINE(cpuid_sema, 0, 1);
K_SEM_DEFINE(sema, 0, 1);
static struct k_mutex smutex;
static struct k_sem smp_sem;

#define THREADS_NUM CONFIG_MP_NUM_CPUS

struct thread_info {
	k_tid_t tid;
	int executed;
	int priority;
	int cpu_id;
};
static ZTEST_BMEM volatile struct thread_info tinfo[THREADS_NUM];
static struct k_thread tthread[THREADS_NUM];
static K_THREAD_STACK_ARRAY_DEFINE(tstack, THREADS_NUM, STACK_SIZE);

static volatile int thread_started[THREADS_NUM - 1];

static struct k_poll_signal tsignal[THREADS_NUM];
static struct k_poll_event tevent[THREADS_NUM];

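/* Return the id of the CPU this call runs on. Interrupts are locked so
 * the thread cannot migrate while the id is being read; the value may
 * of course be stale by the time the caller uses it.
 */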
static int curr_cpu(void)
{
	unsigned int k = arch_irq_lock();
	int ret = arch_curr_cpu()->id;

	arch_irq_unlock(k);
	return ret;
}

/**
 * @brief Tests for SMP
 * @defgroup kernel_smp_tests SMP Tests
 * @ingroup all_tests
 * @{
 * @}
 */

/**
 * @defgroup kernel_smp_integration_tests SMP Integration Tests
 * @ingroup all_tests
 * @{
 * @}
 */

/**
 * @defgroup kernel_smp_module_tests SMP Module Tests
 * @ingroup all_tests
 * @{
 * @}
 */

static void t2_fn(void *a, void *b, void *c)
{
	ARG_UNUSED(a);
	ARG_UNUSED(b);
	ARG_UNUSED(c);

	t2_count = 0;

	/* This thread simply increments a counter while spinning on
	 * the CPU. The idea is that it will always be iterating
	 * faster than the other thread so long as it is fairly
	 * scheduled (and it's designed to NOT be fairly schedulable
	 * without a separate CPU!), so the main thread can always
	 * check its progress.
	 */
	while (1) {
		k_busy_wait(DELAY_US);
		t2_count++;
	}
}

/**
 * @brief Verify SMP with 2 cooperative threads
 *
 * @ingroup kernel_smp_tests
 *
 * @details Multi processing is verified by checking whether
 * 2 cooperative threads run simultaneously on different cores
 */
void test_smp_coop_threads(void)
{
	int i, ok = 1;

	k_tid_t tid = k_thread_create(&t2, t2_stack, T2_STACK_SIZE, t2_fn,
				      NULL, NULL, NULL,
				      K_PRIO_COOP(2), 0, K_NO_WAIT);

	/* Wait for the other thread (on a separate CPU) to actually
	 * start running. We want synchrony to be as perfect as
	 * possible.
	 */
	t2_count = -1;
	while (t2_count == -1) {
	}

	for (i = 0; i < 10; i++) {
		/* Wait slightly longer than the other thread so our
		 * count will always be lower
		 */
		k_busy_wait(DELAY_US + (DELAY_US / 8));

		if (t2_count <= i) {
			ok = 0;
			break;
		}
	}

	k_thread_abort(tid);
	k_thread_join(tid, K_FOREVER);
	zassert_true(ok, "SMP test failed");
}

static void child_fn(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);
	int parent_cpu_id = POINTER_TO_INT(p1);

	zassert_true(parent_cpu_id != curr_cpu(),
		     "Parent isn't on other core");

	sync_count++;
	k_sem_give(&cpuid_sema);
}

/**
 * @brief Verify CPU IDs of threads in SMP
 *
 * @ingroup kernel_smp_tests
 *
 * @details From a child thread, verify that the parent thread is
 * running on a different core
 */
void test_cpu_id_threads(void)
{
	/* Make sure idle thread runs on each core */
	k_sleep(K_MSEC(1000));

	int parent_cpu_id = curr_cpu();

	k_tid_t tid = k_thread_create(&t2, t2_stack, T2_STACK_SIZE, child_fn,
				      INT_TO_POINTER(parent_cpu_id), NULL,
				      NULL, K_PRIO_PREEMPT(2), 0, K_NO_WAIT);

	while (sync_count == -1) {
	}
	k_sem_take(&cpuid_sema, K_FOREVER);

	k_thread_abort(tid);
	k_thread_join(tid, K_FOREVER);
}

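/* Generic worker entry: record which CPU it executed on and mark itself
 * as having run, then spin for a short while to keep its CPU busy.
 */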
static void thread_entry(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);
	int thread_num = POINTER_TO_INT(p1);
	int count = 0;

	tinfo[thread_num].executed = 1;
	tinfo[thread_num].cpu_id = curr_cpu();

	while (count++ < 5) {
		k_busy_wait(DELAY_US);
	}
}

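/* Busy-wait until every spawned thread except the last one has
 * terminated (its thread_state has _THREAD_DEAD set), then wait a
 * little longer to let the scheduler settle.
 */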
static void spin_for_threads_exit(void)
{
	for (int i = 0; i < THREADS_NUM - 1; i++) {
		volatile uint8_t *p = &tinfo[i].tid->base.thread_state;

		while (!(*p & _THREAD_DEAD)) {
		}
	}
	k_busy_wait(DELAY_US);
}

static void spawn_threads(int prio, int thread_num, int equal_prio,
			  k_thread_entry_t thread_entry, int delay)
{
	int i;

	/* Spawn threads of priority higher than
	 * the previously created thread
	 */
	for (i = 0; i < thread_num; i++) {
		if (equal_prio) {
			tinfo[i].priority = prio;
		} else {
			/* Increase priority for each thread */
			tinfo[i].priority = prio - 1;
			prio = tinfo[i].priority;
		}
		tinfo[i].tid = k_thread_create(&tthread[i], tstack[i],
					       STACK_SIZE, thread_entry,
					       INT_TO_POINTER(i), NULL, NULL,
					       tinfo[i].priority, 0,
					       K_MSEC(delay));
		if (delay) {
			/* Increase delay for each thread */
			delay = delay + 10;
		}
	}
}

static void abort_threads(int num)
{
	for (int i = 0; i < num; i++) {
		k_thread_abort(tinfo[i].tid);
	}

	for (int i = 0; i < num; i++) {
		k_thread_join(tinfo[i].tid, K_FOREVER);
	}
}

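/* Reset the per-thread bookkeeping so the next test starts clean. */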
static void cleanup_resources(void)
{
	for (int i = 0; i < THREADS_NUM; i++) {
		tinfo[i].tid = 0;
		tinfo[i].executed = 0;
		tinfo[i].priority = 0;
	}
}

/**
 * @brief Test that cooperative threads are not preempted
 *
 * @ingroup kernel_smp_tests
 *
 * @details Spawn cooperative threads equal to number of cores
 * supported. Main thread will already be running on 1 core.
 * Check if the last thread created preempts any threads
 * already running.
 */
void test_coop_resched_threads(void)
{
	/* Spawn threads equal to number of cores,
	 * since we don't give up current CPU, last thread
	 * will not get scheduled
	 */
	spawn_threads(K_PRIO_COOP(10), THREADS_NUM, !EQUAL_PRIORITY,
		      &thread_entry, THREAD_DELAY);

	/* Wait for some time to let other core's thread run */
	k_busy_wait(DELAY_US);


	/* Verify that cooperative threads are not preempted by
	 * checking the last thread's execution status. We know that
	 * all threads got rescheduled on other cores except the last
	 * one.
	 */
	for (int i = 0; i < THREADS_NUM - 1; i++) {
		zassert_true(tinfo[i].executed == 1,
			     "cooperative thread %d didn't run", i);
	}
	zassert_true(tinfo[THREADS_NUM - 1].executed == 0,
		     "cooperative thread is preempted");

	/* Abort threads created */
	abort_threads(THREADS_NUM);
	cleanup_resources();
}

/**
 * @brief Test preemptibility of preemptive threads
 *
 * @ingroup kernel_smp_tests
 *
 * @details Create preemptive threads and let them run on other
 * cores, then verify that lower priority threads are preempted
 * when threads of higher priority are spawned
 */
void test_preempt_resched_threads(void)
{
	/* Spawn threads equal to number of cores,
	 * lower priority thread should
	 * be preempted by higher ones
	 */
	spawn_threads(K_PRIO_PREEMPT(10), THREADS_NUM, !EQUAL_PRIORITY,
		      &thread_entry, THREAD_DELAY);

	spin_for_threads_exit();

	for (int i = 0; i < THREADS_NUM; i++) {
		zassert_true(tinfo[i].executed == 1,
			     "preemptive thread %d didn't run", i);
	}

	/* Abort threads created */
	abort_threads(THREADS_NUM);
	cleanup_resources();
}

/**
 * @brief Validate behavior of thread when it yields
 *
 * @ingroup kernel_smp_tests
 *
 * @details Spawn cooperative threads equal to number
 * of cores, so the last thread would be pending; call
 * yield() from main thread. Now, all threads must be
 * executed
 */
void test_yield_threads(void)
{
	/* Spawn threads equal to the number
	 * of cores, so the last thread would be
	 * pending.
	 */
	spawn_threads(K_PRIO_COOP(10), THREADS_NUM, !EQUAL_PRIORITY,
		      &thread_entry, !THREAD_DELAY);

	k_yield();
	k_busy_wait(DELAY_US);

	for (int i = 0; i < THREADS_NUM; i++) {
		zassert_true(tinfo[i].executed == 1,
			     "thread %d did not execute", i);

	}

	abort_threads(THREADS_NUM);
	cleanup_resources();
}

/**
 * @brief Test behavior of thread when it sleeps
 *
 * @ingroup kernel_smp_tests
 *
 * @details Spawn cooperative threads and call
 * sleep() from main thread. After the timeout, all
 * threads must have been scheduled.
 */
void test_sleep_threads(void)
{
	spawn_threads(K_PRIO_COOP(10), THREADS_NUM, !EQUAL_PRIORITY,
		      &thread_entry, !THREAD_DELAY);

	k_msleep(TIMEOUT);

	for (int i = 0; i < THREADS_NUM; i++) {
		zassert_true(tinfo[i].executed == 1,
			     "thread %d did not execute", i);
	}

	abort_threads(THREADS_NUM);
	cleanup_resources();
}

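/* Entry for the wakeup test: flag that the thread has started, then
 * sleep for much longer than the test needs so that it only resumes
 * when the main thread calls k_wakeup() on it.
 */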
static void thread_wakeup_entry(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);
	int thread_num = POINTER_TO_INT(p1);

	thread_started[thread_num] = 1;

	k_msleep(DELAY_US * 1000);

	tinfo[thread_num].executed = 1;
}

static void wakeup_on_start_thread(int tnum)
{
	int threads_started = 0, i;

	/* For each thread, spin waiting for it to first flag that
	 * it's going to sleep, and then that it's actually blocked
	 */
	for (i = 0; i < tnum; i++) {
		while (thread_started[i] == 0) {
		}
		while (!z_is_thread_prevented_from_running(tinfo[i].tid)) {
		}
	}

	for (i = 0; i < tnum; i++) {
		if (thread_started[i] == 1 && threads_started <= tnum) {
			threads_started++;
			k_wakeup(tinfo[i].tid);
		}
	}
	zassert_equal(threads_started, tnum,
		      "Not all threads have started");
}

static void check_wokeup_threads(int tnum)
{
	int threads_woke_up = 0, i;

	/* k_wakeup() isn't synchronous, give the other CPU time to
	 * schedule them
	 */
	k_busy_wait(200000);

	for (i = 0; i < tnum; i++) {
		if (tinfo[i].executed == 1 && threads_woke_up <= tnum) {
			threads_woke_up++;
		}
	}
	zassert_equal(threads_woke_up, tnum, "Threads did not wake up");
}

/**
 * @brief Test behavior of wakeup() in SMP case
 *
 * @ingroup kernel_smp_tests
 *
 * @details Spawn a number of threads equal to the number of
 * remaining cores and let them sleep for a while. Call
 * wakeup() on those threads from the parent thread and check
 * that they are all running
 */
void test_wakeup_threads(void)
{
	/* Spawn threads to run on all remaining cores */
	spawn_threads(K_PRIO_COOP(10), THREADS_NUM - 1, !EQUAL_PRIORITY,
		      &thread_wakeup_entry, !THREAD_DELAY);

	/* Check if all the threads have started, then call wakeup */
	wakeup_on_start_thread(THREADS_NUM - 1);

	/* Count threads which are woken up */
	check_wokeup_threads(THREADS_NUM - 1);

	/* Abort all threads and cleanup */
	abort_threads(THREADS_NUM - 1);
	cleanup_resources();
}

/* a thread for testing getting the current cpu */
static void thread_get_cpu_entry(void *p1, void *p2, void *p3)
{
	int bsp_id = *(int *)p1;
	int cpu_id = -1;

	/* get the current cpu for the running thread */
	_cpu_t *curr_cpu = arch_curr_cpu();

	/**TESTPOINT: call arch_curr_cpu() to get cpu struct */
	zassert_true(curr_cpu != NULL,
		     "test failed to get current cpu.");

	cpu_id = curr_cpu->id;

	zassert_true(bsp_id != cpu_id,
		     "should not be the same as our BSP core");

	/* loop forever to ensure running on this CPU */
	while (1) {
		k_busy_wait(DELAY_US);
	}
}

/**
 * @brief Test getting a pointer to the current CPU
 *
 * @ingroup kernel_smp_module_tests
 *
 * @details
 * Test Objective:
 * - Verify that the architecture layer provides a mechanism to return a
 * pointer to the kernel CPU record of the CPU it is running on.
 * We call arch_curr_cpu() and read its "id" member, in the main thread and
 * in a spawned thread separately, and compare them. They shall be different
 * in an SMP environment.
 *
 * Testing techniques:
 * - Interface testing, function and black box testing,
 * dynamic analysis and testing
 *
 * Prerequisite Conditions:
 * - CONFIG_SMP=y, and the HW platform must support SMP.
 *
 * Input Specifications:
 * - N/A
 *
 * Test Procedure:
 * -# In the main thread, call arch_curr_cpu() to get its member "id", then
 * store it in a local variable.
 * -# Spawn a thread t2 and pass the stored id to it, then call
 * k_busy_wait() for 50us so the thread can run without being swapped out.
 * -# In thread t2, call arch_curr_cpu() to get a pointer to the current CPU
 * data, then check that it is not NULL.
 * -# Store the member id read through that pointer into the variable cpu_id.
 * -# Check that cpu_id is not equal to the bsp_id passed into the thread.
 * -# Call k_busy_wait() and loop forever.
 * -# In the main thread, terminate thread t2 before exiting.
 *
 * Expected Test Result:
 * - The pointer to the current CPU data obtained from the function call is
 * correct.
 *
 * Pass/Fail Criteria:
 * - Successful if the checks in steps 3 and 5 pass.
 * - Failure if any of the checks in steps 3 and 5 fails.
 *
 * Assumptions and Constraints:
 * - This test is for platforms that support SMP; in our current scenario
 * only x86_64, arc and xtensa are supported.
 *
 * @see arch_curr_cpu()
 */
void test_get_cpu(void)
{
	k_tid_t thread_id;

	/* get the current cpu number */
	int cpu_id = arch_curr_cpu()->id;

	thread_id = k_thread_create(&t2, t2_stack, T2_STACK_SIZE,
				    (k_thread_entry_t)thread_get_cpu_entry,
				    &cpu_id, NULL, NULL,
				    K_PRIO_COOP(2),
				    K_INHERIT_PERMS, K_NO_WAIT);

	k_busy_wait(DELAY_US);

	k_thread_abort(thread_id);
	k_thread_join(thread_id, K_FOREVER);
}

#ifdef CONFIG_TRACE_SCHED_IPI
/* global variable for testing IPI delivery */
static volatile int sched_ipi_has_called;

void z_trace_sched_ipi(void)
{
	sched_ipi_has_called++;
}
#endif

/**
 * @brief Test interprocessor interrupt
 *
 * @ingroup kernel_smp_integration_tests
 *
 * @details
 * Test Objective:
 * - Verify that the architecture layer provides a mechanism to issue an
 * interprocessor interrupt to all other CPUs in the system, which invokes
 * the scheduler IPI. We simply add a hook in z_sched_ipi() in order to
 * check that it is called on a CPU other than the caller when
 * arch_sched_ipi() is called.
 *
 * Testing techniques:
 * - Interface testing, function and black box testing,
 * dynamic analysis and testing
 *
 * Prerequisite Conditions:
 * - CONFIG_SMP=y, and the HW platform must support SMP.
 * - CONFIG_TRACE_SCHED_IPI=y was set.
 *
 * Input Specifications:
 * - N/A
 *
 * Test Procedure:
 * -# In the main thread, set the global variable sched_ipi_has_called to zero.
 * -# Call arch_sched_ipi(), then sleep for 100ms.
 * -# In the z_sched_ipi() handler, increment sched_ipi_has_called.
 * -# In the main thread, check that sched_ipi_has_called is not zero.
 * -# Repeat steps 1 to 4 three times.
 *
 * Expected Test Result:
 * - The scheduler IPI hook is invoked on another CPU after arch_sched_ipi()
 * is called.
 *
 * Pass/Fail Criteria:
 * - Successful if the check in step 4 passes on every iteration.
 * - Failure if the check in step 4 fails on any iteration.
 *
 * Assumptions and Constraints:
 * - This test is for platforms that support SMP; in our current scenario
 * only x86_64 and arc are supported.
 *
 * @see arch_sched_ipi()
 */
void test_smp_ipi(void)
{
#ifndef CONFIG_TRACE_SCHED_IPI
	ztest_test_skip();
#endif

	TC_PRINT("cpu num=%d", CONFIG_MP_NUM_CPUS);

	for (int i = 0; i < 3; i++) {
		/* issue a sched ipi to tell other CPU to run thread */
		sched_ipi_has_called = 0;
		arch_sched_ipi();

		/* Need to wait longer than we think, loaded CI
		 * systems need to wait for host scheduling to run the
		 * other CPU's thread.
		 */
		k_msleep(100);

		/**TESTPOINT: check if we entered our IPI interrupt handler */
		zassert_true(sched_ipi_has_called != 0,
			     "did not receive IPI.(%d)",
			     sched_ipi_has_called);
	}
}

void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
{
	static int trigger;

	if (reason != K_ERR_KERNEL_OOPS) {
		printk("wrong error reason\n");
		k_fatal_halt(reason);
	}

	if (trigger == 0) {
		child_thread_id = curr_cpu();
		trigger++;
	} else {
		main_thread_id = curr_cpu();

		/* Verify that the fatal errors happened on different cores */
		zassert_true(main_thread_id != child_thread_id,
			     "fatal on the same core");
	}
}

void entry_oops(void *p1, void *p2, void *p3)
{
	k_oops();
	TC_ERROR("SHOULD NEVER SEE THIS\n");
}

/**
 * @brief Test that fatal errors can be triggered on different cores
 *
 * @details When CONFIG_SMP is enabled, on some multiprocessor
 * platforms exceptions can be triggered on different cores at
 * the same time.
 *
 * @ingroup kernel_common_tests
 */
void test_fatal_on_smp(void)
{
	/* Create a child thread and trigger a crash */
	k_thread_create(&t2, t2_stack, T2_STACK_SIZE, entry_oops,
			NULL, NULL, NULL,
			K_PRIO_PREEMPT(2), 0, K_NO_WAIT);

	/* hold the cpu and wait for the child thread to trigger its exception */
	k_busy_wait(2000);

	/* Manually trigger the crash in the main thread */
	entry_oops(NULL, NULL, NULL);

	/* should not be here */
	ztest_test_fail();
}

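/* Work handler: record which CPU the system workqueue thread ran on. */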
static void workq_handler(struct k_work *work)
{
	child_thread_id = curr_cpu();
}

/**
 * @brief Test that the system workq runs on a different core
 *
 * @details When CONFIG_SMP is enabled, the workq can run
 * on a different core.
 *
 * @ingroup kernel_common_tests
 */
void test_workq_on_smp(void)
{
	static struct k_work work;

	k_work_init(&work, workq_handler);

	/* submit the work item to the system workq */
	k_work_submit(&work);

	/* Wait for some time to let the other core's thread run */
	k_busy_wait(DELAY_US);

	/* check that the work has finished */
	zassert_equal(k_work_busy_get(&work), 0, NULL);

	main_thread_id = curr_cpu();

	/* Verify the ztest thread and system workq run on different cores */
	zassert_true(main_thread_id != child_thread_id,
		     "system workq run on the same core");
}

static void t1_mutex_lock(void *p1, void *p2, void *p3)
{
	/* t1 will get mutex first */
	k_mutex_lock((struct k_mutex *)p1, K_FOREVER);

	k_msleep(2);

	k_mutex_unlock((struct k_mutex *)p1);
}

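/* t2 blocks on the mutex already held by t1; its global_lock_count
 * must remain 0 across the resulting context switches.
 */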
static void t2_mutex_lock(void *p1, void *p2, void *p3)
{
	zassert_equal(_current->base.global_lock_count, 0,
		      "thread global lock cnt %d is incorrect",
		      _current->base.global_lock_count);

	k_mutex_lock((struct k_mutex *)p1, K_FOREVER);

	zassert_equal(_current->base.global_lock_count, 0,
		      "thread global lock cnt %d is incorrect",
		      _current->base.global_lock_count);

	k_mutex_unlock((struct k_mutex *)p1);

	/**TESTPOINT: z_smp_release_global_lock() has been called during
	 * the context switch but global_lock_count has not been decreased
	 * because no irq_lock() was called.
	 */
	zassert_equal(_current->base.global_lock_count, 0,
		      "thread global lock cnt %d is incorrect",
		      _current->base.global_lock_count);
}

/**
 * @brief Test a scenario where a thread releases the global lock
 *
 * @ingroup kernel_smp_tests
 *
 * @details Validate a scenario that causes the internal SMP API
 * z_smp_release_global_lock() to be called.
 */
void test_smp_release_global_lock(void)
{
	k_mutex_init(&smutex);

	tinfo[0].tid =
		k_thread_create(&tthread[0], tstack[0], STACK_SIZE,
				(k_thread_entry_t)t1_mutex_lock,
				&smutex, NULL, NULL,
				K_PRIO_PREEMPT(5),
				K_INHERIT_PERMS, K_NO_WAIT);

	tinfo[1].tid =
		k_thread_create(&tthread[1], tstack[1], STACK_SIZE,
				(k_thread_entry_t)t2_mutex_lock,
				&smutex, NULL, NULL,
				K_PRIO_PREEMPT(3),
				K_INHERIT_PERMS, K_MSEC(1));

	/* Hold one of the CPUs to ensure the context switch we want
	 * happens on another CPU.
	 */
	k_busy_wait(20000);

	k_thread_join(tinfo[1].tid, K_FOREVER);
	k_thread_join(tinfo[0].tid, K_FOREVER);
	cleanup_resources();
}

#define LOOP_COUNT 20000

enum sync_t {
	LOCK_IRQ,
	LOCK_SEM,
	LOCK_MUTEX
};

static int global_cnt;
static struct k_mutex smp_mutex;

static void (*sync_lock)(void *);
static void (*sync_unlock)(void *);

static void sync_lock_dummy(void *k)
{
	/* no sync lock used */
}

static void sync_lock_irq(void *k)
{
	*((unsigned int *)k) = irq_lock();
}

static void sync_unlock_irq(void *k)
{
	irq_unlock(*(unsigned int *)k);
}

static void sync_lock_sem(void *k)
{
	k_sem_take(&smp_sem, K_FOREVER);
}

static void sync_unlock_sem(void *k)
{
	k_sem_give(&smp_sem);
}

static void sync_lock_mutex(void *k)
{
	k_mutex_lock(&smp_mutex, K_FOREVER);
}

static void sync_unlock_mutex(void *k)
{
	k_mutex_unlock(&smp_mutex);
}

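/* Select the lock/unlock pair used by the concurrency test and
 * initialize the corresponding synchronization object.
 */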
static void sync_init(int lock_type)
{
	switch (lock_type) {
	case LOCK_IRQ:
		sync_lock = sync_lock_irq;
		sync_unlock = sync_unlock_irq;
		break;
	case LOCK_SEM:
		sync_lock = sync_lock_sem;
		sync_unlock = sync_unlock_sem;
		k_sem_init(&smp_sem, 1, 3);
		break;
	case LOCK_MUTEX:
		sync_lock = sync_lock_mutex;
		sync_unlock = sync_unlock_mutex;
		k_mutex_init(&smp_mutex);
		break;

	default:
		sync_lock = sync_unlock = sync_lock_dummy;
	}
}

static void inc_global_cnt(void *a, void *b, void *c)
{
	int key;

	for (int i = 0; i < LOOP_COUNT; i++) {

		sync_lock(&key);

		global_cnt++;
		global_cnt--;
		global_cnt++;

		sync_unlock(&key);
	}
}

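/* Run three instances of func concurrently (two on tthread[0..1] and
 * one on t2), wait for all of them to finish, and report whether
 * global_cnt ended up at the expected LOOP_COUNT * 3.
 */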
static int run_concurrency(int type, void *func)
{
	uint32_t start_t, end_t;

	sync_init(type);
	global_cnt = 0;
	start_t = k_cycle_get_32();

	tinfo[0].tid =
		k_thread_create(&tthread[0], tstack[0], STACK_SIZE,
				(k_thread_entry_t)func,
				NULL, NULL, NULL,
				K_PRIO_PREEMPT(1),
				K_INHERIT_PERMS, K_NO_WAIT);

	tinfo[1].tid =
		k_thread_create(&tthread[1], tstack[1], STACK_SIZE,
				(k_thread_entry_t)func,
				NULL, NULL, NULL,
				K_PRIO_PREEMPT(1),
				K_INHERIT_PERMS, K_NO_WAIT);

	k_tid_t tid =
		k_thread_create(&t2, t2_stack, T2_STACK_SIZE,
				(k_thread_entry_t)func,
				NULL, NULL, NULL,
				K_PRIO_PREEMPT(1),
				K_INHERIT_PERMS, K_NO_WAIT);

	k_thread_join(tinfo[0].tid, K_FOREVER);
	k_thread_join(tinfo[1].tid, K_FOREVER);
	k_thread_join(tid, K_FOREVER);
	cleanup_resources();

	end_t = k_cycle_get_32();

	printk("type %d: cnt %d, spend %u ms\n", type, global_cnt,
	       k_cyc_to_ms_ceil32(end_t - start_t));

	return global_cnt == (LOOP_COUNT * 3);
}

/**
 * @brief Test whether the concurrency primitives of SMP work
 *
 * @ingroup kernel_smp_tests
 *
 * @details Validate that the global lock and unlock APIs of SMP are
 * thread-safe. We create 3 threads that increment a global count on
 * different CPUs, and each of them locks then unlocks LOOP_COUNT times.
 * No deadlock shall happen and the total global count shall be
 * 3 * LOOP_COUNT.
 *
 * We exercise 4 kinds of scenarios:
 * - No lock used
 * - Use global irq lock
 * - Use semaphore
 * - Use mutex
 */
void test_inc_concurrency(void)
{
	/* increasing global var with irq lock */
	zassert_true(run_concurrency(LOCK_IRQ, inc_global_cnt),
		     "total count %d is wrong(i)", global_cnt);

	/* increasing global var with semaphore */
	zassert_true(run_concurrency(LOCK_SEM, inc_global_cnt),
		     "total count %d is wrong(s)", global_cnt);

	/* increasing global var with mutex */
	zassert_true(run_concurrency(LOCK_MUTEX, inc_global_cnt),
		     "total count %d is wrong(M)", global_cnt);
}

/**
 * @brief Torture test for context switching code
 *
 * @ingroup kernel_smp_tests
 *
 * @details Leverage the polling API to stress test the context switching code.
 * This test will hammer all the CPUs with thread swapping requests.
 */
static void process_events(void *arg0, void *arg1, void *arg2)
{
	uintptr_t id = (uintptr_t) arg0;

	while (1) {
		k_poll(&tevent[id], 1, K_FOREVER);

		if (tevent[id].signal->result != 0x55) {
			ztest_test_fail();
		}

		tevent[id].signal->signaled = 0;
		tevent[id].state = K_POLL_STATE_NOT_READY;

		k_poll_signal_reset(&tsignal[id]);
	}
}

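/* Keep raising every poll signal as fast as possible so the polling
 * threads are woken continuously, forcing context switches on all CPUs.
 */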
static void signal_raise(void *arg0, void *arg1, void *arg2)
{
	while (1) {
		for (uintptr_t i = 0; i < THREADS_NUM; i++) {
			k_poll_signal_raise(&tsignal[i], 0x55);
		}
	}
}

void test_smp_switch_torture(void)
{
	for (uintptr_t i = 0; i < THREADS_NUM; i++) {
		k_poll_signal_init(&tsignal[i]);
		k_poll_event_init(&tevent[i], K_POLL_TYPE_SIGNAL,
				  K_POLL_MODE_NOTIFY_ONLY, &tsignal[i]);

		k_thread_create(&tthread[i], tstack[i], STACK_SIZE,
				(k_thread_entry_t) process_events,
				(void *) i, NULL, NULL, K_PRIO_PREEMPT(i + 1),
				K_INHERIT_PERMS, K_NO_WAIT);
	}

	k_thread_create(&t2, t2_stack, T2_STACK_SIZE, signal_raise,
			NULL, NULL, NULL, K_PRIO_COOP(2), 0, K_NO_WAIT);

	k_sleep(K_MSEC(SLEEP_MS_LONG));

	k_thread_abort(&t2);
	k_thread_join(&t2, K_FOREVER);
	for (uintptr_t i = 0; i < THREADS_NUM; i++) {
		k_thread_abort(&tthread[i]);
		k_thread_join(&tthread[i], K_FOREVER);
	}
}

void test_main(void)
{
	/* Sleep a bit to guarantee that both CPUs enter an idle
	 * thread from which they can exit correctly to run the main
	 * test.
	 */
	k_sleep(K_MSEC(10));

	ztest_test_suite(smp,
			 ztest_unit_test(test_smp_coop_threads),
			 ztest_unit_test(test_cpu_id_threads),
			 ztest_unit_test(test_coop_resched_threads),
			 ztest_unit_test(test_preempt_resched_threads),
			 ztest_unit_test(test_yield_threads),
			 ztest_unit_test(test_sleep_threads),
			 ztest_unit_test(test_wakeup_threads),
			 ztest_unit_test(test_smp_ipi),
			 ztest_unit_test(test_get_cpu),
			 ztest_unit_test(test_fatal_on_smp),
			 ztest_unit_test(test_workq_on_smp),
			 ztest_unit_test(test_smp_release_global_lock),
			 ztest_unit_test(test_inc_concurrency),
			 ztest_unit_test(test_smp_switch_torture)
			 );
	ztest_run_test_suite(smp);
}