1 /*
2 * Copyright (c) 2024 Intel Corporation.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/tc_util.h>
8 #include <zephyr/ztest.h>
9 #include <zephyr/kernel.h>
10 #include <zephyr/irq_offload.h>
11
12 #if CONFIG_MP_MAX_NUM_CPUS < 2
13 #error "SMP test requires at least two CPUs!"
14 #endif
15
16 #define NUM_THREADS CONFIG_MP_MAX_NUM_CPUS
17 #define STACK_SIZE 1024 + CONFIG_TEST_EXTRA_STACK_SIZE
18
19 K_THREAD_STACK_ARRAY_DEFINE(thread_stack, NUM_THREADS, STACK_SIZE);
20 struct k_thread thread[NUM_THREADS];
21
22 struct isr_args {
23 volatile bool *sync;
24 volatile bool *wait;
25 struct k_thread *target;
26 };
27
28 volatile bool sync[NUM_THREADS];
29
30 struct isr_args isr_args[NUM_THREADS];
31
isr(const void * args)32 static void isr(const void *args)
33 {
34 const struct isr_args *var = args;
35
36 *(var->sync) = true; /* Flag that ISR is in progress */
37
38 while (*(var->wait) == false) { /* Wait upon dependent CPU */
39 }
40
41 k_thread_abort(var->target); /* Abort thread on another CPU */
42
43 /*
44 * Give other CPUs time to also call k_thread_abort() to establish
45 * the circular dependency that this test is designed to exercise.
46 * On platforms with large emulator/simulator quantum sizes, without
47 * this delay one thread may complete its ISR and return before
48 * another thread even calls k_thread_abort() on it.
49 */
50 k_busy_wait(100);
51 }
52
thread_entry(void * p1,void * p2,void * p3)53 static void thread_entry(void *p1, void *p2, void *p3)
54 {
55 unsigned int index = (unsigned int)(uintptr_t)p1;
56 struct isr_args *var = p2;
57
58 printk("Thread %u started\n", index);
59
60 irq_offload(isr, var);
61
62 zassert_true(false, "Thread %u did not abort!", index);
63 }
64
ZTEST(smp_abort,test_smp_thread_abort_deadlock)65 ZTEST(smp_abort, test_smp_thread_abort_deadlock)
66 {
67 unsigned int i;
68 int priority;
69
70 priority = k_thread_priority_get(k_current_get());
71
72 /*
73 * Each thread will run on its own CPU and invoke an ISR.
74 * Each ISR will wait until the next thread enters its ISR
75 * before attempting to abort that thread. This ensures that
76 * we have a scenario where each CPU is attempting to abort
77 * the active thread that was interrupted by an ISR.
78 */
79
80 for (i = 0; i < NUM_THREADS; i++) {
81 isr_args[i].sync = &sync[i];
82 isr_args[i].wait = &sync[(i + 1) % NUM_THREADS];
83 isr_args[i].target = &thread[(i + 1) % NUM_THREADS];
84 }
85
86 for (i = 0; i < NUM_THREADS; i++) {
87
88 k_thread_create(&thread[i], thread_stack[i],
89 STACK_SIZE, thread_entry,
90 (void *)(uintptr_t)i, &isr_args[i], NULL,
91 priority - 1, 0, K_NO_WAIT);
92 }
93
94 for (i = 0; i < NUM_THREADS; i++) {
95 k_thread_join(&thread[i], K_FOREVER);
96 }
97
98 printk("Done!\n");
99 }
100
/* Register the suite; no predicate, fixture, or setup/teardown hooks needed */
ZTEST_SUITE(smp_abort, NULL, NULL, NULL, NULL, NULL);
102