/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr.h>
#include <ztest.h>
#include <string.h> /* for strcmp() */
#include <irq_offload.h>
#include <kernel_structs.h> /* for _THREAD_PENDING */

/* Explicit preemption test.  Works by creating a set of threads in
 * each priority class (cooperative, preemptible, metairq) which all
 * go to sleep.  One of them is then woken up (by a low priority
 * manager thread), wakes up one other thread in turn, and validates
 * that the next thread to run is correct according to the documented
 * scheduling rules.
 *
 * The wakeup test is repeated for all four combinations of the waking
 * thread holding or not holding the scheduler lock, and of a
 * synchronous wake vs. a wake from an (offloaded) interrupt.
 */

#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 1
#error Preemption test requires single-CPU operation
#endif

#if CONFIG_NUM_METAIRQ_PRIORITIES < 1
#error Need one metairq priority
#endif

#if CONFIG_NUM_COOP_PRIORITIES < 2
#error Need two cooperative priorities
#endif

#if CONFIG_NUM_PREEMPT_PRIORITIES < 2
#error Need two preemptible priorities
#endif

/* Two threads at each priority (to test the case of waking up a
 * thread of equal priority). But only one metairq, as it isn't
 * technically legal to have more than one at the same priority.
 */
const enum { METAIRQ, COOP, PREEMPTIBLE } worker_priorities[] = {
	METAIRQ,
	COOP, COOP,
	PREEMPTIBLE, PREEMPTIBLE,
};

#define NUM_THREADS ARRAY_SIZE(worker_priorities)

#define STACK_SIZE (640 + CONFIG_TEST_EXTRA_STACKSIZE)

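/* Set by every worker thread as it runs; inspected after a wakeup to
 * see which thread actually ran last (and hence whether the target
 * preempted the source).
 */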
k_tid_t last_thread;

struct k_thread manager_thread;

K_THREAD_STACK_DEFINE(manager_stack, STACK_SIZE);

struct k_thread worker_threads[NUM_THREADS];

K_THREAD_STACK_ARRAY_DEFINE(worker_stacks, NUM_THREADS, STACK_SIZE);

struct k_sem worker_sems[NUM_THREADS];

/* Command to worker: who to wake up */
int target;

/* Command to worker: use a sched_lock()? */
volatile int do_lock;

/* Command to worker: use irq_offload() to indirect the wakeup? */
volatile int do_irq;

/* Command to worker: sleep after wakeup? */
volatile int do_sleep;

/* Command to worker: yield after wakeup? */
volatile int do_yield;

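/* Given by the manager thread once the full test matrix has been run,
 * releasing test_preempt(), which blocks on it.
 */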
K_SEM_DEFINE(main_sem, 0, 1);

void wakeup_src_thread(int id)
{
	volatile k_tid_t src_thread = &worker_threads[id];

	zassert_true(k_current_get() == &manager_thread, "");

	/* irq_offload() on ARM appears not to do what we want.  It
	 * doesn't appear to go through the normal exception return
	 * path and always returns back into the calling context, so
	 * it can't be used to fake preemption.
	 */
	if (do_irq && IS_ENABLED(CONFIG_ARM)) {
		return;
	}

	last_thread = NULL;

	/* A little bit of white-box inspection: check that all the
	 * worker threads are pending.
	 */
	for (int i = 0; i < NUM_THREADS; i++) {
		k_tid_t th = &worker_threads[i];

		zassert_equal(strcmp(k_thread_state_str(th), "pending"),
			      0, "worker thread %d not pending?", i);
	}

	/* Wake the src worker up */
	last_thread = NULL;
	k_sem_give(&worker_sems[id]);

	while (do_sleep && !(src_thread->base.thread_state & _THREAD_PENDING)) {
		/* spin, waiting on the sleep timeout */
#if defined(CONFIG_ARCH_POSIX)
		/* On the POSIX arch, a busy-wait loop that polls for an
		 * external event has to let the CPU halt, because of the
		 * "infinitely fast clock" assumption: the arch emulates a
		 * single CPU by enabling one thread at a time and treats
		 * code as executing in zero time, so simulated time only
		 * advances while the CPU sleeps.  Without the k_busy_wait()
		 * below, this loop would spin forever.
		 */
		k_busy_wait(50);
#endif
	}

	/* We are lowest priority, SOMEONE must have run */
	zassert_true(!!last_thread, "");
}

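/* Manager thread: lowest priority.  Walks every (src, target) pair of
 * workers and every do_lock/do_irq combination, running the plain,
 * yield and sleep variants of the wakeup test for each.
 */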
void manager(void *p1, void *p2, void *p3)
{
	for (int src = 0; src < NUM_THREADS; src++) {
		for (target = 0; target < NUM_THREADS; target++) {

			if (src == target) {
				continue;
			}

			for (do_lock = 0; do_lock < 2; do_lock++) {
				for (do_irq = 0; do_irq < 2; do_irq++) {
					do_yield = 0;
					do_sleep = 0;
					wakeup_src_thread(src);

					do_yield = 1;
					do_sleep = 0;
					wakeup_src_thread(src);

					do_yield = 0;
					do_sleep = 1;
					wakeup_src_thread(src);
				}
			}
		}
	}

	k_sem_give(&main_sem);
}

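/* irq_offload() handler: gives the target worker's semaphore from
 * interrupt context, so the wakeup is processed on the ISR return path.
 */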
void irq_waker(const void *p)
{
	ARG_UNUSED(p);
	k_sem_give(&worker_sems[target]);
}

#define PRI(n) (worker_priorities[n])

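/* Given the source and target of a wakeup and the thread observed to
 * have run last, check that the scheduler's choice matches the
 * priorities involved and the do_lock/do_yield/do_sleep settings.
 */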
void validate_wakeup(int src, int target, k_tid_t last_thread)
{
	int preempted = &worker_threads[target] == last_thread;
	int src_wins = PRI(src) < PRI(target);
	int target_wins = PRI(target) < PRI(src);
	int tie = PRI(src) == PRI(target);

	if (do_sleep) {
		zassert_true(preempted, "sleeping must let any worker run");
		return;
	}

	if (do_yield) {
		if (preempted) {
			zassert_false(src_wins,
				      "src (pri %d) should not have yielded to tgt (%d)",
				      PRI(src), PRI(target));
		} else {
			zassert_true(src_wins,
				     "src (pri %d) should have yielded to tgt (%d)",
				     PRI(src), PRI(target));
		}

		return;
	}

	if (preempted) {
		zassert_true(target_wins, "preemption must raise priority");
	}

	if (PRI(target) == METAIRQ) {
		zassert_true(preempted,
			     "metairq threads must always preempt");
	} else {
		zassert_false(do_lock && preempted,
			      "threads holding scheduler lock must not be preempted");

		zassert_false(preempted && src_wins,
			      "lower priority threads must never preempt");

		if (!do_lock) {
			zassert_false(!preempted && target_wins,
				      "higher priority thread should have preempted");

			/* The scheduler implements a 'first added to
			 * queue' policy for threads within a single
			 * priority, so the last thread woken up (the
			 * target) must never run before the source
			 * thread.
			 *
			 * NOTE: I checked, and Zephyr doesn't
			 * actually document this behavior, though a
			 * few other tests rely on it IIRC.  IMHO
			 * there are good arguments for either this
			 * policy OR the opposite ("run newly woken
			 * threads first"), and long term we may want
			 * to revisit this particular check and maybe
			 * make the policy configurable.
			 */
			zassert_false(preempted && tie,
				      "tied priority should not preempt");
		}
	}
}

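/* Body of every worker thread.  Each iteration waits on the thread's
 * own semaphore and records itself in last_thread.  When acting as the
 * wakeup source (id != target) it then wakes the target as commanded:
 * optionally under a scheduler lock, via an offloaded IRQ, followed by
 * a yield, or followed by a sleep, and finally validates the observed
 * scheduling behavior.
 */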
void worker(void *p1, void *p2, void *p3)
{
	int id = POINTER_TO_INT(p1);
	k_tid_t curr = &worker_threads[id], prev;

	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	zassert_true(id >= 0 && id < NUM_THREADS, "");
	zassert_true(curr == k_current_get(), "");

	while (1) {
		/* Wait for the manager or another test thread to wake
		 * us up
		 */
		k_sem_take(&worker_sems[id], K_FOREVER);

		last_thread = curr;

		/* If we're the wakeup target, setting last_thread is
		 * all we do
		 */
		if (id == target) {
			continue;
		}

		if (do_lock) {
			k_sched_lock();
		}

		if (do_irq) {
			/* Do the sem_give() in an IRQ to validate that
			 * ISR return does the right thing
			 */
			irq_offload(irq_waker, NULL);
			prev = last_thread;
		} else {
			/* Do the sem_give() directly to validate that
			 * the synchronous scheduling does the right
			 * thing
			 */
			k_sem_give(&worker_sems[target]);
			prev = last_thread;
		}

		if (do_lock) {
			k_sched_unlock();
		}

		if (do_yield) {
			k_yield();
			prev = last_thread;
		}

		if (do_sleep) {
			uint64_t start = k_uptime_get();

			k_sleep(K_MSEC(1));

			zassert_true(k_uptime_get() - start > 0,
				     "didn't sleep");
			prev = last_thread;
		}

		validate_wakeup(id, target, prev);
	}
}

/**
 * @brief Test preemption
 *
 * @ingroup kernel_sched_tests
 */
void test_preempt(void)
{
	int priority;

	for (int i = 0; i < NUM_THREADS; i++) {
		k_sem_init(&worker_sems[i], 0, 1);

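		/* Map the abstract priority class onto a concrete
		 * scheduler priority: the single metairq slot, the
		 * first cooperative priority below the metairq range,
		 * or a preemptible priority one step higher than the
		 * manager thread.
		 */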
		if (worker_priorities[i] == METAIRQ) {
			priority = K_HIGHEST_THREAD_PRIO;
		} else if (worker_priorities[i] == COOP) {
			priority = K_HIGHEST_THREAD_PRIO
				+ CONFIG_NUM_METAIRQ_PRIORITIES;

			zassert_true(priority < K_PRIO_PREEMPT(0), "");
		} else {
			priority = K_LOWEST_APPLICATION_THREAD_PRIO - 1;

			zassert_true(priority >= K_PRIO_PREEMPT(0), "");
		}

		k_thread_create(&worker_threads[i],
				worker_stacks[i], STACK_SIZE,
				worker, INT_TO_POINTER(i), NULL, NULL,
				priority, 0, K_NO_WAIT);
	}

	k_thread_create(&manager_thread, manager_stack, STACK_SIZE,
			manager, NULL, NULL, NULL,
			K_LOWEST_APPLICATION_THREAD_PRIO, 0, K_NO_WAIT);

	/* We don't control the priority of this thread so can't make
	 * it part of the test.  Just get out of the way until the
	 * test is done
	 */
	k_sem_take(&main_sem, K_FOREVER);
}

void test_main(void)
{
	ztest_test_suite(suite_preempt,
			 ztest_unit_test(test_preempt));
	ztest_run_test_suite(suite_preempt);
}