/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/kernel.h>
#include <zephyr/ztest.h>
#include <zephyr/irq_offload.h>
#include <zephyr/kernel_structs.h> /* for _THREAD_PENDING */
#include <string.h> /* for strstr() */

/* Explicit preemption test.  Works by creating a set of threads in
 * each priority class (cooperative, preemptible, metairq) which all
 * go to sleep.  Then one is woken up (by a low priority manager
 * thread); it wakes one other thread, and the test validates that the
 * next thread to run is the correct one according to the documented
 * rules.
 *
 * The wakeup test is repeated for all four combinations of the source
 * thread holding or not holding the scheduler lock and of a
 * synchronous wake vs. a wake from an (offloaded) interrupt.
 */
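
/* For reference, the rules validate_wakeup() checks are, roughly:
 *
 *  - Waking a meta-IRQ thread always preempts the source thread,
 *    even one holding the scheduler lock.
 *  - Otherwise a source holding the scheduler lock is never
 *    preempted, and waking an equal or lower priority thread never
 *    preempts; only a strictly higher priority target may (and,
 *    without the lock, must) run first.
 *  - After a k_yield(), a target of equal or higher priority runs;
 *    after a k_sleep(), the target always gets to run.
 */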

#if defined(CONFIG_SMP) && CONFIG_MP_MAX_NUM_CPUS > 1
#error Preemption test requires single-CPU operation
#endif

#if CONFIG_NUM_METAIRQ_PRIORITIES < 1
#error Need one metairq priority
#endif

#if CONFIG_NUM_COOP_PRIORITIES < 2
#error Need two cooperative priorities
#endif

#if CONFIG_NUM_PREEMPT_PRIORITIES < 2
#error Need two preemptible priorities
#endif

/* Two threads at each priority (to test the case of waking up a
 * thread of equal priority), but only one metairq thread, as it isn't
 * technically legal to have more than one at the same priority.
 */
const enum { METAIRQ, COOP, PREEMPTIBLE } worker_priorities[] = {
	METAIRQ,
	COOP, COOP,
	PREEMPTIBLE, PREEMPTIBLE,
};
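
/* Note that the enumerator order matters: lower values mean higher
 * scheduling precedence (METAIRQ outranks COOP outranks PREEMPTIBLE),
 * which is what lets validate_wakeup() compare priority classes with
 * a plain "<" through the PRI() macro below.
 */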

#define NUM_THREADS ARRAY_SIZE(worker_priorities)

#define STACK_SIZE (640 + CONFIG_TEST_EXTRA_STACK_SIZE)

k_tid_t last_wakeup_thread;

struct k_thread manager_thread;

K_THREAD_STACK_DEFINE(manager_stack, STACK_SIZE);

struct k_thread worker_threads[NUM_THREADS];

K_THREAD_STACK_ARRAY_DEFINE(worker_stacks, NUM_THREADS, STACK_SIZE);

struct k_sem worker_sems[NUM_THREADS];

/* Command to worker: who to wake up */
int wakeup_target;

/* Command to worker: use a sched_lock()? */
volatile int do_lock;

/* Command to worker: use irq_offload() to indirect the wakeup? */
volatile int do_irq;

/* Command to worker: sleep after wakeup? */
volatile int do_sleep;

/* Command to worker: yield after wakeup? */
volatile int do_yield;

K_SEM_DEFINE(main_sem, 0, 1);
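
/* Test handshake: the manager sets the command variables above and
 * gives worker_sems[src] to wake the source worker.  The source
 * worker then wakes the target (directly or via irq_offload()),
 * optionally yields or sleeps, and checks last_wakeup_thread, which
 * every worker records as soon as it runs, to see who ran.
 */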

void wakeup_src_thread(int id)
{
	volatile k_tid_t src_thread = &worker_threads[id];

	zassert_true(k_current_get() == &manager_thread, "");

	/* irq_offload() on ARM appears not to do what we want.  It
	 * doesn't appear to go through the normal exception return
	 * path and always returns back into the calling context, so
	 * it can't be used to fake preemption.
	 */
	if (do_irq && IS_ENABLED(CONFIG_ARM)) {
		return;
	}

	last_wakeup_thread = NULL;

	/* A little bit of white-box inspection: check that all the
	 * worker threads are pending.
	 */
	for (int i = 0; i < NUM_THREADS; i++) {
		k_tid_t th = &worker_threads[i];
		char buffer[16];
		const char *str;

		str = k_thread_state_str(th, buffer, sizeof(buffer));
		zassert_not_null(strstr(str, "pending"),
				 "worker thread %d not pending?", i);
	}

	/* Wake the src worker up */
	last_wakeup_thread = NULL;
	k_sem_give(&worker_sems[id]);

	while (do_sleep && !(src_thread->base.thread_state & _THREAD_PENDING)) {
		/* spin, waiting on the sleep timeout */
		Z_SPIN_DELAY(50);
	}

	/* We are lowest priority, SOMEONE must have run */
	zassert_true(!!last_wakeup_thread, "");
}

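/* Manager thread: iterates the full test matrix.  Every ordered
 * (src, target) pair of distinct workers is exercised with and
 * without the scheduler lock, with a synchronous vs. IRQ-offloaded
 * wake, and in plain, yield and sleep modes.  With the five workers
 * defined above that is 5 * 4 * 2 * 2 * 3 = 240 wakeup scenarios
 * (the IRQ cases are skipped on ARM, see wakeup_src_thread()).
 */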
void manager(void *p1, void *p2, void *p3)
{
	for (int src = 0; src < NUM_THREADS; src++) {
		for (wakeup_target = 0; wakeup_target < NUM_THREADS; wakeup_target++) {

			if (src == wakeup_target) {
				continue;
			}

			for (do_lock = 0; do_lock < 2; do_lock++) {
				for (do_irq = 0; do_irq < 2; do_irq++) {
					do_yield = 0;
					do_sleep = 0;
					wakeup_src_thread(src);

					do_yield = 1;
					do_sleep = 0;
					wakeup_src_thread(src);

					do_yield = 0;
					do_sleep = 1;
					wakeup_src_thread(src);
				}
			}
		}
	}

	k_sem_give(&main_sem);
}

void irq_waker(const void *p)
{
	ARG_UNUSED(p);
	k_sem_give(&worker_sems[wakeup_target]);
}

#define PRI(n) (worker_priorities[n])

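/* Given the source and target indices and the do_lock/do_yield/
 * do_sleep mode flags, check that the thread observed in
 * last_wakeup_thread after the wake (passed in as last_thread) is
 * the one the documented scheduling rules say should have run.
 */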
void validate_wakeup(int src, int target, k_tid_t last_thread)
{
	int preempted = &worker_threads[target] == last_thread;
	int src_wins = PRI(src) < PRI(target);
	int target_wins = PRI(target) < PRI(src);
	int tie = PRI(src) == PRI(target);

	if (do_sleep) {
		zassert_true(preempted, "sleeping must let any worker run");
		return;
	}

	if (do_yield) {
		if (preempted) {
			zassert_false(src_wins,
				      "src (pri %d) should not have yielded to tgt (%d)",
				      PRI(src), PRI(target));
		} else {
			zassert_true(src_wins,
				      "src (pri %d) should have yielded to tgt (%d)",
				      PRI(src), PRI(target));
		}

		return;
	}

	if (preempted) {
		zassert_true(target_wins, "preemption must raise priority");
	}

	if (PRI(target) == METAIRQ) {
		zassert_true(preempted,
			     "metairq threads must always preempt");
	} else {
		zassert_false(do_lock && preempted,
			      "threads holding scheduler lock must not be preempted");

		zassert_false(preempted && src_wins,
			      "lower priority threads must never preempt");

		if (!do_lock) {
			zassert_false(!preempted && target_wins,
				      "higher priority thread should have preempted");

			/* The scheduler implements a 'first added to
			 * queue' policy for threads within a single
			 * priority, so the last thread woken up (the
			 * target) must never run before the source
			 * thread.
			 *
			 * NOTE: I checked, and Zephyr doesn't
			 * actually document this behavior, though a
			 * few other tests rely on it IIRC.  IMHO
			 * there are good arguments for either this
			 * policy OR the opposite ("run newly woken
			 * threads first"), and long term we may want
			 * to revisit this particular check and maybe
			 * make the policy configurable.
			 */
			zassert_false(preempted && tie,
				      "tied priority should not preempt");
		}
	}
}

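/* Worker thread body.  Each worker blocks on its own semaphore.
 * When woken as the target it only records itself in
 * last_wakeup_thread; when woken as the source it performs the
 * commanded wakeup of the target (optionally under the scheduler
 * lock, via irq_offload(), followed by a yield or a sleep) and then
 * validates which thread actually ran.
 */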
void worker(void *p1, void *p2, void *p3)
{
	int id = POINTER_TO_INT(p1);
	k_tid_t curr = &worker_threads[id], prev;

	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	zassert_true(id >= 0 && id < NUM_THREADS, "");
	zassert_true(curr == k_current_get(), "");

	while (1) {
		/* Wait for the manager or another test thread to wake
		 * us up
		 */
		k_sem_take(&worker_sems[id], K_FOREVER);

		last_wakeup_thread = curr;

		/* If we're the wakeup target, setting last_wakeup_thread is
		 * all we do
		 */
		if (id == wakeup_target) {
			continue;
		}

		if (do_lock) {
			k_sched_lock();
		}

		if (do_irq) {
			/* Do the sem_give() in an IRQ to validate that
			 * ISR return does the right thing
			 */
			irq_offload(irq_waker, NULL);
			prev = last_wakeup_thread;
		} else {
			/* Do the sem_give() directly to validate that
			 * the synchronous scheduling does the right
			 * thing
			 */
			k_sem_give(&worker_sems[wakeup_target]);
			prev = last_wakeup_thread;
		}

		if (do_lock) {
			k_sched_unlock();
		}

		if (do_yield) {
			k_yield();
			prev = last_wakeup_thread;
		}

		if (do_sleep) {
			uint64_t start = k_uptime_get();

			k_sleep(K_MSEC(1));

			zassert_true(k_uptime_get() - start > 0,
				     "didn't sleep");
			prev = last_wakeup_thread;
		}

		validate_wakeup(id, wakeup_target, prev);
	}
}

/**
 * @brief Test preemption
 *
 * @ingroup kernel_sched_tests
 */
ZTEST(suite_preempt, test_preempt)
{
	int priority;

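	/* Map each worker's class onto a numeric priority.  In Zephyr,
	 * lower numbers mean higher precedence: the meta-IRQ band sits
	 * at the very top (K_HIGHEST_THREAD_PRIO), cooperative
	 * priorities are negative, and preemptible priorities start at
	 * K_PRIO_PREEMPT(0) == 0.  The manager is created at the
	 * lowest application priority below, so every worker outranks
	 * it.
	 */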
	for (int i = 0; i < NUM_THREADS; i++) {
		k_sem_init(&worker_sems[i], 0, 1);

		if (worker_priorities[i] == METAIRQ) {
			priority = K_HIGHEST_THREAD_PRIO;
		} else if (worker_priorities[i] == COOP) {
			priority = K_HIGHEST_THREAD_PRIO
				+ CONFIG_NUM_METAIRQ_PRIORITIES;

			zassert_true(priority < K_PRIO_PREEMPT(0), "");
		} else {
			priority = K_LOWEST_APPLICATION_THREAD_PRIO - 1;

			zassert_true(priority >= K_PRIO_PREEMPT(0), "");
		}

		k_thread_create(&worker_threads[i],
				worker_stacks[i], STACK_SIZE,
				worker, INT_TO_POINTER(i), NULL, NULL,
				priority, 0, K_NO_WAIT);
	}

	k_thread_create(&manager_thread, manager_stack, STACK_SIZE,
			manager, NULL, NULL, NULL,
			K_LOWEST_APPLICATION_THREAD_PRIO, 0, K_NO_WAIT);

	/* We don't control the priority of this thread, so we can't
	 * make it part of the test.  Just get out of the way until
	 * the test is done.
	 */
	k_sem_take(&main_sem, K_FOREVER);

	/* unit test clean up */

	/* k_thread_abort() would also work here, but joining the
	 * manager is more graceful.
	 */
	k_thread_join(&manager_thread, K_FOREVER);

	/* The worker threads loop forever, so they have no graceful
	 * way to stop and must be aborted.
	 */
	for (int i = 0; i < NUM_THREADS; i++) {
		k_thread_abort(&worker_threads[i]);
	}
}

ZTEST_SUITE(suite_preempt, NULL, NULL, NULL, NULL, NULL);