/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/kernel.h>
#include <zephyr/ztest.h>
#include <zephyr/random/random.h>

#define NUM_THREADS 8
/* this should be large enough for us
 * to print a failing assert if necessary
 */
#define STACK_SIZE (512 + CONFIG_TEST_EXTRA_STACK_SIZE)

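/* Convert milliseconds to hardware clock cycles, the unit in which the
 * thread deadlines below are expressed.
 */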
#define MSEC_TO_CYCLES(msec)  (int)(((uint64_t)(msec) * \
				     (uint64_t)CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC) / \
				    (uint64_t)MSEC_PER_SEC)

struct k_thread worker_threads[NUM_THREADS];
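/* Thread that the reschedule test expects to find running at each
 * checkpoint (only used in the single-CPU configuration below).
 */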
volatile struct k_thread *expected_thread;
k_tid_t worker_tids[NUM_THREADS];

K_THREAD_STACK_ARRAY_DEFINE(worker_stacks, NUM_THREADS, STACK_SIZE);

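/* Relative deadline assigned to each worker thread */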
int thread_deadlines[NUM_THREADS];

/* The number of worker threads that ran, and array of their
 * indices in execution order
 */
int n_exec;
int exec_order[NUM_THREADS];

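/* Worker for test_deadline: records the order in which it was first
 * scheduled, then sleeps forever (the test aborts it when done).
 */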
void worker(void *p1, void *p2, void *p3)
{
	int tidx = POINTER_TO_INT(p1);

	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	zassert_true(tidx >= 0 && tidx < NUM_THREADS, "");
	zassert_true(n_exec >= 0 && n_exec < NUM_THREADS, "");

	exec_order[n_exec++] = tidx;

	/* Sleep, don't exit.  It's not implausible that some
	 * platforms implement a thread-based cleanup step for threads
	 * that exit (pthreads does this already) which might muck
	 * with the scheduling.
	 */
	while (1) {
		k_sleep(K_MSEC(1000000));
	}
}

ZTEST(suite_deadline, test_deadline)
{
	int i;

	n_exec = 0;

	/* Create a bunch of threads at a single lower priority.  Give
	 * them each a random deadline.  Sleep, and check that they
	 * were executed in the right order.
	 */
	for (i = 0; i < NUM_THREADS; i++) {
		worker_tids[i] = k_thread_create(&worker_threads[i],
				worker_stacks[i], STACK_SIZE,
				worker, INT_TO_POINTER(i), NULL, NULL,
				K_LOWEST_APPLICATION_THREAD_PRIO,
				0, K_NO_WAIT);

		/* Positive-definite number with the bottom 8 bits
		 * masked off to prevent aliasing where "very close"
		 * deadlines end up in the opposite order due to the
		 * changing "now" between calls to
		 * k_thread_deadline_set().
		 *
		 * Use only 30 bits of significant value.  The API
		 * permits 31 (strictly: the deadline time of the
		 * "first" runnable thread in any given priority and
		 * the "last" must be less than 2^31), but because the
		 * time between our generation here and the set of the
		 * deadline below takes non-zero time, it's possible
		 * to see rollovers.  Easier than using a modulus test
		 * or whatnot to restrict the values.
		 */
		thread_deadlines[i] = sys_rand32_get() & 0x3fffff00;
	}

	zassert_true(n_exec == 0, "threads ran too soon");

	/* Similarly do the deadline setting in one quick pass to
	 * minimize aliasing with "now"
	 */
	for (i = 0; i < NUM_THREADS; i++) {
		k_thread_deadline_set(&worker_threads[i], thread_deadlines[i]);
	}

	zassert_true(n_exec == 0, "threads ran too soon");

	k_sleep(K_MSEC(100));

	zassert_true(n_exec == NUM_THREADS, "not enough threads ran");

	for (i = 1; i < NUM_THREADS; i++) {
		int d0 = thread_deadlines[exec_order[i-1]];
		int d1 = thread_deadlines[exec_order[i]];

		zassert_true(d0 <= d1, "threads ran in wrong order");
	}
	for (i = 0; i < NUM_THREADS; i++) {
		k_thread_abort(worker_tids[i]);
	}
}

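/* Worker for test_yield: counts itself as started, yields to the other
 * equal-deadline workers, and checks that every worker ran before it
 * regained the CPU.
 */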
void yield_worker(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	zassert_true(n_exec >= 0 && n_exec < NUM_THREADS, "");

	n_exec += 1;

	k_yield();

	/* should not get here until all threads have started */
	zassert_true(n_exec == NUM_THREADS, "");

	k_thread_abort(k_current_get());

	CODE_UNREACHABLE;
}

ZTEST(suite_deadline, test_yield)
{
	/* Test that yield works across threads with the
	 * same deadline and priority. This currently works by
	 * simply not setting a deadline, which results in a
	 * deadline of 0.
	 */

	int i;

	n_exec = 0;

	/* Create a bunch of threads at a single lower priority
	 * and deadline.
	 * Each thread increments the shared counter, then yields
	 * to the next. Sleep. Check that all threads ran.
	 */
	for (i = 0; i < NUM_THREADS; i++) {
		k_thread_create(&worker_threads[i],
				worker_stacks[i], STACK_SIZE,
				yield_worker, NULL, NULL, NULL,
				K_LOWEST_APPLICATION_THREAD_PRIO,
				0, K_NO_WAIT);
	}

	zassert_true(n_exec == 0, "threads ran too soon");

	k_sleep(K_MSEC(100));

	zassert_true(n_exec == NUM_THREADS, "not enough threads ran");
}

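/* Worker for test_unqueued: simply records that it ran */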
void unqueue_worker(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	zassert_true(n_exec >= 0 && n_exec < NUM_THREADS, "");

	n_exec += 1;
}

/**
 * @brief Validate the behavior of deadline_set when the thread is not queued
 *
 * @details Create a bunch of threads with a scheduling delay, which leaves
 * them in the unqueued state. The k_thread_deadline_set() call should not
 * make these threads run before their delay time passes.
 *
 * @ingroup kernel_sched_tests
 */
ZTEST(suite_deadline, test_unqueued)
{
	int i;

	n_exec = 0;

	for (i = 0; i < NUM_THREADS; i++) {
		worker_tids[i] = k_thread_create(&worker_threads[i],
				worker_stacks[i], STACK_SIZE,
				unqueue_worker, NULL, NULL, NULL,
				K_LOWEST_APPLICATION_THREAD_PRIO,
				0, K_MSEC(100));
	}

	zassert_true(n_exec == 0, "threads ran too soon");

	for (i = 0; i < NUM_THREADS; i++) {
		thread_deadlines[i] = sys_rand32_get() & 0x3fffff00;
		k_thread_deadline_set(&worker_threads[i], thread_deadlines[i]);
	}

	k_sleep(K_MSEC(50));

	zassert_true(n_exec == 0, "deadline set made the unqueued threads run");

	k_sleep(K_MSEC(100));

	zassert_true(n_exec == NUM_THREADS, "not enough threads ran");

	for (i = 0; i < NUM_THREADS; i++) {
		k_thread_abort(worker_tids[i]);
	}
}

#if (CONFIG_MP_MAX_NUM_CPUS == 1)
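/* Callback with the irq_offload() signature that just invokes
 * k_reschedule(), so the reschedule can be triggered either from
 * thread context (via thread_offload) or from an ISR (via irq_offload).
 */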
static void reschedule_wrapper(const void *param)
{
	ARG_UNUSED(param);

	k_reschedule();
}

static void test_reschedule_helper0(void *p1, void *p2, void *p3)
{
	/* 4. Reschedule brings us here */

	zassert_true(expected_thread == arch_current_thread(), "");

	expected_thread = &worker_threads[1];
}

static void test_reschedule_helper1(void *p1, void *p2, void *p3)
{
	void (*offload)(void (*f)(const void *p), const void *param) = p1;

	/* 1. First helper expected to execute */

	zassert_true(expected_thread == arch_current_thread(), "");

	offload(reschedule_wrapper, NULL);

	/* 2. Deadlines have not changed. Expected no changes */

	zassert_true(expected_thread == arch_current_thread(), "");

	k_thread_deadline_set(arch_current_thread(), MSEC_TO_CYCLES(1000));

	/* 3. Deadline changed, but there was no reschedule */

	zassert_true(expected_thread == arch_current_thread(), "");

	expected_thread = &worker_threads[0];
	offload(reschedule_wrapper, NULL);

	/* 5. test_reschedule_helper0 executed */

	zassert_true(expected_thread == arch_current_thread(), "");
}

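/* Thread-context counterpart of irq_offload(): runs the callback
 * synchronously in the calling thread.
 */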
static void thread_offload(void (*f)(const void *p), const void *param)
{
	f(param);
}

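/* helper1 gets the earlier deadline, so it runs first.  It verifies
 * that k_reschedule() alone does not cause a context switch, then
 * pushes its own deadline past helper0's and confirms that the next
 * k_reschedule() hands the CPU to helper0.  The sequence runs once
 * with thread_offload() and, when SMP is disabled, again with
 * irq_offload().
 */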
ZTEST(suite_deadline, test_thread_reschedule)
{
	k_thread_create(&worker_threads[0], worker_stacks[0], STACK_SIZE,
			test_reschedule_helper0,
			thread_offload, NULL, NULL,
			K_LOWEST_APPLICATION_THREAD_PRIO,
			0, K_NO_WAIT);

	k_thread_create(&worker_threads[1], worker_stacks[1], STACK_SIZE,
			test_reschedule_helper1,
			thread_offload, NULL, NULL,
			K_LOWEST_APPLICATION_THREAD_PRIO,
			0, K_NO_WAIT);

	k_thread_deadline_set(&worker_threads[0], MSEC_TO_CYCLES(500));
	k_thread_deadline_set(&worker_threads[1], MSEC_TO_CYCLES(10));

	expected_thread = &worker_threads[1];

	k_thread_join(&worker_threads[1], K_FOREVER);
	k_thread_join(&worker_threads[0], K_FOREVER);

#ifndef CONFIG_SMP
	/*
	 * When SMP is enabled, there is always a reschedule performed
	 * at the end of the ISR.
	 */
	k_thread_create(&worker_threads[0], worker_stacks[0], STACK_SIZE,
			test_reschedule_helper0,
			irq_offload, NULL, NULL,
			K_LOWEST_APPLICATION_THREAD_PRIO,
			0, K_NO_WAIT);

	k_thread_create(&worker_threads[1], worker_stacks[1], STACK_SIZE,
			test_reschedule_helper1,
			irq_offload, NULL, NULL,
			K_LOWEST_APPLICATION_THREAD_PRIO,
			0, K_NO_WAIT);

	k_thread_deadline_set(&worker_threads[0], MSEC_TO_CYCLES(500));
	k_thread_deadline_set(&worker_threads[1], MSEC_TO_CYCLES(10));

	expected_thread = &worker_threads[1];

	k_thread_join(&worker_threads[1], K_FOREVER);
	k_thread_join(&worker_threads[0], K_FOREVER);

#endif /* !CONFIG_SMP */
}
#endif /* CONFIG_MP_MAX_NUM_CPUS == 1 */

ZTEST_SUITE(suite_deadline, NULL, NULL, NULL, NULL, NULL);