/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr.h>
#include <ztest.h>
#include <random/rand32.h>

#define NUM_THREADS 8
/* this should be large enough for us
 * to print a failing assert if necessary
 */
#define STACK_SIZE (512 + CONFIG_TEST_EXTRA_STACKSIZE)

struct k_thread worker_threads[NUM_THREADS];
k_tid_t worker_tids[NUM_THREADS];

K_THREAD_STACK_ARRAY_DEFINE(worker_stacks, NUM_THREADS, STACK_SIZE);

int thread_deadlines[NUM_THREADS];

/* The number of worker threads that ran, and an array of their
 * indices in execution order
 */
int n_exec;
int exec_order[NUM_THREADS];

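/* Worker for test_deadline: record this thread's index in execution
 * order, then sleep rather than exit (see the comment in the loop below).
 */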
void worker(void *p1, void *p2, void *p3)
{
	int tidx = POINTER_TO_INT(p1);

	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	zassert_true(tidx >= 0 && tidx < NUM_THREADS, "");
	zassert_true(n_exec >= 0 && n_exec < NUM_THREADS, "");

	exec_order[n_exec++] = tidx;

	/* Sleep, don't exit.  It's not implausible that some
	 * platforms implement a thread-based cleanup step for threads
	 * that exit (pthreads does this already) which might muck
	 * with the scheduling.
	 */
	while (1) {
		k_sleep(K_MSEC(1000000));
	}
}

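/**
 * @brief Validate deadline ordering via k_thread_deadline_set()
 *
 * @details Create NUM_THREADS workers at the same (lowest application)
 * priority, assign each a random deadline, sleep, and verify that they
 * executed in non-decreasing deadline order.
 *
 * @ingroup kernel_sched_tests
 */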
void test_deadline(void)
{
	int i;

	/* Create a bunch of threads at a single lower priority.  Give
	 * them each a random deadline.  Sleep, and check that they
	 * were executed in the right order.
	 */
	for (i = 0; i < NUM_THREADS; i++) {
		worker_tids[i] = k_thread_create(&worker_threads[i],
				worker_stacks[i], STACK_SIZE,
				worker, INT_TO_POINTER(i), NULL, NULL,
				K_LOWEST_APPLICATION_THREAD_PRIO,
				0, K_NO_WAIT);

		/* Positive-definite number with the bottom 8 bits
		 * masked off to prevent aliasing where "very close"
		 * deadlines end up in the opposite order due to the
		 * changing "now" between calls to
		 * k_thread_deadline_set().
		 *
		 * Use only 30 bits of significant value.  The API
		 * permits 31 (strictly: the deadline time of the
		 * "first" runnable thread in any given priority and
		 * the "last" must be less than 2^31), but because the
		 * time between our generation here and the set of the
		 * deadline below takes non-zero time, it's possible
		 * to see rollovers.  Easier than using a modulus test
		 * or whatnot to restrict the values.
		 */
		thread_deadlines[i] = sys_rand32_get() & 0x3fffff00;
	}

	zassert_true(n_exec == 0, "threads ran too soon");

	/* Similarly do the deadline setting in one quick pass to
	 * minimize aliasing with "now"
	 */
	for (i = 0; i < NUM_THREADS; i++) {
		k_thread_deadline_set(&worker_threads[i], thread_deadlines[i]);
	}

	zassert_true(n_exec == 0, "threads ran too soon");

	k_sleep(K_MSEC(100));

	zassert_true(n_exec == NUM_THREADS, "not enough threads ran");

	for (i = 1; i < NUM_THREADS; i++) {
		int d0 = thread_deadlines[exec_order[i-1]];
		int d1 = thread_deadlines[exec_order[i]];

		zassert_true(d0 <= d1, "threads ran in wrong order");
	}
	for (i = 0; i < NUM_THREADS; i++) {
		k_thread_abort(worker_tids[i]);
	}
}

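/* Worker for test_yield: bump the shared counter, yield to the other
 * equal-priority workers, check that every worker started before any of
 * them resumed, then abort this thread.
 */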
void yield_worker(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	zassert_true(n_exec >= 0 && n_exec < NUM_THREADS, "");

	n_exec += 1;

	k_yield();

	/* should not get here until all threads have started */
	zassert_true(n_exec == NUM_THREADS, "");

	k_thread_abort(k_current_get());
}

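/**
 * @brief Validate k_yield() among threads with equal deadlines
 *
 * @details Create NUM_THREADS workers at the same priority with no
 * deadline set (so all deadlines are 0), have each one yield to the
 * next, sleep, and check that all of them ran.
 *
 * @ingroup kernel_sched_tests
 */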
void test_yield(void)
{
	/* Test that yield works across threads with the
	 * same deadline and priority. This currently works by
	 * simply not setting a deadline, which results in a
	 * deadline of 0.
	 */

	int i;

	n_exec = 0;

	/* Create a bunch of threads at a single lower priority
	 * and deadline.
	 * Each thread increments the shared counter, then yields
	 * to the next. Sleep. Check that all threads ran.
	 */
	for (i = 0; i < NUM_THREADS; i++) {
		k_thread_create(&worker_threads[i],
				worker_stacks[i], STACK_SIZE,
				yield_worker, NULL, NULL, NULL,
				K_LOWEST_APPLICATION_THREAD_PRIO,
				0, K_NO_WAIT);
	}

	zassert_true(n_exec == 0, "threads ran too soon");

	k_sleep(K_MSEC(100));

	zassert_true(n_exec == NUM_THREADS, "not enough threads ran");
}

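/* Worker for test_unqueued: just record that this thread ran. */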
void unqueue_worker(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	zassert_true(n_exec >= 0 && n_exec < NUM_THREADS, "");

	n_exec += 1;
}

/**
 * @brief Validate the behavior of k_thread_deadline_set() when the thread is not queued
 *
 * @details Create a bunch of threads with a scheduling delay, which leaves
 * them unqueued. The k_thread_deadline_set() call should not make these
 * threads run before their delay time passes.
 *
 * @ingroup kernel_sched_tests
 */
void test_unqueued(void)
{
	int i;

	n_exec = 0;

	for (i = 0; i < NUM_THREADS; i++) {
		worker_tids[i] = k_thread_create(&worker_threads[i],
				worker_stacks[i], STACK_SIZE,
				unqueue_worker, NULL, NULL, NULL,
				K_LOWEST_APPLICATION_THREAD_PRIO,
				0, K_MSEC(100));
	}

	zassert_true(n_exec == 0, "threads ran too soon");

	for (i = 0; i < NUM_THREADS; i++) {
		thread_deadlines[i] = sys_rand32_get() & 0x3fffff00;
		k_thread_deadline_set(&worker_threads[i], thread_deadlines[i]);
	}

	k_sleep(K_MSEC(50));

	zassert_true(n_exec == 0, "deadline_set made an unqueued thread run");

	k_sleep(K_MSEC(100));

	zassert_true(n_exec == NUM_THREADS, "not enough threads ran");

	for (i = 0; i < NUM_THREADS; i++) {
		k_thread_abort(worker_tids[i]);
	}
}

void test_main(void)
{
	ztest_test_suite(suite_deadline,
			 ztest_unit_test(test_deadline),
			 ztest_unit_test(test_yield),
			 ztest_unit_test(test_unqueued));
	ztest_run_test_suite(suite_deadline);
}