1 /*
2  * Copyright (c) 2020 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 #include <zephyr.h>
7 #include <random/rand32.h>
8 #include <ztest.h>
9 #include <sys/p4wq.h>
10 
/* Two worker threads per CPU so the fill/stress tests can
 * oversubscribe the scheduler.
 */
#define NUM_THREADS (CONFIG_MP_NUM_CPUS * 2)
#define MAX_ITEMS (NUM_THREADS * 8)
/* Handler invocations after which the stress test declares itself done */
#define MAX_EVENTS 1024

/* The P4 work queue under test: NUM_THREADS workers, 2048-byte stacks */
K_P4WQ_DEFINE(wq, NUM_THREADS, 2048);

/* Shared work item reused by the simple and resubmit tests */
static struct k_p4wq_work simple_item;
static volatile int has_run;      /* set by simple_handler when it executes */
static volatile int run_count;    /* resubmit_handler invocation counter */
static volatile int spin_release; /* releases the spin_handler busy loops */

/* Work item plus per-item bookkeeping for the stress test */
struct test_item {
	struct k_p4wq_work item;
	bool active;  /* submitted and not yet retired */
	bool running; /* handler has begun executing */
};

/* Protects the stress-test state below */
static struct k_spinlock lock;
static struct test_item items[MAX_ITEMS];
static int active_items;     /* number of currently-submitted items */
static int event_count;      /* total handler invocations so far */
static bool stress_complete; /* set once event_count reaches MAX_EVENTS */

static void stress_handler(struct k_p4wq_work *item);
35 
stress_sub(struct test_item * item)36 static void stress_sub(struct test_item *item)
37 {
38 	/* Choose a random preemptible priority higher than the idle
39 	 * priority, and a random deadline sometime within the next
40 	 * 2ms
41 	 */
42 	item->item.priority = sys_rand32_get() % (K_LOWEST_THREAD_PRIO - 1);
43 	item->item.deadline = sys_rand32_get() % k_ms_to_cyc_ceil32(2);
44 	item->item.handler = stress_handler;
45 	item->running = false;
46 	item->active = true;
47 	active_items++;
48 	k_p4wq_submit(&wq, &item->item);
49 }
50 
/* Stress-test work item handler.  Verifies it was dispatched at the
 * item's priority, then (under the lock) retires this item and
 * randomly resubmits other idle slots to keep the queue churning.
 */
static void stress_handler(struct k_p4wq_work *item)
{
	k_spinlock_key_t k = k_spin_lock(&lock);
	struct test_item *titem = CONTAINER_OF(item, struct test_item, item);

	titem->running = true;

	/* The queue contract: the worker thread runs at the item's priority */
	int curr_pri = k_thread_priority_get(k_current_get());

	zassert_true(curr_pri == item->priority,
		     "item ran with wrong priority: want %d have %d",
		     item->priority, curr_pri);

	/* Once the test is winding down, stop generating new work */
	if (stress_complete) {
		k_spin_unlock(&lock, k);
		return;
	}

	active_items--;

	/* Pick 0-3 random item slots and submit them if they aren't
	 * already.  Make sure we always have at least one active.
	 */
	int num_tries = sys_rand32_get() % 4;

	for (int i = 0; (active_items == 0) || (i < num_tries); i++) {
		int ii = sys_rand32_get() % MAX_ITEMS;

		/* Only reuse slots that are fully idle: no thread
		 * attached, not this item, and not marked active.
		 */
		if (items[ii].item.thread == NULL &&
		    &items[ii] != titem && !items[ii].active) {
			stress_sub(&items[ii]);
		}
	}

	if (event_count++ >= MAX_EVENTS) {
		stress_complete = true;
	}

	titem->active = false;
	k_spin_unlock(&lock, k);
}
92 
/* Simple stress test designed to flood the queue and retire as many
 * items of random priority as possible.  Note that because of the
 * random priorities, this tends to produce a lot of "out of worker
 * threads" warnings from the queue as we randomly try to submit more
 * schedulable (i.e. high priority) items than there are threads to
 * run them.
 */
test_stress(void)100 static void test_stress(void)
101 {
102 	k_thread_priority_set(k_current_get(), -1);
103 	memset(items, 0, sizeof(items));
104 
105 	stress_complete = false;
106 	active_items = 1;
107 	items[0].item.priority = -1;
108 	stress_handler(&items[0].item);
109 
110 	while (!stress_complete) {
111 		k_msleep(100);
112 	}
113 	k_msleep(10);
114 
115 	zassert_true(event_count > 1, "stress tests didn't run");
116 }
117 
active_count(void)118 static int active_count(void)
119 {
120 	/* Whitebox: count the number of BLOCKED threads, because the
121 	 * queue will unpend them synchronously in submit but the
122 	 * "active" list is maintained from the thread itself against
123 	 * which we can't synchronize easily.
124 	 */
125 	int count = 0;
126 	sys_dnode_t *dummy;
127 
128 	SYS_DLIST_FOR_EACH_NODE(&wq.waitq.waitq, dummy) {
129 		count++;
130 	}
131 
132 	count = NUM_THREADS - count;
133 	return count;
134 }
135 
spin_handler(struct k_p4wq_work * item)136 static void spin_handler(struct k_p4wq_work *item)
137 {
138 	while (!spin_release) {
139 		k_busy_wait(10);
140 	}
141 }
142 
143 /* Selects and adds a new item to the queue, returns an indication of
144  * whether the item changed the number of active threads.  Does not
145  * return the item itself, not needed.
146  */
add_new_item(int pri)147 static bool add_new_item(int pri)
148 {
149 	static int num_items;
150 	int n0 = active_count();
151 	struct k_p4wq_work *item = &items[num_items++].item;
152 
153 	__ASSERT_NO_MSG(num_items < MAX_ITEMS);
154 	item->priority = pri;
155 	item->deadline = k_us_to_cyc_ceil32(100);
156 	item->handler = spin_handler;
157 	k_p4wq_submit(&wq, item);
158 	k_usleep(1);
159 
160 	return (active_count() != n0);
161 }
162 
163 /* Whitebox test of thread state: make sure that as we add threads
164  * they get scheduled as needed, up to NUM_CPUS (at which point the
165  * queue should STOP scheduling new threads).  Then add more at higher
166  * priorities and verify that they get scheduled too (to allow
167  * preemption), up to the maximum number of threads that we created.
168  */
test_fill_queue(void)169 static void test_fill_queue(void)
170 {
171 	int p0 = 4;
172 
173 	/* The work item priorities are 0-4, this thread should be -1
174 	 * so it's guaranteed not to be preempted
175 	 */
176 	k_thread_priority_set(k_current_get(), -1);
177 
178 	/* Spawn enough threads so the queue saturates the CPU count
179 	 * (note they have lower priority than the current thread so
180 	 * we can be sure to run).  They should all be made active
181 	 * when added.
182 	 */
183 	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
184 		zassert_true(add_new_item(p0), "thread should be active");
185 	}
186 
187 	/* Add one more, it should NOT be scheduled */
188 	zassert_false(add_new_item(p0), "thread should not be active");
189 
190 	/* Now add more at higher priorities, they should get
191 	 * scheduled (so that they can preempt the running ones) until
192 	 * we run out of threads.
193 	 */
194 	for (int pri = p0 - 1; pri >= p0 - 4; pri++) {
195 		for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
196 			bool active = add_new_item(pri);
197 
198 			if (!active) {
199 				zassert_equal(active_count(), NUM_THREADS,
200 					      "thread max not reached");
201 				goto done;
202 			}
203 		}
204 	}
205 
206  done:
207 	/* Clean up and wait for the threads to be idle */
208 	spin_release = 1;
209 	do {
210 		k_msleep(1);
211 	} while (active_count() != 0);
212 	k_msleep(1);
213 }
214 
resubmit_handler(struct k_p4wq_work * item)215 static void resubmit_handler(struct k_p4wq_work *item)
216 {
217 	if (run_count++ == 0) {
218 		k_p4wq_submit(&wq, item);
219 	} else {
220 		/* While we're here: validate that it doesn't show
221 		 * itself as "live" while executing
222 		 */
223 		zassert_false(k_p4wq_cancel(&wq, item),
224 			      "item should not be cancelable while running");
225 	}
226 }
227 
228 /* Validate item can be resubmitted from its own handler */
test_resubmit(void)229 static void test_resubmit(void)
230 {
231 	run_count = 0;
232 	simple_item = (struct k_p4wq_work){};
233 	simple_item.handler = resubmit_handler;
234 	k_p4wq_submit(&wq, &simple_item);
235 
236 	k_msleep(100);
237 	zassert_equal(run_count, 2, "Wrong run count: %d\n", run_count);
238 }
239 
/* Handler for test_p4wq_simple: validates it received the expected
 * item and records (exactly one) execution.
 */
void simple_handler(struct k_p4wq_work *work)
{
	zassert_equal(work, &simple_item, "bad work item pointer");
	zassert_false(has_run, "ran twice");
	has_run = true;
}
246 
/* Simple test that submitted items run, and at the correct priority */
test_p4wq_simple(void)248 static void test_p4wq_simple(void)
249 {
250 	int prio = 2;
251 
252 	k_thread_priority_set(k_current_get(), prio);
253 
254 	/* Lower priority item, should not run until we yield */
255 	simple_item.priority = prio + 1;
256 	simple_item.deadline = 0;
257 	simple_item.handler = simple_handler;
258 
259 	has_run = false;
260 	k_p4wq_submit(&wq, &simple_item);
261 	zassert_false(has_run, "ran too early");
262 
263 	k_msleep(10);
264 	zassert_true(has_run, "low-priority item didn't run");
265 
266 	/* Higher priority, should preempt us */
267 	has_run = false;
268 	simple_item.priority = prio - 1;
269 	k_p4wq_submit(&wq, &simple_item);
270 	zassert_true(has_run, "high-priority item didn't run");
271 }
272 
void test_main(void)
{
	/* test_p4wq_simple asserts exact preemption ordering against
	 * the submitting thread, so it is pinned to one CPU; the
	 * remaining tests tolerate SMP scheduling.
	 */
	ztest_test_suite(lib_p4wq_test,
			 ztest_1cpu_unit_test(test_p4wq_simple),
			 ztest_unit_test(test_resubmit),
			 ztest_unit_test(test_fill_queue),
			 ztest_unit_test(test_stress));

	ztest_run_test_suite(lib_p4wq_test);
}
283