/*
 * Copyright (c) 2020 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/* This test covers deprecated API.  Avoid inappropriate diagnostics
 * about the use of that API.
 */
#include <zephyr/toolchain.h>
#undef __deprecated
#define __deprecated
#undef __DEPRECATED_MACRO
#define __DEPRECATED_MACRO

#include <zephyr/ztest.h>

#define STACK_SIZE (1024 + CONFIG_TEST_EXTRA_STACK_SIZE)
#define COOPHI_PRIORITY K_PRIO_COOP(0) /* = -4 */
/* SYSTEM_WORKQUEUE_PRIORITY = -3 */
/* ZTEST_THREAD_PRIORITY = -2 */
#define COOPLO_PRIORITY K_PRIO_COOP(3) /* = -1 */
#define PREEMPT_PRIORITY K_PRIO_PREEMPT(1) /* = 1 */

#define DELAY_MS 100
#define DELAY_TIMEOUT K_MSEC(DELAY_MS)

BUILD_ASSERT(COOPHI_PRIORITY < CONFIG_SYSTEM_WORKQUEUE_PRIORITY,
	     "COOPHI not higher priority than system workqueue");
BUILD_ASSERT(CONFIG_SYSTEM_WORKQUEUE_PRIORITY < CONFIG_ZTEST_THREAD_PRIORITY,
	     "System workqueue not higher priority than ZTEST");
BUILD_ASSERT(CONFIG_ZTEST_THREAD_PRIORITY < COOPLO_PRIORITY,
	     "ZTEST not higher priority than COOPLO");
BUILD_ASSERT(COOPLO_PRIORITY < 0,
	     "COOPLO not cooperative");

/* Given by work thread to signal completion. */
static struct k_sem sync_sem;

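/* Latch so that test_queue_start() runs only once even though
 * workq_setup() is invoked for each suite.
 */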
static bool run_flag = true;

/* Given by test thread to release a work item. */
static struct k_sem rel_sem;

/* Common work structures, to avoid dead references to stack objects
 * if a test fails.
 */
static struct k_work common_work;
static struct k_work common_work1;
static struct k_work_delayable dwork;

/* Work synchronization objects must be in cache-coherent memory,
 * which excludes stacks on some architectures.
 */
static struct k_work_sync work_sync;

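/* The thread that runs the suite setup; captured in workq_setup(). */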
static struct k_thread *main_thread;

/* We have these threads, in strictly decreasing order of priority:
 * * coophi: a high-priority cooperative work queue
 * * system: the standard system work queue
 * * ztest thread: priority for threads running tests
 * * cooplo: a low-priority cooperative work queue
 * * preempt: a preemptible work queue
 *
 * The test infrastructure records the number of times each work queue
 * executes in a counter.
 *
 * The common work handler also supports internal re-submission if
 * configured to do so.
 *
 * There are three core handlers:
 * * The basic one (counter_handler) increments the count of handler
 *   invocations by work queue thread, optionally resubmits, then
 *   releases the semaphore the test is waiting for.
 * * The blocking one (rel_handler) waits until something invokes
 *   handler_release() to allow it to complete by invoking
 *   counter_handler().  This makes a work queue busy for arbitrary
 *   periods, but requires something external to trigger the release.
 * * The delaying one (delay_handler) waits for K_MSEC(DELAY_MS) before
 *   invoking counter_handler().
 */
static atomic_t resubmits_left;

/* k_uptime_get_32() on the last invocation of the core handler. */
static uint32_t volatile last_handle_ms;

static K_THREAD_STACK_DEFINE(coophi_stack, STACK_SIZE);
static struct k_work_q coophi_queue;
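/* A queue that is deliberately never started, used to verify that
 * submissions to an unstarted queue are rejected.
 */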
static struct k_work_q not_start_queue;
static atomic_t coophi_ctr;
static inline int coophi_counter(void)
{
	return atomic_get(&coophi_ctr);
}

static K_THREAD_STACK_DEFINE(cooplo_stack, STACK_SIZE);
static struct k_work_q cooplo_queue;
static atomic_t cooplo_ctr;
static inline int cooplo_counter(void)
{
	return atomic_get(&cooplo_ctr);
}

static inline int coop_counter(struct k_work_q *wq)
{
	return (wq == &coophi_queue) ? coophi_counter()
		: (wq == &cooplo_queue) ? cooplo_counter()
		: -1;
}

static K_THREAD_STACK_DEFINE(preempt_stack, STACK_SIZE);
static struct k_work_q preempt_queue;
static atomic_t preempt_ctr;
static inline int preempt_counter(void)
{
	return atomic_get(&preempt_ctr);
}

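/* Queue started with a NULL name in its configuration, used to verify
 * the default (empty) thread name.
 */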
static K_THREAD_STACK_DEFINE(invalid_test_stack, STACK_SIZE);
static struct k_work_q invalid_test_queue;

static atomic_t system_ctr;
static inline int system_counter(void)
{
	return atomic_get(&system_ctr);
}

static inline void reset_counters(void)
{
	/* If this fails the previous test didn't clean up */
	zassert_equal(k_sem_take(&sync_sem, K_NO_WAIT), -EBUSY);
	last_handle_ms = UINT32_MAX;
	atomic_set(&resubmits_left, 0);
	atomic_set(&coophi_ctr, 0);
	atomic_set(&system_ctr, 0);
	atomic_set(&cooplo_ctr, 0);
	atomic_set(&preempt_ctr, 0);
}

static void counter_handler(struct k_work *work)
{
	last_handle_ms = k_uptime_get_32();
	if (k_current_get() == &coophi_queue.thread) {
		atomic_inc(&coophi_ctr);
	} else if (k_current_get() == &k_sys_work_q.thread) {
		atomic_inc(&system_ctr);
	} else if (k_current_get() == &cooplo_queue.thread) {
		atomic_inc(&cooplo_ctr);
	} else if (k_current_get() == &preempt_queue.thread) {
		atomic_inc(&preempt_ctr);
	}
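	/* Submitting with a NULL queue parameter from within the
	 * handler resubmits the item to the queue on which it is
	 * currently running.
	 */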
	if (atomic_dec(&resubmits_left) > 0) {
		(void)k_work_submit_to_queue(NULL, work);
	} else {
		k_sem_give(&sync_sem);
	}
}

static inline void handler_release(void)
{
	k_sem_give(&rel_sem);
}

static void async_release_cb(struct k_timer *timer)
{
	handler_release();
}

static K_TIMER_DEFINE(async_releaser, async_release_cb, NULL);

static inline void async_release(void)
{
	k_timer_start(&async_releaser, K_TICKS(1), K_NO_WAIT);
}
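
/* Note: the async_releaser timer callback runs in interrupt context,
 * so the release can happen even while threads are blocked or
 * busy-waiting.
 */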

static void rel_handler(struct k_work *work)
{
	(void)k_sem_take(&rel_sem, K_FOREVER);
	counter_handler(work);
}

static void delay_handler(struct k_work *work)
{
	k_sleep(K_MSEC(DELAY_MS));
	counter_handler(work);
}

/* Check that standard initializations result in expected content. */
static void test_work_init(void)
{
	static K_WORK_DEFINE(fnstat, counter_handler);

	static struct k_work stack;

	k_work_init(&stack, counter_handler);
	zassert_mem_equal(&stack, &fnstat, sizeof(stack),
			  NULL);
}

static void test_delayable_init(void)
{
	static K_WORK_DELAYABLE_DEFINE(fnstat, counter_handler);

	static struct k_work_delayable stack;

	k_work_init_delayable(&stack, counter_handler);
	zassert_mem_equal(&stack, &fnstat, sizeof(stack),
			  NULL);
}

/* Check that submission to an unstarted queue is diagnosed. */
ZTEST(work, test_unstarted)
{
	int rc;

	k_work_init(&common_work, counter_handler);
	zassert_equal(k_work_busy_get(&common_work), 0);

	rc = k_work_submit_to_queue(&not_start_queue, &common_work);
	zassert_equal(rc, -ENODEV);
}

static void test_queue_start(void)
{
	struct k_work_queue_config cfg = {
		.name = "wq.preempt",
	};
	k_work_queue_init(&preempt_queue);
	zassert_equal(preempt_queue.flags, 0);
	k_work_queue_start(&preempt_queue, preempt_stack, STACK_SIZE,
			    PREEMPT_PRIORITY, &cfg);
	zassert_equal(preempt_queue.flags, K_WORK_QUEUE_STARTED);

	if (IS_ENABLED(CONFIG_THREAD_NAME)) {
		const char *tn = k_thread_name_get(&preempt_queue.thread);

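		/* The queue copies the configured name into the
		 * thread, so the returned pointer differs from
		 * cfg.name but the strings compare equal.
		 */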
		zassert_true(tn != cfg.name);
		zassert_true(tn != NULL);
		zassert_equal(strcmp(tn, cfg.name), 0);
	}

	cfg.name = NULL;
	zassert_equal(invalid_test_queue.flags, 0);
	k_work_queue_start(&invalid_test_queue, invalid_test_stack, STACK_SIZE,
			    PREEMPT_PRIORITY, &cfg);
	zassert_equal(invalid_test_queue.flags, K_WORK_QUEUE_STARTED);

	if (IS_ENABLED(CONFIG_THREAD_NAME)) {
		const char *tn = k_thread_name_get(&invalid_test_queue.thread);

		zassert_true(tn != cfg.name);
		zassert_true(tn != NULL);
		zassert_equal(strcmp(tn, ""), 0);
	}

	cfg.name = "wq.coophi";
	cfg.no_yield = true;
	k_work_queue_start(&coophi_queue, coophi_stack, STACK_SIZE,
			    COOPHI_PRIORITY, &cfg);
	zassert_equal(coophi_queue.flags,
		      K_WORK_QUEUE_STARTED | K_WORK_QUEUE_NO_YIELD, NULL);

	cfg.name = "wq.cooplo";
	cfg.no_yield = true;
	k_work_queue_start(&cooplo_queue, cooplo_stack, STACK_SIZE,
			    COOPLO_PRIORITY, &cfg);
	zassert_equal(cooplo_queue.flags,
		      K_WORK_QUEUE_STARTED | K_WORK_QUEUE_NO_YIELD, NULL);
}

/* Check validation of submission without a destination queue. */
ZTEST(work, test_null_queue)
{
	int rc;

	k_work_init(&common_work, counter_handler);
	zassert_equal(k_work_busy_get(&common_work), 0);

	rc = k_work_submit_to_queue(NULL, &common_work);
	zassert_equal(rc, -EINVAL);
}

/* Basic single-CPU check submitting with a non-blocking handler. */
ZTEST(work_1cpu, test_1cpu_simple_queue)
{
	int rc;

	/* Reset state and use the non-blocking handler */
	reset_counters();
	k_work_init(&common_work, counter_handler);
	zassert_equal(k_work_busy_get(&common_work), 0);
	zassert_equal(k_work_is_pending(&common_work), false);

	/* Submit to the cooperative queue */
	rc = k_work_submit_to_queue(&coophi_queue, &common_work);
	zassert_equal(rc, 1);
	zassert_equal(k_work_busy_get(&common_work), K_WORK_QUEUED);
	zassert_equal(k_work_is_pending(&common_work), true);

	/* Shouldn't have been started since test thread is
	 * cooperative.
	 */
	zassert_equal(coophi_counter(), 0);

	/* Let it run, then check it finished. */
	k_sleep(K_TICKS(1));
	zassert_equal(coophi_counter(), 1);
	zassert_equal(k_work_busy_get(&common_work), 0);

	/* Flush the sync state from completion */
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);
}

/* Basic SMP check submitting with a non-blocking handler. */
ZTEST(work, test_smp_simple_queue)
{
	if (!IS_ENABLED(CONFIG_SMP)) {
		ztest_test_skip();
		return;
	}

	int rc;

	/* Reset state and use the non-blocking handler */
	reset_counters();
	k_work_init(&common_work, counter_handler);
	zassert_equal(k_work_busy_get(&common_work), 0);
	zassert_equal(k_work_is_pending(&common_work), false);

	/* Submit to the cooperative queue */
	rc = k_work_submit_to_queue(&coophi_queue, &common_work);
	zassert_equal(rc, 1);

	/* It should run and finish without this thread yielding. */
	int64_t ts0 = k_uptime_ticks();
	uint32_t delay;

	do {
		delay = k_ticks_to_ms_floor32(k_uptime_ticks() - ts0);
	} while (k_work_is_pending(&common_work) && (delay < DELAY_MS));

	zassert_equal(k_work_busy_get(&common_work), 0);
	zassert_equal(coophi_counter(), 1);

	/* Flush the sync state from completion */
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);
}

/* Basic single-CPU check submitting with a blocking handler */
ZTEST(work_1cpu, test_1cpu_sync_queue)
{
	int rc;

	/* Reset state and use the blocking handler */
	reset_counters();
	k_work_init(&common_work, rel_handler);
	zassert_equal(k_work_busy_get(&common_work), 0);

	/* Submit to the cooperative queue */
	rc = k_work_submit_to_queue(&coophi_queue, &common_work);
	zassert_equal(rc, 1);
	zassert_equal(k_work_busy_get(&common_work), K_WORK_QUEUED);

	/* Shouldn't have been started since test thread is
	 * cooperative.
	 */
	zassert_equal(coophi_counter(), 0);

	/* Let it run, then check it didn't finish. */
	k_sleep(K_TICKS(1));
	zassert_equal(coophi_counter(), 0);
	zassert_equal(k_work_busy_get(&common_work), K_WORK_RUNNING);

	/* Make it ready so it can finish when this thread yields. */
	handler_release();
	zassert_equal(coophi_counter(), 0);

	/* Wait for then verify finish */
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);
	zassert_equal(coophi_counter(), 1);
}

/* Verify that if a work item is submitted while it is being run by a
 * queue thread, it gets submitted to the queue it's running on, to
 * prevent reentrant invocation, at least on a single CPU.
 */
ZTEST(work_1cpu, test_1cpu_reentrant_queue)
{
	int rc;

	/* Reset state and use the blocking handler */
	reset_counters();
	k_work_init(&common_work, rel_handler);

	/* Submit to the cooperative queue. */
	rc = k_work_submit_to_queue(&coophi_queue, &common_work);
	zassert_equal(rc, 1);
	zassert_equal(coophi_counter(), 0);

	/* Release it so it's running and can be rescheduled. */
	k_sleep(K_TICKS(1));
	zassert_equal(coophi_counter(), 0);

	/* Resubmit to a different queue. */
	rc = k_work_submit_to_queue(&preempt_queue, &common_work);
	zassert_equal(rc, 2);

	/* Release the first submission. */
	handler_release();
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);
	zassert_equal(coophi_counter(), 1);

	/* Confirm the second submission was redirected to the running
	 * queue to avoid re-entrancy problems.
	 */
	handler_release();
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);
	zassert_equal(coophi_counter(), 2);
}

/* Single CPU: submit two work items, then flush them in order before
 * they get started.
 */
ZTEST(work_1cpu, test_1cpu_queued_flush)
{
	int rc;

	/* Reset state and use the delaying handler */
	reset_counters();
	k_work_init(&common_work, delay_handler);
	k_work_init(&common_work1, delay_handler);

	/* Submit to the cooperative queue. */
	rc = k_work_submit_to_queue(&coophi_queue, &common_work1);
	zassert_equal(rc, 1);
	rc = k_work_submit_to_queue(&coophi_queue, &common_work);
	zassert_equal(rc, 1);
	zassert_equal(coophi_counter(), 0);

	/* Confirm that both are still queued, then wait for
	 * completion.  The flush should wait.
	 */
	zassert_equal(k_work_busy_get(&common_work), K_WORK_QUEUED);
	zassert_equal(k_work_busy_get(&common_work1), K_WORK_QUEUED);
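	/* common_work1 was submitted first, so it runs first; flushing
	 * common_work therefore waits for both, and the subsequent
	 * flush of common_work1 is a no-op.
	 */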
	zassert_true(k_work_flush(&common_work, &work_sync));
	zassert_false(k_work_flush(&common_work1, &work_sync));

	/* Verify completion. */
	zassert_equal(coophi_counter(), 2);
	zassert_true(!k_work_is_pending(&common_work));
	zassert_true(!k_work_is_pending(&common_work1));
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);

	/* After completion flush should be a no-op */
	zassert_false(k_work_flush(&common_work, &work_sync));
	zassert_false(k_work_flush(&common_work1, &work_sync));
}

/* Single CPU submit a work item and wait for flush after it's started.
 */
ZTEST(work_1cpu, test_1cpu_running_flush)
{
	int rc;

	/* Reset state and use the delaying handler */
	reset_counters();
	k_work_init(&common_work, delay_handler);

	/* Submit to the cooperative queue. */
	rc = k_work_submit_to_queue(&coophi_queue, &common_work);
	zassert_equal(rc, 1);
	zassert_equal(coophi_counter(), 0);
	zassert_equal(k_work_busy_get(&common_work), K_WORK_QUEUED);

	/* Release it so it's running. */
	k_sleep(K_TICKS(1));
	zassert_equal(k_work_busy_get(&common_work), K_WORK_RUNNING);
	zassert_equal(coophi_counter(), 0);

	/* Wait for completion.  This should be released by the delay
	 * handler.
	 */
	zassert_true(k_work_flush(&common_work, &work_sync));

	/* Verify completion. */
	zassert_equal(coophi_counter(), 1);
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);
}

/* Single CPU schedule a work item and wait for flush. */
ZTEST(work_1cpu, test_1cpu_delayed_flush)
{
	int rc;
	uint32_t flush_ms;
	uint32_t wait_ms;

	/* Reset state and use non-blocking handler */
	reset_counters();
	k_work_init_delayable(&dwork, counter_handler);

	/* Flushing an unscheduled item completes immediately. */
	zassert_false(k_work_flush_delayable(&dwork, &work_sync));

	/* Submit to the cooperative queue. */
	rc = k_work_schedule_for_queue(&coophi_queue, &dwork, K_MSEC(DELAY_MS));
	zassert_equal(rc, 1);
	zassert_equal(coophi_counter(), 0);

	/* Align to tick then flush. */
	k_sleep(K_TICKS(1));
	flush_ms = k_uptime_get_32();
	zassert_true(k_work_flush_delayable(&dwork, &work_sync));
	wait_ms = last_handle_ms - flush_ms;
	zassert_true(wait_ms <= 1, "waited %u", wait_ms);

	/* Verify completion. */
	zassert_equal(coophi_counter(), 1);
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);
}

/* Single CPU cancel before work item is unqueued should complete
 * immediately.
 */
ZTEST(work_1cpu, test_1cpu_queued_cancel)
{
	int rc;

	/* Reset state and use the blocking handler */
	reset_counters();
	k_work_init(&common_work, rel_handler);

	/* Submit to the cooperative queue. */
	rc = k_work_submit_to_queue(&coophi_queue, &common_work);
	zassert_equal(rc, 1);
	zassert_equal(coophi_counter(), 0);

	/* Cancellation should complete immediately. */
	zassert_equal(k_work_cancel(&common_work), 0);

	/* Shouldn't have run. */
	zassert_equal(coophi_counter(), 0);
}

/* Single CPU cancel before work item is unqueued should not wait. */
ZTEST(work_1cpu, test_1cpu_queued_cancel_sync)
{
	int rc;

	/* Reset state and use the blocking handler */
	reset_counters();
	k_work_init(&common_work, rel_handler);

	/* Cancelling an unqueued work item should not affect the work
	 * and should return false.
	 */
	zassert_false(k_work_cancel_sync(&common_work, &work_sync));

	/* Submit to the cooperative queue. */
	rc = k_work_submit_to_queue(&coophi_queue, &common_work);
	zassert_equal(rc, 1);
	zassert_equal(coophi_counter(), 0);

	/* Cancellation should complete immediately, indicating that
	 * work was pending.
	 */
	zassert_true(k_work_cancel_sync(&common_work, &work_sync));

	/* Shouldn't have run. */
	zassert_equal(coophi_counter(), 0);
}

/* Single CPU cancel before scheduled work item is queued should
 * complete immediately.
 */
ZTEST(work_1cpu, test_1cpu_delayed_cancel)
{
	int rc;

	/* Reset state and use the blocking handler */
	reset_counters();
	k_work_init_delayable(&dwork, rel_handler);

	/* Submit to the cooperative queue. */
	rc = k_work_schedule_for_queue(&coophi_queue, &dwork, K_MSEC(DELAY_MS));
	zassert_equal(rc, 1);
	zassert_equal(coophi_counter(), 0);

	/* Cancellation should complete immediately. */
	zassert_equal(k_work_cancel_delayable(&dwork), 0);

	/* Shouldn't have run. */
	zassert_equal(coophi_counter(), 0);
}

/* Single CPU cancel before scheduled work item is queued should not wait. */
ZTEST(work_1cpu, test_1cpu_delayed_cancel_sync)
{
	int rc;

	/* Reset state and use the blocking handler */
	reset_counters();
	k_work_init_delayable(&dwork, rel_handler);

	/* Cancelling an unqueued delayable work item should not
	 * affect the work and should return false.
	 */
	zassert_false(k_work_cancel_delayable_sync(&dwork, &work_sync));

	/* Submit to the cooperative queue. */
	rc = k_work_schedule_for_queue(&coophi_queue, &dwork, K_MSEC(DELAY_MS));
	zassert_equal(rc, 1);
	zassert_equal(coophi_counter(), 0);

	/* Cancellation should complete immediately, indicating that
	 * work was pending.
	 */
	zassert_true(k_work_cancel_delayable_sync(&dwork, &work_sync));

	/* Shouldn't have run. */
	zassert_equal(coophi_counter(), 0);
}

/* Single CPU cancel after delayable item starts should wait. */
ZTEST(work_1cpu, test_1cpu_delayed_cancel_sync_wait)
{
	int rc;

	/* Reset state and use the blocking handler */
	reset_counters();
	k_work_init_delayable(&dwork, rel_handler);

	/* Submit to the cooperative queue. */
	rc = k_work_schedule_for_queue(&coophi_queue, &dwork, K_NO_WAIT);
	zassert_equal(k_work_delayable_busy_get(&dwork), K_WORK_QUEUED);
	zassert_equal(coophi_counter(), 0);

	/* Get it to running, where it will block. */
	k_sleep(K_TICKS(1));
	zassert_equal(coophi_counter(), 0);
	zassert_equal(k_work_delayable_busy_get(&dwork), K_WORK_RUNNING);

	/* Schedule to release, then cancel should delay. */
	async_release();
	zassert_true(k_work_cancel_delayable_sync(&dwork, &work_sync));

	/* Verify completion. */
	zassert_equal(coophi_counter(), 1);
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);
}

/* Infrastructure to capture behavior of work item that's being
 * cancelled.
 */
struct test_running_cancel_timer {
	struct k_timer timer;
	struct k_work work;
	int submit_rc;
	int busy_rc;
};

static struct test_running_cancel_timer test_running_cancel_ctx;

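/* Timer callback, invoked from interrupt context: captures the work
 * item's busy state and the result of attempting to resubmit it, then
 * releases the blocked handler.
 */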
static void test_running_cancel_cb(struct k_timer *timer)
{
	struct test_running_cancel_timer *ctx =
		CONTAINER_OF(timer, struct test_running_cancel_timer, timer);

	ctx->busy_rc = k_work_busy_get(&ctx->work);
	ctx->submit_rc = k_work_submit_to_queue(&coophi_queue, &ctx->work);
	handler_release();
}

/* Single CPU test cancellation after work starts. */
ZTEST(work_1cpu, test_1cpu_running_cancel)
{
	struct test_running_cancel_timer *ctx = &test_running_cancel_ctx;
	struct k_work *wp = &ctx->work;
	static const uint32_t ms_timeout = 10;
	int rc;

	/* Reset state and use the blocking handler */
	reset_counters();
	k_work_init(wp, rel_handler);

	/* Submit to the cooperative queue. */
	rc = k_work_submit_to_queue(&coophi_queue, wp);
	zassert_equal(rc, 1);
	zassert_equal(coophi_counter(), 0);

	/* Release it so it's running. */
	k_sleep(K_TICKS(1));
	zassert_equal(coophi_counter(), 0);

	/* Schedule the async process to capture state and release work. */
	ctx->submit_rc = INT_MAX;
	ctx->busy_rc = INT_MAX;
	k_timer_init(&ctx->timer, test_running_cancel_cb, NULL);
	k_timer_start(&ctx->timer, K_MSEC(ms_timeout), K_NO_WAIT);

	/* Cancellation should not complete. */
	zassert_equal(k_work_cancel(wp), K_WORK_RUNNING | K_WORK_CANCELING,
		      NULL);

	/* Handler should not have run. */
	zassert_equal(coophi_counter(), 0);

	/* Busy-wait until the timer expires.  This thread does not
	 * yield, so the released handler cannot run and the
	 * cancellation cannot complete yet.
	 */
	k_busy_wait(1000 * (ms_timeout + 1));

	zassert_equal(k_timer_status_get(&ctx->timer), 1);

	/* Wait for cancellation to complete. */
	zassert_true(k_work_cancel_sync(wp, &work_sync));

	/* Verify completion */
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);

	/* Handler should have detected running and canceling. */
	zassert_equal(ctx->busy_rc, K_WORK_RUNNING | K_WORK_CANCELING);

	/* Attempt to submit while cancelling should have been
	 * rejected.
	 */
	zassert_equal(ctx->submit_rc, -EBUSY);

	/* Post-cancellation should have no flags. */
	rc = k_work_busy_get(wp);
	zassert_equal(rc, 0, "bad: %d", rc);
}

/* Single CPU test wait-for-cancellation after the work item has
 * started running.
 */
ZTEST(work_1cpu, test_1cpu_running_cancel_sync)
{
	struct test_running_cancel_timer *ctx = &test_running_cancel_ctx;
	struct k_work *wp = &ctx->work;
	static const uint32_t ms_timeout = 10;
	int rc;

	/* Reset state and use the blocking handler */
	reset_counters();
	k_work_init(wp, rel_handler);

	/* Submit to the cooperative queue. */
	rc = k_work_submit_to_queue(&coophi_queue, wp);
	zassert_equal(rc, 1);
	zassert_equal(coophi_counter(), 0);

	/* Release it so it's running. */
	k_sleep(K_TICKS(1));
	zassert_equal(coophi_counter(), 0);

	/* Schedule the async process to capture state and release work. */
	ctx->submit_rc = INT_MAX;
	ctx->busy_rc = INT_MAX;
	k_timer_init(&ctx->timer, test_running_cancel_cb, NULL);
	k_timer_start(&ctx->timer, K_MSEC(ms_timeout), K_NO_WAIT);

	/* Cancellation should wait. */
	zassert_true(k_work_cancel_sync(wp, &work_sync));

	/* Handler should have run. */
	zassert_equal(coophi_counter(), 1);

	/* Busy-wait to ensure the timer has expired before checking
	 * its status.
	 */
	k_busy_wait(1000 * (ms_timeout + 1));

	zassert_equal(k_timer_status_get(&ctx->timer), 1);

	/* Verify completion */
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);

	/* Handler should have detected running and canceling. */
	zassert_equal(ctx->busy_rc, K_WORK_RUNNING | K_WORK_CANCELING,
		      NULL);

	/* Attempt to submit while cancelling should have been
	 * rejected.
	 */
	zassert_equal(ctx->submit_rc, -EBUSY);

	/* Post-cancellation should have no flags. */
	rc = k_work_busy_get(wp);
	zassert_equal(rc, 0, "bad: %d", rc);
}

/* SMP cancel after work item is started should succeed but require
 * wait.
 */
ZTEST(work, test_smp_running_cancel)
{
	int rc;

	if (!IS_ENABLED(CONFIG_SMP)) {
		ztest_test_skip();
		return;
	}

	/* Reset state and use the delaying handler */
	reset_counters();
	k_work_init(&common_work, delay_handler);

	/* Submit to the cooperative queue. */
	rc = k_work_submit_to_queue(&coophi_queue, &common_work);
	zassert_equal(rc, 1);

	/* It should advance to running without this thread yielding. */
	int64_t ts0 = k_uptime_ticks();
	uint32_t delay;

	do {
		delay = k_ticks_to_ms_floor32(k_uptime_ticks() - ts0);
	} while ((k_work_busy_get(&common_work) != K_WORK_RUNNING)
		 && (delay < DELAY_MS));

	/* Cancellation should not succeed immediately because the
	 * work is running.
	 */
	rc = k_work_cancel(&common_work);
	zassert_equal(rc, K_WORK_RUNNING | K_WORK_CANCELING, "rc %x", rc);

	/* Sync should wait. */
	zassert_equal(k_work_cancel_sync(&common_work, &work_sync), true);

	/* Should have completed. */
	zassert_equal(coophi_counter(), 1);
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);
}

/* Drain with no active workers completes immediately. */
ZTEST(work, test_drain_empty)
{
	int rc;

	rc = k_work_queue_drain(&coophi_queue, false);
	zassert_equal(rc, 0);
}

struct test_drain_wait_timer {
	struct k_timer timer;
	struct k_work work;
	int submit_rc;
};

static struct test_drain_wait_timer test_drain_wait_ctx;

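/* Timer callback, invoked from interrupt context while the queue is
 * draining: records the result of an external (non-chained)
 * submission, which is expected to be rejected with -EBUSY.
 */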
static void test_drain_wait_cb(struct k_timer *timer)
{
	struct test_drain_wait_timer *ctx =
		CONTAINER_OF(timer, struct test_drain_wait_timer, timer);

	ctx->submit_rc = k_work_submit_to_queue(&coophi_queue, &ctx->work);
}

/* Single CPU submit an item and wait for it to drain. */
ZTEST(work_1cpu, test_1cpu_drain_wait)
{
	struct test_drain_wait_timer *ctx = &test_drain_wait_ctx;
	struct k_work *wp = &ctx->work;
	int rc;

	/* Reset state, allow one re-submission, and use the delaying
	 * handler.
	 */
	reset_counters();
	atomic_set(&resubmits_left, 1);
	k_work_init(wp, delay_handler);

	/* Submit to the cooperative queue. */
	rc = k_work_submit_to_queue(&coophi_queue, wp);
	zassert_equal(rc, 1);
	zassert_equal(coophi_counter(), 0);

	/* Schedule the async process to capture submission state
	 * while draining.
	 */
	ctx->submit_rc = INT_MAX;
	k_timer_init(&ctx->timer, test_drain_wait_cb, NULL);
	k_timer_start(&ctx->timer, K_MSEC(10), K_NO_WAIT);

	/* Wait to drain */
	rc = k_work_queue_drain(&coophi_queue, false);
	zassert_equal(rc, 1);

	/* Wait until timer expires. */
	(void)k_timer_status_sync(&ctx->timer);

	/* Verify completion */
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);

	/* Confirm that chained submission worked, and non-chained
	 * submission failed.
	 */
	zassert_equal(coophi_counter(), 2);
	zassert_equal(ctx->submit_rc, -EBUSY);
}

/* Single CPU submit item, drain with plug, test, then unplug. */
ZTEST(work_1cpu, test_1cpu_plugged_drain)
{
	int rc;

	/* Reset state and use the delaying handler. */
	reset_counters();
	k_work_init(&common_work, delay_handler);

	/* Submit to the cooperative queue */
	rc = k_work_submit_to_queue(&coophi_queue, &common_work);
	zassert_equal(rc, 1);

	/* Wait to drain, and plug. */
	rc = k_work_queue_drain(&coophi_queue, true);
	zassert_equal(rc, 1);

	/* Verify completion */
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);
	zassert_equal(coophi_counter(), 1);

	/* Queue should be plugged */
	zassert_equal(coophi_queue.flags,
		      K_WORK_QUEUE_STARTED
		      | K_WORK_QUEUE_PLUGGED
		      | K_WORK_QUEUE_NO_YIELD,
		      NULL);

	/* Switch to the non-blocking handler. */
	k_work_init(&common_work, counter_handler);

	/* Resubmission should fail because queue is plugged */
	rc = k_work_submit_to_queue(&coophi_queue, &common_work);
	zassert_equal(rc, -EBUSY);

	/* Unplug the queue */
	rc = k_work_queue_unplug(&coophi_queue);
	zassert_equal(rc, 0);

	/* Unplugging an already-unplugged queue should not affect it */
	rc = k_work_queue_unplug(&coophi_queue);
	zassert_equal(rc, -EALREADY);
	zassert_equal(coophi_queue.flags,
		      K_WORK_QUEUE_STARTED | K_WORK_QUEUE_NO_YIELD,
		      NULL);

	/* Resubmission should succeed and complete */
	rc = k_work_submit_to_queue(&coophi_queue, &common_work);
	zassert_equal(rc, 1);

	/* Flush the sync state and verify completion */
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);
	zassert_equal(coophi_counter(), 2);
}

/* Single CPU test delayed submission */
ZTEST(work_1cpu, test_1cpu_basic_schedule)
{
	int rc;
	uint32_t sched_ms;
	uint32_t max_ms = k_ticks_to_ms_ceil32(1U
				+ k_ms_to_ticks_ceil32(DELAY_MS));
	uint32_t elapsed_ms;
	struct k_work *wp = &dwork.work; /* whitebox testing */

	/* Reset state and use non-blocking handler */
	reset_counters();
	k_work_init_delayable(&dwork, counter_handler);

	/* Verify that work is idle and marked delayable. */
	zassert_equal(k_work_busy_get(wp), 0);
	zassert_equal(wp->flags & K_WORK_DELAYABLE, K_WORK_DELAYABLE,
		       NULL);

	/* Align to tick, then schedule after normal delay. */
	k_sleep(K_TICKS(1));
	sched_ms = k_uptime_get_32();
	rc = k_work_schedule_for_queue(&coophi_queue, &dwork, K_MSEC(DELAY_MS));
	zassert_equal(rc, 1);
	rc = k_work_busy_get(wp);
	zassert_equal(rc, K_WORK_DELAYED);
	zassert_equal(k_work_delayable_busy_get(&dwork), rc);
	zassert_equal(k_work_delayable_is_pending(&dwork), true);

	/* Scheduling again does nothing. */
	rc = k_work_schedule_for_queue(&coophi_queue, &dwork, K_NO_WAIT);
	zassert_equal(rc, 0);

	/* Wait for completion */
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);

	/* Make sure it ran and is now idle */
	zassert_equal(coophi_counter(), 1);
	zassert_equal(k_work_busy_get(wp), 0);

	/* Check that the delay is within the expected range. */
	elapsed_ms = last_handle_ms - sched_ms;
	zassert_true(elapsed_ms >= DELAY_MS,
		     "short %u < %u\n", elapsed_ms, DELAY_MS);
	zassert_true(elapsed_ms <= max_ms,
		     "long %u > %u\n", elapsed_ms, max_ms);
}

struct state_1cpu_basic_schedule_running {
	struct k_work_delayable dwork;
	int schedule_res;
};

static void handle_1cpu_basic_schedule_running(struct k_work *work)
{
	struct k_work_delayable *one_dwork = k_work_delayable_from_work(work);
	struct state_1cpu_basic_schedule_running *state
		= CONTAINER_OF(one_dwork, struct state_1cpu_basic_schedule_running,
			       dwork);

	/* Co-opt the resubmits so we can test the schedule API
	 * explicitly.
	 */
	if (atomic_dec(&resubmits_left) > 0) {
		/* Schedule again on current queue */
		state->schedule_res = k_work_schedule_for_queue(NULL, one_dwork,
								K_MSEC(DELAY_MS));
	} else {
		/* Flag that it didn't schedule */
		state->schedule_res = -EALREADY;
	}

	counter_handler(work);
}

/* Single CPU test that schedules when running */
ZTEST(work_1cpu, test_1cpu_basic_schedule_running)
{
	int rc;
	static struct state_1cpu_basic_schedule_running state = {
		.schedule_res = -1,
	};

	/* Reset state and set for one resubmit.  Use a test-specific
	 * handler.
	 */
	reset_counters();
	atomic_set(&resubmits_left, 1);
	k_work_init_delayable(&state.dwork, handle_1cpu_basic_schedule_running);

	zassert_equal(state.schedule_res, -1);

	rc = k_work_schedule_for_queue(&coophi_queue, &state.dwork,
				       K_MSEC(DELAY_MS));
	zassert_equal(rc, 1);

	zassert_equal(coop_counter(&coophi_queue), 0);

	/* Wait for completion */
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);
	zassert_equal(state.schedule_res, 1);
	zassert_equal(coop_counter(&coophi_queue), 1);

	/* Wait for completion */
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);
	zassert_equal(state.schedule_res, -EALREADY);
	zassert_equal(coop_counter(&coophi_queue), 2);
}

/* Single CPU test schedule without delay is queued immediately. */
ZTEST(work_1cpu, test_1cpu_immed_schedule)
{
	int rc;
	struct k_work *wp = &dwork.work; /* whitebox testing */

	/* Reset state and use the non-blocking handler */
	reset_counters();
	k_work_init_delayable(&dwork, counter_handler);
	zassert_equal(k_work_busy_get(wp), 0);

	/* Submit to the cooperative queue */
	rc = k_work_schedule_for_queue(&coophi_queue, &dwork, K_NO_WAIT);
	zassert_equal(rc, 1);
	rc = k_work_busy_get(wp);
	zassert_equal(rc, K_WORK_QUEUED);
	zassert_equal(k_work_delayable_busy_get(&dwork), rc);
	zassert_equal(k_work_delayable_is_pending(&dwork), true);

	/* Scheduling again does nothing. */
	rc = k_work_schedule_for_queue(&coophi_queue, &dwork, K_NO_WAIT);
	zassert_equal(rc, 0);

	/* Shouldn't have been started since test thread is
	 * cooperative.
	 */
	zassert_equal(coophi_counter(), 0);

	/* Let it run, then check it finished. */
	k_sleep(K_TICKS(1));
	zassert_equal(coophi_counter(), 1);
	zassert_equal(k_work_busy_get(wp), 0);

	/* Flush the sync state from completion */
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);
}

/* Single CPU test that delayed work can be rescheduled. */
ZTEST(work_1cpu, test_1cpu_basic_reschedule)
{
	int rc;
	uint32_t sched_ms;
	uint32_t max_ms = k_ticks_to_ms_ceil32(1U
				+ k_ms_to_ticks_ceil32(DELAY_MS));
	uint32_t elapsed_ms;
	struct k_work *wp = &dwork.work; /* whitebox testing */

	/* Reset state and use non-blocking handler */
	reset_counters();
	k_work_init_delayable(&dwork, counter_handler);

	/* Verify that work is idle and marked delayable. */
	zassert_equal(k_work_busy_get(wp), 0);
	zassert_equal(wp->flags & K_WORK_DELAYABLE, K_WORK_DELAYABLE,
		       NULL);

	/* Schedule to the preempt queue after twice the standard
	 * delay.
	 */
	rc = k_work_reschedule_for_queue(&preempt_queue, &dwork,
					  K_MSEC(2U * DELAY_MS));
	zassert_equal(rc, 1);
	zassert_equal(k_work_busy_get(wp), K_WORK_DELAYED);

	/* Align to tick then reschedule on the cooperative queue for
	 * the standard delay.
	 */
	k_sleep(K_TICKS(1));
	sched_ms = k_uptime_get_32();
	rc = k_work_reschedule_for_queue(&coophi_queue, &dwork,
					  K_MSEC(DELAY_MS));
	zassert_equal(rc, 1);
	zassert_equal(k_work_busy_get(wp), K_WORK_DELAYED);

	/* Wait for completion */
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);

	/* Make sure it ran on the coop queue and is now idle */
	zassert_equal(coophi_counter(), 1);
	zassert_equal(k_work_busy_get(wp), 0);

	/* Check that the delay is within the expected range. */
	elapsed_ms = last_handle_ms - sched_ms;
	zassert_true(elapsed_ms >= DELAY_MS,
		     "short %u < %u\n", elapsed_ms, DELAY_MS);
	zassert_true(elapsed_ms <= max_ms,
		     "long %u > %u\n", elapsed_ms, max_ms);
}

/* Single CPU test that delayed work can be immediately queued by
 * reschedule API.
 */
ZTEST(work_1cpu, test_1cpu_immed_reschedule)
{
	int rc;
	struct k_work *wp = &dwork.work; /* whitebox testing */

	/* Reset state and use the delay handler */
	reset_counters();
	k_work_init_delayable(&dwork, delay_handler);
	zassert_equal(k_work_busy_get(wp), 0);

	/* Schedule immediately to the cooperative queue */
	rc = k_work_reschedule_for_queue(&coophi_queue, &dwork, K_NO_WAIT);
	zassert_equal(rc, 1);
	zassert_equal(k_work_busy_get(wp), K_WORK_QUEUED);

	/* Shouldn't have been started since test thread is
	 * cooperative.
	 */
	zassert_equal(coophi_counter(), 0);

	/* Let it run, then check it didn't finish. */
	k_sleep(K_TICKS(1));
	zassert_equal(coophi_counter(), 0);
	zassert_equal(k_work_busy_get(wp), K_WORK_RUNNING);

	/* Schedule immediately to the preemptive queue (will divert
	 * to coop since running).
	 */
	rc = k_work_reschedule_for_queue(&preempt_queue, &dwork, K_NO_WAIT);
	zassert_equal(rc, 2);
	zassert_equal(k_work_busy_get(wp), K_WORK_QUEUED | K_WORK_RUNNING,
		      NULL);

	/* Schedule after 3x the delay to the preemptive queue
	 * (will not divert since previous submissions will have
	 * completed).
	 */
	rc = k_work_reschedule_for_queue(&preempt_queue, &dwork,
					  K_MSEC(3 * DELAY_MS));
	zassert_equal(rc, 1);
	zassert_equal(k_work_busy_get(wp),
		      K_WORK_DELAYED | K_WORK_QUEUED | K_WORK_RUNNING,
		      NULL);

	/* Wait for the original no-wait submission (total 1 delay) */
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);

	/* Check that coop ran once, and work is still delayed and
	 * also running.
	 */
	zassert_equal(coophi_counter(), 1);
	zassert_equal(k_work_busy_get(wp), K_WORK_DELAYED | K_WORK_RUNNING,
		      NULL);

	/* Wait for the queued no-wait submission (total 2 delay) */
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);

	/* Check that got diverted to coop and ran, and work is still
	 * delayed.
	 */
	zassert_equal(coophi_counter(), 2);
	zassert_equal(preempt_counter(), 0);
	zassert_equal(k_work_busy_get(wp), K_WORK_DELAYED,
		      NULL);

	/* Wait for the delayed submission (total 3 delay) */
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);

	/* Check that ran on preempt.  In fact we're here because the
	 * test thread is higher priority, so the work will still be
	 * marked running.
	 */
	zassert_equal(coophi_counter(), 2);
	zassert_equal(preempt_counter(), 1);
	zassert_equal(k_work_busy_get(wp), K_WORK_RUNNING,
		      NULL);

	/* Wait for preempt to drain */
	rc = k_work_queue_drain(&preempt_queue, false);
	zassert_equal(rc, 1);
}

/* Test no-yield behavior, returns true iff work queue priority is
 * higher than test thread priority
 */
static bool try_queue_no_yield(struct k_work_q *wq)
{
	int rc;
	bool is_high = (k_thread_priority_get(k_work_queue_thread_get(wq))
			< k_thread_priority_get(k_current_get()));

	TC_PRINT("Testing no-yield on %s-priority queue\n",
		 is_high ? "high" : "low");
	reset_counters();

	/* Submit two work items directly to the cooperative queue. */

	k_work_init(&common_work, counter_handler);
	k_work_init_delayable(&dwork, counter_handler);

	rc = k_work_submit_to_queue(wq, &common_work);
	zassert_equal(rc, 1);
	rc = k_work_schedule_for_queue(wq, &dwork, K_NO_WAIT);
	zassert_equal(rc, 1);

	/* Wait for completion */
	zassert_equal(k_work_is_pending(&common_work), true);
	zassert_equal(k_work_delayable_is_pending(&dwork), true);
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);

	/* Because there was no yield both should have run, and
	 * another yield won't cause anything to happen.
	 */
	zassert_equal(coop_counter(wq), 2);
	zassert_equal(k_work_is_pending(&common_work), false);
	zassert_equal(k_work_delayable_is_pending(&dwork), false);

	/* The first give unblocked this thread; we need to consume
	 * the give from the second work task.
	 */
	zassert_equal(k_sem_take(&sync_sem, K_NO_WAIT), 0);

	zassert_equal(k_sem_take(&sync_sem, K_NO_WAIT), -EBUSY);

	return is_high;
}

/* Verify that no-yield policy works */
ZTEST(work_1cpu, test_1cpu_queue_no_yield)
{
	/* This test needs two slots available in the sem! */
	k_sem_init(&sync_sem, 0, 2);
	zassert_equal(try_queue_no_yield(&coophi_queue), true);
	zassert_equal(try_queue_no_yield(&cooplo_queue), false);
	k_sem_init(&sync_sem, 0, 1);
}

/* Basic functionality with the system work queue. */
ZTEST(work_1cpu, test_1cpu_system_queue)
{
	int rc;

	/* Reset state and use the non-blocking handler */
	reset_counters();
	k_work_init(&common_work, counter_handler);
	zassert_equal(k_work_busy_get(&common_work), 0);

	/* Submit to the system queue */
	rc = k_work_submit(&common_work);
	zassert_equal(rc, 1);
	zassert_equal(k_work_busy_get(&common_work), K_WORK_QUEUED);

	/* Shouldn't have been started since test thread is
	 * cooperative.
	 */
	zassert_equal(system_counter(), 0);

	/* Let it run, then check it finished. */
	k_sleep(K_TICKS(1));
	zassert_equal(system_counter(), 1);
	zassert_equal(k_work_busy_get(&common_work), 0);

	/* Flush the sync state from completion */
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);
}

ZTEST(work_1cpu, test_1cpu_system_schedule)
{
	int rc;
	uint32_t sched_ms;
	uint32_t max_ms = k_ticks_to_ms_ceil32(1U
				+ k_ms_to_ticks_ceil32(DELAY_MS));
	uint32_t elapsed_ms;

	/* Reset state and use non-blocking handler */
	reset_counters();
	k_work_init_delayable(&dwork, counter_handler);

	/* Verify that work is idle and marked delayable. */
	zassert_equal(k_work_delayable_busy_get(&dwork), 0);
	zassert_equal(dwork.work.flags & K_WORK_DELAYABLE, K_WORK_DELAYABLE,
		       NULL);

	/* Align to tick, then schedule after normal delay. */
	k_sleep(K_TICKS(1));
	sched_ms = k_uptime_get_32();
	rc = k_work_schedule(&dwork, K_MSEC(DELAY_MS));
	zassert_equal(rc, 1);
	zassert_equal(k_work_delayable_busy_get(&dwork), K_WORK_DELAYED);

	/* Scheduling again does nothing. */
	rc = k_work_schedule(&dwork, K_NO_WAIT);
	zassert_equal(rc, 0);

	/* Wait for completion */
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);

	/* Make sure it ran and is now idle */
	zassert_equal(system_counter(), 1);
	zassert_equal(k_work_delayable_busy_get(&dwork), 0);

	/* Check that the delay is within the expected range. */
	elapsed_ms = last_handle_ms - sched_ms;
	zassert_true(elapsed_ms >= DELAY_MS,
		     "short %u < %u\n", elapsed_ms, DELAY_MS);
	zassert_true(elapsed_ms <= max_ms,
		     "long %u > %u\n", elapsed_ms, max_ms);
}

ZTEST(work_1cpu, test_1cpu_system_reschedule)
{
	int rc;
	uint32_t sched_ms;
	uint32_t max_ms = k_ticks_to_ms_ceil32(1U
				+ k_ms_to_ticks_ceil32(DELAY_MS));
	uint32_t elapsed_ms;

	/* Reset state and use non-blocking handler */
	reset_counters();
	k_work_init_delayable(&dwork, counter_handler);

	/* Verify that work is idle and marked delayable. */
	zassert_equal(k_work_delayable_busy_get(&dwork), 0);
	zassert_equal(dwork.work.flags & K_WORK_DELAYABLE, K_WORK_DELAYABLE,
		       NULL);

	/* Schedule to the system queue after twice the standard
	 * delay.
	 */
	rc = k_work_reschedule(&dwork, K_MSEC(2U * DELAY_MS));
	zassert_equal(rc, 1);
	zassert_equal(k_work_delayable_busy_get(&dwork), K_WORK_DELAYED);

	/* Align to tick then reschedule on the system queue for
	 * the standard delay.
	 */
	k_sleep(K_TICKS(1));
	sched_ms = k_uptime_get_32();
	rc = k_work_reschedule(&dwork, K_MSEC(DELAY_MS));
	zassert_equal(rc, 1);
	zassert_equal(k_work_delayable_busy_get(&dwork), K_WORK_DELAYED);

	/* Wait for completion */
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);

	/* Make sure it ran on the system queue and is now idle */
	zassert_equal(system_counter(), 1);
	zassert_equal(k_work_delayable_busy_get(&dwork), 0);

	/* Check that the delay is within the expected range. */
	elapsed_ms = last_handle_ms - sched_ms;
	zassert_true(elapsed_ms >= DELAY_MS,
		     "short %u < %u\n", elapsed_ms, DELAY_MS);
	zassert_true(elapsed_ms <= max_ms,
		     "long %u > %u\n", elapsed_ms, max_ms);
}

ZTEST(work, test_nop)
{
	ztest_test_skip();
}

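/* Suite setup: record the main thread, initialize the synchronization
 * semaphores, validate the static initializers, and start the work
 * queues exactly once across both suites.
 */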
void *workq_setup(void)
{
	main_thread = k_current_get();
	k_sem_init(&sync_sem, 0, 1);
	k_sem_init(&rel_sem, 0, 1);

	test_work_init();
	test_delayable_init();

	if (run_flag) {
		test_queue_start();
		run_flag = false;
	}

	return NULL;
}

ZTEST_SUITE(work_1cpu, NULL, workq_setup, ztest_simple_1cpu_before, ztest_simple_1cpu_after, NULL);
ZTEST_SUITE(work, NULL, workq_setup, NULL, NULL, NULL);