/*
 * Copyright (c) 2020 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * Second generation work queue implementation
 */

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <wait_q.h>
#include <zephyr/spinlock.h>
#include <errno.h>
#include <ksched.h>
#include <zephyr/sys/printk.h>

static inline void flag_clear(uint32_t *flagp,
			      uint32_t bit)
{
	*flagp &= ~BIT(bit);
}

static inline void flag_set(uint32_t *flagp,
			    uint32_t bit)
{
	*flagp |= BIT(bit);
}

static inline bool flag_test(const uint32_t *flagp,
			     uint32_t bit)
{
	return (*flagp & BIT(bit)) != 0U;
}

static inline bool flag_test_and_clear(uint32_t *flagp,
				       int bit)
{
	bool ret = flag_test(flagp, bit);

	flag_clear(flagp, bit);

	return ret;
}

static inline void flags_set(uint32_t *flagp,
			     uint32_t flags)
{
	*flagp = flags;
}

static inline uint32_t flags_get(const uint32_t *flagp)
{
	return *flagp;
}
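
/* Note: these helpers manipulate the K_WORK_* bits in k_work::flags and the
 * K_WORK_QUEUE_* bits in k_work_q::flags.  They provide no atomicity of
 * their own; callers are generally expected to hold the work lock declared
 * below, or to be initializing an object that is not yet visible to other
 * threads.
 */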

/* Lock to protect the internal state of all work items, work queues,
 * and pending_cancels.
 */
static struct k_spinlock lock;

/* Invoked by work thread */
static void handle_flush(struct k_work *work) { }

static inline void init_flusher(struct z_work_flusher *flusher)
{
	struct k_work *work = &flusher->work;
	k_sem_init(&flusher->sem, 0, 1);
	k_work_init(&flusher->work, handle_flush);
	flag_set(&work->flags, K_WORK_FLUSHING_BIT);
}

/* List of pending cancellations. */
static sys_slist_t pending_cancels;

/* Initialize a canceler record and add it to the list of pending
 * cancels.
 *
 * Invoked with work lock held.
 *
 * @param canceler the structure used to notify a waiting process.
 * @param work the work structure that is to be canceled
 */
static inline void init_work_cancel(struct z_work_canceller *canceler,
				    struct k_work *work)
{
	k_sem_init(&canceler->sem, 0, 1);
	canceler->work = work;
	sys_slist_append(&pending_cancels, &canceler->node);
}

/* Complete flushing of a work item.
 *
 * Invoked with work lock held.
 *
 * Invoked from a work queue thread.
 *
 * Reschedules.
 *
 * @param work the work structure that has completed flushing.
 */
static void finalize_flush_locked(struct k_work *work)
{
	struct z_work_flusher *flusher
		= CONTAINER_OF(work, struct z_work_flusher, work);

	flag_clear(&work->flags, K_WORK_FLUSHING_BIT);

	k_sem_give(&flusher->sem);
}

/* Complete cancellation of a work item.
 *
 * Invoked with work lock held; the lock is not released here.
 *
 * Invoked from a work queue thread.
 *
 * Reschedules.
 *
 * @param work the work structure that has completed cancellation
 */
static void finalize_cancel_locked(struct k_work *work)
{
	struct z_work_canceller *wc, *tmp;
	sys_snode_t *prev = NULL;

	/* Clear this first, so released high-priority threads don't
	 * see it when doing things.
	 */
	flag_clear(&work->flags, K_WORK_CANCELING_BIT);

	/* Search for and remove the matching container, and release
	 * what's waiting for the completion.  The same work item can
	 * appear multiple times in the list if multiple threads
	 * attempt to cancel it.
	 */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&pending_cancels, wc, tmp, node) {
		if (wc->work == work) {
			sys_slist_remove(&pending_cancels, prev, &wc->node);
			k_sem_give(&wc->sem);
			break;
		}
		prev = &wc->node;
	}
}

void k_work_init(struct k_work *work,
		  k_work_handler_t handler)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(handler != NULL);

	*work = (struct k_work)Z_WORK_INITIALIZER(handler);

	SYS_PORT_TRACING_OBJ_INIT(k_work, work);
}

static inline int work_busy_get_locked(const struct k_work *work)
{
	return flags_get(&work->flags) & K_WORK_MASK;
}

int k_work_busy_get(const struct k_work *work)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	int ret = work_busy_get_locked(work);

	k_spin_unlock(&lock, key);

	return ret;
}
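
/* Illustrative usage sketch (hypothetical caller code, not part of this
 * file):
 *
 *	int busy = k_work_busy_get(&my_work);
 *
 *	if ((busy & (K_WORK_QUEUED | K_WORK_RUNNING)) != 0) {
 *		... the handler has not yet completed ...
 *	}
 *
 * The snapshot is taken under the work lock but can become stale as soon
 * as the lock is released, so treat it as a hint unless the caller
 * provides its own synchronization.
 */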

/* Add a flusher work item to the queue.
 *
 * Invoked with work lock held.
 *
 * Caller must notify queue of pending work.
 *
 * @param queue queue on which a work item may appear.
 * @param work the work item that is either queued or running on @p
 * queue
 * @param flusher an uninitialized/unused flusher object
 */
static void queue_flusher_locked(struct k_work_q *queue,
				 struct k_work *work,
				 struct z_work_flusher *flusher)
{
	bool in_list = false;
	struct k_work *wn;

	/* Determine whether the work item is still queued. */
	SYS_SLIST_FOR_EACH_CONTAINER(&queue->pending, wn, node) {
		if (wn == work) {
			in_list = true;
			break;
		}
	}

	init_flusher(flusher);
	if (in_list) {
		sys_slist_insert(&queue->pending, &work->node,
				 &flusher->work.node);
	} else {
		sys_slist_prepend(&queue->pending, &flusher->work.node);
	}
}
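
/* Placement note: if the target work item is still on the pending list the
 * flusher is inserted immediately after it, so the flusher handler runs as
 * soon as the target's handler has been dequeued.  If the target is not on
 * the list it is presumably running, so the flusher is prepended and will
 * be the next item processed once the running handler returns.
 */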

/* Try to remove a work item from the given queue.
 *
 * Invoked with work lock held.
 *
 * @param queue the queue from which the work should be removed
 * @param work work that may be on the queue
 */
static inline void queue_remove_locked(struct k_work_q *queue,
				       struct k_work *work)
{
	if (flag_test_and_clear(&work->flags, K_WORK_QUEUED_BIT)) {
		(void)sys_slist_find_and_remove(&queue->pending, &work->node);
	}
}

/* Potentially notify a queue that it needs to look for pending work.
 *
 * This may make the work queue thread ready, but as the lock is held it
 * will not be a reschedule point.  Callers should yield after the lock is
 * released where appropriate (generally if this returns true).
 *
 * @param queue to be notified.  If this is null no notification is required.
 *
 * @return true if and only if the queue was notified and woken, i.e. a
 * reschedule is pending.
 */
static inline bool notify_queue_locked(struct k_work_q *queue)
{
	bool rv = false;

	if (queue != NULL) {
		rv = z_sched_wake(&queue->notifyq, 0, NULL);
	}

	return rv;
}

/* Submit a work item to a queue if queue state allows new work.
 *
 * Submission is rejected if no queue is provided, or if the queue is
 * draining and the work isn't being submitted from the queue's
 * thread (chained submission).
 *
 * Invoked with work lock held.
 * Conditionally notifies queue.
 *
 * @param queue the queue to which work should be submitted.  This may
 * be null, in which case the submission will fail.
 *
 * @param work to be submitted
 *
 * @retval 1 if successfully queued
 * @retval -EINVAL if no queue is provided
 * @retval -ENODEV if the queue is not started
 * @retval -EBUSY if the submission was rejected (draining, plugged)
 */
static inline int queue_submit_locked(struct k_work_q *queue,
				      struct k_work *work)
{
	if (queue == NULL) {
		return -EINVAL;
	}

	int ret;
	bool chained = (_current == &queue->thread) && !k_is_in_isr();
	bool draining = flag_test(&queue->flags, K_WORK_QUEUE_DRAIN_BIT);
	bool plugged = flag_test(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT);

	/* Test for acceptability, in priority order:
	 *
	 * * -ENODEV if the queue isn't running.
	 * * -EBUSY if draining and not chained
	 * * -EBUSY if plugged and not draining
	 * * otherwise OK
	 */
	if (!flag_test(&queue->flags, K_WORK_QUEUE_STARTED_BIT)) {
		ret = -ENODEV;
	} else if (draining && !chained) {
		ret = -EBUSY;
	} else if (plugged && !draining) {
		ret = -EBUSY;
	} else {
		sys_slist_append(&queue->pending, &work->node);
		ret = 1;
		(void)notify_queue_locked(queue);
	}

	return ret;
}

/* Attempt to submit work to a queue.
 *
 * The submission can fail if:
 * * the work is cancelling,
 * * no candidate queue can be identified;
 * * the candidate queue rejects the submission.
 *
 * Invoked with work lock held.
 * Conditionally notifies queue.
 *
 * @param work the work structure to be submitted
 *
 * @param queuep pointer to a queue reference.  On input this should
 * dereference to the proposed queue (which may be null); after completion it
 * will be null if the work was not submitted or if submitted will reference
 * the queue it was submitted to.  That may or may not be the queue provided
 * on input.
 *
 * @retval 0 if work was already submitted to a queue
 * @retval 1 if work was not submitted and has been queued to @p queue
 * @retval 2 if work was running and has been queued to the queue that was
 * running it
 * @retval -EBUSY if canceling or submission was rejected by queue
 * @retval -EINVAL if no queue is provided
 * @retval -ENODEV if the queue is not started
 */
static int submit_to_queue_locked(struct k_work *work,
				  struct k_work_q **queuep)
{
	int ret = 0;

	if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
		/* Disallowed */
		ret = -EBUSY;
	} else if (!flag_test(&work->flags, K_WORK_QUEUED_BIT)) {
		/* Not currently queued */
		ret = 1;

		/* If no queue specified resubmit to last queue.
		 */
		if (*queuep == NULL) {
			*queuep = work->queue;
		}

		/* If the work is currently running we have to use the
		 * queue it's running on to prevent handler
		 * re-entrancy.
		 */
		if (flag_test(&work->flags, K_WORK_RUNNING_BIT)) {
			__ASSERT_NO_MSG(work->queue != NULL);
			*queuep = work->queue;
			ret = 2;
		}

		int rc = queue_submit_locked(*queuep, work);

		if (rc < 0) {
			ret = rc;
		} else {
			flag_set(&work->flags, K_WORK_QUEUED_BIT);
			work->queue = *queuep;
		}
	} else {
		/* Already queued, do nothing. */
	}

	if (ret <= 0) {
		*queuep = NULL;
	}

	return ret;
}

/* Submit work to a queue but do not yield the current thread.
 *
 * Intended for internal use.
 *
 * See also submit_to_queue_locked().
 *
 * @param queue the queue to which the work should be submitted; may be
 * null to resubmit to the work item's previous queue
 *
 * @param work the work structure to be submitted
 *
 * @retval see submit_to_queue_locked()
 */
int z_work_submit_to_queue(struct k_work_q *queue,
		  struct k_work *work)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(work->handler != NULL);

	k_spinlock_key_t key = k_spin_lock(&lock);

	int ret = submit_to_queue_locked(work, &queue);

	k_spin_unlock(&lock, key);

	return ret;
}

int k_work_submit_to_queue(struct k_work_q *queue,
			    struct k_work *work)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit_to_queue, queue, work);

	int ret = z_work_submit_to_queue(queue, work);

	/* submit_to_queue_locked() won't reschedule on its own
	 * (really it should, otherwise this process will result in
	 * spurious calls to z_swap() due to the race), so do it here
	 * if the queue state changed.
	 */
	if (ret > 0) {
		z_reschedule_unlocked();
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, submit_to_queue, queue, work, ret);

	return ret;
}

int k_work_submit(struct k_work *work)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit, work);

	int ret = k_work_submit_to_queue(&k_sys_work_q, work);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, submit, work, ret);

	return ret;
}
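
/* Illustrative usage sketch (hypothetical caller code, not part of this
 * file): a work item is initialized once and then submitted, typically
 * from an ISR or another thread, so my_handler() runs on the system work
 * queue thread.
 *
 *	static void my_handler(struct k_work *work)
 *	{
 *		... do the deferred processing ...
 *	}
 *
 *	static struct k_work my_work;
 *
 *	k_work_init(&my_work, my_handler);
 *	...
 *	(void)k_work_submit(&my_work);
 *
 * A return value of 0 means the item was already queued; 1 or 2 mean it
 * was newly queued; negative values indicate rejection.
 */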

/* Flush the work item if necessary.
 *
 * Flushing is necessary only if the work is either queued or running.
 *
 * Invoked with work lock held.
 *
 * @param work the work item that is to be flushed
 * @param flusher state used to synchronize the flush
 *
 * @retval true if work is queued or running.  If this happens the
 * caller must take the flusher semaphore after releasing the lock.
 *
 * @retval false otherwise.  No wait required.
 */
static bool work_flush_locked(struct k_work *work,
			      struct z_work_flusher *flusher)
{
	bool need_flush = (flags_get(&work->flags)
			   & (K_WORK_QUEUED | K_WORK_RUNNING)) != 0U;

	if (need_flush) {
		struct k_work_q *queue = work->queue;

		__ASSERT_NO_MSG(queue != NULL);

		queue_flusher_locked(queue, work, flusher);
		notify_queue_locked(queue);
	}

	return need_flush;
}

bool k_work_flush(struct k_work *work,
		  struct k_work_sync *sync)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));
	__ASSERT_NO_MSG(!k_is_in_isr());
	__ASSERT_NO_MSG(sync != NULL);
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif /* CONFIG_KERNEL_COHERENCE */

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush, work);

	struct z_work_flusher *flusher = &sync->flusher;
	k_spinlock_key_t key = k_spin_lock(&lock);

	bool need_flush = work_flush_locked(work, flusher);

	k_spin_unlock(&lock, key);

	/* If necessary wait until the flusher item completes */
	if (need_flush) {
		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_work, flush, work, K_FOREVER);

		k_sem_take(&flusher->sem, K_FOREVER);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush, work, need_flush);

	return need_flush;
}
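
/* Illustrative usage sketch (hypothetical caller code): wait for any
 * in-flight invocation of the handler to finish before releasing
 * resources the handler uses.  The k_work_sync object can live on the
 * caller's stack unless CONFIG_KERNEL_COHERENCE requires otherwise.
 *
 *	struct k_work_sync sync;
 *
 *	if (k_work_flush(&my_work, &sync)) {
 *		... the work was queued or running and has now completed ...
 *	}
 */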

/* Execute the non-waiting steps necessary to cancel a work item.
 *
 * Invoked with work lock held.
 *
 * @param work the work item to be canceled.
 *
 * @retval non-zero if we need to wait for the work item to finish canceling
 * @retval 0 if the work item is idle
 *
 * @return the k_work_busy_get() flag state captured under lock
 */
static int cancel_async_locked(struct k_work *work)
{
	/* If we haven't already started canceling, do it now. */
	if (!flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
		/* Remove it from the queue, if it's queued. */
		queue_remove_locked(work->queue, work);
	}

	/* If it's still busy after it's been dequeued, then flag it
	 * as canceling.
	 */
	int ret = work_busy_get_locked(work);

	if (ret != 0) {
		flag_set(&work->flags, K_WORK_CANCELING_BIT);
		ret = work_busy_get_locked(work);
	}

	return ret;
}

/* Determine whether a caller must block for cancellation to complete,
 * and if so register it for notification.
 *
 * Invoked with work lock held.
 *
 * @param work work that is being canceled
 * @param canceller state used to synchronize the cancellation
 *
 * @retval true if and only if the work was still active on entry.  The caller
 * must wait on the canceller semaphore after releasing the lock.
 *
 * @retval false if work was idle on entry.  The caller need not wait.
 */
static bool cancel_sync_locked(struct k_work *work,
			       struct z_work_canceller *canceller)
{
	bool ret = flag_test(&work->flags, K_WORK_CANCELING_BIT);

	/* If something's still running then we have to wait for
	 * completion, which is indicated when finalize_cancel_locked()
	 * gets invoked.
	 */
	if (ret) {
		init_work_cancel(canceller, work);
	}

	return ret;
}

int k_work_cancel(struct k_work *work)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel, work);

	k_spinlock_key_t key = k_spin_lock(&lock);
	int ret = cancel_async_locked(work);

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel, work, ret);

	return ret;
}

bool k_work_cancel_sync(struct k_work *work,
			struct k_work_sync *sync)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(sync != NULL);
	__ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));
	__ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif /* CONFIG_KERNEL_COHERENCE */

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_sync, work, sync);

	struct z_work_canceller *canceller = &sync->canceller;
	k_spinlock_key_t key = k_spin_lock(&lock);
	bool pending = (work_busy_get_locked(work) != 0U);
	bool need_wait = false;

	if (pending) {
		(void)cancel_async_locked(work);
		need_wait = cancel_sync_locked(work, canceller);
	}

	k_spin_unlock(&lock, key);

	if (need_wait) {
		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_work, cancel_sync, work, sync);

		k_sem_take(&canceller->sem, K_FOREVER);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_sync, work, sync, pending);
	return pending;
}
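
/* Illustrative usage sketch (hypothetical caller code): stop a work item
 * and guarantee its handler is no longer running before tearing down the
 * state the handler touches.
 *
 *	struct k_work_sync sync;
 *
 *	if (k_work_cancel_sync(&my_work, &sync)) {
 *		... the work was pending or running when cancellation began ...
 *	}
 *
 * Unlike k_work_cancel(), this can block, so it must not be called from an
 * ISR; cancelling a work item from within its own handler would wait on a
 * completion that only that handler's return can trigger.
 */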

/* Loop executed by a work queue thread.
 *
 * @param workq_ptr pointer to the work queue structure
 */
static void work_queue_main(void *workq_ptr, void *p2, void *p3)
{
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	struct k_work_q *queue = (struct k_work_q *)workq_ptr;

	while (true) {
		sys_snode_t *node;
		struct k_work *work = NULL;
		k_work_handler_t handler = NULL;
		k_spinlock_key_t key = k_spin_lock(&lock);
		bool yield;

		/* Check for and prepare any new work. */
		node = sys_slist_get(&queue->pending);
		if (node != NULL) {
			/* Mark that there's some work active that's
			 * not on the pending list.
			 */
			flag_set(&queue->flags, K_WORK_QUEUE_BUSY_BIT);
			work = CONTAINER_OF(node, struct k_work, node);
			flag_set(&work->flags, K_WORK_RUNNING_BIT);
			flag_clear(&work->flags, K_WORK_QUEUED_BIT);

			/* A static code analysis tool can raise a false-positive
			 * violation in the line below that 'work' is checked for
			 * null after being dereferenced.
			 *
			 * The work pointer is obtained with CONTAINER_OF as the
			 * enclosing struct k_work that contains the node.
			 * The only way for it to be NULL is if node were a member
			 * of a struct k_work object placed at address NULL, which
			 * should never happen; the 'if (work == NULL)' check below
			 * guards against it in any case.
			 * This means that if node is not NULL, then work will not
			 * be NULL.
			 */
			handler = work->handler;
		} else if (flag_test_and_clear(&queue->flags,
					       K_WORK_QUEUE_DRAIN_BIT)) {
			/* Not busy and draining: move threads waiting for
			 * drain to ready state.  The held spinlock inhibits
			 * immediate reschedule; released threads get their
			 * chance when this invokes z_sched_wait() below.
			 *
			 * We don't touch K_WORK_QUEUE_PLUGGED, so getting
			 * here doesn't mean that the queue will allow new
			 * submissions.
			 */
			(void)z_sched_wake_all(&queue->drainq, 1, NULL);
		} else {
			/* No work is available and no queue state requires
			 * special handling.
			 */
			;
		}

		if (work == NULL) {
			/* Nothing's had a chance to add work since we took
			 * the lock, and we didn't find work nor got asked to
			 * stop.  Just go to sleep: when something happens the
			 * work thread will be woken and we can check again.
			 */

			(void)z_sched_wait(&lock, key, &queue->notifyq,
					   K_FOREVER, NULL);
			continue;
		}

		k_spin_unlock(&lock, key);

		__ASSERT_NO_MSG(handler != NULL);
		handler(work);

		/* Mark the work item as no longer running and deal
		 * with any cancellation and flushing issued while it
		 * was running.  Clear the BUSY flag and optionally
		 * yield to prevent starving other threads.
		 */
		key = k_spin_lock(&lock);

		flag_clear(&work->flags, K_WORK_RUNNING_BIT);
		if (flag_test(&work->flags, K_WORK_FLUSHING_BIT)) {
			finalize_flush_locked(work);
		}
		if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
			finalize_cancel_locked(work);
		}

		flag_clear(&queue->flags, K_WORK_QUEUE_BUSY_BIT);
		yield = !flag_test(&queue->flags, K_WORK_QUEUE_NO_YIELD_BIT);
		k_spin_unlock(&lock, key);

		/* Optionally yield to prevent the work queue from
		 * starving other threads.
		 */
		if (yield) {
			k_yield();
		}
	}
}

void k_work_queue_init(struct k_work_q *queue)
{
	__ASSERT_NO_MSG(queue != NULL);

	*queue = (struct k_work_q) {
		.flags = 0,
	};

	SYS_PORT_TRACING_OBJ_INIT(k_work_queue, queue);
}

void k_work_queue_start(struct k_work_q *queue,
			k_thread_stack_t *stack,
			size_t stack_size,
			int prio,
			const struct k_work_queue_config *cfg)
{
	__ASSERT_NO_MSG(queue);
	__ASSERT_NO_MSG(stack);
	__ASSERT_NO_MSG(!flag_test(&queue->flags, K_WORK_QUEUE_STARTED_BIT));
	uint32_t flags = K_WORK_QUEUE_STARTED;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, start, queue);

	sys_slist_init(&queue->pending);
	z_waitq_init(&queue->notifyq);
	z_waitq_init(&queue->drainq);

	if ((cfg != NULL) && cfg->no_yield) {
		flags |= K_WORK_QUEUE_NO_YIELD;
	}

	/* It hasn't actually been started yet, but all the state is in place
	 * so we can submit things and once the thread gets control it's ready
	 * to roll.
	 */
	flags_set(&queue->flags, flags);

	(void)k_thread_create(&queue->thread, stack, stack_size,
			      work_queue_main, queue, NULL, NULL,
			      prio, 0, K_FOREVER);

	if ((cfg != NULL) && (cfg->name != NULL)) {
		k_thread_name_set(&queue->thread, cfg->name);
	}

	if ((cfg != NULL) && (cfg->essential)) {
		queue->thread.base.user_options |= K_ESSENTIAL;
	}

	k_thread_start(&queue->thread);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, start, queue);
}
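
/* Illustrative usage sketch (hypothetical application code): create a
 * dedicated work queue.  The stack size, priority, and configuration
 * values here are examples, not requirements.
 *
 *	K_THREAD_STACK_DEFINE(my_workq_stack, 1024);
 *	static struct k_work_q my_workq;
 *
 *	struct k_work_queue_config cfg = {
 *		.name = "my_workq",
 *		.no_yield = false,
 *	};
 *
 *	k_work_queue_init(&my_workq);
 *	k_work_queue_start(&my_workq, my_workq_stack,
 *			   K_THREAD_STACK_SIZEOF(my_workq_stack),
 *			   K_PRIO_PREEMPT(5), &cfg);
 */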

int k_work_queue_drain(struct k_work_q *queue,
		       bool plug)
{
	__ASSERT_NO_MSG(queue);
	__ASSERT_NO_MSG(!k_is_in_isr());

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, drain, queue);

	int ret = 0;
	k_spinlock_key_t key = k_spin_lock(&lock);

	if (((flags_get(&queue->flags)
	      & (K_WORK_QUEUE_BUSY | K_WORK_QUEUE_DRAIN)) != 0U)
	    || plug
	    || !sys_slist_is_empty(&queue->pending)) {
		flag_set(&queue->flags, K_WORK_QUEUE_DRAIN_BIT);
		if (plug) {
			flag_set(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT);
		}

		notify_queue_locked(queue);
		ret = z_sched_wait(&lock, key, &queue->drainq,
				   K_FOREVER, NULL);
	} else {
		k_spin_unlock(&lock, key);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, drain, queue, ret);

	return ret;
}
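
/* Illustrative usage sketch (hypothetical caller code): wait for all work
 * already submitted to my_workq to finish while blocking new submissions,
 * then re-open the queue.
 *
 *	(void)k_work_queue_drain(&my_workq, true);
 *	... the queue is now empty and plugged ...
 *	(void)k_work_queue_unplug(&my_workq);
 *
 * Draining with plug=false waits for the backlog to empty; while the drain
 * is in progress only chained submissions from the queue's own thread are
 * accepted, and the queue accepts new work again once the drain completes.
 */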

int k_work_queue_unplug(struct k_work_q *queue)
{
	__ASSERT_NO_MSG(queue);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, unplug, queue);

	int ret = -EALREADY;
	k_spinlock_key_t key = k_spin_lock(&lock);

	if (flag_test_and_clear(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT)) {
		ret = 0;
	}

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, unplug, queue, ret);

	return ret;
}

#ifdef CONFIG_SYS_CLOCK_EXISTS

/* Timeout handler for delayable work.
 *
 * Invoked by timeout infrastructure.
 * Takes and releases work lock.
 * Conditionally reschedules.
 */
static void work_timeout(struct _timeout *to)
{
	struct k_work_delayable *dw
		= CONTAINER_OF(to, struct k_work_delayable, timeout);
	struct k_work *wp = &dw->work;
	k_spinlock_key_t key = k_spin_lock(&lock);
	struct k_work_q *queue = NULL;

	/* If the work is still marked delayed (should be) then clear that
	 * state and submit it to the queue.  If successful the queue will be
	 * notified of new work at the next reschedule point.
	 *
	 * If not successful there is no notification that the work has been
	 * abandoned.  Sorry.
	 */
	if (flag_test_and_clear(&wp->flags, K_WORK_DELAYED_BIT)) {
		queue = dw->queue;
		(void)submit_to_queue_locked(wp, &queue);
	}

	k_spin_unlock(&lock, key);
}

void k_work_init_delayable(struct k_work_delayable *dwork,
			    k_work_handler_t handler)
{
	__ASSERT_NO_MSG(dwork != NULL);
	__ASSERT_NO_MSG(handler != NULL);

	*dwork = (struct k_work_delayable){
		.work = {
			.handler = handler,
			.flags = K_WORK_DELAYABLE,
		},
	};
	z_init_timeout(&dwork->timeout);

	SYS_PORT_TRACING_OBJ_INIT(k_work_delayable, dwork);
}

static inline int work_delayable_busy_get_locked(const struct k_work_delayable *dwork)
{
	return flags_get(&dwork->work.flags) & K_WORK_MASK;
}

int k_work_delayable_busy_get(const struct k_work_delayable *dwork)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	int ret = work_delayable_busy_get_locked(dwork);

	k_spin_unlock(&lock, key);
	return ret;
}

/* Attempt to schedule a work item for future (maybe immediate)
 * submission.
 *
 * Invoked with work lock held.
 *
 * See also submit_to_queue_locked(), which implements this for a no-wait
 * delay.
 *
 * @param queuep pointer to a pointer to a queue.  On input this
 * should dereference to the proposed queue (which may be null); after
 * completion it will be null if the work was not submitted or if
 * submitted will reference the queue it was submitted to.  That may
 * or may not be the queue provided on input.
 *
 * @param dwork the delayed work structure
 *
 * @param delay the delay to use before scheduling.
 *
 * @retval from submit_to_queue_locked() if delay is K_NO_WAIT; otherwise
 * @retval 1 to indicate successfully scheduled.
 */
static int schedule_for_queue_locked(struct k_work_q **queuep,
				     struct k_work_delayable *dwork,
				     k_timeout_t delay)
{
	int ret = 1;
	struct k_work *work = &dwork->work;

	if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
		return submit_to_queue_locked(work, queuep);
	}

	flag_set(&work->flags, K_WORK_DELAYED_BIT);
	dwork->queue = *queuep;

	/* Add timeout */
	z_add_timeout(&dwork->timeout, work_timeout, delay);

	return ret;
}

/* Unschedule delayable work.
 *
 * If the work is delayed, cancel the timeout and clear the delayed
 * flag.
 *
 * Invoked with work lock held.
 *
 * @param dwork pointer to delayable work structure.
 *
 * @return true if and only if work had been delayed so the timeout
 * was cancelled.
 */
static inline bool unschedule_locked(struct k_work_delayable *dwork)
{
	bool ret = false;
	struct k_work *work = &dwork->work;

	/* If scheduled, try to cancel.  If it fails, that means the
	 * callback has been dequeued and will inevitably run (or has
	 * already run), so treat that as "undelayed" and return
	 * false.
	 */
	if (flag_test_and_clear(&work->flags, K_WORK_DELAYED_BIT)) {
		ret = z_abort_timeout(&dwork->timeout) == 0;
	}

	return ret;
}

/* Full cancellation of a delayable work item.
 *
 * Unschedules the delayed part then delegates to standard work
 * cancellation.
 *
 * Invoked with work lock held.
 *
 * @param dwork delayable work item
 *
 * @return k_work_busy_get() flags
 */
static int cancel_delayable_async_locked(struct k_work_delayable *dwork)
{
	(void)unschedule_locked(dwork);

	return cancel_async_locked(&dwork->work);
}

int k_work_schedule_for_queue(struct k_work_q *queue,
			       struct k_work_delayable *dwork,
			       k_timeout_t delay)
{
	__ASSERT_NO_MSG(dwork != NULL);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, schedule_for_queue, queue, dwork, delay);

	struct k_work *work = &dwork->work;
	int ret = 0;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* Schedule the work item if it's idle or running. */
	if ((work_busy_get_locked(work) & ~K_WORK_RUNNING) == 0U) {
		ret = schedule_for_queue_locked(&queue, dwork, delay);
	}

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, schedule_for_queue, queue, dwork, delay, ret);

	return ret;
}

int k_work_schedule(struct k_work_delayable *dwork,
		    k_timeout_t delay)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, schedule, dwork, delay);

	int ret = k_work_schedule_for_queue(&k_sys_work_q, dwork, delay);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, schedule, dwork, delay, ret);

	return ret;
}
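
/* Illustrative usage sketch (hypothetical caller code): run my_handler()
 * on the system work queue roughly 100 ms from now.
 *
 *	static struct k_work_delayable my_dwork;
 *
 *	k_work_init_delayable(&my_dwork, my_handler);
 *	...
 *	(void)k_work_schedule(&my_dwork, K_MSEC(100));
 *
 * If the item is already delayed or queued this call returns 0 and leaves
 * the earlier deadline in place; use k_work_reschedule() below to replace
 * the deadline instead.
 */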

int k_work_reschedule_for_queue(struct k_work_q *queue,
				 struct k_work_delayable *dwork,
				 k_timeout_t delay)
{
	__ASSERT_NO_MSG(dwork != NULL);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, reschedule_for_queue, queue, dwork, delay);

	int ret;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* Remove any active scheduling. */
	(void)unschedule_locked(dwork);

	/* Schedule the work item with the new parameters. */
	ret = schedule_for_queue_locked(&queue, dwork, delay);

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, reschedule_for_queue, queue, dwork, delay, ret);

	return ret;
}

int k_work_reschedule(struct k_work_delayable *dwork,
		      k_timeout_t delay)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, reschedule, dwork, delay);

	int ret = k_work_reschedule_for_queue(&k_sys_work_q, dwork, delay);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, reschedule, dwork, delay, ret);

	return ret;
}
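
/* Note on schedule vs. reschedule: k_work_schedule_for_queue() only
 * schedules an item that is idle or merely running; if it is already
 * delayed or queued it returns 0 and the earlier deadline stands.
 * k_work_reschedule_for_queue() first cancels any pending timeout and then
 * schedules with the new delay, so the most recent call wins.
 */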

int k_work_cancel_delayable(struct k_work_delayable *dwork)
{
	__ASSERT_NO_MSG(dwork != NULL);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_delayable, dwork);

	k_spinlock_key_t key = k_spin_lock(&lock);
	int ret = cancel_delayable_async_locked(dwork);

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_delayable, dwork, ret);

	return ret;
}

bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
				  struct k_work_sync *sync)
{
	__ASSERT_NO_MSG(dwork != NULL);
	__ASSERT_NO_MSG(sync != NULL);
	__ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif /* CONFIG_KERNEL_COHERENCE */

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_delayable_sync, dwork, sync);

	struct z_work_canceller *canceller = &sync->canceller;
	k_spinlock_key_t key = k_spin_lock(&lock);
	bool pending = (work_delayable_busy_get_locked(dwork) != 0U);
	bool need_wait = false;

	if (pending) {
		(void)cancel_delayable_async_locked(dwork);
		need_wait = cancel_sync_locked(&dwork->work, canceller);
	}

	k_spin_unlock(&lock, key);

	if (need_wait) {
		k_sem_take(&canceller->sem, K_FOREVER);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_delayable_sync, dwork, sync, pending);
	return pending;
}

bool k_work_flush_delayable(struct k_work_delayable *dwork,
			    struct k_work_sync *sync)
{
	__ASSERT_NO_MSG(dwork != NULL);
	__ASSERT_NO_MSG(sync != NULL);
	__ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif /* CONFIG_KERNEL_COHERENCE */

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush_delayable, dwork, sync);

	struct k_work *work = &dwork->work;
	struct z_work_flusher *flusher = &sync->flusher;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* If it's idle release the lock and return immediately. */
	if (work_busy_get_locked(work) == 0U) {
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush_delayable, dwork, sync, false);

		return false;
	}

	/* If unscheduling did something then submit it.  Ignore a
	 * failed submission (e.g. when cancelling).
	 */
	if (unschedule_locked(dwork)) {
		struct k_work_q *queue = dwork->queue;

		(void)submit_to_queue_locked(work, &queue);
	}

	/* Wait for it to finish */
	bool need_flush = work_flush_locked(work, flusher);

	k_spin_unlock(&lock, key);

	/* If necessary wait until the flusher item completes */
	if (need_flush) {
		k_sem_take(&flusher->sem, K_FOREVER);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush_delayable, dwork, sync, need_flush);

	return need_flush;
}

#endif /* CONFIG_SYS_CLOCK_EXISTS */