/*
 * Copyright (c) 2016, Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Public kernel APIs.
 */

#ifndef ZEPHYR_INCLUDE_KERNEL_H_
#define ZEPHYR_INCLUDE_KERNEL_H_

#if !defined(_ASMLANGUAGE)
#include <zephyr/kernel_includes.h>
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <zephyr/toolchain.h>
#include <zephyr/tracing/tracing_macros.h>
#include <zephyr/sys/mem_stats.h>
#include <zephyr/sys/iterable_sections.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Zephyr currently assumes the size of a couple standard types to simplify
 * print string formats. Let's make sure this doesn't change without notice.
 */
BUILD_ASSERT(sizeof(int32_t) == sizeof(int));
BUILD_ASSERT(sizeof(int64_t) == sizeof(long long));
BUILD_ASSERT(sizeof(intptr_t) == sizeof(long));

/**
 * @brief Kernel APIs
 * @defgroup kernel_apis Kernel APIs
 * @{
 * @}
 */

#define K_ANY NULL

#if CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES == 0
#error Zero available thread priorities defined!
#endif

#define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
#define K_PRIO_PREEMPT(x) (x)

#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
#define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
#define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
#define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
#define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)

#ifdef CONFIG_POLL
#define _POLL_EVENT_OBJ_INIT(obj) \
	.poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
#define _POLL_EVENT sys_dlist_t poll_events
#else
#define _POLL_EVENT_OBJ_INIT(obj)
#define _POLL_EVENT
#endif

struct k_thread;
struct k_mutex;
struct k_sem;
struct k_msgq;
struct k_mbox;
struct k_pipe;
struct k_queue;
struct k_fifo;
struct k_lifo;
struct k_stack;
struct k_mem_slab;
struct k_timer;
struct k_poll_event;
struct k_poll_signal;
struct k_mem_domain;
struct k_mem_partition;
struct k_futex;
struct k_event;

enum execution_context_types {
	K_ISR = 0,
	K_COOP_THREAD,
	K_PREEMPT_THREAD,
};

/* private, used by k_poll and k_work_poll */
struct k_work_poll;
typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);

/**
 * @addtogroup thread_apis
 * @{
 */

typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
				   void *user_data);

/**
 * @brief Iterate over all the threads in the system.
 *
 * This routine iterates over all the threads in the system and
 * calls the user_cb function for each thread.
 *
 * @param user_cb Pointer to the user callback function.
 * @param user_data Pointer to user data.
 *
 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
 * to be effective.
 * @note This API uses @ref k_spin_lock to protect the _kernel.threads
 * list which means creation of new threads and terminations of existing
 * threads are blocked until this API returns.
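 *
 * Example (an illustrative sketch; the callback and counter are hypothetical):
 *
 * @code
 * static void count_cb(const struct k_thread *thread, void *user_data)
 * {
 *	int *count = user_data;
 *
 *	(*count)++;
 * }
 *
 * int count = 0;
 *
 * k_thread_foreach(count_cb, &count);
 * @endcode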
 */
extern void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);

/**
 * @brief Iterate over all the threads in the system without locking.
 *
 * This routine works exactly the same as @ref k_thread_foreach
 * but unlocks interrupts when user_cb is executed.
 *
 * @param user_cb Pointer to the user callback function.
 * @param user_data Pointer to user data.
 *
 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
 * to be effective.
 * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
 * queue elements. It unlocks it during user callback function processing.
 * If a new thread is created while this @c foreach function is in progress,
 * the newly added thread will not be included in the enumeration.
 * If a thread is aborted during this enumeration, there is a race and the
 * aborted thread may or may not be included in the enumeration.
 * @note If a thread is aborted and the memory occupied by its @c k_thread
 * structure is reused while this @c k_thread_foreach_unlocked is in progress,
 * the system may behave unstably.
 * This function may even never return, as it would follow stale @c next
 * thread pointers, treating the given pointer as a pointer to a k_thread
 * structure while it is now something different.
 * Do not reuse the memory occupied by the k_thread structure of an aborted
 * thread if it was aborted after this function was called in any context.
 */
extern void k_thread_foreach_unlocked(
	k_thread_user_cb_t user_cb, void *user_data);

/** @} */

/**
 * @defgroup thread_apis Thread APIs
 * @ingroup kernel_apis
 * @{
 */

#endif /* !_ASMLANGUAGE */


/*
 * Thread user options. May be needed by assembly code. Common part uses low
 * bits, arch-specific use high bits.
 */

/**
 * @brief system thread that must not abort
 */
#define K_ESSENTIAL (BIT(0))

#if defined(CONFIG_FPU_SHARING)
/**
 * @brief FPU registers are managed by context switch
 *
 * @details
 * This option indicates that the thread uses the CPU's floating point
 * registers. This instructs the kernel to take additional steps to save
 * and restore the contents of these registers when scheduling the thread.
 * No effect if @kconfig{CONFIG_FPU_SHARING} is not enabled.
 */
#define K_FP_IDX 1
#define K_FP_REGS (BIT(K_FP_IDX))
#endif

/**
 * @brief user mode thread
 *
 * This thread has dropped from supervisor mode to user mode and consequently
 * has additional restrictions
 */
#define K_USER (BIT(2))

/**
 * @brief Inherit Permissions
 *
 * @details
 * Indicates that the thread being created should inherit all kernel object
 * permissions from the thread that created it. No effect if
 * @kconfig{CONFIG_USERSPACE} is not enabled.
 */
#define K_INHERIT_PERMS (BIT(3))
/**
 * @brief Callback item state
 *
 * @details
 * This is a single bit of state reserved for "callback manager"
 * utilities (p4wq initially) that need to track operations invoked
 * from within a user-provided callback.
 * Effectively it serves as a tiny bit of zero-overhead TLS data.
 */
#define K_CALLBACK_STATE (BIT(4))

#ifdef CONFIG_ARC
/* ARC processor Bitmask definitions for threads user options */

#if defined(CONFIG_ARC_DSP_SHARING)
/**
 * @brief DSP registers are managed by context switch
 *
 * @details
 * This option indicates that the thread uses the CPU's DSP registers.
 * This instructs the kernel to take additional steps to save and
 * restore the contents of these registers when scheduling the thread.
 * No effect if @kconfig{CONFIG_ARC_DSP_SHARING} is not enabled.
 */
#define K_DSP_IDX 6
#define K_ARC_DSP_REGS (BIT(K_DSP_IDX))
#endif

#if defined(CONFIG_ARC_AGU_SHARING)
/**
 * @brief AGU registers are managed by context switch
 *
 * @details
 * This option indicates that the thread uses the ARC processor's XY
 * memory and DSP feature. Often used with @kconfig{CONFIG_ARC_AGU_SHARING}.
 * No effect if @kconfig{CONFIG_ARC_AGU_SHARING} is not enabled.
 */
#define K_AGU_IDX 7
#define K_ARC_AGU_REGS (BIT(K_AGU_IDX))
#endif
#endif

#ifdef CONFIG_X86
/* x86 Bitmask definitions for threads user options */

#if defined(CONFIG_FPU_SHARING) && defined(CONFIG_X86_SSE)
/**
 * @brief FP and SSE registers are managed by context switch on x86
 *
 * @details
 * This option indicates that the thread uses the x86 CPU's floating point
 * and SSE registers. This instructs the kernel to take additional steps to
 * save and restore the contents of these registers when scheduling
 * the thread. No effect if @kconfig{CONFIG_X86_SSE} is not enabled.
 */
#define K_SSE_REGS (BIT(7))
#endif
#endif

/* end - thread options */

#if !defined(_ASMLANGUAGE)
/**
 * @brief Dynamically allocate a thread stack.
 *
 * Relevant stack creation flags include:
 * - @ref K_USER allocate a userspace thread (requires `CONFIG_USERSPACE=y`)
 *
 * @param size Stack size in bytes.
 * @param flags Stack creation flags, or 0.
 *
 * @retval the allocated thread stack on success.
 * @retval NULL on failure.
 *
 * @see CONFIG_DYNAMIC_THREAD
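 *
 * A minimal usage sketch (the 2 KiB size is an illustrative assumption):
 *
 * @code
 * k_thread_stack_t *stack = k_thread_stack_alloc(2048, 0);
 *
 * if (stack != NULL) {
 *	// ... use with k_thread_create(), then release it ...
 *	(void)k_thread_stack_free(stack);
 * }
 * @endcode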
 */
__syscall k_thread_stack_t *k_thread_stack_alloc(size_t size, int flags);

/**
 * @brief Free a dynamically allocated thread stack.
 *
 * @param stack Pointer to the thread stack.
 *
 * @retval 0 on success.
 * @retval -EBUSY if the thread stack is in use.
 * @retval -EINVAL if @p stack is invalid.
 * @retval -ENOSYS if dynamic thread stack allocation is disabled
 *
 * @see CONFIG_DYNAMIC_THREAD
 */
__syscall int k_thread_stack_free(k_thread_stack_t *stack);

/**
 * @brief Create a thread.
 *
 * This routine initializes a thread, then schedules it for execution.
 *
 * The new thread may be scheduled for immediate execution or a delayed start.
 * If the newly spawned thread does not have a delayed start the kernel
 * scheduler may preempt the current thread to allow the new thread to
 * execute.
 *
 * Thread options are architecture-specific, and can include K_ESSENTIAL,
 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
 * them using "|" (the logical OR operator).
 *
 * Stack objects passed to this function must be originally defined with
 * either of these macros in order to be portable:
 *
 * - K_THREAD_STACK_DEFINE() - For stacks that may support either user or
 *   supervisor threads.
 * - K_KERNEL_STACK_DEFINE() - For stacks that may support supervisor
 *   threads only. These stacks use less memory if CONFIG_USERSPACE is
 *   enabled.
 *
 * The stack_size parameter has constraints. It must either be:
 *
 * - The original size value passed to K_THREAD_STACK_DEFINE() or
 *   K_KERNEL_STACK_DEFINE()
 * - The return value of K_THREAD_STACK_SIZEOF(stack) if the stack was
 *   defined with K_THREAD_STACK_DEFINE()
 * - The return value of K_KERNEL_STACK_SIZEOF(stack) if the stack was
 *   defined with K_KERNEL_STACK_DEFINE().
 *
 * Using other values, or sizeof(stack), may produce undefined behavior.
 *
 * @param new_thread Pointer to uninitialized struct k_thread
 * @param stack Pointer to the stack space.
 * @param stack_size Stack size in bytes.
 * @param entry Thread entry function.
 * @param p1 1st entry point parameter.
 * @param p2 2nd entry point parameter.
 * @param p3 3rd entry point parameter.
 * @param prio Thread priority.
 * @param options Thread options.
 * @param delay Scheduling delay, or K_NO_WAIT (for no delay).
 *
 * @return ID of new thread.
 *
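 * Example (an illustrative sketch; names, size, and priority are
 * hypothetical):
 *
 * @code
 * void my_entry(void *p1, void *p2, void *p3);
 *
 * K_THREAD_STACK_DEFINE(my_stack, 1024);
 * struct k_thread my_thread;
 *
 * k_tid_t tid = k_thread_create(&my_thread, my_stack,
 *				 K_THREAD_STACK_SIZEOF(my_stack),
 *				 my_entry, NULL, NULL, NULL,
 *				 K_PRIO_PREEMPT(0), 0, K_NO_WAIT);
 * @endcode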
 */
__syscall k_tid_t k_thread_create(struct k_thread *new_thread,
				  k_thread_stack_t *stack,
				  size_t stack_size,
				  k_thread_entry_t entry,
				  void *p1, void *p2, void *p3,
				  int prio, uint32_t options, k_timeout_t delay);

/**
 * @brief Drop a thread's privileges permanently to user mode
 *
 * This allows a supervisor thread to be re-used as a user thread.
 * This function does not return, but control will transfer to the provided
 * entry point as if this was a new user thread.
 *
 * The implementation ensures that the stack buffer contents are erased.
 * Any thread-local storage will be reverted to a pristine state.
 *
 * Memory domain membership, resource pool assignment, kernel object
 * permissions, priority, and thread options are preserved.
 *
 * A common use of this function is to re-use the main thread as a user thread
 * once all supervisor mode-only tasks have been completed.
 *
 * @param entry Function to start executing from
 * @param p1 1st entry point parameter
 * @param p2 2nd entry point parameter
 * @param p3 3rd entry point parameter
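 *
 * Example sketch (the entry function names are hypothetical):
 *
 * @code
 * void user_entry(void *p1, void *p2, void *p3);
 *
 * void sup_entry(void *p1, void *p2, void *p3)
 * {
 *	// ... supervisor mode-only setup ...
 *	k_thread_user_mode_enter(user_entry, p1, p2, p3);
 * }
 * @endcode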
 */
extern FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
						   void *p1, void *p2,
						   void *p3);

/**
 * @brief Grant a thread access to a set of kernel objects
 *
 * This is a convenience function. For the provided thread, grant access to
 * the remaining arguments, which must be pointers to kernel objects.
 *
 * The thread object must be initialized (i.e. running). The kernel objects
 * don't need to be.
 * Note that NULL shouldn't be passed as an argument.
 *
 * @param thread Thread to grant access to objects
 * @param ... list of kernel object pointers
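 *
 * Example (a sketch; the thread ID and objects are hypothetical):
 *
 * @code
 * k_thread_access_grant(child_tid, &my_sem, &my_mutex);
 * @endcode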
 */
#define k_thread_access_grant(thread, ...) \
	FOR_EACH_FIXED_ARG(k_object_access_grant, (;), thread, __VA_ARGS__)

/**
 * @brief Assign a resource memory pool to a thread
 *
 * By default, threads have no resource pool assigned unless their parent
 * thread has a resource pool, in which case it is inherited. Multiple
 * threads may be assigned to the same memory pool.
 *
 * Changing a thread's resource pool will not migrate allocations from the
 * previous pool.
 *
 * @param thread Target thread to assign a memory pool for resource requests.
 * @param heap Heap object to use for resources,
 *             or NULL if the thread should no longer have a memory pool.
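 *
 * Example (a sketch, assuming a k_heap defined with K_HEAP_DEFINE() and a
 * hypothetical child thread ID):
 *
 * @code
 * K_HEAP_DEFINE(app_heap, 4096);
 *
 * k_thread_heap_assign(child_tid, &app_heap);
 * @endcode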
 */
static inline void k_thread_heap_assign(struct k_thread *thread,
					struct k_heap *heap)
{
	thread->resource_pool = heap;
}

#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
/**
 * @brief Obtain stack usage information for the specified thread
 *
 * User threads will need to have permission on the target thread object.
 *
 * Some hardware may prevent inspection of a stack buffer currently in use.
 * If this API is called from supervisor mode, on the currently running thread,
 * on a platform which selects @kconfig{CONFIG_NO_UNUSED_STACK_INSPECTION}, an
 * error will be generated.
 *
 * @param thread Thread to inspect stack information
 * @param unused_ptr Output parameter, filled in with the unused stack space
 *	of the target thread in bytes.
 * @return 0 on success
 * @return -EBADF Bad thread object (user mode only)
 * @return -EPERM No permissions on thread object (user mode only)
 * @return -ENOTSUP Forbidden by hardware policy
 * @return -EINVAL Thread is uninitialized or exited (user mode only)
 * @return -EFAULT Bad memory address for unused_ptr (user mode only)
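 *
 * Example (a sketch checking the current thread's remaining headroom):
 *
 * @code
 * size_t unused;
 *
 * if (k_thread_stack_space_get(k_current_get(), &unused) == 0) {
 *	printk("unused stack: %zu bytes\n", unused);
 * }
 * @endcode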
 */
__syscall int k_thread_stack_space_get(const struct k_thread *thread,
				       size_t *unused_ptr);
#endif

#if (CONFIG_HEAP_MEM_POOL_SIZE > 0)
/**
 * @brief Assign the system heap as a thread's resource pool
 *
 * Similar to k_thread_heap_assign(), but the thread will use
 * the kernel heap to draw memory.
 *
 * Use with caution, as a malicious thread could perform DoS attacks on the
 * kernel heap.
 *
 * @param thread Target thread to assign the system heap for resource requests
 *
 */
void k_thread_system_pool_assign(struct k_thread *thread);
#endif /* (CONFIG_HEAP_MEM_POOL_SIZE > 0) */

/**
 * @brief Sleep until a thread exits
 *
 * The caller will be put to sleep until the target thread exits, either due
 * to being aborted, self-exiting, or taking a fatal error. This API returns
 * immediately if the thread isn't running.
 *
 * This API may only be called from ISRs with a K_NO_WAIT timeout,
 * where it can be useful as a predicate to detect when a thread has
 * aborted.
 *
 * @param thread Thread to wait to exit
 * @param timeout upper bound time to wait for the thread to exit.
 * @retval 0 success, target thread has exited or wasn't running
 * @retval -EBUSY returned without waiting
 * @retval -EAGAIN waiting period timed out
 * @retval -EDEADLK target thread is joining on the caller, or target thread
 *                  is the caller
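 *
 * Example (a sketch: wait up to 100 ms for a hypothetical worker thread):
 *
 * @code
 * int ret = k_thread_join(&worker_thread, K_MSEC(100));
 *
 * if (ret == -EAGAIN) {
 *	// worker did not exit within 100 ms
 * }
 * @endcode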
 */
__syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);

/**
 * @brief Put the current thread to sleep.
 *
 * This routine puts the current thread to sleep for @a duration,
 * specified as a k_timeout_t object.
 *
 * @note If @a timeout is set to K_FOREVER then the thread is suspended.
 *
 * @param timeout Desired duration of sleep.
 *
 * @return Zero if the requested time has elapsed, or the number of
 * milliseconds left to sleep if the thread was woken up by a \ref k_wakeup
 * call.
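 *
 * Example (sleeping for one second):
 *
 * @code
 * k_sleep(K_SECONDS(1));
 * @endcode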
 */
__syscall int32_t k_sleep(k_timeout_t timeout);

/**
 * @brief Put the current thread to sleep.
 *
 * This routine puts the current thread to sleep for @a duration milliseconds.
 *
 * @param ms Number of milliseconds to sleep.
 *
 * @return Zero if the requested time has elapsed, or the number of
 * milliseconds left to sleep if the thread was woken up by a \ref k_wakeup
 * call.
 */
static inline int32_t k_msleep(int32_t ms)
{
	return k_sleep(Z_TIMEOUT_MS(ms));
}

/**
 * @brief Put the current thread to sleep with microsecond resolution.
 *
 * This function is unlikely to work as expected without kernel tuning.
 * In particular, because the lower bound on the duration of a sleep is
 * the duration of a tick, @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} must be
 * adjusted to achieve the resolution desired. The implications of doing
 * this must be understood before attempting to use k_usleep(). Use with
 * caution.
 *
 * @param us Number of microseconds to sleep.
 *
 * @return Zero if the requested time has elapsed, or the number of
 * microseconds left to sleep if the thread was woken up by a \ref k_wakeup
 * call.
 */
__syscall int32_t k_usleep(int32_t us);

/**
 * @brief Cause the current thread to busy wait.
 *
 * This routine causes the current thread to execute a "do nothing" loop for
 * @a usec_to_wait microseconds.
 *
 * @note The clock used for the microsecond-resolution delay here may
 * be skewed relative to the clock used for system timeouts like
 * k_sleep().  For example k_busy_wait(1000) may take slightly more or
 * less time than k_sleep(K_MSEC(1)), with the offset dependent on
 * clock tolerances.
 *
 * @note In case when @kconfig{CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE} and
 * @kconfig{CONFIG_PM} options are enabled, this function may not work.
 * The timer/clock used for delay processing may be disabled/inactive.
 */
__syscall void k_busy_wait(uint32_t usec_to_wait);

/**
 * @brief Check whether it is possible to yield in the current context.
 *
 * This routine checks whether the kernel is in a state where it is possible to
 * yield or call blocking APIs. It should be used by code that needs to yield
 * to perform correctly, but can feasibly be called from contexts where that
 * is not possible. For example in the PRE_KERNEL initialization step, or when
 * being run from the idle thread.
 *
 * @return True if it is possible to yield in the current context, false otherwise.
 */
bool k_can_yield(void);

/**
 * @brief Yield the current thread.
 *
 * This routine causes the current thread to yield execution to another
 * thread of the same or higher priority. If there are no other ready threads
 * of the same or higher priority, the routine returns immediately.
 */
__syscall void k_yield(void);

/**
 * @brief Wake up a sleeping thread.
 *
 * This routine prematurely wakes up @a thread from sleeping.
 *
 * If @a thread is not currently sleeping, the routine has no effect.
 *
 * @param thread ID of thread to wake.
 */
__syscall void k_wakeup(k_tid_t thread);

/**
 * @brief Query thread ID of the current thread.
 *
 * This unconditionally queries the kernel via a system call.
 *
 * @note Use k_current_get() unless absolutely sure this is necessary.
 *       This should only be used directly where the thread local
 *       variable cannot be used or may contain invalid values
 *       if thread local storage (TLS) is enabled. If TLS is not
 *       enabled, this is the same as k_current_get().
 *
 * @return ID of current thread.
 */
__attribute_const__
__syscall k_tid_t k_sched_current_thread_query(void);

/**
 * @brief Get thread ID of the current thread.
 *
 * @return ID of current thread.
 *
 */
__attribute_const__
static inline k_tid_t k_current_get(void)
{
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/* Thread-local cache of current thread ID, set in z_thread_entry() */
	extern __thread k_tid_t z_tls_current;

	return z_tls_current;
#else
	return k_sched_current_thread_query();
#endif
}

/**
 * @brief Abort a thread.
 *
 * This routine permanently stops execution of @a thread. The thread is taken
 * off all kernel queues it is part of (i.e. the ready queue, the timeout
 * queue, or a kernel object wait queue). However, any kernel resources the
 * thread might currently own (such as mutexes or memory blocks) are not
 * released. It is the responsibility of the caller of this routine to ensure
 * all necessary cleanup is performed.
 *
 * After k_thread_abort() returns, the thread is guaranteed not to be
 * running or to become runnable anywhere on the system.  Normally
 * this is done via blocking the caller (in the same manner as
 * k_thread_join()), but in interrupt context on SMP systems the
 * implementation is required to spin for threads that are running on
 * other CPUs.  Note that as specified, this means that on SMP
 * platforms it is possible for application code to create a deadlock
 * condition by simultaneously aborting a cycle of threads using at
 * least one termination from interrupt context.  Zephyr cannot detect
 * all such conditions.
 *
 * @param thread ID of thread to abort.
 */
__syscall void k_thread_abort(k_tid_t thread);


/**
 * @brief Start an inactive thread
 *
 * If a thread was created with K_FOREVER in the delay parameter, it will
 * not be added to the scheduling queue until this function is called
 * on it.
 *
 * @param thread thread to start
 */
__syscall void k_thread_start(k_tid_t thread);

extern k_ticks_t z_timeout_expires(const struct _timeout *timeout);
extern k_ticks_t z_timeout_remaining(const struct _timeout *timeout);

#ifdef CONFIG_SYS_CLOCK_EXISTS

/**
 * @brief Get time when a thread wakes up, in system ticks
 *
 * This routine computes the system uptime when a waiting thread next
 * executes, in units of system ticks.  If the thread is not waiting,
 * it returns current system time.
 */
__syscall k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *t);

static inline k_ticks_t z_impl_k_thread_timeout_expires_ticks(
						const struct k_thread *t)
{
	return z_timeout_expires(&t->base.timeout);
}

/**
 * @brief Get time remaining before a thread wakes up, in system ticks
 *
 * This routine computes the time remaining before a waiting thread
 * next executes, in units of system ticks.  If the thread is not
 * waiting, it returns zero.
 */
__syscall k_ticks_t k_thread_timeout_remaining_ticks(const struct k_thread *t);

static inline k_ticks_t z_impl_k_thread_timeout_remaining_ticks(
						const struct k_thread *t)
{
	return z_timeout_remaining(&t->base.timeout);
}

#endif /* CONFIG_SYS_CLOCK_EXISTS */

/**
 * @cond INTERNAL_HIDDEN
 */

struct _static_thread_data {
	struct k_thread *init_thread;
	k_thread_stack_t *init_stack;
	unsigned int init_stack_size;
	k_thread_entry_t init_entry;
	void *init_p1;
	void *init_p2;
	void *init_p3;
	int init_prio;
	uint32_t init_options;
	const char *init_name;
#ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
	int32_t init_delay_ms;
#else
	k_timeout_t init_delay;
#endif
};

#ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
#define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay_ms = (ms)
#define Z_THREAD_INIT_DELAY(thread) SYS_TIMEOUT_MS((thread)->init_delay_ms)
#else
#define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay = SYS_TIMEOUT_MS(ms)
#define Z_THREAD_INIT_DELAY(thread) (thread)->init_delay
#endif

#define Z_THREAD_INITIALIZER(thread, stack, stack_size,           \
			    entry, p1, p2, p3,                   \
			    prio, options, delay, tname)         \
	{                                                        \
	.init_thread = (thread),				 \
	.init_stack = (stack),					 \
	.init_stack_size = (stack_size),                         \
	.init_entry = (k_thread_entry_t)entry,			 \
	.init_p1 = (void *)p1,                                   \
	.init_p2 = (void *)p2,                                   \
	.init_p3 = (void *)p3,                                   \
	.init_prio = (prio),                                     \
	.init_options = (options),                               \
	.init_name = STRINGIFY(tname),                           \
	Z_THREAD_INIT_DELAY_INITIALIZER(delay)			 \
	}

/*
 * Refer to K_THREAD_DEFINE() and K_KERNEL_THREAD_DEFINE() for
 * information on arguments.
 */
#define Z_THREAD_COMMON_DEFINE(name, stack_size,			\
			       entry, p1, p2, p3,			\
			       prio, options, delay)			\
	struct k_thread _k_thread_obj_##name;				\
	STRUCT_SECTION_ITERABLE(_static_thread_data,			\
				_k_thread_data_##name) =		\
		Z_THREAD_INITIALIZER(&_k_thread_obj_##name,		\
				     _k_thread_stack_##name, stack_size,\
				     entry, p1, p2, p3, prio, options,	\
				     delay, name);			\
	const k_tid_t name = (k_tid_t)&_k_thread_obj_##name

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @brief Statically define and initialize a thread.
 *
 * The thread may be scheduled for immediate execution or a delayed start.
 *
 * Thread options are architecture-specific, and can include K_ESSENTIAL,
 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
 * them using "|" (the logical OR operator).
 *
 * The ID of the thread can be accessed using:
 *
 * @code extern const k_tid_t <name>; @endcode
 *
 * @param name Name of the thread.
 * @param stack_size Stack size in bytes.
 * @param entry Thread entry function.
 * @param p1 1st entry point parameter.
 * @param p2 2nd entry point parameter.
 * @param p3 3rd entry point parameter.
 * @param prio Thread priority.
 * @param options Thread options.
 * @param delay Scheduling delay (in milliseconds), zero for no delay.
 *
 * @note Static threads with zero delay should not normally have
 * MetaIRQ priority levels.  This can preempt the system
 * initialization handling (depending on the priority of the main
 * thread) and cause surprising ordering side effects.  It will not
 * affect anything in the OS per se, but consider it bad practice.
 * Use a SYS_INIT() callback if you need to run code before entrance
 * to the application main().
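 *
 * Example (an illustrative sketch; name, size, and priority are
 * hypothetical):
 *
 * @code
 * void my_entry(void *p1, void *p2, void *p3);
 *
 * K_THREAD_DEFINE(my_thread, 1024, my_entry, NULL, NULL, NULL, 7, 0, 0);
 * @endcode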
 */
#define K_THREAD_DEFINE(name, stack_size,                                \
			entry, p1, p2, p3,                               \
			prio, options, delay)                            \
	K_THREAD_STACK_DEFINE(_k_thread_stack_##name, stack_size);	 \
	Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3,	 \
			       prio, options, delay)

/**
 * @brief Statically define and initialize a thread intended to run only in kernel mode.
 *
 * The thread may be scheduled for immediate execution or a delayed start.
 *
 * Thread options are architecture-specific, and can include K_ESSENTIAL,
 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
 * them using "|" (the logical OR operator).
 *
 * The ID of the thread can be accessed using:
 *
 * @code extern const k_tid_t <name>; @endcode
 *
 * @note Threads defined by this can only run in kernel mode, and cannot be
 *       transformed into a user thread via k_thread_user_mode_enter().
 *
 * @warning Depending on the architecture, the stack size (@p stack_size)
 *          may need to be a multiple of CONFIG_MMU_PAGE_SIZE (if MMU)
 *          or a power-of-two size (if MPU).
 *
 * @param name Name of the thread.
 * @param stack_size Stack size in bytes.
 * @param entry Thread entry function.
 * @param p1 1st entry point parameter.
 * @param p2 2nd entry point parameter.
 * @param p3 3rd entry point parameter.
 * @param prio Thread priority.
 * @param options Thread options.
 * @param delay Scheduling delay (in milliseconds), zero for no delay.
 */
#define K_KERNEL_THREAD_DEFINE(name, stack_size,			\
			       entry, p1, p2, p3,			\
			       prio, options, delay)			\
	K_KERNEL_STACK_DEFINE(_k_thread_stack_##name, stack_size);	\
	Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3,	\
			       prio, options, delay)

/**
 * @brief Get a thread's priority.
 *
 * This routine gets the priority of @a thread.
 *
 * @param thread ID of thread whose priority is needed.
 *
 * @return Priority of @a thread.
 */
__syscall int k_thread_priority_get(k_tid_t thread);

/**
 * @brief Set a thread's priority.
 *
 * This routine immediately changes the priority of @a thread.
 *
 * Rescheduling can occur immediately depending on the priority @a thread is
 * set to:
 *
 * - If its priority is raised above the priority of the caller of this
 * function, and the caller is preemptible, @a thread will be scheduled in.
 *
 * - If the caller operates on itself, it lowers its priority below that of
 * other threads in the system, and the caller is preemptible, the thread of
 * highest priority will be scheduled in.
 *
 * Priority can be assigned in the range of -CONFIG_NUM_COOP_PRIORITIES to
 * CONFIG_NUM_PREEMPT_PRIORITIES-1, where -CONFIG_NUM_COOP_PRIORITIES is the
 * highest priority.
 *
 * @param thread ID of thread whose priority is to be set.
 * @param prio New priority.
 *
 * @warning Changing the priority of a thread currently involved in mutex
 * priority inheritance may result in undefined behavior.
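 *
 * Example (a sketch: lower the current thread's priority by one level):
 *
 * @code
 * k_tid_t self = k_current_get();
 *
 * k_thread_priority_set(self, k_thread_priority_get(self) + 1);
 * @endcode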
 */
__syscall void k_thread_priority_set(k_tid_t thread, int prio);


#ifdef CONFIG_SCHED_DEADLINE
/**
 * @brief Set deadline expiration time for scheduler
 *
 * This sets the "deadline" expiration as a time delta from the
 * current time, in the same units used by k_cycle_get_32().  The
 * scheduler (when deadline scheduling is enabled) will choose the
 * next expiring thread when selecting between threads at the same
 * static priority.  Threads at different priorities will be scheduled
 * according to their static priority.
 *
 * @note Deadlines are stored internally using 32 bit unsigned
 * integers.  The number of cycles between the "first" deadline in the
 * scheduler queue and the "last" deadline must be less than 2^31 (i.e
 * a signed non-negative quantity).  Failure to adhere to this rule
 * may result in scheduled threads running in an incorrect deadline
 * order.
 *
 * @note Despite the API naming, the scheduler makes no guarantees that
 * the thread WILL be scheduled within that deadline, nor does it take
 * extra metadata (like e.g. the "runtime" and "period" parameters in
 * Linux sched_setattr()) that allows the kernel to validate the
 * scheduling for achievability.  Such features could be implemented
 * above this call, which is simply input to the priority selection
 * logic.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_DEADLINE} in your project
 * configuration.
 *
 * @param thread A thread on which to set the deadline
 * @param deadline A time delta, in cycle units
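 *
 * Example (a sketch: a deadline roughly 1 ms from now, expressed in cycles;
 * k_ms_to_cyc_ceil32() is assumed to be available for the conversion):
 *
 * @code
 * k_thread_deadline_set(tid, k_ms_to_cyc_ceil32(1));
 * @endcode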
 *
 */
__syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
#endif

#ifdef CONFIG_SCHED_CPU_MASK
/**
 * @brief Sets all CPU enable masks to zero
 *
 * After this returns, the thread will no longer be schedulable on any
 * CPUs.  The thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_clear(k_tid_t thread);

/**
 * @brief Sets all CPU enable masks to one
 *
 * After this returns, the thread will be schedulable on any CPU.  The
 * thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_enable_all(k_tid_t thread);

/**
 * @brief Enable thread to run on specified CPU
 *
 * The thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @param cpu CPU index
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_enable(k_tid_t thread, int cpu);

/**
 * @brief Prevent thread from running on specified CPU
 *
 * The thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @param cpu CPU index
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_disable(k_tid_t thread, int cpu);

/**
 * @brief Pin a thread to a CPU
 *
 * Pin a thread to a CPU by first clearing the cpu mask and then enabling the
 * thread on the selected CPU.
 *
 * @param thread Thread to operate upon
 * @param cpu CPU index
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_pin(k_tid_t thread, int cpu);
#endif

/**
 * @brief Suspend a thread.
 *
 * This routine prevents the kernel scheduler from making @a thread
 * the current thread. All other internal operations on @a thread are
 * still performed; for example, kernel objects it is waiting on are
 * still handed to it.  Note that any existing timeouts
 * (e.g. k_sleep(), or a timeout argument to k_sem_take() et. al.)
 * will be canceled.  On resume, the thread will begin running
 * immediately and return from the blocked call.
 *
 * If @a thread is already suspended, the routine has no effect.
 *
 * @param thread ID of thread to suspend.
 */
__syscall void k_thread_suspend(k_tid_t thread);

/**
 * @brief Resume a suspended thread.
 *
 * This routine allows the kernel scheduler to make @a thread the current
 * thread, when it is next eligible for that role.
 *
 * If @a thread is not currently suspended, the routine has no effect.
 *
 * @param thread ID of thread to resume.
 */
__syscall void k_thread_resume(k_tid_t thread);

/**
 * @brief Set time-slicing period and scope.
 *
 * This routine specifies how the scheduler will perform time slicing of
 * preemptible threads.
 *
 * To enable time slicing, @a slice must be non-zero. The scheduler
 * ensures that no thread runs for more than the specified time limit
 * before other threads of that priority are given a chance to execute.
 * Any thread whose priority is higher than @a prio is exempted, and may
 * execute as long as desired without being preempted due to time slicing.
 *
 * Time slicing only limits the maximum amount of time a thread may continuously
 * execute. Once the scheduler selects a thread for execution, there is no
 * minimum guaranteed time the thread will execute before threads of greater or
 * equal priority are scheduled.
 *
 * When the current thread is the only one of that priority eligible
 * for execution, this routine has no effect; the thread is immediately
 * rescheduled after the slice period expires.
 *
 * To disable timeslicing, set both @a slice and @a prio to zero.
 *
 * @param slice Maximum time slice length (in milliseconds).
 * @param prio Highest thread priority level eligible for time slicing.
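 *
 * Example (a sketch: 10 ms slices for threads at preemptible priorities,
 * i.e. priority 0 and numerically higher):
 *
 * @code
 * k_sched_time_slice_set(10, 0);
 * @endcode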
 */
extern void k_sched_time_slice_set(int32_t slice, int prio);

/**
 * @brief Set thread time slice
 *
 * As for k_sched_time_slice_set, but (when
 * CONFIG_TIMESLICE_PER_THREAD=y) sets the timeslice for a specific
 * thread.  When non-zero, this timeslice will take precedence over
 * the global value.
 *
 * When such a thread's timeslice expires, the configured callback
 * will be called before the thread is removed/re-added to the run
 * queue.  This callback will occur in interrupt context, and the
 * specified thread is guaranteed to have been preempted by the
 * currently-executing ISR.  Such a callback is free to, for example,
 * modify the thread priority or slice time for future execution,
 * suspend the thread, etc...
 *
 * @note Unlike the older API, the time slice parameter here is
 * specified in ticks, not milliseconds.  Ticks have always been the
 * internal unit, and not all platforms have integer conversions
 * between the two.
 *
 * @note Threads with a non-zero slice time set will be timesliced
 * always, even if they are higher priority than the maximum timeslice
 * priority set via k_sched_time_slice_set().
 *
 * @note The callback notification for slice expiration happens, as it
 * must, while the thread is still "current", and thus it happens
 * before any registered timeouts at this tick.  This has the somewhat
 * confusing side effect that the tick time (c.f. k_uptime_get()) does
 * not yet reflect the expired ticks.  Applications wishing to make
 * fine-grained timing decisions within this callback should use the
 * cycle API, or derived facilities like k_thread_runtime_stats_get().
 *
 * @param th A valid, initialized thread
 * @param slice_ticks Maximum timeslice, in ticks
 * @param expired Callback function called on slice expiration
 * @param data Parameter for the expiration handler
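 *
 * Example (a sketch; the callback, its assumed signature, and the thread are
 * hypothetical, and k_ms_to_ticks_ceil32() is assumed to be available):
 *
 * @code
 * void slice_expired(struct k_thread *thread, void *data);
 *
 * k_thread_time_slice_set(&worker_thread, k_ms_to_ticks_ceil32(2),
 *			   slice_expired, NULL);
 * @endcode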
 */
void k_thread_time_slice_set(struct k_thread *th, int32_t slice_ticks,
			     k_thread_timeslice_fn_t expired, void *data);

/** @} */

/**
 * @addtogroup isr_apis
 * @{
 */

/**
 * @brief Determine if code is running at interrupt level.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it is a thread or an ISR.
 *
 * @funcprops \isr_ok
 *
 * @return false if invoked by a thread.
 * @return true if invoked by an ISR.
 */
extern bool k_is_in_isr(void);

/**
 * @brief Determine if code is running in a preemptible thread.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it can be preempted by another thread. The routine returns a 'true'
 * value if all of the following conditions are met:
 *
 * - The code is running in a thread, not at ISR.
 * - The thread's priority is in the preemptible range.
 * - The thread has not locked the scheduler.
 *
 * @funcprops \isr_ok
 *
 * @return 0 if invoked by an ISR or by a cooperative thread.
 * @return Non-zero if invoked by a preemptible thread.
 */
__syscall int k_is_preempt_thread(void);

/**
 * @brief Test whether startup is in the before-main-task phase.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it is being invoked before the kernel is fully active.
 *
 * @funcprops \isr_ok
 *
 * @return true if invoked before post-kernel initialization
 * @return false if invoked during/after post-kernel initialization
 */
static inline bool k_is_pre_kernel(void)
{
	extern bool z_sys_post_kernel; /* in init.c */

	return !z_sys_post_kernel;
}

/**
 * @}
 */

/**
 * @addtogroup thread_apis
 * @{
 */

/**
 * @brief Lock the scheduler.
 *
 * This routine prevents the current thread from being preempted by another
 * thread by instructing the scheduler to treat it as a cooperative thread.
 * If the thread subsequently performs an operation that makes it unready,
 * it will be context switched out in the normal manner. When the thread
 * again becomes the current thread, its non-preemptible status is maintained.
 *
 * This routine can be called recursively.
 *
 * Owing to clever implementation details, scheduler locks are
 * extremely fast for non-userspace threads (just one byte
 * inc/decrement in the thread struct).
 *
 * @note This works by elevating the thread priority temporarily to a
 * cooperative priority, allowing cheap synchronization vs. other
 * preemptible or cooperative threads running on the current CPU.  It
 * does not prevent preemption or asynchrony of other types.  It does
 * not prevent threads from running on other CPUs when CONFIG_SMP=y.
 * It does not prevent interrupts from happening, nor does it prevent
 * threads with MetaIRQ priorities from preempting the current thread.
 * In general this is a historical API not well-suited to modern
 * applications, use with care.
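 *
 * Example (a critical-section sketch; the shared data is hypothetical):
 *
 * @code
 * k_sched_lock();
 * // ... access data shared with other threads on this CPU ...
 * k_sched_unlock();
 * @endcode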
 */
extern void k_sched_lock(void);

/**
 * @brief Unlock the scheduler.
 *
 * This routine reverses the effect of a previous call to k_sched_lock().
 * A thread must call the routine once for each time it called k_sched_lock()
 * before the thread becomes preemptible.
 */
extern void k_sched_unlock(void);

/**
 * @brief Set current thread's custom data.
 *
 * This routine sets the custom data for the current thread to @a value.
 *
 * Custom data is not used by the kernel itself, and is freely available
 * for a thread to use as it sees fit. It can be used as a framework
 * upon which to build thread-local storage.
 *
 * @param value New custom data value.
 *
 */
__syscall void k_thread_custom_data_set(void *value);

/**
 * @brief Get current thread's custom data.
 *
 * This routine returns the custom data for the current thread.
 *
 * @return Current custom data value.
 */
__syscall void *k_thread_custom_data_get(void);

/**
 * @brief Set current thread name
 *
 * Set the name of the thread to be used when @kconfig{CONFIG_THREAD_MONITOR}
 * is enabled for tracing and debugging.
 *
 * @param thread Thread to set name, or NULL to set the current thread
 * @param str Name string
 * @retval 0 on success
 * @retval -EFAULT Memory access error with supplied string
 * @retval -ENOSYS Thread name configuration option not enabled
 * @retval -EINVAL Thread name too long
 */
__syscall int k_thread_name_set(k_tid_t thread, const char *str);

/**
 * @brief Get thread name
 *
 * Get the name of a thread
 *
 * @param thread Thread ID
 * @retval Thread name, or NULL if configuration not enabled
 */
const char *k_thread_name_get(k_tid_t thread);

/**
 * @brief Copy the thread name into a supplied buffer
 *
 * @param thread Thread to obtain name information
 * @param buf Destination buffer
 * @param size Destination buffer size
 * @retval -ENOSPC Destination buffer too small
 * @retval -EFAULT Memory access error
 * @retval -ENOSYS Thread name feature not enabled
 * @retval 0 Success
 */
__syscall int k_thread_name_copy(k_tid_t thread, char *buf,
				 size_t size);

/**
 * @brief Get thread state string
 *
 * This routine generates a human friendly string containing the thread's
 * state, and copies as much of it as possible into @a buf.
 *
 * @param thread_id Thread ID
 * @param buf Buffer into which to copy state strings
 * @param buf_size Size of the buffer
 *
 * @retval Pointer to @a buf if data was copied, else a pointer to "".
 */
const char *k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size);

/**
 * @}
 */

/**
 * @addtogroup clock_apis
 * @{
 */

/**
 * @brief Generate null timeout delay.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * not to wait if the requested operation cannot be performed immediately.
 *
 * @return Timeout delay value.
 */
#define K_NO_WAIT Z_TIMEOUT_NO_WAIT

/**
 * @brief Generate timeout delay from nanoseconds.
 *
 * This macro generates a timeout delay that instructs a kernel API to
 * wait up to @a t nanoseconds to perform the requested operation.
 * Note that timer precision is limited to the tick rate, not the
 * requested value.
 *
 * @param t Duration in nanoseconds.
 *
 * @return Timeout delay value.
 */
#define K_NSEC(t)     Z_TIMEOUT_NS(t)

/**
 * @brief Generate timeout delay from microseconds.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a t microseconds to perform the requested operation.
 * Note that timer precision is limited to the tick rate, not the
 * requested value.
 *
 * @param t Duration in microseconds.
 *
 * @return Timeout delay value.
 */
#define K_USEC(t)     Z_TIMEOUT_US(t)

/**
 * @brief Generate timeout delay from cycles.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a t cycles to perform the requested operation.
 *
 * @param t Duration in cycles.
 *
 * @return Timeout delay value.
 */
#define K_CYC(t)     Z_TIMEOUT_CYC(t)

/**
 * @brief Generate timeout delay from system ticks.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a t ticks to perform the requested operation.
 *
 * @param t Duration in system ticks.
 *
 * @return Timeout delay value.
 */
#define K_TICKS(t)     Z_TIMEOUT_TICKS(t)

/**
 * @brief Generate timeout delay from milliseconds.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a ms milliseconds to perform the requested operation.
 *
 * @param ms Duration in milliseconds.
 *
 * @return Timeout delay value.
 */
#define K_MSEC(ms)     Z_TIMEOUT_MS(ms)

/**
 * @brief Generate timeout delay from seconds.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a s seconds to perform the requested operation.
 *
 * @param s Duration in seconds.
 *
 * @return Timeout delay value.
 */
#define K_SECONDS(s)   K_MSEC((s) * MSEC_PER_SEC)

/**
 * @brief Generate timeout delay from minutes.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a m minutes to perform the requested operation.
 *
 * @param m Duration in minutes.
 *
 * @return Timeout delay value.
 */
#define K_MINUTES(m)   K_SECONDS((m) * 60)

/**
 * @brief Generate timeout delay from hours.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a h hours to perform the requested operation.
 *
 * @param h Duration in hours.
 *
 * @return Timeout delay value.
 */
#define K_HOURS(h)     K_MINUTES((h) * 60)

/**
 * @brief Generate infinite timeout delay.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait as long as necessary to perform the requested operation.
 *
 * @return Timeout delay value.
 */
#define K_FOREVER Z_FOREVER

#ifdef CONFIG_TIMEOUT_64BIT

/**
 * @brief Generates an absolute/uptime timeout value from system ticks
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in system ticks.  That is, the
 * timeout will expire immediately after the system uptime reaches the
 * specified tick count.
 *
 * @param t Tick uptime value
 * @return Timeout delay value
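 *
 * Example (a sketch: sleep until system uptime reaches tick 1000; absolute
 * timeouts are assumed to be accepted by k_sleep()):
 *
 * @code
 * k_sleep(K_TIMEOUT_ABS_TICKS(1000));
 * @endcode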
 */
#define K_TIMEOUT_ABS_TICKS(t) \
	Z_TIMEOUT_TICKS(Z_TICK_ABS((k_ticks_t)MAX(t, 0)))

/**
 * @brief Generates an absolute/uptime timeout value from milliseconds
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in milliseconds.  That is,
 * the timeout will expire immediately after the system uptime reaches
 * the specified time.
 *
 * @param t Millisecond uptime value
 * @return Timeout delay value
 */
#define K_TIMEOUT_ABS_MS(t) K_TIMEOUT_ABS_TICKS(k_ms_to_ticks_ceil64(t))

/**
 * @brief Generates an absolute/uptime timeout value from microseconds
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in microseconds.  That is,
 * the timeout will expire immediately after the system uptime reaches
 * the specified time.  Note that timer precision is limited by the
 * system tick rate and not the requested timeout value.
 *
 * @param t Microsecond uptime value
 * @return Timeout delay value
 */
#define K_TIMEOUT_ABS_US(t) K_TIMEOUT_ABS_TICKS(k_us_to_ticks_ceil64(t))

/**
 * @brief Generates an absolute/uptime timeout value from nanoseconds
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in nanoseconds.  That is,
 * the timeout will expire immediately after the system uptime reaches
 * the specified time.  Note that timer precision is limited by the
 * system tick rate and not the requested timeout value.
 *
 * @param t Nanosecond uptime value
 * @return Timeout delay value
 */
#define K_TIMEOUT_ABS_NS(t) K_TIMEOUT_ABS_TICKS(k_ns_to_ticks_ceil64(t))

/**
 * @brief Generates an absolute/uptime timeout value from system cycles
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in cycles.  That is, the
 * timeout will expire immediately after the system uptime reaches the
 * specified time.  Note that timer precision is limited by the system
 * tick rate and not the requested timeout value.
 *
 * @param t Cycle uptime value
 * @return Timeout delay value
 */
#define K_TIMEOUT_ABS_CYC(t) K_TIMEOUT_ABS_TICKS(k_cyc_to_ticks_ceil64(t))

#endif

/**
 * @}
 */

/**
 * @cond INTERNAL_HIDDEN
 */

struct k_timer {
	/*
	 * _timeout structure must be first here if we want to use
	 * dynamic timer allocation. timeout.node is used in the double-linked
	 * list of free timers
	 */
	struct _timeout timeout;

	/* wait queue for the (single) thread waiting on this timer */
	_wait_q_t wait_q;

	/* runs in ISR context */
	void (*expiry_fn)(struct k_timer *timer);

	/* runs in the context of the thread that calls k_timer_stop() */
	void (*stop_fn)(struct k_timer *timer);

	/* timer period */
	k_timeout_t period;

	/* timer status */
	uint32_t status;

	/* user-specific data, also used to support legacy features */
	void *user_data;

	SYS_PORT_TRACING_TRACKING_FIELD(k_timer)

#ifdef CONFIG_OBJ_CORE_TIMER
	struct k_obj_core  obj_core;
#endif
};

#define Z_TIMER_INITIALIZER(obj, expiry, stop) \
	{ \
	.timeout = { \
		.node = {},\
		.fn = z_timer_expiration_handler, \
		.dticks = 0, \
	}, \
	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
	.expiry_fn = expiry, \
	.stop_fn = stop, \
	.status = 0, \
	.user_data = 0, \
	}

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @defgroup timer_apis Timer APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * @typedef k_timer_expiry_t
 * @brief Timer expiry function type.
 *
 * A timer's expiry function is executed by the system clock interrupt handler
 * each time the timer expires. The expiry function is optional, and is only
 * invoked if the timer has been initialized with one.
 *
 * @param timer     Address of timer.
 */
typedef void (*k_timer_expiry_t)(struct k_timer *timer);

/**
 * @typedef k_timer_stop_t
 * @brief Timer stop function type.
 *
 * A timer's stop function is executed if the timer is stopped prematurely.
 * The function runs in the context of the call that stops the timer.  As
 * k_timer_stop() can be invoked from an ISR, the stop function must be
 * callable from interrupt context (isr-ok).
 *
 * The stop function is optional, and is only invoked if the timer has been
 * initialized with one.
 *
 * @param timer     Address of timer.
 */
typedef void (*k_timer_stop_t)(struct k_timer *timer);

/**
 * @brief Statically define and initialize a timer.
 *
 * The timer can be accessed outside the module where it is defined using:
 *
 * @code extern struct k_timer <name>; @endcode
 *
 * @param name Name of the timer variable.
 * @param expiry_fn Function to invoke each time the timer expires.
 * @param stop_fn   Function to invoke if the timer is stopped while running.
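 *
 * Example (a sketch; the expiry handler name is hypothetical):
 *
 * @code
 * void my_expiry(struct k_timer *timer);
 *
 * K_TIMER_DEFINE(my_timer, my_expiry, NULL);
 * @endcode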
 */
#define K_TIMER_DEFINE(name, expiry_fn, stop_fn) \
	STRUCT_SECTION_ITERABLE(k_timer, name) = \
		Z_TIMER_INITIALIZER(name, expiry_fn, stop_fn)

/**
 * @brief Initialize a timer.
 *
 * This routine initializes a timer, prior to its first use.
 *
 * @param timer     Address of timer.
 * @param expiry_fn Function to invoke each time the timer expires.
 * @param stop_fn   Function to invoke if the timer is stopped while running.
 */
extern void k_timer_init(struct k_timer *timer,
			 k_timer_expiry_t expiry_fn,
			 k_timer_stop_t stop_fn);

/**
 * @brief Start a timer.
 *
 * This routine starts a timer, and resets its status to zero. The timer
 * begins counting down using the specified duration and period values.
 *
 * Attempting to start a timer that is already running is permitted.
 * The timer's status is reset to zero and the timer begins counting down
 * using the new duration and period values.
 *
 * @param timer     Address of timer.
 * @param duration  Initial timer duration.
 * @param period    Timer period.
1581  */
1582 __syscall void k_timer_start(struct k_timer *timer,
1583 			     k_timeout_t duration, k_timeout_t period);
1584 
1585 /**
1586  * @brief Stop a timer.
1587  *
1588  * This routine stops a running timer prematurely. The timer's stop function,
1589  * if one exists, is invoked by the caller.
1590  *
1591  * Attempting to stop a timer that is not running is permitted, but has no
1592  * effect on the timer.
1593  *
1594  * @note The stop handler has to be callable from ISRs if @a k_timer_stop is to
1595  * be called from ISRs.
1596  *
1597  * @funcprops \isr_ok
1598  *
1599  * @param timer     Address of timer.
1600  */
1601 __syscall void k_timer_stop(struct k_timer *timer);
1602 
1603 /**
1604  * @brief Read timer status.
1605  *
1606  * This routine reads the timer's status, which indicates the number of times
1607  * it has expired since its status was last read.
1608  *
1609  * Calling this routine resets the timer's status to zero.
1610  *
1611  * @param timer     Address of timer.
1612  *
1613  * @return Timer status.
1614  */
1615 __syscall uint32_t k_timer_status_get(struct k_timer *timer);
1616 
1617 /**
1618  * @brief Synchronize thread to timer expiration.
1619  *
1620  * This routine blocks the calling thread until the timer's status is non-zero
1621  * (indicating that it has expired at least once since it was last examined)
1622  * or the timer is stopped. If the timer status is already non-zero,
1623  * or the timer is already stopped, the caller continues without waiting.
1624  *
1625  * Calling this routine resets the timer's status to zero.
1626  *
1627  * This routine must not be used by interrupt handlers, since they are not
1628  * allowed to block.
1629  *
1630  * @param timer     Address of timer.
1631  *
1632  * @return Timer status.
1633  */
1634 __syscall uint32_t k_timer_status_sync(struct k_timer *timer);
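
/*
 * Example: an illustrative sketch of periodic timer use. The names
 * blink_expiry, blink_timer and toggle_led are hypothetical; only the
 * k_timer_* calls are the APIs documented here.
 *
 *	static void blink_expiry(struct k_timer *timer)
 *	{
 *		toggle_led();	// runs in ISR context: keep it short
 *	}
 *
 *	K_TIMER_DEFINE(blink_timer, blink_expiry, NULL);
 *
 *	void blinker(void)
 *	{
 *		// fire once after 100 ms, then every 500 ms
 *		k_timer_start(&blink_timer, K_MSEC(100), K_MSEC(500));
 *		while (1) {
 *			// block until the next expiration
 *			k_timer_status_sync(&blink_timer);
 *		}
 *	}
 */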
1635 
1636 #ifdef CONFIG_SYS_CLOCK_EXISTS
1637 
1638 /**
1639  * @brief Get next expiration time of a timer, in system ticks
1640  *
1641  * This routine returns the future system uptime reached at the next
1642  * time of expiration of the timer, in units of system ticks.  If the
1643  * timer is not running, the current system time is returned.
1644  *
1645  * @param timer The timer object
1646  * @return Uptime of expiration, in ticks
1647  */
1648 __syscall k_ticks_t k_timer_expires_ticks(const struct k_timer *timer);
1649 
1650 static inline k_ticks_t z_impl_k_timer_expires_ticks(
1651 				       const struct k_timer *timer)
1652 {
1653 	return z_timeout_expires(&timer->timeout);
1654 }
1655 
1656 /**
1657  * @brief Get time remaining before a timer next expires, in system ticks
1658  *
1659  * This routine computes the time remaining before a running timer
1660  * next expires, in units of system ticks.  If the timer is not
1661  * running, it returns zero.
1662  */
1663 __syscall k_ticks_t k_timer_remaining_ticks(const struct k_timer *timer);
1664 
1665 static inline k_ticks_t z_impl_k_timer_remaining_ticks(
1666 				       const struct k_timer *timer)
1667 {
1668 	return z_timeout_remaining(&timer->timeout);
1669 }
1670 
1671 /**
1672  * @brief Get time remaining before a timer next expires.
1673  *
1674  * This routine computes the (approximate) time remaining before a running
1675  * timer next expires. If the timer is not running, it returns zero.
1676  *
1677  * @param timer     Address of timer.
1678  *
1679  * @return Remaining time (in milliseconds).
1680  */
1681 static inline uint32_t k_timer_remaining_get(struct k_timer *timer)
1682 {
1683 	return k_ticks_to_ms_floor32(k_timer_remaining_ticks(timer));
1684 }
1685 
1686 #endif /* CONFIG_SYS_CLOCK_EXISTS */
1687 
1688 /**
1689  * @brief Associate user-specific data with a timer.
1690  *
1691  * This routine records the @a user_data with the @a timer, to be retrieved
1692  * later.
1693  *
1694  * It can be used e.g. in a timer handler shared across multiple subsystems to
1695  * retrieve data specific to the subsystem this timer is associated with.
1696  *
1697  * @param timer     Address of timer.
1698  * @param user_data User data to associate with the timer.
1699  */
1700 __syscall void k_timer_user_data_set(struct k_timer *timer, void *user_data);
1701 
1702 /**
1703  * @internal
1704  */
1705 static inline void z_impl_k_timer_user_data_set(struct k_timer *timer,
1706 					       void *user_data)
1707 {
1708 	timer->user_data = user_data;
1709 }
1710 
1711 /**
1712  * @brief Retrieve the user-specific data from a timer.
1713  *
1714  * @param timer     Address of timer.
1715  *
1716  * @return The user data.
1717  */
1718 __syscall void *k_timer_user_data_get(const struct k_timer *timer);
1719 
1720 static inline void *z_impl_k_timer_user_data_get(const struct k_timer *timer)
1721 {
1722 	return timer->user_data;
1723 }
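
/*
 * Example: a sketch of one expiry handler shared by several subsystem
 * instances, distinguished via the timer's user data. struct my_subsys
 * and its fields are hypothetical.
 *
 *	static void shared_expiry(struct k_timer *timer)
 *	{
 *		struct my_subsys *ctx = k_timer_user_data_get(timer);
 *
 *		// act on the owning instance
 *		ctx->expired = true;
 *	}
 *
 *	// per-instance setup:
 *	k_timer_init(&ctx->timer, shared_expiry, NULL);
 *	k_timer_user_data_set(&ctx->timer, ctx);
 */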
1724 
1725 /** @} */
1726 
1727 /**
1728  * @addtogroup clock_apis
1729  * @ingroup kernel_apis
1730  * @{
1731  */
1732 
1733 /**
1734  * @brief Get system uptime, in system ticks.
1735  *
1736  * This routine returns the elapsed time since the system booted, in
1737  * ticks (c.f. @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC}), which is the
1738  * fundamental unit of resolution of kernel timekeeping.
1739  *
1740  * @return Current uptime in ticks.
1741  */
1742 __syscall int64_t k_uptime_ticks(void);
1743 
1744 /**
1745  * @brief Get system uptime.
1746  *
1747  * This routine returns the elapsed time since the system booted,
1748  * in milliseconds.
1749  *
1750  * @note
1751  *    While this function returns time in milliseconds, it does
1752  *    not mean it has millisecond resolution. The actual resolution depends on
1753  *    @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
1754  *
1755  * @return Current uptime in milliseconds.
1756  */
1757 static inline int64_t k_uptime_get(void)
1758 {
1759 	return k_ticks_to_ms_floor64(k_uptime_ticks());
1760 }
1761 
1762 /**
1763  * @brief Get system uptime (32-bit version).
1764  *
1765  * This routine returns the lower 32 bits of the system uptime in
1766  * milliseconds.
1767  *
1768  * Because correct conversion requires full precision of the system
1769  * clock there is no benefit to using this over k_uptime_get() unless
1770  * you know the application will never run long enough for the system
1771  * clock to approach 2^32 ticks.  Calls to this function may involve
1772  * interrupt blocking and 64-bit math.
1773  *
1774  * @note
1775  *    While this function returns time in milliseconds, it does
1776  *    not mean it has millisecond resolution. The actual resolution depends on
1777  *    @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option
1778  *
1779  * @return The low 32 bits of the current uptime, in milliseconds.
1780  */
1781 static inline uint32_t k_uptime_get_32(void)
1782 {
1783 	return (uint32_t)k_uptime_get();
1784 }
1785 
1786 /**
1787  * @brief Get elapsed time.
1788  *
1789  * This routine computes the elapsed time between the current system uptime
1790  * and an earlier reference time, in milliseconds.
1791  *
1792  * @param reftime Pointer to a reference time, which is updated to the current
1793  *                uptime upon return.
1794  *
1795  * @return Elapsed time.
1796  */
1797 static inline int64_t k_uptime_delta(int64_t *reftime)
1798 {
1799 	int64_t uptime, delta;
1800 
1801 	uptime = k_uptime_get();
1802 	delta = uptime - *reftime;
1803 	*reftime = uptime;
1804 
1805 	return delta;
1806 }
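
/*
 * Example: a sketch of timing an operation with k_uptime_delta();
 * do_work() is hypothetical. Note the resolution caveat above: the
 * result is only as precise as the system tick.
 *
 *	int64_t ref = k_uptime_get();
 *
 *	do_work();
 *
 *	int64_t took_ms = k_uptime_delta(&ref);	// also advances ref to "now"
 */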
1807 
1808 /**
1809  * @brief Read the hardware clock.
1810  *
1811  * This routine returns the current time, as measured by the system's hardware
1812  * clock.
1813  *
1814  * @return Current hardware clock up-counter (in cycles).
1815  */
1816 static inline uint32_t k_cycle_get_32(void)
1817 {
1818 	return arch_k_cycle_get_32();
1819 }
1820 
1821 /**
1822  * @brief Read the 64-bit hardware clock.
1823  *
1824  * This routine returns the current time in 64-bits, as measured by the
1825  * system's hardware clock, if available.
1826  *
1827  * @see CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER
1828  *
1829  * @return Current hardware clock up-counter (in cycles).
1830  */
1831 static inline uint64_t k_cycle_get_64(void)
1832 {
1833 	if (!IS_ENABLED(CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER)) {
1834 		__ASSERT(0, "64-bit cycle counter not enabled on this platform. "
1835 			    "See CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER");
1836 		return 0;
1837 	}
1838 
1839 	return arch_k_cycle_get_64();
1840 }
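
/*
 * Example: a sketch of fine-grained interval measurement with the
 * 32-bit cycle counter, assuming the k_cyc_to_ns_floor64() time-unit
 * helper is available; do_work() is hypothetical. Unsigned subtraction
 * tolerates a single counter wraparound.
 *
 *	uint32_t start = k_cycle_get_32();
 *
 *	do_work();
 *
 *	uint32_t cycles = k_cycle_get_32() - start;
 *	uint64_t ns = k_cyc_to_ns_floor64(cycles);
 */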
1841 
1842 /**
1843  * @}
1844  */
1845 
1846 struct k_queue {
1847 	sys_sflist_t data_q;
1848 	struct k_spinlock lock;
1849 	_wait_q_t wait_q;
1850 
1851 	_POLL_EVENT;
1852 
1853 	SYS_PORT_TRACING_TRACKING_FIELD(k_queue)
1854 };
1855 
1856 /**
1857  * @cond INTERNAL_HIDDEN
1858  */
1859 
1860 #define Z_QUEUE_INITIALIZER(obj) \
1861 	{ \
1862 	.data_q = SYS_SFLIST_STATIC_INIT(&obj.data_q), \
1863 	.lock = { }, \
1864 	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q),	\
1865 	_POLL_EVENT_OBJ_INIT(obj)		\
1866 	}
1867 
1868 /**
1869  * INTERNAL_HIDDEN @endcond
1870  */
1871 
1872 /**
1873  * @defgroup queue_apis Queue APIs
1874  * @ingroup kernel_apis
1875  * @{
1876  */
1877 
1878 /**
1879  * @brief Initialize a queue.
1880  *
1881  * This routine initializes a queue object, prior to its first use.
1882  *
1883  * @param queue Address of the queue.
1884  */
1885 __syscall void k_queue_init(struct k_queue *queue);
1886 
1887 /**
1888  * @brief Cancel waiting on a queue.
1889  *
1890  * This routine causes the first thread pending on @a queue, if any, to
1891  * return from its k_queue_get() call with a NULL value (as if the timeout expired).
1892  * If the queue is being waited on by k_poll(), it will return with
1893  * -EINTR and K_POLL_STATE_CANCELLED state (and per above, subsequent
1894  * k_queue_get() will return NULL).
1895  *
1896  * @funcprops \isr_ok
1897  *
1898  * @param queue Address of the queue.
1899  */
1900 __syscall void k_queue_cancel_wait(struct k_queue *queue);
1901 
1902 /**
1903  * @brief Append an element to the end of a queue.
1904  *
1905  * This routine appends a data item to @a queue. A queue data item must be
1906  * aligned on a word boundary, and the first word of the item is reserved
1907  * for the kernel's use.
1908  *
1909  * @funcprops \isr_ok
1910  *
1911  * @param queue Address of the queue.
1912  * @param data Address of the data item.
1913  */
1914 extern void k_queue_append(struct k_queue *queue, void *data);
1915 
1916 /**
1917  * @brief Append an element to a queue.
1918  *
1919  * This routine appends a data item to @a queue. There is an implicit memory
1920  * allocation to create an additional temporary bookkeeping data structure from
1921  * the calling thread's resource pool, which is automatically freed when the
1922  * item is removed. The data itself is not copied.
1923  *
1924  * @funcprops \isr_ok
1925  *
1926  * @param queue Address of the queue.
1927  * @param data Address of the data item.
1928  *
1929  * @retval 0 on success
1930  * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
1931  */
1932 __syscall int32_t k_queue_alloc_append(struct k_queue *queue, void *data);
1933 
1934 /**
1935  * @brief Prepend an element to a queue.
1936  *
1937  * This routine prepends a data item to @a queue. A queue data item must be
1938  * aligned on a word boundary, and the first word of the item is reserved
1939  * for the kernel's use.
1940  *
1941  * @funcprops \isr_ok
1942  *
1943  * @param queue Address of the queue.
1944  * @param data Address of the data item.
1945  */
1946 extern void k_queue_prepend(struct k_queue *queue, void *data);
1947 
1948 /**
1949  * @brief Prepend an element to a queue.
1950  *
1951  * This routine prepends a data item to @a queue. There is an implicit memory
1952  * allocation to create an additional temporary bookkeeping data structure from
1953  * the calling thread's resource pool, which is automatically freed when the
1954  * item is removed. The data itself is not copied.
1955  *
1956  * @funcprops \isr_ok
1957  *
1958  * @param queue Address of the queue.
1959  * @param data Address of the data item.
1960  *
1961  * @retval 0 on success
1962  * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
1963  */
1964 __syscall int32_t k_queue_alloc_prepend(struct k_queue *queue, void *data);
1965 
1966 /**
1967  * @brief Inserts an element to a queue.
1968  *
1969  * This routine inserts a data item into @a queue after the previous item. A queue
1970  * data item must be aligned on a word boundary, and the first word of
1971  * the item is reserved for the kernel's use.
1972  *
1973  * @funcprops \isr_ok
1974  *
1975  * @param queue Address of the queue.
1976  * @param prev Address of the previous data item.
1977  * @param data Address of the data item.
1978  */
1979 extern void k_queue_insert(struct k_queue *queue, void *prev, void *data);
1980 
1981 /**
1982  * @brief Atomically append a list of elements to a queue.
1983  *
1984  * This routine adds a list of data items to @a queue in one operation.
1985  * The data items must be in a singly-linked list, with the first word
1986  * in each data item pointing to the next data item; the list must be
1987  * NULL-terminated.
1988  *
1989  * @funcprops \isr_ok
1990  *
1991  * @param queue Address of the queue.
1992  * @param head Pointer to first node in singly-linked list.
1993  * @param tail Pointer to last node in singly-linked list.
1994  *
1995  * @retval 0 on success
1996  * @retval -EINVAL on invalid supplied data
1997  *
1998  */
1999 extern int k_queue_append_list(struct k_queue *queue, void *head, void *tail);
2000 
2001 /**
2002  * @brief Atomically add a list of elements to a queue.
2003  *
2004  * This routine adds a list of data items to @a queue in one operation.
2005  * The data items must be in a singly-linked list implemented using a
2006  * sys_slist_t object. Upon completion, the original list is empty.
2007  *
2008  * @funcprops \isr_ok
2009  *
2010  * @param queue Address of the queue.
2011  * @param list Pointer to sys_slist_t object.
2012  *
2013  * @retval 0 on success
2014  * @retval -EINVAL on invalid data
2015  */
2016 extern int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list);
2017 
2018 /**
2019  * @brief Get an element from a queue.
2020  *
2021  * This routine removes first data item from @a queue. The first word of the
2022  * data item is reserved for the kernel's use.
2023  *
2024  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2025  *
2026  * @funcprops \isr_ok
2027  *
2028  * @param queue Address of the queue.
2029  * @param timeout Non-negative waiting period to obtain a data item
2030  *                or one of the special values K_NO_WAIT and
2031  *                K_FOREVER.
2032  *
2033  * @return Address of the data item if successful; NULL if returned
2034  * without waiting, or waiting period timed out.
2035  */
2036 __syscall void *k_queue_get(struct k_queue *queue, k_timeout_t timeout);
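
/*
 * Example: a sketch of the reserved-first-word convention for queue
 * items. The kernel overwrites the first word, so user data must come
 * after it; struct msg and my_queue are hypothetical.
 *
 *	struct msg {
 *		void *reserved;		// first word: for kernel use only
 *		uint32_t payload;
 *	};
 *
 *	static struct msg m = { .payload = 42 };
 *
 *	k_queue_append(&my_queue, &m);			// producer
 *
 *	struct msg *rx = k_queue_get(&my_queue, K_FOREVER);	// consumer
 */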
2037 
2038 /**
2039  * @brief Remove an element from a queue.
2040  *
2041  * This routine removes a data item from @a queue. The first word of the
2042  * data item is reserved for the kernel's use. Removing elements from a k_queue
2043  * relies on sys_slist_find_and_remove, which is not a constant-time operation.
2044  *
2045  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2046  *
2047  * @funcprops \isr_ok
2048  *
2049  * @param queue Address of the queue.
2050  * @param data Address of the data item.
2051  *
2052  * @return true if data item was removed
2053  */
2054 bool k_queue_remove(struct k_queue *queue, void *data);
2055 
2056 /**
2057  * @brief Append an element to a queue only if it's not present already.
2058  *
2059  * This routine appends a data item to @a queue. The first word of the data
2060  * item is reserved for the kernel's use. Appending elements to a k_queue
2061  * relies on sys_slist_is_node_in_list, which is not a constant-time operation.
2062  *
2063  * @funcprops \isr_ok
2064  *
2065  * @param queue Address of the queue.
2066  * @param data Address of the data item.
2067  *
2068  * @return true if data item was added, false if not
2069  */
2070 bool k_queue_unique_append(struct k_queue *queue, void *data);
2071 
2072 /**
2073  * @brief Query a queue to see if it has data available.
2074  *
2075  * Note that the data might already be gone by the time this function returns
2076  * if other threads are also trying to read from the queue.
2077  *
2078  * @funcprops \isr_ok
2079  *
2080  * @param queue Address of the queue.
2081  *
2082  * @return Non-zero if the queue is empty.
2083  * @return 0 if data is available.
2084  */
2085 __syscall int k_queue_is_empty(struct k_queue *queue);
2086 
2087 static inline int z_impl_k_queue_is_empty(struct k_queue *queue)
2088 {
2089 	return (int)sys_sflist_is_empty(&queue->data_q);
2090 }
2091 
2092 /**
2093  * @brief Peek element at the head of queue.
2094  *
2095  * Return element from the head of queue without removing it.
2096  *
2097  * @param queue Address of the queue.
2098  *
2099  * @return Head element, or NULL if queue is empty.
2100  */
2101 __syscall void *k_queue_peek_head(struct k_queue *queue);
2102 
2103 /**
2104  * @brief Peek element at the tail of queue.
2105  *
2106  * Return element from the tail of queue without removing it.
2107  *
2108  * @param queue Address of the queue.
2109  *
2110  * @return Tail element, or NULL if queue is empty.
2111  */
2112 __syscall void *k_queue_peek_tail(struct k_queue *queue);
2113 
2114 /**
2115  * @brief Statically define and initialize a queue.
2116  *
2117  * The queue can be accessed outside the module where it is defined using:
2118  *
2119  * @code extern struct k_queue <name>; @endcode
2120  *
2121  * @param name Name of the queue.
2122  */
2123 #define K_QUEUE_DEFINE(name) \
2124 	STRUCT_SECTION_ITERABLE(k_queue, name) = \
2125 		Z_QUEUE_INITIALIZER(name)
2126 
2127 /** @} */
2128 
2129 #ifdef CONFIG_USERSPACE
2130 /**
2131  * @brief futex structure
2132  *
2133  * A k_futex is a lightweight mutual exclusion primitive designed
2134  * to minimize kernel involvement. Uncontended operation relies
2135  * only on atomic access to shared memory. k_futex objects are tracked
2136  * as kernel objects and can live in user memory so that any access
2137  * bypasses the kernel object permission management mechanism.
2138  */
2139 struct k_futex {
2140 	atomic_t val;
2141 };
2142 
2143 /**
2144  * @brief futex kernel data structure
2145  *
2146  * z_futex_data is the helper data structure that allows a k_futex to
2147  * complete contended operations on the kernel side; the z_futex_data
2148  * structure of every futex object is invisible in user mode.
2149  */
2150 struct z_futex_data {
2151 	_wait_q_t wait_q;
2152 	struct k_spinlock lock;
2153 };
2154 
2155 #define Z_FUTEX_DATA_INITIALIZER(obj) \
2156 	{ \
2157 	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q) \
2158 	}
2159 
2160 /**
2161  * @defgroup futex_apis FUTEX APIs
2162  * @ingroup kernel_apis
2163  * @{
2164  */
2165 
2166 /**
2167  * @brief Pend the current thread on a futex
2168  *
2169  * Tests that the supplied futex contains the expected value, and if so,
2170  * goes to sleep until some other thread calls k_futex_wake() on it.
2171  *
2172  * @param futex Address of the futex.
2173  * @param expected Expected value of the futex, if it is different the caller
2174  *		   will not wait on it.
2175  * @param timeout Non-negative waiting period on the futex, or
2176  *		  one of the special values K_NO_WAIT or K_FOREVER.
2177  * @retval -EACCES Caller does not have read access to futex address.
2178  * @retval -EAGAIN If the futex value did not match the expected parameter.
2179  * @retval -EINVAL Futex parameter address not recognized by the kernel.
2180  * @retval -ETIMEDOUT Thread woke up due to timeout and not a futex wakeup.
2181  * @retval 0 if the caller went to sleep and was woken up. The caller
2182  *	     should check the futex's value on wakeup to determine if it needs
2183  *	     to block again.
2184  */
2185 __syscall int k_futex_wait(struct k_futex *futex, int expected,
2186 			   k_timeout_t timeout);
2187 
2188 /**
2189  * @brief Wake one/all threads pending on a futex
2190  *
2191  * Wake up the highest priority thread pending on the supplied futex, or
2192  * wake up all threads pending on the supplied futex; the behavior depends
2193  * on @a wake_all.
2194  *
2195  * @param futex Futex to wake up pending threads.
2196  * @param wake_all If true, wake up all pending threads; if false,
2197  *                 wake up only the highest priority thread.
2198  * @retval -EACCES Caller does not have access to the futex address.
2199  * @retval -EINVAL Futex parameter address not recognized by the kernel.
2200  * @retval Number of threads that were woken up.
2201  */
2202 __syscall int k_futex_wake(struct k_futex *futex, bool wake_all);
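
/*
 * Example: a sketch of a simple user-mode flag built on a futex. A
 * waiter sleeps only while the value still equals what it observed;
 * the variable names are hypothetical.
 *
 *	static struct k_futex flag;	// may live in user memory
 *
 *	// waiter: sleep while flag.val == 0
 *	while (atomic_get(&flag.val) == 0) {
 *		// returns -EAGAIN immediately if the value changed first
 *		k_futex_wait(&flag, 0, K_FOREVER);
 *	}
 *
 *	// signaler:
 *	atomic_set(&flag.val, 1);
 *	k_futex_wake(&flag, true);	// wake all pending waiters
 */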
2203 
2204 /** @} */
2205 #endif
2206 
2207 /**
2208  * @defgroup event_apis Event APIs
2209  * @ingroup kernel_apis
2210  * @{
2211  */
2212 
2213 /**
2214  * Event Structure
2215  * @ingroup event_apis
2216  */
2217 
2218 struct k_event {
2219 	_wait_q_t         wait_q;
2220 	uint32_t          events;
2221 	struct k_spinlock lock;
2222 
2223 	SYS_PORT_TRACING_TRACKING_FIELD(k_event)
2224 
2225 #ifdef CONFIG_OBJ_CORE_EVENT
2226 	struct k_obj_core obj_core;
2227 #endif
2228 
2229 };
2230 
2231 #define Z_EVENT_INITIALIZER(obj) \
2232 	{ \
2233 	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2234 	.events = 0 \
2235 	}
2236 
2237 /**
2238  * @brief Initialize an event object
2239  *
2240  * This routine initializes an event object, prior to its first use.
2241  *
2242  * @param event Address of the event object.
2243  */
2244 __syscall void k_event_init(struct k_event *event);
2245 
2246 /**
2247  * @brief Post one or more events to an event object
2248  *
2249  * This routine posts one or more events to an event object. All tasks waiting
2250  * on the event object @a event whose waiting conditions become met by this
2251  * posting immediately unpend.
2252  *
2253  * Posting differs from setting in that posted events are merged together with
2254  * the current set of events tracked by the event object.
2255  *
2256  * @param event Address of the event object
2257  * @param events Set of events to post to @a event
2258  *
2259  * @retval Previous value of the events in @a event
2260  */
2261 __syscall uint32_t k_event_post(struct k_event *event, uint32_t events);
2262 
2263 /**
2264  * @brief Set the events in an event object
2265  *
2266  * This routine sets the events stored in the event object to the specified value.
2267  * All tasks waiting on the event object @a event whose waiting conditions
2268  * become met by this immediately unpend.
2269  *
2270  * Setting differs from posting in that set events replace the current set of
2271  * events tracked by the event object.
2272  *
2273  * @param event Address of the event object
2274  * @param events Set of events to set in @a event
2275  *
2276  * @retval Previous value of the events in @a event
2277  */
2278 __syscall uint32_t k_event_set(struct k_event *event, uint32_t events);
2279 
2280 /**
2281  * @brief Set or clear the events in an event object
2282  *
2283  * This routine sets the events stored in the event object to the specified value.
2284  * All tasks waiting on the event object @a event whose waiting conditions
2285  * become met by this immediately unpend. Unlike @ref k_event_set, this routine
2286  * allows specific event bits to be set and cleared as determined by the mask.
2287  *
2288  * @param event Address of the event object
2289  * @param events Set of events to set/clear in @a event
2290  * @param events_mask Mask to be applied to @a events
2291  *
2292  * @retval Previous value of the events in @a events_mask
2293  */
2294 __syscall uint32_t k_event_set_masked(struct k_event *event, uint32_t events,
2295 				  uint32_t events_mask);
2296 
2297 /**
2298  * @brief Clear the events in an event object
2299  *
2300  * This routine clears (resets) the specified events stored in an event object.
2301  *
2302  * @param event Address of the event object
2303  * @param events Set of events to clear in @a event
2304  *
2305  * @retval Previous value of the events in @a event
2306  */
2307 __syscall uint32_t k_event_clear(struct k_event *event, uint32_t events);
2308 
2309 /**
2310  * @brief Wait for any of the specified events
2311  *
2312  * This routine waits on event object @a event until any of the specified
2313  * events have been delivered to the event object, or the maximum wait time
2314  * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
2315  * events that are expressed as bits in a single 32-bit word.
2316  *
2317  * @note The caller must be careful when resetting if there are multiple threads
2318  * waiting for the event object @a event.
2319  *
2320  * @param event Address of the event object
2321  * @param events Set of desired events on which to wait
2322  * @param reset If true, clear the set of events tracked by the event object
2323  *              before waiting. If false, do not clear the events.
2324  * @param timeout Waiting period for the desired set of events or one of the
2325  *                special values K_NO_WAIT and K_FOREVER.
2326  *
2327  * @retval set of matching events upon success
2328  * @retval 0 if matching events were not received within the specified time
2329  */
2330 __syscall uint32_t k_event_wait(struct k_event *event, uint32_t events,
2331 				bool reset, k_timeout_t timeout);
2332 
2333 /**
2334  * @brief Wait for all of the specified events
2335  *
2336  * This routine waits on event object @a event until all of the specified
2337  * events have been delivered to the event object, or the maximum wait time
2338  * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
2339  * events that are expressed as bits in a single 32-bit word.
2340  *
2341  * @note The caller must be careful when resetting if there are multiple threads
2342  * waiting for the event object @a event.
2343  *
2344  * @param event Address of the event object
2345  * @param events Set of desired events on which to wait
2346  * @param reset If true, clear the set of events tracked by the event object
2347  *              before waiting. If false, do not clear the events.
2348  * @param timeout Waiting period for the desired set of events or one of the
2349  *                special values K_NO_WAIT and K_FOREVER.
2350  *
2351  * @retval set of matching events upon success
2352  * @retval 0 if matching events were not received within the specified time
2353  */
2354 __syscall uint32_t k_event_wait_all(struct k_event *event, uint32_t events,
2355 				    bool reset, k_timeout_t timeout);
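
/*
 * Example: a sketch of event bits multiplexing wake-up sources;
 * EVT_RX, EVT_ERR and io_events are hypothetical.
 *
 *	#define EVT_RX	BIT(0)
 *	#define EVT_ERR	BIT(1)
 *
 *	K_EVENT_DEFINE(io_events);
 *
 *	// ISR or other thread:
 *	k_event_post(&io_events, EVT_RX);
 *
 *	// consumer: wait up to 100 ms for either bit
 *	uint32_t got = k_event_wait(&io_events, EVT_RX | EVT_ERR,
 *				    false, K_MSEC(100));
 *	if (got == 0) {
 *		// timed out
 *	}
 */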
2356 
2357 /**
2358  * @brief Test the events currently tracked in the event object
2359  *
2360  * @param event Address of the event object
2361  * @param events_mask Set of desired events to test
2362  *
2363  * @retval Current value of events in @a events_mask
2364  */
2365 static inline uint32_t k_event_test(struct k_event *event, uint32_t events_mask)
2366 {
2367 	return k_event_wait(event, events_mask, false, K_NO_WAIT);
2368 }
2369 
2370 /**
2371  * @brief Statically define and initialize an event object
2372  *
2373  * The event can be accessed outside the module where it is defined using:
2374  *
2375  * @code extern struct k_event <name>; @endcode
2376  *
2377  * @param name Name of the event object.
2378  */
2379 #define K_EVENT_DEFINE(name)                                   \
2380 	STRUCT_SECTION_ITERABLE(k_event, name) =               \
2381 		Z_EVENT_INITIALIZER(name);
2382 
2383 /** @} */
2384 
2385 struct k_fifo {
2386 	struct k_queue _queue;
2387 #ifdef CONFIG_OBJ_CORE_FIFO
2388 	struct k_obj_core  obj_core;
2389 #endif
2390 };
2391 
2392 /**
2393  * @cond INTERNAL_HIDDEN
2394  */
2395 #define Z_FIFO_INITIALIZER(obj) \
2396 	{ \
2397 	._queue = Z_QUEUE_INITIALIZER(obj._queue) \
2398 	}
2399 
2400 /**
2401  * INTERNAL_HIDDEN @endcond
2402  */
2403 
2404 /**
2405  * @defgroup fifo_apis FIFO APIs
2406  * @ingroup kernel_apis
2407  * @{
2408  */
2409 
2410 /**
2411  * @brief Initialize a FIFO queue.
2412  *
2413  * This routine initializes a FIFO queue, prior to its first use.
2414  *
2415  * @param fifo Address of the FIFO queue.
2416  */
2417 #define k_fifo_init(fifo)                                    \
2418 	({                                                   \
2419 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, init, fifo); \
2420 	k_queue_init(&(fifo)->_queue);                       \
2421 	K_OBJ_CORE_INIT(K_OBJ_CORE(fifo), _obj_type_fifo);   \
2422 	K_OBJ_CORE_LINK(K_OBJ_CORE(fifo));                   \
2423 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, init, fifo);  \
2424 	})
2425 
2426 /**
2427  * @brief Cancel waiting on a FIFO queue.
2428  *
2429  * This routine causes the first thread pending on @a fifo, if any, to
2430  * return from its k_fifo_get() call with a NULL value (as if the timeout
2431  * expired).
2432  *
2433  * @funcprops \isr_ok
2434  *
2435  * @param fifo Address of the FIFO queue.
2436  */
2437 #define k_fifo_cancel_wait(fifo) \
2438 	({ \
2439 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, cancel_wait, fifo); \
2440 	k_queue_cancel_wait(&(fifo)->_queue); \
2441 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, cancel_wait, fifo); \
2442 	})
2443 
2444 /**
2445  * @brief Add an element to a FIFO queue.
2446  *
2447  * This routine adds a data item to @a fifo. A FIFO data item must be
2448  * aligned on a word boundary, and the first word of the item is reserved
2449  * for the kernel's use.
2450  *
2451  * @funcprops \isr_ok
2452  *
2453  * @param fifo Address of the FIFO.
2454  * @param data Address of the data item.
2455  */
2456 #define k_fifo_put(fifo, data) \
2457 	({ \
2458 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put, fifo, data); \
2459 	k_queue_append(&(fifo)->_queue, data); \
2460 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put, fifo, data); \
2461 	})
2462 
2463 /**
2464  * @brief Add an element to a FIFO queue.
2465  *
2466  * This routine adds a data item to @a fifo. There is an implicit memory
2467  * allocation to create an additional temporary bookkeeping data structure from
2468  * the calling thread's resource pool, which is automatically freed when the
2469  * item is removed. The data itself is not copied.
2470  *
2471  * @funcprops \isr_ok
2472  *
2473  * @param fifo Address of the FIFO.
2474  * @param data Address of the data item.
2475  *
2476  * @retval 0 on success
2477  * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2478  */
2479 #define k_fifo_alloc_put(fifo, data) \
2480 	({ \
2481 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, alloc_put, fifo, data); \
2482 	int fap_ret = k_queue_alloc_append(&(fifo)->_queue, data); \
2483 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, alloc_put, fifo, data, fap_ret); \
2484 	fap_ret; \
2485 	})
2486 
2487 /**
2488  * @brief Atomically add a list of elements to a FIFO.
2489  *
2490  * This routine adds a list of data items to @a fifo in one operation.
2491  * The data items must be in a singly-linked list, with the first word of
2492  * each data item pointing to the next data item; the list must be
2493  * NULL-terminated.
2494  *
2495  * @funcprops \isr_ok
2496  *
2497  * @param fifo Address of the FIFO queue.
2498  * @param head Pointer to first node in singly-linked list.
2499  * @param tail Pointer to last node in singly-linked list.
2500  */
2501 #define k_fifo_put_list(fifo, head, tail) \
2502 	({ \
2503 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_list, fifo, head, tail); \
2504 	k_queue_append_list(&(fifo)->_queue, head, tail); \
2505 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_list, fifo, head, tail); \
2506 	})
2507 
2508 /**
2509  * @brief Atomically add a list of elements to a FIFO queue.
2510  *
2511  * This routine adds a list of data items to @a fifo in one operation.
2512  * The data items must be in a singly-linked list implemented using a
2513  * sys_slist_t object. Upon completion, the sys_slist_t object is invalid
2514  * and must be re-initialized via sys_slist_init().
2515  *
2516  * @funcprops \isr_ok
2517  *
2518  * @param fifo Address of the FIFO queue.
2519  * @param list Pointer to sys_slist_t object.
2520  */
2521 #define k_fifo_put_slist(fifo, list) \
2522 	({ \
2523 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_slist, fifo, list); \
2524 	k_queue_merge_slist(&(fifo)->_queue, list); \
2525 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_slist, fifo, list); \
2526 	})
2527 
2528 /**
2529  * @brief Get an element from a FIFO queue.
2530  *
2531  * This routine removes a data item from @a fifo in a "first in, first out"
2532  * manner. The first word of the data item is reserved for the kernel's use.
2533  *
2534  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2535  *
2536  * @funcprops \isr_ok
2537  *
2538  * @param fifo Address of the FIFO queue.
2539  * @param timeout Waiting period to obtain a data item,
2540  *                or one of the special values K_NO_WAIT and K_FOREVER.
2541  *
2542  * @return Address of the data item if successful; NULL if returned
2543  * without waiting, or waiting period timed out.
2544  */
2545 #define k_fifo_get(fifo, timeout) \
2546 	({ \
2547 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, get, fifo, timeout); \
2548 	void *fg_ret = k_queue_get(&(fifo)->_queue, timeout); \
2549 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, get, fifo, timeout, fg_ret); \
2550 	fg_ret; \
2551 	})
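
/*
 * Example: a sketch of a producer/consumer pair over a FIFO queue;
 * struct work_item and work_fifo are hypothetical. As with k_queue,
 * the first word of each item is reserved for the kernel.
 *
 *	struct work_item {
 *		void *fifo_reserved;	// first word: for kernel use only
 *		int value;
 *	};
 *
 *	K_FIFO_DEFINE(work_fifo);
 *
 *	static struct work_item item = { .value = 1 };
 *
 *	k_fifo_put(&work_fifo, &item);			// producer (thread or ISR)
 *
 *	struct work_item *it = k_fifo_get(&work_fifo, K_FOREVER);	// consumer
 */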
2552 
2553 /**
2554  * @brief Query a FIFO queue to see if it has data available.
2555  *
2556  * Note that the data might already be gone by the time this function returns
2557  * if other threads are also trying to read from the FIFO.
2558  *
2559  * @funcprops \isr_ok
2560  *
2561  * @param fifo Address of the FIFO queue.
2562  *
2563  * @return Non-zero if the FIFO queue is empty.
2564  * @return 0 if data is available.
2565  */
2566 #define k_fifo_is_empty(fifo) \
2567 	k_queue_is_empty(&(fifo)->_queue)
2568 
2569 /**
2570  * @brief Peek element at the head of a FIFO queue.
2571  *
2572  * Return element from the head of FIFO queue without removing it. A use case
2573  * for this is if elements of the FIFO object are themselves containers. Then
2574  * on each iteration of processing, the head container is peeked, some data
2575  * is processed out of it, and only when the container is empty is it
2576  * completely removed from the FIFO queue.
2577  *
2578  * @param fifo Address of the FIFO queue.
2579  *
2580  * @return Head element, or NULL if the FIFO queue is empty.
2581  */
2582 #define k_fifo_peek_head(fifo) \
2583 	({ \
2584 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_head, fifo); \
2585 	void *fph_ret = k_queue_peek_head(&(fifo)->_queue); \
2586 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_head, fifo, fph_ret); \
2587 	fph_ret; \
2588 	})
2589 
2590 /**
2591  * @brief Peek element at the tail of FIFO queue.
2592  *
2593  * Return element from the tail of FIFO queue (without removing it). A use case
2594  * for this is if elements of the FIFO queue are themselves containers. Then
2595  * it may be useful to add more data to the last container in a FIFO queue.
2596  *
2597  * @param fifo Address of the FIFO queue.
2598  *
2599  * @return Tail element, or NULL if the FIFO queue is empty.
2600  */
2601 #define k_fifo_peek_tail(fifo) \
2602 	({ \
2603 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_tail, fifo); \
2604 	void *fpt_ret = k_queue_peek_tail(&(fifo)->_queue); \
2605 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_tail, fifo, fpt_ret); \
2606 	fpt_ret; \
2607 	})
2608 
2609 /**
2610  * @brief Statically define and initialize a FIFO queue.
2611  *
2612  * The FIFO queue can be accessed outside the module where it is defined using:
2613  *
2614  * @code extern struct k_fifo <name>; @endcode
2615  *
2616  * @param name Name of the FIFO queue.
2617  */
2618 #define K_FIFO_DEFINE(name) \
2619 	STRUCT_SECTION_ITERABLE(k_fifo, name) = \
2620 		Z_FIFO_INITIALIZER(name)
2621 
2622 /** @} */
2623 
2624 struct k_lifo {
2625 	struct k_queue _queue;
2626 #ifdef CONFIG_OBJ_CORE_LIFO
2627 	struct k_obj_core  obj_core;
2628 #endif
2629 };
2630 
2631 /**
2632  * @cond INTERNAL_HIDDEN
2633  */
2634 
2635 #define Z_LIFO_INITIALIZER(obj) \
2636 	{ \
2637 	._queue = Z_QUEUE_INITIALIZER(obj._queue) \
2638 	}
2639 
2640 /**
2641  * INTERNAL_HIDDEN @endcond
2642  */
2643 
2644 /**
2645  * @defgroup lifo_apis LIFO APIs
2646  * @ingroup kernel_apis
2647  * @{
2648  */
2649 
2650 /**
2651  * @brief Initialize a LIFO queue.
2652  *
2653  * This routine initializes a LIFO queue object, prior to its first use.
2654  *
2655  * @param lifo Address of the LIFO queue.
2656  */
2657 #define k_lifo_init(lifo)                                    \
2658 	({                                                   \
2659 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, init, lifo); \
2660 	k_queue_init(&(lifo)->_queue);                       \
2661 	K_OBJ_CORE_INIT(K_OBJ_CORE(lifo), _obj_type_lifo);   \
2662 	K_OBJ_CORE_LINK(K_OBJ_CORE(lifo));                   \
2663 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, init, lifo);  \
2664 	})
2665 
2666 /**
2667  * @brief Add an element to a LIFO queue.
2668  *
2669  * This routine adds a data item to @a lifo. A LIFO queue data item must be
2670  * aligned on a word boundary, and the first word of the item is
2671  * reserved for the kernel's use.
2672  *
2673  * @funcprops \isr_ok
2674  *
2675  * @param lifo Address of the LIFO queue.
2676  * @param data Address of the data item.
2677  */
2678 #define k_lifo_put(lifo, data) \
2679 	({ \
2680 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, put, lifo, data); \
2681 	k_queue_prepend(&(lifo)->_queue, data); \
2682 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, put, lifo, data); \
2683 	})
2684 
2685 /**
2686  * @brief Add an element to a LIFO queue.
2687  *
2688  * This routine adds a data item to @a lifo. There is an implicit memory
2689  * allocation to create an additional temporary bookkeeping data structure from
2690  * the calling thread's resource pool, which is automatically freed when the
2691  * item is removed. The data itself is not copied.
2692  *
2693  * @funcprops \isr_ok
2694  *
2695  * @param lifo Address of the LIFO.
2696  * @param data Address of the data item.
2697  *
2698  * @retval 0 on success
2699  * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2700  */
2701 #define k_lifo_alloc_put(lifo, data) \
2702 	({ \
2703 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, alloc_put, lifo, data); \
2704 	int lap_ret = k_queue_alloc_prepend(&(lifo)->_queue, data); \
2705 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, alloc_put, lifo, data, lap_ret); \
2706 	lap_ret; \
2707 	})
2708 
2709 /**
2710  * @brief Get an element from a LIFO queue.
2711  *
2712  * This routine removes a data item from @a lifo in a "last in, first out"
2713  * manner. The first word of the data item is reserved for the kernel's use.
2714  *
2715  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2716  *
2717  * @funcprops \isr_ok
2718  *
2719  * @param lifo Address of the LIFO queue.
2720  * @param timeout Waiting period to obtain a data item,
2721  *                or one of the special values K_NO_WAIT and K_FOREVER.
2722  *
2723  * @return Address of the data item if successful; NULL if returned
2724  * without waiting, or waiting period timed out.
2725  */
2726 #define k_lifo_get(lifo, timeout) \
2727 	({ \
2728 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, get, lifo, timeout); \
2729 	void *lg_ret = k_queue_get(&(lifo)->_queue, timeout); \
2730 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, get, lifo, timeout, lg_ret); \
2731 	lg_ret; \
2732 	})
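
/*
 * Example: a sketch of a LIFO used as a buffer free list, so the most
 * recently released (likely cache-warm) buffer is handed out first;
 * struct buf, pool and free_bufs are hypothetical.
 *
 *	struct buf {
 *		void *lifo_reserved;	// first word: for kernel use only
 *		uint8_t data[64];
 *	};
 *
 *	K_LIFO_DEFINE(free_bufs);
 *
 *	k_lifo_put(&free_bufs, &pool[i]);		// release a buffer
 *
 *	struct buf *b = k_lifo_get(&free_bufs, K_FOREVER);	// acquire one
 */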
2733 
2734 /**
2735  * @brief Statically define and initialize a LIFO queue.
2736  *
2737  * The LIFO queue can be accessed outside the module where it is defined using:
2738  *
2739  * @code extern struct k_lifo <name>; @endcode
2740  *
2741  * @param name Name of the LIFO queue.
2742  */
2743 #define K_LIFO_DEFINE(name) \
2744 	STRUCT_SECTION_ITERABLE(k_lifo, name) = \
2745 		Z_LIFO_INITIALIZER(name)
2746 
2747 /** @} */
2748 
2749 /**
2750  * @cond INTERNAL_HIDDEN
2751  */
2752 #define K_STACK_FLAG_ALLOC	((uint8_t)1)	/* Buffer was allocated */
2753 
2754 typedef uintptr_t stack_data_t;
2755 
2756 struct k_stack {
2757 	_wait_q_t wait_q;
2758 	struct k_spinlock lock;
2759 	stack_data_t *base, *next, *top;
2760 
2761 	uint8_t flags;
2762 
2763 	SYS_PORT_TRACING_TRACKING_FIELD(k_stack)
2764 
2765 #ifdef CONFIG_OBJ_CORE_STACK
2766 	struct k_obj_core  obj_core;
2767 #endif
2768 };
2769 
2770 #define Z_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
2771 	{ \
2772 	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q),	\
2773 	.base = stack_buffer, \
2774 	.next = stack_buffer, \
2775 	.top = stack_buffer + stack_num_entries, \
2776 	}
2777 
2778 /**
2779  * INTERNAL_HIDDEN @endcond
2780  */
2781 
2782 /**
2783  * @defgroup stack_apis Stack APIs
2784  * @ingroup kernel_apis
2785  * @{
2786  */
2787 
2788 /**
2789  * @brief Initialize a stack.
2790  *
2791  * This routine initializes a stack object, prior to its first use.
2792  *
2793  * @param stack Address of the stack.
2794  * @param buffer Address of array used to hold stacked values.
2795  * @param num_entries Maximum number of values that can be stacked.
2796  */
2797 void k_stack_init(struct k_stack *stack,
2798 		  stack_data_t *buffer, uint32_t num_entries);
2799 
2800 
2801 /**
2802  * @brief Initialize a stack.
2803  *
2804  * This routine initializes a stack object, prior to its first use. Internal
2805  * buffers will be allocated from the calling thread's resource pool.
2806  * This memory will be released if k_stack_cleanup() is called, or
2807  * userspace is enabled and the stack object loses all references to it.
2808  *
2809  * @param stack Address of the stack.
2810  * @param num_entries Maximum number of values that can be stacked.
2811  *
2812  * @return -ENOMEM if memory couldn't be allocated
2813  */
2814 
2815 __syscall int32_t k_stack_alloc_init(struct k_stack *stack,
2816 				   uint32_t num_entries);
2817 
2818 /**
2819  * @brief Release a stack's allocated buffer
2820  *
2821  * If a stack object was given a dynamically allocated buffer via
2822  * k_stack_alloc_init(), this will free it. This function does nothing
2823  * if the buffer wasn't dynamically allocated.
2824  *
2825  * @param stack Address of the stack.
2826  * @retval 0 on success
2827  * @retval -EAGAIN when object is still in use
2828  */
2829 int k_stack_cleanup(struct k_stack *stack);
2830 
2831 /**
2832  * @brief Push an element onto a stack.
2833  *
2834  * This routine adds a stack_data_t value @a data to @a stack.
2835  *
2836  * @funcprops \isr_ok
2837  *
2838  * @param stack Address of the stack.
2839  * @param data Value to push onto the stack.
2840  *
2841  * @retval 0 on success
2842  * @retval -ENOMEM if stack is full
2843  */
2844 __syscall int k_stack_push(struct k_stack *stack, stack_data_t data);
2845 
2846 /**
2847  * @brief Pop an element from a stack.
2848  *
2849  * This routine removes a stack_data_t value from @a stack in a "last in,
2850  * first out" manner and stores the value in @a data.
2851  *
2852  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2853  *
2854  * @funcprops \isr_ok
2855  *
2856  * @param stack Address of the stack.
2857  * @param data Address of area to hold the value popped from the stack.
2858  * @param timeout Waiting period to obtain a value,
2859  *                or one of the special values K_NO_WAIT and
2860  *                K_FOREVER.
2861  *
2862  * @retval 0 Element popped from stack.
2863  * @retval -EBUSY Returned without waiting.
2864  * @retval -EAGAIN Waiting period timed out.
2865  */
2866 __syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data,
2867 			  k_timeout_t timeout);
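
/*
 * Example: a sketch of a k_stack used as a pool of slot indices.
 * Values are plain stack_data_t words, not linked items, so no
 * reserved word is needed; free_slots is a hypothetical name.
 *
 *	K_STACK_DEFINE(free_slots, 4);
 *
 *	for (stack_data_t i = 0; i < 4; i++) {
 *		k_stack_push(&free_slots, i);
 *	}
 *
 *	stack_data_t slot;
 *
 *	if (k_stack_pop(&free_slots, &slot, K_NO_WAIT) == 0) {
 *		// slot acquired
 *	}
 */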
2868 
2869 /**
2870  * @brief Statically define and initialize a stack
2871  *
2872  * The stack can be accessed outside the module where it is defined using:
2873  *
2874  * @code extern struct k_stack <name>; @endcode
2875  *
2876  * @param name Name of the stack.
2877  * @param stack_num_entries Maximum number of values that can be stacked.
2878  */
2879 #define K_STACK_DEFINE(name, stack_num_entries)                \
2880 	stack_data_t __noinit                                  \
2881 		_k_stack_buf_##name[stack_num_entries];        \
2882 	STRUCT_SECTION_ITERABLE(k_stack, name) =               \
2883 		Z_STACK_INITIALIZER(name, _k_stack_buf_##name, \
2884 				    stack_num_entries)
2885 
2886 /** @} */
2887 
2888 /**
2889  * @cond INTERNAL_HIDDEN
2890  */
2891 
2892 struct k_work;
2893 struct k_work_q;
2894 struct k_work_queue_config;
2895 extern struct k_work_q k_sys_work_q;
2896 
2897 /**
2898  * INTERNAL_HIDDEN @endcond
2899  */
2900 
2901 /**
2902  * @defgroup mutex_apis Mutex APIs
2903  * @ingroup kernel_apis
2904  * @{
2905  */
2906 
2907 /**
2908  * Mutex Structure
2909  * @ingroup mutex_apis
2910  */
2911 struct k_mutex {
2912 	/** Mutex wait queue */
2913 	_wait_q_t wait_q;
2914 	/** Mutex owner */
2915 	struct k_thread *owner;
2916 
2917 	/** Current lock count */
2918 	uint32_t lock_count;
2919 
2920 	/** Original thread priority */
2921 	int owner_orig_prio;
2922 
2923 	SYS_PORT_TRACING_TRACKING_FIELD(k_mutex)
2924 
2925 #ifdef CONFIG_OBJ_CORE_MUTEX
2926 	struct k_obj_core obj_core;
2927 #endif
2928 };
2929 
2930 /**
2931  * @cond INTERNAL_HIDDEN
2932  */
2933 #define Z_MUTEX_INITIALIZER(obj) \
2934 	{ \
2935 	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2936 	.owner = NULL, \
2937 	.lock_count = 0, \
2938 	.owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
2939 	}
2940 
2941 /**
2942  * INTERNAL_HIDDEN @endcond
2943  */
2944 
2945 /**
2946  * @brief Statically define and initialize a mutex.
2947  *
2948  * The mutex can be accessed outside the module where it is defined using:
2949  *
2950  * @code extern struct k_mutex <name>; @endcode
2951  *
2952  * @param name Name of the mutex.
2953  */
2954 #define K_MUTEX_DEFINE(name) \
2955 	STRUCT_SECTION_ITERABLE(k_mutex, name) = \
2956 		Z_MUTEX_INITIALIZER(name)
2957 
2958 /**
2959  * @brief Initialize a mutex.
2960  *
2961  * This routine initializes a mutex object, prior to its first use.
2962  *
2963  * Upon completion, the mutex is available and does not have an owner.
2964  *
2965  * @param mutex Address of the mutex.
2966  *
2967  * @retval 0 Mutex object created
2968  *
2969  */
2970 __syscall int k_mutex_init(struct k_mutex *mutex);
2971 
2972 
2973 /**
2974  * @brief Lock a mutex.
2975  *
2976  * This routine locks @a mutex. If the mutex is locked by another thread,
2977  * the calling thread waits until the mutex becomes available or until
2978  * a timeout occurs.
2979  *
2980  * A thread is permitted to lock a mutex it has already locked. The operation
2981  * completes immediately and the lock count is increased by 1.
2982  *
2983  * Mutexes may not be locked in ISRs.
2984  *
2985  * @param mutex Address of the mutex.
2986  * @param timeout Waiting period to lock the mutex,
2987  *                or one of the special values K_NO_WAIT and
2988  *                K_FOREVER.
2989  *
2990  * @retval 0 Mutex locked.
2991  * @retval -EBUSY Returned without waiting.
2992  * @retval -EAGAIN Waiting period timed out.
2993  */
2994 __syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout);
2995 
2996 /**
2997  * @brief Unlock a mutex.
2998  *
2999  * This routine unlocks @a mutex. The mutex must already be locked by the
3000  * calling thread.
3001  *
3002  * The mutex cannot be claimed by another thread until it has been unlocked by
3003  * the calling thread as many times as it was previously locked by that
3004  * thread.
3005  *
3006  * Mutexes may not be unlocked in ISRs, as mutexes must only be manipulated
3007  * in thread context due to ownership and priority inheritance semantics.
3008  *
3009  * @param mutex Address of the mutex.
3010  *
3011  * @retval 0 Mutex unlocked.
3012  * @retval -EPERM The current thread does not own the mutex
3013  * @retval -EINVAL The mutex is not locked
3014  *
3015  */
3016 __syscall int k_mutex_unlock(struct k_mutex *mutex);
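
/*
 * Example: a sketch of guarding shared state with a bounded lock
 * attempt; state_lock and shared_counter are hypothetical.
 *
 *	K_MUTEX_DEFINE(state_lock);
 *
 *	if (k_mutex_lock(&state_lock, K_MSEC(50)) == 0) {
 *		shared_counter++;
 *		k_mutex_unlock(&state_lock);
 *	} else {
 *		// -EAGAIN: could not acquire the mutex within 50 ms
 *	}
 */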
3017 
3018 /**
3019  * @}
3020  */
3021 
3022 
3023 struct k_condvar {
3024 	_wait_q_t wait_q;
3025 
3026 #ifdef CONFIG_OBJ_CORE_CONDVAR
3027 	struct k_obj_core  obj_core;
3028 #endif
3029 };
3030 
3031 #define Z_CONDVAR_INITIALIZER(obj)                                             \
3032 	{                                                                      \
3033 		.wait_q = Z_WAIT_Q_INIT(&obj.wait_q),                          \
3034 	}
3035 
3036 /**
3037  * @defgroup condvar_apis Condition Variables APIs
3038  * @ingroup kernel_apis
3039  * @{
3040  */
3041 
3042 /**
3043  * @brief Initialize a condition variable
3044  *
3045  * @param condvar pointer to a @p k_condvar structure
3046  * @retval 0 Condition variable created successfully
3047  */
3048 __syscall int k_condvar_init(struct k_condvar *condvar);
3049 
3050 /**
3051  * @brief Signals one thread that is pending on the condition variable
3052  *
3053  * @param condvar pointer to a @p k_condvar structure
3054  * @retval 0 On success
3055  */
3056 __syscall int k_condvar_signal(struct k_condvar *condvar);
3057 
3058 /**
3059  * @brief Unblock all threads that are pending on the condition
3060  * variable
3061  *
3062  * @param condvar pointer to a @p k_condvar structure
3063  * @return An integer with the number of woken threads on success
3064  */
3065 __syscall int k_condvar_broadcast(struct k_condvar *condvar);
3066 
3067 /**
3068  * @brief Waits on the condition variable releasing the mutex lock
3069  *
3070  * Atomically releases the currently owned mutex, blocks the current thread
3071  * waiting on the condition variable specified by @a condvar,
3072  * and finally acquires the mutex again.
3073  *
3074  * The waiting thread unblocks only after another thread calls
3075  * k_condvar_signal, or k_condvar_broadcast with the same condition variable.
3076  *
3077  * @param condvar pointer to a @p k_condvar structure
3078  * @param mutex Address of the mutex.
3079  * @param timeout Waiting period for the condition variable
3080  *                or one of the special values K_NO_WAIT and K_FOREVER.
3081  * @retval 0 On success
3082  * @retval -EAGAIN Waiting period timed out.
3083  */
3084 __syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
3085 			     k_timeout_t timeout);
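
/*
 * Example: a sketch of the canonical wait-with-predicate pattern. The
 * predicate must be re-checked in a loop after every wakeup; lock,
 * nonempty and queue_len are hypothetical.
 *
 *	// consumer:
 *	k_mutex_lock(&lock, K_FOREVER);
 *	while (queue_len == 0) {
 *		k_condvar_wait(&nonempty, &lock, K_FOREVER);
 *	}
 *	queue_len--;
 *	k_mutex_unlock(&lock);
 *
 *	// producer:
 *	k_mutex_lock(&lock, K_FOREVER);
 *	queue_len++;
 *	k_condvar_signal(&nonempty);
 *	k_mutex_unlock(&lock);
 */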
3086 
3087 /**
3088  * @brief Statically define and initialize a condition variable.
3089  *
3090  * The condition variable can be accessed outside the module where it is
3091  * defined using:
3092  *
3093  * @code extern struct k_condvar <name>; @endcode
3094  *
3095  * @param name Name of the condition variable.
3096  */
3097 #define K_CONDVAR_DEFINE(name)                                                 \
3098 	STRUCT_SECTION_ITERABLE(k_condvar, name) =                             \
3099 		Z_CONDVAR_INITIALIZER(name)
3100 /**
3101  * @}
3102  */
3103 
3104 /**
3105  * @cond INTERNAL_HIDDEN
3106  */
3107 
3108 struct k_sem {
3109 	_wait_q_t wait_q;
3110 	unsigned int count;
3111 	unsigned int limit;
3112 
3113 	_POLL_EVENT;
3114 
3115 	SYS_PORT_TRACING_TRACKING_FIELD(k_sem)
3116 
3117 #ifdef CONFIG_OBJ_CORE_SEM
3118 	struct k_obj_core  obj_core;
3119 #endif
3120 };
3121 
3122 #define Z_SEM_INITIALIZER(obj, initial_count, count_limit) \
3123 	{ \
3124 	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
3125 	.count = initial_count, \
3126 	.limit = count_limit, \
3127 	_POLL_EVENT_OBJ_INIT(obj) \
3128 	}
3129 
3130 /**
3131  * INTERNAL_HIDDEN @endcond
3132  */
3133 
3134 /**
3135  * @defgroup semaphore_apis Semaphore APIs
3136  * @ingroup kernel_apis
3137  * @{
3138  */
3139 
3140 /**
3141  * @brief Maximum limit value allowed for a semaphore.
3142  *
3143  * This is intended for use when a semaphore does not have
3144  * an explicit maximum limit, and instead is just used for
3145  * counting purposes.
3146  *
3147  */
3148 #define K_SEM_MAX_LIMIT UINT_MAX
3149 
3150 /**
3151  * @brief Initialize a semaphore.
3152  *
3153  * This routine initializes a semaphore object, prior to its first use.
3154  *
3155  * @param sem Address of the semaphore.
3156  * @param initial_count Initial semaphore count.
3157  * @param limit Maximum permitted semaphore count.
3158  *
3159  * @see K_SEM_MAX_LIMIT
3160  *
3161  * @retval 0 Semaphore created successfully
3162  * @retval -EINVAL Invalid values
3163  *
3164  */
3165 __syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
3166 			  unsigned int limit);
3167 
3168 /**
3169  * @brief Take a semaphore.
3170  *
3171  * This routine takes @a sem.
3172  *
3173  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
3174  *
3175  * @funcprops \isr_ok
3176  *
3177  * @param sem Address of the semaphore.
3178  * @param timeout Waiting period to take the semaphore,
3179  *                or one of the special values K_NO_WAIT and K_FOREVER.
3180  *
3181  * @retval 0 Semaphore taken.
3182  * @retval -EBUSY Returned without waiting.
3183  * @retval -EAGAIN Waiting period timed out,
3184  *			or the semaphore was reset during the waiting period.
3185  */
3186 __syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
3187 
3188 /**
3189  * @brief Give a semaphore.
3190  *
3191  * This routine gives @a sem, unless the semaphore is already at its maximum
3192  * permitted count.
3193  *
3194  * @funcprops \isr_ok
3195  *
3196  * @param sem Address of the semaphore.
3197  */
3198 __syscall void k_sem_give(struct k_sem *sem);
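
/*
 * Example: a sketch of an ISR signaling a thread through a binary
 * semaphore (initial count 0, limit 1); my_isr and my_thread are
 * hypothetical.
 *
 *	K_SEM_DEFINE(data_ready, 0, 1);
 *
 *	void my_isr(const void *arg)
 *	{
 *		k_sem_give(&data_ready);	// isr-ok
 *	}
 *
 *	void my_thread(void)
 *	{
 *		while (1) {
 *			k_sem_take(&data_ready, K_FOREVER);
 *			// process the data
 *		}
 *	}
 */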
3199 
3200 /**
3201  * @brief Resets a semaphore's count to zero.
3202  *
3203  * This routine sets the count of @a sem to zero.
3204  * Any outstanding semaphore takes will be aborted
3205  * with -EAGAIN.
3206  *
3207  * @param sem Address of the semaphore.
3208  */
3209 __syscall void k_sem_reset(struct k_sem *sem);
3210 
3211 /**
3212  * @brief Get a semaphore's count.
3213  *
3214  * This routine returns the current count of @a sem.
3215  *
3216  * @param sem Address of the semaphore.
3217  *
3218  * @return Current semaphore count.
3219  */
3220 __syscall unsigned int k_sem_count_get(struct k_sem *sem);
3221 
3222 /**
3223  * @internal
3224  */
3225 static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
3226 {
3227 	return sem->count;
3228 }
3229 
3230 /**
3231  * @brief Statically define and initialize a semaphore.
3232  *
3233  * The semaphore can be accessed outside the module where it is defined using:
3234  *
3235  * @code extern struct k_sem <name>; @endcode
3236  *
3237  * @param name Name of the semaphore.
3238  * @param initial_count Initial semaphore count.
3239  * @param count_limit Maximum permitted semaphore count.
3240  */
3241 #define K_SEM_DEFINE(name, initial_count, count_limit) \
3242 	STRUCT_SECTION_ITERABLE(k_sem, name) = \
3243 		Z_SEM_INITIALIZER(name, initial_count, count_limit); \
3244 	BUILD_ASSERT(((count_limit) != 0) && \
3245 		     ((initial_count) <= (count_limit)) && \
3246 			 ((count_limit) <= K_SEM_MAX_LIMIT));

/** @} */

/**
 * @cond INTERNAL_HIDDEN
 */

struct k_work_delayable;
struct k_work_sync;

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @defgroup workqueue_apis Work Queue APIs
 * @ingroup kernel_apis
 * @{
 */

/** @brief The signature for a work item handler function.
 *
 * The function will be invoked by the thread animating a work queue.
 *
 * @param work the work item that provided the handler.
 */
typedef void (*k_work_handler_t)(struct k_work *work);

/** @brief Initialize a (non-delayable) work structure.
 *
 * This must be invoked before submitting a work structure for the first time.
 * It need not be invoked again on the same work structure.  It can be
 * re-invoked to change the associated handler, but this must be done when the
 * work item is idle.
 *
 * @funcprops \isr_ok
 *
 * @param work the work structure to be initialized.
 *
 * @param handler the handler to be invoked by the work item.
 */
void k_work_init(struct k_work *work,
		  k_work_handler_t handler);

/** @brief Busy state flags from the work item.
 *
 * A zero return value indicates the work item appears to be idle.
 *
 * @note This is a live snapshot of state, which may change before the result
 * is checked.  Use locks where appropriate.
 *
 * @funcprops \isr_ok
 *
 * @param work pointer to the work item.
 *
 * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED,
 * K_WORK_RUNNING, and K_WORK_CANCELING.
 */
int k_work_busy_get(const struct k_work *work);

/** @brief Test whether a work item is currently pending.
 *
 * Wrapper to determine whether a work item is in a non-idle state.
 *
 * @note This is a live snapshot of state, which may change before the result
 * is checked.  Use locks where appropriate.
 *
 * @funcprops \isr_ok
 *
 * @param work pointer to the work item.
 *
 * @return true if and only if k_work_busy_get() returns a non-zero value.
 */
static inline bool k_work_is_pending(const struct k_work *work);

/** @brief Submit a work item to a queue.
 *
 * @param queue pointer to the work queue on which the item should run.  If
 * NULL the queue from the most recent submission will be used.
 *
 * @funcprops \isr_ok
 *
 * @param work pointer to the work item.
 *
 * @retval 0 if work was already submitted to a queue
 * @retval 1 if work was not submitted and has been queued to @p queue
 * @retval 2 if work was running and has been queued to the queue that was
 * running it
 * @retval -EBUSY
 * * if work submission was rejected because the work item is cancelling; or
 * * @p queue is draining; or
 * * @p queue is plugged.
 * @retval -EINVAL if @p queue is null and the work item has never been run.
 * @retval -ENODEV if @p queue has not been started.
 */
int k_work_submit_to_queue(struct k_work_q *queue,
			   struct k_work *work);

/** @brief Submit a work item to the system queue.
 *
 * @funcprops \isr_ok
 *
 * @param work pointer to the work item.
 *
 * @return as with k_work_submit_to_queue().
 */
extern int k_work_submit(struct k_work *work);
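
/*
 * Example: a minimal sketch of defining a work item and submitting it to
 * the system work queue.  The handler and trigger function names are
 * illustrative.
 *
 * @code
 * static void print_handler(struct k_work *work)
 * {
 *	// runs in the system work queue thread, not in the submitter
 * }
 *
 * static K_WORK_DEFINE(print_work, print_handler);
 *
 * void trigger_from_isr(void)
 * {
 *	// isr_ok: submission only queues the item; the handler runs later
 *	(void)k_work_submit(&print_work);
 * }
 * @endcode
 */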

/** @brief Wait for last-submitted instance to complete.
 *
 * Resubmissions may occur while waiting, including chained submissions (from
 * within the handler).
 *
 * @note Be careful of caller and work queue thread relative priority.  If
 * this function sleeps it will not return until the work queue thread
 * completes the tasks that allow this thread to resume.
 *
 * @note Behavior is undefined if this function is invoked on @p work from a
 * work queue running @p work.
 *
 * @param work pointer to the work item.
 *
 * @param sync pointer to an opaque item containing state related to the
 * pending cancellation.  The object must persist until the call returns, and
 * be accessible from both the caller thread and the work queue thread.  The
 * object must not be used for any other flush or cancel operation until this
 * one completes.  On architectures with CONFIG_KERNEL_COHERENCE the object
 * must be allocated in coherent memory.
 *
 * @retval true if call had to wait for completion
 * @retval false if work was already idle
 */
bool k_work_flush(struct k_work *work,
		  struct k_work_sync *sync);

/** @brief Cancel a work item.
 *
 * This attempts to prevent a pending (non-delayable) work item from being
 * processed by removing it from the work queue.  If the item is being
 * processed, the work item will continue to be processed, but resubmissions
 * are rejected until cancellation completes.
 *
 * If this returns zero cancellation is complete, otherwise something
 * (probably a work queue thread) is still referencing the item.
 *
 * See also k_work_cancel_sync().
 *
 * @funcprops \isr_ok
 *
 * @param work pointer to the work item.
 *
 * @return the k_work_busy_get() status indicating the state of the item after all
 * cancellation steps performed by this call are completed.
 */
int k_work_cancel(struct k_work *work);

/** @brief Cancel a work item and wait for it to complete.
 *
 * Same as k_work_cancel() but does not return until cancellation is complete.
 * This can be invoked by a thread after k_work_cancel() to synchronize with a
 * previous cancellation.
 *
 * On return the work structure will be idle unless something submits it after
 * the cancellation was complete.
 *
 * @note Be careful of caller and work queue thread relative priority.  If
 * this function sleeps it will not return until the work queue thread
 * completes the tasks that allow this thread to resume.
 *
 * @note Behavior is undefined if this function is invoked on @p work from a
 * work queue running @p work.
 *
 * @param work pointer to the work item.
 *
 * @param sync pointer to an opaque item containing state related to the
 * pending cancellation.  The object must persist until the call returns, and
 * be accessible from both the caller thread and the work queue thread.  The
 * object must not be used for any other flush or cancel operation until this
 * one completes.  On architectures with CONFIG_KERNEL_COHERENCE the object
 * must be allocated in coherent memory.
 *
 * @retval true if work was pending (call had to wait for cancellation of a
 * running handler to complete, or scheduled or submitted operations were
 * cancelled);
 * @retval false otherwise
 */
bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync);

/** @brief Initialize a work queue structure.
 *
 * This must be invoked before starting a work queue structure for the first time.
 * It need not be invoked again on the same work queue structure.
 *
 * @funcprops \isr_ok
 *
 * @param queue the queue structure to be initialized.
 */
void k_work_queue_init(struct k_work_q *queue);

/** @brief Initialize a work queue.
 *
 * This configures the work queue thread and starts it running.  The function
 * should not be re-invoked on a queue.
 *
 * @param queue pointer to the queue structure. It must be initialized
 *        in zeroed/bss memory or with @ref k_work_queue_init before
 *        use.
 *
 * @param stack pointer to the work thread stack area.
 *
 * @param stack_size size of the work thread stack area, in bytes.
 *
 * @param prio initial thread priority
 *
 * @param cfg optional additional configuration parameters.  Pass @c
 * NULL if not required, to use the defaults documented in
 * k_work_queue_config.
 */
void k_work_queue_start(struct k_work_q *queue,
			k_thread_stack_t *stack, size_t stack_size,
			int prio, const struct k_work_queue_config *cfg);
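
/*
 * Example: a minimal sketch of starting a dedicated work queue.  The stack
 * size, priority, and symbol names are illustrative values.
 *
 * @code
 * #define MY_WQ_STACK_SIZE 1024
 * #define MY_WQ_PRIO       5
 *
 * K_THREAD_STACK_DEFINE(my_wq_stack, MY_WQ_STACK_SIZE);
 * static struct k_work_q my_wq;
 *
 * void start_my_wq(void)
 * {
 *	// The config is not retained, so a stack local is fine here.
 *	struct k_work_queue_config cfg = {
 *		.name = "my_wq",
 *		.no_yield = false,
 *	};
 *
 *	k_work_queue_init(&my_wq);
 *	k_work_queue_start(&my_wq, my_wq_stack,
 *			   K_THREAD_STACK_SIZEOF(my_wq_stack),
 *			   MY_WQ_PRIO, &cfg);
 * }
 * @endcode
 */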

/** @brief Access the thread that animates a work queue.
 *
 * This is necessary to grant a work queue thread access to things the work
 * items it will process are expected to use.
 *
 * @param queue pointer to the queue structure.
 *
 * @return the thread associated with the work queue.
 */
static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue);

/** @brief Wait until the work queue has drained, optionally plugging it.
 *
 * This blocks submission to the work queue except when coming from the queue
 * thread, and blocks the caller until no more work items are available in the
 * queue.
 *
 * If @p plug is true then submission will continue to be blocked after the
 * drain operation completes until k_work_queue_unplug() is invoked.
 *
 * Note that work items that are delayed are not yet associated with their
 * work queue.  They must be cancelled externally if a goal is to ensure the
 * work queue remains empty.  The @p plug feature can be used to prevent
 * delayed items from being submitted after the drain completes.
 *
 * @param queue pointer to the queue structure.
 *
 * @param plug if true the work queue will continue to block new submissions
 * after all items have drained.
 *
 * @retval 1 if call had to wait for the drain to complete
 * @retval 0 if call did not have to wait
 * @retval negative if wait was interrupted or failed
 */
int k_work_queue_drain(struct k_work_q *queue, bool plug);

/** @brief Release a work queue to accept new submissions.
 *
 * This releases the block on new submissions placed when k_work_queue_drain()
 * is invoked with the @p plug option enabled.  If this is invoked before the
 * drain completes new items may be submitted as soon as the drain completes.
 *
 * @funcprops \isr_ok
 *
 * @param queue pointer to the queue structure.
 *
 * @retval 0 if successfully unplugged
 * @retval -EALREADY if the work queue was not plugged.
 */
int k_work_queue_unplug(struct k_work_q *queue);
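
/*
 * Example: a minimal sketch of quiescing a queue before a mode change.
 * my_wq and enter_low_power_mode() are hypothetical application symbols.
 *
 * @code
 * void quiesce_my_wq(void)
 * {
 *	// Drain pending items and plug the queue against new submissions.
 *	(void)k_work_queue_drain(&my_wq, true);
 *
 *	enter_low_power_mode();
 *
 *	// Re-open the queue for submissions.
 *	(void)k_work_queue_unplug(&my_wq);
 * }
 * @endcode
 */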

/** @brief Initialize a delayable work structure.
 *
 * This must be invoked before scheduling a delayable work structure for the
 * first time.  It need not be invoked again on the same work structure.  It
 * can be re-invoked to change the associated handler, but this must be done
 * when the work item is idle.
 *
 * @funcprops \isr_ok
 *
 * @param dwork the delayable work structure to be initialized.
 *
 * @param handler the handler to be invoked by the work item.
 */
void k_work_init_delayable(struct k_work_delayable *dwork,
			   k_work_handler_t handler);

/**
 * @brief Get the parent delayable work structure from a work pointer.
 *
 * This function is necessary when a @c k_work_handler_t function is passed to
 * k_work_schedule_for_queue() and the handler needs to access data from the
 * containing `k_work_delayable` structure.
 *
 * @param work Address passed to the work handler
 *
 * @return Address of the containing @c k_work_delayable structure.
 */
static inline struct k_work_delayable *
k_work_delayable_from_work(struct k_work *work);
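
/*
 * Example: a minimal sketch of recovering per-object context in a delayable
 * work handler.  struct my_device and its fields are illustrative.
 *
 * @code
 * struct my_device {
 *	struct k_work_delayable dwork;
 *	int retries;
 * };
 *
 * static void my_handler(struct k_work *work)
 * {
 *	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
 *	struct my_device *dev = CONTAINER_OF(dwork, struct my_device, dwork);
 *
 *	dev->retries++;
 * }
 * @endcode
 */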

/** @brief Busy state flags from the delayable work item.
 *
 * @funcprops \isr_ok
 *
 * @note This is a live snapshot of state, which may change before the result
 * can be inspected.  Use locks where appropriate.
 *
 * @param dwork pointer to the delayable work item.
 *
 * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED, K_WORK_RUNNING, and
 * K_WORK_CANCELING.  A zero return value indicates the work item appears to
 * be idle.
 */
int k_work_delayable_busy_get(const struct k_work_delayable *dwork);

/** @brief Test whether a delayed work item is currently pending.
 *
 * Wrapper to determine whether a delayed work item is in a non-idle state.
 *
 * @note This is a live snapshot of state, which may change before the result
 * can be inspected.  Use locks where appropriate.
 *
 * @funcprops \isr_ok
 *
 * @param dwork pointer to the delayable work item.
 *
 * @return true if and only if k_work_delayable_busy_get() returns a non-zero
 * value.
 */
static inline bool k_work_delayable_is_pending(
	const struct k_work_delayable *dwork);

/** @brief Get the absolute tick count at which a scheduled delayable work
 * will be submitted.
 *
 * @note This is a live snapshot of state, which may change before the result
 * can be inspected.  Use locks where appropriate.
 *
 * @funcprops \isr_ok
 *
 * @param dwork pointer to the delayable work item.
 *
 * @return the tick count when the timer that will schedule the work item will
 * expire, or the current tick count if the work is not scheduled.
 */
static inline k_ticks_t k_work_delayable_expires_get(
	const struct k_work_delayable *dwork);

/** @brief Get the number of ticks until a scheduled delayable work will be
 * submitted.
 *
 * @note This is a live snapshot of state, which may change before the result
 * can be inspected.  Use locks where appropriate.
 *
 * @funcprops \isr_ok
 *
 * @param dwork pointer to the delayable work item.
 *
 * @return the number of ticks until the timer that will schedule the work
 * item will expire, or zero if the item is not scheduled.
 */
static inline k_ticks_t k_work_delayable_remaining_get(
	const struct k_work_delayable *dwork);

/** @brief Submit an idle work item to a queue after a delay.
 *
 * Unlike k_work_reschedule_for_queue() this is a no-op if the work item is
 * already scheduled or submitted, even if @p delay is @c K_NO_WAIT.
 *
 * @funcprops \isr_ok
 *
 * @param queue the queue on which the work item should be submitted after the
 * delay.
 *
 * @param dwork pointer to the delayable work item.
 *
 * @param delay the time to wait before submitting the work item.  If @c
 * K_NO_WAIT and the work is not pending this is equivalent to
 * k_work_submit_to_queue().
 *
 * @retval 0 if work was already scheduled or submitted.
 * @retval 1 if work has been scheduled.
 * @retval -EBUSY if @p delay is @c K_NO_WAIT and
 *         k_work_submit_to_queue() fails with this code.
 * @retval -EINVAL if @p delay is @c K_NO_WAIT and
 *         k_work_submit_to_queue() fails with this code.
 * @retval -ENODEV if @p delay is @c K_NO_WAIT and
 *         k_work_submit_to_queue() fails with this code.
 */
int k_work_schedule_for_queue(struct k_work_q *queue,
			       struct k_work_delayable *dwork,
			       k_timeout_t delay);

/** @brief Submit an idle work item to the system work queue after a
 * delay.
 *
 * This is a thin wrapper around k_work_schedule_for_queue(), with all the API
 * characteristics of that function.
 *
 * @param dwork pointer to the delayable work item.
 *
 * @param delay the time to wait before submitting the work item.  If @c
 * K_NO_WAIT this is equivalent to k_work_submit_to_queue().
 *
 * @return as with k_work_schedule_for_queue().
 */
extern int k_work_schedule(struct k_work_delayable *dwork,
				   k_timeout_t delay);

/** @brief Reschedule a work item to a queue after a delay.
 *
 * Unlike k_work_schedule_for_queue() this function can change the deadline of
 * a scheduled work item, and will schedule a work item that is in any state
 * (e.g. is idle, submitted, or running).  This function does not affect
 * ("unsubmit") a work item that has been submitted to a queue.
 *
 * @funcprops \isr_ok
 *
 * @param queue the queue on which the work item should be submitted after the
 * delay.
 *
 * @param dwork pointer to the delayable work item.
 *
 * @param delay the time to wait before submitting the work item.  If @c
 * K_NO_WAIT this is equivalent to k_work_submit_to_queue() after canceling
 * any previous scheduled submission.
 *
 * @note If delay is @c K_NO_WAIT ("no delay") the return values are as with
 * k_work_submit_to_queue().
 *
 * @retval 0 if delay is @c K_NO_WAIT and work was already on a queue
 * @retval 1 if
 * * delay is @c K_NO_WAIT and work was not submitted but has now been queued
 *   to @p queue; or
 * * delay not @c K_NO_WAIT and work has been scheduled
 * @retval 2 if delay is @c K_NO_WAIT and work was running and has been queued
 * to the queue that was running it
 * @retval -EBUSY if @p delay is @c K_NO_WAIT and
 *         k_work_submit_to_queue() fails with this code.
 * @retval -EINVAL if @p delay is @c K_NO_WAIT and
 *         k_work_submit_to_queue() fails with this code.
 * @retval -ENODEV if @p delay is @c K_NO_WAIT and
 *         k_work_submit_to_queue() fails with this code.
 */
int k_work_reschedule_for_queue(struct k_work_q *queue,
				 struct k_work_delayable *dwork,
				 k_timeout_t delay);

/** @brief Reschedule a work item to the system work queue after a
 * delay.
 *
 * This is a thin wrapper around k_work_reschedule_for_queue(), with all the
 * API characteristics of that function.
 *
 * @param dwork pointer to the delayable work item.
 *
 * @param delay the time to wait before submitting the work item.
 *
 * @return as with k_work_reschedule_for_queue().
 */
extern int k_work_reschedule(struct k_work_delayable *dwork,
				     k_timeout_t delay);
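
/*
 * Example: a minimal debounce sketch built on the schedule/reschedule
 * distinction.  The 50 ms window and symbol names are illustrative.
 *
 * @code
 * static void debounce_handler(struct k_work *work)
 * {
 *	// runs once the input has been quiet for the full delay
 * }
 *
 * static K_WORK_DELAYABLE_DEFINE(debounce_work, debounce_handler);
 *
 * void on_input_event(void)
 * {
 *	// Each event pushes the deadline back; k_work_schedule() would
 *	// instead keep the first deadline.
 *	(void)k_work_reschedule(&debounce_work, K_MSEC(50));
 * }
 * @endcode
 */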

/** @brief Flush delayable work.
 *
 * If the work is scheduled, it is immediately submitted.  Then the caller
 * blocks until the work completes, as with k_work_flush().
 *
 * @note Be careful of caller and work queue thread relative priority.  If
 * this function sleeps it will not return until the work queue thread
 * completes the tasks that allow this thread to resume.
 *
 * @note Behavior is undefined if this function is invoked on @p dwork from a
 * work queue running @p dwork.
 *
 * @param dwork pointer to the delayable work item.
 *
 * @param sync pointer to an opaque item containing state related to the
 * pending cancellation.  The object must persist until the call returns, and
 * be accessible from both the caller thread and the work queue thread.  The
 * object must not be used for any other flush or cancel operation until this
 * one completes.  On architectures with CONFIG_KERNEL_COHERENCE the object
 * must be allocated in coherent memory.
 *
 * @retval true if call had to wait for completion
 * @retval false if work was already idle
 */
bool k_work_flush_delayable(struct k_work_delayable *dwork,
			    struct k_work_sync *sync);

/** @brief Cancel delayable work.
 *
 * Similar to k_work_cancel() but for delayable work.  If the work is
 * scheduled or submitted it is canceled.  This function does not wait for the
 * cancellation to complete.
 *
 * @note The work may still be running when this returns.  Use
 * k_work_flush_delayable() or k_work_cancel_delayable_sync() to ensure it is
 * not running.
 *
 * @note Canceling delayable work does not prevent rescheduling it.  It does
 * prevent submitting it until the cancellation completes.
 *
 * @funcprops \isr_ok
 *
 * @param dwork pointer to the delayable work item.
 *
 * @return the k_work_delayable_busy_get() status indicating the state of the
 * item after all cancellation steps performed by this call are completed.
 */
int k_work_cancel_delayable(struct k_work_delayable *dwork);

/** @brief Cancel delayable work and wait.
 *
 * Like k_work_cancel_delayable() but waits until the work becomes idle.
 *
 * @note Canceling delayable work does not prevent rescheduling it.  It does
 * prevent submitting it until the cancellation completes.
 *
 * @note Be careful of caller and work queue thread relative priority.  If
 * this function sleeps it will not return until the work queue thread
 * completes the tasks that allow this thread to resume.
 *
 * @note Behavior is undefined if this function is invoked on @p dwork from a
 * work queue running @p dwork.
 *
 * @param dwork pointer to the delayable work item.
 *
 * @param sync pointer to an opaque item containing state related to the
 * pending cancellation.  The object must persist until the call returns, and
 * be accessible from both the caller thread and the work queue thread.  The
 * object must not be used for any other flush or cancel operation until this
 * one completes.  On architectures with CONFIG_KERNEL_COHERENCE the object
 * must be allocated in coherent memory.
 *
 * @retval true if work was not idle (call had to wait for cancellation of a
 * running handler to complete, or scheduled or submitted operations were
 * cancelled);
 * @retval false otherwise
 */
bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
				  struct k_work_sync *sync);
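
/*
 * Example: a minimal teardown sketch stopping a device's periodic work
 * before releasing resources the handler uses.  struct my_device and the
 * function name are illustrative; dwork is assumed to be a member of the
 * device structure.
 *
 * @code
 * void my_device_stop(struct my_device *dev)
 * {
 *	struct k_work_sync sync;
 *
 *	// On return the handler is guaranteed not to be running.
 *	(void)k_work_cancel_delayable_sync(&dev->dwork, &sync);
 *
 *	// Now safe to release resources the handler touches.
 * }
 * @endcode
 */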

enum {
/**
 * @cond INTERNAL_HIDDEN
 */

	/* The atomic API is used for all work and queue flags fields to
	 * enforce sequential consistency in SMP environments.
	 */

	/* Bits that represent the work item states.  At least nine of the
	 * combinations are distinct valid stable states.
	 */
	K_WORK_RUNNING_BIT = 0,
	K_WORK_CANCELING_BIT = 1,
	K_WORK_QUEUED_BIT = 2,
	K_WORK_DELAYED_BIT = 3,

	K_WORK_MASK = BIT(K_WORK_DELAYED_BIT) | BIT(K_WORK_QUEUED_BIT)
		| BIT(K_WORK_RUNNING_BIT) | BIT(K_WORK_CANCELING_BIT),

	/* Static work flags */
	K_WORK_DELAYABLE_BIT = 8,
	K_WORK_DELAYABLE = BIT(K_WORK_DELAYABLE_BIT),

	/* Dynamic work queue flags */
	K_WORK_QUEUE_STARTED_BIT = 0,
	K_WORK_QUEUE_STARTED = BIT(K_WORK_QUEUE_STARTED_BIT),
	K_WORK_QUEUE_BUSY_BIT = 1,
	K_WORK_QUEUE_BUSY = BIT(K_WORK_QUEUE_BUSY_BIT),
	K_WORK_QUEUE_DRAIN_BIT = 2,
	K_WORK_QUEUE_DRAIN = BIT(K_WORK_QUEUE_DRAIN_BIT),
	K_WORK_QUEUE_PLUGGED_BIT = 3,
	K_WORK_QUEUE_PLUGGED = BIT(K_WORK_QUEUE_PLUGGED_BIT),

	/* Static work queue flags */
	K_WORK_QUEUE_NO_YIELD_BIT = 8,
	K_WORK_QUEUE_NO_YIELD = BIT(K_WORK_QUEUE_NO_YIELD_BIT),

/**
 * INTERNAL_HIDDEN @endcond
 */
	/* Transient work flags */

	/** @brief Flag indicating a work item that is running under a work
	 * queue thread.
	 *
	 * Accessed via k_work_busy_get().  May co-occur with other flags.
	 */
	K_WORK_RUNNING = BIT(K_WORK_RUNNING_BIT),

	/** @brief Flag indicating a work item that is being canceled.
	 *
	 * Accessed via k_work_busy_get().  May co-occur with other flags.
	 */
	K_WORK_CANCELING = BIT(K_WORK_CANCELING_BIT),

	/** @brief Flag indicating a work item that has been submitted to a
	 * queue but has not started running.
	 *
	 * Accessed via k_work_busy_get().  May co-occur with other flags.
	 */
	K_WORK_QUEUED = BIT(K_WORK_QUEUED_BIT),

	/** @brief Flag indicating a delayed work item that is scheduled for
	 * submission to a queue.
	 *
	 * Accessed via k_work_busy_get().  May co-occur with other flags.
	 */
	K_WORK_DELAYED = BIT(K_WORK_DELAYED_BIT),
};

/** @brief A structure used to submit work. */
struct k_work {
	/* All fields are protected by the work module spinlock.  No fields
	 * are to be accessed except through kernel API.
	 */

	/* Node to link into k_work_q pending list. */
	sys_snode_t node;

	/* The function to be invoked by the work queue thread. */
	k_work_handler_t handler;

	/* The queue on which the work item was last submitted. */
	struct k_work_q *queue;

	/* State of the work item.
	 *
	 * The item can be DELAYED, QUEUED, and RUNNING simultaneously.
	 *
	 * It can be RUNNING and CANCELING simultaneously.
	 */
	uint32_t flags;
};

#define Z_WORK_INITIALIZER(work_handler) { \
	.handler = work_handler, \
}

/** @brief A structure used to submit work after a delay. */
struct k_work_delayable {
	/* The work item. */
	struct k_work work;

	/* Timeout used to submit work after a delay. */
	struct _timeout timeout;

	/* The queue to which the work should be submitted. */
	struct k_work_q *queue;
};

#define Z_WORK_DELAYABLE_INITIALIZER(work_handler) { \
	.work = { \
		.handler = work_handler, \
		.flags = K_WORK_DELAYABLE, \
	}, \
}

/**
 * @brief Initialize a statically-defined delayable work item.
 *
 * This macro can be used to initialize a statically-defined delayable
 * work item, prior to its first use. For example,
 *
 * @code static K_WORK_DELAYABLE_DEFINE(<dwork>, <work_handler>); @endcode
 *
 * Note that if the runtime dependencies allow it, initializing with
 * k_work_init_delayable() instead will eliminate the initialized object
 * in ROM that is produced by this macro and copied in at system startup.
 *
 * @param work Symbol name for delayable work item object
 * @param work_handler Function to invoke each time work item is processed.
 */
#define K_WORK_DELAYABLE_DEFINE(work, work_handler) \
	struct k_work_delayable work \
	  = Z_WORK_DELAYABLE_INITIALIZER(work_handler)

/**
 * @cond INTERNAL_HIDDEN
 */

/* Record used to wait for work to flush.
 *
 * The work item is inserted into the queue that will process (or is
 * processing) the item, and will be processed as soon as the item
 * completes.  When the flusher is processed the semaphore will be
 * signaled, releasing the thread waiting for the flush.
 */
struct z_work_flusher {
	struct k_work work;
	struct k_sem sem;
};

/* Record used to wait for work to complete a cancellation.
 *
 * The work item is inserted into a global queue of pending cancels.
 * When a cancelling work item goes idle any matching waiters are
 * removed from pending_cancels and are woken.
 */
struct z_work_canceller {
	sys_snode_t node;
	struct k_work *work;
	struct k_sem sem;
};

/**
 * INTERNAL_HIDDEN @endcond
 */

/** @brief A structure holding internal state for a pending synchronous
 * operation on a work item or queue.
 *
 * Instances of this type are provided by the caller for invocation of
 * k_work_flush(), k_work_cancel_sync() and sibling flush and cancel APIs.  A
 * referenced object must persist until the call returns, and be accessible
 * from both the caller thread and the work queue thread.
 *
 * @note If CONFIG_KERNEL_COHERENCE is enabled the object must be allocated in
 * coherent memory; see arch_mem_coherent().  The stack on these architectures
 * is generally not coherent, so the object must not be stack-allocated.
 * Violations are detected by runtime assertion.
 */
struct k_work_sync {
	union {
		struct z_work_flusher flusher;
		struct z_work_canceller canceller;
	};
};

/** @brief A structure holding optional configuration items for a work
 * queue.
 *
 * This structure, and values it references, are not retained by
 * k_work_queue_start().
 */
struct k_work_queue_config {
	/** The name to be given to the work queue thread.
	 *
	 * If left null the thread will not have a name.
	 */
	const char *name;

	/** Control whether the work queue thread should yield between
	 * items.
	 *
	 * Yielding between items helps guarantee the work queue
	 * thread does not starve other threads, including cooperative
	 * ones released by a work item.  This is the default behavior.
	 *
	 * Set this to @c true to prevent the work queue thread from
	 * yielding between items.  This may be appropriate when a
	 * sequence of items should complete without yielding
	 * control.
	 */
	bool no_yield;
};

/** @brief A structure used to hold work until it can be processed. */
struct k_work_q {
	/* The thread that animates the work. */
	struct k_thread thread;

	/* All the following fields must be accessed only while the
	 * work module spinlock is held.
	 */

	/* List of k_work items to be worked. */
	sys_slist_t pending;

	/* Wait queue for idle work thread. */
	_wait_q_t notifyq;

	/* Wait queue for threads waiting for the queue to drain. */
	_wait_q_t drainq;

	/* Flags describing queue state. */
	uint32_t flags;
};

/* Provide the implementation for inline functions declared above */

static inline bool k_work_is_pending(const struct k_work *work)
{
	return k_work_busy_get(work) != 0;
}

static inline struct k_work_delayable *
k_work_delayable_from_work(struct k_work *work)
{
	return CONTAINER_OF(work, struct k_work_delayable, work);
}

static inline bool k_work_delayable_is_pending(
	const struct k_work_delayable *dwork)
{
	return k_work_delayable_busy_get(dwork) != 0;
}

static inline k_ticks_t k_work_delayable_expires_get(
	const struct k_work_delayable *dwork)
{
	return z_timeout_expires(&dwork->timeout);
}

static inline k_ticks_t k_work_delayable_remaining_get(
	const struct k_work_delayable *dwork)
{
	return z_timeout_remaining(&dwork->timeout);
}

static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue)
{
	return &queue->thread;
}

/** @} */

struct k_work_user;

/**
 * @addtogroup workqueue_apis
 * @{
 */

/**
 * @typedef k_work_user_handler_t
 * @brief Work item handler function type for user work queues.
 *
 * A work item's handler function is executed by a user workqueue's thread
 * when the work item is processed by the workqueue.
 *
 * @param work Address of the work item.
 */
typedef void (*k_work_user_handler_t)(struct k_work_user *work);

/**
 * @cond INTERNAL_HIDDEN
 */

struct k_work_user_q {
	struct k_queue queue;
	struct k_thread thread;
};

enum {
	K_WORK_USER_STATE_PENDING,	/* Work item pending state */
};

struct k_work_user {
	void *_reserved;		/* Used by k_queue implementation. */
	k_work_user_handler_t handler;
	atomic_t flags;
};

/**
 * INTERNAL_HIDDEN @endcond
 */

#if defined(__cplusplus) && ((__cplusplus - 0) < 202002L)
#define Z_WORK_USER_INITIALIZER(work_handler) { NULL, work_handler, 0 }
#else
#define Z_WORK_USER_INITIALIZER(work_handler) \
	{ \
	._reserved = NULL, \
	.handler = work_handler, \
	.flags = 0 \
	}
#endif

/**
 * @brief Initialize a statically-defined user work item.
 *
 * This macro can be used to initialize a statically-defined user work
 * item, prior to its first use. For example,
 *
 * @code static K_WORK_USER_DEFINE(<work>, <work_handler>); @endcode
 *
 * @param work Symbol name for work item object
 * @param work_handler Function to invoke each time work item is processed.
 */
#define K_WORK_USER_DEFINE(work, work_handler) \
	struct k_work_user work = Z_WORK_USER_INITIALIZER(work_handler)

/**
 * @brief Initialize a userspace work item.
 *
 * This routine initializes a user workqueue work item, prior to its
 * first use.
 *
 * @param work Address of work item.
 * @param handler Function to invoke each time work item is processed.
 */
static inline void k_work_user_init(struct k_work_user *work,
				    k_work_user_handler_t handler)
{
	*work = (struct k_work_user)Z_WORK_USER_INITIALIZER(handler);
}

/**
 * @brief Check if a userspace work item is pending.
 *
 * This routine indicates if user work item @a work is pending in a workqueue's
 * queue.
 *
 * @note Checking if the work is pending gives no guarantee that the
 *       work will still be pending when this information is used. It is up to
 *       the caller to make sure that this information is used in a safe manner.
 *
 * @funcprops \isr_ok
 *
 * @param work Address of work item.
 *
 * @return true if work item is pending, or false if it is not pending.
 */
static inline bool k_work_user_is_pending(struct k_work_user *work)
{
	return atomic_test_bit(&work->flags, K_WORK_USER_STATE_PENDING);
}

/**
 * @brief Submit a work item to a user mode workqueue
 *
 * Submits a work item to a workqueue that runs in user mode. A temporary
 * memory allocation is made from the caller's resource pool which is freed
 * once the worker thread consumes the k_work item. The workqueue
 * thread must have memory access to the k_work item being submitted. The caller
 * must have permission granted on the work_q parameter's queue object.
 *
 * @funcprops \isr_ok
 *
 * @param work_q Address of workqueue.
 * @param work Address of work item.
 *
 * @retval -EBUSY if the work item was already in some workqueue
 * @retval -ENOMEM if no memory for thread resource pool allocation
 * @retval 0 Success
 */
static inline int k_work_user_submit_to_queue(struct k_work_user_q *work_q,
					      struct k_work_user *work)
{
	int ret = -EBUSY;

	if (!atomic_test_and_set_bit(&work->flags,
				     K_WORK_USER_STATE_PENDING)) {
		ret = k_queue_alloc_append(&work_q->queue, work);

		/* Couldn't insert into the queue. Clear the pending bit
		 * so the work item can be submitted again
		 */
		if (ret != 0) {
			atomic_clear_bit(&work->flags,
					 K_WORK_USER_STATE_PENDING);
		}
	}

	return ret;
}

/**
 * @brief Start a workqueue in user mode
 *
 * This works identically to k_work_queue_start() except it is callable from
 * user mode, and the worker thread created will run in user mode.  The caller
 * must have permissions granted on both the work_q parameter's thread and
 * queue objects, and the same restrictions on priority apply as
 * k_thread_create().
 *
 * @param work_q Address of workqueue.
 * @param stack Pointer to work queue thread's stack space, as defined by
 *		K_THREAD_STACK_DEFINE()
 * @param stack_size Size of the work queue thread's stack (in bytes), which
 *		should either be the same constant passed to
 *		K_THREAD_STACK_DEFINE() or the value of K_THREAD_STACK_SIZEOF().
 * @param prio Priority of the work queue's thread.
 * @param name optional thread name.  If not null a copy is made into the
 *		thread's name buffer.
 */
extern void k_work_user_queue_start(struct k_work_user_q *work_q,
				    k_thread_stack_t *stack,
				    size_t stack_size, int prio,
				    const char *name);
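
/*
 * Example: a minimal sketch of starting a user mode workqueue and
 * submitting an item to it.  The stack size, priority, and names are
 * illustrative values.
 *
 * @code
 * #define USER_WQ_STACK_SIZE 1024
 *
 * K_THREAD_STACK_DEFINE(user_wq_stack, USER_WQ_STACK_SIZE);
 * static struct k_work_user_q user_wq;
 *
 * static void user_handler(struct k_work_user *work)
 * {
 *	// runs in the user mode workqueue thread
 * }
 *
 * static K_WORK_USER_DEFINE(user_work, user_handler);
 *
 * void start_and_submit(void)
 * {
 *	k_work_user_queue_start(&user_wq, user_wq_stack,
 *				K_THREAD_STACK_SIZEOF(user_wq_stack),
 *				10, "user_wq");
 *
 *	(void)k_work_user_submit_to_queue(&user_wq, &user_work);
 * }
 * @endcode
 */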

/**
 * @brief Access the user mode thread that animates a work queue.
 *
 * This is necessary to grant a user mode work queue thread access to things
 * the work items it will process are expected to use.
 *
 * @param work_q pointer to the user mode queue structure.
 *
 * @return the user mode thread associated with the work queue.
 */
static inline k_tid_t k_work_user_queue_thread_get(struct k_work_user_q *work_q)
{
	return &work_q->thread;
}

/** @} */

/**
 * @cond INTERNAL_HIDDEN
 */

struct k_work_poll {
	struct k_work work;
	struct k_work_q *workq;
	struct z_poller poller;
	struct k_poll_event *events;
	int num_events;
	k_work_handler_t real_handler;
	struct _timeout timeout;
	int poll_result;
};

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @addtogroup workqueue_apis
 * @{
 */

/**
 * @brief Initialize a statically-defined work item.
 *
 * This macro can be used to initialize a statically-defined workqueue work
 * item, prior to its first use. For example,
 *
 * @code static K_WORK_DEFINE(<work>, <work_handler>); @endcode
 *
 * @param work Symbol name for work item object
 * @param work_handler Function to invoke each time work item is processed.
 */
#define K_WORK_DEFINE(work, work_handler) \
	struct k_work work = Z_WORK_INITIALIZER(work_handler)

/**
 * @brief Initialize a triggered work item.
 *
 * This routine initializes a workqueue triggered work item, prior to
 * its first use.
 *
 * @param work Address of triggered work item.
 * @param handler Function to invoke each time work item is processed.
 */
extern void k_work_poll_init(struct k_work_poll *work,
			     k_work_handler_t handler);

/**
 * @brief Submit a triggered work item.
 *
 * This routine schedules work item @a work to be processed by workqueue
 * @a work_q when one of the given @a events is signaled. The routine
 * initiates an internal poller for the work item and then returns to the
 * caller. Only when one of the watched events happens is the work item
 * actually submitted to the workqueue, becoming pending.
 *
 * Submitting a previously submitted triggered work item that is still
 * waiting for the event cancels the existing submission and reschedules it
 * using the new event list. Note that this behavior is inherently subject
 * to race conditions with the pre-existing triggered work item and work queue,
 * so care must be taken to synchronize such resubmissions externally.
 *
 * @funcprops \isr_ok
 *
 * @warning
 * Provided array of events as well as a triggered work item must be placed
 * in persistent memory (valid until work handler execution or work
 * cancellation) and cannot be modified after submission.
 *
 * @param work_q Address of workqueue.
 * @param work Address of delayed work item.
 * @param events An array of events which trigger the work.
 * @param num_events The number of events in the array.
 * @param timeout Timeout after which the work will be scheduled
 *		  for execution even if not triggered.
 *
 *
 * @retval 0 Work item started watching for events.
 * @retval -EINVAL Work item is being processed or has completed its work.
 * @retval -EADDRINUSE Work item is pending on a different workqueue.
 */
extern int k_work_poll_submit_to_queue(struct k_work_q *work_q,
				       struct k_work_poll *work,
				       struct k_poll_event *events,
				       int num_events,
				       k_timeout_t timeout);

/**
 * @brief Submit a triggered work item to the system workqueue.
 *
 * This routine schedules work item @a work to be processed by the system
 * workqueue when one of the given @a events is signaled. The routine
 * initiates an internal poller for the work item and then returns to the
 * caller. Only when one of the watched events happens is the work item
 * actually submitted to the workqueue, becoming pending.
 *
 * Submitting a previously submitted triggered work item that is still
 * waiting for the event cancels the existing submission and reschedules it
 * using the new event list. Note that this behavior is inherently subject
 * to race conditions with the pre-existing triggered work item and work queue,
 * so care must be taken to synchronize such resubmissions externally.
 *
 * @funcprops \isr_ok
 *
 * @warning
 * Provided array of events as well as a triggered work item must not be
 * modified until the item has been processed by the workqueue.
 *
 * @param work Address of delayed work item.
 * @param events An array of events which trigger the work.
 * @param num_events The number of events in the array.
 * @param timeout Timeout after which the work will be scheduled
 *		  for execution even if not triggered.
 *
 * @retval 0 Work item started watching for events.
 * @retval -EINVAL Work item is being processed or has completed its work.
 * @retval -EADDRINUSE Work item is pending on a different workqueue.
 */
extern int k_work_poll_submit(struct k_work_poll *work,
				     struct k_poll_event *events,
				     int num_events,
				     k_timeout_t timeout);
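
/*
 * Example: a minimal sketch of triggered work driven by a poll signal.
 * The signal, event array, and handler names are illustrative; the objects
 * are static so they persist until the handler runs, as required above.
 *
 * @code
 * static struct k_poll_signal my_signal;
 * static struct k_poll_event my_events[1];
 * static struct k_work_poll my_twork;
 *
 * static void twork_handler(struct k_work *work)
 * {
 *	// runs after my_signal was raised
 * }
 *
 * void setup_triggered_work(void)
 * {
 *	k_poll_signal_init(&my_signal);
 *	k_poll_event_init(&my_events[0], K_POLL_TYPE_SIGNAL,
 *			  K_POLL_MODE_NOTIFY_ONLY, &my_signal);
 *
 *	k_work_poll_init(&my_twork, twork_handler);
 *	(void)k_work_poll_submit(&my_twork, my_events, 1, K_FOREVER);
 * }
 * @endcode
 */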

/**
 * @brief Cancel a triggered work item.
 *
 * This routine cancels the submission of triggered work item @a work.
 * A triggered work item can only be canceled if no event has yet triggered
 * submission of the work item.
 *
 * @funcprops \isr_ok
 *
 * @param work Address of delayed work item.
 *
 * @retval 0 Work item canceled.
 * @retval -EINVAL Work item is being processed or has completed its work.
 */
extern int k_work_poll_cancel(struct k_work_poll *work);

/** @} */

/**
 * @defgroup msgq_apis Message Queue APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * @brief Message Queue Structure
 */
struct k_msgq {
	/** Message queue wait queue */
	_wait_q_t wait_q;
	/** Lock */
	struct k_spinlock lock;
	/** Message size */
	size_t msg_size;
	/** Maximal number of messages */
	uint32_t max_msgs;
	/** Start of message buffer */
	char *buffer_start;
	/** End of message buffer */
	char *buffer_end;
	/** Read pointer */
	char *read_ptr;
	/** Write pointer */
	char *write_ptr;
	/** Number of used messages */
	uint32_t used_msgs;

	_POLL_EVENT;

	/** Message queue flags */
	uint8_t flags;

	SYS_PORT_TRACING_TRACKING_FIELD(k_msgq)

#ifdef CONFIG_OBJ_CORE_MSGQ
	struct k_obj_core  obj_core;
#endif
};
/**
 * @cond INTERNAL_HIDDEN
 */


#define Z_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \
	{ \
	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
	.msg_size = q_msg_size, \
	.max_msgs = q_max_msgs, \
	.buffer_start = q_buffer, \
	.buffer_end = q_buffer + (q_max_msgs * q_msg_size), \
	.read_ptr = q_buffer, \
	.write_ptr = q_buffer, \
	.used_msgs = 0, \
	_POLL_EVENT_OBJ_INIT(obj) \
	}

/**
 * INTERNAL_HIDDEN @endcond
 */


#define K_MSGQ_FLAG_ALLOC	BIT(0)

/**
 * @brief Message Queue Attributes
 */
struct k_msgq_attrs {
	/** Message Size */
	size_t msg_size;
	/** Maximal number of messages */
	uint32_t max_msgs;
	/** Used messages */
	uint32_t used_msgs;
};


/**
 * @brief Statically define and initialize a message queue.
 *
 * The message queue's ring buffer contains space for @a q_max_msgs messages,
 * each of which is @a q_msg_size bytes long. Alignment of the message queue's
 * ring buffer is not necessary, setting @a q_align to 1 is sufficient.
 *
 * The message queue can be accessed outside the module where it is defined
 * using:
 *
 * @code extern struct k_msgq <name>; @endcode
 *
 * @param q_name Name of the message queue.
 * @param q_msg_size Message size (in bytes).
 * @param q_max_msgs Maximum number of messages that can be queued.
 * @param q_align Alignment of the message queue's ring buffer (power of 2).
 *
 */
#define K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align)		\
	static char __noinit __aligned(q_align)				\
		_k_fifo_buf_##q_name[(q_max_msgs) * (q_msg_size)];	\
	STRUCT_SECTION_ITERABLE(k_msgq, q_name) =			\
	       Z_MSGQ_INITIALIZER(q_name, _k_fifo_buf_##q_name,	\
				  (q_msg_size), (q_max_msgs))

/**
 * @brief Initialize a message queue.
 *
 * This routine initializes a message queue object, prior to its first use.
 *
 * The message queue's ring buffer must contain space for @a max_msgs messages,
 * each of which is @a msg_size bytes long. Alignment of the message queue's
 * ring buffer is not necessary.
 *
 * @param msgq Address of the message queue.
 * @param buffer Pointer to ring buffer that holds queued messages.
 * @param msg_size Message size (in bytes).
 * @param max_msgs Maximum number of messages that can be queued.
 */
void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
		 uint32_t max_msgs);

/**
 * @brief Initialize a message queue.
 *
 * This routine initializes a message queue object, prior to its first use,
 * allocating its internal ring buffer from the calling thread's resource
 * pool.
 *
 * Memory allocated for the ring buffer can be released by calling
 * k_msgq_cleanup(), or if userspace is enabled and the msgq object loses
 * all of its references.
 *
 * @param msgq Address of the message queue.
 * @param msg_size Message size (in bytes).
 * @param max_msgs Maximum number of messages that can be queued.
 *
 * @return 0 on success, -ENOMEM if there was insufficient memory in the
 *	thread's resource pool, or -EINVAL if the size parameters cause
 *	an integer overflow.
 */
__syscall int k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
				uint32_t max_msgs);

/**
 * @brief Release allocated buffer for a queue
 *
 * Releases memory allocated for the ring buffer.
 *
 * @param msgq message queue to cleanup
 *
 * @retval 0 on success
 * @retval -EBUSY Queue not empty
 */
int k_msgq_cleanup(struct k_msgq *msgq);

/**
 * @brief Send a message to a message queue.
 *
 * This routine sends a message to message queue @a msgq.
 *
 * @note The message content is copied from @a data into @a msgq and the @a data
 * pointer is not retained, so the message content will not be modified
 * by this function.
 *
 * @funcprops \isr_ok
 *
 * @param msgq Address of the message queue.
 * @param data Pointer to the message.
 * @param timeout Non-negative waiting period to add the message,
 *                or one of the special values K_NO_WAIT and
 *                K_FOREVER.
 *
 * @retval 0 Message sent.
 * @retval -ENOMSG Returned without waiting or queue purged.
 * @retval -EAGAIN Waiting period timed out.
 */
__syscall int k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout);

/**
 * @brief Receive a message from a message queue.
 *
 * This routine receives a message from message queue @a msgq in a "first in,
 * first out" manner.
 *
 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
 *
 * @funcprops \isr_ok
 *
 * @param msgq Address of the message queue.
 * @param data Address of area to hold the received message.
 * @param timeout Waiting period to receive the message,
 *                or one of the special values K_NO_WAIT and
 *                K_FOREVER.
 *
 * @retval 0 Message received.
 * @retval -ENOMSG Returned without waiting.
 * @retval -EAGAIN Waiting period timed out.
 */
__syscall int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout);
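
/*
 * Example: a minimal producer/consumer sketch for a message queue.  The
 * sample structure, queue name, and depth are illustrative.
 *
 * @code
 * struct sensor_sample {
 *	uint32_t timestamp;
 *	int16_t value;
 * };
 *
 * K_MSGQ_DEFINE(sample_q, sizeof(struct sensor_sample), 16, 1);
 *
 * void producer(void)
 * {
 *	struct sensor_sample s = {
 *		.timestamp = k_uptime_get_32(),
 *		.value = 42,
 *	};
 *
 *	// Drop the sample rather than block if the queue is full.
 *	(void)k_msgq_put(&sample_q, &s, K_NO_WAIT);
 * }
 *
 * void consumer(void)
 * {
 *	struct sensor_sample s;
 *
 *	while (k_msgq_get(&sample_q, &s, K_FOREVER) == 0) {
 *		// process s
 *	}
 * }
 * @endcode
 */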

/**
 * @brief Peek/read a message from a message queue.
 *
 * This routine reads a message from message queue @a msgq in a "first in,
 * first out" manner and leaves the message in the queue.
 *
 * @funcprops \isr_ok
 *
 * @param msgq Address of the message queue.
 * @param data Address of area to hold the message read from the queue.
 *
 * @retval 0 Message read.
 * @retval -ENOMSG Returned when the queue has no message.
 */
__syscall int k_msgq_peek(struct k_msgq *msgq, void *data);

/**
 * @brief Peek/read a message from a message queue at the specified index
 *
 * This routine reads a message from message queue at the specified index
 * and leaves the message in the queue.
 * k_msgq_peek_at(msgq, data, 0) is equivalent to k_msgq_peek(msgq, data)
 *
 * @funcprops \isr_ok
 *
 * @param msgq Address of the message queue.
 * @param data Address of area to hold the message read from the queue.
 * @param idx Message queue index at which to peek
 *
 * @retval 0 Message read.
 * @retval -ENOMSG Returned when the queue has no message at index.
 */
__syscall int k_msgq_peek_at(struct k_msgq *msgq, void *data, uint32_t idx);

/**
 * @brief Purge a message queue.
 *
 * This routine discards all unreceived messages in a message queue's ring
 * buffer. Any threads that are blocked waiting to send a message to the
 * message queue are unblocked and see an -ENOMSG error code.
 *
 * @param msgq Address of the message queue.
 */
__syscall void k_msgq_purge(struct k_msgq *msgq);

/**
 * @brief Get the amount of free space in a message queue.
 *
 * This routine returns the number of unused entries in a message queue's
 * ring buffer.
 *
 * @param msgq Address of the message queue.
 *
 * @return Number of unused ring buffer entries.
 */
__syscall uint32_t k_msgq_num_free_get(struct k_msgq *msgq);

/**
 * @brief Get basic attributes of a message queue.
 *
 * This routine fetches basic attributes of the message queue into the
 * @a attrs argument.
 *
 * @param msgq Address of the message queue.
 * @param attrs pointer to message queue attribute structure.
 */
__syscall void  k_msgq_get_attrs(struct k_msgq *msgq,
				 struct k_msgq_attrs *attrs);


static inline uint32_t z_impl_k_msgq_num_free_get(struct k_msgq *msgq)
{
	return msgq->max_msgs - msgq->used_msgs;
}

/**
 * @brief Get the number of messages in a message queue.
 *
 * This routine returns the number of messages in a message queue's ring buffer.
 *
 * @param msgq Address of the message queue.
 *
 * @return Number of messages.
 */
__syscall uint32_t k_msgq_num_used_get(struct k_msgq *msgq);

static inline uint32_t z_impl_k_msgq_num_used_get(struct k_msgq *msgq)
{
	return msgq->used_msgs;
}

/** @} */
4687 
4688 /**
4689  * @defgroup mailbox_apis Mailbox APIs
4690  * @ingroup kernel_apis
4691  * @{
4692  */
4693 
4694 /**
4695  * @brief Mailbox Message Structure
4696  *
4697  */
4698 struct k_mbox_msg {
4699 	/** internal use only - needed for legacy API support */
4700 	uint32_t _mailbox;
4701 	/** size of message (in bytes) */
4702 	size_t size;
4703 	/** application-defined information value */
4704 	uint32_t info;
4705 	/** sender's message data buffer */
4706 	void *tx_data;
4707 	/** source thread id */
4708 	k_tid_t rx_source_thread;
4709 	/** target thread id */
4710 	k_tid_t tx_target_thread;
4711 	/** internal use only - thread waiting on send (may be a dummy) */
4712 	k_tid_t _syncing_thread;
4713 #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
4714 	/** internal use only - semaphore used during asynchronous send */
4715 	struct k_sem *_async_sem;
4716 #endif
4717 };
4718 /**
4719  * @brief Mailbox Structure
4720  *
4721  */
4722 struct k_mbox {
4723 	/** Transmit messages queue */
4724 	_wait_q_t tx_msg_queue;
4725 	/** Receive message queue */
4726 	_wait_q_t rx_msg_queue;
4727 	struct k_spinlock lock;
4728 
4729 	SYS_PORT_TRACING_TRACKING_FIELD(k_mbox)
4730 
4731 #ifdef CONFIG_OBJ_CORE_MAILBOX
4732 	struct k_obj_core  obj_core;
4733 #endif
4734 };
4735 /**
4736  * @cond INTERNAL_HIDDEN
4737  */
4738 
4739 #define Z_MBOX_INITIALIZER(obj) \
4740 	{ \
4741 	.tx_msg_queue = Z_WAIT_Q_INIT(&obj.tx_msg_queue), \
4742 	.rx_msg_queue = Z_WAIT_Q_INIT(&obj.rx_msg_queue), \
4743 	}
4744 
4745 /**
4746  * INTERNAL_HIDDEN @endcond
4747  */
4748 
4749 /**
4750  * @brief Statically define and initialize a mailbox.
4751  *
4752  * The mailbox is to be accessed outside the module where it is defined using:
4753  *
4754  * @code extern struct k_mbox <name>; @endcode
4755  *
4756  * @param name Name of the mailbox.
4757  */
4758 #define K_MBOX_DEFINE(name) \
4759 	STRUCT_SECTION_ITERABLE(k_mbox, name) = \
4760 		Z_MBOX_INITIALIZER(name) \
4761 
4762 /**
4763  * @brief Initialize a mailbox.
4764  *
4765  * This routine initializes a mailbox object, prior to its first use.
4766  *
4767  * @param mbox Address of the mailbox.
4768  */
4769 extern void k_mbox_init(struct k_mbox *mbox);
4770 
4771 /**
4772  * @brief Send a mailbox message in a synchronous manner.
4773  *
4774  * This routine sends a message to @a mbox and waits for a receiver to both
4775  * receive and process it. The message data may be in a buffer or non-existent
4776  * (i.e. an empty message).
4777  *
4778  * @param mbox Address of the mailbox.
4779  * @param tx_msg Address of the transmit message descriptor.
4780  * @param timeout Waiting period for the message to be received,
4781  *                or one of the special values K_NO_WAIT
4782  *                and K_FOREVER. Once the message has been received,
4783  *                this routine waits as long as necessary for the message
4784  *                to be completely processed.
4785  *
4786  * @retval 0 Message sent.
4787  * @retval -ENOMSG Returned without waiting.
4788  * @retval -EAGAIN Waiting period timed out.
4789  */
4790 extern int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
4791 		      k_timeout_t timeout);

/**
 * @brief Send a mailbox message in an asynchronous manner.
 *
 * This routine sends a message to @a mbox without waiting for a receiver
 * to process it. The message data may be in a buffer or non-existent
 * (i.e. an empty message). Optionally, the semaphore @a sem will be given
 * when the message has been both received and completely processed by
 * the receiver.
 *
 * @param mbox Address of the mailbox.
 * @param tx_msg Address of the transmit message descriptor.
 * @param sem Address of a semaphore, or NULL if none is needed.
 */
extern void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
			     struct k_sem *sem);

/**
 * @brief Receive a mailbox message.
 *
 * This routine receives a message from @a mbox, then optionally retrieves
 * its data and disposes of the message.
 *
 * @param mbox Address of the mailbox.
 * @param rx_msg Address of the receive message descriptor.
 * @param buffer Address of the buffer to receive data, or NULL to defer data
 *               retrieval and message disposal until later.
 * @param timeout Waiting period for a message to be received,
 *                or one of the special values K_NO_WAIT and K_FOREVER.
 *
 * @retval 0 Message received.
 * @retval -ENOMSG Returned without waiting.
 * @retval -EAGAIN Waiting period timed out.
 */
extern int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg,
		      void *buffer, k_timeout_t timeout);

/**
 * @brief Retrieve mailbox message data into a buffer.
 *
 * This routine completes the processing of a received message by retrieving
 * its data into a buffer, then disposing of the message.
 *
 * Alternatively, this routine can be used to dispose of a received message
 * without retrieving its data.
 *
 * @param rx_msg Address of the receive message descriptor.
 * @param buffer Address of the buffer to receive data, or NULL to discard
 *               the data.
 */
extern void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer);
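
/*
 * Example (illustrative sketch): receiving a message header first with a
 * NULL buffer, then retrieving the data once its size is known. The names
 * my_mailbox and rx_buffer are hypothetical.
 *
 *	extern struct k_mbox my_mailbox;
 *
 *	void consumer(void)
 *	{
 *		struct k_mbox_msg recv_msg;
 *		char rx_buffer[100];
 *
 *		recv_msg.size = sizeof(rx_buffer);
 *		recv_msg.rx_source_thread = K_ANY;	// accept any sender
 *
 *		// NULL buffer defers data retrieval and disposal
 *		if (k_mbox_get(&my_mailbox, &recv_msg, NULL, K_FOREVER) == 0) {
 *			// recv_msg.size now holds the actual message size
 *			k_mbox_data_get(&recv_msg, rx_buffer);
 *		}
 *	}
 */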

/** @} */

/**
 * @defgroup pipe_apis Pipe APIs
 * @ingroup kernel_apis
 * @{
 */

/** Pipe Structure */
struct k_pipe {
	unsigned char *buffer;          /**< Pipe buffer: may be NULL */
	size_t         size;            /**< Buffer size */
	size_t         bytes_used;      /**< # bytes used in buffer */
	size_t         read_index;      /**< Where in buffer to read from */
	size_t         write_index;     /**< Where in buffer to write */
	struct k_spinlock lock;		/**< Synchronization lock */

	struct {
		_wait_q_t      readers; /**< Reader wait queue */
		_wait_q_t      writers; /**< Writer wait queue */
	} wait_q;			/**< Wait queue */

	_POLL_EVENT;

	uint8_t	       flags;		/**< Flags */

	SYS_PORT_TRACING_TRACKING_FIELD(k_pipe)

#ifdef CONFIG_OBJ_CORE_PIPE
	struct k_obj_core  obj_core;
#endif
};

/**
 * @cond INTERNAL_HIDDEN
 */
#define K_PIPE_FLAG_ALLOC	BIT(0)	/**< Buffer was allocated */

#define Z_PIPE_INITIALIZER(obj, pipe_buffer, pipe_buffer_size)     \
	{                                                           \
	.buffer = pipe_buffer,                                      \
	.size = pipe_buffer_size,                                   \
	.bytes_used = 0,                                            \
	.read_index = 0,                                            \
	.write_index = 0,                                           \
	.lock = {},                                                 \
	.wait_q = {                                                 \
		.readers = Z_WAIT_Q_INIT(&obj.wait_q.readers),       \
		.writers = Z_WAIT_Q_INIT(&obj.wait_q.writers)        \
	},                                                          \
	_POLL_EVENT_OBJ_INIT(obj)                                   \
	.flags = 0,                                                 \
	}

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @brief Statically define and initialize a pipe.
 *
 * The pipe can be accessed outside the module where it is defined using:
 *
 * @code extern struct k_pipe <name>; @endcode
 *
 * @param name Name of the pipe.
 * @param pipe_buffer_size Size of the pipe's ring buffer (in bytes),
 *                         or zero if no ring buffer is used.
 * @param pipe_align Alignment of the pipe's ring buffer (power of 2).
 *
 */
#define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align)		\
	static unsigned char __noinit __aligned(pipe_align)		\
		_k_pipe_buf_##name[pipe_buffer_size];			\
	STRUCT_SECTION_ITERABLE(k_pipe, name) =				\
		Z_PIPE_INITIALIZER(name, _k_pipe_buf_##name, pipe_buffer_size)

/**
 * @brief Initialize a pipe.
 *
 * This routine initializes a pipe object, prior to its first use.
 *
 * @param pipe Address of the pipe.
 * @param buffer Address of the pipe's ring buffer, or NULL if no ring buffer
 *               is used.
 * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
 *             buffer is used.
 */
void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size);

/**
 * @brief Release a pipe's allocated buffer
 *
 * If a pipe object was given a dynamically allocated buffer via
 * k_pipe_alloc_init(), this will free it. This function does nothing
 * if the buffer wasn't dynamically allocated.
 *
 * @param pipe Address of the pipe.
 * @retval 0 on success
 * @retval -EAGAIN nothing to clean up
 */
int k_pipe_cleanup(struct k_pipe *pipe);

/**
 * @brief Initialize a pipe and allocate a buffer for it
 *
 * Storage for the buffer region will be allocated from the calling thread's
 * resource pool. This memory will be released if k_pipe_cleanup() is called,
 * or userspace is enabled and the pipe object loses all references to it.
 *
 * This function should only be called on uninitialized pipe objects.
 *
 * @param pipe Address of the pipe.
 * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
 *             buffer is used.
 * @retval 0 on success
 * @retval -ENOMEM if memory couldn't be allocated
 */
__syscall int k_pipe_alloc_init(struct k_pipe *pipe, size_t size);

/**
 * @brief Write data to a pipe.
 *
 * This routine writes up to @a bytes_to_write bytes of data to @a pipe.
 *
 * @param pipe Address of the pipe.
 * @param data Address of data to write.
 * @param bytes_to_write Size of data (in bytes).
 * @param bytes_written Address of area to hold the number of bytes written.
 * @param min_xfer Minimum number of bytes to write.
 * @param timeout Waiting period to wait for the data to be written,
 *                or one of the special values K_NO_WAIT and K_FOREVER.
 *
 * @retval 0 At least @a min_xfer bytes of data were written.
 * @retval -EIO Returned without waiting; zero data bytes were written.
 * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
 *                 minus one data bytes were written.
 */
__syscall int k_pipe_put(struct k_pipe *pipe, void *data,
			 size_t bytes_to_write, size_t *bytes_written,
			 size_t min_xfer, k_timeout_t timeout);

/**
 * @brief Read data from a pipe.
 *
 * This routine reads up to @a bytes_to_read bytes of data from @a pipe.
 *
 * @param pipe Address of the pipe.
 * @param data Address to place the data read from pipe.
 * @param bytes_to_read Maximum number of data bytes to read.
 * @param bytes_read Address of area to hold the number of bytes read.
 * @param min_xfer Minimum number of data bytes to read.
 * @param timeout Waiting period to wait for the data to be read,
 *                or one of the special values K_NO_WAIT and K_FOREVER.
 *
 * @retval 0 At least @a min_xfer bytes of data were read.
 * @retval -EINVAL invalid parameters supplied
 * @retval -EIO Returned without waiting; zero data bytes were read.
 * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
 *                 minus one data bytes were read.
 */
__syscall int k_pipe_get(struct k_pipe *pipe, void *data,
			 size_t bytes_to_read, size_t *bytes_read,
			 size_t min_xfer, k_timeout_t timeout);
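
/*
 * Example (illustrative sketch): one context writing to and another reading
 * from a statically defined pipe. The name my_pipe and the 64-byte ring
 * buffer size are hypothetical.
 *
 *	K_PIPE_DEFINE(my_pipe, 64, 4);
 *
 *	void writer(void)
 *	{
 *		char data[] = "abc";
 *		size_t written;
 *
 *		// min_xfer == sizeof(data): all-or-nothing write
 *		(void)k_pipe_put(&my_pipe, data, sizeof(data), &written,
 *				 sizeof(data), K_FOREVER);
 *	}
 *
 *	void reader(void)
 *	{
 *		char buf[16];
 *		size_t read;
 *
 *		// min_xfer == 1: return as soon as any data arrives
 *		(void)k_pipe_get(&my_pipe, buf, sizeof(buf), &read, 1,
 *				 K_FOREVER);
 *	}
 */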

/**
 * @brief Query the number of bytes that may be read from @a pipe.
 *
 * @param pipe Address of the pipe.
 *
 * @retval a number n such that 0 <= n <= @ref k_pipe.size; the
 *         result is zero for unbuffered pipes.
 */
__syscall size_t k_pipe_read_avail(struct k_pipe *pipe);

/**
 * @brief Query the number of bytes that may be written to @a pipe
 *
 * @param pipe Address of the pipe.
 *
 * @retval a number n such that 0 <= n <= @ref k_pipe.size; the
 *         result is zero for unbuffered pipes.
 */
__syscall size_t k_pipe_write_avail(struct k_pipe *pipe);

/**
 * @brief Flush the pipe of write data
 *
 * This routine flushes the pipe. Flushing the pipe is equivalent to reading
 * both all the data in the pipe's buffer and all the data waiting to go into
 * that pipe into a large temporary buffer and discarding the buffer. Any
 * writers that were previously pended become unpended.
 *
 * @param pipe Address of the pipe.
 */
__syscall void k_pipe_flush(struct k_pipe *pipe);

/**
 * @brief Flush the pipe's internal buffer
 *
 * This routine flushes the pipe's internal buffer. This is equivalent to
 * reading up to N bytes from the pipe (where N is the size of the pipe's
 * buffer) into a temporary buffer and then discarding that buffer. If there
 * were writers previously pending, then some may unpend as they try to fill
 * up the pipe's emptied buffer.
 *
 * @param pipe Address of the pipe.
 */
__syscall void k_pipe_buffer_flush(struct k_pipe *pipe);

/** @} */

/**
 * @cond INTERNAL_HIDDEN
 */

struct k_mem_slab_info {
	uint32_t num_blocks;
	size_t   block_size;
	uint32_t num_used;
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
	uint32_t max_used;
#endif
};

struct k_mem_slab {
	_wait_q_t wait_q;
	struct k_spinlock lock;
	char *buffer;
	char *free_list;
	struct k_mem_slab_info info;

	SYS_PORT_TRACING_TRACKING_FIELD(k_mem_slab)

#ifdef CONFIG_OBJ_CORE_MEM_SLAB
	struct k_obj_core  obj_core;
#endif
};

#define Z_MEM_SLAB_INITIALIZER(_slab, _slab_buffer, _slab_block_size, \
			       _slab_num_blocks)                      \
	{                                                             \
	.wait_q = Z_WAIT_Q_INIT(&(_slab).wait_q),                     \
	.lock = {},                                                   \
	.buffer = _slab_buffer,                                       \
	.free_list = NULL,                                            \
	.info = {_slab_num_blocks, _slab_block_size, 0}               \
	}


/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @defgroup mem_slab_apis Memory Slab APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * @brief Statically define and initialize a memory slab in a public (non-static) scope.
 *
 * The memory slab's buffer contains @a slab_num_blocks memory blocks
 * that are @a slab_block_size bytes long. The buffer is aligned to a
 * @a slab_align -byte boundary. To ensure that each memory block is similarly
 * aligned to this boundary, @a slab_block_size must also be a multiple of
 * @a slab_align.
 *
 * The memory slab can be accessed outside the module where it is defined
 * using:
 *
 * @code extern struct k_mem_slab <name>; @endcode
 *
 * @note This macro cannot be used together with a static keyword.
 *       If such a use-case is desired, use @ref K_MEM_SLAB_DEFINE_STATIC
 *       instead.
 *
 * @param name Name of the memory slab.
 * @param slab_block_size Size of each memory block (in bytes).
 * @param slab_num_blocks Number of memory blocks.
 * @param slab_align Alignment of the memory slab's buffer (power of 2).
 */
#define K_MEM_SLAB_DEFINE(name, slab_block_size, slab_num_blocks, slab_align) \
	char __noinit_named(k_mem_slab_buf_##name) \
	   __aligned(WB_UP(slab_align)) \
	   _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
	STRUCT_SECTION_ITERABLE(k_mem_slab, name) = \
		Z_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \
					WB_UP(slab_block_size), slab_num_blocks)

/**
 * @brief Statically define and initialize a memory slab in a private (static) scope.
 *
 * The memory slab's buffer contains @a slab_num_blocks memory blocks
 * that are @a slab_block_size bytes long. The buffer is aligned to a
 * @a slab_align -byte boundary. To ensure that each memory block is similarly
 * aligned to this boundary, @a slab_block_size must also be a multiple of
 * @a slab_align.
 *
 * @param name Name of the memory slab.
 * @param slab_block_size Size of each memory block (in bytes).
 * @param slab_num_blocks Number of memory blocks.
 * @param slab_align Alignment of the memory slab's buffer (power of 2).
 */
#define K_MEM_SLAB_DEFINE_STATIC(name, slab_block_size, slab_num_blocks, slab_align) \
	static char __noinit_named(k_mem_slab_buf_##name) \
	   __aligned(WB_UP(slab_align)) \
	   _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
	static STRUCT_SECTION_ITERABLE(k_mem_slab, name) = \
		Z_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \
					WB_UP(slab_block_size), slab_num_blocks)

/**
 * @brief Initialize a memory slab.
 *
 * Initializes a memory slab, prior to its first use.
 *
 * The memory slab's buffer contains @a slab_num_blocks memory blocks
 * that are @a slab_block_size bytes long. The buffer must be aligned to an
 * N-byte boundary matching a word boundary, where N is a power of 2
 * (i.e. 4 on 32-bit systems, 8, 16, ...).
 * To ensure that each memory block is similarly aligned to this boundary,
 * @a slab_block_size must also be a multiple of N.
 *
 * @param slab Address of the memory slab.
 * @param buffer Pointer to buffer used for the memory blocks.
 * @param block_size Size of each memory block (in bytes).
 * @param num_blocks Number of memory blocks.
 *
 * @retval 0 on success
 * @retval -EINVAL invalid data supplied
 *
 */
extern int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
			   size_t block_size, uint32_t num_blocks);

/**
 * @brief Allocate memory from a memory slab.
 *
 * This routine allocates a memory block from a memory slab.
 *
 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
 *
 * @funcprops \isr_ok
 *
 * @param slab Address of the memory slab.
 * @param mem Pointer to block address area.
 * @param timeout Non-negative waiting period to wait for operation to complete.
 *        Use K_NO_WAIT to return without waiting,
 *        or K_FOREVER to wait as long as necessary.
 *
 * @retval 0 Memory allocated. The block address area pointed at by @a mem
 *         is set to the starting address of the memory block.
 * @retval -ENOMEM Returned without waiting.
 * @retval -EAGAIN Waiting period timed out.
 * @retval -EINVAL Invalid data supplied
 */
extern int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem,
			    k_timeout_t timeout);

/**
 * @brief Free memory allocated from a memory slab.
 *
 * This routine releases a previously allocated memory block back to its
 * associated memory slab.
 *
 * @param slab Address of the memory slab.
 * @param mem Pointer to the memory block (as returned by k_mem_slab_alloc()).
 */
extern void k_mem_slab_free(struct k_mem_slab *slab, void *mem);
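
/*
 * Example (illustrative sketch): a slab of eight 64-byte blocks, with a
 * bounded wait on allocation. The name my_slab is hypothetical.
 *
 *	K_MEM_SLAB_DEFINE(my_slab, 64, 8, 4);
 *
 *	void use_block(void)
 *	{
 *		void *block;
 *
 *		if (k_mem_slab_alloc(&my_slab, &block, K_MSEC(100)) == 0) {
 *			// ... use the 64-byte block ...
 *			k_mem_slab_free(&my_slab, block);
 *		}
 *	}
 */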

/**
 * @brief Get the number of used blocks in a memory slab.
 *
 * This routine gets the number of memory blocks that are currently
 * allocated in @a slab.
 *
 * @param slab Address of the memory slab.
 *
 * @return Number of allocated memory blocks.
 */
static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
{
	return slab->info.num_used;
}

/**
 * @brief Get the number of maximum used blocks so far in a memory slab.
 *
 * This routine gets the maximum number of memory blocks that were
 * allocated in @a slab.
 *
 * @param slab Address of the memory slab.
 *
 * @return Maximum number of allocated memory blocks.
 */
static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
{
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
	return slab->info.max_used;
#else
	ARG_UNUSED(slab);
	return 0;
#endif
}

/**
 * @brief Get the number of unused blocks in a memory slab.
 *
 * This routine gets the number of memory blocks that are currently
 * unallocated in @a slab.
 *
 * @param slab Address of the memory slab.
 *
 * @return Number of unallocated memory blocks.
 */
static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
{
	return slab->info.num_blocks - slab->info.num_used;
}

/**
 * @brief Get the memory stats for a memory slab
 *
 * This routine gets the runtime memory usage stats for the slab @a slab.
 *
 * @param slab Address of the memory slab
 * @param stats Pointer to memory into which to copy memory usage statistics
 *
 * @retval 0 Success
 * @retval -EINVAL Any parameter points to NULL
 */
int k_mem_slab_runtime_stats_get(struct k_mem_slab *slab, struct sys_memory_stats *stats);

/**
 * @brief Reset the maximum memory usage for a slab
 *
 * This routine resets the maximum memory usage for the slab @a slab to its
 * current usage.
 *
 * @param slab Address of the memory slab
 *
 * @retval 0 Success
 * @retval -EINVAL Memory slab is NULL
 */
int k_mem_slab_runtime_stats_reset_max(struct k_mem_slab *slab);

/** @} */

/**
 * @addtogroup heap_apis
 * @{
 */

/* kernel synchronized heap struct */

struct k_heap {
	struct sys_heap heap;
	_wait_q_t wait_q;
	struct k_spinlock lock;
};

/**
 * @brief Initialize a k_heap
 *
 * This constructs a synchronized k_heap object over a memory region
 * specified by the user.  Note that while any alignment and size can
 * be passed as valid parameters, internal alignment restrictions
 * inside the inner sys_heap mean that not all bytes may be usable as
 * allocated memory.
 *
 * @param h Heap struct to initialize
 * @param mem Pointer to memory.
 * @param bytes Size of memory region, in bytes
 */
void k_heap_init(struct k_heap *h, void *mem, size_t bytes);

/** @brief Allocate aligned memory from a k_heap
 *
 * Behaves in all ways like k_heap_alloc(), except that the returned
 * memory (if available) will have a starting address in memory which
 * is a multiple of the specified power-of-two alignment value in
 * bytes.  The resulting memory can be returned to the heap using
 * k_heap_free().
 *
 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
 *
 * @funcprops \isr_ok
 *
 * @param h Heap from which to allocate
 * @param align Alignment in bytes, must be a power of two
 * @param bytes Number of bytes requested
 * @param timeout How long to wait, or K_NO_WAIT
 * @return Pointer to memory the caller can now use
 */
void *k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes,
			k_timeout_t timeout);

/**
 * @brief Allocate memory from a k_heap
 *
 * Allocates and returns a memory buffer from the memory region owned
 * by the heap.  If no memory is available immediately, the call will
 * block for the specified timeout (constructed via the standard
 * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
 * freed.  If the allocation cannot be performed by the expiration of
 * the timeout, NULL will be returned.
 * Allocated memory is aligned on a multiple of pointer sizes.
 *
 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
 *
 * @funcprops \isr_ok
 *
 * @param h Heap from which to allocate
 * @param bytes Desired size of block to allocate
 * @param timeout How long to wait, or K_NO_WAIT
 * @return A pointer to valid heap memory, or NULL
 */
void *k_heap_alloc(struct k_heap *h, size_t bytes,
				 k_timeout_t timeout);

/**
 * @brief Free memory allocated by k_heap_alloc()
 *
 * Returns the specified memory block, which must have been returned
 * from k_heap_alloc(), to the heap for use by other callers.  Passing
 * a NULL block is legal, and has no effect.
 *
 * @param h Heap to which to return the memory
 * @param mem A valid memory block, or NULL
 */
void k_heap_free(struct k_heap *h, void *mem);

/* Hand-calculated minimum heap sizes needed to return a successful
 * 1-byte allocation.  See details in lib/os/heap.[ch]
 */
#define Z_HEAP_MIN_SIZE (sizeof(void *) > 4 ? 56 : 44)

/**
 * @brief Define a static k_heap in the specified linker section
 *
 * This macro defines and initializes a static memory region and
 * k_heap of the requested size in the specified linker section.
 * After kernel start, &name can be used as if k_heap_init() had
 * been called.
 *
 * Note that this macro enforces a minimum size on the memory region
 * to accommodate metadata requirements.  Very small heaps will be
 * padded to fit.
 *
 * @param name Symbol name for the struct k_heap object
 * @param bytes Size of memory region, in bytes
 * @param in_section __attribute__((section(name)))
 */
#define Z_HEAP_DEFINE_IN_SECT(name, bytes, in_section)		\
	char in_section						\
	     __aligned(8) /* CHUNK_UNIT */			\
	     kheap_##name[MAX(bytes, Z_HEAP_MIN_SIZE)];		\
	STRUCT_SECTION_ITERABLE(k_heap, name) = {		\
		.heap = {					\
			.init_mem = kheap_##name,		\
			.init_bytes = MAX(bytes, Z_HEAP_MIN_SIZE), \
		 },						\
	}

/**
 * @brief Define a static k_heap
 *
 * This macro defines and initializes a static memory region and
 * k_heap of the requested size.  After kernel start, &name can be
 * used as if k_heap_init() had been called.
 *
 * Note that this macro enforces a minimum size on the memory region
 * to accommodate metadata requirements.  Very small heaps will be
 * padded to fit.
 *
 * @param name Symbol name for the struct k_heap object
 * @param bytes Size of memory region, in bytes
 */
#define K_HEAP_DEFINE(name, bytes)				\
	Z_HEAP_DEFINE_IN_SECT(name, bytes,			\
			      __noinit_named(kheap_buf_##name))
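
/*
 * Example (illustrative sketch): a 1 KiB statically defined heap with a
 * bounded wait on allocation. The name my_heap is hypothetical.
 *
 *	K_HEAP_DEFINE(my_heap, 1024);
 *
 *	void use_heap(void)
 *	{
 *		void *mem = k_heap_alloc(&my_heap, 128, K_MSEC(10));
 *
 *		if (mem != NULL) {
 *			// ... use the 128-byte buffer ...
 *			k_heap_free(&my_heap, mem);
 *		}
 *	}
 */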

/**
 * @brief Define a static k_heap in uncached memory
 *
 * This macro defines and initializes a static memory region and
 * k_heap of the requested size in uncached memory.  After kernel
 * start, &name can be used as if k_heap_init() had been called.
 *
 * Note that this macro enforces a minimum size on the memory region
 * to accommodate metadata requirements.  Very small heaps will be
 * padded to fit.
 *
 * @param name Symbol name for the struct k_heap object
 * @param bytes Size of memory region, in bytes
 */
#define K_HEAP_DEFINE_NOCACHE(name, bytes)			\
	Z_HEAP_DEFINE_IN_SECT(name, bytes, __nocache)

/**
 * @}
 */

/**
 * @defgroup heap_apis Heap APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * @brief Allocate memory from the heap with a specified alignment.
 *
 * This routine provides semantics similar to aligned_alloc(); memory is
 * allocated from the heap with a specified alignment. However, one minor
 * difference is that k_aligned_alloc() accepts any non-zero @p size,
 * whereas aligned_alloc() only accepts a @p size that is an integral
 * multiple of @p align.
 *
 * Above, aligned_alloc() refers to:
 * C11 standard (ISO/IEC 9899:2011): 7.22.3.1
 * The aligned_alloc function (p: 347-348)
 *
 * @param align Alignment of memory requested (in bytes).
 * @param size Amount of memory requested (in bytes).
 *
 * @return Address of the allocated memory if successful; otherwise NULL.
 */
extern void *k_aligned_alloc(size_t align, size_t size);

/**
 * @brief Allocate memory from the heap.
 *
 * This routine provides traditional malloc() semantics. Memory is
 * allocated from the heap memory pool.
 * Allocated memory is aligned on a multiple of pointer sizes.
 *
 * @param size Amount of memory requested (in bytes).
 *
 * @return Address of the allocated memory if successful; otherwise NULL.
 */
extern void *k_malloc(size_t size);

/**
 * @brief Free memory allocated from heap.
 *
 * This routine provides traditional free() semantics. The memory being
 * returned must have been allocated from the heap memory pool.
 *
 * If @a ptr is NULL, no operation is performed.
 *
 * @param ptr Pointer to previously allocated memory.
 */
extern void k_free(void *ptr);

/**
 * @brief Allocate memory from heap, array style
 *
 * This routine provides traditional calloc() semantics. Memory is
 * allocated from the heap memory pool and zeroed.
 *
 * @param nmemb Number of elements in the requested array
 * @param size Size of each array element (in bytes).
 *
 * @return Address of the allocated memory if successful; otherwise NULL.
 */
extern void *k_calloc(size_t nmemb, size_t size);
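
/*
 * Example (illustrative sketch): classic malloc()/calloc()/free() usage
 * against the system heap. This assumes a system heap has been configured
 * (e.g. a non-zero CONFIG_HEAP_MEM_POOL_SIZE); otherwise both allocation
 * calls return NULL.
 *
 *	void system_heap_user(void)
 *	{
 *		uint32_t *one = k_malloc(sizeof(uint32_t));
 *		uint32_t *ten = k_calloc(10, sizeof(uint32_t));	// zeroed
 *
 *		// ... use the buffers, checking for NULL first ...
 *
 *		// k_free(NULL) is a no-op, so both may be freed unconditionally
 *		k_free(one);
 *		k_free(ten);
 *	}
 */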

/** @} */

/* polling API - PRIVATE */

#ifdef CONFIG_POLL
#define _INIT_OBJ_POLL_EVENT(obj) do { (obj)->poll_event = NULL; } while (false)
#else
#define _INIT_OBJ_POLL_EVENT(obj) do { } while (false)
#endif

/* private - types bit positions */
enum _poll_types_bits {
	/* can be used to ignore an event */
	_POLL_TYPE_IGNORE,

	/* to be signaled by k_poll_signal_raise() */
	_POLL_TYPE_SIGNAL,

	/* semaphore availability */
	_POLL_TYPE_SEM_AVAILABLE,

	/* queue/FIFO/LIFO data availability */
	_POLL_TYPE_DATA_AVAILABLE,

	/* msgq data availability */
	_POLL_TYPE_MSGQ_DATA_AVAILABLE,

	/* pipe data availability */
	_POLL_TYPE_PIPE_DATA_AVAILABLE,

	_POLL_NUM_TYPES
};

#define Z_POLL_TYPE_BIT(type) (1U << ((type) - 1U))

/* private - states bit positions */
enum _poll_states_bits {
	/* default state when creating event */
	_POLL_STATE_NOT_READY,

	/* signaled by k_poll_signal_raise() */
	_POLL_STATE_SIGNALED,

	/* semaphore is available */
	_POLL_STATE_SEM_AVAILABLE,

	/* data is available to read on queue/FIFO/LIFO */
	_POLL_STATE_DATA_AVAILABLE,

	/* queue/FIFO/LIFO wait was cancelled */
	_POLL_STATE_CANCELLED,

	/* data is available to read on a message queue */
	_POLL_STATE_MSGQ_DATA_AVAILABLE,

	/* data is available to read from a pipe */
	_POLL_STATE_PIPE_DATA_AVAILABLE,

	_POLL_NUM_STATES
};

#define Z_POLL_STATE_BIT(state) (1U << ((state) - 1U))

#define _POLL_EVENT_NUM_UNUSED_BITS \
	(32 - (0 \
	       + 8 /* tag */ \
	       + _POLL_NUM_TYPES \
	       + _POLL_NUM_STATES \
	       + 1 /* modes */ \
	      ))

/* end of polling API - PRIVATE */


/**
 * @defgroup poll_apis Async polling APIs
 * @ingroup kernel_apis
 * @{
 */

/* Public polling API */

/* public - values for k_poll_event.type bitfield */
#define K_POLL_TYPE_IGNORE 0
#define K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
#define K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
#define K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
#define K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE
#define K_POLL_TYPE_MSGQ_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_MSGQ_DATA_AVAILABLE)
#define K_POLL_TYPE_PIPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_PIPE_DATA_AVAILABLE)

/* public - polling modes */
enum k_poll_modes {
	/* polling thread does not take ownership of objects when available */
	K_POLL_MODE_NOTIFY_ONLY = 0,

	K_POLL_NUM_MODES
};

/* public - values for k_poll_event.state bitfield */
#define K_POLL_STATE_NOT_READY 0
#define K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED)
#define K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
#define K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
#define K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE
#define K_POLL_STATE_MSGQ_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_MSGQ_DATA_AVAILABLE)
#define K_POLL_STATE_PIPE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_PIPE_DATA_AVAILABLE)
#define K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED)

/* public - poll signal object */
struct k_poll_signal {
	/** PRIVATE - DO NOT TOUCH */
	sys_dlist_t poll_events;

	/**
	 * 1 if the event has been signaled, 0 otherwise. Stays set to 1 until
	 * user resets it to 0.
	 */
	unsigned int signaled;

	/** custom result value passed to k_poll_signal_raise() if needed */
	int result;
};

#define K_POLL_SIGNAL_INITIALIZER(obj) \
	{ \
	.poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events), \
	.signaled = 0, \
	.result = 0, \
	}

/**
 * @brief Poll Event
 *
 */
struct k_poll_event {
	/** PRIVATE - DO NOT TOUCH */
	sys_dnode_t _node;

	/** PRIVATE - DO NOT TOUCH */
	struct z_poller *poller;

	/** optional user-specified tag, opaque, untouched by the API */
	uint32_t tag:8;

	/** bitfield of event types (bitwise-ORed K_POLL_TYPE_xxx values) */
	uint32_t type:_POLL_NUM_TYPES;

	/** bitfield of event states (bitwise-ORed K_POLL_STATE_xxx values) */
	uint32_t state:_POLL_NUM_STATES;

	/** mode of operation, from enum k_poll_modes */
	uint32_t mode:1;

	/** unused bits in 32-bit word */
	uint32_t unused:_POLL_EVENT_NUM_UNUSED_BITS;

	/** per-type data */
	union {
		void *obj;
		struct k_poll_signal *signal;
		struct k_sem *sem;
		struct k_fifo *fifo;
		struct k_queue *queue;
		struct k_msgq *msgq;
#ifdef CONFIG_PIPES
		struct k_pipe *pipe;
#endif
	};
};

#define K_POLL_EVENT_INITIALIZER(_event_type, _event_mode, _event_obj) \
	{ \
	.poller = NULL, \
	.type = _event_type, \
	.state = K_POLL_STATE_NOT_READY, \
	.mode = _event_mode, \
	.unused = 0, \
	{ \
		.obj = _event_obj, \
	}, \
	}

#define K_POLL_EVENT_STATIC_INITIALIZER(_event_type, _event_mode, _event_obj, \
					event_tag) \
	{ \
	.tag = event_tag, \
	.type = _event_type, \
	.state = K_POLL_STATE_NOT_READY, \
	.mode = _event_mode, \
	.unused = 0, \
	{ \
		.obj = _event_obj, \
	}, \
	}

/**
 * @brief Initialize one struct k_poll_event instance
 *
 * After this routine is called on a poll event, the event is ready to be
 * placed in an event array to be passed to k_poll().
 *
 * @param event The event to initialize.
 * @param type A bitfield of the types of event, from the K_POLL_TYPE_xxx
 *             values. Only values that apply to the same object being polled
 *             can be used together. Choosing K_POLL_TYPE_IGNORE disables the
 *             event.
 * @param mode Future. Use K_POLL_MODE_NOTIFY_ONLY.
 * @param obj Kernel object or poll signal.
 */

extern void k_poll_event_init(struct k_poll_event *event, uint32_t type,
			      int mode, void *obj);

/**
 * @brief Wait for one or many of multiple poll events to occur
 *
 * This routine allows a thread to wait concurrently for one or many of
 * multiple poll events to have occurred. Such events can be a kernel object
 * being available, like a semaphore, or a poll signal event.
 *
 * When an event notifies that a kernel object is available, the kernel object
 * is not "given" to the thread calling k_poll(): it merely signals the fact
 * that the object was available when the k_poll() call was in effect. Also,
 * all threads trying to acquire an object the regular way, i.e. by pending on
 * the object, have precedence over the thread polling on the object. This
 * means that the polling thread will never get the poll event on an object
 * until the object becomes available and its pend queue is empty. For this
 * reason, the k_poll() call is more effective when the objects being polled
 * only have one thread, the polling thread, trying to acquire them.
 *
 * When k_poll() returns 0, the caller should loop on all the events that were
 * passed to k_poll() and check the state field for the values that were
 * expected and take the associated actions.
 *
 * Before being reused for another call to k_poll(), the user has to reset the
 * state field to K_POLL_STATE_NOT_READY.
 *
 * When called from user mode, a temporary memory allocation is required from
 * the caller's resource pool.
 *
 * @param events An array of events to be polled for.
 * @param num_events The number of events in the array.
 * @param timeout Waiting period for an event to be ready,
 *                or one of the special values K_NO_WAIT and K_FOREVER.
 *
 * @retval 0 One or more events are ready.
 * @retval -EAGAIN Waiting period timed out.
 * @retval -EINTR Polling has been interrupted, e.g. with
 *         k_queue_cancel_wait(). All output events are still set and valid,
 *         cancelled event(s) will be set to K_POLL_STATE_CANCELLED. In other
 *         words, -EINTR status means that at least one of output events is
 *         K_POLL_STATE_CANCELLED.
 * @retval -ENOMEM Thread resource pool insufficient memory (user mode only)
 * @retval -EINVAL Bad parameters (user mode only)
 */

__syscall int k_poll(struct k_poll_event *events, int num_events,
		     k_timeout_t timeout);
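
/*
 * Example (illustrative sketch): waiting on either a semaphore or a FIFO
 * with one k_poll() call. The names my_sem and my_fifo are hypothetical and
 * assumed to be initialized elsewhere.
 *
 *	extern struct k_sem my_sem;
 *	extern struct k_fifo my_fifo;
 *
 *	void poll_two_objects(void)
 *	{
 *		struct k_poll_event events[2] = {
 *			K_POLL_EVENT_STATIC_INITIALIZER(
 *				K_POLL_TYPE_SEM_AVAILABLE,
 *				K_POLL_MODE_NOTIFY_ONLY, &my_sem, 0),
 *			K_POLL_EVENT_STATIC_INITIALIZER(
 *				K_POLL_TYPE_FIFO_DATA_AVAILABLE,
 *				K_POLL_MODE_NOTIFY_ONLY, &my_fifo, 0),
 *		};
 *
 *		if (k_poll(events, 2, K_FOREVER) == 0) {
 *			if (events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
 *				// polling does not acquire: take it now
 *				(void)k_sem_take(&my_sem, K_NO_WAIT);
 *			}
 *			if (events[1].state ==
 *			    K_POLL_STATE_FIFO_DATA_AVAILABLE) {
 *				void *item = k_fifo_get(&my_fifo, K_NO_WAIT);
 *				// ... process item ...
 *			}
 *			// states must be reset before polling again
 *			events[0].state = K_POLL_STATE_NOT_READY;
 *			events[1].state = K_POLL_STATE_NOT_READY;
 *		}
 *	}
 */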

/**
 * @brief Initialize a poll signal object.
 *
 * Ready a poll signal object to be signaled via k_poll_signal_raise().
 *
 * @param sig A poll signal.
 */

__syscall void k_poll_signal_init(struct k_poll_signal *sig);

/**
 * @brief Reset a poll signal object's state to unsignaled.
 *
 * @param sig A poll signal object
 */
__syscall void k_poll_signal_reset(struct k_poll_signal *sig);

/**
 * @brief Fetch the signaled state and result value of a poll signal
 *
 * @param sig A poll signal object
 * @param signaled An integer buffer which will be written nonzero if the
 *		   object was signaled
 * @param result An integer destination buffer which will be written with the
 *		   result value if the object was signaled, or an undefined
 *		   value if it was not.
 */
__syscall void k_poll_signal_check(struct k_poll_signal *sig,
				   unsigned int *signaled, int *result);

/**
 * @brief Signal a poll signal object.
 *
 * This routine makes ready a poll signal, which is basically a poll event of
 * type K_POLL_TYPE_SIGNAL. If a thread was polling on that event, it will be
 * made ready to run. A @a result value can be specified.
 *
 * The poll signal contains a 'signaled' field that, when set by
 * k_poll_signal_raise(), stays set until the user sets it back to 0 with
 * k_poll_signal_reset(). It thus has to be reset by the user before being
 * passed again to k_poll() or k_poll() will consider it being signaled, and
 * will return immediately.
 *
 * @note The result is stored and the 'signaled' field is set even if
 * this function returns an error indicating that an expiring poll was
 * not notified.  The next k_poll() will detect the missed raise.
 *
 * @param sig A poll signal.
 * @param result The value to store in the result field of the signal.
 *
 * @retval 0 The signal was delivered successfully.
 * @retval -EAGAIN The polling thread's timeout is in the process of expiring.
 */

__syscall int k_poll_signal_raise(struct k_poll_signal *sig, int result);
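
/*
 * Example (illustrative sketch): one context raising a poll signal that
 * another context waits on and then rearms. The name my_signal is
 * hypothetical.
 *
 *	static struct k_poll_signal my_signal =
 *		K_POLL_SIGNAL_INITIALIZER(my_signal);
 *
 *	void waiter(void)
 *	{
 *		struct k_poll_event event = K_POLL_EVENT_INITIALIZER(
 *			K_POLL_TYPE_SIGNAL, K_POLL_MODE_NOTIFY_ONLY,
 *			&my_signal);
 *
 *		(void)k_poll(&event, 1, K_FOREVER);
 *		// 'signaled' stays set until reset, so rearm for next time
 *		k_poll_signal_reset(&my_signal);
 *		event.state = K_POLL_STATE_NOT_READY;
 *	}
 *
 *	void notifier(int result)
 *	{
 *		(void)k_poll_signal_raise(&my_signal, result);
 *	}
 */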

/** @} */

/**
 * @defgroup cpu_idle_apis CPU Idling APIs
 * @ingroup kernel_apis
 * @{
 */
/**
 * @brief Make the CPU idle.
 *
 * This function makes the CPU idle until an event wakes it up.
 *
 * In a regular system, the idle thread should be the only thread responsible
 * for making the CPU idle and triggering any type of power management.
 * However, in some more constrained systems, such as a single-threaded system,
 * the only thread would be responsible for this if needed.
 *
 * @note In some architectures, before returning, the function unmasks interrupts
 * unconditionally.
 */
static inline void k_cpu_idle(void)
{
	arch_cpu_idle();
}

/**
 * @brief Make the CPU idle in an atomic fashion.
 *
 * Similar to k_cpu_idle(), but must be called with interrupts locked.
 *
 * Enabling interrupts and entering a low-power mode will be atomic,
 * i.e. there will be no period of time where interrupts are enabled before
 * the processor enters a low-power mode.
 *
 * After waking up from the low-power mode, the interrupt lockout state will
 * be restored as if by irq_unlock(key).
 *
 * @param key Interrupt locking key obtained from irq_lock().
 */
static inline void k_cpu_atomic_idle(unsigned int key)
{
	arch_cpu_atomic_idle(key);
}

/**
 * @}
 */

/**
 * @cond INTERNAL_HIDDEN
 * @internal
 */
#ifdef ARCH_EXCEPT
/* This architecture has direct support for triggering a CPU exception */
#define z_except_reason(reason)	ARCH_EXCEPT(reason)
#else

#if !defined(CONFIG_ASSERT_NO_FILE_INFO)
#define __EXCEPT_LOC() __ASSERT_PRINT("@ %s:%d\n", __FILE__, __LINE__)
#else
#define __EXCEPT_LOC()
#endif

/* NOTE: This is the implementation for arches that do not implement
 * ARCH_EXCEPT() to generate a real CPU exception.
 *
 * We won't have a real exception frame to determine the PC value when
 * the oops occurred, so print file and line number before we jump into
 * the fatal error handler.
 */
#define z_except_reason(reason) do { \
		__EXCEPT_LOC();              \
		z_fatal_error(reason, NULL); \
	} while (false)

#endif /* ARCH_EXCEPT */
/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @brief Fatally terminate a thread
 *
 * This should be called when a thread has encountered an unrecoverable
 * runtime condition and needs to terminate. What this ultimately
 * means is determined by the _fatal_error_handler() implementation, which
 * will be called with reason code K_ERR_KERNEL_OOPS.
 *
 * If this is called from ISR context, the default system fatal error handler
 * will treat it as an unrecoverable system error, just like k_panic().
 */
#define k_oops()	z_except_reason(K_ERR_KERNEL_OOPS)

/**
 * @brief Fatally terminate the system
 *
 * This should be called when the Zephyr kernel has encountered an
 * unrecoverable runtime condition and needs to terminate. What this ultimately
 * means is determined by the _fatal_error_handler() implementation, which
 * will be called with reason code K_ERR_KERNEL_PANIC.
 */
#define k_panic()	z_except_reason(K_ERR_KERNEL_PANIC)

/**
 * @cond INTERNAL_HIDDEN
 */

/*
 * private APIs that are utilized by one or more public APIs
 */

/**
 * @internal
 */
#ifdef CONFIG_MULTITHREADING
/**
 * @internal
 */
extern void z_init_static_threads(void);
#else
/**
 * @internal
 */
#define z_init_static_threads() do { } while (false)
#endif

/**
 * @internal
 */
extern void z_timer_expiration_handler(struct _timeout *t);
/**
 * INTERNAL_HIDDEN @endcond
 */

#ifdef CONFIG_PRINTK
/**
 * @brief Emit a character buffer to the console device
 *
 * @param c String of characters to print
 * @param n The length of the string
 *
 */
__syscall void k_str_out(char *c, size_t n);
#endif

/**
 * @brief Disable preservation of floating point context information.
 *
 * This routine informs the kernel that the specified thread
 * will no longer be using the floating point registers.
 *
 * @warning
 * Some architectures apply restrictions on how the disabling of floating
 * point preservation may be requested, see arch_float_disable.
 *
 * @warning
 * This routine should only be used to disable floating point support for
 * a thread that currently has such support enabled.
 *
 * @param thread ID of thread.
 *
 * @retval 0        On success.
 * @retval -ENOTSUP If the floating point disabling is not implemented.
 * @retval -EINVAL  If the floating point disabling could not be performed.
 */
__syscall int k_float_disable(struct k_thread *thread);
/**
 * @brief Enable preservation of floating point context information.
 *
 * This routine informs the kernel that the specified thread
 * will use the floating point registers.
 *
 * Invoking this routine initializes the thread's floating point context info
 * to that of an FPU that has been reset. The next time the thread is scheduled
 * by z_swap() it will either inherit an FPU that is guaranteed to be in a
 * "sane" state (if the most recent user of the FPU was cooperatively swapped
 * out) or the thread's own floating point context will be loaded (if the most
 * recent user of the FPU was preempted, or if this thread is the first user
 * of the FPU). Thereafter, the kernel will protect the thread's FP context
 * so that it is not altered during a preemptive context switch.
 *
 * The @a options parameter indicates which floating point register sets will
 * be used by the specified thread.
 *
 * For x86 options:
 *
 * - K_FP_REGS  indicates x87 FPU and MMX registers only
 * - K_SSE_REGS indicates SSE registers (and also x87 FPU and MMX registers)
 *
 * @warning
 * Some architectures apply restrictions on how the enabling of floating
 * point preservation may be requested, see arch_float_enable.
 *
 * @warning
 * This routine should only be used to enable floating point support for
 * a thread that does not currently have such support enabled.
 *
 * @param thread  ID of thread.
 * @param options architecture dependent options
 *
 * @retval 0        On success.
 * @retval -ENOTSUP If the floating point enabling is not implemented.
 * @retval -EINVAL  If the floating point enabling could not be performed.
 */
__syscall int k_float_enable(struct k_thread *thread, unsigned int options);

/**
 * @brief Get the runtime statistics of a thread
 *
 * @param thread ID of thread.
 * @param stats Pointer to struct to copy statistics into.
 * @return -EINVAL if null pointers, otherwise 0
 */
int k_thread_runtime_stats_get(k_tid_t thread,
			       k_thread_runtime_stats_t *stats);
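
/*
 * Example (illustrative sketch): reading a thread's cumulative execution
 * cycles. This assumes runtime statistics are enabled
 * (CONFIG_THREAD_RUNTIME_STATS) and that the stats structure exposes an
 * execution_cycles counter, as the basic statistics do.
 *
 *	void show_thread_usage(k_tid_t tid)
 *	{
 *		k_thread_runtime_stats_t stats;
 *
 *		if (k_thread_runtime_stats_get(tid, &stats) == 0) {
 *			printk("execution cycles: %llu\n",
 *			       stats.execution_cycles);
 *		}
 *	}
 */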

/**
 * @brief Get the runtime statistics of all threads
 *
 * @param stats Pointer to struct to copy statistics into.
 * @return -EINVAL if null pointers, otherwise 0
 */
int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats);

/**
 * @brief Enable gathering of runtime statistics for specified thread
 *
 * This routine enables the gathering of runtime statistics for the specified
 * thread.
 *
 * @param thread ID of thread
 * @return -EINVAL if invalid thread ID, otherwise 0
 */
extern int k_thread_runtime_stats_enable(k_tid_t thread);

/**
 * @brief Disable gathering of runtime statistics for specified thread
 *
 * This routine disables the gathering of runtime statistics for the specified
 * thread.
 *
 * @param thread ID of thread
 * @return -EINVAL if invalid thread ID, otherwise 0
 */
extern int k_thread_runtime_stats_disable(k_tid_t thread);

/**
 * @brief Enable gathering of system runtime statistics
 *
 * This routine enables the gathering of system runtime statistics. Note that
 * it does not affect the gathering of similar statistics for individual
 * threads.
 */
extern void k_sys_runtime_stats_enable(void);

/**
 * @brief Disable gathering of system runtime statistics
 *
 * This routine disables the gathering of system runtime statistics. Note that
 * it does not affect the gathering of similar statistics for individual
 * threads.
 */
extern void k_sys_runtime_stats_disable(void);

#ifdef __cplusplus
}
#endif

#include <zephyr/tracing/tracing.h>
#include <syscalls/kernel.h>

#endif /* !_ASMLANGUAGE */

#endif /* ZEPHYR_INCLUDE_KERNEL_H_ */