1 /*
2  * Copyright (c) 2016, Wind River Systems, Inc.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 /**
8  * @file
9  *
10  * @brief Public kernel APIs.
11  */
12 
13 #ifndef ZEPHYR_INCLUDE_KERNEL_H_
14 #define ZEPHYR_INCLUDE_KERNEL_H_
15 
16 #if !defined(_ASMLANGUAGE)
17 #include <zephyr/kernel_includes.h>
18 #include <errno.h>
19 #include <limits.h>
20 #include <stdbool.h>
21 #include <zephyr/toolchain.h>
22 #include <zephyr/tracing/tracing_macros.h>
23 #include <zephyr/sys/mem_stats.h>
24 #include <zephyr/sys/iterable_sections.h>
25 #include <zephyr/sys/ring_buffer.h>
26 
27 #ifdef __cplusplus
28 extern "C" {
29 #endif
30 
31 /*
32  * Zephyr currently assumes the size of a couple standard types to simplify
33  * print string formats. Let's make sure this doesn't change without notice.
34  */
35 BUILD_ASSERT(sizeof(int32_t) == sizeof(int));
36 BUILD_ASSERT(sizeof(int64_t) == sizeof(long long));
37 BUILD_ASSERT(sizeof(intptr_t) == sizeof(long));
38 
39 /**
40  * @brief Kernel APIs
41  * @defgroup kernel_apis Kernel APIs
42  * @since 1.0
43  * @version 1.0.0
44  * @{
45  * @}
46  */
47 
48 #define K_ANY NULL
49 
50 #if (CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES) == 0
51 #error Zero available thread priorities defined!
52 #endif
53 
54 #define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
55 #define K_PRIO_PREEMPT(x) (x)
56 
57 #define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
58 #define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
59 #define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
60 #define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
61 #define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)
62 
63 #ifdef CONFIG_POLL
64 #define Z_POLL_EVENT_OBJ_INIT(obj) \
65 	.poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
66 #define Z_DECL_POLL_EVENT sys_dlist_t poll_events;
67 #else
68 #define Z_POLL_EVENT_OBJ_INIT(obj)
69 #define Z_DECL_POLL_EVENT
70 #endif
71 
72 struct k_thread;
73 struct k_mutex;
74 struct k_sem;
75 struct k_msgq;
76 struct k_mbox;
77 struct k_pipe;
78 struct k_queue;
79 struct k_fifo;
80 struct k_lifo;
81 struct k_stack;
82 struct k_mem_slab;
83 struct k_timer;
84 struct k_poll_event;
85 struct k_poll_signal;
86 struct k_mem_domain;
87 struct k_mem_partition;
88 struct k_futex;
89 struct k_event;
90 
91 enum execution_context_types {
92 	K_ISR = 0,
93 	K_COOP_THREAD,
94 	K_PREEMPT_THREAD,
95 };
96 
97 /* private, used by k_poll and k_work_poll */
98 struct k_work_poll;
99 typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);
100 
101 /**
102  * @addtogroup thread_apis
103  * @{
104  */
105 
106 typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
107 				   void *user_data);
108 
109 /**
110  * @brief Iterate over all the threads in the system.
111  *
112  * This routine iterates over all the threads in the system and
113  * calls the user_cb function for each thread.
114  *
115  * @param user_cb Pointer to the user callback function.
116  * @param user_data Pointer to user data.
117  *
118  * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
119  * to be effective.
120  * @note This API uses @ref k_spin_lock to protect the _kernel.threads
121  * list which means creation of new threads and terminations of existing
122  * threads are blocked until this API returns.
123  */
124 void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);
125 
126 /**
127  * @brief Iterate over all the threads in running on specified cpu.
128  *
129  * This function is does otherwise the same thing as k_thread_foreach(),
130  * but it only loops through the threads running on specified cpu only.
131  * If CONFIG_SMP is not defined the implementation this is the same as
132  * k_thread_foreach(), with an assert cpu == 0.
133  *
134  * @param cpu The filtered cpu number
135  * @param user_cb Pointer to the user callback function.
136  * @param user_data Pointer to user data.
137  *
138  * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
139  * to be effective.
140  * @note This API uses @ref k_spin_lock to protect the _kernel.threads
141  * list which means creation of new threads and terminations of existing
142  * threads are blocked until this API returns.
143  */
144 #ifdef CONFIG_SMP
145 void k_thread_foreach_filter_by_cpu(unsigned int cpu,
146 				    k_thread_user_cb_t user_cb, void *user_data);
147 #else
148 static inline
k_thread_foreach_filter_by_cpu(unsigned int cpu,k_thread_user_cb_t user_cb,void * user_data)149 void k_thread_foreach_filter_by_cpu(unsigned int cpu,
150 				    k_thread_user_cb_t user_cb, void *user_data)
151 {
152 	__ASSERT(cpu == 0, "cpu filter out of bounds");
153 	ARG_UNUSED(cpu);
154 	k_thread_foreach(user_cb, user_data);
155 }
156 #endif
157 
158 /**
159  * @brief Iterate over all the threads in the system without locking.
160  *
161  * This routine works exactly the same like @ref k_thread_foreach
162  * but unlocks interrupts when user_cb is executed.
163  *
164  * @param user_cb Pointer to the user callback function.
165  * @param user_data Pointer to user data.
166  *
167  * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
168  * to be effective.
169  * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
170  * queue elements. It unlocks it during user callback function processing.
171  * If a new task is created when this @c foreach function is in progress,
172  * the added new task would not be included in the enumeration.
173  * If a task is aborted during this enumeration, there would be a race here
174  * and there is a possibility that this aborted task would be included in the
175  * enumeration.
176  * @note If the task is aborted and the memory occupied by its @c k_thread
177  * structure is reused when this @c k_thread_foreach_unlocked is in progress
178  * it might even lead to the system behave unstable.
179  * This function may never return, as it would follow some @c next task
180  * pointers treating given pointer as a pointer to the k_thread structure
181  * while it is something different right now.
182  * Do not reuse the memory that was occupied by k_thread structure of aborted
183  * task if it was aborted after this function was called in any context.
184  */
185 void k_thread_foreach_unlocked(
186 	k_thread_user_cb_t user_cb, void *user_data);
187 
188 /**
189  * @brief Iterate over the threads in running on current cpu without locking.
190  *
191  * This function does otherwise the same thing as
192  * k_thread_foreach_unlocked(), but it only loops through the threads
193  * running on specified cpu. If CONFIG_SMP is not defined the
194  * implementation this is the same as k_thread_foreach_unlocked(), with an
195  * assert requiring cpu == 0.
196  *
197  * @param cpu The filtered cpu number
198  * @param user_cb Pointer to the user callback function.
199  * @param user_data Pointer to user data.
200  *
201  * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
202  * to be effective.
203  * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
204  * queue elements. It unlocks it during user callback function processing.
205  * If a new task is created when this @c foreach function is in progress,
206  * the added new task would not be included in the enumeration.
207  * If a task is aborted during this enumeration, there would be a race here
208  * and there is a possibility that this aborted task would be included in the
209  * enumeration.
210  * @note If the task is aborted and the memory occupied by its @c k_thread
211  * structure is reused when this @c k_thread_foreach_unlocked is in progress
212  * it might even lead to the system behave unstable.
213  * This function may never return, as it would follow some @c next task
214  * pointers treating given pointer as a pointer to the k_thread structure
215  * while it is something different right now.
216  * Do not reuse the memory that was occupied by k_thread structure of aborted
217  * task if it was aborted after this function was called in any context.
218  */
219 #ifdef CONFIG_SMP
220 void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu,
221 					     k_thread_user_cb_t user_cb, void *user_data);
222 #else
223 static inline
k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu,k_thread_user_cb_t user_cb,void * user_data)224 void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu,
225 					     k_thread_user_cb_t user_cb, void *user_data)
226 {
227 	__ASSERT(cpu == 0, "cpu filter out of bounds");
228 	ARG_UNUSED(cpu);
229 	k_thread_foreach_unlocked(user_cb, user_data);
230 }
231 #endif
232 
233 /** @} */
234 
235 /**
236  * @defgroup thread_apis Thread APIs
237  * @ingroup kernel_apis
238  * @{
239  */
240 
241 #endif /* !_ASMLANGUAGE */
242 
243 
244 /*
245  * Thread user options. May be needed by assembly code. Common part uses low
246  * bits, arch-specific use high bits.
247  */
248 
249 /**
250  * @brief system thread that must not abort
251  * */
252 #define K_ESSENTIAL (BIT(0))
253 
254 #define K_FP_IDX 1
255 /**
256  * @brief FPU registers are managed by context switch
257  *
258  * @details
259  * This option indicates that the thread uses the CPU's floating point
260  * registers. This instructs the kernel to take additional steps to save
261  * and restore the contents of these registers when scheduling the thread.
262  * No effect if @kconfig{CONFIG_FPU_SHARING} is not enabled.
263  */
264 #define K_FP_REGS (BIT(K_FP_IDX))
265 
266 /**
267  * @brief user mode thread
268  *
269  * This thread has dropped from supervisor mode to user mode and consequently
270  * has additional restrictions
271  */
272 #define K_USER (BIT(2))
273 
274 /**
275  * @brief Inherit Permissions
276  *
277  * @details
278  * Indicates that the thread being created should inherit all kernel object
279  * permissions from the thread that created it. No effect if
280  * @kconfig{CONFIG_USERSPACE} is not enabled.
281  */
282 #define K_INHERIT_PERMS (BIT(3))
283 
284 /**
285  * @brief Callback item state
286  *
287  * @details
288  * This is a single bit of state reserved for "callback manager"
289  * utilities (p4wq initially) who need to track operations invoked
290  * from within a user-provided callback they have been invoked.
291  * Effectively it serves as a tiny bit of zero-overhead TLS data.
292  */
293 #define K_CALLBACK_STATE (BIT(4))
294 
295 /**
296  * @brief DSP registers are managed by context switch
297  *
298  * @details
299  * This option indicates that the thread uses the CPU's DSP registers.
300  * This instructs the kernel to take additional steps to save and
301  * restore the contents of these registers when scheduling the thread.
302  * No effect if @kconfig{CONFIG_DSP_SHARING} is not enabled.
303  */
304 #define K_DSP_IDX 6
305 #define K_DSP_REGS (BIT(K_DSP_IDX))
306 
307 /**
308  * @brief AGU registers are managed by context switch
309  *
310  * @details
311  * This option indicates that the thread uses the ARC processor's XY
312  * memory and DSP feature. Often used with @kconfig{CONFIG_ARC_AGU_SHARING}.
313  * No effect if @kconfig{CONFIG_ARC_AGU_SHARING} is not enabled.
314  */
315 #define K_AGU_IDX 7
316 #define K_AGU_REGS (BIT(K_AGU_IDX))
317 
318 /**
319  * @brief FP and SSE registers are managed by context switch on x86
320  *
321  * @details
322  * This option indicates that the thread uses the x86 CPU's floating point
323  * and SSE registers. This instructs the kernel to take additional steps to
324  * save and restore the contents of these registers when scheduling
325  * the thread. No effect if @kconfig{CONFIG_X86_SSE} is not enabled.
326  */
327 #define K_SSE_REGS (BIT(7))
328 
329 /* end - thread options */
330 
331 #if !defined(_ASMLANGUAGE)
332 /**
333  * @brief Dynamically allocate a thread stack.
334  *
335  * Relevant stack creation flags include:
336  * - @ref K_USER allocate a userspace thread (requires `CONFIG_USERSPACE=y`)
337  *
338  * @param size Stack size in bytes.
339  * @param flags Stack creation flags, or 0.
340  *
341  * @retval the allocated thread stack on success.
342  * @retval NULL on failure.
343  *
344  * @see CONFIG_DYNAMIC_THREAD
345  */
346 __syscall k_thread_stack_t *k_thread_stack_alloc(size_t size, int flags);
347 
348 /**
349  * @brief Free a dynamically allocated thread stack.
350  *
351  * @param stack Pointer to the thread stack.
352  *
353  * @retval 0 on success.
354  * @retval -EBUSY if the thread stack is in use.
355  * @retval -EINVAL if @p stack is invalid.
356  * @retval -ENOSYS if dynamic thread stack allocation is disabled
357  *
358  * @see CONFIG_DYNAMIC_THREAD
359  */
360 __syscall int k_thread_stack_free(k_thread_stack_t *stack);
361 
362 /**
363  * @brief Create a thread.
364  *
365  * This routine initializes a thread, then schedules it for execution.
366  *
367  * The new thread may be scheduled for immediate execution or a delayed start.
368  * If the newly spawned thread does not have a delayed start the kernel
369  * scheduler may preempt the current thread to allow the new thread to
370  * execute.
371  *
372  * Thread options are architecture-specific, and can include K_ESSENTIAL,
373  * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
374  * them using "|" (the logical OR operator).
375  *
376  * Stack objects passed to this function must be originally defined with
377  * either of these macros in order to be portable:
378  *
379  * - K_THREAD_STACK_DEFINE() - For stacks that may support either user or
380  *   supervisor threads.
381  * - K_KERNEL_STACK_DEFINE() - For stacks that may support supervisor
382  *   threads only. These stacks use less memory if CONFIG_USERSPACE is
383  *   enabled.
384  *
385  * The stack_size parameter has constraints. It must either be:
386  *
387  * - The original size value passed to K_THREAD_STACK_DEFINE() or
388  *   K_KERNEL_STACK_DEFINE()
389  * - The return value of K_THREAD_STACK_SIZEOF(stack) if the stack was
390  *   defined with K_THREAD_STACK_DEFINE()
391  * - The return value of K_KERNEL_STACK_SIZEOF(stack) if the stack was
392  *   defined with K_KERNEL_STACK_DEFINE().
393  *
394  * Using other values, or sizeof(stack) may produce undefined behavior.
395  *
396  * @param new_thread Pointer to uninitialized struct k_thread
397  * @param stack Pointer to the stack space.
398  * @param stack_size Stack size in bytes.
399  * @param entry Thread entry function.
400  * @param p1 1st entry point parameter.
401  * @param p2 2nd entry point parameter.
402  * @param p3 3rd entry point parameter.
403  * @param prio Thread priority.
404  * @param options Thread options.
405  * @param delay Scheduling delay, or K_NO_WAIT (for no delay).
406  *
407  * @return ID of new thread.
408  *
409  */
410 __syscall k_tid_t k_thread_create(struct k_thread *new_thread,
411 				  k_thread_stack_t *stack,
412 				  size_t stack_size,
413 				  k_thread_entry_t entry,
414 				  void *p1, void *p2, void *p3,
415 				  int prio, uint32_t options, k_timeout_t delay);
416 
417 /**
418  * @brief Drop a thread's privileges permanently to user mode
419  *
420  * This allows a supervisor thread to be re-used as a user thread.
421  * This function does not return, but control will transfer to the provided
422  * entry point as if this was a new user thread.
423  *
424  * The implementation ensures that the stack buffer contents are erased.
425  * Any thread-local storage will be reverted to a pristine state.
426  *
427  * Memory domain membership, resource pool assignment, kernel object
428  * permissions, priority, and thread options are preserved.
429  *
430  * A common use of this function is to re-use the main thread as a user thread
431  * once all supervisor mode-only tasks have been completed.
432  *
433  * @param entry Function to start executing from
434  * @param p1 1st entry point parameter
435  * @param p2 2nd entry point parameter
436  * @param p3 3rd entry point parameter
437  */
438 FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
439 						   void *p1, void *p2,
440 						   void *p3);
441 
442 /**
443  * @brief Grant a thread access to a set of kernel objects
444  *
445  * This is a convenience function. For the provided thread, grant access to
446  * the remaining arguments, which must be pointers to kernel objects.
447  *
448  * The thread object must be initialized (i.e. running). The objects don't
449  * need to be.
450  * Note that NULL shouldn't be passed as an argument.
451  *
452  * @param thread Thread to grant access to objects
453  * @param ... list of kernel object pointers
454  */
455 #define k_thread_access_grant(thread, ...) \
456 	FOR_EACH_FIXED_ARG(k_object_access_grant, (;), (thread), __VA_ARGS__)
457 
458 /**
459  * @brief Assign a resource memory pool to a thread
460  *
461  * By default, threads have no resource pool assigned unless their parent
462  * thread has a resource pool, in which case it is inherited. Multiple
463  * threads may be assigned to the same memory pool.
464  *
465  * Changing a thread's resource pool will not migrate allocations from the
466  * previous pool.
467  *
468  * @param thread Target thread to assign a memory pool for resource requests.
469  * @param heap Heap object to use for resources,
470  *             or NULL if the thread should no longer have a memory pool.
471  */
k_thread_heap_assign(struct k_thread * thread,struct k_heap * heap)472 static inline void k_thread_heap_assign(struct k_thread *thread,
473 					struct k_heap *heap)
474 {
475 	thread->resource_pool = heap;
476 }
477 
478 #if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
479 /**
480  * @brief Obtain stack usage information for the specified thread
481  *
482  * User threads will need to have permission on the target thread object.
483  *
484  * Some hardware may prevent inspection of a stack buffer currently in use.
485  * If this API is called from supervisor mode, on the currently running thread,
486  * on a platform which selects @kconfig{CONFIG_NO_UNUSED_STACK_INSPECTION}, an
487  * error will be generated.
488  *
489  * @param thread Thread to inspect stack information
490  * @param unused_ptr Output parameter, filled in with the unused stack space
491  *	of the target thread in bytes.
492  * @return 0 on success
493  * @return -EBADF Bad thread object (user mode only)
494  * @return -EPERM No permissions on thread object (user mode only)
495  * #return -ENOTSUP Forbidden by hardware policy
496  * @return -EINVAL Thread is uninitialized or exited (user mode only)
497  * @return -EFAULT Bad memory address for unused_ptr (user mode only)
498  */
499 __syscall int k_thread_stack_space_get(const struct k_thread *thread,
500 				       size_t *unused_ptr);
501 #endif
502 
503 #if (K_HEAP_MEM_POOL_SIZE > 0)
504 /**
505  * @brief Assign the system heap as a thread's resource pool
506  *
507  * Similar to k_thread_heap_assign(), but the thread will use
508  * the kernel heap to draw memory.
509  *
510  * Use with caution, as a malicious thread could perform DoS attacks on the
511  * kernel heap.
512  *
513  * @param thread Target thread to assign the system heap for resource requests
514  *
515  */
516 void k_thread_system_pool_assign(struct k_thread *thread);
517 #endif /* (K_HEAP_MEM_POOL_SIZE > 0) */
518 
519 /**
520  * @brief Sleep until a thread exits
521  *
522  * The caller will be put to sleep until the target thread exits, either due
523  * to being aborted, self-exiting, or taking a fatal error. This API returns
524  * immediately if the thread isn't running.
525  *
526  * This API may only be called from ISRs with a K_NO_WAIT timeout,
527  * where it can be useful as a predicate to detect when a thread has
528  * aborted.
529  *
530  * @param thread Thread to wait to exit
531  * @param timeout upper bound time to wait for the thread to exit.
532  * @retval 0 success, target thread has exited or wasn't running
533  * @retval -EBUSY returned without waiting
534  * @retval -EAGAIN waiting period timed out
535  * @retval -EDEADLK target thread is joining on the caller, or target thread
536  *                  is the caller
537  */
538 __syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);
539 
540 /**
541  * @brief Put the current thread to sleep.
542  *
543  * This routine puts the current thread to sleep for @a duration,
544  * specified as a k_timeout_t object.
545  *
546  * @param timeout Desired duration of sleep.
547  *
548  * @return Zero if the requested time has elapsed or the time left to
549  * sleep rounded up to the nearest millisecond (e.g. if the thread was
550  * awoken by the \ref k_wakeup call).  Will be clamped to INT_MAX in
551  * the case where the remaining time is unrepresentable in an int32_t.
552  */
553 __syscall int32_t k_sleep(k_timeout_t timeout);
554 
555 /**
556  * @brief Put the current thread to sleep.
557  *
558  * This routine puts the current thread to sleep for @a duration milliseconds.
559  *
560  * @param ms Number of milliseconds to sleep.
561  *
562  * @return Zero if the requested time has elapsed or if the thread was woken up
563  * by the \ref k_wakeup call, the time left to sleep rounded up to the nearest
564  * millisecond.
565  */
k_msleep(int32_t ms)566 static inline int32_t k_msleep(int32_t ms)
567 {
568 	return k_sleep(Z_TIMEOUT_MS(ms));
569 }
570 
571 /**
572  * @brief Put the current thread to sleep with microsecond resolution.
573  *
574  * This function is unlikely to work as expected without kernel tuning.
575  * In particular, because the lower bound on the duration of a sleep is
576  * the duration of a tick, @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} must be
577  * adjusted to achieve the resolution desired. The implications of doing
578  * this must be understood before attempting to use k_usleep(). Use with
579  * caution.
580  *
581  * @param us Number of microseconds to sleep.
582  *
583  * @return Zero if the requested time has elapsed or if the thread was woken up
584  * by the \ref k_wakeup call, the time left to sleep rounded up to the nearest
585  * microsecond.
586  */
587 __syscall int32_t k_usleep(int32_t us);
588 
589 /**
590  * @brief Cause the current thread to busy wait.
591  *
592  * This routine causes the current thread to execute a "do nothing" loop for
593  * @a usec_to_wait microseconds.
594  *
595  * @note The clock used for the microsecond-resolution delay here may
596  * be skewed relative to the clock used for system timeouts like
597  * k_sleep().  For example k_busy_wait(1000) may take slightly more or
598  * less time than k_sleep(K_MSEC(1)), with the offset dependent on
599  * clock tolerances.
600  *
601  * @note In case when @kconfig{CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE} and
602  * @kconfig{CONFIG_PM} options are enabled, this function may not work.
603  * The timer/clock used for delay processing may be disabled/inactive.
604  */
605 __syscall void k_busy_wait(uint32_t usec_to_wait);
606 
607 /**
608  * @brief Check whether it is possible to yield in the current context.
609  *
610  * This routine checks whether the kernel is in a state where it is possible to
611  * yield or call blocking API's. It should be used by code that needs to yield
612  * to perform correctly, but can feasibly be called from contexts where that
613  * is not possible. For example in the PRE_KERNEL initialization step, or when
614  * being run from the idle thread.
615  *
616  * @return True if it is possible to yield in the current context, false otherwise.
617  */
618 bool k_can_yield(void);
619 
620 /**
621  * @brief Yield the current thread.
622  *
623  * This routine causes the current thread to yield execution to another
624  * thread of the same or higher priority. If there are no other ready threads
625  * of the same or higher priority, the routine returns immediately.
626  */
627 __syscall void k_yield(void);
628 
629 /**
630  * @brief Wake up a sleeping thread.
631  *
632  * This routine prematurely wakes up @a thread from sleeping.
633  *
634  * If @a thread is not currently sleeping, the routine has no effect.
635  *
636  * @param thread ID of thread to wake.
637  */
638 __syscall void k_wakeup(k_tid_t thread);
639 
640 /**
641  * @brief Query thread ID of the current thread.
642  *
643  * This unconditionally queries the kernel via a system call.
644  *
645  * @note Use k_current_get() unless absolutely sure this is necessary.
646  *       This should only be used directly where the thread local
647  *       variable cannot be used or may contain invalid values
648  *       if thread local storage (TLS) is enabled. If TLS is not
649  *       enabled, this is the same as k_current_get().
650  *
651  * @return ID of current thread.
652  */
653 __attribute_const__
654 __syscall k_tid_t k_sched_current_thread_query(void);
655 
656 /**
657  * @brief Get thread ID of the current thread.
658  *
659  * @return ID of current thread.
660  *
661  */
662 __attribute_const__
k_current_get(void)663 static inline k_tid_t k_current_get(void)
664 {
665 #ifdef CONFIG_CURRENT_THREAD_USE_TLS
666 
667 	/* Thread-local cache of current thread ID, set in z_thread_entry() */
668 	extern Z_THREAD_LOCAL k_tid_t z_tls_current;
669 
670 	return z_tls_current;
671 #else
672 	return k_sched_current_thread_query();
673 #endif
674 }
675 
676 /**
677  * @brief Abort a thread.
678  *
679  * This routine permanently stops execution of @a thread. The thread is taken
680  * off all kernel queues it is part of (i.e. the ready queue, the timeout
681  * queue, or a kernel object wait queue). However, any kernel resources the
682  * thread might currently own (such as mutexes or memory blocks) are not
683  * released. It is the responsibility of the caller of this routine to ensure
684  * all necessary cleanup is performed.
685  *
686  * After k_thread_abort() returns, the thread is guaranteed not to be
687  * running or to become runnable anywhere on the system.  Normally
688  * this is done via blocking the caller (in the same manner as
689  * k_thread_join()), but in interrupt context on SMP systems the
690  * implementation is required to spin for threads that are running on
691  * other CPUs.
692  *
693  * @param thread ID of thread to abort.
694  */
695 __syscall void k_thread_abort(k_tid_t thread);
696 
697 k_ticks_t z_timeout_expires(const struct _timeout *timeout);
698 k_ticks_t z_timeout_remaining(const struct _timeout *timeout);
699 
700 #ifdef CONFIG_SYS_CLOCK_EXISTS
701 
702 /**
703  * @brief Get time when a thread wakes up, in system ticks
704  *
705  * This routine computes the system uptime when a waiting thread next
706  * executes, in units of system ticks.  If the thread is not waiting,
707  * it returns current system time.
708  */
709 __syscall k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *thread);
710 
z_impl_k_thread_timeout_expires_ticks(const struct k_thread * thread)711 static inline k_ticks_t z_impl_k_thread_timeout_expires_ticks(
712 						const struct k_thread *thread)
713 {
714 	return z_timeout_expires(&thread->base.timeout);
715 }
716 
717 /**
718  * @brief Get time remaining before a thread wakes up, in system ticks
719  *
720  * This routine computes the time remaining before a waiting thread
721  * next executes, in units of system ticks.  If the thread is not
722  * waiting, it returns zero.
723  */
724 __syscall k_ticks_t k_thread_timeout_remaining_ticks(const struct k_thread *thread);
725 
z_impl_k_thread_timeout_remaining_ticks(const struct k_thread * thread)726 static inline k_ticks_t z_impl_k_thread_timeout_remaining_ticks(
727 						const struct k_thread *thread)
728 {
729 	return z_timeout_remaining(&thread->base.timeout);
730 }
731 
732 #endif /* CONFIG_SYS_CLOCK_EXISTS */
733 
734 /**
735  * @cond INTERNAL_HIDDEN
736  */
737 
738 struct _static_thread_data {
739 	struct k_thread *init_thread;
740 	k_thread_stack_t *init_stack;
741 	unsigned int init_stack_size;
742 	k_thread_entry_t init_entry;
743 	void *init_p1;
744 	void *init_p2;
745 	void *init_p3;
746 	int init_prio;
747 	uint32_t init_options;
748 	const char *init_name;
749 #ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
750 	int32_t init_delay_ms;
751 #else
752 	k_timeout_t init_delay;
753 #endif
754 };
755 
756 #ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
757 #define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay_ms = (ms)
758 #define Z_THREAD_INIT_DELAY(thread) SYS_TIMEOUT_MS((thread)->init_delay_ms)
759 #else
760 #define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay = SYS_TIMEOUT_MS_INIT(ms)
761 #define Z_THREAD_INIT_DELAY(thread) (thread)->init_delay
762 #endif
763 
764 #define Z_THREAD_INITIALIZER(thread, stack, stack_size,           \
765 			    entry, p1, p2, p3,                   \
766 			    prio, options, delay, tname)         \
767 	{                                                        \
768 	.init_thread = (thread),				 \
769 	.init_stack = (stack),					 \
770 	.init_stack_size = (stack_size),                         \
771 	.init_entry = (k_thread_entry_t)entry,			 \
772 	.init_p1 = (void *)p1,                                   \
773 	.init_p2 = (void *)p2,                                   \
774 	.init_p3 = (void *)p3,                                   \
775 	.init_prio = (prio),                                     \
776 	.init_options = (options),                               \
777 	.init_name = STRINGIFY(tname),                           \
778 	Z_THREAD_INIT_DELAY_INITIALIZER(delay)			 \
779 	}
780 
781 /*
782  * Refer to K_THREAD_DEFINE() and K_KERNEL_THREAD_DEFINE() for
783  * information on arguments.
784  */
785 #define Z_THREAD_COMMON_DEFINE(name, stack_size,			\
786 			       entry, p1, p2, p3,			\
787 			       prio, options, delay)			\
788 	struct k_thread _k_thread_obj_##name;				\
789 	STRUCT_SECTION_ITERABLE(_static_thread_data,			\
790 				_k_thread_data_##name) =		\
791 		Z_THREAD_INITIALIZER(&_k_thread_obj_##name,		\
792 				     _k_thread_stack_##name, stack_size,\
793 				     entry, p1, p2, p3, prio, options,	\
794 				     delay, name);			\
795 	const k_tid_t name = (k_tid_t)&_k_thread_obj_##name
796 
797 /**
798  * INTERNAL_HIDDEN @endcond
799  */
800 
801 /**
802  * @brief Statically define and initialize a thread.
803  *
804  * The thread may be scheduled for immediate execution or a delayed start.
805  *
806  * Thread options are architecture-specific, and can include K_ESSENTIAL,
807  * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
808  * them using "|" (the logical OR operator).
809  *
810  * The ID of the thread can be accessed using:
811  *
812  * @code extern const k_tid_t <name>; @endcode
813  *
814  * @param name Name of the thread.
815  * @param stack_size Stack size in bytes.
816  * @param entry Thread entry function.
817  * @param p1 1st entry point parameter.
818  * @param p2 2nd entry point parameter.
819  * @param p3 3rd entry point parameter.
820  * @param prio Thread priority.
821  * @param options Thread options.
822  * @param delay Scheduling delay (in milliseconds), zero for no delay.
823  *
824  * @note Static threads with zero delay should not normally have
825  * MetaIRQ priority levels.  This can preempt the system
826  * initialization handling (depending on the priority of the main
827  * thread) and cause surprising ordering side effects.  It will not
828  * affect anything in the OS per se, but consider it bad practice.
829  * Use a SYS_INIT() callback if you need to run code before entrance
830  * to the application main().
831  */
832 #define K_THREAD_DEFINE(name, stack_size,                                \
833 			entry, p1, p2, p3,                               \
834 			prio, options, delay)                            \
835 	K_THREAD_STACK_DEFINE(_k_thread_stack_##name, stack_size);	 \
836 	Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3,	 \
837 			       prio, options, delay)
838 
839 /**
840  * @brief Statically define and initialize a thread intended to run only in kernel mode.
841  *
842  * The thread may be scheduled for immediate execution or a delayed start.
843  *
844  * Thread options are architecture-specific, and can include K_ESSENTIAL,
845  * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
846  * them using "|" (the logical OR operator).
847  *
848  * The ID of the thread can be accessed using:
849  *
850  * @code extern const k_tid_t <name>; @endcode
851  *
852  * @note Threads defined by this can only run in kernel mode, and cannot be
853  *       transformed into user thread via k_thread_user_mode_enter().
854  *
855  * @warning Depending on the architecture, the stack size (@p stack_size)
856  *          may need to be multiples of CONFIG_MMU_PAGE_SIZE (if MMU)
857  *          or in power-of-two size (if MPU).
858  *
859  * @param name Name of the thread.
860  * @param stack_size Stack size in bytes.
861  * @param entry Thread entry function.
862  * @param p1 1st entry point parameter.
863  * @param p2 2nd entry point parameter.
864  * @param p3 3rd entry point parameter.
865  * @param prio Thread priority.
866  * @param options Thread options.
867  * @param delay Scheduling delay (in milliseconds), zero for no delay.
868  */
869 #define K_KERNEL_THREAD_DEFINE(name, stack_size,			\
870 			       entry, p1, p2, p3,			\
871 			       prio, options, delay)			\
872 	K_KERNEL_STACK_DEFINE(_k_thread_stack_##name, stack_size);	\
873 	Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3,	\
874 			       prio, options, delay)
875 
876 /**
877  * @brief Get a thread's priority.
878  *
879  * This routine gets the priority of @a thread.
880  *
881  * @param thread ID of thread whose priority is needed.
882  *
883  * @return Priority of @a thread.
884  */
885 __syscall int k_thread_priority_get(k_tid_t thread);
886 
887 /**
888  * @brief Set a thread's priority.
889  *
890  * This routine immediately changes the priority of @a thread.
891  *
892  * Rescheduling can occur immediately depending on the priority @a thread is
893  * set to:
894  *
895  * - If its priority is raised above the priority of a currently scheduled
896  * preemptible thread, @a thread will be scheduled in.
897  *
898  * - If the caller lowers the priority of a currently scheduled preemptible
899  * thread below that of other threads in the system, the thread of the highest
900  * priority will be scheduled in.
901  *
902  * Priority can be assigned in the range of -CONFIG_NUM_COOP_PRIORITIES to
903  * CONFIG_NUM_PREEMPT_PRIORITIES-1, where -CONFIG_NUM_COOP_PRIORITIES is the
904  * highest priority.
905  *
906  * @param thread ID of thread whose priority is to be set.
907  * @param prio New priority.
908  *
909  * @warning Changing the priority of a thread currently involved in mutex
910  * priority inheritance may result in undefined behavior.
911  */
912 __syscall void k_thread_priority_set(k_tid_t thread, int prio);
913 
914 
915 #ifdef CONFIG_SCHED_DEADLINE
916 /**
917  * @brief Set deadline expiration time for scheduler
918  *
919  * This sets the "deadline" expiration as a time delta from the
920  * current time, in the same units used by k_cycle_get_32().  The
921  * scheduler (when deadline scheduling is enabled) will choose the
922  * next expiring thread when selecting between threads at the same
923  * static priority.  Threads at different priorities will be scheduled
924  * according to their static priority.
925  *
926  * @note Deadlines are stored internally using 32 bit unsigned
927  * integers.  The number of cycles between the "first" deadline in the
928  * scheduler queue and the "last" deadline must be less than 2^31 (i.e
929  * a signed non-negative quantity).  Failure to adhere to this rule
930  * may result in scheduled threads running in an incorrect deadline
931  * order.
932  *
933  * @note Despite the API naming, the scheduler makes no guarantees
934  * the thread WILL be scheduled within that deadline, nor does it take
935  * extra metadata (like e.g. the "runtime" and "period" parameters in
936  * Linux sched_setattr()) that allows the kernel to validate the
937  * scheduling for achievability.  Such features could be implemented
938  * above this call, which is simply input to the priority selection
939  * logic.
940  *
941  * @note You should enable @kconfig{CONFIG_SCHED_DEADLINE} in your project
942  * configuration.
943  *
944  * @param thread A thread on which to set the deadline
945  * @param deadline A time delta, in cycle units
946  *
947  */
948 __syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
949 #endif
950 
951 /**
952  * @brief Invoke the scheduler
953  *
954  * This routine invokes the scheduler to force a schedule point on the current
955  * CPU. If invoked from within a thread, the scheduler will be invoked
956  * immediately (provided interrupts were not locked when invoked). If invoked
957  * from within an ISR, the scheduler will be invoked upon exiting the ISR.
958  *
959  * Invoking the scheduler allows the kernel to make an immediate determination
960  * as to what the next thread to execute should be. Unlike yielding, this
961  * routine is not guaranteed to switch to a thread of equal or higher priority
962  * if any are available. For example, if the current thread is cooperative and
963  * there is a still higher priority cooperative thread that is ready, then
964  * yielding will switch to that higher priority thread whereas this routine
965  * will not.
966  *
967  * Most applications will never use this routine.
968  */
969 __syscall void k_reschedule(void);
970 
971 #ifdef CONFIG_SCHED_CPU_MASK
972 /**
973  * @brief Sets all CPU enable masks to zero
974  *
975  * After this returns, the thread will no longer be schedulable on any
976  * CPUs.  The thread must not be currently runnable.
977  *
978  * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
979  * configuration.
980  *
981  * @param thread Thread to operate upon
982  * @return Zero on success, otherwise error code
983  */
984 int k_thread_cpu_mask_clear(k_tid_t thread);
985 
986 /**
987  * @brief Sets all CPU enable masks to one
988  *
989  * After this returns, the thread will be schedulable on any CPU.  The
990  * thread must not be currently runnable.
991  *
992  * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
993  * configuration.
994  *
995  * @param thread Thread to operate upon
996  * @return Zero on success, otherwise error code
997  */
998 int k_thread_cpu_mask_enable_all(k_tid_t thread);
999 
1000 /**
1001  * @brief Enable thread to run on specified CPU
1002  *
1003  * The thread must not be currently runnable.
1004  *
1005  * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
1006  * configuration.
1007  *
1008  * @param thread Thread to operate upon
1009  * @param cpu CPU index
1010  * @return Zero on success, otherwise error code
1011  */
1012 int k_thread_cpu_mask_enable(k_tid_t thread, int cpu);
1013 
1014 /**
1015  * @brief Prevent thread to run on specified CPU
1016  *
1017  * The thread must not be currently runnable.
1018  *
1019  * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
1020  * configuration.
1021  *
1022  * @param thread Thread to operate upon
1023  * @param cpu CPU index
1024  * @return Zero on success, otherwise error code
1025  */
1026 int k_thread_cpu_mask_disable(k_tid_t thread, int cpu);
1027 
1028 /**
1029  * @brief Pin a thread to a CPU
1030  *
1031  * Pin a thread to a CPU by first clearing the cpu mask and then enabling the
1032  * thread on the selected CPU.
1033  *
1034  * @param thread Thread to operate upon
1035  * @param cpu CPU index
1036  * @return Zero on success, otherwise error code
1037  */
1038 int k_thread_cpu_pin(k_tid_t thread, int cpu);
1039 #endif
1040 
1041 /**
1042  * @brief Suspend a thread.
1043  *
1044  * This routine prevents the kernel scheduler from making @a thread
1045  * the current thread. All other internal operations on @a thread are
1046  * still performed; for example, kernel objects it is waiting on are
1047  * still handed to it. Thread suspension does not impact any timeout
1048  * upon which the thread may be waiting (such as a timeout from a call
1049  * to k_sem_take() or k_sleep()). Thus if the timeout expires while the
1050  * thread is suspended, it is still suspended until k_thread_resume()
1051  * is called.
1052  *
1053  * When the target thread is active on another CPU, the caller will block until
1054  * the target thread is halted (suspended or aborted).  But if the caller is in
1055  * an interrupt context, it will spin waiting for that target thread active on
1056  * another CPU to halt.
1057  *
1058  * If @a thread is already suspended, the routine has no effect.
1059  *
1060  * @param thread ID of thread to suspend.
1061  */
1062 __syscall void k_thread_suspend(k_tid_t thread);
1063 
1064 /**
1065  * @brief Resume a suspended thread.
1066  *
1067  * This routine reverses the thread suspension from k_thread_suspend()
1068  * and allows the kernel scheduler to make @a thread the current thread
1069  * when it is next eligible for that role.
1070  *
1071  * If @a thread is not currently suspended, the routine has no effect.
1072  *
1073  * @param thread ID of thread to resume.
1074  */
1075 __syscall void k_thread_resume(k_tid_t thread);
1076 
1077 /**
1078  * @brief Start an inactive thread
1079  *
1080  * If a thread was created with K_FOREVER in the delay parameter, it will
1081  * not be added to the scheduling queue until this function is called
1082  * on it.
1083  *
1084  * @note This is a legacy API for compatibility.  Modern Zephyr
1085  * threads are initialized in the "sleeping" state and do not need
1086  * special handling for "start".
1087  *
1088  * @param thread thread to start
1089  */
k_thread_start(k_tid_t thread)1090 static inline void k_thread_start(k_tid_t thread)
1091 {
1092 	k_wakeup(thread);
1093 }
1094 
1095 /**
1096  * @brief Set time-slicing period and scope.
1097  *
1098  * This routine specifies how the scheduler will perform time slicing of
1099  * preemptible threads.
1100  *
1101  * To enable time slicing, @a slice must be non-zero. The scheduler
1102  * ensures that no thread runs for more than the specified time limit
1103  * before other threads of that priority are given a chance to execute.
1104  * Any thread whose priority is higher than @a prio is exempted, and may
1105  * execute as long as desired without being preempted due to time slicing.
1106  *
1107  * Time slicing only limits the maximum amount of time a thread may continuously
1108  * execute. Once the scheduler selects a thread for execution, there is no
1109  * minimum guaranteed time the thread will execute before threads of greater or
1110  * equal priority are scheduled.
1111  *
1112  * When the current thread is the only one of that priority eligible
1113  * for execution, this routine has no effect; the thread is immediately
1114  * rescheduled after the slice period expires.
1115  *
1116  * To disable timeslicing, set both @a slice and @a prio to zero.
1117  *
1118  * @param slice Maximum time slice length (in milliseconds).
1119  * @param prio Highest thread priority level eligible for time slicing.
1120  */
1121 void k_sched_time_slice_set(int32_t slice, int prio);
1122 
1123 /**
1124  * @brief Set thread time slice
1125  *
1126  * As for k_sched_time_slice_set, but (when
1127  * CONFIG_TIMESLICE_PER_THREAD=y) sets the timeslice for a specific
1128  * thread.  When non-zero, this timeslice will take precedence over
1129  * the global value.
1130  *
1131  * When such a thread's timeslice expires, the configured callback
1132  * will be called before the thread is removed/re-added to the run
1133  * queue.  This callback will occur in interrupt context, and the
1134  * specified thread is guaranteed to have been preempted by the
1135  * currently-executing ISR.  Such a callback is free to, for example,
1136  * modify the thread priority or slice time for future execution,
1137  * suspend the thread, etc...
1138  *
1139  * @note Unlike the older API, the time slice parameter here is
1140  * specified in ticks, not milliseconds.  Ticks have always been the
1141  * internal unit, and not all platforms have integer conversions
1142  * between the two.
1143  *
1144  * @note Threads with a non-zero slice time set will be timesliced
1145  * always, even if they are higher priority than the maximum timeslice
1146  * priority set via k_sched_time_slice_set().
1147  *
1148  * @note The callback notification for slice expiration happens, as it
1149  * must, while the thread is still "current", and thus it happens
1150  * before any registered timeouts at this tick.  This has the somewhat
1151  * confusing side effect that the tick time (c.f. k_uptime_get()) does
1152  * not yet reflect the expired ticks.  Applications wishing to make
1153  * fine-grained timing decisions within this callback should use the
1154  * cycle API, or derived facilities like k_thread_runtime_stats_get().
1155  *
1156  * @param th A valid, initialized thread
1157  * @param slice_ticks Maximum timeslice, in ticks
1158  * @param expired Callback function called on slice expiration
1159  * @param data Parameter for the expiration handler
1160  */
1161 void k_thread_time_slice_set(struct k_thread *th, int32_t slice_ticks,
1162 			     k_thread_timeslice_fn_t expired, void *data);
1163 
1164 /** @} */
1165 
1166 /**
1167  * @addtogroup isr_apis
1168  * @{
1169  */
1170 
1171 /**
1172  * @brief Determine if code is running at interrupt level.
1173  *
1174  * This routine allows the caller to customize its actions, depending on
1175  * whether it is a thread or an ISR.
1176  *
1177  * @funcprops \isr_ok
1178  *
1179  * @return false if invoked by a thread.
1180  * @return true if invoked by an ISR.
1181  */
1182 bool k_is_in_isr(void);
1183 
1184 /**
1185  * @brief Determine if code is running in a preemptible thread.
1186  *
1187  * This routine allows the caller to customize its actions, depending on
1188  * whether it can be preempted by another thread. The routine returns a 'true'
1189  * value if all of the following conditions are met:
1190  *
1191  * - The code is running in a thread, not at ISR.
1192  * - The thread's priority is in the preemptible range.
1193  * - The thread has not locked the scheduler.
1194  *
1195  * @funcprops \isr_ok
1196  *
1197  * @return 0 if invoked by an ISR or by a cooperative thread.
1198  * @return Non-zero if invoked by a preemptible thread.
1199  */
1200 __syscall int k_is_preempt_thread(void);
1201 
1202 /**
1203  * @brief Test whether startup is in the before-main-task phase.
1204  *
1205  * This routine allows the caller to customize its actions, depending on
1206  * whether it being invoked before the kernel is fully active.
1207  *
1208  * @funcprops \isr_ok
1209  *
1210  * @return true if invoked before post-kernel initialization
1211  * @return false if invoked during/after post-kernel initialization
1212  */
k_is_pre_kernel(void)1213 static inline bool k_is_pre_kernel(void)
1214 {
1215 	extern bool z_sys_post_kernel; /* in init.c */
1216 
1217 	return !z_sys_post_kernel;
1218 }
1219 
1220 /**
1221  * @}
1222  */
1223 
1224 /**
1225  * @addtogroup thread_apis
1226  * @{
1227  */
1228 
1229 /**
1230  * @brief Lock the scheduler.
1231  *
1232  * This routine prevents the current thread from being preempted by another
1233  * thread by instructing the scheduler to treat it as a cooperative thread.
1234  * If the thread subsequently performs an operation that makes it unready,
1235  * it will be context switched out in the normal manner. When the thread
1236  * again becomes the current thread, its non-preemptible status is maintained.
1237  *
1238  * This routine can be called recursively.
1239  *
1240  * Owing to clever implementation details, scheduler locks are
1241  * extremely fast for non-userspace threads (just one byte
1242  * inc/decrement in the thread struct).
1243  *
1244  * @note This works by elevating the thread priority temporarily to a
1245  * cooperative priority, allowing cheap synchronization vs. other
1246  * preemptible or cooperative threads running on the current CPU.  It
1247  * does not prevent preemption or asynchrony of other types.  It does
1248  * not prevent threads from running on other CPUs when CONFIG_SMP=y.
1249  * It does not prevent interrupts from happening, nor does it prevent
1250  * threads with MetaIRQ priorities from preempting the current thread.
1251  * In general this is a historical API not well-suited to modern
1252  * applications, use with care.
1253  */
1254 void k_sched_lock(void);
1255 
1256 /**
1257  * @brief Unlock the scheduler.
1258  *
1259  * This routine reverses the effect of a previous call to k_sched_lock().
1260  * A thread must call the routine once for each time it called k_sched_lock()
1261  * before the thread becomes preemptible.
1262  */
1263 void k_sched_unlock(void);
1264 
1265 /**
1266  * @brief Set current thread's custom data.
1267  *
1268  * This routine sets the custom data for the current thread to @ value.
1269  *
1270  * Custom data is not used by the kernel itself, and is freely available
1271  * for a thread to use as it sees fit. It can be used as a framework
1272  * upon which to build thread-local storage.
1273  *
1274  * @param value New custom data value.
1275  *
1276  */
1277 __syscall void k_thread_custom_data_set(void *value);
1278 
1279 /**
1280  * @brief Get current thread's custom data.
1281  *
1282  * This routine returns the custom data for the current thread.
1283  *
1284  * @return Current custom data value.
1285  */
1286 __syscall void *k_thread_custom_data_get(void);
1287 
1288 /**
1289  * @brief Set current thread name
1290  *
1291  * Set the name of the thread to be used when @kconfig{CONFIG_THREAD_MONITOR}
1292  * is enabled for tracing and debugging.
1293  *
1294  * @param thread Thread to set name, or NULL to set the current thread
1295  * @param str Name string
1296  * @retval 0 on success
1297  * @retval -EFAULT Memory access error with supplied string
1298  * @retval -ENOSYS Thread name configuration option not enabled
1299  * @retval -EINVAL Thread name too long
1300  */
1301 __syscall int k_thread_name_set(k_tid_t thread, const char *str);
1302 
1303 /**
1304  * @brief Get thread name
1305  *
1306  * Get the name of a thread
1307  *
1308  * @param thread Thread ID
1309  * @retval Thread name, or NULL if configuration not enabled
1310  */
1311 const char *k_thread_name_get(k_tid_t thread);
1312 
1313 /**
1314  * @brief Copy the thread name into a supplied buffer
1315  *
1316  * @param thread Thread to obtain name information
1317  * @param buf Destination buffer
1318  * @param size Destination buffer size
1319  * @retval -ENOSPC Destination buffer too small
1320  * @retval -EFAULT Memory access error
1321  * @retval -ENOSYS Thread name feature not enabled
1322  * @retval 0 Success
1323  */
1324 __syscall int k_thread_name_copy(k_tid_t thread, char *buf,
1325 				 size_t size);
1326 
1327 /**
1328  * @brief Get thread state string
1329  *
1330  * This routine generates a human friendly string containing the thread's
1331  * state, and copies as much of it as possible into @a buf.
1332  *
1333  * @param thread_id Thread ID
1334  * @param buf Buffer into which to copy state strings
1335  * @param buf_size Size of the buffer
1336  *
1337  * @retval Pointer to @a buf if data was copied, else a pointer to "".
1338  */
1339 const char *k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size);
1340 
1341 /**
1342  * @}
1343  */
1344 
1345 /**
1346  * @addtogroup clock_apis
1347  * @{
1348  */
1349 
1350 /**
1351  * @brief Generate null timeout delay.
1352  *
1353  * This macro generates a timeout delay that instructs a kernel API
1354  * not to wait if the requested operation cannot be performed immediately.
1355  *
1356  * @return Timeout delay value.
1357  */
1358 #define K_NO_WAIT Z_TIMEOUT_NO_WAIT
1359 
1360 /**
1361  * @brief Generate timeout delay from nanoseconds.
1362  *
1363  * This macro generates a timeout delay that instructs a kernel API to
1364  * wait up to @a t nanoseconds to perform the requested operation.
1365  * Note that timer precision is limited to the tick rate, not the
1366  * requested value.
1367  *
1368  * @param t Duration in nanoseconds.
1369  *
1370  * @return Timeout delay value.
1371  */
1372 #define K_NSEC(t)     Z_TIMEOUT_NS(t)
1373 
1374 /**
1375  * @brief Generate timeout delay from microseconds.
1376  *
1377  * This macro generates a timeout delay that instructs a kernel API
1378  * to wait up to @a t microseconds to perform the requested operation.
1379  * Note that timer precision is limited to the tick rate, not the
1380  * requested value.
1381  *
1382  * @param t Duration in microseconds.
1383  *
1384  * @return Timeout delay value.
1385  */
1386 #define K_USEC(t)     Z_TIMEOUT_US(t)
1387 
1388 /**
1389  * @brief Generate timeout delay from cycles.
1390  *
1391  * This macro generates a timeout delay that instructs a kernel API
1392  * to wait up to @a t cycles to perform the requested operation.
1393  *
1394  * @param t Duration in cycles.
1395  *
1396  * @return Timeout delay value.
1397  */
1398 #define K_CYC(t)     Z_TIMEOUT_CYC(t)
1399 
1400 /**
1401  * @brief Generate timeout delay from system ticks.
1402  *
1403  * This macro generates a timeout delay that instructs a kernel API
1404  * to wait up to @a t ticks to perform the requested operation.
1405  *
1406  * @param t Duration in system ticks.
1407  *
1408  * @return Timeout delay value.
1409  */
1410 #define K_TICKS(t)     Z_TIMEOUT_TICKS(t)
1411 
1412 /**
1413  * @brief Generate timeout delay from milliseconds.
1414  *
1415  * This macro generates a timeout delay that instructs a kernel API
1416  * to wait up to @a ms milliseconds to perform the requested operation.
1417  *
1418  * @param ms Duration in milliseconds.
1419  *
1420  * @return Timeout delay value.
1421  */
1422 #define K_MSEC(ms)     Z_TIMEOUT_MS(ms)
1423 
1424 /**
1425  * @brief Generate timeout delay from seconds.
1426  *
1427  * This macro generates a timeout delay that instructs a kernel API
1428  * to wait up to @a s seconds to perform the requested operation.
1429  *
1430  * @param s Duration in seconds.
1431  *
1432  * @return Timeout delay value.
1433  */
1434 #define K_SECONDS(s)   K_MSEC((s) * MSEC_PER_SEC)
1435 
1436 /**
1437  * @brief Generate timeout delay from minutes.
1438 
1439  * This macro generates a timeout delay that instructs a kernel API
1440  * to wait up to @a m minutes to perform the requested operation.
1441  *
1442  * @param m Duration in minutes.
1443  *
1444  * @return Timeout delay value.
1445  */
1446 #define K_MINUTES(m)   K_SECONDS((m) * 60)
1447 
1448 /**
1449  * @brief Generate timeout delay from hours.
1450  *
1451  * This macro generates a timeout delay that instructs a kernel API
1452  * to wait up to @a h hours to perform the requested operation.
1453  *
1454  * @param h Duration in hours.
1455  *
1456  * @return Timeout delay value.
1457  */
1458 #define K_HOURS(h)     K_MINUTES((h) * 60)
1459 
1460 /**
1461  * @brief Generate infinite timeout delay.
1462  *
1463  * This macro generates a timeout delay that instructs a kernel API
1464  * to wait as long as necessary to perform the requested operation.
1465  *
1466  * @return Timeout delay value.
1467  */
1468 #define K_FOREVER Z_FOREVER
1469 
1470 #ifdef CONFIG_TIMEOUT_64BIT
1471 
1472 /**
1473  * @brief Generates an absolute/uptime timeout value from system ticks
1474  *
1475  * This macro generates a timeout delay that represents an expiration
1476  * at the absolute uptime value specified, in system ticks.  That is, the
1477  * timeout will expire immediately after the system uptime reaches the
1478  * specified tick count.
1479  *
1480  * @param t Tick uptime value
1481  * @return Timeout delay value
1482  */
1483 #define K_TIMEOUT_ABS_TICKS(t) \
1484 	Z_TIMEOUT_TICKS(Z_TICK_ABS((k_ticks_t)MAX(t, 0)))
1485 
1486 /**
1487  * @brief Generates an absolute/uptime timeout value from milliseconds
1488  *
1489  * This macro generates a timeout delay that represents an expiration
1490  * at the absolute uptime value specified, in milliseconds.  That is,
1491  * the timeout will expire immediately after the system uptime reaches
1492  * the specified tick count.
1493  *
1494  * @param t Millisecond uptime value
1495  * @return Timeout delay value
1496  */
1497 #define K_TIMEOUT_ABS_MS(t) K_TIMEOUT_ABS_TICKS(k_ms_to_ticks_ceil64(t))
1498 
1499 /**
1500  * @brief Generates an absolute/uptime timeout value from microseconds
1501  *
1502  * This macro generates a timeout delay that represents an expiration
1503  * at the absolute uptime value specified, in microseconds.  That is,
1504  * the timeout will expire immediately after the system uptime reaches
1505  * the specified time.  Note that timer precision is limited by the
1506  * system tick rate and not the requested timeout value.
1507  *
1508  * @param t Microsecond uptime value
1509  * @return Timeout delay value
1510  */
1511 #define K_TIMEOUT_ABS_US(t) K_TIMEOUT_ABS_TICKS(k_us_to_ticks_ceil64(t))
1512 
1513 /**
1514  * @brief Generates an absolute/uptime timeout value from nanoseconds
1515  *
1516  * This macro generates a timeout delay that represents an expiration
1517  * at the absolute uptime value specified, in nanoseconds.  That is,
1518  * the timeout will expire immediately after the system uptime reaches
1519  * the specified time.  Note that timer precision is limited by the
1520  * system tick rate and not the requested timeout value.
1521  *
1522  * @param t Nanosecond uptime value
1523  * @return Timeout delay value
1524  */
1525 #define K_TIMEOUT_ABS_NS(t) K_TIMEOUT_ABS_TICKS(k_ns_to_ticks_ceil64(t))
1526 
1527 /**
1528  * @brief Generates an absolute/uptime timeout value from system cycles
1529  *
1530  * This macro generates a timeout delay that represents an expiration
1531  * at the absolute uptime value specified, in cycles.  That is, the
1532  * timeout will expire immediately after the system uptime reaches the
1533  * specified time.  Note that timer precision is limited by the system
1534  * tick rate and not the requested timeout value.
1535  *
1536  * @param t Cycle uptime value
1537  * @return Timeout delay value
1538  */
1539 #define K_TIMEOUT_ABS_CYC(t) K_TIMEOUT_ABS_TICKS(k_cyc_to_ticks_ceil64(t))
1540 
1541 #endif
1542 
1543 /**
1544  * @}
1545  */
1546 
1547 /**
1548  * @cond INTERNAL_HIDDEN
1549  */
1550 
1551 struct k_timer {
1552 	/*
1553 	 * _timeout structure must be first here if we want to use
1554 	 * dynamic timer allocation. timeout.node is used in the double-linked
1555 	 * list of free timers
1556 	 */
1557 	struct _timeout timeout;
1558 
1559 	/* wait queue for the (single) thread waiting on this timer */
1560 	_wait_q_t wait_q;
1561 
1562 	/* runs in ISR context */
1563 	void (*expiry_fn)(struct k_timer *timer);
1564 
1565 	/* runs in the context of the thread that calls k_timer_stop() */
1566 	void (*stop_fn)(struct k_timer *timer);
1567 
1568 	/* timer period */
1569 	k_timeout_t period;
1570 
1571 	/* timer status */
1572 	uint32_t status;
1573 
1574 	/* user-specific data, also used to support legacy features */
1575 	void *user_data;
1576 
1577 	SYS_PORT_TRACING_TRACKING_FIELD(k_timer)
1578 
1579 #ifdef CONFIG_OBJ_CORE_TIMER
1580 	struct k_obj_core  obj_core;
1581 #endif
1582 };
1583 
1584 #define Z_TIMER_INITIALIZER(obj, expiry, stop) \
1585 	{ \
1586 	.timeout = { \
1587 		.node = {},\
1588 		.fn = z_timer_expiration_handler, \
1589 		.dticks = 0, \
1590 	}, \
1591 	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
1592 	.expiry_fn = expiry, \
1593 	.stop_fn = stop, \
1594 	.status = 0, \
1595 	.user_data = 0, \
1596 	}
1597 
1598 /**
1599  * INTERNAL_HIDDEN @endcond
1600  */
1601 
1602 /**
1603  * @defgroup timer_apis Timer APIs
1604  * @ingroup kernel_apis
1605  * @{
1606  */
1607 
1608 /**
1609  * @typedef k_timer_expiry_t
1610  * @brief Timer expiry function type.
1611  *
1612  * A timer's expiry function is executed by the system clock interrupt handler
1613  * each time the timer expires. The expiry function is optional, and is only
1614  * invoked if the timer has been initialized with one.
1615  *
1616  * @param timer     Address of timer.
1617  */
1618 typedef void (*k_timer_expiry_t)(struct k_timer *timer);
1619 
1620 /**
1621  * @typedef k_timer_stop_t
1622  * @brief Timer stop function type.
1623  *
1624  * A timer's stop function is executed if the timer is stopped prematurely.
1625  * The function runs in the context of call that stops the timer.  As
1626  * k_timer_stop() can be invoked from an ISR, the stop function must be
1627  * callable from interrupt context (isr-ok).
1628  *
1629  * The stop function is optional, and is only invoked if the timer has been
1630  * initialized with one.
1631  *
1632  * @param timer     Address of timer.
1633  */
1634 typedef void (*k_timer_stop_t)(struct k_timer *timer);
1635 
1636 /**
1637  * @brief Statically define and initialize a timer.
1638  *
1639  * The timer can be accessed outside the module where it is defined using:
1640  *
1641  * @code extern struct k_timer <name>; @endcode
1642  *
1643  * @param name Name of the timer variable.
1644  * @param expiry_fn Function to invoke each time the timer expires.
1645  * @param stop_fn   Function to invoke if the timer is stopped while running.
1646  */
1647 #define K_TIMER_DEFINE(name, expiry_fn, stop_fn) \
1648 	STRUCT_SECTION_ITERABLE(k_timer, name) = \
1649 		Z_TIMER_INITIALIZER(name, expiry_fn, stop_fn)
1650 
1651 /**
1652  * @brief Initialize a timer.
1653  *
1654  * This routine initializes a timer, prior to its first use.
1655  *
1656  * @param timer     Address of timer.
1657  * @param expiry_fn Function to invoke each time the timer expires.
1658  * @param stop_fn   Function to invoke if the timer is stopped while running.
1659  */
1660 void k_timer_init(struct k_timer *timer,
1661 			 k_timer_expiry_t expiry_fn,
1662 			 k_timer_stop_t stop_fn);
1663 
1664 /**
1665  * @brief Start a timer.
1666  *
1667  * This routine starts a timer, and resets its status to zero. The timer
1668  * begins counting down using the specified duration and period values.
1669  *
1670  * Attempting to start a timer that is already running is permitted.
1671  * The timer's status is reset to zero and the timer begins counting down
1672  * using the new duration and period values.
1673  *
1674  * @param timer     Address of timer.
1675  * @param duration  Initial timer duration.
1676  * @param period    Timer period.
1677  */
1678 __syscall void k_timer_start(struct k_timer *timer,
1679 			     k_timeout_t duration, k_timeout_t period);
1680 
1681 /**
1682  * @brief Stop a timer.
1683  *
1684  * This routine stops a running timer prematurely. The timer's stop function,
1685  * if one exists, is invoked by the caller.
1686  *
1687  * Attempting to stop a timer that is not running is permitted, but has no
1688  * effect on the timer.
1689  *
1690  * @note The stop handler has to be callable from ISRs if @a k_timer_stop is to
1691  * be called from ISRs.
1692  *
1693  * @funcprops \isr_ok
1694  *
1695  * @param timer     Address of timer.
1696  */
1697 __syscall void k_timer_stop(struct k_timer *timer);
1698 
1699 /**
1700  * @brief Read timer status.
1701  *
1702  * This routine reads the timer's status, which indicates the number of times
1703  * it has expired since its status was last read.
1704  *
1705  * Calling this routine resets the timer's status to zero.
1706  *
1707  * @param timer     Address of timer.
1708  *
1709  * @return Timer status.
1710  */
1711 __syscall uint32_t k_timer_status_get(struct k_timer *timer);
1712 
1713 /**
1714  * @brief Synchronize thread to timer expiration.
1715  *
1716  * This routine blocks the calling thread until the timer's status is non-zero
1717  * (indicating that it has expired at least once since it was last examined)
1718  * or the timer is stopped. If the timer status is already non-zero,
1719  * or the timer is already stopped, the caller continues without waiting.
1720  *
1721  * Calling this routine resets the timer's status to zero.
1722  *
1723  * This routine must not be used by interrupt handlers, since they are not
1724  * allowed to block.
1725  *
1726  * @param timer     Address of timer.
1727  *
1728  * @return Timer status.
1729  */
1730 __syscall uint32_t k_timer_status_sync(struct k_timer *timer);
1731 
1732 #ifdef CONFIG_SYS_CLOCK_EXISTS
1733 
1734 /**
1735  * @brief Get next expiration time of a timer, in system ticks
1736  *
1737  * This routine returns the future system uptime reached at the next
1738  * time of expiration of the timer, in units of system ticks.  If the
1739  * timer is not running, current system time is returned.
1740  *
1741  * @param timer The timer object
1742  * @return Uptime of expiration, in ticks
1743  */
1744 __syscall k_ticks_t k_timer_expires_ticks(const struct k_timer *timer);
1745 
z_impl_k_timer_expires_ticks(const struct k_timer * timer)1746 static inline k_ticks_t z_impl_k_timer_expires_ticks(
1747 				       const struct k_timer *timer)
1748 {
1749 	return z_timeout_expires(&timer->timeout);
1750 }
1751 
1752 /**
1753  * @brief Get time remaining before a timer next expires, in system ticks
1754  *
1755  * This routine computes the time remaining before a running timer
1756  * next expires, in units of system ticks.  If the timer is not
1757  * running, it returns zero.
1758  *
1759  * @param timer The timer object
1760  * @return Remaining time until expiration, in ticks
1761  */
1762 __syscall k_ticks_t k_timer_remaining_ticks(const struct k_timer *timer);
1763 
z_impl_k_timer_remaining_ticks(const struct k_timer * timer)1764 static inline k_ticks_t z_impl_k_timer_remaining_ticks(
1765 				       const struct k_timer *timer)
1766 {
1767 	return z_timeout_remaining(&timer->timeout);
1768 }
1769 
1770 /**
1771  * @brief Get time remaining before a timer next expires.
1772  *
1773  * This routine computes the (approximate) time remaining before a running
1774  * timer next expires. If the timer is not running, it returns zero.
1775  *
1776  * @param timer     Address of timer.
1777  *
1778  * @return Remaining time (in milliseconds).
1779  */
k_timer_remaining_get(struct k_timer * timer)1780 static inline uint32_t k_timer_remaining_get(struct k_timer *timer)
1781 {
1782 	return k_ticks_to_ms_floor32(k_timer_remaining_ticks(timer));
1783 }
1784 
1785 #endif /* CONFIG_SYS_CLOCK_EXISTS */
1786 
1787 /**
1788  * @brief Associate user-specific data with a timer.
1789  *
1790  * This routine records the @a user_data with the @a timer, to be retrieved
1791  * later.
1792  *
1793  * It can be used e.g. in a timer handler shared across multiple subsystems to
1794  * retrieve data specific to the subsystem this timer is associated with.
1795  *
1796  * @param timer     Address of timer.
1797  * @param user_data User data to associate with the timer.
1798  */
1799 __syscall void k_timer_user_data_set(struct k_timer *timer, void *user_data);
1800 
1801 /**
1802  * @internal
1803  */
z_impl_k_timer_user_data_set(struct k_timer * timer,void * user_data)1804 static inline void z_impl_k_timer_user_data_set(struct k_timer *timer,
1805 					       void *user_data)
1806 {
1807 	timer->user_data = user_data;
1808 }
1809 
1810 /**
1811  * @brief Retrieve the user-specific data from a timer.
1812  *
1813  * @param timer     Address of timer.
1814  *
1815  * @return The user data.
1816  */
1817 __syscall void *k_timer_user_data_get(const struct k_timer *timer);
1818 
z_impl_k_timer_user_data_get(const struct k_timer * timer)1819 static inline void *z_impl_k_timer_user_data_get(const struct k_timer *timer)
1820 {
1821 	return timer->user_data;
1822 }
1823 
1824 /** @} */
1825 
1826 /**
1827  * @addtogroup clock_apis
1828  * @ingroup kernel_apis
1829  * @{
1830  */
1831 
1832 /**
1833  * @brief Get system uptime, in system ticks.
1834  *
1835  * This routine returns the elapsed time since the system booted, in
1836  * ticks (c.f. @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC}), which is the
1837  * fundamental unit of resolution of kernel timekeeping.
1838  *
1839  * @return Current uptime in ticks.
1840  */
1841 __syscall int64_t k_uptime_ticks(void);
1842 
1843 /**
1844  * @brief Get system uptime.
1845  *
1846  * This routine returns the elapsed time since the system booted,
1847  * in milliseconds.
1848  *
1849  * @note
1850  *    While this function returns time in milliseconds, it does
1851  *    not mean it has millisecond resolution. The actual resolution depends on
1852  *    @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
1853  *
1854  * @return Current uptime in milliseconds.
1855  */
k_uptime_get(void)1856 static inline int64_t k_uptime_get(void)
1857 {
1858 	return k_ticks_to_ms_floor64(k_uptime_ticks());
1859 }
1860 
1861 /**
1862  * @brief Get system uptime (32-bit version).
1863  *
1864  * This routine returns the lower 32 bits of the system uptime in
1865  * milliseconds.
1866  *
1867  * Because correct conversion requires full precision of the system
1868  * clock there is no benefit to using this over k_uptime_get() unless
1869  * you know the application will never run long enough for the system
1870  * clock to approach 2^32 ticks.  Calls to this function may involve
1871  * interrupt blocking and 64-bit math.
1872  *
1873  * @note
1874  *    While this function returns time in milliseconds, it does
1875  *    not mean it has millisecond resolution. The actual resolution depends on
1876  *    @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option
1877  *
1878  * @return The low 32 bits of the current uptime, in milliseconds.
1879  */
k_uptime_get_32(void)1880 static inline uint32_t k_uptime_get_32(void)
1881 {
1882 	return (uint32_t)k_uptime_get();
1883 }
1884 
1885 /**
1886  * @brief Get system uptime in seconds.
1887  *
1888  * This routine returns the elapsed time since the system booted,
1889  * in seconds.
1890  *
1891  * @return Current uptime in seconds.
1892  */
k_uptime_seconds(void)1893 static inline uint32_t k_uptime_seconds(void)
1894 {
1895 	return k_ticks_to_sec_floor32(k_uptime_ticks());
1896 }
1897 
1898 /**
1899  * @brief Get elapsed time.
1900  *
1901  * This routine computes the elapsed time between the current system uptime
1902  * and an earlier reference time, in milliseconds.
1903  *
1904  * @param reftime Pointer to a reference time, which is updated to the current
1905  *                uptime upon return.
1906  *
1907  * @return Elapsed time.
1908  */
k_uptime_delta(int64_t * reftime)1909 static inline int64_t k_uptime_delta(int64_t *reftime)
1910 {
1911 	int64_t uptime, delta;
1912 
1913 	uptime = k_uptime_get();
1914 	delta = uptime - *reftime;
1915 	*reftime = uptime;
1916 
1917 	return delta;
1918 }
1919 
1920 /**
1921  * @brief Read the hardware clock.
1922  *
1923  * This routine returns the current time, as measured by the system's hardware
1924  * clock.
1925  *
1926  * @return Current hardware clock up-counter (in cycles).
1927  */
k_cycle_get_32(void)1928 static inline uint32_t k_cycle_get_32(void)
1929 {
1930 	return arch_k_cycle_get_32();
1931 }
1932 
1933 /**
1934  * @brief Read the 64-bit hardware clock.
1935  *
1936  * This routine returns the current time in 64-bits, as measured by the
1937  * system's hardware clock, if available.
1938  *
1939  * @see CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER
1940  *
1941  * @return Current hardware clock up-counter (in cycles).
1942  */
k_cycle_get_64(void)1943 static inline uint64_t k_cycle_get_64(void)
1944 {
1945 	if (!IS_ENABLED(CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER)) {
1946 		__ASSERT(0, "64-bit cycle counter not enabled on this platform. "
1947 			    "See CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER");
1948 		return 0;
1949 	}
1950 
1951 	return arch_k_cycle_get_64();
1952 }
1953 
1954 /**
1955  * @}
1956  */
1957 
1958 struct k_queue {
1959 	sys_sflist_t data_q;
1960 	struct k_spinlock lock;
1961 	_wait_q_t wait_q;
1962 
1963 	Z_DECL_POLL_EVENT
1964 
1965 	SYS_PORT_TRACING_TRACKING_FIELD(k_queue)
1966 };
1967 
1968 /**
1969  * @cond INTERNAL_HIDDEN
1970  */
1971 
1972 #define Z_QUEUE_INITIALIZER(obj) \
1973 	{ \
1974 	.data_q = SYS_SFLIST_STATIC_INIT(&obj.data_q), \
1975 	.lock = { }, \
1976 	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q),	\
1977 	Z_POLL_EVENT_OBJ_INIT(obj)		\
1978 	}
1979 
1980 /**
1981  * INTERNAL_HIDDEN @endcond
1982  */
1983 
1984 /**
1985  * @defgroup queue_apis Queue APIs
1986  * @ingroup kernel_apis
1987  * @{
1988  */
1989 
1990 /**
1991  * @brief Initialize a queue.
1992  *
1993  * This routine initializes a queue object, prior to its first use.
1994  *
1995  * @param queue Address of the queue.
1996  */
1997 __syscall void k_queue_init(struct k_queue *queue);
1998 
1999 /**
2000  * @brief Cancel waiting on a queue.
2001  *
2002  * This routine causes first thread pending on @a queue, if any, to
2003  * return from k_queue_get() call with NULL value (as if timeout expired).
2004  * If the queue is being waited on by k_poll(), it will return with
2005  * -EINTR and K_POLL_STATE_CANCELLED state (and per above, subsequent
2006  * k_queue_get() will return NULL).
2007  *
2008  * @funcprops \isr_ok
2009  *
2010  * @param queue Address of the queue.
2011  */
2012 __syscall void k_queue_cancel_wait(struct k_queue *queue);
2013 
2014 /**
2015  * @brief Append an element to the end of a queue.
2016  *
2017  * This routine appends a data item to @a queue. A queue data item must be
2018  * aligned on a word boundary, and the first word of the item is reserved
2019  * for the kernel's use.
2020  *
2021  * @funcprops \isr_ok
2022  *
2023  * @param queue Address of the queue.
2024  * @param data Address of the data item.
2025  */
2026 void k_queue_append(struct k_queue *queue, void *data);
2027 
2028 /**
2029  * @brief Append an element to a queue.
2030  *
2031  * This routine appends a data item to @a queue. There is an implicit memory
2032  * allocation to create an additional temporary bookkeeping data structure from
2033  * the calling thread's resource pool, which is automatically freed when the
2034  * item is removed. The data itself is not copied.
2035  *
2036  * @funcprops \isr_ok
2037  *
2038  * @param queue Address of the queue.
2039  * @param data Address of the data item.
2040  *
2041  * @retval 0 on success
2042  * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2043  */
2044 __syscall int32_t k_queue_alloc_append(struct k_queue *queue, void *data);
2045 
2046 /**
2047  * @brief Prepend an element to a queue.
2048  *
2049  * This routine prepends a data item to @a queue. A queue data item must be
2050  * aligned on a word boundary, and the first word of the item is reserved
2051  * for the kernel's use.
2052  *
2053  * @funcprops \isr_ok
2054  *
2055  * @param queue Address of the queue.
2056  * @param data Address of the data item.
2057  */
2058 void k_queue_prepend(struct k_queue *queue, void *data);
2059 
2060 /**
2061  * @brief Prepend an element to a queue.
2062  *
2063  * This routine prepends a data item to @a queue. There is an implicit memory
2064  * allocation to create an additional temporary bookkeeping data structure from
2065  * the calling thread's resource pool, which is automatically freed when the
2066  * item is removed. The data itself is not copied.
2067  *
2068  * @funcprops \isr_ok
2069  *
2070  * @param queue Address of the queue.
2071  * @param data Address of the data item.
2072  *
2073  * @retval 0 on success
2074  * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2075  */
2076 __syscall int32_t k_queue_alloc_prepend(struct k_queue *queue, void *data);
2077 
2078 /**
2079  * @brief Inserts an element to a queue.
2080  *
2081  * This routine inserts a data item to @a queue after previous item. A queue
2082  * data item must be aligned on a word boundary, and the first word of
2083  * the item is reserved for the kernel's use.
2084  *
2085  * @funcprops \isr_ok
2086  *
2087  * @param queue Address of the queue.
2088  * @param prev Address of the previous data item.
2089  * @param data Address of the data item.
2090  */
2091 void k_queue_insert(struct k_queue *queue, void *prev, void *data);
2092 
2093 /**
2094  * @brief Atomically append a list of elements to a queue.
2095  *
2096  * This routine adds a list of data items to @a queue in one operation.
2097  * The data items must be in a singly-linked list, with the first word
2098  * in each data item pointing to the next data item; the list must be
2099  * NULL-terminated.
2100  *
2101  * @funcprops \isr_ok
2102  *
2103  * @param queue Address of the queue.
2104  * @param head Pointer to first node in singly-linked list.
2105  * @param tail Pointer to last node in singly-linked list.
2106  *
2107  * @retval 0 on success
2108  * @retval -EINVAL on invalid supplied data
2109  *
2110  */
2111 int k_queue_append_list(struct k_queue *queue, void *head, void *tail);
2112 
2113 /**
2114  * @brief Atomically add a list of elements to a queue.
2115  *
2116  * This routine adds a list of data items to @a queue in one operation.
2117  * The data items must be in a singly-linked list implemented using a
2118  * sys_slist_t object. Upon completion, the original list is empty.
2119  *
2120  * @funcprops \isr_ok
2121  *
2122  * @param queue Address of the queue.
2123  * @param list Pointer to sys_slist_t object.
2124  *
2125  * @retval 0 on success
2126  * @retval -EINVAL on invalid data
2127  */
2128 int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list);
2129 
2130 /**
2131  * @brief Get an element from a queue.
2132  *
2133  * This routine removes first data item from @a queue. The first word of the
2134  * data item is reserved for the kernel's use.
2135  *
2136  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2137  *
2138  * @funcprops \isr_ok
2139  *
2140  * @param queue Address of the queue.
2141  * @param timeout Waiting period to obtain a data item, or one of the special
2142  *                values K_NO_WAIT and K_FOREVER.
2143  *
2144  * @return Address of the data item if successful; NULL if returned
2145  * without waiting, or waiting period timed out.
2146  */
2147 __syscall void *k_queue_get(struct k_queue *queue, k_timeout_t timeout);
2148 
2149 /**
2150  * @brief Remove an element from a queue.
2151  *
2152  * This routine removes data item from @a queue. The first word of the
2153  * data item is reserved for the kernel's use. Removing elements from k_queue
2154  * rely on sys_slist_find_and_remove which is not a constant time operation.
2155  *
2156  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2157  *
2158  * @funcprops \isr_ok
2159  *
2160  * @param queue Address of the queue.
2161  * @param data Address of the data item.
2162  *
2163  * @return true if data item was removed
2164  */
2165 bool k_queue_remove(struct k_queue *queue, void *data);
2166 
2167 /**
2168  * @brief Append an element to a queue only if it's not present already.
2169  *
2170  * This routine appends data item to @a queue. The first word of the data
2171  * item is reserved for the kernel's use. Appending elements to k_queue
2172  * relies on sys_slist_is_node_in_list which is not a constant time operation.
2173  *
2174  * @funcprops \isr_ok
2175  *
2176  * @param queue Address of the queue.
2177  * @param data Address of the data item.
2178  *
2179  * @return true if data item was added, false if not
2180  */
2181 bool k_queue_unique_append(struct k_queue *queue, void *data);
2182 
2183 /**
2184  * @brief Query a queue to see if it has data available.
2185  *
2186  * Note that the data might be already gone by the time this function returns
2187  * if other threads are also trying to read from the queue.
2188  *
2189  * @funcprops \isr_ok
2190  *
2191  * @param queue Address of the queue.
2192  *
2193  * @return Non-zero if the queue is empty.
2194  * @return 0 if data is available.
2195  */
2196 __syscall int k_queue_is_empty(struct k_queue *queue);
2197 
z_impl_k_queue_is_empty(struct k_queue * queue)2198 static inline int z_impl_k_queue_is_empty(struct k_queue *queue)
2199 {
2200 	return sys_sflist_is_empty(&queue->data_q) ? 1 : 0;
2201 }
2202 
2203 /**
2204  * @brief Peek element at the head of queue.
2205  *
2206  * Return element from the head of queue without removing it.
2207  *
2208  * @param queue Address of the queue.
2209  *
2210  * @return Head element, or NULL if queue is empty.
2211  */
2212 __syscall void *k_queue_peek_head(struct k_queue *queue);
2213 
2214 /**
2215  * @brief Peek element at the tail of queue.
2216  *
2217  * Return element from the tail of queue without removing it.
2218  *
2219  * @param queue Address of the queue.
2220  *
2221  * @return Tail element, or NULL if queue is empty.
2222  */
2223 __syscall void *k_queue_peek_tail(struct k_queue *queue);
2224 
2225 /**
2226  * @brief Statically define and initialize a queue.
2227  *
2228  * The queue can be accessed outside the module where it is defined using:
2229  *
2230  * @code extern struct k_queue <name>; @endcode
2231  *
2232  * @param name Name of the queue.
2233  */
2234 #define K_QUEUE_DEFINE(name) \
2235 	STRUCT_SECTION_ITERABLE(k_queue, name) = \
2236 		Z_QUEUE_INITIALIZER(name)
2237 
2238 /** @} */
2239 
2240 #ifdef CONFIG_USERSPACE
2241 /**
2242  * @brief futex structure
2243  *
2244  * A k_futex is a lightweight mutual exclusion primitive designed
2245  * to minimize kernel involvement. Uncontended operation relies
2246  * only on atomic access to shared memory. k_futex are tracked as
2247  * kernel objects and can live in user memory so that any access
2248  * bypasses the kernel object permission management mechanism.
2249  */
2250 struct k_futex {
2251 	atomic_t val;
2252 };
2253 
2254 /**
2255  * @brief futex kernel data structure
2256  *
2257  * z_futex_data are the helper data structure for k_futex to complete
2258  * futex contended operation on kernel side, structure z_futex_data
2259  * of every futex object is invisible in user mode.
2260  */
2261 struct z_futex_data {
2262 	_wait_q_t wait_q;
2263 	struct k_spinlock lock;
2264 };
2265 
2266 #define Z_FUTEX_DATA_INITIALIZER(obj) \
2267 	{ \
2268 	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q) \
2269 	}
2270 
2271 /**
2272  * @defgroup futex_apis FUTEX APIs
2273  * @ingroup kernel_apis
2274  * @{
2275  */
2276 
2277 /**
2278  * @brief Pend the current thread on a futex
2279  *
2280  * Tests that the supplied futex contains the expected value, and if so,
2281  * goes to sleep until some other thread calls k_futex_wake() on it.
2282  *
2283  * @param futex Address of the futex.
2284  * @param expected Expected value of the futex, if it is different the caller
2285  *		   will not wait on it.
2286  * @param timeout Waiting period on the futex, or one of the special values
2287  *                K_NO_WAIT or K_FOREVER.
2288  * @retval -EACCES Caller does not have read access to futex address.
2289  * @retval -EAGAIN If the futex value did not match the expected parameter.
2290  * @retval -EINVAL Futex parameter address not recognized by the kernel.
2291  * @retval -ETIMEDOUT Thread woke up due to timeout and not a futex wakeup.
2292  * @retval 0 if the caller went to sleep and was woken up. The caller
2293  *	     should check the futex's value on wakeup to determine if it needs
2294  *	     to block again.
2295  */
2296 __syscall int k_futex_wait(struct k_futex *futex, int expected,
2297 			   k_timeout_t timeout);
2298 
2299 /**
2300  * @brief Wake one/all threads pending on a futex
2301  *
2302  * Wake up the highest priority thread pending on the supplied futex, or
2303  * wakeup all the threads pending on the supplied futex, and the behavior
2304  * depends on wake_all.
2305  *
2306  * @param futex Futex to wake up pending threads.
2307  * @param wake_all If true, wake up all pending threads; If false,
2308  *                 wakeup the highest priority thread.
2309  * @retval -EACCES Caller does not have access to the futex address.
2310  * @retval -EINVAL Futex parameter address not recognized by the kernel.
2311  * @retval Number of threads that were woken up.
2312  */
2313 __syscall int k_futex_wake(struct k_futex *futex, bool wake_all);
2314 
2315 /** @} */
2316 #endif
2317 
2318 /**
2319  * @defgroup event_apis Event APIs
2320  * @ingroup kernel_apis
2321  * @{
2322  */
2323 
2324 /**
2325  * Event Structure
2326  * @ingroup event_apis
2327  */
2328 
2329 struct k_event {
2330 	_wait_q_t         wait_q;
2331 	uint32_t          events;
2332 	struct k_spinlock lock;
2333 
2334 	SYS_PORT_TRACING_TRACKING_FIELD(k_event)
2335 
2336 #ifdef CONFIG_OBJ_CORE_EVENT
2337 	struct k_obj_core obj_core;
2338 #endif
2339 
2340 };
2341 
2342 #define Z_EVENT_INITIALIZER(obj) \
2343 	{ \
2344 	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2345 	.events = 0 \
2346 	}
2347 
2348 /**
2349  * @brief Initialize an event object
2350  *
2351  * This routine initializes an event object, prior to its first use.
2352  *
2353  * @param event Address of the event object.
2354  */
2355 __syscall void k_event_init(struct k_event *event);
2356 
2357 /**
2358  * @brief Post one or more events to an event object
2359  *
2360  * This routine posts one or more events to an event object. All tasks waiting
2361  * on the event object @a event whose waiting conditions become met by this
2362  * posting immediately unpend.
2363  *
2364  * Posting differs from setting in that posted events are merged together with
2365  * the current set of events tracked by the event object.
2366  *
2367  * @param event Address of the event object
2368  * @param events Set of events to post to @a event
2369  *
2370  * @retval Previous value of the events in @a event
2371  */
2372 __syscall uint32_t k_event_post(struct k_event *event, uint32_t events);
2373 
2374 /**
2375  * @brief Set the events in an event object
2376  *
2377  * This routine sets the events stored in event object to the specified value.
2378  * All tasks waiting on the event object @a event whose waiting conditions
2379  * become met by this immediately unpend.
2380  *
2381  * Setting differs from posting in that set events replace the current set of
2382  * events tracked by the event object.
2383  *
2384  * @param event Address of the event object
2385  * @param events Set of events to set in @a event
2386  *
2387  * @retval Previous value of the events in @a event
2388  */
2389 __syscall uint32_t k_event_set(struct k_event *event, uint32_t events);
2390 
2391 /**
2392  * @brief Set or clear the events in an event object
2393  *
2394  * This routine sets the events stored in event object to the specified value.
2395  * All tasks waiting on the event object @a event whose waiting conditions
2396  * become met by this immediately unpend. Unlike @ref k_event_set, this routine
2397  * allows specific event bits to be set and cleared as determined by the mask.
2398  *
2399  * @param event Address of the event object
2400  * @param events Set of events to set/clear in @a event
2401  * @param events_mask Mask to be applied to @a events
2402  *
2403  * @retval Previous value of the events in @a events_mask
2404  */
2405 __syscall uint32_t k_event_set_masked(struct k_event *event, uint32_t events,
2406 				  uint32_t events_mask);
2407 
2408 /**
2409  * @brief Clear the events in an event object
2410  *
2411  * This routine clears (resets) the specified events stored in an event object.
2412  *
2413  * @param event Address of the event object
2414  * @param events Set of events to clear in @a event
2415  *
2416  * @retval Previous value of the events in @a event
2417  */
2418 __syscall uint32_t k_event_clear(struct k_event *event, uint32_t events);
2419 
2420 /**
2421  * @brief Wait for any of the specified events
2422  *
2423  * This routine waits on event object @a event until any of the specified
2424  * events have been delivered to the event object, or the maximum wait time
2425  * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
2426  * events that are expressed as bits in a single 32-bit word.
2427  *
2428  * @note The caller must be careful when resetting if there are multiple threads
2429  * waiting for the event object @a event.
2430  *
2431  * @param event Address of the event object
2432  * @param events Set of desired events on which to wait
2433  * @param reset If true, clear the set of events tracked by the event object
2434  *              before waiting. If false, do not clear the events.
2435  * @param timeout Waiting period for the desired set of events or one of the
2436  *                special values K_NO_WAIT and K_FOREVER.
2437  *
2438  * @retval set of matching events upon success
2439  * @retval 0 if matching events were not received within the specified time
2440  */
2441 __syscall uint32_t k_event_wait(struct k_event *event, uint32_t events,
2442 				bool reset, k_timeout_t timeout);
2443 
2444 /**
2445  * @brief Wait for all of the specified events
2446  *
2447  * This routine waits on event object @a event until all of the specified
2448  * events have been delivered to the event object, or the maximum wait time
2449  * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
2450  * events that are expressed as bits in a single 32-bit word.
2451  *
2452  * @note The caller must be careful when resetting if there are multiple threads
2453  * waiting for the event object @a event.
2454  *
2455  * @param event Address of the event object
2456  * @param events Set of desired events on which to wait
2457  * @param reset If true, clear the set of events tracked by the event object
2458  *              before waiting. If false, do not clear the events.
2459  * @param timeout Waiting period for the desired set of events or one of the
2460  *                special values K_NO_WAIT and K_FOREVER.
2461  *
2462  * @retval set of matching events upon success
2463  * @retval 0 if matching events were not received within the specified time
2464  */
2465 __syscall uint32_t k_event_wait_all(struct k_event *event, uint32_t events,
2466 				    bool reset, k_timeout_t timeout);
2467 
2468 /**
2469  * @brief Test the events currently tracked in the event object
2470  *
2471  * @param event Address of the event object
2472  * @param events_mask Set of desired events to test
2473  *
2474  * @retval Current value of events in @a events_mask
2475  */
k_event_test(struct k_event * event,uint32_t events_mask)2476 static inline uint32_t k_event_test(struct k_event *event, uint32_t events_mask)
2477 {
2478 	return k_event_wait(event, events_mask, false, K_NO_WAIT);
2479 }
2480 
2481 /**
2482  * @brief Statically define and initialize an event object
2483  *
2484  * The event can be accessed outside the module where it is defined using:
2485  *
2486  * @code extern struct k_event <name>; @endcode
2487  *
2488  * @param name Name of the event object.
2489  */
2490 #define K_EVENT_DEFINE(name)                                   \
2491 	STRUCT_SECTION_ITERABLE(k_event, name) =               \
2492 		Z_EVENT_INITIALIZER(name);
2493 
2494 /** @} */
2495 
2496 struct k_fifo {
2497 	struct k_queue _queue;
2498 #ifdef CONFIG_OBJ_CORE_FIFO
2499 	struct k_obj_core  obj_core;
2500 #endif
2501 };
2502 
2503 /**
2504  * @cond INTERNAL_HIDDEN
2505  */
2506 #define Z_FIFO_INITIALIZER(obj) \
2507 	{ \
2508 	._queue = Z_QUEUE_INITIALIZER(obj._queue) \
2509 	}
2510 
2511 /**
2512  * INTERNAL_HIDDEN @endcond
2513  */
2514 
2515 /**
2516  * @defgroup fifo_apis FIFO APIs
2517  * @ingroup kernel_apis
2518  * @{
2519  */
2520 
2521 /**
2522  * @brief Initialize a FIFO queue.
2523  *
2524  * This routine initializes a FIFO queue, prior to its first use.
2525  *
2526  * @param fifo Address of the FIFO queue.
2527  */
2528 #define k_fifo_init(fifo)                                    \
2529 	({                                                   \
2530 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, init, fifo); \
2531 	k_queue_init(&(fifo)->_queue);                       \
2532 	K_OBJ_CORE_INIT(K_OBJ_CORE(fifo), _obj_type_fifo);   \
2533 	K_OBJ_CORE_LINK(K_OBJ_CORE(fifo));                   \
2534 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, init, fifo);  \
2535 	})
2536 
2537 /**
2538  * @brief Cancel waiting on a FIFO queue.
2539  *
2540  * This routine causes first thread pending on @a fifo, if any, to
2541  * return from k_fifo_get() call with NULL value (as if timeout
2542  * expired).
2543  *
2544  * @funcprops \isr_ok
2545  *
2546  * @param fifo Address of the FIFO queue.
2547  */
2548 #define k_fifo_cancel_wait(fifo) \
2549 	({ \
2550 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, cancel_wait, fifo); \
2551 	k_queue_cancel_wait(&(fifo)->_queue); \
2552 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, cancel_wait, fifo); \
2553 	})
2554 
2555 /**
2556  * @brief Add an element to a FIFO queue.
2557  *
2558  * This routine adds a data item to @a fifo. A FIFO data item must be
2559  * aligned on a word boundary, and the first word of the item is reserved
2560  * for the kernel's use.
2561  *
2562  * @funcprops \isr_ok
2563  *
2564  * @param fifo Address of the FIFO.
2565  * @param data Address of the data item.
2566  */
2567 #define k_fifo_put(fifo, data) \
2568 	({ \
2569 	void *_data = data; \
2570 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put, fifo, _data); \
2571 	k_queue_append(&(fifo)->_queue, _data); \
2572 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put, fifo, _data); \
2573 	})
2574 
2575 /**
2576  * @brief Add an element to a FIFO queue.
2577  *
2578  * This routine adds a data item to @a fifo. There is an implicit memory
2579  * allocation to create an additional temporary bookkeeping data structure from
2580  * the calling thread's resource pool, which is automatically freed when the
2581  * item is removed. The data itself is not copied.
2582  *
2583  * @funcprops \isr_ok
2584  *
2585  * @param fifo Address of the FIFO.
2586  * @param data Address of the data item.
2587  *
2588  * @retval 0 on success
2589  * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2590  */
2591 #define k_fifo_alloc_put(fifo, data) \
2592 	({ \
2593 	void *_data = data; \
2594 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, alloc_put, fifo, _data); \
2595 	int fap_ret = k_queue_alloc_append(&(fifo)->_queue, _data); \
2596 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, alloc_put, fifo, _data, fap_ret); \
2597 	fap_ret; \
2598 	})
2599 
2600 /**
2601  * @brief Atomically add a list of elements to a FIFO.
2602  *
2603  * This routine adds a list of data items to @a fifo in one operation.
2604  * The data items must be in a singly-linked list, with the first word of
2605  * each data item pointing to the next data item; the list must be
2606  * NULL-terminated.
2607  *
2608  * @funcprops \isr_ok
2609  *
2610  * @param fifo Address of the FIFO queue.
2611  * @param head Pointer to first node in singly-linked list.
2612  * @param tail Pointer to last node in singly-linked list.
2613  */
2614 #define k_fifo_put_list(fifo, head, tail) \
2615 	({ \
2616 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_list, fifo, head, tail); \
2617 	k_queue_append_list(&(fifo)->_queue, head, tail); \
2618 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_list, fifo, head, tail); \
2619 	})
2620 
2621 /**
2622  * @brief Atomically add a list of elements to a FIFO queue.
2623  *
2624  * This routine adds a list of data items to @a fifo in one operation.
2625  * The data items must be in a singly-linked list implemented using a
2626  * sys_slist_t object. Upon completion, the sys_slist_t object is invalid
2627  * and must be re-initialized via sys_slist_init().
2628  *
2629  * @funcprops \isr_ok
2630  *
2631  * @param fifo Address of the FIFO queue.
2632  * @param list Pointer to sys_slist_t object.
2633  */
2634 #define k_fifo_put_slist(fifo, list) \
2635 	({ \
2636 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_slist, fifo, list); \
2637 	k_queue_merge_slist(&(fifo)->_queue, list); \
2638 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_slist, fifo, list); \
2639 	})
2640 
2641 /**
2642  * @brief Get an element from a FIFO queue.
2643  *
2644  * This routine removes a data item from @a fifo in a "first in, first out"
2645  * manner. The first word of the data item is reserved for the kernel's use.
2646  *
2647  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2648  *
2649  * @funcprops \isr_ok
2650  *
2651  * @param fifo Address of the FIFO queue.
2652  * @param timeout Waiting period to obtain a data item,
2653  *                or one of the special values K_NO_WAIT and K_FOREVER.
2654  *
2655  * @return Address of the data item if successful; NULL if returned
2656  * without waiting, or waiting period timed out.
2657  */
2658 #define k_fifo_get(fifo, timeout) \
2659 	({ \
2660 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, get, fifo, timeout); \
2661 	void *fg_ret = k_queue_get(&(fifo)->_queue, timeout); \
2662 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, get, fifo, timeout, fg_ret); \
2663 	fg_ret; \
2664 	})
2665 
2666 /**
2667  * @brief Query a FIFO queue to see if it has data available.
2668  *
2669  * Note that the data might be already gone by the time this function returns
2670  * if other threads is also trying to read from the FIFO.
2671  *
2672  * @funcprops \isr_ok
2673  *
2674  * @param fifo Address of the FIFO queue.
2675  *
2676  * @return Non-zero if the FIFO queue is empty.
2677  * @return 0 if data is available.
2678  */
2679 #define k_fifo_is_empty(fifo) \
2680 	k_queue_is_empty(&(fifo)->_queue)
2681 
2682 /**
2683  * @brief Peek element at the head of a FIFO queue.
2684  *
2685  * Return element from the head of FIFO queue without removing it. A usecase
2686  * for this is if elements of the FIFO object are themselves containers. Then
2687  * on each iteration of processing, a head container will be peeked,
2688  * and some data processed out of it, and only if the container is empty,
2689  * it will be completely remove from the FIFO queue.
2690  *
2691  * @param fifo Address of the FIFO queue.
2692  *
2693  * @return Head element, or NULL if the FIFO queue is empty.
2694  */
2695 #define k_fifo_peek_head(fifo) \
2696 	({ \
2697 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_head, fifo); \
2698 	void *fph_ret = k_queue_peek_head(&(fifo)->_queue); \
2699 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_head, fifo, fph_ret); \
2700 	fph_ret; \
2701 	})
2702 
2703 /**
2704  * @brief Peek element at the tail of FIFO queue.
2705  *
2706  * Return element from the tail of FIFO queue (without removing it). A usecase
2707  * for this is if elements of the FIFO queue are themselves containers. Then
2708  * it may be useful to add more data to the last container in a FIFO queue.
2709  *
2710  * @param fifo Address of the FIFO queue.
2711  *
2712  * @return Tail element, or NULL if a FIFO queue is empty.
2713  */
2714 #define k_fifo_peek_tail(fifo) \
2715 	({ \
2716 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_tail, fifo); \
2717 	void *fpt_ret = k_queue_peek_tail(&(fifo)->_queue); \
2718 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_tail, fifo, fpt_ret); \
2719 	fpt_ret; \
2720 	})
2721 
2722 /**
2723  * @brief Statically define and initialize a FIFO queue.
2724  *
2725  * The FIFO queue can be accessed outside the module where it is defined using:
2726  *
2727  * @code extern struct k_fifo <name>; @endcode
2728  *
2729  * @param name Name of the FIFO queue.
2730  */
2731 #define K_FIFO_DEFINE(name) \
2732 	STRUCT_SECTION_ITERABLE(k_fifo, name) = \
2733 		Z_FIFO_INITIALIZER(name)
2734 
2735 /** @} */
2736 
2737 struct k_lifo {
2738 	struct k_queue _queue;
2739 #ifdef CONFIG_OBJ_CORE_LIFO
2740 	struct k_obj_core  obj_core;
2741 #endif
2742 };
2743 
2744 /**
2745  * @cond INTERNAL_HIDDEN
2746  */
2747 
2748 #define Z_LIFO_INITIALIZER(obj) \
2749 	{ \
2750 	._queue = Z_QUEUE_INITIALIZER(obj._queue) \
2751 	}
2752 
2753 /**
2754  * INTERNAL_HIDDEN @endcond
2755  */
2756 
2757 /**
2758  * @defgroup lifo_apis LIFO APIs
2759  * @ingroup kernel_apis
2760  * @{
2761  */
2762 
2763 /**
2764  * @brief Initialize a LIFO queue.
2765  *
2766  * This routine initializes a LIFO queue object, prior to its first use.
2767  *
2768  * @param lifo Address of the LIFO queue.
2769  */
2770 #define k_lifo_init(lifo)                                    \
2771 	({                                                   \
2772 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, init, lifo); \
2773 	k_queue_init(&(lifo)->_queue);                       \
2774 	K_OBJ_CORE_INIT(K_OBJ_CORE(lifo), _obj_type_lifo);   \
2775 	K_OBJ_CORE_LINK(K_OBJ_CORE(lifo));                   \
2776 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, init, lifo);  \
2777 	})
2778 
2779 /**
2780  * @brief Add an element to a LIFO queue.
2781  *
2782  * This routine adds a data item to @a lifo. A LIFO queue data item must be
2783  * aligned on a word boundary, and the first word of the item is
2784  * reserved for the kernel's use.
2785  *
2786  * @funcprops \isr_ok
2787  *
2788  * @param lifo Address of the LIFO queue.
2789  * @param data Address of the data item.
2790  */
2791 #define k_lifo_put(lifo, data) \
2792 	({ \
2793 	void *_data = data; \
2794 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, put, lifo, _data); \
2795 	k_queue_prepend(&(lifo)->_queue, _data); \
2796 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, put, lifo, _data); \
2797 	})
2798 
2799 /**
2800  * @brief Add an element to a LIFO queue.
2801  *
2802  * This routine adds a data item to @a lifo. There is an implicit memory
2803  * allocation to create an additional temporary bookkeeping data structure from
2804  * the calling thread's resource pool, which is automatically freed when the
2805  * item is removed. The data itself is not copied.
2806  *
2807  * @funcprops \isr_ok
2808  *
2809  * @param lifo Address of the LIFO.
2810  * @param data Address of the data item.
2811  *
2812  * @retval 0 on success
2813  * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2814  */
2815 #define k_lifo_alloc_put(lifo, data) \
2816 	({ \
2817 	void *_data = data; \
2818 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, alloc_put, lifo, _data); \
2819 	int lap_ret = k_queue_alloc_prepend(&(lifo)->_queue, _data); \
2820 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, alloc_put, lifo, _data, lap_ret); \
2821 	lap_ret; \
2822 	})
2823 
2824 /**
2825  * @brief Get an element from a LIFO queue.
2826  *
2827  * This routine removes a data item from @a LIFO in a "last in, first out"
2828  * manner. The first word of the data item is reserved for the kernel's use.
2829  *
2830  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2831  *
2832  * @funcprops \isr_ok
2833  *
2834  * @param lifo Address of the LIFO queue.
2835  * @param timeout Waiting period to obtain a data item,
2836  *                or one of the special values K_NO_WAIT and K_FOREVER.
2837  *
2838  * @return Address of the data item if successful; NULL if returned
2839  * without waiting, or waiting period timed out.
2840  */
2841 #define k_lifo_get(lifo, timeout) \
2842 	({ \
2843 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, get, lifo, timeout); \
2844 	void *lg_ret = k_queue_get(&(lifo)->_queue, timeout); \
2845 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, get, lifo, timeout, lg_ret); \
2846 	lg_ret; \
2847 	})
2848 
2849 /**
2850  * @brief Statically define and initialize a LIFO queue.
2851  *
2852  * The LIFO queue can be accessed outside the module where it is defined using:
2853  *
2854  * @code extern struct k_lifo <name>; @endcode
2855  *
2856  * @param name Name of the fifo.
2857  */
2858 #define K_LIFO_DEFINE(name) \
2859 	STRUCT_SECTION_ITERABLE(k_lifo, name) = \
2860 		Z_LIFO_INITIALIZER(name)
2861 
2862 /** @} */
2863 
2864 /**
2865  * @cond INTERNAL_HIDDEN
2866  */
2867 #define K_STACK_FLAG_ALLOC	((uint8_t)1)	/* Buffer was allocated */
2868 
2869 typedef uintptr_t stack_data_t;
2870 
2871 struct k_stack {
2872 	_wait_q_t wait_q;
2873 	struct k_spinlock lock;
2874 	stack_data_t *base, *next, *top;
2875 
2876 	uint8_t flags;
2877 
2878 	SYS_PORT_TRACING_TRACKING_FIELD(k_stack)
2879 
2880 #ifdef CONFIG_OBJ_CORE_STACK
2881 	struct k_obj_core  obj_core;
2882 #endif
2883 };
2884 
2885 #define Z_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
2886 	{ \
2887 	.wait_q = Z_WAIT_Q_INIT(&(obj).wait_q),	\
2888 	.base = (stack_buffer), \
2889 	.next = (stack_buffer), \
2890 	.top = (stack_buffer) + (stack_num_entries), \
2891 	}
2892 
2893 /**
2894  * INTERNAL_HIDDEN @endcond
2895  */
2896 
2897 /**
2898  * @defgroup stack_apis Stack APIs
2899  * @ingroup kernel_apis
2900  * @{
2901  */
2902 
2903 /**
2904  * @brief Initialize a stack.
2905  *
2906  * This routine initializes a stack object, prior to its first use.
2907  *
2908  * @param stack Address of the stack.
2909  * @param buffer Address of array used to hold stacked values.
2910  * @param num_entries Maximum number of values that can be stacked.
2911  */
2912 void k_stack_init(struct k_stack *stack,
2913 		  stack_data_t *buffer, uint32_t num_entries);
2914 
2915 
2916 /**
2917  * @brief Initialize a stack.
2918  *
2919  * This routine initializes a stack object, prior to its first use. Internal
2920  * buffers will be allocated from the calling thread's resource pool.
2921  * This memory will be released if k_stack_cleanup() is called, or
2922  * userspace is enabled and the stack object loses all references to it.
2923  *
2924  * @param stack Address of the stack.
2925  * @param num_entries Maximum number of values that can be stacked.
2926  *
2927  * @return -ENOMEM if memory couldn't be allocated
2928  */
2929 
2930 __syscall int32_t k_stack_alloc_init(struct k_stack *stack,
2931 				   uint32_t num_entries);
2932 
2933 /**
2934  * @brief Release a stack's allocated buffer
2935  *
2936  * If a stack object was given a dynamically allocated buffer via
2937  * k_stack_alloc_init(), this will free it. This function does nothing
2938  * if the buffer wasn't dynamically allocated.
2939  *
2940  * @param stack Address of the stack.
2941  * @retval 0 on success
2942  * @retval -EAGAIN when object is still in use
2943  */
2944 int k_stack_cleanup(struct k_stack *stack);
2945 
2946 /**
2947  * @brief Push an element onto a stack.
2948  *
2949  * This routine adds a stack_data_t value @a data to @a stack.
2950  *
2951  * @funcprops \isr_ok
2952  *
2953  * @param stack Address of the stack.
2954  * @param data Value to push onto the stack.
2955  *
2956  * @retval 0 on success
2957  * @retval -ENOMEM if stack is full
2958  */
2959 __syscall int k_stack_push(struct k_stack *stack, stack_data_t data);
2960 
2961 /**
2962  * @brief Pop an element from a stack.
2963  *
2964  * This routine removes a stack_data_t value from @a stack in a "last in,
2965  * first out" manner and stores the value in @a data.
2966  *
2967  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2968  *
2969  * @funcprops \isr_ok
2970  *
2971  * @param stack Address of the stack.
2972  * @param data Address of area to hold the value popped from the stack.
2973  * @param timeout Waiting period to obtain a value,
2974  *                or one of the special values K_NO_WAIT and
2975  *                K_FOREVER.
2976  *
2977  * @retval 0 Element popped from stack.
2978  * @retval -EBUSY Returned without waiting.
2979  * @retval -EAGAIN Waiting period timed out.
2980  */
2981 __syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data,
2982 			  k_timeout_t timeout);
2983 
2984 /**
2985  * @brief Statically define and initialize a stack
2986  *
2987  * The stack can be accessed outside the module where it is defined using:
2988  *
2989  * @code extern struct k_stack <name>; @endcode
2990  *
2991  * @param name Name of the stack.
2992  * @param stack_num_entries Maximum number of values that can be stacked.
2993  */
2994 #define K_STACK_DEFINE(name, stack_num_entries)                \
2995 	stack_data_t __noinit                                  \
2996 		_k_stack_buf_##name[stack_num_entries];        \
2997 	STRUCT_SECTION_ITERABLE(k_stack, name) =               \
2998 		Z_STACK_INITIALIZER(name, _k_stack_buf_##name, \
2999 				    stack_num_entries)
3000 
3001 /** @} */
3002 
3003 /**
3004  * @cond INTERNAL_HIDDEN
3005  */
3006 
3007 struct k_work;
3008 struct k_work_q;
3009 struct k_work_queue_config;
3010 extern struct k_work_q k_sys_work_q;
3011 
3012 /**
3013  * INTERNAL_HIDDEN @endcond
3014  */
3015 
3016 /**
3017  * @defgroup mutex_apis Mutex APIs
3018  * @ingroup kernel_apis
3019  * @{
3020  */
3021 
3022 /**
3023  * Mutex Structure
3024  * @ingroup mutex_apis
3025  */
3026 struct k_mutex {
3027 	/** Mutex wait queue */
3028 	_wait_q_t wait_q;
3029 	/** Mutex owner */
3030 	struct k_thread *owner;
3031 
3032 	/** Current lock count */
3033 	uint32_t lock_count;
3034 
3035 	/** Original thread priority */
3036 	int owner_orig_prio;
3037 
3038 	SYS_PORT_TRACING_TRACKING_FIELD(k_mutex)
3039 
3040 #ifdef CONFIG_OBJ_CORE_MUTEX
3041 	struct k_obj_core obj_core;
3042 #endif
3043 };
3044 
3045 /**
3046  * @cond INTERNAL_HIDDEN
3047  */
3048 #define Z_MUTEX_INITIALIZER(obj) \
3049 	{ \
3050 	.wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
3051 	.owner = NULL, \
3052 	.lock_count = 0, \
3053 	.owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
3054 	}
3055 
3056 /**
3057  * INTERNAL_HIDDEN @endcond
3058  */
3059 
3060 /**
3061  * @brief Statically define and initialize a mutex.
3062  *
3063  * The mutex can be accessed outside the module where it is defined using:
3064  *
3065  * @code extern struct k_mutex <name>; @endcode
3066  *
3067  * @param name Name of the mutex.
3068  */
3069 #define K_MUTEX_DEFINE(name) \
3070 	STRUCT_SECTION_ITERABLE(k_mutex, name) = \
3071 		Z_MUTEX_INITIALIZER(name)
3072 
3073 /**
3074  * @brief Initialize a mutex.
3075  *
3076  * This routine initializes a mutex object, prior to its first use.
3077  *
3078  * Upon completion, the mutex is available and does not have an owner.
3079  *
3080  * @param mutex Address of the mutex.
3081  *
3082  * @retval 0 Mutex object created
3083  *
3084  */
3085 __syscall int k_mutex_init(struct k_mutex *mutex);
3086 
3087 
3088 /**
3089  * @brief Lock a mutex.
3090  *
3091  * This routine locks @a mutex. If the mutex is locked by another thread,
3092  * the calling thread waits until the mutex becomes available or until
3093  * a timeout occurs.
3094  *
3095  * A thread is permitted to lock a mutex it has already locked. The operation
3096  * completes immediately and the lock count is increased by 1.
3097  *
3098  * Mutexes may not be locked in ISRs.
3099  *
3100  * @param mutex Address of the mutex.
3101  * @param timeout Waiting period to lock the mutex,
3102  *                or one of the special values K_NO_WAIT and
3103  *                K_FOREVER.
3104  *
3105  * @retval 0 Mutex locked.
3106  * @retval -EBUSY Returned without waiting.
3107  * @retval -EAGAIN Waiting period timed out.
3108  */
3109 __syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout);
3110 
3111 /**
3112  * @brief Unlock a mutex.
3113  *
3114  * This routine unlocks @a mutex. The mutex must already be locked by the
3115  * calling thread.
3116  *
3117  * The mutex cannot be claimed by another thread until it has been unlocked by
3118  * the calling thread as many times as it was previously locked by that
3119  * thread.
3120  *
3121  * Mutexes may not be unlocked in ISRs, as mutexes must only be manipulated
3122  * in thread context due to ownership and priority inheritance semantics.
3123  *
3124  * @param mutex Address of the mutex.
3125  *
3126  * @retval 0 Mutex unlocked.
3127  * @retval -EPERM The current thread does not own the mutex
3128  * @retval -EINVAL The mutex is not locked
3129  *
3130  */
3131 __syscall int k_mutex_unlock(struct k_mutex *mutex);
3132 
3133 /**
3134  * @}
3135  */
3136 
3137 
3138 struct k_condvar {
3139 	_wait_q_t wait_q;
3140 
3141 #ifdef CONFIG_OBJ_CORE_CONDVAR
3142 	struct k_obj_core  obj_core;
3143 #endif
3144 };
3145 
3146 #define Z_CONDVAR_INITIALIZER(obj)                                             \
3147 	{                                                                      \
3148 		.wait_q = Z_WAIT_Q_INIT(&obj.wait_q),                          \
3149 	}
3150 
3151 /**
3152  * @defgroup condvar_apis Condition Variables APIs
3153  * @ingroup kernel_apis
3154  * @{
3155  */
3156 
3157 /**
3158  * @brief Initialize a condition variable
3159  *
3160  * @param condvar pointer to a @p k_condvar structure
3161  * @retval 0 Condition variable created successfully
3162  */
3163 __syscall int k_condvar_init(struct k_condvar *condvar);
3164 
3165 /**
3166  * @brief Signals one thread that is pending on the condition variable
3167  *
3168  * @param condvar pointer to a @p k_condvar structure
3169  * @retval 0 On success
3170  */
3171 __syscall int k_condvar_signal(struct k_condvar *condvar);
3172 
3173 /**
3174  * @brief Unblock all threads that are pending on the condition
3175  * variable
3176  *
3177  * @param condvar pointer to a @p k_condvar structure
3178  * @return An integer with number of woken threads on success
3179  */
3180 __syscall int k_condvar_broadcast(struct k_condvar *condvar);
3181 
3182 /**
3183  * @brief Waits on the condition variable releasing the mutex lock
3184  *
3185  * Atomically releases the currently owned mutex, blocks the current thread
3186  * waiting on the condition variable specified by @a condvar,
3187  * and finally acquires the mutex again.
3188  *
3189  * The waiting thread unblocks only after another thread calls
3190  * k_condvar_signal, or k_condvar_broadcast with the same condition variable.
3191  *
3192  * @param condvar pointer to a @p k_condvar structure
3193  * @param mutex Address of the mutex.
3194  * @param timeout Waiting period for the condition variable
3195  *                or one of the special values K_NO_WAIT and K_FOREVER.
3196  * @retval 0 On success
3197  * @retval -EAGAIN Waiting period timed out.
3198  */
3199 __syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
3200 			     k_timeout_t timeout);
3201 
3202 /**
3203  * @brief Statically define and initialize a condition variable.
3204  *
3205  * The condition variable can be accessed outside the module where it is
3206  * defined using:
3207  *
3208  * @code extern struct k_condvar <name>; @endcode
3209  *
3210  * @param name Name of the condition variable.
3211  */
3212 #define K_CONDVAR_DEFINE(name)                                                 \
3213 	STRUCT_SECTION_ITERABLE(k_condvar, name) =                             \
3214 		Z_CONDVAR_INITIALIZER(name)
3215 /**
3216  * @}
3217  */
3218 
3219 /**
3220  * @cond INTERNAL_HIDDEN
3221  */
3222 
3223 struct k_sem {
3224 	_wait_q_t wait_q;
3225 	unsigned int count;
3226 	unsigned int limit;
3227 
3228 	Z_DECL_POLL_EVENT
3229 
3230 	SYS_PORT_TRACING_TRACKING_FIELD(k_sem)
3231 
3232 #ifdef CONFIG_OBJ_CORE_SEM
3233 	struct k_obj_core  obj_core;
3234 #endif
3235 };
3236 
3237 #define Z_SEM_INITIALIZER(obj, initial_count, count_limit) \
3238 	{ \
3239 	.wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
3240 	.count = (initial_count), \
3241 	.limit = (count_limit), \
3242 	Z_POLL_EVENT_OBJ_INIT(obj) \
3243 	}
3244 
3245 /**
3246  * INTERNAL_HIDDEN @endcond
3247  */
3248 
3249 /**
3250  * @defgroup semaphore_apis Semaphore APIs
3251  * @ingroup kernel_apis
3252  * @{
3253  */
3254 
3255 /**
3256  * @brief Maximum limit value allowed for a semaphore.
3257  *
3258  * This is intended for use when a semaphore does not have
3259  * an explicit maximum limit, and instead is just used for
3260  * counting purposes.
3261  *
3262  */
3263 #define K_SEM_MAX_LIMIT UINT_MAX
3264 
3265 /**
3266  * @brief Initialize a semaphore.
3267  *
3268  * This routine initializes a semaphore object, prior to its first use.
3269  *
3270  * @param sem Address of the semaphore.
3271  * @param initial_count Initial semaphore count.
3272  * @param limit Maximum permitted semaphore count.
3273  *
3274  * @see K_SEM_MAX_LIMIT
3275  *
3276  * @retval 0 Semaphore created successfully
3277  * @retval -EINVAL Invalid values
3278  *
3279  */
3280 __syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
3281 			  unsigned int limit);
3282 
3283 /**
3284  * @brief Take a semaphore.
3285  *
3286  * This routine takes @a sem.
3287  *
3288  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
3289  *
3290  * @funcprops \isr_ok
3291  *
3292  * @param sem Address of the semaphore.
3293  * @param timeout Waiting period to take the semaphore,
3294  *                or one of the special values K_NO_WAIT and K_FOREVER.
3295  *
3296  * @retval 0 Semaphore taken.
3297  * @retval -EBUSY Returned without waiting.
3298  * @retval -EAGAIN Waiting period timed out,
3299  *			or the semaphore was reset during the waiting period.
3300  */
3301 __syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
3302 
3303 /**
3304  * @brief Give a semaphore.
3305  *
3306  * This routine gives @a sem, unless the semaphore is already at its maximum
3307  * permitted count.
3308  *
3309  * @funcprops \isr_ok
3310  *
3311  * @param sem Address of the semaphore.
3312  */
3313 __syscall void k_sem_give(struct k_sem *sem);
3314 
3315 /**
3316  * @brief Resets a semaphore's count to zero.
3317  *
3318  * This routine sets the count of @a sem to zero.
3319  * Any outstanding semaphore takes will be aborted
3320  * with -EAGAIN.
3321  *
3322  * @param sem Address of the semaphore.
3323  */
3324 __syscall void k_sem_reset(struct k_sem *sem);
3325 
3326 /**
3327  * @brief Get a semaphore's count.
3328  *
3329  * This routine returns the current count of @a sem.
3330  *
3331  * @param sem Address of the semaphore.
3332  *
3333  * @return Current semaphore count.
3334  */
3335 __syscall unsigned int k_sem_count_get(struct k_sem *sem);
3336 
3337 /**
3338  * @internal
3339  */
z_impl_k_sem_count_get(struct k_sem * sem)3340 static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
3341 {
3342 	return sem->count;
3343 }
3344 
3345 /**
3346  * @brief Statically define and initialize a semaphore.
3347  *
3348  * The semaphore can be accessed outside the module where it is defined using:
3349  *
3350  * @code extern struct k_sem <name>; @endcode
3351  *
3352  * @param name Name of the semaphore.
3353  * @param initial_count Initial semaphore count.
3354  * @param count_limit Maximum permitted semaphore count.
3355  */
3356 #define K_SEM_DEFINE(name, initial_count, count_limit)                                             \
3357 	STRUCT_SECTION_ITERABLE(k_sem, name) =                                                     \
3358 		Z_SEM_INITIALIZER(name, initial_count, count_limit);                               \
3359 	BUILD_ASSERT(((count_limit) != 0) &&                                                       \
3360 		     (((initial_count) < (count_limit)) || ((initial_count) == (count_limit))) &&  \
3361 		     ((count_limit) <= K_SEM_MAX_LIMIT));
3362 
3363 /** @} */
3364 
3365 /**
3366  * @cond INTERNAL_HIDDEN
3367  */
3368 
3369 struct k_work_delayable;
3370 struct k_work_sync;
3371 
3372 /**
3373  * INTERNAL_HIDDEN @endcond
3374  */
3375 
3376 /**
3377  * @defgroup workqueue_apis Work Queue APIs
3378  * @ingroup kernel_apis
3379  * @{
3380  */
3381 
3382 /** @brief The signature for a work item handler function.
3383  *
3384  * The function will be invoked by the thread animating a work queue.
3385  *
3386  * @param work the work item that provided the handler.
3387  */
3388 typedef void (*k_work_handler_t)(struct k_work *work);
3389 
3390 /** @brief Initialize a (non-delayable) work structure.
3391  *
3392  * This must be invoked before submitting a work structure for the first time.
3393  * It need not be invoked again on the same work structure.  It can be
3394  * re-invoked to change the associated handler, but this must be done when the
3395  * work item is idle.
3396  *
3397  * @funcprops \isr_ok
3398  *
3399  * @param work the work structure to be initialized.
3400  *
3401  * @param handler the handler to be invoked by the work item.
3402  */
3403 void k_work_init(struct k_work *work,
3404 		  k_work_handler_t handler);
3405 
3406 /** @brief Busy state flags from the work item.
3407  *
3408  * A zero return value indicates the work item appears to be idle.
3409  *
3410  * @note This is a live snapshot of state, which may change before the result
3411  * is checked.  Use locks where appropriate.
3412  *
3413  * @funcprops \isr_ok
3414  *
3415  * @param work pointer to the work item.
3416  *
3417  * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED,
3418  * K_WORK_RUNNING, K_WORK_CANCELING, and K_WORK_FLUSHING.
3419  */
3420 int k_work_busy_get(const struct k_work *work);
3421 
3422 /** @brief Test whether a work item is currently pending.
3423  *
3424  * Wrapper to determine whether a work item is in a non-idle state.
3425  *
3426  * @note This is a live snapshot of state, which may change before the result
3427  * is checked.  Use locks where appropriate.
3428  *
3429  * @funcprops \isr_ok
3430  *
3431  * @param work pointer to the work item.
3432  *
3433  * @return true if and only if k_work_busy_get() returns a non-zero value.
3434  */
3435 static inline bool k_work_is_pending(const struct k_work *work);
3436 
3437 /** @brief Submit a work item to a queue.
3438  *
3439  * @param queue pointer to the work queue on which the item should run.  If
3440  * NULL the queue from the most recent submission will be used.
3441  *
3442  * @funcprops \isr_ok
3443  *
3444  * @param work pointer to the work item.
3445  *
3446  * @retval 0 if work was already submitted to a queue
3447  * @retval 1 if work was not submitted and has been queued to @p queue
3448  * @retval 2 if work was running and has been queued to the queue that was
3449  * running it
3450  * @retval -EBUSY
3451  * * if work submission was rejected because the work item is cancelling; or
3452  * * @p queue is draining; or
3453  * * @p queue is plugged.
3454  * @retval -EINVAL if @p queue is null and the work item has never been run.
3455  * @retval -ENODEV if @p queue has not been started.
3456  */
3457 int k_work_submit_to_queue(struct k_work_q *queue,
3458 			   struct k_work *work);
3459 
3460 /** @brief Submit a work item to the system queue.
3461  *
3462  * @funcprops \isr_ok
3463  *
3464  * @param work pointer to the work item.
3465  *
3466  * @return as with k_work_submit_to_queue().
3467  */
3468 int k_work_submit(struct k_work *work);
3469 
3470 /** @brief Wait for last-submitted instance to complete.
3471  *
3472  * Resubmissions may occur while waiting, including chained submissions (from
3473  * within the handler).
3474  *
3475  * @note Be careful of caller and work queue thread relative priority.  If
3476  * this function sleeps it will not return until the work queue thread
3477  * completes the tasks that allow this thread to resume.
3478  *
3479  * @note Behavior is undefined if this function is invoked on @p work from a
3480  * work queue running @p work.
3481  *
3482  * @param work pointer to the work item.
3483  *
3484  * @param sync pointer to an opaque item containing state related to the
3485  * pending cancellation.  The object must persist until the call returns, and
3486  * be accessible from both the caller thread and the work queue thread.  The
3487  * object must not be used for any other flush or cancel operation until this
3488  * one completes.  On architectures with CONFIG_KERNEL_COHERENCE the object
3489  * must be allocated in coherent memory.
3490  *
3491  * @retval true if call had to wait for completion
3492  * @retval false if work was already idle
3493  */
3494 bool k_work_flush(struct k_work *work,
3495 		  struct k_work_sync *sync);
3496 
3497 /** @brief Cancel a work item.
3498  *
3499  * This attempts to prevent a pending (non-delayable) work item from being
3500  * processed by removing it from the work queue.  If the item is being
3501  * processed, the work item will continue to be processed, but resubmissions
3502  * are rejected until cancellation completes.
3503  *
3504  * If this returns zero cancellation is complete, otherwise something
3505  * (probably a work queue thread) is still referencing the item.
3506  *
3507  * See also k_work_cancel_sync().
3508  *
3509  * @funcprops \isr_ok
3510  *
3511  * @param work pointer to the work item.
3512  *
3513  * @return the k_work_busy_get() status indicating the state of the item after all
3514  * cancellation steps performed by this call are completed.
3515  */
3516 int k_work_cancel(struct k_work *work);
3517 
3518 /** @brief Cancel a work item and wait for it to complete.
3519  *
3520  * Same as k_work_cancel() but does not return until cancellation is complete.
3521  * This can be invoked by a thread after k_work_cancel() to synchronize with a
3522  * previous cancellation.
3523  *
3524  * On return the work structure will be idle unless something submits it after
3525  * the cancellation was complete.
3526  *
3527  * @note Be careful of caller and work queue thread relative priority.  If
3528  * this function sleeps it will not return until the work queue thread
3529  * completes the tasks that allow this thread to resume.
3530  *
3531  * @note Behavior is undefined if this function is invoked on @p work from a
3532  * work queue running @p work.
3533  *
3534  * @param work pointer to the work item.
3535  *
3536  * @param sync pointer to an opaque item containing state related to the
3537  * pending cancellation.  The object must persist until the call returns, and
3538  * be accessible from both the caller thread and the work queue thread.  The
3539  * object must not be used for any other flush or cancel operation until this
3540  * one completes.  On architectures with CONFIG_KERNEL_COHERENCE the object
3541  * must be allocated in coherent memory.
3542  *
3543  * @retval true if work was pending (call had to wait for cancellation of a
3544  * running handler to complete, or scheduled or submitted operations were
3545  * cancelled);
3546  * @retval false otherwise
3547  */
3548 bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync);
3549 
3550 /** @brief Initialize a work queue structure.
3551  *
3552  * This must be invoked before starting a work queue structure for the first time.
3553  * It need not be invoked again on the same work queue structure.
3554  *
3555  * @funcprops \isr_ok
3556  *
3557  * @param queue the queue structure to be initialized.
3558  */
3559 void k_work_queue_init(struct k_work_q *queue);
3560 
3561 /** @brief Initialize a work queue.
3562  *
3563  * This configures the work queue thread and starts it running.  The function
3564  * should not be re-invoked on a queue.
3565  *
3566  * @param queue pointer to the queue structure. It must be initialized
3567  *        in zeroed/bss memory or with @ref k_work_queue_init before
3568  *        use.
3569  *
3570  * @param stack pointer to the work thread stack area.
3571  *
3572  * @param stack_size size of the work thread stack area, in bytes.
3573  *
3574  * @param prio initial thread priority
3575  *
3576  * @param cfg optional additional configuration parameters.  Pass @c
3577  * NULL if not required, to use the defaults documented in
3578  * k_work_queue_config.
3579  */
3580 void k_work_queue_start(struct k_work_q *queue,
3581 			k_thread_stack_t *stack, size_t stack_size,
3582 			int prio, const struct k_work_queue_config *cfg);
3583 
3584 /** @brief Access the thread that animates a work queue.
3585  *
3586  * This is necessary to grant a work queue thread access to things the work
3587  * items it will process are expected to use.
3588  *
3589  * @param queue pointer to the queue structure.
3590  *
3591  * @return the thread associated with the work queue.
3592  */
3593 static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue);
3594 
3595 /** @brief Wait until the work queue has drained, optionally plugging it.
3596  *
3597  * This blocks submission to the work queue except when coming from queue
3598  * thread, and blocks the caller until no more work items are available in the
3599  * queue.
3600  *
3601  * If @p plug is true then submission will continue to be blocked after the
3602  * drain operation completes until k_work_queue_unplug() is invoked.
3603  *
3604  * Note that work items that are delayed are not yet associated with their
3605  * work queue.  They must be cancelled externally if a goal is to ensure the
3606  * work queue remains empty.  The @p plug feature can be used to prevent
3607  * delayed items from being submitted after the drain completes.
3608  *
3609  * @param queue pointer to the queue structure.
3610  *
3611  * @param plug if true the work queue will continue to block new submissions
3612  * after all items have drained.
3613  *
3614  * @retval 1 if call had to wait for the drain to complete
3615  * @retval 0 if call did not have to wait
3616  * @retval negative if wait was interrupted or failed
3617  */
3618 int k_work_queue_drain(struct k_work_q *queue, bool plug);
3619 
3620 /** @brief Release a work queue to accept new submissions.
3621  *
3622  * This releases the block on new submissions placed when k_work_queue_drain()
3623  * is invoked with the @p plug option enabled.  If this is invoked before the
3624  * drain completes new items may be submitted as soon as the drain completes.
3625  *
3626  * @funcprops \isr_ok
3627  *
3628  * @param queue pointer to the queue structure.
3629  *
3630  * @retval 0 if successfully unplugged
3631  * @retval -EALREADY if the work queue was not plugged.
3632  */
3633 int k_work_queue_unplug(struct k_work_q *queue);
3634 
3635 /** @brief Stop a work queue.
3636  *
3637  * Stops the work queue thread and ensures that no further work will be processed.
3638  * This call is blocking and guarantees that the work queue thread has terminated
3639  * cleanly if successful, no work will be processed past this point.
3640  *
3641  * @param queue Pointer to the queue structure.
3642  * @param timeout Maximum time to wait for the work queue to stop.
3643  *
3644  * @retval 0 if the work queue was stopped
3645  * @retval -EALREADY if the work queue was not started (or already stopped)
3646  * @retval -EBUSY if the work queue is actively processing work items
3647  * @retval -ETIMEDOUT if the work queue did not stop within the stipulated timeout
3648  */
3649 int k_work_queue_stop(struct k_work_q *queue, k_timeout_t timeout);
3650 
3651 /** @brief Initialize a delayable work structure.
3652  *
3653  * This must be invoked before scheduling a delayable work structure for the
3654  * first time.  It need not be invoked again on the same work structure.  It
3655  * can be re-invoked to change the associated handler, but this must be done
3656  * when the work item is idle.
3657  *
3658  * @funcprops \isr_ok
3659  *
3660  * @param dwork the delayable work structure to be initialized.
3661  *
3662  * @param handler the handler to be invoked by the work item.
3663  */
3664 void k_work_init_delayable(struct k_work_delayable *dwork,
3665 			   k_work_handler_t handler);
3666 
3667 /**
3668  * @brief Get the parent delayable work structure from a work pointer.
3669  *
3670  * This function is necessary when a @c k_work_handler_t function is passed to
3671  * k_work_schedule_for_queue() and the handler needs to access data from the
3672  * container of the containing `k_work_delayable`.
3673  *
3674  * @param work Address passed to the work handler
3675  *
3676  * @return Address of the containing @c k_work_delayable structure.
3677  */
3678 static inline struct k_work_delayable *
3679 k_work_delayable_from_work(struct k_work *work);
3680 
3681 /** @brief Busy state flags from the delayable work item.
3682  *
3683  * @funcprops \isr_ok
3684  *
3685  * @note This is a live snapshot of state, which may change before the result
3686  * can be inspected.  Use locks where appropriate.
3687  *
3688  * @param dwork pointer to the delayable work item.
3689  *
3690  * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED, K_WORK_RUNNING,
3691  * K_WORK_CANCELING, and K_WORK_FLUSHING.  A zero return value indicates the
3692  * work item appears to be idle.
3693  */
3694 int k_work_delayable_busy_get(const struct k_work_delayable *dwork);
3695 
3696 /** @brief Test whether a delayed work item is currently pending.
3697  *
3698  * Wrapper to determine whether a delayed work item is in a non-idle state.
3699  *
3700  * @note This is a live snapshot of state, which may change before the result
3701  * can be inspected.  Use locks where appropriate.
3702  *
3703  * @funcprops \isr_ok
3704  *
3705  * @param dwork pointer to the delayable work item.
3706  *
3707  * @return true if and only if k_work_delayable_busy_get() returns a non-zero
3708  * value.
3709  */
3710 static inline bool k_work_delayable_is_pending(
3711 	const struct k_work_delayable *dwork);
3712 
3713 /** @brief Get the absolute tick count at which a scheduled delayable work
3714  * will be submitted.
3715  *
3716  * @note This is a live snapshot of state, which may change before the result
3717  * can be inspected.  Use locks where appropriate.
3718  *
3719  * @funcprops \isr_ok
3720  *
3721  * @param dwork pointer to the delayable work item.
3722  *
3723  * @return the tick count when the timer that will schedule the work item will
3724  * expire, or the current tick count if the work is not scheduled.
3725  */
3726 static inline k_ticks_t k_work_delayable_expires_get(
3727 	const struct k_work_delayable *dwork);
3728 
3729 /** @brief Get the number of ticks until a scheduled delayable work will be
3730  * submitted.
3731  *
3732  * @note This is a live snapshot of state, which may change before the result
3733  * can be inspected.  Use locks where appropriate.
3734  *
3735  * @funcprops \isr_ok
3736  *
3737  * @param dwork pointer to the delayable work item.
3738  *
3739  * @return the number of ticks until the timer that will schedule the work
3740  * item will expire, or zero if the item is not scheduled.
3741  */
3742 static inline k_ticks_t k_work_delayable_remaining_get(
3743 	const struct k_work_delayable *dwork);
3744 
3745 /** @brief Submit an idle work item to a queue after a delay.
3746  *
3747  * Unlike k_work_reschedule_for_queue() this is a no-op if the work item is
3748  * already scheduled or submitted, even if @p delay is @c K_NO_WAIT.
3749  *
3750  * @funcprops \isr_ok
3751  *
3752  * @param queue the queue on which the work item should be submitted after the
3753  * delay.
3754  *
3755  * @param dwork pointer to the delayable work item.
3756  *
3757  * @param delay the time to wait before submitting the work item.  If @c
3758  * K_NO_WAIT and the work is not pending this is equivalent to
3759  * k_work_submit_to_queue().
3760  *
3761  * @retval 0 if work was already scheduled or submitted.
3762  * @retval 1 if work has been scheduled.
3763  * @retval 2 if @p delay is @c K_NO_WAIT and work
3764  *         was running and has been queued to the queue that was running it.
3765  * @retval -EBUSY if @p delay is @c K_NO_WAIT and
3766  *         k_work_submit_to_queue() fails with this code.
3767  * @retval -EINVAL if @p delay is @c K_NO_WAIT and
3768  *         k_work_submit_to_queue() fails with this code.
3769  * @retval -ENODEV if @p delay is @c K_NO_WAIT and
3770  *         k_work_submit_to_queue() fails with this code.
3771  */
3772 int k_work_schedule_for_queue(struct k_work_q *queue,
3773 			       struct k_work_delayable *dwork,
3774 			       k_timeout_t delay);
3775 
3776 /** @brief Submit an idle work item to the system work queue after a
3777  * delay.
3778  *
3779  * This is a thin wrapper around k_work_schedule_for_queue(), with all the API
3780  * characteristics of that function.
3781  *
3782  * @param dwork pointer to the delayable work item.
3783  *
3784  * @param delay the time to wait before submitting the work item.  If @c
3785  * K_NO_WAIT this is equivalent to k_work_submit_to_queue().
3786  *
3787  * @return as with k_work_schedule_for_queue().
3788  */
3789 int k_work_schedule(struct k_work_delayable *dwork,
3790 				   k_timeout_t delay);
3791 
3792 /** @brief Reschedule a work item to a queue after a delay.
3793  *
3794  * Unlike k_work_schedule_for_queue() this function can change the deadline of
3795  * a scheduled work item, and will schedule a work item that is in any state
3796  * (e.g. is idle, submitted, or running).  This function does not affect
3797  * ("unsubmit") a work item that has been submitted to a queue.
3798  *
3799  * @funcprops \isr_ok
3800  *
3801  * @param queue the queue on which the work item should be submitted after the
3802  * delay.
3803  *
3804  * @param dwork pointer to the delayable work item.
3805  *
3806  * @param delay the time to wait before submitting the work item.  If @c
3807  * K_NO_WAIT this is equivalent to k_work_submit_to_queue() after canceling
3808  * any previous scheduled submission.
3809  *
3810  * @note If delay is @c K_NO_WAIT ("no delay") the return values are as with
3811  * k_work_submit_to_queue().
3812  *
3813  * @retval 0 if delay is @c K_NO_WAIT and work was already on a queue
3814  * @retval 1 if
3815  * * delay is @c K_NO_WAIT and work was not submitted but has now been queued
3816  *   to @p queue; or
3817  * * delay not @c K_NO_WAIT and work has been scheduled
3818  * @retval 2 if delay is @c K_NO_WAIT and work was running and has been queued
3819  * to the queue that was running it
3820  * @retval -EBUSY if @p delay is @c K_NO_WAIT and
3821  *         k_work_submit_to_queue() fails with this code.
3822  * @retval -EINVAL if @p delay is @c K_NO_WAIT and
3823  *         k_work_submit_to_queue() fails with this code.
3824  * @retval -ENODEV if @p delay is @c K_NO_WAIT and
3825  *         k_work_submit_to_queue() fails with this code.
3826  */
3827 int k_work_reschedule_for_queue(struct k_work_q *queue,
3828 				 struct k_work_delayable *dwork,
3829 				 k_timeout_t delay);
3830 
3831 /** @brief Reschedule a work item to the system work queue after a
3832  * delay.
3833  *
3834  * This is a thin wrapper around k_work_reschedule_for_queue(), with all the
3835  * API characteristics of that function.
3836  *
3837  * @param dwork pointer to the delayable work item.
3838  *
3839  * @param delay the time to wait before submitting the work item.
3840  *
3841  * @return as with k_work_reschedule_for_queue().
3842  */
3843 int k_work_reschedule(struct k_work_delayable *dwork,
3844 				     k_timeout_t delay);
3845 
3846 /** @brief Flush delayable work.
3847  *
3848  * If the work is scheduled, it is immediately submitted.  Then the caller
3849  * blocks until the work completes, as with k_work_flush().
3850  *
3851  * @note Be careful of caller and work queue thread relative priority.  If
3852  * this function sleeps it will not return until the work queue thread
3853  * completes the tasks that allow this thread to resume.
3854  *
3855  * @note Behavior is undefined if this function is invoked on @p dwork from a
3856  * work queue running @p dwork.
3857  *
3858  * @param dwork pointer to the delayable work item.
3859  *
3860  * @param sync pointer to an opaque item containing state related to the
3861  * pending cancellation.  The object must persist until the call returns, and
3862  * be accessible from both the caller thread and the work queue thread.  The
3863  * object must not be used for any other flush or cancel operation until this
3864  * one completes.  On architectures with CONFIG_KERNEL_COHERENCE the object
3865  * must be allocated in coherent memory.
3866  *
3867  * @retval true if call had to wait for completion
3868  * @retval false if work was already idle
3869  */
3870 bool k_work_flush_delayable(struct k_work_delayable *dwork,
3871 			    struct k_work_sync *sync);
3872 
3873 /** @brief Cancel delayable work.
3874  *
3875  * Similar to k_work_cancel() but for delayable work.  If the work is
3876  * scheduled or submitted it is canceled.  This function does not wait for the
3877  * cancellation to complete.
3878  *
3879  * @note The work may still be running when this returns.  Use
3880  * k_work_flush_delayable() or k_work_cancel_delayable_sync() to ensure it is
3881  * not running.
3882  *
3883  * @note Canceling delayable work does not prevent rescheduling it.  It does
3884  * prevent submitting it until the cancellation completes.
3885  *
3886  * @funcprops \isr_ok
3887  *
3888  * @param dwork pointer to the delayable work item.
3889  *
3890  * @return the k_work_delayable_busy_get() status indicating the state of the
3891  * item after all cancellation steps performed by this call are completed.
3892  */
3893 int k_work_cancel_delayable(struct k_work_delayable *dwork);
3894 
3895 /** @brief Cancel delayable work and wait.
3896  *
3897  * Like k_work_cancel_delayable() but waits until the work becomes idle.
3898  *
3899  * @note Canceling delayable work does not prevent rescheduling it.  It does
3900  * prevent submitting it until the cancellation completes.
3901  *
3902  * @note Be careful of caller and work queue thread relative priority.  If
3903  * this function sleeps it will not return until the work queue thread
3904  * completes the tasks that allow this thread to resume.
3905  *
3906  * @note Behavior is undefined if this function is invoked on @p dwork from a
3907  * work queue running @p dwork.
3908  *
3909  * @param dwork pointer to the delayable work item.
3910  *
3911  * @param sync pointer to an opaque item containing state related to the
3912  * pending cancellation.  The object must persist until the call returns, and
3913  * be accessible from both the caller thread and the work queue thread.  The
3914  * object must not be used for any other flush or cancel operation until this
3915  * one completes.  On architectures with CONFIG_KERNEL_COHERENCE the object
3916  * must be allocated in coherent memory.
3917  *
3918  * @retval true if work was not idle (call had to wait for cancellation of a
3919  * running handler to complete, or scheduled or submitted operations were
3920  * cancelled);
3921  * @retval false otherwise
3922  */
3923 bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
3924 				  struct k_work_sync *sync);
3925 
3926 enum {
3927 /**
3928  * @cond INTERNAL_HIDDEN
3929  */
3930 
3931 	/* The atomic API is used for all work and queue flags fields to
3932 	 * enforce sequential consistency in SMP environments.
3933 	 */
3934 
3935 	/* Bits that represent the work item states.  At least nine of the
3936 	 * combinations are distinct valid stable states.
3937 	 */
3938 	K_WORK_RUNNING_BIT = 0,
3939 	K_WORK_CANCELING_BIT = 1,
3940 	K_WORK_QUEUED_BIT = 2,
3941 	K_WORK_DELAYED_BIT = 3,
3942 	K_WORK_FLUSHING_BIT = 4,
3943 
3944 	K_WORK_MASK = BIT(K_WORK_DELAYED_BIT) | BIT(K_WORK_QUEUED_BIT)
3945 		| BIT(K_WORK_RUNNING_BIT) | BIT(K_WORK_CANCELING_BIT) | BIT(K_WORK_FLUSHING_BIT),
3946 
3947 	/* Static work flags */
3948 	K_WORK_DELAYABLE_BIT = 8,
3949 	K_WORK_DELAYABLE = BIT(K_WORK_DELAYABLE_BIT),
3950 
3951 	/* Dynamic work queue flags */
3952 	K_WORK_QUEUE_STARTED_BIT = 0,
3953 	K_WORK_QUEUE_STARTED = BIT(K_WORK_QUEUE_STARTED_BIT),
3954 	K_WORK_QUEUE_BUSY_BIT = 1,
3955 	K_WORK_QUEUE_BUSY = BIT(K_WORK_QUEUE_BUSY_BIT),
3956 	K_WORK_QUEUE_DRAIN_BIT = 2,
3957 	K_WORK_QUEUE_DRAIN = BIT(K_WORK_QUEUE_DRAIN_BIT),
3958 	K_WORK_QUEUE_PLUGGED_BIT = 3,
3959 	K_WORK_QUEUE_PLUGGED = BIT(K_WORK_QUEUE_PLUGGED_BIT),
3960 	K_WORK_QUEUE_STOP_BIT = 4,
3961 	K_WORK_QUEUE_STOP = BIT(K_WORK_QUEUE_STOP_BIT),
3962 
3963 	/* Static work queue flags */
3964 	K_WORK_QUEUE_NO_YIELD_BIT = 8,
3965 	K_WORK_QUEUE_NO_YIELD = BIT(K_WORK_QUEUE_NO_YIELD_BIT),
3966 
3967 /**
3968  * INTERNAL_HIDDEN @endcond
3969  */
3970 	/* Transient work flags */
3971 
3972 	/** @brief Flag indicating a work item that is running under a work
3973 	 * queue thread.
3974 	 *
3975 	 * Accessed via k_work_busy_get().  May co-occur with other flags.
3976 	 */
3977 	K_WORK_RUNNING = BIT(K_WORK_RUNNING_BIT),
3978 
3979 	/** @brief Flag indicating a work item that is being canceled.
3980 	 *
3981 	 * Accessed via k_work_busy_get().  May co-occur with other flags.
3982 	 */
3983 	K_WORK_CANCELING = BIT(K_WORK_CANCELING_BIT),
3984 
3985 	/** @brief Flag indicating a work item that has been submitted to a
3986 	 * queue but has not started running.
3987 	 *
3988 	 * Accessed via k_work_busy_get().  May co-occur with other flags.
3989 	 */
3990 	K_WORK_QUEUED = BIT(K_WORK_QUEUED_BIT),
3991 
3992 	/** @brief Flag indicating a delayed work item that is scheduled for
3993 	 * submission to a queue.
3994 	 *
3995 	 * Accessed via k_work_busy_get().  May co-occur with other flags.
3996 	 */
3997 	K_WORK_DELAYED = BIT(K_WORK_DELAYED_BIT),
3998 
3999 	/** @brief Flag indicating a synced work item that is being flushed.
4000 	 *
4001 	 * Accessed via k_work_busy_get().  May co-occur with other flags.
4002 	 */
4003 	K_WORK_FLUSHING = BIT(K_WORK_FLUSHING_BIT),
4004 };
4005 
4006 /** @brief A structure used to submit work. */
4007 struct k_work {
4008 	/* All fields are protected by the work module spinlock.  No fields
4009 	 * are to be accessed except through kernel API.
4010 	 */
4011 
4012 	/* Node to link into k_work_q pending list. */
4013 	sys_snode_t node;
4014 
4015 	/* The function to be invoked by the work queue thread. */
4016 	k_work_handler_t handler;
4017 
4018 	/* The queue on which the work item was last submitted. */
4019 	struct k_work_q *queue;
4020 
4021 	/* State of the work item.
4022 	 *
4023 	 * The item can be DELAYED, QUEUED, and RUNNING simultaneously.
4024 	 *
4025 	 * It can be RUNNING and CANCELING simultaneously.
4026 	 */
4027 	uint32_t flags;
4028 };
4029 
4030 #define Z_WORK_INITIALIZER(work_handler) { \
4031 	.handler = (work_handler), \
4032 }
4033 
4034 /** @brief A structure used to submit work after a delay. */
4035 struct k_work_delayable {
4036 	/* The work item. */
4037 	struct k_work work;
4038 
4039 	/* Timeout used to submit work after a delay. */
4040 	struct _timeout timeout;
4041 
4042 	/* The queue to which the work should be submitted. */
4043 	struct k_work_q *queue;
4044 };
4045 
4046 #define Z_WORK_DELAYABLE_INITIALIZER(work_handler) { \
4047 	.work = { \
4048 		.handler = (work_handler), \
4049 		.flags = K_WORK_DELAYABLE, \
4050 	}, \
4051 }
4052 
4053 /**
4054  * @brief Initialize a statically-defined delayable work item.
4055  *
4056  * This macro can be used to initialize a statically-defined delayable
4057  * work item, prior to its first use. For example,
4058  *
4059  * @code static K_WORK_DELAYABLE_DEFINE(<dwork>, <work_handler>); @endcode
4060  *
4061  * Note that if the runtime dependencies support initialization with
4062  * k_work_init_delayable() using that will eliminate the initialized
4063  * object in ROM that is produced by this macro and copied in at
4064  * system startup.
4065  *
4066  * @param work Symbol name for delayable work item object
4067  * @param work_handler Function to invoke each time work item is processed.
4068  */
4069 #define K_WORK_DELAYABLE_DEFINE(work, work_handler) \
4070 	struct k_work_delayable work \
4071 	  = Z_WORK_DELAYABLE_INITIALIZER(work_handler)
4072 
4073 /**
4074  * @cond INTERNAL_HIDDEN
4075  */
4076 
4077 /* Record used to wait for work to flush.
4078  *
4079  * The work item is inserted into the queue that will process (or is
4080  * processing) the item, and will be processed as soon as the item
4081  * completes.  When the flusher is processed the semaphore will be
4082  * signaled, releasing the thread waiting for the flush.
4083  */
4084 struct z_work_flusher {
4085 	struct k_work work;
4086 	struct k_sem sem;
4087 };
4088 
4089 /* Record used to wait for work to complete a cancellation.
4090  *
4091  * The work item is inserted into a global queue of pending cancels.
4092  * When a cancelling work item goes idle any matching waiters are
4093  * removed from pending_cancels and are woken.
4094  */
4095 struct z_work_canceller {
4096 	sys_snode_t node;
4097 	struct k_work *work;
4098 	struct k_sem sem;
4099 };
4100 
4101 /**
4102  * INTERNAL_HIDDEN @endcond
4103  */
4104 
4105 /** @brief A structure holding internal state for a pending synchronous
4106  * operation on a work item or queue.
4107  *
4108  * Instances of this type are provided by the caller for invocation of
4109  * k_work_flush(), k_work_cancel_sync() and sibling flush and cancel APIs.  A
4110  * referenced object must persist until the call returns, and be accessible
4111  * from both the caller thread and the work queue thread.
4112  *
4113  * @note If CONFIG_KERNEL_COHERENCE is enabled the object must be allocated in
4114  * coherent memory; see arch_mem_coherent().  The stack on these architectures
4115  * is generally not coherent.  be stack-allocated.  Violations are detected by
4116  * runtime assertion.
4117  */
4118 struct k_work_sync {
4119 	union {
4120 		struct z_work_flusher flusher;
4121 		struct z_work_canceller canceller;
4122 	};
4123 };
4124 
4125 /** @brief A structure holding optional configuration items for a work
4126  * queue.
4127  *
4128  * This structure, and values it references, are not retained by
4129  * k_work_queue_start().
4130  */
4131 struct k_work_queue_config {
4132 	/** The name to be given to the work queue thread.
4133 	 *
4134 	 * If left null the thread will not have a name.
4135 	 */
4136 	const char *name;
4137 
4138 	/** Control whether the work queue thread should yield between
4139 	 * items.
4140 	 *
4141 	 * Yielding between items helps guarantee the work queue
4142 	 * thread does not starve other threads, including cooperative
4143 	 * ones released by a work item.  This is the default behavior.
4144 	 *
4145 	 * Set this to @c true to prevent the work queue thread from
4146 	 * yielding between items.  This may be appropriate when a
4147 	 * sequence of items should complete without yielding
4148 	 * control.
4149 	 */
4150 	bool no_yield;
4151 
4152 	/** Control whether the work queue thread should be marked as
4153 	 * essential thread.
4154 	 */
4155 	bool essential;
4156 };
4157 
4158 /** @brief A structure used to hold work until it can be processed. */
4159 struct k_work_q {
4160 	/* The thread that animates the work. */
4161 	struct k_thread thread;
4162 
4163 	/* All the following fields must be accessed only while the
4164 	 * work module spinlock is held.
4165 	 */
4166 
4167 	/* List of k_work items to be worked. */
4168 	sys_slist_t pending;
4169 
4170 	/* Wait queue for idle work thread. */
4171 	_wait_q_t notifyq;
4172 
4173 	/* Wait queue for threads waiting for the queue to drain. */
4174 	_wait_q_t drainq;
4175 
4176 	/* Flags describing queue state. */
4177 	uint32_t flags;
4178 };
4179 
4180 /* Provide the implementation for inline functions declared above */
4181 
k_work_is_pending(const struct k_work * work)4182 static inline bool k_work_is_pending(const struct k_work *work)
4183 {
4184 	return k_work_busy_get(work) != 0;
4185 }
4186 
4187 static inline struct k_work_delayable *
k_work_delayable_from_work(struct k_work * work)4188 k_work_delayable_from_work(struct k_work *work)
4189 {
4190 	return CONTAINER_OF(work, struct k_work_delayable, work);
4191 }
4192 
k_work_delayable_is_pending(const struct k_work_delayable * dwork)4193 static inline bool k_work_delayable_is_pending(
4194 	const struct k_work_delayable *dwork)
4195 {
4196 	return k_work_delayable_busy_get(dwork) != 0;
4197 }
4198 
k_work_delayable_expires_get(const struct k_work_delayable * dwork)4199 static inline k_ticks_t k_work_delayable_expires_get(
4200 	const struct k_work_delayable *dwork)
4201 {
4202 	return z_timeout_expires(&dwork->timeout);
4203 }
4204 
k_work_delayable_remaining_get(const struct k_work_delayable * dwork)4205 static inline k_ticks_t k_work_delayable_remaining_get(
4206 	const struct k_work_delayable *dwork)
4207 {
4208 	return z_timeout_remaining(&dwork->timeout);
4209 }
4210 
k_work_queue_thread_get(struct k_work_q * queue)4211 static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue)
4212 {
4213 	return &queue->thread;
4214 }
4215 
4216 /** @} */
4217 
4218 struct k_work_user;
4219 
4220 /**
4221  * @addtogroup workqueue_apis
4222  * @{
4223  */
4224 
4225 /**
4226  * @typedef k_work_user_handler_t
4227  * @brief Work item handler function type for user work queues.
4228  *
4229  * A work item's handler function is executed by a user workqueue's thread
4230  * when the work item is processed by the workqueue.
4231  *
4232  * @param work Address of the work item.
4233  */
4234 typedef void (*k_work_user_handler_t)(struct k_work_user *work);
4235 
4236 /**
4237  * @cond INTERNAL_HIDDEN
4238  */
4239 
4240 struct k_work_user_q {
4241 	struct k_queue queue;
4242 	struct k_thread thread;
4243 };
4244 
4245 enum {
4246 	K_WORK_USER_STATE_PENDING,	/* Work item pending state */
4247 };
4248 
4249 struct k_work_user {
4250 	void *_reserved;		/* Used by k_queue implementation. */
4251 	k_work_user_handler_t handler;
4252 	atomic_t flags;
4253 };
4254 
4255 /**
4256  * INTERNAL_HIDDEN @endcond
4257  */
4258 
4259 #if defined(__cplusplus) && ((__cplusplus - 0) < 202002L)
4260 #define Z_WORK_USER_INITIALIZER(work_handler) { NULL, work_handler, 0 }
4261 #else
4262 #define Z_WORK_USER_INITIALIZER(work_handler) \
4263 	{ \
4264 	._reserved = NULL, \
4265 	.handler = (work_handler), \
4266 	.flags = 0 \
4267 	}
4268 #endif
4269 
4270 /**
4271  * @brief Initialize a statically-defined user work item.
4272  *
4273  * This macro can be used to initialize a statically-defined user work
4274  * item, prior to its first use. For example,
4275  *
4276  * @code static K_WORK_USER_DEFINE(<work>, <work_handler>); @endcode
4277  *
4278  * @param work Symbol name for work item object
4279  * @param work_handler Function to invoke each time work item is processed.
4280  */
4281 #define K_WORK_USER_DEFINE(work, work_handler) \
4282 	struct k_work_user work = Z_WORK_USER_INITIALIZER(work_handler)
4283 
4284 /**
4285  * @brief Initialize a userspace work item.
4286  *
4287  * This routine initializes a user workqueue work item, prior to its
4288  * first use.
4289  *
4290  * @param work Address of work item.
4291  * @param handler Function to invoke each time work item is processed.
4292  */
k_work_user_init(struct k_work_user * work,k_work_user_handler_t handler)4293 static inline void k_work_user_init(struct k_work_user *work,
4294 				    k_work_user_handler_t handler)
4295 {
4296 	*work = (struct k_work_user)Z_WORK_USER_INITIALIZER(handler);
4297 }
4298 
4299 /**
4300  * @brief Check if a userspace work item is pending.
4301  *
4302  * This routine indicates if user work item @a work is pending in a workqueue's
4303  * queue.
4304  *
4305  * @note Checking if the work is pending gives no guarantee that the
4306  *       work will still be pending when this information is used. It is up to
4307  *       the caller to make sure that this information is used in a safe manner.
4308  *
4309  * @funcprops \isr_ok
4310  *
4311  * @param work Address of work item.
4312  *
4313  * @return true if work item is pending, or false if it is not pending.
4314  */
k_work_user_is_pending(struct k_work_user * work)4315 static inline bool k_work_user_is_pending(struct k_work_user *work)
4316 {
4317 	return atomic_test_bit(&work->flags, K_WORK_USER_STATE_PENDING);
4318 }
4319 
4320 /**
4321  * @brief Submit a work item to a user mode workqueue
4322  *
4323  * Submits a work item to a workqueue that runs in user mode. A temporary
4324  * memory allocation is made from the caller's resource pool which is freed
4325  * once the worker thread consumes the k_work item. The workqueue
4326  * thread must have memory access to the k_work item being submitted. The caller
4327  * must have permission granted on the work_q parameter's queue object.
4328  *
4329  * @funcprops \isr_ok
4330  *
4331  * @param work_q Address of workqueue.
4332  * @param work Address of work item.
4333  *
4334  * @retval -EBUSY if the work item was already in some workqueue
4335  * @retval -ENOMEM if no memory for thread resource pool allocation
4336  * @retval 0 Success
4337  */
k_work_user_submit_to_queue(struct k_work_user_q * work_q,struct k_work_user * work)4338 static inline int k_work_user_submit_to_queue(struct k_work_user_q *work_q,
4339 					      struct k_work_user *work)
4340 {
4341 	int ret = -EBUSY;
4342 
4343 	if (!atomic_test_and_set_bit(&work->flags,
4344 				     K_WORK_USER_STATE_PENDING)) {
4345 		ret = k_queue_alloc_append(&work_q->queue, work);
4346 
4347 		/* Couldn't insert into the queue. Clear the pending bit
4348 		 * so the work item can be submitted again
4349 		 */
4350 		if (ret != 0) {
4351 			atomic_clear_bit(&work->flags,
4352 					 K_WORK_USER_STATE_PENDING);
4353 		}
4354 	}
4355 
4356 	return ret;
4357 }
4358 
4359 /**
4360  * @brief Start a workqueue in user mode
4361  *
4362  * This works identically to k_work_queue_start() except it is callable from
4363  * user mode, and the worker thread created will run in user mode.  The caller
4364  * must have permissions granted on both the work_q parameter's thread and
4365  * queue objects, and the same restrictions on priority apply as
4366  * k_thread_create().
4367  *
4368  * @param work_q Address of workqueue.
4369  * @param stack Pointer to work queue thread's stack space, as defined by
4370  *		K_THREAD_STACK_DEFINE()
4371  * @param stack_size Size of the work queue thread's stack (in bytes), which
4372  *		should either be the same constant passed to
4373  *		K_THREAD_STACK_DEFINE() or the value of K_THREAD_STACK_SIZEOF().
4374  * @param prio Priority of the work queue's thread.
4375  * @param name optional thread name.  If not null a copy is made into the
4376  *		thread's name buffer.
4377  */
4378 void k_work_user_queue_start(struct k_work_user_q *work_q,
4379 				    k_thread_stack_t *stack,
4380 				    size_t stack_size, int prio,
4381 				    const char *name);
4382 
4383 /**
4384  * @brief Access the user mode thread that animates a work queue.
4385  *
4386  * This is necessary to grant a user mode work queue thread access to things
4387  * the work items it will process are expected to use.
4388  *
4389  * @param work_q pointer to the user mode queue structure.
4390  *
4391  * @return the user mode thread associated with the work queue.
4392  */
k_work_user_queue_thread_get(struct k_work_user_q * work_q)4393 static inline k_tid_t k_work_user_queue_thread_get(struct k_work_user_q *work_q)
4394 {
4395 	return &work_q->thread;
4396 }
4397 
4398 /** @} */
4399 
4400 /**
4401  * @cond INTERNAL_HIDDEN
4402  */
4403 
4404 struct k_work_poll {
4405 	struct k_work work;
4406 	struct k_work_q *workq;
4407 	struct z_poller poller;
4408 	struct k_poll_event *events;
4409 	int num_events;
4410 	k_work_handler_t real_handler;
4411 	struct _timeout timeout;
4412 	int poll_result;
4413 };
4414 
4415 /**
4416  * INTERNAL_HIDDEN @endcond
4417  */
4418 
4419 /**
4420  * @addtogroup workqueue_apis
4421  * @{
4422  */
4423 
4424 /**
4425  * @brief Initialize a statically-defined work item.
4426  *
4427  * This macro can be used to initialize a statically-defined workqueue work
4428  * item, prior to its first use. For example,
4429  *
4430  * @code static K_WORK_DEFINE(<work>, <work_handler>); @endcode
4431  *
4432  * @param work Symbol name for work item object
4433  * @param work_handler Function to invoke each time work item is processed.
4434  */
4435 #define K_WORK_DEFINE(work, work_handler) \
4436 	struct k_work work = Z_WORK_INITIALIZER(work_handler)
4437 
4438 /**
4439  * @brief Initialize a triggered work item.
4440  *
4441  * This routine initializes a workqueue triggered work item, prior to
4442  * its first use.
4443  *
4444  * @param work Address of triggered work item.
4445  * @param handler Function to invoke each time work item is processed.
4446  */
4447 void k_work_poll_init(struct k_work_poll *work,
4448 			     k_work_handler_t handler);
4449 
4450 /**
4451  * @brief Submit a triggered work item.
4452  *
4453  * This routine schedules work item @a work to be processed by workqueue
4454  * @a work_q when one of the given @a events is signaled. The routine
4455  * initiates internal poller for the work item and then returns to the caller.
4456  * Only when one of the watched events happen the work item is actually
4457  * submitted to the workqueue and becomes pending.
4458  *
4459  * Submitting a previously submitted triggered work item that is still
4460  * waiting for the event cancels the existing submission and reschedules it
4461  * the using the new event list. Note that this behavior is inherently subject
4462  * to race conditions with the pre-existing triggered work item and work queue,
4463  * so care must be taken to synchronize such resubmissions externally.
4464  *
4465  * @funcprops \isr_ok
4466  *
4467  * @warning
4468  * Provided array of events as well as a triggered work item must be placed
4469  * in persistent memory (valid until work handler execution or work
4470  * cancellation) and cannot be modified after submission.
4471  *
4472  * @param work_q Address of workqueue.
4473  * @param work Address of delayed work item.
4474  * @param events An array of events which trigger the work.
4475  * @param num_events The number of events in the array.
4476  * @param timeout Timeout after which the work will be scheduled
4477  *		  for execution even if not triggered.
4478  *
4479  *
4480  * @retval 0 Work item started watching for events.
4481  * @retval -EINVAL Work item is being processed or has completed its work.
4482  * @retval -EADDRINUSE Work item is pending on a different workqueue.
4483  */
4484 int k_work_poll_submit_to_queue(struct k_work_q *work_q,
4485 				       struct k_work_poll *work,
4486 				       struct k_poll_event *events,
4487 				       int num_events,
4488 				       k_timeout_t timeout);
4489 
4490 /**
4491  * @brief Submit a triggered work item to the system workqueue.
4492  *
4493  * This routine schedules work item @a work to be processed by system
4494  * workqueue when one of the given @a events is signaled. The routine
4495  * initiates internal poller for the work item and then returns to the caller.
4496  * Only when one of the watched events happen the work item is actually
4497  * submitted to the workqueue and becomes pending.
4498  *
4499  * Submitting a previously submitted triggered work item that is still
4500  * waiting for the event cancels the existing submission and reschedules it
4501  * the using the new event list. Note that this behavior is inherently subject
4502  * to race conditions with the pre-existing triggered work item and work queue,
4503  * so care must be taken to synchronize such resubmissions externally.
4504  *
4505  * @funcprops \isr_ok
4506  *
4507  * @warning
4508  * Provided array of events as well as a triggered work item must not be
4509  * modified until the item has been processed by the workqueue.
4510  *
4511  * @param work Address of delayed work item.
4512  * @param events An array of events which trigger the work.
4513  * @param num_events The number of events in the array.
4514  * @param timeout Timeout after which the work will be scheduled
4515  *		  for execution even if not triggered.
4516  *
4517  * @retval 0 Work item started watching for events.
4518  * @retval -EINVAL Work item is being processed or has completed its work.
4519  * @retval -EADDRINUSE Work item is pending on a different workqueue.
4520  */
4521 int k_work_poll_submit(struct k_work_poll *work,
4522 				     struct k_poll_event *events,
4523 				     int num_events,
4524 				     k_timeout_t timeout);
4525 
4526 /**
4527  * @brief Cancel a triggered work item.
4528  *
4529  * This routine cancels the submission of triggered work item @a work.
4530  * A triggered work item can only be canceled if no event triggered work
4531  * submission.
4532  *
4533  * @funcprops \isr_ok
4534  *
4535  * @param work Address of delayed work item.
4536  *
4537  * @retval 0 Work item canceled.
4538  * @retval -EINVAL Work item is being processed or has completed its work.
4539  */
4540 int k_work_poll_cancel(struct k_work_poll *work);
4541 
4542 /** @} */
4543 
4544 /**
4545  * @defgroup msgq_apis Message Queue APIs
4546  * @ingroup kernel_apis
4547  * @{
4548  */
4549 
4550 /**
4551  * @brief Message Queue Structure
4552  */
4553 struct k_msgq {
4554 	/** Message queue wait queue */
4555 	_wait_q_t wait_q;
4556 	/** Lock */
4557 	struct k_spinlock lock;
4558 	/** Message size */
4559 	size_t msg_size;
4560 	/** Maximal number of messages */
4561 	uint32_t max_msgs;
4562 	/** Start of message buffer */
4563 	char *buffer_start;
4564 	/** End of message buffer */
4565 	char *buffer_end;
4566 	/** Read pointer */
4567 	char *read_ptr;
4568 	/** Write pointer */
4569 	char *write_ptr;
4570 	/** Number of used messages */
4571 	uint32_t used_msgs;
4572 
4573 	Z_DECL_POLL_EVENT
4574 
4575 	/** Message queue */
4576 	uint8_t flags;
4577 
4578 	SYS_PORT_TRACING_TRACKING_FIELD(k_msgq)
4579 
4580 #ifdef CONFIG_OBJ_CORE_MSGQ
4581 	struct k_obj_core  obj_core;
4582 #endif
4583 };
4584 /**
4585  * @cond INTERNAL_HIDDEN
4586  */
4587 
4588 
4589 #define Z_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \
4590 	{ \
4591 	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
4592 	.msg_size = q_msg_size, \
4593 	.max_msgs = q_max_msgs, \
4594 	.buffer_start = q_buffer, \
4595 	.buffer_end = q_buffer + (q_max_msgs * q_msg_size), \
4596 	.read_ptr = q_buffer, \
4597 	.write_ptr = q_buffer, \
4598 	.used_msgs = 0, \
4599 	Z_POLL_EVENT_OBJ_INIT(obj) \
4600 	}
4601 
4602 /**
4603  * INTERNAL_HIDDEN @endcond
4604  */
4605 
4606 
4607 #define K_MSGQ_FLAG_ALLOC	BIT(0)
4608 
4609 /**
4610  * @brief Message Queue Attributes
4611  */
4612 struct k_msgq_attrs {
4613 	/** Message Size */
4614 	size_t msg_size;
4615 	/** Maximal number of messages */
4616 	uint32_t max_msgs;
4617 	/** Used messages */
4618 	uint32_t used_msgs;
4619 };
4620 
4621 
4622 /**
4623  * @brief Statically define and initialize a message queue.
4624  *
4625  * The message queue's ring buffer contains space for @a q_max_msgs messages,
4626  * each of which is @a q_msg_size bytes long. Alignment of the message queue's
4627  * ring buffer is not necessary, setting @a q_align to 1 is sufficient.
4628  *
4629  * The message queue can be accessed outside the module where it is defined
4630  * using:
4631  *
4632  * @code extern struct k_msgq <name>; @endcode
4633  *
4634  * @param q_name Name of the message queue.
4635  * @param q_msg_size Message size (in bytes).
4636  * @param q_max_msgs Maximum number of messages that can be queued.
4637  * @param q_align Alignment of the message queue's ring buffer (power of 2).
4638  *
4639  */
4640 #define K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align)		\
4641 	static char __noinit __aligned(q_align)				\
4642 		_k_fifo_buf_##q_name[(q_max_msgs) * (q_msg_size)];	\
4643 	STRUCT_SECTION_ITERABLE(k_msgq, q_name) =			\
4644 	       Z_MSGQ_INITIALIZER(q_name, _k_fifo_buf_##q_name,	\
4645 				  (q_msg_size), (q_max_msgs))
4646 
4647 /**
4648  * @brief Initialize a message queue.
4649  *
4650  * This routine initializes a message queue object, prior to its first use.
4651  *
4652  * The message queue's ring buffer must contain space for @a max_msgs messages,
4653  * each of which is @a msg_size bytes long. Alignment of the message queue's
4654  * ring buffer is not necessary.
4655  *
4656  * @param msgq Address of the message queue.
4657  * @param buffer Pointer to ring buffer that holds queued messages.
4658  * @param msg_size Message size (in bytes).
4659  * @param max_msgs Maximum number of messages that can be queued.
4660  */
4661 void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
4662 		 uint32_t max_msgs);
4663 
4664 /**
4665  * @brief Initialize a message queue.
4666  *
4667  * This routine initializes a message queue object, prior to its first use,
4668  * allocating its internal ring buffer from the calling thread's resource
4669  * pool.
4670  *
4671  * Memory allocated for the ring buffer can be released by calling
4672  * k_msgq_cleanup(), or if userspace is enabled and the msgq object loses
4673  * all of its references.
4674  *
4675  * @param msgq Address of the message queue.
4676  * @param msg_size Message size (in bytes).
4677  * @param max_msgs Maximum number of messages that can be queued.
4678  *
4679  * @return 0 on success, -ENOMEM if there was insufficient memory in the
4680  *	thread's resource pool, or -EINVAL if the size parameters cause
4681  *	an integer overflow.
4682  */
4683 __syscall int k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
4684 				uint32_t max_msgs);
4685 
4686 /**
4687  * @brief Release allocated buffer for a queue
4688  *
4689  * Releases memory allocated for the ring buffer.
4690  *
4691  * @param msgq message queue to cleanup
4692  *
4693  * @retval 0 on success
4694  * @retval -EBUSY Queue not empty
4695  */
4696 int k_msgq_cleanup(struct k_msgq *msgq);
4697 
4698 /**
4699  * @brief Send a message to a message queue.
4700  *
4701  * This routine sends a message to message queue @a q.
4702  *
4703  * @note The message content is copied from @a data into @a msgq and the @a data
4704  * pointer is not retained, so the message content will not be modified
4705  * by this function.
4706  *
4707  * @funcprops \isr_ok
4708  *
4709  * @param msgq Address of the message queue.
4710  * @param data Pointer to the message.
4711  * @param timeout Waiting period to add the message, or one of the special
4712  *                values K_NO_WAIT and K_FOREVER.
4713  *
4714  * @retval 0 Message sent.
4715  * @retval -ENOMSG Returned without waiting or queue purged.
4716  * @retval -EAGAIN Waiting period timed out.
4717  */
4718 __syscall int k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
4719 
4720 /**
4721  * @brief Receive a message from a message queue.
4722  *
4723  * This routine receives a message from message queue @a q in a "first in,
4724  * first out" manner.
4725  *
4726  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
4727  *
4728  * @funcprops \isr_ok
4729  *
4730  * @param msgq Address of the message queue.
4731  * @param data Address of area to hold the received message.
4732  * @param timeout Waiting period to receive the message,
4733  *                or one of the special values K_NO_WAIT and
4734  *                K_FOREVER.
4735  *
4736  * @retval 0 Message received.
4737  * @retval -ENOMSG Returned without waiting or queue purged.
4738  * @retval -EAGAIN Waiting period timed out.
4739  */
4740 __syscall int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout);
4741 
4742 /**
4743  * @brief Peek/read a message from a message queue.
4744  *
4745  * This routine reads a message from message queue @a q in a "first in,
4746  * first out" manner and leaves the message in the queue.
4747  *
4748  * @funcprops \isr_ok
4749  *
4750  * @param msgq Address of the message queue.
4751  * @param data Address of area to hold the message read from the queue.
4752  *
4753  * @retval 0 Message read.
4754  * @retval -ENOMSG Returned when the queue has no message.
4755  */
4756 __syscall int k_msgq_peek(struct k_msgq *msgq, void *data);
4757 
4758 /**
4759  * @brief Peek/read a message from a message queue at the specified index
4760  *
4761  * This routine reads a message from message queue at the specified index
4762  * and leaves the message in the queue.
4763  * k_msgq_peek_at(msgq, data, 0) is equivalent to k_msgq_peek(msgq, data)
4764  *
4765  * @funcprops \isr_ok
4766  *
4767  * @param msgq Address of the message queue.
4768  * @param data Address of area to hold the message read from the queue.
4769  * @param idx Message queue index at which to peek
4770  *
4771  * @retval 0 Message read.
4772  * @retval -ENOMSG Returned when the queue has no message at index.
4773  */
4774 __syscall int k_msgq_peek_at(struct k_msgq *msgq, void *data, uint32_t idx);
4775 
4776 /**
4777  * @brief Purge a message queue.
4778  *
4779  * This routine discards all unreceived messages in a message queue's ring
4780  * buffer. Any threads that are blocked waiting to send a message to the
4781  * message queue are unblocked and see an -ENOMSG error code.
4782  *
4783  * @param msgq Address of the message queue.
4784  */
4785 __syscall void k_msgq_purge(struct k_msgq *msgq);
4786 
4787 /**
4788  * @brief Get the amount of free space in a message queue.
4789  *
4790  * This routine returns the number of unused entries in a message queue's
4791  * ring buffer.
4792  *
4793  * @param msgq Address of the message queue.
4794  *
4795  * @return Number of unused ring buffer entries.
4796  */
4797 __syscall uint32_t k_msgq_num_free_get(struct k_msgq *msgq);
4798 
4799 /**
4800  * @brief Get basic attributes of a message queue.
4801  *
4802  * This routine fetches basic attributes of message queue into attr argument.
4803  *
4804  * @param msgq Address of the message queue.
4805  * @param attrs pointer to message queue attribute structure.
4806  */
4807 __syscall void  k_msgq_get_attrs(struct k_msgq *msgq,
4808 				 struct k_msgq_attrs *attrs);
4809 
4810 
z_impl_k_msgq_num_free_get(struct k_msgq * msgq)4811 static inline uint32_t z_impl_k_msgq_num_free_get(struct k_msgq *msgq)
4812 {
4813 	return msgq->max_msgs - msgq->used_msgs;
4814 }
4815 
4816 /**
4817  * @brief Get the number of messages in a message queue.
4818  *
4819  * This routine returns the number of messages in a message queue's ring buffer.
4820  *
4821  * @param msgq Address of the message queue.
4822  *
4823  * @return Number of messages.
4824  */
4825 __syscall uint32_t k_msgq_num_used_get(struct k_msgq *msgq);
4826 
z_impl_k_msgq_num_used_get(struct k_msgq * msgq)4827 static inline uint32_t z_impl_k_msgq_num_used_get(struct k_msgq *msgq)
4828 {
4829 	return msgq->used_msgs;
4830 }
4831 
4832 /** @} */
4833 
4834 /**
4835  * @defgroup mailbox_apis Mailbox APIs
4836  * @ingroup kernel_apis
4837  * @{
4838  */
4839 
4840 /**
4841  * @brief Mailbox Message Structure
4842  *
4843  */
4844 struct k_mbox_msg {
4845 	/** size of message (in bytes) */
4846 	size_t size;
4847 	/** application-defined information value */
4848 	uint32_t info;
4849 	/** sender's message data buffer */
4850 	void *tx_data;
4851 	/** source thread id */
4852 	k_tid_t rx_source_thread;
4853 	/** target thread id */
4854 	k_tid_t tx_target_thread;
4855 	/** internal use only - thread waiting on send (may be a dummy) */
4856 	k_tid_t _syncing_thread;
4857 #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
4858 	/** internal use only - semaphore used during asynchronous send */
4859 	struct k_sem *_async_sem;
4860 #endif
4861 };
4862 /**
4863  * @brief Mailbox Structure
4864  *
4865  */
4866 struct k_mbox {
4867 	/** Transmit messages queue */
4868 	_wait_q_t tx_msg_queue;
4869 	/** Receive message queue */
4870 	_wait_q_t rx_msg_queue;
4871 	struct k_spinlock lock;
4872 
4873 	SYS_PORT_TRACING_TRACKING_FIELD(k_mbox)
4874 
4875 #ifdef CONFIG_OBJ_CORE_MAILBOX
4876 	struct k_obj_core  obj_core;
4877 #endif
4878 };
4879 /**
4880  * @cond INTERNAL_HIDDEN
4881  */
4882 
4883 #define Z_MBOX_INITIALIZER(obj) \
4884 	{ \
4885 	.tx_msg_queue = Z_WAIT_Q_INIT(&obj.tx_msg_queue), \
4886 	.rx_msg_queue = Z_WAIT_Q_INIT(&obj.rx_msg_queue), \
4887 	}
4888 
4889 /**
4890  * INTERNAL_HIDDEN @endcond
4891  */
4892 
4893 /**
4894  * @brief Statically define and initialize a mailbox.
4895  *
4896  * The mailbox is to be accessed outside the module where it is defined using:
4897  *
4898  * @code extern struct k_mbox <name>; @endcode
4899  *
4900  * @param name Name of the mailbox.
4901  */
4902 #define K_MBOX_DEFINE(name) \
4903 	STRUCT_SECTION_ITERABLE(k_mbox, name) = \
4904 		Z_MBOX_INITIALIZER(name) \
4905 
4906 /**
4907  * @brief Initialize a mailbox.
4908  *
4909  * This routine initializes a mailbox object, prior to its first use.
4910  *
4911  * @param mbox Address of the mailbox.
4912  */
4913 void k_mbox_init(struct k_mbox *mbox);
4914 
4915 /**
4916  * @brief Send a mailbox message in a synchronous manner.
4917  *
4918  * This routine sends a message to @a mbox and waits for a receiver to both
4919  * receive and process it. The message data may be in a buffer or non-existent
4920  * (i.e. an empty message).
4921  *
4922  * @param mbox Address of the mailbox.
4923  * @param tx_msg Address of the transmit message descriptor.
4924  * @param timeout Waiting period for the message to be received,
4925  *                or one of the special values K_NO_WAIT
4926  *                and K_FOREVER. Once the message has been received,
4927  *                this routine waits as long as necessary for the message
4928  *                to be completely processed.
4929  *
4930  * @retval 0 Message sent.
4931  * @retval -ENOMSG Returned without waiting.
4932  * @retval -EAGAIN Waiting period timed out.
4933  */
4934 int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
4935 		      k_timeout_t timeout);
4936 
4937 /**
4938  * @brief Send a mailbox message in an asynchronous manner.
4939  *
4940  * This routine sends a message to @a mbox without waiting for a receiver
4941  * to process it. The message data may be in a buffer or non-existent
4942  * (i.e. an empty message). Optionally, the semaphore @a sem will be given
4943  * when the message has been both received and completely processed by
4944  * the receiver.
4945  *
4946  * @param mbox Address of the mailbox.
4947  * @param tx_msg Address of the transmit message descriptor.
4948  * @param sem Address of a semaphore, or NULL if none is needed.
4949  */
4950 void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
4951 			     struct k_sem *sem);
4952 
4953 /**
4954  * @brief Receive a mailbox message.
4955  *
4956  * This routine receives a message from @a mbox, then optionally retrieves
4957  * its data and disposes of the message.
4958  *
4959  * @param mbox Address of the mailbox.
4960  * @param rx_msg Address of the receive message descriptor.
4961  * @param buffer Address of the buffer to receive data, or NULL to defer data
4962  *               retrieval and message disposal until later.
4963  * @param timeout Waiting period for a message to be received,
4964  *                or one of the special values K_NO_WAIT and K_FOREVER.
4965  *
4966  * @retval 0 Message received.
4967  * @retval -ENOMSG Returned without waiting.
4968  * @retval -EAGAIN Waiting period timed out.
4969  */
4970 int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg,
4971 		      void *buffer, k_timeout_t timeout);
4972 
4973 /**
4974  * @brief Retrieve mailbox message data into a buffer.
4975  *
4976  * This routine completes the processing of a received message by retrieving
4977  * its data into a buffer, then disposing of the message.
4978  *
4979  * Alternatively, this routine can be used to dispose of a received message
4980  * without retrieving its data.
4981  *
4982  * @param rx_msg Address of the receive message descriptor.
4983  * @param buffer Address of the buffer to receive data, or NULL to discard
4984  *               the data.
4985  */
4986 void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer);
4987 
4988 /** @} */
4989 
4990 /**
4991  * @defgroup pipe_apis Pipe APIs
4992  * @ingroup kernel_apis
4993  * @{
4994  */
4995 
4996 /**
4997  * @brief initialize a pipe
4998  *
4999  * This routine initializes a pipe object, prior to its first use.
5000  *
5001  * @param pipe Address of the pipe.
5002  * @param buffer Address of the pipe's buffer, or NULL if no ring buffer is used.
5003  * @param buffer_size Size of the pipe's buffer, or zero if no ring buffer is used.
5004  */
5005 __syscall void k_pipe_init(struct k_pipe *pipe, uint8_t *buffer, size_t buffer_size);
5006 
5007 #ifdef CONFIG_PIPES
5008 /** Pipe Structure */
5009 struct k_pipe {
5010 	unsigned char *buffer;          /**< Pipe buffer: may be NULL */
5011 	size_t         size;            /**< Buffer size */
5012 	size_t         bytes_used;      /**< Number of bytes used in buffer */
5013 	size_t         read_index;      /**< Where in buffer to read from */
5014 	size_t         write_index;     /**< Where in buffer to write */
5015 	struct k_spinlock lock;		/**< Synchronization lock */
5016 
5017 	struct {
5018 		_wait_q_t      readers; /**< Reader wait queue */
5019 		_wait_q_t      writers; /**< Writer wait queue */
5020 	} wait_q;			/** Wait queue */
5021 
5022 	Z_DECL_POLL_EVENT
5023 
5024 	uint8_t	       flags;		/**< Flags */
5025 
5026 	SYS_PORT_TRACING_TRACKING_FIELD(k_pipe)
5027 
5028 #ifdef CONFIG_OBJ_CORE_PIPE
5029 	struct k_obj_core  obj_core;
5030 #endif
5031 };
5032 
5033 /**
5034  * @cond INTERNAL_HIDDEN
5035  */
5036 #define K_PIPE_FLAG_ALLOC	BIT(0)	/** Buffer was allocated */
5037 
5038 #define Z_PIPE_INITIALIZER(obj, pipe_buffer, pipe_buffer_size)     \
5039 	{                                                           \
5040 	.buffer = pipe_buffer,                                      \
5041 	.size = pipe_buffer_size,                                   \
5042 	.bytes_used = 0,                                            \
5043 	.read_index = 0,                                            \
5044 	.write_index = 0,                                           \
5045 	.lock = {},                                                 \
5046 	.wait_q = {                                                 \
5047 		.readers = Z_WAIT_Q_INIT(&obj.wait_q.readers),       \
5048 		.writers = Z_WAIT_Q_INIT(&obj.wait_q.writers)        \
5049 	},                                                          \
5050 	Z_POLL_EVENT_OBJ_INIT(obj)                                   \
5051 	.flags = 0,                                                 \
5052 	}
5053 
5054 /**
5055  * INTERNAL_HIDDEN @endcond
5056  */
5057 
5058 /**
5059  * @brief Statically define and initialize a pipe.
5060  *
5061  * The pipe can be accessed outside the module where it is defined using:
5062  *
5063  * @code extern struct k_pipe <name>; @endcode
5064  *
5065  * @param name Name of the pipe.
5066  * @param pipe_buffer_size Size of the pipe's ring buffer (in bytes),
5067  *                         or zero if no ring buffer is used.
5068  * @param pipe_align Alignment of the pipe's ring buffer (power of 2).
5069  *
5070  */
5071 #define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align)		\
5072 	static unsigned char __noinit __aligned(pipe_align)		\
5073 		_k_pipe_buf_##name[pipe_buffer_size];			\
5074 	STRUCT_SECTION_ITERABLE(k_pipe, name) =				\
5075 		Z_PIPE_INITIALIZER(name, _k_pipe_buf_##name, pipe_buffer_size)
5076 
5077 /**
5078  * @deprecated Dynamic allocation of pipe buffers will be removed in the new k_pipe API.
5079  * @brief Release a pipe's allocated buffer
5080  *
5081  * If a pipe object was given a dynamically allocated buffer via
5082  * k_pipe_alloc_init(), this will free it. This function does nothing
5083  * if the buffer wasn't dynamically allocated.
5084  *
5085  * @param pipe Address of the pipe.
5086  * @retval 0 on success
5087  * @retval -EAGAIN nothing to cleanup
5088  */
5089 __deprecated int k_pipe_cleanup(struct k_pipe *pipe);
5090 
5091 /**
5092  * @deprecated Dynamic allocation of pipe buffers will be removed in the new k_pipe API.
5093  * @brief Initialize a pipe and allocate a buffer for it
5094  *
5095  * Storage for the buffer region will be allocated from the calling thread's
5096  * resource pool. This memory will be released if k_pipe_cleanup() is called,
5097  * or userspace is enabled and the pipe object loses all references to it.
5098  *
5099  * This function should only be called on uninitialized pipe objects.
5100  *
5101  * @param pipe Address of the pipe.
5102  * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
5103  *             buffer is used.
5104  * @retval 0 on success
5105  * @retval -ENOMEM if memory couldn't be allocated
5106  */
5107 __deprecated __syscall int k_pipe_alloc_init(struct k_pipe *pipe, size_t size);
5108 
5109 /**
5110  * @deprecated k_pipe_put() is replaced by k_pipe_write(...) in the new k_pipe API.
5111  * @brief Write data to a pipe.
5112  *
5113  * This routine writes up to @a bytes_to_write bytes of data to @a pipe.
5114  *
5115  * @param pipe Address of the pipe.
5116  * @param data Address of data to write.
5117  * @param bytes_to_write Size of data (in bytes).
5118  * @param bytes_written Address of area to hold the number of bytes written.
5119  * @param min_xfer Minimum number of bytes to write.
5120  * @param timeout Waiting period to wait for the data to be written,
5121  *                or one of the special values K_NO_WAIT and K_FOREVER.
5122  *
5123  * @retval 0 At least @a min_xfer bytes of data were written.
5124  * @retval -EIO Returned without waiting; zero data bytes were written.
5125  * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
5126  *                 minus one data bytes were written.
5127  */
5128 __deprecated __syscall int k_pipe_put(struct k_pipe *pipe, const void *data,
5129 			 size_t bytes_to_write, size_t *bytes_written,
5130 			 size_t min_xfer, k_timeout_t timeout);
5131 
5132 /**
5133  * @deprecated k_pipe_get() is replaced by k_pipe_read(...) in the new k_pipe API.
5134  * @brief Read data from a pipe.
5135  *
5136  * This routine reads up to @a bytes_to_read bytes of data from @a pipe.
5137  *
5138  * @param pipe Address of the pipe.
5139  * @param data Address to place the data read from pipe.
5140  * @param bytes_to_read Maximum number of data bytes to read.
5141  * @param bytes_read Address of area to hold the number of bytes read.
5142  * @param min_xfer Minimum number of data bytes to read.
5143  * @param timeout Waiting period to wait for the data to be read,
5144  *                or one of the special values K_NO_WAIT and K_FOREVER.
5145  *
5146  * @retval 0 At least @a min_xfer bytes of data were read.
5147  * @retval -EINVAL invalid parameters supplied
5148  * @retval -EIO Returned without waiting; zero data bytes were read.
5149  * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
5150  *                 minus one data bytes were read.
5151  */
5152 __deprecated  __syscall int k_pipe_get(struct k_pipe *pipe, void *data,
5153 			 size_t bytes_to_read, size_t *bytes_read,
5154 			 size_t min_xfer, k_timeout_t timeout);
5155 
5156 /**
5157  * @deprecated k_pipe_read_avail() will be removed in the new k_pipe API.
5158  * @brief Query the number of bytes that may be read from @a pipe.
5159  *
5160  * @param pipe Address of the pipe.
5161  *
5162  * @retval a number n such that 0 <= n <= @ref k_pipe.size; the
5163  *         result is zero for unbuffered pipes.
5164  */
5165 __deprecated  __syscall size_t k_pipe_read_avail(struct k_pipe *pipe);
5166 
5167 /**
5168  * @deprecated k_pipe_write_avail() will be removed in the new k_pipe API.
5169  * @brief Query the number of bytes that may be written to @a pipe
5170  *
5171  * @param pipe Address of the pipe.
5172  *
5173  * @retval a number n such that 0 <= n <= @ref k_pipe.size; the
5174  *         result is zero for unbuffered pipes.
5175  */
5176 __deprecated __syscall size_t k_pipe_write_avail(struct k_pipe *pipe);
5177 
5178 /**
5179  * @deprecated k_pipe_flush() will be removed in the new k_pipe API.
5180  * @brief Flush the pipe of write data
5181  *
5182  * This routine flushes the pipe. Flushing the pipe is equivalent to reading
5183  * both all the data in the pipe's buffer and all the data waiting to go into
5184  * that pipe into a large temporary buffer and discarding the buffer. Any
5185  * writers that were previously pended become unpended.
5186  *
5187  * @param pipe Address of the pipe.
5188  */
5189 __deprecated __syscall void k_pipe_flush(struct k_pipe *pipe);
5190 
5191 /**
5192  * @deprecated k_pipe_buffer_flush will be removed in the new k_pipe API.
5193  * @brief Flush the pipe's internal buffer
5194  *
5195  * This routine flushes the pipe's internal buffer. This is equivalent to
5196  * reading up to N bytes from the pipe (where N is the size of the pipe's
5197  * buffer) into a temporary buffer and then discarding that buffer. If there
5198  * were writers previously pending, then some may unpend as they try to fill
5199  * up the pipe's emptied buffer.
5200  *
5201  * @param pipe Address of the pipe.
5202  */
5203 __deprecated __syscall void k_pipe_buffer_flush(struct k_pipe *pipe);
5204 
5205 #else /* CONFIG_PIPES */
5206 
5207 enum pipe_flags {
5208 	PIPE_FLAG_OPEN = BIT(0),
5209 	PIPE_FLAG_RESET = BIT(1),
5210 };
5211 
5212 struct k_pipe {
5213 	size_t waiting;
5214 	struct ring_buf buf;
5215 	struct k_spinlock lock;
5216 	_wait_q_t data;
5217 	_wait_q_t space;
5218 	uint8_t flags;
5219 
5220 	Z_DECL_POLL_EVENT
5221 #ifdef CONFIG_OBJ_CORE_PIPE
5222 	struct k_obj_core  obj_core;
5223 #endif
5224 	SYS_PORT_TRACING_TRACKING_FIELD(k_pipe)
5225 };
5226 
5227 /**
5228  * @cond INTERNAL_HIDDEN
5229  */
5230 #define Z_PIPE_INITIALIZER(obj, pipe_buffer, pipe_buffer_size)	\
5231 {								\
5232 	.buf = RING_BUF_INIT(pipe_buffer, pipe_buffer_size),	\
5233 	.data = Z_WAIT_Q_INIT(&obj.data),			\
5234 	.space = Z_WAIT_Q_INIT(&obj.space),			\
5235 	.flags = PIPE_FLAG_OPEN,				\
5236 	.waiting = 0,						\
5237 	Z_POLL_EVENT_OBJ_INIT(obj)				\
5238 }
5239 /**
5240  * INTERNAL_HIDDEN @endcond
5241  */
5242 
5243 /**
5244  * @brief Statically define and initialize a pipe.
5245  *
5246  * The pipe can be accessed outside the module where it is defined using:
5247  *
5248  * @code extern struct k_pipe <name>; @endcode
5249  *
5250  * @param name Name of the pipe.
5251  * @param pipe_buffer_size Size of the pipe's ring buffer (in bytes)
5252  *                         or zero if no ring buffer is used.
5253  * @param pipe_align Alignment of the pipe's ring buffer (power of 2).
5254  *
5255  */
5256 #define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align)		\
5257 	static unsigned char __noinit __aligned(pipe_align)		\
5258 		_k_pipe_buf_##name[pipe_buffer_size];			\
5259 	STRUCT_SECTION_ITERABLE(k_pipe, name) =				\
5260 		Z_PIPE_INITIALIZER(name, _k_pipe_buf_##name, pipe_buffer_size)
5261 
5262 
5263 /**
5264  * @brief Write data to a pipe
5265  *
5266  * This routine writes up to @a len bytes of data to @a pipe.
5267  * If the pipe is full, the routine will block until the data can be written or the timeout expires.
5268  *
5269  * @param pipe Address of the pipe.
5270  * @param data Address of data to write.
5271  * @param len Size of data (in bytes).
5272  * @param timeout Waiting period to wait for the data to be written.
5273  *
5274  * @retval number of bytes written on success
5275  * @retval -EAGAIN if no data could be written before the timeout expired
5276  * @retval -ECANCELED if the write was interrupted by k_pipe_reset(..)
5277  * @retval -EPIPE if the pipe was closed
5278  */
5279 __syscall int k_pipe_write(struct k_pipe *pipe, const uint8_t *data, size_t len,
5280 			   k_timeout_t timeout);
5281 
5282 /**
5283  * @brief Read data from a pipe
5284  * This routine reads up to @a len bytes of data from @a pipe.
5285  * If the pipe is empty, the routine will block until the data can be read or the timeout expires.
5286  *
5287  * @param pipe Address of the pipe.
5288  * @param data Address to place the data read from pipe.
5289  * @param len Requested number of bytes to read.
5290  * @param timeout Waiting period to wait for the data to be read.
5291  *
5292  * @retval number of bytes read on success
5293  * @retval -EAGAIN if no data could be read before the timeout expired
5294  * @retval -ECANCELED if the read was interrupted by k_pipe_reset(..)
5295  * @retval -EPIPE if the pipe was closed
5296  */
5297 __syscall int k_pipe_read(struct k_pipe *pipe, uint8_t *data, size_t len,
5298 			  k_timeout_t timeout);
5299 
5300 /**
5301  * @brief Reset a pipe
5302  * This routine resets the pipe, discarding any unread data and unblocking any threads waiting to
5303  * write or read, causing the waiting threads to return with -ECANCELED. Calling k_pipe_read(..) or
5304  * k_pipe_write(..) when the pipe is resetting but not yet reset will return -ECANCELED.
5305  * The pipe is left open after a reset and can be used as normal.
5306  *
5307  * @param pipe Address of the pipe.
5308  */
5309 __syscall void k_pipe_reset(struct k_pipe *pipe);
5310 
5311 /**
5312  * @brief Close a pipe
5313  *
5314  * This routine closes a pipe. Any threads that were blocked on the pipe
5315  * will be unblocked and receive an error code.
5316  *
5317  * @param pipe Address of the pipe.
5318  */
5319 __syscall void k_pipe_close(struct k_pipe *pipe);
5320 #endif /* CONFIG_PIPES */
5321 /** @} */
5322 
5323 /**
5324  * @cond INTERNAL_HIDDEN
5325  */
5326 struct k_mem_slab_info {
5327 	uint32_t num_blocks;
5328 	size_t   block_size;
5329 	uint32_t num_used;
5330 #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
5331 	uint32_t max_used;
5332 #endif
5333 };
5334 
5335 struct k_mem_slab {
5336 	_wait_q_t wait_q;
5337 	struct k_spinlock lock;
5338 	char *buffer;
5339 	char *free_list;
5340 	struct k_mem_slab_info info;
5341 
5342 	SYS_PORT_TRACING_TRACKING_FIELD(k_mem_slab)
5343 
5344 #ifdef CONFIG_OBJ_CORE_MEM_SLAB
5345 	struct k_obj_core  obj_core;
5346 #endif
5347 };
5348 
5349 #define Z_MEM_SLAB_INITIALIZER(_slab, _slab_buffer, _slab_block_size, \
5350 			       _slab_num_blocks)                      \
5351 	{                                                             \
5352 	.wait_q = Z_WAIT_Q_INIT(&(_slab).wait_q),                     \
5353 	.lock = {},                                                   \
5354 	.buffer = _slab_buffer,                                       \
5355 	.free_list = NULL,                                            \
5356 	.info = {_slab_num_blocks, _slab_block_size, 0}               \
5357 	}
5358 
5359 
5360 /**
5361  * INTERNAL_HIDDEN @endcond
5362  */
5363 
5364 /**
5365  * @defgroup mem_slab_apis Memory Slab APIs
5366  * @ingroup kernel_apis
5367  * @{
5368  */
5369 
5370 /**
5371  * @brief Statically define and initialize a memory slab in a public (non-static) scope.
5372  *
5373  * The memory slab's buffer contains @a slab_num_blocks memory blocks
5374  * that are @a slab_block_size bytes long. The buffer is aligned to a
5375  * @a slab_align -byte boundary. To ensure that each memory block is similarly
5376  * aligned to this boundary, @a slab_block_size must also be a multiple of
5377  * @a slab_align.
5378  *
5379  * The memory slab can be accessed outside the module where it is defined
5380  * using:
5381  *
5382  * @code extern struct k_mem_slab <name>; @endcode
5383  *
5384  * @note This macro cannot be used together with a static keyword.
5385  *       If such a use-case is desired, use @ref K_MEM_SLAB_DEFINE_STATIC
5386  *       instead.
5387  *
5388  * @param name Name of the memory slab.
5389  * @param slab_block_size Size of each memory block (in bytes).
5390  * @param slab_num_blocks Number memory blocks.
5391  * @param slab_align Alignment of the memory slab's buffer (power of 2).
5392  */
5393 #define K_MEM_SLAB_DEFINE(name, slab_block_size, slab_num_blocks, slab_align) \
5394 	char __noinit_named(k_mem_slab_buf_##name) \
5395 	   __aligned(WB_UP(slab_align)) \
5396 	   _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
5397 	STRUCT_SECTION_ITERABLE(k_mem_slab, name) = \
5398 		Z_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \
5399 					WB_UP(slab_block_size), slab_num_blocks)
5400 
5401 /**
5402  * @brief Statically define and initialize a memory slab in a private (static) scope.
5403  *
5404  * The memory slab's buffer contains @a slab_num_blocks memory blocks
5405  * that are @a slab_block_size bytes long. The buffer is aligned to a
5406  * @a slab_align -byte boundary. To ensure that each memory block is similarly
5407  * aligned to this boundary, @a slab_block_size must also be a multiple of
5408  * @a slab_align.
5409  *
5410  * @param name Name of the memory slab.
5411  * @param slab_block_size Size of each memory block (in bytes).
5412  * @param slab_num_blocks Number memory blocks.
5413  * @param slab_align Alignment of the memory slab's buffer (power of 2).
5414  */
5415 #define K_MEM_SLAB_DEFINE_STATIC(name, slab_block_size, slab_num_blocks, slab_align) \
5416 	static char __noinit_named(k_mem_slab_buf_##name) \
5417 	   __aligned(WB_UP(slab_align)) \
5418 	   _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
5419 	static STRUCT_SECTION_ITERABLE(k_mem_slab, name) = \
5420 		Z_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \
5421 					WB_UP(slab_block_size), slab_num_blocks)
5422 
5423 /**
5424  * @brief Initialize a memory slab.
5425  *
5426  * Initializes a memory slab, prior to its first use.
5427  *
5428  * The memory slab's buffer contains @a slab_num_blocks memory blocks
5429  * that are @a slab_block_size bytes long. The buffer must be aligned to an
5430  * N-byte boundary matching a word boundary, where N is a power of 2
5431  * (i.e. 4 on 32-bit systems, 8, 16, ...).
5432  * To ensure that each memory block is similarly aligned to this boundary,
5433  * @a slab_block_size must also be a multiple of N.
5434  *
5435  * @param slab Address of the memory slab.
5436  * @param buffer Pointer to buffer used for the memory blocks.
5437  * @param block_size Size of each memory block (in bytes).
5438  * @param num_blocks Number of memory blocks.
5439  *
5440  * @retval 0 on success
5441  * @retval -EINVAL invalid data supplied
5442  *
5443  */
5444 int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
5445 			   size_t block_size, uint32_t num_blocks);
5446 
5447 /**
5448  * @brief Allocate memory from a memory slab.
5449  *
5450  * This routine allocates a memory block from a memory slab.
5451  *
5452  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5453  * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5454  *
5455  * @funcprops \isr_ok
5456  *
5457  * @param slab Address of the memory slab.
5458  * @param mem Pointer to block address area.
5459  * @param timeout Waiting period to wait for operation to complete.
5460  *        Use K_NO_WAIT to return without waiting,
5461  *        or K_FOREVER to wait as long as necessary.
5462  *
5463  * @retval 0 Memory allocated. The block address area pointed at by @a mem
5464  *         is set to the starting address of the memory block.
5465  * @retval -ENOMEM Returned without waiting.
5466  * @retval -EAGAIN Waiting period timed out.
5467  * @retval -EINVAL Invalid data supplied
5468  */
5469 int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem,
5470 			    k_timeout_t timeout);
5471 
5472 /**
5473  * @brief Free memory allocated from a memory slab.
5474  *
5475  * This routine releases a previously allocated memory block back to its
5476  * associated memory slab.
5477  *
5478  * @param slab Address of the memory slab.
5479  * @param mem Pointer to the memory block (as returned by k_mem_slab_alloc()).
5480  */
5481 void k_mem_slab_free(struct k_mem_slab *slab, void *mem);
5482 
5483 /**
5484  * @brief Get the number of used blocks in a memory slab.
5485  *
5486  * This routine gets the number of memory blocks that are currently
5487  * allocated in @a slab.
5488  *
5489  * @param slab Address of the memory slab.
5490  *
5491  * @return Number of allocated memory blocks.
5492  */
k_mem_slab_num_used_get(struct k_mem_slab * slab)5493 static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
5494 {
5495 	return slab->info.num_used;
5496 }
5497 
5498 /**
5499  * @brief Get the number of maximum used blocks so far in a memory slab.
5500  *
5501  * This routine gets the maximum number of memory blocks that were
5502  * allocated in @a slab.
5503  *
5504  * @param slab Address of the memory slab.
5505  *
5506  * @return Maximum number of allocated memory blocks.
5507  */
k_mem_slab_max_used_get(struct k_mem_slab * slab)5508 static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
5509 {
5510 #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
5511 	return slab->info.max_used;
5512 #else
5513 	ARG_UNUSED(slab);
5514 	return 0;
5515 #endif
5516 }
5517 
5518 /**
5519  * @brief Get the number of unused blocks in a memory slab.
5520  *
5521  * This routine gets the number of memory blocks that are currently
5522  * unallocated in @a slab.
5523  *
5524  * @param slab Address of the memory slab.
5525  *
5526  * @return Number of unallocated memory blocks.
5527  */
k_mem_slab_num_free_get(struct k_mem_slab * slab)5528 static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
5529 {
5530 	return slab->info.num_blocks - slab->info.num_used;
5531 }
5532 
5533 /**
5534  * @brief Get the memory stats for a memory slab
5535  *
5536  * This routine gets the runtime memory usage stats for the slab @a slab.
5537  *
5538  * @param slab Address of the memory slab
5539  * @param stats Pointer to memory into which to copy memory usage statistics
5540  *
5541  * @retval 0 Success
5542  * @retval -EINVAL Any parameter points to NULL
5543  */
5544 
5545 int k_mem_slab_runtime_stats_get(struct k_mem_slab *slab, struct sys_memory_stats *stats);
5546 
5547 /**
5548  * @brief Reset the maximum memory usage for a slab
5549  *
5550  * This routine resets the maximum memory usage for the slab @a slab to its
5551  * current usage.
5552  *
5553  * @param slab Address of the memory slab
5554  *
5555  * @retval 0 Success
5556  * @retval -EINVAL Memory slab is NULL
5557  */
5558 int k_mem_slab_runtime_stats_reset_max(struct k_mem_slab *slab);
5559 
5560 /** @} */
5561 
5562 /**
5563  * @addtogroup heap_apis
5564  * @{
5565  */
5566 
5567 /* kernel synchronized heap struct */
5568 
5569 struct k_heap {
5570 	struct sys_heap heap;
5571 	_wait_q_t wait_q;
5572 	struct k_spinlock lock;
5573 };
5574 
5575 /**
5576  * @brief Initialize a k_heap
5577  *
5578  * This constructs a synchronized k_heap object over a memory region
5579  * specified by the user.  Note that while any alignment and size can
5580  * be passed as valid parameters, internal alignment restrictions
5581  * inside the inner sys_heap mean that not all bytes may be usable as
5582  * allocated memory.
5583  *
5584  * @param h Heap struct to initialize
5585  * @param mem Pointer to memory.
5586  * @param bytes Size of memory region, in bytes
5587  */
5588 void k_heap_init(struct k_heap *h, void *mem,
5589 		size_t bytes) __attribute_nonnull(1);
5590 
5591 /**
5592  * @brief Allocate aligned memory from a k_heap
5593  *
5594  * Behaves in all ways like k_heap_alloc(), except that the returned
5595  * memory (if available) will have a starting address in memory which
5596  * is a multiple of the specified power-of-two alignment value in
5597  * bytes.  The resulting memory can be returned to the heap using
5598  * k_heap_free().
5599  *
5600  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5601  * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5602  *
5603  * @funcprops \isr_ok
5604  *
5605  * @param h Heap from which to allocate
5606  * @param align Alignment in bytes, must be a power of two
5607  * @param bytes Number of bytes requested
5608  * @param timeout How long to wait, or K_NO_WAIT
5609  * @return Pointer to memory the caller can now use
5610  */
5611 void *k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes,
5612 			k_timeout_t timeout) __attribute_nonnull(1);
5613 
5614 /**
5615  * @brief Allocate memory from a k_heap
5616  *
5617  * Allocates and returns a memory buffer from the memory region owned
5618  * by the heap.  If no memory is available immediately, the call will
5619  * block for the specified timeout (constructed via the standard
5620  * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
5621  * freed.  If the allocation cannot be performed by the expiration of
5622  * the timeout, NULL will be returned.
5623  * Allocated memory is aligned on a multiple of pointer sizes.
5624  *
5625  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5626  * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5627  *
5628  * @funcprops \isr_ok
5629  *
5630  * @param h Heap from which to allocate
5631  * @param bytes Desired size of block to allocate
5632  * @param timeout How long to wait, or K_NO_WAIT
5633  * @return A pointer to valid heap memory, or NULL
5634  */
5635 void *k_heap_alloc(struct k_heap *h, size_t bytes,
5636 		k_timeout_t timeout) __attribute_nonnull(1);
5637 
5638 /**
5639  * @brief Allocate and initialize memory for an array of objects from a k_heap
5640  *
5641  * Allocates memory for an array of num objects of size and initializes all
5642  * bytes in the allocated storage to zero.  If no memory is available
5643  * immediately, the call will block for the specified timeout (constructed
5644  * via the standard timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory
5645  * to be freed.  If the allocation cannot be performed by the expiration of
5646  * the timeout, NULL will be returned.
5647  * Allocated memory is aligned on a multiple of pointer sizes.
5648  *
5649  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5650  * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5651  *
5652  * @funcprops \isr_ok
5653  *
5654  * @param h Heap from which to allocate
5655  * @param num Number of objects to allocate
5656  * @param size Desired size of each object to allocate
5657  * @param timeout How long to wait, or K_NO_WAIT
5658  * @return A pointer to valid heap memory, or NULL
5659  */
5660 void *k_heap_calloc(struct k_heap *h, size_t num, size_t size, k_timeout_t timeout)
5661 	__attribute_nonnull(1);
5662 
5663 /**
5664  * @brief Reallocate memory from a k_heap
5665  *
5666  * Reallocates and returns a memory buffer from the memory region owned
5667  * by the heap.  If no memory is available immediately, the call will
5668  * block for the specified timeout (constructed via the standard
5669  * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
5670  * freed.  If the allocation cannot be performed by the expiration of
5671  * the timeout, NULL will be returned.
5672  * Reallocated memory is aligned on a multiple of pointer sizes.
5673  *
5674  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5675  * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5676  *
5677  * @funcprops \isr_ok
5678  *
5679  * @param h Heap from which to allocate
5680  * @param ptr Original pointer returned from a previous allocation
5681  * @param bytes Desired size of block to allocate
5682  * @param timeout How long to wait, or K_NO_WAIT
5683  *
5684  * @return Pointer to memory the caller can now use, or NULL
5685  */
5686 void *k_heap_realloc(struct k_heap *h, void *ptr, size_t bytes, k_timeout_t timeout)
5687 	__attribute_nonnull(1);
5688 
5689 /**
5690  * @brief Free memory allocated by k_heap_alloc()
5691  *
5692  * Returns the specified memory block, which must have been returned
5693  * from k_heap_alloc(), to the heap for use by other callers.  Passing
5694  * a NULL block is legal, and has no effect.
5695  *
5696  * @param h Heap to which to return the memory
5697  * @param mem A valid memory block, or NULL
5698  */
5699 void k_heap_free(struct k_heap *h, void *mem) __attribute_nonnull(1);
5700 
5701 /* Hand-calculated minimum heap sizes needed to return a successful
5702  * 1-byte allocation.  See details in lib/os/heap.[ch]
5703  */
5704 #define Z_HEAP_MIN_SIZE ((sizeof(void *) > 4) ? 56 : 44)
5705 
5706 /**
5707  * @brief Define a static k_heap in the specified linker section
5708  *
5709  * This macro defines and initializes a static memory region and
5710  * k_heap of the requested size in the specified linker section.
5711  * After kernel start, &name can be used as if k_heap_init() had
5712  * been called.
5713  *
5714  * Note that this macro enforces a minimum size on the memory region
5715  * to accommodate metadata requirements.  Very small heaps will be
5716  * padded to fit.
5717  *
5718  * @param name Symbol name for the struct k_heap object
5719  * @param bytes Size of memory region, in bytes
5720  * @param in_section __attribute__((section(name))
5721  */
5722 #define Z_HEAP_DEFINE_IN_SECT(name, bytes, in_section)		\
5723 	char in_section						\
5724 	     __aligned(8) /* CHUNK_UNIT */			\
5725 	     kheap_##name[MAX(bytes, Z_HEAP_MIN_SIZE)];		\
5726 	STRUCT_SECTION_ITERABLE(k_heap, name) = {		\
5727 		.heap = {					\
5728 			.init_mem = kheap_##name,		\
5729 			.init_bytes = MAX(bytes, Z_HEAP_MIN_SIZE), \
5730 		 },						\
5731 	}
5732 
5733 /**
5734  * @brief Define a static k_heap
5735  *
5736  * This macro defines and initializes a static memory region and
5737  * k_heap of the requested size.  After kernel start, &name can be
5738  * used as if k_heap_init() had been called.
5739  *
5740  * Note that this macro enforces a minimum size on the memory region
5741  * to accommodate metadata requirements.  Very small heaps will be
5742  * padded to fit.
5743  *
5744  * @param name Symbol name for the struct k_heap object
5745  * @param bytes Size of memory region, in bytes
5746  */
5747 #define K_HEAP_DEFINE(name, bytes)				\
5748 	Z_HEAP_DEFINE_IN_SECT(name, bytes,			\
5749 			      __noinit_named(kheap_buf_##name))
5750 
5751 /**
5752  * @brief Define a static k_heap in uncached memory
5753  *
5754  * This macro defines and initializes a static memory region and
5755  * k_heap of the requested size in uncached memory.  After kernel
5756  * start, &name can be used as if k_heap_init() had been called.
5757  *
5758  * Note that this macro enforces a minimum size on the memory region
5759  * to accommodate metadata requirements.  Very small heaps will be
5760  * padded to fit.
5761  *
5762  * @param name Symbol name for the struct k_heap object
5763  * @param bytes Size of memory region, in bytes
5764  */
5765 #define K_HEAP_DEFINE_NOCACHE(name, bytes)			\
5766 	Z_HEAP_DEFINE_IN_SECT(name, bytes, __nocache)
5767 
5768 /**
5769  * @}
5770  */
5771 
5772 /**
5773  * @defgroup heap_apis Heap APIs
5774  * @ingroup kernel_apis
5775  * @{
5776  */
5777 
5778 /**
5779  * @brief Allocate memory from the heap with a specified alignment.
5780  *
5781  * This routine provides semantics similar to aligned_alloc(); memory is
5782  * allocated from the heap with a specified alignment. However, one minor
5783  * difference is that k_aligned_alloc() accepts any non-zero @p size,
5784  * whereas aligned_alloc() only accepts a @p size that is an integral
5785  * multiple of @p align.
5786  *
5787  * Above, aligned_alloc() refers to:
5788  * C11 standard (ISO/IEC 9899:2011): 7.22.3.1
5789  * The aligned_alloc function (p: 347-348)
5790  *
5791  * @param align Alignment of memory requested (in bytes).
5792  * @param size Amount of memory requested (in bytes).
5793  *
5794  * @return Address of the allocated memory if successful; otherwise NULL.
5795  */
5796 void *k_aligned_alloc(size_t align, size_t size);
5797 
5798 /**
5799  * @brief Allocate memory from the heap.
5800  *
5801  * This routine provides traditional malloc() semantics. Memory is
5802  * allocated from the heap memory pool.
5803  * Allocated memory is aligned on a multiple of pointer sizes.
5804  *
5805  * @param size Amount of memory requested (in bytes).
5806  *
5807  * @return Address of the allocated memory if successful; otherwise NULL.
5808  */
5809 void *k_malloc(size_t size);
5810 
5811 /**
5812  * @brief Free memory allocated from heap.
5813  *
5814  * This routine provides traditional free() semantics. The memory being
5815  * returned must have been allocated from the heap memory pool.
5816  *
5817  * If @a ptr is NULL, no operation is performed.
5818  *
5819  * @param ptr Pointer to previously allocated memory.
5820  */
5821 void k_free(void *ptr);
5822 
5823 /**
5824  * @brief Allocate memory from heap, array style
5825  *
5826  * This routine provides traditional calloc() semantics. Memory is
5827  * allocated from the heap memory pool and zeroed.
5828  *
5829  * @param nmemb Number of elements in the requested array
5830  * @param size Size of each array element (in bytes).
5831  *
5832  * @return Address of the allocated memory if successful; otherwise NULL.
5833  */
5834 void *k_calloc(size_t nmemb, size_t size);
5835 
5836 /** @brief Expand the size of an existing allocation
5837  *
5838  * Returns a pointer to a new memory region with the same contents,
5839  * but a different allocated size.  If the new allocation can be
5840  * expanded in place, the pointer returned will be identical.
5841  * Otherwise the data will be copies to a new block and the old one
5842  * will be freed as per sys_heap_free().  If the specified size is
5843  * smaller than the original, the block will be truncated in place and
5844  * the remaining memory returned to the heap.  If the allocation of a
5845  * new block fails, then NULL will be returned and the old block will
5846  * not be freed or modified.
5847  *
5848  * @param ptr Original pointer returned from a previous allocation
5849  * @param size Amount of memory requested (in bytes).
5850  *
5851  * @return Pointer to memory the caller can now use, or NULL.
5852  */
5853 void *k_realloc(void *ptr, size_t size);
5854 
5855 /** @} */
5856 
5857 /* polling API - PRIVATE */
5858 
5859 #ifdef CONFIG_POLL
5860 #define _INIT_OBJ_POLL_EVENT(obj) do { (obj)->poll_event = NULL; } while (false)
5861 #else
5862 #define _INIT_OBJ_POLL_EVENT(obj) do { } while (false)
5863 #endif
5864 
5865 /* private - types bit positions */
5866 enum _poll_types_bits {
5867 	/* can be used to ignore an event */
5868 	_POLL_TYPE_IGNORE,
5869 
5870 	/* to be signaled by k_poll_signal_raise() */
5871 	_POLL_TYPE_SIGNAL,
5872 
5873 	/* semaphore availability */
5874 	_POLL_TYPE_SEM_AVAILABLE,
5875 
5876 	/* queue/FIFO/LIFO data availability */
5877 	_POLL_TYPE_DATA_AVAILABLE,
5878 
5879 	/* msgq data availability */
5880 	_POLL_TYPE_MSGQ_DATA_AVAILABLE,
5881 
5882 	/* pipe data availability */
5883 	_POLL_TYPE_PIPE_DATA_AVAILABLE,
5884 
5885 	_POLL_NUM_TYPES
5886 };
5887 
5888 #define Z_POLL_TYPE_BIT(type) (1U << ((type) - 1U))
5889 
5890 /* private - states bit positions */
5891 enum _poll_states_bits {
5892 	/* default state when creating event */
5893 	_POLL_STATE_NOT_READY,
5894 
5895 	/* signaled by k_poll_signal_raise() */
5896 	_POLL_STATE_SIGNALED,
5897 
5898 	/* semaphore is available */
5899 	_POLL_STATE_SEM_AVAILABLE,
5900 
5901 	/* data is available to read on queue/FIFO/LIFO */
5902 	_POLL_STATE_DATA_AVAILABLE,
5903 
5904 	/* queue/FIFO/LIFO wait was cancelled */
5905 	_POLL_STATE_CANCELLED,
5906 
5907 	/* data is available to read on a message queue */
5908 	_POLL_STATE_MSGQ_DATA_AVAILABLE,
5909 
5910 	/* data is available to read from a pipe */
5911 	_POLL_STATE_PIPE_DATA_AVAILABLE,
5912 
5913 	_POLL_NUM_STATES
5914 };
5915 
5916 #define Z_POLL_STATE_BIT(state) (1U << ((state) - 1U))
5917 
5918 #define _POLL_EVENT_NUM_UNUSED_BITS \
5919 	(32 - (0 \
5920 	       + 8 /* tag */ \
5921 	       + _POLL_NUM_TYPES \
5922 	       + _POLL_NUM_STATES \
5923 	       + 1 /* modes */ \
5924 	      ))
5925 
5926 /* end of polling API - PRIVATE */
5927 
5928 
5929 /**
5930  * @defgroup poll_apis Async polling APIs
5931  * @ingroup kernel_apis
5932  * @{
5933  */
5934 
5935 /* Public polling API */
5936 
5937 /* public - values for k_poll_event.type bitfield */
5938 #define K_POLL_TYPE_IGNORE 0
5939 #define K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
5940 #define K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
5941 #define K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
5942 #define K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE
5943 #define K_POLL_TYPE_MSGQ_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_MSGQ_DATA_AVAILABLE)
5944 #define K_POLL_TYPE_PIPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_PIPE_DATA_AVAILABLE)
5945 
5946 /* public - polling modes */
5947 enum k_poll_modes {
5948 	/* polling thread does not take ownership of objects when available */
5949 	K_POLL_MODE_NOTIFY_ONLY = 0,
5950 
5951 	K_POLL_NUM_MODES
5952 };
5953 
5954 /* public - values for k_poll_event.state bitfield */
5955 #define K_POLL_STATE_NOT_READY 0
5956 #define K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED)
5957 #define K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
5958 #define K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
5959 #define K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE
5960 #define K_POLL_STATE_MSGQ_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_MSGQ_DATA_AVAILABLE)
5961 #define K_POLL_STATE_PIPE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_PIPE_DATA_AVAILABLE)
5962 #define K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED)
5963 
5964 /* public - poll signal object */
5965 struct k_poll_signal {
5966 	/** PRIVATE - DO NOT TOUCH */
5967 	sys_dlist_t poll_events;
5968 
5969 	/**
5970 	 * 1 if the event has been signaled, 0 otherwise. Stays set to 1 until
5971 	 * user resets it to 0.
5972 	 */
5973 	unsigned int signaled;
5974 
5975 	/** custom result value passed to k_poll_signal_raise() if needed */
5976 	int result;
5977 };
5978 
5979 #define K_POLL_SIGNAL_INITIALIZER(obj) \
5980 	{ \
5981 	.poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events), \
5982 	.signaled = 0, \
5983 	.result = 0, \
5984 	}
5985 /**
5986  * @brief Poll Event
5987  *
5988  */
5989 struct k_poll_event {
5990 	/** PRIVATE - DO NOT TOUCH */
5991 	sys_dnode_t _node;
5992 
5993 	/** PRIVATE - DO NOT TOUCH */
5994 	struct z_poller *poller;
5995 
5996 	/** optional user-specified tag, opaque, untouched by the API */
5997 	uint32_t tag:8;
5998 
5999 	/** bitfield of event types (bitwise-ORed K_POLL_TYPE_xxx values) */
6000 	uint32_t type:_POLL_NUM_TYPES;
6001 
6002 	/** bitfield of event states (bitwise-ORed K_POLL_STATE_xxx values) */
6003 	uint32_t state:_POLL_NUM_STATES;
6004 
6005 	/** mode of operation, from enum k_poll_modes */
6006 	uint32_t mode:1;
6007 
6008 	/** unused bits in 32-bit word */
6009 	uint32_t unused:_POLL_EVENT_NUM_UNUSED_BITS;
6010 
6011 	/** per-type data */
6012 	union {
6013 		/* The typed_* fields below are used by K_POLL_EVENT_*INITIALIZER() macros to ensure
6014 		 * type safety of polled objects.
6015 		 */
6016 		void *obj, *typed_K_POLL_TYPE_IGNORE;
6017 		struct k_poll_signal *signal, *typed_K_POLL_TYPE_SIGNAL;
6018 		struct k_sem *sem, *typed_K_POLL_TYPE_SEM_AVAILABLE;
6019 		struct k_fifo *fifo, *typed_K_POLL_TYPE_FIFO_DATA_AVAILABLE;
6020 		struct k_queue *queue, *typed_K_POLL_TYPE_DATA_AVAILABLE;
6021 		struct k_msgq *msgq, *typed_K_POLL_TYPE_MSGQ_DATA_AVAILABLE;
6022 		struct k_pipe *pipe, *typed_K_POLL_TYPE_PIPE_DATA_AVAILABLE;
6023 	};
6024 };
6025 
6026 #define K_POLL_EVENT_INITIALIZER(_event_type, _event_mode, _event_obj) \
6027 	{ \
6028 	.poller = NULL, \
6029 	.type = _event_type, \
6030 	.state = K_POLL_STATE_NOT_READY, \
6031 	.mode = _event_mode, \
6032 	.unused = 0, \
6033 	{ \
6034 		.typed_##_event_type = _event_obj, \
6035 	}, \
6036 	}
6037 
6038 #define K_POLL_EVENT_STATIC_INITIALIZER(_event_type, _event_mode, _event_obj, \
6039 					event_tag) \
6040 	{ \
6041 	.tag = event_tag, \
6042 	.type = _event_type, \
6043 	.state = K_POLL_STATE_NOT_READY, \
6044 	.mode = _event_mode, \
6045 	.unused = 0, \
6046 	{ \
6047 		.typed_##_event_type = _event_obj, \
6048 	}, \
6049 	}
6050 
6051 /**
6052  * @brief Initialize one struct k_poll_event instance
6053  *
6054  * After this routine is called on a poll event, the event it ready to be
6055  * placed in an event array to be passed to k_poll().
6056  *
6057  * @param event The event to initialize.
6058  * @param type A bitfield of the types of event, from the K_POLL_TYPE_xxx
6059  *             values. Only values that apply to the same object being polled
6060  *             can be used together. Choosing K_POLL_TYPE_IGNORE disables the
6061  *             event.
6062  * @param mode Future. Use K_POLL_MODE_NOTIFY_ONLY.
6063  * @param obj Kernel object or poll signal.
6064  */
6065 
6066 void k_poll_event_init(struct k_poll_event *event, uint32_t type,
6067 			      int mode, void *obj);
6068 
6069 /**
6070  * @brief Wait for one or many of multiple poll events to occur
6071  *
6072  * This routine allows a thread to wait concurrently for one or many of
6073  * multiple poll events to have occurred. Such events can be a kernel object
6074  * being available, like a semaphore, or a poll signal event.
6075  *
6076  * When an event notifies that a kernel object is available, the kernel object
6077  * is not "given" to the thread calling k_poll(): it merely signals the fact
6078  * that the object was available when the k_poll() call was in effect. Also,
6079  * all threads trying to acquire an object the regular way, i.e. by pending on
6080  * the object, have precedence over the thread polling on the object. This
6081  * means that the polling thread will never get the poll event on an object
6082  * until the object becomes available and its pend queue is empty. For this
6083  * reason, the k_poll() call is more effective when the objects being polled
6084  * only have one thread, the polling thread, trying to acquire them.
6085  *
6086  * When k_poll() returns 0, the caller should loop on all the events that were
6087  * passed to k_poll() and check the state field for the values that were
6088  * expected and take the associated actions.
6089  *
6090  * Before being reused for another call to k_poll(), the user has to reset the
6091  * state field to K_POLL_STATE_NOT_READY.
6092  *
6093  * When called from user mode, a temporary memory allocation is required from
6094  * the caller's resource pool.
6095  *
6096  * @param events An array of events to be polled for.
6097  * @param num_events The number of events in the array.
6098  * @param timeout Waiting period for an event to be ready,
6099  *                or one of the special values K_NO_WAIT and K_FOREVER.
6100  *
6101  * @retval 0 One or more events are ready.
6102  * @retval -EAGAIN Waiting period timed out.
6103  * @retval -EINTR Polling has been interrupted, e.g. with
6104  *         k_queue_cancel_wait(). All output events are still set and valid,
6105  *         cancelled event(s) will be set to K_POLL_STATE_CANCELLED. In other
6106  *         words, -EINTR status means that at least one of output events is
6107  *         K_POLL_STATE_CANCELLED.
6108  * @retval -ENOMEM Thread resource pool insufficient memory (user mode only)
6109  * @retval -EINVAL Bad parameters (user mode only)
6110  */
6111 
6112 __syscall int k_poll(struct k_poll_event *events, int num_events,
6113 		     k_timeout_t timeout);
6114 
6115 /**
6116  * @brief Initialize a poll signal object.
6117  *
6118  * Ready a poll signal object to be signaled via k_poll_signal_raise().
6119  *
6120  * @param sig A poll signal.
6121  */
6122 
6123 __syscall void k_poll_signal_init(struct k_poll_signal *sig);
6124 
6125 /**
6126  * @brief Reset a poll signal object's state to unsignaled.
6127  *
6128  * @param sig A poll signal object
6129  */
6130 __syscall void k_poll_signal_reset(struct k_poll_signal *sig);
6131 
6132 /**
6133  * @brief Fetch the signaled state and result value of a poll signal
6134  *
6135  * @param sig A poll signal object
6136  * @param signaled An integer buffer which will be written nonzero if the
6137  *		   object was signaled
6138  * @param result An integer destination buffer which will be written with the
6139  *		   result value if the object was signaled, or an undefined
6140  *		   value if it was not.
6141  */
6142 __syscall void k_poll_signal_check(struct k_poll_signal *sig,
6143 				   unsigned int *signaled, int *result);
6144 
6145 /**
6146  * @brief Signal a poll signal object.
6147  *
6148  * This routine makes ready a poll signal, which is basically a poll event of
6149  * type K_POLL_TYPE_SIGNAL. If a thread was polling on that event, it will be
6150  * made ready to run. A @a result value can be specified.
6151  *
6152  * The poll signal contains a 'signaled' field that, when set by
6153  * k_poll_signal_raise(), stays set until the user sets it back to 0 with
6154  * k_poll_signal_reset(). It thus has to be reset by the user before being
6155  * passed again to k_poll() or k_poll() will consider it being signaled, and
6156  * will return immediately.
6157  *
6158  * @note The result is stored and the 'signaled' field is set even if
6159  * this function returns an error indicating that an expiring poll was
6160  * not notified.  The next k_poll() will detect the missed raise.
6161  *
6162  * @param sig A poll signal.
6163  * @param result The value to store in the result field of the signal.
6164  *
6165  * @retval 0 The signal was delivered successfully.
6166  * @retval -EAGAIN The polling thread's timeout is in the process of expiring.
6167  */
6168 
6169 __syscall int k_poll_signal_raise(struct k_poll_signal *sig, int result);
6170 
6171 /** @} */
6172 
6173 /**
6174  * @defgroup cpu_idle_apis CPU Idling APIs
6175  * @ingroup kernel_apis
6176  * @{
6177  */
6178 /**
6179  * @brief Make the CPU idle.
6180  *
6181  * This function makes the CPU idle until an event wakes it up.
6182  *
6183  * In a regular system, the idle thread should be the only thread responsible
6184  * for making the CPU idle and triggering any type of power management.
6185  * However, in some more constrained systems, such as a single-threaded system,
6186  * the only thread would be responsible for this if needed.
6187  *
6188  * @note In some architectures, before returning, the function unmasks interrupts
6189  * unconditionally.
6190  */
k_cpu_idle(void)6191 static inline void k_cpu_idle(void)
6192 {
6193 	arch_cpu_idle();
6194 }
6195 
6196 /**
6197  * @brief Make the CPU idle in an atomic fashion.
6198  *
6199  * Similar to k_cpu_idle(), but must be called with interrupts locked.
6200  *
6201  * Enabling interrupts and entering a low-power mode will be atomic,
6202  * i.e. there will be no period of time where interrupts are enabled before
6203  * the processor enters a low-power mode.
6204  *
6205  * After waking up from the low-power mode, the interrupt lockout state will
6206  * be restored as if by irq_unlock(key).
6207  *
6208  * @param key Interrupt locking key obtained from irq_lock().
6209  */
k_cpu_atomic_idle(unsigned int key)6210 static inline void k_cpu_atomic_idle(unsigned int key)
6211 {
6212 	arch_cpu_atomic_idle(key);
6213 }
6214 
6215 /**
6216  * @}
6217  */
6218 
6219 /**
6220  * @cond INTERNAL_HIDDEN
6221  * @internal
6222  */
6223 #ifdef ARCH_EXCEPT
6224 /* This architecture has direct support for triggering a CPU exception */
6225 #define z_except_reason(reason)	ARCH_EXCEPT(reason)
6226 #else
6227 
6228 #if !defined(CONFIG_ASSERT_NO_FILE_INFO)
6229 #define __EXCEPT_LOC() __ASSERT_PRINT("@ %s:%d\n", __FILE__, __LINE__)
6230 #else
6231 #define __EXCEPT_LOC()
6232 #endif
6233 
6234 /* NOTE: This is the implementation for arches that do not implement
6235  * ARCH_EXCEPT() to generate a real CPU exception.
6236  *
6237  * We won't have a real exception frame to determine the PC value when
6238  * the oops occurred, so print file and line number before we jump into
6239  * the fatal error handler.
6240  */
6241 #define z_except_reason(reason) do { \
6242 		__EXCEPT_LOC();              \
6243 		z_fatal_error(reason, NULL); \
6244 	} while (false)
6245 
6246 #endif /* _ARCH__EXCEPT */
6247 /**
6248  * INTERNAL_HIDDEN @endcond
6249  */
6250 
6251 /**
6252  * @brief Fatally terminate a thread
6253  *
6254  * This should be called when a thread has encountered an unrecoverable
6255  * runtime condition and needs to terminate. What this ultimately
6256  * means is determined by the _fatal_error_handler() implementation, which
6257  * will be called will reason code K_ERR_KERNEL_OOPS.
6258  *
6259  * If this is called from ISR context, the default system fatal error handler
6260  * will treat it as an unrecoverable system error, just like k_panic().
6261  */
6262 #define k_oops()	z_except_reason(K_ERR_KERNEL_OOPS)
6263 
6264 /**
6265  * @brief Fatally terminate the system
6266  *
6267  * This should be called when the Zephyr kernel has encountered an
6268  * unrecoverable runtime condition and needs to terminate. What this ultimately
6269  * means is determined by the _fatal_error_handler() implementation, which
6270  * will be called will reason code K_ERR_KERNEL_PANIC.
6271  */
6272 #define k_panic()	z_except_reason(K_ERR_KERNEL_PANIC)
6273 
6274 /**
6275  * @cond INTERNAL_HIDDEN
6276  */
6277 
6278 /*
6279  * private APIs that are utilized by one or more public APIs
6280  */
6281 
6282 /**
6283  * @internal
6284  */
6285 void z_timer_expiration_handler(struct _timeout *timeout);
6286 /**
6287  * INTERNAL_HIDDEN @endcond
6288  */
6289 
6290 #ifdef CONFIG_PRINTK
6291 /**
6292  * @brief Emit a character buffer to the console device
6293  *
6294  * @param c String of characters to print
6295  * @param n The length of the string
6296  *
6297  */
6298 __syscall void k_str_out(char *c, size_t n);
6299 #endif
6300 
6301 /**
6302  * @defgroup float_apis Floating Point APIs
6303  * @ingroup kernel_apis
6304  * @{
6305  */
6306 
6307 /**
6308  * @brief Disable preservation of floating point context information.
6309  *
6310  * This routine informs the kernel that the specified thread
6311  * will no longer be using the floating point registers.
6312  *
6313  * @warning
6314  * Some architectures apply restrictions on how the disabling of floating
6315  * point preservation may be requested, see arch_float_disable.
6316  *
6317  * @warning
6318  * This routine should only be used to disable floating point support for
6319  * a thread that currently has such support enabled.
6320  *
6321  * @param thread ID of thread.
6322  *
6323  * @retval 0        On success.
6324  * @retval -ENOTSUP If the floating point disabling is not implemented.
6325  *         -EINVAL  If the floating point disabling could not be performed.
6326  */
6327 __syscall int k_float_disable(struct k_thread *thread);
6328 
6329 /**
6330  * @brief Enable preservation of floating point context information.
6331  *
6332  * This routine informs the kernel that the specified thread
6333  * will use the floating point registers.
6334 
6335  * Invoking this routine initializes the thread's floating point context info
6336  * to that of an FPU that has been reset. The next time the thread is scheduled
6337  * by z_swap() it will either inherit an FPU that is guaranteed to be in a
6338  * "sane" state (if the most recent user of the FPU was cooperatively swapped
6339  * out) or the thread's own floating point context will be loaded (if the most
6340  * recent user of the FPU was preempted, or if this thread is the first user
6341  * of the FPU). Thereafter, the kernel will protect the thread's FP context
6342  * so that it is not altered during a preemptive context switch.
6343  *
6344  * The @a options parameter indicates which floating point register sets will
6345  * be used by the specified thread.
6346  *
6347  * For x86 options:
6348  *
6349  * - K_FP_REGS  indicates x87 FPU and MMX registers only
6350  * - K_SSE_REGS indicates SSE registers (and also x87 FPU and MMX registers)
6351  *
6352  * @warning
6353  * Some architectures apply restrictions on how the enabling of floating
6354  * point preservation may be requested, see arch_float_enable.
6355  *
6356  * @warning
6357  * This routine should only be used to enable floating point support for
6358  * a thread that currently has such support enabled.
6359  *
6360  * @param thread  ID of thread.
6361  * @param options architecture dependent options
6362  *
6363  * @retval 0        On success.
6364  * @retval -ENOTSUP If the floating point enabling is not implemented.
6365  *         -EINVAL  If the floating point enabling could not be performed.
6366  */
6367 __syscall int k_float_enable(struct k_thread *thread, unsigned int options);
6368 
6369 /**
6370  * @}
6371  */
6372 
6373 /**
6374  * @brief Get the runtime statistics of a thread
6375  *
6376  * @param thread ID of thread.
6377  * @param stats Pointer to struct to copy statistics into.
6378  * @return -EINVAL if null pointers, otherwise 0
6379  */
6380 int k_thread_runtime_stats_get(k_tid_t thread,
6381 			       k_thread_runtime_stats_t *stats);
6382 
6383 /**
6384  * @brief Get the runtime statistics of all threads
6385  *
6386  * @param stats Pointer to struct to copy statistics into.
6387  * @return -EINVAL if null pointers, otherwise 0
6388  */
6389 int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats);
6390 
6391 /**
6392  * @brief Get the runtime statistics of all threads on specified cpu
6393  *
6394  * @param cpu The cpu number
6395  * @param stats Pointer to struct to copy statistics into.
6396  * @return -EINVAL if null pointers, otherwise 0
6397  */
6398 int k_thread_runtime_stats_cpu_get(int cpu, k_thread_runtime_stats_t *stats);
6399 
6400 /**
6401  * @brief Enable gathering of runtime statistics for specified thread
6402  *
6403  * This routine enables the gathering of runtime statistics for the specified
6404  * thread.
6405  *
6406  * @param thread ID of thread
6407  * @return -EINVAL if invalid thread ID, otherwise 0
6408  */
6409 int k_thread_runtime_stats_enable(k_tid_t thread);
6410 
6411 /**
6412  * @brief Disable gathering of runtime statistics for specified thread
6413  *
6414  * This routine disables the gathering of runtime statistics for the specified
6415  * thread.
6416  *
6417  * @param thread ID of thread
6418  * @return -EINVAL if invalid thread ID, otherwise 0
6419  */
6420 int k_thread_runtime_stats_disable(k_tid_t thread);
6421 
6422 /**
6423  * @brief Enable gathering of system runtime statistics
6424  *
6425  * This routine enables the gathering of system runtime statistics. Note that
6426  * it does not affect the gathering of similar statistics for individual
6427  * threads.
6428  */
6429 void k_sys_runtime_stats_enable(void);
6430 
6431 /**
6432  * @brief Disable gathering of system runtime statistics
6433  *
6434  * This routine disables the gathering of system runtime statistics. Note that
6435  * it does not affect the gathering of similar statistics for individual
6436  * threads.
6437  */
6438 void k_sys_runtime_stats_disable(void);
6439 
6440 #ifdef __cplusplus
6441 }
6442 #endif
6443 
6444 #include <zephyr/tracing/tracing.h>
6445 #include <zephyr/syscalls/kernel.h>
6446 
6447 #endif /* !_ASMLANGUAGE */
6448 
6449 #endif /* ZEPHYR_INCLUDE_KERNEL_H_ */
6450