/*
 * Copyright (c) 2016, Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Public kernel APIs.
 */

#ifndef ZEPHYR_INCLUDE_KERNEL_H_
#define ZEPHYR_INCLUDE_KERNEL_H_

#if !defined(_ASMLANGUAGE)
#include <zephyr/kernel_includes.h>
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <zephyr/toolchain.h>
#include <zephyr/tracing/tracing_macros.h>
#include <zephyr/sys/mem_stats.h>
#include <zephyr/sys/iterable_sections.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Zephyr currently assumes the size of a couple standard types to simplify
 * print string formats. Let's make sure this doesn't change without notice.
 */
BUILD_ASSERT(sizeof(int32_t) == sizeof(int));
BUILD_ASSERT(sizeof(int64_t) == sizeof(long long));
BUILD_ASSERT(sizeof(intptr_t) == sizeof(long));

/**
 * @brief Kernel APIs
 * @defgroup kernel_apis Kernel APIs
 * @since 1.0
 * @version 1.0.0
 * @{
 * @}
 */

#define K_ANY NULL

#if (CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES) == 0
#error Zero available thread priorities defined!
#endif

#define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
#define K_PRIO_PREEMPT(x) (x)

#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
#define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
#define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
#define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
#define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)

#ifdef CONFIG_POLL
#define Z_POLL_EVENT_OBJ_INIT(obj) \
	.poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
#define Z_DECL_POLL_EVENT sys_dlist_t poll_events;
#else
#define Z_POLL_EVENT_OBJ_INIT(obj)
#define Z_DECL_POLL_EVENT
#endif

struct k_thread;
struct k_mutex;
struct k_sem;
struct k_msgq;
struct k_mbox;
struct k_pipe;
struct k_queue;
struct k_fifo;
struct k_lifo;
struct k_stack;
struct k_mem_slab;
struct k_timer;
struct k_poll_event;
struct k_poll_signal;
struct k_mem_domain;
struct k_mem_partition;
struct k_futex;
struct k_event;

enum execution_context_types {
	K_ISR = 0,
	K_COOP_THREAD,
	K_PREEMPT_THREAD,
};

/* private, used by k_poll and k_work_poll */
struct k_work_poll;
typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);

/**
 * @addtogroup thread_apis
 * @{
 */

typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
				   void *user_data);

/**
 * @brief Iterate over all the threads in the system.
 *
 * This routine iterates over all the threads in the system and
 * calls the user_cb function for each thread.
 *
 * @param user_cb Pointer to the user callback function.
 * @param user_data Pointer to user data.
 *
 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
 * to be effective.
 * @note This API uses @ref k_spin_lock to protect the _kernel.threads
 * list, which means creation of new threads and termination of existing
 * threads are blocked until this API returns.
 */
void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);

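/*
 * Example (an illustrative sketch, not part of the upstream documentation;
 * the callback and function names are placeholders): counting all threads
 * in the system with k_thread_foreach().
 *
 * @code
 * static void count_cb(const struct k_thread *thread, void *user_data)
 * {
 *	unsigned int *count = user_data;
 *
 *	ARG_UNUSED(thread);
 *	(*count)++;
 * }
 *
 * unsigned int count_threads(void)
 * {
 *	unsigned int count = 0;
 *
 *	k_thread_foreach(count_cb, &count);
 *	return count;
 * }
 * @endcode
 */
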
/**
 * @brief Iterate over all the threads running on the specified CPU.
 *
 * This function does the same thing as k_thread_foreach(), except it
 * only loops through the threads running on the specified CPU. If
 * CONFIG_SMP is not defined, the implementation is the same as
 * k_thread_foreach(), with an assert requiring cpu == 0.
 *
 * @param cpu The filtered cpu number
 * @param user_cb Pointer to the user callback function.
 * @param user_data Pointer to user data.
 *
 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
 * to be effective.
 * @note This API uses @ref k_spin_lock to protect the _kernel.threads
 * list, which means creation of new threads and termination of existing
 * threads are blocked until this API returns.
 */
#ifdef CONFIG_SMP
void k_thread_foreach_filter_by_cpu(unsigned int cpu,
				    k_thread_user_cb_t user_cb, void *user_data);
#else
static inline
void k_thread_foreach_filter_by_cpu(unsigned int cpu,
				    k_thread_user_cb_t user_cb, void *user_data)
{
	__ASSERT(cpu == 0, "cpu filter out of bounds");
	ARG_UNUSED(cpu);
	k_thread_foreach(user_cb, user_data);
}
#endif

/**
 * @brief Iterate over all the threads in the system without locking.
 *
 * This routine works exactly like @ref k_thread_foreach, except that the
 * lock is released while user_cb is executed.
 *
 * @param user_cb Pointer to the user callback function.
 * @param user_data Pointer to user data.
 *
 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
 * to be effective.
 * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
 * queue elements. It unlocks it during user callback function processing.
 * If a new task is created when this @c foreach function is in progress,
 * the added new task would not be included in the enumeration.
 * If a task is aborted during this enumeration, there would be a race here
 * and there is a possibility that this aborted task would be included in the
 * enumeration.
 * @note If the task is aborted and the memory occupied by its @c k_thread
 * structure is reused when this @c k_thread_foreach_unlocked is in progress,
 * it might even lead to unstable system behavior.
 * This function may never return, as it would follow @c next task
 * pointers, treating the given pointer as a pointer to a k_thread structure
 * when it is something different by then.
 * Do not reuse the memory that was occupied by the k_thread structure of an
 * aborted task if it was aborted after this function was called in any context.
 */
void k_thread_foreach_unlocked(
	k_thread_user_cb_t user_cb, void *user_data);

/**
 * @brief Iterate over the threads running on the specified CPU without locking.
 *
 * This function does the same thing as k_thread_foreach_unlocked(), except
 * it only loops through the threads running on the specified CPU. If
 * CONFIG_SMP is not defined, the implementation is the same as
 * k_thread_foreach_unlocked(), with an assert requiring cpu == 0.
 *
 * @param cpu The filtered cpu number
 * @param user_cb Pointer to the user callback function.
 * @param user_data Pointer to user data.
 *
 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
 * to be effective.
 * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
 * queue elements. It unlocks it during user callback function processing.
 * If a new task is created when this @c foreach function is in progress,
 * the added new task would not be included in the enumeration.
 * If a task is aborted during this enumeration, there would be a race here
 * and there is a possibility that this aborted task would be included in the
 * enumeration.
 * @note If the task is aborted and the memory occupied by its @c k_thread
 * structure is reused when this @c k_thread_foreach_unlocked is in progress,
 * it might even lead to unstable system behavior.
 * This function may never return, as it would follow @c next task
 * pointers, treating the given pointer as a pointer to a k_thread structure
 * when it is something different by then.
 * Do not reuse the memory that was occupied by the k_thread structure of an
 * aborted task if it was aborted after this function was called in any context.
 */
#ifdef CONFIG_SMP
void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu,
					     k_thread_user_cb_t user_cb, void *user_data);
#else
static inline
void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu,
					     k_thread_user_cb_t user_cb, void *user_data)
{
	__ASSERT(cpu == 0, "cpu filter out of bounds");
	ARG_UNUSED(cpu);
	k_thread_foreach_unlocked(user_cb, user_data);
}
#endif

/** @} */

/**
 * @defgroup thread_apis Thread APIs
 * @ingroup kernel_apis
 * @{
 */

#endif /* !_ASMLANGUAGE */


/*
 * Thread user options. May be needed by assembly code. The common part uses
 * the low bits; arch-specific code uses the high bits.
 */

/**
 * @brief system thread that must not abort
 */
#define K_ESSENTIAL (BIT(0))

#define K_FP_IDX 1
/**
 * @brief FPU registers are managed by context switch
 *
 * @details
 * This option indicates that the thread uses the CPU's floating point
 * registers. This instructs the kernel to take additional steps to save
 * and restore the contents of these registers when scheduling the thread.
 * No effect if @kconfig{CONFIG_FPU_SHARING} is not enabled.
 */
#define K_FP_REGS (BIT(K_FP_IDX))

/**
 * @brief user mode thread
 *
 * This thread has dropped from supervisor mode to user mode and consequently
 * has additional restrictions
 */
#define K_USER (BIT(2))

/**
 * @brief Inherit Permissions
 *
 * @details
 * Indicates that the thread being created should inherit all kernel object
 * permissions from the thread that created it. No effect if
 * @kconfig{CONFIG_USERSPACE} is not enabled.
 */
#define K_INHERIT_PERMS (BIT(3))

/**
 * @brief Callback item state
 *
 * @details
 * This is a single bit of state reserved for "callback manager"
 * utilities (p4wq initially) that need to track whether they have been
 * invoked from within a user-provided callback.
 * Effectively it serves as a tiny bit of zero-overhead TLS data.
 */
#define K_CALLBACK_STATE (BIT(4))

/**
 * @brief DSP registers are managed by context switch
 *
 * @details
 * This option indicates that the thread uses the CPU's DSP registers.
 * This instructs the kernel to take additional steps to save and
 * restore the contents of these registers when scheduling the thread.
 * No effect if @kconfig{CONFIG_DSP_SHARING} is not enabled.
 */
#define K_DSP_IDX 6
#define K_DSP_REGS (BIT(K_DSP_IDX))

/**
 * @brief AGU registers are managed by context switch
 *
 * @details
 * This option indicates that the thread uses the ARC processor's XY
 * memory and DSP feature. Often used with @kconfig{CONFIG_ARC_AGU_SHARING}.
 * No effect if @kconfig{CONFIG_ARC_AGU_SHARING} is not enabled.
 */
#define K_AGU_IDX 7
#define K_AGU_REGS (BIT(K_AGU_IDX))

/**
 * @brief FP and SSE registers are managed by context switch on x86
 *
 * @details
 * This option indicates that the thread uses the x86 CPU's floating point
 * and SSE registers. This instructs the kernel to take additional steps to
 * save and restore the contents of these registers when scheduling
 * the thread. No effect if @kconfig{CONFIG_X86_SSE} is not enabled.
 */
#define K_SSE_REGS (BIT(7))

/* end - thread options */

#if !defined(_ASMLANGUAGE)
/**
 * @brief Dynamically allocate a thread stack.
 *
 * Relevant stack creation flags include:
 * - @ref K_USER allocate a userspace thread (requires `CONFIG_USERSPACE=y`)
 *
 * @param size Stack size in bytes.
 * @param flags Stack creation flags, or 0.
 *
 * @retval the allocated thread stack on success.
 * @retval NULL on failure.
 *
 * @see CONFIG_DYNAMIC_THREAD
 */
__syscall k_thread_stack_t *k_thread_stack_alloc(size_t size, int flags);

/**
 * @brief Free a dynamically allocated thread stack.
 *
 * @param stack Pointer to the thread stack.
 *
 * @retval 0 on success.
 * @retval -EBUSY if the thread stack is in use.
 * @retval -EINVAL if @p stack is invalid.
 * @retval -ENOSYS if dynamic thread stack allocation is disabled.
 *
 * @see CONFIG_DYNAMIC_THREAD
 */
__syscall int k_thread_stack_free(k_thread_stack_t *stack);

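/*
 * Example (an illustrative sketch, assuming CONFIG_DYNAMIC_THREAD=y; the
 * entry function, size, and priority are placeholders): allocating a stack,
 * running a thread on it, then reclaiming the stack once the thread exits.
 *
 * @code
 * static void worker(void *p1, void *p2, void *p3)
 * {
 *	// do work, then return
 * }
 *
 * static struct k_thread worker_thread;
 *
 * int run_worker(void)
 * {
 *	k_thread_stack_t *stack = k_thread_stack_alloc(1024, 0);
 *
 *	if (stack == NULL) {
 *		return -ENOMEM;
 *	}
 *	k_thread_create(&worker_thread, stack, 1024, worker,
 *			NULL, NULL, NULL, K_PRIO_PREEMPT(5), 0, K_NO_WAIT);
 *	k_thread_join(&worker_thread, K_FOREVER);
 *	return k_thread_stack_free(stack);
 * }
 * @endcode
 */
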
/**
 * @brief Create a thread.
 *
 * This routine initializes a thread, then schedules it for execution.
 *
 * The new thread may be scheduled for immediate execution or a delayed start.
 * If the newly spawned thread does not have a delayed start, the kernel
 * scheduler may preempt the current thread to allow the new thread to
 * execute.
 *
 * Thread options are architecture-specific, and can include K_ESSENTIAL,
 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
 * them using "|" (the logical OR operator).
 *
 * Stack objects passed to this function must be originally defined with
 * either of these macros in order to be portable:
 *
 * - K_THREAD_STACK_DEFINE() - For stacks that may support either user or
 *   supervisor threads.
 * - K_KERNEL_STACK_DEFINE() - For stacks that may support supervisor
 *   threads only. These stacks use less memory if CONFIG_USERSPACE is
 *   enabled.
 *
 * The stack_size parameter has constraints. It must either be:
 *
 * - The original size value passed to K_THREAD_STACK_DEFINE() or
 *   K_KERNEL_STACK_DEFINE()
 * - The return value of K_THREAD_STACK_SIZEOF(stack) if the stack was
 *   defined with K_THREAD_STACK_DEFINE()
 * - The return value of K_KERNEL_STACK_SIZEOF(stack) if the stack was
 *   defined with K_KERNEL_STACK_DEFINE().
 *
 * Using other values, or sizeof(stack), may produce undefined behavior.
 *
 * @param new_thread Pointer to uninitialized struct k_thread
 * @param stack Pointer to the stack space.
 * @param stack_size Stack size in bytes.
 * @param entry Thread entry function.
 * @param p1 1st entry point parameter.
 * @param p2 2nd entry point parameter.
 * @param p3 3rd entry point parameter.
 * @param prio Thread priority.
 * @param options Thread options.
 * @param delay Scheduling delay, or K_NO_WAIT (for no delay).
 *
 * @return ID of new thread.
 */
__syscall k_tid_t k_thread_create(struct k_thread *new_thread,
				  k_thread_stack_t *stack,
				  size_t stack_size,
				  k_thread_entry_t entry,
				  void *p1, void *p2, void *p3,
				  int prio, uint32_t options, k_timeout_t delay);

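/*
 * Example (an illustrative sketch; the stack size, priority, and names are
 * placeholders): creating a thread on a statically defined stack, following
 * the stack_size constraints described above.
 *
 * @code
 * #define MY_STACK_SIZE 1024
 *
 * K_THREAD_STACK_DEFINE(my_stack, MY_STACK_SIZE);
 * static struct k_thread my_thread;
 *
 * static void my_entry(void *p1, void *p2, void *p3)
 * {
 *	// thread body
 * }
 *
 * void start_my_thread(void)
 * {
 *	k_tid_t tid = k_thread_create(&my_thread, my_stack,
 *				      K_THREAD_STACK_SIZEOF(my_stack),
 *				      my_entry, NULL, NULL, NULL,
 *				      K_PRIO_PREEMPT(7), 0, K_NO_WAIT);
 *
 *	k_thread_name_set(tid, "my_thread");
 * }
 * @endcode
 */
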
/**
 * @brief Drop a thread's privileges permanently to user mode
 *
 * This allows a supervisor thread to be re-used as a user thread.
 * This function does not return, but control will transfer to the provided
 * entry point as if this was a new user thread.
 *
 * The implementation ensures that the stack buffer contents are erased.
 * Any thread-local storage will be reverted to a pristine state.
 *
 * Memory domain membership, resource pool assignment, kernel object
 * permissions, priority, and thread options are preserved.
 *
 * A common use of this function is to re-use the main thread as a user thread
 * once all supervisor mode-only tasks have been completed.
 *
 * @param entry Function to start executing from
 * @param p1 1st entry point parameter
 * @param p2 2nd entry point parameter
 * @param p3 3rd entry point parameter
 */
FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
					    void *p1, void *p2,
					    void *p3);

/**
 * @brief Grant a thread access to a set of kernel objects
 *
 * This is a convenience function. For the provided thread, grant access to
 * the remaining arguments, which must be pointers to kernel objects.
 *
 * The thread object must be initialized (i.e. running). The objects don't
 * need to be.
 * Note that NULL shouldn't be passed as an argument.
 *
 * @param thread Thread to grant access to objects
 * @param ... list of kernel object pointers
 */
#define k_thread_access_grant(thread, ...) \
	FOR_EACH_FIXED_ARG(k_object_access_grant, (;), (thread), __VA_ARGS__)

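/*
 * Example (an illustrative sketch, assuming CONFIG_USERSPACE=y; the object
 * names are placeholders): granting a user thread access to a semaphore and
 * a message queue before it needs them.
 *
 * @code
 * K_SEM_DEFINE(data_sem, 0, 1);
 * K_MSGQ_DEFINE(data_q, sizeof(uint32_t), 8, 4);
 *
 * void grant_objects(k_tid_t tid)
 * {
 *	k_thread_access_grant(tid, &data_sem, &data_q);
 * }
 * @endcode
 */
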
/**
 * @brief Assign a resource memory pool to a thread
 *
 * By default, threads have no resource pool assigned unless their parent
 * thread has a resource pool, in which case it is inherited. Multiple
 * threads may be assigned to the same memory pool.
 *
 * Changing a thread's resource pool will not migrate allocations from the
 * previous pool.
 *
 * @param thread Target thread to assign a memory pool for resource requests.
 * @param heap Heap object to use for resources,
 *             or NULL if the thread should no longer have a memory pool.
 */
static inline void k_thread_heap_assign(struct k_thread *thread,
					struct k_heap *heap)
{
	thread->resource_pool = heap;
}

#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
/**
 * @brief Obtain stack usage information for the specified thread
 *
 * User threads will need to have permission on the target thread object.
 *
 * Some hardware may prevent inspection of a stack buffer currently in use.
 * If this API is called from supervisor mode, on the currently running thread,
 * on a platform which selects @kconfig{CONFIG_NO_UNUSED_STACK_INSPECTION}, an
 * error will be generated.
 *
 * @param thread Thread to inspect stack information
 * @param unused_ptr Output parameter, filled in with the unused stack space
 *	of the target thread in bytes.
 * @return 0 on success
 * @return -EBADF Bad thread object (user mode only)
 * @return -EPERM No permissions on thread object (user mode only)
 * @return -ENOTSUP Forbidden by hardware policy
 * @return -EINVAL Thread is uninitialized or exited (user mode only)
 * @return -EFAULT Bad memory address for unused_ptr (user mode only)
 */
__syscall int k_thread_stack_space_get(const struct k_thread *thread,
				       size_t *unused_ptr);
#endif

#if (K_HEAP_MEM_POOL_SIZE > 0)
/**
 * @brief Assign the system heap as a thread's resource pool
 *
 * Similar to k_thread_heap_assign(), but the thread will use
 * the kernel heap to draw memory.
 *
 * Use with caution, as a malicious thread could perform DoS attacks on the
 * kernel heap.
 *
 * @param thread Target thread to assign the system heap for resource requests
 */
void k_thread_system_pool_assign(struct k_thread *thread);
#endif /* (K_HEAP_MEM_POOL_SIZE > 0) */

/**
 * @brief Sleep until a thread exits
 *
 * The caller will be put to sleep until the target thread exits, either due
 * to being aborted, self-exiting, or taking a fatal error. This API returns
 * immediately if the thread isn't running.
 *
 * This API may only be called from ISRs with a K_NO_WAIT timeout,
 * where it can be useful as a predicate to detect when a thread has
 * aborted.
 *
 * @param thread Thread to wait to exit
 * @param timeout upper bound time to wait for the thread to exit.
 * @retval 0 success, target thread has exited or wasn't running
 * @retval -EBUSY returned without waiting
 * @retval -EAGAIN waiting period timed out
 * @retval -EDEADLK target thread is joining on the caller, or target thread
 *                  is the caller
 */
__syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);

/**
 * @brief Put the current thread to sleep.
 *
 * This routine puts the current thread to sleep for @a duration,
 * specified as a k_timeout_t object.
 *
 * @param timeout Desired duration of sleep.
 *
 * @return Zero if the requested time has elapsed or if the thread was woken
 * up by the \ref k_wakeup call; otherwise the time left to sleep, rounded up
 * to the nearest millisecond.
 */
__syscall int32_t k_sleep(k_timeout_t timeout);

/**
 * @brief Put the current thread to sleep.
 *
 * This routine puts the current thread to sleep for @a duration milliseconds.
 *
 * @param ms Number of milliseconds to sleep.
 *
 * @return Zero if the requested time has elapsed or if the thread was woken
 * up by the \ref k_wakeup call; otherwise the time left to sleep, rounded up
 * to the nearest millisecond.
 */
static inline int32_t k_msleep(int32_t ms)
{
	return k_sleep(Z_TIMEOUT_MS(ms));
}

/**
 * @brief Put the current thread to sleep with microsecond resolution.
 *
 * This function is unlikely to work as expected without kernel tuning.
 * In particular, because the lower bound on the duration of a sleep is
 * the duration of a tick, @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} must be
 * adjusted to achieve the resolution desired. The implications of doing
 * this must be understood before attempting to use k_usleep(). Use with
 * caution.
 *
 * @param us Number of microseconds to sleep.
 *
 * @return Zero if the requested time has elapsed or if the thread was woken
 * up by the \ref k_wakeup call; otherwise the time left to sleep, rounded up
 * to the nearest microsecond.
 */
__syscall int32_t k_usleep(int32_t us);

/**
 * @brief Cause the current thread to busy wait.
 *
 * This routine causes the current thread to execute a "do nothing" loop for
 * @a usec_to_wait microseconds.
 *
 * @note The clock used for the microsecond-resolution delay here may
 * be skewed relative to the clock used for system timeouts like
 * k_sleep(). For example k_busy_wait(1000) may take slightly more or
 * less time than k_sleep(K_MSEC(1)), with the offset dependent on
 * clock tolerances.
 *
 * @note When the @kconfig{CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE} and
 * @kconfig{CONFIG_PM} options are enabled, this function may not work.
 * The timer/clock used for delay processing may be disabled/inactive.
 */
__syscall void k_busy_wait(uint32_t usec_to_wait);

/**
 * @brief Check whether it is possible to yield in the current context.
 *
 * This routine checks whether the kernel is in a state where it is possible to
 * yield or call blocking APIs. It should be used by code that needs to yield
 * to perform correctly, but can feasibly be called from contexts where that
 * is not possible. For example in the PRE_KERNEL initialization step, or when
 * being run from the idle thread.
 *
 * @return True if it is possible to yield in the current context, false otherwise.
 */
bool k_can_yield(void);

/**
 * @brief Yield the current thread.
 *
 * This routine causes the current thread to yield execution to another
 * thread of the same or higher priority. If there are no other ready threads
 * of the same or higher priority, the routine returns immediately.
 */
__syscall void k_yield(void);

/**
 * @brief Wake up a sleeping thread.
 *
 * This routine prematurely wakes up @a thread from sleeping.
 *
 * If @a thread is not currently sleeping, the routine has no effect.
 *
 * @param thread ID of thread to wake.
 */
__syscall void k_wakeup(k_tid_t thread);

/**
 * @brief Query thread ID of the current thread.
 *
 * This unconditionally queries the kernel via a system call.
 *
 * @note Use k_current_get() unless absolutely sure this is necessary.
 * This should only be used directly where the thread local
 * variable cannot be used or may contain invalid values
 * if thread local storage (TLS) is enabled. If TLS is not
 * enabled, this is the same as k_current_get().
 *
 * @return ID of current thread.
 */
__attribute_const__
__syscall k_tid_t k_sched_current_thread_query(void);

/**
 * @brief Get thread ID of the current thread.
 *
 * @return ID of current thread.
 */
__attribute_const__
static inline k_tid_t k_current_get(void)
{
#ifdef CONFIG_CURRENT_THREAD_USE_TLS

	/* Thread-local cache of current thread ID, set in z_thread_entry() */
	extern Z_THREAD_LOCAL k_tid_t z_tls_current;

	return z_tls_current;
#else
	return k_sched_current_thread_query();
#endif
}

/**
 * @brief Abort a thread.
 *
 * This routine permanently stops execution of @a thread. The thread is taken
 * off all kernel queues it is part of (i.e. the ready queue, the timeout
 * queue, or a kernel object wait queue). However, any kernel resources the
 * thread might currently own (such as mutexes or memory blocks) are not
 * released. It is the responsibility of the caller of this routine to ensure
 * all necessary cleanup is performed.
 *
 * After k_thread_abort() returns, the thread is guaranteed not to be
 * running or to become runnable anywhere on the system. Normally
 * this is done via blocking the caller (in the same manner as
 * k_thread_join()), but in interrupt context on SMP systems the
 * implementation is required to spin for threads that are running on
 * other CPUs.
 *
 * @param thread ID of thread to abort.
 */
__syscall void k_thread_abort(k_tid_t thread);

k_ticks_t z_timeout_expires(const struct _timeout *timeout);
k_ticks_t z_timeout_remaining(const struct _timeout *timeout);

#ifdef CONFIG_SYS_CLOCK_EXISTS

/**
 * @brief Get time when a thread wakes up, in system ticks
 *
 * This routine computes the system uptime when a waiting thread next
 * executes, in units of system ticks. If the thread is not waiting,
 * it returns current system time.
 */
__syscall k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *thread);

static inline k_ticks_t z_impl_k_thread_timeout_expires_ticks(
	const struct k_thread *thread)
{
	return z_timeout_expires(&thread->base.timeout);
}

/**
 * @brief Get time remaining before a thread wakes up, in system ticks
 *
 * This routine computes the time remaining before a waiting thread
 * next executes, in units of system ticks. If the thread is not
 * waiting, it returns zero.
 */
__syscall k_ticks_t k_thread_timeout_remaining_ticks(const struct k_thread *thread);

static inline k_ticks_t z_impl_k_thread_timeout_remaining_ticks(
	const struct k_thread *thread)
{
	return z_timeout_remaining(&thread->base.timeout);
}

#endif /* CONFIG_SYS_CLOCK_EXISTS */

/**
 * @cond INTERNAL_HIDDEN
 */

struct _static_thread_data {
	struct k_thread *init_thread;
	k_thread_stack_t *init_stack;
	unsigned int init_stack_size;
	k_thread_entry_t init_entry;
	void *init_p1;
	void *init_p2;
	void *init_p3;
	int init_prio;
	uint32_t init_options;
	const char *init_name;
#ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
	int32_t init_delay_ms;
#else
	k_timeout_t init_delay;
#endif
};

#ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
#define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay_ms = (ms)
#define Z_THREAD_INIT_DELAY(thread) SYS_TIMEOUT_MS((thread)->init_delay_ms)
#else
#define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay = SYS_TIMEOUT_MS(ms)
#define Z_THREAD_INIT_DELAY(thread) (thread)->init_delay
#endif

#define Z_THREAD_INITIALIZER(thread, stack, stack_size,    \
			     entry, p1, p2, p3,            \
			     prio, options, delay, tname)  \
	{                                                  \
	.init_thread = (thread),                           \
	.init_stack = (stack),                             \
	.init_stack_size = (stack_size),                   \
	.init_entry = (k_thread_entry_t)entry,             \
	.init_p1 = (void *)p1,                             \
	.init_p2 = (void *)p2,                             \
	.init_p3 = (void *)p3,                             \
	.init_prio = (prio),                               \
	.init_options = (options),                         \
	.init_name = STRINGIFY(tname),                     \
	Z_THREAD_INIT_DELAY_INITIALIZER(delay)             \
	}

/*
 * Refer to K_THREAD_DEFINE() and K_KERNEL_THREAD_DEFINE() for
 * information on arguments.
 */
#define Z_THREAD_COMMON_DEFINE(name, stack_size,                        \
			       entry, p1, p2, p3,                       \
			       prio, options, delay)                    \
	struct k_thread _k_thread_obj_##name;                           \
	STRUCT_SECTION_ITERABLE(_static_thread_data,                    \
				_k_thread_data_##name) =                \
		Z_THREAD_INITIALIZER(&_k_thread_obj_##name,             \
				     _k_thread_stack_##name, stack_size,\
				     entry, p1, p2, p3, prio, options,  \
				     delay, name);                      \
	const k_tid_t name = (k_tid_t)&_k_thread_obj_##name

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @brief Statically define and initialize a thread.
 *
 * The thread may be scheduled for immediate execution or a delayed start.
 *
 * Thread options are architecture-specific, and can include K_ESSENTIAL,
 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
 * them using "|" (the logical OR operator).
 *
 * The ID of the thread can be accessed using:
 *
 * @code extern const k_tid_t <name>; @endcode
 *
 * @param name Name of the thread.
 * @param stack_size Stack size in bytes.
 * @param entry Thread entry function.
 * @param p1 1st entry point parameter.
 * @param p2 2nd entry point parameter.
 * @param p3 3rd entry point parameter.
 * @param prio Thread priority.
 * @param options Thread options.
 * @param delay Scheduling delay (in milliseconds), zero for no delay.
 *
 * @note Static threads with zero delay should not normally have
 * MetaIRQ priority levels. This can preempt the system
 * initialization handling (depending on the priority of the main
 * thread) and cause surprising ordering side effects. It will not
 * affect anything in the OS per se, but consider it bad practice.
 * Use a SYS_INIT() callback if you need to run code before entrance
 * to the application main().
 */
#define K_THREAD_DEFINE(name, stack_size,                           \
			entry, p1, p2, p3,                          \
			prio, options, delay)                       \
	K_THREAD_STACK_DEFINE(_k_thread_stack_##name, stack_size);  \
	Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3, \
			       prio, options, delay)

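/*
 * Example (an illustrative sketch; the names, size, and priority are
 * placeholders, and toggle_led() is a hypothetical helper): defining a
 * thread at build time that starts 500 ms after boot.
 *
 * @code
 * static void blink_entry(void *p1, void *p2, void *p3)
 * {
 *	while (1) {
 *		toggle_led();	// hypothetical helper
 *		k_msleep(100);
 *	}
 * }
 *
 * K_THREAD_DEFINE(blink_tid, 1024, blink_entry, NULL, NULL, NULL,
 *		   K_PRIO_PREEMPT(7), 0, 500);
 * @endcode
 */
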
/**
 * @brief Statically define and initialize a thread intended to run only in kernel mode.
 *
 * The thread may be scheduled for immediate execution or a delayed start.
 *
 * Thread options are architecture-specific, and can include K_ESSENTIAL,
 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
 * them using "|" (the logical OR operator).
 *
 * The ID of the thread can be accessed using:
 *
 * @code extern const k_tid_t <name>; @endcode
 *
 * @note Threads defined by this can only run in kernel mode, and cannot be
 * transformed into a user thread via k_thread_user_mode_enter().
 *
 * @warning Depending on the architecture, the stack size (@p stack_size)
 * may need to be a multiple of CONFIG_MMU_PAGE_SIZE (if an MMU is used)
 * or a power of two in size (if an MPU is used).
 *
 * @param name Name of the thread.
 * @param stack_size Stack size in bytes.
 * @param entry Thread entry function.
 * @param p1 1st entry point parameter.
 * @param p2 2nd entry point parameter.
 * @param p3 3rd entry point parameter.
 * @param prio Thread priority.
 * @param options Thread options.
 * @param delay Scheduling delay (in milliseconds), zero for no delay.
 */
#define K_KERNEL_THREAD_DEFINE(name, stack_size,                    \
			       entry, p1, p2, p3,                   \
			       prio, options, delay)                \
	K_KERNEL_STACK_DEFINE(_k_thread_stack_##name, stack_size);  \
	Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3, \
			       prio, options, delay)

/**
 * @brief Get a thread's priority.
 *
 * This routine gets the priority of @a thread.
 *
 * @param thread ID of thread whose priority is needed.
 *
 * @return Priority of @a thread.
 */
__syscall int k_thread_priority_get(k_tid_t thread);

/**
 * @brief Set a thread's priority.
 *
 * This routine immediately changes the priority of @a thread.
 *
 * Rescheduling can occur immediately depending on the priority @a thread is
 * set to:
 *
 * - If its priority is raised above the priority of a currently scheduled
 *   preemptible thread, @a thread will be scheduled in.
 *
 * - If the caller lowers the priority of a currently scheduled preemptible
 *   thread below that of other threads in the system, the thread of the
 *   highest priority will be scheduled in.
 *
 * Priority can be assigned in the range of -CONFIG_NUM_COOP_PRIORITIES to
 * CONFIG_NUM_PREEMPT_PRIORITIES-1, where -CONFIG_NUM_COOP_PRIORITIES is the
 * highest priority.
 *
 * @param thread ID of thread whose priority is to be set.
 * @param prio New priority.
 *
 * @warning Changing the priority of a thread currently involved in mutex
 * priority inheritance may result in undefined behavior.
 */
__syscall void k_thread_priority_set(k_tid_t thread, int prio);


#ifdef CONFIG_SCHED_DEADLINE
/**
 * @brief Set deadline expiration time for scheduler
 *
 * This sets the "deadline" expiration as a time delta from the
 * current time, in the same units used by k_cycle_get_32(). The
 * scheduler (when deadline scheduling is enabled) will choose the
 * next expiring thread when selecting between threads at the same
 * static priority. Threads at different priorities will be scheduled
 * according to their static priority.
 *
 * @note Deadlines are stored internally using 32 bit unsigned
 * integers. The number of cycles between the "first" deadline in the
 * scheduler queue and the "last" deadline must be less than 2^31 (i.e.
 * a signed non-negative quantity). Failure to adhere to this rule
 * may result in scheduled threads running in an incorrect deadline
 * order.
 *
 * @note Despite the API naming, the scheduler makes no guarantees
 * that the thread WILL be scheduled within that deadline, nor does it take
 * extra metadata (like e.g. the "runtime" and "period" parameters in
 * Linux sched_setattr()) that allows the kernel to validate the
 * scheduling for achievability. Such features could be implemented
 * above this call, which is simply input to the priority selection
 * logic.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_DEADLINE} in your project
 * configuration.
 *
 * @param thread A thread on which to set the deadline
 * @param deadline A time delta, in cycle units
 */
__syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
#endif

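/*
 * Example (an illustrative sketch, assuming CONFIG_SCHED_DEADLINE=y and the
 * k_ms_to_cyc_ceil32() time conversion helper): setting a deadline 1 ms from
 * now, expressed in hardware cycles as this API requires.
 *
 * @code
 * void set_millisecond_deadline(k_tid_t tid)
 * {
 *	k_thread_deadline_set(tid, k_ms_to_cyc_ceil32(1));
 * }
 * @endcode
 */
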
/**
 * @brief Invoke the scheduler
 *
 * This routine invokes the scheduler to force a schedule point on the current
 * CPU. If invoked from within a thread, the scheduler will be invoked
 * immediately (provided interrupts were not locked when invoked). If invoked
 * from within an ISR, the scheduler will be invoked upon exiting the ISR.
 *
 * Invoking the scheduler allows the kernel to make an immediate determination
 * as to what the next thread to execute should be. Unlike yielding, this
 * routine is not guaranteed to switch to a thread of equal or higher priority
 * if any are available. For example, if the current thread is cooperative and
 * there is a still higher priority cooperative thread that is ready, then
 * yielding will switch to that higher priority thread whereas this routine
 * will not.
 *
 * Most applications will never use this routine.
 */
__syscall void k_reschedule(void);

#ifdef CONFIG_SCHED_CPU_MASK
/**
 * @brief Sets all CPU enable masks to zero
 *
 * After this returns, the thread will no longer be schedulable on any
 * CPUs. The thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_clear(k_tid_t thread);

/**
 * @brief Sets all CPU enable masks to one
 *
 * After this returns, the thread will be schedulable on any CPU. The
 * thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_enable_all(k_tid_t thread);

/**
 * @brief Enable thread to run on specified CPU
 *
 * The thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @param cpu CPU index
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_enable(k_tid_t thread, int cpu);

/**
 * @brief Prevent a thread from running on the specified CPU
 *
 * The thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @param cpu CPU index
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_disable(k_tid_t thread, int cpu);

/**
 * @brief Pin a thread to a CPU
 *
 * Pin a thread to a CPU by first clearing the cpu mask and then enabling the
 * thread on the selected CPU.
 *
 * @param thread Thread to operate upon
 * @param cpu CPU index
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_pin(k_tid_t thread, int cpu);
#endif

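/*
 * Example (an illustrative sketch, assuming CONFIG_SCHED_CPU_MASK=y on an
 * SMP target): pinning a thread to CPU 1 before it becomes runnable. The
 * thread is assumed to have been created with a K_FOREVER delay so that it
 * is not yet runnable, per the constraint above.
 *
 * @code
 * int pin_to_cpu1(k_tid_t tid)
 * {
 *	int ret = k_thread_cpu_pin(tid, 1);
 *
 *	if (ret == 0) {
 *		k_thread_start(tid);
 *	}
 *	return ret;
 * }
 * @endcode
 */
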
/**
 * @brief Suspend a thread.
 *
 * This routine prevents the kernel scheduler from making @a thread
 * the current thread. All other internal operations on @a thread are
 * still performed; for example, kernel objects it is waiting on are
 * still handed to it. Thread suspension does not impact any timeout
 * upon which the thread may be waiting (such as a timeout from a call
 * to k_sem_take() or k_sleep()). Thus if the timeout expires while the
 * thread is suspended, it is still suspended until k_thread_resume()
 * is called.
 *
 * When the target thread is active on another CPU, the caller will block until
 * the target thread is halted (suspended or aborted). But if the caller is in
 * an interrupt context, it will spin waiting for that target thread active on
 * another CPU to halt.
 *
 * If @a thread is already suspended, the routine has no effect.
 *
 * @param thread ID of thread to suspend.
 */
__syscall void k_thread_suspend(k_tid_t thread);

/**
 * @brief Resume a suspended thread.
 *
 * This routine reverses the thread suspension from k_thread_suspend()
 * and allows the kernel scheduler to make @a thread the current thread
 * when it is next eligible for that role.
 *
 * If @a thread is not currently suspended, the routine has no effect.
 *
 * @param thread ID of thread to resume.
 */
__syscall void k_thread_resume(k_tid_t thread);

/**
 * @brief Start an inactive thread
 *
 * If a thread was created with K_FOREVER in the delay parameter, it will
 * not be added to the scheduling queue until this function is called
 * on it.
 *
 * @note This is a legacy API for compatibility. Modern Zephyr
 * threads are initialized in the "sleeping" state and do not need
 * special handling for "start".
 *
 * @param thread thread to start
 */
static inline void k_thread_start(k_tid_t thread)
{
	k_wakeup(thread);
}

/**
 * @brief Set time-slicing period and scope.
 *
 * This routine specifies how the scheduler will perform time slicing of
 * preemptible threads.
 *
 * To enable time slicing, @a slice must be non-zero. The scheduler
 * ensures that no thread runs for more than the specified time limit
 * before other threads of that priority are given a chance to execute.
 * Any thread whose priority is higher than @a prio is exempted, and may
 * execute as long as desired without being preempted due to time slicing.
 *
 * Time slicing only limits the maximum amount of time a thread may continuously
 * execute. Once the scheduler selects a thread for execution, there is no
 * minimum guaranteed time the thread will execute before threads of greater or
 * equal priority are scheduled.
 *
 * When the current thread is the only one of that priority eligible
 * for execution, this routine has no effect; the thread is immediately
 * rescheduled after the slice period expires.
 *
 * To disable timeslicing, set both @a slice and @a prio to zero.
 *
 * @param slice Maximum time slice length (in milliseconds).
 * @param prio Highest thread priority level eligible for time slicing.
 */
void k_sched_time_slice_set(int32_t slice, int prio);

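/*
 * Example (an illustrative sketch): allowing preemptible threads whose
 * priority is not higher than K_PRIO_PREEMPT(0) to run for at most 10 ms
 * at a time.
 *
 * @code
 * void enable_round_robin(void)
 * {
 *	k_sched_time_slice_set(10, K_PRIO_PREEMPT(0));
 * }
 * @endcode
 */
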
/**
 * @brief Set thread time slice
 *
 * As for k_sched_time_slice_set, but (when
 * CONFIG_TIMESLICE_PER_THREAD=y) sets the timeslice for a specific
 * thread. When non-zero, this timeslice will take precedence over
 * the global value.
 *
 * When such a thread's timeslice expires, the configured callback
 * will be called before the thread is removed/re-added to the run
 * queue. This callback will occur in interrupt context, and the
 * specified thread is guaranteed to have been preempted by the
 * currently-executing ISR. Such a callback is free to, for example,
 * modify the thread priority or slice time for future execution,
 * suspend the thread, etc...
 *
 * @note Unlike the older API, the time slice parameter here is
 * specified in ticks, not milliseconds. Ticks have always been the
 * internal unit, and not all platforms have integer conversions
 * between the two.
 *
 * @note Threads with a non-zero slice time set will be timesliced
 * always, even if they are higher priority than the maximum timeslice
 * priority set via k_sched_time_slice_set().
 *
 * @note The callback notification for slice expiration happens, as it
 * must, while the thread is still "current", and thus it happens
 * before any registered timeouts at this tick. This has the somewhat
 * confusing side effect that the tick time (c.f. k_uptime_get()) does
 * not yet reflect the expired ticks. Applications wishing to make
 * fine-grained timing decisions within this callback should use the
 * cycle API, or derived facilities like k_thread_runtime_stats_get().
 *
 * @param th A valid, initialized thread
 * @param slice_ticks Maximum timeslice, in ticks
 * @param expired Callback function called on slice expiration
 * @param data Parameter for the expiration handler
 */
void k_thread_time_slice_set(struct k_thread *th, int32_t slice_ticks,
			     k_thread_timeslice_fn_t expired, void *data);

/** @} */

/**
 * @addtogroup isr_apis
 * @{
 */

/**
 * @brief Determine if code is running at interrupt level.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it is a thread or an ISR.
 *
 * @funcprops \isr_ok
 *
 * @return false if invoked by a thread.
 * @return true if invoked by an ISR.
 */
bool k_is_in_isr(void);

/**
 * @brief Determine if code is running in a preemptible thread.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it can be preempted by another thread. The routine returns a 'true'
 * value if all of the following conditions are met:
 *
 * - The code is running in a thread, not in an ISR.
 * - The thread's priority is in the preemptible range.
 * - The thread has not locked the scheduler.
 *
 * @funcprops \isr_ok
 *
 * @return 0 if invoked by an ISR or by a cooperative thread.
 * @return Non-zero if invoked by a preemptible thread.
 */
__syscall int k_is_preempt_thread(void);

/**
 * @brief Test whether startup is in the before-main-task phase.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it is being invoked before the kernel is fully active.
 *
 * @funcprops \isr_ok
 *
 * @return true if invoked before post-kernel initialization
 * @return false if invoked during/after post-kernel initialization
 */
static inline bool k_is_pre_kernel(void)
{
	extern bool z_sys_post_kernel; /* in init.c */

	return !z_sys_post_kernel;
}

/**
 * @}
 */

/**
 * @addtogroup thread_apis
 * @{
 */

/**
 * @brief Lock the scheduler.
 *
 * This routine prevents the current thread from being preempted by another
 * thread by instructing the scheduler to treat it as a cooperative thread.
 * If the thread subsequently performs an operation that makes it unready,
 * it will be context switched out in the normal manner. When the thread
 * again becomes the current thread, its non-preemptible status is maintained.
 *
 * This routine can be called recursively.
 *
 * Owing to clever implementation details, scheduler locks are
 * extremely fast for non-userspace threads (just one byte
 * inc/decrement in the thread struct).
 *
 * @note This works by elevating the thread priority temporarily to a
 * cooperative priority, allowing cheap synchronization vs. other
 * preemptible or cooperative threads running on the current CPU. It
 * does not prevent preemption or asynchrony of other types. It does
 * not prevent threads from running on other CPUs when CONFIG_SMP=y.
 * It does not prevent interrupts from happening, nor does it prevent
 * threads with MetaIRQ priorities from preempting the current thread.
 * In general this is a historical API not well-suited to modern
 * applications, use with care.
 */
void k_sched_lock(void);

/**
 * @brief Unlock the scheduler.
 *
 * This routine reverses the effect of a previous call to k_sched_lock().
 * A thread must call the routine once for each time it called k_sched_lock()
 * before the thread becomes preemptible.
 */
void k_sched_unlock(void);

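/*
 * Example (an illustrative sketch; the structure and function are
 * placeholders): using a scheduler lock to keep other threads on this CPU
 * from observing a shared structure mid-update. Per the note above, this
 * does not protect against ISRs or threads on other CPUs.
 *
 * @code
 * void update_shared_state(struct shared_state *state)
 * {
 *	k_sched_lock();
 *	state->a++;
 *	state->b = state->a * 2;
 *	k_sched_unlock();
 * }
 * @endcode
 */
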
/**
 * @brief Set current thread's custom data.
 *
 * This routine sets the custom data for the current thread to @a value.
 *
 * Custom data is not used by the kernel itself, and is freely available
 * for a thread to use as it sees fit. It can be used as a framework
 * upon which to build thread-local storage.
 *
 * @param value New custom data value.
 */
__syscall void k_thread_custom_data_set(void *value);

/**
 * @brief Get current thread's custom data.
 *
 * This routine returns the custom data for the current thread.
 *
 * @return Current custom data value.
 */
__syscall void *k_thread_custom_data_get(void);

/**
 * @brief Set current thread name
 *
 * Set the name of the thread to be used when @kconfig{CONFIG_THREAD_MONITOR}
 * is enabled for tracing and debugging.
 *
 * @param thread Thread to set name, or NULL to set the current thread
 * @param str Name string
 * @retval 0 on success
 * @retval -EFAULT Memory access error with supplied string
 * @retval -ENOSYS Thread name configuration option not enabled
 * @retval -EINVAL Thread name too long
 */
__syscall int k_thread_name_set(k_tid_t thread, const char *str);

/**
 * @brief Get thread name
 *
 * Get the name of a thread
 *
 * @param thread Thread ID
 * @retval Thread name, or NULL if configuration not enabled
 */
const char *k_thread_name_get(k_tid_t thread);

/**
 * @brief Copy the thread name into a supplied buffer
 *
 * @param thread Thread to obtain name information
 * @param buf Destination buffer
 * @param size Destination buffer size
 * @retval -ENOSPC Destination buffer too small
 * @retval -EFAULT Memory access error
 * @retval -ENOSYS Thread name feature not enabled
 * @retval 0 Success
 */
__syscall int k_thread_name_copy(k_tid_t thread, char *buf,
				 size_t size);

/**
 * @brief Get thread state string
 *
 * This routine generates a human friendly string containing the thread's
 * state, and copies as much of it as possible into @a buf.
 *
 * @param thread_id Thread ID
 * @param buf Buffer into which to copy state strings
 * @param buf_size Size of the buffer
 *
 * @retval Pointer to @a buf if data was copied, else a pointer to "".
 */
const char *k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size);

/**
 * @}
 */

/**
 * @addtogroup clock_apis
 * @{
 */

/**
 * @brief Generate null timeout delay.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * not to wait if the requested operation cannot be performed immediately.
 *
 * @return Timeout delay value.
 */
#define K_NO_WAIT Z_TIMEOUT_NO_WAIT

/**
 * @brief Generate timeout delay from nanoseconds.
 *
 * This macro generates a timeout delay that instructs a kernel API to
 * wait up to @a t nanoseconds to perform the requested operation.
 * Note that timer precision is limited to the tick rate, not the
 * requested value.
 *
 * @param t Duration in nanoseconds.
 *
 * @return Timeout delay value.
 */
#define K_NSEC(t) Z_TIMEOUT_NS(t)

/**
 * @brief Generate timeout delay from microseconds.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a t microseconds to perform the requested operation.
 * Note that timer precision is limited to the tick rate, not the
 * requested value.
 *
 * @param t Duration in microseconds.
 *
 * @return Timeout delay value.
 */
#define K_USEC(t) Z_TIMEOUT_US(t)

/**
 * @brief Generate timeout delay from cycles.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a t cycles to perform the requested operation.
 *
 * @param t Duration in cycles.
 *
 * @return Timeout delay value.
 */
#define K_CYC(t) Z_TIMEOUT_CYC(t)

/**
 * @brief Generate timeout delay from system ticks.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a t ticks to perform the requested operation.
 *
 * @param t Duration in system ticks.
 *
 * @return Timeout delay value.
 */
#define K_TICKS(t) Z_TIMEOUT_TICKS(t)

/**
 * @brief Generate timeout delay from milliseconds.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a ms milliseconds to perform the requested operation.
 *
 * @param ms Duration in milliseconds.
 *
 * @return Timeout delay value.
 */
#define K_MSEC(ms) Z_TIMEOUT_MS(ms)

/**
 * @brief Generate timeout delay from seconds.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a s seconds to perform the requested operation.
 *
 * @param s Duration in seconds.
 *
 * @return Timeout delay value.
 */
#define K_SECONDS(s) K_MSEC((s) * MSEC_PER_SEC)

/**
 * @brief Generate timeout delay from minutes.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a m minutes to perform the requested operation.
 *
 * @param m Duration in minutes.
 *
 * @return Timeout delay value.
 */
#define K_MINUTES(m) K_SECONDS((m) * 60)

/**
 * @brief Generate timeout delay from hours.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a h hours to perform the requested operation.
 *
 * @param h Duration in hours.
 *
 * @return Timeout delay value.
 */
#define K_HOURS(h) K_MINUTES((h) * 60)

/**
 * @brief Generate infinite timeout delay.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait as long as necessary to perform the requested operation.
 *
 * @return Timeout delay value.
 */
#define K_FOREVER Z_FOREVER

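/*
 * Example (an illustrative sketch; k_sem_take() and the semaphore stand in
 * for any timeout-accepting kernel API): the three common ways a relative
 * timeout is expressed.
 *
 * @code
 * extern struct k_sem my_sem;
 *
 * void timeout_styles(void)
 * {
 *	(void)k_sem_take(&my_sem, K_NO_WAIT);   // poll, never block
 *	(void)k_sem_take(&my_sem, K_MSEC(50));  // wait up to 50 ms
 *	(void)k_sem_take(&my_sem, K_FOREVER);   // wait indefinitely
 * }
 * @endcode
 */
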
#ifdef CONFIG_TIMEOUT_64BIT

/**
 * @brief Generates an absolute/uptime timeout value from system ticks
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in system ticks. That is, the
 * timeout will expire immediately after the system uptime reaches the
 * specified tick count.
 *
 * @param t Tick uptime value
 * @return Timeout delay value
 */
#define K_TIMEOUT_ABS_TICKS(t) \
	Z_TIMEOUT_TICKS(Z_TICK_ABS((k_ticks_t)MAX(t, 0)))

/**
 * @brief Generates an absolute/uptime timeout value from milliseconds
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in milliseconds. That is,
 * the timeout will expire immediately after the system uptime reaches
 * the specified time.
 *
 * @param t Millisecond uptime value
 * @return Timeout delay value
 */
#define K_TIMEOUT_ABS_MS(t) K_TIMEOUT_ABS_TICKS(k_ms_to_ticks_ceil64(t))

/**
 * @brief Generates an absolute/uptime timeout value from microseconds
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in microseconds. That is,
 * the timeout will expire immediately after the system uptime reaches
 * the specified time. Note that timer precision is limited by the
 * system tick rate and not the requested timeout value.
 *
 * @param t Microsecond uptime value
 * @return Timeout delay value
 */
#define K_TIMEOUT_ABS_US(t) K_TIMEOUT_ABS_TICKS(k_us_to_ticks_ceil64(t))

/**
 * @brief Generates an absolute/uptime timeout value from nanoseconds
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in nanoseconds. That is,
 * the timeout will expire immediately after the system uptime reaches
 * the specified time. Note that timer precision is limited by the
 * system tick rate and not the requested timeout value.
 *
 * @param t Nanosecond uptime value
 * @return Timeout delay value
 */
#define K_TIMEOUT_ABS_NS(t) K_TIMEOUT_ABS_TICKS(k_ns_to_ticks_ceil64(t))

/**
 * @brief Generates an absolute/uptime timeout value from system cycles
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in cycles. That is, the
 * timeout will expire immediately after the system uptime reaches the
 * specified time. Note that timer precision is limited by the system
 * tick rate and not the requested timeout value.
 *
 * @param t Cycle uptime value
 * @return Timeout delay value
 */
#define K_TIMEOUT_ABS_CYC(t) K_TIMEOUT_ABS_TICKS(k_cyc_to_ticks_ceil64(t))

#endif

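/*
 * Example (an illustrative sketch, assuming CONFIG_TIMEOUT_64BIT=y and a
 * hypothetical do_periodic_work() helper): sleeping until a fixed point on
 * the uptime axis rather than for a relative delay, which avoids cumulative
 * drift in periodic loops.
 *
 * @code
 * void periodic_loop(void)
 * {
 *	int64_t next_ms = k_uptime_get() + 100;
 *
 *	while (1) {
 *		do_periodic_work();	// hypothetical helper
 *		k_sleep(K_TIMEOUT_ABS_MS(next_ms));
 *		next_ms += 100;
 *	}
 * }
 * @endcode
 */
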
/**
 * @}
 */

/**
 * @cond INTERNAL_HIDDEN
 */

struct k_timer {
	/*
	 * _timeout structure must be first here if we want to use
	 * dynamic timer allocation. timeout.node is used in the double-linked
	 * list of free timers
	 */
	struct _timeout timeout;

	/* wait queue for the (single) thread waiting on this timer */
	_wait_q_t wait_q;

	/* runs in ISR context */
	void (*expiry_fn)(struct k_timer *timer);

	/* runs in the context of the thread that calls k_timer_stop() */
	void (*stop_fn)(struct k_timer *timer);

	/* timer period */
	k_timeout_t period;

	/* timer status */
	uint32_t status;

	/* user-specific data, also used to support legacy features */
	void *user_data;

	SYS_PORT_TRACING_TRACKING_FIELD(k_timer)

#ifdef CONFIG_OBJ_CORE_TIMER
	struct k_obj_core obj_core;
#endif
};

#define Z_TIMER_INITIALIZER(obj, expiry, stop) \
	{ \
	.timeout = { \
		.node = {}, \
		.fn = z_timer_expiration_handler, \
		.dticks = 0, \
	}, \
	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
	.expiry_fn = expiry, \
	.stop_fn = stop, \
	.status = 0, \
	.user_data = 0, \
	}

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @defgroup timer_apis Timer APIs
 * @ingroup kernel_apis
 * @{
 */
1605
1606 /**
1607 * @typedef k_timer_expiry_t
1608 * @brief Timer expiry function type.
1609 *
1610 * A timer's expiry function is executed by the system clock interrupt handler
1611 * each time the timer expires. The expiry function is optional, and is only
1612 * invoked if the timer has been initialized with one.
1613 *
1614 * @param timer Address of timer.
1615 */
1616 typedef void (*k_timer_expiry_t)(struct k_timer *timer);
1617
1618 /**
1619 * @typedef k_timer_stop_t
1620 * @brief Timer stop function type.
1621 *
1622 * A timer's stop function is executed if the timer is stopped prematurely.
1623 * The function runs in the context of call that stops the timer. As
1624 * k_timer_stop() can be invoked from an ISR, the stop function must be
1625 * callable from interrupt context (isr-ok).
1626 *
1627 * The stop function is optional, and is only invoked if the timer has been
1628 * initialized with one.
1629 *
1630 * @param timer Address of timer.
1631 */
1632 typedef void (*k_timer_stop_t)(struct k_timer *timer);
1633
1634 /**
1635 * @brief Statically define and initialize a timer.
1636 *
1637 * The timer can be accessed outside the module where it is defined using:
1638 *
1639 * @code extern struct k_timer <name>; @endcode
1640 *
1641 * @param name Name of the timer variable.
1642 * @param expiry_fn Function to invoke each time the timer expires.
1643 * @param stop_fn Function to invoke if the timer is stopped while running.
1644 */
1645 #define K_TIMER_DEFINE(name, expiry_fn, stop_fn) \
1646 STRUCT_SECTION_ITERABLE(k_timer, name) = \
1647 Z_TIMER_INITIALIZER(name, expiry_fn, stop_fn)
1648
1649 /**
1650 * @brief Initialize a timer.
1651 *
1652 * This routine initializes a timer, prior to its first use.
1653 *
1654 * @param timer Address of timer.
1655 * @param expiry_fn Function to invoke each time the timer expires.
1656 * @param stop_fn Function to invoke if the timer is stopped while running.
1657 */
1658 void k_timer_init(struct k_timer *timer,
1659 k_timer_expiry_t expiry_fn,
1660 k_timer_stop_t stop_fn);
1661
1662 /**
1663 * @brief Start a timer.
1664 *
1665 * This routine starts a timer, and resets its status to zero. The timer
1666 * begins counting down using the specified duration and period values.
1667 *
1668 * Attempting to start a timer that is already running is permitted.
1669 * The timer's status is reset to zero and the timer begins counting down
1670 * using the new duration and period values.
1671 *
1672 * @param timer Address of timer.
1673 * @param duration Initial timer duration.
1674 * @param period Timer period.
1675 */
1676 __syscall void k_timer_start(struct k_timer *timer,
1677 k_timeout_t duration, k_timeout_t period);
1678
1679 /**
1680 * @brief Stop a timer.
1681 *
1682 * This routine stops a running timer prematurely. The timer's stop function,
1683 * if one exists, is invoked by the caller.
1684 *
1685 * Attempting to stop a timer that is not running is permitted, but has no
1686 * effect on the timer.
1687 *
1688 * @note The stop handler has to be callable from ISRs if @a k_timer_stop is to
1689 * be called from ISRs.
1690 *
1691 * @funcprops \isr_ok
1692 *
1693 * @param timer Address of timer.
1694 */
1695 __syscall void k_timer_stop(struct k_timer *timer);
1696
1697 /**
1698 * @brief Read timer status.
1699 *
1700 * This routine reads the timer's status, which indicates the number of times
1701 * it has expired since its status was last read.
1702 *
1703 * Calling this routine resets the timer's status to zero.
1704 *
1705 * @param timer Address of timer.
1706 *
1707 * @return Timer status.
1708 */
1709 __syscall uint32_t k_timer_status_get(struct k_timer *timer);
1710
1711 /**
1712 * @brief Synchronize thread to timer expiration.
1713 *
1714 * This routine blocks the calling thread until the timer's status is non-zero
1715 * (indicating that it has expired at least once since it was last examined)
1716 * or the timer is stopped. If the timer status is already non-zero,
1717 * or the timer is already stopped, the caller continues without waiting.
1718 *
1719 * Calling this routine resets the timer's status to zero.
1720 *
1721 * This routine must not be used by interrupt handlers, since they are not
1722 * allowed to block.
1723 *
1724 * @param timer Address of timer.
1725 *
1726 * @return Timer status.
1727 */
1728 __syscall uint32_t k_timer_status_sync(struct k_timer *timer);
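
/*
 * Example: pacing a thread at a fixed 50 ms period with a timer. A
 * minimal sketch; the timer name, the period and do_periodic_work()
 * are arbitrary choices, not requirements of the API:
 *
 *        K_TIMER_DEFINE(my_timer, NULL, NULL);
 *
 *        k_timer_start(&my_timer, K_MSEC(50), K_MSEC(50));
 *        for (;;) {
 *                k_timer_status_sync(&my_timer);  // block until next expiry
 *                do_periodic_work();
 *        }
 */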

#ifdef CONFIG_SYS_CLOCK_EXISTS

/**
 * @brief Get next expiration time of a timer, in system ticks
 *
 * This routine returns the system uptime at which the timer will next
 * expire, in units of system ticks. If the timer is not running, the
 * current system time is returned.
 *
 * @param timer The timer object
 * @return Uptime of expiration, in ticks
 */
__syscall k_ticks_t k_timer_expires_ticks(const struct k_timer *timer);

static inline k_ticks_t z_impl_k_timer_expires_ticks(
                                const struct k_timer *timer)
{
        return z_timeout_expires(&timer->timeout);
}

/**
 * @brief Get time remaining before a timer next expires, in system ticks
 *
 * This routine computes the time remaining before a running timer
 * next expires, in units of system ticks. If the timer is not
 * running, it returns zero.
 *
 * @param timer The timer object
 * @return Remaining time until expiration, in ticks
 */
__syscall k_ticks_t k_timer_remaining_ticks(const struct k_timer *timer);

static inline k_ticks_t z_impl_k_timer_remaining_ticks(
                                const struct k_timer *timer)
{
        return z_timeout_remaining(&timer->timeout);
}

/**
 * @brief Get time remaining before a timer next expires.
 *
 * This routine computes the (approximate) time remaining before a running
 * timer next expires. If the timer is not running, it returns zero.
 *
 * @param timer Address of timer.
 *
 * @return Remaining time (in milliseconds).
 */
static inline uint32_t k_timer_remaining_get(struct k_timer *timer)
{
        return k_ticks_to_ms_floor32(k_timer_remaining_ticks(timer));
}

#endif /* CONFIG_SYS_CLOCK_EXISTS */

/**
 * @brief Associate user-specific data with a timer.
 *
 * This routine records the @a user_data with the @a timer, to be retrieved
 * later.
 *
 * It can be used e.g. in a timer handler shared across multiple subsystems to
 * retrieve data specific to the subsystem this timer is associated with.
 *
 * @param timer Address of timer.
 * @param user_data User data to associate with the timer.
 */
__syscall void k_timer_user_data_set(struct k_timer *timer, void *user_data);

/**
 * @internal
 */
static inline void z_impl_k_timer_user_data_set(struct k_timer *timer,
                                                void *user_data)
{
        timer->user_data = user_data;
}

/**
 * @brief Retrieve the user-specific data from a timer.
 *
 * @param timer Address of timer.
 *
 * @return The user data.
 */
__syscall void *k_timer_user_data_get(const struct k_timer *timer);

static inline void *z_impl_k_timer_user_data_get(const struct k_timer *timer)
{
        return timer->user_data;
}
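
/*
 * Example: one expiry handler shared by several timers, using the user
 * data to tell the timers apart. An illustrative sketch; struct my_ctx
 * and the handler are hypothetical:
 *
 *        static void shared_expiry(struct k_timer *timer)
 *        {
 *                struct my_ctx *ctx = k_timer_user_data_get(timer);
 *
 *                ctx->expirations++;    // runs in ISR context
 *        }
 *
 *        k_timer_init(&my_timer, shared_expiry, NULL);
 *        k_timer_user_data_set(&my_timer, &my_ctx);
 */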

/** @} */

/**
 * @addtogroup clock_apis
 * @ingroup kernel_apis
 * @{
 */

/**
 * @brief Get system uptime, in system ticks.
 *
 * This routine returns the elapsed time since the system booted, in
 * ticks (cf. @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC}), which is the
 * fundamental unit of resolution of kernel timekeeping.
 *
 * @return Current uptime in ticks.
 */
__syscall int64_t k_uptime_ticks(void);

/**
 * @brief Get system uptime.
 *
 * This routine returns the elapsed time since the system booted,
 * in milliseconds.
 *
 * @note
 * While this function returns time in milliseconds, it does
 * not mean it has millisecond resolution. The actual resolution depends on
 * the @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
 *
 * @return Current uptime in milliseconds.
 */
static inline int64_t k_uptime_get(void)
{
        return k_ticks_to_ms_floor64(k_uptime_ticks());
}

/**
 * @brief Get system uptime (32-bit version).
 *
 * This routine returns the lower 32 bits of the system uptime in
 * milliseconds.
 *
 * Because correct conversion requires full precision of the system
 * clock, there is no benefit to using this over k_uptime_get() unless
 * you know the application will never run long enough for the system
 * clock to approach 2^32 ticks. Calls to this function may involve
 * interrupt blocking and 64-bit math.
 *
 * @note
 * While this function returns time in milliseconds, it does
 * not mean it has millisecond resolution. The actual resolution depends on
 * the @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
 *
 * @return The low 32 bits of the current uptime, in milliseconds.
 */
static inline uint32_t k_uptime_get_32(void)
{
        return (uint32_t)k_uptime_get();
}

/**
 * @brief Get system uptime in seconds.
 *
 * This routine returns the elapsed time since the system booted,
 * in seconds.
 *
 * @return Current uptime in seconds.
 */
static inline uint32_t k_uptime_seconds(void)
{
        return k_ticks_to_sec_floor32(k_uptime_ticks());
}

/**
 * @brief Get elapsed time.
 *
 * This routine computes the elapsed time between the current system uptime
 * and an earlier reference time, in milliseconds.
 *
 * @param reftime Pointer to a reference time, which is updated to the current
 * uptime upon return.
 *
 * @return Elapsed time.
 */
static inline int64_t k_uptime_delta(int64_t *reftime)
{
        int64_t uptime, delta;

        uptime = k_uptime_get();
        delta = uptime - *reftime;
        *reftime = uptime;

        return delta;
}
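
/*
 * Example: timing an operation with k_uptime_delta(). A minimal sketch
 * (do_work() is a hypothetical application function); note that the
 * reference time is re-armed by the call itself:
 *
 *        int64_t ref = k_uptime_get();
 *
 *        do_work();
 *
 *        int64_t took_ms = k_uptime_delta(&ref);
 */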

/**
 * @brief Read the hardware clock.
 *
 * This routine returns the current time, as measured by the system's hardware
 * clock.
 *
 * @return Current hardware clock up-counter (in cycles).
 */
static inline uint32_t k_cycle_get_32(void)
{
        return arch_k_cycle_get_32();
}

/**
 * @brief Read the 64-bit hardware clock.
 *
 * This routine returns the current time in 64-bits, as measured by the
 * system's hardware clock, if available.
 *
 * @see CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER
 *
 * @return Current hardware clock up-counter (in cycles).
 */
static inline uint64_t k_cycle_get_64(void)
{
        if (!IS_ENABLED(CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER)) {
                __ASSERT(0, "64-bit cycle counter not enabled on this platform. "
                            "See CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER");
                return 0;
        }

        return arch_k_cycle_get_64();
}

/**
 * @}
 */

struct k_queue {
        sys_sflist_t data_q;
        struct k_spinlock lock;
        _wait_q_t wait_q;

        Z_DECL_POLL_EVENT

        SYS_PORT_TRACING_TRACKING_FIELD(k_queue)
};

/**
 * @cond INTERNAL_HIDDEN
 */

#define Z_QUEUE_INITIALIZER(obj) \
        { \
        .data_q = SYS_SFLIST_STATIC_INIT(&obj.data_q), \
        .lock = { }, \
        .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
        Z_POLL_EVENT_OBJ_INIT(obj) \
        }

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @defgroup queue_apis Queue APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * @brief Initialize a queue.
 *
 * This routine initializes a queue object, prior to its first use.
 *
 * @param queue Address of the queue.
 */
__syscall void k_queue_init(struct k_queue *queue);

/**
 * @brief Cancel waiting on a queue.
 *
 * This routine causes the first thread pending on @a queue, if any, to
 * return from its k_queue_get() call with a NULL value (as if the timeout
 * had expired). If the queue is being waited on by k_poll(), it will return
 * with -EINTR and K_POLL_STATE_CANCELLED state (and per above, a subsequent
 * k_queue_get() will return NULL).
 *
 * @funcprops \isr_ok
 *
 * @param queue Address of the queue.
 */
__syscall void k_queue_cancel_wait(struct k_queue *queue);

/**
 * @brief Append an element to the end of a queue.
 *
 * This routine appends a data item to @a queue. A queue data item must be
 * aligned on a word boundary, and the first word of the item is reserved
 * for the kernel's use.
 *
 * @funcprops \isr_ok
 *
 * @param queue Address of the queue.
 * @param data Address of the data item.
 */
void k_queue_append(struct k_queue *queue, void *data);

/**
 * @brief Append an element to a queue.
 *
 * This routine appends a data item to @a queue. There is an implicit memory
 * allocation to create an additional temporary bookkeeping data structure from
 * the calling thread's resource pool, which is automatically freed when the
 * item is removed. The data itself is not copied.
 *
 * @funcprops \isr_ok
 *
 * @param queue Address of the queue.
 * @param data Address of the data item.
 *
 * @retval 0 on success
 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
 */
__syscall int32_t k_queue_alloc_append(struct k_queue *queue, void *data);

/**
 * @brief Prepend an element to a queue.
 *
 * This routine prepends a data item to @a queue. A queue data item must be
 * aligned on a word boundary, and the first word of the item is reserved
 * for the kernel's use.
 *
 * @funcprops \isr_ok
 *
 * @param queue Address of the queue.
 * @param data Address of the data item.
 */
void k_queue_prepend(struct k_queue *queue, void *data);

/**
 * @brief Prepend an element to a queue.
 *
 * This routine prepends a data item to @a queue. There is an implicit memory
 * allocation to create an additional temporary bookkeeping data structure from
 * the calling thread's resource pool, which is automatically freed when the
 * item is removed. The data itself is not copied.
 *
 * @funcprops \isr_ok
 *
 * @param queue Address of the queue.
 * @param data Address of the data item.
 *
 * @retval 0 on success
 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
 */
__syscall int32_t k_queue_alloc_prepend(struct k_queue *queue, void *data);

/**
 * @brief Insert an element into a queue.
 *
 * This routine inserts a data item into @a queue after the item @a prev. A
 * queue data item must be aligned on a word boundary, and the first word of
 * the item is reserved for the kernel's use.
 *
 * @funcprops \isr_ok
 *
 * @param queue Address of the queue.
 * @param prev Address of the previous data item.
 * @param data Address of the data item.
 */
void k_queue_insert(struct k_queue *queue, void *prev, void *data);

/**
 * @brief Atomically append a list of elements to a queue.
 *
 * This routine adds a list of data items to @a queue in one operation.
 * The data items must be in a singly-linked list, with the first word
 * in each data item pointing to the next data item; the list must be
 * NULL-terminated.
 *
 * @funcprops \isr_ok
 *
 * @param queue Address of the queue.
 * @param head Pointer to first node in singly-linked list.
 * @param tail Pointer to last node in singly-linked list.
 *
 * @retval 0 on success
 * @retval -EINVAL on invalid supplied data
 *
 */
int k_queue_append_list(struct k_queue *queue, void *head, void *tail);

/**
 * @brief Atomically add a list of elements to a queue.
 *
 * This routine adds a list of data items to @a queue in one operation.
 * The data items must be in a singly-linked list implemented using a
 * sys_slist_t object. Upon completion, the original list is empty.
 *
 * @funcprops \isr_ok
 *
 * @param queue Address of the queue.
 * @param list Pointer to sys_slist_t object.
 *
 * @retval 0 on success
 * @retval -EINVAL on invalid data
 */
int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list);

/**
 * @brief Get an element from a queue.
 *
 * This routine removes the first data item from @a queue. The first word of
 * the data item is reserved for the kernel's use.
 *
 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
 *
 * @funcprops \isr_ok
 *
 * @param queue Address of the queue.
 * @param timeout Waiting period to obtain a data item, or one of the special
 * values K_NO_WAIT and K_FOREVER.
 *
 * @return Address of the data item if successful; NULL if returned
 * without waiting, or waiting period timed out.
 */
__syscall void *k_queue_get(struct k_queue *queue, k_timeout_t timeout);
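
/*
 * Example: a queue item laid out as required above, with its first
 * word reserved for the kernel. An illustrative sketch; struct my_item
 * and my_queue are hypothetical:
 *
 *        struct my_item {
 *                void *reserved;        // first word: kernel use only
 *                uint32_t payload;
 *        };
 *
 *        K_QUEUE_DEFINE(my_queue);
 *
 *        static struct my_item item = { .payload = 42 };
 *
 *        k_queue_append(&my_queue, &item);
 *        struct my_item *got = k_queue_get(&my_queue, K_FOREVER);
 */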

/**
 * @brief Remove an element from a queue.
 *
 * This routine removes the specified data item from @a queue. The first word
 * of the data item is reserved for the kernel's use. Removing elements from a
 * k_queue relies on sys_slist_find_and_remove(), which is not a constant-time
 * operation.
 *
 * @funcprops \isr_ok
 *
 * @param queue Address of the queue.
 * @param data Address of the data item.
 *
 * @return true if data item was removed
 */
bool k_queue_remove(struct k_queue *queue, void *data);

/**
 * @brief Append an element to a queue only if it's not present already.
 *
 * This routine appends the data item to @a queue. The first word of the data
 * item is reserved for the kernel's use. Appending elements to a k_queue
 * relies on sys_slist_is_node_in_list(), which is not a constant-time
 * operation.
 *
 * @funcprops \isr_ok
 *
 * @param queue Address of the queue.
 * @param data Address of the data item.
 *
 * @return true if data item was added, false if not
 */
bool k_queue_unique_append(struct k_queue *queue, void *data);

/**
 * @brief Query a queue to see if it has data available.
 *
 * Note that the data might already be gone by the time this function returns
 * if other threads are also trying to read from the queue.
 *
 * @funcprops \isr_ok
 *
 * @param queue Address of the queue.
 *
 * @return Non-zero if the queue is empty.
 * @return 0 if data is available.
 */
__syscall int k_queue_is_empty(struct k_queue *queue);

static inline int z_impl_k_queue_is_empty(struct k_queue *queue)
{
        return sys_sflist_is_empty(&queue->data_q) ? 1 : 0;
}

/**
 * @brief Peek element at the head of queue.
 *
 * Return element from the head of queue without removing it.
 *
 * @param queue Address of the queue.
 *
 * @return Head element, or NULL if queue is empty.
 */
__syscall void *k_queue_peek_head(struct k_queue *queue);

/**
 * @brief Peek element at the tail of queue.
 *
 * Return element from the tail of queue without removing it.
 *
 * @param queue Address of the queue.
 *
 * @return Tail element, or NULL if queue is empty.
 */
__syscall void *k_queue_peek_tail(struct k_queue *queue);

/**
 * @brief Statically define and initialize a queue.
 *
 * The queue can be accessed outside the module where it is defined using:
 *
 * @code extern struct k_queue <name>; @endcode
 *
 * @param name Name of the queue.
 */
#define K_QUEUE_DEFINE(name) \
        STRUCT_SECTION_ITERABLE(k_queue, name) = \
                Z_QUEUE_INITIALIZER(name)

/** @} */

#ifdef CONFIG_USERSPACE
/**
 * @brief futex structure
 *
 * A k_futex is a lightweight mutual exclusion primitive designed
 * to minimize kernel involvement. Uncontended operation relies
 * only on atomic access to shared memory. k_futex objects are tracked as
 * kernel objects and can live in user memory, so that any access
 * bypasses the kernel object permission management mechanism.
 */
struct k_futex {
        atomic_t val;
};

/**
 * @brief futex kernel data structure
 *
 * z_futex_data is the kernel-side helper structure that k_futex uses to
 * complete contended futex operations; the z_futex_data of a futex object
 * is invisible in user mode.
 */
struct z_futex_data {
        _wait_q_t wait_q;
        struct k_spinlock lock;
};

#define Z_FUTEX_DATA_INITIALIZER(obj) \
        { \
        .wait_q = Z_WAIT_Q_INIT(&obj.wait_q) \
        }

/**
 * @defgroup futex_apis FUTEX APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * @brief Pend the current thread on a futex
 *
 * Tests that the supplied futex contains the expected value, and if so,
 * goes to sleep until some other thread calls k_futex_wake() on it.
 *
 * @param futex Address of the futex.
 * @param expected Expected value of the futex; if the actual value differs,
 * the caller will not wait on it.
 * @param timeout Waiting period on the futex, or one of the special values
 * K_NO_WAIT or K_FOREVER.
 * @retval -EACCES Caller does not have read access to futex address.
 * @retval -EAGAIN If the futex value did not match the expected parameter.
 * @retval -EINVAL Futex parameter address not recognized by the kernel.
 * @retval -ETIMEDOUT Thread woke up due to timeout and not a futex wakeup.
 * @retval 0 if the caller went to sleep and was woken up. The caller
 * should check the futex's value on wakeup to determine if it needs
 * to block again.
 */
__syscall int k_futex_wait(struct k_futex *futex, int expected,
                           k_timeout_t timeout);

/**
 * @brief Wake one/all threads pending on a futex
 *
 * Wake up the highest-priority thread pending on the supplied futex, or
 * all threads pending on it, depending on @a wake_all.
 *
 * @param futex Futex to wake up pending threads.
 * @param wake_all If true, wake up all pending threads; if false,
 * wake up the highest-priority thread.
 * @retval -EACCES Caller does not have access to the futex address.
 * @retval -EINVAL Futex parameter address not recognized by the kernel.
 * @return Otherwise, the number of threads that were woken up.
 */
__syscall int k_futex_wake(struct k_futex *futex, bool wake_all);
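
/*
 * Example: the basic wait/wake pairing. This is only a sketch of the
 * contract, not a complete synchronization primitive; "f" is a
 * hypothetical k_futex living in user memory:
 *
 *        // waiter: sleep only while the value is still 0
 *        while (atomic_get(&f.val) == 0) {
 *                k_futex_wait(&f, 0, K_FOREVER);
 *        }
 *
 *        // waker: publish the new value, then wake one waiter
 *        atomic_set(&f.val, 1);
 *        k_futex_wake(&f, false);
 */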

/** @} */
#endif /* CONFIG_USERSPACE */

/**
 * @defgroup event_apis Event APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * Event Structure
 * @ingroup event_apis
 */

struct k_event {
        _wait_q_t wait_q;
        uint32_t events;
        struct k_spinlock lock;

        SYS_PORT_TRACING_TRACKING_FIELD(k_event)

#ifdef CONFIG_OBJ_CORE_EVENT
        struct k_obj_core obj_core;
#endif

};

#define Z_EVENT_INITIALIZER(obj) \
        { \
        .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
        .events = 0 \
        }

/**
 * @brief Initialize an event object
 *
 * This routine initializes an event object, prior to its first use.
 *
 * @param event Address of the event object.
 */
__syscall void k_event_init(struct k_event *event);

/**
 * @brief Post one or more events to an event object
 *
 * This routine posts one or more events to an event object. All threads
 * waiting on the event object @a event whose waiting conditions are met by
 * this posting immediately unpend.
 *
 * Posting differs from setting in that posted events are merged together with
 * the current set of events tracked by the event object.
 *
 * @param event Address of the event object
 * @param events Set of events to post to @a event
 *
 * @retval Previous value of the events in @a event
 */
__syscall uint32_t k_event_post(struct k_event *event, uint32_t events);

/**
 * @brief Set the events in an event object
 *
 * This routine sets the events stored in event object to the specified value.
 * All threads waiting on the event object @a event whose waiting conditions
 * are met by this immediately unpend.
 *
 * Setting differs from posting in that set events replace the current set of
 * events tracked by the event object.
 *
 * @param event Address of the event object
 * @param events Set of events to set in @a event
 *
 * @retval Previous value of the events in @a event
 */
__syscall uint32_t k_event_set(struct k_event *event, uint32_t events);

/**
 * @brief Set or clear the events in an event object
 *
 * This routine sets the events stored in event object to the specified value.
 * All threads waiting on the event object @a event whose waiting conditions
 * are met by this immediately unpend. Unlike @ref k_event_set, this routine
 * allows specific event bits to be set and cleared as determined by the mask.
 *
 * @param event Address of the event object
 * @param events Set of events to set/clear in @a event
 * @param events_mask Mask to be applied to @a events
 *
 * @retval Previous value of the events in @a events_mask
 */
__syscall uint32_t k_event_set_masked(struct k_event *event, uint32_t events,
                                      uint32_t events_mask);

/**
 * @brief Clear the events in an event object
 *
 * This routine clears (resets) the specified events stored in an event object.
 *
 * @param event Address of the event object
 * @param events Set of events to clear in @a event
 *
 * @retval Previous value of the events in @a event
 */
__syscall uint32_t k_event_clear(struct k_event *event, uint32_t events);

/**
 * @brief Wait for any of the specified events
 *
 * This routine waits on event object @a event until any of the specified
 * events have been delivered to the event object, or the maximum wait time
 * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
 * events that are expressed as bits in a single 32-bit word.
 *
 * @note The caller must be careful when resetting if there are multiple threads
 * waiting for the event object @a event.
 *
 * @param event Address of the event object
 * @param events Set of desired events on which to wait
 * @param reset If true, clear the set of events tracked by the event object
 * before waiting. If false, do not clear the events.
 * @param timeout Waiting period for the desired set of events or one of the
 * special values K_NO_WAIT and K_FOREVER.
 *
 * @retval set of matching events upon success
 * @retval 0 if matching events were not received within the specified time
 */
__syscall uint32_t k_event_wait(struct k_event *event, uint32_t events,
                                bool reset, k_timeout_t timeout);

/**
 * @brief Wait for all of the specified events
 *
 * This routine waits on event object @a event until all of the specified
 * events have been delivered to the event object, or the maximum wait time
 * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
 * events that are expressed as bits in a single 32-bit word.
 *
 * @note The caller must be careful when resetting if there are multiple threads
 * waiting for the event object @a event.
 *
 * @param event Address of the event object
 * @param events Set of desired events on which to wait
 * @param reset If true, clear the set of events tracked by the event object
 * before waiting. If false, do not clear the events.
 * @param timeout Waiting period for the desired set of events or one of the
 * special values K_NO_WAIT and K_FOREVER.
 *
 * @retval set of matching events upon success
 * @retval 0 if matching events were not received within the specified time
 */
__syscall uint32_t k_event_wait_all(struct k_event *event, uint32_t events,
                                    bool reset, k_timeout_t timeout);

/**
 * @brief Test the events currently tracked in the event object
 *
 * @param event Address of the event object
 * @param events_mask Set of desired events to test
 *
 * @retval Current value of events in @a events_mask
 */
static inline uint32_t k_event_test(struct k_event *event, uint32_t events_mask)
{
        return k_event_wait(event, events_mask, false, K_NO_WAIT);
}
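
/*
 * Example: one thread waits for either of two event bits while another
 * context posts one of them. An illustrative sketch; the bit
 * assignments and names are arbitrary:
 *
 *        #define EV_RX  BIT(0)
 *        #define EV_ERR BIT(1)
 *
 *        K_EVENT_DEFINE(my_events);
 *
 *        // waiter
 *        uint32_t ev = k_event_wait(&my_events, EV_RX | EV_ERR,
 *                                   false, K_FOREVER);
 *
 *        // poster (e.g. from an ISR)
 *        k_event_post(&my_events, EV_RX);
 */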

/**
 * @brief Statically define and initialize an event object
 *
 * The event can be accessed outside the module where it is defined using:
 *
 * @code extern struct k_event <name>; @endcode
 *
 * @param name Name of the event object.
 */
#define K_EVENT_DEFINE(name) \
        STRUCT_SECTION_ITERABLE(k_event, name) = \
                Z_EVENT_INITIALIZER(name);

/** @} */

struct k_fifo {
        struct k_queue _queue;
#ifdef CONFIG_OBJ_CORE_FIFO
        struct k_obj_core obj_core;
#endif
};

/**
 * @cond INTERNAL_HIDDEN
 */
#define Z_FIFO_INITIALIZER(obj) \
        { \
        ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
        }

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @defgroup fifo_apis FIFO APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * @brief Initialize a FIFO queue.
 *
 * This routine initializes a FIFO queue, prior to its first use.
 *
 * @param fifo Address of the FIFO queue.
 */
#define k_fifo_init(fifo) \
        ({ \
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, init, fifo); \
        k_queue_init(&(fifo)->_queue); \
        K_OBJ_CORE_INIT(K_OBJ_CORE(fifo), _obj_type_fifo); \
        K_OBJ_CORE_LINK(K_OBJ_CORE(fifo)); \
        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, init, fifo); \
        })

/**
 * @brief Cancel waiting on a FIFO queue.
 *
 * This routine causes the first thread pending on @a fifo, if any, to
 * return from its k_fifo_get() call with a NULL value (as if the timeout
 * had expired).
 *
 * @funcprops \isr_ok
 *
 * @param fifo Address of the FIFO queue.
 */
#define k_fifo_cancel_wait(fifo) \
        ({ \
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, cancel_wait, fifo); \
        k_queue_cancel_wait(&(fifo)->_queue); \
        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, cancel_wait, fifo); \
        })

/**
 * @brief Add an element to a FIFO queue.
 *
 * This routine adds a data item to @a fifo. A FIFO data item must be
 * aligned on a word boundary, and the first word of the item is reserved
 * for the kernel's use.
 *
 * @funcprops \isr_ok
 *
 * @param fifo Address of the FIFO.
 * @param data Address of the data item.
 */
#define k_fifo_put(fifo, data) \
        ({ \
        void *_data = data; \
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put, fifo, _data); \
        k_queue_append(&(fifo)->_queue, _data); \
        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put, fifo, _data); \
        })

/**
 * @brief Add an element to a FIFO queue.
 *
 * This routine adds a data item to @a fifo. There is an implicit memory
 * allocation to create an additional temporary bookkeeping data structure from
 * the calling thread's resource pool, which is automatically freed when the
 * item is removed. The data itself is not copied.
 *
 * @funcprops \isr_ok
 *
 * @param fifo Address of the FIFO.
 * @param data Address of the data item.
 *
 * @retval 0 on success
 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
 */
#define k_fifo_alloc_put(fifo, data) \
        ({ \
        void *_data = data; \
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, alloc_put, fifo, _data); \
        int fap_ret = k_queue_alloc_append(&(fifo)->_queue, _data); \
        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, alloc_put, fifo, _data, fap_ret); \
        fap_ret; \
        })

/**
 * @brief Atomically add a list of elements to a FIFO.
 *
 * This routine adds a list of data items to @a fifo in one operation.
 * The data items must be in a singly-linked list, with the first word of
 * each data item pointing to the next data item; the list must be
 * NULL-terminated.
 *
 * @funcprops \isr_ok
 *
 * @param fifo Address of the FIFO queue.
 * @param head Pointer to first node in singly-linked list.
 * @param tail Pointer to last node in singly-linked list.
 */
#define k_fifo_put_list(fifo, head, tail) \
        ({ \
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_list, fifo, head, tail); \
        k_queue_append_list(&(fifo)->_queue, head, tail); \
        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_list, fifo, head, tail); \
        })

/**
 * @brief Atomically add a list of elements to a FIFO queue.
 *
 * This routine adds a list of data items to @a fifo in one operation.
 * The data items must be in a singly-linked list implemented using a
 * sys_slist_t object. Upon completion, the sys_slist_t object is invalid
 * and must be re-initialized via sys_slist_init().
 *
 * @funcprops \isr_ok
 *
 * @param fifo Address of the FIFO queue.
 * @param list Pointer to sys_slist_t object.
 */
#define k_fifo_put_slist(fifo, list) \
        ({ \
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_slist, fifo, list); \
        k_queue_merge_slist(&(fifo)->_queue, list); \
        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_slist, fifo, list); \
        })

/**
 * @brief Get an element from a FIFO queue.
 *
 * This routine removes a data item from @a fifo in a "first in, first out"
 * manner. The first word of the data item is reserved for the kernel's use.
 *
 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
 *
 * @funcprops \isr_ok
 *
 * @param fifo Address of the FIFO queue.
 * @param timeout Waiting period to obtain a data item,
 * or one of the special values K_NO_WAIT and K_FOREVER.
 *
 * @return Address of the data item if successful; NULL if returned
 * without waiting, or waiting period timed out.
 */
#define k_fifo_get(fifo, timeout) \
        ({ \
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, get, fifo, timeout); \
        void *fg_ret = k_queue_get(&(fifo)->_queue, timeout); \
        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, get, fifo, timeout, fg_ret); \
        fg_ret; \
        })
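
/*
 * Example: handing items from an ISR to a thread through a FIFO. A
 * minimal sketch; struct my_msg is hypothetical, with its first word
 * left for the kernel as required above:
 *
 *        struct my_msg {
 *                void *fifo_reserved;    // first word: kernel use only
 *                uint8_t data[8];
 *        };
 *
 *        K_FIFO_DEFINE(my_fifo);
 *
 *        // producer (thread or ISR)
 *        k_fifo_put(&my_fifo, &msg);
 *
 *        // consumer thread
 *        struct my_msg *m = k_fifo_get(&my_fifo, K_FOREVER);
 */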

/**
 * @brief Query a FIFO queue to see if it has data available.
 *
 * Note that the data might already be gone by the time this function returns
 * if other threads are also trying to read from the FIFO.
 *
 * @funcprops \isr_ok
 *
 * @param fifo Address of the FIFO queue.
 *
 * @return Non-zero if the FIFO queue is empty.
 * @return 0 if data is available.
 */
#define k_fifo_is_empty(fifo) \
        k_queue_is_empty(&(fifo)->_queue)

/**
 * @brief Peek element at the head of a FIFO queue.
 *
 * Return element from the head of FIFO queue without removing it. A use case
 * for this is if elements of the FIFO object are themselves containers. Then
 * on each iteration of processing, a head container will be peeked,
 * and some data processed out of it, and only if the container is empty,
 * will it be completely removed from the FIFO queue.
 *
 * @param fifo Address of the FIFO queue.
 *
 * @return Head element, or NULL if the FIFO queue is empty.
 */
#define k_fifo_peek_head(fifo) \
        ({ \
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_head, fifo); \
        void *fph_ret = k_queue_peek_head(&(fifo)->_queue); \
        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_head, fifo, fph_ret); \
        fph_ret; \
        })

/**
 * @brief Peek element at the tail of FIFO queue.
 *
 * Return element from the tail of FIFO queue (without removing it). A use
 * case for this is if elements of the FIFO queue are themselves containers.
 * Then it may be useful to add more data to the last container in a FIFO
 * queue.
 *
 * @param fifo Address of the FIFO queue.
 *
 * @return Tail element, or NULL if the FIFO queue is empty.
 */
#define k_fifo_peek_tail(fifo) \
        ({ \
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_tail, fifo); \
        void *fpt_ret = k_queue_peek_tail(&(fifo)->_queue); \
        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_tail, fifo, fpt_ret); \
        fpt_ret; \
        })

/**
 * @brief Statically define and initialize a FIFO queue.
 *
 * The FIFO queue can be accessed outside the module where it is defined using:
 *
 * @code extern struct k_fifo <name>; @endcode
 *
 * @param name Name of the FIFO queue.
 */
#define K_FIFO_DEFINE(name) \
        STRUCT_SECTION_ITERABLE(k_fifo, name) = \
                Z_FIFO_INITIALIZER(name)

/** @} */

struct k_lifo {
        struct k_queue _queue;
#ifdef CONFIG_OBJ_CORE_LIFO
        struct k_obj_core obj_core;
#endif
};

/**
 * @cond INTERNAL_HIDDEN
 */

#define Z_LIFO_INITIALIZER(obj) \
        { \
        ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
        }

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @defgroup lifo_apis LIFO APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * @brief Initialize a LIFO queue.
 *
 * This routine initializes a LIFO queue object, prior to its first use.
 *
 * @param lifo Address of the LIFO queue.
 */
#define k_lifo_init(lifo) \
        ({ \
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, init, lifo); \
        k_queue_init(&(lifo)->_queue); \
        K_OBJ_CORE_INIT(K_OBJ_CORE(lifo), _obj_type_lifo); \
        K_OBJ_CORE_LINK(K_OBJ_CORE(lifo)); \
        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, init, lifo); \
        })

/**
 * @brief Add an element to a LIFO queue.
 *
 * This routine adds a data item to @a lifo. A LIFO queue data item must be
 * aligned on a word boundary, and the first word of the item is
 * reserved for the kernel's use.
 *
 * @funcprops \isr_ok
 *
 * @param lifo Address of the LIFO queue.
 * @param data Address of the data item.
 */
#define k_lifo_put(lifo, data) \
        ({ \
        void *_data = data; \
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, put, lifo, _data); \
        k_queue_prepend(&(lifo)->_queue, _data); \
        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, put, lifo, _data); \
        })

/**
 * @brief Add an element to a LIFO queue.
 *
 * This routine adds a data item to @a lifo. There is an implicit memory
 * allocation to create an additional temporary bookkeeping data structure from
 * the calling thread's resource pool, which is automatically freed when the
 * item is removed. The data itself is not copied.
 *
 * @funcprops \isr_ok
 *
 * @param lifo Address of the LIFO.
 * @param data Address of the data item.
 *
 * @retval 0 on success
 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
 */
#define k_lifo_alloc_put(lifo, data) \
        ({ \
        void *_data = data; \
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, alloc_put, lifo, _data); \
        int lap_ret = k_queue_alloc_prepend(&(lifo)->_queue, _data); \
        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, alloc_put, lifo, _data, lap_ret); \
        lap_ret; \
        })

/**
 * @brief Get an element from a LIFO queue.
 *
 * This routine removes a data item from @a lifo in a "last in, first out"
 * manner. The first word of the data item is reserved for the kernel's use.
 *
 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
 *
 * @funcprops \isr_ok
 *
 * @param lifo Address of the LIFO queue.
 * @param timeout Waiting period to obtain a data item,
 * or one of the special values K_NO_WAIT and K_FOREVER.
 *
 * @return Address of the data item if successful; NULL if returned
 * without waiting, or waiting period timed out.
 */
#define k_lifo_get(lifo, timeout) \
        ({ \
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, get, lifo, timeout); \
        void *lg_ret = k_queue_get(&(lifo)->_queue, timeout); \
        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, get, lifo, timeout, lg_ret); \
        lg_ret; \
        })

/**
 * @brief Statically define and initialize a LIFO queue.
 *
 * The LIFO queue can be accessed outside the module where it is defined using:
 *
 * @code extern struct k_lifo <name>; @endcode
 *
 * @param name Name of the LIFO queue.
 */
#define K_LIFO_DEFINE(name) \
        STRUCT_SECTION_ITERABLE(k_lifo, name) = \
                Z_LIFO_INITIALIZER(name)

/** @} */

/**
 * @cond INTERNAL_HIDDEN
 */
#define K_STACK_FLAG_ALLOC ((uint8_t)1) /* Buffer was allocated */

typedef uintptr_t stack_data_t;

struct k_stack {
        _wait_q_t wait_q;
        struct k_spinlock lock;
        stack_data_t *base, *next, *top;

        uint8_t flags;

        SYS_PORT_TRACING_TRACKING_FIELD(k_stack)

#ifdef CONFIG_OBJ_CORE_STACK
        struct k_obj_core obj_core;
#endif
};

#define Z_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
        { \
        .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
        .base = (stack_buffer), \
        .next = (stack_buffer), \
        .top = (stack_buffer) + (stack_num_entries), \
        }

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @defgroup stack_apis Stack APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * @brief Initialize a stack.
 *
 * This routine initializes a stack object, prior to its first use.
 *
 * @param stack Address of the stack.
 * @param buffer Address of array used to hold stacked values.
 * @param num_entries Maximum number of values that can be stacked.
 */
void k_stack_init(struct k_stack *stack,
                  stack_data_t *buffer, uint32_t num_entries);


/**
 * @brief Initialize a stack.
 *
 * This routine initializes a stack object, prior to its first use. Internal
 * buffers will be allocated from the calling thread's resource pool.
 * This memory will be released if k_stack_cleanup() is called, or
 * userspace is enabled and the stack object loses all references to it.
 *
 * @param stack Address of the stack.
 * @param num_entries Maximum number of values that can be stacked.
 *
 * @return -ENOMEM if memory couldn't be allocated
 */

__syscall int32_t k_stack_alloc_init(struct k_stack *stack,
                                     uint32_t num_entries);

/**
 * @brief Release a stack's allocated buffer
 *
 * If a stack object was given a dynamically allocated buffer via
 * k_stack_alloc_init(), this will free it. This function does nothing
 * if the buffer wasn't dynamically allocated.
 *
 * @param stack Address of the stack.
 * @retval 0 on success
 * @retval -EAGAIN when object is still in use
 */
int k_stack_cleanup(struct k_stack *stack);

/**
 * @brief Push an element onto a stack.
 *
 * This routine adds a stack_data_t value @a data to @a stack.
 *
 * @funcprops \isr_ok
 *
 * @param stack Address of the stack.
 * @param data Value to push onto the stack.
 *
 * @retval 0 on success
 * @retval -ENOMEM if stack is full
 */
__syscall int k_stack_push(struct k_stack *stack, stack_data_t data);

/**
 * @brief Pop an element from a stack.
 *
 * This routine removes a stack_data_t value from @a stack in a "last in,
 * first out" manner and stores the value in @a data.
 *
 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
 *
 * @funcprops \isr_ok
 *
 * @param stack Address of the stack.
 * @param data Address of area to hold the value popped from the stack.
 * @param timeout Waiting period to obtain a value,
 * or one of the special values K_NO_WAIT and
 * K_FOREVER.
 *
 * @retval 0 Element popped from stack.
 * @retval -EBUSY Returned without waiting.
 * @retval -EAGAIN Waiting period timed out.
 */
__syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data,
                          k_timeout_t timeout);
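
/*
 * Example: using a stack as a pool of free buffer indices. An
 * illustrative sketch; the entry count and the meaning of the values
 * are arbitrary:
 *
 *        K_STACK_DEFINE(free_idx, 4);
 *
 *        for (stack_data_t i = 0; i < 4; i++) {
 *                k_stack_push(&free_idx, i);    // seed the pool
 *        }
 *
 *        stack_data_t idx;
 *
 *        k_stack_pop(&free_idx, &idx, K_FOREVER);    // claim an index
 *        // ... use buffer idx ...
 *        k_stack_push(&free_idx, idx);               // release it
 */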

/**
 * @brief Statically define and initialize a stack
 *
 * The stack can be accessed outside the module where it is defined using:
 *
 * @code extern struct k_stack <name>; @endcode
 *
 * @param name Name of the stack.
 * @param stack_num_entries Maximum number of values that can be stacked.
 */
#define K_STACK_DEFINE(name, stack_num_entries) \
        stack_data_t __noinit \
                _k_stack_buf_##name[stack_num_entries]; \
        STRUCT_SECTION_ITERABLE(k_stack, name) = \
                Z_STACK_INITIALIZER(name, _k_stack_buf_##name, \
                                    stack_num_entries)

/** @} */

/**
 * @cond INTERNAL_HIDDEN
 */

struct k_work;
struct k_work_q;
struct k_work_queue_config;
extern struct k_work_q k_sys_work_q;

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @defgroup mutex_apis Mutex APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * Mutex Structure
 * @ingroup mutex_apis
 */
struct k_mutex {
        /** Mutex wait queue */
        _wait_q_t wait_q;
        /** Mutex owner */
        struct k_thread *owner;

        /** Current lock count */
        uint32_t lock_count;

        /** Original thread priority */
        int owner_orig_prio;

        SYS_PORT_TRACING_TRACKING_FIELD(k_mutex)

#ifdef CONFIG_OBJ_CORE_MUTEX
        struct k_obj_core obj_core;
#endif
};

/**
 * @cond INTERNAL_HIDDEN
 */
#define Z_MUTEX_INITIALIZER(obj) \
        { \
        .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
        .owner = NULL, \
        .lock_count = 0, \
        .owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
        }

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @brief Statically define and initialize a mutex.
 *
 * The mutex can be accessed outside the module where it is defined using:
 *
 * @code extern struct k_mutex <name>; @endcode
 *
 * @param name Name of the mutex.
 */
#define K_MUTEX_DEFINE(name) \
        STRUCT_SECTION_ITERABLE(k_mutex, name) = \
                Z_MUTEX_INITIALIZER(name)
/**
 * @brief Initialize a mutex.
 *
 * This routine initializes a mutex object, prior to its first use.
 *
 * Upon completion, the mutex is available and does not have an owner.
 *
 * @param mutex Address of the mutex.
 *
 * @retval 0 Mutex object created
 *
 */
__syscall int k_mutex_init(struct k_mutex *mutex);


/**
 * @brief Lock a mutex.
 *
 * This routine locks @a mutex. If the mutex is locked by another thread,
 * the calling thread waits until the mutex becomes available or until
 * a timeout occurs.
 *
 * A thread is permitted to lock a mutex it has already locked. The operation
 * completes immediately and the lock count is increased by 1.
 *
 * Mutexes may not be locked in ISRs.
 *
 * @param mutex Address of the mutex.
 * @param timeout Waiting period to lock the mutex,
 * or one of the special values K_NO_WAIT and
 * K_FOREVER.
 *
 * @retval 0 Mutex locked.
 * @retval -EBUSY Returned without waiting.
 * @retval -EAGAIN Waiting period timed out.
 */
__syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout);

/**
 * @brief Unlock a mutex.
 *
 * This routine unlocks @a mutex. The mutex must already be locked by the
 * calling thread.
 *
 * The mutex cannot be claimed by another thread until it has been unlocked by
 * the calling thread as many times as it was previously locked by that
 * thread.
 *
 * Mutexes may not be unlocked in ISRs, as mutexes must only be manipulated
 * in thread context due to ownership and priority inheritance semantics.
 *
 * @param mutex Address of the mutex.
 *
 * @retval 0 Mutex unlocked.
 * @retval -EPERM The current thread does not own the mutex
 * @retval -EINVAL The mutex is not locked
 *
 */
__syscall int k_mutex_unlock(struct k_mutex *mutex);
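
/*
 * Example: the canonical lock/unlock pattern around a critical
 * section. A minimal sketch; my_mutex, shared_state and the 100 ms
 * timeout are hypothetical:
 *
 *        K_MUTEX_DEFINE(my_mutex);
 *
 *        if (k_mutex_lock(&my_mutex, K_MSEC(100)) == 0) {
 *                shared_state++;        // exclusive access here
 *                k_mutex_unlock(&my_mutex);
 *        } else {
 *                // could not take the lock within 100 ms
 *        }
 */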

/**
 * @}
 */


struct k_condvar {
        _wait_q_t wait_q;

#ifdef CONFIG_OBJ_CORE_CONDVAR
        struct k_obj_core obj_core;
#endif
};

#define Z_CONDVAR_INITIALIZER(obj) \
        { \
        .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
        }

/**
 * @defgroup condvar_apis Condition Variables APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * @brief Initialize a condition variable
 *
 * @param condvar pointer to a @p k_condvar structure
 * @retval 0 Condition variable created successfully
 */
__syscall int k_condvar_init(struct k_condvar *condvar);

/**
 * @brief Signals one thread that is pending on the condition variable
 *
 * @param condvar pointer to a @p k_condvar structure
 * @retval 0 On success
 */
__syscall int k_condvar_signal(struct k_condvar *condvar);

/**
 * @brief Unblock all threads that are pending on the condition
 * variable
 *
 * @param condvar pointer to a @p k_condvar structure
 * @return The number of woken threads, on success
 */
__syscall int k_condvar_broadcast(struct k_condvar *condvar);

/**
 * @brief Waits on the condition variable, releasing the mutex lock
 *
 * Atomically releases the currently owned mutex, blocks the current thread
 * waiting on the condition variable specified by @a condvar,
 * and finally acquires the mutex again.
 *
 * The waiting thread unblocks only after another thread calls
 * k_condvar_signal(), or k_condvar_broadcast() with the same condition
 * variable.
 *
 * @param condvar pointer to a @p k_condvar structure
 * @param mutex Address of the mutex.
 * @param timeout Waiting period for the condition variable
 * or one of the special values K_NO_WAIT and K_FOREVER.
 * @retval 0 On success
 * @retval -EAGAIN Waiting period timed out.
 */
__syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
                             k_timeout_t timeout);
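
/*
 * Example: the classic condition-variable pattern, re-checking the
 * predicate in a loop after every wakeup. An illustrative sketch; the
 * "ready" flag is hypothetical and must only be touched with the mutex
 * held:
 *
 *        // waiter
 *        k_mutex_lock(&my_mutex, K_FOREVER);
 *        while (!ready) {
 *                k_condvar_wait(&my_condvar, &my_mutex, K_FOREVER);
 *        }
 *        consume();                    // predicate holds, mutex held
 *        k_mutex_unlock(&my_mutex);
 *
 *        // signaler
 *        k_mutex_lock(&my_mutex, K_FOREVER);
 *        ready = true;
 *        k_condvar_signal(&my_condvar);
 *        k_mutex_unlock(&my_mutex);
 */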

/**
 * @brief Statically define and initialize a condition variable.
 *
 * The condition variable can be accessed outside the module where it is
 * defined using:
 *
 * @code extern struct k_condvar <name>; @endcode
 *
 * @param name Name of the condition variable.
 */
#define K_CONDVAR_DEFINE(name) \
        STRUCT_SECTION_ITERABLE(k_condvar, name) = \
                Z_CONDVAR_INITIALIZER(name)
/**
 * @}
 */

/**
 * @cond INTERNAL_HIDDEN
 */

struct k_sem {
        _wait_q_t wait_q;
        unsigned int count;
        unsigned int limit;

        Z_DECL_POLL_EVENT

        SYS_PORT_TRACING_TRACKING_FIELD(k_sem)

#ifdef CONFIG_OBJ_CORE_SEM
        struct k_obj_core obj_core;
#endif
};

#define Z_SEM_INITIALIZER(obj, initial_count, count_limit) \
        { \
        .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
        .count = (initial_count), \
        .limit = (count_limit), \
        Z_POLL_EVENT_OBJ_INIT(obj) \
        }

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @defgroup semaphore_apis Semaphore APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * @brief Maximum limit value allowed for a semaphore.
 *
 * This is intended for use when a semaphore does not have
 * an explicit maximum limit, and instead is just used for
 * counting purposes.
 *
 */
#define K_SEM_MAX_LIMIT UINT_MAX

/**
 * @brief Initialize a semaphore.
 *
 * This routine initializes a semaphore object, prior to its first use.
 *
 * @param sem Address of the semaphore.
 * @param initial_count Initial semaphore count.
 * @param limit Maximum permitted semaphore count.
 *
 * @see K_SEM_MAX_LIMIT
 *
 * @retval 0 Semaphore created successfully
 * @retval -EINVAL Invalid values
 *
 */
__syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
                         unsigned int limit);

/**
 * @brief Take a semaphore.
 *
 * This routine takes @a sem.
 *
 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
 *
 * @funcprops \isr_ok
 *
 * @param sem Address of the semaphore.
 * @param timeout Waiting period to take the semaphore,
 * or one of the special values K_NO_WAIT and K_FOREVER.
 *
 * @retval 0 Semaphore taken.
 * @retval -EBUSY Returned without waiting.
 * @retval -EAGAIN Waiting period timed out,
 * or the semaphore was reset during the waiting period.
 */
__syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);

/**
 * @brief Give a semaphore.
 *
 * This routine gives @a sem, unless the semaphore is already at its maximum
 * permitted count.
 *
 * @funcprops \isr_ok
 *
 * @param sem Address of the semaphore.
 */
__syscall void k_sem_give(struct k_sem *sem);
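
/*
 * Example: a binary semaphore signalling "data ready" from an ISR to a
 * thread. A minimal sketch; the initial count of 0 makes the consumer
 * block until the first give, and process_data() is hypothetical:
 *
 *        K_SEM_DEFINE(data_ready, 0, 1);
 *
 *        // ISR
 *        k_sem_give(&data_ready);
 *
 *        // consumer thread
 *        if (k_sem_take(&data_ready, K_MSEC(500)) == 0) {
 *                process_data();
 *        }
 */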
3312
3313 /**
3314 * @brief Resets a semaphore's count to zero.
3315 *
3316 * This routine sets the count of @a sem to zero.
3317 * Any outstanding semaphore takes will be aborted
3318 * with -EAGAIN.
3319 *
3320 * @param sem Address of the semaphore.
3321 */
3322 __syscall void k_sem_reset(struct k_sem *sem);
3323
3324 /**
3325 * @brief Get a semaphore's count.
3326 *
3327 * This routine returns the current count of @a sem.
3328 *
3329 * @param sem Address of the semaphore.
3330 *
3331 * @return Current semaphore count.
3332 */
3333 __syscall unsigned int k_sem_count_get(struct k_sem *sem);
3334
3335 /**
3336 * @internal
3337 */
z_impl_k_sem_count_get(struct k_sem * sem)3338 static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
3339 {
3340 return sem->count;
3341 }
3342
3343 /**
3344 * @brief Statically define and initialize a semaphore.
3345 *
3346 * The semaphore can be accessed outside the module where it is defined using:
3347 *
3348 * @code extern struct k_sem <name>; @endcode
3349 *
3350 * @param name Name of the semaphore.
3351 * @param initial_count Initial semaphore count.
3352 * @param count_limit Maximum permitted semaphore count.
3353 */
3354 #define K_SEM_DEFINE(name, initial_count, count_limit) \
3355 STRUCT_SECTION_ITERABLE(k_sem, name) = \
3356 Z_SEM_INITIALIZER(name, initial_count, count_limit); \
3357 BUILD_ASSERT(((count_limit) != 0) && \
3358 		     ((initial_count) <= (count_limit)) && \
3359 ((count_limit) <= K_SEM_MAX_LIMIT));
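
/*
 * Example: a minimal sketch of counting-semaphore signaling from an ISR to a
 * thread. The names my_sem, my_isr and consumer are illustrative, not part of
 * this API.
 *
 * @code
 * K_SEM_DEFINE(my_sem, 0, 10);
 *
 * void my_isr(const void *arg)
 * {
 *         ARG_UNUSED(arg);
 *         k_sem_give(&my_sem);    // never blocks, safe from ISR context
 * }
 *
 * void consumer(void)
 * {
 *         for (;;) {
 *                 // wait up to 100 ms for one unit of work
 *                 if (k_sem_take(&my_sem, K_MSEC(100)) == 0) {
 *                         // process one unit per successful take
 *                 }
 *         }
 * }
 * @endcode
 */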
3360
3361 /** @} */
3362
3363 /**
3364 * @cond INTERNAL_HIDDEN
3365 */
3366
3367 struct k_work_delayable;
3368 struct k_work_sync;
3369
3370 /**
3371 * INTERNAL_HIDDEN @endcond
3372 */
3373
3374 /**
3375 * @defgroup workqueue_apis Work Queue APIs
3376 * @ingroup kernel_apis
3377 * @{
3378 */
3379
3380 /** @brief The signature for a work item handler function.
3381 *
3382 * The function will be invoked by the thread animating a work queue.
3383 *
3384 * @param work the work item that provided the handler.
3385 */
3386 typedef void (*k_work_handler_t)(struct k_work *work);
3387
3388 /** @brief Initialize a (non-delayable) work structure.
3389 *
3390 * This must be invoked before submitting a work structure for the first time.
3391 * It need not be invoked again on the same work structure. It can be
3392 * re-invoked to change the associated handler, but this must be done when the
3393 * work item is idle.
3394 *
3395 * @funcprops \isr_ok
3396 *
3397 * @param work the work structure to be initialized.
3398 *
3399 * @param handler the handler to be invoked by the work item.
3400 */
3401 void k_work_init(struct k_work *work,
3402 k_work_handler_t handler);
3403
3404 /** @brief Busy state flags from the work item.
3405 *
3406 * A zero return value indicates the work item appears to be idle.
3407 *
3408 * @note This is a live snapshot of state, which may change before the result
3409 * is checked. Use locks where appropriate.
3410 *
3411 * @funcprops \isr_ok
3412 *
3413 * @param work pointer to the work item.
3414 *
3415 * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED,
3416 * K_WORK_RUNNING, K_WORK_CANCELING, and K_WORK_FLUSHING.
3417 */
3418 int k_work_busy_get(const struct k_work *work);
3419
3420 /** @brief Test whether a work item is currently pending.
3421 *
3422  * Wrapper to determine whether a work item is in a non-idle state.
3423 *
3424 * @note This is a live snapshot of state, which may change before the result
3425 * is checked. Use locks where appropriate.
3426 *
3427 * @funcprops \isr_ok
3428 *
3429 * @param work pointer to the work item.
3430 *
3431 * @return true if and only if k_work_busy_get() returns a non-zero value.
3432 */
3433 static inline bool k_work_is_pending(const struct k_work *work);
3434
3435 /** @brief Submit a work item to a queue.
3436 *
3437 * @param queue pointer to the work queue on which the item should run. If
3438 * NULL the queue from the most recent submission will be used.
3439 *
3440 * @funcprops \isr_ok
3441 *
3442 * @param work pointer to the work item.
3443 *
3444 * @retval 0 if work was already submitted to a queue
3445 * @retval 1 if work was not submitted and has been queued to @p queue
3446 * @retval 2 if work was running and has been queued to the queue that was
3447 * running it
3448 * @retval -EBUSY
3449 * * if work submission was rejected because the work item is cancelling; or
3450 * * @p queue is draining; or
3451 * * @p queue is plugged.
3452 * @retval -EINVAL if @p queue is null and the work item has never been run.
3453 * @retval -ENODEV if @p queue has not been started.
3454 */
3455 int k_work_submit_to_queue(struct k_work_q *queue,
3456 struct k_work *work);
3457
3458 /** @brief Submit a work item to the system queue.
3459 *
3460 * @funcprops \isr_ok
3461 *
3462 * @param work pointer to the work item.
3463 *
3464 * @return as with k_work_submit_to_queue().
3465 */
3466 int k_work_submit(struct k_work *work);
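
/*
 * Example: a minimal sketch of deferring processing from an ISR to the system
 * work queue. The names my_work_handler, my_work and my_isr are illustrative.
 *
 * @code
 * static void my_work_handler(struct k_work *work)
 * {
 *         // runs in the system work queue thread, not in the ISR
 * }
 *
 * K_WORK_DEFINE(my_work, my_work_handler);
 *
 * void my_isr(const void *arg)
 * {
 *         ARG_UNUSED(arg);
 *         (void)k_work_submit(&my_work);    // 0, 1 or 2 indicates success
 * }
 * @endcode
 */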
3467
3468 /** @brief Wait for last-submitted instance to complete.
3469 *
3470 * Resubmissions may occur while waiting, including chained submissions (from
3471 * within the handler).
3472 *
3473 * @note Be careful of caller and work queue thread relative priority. If
3474 * this function sleeps it will not return until the work queue thread
3475 * completes the tasks that allow this thread to resume.
3476 *
3477 * @note Behavior is undefined if this function is invoked on @p work from a
3478 * work queue running @p work.
3479 *
3480 * @param work pointer to the work item.
3481 *
3482 * @param sync pointer to an opaque item containing state related to the
3483 * pending cancellation. The object must persist until the call returns, and
3484 * be accessible from both the caller thread and the work queue thread. The
3485 * object must not be used for any other flush or cancel operation until this
3486 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3487 * must be allocated in coherent memory.
3488 *
3489 * @retval true if call had to wait for completion
3490 * @retval false if work was already idle
3491 */
3492 bool k_work_flush(struct k_work *work,
3493 struct k_work_sync *sync);
3494
3495 /** @brief Cancel a work item.
3496 *
3497 * This attempts to prevent a pending (non-delayable) work item from being
3498 * processed by removing it from the work queue. If the item is being
3499 * processed, the work item will continue to be processed, but resubmissions
3500 * are rejected until cancellation completes.
3501 *
3502  * If this returns zero, cancellation is complete; otherwise something
3503 * (probably a work queue thread) is still referencing the item.
3504 *
3505 * See also k_work_cancel_sync().
3506 *
3507 * @funcprops \isr_ok
3508 *
3509 * @param work pointer to the work item.
3510 *
3511 * @return the k_work_busy_get() status indicating the state of the item after all
3512 * cancellation steps performed by this call are completed.
3513 */
3514 int k_work_cancel(struct k_work *work);
3515
3516 /** @brief Cancel a work item and wait for it to complete.
3517 *
3518 * Same as k_work_cancel() but does not return until cancellation is complete.
3519 * This can be invoked by a thread after k_work_cancel() to synchronize with a
3520 * previous cancellation.
3521 *
3522 * On return the work structure will be idle unless something submits it after
3523 * the cancellation was complete.
3524 *
3525 * @note Be careful of caller and work queue thread relative priority. If
3526 * this function sleeps it will not return until the work queue thread
3527 * completes the tasks that allow this thread to resume.
3528 *
3529 * @note Behavior is undefined if this function is invoked on @p work from a
3530 * work queue running @p work.
3531 *
3532 * @param work pointer to the work item.
3533 *
3534 * @param sync pointer to an opaque item containing state related to the
3535 * pending cancellation. The object must persist until the call returns, and
3536 * be accessible from both the caller thread and the work queue thread. The
3537 * object must not be used for any other flush or cancel operation until this
3538 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3539 * must be allocated in coherent memory.
3540 *
3541 * @retval true if work was pending (call had to wait for cancellation of a
3542 * running handler to complete, or scheduled or submitted operations were
3543 * cancelled);
3544 * @retval false otherwise
3545 */
3546 bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync);
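
/*
 * Example: a sketch of synchronously canceling a work item before releasing
 * resources its handler touches. Assumes CONFIG_KERNEL_COHERENCE is disabled,
 * so the k_work_sync object may live on the stack.
 *
 * @code
 * void teardown(struct k_work *work)    // illustrative helper
 * {
 *         struct k_work_sync sync;
 *
 *         (void)k_work_cancel_sync(work, &sync);
 *         // the handler is guaranteed not to be running past this point
 * }
 * @endcode
 */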
3547
3548 /** @brief Initialize a work queue structure.
3549 *
3550 * This must be invoked before starting a work queue structure for the first time.
3551 * It need not be invoked again on the same work queue structure.
3552 *
3553 * @funcprops \isr_ok
3554 *
3555 * @param queue the queue structure to be initialized.
3556 */
3557 void k_work_queue_init(struct k_work_q *queue);
3558
3559 /** @brief Start a work queue.
3560 *
3561 * This configures the work queue thread and starts it running. The function
3562 * should not be re-invoked on a queue.
3563 *
3564 * @param queue pointer to the queue structure. It must be initialized
3565 * in zeroed/bss memory or with @ref k_work_queue_init before
3566 * use.
3567 *
3568 * @param stack pointer to the work thread stack area.
3569 *
3570 * @param stack_size size of the work thread stack area, in bytes.
3571 *
3572 * @param prio initial thread priority
3573 *
3574 * @param cfg optional additional configuration parameters. Pass @c
3575 * NULL if not required, to use the defaults documented in
3576 * k_work_queue_config.
3577 */
3578 void k_work_queue_start(struct k_work_q *queue,
3579 k_thread_stack_t *stack, size_t stack_size,
3580 int prio, const struct k_work_queue_config *cfg);
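
/*
 * Example: a sketch of bringing up a dedicated work queue. The symbols
 * my_stack_area, MY_STACK_SIZE, MY_PRIORITY and my_work_q are illustrative.
 *
 * @code
 * K_THREAD_STACK_DEFINE(my_stack_area, MY_STACK_SIZE);
 * static struct k_work_q my_work_q;
 *
 * void start_my_queue(void)
 * {
 *         struct k_work_queue_config cfg = {
 *                 .name = "my_work_q",
 *                 .no_yield = false,
 *         };
 *
 *         k_work_queue_init(&my_work_q);
 *         k_work_queue_start(&my_work_q, my_stack_area,
 *                            K_THREAD_STACK_SIZEOF(my_stack_area),
 *                            MY_PRIORITY, &cfg);
 * }
 * @endcode
 */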
3581
3582 /** @brief Access the thread that animates a work queue.
3583 *
3584 * This is necessary to grant a work queue thread access to things the work
3585 * items it will process are expected to use.
3586 *
3587 * @param queue pointer to the queue structure.
3588 *
3589 * @return the thread associated with the work queue.
3590 */
3591 static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue);
3592
3593 /** @brief Wait until the work queue has drained, optionally plugging it.
3594 *
3595  * This blocks submission to the work queue except when coming from the queue
3596 * thread, and blocks the caller until no more work items are available in the
3597 * queue.
3598 *
3599 * If @p plug is true then submission will continue to be blocked after the
3600 * drain operation completes until k_work_queue_unplug() is invoked.
3601 *
3602 * Note that work items that are delayed are not yet associated with their
3603  * work queue. They must be cancelled externally if the goal is to ensure the
3604 * work queue remains empty. The @p plug feature can be used to prevent
3605 * delayed items from being submitted after the drain completes.
3606 *
3607 * @param queue pointer to the queue structure.
3608 *
3609 * @param plug if true the work queue will continue to block new submissions
3610 * after all items have drained.
3611 *
3612 * @retval 1 if call had to wait for the drain to complete
3613 * @retval 0 if call did not have to wait
3614 * @retval negative if wait was interrupted or failed
3615 */
3616 int k_work_queue_drain(struct k_work_q *queue, bool plug);
3617
3618 /** @brief Release a work queue to accept new submissions.
3619 *
3620 * This releases the block on new submissions placed when k_work_queue_drain()
3621 * is invoked with the @p plug option enabled. If this is invoked before the
3622 * drain completes new items may be submitted as soon as the drain completes.
3623 *
3624 * @funcprops \isr_ok
3625 *
3626 * @param queue pointer to the queue structure.
3627 *
3628 * @retval 0 if successfully unplugged
3629 * @retval -EALREADY if the work queue was not plugged.
3630 */
3631 int k_work_queue_unplug(struct k_work_q *queue);
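
/*
 * Example: a sketch of quiescing a queue around a critical operation by
 * draining with the plug option and unplugging afterwards. my_work_q is the
 * illustrative queue from the sketch above.
 *
 * @code
 * void quiesce_my_queue(void)
 * {
 *         // block new submissions and wait for pending items to finish
 *         (void)k_work_queue_drain(&my_work_q, true);
 *
 *         // queue is idle and still plugged; do the critical operation
 *
 *         // accept submissions again
 *         (void)k_work_queue_unplug(&my_work_q);
 * }
 * @endcode
 */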
3632
3633 /** @brief Stop a work queue.
3634 *
3635 * Stops the work queue thread and ensures that no further work will be processed.
3636  * This call is blocking, and if successful it guarantees that the work queue
3637  * thread has terminated cleanly; no work will be processed past this point.
3638 *
3639 * @param queue Pointer to the queue structure.
3640 * @param timeout Maximum time to wait for the work queue to stop.
3641 *
3642 * @retval 0 if the work queue was stopped
3643 * @retval -EALREADY if the work queue was not started (or already stopped)
3644 * @retval -EBUSY if the work queue is actively processing work items
3645 * @retval -ETIMEDOUT if the work queue did not stop within the stipulated timeout
3646 */
3647 int k_work_queue_stop(struct k_work_q *queue, k_timeout_t timeout);
3648
3649 /** @brief Initialize a delayable work structure.
3650 *
3651 * This must be invoked before scheduling a delayable work structure for the
3652 * first time. It need not be invoked again on the same work structure. It
3653 * can be re-invoked to change the associated handler, but this must be done
3654 * when the work item is idle.
3655 *
3656 * @funcprops \isr_ok
3657 *
3658 * @param dwork the delayable work structure to be initialized.
3659 *
3660 * @param handler the handler to be invoked by the work item.
3661 */
3662 void k_work_init_delayable(struct k_work_delayable *dwork,
3663 k_work_handler_t handler);
3664
3665 /**
3666 * @brief Get the parent delayable work structure from a work pointer.
3667 *
3668 * This function is necessary when a @c k_work_handler_t function is passed to
3669  * k_work_schedule_for_queue() and the handler needs to access data in the
3670  * structure that contains the @c k_work_delayable.
3671 *
3672 * @param work Address passed to the work handler
3673 *
3674 * @return Address of the containing @c k_work_delayable structure.
3675 */
3676 static inline struct k_work_delayable *
3677 k_work_delayable_from_work(struct k_work *work);
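
/*
 * Example: a sketch of recovering per-object context in a delayable work
 * handler. The containing type struct my_device is illustrative.
 *
 * @code
 * struct my_device {
 *         struct k_work_delayable dwork;
 *         int count;
 * };
 *
 * static void my_handler(struct k_work *work)
 * {
 *         struct k_work_delayable *dwork = k_work_delayable_from_work(work);
 *         struct my_device *dev = CONTAINER_OF(dwork, struct my_device, dwork);
 *
 *         dev->count++;
 * }
 * @endcode
 */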
3678
3679 /** @brief Busy state flags from the delayable work item.
3680 *
3681 * @funcprops \isr_ok
3682 *
3683 * @note This is a live snapshot of state, which may change before the result
3684 * can be inspected. Use locks where appropriate.
3685 *
3686 * @param dwork pointer to the delayable work item.
3687 *
3688 * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED, K_WORK_RUNNING,
3689 * K_WORK_CANCELING, and K_WORK_FLUSHING. A zero return value indicates the
3690 * work item appears to be idle.
3691 */
3692 int k_work_delayable_busy_get(const struct k_work_delayable *dwork);
3693
3694 /** @brief Test whether a delayed work item is currently pending.
3695 *
3696 * Wrapper to determine whether a delayed work item is in a non-idle state.
3697 *
3698 * @note This is a live snapshot of state, which may change before the result
3699 * can be inspected. Use locks where appropriate.
3700 *
3701 * @funcprops \isr_ok
3702 *
3703 * @param dwork pointer to the delayable work item.
3704 *
3705 * @return true if and only if k_work_delayable_busy_get() returns a non-zero
3706 * value.
3707 */
3708 static inline bool k_work_delayable_is_pending(
3709 const struct k_work_delayable *dwork);
3710
3711 /** @brief Get the absolute tick count at which a scheduled delayable work
3712 * will be submitted.
3713 *
3714 * @note This is a live snapshot of state, which may change before the result
3715 * can be inspected. Use locks where appropriate.
3716 *
3717 * @funcprops \isr_ok
3718 *
3719 * @param dwork pointer to the delayable work item.
3720 *
3721 * @return the tick count when the timer that will schedule the work item will
3722 * expire, or the current tick count if the work is not scheduled.
3723 */
3724 static inline k_ticks_t k_work_delayable_expires_get(
3725 const struct k_work_delayable *dwork);
3726
3727 /** @brief Get the number of ticks until a scheduled delayable work will be
3728 * submitted.
3729 *
3730 * @note This is a live snapshot of state, which may change before the result
3731 * can be inspected. Use locks where appropriate.
3732 *
3733 * @funcprops \isr_ok
3734 *
3735 * @param dwork pointer to the delayable work item.
3736 *
3737 * @return the number of ticks until the timer that will schedule the work
3738 * item will expire, or zero if the item is not scheduled.
3739 */
3740 static inline k_ticks_t k_work_delayable_remaining_get(
3741 const struct k_work_delayable *dwork);
3742
3743 /** @brief Submit an idle work item to a queue after a delay.
3744 *
3745 * Unlike k_work_reschedule_for_queue() this is a no-op if the work item is
3746 * already scheduled or submitted, even if @p delay is @c K_NO_WAIT.
3747 *
3748 * @funcprops \isr_ok
3749 *
3750 * @param queue the queue on which the work item should be submitted after the
3751 * delay.
3752 *
3753 * @param dwork pointer to the delayable work item.
3754 *
3755 * @param delay the time to wait before submitting the work item. If @c
3756 * K_NO_WAIT and the work is not pending this is equivalent to
3757 * k_work_submit_to_queue().
3758 *
3759 * @retval 0 if work was already scheduled or submitted.
3760 * @retval 1 if work has been scheduled.
3761 * @retval 2 if @p delay is @c K_NO_WAIT and work
3762 * was running and has been queued to the queue that was running it.
3763 * @retval -EBUSY if @p delay is @c K_NO_WAIT and
3764 * k_work_submit_to_queue() fails with this code.
3765 * @retval -EINVAL if @p delay is @c K_NO_WAIT and
3766 * k_work_submit_to_queue() fails with this code.
3767 * @retval -ENODEV if @p delay is @c K_NO_WAIT and
3768 * k_work_submit_to_queue() fails with this code.
3769 */
3770 int k_work_schedule_for_queue(struct k_work_q *queue,
3771 struct k_work_delayable *dwork,
3772 k_timeout_t delay);
3773
3774 /** @brief Submit an idle work item to the system work queue after a
3775 * delay.
3776 *
3777 * This is a thin wrapper around k_work_schedule_for_queue(), with all the API
3778 * characteristics of that function.
3779 *
3780 * @param dwork pointer to the delayable work item.
3781 *
3782 * @param delay the time to wait before submitting the work item. If @c
3783 * K_NO_WAIT this is equivalent to k_work_submit_to_queue().
3784 *
3785 * @return as with k_work_schedule_for_queue().
3786 */
3787 int k_work_schedule(struct k_work_delayable *dwork,
3788 k_timeout_t delay);
3789
3790 /** @brief Reschedule a work item to a queue after a delay.
3791 *
3792 * Unlike k_work_schedule_for_queue() this function can change the deadline of
3793 * a scheduled work item, and will schedule a work item that is in any state
3794 * (e.g. is idle, submitted, or running). This function does not affect
3795 * ("unsubmit") a work item that has been submitted to a queue.
3796 *
3797 * @funcprops \isr_ok
3798 *
3799 * @param queue the queue on which the work item should be submitted after the
3800 * delay.
3801 *
3802 * @param dwork pointer to the delayable work item.
3803 *
3804 * @param delay the time to wait before submitting the work item. If @c
3805 * K_NO_WAIT this is equivalent to k_work_submit_to_queue() after canceling
3806 * any previous scheduled submission.
3807 *
3808 * @note If delay is @c K_NO_WAIT ("no delay") the return values are as with
3809 * k_work_submit_to_queue().
3810 *
3811 * @retval 0 if delay is @c K_NO_WAIT and work was already on a queue
3812 * @retval 1 if
3813 * * delay is @c K_NO_WAIT and work was not submitted but has now been queued
3814 * to @p queue; or
3815 * * delay not @c K_NO_WAIT and work has been scheduled
3816 * @retval 2 if delay is @c K_NO_WAIT and work was running and has been queued
3817 * to the queue that was running it
3818 * @retval -EBUSY if @p delay is @c K_NO_WAIT and
3819 * k_work_submit_to_queue() fails with this code.
3820 * @retval -EINVAL if @p delay is @c K_NO_WAIT and
3821 * k_work_submit_to_queue() fails with this code.
3822 * @retval -ENODEV if @p delay is @c K_NO_WAIT and
3823 * k_work_submit_to_queue() fails with this code.
3824 */
3825 int k_work_reschedule_for_queue(struct k_work_q *queue,
3826 struct k_work_delayable *dwork,
3827 k_timeout_t delay);
3828
3829 /** @brief Reschedule a work item to the system work queue after a
3830 * delay.
3831 *
3832 * This is a thin wrapper around k_work_reschedule_for_queue(), with all the
3833 * API characteristics of that function.
3834 *
3835 * @param dwork pointer to the delayable work item.
3836 *
3837 * @param delay the time to wait before submitting the work item.
3838 *
3839 * @return as with k_work_reschedule_for_queue().
3840 */
3841 int k_work_reschedule(struct k_work_delayable *dwork,
3842 k_timeout_t delay);
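
/*
 * Example: a sketch of debouncing an event source with k_work_reschedule().
 * Every call pushes the deadline back, so the handler runs once, 50 ms after
 * the last event. The names debounce_handler, debounce_work and
 * on_input_event are illustrative.
 *
 * @code
 * static void debounce_handler(struct k_work *work)
 * {
 *         // act on the settled input
 * }
 *
 * K_WORK_DELAYABLE_DEFINE(debounce_work, debounce_handler);
 *
 * void on_input_event(void)
 * {
 *         (void)k_work_reschedule(&debounce_work, K_MSEC(50));
 * }
 * @endcode
 */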
3843
3844 /** @brief Flush delayable work.
3845 *
3846 * If the work is scheduled, it is immediately submitted. Then the caller
3847 * blocks until the work completes, as with k_work_flush().
3848 *
3849 * @note Be careful of caller and work queue thread relative priority. If
3850 * this function sleeps it will not return until the work queue thread
3851 * completes the tasks that allow this thread to resume.
3852 *
3853 * @note Behavior is undefined if this function is invoked on @p dwork from a
3854 * work queue running @p dwork.
3855 *
3856 * @param dwork pointer to the delayable work item.
3857 *
3858 * @param sync pointer to an opaque item containing state related to the
3859 * pending cancellation. The object must persist until the call returns, and
3860 * be accessible from both the caller thread and the work queue thread. The
3861 * object must not be used for any other flush or cancel operation until this
3862 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3863 * must be allocated in coherent memory.
3864 *
3865 * @retval true if call had to wait for completion
3866 * @retval false if work was already idle
3867 */
3868 bool k_work_flush_delayable(struct k_work_delayable *dwork,
3869 struct k_work_sync *sync);
3870
3871 /** @brief Cancel delayable work.
3872 *
3873 * Similar to k_work_cancel() but for delayable work. If the work is
3874 * scheduled or submitted it is canceled. This function does not wait for the
3875 * cancellation to complete.
3876 *
3877 * @note The work may still be running when this returns. Use
3878 * k_work_flush_delayable() or k_work_cancel_delayable_sync() to ensure it is
3879 * not running.
3880 *
3881 * @note Canceling delayable work does not prevent rescheduling it. It does
3882 * prevent submitting it until the cancellation completes.
3883 *
3884 * @funcprops \isr_ok
3885 *
3886 * @param dwork pointer to the delayable work item.
3887 *
3888 * @return the k_work_delayable_busy_get() status indicating the state of the
3889 * item after all cancellation steps performed by this call are completed.
3890 */
3891 int k_work_cancel_delayable(struct k_work_delayable *dwork);
3892
3893 /** @brief Cancel delayable work and wait.
3894 *
3895 * Like k_work_cancel_delayable() but waits until the work becomes idle.
3896 *
3897 * @note Canceling delayable work does not prevent rescheduling it. It does
3898 * prevent submitting it until the cancellation completes.
3899 *
3900 * @note Be careful of caller and work queue thread relative priority. If
3901 * this function sleeps it will not return until the work queue thread
3902 * completes the tasks that allow this thread to resume.
3903 *
3904 * @note Behavior is undefined if this function is invoked on @p dwork from a
3905 * work queue running @p dwork.
3906 *
3907 * @param dwork pointer to the delayable work item.
3908 *
3909 * @param sync pointer to an opaque item containing state related to the
3910 * pending cancellation. The object must persist until the call returns, and
3911 * be accessible from both the caller thread and the work queue thread. The
3912 * object must not be used for any other flush or cancel operation until this
3913 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3914 * must be allocated in coherent memory.
3915 *
3916 * @retval true if work was not idle (call had to wait for cancellation of a
3917 * running handler to complete, or scheduled or submitted operations were
3918 * cancelled);
3919 * @retval false otherwise
3920 */
3921 bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
3922 struct k_work_sync *sync);
3923
3924 enum {
3925 /**
3926 * @cond INTERNAL_HIDDEN
3927 */
3928
3929 /* The atomic API is used for all work and queue flags fields to
3930 * enforce sequential consistency in SMP environments.
3931 */
3932
3933 /* Bits that represent the work item states. At least nine of the
3934 * combinations are distinct valid stable states.
3935 */
3936 K_WORK_RUNNING_BIT = 0,
3937 K_WORK_CANCELING_BIT = 1,
3938 K_WORK_QUEUED_BIT = 2,
3939 K_WORK_DELAYED_BIT = 3,
3940 K_WORK_FLUSHING_BIT = 4,
3941
3942 K_WORK_MASK = BIT(K_WORK_DELAYED_BIT) | BIT(K_WORK_QUEUED_BIT)
3943 | BIT(K_WORK_RUNNING_BIT) | BIT(K_WORK_CANCELING_BIT) | BIT(K_WORK_FLUSHING_BIT),
3944
3945 /* Static work flags */
3946 K_WORK_DELAYABLE_BIT = 8,
3947 K_WORK_DELAYABLE = BIT(K_WORK_DELAYABLE_BIT),
3948
3949 /* Dynamic work queue flags */
3950 K_WORK_QUEUE_STARTED_BIT = 0,
3951 K_WORK_QUEUE_STARTED = BIT(K_WORK_QUEUE_STARTED_BIT),
3952 K_WORK_QUEUE_BUSY_BIT = 1,
3953 K_WORK_QUEUE_BUSY = BIT(K_WORK_QUEUE_BUSY_BIT),
3954 K_WORK_QUEUE_DRAIN_BIT = 2,
3955 K_WORK_QUEUE_DRAIN = BIT(K_WORK_QUEUE_DRAIN_BIT),
3956 K_WORK_QUEUE_PLUGGED_BIT = 3,
3957 K_WORK_QUEUE_PLUGGED = BIT(K_WORK_QUEUE_PLUGGED_BIT),
3958 K_WORK_QUEUE_STOP_BIT = 4,
3959 K_WORK_QUEUE_STOP = BIT(K_WORK_QUEUE_STOP_BIT),
3960
3961 /* Static work queue flags */
3962 K_WORK_QUEUE_NO_YIELD_BIT = 8,
3963 K_WORK_QUEUE_NO_YIELD = BIT(K_WORK_QUEUE_NO_YIELD_BIT),
3964
3965 /**
3966 * INTERNAL_HIDDEN @endcond
3967 */
3968 /* Transient work flags */
3969
3970 /** @brief Flag indicating a work item that is running under a work
3971 * queue thread.
3972 *
3973 * Accessed via k_work_busy_get(). May co-occur with other flags.
3974 */
3975 K_WORK_RUNNING = BIT(K_WORK_RUNNING_BIT),
3976
3977 /** @brief Flag indicating a work item that is being canceled.
3978 *
3979 * Accessed via k_work_busy_get(). May co-occur with other flags.
3980 */
3981 K_WORK_CANCELING = BIT(K_WORK_CANCELING_BIT),
3982
3983 /** @brief Flag indicating a work item that has been submitted to a
3984 * queue but has not started running.
3985 *
3986 * Accessed via k_work_busy_get(). May co-occur with other flags.
3987 */
3988 K_WORK_QUEUED = BIT(K_WORK_QUEUED_BIT),
3989
3990 /** @brief Flag indicating a delayed work item that is scheduled for
3991 * submission to a queue.
3992 *
3993 * Accessed via k_work_busy_get(). May co-occur with other flags.
3994 */
3995 K_WORK_DELAYED = BIT(K_WORK_DELAYED_BIT),
3996
3997 /** @brief Flag indicating a synced work item that is being flushed.
3998 *
3999 * Accessed via k_work_busy_get(). May co-occur with other flags.
4000 */
4001 K_WORK_FLUSHING = BIT(K_WORK_FLUSHING_BIT),
4002 };
4003
4004 /** @brief A structure used to submit work. */
4005 struct k_work {
4006 /* All fields are protected by the work module spinlock. No fields
4007 * are to be accessed except through kernel API.
4008 */
4009
4010 /* Node to link into k_work_q pending list. */
4011 sys_snode_t node;
4012
4013 /* The function to be invoked by the work queue thread. */
4014 k_work_handler_t handler;
4015
4016 /* The queue on which the work item was last submitted. */
4017 struct k_work_q *queue;
4018
4019 /* State of the work item.
4020 *
4021 * The item can be DELAYED, QUEUED, and RUNNING simultaneously.
4022 *
4023 * It can be RUNNING and CANCELING simultaneously.
4024 */
4025 uint32_t flags;
4026 };
4027
4028 #define Z_WORK_INITIALIZER(work_handler) { \
4029 .handler = (work_handler), \
4030 }
4031
4032 /** @brief A structure used to submit work after a delay. */
4033 struct k_work_delayable {
4034 /* The work item. */
4035 struct k_work work;
4036
4037 /* Timeout used to submit work after a delay. */
4038 struct _timeout timeout;
4039
4040 /* The queue to which the work should be submitted. */
4041 struct k_work_q *queue;
4042 };
4043
4044 #define Z_WORK_DELAYABLE_INITIALIZER(work_handler) { \
4045 .work = { \
4046 .handler = (work_handler), \
4047 .flags = K_WORK_DELAYABLE, \
4048 }, \
4049 }
4050
4051 /**
4052 * @brief Initialize a statically-defined delayable work item.
4053 *
4054 * This macro can be used to initialize a statically-defined delayable
4055 * work item, prior to its first use. For example,
4056 *
4057 * @code static K_WORK_DELAYABLE_DEFINE(<dwork>, <work_handler>); @endcode
4058 *
4059  * Note that if the runtime dependencies permit it, initializing with
4060  * k_work_init_delayable() instead will eliminate the initialized
4061 * object in ROM that is produced by this macro and copied in at
4062 * system startup.
4063 *
4064 * @param work Symbol name for delayable work item object
4065 * @param work_handler Function to invoke each time work item is processed.
4066 */
4067 #define K_WORK_DELAYABLE_DEFINE(work, work_handler) \
4068 struct k_work_delayable work \
4069 = Z_WORK_DELAYABLE_INITIALIZER(work_handler)
4070
4071 /**
4072 * @cond INTERNAL_HIDDEN
4073 */
4074
4075 /* Record used to wait for work to flush.
4076 *
4077 * The work item is inserted into the queue that will process (or is
4078 * processing) the item, and will be processed as soon as the item
4079 * completes. When the flusher is processed the semaphore will be
4080 * signaled, releasing the thread waiting for the flush.
4081 */
4082 struct z_work_flusher {
4083 struct k_work work;
4084 struct k_sem sem;
4085 };
4086
4087 /* Record used to wait for work to complete a cancellation.
4088 *
4089 * The work item is inserted into a global queue of pending cancels.
4090 * When a cancelling work item goes idle any matching waiters are
4091 * removed from pending_cancels and are woken.
4092 */
4093 struct z_work_canceller {
4094 sys_snode_t node;
4095 struct k_work *work;
4096 struct k_sem sem;
4097 };
4098
4099 /**
4100 * INTERNAL_HIDDEN @endcond
4101 */
4102
4103 /** @brief A structure holding internal state for a pending synchronous
4104 * operation on a work item or queue.
4105 *
4106 * Instances of this type are provided by the caller for invocation of
4107 * k_work_flush(), k_work_cancel_sync() and sibling flush and cancel APIs. A
4108 * referenced object must persist until the call returns, and be accessible
4109 * from both the caller thread and the work queue thread.
4110 *
4111 * @note If CONFIG_KERNEL_COHERENCE is enabled the object must be allocated in
4112 * coherent memory; see arch_mem_coherent(). The stack on these architectures
4113  * is generally not coherent, so the object must not be stack-allocated.
4114  * Violations are detected by runtime assertion.
4115 */
4116 struct k_work_sync {
4117 union {
4118 struct z_work_flusher flusher;
4119 struct z_work_canceller canceller;
4120 };
4121 };
4122
4123 /** @brief A structure holding optional configuration items for a work
4124 * queue.
4125 *
4126 * This structure, and values it references, are not retained by
4127 * k_work_queue_start().
4128 */
4129 struct k_work_queue_config {
4130 /** The name to be given to the work queue thread.
4131 *
4132 * If left null the thread will not have a name.
4133 */
4134 const char *name;
4135
4136 /** Control whether the work queue thread should yield between
4137 * items.
4138 *
4139 * Yielding between items helps guarantee the work queue
4140 * thread does not starve other threads, including cooperative
4141 * ones released by a work item. This is the default behavior.
4142 *
4143 * Set this to @c true to prevent the work queue thread from
4144 * yielding between items. This may be appropriate when a
4145 * sequence of items should complete without yielding
4146 * control.
4147 */
4148 bool no_yield;
4149
4150 /** Control whether the work queue thread should be marked as
4151 	 * an essential thread.
4152 */
4153 bool essential;
4154 };
4155
4156 /** @brief A structure used to hold work until it can be processed. */
4157 struct k_work_q {
4158 /* The thread that animates the work. */
4159 struct k_thread thread;
4160
4161 /* All the following fields must be accessed only while the
4162 * work module spinlock is held.
4163 */
4164
4165 /* List of k_work items to be worked. */
4166 sys_slist_t pending;
4167
4168 /* Wait queue for idle work thread. */
4169 _wait_q_t notifyq;
4170
4171 /* Wait queue for threads waiting for the queue to drain. */
4172 _wait_q_t drainq;
4173
4174 /* Flags describing queue state. */
4175 uint32_t flags;
4176 };
4177
4178 /* Provide the implementation for inline functions declared above */
4179
4180 static inline bool k_work_is_pending(const struct k_work *work)
4181 {
4182 return k_work_busy_get(work) != 0;
4183 }
4184
4185 static inline struct k_work_delayable *
4186 k_work_delayable_from_work(struct k_work *work)
4187 {
4188 return CONTAINER_OF(work, struct k_work_delayable, work);
4189 }
4190
4191 static inline bool k_work_delayable_is_pending(
4192 const struct k_work_delayable *dwork)
4193 {
4194 return k_work_delayable_busy_get(dwork) != 0;
4195 }
4196
4197 static inline k_ticks_t k_work_delayable_expires_get(
4198 const struct k_work_delayable *dwork)
4199 {
4200 return z_timeout_expires(&dwork->timeout);
4201 }
4202
4203 static inline k_ticks_t k_work_delayable_remaining_get(
4204 const struct k_work_delayable *dwork)
4205 {
4206 return z_timeout_remaining(&dwork->timeout);
4207 }
4208
4209 static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue)
4210 {
4211 return &queue->thread;
4212 }
4213
4214 /** @} */
4215
4216 struct k_work_user;
4217
4218 /**
4219 * @addtogroup workqueue_apis
4220 * @{
4221 */
4222
4223 /**
4224 * @typedef k_work_user_handler_t
4225 * @brief Work item handler function type for user work queues.
4226 *
4227 * A work item's handler function is executed by a user workqueue's thread
4228 * when the work item is processed by the workqueue.
4229 *
4230 * @param work Address of the work item.
4231 */
4232 typedef void (*k_work_user_handler_t)(struct k_work_user *work);
4233
4234 /**
4235 * @cond INTERNAL_HIDDEN
4236 */
4237
4238 struct k_work_user_q {
4239 struct k_queue queue;
4240 struct k_thread thread;
4241 };
4242
4243 enum {
4244 K_WORK_USER_STATE_PENDING, /* Work item pending state */
4245 };
4246
4247 struct k_work_user {
4248 void *_reserved; /* Used by k_queue implementation. */
4249 k_work_user_handler_t handler;
4250 atomic_t flags;
4251 };
4252
4253 /**
4254 * INTERNAL_HIDDEN @endcond
4255 */
4256
4257 #if defined(__cplusplus) && ((__cplusplus - 0) < 202002L)
4258 #define Z_WORK_USER_INITIALIZER(work_handler) { NULL, work_handler, 0 }
4259 #else
4260 #define Z_WORK_USER_INITIALIZER(work_handler) \
4261 { \
4262 ._reserved = NULL, \
4263 .handler = (work_handler), \
4264 .flags = 0 \
4265 }
4266 #endif
4267
4268 /**
4269 * @brief Initialize a statically-defined user work item.
4270 *
4271 * This macro can be used to initialize a statically-defined user work
4272 * item, prior to its first use. For example,
4273 *
4274 * @code static K_WORK_USER_DEFINE(<work>, <work_handler>); @endcode
4275 *
4276 * @param work Symbol name for work item object
4277 * @param work_handler Function to invoke each time work item is processed.
4278 */
4279 #define K_WORK_USER_DEFINE(work, work_handler) \
4280 struct k_work_user work = Z_WORK_USER_INITIALIZER(work_handler)
4281
4282 /**
4283 * @brief Initialize a userspace work item.
4284 *
4285 * This routine initializes a user workqueue work item, prior to its
4286 * first use.
4287 *
4288 * @param work Address of work item.
4289 * @param handler Function to invoke each time work item is processed.
4290 */
4291 static inline void k_work_user_init(struct k_work_user *work,
4292 k_work_user_handler_t handler)
4293 {
4294 *work = (struct k_work_user)Z_WORK_USER_INITIALIZER(handler);
4295 }
4296
4297 /**
4298 * @brief Check if a userspace work item is pending.
4299 *
4300 * This routine indicates if user work item @a work is pending in a workqueue's
4301 * queue.
4302 *
4303 * @note Checking if the work is pending gives no guarantee that the
4304 * work will still be pending when this information is used. It is up to
4305 * the caller to make sure that this information is used in a safe manner.
4306 *
4307 * @funcprops \isr_ok
4308 *
4309 * @param work Address of work item.
4310 *
4311 * @return true if work item is pending, or false if it is not pending.
4312 */
4313 static inline bool k_work_user_is_pending(struct k_work_user *work)
4314 {
4315 return atomic_test_bit(&work->flags, K_WORK_USER_STATE_PENDING);
4316 }
4317
4318 /**
4319 * @brief Submit a work item to a user mode workqueue
4320 *
4321 * Submits a work item to a workqueue that runs in user mode. A temporary
4322  * memory allocation is made from the caller's resource pool, which is freed
4323 * once the worker thread consumes the k_work item. The workqueue
4324 * thread must have memory access to the k_work item being submitted. The caller
4325 * must have permission granted on the work_q parameter's queue object.
4326 *
4327 * @funcprops \isr_ok
4328 *
4329 * @param work_q Address of workqueue.
4330 * @param work Address of work item.
4331 *
4332 * @retval -EBUSY if the work item was already in some workqueue
4333 * @retval -ENOMEM if no memory for thread resource pool allocation
4334 * @retval 0 Success
4335 */
4336 static inline int k_work_user_submit_to_queue(struct k_work_user_q *work_q,
4337 struct k_work_user *work)
4338 {
4339 int ret = -EBUSY;
4340
4341 if (!atomic_test_and_set_bit(&work->flags,
4342 K_WORK_USER_STATE_PENDING)) {
4343 ret = k_queue_alloc_append(&work_q->queue, work);
4344
4345 /* Couldn't insert into the queue. Clear the pending bit
4346 * so the work item can be submitted again
4347 */
4348 if (ret != 0) {
4349 atomic_clear_bit(&work->flags,
4350 K_WORK_USER_STATE_PENDING);
4351 }
4352 }
4353
4354 return ret;
4355 }
4356
4357 /**
4358 * @brief Start a workqueue in user mode
4359 *
4360 * This works identically to k_work_queue_start() except it is callable from
4361 * user mode, and the worker thread created will run in user mode. The caller
4362 * must have permissions granted on both the work_q parameter's thread and
4363 * queue objects, and the same restrictions on priority apply as
4364 * k_thread_create().
4365 *
4366 * @param work_q Address of workqueue.
4367 * @param stack Pointer to work queue thread's stack space, as defined by
4368 * K_THREAD_STACK_DEFINE()
4369 * @param stack_size Size of the work queue thread's stack (in bytes), which
4370 * should either be the same constant passed to
4371 * K_THREAD_STACK_DEFINE() or the value of K_THREAD_STACK_SIZEOF().
4372 * @param prio Priority of the work queue's thread.
4373 * @param name optional thread name. If not null a copy is made into the
4374 * thread's name buffer.
4375 */
4376 void k_work_user_queue_start(struct k_work_user_q *work_q,
4377 k_thread_stack_t *stack,
4378 size_t stack_size, int prio,
4379 const char *name);
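
/*
 * Example: a sketch of starting a user-mode work queue and submitting to it.
 * The stack size and priority values are illustrative; the caller needs the
 * permissions described above.
 *
 * @code
 * K_THREAD_STACK_DEFINE(user_q_stack, 2048);
 * static struct k_work_user_q user_work_q;
 *
 * static void user_handler(struct k_work_user *work)
 * {
 *         // runs in the user-mode work queue thread
 * }
 *
 * K_WORK_USER_DEFINE(user_work, user_handler);
 *
 * void start_and_submit(void)
 * {
 *         k_work_user_queue_start(&user_work_q, user_q_stack,
 *                                 K_THREAD_STACK_SIZEOF(user_q_stack),
 *                                 5, "user_work_q");
 *         (void)k_work_user_submit_to_queue(&user_work_q, &user_work);
 * }
 * @endcode
 */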
4380
4381 /**
4382 * @brief Access the user mode thread that animates a work queue.
4383 *
4384 * This is necessary to grant a user mode work queue thread access to things
4385 * the work items it will process are expected to use.
4386 *
4387 * @param work_q pointer to the user mode queue structure.
4388 *
4389 * @return the user mode thread associated with the work queue.
4390 */
4391 static inline k_tid_t k_work_user_queue_thread_get(struct k_work_user_q *work_q)
4392 {
4393 return &work_q->thread;
4394 }
4395
4396 /** @} */
4397
4398 /**
4399 * @cond INTERNAL_HIDDEN
4400 */
4401
4402 struct k_work_poll {
4403 struct k_work work;
4404 struct k_work_q *workq;
4405 struct z_poller poller;
4406 struct k_poll_event *events;
4407 int num_events;
4408 k_work_handler_t real_handler;
4409 struct _timeout timeout;
4410 int poll_result;
4411 };
4412
4413 /**
4414 * INTERNAL_HIDDEN @endcond
4415 */
4416
4417 /**
4418 * @addtogroup workqueue_apis
4419 * @{
4420 */
4421
4422 /**
4423 * @brief Initialize a statically-defined work item.
4424 *
4425 * This macro can be used to initialize a statically-defined workqueue work
4426 * item, prior to its first use. For example,
4427 *
4428 * @code static K_WORK_DEFINE(<work>, <work_handler>); @endcode
4429 *
4430 * @param work Symbol name for work item object
4431 * @param work_handler Function to invoke each time work item is processed.
4432 */
4433 #define K_WORK_DEFINE(work, work_handler) \
4434 struct k_work work = Z_WORK_INITIALIZER(work_handler)
4435
4436 /**
4437 * @brief Initialize a triggered work item.
4438 *
4439 * This routine initializes a workqueue triggered work item, prior to
4440 * its first use.
4441 *
4442 * @param work Address of triggered work item.
4443 * @param handler Function to invoke each time work item is processed.
4444 */
4445 void k_work_poll_init(struct k_work_poll *work,
4446 k_work_handler_t handler);
4447
4448 /**
4449 * @brief Submit a triggered work item.
4450 *
4451 * This routine schedules work item @a work to be processed by workqueue
4452 * @a work_q when one of the given @a events is signaled. The routine
4453  * initiates an internal poller for the work item and then returns to the caller.
4454  * Only when one of the watched events happens is the work item actually
4455  * submitted to the workqueue, where it becomes pending.
4456 *
4457 * Submitting a previously submitted triggered work item that is still
4458 * waiting for the event cancels the existing submission and reschedules it
4459  * using the new event list. Note that this behavior is inherently subject
4460 * to race conditions with the pre-existing triggered work item and work queue,
4461 * so care must be taken to synchronize such resubmissions externally.
4462 *
4463 * @funcprops \isr_ok
4464 *
4465 * @warning
4466  * The provided array of events, as well as the triggered work item, must be placed
4467 * in persistent memory (valid until work handler execution or work
4468 * cancellation) and cannot be modified after submission.
4469 *
4470 * @param work_q Address of workqueue.
4471 * @param work Address of delayed work item.
4472 * @param events An array of events which trigger the work.
4473 * @param num_events The number of events in the array.
4474 * @param timeout Timeout after which the work will be scheduled
4475 * for execution even if not triggered.
4476 *
4477 *
4478 * @retval 0 Work item started watching for events.
4479 * @retval -EINVAL Work item is being processed or has completed its work.
4480 * @retval -EADDRINUSE Work item is pending on a different workqueue.
4481 */
4482 int k_work_poll_submit_to_queue(struct k_work_q *work_q,
4483 struct k_work_poll *work,
4484 struct k_poll_event *events,
4485 int num_events,
4486 k_timeout_t timeout);
4487
4488 /**
4489 * @brief Submit a triggered work item to the system workqueue.
4490 *
4491  * This routine schedules work item @a work to be processed by the system
4492  * workqueue when one of the given @a events is signaled. The routine
4493  * initiates an internal poller for the work item and then returns to the caller.
4494  * Only when one of the watched events happens is the work item actually
4495  * submitted to the workqueue, where it becomes pending.
4496 *
4497 * Submitting a previously submitted triggered work item that is still
4498 * waiting for the event cancels the existing submission and reschedules it
4499  * using the new event list. Note that this behavior is inherently subject
4500 * to race conditions with the pre-existing triggered work item and work queue,
4501 * so care must be taken to synchronize such resubmissions externally.
4502 *
4503 * @funcprops \isr_ok
4504 *
4505 * @warning
4506  * The provided array of events, as well as the triggered work item, must not be
4507 * modified until the item has been processed by the workqueue.
4508 *
4509 * @param work Address of delayed work item.
4510 * @param events An array of events which trigger the work.
4511 * @param num_events The number of events in the array.
4512 * @param timeout Timeout after which the work will be scheduled
4513 * for execution even if not triggered.
4514 *
4515 * @retval 0 Work item started watching for events.
4516 * @retval -EINVAL Work item is being processed or has completed its work.
4517 * @retval -EADDRINUSE Work item is pending on a different workqueue.
4518 */
4519 int k_work_poll_submit(struct k_work_poll *work,
4520 struct k_poll_event *events,
4521 int num_events,
4522 k_timeout_t timeout);
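
/*
 * Example: a sketch of triggered work driven by a poll signal. The names
 * my_signal, my_event, my_pwork and my_handler are illustrative.
 *
 * @code
 * static struct k_poll_signal my_signal =
 *         K_POLL_SIGNAL_INITIALIZER(my_signal);
 * static struct k_poll_event my_event =
 *         K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL,
 *                                  K_POLL_MODE_NOTIFY_ONLY,
 *                                  &my_signal);
 * static struct k_work_poll my_pwork;
 *
 * static void my_handler(struct k_work *work)
 * {
 *         // runs once the signal is raised, or after the 1 s timeout
 * }
 *
 * void setup(void)
 * {
 *         k_work_poll_init(&my_pwork, my_handler);
 *         (void)k_work_poll_submit(&my_pwork, &my_event, 1, K_SECONDS(1));
 * }
 *
 * // elsewhere, e.g. in an ISR: k_poll_signal_raise(&my_signal, 0);
 * @endcode
 */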
4523
4524 /**
4525 * @brief Cancel a triggered work item.
4526 *
4527 * This routine cancels the submission of triggered work item @a work.
4528  * A triggered work item can only be canceled while no event has yet triggered
4529  * submission of the work item.
4530 *
4531 * @funcprops \isr_ok
4532 *
4533 * @param work Address of delayed work item.
4534 *
4535 * @retval 0 Work item canceled.
4536 * @retval -EINVAL Work item is being processed or has completed its work.
4537 */
4538 int k_work_poll_cancel(struct k_work_poll *work);
4539
4540 /** @} */
4541
4542 /**
4543 * @defgroup msgq_apis Message Queue APIs
4544 * @ingroup kernel_apis
4545 * @{
4546 */
4547
4548 /**
4549 * @brief Message Queue Structure
4550 */
4551 struct k_msgq {
4552 /** Message queue wait queue */
4553 _wait_q_t wait_q;
4554 /** Lock */
4555 struct k_spinlock lock;
4556 /** Message size */
4557 size_t msg_size;
4558 /** Maximal number of messages */
4559 uint32_t max_msgs;
4560 /** Start of message buffer */
4561 char *buffer_start;
4562 /** End of message buffer */
4563 char *buffer_end;
4564 /** Read pointer */
4565 char *read_ptr;
4566 /** Write pointer */
4567 char *write_ptr;
4568 /** Number of used messages */
4569 uint32_t used_msgs;
4570
4571 Z_DECL_POLL_EVENT
4572
4573 	/** Message queue flags */
4574 uint8_t flags;
4575
4576 SYS_PORT_TRACING_TRACKING_FIELD(k_msgq)
4577
4578 #ifdef CONFIG_OBJ_CORE_MSGQ
4579 struct k_obj_core obj_core;
4580 #endif
4581 };
4582 /**
4583 * @cond INTERNAL_HIDDEN
4584 */
4585
4586
4587 #define Z_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \
4588 { \
4589 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
4590 .msg_size = q_msg_size, \
4591 .max_msgs = q_max_msgs, \
4592 .buffer_start = q_buffer, \
4593 .buffer_end = q_buffer + (q_max_msgs * q_msg_size), \
4594 .read_ptr = q_buffer, \
4595 .write_ptr = q_buffer, \
4596 .used_msgs = 0, \
4597 Z_POLL_EVENT_OBJ_INIT(obj) \
4598 }
4599
4600 /**
4601 * INTERNAL_HIDDEN @endcond
4602 */
4603
4604
4605 #define K_MSGQ_FLAG_ALLOC BIT(0)
4606
4607 /**
4608 * @brief Message Queue Attributes
4609 */
4610 struct k_msgq_attrs {
4611 /** Message Size */
4612 size_t msg_size;
4613 /** Maximal number of messages */
4614 uint32_t max_msgs;
4615 /** Used messages */
4616 uint32_t used_msgs;
4617 };
4618
4619
4620 /**
4621 * @brief Statically define and initialize a message queue.
4622 *
4623 * The message queue's ring buffer contains space for @a q_max_msgs messages,
4624 * each of which is @a q_msg_size bytes long. Alignment of the message queue's
4625  * ring buffer is not necessary; setting @a q_align to 1 is sufficient.
4626 *
4627 * The message queue can be accessed outside the module where it is defined
4628 * using:
4629 *
4630 * @code extern struct k_msgq <name>; @endcode
4631 *
4632 * @param q_name Name of the message queue.
4633 * @param q_msg_size Message size (in bytes).
4634 * @param q_max_msgs Maximum number of messages that can be queued.
4635 * @param q_align Alignment of the message queue's ring buffer (power of 2).
4636 *
4637 */
4638 #define K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align) \
4639 static char __noinit __aligned(q_align) \
4640 _k_fifo_buf_##q_name[(q_max_msgs) * (q_msg_size)]; \
4641 STRUCT_SECTION_ITERABLE(k_msgq, q_name) = \
4642 Z_MSGQ_INITIALIZER(q_name, _k_fifo_buf_##q_name, \
4643 (q_msg_size), (q_max_msgs))
4644
4645 /**
4646 * @brief Initialize a message queue.
4647 *
4648 * This routine initializes a message queue object, prior to its first use.
4649 *
4650 * The message queue's ring buffer must contain space for @a max_msgs messages,
4651 * each of which is @a msg_size bytes long. Alignment of the message queue's
4652 * ring buffer is not necessary.
4653 *
4654 * @param msgq Address of the message queue.
4655 * @param buffer Pointer to ring buffer that holds queued messages.
4656 * @param msg_size Message size (in bytes).
4657 * @param max_msgs Maximum number of messages that can be queued.
4658 */
4659 void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
4660 uint32_t max_msgs);
4661
4662 /**
4663 * @brief Initialize a message queue.
4664 *
4665 * This routine initializes a message queue object, prior to its first use,
4666 * allocating its internal ring buffer from the calling thread's resource
4667 * pool.
4668 *
4669 * Memory allocated for the ring buffer can be released by calling
4670 * k_msgq_cleanup(), or if userspace is enabled and the msgq object loses
4671 * all of its references.
4672 *
4673 * @param msgq Address of the message queue.
4674 * @param msg_size Message size (in bytes).
4675 * @param max_msgs Maximum number of messages that can be queued.
4676 *
4677 * @return 0 on success, -ENOMEM if there was insufficient memory in the
4678 * thread's resource pool, or -EINVAL if the size parameters cause
4679 * an integer overflow.
4680 */
4681 __syscall int k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
4682 uint32_t max_msgs);
4683
4684 /**
4685 * @brief Release allocated buffer for a queue
4686 *
4687 * Releases memory allocated for the ring buffer.
4688 *
4689  * @param msgq message queue to clean up
4690 *
4691 * @retval 0 on success
4692 * @retval -EBUSY Queue not empty
4693 */
4694 int k_msgq_cleanup(struct k_msgq *msgq);
4695
4696 /**
4697 * @brief Send a message to a message queue.
4698 *
4699  * This routine sends a message to message queue @a msgq.
4700 *
4701 * @note The message content is copied from @a data into @a msgq and the @a data
4702 * pointer is not retained, so the message content will not be modified
4703 * by this function.
4704 *
4705 * @funcprops \isr_ok
4706 *
4707 * @param msgq Address of the message queue.
4708 * @param data Pointer to the message.
4709 * @param timeout Waiting period to add the message, or one of the special
4710 * values K_NO_WAIT and K_FOREVER.
4711 *
4712 * @retval 0 Message sent.
4713 * @retval -ENOMSG Returned without waiting or queue purged.
4714 * @retval -EAGAIN Waiting period timed out.
4715 */
4716 __syscall int k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
4717
4718 /**
4719 * @brief Receive a message from a message queue.
4720 *
4721  * This routine receives a message from message queue @a msgq in a "first in,
4722 * first out" manner.
4723 *
4724 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
4725 *
4726 * @funcprops \isr_ok
4727 *
4728 * @param msgq Address of the message queue.
4729 * @param data Address of area to hold the received message.
4730 * @param timeout Waiting period to receive the message,
4731 * or one of the special values K_NO_WAIT and
4732 * K_FOREVER.
4733 *
4734 * @retval 0 Message received.
4735 * @retval -ENOMSG Returned without waiting or queue purged.
4736 * @retval -EAGAIN Waiting period timed out.
4737 */
4738 __syscall int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout);
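
/*
 * Example: a sketch of a producer/consumer pair over a queue of ten
 * fixed-size records. The type struct my_msg and both functions are
 * illustrative.
 *
 * @code
 * struct my_msg {
 *         uint32_t id;
 *         uint32_t value;
 * };
 *
 * K_MSGQ_DEFINE(my_msgq, sizeof(struct my_msg), 10, 4);
 *
 * void producer(void)
 * {
 *         struct my_msg m = { .id = 1, .value = 42 };
 *
 *         // give up if the queue stays full for 10 ms
 *         (void)k_msgq_put(&my_msgq, &m, K_MSEC(10));
 * }
 *
 * void consumer(void)
 * {
 *         struct my_msg m;
 *
 *         if (k_msgq_get(&my_msgq, &m, K_FOREVER) == 0) {
 *                 // m holds a copy of the oldest queued message
 *         }
 * }
 * @endcode
 */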
4739
4740 /**
4741 * @brief Peek/read a message from a message queue.
4742 *
4743  * This routine reads a message from message queue @a msgq in a "first in,
4744 * first out" manner and leaves the message in the queue.
4745 *
4746 * @funcprops \isr_ok
4747 *
4748 * @param msgq Address of the message queue.
4749 * @param data Address of area to hold the message read from the queue.
4750 *
4751 * @retval 0 Message read.
4752 * @retval -ENOMSG Returned when the queue has no message.
4753 */
4754 __syscall int k_msgq_peek(struct k_msgq *msgq, void *data);
4755
4756 /**
4757 * @brief Peek/read a message from a message queue at the specified index
4758 *
4759 * This routine reads a message from message queue at the specified index
4760 * and leaves the message in the queue.
4761  * k_msgq_peek_at(msgq, data, 0) is equivalent to k_msgq_peek(msgq, data).
4762 *
4763 * @funcprops \isr_ok
4764 *
4765 * @param msgq Address of the message queue.
4766 * @param data Address of area to hold the message read from the queue.
4767 * @param idx Message queue index at which to peek
4768 *
4769 * @retval 0 Message read.
4770 * @retval -ENOMSG Returned when the queue has no message at index.
4771 */
4772 __syscall int k_msgq_peek_at(struct k_msgq *msgq, void *data, uint32_t idx);
4773
4774 /**
4775 * @brief Purge a message queue.
4776 *
4777 * This routine discards all unreceived messages in a message queue's ring
4778 * buffer. Any threads that are blocked waiting to send a message to the
4779 * message queue are unblocked and see an -ENOMSG error code.
4780 *
4781 * @param msgq Address of the message queue.
4782 */
4783 __syscall void k_msgq_purge(struct k_msgq *msgq);
4784
4785 /**
4786 * @brief Get the amount of free space in a message queue.
4787 *
4788 * This routine returns the number of unused entries in a message queue's
4789 * ring buffer.
4790 *
4791 * @param msgq Address of the message queue.
4792 *
4793 * @return Number of unused ring buffer entries.
4794 */
4795 __syscall uint32_t k_msgq_num_free_get(struct k_msgq *msgq);
4796
4797 /**
4798 * @brief Get basic attributes of a message queue.
4799 *
4800  * This routine fetches the basic attributes of a message queue into the @a attrs argument.
4801 *
4802 * @param msgq Address of the message queue.
4803 * @param attrs pointer to message queue attribute structure.
4804 */
4805 __syscall void k_msgq_get_attrs(struct k_msgq *msgq,
4806 struct k_msgq_attrs *attrs);
4807
4808
4809 static inline uint32_t z_impl_k_msgq_num_free_get(struct k_msgq *msgq)
4810 {
4811 return msgq->max_msgs - msgq->used_msgs;
4812 }
4813
4814 /**
4815 * @brief Get the number of messages in a message queue.
4816 *
4817 * This routine returns the number of messages in a message queue's ring buffer.
4818 *
4819 * @param msgq Address of the message queue.
4820 *
4821 * @return Number of messages.
4822 */
4823 __syscall uint32_t k_msgq_num_used_get(struct k_msgq *msgq);
4824
4825 static inline uint32_t z_impl_k_msgq_num_used_get(struct k_msgq *msgq)
4826 {
4827 return msgq->used_msgs;
4828 }
4829
4830 /** @} */
4831
4832 /**
4833 * @defgroup mailbox_apis Mailbox APIs
4834 * @ingroup kernel_apis
4835 * @{
4836 */
4837
4838 /**
4839 * @brief Mailbox Message Structure
4840 *
4841 */
4842 struct k_mbox_msg {
4843 /** size of message (in bytes) */
4844 size_t size;
4845 /** application-defined information value */
4846 uint32_t info;
4847 /** sender's message data buffer */
4848 void *tx_data;
4849 /** source thread id */
4850 k_tid_t rx_source_thread;
4851 /** target thread id */
4852 k_tid_t tx_target_thread;
4853 /** internal use only - thread waiting on send (may be a dummy) */
4854 k_tid_t _syncing_thread;
4855 #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
4856 /** internal use only - semaphore used during asynchronous send */
4857 struct k_sem *_async_sem;
4858 #endif
4859 };
4860 /**
4861 * @brief Mailbox Structure
4862 *
4863 */
4864 struct k_mbox {
4865 /** Transmit messages queue */
4866 _wait_q_t tx_msg_queue;
4867 /** Receive message queue */
4868 _wait_q_t rx_msg_queue;
4869 struct k_spinlock lock;
4870
4871 SYS_PORT_TRACING_TRACKING_FIELD(k_mbox)
4872
4873 #ifdef CONFIG_OBJ_CORE_MAILBOX
4874 struct k_obj_core obj_core;
4875 #endif
4876 };
4877 /**
4878 * @cond INTERNAL_HIDDEN
4879 */
4880
4881 #define Z_MBOX_INITIALIZER(obj) \
4882 { \
4883 .tx_msg_queue = Z_WAIT_Q_INIT(&obj.tx_msg_queue), \
4884 .rx_msg_queue = Z_WAIT_Q_INIT(&obj.rx_msg_queue), \
4885 }
4886
4887 /**
4888 * INTERNAL_HIDDEN @endcond
4889 */
4890
4891 /**
4892 * @brief Statically define and initialize a mailbox.
4893 *
4894  * The mailbox can be accessed outside the module where it is defined using:
4895 *
4896 * @code extern struct k_mbox <name>; @endcode
4897 *
4898 * @param name Name of the mailbox.
4899 */
4900 #define K_MBOX_DEFINE(name) \
4901 STRUCT_SECTION_ITERABLE(k_mbox, name) = \
4902 		Z_MBOX_INITIALIZER(name)
4903
4904 /**
4905 * @brief Initialize a mailbox.
4906 *
4907 * This routine initializes a mailbox object, prior to its first use.
4908 *
4909 * @param mbox Address of the mailbox.
4910 */
4911 void k_mbox_init(struct k_mbox *mbox);
4912
4913 /**
4914 * @brief Send a mailbox message in a synchronous manner.
4915 *
4916 * This routine sends a message to @a mbox and waits for a receiver to both
4917 * receive and process it. The message data may be in a buffer or non-existent
4918 * (i.e. an empty message).
4919 *
4920 * @param mbox Address of the mailbox.
4921 * @param tx_msg Address of the transmit message descriptor.
4922 * @param timeout Waiting period for the message to be received,
4923 * or one of the special values K_NO_WAIT
4924 * and K_FOREVER. Once the message has been received,
4925 * this routine waits as long as necessary for the message
4926 * to be completely processed.
4927 *
4928 * @retval 0 Message sent.
4929 * @retval -ENOMSG Returned without waiting.
4930 * @retval -EAGAIN Waiting period timed out.
4931 */
4932 int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
4933 k_timeout_t timeout);

/**
 * @brief Send a mailbox message in an asynchronous manner.
 *
 * This routine sends a message to @a mbox without waiting for a receiver
 * to process it. The message data may be in a buffer or non-existent
 * (i.e. an empty message). Optionally, the semaphore @a sem will be given
 * when the message has been both received and completely processed by
 * the receiver.
 *
 * @param mbox Address of the mailbox.
 * @param tx_msg Address of the transmit message descriptor.
 * @param sem Address of a semaphore, or NULL if none is needed.
 */
void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
		      struct k_sem *sem);

/**
 * @brief Receive a mailbox message.
 *
 * This routine receives a message from @a mbox, then optionally retrieves
 * its data and disposes of the message.
 *
 * @param mbox Address of the mailbox.
 * @param rx_msg Address of the receive message descriptor.
 * @param buffer Address of the buffer to receive data, or NULL to defer data
 *               retrieval and message disposal until later.
 * @param timeout Waiting period for a message to be received,
 *                or one of the special values K_NO_WAIT and K_FOREVER.
 *
 * @retval 0 Message received.
 * @retval -ENOMSG Returned without waiting.
 * @retval -EAGAIN Waiting period timed out.
 */
int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg,
	       void *buffer, k_timeout_t timeout);

/**
 * @brief Retrieve mailbox message data into a buffer.
 *
 * This routine completes the processing of a received message by retrieving
 * its data into a buffer, then disposing of the message.
 *
 * Alternatively, this routine can be used to dispose of a received message
 * without retrieving its data.
 *
 * @param rx_msg Address of the receive message descriptor.
 * @param buffer Address of the buffer to receive data, or NULL to discard
 *               the data.
 */
void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer);
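
/*
 * Deferred-retrieval sketch (illustrative; "my_mailbox" is hypothetical).
 * Passing NULL to k_mbox_get() accepts the message first; the data is
 * copied out (or discarded) later with k_mbox_data_get().
 *
 *	struct k_mbox_msg recv_msg;
 *	char rx_buf[64];
 *
 *	recv_msg.size = sizeof(rx_buf);       // maximum size to accept
 *	recv_msg.rx_source_thread = K_ANY;    // accept from any sender
 *
 *	if (k_mbox_get(&my_mailbox, &recv_msg, NULL, K_FOREVER) == 0) {
 *		// recv_msg.size now holds the sender's actual message size
 *		k_mbox_data_get(&recv_msg, rx_buf);  // or NULL to discard
 *	}
 */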

/** @} */

/**
 * @defgroup pipe_apis Pipe APIs
 * @ingroup kernel_apis
 * @{
 */

/** Pipe Structure */
struct k_pipe {
	unsigned char *buffer;  /**< Pipe buffer: may be NULL */
	size_t size;            /**< Buffer size */
	size_t bytes_used;      /**< Number of bytes used in buffer */
	size_t read_index;      /**< Where in buffer to read from */
	size_t write_index;     /**< Where in buffer to write */
	struct k_spinlock lock; /**< Synchronization lock */

	struct {
		_wait_q_t readers; /**< Reader wait queue */
		_wait_q_t writers; /**< Writer wait queue */
	} wait_q;                  /**< Wait queue */

	Z_DECL_POLL_EVENT

	uint8_t flags; /**< Flags */

	SYS_PORT_TRACING_TRACKING_FIELD(k_pipe)

#ifdef CONFIG_OBJ_CORE_PIPE
	struct k_obj_core obj_core;
#endif
};

/**
 * @cond INTERNAL_HIDDEN
 */
#define K_PIPE_FLAG_ALLOC BIT(0) /**< Buffer was allocated */

#define Z_PIPE_INITIALIZER(obj, pipe_buffer, pipe_buffer_size) \
	{ \
	.buffer = pipe_buffer, \
	.size = pipe_buffer_size, \
	.bytes_used = 0, \
	.read_index = 0, \
	.write_index = 0, \
	.lock = {}, \
	.wait_q = { \
		.readers = Z_WAIT_Q_INIT(&obj.wait_q.readers), \
		.writers = Z_WAIT_Q_INIT(&obj.wait_q.writers) \
	}, \
	Z_POLL_EVENT_OBJ_INIT(obj) \
	.flags = 0, \
	}

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @brief Statically define and initialize a pipe.
 *
 * The pipe can be accessed outside the module where it is defined using:
 *
 * @code extern struct k_pipe <name>; @endcode
 *
 * @param name Name of the pipe.
 * @param pipe_buffer_size Size of the pipe's ring buffer (in bytes),
 *                         or zero if no ring buffer is used.
 * @param pipe_align Alignment of the pipe's ring buffer (power of 2).
 */
#define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align) \
	static unsigned char __noinit __aligned(pipe_align) \
		_k_pipe_buf_##name[pipe_buffer_size]; \
	STRUCT_SECTION_ITERABLE(k_pipe, name) = \
		Z_PIPE_INITIALIZER(name, _k_pipe_buf_##name, pipe_buffer_size)

/**
 * @brief Initialize a pipe.
 *
 * This routine initializes a pipe object, prior to its first use.
 *
 * @param pipe Address of the pipe.
 * @param buffer Address of the pipe's ring buffer, or NULL if no ring buffer
 *               is used.
 * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
 *             buffer is used.
 */
void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size);

/**
 * @brief Release a pipe's allocated buffer
 *
 * If a pipe object was given a dynamically allocated buffer via
 * k_pipe_alloc_init(), this will free it. This function does nothing
 * if the buffer wasn't dynamically allocated.
 *
 * @param pipe Address of the pipe.
 * @retval 0 on success
 * @retval -EAGAIN nothing to cleanup
 */
int k_pipe_cleanup(struct k_pipe *pipe);

/**
 * @brief Initialize a pipe and allocate a buffer for it
 *
 * Storage for the buffer region will be allocated from the calling thread's
 * resource pool. This memory will be released if k_pipe_cleanup() is called,
 * or userspace is enabled and the pipe object loses all references to it.
 *
 * This function should only be called on uninitialized pipe objects.
 *
 * @param pipe Address of the pipe.
 * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
 *             buffer is used.
 * @retval 0 on success
 * @retval -ENOMEM if memory couldn't be allocated
 */
__syscall int k_pipe_alloc_init(struct k_pipe *pipe, size_t size);
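
/*
 * Dynamic-buffer sketch (illustrative; the size is arbitrary). The buffer
 * comes from the caller's resource pool and is returned by k_pipe_cleanup().
 *
 *	struct k_pipe dyn_pipe;
 *
 *	if (k_pipe_alloc_init(&dyn_pipe, 256) == 0) {
 *		// ... use the pipe ...
 *		(void)k_pipe_cleanup(&dyn_pipe);
 *	}
 */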

/**
 * @brief Write data to a pipe.
 *
 * This routine writes up to @a bytes_to_write bytes of data to @a pipe.
 *
 * @param pipe Address of the pipe.
 * @param data Address of data to write.
 * @param bytes_to_write Size of data (in bytes).
 * @param bytes_written Address of area to hold the number of bytes written.
 * @param min_xfer Minimum number of bytes to write.
 * @param timeout Waiting period to wait for the data to be written,
 *                or one of the special values K_NO_WAIT and K_FOREVER.
 *
 * @retval 0 At least @a min_xfer bytes of data were written.
 * @retval -EIO Returned without waiting; zero data bytes were written.
 * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
 *                 minus one data bytes were written.
 */
__syscall int k_pipe_put(struct k_pipe *pipe, const void *data,
			 size_t bytes_to_write, size_t *bytes_written,
			 size_t min_xfer, k_timeout_t timeout);

/**
 * @brief Read data from a pipe.
 *
 * This routine reads up to @a bytes_to_read bytes of data from @a pipe.
 *
 * @param pipe Address of the pipe.
 * @param data Address to place the data read from pipe.
 * @param bytes_to_read Maximum number of data bytes to read.
 * @param bytes_read Address of area to hold the number of bytes read.
 * @param min_xfer Minimum number of data bytes to read.
 * @param timeout Waiting period to wait for the data to be read,
 *                or one of the special values K_NO_WAIT and K_FOREVER.
 *
 * @retval 0 At least @a min_xfer bytes of data were read.
 * @retval -EINVAL invalid parameters supplied
 * @retval -EIO Returned without waiting; zero data bytes were read.
 * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
 *                 minus one data bytes were read.
 */
__syscall int k_pipe_get(struct k_pipe *pipe, void *data,
			 size_t bytes_to_read, size_t *bytes_read,
			 size_t min_xfer, k_timeout_t timeout);
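
/*
 * Producer/consumer sketch (illustrative; names and sizes are arbitrary).
 * A min_xfer equal to the full length makes the put all-or-nothing within
 * the timeout; a min_xfer of 1 on the get accepts any partial read.
 *
 *	K_PIPE_DEFINE(my_pipe, 64, 4);
 *
 *	char tx[] = "abc";
 *	size_t written;
 *	(void)k_pipe_put(&my_pipe, tx, sizeof(tx), &written,
 *			 sizeof(tx), K_NO_WAIT);
 *
 *	char rx[8];
 *	size_t bytes_read;
 *	if (k_pipe_get(&my_pipe, rx, sizeof(rx), &bytes_read,
 *		       1, K_MSEC(100)) == 0) {
 *		// at least 1 byte arrived; bytes_read holds the count
 *	}
 */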

/**
 * @brief Query the number of bytes that may be read from @a pipe.
 *
 * @param pipe Address of the pipe.
 *
 * @retval a number n such that 0 <= n <= @ref k_pipe.size; the
 *         result is zero for unbuffered pipes.
 */
__syscall size_t k_pipe_read_avail(struct k_pipe *pipe);

/**
 * @brief Query the number of bytes that may be written to @a pipe
 *
 * @param pipe Address of the pipe.
 *
 * @retval a number n such that 0 <= n <= @ref k_pipe.size; the
 *         result is zero for unbuffered pipes.
 */
__syscall size_t k_pipe_write_avail(struct k_pipe *pipe);

/**
 * @brief Flush the pipe of write data
 *
 * This routine flushes the pipe. Flushing the pipe is equivalent to reading
 * both all the data in the pipe's buffer and all the data waiting to go into
 * that pipe into a large temporary buffer and discarding the buffer. Any
 * writers that were previously pended become unpended.
 *
 * @param pipe Address of the pipe.
 */
__syscall void k_pipe_flush(struct k_pipe *pipe);

/**
 * @brief Flush the pipe's internal buffer
 *
 * This routine flushes the pipe's internal buffer. This is equivalent to
 * reading up to N bytes from the pipe (where N is the size of the pipe's
 * buffer) into a temporary buffer and then discarding that buffer. If there
 * were writers previously pending, then some may unpend as they try to fill
 * up the pipe's emptied buffer.
 *
 * @param pipe Address of the pipe.
 */
__syscall void k_pipe_buffer_flush(struct k_pipe *pipe);

/** @} */

/**
 * @cond INTERNAL_HIDDEN
 */

struct k_mem_slab_info {
	uint32_t num_blocks;
	size_t block_size;
	uint32_t num_used;
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
	uint32_t max_used;
#endif
};

struct k_mem_slab {
	_wait_q_t wait_q;
	struct k_spinlock lock;
	char *buffer;
	char *free_list;
	struct k_mem_slab_info info;

	SYS_PORT_TRACING_TRACKING_FIELD(k_mem_slab)

#ifdef CONFIG_OBJ_CORE_MEM_SLAB
	struct k_obj_core obj_core;
#endif
};

#define Z_MEM_SLAB_INITIALIZER(_slab, _slab_buffer, _slab_block_size, \
			       _slab_num_blocks) \
	{ \
	.wait_q = Z_WAIT_Q_INIT(&(_slab).wait_q), \
	.lock = {}, \
	.buffer = _slab_buffer, \
	.free_list = NULL, \
	.info = {_slab_num_blocks, _slab_block_size, 0} \
	}

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @defgroup mem_slab_apis Memory Slab APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * @brief Statically define and initialize a memory slab in a public (non-static) scope.
 *
 * The memory slab's buffer contains @a slab_num_blocks memory blocks
 * that are @a slab_block_size bytes long. The buffer is aligned to a
 * @a slab_align -byte boundary. To ensure that each memory block is similarly
 * aligned to this boundary, @a slab_block_size must also be a multiple of
 * @a slab_align.
 *
 * The memory slab can be accessed outside the module where it is defined
 * using:
 *
 * @code extern struct k_mem_slab <name>; @endcode
 *
 * @note This macro cannot be used together with a static keyword.
 *       If such a use-case is desired, use @ref K_MEM_SLAB_DEFINE_STATIC
 *       instead.
 *
 * @param name Name of the memory slab.
 * @param slab_block_size Size of each memory block (in bytes).
 * @param slab_num_blocks Number of memory blocks.
 * @param slab_align Alignment of the memory slab's buffer (power of 2).
 */
#define K_MEM_SLAB_DEFINE(name, slab_block_size, slab_num_blocks, slab_align) \
	char __noinit_named(k_mem_slab_buf_##name) \
		__aligned(WB_UP(slab_align)) \
		_k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
	STRUCT_SECTION_ITERABLE(k_mem_slab, name) = \
		Z_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \
				       WB_UP(slab_block_size), slab_num_blocks)

/**
 * @brief Statically define and initialize a memory slab in a private (static) scope.
 *
 * The memory slab's buffer contains @a slab_num_blocks memory blocks
 * that are @a slab_block_size bytes long. The buffer is aligned to a
 * @a slab_align -byte boundary. To ensure that each memory block is similarly
 * aligned to this boundary, @a slab_block_size must also be a multiple of
 * @a slab_align.
 *
 * @param name Name of the memory slab.
 * @param slab_block_size Size of each memory block (in bytes).
 * @param slab_num_blocks Number of memory blocks.
 * @param slab_align Alignment of the memory slab's buffer (power of 2).
 */
#define K_MEM_SLAB_DEFINE_STATIC(name, slab_block_size, slab_num_blocks, slab_align) \
	static char __noinit_named(k_mem_slab_buf_##name) \
		__aligned(WB_UP(slab_align)) \
		_k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
	static STRUCT_SECTION_ITERABLE(k_mem_slab, name) = \
		Z_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \
				       WB_UP(slab_block_size), slab_num_blocks)

/**
 * @brief Initialize a memory slab.
 *
 * Initializes a memory slab, prior to its first use.
 *
 * The memory slab's buffer contains @a num_blocks memory blocks
 * that are @a block_size bytes long. The buffer must be aligned to an
 * N-byte boundary matching a word boundary, where N is a power of 2
 * (i.e. 4 on 32-bit systems, 8, 16, ...).
 * To ensure that each memory block is similarly aligned to this boundary,
 * @a block_size must also be a multiple of N.
 *
 * @param slab Address of the memory slab.
 * @param buffer Pointer to buffer used for the memory blocks.
 * @param block_size Size of each memory block (in bytes).
 * @param num_blocks Number of memory blocks.
 *
 * @retval 0 on success
 * @retval -EINVAL invalid data supplied
 */
int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
		    size_t block_size, uint32_t num_blocks);

/**
 * @brief Allocate memory from a memory slab.
 *
 * This routine allocates a memory block from a memory slab.
 *
 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
 *
 * @funcprops \isr_ok
 *
 * @param slab Address of the memory slab.
 * @param mem Pointer to block address area.
 * @param timeout Waiting period to wait for operation to complete.
 *                Use K_NO_WAIT to return without waiting,
 *                or K_FOREVER to wait as long as necessary.
 *
 * @retval 0 Memory allocated. The block address area pointed at by @a mem
 *           is set to the starting address of the memory block.
 * @retval -ENOMEM Returned without waiting.
 * @retval -EAGAIN Waiting period timed out.
 * @retval -EINVAL Invalid data supplied
 */
int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem,
		     k_timeout_t timeout);

/**
 * @brief Free memory allocated from a memory slab.
 *
 * This routine releases a previously allocated memory block back to its
 * associated memory slab.
 *
 * @param slab Address of the memory slab.
 * @param mem Pointer to the memory block (as returned by k_mem_slab_alloc()).
 */
void k_mem_slab_free(struct k_mem_slab *slab, void *mem);
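
/*
 * Allocation sketch (illustrative; "my_slab" and the geometry are
 * hypothetical): 8 blocks of 64 bytes, word-aligned.
 *
 *	K_MEM_SLAB_DEFINE_STATIC(my_slab, 64, 8, 4);
 *
 *	void *block;
 *
 *	if (k_mem_slab_alloc(&my_slab, &block, K_NO_WAIT) == 0) {
 *		// up to 64 bytes usable at 'block'
 *		k_mem_slab_free(&my_slab, block);
 *	}
 */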

/**
 * @brief Get the number of used blocks in a memory slab.
 *
 * This routine gets the number of memory blocks that are currently
 * allocated in @a slab.
 *
 * @param slab Address of the memory slab.
 *
 * @return Number of allocated memory blocks.
 */
static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
{
	return slab->info.num_used;
}

/**
 * @brief Get the maximum number of blocks used so far in a memory slab.
 *
 * This routine gets the maximum number of memory blocks that were
 * allocated in @a slab.
 *
 * @param slab Address of the memory slab.
 *
 * @return Maximum number of allocated memory blocks.
 */
static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
{
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
	return slab->info.max_used;
#else
	ARG_UNUSED(slab);
	return 0;
#endif
}

/**
 * @brief Get the number of unused blocks in a memory slab.
 *
 * This routine gets the number of memory blocks that are currently
 * unallocated in @a slab.
 *
 * @param slab Address of the memory slab.
 *
 * @return Number of unallocated memory blocks.
 */
static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
{
	return slab->info.num_blocks - slab->info.num_used;
}

/**
 * @brief Get the memory stats for a memory slab
 *
 * This routine gets the runtime memory usage stats for the slab @a slab.
 *
 * @param slab Address of the memory slab
 * @param stats Pointer to memory into which to copy memory usage statistics
 *
 * @retval 0 Success
 * @retval -EINVAL Any parameter points to NULL
 */
int k_mem_slab_runtime_stats_get(struct k_mem_slab *slab, struct sys_memory_stats *stats);

/**
 * @brief Reset the maximum memory usage for a slab
 *
 * This routine resets the maximum memory usage for the slab @a slab to its
 * current usage.
 *
 * @param slab Address of the memory slab
 *
 * @retval 0 Success
 * @retval -EINVAL Memory slab is NULL
 */
int k_mem_slab_runtime_stats_reset_max(struct k_mem_slab *slab);
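
/*
 * Stats sketch (illustrative; assumes the sys_memory_stats fields
 * free_bytes, allocated_bytes and max_allocated_bytes from
 * <zephyr/sys/mem_stats.h>, and the "my_slab" slab defined above).
 *
 *	struct sys_memory_stats stats;
 *
 *	if (k_mem_slab_runtime_stats_get(&my_slab, &stats) == 0) {
 *		// compare stats.allocated_bytes with stats.max_allocated_bytes
 *	}
 *	(void)k_mem_slab_runtime_stats_reset_max(&my_slab);
 */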

/** @} */

/**
 * @addtogroup heap_apis
 * @{
 */

/* kernel synchronized heap struct */

struct k_heap {
	struct sys_heap heap;
	_wait_q_t wait_q;
	struct k_spinlock lock;
};

/**
 * @brief Initialize a k_heap
 *
 * This constructs a synchronized k_heap object over a memory region
 * specified by the user. Note that while any alignment and size can
 * be passed as valid parameters, internal alignment restrictions
 * inside the inner sys_heap mean that not all bytes may be usable as
 * allocated memory.
 *
 * @param h Heap struct to initialize
 * @param mem Pointer to memory.
 * @param bytes Size of memory region, in bytes
 */
void k_heap_init(struct k_heap *h, void *mem,
		 size_t bytes) __attribute_nonnull(1);

/**
 * @brief Allocate aligned memory from a k_heap
 *
 * Behaves in all ways like k_heap_alloc(), except that the returned
 * memory (if available) will have a starting address in memory which
 * is a multiple of the specified power-of-two alignment value in
 * bytes. The resulting memory can be returned to the heap using
 * k_heap_free().
 *
 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
 *
 * @funcprops \isr_ok
 *
 * @param h Heap from which to allocate
 * @param align Alignment in bytes, must be a power of two
 * @param bytes Number of bytes requested
 * @param timeout How long to wait, or K_NO_WAIT
 * @return Pointer to memory the caller can now use, or NULL if none
 *         is available
 */
void *k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes,
			   k_timeout_t timeout) __attribute_nonnull(1);

/**
 * @brief Allocate memory from a k_heap
 *
 * Allocates and returns a memory buffer from the memory region owned
 * by the heap. If no memory is available immediately, the call will
 * block for the specified timeout (constructed via the standard
 * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
 * freed. If the allocation cannot be performed by the expiration of
 * the timeout, NULL will be returned.
 * Allocated memory is aligned on a multiple of pointer sizes.
 *
 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
 *
 * @funcprops \isr_ok
 *
 * @param h Heap from which to allocate
 * @param bytes Desired size of block to allocate
 * @param timeout How long to wait, or K_NO_WAIT
 * @return A pointer to valid heap memory, or NULL
 */
void *k_heap_alloc(struct k_heap *h, size_t bytes,
		   k_timeout_t timeout) __attribute_nonnull(1);

/**
 * @brief Allocate and initialize memory for an array of objects from a k_heap
 *
 * Allocates memory for an array of @a num objects of @a size bytes each and
 * initializes all bytes in the allocated storage to zero. If no memory is
 * available immediately, the call will block for the specified timeout
 * (constructed via the standard timeout API, or K_NO_WAIT or K_FOREVER)
 * waiting for memory to be freed. If the allocation cannot be performed by
 * the expiration of the timeout, NULL will be returned.
 * Allocated memory is aligned on a multiple of pointer sizes.
 *
 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
 *
 * @funcprops \isr_ok
 *
 * @param h Heap from which to allocate
 * @param num Number of objects to allocate
 * @param size Desired size of each object to allocate
 * @param timeout How long to wait, or K_NO_WAIT
 * @return A pointer to valid heap memory, or NULL
 */
void *k_heap_calloc(struct k_heap *h, size_t num, size_t size, k_timeout_t timeout)
	__attribute_nonnull(1);

/**
 * @brief Reallocate memory from a k_heap
 *
 * Reallocates and returns a memory buffer from the memory region owned
 * by the heap. If no memory is available immediately, the call will
 * block for the specified timeout (constructed via the standard
 * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
 * freed. If the allocation cannot be performed by the expiration of
 * the timeout, NULL will be returned.
 * Reallocated memory is aligned on a multiple of pointer sizes.
 *
 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
 *
 * @funcprops \isr_ok
 *
 * @param h Heap from which to allocate
 * @param ptr Original pointer returned from a previous allocation
 * @param bytes Desired size of block to allocate
 * @param timeout How long to wait, or K_NO_WAIT
 *
 * @return Pointer to memory the caller can now use, or NULL
 */
void *k_heap_realloc(struct k_heap *h, void *ptr, size_t bytes, k_timeout_t timeout)
	__attribute_nonnull(1);

/**
 * @brief Free memory allocated by k_heap_alloc()
 *
 * Returns the specified memory block, which must have been returned
 * from k_heap_alloc(), to the heap for use by other callers. Passing
 * a NULL block is legal, and has no effect.
 *
 * @param h Heap to which to return the memory
 * @param mem A valid memory block, or NULL
 */
void k_heap_free(struct k_heap *h, void *mem) __attribute_nonnull(1);
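
/*
 * Usage sketch (illustrative; "my_heap" and the sizes are arbitrary; the
 * K_HEAP_DEFINE macro used here is declared further below in this file).
 *
 *	K_HEAP_DEFINE(my_heap, 1024);
 *
 *	void *mem = k_heap_alloc(&my_heap, 128, K_NO_WAIT);
 *
 *	if (mem != NULL) {
 *		// ... use the 128-byte block ...
 *		k_heap_free(&my_heap, mem);
 *	}
 */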

/* Hand-calculated minimum heap sizes needed to return a successful
 * 1-byte allocation. See details in lib/os/heap.[ch]
 */
#define Z_HEAP_MIN_SIZE ((sizeof(void *) > 4) ? 56 : 44)

/**
 * @brief Define a static k_heap in the specified linker section
 *
 * This macro defines and initializes a static memory region and
 * k_heap of the requested size in the specified linker section.
 * After kernel start, &name can be used as if k_heap_init() had
 * been called.
 *
 * Note that this macro enforces a minimum size on the memory region
 * to accommodate metadata requirements. Very small heaps will be
 * padded to fit.
 *
 * @param name Symbol name for the struct k_heap object
 * @param bytes Size of memory region, in bytes
 * @param in_section __attribute__((section(name)))
 */
#define Z_HEAP_DEFINE_IN_SECT(name, bytes, in_section) \
	char in_section \
	     __aligned(8) /* CHUNK_UNIT */ \
	     kheap_##name[MAX(bytes, Z_HEAP_MIN_SIZE)]; \
	STRUCT_SECTION_ITERABLE(k_heap, name) = { \
		.heap = { \
			.init_mem = kheap_##name, \
			.init_bytes = MAX(bytes, Z_HEAP_MIN_SIZE), \
		}, \
	}

/**
 * @brief Define a static k_heap
 *
 * This macro defines and initializes a static memory region and
 * k_heap of the requested size. After kernel start, &name can be
 * used as if k_heap_init() had been called.
 *
 * Note that this macro enforces a minimum size on the memory region
 * to accommodate metadata requirements. Very small heaps will be
 * padded to fit.
 *
 * @param name Symbol name for the struct k_heap object
 * @param bytes Size of memory region, in bytes
 */
#define K_HEAP_DEFINE(name, bytes) \
	Z_HEAP_DEFINE_IN_SECT(name, bytes, \
			      __noinit_named(kheap_buf_##name))

/**
 * @brief Define a static k_heap in uncached memory
 *
 * This macro defines and initializes a static memory region and
 * k_heap of the requested size in uncached memory. After kernel
 * start, &name can be used as if k_heap_init() had been called.
 *
 * Note that this macro enforces a minimum size on the memory region
 * to accommodate metadata requirements. Very small heaps will be
 * padded to fit.
 *
 * @param name Symbol name for the struct k_heap object
 * @param bytes Size of memory region, in bytes
 */
#define K_HEAP_DEFINE_NOCACHE(name, bytes) \
	Z_HEAP_DEFINE_IN_SECT(name, bytes, __nocache)

/**
 * @}
 */

/**
 * @defgroup heap_apis Heap APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * @brief Allocate memory from the heap with a specified alignment.
 *
 * This routine provides semantics similar to aligned_alloc(); memory is
 * allocated from the heap with a specified alignment. However, one minor
 * difference is that k_aligned_alloc() accepts any non-zero @p size,
 * whereas aligned_alloc() only accepts a @p size that is an integral
 * multiple of @p align.
 *
 * Above, aligned_alloc() refers to:
 * C11 standard (ISO/IEC 9899:2011): 7.22.3.1
 * The aligned_alloc function (p: 347-348)
 *
 * @param align Alignment of memory requested (in bytes).
 * @param size Amount of memory requested (in bytes).
 *
 * @return Address of the allocated memory if successful; otherwise NULL.
 */
void *k_aligned_alloc(size_t align, size_t size);

/**
 * @brief Allocate memory from the heap.
 *
 * This routine provides traditional malloc() semantics. Memory is
 * allocated from the heap memory pool.
 * Allocated memory is aligned on a multiple of pointer sizes.
 *
 * @param size Amount of memory requested (in bytes).
 *
 * @return Address of the allocated memory if successful; otherwise NULL.
 */
void *k_malloc(size_t size);

/**
 * @brief Free memory allocated from heap.
 *
 * This routine provides traditional free() semantics. The memory being
 * returned must have been allocated from the heap memory pool.
 *
 * If @a ptr is NULL, no operation is performed.
 *
 * @param ptr Pointer to previously allocated memory.
 */
void k_free(void *ptr);

/**
 * @brief Allocate memory from heap, array style
 *
 * This routine provides traditional calloc() semantics. Memory is
 * allocated from the heap memory pool and zeroed.
 *
 * @param nmemb Number of elements in the requested array
 * @param size Size of each array element (in bytes).
 *
 * @return Address of the allocated memory if successful; otherwise NULL.
 */
void *k_calloc(size_t nmemb, size_t size);

/** @brief Expand the size of an existing allocation
 *
 * Returns a pointer to a new memory region with the same contents,
 * but a different allocated size. If the new allocation can be
 * expanded in place, the pointer returned will be identical.
 * Otherwise the data will be copied to a new block and the old one
 * will be freed as per sys_heap_free(). If the specified size is
 * smaller than the original, the block will be truncated in place and
 * the remaining memory returned to the heap. If the allocation of a
 * new block fails, then NULL will be returned and the old block will
 * not be freed or modified.
 *
 * @param ptr Original pointer returned from a previous allocation
 * @param size Amount of memory requested (in bytes).
 *
 * @return Pointer to memory the caller can now use, or NULL.
 */
void *k_realloc(void *ptr, size_t size);

/** @} */
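
/*
 * Usage sketch (illustrative): grow a system-heap allocation while keeping
 * the old block valid if the grow fails.
 *
 *	char *p = k_malloc(32);
 *
 *	if (p != NULL) {
 *		char *q = k_realloc(p, 64);  // may move the block
 *
 *		if (q != NULL) {
 *			p = q;
 *		}
 *		k_free(p);
 *	}
 */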

/* polling API - PRIVATE */

#ifdef CONFIG_POLL
#define _INIT_OBJ_POLL_EVENT(obj) do { (obj)->poll_event = NULL; } while (false)
#else
#define _INIT_OBJ_POLL_EVENT(obj) do { } while (false)
#endif

/* private - types bit positions */
enum _poll_types_bits {
	/* can be used to ignore an event */
	_POLL_TYPE_IGNORE,

	/* to be signaled by k_poll_signal_raise() */
	_POLL_TYPE_SIGNAL,

	/* semaphore availability */
	_POLL_TYPE_SEM_AVAILABLE,

	/* queue/FIFO/LIFO data availability */
	_POLL_TYPE_DATA_AVAILABLE,

	/* msgq data availability */
	_POLL_TYPE_MSGQ_DATA_AVAILABLE,

	/* pipe data availability */
	_POLL_TYPE_PIPE_DATA_AVAILABLE,

	_POLL_NUM_TYPES
};

#define Z_POLL_TYPE_BIT(type) (1U << ((type) - 1U))

/* private - states bit positions */
enum _poll_states_bits {
	/* default state when creating event */
	_POLL_STATE_NOT_READY,

	/* signaled by k_poll_signal_raise() */
	_POLL_STATE_SIGNALED,

	/* semaphore is available */
	_POLL_STATE_SEM_AVAILABLE,

	/* data is available to read on queue/FIFO/LIFO */
	_POLL_STATE_DATA_AVAILABLE,

	/* queue/FIFO/LIFO wait was cancelled */
	_POLL_STATE_CANCELLED,

	/* data is available to read on a message queue */
	_POLL_STATE_MSGQ_DATA_AVAILABLE,

	/* data is available to read from a pipe */
	_POLL_STATE_PIPE_DATA_AVAILABLE,

	_POLL_NUM_STATES
};

#define Z_POLL_STATE_BIT(state) (1U << ((state) - 1U))

#define _POLL_EVENT_NUM_UNUSED_BITS \
	(32 - (0 \
	       + 8 /* tag */ \
	       + _POLL_NUM_TYPES \
	       + _POLL_NUM_STATES \
	       + 1 /* modes */ \
	      ))

/* end of polling API - PRIVATE */


/**
 * @defgroup poll_apis Async polling APIs
 * @ingroup kernel_apis
 * @{
 */

/* Public polling API */

/* public - values for k_poll_event.type bitfield */
#define K_POLL_TYPE_IGNORE 0
#define K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
#define K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
#define K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
#define K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE
#define K_POLL_TYPE_MSGQ_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_MSGQ_DATA_AVAILABLE)
#define K_POLL_TYPE_PIPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_PIPE_DATA_AVAILABLE)

/* public - polling modes */
enum k_poll_modes {
	/* polling thread does not take ownership of objects when available */
	K_POLL_MODE_NOTIFY_ONLY = 0,

	K_POLL_NUM_MODES
};

/* public - values for k_poll_event.state bitfield */
#define K_POLL_STATE_NOT_READY 0
#define K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED)
#define K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
#define K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
#define K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE
#define K_POLL_STATE_MSGQ_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_MSGQ_DATA_AVAILABLE)
#define K_POLL_STATE_PIPE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_PIPE_DATA_AVAILABLE)
#define K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED)

/* public - poll signal object */
struct k_poll_signal {
	/** PRIVATE - DO NOT TOUCH */
	sys_dlist_t poll_events;

	/**
	 * 1 if the event has been signaled, 0 otherwise. Stays set to 1 until
	 * user resets it to 0.
	 */
	unsigned int signaled;

	/** custom result value passed to k_poll_signal_raise() if needed */
	int result;
};

#define K_POLL_SIGNAL_INITIALIZER(obj) \
	{ \
	.poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events), \
	.signaled = 0, \
	.result = 0, \
	}

/**
 * @brief Poll Event
 */
struct k_poll_event {
	/** PRIVATE - DO NOT TOUCH */
	sys_dnode_t _node;

	/** PRIVATE - DO NOT TOUCH */
	struct z_poller *poller;

	/** optional user-specified tag, opaque, untouched by the API */
	uint32_t tag:8;

	/** bitfield of event types (bitwise-ORed K_POLL_TYPE_xxx values) */
	uint32_t type:_POLL_NUM_TYPES;

	/** bitfield of event states (bitwise-ORed K_POLL_STATE_xxx values) */
	uint32_t state:_POLL_NUM_STATES;

	/** mode of operation, from enum k_poll_modes */
	uint32_t mode:1;

	/** unused bits in 32-bit word */
	uint32_t unused:_POLL_EVENT_NUM_UNUSED_BITS;

	/** per-type data */
	union {
		/* The typed_* fields below are used by K_POLL_EVENT_*INITIALIZER() macros to
		 * ensure type safety of polled objects.
		 */
		void *obj, *typed_K_POLL_TYPE_IGNORE;
		struct k_poll_signal *signal, *typed_K_POLL_TYPE_SIGNAL;
		struct k_sem *sem, *typed_K_POLL_TYPE_SEM_AVAILABLE;
		struct k_fifo *fifo, *typed_K_POLL_TYPE_FIFO_DATA_AVAILABLE;
		struct k_queue *queue, *typed_K_POLL_TYPE_DATA_AVAILABLE;
		struct k_msgq *msgq, *typed_K_POLL_TYPE_MSGQ_DATA_AVAILABLE;
#ifdef CONFIG_PIPES
		struct k_pipe *pipe, *typed_K_POLL_TYPE_PIPE_DATA_AVAILABLE;
#endif
	};
};

#define K_POLL_EVENT_INITIALIZER(_event_type, _event_mode, _event_obj) \
	{ \
	.poller = NULL, \
	.type = _event_type, \
	.state = K_POLL_STATE_NOT_READY, \
	.mode = _event_mode, \
	.unused = 0, \
	{ \
		.typed_##_event_type = _event_obj, \
	}, \
	}

#define K_POLL_EVENT_STATIC_INITIALIZER(_event_type, _event_mode, _event_obj, \
					event_tag) \
	{ \
	.tag = event_tag, \
	.type = _event_type, \
	.state = K_POLL_STATE_NOT_READY, \
	.mode = _event_mode, \
	.unused = 0, \
	{ \
		.typed_##_event_type = _event_obj, \
	}, \
	}

/**
 * @brief Initialize one struct k_poll_event instance
 *
 * After this routine is called on a poll event, the event is ready to be
 * placed in an event array to be passed to k_poll().
 *
 * @param event The event to initialize.
 * @param type A bitfield of the types of event, from the K_POLL_TYPE_xxx
 *             values. Only values that apply to the same object being polled
 *             can be used together. Choosing K_POLL_TYPE_IGNORE disables the
 *             event.
 * @param mode Mode of operation; reserved for future use. Currently only
 *             K_POLL_MODE_NOTIFY_ONLY is supported.
 * @param obj Kernel object or poll signal.
 */
void k_poll_event_init(struct k_poll_event *event, uint32_t type,
		       int mode, void *obj);

/**
 * @brief Wait for one or many of multiple poll events to occur
 *
 * This routine allows a thread to wait concurrently for one or many of
 * multiple poll events to have occurred. Such events can be a kernel object
 * being available, like a semaphore, or a poll signal event.
 *
 * When an event notifies that a kernel object is available, the kernel object
 * is not "given" to the thread calling k_poll(): it merely signals the fact
 * that the object was available when the k_poll() call was in effect. Also,
 * all threads trying to acquire an object the regular way, i.e. by pending on
 * the object, have precedence over the thread polling on the object. This
 * means that the polling thread will never get the poll event on an object
 * until the object becomes available and its pend queue is empty. For this
 * reason, the k_poll() call is more effective when the objects being polled
 * only have one thread, the polling thread, trying to acquire them.
 *
 * When k_poll() returns 0, the caller should loop on all the events that were
 * passed to k_poll() and check the state field for the values that were
 * expected and take the associated actions.
 *
 * Before being reused for another call to k_poll(), the user has to reset the
 * state field to K_POLL_STATE_NOT_READY.
 *
 * When called from user mode, a temporary memory allocation is required from
 * the caller's resource pool.
 *
 * @param events An array of events to be polled for.
 * @param num_events The number of events in the array.
 * @param timeout Waiting period for an event to be ready,
 *                or one of the special values K_NO_WAIT and K_FOREVER.
 *
 * @retval 0 One or more events are ready.
 * @retval -EAGAIN Waiting period timed out.
 * @retval -EINTR Polling has been interrupted, e.g. with
 *         k_queue_cancel_wait(). All output events are still set and valid,
 *         cancelled event(s) will be set to K_POLL_STATE_CANCELLED. In other
 *         words, -EINTR status means that at least one of the output events is
 *         K_POLL_STATE_CANCELLED.
 * @retval -ENOMEM Thread resource pool insufficient memory (user mode only)
 * @retval -EINVAL Bad parameters (user mode only)
 */
__syscall int k_poll(struct k_poll_event *events, int num_events,
		     k_timeout_t timeout);
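
/*
 * Polling sketch (illustrative; "my_sem" and "my_signal" are hypothetical
 * objects initialized elsewhere). Note the state reset before reuse.
 *
 *	struct k_poll_event events[2] = {
 *		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
 *						K_POLL_MODE_NOTIFY_ONLY,
 *						&my_sem, 0),
 *		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SIGNAL,
 *						K_POLL_MODE_NOTIFY_ONLY,
 *						&my_signal, 0),
 *	};
 *
 *	if (k_poll(events, 2, K_FOREVER) == 0) {
 *		if (events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
 *			k_sem_take(&my_sem, K_NO_WAIT);  // still must acquire
 *		}
 *		if (events[1].state == K_POLL_STATE_SIGNALED) {
 *			// events[1].signal->result holds the raised value
 *		}
 *		events[0].state = K_POLL_STATE_NOT_READY;
 *		events[1].state = K_POLL_STATE_NOT_READY;
 *	}
 */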

/**
 * @brief Initialize a poll signal object.
 *
 * Ready a poll signal object to be signaled via k_poll_signal_raise().
 *
 * @param sig A poll signal.
 */
__syscall void k_poll_signal_init(struct k_poll_signal *sig);

/**
 * @brief Reset a poll signal object's state to unsignaled.
 *
 * @param sig A poll signal object
 */
__syscall void k_poll_signal_reset(struct k_poll_signal *sig);

/**
 * @brief Fetch the signaled state and result value of a poll signal
 *
 * @param sig A poll signal object
 * @param signaled An integer buffer which will be written nonzero if the
 *                 object was signaled
 * @param result An integer destination buffer which will be written with the
 *               result value if the object was signaled, or an undefined
 *               value if it was not.
 */
__syscall void k_poll_signal_check(struct k_poll_signal *sig,
				   unsigned int *signaled, int *result);

/**
 * @brief Signal a poll signal object.
 *
 * This routine makes ready a poll signal, which is basically a poll event of
 * type K_POLL_TYPE_SIGNAL. If a thread was polling on that event, it will be
 * made ready to run. A @a result value can be specified.
 *
 * The poll signal contains a 'signaled' field that, when set by
 * k_poll_signal_raise(), stays set until the user sets it back to 0 with
 * k_poll_signal_reset(). It thus has to be reset by the user before being
 * passed again to k_poll() or k_poll() will consider it to be signaled, and
 * will return immediately.
 *
 * @note The result is stored and the 'signaled' field is set even if
 *       this function returns an error indicating that an expiring poll was
 *       not notified. The next k_poll() will detect the missed raise.
 *
 * @param sig A poll signal.
 * @param result The value to store in the result field of the signal.
 *
 * @retval 0 The signal was delivered successfully.
 * @retval -EAGAIN The polling thread's timeout is in the process of expiring.
 */
__syscall int k_poll_signal_raise(struct k_poll_signal *sig, int result);

/** @} */

/**
 * @defgroup cpu_idle_apis CPU Idling APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * @brief Make the CPU idle.
 *
 * This function makes the CPU idle until an event wakes it up.
 *
 * In a regular system, the idle thread should be the only thread responsible
 * for making the CPU idle and triggering any type of power management.
 * However, in some more constrained systems, such as a single-threaded system,
 * that single thread is responsible for this, if needed.
 *
 * @note In some architectures, before returning, the function unmasks interrupts
 *       unconditionally.
 */
static inline void k_cpu_idle(void)
{
	arch_cpu_idle();
}

/**
 * @brief Make the CPU idle in an atomic fashion.
 *
 * Similar to k_cpu_idle(), but must be called with interrupts locked.
 *
 * Enabling interrupts and entering a low-power mode will be atomic,
 * i.e. there will be no period of time where interrupts are enabled before
 * the processor enters a low-power mode.
 *
 * After waking up from the low-power mode, the interrupt lockout state will
 * be restored as if by irq_unlock(key).
 *
 * @param key Interrupt locking key obtained from irq_lock().
 */
static inline void k_cpu_atomic_idle(unsigned int key)
{
	arch_cpu_atomic_idle(key);
}
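
/*
 * Race-free idle sketch (illustrative; "work_pending" is a hypothetical
 * volatile flag set from an ISR). The wake condition is checked with
 * interrupts masked, so a wake event cannot slip in between the check and
 * the idle entry.
 *
 *	unsigned int key = irq_lock();
 *
 *	while (!work_pending) {
 *		k_cpu_atomic_idle(key);  // sleeps; lockout restored on return
 *		key = irq_lock();        // re-lock before re-checking
 *	}
 *	irq_unlock(key);
 */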

/**
 * @}
 */

/**
 * @cond INTERNAL_HIDDEN
 * @internal
 */
#ifdef ARCH_EXCEPT
/* This architecture has direct support for triggering a CPU exception */
#define z_except_reason(reason) ARCH_EXCEPT(reason)
#else

#if !defined(CONFIG_ASSERT_NO_FILE_INFO)
#define __EXCEPT_LOC() __ASSERT_PRINT("@ %s:%d\n", __FILE__, __LINE__)
#else
#define __EXCEPT_LOC()
#endif

/* NOTE: This is the implementation for arches that do not implement
 * ARCH_EXCEPT() to generate a real CPU exception.
 *
 * We won't have a real exception frame to determine the PC value when
 * the oops occurred, so print file and line number before we jump into
 * the fatal error handler.
 */
#define z_except_reason(reason) do { \
		__EXCEPT_LOC(); \
		z_fatal_error(reason, NULL); \
	} while (false)

#endif /* ARCH_EXCEPT */
/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @brief Fatally terminate a thread
 *
 * This should be called when a thread has encountered an unrecoverable
 * runtime condition and needs to terminate. What this ultimately
 * means is determined by the _fatal_error_handler() implementation, which
 * will be called with reason code K_ERR_KERNEL_OOPS.
 *
 * If this is called from ISR context, the default system fatal error handler
 * will treat it as an unrecoverable system error, just like k_panic().
 */
#define k_oops() z_except_reason(K_ERR_KERNEL_OOPS)

/**
 * @brief Fatally terminate the system
 *
 * This should be called when the Zephyr kernel has encountered an
 * unrecoverable runtime condition and needs to terminate. What this ultimately
 * means is determined by the _fatal_error_handler() implementation, which
 * will be called with reason code K_ERR_KERNEL_PANIC.
 */
#define k_panic() z_except_reason(K_ERR_KERNEL_PANIC)

/**
 * @cond INTERNAL_HIDDEN
 */

/*
 * private APIs that are utilized by one or more public APIs
 */

/**
 * @internal
 */
void z_timer_expiration_handler(struct _timeout *timeout);

/**
 * INTERNAL_HIDDEN @endcond
 */

#ifdef CONFIG_PRINTK
/**
 * @brief Emit a character buffer to the console device
 *
 * @param c String of characters to print
 * @param n The length of the string
 */
__syscall void k_str_out(char *c, size_t n);
#endif

/**
 * @defgroup float_apis Floating Point APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * @brief Disable preservation of floating point context information.
 *
 * This routine informs the kernel that the specified thread
 * will no longer be using the floating point registers.
 *
 * @warning
 * Some architectures apply restrictions on how the disabling of floating
 * point preservation may be requested, see arch_float_disable.
 *
 * @warning
 * This routine should only be used to disable floating point support for
 * a thread that currently has such support enabled.
 *
 * @param thread ID of thread.
 *
 * @retval 0 On success.
 * @retval -ENOTSUP If the floating point disabling is not implemented.
 * @retval -EINVAL If the floating point disabling could not be performed.
 */
__syscall int k_float_disable(struct k_thread *thread);

/**
 * @brief Enable preservation of floating point context information.
 *
 * This routine informs the kernel that the specified thread
 * will use the floating point registers.
 *
 * Invoking this routine initializes the thread's floating point context info
 * to that of an FPU that has been reset. The next time the thread is scheduled
 * by z_swap() it will either inherit an FPU that is guaranteed to be in a
 * "sane" state (if the most recent user of the FPU was cooperatively swapped
 * out) or the thread's own floating point context will be loaded (if the most
 * recent user of the FPU was preempted, or if this thread is the first user
 * of the FPU). Thereafter, the kernel will protect the thread's FP context
 * so that it is not altered during a preemptive context switch.
 *
 * The @a options parameter indicates which floating point register sets will
 * be used by the specified thread.
 *
 * For x86 options:
 *
 * - K_FP_REGS indicates x87 FPU and MMX registers only
 * - K_SSE_REGS indicates SSE registers (and also x87 FPU and MMX registers)
 *
 * @warning
 * Some architectures apply restrictions on how the enabling of floating
 * point preservation may be requested, see arch_float_enable.
 *
 * @warning
 * This routine should only be used to enable floating point support for
 * a thread that does not currently have such support enabled.
 *
 * @param thread ID of thread.
 * @param options architecture dependent options
 *
 * @retval 0 On success.
 * @retval -ENOTSUP If the floating point enabling is not implemented.
 * @retval -EINVAL If the floating point enabling could not be performed.
 */
__syscall int k_float_enable(struct k_thread *thread, unsigned int options);

/**
 * @}
 */

/**
 * @brief Get the runtime statistics of a thread
 *
 * @param thread ID of thread.
 * @param stats Pointer to struct to copy statistics into.
 * @return -EINVAL if null pointers, otherwise 0
 */
int k_thread_runtime_stats_get(k_tid_t thread,
			       k_thread_runtime_stats_t *stats);
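
/*
 * Stats sketch (illustrative; which fields are populated depends on the
 * runtime-stats Kconfig options, and "execution_cycles" is assumed to be
 * the basic cycle counter in k_thread_runtime_stats_t).
 *
 *	k_thread_runtime_stats_t stats;
 *
 *	if (k_thread_runtime_stats_get(k_current_get(), &stats) == 0) {
 *		// e.g. report stats.execution_cycles for the current thread
 *	}
 */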

/**
 * @brief Get the runtime statistics of all threads
 *
 * @param stats Pointer to struct to copy statistics into.
 * @return -EINVAL if null pointers, otherwise 0
 */
int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats);

/**
 * @brief Get the runtime statistics of all threads on specified cpu
 *
 * @param cpu The cpu number
 * @param stats Pointer to struct to copy statistics into.
 * @return -EINVAL if null pointers, otherwise 0
 */
int k_thread_runtime_stats_cpu_get(int cpu, k_thread_runtime_stats_t *stats);

/**
 * @brief Enable gathering of runtime statistics for specified thread
 *
 * This routine enables the gathering of runtime statistics for the specified
 * thread.
 *
 * @param thread ID of thread
 * @return -EINVAL if invalid thread ID, otherwise 0
 */
int k_thread_runtime_stats_enable(k_tid_t thread);

/**
 * @brief Disable gathering of runtime statistics for specified thread
 *
 * This routine disables the gathering of runtime statistics for the specified
 * thread.
 *
 * @param thread ID of thread
 * @return -EINVAL if invalid thread ID, otherwise 0
 */
int k_thread_runtime_stats_disable(k_tid_t thread);

/**
 * @brief Enable gathering of system runtime statistics
 *
 * This routine enables the gathering of system runtime statistics. Note that
 * it does not affect the gathering of similar statistics for individual
 * threads.
 */
void k_sys_runtime_stats_enable(void);

/**
 * @brief Disable gathering of system runtime statistics
 *
 * This routine disables the gathering of system runtime statistics. Note that
 * it does not affect the gathering of similar statistics for individual
 * threads.
 */
void k_sys_runtime_stats_disable(void);

#ifdef __cplusplus
}
#endif

#include <zephyr/tracing/tracing.h>
#include <zephyr/syscalls/kernel.h>

#endif /* !_ASMLANGUAGE */

#endif /* ZEPHYR_INCLUDE_KERNEL_H_ */