/*
 * Copyright (c) 2016, Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Public kernel APIs.
 */

#ifndef ZEPHYR_INCLUDE_KERNEL_H_
#define ZEPHYR_INCLUDE_KERNEL_H_

#if !defined(_ASMLANGUAGE)
#include <zephyr/kernel_includes.h>
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <zephyr/toolchain.h>
#include <zephyr/tracing/tracing_macros.h>
#include <zephyr/sys/mem_stats.h>
#include <zephyr/sys/iterable_sections.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Zephyr currently assumes the size of a couple standard types to simplify
 * print string formats. Let's make sure this doesn't change without notice.
 */
BUILD_ASSERT(sizeof(int32_t) == sizeof(int));
BUILD_ASSERT(sizeof(int64_t) == sizeof(long long));
BUILD_ASSERT(sizeof(intptr_t) == sizeof(long));

/**
 * @brief Kernel APIs
 * @defgroup kernel_apis Kernel APIs
 * @since 1.0
 * @version 1.0.0
 * @{
 * @}
 */

#define K_ANY NULL

#if (CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES) == 0
#error Zero available thread priorities defined!
#endif

#define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
#define K_PRIO_PREEMPT(x) (x)

#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
#define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
#define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
#define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
#define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)

#ifdef CONFIG_POLL
#define Z_POLL_EVENT_OBJ_INIT(obj) \
	.poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
#define Z_DECL_POLL_EVENT sys_dlist_t poll_events;
#else
#define Z_POLL_EVENT_OBJ_INIT(obj)
#define Z_DECL_POLL_EVENT
#endif

struct k_thread;
struct k_mutex;
struct k_sem;
struct k_msgq;
struct k_mbox;
struct k_pipe;
struct k_queue;
struct k_fifo;
struct k_lifo;
struct k_stack;
struct k_mem_slab;
struct k_timer;
struct k_poll_event;
struct k_poll_signal;
struct k_mem_domain;
struct k_mem_partition;
struct k_futex;
struct k_event;

enum execution_context_types {
	K_ISR = 0,
	K_COOP_THREAD,
	K_PREEMPT_THREAD,
};

/* private, used by k_poll and k_work_poll */
struct k_work_poll;
typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);

/**
 * @addtogroup thread_apis
 * @{
 */

typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
				   void *user_data);

/**
 * @brief Iterate over all the threads in the system.
 *
 * This routine iterates over all the threads in the system and
 * calls the user_cb function for each thread.
 *
 * @param user_cb Pointer to the user callback function.
 * @param user_data Pointer to user data.
 *
 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
 * to be effective.
 * @note This API uses @ref k_spin_lock to protect the _kernel.threads
 * list which means creation of new threads and terminations of existing
 * threads are blocked until this API returns.
 */
void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);
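
/*
 * Illustrative sketch (not part of the original header): counting all
 * threads with k_thread_foreach(). The callback name count_cb and the
 * counter variable are hypothetical; @kconfig{CONFIG_THREAD_MONITOR}
 * must be enabled for the iteration to visit anything.
 *
 *	static void count_cb(const struct k_thread *thread, void *user_data)
 *	{
 *		unsigned int *count = user_data;
 *
 *		(*count)++;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	k_thread_foreach(count_cb, &count);
 */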

/**
 * @brief Iterate over all the threads running on the specified CPU.
 *
 * This function does the same thing as k_thread_foreach(), except it
 * only loops through the threads running on the specified CPU.
 * If CONFIG_SMP is not defined, the implementation is the same as
 * k_thread_foreach(), with an assert requiring cpu == 0.
 *
 * @param cpu The filtered cpu number
 * @param user_cb Pointer to the user callback function.
 * @param user_data Pointer to user data.
 *
 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
 * to be effective.
 * @note This API uses @ref k_spin_lock to protect the _kernel.threads
 * list which means creation of new threads and terminations of existing
 * threads are blocked until this API returns.
 */
#ifdef CONFIG_SMP
void k_thread_foreach_filter_by_cpu(unsigned int cpu,
				    k_thread_user_cb_t user_cb, void *user_data);
#else
static inline
void k_thread_foreach_filter_by_cpu(unsigned int cpu,
				    k_thread_user_cb_t user_cb, void *user_data)
{
	__ASSERT(cpu == 0, "cpu filter out of bounds");
	ARG_UNUSED(cpu);
	k_thread_foreach(user_cb, user_data);
}
#endif

/**
 * @brief Iterate over all the threads in the system without locking.
 *
 * This routine works exactly like @ref k_thread_foreach
 * but unlocks interrupts when user_cb is executed.
 *
 * @param user_cb Pointer to the user callback function.
 * @param user_data Pointer to user data.
 *
 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
 * to be effective.
 * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
 * queue elements. It unlocks it during user callback function processing.
 * If a new task is created while this @c foreach function is in progress,
 * the newly added task will not be included in the enumeration.
 * If a task is aborted during this enumeration, there is a race and
 * the aborted task may still be included in the enumeration.
 * @note If a task is aborted and the memory occupied by its @c k_thread
 * structure is reused while this @c k_thread_foreach_unlocked is in progress,
 * the system may behave unstably.
 * This function may even never return, as it would follow a stale @c next
 * task pointer, treating memory that no longer holds a k_thread structure
 * as if it did.
 * Do not reuse the memory occupied by the k_thread structure of an aborted
 * task if it was aborted after this function was called in any context.
 */
void k_thread_foreach_unlocked(
	k_thread_user_cb_t user_cb, void *user_data);

/**
 * @brief Iterate over the threads running on the specified CPU without locking.
 *
 * This function does the same thing as
 * k_thread_foreach_unlocked(), except it only loops through the threads
 * running on the specified CPU. If CONFIG_SMP is not defined, the
 * implementation is the same as k_thread_foreach_unlocked(), with an
 * assert requiring cpu == 0.
 *
 * @param cpu The filtered cpu number
 * @param user_cb Pointer to the user callback function.
 * @param user_data Pointer to user data.
 *
 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
 * to be effective.
 * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
 * queue elements. It unlocks it during user callback function processing.
 * If a new task is created while this @c foreach function is in progress,
 * the newly added task will not be included in the enumeration.
 * If a task is aborted during this enumeration, there is a race and
 * the aborted task may still be included in the enumeration.
 * @note If a task is aborted and the memory occupied by its @c k_thread
 * structure is reused while this @c k_thread_foreach_unlocked is in progress,
 * the system may behave unstably.
 * This function may even never return, as it would follow a stale @c next
 * task pointer, treating memory that no longer holds a k_thread structure
 * as if it did.
 * Do not reuse the memory occupied by the k_thread structure of an aborted
 * task if it was aborted after this function was called in any context.
 */
#ifdef CONFIG_SMP
void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu,
					     k_thread_user_cb_t user_cb, void *user_data);
#else
static inline
void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu,
					     k_thread_user_cb_t user_cb, void *user_data)
{
	__ASSERT(cpu == 0, "cpu filter out of bounds");
	ARG_UNUSED(cpu);
	k_thread_foreach_unlocked(user_cb, user_data);
}
#endif

/** @} */

/**
 * @defgroup thread_apis Thread APIs
 * @ingroup kernel_apis
 * @{
 */

#endif /* !_ASMLANGUAGE */


/*
 * Thread user options. May be needed by assembly code. Common part uses low
 * bits, arch-specific use high bits.
 */

/**
 * @brief system thread that must not abort
 */
#define K_ESSENTIAL (BIT(0))

/**
 * @brief FPU registers are managed by context switch
 *
 * @details
 * This option indicates that the thread uses the CPU's floating point
 * registers. This instructs the kernel to take additional steps to save
 * and restore the contents of these registers when scheduling the thread.
 * No effect if @kconfig{CONFIG_FPU_SHARING} is not enabled.
 */
#define K_FP_IDX 1
#define K_FP_REGS (BIT(K_FP_IDX))

/**
 * @brief user mode thread
 *
 * This thread has dropped from supervisor mode to user mode and consequently
 * has additional restrictions
 */
#define K_USER (BIT(2))

/**
 * @brief Inherit Permissions
 *
 * @details
 * Indicates that the thread being created should inherit all kernel object
 * permissions from the thread that created it. No effect if
 * @kconfig{CONFIG_USERSPACE} is not enabled.
 */
#define K_INHERIT_PERMS (BIT(3))

/**
 * @brief Callback item state
 *
 * @details
 * This is a single bit of state reserved for "callback manager"
 * utilities (p4wq initially) that need to track operations invoked
 * from within a user-provided callback they have invoked.
 * Effectively it serves as a tiny bit of zero-overhead TLS data.
 */
#define K_CALLBACK_STATE (BIT(4))

/**
 * @brief DSP registers are managed by context switch
 *
 * @details
 * This option indicates that the thread uses the CPU's DSP registers.
 * This instructs the kernel to take additional steps to save and
 * restore the contents of these registers when scheduling the thread.
 * No effect if @kconfig{CONFIG_DSP_SHARING} is not enabled.
 */
#define K_DSP_IDX 6
#define K_DSP_REGS (BIT(K_DSP_IDX))

/**
 * @brief AGU registers are managed by context switch
 *
 * @details
 * This option indicates that the thread uses the ARC processor's XY
 * memory and DSP feature. Often used with @kconfig{CONFIG_ARC_AGU_SHARING}.
 * No effect if @kconfig{CONFIG_ARC_AGU_SHARING} is not enabled.
 */
#define K_AGU_IDX 7
#define K_AGU_REGS (BIT(K_AGU_IDX))

/**
 * @brief FP and SSE registers are managed by context switch on x86
 *
 * @details
 * This option indicates that the thread uses the x86 CPU's floating point
 * and SSE registers. This instructs the kernel to take additional steps to
 * save and restore the contents of these registers when scheduling
 * the thread. No effect if @kconfig{CONFIG_X86_SSE} is not enabled.
 */
#define K_SSE_REGS (BIT(7))

/* end - thread options */

#if !defined(_ASMLANGUAGE)
/**
 * @brief Dynamically allocate a thread stack.
 *
 * Relevant stack creation flags include:
 * - @ref K_USER allocate a userspace thread (requires `CONFIG_USERSPACE=y`)
 *
 * @param size Stack size in bytes.
 * @param flags Stack creation flags, or 0.
 *
 * @retval the allocated thread stack on success.
 * @retval NULL on failure.
 *
 * @see CONFIG_DYNAMIC_THREAD
 */
__syscall k_thread_stack_t *k_thread_stack_alloc(size_t size, int flags);

/**
 * @brief Free a dynamically allocated thread stack.
 *
 * @param stack Pointer to the thread stack.
 *
 * @retval 0 on success.
 * @retval -EBUSY if the thread stack is in use.
 * @retval -EINVAL if @p stack is invalid.
 * @retval -ENOSYS if dynamic thread stack allocation is disabled
 *
 * @see CONFIG_DYNAMIC_THREAD
 */
__syscall int k_thread_stack_free(k_thread_stack_t *stack);
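
/*
 * Illustrative sketch (not part of the original header): the lifecycle of
 * a dynamically allocated stack, assuming CONFIG_DYNAMIC_THREAD=y. The
 * names worker and worker_entry are hypothetical.
 *
 *	k_thread_stack_t *stack = k_thread_stack_alloc(2048, 0);
 *
 *	if (stack != NULL) {
 *		static struct k_thread worker;
 *
 *		k_thread_create(&worker, stack, 2048, worker_entry,
 *				NULL, NULL, NULL, 5, 0, K_NO_WAIT);
 *		k_thread_join(&worker, K_FOREVER);
 *		(void)k_thread_stack_free(stack);
 *	}
 */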

/**
 * @brief Create a thread.
 *
 * This routine initializes a thread, then schedules it for execution.
 *
 * The new thread may be scheduled for immediate execution or a delayed start.
 * If the newly spawned thread does not have a delayed start the kernel
 * scheduler may preempt the current thread to allow the new thread to
 * execute.
 *
 * Thread options are architecture-specific, and can include K_ESSENTIAL,
 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
 * them using "|" (the logical OR operator).
 *
 * Stack objects passed to this function must be originally defined with
 * either of these macros in order to be portable:
 *
 * - K_THREAD_STACK_DEFINE() - For stacks that may support either user or
 *   supervisor threads.
 * - K_KERNEL_STACK_DEFINE() - For stacks that may support supervisor
 *   threads only. These stacks use less memory if CONFIG_USERSPACE is
 *   enabled.
 *
 * The stack_size parameter has constraints. It must either be:
 *
 * - The original size value passed to K_THREAD_STACK_DEFINE() or
 *   K_KERNEL_STACK_DEFINE()
 * - The return value of K_THREAD_STACK_SIZEOF(stack) if the stack was
 *   defined with K_THREAD_STACK_DEFINE()
 * - The return value of K_KERNEL_STACK_SIZEOF(stack) if the stack was
 *   defined with K_KERNEL_STACK_DEFINE().
 *
 * Using other values, or sizeof(stack), may produce undefined behavior.
 *
 * @param new_thread Pointer to uninitialized struct k_thread
 * @param stack Pointer to the stack space.
 * @param stack_size Stack size in bytes.
 * @param entry Thread entry function.
 * @param p1 1st entry point parameter.
 * @param p2 2nd entry point parameter.
 * @param p3 3rd entry point parameter.
 * @param prio Thread priority.
 * @param options Thread options.
 * @param delay Scheduling delay, or K_NO_WAIT (for no delay).
 *
 * @return ID of new thread.
 *
 */
__syscall k_tid_t k_thread_create(struct k_thread *new_thread,
				  k_thread_stack_t *stack,
				  size_t stack_size,
				  k_thread_entry_t entry,
				  void *p1, void *p2, void *p3,
				  int prio, uint32_t options, k_timeout_t delay);
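
/*
 * Illustrative sketch (not part of the original header): creating a thread
 * from a statically defined stack. The names my_stack, my_thread and
 * my_entry are hypothetical.
 *
 *	K_THREAD_STACK_DEFINE(my_stack, 1024);
 *	static struct k_thread my_thread;
 *
 *	static void my_entry(void *p1, void *p2, void *p3)
 *	{
 *		// thread body
 *	}
 *
 *	k_tid_t tid = k_thread_create(&my_thread, my_stack,
 *				      K_THREAD_STACK_SIZEOF(my_stack),
 *				      my_entry, NULL, NULL, NULL,
 *				      K_PRIO_PREEMPT(5), 0, K_NO_WAIT);
 */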

/**
 * @brief Drop a thread's privileges permanently to user mode
 *
 * This allows a supervisor thread to be re-used as a user thread.
 * This function does not return, but control will transfer to the provided
 * entry point as if this was a new user thread.
 *
 * The implementation ensures that the stack buffer contents are erased.
 * Any thread-local storage will be reverted to a pristine state.
 *
 * Memory domain membership, resource pool assignment, kernel object
 * permissions, priority, and thread options are preserved.
 *
 * A common use of this function is to re-use the main thread as a user thread
 * once all supervisor mode-only tasks have been completed.
 *
 * @param entry Function to start executing from
 * @param p1 1st entry point parameter
 * @param p2 2nd entry point parameter
 * @param p3 3rd entry point parameter
 */
FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
					    void *p1, void *p2,
					    void *p3);

/**
 * @brief Grant a thread access to a set of kernel objects
 *
 * This is a convenience function. For the provided thread, grant access to
 * the remaining arguments, which must be pointers to kernel objects.
 *
 * The thread object must be initialized (i.e. running). The objects don't
 * need to be.
 * Note that NULL shouldn't be passed as an argument.
 *
 * @param thread Thread to grant access to objects
 * @param ... list of kernel object pointers
 */
#define k_thread_access_grant(thread, ...) \
	FOR_EACH_FIXED_ARG(k_object_access_grant, (;), (thread), __VA_ARGS__)
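
/*
 * Illustrative sketch (not part of the original header): granting a user
 * thread access to two kernel objects before it needs them, assuming
 * CONFIG_USERSPACE=y. The object names my_sem and my_mutex and the thread
 * ID user_tid are hypothetical.
 *
 *	K_SEM_DEFINE(my_sem, 0, 1);
 *	K_MUTEX_DEFINE(my_mutex);
 *
 *	k_thread_access_grant(user_tid, &my_sem, &my_mutex);
 */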

/**
 * @brief Assign a resource memory pool to a thread
 *
 * By default, threads have no resource pool assigned unless their parent
 * thread has a resource pool, in which case it is inherited. Multiple
 * threads may be assigned to the same memory pool.
 *
 * Changing a thread's resource pool will not migrate allocations from the
 * previous pool.
 *
 * @param thread Target thread to assign a memory pool for resource requests.
 * @param heap Heap object to use for resources,
 *             or NULL if the thread should no longer have a memory pool.
 */
static inline void k_thread_heap_assign(struct k_thread *thread,
					struct k_heap *heap)
{
	thread->resource_pool = heap;
}
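
/*
 * Illustrative sketch (not part of the original header): giving a thread a
 * private heap to draw kernel-side allocations from. The names my_heap and
 * child_thread are hypothetical.
 *
 *	K_HEAP_DEFINE(my_heap, 4096);
 *	static struct k_thread child_thread;
 *
 *	k_thread_heap_assign(&child_thread, &my_heap);
 */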

#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
/**
 * @brief Obtain stack usage information for the specified thread
 *
 * User threads will need to have permission on the target thread object.
 *
 * Some hardware may prevent inspection of a stack buffer currently in use.
 * If this API is called from supervisor mode, on the currently running thread,
 * on a platform which selects @kconfig{CONFIG_NO_UNUSED_STACK_INSPECTION}, an
 * error will be generated.
 *
 * @param thread Thread to inspect stack information
 * @param unused_ptr Output parameter, filled in with the unused stack space
 *	of the target thread in bytes.
 * @return 0 on success
 * @return -EBADF Bad thread object (user mode only)
 * @return -EPERM No permissions on thread object (user mode only)
 * @return -ENOTSUP Forbidden by hardware policy
 * @return -EINVAL Thread is uninitialized or exited (user mode only)
 * @return -EFAULT Bad memory address for unused_ptr (user mode only)
 */
__syscall int k_thread_stack_space_get(const struct k_thread *thread,
				       size_t *unused_ptr);
#endif

#if (K_HEAP_MEM_POOL_SIZE > 0)
/**
 * @brief Assign the system heap as a thread's resource pool
 *
 * Similar to k_thread_heap_assign(), but the thread will use
 * the kernel heap to draw memory.
 *
 * Use with caution, as a malicious thread could perform DoS attacks on the
 * kernel heap.
 *
 * @param thread Target thread to assign the system heap for resource requests
 *
 */
void k_thread_system_pool_assign(struct k_thread *thread);
#endif /* (K_HEAP_MEM_POOL_SIZE > 0) */

/**
 * @brief Sleep until a thread exits
 *
 * The caller will be put to sleep until the target thread exits, either due
 * to being aborted, self-exiting, or taking a fatal error. This API returns
 * immediately if the thread isn't running.
 *
 * This API may only be called from ISRs with a K_NO_WAIT timeout,
 * where it can be useful as a predicate to detect when a thread has
 * aborted.
 *
 * @param thread Thread to wait to exit
 * @param timeout upper bound time to wait for the thread to exit.
 * @retval 0 success, target thread has exited or wasn't running
 * @retval -EBUSY returned without waiting
 * @retval -EAGAIN waiting period timed out
 * @retval -EDEADLK target thread is joining on the caller, or target thread
 *	is the caller
 */
__syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);
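
/*
 * Illustrative sketch (not part of the original header): from an ISR,
 * k_thread_join() with K_NO_WAIT acts as a non-blocking predicate. The
 * thread object name worker is hypothetical.
 *
 *	if (k_thread_join(&worker, K_NO_WAIT) == 0) {
 *		// worker has exited (or never ran)
 *	} else {
 *		// -EBUSY: worker is still running
 *	}
 */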

/**
 * @brief Put the current thread to sleep.
 *
 * This routine puts the current thread to sleep for @a duration,
 * specified as a k_timeout_t object.
 *
 * @note If @a timeout is set to K_FOREVER then the thread is suspended.
 *
 * @param timeout Desired duration of sleep.
 *
 * @return Zero if the requested time has elapsed or if the thread was woken up
 * by the \ref k_wakeup call; otherwise the time left to sleep, rounded up to
 * the nearest millisecond.
 */
__syscall int32_t k_sleep(k_timeout_t timeout);

/**
 * @brief Put the current thread to sleep.
 *
 * This routine puts the current thread to sleep for @a duration milliseconds.
 *
 * @param ms Number of milliseconds to sleep.
 *
 * @return Zero if the requested time has elapsed or if the thread was woken up
 * by the \ref k_wakeup call; otherwise the time left to sleep, rounded up to
 * the nearest millisecond.
 */
static inline int32_t k_msleep(int32_t ms)
{
	return k_sleep(Z_TIMEOUT_MS(ms));
}
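
/*
 * Illustrative sketch (not part of the original header): these two calls
 * are equivalent, and a non-zero return value reveals an early wakeup.
 *
 *	int32_t left = k_sleep(K_MSEC(100));	// same as k_msleep(100)
 *
 *	if (left > 0) {
 *		// woken early by k_wakeup(); 'left' ms of sleep remained
 *	}
 */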

/**
 * @brief Put the current thread to sleep with microsecond resolution.
 *
 * This function is unlikely to work as expected without kernel tuning.
 * In particular, because the lower bound on the duration of a sleep is
 * the duration of a tick, @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} must be
 * adjusted to achieve the resolution desired. The implications of doing
 * this must be understood before attempting to use k_usleep(). Use with
 * caution.
 *
 * @param us Number of microseconds to sleep.
 *
 * @return Zero if the requested time has elapsed or if the thread was woken up
 * by the \ref k_wakeup call; otherwise the time left to sleep, rounded up to
 * the nearest microsecond.
 */
__syscall int32_t k_usleep(int32_t us);

/**
 * @brief Cause the current thread to busy wait.
 *
 * This routine causes the current thread to execute a "do nothing" loop for
 * @a usec_to_wait microseconds.
 *
 * @note The clock used for the microsecond-resolution delay here may
 * be skewed relative to the clock used for system timeouts like
 * k_sleep(). For example k_busy_wait(1000) may take slightly more or
 * less time than k_sleep(K_MSEC(1)), with the offset dependent on
 * clock tolerances.
 *
 * @note In case when @kconfig{CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE} and
 * @kconfig{CONFIG_PM} options are enabled, this function may not work.
 * The timer/clock used for delay processing may be disabled/inactive.
 */
__syscall void k_busy_wait(uint32_t usec_to_wait);

/**
 * @brief Check whether it is possible to yield in the current context.
 *
 * This routine checks whether the kernel is in a state where it is possible to
 * yield or call blocking APIs. It should be used by code that needs to yield
 * to perform correctly, but can feasibly be called from contexts where that
 * is not possible. For example in the PRE_KERNEL initialization step, or when
 * being run from the idle thread.
 *
 * @return True if it is possible to yield in the current context, false otherwise.
 */
bool k_can_yield(void);

/**
 * @brief Yield the current thread.
 *
 * This routine causes the current thread to yield execution to another
 * thread of the same or higher priority. If there are no other ready threads
 * of the same or higher priority, the routine returns immediately.
 */
__syscall void k_yield(void);

/**
 * @brief Wake up a sleeping thread.
 *
 * This routine prematurely wakes up @a thread from sleeping.
 *
 * If @a thread is not currently sleeping, the routine has no effect.
 *
 * @param thread ID of thread to wake.
 */
__syscall void k_wakeup(k_tid_t thread);

/**
 * @brief Query thread ID of the current thread.
 *
 * This unconditionally queries the kernel via a system call.
 *
 * @note Use k_current_get() unless absolutely sure this is necessary.
 * This should only be used directly where the thread local
 * variable cannot be used or may contain invalid values
 * if thread local storage (TLS) is enabled. If TLS is not
 * enabled, this is the same as k_current_get().
 *
 * @return ID of current thread.
 */
__attribute_const__
__syscall k_tid_t k_sched_current_thread_query(void);

/**
 * @brief Get thread ID of the current thread.
 *
 * @return ID of current thread.
 *
 */
__attribute_const__
static inline k_tid_t k_current_get(void)
{
#ifdef CONFIG_CURRENT_THREAD_USE_TLS

	/* Thread-local cache of current thread ID, set in z_thread_entry() */
	extern __thread k_tid_t z_tls_current;

	return z_tls_current;
#else
	return k_sched_current_thread_query();
#endif
}

/**
 * @brief Abort a thread.
 *
 * This routine permanently stops execution of @a thread. The thread is taken
 * off all kernel queues it is part of (i.e. the ready queue, the timeout
 * queue, or a kernel object wait queue). However, any kernel resources the
 * thread might currently own (such as mutexes or memory blocks) are not
 * released. It is the responsibility of the caller of this routine to ensure
 * all necessary cleanup is performed.
 *
 * After k_thread_abort() returns, the thread is guaranteed not to be
 * running or to become runnable anywhere on the system. Normally
 * this is done via blocking the caller (in the same manner as
 * k_thread_join()), but in interrupt context on SMP systems the
 * implementation is required to spin for threads that are running on
 * other CPUs.
 *
 * @param thread ID of thread to abort.
 */
__syscall void k_thread_abort(k_tid_t thread);


/**
 * @brief Start an inactive thread
 *
 * If a thread was created with K_FOREVER in the delay parameter, it will
 * not be added to the scheduling queue until this function is called
 * on it.
 *
 * @param thread thread to start
 */
__syscall void k_thread_start(k_tid_t thread);

k_ticks_t z_timeout_expires(const struct _timeout *timeout);
k_ticks_t z_timeout_remaining(const struct _timeout *timeout);

#ifdef CONFIG_SYS_CLOCK_EXISTS

/**
 * @brief Get time when a thread wakes up, in system ticks
 *
 * This routine computes the system uptime when a waiting thread next
 * executes, in units of system ticks. If the thread is not waiting,
 * it returns current system time.
 */
__syscall k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *thread);

static inline k_ticks_t z_impl_k_thread_timeout_expires_ticks(
	const struct k_thread *thread)
{
	return z_timeout_expires(&thread->base.timeout);
}

/**
 * @brief Get time remaining before a thread wakes up, in system ticks
 *
 * This routine computes the time remaining before a waiting thread
 * next executes, in units of system ticks. If the thread is not
 * waiting, it returns zero.
 */
__syscall k_ticks_t k_thread_timeout_remaining_ticks(const struct k_thread *thread);

static inline k_ticks_t z_impl_k_thread_timeout_remaining_ticks(
	const struct k_thread *thread)
{
	return z_timeout_remaining(&thread->base.timeout);
}

#endif /* CONFIG_SYS_CLOCK_EXISTS */

/**
 * @cond INTERNAL_HIDDEN
 */

struct _static_thread_data {
	struct k_thread *init_thread;
	k_thread_stack_t *init_stack;
	unsigned int init_stack_size;
	k_thread_entry_t init_entry;
	void *init_p1;
	void *init_p2;
	void *init_p3;
	int init_prio;
	uint32_t init_options;
	const char *init_name;
#ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
	int32_t init_delay_ms;
#else
	k_timeout_t init_delay;
#endif
};

#ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
#define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay_ms = (ms)
#define Z_THREAD_INIT_DELAY(thread) SYS_TIMEOUT_MS((thread)->init_delay_ms)
#else
#define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay = SYS_TIMEOUT_MS(ms)
#define Z_THREAD_INIT_DELAY(thread) (thread)->init_delay
#endif

#define Z_THREAD_INITIALIZER(thread, stack, stack_size, \
			     entry, p1, p2, p3,         \
			     prio, options, delay, tname) \
	{                                               \
	.init_thread = (thread),                        \
	.init_stack = (stack),                          \
	.init_stack_size = (stack_size),                \
	.init_entry = (k_thread_entry_t)entry,          \
	.init_p1 = (void *)p1,                          \
	.init_p2 = (void *)p2,                          \
	.init_p3 = (void *)p3,                          \
	.init_prio = (prio),                            \
	.init_options = (options),                      \
	.init_name = STRINGIFY(tname),                  \
	Z_THREAD_INIT_DELAY_INITIALIZER(delay)          \
	}

/*
 * Refer to K_THREAD_DEFINE() and K_KERNEL_THREAD_DEFINE() for
 * information on arguments.
 */
#define Z_THREAD_COMMON_DEFINE(name, stack_size,                        \
			       entry, p1, p2, p3,                       \
			       prio, options, delay)                    \
	struct k_thread _k_thread_obj_##name;                           \
	STRUCT_SECTION_ITERABLE(_static_thread_data,                    \
				_k_thread_data_##name) =                \
		Z_THREAD_INITIALIZER(&_k_thread_obj_##name,             \
				     _k_thread_stack_##name, stack_size,\
				     entry, p1, p2, p3, prio, options,  \
				     delay, name);                      \
	const k_tid_t name = (k_tid_t)&_k_thread_obj_##name

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @brief Statically define and initialize a thread.
 *
 * The thread may be scheduled for immediate execution or a delayed start.
 *
 * Thread options are architecture-specific, and can include K_ESSENTIAL,
 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
 * them using "|" (the logical OR operator).
 *
 * The ID of the thread can be accessed using:
 *
 * @code extern const k_tid_t <name>; @endcode
 *
 * @param name Name of the thread.
 * @param stack_size Stack size in bytes.
 * @param entry Thread entry function.
 * @param p1 1st entry point parameter.
 * @param p2 2nd entry point parameter.
 * @param p3 3rd entry point parameter.
 * @param prio Thread priority.
 * @param options Thread options.
 * @param delay Scheduling delay (in milliseconds), zero for no delay.
 *
 * @note Static threads with zero delay should not normally have
 * MetaIRQ priority levels. This can preempt the system
 * initialization handling (depending on the priority of the main
 * thread) and cause surprising ordering side effects. It will not
 * affect anything in the OS per se, but consider it bad practice.
 * Use a SYS_INIT() callback if you need to run code before entrance
 * to the application main().
 */
#define K_THREAD_DEFINE(name, stack_size,                                \
			entry, p1, p2, p3,                               \
			prio, options, delay)                            \
	K_THREAD_STACK_DEFINE(_k_thread_stack_##name, stack_size);       \
	Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3,      \
			       prio, options, delay)
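
/*
 * Illustrative sketch (not part of the original header): defining a
 * statically allocated thread that starts 500 ms after boot. The names
 * blink_thread and blink_entry are hypothetical.
 *
 *	static void blink_entry(void *p1, void *p2, void *p3)
 *	{
 *		// thread body
 *	}
 *
 *	K_THREAD_DEFINE(blink_thread, 1024, blink_entry,
 *			NULL, NULL, NULL, 7, 0, 500);
 */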

/**
 * @brief Statically define and initialize a thread intended to run only in kernel mode.
 *
 * The thread may be scheduled for immediate execution or a delayed start.
 *
 * Thread options are architecture-specific, and can include K_ESSENTIAL,
 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
 * them using "|" (the logical OR operator).
 *
 * The ID of the thread can be accessed using:
 *
 * @code extern const k_tid_t <name>; @endcode
 *
 * @note Threads defined by this can only run in kernel mode, and cannot be
 *       transformed into a user thread via k_thread_user_mode_enter().
 *
 * @warning Depending on the architecture, the stack size (@p stack_size)
 *          may need to be multiples of CONFIG_MMU_PAGE_SIZE (if MMU)
 *          or in power-of-two size (if MPU).
 *
 * @param name Name of the thread.
 * @param stack_size Stack size in bytes.
 * @param entry Thread entry function.
 * @param p1 1st entry point parameter.
 * @param p2 2nd entry point parameter.
 * @param p3 3rd entry point parameter.
 * @param prio Thread priority.
 * @param options Thread options.
 * @param delay Scheduling delay (in milliseconds), zero for no delay.
 */
#define K_KERNEL_THREAD_DEFINE(name, stack_size,                         \
			       entry, p1, p2, p3,                        \
			       prio, options, delay)                     \
	K_KERNEL_STACK_DEFINE(_k_thread_stack_##name, stack_size);       \
	Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3,      \
			       prio, options, delay)

/**
 * @brief Get a thread's priority.
 *
 * This routine gets the priority of @a thread.
 *
 * @param thread ID of thread whose priority is needed.
 *
 * @return Priority of @a thread.
 */
__syscall int k_thread_priority_get(k_tid_t thread);

/**
 * @brief Set a thread's priority.
 *
 * This routine immediately changes the priority of @a thread.
 *
 * Rescheduling can occur immediately depending on the priority @a thread is
 * set to:
 *
 * - If its priority is raised above the priority of a currently scheduled
 *   preemptible thread, @a thread will be scheduled in.
 *
 * - If the caller lowers the priority of a currently scheduled preemptible
 *   thread below that of other threads in the system, the thread of the highest
 *   priority will be scheduled in.
 *
 * Priority can be assigned in the range of -CONFIG_NUM_COOP_PRIORITIES to
 * CONFIG_NUM_PREEMPT_PRIORITIES-1, where -CONFIG_NUM_COOP_PRIORITIES is the
 * highest priority.
 *
 * @param thread ID of thread whose priority is to be set.
 * @param prio New priority.
 *
 * @warning Changing the priority of a thread currently involved in mutex
 * priority inheritance may result in undefined behavior.
 */
__syscall void k_thread_priority_set(k_tid_t thread, int prio);
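
/*
 * Illustrative sketch (not part of the original header): lowering the
 * current thread's priority by one level. Remember that numerically
 * larger values are lower priority.
 *
 *	int prio = k_thread_priority_get(k_current_get());
 *
 *	k_thread_priority_set(k_current_get(), prio + 1);
 */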


#ifdef CONFIG_SCHED_DEADLINE
/**
 * @brief Set deadline expiration time for scheduler
 *
 * This sets the "deadline" expiration as a time delta from the
 * current time, in the same units used by k_cycle_get_32(). The
 * scheduler (when deadline scheduling is enabled) will choose the
 * next expiring thread when selecting between threads at the same
 * static priority. Threads at different priorities will be scheduled
 * according to their static priority.
 *
 * @note Deadlines are stored internally using 32 bit unsigned
 * integers. The number of cycles between the "first" deadline in the
 * scheduler queue and the "last" deadline must be less than 2^31 (i.e
 * a signed non-negative quantity). Failure to adhere to this rule
 * may result in scheduled threads running in an incorrect deadline
 * order.
 *
 * @note Despite the API naming, the scheduler makes no guarantees
 * that the thread WILL be scheduled within that deadline, nor does it take
 * extra metadata (like e.g. the "runtime" and "period" parameters in
 * Linux sched_setattr()) that allows the kernel to validate the
 * scheduling for achievability. Such features could be implemented
 * above this call, which is simply input to the priority selection
 * logic.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_DEADLINE} in your project
 * configuration.
 *
 * @param thread A thread on which to set the deadline
 * @param deadline A time delta, in cycle units
 *
 */
__syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
#endif

#ifdef CONFIG_SCHED_CPU_MASK
/**
 * @brief Sets all CPU enable masks to zero
 *
 * After this returns, the thread will no longer be schedulable on any
 * CPUs. The thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_clear(k_tid_t thread);

/**
 * @brief Sets all CPU enable masks to one
 *
 * After this returns, the thread will be schedulable on any CPU. The
 * thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_enable_all(k_tid_t thread);

/**
 * @brief Enable a thread to run on the specified CPU
 *
 * The thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @param cpu CPU index
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_enable(k_tid_t thread, int cpu);

/**
 * @brief Prevent a thread from running on the specified CPU
 *
 * The thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @param cpu CPU index
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_disable(k_tid_t thread, int cpu);

/**
 * @brief Pin a thread to a CPU
 *
 * Pin a thread to a CPU by first clearing the cpu mask and then enabling the
 * thread on the selected CPU.
 *
 * @param thread Thread to operate upon
 * @param cpu CPU index
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_pin(k_tid_t thread, int cpu);
#endif
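
/*
 * Illustrative sketch (not part of the original header): pinning a thread
 * to CPU 1 before starting it, assuming CONFIG_SCHED_CPU_MASK=y and that
 * the thread was created with a K_FOREVER delay so it is not yet runnable.
 * The thread ID worker_tid is hypothetical.
 *
 *	if (k_thread_cpu_pin(worker_tid, 1) == 0) {
 *		k_thread_start(worker_tid);
 *	}
 */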

/**
 * @brief Suspend a thread.
 *
 * This routine prevents the kernel scheduler from making @a thread
 * the current thread. All other internal operations on @a thread are
 * still performed; for example, kernel objects it is waiting on are
 * still handed to it. Note that any existing timeouts
 * (e.g. k_sleep(), or a timeout argument to k_sem_take() et al.)
 * will be canceled. On resume, the thread will begin running
 * immediately and return from the blocked call.
 *
 * When the target thread is active on another CPU, the caller will block until
 * the target thread is halted (suspended or aborted). But if the caller is in
 * an interrupt context, it will spin waiting for that target thread active on
 * another CPU to halt.
 *
 * If @a thread is already suspended, the routine has no effect.
 *
 * @param thread ID of thread to suspend.
 */
__syscall void k_thread_suspend(k_tid_t thread);

/**
 * @brief Resume a suspended thread.
 *
 * This routine allows the kernel scheduler to make @a thread the current
 * thread, when it is next eligible for that role.
 *
 * If @a thread is not currently suspended, the routine has no effect.
 *
 * @param thread ID of thread to resume.
 */
__syscall void k_thread_resume(k_tid_t thread);

/**
 * @brief Set time-slicing period and scope.
 *
 * This routine specifies how the scheduler will perform time slicing of
 * preemptible threads.
 *
 * To enable time slicing, @a slice must be non-zero. The scheduler
 * ensures that no thread runs for more than the specified time limit
 * before other threads of that priority are given a chance to execute.
 * Any thread whose priority is higher than @a prio is exempted, and may
 * execute as long as desired without being preempted due to time slicing.
 *
 * Time slicing only limits the maximum amount of time a thread may continuously
 * execute. Once the scheduler selects a thread for execution, there is no
 * minimum guaranteed time the thread will execute before threads of greater or
 * equal priority are scheduled.
 *
 * When the current thread is the only one of that priority eligible
 * for execution, this routine has no effect; the thread is immediately
 * rescheduled after the slice period expires.
 *
 * To disable timeslicing, set both @a slice and @a prio to zero.
 *
 * @param slice Maximum time slice length (in milliseconds).
 * @param prio Highest thread priority level eligible for time slicing.
 */
void k_sched_time_slice_set(int32_t slice, int prio);

/**
 * @brief Set thread time slice
 *
 * As for k_sched_time_slice_set, but (when
 * CONFIG_TIMESLICE_PER_THREAD=y) sets the timeslice for a specific
 * thread. When non-zero, this timeslice will take precedence over
 * the global value.
 *
 * When such a thread's timeslice expires, the configured callback
 * will be called before the thread is removed/re-added to the run
 * queue. This callback will occur in interrupt context, and the
 * specified thread is guaranteed to have been preempted by the
 * currently-executing ISR. Such a callback is free to, for example,
 * modify the thread priority or slice time for future execution,
 * suspend the thread, etc...
 *
 * @note Unlike the older API, the time slice parameter here is
 * specified in ticks, not milliseconds. Ticks have always been the
 * internal unit, and not all platforms have integer conversions
 * between the two.
 *
 * @note Threads with a non-zero slice time set will be timesliced
 * always, even if they are higher priority than the maximum timeslice
 * priority set via k_sched_time_slice_set().
 *
 * @note The callback notification for slice expiration happens, as it
 * must, while the thread is still "current", and thus it happens
 * before any registered timeouts at this tick. This has the somewhat
 * confusing side effect that the tick time (c.f. k_uptime_get()) does
 * not yet reflect the expired ticks. Applications wishing to make
 * fine-grained timing decisions within this callback should use the
 * cycle API, or derived facilities like k_thread_runtime_stats_get().
 *
 * @param th A valid, initialized thread
 * @param slice_ticks Maximum timeslice, in ticks
 * @param expired Callback function called on slice expiration
 * @param data Parameter for the expiration handler
 */
void k_thread_time_slice_set(struct k_thread *th, int32_t slice_ticks,
			     k_thread_timeslice_fn_t expired, void *data);
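
/*
 * Illustrative sketch (not part of the original header): giving one thread
 * a 10-tick slice with an expiry callback, assuming
 * CONFIG_TIMESLICE_PER_THREAD=y. The names on_slice_expired and worker
 * are hypothetical; the callback runs in interrupt context.
 *
 *	static void on_slice_expired(struct k_thread *thread, void *data)
 *	{
 *		// e.g. demote the thread for its next slice
 *	}
 *
 *	k_thread_time_slice_set(&worker, 10, on_slice_expired, NULL);
 */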

/** @} */

/**
 * @addtogroup isr_apis
 * @{
 */

/**
 * @brief Determine if code is running at interrupt level.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it is a thread or an ISR.
 *
 * @funcprops \isr_ok
 *
 * @return false if invoked by a thread.
 * @return true if invoked by an ISR.
 */
bool k_is_in_isr(void);

/**
 * @brief Determine if code is running in a preemptible thread.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it can be preempted by another thread. The routine returns a 'true'
 * value if all of the following conditions are met:
 *
 * - The code is running in a thread, not at ISR.
 * - The thread's priority is in the preemptible range.
 * - The thread has not locked the scheduler.
 *
 * @funcprops \isr_ok
 *
 * @return 0 if invoked by an ISR or by a cooperative thread.
 * @return Non-zero if invoked by a preemptible thread.
 */
__syscall int k_is_preempt_thread(void);

/**
 * @brief Test whether startup is in the before-main-task phase.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it is being invoked before the kernel is fully active.
 *
 * @funcprops \isr_ok
 *
 * @return true if invoked before post-kernel initialization
 * @return false if invoked during/after post-kernel initialization
 */
static inline bool k_is_pre_kernel(void)
{
	extern bool z_sys_post_kernel; /* in init.c */

	return !z_sys_post_kernel;
}

/**
 * @}
 */

/**
 * @addtogroup thread_apis
 * @{
 */

/**
 * @brief Lock the scheduler.
 *
 * This routine prevents the current thread from being preempted by another
 * thread by instructing the scheduler to treat it as a cooperative thread.
 * If the thread subsequently performs an operation that makes it unready,
 * it will be context switched out in the normal manner. When the thread
 * again becomes the current thread, its non-preemptible status is maintained.
 *
 * This routine can be called recursively.
 *
 * Owing to clever implementation details, scheduler locks are
 * extremely fast for non-userspace threads (just one byte
 * inc/decrement in the thread struct).
 *
 * @note This works by elevating the thread priority temporarily to a
 * cooperative priority, allowing cheap synchronization vs. other
 * preemptible or cooperative threads running on the current CPU. It
 * does not prevent preemption or asynchrony of other types. It does
 * not prevent threads from running on other CPUs when CONFIG_SMP=y.
 * It does not prevent interrupts from happening, nor does it prevent
 * threads with MetaIRQ priorities from preempting the current thread.
 * In general this is a historical API not well-suited to modern
 * applications, use with care.
 */
void k_sched_lock(void);

/**
 * @brief Unlock the scheduler.
 *
 * This routine reverses the effect of a previous call to k_sched_lock().
 * A thread must call the routine once for each time it called k_sched_lock()
 * before the thread becomes preemptible.
 */
void k_sched_unlock(void);
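
/*
 * Illustrative sketch (not part of the original header): a short critical
 * section protected from same-CPU thread preemption. Note the caveats
 * above: this does not block ISRs, MetaIRQ threads, or other CPUs.
 *
 *	k_sched_lock();
 *	// update state shared with other threads on this CPU
 *	k_sched_unlock();
 */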

/**
 * @brief Set current thread's custom data.
 *
 * This routine sets the custom data for the current thread to @a value.
 *
 * Custom data is not used by the kernel itself, and is freely available
 * for a thread to use as it sees fit. It can be used as a framework
 * upon which to build thread-local storage.
 *
 * @param value New custom data value.
 *
 */
__syscall void k_thread_custom_data_set(void *value);

/**
 * @brief Get current thread's custom data.
 *
 * This routine returns the custom data for the current thread.
 *
 * @return Current custom data value.
 */
__syscall void *k_thread_custom_data_get(void);

/**
 * @brief Set current thread name
 *
 * Set the name of the thread to be used when @kconfig{CONFIG_THREAD_MONITOR}
 * is enabled for tracing and debugging.
 *
 * @param thread Thread to set name, or NULL to set the current thread
 * @param str Name string
 * @retval 0 on success
 * @retval -EFAULT Memory access error with supplied string
 * @retval -ENOSYS Thread name configuration option not enabled
 * @retval -EINVAL Thread name too long
 */
__syscall int k_thread_name_set(k_tid_t thread, const char *str);

/**
 * @brief Get thread name
 *
 * Get the name of a thread
 *
 * @param thread Thread ID
 * @retval Thread name, or NULL if configuration not enabled
 */
const char *k_thread_name_get(k_tid_t thread);

/**
 * @brief Copy the thread name into a supplied buffer
 *
 * @param thread Thread to obtain name information
 * @param buf Destination buffer
 * @param size Destination buffer size
 * @retval -ENOSPC Destination buffer too small
 * @retval -EFAULT Memory access error
 * @retval -ENOSYS Thread name feature not enabled
 * @retval 0 Success
 */
__syscall int k_thread_name_copy(k_tid_t thread, char *buf,
				 size_t size);

/**
 * @brief Get thread state string
 *
 * This routine generates a human friendly string containing the thread's
 * state, and copies as much of it as possible into @a buf.
 *
 * @param thread_id Thread ID
 * @param buf Buffer into which to copy state strings
 * @param buf_size Size of the buffer
 *
 * @retval Pointer to @a buf if data was copied, else a pointer to "".
 */
const char *k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size);
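
/*
 * Illustrative sketch (not part of the original header): logging the
 * current thread's name and state, assuming thread names
 * (@kconfig{CONFIG_THREAD_MONITOR} plus thread name support) are enabled.
 *
 *	char state[32];
 *	k_tid_t self = k_current_get();
 *	const char *name = k_thread_name_get(self);
 *
 *	printk("%s: %s\n", name != NULL ? name : "<unnamed>",
 *	       k_thread_state_str(self, state, sizeof(state)));
 */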

/**
 * @}
 */

/**
 * @addtogroup clock_apis
 * @{
 */

/**
 * @brief Generate null timeout delay.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * not to wait if the requested operation cannot be performed immediately.
 *
 * @return Timeout delay value.
 */
#define K_NO_WAIT Z_TIMEOUT_NO_WAIT

/**
 * @brief Generate timeout delay from nanoseconds.
 *
 * This macro generates a timeout delay that instructs a kernel API to
 * wait up to @a t nanoseconds to perform the requested operation.
 * Note that timer precision is limited to the tick rate, not the
 * requested value.
 *
 * @param t Duration in nanoseconds.
 *
 * @return Timeout delay value.
 */
#define K_NSEC(t) Z_TIMEOUT_NS(t)

/**
 * @brief Generate timeout delay from microseconds.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a t microseconds to perform the requested operation.
 * Note that timer precision is limited to the tick rate, not the
 * requested value.
 *
 * @param t Duration in microseconds.
 *
 * @return Timeout delay value.
 */
#define K_USEC(t) Z_TIMEOUT_US(t)

/**
 * @brief Generate timeout delay from cycles.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a t cycles to perform the requested operation.
 *
 * @param t Duration in cycles.
 *
 * @return Timeout delay value.
 */
#define K_CYC(t) Z_TIMEOUT_CYC(t)

/**
 * @brief Generate timeout delay from system ticks.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a t ticks to perform the requested operation.
 *
 * @param t Duration in system ticks.
 *
 * @return Timeout delay value.
 */
#define K_TICKS(t) Z_TIMEOUT_TICKS(t)

/**
 * @brief Generate timeout delay from milliseconds.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a ms milliseconds to perform the requested operation.
 *
 * @param ms Duration in milliseconds.
 *
 * @return Timeout delay value.
 */
#define K_MSEC(ms) Z_TIMEOUT_MS(ms)

/**
 * @brief Generate timeout delay from seconds.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a s seconds to perform the requested operation.
 *
 * @param s Duration in seconds.
 *
 * @return Timeout delay value.
 */
#define K_SECONDS(s) K_MSEC((s) * MSEC_PER_SEC)

/**
 * @brief Generate timeout delay from minutes.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a m minutes to perform the requested operation.
 *
 * @param m Duration in minutes.
 *
 * @return Timeout delay value.
 */
#define K_MINUTES(m) K_SECONDS((m) * 60)

/**
 * @brief Generate timeout delay from hours.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a h hours to perform the requested operation.
 *
 * @param h Duration in hours.
 *
 * @return Timeout delay value.
 */
#define K_HOURS(h) K_MINUTES((h) * 60)

/**
 * @brief Generate infinite timeout delay.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait as long as necessary to perform the requested operation.
 *
 * @return Timeout delay value.
 */
#define K_FOREVER Z_FOREVER
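
/*
 * Illustrative sketch (not part of the original header): the same
 * half-second delay expressed with several of the macros above; all forms
 * are interchangeable wherever a k_timeout_t is expected. The semaphore
 * my_sem is hypothetical.
 *
 *	k_sleep(K_MSEC(500));
 *	k_sleep(K_USEC(500000));
 *	k_sem_take(&my_sem, K_NO_WAIT);
 *	k_sem_take(&my_sem, K_FOREVER);
 */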

#ifdef CONFIG_TIMEOUT_64BIT

/**
 * @brief Generates an absolute/uptime timeout value from system ticks
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in system ticks. That is, the
 * timeout will expire immediately after the system uptime reaches the
 * specified tick count.
 *
 * @param t Tick uptime value
 * @return Timeout delay value
 */
#define K_TIMEOUT_ABS_TICKS(t) \
	Z_TIMEOUT_TICKS(Z_TICK_ABS((k_ticks_t)MAX(t, 0)))

/**
 * @brief Generates an absolute/uptime timeout value from milliseconds
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in milliseconds. That is,
 * the timeout will expire immediately after the system uptime reaches
 * the specified time.
 *
 * @param t Millisecond uptime value
 * @return Timeout delay value
 */
#define K_TIMEOUT_ABS_MS(t) K_TIMEOUT_ABS_TICKS(k_ms_to_ticks_ceil64(t))

/**
 * @brief Generates an absolute/uptime timeout value from microseconds
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in microseconds. That is,
 * the timeout will expire immediately after the system uptime reaches
 * the specified time. Note that timer precision is limited by the
 * system tick rate and not the requested timeout value.
 *
 * @param t Microsecond uptime value
 * @return Timeout delay value
 */
#define K_TIMEOUT_ABS_US(t) K_TIMEOUT_ABS_TICKS(k_us_to_ticks_ceil64(t))

/**
 * @brief Generates an absolute/uptime timeout value from nanoseconds
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in nanoseconds. That is,
 * the timeout will expire immediately after the system uptime reaches
 * the specified time. Note that timer precision is limited by the
 * system tick rate and not the requested timeout value.
 *
 * @param t Nanosecond uptime value
 * @return Timeout delay value
 */
#define K_TIMEOUT_ABS_NS(t) K_TIMEOUT_ABS_TICKS(k_ns_to_ticks_ceil64(t))

/**
 * @brief Generates an absolute/uptime timeout value from system cycles
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in cycles. That is, the
 * timeout will expire immediately after the system uptime reaches the
 * specified time. Note that timer precision is limited by the system
 * tick rate and not the requested timeout value.
 *
 * @param t Cycle uptime value
 * @return Timeout delay value
 */
#define K_TIMEOUT_ABS_CYC(t) K_TIMEOUT_ABS_TICKS(k_cyc_to_ticks_ceil64(t))
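
/*
 * Illustrative sketch (not part of the original header): a drift-free
 * periodic loop built on absolute timeouts (requires
 * CONFIG_TIMEOUT_64BIT=y). Each iteration sleeps until a fixed point on
 * the uptime axis rather than for a relative interval, so processing time
 * inside the loop does not accumulate as drift.
 *
 *	int64_t next = k_uptime_get() + 100;
 *
 *	for (;;) {
 *		k_sleep(K_TIMEOUT_ABS_MS(next));
 *		next += 100;
 *		// periodic work here
 *	}
 */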
1512
1513 #endif
1514
1515 /**
1516 * @}
1517 */
1518
1519 /**
1520 * @cond INTERNAL_HIDDEN
1521 */
1522
1523 struct k_timer {
1524 /*
1525 * _timeout structure must be first here if we want to use
1526 * dynamic timer allocation. timeout.node is used in the double-linked
1527 * list of free timers
1528 */
1529 struct _timeout timeout;
1530
1531 /* wait queue for the (single) thread waiting on this timer */
1532 _wait_q_t wait_q;
1533
1534 /* runs in ISR context */
1535 void (*expiry_fn)(struct k_timer *timer);
1536
1537 /* runs in the context of the thread that calls k_timer_stop() */
1538 void (*stop_fn)(struct k_timer *timer);
1539
1540 /* timer period */
1541 k_timeout_t period;
1542
1543 /* timer status */
1544 uint32_t status;
1545
1546 /* user-specific data, also used to support legacy features */
1547 void *user_data;
1548
1549 SYS_PORT_TRACING_TRACKING_FIELD(k_timer)
1550
1551 #ifdef CONFIG_OBJ_CORE_TIMER
1552 struct k_obj_core obj_core;
1553 #endif
1554 };
1555
1556 #define Z_TIMER_INITIALIZER(obj, expiry, stop) \
1557 { \
1558 .timeout = { \
1559 .node = {},\
1560 .fn = z_timer_expiration_handler, \
1561 .dticks = 0, \
1562 }, \
1563 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
1564 .expiry_fn = expiry, \
1565 .stop_fn = stop, \
1566 .status = 0, \
1567 .user_data = 0, \
1568 }
1569
1570 /**
1571 * INTERNAL_HIDDEN @endcond
1572 */
1573
1574 /**
1575 * @defgroup timer_apis Timer APIs
1576 * @ingroup kernel_apis
1577 * @{
1578 */
1579
1580 /**
1581 * @typedef k_timer_expiry_t
1582 * @brief Timer expiry function type.
1583 *
1584 * A timer's expiry function is executed by the system clock interrupt handler
1585 * each time the timer expires. The expiry function is optional, and is only
1586 * invoked if the timer has been initialized with one.
1587 *
1588 * @param timer Address of timer.
1589 */
1590 typedef void (*k_timer_expiry_t)(struct k_timer *timer);
1591
1592 /**
1593 * @typedef k_timer_stop_t
1594 * @brief Timer stop function type.
1595 *
1596 * A timer's stop function is executed if the timer is stopped prematurely.
1597 * The function runs in the context of the call that stops the timer. As
1598 * k_timer_stop() can be invoked from an ISR, the stop function must be
1599 * callable from interrupt context (isr-ok).
1600 *
1601 * The stop function is optional, and is only invoked if the timer has been
1602 * initialized with one.
1603 *
1604 * @param timer Address of timer.
1605 */
1606 typedef void (*k_timer_stop_t)(struct k_timer *timer);
1607
1608 /**
1609 * @brief Statically define and initialize a timer.
1610 *
1611 * The timer can be accessed outside the module where it is defined using:
1612 *
1613 * @code extern struct k_timer <name>; @endcode
1614 *
1615 * @param name Name of the timer variable.
1616 * @param expiry_fn Function to invoke each time the timer expires.
1617 * @param stop_fn Function to invoke if the timer is stopped while running.
1618 */
1619 #define K_TIMER_DEFINE(name, expiry_fn, stop_fn) \
1620 STRUCT_SECTION_ITERABLE(k_timer, name) = \
1621 Z_TIMER_INITIALIZER(name, expiry_fn, stop_fn)
1622
1623 /**
1624 * @brief Initialize a timer.
1625 *
1626 * This routine initializes a timer, prior to its first use.
1627 *
1628 * @param timer Address of timer.
1629 * @param expiry_fn Function to invoke each time the timer expires.
1630 * @param stop_fn Function to invoke if the timer is stopped while running.
1631 */
1632 void k_timer_init(struct k_timer *timer,
1633 k_timer_expiry_t expiry_fn,
1634 k_timer_stop_t stop_fn);
1635
1636 /**
1637 * @brief Start a timer.
1638 *
1639 * This routine starts a timer, and resets its status to zero. The timer
1640 * begins counting down using the specified duration and period values.
1641 *
1642 * Attempting to start a timer that is already running is permitted.
1643 * The timer's status is reset to zero and the timer begins counting down
1644 * using the new duration and period values.
1645 *
1646 * @param timer Address of timer.
1647 * @param duration Initial timer duration.
1648 * @param period Timer period.
1649 */
1650 __syscall void k_timer_start(struct k_timer *timer,
1651 k_timeout_t duration, k_timeout_t period);
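
/*
 * Illustrative sketch: a timer that fires once after 1 s, then every 500 ms.
 * The expiry handler runs in ISR context, so it should be short and
 * ISR-safe; my_expiry_handler is a hypothetical application function.
 *
 * @code
 * static void my_expiry_handler(struct k_timer *timer)
 * {
 *     // ISR context: e.g. give a semaphore or submit a work item
 * }
 *
 * K_TIMER_DEFINE(my_timer, my_expiry_handler, NULL);
 *
 * void start_sampling(void)
 * {
 *     k_timer_start(&my_timer, K_SECONDS(1), K_MSEC(500));
 * }
 * @endcode
 */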
1652
1653 /**
1654 * @brief Stop a timer.
1655 *
1656 * This routine stops a running timer prematurely. The timer's stop function,
1657 * if one exists, is invoked by the caller.
1658 *
1659 * Attempting to stop a timer that is not running is permitted, but has no
1660 * effect on the timer.
1661 *
1662 * @note The stop handler has to be callable from ISRs if @a k_timer_stop is to
1663 * be called from ISRs.
1664 *
1665 * @funcprops \isr_ok
1666 *
1667 * @param timer Address of timer.
1668 */
1669 __syscall void k_timer_stop(struct k_timer *timer);
1670
1671 /**
1672 * @brief Read timer status.
1673 *
1674 * This routine reads the timer's status, which indicates the number of times
1675 * it has expired since its status was last read.
1676 *
1677 * Calling this routine resets the timer's status to zero.
1678 *
1679 * @param timer Address of timer.
1680 *
1681 * @return Timer status.
1682 */
1683 __syscall uint32_t k_timer_status_get(struct k_timer *timer);
1684
1685 /**
1686 * @brief Synchronize thread to timer expiration.
1687 *
1688 * This routine blocks the calling thread until the timer's status is non-zero
1689 * (indicating that it has expired at least once since it was last examined)
1690 * or the timer is stopped. If the timer status is already non-zero,
1691 * or the timer is already stopped, the caller continues without waiting.
1692 *
1693 * Calling this routine resets the timer's status to zero.
1694 *
1695 * This routine must not be used by interrupt handlers, since they are not
1696 * allowed to block.
1697 *
1698 * @param timer Address of timer.
1699 *
1700 * @return Timer status.
1701 */
1702 __syscall uint32_t k_timer_status_sync(struct k_timer *timer);
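
/*
 * Illustrative sketch: pacing a processing loop off a periodic timer
 * (reusing my_timer from the sketch above). A return value greater than
 * one means the loop overran one or more periods.
 *
 * @code
 * k_timer_start(&my_timer, K_MSEC(100), K_MSEC(100));
 *
 * while (1) {
 *     uint32_t expiries = k_timer_status_sync(&my_timer);
 *
 *     if (expiries > 1U) {
 *         // one or more 100 ms periods were missed
 *     }
 *     process_sample();   // hypothetical application function
 * }
 * @endcode
 */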
1703
1704 #ifdef CONFIG_SYS_CLOCK_EXISTS
1705
1706 /**
1707 * @brief Get next expiration time of a timer, in system ticks
1708 *
1709 * This routine returns the future system uptime reached at the next
1710 * time of expiration of the timer, in units of system ticks. If the
1711 * timer is not running, current system time is returned.
1712 *
1713 * @param timer The timer object
1714 * @return Uptime of expiration, in ticks
1715 */
1716 __syscall k_ticks_t k_timer_expires_ticks(const struct k_timer *timer);
1717
1718 static inline k_ticks_t z_impl_k_timer_expires_ticks(
1719 const struct k_timer *timer)
1720 {
1721 return z_timeout_expires(&timer->timeout);
1722 }
1723
1724 /**
1725 * @brief Get time remaining before a timer next expires, in system ticks
1726 *
1727 * This routine computes the time remaining before a running timer
1728 * next expires, in units of system ticks. If the timer is not
1729 * running, it returns zero.
1730 *
1731 * @param timer The timer object
1732 * @return Remaining time until expiration, in ticks
1733 */
1734 __syscall k_ticks_t k_timer_remaining_ticks(const struct k_timer *timer);
1735
1736 static inline k_ticks_t z_impl_k_timer_remaining_ticks(
1737 const struct k_timer *timer)
1738 {
1739 return z_timeout_remaining(&timer->timeout);
1740 }
1741
1742 /**
1743 * @brief Get time remaining before a timer next expires.
1744 *
1745 * This routine computes the (approximate) time remaining before a running
1746 * timer next expires. If the timer is not running, it returns zero.
1747 *
1748 * @param timer Address of timer.
1749 *
1750 * @return Remaining time (in milliseconds).
1751 */
1752 static inline uint32_t k_timer_remaining_get(struct k_timer *timer)
1753 {
1754 return k_ticks_to_ms_floor32(k_timer_remaining_ticks(timer));
1755 }
1756
1757 #endif /* CONFIG_SYS_CLOCK_EXISTS */
1758
1759 /**
1760 * @brief Associate user-specific data with a timer.
1761 *
1762 * This routine records the @a user_data with the @a timer, to be retrieved
1763 * later.
1764 *
1765 * It can be used e.g. in a timer handler shared across multiple subsystems to
1766 * retrieve data specific to the subsystem this timer is associated with.
1767 *
1768 * @param timer Address of timer.
1769 * @param user_data User data to associate with the timer.
1770 */
1771 __syscall void k_timer_user_data_set(struct k_timer *timer, void *user_data);
1772
1773 /**
1774 * @internal
1775 */
1776 static inline void z_impl_k_timer_user_data_set(struct k_timer *timer,
1777 void *user_data)
1778 {
1779 timer->user_data = user_data;
1780 }
1781
1782 /**
1783 * @brief Retrieve the user-specific data from a timer.
1784 *
1785 * @param timer Address of timer.
1786 *
1787 * @return The user data.
1788 */
1789 __syscall void *k_timer_user_data_get(const struct k_timer *timer);
1790
1791 static inline void *z_impl_k_timer_user_data_get(const struct k_timer *timer)
1792 {
1793 return timer->user_data;
1794 }
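
/*
 * Illustrative sketch: one expiry handler shared by several subsystem
 * instances, each recovering its own context through the timer's user
 * data. struct my_ctx and its field are hypothetical.
 *
 * @code
 * struct my_ctx {
 *     uint32_t expirations;
 * };
 *
 * static void shared_expiry(struct k_timer *timer)
 * {
 *     struct my_ctx *ctx = k_timer_user_data_get(timer);
 *
 *     ctx->expirations++;
 * }
 *
 * void instance_setup(struct k_timer *timer, struct my_ctx *ctx)
 * {
 *     k_timer_init(timer, shared_expiry, NULL);
 *     k_timer_user_data_set(timer, ctx);
 * }
 * @endcode
 */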
1795
1796 /** @} */
1797
1798 /**
1799 * @addtogroup clock_apis
1800 * @ingroup kernel_apis
1801 * @{
1802 */
1803
1804 /**
1805 * @brief Get system uptime, in system ticks.
1806 *
1807 * This routine returns the elapsed time since the system booted, in
1808 * ticks (c.f. @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC}), which is the
1809 * fundamental unit of resolution of kernel timekeeping.
1810 *
1811 * @return Current uptime in ticks.
1812 */
1813 __syscall int64_t k_uptime_ticks(void);
1814
1815 /**
1816 * @brief Get system uptime.
1817 *
1818 * This routine returns the elapsed time since the system booted,
1819 * in milliseconds.
1820 *
1821 * @note
1822 * While this function returns time in milliseconds, it does
1823 * not mean it has millisecond resolution. The actual resolution depends on
1824 * @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
1825 *
1826 * @return Current uptime in milliseconds.
1827 */
1828 static inline int64_t k_uptime_get(void)
1829 {
1830 return k_ticks_to_ms_floor64(k_uptime_ticks());
1831 }
1832
1833 /**
1834 * @brief Get system uptime (32-bit version).
1835 *
1836 * This routine returns the lower 32 bits of the system uptime in
1837 * milliseconds.
1838 *
1839 * Because correct conversion requires full precision of the system
1840 * clock there is no benefit to using this over k_uptime_get() unless
1841 * you know the application will never run long enough for the system
1842 * clock to approach 2^32 ticks. Calls to this function may involve
1843 * interrupt blocking and 64-bit math.
1844 *
1845 * @note
1846 * While this function returns time in milliseconds, it does
1847 * not mean it has millisecond resolution. The actual resolution depends on
1848 * @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
1849 *
1850 * @return The low 32 bits of the current uptime, in milliseconds.
1851 */
1852 static inline uint32_t k_uptime_get_32(void)
1853 {
1854 return (uint32_t)k_uptime_get();
1855 }
1856
1857 /**
1858 * @brief Get system uptime in seconds.
1859 *
1860 * This routine returns the elapsed time since the system booted,
1861 * in seconds.
1862 *
1863 * @return Current uptime in seconds.
1864 */
1865 static inline uint32_t k_uptime_seconds(void)
1866 {
1867 return k_ticks_to_sec_floor32(k_uptime_ticks());
1868 }
1869
1870 /**
1871 * @brief Get elapsed time.
1872 *
1873 * This routine computes the elapsed time between the current system uptime
1874 * and an earlier reference time, in milliseconds.
1875 *
1876 * @param reftime Pointer to a reference time, which is updated to the current
1877 * uptime upon return.
1878 *
1879 * @return Elapsed time.
1880 */
1881 static inline int64_t k_uptime_delta(int64_t *reftime)
1882 {
1883 int64_t uptime, delta;
1884
1885 uptime = k_uptime_get();
1886 delta = uptime - *reftime;
1887 *reftime = uptime;
1888
1889 return delta;
1890 }
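
/*
 * Illustrative sketch: measuring how long a block of work took, at system
 * tick resolution; do_work() is a hypothetical application function.
 *
 * @code
 * int64_t ref = k_uptime_get();
 *
 * do_work();
 *
 * int64_t took_ms = k_uptime_delta(&ref);
 * // ref now holds the current uptime, ready for the next measurement
 * @endcode
 */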
1891
1892 /**
1893 * @brief Read the hardware clock.
1894 *
1895 * This routine returns the current time, as measured by the system's hardware
1896 * clock.
1897 *
1898 * @return Current hardware clock up-counter (in cycles).
1899 */
1900 static inline uint32_t k_cycle_get_32(void)
1901 {
1902 return arch_k_cycle_get_32();
1903 }
1904
1905 /**
1906 * @brief Read the 64-bit hardware clock.
1907 *
1908 * This routine returns the current time in 64-bits, as measured by the
1909 * system's hardware clock, if available.
1910 *
1911 * @see CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER
1912 *
1913 * @return Current hardware clock up-counter (in cycles).
1914 */
1915 static inline uint64_t k_cycle_get_64(void)
1916 {
1917 if (!IS_ENABLED(CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER)) {
1918 __ASSERT(0, "64-bit cycle counter not enabled on this platform. "
1919 "See CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER");
1920 return 0;
1921 }
1922
1923 return arch_k_cycle_get_64();
1924 }
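
/*
 * Illustrative sketch: timing a short operation with the cycle counter,
 * which usually has much finer resolution than the tick clock. The
 * unsigned subtraction stays correct across a single 32-bit wraparound;
 * short_operation() is hypothetical.
 *
 * @code
 * uint32_t start = k_cycle_get_32();
 *
 * short_operation();
 *
 * uint32_t cycles = k_cycle_get_32() - start;
 * uint64_t ns = k_cyc_to_ns_floor64(cycles);
 * @endcode
 */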
1925
1926 /**
1927 * @}
1928 */
1929
1930 struct k_queue {
1931 sys_sflist_t data_q;
1932 struct k_spinlock lock;
1933 _wait_q_t wait_q;
1934
1935 Z_DECL_POLL_EVENT
1936
1937 SYS_PORT_TRACING_TRACKING_FIELD(k_queue)
1938 };
1939
1940 /**
1941 * @cond INTERNAL_HIDDEN
1942 */
1943
1944 #define Z_QUEUE_INITIALIZER(obj) \
1945 { \
1946 .data_q = SYS_SFLIST_STATIC_INIT(&obj.data_q), \
1947 .lock = { }, \
1948 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
1949 Z_POLL_EVENT_OBJ_INIT(obj) \
1950 }
1951
1952 /**
1953 * INTERNAL_HIDDEN @endcond
1954 */
1955
1956 /**
1957 * @defgroup queue_apis Queue APIs
1958 * @ingroup kernel_apis
1959 * @{
1960 */
1961
1962 /**
1963 * @brief Initialize a queue.
1964 *
1965 * This routine initializes a queue object, prior to its first use.
1966 *
1967 * @param queue Address of the queue.
1968 */
1969 __syscall void k_queue_init(struct k_queue *queue);
1970
1971 /**
1972 * @brief Cancel waiting on a queue.
1973 *
1974 * This routine causes the first thread pending on @a queue, if any, to
1975 * return from its k_queue_get() call with a NULL value (as if the timeout expired).
1976 * If the queue is being waited on by k_poll(), it will return with
1977 * -EINTR and K_POLL_STATE_CANCELLED state (and per above, subsequent
1978 * k_queue_get() will return NULL).
1979 *
1980 * @funcprops \isr_ok
1981 *
1982 * @param queue Address of the queue.
1983 */
1984 __syscall void k_queue_cancel_wait(struct k_queue *queue);
1985
1986 /**
1987 * @brief Append an element to the end of a queue.
1988 *
1989 * This routine appends a data item to @a queue. A queue data item must be
1990 * aligned on a word boundary, and the first word of the item is reserved
1991 * for the kernel's use.
1992 *
1993 * @funcprops \isr_ok
1994 *
1995 * @param queue Address of the queue.
1996 * @param data Address of the data item.
1997 */
1998 void k_queue_append(struct k_queue *queue, void *data);
1999
2000 /**
2001 * @brief Append an element to a queue.
2002 *
2003 * This routine appends a data item to @a queue. There is an implicit memory
2004 * allocation to create an additional temporary bookkeeping data structure from
2005 * the calling thread's resource pool, which is automatically freed when the
2006 * item is removed. The data itself is not copied.
2007 *
2008 * @funcprops \isr_ok
2009 *
2010 * @param queue Address of the queue.
2011 * @param data Address of the data item.
2012 *
2013 * @retval 0 on success
2014 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2015 */
2016 __syscall int32_t k_queue_alloc_append(struct k_queue *queue, void *data);
2017
2018 /**
2019 * @brief Prepend an element to a queue.
2020 *
2021 * This routine prepends a data item to @a queue. A queue data item must be
2022 * aligned on a word boundary, and the first word of the item is reserved
2023 * for the kernel's use.
2024 *
2025 * @funcprops \isr_ok
2026 *
2027 * @param queue Address of the queue.
2028 * @param data Address of the data item.
2029 */
2030 void k_queue_prepend(struct k_queue *queue, void *data);
2031
2032 /**
2033 * @brief Prepend an element to a queue.
2034 *
2035 * This routine prepends a data item to @a queue. There is an implicit memory
2036 * allocation to create an additional temporary bookkeeping data structure from
2037 * the calling thread's resource pool, which is automatically freed when the
2038 * item is removed. The data itself is not copied.
2039 *
2040 * @funcprops \isr_ok
2041 *
2042 * @param queue Address of the queue.
2043 * @param data Address of the data item.
2044 *
2045 * @retval 0 on success
2046 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2047 */
2048 __syscall int32_t k_queue_alloc_prepend(struct k_queue *queue, void *data);
2049
2050 /**
2051 * @brief Insert an element into a queue.
2052 *
2053 * This routine inserts a data item into @a queue after item @a prev. A queue
2054 * data item must be aligned on a word boundary, and the first word of
2055 * the item is reserved for the kernel's use.
2056 *
2057 * @funcprops \isr_ok
2058 *
2059 * @param queue Address of the queue.
2060 * @param prev Address of the previous data item.
2061 * @param data Address of the data item.
2062 */
2063 void k_queue_insert(struct k_queue *queue, void *prev, void *data);
2064
2065 /**
2066 * @brief Atomically append a list of elements to a queue.
2067 *
2068 * This routine adds a list of data items to @a queue in one operation.
2069 * The data items must be in a singly-linked list, with the first word
2070 * in each data item pointing to the next data item; the list must be
2071 * NULL-terminated.
2072 *
2073 * @funcprops \isr_ok
2074 *
2075 * @param queue Address of the queue.
2076 * @param head Pointer to first node in singly-linked list.
2077 * @param tail Pointer to last node in singly-linked list.
2078 *
2079 * @retval 0 on success
2080 * @retval -EINVAL on invalid supplied data
2081 *
2082 */
2083 int k_queue_append_list(struct k_queue *queue, void *head, void *tail);
2084
2085 /**
2086 * @brief Atomically add a list of elements to a queue.
2087 *
2088 * This routine adds a list of data items to @a queue in one operation.
2089 * The data items must be in a singly-linked list implemented using a
2090 * sys_slist_t object. Upon completion, the original list is empty.
2091 *
2092 * @funcprops \isr_ok
2093 *
2094 * @param queue Address of the queue.
2095 * @param list Pointer to sys_slist_t object.
2096 *
2097 * @retval 0 on success
2098 * @retval -EINVAL on invalid data
2099 */
2100 int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list);
2101
2102 /**
2103 * @brief Get an element from a queue.
2104 *
2105 * This routine removes the first data item from @a queue. The first word of the
2106 * data item is reserved for the kernel's use.
2107 *
2108 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2109 *
2110 * @funcprops \isr_ok
2111 *
2112 * @param queue Address of the queue.
2113 * @param timeout Waiting period to obtain a data item, or one of the special
2114 * values K_NO_WAIT and K_FOREVER.
2115 *
2116 * @return Address of the data item if successful; NULL if returned
2117 * without waiting, or waiting period timed out.
2118 */
2119 __syscall void *k_queue_get(struct k_queue *queue, k_timeout_t timeout);
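
/*
 * Illustrative sketch: passing items between threads with the first word
 * of each item reserved for the kernel. The struct layout beyond that
 * first word is hypothetical.
 *
 * @code
 * struct msg {
 *     void *reserved;   // first word, used by the kernel's list node
 *     uint32_t payload;
 * };
 *
 * struct k_queue q;
 * static struct msg m;
 *
 * k_queue_init(&q);
 * m.payload = 42U;
 * k_queue_append(&q, &m);
 *
 * struct msg *got = k_queue_get(&q, K_FOREVER);
 * @endcode
 */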
2120
2121 /**
2122 * @brief Remove an element from a queue.
2123 *
2124 * This routine removes a data item from @a queue. The first word of the
2125 * data item is reserved for the kernel's use. Removing elements from a k_queue
2126 * relies on sys_slist_find_and_remove, which is not a constant-time operation.
2127 *
2128 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2129 *
2130 * @funcprops \isr_ok
2131 *
2132 * @param queue Address of the queue.
2133 * @param data Address of the data item.
2134 *
2135 * @return true if data item was removed
2136 */
2137 bool k_queue_remove(struct k_queue *queue, void *data);
2138
2139 /**
2140 * @brief Append an element to a queue only if it's not present already.
2141 *
2142 * This routine appends a data item to @a queue. The first word of the data
2143 * item is reserved for the kernel's use. Appending elements to a k_queue
2144 * relies on sys_slist_is_node_in_list, which is not a constant-time operation.
2145 *
2146 * @funcprops \isr_ok
2147 *
2148 * @param queue Address of the queue.
2149 * @param data Address of the data item.
2150 *
2151 * @return true if data item was added, false if not
2152 */
2153 bool k_queue_unique_append(struct k_queue *queue, void *data);
2154
2155 /**
2156 * @brief Query a queue to see if it has data available.
2157 *
2158 * Note that the data might be already gone by the time this function returns
2159 * if other threads are also trying to read from the queue.
2160 *
2161 * @funcprops \isr_ok
2162 *
2163 * @param queue Address of the queue.
2164 *
2165 * @return Non-zero if the queue is empty.
2166 * @return 0 if data is available.
2167 */
2168 __syscall int k_queue_is_empty(struct k_queue *queue);
2169
2170 static inline int z_impl_k_queue_is_empty(struct k_queue *queue)
2171 {
2172 return sys_sflist_is_empty(&queue->data_q) ? 1 : 0;
2173 }
2174
2175 /**
2176 * @brief Peek element at the head of queue.
2177 *
2178 * Return an element from the head of the queue without removing it.
2179 *
2180 * @param queue Address of the queue.
2181 *
2182 * @return Head element, or NULL if queue is empty.
2183 */
2184 __syscall void *k_queue_peek_head(struct k_queue *queue);
2185
2186 /**
2187 * @brief Peek element at the tail of queue.
2188 *
2189 * Return an element from the tail of the queue without removing it.
2190 *
2191 * @param queue Address of the queue.
2192 *
2193 * @return Tail element, or NULL if queue is empty.
2194 */
2195 __syscall void *k_queue_peek_tail(struct k_queue *queue);
2196
2197 /**
2198 * @brief Statically define and initialize a queue.
2199 *
2200 * The queue can be accessed outside the module where it is defined using:
2201 *
2202 * @code extern struct k_queue <name>; @endcode
2203 *
2204 * @param name Name of the queue.
2205 */
2206 #define K_QUEUE_DEFINE(name) \
2207 STRUCT_SECTION_ITERABLE(k_queue, name) = \
2208 Z_QUEUE_INITIALIZER(name)
2209
2210 /** @} */
2211
2212 #ifdef CONFIG_USERSPACE
2213 /**
2214 * @brief futex structure
2215 *
2216 * A k_futex is a lightweight mutual exclusion primitive designed
2217 * to minimize kernel involvement. Uncontended operation relies
2218 * only on atomic access to shared memory. k_futex structures are tracked
2219 * as kernel objects and can live in user memory, so any access
2220 * bypasses the kernel object permission management mechanism.
2221 */
2222 struct k_futex {
2223 atomic_t val;
2224 };
2225
2226 /**
2227 * @brief futex kernel data structure
2228 *
2229 * z_futex_data is the helper data structure that k_futex uses to complete
2230 * contended futex operations on the kernel side; the z_futex_data of
2231 * every futex object is invisible in user mode.
2232 */
2233 struct z_futex_data {
2234 _wait_q_t wait_q;
2235 struct k_spinlock lock;
2236 };
2237
2238 #define Z_FUTEX_DATA_INITIALIZER(obj) \
2239 { \
2240 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q) \
2241 }
2242
2243 /**
2244 * @defgroup futex_apis FUTEX APIs
2245 * @ingroup kernel_apis
2246 * @{
2247 */
2248
2249 /**
2250 * @brief Pend the current thread on a futex
2251 *
2252 * Tests that the supplied futex contains the expected value, and if so,
2253 * goes to sleep until some other thread calls k_futex_wake() on it.
2254 *
2255 * @param futex Address of the futex.
2256 * @param expected Expected value of the futex, if it is different the caller
2257 * will not wait on it.
2258 * @param timeout Waiting period on the futex, or one of the special values
2259 * K_NO_WAIT or K_FOREVER.
2260 * @retval -EACCES Caller does not have read access to futex address.
2261 * @retval -EAGAIN If the futex value did not match the expected parameter.
2262 * @retval -EINVAL Futex parameter address not recognized by the kernel.
2263 * @retval -ETIMEDOUT Thread woke up due to timeout and not a futex wakeup.
2264 * @retval 0 if the caller went to sleep and was woken up. The caller
2265 * should check the futex's value on wakeup to determine if it needs
2266 * to block again.
2267 */
2268 __syscall int k_futex_wait(struct k_futex *futex, int expected,
2269 k_timeout_t timeout);
2270
2271 /**
2272 * @brief Wake one/all threads pending on a futex
2273 *
2274 * Wake up the highest priority thread pending on the supplied futex, or
2275 * wake up all the threads pending on the supplied futex; the behavior
2276 * depends on @a wake_all.
2277 *
2278 * @param futex Futex to wake up pending threads.
2279 * @param wake_all If true, wake up all pending threads; if false,
2280 * wake up only the highest priority thread.
2281 * @retval -EACCES Caller does not have access to the futex address.
2282 * @retval -EINVAL Futex parameter address not recognized by the kernel.
2283 * @retval Number of threads that were woken up.
2284 */
2285 __syscall int k_futex_wake(struct k_futex *futex, bool wake_all);
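
/*
 * Illustrative sketch (CONFIG_USERSPACE): a minimal lock built on a futex,
 * with 0 meaning free and 1 meaning taken. Contended takers sleep in
 * k_futex_wait() and are released by k_futex_wake(). A production lock
 * would add fairness and contention-count refinements omitted here.
 *
 * @code
 * static struct k_futex flock = { .val = ATOMIC_INIT(0) };
 *
 * void flock_take(void)
 * {
 *     while (!atomic_cas(&flock.val, 0, 1)) {
 *         (void)k_futex_wait(&flock, 1, K_FOREVER);
 *     }
 * }
 *
 * void flock_release(void)
 * {
 *     atomic_set(&flock.val, 0);
 *     (void)k_futex_wake(&flock, false);
 * }
 * @endcode
 */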
2286
2287 /** @} */
2288 #endif
2289
2290 /**
2291 * @defgroup event_apis Event APIs
2292 * @ingroup kernel_apis
2293 * @{
2294 */
2295
2296 /**
2297 * Event Structure
2298 * @ingroup event_apis
2299 */
2300
2301 struct k_event {
2302 _wait_q_t wait_q;
2303 uint32_t events;
2304 struct k_spinlock lock;
2305
2306 SYS_PORT_TRACING_TRACKING_FIELD(k_event)
2307
2308 #ifdef CONFIG_OBJ_CORE_EVENT
2309 struct k_obj_core obj_core;
2310 #endif
2311
2312 };
2313
2314 #define Z_EVENT_INITIALIZER(obj) \
2315 { \
2316 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2317 .events = 0 \
2318 }
2319
2320 /**
2321 * @brief Initialize an event object
2322 *
2323 * This routine initializes an event object, prior to its first use.
2324 *
2325 * @param event Address of the event object.
2326 */
2327 __syscall void k_event_init(struct k_event *event);
2328
2329 /**
2330 * @brief Post one or more events to an event object
2331 *
2332 * This routine posts one or more events to an event object. All tasks waiting
2333 * on the event object @a event whose waiting conditions become met by this
2334 * posting immediately unpend.
2335 *
2336 * Posting differs from setting in that posted events are merged together with
2337 * the current set of events tracked by the event object.
2338 *
2339 * @param event Address of the event object
2340 * @param events Set of events to post to @a event
2341 *
2342 * @retval Previous value of the events in @a event
2343 */
2344 __syscall uint32_t k_event_post(struct k_event *event, uint32_t events);
2345
2346 /**
2347 * @brief Set the events in an event object
2348 *
2349 * This routine sets the events stored in the event object to the specified value.
2350 * All tasks waiting on the event object @a event whose waiting conditions
2351 * become met by this immediately unpend.
2352 *
2353 * Setting differs from posting in that set events replace the current set of
2354 * events tracked by the event object.
2355 *
2356 * @param event Address of the event object
2357 * @param events Set of events to set in @a event
2358 *
2359 * @retval Previous value of the events in @a event
2360 */
2361 __syscall uint32_t k_event_set(struct k_event *event, uint32_t events);
2362
2363 /**
2364 * @brief Set or clear the events in an event object
2365 *
2366 * This routine sets the events stored in the event object to the specified value.
2367 * All tasks waiting on the event object @a event whose waiting conditions
2368 * become met by this immediately unpend. Unlike @ref k_event_set, this routine
2369 * allows specific event bits to be set and cleared as determined by the mask.
2370 *
2371 * @param event Address of the event object
2372 * @param events Set of events to set/clear in @a event
2373 * @param events_mask Mask to be applied to @a events
2374 *
2375 * @retval Previous value of the events in @a events_mask
2376 */
2377 __syscall uint32_t k_event_set_masked(struct k_event *event, uint32_t events,
2378 uint32_t events_mask);
2379
2380 /**
2381 * @brief Clear the events in an event object
2382 *
2383 * This routine clears (resets) the specified events stored in an event object.
2384 *
2385 * @param event Address of the event object
2386 * @param events Set of events to clear in @a event
2387 *
2388 * @retval Previous value of the events in @a event
2389 */
2390 __syscall uint32_t k_event_clear(struct k_event *event, uint32_t events);
2391
2392 /**
2393 * @brief Wait for any of the specified events
2394 *
2395 * This routine waits on event object @a event until any of the specified
2396 * events have been delivered to the event object, or the maximum wait time
2397 * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
2398 * events that are expressed as bits in a single 32-bit word.
2399 *
2400 * @note The caller must be careful when resetting if there are multiple threads
2401 * waiting for the event object @a event.
2402 *
2403 * @param event Address of the event object
2404 * @param events Set of desired events on which to wait
2405 * @param reset If true, clear the set of events tracked by the event object
2406 * before waiting. If false, do not clear the events.
2407 * @param timeout Waiting period for the desired set of events or one of the
2408 * special values K_NO_WAIT and K_FOREVER.
2409 *
2410 * @retval set of matching events upon success
2411 * @retval 0 if matching events were not received within the specified time
2412 */
2413 __syscall uint32_t k_event_wait(struct k_event *event, uint32_t events,
2414 bool reset, k_timeout_t timeout);
2415
2416 /**
2417 * @brief Wait for all of the specified events
2418 *
2419 * This routine waits on event object @a event until all of the specified
2420 * events have been delivered to the event object, or the maximum wait time
2421 * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
2422 * events that are expressed as bits in a single 32-bit word.
2423 *
2424 * @note The caller must be careful when resetting if there are multiple threads
2425 * waiting for the event object @a event.
2426 *
2427 * @param event Address of the event object
2428 * @param events Set of desired events on which to wait
2429 * @param reset If true, clear the set of events tracked by the event object
2430 * before waiting. If false, do not clear the events.
2431 * @param timeout Waiting period for the desired set of events or one of the
2432 * special values K_NO_WAIT and K_FOREVER.
2433 *
2434 * @retval set of matching events upon success
2435 * @retval 0 if matching events were not received within the specified time
2436 */
2437 __syscall uint32_t k_event_wait_all(struct k_event *event, uint32_t events,
2438 bool reset, k_timeout_t timeout);
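
/*
 * Illustrative sketch: a consumer waiting for either of two hypothetical
 * event bits posted by producers (threads or ISRs). Passing reset=true
 * clears previously tracked events before the wait begins.
 *
 * @code
 * #define EVT_RX_DONE BIT(0)
 * #define EVT_TX_DONE BIT(1)
 *
 * K_EVENT_DEFINE(io_events);
 *
 * // producer side
 * k_event_post(&io_events, EVT_RX_DONE);
 *
 * // consumer side
 * uint32_t got = k_event_wait(&io_events, EVT_RX_DONE | EVT_TX_DONE,
 *                             true, K_MSEC(100));
 * if (got == 0) {
 *     // timed out without either event
 * }
 * @endcode
 */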
2439
2440 /**
2441 * @brief Test the events currently tracked in the event object
2442 *
2443 * @param event Address of the event object
2444 * @param events_mask Set of desired events to test
2445 *
2446 * @retval Current value of events in @a events_mask
2447 */
2448 static inline uint32_t k_event_test(struct k_event *event, uint32_t events_mask)
2449 {
2450 return k_event_wait(event, events_mask, false, K_NO_WAIT);
2451 }
2452
2453 /**
2454 * @brief Statically define and initialize an event object
2455 *
2456 * The event can be accessed outside the module where it is defined using:
2457 *
2458 * @code extern struct k_event <name>; @endcode
2459 *
2460 * @param name Name of the event object.
2461 */
2462 #define K_EVENT_DEFINE(name) \
2463 STRUCT_SECTION_ITERABLE(k_event, name) = \
2464 Z_EVENT_INITIALIZER(name);
2465
2466 /** @} */
2467
2468 struct k_fifo {
2469 struct k_queue _queue;
2470 #ifdef CONFIG_OBJ_CORE_FIFO
2471 struct k_obj_core obj_core;
2472 #endif
2473 };
2474
2475 /**
2476 * @cond INTERNAL_HIDDEN
2477 */
2478 #define Z_FIFO_INITIALIZER(obj) \
2479 { \
2480 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
2481 }
2482
2483 /**
2484 * INTERNAL_HIDDEN @endcond
2485 */
2486
2487 /**
2488 * @defgroup fifo_apis FIFO APIs
2489 * @ingroup kernel_apis
2490 * @{
2491 */
2492
2493 /**
2494 * @brief Initialize a FIFO queue.
2495 *
2496 * This routine initializes a FIFO queue, prior to its first use.
2497 *
2498 * @param fifo Address of the FIFO queue.
2499 */
2500 #define k_fifo_init(fifo) \
2501 ({ \
2502 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, init, fifo); \
2503 k_queue_init(&(fifo)->_queue); \
2504 K_OBJ_CORE_INIT(K_OBJ_CORE(fifo), _obj_type_fifo); \
2505 K_OBJ_CORE_LINK(K_OBJ_CORE(fifo)); \
2506 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, init, fifo); \
2507 })
2508
2509 /**
2510 * @brief Cancel waiting on a FIFO queue.
2511 *
2512 * This routine causes the first thread pending on @a fifo, if any, to
2513 * return from its k_fifo_get() call with a NULL value (as if the timeout
2514 * expired).
2515 *
2516 * @funcprops \isr_ok
2517 *
2518 * @param fifo Address of the FIFO queue.
2519 */
2520 #define k_fifo_cancel_wait(fifo) \
2521 ({ \
2522 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, cancel_wait, fifo); \
2523 k_queue_cancel_wait(&(fifo)->_queue); \
2524 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, cancel_wait, fifo); \
2525 })
2526
2527 /**
2528 * @brief Add an element to a FIFO queue.
2529 *
2530 * This routine adds a data item to @a fifo. A FIFO data item must be
2531 * aligned on a word boundary, and the first word of the item is reserved
2532 * for the kernel's use.
2533 *
2534 * @funcprops \isr_ok
2535 *
2536 * @param fifo Address of the FIFO.
2537 * @param data Address of the data item.
2538 */
2539 #define k_fifo_put(fifo, data) \
2540 ({ \
2541 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put, fifo, data); \
2542 k_queue_append(&(fifo)->_queue, data); \
2543 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put, fifo, data); \
2544 })
2545
2546 /**
2547 * @brief Add an element to a FIFO queue.
2548 *
2549 * This routine adds a data item to @a fifo. There is an implicit memory
2550 * allocation to create an additional temporary bookkeeping data structure from
2551 * the calling thread's resource pool, which is automatically freed when the
2552 * item is removed. The data itself is not copied.
2553 *
2554 * @funcprops \isr_ok
2555 *
2556 * @param fifo Address of the FIFO.
2557 * @param data Address of the data item.
2558 *
2559 * @retval 0 on success
2560 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2561 */
2562 #define k_fifo_alloc_put(fifo, data) \
2563 ({ \
2564 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, alloc_put, fifo, data); \
2565 int fap_ret = k_queue_alloc_append(&(fifo)->_queue, data); \
2566 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, alloc_put, fifo, data, fap_ret); \
2567 fap_ret; \
2568 })
2569
2570 /**
2571 * @brief Atomically add a list of elements to a FIFO.
2572 *
2573 * This routine adds a list of data items to @a fifo in one operation.
2574 * The data items must be in a singly-linked list, with the first word of
2575 * each data item pointing to the next data item; the list must be
2576 * NULL-terminated.
2577 *
2578 * @funcprops \isr_ok
2579 *
2580 * @param fifo Address of the FIFO queue.
2581 * @param head Pointer to first node in singly-linked list.
2582 * @param tail Pointer to last node in singly-linked list.
2583 */
2584 #define k_fifo_put_list(fifo, head, tail) \
2585 ({ \
2586 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_list, fifo, head, tail); \
2587 k_queue_append_list(&(fifo)->_queue, head, tail); \
2588 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_list, fifo, head, tail); \
2589 })
2590
2591 /**
2592 * @brief Atomically add a list of elements to a FIFO queue.
2593 *
2594 * This routine adds a list of data items to @a fifo in one operation.
2595 * The data items must be in a singly-linked list implemented using a
2596 * sys_slist_t object. Upon completion, the sys_slist_t object is invalid
2597 * and must be re-initialized via sys_slist_init().
2598 *
2599 * @funcprops \isr_ok
2600 *
2601 * @param fifo Address of the FIFO queue.
2602 * @param list Pointer to sys_slist_t object.
2603 */
2604 #define k_fifo_put_slist(fifo, list) \
2605 ({ \
2606 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_slist, fifo, list); \
2607 k_queue_merge_slist(&(fifo)->_queue, list); \
2608 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_slist, fifo, list); \
2609 })
2610
2611 /**
2612 * @brief Get an element from a FIFO queue.
2613 *
2614 * This routine removes a data item from @a fifo in a "first in, first out"
2615 * manner. The first word of the data item is reserved for the kernel's use.
2616 *
2617 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2618 *
2619 * @funcprops \isr_ok
2620 *
2621 * @param fifo Address of the FIFO queue.
2622 * @param timeout Waiting period to obtain a data item,
2623 * or one of the special values K_NO_WAIT and K_FOREVER.
2624 *
2625 * @return Address of the data item if successful; NULL if returned
2626 * without waiting, or waiting period timed out.
2627 */
2628 #define k_fifo_get(fifo, timeout) \
2629 ({ \
2630 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, get, fifo, timeout); \
2631 void *fg_ret = k_queue_get(&(fifo)->_queue, timeout); \
2632 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, get, fifo, timeout, fg_ret); \
2633 fg_ret; \
2634 })
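
/*
 * Illustrative sketch: the classic producer/consumer pairing around a
 * statically defined FIFO. The item layout beyond the reserved first word
 * is hypothetical.
 *
 * @code
 * struct data_item {
 *     void *fifo_reserved;   // first word, reserved for the kernel
 *     uint8_t buf[16];
 * };
 *
 * K_FIFO_DEFINE(my_fifo);
 *
 * // producer thread
 * static struct data_item item;
 * k_fifo_put(&my_fifo, &item);
 *
 * // consumer thread
 * struct data_item *rx = k_fifo_get(&my_fifo, K_FOREVER);
 * @endcode
 */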
2635
2636 /**
2637 * @brief Query a FIFO queue to see if it has data available.
2638 *
2639 * Note that the data might be already gone by the time this function returns
2640 * if other threads are also trying to read from the FIFO.
2641 *
2642 * @funcprops \isr_ok
2643 *
2644 * @param fifo Address of the FIFO queue.
2645 *
2646 * @return Non-zero if the FIFO queue is empty.
2647 * @return 0 if data is available.
2648 */
2649 #define k_fifo_is_empty(fifo) \
2650 k_queue_is_empty(&(fifo)->_queue)
2651
2652 /**
2653 * @brief Peek element at the head of a FIFO queue.
2654 *
2655 * Return an element from the head of a FIFO queue without removing it. A use
2656 * case for this is when elements of the FIFO object are themselves containers.
2657 * Then, on each iteration of processing, the head container is peeked,
2658 * some data is processed out of it, and only once the container is empty
2659 * is it completely removed from the FIFO queue.
2660 *
2661 * @param fifo Address of the FIFO queue.
2662 *
2663 * @return Head element, or NULL if the FIFO queue is empty.
2664 */
2665 #define k_fifo_peek_head(fifo) \
2666 ({ \
2667 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_head, fifo); \
2668 void *fph_ret = k_queue_peek_head(&(fifo)->_queue); \
2669 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_head, fifo, fph_ret); \
2670 fph_ret; \
2671 })
2672
2673 /**
2674 * @brief Peek element at the tail of FIFO queue.
2675 *
2676 * Return an element from the tail of a FIFO queue without removing it. A use
2677 * case for this is when elements of the FIFO queue are themselves containers.
2678 * Then it may be useful to add more data to the last container in the FIFO queue.
2679 *
2680 * @param fifo Address of the FIFO queue.
2681 *
2682 * @return Tail element, or NULL if the FIFO queue is empty.
2683 */
2684 #define k_fifo_peek_tail(fifo) \
2685 ({ \
2686 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_tail, fifo); \
2687 void *fpt_ret = k_queue_peek_tail(&(fifo)->_queue); \
2688 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_tail, fifo, fpt_ret); \
2689 fpt_ret; \
2690 })
2691
2692 /**
2693 * @brief Statically define and initialize a FIFO queue.
2694 *
2695 * The FIFO queue can be accessed outside the module where it is defined using:
2696 *
2697 * @code extern struct k_fifo <name>; @endcode
2698 *
2699 * @param name Name of the FIFO queue.
2700 */
2701 #define K_FIFO_DEFINE(name) \
2702 STRUCT_SECTION_ITERABLE(k_fifo, name) = \
2703 Z_FIFO_INITIALIZER(name)
2704
2705 /** @} */
2706
2707 struct k_lifo {
2708 struct k_queue _queue;
2709 #ifdef CONFIG_OBJ_CORE_LIFO
2710 struct k_obj_core obj_core;
2711 #endif
2712 };
2713
2714 /**
2715 * @cond INTERNAL_HIDDEN
2716 */
2717
2718 #define Z_LIFO_INITIALIZER(obj) \
2719 { \
2720 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
2721 }
2722
2723 /**
2724 * INTERNAL_HIDDEN @endcond
2725 */
2726
2727 /**
2728 * @defgroup lifo_apis LIFO APIs
2729 * @ingroup kernel_apis
2730 * @{
2731 */
2732
2733 /**
2734 * @brief Initialize a LIFO queue.
2735 *
2736 * This routine initializes a LIFO queue object, prior to its first use.
2737 *
2738 * @param lifo Address of the LIFO queue.
2739 */
2740 #define k_lifo_init(lifo) \
2741 ({ \
2742 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, init, lifo); \
2743 k_queue_init(&(lifo)->_queue); \
2744 K_OBJ_CORE_INIT(K_OBJ_CORE(lifo), _obj_type_lifo); \
2745 K_OBJ_CORE_LINK(K_OBJ_CORE(lifo)); \
2746 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, init, lifo); \
2747 })
2748
2749 /**
2750 * @brief Add an element to a LIFO queue.
2751 *
2752 * This routine adds a data item to @a lifo. A LIFO queue data item must be
2753 * aligned on a word boundary, and the first word of the item is
2754 * reserved for the kernel's use.
2755 *
2756 * @funcprops \isr_ok
2757 *
2758 * @param lifo Address of the LIFO queue.
2759 * @param data Address of the data item.
2760 */
2761 #define k_lifo_put(lifo, data) \
2762 ({ \
2763 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, put, lifo, data); \
2764 k_queue_prepend(&(lifo)->_queue, data); \
2765 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, put, lifo, data); \
2766 })
2767
2768 /**
2769 * @brief Add an element to a LIFO queue.
2770 *
2771 * This routine adds a data item to @a lifo. There is an implicit memory
2772 * allocation to create an additional temporary bookkeeping data structure from
2773 * the calling thread's resource pool, which is automatically freed when the
2774 * item is removed. The data itself is not copied.
2775 *
2776 * @funcprops \isr_ok
2777 *
2778 * @param lifo Address of the LIFO.
2779 * @param data Address of the data item.
2780 *
2781 * @retval 0 on success
2782 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2783 */
2784 #define k_lifo_alloc_put(lifo, data) \
2785 ({ \
2786 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, alloc_put, lifo, data); \
2787 int lap_ret = k_queue_alloc_prepend(&(lifo)->_queue, data); \
2788 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, alloc_put, lifo, data, lap_ret); \
2789 lap_ret; \
2790 })
2791
2792 /**
2793 * @brief Get an element from a LIFO queue.
2794 *
2795 * This routine removes a data item from @a lifo in a "last in, first out"
2796 * manner. The first word of the data item is reserved for the kernel's use.
2797 *
2798 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2799 *
2800 * @funcprops \isr_ok
2801 *
2802 * @param lifo Address of the LIFO queue.
2803 * @param timeout Waiting period to obtain a data item,
2804 * or one of the special values K_NO_WAIT and K_FOREVER.
2805 *
2806 * @return Address of the data item if successful; NULL if returned
2807 * without waiting, or waiting period timed out.
2808 */
2809 #define k_lifo_get(lifo, timeout) \
2810 ({ \
2811 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, get, lifo, timeout); \
2812 void *lg_ret = k_queue_get(&(lifo)->_queue, timeout); \
2813 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, get, lifo, timeout, lg_ret); \
2814 lg_ret; \
2815 })
2816
2817 /**
2818 * @brief Statically define and initialize a LIFO queue.
2819 *
2820 * The LIFO queue can be accessed outside the module where it is defined using:
2821 *
2822 * @code extern struct k_lifo <name>; @endcode
2823 *
2824 * @param name Name of the LIFO queue.
2825 */
2826 #define K_LIFO_DEFINE(name) \
2827 STRUCT_SECTION_ITERABLE(k_lifo, name) = \
2828 Z_LIFO_INITIALIZER(name)
2829
2830 /** @} */
2831
2832 /**
2833 * @cond INTERNAL_HIDDEN
2834 */
2835 #define K_STACK_FLAG_ALLOC ((uint8_t)1) /* Buffer was allocated */
2836
2837 typedef uintptr_t stack_data_t;
2838
2839 struct k_stack {
2840 _wait_q_t wait_q;
2841 struct k_spinlock lock;
2842 stack_data_t *base, *next, *top;
2843
2844 uint8_t flags;
2845
2846 SYS_PORT_TRACING_TRACKING_FIELD(k_stack)
2847
2848 #ifdef CONFIG_OBJ_CORE_STACK
2849 struct k_obj_core obj_core;
2850 #endif
2851 };
2852
2853 #define Z_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
2854 { \
2855 .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
2856 .base = (stack_buffer), \
2857 .next = (stack_buffer), \
2858 .top = (stack_buffer) + (stack_num_entries), \
2859 }
2860
2861 /**
2862 * INTERNAL_HIDDEN @endcond
2863 */
2864
2865 /**
2866 * @defgroup stack_apis Stack APIs
2867 * @ingroup kernel_apis
2868 * @{
2869 */
2870
2871 /**
2872 * @brief Initialize a stack.
2873 *
2874 * This routine initializes a stack object, prior to its first use.
2875 *
2876 * @param stack Address of the stack.
2877 * @param buffer Address of array used to hold stacked values.
2878 * @param num_entries Maximum number of values that can be stacked.
2879 */
2880 void k_stack_init(struct k_stack *stack,
2881 stack_data_t *buffer, uint32_t num_entries);
2882
2883
2884 /**
2885 * @brief Initialize a stack.
2886 *
2887 * This routine initializes a stack object, prior to its first use. Internal
2888 * buffers will be allocated from the calling thread's resource pool.
2889 * This memory will be released if k_stack_cleanup() is called, or
2890 * userspace is enabled and the stack object loses all references to it.
2891 *
2892 * @param stack Address of the stack.
2893 * @param num_entries Maximum number of values that can be stacked.
2894 *
2895 * @return -ENOMEM if memory couldn't be allocated
2896 */
2897
2898 __syscall int32_t k_stack_alloc_init(struct k_stack *stack,
2899 uint32_t num_entries);
2900
2901 /**
2902 * @brief Release a stack's allocated buffer
2903 *
2904 * If a stack object was given a dynamically allocated buffer via
2905 * k_stack_alloc_init(), this will free it. This function does nothing
2906 * if the buffer wasn't dynamically allocated.
2907 *
2908 * @param stack Address of the stack.
2909 * @retval 0 on success
2910 * @retval -EAGAIN when object is still in use
2911 */
2912 int k_stack_cleanup(struct k_stack *stack);
2913
2914 /**
2915 * @brief Push an element onto a stack.
2916 *
2917 * This routine adds a stack_data_t value @a data to @a stack.
2918 *
2919 * @funcprops \isr_ok
2920 *
2921 * @param stack Address of the stack.
2922 * @param data Value to push onto the stack.
2923 *
2924 * @retval 0 on success
2925 * @retval -ENOMEM if stack is full
2926 */
2927 __syscall int k_stack_push(struct k_stack *stack, stack_data_t data);
2928
2929 /**
2930 * @brief Pop an element from a stack.
2931 *
2932 * This routine removes a stack_data_t value from @a stack in a "last in,
2933 * first out" manner and stores the value in @a data.
2934 *
2935 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2936 *
2937 * @funcprops \isr_ok
2938 *
2939 * @param stack Address of the stack.
2940 * @param data Address of area to hold the value popped from the stack.
2941 * @param timeout Waiting period to obtain a value,
2942 * or one of the special values K_NO_WAIT and
2943 * K_FOREVER.
2944 *
2945 * @retval 0 Element popped from stack.
2946 * @retval -EBUSY Returned without waiting.
2947 * @retval -EAGAIN Waiting period timed out.
2948 */
2949 __syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data,
2950 k_timeout_t timeout);
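
/*
 * Illustrative sketch: using a k_stack as a free list of pointer-sized
 * tokens, here indices into a hypothetical buffer pool of four entries.
 *
 * @code
 * K_STACK_DEFINE(free_bufs, 4);
 *
 * void pool_init(void)
 * {
 *     for (stack_data_t i = 0; i < 4; i++) {
 *         (void)k_stack_push(&free_bufs, i);
 *     }
 * }
 *
 * int buf_alloc(stack_data_t *idx)
 * {
 *     return k_stack_pop(&free_bufs, idx, K_FOREVER);
 * }
 * @endcode
 */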
2951
2952 /**
2953 * @brief Statically define and initialize a stack
2954 *
2955 * The stack can be accessed outside the module where it is defined using:
2956 *
2957 * @code extern struct k_stack <name>; @endcode
2958 *
2959 * @param name Name of the stack.
2960 * @param stack_num_entries Maximum number of values that can be stacked.
2961 */
2962 #define K_STACK_DEFINE(name, stack_num_entries) \
2963 stack_data_t __noinit \
2964 _k_stack_buf_##name[stack_num_entries]; \
2965 STRUCT_SECTION_ITERABLE(k_stack, name) = \
2966 Z_STACK_INITIALIZER(name, _k_stack_buf_##name, \
2967 stack_num_entries)
2968
2969 /** @} */
2970
2971 /**
2972 * @cond INTERNAL_HIDDEN
2973 */
2974
2975 struct k_work;
2976 struct k_work_q;
2977 struct k_work_queue_config;
2978 extern struct k_work_q k_sys_work_q;
2979
2980 /**
2981 * INTERNAL_HIDDEN @endcond
2982 */
2983
2984 /**
2985 * @defgroup mutex_apis Mutex APIs
2986 * @ingroup kernel_apis
2987 * @{
2988 */
2989
2990 /**
2991 * Mutex Structure
2992 * @ingroup mutex_apis
2993 */
2994 struct k_mutex {
2995 /** Mutex wait queue */
2996 _wait_q_t wait_q;
2997 /** Mutex owner */
2998 struct k_thread *owner;
2999
3000 /** Current lock count */
3001 uint32_t lock_count;
3002
3003 /** Original thread priority */
3004 int owner_orig_prio;
3005
3006 SYS_PORT_TRACING_TRACKING_FIELD(k_mutex)
3007
3008 #ifdef CONFIG_OBJ_CORE_MUTEX
3009 struct k_obj_core obj_core;
3010 #endif
3011 };
3012
3013 /**
3014 * @cond INTERNAL_HIDDEN
3015 */
3016 #define Z_MUTEX_INITIALIZER(obj) \
3017 { \
3018 .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
3019 .owner = NULL, \
3020 .lock_count = 0, \
3021 .owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
3022 }
3023
3024 /**
3025 * INTERNAL_HIDDEN @endcond
3026 */
3027
3028 /**
3029 * @brief Statically define and initialize a mutex.
3030 *
3031 * The mutex can be accessed outside the module where it is defined using:
3032 *
3033 * @code extern struct k_mutex <name>; @endcode
3034 *
3035 * @param name Name of the mutex.
3036 */
3037 #define K_MUTEX_DEFINE(name) \
3038 STRUCT_SECTION_ITERABLE(k_mutex, name) = \
3039 Z_MUTEX_INITIALIZER(name)
3040
3041 /**
3042 * @brief Initialize a mutex.
3043 *
3044 * This routine initializes a mutex object, prior to its first use.
3045 *
3046 * Upon completion, the mutex is available and does not have an owner.
3047 *
3048 * @param mutex Address of the mutex.
3049 *
3050 * @retval 0 Mutex object created
3051 *
3052 */
3053 __syscall int k_mutex_init(struct k_mutex *mutex);
3054
3055
3056 /**
3057 * @brief Lock a mutex.
3058 *
3059 * This routine locks @a mutex. If the mutex is locked by another thread,
3060 * the calling thread waits until the mutex becomes available or until
3061 * a timeout occurs.
3062 *
3063 * A thread is permitted to lock a mutex it has already locked. The operation
3064 * completes immediately and the lock count is increased by 1.
3065 *
3066 * Mutexes may not be locked in ISRs.
3067 *
3068 * @param mutex Address of the mutex.
3069 * @param timeout Waiting period to lock the mutex,
3070 * or one of the special values K_NO_WAIT and
3071 * K_FOREVER.
3072 *
3073 * @retval 0 Mutex locked.
3074 * @retval -EBUSY Returned without waiting.
3075 * @retval -EAGAIN Waiting period timed out.
3076 */
3077 __syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout);
3078
3079 /**
3080 * @brief Unlock a mutex.
3081 *
3082 * This routine unlocks @a mutex. The mutex must already be locked by the
3083 * calling thread.
3084 *
3085 * The mutex cannot be claimed by another thread until it has been unlocked by
3086 * the calling thread as many times as it was previously locked by that
3087 * thread.
3088 *
3089 * Mutexes may not be unlocked in ISRs, as mutexes must only be manipulated
3090 * in thread context due to ownership and priority inheritance semantics.
3091 *
3092 * @param mutex Address of the mutex.
3093 *
3094 * @retval 0 Mutex unlocked.
3095 * @retval -EPERM The current thread does not own the mutex
3096 * @retval -EINVAL The mutex is not locked
3097 *
3098 */
3099 __syscall int k_mutex_unlock(struct k_mutex *mutex);
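
/*
 * Illustrative sketch: guarding a shared resource, giving up after 100 ms
 * rather than blocking forever. Thread context only.
 *
 * @code
 * K_MUTEX_DEFINE(res_lock);
 *
 * int update_resource(void)
 * {
 *     if (k_mutex_lock(&res_lock, K_MSEC(100)) != 0) {
 *         return -EAGAIN;   // could not take the lock in time
 *     }
 *     // ... manipulate the shared resource ...
 *     k_mutex_unlock(&res_lock);
 *     return 0;
 * }
 * @endcode
 */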
3100
3101 /**
3102 * @}
3103 */
3104
3105
3106 struct k_condvar {
3107 _wait_q_t wait_q;
3108
3109 #ifdef CONFIG_OBJ_CORE_CONDVAR
3110 struct k_obj_core obj_core;
3111 #endif
3112 };
3113
3114 #define Z_CONDVAR_INITIALIZER(obj) \
3115 { \
3116 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
3117 }
3118
3119 /**
3120 * @defgroup condvar_apis Condition Variables APIs
3121 * @ingroup kernel_apis
3122 * @{
3123 */
3124
3125 /**
3126 * @brief Initialize a condition variable
3127 *
3128 * @param condvar pointer to a @p k_condvar structure
3129 * @retval 0 Condition variable created successfully
3130 */
3131 __syscall int k_condvar_init(struct k_condvar *condvar);
3132
3133 /**
3134 * @brief Signals one thread that is pending on the condition variable
3135 *
3136 * @param condvar pointer to a @p k_condvar structure
3137 * @retval 0 On success
3138 */
3139 __syscall int k_condvar_signal(struct k_condvar *condvar);
3140
3141 /**
3142 * @brief Unblock all threads that are pending on the condition
3143 * variable
3144 *
3145 * @param condvar pointer to a @p k_condvar structure
3146 * @return An integer with the number of woken threads on success
3147 */
3148 __syscall int k_condvar_broadcast(struct k_condvar *condvar);
3149
3150 /**
3151 * @brief Waits on the condition variable releasing the mutex lock
3152 *
3153 * Atomically releases the currently owned mutex, blocks the current thread
3154 * waiting on the condition variable specified by @a condvar,
3155 * and finally acquires the mutex again.
3156 *
3157 * The waiting thread unblocks only after another thread calls
3158 * k_condvar_signal or k_condvar_broadcast with the same condition variable.
3159 *
3160 * @param condvar pointer to a @p k_condvar structure
3161 * @param mutex Address of the mutex.
3162 * @param timeout Waiting period for the condition variable
3163 * or one of the special values K_NO_WAIT and K_FOREVER.
3164 * @retval 0 On success
3165 * @retval -EAGAIN Waiting period timed out.
3166 */
3167 __syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
3168 k_timeout_t timeout);
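
/*
 * Illustrative sketch: the canonical condition-variable pattern. The
 * predicate (a hypothetical "ready" flag) is rechecked in a loop under the
 * mutex, so a waiter only proceeds once the condition truly holds.
 *
 * @code
 * K_MUTEX_DEFINE(cv_lock);
 * K_CONDVAR_DEFINE(cv);
 * static bool ready;
 *
 * void waiter(void)
 * {
 *     k_mutex_lock(&cv_lock, K_FOREVER);
 *     while (!ready) {
 *         k_condvar_wait(&cv, &cv_lock, K_FOREVER);
 *     }
 *     k_mutex_unlock(&cv_lock);
 * }
 *
 * void signaler(void)
 * {
 *     k_mutex_lock(&cv_lock, K_FOREVER);
 *     ready = true;
 *     k_condvar_signal(&cv);
 *     k_mutex_unlock(&cv_lock);
 * }
 * @endcode
 */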
3169
3170 /**
3171 * @brief Statically define and initialize a condition variable.
3172 *
3173 * The condition variable can be accessed outside the module where it is
3174 * defined using:
3175 *
3176 * @code extern struct k_condvar <name>; @endcode
3177 *
3178 * @param name Name of the condition variable.
3179 */
3180 #define K_CONDVAR_DEFINE(name) \
3181 STRUCT_SECTION_ITERABLE(k_condvar, name) = \
3182 Z_CONDVAR_INITIALIZER(name)
3183 /**
3184 * @}
3185 */
3186
3187 /**
3188 * @cond INTERNAL_HIDDEN
3189 */
3190
3191 struct k_sem {
3192 _wait_q_t wait_q;
3193 unsigned int count;
3194 unsigned int limit;
3195
3196 Z_DECL_POLL_EVENT
3197
3198 SYS_PORT_TRACING_TRACKING_FIELD(k_sem)
3199
3200 #ifdef CONFIG_OBJ_CORE_SEM
3201 struct k_obj_core obj_core;
3202 #endif
3203 };
3204
3205 #define Z_SEM_INITIALIZER(obj, initial_count, count_limit) \
3206 { \
3207 .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
3208 .count = (initial_count), \
3209 .limit = (count_limit), \
3210 Z_POLL_EVENT_OBJ_INIT(obj) \
3211 }
3212
3213 /**
3214 * INTERNAL_HIDDEN @endcond
3215 */
3216
3217 /**
3218 * @defgroup semaphore_apis Semaphore APIs
3219 * @ingroup kernel_apis
3220 * @{
3221 */
3222
3223 /**
3224 * @brief Maximum limit value allowed for a semaphore.
3225 *
3226 * This is intended for use when a semaphore does not have
3227 * an explicit maximum limit, and instead is just used for
3228 * counting purposes.
3229 *
3230 */
3231 #define K_SEM_MAX_LIMIT UINT_MAX
3232
3233 /**
3234 * @brief Initialize a semaphore.
3235 *
3236 * This routine initializes a semaphore object, prior to its first use.
3237 *
3238 * @param sem Address of the semaphore.
3239 * @param initial_count Initial semaphore count.
3240 * @param limit Maximum permitted semaphore count.
3241 *
3242 * @see K_SEM_MAX_LIMIT
3243 *
3244 * @retval 0 Semaphore created successfully
3245 * @retval -EINVAL Invalid values
3246 *
3247 */
3248 __syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
3249 unsigned int limit);
3250
3251 /**
3252 * @brief Take a semaphore.
3253 *
3254 * This routine takes @a sem.
3255 *
3256 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
3257 *
3258 * @funcprops \isr_ok
3259 *
3260 * @param sem Address of the semaphore.
3261 * @param timeout Waiting period to take the semaphore,
3262 * or one of the special values K_NO_WAIT and K_FOREVER.
3263 *
3264 * @retval 0 Semaphore taken.
3265 * @retval -EBUSY Returned without waiting.
3266 * @retval -EAGAIN Waiting period timed out,
3267 * or the semaphore was reset during the waiting period.
3268 */
3269 __syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
3270
3271 /**
3272 * @brief Give a semaphore.
3273 *
3274 * This routine gives @a sem, unless the semaphore is already at its maximum
3275 * permitted count.
3276 *
3277 * @funcprops \isr_ok
3278 *
3279 * @param sem Address of the semaphore.
3280 */
3281 __syscall void k_sem_give(struct k_sem *sem);
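
/*
 * Illustrative sketch: an ISR signalling a worker thread that data has
 * arrived, with the semaphore counting up to 10 pending units of work.
 * Registration of rx_isr() is assumed to happen elsewhere.
 *
 * @code
 * K_SEM_DEFINE(rx_sem, 0, 10);
 *
 * void rx_isr(const void *arg)
 * {
 *     k_sem_give(&rx_sem);   // isr-ok
 * }
 *
 * void worker(void)
 * {
 *     while (k_sem_take(&rx_sem, K_FOREVER) == 0) {
 *         // ... handle one unit of received data ...
 *     }
 * }
 * @endcode
 */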
3282
3283 /**
3284 * @brief Resets a semaphore's count to zero.
3285 *
3286 * This routine sets the count of @a sem to zero.
3287 * Any outstanding semaphore takes will be aborted
3288 * with -EAGAIN.
3289 *
3290 * @param sem Address of the semaphore.
3291 */
3292 __syscall void k_sem_reset(struct k_sem *sem);
3293
3294 /**
3295 * @brief Get a semaphore's count.
3296 *
3297 * This routine returns the current count of @a sem.
3298 *
3299 * @param sem Address of the semaphore.
3300 *
3301 * @return Current semaphore count.
3302 */
3303 __syscall unsigned int k_sem_count_get(struct k_sem *sem);
3304
3305 /**
3306 * @internal
3307 */
3308 static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
3309 {
3310 return sem->count;
3311 }
3312
3313 /**
3314 * @brief Statically define and initialize a semaphore.
3315 *
3316 * The semaphore can be accessed outside the module where it is defined using:
3317 *
3318 * @code extern struct k_sem <name>; @endcode
3319 *
3320 * @param name Name of the semaphore.
3321 * @param initial_count Initial semaphore count.
3322 * @param count_limit Maximum permitted semaphore count.
3323 */
3324 #define K_SEM_DEFINE(name, initial_count, count_limit) \
3325 STRUCT_SECTION_ITERABLE(k_sem, name) = \
3326 Z_SEM_INITIALIZER(name, initial_count, count_limit); \
3327 BUILD_ASSERT(((count_limit) != 0) && \
3328 ((initial_count) <= (count_limit)) && \
3329 ((count_limit) <= K_SEM_MAX_LIMIT));
3330
3331 /** @} */
3332
3333 /**
3334 * @cond INTERNAL_HIDDEN
3335 */
3336
3337 struct k_work_delayable;
3338 struct k_work_sync;
3339
3340 /**
3341 * INTERNAL_HIDDEN @endcond
3342 */
3343
3344 /**
3345 * @defgroup workqueue_apis Work Queue APIs
3346 * @ingroup kernel_apis
3347 * @{
3348 */
3349
3350 /** @brief The signature for a work item handler function.
3351 *
3352 * The function will be invoked by the thread animating a work queue.
3353 *
3354 * @param work the work item that provided the handler.
3355 */
3356 typedef void (*k_work_handler_t)(struct k_work *work);
3357
3358 /** @brief Initialize a (non-delayable) work structure.
3359 *
3360 * This must be invoked before submitting a work structure for the first time.
3361 * It need not be invoked again on the same work structure. It can be
3362 * re-invoked to change the associated handler, but this must be done when the
3363 * work item is idle.
3364 *
3365 * @funcprops \isr_ok
3366 *
3367 * @param work the work structure to be initialized.
3368 *
3369 * @param handler the handler to be invoked by the work item.
3370 */
3371 void k_work_init(struct k_work *work,
3372 k_work_handler_t handler);
3373
3374 /** @brief Busy state flags from the work item.
3375 *
3376 * A zero return value indicates the work item appears to be idle.
3377 *
3378 * @note This is a live snapshot of state, which may change before the result
3379 * is checked. Use locks where appropriate.
3380 *
3381 * @funcprops \isr_ok
3382 *
3383 * @param work pointer to the work item.
3384 *
3385 * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED,
3386 * K_WORK_RUNNING, K_WORK_CANCELING, and K_WORK_FLUSHING.
3387 */
3388 int k_work_busy_get(const struct k_work *work);
3389
3390 /** @brief Test whether a work item is currently pending.
3391 *
3392  * Wrapper to determine whether a work item is in a non-idle state.
3393 *
3394 * @note This is a live snapshot of state, which may change before the result
3395 * is checked. Use locks where appropriate.
3396 *
3397 * @funcprops \isr_ok
3398 *
3399 * @param work pointer to the work item.
3400 *
3401 * @return true if and only if k_work_busy_get() returns a non-zero value.
3402 */
3403 static inline bool k_work_is_pending(const struct k_work *work);
3404
3405 /** @brief Submit a work item to a queue.
3406 *
3407 * @param queue pointer to the work queue on which the item should run. If
3408 * NULL the queue from the most recent submission will be used.
3409 *
3410 * @funcprops \isr_ok
3411 *
3412 * @param work pointer to the work item.
3413 *
3414 * @retval 0 if work was already submitted to a queue
3415 * @retval 1 if work was not submitted and has been queued to @p queue
3416 * @retval 2 if work was running and has been queued to the queue that was
3417 * running it
3418 * @retval -EBUSY
3419  * * if work submission was rejected because the work item is being canceled; or
3420 * * @p queue is draining; or
3421 * * @p queue is plugged.
3422 * @retval -EINVAL if @p queue is null and the work item has never been run.
3423 * @retval -ENODEV if @p queue has not been started.
3424 */
3425 int k_work_submit_to_queue(struct k_work_q *queue,
3426 struct k_work *work);
3427
3428 /** @brief Submit a work item to the system queue.
3429 *
3430 * @funcprops \isr_ok
3431 *
3432 * @param work pointer to the work item.
3433 *
3434 * @return as with k_work_submit_to_queue().
3435 */
3436 int k_work_submit(struct k_work *work);
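/*
 * Example: a minimal sketch of deferring interrupt-time processing to the
 * system work queue with a non-delayable item. The names my_work,
 * my_handler and my_isr are hypothetical; K_WORK_DEFINE is provided later
 * in this file.
 *
 * @code
 * static void my_handler(struct k_work *work)
 * {
 *         // Runs in the system work queue thread, so it may block.
 * }
 *
 * K_WORK_DEFINE(my_work, my_handler);
 *
 * void my_isr(const void *arg)
 * {
 *         ARG_UNUSED(arg);
 *         (void)k_work_submit(&my_work);    // 0, 1 or 2 on success
 * }
 * @endcode
 */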
3437
3438 /** @brief Wait for last-submitted instance to complete.
3439 *
3440 * Resubmissions may occur while waiting, including chained submissions (from
3441 * within the handler).
3442 *
3443 * @note Be careful of caller and work queue thread relative priority. If
3444 * this function sleeps it will not return until the work queue thread
3445 * completes the tasks that allow this thread to resume.
3446 *
3447 * @note Behavior is undefined if this function is invoked on @p work from a
3448 * work queue running @p work.
3449 *
3450 * @param work pointer to the work item.
3451 *
3452 * @param sync pointer to an opaque item containing state related to the
3453 * pending cancellation. The object must persist until the call returns, and
3454 * be accessible from both the caller thread and the work queue thread. The
3455 * object must not be used for any other flush or cancel operation until this
3456 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3457 * must be allocated in coherent memory.
3458 *
3459 * @retval true if call had to wait for completion
3460 * @retval false if work was already idle
3461 */
3462 bool k_work_flush(struct k_work *work,
3463 struct k_work_sync *sync);
3464
3465 /** @brief Cancel a work item.
3466 *
3467 * This attempts to prevent a pending (non-delayable) work item from being
3468 * processed by removing it from the work queue. If the item is being
3469 * processed, the work item will continue to be processed, but resubmissions
3470 * are rejected until cancellation completes.
3471 *
3472  * If this returns zero, cancellation is complete; otherwise something
3473 * (probably a work queue thread) is still referencing the item.
3474 *
3475 * See also k_work_cancel_sync().
3476 *
3477 * @funcprops \isr_ok
3478 *
3479 * @param work pointer to the work item.
3480 *
3481 * @return the k_work_busy_get() status indicating the state of the item after all
3482 * cancellation steps performed by this call are completed.
3483 */
3484 int k_work_cancel(struct k_work *work);
3485
3486 /** @brief Cancel a work item and wait for it to complete.
3487 *
3488 * Same as k_work_cancel() but does not return until cancellation is complete.
3489 * This can be invoked by a thread after k_work_cancel() to synchronize with a
3490 * previous cancellation.
3491 *
3492 * On return the work structure will be idle unless something submits it after
3493 * the cancellation was complete.
3494 *
3495 * @note Be careful of caller and work queue thread relative priority. If
3496 * this function sleeps it will not return until the work queue thread
3497 * completes the tasks that allow this thread to resume.
3498 *
3499 * @note Behavior is undefined if this function is invoked on @p work from a
3500 * work queue running @p work.
3501 *
3502 * @param work pointer to the work item.
3503 *
3504 * @param sync pointer to an opaque item containing state related to the
3505 * pending cancellation. The object must persist until the call returns, and
3506 * be accessible from both the caller thread and the work queue thread. The
3507 * object must not be used for any other flush or cancel operation until this
3508 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3509 * must be allocated in coherent memory.
3510 *
3511 * @retval true if work was pending (call had to wait for cancellation of a
3512 * running handler to complete, or scheduled or submitted operations were
3513 * cancelled);
3514 * @retval false otherwise
3515 */
3516 bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync);
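/*
 * Example: a minimal sketch of synchronous cancellation. The k_work_sync
 * object only needs to exist for the duration of the call; stack
 * allocation is fine except with CONFIG_KERNEL_COHERENCE, where coherent
 * memory is required. The name my_work is hypothetical.
 *
 * @code
 * void stop_my_work(void)
 * {
 *         struct k_work_sync sync;
 *
 *         if (k_work_cancel_sync(&my_work, &sync)) {
 *                 // Had to wait: the item was queued or running.
 *         }
 *         // my_work is idle here unless something resubmitted it.
 * }
 * @endcode
 */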
3517
3518 /** @brief Initialize a work queue structure.
3519 *
3520 * This must be invoked before starting a work queue structure for the first time.
3521 * It need not be invoked again on the same work queue structure.
3522 *
3523 * @funcprops \isr_ok
3524 *
3525 * @param queue the queue structure to be initialized.
3526 */
3527 void k_work_queue_init(struct k_work_q *queue);
3528
3529 /** @brief Initialize and start a work queue.
3530 *
3531 * This configures the work queue thread and starts it running. The function
3532 * should not be re-invoked on a queue.
3533 *
3534 * @param queue pointer to the queue structure. It must be initialized
3535 * in zeroed/bss memory or with @ref k_work_queue_init before
3536 * use.
3537 *
3538 * @param stack pointer to the work thread stack area.
3539 *
3540 * @param stack_size size of the work thread stack area, in bytes.
3541 *
3542 * @param prio initial thread priority
3543 *
3544 * @param cfg optional additional configuration parameters. Pass @c
3545 * NULL if not required, to use the defaults documented in
3546 * k_work_queue_config.
3547 */
3548 void k_work_queue_start(struct k_work_q *queue,
3549 k_thread_stack_t *stack, size_t stack_size,
3550 int prio, const struct k_work_queue_config *cfg);
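/*
 * Example: a minimal sketch of bringing up a dedicated work queue. The
 * names my_work_q and my_stack_area, and the size and priority values,
 * are hypothetical.
 *
 * @code
 * #define MY_STACK_SIZE 1024
 * #define MY_PRIORITY   5
 *
 * K_THREAD_STACK_DEFINE(my_stack_area, MY_STACK_SIZE);
 * static struct k_work_q my_work_q;
 *
 * void start_my_queue(void)
 * {
 *         struct k_work_queue_config cfg = {
 *                 .name = "my_work_q",
 *         };
 *
 *         k_work_queue_init(&my_work_q);
 *         k_work_queue_start(&my_work_q, my_stack_area,
 *                            K_THREAD_STACK_SIZEOF(my_stack_area),
 *                            MY_PRIORITY, &cfg);
 * }
 * @endcode
 */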
3551
3552 /** @brief Access the thread that animates a work queue.
3553 *
3554 * This is necessary to grant a work queue thread access to things the work
3555 * items it will process are expected to use.
3556 *
3557 * @param queue pointer to the queue structure.
3558 *
3559 * @return the thread associated with the work queue.
3560 */
3561 static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue);
3562
3563 /** @brief Wait until the work queue has drained, optionally plugging it.
3564 *
3565  * This blocks submission to the work queue except when coming from the
3566  * queue's own thread, and blocks the caller until no more work items are
3567  * available in the queue.
3568 *
3569 * If @p plug is true then submission will continue to be blocked after the
3570 * drain operation completes until k_work_queue_unplug() is invoked.
3571 *
3572 * Note that work items that are delayed are not yet associated with their
3573 * work queue. They must be cancelled externally if a goal is to ensure the
3574 * work queue remains empty. The @p plug feature can be used to prevent
3575 * delayed items from being submitted after the drain completes.
3576 *
3577 * @param queue pointer to the queue structure.
3578 *
3579 * @param plug if true the work queue will continue to block new submissions
3580 * after all items have drained.
3581 *
3582 * @retval 1 if call had to wait for the drain to complete
3583 * @retval 0 if call did not have to wait
3584 * @retval negative if wait was interrupted or failed
3585 */
3586 int k_work_queue_drain(struct k_work_q *queue, bool plug);
3587
3588 /** @brief Release a work queue to accept new submissions.
3589 *
3590 * This releases the block on new submissions placed when k_work_queue_drain()
3591 * is invoked with the @p plug option enabled. If this is invoked before the
3592 * drain completes new items may be submitted as soon as the drain completes.
3593 *
3594 * @funcprops \isr_ok
3595 *
3596 * @param queue pointer to the queue structure.
3597 *
3598 * @retval 0 if successfully unplugged
3599 * @retval -EALREADY if the work queue was not plugged.
3600 */
3601 int k_work_queue_unplug(struct k_work_q *queue);
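/*
 * Example: a minimal sketch of draining a queue around a maintenance
 * step, using the plug option to hold off new submissions until the step
 * completes. The name my_work_q is hypothetical.
 *
 * @code
 * // Wait for in-flight items; reject new submissions afterwards.
 * (void)k_work_queue_drain(&my_work_q, true);
 *
 * // ... queue is empty and plugged here ...
 *
 * (void)k_work_queue_unplug(&my_work_q);    // accept submissions again
 * @endcode
 */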
3602
3603 /** @brief Initialize a delayable work structure.
3604 *
3605 * This must be invoked before scheduling a delayable work structure for the
3606 * first time. It need not be invoked again on the same work structure. It
3607 * can be re-invoked to change the associated handler, but this must be done
3608 * when the work item is idle.
3609 *
3610 * @funcprops \isr_ok
3611 *
3612 * @param dwork the delayable work structure to be initialized.
3613 *
3614 * @param handler the handler to be invoked by the work item.
3615 */
3616 void k_work_init_delayable(struct k_work_delayable *dwork,
3617 k_work_handler_t handler);
3618
3619 /**
3620 * @brief Get the parent delayable work structure from a work pointer.
3621 *
3622  * This function is necessary when a @c k_work_handler_t function is passed to
3623  * k_work_schedule_for_queue() and the handler needs to access data in the
3624  * structure that contains the enclosing @c k_work_delayable.
3625 *
3626 * @param work Address passed to the work handler
3627 *
3628 * @return Address of the containing @c k_work_delayable structure.
3629 */
3630 static inline struct k_work_delayable *
3631 k_work_delayable_from_work(struct k_work *work);
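/*
 * Example: a minimal sketch of recovering user data in a delayable work
 * handler: first map the k_work back to its k_work_delayable, then map
 * that to its enclosing structure with CONTAINER_OF. The struct my_data
 * layout is hypothetical.
 *
 * @code
 * struct my_data {
 *         struct k_work_delayable dwork;
 *         int payload;
 * };
 *
 * static void my_handler(struct k_work *work)
 * {
 *         struct k_work_delayable *dwork = k_work_delayable_from_work(work);
 *         struct my_data *data = CONTAINER_OF(dwork, struct my_data, dwork);
 *
 *         // use data->payload
 * }
 * @endcode
 */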
3632
3633 /** @brief Busy state flags from the delayable work item.
3634 *
3635 * @funcprops \isr_ok
3636 *
3637 * @note This is a live snapshot of state, which may change before the result
3638 * can be inspected. Use locks where appropriate.
3639 *
3640 * @param dwork pointer to the delayable work item.
3641 *
3642 * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED, K_WORK_RUNNING,
3643 * K_WORK_CANCELING, and K_WORK_FLUSHING. A zero return value indicates the
3644 * work item appears to be idle.
3645 */
3646 int k_work_delayable_busy_get(const struct k_work_delayable *dwork);
3647
3648 /** @brief Test whether a delayed work item is currently pending.
3649 *
3650 * Wrapper to determine whether a delayed work item is in a non-idle state.
3651 *
3652 * @note This is a live snapshot of state, which may change before the result
3653 * can be inspected. Use locks where appropriate.
3654 *
3655 * @funcprops \isr_ok
3656 *
3657 * @param dwork pointer to the delayable work item.
3658 *
3659 * @return true if and only if k_work_delayable_busy_get() returns a non-zero
3660 * value.
3661 */
3662 static inline bool k_work_delayable_is_pending(
3663 const struct k_work_delayable *dwork);
3664
3665 /** @brief Get the absolute tick count at which a scheduled delayable work
3666 * will be submitted.
3667 *
3668 * @note This is a live snapshot of state, which may change before the result
3669 * can be inspected. Use locks where appropriate.
3670 *
3671 * @funcprops \isr_ok
3672 *
3673 * @param dwork pointer to the delayable work item.
3674 *
3675 * @return the tick count when the timer that will schedule the work item will
3676 * expire, or the current tick count if the work is not scheduled.
3677 */
3678 static inline k_ticks_t k_work_delayable_expires_get(
3679 const struct k_work_delayable *dwork);
3680
3681 /** @brief Get the number of ticks until a scheduled delayable work will be
3682 * submitted.
3683 *
3684 * @note This is a live snapshot of state, which may change before the result
3685 * can be inspected. Use locks where appropriate.
3686 *
3687 * @funcprops \isr_ok
3688 *
3689 * @param dwork pointer to the delayable work item.
3690 *
3691 * @return the number of ticks until the timer that will schedule the work
3692 * item will expire, or zero if the item is not scheduled.
3693 */
3694 static inline k_ticks_t k_work_delayable_remaining_get(
3695 const struct k_work_delayable *dwork);
3696
3697 /** @brief Submit an idle work item to a queue after a delay.
3698 *
3699 * Unlike k_work_reschedule_for_queue() this is a no-op if the work item is
3700 * already scheduled or submitted, even if @p delay is @c K_NO_WAIT.
3701 *
3702 * @funcprops \isr_ok
3703 *
3704 * @param queue the queue on which the work item should be submitted after the
3705 * delay.
3706 *
3707 * @param dwork pointer to the delayable work item.
3708 *
3709 * @param delay the time to wait before submitting the work item. If @c
3710 * K_NO_WAIT and the work is not pending this is equivalent to
3711 * k_work_submit_to_queue().
3712 *
3713 * @retval 0 if work was already scheduled or submitted.
3714 * @retval 1 if work has been scheduled.
3715 * @retval 2 if @p delay is @c K_NO_WAIT and work
3716 * was running and has been queued to the queue that was running it.
3717 * @retval -EBUSY if @p delay is @c K_NO_WAIT and
3718 * k_work_submit_to_queue() fails with this code.
3719 * @retval -EINVAL if @p delay is @c K_NO_WAIT and
3720 * k_work_submit_to_queue() fails with this code.
3721 * @retval -ENODEV if @p delay is @c K_NO_WAIT and
3722 * k_work_submit_to_queue() fails with this code.
3723 */
3724 int k_work_schedule_for_queue(struct k_work_q *queue,
3725 struct k_work_delayable *dwork,
3726 k_timeout_t delay);
3727
3728 /** @brief Submit an idle work item to the system work queue after a
3729 * delay.
3730 *
3731 * This is a thin wrapper around k_work_schedule_for_queue(), with all the API
3732 * characteristics of that function.
3733 *
3734 * @param dwork pointer to the delayable work item.
3735 *
3736 * @param delay the time to wait before submitting the work item. If @c
3737 * K_NO_WAIT this is equivalent to k_work_submit_to_queue().
3738 *
3739 * @return as with k_work_schedule_for_queue().
3740 */
3741 int k_work_schedule(struct k_work_delayable *dwork,
3742 k_timeout_t delay);
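/*
 * Example: a minimal sketch of a "fire once per burst" pattern built on
 * the no-op behavior of k_work_schedule(): edges arriving while the item
 * is already scheduled do not move the deadline. The names are
 * hypothetical.
 *
 * @code
 * static void debounce_handler(struct k_work *work)
 * {
 *         // handle the settled input state
 * }
 *
 * K_WORK_DELAYABLE_DEFINE(debounce_work, debounce_handler);
 *
 * void on_button_edge(void)
 * {
 *         // First edge schedules; later edges while pending return 0.
 *         (void)k_work_schedule(&debounce_work, K_MSEC(50));
 * }
 * @endcode
 */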
3743
3744 /** @brief Reschedule a work item to a queue after a delay.
3745 *
3746 * Unlike k_work_schedule_for_queue() this function can change the deadline of
3747 * a scheduled work item, and will schedule a work item that is in any state
3748 * (e.g. is idle, submitted, or running). This function does not affect
3749 * ("unsubmit") a work item that has been submitted to a queue.
3750 *
3751 * @funcprops \isr_ok
3752 *
3753 * @param queue the queue on which the work item should be submitted after the
3754 * delay.
3755 *
3756 * @param dwork pointer to the delayable work item.
3757 *
3758 * @param delay the time to wait before submitting the work item. If @c
3759 * K_NO_WAIT this is equivalent to k_work_submit_to_queue() after canceling
3760 * any previous scheduled submission.
3761 *
3762 * @note If delay is @c K_NO_WAIT ("no delay") the return values are as with
3763 * k_work_submit_to_queue().
3764 *
3765 * @retval 0 if delay is @c K_NO_WAIT and work was already on a queue
3766 * @retval 1 if
3767 * * delay is @c K_NO_WAIT and work was not submitted but has now been queued
3768 * to @p queue; or
3769 * * delay not @c K_NO_WAIT and work has been scheduled
3770 * @retval 2 if delay is @c K_NO_WAIT and work was running and has been queued
3771 * to the queue that was running it
3772 * @retval -EBUSY if @p delay is @c K_NO_WAIT and
3773 * k_work_submit_to_queue() fails with this code.
3774 * @retval -EINVAL if @p delay is @c K_NO_WAIT and
3775 * k_work_submit_to_queue() fails with this code.
3776 * @retval -ENODEV if @p delay is @c K_NO_WAIT and
3777 * k_work_submit_to_queue() fails with this code.
3778 */
3779 int k_work_reschedule_for_queue(struct k_work_q *queue,
3780 struct k_work_delayable *dwork,
3781 k_timeout_t delay);
3782
3783 /** @brief Reschedule a work item to the system work queue after a
3784 * delay.
3785 *
3786 * This is a thin wrapper around k_work_reschedule_for_queue(), with all the
3787 * API characteristics of that function.
3788 *
3789 * @param dwork pointer to the delayable work item.
3790 *
3791 * @param delay the time to wait before submitting the work item.
3792 *
3793 * @return as with k_work_reschedule_for_queue().
3794 */
3795 int k_work_reschedule(struct k_work_delayable *dwork,
3796 k_timeout_t delay);
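/*
 * Example: by contrast with k_work_schedule(), k_work_reschedule() moves
 * the deadline, which suits inactivity timeouts: each event pushes the
 * handler further out. A minimal sketch with hypothetical names:
 *
 * @code
 * static void idle_handler(struct k_work *work)
 * {
 *         // no activity seen for the full timeout period
 * }
 *
 * K_WORK_DELAYABLE_DEFINE(idle_work, idle_handler);
 *
 * void on_activity(void)
 * {
 *         // idle_handler runs only after 5 s with no activity.
 *         (void)k_work_reschedule(&idle_work, K_SECONDS(5));
 * }
 * @endcode
 */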
3797
3798 /** @brief Flush delayable work.
3799 *
3800 * If the work is scheduled, it is immediately submitted. Then the caller
3801 * blocks until the work completes, as with k_work_flush().
3802 *
3803 * @note Be careful of caller and work queue thread relative priority. If
3804 * this function sleeps it will not return until the work queue thread
3805 * completes the tasks that allow this thread to resume.
3806 *
3807 * @note Behavior is undefined if this function is invoked on @p dwork from a
3808 * work queue running @p dwork.
3809 *
3810 * @param dwork pointer to the delayable work item.
3811 *
3812 * @param sync pointer to an opaque item containing state related to the
3813 * pending cancellation. The object must persist until the call returns, and
3814 * be accessible from both the caller thread and the work queue thread. The
3815 * object must not be used for any other flush or cancel operation until this
3816 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3817 * must be allocated in coherent memory.
3818 *
3819 * @retval true if call had to wait for completion
3820 * @retval false if work was already idle
3821 */
3822 bool k_work_flush_delayable(struct k_work_delayable *dwork,
3823 struct k_work_sync *sync);
3824
3825 /** @brief Cancel delayable work.
3826 *
3827 * Similar to k_work_cancel() but for delayable work. If the work is
3828 * scheduled or submitted it is canceled. This function does not wait for the
3829 * cancellation to complete.
3830 *
3831 * @note The work may still be running when this returns. Use
3832 * k_work_flush_delayable() or k_work_cancel_delayable_sync() to ensure it is
3833 * not running.
3834 *
3835 * @note Canceling delayable work does not prevent rescheduling it. It does
3836 * prevent submitting it until the cancellation completes.
3837 *
3838 * @funcprops \isr_ok
3839 *
3840 * @param dwork pointer to the delayable work item.
3841 *
3842 * @return the k_work_delayable_busy_get() status indicating the state of the
3843 * item after all cancellation steps performed by this call are completed.
3844 */
3845 int k_work_cancel_delayable(struct k_work_delayable *dwork);
3846
3847 /** @brief Cancel delayable work and wait.
3848 *
3849 * Like k_work_cancel_delayable() but waits until the work becomes idle.
3850 *
3851 * @note Canceling delayable work does not prevent rescheduling it. It does
3852 * prevent submitting it until the cancellation completes.
3853 *
3854 * @note Be careful of caller and work queue thread relative priority. If
3855 * this function sleeps it will not return until the work queue thread
3856 * completes the tasks that allow this thread to resume.
3857 *
3858 * @note Behavior is undefined if this function is invoked on @p dwork from a
3859 * work queue running @p dwork.
3860 *
3861 * @param dwork pointer to the delayable work item.
3862 *
3863 * @param sync pointer to an opaque item containing state related to the
3864 * pending cancellation. The object must persist until the call returns, and
3865 * be accessible from both the caller thread and the work queue thread. The
3866 * object must not be used for any other flush or cancel operation until this
3867 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3868 * must be allocated in coherent memory.
3869 *
3870 * @retval true if work was not idle (call had to wait for cancellation of a
3871 * running handler to complete, or scheduled or submitted operations were
3872 * cancelled);
3873 * @retval false otherwise
3874 */
3875 bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
3876 struct k_work_sync *sync);
3877
3878 enum {
3879 /**
3880 * @cond INTERNAL_HIDDEN
3881 */
3882
3883 /* The atomic API is used for all work and queue flags fields to
3884 * enforce sequential consistency in SMP environments.
3885 */
3886
3887 /* Bits that represent the work item states. At least nine of the
3888 * combinations are distinct valid stable states.
3889 */
3890 K_WORK_RUNNING_BIT = 0,
3891 K_WORK_CANCELING_BIT = 1,
3892 K_WORK_QUEUED_BIT = 2,
3893 K_WORK_DELAYED_BIT = 3,
3894 K_WORK_FLUSHING_BIT = 4,
3895
3896 K_WORK_MASK = BIT(K_WORK_DELAYED_BIT) | BIT(K_WORK_QUEUED_BIT)
3897 | BIT(K_WORK_RUNNING_BIT) | BIT(K_WORK_CANCELING_BIT) | BIT(K_WORK_FLUSHING_BIT),
3898
3899 /* Static work flags */
3900 K_WORK_DELAYABLE_BIT = 8,
3901 K_WORK_DELAYABLE = BIT(K_WORK_DELAYABLE_BIT),
3902
3903 /* Dynamic work queue flags */
3904 K_WORK_QUEUE_STARTED_BIT = 0,
3905 K_WORK_QUEUE_STARTED = BIT(K_WORK_QUEUE_STARTED_BIT),
3906 K_WORK_QUEUE_BUSY_BIT = 1,
3907 K_WORK_QUEUE_BUSY = BIT(K_WORK_QUEUE_BUSY_BIT),
3908 K_WORK_QUEUE_DRAIN_BIT = 2,
3909 K_WORK_QUEUE_DRAIN = BIT(K_WORK_QUEUE_DRAIN_BIT),
3910 K_WORK_QUEUE_PLUGGED_BIT = 3,
3911 K_WORK_QUEUE_PLUGGED = BIT(K_WORK_QUEUE_PLUGGED_BIT),
3912
3913 /* Static work queue flags */
3914 K_WORK_QUEUE_NO_YIELD_BIT = 8,
3915 K_WORK_QUEUE_NO_YIELD = BIT(K_WORK_QUEUE_NO_YIELD_BIT),
3916
3917 /**
3918 * INTERNAL_HIDDEN @endcond
3919 */
3920 /* Transient work flags */
3921
3922 /** @brief Flag indicating a work item that is running under a work
3923 * queue thread.
3924 *
3925 * Accessed via k_work_busy_get(). May co-occur with other flags.
3926 */
3927 K_WORK_RUNNING = BIT(K_WORK_RUNNING_BIT),
3928
3929 /** @brief Flag indicating a work item that is being canceled.
3930 *
3931 * Accessed via k_work_busy_get(). May co-occur with other flags.
3932 */
3933 K_WORK_CANCELING = BIT(K_WORK_CANCELING_BIT),
3934
3935 /** @brief Flag indicating a work item that has been submitted to a
3936 * queue but has not started running.
3937 *
3938 * Accessed via k_work_busy_get(). May co-occur with other flags.
3939 */
3940 K_WORK_QUEUED = BIT(K_WORK_QUEUED_BIT),
3941
3942 /** @brief Flag indicating a delayed work item that is scheduled for
3943 * submission to a queue.
3944 *
3945 * Accessed via k_work_busy_get(). May co-occur with other flags.
3946 */
3947 K_WORK_DELAYED = BIT(K_WORK_DELAYED_BIT),
3948
3949 /** @brief Flag indicating a synced work item that is being flushed.
3950 *
3951 * Accessed via k_work_busy_get(). May co-occur with other flags.
3952 */
3953 K_WORK_FLUSHING = BIT(K_WORK_FLUSHING_BIT),
3954 };
3955
3956 /** @brief A structure used to submit work. */
3957 struct k_work {
3958 /* All fields are protected by the work module spinlock. No fields
3959 * are to be accessed except through kernel API.
3960 */
3961
3962 /* Node to link into k_work_q pending list. */
3963 sys_snode_t node;
3964
3965 /* The function to be invoked by the work queue thread. */
3966 k_work_handler_t handler;
3967
3968 /* The queue on which the work item was last submitted. */
3969 struct k_work_q *queue;
3970
3971 /* State of the work item.
3972 *
3973 * The item can be DELAYED, QUEUED, and RUNNING simultaneously.
3974 *
3975 * It can be RUNNING and CANCELING simultaneously.
3976 */
3977 uint32_t flags;
3978 };
3979
3980 #define Z_WORK_INITIALIZER(work_handler) { \
3981 .handler = (work_handler), \
3982 }
3983
3984 /** @brief A structure used to submit work after a delay. */
3985 struct k_work_delayable {
3986 /* The work item. */
3987 struct k_work work;
3988
3989 /* Timeout used to submit work after a delay. */
3990 struct _timeout timeout;
3991
3992 /* The queue to which the work should be submitted. */
3993 struct k_work_q *queue;
3994 };
3995
3996 #define Z_WORK_DELAYABLE_INITIALIZER(work_handler) { \
3997 .work = { \
3998 .handler = (work_handler), \
3999 .flags = K_WORK_DELAYABLE, \
4000 }, \
4001 }
4002
4003 /**
4004 * @brief Initialize a statically-defined delayable work item.
4005 *
4006 * This macro can be used to initialize a statically-defined delayable
4007 * work item, prior to its first use. For example,
4008 *
4009 * @code static K_WORK_DELAYABLE_DEFINE(<dwork>, <work_handler>); @endcode
4010 *
4011  * Note that if the runtime dependencies permit initialization with
4012  * k_work_init_delayable(), using that instead eliminates the initialized
4013  * object in ROM that this macro produces and that is copied into RAM at
4014  * system startup.
4015 *
4016 * @param work Symbol name for delayable work item object
4017 * @param work_handler Function to invoke each time work item is processed.
4018 */
4019 #define K_WORK_DELAYABLE_DEFINE(work, work_handler) \
4020 struct k_work_delayable work \
4021 = Z_WORK_DELAYABLE_INITIALIZER(work_handler)
4022
4023 /**
4024 * @cond INTERNAL_HIDDEN
4025 */
4026
4027 /* Record used to wait for work to flush.
4028 *
4029 * The work item is inserted into the queue that will process (or is
4030 * processing) the item, and will be processed as soon as the item
4031 * completes. When the flusher is processed the semaphore will be
4032 * signaled, releasing the thread waiting for the flush.
4033 */
4034 struct z_work_flusher {
4035 struct k_work work;
4036 struct k_sem sem;
4037 };
4038
4039 /* Record used to wait for work to complete a cancellation.
4040 *
4041 * The work item is inserted into a global queue of pending cancels.
4042 * When a cancelling work item goes idle any matching waiters are
4043 * removed from pending_cancels and are woken.
4044 */
4045 struct z_work_canceller {
4046 sys_snode_t node;
4047 struct k_work *work;
4048 struct k_sem sem;
4049 };
4050
4051 /**
4052 * INTERNAL_HIDDEN @endcond
4053 */
4054
4055 /** @brief A structure holding internal state for a pending synchronous
4056 * operation on a work item or queue.
4057 *
4058 * Instances of this type are provided by the caller for invocation of
4059 * k_work_flush(), k_work_cancel_sync() and sibling flush and cancel APIs. A
4060 * referenced object must persist until the call returns, and be accessible
4061 * from both the caller thread and the work queue thread.
4062 *
4063  * @note If CONFIG_KERNEL_COHERENCE is enabled the object must be allocated in
4064  * coherent memory; see arch_mem_coherent(). The stack on these architectures
4065  * is generally not coherent, so the object should not be stack-allocated.
4066  * Violations are detected by runtime assertion.
4067 */
4068 struct k_work_sync {
4069 union {
4070 struct z_work_flusher flusher;
4071 struct z_work_canceller canceller;
4072 };
4073 };
4074
4075 /** @brief A structure holding optional configuration items for a work
4076 * queue.
4077 *
4078 * This structure, and values it references, are not retained by
4079 * k_work_queue_start().
4080 */
4081 struct k_work_queue_config {
4082 /** The name to be given to the work queue thread.
4083 *
4084 * If left null the thread will not have a name.
4085 */
4086 const char *name;
4087
4088 /** Control whether the work queue thread should yield between
4089 * items.
4090 *
4091 * Yielding between items helps guarantee the work queue
4092 * thread does not starve other threads, including cooperative
4093 * ones released by a work item. This is the default behavior.
4094 *
4095 * Set this to @c true to prevent the work queue thread from
4096 * yielding between items. This may be appropriate when a
4097 * sequence of items should complete without yielding
4098 * control.
4099 */
4100 bool no_yield;
4101
4102 /** Control whether the work queue thread should be marked as
4103 	 * an essential thread.
4104 */
4105 bool essential;
4106 };
4107
4108 /** @brief A structure used to hold work until it can be processed. */
4109 struct k_work_q {
4110 /* The thread that animates the work. */
4111 struct k_thread thread;
4112
4113 /* All the following fields must be accessed only while the
4114 * work module spinlock is held.
4115 */
4116
4117 /* List of k_work items to be worked. */
4118 sys_slist_t pending;
4119
4120 /* Wait queue for idle work thread. */
4121 _wait_q_t notifyq;
4122
4123 /* Wait queue for threads waiting for the queue to drain. */
4124 _wait_q_t drainq;
4125
4126 /* Flags describing queue state. */
4127 uint32_t flags;
4128 };
4129
4130 /* Provide the implementation for inline functions declared above */
4131
4132 static inline bool k_work_is_pending(const struct k_work *work)
4133 {
4134 return k_work_busy_get(work) != 0;
4135 }
4136
4137 static inline struct k_work_delayable *
4138 k_work_delayable_from_work(struct k_work *work)
4139 {
4140 return CONTAINER_OF(work, struct k_work_delayable, work);
4141 }
4142
4143 static inline bool k_work_delayable_is_pending(
4144 const struct k_work_delayable *dwork)
4145 {
4146 return k_work_delayable_busy_get(dwork) != 0;
4147 }
4148
4149 static inline k_ticks_t k_work_delayable_expires_get(
4150 const struct k_work_delayable *dwork)
4151 {
4152 return z_timeout_expires(&dwork->timeout);
4153 }
4154
4155 static inline k_ticks_t k_work_delayable_remaining_get(
4156 const struct k_work_delayable *dwork)
4157 {
4158 return z_timeout_remaining(&dwork->timeout);
4159 }
4160
4161 static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue)
4162 {
4163 return &queue->thread;
4164 }
4165
4166 /** @} */
4167
4168 struct k_work_user;
4169
4170 /**
4171 * @addtogroup workqueue_apis
4172 * @{
4173 */
4174
4175 /**
4176 * @typedef k_work_user_handler_t
4177 * @brief Work item handler function type for user work queues.
4178 *
4179 * A work item's handler function is executed by a user workqueue's thread
4180 * when the work item is processed by the workqueue.
4181 *
4182 * @param work Address of the work item.
4183 */
4184 typedef void (*k_work_user_handler_t)(struct k_work_user *work);
4185
4186 /**
4187 * @cond INTERNAL_HIDDEN
4188 */
4189
4190 struct k_work_user_q {
4191 struct k_queue queue;
4192 struct k_thread thread;
4193 };
4194
4195 enum {
4196 K_WORK_USER_STATE_PENDING, /* Work item pending state */
4197 };
4198
4199 struct k_work_user {
4200 void *_reserved; /* Used by k_queue implementation. */
4201 k_work_user_handler_t handler;
4202 atomic_t flags;
4203 };
4204
4205 /**
4206 * INTERNAL_HIDDEN @endcond
4207 */
4208
4209 #if defined(__cplusplus) && ((__cplusplus - 0) < 202002L)
4210 #define Z_WORK_USER_INITIALIZER(work_handler) { NULL, work_handler, 0 }
4211 #else
4212 #define Z_WORK_USER_INITIALIZER(work_handler) \
4213 { \
4214 ._reserved = NULL, \
4215 .handler = (work_handler), \
4216 .flags = 0 \
4217 }
4218 #endif
4219
4220 /**
4221 * @brief Initialize a statically-defined user work item.
4222 *
4223 * This macro can be used to initialize a statically-defined user work
4224 * item, prior to its first use. For example,
4225 *
4226 * @code static K_WORK_USER_DEFINE(<work>, <work_handler>); @endcode
4227 *
4228 * @param work Symbol name for work item object
4229 * @param work_handler Function to invoke each time work item is processed.
4230 */
4231 #define K_WORK_USER_DEFINE(work, work_handler) \
4232 struct k_work_user work = Z_WORK_USER_INITIALIZER(work_handler)
4233
4234 /**
4235 * @brief Initialize a userspace work item.
4236 *
4237 * This routine initializes a user workqueue work item, prior to its
4238 * first use.
4239 *
4240 * @param work Address of work item.
4241 * @param handler Function to invoke each time work item is processed.
4242 */
4243 static inline void k_work_user_init(struct k_work_user *work,
4244 k_work_user_handler_t handler)
4245 {
4246 *work = (struct k_work_user)Z_WORK_USER_INITIALIZER(handler);
4247 }
4248
4249 /**
4250 * @brief Check if a userspace work item is pending.
4251 *
4252 * This routine indicates if user work item @a work is pending in a workqueue's
4253 * queue.
4254 *
4255 * @note Checking if the work is pending gives no guarantee that the
4256 * work will still be pending when this information is used. It is up to
4257 * the caller to make sure that this information is used in a safe manner.
4258 *
4259 * @funcprops \isr_ok
4260 *
4261 * @param work Address of work item.
4262 *
4263 * @return true if work item is pending, or false if it is not pending.
4264 */
4265 static inline bool k_work_user_is_pending(struct k_work_user *work)
4266 {
4267 return atomic_test_bit(&work->flags, K_WORK_USER_STATE_PENDING);
4268 }
4269
4270 /**
4271 * @brief Submit a work item to a user mode workqueue
4272 *
4273 * Submits a work item to a workqueue that runs in user mode. A temporary
4274  * memory allocation is made from the caller's resource pool, which is freed
4275 * once the worker thread consumes the k_work item. The workqueue
4276 * thread must have memory access to the k_work item being submitted. The caller
4277 * must have permission granted on the work_q parameter's queue object.
4278 *
4279 * @funcprops \isr_ok
4280 *
4281 * @param work_q Address of workqueue.
4282 * @param work Address of work item.
4283 *
4284 * @retval -EBUSY if the work item was already in some workqueue
4285 * @retval -ENOMEM if no memory for thread resource pool allocation
4286 * @retval 0 Success
4287 */
4288 static inline int k_work_user_submit_to_queue(struct k_work_user_q *work_q,
4289 struct k_work_user *work)
4290 {
4291 int ret = -EBUSY;
4292
4293 if (!atomic_test_and_set_bit(&work->flags,
4294 K_WORK_USER_STATE_PENDING)) {
4295 ret = k_queue_alloc_append(&work_q->queue, work);
4296
4297 /* Couldn't insert into the queue. Clear the pending bit
4298 * so the work item can be submitted again
4299 */
4300 if (ret != 0) {
4301 atomic_clear_bit(&work->flags,
4302 K_WORK_USER_STATE_PENDING);
4303 }
4304 }
4305
4306 return ret;
4307 }
4308
4309 /**
4310 * @brief Start a workqueue in user mode
4311 *
4312 * This works identically to k_work_queue_start() except it is callable from
4313 * user mode, and the worker thread created will run in user mode. The caller
4314 * must have permissions granted on both the work_q parameter's thread and
4315 * queue objects, and the same restrictions on priority apply as
4316 * k_thread_create().
4317 *
4318 * @param work_q Address of workqueue.
4319 * @param stack Pointer to work queue thread's stack space, as defined by
4320 * K_THREAD_STACK_DEFINE()
4321 * @param stack_size Size of the work queue thread's stack (in bytes), which
4322 * should either be the same constant passed to
4323 * K_THREAD_STACK_DEFINE() or the value of K_THREAD_STACK_SIZEOF().
4324 * @param prio Priority of the work queue's thread.
4325 * @param name optional thread name. If not null a copy is made into the
4326 * thread's name buffer.
4327 */
4328 void k_work_user_queue_start(struct k_work_user_q *work_q,
4329 k_thread_stack_t *stack,
4330 size_t stack_size, int prio,
4331 const char *name);
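/*
 * Example: a minimal sketch of starting a user-mode work queue and
 * submitting an item to it. The names and sizes are hypothetical; the
 * caller needs the object permissions described above.
 *
 * @code
 * K_THREAD_STACK_DEFINE(user_q_stack, 1024);
 * static struct k_work_user_q user_work_q;
 *
 * static void user_work_handler(struct k_work_user *work)
 * {
 *         // Runs in the user-mode work queue thread.
 * }
 *
 * K_WORK_USER_DEFINE(user_work, user_work_handler);
 *
 * void start_user_queue(void)
 * {
 *         k_work_user_queue_start(&user_work_q, user_q_stack,
 *                                 K_THREAD_STACK_SIZEOF(user_q_stack),
 *                                 K_LOWEST_APPLICATION_THREAD_PRIO,
 *                                 "user_work_q");
 *
 *         (void)k_work_user_submit_to_queue(&user_work_q, &user_work);
 * }
 * @endcode
 */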
4332
4333 /**
4334 * @brief Access the user mode thread that animates a work queue.
4335 *
4336 * This is necessary to grant a user mode work queue thread access to things
4337 * the work items it will process are expected to use.
4338 *
4339 * @param work_q pointer to the user mode queue structure.
4340 *
4341 * @return the user mode thread associated with the work queue.
4342 */
4343 static inline k_tid_t k_work_user_queue_thread_get(struct k_work_user_q *work_q)
4344 {
4345 return &work_q->thread;
4346 }
4347
4348 /** @} */
4349
4350 /**
4351 * @cond INTERNAL_HIDDEN
4352 */
4353
4354 struct k_work_poll {
4355 struct k_work work;
4356 struct k_work_q *workq;
4357 struct z_poller poller;
4358 struct k_poll_event *events;
4359 int num_events;
4360 k_work_handler_t real_handler;
4361 struct _timeout timeout;
4362 int poll_result;
4363 };
4364
4365 /**
4366 * INTERNAL_HIDDEN @endcond
4367 */
4368
4369 /**
4370 * @addtogroup workqueue_apis
4371 * @{
4372 */
4373
4374 /**
4375 * @brief Initialize a statically-defined work item.
4376 *
4377 * This macro can be used to initialize a statically-defined workqueue work
4378 * item, prior to its first use. For example,
4379 *
4380 * @code static K_WORK_DEFINE(<work>, <work_handler>); @endcode
4381 *
4382 * @param work Symbol name for work item object
4383 * @param work_handler Function to invoke each time work item is processed.
4384 */
4385 #define K_WORK_DEFINE(work, work_handler) \
4386 struct k_work work = Z_WORK_INITIALIZER(work_handler)
4387
4388 /**
4389 * @brief Initialize a triggered work item.
4390 *
4391 * This routine initializes a workqueue triggered work item, prior to
4392 * its first use.
4393 *
4394 * @param work Address of triggered work item.
4395 * @param handler Function to invoke each time work item is processed.
4396 */
4397 void k_work_poll_init(struct k_work_poll *work,
4398 k_work_handler_t handler);
4399
4400 /**
4401 * @brief Submit a triggered work item.
4402 *
4403 * This routine schedules work item @a work to be processed by workqueue
4404 * @a work_q when one of the given @a events is signaled. The routine
4405  * initiates an internal poller for the work item and then returns to the
4406  * caller. Only when one of the watched events happens is the work item
4407  * actually submitted to the workqueue, where it becomes pending.
4408 *
4409 * Submitting a previously submitted triggered work item that is still
4410 * waiting for the event cancels the existing submission and reschedules it
4411  * using the new event list. Note that this behavior is inherently subject
4412 * to race conditions with the pre-existing triggered work item and work queue,
4413 * so care must be taken to synchronize such resubmissions externally.
4414 *
4415 * @funcprops \isr_ok
4416 *
4417 * @warning
4418 * Provided array of events as well as a triggered work item must be placed
4419 * in persistent memory (valid until work handler execution or work
4420 * cancellation) and cannot be modified after submission.
4421 *
4422 * @param work_q Address of workqueue.
4423 * @param work Address of delayed work item.
4424 * @param events An array of events which trigger the work.
4425 * @param num_events The number of events in the array.
4426 * @param timeout Timeout after which the work will be scheduled
4427 * for execution even if not triggered.
4428 *
4429 *
4430 * @retval 0 Work item started watching for events.
4431 * @retval -EINVAL Work item is being processed or has completed its work.
4432 * @retval -EADDRINUSE Work item is pending on a different workqueue.
4433 */
4434 int k_work_poll_submit_to_queue(struct k_work_q *work_q,
4435 struct k_work_poll *work,
4436 struct k_poll_event *events,
4437 int num_events,
4438 k_timeout_t timeout);
4439
4440 /**
4441 * @brief Submit a triggered work item to the system workqueue.
4442 *
4443 * This routine schedules work item @a work to be processed by system
4444 * workqueue when one of the given @a events is signaled. The routine
4445  * initiates an internal poller for the work item and then returns to the
4446  * caller. Only when one of the watched events happens is the work item
4447  * actually submitted to the workqueue, where it becomes pending.
4448 *
4449 * Submitting a previously submitted triggered work item that is still
4450 * waiting for the event cancels the existing submission and reschedules it
4451  * using the new event list. Note that this behavior is inherently subject
4452 * to race conditions with the pre-existing triggered work item and work queue,
4453 * so care must be taken to synchronize such resubmissions externally.
4454 *
4455 * @funcprops \isr_ok
4456 *
4457 * @warning
4458 * Provided array of events as well as a triggered work item must not be
4459 * modified until the item has been processed by the workqueue.
4460 *
4461 * @param work Address of delayed work item.
4462 * @param events An array of events which trigger the work.
4463 * @param num_events The number of events in the array.
4464 * @param timeout Timeout after which the work will be scheduled
4465 * for execution even if not triggered.
4466 *
4467 * @retval 0 Work item started watching for events.
4468 * @retval -EINVAL Work item is being processed or has completed its work.
4469 * @retval -EADDRINUSE Work item is pending on a different workqueue.
4470 */
4471 int k_work_poll_submit(struct k_work_poll *work,
4472 struct k_poll_event *events,
4473 int num_events,
4474 k_timeout_t timeout);
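/*
 * Example: a minimal sketch of triggered work driven by a poll signal.
 * The events array and the work item must stay valid until the handler
 * runs or the submission is canceled. Names are hypothetical.
 *
 * @code
 * static struct k_poll_signal my_signal;
 * static struct k_poll_event my_events[1];
 * static struct k_work_poll my_poll_work;
 *
 * static void my_poll_handler(struct k_work *work)
 * {
 *         // one of the events was signaled, or the timeout expired
 * }
 *
 * void setup(void)
 * {
 *         k_poll_signal_init(&my_signal);
 *         k_poll_event_init(&my_events[0], K_POLL_TYPE_SIGNAL,
 *                           K_POLL_MODE_NOTIFY_ONLY, &my_signal);
 *         k_work_poll_init(&my_poll_work, my_poll_handler);
 *
 *         (void)k_work_poll_submit(&my_poll_work, my_events, 1, K_FOREVER);
 * }
 *
 * void trigger(void)
 * {
 *         (void)k_poll_signal_raise(&my_signal, 0);    // queues the work
 * }
 * @endcode
 */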
4475
4476 /**
4477 * @brief Cancel a triggered work item.
4478 *
4479 * This routine cancels the submission of triggered work item @a work.
4480  * A triggered work item can only be canceled if no event has yet triggered
4481  * its submission to the workqueue.
4482 *
4483 * @funcprops \isr_ok
4484 *
4485 * @param work Address of delayed work item.
4486 *
4487 * @retval 0 Work item canceled.
4488 * @retval -EINVAL Work item is being processed or has completed its work.
4489 */
4490 int k_work_poll_cancel(struct k_work_poll *work);
4491
4492 /** @} */
4493
4494 /**
4495 * @defgroup msgq_apis Message Queue APIs
4496 * @ingroup kernel_apis
4497 * @{
4498 */
4499
4500 /**
4501 * @brief Message Queue Structure
4502 */
4503 struct k_msgq {
4504 /** Message queue wait queue */
4505 _wait_q_t wait_q;
4506 /** Lock */
4507 struct k_spinlock lock;
4508 /** Message size */
4509 size_t msg_size;
4510 /** Maximal number of messages */
4511 uint32_t max_msgs;
4512 /** Start of message buffer */
4513 char *buffer_start;
4514 /** End of message buffer */
4515 char *buffer_end;
4516 /** Read pointer */
4517 char *read_ptr;
4518 /** Write pointer */
4519 char *write_ptr;
4520 /** Number of used messages */
4521 uint32_t used_msgs;
4522
4523 Z_DECL_POLL_EVENT
4524
4525 	/** Message queue flags */
4526 uint8_t flags;
4527
4528 SYS_PORT_TRACING_TRACKING_FIELD(k_msgq)
4529
4530 #ifdef CONFIG_OBJ_CORE_MSGQ
4531 struct k_obj_core obj_core;
4532 #endif
4533 };
4534 /**
4535 * @cond INTERNAL_HIDDEN
4536 */
4537
4538
4539 #define Z_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \
4540 { \
4541 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
4542 .msg_size = q_msg_size, \
4543 .max_msgs = q_max_msgs, \
4544 .buffer_start = q_buffer, \
4545 .buffer_end = q_buffer + (q_max_msgs * q_msg_size), \
4546 .read_ptr = q_buffer, \
4547 .write_ptr = q_buffer, \
4548 .used_msgs = 0, \
4549 Z_POLL_EVENT_OBJ_INIT(obj) \
4550 }
4551
4552 /**
4553 * INTERNAL_HIDDEN @endcond
4554 */
4555
4556
4557 #define K_MSGQ_FLAG_ALLOC BIT(0)
4558
4559 /**
4560 * @brief Message Queue Attributes
4561 */
4562 struct k_msgq_attrs {
4563 /** Message Size */
4564 size_t msg_size;
4565 /** Maximal number of messages */
4566 uint32_t max_msgs;
4567 /** Used messages */
4568 uint32_t used_msgs;
4569 };
4570
4571
4572 /**
4573 * @brief Statically define and initialize a message queue.
4574 *
4575 * The message queue's ring buffer contains space for @a q_max_msgs messages,
4576 * each of which is @a q_msg_size bytes long. Alignment of the message queue's
4577  * ring buffer is not necessary; setting @a q_align to 1 is sufficient.
4578 *
4579 * The message queue can be accessed outside the module where it is defined
4580 * using:
4581 *
4582 * @code extern struct k_msgq <name>; @endcode
4583 *
4584 * @param q_name Name of the message queue.
4585 * @param q_msg_size Message size (in bytes).
4586 * @param q_max_msgs Maximum number of messages that can be queued.
4587 * @param q_align Alignment of the message queue's ring buffer (power of 2).
4588 *
4589 */
4590 #define K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align) \
4591 static char __noinit __aligned(q_align) \
4592 _k_fifo_buf_##q_name[(q_max_msgs) * (q_msg_size)]; \
4593 STRUCT_SECTION_ITERABLE(k_msgq, q_name) = \
4594 Z_MSGQ_INITIALIZER(q_name, _k_fifo_buf_##q_name, \
4595 (q_msg_size), (q_max_msgs))
4596
4597 /**
4598 * @brief Initialize a message queue.
4599 *
4600 * This routine initializes a message queue object, prior to its first use.
4601 *
4602 * The message queue's ring buffer must contain space for @a max_msgs messages,
4603 * each of which is @a msg_size bytes long. Alignment of the message queue's
4604 * ring buffer is not necessary.
4605 *
4606 * @param msgq Address of the message queue.
4607 * @param buffer Pointer to ring buffer that holds queued messages.
4608 * @param msg_size Message size (in bytes).
4609 * @param max_msgs Maximum number of messages that can be queued.
4610 */
4611 void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
4612 uint32_t max_msgs);
4613
4614 /**
4615 * @brief Initialize a message queue.
4616 *
4617 * This routine initializes a message queue object, prior to its first use,
4618 * allocating its internal ring buffer from the calling thread's resource
4619 * pool.
4620 *
4621 * Memory allocated for the ring buffer can be released by calling
4622 * k_msgq_cleanup(), or if userspace is enabled and the msgq object loses
4623 * all of its references.
4624 *
4625 * @param msgq Address of the message queue.
4626 * @param msg_size Message size (in bytes).
4627 * @param max_msgs Maximum number of messages that can be queued.
4628 *
4629 * @return 0 on success, -ENOMEM if there was insufficient memory in the
4630 * thread's resource pool, or -EINVAL if the size parameters cause
4631 * an integer overflow.
4632 */
4633 __syscall int k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
4634 uint32_t max_msgs);
4635
4636 /**
4637 * @brief Release allocated buffer for a queue
4638 *
4639 * Releases memory allocated for the ring buffer.
4640 *
4641 * @param msgq message queue to cleanup
4642 *
4643 * @retval 0 on success
4644 * @retval -EBUSY Queue not empty
4645 */
4646 int k_msgq_cleanup(struct k_msgq *msgq);
4647
4648 /**
4649 * @brief Send a message to a message queue.
4650 *
4651  * This routine sends a message to message queue @a msgq.
4652 *
4653 * @note The message content is copied from @a data into @a msgq and the @a data
4654 * pointer is not retained, so the message content will not be modified
4655 * by this function.
4656 *
4657 * @funcprops \isr_ok
4658 *
4659 * @param msgq Address of the message queue.
4660 * @param data Pointer to the message.
4661 * @param timeout Waiting period to add the message, or one of the special
4662 * values K_NO_WAIT and K_FOREVER.
4663 *
4664 * @retval 0 Message sent.
4665 * @retval -ENOMSG Returned without waiting or queue purged.
4666 * @retval -EAGAIN Waiting period timed out.
4667 */
4668 __syscall int k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
4669
4670 /**
4671 * @brief Receive a message from a message queue.
4672 *
4673  * This routine receives a message from message queue @a msgq in a "first in,
4674 * first out" manner.
4675 *
4676 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
4677 *
4678 * @funcprops \isr_ok
4679 *
4680 * @param msgq Address of the message queue.
4681 * @param data Address of area to hold the received message.
4682 * @param timeout Waiting period to receive the message,
4683 * or one of the special values K_NO_WAIT and
4684 * K_FOREVER.
4685 *
4686 * @retval 0 Message received.
4687 * @retval -ENOMSG Returned without waiting.
4688 * @retval -EAGAIN Waiting period timed out.
4689 */
4690 __syscall int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout);
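/*
 * Example: a minimal sketch of a producer/consumer pair exchanging
 * fixed-size records through a message queue. Names and sizes are
 * hypothetical.
 *
 * @code
 * struct sensor_msg {
 *         uint32_t id;
 *         int32_t value;
 * };
 *
 * K_MSGQ_DEFINE(sensor_q, sizeof(struct sensor_msg), 16, 4);
 *
 * void producer(void)
 * {
 *         struct sensor_msg msg = { .id = 1, .value = 42 };
 *
 *         // Drop the message rather than block when the queue is full.
 *         (void)k_msgq_put(&sensor_q, &msg, K_NO_WAIT);
 * }
 *
 * void consumer(void)
 * {
 *         struct sensor_msg msg;
 *
 *         while (k_msgq_get(&sensor_q, &msg, K_FOREVER) == 0) {
 *                 // handle msg
 *         }
 * }
 * @endcode
 */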
4691
4692 /**
4693 * @brief Peek/read a message from a message queue.
4694 *
4695  * This routine reads a message from message queue @a msgq in a "first in,
4696 * first out" manner and leaves the message in the queue.
4697 *
4698 * @funcprops \isr_ok
4699 *
4700 * @param msgq Address of the message queue.
4701 * @param data Address of area to hold the message read from the queue.
4702 *
4703 * @retval 0 Message read.
4704 * @retval -ENOMSG Returned when the queue has no message.
4705 */
4706 __syscall int k_msgq_peek(struct k_msgq *msgq, void *data);
4707
4708 /**
4709 * @brief Peek/read a message from a message queue at the specified index
4710 *
4711  * This routine reads a message from message queue @a msgq at the specified
4712  * index and leaves the message in the queue.
4713  * k_msgq_peek_at(msgq, data, 0) is equivalent to k_msgq_peek(msgq, data).
4714 *
4715 * @funcprops \isr_ok
4716 *
4717 * @param msgq Address of the message queue.
4718 * @param data Address of area to hold the message read from the queue.
4719 * @param idx Message queue index at which to peek
4720 *
4721 * @retval 0 Message read.
4722 * @retval -ENOMSG Returned when the queue has no message at index.
4723 */
4724 __syscall int k_msgq_peek_at(struct k_msgq *msgq, void *data, uint32_t idx);
4725
4726 /**
4727 * @brief Purge a message queue.
4728 *
4729 * This routine discards all unreceived messages in a message queue's ring
4730 * buffer. Any threads that are blocked waiting to send a message to the
4731 * message queue are unblocked and see an -ENOMSG error code.
4732 *
4733 * @param msgq Address of the message queue.
4734 */
4735 __syscall void k_msgq_purge(struct k_msgq *msgq);
4736
4737 /**
4738 * @brief Get the amount of free space in a message queue.
4739 *
4740 * This routine returns the number of unused entries in a message queue's
4741 * ring buffer.
4742 *
4743 * @param msgq Address of the message queue.
4744 *
4745 * @return Number of unused ring buffer entries.
4746 */
4747 __syscall uint32_t k_msgq_num_free_get(struct k_msgq *msgq);
4748
4749 /**
4750 * @brief Get basic attributes of a message queue.
4751 *
4752  * This routine fetches the basic attributes of a message queue into @a attrs.
4753 *
4754 * @param msgq Address of the message queue.
4755 * @param attrs pointer to message queue attribute structure.
4756 */
4757 __syscall void k_msgq_get_attrs(struct k_msgq *msgq,
4758 struct k_msgq_attrs *attrs);
4759
4760
4761 static inline uint32_t z_impl_k_msgq_num_free_get(struct k_msgq *msgq)
4762 {
4763 return msgq->max_msgs - msgq->used_msgs;
4764 }
4765
4766 /**
4767 * @brief Get the number of messages in a message queue.
4768 *
4769 * This routine returns the number of messages in a message queue's ring buffer.
4770 *
4771 * @param msgq Address of the message queue.
4772 *
4773 * @return Number of messages.
4774 */
4775 __syscall uint32_t k_msgq_num_used_get(struct k_msgq *msgq);
4776
4777 static inline uint32_t z_impl_k_msgq_num_used_get(struct k_msgq *msgq)
4778 {
4779 return msgq->used_msgs;
4780 }
4781
4782 /** @} */
4783
4784 /**
4785 * @defgroup mailbox_apis Mailbox APIs
4786 * @ingroup kernel_apis
4787 * @{
4788 */
4789
4790 /**
4791 * @brief Mailbox Message Structure
4792 *
4793 */
4794 struct k_mbox_msg {
4795 /** size of message (in bytes) */
4796 size_t size;
4797 /** application-defined information value */
4798 uint32_t info;
4799 /** sender's message data buffer */
4800 void *tx_data;
4801 /** source thread id */
4802 k_tid_t rx_source_thread;
4803 /** target thread id */
4804 k_tid_t tx_target_thread;
4805 /** internal use only - thread waiting on send (may be a dummy) */
4806 k_tid_t _syncing_thread;
4807 #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
4808 /** internal use only - semaphore used during asynchronous send */
4809 struct k_sem *_async_sem;
4810 #endif
4811 };
4812 /**
4813 * @brief Mailbox Structure
4814 *
4815 */
4816 struct k_mbox {
4817 /** Transmit messages queue */
4818 _wait_q_t tx_msg_queue;
4819 /** Receive message queue */
4820 _wait_q_t rx_msg_queue;
4821 struct k_spinlock lock;
4822
4823 SYS_PORT_TRACING_TRACKING_FIELD(k_mbox)
4824
4825 #ifdef CONFIG_OBJ_CORE_MAILBOX
4826 struct k_obj_core obj_core;
4827 #endif
4828 };
4829 /**
4830 * @cond INTERNAL_HIDDEN
4831 */
4832
4833 #define Z_MBOX_INITIALIZER(obj) \
4834 { \
4835 .tx_msg_queue = Z_WAIT_Q_INIT(&obj.tx_msg_queue), \
4836 .rx_msg_queue = Z_WAIT_Q_INIT(&obj.rx_msg_queue), \
4837 }
4838
4839 /**
4840 * INTERNAL_HIDDEN @endcond
4841 */
4842
4843 /**
4844 * @brief Statically define and initialize a mailbox.
4845 *
4846 * The mailbox is to be accessed outside the module where it is defined using:
4847 *
4848 * @code extern struct k_mbox <name>; @endcode
4849 *
4850 * @param name Name of the mailbox.
4851 */
4852 #define K_MBOX_DEFINE(name) \
4853 STRUCT_SECTION_ITERABLE(k_mbox, name) = \
4854 Z_MBOX_INITIALIZER(name) \
4855
4856 /**
4857 * @brief Initialize a mailbox.
4858 *
4859 * This routine initializes a mailbox object, prior to its first use.
4860 *
4861 * @param mbox Address of the mailbox.
4862 */
4863 void k_mbox_init(struct k_mbox *mbox);
4864
4865 /**
4866 * @brief Send a mailbox message in a synchronous manner.
4867 *
4868 * This routine sends a message to @a mbox and waits for a receiver to both
4869 * receive and process it. The message data may be in a buffer or non-existent
4870 * (i.e. an empty message).
4871 *
4872 * @param mbox Address of the mailbox.
4873 * @param tx_msg Address of the transmit message descriptor.
4874 * @param timeout Waiting period for the message to be received,
4875 * or one of the special values K_NO_WAIT
4876 * and K_FOREVER. Once the message has been received,
4877 * this routine waits as long as necessary for the message
4878 * to be completely processed.
4879 *
4880 * @retval 0 Message sent.
4881 * @retval -ENOMSG Returned without waiting.
4882 * @retval -EAGAIN Waiting period timed out.
4883 */
4884 int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
4885 k_timeout_t timeout);
4886
4887 /**
4888 * @brief Send a mailbox message in an asynchronous manner.
4889 *
4890 * This routine sends a message to @a mbox without waiting for a receiver
4891 * to process it. The message data may be in a buffer or non-existent
4892 * (i.e. an empty message). Optionally, the semaphore @a sem will be given
4893 * when the message has been both received and completely processed by
4894 * the receiver.
4895 *
4896 * @param mbox Address of the mailbox.
4897 * @param tx_msg Address of the transmit message descriptor.
4898 * @param sem Address of a semaphore, or NULL if none is needed.
4899 */
4900 void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
4901 struct k_sem *sem);
4902
4903 /**
4904 * @brief Receive a mailbox message.
4905 *
4906 * This routine receives a message from @a mbox, then optionally retrieves
4907 * its data and disposes of the message.
4908 *
4909 * @param mbox Address of the mailbox.
4910 * @param rx_msg Address of the receive message descriptor.
4911 * @param buffer Address of the buffer to receive data, or NULL to defer data
4912 * retrieval and message disposal until later.
4913 * @param timeout Waiting period for a message to be received,
4914 * or one of the special values K_NO_WAIT and K_FOREVER.
4915 *
4916 * @retval 0 Message received.
4917 * @retval -ENOMSG Returned without waiting.
4918 * @retval -EAGAIN Waiting period timed out.
4919 */
4920 int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg,
4921 void *buffer, k_timeout_t timeout);
4922
4923 /**
4924 * @brief Retrieve mailbox message data into a buffer.
4925 *
4926 * This routine completes the processing of a received message by retrieving
4927 * its data into a buffer, then disposing of the message.
4928 *
4929 * Alternatively, this routine can be used to dispose of a received message
4930 * without retrieving its data.
4931 *
4932 * @param rx_msg Address of the receive message descriptor.
4933 * @param buffer Address of the buffer to receive data, or NULL to discard
4934 * the data.
4935 */
4936 void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer);
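/*
 * Example: a minimal sketch of a synchronous mailbox exchange with
 * deferred data retrieval on the receive side. Names are hypothetical;
 * K_ANY matches any peer thread.
 *
 * @code
 * K_MBOX_DEFINE(my_mailbox);
 *
 * void sender(void)
 * {
 *         char data[16] = "hello";
 *         struct k_mbox_msg msg = {
 *                 .size = sizeof(data),
 *                 .tx_data = data,
 *                 .tx_target_thread = K_ANY,
 *         };
 *
 *         (void)k_mbox_put(&my_mailbox, &msg, K_FOREVER);
 * }
 *
 * void receiver(void)
 * {
 *         char buf[16];
 *         struct k_mbox_msg msg = {
 *                 .size = sizeof(buf),
 *                 .rx_source_thread = K_ANY,
 *         };
 *
 *         // NULL buffer defers retrieval; fetch and dispose afterwards.
 *         (void)k_mbox_get(&my_mailbox, &msg, NULL, K_FOREVER);
 *         k_mbox_data_get(&msg, buf);
 * }
 * @endcode
 */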
4937
4938 /** @} */
4939
4940 /**
4941 * @defgroup pipe_apis Pipe APIs
4942 * @ingroup kernel_apis
4943 * @{
4944 */
4945
4946 /** Pipe Structure */
4947 struct k_pipe {
4948 unsigned char *buffer; /**< Pipe buffer: may be NULL */
4949 size_t size; /**< Buffer size */
4950 size_t bytes_used; /**< Number of bytes used in buffer */
4951 size_t read_index; /**< Where in buffer to read from */
4952 size_t write_index; /**< Where in buffer to write */
4953 struct k_spinlock lock; /**< Synchronization lock */
4954
4955 struct {
4956 _wait_q_t readers; /**< Reader wait queue */
4957 _wait_q_t writers; /**< Writer wait queue */
4958 } wait_q; /**< Wait queue */
4959
4960 Z_DECL_POLL_EVENT
4961
4962 uint8_t flags; /**< Flags */
4963
4964 SYS_PORT_TRACING_TRACKING_FIELD(k_pipe)
4965
4966 #ifdef CONFIG_OBJ_CORE_PIPE
4967 struct k_obj_core obj_core;
4968 #endif
4969 };
4970
4971 /**
4972 * @cond INTERNAL_HIDDEN
4973 */
4974 #define K_PIPE_FLAG_ALLOC BIT(0) /**< Buffer was allocated */
4975
4976 #define Z_PIPE_INITIALIZER(obj, pipe_buffer, pipe_buffer_size) \
4977 { \
4978 .buffer = pipe_buffer, \
4979 .size = pipe_buffer_size, \
4980 .bytes_used = 0, \
4981 .read_index = 0, \
4982 .write_index = 0, \
4983 .lock = {}, \
4984 .wait_q = { \
4985 .readers = Z_WAIT_Q_INIT(&obj.wait_q.readers), \
4986 .writers = Z_WAIT_Q_INIT(&obj.wait_q.writers) \
4987 }, \
4988 Z_POLL_EVENT_OBJ_INIT(obj) \
4989 .flags = 0, \
4990 }
4991
4992 /**
4993 * INTERNAL_HIDDEN @endcond
4994 */
4995
4996 /**
4997 * @brief Statically define and initialize a pipe.
4998 *
4999 * The pipe can be accessed outside the module where it is defined using:
5000 *
5001 * @code extern struct k_pipe <name>; @endcode
5002 *
5003 * @param name Name of the pipe.
5004 * @param pipe_buffer_size Size of the pipe's ring buffer (in bytes),
5005 * or zero if no ring buffer is used.
5006 * @param pipe_align Alignment of the pipe's ring buffer (power of 2).
5007 *
5008 */
5009 #define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align) \
5010 static unsigned char __noinit __aligned(pipe_align) \
5011 _k_pipe_buf_##name[pipe_buffer_size]; \
5012 STRUCT_SECTION_ITERABLE(k_pipe, name) = \
5013 Z_PIPE_INITIALIZER(name, _k_pipe_buf_##name, pipe_buffer_size)
5014
5015 /**
5016 * @brief Initialize a pipe.
5017 *
5018 * This routine initializes a pipe object, prior to its first use.
5019 *
5020 * @param pipe Address of the pipe.
5021 * @param buffer Address of the pipe's ring buffer, or NULL if no ring buffer
5022 * is used.
5023 * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
5024 * buffer is used.
5025 */
5026 void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size);
5027
5028 /**
5029 * @brief Release a pipe's allocated buffer
5030 *
5031 * If a pipe object was given a dynamically allocated buffer via
5032 * k_pipe_alloc_init(), this will free it. This function does nothing
5033 * if the buffer wasn't dynamically allocated.
5034 *
5035 * @param pipe Address of the pipe.
5036 * @retval 0 on success
5037 * @retval -EAGAIN nothing to clean up
5038 */
5039 int k_pipe_cleanup(struct k_pipe *pipe);
5040
5041 /**
5042 * @brief Initialize a pipe and allocate a buffer for it
5043 *
5044 * Storage for the buffer region will be allocated from the calling thread's
5045 * resource pool. This memory will be released if k_pipe_cleanup() is called,
5046 * or userspace is enabled and the pipe object loses all references to it.
5047 *
5048 * This function should only be called on uninitialized pipe objects.
5049 *
5050 * @param pipe Address of the pipe.
5051 * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
5052 * buffer is used.
5053 * @retval 0 on success
5054 * @retval -ENOMEM if memory couldn't be allocated
5055 */
5056 __syscall int k_pipe_alloc_init(struct k_pipe *pipe, size_t size);
5057
5058 /**
5059 * @brief Write data to a pipe.
5060 *
5061 * This routine writes up to @a bytes_to_write bytes of data to @a pipe.
5062 *
5063 * @param pipe Address of the pipe.
5064 * @param data Address of data to write.
5065 * @param bytes_to_write Size of data (in bytes).
5066 * @param bytes_written Address of area to hold the number of bytes written.
5067 * @param min_xfer Minimum number of bytes to write.
5068 * @param timeout Waiting period to wait for the data to be written,
5069 * or one of the special values K_NO_WAIT and K_FOREVER.
5070 *
5071 * @retval 0 At least @a min_xfer bytes of data were written.
5072 * @retval -EIO Returned without waiting; zero data bytes were written.
5073 * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
5074 * minus one data bytes were written.
5075 */
5076 __syscall int k_pipe_put(struct k_pipe *pipe, const void *data,
5077 size_t bytes_to_write, size_t *bytes_written,
5078 size_t min_xfer, k_timeout_t timeout);
5079
5080 /**
5081 * @brief Read data from a pipe.
5082 *
5083 * This routine reads up to @a bytes_to_read bytes of data from @a pipe.
5084 *
5085 * @param pipe Address of the pipe.
5086 * @param data Address to place the data read from pipe.
5087 * @param bytes_to_read Maximum number of data bytes to read.
5088 * @param bytes_read Address of area to hold the number of bytes read.
5089 * @param min_xfer Minimum number of data bytes to read.
5090 * @param timeout Waiting period to wait for the data to be read,
5091 * or one of the special values K_NO_WAIT and K_FOREVER.
5092 *
5093 * @retval 0 At least @a min_xfer bytes of data were read.
5094 * @retval -EINVAL invalid parameters supplied
5095 * @retval -EIO Returned without waiting; zero data bytes were read.
5096 * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
5097 * minus one data bytes were read.
5098 */
5099 __syscall int k_pipe_get(struct k_pipe *pipe, void *data,
5100 size_t bytes_to_read, size_t *bytes_read,
5101 size_t min_xfer, k_timeout_t timeout);
5102
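/*
 * Example (illustrative sketch): move a short message through a
 * statically defined pipe. The pipe name, ring buffer size, alignment,
 * and timeouts are assumptions.
 *
 * @code
 * K_PIPE_DEFINE(my_pipe, 64, 4);
 *
 * void pipe_demo(void)
 * {
 *     const char out[] = "ping";
 *     char in[sizeof(out)];
 *     size_t written, read;
 *
 *     // All-or-nothing write: min_xfer equals the full size.
 *     if (k_pipe_put(&my_pipe, out, sizeof(out), &written,
 *                    sizeof(out), K_NO_WAIT) != 0) {
 *         return;
 *     }
 *
 *     // Block up to 100 ms until the full message is available.
 *     (void)k_pipe_get(&my_pipe, in, sizeof(in), &read,
 *                      sizeof(in), K_MSEC(100));
 * }
 * @endcode
 */
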
5103 /**
5104 * @brief Query the number of bytes that may be read from @a pipe.
5105 *
5106 * @param pipe Address of the pipe.
5107 *
5108 * @retval a number n such that 0 <= n <= @ref k_pipe.size; the
5109 * result is zero for unbuffered pipes.
5110 */
5111 __syscall size_t k_pipe_read_avail(struct k_pipe *pipe);
5112
5113 /**
5114 * @brief Query the number of bytes that may be written to @a pipe
5115 *
5116 * @param pipe Address of the pipe.
5117 *
5118 * @retval a number n such that 0 <= n <= @ref k_pipe.size; the
5119 * result is zero for unbuffered pipes.
5120 */
5121 __syscall size_t k_pipe_write_avail(struct k_pipe *pipe);
5122
5123 /**
5124 * @brief Flush the pipe of write data
5125 *
5126 * This routine flushes the pipe. Flushing the pipe is equivalent to reading
5127 * both all the data in the pipe's buffer and all the data waiting to go into
5128 * that pipe into a large temporary buffer and discarding the buffer. Any
5129 * writers that were previously pended become unpended.
5130 *
5131 * @param pipe Address of the pipe.
5132 */
5133 __syscall void k_pipe_flush(struct k_pipe *pipe);
5134
5135 /**
5136 * @brief Flush the pipe's internal buffer
5137 *
5138 * This routine flushes the pipe's internal buffer. This is equivalent to
5139 * reading up to N bytes from the pipe (where N is the size of the pipe's
5140 * buffer) into a temporary buffer and then discarding that buffer. If there
5141 * were writers previously pending, then some may unpend as they try to fill
5142 * up the pipe's emptied buffer.
5143 *
5144 * @param pipe Address of the pipe.
5145 */
5146 __syscall void k_pipe_buffer_flush(struct k_pipe *pipe);
5147
5148 /** @} */
5149
5150 /**
5151 * @cond INTERNAL_HIDDEN
5152 */
5153
5154 struct k_mem_slab_info {
5155 uint32_t num_blocks;
5156 size_t block_size;
5157 uint32_t num_used;
5158 #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
5159 uint32_t max_used;
5160 #endif
5161 };
5162
5163 struct k_mem_slab {
5164 _wait_q_t wait_q;
5165 struct k_spinlock lock;
5166 char *buffer;
5167 char *free_list;
5168 struct k_mem_slab_info info;
5169
5170 SYS_PORT_TRACING_TRACKING_FIELD(k_mem_slab)
5171
5172 #ifdef CONFIG_OBJ_CORE_MEM_SLAB
5173 struct k_obj_core obj_core;
5174 #endif
5175 };
5176
5177 #define Z_MEM_SLAB_INITIALIZER(_slab, _slab_buffer, _slab_block_size, \
5178 _slab_num_blocks) \
5179 { \
5180 .wait_q = Z_WAIT_Q_INIT(&(_slab).wait_q), \
5181 .lock = {}, \
5182 .buffer = _slab_buffer, \
5183 .free_list = NULL, \
5184 .info = {_slab_num_blocks, _slab_block_size, 0} \
5185 }
5186
5187
5188 /**
5189 * INTERNAL_HIDDEN @endcond
5190 */
5191
5192 /**
5193 * @defgroup mem_slab_apis Memory Slab APIs
5194 * @ingroup kernel_apis
5195 * @{
5196 */
5197
5198 /**
5199 * @brief Statically define and initialize a memory slab in a public (non-static) scope.
5200 *
5201 * The memory slab's buffer contains @a slab_num_blocks memory blocks
5202 * that are @a slab_block_size bytes long. The buffer is aligned to a
5203 * @a slab_align -byte boundary. To ensure that each memory block is similarly
5204 * aligned to this boundary, @a slab_block_size must also be a multiple of
5205 * @a slab_align.
5206 *
5207 * The memory slab can be accessed outside the module where it is defined
5208 * using:
5209 *
5210 * @code extern struct k_mem_slab <name>; @endcode
5211 *
5212 * @note This macro cannot be used together with the static keyword.
5213 * If such a use-case is desired, use @ref K_MEM_SLAB_DEFINE_STATIC
5214 * instead.
5215 *
5216 * @param name Name of the memory slab.
5217 * @param slab_block_size Size of each memory block (in bytes).
5218 * @param slab_num_blocks Number of memory blocks.
5219 * @param slab_align Alignment of the memory slab's buffer (power of 2).
5220 */
5221 #define K_MEM_SLAB_DEFINE(name, slab_block_size, slab_num_blocks, slab_align) \
5222 char __noinit_named(k_mem_slab_buf_##name) \
5223 __aligned(WB_UP(slab_align)) \
5224 _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
5225 STRUCT_SECTION_ITERABLE(k_mem_slab, name) = \
5226 Z_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \
5227 WB_UP(slab_block_size), slab_num_blocks)
5228
5229 /**
5230 * @brief Statically define and initialize a memory slab in a private (static) scope.
5231 *
5232 * The memory slab's buffer contains @a slab_num_blocks memory blocks
5233 * that are @a slab_block_size bytes long. The buffer is aligned to a
5234 * @a slab_align -byte boundary. To ensure that each memory block is similarly
5235 * aligned to this boundary, @a slab_block_size must also be a multiple of
5236 * @a slab_align.
5237 *
5238 * @param name Name of the memory slab.
5239 * @param slab_block_size Size of each memory block (in bytes).
5240 * @param slab_num_blocks Number of memory blocks.
5241 * @param slab_align Alignment of the memory slab's buffer (power of 2).
5242 */
5243 #define K_MEM_SLAB_DEFINE_STATIC(name, slab_block_size, slab_num_blocks, slab_align) \
5244 static char __noinit_named(k_mem_slab_buf_##name) \
5245 __aligned(WB_UP(slab_align)) \
5246 _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
5247 static STRUCT_SECTION_ITERABLE(k_mem_slab, name) = \
5248 Z_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \
5249 WB_UP(slab_block_size), slab_num_blocks)
5250
5251 /**
5252 * @brief Initialize a memory slab.
5253 *
5254 * Initializes a memory slab, prior to its first use.
5255 *
5256 * The memory slab's buffer contains @a slab_num_blocks memory blocks
5257 * that are @a slab_block_size bytes long. The buffer must be aligned to an
5258 * N-byte boundary matching a word boundary, where N is a power of 2
5259 * (i.e. 4 on 32-bit systems, 8, 16, ...).
5260 * To ensure that each memory block is similarly aligned to this boundary,
5261 * @a slab_block_size must also be a multiple of N.
5262 *
5263 * @param slab Address of the memory slab.
5264 * @param buffer Pointer to buffer used for the memory blocks.
5265 * @param block_size Size of each memory block (in bytes).
5266 * @param num_blocks Number of memory blocks.
5267 *
5268 * @retval 0 on success
5269 * @retval -EINVAL invalid data supplied
5270 *
5271 */
5272 int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
5273 size_t block_size, uint32_t num_blocks);
5274
5275 /**
5276 * @brief Allocate memory from a memory slab.
5277 *
5278 * This routine allocates a memory block from a memory slab.
5279 *
5280 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5281 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5282 *
5283 * @funcprops \isr_ok
5284 *
5285 * @param slab Address of the memory slab.
5286 * @param mem Pointer to block address area.
5287 * @param timeout Waiting period to wait for operation to complete.
5288 * Use K_NO_WAIT to return without waiting,
5289 * or K_FOREVER to wait as long as necessary.
5290 *
5291 * @retval 0 Memory allocated. The block address area pointed at by @a mem
5292 * is set to the starting address of the memory block.
5293 * @retval -ENOMEM Returned without waiting.
5294 * @retval -EAGAIN Waiting period timed out.
5295 * @retval -EINVAL Invalid data supplied
5296 */
5297 int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem,
5298 k_timeout_t timeout);
5299
5300 /**
5301 * @brief Free memory allocated from a memory slab.
5302 *
5303 * This routine releases a previously allocated memory block back to its
5304 * associated memory slab.
5305 *
5306 * @param slab Address of the memory slab.
5307 * @param mem Pointer to the memory block (as returned by k_mem_slab_alloc()).
5308 */
5309 void k_mem_slab_free(struct k_mem_slab *slab, void *mem);
5310
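/*
 * Example (illustrative sketch): one block in, one block out of a
 * statically defined slab. The slab name, block size (64 bytes), block
 * count, and alignment are assumptions.
 *
 * @code
 * K_MEM_SLAB_DEFINE_STATIC(my_slab, 64, 8, 4);
 *
 * void slab_demo(void)
 * {
 *     void *block;
 *
 *     if (k_mem_slab_alloc(&my_slab, &block, K_NO_WAIT) == 0) {
 *         // ... use the fixed-size block ...
 *         k_mem_slab_free(&my_slab, block);
 *     }
 * }
 * @endcode
 */
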
5311 /**
5312 * @brief Get the number of used blocks in a memory slab.
5313 *
5314 * This routine gets the number of memory blocks that are currently
5315 * allocated in @a slab.
5316 *
5317 * @param slab Address of the memory slab.
5318 *
5319 * @return Number of allocated memory blocks.
5320 */
5321 static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
5322 {
5323 return slab->info.num_used;
5324 }
5325
5326 /**
5327 * @brief Get the number of maximum used blocks so far in a memory slab.
5328 *
5329 * This routine gets the maximum number of memory blocks that were
5330 * allocated in @a slab.
5331 *
5332 * @param slab Address of the memory slab.
5333 *
5334 * @return Maximum number of allocated memory blocks.
5335 */
5336 static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
5337 {
5338 #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
5339 return slab->info.max_used;
5340 #else
5341 ARG_UNUSED(slab);
5342 return 0;
5343 #endif
5344 }
5345
5346 /**
5347 * @brief Get the number of unused blocks in a memory slab.
5348 *
5349 * This routine gets the number of memory blocks that are currently
5350 * unallocated in @a slab.
5351 *
5352 * @param slab Address of the memory slab.
5353 *
5354 * @return Number of unallocated memory blocks.
5355 */
5356 static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
5357 {
5358 return slab->info.num_blocks - slab->info.num_used;
5359 }
5360
5361 /**
5362 * @brief Get the memory stats for a memory slab
5363 *
5364 * This routine gets the runtime memory usage stats for the slab @a slab.
5365 *
5366 * @param slab Address of the memory slab
5367 * @param stats Pointer to memory into which to copy memory usage statistics
5368 *
5369 * @retval 0 Success
5370 * @retval -EINVAL Any parameter points to NULL
5371 */
5372
5373 int k_mem_slab_runtime_stats_get(struct k_mem_slab *slab, struct sys_memory_stats *stats);
5374
5375 /**
5376 * @brief Reset the maximum memory usage for a slab
5377 *
5378 * This routine resets the maximum memory usage for the slab @a slab to its
5379 * current usage.
5380 *
5381 * @param slab Address of the memory slab
5382 *
5383 * @retval 0 Success
5384 * @retval -EINVAL Memory slab is NULL
5385 */
5386 int k_mem_slab_runtime_stats_reset_max(struct k_mem_slab *slab);
5387
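/*
 * Example (illustrative sketch): sample a slab's usage statistics and
 * restart high-watermark tracking. The slab name is an assumption;
 * struct sys_memory_stats comes from <zephyr/sys/mem_stats.h>.
 *
 * @code
 * struct sys_memory_stats stats;
 *
 * if (k_mem_slab_runtime_stats_get(&my_slab, &stats) == 0) {
 *     printk("allocated: %zu bytes\n", stats.allocated_bytes);
 * }
 * (void)k_mem_slab_runtime_stats_reset_max(&my_slab);
 * @endcode
 */
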
5388 /** @} */
5389
5390 /**
5391 * @addtogroup heap_apis
5392 * @{
5393 */
5394
5395 /* kernel synchronized heap struct */
5396
5397 struct k_heap {
5398 struct sys_heap heap;
5399 _wait_q_t wait_q;
5400 struct k_spinlock lock;
5401 };
5402
5403 /**
5404 * @brief Initialize a k_heap
5405 *
5406 * This constructs a synchronized k_heap object over a memory region
5407 * specified by the user. Note that while any alignment and size can
5408 * be passed as valid parameters, internal alignment restrictions
5409 * inside the inner sys_heap mean that not all bytes may be usable as
5410 * allocated memory.
5411 *
5412 * @param h Heap struct to initialize
5413 * @param mem Pointer to memory.
5414 * @param bytes Size of memory region, in bytes
5415 */
5416 void k_heap_init(struct k_heap *h, void *mem,
5417 size_t bytes) __attribute_nonnull(1);
5418
5419 /**
5420 * @brief Allocate aligned memory from a k_heap
5421 *
5422 * Behaves in all ways like k_heap_alloc(), except that the returned
5423 * memory (if available) will have a starting address in memory which
5424 * is a multiple of the specified power-of-two alignment value in
5425 * bytes. The resulting memory can be returned to the heap using
5426 * k_heap_free().
5427 *
5428 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5429 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5430 *
5431 * @funcprops \isr_ok
5432 *
5433 * @param h Heap from which to allocate
5434 * @param align Alignment in bytes, must be a power of two
5435 * @param bytes Number of bytes requested
5436 * @param timeout How long to wait, or K_NO_WAIT
5437 * @return Pointer to memory the caller can now use
5438 */
5439 void *k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes,
5440 k_timeout_t timeout) __attribute_nonnull(1);
5441
5442 /**
5443 * @brief Allocate memory from a k_heap
5444 *
5445 * Allocates and returns a memory buffer from the memory region owned
5446 * by the heap. If no memory is available immediately, the call will
5447 * block for the specified timeout (constructed via the standard
5448 * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
5449 * freed. If the allocation cannot be performed by the expiration of
5450 * the timeout, NULL will be returned.
5451 * Allocated memory is aligned on a multiple of pointer sizes.
5452 *
5453 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5454 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5455 *
5456 * @funcprops \isr_ok
5457 *
5458 * @param h Heap from which to allocate
5459 * @param bytes Desired size of block to allocate
5460 * @param timeout How long to wait, or K_NO_WAIT
5461 * @return A pointer to valid heap memory, or NULL
5462 */
5463 void *k_heap_alloc(struct k_heap *h, size_t bytes,
5464 k_timeout_t timeout) __attribute_nonnull(1);
5465
5466 /**
5467 * @brief Reallocate memory from a k_heap
5468 *
5469 * Reallocates and returns a memory buffer from the memory region owned
5470 * by the heap. If no memory is available immediately, the call will
5471 * block for the specified timeout (constructed via the standard
5472 * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
5473 * freed. If the allocation cannot be performed by the expiration of
5474 * the timeout, NULL will be returned.
5475 * Reallocated memory is aligned on a multiple of pointer sizes.
5476 *
5477 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5478 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5479 *
5480 * @funcprops \isr_ok
5481 *
5482 * @param h Heap from which to allocate
5483 * @param ptr Original pointer returned from a previous allocation
5484 * @param bytes Desired size of block to allocate
5485 * @param timeout How long to wait, or K_NO_WAIT
5486 *
5487 * @return Pointer to memory the caller can now use, or NULL
5488 */
5489 void *k_heap_realloc(struct k_heap *h, void *ptr, size_t bytes, k_timeout_t timeout)
5490 __attribute_nonnull(1);
5491
5492 /**
5493 * @brief Free memory allocated by k_heap_alloc()
5494 *
5495 * Returns the specified memory block, which must have been returned
5496 * from k_heap_alloc(), to the heap for use by other callers. Passing
5497 * a NULL block is legal, and has no effect.
5498 *
5499 * @param h Heap to which to return the memory
5500 * @param mem A valid memory block, or NULL
5501 */
5502 void k_heap_free(struct k_heap *h, void *mem) __attribute_nonnull(1);
5503
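/*
 * Example (illustrative sketch): a synchronized heap serving allocations
 * from an application-owned region. The region size and alignment are
 * assumptions.
 *
 * @code
 * static char heap_mem[1024] __aligned(8);
 * static struct k_heap my_heap;
 *
 * void heap_demo(void)
 * {
 *     k_heap_init(&my_heap, heap_mem, sizeof(heap_mem));
 *
 *     void *p = k_heap_alloc(&my_heap, 128, K_NO_WAIT);
 *
 *     if (p != NULL) {
 *         k_heap_free(&my_heap, p);
 *     }
 * }
 * @endcode
 */
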
5504 /* Hand-calculated minimum heap sizes needed to return a successful
5505 * 1-byte allocation. See details in lib/os/heap.[ch]
5506 */
5507 #define Z_HEAP_MIN_SIZE ((sizeof(void *) > 4) ? 56 : 44)
5508
5509 /**
5510 * @brief Define a static k_heap in the specified linker section
5511 *
5512 * This macro defines and initializes a static memory region and
5513 * k_heap of the requested size in the specified linker section.
5514 * After kernel start, &name can be used as if k_heap_init() had
5515 * been called.
5516 *
5517 * Note that this macro enforces a minimum size on the memory region
5518 * to accommodate metadata requirements. Very small heaps will be
5519 * padded to fit.
5520 *
5521 * @param name Symbol name for the struct k_heap object
5522 * @param bytes Size of memory region, in bytes
5523 * @param in_section __attribute__((section(name)))
5524 */
5525 #define Z_HEAP_DEFINE_IN_SECT(name, bytes, in_section) \
5526 char in_section \
5527 __aligned(8) /* CHUNK_UNIT */ \
5528 kheap_##name[MAX(bytes, Z_HEAP_MIN_SIZE)]; \
5529 STRUCT_SECTION_ITERABLE(k_heap, name) = { \
5530 .heap = { \
5531 .init_mem = kheap_##name, \
5532 .init_bytes = MAX(bytes, Z_HEAP_MIN_SIZE), \
5533 }, \
5534 }
5535
5536 /**
5537 * @brief Define a static k_heap
5538 *
5539 * This macro defines and initializes a static memory region and
5540 * k_heap of the requested size. After kernel start, &name can be
5541 * used as if k_heap_init() had been called.
5542 *
5543 * Note that this macro enforces a minimum size on the memory region
5544 * to accommodate metadata requirements. Very small heaps will be
5545 * padded to fit.
5546 *
5547 * @param name Symbol name for the struct k_heap object
5548 * @param bytes Size of memory region, in bytes
5549 */
5550 #define K_HEAP_DEFINE(name, bytes) \
5551 Z_HEAP_DEFINE_IN_SECT(name, bytes, \
5552 __noinit_named(kheap_buf_##name))
5553
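/*
 * Example (illustrative sketch): a 2 KiB statically defined heap; the
 * name and size are assumptions. No k_heap_init() call is needed.
 *
 * @code
 * K_HEAP_DEFINE(app_heap, 2048);
 *
 * void use_app_heap(void)
 * {
 *     void *buf = k_heap_alloc(&app_heap, 256, K_NO_WAIT);
 *
 *     k_heap_free(&app_heap, buf);  // passing NULL is legal
 * }
 * @endcode
 */
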
5554 /**
5555 * @brief Define a static k_heap in uncached memory
5556 *
5557 * This macro defines and initializes a static memory region and
5558 * k_heap of the requested size in uncached memory. After kernel
5559 * start, &name can be used as if k_heap_init() had been called.
5560 *
5561 * Note that this macro enforces a minimum size on the memory region
5562 * to accommodate metadata requirements. Very small heaps will be
5563 * padded to fit.
5564 *
5565 * @param name Symbol name for the struct k_heap object
5566 * @param bytes Size of memory region, in bytes
5567 */
5568 #define K_HEAP_DEFINE_NOCACHE(name, bytes) \
5569 Z_HEAP_DEFINE_IN_SECT(name, bytes, __nocache)
5570
5571 /**
5572 * @}
5573 */
5574
5575 /**
5576 * @defgroup heap_apis Heap APIs
5577 * @ingroup kernel_apis
5578 * @{
5579 */
5580
5581 /**
5582 * @brief Allocate memory from the heap with a specified alignment.
5583 *
5584 * This routine provides semantics similar to aligned_alloc(); memory is
5585 * allocated from the heap with a specified alignment. However, one minor
5586 * difference is that k_aligned_alloc() accepts any non-zero @p size,
5587 * whereas aligned_alloc() only accepts a @p size that is an integral
5588 * multiple of @p align.
5589 *
5590 * Above, aligned_alloc() refers to:
5591 * C11 standard (ISO/IEC 9899:2011): 7.22.3.1
5592 * The aligned_alloc function (p: 347-348)
5593 *
5594 * @param align Alignment of memory requested (in bytes).
5595 * @param size Amount of memory requested (in bytes).
5596 *
5597 * @return Address of the allocated memory if successful; otherwise NULL.
5598 */
5599 void *k_aligned_alloc(size_t align, size_t size);
5600
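/*
 * Example (illustrative sketch): a 64-byte-aligned allocation, e.g. for
 * a cache-line-sized descriptor; the alignment requirement is an
 * assumption.
 *
 * @code
 * void *desc = k_aligned_alloc(64, 128);
 *
 * if (desc != NULL) {
 *     // ((uintptr_t)desc % 64) == 0 holds here
 *     k_free(desc);
 * }
 * @endcode
 */
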
5601 /**
5602 * @brief Allocate memory from the heap.
5603 *
5604 * This routine provides traditional malloc() semantics. Memory is
5605 * allocated from the heap memory pool.
5606 * Allocated memory is aligned on a multiple of pointer sizes.
5607 *
5608 * @param size Amount of memory requested (in bytes).
5609 *
5610 * @return Address of the allocated memory if successful; otherwise NULL.
5611 */
5612 void *k_malloc(size_t size);
5613
5614 /**
5615 * @brief Free memory allocated from heap.
5616 *
5617 * This routine provides traditional free() semantics. The memory being
5618 * returned must have been allocated from the heap memory pool.
5619 *
5620 * If @a ptr is NULL, no operation is performed.
5621 *
5622 * @param ptr Pointer to previously allocated memory.
5623 */
5624 void k_free(void *ptr);
5625
5626 /**
5627 * @brief Allocate memory from heap, array style
5628 *
5629 * This routine provides traditional calloc() semantics. Memory is
5630 * allocated from the heap memory pool and zeroed.
5631 *
5632 * @param nmemb Number of elements in the requested array
5633 * @param size Size of each array element (in bytes).
5634 *
5635 * @return Address of the allocated memory if successful; otherwise NULL.
5636 */
5637 void *k_calloc(size_t nmemb, size_t size);
5638
5639 /** @brief Expand the size of an existing allocation
5640 *
5641 * Returns a pointer to a new memory region with the same contents,
5642 * but a different allocated size. If the existing allocation can be
5643 * expanded in place, the pointer returned will be identical.
5644 * Otherwise the data will be copied to a new block and the old one
5645 * will be freed as per sys_heap_free(). If the specified size is
5646 * smaller than the original, the block will be truncated in place and
5647 * the remaining memory returned to the heap. If the allocation of a
5648 * new block fails, then NULL will be returned and the old block will
5649 * not be freed or modified.
5650 *
5651 * @param ptr Original pointer returned from a previous allocation
5652 * @param size Amount of memory requested (in bytes).
5653 *
5654 * @return Pointer to memory the caller can now use, or NULL.
5655 */
5656 void *k_realloc(void *ptr, size_t size);
5657
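/*
 * Example (illustrative sketch): grow a zeroed array on the system heap.
 * Assumes the application configures a system heap
 * (CONFIG_HEAP_MEM_POOL_SIZE > 0).
 *
 * @code
 * int *v = k_calloc(4, sizeof(int));
 *
 * if (v != NULL) {
 *     int *grown = k_realloc(v, 8 * sizeof(int));
 *
 *     if (grown != NULL) {
 *         v = grown;  // first four elements preserved
 *     }
 *     k_free(v);      // v is unchanged if k_realloc() failed
 * }
 * @endcode
 */
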
5658 /** @} */
5659
5660 /* polling API - PRIVATE */
5661
5662 #ifdef CONFIG_POLL
5663 #define _INIT_OBJ_POLL_EVENT(obj) do { (obj)->poll_event = NULL; } while (false)
5664 #else
5665 #define _INIT_OBJ_POLL_EVENT(obj) do { } while (false)
5666 #endif
5667
5668 /* private - types bit positions */
5669 enum _poll_types_bits {
5670 /* can be used to ignore an event */
5671 _POLL_TYPE_IGNORE,
5672
5673 /* to be signaled by k_poll_signal_raise() */
5674 _POLL_TYPE_SIGNAL,
5675
5676 /* semaphore availability */
5677 _POLL_TYPE_SEM_AVAILABLE,
5678
5679 /* queue/FIFO/LIFO data availability */
5680 _POLL_TYPE_DATA_AVAILABLE,
5681
5682 /* msgq data availability */
5683 _POLL_TYPE_MSGQ_DATA_AVAILABLE,
5684
5685 /* pipe data availability */
5686 _POLL_TYPE_PIPE_DATA_AVAILABLE,
5687
5688 _POLL_NUM_TYPES
5689 };
5690
5691 #define Z_POLL_TYPE_BIT(type) (1U << ((type) - 1U))
5692
5693 /* private - states bit positions */
5694 enum _poll_states_bits {
5695 /* default state when creating event */
5696 _POLL_STATE_NOT_READY,
5697
5698 /* signaled by k_poll_signal_raise() */
5699 _POLL_STATE_SIGNALED,
5700
5701 /* semaphore is available */
5702 _POLL_STATE_SEM_AVAILABLE,
5703
5704 /* data is available to read on queue/FIFO/LIFO */
5705 _POLL_STATE_DATA_AVAILABLE,
5706
5707 /* queue/FIFO/LIFO wait was cancelled */
5708 _POLL_STATE_CANCELLED,
5709
5710 /* data is available to read on a message queue */
5711 _POLL_STATE_MSGQ_DATA_AVAILABLE,
5712
5713 /* data is available to read from a pipe */
5714 _POLL_STATE_PIPE_DATA_AVAILABLE,
5715
5716 _POLL_NUM_STATES
5717 };
5718
5719 #define Z_POLL_STATE_BIT(state) (1U << ((state) - 1U))
5720
5721 #define _POLL_EVENT_NUM_UNUSED_BITS \
5722 (32 - (0 \
5723 + 8 /* tag */ \
5724 + _POLL_NUM_TYPES \
5725 + _POLL_NUM_STATES \
5726 + 1 /* modes */ \
5727 ))
5728
5729 /* end of polling API - PRIVATE */
5730
5731
5732 /**
5733 * @defgroup poll_apis Async polling APIs
5734 * @ingroup kernel_apis
5735 * @{
5736 */
5737
5738 /* Public polling API */
5739
5740 /* public - values for k_poll_event.type bitfield */
5741 #define K_POLL_TYPE_IGNORE 0
5742 #define K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
5743 #define K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
5744 #define K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
5745 #define K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE
5746 #define K_POLL_TYPE_MSGQ_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_MSGQ_DATA_AVAILABLE)
5747 #define K_POLL_TYPE_PIPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_PIPE_DATA_AVAILABLE)
5748
5749 /* public - polling modes */
5750 enum k_poll_modes {
5751 /* polling thread does not take ownership of objects when available */
5752 K_POLL_MODE_NOTIFY_ONLY = 0,
5753
5754 K_POLL_NUM_MODES
5755 };
5756
5757 /* public - values for k_poll_event.state bitfield */
5758 #define K_POLL_STATE_NOT_READY 0
5759 #define K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED)
5760 #define K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
5761 #define K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
5762 #define K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE
5763 #define K_POLL_STATE_MSGQ_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_MSGQ_DATA_AVAILABLE)
5764 #define K_POLL_STATE_PIPE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_PIPE_DATA_AVAILABLE)
5765 #define K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED)
5766
5767 /* public - poll signal object */
5768 struct k_poll_signal {
5769 /** PRIVATE - DO NOT TOUCH */
5770 sys_dlist_t poll_events;
5771
5772 /**
5773 * 1 if the event has been signaled, 0 otherwise. Stays set to 1 until
5774 * user resets it to 0.
5775 */
5776 unsigned int signaled;
5777
5778 /** custom result value passed to k_poll_signal_raise() if needed */
5779 int result;
5780 };
5781
5782 #define K_POLL_SIGNAL_INITIALIZER(obj) \
5783 { \
5784 .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events), \
5785 .signaled = 0, \
5786 .result = 0, \
5787 }
5788 /**
5789 * @brief Poll Event
5790 *
5791 */
5792 struct k_poll_event {
5793 /** PRIVATE - DO NOT TOUCH */
5794 sys_dnode_t _node;
5795
5796 /** PRIVATE - DO NOT TOUCH */
5797 struct z_poller *poller;
5798
5799 /** optional user-specified tag, opaque, untouched by the API */
5800 uint32_t tag:8;
5801
5802 /** bitfield of event types (bitwise-ORed K_POLL_TYPE_xxx values) */
5803 uint32_t type:_POLL_NUM_TYPES;
5804
5805 /** bitfield of event states (bitwise-ORed K_POLL_STATE_xxx values) */
5806 uint32_t state:_POLL_NUM_STATES;
5807
5808 /** mode of operation, from enum k_poll_modes */
5809 uint32_t mode:1;
5810
5811 /** unused bits in 32-bit word */
5812 uint32_t unused:_POLL_EVENT_NUM_UNUSED_BITS;
5813
5814 /** per-type data */
5815 union {
5816 /* The typed_* fields below are used by K_POLL_EVENT_*INITIALIZER() macros to ensure
5817 * type safety of polled objects.
5818 */
5819 void *obj, *typed_K_POLL_TYPE_IGNORE;
5820 struct k_poll_signal *signal, *typed_K_POLL_TYPE_SIGNAL;
5821 struct k_sem *sem, *typed_K_POLL_TYPE_SEM_AVAILABLE;
5822 struct k_fifo *fifo, *typed_K_POLL_TYPE_FIFO_DATA_AVAILABLE;
5823 struct k_queue *queue, *typed_K_POLL_TYPE_DATA_AVAILABLE;
5824 struct k_msgq *msgq, *typed_K_POLL_TYPE_MSGQ_DATA_AVAILABLE;
5825 #ifdef CONFIG_PIPES
5826 struct k_pipe *pipe, *typed_K_POLL_TYPE_PIPE_DATA_AVAILABLE;
5827 #endif
5828 };
5829 };
5830
5831 #define K_POLL_EVENT_INITIALIZER(_event_type, _event_mode, _event_obj) \
5832 { \
5833 .poller = NULL, \
5834 .type = _event_type, \
5835 .state = K_POLL_STATE_NOT_READY, \
5836 .mode = _event_mode, \
5837 .unused = 0, \
5838 { \
5839 .typed_##_event_type = _event_obj, \
5840 }, \
5841 }
5842
5843 #define K_POLL_EVENT_STATIC_INITIALIZER(_event_type, _event_mode, _event_obj, \
5844 event_tag) \
5845 { \
5846 .tag = event_tag, \
5847 .type = _event_type, \
5848 .state = K_POLL_STATE_NOT_READY, \
5849 .mode = _event_mode, \
5850 .unused = 0, \
5851 { \
5852 .typed_##_event_type = _event_obj, \
5853 }, \
5854 }
5855
5856 /**
5857 * @brief Initialize one struct k_poll_event instance
5858 *
5859 * After this routine is called on a poll event, the event is ready to be
5860 * placed in an event array to be passed to k_poll().
5861 *
5862 * @param event The event to initialize.
5863 * @param type A bitfield of the types of event, from the K_POLL_TYPE_xxx
5864 * values. Only values that apply to the same object being polled
5865 * can be used together. Choosing K_POLL_TYPE_IGNORE disables the
5866 * event.
5867 * @param mode Future. Use K_POLL_MODE_NOTIFY_ONLY.
5868 * @param obj Kernel object or poll signal.
5869 */
5870
5871 void k_poll_event_init(struct k_poll_event *event, uint32_t type,
5872 int mode, void *obj);
5873
5874 /**
5875 * @brief Wait for one or many of multiple poll events to occur
5876 *
5877 * This routine allows a thread to wait concurrently for one or many of
5878 * multiple poll events to have occurred. Such events can be a kernel object
5879 * being available, like a semaphore, or a poll signal event.
5880 *
5881 * When an event notifies that a kernel object is available, the kernel object
5882 * is not "given" to the thread calling k_poll(): it merely signals the fact
5883 * that the object was available when the k_poll() call was in effect. Also,
5884 * all threads trying to acquire an object the regular way, i.e. by pending on
5885 * the object, have precedence over the thread polling on the object. This
5886 * means that the polling thread will never get the poll event on an object
5887 * until the object becomes available and its pend queue is empty. For this
5888 * reason, the k_poll() call is more effective when the objects being polled
5889 * only have one thread, the polling thread, trying to acquire them.
5890 *
5891 * When k_poll() returns 0, the caller should loop on all the events that were
5892 * passed to k_poll() and check the state field for the values that were
5893 * expected and take the associated actions.
5894 *
5895 * Before being reused for another call to k_poll(), the user has to reset the
5896 * state field to K_POLL_STATE_NOT_READY.
5897 *
5898 * When called from user mode, a temporary memory allocation is required from
5899 * the caller's resource pool.
5900 *
5901 * @param events An array of events to be polled for.
5902 * @param num_events The number of events in the array.
5903 * @param timeout Waiting period for an event to be ready,
5904 * or one of the special values K_NO_WAIT and K_FOREVER.
5905 *
5906 * @retval 0 One or more events are ready.
5907 * @retval -EAGAIN Waiting period timed out.
5908 * @retval -EINTR Polling has been interrupted, e.g. with
5909 * k_queue_cancel_wait(). All output events are still set and valid,
5910 * cancelled event(s) will be set to K_POLL_STATE_CANCELLED. In other
5911 * words, -EINTR status means that at least one of the output events is
5912 * K_POLL_STATE_CANCELLED.
5913 * @retval -ENOMEM Thread resource pool insufficient memory (user mode only)
5914 * @retval -EINVAL Bad parameters (user mode only)
5915 */
5916
5917 __syscall int k_poll(struct k_poll_event *events, int num_events,
5918 k_timeout_t timeout);
5919
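/*
 * Example (illustrative sketch): wait on a semaphore and a poll signal
 * at once. Object names are assumptions; note that the semaphore must
 * still be taken explicitly after k_poll() reports it available.
 *
 * @code
 * K_SEM_DEFINE(my_sem, 0, 1);
 * static struct k_poll_signal my_signal =
 *     K_POLL_SIGNAL_INITIALIZER(my_signal);
 *
 * static struct k_poll_event events[2] = {
 *     K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
 *                                     K_POLL_MODE_NOTIFY_ONLY,
 *                                     &my_sem, 0),
 *     K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SIGNAL,
 *                                     K_POLL_MODE_NOTIFY_ONLY,
 *                                     &my_signal, 0),
 * };
 *
 * void poll_demo(void)
 * {
 *     if (k_poll(events, 2, K_FOREVER) == 0) {
 *         if (events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
 *             (void)k_sem_take(&my_sem, K_NO_WAIT);
 *         }
 *         // Reset both events before polling again.
 *         events[0].state = K_POLL_STATE_NOT_READY;
 *         events[1].state = K_POLL_STATE_NOT_READY;
 *     }
 * }
 * @endcode
 */
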
5920 /**
5921 * @brief Initialize a poll signal object.
5922 *
5923 * Ready a poll signal object to be signaled via k_poll_signal_raise().
5924 *
5925 * @param sig A poll signal.
5926 */
5927
5928 __syscall void k_poll_signal_init(struct k_poll_signal *sig);
5929
5930 /**
5931 * @brief Reset a poll signal object's state to unsignaled.
5932 *
5933 * @param sig A poll signal object
5934 */
5935 __syscall void k_poll_signal_reset(struct k_poll_signal *sig);
5936
5937 /**
5938 * @brief Fetch the signaled state and result value of a poll signal
5939 *
5940 * @param sig A poll signal object
5941 * @param signaled An integer buffer which will be written nonzero if the
5942 * object was signaled
5943 * @param result An integer destination buffer which will be written with the
5944 * result value if the object was signaled, or an undefined
5945 * value if it was not.
5946 */
5947 __syscall void k_poll_signal_check(struct k_poll_signal *sig,
5948 unsigned int *signaled, int *result);
5949
5950 /**
5951 * @brief Signal a poll signal object.
5952 *
5953 * This routine makes ready a poll signal, which is basically a poll event of
5954 * type K_POLL_TYPE_SIGNAL. If a thread was polling on that event, it will be
5955 * made ready to run. A @a result value can be specified.
5956 *
5957 * The poll signal contains a 'signaled' field that, when set by
5958 * k_poll_signal_raise(), stays set until the user sets it back to 0 with
5959 * k_poll_signal_reset(). It thus has to be reset by the user before being
5960 * passed again to k_poll() or k_poll() will consider it to be signaled, and
5961 * will return immediately.
5962 *
5963 * @note The result is stored and the 'signaled' field is set even if
5964 * this function returns an error indicating that an expiring poll was
5965 * not notified. The next k_poll() will detect the missed raise.
5966 *
5967 * @param sig A poll signal.
5968 * @param result The value to store in the result field of the signal.
5969 *
5970 * @retval 0 The signal was delivered successfully.
5971 * @retval -EAGAIN The polling thread's timeout is in the process of expiring.
5972 */
5973
5974 __syscall int k_poll_signal_raise(struct k_poll_signal *sig, int result);
5975
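/*
 * Example (illustrative sketch): raise a signal in one context and
 * consume it in another after k_poll() reports K_POLL_STATE_SIGNALED.
 * The result value 0x42 is arbitrary.
 *
 * @code
 * static struct k_poll_signal sig = K_POLL_SIGNAL_INITIALIZER(sig);
 *
 * // Producer (thread or ISR):
 * (void)k_poll_signal_raise(&sig, 0x42);
 *
 * // Consumer, once polling indicates the signal fired:
 * unsigned int signaled;
 * int result;
 *
 * k_poll_signal_check(&sig, &signaled, &result);
 * if (signaled) {
 *     k_poll_signal_reset(&sig);  // re-arm before the next k_poll()
 * }
 * @endcode
 */
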
5976 /** @} */
5977
5978 /**
5979 * @defgroup cpu_idle_apis CPU Idling APIs
5980 * @ingroup kernel_apis
5981 * @{
5982 */
5983 /**
5984 * @brief Make the CPU idle.
5985 *
5986 * This function makes the CPU idle until an event wakes it up.
5987 *
5988 * In a regular system, the idle thread should be the only thread responsible
5989 * for making the CPU idle and triggering any type of power management.
5990 * However, in some more constrained systems, such as a single-threaded system,
5991 * the application's only thread may be responsible for this if needed.
5992 *
5993 * @note In some architectures, before returning, the function unmasks interrupts
5994 * unconditionally.
5995 */
5996 static inline void k_cpu_idle(void)
5997 {
5998 arch_cpu_idle();
5999 }
6000
6001 /**
6002 * @brief Make the CPU idle in an atomic fashion.
6003 *
6004 * Similar to k_cpu_idle(), but must be called with interrupts locked.
6005 *
6006 * Enabling interrupts and entering a low-power mode will be atomic,
6007 * i.e. there will be no period of time where interrupts are enabled before
6008 * the processor enters a low-power mode.
6009 *
6010 * After waking up from the low-power mode, the interrupt lockout state will
6011 * be restored as if by irq_unlock(key).
6012 *
6013 * @param key Interrupt locking key obtained from irq_lock().
6014 */
6015 static inline void k_cpu_atomic_idle(unsigned int key)
6016 {
6017 arch_cpu_atomic_idle(key);
6018 }
6019
6020 /**
6021 * @}
6022 */
6023
6024 /**
6025 * @cond INTERNAL_HIDDEN
6026 * @internal
6027 */
6028 #ifdef ARCH_EXCEPT
6029 /* This architecture has direct support for triggering a CPU exception */
6030 #define z_except_reason(reason) ARCH_EXCEPT(reason)
6031 #else
6032
6033 #if !defined(CONFIG_ASSERT_NO_FILE_INFO)
6034 #define __EXCEPT_LOC() __ASSERT_PRINT("@ %s:%d\n", __FILE__, __LINE__)
6035 #else
6036 #define __EXCEPT_LOC()
6037 #endif
6038
6039 /* NOTE: This is the implementation for arches that do not implement
6040 * ARCH_EXCEPT() to generate a real CPU exception.
6041 *
6042 * We won't have a real exception frame to determine the PC value when
6043 * the oops occurred, so print file and line number before we jump into
6044 * the fatal error handler.
6045 */
6046 #define z_except_reason(reason) do { \
6047 __EXCEPT_LOC(); \
6048 z_fatal_error(reason, NULL); \
6049 } while (false)
6050
6051 #endif /* ARCH_EXCEPT */
6052 /**
6053 * INTERNAL_HIDDEN @endcond
6054 */
6055
6056 /**
6057 * @brief Fatally terminate a thread
6058 *
6059 * This should be called when a thread has encountered an unrecoverable
6060 * runtime condition and needs to terminate. What this ultimately
6061 * means is determined by the _fatal_error_handler() implementation, which
6062 * will be called with reason code K_ERR_KERNEL_OOPS.
6063 *
6064 * If this is called from ISR context, the default system fatal error handler
6065 * will treat it as an unrecoverable system error, just like k_panic().
6066 */
6067 #define k_oops() z_except_reason(K_ERR_KERNEL_OOPS)
6068
6069 /**
6070 * @brief Fatally terminate the system
6071 *
6072 * This should be called when the Zephyr kernel has encountered an
6073 * unrecoverable runtime condition and needs to terminate. What this ultimately
6074 * means is determined by the _fatal_error_handler() implementation, which
6075 * will be called with reason code K_ERR_KERNEL_PANIC.
6076 */
6077 #define k_panic() z_except_reason(K_ERR_KERNEL_PANIC)
6078
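/*
 * Example (illustrative sketch): terminate the current thread on an
 * unrecoverable condition; resource_ok() is a hypothetical check.
 *
 * @code
 * if (!resource_ok()) {
 *     k_oops();  // never returns
 * }
 * @endcode
 */
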
6079 /**
6080 * @cond INTERNAL_HIDDEN
6081 */
6082
6083 /*
6084 * private APIs that are utilized by one or more public APIs
6085 */
6086
6087 /**
6088 * @internal
6089 */
6090 void z_timer_expiration_handler(struct _timeout *timeout);
6091 /**
6092 * INTERNAL_HIDDEN @endcond
6093 */
6094
6095 #ifdef CONFIG_PRINTK
6096 /**
6097 * @brief Emit a character buffer to the console device
6098 *
6099 * @param c String of characters to print
6100 * @param n The length of the string
6101 *
6102 */
6103 __syscall void k_str_out(char *c, size_t n);
6104 #endif
6105
6106 /**
6107 * @defgroup float_apis Floating Point APIs
6108 * @ingroup kernel_apis
6109 * @{
6110 */
6111
6112 /**
6113 * @brief Disable preservation of floating point context information.
6114 *
6115 * This routine informs the kernel that the specified thread
6116 * will no longer be using the floating point registers.
6117 *
6118 * @warning
6119 * Some architectures apply restrictions on how the disabling of floating
6120 * point preservation may be requested, see arch_float_disable.
6121 *
6122 * @warning
6123 * This routine should only be used to disable floating point support for
6124 * a thread that currently has such support enabled.
6125 *
6126 * @param thread ID of thread.
6127 *
6128 * @retval 0 On success.
6129 * @retval -ENOTSUP If the floating point disabling is not implemented.
6130 * @retval -EINVAL If the floating point disabling could not be performed.
6131 */
6132 __syscall int k_float_disable(struct k_thread *thread);
6133
6134 /**
6135 * @brief Enable preservation of floating point context information.
6136 *
6137 * This routine informs the kernel that the specified thread
6138 * will use the floating point registers.
6139 *
6140 * Invoking this routine initializes the thread's floating point context info
6141 * to that of an FPU that has been reset. The next time the thread is scheduled
6142 * by z_swap() it will either inherit an FPU that is guaranteed to be in a
6143 * "sane" state (if the most recent user of the FPU was cooperatively swapped
6144 * out) or the thread's own floating point context will be loaded (if the most
6145 * recent user of the FPU was preempted, or if this thread is the first user
6146 * of the FPU). Thereafter, the kernel will protect the thread's FP context
6147 * so that it is not altered during a preemptive context switch.
6148 *
6149 * The @a options parameter indicates which floating point register sets will
6150 * be used by the specified thread.
6151 *
6152 * For x86 options:
6153 *
6154 * - K_FP_REGS indicates x87 FPU and MMX registers only
6155 * - K_SSE_REGS indicates SSE registers (and also x87 FPU and MMX registers)
6156 *
6157 * @warning
6158 * Some architectures apply restrictions on how the enabling of floating
6159 * point preservation may be requested, see arch_float_enable.
6160 *
6161 * @warning
6162 * This routine should only be used to enable floating point support for
6163 * a thread that does not currently have such support enabled.
6164 *
6165 * @param thread ID of thread.
6166 * @param options architecture dependent options
6167 *
6168 * @retval 0 On success.
6169 * @retval -ENOTSUP If the floating point enabling is not implemented.
6170 * @retval -EINVAL If the floating point enabling could not be performed.
6171 */
6172 __syscall int k_float_enable(struct k_thread *thread, unsigned int options);
6173
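/*
 * Example (illustrative sketch): drop FP context preservation for the
 * current thread once its floating point work is done; whether this is
 * permitted is architecture dependent.
 *
 * @code
 * if (k_float_disable(k_current_get()) == -ENOTSUP) {
 *     // this architecture manages FP context unconditionally
 * }
 * @endcode
 */
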
6174 /**
6175 * @}
6176 */
6177
6178 /**
6179 * @brief Get the runtime statistics of a thread
6180 *
6181 * @param thread ID of thread.
6182 * @param stats Pointer to struct to copy statistics into.
6183 * @return -EINVAL if null pointers, otherwise 0
6184 */
6185 int k_thread_runtime_stats_get(k_tid_t thread,
6186 k_thread_runtime_stats_t *stats);
6187
6188 /**
6189 * @brief Get the runtime statistics of all threads
6190 *
6191 * @param stats Pointer to struct to copy statistics into.
6192 * @return -EINVAL if null pointers, otherwise 0
6193 */
6194 int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats);
6195
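/*
 * Example (illustrative sketch): print the total number of cycles
 * consumed by all threads. Assumes runtime statistics are enabled in
 * the build (e.g. CONFIG_THREAD_RUNTIME_STATS) and that the
 * execution_cycles field is the metric of interest.
 *
 * @code
 * k_thread_runtime_stats_t stats;
 *
 * if (k_thread_runtime_stats_all_get(&stats) == 0) {
 *     printk("total cycles: %llu\n",
 *            (unsigned long long)stats.execution_cycles);
 * }
 * @endcode
 */
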
6196 /**
6197 * @brief Get the runtime statistics of all threads on specified cpu
6198 *
6199 * @param cpu The cpu number
6200 * @param stats Pointer to struct to copy statistics into.
6201 * @return -EINVAL if null pointers, otherwise 0
6202 */
6203 int k_thread_runtime_stats_cpu_get(int cpu, k_thread_runtime_stats_t *stats);
6204
6205 /**
6206 * @brief Enable gathering of runtime statistics for specified thread
6207 *
6208 * This routine enables the gathering of runtime statistics for the specified
6209 * thread.
6210 *
6211 * @param thread ID of thread
6212 * @return -EINVAL if invalid thread ID, otherwise 0
6213 */
6214 int k_thread_runtime_stats_enable(k_tid_t thread);
6215
6216 /**
6217 * @brief Disable gathering of runtime statistics for specified thread
6218 *
6219 * This routine disables the gathering of runtime statistics for the specified
6220 * thread.
6221 *
6222 * @param thread ID of thread
6223 * @return -EINVAL if invalid thread ID, otherwise 0
6224 */
6225 int k_thread_runtime_stats_disable(k_tid_t thread);
6226
6227 /**
6228 * @brief Enable gathering of system runtime statistics
6229 *
6230 * This routine enables the gathering of system runtime statistics. Note that
6231 * it does not affect the gathering of similar statistics for individual
6232 * threads.
6233 */
6234 void k_sys_runtime_stats_enable(void);
6235
6236 /**
6237 * @brief Disable gathering of system runtime statistics
6238 *
6239 * This routine disables the gathering of system runtime statistics. Note that
6240 * it does not affect the gathering of similar statistics for individual
6241 * threads.
6242 */
6243 void k_sys_runtime_stats_disable(void);
6244
6245 #ifdef __cplusplus
6246 }
6247 #endif
6248
6249 #include <zephyr/tracing/tracing.h>
6250 #include <zephyr/syscalls/kernel.h>
6251
6252 #endif /* !_ASMLANGUAGE */
6253
6254 #endif /* ZEPHYR_INCLUDE_KERNEL_H_ */
6255