/*
 * Copyright (c) 2016, Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Public kernel APIs.
 */

#ifndef ZEPHYR_INCLUDE_KERNEL_H_
#define ZEPHYR_INCLUDE_KERNEL_H_

#if !defined(_ASMLANGUAGE)
#include <zephyr/kernel_includes.h>
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <zephyr/toolchain.h>
#include <zephyr/tracing/tracing_macros.h>
#include <zephyr/sys/mem_stats.h>
#include <zephyr/sys/iterable_sections.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Zephyr currently assumes the size of a couple standard types to simplify
 * print string formats. Let's make sure this doesn't change without notice.
 */
BUILD_ASSERT(sizeof(int32_t) == sizeof(int));
BUILD_ASSERT(sizeof(int64_t) == sizeof(long long));
BUILD_ASSERT(sizeof(intptr_t) == sizeof(long));

/**
 * @brief Kernel APIs
 * @defgroup kernel_apis Kernel APIs
 * @{
 * @}
 */

#define K_ANY NULL

#if CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES == 0
#error Zero available thread priorities defined!
#endif

#define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
#define K_PRIO_PREEMPT(x) (x)

#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
#define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
#define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
#define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
#define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)

#ifdef CONFIG_POLL
#define _POLL_EVENT_OBJ_INIT(obj) \
        .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
#define _POLL_EVENT sys_dlist_t poll_events
#else
#define _POLL_EVENT_OBJ_INIT(obj)
#define _POLL_EVENT
#endif

struct k_thread;
struct k_mutex;
struct k_sem;
struct k_msgq;
struct k_mbox;
struct k_pipe;
struct k_queue;
struct k_fifo;
struct k_lifo;
struct k_stack;
struct k_mem_slab;
struct k_timer;
struct k_poll_event;
struct k_poll_signal;
struct k_mem_domain;
struct k_mem_partition;
struct k_futex;
struct k_event;

enum execution_context_types {
        K_ISR = 0,
        K_COOP_THREAD,
        K_PREEMPT_THREAD,
};

/* private, used by k_poll and k_work_poll */
struct k_work_poll;
typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);

/**
 * @addtogroup thread_apis
 * @{
 */

typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
                                   void *user_data);

/**
 * @brief Iterate over all the threads in the system.
 *
 * This routine iterates over all the threads in the system and
 * calls the user_cb function for each thread.
 *
 * @param user_cb Pointer to the user callback function.
 * @param user_data Pointer to user data.
 *
 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
 * to be effective.
 * @note This API uses @ref k_spin_lock to protect the _kernel.threads
 * list, which means creation of new threads and termination of existing
 * threads are blocked until this API returns.
 */
extern void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);
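
/*
 * Example: counting the threads in the system with k_thread_foreach().
 * A minimal sketch; the callback and counter names below are illustrative,
 * not part of the kernel API.
 *
 *     static void count_thread_cb(const struct k_thread *thread,
 *                                 void *user_data)
 *     {
 *             int *count = user_data;
 *
 *             (*count)++;
 *     }
 *
 *     int count = 0;
 *
 *     k_thread_foreach(count_thread_cb, &count);
 */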

/**
 * @brief Iterate over all the threads in the system without locking.
 *
 * This routine works exactly the same as @ref k_thread_foreach,
 * except that the lock is released while user_cb is executed.
 *
 * @param user_cb Pointer to the user callback function.
 * @param user_data Pointer to user data.
 *
 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
 * to be effective.
 * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
 * queue elements. It unlocks it during user callback function processing.
 * If a new thread is created while this @c foreach function is in progress,
 * the new thread will not be included in the enumeration.
 * If a thread is aborted during this enumeration, there is a race and the
 * aborted thread may or may not be included in the enumeration.
 * @note If a thread is aborted and the memory occupied by its @c k_thread
 * structure is reused while this @c k_thread_foreach_unlocked is in progress,
 * the system may become unstable: this function might then never return, as
 * it would follow a @c next pointer while treating memory that is no longer
 * a k_thread structure as one.
 * Do not reuse the memory that was occupied by the k_thread structure of an
 * aborted thread if the thread was aborted after this function was called,
 * in any context.
 */
extern void k_thread_foreach_unlocked(
        k_thread_user_cb_t user_cb, void *user_data);

/** @} */

/**
 * @defgroup thread_apis Thread APIs
 * @ingroup kernel_apis
 * @{
 */

#endif /* !_ASMLANGUAGE */


/*
 * Thread user options. May be needed by assembly code. Common part uses low
 * bits, arch-specific use high bits.
 */

/**
 * @brief system thread that must not abort
 */
#define K_ESSENTIAL (BIT(0))

#if defined(CONFIG_FPU_SHARING)
/**
 * @brief FPU registers are managed by context switch
 *
 * @details
 * This option indicates that the thread uses the CPU's floating point
 * registers. This instructs the kernel to take additional steps to save
 * and restore the contents of these registers when scheduling the thread.
 * No effect if @kconfig{CONFIG_FPU_SHARING} is not enabled.
 */
#define K_FP_IDX 1
#define K_FP_REGS (BIT(K_FP_IDX))
#endif

/**
 * @brief user mode thread
 *
 * This thread has dropped from supervisor mode to user mode and consequently
 * has additional restrictions
 */
#define K_USER (BIT(2))

/**
 * @brief Inherit Permissions
 *
 * @details
 * Indicates that the thread being created should inherit all kernel object
 * permissions from the thread that created it. No effect if
 * @kconfig{CONFIG_USERSPACE} is not enabled.
 */
#define K_INHERIT_PERMS (BIT(3))

/**
 * @brief Callback item state
 *
 * @details
 * This is a single bit of state reserved for "callback manager"
 * utilities (p4wq initially) that need to track operations invoked
 * from within a user-provided callback they are executing.
 * Effectively it serves as a tiny bit of zero-overhead TLS data.
 */
#define K_CALLBACK_STATE (BIT(4))

#ifdef CONFIG_ARC
/* ARC processor Bitmask definitions for threads user options */

#if defined(CONFIG_ARC_DSP_SHARING)
/**
 * @brief DSP registers are managed by context switch
 *
 * @details
 * This option indicates that the thread uses the CPU's DSP registers.
 * This instructs the kernel to take additional steps to save and
 * restore the contents of these registers when scheduling the thread.
 * No effect if @kconfig{CONFIG_ARC_DSP_SHARING} is not enabled.
 */
#define K_DSP_IDX 6
#define K_ARC_DSP_REGS (BIT(K_DSP_IDX))
#endif

#if defined(CONFIG_ARC_AGU_SHARING)
/**
 * @brief AGU registers are managed by context switch
 *
 * @details
 * This option indicates that the thread uses the ARC processor's XY
 * memory and DSP feature. Often used with @kconfig{CONFIG_ARC_AGU_SHARING}.
 * No effect if @kconfig{CONFIG_ARC_AGU_SHARING} is not enabled.
 */
#define K_AGU_IDX 7
#define K_ARC_AGU_REGS (BIT(K_AGU_IDX))
#endif
#endif

#ifdef CONFIG_X86
/* x86 Bitmask definitions for threads user options */

#if defined(CONFIG_FPU_SHARING) && defined(CONFIG_X86_SSE)
/**
 * @brief FP and SSE registers are managed by context switch on x86
 *
 * @details
 * This option indicates that the thread uses the x86 CPU's floating point
 * and SSE registers. This instructs the kernel to take additional steps to
 * save and restore the contents of these registers when scheduling
 * the thread. No effect if @kconfig{CONFIG_X86_SSE} is not enabled.
 */
#define K_SSE_REGS (BIT(7))
#endif
#endif

/* end - thread options */

#if !defined(_ASMLANGUAGE)
/**
 * @brief Create a thread.
 *
 * This routine initializes a thread, then schedules it for execution.
 *
 * The new thread may be scheduled for immediate execution or a delayed start.
 * If the newly spawned thread does not have a delayed start, the kernel
 * scheduler may preempt the current thread to allow the new thread to
 * execute.
 *
 * Thread options are architecture-specific, and can include K_ESSENTIAL,
 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
 * them using "|" (the bitwise OR operator).
 *
 * Stack objects passed to this function must be originally defined with
 * either of these macros in order to be portable:
 *
 * - K_THREAD_STACK_DEFINE() - For stacks that may support either user or
 *   supervisor threads.
 * - K_KERNEL_STACK_DEFINE() - For stacks that may support supervisor
 *   threads only. These stacks use less memory if CONFIG_USERSPACE is
 *   enabled.
 *
 * The stack_size parameter has constraints. It must either be:
 *
 * - The original size value passed to K_THREAD_STACK_DEFINE() or
 *   K_KERNEL_STACK_DEFINE()
 * - The return value of K_THREAD_STACK_SIZEOF(stack) if the stack was
 *   defined with K_THREAD_STACK_DEFINE()
 * - The return value of K_KERNEL_STACK_SIZEOF(stack) if the stack was
 *   defined with K_KERNEL_STACK_DEFINE().
 *
 * Using other values, or sizeof(stack), may produce undefined behavior.
 *
 * @param new_thread Pointer to uninitialized struct k_thread
 * @param stack Pointer to the stack space.
 * @param stack_size Stack size in bytes.
 * @param entry Thread entry function.
 * @param p1 1st entry point parameter.
 * @param p2 2nd entry point parameter.
 * @param p3 3rd entry point parameter.
 * @param prio Thread priority.
 * @param options Thread options.
 * @param delay Scheduling delay, or K_NO_WAIT (for no delay).
 *
 * @return ID of new thread.
 *
 */
__syscall k_tid_t k_thread_create(struct k_thread *new_thread,
                                  k_thread_stack_t *stack,
                                  size_t stack_size,
                                  k_thread_entry_t entry,
                                  void *p1, void *p2, void *p3,
                                  int prio, uint32_t options, k_timeout_t delay);
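
/*
 * Example: creating a thread at preemptible priority 5 that starts
 * immediately. A minimal sketch; the names, stack size, and priority are
 * illustrative values, not kernel requirements.
 *
 *     #define MY_STACK_SIZE 1024
 *
 *     K_THREAD_STACK_DEFINE(my_stack, MY_STACK_SIZE);
 *     static struct k_thread my_thread;
 *
 *     static void my_entry(void *p1, void *p2, void *p3) { ... }
 *
 *     k_tid_t tid = k_thread_create(&my_thread, my_stack,
 *                                   K_THREAD_STACK_SIZEOF(my_stack),
 *                                   my_entry, NULL, NULL, NULL,
 *                                   K_PRIO_PREEMPT(5), 0, K_NO_WAIT);
 */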

/**
 * @brief Drop a thread's privileges permanently to user mode
 *
 * This allows a supervisor thread to be re-used as a user thread.
 * This function does not return, but control will transfer to the provided
 * entry point as if this was a new user thread.
 *
 * The implementation ensures that the stack buffer contents are erased.
 * Any thread-local storage will be reverted to a pristine state.
 *
 * Memory domain membership, resource pool assignment, kernel object
 * permissions, priority, and thread options are preserved.
 *
 * A common use of this function is to re-use the main thread as a user thread
 * once all supervisor mode-only tasks have been completed.
 *
 * @param entry Function to start executing from
 * @param p1 1st entry point parameter
 * @param p2 2nd entry point parameter
 * @param p3 3rd entry point parameter
 */
extern FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
                                                   void *p1, void *p2,
                                                   void *p3);

/**
 * @brief Grant a thread access to a set of kernel objects
 *
 * This is a convenience function. For the provided thread, grant access to
 * the remaining arguments, which must be pointers to kernel objects.
 *
 * The thread object must be initialized (i.e. running). The objects don't
 * need to be.
 * Note that NULL shouldn't be passed as an argument.
 *
 * @param thread Thread to grant access to objects
 * @param ... list of kernel object pointers
 */
#define k_thread_access_grant(thread, ...) \
        FOR_EACH_FIXED_ARG(k_object_access_grant, (;), thread, __VA_ARGS__)

/**
 * @brief Assign a resource memory pool to a thread
 *
 * By default, threads have no resource pool assigned unless their parent
 * thread has a resource pool, in which case it is inherited. Multiple
 * threads may be assigned to the same memory pool.
 *
 * Changing a thread's resource pool will not migrate allocations from the
 * previous pool.
 *
 * @param thread Target thread to assign a memory pool for resource requests.
 * @param heap Heap object to use for resources,
 *             or NULL if the thread should no longer have a memory pool.
 */
static inline void k_thread_heap_assign(struct k_thread *thread,
                                        struct k_heap *heap)
{
        thread->resource_pool = heap;
}
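
/*
 * Example: giving a thread a private resource pool backed by a k_heap.
 * A minimal sketch; the heap name and size are illustrative.
 *
 *     K_HEAP_DEFINE(my_resource_pool, 2048);
 *
 *     k_thread_heap_assign(&my_thread, &my_resource_pool);
 */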

#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
/**
 * @brief Obtain stack usage information for the specified thread
 *
 * User threads will need to have permission on the target thread object.
 *
 * Some hardware may prevent inspection of a stack buffer currently in use.
 * If this API is called from supervisor mode, on the currently running thread,
 * on a platform which selects @kconfig{CONFIG_NO_UNUSED_STACK_INSPECTION}, an
 * error will be generated.
 *
 * @param thread Thread to inspect stack information
 * @param unused_ptr Output parameter, filled in with the unused stack space
 *        of the target thread in bytes.
 * @return 0 on success
 * @return -EBADF Bad thread object (user mode only)
 * @return -EPERM No permissions on thread object (user mode only)
 * @return -ENOTSUP Forbidden by hardware policy
 * @return -EINVAL Thread is uninitialized or exited (user mode only)
 * @return -EFAULT Bad memory address for unused_ptr (user mode only)
 */
__syscall int k_thread_stack_space_get(const struct k_thread *thread,
                                       size_t *unused_ptr);
#endif

#if (CONFIG_HEAP_MEM_POOL_SIZE > 0)
/**
 * @brief Assign the system heap as a thread's resource pool
 *
 * Similar to k_thread_heap_assign(), but the thread will use
 * the kernel heap to draw memory.
 *
 * Use with caution, as a malicious thread could perform DoS attacks on the
 * kernel heap.
 *
 * @param thread Target thread to assign the system heap for resource requests
 *
 */
void k_thread_system_pool_assign(struct k_thread *thread);
#endif /* (CONFIG_HEAP_MEM_POOL_SIZE > 0) */

/**
 * @brief Sleep until a thread exits
 *
 * The caller will be put to sleep until the target thread exits, either due
 * to being aborted, self-exiting, or taking a fatal error. This API returns
 * immediately if the thread isn't running.
 *
 * This API may only be called from ISRs with a K_NO_WAIT timeout,
 * where it can be useful as a predicate to detect when a thread has
 * aborted.
 *
 * @param thread Thread to wait to exit
 * @param timeout upper bound time to wait for the thread to exit.
 * @retval 0 success, target thread has exited or wasn't running
 * @retval -EBUSY returned without waiting
 * @retval -EAGAIN waiting period timed out
 * @retval -EDEADLK target thread is joining on the caller, or target thread
 *                  is the caller
 */
__syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);
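
/*
 * Example: waiting up to one second for a worker thread to exit. A minimal
 * sketch; my_thread is assumed to be a thread created earlier.
 *
 *     int ret = k_thread_join(&my_thread, K_SECONDS(1));
 *
 *     if (ret == -EAGAIN) {
 *             ... thread was still running when the timeout expired ...
 *     }
 */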

/**
 * @brief Put the current thread to sleep.
 *
 * This routine puts the current thread to sleep for @a timeout,
 * specified as a k_timeout_t object.
 *
 * @note If @a timeout is set to K_FOREVER then the thread is suspended.
 *
 * @param timeout Desired duration of sleep.
 *
 * @return Zero if the requested time has elapsed or the number of milliseconds
 * left to sleep, if the thread was woken up by a \ref k_wakeup call.
 */
__syscall int32_t k_sleep(k_timeout_t timeout);

/**
 * @brief Put the current thread to sleep.
 *
 * This routine puts the current thread to sleep for @a ms milliseconds.
 *
 * @param ms Number of milliseconds to sleep.
 *
 * @return Zero if the requested time has elapsed or the number of milliseconds
 * left to sleep, if the thread was woken up by a \ref k_wakeup call.
 */
static inline int32_t k_msleep(int32_t ms)
{
        return k_sleep(Z_TIMEOUT_MS(ms));
}

/**
 * @brief Put the current thread to sleep with microsecond resolution.
 *
 * This function is unlikely to work as expected without kernel tuning.
 * In particular, because the lower bound on the duration of a sleep is
 * the duration of a tick, @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} must be
 * adjusted to achieve the resolution desired. The implications of doing
 * this must be understood before attempting to use k_usleep(). Use with
 * caution.
 *
 * @param us Number of microseconds to sleep.
 *
 * @return Zero if the requested time has elapsed or the number of microseconds
 * left to sleep, if the thread was woken up by a \ref k_wakeup call.
 */
__syscall int32_t k_usleep(int32_t us);

/**
 * @brief Cause the current thread to busy wait.
 *
 * This routine causes the current thread to execute a "do nothing" loop for
 * @a usec_to_wait microseconds.
 *
 * @note The clock used for the microsecond-resolution delay here may
 * be skewed relative to the clock used for system timeouts like
 * k_sleep(). For example k_busy_wait(1000) may take slightly more or
 * less time than k_sleep(K_MSEC(1)), with the offset dependent on
 * clock tolerances.
 */
__syscall void k_busy_wait(uint32_t usec_to_wait);

/**
 * @brief Check whether it is possible to yield in the current context.
 *
 * This routine checks whether the kernel is in a state where it is possible to
 * yield or call blocking APIs. It should be used by code that needs to yield
 * to perform correctly, but can feasibly be called from contexts where that
 * is not possible. For example, in the PRE_KERNEL initialization step, or
 * when being run from the idle thread.
 *
 * @return True if it is possible to yield in the current context, false otherwise.
 */
bool k_can_yield(void);

/**
 * @brief Yield the current thread.
 *
 * This routine causes the current thread to yield execution to another
 * thread of the same or higher priority. If there are no other ready threads
 * of the same or higher priority, the routine returns immediately.
 */
__syscall void k_yield(void);

/**
 * @brief Wake up a sleeping thread.
 *
 * This routine prematurely wakes up @a thread from sleeping.
 *
 * If @a thread is not currently sleeping, the routine has no effect.
 *
 * @param thread ID of thread to wake.
 */
__syscall void k_wakeup(k_tid_t thread);

/**
 * @brief Get thread ID of the current thread.
 *
 * This unconditionally queries the kernel via a system call.
 *
 * @return ID of current thread.
 */
__attribute_const__
__syscall k_tid_t z_current_get(void);

#ifdef CONFIG_THREAD_LOCAL_STORAGE
/* Thread-local cache of current thread ID, set in z_thread_entry() */
extern __thread k_tid_t z_tls_current;
#endif

/**
 * @brief Get thread ID of the current thread.
 *
 * @return ID of current thread.
 *
 */
__attribute_const__
static inline k_tid_t k_current_get(void)
{
#ifdef CONFIG_THREAD_LOCAL_STORAGE
        return z_tls_current;
#else
        return z_current_get();
#endif
}

/**
 * @brief Abort a thread.
 *
 * This routine permanently stops execution of @a thread. The thread is taken
 * off all kernel queues it is part of (i.e. the ready queue, the timeout
 * queue, or a kernel object wait queue). However, any kernel resources the
 * thread might currently own (such as mutexes or memory blocks) are not
 * released. It is the responsibility of the caller of this routine to ensure
 * all necessary cleanup is performed.
 *
 * After k_thread_abort() returns, the thread is guaranteed not to be
 * running or to become runnable anywhere on the system. Normally
 * this is done via blocking the caller (in the same manner as
 * k_thread_join()), but in interrupt context on SMP systems the
 * implementation is required to spin for threads that are running on
 * other CPUs. Note that as specified, this means that on SMP
 * platforms it is possible for application code to create a deadlock
 * condition by simultaneously aborting a cycle of threads using at
 * least one termination from interrupt context. Zephyr cannot detect
 * all such conditions.
 *
 * @param thread ID of thread to abort.
 */
__syscall void k_thread_abort(k_tid_t thread);


/**
 * @brief Start an inactive thread
 *
 * If a thread was created with K_FOREVER in the delay parameter, it will
 * not be added to the scheduling queue until this function is called
 * on it.
 *
 * @param thread thread to start
 */
__syscall void k_thread_start(k_tid_t thread);

extern k_ticks_t z_timeout_expires(const struct _timeout *timeout);
extern k_ticks_t z_timeout_remaining(const struct _timeout *timeout);

#ifdef CONFIG_SYS_CLOCK_EXISTS

/**
 * @brief Get time when a thread wakes up, in system ticks
 *
 * This routine computes the system uptime when a waiting thread next
 * executes, in units of system ticks. If the thread is not waiting,
 * it returns current system time.
 */
__syscall k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *t);

static inline k_ticks_t z_impl_k_thread_timeout_expires_ticks(
                                                const struct k_thread *t)
{
        return z_timeout_expires(&t->base.timeout);
}

/**
 * @brief Get time remaining before a thread wakes up, in system ticks
 *
 * This routine computes the time remaining before a waiting thread
 * next executes, in units of system ticks. If the thread is not
 * waiting, it returns zero.
 */
__syscall k_ticks_t k_thread_timeout_remaining_ticks(const struct k_thread *t);

static inline k_ticks_t z_impl_k_thread_timeout_remaining_ticks(
                                                const struct k_thread *t)
{
        return z_timeout_remaining(&t->base.timeout);
}

#endif /* CONFIG_SYS_CLOCK_EXISTS */

/**
 * @cond INTERNAL_HIDDEN
 */

/* timeout has timed out and is not on _timeout_q anymore */
#define _EXPIRED (-2)

struct _static_thread_data {
        struct k_thread *init_thread;
        k_thread_stack_t *init_stack;
        unsigned int init_stack_size;
        k_thread_entry_t init_entry;
        void *init_p1;
        void *init_p2;
        void *init_p3;
        int init_prio;
        uint32_t init_options;
        int32_t init_delay;
        const char *init_name;
};

#define Z_THREAD_INITIALIZER(thread, stack, stack_size,                  \
                             entry, p1, p2, p3,                          \
                             prio, options, delay, tname)                \
        {                                                                \
        .init_thread = (thread),                                         \
        .init_stack = (stack),                                           \
        .init_stack_size = (stack_size),                                 \
        .init_entry = (k_thread_entry_t)entry,                           \
        .init_p1 = (void *)p1,                                           \
        .init_p2 = (void *)p2,                                           \
        .init_p3 = (void *)p3,                                           \
        .init_prio = (prio),                                             \
        .init_options = (options),                                       \
        .init_delay = (delay),                                           \
        .init_name = STRINGIFY(tname),                                   \
        }

/*
 * Refer to K_THREAD_DEFINE() and K_KERNEL_THREAD_DEFINE() for
 * information on arguments.
 */
#define Z_THREAD_COMMON_DEFINE(name, stack_size,                         \
                               entry, p1, p2, p3,                        \
                               prio, options, delay)                     \
        struct k_thread _k_thread_obj_##name;                            \
        STRUCT_SECTION_ITERABLE(_static_thread_data,                     \
                                _k_thread_data_##name) =                 \
                Z_THREAD_INITIALIZER(&_k_thread_obj_##name,              \
                                     _k_thread_stack_##name, stack_size, \
                                     entry, p1, p2, p3, prio, options,   \
                                     delay, name);                       \
        const k_tid_t name = (k_tid_t)&_k_thread_obj_##name

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @brief Statically define and initialize a thread.
 *
 * The thread may be scheduled for immediate execution or a delayed start.
 *
 * Thread options are architecture-specific, and can include K_ESSENTIAL,
 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
 * them using "|" (the bitwise OR operator).
 *
 * The ID of the thread can be accessed using:
 *
 * @code extern const k_tid_t <name>; @endcode
 *
 * @param name Name of the thread.
 * @param stack_size Stack size in bytes.
 * @param entry Thread entry function.
 * @param p1 1st entry point parameter.
 * @param p2 2nd entry point parameter.
 * @param p3 3rd entry point parameter.
 * @param prio Thread priority.
 * @param options Thread options.
 * @param delay Scheduling delay (in milliseconds), zero for no delay.
 *
 * @note Static threads with zero delay should not normally have
 * MetaIRQ priority levels. This can preempt the system
 * initialization handling (depending on the priority of the main
 * thread) and cause surprising ordering side effects. It will not
 * affect anything in the OS per se, but consider it bad practice.
 * Use a SYS_INIT() callback if you need to run code before entry
 * to the application main().
 */
#define K_THREAD_DEFINE(name, stack_size,                                \
                        entry, p1, p2, p3,                               \
                        prio, options, delay)                            \
        K_THREAD_STACK_DEFINE(_k_thread_stack_##name, stack_size);       \
        Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3,      \
                               prio, options, delay)
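
/*
 * Example: statically defining a thread that starts 500 milliseconds after
 * boot. A minimal sketch; the names, stack size, and priority are
 * illustrative values.
 *
 *     static void blink_entry(void *p1, void *p2, void *p3) { ... }
 *
 *     K_THREAD_DEFINE(blink_tid, 1024, blink_entry, NULL, NULL, NULL,
 *                     K_PRIO_PREEMPT(7), 0, 500);
 */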

/**
 * @brief Statically define and initialize a thread intended to run only in kernel mode.
 *
 * The thread may be scheduled for immediate execution or a delayed start.
 *
 * Thread options are architecture-specific, and can include K_ESSENTIAL,
 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
 * them using "|" (the bitwise OR operator).
 *
 * The ID of the thread can be accessed using:
 *
 * @code extern const k_tid_t <name>; @endcode
 *
 * @note Threads defined by this can only run in kernel mode, and cannot be
 * transformed into a user thread via k_thread_user_mode_enter().
 *
 * @warning Depending on the architecture, the stack size (@p stack_size)
 * may need to be a multiple of CONFIG_MMU_PAGE_SIZE (with an MMU)
 * or a power of two in size (with an MPU).
 *
 * @param name Name of the thread.
 * @param stack_size Stack size in bytes.
 * @param entry Thread entry function.
 * @param p1 1st entry point parameter.
 * @param p2 2nd entry point parameter.
 * @param p3 3rd entry point parameter.
 * @param prio Thread priority.
 * @param options Thread options.
 * @param delay Scheduling delay (in milliseconds), zero for no delay.
 */
#define K_KERNEL_THREAD_DEFINE(name, stack_size,                         \
                               entry, p1, p2, p3,                        \
                               prio, options, delay)                     \
        K_KERNEL_STACK_DEFINE(_k_thread_stack_##name, stack_size);       \
        Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3,      \
                               prio, options, delay)

/**
 * @brief Get a thread's priority.
 *
 * This routine gets the priority of @a thread.
 *
 * @param thread ID of thread whose priority is needed.
 *
 * @return Priority of @a thread.
 */
__syscall int k_thread_priority_get(k_tid_t thread);

/**
 * @brief Set a thread's priority.
 *
 * This routine immediately changes the priority of @a thread.
 *
 * Rescheduling can occur immediately depending on the priority @a thread is
 * set to:
 *
 * - If its priority is raised above the priority of the caller of this
 *   function, and the caller is preemptible, @a thread will be scheduled in.
 *
 * - If the caller operates on itself, lowers its priority below that of
 *   other threads in the system, and is preemptible, the thread of highest
 *   priority will be scheduled in.
 *
 * Priority can be assigned in the range of -CONFIG_NUM_COOP_PRIORITIES to
 * CONFIG_NUM_PREEMPT_PRIORITIES-1, where -CONFIG_NUM_COOP_PRIORITIES is the
 * highest priority.
 *
 * @param thread ID of thread whose priority is to be set.
 * @param prio New priority.
 *
 * @warning Changing the priority of a thread currently involved in mutex
 * priority inheritance may result in undefined behavior.
 */
__syscall void k_thread_priority_set(k_tid_t thread, int prio);


#ifdef CONFIG_SCHED_DEADLINE
/**
 * @brief Set deadline expiration time for scheduler
 *
 * This sets the "deadline" expiration as a time delta from the
 * current time, in the same units used by k_cycle_get_32(). The
 * scheduler (when deadline scheduling is enabled) will choose the
 * next expiring thread when selecting between threads at the same
 * static priority. Threads at different priorities will be scheduled
 * according to their static priority.
 *
 * @note Deadlines are stored internally using 32 bit unsigned
 * integers. The number of cycles between the "first" deadline in the
 * scheduler queue and the "last" deadline must be less than 2^31 (i.e.
 * a signed non-negative quantity). Failure to adhere to this rule
 * may result in scheduled threads running in an incorrect deadline
 * order.
 *
 * @note Despite the API naming, the scheduler makes no guarantee that
 * the thread WILL be scheduled within that deadline, nor does it take
 * extra metadata (like e.g. the "runtime" and "period" parameters in
 * Linux sched_setattr()) that allows the kernel to validate the
 * scheduling for achievability. Such features could be implemented
 * above this call, which is simply input to the priority selection
 * logic.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_DEADLINE} in your project
 * configuration.
 *
 * @param thread A thread on which to set the deadline
 * @param deadline A time delta, in cycle units
 *
 */
__syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
#endif

#ifdef CONFIG_SCHED_CPU_MASK
/**
 * @brief Sets all CPU enable masks to zero
 *
 * After this returns, the thread will no longer be schedulable on any
 * CPUs. The thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_clear(k_tid_t thread);

/**
 * @brief Sets all CPU enable masks to one
 *
 * After this returns, the thread will be schedulable on any CPU. The
 * thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_enable_all(k_tid_t thread);

/**
 * @brief Enable a thread to run on the specified CPU
 *
 * The thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @param cpu CPU index
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_enable(k_tid_t thread, int cpu);

/**
 * @brief Prevent a thread from running on the specified CPU
 *
 * The thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @param cpu CPU index
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_disable(k_tid_t thread, int cpu);

/**
 * @brief Pin a thread to a CPU
 *
 * Pin a thread to a CPU by first clearing the cpu mask and then enabling the
 * thread on the selected CPU.
 *
 * @param thread Thread to operate upon
 * @param cpu CPU index
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_pin(k_tid_t thread, int cpu);
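
/*
 * Example: pinning a not-yet-started thread to CPU 0. A minimal sketch;
 * the thread is assumed to have been created with a K_FOREVER delay so
 * that it is not yet runnable when the mask is changed.
 *
 *     k_thread_cpu_pin(&my_thread, 0);
 *     k_thread_start(&my_thread);
 */
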
#endif

/**
 * @brief Suspend a thread.
 *
 * This routine prevents the kernel scheduler from making @a thread
 * the current thread. All other internal operations on @a thread are
 * still performed; for example, kernel objects it is waiting on are
 * still handed to it. Note that any existing timeouts
 * (e.g. k_sleep(), or a timeout argument to k_sem_take() et al.)
 * will be canceled. On resume, the thread will begin running
 * immediately and return from the blocked call.
 *
 * If @a thread is already suspended, the routine has no effect.
 *
 * @param thread ID of thread to suspend.
 */
__syscall void k_thread_suspend(k_tid_t thread);

/**
 * @brief Resume a suspended thread.
 *
 * This routine allows the kernel scheduler to make @a thread the current
 * thread, when it is next eligible for that role.
 *
 * If @a thread is not currently suspended, the routine has no effect.
 *
 * @param thread ID of thread to resume.
 */
__syscall void k_thread_resume(k_tid_t thread);

/**
 * @brief Set time-slicing period and scope.
 *
 * This routine specifies how the scheduler will perform time slicing of
 * preemptible threads.
 *
 * To enable time slicing, @a slice must be non-zero. The scheduler
 * ensures that no thread runs for more than the specified time limit
 * before other threads of that priority are given a chance to execute.
 * Any thread whose priority is higher than @a prio is exempted, and may
 * execute as long as desired without being preempted due to time slicing.
 *
 * Time slicing only limits the maximum amount of time a thread may continuously
 * execute. Once the scheduler selects a thread for execution, there is no
 * minimum guaranteed time the thread will execute before threads of greater or
 * equal priority are scheduled.
 *
 * When the current thread is the only one of that priority eligible
 * for execution, this routine has no effect; the thread is immediately
 * rescheduled after the slice period expires.
 *
 * To disable timeslicing, set both @a slice and @a prio to zero.
 *
 * @param slice Maximum time slice length (in milliseconds).
 * @param prio Highest thread priority level eligible for time slicing.
 */
extern void k_sched_time_slice_set(int32_t slice, int prio);
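
/*
 * Example: round-robin preemptible threads every 10 milliseconds. A minimal
 * sketch; the values are illustrative. Cooperative (negative) priorities
 * remain exempt from time slicing.
 *
 *     k_sched_time_slice_set(10, 0);
 *
 *     ... and later, to disable time slicing again ...
 *
 *     k_sched_time_slice_set(0, 0);
 */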

/**
 * @brief Set thread time slice
 *
 * As for k_sched_time_slice_set, but (when
 * CONFIG_TIMESLICE_PER_THREAD=y) sets the timeslice for a specific
 * thread. When non-zero, this timeslice will take precedence over
 * the global value.
 *
 * When such a thread's timeslice expires, the configured callback
 * will be called before the thread is removed/re-added to the run
 * queue. This callback will occur in interrupt context, and the
 * specified thread is guaranteed to have been preempted by the
 * currently-executing ISR. Such a callback is free to, for example,
 * modify the thread priority or slice time for future execution,
 * suspend the thread, etc...
 *
 * @note Unlike the older API, the time slice parameter here is
 * specified in ticks, not milliseconds. Ticks have always been the
 * internal unit, and not all platforms have integer conversions
 * between the two.
 *
 * @note Threads with a non-zero slice time set will be timesliced
 * always, even if they are higher priority than the maximum timeslice
 * priority set via k_sched_time_slice_set().
 *
 * @note The callback notification for slice expiration happens, as it
 * must, while the thread is still "current", and thus it happens
 * before any registered timeouts at this tick. This has the somewhat
 * confusing side effect that the tick time (c.f. k_uptime_get()) does
 * not yet reflect the expired ticks. Applications wishing to make
 * fine-grained timing decisions within this callback should use the
 * cycle API, or derived facilities like k_thread_runtime_stats_get().
 *
 * @param th A valid, initialized thread
 * @param slice_ticks Maximum timeslice, in ticks
 * @param expired Callback function called on slice expiration
 * @param data Parameter for the expiration handler
 */
void k_thread_time_slice_set(struct k_thread *th, int32_t slice_ticks,
                             k_thread_timeslice_fn_t expired, void *data);

/** @} */

/**
 * @addtogroup isr_apis
 * @{
 */

/**
 * @brief Determine if code is running at interrupt level.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it is a thread or an ISR.
 *
 * @funcprops \isr_ok
 *
 * @return false if invoked by a thread.
 * @return true if invoked by an ISR.
 */
extern bool k_is_in_isr(void);

/**
 * @brief Determine if code is running in a preemptible thread.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it can be preempted by another thread. The routine returns a 'true'
 * value if all of the following conditions are met:
 *
 * - The code is running in a thread, not in an ISR.
 * - The thread's priority is in the preemptible range.
 * - The thread has not locked the scheduler.
 *
 * @funcprops \isr_ok
 *
 * @return 0 if invoked by an ISR or by a cooperative thread.
 * @return Non-zero if invoked by a preemptible thread.
 */
__syscall int k_is_preempt_thread(void);

/**
 * @brief Test whether startup is in the before-main-task phase.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it is being invoked before the kernel is fully active.
 *
 * @funcprops \isr_ok
 *
 * @return true if invoked before post-kernel initialization
 * @return false if invoked during/after post-kernel initialization
 */
static inline bool k_is_pre_kernel(void)
{
        extern bool z_sys_post_kernel; /* in init.c */

        return !z_sys_post_kernel;
}

/**
 * @}
 */

/**
 * @addtogroup thread_apis
 * @{
 */

/**
 * @brief Lock the scheduler.
 *
 * This routine prevents the current thread from being preempted by another
 * thread by instructing the scheduler to treat it as a cooperative thread.
 * If the thread subsequently performs an operation that makes it unready,
 * it will be context switched out in the normal manner. When the thread
 * again becomes the current thread, its non-preemptible status is maintained.
 *
 * This routine can be called recursively.
 *
 * Owing to clever implementation details, scheduler locks are
 * extremely fast for non-userspace threads (just one byte
 * inc/decrement in the thread struct).
 *
 * @note This works by elevating the thread priority temporarily to a
 * cooperative priority, allowing cheap synchronization vs. other
 * preemptible or cooperative threads running on the current CPU. It
 * does not prevent preemption or asynchrony of other types. It does
 * not prevent threads from running on other CPUs when CONFIG_SMP=y.
 * It does not prevent interrupts from happening, nor does it prevent
 * threads with MetaIRQ priorities from preempting the current thread.
 * In general this is a historical API not well-suited to modern
 * applications, use with care.
 */
extern void k_sched_lock(void);

/**
 * @brief Unlock the scheduler.
 *
 * This routine reverses the effect of a previous call to k_sched_lock().
 * A thread must call the routine once for each time it called k_sched_lock()
 * before the thread becomes preemptible.
 */
extern void k_sched_unlock(void);
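
/*
 * Example: protecting a short critical section from preemption by other
 * threads on this CPU. A minimal sketch; note the caveats above: this does
 * not protect against ISRs, MetaIRQ threads, or threads on other CPUs.
 *
 *     k_sched_lock();
 *     ... update state shared with other threads ...
 *     k_sched_unlock();
 */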

/**
 * @brief Set current thread's custom data.
 *
 * This routine sets the custom data for the current thread to @a value.
 *
 * Custom data is not used by the kernel itself, and is freely available
 * for a thread to use as it sees fit. It can be used as a framework
 * upon which to build thread-local storage.
 *
 * @param value New custom data value.
 *
 */
__syscall void k_thread_custom_data_set(void *value);

/**
 * @brief Get current thread's custom data.
 *
 * This routine returns the custom data for the current thread.
 *
 * @return Current custom data value.
 */
__syscall void *k_thread_custom_data_get(void);

/**
 * @brief Set current thread name
 *
 * Set the name of the thread to be used when @kconfig{CONFIG_THREAD_MONITOR}
 * is enabled for tracing and debugging.
 *
 * @param thread Thread to set name, or NULL to set the current thread
 * @param str Name string
 * @retval 0 on success
 * @retval -EFAULT Memory access error with supplied string
 * @retval -ENOSYS Thread name configuration option not enabled
 * @retval -EINVAL Thread name too long
 */
__syscall int k_thread_name_set(k_tid_t thread, const char *str);

/**
 * @brief Get thread name
 *
 * Get the name of a thread
 *
 * @param thread Thread ID
 * @return Thread name, or NULL if configuration not enabled
 */
const char *k_thread_name_get(k_tid_t thread);

/**
 * @brief Copy the thread name into a supplied buffer
 *
 * @param thread Thread to obtain name information
 * @param buf Destination buffer
 * @param size Destination buffer size
 * @retval -ENOSPC Destination buffer too small
 * @retval -EFAULT Memory access error
 * @retval -ENOSYS Thread name feature not enabled
 * @retval 0 Success
 */
__syscall int k_thread_name_copy(k_tid_t thread, char *buf,
                                 size_t size);

/**
 * @brief Get thread state string
 *
 * This routine generates a human friendly string containing the thread's
 * state, and copies as much of it as possible into @a buf.
 *
 * @param thread_id Thread ID
 * @param buf Buffer into which to copy state strings
 * @param buf_size Size of the buffer
 *
 * @return Pointer to @a buf if data was copied, else a pointer to "".
 */
const char *k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size);

/**
 * @}
 */

/**
 * @addtogroup clock_apis
 * @{
 */

/**
 * @brief Generate null timeout delay.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * not to wait if the requested operation cannot be performed immediately.
 *
 * @return Timeout delay value.
 */
#define K_NO_WAIT Z_TIMEOUT_NO_WAIT

/**
 * @brief Generate timeout delay from nanoseconds.
 *
 * This macro generates a timeout delay that instructs a kernel API to
 * wait up to @a t nanoseconds to perform the requested operation.
 * Note that timer precision is limited to the tick rate, not the
 * requested value.
 *
 * @param t Duration in nanoseconds.
 *
 * @return Timeout delay value.
 */
#define K_NSEC(t) Z_TIMEOUT_NS(t)

/**
 * @brief Generate timeout delay from microseconds.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a t microseconds to perform the requested operation.
 * Note that timer precision is limited to the tick rate, not the
 * requested value.
 *
 * @param t Duration in microseconds.
 *
 * @return Timeout delay value.
 */
#define K_USEC(t) Z_TIMEOUT_US(t)

/**
 * @brief Generate timeout delay from cycles.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a t cycles to perform the requested operation.
 *
 * @param t Duration in cycles.
 *
 * @return Timeout delay value.
 */
#define K_CYC(t) Z_TIMEOUT_CYC(t)

/**
 * @brief Generate timeout delay from system ticks.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a t ticks to perform the requested operation.
 *
 * @param t Duration in system ticks.
 *
 * @return Timeout delay value.
 */
#define K_TICKS(t) Z_TIMEOUT_TICKS(t)

/**
 * @brief Generate timeout delay from milliseconds.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a ms milliseconds to perform the requested operation.
 *
 * @param ms Duration in milliseconds.
 *
 * @return Timeout delay value.
 */
#define K_MSEC(ms) Z_TIMEOUT_MS(ms)

/**
 * @brief Generate timeout delay from seconds.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a s seconds to perform the requested operation.
 *
 * @param s Duration in seconds.
 *
 * @return Timeout delay value.
 */
#define K_SECONDS(s) K_MSEC((s) * MSEC_PER_SEC)

/**
 * @brief Generate timeout delay from minutes.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a m minutes to perform the requested operation.
 *
 * @param m Duration in minutes.
 *
 * @return Timeout delay value.
 */
#define K_MINUTES(m) K_SECONDS((m) * 60)

/**
 * @brief Generate timeout delay from hours.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a h hours to perform the requested operation.
 *
 * @param h Duration in hours.
 *
 * @return Timeout delay value.
 */
#define K_HOURS(h) K_MINUTES((h) * 60)

/**
 * @brief Generate infinite timeout delay.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait as long as necessary to perform the requested operation.
 *
 * @return Timeout delay value.
 */
#define K_FOREVER Z_FOREVER
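
/*
 * Example: expressing the same 1.5 second timeout in different units. A
 * minimal sketch; each macro produces a k_timeout_t accepted by blocking
 * kernel APIs, and my_sem is an illustrative semaphore.
 *
 *     k_sleep(K_MSEC(1500));
 *     k_sleep(K_TICKS(k_ms_to_ticks_ceil32(1500)));
 *
 *     k_sem_take(&my_sem, K_MSEC(1500));
 */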

#ifdef CONFIG_TIMEOUT_64BIT

/**
 * @brief Generates an absolute/uptime timeout value from system ticks
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in system ticks. That is, the
 * timeout will expire immediately after the system uptime reaches the
 * specified tick count.
 *
 * @param t Tick uptime value
 * @return Timeout delay value
 */
#define K_TIMEOUT_ABS_TICKS(t) \
        Z_TIMEOUT_TICKS(Z_TICK_ABS((k_ticks_t)MAX(t, 0)))

/**
 * @brief Generates an absolute/uptime timeout value from milliseconds
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in milliseconds. That is,
 * the timeout will expire immediately after the system uptime reaches
 * the specified time.
 *
 * @param t Millisecond uptime value
 * @return Timeout delay value
 */
#define K_TIMEOUT_ABS_MS(t) K_TIMEOUT_ABS_TICKS(k_ms_to_ticks_ceil64(t))

/**
 * @brief Generates an absolute/uptime timeout value from microseconds
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in microseconds. That is,
 * the timeout will expire immediately after the system uptime reaches
 * the specified time. Note that timer precision is limited by the
 * system tick rate and not the requested timeout value.
 *
 * @param t Microsecond uptime value
 * @return Timeout delay value
 */
#define K_TIMEOUT_ABS_US(t) K_TIMEOUT_ABS_TICKS(k_us_to_ticks_ceil64(t))

/**
 * @brief Generates an absolute/uptime timeout value from nanoseconds
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in nanoseconds. That is,
 * the timeout will expire immediately after the system uptime reaches
 * the specified time. Note that timer precision is limited by the
 * system tick rate and not the requested timeout value.
 *
 * @param t Nanosecond uptime value
 * @return Timeout delay value
 */
#define K_TIMEOUT_ABS_NS(t) K_TIMEOUT_ABS_TICKS(k_ns_to_ticks_ceil64(t))

/**
 * @brief Generates an absolute/uptime timeout value from system cycles
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in cycles. That is, the
 * timeout will expire immediately after the system uptime reaches the
 * specified time. Note that timer precision is limited by the system
 * tick rate and not the requested timeout value.
 *
 * @param t Cycle uptime value
 * @return Timeout delay value
 */
#define K_TIMEOUT_ABS_CYC(t) K_TIMEOUT_ABS_TICKS(k_cyc_to_ticks_ceil64(t))

#endif

/**
 * @}
 */

/**
 * @cond INTERNAL_HIDDEN
 */

struct k_timer {
        /*
         * _timeout structure must be first here if we want to use
         * dynamic timer allocation. timeout.node is used in the doubly-linked
         * list of free timers
         */
        struct _timeout timeout;

        /* wait queue for the (single) thread waiting on this timer */
        _wait_q_t wait_q;

        /* runs in ISR context */
        void (*expiry_fn)(struct k_timer *timer);

        /* runs in the context of the thread that calls k_timer_stop() */
        void (*stop_fn)(struct k_timer *timer);

        /* timer period */
        k_timeout_t period;

        /* timer status */
        uint32_t status;

        /* user-specific data, also used to support legacy features */
        void *user_data;

        SYS_PORT_TRACING_TRACKING_FIELD(k_timer)
};

#define Z_TIMER_INITIALIZER(obj, expiry, stop) \
        { \
        .timeout = { \
                .node = {}, \
                .fn = z_timer_expiration_handler, \
                .dticks = 0, \
        }, \
        .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
        .expiry_fn = expiry, \
        .stop_fn = stop, \
        .status = 0, \
        .user_data = 0, \
        }

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @defgroup timer_apis Timer APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * @typedef k_timer_expiry_t
 * @brief Timer expiry function type.
 *
 * A timer's expiry function is executed by the system clock interrupt handler
 * each time the timer expires. The expiry function is optional, and is only
 * invoked if the timer has been initialized with one.
 *
 * @param timer Address of timer.
 */
typedef void (*k_timer_expiry_t)(struct k_timer *timer);

/**
 * @typedef k_timer_stop_t
 * @brief Timer stop function type.
 *
 * A timer's stop function is executed if the timer is stopped prematurely.
 * The function runs in the context of the call that stops the timer. As
 * k_timer_stop() can be invoked from an ISR, the stop function must be
 * callable from interrupt context (isr-ok).
 *
 * The stop function is optional, and is only invoked if the timer has been
 * initialized with one.
 *
 * @param timer Address of timer.
 */
typedef void (*k_timer_stop_t)(struct k_timer *timer);

/**
 * @brief Statically define and initialize a timer.
 *
 * The timer can be accessed outside the module where it is defined using:
 *
 * @code extern struct k_timer <name>; @endcode
 *
 * @param name Name of the timer variable.
 * @param expiry_fn Function to invoke each time the timer expires.
 * @param stop_fn Function to invoke if the timer is stopped while running.
 */
#define K_TIMER_DEFINE(name, expiry_fn, stop_fn) \
        STRUCT_SECTION_ITERABLE(k_timer, name) = \
                Z_TIMER_INITIALIZER(name, expiry_fn, stop_fn)
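
/*
 * Example: a periodic timer that fires every 100 milliseconds after an
 * initial one second delay. A minimal sketch; the names are illustrative.
 * Remember that the expiry function runs in ISR context and must not block.
 *
 *     static void my_expiry(struct k_timer *timer) { ... }
 *
 *     K_TIMER_DEFINE(my_timer, my_expiry, NULL);
 *
 *     k_timer_start(&my_timer, K_SECONDS(1), K_MSEC(100));
 */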

/**
 * @brief Initialize a timer.
 *
 * This routine initializes a timer, prior to its first use.
 *
 * @param timer Address of timer.
 * @param expiry_fn Function to invoke each time the timer expires.
 * @param stop_fn Function to invoke if the timer is stopped while running.
 */
extern void k_timer_init(struct k_timer *timer,
                         k_timer_expiry_t expiry_fn,
                         k_timer_stop_t stop_fn);

/**
 * @brief Start a timer.
 *
 * This routine starts a timer, and resets its status to zero. The timer
 * begins counting down using the specified duration and period values.
 *
 * Attempting to start a timer that is already running is permitted.
 * The timer's status is reset to zero and the timer begins counting down
 * using the new duration and period values.
 *
 * @param timer Address of timer.
 * @param duration Initial timer duration.
 * @param period Timer period.
 */
__syscall void k_timer_start(struct k_timer *timer,
                             k_timeout_t duration, k_timeout_t period);

/**
 * @brief Stop a timer.
 *
 * This routine stops a running timer prematurely. The timer's stop function,
 * if one exists, is invoked by the caller.
 *
 * Attempting to stop a timer that is not running is permitted, but has no
 * effect on the timer.
 *
 * @note The stop handler has to be callable from ISRs if k_timer_stop() is to
 * be called from ISRs.
 *
 * @funcprops \isr_ok
 *
 * @param timer Address of timer.
 */
__syscall void k_timer_stop(struct k_timer *timer);

/**
 * @brief Read timer status.
 *
 * This routine reads the timer's status, which indicates the number of times
 * it has expired since its status was last read.
 *
 * Calling this routine resets the timer's status to zero.
 *
 * @param timer Address of timer.
 *
 * @return Timer status.
 */
__syscall uint32_t k_timer_status_get(struct k_timer *timer);

/**
 * @brief Synchronize thread to timer expiration.
 *
 * This routine blocks the calling thread until the timer's status is non-zero
 * (indicating that it has expired at least once since it was last examined)
 * or the timer is stopped. If the timer status is already non-zero,
 * or the timer is already stopped, the caller continues without waiting.
 *
 * Calling this routine resets the timer's status to zero.
 *
 * This routine must not be used by interrupt handlers, since they are not
 * allowed to block.
 *
 * @param timer Address of timer.
 *
 * @return Timer status.
 */
__syscall uint32_t k_timer_status_sync(struct k_timer *timer);
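
/*
 * Example: pacing a loop off a periodic timer with k_timer_status_sync().
 * A minimal sketch reusing the illustrative my_timer object from above.
 *
 *     k_timer_start(&my_timer, K_MSEC(50), K_MSEC(50));
 *
 *     while (true) {
 *             k_timer_status_sync(&my_timer);
 *             ... runs once per 50 ms period ...
 *     }
 */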
1584
1585 #ifdef CONFIG_SYS_CLOCK_EXISTS
1586
1587 /**
1588 * @brief Get next expiration time of a timer, in system ticks
1589 *
1590 * This routine returns the future system uptime reached at the next
1591 * time of expiration of the timer, in units of system ticks. If the
1592 * timer is not running, current system time is returned.
1593 *
1594 * @param timer The timer object
1595 * @return Uptime of expiration, in ticks
1596 */
1597 __syscall k_ticks_t k_timer_expires_ticks(const struct k_timer *timer);
1598
z_impl_k_timer_expires_ticks(const struct k_timer * timer)1599 static inline k_ticks_t z_impl_k_timer_expires_ticks(
1600 const struct k_timer *timer)
1601 {
1602 return z_timeout_expires(&timer->timeout);
1603 }
1604
1605 /**
1606 * @brief Get time remaining before a timer next expires, in system ticks
1607 *
1608 * This routine computes the time remaining before a running timer
1609 * next expires, in units of system ticks. If the timer is not
1610 * running, it returns zero.
1611 */
1612 __syscall k_ticks_t k_timer_remaining_ticks(const struct k_timer *timer);
1613
z_impl_k_timer_remaining_ticks(const struct k_timer * timer)1614 static inline k_ticks_t z_impl_k_timer_remaining_ticks(
1615 const struct k_timer *timer)
1616 {
1617 return z_timeout_remaining(&timer->timeout);
1618 }
1619
1620 /**
1621 * @brief Get time remaining before a timer next expires.
1622 *
1623 * This routine computes the (approximate) time remaining before a running
1624 * timer next expires. If the timer is not running, it returns zero.
1625 *
1626 * @param timer Address of timer.
1627 *
1628 * @return Remaining time (in milliseconds).
1629 */
1630 static inline uint32_t k_timer_remaining_get(struct k_timer *timer)
1631 {
1632 return k_ticks_to_ms_floor32(k_timer_remaining_ticks(timer));
1633 }
1634
1635 #endif /* CONFIG_SYS_CLOCK_EXISTS */
1636
1637 /**
1638 * @brief Associate user-specific data with a timer.
1639 *
1640 * This routine records the @a user_data with the @a timer, to be retrieved
1641 * later.
1642 *
1643 * It can be used e.g. in a timer handler shared across multiple subsystems to
1644 * retrieve data specific to the subsystem this timer is associated with.
1645 *
1646 * @param timer Address of timer.
1647 * @param user_data User data to associate with the timer.
1648 */
1649 __syscall void k_timer_user_data_set(struct k_timer *timer, void *user_data);
1650
1651 /**
1652 * @internal
1653 */
1654 static inline void z_impl_k_timer_user_data_set(struct k_timer *timer,
1655 void *user_data)
1656 {
1657 timer->user_data = user_data;
1658 }
1659
1660 /**
1661 * @brief Retrieve the user-specific data from a timer.
1662 *
1663 * @param timer Address of timer.
1664 *
1665 * @return The user data.
1666 */
1667 __syscall void *k_timer_user_data_get(const struct k_timer *timer);
1668
1669 static inline void *z_impl_k_timer_user_data_get(const struct k_timer *timer)
1670 {
1671 return timer->user_data;
1672 }
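
/*
 * Example (illustrative sketch): share one expiry handler across several
 * timers by stashing per-timer context in the user data slot. struct
 * my_ctx, some_ctx and my_timer are hypothetical.
 *
 *   void shared_expiry_fn(struct k_timer *timer)
 *   {
 *       struct my_ctx *ctx = k_timer_user_data_get(timer);
 *       // act on ctx
 *   }
 *
 *   k_timer_user_data_set(&my_timer, &some_ctx);
 */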
1673
1674 /** @} */
1675
1676 /**
1677 * @addtogroup clock_apis
1678 * @ingroup kernel_apis
1679 * @{
1680 */
1681
1682 /**
1683 * @brief Get system uptime, in system ticks.
1684 *
1685 * This routine returns the elapsed time since the system booted, in
1686 * ticks (c.f. @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC}), which is the
1687 * fundamental unit of resolution of kernel timekeeping.
1688 *
1689 * @return Current uptime in ticks.
1690 */
1691 __syscall int64_t k_uptime_ticks(void);
1692
1693 /**
1694 * @brief Get system uptime.
1695 *
1696 * This routine returns the elapsed time since the system booted,
1697 * in milliseconds.
1698 *
1699 * @note
1700 * While this function returns time in milliseconds, it does
1701 * not mean it has millisecond resolution. The actual resolution depends on
1702 * @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
1703 *
1704 * @return Current uptime in milliseconds.
1705 */
1706 static inline int64_t k_uptime_get(void)
1707 {
1708 return k_ticks_to_ms_floor64(k_uptime_ticks());
1709 }
1710
1711 /**
1712 * @brief Get system uptime (32-bit version).
1713 *
1714 * This routine returns the lower 32 bits of the system uptime in
1715 * milliseconds.
1716 *
1717 * Because correct conversion requires full precision of the system
1718 * clock there is no benefit to using this over k_uptime_get() unless
1719 * you know the application will never run long enough for the system
1720 * clock to approach 2^32 ticks. Calls to this function may involve
1721 * interrupt blocking and 64-bit math.
1722 *
1723 * @note
1724 * While this function returns time in milliseconds, it does
1725 * not mean it has millisecond resolution. The actual resolution depends on
1726 * @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
1727 *
1728 * @return The low 32 bits of the current uptime, in milliseconds.
1729 */
1730 static inline uint32_t k_uptime_get_32(void)
1731 {
1732 return (uint32_t)k_uptime_get();
1733 }
1734
1735 /**
1736 * @brief Get elapsed time.
1737 *
1738 * This routine computes the elapsed time between the current system uptime
1739 * and an earlier reference time, in milliseconds.
1740 *
1741 * @param reftime Pointer to a reference time, which is updated to the current
1742 * uptime upon return.
1743 *
1744 * @return Elapsed time.
1745 */
1746 static inline int64_t k_uptime_delta(int64_t *reftime)
1747 {
1748 int64_t uptime, delta;
1749
1750 uptime = k_uptime_get();
1751 delta = uptime - *reftime;
1752 *reftime = uptime;
1753
1754 return delta;
1755 }
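
/*
 * Example (illustrative sketch): measure how long an operation takes;
 * do_operation() is a hypothetical function.
 *
 *   int64_t stamp = k_uptime_get();
 *
 *   do_operation();
 *
 *   int64_t took_ms = k_uptime_delta(&stamp);  // also refreshes stamp
 */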
1756
1757 /**
1758 * @brief Read the hardware clock.
1759 *
1760 * This routine returns the current time, as measured by the system's hardware
1761 * clock.
1762 *
1763 * @return Current hardware clock up-counter (in cycles).
1764 */
1765 static inline uint32_t k_cycle_get_32(void)
1766 {
1767 return arch_k_cycle_get_32();
1768 }
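
/*
 * Example (illustrative sketch): time a short code section with the
 * hardware cycle counter; k_cyc_to_ns_floor64() from the time units
 * helpers is assumed to be available, and short_section() is
 * hypothetical.
 *
 *   uint32_t start = k_cycle_get_32();
 *
 *   short_section();
 *
 *   uint64_t ns = k_cyc_to_ns_floor64(k_cycle_get_32() - start);
 */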
1769
1770 /**
1771 * @brief Read the 64-bit hardware clock.
1772 *
1773 * This routine returns the current time in 64-bits, as measured by the
1774 * system's hardware clock, if available.
1775 *
1776 * @see CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER
1777 *
1778 * @return Current hardware clock up-counter (in cycles).
1779 */
1780 static inline uint64_t k_cycle_get_64(void)
1781 {
1782 if (!IS_ENABLED(CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER)) {
1783 __ASSERT(0, "64-bit cycle counter not enabled on this platform. "
1784 "See CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER");
1785 return 0;
1786 }
1787
1788 return arch_k_cycle_get_64();
1789 }
1790
1791 /**
1792 * @}
1793 */
1794
1795 /**
1796 * @cond INTERNAL_HIDDEN
1797 */
1798
1799 struct k_queue {
1800 sys_sflist_t data_q;
1801 struct k_spinlock lock;
1802 _wait_q_t wait_q;
1803
1804 _POLL_EVENT;
1805
1806 SYS_PORT_TRACING_TRACKING_FIELD(k_queue)
1807 };
1808
1809 #define Z_QUEUE_INITIALIZER(obj) \
1810 { \
1811 .data_q = SYS_SFLIST_STATIC_INIT(&obj.data_q), \
1812 .lock = { }, \
1813 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
1814 _POLL_EVENT_OBJ_INIT(obj) \
1815 }
1816
1817 extern void *z_queue_node_peek(sys_sfnode_t *node, bool needs_free);
1818
1819 /**
1820 * INTERNAL_HIDDEN @endcond
1821 */
1822
1823 /**
1824 * @defgroup queue_apis Queue APIs
1825 * @ingroup kernel_apis
1826 * @{
1827 */
1828
1829 /**
1830 * @brief Initialize a queue.
1831 *
1832 * This routine initializes a queue object, prior to its first use.
1833 *
1834 * @param queue Address of the queue.
1835 */
1836 __syscall void k_queue_init(struct k_queue *queue);
1837
1838 /**
1839 * @brief Cancel waiting on a queue.
1840 *
1841 * This routine causes the first thread pending on @a queue, if any, to
1842 * return from its k_queue_get() call with a NULL value (as if the timeout expired).
1843 * If the queue is being waited on by k_poll(), it will return with
1844 * -EINTR and K_POLL_STATE_CANCELLED state (and per above, subsequent
1845 * k_queue_get() will return NULL).
1846 *
1847 * @funcprops \isr_ok
1848 *
1849 * @param queue Address of the queue.
1850 */
1851 __syscall void k_queue_cancel_wait(struct k_queue *queue);
1852
1853 /**
1854 * @brief Append an element to the end of a queue.
1855 *
1856 * This routine appends a data item to @a queue. A queue data item must be
1857 * aligned on a word boundary, and the first word of the item is reserved
1858 * for the kernel's use.
1859 *
1860 * @funcprops \isr_ok
1861 *
1862 * @param queue Address of the queue.
1863 * @param data Address of the data item.
1864 */
1865 extern void k_queue_append(struct k_queue *queue, void *data);
1866
1867 /**
1868 * @brief Append an element to a queue.
1869 *
1870 * This routine appends a data item to @a queue. There is an implicit memory
1871 * allocation to create an additional temporary bookkeeping data structure from
1872 * the calling thread's resource pool, which is automatically freed when the
1873 * item is removed. The data itself is not copied.
1874 *
1875 * @funcprops \isr_ok
1876 *
1877 * @param queue Address of the queue.
1878 * @param data Address of the data item.
1879 *
1880 * @retval 0 on success
1881 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
1882 */
1883 __syscall int32_t k_queue_alloc_append(struct k_queue *queue, void *data);
1884
1885 /**
1886 * @brief Prepend an element to a queue.
1887 *
1888 * This routine prepends a data item to @a queue. A queue data item must be
1889 * aligned on a word boundary, and the first word of the item is reserved
1890 * for the kernel's use.
1891 *
1892 * @funcprops \isr_ok
1893 *
1894 * @param queue Address of the queue.
1895 * @param data Address of the data item.
1896 */
1897 extern void k_queue_prepend(struct k_queue *queue, void *data);
1898
1899 /**
1900 * @brief Prepend an element to a queue.
1901 *
1902 * This routine prepends a data item to @a queue. There is an implicit memory
1903 * allocation to create an additional temporary bookkeeping data structure from
1904 * the calling thread's resource pool, which is automatically freed when the
1905 * item is removed. The data itself is not copied.
1906 *
1907 * @funcprops \isr_ok
1908 *
1909 * @param queue Address of the queue.
1910 * @param data Address of the data item.
1911 *
1912 * @retval 0 on success
1913 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
1914 */
1915 __syscall int32_t k_queue_alloc_prepend(struct k_queue *queue, void *data);
1916
1917 /**
1918 * @brief Insert an element into a queue.
1919 *
1920 * This routine inserts a data item into @a queue after item @a prev. A queue
1921 * data item must be aligned on a word boundary, and the first word of
1922 * the item is reserved for the kernel's use.
1923 *
1924 * @funcprops \isr_ok
1925 *
1926 * @param queue Address of the queue.
1927 * @param prev Address of the previous data item.
1928 * @param data Address of the data item.
1929 */
1930 extern void k_queue_insert(struct k_queue *queue, void *prev, void *data);
1931
1932 /**
1933 * @brief Atomically append a list of elements to a queue.
1934 *
1935 * This routine adds a list of data items to @a queue in one operation.
1936 * The data items must be in a singly-linked list, with the first word
1937 * in each data item pointing to the next data item; the list must be
1938 * NULL-terminated.
1939 *
1940 * @funcprops \isr_ok
1941 *
1942 * @param queue Address of the queue.
1943 * @param head Pointer to first node in singly-linked list.
1944 * @param tail Pointer to last node in singly-linked list.
1945 *
1946 * @retval 0 on success
1947 * @retval -EINVAL on invalid supplied data
1948 *
1949 */
1950 extern int k_queue_append_list(struct k_queue *queue, void *head, void *tail);
1951
1952 /**
1953 * @brief Atomically add a list of elements to a queue.
1954 *
1955 * This routine adds a list of data items to @a queue in one operation.
1956 * The data items must be in a singly-linked list implemented using a
1957 * sys_slist_t object. Upon completion, the original list is empty.
1958 *
1959 * @funcprops \isr_ok
1960 *
1961 * @param queue Address of the queue.
1962 * @param list Pointer to sys_slist_t object.
1963 *
1964 * @retval 0 on success
1965 * @retval -EINVAL on invalid data
1966 */
1967 extern int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list);
1968
1969 /**
1970 * @brief Get an element from a queue.
1971 *
1972 * This routine removes the first data item from @a queue. The first word of the
1973 * data item is reserved for the kernel's use.
1974 *
1975 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
1976 *
1977 * @funcprops \isr_ok
1978 *
1979 * @param queue Address of the queue.
1980 * @param timeout Non-negative waiting period to obtain a data item
1981 * or one of the special values K_NO_WAIT and
1982 * K_FOREVER.
1983 *
1984 * @return Address of the data item if successful; NULL if returned
1985 * without waiting, or waiting period timed out.
1986 */
1987 __syscall void *k_queue_get(struct k_queue *queue, k_timeout_t timeout);
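
/*
 * Example (illustrative sketch): a minimal producer/consumer pair. The
 * first word of every item is left for the kernel, so the payload
 * follows a reserved pointer field. All names are hypothetical.
 *
 *   struct my_item {
 *       void *reserved;   // first word reserved for the kernel
 *       int payload;
 *   };
 *
 *   K_QUEUE_DEFINE(my_queue);
 *
 *   void producer(void)
 *   {
 *       static struct my_item item = { .payload = 42 };
 *
 *       k_queue_append(&my_queue, &item);
 *   }
 *
 *   void consumer(void)
 *   {
 *       struct my_item *item = k_queue_get(&my_queue, K_FOREVER);
 *   }
 */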
1988
1989 /**
1990 * @brief Remove an element from a queue.
1991 *
1992 * This routine removes a data item from @a queue. The first word of the
1993 * data item is reserved for the kernel's use. Removing elements from a k_queue
1994 * relies on sys_slist_find_and_remove(), which is not a constant-time operation.
1995 *
1996 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
1997 *
1998 * @funcprops \isr_ok
1999 *
2000 * @param queue Address of the queue.
2001 * @param data Address of the data item.
2002 *
2003 * @return true if data item was removed
2004 */
2005 bool k_queue_remove(struct k_queue *queue, void *data);
2006
2007 /**
2008 * @brief Append an element to a queue only if it's not present already.
2009 *
2010 * This routine appends a data item to @a queue. The first word of the data
2011 * item is reserved for the kernel's use. Appending elements to a k_queue
2012 * relies on sys_slist_is_node_in_list(), which is not a constant-time operation.
2013 *
2014 * @funcprops \isr_ok
2015 *
2016 * @param queue Address of the queue.
2017 * @param data Address of the data item.
2018 *
2019 * @return true if data item was added, false if not
2020 */
2021 bool k_queue_unique_append(struct k_queue *queue, void *data);
2022
2023 /**
2024 * @brief Query a queue to see if it has data available.
2025 *
2026 * Note that the data might already be gone by the time this function returns
2027 * if other threads are also trying to read from the queue.
2028 *
2029 * @funcprops \isr_ok
2030 *
2031 * @param queue Address of the queue.
2032 *
2033 * @return Non-zero if the queue is empty.
2034 * @return 0 if data is available.
2035 */
2036 __syscall int k_queue_is_empty(struct k_queue *queue);
2037
2038 static inline int z_impl_k_queue_is_empty(struct k_queue *queue)
2039 {
2040 return (int)sys_sflist_is_empty(&queue->data_q);
2041 }
2042
2043 /**
2044 * @brief Peek element at the head of queue.
2045 *
2046 * Return the element at the head of the queue without removing it.
2047 *
2048 * @param queue Address of the queue.
2049 *
2050 * @return Head element, or NULL if queue is empty.
2051 */
2052 __syscall void *k_queue_peek_head(struct k_queue *queue);
2053
2054 /**
2055 * @brief Peek element at the tail of queue.
2056 *
2057 * Return the element at the tail of the queue without removing it.
2058 *
2059 * @param queue Address of the queue.
2060 *
2061 * @return Tail element, or NULL if queue is empty.
2062 */
2063 __syscall void *k_queue_peek_tail(struct k_queue *queue);
2064
2065 /**
2066 * @brief Statically define and initialize a queue.
2067 *
2068 * The queue can be accessed outside the module where it is defined using:
2069 *
2070 * @code extern struct k_queue <name>; @endcode
2071 *
2072 * @param name Name of the queue.
2073 */
2074 #define K_QUEUE_DEFINE(name) \
2075 STRUCT_SECTION_ITERABLE(k_queue, name) = \
2076 Z_QUEUE_INITIALIZER(name)
2077
2078 /** @} */
2079
2080 #ifdef CONFIG_USERSPACE
2081 /**
2082 * @brief futex structure
2083 *
2084 * A k_futex is a lightweight mutual exclusion primitive designed
2085 * to minimize kernel involvement. Uncontended operation relies
2086 * only on atomic access to shared memory. A k_futex is tracked as
2087 * a kernel object and can live in user memory so that any access
2088 * bypasses the kernel object permission management mechanism.
2089 */
2090 struct k_futex {
2091 atomic_t val;
2092 };
2093
2094 /**
2095 * @brief futex kernel data structure
2096 *
2097 * z_futex_data is the helper data structure that k_futex uses to
2098 * complete contended operations on the kernel side; the z_futex_data
2099 * structure of every futex object is invisible in user mode.
2100 */
2101 struct z_futex_data {
2102 _wait_q_t wait_q;
2103 struct k_spinlock lock;
2104 };
2105
2106 #define Z_FUTEX_DATA_INITIALIZER(obj) \
2107 { \
2108 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q) \
2109 }
2110
2111 /**
2112 * @defgroup futex_apis FUTEX APIs
2113 * @ingroup kernel_apis
2114 * @{
2115 */
2116
2117 /**
2118 * @brief Pend the current thread on a futex
2119 *
2120 * Tests that the supplied futex contains the expected value, and if so,
2121 * goes to sleep until some other thread calls k_futex_wake() on it.
2122 *
2123 * @param futex Address of the futex.
2124 * @param expected Expected value of the futex, if it is different the caller
2125 * will not wait on it.
2126 * @param timeout Non-negative waiting period on the futex, or
2127 * one of the special values K_NO_WAIT or K_FOREVER.
2128 * @retval -EACCES Caller does not have read access to futex address.
2129 * @retval -EAGAIN If the futex value did not match the expected parameter.
2130 * @retval -EINVAL Futex parameter address not recognized by the kernel.
2131 * @retval -ETIMEDOUT Thread woke up due to timeout and not a futex wakeup.
2132 * @retval 0 if the caller went to sleep and was woken up. The caller
2133 * should check the futex's value on wakeup to determine if it needs
2134 * to block again.
2135 */
2136 __syscall int k_futex_wait(struct k_futex *futex, int expected,
2137 k_timeout_t timeout);
2138
2139 /**
2140 * @brief Wake one/all threads pending on a futex
2141 *
2142 * Wake up the highest priority thread pending on the supplied futex, or
2143 * wake up all threads pending on the supplied futex, depending on
2144 * @a wake_all.
2145 *
2146 * @param futex Futex to wake up pending threads.
2147 * @param wake_all If true, wake up all pending threads; if false,
2148 * wake up only the highest priority thread.
2149 * @retval -EACCES Caller does not have access to the futex address.
2150 * @retval -EINVAL Futex parameter address not recognized by the kernel.
2151 * @return Number of threads that were woken up.
2152 */
2153 __syscall int k_futex_wake(struct k_futex *futex, bool wake_all);
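
/*
 * Example (illustrative sketch) of the classic futex protocol: the waiter
 * sleeps only while the value still matches the expected value, and
 * re-checks the condition after every wakeup. my_futex and the flag
 * convention are hypothetical.
 *
 *   struct k_futex my_futex;   // in memory shared with the waker
 *
 *   void waiter(void)
 *   {
 *       while (atomic_get(&my_futex.val) == 0) {
 *           k_futex_wait(&my_futex, 0, K_FOREVER);
 *       }
 *   }
 *
 *   void waker(void)
 *   {
 *       atomic_set(&my_futex.val, 1);
 *       k_futex_wake(&my_futex, false);
 *   }
 */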
2154
2155 /** @} */
2156 #endif
2157
2158 /**
2159 * @defgroup event_apis Event APIs
2160 * @ingroup kernel_apis
2161 * @{
2162 */
2163
2164 /**
2165 * Event Structure
2166 * @ingroup event_apis
2167 */
2168
2169 struct k_event {
2170 _wait_q_t wait_q;
2171 uint32_t events;
2172 struct k_spinlock lock;
2173
2174 SYS_PORT_TRACING_TRACKING_FIELD(k_event)
2175 };
2176
2177 #define Z_EVENT_INITIALIZER(obj) \
2178 { \
2179 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2180 .events = 0 \
2181 }
2182
2183 /**
2184 * @brief Initialize an event object
2185 *
2186 * This routine initializes an event object, prior to its first use.
2187 *
2188 * @param event Address of the event object.
2189 */
2190 __syscall void k_event_init(struct k_event *event);
2191
2192 /**
2193 * @brief Post one or more events to an event object
2194 *
2195 * This routine posts one or more events to an event object. All tasks waiting
2196 * on the event object @a event whose waiting conditions become met by this
2197 * posting immediately unpend.
2198 *
2199 * Posting differs from setting in that posted events are merged together with
2200 * the current set of events tracked by the event object.
2201 *
2202 * @param event Address of the event object
2203 * @param events Set of events to post to @a event
2204 */
2205 __syscall void k_event_post(struct k_event *event, uint32_t events);
2206
2207 /**
2208 * @brief Set the events in an event object
2209 *
2210 * This routine sets the events stored in event object to the specified value.
2211 * All tasks waiting on the event object @a event whose waiting conditions
2212 * become met by this immediately unpend.
2213 *
2214 * Setting differs from posting in that set events replace the current set of
2215 * events tracked by the event object.
2216 *
2217 * @param event Address of the event object
2218 * @param events Set of events to set in @a event
2219 */
2220 __syscall void k_event_set(struct k_event *event, uint32_t events);
2221
2222 /**
2223 * @brief Set or clear the events in an event object
2224 *
2225 * This routine sets the events stored in event object to the specified value.
2226 * All tasks waiting on the event object @a event whose waiting conditions
2227 * become met by this immediately unpend. Unlike @ref k_event_set, this routine
2228 * allows specific event bits to be set and cleared as determined by the mask.
2229 *
2230 * @param event Address of the event object
2231 * @param events Set of events to set/clear in @a event
2232 * @param events_mask Mask to be applied to @a events
2233 */
2234 __syscall void k_event_set_masked(struct k_event *event, uint32_t events,
2235 uint32_t events_mask);
2236
2237 /**
2238 * @brief Clear the events in an event object
2239 *
2240 * This routine clears (resets) the specified events stored in an event object.
2241 *
2242 * @param event Address of the event object
2243 * @param events Set of events to clear in @a event
2244 */
2245 __syscall void k_event_clear(struct k_event *event, uint32_t events);
2246
2247 /**
2248 * @brief Wait for any of the specified events
2249 *
2250 * This routine waits on event object @a event until any of the specified
2251 * events have been delivered to the event object, or the maximum wait time
2252 * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
2253 * events that are expressed as bits in a single 32-bit word.
2254 *
2255 * @note The caller must be careful when resetting if there are multiple threads
2256 * waiting for the event object @a event.
2257 *
2258 * @param event Address of the event object
2259 * @param events Set of desired events on which to wait
2260 * @param reset If true, clear the set of events tracked by the event object
2261 * before waiting. If false, do not clear the events.
2262 * @param timeout Waiting period for the desired set of events or one of the
2263 * special values K_NO_WAIT and K_FOREVER.
2264 *
2265 * @retval set of matching events upon success
2266 * @retval 0 if matching events were not received within the specified time
2267 */
2268 __syscall uint32_t k_event_wait(struct k_event *event, uint32_t events,
2269 bool reset, k_timeout_t timeout);
2270
2271 /**
2272 * @brief Wait for all of the specified events
2273 *
2274 * This routine waits on event object @a event until all of the specified
2275 * events have been delivered to the event object, or the maximum wait time
2276 * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
2277 * events that are expressed as bits in a single 32-bit word.
2278 *
2279 * @note The caller must be careful when resetting if there are multiple threads
2280 * waiting for the event object @a event.
2281 *
2282 * @param event Address of the event object
2283 * @param events Set of desired events on which to wait
2284 * @param reset If true, clear the set of events tracked by the event object
2285 * before waiting. If false, do not clear the events.
2286 * @param timeout Waiting period for the desired set of events or one of the
2287 * special values K_NO_WAIT and K_FOREVER.
2288 *
2289 * @retval set of matching events upon success
2290 * @retval 0 if matching events were not received within the specified time
2291 */
2292 __syscall uint32_t k_event_wait_all(struct k_event *event, uint32_t events,
2293 bool reset, k_timeout_t timeout);
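
/*
 * Example (illustrative sketch): a thread waits for either of two event
 * bits posted from other contexts. Bit assignments and names are
 * hypothetical.
 *
 *   #define EVT_RX_DONE BIT(0)
 *   #define EVT_TX_DONE BIT(1)
 *
 *   K_EVENT_DEFINE(my_event);
 *
 *   void notifier(void)
 *   {
 *       k_event_post(&my_event, EVT_RX_DONE);
 *   }
 *
 *   void waiter(void)
 *   {
 *       uint32_t ev = k_event_wait(&my_event, EVT_RX_DONE | EVT_TX_DONE,
 *                                  false, K_MSEC(100));
 *       if (ev == 0) {
 *           // timed out
 *       }
 *   }
 */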
2294
2295 /**
2296 * @brief Statically define and initialize an event object
2297 *
2298 * The event can be accessed outside the module where it is defined using:
2299 *
2300 * @code extern struct k_event <name>; @endcode
2301 *
2302 * @param name Name of the event object.
2303 */
2304 #define K_EVENT_DEFINE(name) \
2305 STRUCT_SECTION_ITERABLE(k_event, name) = \
2306 Z_EVENT_INITIALIZER(name);
2307
2308 /** @} */
2309
2310 struct k_fifo {
2311 struct k_queue _queue;
2312 };
2313
2314 /**
2315 * @cond INTERNAL_HIDDEN
2316 */
2317 #define Z_FIFO_INITIALIZER(obj) \
2318 { \
2319 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
2320 }
2321
2322 /**
2323 * INTERNAL_HIDDEN @endcond
2324 */
2325
2326 /**
2327 * @defgroup fifo_apis FIFO APIs
2328 * @ingroup kernel_apis
2329 * @{
2330 */
2331
2332 /**
2333 * @brief Initialize a FIFO queue.
2334 *
2335 * This routine initializes a FIFO queue, prior to its first use.
2336 *
2337 * @param fifo Address of the FIFO queue.
2338 */
2339 #define k_fifo_init(fifo) \
2340 ({ \
2341 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, init, fifo); \
2342 k_queue_init(&(fifo)->_queue); \
2343 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, init, fifo); \
2344 })
2345
2346 /**
2347 * @brief Cancel waiting on a FIFO queue.
2348 *
2349 * This routine causes the first thread pending on @a fifo, if any, to
2350 * return from its k_fifo_get() call with a NULL value (as if the timeout
2351 * expired).
2352 *
2353 * @funcprops \isr_ok
2354 *
2355 * @param fifo Address of the FIFO queue.
2356 */
2357 #define k_fifo_cancel_wait(fifo) \
2358 ({ \
2359 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, cancel_wait, fifo); \
2360 k_queue_cancel_wait(&(fifo)->_queue); \
2361 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, cancel_wait, fifo); \
2362 })
2363
2364 /**
2365 * @brief Add an element to a FIFO queue.
2366 *
2367 * This routine adds a data item to @a fifo. A FIFO data item must be
2368 * aligned on a word boundary, and the first word of the item is reserved
2369 * for the kernel's use.
2370 *
2371 * @funcprops \isr_ok
2372 *
2373 * @param fifo Address of the FIFO queue.
2374 * @param data Address of the data item.
2375 */
2376 #define k_fifo_put(fifo, data) \
2377 ({ \
2378 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put, fifo, data); \
2379 k_queue_append(&(fifo)->_queue, data); \
2380 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put, fifo, data); \
2381 })
2382
2383 /**
2384 * @brief Add an element to a FIFO queue.
2385 *
2386 * This routine adds a data item to @a fifo. There is an implicit memory
2387 * allocation to create an additional temporary bookkeeping data structure from
2388 * the calling thread's resource pool, which is automatically freed when the
2389 * item is removed. The data itself is not copied.
2390 *
2391 * @funcprops \isr_ok
2392 *
2393 * @param fifo Address of the FIFO queue.
2394 * @param data Address of the data item.
2395 *
2396 * @retval 0 on success
2397 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2398 */
2399 #define k_fifo_alloc_put(fifo, data) \
2400 ({ \
2401 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, alloc_put, fifo, data); \
2402 int ret = k_queue_alloc_append(&(fifo)->_queue, data); \
2403 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, alloc_put, fifo, data, ret); \
2404 ret; \
2405 })
2406
2407 /**
2408 * @brief Atomically add a list of elements to a FIFO.
2409 *
2410 * This routine adds a list of data items to @a fifo in one operation.
2411 * The data items must be in a singly-linked list, with the first word of
2412 * each data item pointing to the next data item; the list must be
2413 * NULL-terminated.
2414 *
2415 * @funcprops \isr_ok
2416 *
2417 * @param fifo Address of the FIFO queue.
2418 * @param head Pointer to first node in singly-linked list.
2419 * @param tail Pointer to last node in singly-linked list.
2420 */
2421 #define k_fifo_put_list(fifo, head, tail) \
2422 ({ \
2423 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_list, fifo, head, tail); \
2424 k_queue_append_list(&(fifo)->_queue, head, tail); \
2425 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_list, fifo, head, tail); \
2426 })
2427
2428 /**
2429 * @brief Atomically add a list of elements to a FIFO queue.
2430 *
2431 * This routine adds a list of data items to @a fifo in one operation.
2432 * The data items must be in a singly-linked list implemented using a
2433 * sys_slist_t object. Upon completion, the sys_slist_t object is invalid
2434 * and must be re-initialized via sys_slist_init().
2435 *
2436 * @funcprops \isr_ok
2437 *
2438 * @param fifo Address of the FIFO queue.
2439 * @param list Pointer to sys_slist_t object.
2440 */
2441 #define k_fifo_put_slist(fifo, list) \
2442 ({ \
2443 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_slist, fifo, list); \
2444 k_queue_merge_slist(&(fifo)->_queue, list); \
2445 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_slist, fifo, list); \
2446 })
2447
2448 /**
2449 * @brief Get an element from a FIFO queue.
2450 *
2451 * This routine removes a data item from @a fifo in a "first in, first out"
2452 * manner. The first word of the data item is reserved for the kernel's use.
2453 *
2454 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2455 *
2456 * @funcprops \isr_ok
2457 *
2458 * @param fifo Address of the FIFO queue.
2459 * @param timeout Waiting period to obtain a data item,
2460 * or one of the special values K_NO_WAIT and K_FOREVER.
2461 *
2462 * @return Address of the data item if successful; NULL if returned
2463 * without waiting, or waiting period timed out.
2464 */
2465 #define k_fifo_get(fifo, timeout) \
2466 ({ \
2467 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, get, fifo, timeout); \
2468 void *ret = k_queue_get(&(fifo)->_queue, timeout); \
2469 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, get, fifo, timeout, ret); \
2470 ret; \
2471 })
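
/*
 * Example (illustrative sketch), mirroring the k_queue example: FIFO
 * items reserve their first word for the kernel. All names are
 * hypothetical.
 *
 *   struct data_item {
 *       void *reserved;   // first word reserved for the kernel
 *       uint32_t value;
 *   };
 *
 *   K_FIFO_DEFINE(my_fifo);
 *
 *   void producer(void)
 *   {
 *       static struct data_item tx = { .value = 1 };
 *
 *       k_fifo_put(&my_fifo, &tx);
 *   }
 *
 *   void consumer(void)
 *   {
 *       struct data_item *rx = k_fifo_get(&my_fifo, K_FOREVER);
 *   }
 */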
2472
2473 /**
2474 * @brief Query a FIFO queue to see if it has data available.
2475 *
2476 * Note that the data might already be gone by the time this function returns
2477 * if other threads are also trying to read from the FIFO.
2478 *
2479 * @funcprops \isr_ok
2480 *
2481 * @param fifo Address of the FIFO queue.
2482 *
2483 * @return Non-zero if the FIFO queue is empty.
2484 * @return 0 if data is available.
2485 */
2486 #define k_fifo_is_empty(fifo) \
2487 k_queue_is_empty(&(fifo)->_queue)
2488
2489 /**
2490 * @brief Peek element at the head of a FIFO queue.
2491 *
2492 * Return the element at the head of the FIFO queue without removing it. A
2493 * use case for this is when elements of the FIFO queue are themselves
2494 * containers. Then, on each iteration of processing, the head container is
2495 * peeked and some data is processed out of it; only when the container is
2496 * empty is it completely removed from the FIFO queue.
2497 *
2498 * @param fifo Address of the FIFO queue.
2499 *
2500 * @return Head element, or NULL if the FIFO queue is empty.
2501 */
2502 #define k_fifo_peek_head(fifo) \
2503 ({ \
2504 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_head, fifo); \
2505 void *ret = k_queue_peek_head(&(fifo)->_queue); \
2506 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_head, fifo, ret); \
2507 ret; \
2508 })
2509
2510 /**
2511 * @brief Peek element at the tail of FIFO queue.
2512 *
2513 * Return the element at the tail of the FIFO queue (without removing it). A
2514 * use case for this is when elements of the FIFO queue are themselves
2515 * containers; it may then be useful to add more data to the last container.
2516 *
2517 * @param fifo Address of the FIFO queue.
2518 *
2519 * @return Tail element, or NULL if the FIFO queue is empty.
2520 */
2521 #define k_fifo_peek_tail(fifo) \
2522 ({ \
2523 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_tail, fifo); \
2524 void *ret = k_queue_peek_tail(&(fifo)->_queue); \
2525 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_tail, fifo, ret); \
2526 ret; \
2527 })
2528
2529 /**
2530 * @brief Statically define and initialize a FIFO queue.
2531 *
2532 * The FIFO queue can be accessed outside the module where it is defined using:
2533 *
2534 * @code extern struct k_fifo <name>; @endcode
2535 *
2536 * @param name Name of the FIFO queue.
2537 */
2538 #define K_FIFO_DEFINE(name) \
2539 STRUCT_SECTION_ITERABLE_ALTERNATE(k_queue, k_fifo, name) = \
2540 Z_FIFO_INITIALIZER(name)
2541
2542 /** @} */
2543
2544 struct k_lifo {
2545 struct k_queue _queue;
2546 };
2547
2548 /**
2549 * @cond INTERNAL_HIDDEN
2550 */
2551
2552 #define Z_LIFO_INITIALIZER(obj) \
2553 { \
2554 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
2555 }
2556
2557 /**
2558 * INTERNAL_HIDDEN @endcond
2559 */
2560
2561 /**
2562 * @defgroup lifo_apis LIFO APIs
2563 * @ingroup kernel_apis
2564 * @{
2565 */
2566
2567 /**
2568 * @brief Initialize a LIFO queue.
2569 *
2570 * This routine initializes a LIFO queue object, prior to its first use.
2571 *
2572 * @param lifo Address of the LIFO queue.
2573 */
2574 #define k_lifo_init(lifo) \
2575 ({ \
2576 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, init, lifo); \
2577 k_queue_init(&(lifo)->_queue); \
2578 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, init, lifo); \
2579 })
2580
2581 /**
2582 * @brief Add an element to a LIFO queue.
2583 *
2584 * This routine adds a data item to @a lifo. A LIFO queue data item must be
2585 * aligned on a word boundary, and the first word of the item is
2586 * reserved for the kernel's use.
2587 *
2588 * @funcprops \isr_ok
2589 *
2590 * @param lifo Address of the LIFO queue.
2591 * @param data Address of the data item.
2592 */
2593 #define k_lifo_put(lifo, data) \
2594 ({ \
2595 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, put, lifo, data); \
2596 k_queue_prepend(&(lifo)->_queue, data); \
2597 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, put, lifo, data); \
2598 })
2599
2600 /**
2601 * @brief Add an element to a LIFO queue.
2602 *
2603 * This routine adds a data item to @a lifo. There is an implicit memory
2604 * allocation to create an additional temporary bookkeeping data structure from
2605 * the calling thread's resource pool, which is automatically freed when the
2606 * item is removed. The data itself is not copied.
2607 *
2608 * @funcprops \isr_ok
2609 *
2610 * @param lifo Address of the LIFO queue.
2611 * @param data Address of the data item.
2612 *
2613 * @retval 0 on success
2614 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2615 */
2616 #define k_lifo_alloc_put(lifo, data) \
2617 ({ \
2618 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, alloc_put, lifo, data); \
2619 int ret = k_queue_alloc_prepend(&(lifo)->_queue, data); \
2620 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, alloc_put, lifo, data, ret); \
2621 ret; \
2622 })
2623
2624 /**
2625 * @brief Get an element from a LIFO queue.
2626 *
2627 * This routine removes a data item from @a lifo in a "last in, first out"
2628 * manner. The first word of the data item is reserved for the kernel's use.
2629 *
2630 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2631 *
2632 * @funcprops \isr_ok
2633 *
2634 * @param lifo Address of the LIFO queue.
2635 * @param timeout Waiting period to obtain a data item,
2636 * or one of the special values K_NO_WAIT and K_FOREVER.
2637 *
2638 * @return Address of the data item if successful; NULL if returned
2639 * without waiting, or waiting period timed out.
2640 */
2641 #define k_lifo_get(lifo, timeout) \
2642 ({ \
2643 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, get, lifo, timeout); \
2644 void *ret = k_queue_get(&(lifo)->_queue, timeout); \
2645 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, get, lifo, timeout, ret); \
2646 ret; \
2647 })
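
/*
 * Example (illustrative sketch): LIFO usage matches FIFO usage, except
 * that the most recently added item is returned first. item_a and item_b
 * are hypothetical items whose first word is reserved for the kernel.
 *
 *   K_LIFO_DEFINE(my_lifo);
 *
 *   k_lifo_put(&my_lifo, &item_a);
 *   k_lifo_put(&my_lifo, &item_b);
 *
 *   void *latest = k_lifo_get(&my_lifo, K_NO_WAIT);  // yields item_b
 */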
2648
2649 /**
2650 * @brief Statically define and initialize a LIFO queue.
2651 *
2652 * The LIFO queue can be accessed outside the module where it is defined using:
2653 *
2654 * @code extern struct k_lifo <name>; @endcode
2655 *
2656 * @param name Name of the LIFO queue.
2657 */
2658 #define K_LIFO_DEFINE(name) \
2659 STRUCT_SECTION_ITERABLE_ALTERNATE(k_queue, k_lifo, name) = \
2660 Z_LIFO_INITIALIZER(name)
2661
2662 /** @} */
2663
2664 /**
2665 * @cond INTERNAL_HIDDEN
2666 */
2667 #define K_STACK_FLAG_ALLOC ((uint8_t)1) /* Buffer was allocated */
2668
2669 typedef uintptr_t stack_data_t;
2670
2671 struct k_stack {
2672 _wait_q_t wait_q;
2673 struct k_spinlock lock;
2674 stack_data_t *base, *next, *top;
2675
2676 uint8_t flags;
2677
2678 SYS_PORT_TRACING_TRACKING_FIELD(k_stack)
2679 };
2680
2681 #define Z_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
2682 { \
2683 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2684 .base = stack_buffer, \
2685 .next = stack_buffer, \
2686 .top = stack_buffer + stack_num_entries, \
2687 }
2688
2689 /**
2690 * INTERNAL_HIDDEN @endcond
2691 */
2692
2693 /**
2694 * @defgroup stack_apis Stack APIs
2695 * @ingroup kernel_apis
2696 * @{
2697 */
2698
2699 /**
2700 * @brief Initialize a stack.
2701 *
2702 * This routine initializes a stack object, prior to its first use.
2703 *
2704 * @param stack Address of the stack.
2705 * @param buffer Address of array used to hold stacked values.
2706 * @param num_entries Maximum number of values that can be stacked.
2707 */
2708 void k_stack_init(struct k_stack *stack,
2709 stack_data_t *buffer, uint32_t num_entries);
2710
2711
2712 /**
2713 * @brief Initialize a stack.
2714 *
2715 * This routine initializes a stack object, prior to its first use. Internal
2716 * buffers will be allocated from the calling thread's resource pool.
2717 * This memory will be released if k_stack_cleanup() is called, or
2718 * userspace is enabled and the stack object loses all references to it.
2719 *
2720 * @param stack Address of the stack.
2721 * @param num_entries Maximum number of values that can be stacked.
2722 *
2723 * @retval 0 on success
 * @retval -ENOMEM if memory couldn't be allocated
2724 */
2725
2726 __syscall int32_t k_stack_alloc_init(struct k_stack *stack,
2727 uint32_t num_entries);
2728
2729 /**
2730 * @brief Release a stack's allocated buffer
2731 *
2732 * If a stack object was given a dynamically allocated buffer via
2733 * k_stack_alloc_init(), this will free it. This function does nothing
2734 * if the buffer wasn't dynamically allocated.
2735 *
2736 * @param stack Address of the stack.
2737 * @retval 0 on success
2738 * @retval -EAGAIN when object is still in use
2739 */
2740 int k_stack_cleanup(struct k_stack *stack);
2741
2742 /**
2743 * @brief Push an element onto a stack.
2744 *
2745 * This routine adds a stack_data_t value @a data to @a stack.
2746 *
2747 * @funcprops \isr_ok
2748 *
2749 * @param stack Address of the stack.
2750 * @param data Value to push onto the stack.
2751 *
2752 * @retval 0 on success
2753 * @retval -ENOMEM if stack is full
2754 */
2755 __syscall int k_stack_push(struct k_stack *stack, stack_data_t data);
2756
2757 /**
2758 * @brief Pop an element from a stack.
2759 *
2760 * This routine removes a stack_data_t value from @a stack in a "last in,
2761 * first out" manner and stores the value in @a data.
2762 *
2763 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2764 *
2765 * @funcprops \isr_ok
2766 *
2767 * @param stack Address of the stack.
2768 * @param data Address of area to hold the value popped from the stack.
2769 * @param timeout Waiting period to obtain a value,
2770 * or one of the special values K_NO_WAIT and
2771 * K_FOREVER.
2772 *
2773 * @retval 0 Element popped from stack.
2774 * @retval -EBUSY Returned without waiting.
2775 * @retval -EAGAIN Waiting period timed out.
2776 */
2777 __syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data,
2778 k_timeout_t timeout);
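
/*
 * Example (illustrative sketch): a stack of word-sized tokens, such as
 * indexes into a buffer pool. Sizes and values are hypothetical.
 *
 *   K_STACK_DEFINE(my_stack, 8);
 *
 *   void give_token(void)
 *   {
 *       k_stack_push(&my_stack, (stack_data_t)3);
 *   }
 *
 *   void take_token(void)
 *   {
 *       stack_data_t token;
 *
 *       if (k_stack_pop(&my_stack, &token, K_FOREVER) == 0) {
 *           // use token
 *       }
 *   }
 */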
2779
2780 /**
2781 * @brief Statically define and initialize a stack
2782 *
2783 * The stack can be accessed outside the module where it is defined using:
2784 *
2785 * @code extern struct k_stack <name>; @endcode
2786 *
2787 * @param name Name of the stack.
2788 * @param stack_num_entries Maximum number of values that can be stacked.
2789 */
2790 #define K_STACK_DEFINE(name, stack_num_entries) \
2791 stack_data_t __noinit \
2792 _k_stack_buf_##name[stack_num_entries]; \
2793 STRUCT_SECTION_ITERABLE(k_stack, name) = \
2794 Z_STACK_INITIALIZER(name, _k_stack_buf_##name, \
2795 stack_num_entries)
2796
2797 /** @} */
2798
2799 /**
2800 * @cond INTERNAL_HIDDEN
2801 */
2802
2803 struct k_work;
2804 struct k_work_q;
2805 struct k_work_queue_config;
2806 extern struct k_work_q k_sys_work_q;
2807
2808 /**
2809 * INTERNAL_HIDDEN @endcond
2810 */
2811
2812 /**
2813 * @defgroup mutex_apis Mutex APIs
2814 * @ingroup kernel_apis
2815 * @{
2816 */
2817
2818 /**
2819 * Mutex Structure
2820 * @ingroup mutex_apis
2821 */
2822 struct k_mutex {
2823 /** Mutex wait queue */
2824 _wait_q_t wait_q;
2825 /** Mutex owner */
2826 struct k_thread *owner;
2827
2828 /** Current lock count */
2829 uint32_t lock_count;
2830
2831 /** Original thread priority */
2832 int owner_orig_prio;
2833
2834 SYS_PORT_TRACING_TRACKING_FIELD(k_mutex)
2835 };
2836
2837 /**
2838 * @cond INTERNAL_HIDDEN
2839 */
2840 #define Z_MUTEX_INITIALIZER(obj) \
2841 { \
2842 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2843 .owner = NULL, \
2844 .lock_count = 0, \
2845 .owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
2846 }
2847
2848 /**
2849 * INTERNAL_HIDDEN @endcond
2850 */
2851
2852 /**
2853 * @brief Statically define and initialize a mutex.
2854 *
2855 * The mutex can be accessed outside the module where it is defined using:
2856 *
2857 * @code extern struct k_mutex <name>; @endcode
2858 *
2859 * @param name Name of the mutex.
2860 */
2861 #define K_MUTEX_DEFINE(name) \
2862 STRUCT_SECTION_ITERABLE(k_mutex, name) = \
2863 Z_MUTEX_INITIALIZER(name)
2864
2865 /**
2866 * @brief Initialize a mutex.
2867 *
2868 * This routine initializes a mutex object, prior to its first use.
2869 *
2870 * Upon completion, the mutex is available and does not have an owner.
2871 *
2872 * @param mutex Address of the mutex.
2873 *
2874 * @retval 0 Mutex object created
2875 *
2876 */
2877 __syscall int k_mutex_init(struct k_mutex *mutex);
2878
2879
2880 /**
2881 * @brief Lock a mutex.
2882 *
2883 * This routine locks @a mutex. If the mutex is locked by another thread,
2884 * the calling thread waits until the mutex becomes available or until
2885 * a timeout occurs.
2886 *
2887 * A thread is permitted to lock a mutex it has already locked. The operation
2888 * completes immediately and the lock count is increased by 1.
2889 *
2890 * Mutexes may not be locked in ISRs.
2891 *
2892 * @param mutex Address of the mutex.
2893 * @param timeout Waiting period to lock the mutex,
2894 * or one of the special values K_NO_WAIT and
2895 * K_FOREVER.
2896 *
2897 * @retval 0 Mutex locked.
2898 * @retval -EBUSY Returned without waiting.
2899 * @retval -EAGAIN Waiting period timed out.
2900 */
2901 __syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout);
2902
2903 /**
2904 * @brief Unlock a mutex.
2905 *
2906 * This routine unlocks @a mutex. The mutex must already be locked by the
2907 * calling thread.
2908 *
2909 * The mutex cannot be claimed by another thread until it has been unlocked by
2910 * the calling thread as many times as it was previously locked by that
2911 * thread.
2912 *
2913 * Mutexes may not be unlocked in ISRs, as mutexes must only be manipulated
2914 * in thread context due to ownership and priority inheritance semantics.
2915 *
2916 * @param mutex Address of the mutex.
2917 *
2918 * @retval 0 Mutex unlocked.
2919 * @retval -EPERM The current thread does not own the mutex
2920 * @retval -EINVAL The mutex is not locked
2921 *
2922 */
2923 __syscall int k_mutex_unlock(struct k_mutex *mutex);
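
/*
 * Example (illustrative sketch): protect a shared counter with a
 * statically defined mutex; shared_counter is hypothetical.
 *
 *   K_MUTEX_DEFINE(my_mutex);
 *   static int shared_counter;
 *
 *   void increment(void)
 *   {
 *       if (k_mutex_lock(&my_mutex, K_FOREVER) == 0) {
 *           shared_counter++;
 *           k_mutex_unlock(&my_mutex);
 *       }
 *   }
 */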
2924
2925 /**
2926 * @}
2927 */
2928
2929
2930 struct k_condvar {
2931 _wait_q_t wait_q;
2932 };
2933
2934 #define Z_CONDVAR_INITIALIZER(obj) \
2935 { \
2936 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2937 }
2938
2939 /**
2940 * @defgroup condvar_apis Condition Variables APIs
2941 * @ingroup kernel_apis
2942 * @{
2943 */
2944
2945 /**
2946 * @brief Initialize a condition variable
2947 *
2948 * @param condvar pointer to a @p k_condvar structure
2949 * @retval 0 Condition variable created successfully
2950 */
2951 __syscall int k_condvar_init(struct k_condvar *condvar);
2952
2953 /**
2954 * @brief Signals one thread that is pending on the condition variable
2955 *
2956 * @param condvar pointer to a @p k_condvar structure
2957 * @retval 0 On success
2958 */
2959 __syscall int k_condvar_signal(struct k_condvar *condvar);
2960
2961 /**
2962 * @brief Unblock all threads that are pending on the condition
2963 * variable
2964 *
2965 * @param condvar pointer to a @p k_condvar structure
2966 * @return An integer with the number of woken threads on success
2967 */
2968 __syscall int k_condvar_broadcast(struct k_condvar *condvar);
2969
2970 /**
2971 * @brief Waits on the condition variable releasing the mutex lock
2972 *
2973 * Atomically releases the currently owned mutex, blocks the current thread
2974 * waiting on the condition variable specified by @a condvar,
2975 * and finally acquires the mutex again.
2976 *
2977 * The waiting thread unblocks only after another thread calls
2978 * k_condvar_signal, or k_condvar_broadcast with the same condition variable.
2979 *
2980 * @param condvar pointer to a @p k_condvar structure
2981 * @param mutex Address of the mutex.
2982 * @param timeout Waiting period for the condition variable
2983 * or one of the special values K_NO_WAIT and K_FOREVER.
2984 * @retval 0 On success
2985 * @retval -EAGAIN Waiting period timed out.
2986 */
2987 __syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
2988 k_timeout_t timeout);
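
/*
 * Example (illustrative sketch) of the standard condition-variable
 * pattern: the predicate is always re-tested in a loop after waking.
 * Names and the `ready` flag are hypothetical.
 *
 *   K_MUTEX_DEFINE(my_mutex);
 *   K_CONDVAR_DEFINE(my_condvar);
 *   static bool ready;
 *
 *   void waiter(void)
 *   {
 *       k_mutex_lock(&my_mutex, K_FOREVER);
 *       while (!ready) {
 *           k_condvar_wait(&my_condvar, &my_mutex, K_FOREVER);
 *       }
 *       k_mutex_unlock(&my_mutex);
 *   }
 *
 *   void signaler(void)
 *   {
 *       k_mutex_lock(&my_mutex, K_FOREVER);
 *       ready = true;
 *       k_condvar_signal(&my_condvar);
 *       k_mutex_unlock(&my_mutex);
 *   }
 */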
2989
2990 /**
2991 * @brief Statically define and initialize a condition variable.
2992 *
2993 * The condition variable can be accessed outside the module where it is
2994 * defined using:
2995 *
2996 * @code extern struct k_condvar <name>; @endcode
2997 *
2998 * @param name Name of the condition variable.
2999 */
3000 #define K_CONDVAR_DEFINE(name) \
3001 STRUCT_SECTION_ITERABLE(k_condvar, name) = \
3002 Z_CONDVAR_INITIALIZER(name)
3003 /**
3004 * @}
3005 */
3006
3007 /**
3008 * @cond INTERNAL_HIDDEN
3009 */
3010
3011 struct k_sem {
3012 _wait_q_t wait_q;
3013 unsigned int count;
3014 unsigned int limit;
3015
3016 _POLL_EVENT;
3017
3018 SYS_PORT_TRACING_TRACKING_FIELD(k_sem)
3019
3020 };
3021
3022 #define Z_SEM_INITIALIZER(obj, initial_count, count_limit) \
3023 { \
3024 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
3025 .count = initial_count, \
3026 .limit = count_limit, \
3027 _POLL_EVENT_OBJ_INIT(obj) \
3028 }
3029
3030 /**
3031 * INTERNAL_HIDDEN @endcond
3032 */
3033
3034 /**
3035 * @defgroup semaphore_apis Semaphore APIs
3036 * @ingroup kernel_apis
3037 * @{
3038 */
3039
3040 /**
3041 * @brief Maximum limit value allowed for a semaphore.
3042 *
3043 * This is intended for use when a semaphore does not have
3044 * an explicit maximum limit, and instead is just used for
3045 * counting purposes.
3046 *
3047 */
3048 #define K_SEM_MAX_LIMIT UINT_MAX
3049
3050 /**
3051 * @brief Initialize a semaphore.
3052 *
3053 * This routine initializes a semaphore object, prior to its first use.
3054 *
3055 * @param sem Address of the semaphore.
3056 * @param initial_count Initial semaphore count.
3057 * @param limit Maximum permitted semaphore count.
3058 *
3059 * @see K_SEM_MAX_LIMIT
3060 *
3061 * @retval 0 Semaphore created successfully
3062 * @retval -EINVAL Invalid values
3063 *
3064 */
3065 __syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
3066 unsigned int limit);
3067
3068 /**
3069 * @brief Take a semaphore.
3070 *
3071 * This routine takes @a sem.
3072 *
3073 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
3074 *
3075 * @funcprops \isr_ok
3076 *
3077 * @param sem Address of the semaphore.
3078 * @param timeout Waiting period to take the semaphore,
3079 * or one of the special values K_NO_WAIT and K_FOREVER.
3080 *
3081 * @retval 0 Semaphore taken.
3082 * @retval -EBUSY Returned without waiting.
3083 * @retval -EAGAIN Waiting period timed out,
3084 * or the semaphore was reset during the waiting period.
3085 */
3086 __syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
3087
3088 /**
3089 * @brief Give a semaphore.
3090 *
3091 * This routine gives @a sem, unless the semaphore is already at its maximum
3092 * permitted count.
3093 *
3094 * @funcprops \isr_ok
3095 *
3096 * @param sem Address of the semaphore.
3097 */
3098 __syscall void k_sem_give(struct k_sem *sem);
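
/*
 * Example (illustrative sketch): signal a thread from an ISR with a
 * binary-style semaphore (limit 1). Handler names are hypothetical.
 *
 *   K_SEM_DEFINE(my_sem, 0, 1);
 *
 *   void my_isr(const void *arg)
 *   {
 *       k_sem_give(&my_sem);
 *   }
 *
 *   void handler_thread(void)
 *   {
 *       while (k_sem_take(&my_sem, K_FOREVER) == 0) {
 *           // process the event signaled by the ISR
 *       }
 *   }
 */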
3099
3100 /**
3101 * @brief Resets a semaphore's count to zero.
3102 *
3103 * This routine sets the count of @a sem to zero.
3104 * Any outstanding semaphore takes will be aborted
3105 * with -EAGAIN.
3106 *
3107 * @param sem Address of the semaphore.
3108 */
3109 __syscall void k_sem_reset(struct k_sem *sem);
3110
3111 /**
3112 * @brief Get a semaphore's count.
3113 *
3114 * This routine returns the current count of @a sem.
3115 *
3116 * @param sem Address of the semaphore.
3117 *
3118 * @return Current semaphore count.
3119 */
3120 __syscall unsigned int k_sem_count_get(struct k_sem *sem);
3121
3122 /**
3123 * @internal
3124 */
3125 static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
3126 {
3127 return sem->count;
3128 }
3129
3130 /**
3131 * @brief Statically define and initialize a semaphore.
3132 *
3133 * The semaphore can be accessed outside the module where it is defined using:
3134 *
3135 * @code extern struct k_sem <name>; @endcode
3136 *
3137 * @param name Name of the semaphore.
3138 * @param initial_count Initial semaphore count.
3139 * @param count_limit Maximum permitted semaphore count.
3140 */
3141 #define K_SEM_DEFINE(name, initial_count, count_limit) \
3142 STRUCT_SECTION_ITERABLE(k_sem, name) = \
3143 Z_SEM_INITIALIZER(name, initial_count, count_limit); \
3144 BUILD_ASSERT(((count_limit) != 0) && \
3145 ((initial_count) <= (count_limit)) && \
3146 ((count_limit) <= K_SEM_MAX_LIMIT));
3147
3148 /** @} */
3149
3150 /**
3151 * @cond INTERNAL_HIDDEN
3152 */
3153
3154 struct k_work_delayable;
3155 struct k_work_sync;
3156
3157 /**
3158 * INTERNAL_HIDDEN @endcond
3159 */
3160
3161 /**
3162 * @defgroup workqueue_apis Work Queue APIs
3163 * @ingroup kernel_apis
3164 * @{
3165 */
3166
3167 /** @brief The signature for a work item handler function.
3168 *
3169 * The function will be invoked by the thread animating a work queue.
3170 *
3171 * @param work the work item that provided the handler.
3172 */
3173 typedef void (*k_work_handler_t)(struct k_work *work);
3174
3175 /** @brief Initialize a (non-delayable) work structure.
3176 *
3177 * This must be invoked before submitting a work structure for the first time.
3178 * It need not be invoked again on the same work structure. It can be
3179 * re-invoked to change the associated handler, but this must be done when the
3180 * work item is idle.
3181 *
3182 * @funcprops \isr_ok
3183 *
3184 * @param work the work structure to be initialized.
3185 *
3186 * @param handler the handler to be invoked by the work item.
3187 */
3188 void k_work_init(struct k_work *work,
3189 k_work_handler_t handler);
3190
3191 /** @brief Busy state flags from the work item.
3192 *
3193 * A zero return value indicates the work item appears to be idle.
3194 *
3195 * @note This is a live snapshot of state, which may change before the result
3196 * is checked. Use locks where appropriate.
3197 *
3198 * @funcprops \isr_ok
3199 *
3200 * @param work pointer to the work item.
3201 *
3202 * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED,
3203 * K_WORK_RUNNING, and K_WORK_CANCELING.
3204 */
3205 int k_work_busy_get(const struct k_work *work);
3206
3207 /** @brief Test whether a work item is currently pending.
3208 *
3209 * Wrapper to determine whether a work item is in a non-idle state.
3210 *
3211 * @note This is a live snapshot of state, which may change before the result
3212 * is checked. Use locks where appropriate.
3213 *
3214 * @funcprops \isr_ok
3215 *
3216 * @param work pointer to the work item.
3217 *
3218 * @return true if and only if k_work_busy_get() returns a non-zero value.
3219 */
3220 static inline bool k_work_is_pending(const struct k_work *work);
3221
3222 /** @brief Submit a work item to a queue.
3223 *
3224 * @param queue pointer to the work queue on which the item should run. If
3225 * NULL the queue from the most recent submission will be used.
3226 *
3227 * @funcprops \isr_ok
3228 *
3229 * @param work pointer to the work item.
3230 *
3231 * @retval 0 if work was already submitted to a queue
3232 * @retval 1 if work was not submitted and has been queued to @p queue
3233 * @retval 2 if work was running and has been queued to the queue that was
3234 * running it
3235 * @retval -EBUSY
3236 * * if work submission was rejected because the work item is cancelling; or
3237 * * @p queue is draining; or
3238 * * @p queue is plugged.
3239 * @retval -EINVAL if @p queue is null and the work item has never been run.
3240 * @retval -ENODEV if @p queue has not been started.
3241 */
3242 int k_work_submit_to_queue(struct k_work_q *queue,
3243 struct k_work *work);
3244
3245 /** @brief Submit a work item to the system queue.
3246 *
3247 * @funcprops \isr_ok
3248 *
3249 * @param work pointer to the work item.
3250 *
3251 * @return as with k_work_submit_to_queue().
3252 */
3253 extern int k_work_submit(struct k_work *work);
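
/*
 * Example (illustrative sketch): hand processing from an ISR to the
 * system work queue. The handler recovers its containing structure with
 * CONTAINER_OF(); read_hw() and all other names are hypothetical.
 *
 *   struct my_work_ctx {
 *       struct k_work work;
 *       uint32_t data;
 *   };
 *
 *   static struct my_work_ctx ctx;
 *
 *   static void my_handler(struct k_work *work)
 *   {
 *       struct my_work_ctx *c = CONTAINER_OF(work, struct my_work_ctx, work);
 *       // process c->data in thread context
 *   }
 *
 *   void init(void)
 *   {
 *       k_work_init(&ctx.work, my_handler);
 *   }
 *
 *   void my_isr(const void *arg)
 *   {
 *       ctx.data = read_hw();
 *       k_work_submit(&ctx.work);
 *   }
 */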
3254
3255 /** @brief Wait for last-submitted instance to complete.
3256 *
3257 * Resubmissions may occur while waiting, including chained submissions (from
3258 * within the handler).
3259 *
3260 * @note Be careful of caller and work queue thread relative priority. If
3261 * this function sleeps it will not return until the work queue thread
3262 * completes the tasks that allow this thread to resume.
3263 *
3264 * @note Behavior is undefined if this function is invoked on @p work from a
3265 * work queue running @p work.
3266 *
3267 * @param work pointer to the work item.
3268 *
3269 * @param sync pointer to an opaque item containing state related to the
3270 * pending cancellation. The object must persist until the call returns, and
3271 * be accessible from both the caller thread and the work queue thread. The
3272 * object must not be used for any other flush or cancel operation until this
3273 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3274 * must be allocated in coherent memory.
3275 *
3276 * @retval true if call had to wait for completion
3277 * @retval false if work was already idle
3278 */
3279 bool k_work_flush(struct k_work *work,
3280 struct k_work_sync *sync);
3281
3282 /** @brief Cancel a work item.
3283 *
3284 * This attempts to prevent a pending (non-delayable) work item from being
3285 * processed by removing it from the work queue. If the item is being
3286 * processed, the work item will continue to be processed, but resubmissions
3287 * are rejected until cancellation completes.
3288 *
3289 * If this returns zero cancellation is complete, otherwise something
3290 * (probably a work queue thread) is still referencing the item.
3291 *
3292 * See also k_work_cancel_sync().
3293 *
3294 * @funcprops \isr_ok
3295 *
3296 * @param work pointer to the work item.
3297 *
3298 * @return the k_work_busy_get() status indicating the state of the item after all
3299 * cancellation steps performed by this call are completed.
3300 */
3301 int k_work_cancel(struct k_work *work);
3302
3303 /** @brief Cancel a work item and wait for it to complete.
3304 *
3305 * Same as k_work_cancel() but does not return until cancellation is complete.
3306 * This can be invoked by a thread after k_work_cancel() to synchronize with a
3307 * previous cancellation.
3308 *
3309 * On return the work structure will be idle unless something submits it after
3310 * the cancellation was complete.
3311 *
3312 * @note Be careful of caller and work queue thread relative priority. If
3313 * this function sleeps it will not return until the work queue thread
3314 * completes the tasks that allow this thread to resume.
3315 *
3316 * @note Behavior is undefined if this function is invoked on @p work from a
3317 * work queue running @p work.
3318 *
3319 * @param work pointer to the work item.
3320 *
3321 * @param sync pointer to an opaque item containing state related to the
3322 * pending cancellation. The object must persist until the call returns, and
3323 * be accessible from both the caller thread and the work queue thread. The
3324 * object must not be used for any other flush or cancel operation until this
3325 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3326 * must be allocated in coherent memory.
3327 *
3328 * @retval true if work was pending (call had to wait for cancellation of a
3329 * running handler to complete, or scheduled or submitted operations were
3330 * cancelled);
3331 * @retval false otherwise
3332 */
3333 bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync);
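
/*
 * Example (illustrative sketch): synchronously cancel a work item during
 * shutdown; the k_work_sync object can live on the caller's stack
 * (subject to the coherence note above). my_work is hypothetical and
 * assumed initialized elsewhere.
 *
 *   static struct k_work my_work;
 *
 *   void shutdown(void)
 *   {
 *       struct k_work_sync sync;
 *
 *       (void)k_work_cancel_sync(&my_work, &sync);
 *   }
 */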
3334
3335 /** @brief Initialize a work queue structure.
3336 *
3337 * This must be invoked before starting a work queue structure for the first time.
3338 * It need not be invoked again on the same work queue structure.
3339 *
3340 * @funcprops \isr_ok
3341 *
3342 * @param queue the queue structure to be initialized.
3343 */
3344 void k_work_queue_init(struct k_work_q *queue);
3345
3346 /** @brief Initialize a work queue.
3347 *
3348 * This configures the work queue thread and starts it running. The function
3349 * should not be re-invoked on a queue.
3350 *
3351 * @param queue pointer to the queue structure. It must be initialized
3352 * in zeroed/bss memory or with @ref k_work_queue_init before
3353 * use.
3354 *
3355 * @param stack pointer to the work thread stack area.
3356 *
3357 * @param stack_size size of the work thread stack area, in bytes.
3358 *
3359 * @param prio initial thread priority
3360 *
3361 * @param cfg optional additional configuration parameters. Pass @c
3362 * NULL if not required, to use the defaults documented in
3363 * k_work_queue_config.
3364 */
3365 void k_work_queue_start(struct k_work_q *queue,
3366 k_thread_stack_t *stack, size_t stack_size,
3367 int prio, const struct k_work_queue_config *cfg);
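
/* Illustrative sketch (not part of this header): define a stack area,
 * initialize a queue, and start it with an optional configuration. The
 * stack size, priority, and names are arbitrary example values.
 *
 *	K_THREAD_STACK_DEFINE(my_stack_area, 1024);
 *
 *	static struct k_work_q my_work_q;
 *
 *	void start_my_queue(void)
 *	{
 *		struct k_work_queue_config cfg = {
 *			.name = "mywq",
 *			.no_yield = false,
 *		};
 *
 *		k_work_queue_init(&my_work_q);
 *		k_work_queue_start(&my_work_q, my_stack_area,
 *				   K_THREAD_STACK_SIZEOF(my_stack_area),
 *				   K_PRIO_PREEMPT(4), &cfg);
 *	}
 */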
3368
3369 /** @brief Access the thread that animates a work queue.
3370 *
3371 * This is necessary to grant a work queue thread access to things the work
3372 * items it will process are expected to use.
3373 *
3374 * @param queue pointer to the queue structure.
3375 *
3376 * @return the thread associated with the work queue.
3377 */
3378 static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue);
3379
3380 /** @brief Wait until the work queue has drained, optionally plugging it.
3381 *
3382  * This blocks submission to the work queue except when coming from the queue
3383 * thread, and blocks the caller until no more work items are available in the
3384 * queue.
3385 *
3386 * If @p plug is true then submission will continue to be blocked after the
3387 * drain operation completes until k_work_queue_unplug() is invoked.
3388 *
3389 * Note that work items that are delayed are not yet associated with their
3390  * work queue. They must be cancelled externally if the goal is to ensure the
3391 * work queue remains empty. The @p plug feature can be used to prevent
3392 * delayed items from being submitted after the drain completes.
3393 *
3394 * @param queue pointer to the queue structure.
3395 *
3396 * @param plug if true the work queue will continue to block new submissions
3397 * after all items have drained.
3398 *
3399 * @retval 1 if call had to wait for the drain to complete
3400 * @retval 0 if call did not have to wait
3401 * @retval negative if wait was interrupted or failed
3402 */
3403 int k_work_queue_drain(struct k_work_q *queue, bool plug);
3404
3405 /** @brief Release a work queue to accept new submissions.
3406 *
3407 * This releases the block on new submissions placed when k_work_queue_drain()
3408 * is invoked with the @p plug option enabled. If this is invoked before the
3409  * drain completes, new items may be submitted as soon as the drain completes.
3410 *
3411 * @funcprops \isr_ok
3412 *
3413 * @param queue pointer to the queue structure.
3414 *
3415 * @retval 0 if successfully unplugged
3416 * @retval -EALREADY if the work queue was not plugged.
3417 */
3418 int k_work_queue_unplug(struct k_work_q *queue);
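
/* Illustrative sketch (not part of this header): quiesce a queue before a
 * power transition, then reopen it. my_work_q and enter_low_power_mode()
 * are hypothetical.
 *
 *	// Wait for pending items to finish and block new submissions.
 *	(void)k_work_queue_drain(&my_work_q, true);
 *
 *	enter_low_power_mode();
 *
 *	// Accept submissions again.
 *	(void)k_work_queue_unplug(&my_work_q);
 */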
3419
3420 /** @brief Initialize a delayable work structure.
3421 *
3422 * This must be invoked before scheduling a delayable work structure for the
3423 * first time. It need not be invoked again on the same work structure. It
3424 * can be re-invoked to change the associated handler, but this must be done
3425 * when the work item is idle.
3426 *
3427 * @funcprops \isr_ok
3428 *
3429 * @param dwork the delayable work structure to be initialized.
3430 *
3431 * @param handler the handler to be invoked by the work item.
3432 */
3433 void k_work_init_delayable(struct k_work_delayable *dwork,
3434 k_work_handler_t handler);
3435
3436 /**
3437 * @brief Get the parent delayable work structure from a work pointer.
3438 *
3439 * This function is necessary when a @c k_work_handler_t function is passed to
3440 * k_work_schedule_for_queue() and the handler needs to access data from the
3441  * structure that contains the `k_work_delayable`.
3442 *
3443 * @param work Address passed to the work handler
3444 *
3445 * @return Address of the containing @c k_work_delayable structure.
3446 */
3447 static inline struct k_work_delayable *
3448 k_work_delayable_from_work(struct k_work *work);
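
/* Illustrative sketch (not part of this header): a handler recovering its
 * enclosing user structure. struct my_device and its fields are
 * hypothetical.
 *
 *	struct my_device {
 *		struct k_work_delayable dwork;
 *		int counter;
 *	};
 *
 *	static void my_handler(struct k_work *work)
 *	{
 *		struct k_work_delayable *dwork = k_work_delayable_from_work(work);
 *		struct my_device *dev = CONTAINER_OF(dwork, struct my_device, dwork);
 *
 *		dev->counter++;
 *	}
 */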
3449
3450 /** @brief Busy state flags from the delayable work item.
3451 *
3452 * @funcprops \isr_ok
3453 *
3454 * @note This is a live snapshot of state, which may change before the result
3455 * can be inspected. Use locks where appropriate.
3456 *
3457 * @param dwork pointer to the delayable work item.
3458 *
3459 * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED, K_WORK_RUNNING, and
3460 * K_WORK_CANCELING. A zero return value indicates the work item appears to
3461 * be idle.
3462 */
3463 int k_work_delayable_busy_get(const struct k_work_delayable *dwork);
3464
3465 /** @brief Test whether a delayed work item is currently pending.
3466 *
3467 * Wrapper to determine whether a delayed work item is in a non-idle state.
3468 *
3469 * @note This is a live snapshot of state, which may change before the result
3470 * can be inspected. Use locks where appropriate.
3471 *
3472 * @funcprops \isr_ok
3473 *
3474 * @param dwork pointer to the delayable work item.
3475 *
3476 * @return true if and only if k_work_delayable_busy_get() returns a non-zero
3477 * value.
3478 */
3479 static inline bool k_work_delayable_is_pending(
3480 const struct k_work_delayable *dwork);
3481
3482 /** @brief Get the absolute tick count at which a scheduled delayable work
3483 * will be submitted.
3484 *
3485 * @note This is a live snapshot of state, which may change before the result
3486 * can be inspected. Use locks where appropriate.
3487 *
3488 * @funcprops \isr_ok
3489 *
3490 * @param dwork pointer to the delayable work item.
3491 *
3492 * @return the tick count when the timer that will schedule the work item will
3493 * expire, or the current tick count if the work is not scheduled.
3494 */
3495 static inline k_ticks_t k_work_delayable_expires_get(
3496 const struct k_work_delayable *dwork);
3497
3498 /** @brief Get the number of ticks until a scheduled delayable work will be
3499 * submitted.
3500 *
3501 * @note This is a live snapshot of state, which may change before the result
3502 * can be inspected. Use locks where appropriate.
3503 *
3504 * @funcprops \isr_ok
3505 *
3506 * @param dwork pointer to the delayable work item.
3507 *
3508 * @return the number of ticks until the timer that will schedule the work
3509 * item will expire, or zero if the item is not scheduled.
3510 */
3511 static inline k_ticks_t k_work_delayable_remaining_get(
3512 const struct k_work_delayable *dwork);
3513
3514 /** @brief Submit an idle work item to a queue after a delay.
3515 *
3516 * Unlike k_work_reschedule_for_queue() this is a no-op if the work item is
3517 * already scheduled or submitted, even if @p delay is @c K_NO_WAIT.
3518 *
3519 * @funcprops \isr_ok
3520 *
3521 * @param queue the queue on which the work item should be submitted after the
3522 * delay.
3523 *
3524 * @param dwork pointer to the delayable work item.
3525 *
3526 * @param delay the time to wait before submitting the work item. If @c
3527 * K_NO_WAIT and the work is not pending this is equivalent to
3528 * k_work_submit_to_queue().
3529 *
3530 * @retval 0 if work was already scheduled or submitted.
3531 * @retval 1 if work has been scheduled.
3532 * @retval -EBUSY if @p delay is @c K_NO_WAIT and
3533 * k_work_submit_to_queue() fails with this code.
3534 * @retval -EINVAL if @p delay is @c K_NO_WAIT and
3535 * k_work_submit_to_queue() fails with this code.
3536 * @retval -ENODEV if @p delay is @c K_NO_WAIT and
3537 * k_work_submit_to_queue() fails with this code.
3538 */
3539 int k_work_schedule_for_queue(struct k_work_q *queue,
3540 struct k_work_delayable *dwork,
3541 k_timeout_t delay);
3542
3543 /** @brief Submit an idle work item to the system work queue after a
3544 * delay.
3545 *
3546 * This is a thin wrapper around k_work_schedule_for_queue(), with all the API
3547 * characteristics of that function.
3548 *
3549 * @param dwork pointer to the delayable work item.
3550 *
3551 * @param delay the time to wait before submitting the work item. If @c
3552 * K_NO_WAIT this is equivalent to k_work_submit_to_queue().
3553 *
3554 * @return as with k_work_schedule_for_queue().
3555 */
3556 extern int k_work_schedule(struct k_work_delayable *dwork,
3557 k_timeout_t delay);
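
/* Illustrative sketch (not part of this header): arm a one-shot delayed
 * action on the system work queue. my_timeout_handler is hypothetical.
 *
 *	static struct k_work_delayable my_dwork;
 *
 *	void init_timeout(void)
 *	{
 *		k_work_init_delayable(&my_dwork, my_timeout_handler);
 *	}
 *
 *	void arm_timeout(void)
 *	{
 *		// No-op if already scheduled; otherwise runs after 100 ms.
 *		(void)k_work_schedule(&my_dwork, K_MSEC(100));
 *	}
 */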
3558
3559 /** @brief Reschedule a work item to a queue after a delay.
3560 *
3561 * Unlike k_work_schedule_for_queue() this function can change the deadline of
3562 * a scheduled work item, and will schedule a work item that is in any state
3563 * (e.g. is idle, submitted, or running). This function does not affect
3564 * ("unsubmit") a work item that has been submitted to a queue.
3565 *
3566 * @funcprops \isr_ok
3567 *
3568 * @param queue the queue on which the work item should be submitted after the
3569 * delay.
3570 *
3571 * @param dwork pointer to the delayable work item.
3572 *
3573 * @param delay the time to wait before submitting the work item. If @c
3574 * K_NO_WAIT this is equivalent to k_work_submit_to_queue() after canceling
3575 * any previous scheduled submission.
3576 *
3577 * @note If delay is @c K_NO_WAIT ("no delay") the return values are as with
3578 * k_work_submit_to_queue().
3579 *
3580 * @retval 0 if delay is @c K_NO_WAIT and work was already on a queue
3581 * @retval 1 if
3582 * * delay is @c K_NO_WAIT and work was not submitted but has now been queued
3583 * to @p queue; or
3584 * * delay not @c K_NO_WAIT and work has been scheduled
3585 * @retval 2 if delay is @c K_NO_WAIT and work was running and has been queued
3586 * to the queue that was running it
3587 * @retval -EBUSY if @p delay is @c K_NO_WAIT and
3588 * k_work_submit_to_queue() fails with this code.
3589 * @retval -EINVAL if @p delay is @c K_NO_WAIT and
3590 * k_work_submit_to_queue() fails with this code.
3591 * @retval -ENODEV if @p delay is @c K_NO_WAIT and
3592 * k_work_submit_to_queue() fails with this code.
3593 */
3594 int k_work_reschedule_for_queue(struct k_work_q *queue,
3595 struct k_work_delayable *dwork,
3596 k_timeout_t delay);
3597
3598 /** @brief Reschedule a work item to the system work queue after a
3599 * delay.
3600 *
3601 * This is a thin wrapper around k_work_reschedule_for_queue(), with all the
3602 * API characteristics of that function.
3603 *
3604 * @param dwork pointer to the delayable work item.
3605 *
3606 * @param delay the time to wait before submitting the work item.
3607 *
3608 * @return as with k_work_reschedule_for_queue().
3609 */
3610 extern int k_work_reschedule(struct k_work_delayable *dwork,
3611 k_timeout_t delay);
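
/* Illustrative sketch (not part of this header): a debounce pattern. Each
 * call pushes the deadline out, so the handler runs only once input has
 * been quiet for 50 ms. my_dwork is a hypothetical delayable item that has
 * already been initialized.
 *
 *	void on_input_event(void)
 *	{
 *		(void)k_work_reschedule(&my_dwork, K_MSEC(50));
 *	}
 */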
3612
3613 /** @brief Flush delayable work.
3614 *
3615 * If the work is scheduled, it is immediately submitted. Then the caller
3616 * blocks until the work completes, as with k_work_flush().
3617 *
3618 * @note Be careful of caller and work queue thread relative priority. If
3619 * this function sleeps it will not return until the work queue thread
3620 * completes the tasks that allow this thread to resume.
3621 *
3622 * @note Behavior is undefined if this function is invoked on @p dwork from a
3623 * work queue running @p dwork.
3624 *
3625 * @param dwork pointer to the delayable work item.
3626 *
3627 * @param sync pointer to an opaque item containing state related to the
3628  * pending flush. The object must persist until the call returns, and
3629 * be accessible from both the caller thread and the work queue thread. The
3630 * object must not be used for any other flush or cancel operation until this
3631 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3632 * must be allocated in coherent memory.
3633 *
3634 * @retval true if call had to wait for completion
3635 * @retval false if work was already idle
3636 */
3637 bool k_work_flush_delayable(struct k_work_delayable *dwork,
3638 struct k_work_sync *sync);
3639
3640 /** @brief Cancel delayable work.
3641 *
3642 * Similar to k_work_cancel() but for delayable work. If the work is
3643 * scheduled or submitted it is canceled. This function does not wait for the
3644 * cancellation to complete.
3645 *
3646 * @note The work may still be running when this returns. Use
3647 * k_work_flush_delayable() or k_work_cancel_delayable_sync() to ensure it is
3648 * not running.
3649 *
3650 * @note Canceling delayable work does not prevent rescheduling it. It does
3651 * prevent submitting it until the cancellation completes.
3652 *
3653 * @funcprops \isr_ok
3654 *
3655 * @param dwork pointer to the delayable work item.
3656 *
3657 * @return the k_work_delayable_busy_get() status indicating the state of the
3658 * item after all cancellation steps performed by this call are completed.
3659 */
3660 int k_work_cancel_delayable(struct k_work_delayable *dwork);
3661
3662 /** @brief Cancel delayable work and wait.
3663 *
3664 * Like k_work_cancel_delayable() but waits until the work becomes idle.
3665 *
3666 * @note Canceling delayable work does not prevent rescheduling it. It does
3667 * prevent submitting it until the cancellation completes.
3668 *
3669 * @note Be careful of caller and work queue thread relative priority. If
3670 * this function sleeps it will not return until the work queue thread
3671 * completes the tasks that allow this thread to resume.
3672 *
3673 * @note Behavior is undefined if this function is invoked on @p dwork from a
3674 * work queue running @p dwork.
3675 *
3676 * @param dwork pointer to the delayable work item.
3677 *
3678 * @param sync pointer to an opaque item containing state related to the
3679 * pending cancellation. The object must persist until the call returns, and
3680 * be accessible from both the caller thread and the work queue thread. The
3681 * object must not be used for any other flush or cancel operation until this
3682 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3683 * must be allocated in coherent memory.
3684 *
3685 * @retval true if work was not idle (call had to wait for cancellation of a
3686 * running handler to complete, or scheduled or submitted operations were
3687 * cancelled);
3688 * @retval false otherwise
3689 */
3690 bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
3691 struct k_work_sync *sync);
3692
3693 enum {
3694 /**
3695 * @cond INTERNAL_HIDDEN
3696 */
3697
3698 /* The atomic API is used for all work and queue flags fields to
3699 * enforce sequential consistency in SMP environments.
3700 */
3701
3702 /* Bits that represent the work item states. At least nine of the
3703 * combinations are distinct valid stable states.
3704 */
3705 K_WORK_RUNNING_BIT = 0,
3706 K_WORK_CANCELING_BIT = 1,
3707 K_WORK_QUEUED_BIT = 2,
3708 K_WORK_DELAYED_BIT = 3,
3709
3710 K_WORK_MASK = BIT(K_WORK_DELAYED_BIT) | BIT(K_WORK_QUEUED_BIT)
3711 | BIT(K_WORK_RUNNING_BIT) | BIT(K_WORK_CANCELING_BIT),
3712
3713 /* Static work flags */
3714 K_WORK_DELAYABLE_BIT = 8,
3715 K_WORK_DELAYABLE = BIT(K_WORK_DELAYABLE_BIT),
3716
3717 /* Dynamic work queue flags */
3718 K_WORK_QUEUE_STARTED_BIT = 0,
3719 K_WORK_QUEUE_STARTED = BIT(K_WORK_QUEUE_STARTED_BIT),
3720 K_WORK_QUEUE_BUSY_BIT = 1,
3721 K_WORK_QUEUE_BUSY = BIT(K_WORK_QUEUE_BUSY_BIT),
3722 K_WORK_QUEUE_DRAIN_BIT = 2,
3723 K_WORK_QUEUE_DRAIN = BIT(K_WORK_QUEUE_DRAIN_BIT),
3724 K_WORK_QUEUE_PLUGGED_BIT = 3,
3725 K_WORK_QUEUE_PLUGGED = BIT(K_WORK_QUEUE_PLUGGED_BIT),
3726
3727 /* Static work queue flags */
3728 K_WORK_QUEUE_NO_YIELD_BIT = 8,
3729 K_WORK_QUEUE_NO_YIELD = BIT(K_WORK_QUEUE_NO_YIELD_BIT),
3730
3731 /**
3732 * INTERNAL_HIDDEN @endcond
3733 */
3734 /* Transient work flags */
3735
3736 /** @brief Flag indicating a work item that is running under a work
3737 * queue thread.
3738 *
3739 * Accessed via k_work_busy_get(). May co-occur with other flags.
3740 */
3741 K_WORK_RUNNING = BIT(K_WORK_RUNNING_BIT),
3742
3743 /** @brief Flag indicating a work item that is being canceled.
3744 *
3745 * Accessed via k_work_busy_get(). May co-occur with other flags.
3746 */
3747 K_WORK_CANCELING = BIT(K_WORK_CANCELING_BIT),
3748
3749 /** @brief Flag indicating a work item that has been submitted to a
3750 * queue but has not started running.
3751 *
3752 * Accessed via k_work_busy_get(). May co-occur with other flags.
3753 */
3754 K_WORK_QUEUED = BIT(K_WORK_QUEUED_BIT),
3755
3756 /** @brief Flag indicating a delayed work item that is scheduled for
3757 * submission to a queue.
3758 *
3759 * Accessed via k_work_busy_get(). May co-occur with other flags.
3760 */
3761 K_WORK_DELAYED = BIT(K_WORK_DELAYED_BIT),
3762 };
3763
3764 /** @brief A structure used to submit work. */
3765 struct k_work {
3766 /* All fields are protected by the work module spinlock. No fields
3767 * are to be accessed except through kernel API.
3768 */
3769
3770 /* Node to link into k_work_q pending list. */
3771 sys_snode_t node;
3772
3773 /* The function to be invoked by the work queue thread. */
3774 k_work_handler_t handler;
3775
3776 /* The queue on which the work item was last submitted. */
3777 struct k_work_q *queue;
3778
3779 /* State of the work item.
3780 *
3781 * The item can be DELAYED, QUEUED, and RUNNING simultaneously.
3782 *
3783 * It can be RUNNING and CANCELING simultaneously.
3784 */
3785 uint32_t flags;
3786 };
3787
3788 #define Z_WORK_INITIALIZER(work_handler) { \
3789 .handler = work_handler, \
3790 }
3791
3792 /** @brief A structure used to submit work after a delay. */
3793 struct k_work_delayable {
3794 /* The work item. */
3795 struct k_work work;
3796
3797 /* Timeout used to submit work after a delay. */
3798 struct _timeout timeout;
3799
3800 /* The queue to which the work should be submitted. */
3801 struct k_work_q *queue;
3802 };
3803
3804 #define Z_WORK_DELAYABLE_INITIALIZER(work_handler) { \
3805 .work = { \
3806 .handler = work_handler, \
3807 .flags = K_WORK_DELAYABLE, \
3808 }, \
3809 }
3810
3811 /**
3812 * @brief Initialize a statically-defined delayable work item.
3813 *
3814 * This macro can be used to initialize a statically-defined delayable
3815 * work item, prior to its first use. For example,
3816 *
3817 * @code static K_WORK_DELAYABLE_DEFINE(<dwork>, <work_handler>); @endcode
3818 *
3819 * Note that if the runtime dependencies support initialization with
3820  * k_work_init_delayable(), using that instead eliminates the initialized
3821 * object in ROM that is produced by this macro and copied in at
3822 * system startup.
3823 *
3824 * @param work Symbol name for delayable work item object
3825 * @param work_handler Function to invoke each time work item is processed.
3826 */
3827 #define K_WORK_DELAYABLE_DEFINE(work, work_handler) \
3828 struct k_work_delayable work \
3829 = Z_WORK_DELAYABLE_INITIALIZER(work_handler)
3830
3831 /**
3832 * @cond INTERNAL_HIDDEN
3833 */
3834
3835 /* Record used to wait for work to flush.
3836 *
3837 * The work item is inserted into the queue that will process (or is
3838 * processing) the item, and will be processed as soon as the item
3839 * completes. When the flusher is processed the semaphore will be
3840 * signaled, releasing the thread waiting for the flush.
3841 */
3842 struct z_work_flusher {
3843 struct k_work work;
3844 struct k_sem sem;
3845 };
3846
3847 /* Record used to wait for work to complete a cancellation.
3848 *
3849 * The work item is inserted into a global queue of pending cancels.
3850  * When a cancelling work item goes idle, any matching waiters are
3851 * removed from pending_cancels and are woken.
3852 */
3853 struct z_work_canceller {
3854 sys_snode_t node;
3855 struct k_work *work;
3856 struct k_sem sem;
3857 };
3858
3859 /**
3860 * INTERNAL_HIDDEN @endcond
3861 */
3862
3863 /** @brief A structure holding internal state for a pending synchronous
3864 * operation on a work item or queue.
3865 *
3866 * Instances of this type are provided by the caller for invocation of
3867 * k_work_flush(), k_work_cancel_sync() and sibling flush and cancel APIs. A
3868 * referenced object must persist until the call returns, and be accessible
3869 * from both the caller thread and the work queue thread.
3870 *
3871 * @note If CONFIG_KERNEL_COHERENCE is enabled the object must be allocated in
3872 * coherent memory; see arch_mem_coherent(). The stack on these architectures
3873  * is generally not coherent, so the object cannot be stack-allocated.
3874  * Violations are detected by runtime assertion.
3875 */
3876 struct k_work_sync {
3877 union {
3878 struct z_work_flusher flusher;
3879 struct z_work_canceller canceller;
3880 };
3881 };
3882
3883 /** @brief A structure holding optional configuration items for a work
3884 * queue.
3885 *
3886 * This structure, and values it references, are not retained by
3887 * k_work_queue_start().
3888 */
3889 struct k_work_queue_config {
3890 /** The name to be given to the work queue thread.
3891 *
3892 * If left null the thread will not have a name.
3893 */
3894 const char *name;
3895
3896 /** Control whether the work queue thread should yield between
3897 * items.
3898 *
3899 * Yielding between items helps guarantee the work queue
3900 * thread does not starve other threads, including cooperative
3901 * ones released by a work item. This is the default behavior.
3902 *
3903 * Set this to @c true to prevent the work queue thread from
3904 * yielding between items. This may be appropriate when a
3905 * sequence of items should complete without yielding
3906 * control.
3907 */
3908 bool no_yield;
3909 };
3910
3911 /** @brief A structure used to hold work until it can be processed. */
3912 struct k_work_q {
3913 /* The thread that animates the work. */
3914 struct k_thread thread;
3915
3916 /* All the following fields must be accessed only while the
3917 * work module spinlock is held.
3918 */
3919
3920 /* List of k_work items to be worked. */
3921 sys_slist_t pending;
3922
3923 /* Wait queue for idle work thread. */
3924 _wait_q_t notifyq;
3925
3926 /* Wait queue for threads waiting for the queue to drain. */
3927 _wait_q_t drainq;
3928
3929 /* Flags describing queue state. */
3930 uint32_t flags;
3931 };
3932
3933 /* Provide the implementation for inline functions declared above */
3934
3935 static inline bool k_work_is_pending(const struct k_work *work)
3936 {
3937 return k_work_busy_get(work) != 0;
3938 }
3939
3940 static inline struct k_work_delayable *
3941 k_work_delayable_from_work(struct k_work *work)
3942 {
3943 return CONTAINER_OF(work, struct k_work_delayable, work);
3944 }
3945
3946 static inline bool k_work_delayable_is_pending(
3947 const struct k_work_delayable *dwork)
3948 {
3949 return k_work_delayable_busy_get(dwork) != 0;
3950 }
3951
3952 static inline k_ticks_t k_work_delayable_expires_get(
3953 const struct k_work_delayable *dwork)
3954 {
3955 return z_timeout_expires(&dwork->timeout);
3956 }
3957
3958 static inline k_ticks_t k_work_delayable_remaining_get(
3959 const struct k_work_delayable *dwork)
3960 {
3961 return z_timeout_remaining(&dwork->timeout);
3962 }
3963
3964 static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue)
3965 {
3966 return &queue->thread;
3967 }
3968
3969 /** @} */
3970
3971 struct k_work_user;
3972
3973 /**
3974 * @addtogroup workqueue_apis
3975 * @{
3976 */
3977
3978 /**
3979 * @typedef k_work_user_handler_t
3980 * @brief Work item handler function type for user work queues.
3981 *
3982 * A work item's handler function is executed by a user workqueue's thread
3983 * when the work item is processed by the workqueue.
3984 *
3985 * @param work Address of the work item.
3986 */
3987 typedef void (*k_work_user_handler_t)(struct k_work_user *work);
3988
3989 /**
3990 * @cond INTERNAL_HIDDEN
3991 */
3992
3993 struct k_work_user_q {
3994 struct k_queue queue;
3995 struct k_thread thread;
3996 };
3997
3998 enum {
3999 K_WORK_USER_STATE_PENDING, /* Work item pending state */
4000 };
4001
4002 struct k_work_user {
4003 void *_reserved; /* Used by k_queue implementation. */
4004 k_work_user_handler_t handler;
4005 atomic_t flags;
4006 };
4007
4008 /**
4009 * INTERNAL_HIDDEN @endcond
4010 */
4011
4012 #if defined(__cplusplus) && ((__cplusplus - 0) < 202002L)
4013 #define Z_WORK_USER_INITIALIZER(work_handler) { NULL, work_handler, 0 }
4014 #else
4015 #define Z_WORK_USER_INITIALIZER(work_handler) \
4016 { \
4017 ._reserved = NULL, \
4018 .handler = work_handler, \
4019 .flags = 0 \
4020 }
4021 #endif
4022
4023 /**
4024 * @brief Initialize a statically-defined user work item.
4025 *
4026 * This macro can be used to initialize a statically-defined user work
4027 * item, prior to its first use. For example,
4028 *
4029 * @code static K_WORK_USER_DEFINE(<work>, <work_handler>); @endcode
4030 *
4031 * @param work Symbol name for work item object
4032 * @param work_handler Function to invoke each time work item is processed.
4033 */
4034 #define K_WORK_USER_DEFINE(work, work_handler) \
4035 struct k_work_user work = Z_WORK_USER_INITIALIZER(work_handler)
4036
4037 /**
4038 * @brief Initialize a userspace work item.
4039 *
4040 * This routine initializes a user workqueue work item, prior to its
4041 * first use.
4042 *
4043 * @param work Address of work item.
4044 * @param handler Function to invoke each time work item is processed.
4045 */
4046 static inline void k_work_user_init(struct k_work_user *work,
4047 k_work_user_handler_t handler)
4048 {
4049 *work = (struct k_work_user)Z_WORK_USER_INITIALIZER(handler);
4050 }
4051
4052 /**
4053 * @brief Check if a userspace work item is pending.
4054 *
4055 * This routine indicates if user work item @a work is pending in a workqueue's
4056 * queue.
4057 *
4058 * @note Checking if the work is pending gives no guarantee that the
4059 * work will still be pending when this information is used. It is up to
4060 * the caller to make sure that this information is used in a safe manner.
4061 *
4062 * @funcprops \isr_ok
4063 *
4064 * @param work Address of work item.
4065 *
4066 * @return true if work item is pending, or false if it is not pending.
4067 */
4068 static inline bool k_work_user_is_pending(struct k_work_user *work)
4069 {
4070 return atomic_test_bit(&work->flags, K_WORK_USER_STATE_PENDING);
4071 }
4072
4073 /**
4074 * @brief Submit a work item to a user mode workqueue
4075 *
4076 * Submits a work item to a workqueue that runs in user mode. A temporary
4077  * memory allocation is made from the caller's resource pool, which is freed
4078 * once the worker thread consumes the k_work item. The workqueue
4079 * thread must have memory access to the k_work item being submitted. The caller
4080 * must have permission granted on the work_q parameter's queue object.
4081 *
4082 * @funcprops \isr_ok
4083 *
4084 * @param work_q Address of workqueue.
4085 * @param work Address of work item.
4086 *
4087 * @retval -EBUSY if the work item was already in some workqueue
4088 * @retval -ENOMEM if no memory for thread resource pool allocation
4089 * @retval 0 Success
4090 */
4091 static inline int k_work_user_submit_to_queue(struct k_work_user_q *work_q,
4092 struct k_work_user *work)
4093 {
4094 int ret = -EBUSY;
4095
4096 if (!atomic_test_and_set_bit(&work->flags,
4097 K_WORK_USER_STATE_PENDING)) {
4098 ret = k_queue_alloc_append(&work_q->queue, work);
4099
4100 /* Couldn't insert into the queue. Clear the pending bit
4101 * so the work item can be submitted again
4102 */
4103 if (ret != 0) {
4104 atomic_clear_bit(&work->flags,
4105 K_WORK_USER_STATE_PENDING);
4106 }
4107 }
4108
4109 return ret;
4110 }
4111
4112 /**
4113 * @brief Start a workqueue in user mode
4114 *
4115 * This works identically to k_work_queue_start() except it is callable from
4116 * user mode, and the worker thread created will run in user mode. The caller
4117 * must have permissions granted on both the work_q parameter's thread and
4118  * queue objects, and the same restrictions on priority apply as for
4119 * k_thread_create().
4120 *
4121 * @param work_q Address of workqueue.
4122 * @param stack Pointer to work queue thread's stack space, as defined by
4123 * K_THREAD_STACK_DEFINE()
4124 * @param stack_size Size of the work queue thread's stack (in bytes), which
4125 * should either be the same constant passed to
4126 * K_THREAD_STACK_DEFINE() or the value of K_THREAD_STACK_SIZEOF().
4127 * @param prio Priority of the work queue's thread.
4128 * @param name optional thread name. If not null a copy is made into the
4129 * thread's name buffer.
4130 */
4131 extern void k_work_user_queue_start(struct k_work_user_q *work_q,
4132 k_thread_stack_t *stack,
4133 size_t stack_size, int prio,
4134 const char *name);
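
/* Illustrative sketch (not part of this header): start a user-mode queue
 * and submit an item to it. The stack size, priority, and names (including
 * my_user_handler) are arbitrary example values.
 *
 *	K_THREAD_STACK_DEFINE(user_stack, 1024);
 *
 *	static struct k_work_user_q user_q;
 *	static struct k_work_user user_item;
 *
 *	void start_and_submit(void)
 *	{
 *		k_work_user_init(&user_item, my_user_handler);
 *		k_work_user_queue_start(&user_q, user_stack,
 *					K_THREAD_STACK_SIZEOF(user_stack),
 *					K_PRIO_PREEMPT(5), "user_wq");
 *		(void)k_work_user_submit_to_queue(&user_q, &user_item);
 *	}
 */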
4135
4136 /**
4137 * @brief Access the user mode thread that animates a work queue.
4138 *
4139 * This is necessary to grant a user mode work queue thread access to things
4140 * the work items it will process are expected to use.
4141 *
4142 * @param work_q pointer to the user mode queue structure.
4143 *
4144 * @return the user mode thread associated with the work queue.
4145 */
4146 static inline k_tid_t k_work_user_queue_thread_get(struct k_work_user_q *work_q)
4147 {
4148 return &work_q->thread;
4149 }
4150
4151 /** @} */
4152
4153 /**
4154 * @cond INTERNAL_HIDDEN
4155 */
4156
4157 struct k_work_poll {
4158 struct k_work work;
4159 struct k_work_q *workq;
4160 struct z_poller poller;
4161 struct k_poll_event *events;
4162 int num_events;
4163 k_work_handler_t real_handler;
4164 struct _timeout timeout;
4165 int poll_result;
4166 };
4167
4168 /**
4169 * INTERNAL_HIDDEN @endcond
4170 */
4171
4172 /**
4173 * @addtogroup workqueue_apis
4174 * @{
4175 */
4176
4177 /**
4178 * @brief Initialize a statically-defined work item.
4179 *
4180 * This macro can be used to initialize a statically-defined workqueue work
4181 * item, prior to its first use. For example,
4182 *
4183 * @code static K_WORK_DEFINE(<work>, <work_handler>); @endcode
4184 *
4185 * @param work Symbol name for work item object
4186 * @param work_handler Function to invoke each time work item is processed.
4187 */
4188 #define K_WORK_DEFINE(work, work_handler) \
4189 struct k_work work = Z_WORK_INITIALIZER(work_handler)
4190
4191 /**
4192 * @brief Initialize a triggered work item.
4193 *
4194 * This routine initializes a workqueue triggered work item, prior to
4195 * its first use.
4196 *
4197 * @param work Address of triggered work item.
4198 * @param handler Function to invoke each time work item is processed.
4199 */
4200 extern void k_work_poll_init(struct k_work_poll *work,
4201 k_work_handler_t handler);
4202
4203 /**
4204 * @brief Submit a triggered work item.
4205 *
4206 * This routine schedules work item @a work to be processed by workqueue
4207 * @a work_q when one of the given @a events is signaled. The routine
4208  * initiates an internal poller for the work item and then returns to the
4209  * caller. Only when one of the watched events happens is the work item
4210  * actually submitted to the workqueue, where it becomes pending.
4211 *
4212 * Submitting a previously submitted triggered work item that is still
4213 * waiting for the event cancels the existing submission and reschedules it
4214  * using the new event list. Note that this behavior is inherently subject
4215 * to race conditions with the pre-existing triggered work item and work queue,
4216 * so care must be taken to synchronize such resubmissions externally.
4217 *
4218 * @funcprops \isr_ok
4219 *
4220 * @warning
4221 * Provided array of events as well as a triggered work item must be placed
4222 * in persistent memory (valid until work handler execution or work
4223 * cancellation) and cannot be modified after submission.
4224 *
4225 * @param work_q Address of workqueue.
4226  * @param work Address of triggered work item.
4227 * @param events An array of events which trigger the work.
4228 * @param num_events The number of events in the array.
4229 * @param timeout Timeout after which the work will be scheduled
4230 * for execution even if not triggered.
4231 *
4232 *
4233 * @retval 0 Work item started watching for events.
4234 * @retval -EINVAL Work item is being processed or has completed its work.
4235 * @retval -EADDRINUSE Work item is pending on a different workqueue.
4236 */
4237 extern int k_work_poll_submit_to_queue(struct k_work_q *work_q,
4238 struct k_work_poll *work,
4239 struct k_poll_event *events,
4240 int num_events,
4241 k_timeout_t timeout);
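
/* Illustrative sketch (not part of this header): run a work item when a
 * poll signal is raised, or after one second regardless. my_handler is
 * hypothetical; the event array and the work item must remain valid until
 * the handler runs or the submission is cancelled.
 *
 *	static struct k_poll_signal my_signal;
 *	static struct k_poll_event my_events[1];
 *	static struct k_work_poll my_pwork;
 *
 *	void watch_signal(struct k_work_q *queue)
 *	{
 *		k_poll_signal_init(&my_signal);
 *		k_poll_event_init(&my_events[0], K_POLL_TYPE_SIGNAL,
 *				  K_POLL_MODE_NOTIFY_ONLY, &my_signal);
 *		k_work_poll_init(&my_pwork, my_handler);
 *		(void)k_work_poll_submit_to_queue(queue, &my_pwork,
 *						  my_events, 1, K_SECONDS(1));
 *	}
 */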
4242
4243 /**
4244 * @brief Submit a triggered work item to the system workqueue.
4245 *
4246 * This routine schedules work item @a work to be processed by system
4247 * workqueue when one of the given @a events is signaled. The routine
4248  * initiates an internal poller for the work item and then returns to the
4249  * caller. Only when one of the watched events happens is the work item
4250  * actually submitted to the workqueue, where it becomes pending.
4251 *
4252 * Submitting a previously submitted triggered work item that is still
4253 * waiting for the event cancels the existing submission and reschedules it
4254  * using the new event list. Note that this behavior is inherently subject
4255 * to race conditions with the pre-existing triggered work item and work queue,
4256 * so care must be taken to synchronize such resubmissions externally.
4257 *
4258 * @funcprops \isr_ok
4259 *
4260 * @warning
4261 * Provided array of events as well as a triggered work item must not be
4262 * modified until the item has been processed by the workqueue.
4263 *
4264  * @param work Address of triggered work item.
4265 * @param events An array of events which trigger the work.
4266 * @param num_events The number of events in the array.
4267 * @param timeout Timeout after which the work will be scheduled
4268 * for execution even if not triggered.
4269 *
4270 * @retval 0 Work item started watching for events.
4271 * @retval -EINVAL Work item is being processed or has completed its work.
4272 * @retval -EADDRINUSE Work item is pending on a different workqueue.
4273 */
4274 extern int k_work_poll_submit(struct k_work_poll *work,
4275 struct k_poll_event *events,
4276 int num_events,
4277 k_timeout_t timeout);
4278
4279 /**
4280 * @brief Cancel a triggered work item.
4281 *
4282 * This routine cancels the submission of triggered work item @a work.
4283  * A triggered work item can only be canceled if no event has yet triggered
4284  * submission of the work item.
4285 *
4286 * @funcprops \isr_ok
4287 *
4288  * @param work Address of triggered work item.
4289 *
4290 * @retval 0 Work item canceled.
4291 * @retval -EINVAL Work item is being processed or has completed its work.
4292 */
4293 extern int k_work_poll_cancel(struct k_work_poll *work);
4294
4295 /** @} */
4296
4297 /**
4298 * @defgroup msgq_apis Message Queue APIs
4299 * @ingroup kernel_apis
4300 * @{
4301 */
4302
4303 /**
4304 * @brief Message Queue Structure
4305 */
4306 struct k_msgq {
4307 /** Message queue wait queue */
4308 _wait_q_t wait_q;
4309 /** Lock */
4310 struct k_spinlock lock;
4311 /** Message size */
4312 size_t msg_size;
4313 /** Maximal number of messages */
4314 uint32_t max_msgs;
4315 /** Start of message buffer */
4316 char *buffer_start;
4317 /** End of message buffer */
4318 char *buffer_end;
4319 /** Read pointer */
4320 char *read_ptr;
4321 /** Write pointer */
4322 char *write_ptr;
4323 /** Number of used messages */
4324 uint32_t used_msgs;
4325
4326 _POLL_EVENT;
4327
4328 	/** Message queue flags */
4329 uint8_t flags;
4330
4331 SYS_PORT_TRACING_TRACKING_FIELD(k_msgq)
4332 };
4333 /**
4334 * @cond INTERNAL_HIDDEN
4335 */
4336
4337
4338 #define Z_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \
4339 { \
4340 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
4341 .msg_size = q_msg_size, \
4342 .max_msgs = q_max_msgs, \
4343 .buffer_start = q_buffer, \
4344 .buffer_end = q_buffer + (q_max_msgs * q_msg_size), \
4345 .read_ptr = q_buffer, \
4346 .write_ptr = q_buffer, \
4347 .used_msgs = 0, \
4348 _POLL_EVENT_OBJ_INIT(obj) \
4349 }
4350
4351 /**
4352 * INTERNAL_HIDDEN @endcond
4353 */
4354
4355
4356 #define K_MSGQ_FLAG_ALLOC BIT(0)
4357
4358 /**
4359 * @brief Message Queue Attributes
4360 */
4361 struct k_msgq_attrs {
4362 /** Message Size */
4363 size_t msg_size;
4364 /** Maximal number of messages */
4365 uint32_t max_msgs;
4366 /** Used messages */
4367 uint32_t used_msgs;
4368 };
4369
4370
4371 /**
4372 * @brief Statically define and initialize a message queue.
4373 *
4374 * The message queue's ring buffer contains space for @a q_max_msgs messages,
4375 * each of which is @a q_msg_size bytes long. The buffer is aligned to a
4376 * @a q_align -byte boundary, which must be a power of 2. To ensure that each
4377 * message is similarly aligned to this boundary, @a q_msg_size must also be
4378 * a multiple of @a q_align.
4379 *
4380 * The message queue can be accessed outside the module where it is defined
4381 * using:
4382 *
4383 * @code extern struct k_msgq <name>; @endcode
4384 *
4385 * @param q_name Name of the message queue.
4386 * @param q_msg_size Message size (in bytes).
4387 * @param q_max_msgs Maximum number of messages that can be queued.
4388 * @param q_align Alignment of the message queue's ring buffer.
4389 *
4390 */
4391 #define K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align) \
4392 static char __noinit __aligned(q_align) \
4393 _k_fifo_buf_##q_name[(q_max_msgs) * (q_msg_size)]; \
4394 STRUCT_SECTION_ITERABLE(k_msgq, q_name) = \
4395 Z_MSGQ_INITIALIZER(q_name, _k_fifo_buf_##q_name, \
4396 (q_msg_size), (q_max_msgs))
4397
4398 /**
4399 * @brief Initialize a message queue.
4400 *
4401 * This routine initializes a message queue object, prior to its first use.
4402 *
4403 * The message queue's ring buffer must contain space for @a max_msgs messages,
4404 * each of which is @a msg_size bytes long. The buffer must be aligned to an
4405 * N-byte boundary, where N is a power of 2 (i.e. 1, 2, 4, ...). To ensure
4406  * that each message is similarly aligned to this boundary, @a msg_size
4407 * must also be a multiple of N.
4408 *
4409 * @param msgq Address of the message queue.
4410 * @param buffer Pointer to ring buffer that holds queued messages.
4411 * @param msg_size Message size (in bytes).
4412 * @param max_msgs Maximum number of messages that can be queued.
4413 */
4414 void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
4415 uint32_t max_msgs);
4416
4417 /**
4418 * @brief Initialize a message queue.
4419 *
4420 * This routine initializes a message queue object, prior to its first use,
4421 * allocating its internal ring buffer from the calling thread's resource
4422 * pool.
4423 *
4424 * Memory allocated for the ring buffer can be released by calling
4425 * k_msgq_cleanup(), or if userspace is enabled and the msgq object loses
4426 * all of its references.
4427 *
4428 * @param msgq Address of the message queue.
4429 * @param msg_size Message size (in bytes).
4430 * @param max_msgs Maximum number of messages that can be queued.
4431 *
4432 * @return 0 on success, -ENOMEM if there was insufficient memory in the
4433 * thread's resource pool, or -EINVAL if the size parameters cause
4434 * an integer overflow.
4435 */
4436 __syscall int k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
4437 uint32_t max_msgs);
4438
4439 /**
4440 * @brief Release allocated buffer for a queue
4441 *
4442 * Releases memory allocated for the ring buffer.
4443 *
4444 * @param msgq message queue to cleanup
4445 *
4446 * @retval 0 on success
4447 * @retval -EBUSY Queue not empty
4448 */
4449 int k_msgq_cleanup(struct k_msgq *msgq);
4450
4451 /**
4452 * @brief Send a message to a message queue.
4453 *
4454  * This routine sends a message to message queue @a msgq.
4455 *
4456 * @note The message content is copied from @a data into @a msgq and the @a data
4457 * pointer is not retained, so the message content will not be modified
4458 * by this function.
4459 *
4460 * @funcprops \isr_ok
4461 *
4462 * @param msgq Address of the message queue.
4463 * @param data Pointer to the message.
4464 * @param timeout Non-negative waiting period to add the message,
4465 * or one of the special values K_NO_WAIT and
4466 * K_FOREVER.
4467 *
4468 * @retval 0 Message sent.
4469 * @retval -ENOMSG Returned without waiting or queue purged.
4470 * @retval -EAGAIN Waiting period timed out.
4471 */
4472 __syscall int k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
4473
4474 /**
4475 * @brief Receive a message from a message queue.
4476 *
4477  * This routine receives a message from message queue @a msgq in a "first in,
4478 * first out" manner.
4479 *
4480 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
4481 *
4482 * @funcprops \isr_ok
4483 *
4484 * @param msgq Address of the message queue.
4485 * @param data Address of area to hold the received message.
4486 * @param timeout Waiting period to receive the message,
4487 * or one of the special values K_NO_WAIT and
4488 * K_FOREVER.
4489 *
4490 * @retval 0 Message received.
4491 * @retval -ENOMSG Returned without waiting.
4492 * @retval -EAGAIN Waiting period timed out.
4493 */
4494 __syscall int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout);
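
/* Illustrative sketch (not part of this header): a queue of ten 4-byte
 * messages with one producer and one consumer. process() is hypothetical.
 *
 *	K_MSGQ_DEFINE(my_msgq, sizeof(uint32_t), 10, 4);
 *
 *	void producer(void)
 *	{
 *		uint32_t msg = 42;
 *
 *		// Drop the message rather than block if the queue is full.
 *		(void)k_msgq_put(&my_msgq, &msg, K_NO_WAIT);
 *	}
 *
 *	void consumer(void)
 *	{
 *		uint32_t msg;
 *
 *		if (k_msgq_get(&my_msgq, &msg, K_FOREVER) == 0) {
 *			process(msg);
 *		}
 *	}
 */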
4495
4496 /**
4497 * @brief Peek/read a message from a message queue.
4498 *
4499  * This routine reads a message from message queue @a msgq in a "first in,
4500 * first out" manner and leaves the message in the queue.
4501 *
4502 * @funcprops \isr_ok
4503 *
4504 * @param msgq Address of the message queue.
4505 * @param data Address of area to hold the message read from the queue.
4506 *
4507 * @retval 0 Message read.
4508 * @retval -ENOMSG Returned when the queue has no message.
4509 */
4510 __syscall int k_msgq_peek(struct k_msgq *msgq, void *data);
4511
4512 /**
4513 * @brief Peek/read a message from a message queue at the specified index
4514 *
4515  * This routine reads a message from the message queue at the specified index
4516  * and leaves the message in the queue.
4517  * k_msgq_peek_at(msgq, data, 0) is equivalent to k_msgq_peek(msgq, data).
4518 *
4519 * @funcprops \isr_ok
4520 *
4521 * @param msgq Address of the message queue.
4522 * @param data Address of area to hold the message read from the queue.
4523 * @param idx Message queue index at which to peek
4524 *
4525 * @retval 0 Message read.
4526 * @retval -ENOMSG Returned when the queue has no message at index.
4527 */
4528 __syscall int k_msgq_peek_at(struct k_msgq *msgq, void *data, uint32_t idx);
4529
4530 /**
4531 * @brief Purge a message queue.
4532 *
4533 * This routine discards all unreceived messages in a message queue's ring
4534 * buffer. Any threads that are blocked waiting to send a message to the
4535 * message queue are unblocked and see an -ENOMSG error code.
4536 *
4537 * @param msgq Address of the message queue.
4538 */
4539 __syscall void k_msgq_purge(struct k_msgq *msgq);
4540
4541 /**
4542 * @brief Get the amount of free space in a message queue.
4543 *
4544 * This routine returns the number of unused entries in a message queue's
4545 * ring buffer.
4546 *
4547 * @param msgq Address of the message queue.
4548 *
4549 * @return Number of unused ring buffer entries.
4550 */
4551 __syscall uint32_t k_msgq_num_free_get(struct k_msgq *msgq);
4552
4553 /**
4554 * @brief Get basic attributes of a message queue.
4555 *
4556  * This routine fetches the basic attributes of a message queue into @a attrs.
4557 *
4558 * @param msgq Address of the message queue.
4559 * @param attrs pointer to message queue attribute structure.
4560 */
4561 __syscall void k_msgq_get_attrs(struct k_msgq *msgq,
4562 struct k_msgq_attrs *attrs);
4563
4564
4565 static inline uint32_t z_impl_k_msgq_num_free_get(struct k_msgq *msgq)
4566 {
4567 return msgq->max_msgs - msgq->used_msgs;
4568 }
4569
4570 /**
4571 * @brief Get the number of messages in a message queue.
4572 *
4573 * This routine returns the number of messages in a message queue's ring buffer.
4574 *
4575 * @param msgq Address of the message queue.
4576 *
4577 * @return Number of messages.
4578 */
4579 __syscall uint32_t k_msgq_num_used_get(struct k_msgq *msgq);
4580
4581 static inline uint32_t z_impl_k_msgq_num_used_get(struct k_msgq *msgq)
4582 {
4583 return msgq->used_msgs;
4584 }
4585
4586 /** @} */
4587
4588 /**
4589 * @defgroup mailbox_apis Mailbox APIs
4590 * @ingroup kernel_apis
4591 * @{
4592 */
4593
4594 /**
4595 * @brief Mailbox Message Structure
4596 *
4597 */
4598 struct k_mbox_msg {
4599 /** internal use only - needed for legacy API support */
4600 uint32_t _mailbox;
4601 /** size of message (in bytes) */
4602 size_t size;
4603 /** application-defined information value */
4604 uint32_t info;
4605 /** sender's message data buffer */
4606 void *tx_data;
4607 /** internal use only - needed for legacy API support */
4608 void *_rx_data;
4609 /** message data block descriptor */
4610 struct k_mem_block tx_block;
4611 /** source thread id */
4612 k_tid_t rx_source_thread;
4613 /** target thread id */
4614 k_tid_t tx_target_thread;
4615 /** internal use only - thread waiting on send (may be a dummy) */
4616 k_tid_t _syncing_thread;
4617 #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
4618 /** internal use only - semaphore used during asynchronous send */
4619 struct k_sem *_async_sem;
4620 #endif
4621 };
4622 /**
4623 * @brief Mailbox Structure
4624 *
4625 */
4626 struct k_mbox {
4627 /** Transmit messages queue */
4628 _wait_q_t tx_msg_queue;
4629 /** Receive message queue */
4630 _wait_q_t rx_msg_queue;
4631 struct k_spinlock lock;
4632
4633 SYS_PORT_TRACING_TRACKING_FIELD(k_mbox)
4634 };
4635 /**
4636 * @cond INTERNAL_HIDDEN
4637 */
4638
4639 #define Z_MBOX_INITIALIZER(obj) \
4640 { \
4641 .tx_msg_queue = Z_WAIT_Q_INIT(&obj.tx_msg_queue), \
4642 .rx_msg_queue = Z_WAIT_Q_INIT(&obj.rx_msg_queue), \
4643 }
4644
4645 /**
4646 * INTERNAL_HIDDEN @endcond
4647 */
4648
4649 /**
4650 * @brief Statically define and initialize a mailbox.
4651 *
4652 * The mailbox is to be accessed outside the module where it is defined using:
4653 *
4654 * @code extern struct k_mbox <name>; @endcode
4655 *
4656 * @param name Name of the mailbox.
4657 */
4658 #define K_MBOX_DEFINE(name) \
4659 STRUCT_SECTION_ITERABLE(k_mbox, name) = \
4660 Z_MBOX_INITIALIZER(name) \
4661
4662 /**
4663 * @brief Initialize a mailbox.
4664 *
4665 * This routine initializes a mailbox object, prior to its first use.
4666 *
4667 * @param mbox Address of the mailbox.
4668 */
4669 extern void k_mbox_init(struct k_mbox *mbox);
4670
4671 /**
4672 * @brief Send a mailbox message in a synchronous manner.
4673 *
4674 * This routine sends a message to @a mbox and waits for a receiver to both
4675 * receive and process it. The message data may be in a buffer, in a memory
4676 * pool block, or non-existent (i.e. an empty message).
4677 *
4678 * @param mbox Address of the mailbox.
4679 * @param tx_msg Address of the transmit message descriptor.
4680 * @param timeout Waiting period for the message to be received,
4681 * or one of the special values K_NO_WAIT
4682 * and K_FOREVER. Once the message has been received,
4683 * this routine waits as long as necessary for the message
4684 * to be completely processed.
4685 *
4686 * @retval 0 Message sent.
4687 * @retval -ENOMSG Returned without waiting.
4688 * @retval -EAGAIN Waiting period timed out.
4689 */
4690 extern int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
4691 k_timeout_t timeout);
4692
4693 /**
4694 * @brief Send a mailbox message in an asynchronous manner.
4695 *
4696 * This routine sends a message to @a mbox without waiting for a receiver
4697 * to process it. The message data may be in a buffer, in a memory pool block,
4698 * or non-existent (i.e. an empty message). Optionally, the semaphore @a sem
4699 * will be given when the message has been both received and completely
4700 * processed by the receiver.
4701 *
4702 * @param mbox Address of the mailbox.
4703 * @param tx_msg Address of the transmit message descriptor.
4704 * @param sem Address of a semaphore, or NULL if none is needed.
4705 */
4706 extern void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
4707 struct k_sem *sem);
4708
4709 /**
4710 * @brief Receive a mailbox message.
4711 *
4712 * This routine receives a message from @a mbox, then optionally retrieves
4713 * its data and disposes of the message.
4714 *
4715 * @param mbox Address of the mailbox.
4716 * @param rx_msg Address of the receive message descriptor.
4717 * @param buffer Address of the buffer to receive data, or NULL to defer data
4718 * retrieval and message disposal until later.
4719 * @param timeout Waiting period for a message to be received,
4720 * or one of the special values K_NO_WAIT and K_FOREVER.
4721 *
4722 * @retval 0 Message received.
4723 * @retval -ENOMSG Returned without waiting.
4724 * @retval -EAGAIN Waiting period timed out.
4725 */
4726 extern int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg,
4727 void *buffer, k_timeout_t timeout);
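
/* Illustrative sketch (not part of this header): a synchronous exchange
 * through a mailbox, using the descriptor fields defined above. All names
 * are hypothetical.
 *
 *	K_MBOX_DEFINE(my_mbox);
 *
 *	void sender(void)
 *	{
 *		char data[8] = "hello";
 *		struct k_mbox_msg tx_msg = {
 *			.size = sizeof(data),
 *			.info = 0,
 *			.tx_data = data,
 *			.tx_target_thread = K_ANY,
 *		};
 *
 *		(void)k_mbox_put(&my_mbox, &tx_msg, K_FOREVER);
 *	}
 *
 *	void receiver(void)
 *	{
 *		char buffer[8];
 *		struct k_mbox_msg rx_msg = {
 *			.size = sizeof(buffer),
 *			.rx_source_thread = K_ANY,
 *		};
 *
 *		(void)k_mbox_get(&my_mbox, &rx_msg, buffer, K_FOREVER);
 *	}
 */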
4728
4729 /**
4730 * @brief Retrieve mailbox message data into a buffer.
4731 *
4732 * This routine completes the processing of a received message by retrieving
4733 * its data into a buffer, then disposing of the message.
4734 *
4735 * Alternatively, this routine can be used to dispose of a received message
4736 * without retrieving its data.
4737 *
4738 * @param rx_msg Address of the receive message descriptor.
4739 * @param buffer Address of the buffer to receive data, or NULL to discard
4740 * the data.
4741 */
4742 extern void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer);
4743
4744 /** @} */
4745
4746 /**
4747 * @defgroup pipe_apis Pipe APIs
4748 * @ingroup kernel_apis
4749 * @{
4750 */
4751
4752 /** Pipe Structure */
4753 struct k_pipe {
4754 unsigned char *buffer; /**< Pipe buffer: may be NULL */
4755 size_t size; /**< Buffer size */
4756 size_t bytes_used; /**< # bytes used in buffer */
4757 size_t read_index; /**< Where in buffer to read from */
4758 size_t write_index; /**< Where in buffer to write */
4759 struct k_spinlock lock; /**< Synchronization lock */
4760
4761 struct {
4762 _wait_q_t readers; /**< Reader wait queue */
4763 _wait_q_t writers; /**< Writer wait queue */
4764 	} wait_q; /**< Wait queue */
4765
4766 _POLL_EVENT;
4767
4768 uint8_t flags; /**< Flags */
4769
4770 SYS_PORT_TRACING_TRACKING_FIELD(k_pipe)
4771 };
4772
4773 /**
4774 * @cond INTERNAL_HIDDEN
4775 */
4776 #define K_PIPE_FLAG_ALLOC BIT(0) /**< Buffer was allocated */
4777
4778 #define Z_PIPE_INITIALIZER(obj, pipe_buffer, pipe_buffer_size) \
4779 { \
4780 .buffer = pipe_buffer, \
4781 .size = pipe_buffer_size, \
4782 .bytes_used = 0, \
4783 .read_index = 0, \
4784 .write_index = 0, \
4785 .lock = {}, \
4786 .wait_q = { \
4787 .readers = Z_WAIT_Q_INIT(&obj.wait_q.readers), \
4788 .writers = Z_WAIT_Q_INIT(&obj.wait_q.writers) \
4789 }, \
4790 _POLL_EVENT_OBJ_INIT(obj) \
4791 .flags = 0, \
4792 }
4793
4794 /**
4795 * INTERNAL_HIDDEN @endcond
4796 */
4797
4798 /**
4799 * @brief Statically define and initialize a pipe.
4800 *
4801 * The pipe can be accessed outside the module where it is defined using:
4802 *
4803 * @code extern struct k_pipe <name>; @endcode
4804 *
4805 * @param name Name of the pipe.
4806 * @param pipe_buffer_size Size of the pipe's ring buffer (in bytes),
4807 * or zero if no ring buffer is used.
4808 * @param pipe_align Alignment of the pipe's ring buffer (power of 2).
4809 *
4810 */
4811 #define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align) \
4812 static unsigned char __noinit __aligned(pipe_align) \
4813 _k_pipe_buf_##name[pipe_buffer_size]; \
4814 STRUCT_SECTION_ITERABLE(k_pipe, name) = \
4815 Z_PIPE_INITIALIZER(name, _k_pipe_buf_##name, pipe_buffer_size)
4816
4817 /**
4818 * @brief Initialize a pipe.
4819 *
4820 * This routine initializes a pipe object, prior to its first use.
4821 *
4822 * @param pipe Address of the pipe.
4823 * @param buffer Address of the pipe's ring buffer, or NULL if no ring buffer
4824 * is used.
4825 * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
4826 * buffer is used.
4827 */
4828 void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size);
4829
4830 /**
4831 * @brief Release a pipe's allocated buffer
4832 *
4833 * If a pipe object was given a dynamically allocated buffer via
4834 * k_pipe_alloc_init(), this will free it. This function does nothing
4835 * if the buffer wasn't dynamically allocated.
4836 *
4837 * @param pipe Address of the pipe.
4838 * @retval 0 on success
4839 * @retval -EAGAIN nothing to cleanup
4840 */
4841 int k_pipe_cleanup(struct k_pipe *pipe);
4842
4843 /**
4844 * @brief Initialize a pipe and allocate a buffer for it
4845 *
4846 * Storage for the buffer region will be allocated from the calling thread's
4847 * resource pool. This memory will be released if k_pipe_cleanup() is called,
4848 * or userspace is enabled and the pipe object loses all references to it.
4849 *
4850 * This function should only be called on uninitialized pipe objects.
4851 *
4852 * @param pipe Address of the pipe.
4853 * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
4854 * buffer is used.
4855 * @retval 0 on success
4856 * @retval -ENOMEM if memory couldn't be allocated
4857 */
4858 __syscall int k_pipe_alloc_init(struct k_pipe *pipe, size_t size);
4859
4860 /**
4861 * @brief Write data to a pipe.
4862 *
4863 * This routine writes up to @a bytes_to_write bytes of data to @a pipe.
4864 *
4865 * @param pipe Address of the pipe.
4866 * @param data Address of data to write.
4867 * @param bytes_to_write Size of data (in bytes).
4868 * @param bytes_written Address of area to hold the number of bytes written.
4869 * @param min_xfer Minimum number of bytes to write.
4870 * @param timeout Waiting period to wait for the data to be written,
4871 * or one of the special values K_NO_WAIT and K_FOREVER.
4872 *
4873 * @retval 0 At least @a min_xfer bytes of data were written.
4874 * @retval -EIO Returned without waiting; zero data bytes were written.
4875 * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
4876 * minus one data bytes were written.
4877 */
4878 __syscall int k_pipe_put(struct k_pipe *pipe, void *data,
4879 size_t bytes_to_write, size_t *bytes_written,
4880 size_t min_xfer, k_timeout_t timeout);
4881
4882 /**
4883 * @brief Read data from a pipe.
4884 *
4885 * This routine reads up to @a bytes_to_read bytes of data from @a pipe.
4886 *
4887 * @param pipe Address of the pipe.
4888 * @param data Address to place the data read from pipe.
4889 * @param bytes_to_read Maximum number of data bytes to read.
4890 * @param bytes_read Address of area to hold the number of bytes read.
4891 * @param min_xfer Minimum number of data bytes to read.
4892 * @param timeout Waiting period to wait for the data to be read,
4893 * or one of the special values K_NO_WAIT and K_FOREVER.
4894 *
4895 * @retval 0 At least @a min_xfer bytes of data were read.
4896 * @retval -EINVAL invalid parameters supplied
4897 * @retval -EIO Returned without waiting; zero data bytes were read.
4898 * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
4899 * minus one data bytes were read.
4900 */
4901 __syscall int k_pipe_get(struct k_pipe *pipe, void *data,
4902 size_t bytes_to_read, size_t *bytes_read,
4903 size_t min_xfer, k_timeout_t timeout);
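
/* Illustrative sketch (not part of this header): move a byte stream through
 * a buffered pipe, accepting partial transfers (min_xfer of 1). Names are
 * hypothetical.
 *
 *	K_PIPE_DEFINE(my_pipe, 64, 4);
 *
 *	void writer(uint8_t *data, size_t len)
 *	{
 *		size_t written;
 *
 *		(void)k_pipe_put(&my_pipe, data, len, &written, 1, K_MSEC(100));
 *	}
 *
 *	void reader(uint8_t *buf, size_t len)
 *	{
 *		size_t count;
 *
 *		(void)k_pipe_get(&my_pipe, buf, len, &count, 1, K_MSEC(100));
 *	}
 */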
4904
4905 /**
4906 * @brief Query the number of bytes that may be read from @a pipe.
4907 *
4908 * @param pipe Address of the pipe.
4909 *
4910 * @retval a number n such that 0 <= n <= @ref k_pipe.size; the
4911 * result is zero for unbuffered pipes.
4912 */
4913 __syscall size_t k_pipe_read_avail(struct k_pipe *pipe);
4914
4915 /**
4916 * @brief Query the number of bytes that may be written to @a pipe
4917 *
4918 * @param pipe Address of the pipe.
4919 *
4920 * @retval a number n such that 0 <= n <= @ref k_pipe.size; the
4921 * result is zero for unbuffered pipes.
4922 */
4923 __syscall size_t k_pipe_write_avail(struct k_pipe *pipe);
4924
4925 /**
4926 * @brief Flush the pipe of write data
4927 *
4928 * This routine flushes the pipe. Flushing the pipe is equivalent to reading
4929 * both all the data in the pipe's buffer and all the data waiting to go into
4930 * that pipe into a large temporary buffer and discarding the buffer. Any
4931 * writers that were previously pended become unpended.
4932 *
4933 * @param pipe Address of the pipe.
4934 */
4935 __syscall void k_pipe_flush(struct k_pipe *pipe);
4936
4937 /**
4938 * @brief Flush the pipe's internal buffer
4939 *
4940 * This routine flushes the pipe's internal buffer. This is equivalent to
4941 * reading up to N bytes from the pipe (where N is the size of the pipe's
4942 * buffer) into a temporary buffer and then discarding that buffer. If there
4943 * were writers previously pending, then some may unpend as they try to fill
4944 * up the pipe's emptied buffer.
4945 *
4946 * @param pipe Address of the pipe.
4947 */
4948 __syscall void k_pipe_buffer_flush(struct k_pipe *pipe);
4949
4950 /** @} */
4951
4952 /**
4953 * @cond INTERNAL_HIDDEN
4954 */
4955
4956 struct k_mem_slab {
4957 _wait_q_t wait_q;
4958 struct k_spinlock lock;
4959 uint32_t num_blocks;
4960 size_t block_size;
4961 char *buffer;
4962 char *free_list;
4963 uint32_t num_used;
4964 #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
4965 uint32_t max_used;
4966 #endif
4967
4968 SYS_PORT_TRACING_TRACKING_FIELD(k_mem_slab)
4969 };
4970
4971 #define Z_MEM_SLAB_INITIALIZER(obj, slab_buffer, slab_block_size, \
4972 slab_num_blocks) \
4973 { \
4974 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
4975 .lock = {}, \
4976 .num_blocks = slab_num_blocks, \
4977 .block_size = slab_block_size, \
4978 .buffer = slab_buffer, \
4979 .free_list = NULL, \
4980 .num_used = 0, \
4981 }
4982
4983
4984 /**
4985 * INTERNAL_HIDDEN @endcond
4986 */
4987
4988 /**
4989 * @defgroup mem_slab_apis Memory Slab APIs
4990 * @ingroup kernel_apis
4991 * @{
4992 */
4993
4994 /**
4995 * @brief Statically define and initialize a memory slab in a public (non-static) scope.
4996 *
4997 * The memory slab's buffer contains @a slab_num_blocks memory blocks
4998 * that are @a slab_block_size bytes long. The buffer is aligned to a
4999 * @a slab_align -byte boundary. To ensure that each memory block is similarly
5000 * aligned to this boundary, @a slab_block_size must also be a multiple of
5001 * @a slab_align.
5002 *
5003 * The memory slab can be accessed outside the module where it is defined
5004 * using:
5005 *
5006 * @code extern struct k_mem_slab <name>; @endcode
5007 *
5008 * @note This macro cannot be used together with the static keyword.
5009 * If such a use-case is desired, use @ref K_MEM_SLAB_DEFINE_STATIC
5010 * instead.
5011 *
5012 * @param name Name of the memory slab.
5013 * @param slab_block_size Size of each memory block (in bytes).
5014 * @param slab_num_blocks Number of memory blocks.
5015 * @param slab_align Alignment of the memory slab's buffer (power of 2).
5016 */
5017 #define K_MEM_SLAB_DEFINE(name, slab_block_size, slab_num_blocks, slab_align) \
5018 char __noinit_named(k_mem_slab_buf_##name) \
5019 __aligned(WB_UP(slab_align)) \
5020 _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
5021 STRUCT_SECTION_ITERABLE(k_mem_slab, name) = \
5022 Z_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \
5023 WB_UP(slab_block_size), slab_num_blocks)
5024
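/*
 * Example: a slab of 8 blocks of 32 bytes each, with the buffer aligned to
 * a 4-byte boundary ("my_slab" is an illustrative name).
 *
 * @code
 * K_MEM_SLAB_DEFINE(my_slab, 32, 8, 4);
 * @endcode
 */
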
5025 /**
5026 * @brief Statically define and initialize a memory slab in a private (static) scope.
5027 *
5028 * The memory slab's buffer contains @a slab_num_blocks memory blocks
5029 * that are @a slab_block_size bytes long. The buffer is aligned to a
5030 * @a slab_align -byte boundary. To ensure that each memory block is similarly
5031 * aligned to this boundary, @a slab_block_size must also be a multiple of
5032 * @a slab_align.
5033 *
5034 * @param name Name of the memory slab.
5035 * @param slab_block_size Size of each memory block (in bytes).
5036 * @param slab_num_blocks Number of memory blocks.
5037 * @param slab_align Alignment of the memory slab's buffer (power of 2).
5038 */
5039 #define K_MEM_SLAB_DEFINE_STATIC(name, slab_block_size, slab_num_blocks, slab_align) \
5040 static char __noinit_named(k_mem_slab_buf_##name) \
5041 __aligned(WB_UP(slab_align)) \
5042 _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
5043 static STRUCT_SECTION_ITERABLE(k_mem_slab, name) = \
5044 Z_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \
5045 WB_UP(slab_block_size), slab_num_blocks)
5046
5047 /**
5048 * @brief Initialize a memory slab.
5049 *
5050 * Initializes a memory slab, prior to its first use.
5051 *
5052 * The memory slab's buffer contains @a slab_num_blocks memory blocks
5053 * that are @a slab_block_size bytes long. The buffer must be aligned to an
5054 * N-byte boundary matching a word boundary, where N is a power of 2
5055 * (i.e. 4 on 32-bit systems, 8, 16, ...).
5056 * To ensure that each memory block is similarly aligned to this boundary,
5057 * @a slab_block_size must also be a multiple of N.
5058 *
5059 * @param slab Address of the memory slab.
5060 * @param buffer Pointer to buffer used for the memory blocks.
5061 * @param block_size Size of each memory block (in bytes).
5062 * @param num_blocks Number of memory blocks.
5063 *
5064 * @retval 0 On success
5065 * @retval -EINVAL Invalid data supplied
5066 *
5067 */
5068 extern int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
5069 size_t block_size, uint32_t num_blocks);
5070
5071 /**
5072 * @brief Allocate memory from a memory slab.
5073 *
5074 * This routine allocates a memory block from a memory slab.
5075 *
5076 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5077 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5078 *
5079 * @funcprops \isr_ok
5080 *
5081 * @param slab Address of the memory slab.
5082 * @param mem Pointer to block address area.
5083 * @param timeout Non-negative waiting period to wait for operation to complete.
5084 * Use K_NO_WAIT to return without waiting,
5085 * or K_FOREVER to wait as long as necessary.
5086 *
5087 * @retval 0 Memory allocated. The block address area pointed at by @a mem
5088 * is set to the starting address of the memory block.
5089 * @retval -ENOMEM Returned without waiting.
5090 * @retval -EAGAIN Waiting period timed out.
5091 * @retval -EINVAL Invalid data supplied
5092 */
5093 extern int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem,
5094 k_timeout_t timeout);
5095
5096 /**
5097 * @brief Free memory allocated from a memory slab.
5098 *
5099 * This routine releases a previously allocated memory block back to its
5100 * associated memory slab.
5101 *
5102 * @param slab Address of the memory slab.
5103 * @param mem Pointer to block address area (as set by k_mem_slab_alloc()).
5104 */
5105 extern void k_mem_slab_free(struct k_mem_slab *slab, void **mem);
5106
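/*
 * Example: an allocate/use/free round trip against the slab defined in the
 * earlier sketch (error handling abbreviated).
 *
 * @code
 * void use_one_block(void)
 * {
 *     void *block;
 *
 *     // Wait up to 50 ms for a block to become available.
 *     if (k_mem_slab_alloc(&my_slab, &block, K_MSEC(50)) == 0) {
 *         memset(block, 0, 32);   // block is 32 bytes, per the define
 *         k_mem_slab_free(&my_slab, &block);
 *     } else {
 *         // -EAGAIN: no block freed up within the timeout
 *     }
 * }
 * @endcode
 */
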
5107 /**
5108 * @brief Get the number of used blocks in a memory slab.
5109 *
5110 * This routine gets the number of memory blocks that are currently
5111 * allocated in @a slab.
5112 *
5113 * @param slab Address of the memory slab.
5114 *
5115 * @return Number of allocated memory blocks.
5116 */
5117 static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
5118 {
5119 return slab->num_used;
5120 }
5121
5122 /**
5123 * @brief Get the number of maximum used blocks so far in a memory slab.
5124 *
5125 * This routine gets the maximum number of memory blocks that were
5126 * allocated in @a slab.
5127 *
5128 * @param slab Address of the memory slab.
5129 *
5130 * @return Maximum number of allocated memory blocks.
5131 */
5132 static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
5133 {
5134 #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
5135 return slab->max_used;
5136 #else
5137 ARG_UNUSED(slab);
5138 return 0;
5139 #endif
5140 }
5141
5142 /**
5143 * @brief Get the number of unused blocks in a memory slab.
5144 *
5145 * This routine gets the number of memory blocks that are currently
5146 * unallocated in @a slab.
5147 *
5148 * @param slab Address of the memory slab.
5149 *
5150 * @return Number of unallocated memory blocks.
5151 */
5152 static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
5153 {
5154 return slab->num_blocks - slab->num_used;
5155 }
5156
5157 /**
5158 * @brief Get the memory stats for a memory slab
5159 *
5160 * This routine gets the runtime memory usage stats for the slab @a slab.
5161 *
5162 * @param slab Address of the memory slab
5163 * @param stats Pointer to memory into which to copy memory usage statistics
5164 *
5165 * @retval 0 Success
5166 * @retval -EINVAL Any parameter points to NULL
5167 */
5168
5169 int k_mem_slab_runtime_stats_get(struct k_mem_slab *slab, struct sys_memory_stats *stats);
5170
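/*
 * Example: querying slab usage at runtime (a sketch; the field names are
 * assumed to follow struct sys_memory_stats from zephyr/sys/mem_stats.h).
 *
 * @code
 * struct sys_memory_stats stats;
 *
 * if (k_mem_slab_runtime_stats_get(&my_slab, &stats) == 0) {
 *     printk("slab: %zu bytes allocated, %zu bytes free\n",
 *            stats.allocated_bytes, stats.free_bytes);
 * }
 * @endcode
 */
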
5171 /**
5172 * @brief Reset the maximum memory usage for a slab
5173 *
5174 * This routine resets the maximum memory usage for the slab @a slab to its
5175 * current usage.
5176 *
5177 * @param slab Address of the memory slab
5178 *
5179 * @retval 0 Success
5180 * @retval -EINVAL Memory slab is NULL
5181 */
5182 int k_mem_slab_runtime_stats_reset_max(struct k_mem_slab *slab);
5183
5184 /** @} */
5185
5186 /**
5187 * @addtogroup heap_apis
5188 * @{
5189 */
5190
5191 /* kernel synchronized heap struct */
5192
5193 struct k_heap {
5194 struct sys_heap heap;
5195 _wait_q_t wait_q;
5196 struct k_spinlock lock;
5197 };
5198
5199 /**
5200 * @brief Initialize a k_heap
5201 *
5202 * This constructs a synchronized k_heap object over a memory region
5203 * specified by the user. Note that while any alignment and size can
5204 * be passed as valid parameters, internal alignment restrictions
5205 * inside the inner sys_heap mean that not all bytes may be usable as
5206 * allocated memory.
5207 *
5208 * @param h Heap struct to initialize
5209 * @param mem Pointer to memory.
5210 * @param bytes Size of memory region, in bytes
5211 */
5212 void k_heap_init(struct k_heap *h, void *mem, size_t bytes);
5213
5214 /** @brief Allocate aligned memory from a k_heap
5215 *
5216 * Behaves in all ways like k_heap_alloc(), except that the returned
5217 * memory (if available) will have a starting address in memory which
5218 * is a multiple of the specified power-of-two alignment value in
5219 * bytes. The resulting memory can be returned to the heap using
5220 * k_heap_free().
5221 *
5222 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5223 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5224 *
5225 * @funcprops \isr_ok
5226 *
5227 * @param h Heap from which to allocate
5228 * @param align Alignment in bytes, must be a power of two
5229 * @param bytes Number of bytes requested
5230 * @param timeout How long to wait, or K_NO_WAIT
5231 * @return Pointer to memory the caller can now use, or NULL on timeout
5232 */
5233 void *k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes,
5234 k_timeout_t timeout);
5235
5236 /**
5237 * @brief Allocate memory from a k_heap
5238 *
5239 * Allocates and returns a memory buffer from the memory region owned
5240 * by the heap. If no memory is available immediately, the call will
5241 * block for the specified timeout (constructed via the standard
5242 * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
5243 * freed. If the allocation cannot be performed by the expiration of
5244 * the timeout, NULL will be returned.
5245 * Allocated memory is aligned on a multiple of pointer sizes.
5246 *
5247 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5248 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5249 *
5250 * @funcprops \isr_ok
5251 *
5252 * @param h Heap from which to allocate
5253 * @param bytes Desired size of block to allocate
5254 * @param timeout How long to wait, or K_NO_WAIT
5255 * @return A pointer to valid heap memory, or NULL
5256 */
5257 void *k_heap_alloc(struct k_heap *h, size_t bytes,
5258 k_timeout_t timeout);
5259
5260 /**
5261 * @brief Free memory allocated by k_heap_alloc()
5262 *
5263 * Returns the specified memory block, which must have been returned
5264 * from k_heap_alloc(), to the heap for use by other callers. Passing
5265 * a NULL block is legal, and has no effect.
5266 *
5267 * @param h Heap to which to return the memory
5268 * @param mem A valid memory block, or NULL
5269 */
5270 void k_heap_free(struct k_heap *h, void *mem);
5271
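/*
 * Example: a synchronized heap over a caller-supplied region ("heap_mem"
 * and "my_heap" are illustrative names; k_heap_init() is expected to run
 * once before any allocation).
 *
 * @code
 * static char heap_mem[1024] __aligned(8);
 * static struct k_heap my_heap;
 *
 * void heap_user(void)
 * {
 *     void *p;
 *
 *     k_heap_init(&my_heap, heap_mem, sizeof(heap_mem));
 *
 *     p = k_heap_alloc(&my_heap, 64, K_NO_WAIT);
 *     if (p != NULL) {
 *         k_heap_free(&my_heap, p);
 *     }
 * }
 * @endcode
 */
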
5272 /* Hand-calculated minimum heap sizes needed to return a successful
5273 * 1-byte allocation. See details in lib/os/heap.[ch]
5274 */
5275 #define Z_HEAP_MIN_SIZE (sizeof(void *) > 4 ? 56 : 44)
5276
5277 /**
5278 * @brief Define a static k_heap in the specified linker section
5279 *
5280 * This macro defines and initializes a static memory region and
5281 * k_heap of the requested size in the specified linker section.
5282 * After kernel start, &name can be used as if k_heap_init() had
5283 * been called.
5284 *
5285 * Note that this macro enforces a minimum size on the memory region
5286 * to accommodate metadata requirements. Very small heaps will be
5287 * padded to fit.
5288 *
5289 * @param name Symbol name for the struct k_heap object
5290 * @param bytes Size of memory region, in bytes
5291 * @param in_section __attribute__((section(name)))
5292 */
5293 #define Z_HEAP_DEFINE_IN_SECT(name, bytes, in_section) \
5294 char in_section \
5295 __aligned(8) /* CHUNK_UNIT */ \
5296 kheap_##name[MAX(bytes, Z_HEAP_MIN_SIZE)]; \
5297 STRUCT_SECTION_ITERABLE(k_heap, name) = { \
5298 .heap = { \
5299 .init_mem = kheap_##name, \
5300 .init_bytes = MAX(bytes, Z_HEAP_MIN_SIZE), \
5301 }, \
5302 }
5303
5304 /**
5305 * @brief Define a static k_heap
5306 *
5307 * This macro defines and initializes a static memory region and
5308 * k_heap of the requested size. After kernel start, &name can be
5309 * used as if k_heap_init() had been called.
5310 *
5311 * Note that this macro enforces a minimum size on the memory region
5312 * to accommodate metadata requirements. Very small heaps will be
5313 * padded to fit.
5314 *
5315 * @param name Symbol name for the struct k_heap object
5316 * @param bytes Size of memory region, in bytes
5317 */
5318 #define K_HEAP_DEFINE(name, bytes) \
5319 Z_HEAP_DEFINE_IN_SECT(name, bytes, \
5320 __noinit_named(kheap_buf_##name))
5321
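/*
 * Example: a statically defined heap, usable after kernel start without an
 * explicit k_heap_init() call ("app_heap" is an illustrative name).
 *
 * @code
 * K_HEAP_DEFINE(app_heap, 2048);
 *
 * void thread_fn(void)
 * {
 *     void *buf = k_heap_alloc(&app_heap, 128, K_FOREVER);
 *
 *     // ... use buf ...
 *     k_heap_free(&app_heap, buf);
 * }
 * @endcode
 */
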
5322 /**
5323 * @brief Define a static k_heap in uncached memory
5324 *
5325 * This macro defines and initializes a static memory region and
5326 * k_heap of the requested size in uncached memory. After kernel
5327 * start, &name can be used as if k_heap_init() had been called.
5328 *
5329 * Note that this macro enforces a minimum size on the memory region
5330 * to accommodate metadata requirements. Very small heaps will be
5331 * padded to fit.
5332 *
5333 * @param name Symbol name for the struct k_heap object
5334 * @param bytes Size of memory region, in bytes
5335 */
5336 #define K_HEAP_DEFINE_NOCACHE(name, bytes) \
5337 Z_HEAP_DEFINE_IN_SECT(name, bytes, __nocache)
5338
5339 /**
5340 * @}
5341 */
5342
5343 /**
5344 * @defgroup heap_apis Heap APIs
5345 * @ingroup kernel_apis
5346 * @{
5347 */
5348
5349 /**
5350 * @brief Allocate memory from the heap with a specified alignment.
5351 *
5352 * This routine provides semantics similar to aligned_alloc(); memory is
5353 * allocated from the heap with a specified alignment. However, one minor
5354 * difference is that k_aligned_alloc() accepts any non-zero @p size,
5355 * whereas aligned_alloc() only accepts a @p size that is an integral
5356 * multiple of @p align.
5357 *
5358 * Above, aligned_alloc() refers to:
5359 * C11 standard (ISO/IEC 9899:2011): 7.22.3.1
5360 * The aligned_alloc function (p: 347-348)
5361 *
5362 * @param align Alignment of memory requested (in bytes).
5363 * @param size Amount of memory requested (in bytes).
5364 *
5365 * @return Address of the allocated memory if successful; otherwise NULL.
5366 */
5367 extern void *k_aligned_alloc(size_t align, size_t size);
5368
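/*
 * Example: the documented difference from C11 aligned_alloc(): the size
 * need not be a multiple of the alignment (a sketch; assumes the system
 * heap is configured).
 *
 * @code
 * void *p = k_aligned_alloc(64, 100);   // 64-byte aligned, 100 bytes
 *
 * if (p != NULL) {
 *     k_free(p);
 * }
 * @endcode
 */
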
5369 /**
5370 * @brief Allocate memory from the heap.
5371 *
5372 * This routine provides traditional malloc() semantics. Memory is
5373 * allocated from the heap memory pool.
5374 * Allocated memory is aligned on a multiple of pointer sizes.
5375 *
5376 * @param size Amount of memory requested (in bytes).
5377 *
5378 * @return Address of the allocated memory if successful; otherwise NULL.
5379 */
5380 extern void *k_malloc(size_t size);
5381
5382 /**
5383 * @brief Free memory allocated from heap.
5384 *
5385 * This routine provides traditional free() semantics. The memory being
5386 * returned must have been allocated from the heap memory pool.
5387 *
5388 * If @a ptr is NULL, no operation is performed.
5389 *
5390 * @param ptr Pointer to previously allocated memory.
5391 */
5392 extern void k_free(void *ptr);
5393
5394 /**
5395 * @brief Allocate memory from heap, array style
5396 *
5397 * This routine provides traditional calloc() semantics. Memory is
5398 * allocated from the heap memory pool and zeroed.
5399 *
5400 * @param nmemb Number of elements in the requested array
5401 * @param size Size of each array element (in bytes).
5402 *
5403 * @return Address of the allocated memory if successful; otherwise NULL.
5404 */
5405 extern void *k_calloc(size_t nmemb, size_t size);
5406
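/*
 * Example: zeroed array allocation from the system heap (a sketch; the
 * heap allocators assume a nonzero CONFIG_HEAP_MEM_POOL_SIZE).
 *
 * @code
 * void make_table(void)
 * {
 *     uint32_t *arr = k_calloc(10, sizeof(uint32_t));  // 10 zeroed entries
 *
 *     if (arr != NULL) {
 *         arr[0] = 42;
 *         k_free(arr);
 *     }
 * }
 * @endcode
 */
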
5407 /** @} */
5408
5409 /* polling API - PRIVATE */
5410
5411 #ifdef CONFIG_POLL
5412 #define _INIT_OBJ_POLL_EVENT(obj) do { (obj)->poll_event = NULL; } while (false)
5413 #else
5414 #define _INIT_OBJ_POLL_EVENT(obj) do { } while (false)
5415 #endif
5416
5417 /* private - types bit positions */
5418 enum _poll_types_bits {
5419 /* can be used to ignore an event */
5420 _POLL_TYPE_IGNORE,
5421
5422 /* to be signaled by k_poll_signal_raise() */
5423 _POLL_TYPE_SIGNAL,
5424
5425 /* semaphore availability */
5426 _POLL_TYPE_SEM_AVAILABLE,
5427
5428 /* queue/FIFO/LIFO data availability */
5429 _POLL_TYPE_DATA_AVAILABLE,
5430
5431 /* msgq data availability */
5432 _POLL_TYPE_MSGQ_DATA_AVAILABLE,
5433
5434 /* pipe data availability */
5435 _POLL_TYPE_PIPE_DATA_AVAILABLE,
5436
5437 _POLL_NUM_TYPES
5438 };
5439
5440 #define Z_POLL_TYPE_BIT(type) (1U << ((type) - 1U))
5441
5442 /* private - states bit positions */
5443 enum _poll_states_bits {
5444 /* default state when creating event */
5445 _POLL_STATE_NOT_READY,
5446
5447 /* signaled by k_poll_signal_raise() */
5448 _POLL_STATE_SIGNALED,
5449
5450 /* semaphore is available */
5451 _POLL_STATE_SEM_AVAILABLE,
5452
5453 /* data is available to read on queue/FIFO/LIFO */
5454 _POLL_STATE_DATA_AVAILABLE,
5455
5456 /* queue/FIFO/LIFO wait was cancelled */
5457 _POLL_STATE_CANCELLED,
5458
5459 /* data is available to read on a message queue */
5460 _POLL_STATE_MSGQ_DATA_AVAILABLE,
5461
5462 /* data is available to read from a pipe */
5463 _POLL_STATE_PIPE_DATA_AVAILABLE,
5464
5465 _POLL_NUM_STATES
5466 };
5467
5468 #define Z_POLL_STATE_BIT(state) (1U << ((state) - 1U))
5469
5470 #define _POLL_EVENT_NUM_UNUSED_BITS \
5471 (32 - (0 \
5472 + 8 /* tag */ \
5473 + _POLL_NUM_TYPES \
5474 + _POLL_NUM_STATES \
5475 + 1 /* modes */ \
5476 ))
5477
5478 /* end of polling API - PRIVATE */
5479
5480
5481 /**
5482 * @defgroup poll_apis Async polling APIs
5483 * @ingroup kernel_apis
5484 * @{
5485 */
5486
5487 /* Public polling API */
5488
5489 /* public - values for k_poll_event.type bitfield */
5490 #define K_POLL_TYPE_IGNORE 0
5491 #define K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
5492 #define K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
5493 #define K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
5494 #define K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE
5495 #define K_POLL_TYPE_MSGQ_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_MSGQ_DATA_AVAILABLE)
5496 #define K_POLL_TYPE_PIPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_PIPE_DATA_AVAILABLE)
5497
5498 /* public - polling modes */
5499 enum k_poll_modes {
5500 /* polling thread does not take ownership of objects when available */
5501 K_POLL_MODE_NOTIFY_ONLY = 0,
5502
5503 K_POLL_NUM_MODES
5504 };
5505
5506 /* public - values for k_poll_event.state bitfield */
5507 #define K_POLL_STATE_NOT_READY 0
5508 #define K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED)
5509 #define K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
5510 #define K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
5511 #define K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE
5512 #define K_POLL_STATE_MSGQ_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_MSGQ_DATA_AVAILABLE)
5513 #define K_POLL_STATE_PIPE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_PIPE_DATA_AVAILABLE)
5514 #define K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED)
5515
5516 /* public - poll signal object */
5517 struct k_poll_signal {
5518 /** PRIVATE - DO NOT TOUCH */
5519 sys_dlist_t poll_events;
5520
5521 /**
5522 * 1 if the event has been signaled, 0 otherwise. Stays set to 1 until
5523 * user resets it to 0.
5524 */
5525 unsigned int signaled;
5526
5527 /** custom result value passed to k_poll_signal_raise() if needed */
5528 int result;
5529 };
5530
5531 #define K_POLL_SIGNAL_INITIALIZER(obj) \
5532 { \
5533 .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events), \
5534 .signaled = 0, \
5535 .result = 0, \
5536 }
5537 /**
5538 * @brief Poll Event
5539 *
5540 */
5541 struct k_poll_event {
5542 /** PRIVATE - DO NOT TOUCH */
5543 sys_dnode_t _node;
5544
5545 /** PRIVATE - DO NOT TOUCH */
5546 struct z_poller *poller;
5547
5548 /** optional user-specified tag, opaque, untouched by the API */
5549 uint32_t tag:8;
5550
5551 /** bitfield of event types (bitwise-ORed K_POLL_TYPE_xxx values) */
5552 uint32_t type:_POLL_NUM_TYPES;
5553
5554 /** bitfield of event states (bitwise-ORed K_POLL_STATE_xxx values) */
5555 uint32_t state:_POLL_NUM_STATES;
5556
5557 /** mode of operation, from enum k_poll_modes */
5558 uint32_t mode:1;
5559
5560 /** unused bits in 32-bit word */
5561 uint32_t unused:_POLL_EVENT_NUM_UNUSED_BITS;
5562
5563 /** per-type data */
5564 union {
5565 void *obj;
5566 struct k_poll_signal *signal;
5567 struct k_sem *sem;
5568 struct k_fifo *fifo;
5569 struct k_queue *queue;
5570 struct k_msgq *msgq;
5571 #ifdef CONFIG_PIPES
5572 struct k_pipe *pipe;
5573 #endif
5574 };
5575 };
5576
5577 #define K_POLL_EVENT_INITIALIZER(_event_type, _event_mode, _event_obj) \
5578 { \
5579 .poller = NULL, \
5580 .type = _event_type, \
5581 .state = K_POLL_STATE_NOT_READY, \
5582 .mode = _event_mode, \
5583 .unused = 0, \
5584 { \
5585 .obj = _event_obj, \
5586 }, \
5587 }
5588
5589 #define K_POLL_EVENT_STATIC_INITIALIZER(_event_type, _event_mode, _event_obj, \
5590 event_tag) \
5591 { \
5592 .tag = event_tag, \
5593 .type = _event_type, \
5594 .state = K_POLL_STATE_NOT_READY, \
5595 .mode = _event_mode, \
5596 .unused = 0, \
5597 { \
5598 .obj = _event_obj, \
5599 }, \
5600 }
5601
5602 /**
5603 * @brief Initialize one struct k_poll_event instance
5604 *
5605 * After this routine is called on a poll event, the event is ready to be
5606 * placed in an event array to be passed to k_poll().
5607 *
5608 * @param event The event to initialize.
5609 * @param type A bitfield of the types of event, from the K_POLL_TYPE_xxx
5610 * values. Only values that apply to the same object being polled
5611 * can be used together. Choosing K_POLL_TYPE_IGNORE disables the
5612 * event.
5613 * @param mode Reserved for future use; pass K_POLL_MODE_NOTIFY_ONLY.
5614 * @param obj Kernel object or poll signal.
5615 */
5616
5617 extern void k_poll_event_init(struct k_poll_event *event, uint32_t type,
5618 int mode, void *obj);
5619
5620 /**
5621 * @brief Wait for one or many of multiple poll events to occur
5622 *
5623 * This routine allows a thread to wait concurrently for one or many of
5624 * multiple poll events to have occurred. Such events can be a kernel object
5625 * being available, like a semaphore, or a poll signal event.
5626 *
5627 * When an event notifies that a kernel object is available, the kernel object
5628 * is not "given" to the thread calling k_poll(): it merely signals the fact
5629 * that the object was available when the k_poll() call was in effect. Also,
5630 * all threads trying to acquire an object the regular way, i.e. by pending on
5631 * the object, have precedence over the thread polling on the object. This
5632 * means that the polling thread will never get the poll event on an object
5633 * until the object becomes available and its pend queue is empty. For this
5634 * reason, the k_poll() call is more effective when the objects being polled
5635 * only have one thread, the polling thread, trying to acquire them.
5636 *
5637 * When k_poll() returns 0, the caller should loop on all the events that were
5638 * passed to k_poll() and check the state field for the values that were
5639 * expected and take the associated actions.
5640 *
5641 * Before being reused for another call to k_poll(), the user has to reset the
5642 * state field to K_POLL_STATE_NOT_READY.
5643 *
5644 * When called from user mode, a temporary memory allocation is required from
5645 * the caller's resource pool.
5646 *
5647 * @param events An array of events to be polled for.
5648 * @param num_events The number of events in the array.
5649 * @param timeout Waiting period for an event to be ready,
5650 * or one of the special values K_NO_WAIT and K_FOREVER.
5651 *
5652 * @retval 0 One or more events are ready.
5653 * @retval -EAGAIN Waiting period timed out.
5654 * @retval -EINTR Polling has been interrupted, e.g. with
5655 * k_queue_cancel_wait(). All output events are still set and valid,
5656 * cancelled event(s) will be set to K_POLL_STATE_CANCELLED. In other
5657 * words, -EINTR status means that at least one of output events is
5658 * K_POLL_STATE_CANCELLED.
5659 * @retval -ENOMEM Thread resource pool insufficient memory (user mode only)
5660 * @retval -EINVAL Bad parameters (user mode only)
5661 */
5662
5663 __syscall int k_poll(struct k_poll_event *events, int num_events,
5664 k_timeout_t timeout);
5665
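/*
 * Example: waiting on a semaphore or a poll signal, whichever is ready
 * first (a sketch; "my_sem" and "my_sig" are illustrative names, assumed to
 * be initialized elsewhere with k_sem_init() and k_poll_signal_init()).
 *
 * @code
 * struct k_poll_event events[2] = {
 *     K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
 *                              K_POLL_MODE_NOTIFY_ONLY, &my_sem),
 *     K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL,
 *                              K_POLL_MODE_NOTIFY_ONLY, &my_sig),
 * };
 *
 * void wait_for_input(void)
 * {
 *     if (k_poll(events, 2, K_FOREVER) == 0) {
 *         if (events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
 *             k_sem_take(&my_sem, K_NO_WAIT);  // actually acquire it
 *         }
 *         if (events[1].state == K_POLL_STATE_SIGNALED) {
 *             k_poll_signal_reset(&my_sig);    // re-arm the signal
 *         }
 *         // Mandatory before reusing the array in another k_poll() call.
 *         events[0].state = K_POLL_STATE_NOT_READY;
 *         events[1].state = K_POLL_STATE_NOT_READY;
 *     }
 * }
 * @endcode
 */
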
5666 /**
5667 * @brief Initialize a poll signal object.
5668 *
5669 * Ready a poll signal object to be signaled via k_poll_signal_raise().
5670 *
5671 * @param sig A poll signal.
5672 */
5673
5674 __syscall void k_poll_signal_init(struct k_poll_signal *sig);
5675
5676 /**
5677 * @brief Reset a poll signal object's state to unsignaled.
5678 *
5679 * @param sig A poll signal object
5680 */
5681 __syscall void k_poll_signal_reset(struct k_poll_signal *sig);
5682
5683 /**
5684 * @brief Fetch the signaled state and result value of a poll signal
5685 *
5686 * @param sig A poll signal object
5687 * @param signaled An integer buffer which will be written nonzero if the
5688 * object was signaled
5689 * @param result An integer destination buffer which will be written with the
5690 * result value if the object was signaled, or an undefined
5691 * value if it was not.
5692 */
5693 __syscall void k_poll_signal_check(struct k_poll_signal *sig,
5694 unsigned int *signaled, int *result);
5695
5696 /**
5697 * @brief Signal a poll signal object.
5698 *
5699 * This routine makes ready a poll signal, which is basically a poll event of
5700 * type K_POLL_TYPE_SIGNAL. If a thread was polling on that event, it will be
5701 * made ready to run. A @a result value can be specified.
5702 *
5703 * The poll signal contains a 'signaled' field that, when set by
5704 * k_poll_signal_raise(), stays set until the user sets it back to 0 with
5705 * k_poll_signal_reset(). It thus has to be reset by the user before being
5706 * passed again to k_poll() or k_poll() will consider it being signaled, and
5707 * will return immediately.
5708 *
5709 * @note The result is stored and the 'signaled' field is set even if
5710 * this function returns an error indicating that an expiring poll was
5711 * not notified. The next k_poll() will detect the missed raise.
5712 *
5713 * @param sig A poll signal.
5714 * @param result The value to store in the result field of the signal.
5715 *
5716 * @retval 0 The signal was delivered successfully.
5717 * @retval -EAGAIN The polling thread's timeout is in the process of expiring.
5718 */
5719
5720 __syscall int k_poll_signal_raise(struct k_poll_signal *sig, int result);
5721
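/*
 * Example: raising the signal polled on in the earlier sketch, passing a
 * result code for the poller to read with k_poll_signal_check().
 *
 * @code
 * void notify_ready(int status)
 * {
 *     if (k_poll_signal_raise(&my_sig, status) == -EAGAIN) {
 *         // The poller's timeout was expiring; the raise is still
 *         // recorded and the next k_poll() will observe it.
 *     }
 * }
 * @endcode
 */
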
5722 /**
5723 * @internal
5724 */
5725 extern void z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state);
5726
5727 /** @} */
5728
5729 /**
5730 * @defgroup cpu_idle_apis CPU Idling APIs
5731 * @ingroup kernel_apis
5732 * @{
5733 */
5734 /**
5735 * @brief Make the CPU idle.
5736 *
5737 * This function makes the CPU idle until an event wakes it up.
5738 *
5739 * In a regular system, the idle thread should be the only thread responsible
5740 * for making the CPU idle and triggering any type of power management.
5741 * However, in some more constrained systems, such as a single-threaded system,
5742 * that sole thread is responsible for this, if needed.
5743 *
5744 * @note In some architectures, before returning, the function unmasks interrupts
5745 * unconditionally.
5746 */
5747 static inline void k_cpu_idle(void)
5748 {
5749 arch_cpu_idle();
5750 }
5751
5752 /**
5753 * @brief Make the CPU idle in an atomic fashion.
5754 *
5755 * Similar to k_cpu_idle(), but must be called with interrupts locked.
5756 *
5757 * Enabling interrupts and entering a low-power mode will be atomic,
5758 * i.e. there will be no period of time where interrupts are enabled before
5759 * the processor enters a low-power mode.
5760 *
5761 * After waking up from the low-power mode, the interrupt lockout state will
5762 * be restored as if by irq_unlock(key).
5763 *
5764 * @param key Interrupt locking key obtained from irq_lock().
5765 */
5766 static inline void k_cpu_atomic_idle(unsigned int key)
5767 {
5768 arch_cpu_atomic_idle(key);
5769 }
5770
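/*
 * Example: the race-free idle pattern this API exists for ("work_pending"
 * is a hypothetical predicate).
 *
 * @code
 * void idle_step(void)
 * {
 *     unsigned int key = irq_lock();
 *
 *     if (!work_pending()) {
 *         // Interrupts re-enable and the CPU idles atomically, so a wakeup
 *         // interrupt cannot slip in between the check and the idle.
 *         k_cpu_atomic_idle(key);
 *     } else {
 *         irq_unlock(key);
 *     }
 * }
 * @endcode
 */
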
5771 /**
5772 * @}
5773 */
5774
5775 /**
5776 * @internal
5777 */
5778 #ifdef ARCH_EXCEPT
5779 /* This architecture has direct support for triggering a CPU exception */
5780 #define z_except_reason(reason) ARCH_EXCEPT(reason)
5781 #else
5782
5783 #if !defined(CONFIG_ASSERT_NO_FILE_INFO)
5784 #define __EXCEPT_LOC() __ASSERT_PRINT("@ %s:%d\n", __FILE__, __LINE__)
5785 #else
5786 #define __EXCEPT_LOC()
5787 #endif
5788
5789 /* NOTE: This is the implementation for arches that do not implement
5790 * ARCH_EXCEPT() to generate a real CPU exception.
5791 *
5792 * We won't have a real exception frame to determine the PC value when
5793 * the oops occurred, so print file and line number before we jump into
5794 * the fatal error handler.
5795 */
5796 #define z_except_reason(reason) do { \
5797 __EXCEPT_LOC(); \
5798 z_fatal_error(reason, NULL); \
5799 } while (false)
5800
5801 #endif /* ARCH_EXCEPT */
5802
5803 /**
5804 * @brief Fatally terminate a thread
5805 *
5806 * This should be called when a thread has encountered an unrecoverable
5807 * runtime condition and needs to terminate. What this ultimately
5808 * means is determined by the _fatal_error_handler() implementation, which
5809 * will be called with reason code K_ERR_KERNEL_OOPS.
5810 *
5811 * If this is called from ISR context, the default system fatal error handler
5812 * will treat it as an unrecoverable system error, just like k_panic().
5813 */
5814 #define k_oops() z_except_reason(K_ERR_KERNEL_OOPS)
5815
5816 /**
5817 * @brief Fatally terminate the system
5818 *
5819 * This should be called when the Zephyr kernel has encountered an
5820 * unrecoverable runtime condition and needs to terminate. What this ultimately
5821 * means is determined by the _fatal_error_handler() implementation, which
5822 * will be called with reason code K_ERR_KERNEL_PANIC.
5823 */
5824 #define k_panic() z_except_reason(K_ERR_KERNEL_PANIC)
5825
5826 /*
5827 * private APIs that are utilized by one or more public APIs
5828 */
5829
5830 /**
5831 * @internal
5832 */
5833 extern void z_init_thread_base(struct _thread_base *thread_base,
5834 int priority, uint32_t initial_state,
5835 unsigned int options);
5836
5837 #ifdef CONFIG_MULTITHREADING
5838 /**
5839 * @internal
5840 */
5841 extern void z_init_static_threads(void);
5842 #else
5843 /**
5844 * @internal
5845 */
5846 #define z_init_static_threads() do { } while (false)
5847 #endif
5848
5849 /**
5850 * @internal
5851 */
5852 extern bool z_is_thread_essential(void);
5853
5854 #ifdef CONFIG_SMP
5855 void z_smp_thread_init(void *arg, struct k_thread *thread);
5856 void z_smp_thread_swap(void);
5857 #endif
5858
5859 /**
5860 * @internal
5861 */
5862 extern void z_timer_expiration_handler(struct _timeout *t);
5863
5864 #ifdef CONFIG_PRINTK
5865 /**
5866 * @brief Emit a character buffer to the console device
5867 *
5868 * @param c String of characters to print
5869 * @param n The length of the string
5870 *
5871 */
5872 __syscall void k_str_out(char *c, size_t n);
5873 #endif
5874
5875 /**
5876 * @brief Disable preservation of floating point context information.
5877 *
5878 * This routine informs the kernel that the specified thread
5879 * will no longer be using the floating point registers.
5880 *
5881 * @warning
5882 * Some architectures apply restrictions on how the disabling of floating
5883 * point preservation may be requested, see arch_float_disable.
5884 *
5885 * @warning
5886 * This routine should only be used to disable floating point support for
5887 * a thread that currently has such support enabled.
5888 *
5889 * @param thread ID of thread.
5890 *
5891 * @retval 0 On success.
5892 * @retval -ENOTSUP If the floating point disabling is not implemented.
5893 * @retval -EINVAL If the floating point disabling could not be performed.
5894 */
5895 __syscall int k_float_disable(struct k_thread *thread);
5896
5897 /**
5898 * @brief Enable preservation of floating point context information.
5899 *
5900 * This routine informs the kernel that the specified thread
5901 * will use the floating point registers.
5902 *
5903 * Invoking this routine initializes the thread's floating point context info
5904 * to that of an FPU that has been reset. The next time the thread is scheduled
5905 * by z_swap() it will either inherit an FPU that is guaranteed to be in a
5906 * "sane" state (if the most recent user of the FPU was cooperatively swapped
5907 * out) or the thread's own floating point context will be loaded (if the most
5908 * recent user of the FPU was preempted, or if this thread is the first user
5909 * of the FPU). Thereafter, the kernel will protect the thread's FP context
5910 * so that it is not altered during a preemptive context switch.
5911 *
5912 * The @a options parameter indicates which floating point register sets will
5913 * be used by the specified thread.
5914 *
5915 * For x86, the supported options are:
5916 *
5917 * - K_FP_REGS indicates x87 FPU and MMX registers only
5918 * - K_SSE_REGS indicates SSE registers (and also x87 FPU and MMX registers)
5919 *
5920 * @warning
5921 * Some architectures apply restrictions on how the enabling of floating
5922 * point preservation may be requested, see arch_float_enable.
5923 *
5924 * @warning
5925 * This routine should only be used to enable floating point support for
5926 * a thread that does not currently have such support enabled.
5927 *
5928 * @param thread ID of thread.
5929 * @param options architecture dependent options
5930 *
5931 * @retval 0 On success.
5932 * @retval -ENOTSUP If the floating point enabling is not implemented.
5933 * @retval -EINVAL If the floating point enabling could not be performed.
5934 */
5935 __syscall int k_float_enable(struct k_thread *thread, unsigned int options);
5936
5937 /**
5938 * @brief Get the runtime statistics of a thread
5939 *
5940 * @param thread ID of thread.
5941 * @param stats Pointer to struct to copy statistics into.
5942 * @return -EINVAL if null pointers, otherwise 0
5943 */
5944 int k_thread_runtime_stats_get(k_tid_t thread,
5945 k_thread_runtime_stats_t *stats);
5946
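/*
 * Example: reading the current thread's accumulated execution cycles (a
 * sketch; requires runtime statistics to be enabled in Kconfig, and the
 * execution_cycles field name is assumed from the runtime-stats structure).
 *
 * @code
 * k_thread_runtime_stats_t stats;
 *
 * if (k_thread_runtime_stats_get(k_current_get(), &stats) == 0) {
 *     printk("ran for %llu cycles\n",
 *            (unsigned long long)stats.execution_cycles);
 * }
 * @endcode
 */
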
5947 /**
5948 * @brief Get the runtime statistics of all threads
5949 *
5950 * @param stats Pointer to struct to copy statistics into.
5951 * @return -EINVAL if null pointers, otherwise 0
5952 */
5953 int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats);
5954
5955 /**
5956 * @brief Enable gathering of runtime statistics for specified thread
5957 *
5958 * This routine enables the gathering of runtime statistics for the specified
5959 * thread.
5960 *
5961 * @param thread ID of thread
5962 * @return -EINVAL if invalid thread ID, otherwise 0
5963 */
5964 extern int k_thread_runtime_stats_enable(k_tid_t thread);
5965
5966 /**
5967 * @brief Disable gathering of runtime statistics for specified thread
5968 *
5969 * This routine disables the gathering of runtime statistics for the specified
5970 * thread.
5971 *
5972 * @param thread ID of thread
5973 * @return -EINVAL if invalid thread ID, otherwise 0
5974 */
5975 extern int k_thread_runtime_stats_disable(k_tid_t thread);
5976
5977 /**
5978 * @brief Enable gathering of system runtime statistics
5979 *
5980 * This routine enables the gathering of system runtime statistics. Note that
5981 * it does not affect the gathering of similar statistics for individual
5982 * threads.
5983 */
5984 extern void k_sys_runtime_stats_enable(void);
5985
5986 /**
5987 * @brief Disable gathering of system runtime statistics
5988 *
5989 * This routine disables the gathering of system runtime statistics. Note that
5990 * it does not affect the gathering of similar statistics for individual
5991 * threads.
5992 */
5993 extern void k_sys_runtime_stats_disable(void);
5994
5995 #ifdef __cplusplus
5996 }
5997 #endif
5998
5999 #include <zephyr/tracing/tracing.h>
6000 #include <syscalls/kernel.h>
6001
6002 #endif /* !_ASMLANGUAGE */
6003
6004 #endif /* ZEPHYR_INCLUDE_KERNEL_H_ */
6005