1 /*
2 * Copyright (c) 2016, Wind River Systems, Inc.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 /**
8 * @file
9 *
10 * @brief Public kernel APIs.
11 */
12
13 #ifndef ZEPHYR_INCLUDE_KERNEL_H_
14 #define ZEPHYR_INCLUDE_KERNEL_H_
15
16 #if !defined(_ASMLANGUAGE)
17 #include <zephyr/kernel_includes.h>
18 #include <errno.h>
19 #include <limits.h>
20 #include <stdbool.h>
21 #include <zephyr/toolchain.h>
22 #include <zephyr/tracing/tracing_macros.h>
23 #include <zephyr/sys/mem_stats.h>
24 #include <zephyr/sys/iterable_sections.h>
25
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29
30 /*
31 * Zephyr currently assumes the size of a couple standard types to simplify
32 * print string formats. Let's make sure this doesn't change without notice.
33 */
34 BUILD_ASSERT(sizeof(int32_t) == sizeof(int));
35 BUILD_ASSERT(sizeof(int64_t) == sizeof(long long));
36 BUILD_ASSERT(sizeof(intptr_t) == sizeof(long));
37
38 /**
39 * @brief Kernel APIs
40 * @defgroup kernel_apis Kernel APIs
41 * @{
42 * @}
43 */
44
45 #define K_ANY NULL
46
47 #if CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES == 0
48 #error Zero available thread priorities defined!
49 #endif
50
51 #define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
52 #define K_PRIO_PREEMPT(x) (x)
53
54 #define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
55 #define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
56 #define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
57 #define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
58 #define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)
59
60 #ifdef CONFIG_POLL
61 #define Z_POLL_EVENT_OBJ_INIT(obj) \
62 .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
63 #define Z_DECL_POLL_EVENT sys_dlist_t poll_events;
64 #else
65 #define Z_POLL_EVENT_OBJ_INIT(obj)
66 #define Z_DECL_POLL_EVENT
67 #endif
68
69 struct k_thread;
70 struct k_mutex;
71 struct k_sem;
72 struct k_msgq;
73 struct k_mbox;
74 struct k_pipe;
75 struct k_queue;
76 struct k_fifo;
77 struct k_lifo;
78 struct k_stack;
79 struct k_mem_slab;
80 struct k_timer;
81 struct k_poll_event;
82 struct k_poll_signal;
83 struct k_mem_domain;
84 struct k_mem_partition;
85 struct k_futex;
86 struct k_event;
87
88 enum execution_context_types {
89 K_ISR = 0,
90 K_COOP_THREAD,
91 K_PREEMPT_THREAD,
92 };
93
94 /* private, used by k_poll and k_work_poll */
95 struct k_work_poll;
96 typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);
97
98 /**
99 * @addtogroup thread_apis
100 * @{
101 */
102
103 typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
104 void *user_data);
105
106 /**
107 * @brief Iterate over all the threads in the system.
108 *
109 * This routine iterates over all the threads in the system and
110 * calls the user_cb function for each thread.
111 *
112 * @param user_cb Pointer to the user callback function.
113 * @param user_data Pointer to user data.
114 *
115 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
116 * to be effective.
117 * @note This API uses @ref k_spin_lock to protect the _kernel.threads
118 * list which means creation of new threads and terminations of existing
119 * threads are blocked until this API returns.
120 */
121 void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);
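/*
 * Example (editorial sketch): counting all threads with k_thread_foreach().
 * Assumes CONFIG_THREAD_MONITOR=y; the callback and variable names below are
 * illustrative only.
 *
 *	static void count_thread_cb(const struct k_thread *thread, void *user_data)
 *	{
 *		int *count = user_data;
 *
 *		ARG_UNUSED(thread);
 *		(*count)++;
 *	}
 *
 *	int num_threads = 0;
 *
 *	k_thread_foreach(count_thread_cb, &num_threads);
 */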
122
123 /**
124 * @brief Iterate over all the threads in the system without locking.
125 *
126  * This routine works exactly the same as @ref k_thread_foreach,
127  * but unlocks interrupts while user_cb is executed.
128 *
129 * @param user_cb Pointer to the user callback function.
130 * @param user_data Pointer to user data.
131 *
132 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
133 * to be effective.
134 * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
135 * queue elements. It unlocks it during user callback function processing.
136  * If a new thread is created while this @c foreach function is in progress,
137  * the newly added thread will not be included in the enumeration.
138  * If a thread is aborted during this enumeration, there is a race and the
139  * aborted thread may still be included in the
140  * enumeration.
141  * @note If a thread is aborted and the memory occupied by its @c k_thread
142  * structure is reused while this @c k_thread_foreach_unlocked is in progress,
143  * the system may become unstable.
144  * This function may never return, as it would follow stale @c next thread
145  * pointers, treating the given pointer as a pointer to a k_thread structure
146  * when the memory now holds something else.
147  * Do not reuse the memory that was occupied by the k_thread structure of an
148  * aborted thread if the thread was aborted after this function was called, in any context.
149 */
150 void k_thread_foreach_unlocked(
151 k_thread_user_cb_t user_cb, void *user_data);
152
153 /** @} */
154
155 /**
156 * @defgroup thread_apis Thread APIs
157 * @ingroup kernel_apis
158 * @{
159 */
160
161 #endif /* !_ASMLANGUAGE */
162
163
164 /*
165 * Thread user options. May be needed by assembly code. Common part uses low
166 * bits, arch-specific use high bits.
167 */
168
169 /**
170 * @brief system thread that must not abort
171  */
172 #define K_ESSENTIAL (BIT(0))
173
174 /**
175 * @brief FPU registers are managed by context switch
176 *
177 * @details
178 * This option indicates that the thread uses the CPU's floating point
179 * registers. This instructs the kernel to take additional steps to save
180 * and restore the contents of these registers when scheduling the thread.
181 * No effect if @kconfig{CONFIG_FPU_SHARING} is not enabled.
182 */
183 #define K_FP_IDX 1
184 #define K_FP_REGS (BIT(K_FP_IDX))
185
186 /**
187 * @brief user mode thread
188 *
189 * This thread has dropped from supervisor mode to user mode and consequently
190 * has additional restrictions
191 */
192 #define K_USER (BIT(2))
193
194 /**
195 * @brief Inherit Permissions
196 *
197 * @details
198 * Indicates that the thread being created should inherit all kernel object
199 * permissions from the thread that created it. No effect if
200 * @kconfig{CONFIG_USERSPACE} is not enabled.
201 */
202 #define K_INHERIT_PERMS (BIT(3))
203
204 /**
205 * @brief Callback item state
206 *
207 * @details
208 * This is a single bit of state reserved for "callback manager"
209  * utilities (p4wq initially) that need to track operations invoked
210  * from within a user-provided callback they have invoked.
211 * Effectively it serves as a tiny bit of zero-overhead TLS data.
212 */
213 #define K_CALLBACK_STATE (BIT(4))
214
215 /**
216 * @brief DSP registers are managed by context switch
217 *
218 * @details
219 * This option indicates that the thread uses the CPU's DSP registers.
220 * This instructs the kernel to take additional steps to save and
221 * restore the contents of these registers when scheduling the thread.
222 * No effect if @kconfig{CONFIG_DSP_SHARING} is not enabled.
223 */
224 #define K_DSP_IDX 6
225 #define K_DSP_REGS (BIT(K_DSP_IDX))
226
227 /**
228 * @brief AGU registers are managed by context switch
229 *
230 * @details
231 * This option indicates that the thread uses the ARC processor's XY
232 * memory and DSP feature. Often used with @kconfig{CONFIG_ARC_AGU_SHARING}.
233 * No effect if @kconfig{CONFIG_ARC_AGU_SHARING} is not enabled.
234 */
235 #define K_AGU_IDX 7
236 #define K_AGU_REGS (BIT(K_AGU_IDX))
237
238 /**
239 * @brief FP and SSE registers are managed by context switch on x86
240 *
241 * @details
242 * This option indicates that the thread uses the x86 CPU's floating point
243 * and SSE registers. This instructs the kernel to take additional steps to
244 * save and restore the contents of these registers when scheduling
245 * the thread. No effect if @kconfig{CONFIG_X86_SSE} is not enabled.
246 */
247 #define K_SSE_REGS (BIT(7))
248
249 /* end - thread options */
250
251 #if !defined(_ASMLANGUAGE)
252 /**
253 * @brief Dynamically allocate a thread stack.
254 *
255 * Relevant stack creation flags include:
256 * - @ref K_USER allocate a userspace thread (requires `CONFIG_USERSPACE=y`)
257 *
258 * @param size Stack size in bytes.
259 * @param flags Stack creation flags, or 0.
260 *
261 * @retval the allocated thread stack on success.
262 * @retval NULL on failure.
263 *
264 * @see CONFIG_DYNAMIC_THREAD
265 */
266 __syscall k_thread_stack_t *k_thread_stack_alloc(size_t size, int flags);
267
268 /**
269 * @brief Free a dynamically allocated thread stack.
270 *
271 * @param stack Pointer to the thread stack.
272 *
273 * @retval 0 on success.
274 * @retval -EBUSY if the thread stack is in use.
275 * @retval -EINVAL if @p stack is invalid.
276 * @retval -ENOSYS if dynamic thread stack allocation is disabled
277 *
278 * @see CONFIG_DYNAMIC_THREAD
279 */
280 __syscall int k_thread_stack_free(k_thread_stack_t *stack);
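/*
 * Example (editorial sketch, assuming CONFIG_DYNAMIC_THREAD=y): allocating a
 * stack at runtime, spawning a thread on it, and releasing the stack once the
 * thread has exited. worker_entry, the stack size and the priority are
 * illustrative only.
 *
 *	k_thread_stack_t *stack = k_thread_stack_alloc(2048, 0);
 *
 *	if (stack != NULL) {
 *		struct k_thread worker;
 *		k_tid_t tid = k_thread_create(&worker, stack, 2048,
 *					      worker_entry, NULL, NULL, NULL,
 *					      K_PRIO_PREEMPT(5), 0, K_NO_WAIT);
 *
 *		k_thread_join(tid, K_FOREVER);
 *		(void)k_thread_stack_free(stack);
 *	}
 */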
281
282 /**
283 * @brief Create a thread.
284 *
285 * This routine initializes a thread, then schedules it for execution.
286 *
287 * The new thread may be scheduled for immediate execution or a delayed start.
288 * If the newly spawned thread does not have a delayed start the kernel
289 * scheduler may preempt the current thread to allow the new thread to
290 * execute.
291 *
292 * Thread options are architecture-specific, and can include K_ESSENTIAL,
293 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
294 * them using "|" (the logical OR operator).
295 *
296 * Stack objects passed to this function must be originally defined with
297 * either of these macros in order to be portable:
298 *
299 * - K_THREAD_STACK_DEFINE() - For stacks that may support either user or
300 * supervisor threads.
301 * - K_KERNEL_STACK_DEFINE() - For stacks that may support supervisor
302 * threads only. These stacks use less memory if CONFIG_USERSPACE is
303 * enabled.
304 *
305 * The stack_size parameter has constraints. It must either be:
306 *
307 * - The original size value passed to K_THREAD_STACK_DEFINE() or
308 * K_KERNEL_STACK_DEFINE()
309 * - The return value of K_THREAD_STACK_SIZEOF(stack) if the stack was
310 * defined with K_THREAD_STACK_DEFINE()
311 * - The return value of K_KERNEL_STACK_SIZEOF(stack) if the stack was
312 * defined with K_KERNEL_STACK_DEFINE().
313 *
314  * Using other values, or sizeof(stack), may produce undefined behavior.
315 *
316 * @param new_thread Pointer to uninitialized struct k_thread
317 * @param stack Pointer to the stack space.
318 * @param stack_size Stack size in bytes.
319 * @param entry Thread entry function.
320 * @param p1 1st entry point parameter.
321 * @param p2 2nd entry point parameter.
322 * @param p3 3rd entry point parameter.
323 * @param prio Thread priority.
324 * @param options Thread options.
325 * @param delay Scheduling delay, or K_NO_WAIT (for no delay).
326 *
327 * @return ID of new thread.
328 *
329 */
330 __syscall k_tid_t k_thread_create(struct k_thread *new_thread,
331 k_thread_stack_t *stack,
332 size_t stack_size,
333 k_thread_entry_t entry,
334 void *p1, void *p2, void *p3,
335 int prio, uint32_t options, k_timeout_t delay);
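/*
 * Example (editorial sketch): creating a thread on a statically defined stack.
 * my_stack, my_thread_data and my_entry are illustrative names; the stack size
 * and priority are arbitrary.
 *
 *	K_THREAD_STACK_DEFINE(my_stack, 1024);
 *	static struct k_thread my_thread_data;
 *
 *	static void my_entry(void *p1, void *p2, void *p3)
 *	{
 *		... thread body ...
 *	}
 *
 *	k_tid_t tid = k_thread_create(&my_thread_data, my_stack,
 *				      K_THREAD_STACK_SIZEOF(my_stack),
 *				      my_entry, NULL, NULL, NULL,
 *				      K_PRIO_PREEMPT(7), 0, K_NO_WAIT);
 */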
336
337 /**
338 * @brief Drop a thread's privileges permanently to user mode
339 *
340 * This allows a supervisor thread to be re-used as a user thread.
341 * This function does not return, but control will transfer to the provided
342 * entry point as if this was a new user thread.
343 *
344 * The implementation ensures that the stack buffer contents are erased.
345 * Any thread-local storage will be reverted to a pristine state.
346 *
347 * Memory domain membership, resource pool assignment, kernel object
348 * permissions, priority, and thread options are preserved.
349 *
350 * A common use of this function is to re-use the main thread as a user thread
351 * once all supervisor mode-only tasks have been completed.
352 *
353 * @param entry Function to start executing from
354 * @param p1 1st entry point parameter
355 * @param p2 2nd entry point parameter
356 * @param p3 3rd entry point parameter
357 */
358 FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
359 void *p1, void *p2,
360 void *p3);
361
362 /**
363 * @brief Grant a thread access to a set of kernel objects
364 *
365 * This is a convenience function. For the provided thread, grant access to
366 * the remaining arguments, which must be pointers to kernel objects.
367 *
368 * The thread object must be initialized (i.e. running). The objects don't
369 * need to be.
370 * Note that NULL shouldn't be passed as an argument.
371 *
372 * @param thread Thread to grant access to objects
373 * @param ... list of kernel object pointers
374 */
375 #define k_thread_access_grant(thread, ...) \
376 FOR_EACH_FIXED_ARG(k_object_access_grant, (;), thread, __VA_ARGS__)
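/*
 * Example (editorial sketch, assuming CONFIG_USERSPACE=y): granting a user
 * thread access to a semaphore and a message queue before starting it.
 * child_tid, my_sem and my_msgq are illustrative names.
 *
 *	k_thread_access_grant(child_tid, &my_sem, &my_msgq);
 *	k_thread_start(child_tid);
 */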
377
378 /**
379 * @brief Assign a resource memory pool to a thread
380 *
381 * By default, threads have no resource pool assigned unless their parent
382 * thread has a resource pool, in which case it is inherited. Multiple
383 * threads may be assigned to the same memory pool.
384 *
385 * Changing a thread's resource pool will not migrate allocations from the
386 * previous pool.
387 *
388 * @param thread Target thread to assign a memory pool for resource requests.
389 * @param heap Heap object to use for resources,
390 * or NULL if the thread should no longer have a memory pool.
391 */
392 static inline void k_thread_heap_assign(struct k_thread *thread,
393 struct k_heap *heap)
394 {
395 thread->resource_pool = heap;
396 }
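/*
 * Example (editorial sketch): giving a thread a private resource pool so that
 * kernel-side allocations made on its behalf are drawn from my_heap. The heap
 * name and size, and my_thread_data, are illustrative only.
 *
 *	K_HEAP_DEFINE(my_heap, 2048);
 *
 *	k_thread_heap_assign(&my_thread_data, &my_heap);
 */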
397
398 #if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
399 /**
400 * @brief Obtain stack usage information for the specified thread
401 *
402 * User threads will need to have permission on the target thread object.
403 *
404 * Some hardware may prevent inspection of a stack buffer currently in use.
405 * If this API is called from supervisor mode, on the currently running thread,
406 * on a platform which selects @kconfig{CONFIG_NO_UNUSED_STACK_INSPECTION}, an
407 * error will be generated.
408 *
409 * @param thread Thread to inspect stack information
410 * @param unused_ptr Output parameter, filled in with the unused stack space
411 * of the target thread in bytes.
412 * @return 0 on success
413 * @return -EBADF Bad thread object (user mode only)
414 * @return -EPERM No permissions on thread object (user mode only)
415  * @return -ENOTSUP Forbidden by hardware policy
416 * @return -EINVAL Thread is uninitialized or exited (user mode only)
417 * @return -EFAULT Bad memory address for unused_ptr (user mode only)
418 */
419 __syscall int k_thread_stack_space_get(const struct k_thread *thread,
420 size_t *unused_ptr);
421 #endif
422
423 #if (K_HEAP_MEM_POOL_SIZE > 0)
424 /**
425 * @brief Assign the system heap as a thread's resource pool
426 *
427 * Similar to k_thread_heap_assign(), but the thread will use
428 * the kernel heap to draw memory.
429 *
430 * Use with caution, as a malicious thread could perform DoS attacks on the
431 * kernel heap.
432 *
433 * @param thread Target thread to assign the system heap for resource requests
434 *
435 */
436 void k_thread_system_pool_assign(struct k_thread *thread);
437 #endif /* (K_HEAP_MEM_POOL_SIZE > 0) */
438
439 /**
440 * @brief Sleep until a thread exits
441 *
442 * The caller will be put to sleep until the target thread exits, either due
443 * to being aborted, self-exiting, or taking a fatal error. This API returns
444 * immediately if the thread isn't running.
445 *
446 * This API may only be called from ISRs with a K_NO_WAIT timeout,
447 * where it can be useful as a predicate to detect when a thread has
448 * aborted.
449 *
450 * @param thread Thread to wait to exit
451 * @param timeout upper bound time to wait for the thread to exit.
452 * @retval 0 success, target thread has exited or wasn't running
453 * @retval -EBUSY returned without waiting
454 * @retval -EAGAIN waiting period timed out
455 * @retval -EDEADLK target thread is joining on the caller, or target thread
456 * is the caller
457 */
458 __syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);
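/*
 * Example (editorial sketch): waiting up to one second for a worker thread to
 * exit. tid is an illustrative thread ID obtained from k_thread_create().
 *
 *	int ret = k_thread_join(tid, K_SECONDS(1));
 *
 *	if (ret == -EAGAIN) {
 *		... thread is still running after the timeout ...
 *	}
 */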
459
460 /**
461 * @brief Put the current thread to sleep.
462 *
463 * This routine puts the current thread to sleep for @a duration,
464 * specified as a k_timeout_t object.
465 *
466 * @note if @a timeout is set to K_FOREVER then the thread is suspended.
467 *
468 * @param timeout Desired duration of sleep.
469 *
470  * @return Zero if the requested time has elapsed, or, if the thread was woken
471  * up earlier by a call to \ref k_wakeup, the time left to sleep rounded up to
472  * the nearest millisecond.
473 */
474 __syscall int32_t k_sleep(k_timeout_t timeout);
475
476 /**
477 * @brief Put the current thread to sleep.
478 *
479 * This routine puts the current thread to sleep for @a duration milliseconds.
480 *
481 * @param ms Number of milliseconds to sleep.
482 *
483  * @return Zero if the requested time has elapsed, or, if the thread was woken
484  * up earlier by a call to \ref k_wakeup, the time left to sleep rounded up to
485  * the nearest millisecond.
486 */
487 static inline int32_t k_msleep(int32_t ms)
488 {
489 return k_sleep(Z_TIMEOUT_MS(ms));
490 }
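/*
 * Example (editorial sketch): the two calls below are equivalent ways of
 * sleeping for 100 milliseconds; the second is a convenience wrapper around
 * the first.
 *
 *	k_sleep(K_MSEC(100));
 *	k_msleep(100);
 */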
491
492 /**
493 * @brief Put the current thread to sleep with microsecond resolution.
494 *
495 * This function is unlikely to work as expected without kernel tuning.
496 * In particular, because the lower bound on the duration of a sleep is
497 * the duration of a tick, @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} must be
498 * adjusted to achieve the resolution desired. The implications of doing
499 * this must be understood before attempting to use k_usleep(). Use with
500 * caution.
501 *
502 * @param us Number of microseconds to sleep.
503 *
504  * @return Zero if the requested time has elapsed, or, if the thread was woken
505  * up earlier by a call to \ref k_wakeup, the time left to sleep rounded up to
506  * the nearest microsecond.
507 */
508 __syscall int32_t k_usleep(int32_t us);
509
510 /**
511 * @brief Cause the current thread to busy wait.
512 *
513 * This routine causes the current thread to execute a "do nothing" loop for
514 * @a usec_to_wait microseconds.
515 *
516 * @note The clock used for the microsecond-resolution delay here may
517 * be skewed relative to the clock used for system timeouts like
518 * k_sleep(). For example k_busy_wait(1000) may take slightly more or
519 * less time than k_sleep(K_MSEC(1)), with the offset dependent on
520 * clock tolerances.
521 *
522 * @note In case when @kconfig{CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE} and
523 * @kconfig{CONFIG_PM} options are enabled, this function may not work.
524 * The timer/clock used for delay processing may be disabled/inactive.
525 */
526 __syscall void k_busy_wait(uint32_t usec_to_wait);
527
528 /**
529 * @brief Check whether it is possible to yield in the current context.
530 *
531 * This routine checks whether the kernel is in a state where it is possible to
532 * yield or call blocking API's. It should be used by code that needs to yield
533 * to perform correctly, but can feasibly be called from contexts where that
534 * is not possible. For example in the PRE_KERNEL initialization step, or when
535 * being run from the idle thread.
536 *
537 * @return True if it is possible to yield in the current context, false otherwise.
538 */
539 bool k_can_yield(void);
540
541 /**
542 * @brief Yield the current thread.
543 *
544 * This routine causes the current thread to yield execution to another
545 * thread of the same or higher priority. If there are no other ready threads
546 * of the same or higher priority, the routine returns immediately.
547 */
548 __syscall void k_yield(void);
549
550 /**
551 * @brief Wake up a sleeping thread.
552 *
553 * This routine prematurely wakes up @a thread from sleeping.
554 *
555 * If @a thread is not currently sleeping, the routine has no effect.
556 *
557 * @param thread ID of thread to wake.
558 */
559 __syscall void k_wakeup(k_tid_t thread);
560
561 /**
562 * @brief Query thread ID of the current thread.
563 *
564 * This unconditionally queries the kernel via a system call.
565 *
566 * @note Use k_current_get() unless absolutely sure this is necessary.
567 * This should only be used directly where the thread local
568 * variable cannot be used or may contain invalid values
569 * if thread local storage (TLS) is enabled. If TLS is not
570 * enabled, this is the same as k_current_get().
571 *
572 * @return ID of current thread.
573 */
574 __attribute_const__
575 __syscall k_tid_t k_sched_current_thread_query(void);
576
577 /**
578 * @brief Get thread ID of the current thread.
579 *
580 * @return ID of current thread.
581 *
582 */
583 __attribute_const__
584 static inline k_tid_t k_current_get(void)
585 {
586 #ifdef CONFIG_CURRENT_THREAD_USE_TLS
587
588 /* Thread-local cache of current thread ID, set in z_thread_entry() */
589 extern __thread k_tid_t z_tls_current;
590
591 return z_tls_current;
592 #else
593 return k_sched_current_thread_query();
594 #endif
595 }
596
597 /**
598 * @brief Abort a thread.
599 *
600 * This routine permanently stops execution of @a thread. The thread is taken
601 * off all kernel queues it is part of (i.e. the ready queue, the timeout
602 * queue, or a kernel object wait queue). However, any kernel resources the
603 * thread might currently own (such as mutexes or memory blocks) are not
604 * released. It is the responsibility of the caller of this routine to ensure
605 * all necessary cleanup is performed.
606 *
607 * After k_thread_abort() returns, the thread is guaranteed not to be
608 * running or to become runnable anywhere on the system. Normally
609 * this is done via blocking the caller (in the same manner as
610 * k_thread_join()), but in interrupt context on SMP systems the
611 * implementation is required to spin for threads that are running on
612 * other CPUs.
613 *
614 * @param thread ID of thread to abort.
615 */
616 __syscall void k_thread_abort(k_tid_t thread);
617
618
619 /**
620 * @brief Start an inactive thread
621 *
622 * If a thread was created with K_FOREVER in the delay parameter, it will
623 * not be added to the scheduling queue until this function is called
624 * on it.
625 *
626 * @param thread thread to start
627 */
628 __syscall void k_thread_start(k_tid_t thread);
629
630 k_ticks_t z_timeout_expires(const struct _timeout *timeout);
631 k_ticks_t z_timeout_remaining(const struct _timeout *timeout);
632
633 #ifdef CONFIG_SYS_CLOCK_EXISTS
634
635 /**
636 * @brief Get time when a thread wakes up, in system ticks
637 *
638 * This routine computes the system uptime when a waiting thread next
639 * executes, in units of system ticks. If the thread is not waiting,
640 * it returns current system time.
641 */
642 __syscall k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *t);
643
644 static inline k_ticks_t z_impl_k_thread_timeout_expires_ticks(
645 const struct k_thread *t)
646 {
647 return z_timeout_expires(&t->base.timeout);
648 }
649
650 /**
651 * @brief Get time remaining before a thread wakes up, in system ticks
652 *
653 * This routine computes the time remaining before a waiting thread
654 * next executes, in units of system ticks. If the thread is not
655 * waiting, it returns zero.
656 */
657 __syscall k_ticks_t k_thread_timeout_remaining_ticks(const struct k_thread *t);
658
659 static inline k_ticks_t z_impl_k_thread_timeout_remaining_ticks(
660 const struct k_thread *t)
661 {
662 return z_timeout_remaining(&t->base.timeout);
663 }
664
665 #endif /* CONFIG_SYS_CLOCK_EXISTS */
666
667 /**
668 * @cond INTERNAL_HIDDEN
669 */
670
671 struct _static_thread_data {
672 struct k_thread *init_thread;
673 k_thread_stack_t *init_stack;
674 unsigned int init_stack_size;
675 k_thread_entry_t init_entry;
676 void *init_p1;
677 void *init_p2;
678 void *init_p3;
679 int init_prio;
680 uint32_t init_options;
681 const char *init_name;
682 #ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
683 int32_t init_delay_ms;
684 #else
685 k_timeout_t init_delay;
686 #endif
687 };
688
689 #ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
690 #define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay_ms = (ms)
691 #define Z_THREAD_INIT_DELAY(thread) SYS_TIMEOUT_MS((thread)->init_delay_ms)
692 #else
693 #define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay = SYS_TIMEOUT_MS(ms)
694 #define Z_THREAD_INIT_DELAY(thread) (thread)->init_delay
695 #endif
696
697 #define Z_THREAD_INITIALIZER(thread, stack, stack_size, \
698 entry, p1, p2, p3, \
699 prio, options, delay, tname) \
700 { \
701 .init_thread = (thread), \
702 .init_stack = (stack), \
703 .init_stack_size = (stack_size), \
704 .init_entry = (k_thread_entry_t)entry, \
705 .init_p1 = (void *)p1, \
706 .init_p2 = (void *)p2, \
707 .init_p3 = (void *)p3, \
708 .init_prio = (prio), \
709 .init_options = (options), \
710 .init_name = STRINGIFY(tname), \
711 Z_THREAD_INIT_DELAY_INITIALIZER(delay) \
712 }
713
714 /*
715 * Refer to K_THREAD_DEFINE() and K_KERNEL_THREAD_DEFINE() for
716 * information on arguments.
717 */
718 #define Z_THREAD_COMMON_DEFINE(name, stack_size, \
719 entry, p1, p2, p3, \
720 prio, options, delay) \
721 struct k_thread _k_thread_obj_##name; \
722 STRUCT_SECTION_ITERABLE(_static_thread_data, \
723 _k_thread_data_##name) = \
724 Z_THREAD_INITIALIZER(&_k_thread_obj_##name, \
725 _k_thread_stack_##name, stack_size,\
726 entry, p1, p2, p3, prio, options, \
727 delay, name); \
728 const k_tid_t name = (k_tid_t)&_k_thread_obj_##name
729
730 /**
731 * INTERNAL_HIDDEN @endcond
732 */
733
734 /**
735 * @brief Statically define and initialize a thread.
736 *
737 * The thread may be scheduled for immediate execution or a delayed start.
738 *
739 * Thread options are architecture-specific, and can include K_ESSENTIAL,
740 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
741 * them using "|" (the logical OR operator).
742 *
743 * The ID of the thread can be accessed using:
744 *
745 * @code extern const k_tid_t <name>; @endcode
746 *
747 * @param name Name of the thread.
748 * @param stack_size Stack size in bytes.
749 * @param entry Thread entry function.
750 * @param p1 1st entry point parameter.
751 * @param p2 2nd entry point parameter.
752 * @param p3 3rd entry point parameter.
753 * @param prio Thread priority.
754 * @param options Thread options.
755 * @param delay Scheduling delay (in milliseconds), zero for no delay.
756 *
757 * @note Static threads with zero delay should not normally have
758 * MetaIRQ priority levels. This can preempt the system
759 * initialization handling (depending on the priority of the main
760 * thread) and cause surprising ordering side effects. It will not
761 * affect anything in the OS per se, but consider it bad practice.
762 * Use a SYS_INIT() callback if you need to run code before entrance
763 * to the application main().
764 */
765 #define K_THREAD_DEFINE(name, stack_size, \
766 entry, p1, p2, p3, \
767 prio, options, delay) \
768 K_THREAD_STACK_DEFINE(_k_thread_stack_##name, stack_size); \
769 Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3, \
770 prio, options, delay)
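/*
 * Example (editorial sketch): statically defining a thread that starts
 * immediately at system boot. blink_entry, the stack size and the priority
 * are illustrative only.
 *
 *	static void blink_entry(void *p1, void *p2, void *p3)
 *	{
 *		... thread body ...
 *	}
 *
 *	K_THREAD_DEFINE(blink_thread, 1024, blink_entry, NULL, NULL, NULL,
 *			K_PRIO_PREEMPT(7), 0, 0);
 */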
771
772 /**
773 * @brief Statically define and initialize a thread intended to run only in kernel mode.
774 *
775 * The thread may be scheduled for immediate execution or a delayed start.
776 *
777 * Thread options are architecture-specific, and can include K_ESSENTIAL,
778 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
779 * them using "|" (the logical OR operator).
780 *
781 * The ID of the thread can be accessed using:
782 *
783 * @code extern const k_tid_t <name>; @endcode
784 *
785 * @note Threads defined by this can only run in kernel mode, and cannot be
786  * transformed into a user thread via k_thread_user_mode_enter().
787 *
788 * @warning Depending on the architecture, the stack size (@p stack_size)
789 * may need to be multiples of CONFIG_MMU_PAGE_SIZE (if MMU)
790 * or in power-of-two size (if MPU).
791 *
792 * @param name Name of the thread.
793 * @param stack_size Stack size in bytes.
794 * @param entry Thread entry function.
795 * @param p1 1st entry point parameter.
796 * @param p2 2nd entry point parameter.
797 * @param p3 3rd entry point parameter.
798 * @param prio Thread priority.
799 * @param options Thread options.
800 * @param delay Scheduling delay (in milliseconds), zero for no delay.
801 */
802 #define K_KERNEL_THREAD_DEFINE(name, stack_size, \
803 entry, p1, p2, p3, \
804 prio, options, delay) \
805 K_KERNEL_STACK_DEFINE(_k_thread_stack_##name, stack_size); \
806 Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3, \
807 prio, options, delay)
808
809 /**
810 * @brief Get a thread's priority.
811 *
812 * This routine gets the priority of @a thread.
813 *
814 * @param thread ID of thread whose priority is needed.
815 *
816 * @return Priority of @a thread.
817 */
818 __syscall int k_thread_priority_get(k_tid_t thread);
819
820 /**
821 * @brief Set a thread's priority.
822 *
823 * This routine immediately changes the priority of @a thread.
824 *
825 * Rescheduling can occur immediately depending on the priority @a thread is
826 * set to:
827 *
828 * - If its priority is raised above the priority of the caller of this
829 * function, and the caller is preemptible, @a thread will be scheduled in.
830 *
831 * - If the caller operates on itself, it lowers its priority below that of
832 * other threads in the system, and the caller is preemptible, the thread of
833 * highest priority will be scheduled in.
834 *
835 * Priority can be assigned in the range of -CONFIG_NUM_COOP_PRIORITIES to
836 * CONFIG_NUM_PREEMPT_PRIORITIES-1, where -CONFIG_NUM_COOP_PRIORITIES is the
837 * highest priority.
838 *
839 * @param thread ID of thread whose priority is to be set.
840 * @param prio New priority.
841 *
842 * @warning Changing the priority of a thread currently involved in mutex
843 * priority inheritance may result in undefined behavior.
844 */
845 __syscall void k_thread_priority_set(k_tid_t thread, int prio);
846
847
848 #ifdef CONFIG_SCHED_DEADLINE
849 /**
850 * @brief Set deadline expiration time for scheduler
851 *
852 * This sets the "deadline" expiration as a time delta from the
853 * current time, in the same units used by k_cycle_get_32(). The
854 * scheduler (when deadline scheduling is enabled) will choose the
855 * next expiring thread when selecting between threads at the same
856 * static priority. Threads at different priorities will be scheduled
857 * according to their static priority.
858 *
859 * @note Deadlines are stored internally using 32 bit unsigned
860 * integers. The number of cycles between the "first" deadline in the
861 * scheduler queue and the "last" deadline must be less than 2^31 (i.e
862 * a signed non-negative quantity). Failure to adhere to this rule
863 * may result in scheduled threads running in an incorrect deadline
864 * order.
865 *
866 * @note Despite the API naming, the scheduler makes no guarantees
867 * the thread WILL be scheduled within that deadline, nor does it take
868 * extra metadata (like e.g. the "runtime" and "period" parameters in
869 * Linux sched_setattr()) that allows the kernel to validate the
870 * scheduling for achievability. Such features could be implemented
871 * above this call, which is simply input to the priority selection
872 * logic.
873 *
874 * @note You should enable @kconfig{CONFIG_SCHED_DEADLINE} in your project
875 * configuration.
876 *
877 * @param thread A thread on which to set the deadline
878 * @param deadline A time delta, in cycle units
879 *
880 */
881 __syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
882 #endif
883
884 #ifdef CONFIG_SCHED_CPU_MASK
885 /**
886 * @brief Sets all CPU enable masks to zero
887 *
888 * After this returns, the thread will no longer be schedulable on any
889 * CPUs. The thread must not be currently runnable.
890 *
891 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
892 * configuration.
893 *
894 * @param thread Thread to operate upon
895 * @return Zero on success, otherwise error code
896 */
897 int k_thread_cpu_mask_clear(k_tid_t thread);
898
899 /**
900 * @brief Sets all CPU enable masks to one
901 *
902 * After this returns, the thread will be schedulable on any CPU. The
903 * thread must not be currently runnable.
904 *
905 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
906 * configuration.
907 *
908 * @param thread Thread to operate upon
909 * @return Zero on success, otherwise error code
910 */
911 int k_thread_cpu_mask_enable_all(k_tid_t thread);
912
913 /**
914 * @brief Enable thread to run on specified CPU
915 *
916 * The thread must not be currently runnable.
917 *
918 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
919 * configuration.
920 *
921 * @param thread Thread to operate upon
922 * @param cpu CPU index
923 * @return Zero on success, otherwise error code
924 */
925 int k_thread_cpu_mask_enable(k_tid_t thread, int cpu);
926
927 /**
928  * @brief Prevent a thread from running on the specified CPU
929 *
930 * The thread must not be currently runnable.
931 *
932 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
933 * configuration.
934 *
935 * @param thread Thread to operate upon
936 * @param cpu CPU index
937 * @return Zero on success, otherwise error code
938 */
939 int k_thread_cpu_mask_disable(k_tid_t thread, int cpu);
940
941 /**
942 * @brief Pin a thread to a CPU
943 *
944 * Pin a thread to a CPU by first clearing the cpu mask and then enabling the
945 * thread on the selected CPU.
946 *
947 * @param thread Thread to operate upon
948 * @param cpu CPU index
949 * @return Zero on success, otherwise error code
950 */
951 int k_thread_cpu_pin(k_tid_t thread, int cpu);
952 #endif
953
954 /**
955 * @brief Suspend a thread.
956 *
957 * This routine prevents the kernel scheduler from making @a thread
958 * the current thread. All other internal operations on @a thread are
959 * still performed; for example, kernel objects it is waiting on are
960 * still handed to it. Note that any existing timeouts
961  * (e.g. k_sleep(), or a timeout argument to k_sem_take() et al.)
962 * will be canceled. On resume, the thread will begin running
963 * immediately and return from the blocked call.
964 *
965 * When the target thread is active on another CPU, the caller will block until
966 * the target thread is halted (suspended or aborted). But if the caller is in
967 * an interrupt context, it will spin waiting for that target thread active on
968 * another CPU to halt.
969 *
970 * If @a thread is already suspended, the routine has no effect.
971 *
972 * @param thread ID of thread to suspend.
973 */
974 __syscall void k_thread_suspend(k_tid_t thread);
975
976 /**
977 * @brief Resume a suspended thread.
978 *
979 * This routine allows the kernel scheduler to make @a thread the current
980 * thread, when it is next eligible for that role.
981 *
982 * If @a thread is not currently suspended, the routine has no effect.
983 *
984 * @param thread ID of thread to resume.
985 */
986 __syscall void k_thread_resume(k_tid_t thread);
987
988 /**
989 * @brief Set time-slicing period and scope.
990 *
991 * This routine specifies how the scheduler will perform time slicing of
992 * preemptible threads.
993 *
994 * To enable time slicing, @a slice must be non-zero. The scheduler
995 * ensures that no thread runs for more than the specified time limit
996 * before other threads of that priority are given a chance to execute.
997 * Any thread whose priority is higher than @a prio is exempted, and may
998 * execute as long as desired without being preempted due to time slicing.
999 *
1000 * Time slicing only limits the maximum amount of time a thread may continuously
1001 * execute. Once the scheduler selects a thread for execution, there is no
1002 * minimum guaranteed time the thread will execute before threads of greater or
1003 * equal priority are scheduled.
1004 *
1005 * When the current thread is the only one of that priority eligible
1006 * for execution, this routine has no effect; the thread is immediately
1007 * rescheduled after the slice period expires.
1008 *
1009 * To disable timeslicing, set both @a slice and @a prio to zero.
1010 *
1011 * @param slice Maximum time slice length (in milliseconds).
1012 * @param prio Highest thread priority level eligible for time slicing.
1013 */
1014 void k_sched_time_slice_set(int32_t slice, int prio);
1015
1016 /**
1017 * @brief Set thread time slice
1018 *
1019 * As for k_sched_time_slice_set, but (when
1020 * CONFIG_TIMESLICE_PER_THREAD=y) sets the timeslice for a specific
1021 * thread. When non-zero, this timeslice will take precedence over
1022 * the global value.
1023 *
1024 * When such a thread's timeslice expires, the configured callback
1025 * will be called before the thread is removed/re-added to the run
1026 * queue. This callback will occur in interrupt context, and the
1027 * specified thread is guaranteed to have been preempted by the
1028 * currently-executing ISR. Such a callback is free to, for example,
1029 * modify the thread priority or slice time for future execution,
1030 * suspend the thread, etc...
1031 *
1032 * @note Unlike the older API, the time slice parameter here is
1033 * specified in ticks, not milliseconds. Ticks have always been the
1034 * internal unit, and not all platforms have integer conversions
1035 * between the two.
1036 *
1037 * @note Threads with a non-zero slice time set will be timesliced
1038 * always, even if they are higher priority than the maximum timeslice
1039 * priority set via k_sched_time_slice_set().
1040 *
1041 * @note The callback notification for slice expiration happens, as it
1042 * must, while the thread is still "current", and thus it happens
1043 * before any registered timeouts at this tick. This has the somewhat
1044 * confusing side effect that the tick time (c.f. k_uptime_get()) does
1045 * not yet reflect the expired ticks. Applications wishing to make
1046 * fine-grained timing decisions within this callback should use the
1047 * cycle API, or derived facilities like k_thread_runtime_stats_get().
1048 *
1049 * @param th A valid, initialized thread
1050 * @param slice_ticks Maximum timeslice, in ticks
1051 * @param expired Callback function called on slice expiration
1052 * @param data Parameter for the expiration handler
1053 */
1054 void k_thread_time_slice_set(struct k_thread *th, int32_t slice_ticks,
1055 k_thread_timeslice_fn_t expired, void *data);
1056
1057 /** @} */
1058
1059 /**
1060 * @addtogroup isr_apis
1061 * @{
1062 */
1063
1064 /**
1065 * @brief Determine if code is running at interrupt level.
1066 *
1067 * This routine allows the caller to customize its actions, depending on
1068 * whether it is a thread or an ISR.
1069 *
1070 * @funcprops \isr_ok
1071 *
1072 * @return false if invoked by a thread.
1073 * @return true if invoked by an ISR.
1074 */
1075 bool k_is_in_isr(void);
1076
1077 /**
1078 * @brief Determine if code is running in a preemptible thread.
1079 *
1080 * This routine allows the caller to customize its actions, depending on
1081 * whether it can be preempted by another thread. The routine returns a 'true'
1082 * value if all of the following conditions are met:
1083 *
1084 * - The code is running in a thread, not at ISR.
1085 * - The thread's priority is in the preemptible range.
1086 * - The thread has not locked the scheduler.
1087 *
1088 * @funcprops \isr_ok
1089 *
1090 * @return 0 if invoked by an ISR or by a cooperative thread.
1091 * @return Non-zero if invoked by a preemptible thread.
1092 */
1093 __syscall int k_is_preempt_thread(void);
1094
1095 /**
1096 * @brief Test whether startup is in the before-main-task phase.
1097 *
1098 * This routine allows the caller to customize its actions, depending on
1099  * whether it is being invoked before the kernel is fully active.
1100 *
1101 * @funcprops \isr_ok
1102 *
1103 * @return true if invoked before post-kernel initialization
1104 * @return false if invoked during/after post-kernel initialization
1105 */
1106 static inline bool k_is_pre_kernel(void)
1107 {
1108 extern bool z_sys_post_kernel; /* in init.c */
1109
1110 return !z_sys_post_kernel;
1111 }
1112
1113 /**
1114 * @}
1115 */
1116
1117 /**
1118 * @addtogroup thread_apis
1119 * @{
1120 */
1121
1122 /**
1123 * @brief Lock the scheduler.
1124 *
1125 * This routine prevents the current thread from being preempted by another
1126 * thread by instructing the scheduler to treat it as a cooperative thread.
1127 * If the thread subsequently performs an operation that makes it unready,
1128 * it will be context switched out in the normal manner. When the thread
1129 * again becomes the current thread, its non-preemptible status is maintained.
1130 *
1131 * This routine can be called recursively.
1132 *
1133 * Owing to clever implementation details, scheduler locks are
1134 * extremely fast for non-userspace threads (just one byte
1135 * inc/decrement in the thread struct).
1136 *
1137 * @note This works by elevating the thread priority temporarily to a
1138 * cooperative priority, allowing cheap synchronization vs. other
1139 * preemptible or cooperative threads running on the current CPU. It
1140 * does not prevent preemption or asynchrony of other types. It does
1141 * not prevent threads from running on other CPUs when CONFIG_SMP=y.
1142 * It does not prevent interrupts from happening, nor does it prevent
1143 * threads with MetaIRQ priorities from preempting the current thread.
1144 * In general this is a historical API not well-suited to modern
1145 * applications, use with care.
1146 */
1147 void k_sched_lock(void);
1148
1149 /**
1150 * @brief Unlock the scheduler.
1151 *
1152 * This routine reverses the effect of a previous call to k_sched_lock().
1153 * A thread must call the routine once for each time it called k_sched_lock()
1154 * before the thread becomes preemptible.
1155 */
1156 void k_sched_unlock(void);
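/*
 * Example (editorial sketch): briefly protecting a non-atomic update against
 * preemption by other threads on the current CPU. shared_counter is an
 * illustrative variable; note this does not protect against ISRs, MetaIRQ
 * threads, or threads running on other CPUs.
 *
 *	k_sched_lock();
 *	shared_counter++;
 *	k_sched_unlock();
 */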
1157
1158 /**
1159 * @brief Set current thread's custom data.
1160 *
1161  * This routine sets the custom data for the current thread to @a value.
1162 *
1163 * Custom data is not used by the kernel itself, and is freely available
1164 * for a thread to use as it sees fit. It can be used as a framework
1165 * upon which to build thread-local storage.
1166 *
1167 * @param value New custom data value.
1168 *
1169 */
1170 __syscall void k_thread_custom_data_set(void *value);
1171
1172 /**
1173 * @brief Get current thread's custom data.
1174 *
1175 * This routine returns the custom data for the current thread.
1176 *
1177 * @return Current custom data value.
1178 */
1179 __syscall void *k_thread_custom_data_get(void);
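/*
 * Example (editorial sketch, assuming CONFIG_THREAD_CUSTOM_DATA=y): stashing
 * a per-thread context pointer and retrieving it later from the same thread.
 * struct my_ctx is an illustrative type.
 *
 *	k_thread_custom_data_set(&my_ctx);
 *	...
 *	struct my_ctx *ctx = k_thread_custom_data_get();
 */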
1180
1181 /**
1182 * @brief Set current thread name
1183 *
1184 * Set the name of the thread to be used when @kconfig{CONFIG_THREAD_MONITOR}
1185 * is enabled for tracing and debugging.
1186 *
1187 * @param thread Thread to set name, or NULL to set the current thread
1188 * @param str Name string
1189 * @retval 0 on success
1190 * @retval -EFAULT Memory access error with supplied string
1191 * @retval -ENOSYS Thread name configuration option not enabled
1192 * @retval -EINVAL Thread name too long
1193 */
1194 __syscall int k_thread_name_set(k_tid_t thread, const char *str);
1195
1196 /**
1197 * @brief Get thread name
1198 *
1199 * Get the name of a thread
1200 *
1201 * @param thread Thread ID
1202 * @retval Thread name, or NULL if configuration not enabled
1203 */
1204 const char *k_thread_name_get(k_tid_t thread);
1205
1206 /**
1207 * @brief Copy the thread name into a supplied buffer
1208 *
1209 * @param thread Thread to obtain name information
1210 * @param buf Destination buffer
1211 * @param size Destination buffer size
1212 * @retval -ENOSPC Destination buffer too small
1213 * @retval -EFAULT Memory access error
1214 * @retval -ENOSYS Thread name feature not enabled
1215 * @retval 0 Success
1216 */
1217 __syscall int k_thread_name_copy(k_tid_t thread, char *buf,
1218 size_t size);
1219
1220 /**
1221 * @brief Get thread state string
1222 *
1223 * This routine generates a human friendly string containing the thread's
1224 * state, and copies as much of it as possible into @a buf.
1225 *
1226 * @param thread_id Thread ID
1227 * @param buf Buffer into which to copy state strings
1228 * @param buf_size Size of the buffer
1229 *
1230 * @retval Pointer to @a buf if data was copied, else a pointer to "".
1231 */
1232 const char *k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size);
1233
1234 /**
1235 * @}
1236 */
1237
1238 /**
1239 * @addtogroup clock_apis
1240 * @{
1241 */
1242
1243 /**
1244 * @brief Generate null timeout delay.
1245 *
1246 * This macro generates a timeout delay that instructs a kernel API
1247 * not to wait if the requested operation cannot be performed immediately.
1248 *
1249 * @return Timeout delay value.
1250 */
1251 #define K_NO_WAIT Z_TIMEOUT_NO_WAIT
1252
1253 /**
1254 * @brief Generate timeout delay from nanoseconds.
1255 *
1256 * This macro generates a timeout delay that instructs a kernel API to
1257 * wait up to @a t nanoseconds to perform the requested operation.
1258 * Note that timer precision is limited to the tick rate, not the
1259 * requested value.
1260 *
1261 * @param t Duration in nanoseconds.
1262 *
1263 * @return Timeout delay value.
1264 */
1265 #define K_NSEC(t) Z_TIMEOUT_NS(t)
1266
1267 /**
1268 * @brief Generate timeout delay from microseconds.
1269 *
1270 * This macro generates a timeout delay that instructs a kernel API
1271 * to wait up to @a t microseconds to perform the requested operation.
1272 * Note that timer precision is limited to the tick rate, not the
1273 * requested value.
1274 *
1275 * @param t Duration in microseconds.
1276 *
1277 * @return Timeout delay value.
1278 */
1279 #define K_USEC(t) Z_TIMEOUT_US(t)
1280
1281 /**
1282 * @brief Generate timeout delay from cycles.
1283 *
1284 * This macro generates a timeout delay that instructs a kernel API
1285 * to wait up to @a t cycles to perform the requested operation.
1286 *
1287 * @param t Duration in cycles.
1288 *
1289 * @return Timeout delay value.
1290 */
1291 #define K_CYC(t) Z_TIMEOUT_CYC(t)
1292
1293 /**
1294 * @brief Generate timeout delay from system ticks.
1295 *
1296 * This macro generates a timeout delay that instructs a kernel API
1297 * to wait up to @a t ticks to perform the requested operation.
1298 *
1299 * @param t Duration in system ticks.
1300 *
1301 * @return Timeout delay value.
1302 */
1303 #define K_TICKS(t) Z_TIMEOUT_TICKS(t)
1304
1305 /**
1306 * @brief Generate timeout delay from milliseconds.
1307 *
1308 * This macro generates a timeout delay that instructs a kernel API
1309 * to wait up to @a ms milliseconds to perform the requested operation.
1310 *
1311 * @param ms Duration in milliseconds.
1312 *
1313 * @return Timeout delay value.
1314 */
1315 #define K_MSEC(ms) Z_TIMEOUT_MS(ms)
1316
1317 /**
1318 * @brief Generate timeout delay from seconds.
1319 *
1320 * This macro generates a timeout delay that instructs a kernel API
1321 * to wait up to @a s seconds to perform the requested operation.
1322 *
1323 * @param s Duration in seconds.
1324 *
1325 * @return Timeout delay value.
1326 */
1327 #define K_SECONDS(s) K_MSEC((s) * MSEC_PER_SEC)
1328
1329 /**
1330 * @brief Generate timeout delay from minutes.
1331  *
1332 * This macro generates a timeout delay that instructs a kernel API
1333 * to wait up to @a m minutes to perform the requested operation.
1334 *
1335 * @param m Duration in minutes.
1336 *
1337 * @return Timeout delay value.
1338 */
1339 #define K_MINUTES(m) K_SECONDS((m) * 60)
1340
1341 /**
1342 * @brief Generate timeout delay from hours.
1343 *
1344 * This macro generates a timeout delay that instructs a kernel API
1345 * to wait up to @a h hours to perform the requested operation.
1346 *
1347 * @param h Duration in hours.
1348 *
1349 * @return Timeout delay value.
1350 */
1351 #define K_HOURS(h) K_MINUTES((h) * 60)
1352
1353 /**
1354 * @brief Generate infinite timeout delay.
1355 *
1356 * This macro generates a timeout delay that instructs a kernel API
1357 * to wait as long as necessary to perform the requested operation.
1358 *
1359 * @return Timeout delay value.
1360 */
1361 #define K_FOREVER Z_FOREVER
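/*
 * Example (editorial sketch): the timeout macros above all produce k_timeout_t
 * values accepted by blocking kernel APIs. my_sem is an illustrative
 * semaphore; k_sem_take() is used only as a representative blocking call.
 *
 *	k_sem_take(&my_sem, K_NO_WAIT);     fail immediately if unavailable
 *	k_sem_take(&my_sem, K_MSEC(50));    wait up to 50 ms
 *	k_sem_take(&my_sem, K_FOREVER);     wait indefinitely
 */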
1362
1363 #ifdef CONFIG_TIMEOUT_64BIT
1364
1365 /**
1366 * @brief Generates an absolute/uptime timeout value from system ticks
1367 *
1368 * This macro generates a timeout delay that represents an expiration
1369 * at the absolute uptime value specified, in system ticks. That is, the
1370 * timeout will expire immediately after the system uptime reaches the
1371 * specified tick count.
1372 *
1373 * @param t Tick uptime value
1374 * @return Timeout delay value
1375 */
1376 #define K_TIMEOUT_ABS_TICKS(t) \
1377 Z_TIMEOUT_TICKS(Z_TICK_ABS((k_ticks_t)MAX(t, 0)))
1378
1379 /**
1380 * @brief Generates an absolute/uptime timeout value from milliseconds
1381 *
1382 * This macro generates a timeout delay that represents an expiration
1383 * at the absolute uptime value specified, in milliseconds. That is,
1384 * the timeout will expire immediately after the system uptime reaches
1385  * the specified time.
1386 *
1387 * @param t Millisecond uptime value
1388 * @return Timeout delay value
1389 */
1390 #define K_TIMEOUT_ABS_MS(t) K_TIMEOUT_ABS_TICKS(k_ms_to_ticks_ceil64(t))
1391
1392 /**
1393 * @brief Generates an absolute/uptime timeout value from microseconds
1394 *
1395 * This macro generates a timeout delay that represents an expiration
1396 * at the absolute uptime value specified, in microseconds. That is,
1397 * the timeout will expire immediately after the system uptime reaches
1398 * the specified time. Note that timer precision is limited by the
1399 * system tick rate and not the requested timeout value.
1400 *
1401 * @param t Microsecond uptime value
1402 * @return Timeout delay value
1403 */
1404 #define K_TIMEOUT_ABS_US(t) K_TIMEOUT_ABS_TICKS(k_us_to_ticks_ceil64(t))
1405
1406 /**
1407 * @brief Generates an absolute/uptime timeout value from nanoseconds
1408 *
1409 * This macro generates a timeout delay that represents an expiration
1410 * at the absolute uptime value specified, in nanoseconds. That is,
1411 * the timeout will expire immediately after the system uptime reaches
1412 * the specified time. Note that timer precision is limited by the
1413 * system tick rate and not the requested timeout value.
1414 *
1415 * @param t Nanosecond uptime value
1416 * @return Timeout delay value
1417 */
1418 #define K_TIMEOUT_ABS_NS(t) K_TIMEOUT_ABS_TICKS(k_ns_to_ticks_ceil64(t))
1419
1420 /**
1421 * @brief Generates an absolute/uptime timeout value from system cycles
1422 *
1423 * This macro generates a timeout delay that represents an expiration
1424 * at the absolute uptime value specified, in cycles. That is, the
1425 * timeout will expire immediately after the system uptime reaches the
1426 * specified time. Note that timer precision is limited by the system
1427 * tick rate and not the requested timeout value.
1428 *
1429 * @param t Cycle uptime value
1430 * @return Timeout delay value
1431 */
1432 #define K_TIMEOUT_ABS_CYC(t) K_TIMEOUT_ABS_TICKS(k_cyc_to_ticks_ceil64(t))
1433
1434 #endif
1435
1436 /**
1437 * @}
1438 */
1439
1440 /**
1441 * @cond INTERNAL_HIDDEN
1442 */
1443
1444 struct k_timer {
1445 /*
1446 * _timeout structure must be first here if we want to use
1447 * dynamic timer allocation. timeout.node is used in the double-linked
1448 * list of free timers
1449 */
1450 struct _timeout timeout;
1451
1452 /* wait queue for the (single) thread waiting on this timer */
1453 _wait_q_t wait_q;
1454
1455 /* runs in ISR context */
1456 void (*expiry_fn)(struct k_timer *timer);
1457
1458 /* runs in the context of the thread that calls k_timer_stop() */
1459 void (*stop_fn)(struct k_timer *timer);
1460
1461 /* timer period */
1462 k_timeout_t period;
1463
1464 /* timer status */
1465 uint32_t status;
1466
1467 /* user-specific data, also used to support legacy features */
1468 void *user_data;
1469
1470 SYS_PORT_TRACING_TRACKING_FIELD(k_timer)
1471
1472 #ifdef CONFIG_OBJ_CORE_TIMER
1473 struct k_obj_core obj_core;
1474 #endif
1475 };
1476
1477 #define Z_TIMER_INITIALIZER(obj, expiry, stop) \
1478 { \
1479 .timeout = { \
1480 .node = {},\
1481 .fn = z_timer_expiration_handler, \
1482 .dticks = 0, \
1483 }, \
1484 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
1485 .expiry_fn = expiry, \
1486 .stop_fn = stop, \
1487 .status = 0, \
1488 .user_data = 0, \
1489 }
1490
1491 /**
1492 * INTERNAL_HIDDEN @endcond
1493 */
1494
1495 /**
1496 * @defgroup timer_apis Timer APIs
1497 * @ingroup kernel_apis
1498 * @{
1499 */
1500
1501 /**
1502 * @typedef k_timer_expiry_t
1503 * @brief Timer expiry function type.
1504 *
1505 * A timer's expiry function is executed by the system clock interrupt handler
1506 * each time the timer expires. The expiry function is optional, and is only
1507 * invoked if the timer has been initialized with one.
1508 *
1509 * @param timer Address of timer.
1510 */
1511 typedef void (*k_timer_expiry_t)(struct k_timer *timer);
1512
1513 /**
1514 * @typedef k_timer_stop_t
1515 * @brief Timer stop function type.
1516 *
1517 * A timer's stop function is executed if the timer is stopped prematurely.
1518  * The function runs in the context of the call that stops the timer. As
1519 * k_timer_stop() can be invoked from an ISR, the stop function must be
1520 * callable from interrupt context (isr-ok).
1521 *
1522 * The stop function is optional, and is only invoked if the timer has been
1523 * initialized with one.
1524 *
1525 * @param timer Address of timer.
1526 */
1527 typedef void (*k_timer_stop_t)(struct k_timer *timer);
1528
1529 /**
1530 * @brief Statically define and initialize a timer.
1531 *
1532 * The timer can be accessed outside the module where it is defined using:
1533 *
1534 * @code extern struct k_timer <name>; @endcode
1535 *
1536 * @param name Name of the timer variable.
1537 * @param expiry_fn Function to invoke each time the timer expires.
1538 * @param stop_fn Function to invoke if the timer is stopped while running.
1539 */
1540 #define K_TIMER_DEFINE(name, expiry_fn, stop_fn) \
1541 STRUCT_SECTION_ITERABLE(k_timer, name) = \
1542 Z_TIMER_INITIALIZER(name, expiry_fn, stop_fn)
1543
1544 /**
1545 * @brief Initialize a timer.
1546 *
1547 * This routine initializes a timer, prior to its first use.
1548 *
1549 * @param timer Address of timer.
1550 * @param expiry_fn Function to invoke each time the timer expires.
1551 * @param stop_fn Function to invoke if the timer is stopped while running.
1552 */
1553 void k_timer_init(struct k_timer *timer,
1554 k_timer_expiry_t expiry_fn,
1555 k_timer_stop_t stop_fn);
1556
1557 /**
1558 * @brief Start a timer.
1559 *
1560 * This routine starts a timer, and resets its status to zero. The timer
1561 * begins counting down using the specified duration and period values.
1562 *
1563 * Attempting to start a timer that is already running is permitted.
1564 * The timer's status is reset to zero and the timer begins counting down
1565 * using the new duration and period values.
1566 *
1567 * @param timer Address of timer.
1568 * @param duration Initial timer duration.
1569 * @param period Timer period.
1570 */
1571 __syscall void k_timer_start(struct k_timer *timer,
1572 k_timeout_t duration, k_timeout_t period);
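/*
 * Example (editorial sketch): a periodic timer that fires every 100 ms after
 * an initial 1 s delay. my_timer and my_expiry are illustrative names; the
 * expiry function runs in ISR context.
 *
 *	static void my_expiry(struct k_timer *timer)
 *	{
 *		... handle expiration (ISR context) ...
 *	}
 *
 *	K_TIMER_DEFINE(my_timer, my_expiry, NULL);
 *
 *	k_timer_start(&my_timer, K_SECONDS(1), K_MSEC(100));
 */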
1573
1574 /**
1575 * @brief Stop a timer.
1576 *
1577 * This routine stops a running timer prematurely. The timer's stop function,
1578 * if one exists, is invoked by the caller.
1579 *
1580 * Attempting to stop a timer that is not running is permitted, but has no
1581 * effect on the timer.
1582 *
1583 * @note The stop handler has to be callable from ISRs if @a k_timer_stop is to
1584 * be called from ISRs.
1585 *
1586 * @funcprops \isr_ok
1587 *
1588 * @param timer Address of timer.
1589 */
1590 __syscall void k_timer_stop(struct k_timer *timer);
1591
1592 /**
1593 * @brief Read timer status.
1594 *
1595 * This routine reads the timer's status, which indicates the number of times
1596 * it has expired since its status was last read.
1597 *
1598 * Calling this routine resets the timer's status to zero.
1599 *
1600 * @param timer Address of timer.
1601 *
1602 * @return Timer status.
1603 */
1604 __syscall uint32_t k_timer_status_get(struct k_timer *timer);
1605
1606 /**
1607 * @brief Synchronize thread to timer expiration.
1608 *
1609 * This routine blocks the calling thread until the timer's status is non-zero
1610 * (indicating that it has expired at least once since it was last examined)
1611 * or the timer is stopped. If the timer status is already non-zero,
1612 * or the timer is already stopped, the caller continues without waiting.
1613 *
1614 * Calling this routine resets the timer's status to zero.
1615 *
1616 * This routine must not be used by interrupt handlers, since they are not
1617 * allowed to block.
1618 *
1619 * @param timer Address of timer.
1620 *
1621 * @return Timer status.
1622 */
1623 __syscall uint32_t k_timer_status_sync(struct k_timer *timer);
1624
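/*
 * Example (illustrative sketch): a thread that performs work once per timer
 * period by blocking on the timer status. The names sample_timer,
 * sampling_thread and do_sample are hypothetical.
 *
 * @code
 * K_TIMER_DEFINE(sample_timer, NULL, NULL);
 *
 * void sampling_thread(void)
 * {
 *         k_timer_start(&sample_timer, K_MSEC(10), K_MSEC(10));
 *
 *         while (1) {
 *                 // Blocks until the timer has expired at least once, then
 *                 // returns the number of expirations since the last read.
 *                 uint32_t expiries = k_timer_status_sync(&sample_timer);
 *
 *                 if (expiries > 1) {
 *                         // One or more periods were missed.
 *                 }
 *                 do_sample();   // hypothetical sampling routine
 *         }
 * }
 * @endcode
 */
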
1625 #ifdef CONFIG_SYS_CLOCK_EXISTS
1626
1627 /**
1628 * @brief Get next expiration time of a timer, in system ticks
1629 *
1630 * This routine returns the future system uptime reached at the next
1631 * time of expiration of the timer, in units of system ticks. If the
1632 * timer is not running, the current system uptime is returned.
1633 *
1634 * @param timer The timer object
1635 * @return Uptime of expiration, in ticks
1636 */
1637 __syscall k_ticks_t k_timer_expires_ticks(const struct k_timer *timer);
1638
1639 static inline k_ticks_t z_impl_k_timer_expires_ticks(
1640 const struct k_timer *timer)
1641 {
1642 return z_timeout_expires(&timer->timeout);
1643 }
1644
1645 /**
1646 * @brief Get time remaining before a timer next expires, in system ticks
1647 *
1648 * This routine computes the time remaining before a running timer
1649 * next expires, in units of system ticks. If the timer is not
1650 * running, it returns zero.
1651 */
1652 __syscall k_ticks_t k_timer_remaining_ticks(const struct k_timer *timer);
1653
1654 static inline k_ticks_t z_impl_k_timer_remaining_ticks(
1655 const struct k_timer *timer)
1656 {
1657 return z_timeout_remaining(&timer->timeout);
1658 }
1659
1660 /**
1661 * @brief Get time remaining before a timer next expires.
1662 *
1663 * This routine computes the (approximate) time remaining before a running
1664 * timer next expires. If the timer is not running, it returns zero.
1665 *
1666 * @param timer Address of timer.
1667 *
1668 * @return Remaining time (in milliseconds).
1669 */
1670 static inline uint32_t k_timer_remaining_get(struct k_timer *timer)
1671 {
1672 return k_ticks_to_ms_floor32(k_timer_remaining_ticks(timer));
1673 }
1674
1675 #endif /* CONFIG_SYS_CLOCK_EXISTS */
1676
1677 /**
1678 * @brief Associate user-specific data with a timer.
1679 *
1680 * This routine records the @a user_data with the @a timer, to be retrieved
1681 * later.
1682 *
1683 * For example, a timer handler shared across multiple subsystems can use it to
1684 * retrieve data specific to the subsystem the timer is associated with.
1685 *
1686 * @param timer Address of timer.
1687 * @param user_data User data to associate with the timer.
1688 */
1689 __syscall void k_timer_user_data_set(struct k_timer *timer, void *user_data);
1690
1691 /**
1692 * @internal
1693 */
1694 static inline void z_impl_k_timer_user_data_set(struct k_timer *timer,
1695 void *user_data)
1696 {
1697 timer->user_data = user_data;
1698 }
1699
1700 /**
1701 * @brief Retrieve the user-specific data from a timer.
1702 *
1703 * @param timer Address of timer.
1704 *
1705 * @return The user data.
1706 */
1707 __syscall void *k_timer_user_data_get(const struct k_timer *timer);
1708
1709 static inline void *z_impl_k_timer_user_data_get(const struct k_timer *timer)
1710 {
1711 return timer->user_data;
1712 }
1713
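/*
 * Example (illustrative sketch): a single expiry handler shared by several
 * subsystem instances, each retrieving its own context via the timer's user
 * data. struct my_ctx and my_shared_expiry are hypothetical names.
 *
 * @code
 * struct my_ctx {
 *         int id;
 * };
 *
 * void my_shared_expiry(struct k_timer *timer)
 * {
 *         struct my_ctx *ctx = k_timer_user_data_get(timer);
 *         // ... use ctx->id to identify the owning instance ...
 * }
 *
 * void setup(struct k_timer *timer, struct my_ctx *ctx)
 * {
 *         k_timer_init(timer, my_shared_expiry, NULL);
 *         k_timer_user_data_set(timer, ctx);
 * }
 * @endcode
 */
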
1714 /** @} */
1715
1716 /**
1717 * @addtogroup clock_apis
1718 * @ingroup kernel_apis
1719 * @{
1720 */
1721
1722 /**
1723 * @brief Get system uptime, in system ticks.
1724 *
1725 * This routine returns the elapsed time since the system booted, in
1726 * ticks (cf. @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC}), which is the
1727 * fundamental unit of resolution of kernel timekeeping.
1728 *
1729 * @return Current uptime in ticks.
1730 */
1731 __syscall int64_t k_uptime_ticks(void);
1732
1733 /**
1734 * @brief Get system uptime.
1735 *
1736 * This routine returns the elapsed time since the system booted,
1737 * in milliseconds.
1738 *
1739 * @note
1740 * While this function returns time in milliseconds, it does
1741 * not mean it has millisecond resolution. The actual resolution depends on the
1742 * @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
1743 *
1744 * @return Current uptime in milliseconds.
1745 */
1746 static inline int64_t k_uptime_get(void)
1747 {
1748 return k_ticks_to_ms_floor64(k_uptime_ticks());
1749 }
1750
1751 /**
1752 * @brief Get system uptime (32-bit version).
1753 *
1754 * This routine returns the lower 32 bits of the system uptime in
1755 * milliseconds.
1756 *
1757 * Because correct conversion requires full precision of the system
1758 * clock there is no benefit to using this over k_uptime_get() unless
1759 * you know the application will never run long enough for the system
1760 * clock to approach 2^32 ticks. Calls to this function may involve
1761 * interrupt blocking and 64-bit math.
1762 *
1763 * @note
1764 * While this function returns time in milliseconds, it does
1765 * not mean it has millisecond resolution. The actual resolution depends on the
1766 * @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
1767 *
1768 * @return The low 32 bits of the current uptime, in milliseconds.
1769 */
1770 static inline uint32_t k_uptime_get_32(void)
1771 {
1772 return (uint32_t)k_uptime_get();
1773 }
1774
1775 /**
1776 * @brief Get elapsed time.
1777 *
1778 * This routine computes the elapsed time between the current system uptime
1779 * and an earlier reference time, in milliseconds.
1780 *
1781 * @param reftime Pointer to a reference time, which is updated to the current
1782 * uptime upon return.
1783 *
1784 * @return Elapsed time, in milliseconds.
1785 */
1786 static inline int64_t k_uptime_delta(int64_t *reftime)
1787 {
1788 int64_t uptime, delta;
1789
1790 uptime = k_uptime_get();
1791 delta = uptime - *reftime;
1792 *reftime = uptime;
1793
1794 return delta;
1795 }
1796
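/*
 * Example (illustrative sketch): measuring how long an operation takes with
 * k_uptime_delta(). The granularity is limited by the tick rate, so very
 * short operations may measure as 0 ms. do_long_operation() is hypothetical.
 *
 * @code
 * void measure(void)
 * {
 *         int64_t ref = k_uptime_get();
 *
 *         do_long_operation();   // hypothetical workload
 *
 *         // Milliseconds elapsed since ref; ref is updated to "now".
 *         int64_t elapsed_ms = k_uptime_delta(&ref);
 * }
 * @endcode
 */
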
1797 /**
1798 * @brief Read the hardware clock.
1799 *
1800 * This routine returns the current time, as measured by the system's hardware
1801 * clock.
1802 *
1803 * @return Current hardware clock up-counter (in cycles).
1804 */
1805 static inline uint32_t k_cycle_get_32(void)
1806 {
1807 return arch_k_cycle_get_32();
1808 }
1809
1810 /**
1811 * @brief Read the 64-bit hardware clock.
1812 *
1813 * This routine returns the current time in 64-bits, as measured by the
1814 * system's hardware clock, if available.
1815 *
1816 * @see CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER
1817 *
1818 * @return Current hardware clock up-counter (in cycles).
1819 */
1820 static inline uint64_t k_cycle_get_64(void)
1821 {
1822 if (!IS_ENABLED(CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER)) {
1823 __ASSERT(0, "64-bit cycle counter not enabled on this platform. "
1824 "See CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER");
1825 return 0;
1826 }
1827
1828 return arch_k_cycle_get_64();
1829 }
1830
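/*
 * Example (illustrative sketch): timing a short code section with the
 * hardware cycle counter. The conversion helper k_cyc_to_ns_floor64() is
 * assumed to be available from the time-units helpers; short_operation()
 * is hypothetical.
 *
 * @code
 * void time_section(void)
 * {
 *         uint32_t start = k_cycle_get_32();
 *
 *         short_operation();     // hypothetical workload
 *
 *         uint32_t cycles = k_cycle_get_32() - start;
 *         uint64_t ns = k_cyc_to_ns_floor64(cycles);
 * }
 * @endcode
 */
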
1831 /**
1832 * @}
1833 */
1834
1835 struct k_queue {
1836 sys_sflist_t data_q;
1837 struct k_spinlock lock;
1838 _wait_q_t wait_q;
1839
1840 Z_DECL_POLL_EVENT
1841
1842 SYS_PORT_TRACING_TRACKING_FIELD(k_queue)
1843 };
1844
1845 /**
1846 * @cond INTERNAL_HIDDEN
1847 */
1848
1849 #define Z_QUEUE_INITIALIZER(obj) \
1850 { \
1851 .data_q = SYS_SFLIST_STATIC_INIT(&obj.data_q), \
1852 .lock = { }, \
1853 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
1854 Z_POLL_EVENT_OBJ_INIT(obj) \
1855 }
1856
1857 /**
1858 * INTERNAL_HIDDEN @endcond
1859 */
1860
1861 /**
1862 * @defgroup queue_apis Queue APIs
1863 * @ingroup kernel_apis
1864 * @{
1865 */
1866
1867 /**
1868 * @brief Initialize a queue.
1869 *
1870 * This routine initializes a queue object, prior to its first use.
1871 *
1872 * @param queue Address of the queue.
1873 */
1874 __syscall void k_queue_init(struct k_queue *queue);
1875
1876 /**
1877 * @brief Cancel waiting on a queue.
1878 *
1879 * This routine causes the first thread pending on @a queue, if any, to
1880 * return from its k_queue_get() call with a NULL value (as if the timeout expired).
1881 * If the queue is being waited on by k_poll(), it will return with
1882 * -EINTR and K_POLL_STATE_CANCELLED state (and per above, subsequent
1883 * k_queue_get() will return NULL).
1884 *
1885 * @funcprops \isr_ok
1886 *
1887 * @param queue Address of the queue.
1888 */
1889 __syscall void k_queue_cancel_wait(struct k_queue *queue);
1890
1891 /**
1892 * @brief Append an element to the end of a queue.
1893 *
1894 * This routine appends a data item to @a queue. A queue data item must be
1895 * aligned on a word boundary, and the first word of the item is reserved
1896 * for the kernel's use.
1897 *
1898 * @funcprops \isr_ok
1899 *
1900 * @param queue Address of the queue.
1901 * @param data Address of the data item.
1902 */
1903 void k_queue_append(struct k_queue *queue, void *data);
1904
1905 /**
1906 * @brief Append an element to a queue.
1907 *
1908 * This routine appends a data item to @a queue. There is an implicit memory
1909 * allocation to create an additional temporary bookkeeping data structure from
1910 * the calling thread's resource pool, which is automatically freed when the
1911 * item is removed. The data itself is not copied.
1912 *
1913 * @funcprops \isr_ok
1914 *
1915 * @param queue Address of the queue.
1916 * @param data Address of the data item.
1917 *
1918 * @retval 0 on success
1919 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
1920 */
1921 __syscall int32_t k_queue_alloc_append(struct k_queue *queue, void *data);
1922
1923 /**
1924 * @brief Prepend an element to a queue.
1925 *
1926 * This routine prepends a data item to @a queue. A queue data item must be
1927 * aligned on a word boundary, and the first word of the item is reserved
1928 * for the kernel's use.
1929 *
1930 * @funcprops \isr_ok
1931 *
1932 * @param queue Address of the queue.
1933 * @param data Address of the data item.
1934 */
1935 void k_queue_prepend(struct k_queue *queue, void *data);
1936
1937 /**
1938 * @brief Prepend an element to a queue.
1939 *
1940 * This routine prepends a data item to @a queue. There is an implicit memory
1941 * allocation to create an additional temporary bookkeeping data structure from
1942 * the calling thread's resource pool, which is automatically freed when the
1943 * item is removed. The data itself is not copied.
1944 *
1945 * @funcprops \isr_ok
1946 *
1947 * @param queue Address of the queue.
1948 * @param data Address of the data item.
1949 *
1950 * @retval 0 on success
1951 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
1952 */
1953 __syscall int32_t k_queue_alloc_prepend(struct k_queue *queue, void *data);
1954
1955 /**
1956 * @brief Insert an element into a queue.
1957 *
1958 * This routine inserts a data item into @a queue after the previous item @a prev. A queue
1959 * data item must be aligned on a word boundary, and the first word of
1960 * the item is reserved for the kernel's use.
1961 *
1962 * @funcprops \isr_ok
1963 *
1964 * @param queue Address of the queue.
1965 * @param prev Address of the previous data item.
1966 * @param data Address of the data item.
1967 */
1968 void k_queue_insert(struct k_queue *queue, void *prev, void *data);
1969
1970 /**
1971 * @brief Atomically append a list of elements to a queue.
1972 *
1973 * This routine adds a list of data items to @a queue in one operation.
1974 * The data items must be in a singly-linked list, with the first word
1975 * in each data item pointing to the next data item; the list must be
1976 * NULL-terminated.
1977 *
1978 * @funcprops \isr_ok
1979 *
1980 * @param queue Address of the queue.
1981 * @param head Pointer to first node in singly-linked list.
1982 * @param tail Pointer to last node in singly-linked list.
1983 *
1984 * @retval 0 on success
1985 * @retval -EINVAL on invalid supplied data
1986 *
1987 */
1988 int k_queue_append_list(struct k_queue *queue, void *head, void *tail);
1989
1990 /**
1991 * @brief Atomically add a list of elements to a queue.
1992 *
1993 * This routine adds a list of data items to @a queue in one operation.
1994 * The data items must be in a singly-linked list implemented using a
1995 * sys_slist_t object. Upon completion, the original list is empty.
1996 *
1997 * @funcprops \isr_ok
1998 *
1999 * @param queue Address of the queue.
2000 * @param list Pointer to sys_slist_t object.
2001 *
2002 * @retval 0 on success
2003 * @retval -EINVAL on invalid data
2004 */
2005 int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list);
2006
2007 /**
2008 * @brief Get an element from a queue.
2009 *
2010 * This routine removes the first data item from @a queue. The first word of the
2011 * data item is reserved for the kernel's use.
2012 *
2013 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2014 *
2015 * @funcprops \isr_ok
2016 *
2017 * @param queue Address of the queue.
2018 * @param timeout Non-negative waiting period to obtain a data item
2019 * or one of the special values K_NO_WAIT and
2020 * K_FOREVER.
2021 *
2022 * @return Address of the data item if successful; NULL if returned
2023 * without waiting, or waiting period timed out.
2024 */
2025 __syscall void *k_queue_get(struct k_queue *queue, k_timeout_t timeout);
2026
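/*
 * Example (illustrative sketch): producer/consumer use of a k_queue. The
 * first field of each item is left for the kernel's linked-list bookkeeping;
 * struct my_item and the queue name are hypothetical.
 *
 * @code
 * struct my_item {
 *         void *reserved;        // first word reserved for the kernel
 *         uint32_t payload;
 * };
 *
 * K_QUEUE_DEFINE(my_queue);
 *
 * void producer(struct my_item *item)
 * {
 *         item->payload = 42;
 *         k_queue_append(&my_queue, item);
 * }
 *
 * void consumer(void)
 * {
 *         struct my_item *item = k_queue_get(&my_queue, K_FOREVER);
 *
 *         if (item != NULL) {
 *                 // ... process item->payload ...
 *         }
 * }
 * @endcode
 */
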
2027 /**
2028 * @brief Remove an element from a queue.
2029 *
2030 * This routine removes the data item from @a queue. The first word of the
2031 * data item is reserved for the kernel's use. Removing elements from a k_queue
2032 * relies on sys_slist_find_and_remove(), which is not a constant-time operation.
2033 *
2034 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2035 *
2036 * @funcprops \isr_ok
2037 *
2038 * @param queue Address of the queue.
2039 * @param data Address of the data item.
2040 *
2041 * @return true if data item was removed
2042 */
2043 bool k_queue_remove(struct k_queue *queue, void *data);
2044
2045 /**
2046 * @brief Append an element to a queue only if it's not present already.
2047 *
2048 * This routine appends the data item to @a queue. The first word of the data
2049 * item is reserved for the kernel's use. Appending elements to a k_queue
2050 * relies on sys_slist_is_node_in_list(), which is not a constant-time operation.
2051 *
2052 * @funcprops \isr_ok
2053 *
2054 * @param queue Address of the queue.
2055 * @param data Address of the data item.
2056 *
2057 * @return true if data item was added, false if not
2058 */
2059 bool k_queue_unique_append(struct k_queue *queue, void *data);
2060
2061 /**
2062 * @brief Query a queue to see if it has data available.
2063 *
2064 * Note that the data might already be gone by the time this function returns
2065 * if other threads are also trying to read from the queue.
2066 *
2067 * @funcprops \isr_ok
2068 *
2069 * @param queue Address of the queue.
2070 *
2071 * @return Non-zero if the queue is empty.
2072 * @return 0 if data is available.
2073 */
2074 __syscall int k_queue_is_empty(struct k_queue *queue);
2075
2076 static inline int z_impl_k_queue_is_empty(struct k_queue *queue)
2077 {
2078 return (int)sys_sflist_is_empty(&queue->data_q);
2079 }
2080
2081 /**
2082 * @brief Peek element at the head of queue.
2083 *
2084 * Return element from the head of queue without removing it.
2085 *
2086 * @param queue Address of the queue.
2087 *
2088 * @return Head element, or NULL if queue is empty.
2089 */
2090 __syscall void *k_queue_peek_head(struct k_queue *queue);
2091
2092 /**
2093 * @brief Peek element at the tail of queue.
2094 *
2095 * Return element from the tail of queue without removing it.
2096 *
2097 * @param queue Address of the queue.
2098 *
2099 * @return Tail element, or NULL if queue is empty.
2100 */
2101 __syscall void *k_queue_peek_tail(struct k_queue *queue);
2102
2103 /**
2104 * @brief Statically define and initialize a queue.
2105 *
2106 * The queue can be accessed outside the module where it is defined using:
2107 *
2108 * @code extern struct k_queue <name>; @endcode
2109 *
2110 * @param name Name of the queue.
2111 */
2112 #define K_QUEUE_DEFINE(name) \
2113 STRUCT_SECTION_ITERABLE(k_queue, name) = \
2114 Z_QUEUE_INITIALIZER(name)
2115
2116 /** @} */
2117
2118 #ifdef CONFIG_USERSPACE
2119 /**
2120 * @brief futex structure
2121 *
2122 * A k_futex is a lightweight mutual exclusion primitive designed
2123 * to minimize kernel involvement. Uncontended operation relies
2124 * only on atomic access to shared memory. k_futex objects are tracked as
2125 * kernel objects and can live in user memory so that any access
2126 * bypasses the kernel object permission management mechanism.
2127 */
2128 struct k_futex {
2129 atomic_t val;
2130 };
2131
2132 /**
2133 * @brief futex kernel data structure
2134 *
2135 * z_futex_data is the kernel-side helper data structure used by k_futex to
2136 * complete contended futex operations; the z_futex_data structure of every
2137 * futex object is invisible in user mode.
2138 */
2139 struct z_futex_data {
2140 _wait_q_t wait_q;
2141 struct k_spinlock lock;
2142 };
2143
2144 #define Z_FUTEX_DATA_INITIALIZER(obj) \
2145 { \
2146 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q) \
2147 }
2148
2149 /**
2150 * @defgroup futex_apis FUTEX APIs
2151 * @ingroup kernel_apis
2152 * @{
2153 */
2154
2155 /**
2156 * @brief Pend the current thread on a futex
2157 *
2158 * Tests that the supplied futex contains the expected value, and if so,
2159 * goes to sleep until some other thread calls k_futex_wake() on it.
2160 *
2161 * @param futex Address of the futex.
2162 * @param expected Expected value of the futex; if the actual value differs,
2163 * the caller will not wait on it.
2164 * @param timeout Non-negative waiting period on the futex, or
2165 * one of the special values K_NO_WAIT or K_FOREVER.
2166 * @retval -EACCES Caller does not have read access to futex address.
2167 * @retval -EAGAIN If the futex value did not match the expected parameter.
2168 * @retval -EINVAL Futex parameter address not recognized by the kernel.
2169 * @retval -ETIMEDOUT Thread woke up due to timeout and not a futex wakeup.
2170 * @retval 0 if the caller went to sleep and was woken up. The caller
2171 * should check the futex's value on wakeup to determine if it needs
2172 * to block again.
2173 */
2174 __syscall int k_futex_wait(struct k_futex *futex, int expected,
2175 k_timeout_t timeout);
2176
2177 /**
2178 * @brief Wake one/all threads pending on a futex
2179 *
2180 * Wake up the highest-priority thread pending on the supplied futex, or
2181 * wake up all the threads pending on the supplied futex, depending on
2182 * @a wake_all.
2183 *
2184 * @param futex Futex to wake up pending threads.
2185 * @param wake_all If true, wake up all pending threads; If false,
2186 * wakeup the highest priority thread.
2187 * @retval -EACCES Caller does not have access to the futex address.
2188 * @retval -EINVAL Futex parameter address not recognized by the kernel.
2189 * @retval Number of threads that were woken up.
2190 */
2191 __syscall int k_futex_wake(struct k_futex *futex, bool wake_all);
2192
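/*
 * Example (illustrative sketch, assuming CONFIG_USERSPACE): a minimal
 * user-mode lock built on k_futex. The fast path uses only an atomic
 * compare-and-swap; the kernel is entered only on contention. This is a
 * simplified sketch, not a complete or fair lock implementation, and the
 * my_lock names are hypothetical.
 *
 * @code
 * struct k_futex my_lock = { .val = ATOMIC_INIT(0) };
 *
 * void my_lock_acquire(void)
 * {
 *         while (!atomic_cas(&my_lock.val, 0, 1)) {
 *                 // Value was not 0: sleep until another thread wakes us,
 *                 // then retry the compare-and-swap.
 *                 (void)k_futex_wait(&my_lock, 1, K_FOREVER);
 *         }
 * }
 *
 * void my_lock_release(void)
 * {
 *         atomic_set(&my_lock.val, 0);
 *         (void)k_futex_wake(&my_lock, false);
 * }
 * @endcode
 */
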
2193 /** @} */
2194 #endif
2195
2196 /**
2197 * @defgroup event_apis Event APIs
2198 * @ingroup kernel_apis
2199 * @{
2200 */
2201
2202 /**
2203 * Event Structure
2204 * @ingroup event_apis
2205 */
2206
2207 struct k_event {
2208 _wait_q_t wait_q;
2209 uint32_t events;
2210 struct k_spinlock lock;
2211
2212 SYS_PORT_TRACING_TRACKING_FIELD(k_event)
2213
2214 #ifdef CONFIG_OBJ_CORE_EVENT
2215 struct k_obj_core obj_core;
2216 #endif
2217
2218 };
2219
2220 #define Z_EVENT_INITIALIZER(obj) \
2221 { \
2222 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2223 .events = 0 \
2224 }
2225
2226 /**
2227 * @brief Initialize an event object
2228 *
2229 * This routine initializes an event object, prior to its first use.
2230 *
2231 * @param event Address of the event object.
2232 */
2233 __syscall void k_event_init(struct k_event *event);
2234
2235 /**
2236 * @brief Post one or more events to an event object
2237 *
2238 * This routine posts one or more events to an event object. All tasks waiting
2239 * on the event object @a event whose waiting conditions become met by this
2240 * posting immediately unpend.
2241 *
2242 * Posting differs from setting in that posted events are merged together with
2243 * the current set of events tracked by the event object.
2244 *
2245 * @param event Address of the event object
2246 * @param events Set of events to post to @a event
2247 *
2248 * @retval Previous value of the events in @a event
2249 */
2250 __syscall uint32_t k_event_post(struct k_event *event, uint32_t events);
2251
2252 /**
2253 * @brief Set the events in an event object
2254 *
2255 * This routine sets the events stored in event object to the specified value.
2256 * All tasks waiting on the event object @a event whose waiting conditions
2257 * become met by this immediately unpend.
2258 *
2259 * Setting differs from posting in that set events replace the current set of
2260 * events tracked by the event object.
2261 *
2262 * @param event Address of the event object
2263 * @param events Set of events to set in @a event
2264 *
2265 * @retval Previous value of the events in @a event
2266 */
2267 __syscall uint32_t k_event_set(struct k_event *event, uint32_t events);
2268
2269 /**
2270 * @brief Set or clear the events in an event object
2271 *
2272 * This routine sets the events stored in event object to the specified value.
2273 * All tasks waiting on the event object @a event whose waiting conditions
2274 * become met by this immediately unpend. Unlike @ref k_event_set, this routine
2275 * allows specific event bits to be set and cleared as determined by the mask.
2276 *
2277 * @param event Address of the event object
2278 * @param events Set of events to set/clear in @a event
2279 * @param events_mask Mask to be applied to @a events
2280 *
2281 * @retval Previous value of the events in @a events_mask
2282 */
2283 __syscall uint32_t k_event_set_masked(struct k_event *event, uint32_t events,
2284 uint32_t events_mask);
2285
2286 /**
2287 * @brief Clear the events in an event object
2288 *
2289 * This routine clears (resets) the specified events stored in an event object.
2290 *
2291 * @param event Address of the event object
2292 * @param events Set of events to clear in @a event
2293 *
2294 * @retval Previous value of the events in @a event
2295 */
2296 __syscall uint32_t k_event_clear(struct k_event *event, uint32_t events);
2297
2298 /**
2299 * @brief Wait for any of the specified events
2300 *
2301 * This routine waits on event object @a event until any of the specified
2302 * events have been delivered to the event object, or the maximum wait time
2303 * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
2304 * events that are expressed as bits in a single 32-bit word.
2305 *
2306 * @note The caller must be careful when resetting if there are multiple threads
2307 * waiting for the event object @a event.
2308 *
2309 * @param event Address of the event object
2310 * @param events Set of desired events on which to wait
2311 * @param reset If true, clear the set of events tracked by the event object
2312 * before waiting. If false, do not clear the events.
2313 * @param timeout Waiting period for the desired set of events or one of the
2314 * special values K_NO_WAIT and K_FOREVER.
2315 *
2316 * @retval set of matching events upon success
2317 * @retval 0 if matching events were not received within the specified time
2318 */
2319 __syscall uint32_t k_event_wait(struct k_event *event, uint32_t events,
2320 bool reset, k_timeout_t timeout);
2321
2322 /**
2323 * @brief Wait for all of the specified events
2324 *
2325 * This routine waits on event object @a event until all of the specified
2326 * events have been delivered to the event object, or the maximum wait time
2327 * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
2328 * events that are expressed as bits in a single 32-bit word.
2329 *
2330 * @note The caller must be careful when resetting if there are multiple threads
2331 * waiting for the event object @a event.
2332 *
2333 * @param event Address of the event object
2334 * @param events Set of desired events on which to wait
2335 * @param reset If true, clear the set of events tracked by the event object
2336 * before waiting. If false, do not clear the events.
2337 * @param timeout Waiting period for the desired set of events or one of the
2338 * special values K_NO_WAIT and K_FOREVER.
2339 *
2340 * @retval set of matching events upon success
2341 * @retval 0 if matching events were not received within the specified time
2342 */
2343 __syscall uint32_t k_event_wait_all(struct k_event *event, uint32_t events,
2344 bool reset, k_timeout_t timeout);
2345
2346 /**
2347 * @brief Test the events currently tracked in the event object
2348 *
2349 * @param event Address of the event object
2350 * @param events_mask Set of desired events to test
2351 *
2352 * @retval Current value of events in @a events_mask
2353 */
2354 static inline uint32_t k_event_test(struct k_event *event, uint32_t events_mask)
2355 {
2356 return k_event_wait(event, events_mask, false, K_NO_WAIT);
2357 }
2358
2359 /**
2360 * @brief Statically define and initialize an event object
2361 *
2362 * The event can be accessed outside the module where it is defined using:
2363 *
2364 * @code extern struct k_event <name>; @endcode
2365 *
2366 * @param name Name of the event object.
2367 */
2368 #define K_EVENT_DEFINE(name) \
2369 STRUCT_SECTION_ITERABLE(k_event, name) = \
2370 Z_EVENT_INITIALIZER(name);
2371
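/*
 * Example (illustrative sketch): one thread waits for either of two event
 * bits while an ISR or another thread posts them. The bit definitions and
 * names are hypothetical.
 *
 * @code
 * #define EVT_RX_DONE  BIT(0)
 * #define EVT_TX_DONE  BIT(1)
 *
 * K_EVENT_DEFINE(my_events);
 *
 * void my_isr(const void *arg)
 * {
 *         k_event_post(&my_events, EVT_RX_DONE);
 * }
 *
 * void waiter(void)
 * {
 *         uint32_t matched = k_event_wait(&my_events,
 *                                         EVT_RX_DONE | EVT_TX_DONE,
 *                                         false, K_SECONDS(1));
 *         if (matched == 0) {
 *                 // Timed out without any of the requested events.
 *         } else if (matched & EVT_RX_DONE) {
 *                 // Handle reception; clear the bit once consumed.
 *                 k_event_clear(&my_events, EVT_RX_DONE);
 *         }
 * }
 * @endcode
 */
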
2372 /** @} */
2373
2374 struct k_fifo {
2375 struct k_queue _queue;
2376 #ifdef CONFIG_OBJ_CORE_FIFO
2377 struct k_obj_core obj_core;
2378 #endif
2379 };
2380
2381 /**
2382 * @cond INTERNAL_HIDDEN
2383 */
2384 #define Z_FIFO_INITIALIZER(obj) \
2385 { \
2386 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
2387 }
2388
2389 /**
2390 * INTERNAL_HIDDEN @endcond
2391 */
2392
2393 /**
2394 * @defgroup fifo_apis FIFO APIs
2395 * @ingroup kernel_apis
2396 * @{
2397 */
2398
2399 /**
2400 * @brief Initialize a FIFO queue.
2401 *
2402 * This routine initializes a FIFO queue, prior to its first use.
2403 *
2404 * @param fifo Address of the FIFO queue.
2405 */
2406 #define k_fifo_init(fifo) \
2407 ({ \
2408 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, init, fifo); \
2409 k_queue_init(&(fifo)->_queue); \
2410 K_OBJ_CORE_INIT(K_OBJ_CORE(fifo), _obj_type_fifo); \
2411 K_OBJ_CORE_LINK(K_OBJ_CORE(fifo)); \
2412 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, init, fifo); \
2413 })
2414
2415 /**
2416 * @brief Cancel waiting on a FIFO queue.
2417 *
2418 * This routine causes the first thread pending on @a fifo, if any, to
2419 * return from its k_fifo_get() call with a NULL value (as if the timeout
2420 * expired).
2421 *
2422 * @funcprops \isr_ok
2423 *
2424 * @param fifo Address of the FIFO queue.
2425 */
2426 #define k_fifo_cancel_wait(fifo) \
2427 ({ \
2428 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, cancel_wait, fifo); \
2429 k_queue_cancel_wait(&(fifo)->_queue); \
2430 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, cancel_wait, fifo); \
2431 })
2432
2433 /**
2434 * @brief Add an element to a FIFO queue.
2435 *
2436 * This routine adds a data item to @a fifo. A FIFO data item must be
2437 * aligned on a word boundary, and the first word of the item is reserved
2438 * for the kernel's use.
2439 *
2440 * @funcprops \isr_ok
2441 *
2442 * @param fifo Address of the FIFO.
2443 * @param data Address of the data item.
2444 */
2445 #define k_fifo_put(fifo, data) \
2446 ({ \
2447 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put, fifo, data); \
2448 k_queue_append(&(fifo)->_queue, data); \
2449 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put, fifo, data); \
2450 })
2451
2452 /**
2453 * @brief Add an element to a FIFO queue.
2454 *
2455 * This routine adds a data item to @a fifo. There is an implicit memory
2456 * allocation to create an additional temporary bookkeeping data structure from
2457 * the calling thread's resource pool, which is automatically freed when the
2458 * item is removed. The data itself is not copied.
2459 *
2460 * @funcprops \isr_ok
2461 *
2462 * @param fifo Address of the FIFO.
2463 * @param data Address of the data item.
2464 *
2465 * @retval 0 on success
2466 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2467 */
2468 #define k_fifo_alloc_put(fifo, data) \
2469 ({ \
2470 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, alloc_put, fifo, data); \
2471 int fap_ret = k_queue_alloc_append(&(fifo)->_queue, data); \
2472 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, alloc_put, fifo, data, fap_ret); \
2473 fap_ret; \
2474 })
2475
2476 /**
2477 * @brief Atomically add a list of elements to a FIFO.
2478 *
2479 * This routine adds a list of data items to @a fifo in one operation.
2480 * The data items must be in a singly-linked list, with the first word of
2481 * each data item pointing to the next data item; the list must be
2482 * NULL-terminated.
2483 *
2484 * @funcprops \isr_ok
2485 *
2486 * @param fifo Address of the FIFO queue.
2487 * @param head Pointer to first node in singly-linked list.
2488 * @param tail Pointer to last node in singly-linked list.
2489 */
2490 #define k_fifo_put_list(fifo, head, tail) \
2491 ({ \
2492 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_list, fifo, head, tail); \
2493 k_queue_append_list(&(fifo)->_queue, head, tail); \
2494 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_list, fifo, head, tail); \
2495 })
2496
2497 /**
2498 * @brief Atomically add a list of elements to a FIFO queue.
2499 *
2500 * This routine adds a list of data items to @a fifo in one operation.
2501 * The data items must be in a singly-linked list implemented using a
2502 * sys_slist_t object. Upon completion, the sys_slist_t object is invalid
2503 * and must be re-initialized via sys_slist_init().
2504 *
2505 * @funcprops \isr_ok
2506 *
2507 * @param fifo Address of the FIFO queue.
2508 * @param list Pointer to sys_slist_t object.
2509 */
2510 #define k_fifo_put_slist(fifo, list) \
2511 ({ \
2512 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_slist, fifo, list); \
2513 k_queue_merge_slist(&(fifo)->_queue, list); \
2514 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_slist, fifo, list); \
2515 })
2516
2517 /**
2518 * @brief Get an element from a FIFO queue.
2519 *
2520 * This routine removes a data item from @a fifo in a "first in, first out"
2521 * manner. The first word of the data item is reserved for the kernel's use.
2522 *
2523 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2524 *
2525 * @funcprops \isr_ok
2526 *
2527 * @param fifo Address of the FIFO queue.
2528 * @param timeout Waiting period to obtain a data item,
2529 * or one of the special values K_NO_WAIT and K_FOREVER.
2530 *
2531 * @return Address of the data item if successful; NULL if returned
2532 * without waiting, or waiting period timed out.
2533 */
2534 #define k_fifo_get(fifo, timeout) \
2535 ({ \
2536 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, get, fifo, timeout); \
2537 void *fg_ret = k_queue_get(&(fifo)->_queue, timeout); \
2538 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, get, fifo, timeout, fg_ret); \
2539 fg_ret; \
2540 })
2541
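/*
 * Example (illustrative sketch): passing items from an ISR to a thread with
 * a FIFO. The first word of each item is reserved for the kernel, so the
 * payload starts at the second field; the names used are hypothetical.
 *
 * @code
 * struct my_msg {
 *         void *fifo_reserved;   // first word reserved for the kernel
 *         uint8_t data[16];
 * };
 *
 * K_FIFO_DEFINE(my_fifo);
 *
 * void my_isr_handler(struct my_msg *msg)
 * {
 *         k_fifo_put(&my_fifo, msg);    // isr-ok
 * }
 *
 * void worker_thread(void)
 * {
 *         while (1) {
 *                 struct my_msg *msg = k_fifo_get(&my_fifo, K_FOREVER);
 *                 // ... process msg->data ...
 *         }
 * }
 * @endcode
 */
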
2542 /**
2543 * @brief Query a FIFO queue to see if it has data available.
2544 *
2545 * Note that the data might already be gone by the time this function returns
2546 * if other threads are also trying to read from the FIFO.
2547 *
2548 * @funcprops \isr_ok
2549 *
2550 * @param fifo Address of the FIFO queue.
2551 *
2552 * @return Non-zero if the FIFO queue is empty.
2553 * @return 0 if data is available.
2554 */
2555 #define k_fifo_is_empty(fifo) \
2556 k_queue_is_empty(&(fifo)->_queue)
2557
2558 /**
2559 * @brief Peek element at the head of a FIFO queue.
2560 *
2561 * Return the element at the head of the FIFO queue without removing it. A use
2562 * case for this is when the elements of the FIFO queue are themselves containers.
2563 * Then on each iteration of processing, the head container is peeked,
2564 * some data is processed out of it, and only once the container is empty
2565 * is it completely removed from the FIFO queue.
2566 *
2567 * @param fifo Address of the FIFO queue.
2568 *
2569 * @return Head element, or NULL if the FIFO queue is empty.
2570 */
2571 #define k_fifo_peek_head(fifo) \
2572 ({ \
2573 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_head, fifo); \
2574 void *fph_ret = k_queue_peek_head(&(fifo)->_queue); \
2575 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_head, fifo, fph_ret); \
2576 fph_ret; \
2577 })
2578
2579 /**
2580 * @brief Peek element at the tail of FIFO queue.
2581 *
2582 * Return the element at the tail of the FIFO queue without removing it. A use
2583 * case for this is when the elements of the FIFO queue are themselves containers;
2584 * it may then be useful to add more data to the last container in the FIFO queue.
2585 *
2586 * @param fifo Address of the FIFO queue.
2587 *
2588 * @return Tail element, or NULL if the FIFO queue is empty.
2589 */
2590 #define k_fifo_peek_tail(fifo) \
2591 ({ \
2592 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_tail, fifo); \
2593 void *fpt_ret = k_queue_peek_tail(&(fifo)->_queue); \
2594 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_tail, fifo, fpt_ret); \
2595 fpt_ret; \
2596 })
2597
2598 /**
2599 * @brief Statically define and initialize a FIFO queue.
2600 *
2601 * The FIFO queue can be accessed outside the module where it is defined using:
2602 *
2603 * @code extern struct k_fifo <name>; @endcode
2604 *
2605 * @param name Name of the FIFO queue.
2606 */
2607 #define K_FIFO_DEFINE(name) \
2608 STRUCT_SECTION_ITERABLE(k_fifo, name) = \
2609 Z_FIFO_INITIALIZER(name)
2610
2611 /** @} */
2612
2613 struct k_lifo {
2614 struct k_queue _queue;
2615 #ifdef CONFIG_OBJ_CORE_LIFO
2616 struct k_obj_core obj_core;
2617 #endif
2618 };
2619
2620 /**
2621 * @cond INTERNAL_HIDDEN
2622 */
2623
2624 #define Z_LIFO_INITIALIZER(obj) \
2625 { \
2626 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
2627 }
2628
2629 /**
2630 * INTERNAL_HIDDEN @endcond
2631 */
2632
2633 /**
2634 * @defgroup lifo_apis LIFO APIs
2635 * @ingroup kernel_apis
2636 * @{
2637 */
2638
2639 /**
2640 * @brief Initialize a LIFO queue.
2641 *
2642 * This routine initializes a LIFO queue object, prior to its first use.
2643 *
2644 * @param lifo Address of the LIFO queue.
2645 */
2646 #define k_lifo_init(lifo) \
2647 ({ \
2648 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, init, lifo); \
2649 k_queue_init(&(lifo)->_queue); \
2650 K_OBJ_CORE_INIT(K_OBJ_CORE(lifo), _obj_type_lifo); \
2651 K_OBJ_CORE_LINK(K_OBJ_CORE(lifo)); \
2652 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, init, lifo); \
2653 })
2654
2655 /**
2656 * @brief Add an element to a LIFO queue.
2657 *
2658 * This routine adds a data item to @a lifo. A LIFO queue data item must be
2659 * aligned on a word boundary, and the first word of the item is
2660 * reserved for the kernel's use.
2661 *
2662 * @funcprops \isr_ok
2663 *
2664 * @param lifo Address of the LIFO queue.
2665 * @param data Address of the data item.
2666 */
2667 #define k_lifo_put(lifo, data) \
2668 ({ \
2669 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, put, lifo, data); \
2670 k_queue_prepend(&(lifo)->_queue, data); \
2671 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, put, lifo, data); \
2672 })
2673
2674 /**
2675 * @brief Add an element to a LIFO queue.
2676 *
2677 * This routine adds a data item to @a lifo. There is an implicit memory
2678 * allocation to create an additional temporary bookkeeping data structure from
2679 * the calling thread's resource pool, which is automatically freed when the
2680 * item is removed. The data itself is not copied.
2681 *
2682 * @funcprops \isr_ok
2683 *
2684 * @param lifo Address of the LIFO.
2685 * @param data Address of the data item.
2686 *
2687 * @retval 0 on success
2688 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2689 */
2690 #define k_lifo_alloc_put(lifo, data) \
2691 ({ \
2692 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, alloc_put, lifo, data); \
2693 int lap_ret = k_queue_alloc_prepend(&(lifo)->_queue, data); \
2694 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, alloc_put, lifo, data, lap_ret); \
2695 lap_ret; \
2696 })
2697
2698 /**
2699 * @brief Get an element from a LIFO queue.
2700 *
2701 * This routine removes a data item from @a lifo in a "last in, first out"
2702 * manner. The first word of the data item is reserved for the kernel's use.
2703 *
2704 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2705 *
2706 * @funcprops \isr_ok
2707 *
2708 * @param lifo Address of the LIFO queue.
2709 * @param timeout Waiting period to obtain a data item,
2710 * or one of the special values K_NO_WAIT and K_FOREVER.
2711 *
2712 * @return Address of the data item if successful; NULL if returned
2713 * without waiting, or waiting period timed out.
2714 */
2715 #define k_lifo_get(lifo, timeout) \
2716 ({ \
2717 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, get, lifo, timeout); \
2718 void *lg_ret = k_queue_get(&(lifo)->_queue, timeout); \
2719 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, get, lifo, timeout, lg_ret); \
2720 lg_ret; \
2721 })
2722
2723 /**
2724 * @brief Statically define and initialize a LIFO queue.
2725 *
2726 * The LIFO queue can be accessed outside the module where it is defined using:
2727 *
2728 * @code extern struct k_lifo <name>; @endcode
2729 *
2730 * @param name Name of the LIFO queue.
2731 */
2732 #define K_LIFO_DEFINE(name) \
2733 STRUCT_SECTION_ITERABLE(k_lifo, name) = \
2734 Z_LIFO_INITIALIZER(name)
2735
2736 /** @} */
2737
2738 /**
2739 * @cond INTERNAL_HIDDEN
2740 */
2741 #define K_STACK_FLAG_ALLOC ((uint8_t)1) /* Buffer was allocated */
2742
2743 typedef uintptr_t stack_data_t;
2744
2745 struct k_stack {
2746 _wait_q_t wait_q;
2747 struct k_spinlock lock;
2748 stack_data_t *base, *next, *top;
2749
2750 uint8_t flags;
2751
2752 SYS_PORT_TRACING_TRACKING_FIELD(k_stack)
2753
2754 #ifdef CONFIG_OBJ_CORE_STACK
2755 struct k_obj_core obj_core;
2756 #endif
2757 };
2758
2759 #define Z_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
2760 { \
2761 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2762 .base = stack_buffer, \
2763 .next = stack_buffer, \
2764 .top = stack_buffer + stack_num_entries, \
2765 }
2766
2767 /**
2768 * INTERNAL_HIDDEN @endcond
2769 */
2770
2771 /**
2772 * @defgroup stack_apis Stack APIs
2773 * @ingroup kernel_apis
2774 * @{
2775 */
2776
2777 /**
2778 * @brief Initialize a stack.
2779 *
2780 * This routine initializes a stack object, prior to its first use.
2781 *
2782 * @param stack Address of the stack.
2783 * @param buffer Address of array used to hold stacked values.
2784 * @param num_entries Maximum number of values that can be stacked.
2785 */
2786 void k_stack_init(struct k_stack *stack,
2787 stack_data_t *buffer, uint32_t num_entries);
2788
2789
2790 /**
2791 * @brief Initialize a stack.
2792 *
2793 * This routine initializes a stack object, prior to its first use. Internal
2794 * buffers will be allocated from the calling thread's resource pool.
2795 * This memory will be released if k_stack_cleanup() is called, or
2796 * userspace is enabled and the stack object loses all references to it.
2797 *
2798 * @param stack Address of the stack.
2799 * @param num_entries Maximum number of values that can be stacked.
2800 *
2801 * @return 0 on success, -ENOMEM if memory couldn't be allocated
2802 */
2803
2804 __syscall int32_t k_stack_alloc_init(struct k_stack *stack,
2805 uint32_t num_entries);
2806
2807 /**
2808 * @brief Release a stack's allocated buffer
2809 *
2810 * If a stack object was given a dynamically allocated buffer via
2811 * k_stack_alloc_init(), this will free it. This function does nothing
2812 * if the buffer wasn't dynamically allocated.
2813 *
2814 * @param stack Address of the stack.
2815 * @retval 0 on success
2816 * @retval -EAGAIN when object is still in use
2817 */
2818 int k_stack_cleanup(struct k_stack *stack);
2819
2820 /**
2821 * @brief Push an element onto a stack.
2822 *
2823 * This routine adds a stack_data_t value @a data to @a stack.
2824 *
2825 * @funcprops \isr_ok
2826 *
2827 * @param stack Address of the stack.
2828 * @param data Value to push onto the stack.
2829 *
2830 * @retval 0 on success
2831 * @retval -ENOMEM if stack is full
2832 */
2833 __syscall int k_stack_push(struct k_stack *stack, stack_data_t data);
2834
2835 /**
2836 * @brief Pop an element from a stack.
2837 *
2838 * This routine removes a stack_data_t value from @a stack in a "last in,
2839 * first out" manner and stores the value in @a data.
2840 *
2841 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2842 *
2843 * @funcprops \isr_ok
2844 *
2845 * @param stack Address of the stack.
2846 * @param data Address of area to hold the value popped from the stack.
2847 * @param timeout Waiting period to obtain a value,
2848 * or one of the special values K_NO_WAIT and
2849 * K_FOREVER.
2850 *
2851 * @retval 0 Element popped from stack.
2852 * @retval -EBUSY Returned without waiting.
2853 * @retval -EAGAIN Waiting period timed out.
2854 */
2855 __syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data,
2856 k_timeout_t timeout);
2857
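/*
 * Example (illustrative sketch): using a k_stack as a pool of buffer
 * addresses. Entries are plain stack_data_t values, which are wide enough to
 * hold a pointer; the buf_pool names are hypothetical.
 *
 * @code
 * K_STACK_DEFINE(buf_pool, 4);
 *
 * void release_buf(void *buf)
 * {
 *         // May fail with -ENOMEM if the stack is already full.
 *         (void)k_stack_push(&buf_pool, (stack_data_t)buf);
 * }
 *
 * void *claim_buf(void)
 * {
 *         stack_data_t entry;
 *
 *         if (k_stack_pop(&buf_pool, &entry, K_FOREVER) == 0) {
 *                 return (void *)entry;
 *         }
 *         return NULL;
 * }
 * @endcode
 */
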
2858 /**
2859 * @brief Statically define and initialize a stack
2860 *
2861 * The stack can be accessed outside the module where it is defined using:
2862 *
2863 * @code extern struct k_stack <name>; @endcode
2864 *
2865 * @param name Name of the stack.
2866 * @param stack_num_entries Maximum number of values that can be stacked.
2867 */
2868 #define K_STACK_DEFINE(name, stack_num_entries) \
2869 stack_data_t __noinit \
2870 _k_stack_buf_##name[stack_num_entries]; \
2871 STRUCT_SECTION_ITERABLE(k_stack, name) = \
2872 Z_STACK_INITIALIZER(name, _k_stack_buf_##name, \
2873 stack_num_entries)
2874
2875 /** @} */
2876
2877 /**
2878 * @cond INTERNAL_HIDDEN
2879 */
2880
2881 struct k_work;
2882 struct k_work_q;
2883 struct k_work_queue_config;
2884 extern struct k_work_q k_sys_work_q;
2885
2886 /**
2887 * INTERNAL_HIDDEN @endcond
2888 */
2889
2890 /**
2891 * @defgroup mutex_apis Mutex APIs
2892 * @ingroup kernel_apis
2893 * @{
2894 */
2895
2896 /**
2897 * Mutex Structure
2898 * @ingroup mutex_apis
2899 */
2900 struct k_mutex {
2901 /** Mutex wait queue */
2902 _wait_q_t wait_q;
2903 /** Mutex owner */
2904 struct k_thread *owner;
2905
2906 /** Current lock count */
2907 uint32_t lock_count;
2908
2909 /** Original thread priority */
2910 int owner_orig_prio;
2911
2912 SYS_PORT_TRACING_TRACKING_FIELD(k_mutex)
2913
2914 #ifdef CONFIG_OBJ_CORE_MUTEX
2915 struct k_obj_core obj_core;
2916 #endif
2917 };
2918
2919 /**
2920 * @cond INTERNAL_HIDDEN
2921 */
2922 #define Z_MUTEX_INITIALIZER(obj) \
2923 { \
2924 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2925 .owner = NULL, \
2926 .lock_count = 0, \
2927 .owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
2928 }
2929
2930 /**
2931 * INTERNAL_HIDDEN @endcond
2932 */
2933
2934 /**
2935 * @brief Statically define and initialize a mutex.
2936 *
2937 * The mutex can be accessed outside the module where it is defined using:
2938 *
2939 * @code extern struct k_mutex <name>; @endcode
2940 *
2941 * @param name Name of the mutex.
2942 */
2943 #define K_MUTEX_DEFINE(name) \
2944 STRUCT_SECTION_ITERABLE(k_mutex, name) = \
2945 Z_MUTEX_INITIALIZER(name)
2946
2947 /**
2948 * @brief Initialize a mutex.
2949 *
2950 * This routine initializes a mutex object, prior to its first use.
2951 *
2952 * Upon completion, the mutex is available and does not have an owner.
2953 *
2954 * @param mutex Address of the mutex.
2955 *
2956 * @retval 0 Mutex object created
2957 *
2958 */
2959 __syscall int k_mutex_init(struct k_mutex *mutex);
2960
2961
2962 /**
2963 * @brief Lock a mutex.
2964 *
2965 * This routine locks @a mutex. If the mutex is locked by another thread,
2966 * the calling thread waits until the mutex becomes available or until
2967 * a timeout occurs.
2968 *
2969 * A thread is permitted to lock a mutex it has already locked. The operation
2970 * completes immediately and the lock count is increased by 1.
2971 *
2972 * Mutexes may not be locked in ISRs.
2973 *
2974 * @param mutex Address of the mutex.
2975 * @param timeout Waiting period to lock the mutex,
2976 * or one of the special values K_NO_WAIT and
2977 * K_FOREVER.
2978 *
2979 * @retval 0 Mutex locked.
2980 * @retval -EBUSY Returned without waiting.
2981 * @retval -EAGAIN Waiting period timed out.
2982 */
2983 __syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout);
2984
2985 /**
2986 * @brief Unlock a mutex.
2987 *
2988 * This routine unlocks @a mutex. The mutex must already be locked by the
2989 * calling thread.
2990 *
2991 * The mutex cannot be claimed by another thread until it has been unlocked by
2992 * the calling thread as many times as it was previously locked by that
2993 * thread.
2994 *
2995 * Mutexes may not be unlocked in ISRs, as mutexes must only be manipulated
2996 * in thread context due to ownership and priority inheritance semantics.
2997 *
2998 * @param mutex Address of the mutex.
2999 *
3000 * @retval 0 Mutex unlocked.
3001 * @retval -EPERM The current thread does not own the mutex
3002 * @retval -EINVAL The mutex is not locked
3003 *
3004 */
3005 __syscall int k_mutex_unlock(struct k_mutex *mutex);
3006
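/*
 * Example (illustrative sketch): protecting shared state with a statically
 * defined mutex. The shared counter and function names are hypothetical.
 *
 * @code
 * K_MUTEX_DEFINE(state_lock);
 * static int shared_counter;
 *
 * void bump_counter(void)
 * {
 *         // Must run in thread context; mutexes cannot be used in ISRs.
 *         k_mutex_lock(&state_lock, K_FOREVER);
 *         shared_counter++;
 *         k_mutex_unlock(&state_lock);
 * }
 * @endcode
 */
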
3007 /**
3008 * @}
3009 */
3010
3011
3012 struct k_condvar {
3013 _wait_q_t wait_q;
3014
3015 #ifdef CONFIG_OBJ_CORE_CONDVAR
3016 struct k_obj_core obj_core;
3017 #endif
3018 };
3019
3020 #define Z_CONDVAR_INITIALIZER(obj) \
3021 { \
3022 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
3023 }
3024
3025 /**
3026 * @defgroup condvar_apis Condition Variables APIs
3027 * @ingroup kernel_apis
3028 * @{
3029 */
3030
3031 /**
3032 * @brief Initialize a condition variable
3033 *
3034 * @param condvar pointer to a @p k_condvar structure
3035 * @retval 0 Condition variable created successfully
3036 */
3037 __syscall int k_condvar_init(struct k_condvar *condvar);
3038
3039 /**
3040 * @brief Signals one thread that is pending on the condition variable
3041 *
3042 * @param condvar pointer to a @p k_condvar structure
3043 * @retval 0 On success
3044 */
3045 __syscall int k_condvar_signal(struct k_condvar *condvar);
3046
3047 /**
3048 * @brief Unblock all threads that are pending on the condition
3049 * variable
3050 *
3051 * @param condvar pointer to a @p k_condvar structure
3052 * @return An integer with the number of woken threads on success
3053 */
3054 __syscall int k_condvar_broadcast(struct k_condvar *condvar);
3055
3056 /**
3057 * @brief Waits on the condition variable releasing the mutex lock
3058 *
3059 * Atomically releases the currently owned mutex, blocks the current thread
3060 * waiting on the condition variable specified by @a condvar,
3061 * and finally acquires the mutex again.
3062 *
3063 * The waiting thread unblocks only after another thread calls
3064 * k_condvar_signal() or k_condvar_broadcast() with the same condition variable.
3065 *
3066 * @param condvar pointer to a @p k_condvar structure
3067 * @param mutex Address of the mutex.
3068 * @param timeout Waiting period for the condition variable
3069 * or one of the special values K_NO_WAIT and K_FOREVER.
3070 * @retval 0 On success
3071 * @retval -EAGAIN Waiting period timed out.
3072 */
3073 __syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
3074 k_timeout_t timeout);
3075
3076 /**
3077 * @brief Statically define and initialize a condition variable.
3078 *
3079 * The condition variable can be accessed outside the module where it is
3080 * defined using:
3081 *
3082 * @code extern struct k_condvar <name>; @endcode
3083 *
3084 * @param name Name of the condition variable.
3085 */
3086 #define K_CONDVAR_DEFINE(name) \
3087 STRUCT_SECTION_ITERABLE(k_condvar, name) = \
3088 Z_CONDVAR_INITIALIZER(name)
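
/*
 * Example (illustrative sketch): the classic "wait for a predicate" pattern,
 * re-checking the condition in a loop after each wakeup. The flag, mutex,
 * and function names are hypothetical.
 *
 * @code
 * K_MUTEX_DEFINE(data_lock);
 * K_CONDVAR_DEFINE(data_ready);
 * static bool ready;
 *
 * void consumer(void)
 * {
 *         k_mutex_lock(&data_lock, K_FOREVER);
 *         while (!ready) {
 *                 // Releases data_lock while blocked, re-acquires on return.
 *                 k_condvar_wait(&data_ready, &data_lock, K_FOREVER);
 *         }
 *         ready = false;
 *         k_mutex_unlock(&data_lock);
 * }
 *
 * void producer(void)
 * {
 *         k_mutex_lock(&data_lock, K_FOREVER);
 *         ready = true;
 *         k_condvar_signal(&data_ready);
 *         k_mutex_unlock(&data_lock);
 * }
 * @endcode
 */
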
3089 /**
3090 * @}
3091 */
3092
3093 /**
3094 * @cond INTERNAL_HIDDEN
3095 */
3096
3097 struct k_sem {
3098 _wait_q_t wait_q;
3099 unsigned int count;
3100 unsigned int limit;
3101
3102 Z_DECL_POLL_EVENT
3103
3104 SYS_PORT_TRACING_TRACKING_FIELD(k_sem)
3105
3106 #ifdef CONFIG_OBJ_CORE_SEM
3107 struct k_obj_core obj_core;
3108 #endif
3109 };
3110
3111 #define Z_SEM_INITIALIZER(obj, initial_count, count_limit) \
3112 { \
3113 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
3114 .count = initial_count, \
3115 .limit = count_limit, \
3116 Z_POLL_EVENT_OBJ_INIT(obj) \
3117 }
3118
3119 /**
3120 * INTERNAL_HIDDEN @endcond
3121 */
3122
3123 /**
3124 * @defgroup semaphore_apis Semaphore APIs
3125 * @ingroup kernel_apis
3126 * @{
3127 */
3128
3129 /**
3130 * @brief Maximum limit value allowed for a semaphore.
3131 *
3132 * This is intended for use when a semaphore does not have
3133 * an explicit maximum limit, and instead is just used for
3134 * counting purposes.
3135 *
3136 */
3137 #define K_SEM_MAX_LIMIT UINT_MAX
3138
3139 /**
3140 * @brief Initialize a semaphore.
3141 *
3142 * This routine initializes a semaphore object, prior to its first use.
3143 *
3144 * @param sem Address of the semaphore.
3145 * @param initial_count Initial semaphore count.
3146 * @param limit Maximum permitted semaphore count.
3147 *
3148 * @see K_SEM_MAX_LIMIT
3149 *
3150 * @retval 0 Semaphore created successfully
3151 * @retval -EINVAL Invalid values
3152 *
3153 */
3154 __syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
3155 unsigned int limit);
3156
3157 /**
3158 * @brief Take a semaphore.
3159 *
3160 * This routine takes @a sem.
3161 *
3162 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
3163 *
3164 * @funcprops \isr_ok
3165 *
3166 * @param sem Address of the semaphore.
3167 * @param timeout Waiting period to take the semaphore,
3168 * or one of the special values K_NO_WAIT and K_FOREVER.
3169 *
3170 * @retval 0 Semaphore taken.
3171 * @retval -EBUSY Returned without waiting.
3172 * @retval -EAGAIN Waiting period timed out,
3173 * or the semaphore was reset during the waiting period.
3174 */
3175 __syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
3176
3177 /**
3178 * @brief Give a semaphore.
3179 *
3180 * This routine gives @a sem, unless the semaphore is already at its maximum
3181 * permitted count.
3182 *
3183 * @funcprops \isr_ok
3184 *
3185 * @param sem Address of the semaphore.
3186 */
3187 __syscall void k_sem_give(struct k_sem *sem);
3188
3189 /**
3190 * @brief Resets a semaphore's count to zero.
3191 *
3192 * This routine sets the count of @a sem to zero.
3193 * Any outstanding semaphore takes will be aborted
3194 * with -EAGAIN.
3195 *
3196 * @param sem Address of the semaphore.
3197 */
3198 __syscall void k_sem_reset(struct k_sem *sem);
3199
3200 /**
3201 * @brief Get a semaphore's count.
3202 *
3203 * This routine returns the current count of @a sem.
3204 *
3205 * @param sem Address of the semaphore.
3206 *
3207 * @return Current semaphore count.
3208 */
3209 __syscall unsigned int k_sem_count_get(struct k_sem *sem);
3210
3211 /**
3212 * @internal
3213 */
3214 static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
3215 {
3216 return sem->count;
3217 }
3218
3219 /**
3220 * @brief Statically define and initialize a semaphore.
3221 *
3222 * The semaphore can be accessed outside the module where it is defined using:
3223 *
3224 * @code extern struct k_sem <name>; @endcode
3225 *
3226 * @param name Name of the semaphore.
3227 * @param initial_count Initial semaphore count.
3228 * @param count_limit Maximum permitted semaphore count.
3229 */
3230 #define K_SEM_DEFINE(name, initial_count, count_limit) \
3231 STRUCT_SECTION_ITERABLE(k_sem, name) = \
3232 Z_SEM_INITIALIZER(name, initial_count, count_limit); \
3233 BUILD_ASSERT(((count_limit) != 0) && \
3234 ((initial_count) <= (count_limit)) && \
3235 ((count_limit) <= K_SEM_MAX_LIMIT));
3236
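/*
 * Example (illustrative sketch): an ISR signalling a thread with a counting
 * semaphore. The initial count is 0 and the limit is K_SEM_MAX_LIMIT so no
 * "give" is lost; the rx_* names are hypothetical.
 *
 * @code
 * K_SEM_DEFINE(rx_sem, 0, K_SEM_MAX_LIMIT);
 *
 * void rx_isr(const void *arg)
 * {
 *         k_sem_give(&rx_sem);          // isr-ok
 * }
 *
 * void rx_thread(void)
 * {
 *         while (1) {
 *                 if (k_sem_take(&rx_sem, K_FOREVER) == 0) {
 *                         // ... handle one received item ...
 *                 }
 *         }
 * }
 * @endcode
 */
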
3237 /** @} */
3238
3239 /**
3240 * @cond INTERNAL_HIDDEN
3241 */
3242
3243 struct k_work_delayable;
3244 struct k_work_sync;
3245
3246 /**
3247 * INTERNAL_HIDDEN @endcond
3248 */
3249
3250 /**
3251 * @defgroup workqueue_apis Work Queue APIs
3252 * @ingroup kernel_apis
3253 * @{
3254 */
3255
3256 /** @brief The signature for a work item handler function.
3257 *
3258 * The function will be invoked by the thread animating a work queue.
3259 *
3260 * @param work the work item that provided the handler.
3261 */
3262 typedef void (*k_work_handler_t)(struct k_work *work);
3263
3264 /** @brief Initialize a (non-delayable) work structure.
3265 *
3266 * This must be invoked before submitting a work structure for the first time.
3267 * It need not be invoked again on the same work structure. It can be
3268 * re-invoked to change the associated handler, but this must be done when the
3269 * work item is idle.
3270 *
3271 * @funcprops \isr_ok
3272 *
3273 * @param work the work structure to be initialized.
3274 *
3275 * @param handler the handler to be invoked by the work item.
3276 */
3277 void k_work_init(struct k_work *work,
3278 k_work_handler_t handler);
3279
3280 /** @brief Busy state flags from the work item.
3281 *
3282 * A zero return value indicates the work item appears to be idle.
3283 *
3284 * @note This is a live snapshot of state, which may change before the result
3285 * is checked. Use locks where appropriate.
3286 *
3287 * @funcprops \isr_ok
3288 *
3289 * @param work pointer to the work item.
3290 *
3291 * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED,
3292 * K_WORK_RUNNING, K_WORK_CANCELING, and K_WORK_FLUSHING.
3293 */
3294 int k_work_busy_get(const struct k_work *work);
3295
3296 /** @brief Test whether a work item is currently pending.
3297 *
3298 * Wrapper to determine whether a work item is in a non-idle state.
3299 *
3300 * @note This is a live snapshot of state, which may change before the result
3301 * is checked. Use locks where appropriate.
3302 *
3303 * @funcprops \isr_ok
3304 *
3305 * @param work pointer to the work item.
3306 *
3307 * @return true if and only if k_work_busy_get() returns a non-zero value.
3308 */
3309 static inline bool k_work_is_pending(const struct k_work *work);
3310
3311 /** @brief Submit a work item to a queue.
3312 *
3313 * @param queue pointer to the work queue on which the item should run. If
3314 * NULL the queue from the most recent submission will be used.
3315 *
3316 * @funcprops \isr_ok
3317 *
3318 * @param work pointer to the work item.
3319 *
3320 * @retval 0 if work was already submitted to a queue
3321 * @retval 1 if work was not submitted and has been queued to @p queue
3322 * @retval 2 if work was running and has been queued to the queue that was
3323 * running it
3324 * @retval -EBUSY
3325 * * if work submission was rejected because the work item is cancelling; or
3326 * * @p queue is draining; or
3327 * * @p queue is plugged.
3328 * @retval -EINVAL if @p queue is null and the work item has never been run.
3329 * @retval -ENODEV if @p queue has not been started.
3330 */
3331 int k_work_submit_to_queue(struct k_work_q *queue,
3332 struct k_work *work);
3333
3334 /** @brief Submit a work item to the system queue.
3335 *
3336 * @funcprops \isr_ok
3337 *
3338 * @param work pointer to the work item.
3339 *
3340 * @return as with k_work_submit_to_queue().
3341 */
3342 int k_work_submit(struct k_work *work);
3343
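/*
 * Example (illustrative sketch): deferring work from an ISR to the system
 * work queue. CONTAINER_OF() (from sys/util.h) recovers the enclosing
 * context from the work item; struct my_dev, read_hw() and the function
 * names are hypothetical.
 *
 * @code
 * struct my_dev {
 *         struct k_work work;
 *         uint32_t pending_data;
 * };
 *
 * static void my_work_handler(struct k_work *work)
 * {
 *         struct my_dev *dev = CONTAINER_OF(work, struct my_dev, work);
 *         // ... process dev->pending_data in thread context ...
 * }
 *
 * void my_dev_init(struct my_dev *dev)
 * {
 *         k_work_init(&dev->work, my_work_handler);
 * }
 *
 * void my_dev_isr(struct my_dev *dev)
 * {
 *         dev->pending_data = read_hw();      // hypothetical register read
 *         k_work_submit(&dev->work);          // isr-ok
 * }
 * @endcode
 */
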
3344 /** @brief Wait for last-submitted instance to complete.
3345 *
3346 * Resubmissions may occur while waiting, including chained submissions (from
3347 * within the handler).
3348 *
3349 * @note Be careful of caller and work queue thread relative priority. If
3350 * this function sleeps it will not return until the work queue thread
3351 * completes the tasks that allow this thread to resume.
3352 *
3353 * @note Behavior is undefined if this function is invoked on @p work from a
3354 * work queue running @p work.
3355 *
3356 * @param work pointer to the work item.
3357 *
3358 * @param sync pointer to an opaque item containing state related to the
3359 * pending cancellation. The object must persist until the call returns, and
3360 * be accessible from both the caller thread and the work queue thread. The
3361 * object must not be used for any other flush or cancel operation until this
3362 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3363 * must be allocated in coherent memory.
3364 *
3365 * @retval true if call had to wait for completion
3366 * @retval false if work was already idle
3367 */
3368 bool k_work_flush(struct k_work *work,
3369 struct k_work_sync *sync);
3370
3371 /** @brief Cancel a work item.
3372 *
3373 * This attempts to prevent a pending (non-delayable) work item from being
3374 * processed by removing it from the work queue. If the item is being
3375 * processed, the work item will continue to be processed, but resubmissions
3376 * are rejected until cancellation completes.
3377 *
3378 * If this returns zero, cancellation is complete; otherwise something
3379 * (probably a work queue thread) is still referencing the item.
3380 *
3381 * See also k_work_cancel_sync().
3382 *
3383 * @funcprops \isr_ok
3384 *
3385 * @param work pointer to the work item.
3386 *
3387 * @return the k_work_busy_get() status indicating the state of the item after all
3388 * cancellation steps performed by this call are completed.
3389 */
3390 int k_work_cancel(struct k_work *work);
3391
3392 /** @brief Cancel a work item and wait for it to complete.
3393 *
3394 * Same as k_work_cancel() but does not return until cancellation is complete.
3395 * This can be invoked by a thread after k_work_cancel() to synchronize with a
3396 * previous cancellation.
3397 *
3398 * On return the work structure will be idle unless something submits it after
3399 * the cancellation was complete.
3400 *
3401 * @note Be careful of caller and work queue thread relative priority. If
3402 * this function sleeps it will not return until the work queue thread
3403 * completes the tasks that allow this thread to resume.
3404 *
3405 * @note Behavior is undefined if this function is invoked on @p work from a
3406 * work queue running @p work.
3407 *
3408 * @param work pointer to the work item.
3409 *
3410 * @param sync pointer to an opaque item containing state related to the
3411 * pending cancellation. The object must persist until the call returns, and
3412 * be accessible from both the caller thread and the work queue thread. The
3413 * object must not be used for any other flush or cancel operation until this
3414 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3415 * must be allocated in coherent memory.
3416 *
3417 * @retval true if work was pending (call had to wait for cancellation of a
3418 * running handler to complete, or scheduled or submitted operations were
3419 * cancelled);
3420 * @retval false otherwise
3421 */
3422 bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync);
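
/*
 * Example (illustrative sketch): synchronously cancelling a work item during
 * shutdown so its handler is guaranteed not to be running afterwards. Names
 * are hypothetical; the k_work_sync coherence caveat above applies.
 *
 *      extern struct k_work sample_work;
 *
 *      void sample_shutdown(void)
 *      {
 *              struct k_work_sync sync;
 *
 *              // On return the item is idle unless something resubmits it.
 *              (void)k_work_cancel_sync(&sample_work, &sync);
 *      }
 */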
3423
3424 /** @brief Initialize a work queue structure.
3425 *
3426 * This must be invoked before starting a work queue structure for the first time.
3427 * It need not be invoked again on the same work queue structure.
3428 *
3429 * @funcprops \isr_ok
3430 *
3431 * @param queue the queue structure to be initialized.
3432 */
3433 void k_work_queue_init(struct k_work_q *queue);
3434
3435 /** @brief Initialize a work queue.
3436 *
3437 * This configures the work queue thread and starts it running. The function
3438 * should not be re-invoked on a queue.
3439 *
3440 * @param queue pointer to the queue structure. It must be initialized
3441 * in zeroed/bss memory or with @ref k_work_queue_init before
3442 * use.
3443 *
3444 * @param stack pointer to the work thread stack area.
3445 *
3446 * @param stack_size size of the work thread stack area, in bytes.
3447 *
3448 * @param prio initial thread priority
3449 *
3450 * @param cfg optional additional configuration parameters. Pass @c
3451 * NULL if not required, to use the defaults documented in
3452 * k_work_queue_config.
3453 */
3454 void k_work_queue_start(struct k_work_q *queue,
3455 k_thread_stack_t *stack, size_t stack_size,
3456 int prio, const struct k_work_queue_config *cfg);
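
/*
 * Example (illustrative sketch): creating a dedicated work queue and
 * submitting items to it. The stack size, priority, and all symbol names are
 * hypothetical choices for illustration only.
 *
 *      K_THREAD_STACK_DEFINE(sample_wq_stack, 1024);
 *      static struct k_work_q sample_wq;
 *
 *      void sample_wq_setup(void)
 *      {
 *              struct k_work_queue_config cfg = {
 *                      .name = "sample_wq",
 *                      .no_yield = false,
 *              };
 *
 *              k_work_queue_init(&sample_wq);
 *              k_work_queue_start(&sample_wq, sample_wq_stack,
 *                                 K_THREAD_STACK_SIZEOF(sample_wq_stack),
 *                                 K_PRIO_PREEMPT(4), &cfg);
 *      }
 *
 *      void sample_wq_submit(struct k_work *work)
 *      {
 *              (void)k_work_submit_to_queue(&sample_wq, work);
 *      }
 */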
3457
3458 /** @brief Access the thread that animates a work queue.
3459 *
3460 * This is necessary to grant a work queue thread access to things the work
3461 * items it will process are expected to use.
3462 *
3463 * @param queue pointer to the queue structure.
3464 *
3465 * @return the thread associated with the work queue.
3466 */
3467 static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue);
3468
3469 /** @brief Wait until the work queue has drained, optionally plugging it.
3470 *
3471 * This blocks submission to the work queue except when coming from the queue
3472 * thread, and blocks the caller until no more work items are available in the
3473 * queue.
3474 *
3475 * If @p plug is true then submission will continue to be blocked after the
3476 * drain operation completes until k_work_queue_unplug() is invoked.
3477 *
3478 * Note that work items that are delayed are not yet associated with their
3479 * work queue. They must be cancelled externally if a goal is to ensure the
3480 * work queue remains empty. The @p plug feature can be used to prevent
3481 * delayed items from being submitted after the drain completes.
3482 *
3483 * @param queue pointer to the queue structure.
3484 *
3485 * @param plug if true the work queue will continue to block new submissions
3486 * after all items have drained.
3487 *
3488 * @retval 1 if call had to wait for the drain to complete
3489 * @retval 0 if call did not have to wait
3490 * @retval negative if wait was interrupted or failed
3491 */
3492 int k_work_queue_drain(struct k_work_q *queue, bool plug);
3493
3494 /** @brief Release a work queue to accept new submissions.
3495 *
3496 * This releases the block on new submissions placed when k_work_queue_drain()
3497 * is invoked with the @p plug option enabled. If this is invoked before the
3498 * drain completes new items may be submitted as soon as the drain completes.
3499 *
3500 * @funcprops \isr_ok
3501 *
3502 * @param queue pointer to the queue structure.
3503 *
3504 * @retval 0 if successfully unplugged
3505 * @retval -EALREADY if the work queue was not plugged.
3506 */
3507 int k_work_queue_unplug(struct k_work_q *queue);
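
/*
 * Example (illustrative sketch): draining a queue before suspending a
 * subsystem and keeping it plugged until the subsystem resumes. Names are
 * hypothetical.
 *
 *      extern struct k_work_q sample_wq;
 *
 *      void sample_suspend(void)
 *      {
 *              // Block new submissions and wait for pending items to finish.
 *              (void)k_work_queue_drain(&sample_wq, true);
 *      }
 *
 *      void sample_resume(void)
 *      {
 *              // Accept submissions again.
 *              (void)k_work_queue_unplug(&sample_wq);
 *      }
 */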
3508
3509 /** @brief Initialize a delayable work structure.
3510 *
3511 * This must be invoked before scheduling a delayable work structure for the
3512 * first time. It need not be invoked again on the same work structure. It
3513 * can be re-invoked to change the associated handler, but this must be done
3514 * when the work item is idle.
3515 *
3516 * @funcprops \isr_ok
3517 *
3518 * @param dwork the delayable work structure to be initialized.
3519 *
3520 * @param handler the handler to be invoked by the work item.
3521 */
3522 void k_work_init_delayable(struct k_work_delayable *dwork,
3523 k_work_handler_t handler);
3524
3525 /**
3526 * @brief Get the parent delayable work structure from a work pointer.
3527 *
3528 * This function is necessary when a @c k_work_handler_t function is passed to
3529 * k_work_schedule_for_queue() and the handler needs to access data stored in
3530 * the containing `k_work_delayable`, or in the structure that embeds it.
3531 *
3532 * @param work Address passed to the work handler
3533 *
3534 * @return Address of the containing @c k_work_delayable structure.
3535 */
3536 static inline struct k_work_delayable *
3537 k_work_delayable_from_work(struct k_work *work);
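
/*
 * Example (illustrative sketch): recovering per-instance context in a
 * delayable work handler. The handler receives a struct k_work pointer;
 * k_work_delayable_from_work() maps it back to the delayable item and
 * CONTAINER_OF() maps that to the hypothetical structure embedding it.
 *
 *      struct sample_device {
 *              struct k_work_delayable dwork;
 *              uint32_t poll_count;
 *      };
 *
 *      static void sample_poll_handler(struct k_work *work)
 *      {
 *              struct k_work_delayable *dwork =
 *                      k_work_delayable_from_work(work);
 *              struct sample_device *dev =
 *                      CONTAINER_OF(dwork, struct sample_device, dwork);
 *
 *              dev->poll_count++;
 *      }
 */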
3538
3539 /** @brief Busy state flags from the delayable work item.
3540 *
3541 * @funcprops \isr_ok
3542 *
3543 * @note This is a live snapshot of state, which may change before the result
3544 * can be inspected. Use locks where appropriate.
3545 *
3546 * @param dwork pointer to the delayable work item.
3547 *
3548 * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED, K_WORK_RUNNING,
3549 * K_WORK_CANCELING, and K_WORK_FLUSHING. A zero return value indicates the
3550 * work item appears to be idle.
3551 */
3552 int k_work_delayable_busy_get(const struct k_work_delayable *dwork);
3553
3554 /** @brief Test whether a delayed work item is currently pending.
3555 *
3556 * Wrapper to determine whether a delayed work item is in a non-idle state.
3557 *
3558 * @note This is a live snapshot of state, which may change before the result
3559 * can be inspected. Use locks where appropriate.
3560 *
3561 * @funcprops \isr_ok
3562 *
3563 * @param dwork pointer to the delayable work item.
3564 *
3565 * @return true if and only if k_work_delayable_busy_get() returns a non-zero
3566 * value.
3567 */
3568 static inline bool k_work_delayable_is_pending(
3569 const struct k_work_delayable *dwork);
3570
3571 /** @brief Get the absolute tick count at which a scheduled delayable work
3572 * will be submitted.
3573 *
3574 * @note This is a live snapshot of state, which may change before the result
3575 * can be inspected. Use locks where appropriate.
3576 *
3577 * @funcprops \isr_ok
3578 *
3579 * @param dwork pointer to the delayable work item.
3580 *
3581 * @return the tick count when the timer that will schedule the work item will
3582 * expire, or the current tick count if the work is not scheduled.
3583 */
3584 static inline k_ticks_t k_work_delayable_expires_get(
3585 const struct k_work_delayable *dwork);
3586
3587 /** @brief Get the number of ticks until a scheduled delayable work will be
3588 * submitted.
3589 *
3590 * @note This is a live snapshot of state, which may change before the result
3591 * can be inspected. Use locks where appropriate.
3592 *
3593 * @funcprops \isr_ok
3594 *
3595 * @param dwork pointer to the delayable work item.
3596 *
3597 * @return the number of ticks until the timer that will schedule the work
3598 * item will expire, or zero if the item is not scheduled.
3599 */
3600 static inline k_ticks_t k_work_delayable_remaining_get(
3601 const struct k_work_delayable *dwork);
3602
3603 /** @brief Submit an idle work item to a queue after a delay.
3604 *
3605 * Unlike k_work_reschedule_for_queue() this is a no-op if the work item is
3606 * already scheduled or submitted, even if @p delay is @c K_NO_WAIT.
3607 *
3608 * @funcprops \isr_ok
3609 *
3610 * @param queue the queue on which the work item should be submitted after the
3611 * delay.
3612 *
3613 * @param dwork pointer to the delayable work item.
3614 *
3615 * @param delay the time to wait before submitting the work item. If @c
3616 * K_NO_WAIT and the work is not pending this is equivalent to
3617 * k_work_submit_to_queue().
3618 *
3619 * @retval 0 if work was already scheduled or submitted.
3620 * @retval 1 if work has been scheduled.
3621 * @retval -EBUSY if @p delay is @c K_NO_WAIT and
3622 * k_work_submit_to_queue() fails with this code.
3623 * @retval -EINVAL if @p delay is @c K_NO_WAIT and
3624 * k_work_submit_to_queue() fails with this code.
3625 * @retval -ENODEV if @p delay is @c K_NO_WAIT and
3626 * k_work_submit_to_queue() fails with this code.
3627 */
3628 int k_work_schedule_for_queue(struct k_work_q *queue,
3629 struct k_work_delayable *dwork,
3630 k_timeout_t delay);
3631
3632 /** @brief Submit an idle work item to the system work queue after a
3633 * delay.
3634 *
3635 * This is a thin wrapper around k_work_schedule_for_queue(), with all the API
3636 * characteristics of that function.
3637 *
3638 * @param dwork pointer to the delayable work item.
3639 *
3640 * @param delay the time to wait before submitting the work item. If @c
3641 * K_NO_WAIT this is equivalent to k_work_submit_to_queue().
3642 *
3643 * @return as with k_work_schedule_for_queue().
3644 */
3645 int k_work_schedule(struct k_work_delayable *dwork,
3646 k_timeout_t delay);
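
/*
 * Example (illustrative sketch): scheduling a delayable work item on the
 * system queue 100 ms from now. Because k_work_schedule() is a no-op for an
 * item that is already scheduled or queued, repeated triggers do not push the
 * deadline out. All names are hypothetical.
 *
 *      static void sample_timeout_handler(struct k_work *work);
 *      static K_WORK_DELAYABLE_DEFINE(sample_dwork, sample_timeout_handler);
 *
 *      void sample_arm_timeout(void)
 *      {
 *              (void)k_work_schedule(&sample_dwork, K_MSEC(100));
 *      }
 */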
3647
3648 /** @brief Reschedule a work item to a queue after a delay.
3649 *
3650 * Unlike k_work_schedule_for_queue() this function can change the deadline of
3651 * a scheduled work item, and will schedule a work item that is in any state
3652 * (e.g. is idle, submitted, or running). This function does not affect
3653 * ("unsubmit") a work item that has been submitted to a queue.
3654 *
3655 * @funcprops \isr_ok
3656 *
3657 * @param queue the queue on which the work item should be submitted after the
3658 * delay.
3659 *
3660 * @param dwork pointer to the delayable work item.
3661 *
3662 * @param delay the time to wait before submitting the work item. If @c
3663 * K_NO_WAIT this is equivalent to k_work_submit_to_queue() after canceling
3664 * any previous scheduled submission.
3665 *
3666 * @note If delay is @c K_NO_WAIT ("no delay") the return values are as with
3667 * k_work_submit_to_queue().
3668 *
3669 * @retval 0 if delay is @c K_NO_WAIT and work was already on a queue
3670 * @retval 1 if
3671 * * delay is @c K_NO_WAIT and work was not submitted but has now been queued
3672 * to @p queue; or
3673 * * delay not @c K_NO_WAIT and work has been scheduled
3674 * @retval 2 if delay is @c K_NO_WAIT and work was running and has been queued
3675 * to the queue that was running it
3676 * @retval -EBUSY if @p delay is @c K_NO_WAIT and
3677 * k_work_submit_to_queue() fails with this code.
3678 * @retval -EINVAL if @p delay is @c K_NO_WAIT and
3679 * k_work_submit_to_queue() fails with this code.
3680 * @retval -ENODEV if @p delay is @c K_NO_WAIT and
3681 * k_work_submit_to_queue() fails with this code.
3682 */
3683 int k_work_reschedule_for_queue(struct k_work_q *queue,
3684 struct k_work_delayable *dwork,
3685 k_timeout_t delay);
3686
3687 /** @brief Reschedule a work item to the system work queue after a
3688 * delay.
3689 *
3690 * This is a thin wrapper around k_work_reschedule_for_queue(), with all the
3691 * API characteristics of that function.
3692 *
3693 * @param dwork pointer to the delayable work item.
3694 *
3695 * @param delay the time to wait before submitting the work item.
3696 *
3697 * @return as with k_work_reschedule_for_queue().
3698 */
3699 int k_work_reschedule(struct k_work_delayable *dwork,
3700 k_timeout_t delay);
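
/*
 * Example (illustrative sketch): a debounce pattern. Each event pushes the
 * deadline out again, so the handler runs only after events stop arriving
 * for 50 ms. Names are hypothetical.
 *
 *      extern struct k_work_delayable sample_debounce_dwork;
 *
 *      void sample_on_event(void)
 *      {
 *              // Unlike k_work_schedule(), this replaces a pending deadline.
 *              (void)k_work_reschedule(&sample_debounce_dwork, K_MSEC(50));
 *      }
 */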
3701
3702 /** @brief Flush delayable work.
3703 *
3704 * If the work is scheduled, it is immediately submitted. Then the caller
3705 * blocks until the work completes, as with k_work_flush().
3706 *
3707 * @note Be careful of caller and work queue thread relative priority. If
3708 * this function sleeps it will not return until the work queue thread
3709 * completes the tasks that allow this thread to resume.
3710 *
3711 * @note Behavior is undefined if this function is invoked on @p dwork from a
3712 * work queue running @p dwork.
3713 *
3714 * @param dwork pointer to the delayable work item.
3715 *
3716 * @param sync pointer to an opaque item containing state related to the
3717 * pending cancellation. The object must persist until the call returns, and
3718 * be accessible from both the caller thread and the work queue thread. The
3719 * object must not be used for any other flush or cancel operation until this
3720 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3721 * must be allocated in coherent memory.
3722 *
3723 * @retval true if call had to wait for completion
3724 * @retval false if work was already idle
3725 */
3726 bool k_work_flush_delayable(struct k_work_delayable *dwork,
3727 struct k_work_sync *sync);
3728
3729 /** @brief Cancel delayable work.
3730 *
3731 * Similar to k_work_cancel() but for delayable work. If the work is
3732 * scheduled or submitted it is canceled. This function does not wait for the
3733 * cancellation to complete.
3734 *
3735 * @note The work may still be running when this returns. Use
3736 * k_work_flush_delayable() or k_work_cancel_delayable_sync() to ensure it is
3737 * not running.
3738 *
3739 * @note Canceling delayable work does not prevent rescheduling it. It does
3740 * prevent submitting it until the cancellation completes.
3741 *
3742 * @funcprops \isr_ok
3743 *
3744 * @param dwork pointer to the delayable work item.
3745 *
3746 * @return the k_work_delayable_busy_get() status indicating the state of the
3747 * item after all cancellation steps performed by this call are completed.
3748 */
3749 int k_work_cancel_delayable(struct k_work_delayable *dwork);
3750
3751 /** @brief Cancel delayable work and wait.
3752 *
3753 * Like k_work_cancel_delayable() but waits until the work becomes idle.
3754 *
3755 * @note Canceling delayable work does not prevent rescheduling it. It does
3756 * prevent submitting it until the cancellation completes.
3757 *
3758 * @note Be careful of caller and work queue thread relative priority. If
3759 * this function sleeps it will not return until the work queue thread
3760 * completes the tasks that allow this thread to resume.
3761 *
3762 * @note Behavior is undefined if this function is invoked on @p dwork from a
3763 * work queue running @p dwork.
3764 *
3765 * @param dwork pointer to the delayable work item.
3766 *
3767 * @param sync pointer to an opaque item containing state related to the
3768 * pending cancellation. The object must persist until the call returns, and
3769 * be accessible from both the caller thread and the work queue thread. The
3770 * object must not be used for any other flush or cancel operation until this
3771 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3772 * must be allocated in coherent memory.
3773 *
3774 * @retval true if work was not idle (call had to wait for cancellation of a
3775 * running handler to complete, or scheduled or submitted operations were
3776 * cancelled);
3777 * @retval false otherwise
3778 */
3779 bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
3780 struct k_work_sync *sync);
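
/*
 * Example (illustrative sketch): stopping a periodic delayable work item and
 * waiting until its handler can no longer run. Names are hypothetical; the
 * k_work_sync coherence caveat applies here as well.
 *
 *      extern struct k_work_delayable sample_dwork;
 *
 *      void sample_stop_polling(void)
 *      {
 *              struct k_work_sync sync;
 *
 *              (void)k_work_cancel_delayable_sync(&sample_dwork, &sync);
 *      }
 */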
3781
3782 enum {
3783 /**
3784 * @cond INTERNAL_HIDDEN
3785 */
3786
3787 /* The atomic API is used for all work and queue flags fields to
3788 * enforce sequential consistency in SMP environments.
3789 */
3790
3791 /* Bits that represent the work item states. At least nine of the
3792 * combinations are distinct valid stable states.
3793 */
3794 K_WORK_RUNNING_BIT = 0,
3795 K_WORK_CANCELING_BIT = 1,
3796 K_WORK_QUEUED_BIT = 2,
3797 K_WORK_DELAYED_BIT = 3,
3798 K_WORK_FLUSHING_BIT = 4,
3799
3800 K_WORK_MASK = BIT(K_WORK_DELAYED_BIT) | BIT(K_WORK_QUEUED_BIT)
3801 | BIT(K_WORK_RUNNING_BIT) | BIT(K_WORK_CANCELING_BIT) | BIT(K_WORK_FLUSHING_BIT),
3802
3803 /* Static work flags */
3804 K_WORK_DELAYABLE_BIT = 8,
3805 K_WORK_DELAYABLE = BIT(K_WORK_DELAYABLE_BIT),
3806
3807 /* Dynamic work queue flags */
3808 K_WORK_QUEUE_STARTED_BIT = 0,
3809 K_WORK_QUEUE_STARTED = BIT(K_WORK_QUEUE_STARTED_BIT),
3810 K_WORK_QUEUE_BUSY_BIT = 1,
3811 K_WORK_QUEUE_BUSY = BIT(K_WORK_QUEUE_BUSY_BIT),
3812 K_WORK_QUEUE_DRAIN_BIT = 2,
3813 K_WORK_QUEUE_DRAIN = BIT(K_WORK_QUEUE_DRAIN_BIT),
3814 K_WORK_QUEUE_PLUGGED_BIT = 3,
3815 K_WORK_QUEUE_PLUGGED = BIT(K_WORK_QUEUE_PLUGGED_BIT),
3816
3817 /* Static work queue flags */
3818 K_WORK_QUEUE_NO_YIELD_BIT = 8,
3819 K_WORK_QUEUE_NO_YIELD = BIT(K_WORK_QUEUE_NO_YIELD_BIT),
3820
3821 /**
3822 * INTERNAL_HIDDEN @endcond
3823 */
3824 /* Transient work flags */
3825
3826 /** @brief Flag indicating a work item that is running under a work
3827 * queue thread.
3828 *
3829 * Accessed via k_work_busy_get(). May co-occur with other flags.
3830 */
3831 K_WORK_RUNNING = BIT(K_WORK_RUNNING_BIT),
3832
3833 /** @brief Flag indicating a work item that is being canceled.
3834 *
3835 * Accessed via k_work_busy_get(). May co-occur with other flags.
3836 */
3837 K_WORK_CANCELING = BIT(K_WORK_CANCELING_BIT),
3838
3839 /** @brief Flag indicating a work item that has been submitted to a
3840 * queue but has not started running.
3841 *
3842 * Accessed via k_work_busy_get(). May co-occur with other flags.
3843 */
3844 K_WORK_QUEUED = BIT(K_WORK_QUEUED_BIT),
3845
3846 /** @brief Flag indicating a delayed work item that is scheduled for
3847 * submission to a queue.
3848 *
3849 * Accessed via k_work_busy_get(). May co-occur with other flags.
3850 */
3851 K_WORK_DELAYED = BIT(K_WORK_DELAYED_BIT),
3852
3853 /** @brief Flag indicating a synced work item that is being flushed.
3854 *
3855 * Accessed via k_work_busy_get(). May co-occur with other flags.
3856 */
3857 K_WORK_FLUSHING = BIT(K_WORK_FLUSHING_BIT),
3858 };
3859
3860 /** @brief A structure used to submit work. */
3861 struct k_work {
3862 /* All fields are protected by the work module spinlock. No fields
3863 * are to be accessed except through kernel API.
3864 */
3865
3866 /* Node to link into k_work_q pending list. */
3867 sys_snode_t node;
3868
3869 /* The function to be invoked by the work queue thread. */
3870 k_work_handler_t handler;
3871
3872 /* The queue on which the work item was last submitted. */
3873 struct k_work_q *queue;
3874
3875 /* State of the work item.
3876 *
3877 * The item can be DELAYED, QUEUED, and RUNNING simultaneously.
3878 *
3879 * It can be RUNNING and CANCELING simultaneously.
3880 */
3881 uint32_t flags;
3882 };
3883
3884 #define Z_WORK_INITIALIZER(work_handler) { \
3885 .handler = work_handler, \
3886 }
3887
3888 /** @brief A structure used to submit work after a delay. */
3889 struct k_work_delayable {
3890 /* The work item. */
3891 struct k_work work;
3892
3893 /* Timeout used to submit work after a delay. */
3894 struct _timeout timeout;
3895
3896 /* The queue to which the work should be submitted. */
3897 struct k_work_q *queue;
3898 };
3899
3900 #define Z_WORK_DELAYABLE_INITIALIZER(work_handler) { \
3901 .work = { \
3902 .handler = work_handler, \
3903 .flags = K_WORK_DELAYABLE, \
3904 }, \
3905 }
3906
3907 /**
3908 * @brief Initialize a statically-defined delayable work item.
3909 *
3910 * This macro can be used to initialize a statically-defined delayable
3911 * work item, prior to its first use. For example,
3912 *
3913 * @code static K_WORK_DELAYABLE_DEFINE(<dwork>, <work_handler>); @endcode
3914 *
3915 * Note that if the runtime dependencies support initialization with
3916 * k_work_init_delayable(), using that instead eliminates the initialized
3917 * object in ROM that this macro produces and that is copied in at
3918 * system startup.
3919 *
3920 * @param work Symbol name for delayable work item object
3921 * @param work_handler Function to invoke each time work item is processed.
3922 */
3923 #define K_WORK_DELAYABLE_DEFINE(work, work_handler) \
3924 struct k_work_delayable work \
3925 = Z_WORK_DELAYABLE_INITIALIZER(work_handler)
3926
3927 /**
3928 * @cond INTERNAL_HIDDEN
3929 */
3930
3931 /* Record used to wait for work to flush.
3932 *
3933 * The work item is inserted into the queue that will process (or is
3934 * processing) the item, and will be processed as soon as the item
3935 * completes. When the flusher is processed the semaphore will be
3936 * signaled, releasing the thread waiting for the flush.
3937 */
3938 struct z_work_flusher {
3939 struct k_work work;
3940 struct k_sem sem;
3941 };
3942
3943 /* Record used to wait for work to complete a cancellation.
3944 *
3945 * The work item is inserted into a global queue of pending cancels.
3946 * When a cancelling work item goes idle any matching waiters are
3947 * removed from pending_cancels and are woken.
3948 */
3949 struct z_work_canceller {
3950 sys_snode_t node;
3951 struct k_work *work;
3952 struct k_sem sem;
3953 };
3954
3955 /**
3956 * INTERNAL_HIDDEN @endcond
3957 */
3958
3959 /** @brief A structure holding internal state for a pending synchronous
3960 * operation on a work item or queue.
3961 *
3962 * Instances of this type are provided by the caller for invocation of
3963 * k_work_flush(), k_work_cancel_sync() and sibling flush and cancel APIs. A
3964 * referenced object must persist until the call returns, and be accessible
3965 * from both the caller thread and the work queue thread.
3966 *
3967 * @note If CONFIG_KERNEL_COHERENCE is enabled the object must be allocated in
3968 * coherent memory; see arch_mem_coherent(). The stack on these architectures
3969 * is generally not coherent, so the object must not be stack-allocated. Violations are detected by
3970 * runtime assertion.
3971 */
3972 struct k_work_sync {
3973 union {
3974 struct z_work_flusher flusher;
3975 struct z_work_canceller canceller;
3976 };
3977 };
3978
3979 /** @brief A structure holding optional configuration items for a work
3980 * queue.
3981 *
3982 * This structure, and values it references, are not retained by
3983 * k_work_queue_start().
3984 */
3985 struct k_work_queue_config {
3986 /** The name to be given to the work queue thread.
3987 *
3988 * If left null the thread will not have a name.
3989 */
3990 const char *name;
3991
3992 /** Control whether the work queue thread should yield between
3993 * items.
3994 *
3995 * Yielding between items helps guarantee the work queue
3996 * thread does not starve other threads, including cooperative
3997 * ones released by a work item. This is the default behavior.
3998 *
3999 * Set this to @c true to prevent the work queue thread from
4000 * yielding between items. This may be appropriate when a
4001 * sequence of items should complete without yielding
4002 * control.
4003 */
4004 bool no_yield;
4005 };
4006
4007 /** @brief A structure used to hold work until it can be processed. */
4008 struct k_work_q {
4009 /* The thread that animates the work. */
4010 struct k_thread thread;
4011
4012 /* All the following fields must be accessed only while the
4013 * work module spinlock is held.
4014 */
4015
4016 /* List of k_work items to be worked. */
4017 sys_slist_t pending;
4018
4019 /* Wait queue for idle work thread. */
4020 _wait_q_t notifyq;
4021
4022 /* Wait queue for threads waiting for the queue to drain. */
4023 _wait_q_t drainq;
4024
4025 /* Flags describing queue state. */
4026 uint32_t flags;
4027 };
4028
4029 /* Provide the implementation for inline functions declared above */
4030
4031 static inline bool k_work_is_pending(const struct k_work *work)
4032 {
4033 return k_work_busy_get(work) != 0;
4034 }
4035
4036 static inline struct k_work_delayable *
4037 k_work_delayable_from_work(struct k_work *work)
4038 {
4039 return CONTAINER_OF(work, struct k_work_delayable, work);
4040 }
4041
4042 static inline bool k_work_delayable_is_pending(
4043 const struct k_work_delayable *dwork)
4044 {
4045 return k_work_delayable_busy_get(dwork) != 0;
4046 }
4047
4048 static inline k_ticks_t k_work_delayable_expires_get(
4049 const struct k_work_delayable *dwork)
4050 {
4051 return z_timeout_expires(&dwork->timeout);
4052 }
4053
4054 static inline k_ticks_t k_work_delayable_remaining_get(
4055 const struct k_work_delayable *dwork)
4056 {
4057 return z_timeout_remaining(&dwork->timeout);
4058 }
4059
4060 static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue)
4061 {
4062 return &queue->thread;
4063 }
4064
4065 /** @} */
4066
4067 struct k_work_user;
4068
4069 /**
4070 * @addtogroup workqueue_apis
4071 * @{
4072 */
4073
4074 /**
4075 * @typedef k_work_user_handler_t
4076 * @brief Work item handler function type for user work queues.
4077 *
4078 * A work item's handler function is executed by a user workqueue's thread
4079 * when the work item is processed by the workqueue.
4080 *
4081 * @param work Address of the work item.
4082 */
4083 typedef void (*k_work_user_handler_t)(struct k_work_user *work);
4084
4085 /**
4086 * @cond INTERNAL_HIDDEN
4087 */
4088
4089 struct k_work_user_q {
4090 struct k_queue queue;
4091 struct k_thread thread;
4092 };
4093
4094 enum {
4095 K_WORK_USER_STATE_PENDING, /* Work item pending state */
4096 };
4097
4098 struct k_work_user {
4099 void *_reserved; /* Used by k_queue implementation. */
4100 k_work_user_handler_t handler;
4101 atomic_t flags;
4102 };
4103
4104 /**
4105 * INTERNAL_HIDDEN @endcond
4106 */
4107
4108 #if defined(__cplusplus) && ((__cplusplus - 0) < 202002L)
4109 #define Z_WORK_USER_INITIALIZER(work_handler) { NULL, work_handler, 0 }
4110 #else
4111 #define Z_WORK_USER_INITIALIZER(work_handler) \
4112 { \
4113 ._reserved = NULL, \
4114 .handler = work_handler, \
4115 .flags = 0 \
4116 }
4117 #endif
4118
4119 /**
4120 * @brief Initialize a statically-defined user work item.
4121 *
4122 * This macro can be used to initialize a statically-defined user work
4123 * item, prior to its first use. For example,
4124 *
4125 * @code static K_WORK_USER_DEFINE(<work>, <work_handler>); @endcode
4126 *
4127 * @param work Symbol name for work item object
4128 * @param work_handler Function to invoke each time work item is processed.
4129 */
4130 #define K_WORK_USER_DEFINE(work, work_handler) \
4131 struct k_work_user work = Z_WORK_USER_INITIALIZER(work_handler)
4132
4133 /**
4134 * @brief Initialize a userspace work item.
4135 *
4136 * This routine initializes a user workqueue work item, prior to its
4137 * first use.
4138 *
4139 * @param work Address of work item.
4140 * @param handler Function to invoke each time work item is processed.
4141 */
4142 static inline void k_work_user_init(struct k_work_user *work,
4143 k_work_user_handler_t handler)
4144 {
4145 *work = (struct k_work_user)Z_WORK_USER_INITIALIZER(handler);
4146 }
4147
4148 /**
4149 * @brief Check if a userspace work item is pending.
4150 *
4151 * This routine indicates if user work item @a work is pending in a workqueue's
4152 * queue.
4153 *
4154 * @note Checking if the work is pending gives no guarantee that the
4155 * work will still be pending when this information is used. It is up to
4156 * the caller to make sure that this information is used in a safe manner.
4157 *
4158 * @funcprops \isr_ok
4159 *
4160 * @param work Address of work item.
4161 *
4162 * @return true if work item is pending, or false if it is not pending.
4163 */
4164 static inline bool k_work_user_is_pending(struct k_work_user *work)
4165 {
4166 return atomic_test_bit(&work->flags, K_WORK_USER_STATE_PENDING);
4167 }
4168
4169 /**
4170 * @brief Submit a work item to a user mode workqueue
4171 *
4172 * Submits a work item to a workqueue that runs in user mode. A temporary
4173 * memory allocation is made from the caller's resource pool, which is freed
4174 * once the worker thread consumes the k_work item. The workqueue
4175 * thread must have memory access to the k_work item being submitted. The caller
4176 * must have permission granted on the work_q parameter's queue object.
4177 *
4178 * @funcprops \isr_ok
4179 *
4180 * @param work_q Address of workqueue.
4181 * @param work Address of work item.
4182 *
4183 * @retval -EBUSY if the work item was already in some workqueue
4184 * @retval -ENOMEM if no memory for thread resource pool allocation
4185 * @retval 0 Success
4186 */
4187 static inline int k_work_user_submit_to_queue(struct k_work_user_q *work_q,
4188 struct k_work_user *work)
4189 {
4190 int ret = -EBUSY;
4191
4192 if (!atomic_test_and_set_bit(&work->flags,
4193 K_WORK_USER_STATE_PENDING)) {
4194 ret = k_queue_alloc_append(&work_q->queue, work);
4195
4196 /* Couldn't insert into the queue. Clear the pending bit
4197 * so the work item can be submitted again
4198 */
4199 if (ret != 0) {
4200 atomic_clear_bit(&work->flags,
4201 K_WORK_USER_STATE_PENDING);
4202 }
4203 }
4204
4205 return ret;
4206 }
4207
4208 /**
4209 * @brief Start a workqueue in user mode
4210 *
4211 * This works identically to k_work_queue_start() except it is callable from
4212 * user mode, and the worker thread created will run in user mode. The caller
4213 * must have permissions granted on both the work_q parameter's thread and
4214 * queue objects, and the same restrictions on priority apply as
4215 * k_thread_create().
4216 *
4217 * @param work_q Address of workqueue.
4218 * @param stack Pointer to work queue thread's stack space, as defined by
4219 * K_THREAD_STACK_DEFINE()
4220 * @param stack_size Size of the work queue thread's stack (in bytes), which
4221 * should either be the same constant passed to
4222 * K_THREAD_STACK_DEFINE() or the value of K_THREAD_STACK_SIZEOF().
4223 * @param prio Priority of the work queue's thread.
4224 * @param name optional thread name. If not null a copy is made into the
4225 * thread's name buffer.
4226 */
4227 void k_work_user_queue_start(struct k_work_user_q *work_q,
4228 k_thread_stack_t *stack,
4229 size_t stack_size, int prio,
4230 const char *name);
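
/*
 * Example (illustrative sketch): starting a user-mode work queue and
 * submitting an item to it. The stack size, priority, and all names are
 * hypothetical; the submitting thread must have been granted the required
 * object permissions.
 *
 *      K_THREAD_STACK_DEFINE(sample_user_wq_stack, 2048);
 *      static struct k_work_user_q sample_user_wq;
 *
 *      static void sample_user_handler(struct k_work_user *work)
 *      {
 *              ARG_UNUSED(work);
 *      }
 *
 *      static K_WORK_USER_DEFINE(sample_user_work, sample_user_handler);
 *
 *      void sample_user_wq_setup(void)
 *      {
 *              k_work_user_queue_start(&sample_user_wq, sample_user_wq_stack,
 *                                      K_THREAD_STACK_SIZEOF(sample_user_wq_stack),
 *                                      K_PRIO_PREEMPT(10), "sample_user_wq");
 *      }
 *
 *      void sample_user_submit(void)
 *      {
 *              (void)k_work_user_submit_to_queue(&sample_user_wq,
 *                                                &sample_user_work);
 *      }
 */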
4231
4232 /**
4233 * @brief Access the user mode thread that animates a work queue.
4234 *
4235 * This is necessary to grant a user mode work queue thread access to things
4236 * the work items it will process are expected to use.
4237 *
4238 * @param work_q pointer to the user mode queue structure.
4239 *
4240 * @return the user mode thread associated with the work queue.
4241 */
4242 static inline k_tid_t k_work_user_queue_thread_get(struct k_work_user_q *work_q)
4243 {
4244 return &work_q->thread;
4245 }
4246
4247 /** @} */
4248
4249 /**
4250 * @cond INTERNAL_HIDDEN
4251 */
4252
4253 struct k_work_poll {
4254 struct k_work work;
4255 struct k_work_q *workq;
4256 struct z_poller poller;
4257 struct k_poll_event *events;
4258 int num_events;
4259 k_work_handler_t real_handler;
4260 struct _timeout timeout;
4261 int poll_result;
4262 };
4263
4264 /**
4265 * INTERNAL_HIDDEN @endcond
4266 */
4267
4268 /**
4269 * @addtogroup workqueue_apis
4270 * @{
4271 */
4272
4273 /**
4274 * @brief Initialize a statically-defined work item.
4275 *
4276 * This macro can be used to initialize a statically-defined workqueue work
4277 * item, prior to its first use. For example,
4278 *
4279 * @code static K_WORK_DEFINE(<work>, <work_handler>); @endcode
4280 *
4281 * @param work Symbol name for work item object
4282 * @param work_handler Function to invoke each time work item is processed.
4283 */
4284 #define K_WORK_DEFINE(work, work_handler) \
4285 struct k_work work = Z_WORK_INITIALIZER(work_handler)
4286
4287 /**
4288 * @brief Initialize a triggered work item.
4289 *
4290 * This routine initializes a workqueue triggered work item, prior to
4291 * its first use.
4292 *
4293 * @param work Address of triggered work item.
4294 * @param handler Function to invoke each time work item is processed.
4295 */
4296 void k_work_poll_init(struct k_work_poll *work,
4297 k_work_handler_t handler);
4298
4299 /**
4300 * @brief Submit a triggered work item.
4301 *
4302 * This routine schedules work item @a work to be processed by workqueue
4303 * @a work_q when one of the given @a events is signaled. The routine
4304 * initiates internal poller for the work item and then returns to the caller.
4305 * The work item is actually submitted to the workqueue and becomes pending
4306 * only when one of the watched events happens.
4307 *
4308 * Submitting a previously submitted triggered work item that is still
4309 * waiting for the event cancels the existing submission and reschedules it
4310 * using the new event list. Note that this behavior is inherently subject
4311 * to race conditions with the pre-existing triggered work item and work queue,
4312 * so care must be taken to synchronize such resubmissions externally.
4313 *
4314 * @funcprops \isr_ok
4315 *
4316 * @warning
4317 * Provided array of events as well as a triggered work item must be placed
4318 * in persistent memory (valid until work handler execution or work
4319 * cancellation) and cannot be modified after submission.
4320 *
4321 * @param work_q Address of workqueue.
4322 * @param work Address of delayed work item.
4323 * @param events An array of events which trigger the work.
4324 * @param num_events The number of events in the array.
4325 * @param timeout Timeout after which the work will be scheduled
4326 * for execution even if not triggered.
4327 *
4328 *
4329 * @retval 0 Work item started watching for events.
4330 * @retval -EINVAL Work item is being processed or has completed its work.
4331 * @retval -EADDRINUSE Work item is pending on a different workqueue.
4332 */
4333 int k_work_poll_submit_to_queue(struct k_work_q *work_q,
4334 struct k_work_poll *work,
4335 struct k_poll_event *events,
4336 int num_events,
4337 k_timeout_t timeout);
4338
4339 /**
4340 * @brief Submit a triggered work item to the system workqueue.
4341 *
4342 * This routine schedules work item @a work to be processed by system
4343 * workqueue when one of the given @a events is signaled. The routine
4344 * initiates internal poller for the work item and then returns to the caller.
4345 * The work item is actually submitted to the workqueue and becomes pending
4346 * only when one of the watched events happens.
4347 *
4348 * Submitting a previously submitted triggered work item that is still
4349 * waiting for the event cancels the existing submission and reschedules it
4350 * using the new event list. Note that this behavior is inherently subject
4351 * to race conditions with the pre-existing triggered work item and work queue,
4352 * so care must be taken to synchronize such resubmissions externally.
4353 *
4354 * @funcprops \isr_ok
4355 *
4356 * @warning
4357 * Provided array of events as well as a triggered work item must not be
4358 * modified until the item has been processed by the workqueue.
4359 *
4360 * @param work Address of delayed work item.
4361 * @param events An array of events which trigger the work.
4362 * @param num_events The number of events in the array.
4363 * @param timeout Timeout after which the work will be scheduled
4364 * for execution even if not triggered.
4365 *
4366 * @retval 0 Work item started watching for events.
4367 * @retval -EINVAL Work item is being processed or has completed its work.
4368 * @retval -EADDRINUSE Work item is pending on a different workqueue.
4369 */
4370 int k_work_poll_submit(struct k_work_poll *work,
4371 struct k_poll_event *events,
4372 int num_events,
4373 k_timeout_t timeout);
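
/*
 * Example (illustrative sketch): running a work item when a semaphore becomes
 * available, with a one second fallback timeout. The semaphore, handler, and
 * work object names are hypothetical; the event array and the k_work_poll
 * object must stay valid until the handler runs or the item is cancelled.
 *
 *      extern struct k_sem sample_sem;
 *      static struct k_work_poll sample_poll_work;
 *      static struct k_poll_event sample_events[1];
 *
 *      static void sample_poll_handler(struct k_work *work)
 *      {
 *              ARG_UNUSED(work);
 *      }
 *
 *      void sample_poll_setup(void)
 *      {
 *              k_work_poll_init(&sample_poll_work, sample_poll_handler);
 *              k_poll_event_init(&sample_events[0], K_POLL_TYPE_SEM_AVAILABLE,
 *                                K_POLL_MODE_NOTIFY_ONLY, &sample_sem);
 *              (void)k_work_poll_submit(&sample_poll_work, sample_events,
 *                                       1, K_SECONDS(1));
 *      }
 */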
4374
4375 /**
4376 * @brief Cancel a triggered work item.
4377 *
4378 * This routine cancels the submission of triggered work item @a work.
4379 * A triggered work item can only be canceled if no event has yet triggered
4380 * submission of the work item.
4381 *
4382 * @funcprops \isr_ok
4383 *
4384 * @param work Address of delayed work item.
4385 *
4386 * @retval 0 Work item canceled.
4387 * @retval -EINVAL Work item is being processed or has completed its work.
4388 */
4389 int k_work_poll_cancel(struct k_work_poll *work);
4390
4391 /** @} */
4392
4393 /**
4394 * @defgroup msgq_apis Message Queue APIs
4395 * @ingroup kernel_apis
4396 * @{
4397 */
4398
4399 /**
4400 * @brief Message Queue Structure
4401 */
4402 struct k_msgq {
4403 /** Message queue wait queue */
4404 _wait_q_t wait_q;
4405 /** Lock */
4406 struct k_spinlock lock;
4407 /** Message size */
4408 size_t msg_size;
4409 /** Maximal number of messages */
4410 uint32_t max_msgs;
4411 /** Start of message buffer */
4412 char *buffer_start;
4413 /** End of message buffer */
4414 char *buffer_end;
4415 /** Read pointer */
4416 char *read_ptr;
4417 /** Write pointer */
4418 char *write_ptr;
4419 /** Number of used messages */
4420 uint32_t used_msgs;
4421
4422 Z_DECL_POLL_EVENT
4423
4424 /** Message queue flags */
4425 uint8_t flags;
4426
4427 SYS_PORT_TRACING_TRACKING_FIELD(k_msgq)
4428
4429 #ifdef CONFIG_OBJ_CORE_MSGQ
4430 struct k_obj_core obj_core;
4431 #endif
4432 };
4433 /**
4434 * @cond INTERNAL_HIDDEN
4435 */
4436
4437
4438 #define Z_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \
4439 { \
4440 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
4441 .msg_size = q_msg_size, \
4442 .max_msgs = q_max_msgs, \
4443 .buffer_start = q_buffer, \
4444 .buffer_end = q_buffer + (q_max_msgs * q_msg_size), \
4445 .read_ptr = q_buffer, \
4446 .write_ptr = q_buffer, \
4447 .used_msgs = 0, \
4448 Z_POLL_EVENT_OBJ_INIT(obj) \
4449 }
4450
4451 /**
4452 * INTERNAL_HIDDEN @endcond
4453 */
4454
4455
4456 #define K_MSGQ_FLAG_ALLOC BIT(0)
4457
4458 /**
4459 * @brief Message Queue Attributes
4460 */
4461 struct k_msgq_attrs {
4462 /** Message Size */
4463 size_t msg_size;
4464 /** Maximal number of messages */
4465 uint32_t max_msgs;
4466 /** Used messages */
4467 uint32_t used_msgs;
4468 };
4469
4470
4471 /**
4472 * @brief Statically define and initialize a message queue.
4473 *
4474 * The message queue's ring buffer contains space for @a q_max_msgs messages,
4475 * each of which is @a q_msg_size bytes long. Alignment of the message queue's
4476 * ring buffer is not necessary; setting @a q_align to 1 is sufficient.
4477 *
4478 * The message queue can be accessed outside the module where it is defined
4479 * using:
4480 *
4481 * @code extern struct k_msgq <name>; @endcode
4482 *
4483 * @param q_name Name of the message queue.
4484 * @param q_msg_size Message size (in bytes).
4485 * @param q_max_msgs Maximum number of messages that can be queued.
4486 * @param q_align Alignment of the message queue's ring buffer (power of 2).
4487 *
4488 */
4489 #define K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align) \
4490 static char __noinit __aligned(q_align) \
4491 _k_fifo_buf_##q_name[(q_max_msgs) * (q_msg_size)]; \
4492 STRUCT_SECTION_ITERABLE(k_msgq, q_name) = \
4493 Z_MSGQ_INITIALIZER(q_name, _k_fifo_buf_##q_name, \
4494 (q_msg_size), (q_max_msgs))
4495
4496 /**
4497 * @brief Initialize a message queue.
4498 *
4499 * This routine initializes a message queue object, prior to its first use.
4500 *
4501 * The message queue's ring buffer must contain space for @a max_msgs messages,
4502 * each of which is @a msg_size bytes long. Alignment of the message queue's
4503 * ring buffer is not necessary.
4504 *
4505 * @param msgq Address of the message queue.
4506 * @param buffer Pointer to ring buffer that holds queued messages.
4507 * @param msg_size Message size (in bytes).
4508 * @param max_msgs Maximum number of messages that can be queued.
4509 */
4510 void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
4511 uint32_t max_msgs);
4512
4513 /**
4514 * @brief Initialize a message queue.
4515 *
4516 * This routine initializes a message queue object, prior to its first use,
4517 * allocating its internal ring buffer from the calling thread's resource
4518 * pool.
4519 *
4520 * Memory allocated for the ring buffer can be released by calling
4521 * k_msgq_cleanup(), or if userspace is enabled and the msgq object loses
4522 * all of its references.
4523 *
4524 * @param msgq Address of the message queue.
4525 * @param msg_size Message size (in bytes).
4526 * @param max_msgs Maximum number of messages that can be queued.
4527 *
4528 * @return 0 on success, -ENOMEM if there was insufficient memory in the
4529 * thread's resource pool, or -EINVAL if the size parameters cause
4530 * an integer overflow.
4531 */
4532 __syscall int k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
4533 uint32_t max_msgs);
4534
4535 /**
4536 * @brief Release allocated buffer for a queue
4537 *
4538 * Releases memory allocated for the ring buffer.
4539 *
4540 * @param msgq message queue to cleanup
4541 *
4542 * @retval 0 on success
4543 * @retval -EBUSY Queue not empty
4544 */
4545 int k_msgq_cleanup(struct k_msgq *msgq);
4546
4547 /**
4548 * @brief Send a message to a message queue.
4549 *
4550 * This routine sends a message to message queue @a msgq.
4551 *
4552 * @note The message content is copied from @a data into @a msgq and the @a data
4553 * pointer is not retained, so the message content will not be modified
4554 * by this function.
4555 *
4556 * @funcprops \isr_ok
4557 *
4558 * @param msgq Address of the message queue.
4559 * @param data Pointer to the message.
4560 * @param timeout Non-negative waiting period to add the message,
4561 * or one of the special values K_NO_WAIT and
4562 * K_FOREVER.
4563 *
4564 * @retval 0 Message sent.
4565 * @retval -ENOMSG Returned without waiting or queue purged.
4566 * @retval -EAGAIN Waiting period timed out.
4567 */
4568 __syscall int k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
4569
4570 /**
4571 * @brief Receive a message from a message queue.
4572 *
4573 * This routine receives a message from message queue @a msgq in a "first in,
4574 * first out" manner.
4575 *
4576 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
4577 *
4578 * @funcprops \isr_ok
4579 *
4580 * @param msgq Address of the message queue.
4581 * @param data Address of area to hold the received message.
4582 * @param timeout Waiting period to receive the message,
4583 * or one of the special values K_NO_WAIT and
4584 * K_FOREVER.
4585 *
4586 * @retval 0 Message received.
4587 * @retval -ENOMSG Returned without waiting.
4588 * @retval -EAGAIN Waiting period timed out.
4589 */
4590 __syscall int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout);
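
/*
 * Example (illustrative sketch): a producer/consumer pair using a statically
 * defined message queue of ten 16-byte messages. The message type and all
 * symbol names are hypothetical.
 *
 *      struct sample_msg {
 *              uint32_t id;
 *              uint32_t payload[3];
 *      };
 *
 *      K_MSGQ_DEFINE(sample_msgq, sizeof(struct sample_msg), 10, 4);
 *
 *      void sample_producer(uint32_t id)
 *      {
 *              struct sample_msg msg = { .id = id };
 *
 *              // Drop the message rather than block if the queue is full.
 *              (void)k_msgq_put(&sample_msgq, &msg, K_NO_WAIT);
 *      }
 *
 *      void sample_consumer(void)
 *      {
 *              struct sample_msg msg;
 *
 *              while (k_msgq_get(&sample_msgq, &msg, K_FOREVER) == 0) {
 *                      // process msg
 *              }
 *      }
 */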
4591
4592 /**
4593 * @brief Peek/read a message from a message queue.
4594 *
4595 * This routine reads a message from message queue @a msgq in a "first in,
4596 * first out" manner and leaves the message in the queue.
4597 *
4598 * @funcprops \isr_ok
4599 *
4600 * @param msgq Address of the message queue.
4601 * @param data Address of area to hold the message read from the queue.
4602 *
4603 * @retval 0 Message read.
4604 * @retval -ENOMSG Returned when the queue has no message.
4605 */
4606 __syscall int k_msgq_peek(struct k_msgq *msgq, void *data);
4607
4608 /**
4609 * @brief Peek/read a message from a message queue at the specified index
4610 *
4611 * This routine reads a message from message queue at the specified index
4612 * and leaves the message in the queue.
4613 * k_msgq_peek_at(msgq, data, 0) is equivalent to k_msgq_peek(msgq, data)
4614 *
4615 * @funcprops \isr_ok
4616 *
4617 * @param msgq Address of the message queue.
4618 * @param data Address of area to hold the message read from the queue.
4619 * @param idx Message queue index at which to peek
4620 *
4621 * @retval 0 Message read.
4622 * @retval -ENOMSG Returned when the queue has no message at index.
4623 */
4624 __syscall int k_msgq_peek_at(struct k_msgq *msgq, void *data, uint32_t idx);
4625
4626 /**
4627 * @brief Purge a message queue.
4628 *
4629 * This routine discards all unreceived messages in a message queue's ring
4630 * buffer. Any threads that are blocked waiting to send a message to the
4631 * message queue are unblocked and see an -ENOMSG error code.
4632 *
4633 * @param msgq Address of the message queue.
4634 */
4635 __syscall void k_msgq_purge(struct k_msgq *msgq);
4636
4637 /**
4638 * @brief Get the amount of free space in a message queue.
4639 *
4640 * This routine returns the number of unused entries in a message queue's
4641 * ring buffer.
4642 *
4643 * @param msgq Address of the message queue.
4644 *
4645 * @return Number of unused ring buffer entries.
4646 */
4647 __syscall uint32_t k_msgq_num_free_get(struct k_msgq *msgq);
4648
4649 /**
4650 * @brief Get basic attributes of a message queue.
4651 *
4652 * This routine fetches the basic attributes of a message queue into the @a attrs argument.
4653 *
4654 * @param msgq Address of the message queue.
4655 * @param attrs pointer to message queue attribute structure.
4656 */
4657 __syscall void k_msgq_get_attrs(struct k_msgq *msgq,
4658 struct k_msgq_attrs *attrs);
4659
4660
4661 static inline uint32_t z_impl_k_msgq_num_free_get(struct k_msgq *msgq)
4662 {
4663 return msgq->max_msgs - msgq->used_msgs;
4664 }
4665
4666 /**
4667 * @brief Get the number of messages in a message queue.
4668 *
4669 * This routine returns the number of messages in a message queue's ring buffer.
4670 *
4671 * @param msgq Address of the message queue.
4672 *
4673 * @return Number of messages.
4674 */
4675 __syscall uint32_t k_msgq_num_used_get(struct k_msgq *msgq);
4676
4677 static inline uint32_t z_impl_k_msgq_num_used_get(struct k_msgq *msgq)
4678 {
4679 return msgq->used_msgs;
4680 }
4681
4682 /** @} */
4683
4684 /**
4685 * @defgroup mailbox_apis Mailbox APIs
4686 * @ingroup kernel_apis
4687 * @{
4688 */
4689
4690 /**
4691 * @brief Mailbox Message Structure
4692 *
4693 */
4694 struct k_mbox_msg {
4695 /** size of message (in bytes) */
4696 size_t size;
4697 /** application-defined information value */
4698 uint32_t info;
4699 /** sender's message data buffer */
4700 void *tx_data;
4701 /** source thread id */
4702 k_tid_t rx_source_thread;
4703 /** target thread id */
4704 k_tid_t tx_target_thread;
4705 /** internal use only - thread waiting on send (may be a dummy) */
4706 k_tid_t _syncing_thread;
4707 #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
4708 /** internal use only - semaphore used during asynchronous send */
4709 struct k_sem *_async_sem;
4710 #endif
4711 };
4712 /**
4713 * @brief Mailbox Structure
4714 *
4715 */
4716 struct k_mbox {
4717 /** Transmit messages queue */
4718 _wait_q_t tx_msg_queue;
4719 /** Receive message queue */
4720 _wait_q_t rx_msg_queue;
4721 struct k_spinlock lock;
4722
4723 SYS_PORT_TRACING_TRACKING_FIELD(k_mbox)
4724
4725 #ifdef CONFIG_OBJ_CORE_MAILBOX
4726 struct k_obj_core obj_core;
4727 #endif
4728 };
4729 /**
4730 * @cond INTERNAL_HIDDEN
4731 */
4732
4733 #define Z_MBOX_INITIALIZER(obj) \
4734 { \
4735 .tx_msg_queue = Z_WAIT_Q_INIT(&obj.tx_msg_queue), \
4736 .rx_msg_queue = Z_WAIT_Q_INIT(&obj.rx_msg_queue), \
4737 }
4738
4739 /**
4740 * INTERNAL_HIDDEN @endcond
4741 */
4742
4743 /**
4744 * @brief Statically define and initialize a mailbox.
4745 *
4746 * The mailbox is to be accessed outside the module where it is defined using:
4747 *
4748 * @code extern struct k_mbox <name>; @endcode
4749 *
4750 * @param name Name of the mailbox.
4751 */
4752 #define K_MBOX_DEFINE(name) \
4753 STRUCT_SECTION_ITERABLE(k_mbox, name) = \
4754 Z_MBOX_INITIALIZER(name) \
4755
4756 /**
4757 * @brief Initialize a mailbox.
4758 *
4759 * This routine initializes a mailbox object, prior to its first use.
4760 *
4761 * @param mbox Address of the mailbox.
4762 */
4763 void k_mbox_init(struct k_mbox *mbox);
4764
4765 /**
4766 * @brief Send a mailbox message in a synchronous manner.
4767 *
4768 * This routine sends a message to @a mbox and waits for a receiver to both
4769 * receive and process it. The message data may be in a buffer or non-existent
4770 * (i.e. an empty message).
4771 *
4772 * @param mbox Address of the mailbox.
4773 * @param tx_msg Address of the transmit message descriptor.
4774 * @param timeout Waiting period for the message to be received,
4775 * or one of the special values K_NO_WAIT
4776 * and K_FOREVER. Once the message has been received,
4777 * this routine waits as long as necessary for the message
4778 * to be completely processed.
4779 *
4780 * @retval 0 Message sent.
4781 * @retval -ENOMSG Returned without waiting.
4782 * @retval -EAGAIN Waiting period timed out.
4783 */
4784 int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
4785 k_timeout_t timeout);
4786
4787 /**
4788 * @brief Send a mailbox message in an asynchronous manner.
4789 *
4790 * This routine sends a message to @a mbox without waiting for a receiver
4791 * to process it. The message data may be in a buffer or non-existent
4792 * (i.e. an empty message). Optionally, the semaphore @a sem will be given
4793 * when the message has been both received and completely processed by
4794 * the receiver.
4795 *
4796 * @param mbox Address of the mailbox.
4797 * @param tx_msg Address of the transmit message descriptor.
4798 * @param sem Address of a semaphore, or NULL if none is needed.
4799 */
4800 void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
4801 struct k_sem *sem);
4802
4803 /**
4804 * @brief Receive a mailbox message.
4805 *
4806 * This routine receives a message from @a mbox, then optionally retrieves
4807 * its data and disposes of the message.
4808 *
4809 * @param mbox Address of the mailbox.
4810 * @param rx_msg Address of the receive message descriptor.
4811 * @param buffer Address of the buffer to receive data, or NULL to defer data
4812 * retrieval and message disposal until later.
4813 * @param timeout Waiting period for a message to be received,
4814 * or one of the special values K_NO_WAIT and K_FOREVER.
4815 *
4816 * @retval 0 Message received.
4817 * @retval -ENOMSG Returned without waiting.
4818 * @retval -EAGAIN Waiting period timed out.
4819 */
4820 int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg,
4821 void *buffer, k_timeout_t timeout);
4822
4823 /**
4824 * @brief Retrieve mailbox message data into a buffer.
4825 *
4826 * This routine completes the processing of a received message by retrieving
4827 * its data into a buffer, then disposing of the message.
4828 *
4829 * Alternatively, this routine can be used to dispose of a received message
4830 * without retrieving its data.
4831 *
4832 * @param rx_msg Address of the receive message descriptor.
4833 * @param buffer Address of the buffer to receive data, or NULL to discard
4834 * the data.
4835 */
4836 void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer);
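
/*
 * Example (illustrative sketch): a synchronous mailbox exchange. The sender
 * blocks until a receiver has retrieved and disposed of the message. Buffer
 * sizes and all symbol names are hypothetical.
 *
 *      K_MBOX_DEFINE(sample_mbox);
 *
 *      void sample_send(const char *data, size_t len)
 *      {
 *              struct k_mbox_msg tx_msg = {
 *                      .size = len,
 *                      .info = 0,
 *                      .tx_data = (void *)data,
 *                      .tx_target_thread = K_ANY,
 *              };
 *
 *              (void)k_mbox_put(&sample_mbox, &tx_msg, K_FOREVER);
 *      }
 *
 *      void sample_receive(void)
 *      {
 *              char buffer[64];
 *              struct k_mbox_msg rx_msg = {
 *                      .size = sizeof(buffer),
 *                      .rx_source_thread = K_ANY,
 *              };
 *
 *              (void)k_mbox_get(&sample_mbox, &rx_msg, buffer, K_FOREVER);
 *      }
 */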
4837
4838 /** @} */
4839
4840 /**
4841 * @defgroup pipe_apis Pipe APIs
4842 * @ingroup kernel_apis
4843 * @{
4844 */
4845
4846 /** Pipe Structure */
4847 struct k_pipe {
4848 unsigned char *buffer; /**< Pipe buffer: may be NULL */
4849 size_t size; /**< Buffer size */
4850 size_t bytes_used; /**< # bytes used in buffer */
4851 size_t read_index; /**< Where in buffer to read from */
4852 size_t write_index; /**< Where in buffer to write */
4853 struct k_spinlock lock; /**< Synchronization lock */
4854
4855 struct {
4856 _wait_q_t readers; /**< Reader wait queue */
4857 _wait_q_t writers; /**< Writer wait queue */
4858 } wait_q; /**< Wait queue */
4859
4860 Z_DECL_POLL_EVENT
4861
4862 uint8_t flags; /**< Flags */
4863
4864 SYS_PORT_TRACING_TRACKING_FIELD(k_pipe)
4865
4866 #ifdef CONFIG_OBJ_CORE_PIPE
4867 struct k_obj_core obj_core;
4868 #endif
4869 };
4870
4871 /**
4872 * @cond INTERNAL_HIDDEN
4873 */
4874 #define K_PIPE_FLAG_ALLOC BIT(0) /**< Buffer was allocated */
4875
4876 #define Z_PIPE_INITIALIZER(obj, pipe_buffer, pipe_buffer_size) \
4877 { \
4878 .buffer = pipe_buffer, \
4879 .size = pipe_buffer_size, \
4880 .bytes_used = 0, \
4881 .read_index = 0, \
4882 .write_index = 0, \
4883 .lock = {}, \
4884 .wait_q = { \
4885 .readers = Z_WAIT_Q_INIT(&obj.wait_q.readers), \
4886 .writers = Z_WAIT_Q_INIT(&obj.wait_q.writers) \
4887 }, \
4888 Z_POLL_EVENT_OBJ_INIT(obj) \
4889 .flags = 0, \
4890 }
4891
4892 /**
4893 * INTERNAL_HIDDEN @endcond
4894 */
4895
4896 /**
4897 * @brief Statically define and initialize a pipe.
4898 *
4899 * The pipe can be accessed outside the module where it is defined using:
4900 *
4901 * @code extern struct k_pipe <name>; @endcode
4902 *
4903 * @param name Name of the pipe.
4904 * @param pipe_buffer_size Size of the pipe's ring buffer (in bytes),
4905 * or zero if no ring buffer is used.
4906 * @param pipe_align Alignment of the pipe's ring buffer (power of 2).
4907 *
4908 */
4909 #define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align) \
4910 static unsigned char __noinit __aligned(pipe_align) \
4911 _k_pipe_buf_##name[pipe_buffer_size]; \
4912 STRUCT_SECTION_ITERABLE(k_pipe, name) = \
4913 Z_PIPE_INITIALIZER(name, _k_pipe_buf_##name, pipe_buffer_size)
4914
4915 /**
4916 * @brief Initialize a pipe.
4917 *
4918 * This routine initializes a pipe object, prior to its first use.
4919 *
4920 * @param pipe Address of the pipe.
4921 * @param buffer Address of the pipe's ring buffer, or NULL if no ring buffer
4922 * is used.
4923 * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
4924 * buffer is used.
4925 */
4926 void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size);
4927
4928 /**
4929 * @brief Release a pipe's allocated buffer
4930 *
4931 * If a pipe object was given a dynamically allocated buffer via
4932 * k_pipe_alloc_init(), this will free it. This function does nothing
4933 * if the buffer wasn't dynamically allocated.
4934 *
4935 * @param pipe Address of the pipe.
4936 * @retval 0 on success
4937 * @retval -EAGAIN nothing to cleanup
4938 */
4939 int k_pipe_cleanup(struct k_pipe *pipe);
4940
4941 /**
4942 * @brief Initialize a pipe and allocate a buffer for it
4943 *
4944 * Storage for the buffer region will be allocated from the calling thread's
4945 * resource pool. This memory will be released if k_pipe_cleanup() is called,
4946 * or userspace is enabled and the pipe object loses all references to it.
4947 *
4948 * This function should only be called on uninitialized pipe objects.
4949 *
4950 * @param pipe Address of the pipe.
4951 * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
4952 * buffer is used.
4953 * @retval 0 on success
4954 * @retval -ENOMEM if memory couldn't be allocated
4955 */
4956 __syscall int k_pipe_alloc_init(struct k_pipe *pipe, size_t size);
4957
4958 /**
4959 * @brief Write data to a pipe.
4960 *
4961 * This routine writes up to @a bytes_to_write bytes of data to @a pipe.
4962 *
4963 * @param pipe Address of the pipe.
4964 * @param data Address of data to write.
4965 * @param bytes_to_write Size of data (in bytes).
4966 * @param bytes_written Address of area to hold the number of bytes written.
4967 * @param min_xfer Minimum number of bytes to write.
4968 * @param timeout Waiting period to wait for the data to be written,
4969 * or one of the special values K_NO_WAIT and K_FOREVER.
4970 *
4971 * @retval 0 At least @a min_xfer bytes of data were written.
4972 * @retval -EIO Returned without waiting; zero data bytes were written.
4973 * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
4974 * minus one data bytes were written.
4975 */
4976 __syscall int k_pipe_put(struct k_pipe *pipe, const void *data,
4977 size_t bytes_to_write, size_t *bytes_written,
4978 size_t min_xfer, k_timeout_t timeout);
4979
4980 /**
4981 * @brief Read data from a pipe.
4982 *
4983 * This routine reads up to @a bytes_to_read bytes of data from @a pipe.
4984 *
4985 * @param pipe Address of the pipe.
4986 * @param data Address to place the data read from pipe.
4987 * @param bytes_to_read Maximum number of data bytes to read.
4988 * @param bytes_read Address of area to hold the number of bytes read.
4989 * @param min_xfer Minimum number of data bytes to read.
4990 * @param timeout Waiting period to wait for the data to be read,
4991 * or one of the special values K_NO_WAIT and K_FOREVER.
4992 *
4993 * @retval 0 At least @a min_xfer bytes of data were read.
4994 * @retval -EINVAL invalid parameters supplied
4995 * @retval -EIO Returned without waiting; zero data bytes were read.
4996 * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
4997 * minus one data bytes were read.
4998 */
4999 __syscall int k_pipe_get(struct k_pipe *pipe, void *data,
5000 size_t bytes_to_read, size_t *bytes_read,
5001 size_t min_xfer, k_timeout_t timeout);
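/*
 * Illustrative sketch (not part of the original header): a statically defined
 * pipe with one writer and one reader. The pipe name, buffer size and
 * timeouts are assumptions for this example; K_MSEC() and K_FOREVER come
 * from the standard timeout API.
 *
 *     K_PIPE_DEFINE(my_pipe, 64, 4);
 *
 *     void producer(void)
 *     {
 *         const char msg[] = "hello";
 *         size_t written;
 *
 *         // Block until the whole message has been accepted by the pipe.
 *         (void)k_pipe_put(&my_pipe, msg, sizeof(msg), &written,
 *                          sizeof(msg), K_FOREVER);
 *     }
 *
 *     void consumer(void)
 *     {
 *         char buf[8];
 *         size_t num_read;
 *
 *         // Return once at least one byte is available, or after 100 ms.
 *         if (k_pipe_get(&my_pipe, buf, sizeof(buf), &num_read, 1,
 *                        K_MSEC(100)) == 0) {
 *             // num_read bytes were copied into buf
 *         }
 *     }
 */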
5002
5003 /**
5004 * @brief Query the number of bytes that may be read from @a pipe.
5005 *
5006 * @param pipe Address of the pipe.
5007 *
5008 * @retval a number n such that 0 <= n <= @ref k_pipe.size; the
5009 * result is zero for unbuffered pipes.
5010 */
5011 __syscall size_t k_pipe_read_avail(struct k_pipe *pipe);
5012
5013 /**
5014 * @brief Query the number of bytes that may be written to @a pipe
5015 *
5016 * @param pipe Address of the pipe.
5017 *
5018 * @retval a number n such that 0 <= n <= @ref k_pipe.size; the
5019 * result is zero for unbuffered pipes.
5020 */
5021 __syscall size_t k_pipe_write_avail(struct k_pipe *pipe);
5022
5023 /**
5024 * @brief Flush the pipe of write data
5025 *
5026  * This routine flushes the pipe. Flushing is equivalent to reading all the
5027  * data in the pipe's buffer, plus all the data from any pended writers, into
5028  * a large temporary buffer and then discarding that buffer. Any writers that
5029  * were previously pended become unpended.
5030 *
5031 * @param pipe Address of the pipe.
5032 */
5033 __syscall void k_pipe_flush(struct k_pipe *pipe);
5034
5035 /**
5036 * @brief Flush the pipe's internal buffer
5037 *
5038 * This routine flushes the pipe's internal buffer. This is equivalent to
5039 * reading up to N bytes from the pipe (where N is the size of the pipe's
5040 * buffer) into a temporary buffer and then discarding that buffer. If there
5041 * were writers previously pending, then some may unpend as they try to fill
5042 * up the pipe's emptied buffer.
5043 *
5044 * @param pipe Address of the pipe.
5045 */
5046 __syscall void k_pipe_buffer_flush(struct k_pipe *pipe);
5047
5048 /** @} */
5049
5050 /**
5051 * @cond INTERNAL_HIDDEN
5052 */
5053
5054 struct k_mem_slab_info {
5055 uint32_t num_blocks;
5056 size_t block_size;
5057 uint32_t num_used;
5058 #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
5059 uint32_t max_used;
5060 #endif
5061 };
5062
5063 struct k_mem_slab {
5064 _wait_q_t wait_q;
5065 struct k_spinlock lock;
5066 char *buffer;
5067 char *free_list;
5068 struct k_mem_slab_info info;
5069
5070 SYS_PORT_TRACING_TRACKING_FIELD(k_mem_slab)
5071
5072 #ifdef CONFIG_OBJ_CORE_MEM_SLAB
5073 struct k_obj_core obj_core;
5074 #endif
5075 };
5076
5077 #define Z_MEM_SLAB_INITIALIZER(_slab, _slab_buffer, _slab_block_size, \
5078 _slab_num_blocks) \
5079 { \
5080 .wait_q = Z_WAIT_Q_INIT(&(_slab).wait_q), \
5081 .lock = {}, \
5082 .buffer = _slab_buffer, \
5083 .free_list = NULL, \
5084 .info = {_slab_num_blocks, _slab_block_size, 0} \
5085 }
5086
5087
5088 /**
5089 * INTERNAL_HIDDEN @endcond
5090 */
5091
5092 /**
5093 * @defgroup mem_slab_apis Memory Slab APIs
5094 * @ingroup kernel_apis
5095 * @{
5096 */
5097
5098 /**
5099 * @brief Statically define and initialize a memory slab in a public (non-static) scope.
5100 *
5101 * The memory slab's buffer contains @a slab_num_blocks memory blocks
5102 * that are @a slab_block_size bytes long. The buffer is aligned to a
5103 * @a slab_align -byte boundary. To ensure that each memory block is similarly
5104 * aligned to this boundary, @a slab_block_size must also be a multiple of
5105 * @a slab_align.
5106 *
5107 * The memory slab can be accessed outside the module where it is defined
5108 * using:
5109 *
5110 * @code extern struct k_mem_slab <name>; @endcode
5111 *
5112  * @note This macro cannot be used together with the static keyword.
5113 * If such a use-case is desired, use @ref K_MEM_SLAB_DEFINE_STATIC
5114 * instead.
5115 *
5116 * @param name Name of the memory slab.
5117 * @param slab_block_size Size of each memory block (in bytes).
5118  * @param slab_num_blocks Number of memory blocks.
5119 * @param slab_align Alignment of the memory slab's buffer (power of 2).
5120 */
5121 #define K_MEM_SLAB_DEFINE(name, slab_block_size, slab_num_blocks, slab_align) \
5122 char __noinit_named(k_mem_slab_buf_##name) \
5123 __aligned(WB_UP(slab_align)) \
5124 _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
5125 STRUCT_SECTION_ITERABLE(k_mem_slab, name) = \
5126 Z_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \
5127 WB_UP(slab_block_size), slab_num_blocks)
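/*
 * Illustrative sketch (not part of the original header): a slab of four
 * 64-byte blocks aligned to 4 bytes. The block size is a multiple of the
 * alignment, as required above; the name 'my_slab' is an assumption for
 * this example.
 *
 *     K_MEM_SLAB_DEFINE(my_slab, 64, 4, 4);
 */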
5128
5129 /**
5130 * @brief Statically define and initialize a memory slab in a private (static) scope.
5131 *
5132 * The memory slab's buffer contains @a slab_num_blocks memory blocks
5133 * that are @a slab_block_size bytes long. The buffer is aligned to a
5134 * @a slab_align -byte boundary. To ensure that each memory block is similarly
5135 * aligned to this boundary, @a slab_block_size must also be a multiple of
5136 * @a slab_align.
5137 *
5138 * @param name Name of the memory slab.
5139 * @param slab_block_size Size of each memory block (in bytes).
5140  * @param slab_num_blocks Number of memory blocks.
5141 * @param slab_align Alignment of the memory slab's buffer (power of 2).
5142 */
5143 #define K_MEM_SLAB_DEFINE_STATIC(name, slab_block_size, slab_num_blocks, slab_align) \
5144 static char __noinit_named(k_mem_slab_buf_##name) \
5145 __aligned(WB_UP(slab_align)) \
5146 _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
5147 static STRUCT_SECTION_ITERABLE(k_mem_slab, name) = \
5148 Z_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \
5149 WB_UP(slab_block_size), slab_num_blocks)
5150
5151 /**
5152 * @brief Initialize a memory slab.
5153 *
5154 * Initializes a memory slab, prior to its first use.
5155 *
5156 * The memory slab's buffer contains @a slab_num_blocks memory blocks
5157 * that are @a slab_block_size bytes long. The buffer must be aligned to an
5158 * N-byte boundary matching a word boundary, where N is a power of 2
5159  * (e.g. 4 on 32-bit systems, 8, 16, ...).
5160 * To ensure that each memory block is similarly aligned to this boundary,
5161 * @a slab_block_size must also be a multiple of N.
5162 *
5163 * @param slab Address of the memory slab.
5164 * @param buffer Pointer to buffer used for the memory blocks.
5165 * @param block_size Size of each memory block (in bytes).
5166 * @param num_blocks Number of memory blocks.
5167 *
5168 * @retval 0 on success
5169 * @retval -EINVAL invalid data supplied
5170 *
5171 */
5172 int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
5173 size_t block_size, uint32_t num_blocks);
5174
5175 /**
5176 * @brief Allocate memory from a memory slab.
5177 *
5178 * This routine allocates a memory block from a memory slab.
5179 *
5180 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5181 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5182 *
5183 * @funcprops \isr_ok
5184 *
5185 * @param slab Address of the memory slab.
5186 * @param mem Pointer to block address area.
5187 * @param timeout Non-negative waiting period to wait for operation to complete.
5188 * Use K_NO_WAIT to return without waiting,
5189 * or K_FOREVER to wait as long as necessary.
5190 *
5191 * @retval 0 Memory allocated. The block address area pointed at by @a mem
5192 * is set to the starting address of the memory block.
5193 * @retval -ENOMEM Returned without waiting.
5194 * @retval -EAGAIN Waiting period timed out.
5195 * @retval -EINVAL Invalid data supplied
5196 */
5197 int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem,
5198 k_timeout_t timeout);
5199
5200 /**
5201 * @brief Free memory allocated from a memory slab.
5202 *
5203 * This routine releases a previously allocated memory block back to its
5204 * associated memory slab.
5205 *
5206 * @param slab Address of the memory slab.
5207 * @param mem Pointer to the memory block (as returned by k_mem_slab_alloc()).
5208 */
5209 void k_mem_slab_free(struct k_mem_slab *slab, void *mem);
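/*
 * Illustrative sketch (not part of the original header): allocating a block
 * from the 'my_slab' object assumed in the earlier example, using it, and
 * returning it. The 10 ms timeout is an arbitrary choice for the example.
 *
 *     void use_one_block(void)
 *     {
 *         void *block;
 *
 *         if (k_mem_slab_alloc(&my_slab, &block, K_MSEC(10)) == 0) {
 *             // ... use the 64-byte block ...
 *             k_mem_slab_free(&my_slab, block);
 *         }
 *     }
 */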
5210
5211 /**
5212 * @brief Get the number of used blocks in a memory slab.
5213 *
5214 * This routine gets the number of memory blocks that are currently
5215 * allocated in @a slab.
5216 *
5217 * @param slab Address of the memory slab.
5218 *
5219 * @return Number of allocated memory blocks.
5220 */
5221 static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
5222 {
5223 return slab->info.num_used;
5224 }
5225
5226 /**
5227 * @brief Get the number of maximum used blocks so far in a memory slab.
5228 *
5229 * This routine gets the maximum number of memory blocks that were
5230 * allocated in @a slab.
5231 *
5232 * @param slab Address of the memory slab.
5233 *
5234 * @return Maximum number of allocated memory blocks.
5235 */
5236 static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
5237 {
5238 #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
5239 return slab->info.max_used;
5240 #else
5241 ARG_UNUSED(slab);
5242 return 0;
5243 #endif
5244 }
5245
5246 /**
5247 * @brief Get the number of unused blocks in a memory slab.
5248 *
5249 * This routine gets the number of memory blocks that are currently
5250 * unallocated in @a slab.
5251 *
5252 * @param slab Address of the memory slab.
5253 *
5254 * @return Number of unallocated memory blocks.
5255 */
5256 static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
5257 {
5258 return slab->info.num_blocks - slab->info.num_used;
5259 }
5260
5261 /**
5262 * @brief Get the memory stats for a memory slab
5263 *
5264 * This routine gets the runtime memory usage stats for the slab @a slab.
5265 *
5266 * @param slab Address of the memory slab
5267 * @param stats Pointer to memory into which to copy memory usage statistics
5268 *
5269 * @retval 0 Success
5270 * @retval -EINVAL Any parameter points to NULL
5271 */
5272
5273 int k_mem_slab_runtime_stats_get(struct k_mem_slab *slab, struct sys_memory_stats *stats);
5274
5275 /**
5276 * @brief Reset the maximum memory usage for a slab
5277 *
5278 * This routine resets the maximum memory usage for the slab @a slab to its
5279 * current usage.
5280 *
5281 * @param slab Address of the memory slab
5282 *
5283 * @retval 0 Success
5284 * @retval -EINVAL Memory slab is NULL
5285 */
5286 int k_mem_slab_runtime_stats_reset_max(struct k_mem_slab *slab);
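/*
 * Illustrative sketch (not part of the original header): querying a slab's
 * runtime usage. It assumes the 'my_slab' object from the earlier example
 * and that struct sys_memory_stats (from <zephyr/sys/mem_stats.h>) exposes
 * an 'allocated_bytes' field.
 *
 *     struct sys_memory_stats stats;
 *
 *     if (k_mem_slab_runtime_stats_get(&my_slab, &stats) == 0) {
 *         printk("in use: %zu bytes\n", stats.allocated_bytes);
 *     }
 *
 *     (void)k_mem_slab_runtime_stats_reset_max(&my_slab);
 */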
5287
5288 /** @} */
5289
5290 /**
5291 * @addtogroup heap_apis
5292 * @{
5293 */
5294
5295 /* kernel synchronized heap struct */
5296
5297 struct k_heap {
5298 struct sys_heap heap;
5299 _wait_q_t wait_q;
5300 struct k_spinlock lock;
5301 };
5302
5303 /**
5304 * @brief Initialize a k_heap
5305 *
5306 * This constructs a synchronized k_heap object over a memory region
5307 * specified by the user. Note that while any alignment and size can
5308 * be passed as valid parameters, internal alignment restrictions
5309 * inside the inner sys_heap mean that not all bytes may be usable as
5310 * allocated memory.
5311 *
5312 * @param h Heap struct to initialize
5313 * @param mem Pointer to memory.
5314 * @param bytes Size of memory region, in bytes
5315 */
5316 void k_heap_init(struct k_heap *h, void *mem,
5317 size_t bytes) __attribute_nonnull(1);
5318
5319 /** @brief Allocate aligned memory from a k_heap
5320 *
5321 * Behaves in all ways like k_heap_alloc(), except that the returned
5322 * memory (if available) will have a starting address in memory which
5323 * is a multiple of the specified power-of-two alignment value in
5324 * bytes. The resulting memory can be returned to the heap using
5325 * k_heap_free().
5326 *
5327 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5328 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5329 *
5330 * @funcprops \isr_ok
5331 *
5332 * @param h Heap from which to allocate
5333 * @param align Alignment in bytes, must be a power of two
5334 * @param bytes Number of bytes requested
5335 * @param timeout How long to wait, or K_NO_WAIT
5336  * @return Pointer to memory the caller can now use, or NULL if none is available
5337 */
5338 void *k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes,
5339 k_timeout_t timeout) __attribute_nonnull(1);
5340
5341 /**
5342 * @brief Allocate memory from a k_heap
5343 *
5344 * Allocates and returns a memory buffer from the memory region owned
5345 * by the heap. If no memory is available immediately, the call will
5346 * block for the specified timeout (constructed via the standard
5347 * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
5348 * freed. If the allocation cannot be performed by the expiration of
5349 * the timeout, NULL will be returned.
5350 * Allocated memory is aligned on a multiple of pointer sizes.
5351 *
5352 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5353 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5354 *
5355 * @funcprops \isr_ok
5356 *
5357 * @param h Heap from which to allocate
5358 * @param bytes Desired size of block to allocate
5359 * @param timeout How long to wait, or K_NO_WAIT
5360 * @return A pointer to valid heap memory, or NULL
5361 */
5362 void *k_heap_alloc(struct k_heap *h, size_t bytes,
5363 k_timeout_t timeout) __attribute_nonnull(1);
5364
5365 /**
5366 * @brief Free memory allocated by k_heap_alloc()
5367 *
5368 * Returns the specified memory block, which must have been returned
5369 * from k_heap_alloc(), to the heap for use by other callers. Passing
5370 * a NULL block is legal, and has no effect.
5371 *
5372 * @param h Heap to which to return the memory
5373 * @param mem A valid memory block, or NULL
5374 */
5375 void k_heap_free(struct k_heap *h, void *mem) __attribute_nonnull(1);
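/*
 * Illustrative sketch (not part of the original header): building a k_heap
 * over caller-supplied memory and performing an aligned allocation. The
 * buffer size, the 32-byte alignment and all names are assumptions for
 * this example.
 *
 *     static char heap_mem[1024] __aligned(8);
 *     static struct k_heap my_heap;
 *
 *     void heap_demo(void)
 *     {
 *         void *p;
 *
 *         k_heap_init(&my_heap, heap_mem, sizeof(heap_mem));
 *
 *         p = k_heap_aligned_alloc(&my_heap, 32, 128, K_NO_WAIT);
 *         if (p != NULL) {
 *             // ... use the 128-byte, 32-byte-aligned block ...
 *             k_heap_free(&my_heap, p);
 *         }
 *     }
 */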
5376
5377 /* Hand-calculated minimum heap sizes needed to return a successful
5378 * 1-byte allocation. See details in lib/os/heap.[ch]
5379 */
5380 #define Z_HEAP_MIN_SIZE (sizeof(void *) > 4 ? 56 : 44)
5381
5382 /**
5383 * @brief Define a static k_heap in the specified linker section
5384 *
5385 * This macro defines and initializes a static memory region and
5386 * k_heap of the requested size in the specified linker section.
5387 * After kernel start, &name can be used as if k_heap_init() had
5388 * been called.
5389 *
5390 * Note that this macro enforces a minimum size on the memory region
5391 * to accommodate metadata requirements. Very small heaps will be
5392 * padded to fit.
5393 *
5394 * @param name Symbol name for the struct k_heap object
5395 * @param bytes Size of memory region, in bytes
5396  * @param in_section __attribute__((section(name)))
5397 */
5398 #define Z_HEAP_DEFINE_IN_SECT(name, bytes, in_section) \
5399 char in_section \
5400 __aligned(8) /* CHUNK_UNIT */ \
5401 kheap_##name[MAX(bytes, Z_HEAP_MIN_SIZE)]; \
5402 STRUCT_SECTION_ITERABLE(k_heap, name) = { \
5403 .heap = { \
5404 .init_mem = kheap_##name, \
5405 .init_bytes = MAX(bytes, Z_HEAP_MIN_SIZE), \
5406 }, \
5407 }
5408
5409 /**
5410 * @brief Define a static k_heap
5411 *
5412 * This macro defines and initializes a static memory region and
5413 * k_heap of the requested size. After kernel start, &name can be
5414 * used as if k_heap_init() had been called.
5415 *
5416 * Note that this macro enforces a minimum size on the memory region
5417 * to accommodate metadata requirements. Very small heaps will be
5418 * padded to fit.
5419 *
5420 * @param name Symbol name for the struct k_heap object
5421 * @param bytes Size of memory region, in bytes
5422 */
5423 #define K_HEAP_DEFINE(name, bytes) \
5424 Z_HEAP_DEFINE_IN_SECT(name, bytes, \
5425 __noinit_named(kheap_buf_##name))
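/*
 * Illustrative sketch (not part of the original header): a statically
 * defined heap that a thread allocates from, waiting up to 100 ms for
 * memory to become available. Names and sizes are assumptions for this
 * example.
 *
 *     K_HEAP_DEFINE(app_heap, 2048);
 *
 *     void *get_buffer(void)
 *     {
 *         return k_heap_alloc(&app_heap, 256, K_MSEC(100));
 *     }
 */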
5426
5427 /**
5428 * @brief Define a static k_heap in uncached memory
5429 *
5430 * This macro defines and initializes a static memory region and
5431 * k_heap of the requested size in uncached memory. After kernel
5432 * start, &name can be used as if k_heap_init() had been called.
5433 *
5434 * Note that this macro enforces a minimum size on the memory region
5435 * to accommodate metadata requirements. Very small heaps will be
5436 * padded to fit.
5437 *
5438 * @param name Symbol name for the struct k_heap object
5439 * @param bytes Size of memory region, in bytes
5440 */
5441 #define K_HEAP_DEFINE_NOCACHE(name, bytes) \
5442 Z_HEAP_DEFINE_IN_SECT(name, bytes, __nocache)
5443
5444 /**
5445 * @}
5446 */
5447
5448 /**
5449 * @defgroup heap_apis Heap APIs
5450 * @ingroup kernel_apis
5451 * @{
5452 */
5453
5454 /**
5455 * @brief Allocate memory from the heap with a specified alignment.
5456 *
5457 * This routine provides semantics similar to aligned_alloc(); memory is
5458 * allocated from the heap with a specified alignment. However, one minor
5459 * difference is that k_aligned_alloc() accepts any non-zero @p size,
5460 * whereas aligned_alloc() only accepts a @p size that is an integral
5461 * multiple of @p align.
5462 *
5463 * Above, aligned_alloc() refers to:
5464 * C11 standard (ISO/IEC 9899:2011): 7.22.3.1
5465 * The aligned_alloc function (p: 347-348)
5466 *
5467 * @param align Alignment of memory requested (in bytes).
5468 * @param size Amount of memory requested (in bytes).
5469 *
5470 * @return Address of the allocated memory if successful; otherwise NULL.
5471 */
5472 void *k_aligned_alloc(size_t align, size_t size);
5473
5474 /**
5475 * @brief Allocate memory from the heap.
5476 *
5477 * This routine provides traditional malloc() semantics. Memory is
5478 * allocated from the heap memory pool.
5479 * Allocated memory is aligned on a multiple of pointer sizes.
5480 *
5481 * @param size Amount of memory requested (in bytes).
5482 *
5483 * @return Address of the allocated memory if successful; otherwise NULL.
5484 */
5485 void *k_malloc(size_t size);
5486
5487 /**
5488 * @brief Free memory allocated from heap.
5489 *
5490 * This routine provides traditional free() semantics. The memory being
5491 * returned must have been allocated from the heap memory pool.
5492 *
5493 * If @a ptr is NULL, no operation is performed.
5494 *
5495 * @param ptr Pointer to previously allocated memory.
5496 */
5497 void k_free(void *ptr);
5498
5499 /**
5500 * @brief Allocate memory from heap, array style
5501 *
5502 * This routine provides traditional calloc() semantics. Memory is
5503 * allocated from the heap memory pool and zeroed.
5504 *
5505 * @param nmemb Number of elements in the requested array
5506 * @param size Size of each array element (in bytes).
5507 *
5508 * @return Address of the allocated memory if successful; otherwise NULL.
5509 */
5510 void *k_calloc(size_t nmemb, size_t size);
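/*
 * Illustrative sketch (not part of the original header): allocating a zeroed
 * array from the system heap and freeing it. This relies on the kernel
 * system heap being enabled (CONFIG_HEAP_MEM_POOL_SIZE > 0); the element
 * count is an assumption for this example.
 *
 *     uint32_t *table = k_calloc(16, sizeof(uint32_t));
 *
 *     if (table != NULL) {
 *         // ... table[0..15] start out as zero ...
 *         k_free(table);
 *     }
 */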
5511
5512 /** @} */
5513
5514 /* polling API - PRIVATE */
5515
5516 #ifdef CONFIG_POLL
5517 #define _INIT_OBJ_POLL_EVENT(obj) do { (obj)->poll_event = NULL; } while (false)
5518 #else
5519 #define _INIT_OBJ_POLL_EVENT(obj) do { } while (false)
5520 #endif
5521
5522 /* private - types bit positions */
5523 enum _poll_types_bits {
5524 /* can be used to ignore an event */
5525 _POLL_TYPE_IGNORE,
5526
5527 /* to be signaled by k_poll_signal_raise() */
5528 _POLL_TYPE_SIGNAL,
5529
5530 /* semaphore availability */
5531 _POLL_TYPE_SEM_AVAILABLE,
5532
5533 /* queue/FIFO/LIFO data availability */
5534 _POLL_TYPE_DATA_AVAILABLE,
5535
5536 /* msgq data availability */
5537 _POLL_TYPE_MSGQ_DATA_AVAILABLE,
5538
5539 /* pipe data availability */
5540 _POLL_TYPE_PIPE_DATA_AVAILABLE,
5541
5542 _POLL_NUM_TYPES
5543 };
5544
5545 #define Z_POLL_TYPE_BIT(type) (1U << ((type) - 1U))
5546
5547 /* private - states bit positions */
5548 enum _poll_states_bits {
5549 /* default state when creating event */
5550 _POLL_STATE_NOT_READY,
5551
5552 /* signaled by k_poll_signal_raise() */
5553 _POLL_STATE_SIGNALED,
5554
5555 /* semaphore is available */
5556 _POLL_STATE_SEM_AVAILABLE,
5557
5558 /* data is available to read on queue/FIFO/LIFO */
5559 _POLL_STATE_DATA_AVAILABLE,
5560
5561 /* queue/FIFO/LIFO wait was cancelled */
5562 _POLL_STATE_CANCELLED,
5563
5564 /* data is available to read on a message queue */
5565 _POLL_STATE_MSGQ_DATA_AVAILABLE,
5566
5567 /* data is available to read from a pipe */
5568 _POLL_STATE_PIPE_DATA_AVAILABLE,
5569
5570 _POLL_NUM_STATES
5571 };
5572
5573 #define Z_POLL_STATE_BIT(state) (1U << ((state) - 1U))
5574
5575 #define _POLL_EVENT_NUM_UNUSED_BITS \
5576 (32 - (0 \
5577 + 8 /* tag */ \
5578 + _POLL_NUM_TYPES \
5579 + _POLL_NUM_STATES \
5580 + 1 /* modes */ \
5581 ))
5582
5583 /* end of polling API - PRIVATE */
5584
5585
5586 /**
5587 * @defgroup poll_apis Async polling APIs
5588 * @ingroup kernel_apis
5589 * @{
5590 */
5591
5592 /* Public polling API */
5593
5594 /* public - values for k_poll_event.type bitfield */
5595 #define K_POLL_TYPE_IGNORE 0
5596 #define K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
5597 #define K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
5598 #define K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
5599 #define K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE
5600 #define K_POLL_TYPE_MSGQ_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_MSGQ_DATA_AVAILABLE)
5601 #define K_POLL_TYPE_PIPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_PIPE_DATA_AVAILABLE)
5602
5603 /* public - polling modes */
5604 enum k_poll_modes {
5605 /* polling thread does not take ownership of objects when available */
5606 K_POLL_MODE_NOTIFY_ONLY = 0,
5607
5608 K_POLL_NUM_MODES
5609 };
5610
5611 /* public - values for k_poll_event.state bitfield */
5612 #define K_POLL_STATE_NOT_READY 0
5613 #define K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED)
5614 #define K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
5615 #define K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
5616 #define K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE
5617 #define K_POLL_STATE_MSGQ_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_MSGQ_DATA_AVAILABLE)
5618 #define K_POLL_STATE_PIPE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_PIPE_DATA_AVAILABLE)
5619 #define K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED)
5620
5621 /* public - poll signal object */
5622 struct k_poll_signal {
5623 /** PRIVATE - DO NOT TOUCH */
5624 sys_dlist_t poll_events;
5625
5626 /**
5627 * 1 if the event has been signaled, 0 otherwise. Stays set to 1 until
5628 * user resets it to 0.
5629 */
5630 unsigned int signaled;
5631
5632 /** custom result value passed to k_poll_signal_raise() if needed */
5633 int result;
5634 };
5635
5636 #define K_POLL_SIGNAL_INITIALIZER(obj) \
5637 { \
5638 .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events), \
5639 .signaled = 0, \
5640 .result = 0, \
5641 }
5642 /**
5643 * @brief Poll Event
5644 *
5645 */
5646 struct k_poll_event {
5647 /** PRIVATE - DO NOT TOUCH */
5648 sys_dnode_t _node;
5649
5650 /** PRIVATE - DO NOT TOUCH */
5651 struct z_poller *poller;
5652
5653 /** optional user-specified tag, opaque, untouched by the API */
5654 uint32_t tag:8;
5655
5656 /** bitfield of event types (bitwise-ORed K_POLL_TYPE_xxx values) */
5657 uint32_t type:_POLL_NUM_TYPES;
5658
5659 /** bitfield of event states (bitwise-ORed K_POLL_STATE_xxx values) */
5660 uint32_t state:_POLL_NUM_STATES;
5661
5662 /** mode of operation, from enum k_poll_modes */
5663 uint32_t mode:1;
5664
5665 /** unused bits in 32-bit word */
5666 uint32_t unused:_POLL_EVENT_NUM_UNUSED_BITS;
5667
5668 /** per-type data */
5669 union {
5670 void *obj;
5671 struct k_poll_signal *signal;
5672 struct k_sem *sem;
5673 struct k_fifo *fifo;
5674 struct k_queue *queue;
5675 struct k_msgq *msgq;
5676 #ifdef CONFIG_PIPES
5677 struct k_pipe *pipe;
5678 #endif
5679 };
5680 };
5681
5682 #define K_POLL_EVENT_INITIALIZER(_event_type, _event_mode, _event_obj) \
5683 { \
5684 .poller = NULL, \
5685 .type = _event_type, \
5686 .state = K_POLL_STATE_NOT_READY, \
5687 .mode = _event_mode, \
5688 .unused = 0, \
5689 { \
5690 .obj = _event_obj, \
5691 }, \
5692 }
5693
5694 #define K_POLL_EVENT_STATIC_INITIALIZER(_event_type, _event_mode, _event_obj, \
5695 event_tag) \
5696 { \
5697 .tag = event_tag, \
5698 .type = _event_type, \
5699 .state = K_POLL_STATE_NOT_READY, \
5700 .mode = _event_mode, \
5701 .unused = 0, \
5702 { \
5703 .obj = _event_obj, \
5704 }, \
5705 }
5706
5707 /**
5708 * @brief Initialize one struct k_poll_event instance
5709 *
5710  * After this routine is called on a poll event, the event is ready to be
5711 * placed in an event array to be passed to k_poll().
5712 *
5713 * @param event The event to initialize.
5714 * @param type A bitfield of the types of event, from the K_POLL_TYPE_xxx
5715 * values. Only values that apply to the same object being polled
5716 * can be used together. Choosing K_POLL_TYPE_IGNORE disables the
5717 * event.
5718 * @param mode Future. Use K_POLL_MODE_NOTIFY_ONLY.
5719 * @param obj Kernel object or poll signal.
5720 */
5721
5722 void k_poll_event_init(struct k_poll_event *event, uint32_t type,
5723 int mode, void *obj);
5724
5725 /**
5726 * @brief Wait for one or many of multiple poll events to occur
5727 *
5728 * This routine allows a thread to wait concurrently for one or many of
5729 * multiple poll events to have occurred. Such events can be a kernel object
5730 * being available, like a semaphore, or a poll signal event.
5731 *
5732 * When an event notifies that a kernel object is available, the kernel object
5733 * is not "given" to the thread calling k_poll(): it merely signals the fact
5734 * that the object was available when the k_poll() call was in effect. Also,
5735 * all threads trying to acquire an object the regular way, i.e. by pending on
5736 * the object, have precedence over the thread polling on the object. This
5737 * means that the polling thread will never get the poll event on an object
5738 * until the object becomes available and its pend queue is empty. For this
5739 * reason, the k_poll() call is more effective when the objects being polled
5740 * only have one thread, the polling thread, trying to acquire them.
5741 *
5742 * When k_poll() returns 0, the caller should loop on all the events that were
5743 * passed to k_poll() and check the state field for the values that were
5744 * expected and take the associated actions.
5745 *
5746 * Before being reused for another call to k_poll(), the user has to reset the
5747 * state field to K_POLL_STATE_NOT_READY.
5748 *
5749 * When called from user mode, a temporary memory allocation is required from
5750 * the caller's resource pool.
5751 *
5752 * @param events An array of events to be polled for.
5753 * @param num_events The number of events in the array.
5754 * @param timeout Waiting period for an event to be ready,
5755 * or one of the special values K_NO_WAIT and K_FOREVER.
5756 *
5757 * @retval 0 One or more events are ready.
5758 * @retval -EAGAIN Waiting period timed out.
5759 * @retval -EINTR Polling has been interrupted, e.g. with
5760 * k_queue_cancel_wait(). All output events are still set and valid,
5761 * cancelled event(s) will be set to K_POLL_STATE_CANCELLED. In other
5762  * words, -EINTR status means that at least one of the output events is
5763 * K_POLL_STATE_CANCELLED.
5764 * @retval -ENOMEM Thread resource pool insufficient memory (user mode only)
5765 * @retval -EINVAL Bad parameters (user mode only)
5766 */
5767
5768 __syscall int k_poll(struct k_poll_event *events, int num_events,
5769 k_timeout_t timeout);
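/*
 * Illustrative sketch (not part of the original header): polling on a
 * semaphore and a poll signal at the same time. Object names are assumptions
 * for this example; note how the states are reset to K_POLL_STATE_NOT_READY
 * before the events can be reused, and how the semaphore still has to be
 * taken explicitly once it is reported as available.
 *
 *     K_SEM_DEFINE(my_sem, 0, 1);
 *     static struct k_poll_signal my_sig;
 *
 *     void wait_for_either(void)
 *     {
 *         struct k_poll_event events[2] = {
 *             K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
 *                                      K_POLL_MODE_NOTIFY_ONLY, &my_sem),
 *             K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL,
 *                                      K_POLL_MODE_NOTIFY_ONLY, &my_sig),
 *         };
 *
 *         k_poll_signal_init(&my_sig);
 *
 *         if (k_poll(events, 2, K_FOREVER) == 0) {
 *             if (events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
 *                 k_sem_take(&my_sem, K_NO_WAIT);   // actually acquire it
 *             }
 *             if (events[1].state == K_POLL_STATE_SIGNALED) {
 *                 // events[1].signal->result holds the raised value
 *             }
 *             events[0].state = K_POLL_STATE_NOT_READY;
 *             events[1].state = K_POLL_STATE_NOT_READY;
 *         }
 *     }
 */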
5770
5771 /**
5772 * @brief Initialize a poll signal object.
5773 *
5774 * Ready a poll signal object to be signaled via k_poll_signal_raise().
5775 *
5776 * @param sig A poll signal.
5777 */
5778
5779 __syscall void k_poll_signal_init(struct k_poll_signal *sig);
5780
5781 /**
5782 * @brief Reset a poll signal object's state to unsignaled.
5783 *
5784 * @param sig A poll signal object
5785 */
5786 __syscall void k_poll_signal_reset(struct k_poll_signal *sig);
5787
5788 /**
5789 * @brief Fetch the signaled state and result value of a poll signal
5790 *
5791 * @param sig A poll signal object
5792 * @param signaled An integer buffer which will be written nonzero if the
5793 * object was signaled
5794 * @param result An integer destination buffer which will be written with the
5795 * result value if the object was signaled, or an undefined
5796 * value if it was not.
5797 */
5798 __syscall void k_poll_signal_check(struct k_poll_signal *sig,
5799 unsigned int *signaled, int *result);
5800
5801 /**
5802 * @brief Signal a poll signal object.
5803 *
5804 * This routine makes ready a poll signal, which is basically a poll event of
5805 * type K_POLL_TYPE_SIGNAL. If a thread was polling on that event, it will be
5806 * made ready to run. A @a result value can be specified.
5807 *
5808 * The poll signal contains a 'signaled' field that, when set by
5809 * k_poll_signal_raise(), stays set until the user sets it back to 0 with
5810 * k_poll_signal_reset(). It thus has to be reset by the user before being
5811  * passed again to k_poll(), or k_poll() will consider it already signaled and
5812 * will return immediately.
5813 *
5814 * @note The result is stored and the 'signaled' field is set even if
5815 * this function returns an error indicating that an expiring poll was
5816 * not notified. The next k_poll() will detect the missed raise.
5817 *
5818 * @param sig A poll signal.
5819 * @param result The value to store in the result field of the signal.
5820 *
5821 * @retval 0 The signal was delivered successfully.
5822 * @retval -EAGAIN The polling thread's timeout is in the process of expiring.
5823 */
5824
5825 __syscall int k_poll_signal_raise(struct k_poll_signal *sig, int result);
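/*
 * Illustrative sketch (not part of the original header): raising the signal
 * from a producer context and consuming it on the polling side. 'my_sig'
 * and the 0x55 result value are assumptions carried over from the previous
 * example.
 *
 *     // Producer (thread or ISR):
 *     k_poll_signal_raise(&my_sig, 0x55);
 *
 *     // Consumer, after k_poll() reported K_POLL_STATE_SIGNALED:
 *     unsigned int signaled;
 *     int result;
 *
 *     k_poll_signal_check(&my_sig, &signaled, &result);
 *     if (signaled) {
 *         // result == 0x55 here
 *         k_poll_signal_reset(&my_sig);
 *     }
 */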
5826
5827 /** @} */
5828
5829 /**
5830 * @defgroup cpu_idle_apis CPU Idling APIs
5831 * @ingroup kernel_apis
5832 * @{
5833 */
5834 /**
5835 * @brief Make the CPU idle.
5836 *
5837 * This function makes the CPU idle until an event wakes it up.
5838 *
5839 * In a regular system, the idle thread should be the only thread responsible
5840 * for making the CPU idle and triggering any type of power management.
5841 * However, in some more constrained systems, such as a single-threaded system,
5842  * that single thread is responsible for this when needed.
5843 *
5844 * @note In some architectures, before returning, the function unmasks interrupts
5845 * unconditionally.
5846 */
5847 static inline void k_cpu_idle(void)
5848 {
5849 arch_cpu_idle();
5850 }
5851
5852 /**
5853 * @brief Make the CPU idle in an atomic fashion.
5854 *
5855 * Similar to k_cpu_idle(), but must be called with interrupts locked.
5856 *
5857 * Enabling interrupts and entering a low-power mode will be atomic,
5858 * i.e. there will be no period of time where interrupts are enabled before
5859 * the processor enters a low-power mode.
5860 *
5861 * After waking up from the low-power mode, the interrupt lockout state will
5862 * be restored as if by irq_unlock(key).
5863 *
5864 * @param key Interrupt locking key obtained from irq_lock().
5865 */
5866 static inline void k_cpu_atomic_idle(unsigned int key)
5867 {
5868 arch_cpu_atomic_idle(key);
5869 }
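/*
 * Illustrative sketch (not part of the original header): the check-then-sleep
 * pattern that k_cpu_atomic_idle() exists for. 'work_ready' is an assumed
 * flag set from an interrupt handler; because the condition check and the
 * idle entry happen with interrupts locked, a wakeup cannot be lost between
 * them.
 *
 *     volatile bool work_ready;
 *
 *     void wait_for_work(void)
 *     {
 *         unsigned int key = irq_lock();
 *
 *         while (!work_ready) {
 *             k_cpu_atomic_idle(key);  // sleeps; unmasks interrupts atomically
 *             key = irq_lock();        // re-lock before re-checking the flag
 *         }
 *         irq_unlock(key);
 *     }
 */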
5870
5871 /**
5872 * @}
5873 */
5874
5875 /**
5876 * @cond INTERNAL_HIDDEN
5877 * @internal
5878 */
5879 #ifdef ARCH_EXCEPT
5880 /* This architecture has direct support for triggering a CPU exception */
5881 #define z_except_reason(reason) ARCH_EXCEPT(reason)
5882 #else
5883
5884 #if !defined(CONFIG_ASSERT_NO_FILE_INFO)
5885 #define __EXCEPT_LOC() __ASSERT_PRINT("@ %s:%d\n", __FILE__, __LINE__)
5886 #else
5887 #define __EXCEPT_LOC()
5888 #endif
5889
5890 /* NOTE: This is the implementation for arches that do not implement
5891 * ARCH_EXCEPT() to generate a real CPU exception.
5892 *
5893 * We won't have a real exception frame to determine the PC value when
5894 * the oops occurred, so print file and line number before we jump into
5895 * the fatal error handler.
5896 */
5897 #define z_except_reason(reason) do { \
5898 __EXCEPT_LOC(); \
5899 z_fatal_error(reason, NULL); \
5900 } while (false)
5901
5902 #endif /* ARCH_EXCEPT */
5903 /**
5904 * INTERNAL_HIDDEN @endcond
5905 */
5906
5907 /**
5908 * @brief Fatally terminate a thread
5909 *
5910 * This should be called when a thread has encountered an unrecoverable
5911 * runtime condition and needs to terminate. What this ultimately
5912 * means is determined by the _fatal_error_handler() implementation, which
5913  * will be called with reason code K_ERR_KERNEL_OOPS.
5914 *
5915 * If this is called from ISR context, the default system fatal error handler
5916 * will treat it as an unrecoverable system error, just like k_panic().
5917 */
5918 #define k_oops() z_except_reason(K_ERR_KERNEL_OOPS)
5919
5920 /**
5921 * @brief Fatally terminate the system
5922 *
5923 * This should be called when the Zephyr kernel has encountered an
5924 * unrecoverable runtime condition and needs to terminate. What this ultimately
5925 * means is determined by the _fatal_error_handler() implementation, which
5926  * will be called with reason code K_ERR_KERNEL_PANIC.
5927 */
5928 #define k_panic() z_except_reason(K_ERR_KERNEL_PANIC)
5929
5930 /**
5931 * @cond INTERNAL_HIDDEN
5932 */
5933
5934 /*
5935 * private APIs that are utilized by one or more public APIs
5936 */
5937
5938 /**
5939 * @internal
5940 */
5941 #ifdef CONFIG_MULTITHREADING
5942 /**
5943 * @internal
5944 */
5945 void z_init_static_threads(void);
5946 #else
5947 /**
5948 * @internal
5949 */
5950 #define z_init_static_threads() do { } while (false)
5951 #endif
5952
5953 /**
5954 * @internal
5955 */
5956 void z_timer_expiration_handler(struct _timeout *t);
5957 /**
5958 * INTERNAL_HIDDEN @endcond
5959 */
5960
5961 #ifdef CONFIG_PRINTK
5962 /**
5963 * @brief Emit a character buffer to the console device
5964 *
5965 * @param c String of characters to print
5966 * @param n The length of the string
5967 *
5968 */
5969 __syscall void k_str_out(char *c, size_t n);
5970 #endif
5971
5972 /**
5973 * @defgroup float_apis Floating Point APIs
5974 * @ingroup kernel_apis
5975 * @{
5976 */
5977
5978 /**
5979 * @brief Disable preservation of floating point context information.
5980 *
5981 * This routine informs the kernel that the specified thread
5982 * will no longer be using the floating point registers.
5983 *
5984 * @warning
5985 * Some architectures apply restrictions on how the disabling of floating
5986 * point preservation may be requested, see arch_float_disable.
5987 *
5988 * @warning
5989 * This routine should only be used to disable floating point support for
5990 * a thread that currently has such support enabled.
5991 *
5992 * @param thread ID of thread.
5993 *
5994 * @retval 0 On success.
5995 * @retval -ENOTSUP If the floating point disabling is not implemented.
5996  * @retval -EINVAL If the floating point disabling could not be performed.
5997 */
5998 __syscall int k_float_disable(struct k_thread *thread);
5999
6000 /**
6001 * @brief Enable preservation of floating point context information.
6002 *
6003 * This routine informs the kernel that the specified thread
6004 * will use the floating point registers.
6005  *
6006 * Invoking this routine initializes the thread's floating point context info
6007 * to that of an FPU that has been reset. The next time the thread is scheduled
6008 * by z_swap() it will either inherit an FPU that is guaranteed to be in a
6009 * "sane" state (if the most recent user of the FPU was cooperatively swapped
6010 * out) or the thread's own floating point context will be loaded (if the most
6011 * recent user of the FPU was preempted, or if this thread is the first user
6012 * of the FPU). Thereafter, the kernel will protect the thread's FP context
6013 * so that it is not altered during a preemptive context switch.
6014 *
6015 * The @a options parameter indicates which floating point register sets will
6016 * be used by the specified thread.
6017 *
6018 * For x86 options:
6019 *
6020 * - K_FP_REGS indicates x87 FPU and MMX registers only
6021 * - K_SSE_REGS indicates SSE registers (and also x87 FPU and MMX registers)
6022 *
6023 * @warning
6024 * Some architectures apply restrictions on how the enabling of floating
6025 * point preservation may be requested, see arch_float_enable.
6026 *
6027 * @warning
6028 * This routine should only be used to enable floating point support for
6029  * a thread that does not currently have such support enabled.
6030 *
6031 * @param thread ID of thread.
6032 * @param options architecture dependent options
6033 *
6034 * @retval 0 On success.
6035 * @retval -ENOTSUP If the floating point enabling is not implemented.
6036  * @retval -EINVAL If the floating point enabling could not be performed.
6037 */
6038 __syscall int k_float_enable(struct k_thread *thread, unsigned int options);
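/*
 * Illustrative sketch (not part of the original header): requesting FP
 * context preservation for a thread before it starts using the FPU.
 * 'fp_tid' is an assumed thread ID, and K_FP_REGS is assumed to be
 * available on the target architecture (it is listed for x86 above).
 *
 *     int ret = k_float_enable(fp_tid, K_FP_REGS);
 *
 *     if (ret == -ENOTSUP) {
 *         // selective FP enabling is not implemented on this architecture
 *     }
 */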
6039
6040 /**
6041 * @}
6042 */
6043
6044 /**
6045 * @brief Get the runtime statistics of a thread
6046 *
6047 * @param thread ID of thread.
6048 * @param stats Pointer to struct to copy statistics into.
6049 * @return -EINVAL if null pointers, otherwise 0
6050 */
6051 int k_thread_runtime_stats_get(k_tid_t thread,
6052 k_thread_runtime_stats_t *stats);
6053
6054 /**
6055 * @brief Get the runtime statistics of all threads
6056 *
6057 * @param stats Pointer to struct to copy statistics into.
6058 * @return -EINVAL if null pointers, otherwise 0
6059 */
6060 int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats);
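/*
 * Illustrative sketch (not part of the original header): reading the current
 * thread's runtime statistics. It assumes runtime statistics are enabled in
 * the build (CONFIG_THREAD_RUNTIME_STATS) and that the stats structure
 * exposes an 'execution_cycles' field.
 *
 *     k_thread_runtime_stats_t stats;
 *
 *     if (k_thread_runtime_stats_get(k_current_get(), &stats) == 0) {
 *         // stats.execution_cycles: cycles consumed by this thread so far
 *     }
 */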
6061
6062 /**
6063 * @brief Enable gathering of runtime statistics for specified thread
6064 *
6065 * This routine enables the gathering of runtime statistics for the specified
6066 * thread.
6067 *
6068 * @param thread ID of thread
6069 * @return -EINVAL if invalid thread ID, otherwise 0
6070 */
6071 int k_thread_runtime_stats_enable(k_tid_t thread);
6072
6073 /**
6074 * @brief Disable gathering of runtime statistics for specified thread
6075 *
6076 * This routine disables the gathering of runtime statistics for the specified
6077 * thread.
6078 *
6079 * @param thread ID of thread
6080 * @return -EINVAL if invalid thread ID, otherwise 0
6081 */
6082 int k_thread_runtime_stats_disable(k_tid_t thread);
6083
6084 /**
6085 * @brief Enable gathering of system runtime statistics
6086 *
6087 * This routine enables the gathering of system runtime statistics. Note that
6088 * it does not affect the gathering of similar statistics for individual
6089 * threads.
6090 */
6091 void k_sys_runtime_stats_enable(void);
6092
6093 /**
6094 * @brief Disable gathering of system runtime statistics
6095 *
6096 * This routine disables the gathering of system runtime statistics. Note that
6097 * it does not affect the gathering of similar statistics for individual
6098 * threads.
6099 */
6100 void k_sys_runtime_stats_disable(void);
6101
6102 #ifdef __cplusplus
6103 }
6104 #endif
6105
6106 #include <zephyr/tracing/tracing.h>
6107 #include <syscalls/kernel.h>
6108
6109 #endif /* !_ASMLANGUAGE */
6110
6111 #endif /* ZEPHYR_INCLUDE_KERNEL_H_ */
6112