1 /*
2 * Copyright (c) 2016, Wind River Systems, Inc.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 /**
8 * @file
9 *
10 * @brief Public kernel APIs.
11 */
12
13 #ifndef ZEPHYR_INCLUDE_KERNEL_H_
14 #define ZEPHYR_INCLUDE_KERNEL_H_
15
16 #if !defined(_ASMLANGUAGE)
17 #include <kernel_includes.h>
18 #include <errno.h>
19 #include <limits.h>
20 #include <stdbool.h>
21 #include <toolchain.h>
22 #include <tracing/tracing_macros.h>
23
24 #ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
25 #include <timing/timing.h>
26 #endif
27
28 #ifdef __cplusplus
29 extern "C" {
30 #endif
31
32 /**
33 * @brief Kernel APIs
34 * @defgroup kernel_apis Kernel APIs
35 * @{
36 * @}
37 */
38
39 #define K_ANY NULL
40 #define K_END NULL
41
42 #if CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES == 0
43 #error Zero available thread priorities defined!
44 #endif
45
46 #define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
47 #define K_PRIO_PREEMPT(x) (x)
48
49 #define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
50 #define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
51 #define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
52 #define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
53 #define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)
54
55 #ifdef CONFIG_POLL
56 #define _POLL_EVENT_OBJ_INIT(obj) \
57 .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
58 #define _POLL_EVENT sys_dlist_t poll_events
59 #else
60 #define _POLL_EVENT_OBJ_INIT(obj)
61 #define _POLL_EVENT
62 #endif
63
64 struct k_thread;
65 struct k_mutex;
66 struct k_sem;
67 struct k_msgq;
68 struct k_mbox;
69 struct k_pipe;
70 struct k_queue;
71 struct k_fifo;
72 struct k_lifo;
73 struct k_stack;
74 struct k_mem_slab;
75 struct k_mem_pool;
76 struct k_timer;
77 struct k_poll_event;
78 struct k_poll_signal;
79 struct k_mem_domain;
80 struct k_mem_partition;
81 struct k_futex;
82
83 enum execution_context_types {
84 K_ISR = 0,
85 K_COOP_THREAD,
86 K_PREEMPT_THREAD,
87 };
88
89 /* private, used by k_poll and k_work_poll */
90 struct k_work_poll;
91 typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);
92
93 /**
94 * @addtogroup thread_apis
95 * @{
96 */
97
98 typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
99 void *user_data);
100
101 /**
102 * @brief Iterate over all the threads in the system.
103 *
104 * This routine iterates over all the threads in the system and
105 * calls the user_cb function for each thread.
106 *
107 * @param user_cb Pointer to the user callback function.
108 * @param user_data Pointer to user data.
109 *
110 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
111 * to be effective.
112 * @note This API uses @ref k_spin_lock to protect the _kernel.threads
113 * list which means creation of new threads and terminations of existing
114 * threads are blocked until this API returns.
115 *
116 * @return N/A
117 */
118 extern void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);
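/*
 * Illustrative sketch (not part of the API itself): counting all threads
 * with k_thread_foreach(). The callback and counter names are hypothetical.
 *
 *   static void count_cb(const struct k_thread *thread, void *user_data)
 *   {
 *       int *count = user_data;
 *
 *       (*count)++;
 *   }
 *
 *   int num_threads = 0;
 *
 *   k_thread_foreach(count_cb, &num_threads);
 */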
119
120 /**
121 * @brief Iterate over all the threads in the system without locking.
122 *
123 * This routine works exactly the same as @ref k_thread_foreach
124 * but unlocks interrupts while user_cb is executed.
125 *
126 * @param user_cb Pointer to the user callback function.
127 * @param user_data Pointer to user data.
128 *
129 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
130 * to be effective.
131 * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
132 * queue elements. It unlocks it during user callback function processing.
133 * If a new thread is created while this @c foreach function is in progress,
134 * the newly added thread will not be included in the enumeration.
135 * If a thread is aborted during this enumeration, there is a race and the
136 * aborted thread may or may not be included in the enumeration.
137 *
138 * @note If a thread is aborted and the memory occupied by its @c k_thread
139 * structure is reused while @c k_thread_foreach_unlocked is in progress,
140 * the system may behave unpredictably.
141 * This function may never return, as it would follow @c next thread
142 * pointers while treating the given pointer as a pointer to a k_thread
143 * structure even though it now refers to something else.
144 * Do not reuse the memory occupied by the k_thread structure of an aborted
145 * thread if it was aborted after this function was called, in any context.
146 */
147 extern void k_thread_foreach_unlocked(
148 k_thread_user_cb_t user_cb, void *user_data);
149
150 /** @} */
151
152 /**
153 * @defgroup thread_apis Thread APIs
154 * @ingroup kernel_apis
155 * @{
156 */
157
158 #endif /* !_ASMLANGUAGE */
159
160
161 /*
162 * Thread user options. May be needed by assembly code. The common part uses
163 * the low bits; arch-specific options use the high bits.
164 */
165
166 /**
167 * @brief system thread that must not abort
168 * */
169 #define K_ESSENTIAL (BIT(0))
170
171 #if defined(CONFIG_FPU_SHARING)
172 /**
173 * @brief FPU registers are managed by context switch
174 *
175 * @details
176 * This option indicates that the thread uses the CPU's floating point
177 * registers. This instructs the kernel to take additional steps to save
178 * and restore the contents of these registers when scheduling the thread.
179 * No effect if @kconfig{CONFIG_FPU_SHARING} is not enabled.
180 */
181 #define K_FP_REGS (BIT(1))
182 #endif
183
184 /**
185 * @brief user mode thread
186 *
187 * This thread has dropped from supervisor mode to user mode and consequently
188 * has additional restrictions
189 */
190 #define K_USER (BIT(2))
191
192 /**
193 * @brief Inherit Permissions
194 *
195 * @details
196 * Indicates that the thread being created should inherit all kernel object
197 * permissions from the thread that created it. No effect if
198 * @kconfig{CONFIG_USERSPACE} is not enabled.
199 */
200 #define K_INHERIT_PERMS (BIT(3))
201
202 /**
203 * @brief Callback item state
204 *
205 * @details
206 * This is a single bit of state reserved for "callback manager"
207 * utilities (p4wq initially) that need to track whether they are
208 * currently running inside a user-provided callback they have invoked.
209 * Effectively it serves as a tiny bit of zero-overhead TLS data.
210 */
211 #define K_CALLBACK_STATE (BIT(4))
212
213 #ifdef CONFIG_X86
214 /* x86 Bitmask definitions for threads user options */
215
216 #if defined(CONFIG_FPU_SHARING) && defined(CONFIG_X86_SSE)
217 /**
218 * @brief FP and SSE registers are managed by context switch on x86
219 *
220 * @details
221 * This option indicates that the thread uses the x86 CPU's floating point
222 * and SSE registers. This instructs the kernel to take additional steps to
223 * save and restore the contents of these registers when scheduling
224 * the thread. No effect if @kconfig{CONFIG_X86_SSE} is not enabled.
225 */
226 #define K_SSE_REGS (BIT(7))
227 #endif
228 #endif
229
230 /* end - thread options */
231
232 #if !defined(_ASMLANGUAGE)
233 /**
234 * @brief Create a thread.
235 *
236 * This routine initializes a thread, then schedules it for execution.
237 *
238 * The new thread may be scheduled for immediate execution or a delayed start.
239 * If the newly spawned thread does not have a delayed start the kernel
240 * scheduler may preempt the current thread to allow the new thread to
241 * execute.
242 *
243 * Thread options are architecture-specific, and can include K_ESSENTIAL,
244 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
245 * them using "|" (the logical OR operator).
246 *
247 * Stack objects passed to this function must be originally defined with
248 * either of these macros in order to be portable:
249 *
250 * - K_THREAD_STACK_DEFINE() - For stacks that may support either user or
251 * supervisor threads.
252 * - K_KERNEL_STACK_DEFINE() - For stacks that may support supervisor
253 * threads only. These stacks use less memory if CONFIG_USERSPACE is
254 * enabled.
255 *
256 * The stack_size parameter has constraints. It must either be:
257 *
258 * - The original size value passed to K_THREAD_STACK_DEFINE() or
259 * K_KERNEL_STACK_DEFINE()
260 * - The return value of K_THREAD_STACK_SIZEOF(stack) if the stack was
261 * defined with K_THREAD_STACK_DEFINE()
262 * - The return value of K_KERNEL_STACK_SIZEOF(stack) if the stack was
263 * defined with K_KERNEL_STACK_DEFINE().
264 *
265 * Using other values, or sizeof(stack) may produce undefined behavior.
266 *
267 * @param new_thread Pointer to uninitialized struct k_thread
268 * @param stack Pointer to the stack space.
269 * @param stack_size Stack size in bytes.
270 * @param entry Thread entry function.
271 * @param p1 1st entry point parameter.
272 * @param p2 2nd entry point parameter.
273 * @param p3 3rd entry point parameter.
274 * @param prio Thread priority.
275 * @param options Thread options.
276 * @param delay Scheduling delay, or K_NO_WAIT (for no delay).
277 *
278 * @return ID of new thread.
279 *
280 */
281 __syscall k_tid_t k_thread_create(struct k_thread *new_thread,
282 k_thread_stack_t *stack,
283 size_t stack_size,
284 k_thread_entry_t entry,
285 void *p1, void *p2, void *p3,
286 int prio, uint32_t options, k_timeout_t delay);
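/*
 * Illustrative sketch of creating a thread dynamically, assuming a
 * hypothetical entry point my_entry() and hypothetical stack/thread names:
 *
 *   K_THREAD_STACK_DEFINE(my_stack, 1024);
 *   static struct k_thread my_thread;
 *
 *   void my_entry(void *p1, void *p2, void *p3) { ... }
 *
 *   k_tid_t tid = k_thread_create(&my_thread, my_stack,
 *                                 K_THREAD_STACK_SIZEOF(my_stack),
 *                                 my_entry, NULL, NULL, NULL,
 *                                 K_PRIO_PREEMPT(7), 0, K_NO_WAIT);
 */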
287
288 /**
289 * @brief Drop a thread's privileges permanently to user mode
290 *
291 * This allows a supervisor thread to be re-used as a user thread.
292 * This function does not return, but control will transfer to the provided
293 * entry point as if this was a new user thread.
294 *
295 * The implementation ensures that the stack buffer contents are erased.
296 * Any thread-local storage will be reverted to a pristine state.
297 *
298 * Memory domain membership, resource pool assignment, kernel object
299 * permissions, priority, and thread options are preserved.
300 *
301 * A common use of this function is to re-use the main thread as a user thread
302 * once all supervisor mode-only tasks have been completed.
303 *
304 * @param entry Function to start executing from
305 * @param p1 1st entry point parameter
306 * @param p2 2nd entry point parameter
307 * @param p3 3rd entry point parameter
308 */
309 extern FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
310 void *p1, void *p2,
311 void *p3);
312
313 /**
314 * @brief Grant a thread access to a set of kernel objects
315 *
316 * This is a convenience function. For the provided thread, grant access to
317 * the remaining arguments, which must be pointers to kernel objects.
318 *
319 * The thread object must be initialized (i.e. running). The objects don't
320 * need to be.
321 * Note that NULL shouldn't be passed as an argument.
322 *
323 * @param thread Thread to grant access to objects
324 * @param ... list of kernel object pointers
325 */
326 #define k_thread_access_grant(thread, ...) \
327 FOR_EACH_FIXED_ARG(k_object_access_grant, (;), thread, __VA_ARGS__)
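/*
 * Illustrative sketch: granting a user thread access to a semaphore and a
 * message queue before it needs them. The object names are hypothetical.
 *
 *   k_thread_access_grant(worker_tid, &my_sem, &my_msgq);
 */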
328
329 /**
330 * @brief Assign a resource memory pool to a thread
331 *
332 * By default, threads have no resource pool assigned unless their parent
333 * thread has a resource pool, in which case it is inherited. Multiple
334 * threads may be assigned to the same memory pool.
335 *
336 * Changing a thread's resource pool will not migrate allocations from the
337 * previous pool.
338 *
339 * @param thread Target thread to assign a memory pool for resource requests.
340 * @param heap Heap object to use for resources,
341 * or NULL if the thread should no longer have a memory pool.
342 */
343 static inline void k_thread_heap_assign(struct k_thread *thread,
344 struct k_heap *heap)
345 {
346 thread->resource_pool = heap;
347 }
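/*
 * Illustrative sketch: giving a child thread a dedicated resource pool so
 * that kernel-side allocations made on its behalf (e.g. k_queue_alloc_append())
 * come from a known heap. The heap name and size are hypothetical.
 *
 *   K_HEAP_DEFINE(child_pool, 2048);
 *
 *   k_thread_heap_assign(&child_thread, &child_pool);
 */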
348
349 #if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
350 /**
351 * @brief Obtain stack usage information for the specified thread
352 *
353 * User threads will need to have permission on the target thread object.
354 *
355 * Some hardware may prevent inspection of a stack buffer currently in use.
356 * If this API is called from supervisor mode, on the currently running thread,
357 * on a platform which selects @kconfig{CONFIG_NO_UNUSED_STACK_INSPECTION}, an
358 * error will be generated.
359 *
360 * @param thread Thread to inspect stack information
361 * @param unused_ptr Output parameter, filled in with the unused stack space
362 * of the target thread in bytes.
363 * @return 0 on success
364 * @return -EBADF Bad thread object (user mode only)
365 * @return -EPERM No permissions on thread object (user mode only)
366 * @return -ENOTSUP Forbidden by hardware policy
367 * @return -EINVAL Thread is uninitialized or exited (user mode only)
368 * @return -EFAULT Bad memory address for unused_ptr (user mode only)
369 */
370 __syscall int k_thread_stack_space_get(const struct k_thread *thread,
371 size_t *unused_ptr);
372 #endif
373
374 #if (CONFIG_HEAP_MEM_POOL_SIZE > 0)
375 /**
376 * @brief Assign the system heap as a thread's resource pool
377 *
378 * Similar to k_thread_heap_assign(), but the thread will use
379 * the kernel heap to draw memory.
380 *
381 * Use with caution, as a malicious thread could perform DoS attacks on the
382 * kernel heap.
383 *
384 * @param thread Target thread to assign the system heap for resource requests
385 *
386 */
387 void k_thread_system_pool_assign(struct k_thread *thread);
388 #endif /* (CONFIG_HEAP_MEM_POOL_SIZE > 0) */
389
390 /**
391 * @brief Sleep until a thread exits
392 *
393 * The caller will be put to sleep until the target thread exits, either due
394 * to being aborted, self-exiting, or taking a fatal error. This API returns
395 * immediately if the thread isn't running.
396 *
397 * This API may only be called from ISRs with a K_NO_WAIT timeout,
398 * where it can be useful as a predicate to detect when a thread has
399 * aborted.
400 *
401 * @param thread Thread to wait to exit
402 * @param timeout upper bound time to wait for the thread to exit.
403 * @retval 0 success, target thread has exited or wasn't running
404 * @retval -EBUSY returned without waiting
405 * @retval -EAGAIN waiting period timed out
406 * @retval -EDEADLK target thread is joining on the caller, or target thread
407 * is the caller
408 */
409 __syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);
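/*
 * Illustrative sketch: waiting up to one second for a (hypothetical) worker
 * thread to terminate.
 *
 *   int ret = k_thread_join(&worker_thread, K_SECONDS(1));
 *
 *   if (ret == -EAGAIN) {
 *       // worker_thread is still running after the timeout
 *   }
 */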
410
411 /**
412 * @brief Put the current thread to sleep.
413 *
414 * This routine puts the current thread to sleep for @a duration,
415 * specified as a k_timeout_t object.
416 *
417 * @note If @a timeout is set to K_FOREVER then the thread is suspended.
418 *
419 * @param timeout Desired duration of sleep.
420 *
421 * @return Zero if the requested time has elapsed, or the number of milliseconds
422 * left to sleep if the thread was woken up by a \ref k_wakeup call.
423 */
424 __syscall int32_t k_sleep(k_timeout_t timeout);
425
426 /**
427 * @brief Put the current thread to sleep.
428 *
429 * This routine puts the current thread to sleep for @a duration milliseconds.
430 *
431 * @param ms Number of milliseconds to sleep.
432 *
433 * @return Zero if the requested time has elapsed, or the number of milliseconds
434 * left to sleep if the thread was woken up by a \ref k_wakeup call.
435 */
436 static inline int32_t k_msleep(int32_t ms)
437 {
438 return k_sleep(Z_TIMEOUT_MS(ms));
439 }
440
441 /**
442 * @brief Put the current thread to sleep with microsecond resolution.
443 *
444 * This function is unlikely to work as expected without kernel tuning.
445 * In particular, because the lower bound on the duration of a sleep is
446 * the duration of a tick, @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} must be
447 * adjusted to achieve the resolution desired. The implications of doing
448 * this must be understood before attempting to use k_usleep(). Use with
449 * caution.
450 *
451 * @param us Number of microseconds to sleep.
452 *
453 * @return Zero if the requested time has elapsed, or the number of microseconds
454 * left to sleep if the thread was woken up by a \ref k_wakeup call.
455 */
456 __syscall int32_t k_usleep(int32_t us);
457
458 /**
459 * @brief Cause the current thread to busy wait.
460 *
461 * This routine causes the current thread to execute a "do nothing" loop for
462 * @a usec_to_wait microseconds.
463 *
464 * @note The clock used for the microsecond-resolution delay here may
465 * be skewed relative to the clock used for system timeouts like
466 * k_sleep(). For example k_busy_wait(1000) may take slightly more or
467 * less time than k_sleep(K_MSEC(1)), with the offset dependent on
468 * clock tolerances.
469 *
470 * @return N/A
471 */
472 __syscall void k_busy_wait(uint32_t usec_to_wait);
473
474 /**
475 * @brief Yield the current thread.
476 *
477 * This routine causes the current thread to yield execution to another
478 * thread of the same or higher priority. If there are no other ready threads
479 * of the same or higher priority, the routine returns immediately.
480 *
481 * @return N/A
482 */
483 __syscall void k_yield(void);
484
485 /**
486 * @brief Wake up a sleeping thread.
487 *
488 * This routine prematurely wakes up @a thread from sleeping.
489 *
490 * If @a thread is not currently sleeping, the routine has no effect.
491 *
492 * @param thread ID of thread to wake.
493 *
494 * @return N/A
495 */
496 __syscall void k_wakeup(k_tid_t thread);
497
498 /**
499 * @brief Get thread ID of the current thread.
500 *
501 * This unconditionally queries the kernel via a system call.
502 *
503 * @return ID of current thread.
504 */
505 __attribute_const__
506 __syscall k_tid_t z_current_get(void);
507
508 #ifdef CONFIG_THREAD_LOCAL_STORAGE
509 /* Thread-local cache of current thread ID, set in z_thread_entry() */
510 extern __thread k_tid_t z_tls_current;
511 #endif
512
513 /**
514 * @brief Get thread ID of the current thread.
515 *
516 * @return ID of current thread.
517 *
518 */
519 __attribute_const__
520 static inline k_tid_t k_current_get(void)
521 {
522 #ifdef CONFIG_THREAD_LOCAL_STORAGE
523 return z_tls_current;
524 #else
525 return z_current_get();
526 #endif
527 }
528
529 /**
530 * @brief Abort a thread.
531 *
532 * This routine permanently stops execution of @a thread. The thread is taken
533 * off all kernel queues it is part of (i.e. the ready queue, the timeout
534 * queue, or a kernel object wait queue). However, any kernel resources the
535 * thread might currently own (such as mutexes or memory blocks) are not
536 * released. It is the responsibility of the caller of this routine to ensure
537 * all necessary cleanup is performed.
538 *
539 * After k_thread_abort() returns, the thread is guaranteed not to be
540 * running or to become runnable anywhere on the system. Normally
541 * this is done via blocking the caller (in the same manner as
542 * k_thread_join()), but in interrupt context on SMP systems the
543 * implementation is required to spin for threads that are running on
544 * other CPUs. Note that as specified, this means that on SMP
545 * platforms it is possible for application code to create a deadlock
546 * condition by simultaneously aborting a cycle of threads using at
547 * least one termination from interrupt context. Zephyr cannot detect
548 * all such conditions.
549 *
550 * @param thread ID of thread to abort.
551 *
552 * @return N/A
553 */
554 __syscall void k_thread_abort(k_tid_t thread);
555
556
557 /**
558 * @brief Start an inactive thread
559 *
560 * If a thread was created with K_FOREVER in the delay parameter, it will
561 * not be added to the scheduling queue until this function is called
562 * on it.
563 *
564 * @param thread thread to start
565 */
566 __syscall void k_thread_start(k_tid_t thread);
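/*
 * Illustrative sketch: creating a thread with a K_FOREVER delay so it stays
 * inactive, then starting it later. The names are hypothetical.
 *
 *   k_tid_t tid = k_thread_create(&my_thread, my_stack,
 *                                 K_THREAD_STACK_SIZEOF(my_stack),
 *                                 my_entry, NULL, NULL, NULL,
 *                                 K_PRIO_PREEMPT(5), 0, K_FOREVER);
 *
 *   // ... later, once the thread should run:
 *   k_thread_start(tid);
 */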
567
568 extern k_ticks_t z_timeout_expires(const struct _timeout *timeout);
569 extern k_ticks_t z_timeout_remaining(const struct _timeout *timeout);
570
571 #ifdef CONFIG_SYS_CLOCK_EXISTS
572
573 /**
574 * @brief Get time when a thread wakes up, in system ticks
575 *
576 * This routine computes the system uptime when a waiting thread next
577 * executes, in units of system ticks. If the thread is not waiting,
578 * it returns the current system time.
579 */
580 __syscall k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *t);
581
582 static inline k_ticks_t z_impl_k_thread_timeout_expires_ticks(
583 const struct k_thread *t)
584 {
585 return z_timeout_expires(&t->base.timeout);
586 }
587
588 /**
589 * @brief Get time remaining before a thread wakes up, in system ticks
590 *
591 * This routine computes the time remaining before a waiting thread
592 * next executes, in units of system ticks. If the thread is not
593 * waiting, it returns zero.
594 */
595 __syscall k_ticks_t k_thread_timeout_remaining_ticks(const struct k_thread *t);
596
597 static inline k_ticks_t z_impl_k_thread_timeout_remaining_ticks(
598 const struct k_thread *t)
599 {
600 return z_timeout_remaining(&t->base.timeout);
601 }
602
603 #endif /* CONFIG_SYS_CLOCK_EXISTS */
604
605 /**
606 * @cond INTERNAL_HIDDEN
607 */
608
609 /* timeout has timed out and is not on _timeout_q anymore */
610 #define _EXPIRED (-2)
611
612 struct _static_thread_data {
613 struct k_thread *init_thread;
614 k_thread_stack_t *init_stack;
615 unsigned int init_stack_size;
616 k_thread_entry_t init_entry;
617 void *init_p1;
618 void *init_p2;
619 void *init_p3;
620 int init_prio;
621 uint32_t init_options;
622 int32_t init_delay;
623 void (*init_abort)(void);
624 const char *init_name;
625 };
626
627 #define Z_THREAD_INITIALIZER(thread, stack, stack_size, \
628 entry, p1, p2, p3, \
629 prio, options, delay, abort, tname) \
630 { \
631 .init_thread = (thread), \
632 .init_stack = (stack), \
633 .init_stack_size = (stack_size), \
634 .init_entry = (k_thread_entry_t)entry, \
635 .init_p1 = (void *)p1, \
636 .init_p2 = (void *)p2, \
637 .init_p3 = (void *)p3, \
638 .init_prio = (prio), \
639 .init_options = (options), \
640 .init_delay = (delay), \
641 .init_abort = (abort), \
642 .init_name = STRINGIFY(tname), \
643 }
644
645 /**
646 * INTERNAL_HIDDEN @endcond
647 */
648
649 /**
650 * @brief Statically define and initialize a thread.
651 *
652 * The thread may be scheduled for immediate execution or a delayed start.
653 *
654 * Thread options are architecture-specific, and can include K_ESSENTIAL,
655 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
656 * them using "|" (the logical OR operator).
657 *
658 * The ID of the thread can be accessed using:
659 *
660 * @code extern const k_tid_t <name>; @endcode
661 *
662 * @param name Name of the thread.
663 * @param stack_size Stack size in bytes.
664 * @param entry Thread entry function.
665 * @param p1 1st entry point parameter.
666 * @param p2 2nd entry point parameter.
667 * @param p3 3rd entry point parameter.
668 * @param prio Thread priority.
669 * @param options Thread options.
670 * @param delay Scheduling delay (in milliseconds), zero for no delay.
671 *
672 *
673 * @internal It has been observed that the x86 compiler by default aligns
674 * these _static_thread_data structures to 32-byte boundaries, thereby
675 * wasting space. To work around this, force a 4-byte alignment.
676 *
677 */
678 #define K_THREAD_DEFINE(name, stack_size, \
679 entry, p1, p2, p3, \
680 prio, options, delay) \
681 K_THREAD_STACK_DEFINE(_k_thread_stack_##name, stack_size); \
682 struct k_thread _k_thread_obj_##name; \
683 STRUCT_SECTION_ITERABLE(_static_thread_data, _k_thread_data_##name) = \
684 Z_THREAD_INITIALIZER(&_k_thread_obj_##name, \
685 _k_thread_stack_##name, stack_size, \
686 entry, p1, p2, p3, prio, options, delay, \
687 NULL, name); \
688 const k_tid_t name = (k_tid_t)&_k_thread_obj_##name
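/*
 * Illustrative sketch of a statically defined thread; the entry point and
 * parameters are hypothetical:
 *
 *   void blink_entry(void *p1, void *p2, void *p3) { ... }
 *
 *   K_THREAD_DEFINE(blink, 512, blink_entry, NULL, NULL, NULL,
 *                   K_PRIO_PREEMPT(7), 0, 0);
 *
 *   // elsewhere: extern const k_tid_t blink;
 */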
689
690 /**
691 * @brief Get a thread's priority.
692 *
693 * This routine gets the priority of @a thread.
694 *
695 * @param thread ID of thread whose priority is needed.
696 *
697 * @return Priority of @a thread.
698 */
699 __syscall int k_thread_priority_get(k_tid_t thread);
700
701 /**
702 * @brief Set a thread's priority.
703 *
704 * This routine immediately changes the priority of @a thread.
705 *
706 * Rescheduling can occur immediately depending on the priority @a thread is
707 * set to:
708 *
709 * - If its priority is raised above the priority of the caller of this
710 * function, and the caller is preemptible, @a thread will be scheduled in.
711 *
712 * - If the caller operates on itself and lowers its priority below that of
713 * other threads in the system, and the caller is preemptible, the highest
714 * priority thread will be scheduled in.
715 *
716 * Priority can be assigned in the range of -CONFIG_NUM_COOP_PRIORITIES to
717 * CONFIG_NUM_PREEMPT_PRIORITIES-1, where -CONFIG_NUM_COOP_PRIORITIES is the
718 * highest priority.
719 *
720 * @param thread ID of thread whose priority is to be set.
721 * @param prio New priority.
722 *
723 * @warning Changing the priority of a thread currently involved in mutex
724 * priority inheritance may result in undefined behavior.
725 *
726 * @return N/A
727 */
728 __syscall void k_thread_priority_set(k_tid_t thread, int prio);
729
730
731 #ifdef CONFIG_SCHED_DEADLINE
732 /**
733 * @brief Set deadline expiration time for scheduler
734 *
735 * This sets the "deadline" expiration as a time delta from the
736 * current time, in the same units used by k_cycle_get_32(). The
737 * scheduler (when deadline scheduling is enabled) will choose the
738 * next expiring thread when selecting between threads at the same
739 * static priority. Threads at different priorities will be scheduled
740 * according to their static priority.
741 *
742 * @note Deadlines are stored internally using 32 bit unsigned
743 * integers. The number of cycles between the "first" deadline in the
744 * scheduler queue and the "last" deadline must be less than 2^31 (i.e
745 * a signed non-negative quantity). Failure to adhere to this rule
746 * may result in scheduled threads running in an incorrect deadline
747 * order.
748 *
749 * @note Despite the API naming, the scheduler makes no guarantees that
750 * the thread WILL be scheduled within that deadline, nor does it take
751 * extra metadata (like e.g. the "runtime" and "period" parameters in
752 * Linux sched_setattr()) that allows the kernel to validate the
753 * scheduling for achievability. Such features could be implemented
754 * above this call, which is simply input to the priority selection
755 * logic.
756 *
757 * @note You should enable @kconfig{CONFIG_SCHED_DEADLINE} in your project
758 * configuration.
759 *
760 * @param thread A thread on which to set the deadline
761 * @param deadline A time delta, in cycle units
762 *
763 */
764 __syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
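/*
 * Illustrative sketch (requires CONFIG_SCHED_DEADLINE), assuming the
 * k_ms_to_cyc_ceil32() conversion helper: give the current thread a deadline
 * 10 ms from now, expressed in hardware cycles.
 *
 *   k_thread_deadline_set(k_current_get(), k_ms_to_cyc_ceil32(10));
 */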
765 #endif
766
767 #ifdef CONFIG_SCHED_CPU_MASK
768 /**
769 * @brief Sets all CPU enable masks to zero
770 *
771 * After this returns, the thread will no longer be schedulable on any
772 * CPUs. The thread must not be currently runnable.
773 *
774 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
775 * configuration.
776 *
777 * @param thread Thread to operate upon
778 * @return Zero on success, otherwise error code
779 */
780 int k_thread_cpu_mask_clear(k_tid_t thread);
781
782 /**
783 * @brief Sets all CPU enable masks to one
784 *
785 * After this returns, the thread will be schedulable on any CPU. The
786 * thread must not be currently runnable.
787 *
788 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
789 * configuration.
790 *
791 * @param thread Thread to operate upon
792 * @return Zero on success, otherwise error code
793 */
794 int k_thread_cpu_mask_enable_all(k_tid_t thread);
795
796 /**
797 * @brief Enable thread to run on specified CPU
798 *
799 * The thread must not be currently runnable.
800 *
801 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
802 * configuration.
803 *
804 * @param thread Thread to operate upon
805 * @param cpu CPU index
806 * @return Zero on success, otherwise error code
807 */
808 int k_thread_cpu_mask_enable(k_tid_t thread, int cpu);
809
810 /**
811 * @brief Prevent thread from running on specified CPU
812 *
813 * The thread must not be currently runnable.
814 *
815 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
816 * configuration.
817 *
818 * @param thread Thread to operate upon
819 * @param cpu CPU index
820 * @return Zero on success, otherwise error code
821 */
822 int k_thread_cpu_mask_disable(k_tid_t thread, int cpu);
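/*
 * Illustrative sketch (requires CONFIG_SCHED_CPU_MASK): pin a not-yet-started
 * thread to CPU 0 only. The thread ID is hypothetical.
 *
 *   k_thread_cpu_mask_clear(tid);
 *   k_thread_cpu_mask_enable(tid, 0);
 *   k_thread_start(tid);
 */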
823 #endif
824
825 /**
826 * @brief Suspend a thread.
827 *
828 * This routine prevents the kernel scheduler from making @a thread
829 * the current thread. All other internal operations on @a thread are
830 * still performed; for example, kernel objects it is waiting on are
831 * still handed to it. Note that any existing timeouts
832 * (e.g. k_sleep(), or a timeout argument to k_sem_take() et. al.)
833 * will be canceled. On resume, the thread will begin running
834 * immediately and return from the blocked call.
835 *
836 * If @a thread is already suspended, the routine has no effect.
837 *
838 * @param thread ID of thread to suspend.
839 *
840 * @return N/A
841 */
842 __syscall void k_thread_suspend(k_tid_t thread);
843
844 /**
845 * @brief Resume a suspended thread.
846 *
847 * This routine allows the kernel scheduler to make @a thread the current
848 * thread, when it is next eligible for that role.
849 *
850 * If @a thread is not currently suspended, the routine has no effect.
851 *
852 * @param thread ID of thread to resume.
853 *
854 * @return N/A
855 */
856 __syscall void k_thread_resume(k_tid_t thread);
857
858 /**
859 * @brief Set time-slicing period and scope.
860 *
861 * This routine specifies how the scheduler will perform time slicing of
862 * preemptible threads.
863 *
864 * To enable time slicing, @a slice must be non-zero. The scheduler
865 * ensures that no thread runs for more than the specified time limit
866 * before other threads of that priority are given a chance to execute.
867 * Any thread whose priority is higher than @a prio is exempted, and may
868 * execute as long as desired without being preempted due to time slicing.
869 *
870 * Time slicing only limits the maximum amount of time a thread may continuously
871 * execute. Once the scheduler selects a thread for execution, there is no
872 * minimum guaranteed time the thread will execute before threads of greater or
873 * equal priority are scheduled.
874 *
875 * When the current thread is the only one of that priority eligible
876 * for execution, this routine has no effect; the thread is immediately
877 * rescheduled after the slice period expires.
878 *
879 * To disable timeslicing, set both @a slice and @a prio to zero.
880 *
881 * @param slice Maximum time slice length (in milliseconds).
882 * @param prio Highest thread priority level eligible for time slicing.
883 *
884 * @return N/A
885 */
886 extern void k_sched_time_slice_set(int32_t slice, int prio);
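/*
 * Illustrative sketch: enable a 10 ms time slice for preemptible threads at
 * priority 0 and below (numerically >= 0).
 *
 *   k_sched_time_slice_set(10, 0);
 */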
887
888 /** @} */
889
890 /**
891 * @addtogroup isr_apis
892 * @{
893 */
894
895 /**
896 * @brief Determine if code is running at interrupt level.
897 *
898 * This routine allows the caller to customize its actions, depending on
899 * whether it is a thread or an ISR.
900 *
901 * @funcprops \isr_ok
902 *
903 * @return false if invoked by a thread.
904 * @return true if invoked by an ISR.
905 */
906 extern bool k_is_in_isr(void);
907
908 /**
909 * @brief Determine if code is running in a preemptible thread.
910 *
911 * This routine allows the caller to customize its actions, depending on
912 * whether it can be preempted by another thread. The routine returns a 'true'
913 * value if all of the following conditions are met:
914 *
915 * - The code is running in a thread, not in an ISR.
916 * - The thread's priority is in the preemptible range.
917 * - The thread has not locked the scheduler.
918 *
919 * @funcprops \isr_ok
920 *
921 * @return 0 if invoked by an ISR or by a cooperative thread.
922 * @return Non-zero if invoked by a preemptible thread.
923 */
924 __syscall int k_is_preempt_thread(void);
925
926 /**
927 * @brief Test whether startup is in the before-main-task phase.
928 *
929 * This routine allows the caller to customize its actions, depending on
930 * whether it is being invoked before the kernel is fully active.
931 *
932 * @funcprops \isr_ok
933 *
934 * @return true if invoked before post-kernel initialization
935 * @return false if invoked during/after post-kernel initialization
936 */
937 static inline bool k_is_pre_kernel(void)
938 {
939 extern bool z_sys_post_kernel; /* in init.c */
940
941 return !z_sys_post_kernel;
942 }
943
944 /**
945 * @}
946 */
947
948 /**
949 * @addtogroup thread_apis
950 * @{
951 */
952
953 /**
954 * @brief Lock the scheduler.
955 *
956 * This routine prevents the current thread from being preempted by another
957 * thread by instructing the scheduler to treat it as a cooperative thread.
958 * If the thread subsequently performs an operation that makes it unready,
959 * it will be context switched out in the normal manner. When the thread
960 * again becomes the current thread, its non-preemptible status is maintained.
961 *
962 * This routine can be called recursively.
963 *
964 * @note k_sched_lock() and k_sched_unlock() should normally be used
965 * when the operation being performed can be safely interrupted by ISRs.
966 * However, if the amount of processing involved is very small, better
967 * performance may be obtained by using irq_lock() and irq_unlock().
968 *
969 * @return N/A
970 */
971 extern void k_sched_lock(void);
972
973 /**
974 * @brief Unlock the scheduler.
975 *
976 * This routine reverses the effect of a previous call to k_sched_lock().
977 * A thread must call the routine once for each time it called k_sched_lock()
978 * before the thread becomes preemptible.
979 *
980 * @return N/A
981 */
982 extern void k_sched_unlock(void);
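/*
 * Illustrative sketch: briefly protect a critical section from preemption by
 * other threads (ISRs still run). The work function is hypothetical and must
 * not block.
 *
 *   k_sched_lock();
 *   update_shared_state();
 *   k_sched_unlock();
 */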
983
984 /**
985 * @brief Set current thread's custom data.
986 *
987 * This routine sets the custom data for the current thread to @a value.
988 *
989 * Custom data is not used by the kernel itself, and is freely available
990 * for a thread to use as it sees fit. It can be used as a framework
991 * upon which to build thread-local storage.
992 *
993 * @param value New custom data value.
994 *
995 * @return N/A
996 *
997 */
998 __syscall void k_thread_custom_data_set(void *value);
999
1000 /**
1001 * @brief Get current thread's custom data.
1002 *
1003 * This routine returns the custom data for the current thread.
1004 *
1005 * @return Current custom data value.
1006 */
1007 __syscall void *k_thread_custom_data_get(void);
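/*
 * Illustrative sketch (requires CONFIG_THREAD_CUSTOM_DATA): stashing a
 * per-thread context pointer in the custom data slot. The struct is
 * hypothetical.
 *
 *   static struct my_ctx ctx;
 *
 *   k_thread_custom_data_set(&ctx);
 *   // ... later, from the same thread:
 *   struct my_ctx *p = k_thread_custom_data_get();
 */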
1008
1009 /**
1010 * @brief Set current thread name
1011 *
1012 * Set the name of the thread to be used when @kconfig{CONFIG_THREAD_MONITOR}
1013 * is enabled for tracing and debugging.
1014 *
1015 * @param thread Thread to set name, or NULL to set the current thread
1016 * @param str Name string
1017 * @retval 0 on success
1018 * @retval -EFAULT Memory access error with supplied string
1019 * @retval -ENOSYS Thread name configuration option not enabled
1020 * @retval -EINVAL Thread name too long
1021 */
1022 __syscall int k_thread_name_set(k_tid_t thread, const char *str);
1023
1024 /**
1025 * @brief Get thread name
1026 *
1027 * Get the name of a thread
1028 *
1029 * @param thread Thread ID
1030 * @retval Thread name, or NULL if configuration not enabled
1031 */
1032 const char *k_thread_name_get(k_tid_t thread);
1033
1034 /**
1035 * @brief Copy the thread name into a supplied buffer
1036 *
1037 * @param thread Thread to obtain name information
1038 * @param buf Destination buffer
1039 * @param size Destination buffer size
1040 * @retval -ENOSPC Destination buffer too small
1041 * @retval -EFAULT Memory access error
1042 * @retval -ENOSYS Thread name feature not enabled
1043 * @retval 0 Success
1044 */
1045 __syscall int k_thread_name_copy(k_tid_t thread, char *buf,
1046 size_t size);
1047
1048 /**
1049 * @brief Get thread state string
1050 *
1051 * Get the human friendly thread state string
1052 *
1053 * @param thread_id Thread ID
1054 * @retval Thread state string, empty if no state flag is set
1055 */
1056 const char *k_thread_state_str(k_tid_t thread_id);
1057
1058 /**
1059 * @}
1060 */
1061
1062 /**
1063 * @addtogroup clock_apis
1064 * @{
1065 */
1066
1067 /**
1068 * @brief Generate null timeout delay.
1069 *
1070 * This macro generates a timeout delay that instructs a kernel API
1071 * not to wait if the requested operation cannot be performed immediately.
1072 *
1073 * @return Timeout delay value.
1074 */
1075 #define K_NO_WAIT Z_TIMEOUT_NO_WAIT
1076
1077 /**
1078 * @brief Generate timeout delay from nanoseconds.
1079 *
1080 * This macro generates a timeout delay that instructs a kernel API to
1081 * wait up to @a t nanoseconds to perform the requested operation.
1082 * Note that timer precision is limited to the tick rate, not the
1083 * requested value.
1084 *
1085 * @param t Duration in nanoseconds.
1086 *
1087 * @return Timeout delay value.
1088 */
1089 #define K_NSEC(t) Z_TIMEOUT_NS(t)
1090
1091 /**
1092 * @brief Generate timeout delay from microseconds.
1093 *
1094 * This macro generates a timeout delay that instructs a kernel API
1095 * to wait up to @a t microseconds to perform the requested operation.
1096 * Note that timer precision is limited to the tick rate, not the
1097 * requested value.
1098 *
1099 * @param t Duration in microseconds.
1100 *
1101 * @return Timeout delay value.
1102 */
1103 #define K_USEC(t) Z_TIMEOUT_US(t)
1104
1105 /**
1106 * @brief Generate timeout delay from cycles.
1107 *
1108 * This macro generates a timeout delay that instructs a kernel API
1109 * to wait up to @a t cycles to perform the requested operation.
1110 *
1111 * @param t Duration in cycles.
1112 *
1113 * @return Timeout delay value.
1114 */
1115 #define K_CYC(t) Z_TIMEOUT_CYC(t)
1116
1117 /**
1118 * @brief Generate timeout delay from system ticks.
1119 *
1120 * This macro generates a timeout delay that instructs a kernel API
1121 * to wait up to @a t ticks to perform the requested operation.
1122 *
1123 * @param t Duration in system ticks.
1124 *
1125 * @return Timeout delay value.
1126 */
1127 #define K_TICKS(t) Z_TIMEOUT_TICKS(t)
1128
1129 /**
1130 * @brief Generate timeout delay from milliseconds.
1131 *
1132 * This macro generates a timeout delay that instructs a kernel API
1133 * to wait up to @a ms milliseconds to perform the requested operation.
1134 *
1135 * @param ms Duration in milliseconds.
1136 *
1137 * @return Timeout delay value.
1138 */
1139 #define K_MSEC(ms) Z_TIMEOUT_MS(ms)
1140
1141 /**
1142 * @brief Generate timeout delay from seconds.
1143 *
1144 * This macro generates a timeout delay that instructs a kernel API
1145 * to wait up to @a s seconds to perform the requested operation.
1146 *
1147 * @param s Duration in seconds.
1148 *
1149 * @return Timeout delay value.
1150 */
1151 #define K_SECONDS(s) K_MSEC((s) * MSEC_PER_SEC)
1152
1153 /**
1154 * @brief Generate timeout delay from minutes.
1155 *
1156 * This macro generates a timeout delay that instructs a kernel API
1157 * to wait up to @a m minutes to perform the requested operation.
1158 *
1159 * @param m Duration in minutes.
1160 *
1161 * @return Timeout delay value.
1162 */
1163 #define K_MINUTES(m) K_SECONDS((m) * 60)
1164
1165 /**
1166 * @brief Generate timeout delay from hours.
1167 *
1168 * This macro generates a timeout delay that instructs a kernel API
1169 * to wait up to @a h hours to perform the requested operation.
1170 *
1171 * @param h Duration in hours.
1172 *
1173 * @return Timeout delay value.
1174 */
1175 #define K_HOURS(h) K_MINUTES((h) * 60)
1176
1177 /**
1178 * @brief Generate infinite timeout delay.
1179 *
1180 * This macro generates a timeout delay that instructs a kernel API
1181 * to wait as long as necessary to perform the requested operation.
1182 *
1183 * @return Timeout delay value.
1184 */
1185 #define K_FOREVER Z_FOREVER
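/*
 * Illustrative sketch: the timeout macros above are used wherever an API
 * takes a k_timeout_t, e.g. (the semaphore is hypothetical):
 *
 *   k_sleep(K_MSEC(100));           // sleep ~100 ms
 *   k_sem_take(&my_sem, K_NO_WAIT); // poll without waiting
 *   k_sem_take(&my_sem, K_FOREVER); // wait indefinitely
 */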
1186
1187 #ifdef CONFIG_TIMEOUT_64BIT
1188
1189 /**
1190 * @brief Generates an absolute/uptime timeout value from system ticks
1191 *
1192 * This macro generates a timeout delay that represents an expiration
1193 * at the absolute uptime value specified, in system ticks. That is, the
1194 * timeout will expire immediately after the system uptime reaches the
1195 * specified tick count.
1196 *
1197 * @param t Tick uptime value
1198 * @return Timeout delay value
1199 */
1200 #define K_TIMEOUT_ABS_TICKS(t) \
1201 Z_TIMEOUT_TICKS(Z_TICK_ABS((k_ticks_t)MAX(t, 0)))
1202
1203 /**
1204 * @brief Generates an absolute/uptime timeout value from milliseconds
1205 *
1206 * This macro generates a timeout delay that represents an expiration
1207 * at the absolute uptime value specified, in milliseconds. That is,
1208 * the timeout will expire immediately after the system uptime reaches
1209 * the specified time.
1210 *
1211 * @param t Millisecond uptime value
1212 * @return Timeout delay value
1213 */
1214 #define K_TIMEOUT_ABS_MS(t) K_TIMEOUT_ABS_TICKS(k_ms_to_ticks_ceil64(t))
1215
1216 /**
1217 * @brief Generates an absolute/uptime timeout value from microseconds
1218 *
1219 * This macro generates a timeout delay that represents an expiration
1220 * at the absolute uptime value specified, in microseconds. That is,
1221 * the timeout will expire immediately after the system uptime reaches
1222 * the specified time. Note that timer precision is limited by the
1223 * system tick rate and not the requested timeout value.
1224 *
1225 * @param t Microsecond uptime value
1226 * @return Timeout delay value
1227 */
1228 #define K_TIMEOUT_ABS_US(t) K_TIMEOUT_ABS_TICKS(k_us_to_ticks_ceil64(t))
1229
1230 /**
1231 * @brief Generates an absolute/uptime timeout value from nanoseconds
1232 *
1233 * This macro generates a timeout delay that represents an expiration
1234 * at the absolute uptime value specified, in nanoseconds. That is,
1235 * the timeout will expire immediately after the system uptime reaches
1236 * the specified time. Note that timer precision is limited by the
1237 * system tick rate and not the requested timeout value.
1238 *
1239 * @param t Nanosecond uptime value
1240 * @return Timeout delay value
1241 */
1242 #define K_TIMEOUT_ABS_NS(t) K_TIMEOUT_ABS_TICKS(k_ns_to_ticks_ceil64(t))
1243
1244 /**
1245 * @brief Generates an absolute/uptime timeout value from system cycles
1246 *
1247 * This macro generates a timeout delay that represents an expiration
1248 * at the absolute uptime value specified, in cycles. That is, the
1249 * timeout will expire immediately after the system uptime reaches the
1250 * specified time. Note that timer precision is limited by the system
1251 * tick rate and not the requested timeout value.
1252 *
1253 * @param t Cycle uptime value
1254 * @return Timeout delay value
1255 */
1256 #define K_TIMEOUT_ABS_CYC(t) K_TIMEOUT_ABS_TICKS(k_cyc_to_ticks_ceil64(t))
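/*
 * Illustrative sketch (requires CONFIG_TIMEOUT_64BIT): waking at an absolute
 * uptime rather than after a relative delay, avoiding drift in a periodic
 * loop. Variable and function names are hypothetical.
 *
 *   int64_t next_ms = k_uptime_get() + 500;
 *
 *   while (true) {
 *       k_sleep(K_TIMEOUT_ABS_MS(next_ms));
 *       do_work();
 *       next_ms += 500;
 *   }
 */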
1257
1258 #endif
1259
1260 /**
1261 * @}
1262 */
1263
1264 /**
1265 * @cond INTERNAL_HIDDEN
1266 */
1267
1268 struct k_timer {
1269 /*
1270 * _timeout structure must be first here if we want to use
1271 * dynamic timer allocation. timeout.node is used in the double-linked
1272 * list of free timers
1273 */
1274 struct _timeout timeout;
1275
1276 /* wait queue for the (single) thread waiting on this timer */
1277 _wait_q_t wait_q;
1278
1279 /* runs in ISR context */
1280 void (*expiry_fn)(struct k_timer *timer);
1281
1282 /* runs in the context of the thread that calls k_timer_stop() */
1283 void (*stop_fn)(struct k_timer *timer);
1284
1285 /* timer period */
1286 k_timeout_t period;
1287
1288 /* timer status */
1289 uint32_t status;
1290
1291 /* user-specific data, also used to support legacy features */
1292 void *user_data;
1293
1294 };
1295
1296 #define Z_TIMER_INITIALIZER(obj, expiry, stop) \
1297 { \
1298 .timeout = { \
1299 .node = {},\
1300 .fn = z_timer_expiration_handler, \
1301 .dticks = 0, \
1302 }, \
1303 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
1304 .expiry_fn = expiry, \
1305 .stop_fn = stop, \
1306 .status = 0, \
1307 .user_data = 0, \
1308 }
1309
1310 /**
1311 * INTERNAL_HIDDEN @endcond
1312 */
1313
1314 /**
1315 * @defgroup timer_apis Timer APIs
1316 * @ingroup kernel_apis
1317 * @{
1318 */
1319
1320 /**
1321 * @typedef k_timer_expiry_t
1322 * @brief Timer expiry function type.
1323 *
1324 * A timer's expiry function is executed by the system clock interrupt handler
1325 * each time the timer expires. The expiry function is optional, and is only
1326 * invoked if the timer has been initialized with one.
1327 *
1328 * @param timer Address of timer.
1329 *
1330 * @return N/A
1331 */
1332 typedef void (*k_timer_expiry_t)(struct k_timer *timer);
1333
1334 /**
1335 * @typedef k_timer_stop_t
1336 * @brief Timer stop function type.
1337 *
1338 * A timer's stop function is executed if the timer is stopped prematurely.
1339 * The function runs in the context of the call that stops the timer. As
1340 * k_timer_stop() can be invoked from an ISR, the stop function must be
1341 * callable from interrupt context (isr-ok).
1342 *
1343 * The stop function is optional, and is only invoked if the timer has been
1344 * initialized with one.
1345 *
1346 * @param timer Address of timer.
1347 *
1348 * @return N/A
1349 */
1350 typedef void (*k_timer_stop_t)(struct k_timer *timer);
1351
1352 /**
1353 * @brief Statically define and initialize a timer.
1354 *
1355 * The timer can be accessed outside the module where it is defined using:
1356 *
1357 * @code extern struct k_timer <name>; @endcode
1358 *
1359 * @param name Name of the timer variable.
1360 * @param expiry_fn Function to invoke each time the timer expires.
1361 * @param stop_fn Function to invoke if the timer is stopped while running.
1362 */
1363 #define K_TIMER_DEFINE(name, expiry_fn, stop_fn) \
1364 STRUCT_SECTION_ITERABLE(k_timer, name) = \
1365 Z_TIMER_INITIALIZER(name, expiry_fn, stop_fn)
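/*
 * Illustrative sketch: a statically defined timer whose (hypothetical)
 * expiry handler runs in ISR context each time the timer fires.
 *
 *   static void heartbeat_expiry(struct k_timer *timer)
 *   {
 *       // keep this short: it runs from the system clock interrupt
 *   }
 *
 *   K_TIMER_DEFINE(heartbeat, heartbeat_expiry, NULL);
 */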
1366
1367 /**
1368 * @brief Initialize a timer.
1369 *
1370 * This routine initializes a timer, prior to its first use.
1371 *
1372 * @param timer Address of timer.
1373 * @param expiry_fn Function to invoke each time the timer expires.
1374 * @param stop_fn Function to invoke if the timer is stopped while running.
1375 *
1376 * @return N/A
1377 */
1378 extern void k_timer_init(struct k_timer *timer,
1379 k_timer_expiry_t expiry_fn,
1380 k_timer_stop_t stop_fn);
1381
1382 /**
1383 * @brief Start a timer.
1384 *
1385 * This routine starts a timer, and resets its status to zero. The timer
1386 * begins counting down using the specified duration and period values.
1387 *
1388 * Attempting to start a timer that is already running is permitted.
1389 * The timer's status is reset to zero and the timer begins counting down
1390 * using the new duration and period values.
1391 *
1392 * @param timer Address of timer.
1393 * @param duration Initial timer duration.
1394 * @param period Timer period.
1395 *
1396 * @return N/A
1397 */
1398 __syscall void k_timer_start(struct k_timer *timer,
1399 k_timeout_t duration, k_timeout_t period);
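/*
 * Illustrative sketch: starting a (hypothetical) timer with an initial 1 s
 * delay followed by a 100 ms period.
 *
 *   k_timer_start(&heartbeat, K_SECONDS(1), K_MSEC(100));
 */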
1400
1401 /**
1402 * @brief Stop a timer.
1403 *
1404 * This routine stops a running timer prematurely. The timer's stop function,
1405 * if one exists, is invoked by the caller.
1406 *
1407 * Attempting to stop a timer that is not running is permitted, but has no
1408 * effect on the timer.
1409 *
1410 * @note The stop handler has to be callable from ISRs if @a k_timer_stop is to
1411 * be called from ISRs.
1412 *
1413 * @funcprops \isr_ok
1414 *
1415 * @param timer Address of timer.
1416 *
1417 * @return N/A
1418 */
1419 __syscall void k_timer_stop(struct k_timer *timer);
1420
1421 /**
1422 * @brief Read timer status.
1423 *
1424 * This routine reads the timer's status, which indicates the number of times
1425 * it has expired since its status was last read.
1426 *
1427 * Calling this routine resets the timer's status to zero.
1428 *
1429 * @param timer Address of timer.
1430 *
1431 * @return Timer status.
1432 */
1433 __syscall uint32_t k_timer_status_get(struct k_timer *timer);
1434
1435 /**
1436 * @brief Synchronize thread to timer expiration.
1437 *
1438 * This routine blocks the calling thread until the timer's status is non-zero
1439 * (indicating that it has expired at least once since it was last examined)
1440 * or the timer is stopped. If the timer status is already non-zero,
1441 * or the timer is already stopped, the caller continues without waiting.
1442 *
1443 * Calling this routine resets the timer's status to zero.
1444 *
1445 * This routine must not be used by interrupt handlers, since they are not
1446 * allowed to block.
1447 *
1448 * @param timer Address of timer.
1449 *
1450 * @return Timer status.
1451 */
1452 __syscall uint32_t k_timer_status_sync(struct k_timer *timer);
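/*
 * Illustrative sketch: a thread paced by a periodic timer, using
 * k_timer_status_sync() to block until the next expiry. The timer and work
 * function are hypothetical.
 *
 *   k_timer_start(&pace, K_MSEC(10), K_MSEC(10));
 *
 *   while (true) {
 *       k_timer_status_sync(&pace);  // returns once per period
 *       sample_sensor();
 *   }
 */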
1453
1454 #ifdef CONFIG_SYS_CLOCK_EXISTS
1455
1456 /**
1457 * @brief Get next expiration time of a timer, in system ticks
1458 *
1459 * This routine returns the future system uptime reached at the next
1460 * time of expiration of the timer, in units of system ticks. If the
1461 * timer is not running, current system time is returned.
1462 *
1463 * @param timer The timer object
1464 * @return Uptime of expiration, in ticks
1465 */
1466 __syscall k_ticks_t k_timer_expires_ticks(const struct k_timer *timer);
1467
1468 static inline k_ticks_t z_impl_k_timer_expires_ticks(
1469 const struct k_timer *timer)
1470 {
1471 return z_timeout_expires(&timer->timeout);
1472 }
1473
1474 /**
1475 * @brief Get time remaining before a timer next expires, in system ticks
1476 *
1477 * This routine computes the time remaining before a running timer
1478 * next expires, in units of system ticks. If the timer is not
1479 * running, it returns zero.
1480 */
1481 __syscall k_ticks_t k_timer_remaining_ticks(const struct k_timer *timer);
1482
1483 static inline k_ticks_t z_impl_k_timer_remaining_ticks(
1484 const struct k_timer *timer)
1485 {
1486 return z_timeout_remaining(&timer->timeout);
1487 }
1488
1489 /**
1490 * @brief Get time remaining before a timer next expires.
1491 *
1492 * This routine computes the (approximate) time remaining before a running
1493 * timer next expires. If the timer is not running, it returns zero.
1494 *
1495 * @param timer Address of timer.
1496 *
1497 * @return Remaining time (in milliseconds).
1498 */
1499 static inline uint32_t k_timer_remaining_get(struct k_timer *timer)
1500 {
1501 return k_ticks_to_ms_floor32(k_timer_remaining_ticks(timer));
1502 }
1503
1504 #endif /* CONFIG_SYS_CLOCK_EXISTS */
1505
1506 /**
1507 * @brief Associate user-specific data with a timer.
1508 *
1509 * This routine records the @a user_data with the @a timer, to be retrieved
1510 * later.
1511 *
1512 * It can be used e.g. in a timer handler shared across multiple subsystems to
1513 * retrieve data specific to the subsystem this timer is associated with.
1514 *
1515 * @param timer Address of timer.
1516 * @param user_data User data to associate with the timer.
1517 *
1518 * @return N/A
1519 */
1520 __syscall void k_timer_user_data_set(struct k_timer *timer, void *user_data);
1521
1522 /**
1523 * @internal
1524 */
1525 static inline void z_impl_k_timer_user_data_set(struct k_timer *timer,
1526 void *user_data)
1527 {
1528 timer->user_data = user_data;
1529 }
1530
1531 /**
1532 * @brief Retrieve the user-specific data from a timer.
1533 *
1534 * @param timer Address of timer.
1535 *
1536 * @return The user data.
1537 */
1538 __syscall void *k_timer_user_data_get(const struct k_timer *timer);
1539
1540 static inline void *z_impl_k_timer_user_data_get(const struct k_timer *timer)
1541 {
1542 return timer->user_data;
1543 }
1544
1545 /** @} */
1546
1547 /**
1548 * @addtogroup clock_apis
1549 * @ingroup kernel_apis
1550 * @{
1551 */
1552
1553 /**
1554 * @brief Get system uptime, in system ticks.
1555 *
1556 * This routine returns the elapsed time since the system booted, in
1557 * ticks (c.f. @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC}), which is the
1558 * fundamental unit of resolution of kernel timekeeping.
1559 *
1560 * @return Current uptime in ticks.
1561 */
1562 __syscall int64_t k_uptime_ticks(void);
1563
1564 /**
1565 * @brief Get system uptime.
1566 *
1567 * This routine returns the elapsed time since the system booted,
1568 * in milliseconds.
1569 *
1570 * @note
1571 * While this function returns time in milliseconds, it does
1572 * not mean it has millisecond resolution. The actual resolution depends on
1573 * @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
1574 *
1575 * @return Current uptime in milliseconds.
1576 */
1577 static inline int64_t k_uptime_get(void)
1578 {
1579 return k_ticks_to_ms_floor64(k_uptime_ticks());
1580 }
1581
1582 /**
1583 * @brief Get system uptime (32-bit version).
1584 *
1585 * This routine returns the lower 32 bits of the system uptime in
1586 * milliseconds.
1587 *
1588 * Because correct conversion requires full precision of the system
1589 * clock there is no benefit to using this over k_uptime_get() unless
1590 * you know the application will never run long enough for the system
1591 * clock to approach 2^32 ticks. Calls to this function may involve
1592 * interrupt blocking and 64-bit math.
1593 *
1594 * @note
1595 * While this function returns time in milliseconds, it does
1596 * not mean it has millisecond resolution. The actual resolution depends on
1597 * @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option
1598 *
1599 * @return The low 32 bits of the current uptime, in milliseconds.
1600 */
1601 static inline uint32_t k_uptime_get_32(void)
1602 {
1603 return (uint32_t)k_uptime_get();
1604 }
1605
1606 /**
1607 * @brief Get elapsed time.
1608 *
1609 * This routine computes the elapsed time between the current system uptime
1610 * and an earlier reference time, in milliseconds.
1611 *
1612 * @param reftime Pointer to a reference time, which is updated to the current
1613 * uptime upon return.
1614 *
1615 * @return Elapsed time.
1616 */
1617 static inline int64_t k_uptime_delta(int64_t *reftime)
1618 {
1619 int64_t uptime, delta;
1620
1621 uptime = k_uptime_get();
1622 delta = uptime - *reftime;
1623 *reftime = uptime;
1624
1625 return delta;
1626 }
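/*
 * Illustrative sketch: measuring how long an operation took, in milliseconds.
 * The operation is hypothetical.
 *
 *   int64_t ref = k_uptime_get();
 *
 *   do_operation();
 *
 *   int64_t elapsed_ms = k_uptime_delta(&ref);
 */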
1627
1628 /**
1629 * @brief Read the hardware clock.
1630 *
1631 * This routine returns the current time, as measured by the system's hardware
1632 * clock.
1633 *
1634 * @return Current hardware clock up-counter (in cycles).
1635 */
1636 static inline uint32_t k_cycle_get_32(void)
1637 {
1638 return arch_k_cycle_get_32();
1639 }
1640
1641 /**
1642 * @}
1643 */
1644
1645 /**
1646 * @cond INTERNAL_HIDDEN
1647 */
1648
1649 struct k_queue {
1650 sys_sflist_t data_q;
1651 struct k_spinlock lock;
1652 _wait_q_t wait_q;
1653
1654 _POLL_EVENT;
1655 };
1656
1657 #define Z_QUEUE_INITIALIZER(obj) \
1658 { \
1659 .data_q = SYS_SFLIST_STATIC_INIT(&obj.data_q), \
1660 .lock = { }, \
1661 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
1662 _POLL_EVENT_OBJ_INIT(obj) \
1663 }
1664
1665 extern void *z_queue_node_peek(sys_sfnode_t *node, bool needs_free);
1666
1667 /**
1668 * INTERNAL_HIDDEN @endcond
1669 */
1670
1671 /**
1672 * @defgroup queue_apis Queue APIs
1673 * @ingroup kernel_apis
1674 * @{
1675 */
1676
1677 /**
1678 * @brief Initialize a queue.
1679 *
1680 * This routine initializes a queue object, prior to its first use.
1681 *
1682 * @param queue Address of the queue.
1683 *
1684 * @return N/A
1685 */
1686 __syscall void k_queue_init(struct k_queue *queue);
1687
1688 /**
1689 * @brief Cancel waiting on a queue.
1690 *
1691 * This routine causes the first thread pending on @a queue, if any, to
1692 * return from its k_queue_get() call with a NULL value (as if the timeout expired).
1693 * If the queue is being waited on by k_poll(), it will return with
1694 * -EINTR and K_POLL_STATE_CANCELLED state (and per above, subsequent
1695 * k_queue_get() will return NULL).
1696 *
1697 * @funcprops \isr_ok
1698 *
1699 * @param queue Address of the queue.
1700 *
1701 * @return N/A
1702 */
1703 __syscall void k_queue_cancel_wait(struct k_queue *queue);
1704
1705 /**
1706 * @brief Append an element to the end of a queue.
1707 *
1708 * This routine appends a data item to @a queue. A queue data item must be
1709 * aligned on a word boundary, and the first word of the item is reserved
1710 * for the kernel's use.
1711 *
1712 * @funcprops \isr_ok
1713 *
1714 * @param queue Address of the queue.
1715 * @param data Address of the data item.
1716 *
1717 * @return N/A
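 *
 * A sketch of a queue item with the reserved first word (struct, field and
 * queue names are illustrative; @c my_queue is assumed to be defined
 * elsewhere, e.g. with K_QUEUE_DEFINE):
 *
 * @code
 * struct my_item {
 *         void *reserved;    // first word, used internally by the queue
 *         uint32_t payload;
 * };
 *
 * static struct my_item item = { .payload = 42 };
 *
 * k_queue_append(&my_queue, &item);
 * @endcode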
1718 */
1719 extern void k_queue_append(struct k_queue *queue, void *data);
1720
1721 /**
1722 * @brief Append an element to a queue.
1723 *
1724 * This routine appends a data item to @a queue. There is an implicit memory
1725 * allocation to create an additional temporary bookkeeping data structure from
1726 * the calling thread's resource pool, which is automatically freed when the
1727 * item is removed. The data itself is not copied.
1728 *
1729 * @funcprops \isr_ok
1730 *
1731 * @param queue Address of the queue.
1732 * @param data Address of the data item.
1733 *
1734 * @retval 0 on success
1735 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
1736 */
1737 __syscall int32_t k_queue_alloc_append(struct k_queue *queue, void *data);
1738
1739 /**
1740 * @brief Prepend an element to a queue.
1741 *
1742 * This routine prepends a data item to @a queue. A queue data item must be
1743 * aligned on a word boundary, and the first word of the item is reserved
1744 * for the kernel's use.
1745 *
1746 * @funcprops \isr_ok
1747 *
1748 * @param queue Address of the queue.
1749 * @param data Address of the data item.
1750 *
1751 * @return N/A
1752 */
1753 extern void k_queue_prepend(struct k_queue *queue, void *data);
1754
1755 /**
1756 * @brief Prepend an element to a queue.
1757 *
1758 * This routine prepends a data item to @a queue. There is an implicit memory
1759 * allocation to create an additional temporary bookkeeping data structure from
1760 * the calling thread's resource pool, which is automatically freed when the
1761 * item is removed. The data itself is not copied.
1762 *
1763 * @funcprops \isr_ok
1764 *
1765 * @param queue Address of the queue.
1766 * @param data Address of the data item.
1767 *
1768 * @retval 0 on success
1769 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
1770 */
1771 __syscall int32_t k_queue_alloc_prepend(struct k_queue *queue, void *data);
1772
1773 /**
1774 * @brief Insert an element into a queue.
1775 *
1776 * This routine inserts a data item into @a queue after item @a prev. A queue
1777 * data item must be aligned on a word boundary, and the first word of
1778 * the item is reserved for the kernel's use.
1779 *
1780 * @funcprops \isr_ok
1781 *
1782 * @param queue Address of the queue.
1783 * @param prev Address of the previous data item.
1784 * @param data Address of the data item.
1785 *
1786 * @return N/A
1787 */
1788 extern void k_queue_insert(struct k_queue *queue, void *prev, void *data);
1789
1790 /**
1791 * @brief Atomically append a list of elements to a queue.
1792 *
1793 * This routine adds a list of data items to @a queue in one operation.
1794 * The data items must be in a singly-linked list, with the first word
1795 * in each data item pointing to the next data item; the list must be
1796 * NULL-terminated.
1797 *
1798 * @funcprops \isr_ok
1799 *
1800 * @param queue Address of the queue.
1801 * @param head Pointer to first node in singly-linked list.
1802 * @param tail Pointer to last node in singly-linked list.
1803 *
1804 * @retval 0 on success
1805 * @retval -EINVAL on invalid supplied data
1806 *
1807 */
1808 extern int k_queue_append_list(struct k_queue *queue, void *head, void *tail);
1809
1810 /**
1811 * @brief Atomically add a list of elements to a queue.
1812 *
1813 * This routine adds a list of data items to @a queue in one operation.
1814 * The data items must be in a singly-linked list implemented using a
1815 * sys_slist_t object. Upon completion, the original list is empty.
1816 *
1817 * @funcprops \isr_ok
1818 *
1819 * @param queue Address of the queue.
1820 * @param list Pointer to sys_slist_t object.
1821 *
1822 * @retval 0 on success
1823 * @retval -EINVAL on invalid data
1824 */
1825 extern int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list);
1826
1827 /**
1828 * @brief Get an element from a queue.
1829 *
1830 * This routine removes the first data item from @a queue. The first word of the
1831 * data item is reserved for the kernel's use.
1832 *
1833 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
1834 *
1835 * @funcprops \isr_ok
1836 *
1837 * @param queue Address of the queue.
1838 * @param timeout Non-negative waiting period to obtain a data item
1839 * or one of the special values K_NO_WAIT and
1840 * K_FOREVER.
1841 *
1842 * @return Address of the data item if successful; NULL if returned
1843 * without waiting, or waiting period timed out.
1844 */
1845 __syscall void *k_queue_get(struct k_queue *queue, k_timeout_t timeout);
1846
1847 /**
1848 * @brief Remove an element from a queue.
1849 *
1850 * This routine removes the specified data item from @a queue. The first word of
1851 * the data item is reserved for the kernel's use. Removing elements from a k_queue
1852 * relies on sys_slist_find_and_remove(), which is not a constant-time operation.
1853 *
1854 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
1855 *
1856 * @funcprops \isr_ok
1857 *
1858 * @param queue Address of the queue.
1859 * @param data Address of the data item.
1860 *
1861 * @return true if data item was removed
1862 */
1863 bool k_queue_remove(struct k_queue *queue, void *data);
1864
1865 /**
1866 * @brief Append an element to a queue only if it's not present already.
1867 *
1868 * This routine appends a data item to @a queue. The first word of the data
1869 * item is reserved for the kernel's use. Appending elements to a k_queue
1870 * relies on sys_slist_is_node_in_list(), which is not a constant-time operation.
1871 *
1872 * @funcprops \isr_ok
1873 *
1874 * @param queue Address of the queue.
1875 * @param data Address of the data item.
1876 *
1877 * @return true if data item was added, false if not
1878 */
1879 bool k_queue_unique_append(struct k_queue *queue, void *data);
1880
1881 /**
1882 * @brief Query a queue to see if it has data available.
1883 *
1884 * Note that the data might already be gone by the time this function returns
1885 * if other threads are also trying to read from the queue.
1886 *
1887 * @funcprops \isr_ok
1888 *
1889 * @param queue Address of the queue.
1890 *
1891 * @return Non-zero if the queue is empty.
1892 * @return 0 if data is available.
1893 */
1894 __syscall int k_queue_is_empty(struct k_queue *queue);
1895
1896 static inline int z_impl_k_queue_is_empty(struct k_queue *queue)
1897 {
1898 return (int)sys_sflist_is_empty(&queue->data_q);
1899 }
1900
1901 /**
1902 * @brief Peek element at the head of queue.
1903 *
1904 * Return element from the head of queue without removing it.
1905 *
1906 * @param queue Address of the queue.
1907 *
1908 * @return Head element, or NULL if queue is empty.
1909 */
1910 __syscall void *k_queue_peek_head(struct k_queue *queue);
1911
1912 /**
1913 * @brief Peek element at the tail of queue.
1914 *
1915 * Return element from the tail of queue without removing it.
1916 *
1917 * @param queue Address of the queue.
1918 *
1919 * @return Tail element, or NULL if queue is empty.
1920 */
1921 __syscall void *k_queue_peek_tail(struct k_queue *queue);
1922
1923 /**
1924 * @brief Statically define and initialize a queue.
1925 *
1926 * The queue can be accessed outside the module where it is defined using:
1927 *
1928 * @code extern struct k_queue <name>; @endcode
1929 *
1930 * @param name Name of the queue.
1931 */
1932 #define K_QUEUE_DEFINE(name) \
1933 STRUCT_SECTION_ITERABLE(k_queue, name) = \
1934 Z_QUEUE_INITIALIZER(name)
1935
1936 /** @} */
1937
1938 #ifdef CONFIG_USERSPACE
1939 /**
1940 * @brief futex structure
1941 *
1942 * A k_futex is a lightweight mutual exclusion primitive designed
1943 * to minimize kernel involvement. Uncontended operation relies
1944 * only on atomic access to shared memory. k_futex objects are tracked as
1945 * kernel objects and can live in user memory so that any access
1946 * bypasses the kernel object permission management mechanism.
1947 */
1948 struct k_futex {
1949 atomic_t val;
1950 };
1951
1952 /**
1953 * @brief futex kernel data structure
1954 *
1955 * z_futex_data is the helper data structure that k_futex uses to complete
1956 * contended futex operations on the kernel side; the z_futex_data structure
1957 * of every futex object is invisible in user mode.
1958 */
1959 struct z_futex_data {
1960 _wait_q_t wait_q;
1961 struct k_spinlock lock;
1962 };
1963
1964 #define Z_FUTEX_DATA_INITIALIZER(obj) \
1965 { \
1966 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q) \
1967 }
1968
1969 /**
1970 * @defgroup futex_apis FUTEX APIs
1971 * @ingroup kernel_apis
1972 * @{
1973 */
1974
1975 /**
1976 * @brief Pend the current thread on a futex
1977 *
1978 * Tests that the supplied futex contains the expected value, and if so,
1979 * goes to sleep until some other thread calls k_futex_wake() on it.
1980 *
1981 * @param futex Address of the futex.
1982 * @param expected Expected value of the futex; if the actual value differs,
1983 * the caller will not wait on it.
1984 * @param timeout Non-negative waiting period on the futex, or
1985 * one of the special values K_NO_WAIT or K_FOREVER.
1986 * @retval -EACCES Caller does not have read access to futex address.
1987 * @retval -EAGAIN If the futex value did not match the expected parameter.
1988 * @retval -EINVAL Futex parameter address not recognized by the kernel.
1989 * @retval -ETIMEDOUT Thread woke up due to timeout and not a futex wakeup.
1990 * @retval 0 if the caller went to sleep and was woken up. The caller
1991 * should check the futex's value on wakeup to determine if it needs
1992 * to block again.
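 *
 * A minimal waiter sketch (the value protocol is application-defined and
 * @c my_futex is an assumed application object):
 *
 * @code
 * while (atomic_get(&my_futex.val) == 0) {
 *         (void)k_futex_wait(&my_futex, 0, K_FOREVER);
 * }
 * @endcode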
1993 */
1994 __syscall int k_futex_wait(struct k_futex *futex, int expected,
1995 k_timeout_t timeout);
1996
1997 /**
1998 * @brief Wake one/all threads pending on a futex
1999 *
2000 * Wake up the highest-priority thread pending on the supplied futex, or
2001 * wake up all threads pending on it, depending on @a wake_all.
2003 *
2004 * @param futex Futex to wake up pending threads.
2005 * @param wake_all If true, wake up all pending threads; if false,
2006 * wake up the highest-priority thread.
2007 * @retval -EACCES Caller does not have access to the futex address.
2008 * @retval -EINVAL Futex parameter address not recognized by the kernel.
2009 * @retval Number of threads that were woken up.
2010 */
2011 __syscall int k_futex_wake(struct k_futex *futex, bool wake_all);
2012
2013 /** @} */
2014 #endif
2015
2016 struct k_fifo {
2017 struct k_queue _queue;
2018 };
2019
2020 /**
2021 * @cond INTERNAL_HIDDEN
2022 */
2023 #define Z_FIFO_INITIALIZER(obj) \
2024 { \
2025 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
2026 }
2027
2028 /**
2029 * INTERNAL_HIDDEN @endcond
2030 */
2031
2032 /**
2033 * @defgroup fifo_apis FIFO APIs
2034 * @ingroup kernel_apis
2035 * @{
2036 */
2037
2038 /**
2039 * @brief Initialize a FIFO queue.
2040 *
2041 * This routine initializes a FIFO queue, prior to its first use.
2042 *
2043 * @param fifo Address of the FIFO queue.
2044 *
2045 * @return N/A
2046 */
2047 #define k_fifo_init(fifo) \
2048 ({ \
2049 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, init, fifo); \
2050 k_queue_init(&(fifo)->_queue); \
2051 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, init, fifo); \
2052 })
2053
2054 /**
2055 * @brief Cancel waiting on a FIFO queue.
2056 *
2057 * This routine causes the first thread pending on @a fifo, if any, to
2058 * return from k_fifo_get() call with NULL value (as if timeout
2059 * expired).
2060 *
2061 * @funcprops \isr_ok
2062 *
2063 * @param fifo Address of the FIFO queue.
2064 *
2065 * @return N/A
2066 */
2067 #define k_fifo_cancel_wait(fifo) \
2068 ({ \
2069 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, cancel_wait, fifo); \
2070 k_queue_cancel_wait(&(fifo)->_queue); \
2071 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, cancel_wait, fifo); \
2072 })
2073
2074 /**
2075 * @brief Add an element to a FIFO queue.
2076 *
2077 * This routine adds a data item to @a fifo. A FIFO data item must be
2078 * aligned on a word boundary, and the first word of the item is reserved
2079 * for the kernel's use.
2080 *
2081 * @funcprops \isr_ok
2082 *
2083 * @param fifo Address of the FIFO.
2084 * @param data Address of the data item.
2085 *
2086 * @return N/A
2087 */
2088 #define k_fifo_put(fifo, data) \
2089 ({ \
2090 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put, fifo, data); \
2091 k_queue_append(&(fifo)->_queue, data); \
2092 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put, fifo, data); \
2093 })
2094
2095 /**
2096 * @brief Add an element to a FIFO queue.
2097 *
2098 * This routine adds a data item to @a fifo. There is an implicit memory
2099 * allocation to create an additional temporary bookkeeping data structure from
2100 * the calling thread's resource pool, which is automatically freed when the
2101 * item is removed. The data itself is not copied.
2102 *
2103 * @funcprops \isr_ok
2104 *
2105 * @param fifo Address of the FIFO.
2106 * @param data Address of the data item.
2107 *
2108 * @retval 0 on success
2109 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2110 */
2111 #define k_fifo_alloc_put(fifo, data) \
2112 ({ \
2113 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, alloc_put, fifo, data); \
2114 int ret = k_queue_alloc_append(&(fifo)->_queue, data); \
2115 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, alloc_put, fifo, data, ret); \
2116 ret; \
2117 })
2118
2119 /**
2120 * @brief Atomically add a list of elements to a FIFO.
2121 *
2122 * This routine adds a list of data items to @a fifo in one operation.
2123 * The data items must be in a singly-linked list, with the first word of
2124 * each data item pointing to the next data item; the list must be
2125 * NULL-terminated.
2126 *
2127 * @funcprops \isr_ok
2128 *
2129 * @param fifo Address of the FIFO queue.
2130 * @param head Pointer to first node in singly-linked list.
2131 * @param tail Pointer to last node in singly-linked list.
2132 *
2133 * @return N/A
2134 */
2135 #define k_fifo_put_list(fifo, head, tail) \
2136 ({ \
2137 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_list, fifo, head, tail); \
2138 k_queue_append_list(&(fifo)->_queue, head, tail); \
2139 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_list, fifo, head, tail); \
2140 })
2141
2142 /**
2143 * @brief Atomically add a list of elements to a FIFO queue.
2144 *
2145 * This routine adds a list of data items to @a fifo in one operation.
2146 * The data items must be in a singly-linked list implemented using a
2147 * sys_slist_t object. Upon completion, the sys_slist_t object is invalid
2148 * and must be re-initialized via sys_slist_init().
2149 *
2150 * @funcprops \isr_ok
2151 *
2152 * @param fifo Address of the FIFO queue.
2153 * @param list Pointer to sys_slist_t object.
2154 *
2155 * @return N/A
2156 */
2157 #define k_fifo_put_slist(fifo, list) \
2158 ({ \
2159 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_slist, fifo, list); \
2160 k_queue_merge_slist(&(fifo)->_queue, list); \
2161 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_slist, fifo, list); \
2162 })
2163
2164 /**
2165 * @brief Get an element from a FIFO queue.
2166 *
2167 * This routine removes a data item from @a fifo in a "first in, first out"
2168 * manner. The first word of the data item is reserved for the kernel's use.
2169 *
2170 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2171 *
2172 * @funcprops \isr_ok
2173 *
2174 * @param fifo Address of the FIFO queue.
2175 * @param timeout Waiting period to obtain a data item,
2176 * or one of the special values K_NO_WAIT and K_FOREVER.
2177 *
2178 * @return Address of the data item if successful; NULL if returned
2179 * without waiting, or waiting period timed out.
2180 */
2181 #define k_fifo_get(fifo, timeout) \
2182 ({ \
2183 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, get, fifo, timeout); \
2184 void *ret = k_queue_get(&(fifo)->_queue, timeout); \
2185 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, get, fifo, timeout, ret); \
2186 ret; \
2187 })
2188
2189 /**
2190 * @brief Query a FIFO queue to see if it has data available.
2191 *
2192 * Note that the data might already be gone by the time this function returns
2193 * if other threads are also trying to read from the FIFO.
2194 *
2195 * @funcprops \isr_ok
2196 *
2197 * @param fifo Address of the FIFO queue.
2198 *
2199 * @return Non-zero if the FIFO queue is empty.
2200 * @return 0 if data is available.
2201 */
2202 #define k_fifo_is_empty(fifo) \
2203 k_queue_is_empty(&(fifo)->_queue)
2204
2205 /**
2206 * @brief Peek element at the head of a FIFO queue.
2207 *
2208 * Return an element from the head of the FIFO queue without removing it. A use
2209 * case for this is when elements of the FIFO queue are themselves containers.
2210 * Then, on each iteration of processing, the head container is peeked,
2211 * some data is processed out of it, and only when the container is empty
2212 * is it completely removed from the FIFO queue.
2213 *
2214 * @param fifo Address of the FIFO queue.
2215 *
2216 * @return Head element, or NULL if the FIFO queue is empty.
2217 */
2218 #define k_fifo_peek_head(fifo) \
2219 ({ \
2220 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_head, fifo); \
2221 void *ret = k_queue_peek_head(&(fifo)->_queue); \
2222 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_head, fifo, ret); \
2223 ret; \
2224 })
2225
2226 /**
2227 * @brief Peek element at the tail of FIFO queue.
2228 *
2229 * Return an element from the tail of the FIFO queue (without removing it). A use
2230 * case for this is when elements of the FIFO queue are themselves containers.
2231 * Then it may be useful to add more data to the last container in the FIFO queue.
2232 *
2233 * @param fifo Address of the FIFO queue.
2234 *
2235 * @return Tail element, or NULL if a FIFO queue is empty.
2236 */
2237 #define k_fifo_peek_tail(fifo) \
2238 ({ \
2239 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_tail, fifo); \
2240 void *ret = k_queue_peek_tail(&(fifo)->_queue); \
2241 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_tail, fifo, ret); \
2242 ret; \
2243 })
2244
2245 /**
2246 * @brief Statically define and initialize a FIFO queue.
2247 *
2248 * The FIFO queue can be accessed outside the module where it is defined using:
2249 *
2250 * @code extern struct k_fifo <name>; @endcode
2251 *
2252 * @param name Name of the FIFO queue.
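 *
 * A short producer/consumer sketch (item type and names are illustrative;
 * the first word of each item is reserved for the kernel):
 *
 * @code
 * struct my_msg {
 *         void *fifo_reserved;   // first word, used internally by the FIFO
 *         uint32_t data;
 * };
 *
 * K_FIFO_DEFINE(my_fifo);
 *
 * static struct my_msg msg = { .data = 1 };
 *
 * // producer
 * k_fifo_put(&my_fifo, &msg);
 *
 * // consumer
 * struct my_msg *rx = k_fifo_get(&my_fifo, K_FOREVER);
 * @endcode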
2253 */
2254 #define K_FIFO_DEFINE(name) \
2255 STRUCT_SECTION_ITERABLE_ALTERNATE(k_queue, k_fifo, name) = \
2256 Z_FIFO_INITIALIZER(name)
2257
2258 /** @} */
2259
2260 struct k_lifo {
2261 struct k_queue _queue;
2262 };
2263
2264 /**
2265 * @cond INTERNAL_HIDDEN
2266 */
2267
2268 #define Z_LIFO_INITIALIZER(obj) \
2269 { \
2270 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
2271 }
2272
2273 /**
2274 * INTERNAL_HIDDEN @endcond
2275 */
2276
2277 /**
2278 * @defgroup lifo_apis LIFO APIs
2279 * @ingroup kernel_apis
2280 * @{
2281 */
2282
2283 /**
2284 * @brief Initialize a LIFO queue.
2285 *
2286 * This routine initializes a LIFO queue object, prior to its first use.
2287 *
2288 * @param lifo Address of the LIFO queue.
2289 *
2290 * @return N/A
2291 */
2292 #define k_lifo_init(lifo) \
2293 ({ \
2294 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, init, lifo); \
2295 k_queue_init(&(lifo)->_queue); \
2296 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, init, lifo); \
2297 })
2298
2299 /**
2300 * @brief Add an element to a LIFO queue.
2301 *
2302 * This routine adds a data item to @a lifo. A LIFO queue data item must be
2303 * aligned on a word boundary, and the first word of the item is
2304 * reserved for the kernel's use.
2305 *
2306 * @funcprops \isr_ok
2307 *
2308 * @param lifo Address of the LIFO queue.
2309 * @param data Address of the data item.
2310 *
2311 * @return N/A
2312 */
2313 #define k_lifo_put(lifo, data) \
2314 ({ \
2315 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, put, lifo, data); \
2316 k_queue_prepend(&(lifo)->_queue, data); \
2317 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, put, lifo, data); \
2318 })
2319
2320 /**
2321 * @brief Add an element to a LIFO queue.
2322 *
2323 * This routine adds a data item to @a lifo. There is an implicit memory
2324 * allocation to create an additional temporary bookkeeping data structure from
2325 * the calling thread's resource pool, which is automatically freed when the
2326 * item is removed. The data itself is not copied.
2327 *
2328 * @funcprops \isr_ok
2329 *
2330 * @param lifo Address of the LIFO.
2331 * @param data Address of the data item.
2332 *
2333 * @retval 0 on success
2334 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2335 */
2336 #define k_lifo_alloc_put(lifo, data) \
2337 ({ \
2338 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, alloc_put, lifo, data); \
2339 int ret = k_queue_alloc_prepend(&(lifo)->_queue, data); \
2340 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, alloc_put, lifo, data, ret); \
2341 ret; \
2342 })
2343
2344 /**
2345 * @brief Get an element from a LIFO queue.
2346 *
2347 * This routine removes a data item from @a lifo in a "last in, first out"
2348 * manner. The first word of the data item is reserved for the kernel's use.
2349 *
2350 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2351 *
2352 * @funcprops \isr_ok
2353 *
2354 * @param lifo Address of the LIFO queue.
2355 * @param timeout Waiting period to obtain a data item,
2356 * or one of the special values K_NO_WAIT and K_FOREVER.
2357 *
2358 * @return Address of the data item if successful; NULL if returned
2359 * without waiting, or waiting period timed out.
2360 */
2361 #define k_lifo_get(lifo, timeout) \
2362 ({ \
2363 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, get, lifo, timeout); \
2364 void *ret = k_queue_get(&(lifo)->_queue, timeout); \
2365 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, get, lifo, timeout, ret); \
2366 ret; \
2367 })
2368
2369 /**
2370 * @brief Statically define and initialize a LIFO queue.
2371 *
2372 * The LIFO queue can be accessed outside the module where it is defined using:
2373 *
2374 * @code extern struct k_lifo <name>; @endcode
2375 *
2376 * @param name Name of the LIFO queue.
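 *
 * A brief ordering sketch (names are illustrative; each item must reserve
 * its first word for the kernel):
 *
 * @code
 * K_LIFO_DEFINE(my_lifo);
 *
 * k_lifo_put(&my_lifo, &item_a);
 * k_lifo_put(&my_lifo, &item_b);
 *
 * void *first = k_lifo_get(&my_lifo, K_NO_WAIT);   // returns &item_b (last in, first out)
 * @endcode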
2377 */
2378 #define K_LIFO_DEFINE(name) \
2379 STRUCT_SECTION_ITERABLE_ALTERNATE(k_queue, k_lifo, name) = \
2380 Z_LIFO_INITIALIZER(name)
2381
2382 /** @} */
2383
2384 /**
2385 * @cond INTERNAL_HIDDEN
2386 */
2387 #define K_STACK_FLAG_ALLOC ((uint8_t)1) /* Buffer was allocated */
2388
2389 typedef uintptr_t stack_data_t;
2390
2391 struct k_stack {
2392 _wait_q_t wait_q;
2393 struct k_spinlock lock;
2394 stack_data_t *base, *next, *top;
2395
2396 uint8_t flags;
2397 };
2398
2399 #define Z_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
2400 { \
2401 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2402 .base = stack_buffer, \
2403 .next = stack_buffer, \
2404 .top = stack_buffer + stack_num_entries, \
2405 }
2406
2407 /**
2408 * INTERNAL_HIDDEN @endcond
2409 */
2410
2411 /**
2412 * @defgroup stack_apis Stack APIs
2413 * @ingroup kernel_apis
2414 * @{
2415 */
2416
2417 /**
2418 * @brief Initialize a stack.
2419 *
2420 * This routine initializes a stack object, prior to its first use.
2421 *
2422 * @param stack Address of the stack.
2423 * @param buffer Address of array used to hold stacked values.
2424 * @param num_entries Maximum number of values that can be stacked.
2425 *
2426 * @return N/A
2427 */
2428 void k_stack_init(struct k_stack *stack,
2429 stack_data_t *buffer, uint32_t num_entries);
2430
2431
2432 /**
2433 * @brief Initialize a stack.
2434 *
2435 * This routine initializes a stack object, prior to its first use. Internal
2436 * buffers will be allocated from the calling thread's resource pool.
2437 * This memory will be released if k_stack_cleanup() is called, or
2438 * userspace is enabled and the stack object loses all references to it.
2439 *
2440 * @param stack Address of the stack.
2441 * @param num_entries Maximum number of values that can be stacked.
2442 *
2443 * @retval 0 on success
 * @retval -ENOMEM if memory couldn't be allocated
2444 */
2445
2446 __syscall int32_t k_stack_alloc_init(struct k_stack *stack,
2447 uint32_t num_entries);
2448
2449 /**
2450 * @brief Release a stack's allocated buffer
2451 *
2452 * If a stack object was given a dynamically allocated buffer via
2453 * k_stack_alloc_init(), this will free it. This function does nothing
2454 * if the buffer wasn't dynamically allocated.
2455 *
2456 * @param stack Address of the stack.
2457 * @retval 0 on success
2458 * @retval -EAGAIN when object is still in use
2459 */
2460 int k_stack_cleanup(struct k_stack *stack);
2461
2462 /**
2463 * @brief Push an element onto a stack.
2464 *
2465 * This routine adds a stack_data_t value @a data to @a stack.
2466 *
2467 * @funcprops \isr_ok
2468 *
2469 * @param stack Address of the stack.
2470 * @param data Value to push onto the stack.
2471 *
2472 * @retval 0 on success
2473 * @retval -ENOMEM if stack is full
2474 */
2475 __syscall int k_stack_push(struct k_stack *stack, stack_data_t data);
2476
2477 /**
2478 * @brief Pop an element from a stack.
2479 *
2480 * This routine removes a stack_data_t value from @a stack in a "last in,
2481 * first out" manner and stores the value in @a data.
2482 *
2483 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2484 *
2485 * @funcprops \isr_ok
2486 *
2487 * @param stack Address of the stack.
2488 * @param data Address of area to hold the value popped from the stack.
2489 * @param timeout Waiting period to obtain a value,
2490 * or one of the special values K_NO_WAIT and
2491 * K_FOREVER.
2492 *
2493 * @retval 0 Element popped from stack.
2494 * @retval -EBUSY Returned without waiting.
2495 * @retval -EAGAIN Waiting period timed out.
2496 */
2497 __syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data,
2498 k_timeout_t timeout);
2499
2500 /**
2501 * @brief Statically define and initialize a stack
2502 *
2503 * The stack can be accessed outside the module where it is defined using:
2504 *
2505 * @code extern struct k_stack <name>; @endcode
2506 *
2507 * @param name Name of the stack.
2508 * @param stack_num_entries Maximum number of values that can be stacked.
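 *
 * A short usage sketch (names are illustrative):
 *
 * @code
 * K_STACK_DEFINE(my_stack, 8);
 *
 * stack_data_t value;
 *
 * k_stack_push(&my_stack, (stack_data_t)42);
 * k_stack_pop(&my_stack, &value, K_NO_WAIT);   // value == 42
 * @endcode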
2509 */
2510 #define K_STACK_DEFINE(name, stack_num_entries) \
2511 stack_data_t __noinit \
2512 _k_stack_buf_##name[stack_num_entries]; \
2513 STRUCT_SECTION_ITERABLE(k_stack, name) = \
2514 Z_STACK_INITIALIZER(name, _k_stack_buf_##name, \
2515 stack_num_entries)
2516
2517 /** @} */
2518
2519 /**
2520 * @cond INTERNAL_HIDDEN
2521 */
2522
2523 struct k_work;
2524 struct k_work_q;
2525 struct k_work_queue_config;
2526 struct k_delayed_work;
2527 extern struct k_work_q k_sys_work_q;
2528
2529 /**
2530 * INTERNAL_HIDDEN @endcond
2531 */
2532
2533 /**
2534 * @defgroup mutex_apis Mutex APIs
2535 * @ingroup kernel_apis
2536 * @{
2537 */
2538
2539 /**
2540 * Mutex Structure
2541 * @ingroup mutex_apis
2542 */
2543 struct k_mutex {
2544 /** Mutex wait queue */
2545 _wait_q_t wait_q;
2546 /** Mutex owner */
2547 struct k_thread *owner;
2548
2549 /** Current lock count */
2550 uint32_t lock_count;
2551
2552 /** Original thread priority */
2553 int owner_orig_prio;
2554 };
2555
2556 /**
2557 * @cond INTERNAL_HIDDEN
2558 */
2559 #define Z_MUTEX_INITIALIZER(obj) \
2560 { \
2561 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2562 .owner = NULL, \
2563 .lock_count = 0, \
2564 .owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
2565 }
2566
2567 /**
2568 * INTERNAL_HIDDEN @endcond
2569 */
2570
2571 /**
2572 * @brief Statically define and initialize a mutex.
2573 *
2574 * The mutex can be accessed outside the module where it is defined using:
2575 *
2576 * @code extern struct k_mutex <name>; @endcode
2577 *
2578 * @param name Name of the mutex.
2579 */
2580 #define K_MUTEX_DEFINE(name) \
2581 STRUCT_SECTION_ITERABLE(k_mutex, name) = \
2582 Z_MUTEX_INITIALIZER(name)
2583
2584 /**
2585 * @brief Initialize a mutex.
2586 *
2587 * This routine initializes a mutex object, prior to its first use.
2588 *
2589 * Upon completion, the mutex is available and does not have an owner.
2590 *
2591 * @param mutex Address of the mutex.
2592 *
2593 * @retval 0 Mutex object created
2594 *
2595 */
2596 __syscall int k_mutex_init(struct k_mutex *mutex);
2597
2598
2599 /**
2600 * @brief Lock a mutex.
2601 *
2602 * This routine locks @a mutex. If the mutex is locked by another thread,
2603 * the calling thread waits until the mutex becomes available or until
2604 * a timeout occurs.
2605 *
2606 * A thread is permitted to lock a mutex it has already locked. The operation
2607 * completes immediately and the lock count is increased by 1.
2608 *
2609 * Mutexes may not be locked in ISRs.
2610 *
2611 * @param mutex Address of the mutex.
2612 * @param timeout Waiting period to lock the mutex,
2613 * or one of the special values K_NO_WAIT and
2614 * K_FOREVER.
2615 *
2616 * @retval 0 Mutex locked.
2617 * @retval -EBUSY Returned without waiting.
2618 * @retval -EAGAIN Waiting period timed out.
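 *
 * A typical critical-section sketch (@c my_mutex is assumed to be defined
 * elsewhere, e.g. with K_MUTEX_DEFINE):
 *
 * @code
 * if (k_mutex_lock(&my_mutex, K_MSEC(100)) == 0) {
 *         // ... access the shared resource ...
 *         k_mutex_unlock(&my_mutex);
 * } else {
 *         // the mutex could not be obtained within 100 ms
 * }
 * @endcode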
2619 */
2620 __syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout);
2621
2622 /**
2623 * @brief Unlock a mutex.
2624 *
2625 * This routine unlocks @a mutex. The mutex must already be locked by the
2626 * calling thread.
2627 *
2628 * The mutex cannot be claimed by another thread until it has been unlocked by
2629 * the calling thread as many times as it was previously locked by that
2630 * thread.
2631 *
2632 * Mutexes may not be unlocked in ISRs, as mutexes must only be manipulated
2633 * in thread context due to ownership and priority inheritance semantics.
2634 *
2635 * @param mutex Address of the mutex.
2636 *
2637 * @retval 0 Mutex unlocked.
2638 * @retval -EPERM The current thread does not own the mutex
2639 * @retval -EINVAL The mutex is not locked
2640 *
2641 */
2642 __syscall int k_mutex_unlock(struct k_mutex *mutex);
2643
2644 /**
2645 * @}
2646 */
2647
2648
2649 struct k_condvar {
2650 _wait_q_t wait_q;
2651 };
2652
2653 #define Z_CONDVAR_INITIALIZER(obj) \
2654 { \
2655 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2656 }
2657
2658 /**
2659 * @defgroup condvar_apis Condition Variables APIs
2660 * @ingroup kernel_apis
2661 * @{
2662 */
2663
2664 /**
2665 * @brief Initialize a condition variable
2666 *
2667 * @param condvar pointer to a @p k_condvar structure
2668 * @retval 0 Condition variable created successfully
2669 */
2670 __syscall int k_condvar_init(struct k_condvar *condvar);
2671
2672 /**
2673 * @brief Signals one thread that is pending on the condition variable
2674 *
2675 * @param condvar pointer to a @p k_condvar structure
2676 * @retval 0 On success
2677 */
2678 __syscall int k_condvar_signal(struct k_condvar *condvar);
2679
2680 /**
2681 * @brief Unblock all threads that are pending on the condition
2682 * variable
2683 *
2684 * @param condvar pointer to a @p k_condvar structure
2685 * @return An integer with number of woken threads on success
2686 */
2687 __syscall int k_condvar_broadcast(struct k_condvar *condvar);
2688
2689 /**
2690 * @brief Waits on the condition variable releasing the mutex lock
2691 *
2692 * Atomically releases the currently owned mutex, blocks the current thread
2693 * waiting on the condition variable specified by @a condvar,
2694 * and finally acquires the mutex again.
2695 *
2696 * The waiting thread unblocks only after another thread calls
2697 * k_condvar_signal() or k_condvar_broadcast() with the same condition variable.
2698 *
2699 * @param condvar pointer to a @p k_condvar structure
2700 * @param mutex Address of the mutex.
2701 * @param timeout Waiting period for the condition variable
2702 * or one of the special values K_NO_WAIT and K_FOREVER.
2703 * @retval 0 On success
2704 * @retval -EAGAIN Waiting period timed out.
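 *
 * A sketch of the canonical wait pattern (@c my_mutex, @c my_condvar and the
 * predicate are assumed application objects):
 *
 * @code
 * k_mutex_lock(&my_mutex, K_FOREVER);
 * while (!condition_is_met) {
 *         k_condvar_wait(&my_condvar, &my_mutex, K_FOREVER);
 * }
 * // ... the condition holds and the mutex is held ...
 * k_mutex_unlock(&my_mutex);
 * @endcode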
2705 */
2706 __syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
2707 k_timeout_t timeout);
2708
2709 /**
2710 * @brief Statically define and initialize a condition variable.
2711 *
2712 * The condition variable can be accessed outside the module where it is
2713 * defined using:
2714 *
2715 * @code extern struct k_condvar <name>; @endcode
2716 *
2717 * @param name Name of the condition variable.
2718 */
2719 #define K_CONDVAR_DEFINE(name) \
2720 STRUCT_SECTION_ITERABLE(k_condvar, name) = \
2721 Z_CONDVAR_INITIALIZER(name)
2722 /**
2723 * @}
2724 */
2725
2726 /**
2727 * @cond INTERNAL_HIDDEN
2728 */
2729
2730 struct k_sem {
2731 _wait_q_t wait_q;
2732 unsigned int count;
2733 unsigned int limit;
2734
2735 _POLL_EVENT;
2736
2737 };
2738
2739 #define Z_SEM_INITIALIZER(obj, initial_count, count_limit) \
2740 { \
2741 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2742 .count = initial_count, \
2743 .limit = count_limit, \
2744 _POLL_EVENT_OBJ_INIT(obj) \
2745 }
2746
2747 /**
2748 * INTERNAL_HIDDEN @endcond
2749 */
2750
2751 /**
2752 * @defgroup semaphore_apis Semaphore APIs
2753 * @ingroup kernel_apis
2754 * @{
2755 */
2756
2757 /**
2758 * @brief Maximum limit value allowed for a semaphore.
2759 *
2760 * This is intended for use when a semaphore does not have
2761 * an explicit maximum limit, and instead is just used for
2762 * counting purposes.
2763 *
2764 */
2765 #define K_SEM_MAX_LIMIT UINT_MAX
2766
2767 /**
2768 * @brief Initialize a semaphore.
2769 *
2770 * This routine initializes a semaphore object, prior to its first use.
2771 *
2772 * @param sem Address of the semaphore.
2773 * @param initial_count Initial semaphore count.
2774 * @param limit Maximum permitted semaphore count.
2775 *
2776 * @see K_SEM_MAX_LIMIT
2777 *
2778 * @retval 0 Semaphore created successfully
2779 * @retval -EINVAL Invalid values
2780 *
2781 */
2782 __syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
2783 unsigned int limit);
2784
2785 /**
2786 * @brief Take a semaphore.
2787 *
2788 * This routine takes @a sem.
2789 *
2790 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2791 *
2792 * @funcprops \isr_ok
2793 *
2794 * @param sem Address of the semaphore.
2795 * @param timeout Waiting period to take the semaphore,
2796 * or one of the special values K_NO_WAIT and K_FOREVER.
2797 *
2798 * @retval 0 Semaphore taken.
2799 * @retval -EBUSY Returned without waiting.
2800 * @retval -EAGAIN Waiting period timed out,
2801 * or the semaphore was reset during the waiting period.
2802 */
2803 __syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
2804
2805 /**
2806 * @brief Give a semaphore.
2807 *
2808 * This routine gives @a sem, unless the semaphore is already at its maximum
2809 * permitted count.
2810 *
2811 * @funcprops \isr_ok
2812 *
2813 * @param sem Address of the semaphore.
2814 *
2815 * @return N/A
2816 */
2817 __syscall void k_sem_give(struct k_sem *sem);
2818
2819 /**
2820 * @brief Resets a semaphore's count to zero.
2821 *
2822 * This routine sets the count of @a sem to zero.
2823 * Any outstanding semaphore takes will be aborted
2824 * with -EAGAIN.
2825 *
2826 * @param sem Address of the semaphore.
2827 *
2828 * @return N/A
2829 */
2830 __syscall void k_sem_reset(struct k_sem *sem);
2831
2832 /**
2833 * @brief Get a semaphore's count.
2834 *
2835 * This routine returns the current count of @a sem.
2836 *
2837 * @param sem Address of the semaphore.
2838 *
2839 * @return Current semaphore count.
2840 */
2841 __syscall unsigned int k_sem_count_get(struct k_sem *sem);
2842
2843 /**
2844 * @internal
2845 */
2846 static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
2847 {
2848 return sem->count;
2849 }
2850
2851 /**
2852 * @brief Statically define and initialize a semaphore.
2853 *
2854 * The semaphore can be accessed outside the module where it is defined using:
2855 *
2856 * @code extern struct k_sem <name>; @endcode
2857 *
2858 * @param name Name of the semaphore.
2859 * @param initial_count Initial semaphore count.
2860 * @param count_limit Maximum permitted semaphore count.
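 *
 * A common signaling sketch (names are illustrative):
 *
 * @code
 * K_SEM_DEFINE(my_sem, 0, 1);
 *
 * // producer (thread or ISR)
 * k_sem_give(&my_sem);
 *
 * // consumer thread
 * if (k_sem_take(&my_sem, K_MSEC(50)) == 0) {
 *         // the event arrived within 50 ms
 * }
 * @endcode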
2861 */
2862 #define K_SEM_DEFINE(name, initial_count, count_limit) \
2863 STRUCT_SECTION_ITERABLE(k_sem, name) = \
2864 Z_SEM_INITIALIZER(name, initial_count, count_limit); \
2865 BUILD_ASSERT(((count_limit) != 0) && \
2866 ((initial_count) <= (count_limit)) && \
2867 ((count_limit) <= K_SEM_MAX_LIMIT));
2868
2869 /** @} */
2870
2871 /**
2872 * @cond INTERNAL_HIDDEN
2873 */
2874
2875 struct k_work_delayable;
2876 struct k_work_sync;
2877
2878 /**
2879 * INTERNAL_HIDDEN @endcond
2880 */
2881
2882 /**
2883 * @defgroup workqueue_apis Work Queue APIs
2884 * @ingroup kernel_apis
2885 * @{
2886 */
2887
2888 /** @brief The signature for a work item handler function.
2889 *
2890 * The function will be invoked by the thread animating a work queue.
2891 *
2892 * @param work the work item that provided the handler.
2893 */
2894 typedef void (*k_work_handler_t)(struct k_work *work);
2895
2896 /** @brief Initialize a (non-delayable) work structure.
2897 *
2898 * This must be invoked before submitting a work structure for the first time.
2899 * It need not be invoked again on the same work structure. It can be
2900 * re-invoked to change the associated handler, but this must be done when the
2901 * work item is idle.
2902 *
2903 * @funcprops \isr_ok
2904 *
2905 * @param work the work structure to be initialized.
2906 *
2907 * @param handler the handler to be invoked by the work item.
2908 */
2909 void k_work_init(struct k_work *work,
2910 k_work_handler_t handler);
2911
2912 /** @brief Busy state flags from the work item.
2913 *
2914 * A zero return value indicates the work item appears to be idle.
2915 *
2916 * @note This is a live snapshot of state, which may change before the result
2917 * is checked. Use locks where appropriate.
2918 *
2919 * @funcprops \isr_ok
2920 *
2921 * @param work pointer to the work item.
2922 *
2923 * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED,
2924 * K_WORK_RUNNING, and K_WORK_CANCELING.
2925 */
2926 int k_work_busy_get(const struct k_work *work);
2927
2928 /** @brief Test whether a work item is currently pending.
2929 *
2930 * Wrapper to determine whether a work item is in a non-idle state.
2931 *
2932 * @note This is a live snapshot of state, which may change before the result
2933 * is checked. Use locks where appropriate.
2934 *
2935 * @funcprops \isr_ok
2936 *
2937 * @param work pointer to the work item.
2938 *
2939 * @return true if and only if k_work_busy_get() returns a non-zero value.
2940 */
2941 static inline bool k_work_is_pending(const struct k_work *work);
2942
2943 /** @brief Submit a work item to a queue.
2944 *
2945 * @param queue pointer to the work queue on which the item should run. If
2946 * NULL the queue from the most recent submission will be used.
2947 *
2948 * @funcprops \isr_ok
2949 *
2950 * @param work pointer to the work item.
2951 *
2952 * @retval 0 if work was already submitted to a queue
2953 * @retval 1 if work was not submitted and has been queued to @p queue
2954 * @retval 2 if work was running and has been queued to the queue that was
2955 * running it
2956 * @retval -EBUSY
2957 * * if work submission was rejected because the work item is cancelling; or
2958 * * @p queue is draining; or
2959 * * @p queue is plugged.
2960 * @retval -EINVAL if @p queue is null and the work item has never been run.
2961 * @retval -ENODEV if @p queue has not been started.
2962 */
2963 int k_work_submit_to_queue(struct k_work_q *queue,
2964 struct k_work *work);
2965
2966 /** @brief Submit a work item to the system queue.
2967 *
2968 * @funcprops \isr_ok
2969 *
2970 * @param work pointer to the work item.
2971 *
2972 * @return as with k_work_submit_to_queue().
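 *
 * A minimal sketch of defining and submitting a work item to the system
 * work queue (handler and item names are illustrative):
 *
 * @code
 * static void my_handler(struct k_work *work)
 * {
 *         // runs in the system work queue thread
 * }
 *
 * static struct k_work my_work;
 *
 * k_work_init(&my_work, my_handler);
 * k_work_submit(&my_work);
 * @endcode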
2973 */
2974 extern int k_work_submit(struct k_work *work);
2975
2976 /** @brief Wait for last-submitted instance to complete.
2977 *
2978 * Resubmissions may occur while waiting, including chained submissions (from
2979 * within the handler).
2980 *
2981 * @note Be careful of caller and work queue thread relative priority. If
2982 * this function sleeps it will not return until the work queue thread
2983 * completes the tasks that allow this thread to resume.
2984 *
2985 * @note Behavior is undefined if this function is invoked on @p work from a
2986 * work queue running @p work.
2987 *
2988 * @param work pointer to the work item.
2989 *
2990 * @param sync pointer to an opaque item containing state related to the
2991 * pending cancellation. The object must persist until the call returns, and
2992 * be accessible from both the caller thread and the work queue thread. The
2993 * object must not be used for any other flush or cancel operation until this
2994 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
2995 * must be allocated in coherent memory.
2996 *
2997 * @retval true if call had to wait for completion
2998 * @retval false if work was already idle
2999 */
3000 bool k_work_flush(struct k_work *work,
3001 struct k_work_sync *sync);
3002
3003 /** @brief Cancel a work item.
3004 *
3005 * This attempts to prevent a pending (non-delayable) work item from being
3006 * processed by removing it from the work queue. If the item is being
3007 * processed, the work item will continue to be processed, but resubmissions
3008 * are rejected until cancellation completes.
3009 *
3010 * If this returns zero cancellation is complete, otherwise something
3011 * (probably a work queue thread) is still referencing the item.
3012 *
3013 * See also k_work_cancel_sync().
3014 *
3015 * @funcprops \isr_ok
3016 *
3017 * @param work pointer to the work item.
3018 *
3019 * @return the k_work_busy_get() status indicating the state of the item after all
3020 * cancellation steps performed by this call are completed.
3021 */
3022 int k_work_cancel(struct k_work *work);
3023
3024 /** @brief Cancel a work item and wait for it to complete.
3025 *
3026 * Same as k_work_cancel() but does not return until cancellation is complete.
3027 * This can be invoked by a thread after k_work_cancel() to synchronize with a
3028 * previous cancellation.
3029 *
3030 * On return the work structure will be idle unless something submits it after
3031 * the cancellation was complete.
3032 *
3033 * @note Be careful of caller and work queue thread relative priority. If
3034 * this function sleeps it will not return until the work queue thread
3035 * completes the tasks that allow this thread to resume.
3036 *
3037 * @note Behavior is undefined if this function is invoked on @p work from a
3038 * work queue running @p work.
3039 *
3040 * @param work pointer to the work item.
3041 *
3042 * @param sync pointer to an opaque item containing state related to the
3043 * pending cancellation. The object must persist until the call returns, and
3044 * be accessible from both the caller thread and the work queue thread. The
3045 * object must not be used for any other flush or cancel operation until this
3046 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3047 * must be allocated in coherent memory.
3048 *
3049 * @retval true if work was pending (call had to wait for cancellation of a
3050 * running handler to complete, or scheduled or submitted operations were
3051 * cancelled);
3052 * @retval false otherwise
3053 */
3054 bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync);
3055
3056 /** @brief Initialize a work queue structure.
3057 *
3058 * This must be invoked before starting a work queue structure for the first time.
3059 * It need not be invoked again on the same work queue structure.
3060 *
3061 * @funcprops \isr_ok
3062 *
3063 * @param queue the queue structure to be initialized.
3064 */
3065 void k_work_queue_init(struct k_work_q *queue);
3066
3067 /** @brief Initialize a work queue.
3068 *
3069 * This configures the work queue thread and starts it running. The function
3070 * should not be re-invoked on a queue.
3071 *
3072 * @param queue pointer to the queue structure. It must be initialized
3073 * in zeroed/bss memory or with @ref k_work_queue_init before
3074 * use.
3075 *
3076 * @param stack pointer to the work thread stack area.
3077 *
3078 * @param stack_size size of the work thread stack area, in bytes.
3079 *
3080 * @param prio initial thread priority
3081 *
3082 * @param cfg optional additional configuration parameters. Pass @c
3083 * NULL if not required, to use the defaults documented in
3084 * k_work_queue_config.
3085 */
3086 void k_work_queue_start(struct k_work_q *queue,
3087 k_thread_stack_t *stack, size_t stack_size,
3088 int prio, const struct k_work_queue_config *cfg);
3089
3090 /** @brief Access the thread that animates a work queue.
3091 *
3092 * This is necessary to grant a work queue thread access to things the work
3093 * items it will process are expected to use.
3094 *
3095 * @param queue pointer to the queue structure.
3096 *
3097 * @return the thread associated with the work queue.
3098 */
3099 static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue);
3100
3101 /** @brief Wait until the work queue has drained, optionally plugging it.
3102 *
3103 * This blocks submission to the work queue except when coming from the queue
3104 * thread, and blocks the caller until no more work items are available in the
3105 * queue.
3106 *
3107 * If @p plug is true then submission will continue to be blocked after the
3108 * drain operation completes until k_work_queue_unplug() is invoked.
3109 *
3110 * Note that work items that are delayed are not yet associated with their
3111 * work queue. They must be cancelled externally if a goal is to ensure the
3112 * work queue remains empty. The @p plug feature can be used to prevent
3113 * delayed items from being submitted after the drain completes.
3114 *
3115 * @param queue pointer to the queue structure.
3116 *
3117 * @param plug if true the work queue will continue to block new submissions
3118 * after all items have drained.
3119 *
3120 * @retval 1 if call had to wait for the drain to complete
3121 * @retval 0 if call did not have to wait
3122 * @retval negative if wait was interrupted or failed
3123 */
3124 int k_work_queue_drain(struct k_work_q *queue, bool plug);
3125
3126 /** @brief Release a work queue to accept new submissions.
3127 *
3128 * This releases the block on new submissions placed when k_work_queue_drain()
3129 * is invoked with the @p plug option enabled. If this is invoked before the
3130 * drain completes new items may be submitted as soon as the drain completes.
3131 *
3132 * @funcprops \isr_ok
3133 *
3134 * @param queue pointer to the queue structure.
3135 *
3136 * @retval 0 if successfully unplugged
3137 * @retval -EALREADY if the work queue was not plugged.
3138 */
3139 int k_work_queue_unplug(struct k_work_q *queue);
3140
3141 /** @brief Initialize a delayable work structure.
3142 *
3143 * This must be invoked before scheduling a delayable work structure for the
3144 * first time. It need not be invoked again on the same work structure. It
3145 * can be re-invoked to change the associated handler, but this must be done
3146 * when the work item is idle.
3147 *
3148 * @funcprops \isr_ok
3149 *
3150 * @param dwork the delayable work structure to be initialized.
3151 *
3152 * @param handler the handler to be invoked by the work item.
3153 */
3154 void k_work_init_delayable(struct k_work_delayable *dwork,
3155 k_work_handler_t handler);
3156
3157 /**
3158 * @brief Get the parent delayable work structure from a work pointer.
3159 *
3160 * This function is necessary when a @c k_work_handler_t function is passed to
3161 * k_work_schedule_for_queue() and the handler needs to access data in the
3162 * structure that contains the @c k_work_delayable.
3163 *
3164 * @param work Address passed to the work handler
3165 *
3166 * @return Address of the containing @c k_work_delayable structure.
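 *
 * A handler-side sketch (@c struct my_data is an assumed application type
 * that embeds the delayable work item in a field named @c dwork):
 *
 * @code
 * static void my_handler(struct k_work *work)
 * {
 *         struct k_work_delayable *dwork = k_work_delayable_from_work(work);
 *         struct my_data *data = CONTAINER_OF(dwork, struct my_data, dwork);
 *
 *         // ... use data ...
 * }
 * @endcode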
3167 */
3168 static inline struct k_work_delayable *
3169 k_work_delayable_from_work(struct k_work *work);
3170
3171 /** @brief Busy state flags from the delayable work item.
3172 *
3173 * @funcprops \isr_ok
3174 *
3175 * @note This is a live snapshot of state, which may change before the result
3176 * can be inspected. Use locks where appropriate.
3177 *
3178 * @param dwork pointer to the delayable work item.
3179 *
3180 * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED, K_WORK_RUNNING, and
3181 * K_WORK_CANCELING. A zero return value indicates the work item appears to
3182 * be idle.
3183 */
3184 int k_work_delayable_busy_get(const struct k_work_delayable *dwork);
3185
3186 /** @brief Test whether a delayed work item is currently pending.
3187 *
3188 * Wrapper to determine whether a delayed work item is in a non-idle state.
3189 *
3190 * @note This is a live snapshot of state, which may change before the result
3191 * can be inspected. Use locks where appropriate.
3192 *
3193 * @funcprops \isr_ok
3194 *
3195 * @param dwork pointer to the delayable work item.
3196 *
3197 * @return true if and only if k_work_delayable_busy_get() returns a non-zero
3198 * value.
3199 */
3200 static inline bool k_work_delayable_is_pending(
3201 const struct k_work_delayable *dwork);
3202
3203 /** @brief Get the absolute tick count at which a scheduled delayable work
3204 * will be submitted.
3205 *
3206 * @note This is a live snapshot of state, which may change before the result
3207 * can be inspected. Use locks where appropriate.
3208 *
3209 * @funcprops \isr_ok
3210 *
3211 * @param dwork pointer to the delayable work item.
3212 *
3213 * @return the tick count when the timer that will schedule the work item will
3214 * expire, or the current tick count if the work is not scheduled.
3215 */
3216 static inline k_ticks_t k_work_delayable_expires_get(
3217 const struct k_work_delayable *dwork);
3218
3219 /** @brief Get the number of ticks until a scheduled delayable work will be
3220 * submitted.
3221 *
3222 * @note This is a live snapshot of state, which may change before the result
3223 * can be inspected. Use locks where appropriate.
3224 *
3225 * @funcprops \isr_ok
3226 *
3227 * @param dwork pointer to the delayable work item.
3228 *
3229 * @return the number of ticks until the timer that will schedule the work
3230 * item will expire, or zero if the item is not scheduled.
3231 */
3232 static inline k_ticks_t k_work_delayable_remaining_get(
3233 const struct k_work_delayable *dwork);
3234
3235 /** @brief Submit an idle work item to a queue after a delay.
3236 *
3237 * Unlike k_work_reschedule_for_queue() this is a no-op if the work item is
3238 * already scheduled or submitted, even if @p delay is @c K_NO_WAIT.
3239 *
3240 * @funcprops \isr_ok
3241 *
3242 * @param queue the queue on which the work item should be submitted after the
3243 * delay.
3244 *
3245 * @param dwork pointer to the delayable work item.
3246 *
3247 * @param delay the time to wait before submitting the work item. If @c
3248 * K_NO_WAIT and the work is not pending this is equivalent to
3249 * k_work_submit_to_queue().
3250 *
3251 * @retval 0 if work was already scheduled or submitted.
3252 * @retval 1 if work has been scheduled.
3253 * @retval -EBUSY if @p delay is @c K_NO_WAIT and
3254 * k_work_submit_to_queue() fails with this code.
3255 * @retval -EINVAL if @p delay is @c K_NO_WAIT and
3256 * k_work_submit_to_queue() fails with this code.
3257 * @retval -ENODEV if @p delay is @c K_NO_WAIT and
3258 * k_work_submit_to_queue() fails with this code.
3259 */
3260 int k_work_schedule_for_queue(struct k_work_q *queue,
3261 struct k_work_delayable *dwork,
3262 k_timeout_t delay);
3263
3264 /** @brief Submit an idle work item to the system work queue after a
3265 * delay.
3266 *
3267 * This is a thin wrapper around k_work_schedule_for_queue(), with all the API
3268 * characteristics of that function.
3269 *
3270 * @param dwork pointer to the delayable work item.
3271 *
3272 * @param delay the time to wait before submitting the work item. If @c
3273 * K_NO_WAIT this is equivalent to k_work_submit_to_queue().
3274 *
3275 * @return as with k_work_schedule_for_queue().
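 *
 * A short sketch (handler and item names are illustrative):
 *
 * @code
 * static struct k_work_delayable my_dwork;
 *
 * k_work_init_delayable(&my_dwork, my_handler);
 * k_work_schedule(&my_dwork, K_MSEC(500));   // run my_handler after ~500 ms
 * @endcode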
3276 */
3277 extern int k_work_schedule(struct k_work_delayable *dwork,
3278 k_timeout_t delay);
3279
3280 /** @brief Reschedule a work item to a queue after a delay.
3281 *
3282 * Unlike k_work_schedule_for_queue() this function can change the deadline of
3283 * a scheduled work item, and will schedule a work item that isn't idle
3284 * (e.g. is submitted or running). This function does not affect ("unsubmit")
3285 * a work item that has been submitted to a queue.
3286 *
3287 * @funcprops \isr_ok
3288 *
3289 * @param queue the queue on which the work item should be submitted after the
3290 * delay.
3291 *
3292 * @param dwork pointer to the delayable work item.
3293 *
3294 * @param delay the time to wait before submitting the work item. If @c
3295 * K_NO_WAIT this is equivalent to k_work_submit_to_queue() after canceling
3296 * any previous scheduled submission.
3297 *
3298 * @note If delay is @c K_NO_WAIT ("no delay") the return values are as with
3299 * k_work_submit_to_queue().
3300 *
3301 * @retval 0 if delay is @c K_NO_WAIT and work was already on a queue
3302 * @retval 1 if
3303 * * delay is @c K_NO_WAIT and work was not submitted but has now been queued
3304 * to @p queue; or
3305 * * delay not @c K_NO_WAIT and work has been scheduled
3306 * @retval 2 if delay is @c K_NO_WAIT and work was running and has been queued
3307 * to the queue that was running it
3308 * @retval -EBUSY if @p delay is @c K_NO_WAIT and
3309 * k_work_submit_to_queue() fails with this code.
3310 * @retval -EINVAL if @p delay is @c K_NO_WAIT and
3311 * k_work_submit_to_queue() fails with this code.
3312 * @retval -ENODEV if @p delay is @c K_NO_WAIT and
3313 * k_work_submit_to_queue() fails with this code.
3314 */
3315 int k_work_reschedule_for_queue(struct k_work_q *queue,
3316 struct k_work_delayable *dwork,
3317 k_timeout_t delay);
3318
3319 /** @brief Reschedule a work item to the system work queue after a
3320 * delay.
3321 *
3322 * This is a thin wrapper around k_work_reschedule_for_queue(), with all the
3323 * API characteristics of that function.
3324 *
3325 * @param dwork pointer to the delayable work item.
3326 *
3327 * @param delay the time to wait before submitting the work item.
3328 *
3329 * @return as with k_work_reschedule_for_queue().
3330 */
3331 extern int k_work_reschedule(struct k_work_delayable *dwork,
3332 k_timeout_t delay);
3333
3334 /** @brief Flush delayable work.
3335 *
3336 * If the work is scheduled, it is immediately submitted. Then the caller
3337 * blocks until the work completes, as with k_work_flush().
3338 *
3339 * @note Be careful of caller and work queue thread relative priority. If
3340 * this function sleeps it will not return until the work queue thread
3341 * completes the tasks that allow this thread to resume.
3342 *
3343 * @note Behavior is undefined if this function is invoked on @p dwork from a
3344 * work queue running @p dwork.
3345 *
3346 * @param dwork pointer to the delayable work item.
3347 *
3348 * @param sync pointer to an opaque item containing state related to the
3349 * pending cancellation. The object must persist until the call returns, and
3350 * be accessible from both the caller thread and the work queue thread. The
3351 * object must not be used for any other flush or cancel operation until this
3352 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3353 * must be allocated in coherent memory.
3354 *
3355 * @retval true if call had to wait for completion
3356 * @retval false if work was already idle
3357 */
3358 bool k_work_flush_delayable(struct k_work_delayable *dwork,
3359 struct k_work_sync *sync);
3360
3361 /** @brief Cancel delayable work.
3362 *
3363 * Similar to k_work_cancel() but for delayable work. If the work is
3364 * scheduled or submitted it is canceled. This function does not wait for the
3365 * cancellation to complete.
3366 *
3367 * @note The work may still be running when this returns. Use
3368 * k_work_flush_delayable() or k_work_cancel_delayable_sync() to ensure it is
3369 * not running.
3370 *
3371 * @note Canceling delayable work does not prevent rescheduling it. It does
3372 * prevent submitting it until the cancellation completes.
3373 *
3374 * @funcprops \isr_ok
3375 *
3376 * @param dwork pointer to the delayable work item.
3377 *
3378 * @return the k_work_delayable_busy_get() status indicating the state of the
3379 * item after all cancellation steps performed by this call are completed.
3380 */
3381 int k_work_cancel_delayable(struct k_work_delayable *dwork);
3382
3383 /** @brief Cancel delayable work and wait.
3384 *
3385 * Like k_work_cancel_delayable() but waits until the work becomes idle.
3386 *
3387 * @note Canceling delayable work does not prevent rescheduling it. It does
3388 * prevent submitting it until the cancellation completes.
3389 *
3390 * @note Be careful of caller and work queue thread relative priority. If
3391 * this function sleeps it will not return until the work queue thread
3392 * completes the tasks that allow this thread to resume.
3393 *
3394 * @note Behavior is undefined if this function is invoked on @p dwork from a
3395 * work queue running @p dwork.
3396 *
3397 * @param dwork pointer to the delayable work item.
3398 *
3399 * @param sync pointer to an opaque item containing state related to the
3400 * pending cancellation. The object must persist until the call returns, and
3401 * be accessible from both the caller thread and the work queue thread. The
3402 * object must not be used for any other flush or cancel operation until this
3403 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3404 * must be allocated in coherent memory.
3405 *
3406 * @retval true if work was not idle (call had to wait for cancellation of a
3407 * running handler to complete, or scheduled or submitted operations were
3408 * cancelled);
3409 * @retval false otherwise
3410 */
3411 bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
3412 struct k_work_sync *sync);
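
/*
 * Informative example (not part of the API): a minimal sketch of shutting
 * down a delayable work item from thread context, waiting for any
 * in-progress handler to finish. The sample_work symbol is hypothetical.
 *
 * @code
 * static struct k_work_delayable sample_work;
 *
 * void sampling_stop(void)
 * {
 *         struct k_work_sync sync;
 *
 *         // Returns true if the call had to wait for a running handler
 *         // or cancel a scheduled/submitted instance.
 *         if (k_work_cancel_delayable_sync(&sample_work, &sync)) {
 *                 // work was busy and is now idle
 *         }
 * }
 * @endcode
 */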
3413
3414 enum {
3415 /**
3416 * @cond INTERNAL_HIDDEN
3417 */
3418
3419 /* The atomic API is used for all work and queue flags fields to
3420 * enforce sequential consistency in SMP environments.
3421 */
3422
3423 /* Bits that represent the work item states. At least nine of the
3424 * combinations are distinct valid stable states.
3425 */
3426 K_WORK_RUNNING_BIT = 0,
3427 K_WORK_CANCELING_BIT = 1,
3428 K_WORK_QUEUED_BIT = 2,
3429 K_WORK_DELAYED_BIT = 3,
3430
3431 K_WORK_MASK = BIT(K_WORK_DELAYED_BIT) | BIT(K_WORK_QUEUED_BIT)
3432 | BIT(K_WORK_RUNNING_BIT) | BIT(K_WORK_CANCELING_BIT),
3433
3434 /* Static work flags */
3435 K_WORK_DELAYABLE_BIT = 8,
3436 K_WORK_DELAYABLE = BIT(K_WORK_DELAYABLE_BIT),
3437
3438 /* Dynamic work queue flags */
3439 K_WORK_QUEUE_STARTED_BIT = 0,
3440 K_WORK_QUEUE_STARTED = BIT(K_WORK_QUEUE_STARTED_BIT),
3441 K_WORK_QUEUE_BUSY_BIT = 1,
3442 K_WORK_QUEUE_BUSY = BIT(K_WORK_QUEUE_BUSY_BIT),
3443 K_WORK_QUEUE_DRAIN_BIT = 2,
3444 K_WORK_QUEUE_DRAIN = BIT(K_WORK_QUEUE_DRAIN_BIT),
3445 K_WORK_QUEUE_PLUGGED_BIT = 3,
3446 K_WORK_QUEUE_PLUGGED = BIT(K_WORK_QUEUE_PLUGGED_BIT),
3447
3448 /* Static work queue flags */
3449 K_WORK_QUEUE_NO_YIELD_BIT = 8,
3450 K_WORK_QUEUE_NO_YIELD = BIT(K_WORK_QUEUE_NO_YIELD_BIT),
3451
3452 /**
3453 * INTERNAL_HIDDEN @endcond
3454 */
3455 /* Transient work flags */
3456
3457 /** @brief Flag indicating a work item that is running under a work
3458 * queue thread.
3459 *
3460 * Accessed via k_work_busy_get(). May co-occur with other flags.
3461 */
3462 K_WORK_RUNNING = BIT(K_WORK_RUNNING_BIT),
3463
3464 /** @brief Flag indicating a work item that is being canceled.
3465 *
3466 * Accessed via k_work_busy_get(). May co-occur with other flags.
3467 */
3468 K_WORK_CANCELING = BIT(K_WORK_CANCELING_BIT),
3469
3470 /** @brief Flag indicating a work item that has been submitted to a
3471 * queue but has not started running.
3472 *
3473 * Accessed via k_work_busy_get(). May co-occur with other flags.
3474 */
3475 K_WORK_QUEUED = BIT(K_WORK_QUEUED_BIT),
3476
3477 /** @brief Flag indicating a delayed work item that is scheduled for
3478 * submission to a queue.
3479 *
3480 * Accessed via k_work_busy_get(). May co-occur with other flags.
3481 */
3482 K_WORK_DELAYED = BIT(K_WORK_DELAYED_BIT),
3483 };
3484
3485 /** @brief A structure used to submit work. */
3486 struct k_work {
3487 /* All fields are protected by the work module spinlock. No fields
3488 * are to be accessed except through kernel API.
3489 */
3490
3491 /* Node to link into k_work_q pending list. */
3492 sys_snode_t node;
3493
3494 /* The function to be invoked by the work queue thread. */
3495 k_work_handler_t handler;
3496
3497 /* The queue on which the work item was last submitted. */
3498 struct k_work_q *queue;
3499
3500 /* State of the work item.
3501 *
3502 * The item can be DELAYED, QUEUED, and RUNNING simultaneously.
3503 *
3504 * It can be RUNNING and CANCELING simultaneously.
3505 */
3506 uint32_t flags;
3507 };
3508
3509 #define Z_WORK_INITIALIZER(work_handler) { \
3510 .handler = work_handler, \
3511 }
3512
3513 /** @brief A structure used to submit work after a delay. */
3514 struct k_work_delayable {
3515 /* The work item. */
3516 struct k_work work;
3517
3518 /* Timeout used to submit work after a delay. */
3519 struct _timeout timeout;
3520
3521 /* The queue to which the work should be submitted. */
3522 struct k_work_q *queue;
3523 };
3524
3525 #define Z_WORK_DELAYABLE_INITIALIZER(work_handler) { \
3526 .work = { \
3527 .handler = work_handler, \
3528 .flags = K_WORK_DELAYABLE, \
3529 }, \
3530 }
3531
3532 /**
3533 * @brief Initialize a statically-defined delayable work item.
3534 *
3535 * This macro can be used to initialize a statically-defined delayable
3536 * work item, prior to its first use. For example,
3537 *
3538 * @code static K_WORK_DELAYABLE_DEFINE(<dwork>, <work_handler>); @endcode
3539 *
3540 * Note that if the runtime dependencies support initialization with
3541  * k_work_init_delayable(), using that instead will eliminate the initialized
3542 * object in ROM that is produced by this macro and copied in at
3543 * system startup.
3544 *
3545 * @param work Symbol name for delayable work item object
3546 * @param work_handler Function to invoke each time work item is processed.
3547 */
3548 #define K_WORK_DELAYABLE_DEFINE(work, work_handler) \
3549 struct k_work_delayable work \
3550 = Z_WORK_DELAYABLE_INITIALIZER(work_handler)
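
/*
 * Informative example (not part of the API): a hypothetical handler for a
 * statically-defined delayable work item that reschedules itself, using
 * k_work_delayable_from_work() to recover the containing object from the
 * k_work pointer passed to the handler.
 *
 * @code
 * static void poll_handler(struct k_work *work)
 * {
 *         struct k_work_delayable *dwork = k_work_delayable_from_work(work);
 *
 *         // ... do the periodic job ...
 *
 *         // Run again in one second.
 *         (void)k_work_schedule(dwork, K_SECONDS(1));
 * }
 *
 * static K_WORK_DELAYABLE_DEFINE(poll_work, poll_handler);
 * @endcode
 */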
3551
3552 /**
3553 * @cond INTERNAL_HIDDEN
3554 */
3555
3556 /* Record used to wait for work to flush.
3557 *
3558 * The work item is inserted into the queue that will process (or is
3559 * processing) the item, and will be processed as soon as the item
3560 * completes. When the flusher is processed the semaphore will be
3561 * signaled, releasing the thread waiting for the flush.
3562 */
3563 struct z_work_flusher {
3564 struct k_work work;
3565 struct k_sem sem;
3566 };
3567
3568 /* Record used to wait for work to complete a cancellation.
3569 *
3570 * The work item is inserted into a global queue of pending cancels.
3571 * When a cancelling work item goes idle any matching waiters are
3572 * removed from pending_cancels and are woken.
3573 */
3574 struct z_work_canceller {
3575 sys_snode_t node;
3576 struct k_work *work;
3577 struct k_sem sem;
3578 };
3579
3580 /**
3581 * INTERNAL_HIDDEN @endcond
3582 */
3583
3584 /** @brief A structure holding internal state for a pending synchronous
3585 * operation on a work item or queue.
3586 *
3587 * Instances of this type are provided by the caller for invocation of
3588 * k_work_flush(), k_work_cancel_sync() and sibling flush and cancel APIs. A
3589 * referenced object must persist until the call returns, and be accessible
3590 * from both the caller thread and the work queue thread.
3591 *
3592 * @note If CONFIG_KERNEL_COHERENCE is enabled the object must be allocated in
3593 * coherent memory; see arch_mem_coherent(). The stack on these architectures
3594  * is generally not coherent, so the object must not be stack-allocated.
3595  * Violations are detected by runtime assertion.
3596 */
3597 struct k_work_sync {
3598 union {
3599 struct z_work_flusher flusher;
3600 struct z_work_canceller canceller;
3601 };
3602 };
3603
3604 /** @brief A structure holding optional configuration items for a work
3605 * queue.
3606 *
3607 * This structure, and values it references, are not retained by
3608 * k_work_queue_start().
3609 */
3610 struct k_work_queue_config {
3611 /** The name to be given to the work queue thread.
3612 *
3613 * If left null the thread will not have a name.
3614 */
3615 const char *name;
3616
3617 /** Control whether the work queue thread should yield between
3618 * items.
3619 *
3620 * Yielding between items helps guarantee the work queue
3621 * thread does not starve other threads, including cooperative
3622 * ones released by a work item. This is the default behavior.
3623 *
3624 * Set this to @c true to prevent the work queue thread from
3625 * yielding between items. This may be appropriate when a
3626 * sequence of items should complete without yielding
3627 * control.
3628 */
3629 bool no_yield;
3630 };
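
/*
 * Informative example (not part of the API): a minimal sketch of starting a
 * dedicated work queue with an optional configuration. The stack size,
 * priority, and symbol names are hypothetical.
 *
 * @code
 * #define MY_WQ_STACK_SIZE 1024
 * #define MY_WQ_PRIORITY   5
 *
 * K_THREAD_STACK_DEFINE(my_wq_stack, MY_WQ_STACK_SIZE);
 * static struct k_work_q my_wq;
 *
 * void my_wq_start(void)
 * {
 *         struct k_work_queue_config cfg = {
 *                 .name = "my_wq",
 *                 .no_yield = false,
 *         };
 *
 *         k_work_queue_start(&my_wq, my_wq_stack,
 *                            K_THREAD_STACK_SIZEOF(my_wq_stack),
 *                            MY_WQ_PRIORITY, &cfg);
 * }
 * @endcode
 */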
3631
3632 /** @brief A structure used to hold work until it can be processed. */
3633 struct k_work_q {
3634 /* The thread that animates the work. */
3635 struct k_thread thread;
3636
3637 /* All the following fields must be accessed only while the
3638 * work module spinlock is held.
3639 */
3640
3641 /* List of k_work items to be worked. */
3642 sys_slist_t pending;
3643
3644 /* Wait queue for idle work thread. */
3645 _wait_q_t notifyq;
3646
3647 /* Wait queue for threads waiting for the queue to drain. */
3648 _wait_q_t drainq;
3649
3650 /* Flags describing queue state. */
3651 uint32_t flags;
3652 };
3653
3654 /* Provide the implementation for inline functions declared above */
3655
3656 static inline bool k_work_is_pending(const struct k_work *work)
3657 {
3658 return k_work_busy_get(work) != 0;
3659 }
3660
3661 static inline struct k_work_delayable *
3662 k_work_delayable_from_work(struct k_work *work)
3663 {
3664 return CONTAINER_OF(work, struct k_work_delayable, work);
3665 }
3666
3667 static inline bool k_work_delayable_is_pending(
3668 const struct k_work_delayable *dwork)
3669 {
3670 return k_work_delayable_busy_get(dwork) != 0;
3671 }
3672
3673 static inline k_ticks_t k_work_delayable_expires_get(
3674 const struct k_work_delayable *dwork)
3675 {
3676 return z_timeout_expires(&dwork->timeout);
3677 }
3678
3679 static inline k_ticks_t k_work_delayable_remaining_get(
3680 const struct k_work_delayable *dwork)
3681 {
3682 return z_timeout_remaining(&dwork->timeout);
3683 }
3684
3685 static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue)
3686 {
3687 return &queue->thread;
3688 }
3689
3690 /* Legacy wrappers */
3691
3692 __deprecated
3693 static inline bool k_work_pending(const struct k_work *work)
3694 {
3695 return k_work_is_pending(work);
3696 }
3697
3698 __deprecated
3699 static inline void k_work_q_start(struct k_work_q *work_q,
3700 k_thread_stack_t *stack,
3701 size_t stack_size, int prio)
3702 {
3703 k_work_queue_start(work_q, stack, stack_size, prio, NULL);
3704 }
3705
3706 /* deprecated, remove when corresponding deprecated API is removed. */
3707 struct k_delayed_work {
3708 struct k_work_delayable work;
3709 };
3710
3711 #define Z_DELAYED_WORK_INITIALIZER(work_handler) __DEPRECATED_MACRO { \
3712 .work = Z_WORK_DELAYABLE_INITIALIZER(work_handler), \
3713 }
3714
3715 __deprecated
3716 static inline void k_delayed_work_init(struct k_delayed_work *work,
3717 k_work_handler_t handler)
3718 {
3719 k_work_init_delayable(&work->work, handler);
3720 }
3721
3722 __deprecated
3723 static inline int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
3724 struct k_delayed_work *work,
3725 k_timeout_t delay)
3726 {
3727 int rc = k_work_reschedule_for_queue(work_q, &work->work, delay);
3728
3729 /* Legacy API doesn't distinguish success cases. */
3730 return (rc >= 0) ? 0 : rc;
3731 }
3732
3733 __deprecated
3734 static inline int k_delayed_work_submit(struct k_delayed_work *work,
3735 k_timeout_t delay)
3736 {
3737 int rc = k_work_reschedule(&work->work, delay);
3738
3739 /* Legacy API doesn't distinguish success cases. */
3740 return (rc >= 0) ? 0 : rc;
3741 }
3742
3743 __deprecated
3744 static inline int k_delayed_work_cancel(struct k_delayed_work *work)
3745 {
3746 bool pending = k_work_delayable_is_pending(&work->work);
3747 int rc = k_work_cancel_delayable(&work->work);
3748
3749 /* Old return value rules:
3750 *
3751 * 0 if:
3752 * * Work item countdown cancelled before the item was submitted to
3753 * its queue; or
3754 * * Work item was removed from its queue before it was processed.
3755 *
3756 * -EINVAL if:
3757 * * Work item has never been submitted; or
3758 * * Work item has been successfully cancelled; or
3759 * * Timeout handler is in the process of submitting the work item to
3760 * its queue; or
3761 * * Work queue thread has removed the work item from the queue but
3762 * has not called its handler.
3763 *
3764 * -EALREADY if:
3765 * * Work queue thread has removed the work item from the queue and
3766 * cleared its pending flag; or
3767 * * Work queue thread is invoking the item handler; or
3768 * * Work item handler has completed.
3769 *
3770  *
3771 * We can't reconstruct those states, so call it successful only when
3772 * a pending item is no longer pending, -EINVAL if it was pending and
3773  * still is, and -EALREADY if it wasn't pending (so
3774 * presumably cancellation should have had no effect, assuming we
3775 * didn't hit a race condition).
3776 */
3777 if (pending) {
3778 return (rc == 0) ? 0 : -EINVAL;
3779 }
3780
3781 return -EALREADY;
3782 }
3783
3784 __deprecated
3785 static inline bool k_delayed_work_pending(struct k_delayed_work *work)
3786 {
3787 return k_work_delayable_is_pending(&work->work);
3788 }
3789
3790 __deprecated
3791 static inline int32_t k_delayed_work_remaining_get(struct k_delayed_work *work)
3792 {
3793 k_ticks_t rem = k_work_delayable_remaining_get(&work->work);
3794
3795 /* Probably should be ceil32, but was floor32 */
3796 return k_ticks_to_ms_floor32(rem);
3797 }
3798
3799 __deprecated
3800 static inline k_ticks_t k_delayed_work_expires_ticks(
3801 struct k_delayed_work *work)
3802 {
3803 return k_work_delayable_expires_get(&work->work);
3804 }
3805
3806 __deprecated
3807 static inline k_ticks_t k_delayed_work_remaining_ticks(
3808 struct k_delayed_work *work)
3809 {
3810 return k_work_delayable_remaining_get(&work->work);
3811 }
3812
3813 /** @} */
3814
3815 struct k_work_user;
3816
3817 /**
3818 * @addtogroup workqueue_apis
3819 * @{
3820 */
3821
3822 /**
3823 * @typedef k_work_user_handler_t
3824 * @brief Work item handler function type for user work queues.
3825 *
3826 * A work item's handler function is executed by a user workqueue's thread
3827 * when the work item is processed by the workqueue.
3828 *
3829 * @param work Address of the work item.
3830 *
3831 * @return N/A
3832 */
3833 typedef void (*k_work_user_handler_t)(struct k_work_user *work);
3834
3835 /**
3836 * @cond INTERNAL_HIDDEN
3837 */
3838
3839 struct k_work_user_q {
3840 struct k_queue queue;
3841 struct k_thread thread;
3842 };
3843
3844 enum {
3845 K_WORK_USER_STATE_PENDING, /* Work item pending state */
3846 };
3847
3848 struct k_work_user {
3849 void *_reserved; /* Used by k_queue implementation. */
3850 k_work_user_handler_t handler;
3851 atomic_t flags;
3852 };
3853
3854 /**
3855 * INTERNAL_HIDDEN @endcond
3856 */
3857
3858 #define Z_WORK_USER_INITIALIZER(work_handler) \
3859 { \
3860 ._reserved = NULL, \
3861 .handler = work_handler, \
3862 .flags = 0 \
3863 }
3864
3865 /**
3866 * @brief Initialize a statically-defined user work item.
3867 *
3868 * This macro can be used to initialize a statically-defined user work
3869 * item, prior to its first use. For example,
3870 *
3871 * @code static K_WORK_USER_DEFINE(<work>, <work_handler>); @endcode
3872 *
3873 * @param work Symbol name for work item object
3874 * @param work_handler Function to invoke each time work item is processed.
3875 */
3876 #define K_WORK_USER_DEFINE(work, work_handler) \
3877 struct k_work_user work = Z_WORK_USER_INITIALIZER(work_handler)
3878
3879 /**
3880 * @brief Initialize a userspace work item.
3881 *
3882 * This routine initializes a user workqueue work item, prior to its
3883 * first use.
3884 *
3885 * @param work Address of work item.
3886 * @param handler Function to invoke each time work item is processed.
3887 *
3888 * @return N/A
3889 */
3890 static inline void k_work_user_init(struct k_work_user *work,
3891 k_work_user_handler_t handler)
3892 {
3893 *work = (struct k_work_user)Z_WORK_USER_INITIALIZER(handler);
3894 }
3895
3896 /**
3897 * @brief Check if a userspace work item is pending.
3898 *
3899 * This routine indicates if user work item @a work is pending in a workqueue's
3900 * queue.
3901 *
3902 * @note Checking if the work is pending gives no guarantee that the
3903 * work will still be pending when this information is used. It is up to
3904 * the caller to make sure that this information is used in a safe manner.
3905 *
3906 * @funcprops \isr_ok
3907 *
3908 * @param work Address of work item.
3909 *
3910 * @return true if work item is pending, or false if it is not pending.
3911 */
3912 static inline bool k_work_user_is_pending(struct k_work_user *work)
3913 {
3914 return atomic_test_bit(&work->flags, K_WORK_USER_STATE_PENDING);
3915 }
3916
3917 /**
3918 * @brief Submit a work item to a user mode workqueue
3919 *
3920 * Submits a work item to a workqueue that runs in user mode. A temporary
3921 * memory allocation is made from the caller's resource pool which is freed
3922 * once the worker thread consumes the k_work item. The workqueue
3923 * thread must have memory access to the k_work item being submitted. The caller
3924 * must have permission granted on the work_q parameter's queue object.
3925 *
3926 * @funcprops \isr_ok
3927 *
3928 * @param work_q Address of workqueue.
3929 * @param work Address of work item.
3930 *
3931 * @retval -EBUSY if the work item was already in some workqueue
3932 * @retval -ENOMEM if no memory for thread resource pool allocation
3933 * @retval 0 Success
3934 */
3935 static inline int k_work_user_submit_to_queue(struct k_work_user_q *work_q,
3936 struct k_work_user *work)
3937 {
3938 int ret = -EBUSY;
3939
3940 if (!atomic_test_and_set_bit(&work->flags,
3941 K_WORK_USER_STATE_PENDING)) {
3942 ret = k_queue_alloc_append(&work_q->queue, work);
3943
3944 /* Couldn't insert into the queue. Clear the pending bit
3945 * so the work item can be submitted again
3946 */
3947 if (ret != 0) {
3948 atomic_clear_bit(&work->flags,
3949 K_WORK_USER_STATE_PENDING);
3950 }
3951 }
3952
3953 return ret;
3954 }
3955
3956 /**
3957 * @brief Start a workqueue in user mode
3958 *
3959 * This works identically to k_work_queue_start() except it is callable from
3960 * user mode, and the worker thread created will run in user mode. The caller
3961 * must have permissions granted on both the work_q parameter's thread and
3962 * queue objects, and the same restrictions on priority apply as
3963 * k_thread_create().
3964 *
3965 * @param work_q Address of workqueue.
3966 * @param stack Pointer to work queue thread's stack space, as defined by
3967 * K_THREAD_STACK_DEFINE()
3968 * @param stack_size Size of the work queue thread's stack (in bytes), which
3969 * should either be the same constant passed to
3970 * K_THREAD_STACK_DEFINE() or the value of K_THREAD_STACK_SIZEOF().
3971 * @param prio Priority of the work queue's thread.
3972 * @param name optional thread name. If not null a copy is made into the
3973 * thread's name buffer.
3974 *
3975 * @return N/A
3976 */
3977 extern void k_work_user_queue_start(struct k_work_user_q *work_q,
3978 k_thread_stack_t *stack,
3979 size_t stack_size, int prio,
3980 const char *name);
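
/*
 * Informative example (not part of the API): a minimal sketch of a user-mode
 * work queue. The stack size, priority, and symbol names are hypothetical;
 * the caller is assumed to have the required permissions on the thread and
 * queue objects.
 *
 * @code
 * K_THREAD_STACK_DEFINE(user_wq_stack, 1024);
 * static struct k_work_user_q user_wq;
 *
 * static void user_work_handler(struct k_work_user *work)
 * {
 *         // Runs in the user-mode work queue thread.
 * }
 *
 * static K_WORK_USER_DEFINE(user_work, user_work_handler);
 *
 * void start_and_submit(void)
 * {
 *         k_work_user_queue_start(&user_wq, user_wq_stack,
 *                                 K_THREAD_STACK_SIZEOF(user_wq_stack),
 *                                 5, "user_wq");
 *
 *         (void)k_work_user_submit_to_queue(&user_wq, &user_work);
 * }
 * @endcode
 */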
3981
3982 /** @} */
3983
3984 /**
3985 * @cond INTERNAL_HIDDEN
3986 */
3987
3988 struct k_work_poll {
3989 struct k_work work;
3990 struct k_work_q *workq;
3991 struct z_poller poller;
3992 struct k_poll_event *events;
3993 int num_events;
3994 k_work_handler_t real_handler;
3995 struct _timeout timeout;
3996 int poll_result;
3997 };
3998
3999 /**
4000 * INTERNAL_HIDDEN @endcond
4001 */
4002
4003 /**
4004 * @addtogroup workqueue_apis
4005 * @{
4006 */
4007
4008 /**
4009 * @brief Initialize a statically-defined work item.
4010 *
4011 * This macro can be used to initialize a statically-defined workqueue work
4012 * item, prior to its first use. For example,
4013 *
4014 * @code static K_WORK_DEFINE(<work>, <work_handler>); @endcode
4015 *
4016 * @param work Symbol name for work item object
4017 * @param work_handler Function to invoke each time work item is processed.
4018 */
4019 #define K_WORK_DEFINE(work, work_handler) \
4020 struct k_work work = Z_WORK_INITIALIZER(work_handler)
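
/*
 * Informative example (not part of the API): a minimal sketch pairing a
 * statically-defined work item with submission from an ISR via
 * k_work_submit(). The blink_work, blink_handler and my_isr names are
 * hypothetical.
 *
 * @code
 * static void blink_handler(struct k_work *work)
 * {
 *         // Deferred processing runs in the system work queue thread.
 * }
 *
 * static K_WORK_DEFINE(blink_work, blink_handler);
 *
 * void my_isr(const void *arg)
 * {
 *         // Safe from ISR context; the handler runs later in thread context.
 *         (void)k_work_submit(&blink_work);
 * }
 * @endcode
 */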
4021
4022 /**
4023 * @brief Initialize a statically-defined delayed work item.
4024 *
4025 * This macro can be used to initialize a statically-defined workqueue
4026 * delayed work item, prior to its first use. For example,
4027 *
4028 * @code static K_DELAYED_WORK_DEFINE(<work>, <work_handler>); @endcode
4029 *
4030 * @param work Symbol name for delayed work item object
4031 * @param work_handler Function to invoke each time work item is processed.
4032 */
4033 #define K_DELAYED_WORK_DEFINE(work, work_handler) __DEPRECATED_MACRO \
4034 struct k_delayed_work work = Z_DELAYED_WORK_INITIALIZER(work_handler)
4035
4036 /**
4037 * @brief Initialize a triggered work item.
4038 *
4039 * This routine initializes a workqueue triggered work item, prior to
4040 * its first use.
4041 *
4042 * @param work Address of triggered work item.
4043 * @param handler Function to invoke each time work item is processed.
4044 *
4045 * @return N/A
4046 */
4047 extern void k_work_poll_init(struct k_work_poll *work,
4048 k_work_handler_t handler);
4049
4050 /**
4051 * @brief Submit a triggered work item.
4052 *
4053 * This routine schedules work item @a work to be processed by workqueue
4054 * @a work_q when one of the given @a events is signaled. The routine
4055  * initiates an internal poller for the work item and then returns to the caller.
4056  * Only when one of the watched events happens is the work item actually
4057  * submitted to the workqueue, becoming pending.
4058 *
4059 * Submitting a previously submitted triggered work item that is still
4060 * waiting for the event cancels the existing submission and reschedules it
4061  * using the new event list. Note that this behavior is inherently subject
4062 * to race conditions with the pre-existing triggered work item and work queue,
4063 * so care must be taken to synchronize such resubmissions externally.
4064 *
4065 * @funcprops \isr_ok
4066 *
4067 * @warning
4068 * Provided array of events as well as a triggered work item must be placed
4069 * in persistent memory (valid until work handler execution or work
4070 * cancellation) and cannot be modified after submission.
4071 *
4072 * @param work_q Address of workqueue.
4073 * @param work Address of delayed work item.
4074 * @param events An array of events which trigger the work.
4075 * @param num_events The number of events in the array.
4076 * @param timeout Timeout after which the work will be scheduled
4077 * for execution even if not triggered.
4078 *
4079 *
4080 * @retval 0 Work item started watching for events.
4081 * @retval -EINVAL Work item is being processed or has completed its work.
4082 * @retval -EADDRINUSE Work item is pending on a different workqueue.
4083 */
4084 extern int k_work_poll_submit_to_queue(struct k_work_q *work_q,
4085 struct k_work_poll *work,
4086 struct k_poll_event *events,
4087 int num_events,
4088 k_timeout_t timeout);
4089
4090 /**
4091 * @brief Submit a triggered work item to the system workqueue.
4092 *
4093 * This routine schedules work item @a work to be processed by system
4094 * workqueue when one of the given @a events is signaled. The routine
4095  * initiates an internal poller for the work item and then returns to the caller.
4096  * Only when one of the watched events happens is the work item actually
4097  * submitted to the workqueue, becoming pending.
4098 *
4099 * Submitting a previously submitted triggered work item that is still
4100 * waiting for the event cancels the existing submission and reschedules it
4101  * using the new event list. Note that this behavior is inherently subject
4102 * to race conditions with the pre-existing triggered work item and work queue,
4103 * so care must be taken to synchronize such resubmissions externally.
4104 *
4105 * @funcprops \isr_ok
4106 *
4107 * @warning
4108 * Provided array of events as well as a triggered work item must not be
4109 * modified until the item has been processed by the workqueue.
4110 *
4111 * @param work Address of delayed work item.
4112 * @param events An array of events which trigger the work.
4113 * @param num_events The number of events in the array.
4114 * @param timeout Timeout after which the work will be scheduled
4115 * for execution even if not triggered.
4116 *
4117 * @retval 0 Work item started watching for events.
4118 * @retval -EINVAL Work item is being processed or has completed its work.
4119 * @retval -EADDRINUSE Work item is pending on a different workqueue.
4120 */
4121 extern int k_work_poll_submit(struct k_work_poll *work,
4122 struct k_poll_event *events,
4123 int num_events,
4124 k_timeout_t timeout);
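
/*
 * Informative example (not part of the API): a minimal sketch of triggered
 * work that runs when a semaphore becomes available, or after a timeout if
 * it never does. The symbol names are hypothetical; the event array and the
 * k_work_poll object live in static storage so they persist until the
 * handler runs or the work is canceled.
 *
 * @code
 * K_SEM_DEFINE(data_ready, 0, 1);
 *
 * static struct k_work_poll triggered_work;
 * static struct k_poll_event trigger_events[1] = {
 *         K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
 *                                         K_POLL_MODE_NOTIFY_ONLY,
 *                                         &data_ready, 0),
 * };
 *
 * static void triggered_handler(struct k_work *work)
 * {
 *         // Runs once data_ready was given, or the timeout expired.
 * }
 *
 * void triggered_start(void)
 * {
 *         k_work_poll_init(&triggered_work, triggered_handler);
 *         (void)k_work_poll_submit(&triggered_work, trigger_events,
 *                                  1, K_SECONDS(10));
 * }
 * @endcode
 */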
4125
4126 /**
4127 * @brief Cancel a triggered work item.
4128 *
4129 * This routine cancels the submission of triggered work item @a work.
4130 * A triggered work item can only be canceled if no event triggered work
4131 * submission.
4132 *
4133 * @funcprops \isr_ok
4134 *
4135 * @param work Address of delayed work item.
4136 *
4137 * @retval 0 Work item canceled.
4138 * @retval -EINVAL Work item is being processed or has completed its work.
4139 */
4140 extern int k_work_poll_cancel(struct k_work_poll *work);
4141
4142 /** @} */
4143
4144 /**
4145 * @defgroup msgq_apis Message Queue APIs
4146 * @ingroup kernel_apis
4147 * @{
4148 */
4149
4150 /**
4151 * @brief Message Queue Structure
4152 */
4153 struct k_msgq {
4154 /** Message queue wait queue */
4155 _wait_q_t wait_q;
4156 /** Lock */
4157 struct k_spinlock lock;
4158 /** Message size */
4159 size_t msg_size;
4160 /** Maximal number of messages */
4161 uint32_t max_msgs;
4162 /** Start of message buffer */
4163 char *buffer_start;
4164 /** End of message buffer */
4165 char *buffer_end;
4166 /** Read pointer */
4167 char *read_ptr;
4168 /** Write pointer */
4169 char *write_ptr;
4170 /** Number of used messages */
4171 uint32_t used_msgs;
4172
4173 _POLL_EVENT;
4174
4175 	/** Message queue flags */
4176 uint8_t flags;
4177 };
4178 /**
4179 * @cond INTERNAL_HIDDEN
4180 */
4181
4182
4183 #define Z_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \
4184 { \
4185 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
4186 .msg_size = q_msg_size, \
4187 .max_msgs = q_max_msgs, \
4188 .buffer_start = q_buffer, \
4189 .buffer_end = q_buffer + (q_max_msgs * q_msg_size), \
4190 .read_ptr = q_buffer, \
4191 .write_ptr = q_buffer, \
4192 .used_msgs = 0, \
4193 _POLL_EVENT_OBJ_INIT(obj) \
4194 }
4195
4196 /**
4197 * INTERNAL_HIDDEN @endcond
4198 */
4199
4200
4201 #define K_MSGQ_FLAG_ALLOC BIT(0)
4202
4203 /**
4204 * @brief Message Queue Attributes
4205 */
4206 struct k_msgq_attrs {
4207 /** Message Size */
4208 size_t msg_size;
4209 /** Maximal number of messages */
4210 uint32_t max_msgs;
4211 /** Used messages */
4212 uint32_t used_msgs;
4213 };
4214
4215
4216 /**
4217 * @brief Statically define and initialize a message queue.
4218 *
4219 * The message queue's ring buffer contains space for @a q_max_msgs messages,
4220 * each of which is @a q_msg_size bytes long. The buffer is aligned to a
4221 * @a q_align -byte boundary, which must be a power of 2. To ensure that each
4222 * message is similarly aligned to this boundary, @a q_msg_size must also be
4223 * a multiple of @a q_align.
4224 *
4225 * The message queue can be accessed outside the module where it is defined
4226 * using:
4227 *
4228 * @code extern struct k_msgq <name>; @endcode
4229 *
4230 * @param q_name Name of the message queue.
4231 * @param q_msg_size Message size (in bytes).
4232 * @param q_max_msgs Maximum number of messages that can be queued.
4233 * @param q_align Alignment of the message queue's ring buffer.
4234 *
4235 */
4236 #define K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align) \
4237 static char __noinit __aligned(q_align) \
4238 _k_fifo_buf_##q_name[(q_max_msgs) * (q_msg_size)]; \
4239 STRUCT_SECTION_ITERABLE(k_msgq, q_name) = \
4240 Z_MSGQ_INITIALIZER(q_name, _k_fifo_buf_##q_name, \
4241 q_msg_size, q_max_msgs)
4242
4243 /**
4244 * @brief Initialize a message queue.
4245 *
4246 * This routine initializes a message queue object, prior to its first use.
4247 *
4248 * The message queue's ring buffer must contain space for @a max_msgs messages,
4249 * each of which is @a msg_size bytes long. The buffer must be aligned to an
4250 * N-byte boundary, where N is a power of 2 (i.e. 1, 2, 4, ...). To ensure
4251 * that each message is similarly aligned to this boundary, @a q_msg_size
4252 * must also be a multiple of N.
4253 *
4254 * @param msgq Address of the message queue.
4255 * @param buffer Pointer to ring buffer that holds queued messages.
4256 * @param msg_size Message size (in bytes).
4257 * @param max_msgs Maximum number of messages that can be queued.
4258 *
4259 * @return N/A
4260 */
4261 void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
4262 uint32_t max_msgs);
4263
4264 /**
4265 * @brief Initialize a message queue.
4266 *
4267 * This routine initializes a message queue object, prior to its first use,
4268 * allocating its internal ring buffer from the calling thread's resource
4269 * pool.
4270 *
4271 * Memory allocated for the ring buffer can be released by calling
4272 * k_msgq_cleanup(), or if userspace is enabled and the msgq object loses
4273 * all of its references.
4274 *
4275 * @param msgq Address of the message queue.
4276 * @param msg_size Message size (in bytes).
4277 * @param max_msgs Maximum number of messages that can be queued.
4278 *
4279 * @return 0 on success, -ENOMEM if there was insufficient memory in the
4280 * thread's resource pool, or -EINVAL if the size parameters cause
4281 * an integer overflow.
4282 */
4283 __syscall int k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
4284 uint32_t max_msgs);
4285
4286 /**
4287 * @brief Release allocated buffer for a queue
4288 *
4289 * Releases memory allocated for the ring buffer.
4290 *
4291 * @param msgq message queue to cleanup
4292 *
4293 * @retval 0 on success
4294 * @retval -EBUSY Queue not empty
4295 */
4296 int k_msgq_cleanup(struct k_msgq *msgq);
4297
4298 /**
4299 * @brief Send a message to a message queue.
4300 *
4301 * This routine sends a message to message queue @a q.
4302 *
4303 * @note The message content is copied from @a data into @a msgq and the @a data
4304 * pointer is not retained, so the message content will not be modified
4305 * by this function.
4306 *
4307 * @funcprops \isr_ok
4308 *
4309 * @param msgq Address of the message queue.
4310 * @param data Pointer to the message.
4311 * @param timeout Non-negative waiting period to add the message,
4312 * or one of the special values K_NO_WAIT and
4313 * K_FOREVER.
4314 *
4315 * @retval 0 Message sent.
4316 * @retval -ENOMSG Returned without waiting or queue purged.
4317 * @retval -EAGAIN Waiting period timed out.
4318 */
4319 __syscall int k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
4320
4321 /**
4322 * @brief Receive a message from a message queue.
4323 *
4324 * This routine receives a message from message queue @a q in a "first in,
4325 * first out" manner.
4326 *
4327 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
4328 *
4329 * @funcprops \isr_ok
4330 *
4331 * @param msgq Address of the message queue.
4332 * @param data Address of area to hold the received message.
4333 * @param timeout Waiting period to receive the message,
4334 * or one of the special values K_NO_WAIT and
4335 * K_FOREVER.
4336 *
4337 * @retval 0 Message received.
4338 * @retval -ENOMSG Returned without waiting.
4339 * @retval -EAGAIN Waiting period timed out.
4340 */
4341 __syscall int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout);
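
/*
 * Informative example (not part of the API): a minimal producer/consumer
 * sketch using a statically-defined message queue of ten 4-byte messages.
 * The symbol names are hypothetical.
 *
 * @code
 * struct sensor_msg {
 *         uint32_t reading;
 * };
 *
 * K_MSGQ_DEFINE(sensor_q, sizeof(struct sensor_msg), 10, 4);
 *
 * void producer(void)
 * {
 *         struct sensor_msg msg = { .reading = 42 };
 *
 *         // Drop the message rather than block if the queue is full.
 *         (void)k_msgq_put(&sensor_q, &msg, K_NO_WAIT);
 * }
 *
 * void consumer(void)
 * {
 *         struct sensor_msg msg;
 *
 *         // Block until a message arrives.
 *         if (k_msgq_get(&sensor_q, &msg, K_FOREVER) == 0) {
 *                 // use msg.reading
 *         }
 * }
 * @endcode
 */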
4342
4343 /**
4344 * @brief Peek/read a message from a message queue.
4345 *
4346 * This routine reads a message from message queue @a q in a "first in,
4347 * first out" manner and leaves the message in the queue.
4348 *
4349 * @funcprops \isr_ok
4350 *
4351 * @param msgq Address of the message queue.
4352 * @param data Address of area to hold the message read from the queue.
4353 *
4354 * @retval 0 Message read.
4355 * @retval -ENOMSG Returned when the queue has no message.
4356 */
4357 __syscall int k_msgq_peek(struct k_msgq *msgq, void *data);
4358
4359 /**
4360 * @brief Purge a message queue.
4361 *
4362 * This routine discards all unreceived messages in a message queue's ring
4363 * buffer. Any threads that are blocked waiting to send a message to the
4364 * message queue are unblocked and see an -ENOMSG error code.
4365 *
4366 * @param msgq Address of the message queue.
4367 *
4368 * @return N/A
4369 */
4370 __syscall void k_msgq_purge(struct k_msgq *msgq);
4371
4372 /**
4373 * @brief Get the amount of free space in a message queue.
4374 *
4375 * This routine returns the number of unused entries in a message queue's
4376 * ring buffer.
4377 *
4378 * @param msgq Address of the message queue.
4379 *
4380 * @return Number of unused ring buffer entries.
4381 */
4382 __syscall uint32_t k_msgq_num_free_get(struct k_msgq *msgq);
4383
4384 /**
4385 * @brief Get basic attributes of a message queue.
4386 *
4387 * This routine fetches basic attributes of message queue into attr argument.
4388 *
4389 * @param msgq Address of the message queue.
4390 * @param attrs pointer to message queue attribute structure.
4391 *
4392 * @return N/A
4393 */
4394 __syscall void k_msgq_get_attrs(struct k_msgq *msgq,
4395 struct k_msgq_attrs *attrs);
4396
4397
4398 static inline uint32_t z_impl_k_msgq_num_free_get(struct k_msgq *msgq)
4399 {
4400 return msgq->max_msgs - msgq->used_msgs;
4401 }
4402
4403 /**
4404 * @brief Get the number of messages in a message queue.
4405 *
4406 * This routine returns the number of messages in a message queue's ring buffer.
4407 *
4408 * @param msgq Address of the message queue.
4409 *
4410 * @return Number of messages.
4411 */
4412 __syscall uint32_t k_msgq_num_used_get(struct k_msgq *msgq);
4413
4414 static inline uint32_t z_impl_k_msgq_num_used_get(struct k_msgq *msgq)
4415 {
4416 return msgq->used_msgs;
4417 }
4418
4419 /** @} */
4420
4421 /**
4422 * @defgroup mailbox_apis Mailbox APIs
4423 * @ingroup kernel_apis
4424 * @{
4425 */
4426
4427 /**
4428 * @brief Mailbox Message Structure
4429 *
4430 */
4431 struct k_mbox_msg {
4432 /** internal use only - needed for legacy API support */
4433 uint32_t _mailbox;
4434 /** size of message (in bytes) */
4435 size_t size;
4436 /** application-defined information value */
4437 uint32_t info;
4438 /** sender's message data buffer */
4439 void *tx_data;
4440 /** internal use only - needed for legacy API support */
4441 void *_rx_data;
4442 /** message data block descriptor */
4443 struct k_mem_block tx_block;
4444 /** source thread id */
4445 k_tid_t rx_source_thread;
4446 /** target thread id */
4447 k_tid_t tx_target_thread;
4448 /** internal use only - thread waiting on send (may be a dummy) */
4449 k_tid_t _syncing_thread;
4450 #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
4451 /** internal use only - semaphore used during asynchronous send */
4452 struct k_sem *_async_sem;
4453 #endif
4454 };
4455 /**
4456 * @brief Mailbox Structure
4457 *
4458 */
4459 struct k_mbox {
4460 /** Transmit messages queue */
4461 _wait_q_t tx_msg_queue;
4462 /** Receive message queue */
4463 _wait_q_t rx_msg_queue;
4464 struct k_spinlock lock;
4465
4466 };
4467 /**
4468 * @cond INTERNAL_HIDDEN
4469 */
4470
4471 #define Z_MBOX_INITIALIZER(obj) \
4472 { \
4473 .tx_msg_queue = Z_WAIT_Q_INIT(&obj.tx_msg_queue), \
4474 .rx_msg_queue = Z_WAIT_Q_INIT(&obj.rx_msg_queue), \
4475 }
4476
4477 /**
4478 * INTERNAL_HIDDEN @endcond
4479 */
4480
4481 /**
4482 * @brief Statically define and initialize a mailbox.
4483 *
4484 * The mailbox is to be accessed outside the module where it is defined using:
4485 *
4486 * @code extern struct k_mbox <name>; @endcode
4487 *
4488 * @param name Name of the mailbox.
4489 */
4490 #define K_MBOX_DEFINE(name) \
4491 STRUCT_SECTION_ITERABLE(k_mbox, name) = \
4492 Z_MBOX_INITIALIZER(name) \
4493
4494 /**
4495 * @brief Initialize a mailbox.
4496 *
4497 * This routine initializes a mailbox object, prior to its first use.
4498 *
4499 * @param mbox Address of the mailbox.
4500 *
4501 * @return N/A
4502 */
4503 extern void k_mbox_init(struct k_mbox *mbox);
4504
4505 /**
4506 * @brief Send a mailbox message in a synchronous manner.
4507 *
4508 * This routine sends a message to @a mbox and waits for a receiver to both
4509 * receive and process it. The message data may be in a buffer, in a memory
4510 * pool block, or non-existent (i.e. an empty message).
4511 *
4512 * @param mbox Address of the mailbox.
4513 * @param tx_msg Address of the transmit message descriptor.
4514 * @param timeout Waiting period for the message to be received,
4515 * or one of the special values K_NO_WAIT
4516 * and K_FOREVER. Once the message has been received,
4517 * this routine waits as long as necessary for the message
4518 * to be completely processed.
4519 *
4520 * @retval 0 Message sent.
4521 * @retval -ENOMSG Returned without waiting.
4522 * @retval -EAGAIN Waiting period timed out.
4523 */
4524 extern int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
4525 k_timeout_t timeout);
4526
4527 /**
4528 * @brief Send a mailbox message in an asynchronous manner.
4529 *
4530 * This routine sends a message to @a mbox without waiting for a receiver
4531 * to process it. The message data may be in a buffer, in a memory pool block,
4532 * or non-existent (i.e. an empty message). Optionally, the semaphore @a sem
4533 * will be given when the message has been both received and completely
4534 * processed by the receiver.
4535 *
4536 * @param mbox Address of the mailbox.
4537 * @param tx_msg Address of the transmit message descriptor.
4538 * @param sem Address of a semaphore, or NULL if none is needed.
4539 *
4540 * @return N/A
4541 */
4542 extern void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
4543 struct k_sem *sem);
4544
4545 /**
4546 * @brief Receive a mailbox message.
4547 *
4548 * This routine receives a message from @a mbox, then optionally retrieves
4549 * its data and disposes of the message.
4550 *
4551 * @param mbox Address of the mailbox.
4552 * @param rx_msg Address of the receive message descriptor.
4553 * @param buffer Address of the buffer to receive data, or NULL to defer data
4554 * retrieval and message disposal until later.
4555 * @param timeout Waiting period for a message to be received,
4556 * or one of the special values K_NO_WAIT and K_FOREVER.
4557 *
4558 * @retval 0 Message received.
4559 * @retval -ENOMSG Returned without waiting.
4560 * @retval -EAGAIN Waiting period timed out.
4561 */
4562 extern int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg,
4563 void *buffer, k_timeout_t timeout);
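
/*
 * Informative example (not part of the API): a minimal synchronous mailbox
 * exchange between two threads, based on the usage described above. The
 * symbol names and buffer sizes are hypothetical.
 *
 * @code
 * K_MBOX_DEFINE(exchange_mbox);
 *
 * void sender_thread(void)
 * {
 *         char payload[16] = "hello";
 *         struct k_mbox_msg send_msg = {
 *                 .size = sizeof(payload),
 *                 .tx_data = payload,
 *                 .tx_target_thread = K_ANY,
 *         };
 *
 *         // Blocks until a receiver has consumed the message.
 *         (void)k_mbox_put(&exchange_mbox, &send_msg, K_FOREVER);
 * }
 *
 * void receiver_thread(void)
 * {
 *         char buffer[16];
 *         struct k_mbox_msg recv_msg = {
 *                 .size = sizeof(buffer),
 *                 .rx_source_thread = K_ANY,
 *         };
 *
 *         (void)k_mbox_get(&exchange_mbox, &recv_msg, buffer, K_FOREVER);
 * }
 * @endcode
 */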
4564
4565 /**
4566 * @brief Retrieve mailbox message data into a buffer.
4567 *
4568 * This routine completes the processing of a received message by retrieving
4569 * its data into a buffer, then disposing of the message.
4570 *
4571 * Alternatively, this routine can be used to dispose of a received message
4572 * without retrieving its data.
4573 *
4574 * @param rx_msg Address of the receive message descriptor.
4575 * @param buffer Address of the buffer to receive data, or NULL to discard
4576 * the data.
4577 *
4578 * @return N/A
4579 */
4580 extern void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer);
4581
4582 /** @} */
4583
4584 /**
4585 * @defgroup pipe_apis Pipe APIs
4586 * @ingroup kernel_apis
4587 * @{
4588 */
4589
4590 /** Pipe Structure */
4591 struct k_pipe {
4592 unsigned char *buffer; /**< Pipe buffer: may be NULL */
4593 size_t size; /**< Buffer size */
4594 size_t bytes_used; /**< # bytes used in buffer */
4595 size_t read_index; /**< Where in buffer to read from */
4596 size_t write_index; /**< Where in buffer to write */
4597 struct k_spinlock lock; /**< Synchronization lock */
4598
4599 struct {
4600 _wait_q_t readers; /**< Reader wait queue */
4601 _wait_q_t writers; /**< Writer wait queue */
4602 	} wait_q;			/**< Wait queue */
4603
4604 uint8_t flags; /**< Flags */
4605 };
4606
4607 /**
4608 * @cond INTERNAL_HIDDEN
4609 */
4610 #define K_PIPE_FLAG_ALLOC	BIT(0)	/**< Buffer was allocated */
4611
4612 #define Z_PIPE_INITIALIZER(obj, pipe_buffer, pipe_buffer_size) \
4613 { \
4614 .buffer = pipe_buffer, \
4615 .size = pipe_buffer_size, \
4616 .bytes_used = 0, \
4617 .read_index = 0, \
4618 .write_index = 0, \
4619 .lock = {}, \
4620 .wait_q = { \
4621 .readers = Z_WAIT_Q_INIT(&obj.wait_q.readers), \
4622 .writers = Z_WAIT_Q_INIT(&obj.wait_q.writers) \
4623 }, \
4624 .flags = 0 \
4625 }
4626
4627 /**
4628 * INTERNAL_HIDDEN @endcond
4629 */
4630
4631 /**
4632 * @brief Statically define and initialize a pipe.
4633 *
4634 * The pipe can be accessed outside the module where it is defined using:
4635 *
4636 * @code extern struct k_pipe <name>; @endcode
4637 *
4638 * @param name Name of the pipe.
4639 * @param pipe_buffer_size Size of the pipe's ring buffer (in bytes),
4640 * or zero if no ring buffer is used.
4641 * @param pipe_align Alignment of the pipe's ring buffer (power of 2).
4642 *
4643 */
4644 #define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align) \
4645 static unsigned char __noinit __aligned(pipe_align) \
4646 _k_pipe_buf_##name[pipe_buffer_size]; \
4647 STRUCT_SECTION_ITERABLE(k_pipe, name) = \
4648 Z_PIPE_INITIALIZER(name, _k_pipe_buf_##name, pipe_buffer_size)
4649
4650 /**
4651 * @brief Initialize a pipe.
4652 *
4653 * This routine initializes a pipe object, prior to its first use.
4654 *
4655 * @param pipe Address of the pipe.
4656 * @param buffer Address of the pipe's ring buffer, or NULL if no ring buffer
4657 * is used.
4658 * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
4659 * buffer is used.
4660 *
4661 * @return N/A
4662 */
4663 void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size);
4664
4665 /**
4666 * @brief Release a pipe's allocated buffer
4667 *
4668 * If a pipe object was given a dynamically allocated buffer via
4669 * k_pipe_alloc_init(), this will free it. This function does nothing
4670 * if the buffer wasn't dynamically allocated.
4671 *
4672 * @param pipe Address of the pipe.
4673 * @retval 0 on success
4674 * @retval -EAGAIN nothing to cleanup
4675 */
4676 int k_pipe_cleanup(struct k_pipe *pipe);
4677
4678 /**
4679 * @brief Initialize a pipe and allocate a buffer for it
4680 *
4681 * Storage for the buffer region will be allocated from the calling thread's
4682 * resource pool. This memory will be released if k_pipe_cleanup() is called,
4683 * or userspace is enabled and the pipe object loses all references to it.
4684 *
4685 * This function should only be called on uninitialized pipe objects.
4686 *
4687 * @param pipe Address of the pipe.
4688 * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
4689 * buffer is used.
4690 * @retval 0 on success
4691 * @retval -ENOMEM if memory couldn't be allocated
4692 */
4693 __syscall int k_pipe_alloc_init(struct k_pipe *pipe, size_t size);
4694
4695 /**
4696 * @brief Write data to a pipe.
4697 *
4698 * This routine writes up to @a bytes_to_write bytes of data to @a pipe.
4699 *
4700 * @param pipe Address of the pipe.
4701 * @param data Address of data to write.
4702 * @param bytes_to_write Size of data (in bytes).
4703 * @param bytes_written Address of area to hold the number of bytes written.
4704 * @param min_xfer Minimum number of bytes to write.
4705 * @param timeout Waiting period to wait for the data to be written,
4706 * or one of the special values K_NO_WAIT and K_FOREVER.
4707 *
4708 * @retval 0 At least @a min_xfer bytes of data were written.
4709 * @retval -EIO Returned without waiting; zero data bytes were written.
4710 * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
4711 * minus one data bytes were written.
4712 */
4713 __syscall int k_pipe_put(struct k_pipe *pipe, void *data,
4714 size_t bytes_to_write, size_t *bytes_written,
4715 size_t min_xfer, k_timeout_t timeout);
4716
4717 /**
4718 * @brief Read data from a pipe.
4719 *
4720 * This routine reads up to @a bytes_to_read bytes of data from @a pipe.
4721 *
4722 * @param pipe Address of the pipe.
4723 * @param data Address to place the data read from pipe.
4724 * @param bytes_to_read Maximum number of data bytes to read.
4725 * @param bytes_read Address of area to hold the number of bytes read.
4726 * @param min_xfer Minimum number of data bytes to read.
4727 * @param timeout Waiting period to wait for the data to be read,
4728 * or one of the special values K_NO_WAIT and K_FOREVER.
4729 *
4730 * @retval 0 At least @a min_xfer bytes of data were read.
4731 * @retval -EINVAL invalid parameters supplied
4732 * @retval -EIO Returned without waiting; zero data bytes were read.
4733 * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
4734 * minus one data bytes were read.
4735 */
4736 __syscall int k_pipe_get(struct k_pipe *pipe, void *data,
4737 size_t bytes_to_read, size_t *bytes_read,
4738 size_t min_xfer, k_timeout_t timeout);
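
/*
 * Informative example (not part of the API): a minimal sketch moving bytes
 * through a statically-defined pipe with a 64-byte ring buffer. The symbol
 * names are hypothetical.
 *
 * @code
 * K_PIPE_DEFINE(data_pipe, 64, 4);
 *
 * void pipe_writer(const uint8_t *buf, size_t len)
 * {
 *         size_t written;
 *
 *         // Require the whole buffer to be accepted, waiting up to 100 ms.
 *         (void)k_pipe_put(&data_pipe, (void *)buf, len, &written,
 *                          len, K_MSEC(100));
 * }
 *
 * void pipe_reader(void)
 * {
 *         uint8_t buf[32];
 *         size_t bytes_read;
 *
 *         // Accept any amount of data (at least one byte) within 100 ms.
 *         (void)k_pipe_get(&data_pipe, buf, sizeof(buf), &bytes_read,
 *                          1, K_MSEC(100));
 * }
 * @endcode
 */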
4739
4740 /**
4741 * @brief Query the number of bytes that may be read from @a pipe.
4742 *
4743 * @param pipe Address of the pipe.
4744 *
4745 * @retval a number n such that 0 <= n <= @ref k_pipe.size; the
4746 * result is zero for unbuffered pipes.
4747 */
4748 __syscall size_t k_pipe_read_avail(struct k_pipe *pipe);
4749
4750 /**
4751 * @brief Query the number of bytes that may be written to @a pipe
4752 *
4753 * @param pipe Address of the pipe.
4754 *
4755 * @retval a number n such that 0 <= n <= @ref k_pipe.size; the
4756 * result is zero for unbuffered pipes.
4757 */
4758 __syscall size_t k_pipe_write_avail(struct k_pipe *pipe);
4759
4760 /** @} */
4761
4762 /**
4763 * @cond INTERNAL_HIDDEN
4764 */
4765
4766 struct k_mem_slab {
4767 _wait_q_t wait_q;
4768 struct k_spinlock lock;
4769 uint32_t num_blocks;
4770 size_t block_size;
4771 char *buffer;
4772 char *free_list;
4773 uint32_t num_used;
4774 #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
4775 uint32_t max_used;
4776 #endif
4777
4778 };
4779
4780 #define Z_MEM_SLAB_INITIALIZER(obj, slab_buffer, slab_block_size, \
4781 slab_num_blocks) \
4782 { \
4783 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
4784 .lock = {}, \
4785 .num_blocks = slab_num_blocks, \
4786 .block_size = slab_block_size, \
4787 .buffer = slab_buffer, \
4788 .free_list = NULL, \
4789 .num_used = 0, \
4790 }
4791
4792
4793 /**
4794 * INTERNAL_HIDDEN @endcond
4795 */
4796
4797 /**
4798 * @defgroup mem_slab_apis Memory Slab APIs
4799 * @ingroup kernel_apis
4800 * @{
4801 */
4802
4803 /**
4804 * @brief Statically define and initialize a memory slab.
4805 *
4806 * The memory slab's buffer contains @a slab_num_blocks memory blocks
4807 * that are @a slab_block_size bytes long. The buffer is aligned to a
4808 * @a slab_align -byte boundary. To ensure that each memory block is similarly
4809 * aligned to this boundary, @a slab_block_size must also be a multiple of
4810 * @a slab_align.
4811 *
4812 * The memory slab can be accessed outside the module where it is defined
4813 * using:
4814 *
4815 * @code extern struct k_mem_slab <name>; @endcode
4816 *
4817 * @param name Name of the memory slab.
4818 * @param slab_block_size Size of each memory block (in bytes).
4819 * @param slab_num_blocks Number memory blocks.
4820 * @param slab_align Alignment of the memory slab's buffer (power of 2).
4821 */
4822 #define K_MEM_SLAB_DEFINE(name, slab_block_size, slab_num_blocks, slab_align) \
4823 char __noinit_named(k_mem_slab_buf_##name) \
4824 __aligned(WB_UP(slab_align)) \
4825 _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
4826 STRUCT_SECTION_ITERABLE(k_mem_slab, name) = \
4827 Z_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \
4828 WB_UP(slab_block_size), slab_num_blocks)
4829
4830 /**
4831 * @brief Initialize a memory slab.
4832 *
4833 * Initializes a memory slab, prior to its first use.
4834 *
4835 * The memory slab's buffer contains @a slab_num_blocks memory blocks
4836 * that are @a slab_block_size bytes long. The buffer must be aligned to an
4837 * N-byte boundary matching a word boundary, where N is a power of 2
4838 * (i.e. 4 on 32-bit systems, 8, 16, ...).
4839 * To ensure that each memory block is similarly aligned to this boundary,
4840 * @a slab_block_size must also be a multiple of N.
4841 *
4842 * @param slab Address of the memory slab.
4843 * @param buffer Pointer to buffer used for the memory blocks.
4844 * @param block_size Size of each memory block (in bytes).
4845 * @param num_blocks Number of memory blocks.
4846 *
4847 * @retval 0 on success
4848 * @retval -EINVAL invalid data supplied
4849 *
4850 */
4851 extern int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
4852 size_t block_size, uint32_t num_blocks);
4853
4854 /**
4855 * @brief Allocate memory from a memory slab.
4856 *
4857 * This routine allocates a memory block from a memory slab.
4858 *
4859 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
4860 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
4861 *
4862 * @funcprops \isr_ok
4863 *
4864 * @param slab Address of the memory slab.
4865 * @param mem Pointer to block address area.
4866 * @param timeout Non-negative waiting period to wait for operation to complete.
4867 * Use K_NO_WAIT to return without waiting,
4868 * or K_FOREVER to wait as long as necessary.
4869 *
4870 * @retval 0 Memory allocated. The block address area pointed at by @a mem
4871 * is set to the starting address of the memory block.
4872 * @retval -ENOMEM Returned without waiting.
4873 * @retval -EAGAIN Waiting period timed out.
4874 * @retval -EINVAL Invalid data supplied
4875 */
4876 extern int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem,
4877 k_timeout_t timeout);
4878
4879 /**
4880 * @brief Free memory allocated from a memory slab.
4881 *
4882 * This routine releases a previously allocated memory block back to its
4883 * associated memory slab.
4884 *
4885 * @param slab Address of the memory slab.
4886 * @param mem Pointer to block address area (as set by k_mem_slab_alloc()).
4887 *
4888 * @return N/A
4889 */
4890 extern void k_mem_slab_free(struct k_mem_slab *slab, void **mem);
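
/*
 * Informative example (not part of the API): a minimal sketch of a
 * statically-defined memory slab with four 64-byte blocks. The symbol names
 * are hypothetical.
 *
 * @code
 * K_MEM_SLAB_DEFINE(frame_slab, 64, 4, 4);
 *
 * void use_block(void)
 * {
 *         void *block;
 *
 *         if (k_mem_slab_alloc(&frame_slab, &block, K_NO_WAIT) == 0) {
 *                 // ... use the 64-byte block ...
 *                 k_mem_slab_free(&frame_slab, &block);
 *         }
 * }
 * @endcode
 */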
4891
4892 /**
4893 * @brief Get the number of used blocks in a memory slab.
4894 *
4895 * This routine gets the number of memory blocks that are currently
4896 * allocated in @a slab.
4897 *
4898 * @param slab Address of the memory slab.
4899 *
4900 * @return Number of allocated memory blocks.
4901 */
4902 static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
4903 {
4904 return slab->num_used;
4905 }
4906
4907 /**
4908 * @brief Get the number of maximum used blocks so far in a memory slab.
4909 *
4910 * This routine gets the maximum number of memory blocks that were
4911 * allocated in @a slab.
4912 *
4913 * @param slab Address of the memory slab.
4914 *
4915 * @return Maximum number of allocated memory blocks.
4916 */
4917 static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
4918 {
4919 #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
4920 return slab->max_used;
4921 #else
4922 ARG_UNUSED(slab);
4923 return 0;
4924 #endif
4925 }
4926
4927 /**
4928 * @brief Get the number of unused blocks in a memory slab.
4929 *
4930 * This routine gets the number of memory blocks that are currently
4931 * unallocated in @a slab.
4932 *
4933 * @param slab Address of the memory slab.
4934 *
4935 * @return Number of unallocated memory blocks.
4936 */
4937 static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
4938 {
4939 return slab->num_blocks - slab->num_used;
4940 }
4941
4942 /** @} */
4943
4944 /**
4945 * @addtogroup heap_apis
4946 * @{
4947 */
4948
4949 /* kernel synchronized heap struct */
4950
4951 struct k_heap {
4952 struct sys_heap heap;
4953 _wait_q_t wait_q;
4954 struct k_spinlock lock;
4955 };
4956
4957 /**
4958 * @brief Initialize a k_heap
4959 *
4960 * This constructs a synchronized k_heap object over a memory region
4961 * specified by the user. Note that while any alignment and size can
4962 * be passed as valid parameters, internal alignment restrictions
4963 * inside the inner sys_heap mean that not all bytes may be usable as
4964 * allocated memory.
4965 *
4966 * @param h Heap struct to initialize
4967 * @param mem Pointer to memory.
4968 * @param bytes Size of memory region, in bytes
4969 */
4970 void k_heap_init(struct k_heap *h, void *mem, size_t bytes);
4971
4972 /** @brief Allocate aligned memory from a k_heap
4973 *
4974 * Behaves in all ways like k_heap_alloc(), except that the returned
4975 * memory (if available) will have a starting address in memory which
4976 * is a multiple of the specified power-of-two alignment value in
4977 * bytes. The resulting memory can be returned to the heap using
4978 * k_heap_free().
4979 *
4980 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
4981 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
4982 *
4983 * @funcprops \isr_ok
4984 *
4985 * @param h Heap from which to allocate
4986 * @param align Alignment in bytes, must be a power of two
4987 * @param bytes Number of bytes requested
4988 * @param timeout How long to wait, or K_NO_WAIT
4989 * @return Pointer to memory the caller can now use
4990 */
4991 void *k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes,
4992 k_timeout_t timeout);
4993
4994 /**
4995 * @brief Allocate memory from a k_heap
4996 *
4997 * Allocates and returns a memory buffer from the memory region owned
4998 * by the heap. If no memory is available immediately, the call will
4999 * block for the specified timeout (constructed via the standard
5000 * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
5001 * freed. If the allocation cannot be performed by the expiration of
5002 * the timeout, NULL will be returned.
5003 *
5004 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5005 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5006 *
5007 * @funcprops \isr_ok
5008 *
5009 * @param h Heap from which to allocate
5010 * @param bytes Desired size of block to allocate
5011 * @param timeout How long to wait, or K_NO_WAIT
5012 * @return A pointer to valid heap memory, or NULL
5013 */
5014 void *k_heap_alloc(struct k_heap *h, size_t bytes,
5015 k_timeout_t timeout);
5016
5017 /**
5018 * @brief Free memory allocated by k_heap_alloc()
5019 *
5020 * Returns the specified memory block, which must have been returned
5021 * from k_heap_alloc(), to the heap for use by other callers. Passing
5022 * a NULL block is legal, and has no effect.
5023 *
5024 * @param h Heap to which to return the memory
5025 * @param mem A valid memory block, or NULL
5026 */
5027 void k_heap_free(struct k_heap *h, void *mem);
5028
5029 /* Hand-calculated minimum heap sizes needed to return a successful
5030 * 1-byte allocation. See details in lib/os/heap.[ch]
5031 */
5032 #define Z_HEAP_MIN_SIZE (sizeof(void *) > 4 ? 56 : 44)
5033
5034 /**
5035 * @brief Define a static k_heap in the specified linker section
5036 *
5037 * This macro defines and initializes a static memory region and
5038 * k_heap of the requested size in the specified linker section.
5039 * After kernel start, &name can be used as if k_heap_init() had
5040 * been called.
5041 *
5042 * Note that this macro enforces a minimum size on the memory region
5043 * to accommodate metadata requirements. Very small heaps will be
5044 * padded to fit.
5045 *
5046 * @param name Symbol name for the struct k_heap object
5047 * @param bytes Size of memory region, in bytes
5048  * @param in_section __attribute__((section(name)))
5049 */
5050 #define Z_HEAP_DEFINE_IN_SECT(name, bytes, in_section) \
5051 char in_section \
5052 __aligned(8) /* CHUNK_UNIT */ \
5053 kheap_##name[MAX(bytes, Z_HEAP_MIN_SIZE)]; \
5054 STRUCT_SECTION_ITERABLE(k_heap, name) = { \
5055 .heap = { \
5056 .init_mem = kheap_##name, \
5057 .init_bytes = MAX(bytes, Z_HEAP_MIN_SIZE), \
5058 }, \
5059 }
5060
5061 /**
5062 * @brief Define a static k_heap
5063 *
5064 * This macro defines and initializes a static memory region and
5065 * k_heap of the requested size. After kernel start, &name can be
5066 * used as if k_heap_init() had been called.
5067 *
5068 * Note that this macro enforces a minimum size on the memory region
5069 * to accommodate metadata requirements. Very small heaps will be
5070 * padded to fit.
5071 *
5072 * @param name Symbol name for the struct k_heap object
5073 * @param bytes Size of memory region, in bytes
5074 */
5075 #define K_HEAP_DEFINE(name, bytes) \
5076 Z_HEAP_DEFINE_IN_SECT(name, bytes, \
5077 __noinit_named(kheap_buf_##name))
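
/* Usage sketch (illustrative only): the same allocation pattern using a heap
 * defined statically with K_HEAP_DEFINE(), so no explicit k_heap_init() call
 * is needed. The heap name, sizes and function name are hypothetical.
 *
 *    K_HEAP_DEFINE(my_heap, 2048);
 *
 *    void use_static_heap(void)
 *    {
 *            void *buf = k_heap_alloc(&my_heap, 128, K_NO_WAIT);
 *
 *            if (buf != NULL) {
 *                    k_heap_free(&my_heap, buf);
 *            }
 *    }
 */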
5078
5079 /**
5080 * @brief Define a static k_heap in uncached memory
5081 *
5082 * This macro defines and initializes a static memory region and
5083 * k_heap of the requested size in uncached memory. After kernel
5084 * start, &name can be used as if k_heap_init() had been called.
5085 *
5086 * Note that this macro enforces a minimum size on the memory region
5087 * to accommodate metadata requirements. Very small heaps will be
5088 * padded to fit.
5089 *
5090 * @param name Symbol name for the struct k_heap object
5091 * @param bytes Size of memory region, in bytes
5092 */
5093 #define K_HEAP_DEFINE_NOCACHE(name, bytes) \
5094 Z_HEAP_DEFINE_IN_SECT(name, bytes, __nocache)
5095
5096 /**
5097 * @}
5098 */
5099
5100 /**
5101 * @defgroup heap_apis Heap APIs
5102 * @ingroup kernel_apis
5103 * @{
5104 */
5105
5106 /**
5107 * @brief Allocate memory from the heap with a specified alignment.
5108 *
5109 * This routine provides semantics similar to aligned_alloc(); memory is
5110 * allocated from the heap with a specified alignment. However, one minor
5111 * difference is that k_aligned_alloc() accepts any non-zero @p size,
5112 * whereas aligned_alloc() only accepts a @p size that is an integral
5113 * multiple of @p align.
5114 *
5115 * Above, aligned_alloc() refers to:
5116 * C11 standard (ISO/IEC 9899:2011): 7.22.3.1
5117 * The aligned_alloc function (p: 347-348)
5118 *
5119 * @param align Alignment of memory requested (in bytes).
5120 * @param size Amount of memory requested (in bytes).
5121 *
5122 * @return Address of the allocated memory if successful; otherwise NULL.
5123 */
5124 extern void *k_aligned_alloc(size_t align, size_t size);
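
/* Usage sketch (illustrative only): requesting a 64-byte-aligned block from
 * the system heap and returning it with k_free(). This assumes the system
 * heap is enabled (a nonzero CONFIG_HEAP_MEM_POOL_SIZE); the alignment,
 * size and function name are arbitrary.
 *
 *    void use_aligned_block(void)
 *    {
 *            void *dma_buf = k_aligned_alloc(64, 256);
 *
 *            if (dma_buf != NULL) {
 *                    k_free(dma_buf);
 *            }
 *    }
 */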
5125
5126 /**
5127 * @brief Allocate memory from the heap.
5128 *
5129 * This routine provides traditional malloc() semantics. Memory is
5130 * allocated from the heap memory pool.
5131 *
5132 * @param size Amount of memory requested (in bytes).
5133 *
5134 * @return Address of the allocated memory if successful; otherwise NULL.
5135 */
5136 extern void *k_malloc(size_t size);
5137
5138 /**
5139 * @brief Free memory allocated from heap.
5140 *
5141 * This routine provides traditional free() semantics. The memory being
5142 * returned must have been allocated from the heap memory pool or
5143 * k_mem_pool_malloc().
5144 *
5145 * If @a ptr is NULL, no operation is performed.
5146 *
5147 * @param ptr Pointer to previously allocated memory.
5148 *
5149 * @return N/A
5150 */
5151 extern void k_free(void *ptr);
5152
5153 /**
5154 * @brief Allocate memory from heap, array style
5155 *
5156 * This routine provides traditional calloc() semantics. Memory is
5157 * allocated from the heap memory pool and zeroed.
5158 *
5159 * @param nmemb Number of elements in the requested array
5160 * @param size Size of each array element (in bytes).
5161 *
5162 * @return Address of the allocated memory if successful; otherwise NULL.
5163 */
5164 extern void *k_calloc(size_t nmemb, size_t size);
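
/* Usage sketch (illustrative only): traditional malloc()/calloc()/free()
 * style usage against the kernel heap. k_calloc() returns zeroed memory and
 * k_free(NULL) is a no-op. Assumes the system heap is enabled via a nonzero
 * CONFIG_HEAP_MEM_POOL_SIZE; the struct and names are hypothetical.
 *
 *    struct sample {
 *            uint32_t id;
 *            uint32_t value;
 *    };
 *
 *    void use_system_heap(void)
 *    {
 *            struct sample *one = k_malloc(sizeof(*one));
 *            struct sample *many = k_calloc(16, sizeof(*many));
 *
 *            k_free(one);
 *            k_free(many);
 *    }
 */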
5165
5166 /** @} */
5167
5168 /* polling API - PRIVATE */
5169
5170 #ifdef CONFIG_POLL
5171 #define _INIT_OBJ_POLL_EVENT(obj) do { (obj)->poll_event = NULL; } while (false)
5172 #else
5173 #define _INIT_OBJ_POLL_EVENT(obj) do { } while (false)
5174 #endif
5175
5176 /* private - types bit positions */
5177 enum _poll_types_bits {
5178 /* can be used to ignore an event */
5179 _POLL_TYPE_IGNORE,
5180
5181 /* to be signaled by k_poll_signal_raise() */
5182 _POLL_TYPE_SIGNAL,
5183
5184 /* semaphore availability */
5185 _POLL_TYPE_SEM_AVAILABLE,
5186
5187 /* queue/FIFO/LIFO data availability */
5188 _POLL_TYPE_DATA_AVAILABLE,
5189
5190 /* msgq data availability */
5191 _POLL_TYPE_MSGQ_DATA_AVAILABLE,
5192
5193 _POLL_NUM_TYPES
5194 };
5195
5196 #define Z_POLL_TYPE_BIT(type) (1U << ((type) - 1U))
5197
5198 /* private - states bit positions */
5199 enum _poll_states_bits {
5200 /* default state when creating event */
5201 _POLL_STATE_NOT_READY,
5202
5203 /* signaled by k_poll_signal_raise() */
5204 _POLL_STATE_SIGNALED,
5205
5206 /* semaphore is available */
5207 _POLL_STATE_SEM_AVAILABLE,
5208
5209 /* data is available to read on queue/FIFO/LIFO */
5210 _POLL_STATE_DATA_AVAILABLE,
5211
5212 /* queue/FIFO/LIFO wait was cancelled */
5213 _POLL_STATE_CANCELLED,
5214
5215 /* data is available to read on a message queue */
5216 _POLL_STATE_MSGQ_DATA_AVAILABLE,
5217
5218 _POLL_NUM_STATES
5219 };
5220
5221 #define Z_POLL_STATE_BIT(state) (1U << ((state) - 1U))
5222
5223 #define _POLL_EVENT_NUM_UNUSED_BITS \
5224 (32 - (0 \
5225 + 8 /* tag */ \
5226 + _POLL_NUM_TYPES \
5227 + _POLL_NUM_STATES \
5228 + 1 /* modes */ \
5229 ))
5230
5231 /* end of polling API - PRIVATE */
5232
5233
5234 /**
5235 * @defgroup poll_apis Async polling APIs
5236 * @ingroup kernel_apis
5237 * @{
5238 */
5239
5240 /* Public polling API */
5241
5242 /* public - values for k_poll_event.type bitfield */
5243 #define K_POLL_TYPE_IGNORE 0
5244 #define K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
5245 #define K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
5246 #define K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
5247 #define K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE
5248 #define K_POLL_TYPE_MSGQ_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_MSGQ_DATA_AVAILABLE)
5249
5250 /* public - polling modes */
5251 enum k_poll_modes {
5252 /* polling thread does not take ownership of objects when available */
5253 K_POLL_MODE_NOTIFY_ONLY = 0,
5254
5255 K_POLL_NUM_MODES
5256 };
5257
5258 /* public - values for k_poll_event.state bitfield */
5259 #define K_POLL_STATE_NOT_READY 0
5260 #define K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED)
5261 #define K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
5262 #define K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
5263 #define K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE
5264 #define K_POLL_STATE_MSGQ_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_MSGQ_DATA_AVAILABLE)
5265 #define K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED)
5266
5267 /* public - poll signal object */
5268 struct k_poll_signal {
5269 /** PRIVATE - DO NOT TOUCH */
5270 sys_dlist_t poll_events;
5271
5272 /**
5273 * 1 if the event has been signaled, 0 otherwise. Stays set to 1 until
5274 * user resets it to 0.
5275 */
5276 unsigned int signaled;
5277
5278 /** custom result value passed to k_poll_signal_raise() if needed */
5279 int result;
5280 };
5281
5282 #define K_POLL_SIGNAL_INITIALIZER(obj) \
5283 { \
5284 .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events), \
5285 .signaled = 0, \
5286 .result = 0, \
5287 }
5288 /**
5289 * @brief Poll Event
5290 *
5291 */
5292 struct k_poll_event {
5293 /** PRIVATE - DO NOT TOUCH */
5294 sys_dnode_t _node;
5295
5296 /** PRIVATE - DO NOT TOUCH */
5297 struct z_poller *poller;
5298
5299 /** optional user-specified tag, opaque, untouched by the API */
5300 uint32_t tag:8;
5301
5302 /** bitfield of event types (bitwise-ORed K_POLL_TYPE_xxx values) */
5303 uint32_t type:_POLL_NUM_TYPES;
5304
5305 /** bitfield of event states (bitwise-ORed K_POLL_STATE_xxx values) */
5306 uint32_t state:_POLL_NUM_STATES;
5307
5308 /** mode of operation, from enum k_poll_modes */
5309 uint32_t mode:1;
5310
5311 /** unused bits in 32-bit word */
5312 uint32_t unused:_POLL_EVENT_NUM_UNUSED_BITS;
5313
5314 /** per-type data */
5315 union {
5316 void *obj;
5317 struct k_poll_signal *signal;
5318 struct k_sem *sem;
5319 struct k_fifo *fifo;
5320 struct k_queue *queue;
5321 struct k_msgq *msgq;
5322 };
5323 };
5324
5325 #define K_POLL_EVENT_INITIALIZER(_event_type, _event_mode, _event_obj) \
5326 { \
5327 .poller = NULL, \
5328 .type = _event_type, \
5329 .state = K_POLL_STATE_NOT_READY, \
5330 .mode = _event_mode, \
5331 .unused = 0, \
5332 { \
5333 .obj = _event_obj, \
5334 }, \
5335 }
5336
5337 #define K_POLL_EVENT_STATIC_INITIALIZER(_event_type, _event_mode, _event_obj, \
5338 event_tag) \
5339 { \
5340 .tag = event_tag, \
5341 .type = _event_type, \
5342 .state = K_POLL_STATE_NOT_READY, \
5343 .mode = _event_mode, \
5344 .unused = 0, \
5345 { \
5346 .obj = _event_obj, \
5347 }, \
5348 }
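
/* Usage sketch (illustrative only): statically initializing an event array
 * for later use with k_poll(), one event tracking a semaphore and one
 * tracking a poll signal. The semaphore, signal, array names and tag values
 * are hypothetical.
 *
 *    K_SEM_DEFINE(my_sem, 0, 1);
 *    struct k_poll_signal my_signal = K_POLL_SIGNAL_INITIALIZER(my_signal);
 *
 *    struct k_poll_event my_events[2] = {
 *            K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
 *                                            K_POLL_MODE_NOTIFY_ONLY,
 *                                            &my_sem, 0),
 *            K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SIGNAL,
 *                                            K_POLL_MODE_NOTIFY_ONLY,
 *                                            &my_signal, 0),
 *    };
 */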
5349
5350 /**
5351 * @brief Initialize one struct k_poll_event instance
5352 *
5353 * After this routine is called on a poll event, the event is ready to be
5354 * placed in an event array to be passed to k_poll().
5355 *
5356 * @param event The event to initialize.
5357 * @param type A bitfield of the types of event, from the K_POLL_TYPE_xxx
5358 * values. Only values that apply to the same object being polled
5359 * can be used together. Choosing K_POLL_TYPE_IGNORE disables the
5360 * event.
5361 * @param mode Reserved for future use; use K_POLL_MODE_NOTIFY_ONLY.
5362 * @param obj Kernel object or poll signal.
5363 *
5364 * @return N/A
5365 */
5366
5367 extern void k_poll_event_init(struct k_poll_event *event, uint32_t type,
5368 int mode, void *obj);
5369
5370 /**
5371 * @brief Wait for one or many of multiple poll events to occur
5372 *
5373 * This routine allows a thread to wait concurrently for one or many of
5374 * multiple poll events to have occurred. Such events can be a kernel object
5375 * being available, like a semaphore, or a poll signal event.
5376 *
5377 * When an event notifies that a kernel object is available, the kernel object
5378 * is not "given" to the thread calling k_poll(): it merely signals the fact
5379 * that the object was available when the k_poll() call was in effect. Also,
5380 * all threads trying to acquire an object the regular way, i.e. by pending on
5381 * the object, have precedence over the thread polling on the object. This
5382 * means that the polling thread will never get the poll event on an object
5383 * until the object becomes available and its pend queue is empty. For this
5384 * reason, the k_poll() call is more effective when the objects being polled
5385 * only have one thread, the polling thread, trying to acquire them.
5386 *
5387 * When k_poll() returns 0, the caller should loop on all the events that were
5388 * passed to k_poll() and check the state field for the values that were
5389 * expected and take the associated actions.
5390 *
5391 * Before being reused for another call to k_poll(), the user has to reset the
5392 * state field to K_POLL_STATE_NOT_READY.
5393 *
5394 * When called from user mode, a temporary memory allocation is required from
5395 * the caller's resource pool.
5396 *
5397 * @param events An array of events to be polled for.
5398 * @param num_events The number of events in the array.
5399 * @param timeout Waiting period for an event to be ready,
5400 * or one of the special values K_NO_WAIT and K_FOREVER.
5401 *
5402 * @retval 0 One or more events are ready.
5403 * @retval -EAGAIN Waiting period timed out.
5404 * @retval -EINTR Polling has been interrupted, e.g. with
5405 * k_queue_cancel_wait(). All output events are still set and valid,
5406 * cancelled event(s) will be set to K_POLL_STATE_CANCELLED. In other
5407 * words, -EINTR status means that at least one of the output events is
5408 * K_POLL_STATE_CANCELLED.
5409 * @retval -ENOMEM Thread resource pool insufficient memory (user mode only)
5410 * @retval -EINVAL Bad parameters (user mode only)
5411 */
5412
5413 __syscall int k_poll(struct k_poll_event *events, int num_events,
5414 k_timeout_t timeout);
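
/* Usage sketch (illustrative only): waiting on the two events initialized in
 * the earlier example, acting on whichever became ready, and re-arming the
 * events before polling again, as the description above requires. Error
 * handling is elided; names are the hypothetical ones used previously.
 *
 *    void poll_loop(void)
 *    {
 *            while (1) {
 *                    int rc = k_poll(my_events, 2, K_FOREVER);
 *
 *                    if (rc != 0) {
 *                            continue;
 *                    }
 *
 *                    if (my_events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
 *                            k_sem_take(my_events[0].sem, K_NO_WAIT);
 *                    }
 *
 *                    if (my_events[1].state == K_POLL_STATE_SIGNALED) {
 *                            k_poll_signal_reset(my_events[1].signal);
 *                    }
 *
 *                    my_events[0].state = K_POLL_STATE_NOT_READY;
 *                    my_events[1].state = K_POLL_STATE_NOT_READY;
 *            }
 *    }
 */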
5415
5416 /**
5417 * @brief Initialize a poll signal object.
5418 *
5419 * Ready a poll signal object to be signaled via k_poll_signal_raise().
5420 *
5421 * @param sig A poll signal.
5422 *
5423 * @return N/A
5424 */
5425
5426 __syscall void k_poll_signal_init(struct k_poll_signal *sig);
5427
5428 /**
5429 * @brief Reset a poll signal object's state to unsignaled.
5430 *
5431 * @param sig A poll signal object
5432 */
5433 __syscall void k_poll_signal_reset(struct k_poll_signal *sig);
5434
5435 /**
5436 * @brief Fetch the signaled state and result value of a poll signal
5437 *
5438 * @param sig A poll signal object
5439 * @param signaled An integer buffer which will be written nonzero if the
5440 * object was signaled
5441 * @param result An integer destination buffer which will be written with the
5442 * result value if the object was signaled, or an undefined
5443 * value if it was not.
5444 */
5445 __syscall void k_poll_signal_check(struct k_poll_signal *sig,
5446 unsigned int *signaled, int *result);
5447
5448 /**
5449 * @brief Signal a poll signal object.
5450 *
5451 * This routine makes ready a poll signal, which is basically a poll event of
5452 * type K_POLL_TYPE_SIGNAL. If a thread was polling on that event, it will be
5453 * made ready to run. A @a result value can be specified.
5454 *
5455 * The poll signal contains a 'signaled' field that, when set by
5456 * k_poll_signal_raise(), stays set until the user sets it back to 0 with
5457 * k_poll_signal_reset(). It thus has to be reset by the user before being
5458 * passed again to k_poll() or k_poll() will consider it to be signaled, and
5459 * will return immediately.
5460 *
5461 * @note The result is stored and the 'signaled' field is set even if
5462 * this function returns an error indicating that an expiring poll was
5463 * not notified. The next k_poll() will detect the missed raise.
5464 *
5465 * @param sig A poll signal.
5466 * @param result The value to store in the result field of the signal.
5467 *
5468 * @retval 0 The signal was delivered successfully.
5469 * @retval -EAGAIN The polling thread's timeout is in the process of expiring.
5470 */
5471
5472 __syscall int k_poll_signal_raise(struct k_poll_signal *sig, int result);
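
/* Usage sketch (illustrative only): one context raises a signal with a
 * result value while another fetches the state and result, then clears the
 * signal so it can be reused. The signal name, result value and function
 * names are hypothetical.
 *
 *    struct k_poll_signal done_signal =
 *            K_POLL_SIGNAL_INITIALIZER(done_signal);
 *
 *    void producer(void)
 *    {
 *            k_poll_signal_raise(&done_signal, 0x42);
 *    }
 *
 *    void consumer(void)
 *    {
 *            unsigned int signaled;
 *            int result;
 *
 *            k_poll_signal_check(&done_signal, &signaled, &result);
 *            if (signaled != 0) {
 *                    k_poll_signal_reset(&done_signal);
 *            }
 *    }
 */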
5473
5474 /**
5475 * @internal
5476 */
5477 extern void z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state);
5478
5479 /** @} */
5480
5481 /**
5482 * @defgroup cpu_idle_apis CPU Idling APIs
5483 * @ingroup kernel_apis
5484 * @{
5485 */
5486 /**
5487 * @brief Make the CPU idle.
5488 *
5489 * This function makes the CPU idle until an event wakes it up.
5490 *
5491 * In a regular system, the idle thread should be the only thread responsible
5492 * for making the CPU idle and triggering any type of power management.
5493 * However, in some more constrained systems, such as a single-threaded system,
5494 * the only thread may be responsible for this if needed.
5495 *
5496 * @note On some architectures, before returning, the function unmasks interrupts
5497 * unconditionally.
5498 *
5499 * @return N/A
5500 */
5501 static inline void k_cpu_idle(void)
5502 {
5503 arch_cpu_idle();
5504 }
5505
5506 /**
5507 * @brief Make the CPU idle in an atomic fashion.
5508 *
5509 * Similar to k_cpu_idle(), but must be called with interrupts locked.
5510 *
5511 * Enabling interrupts and entering a low-power mode will be atomic,
5512 * i.e. there will be no period of time where interrupts are enabled before
5513 * the processor enters a low-power mode.
5514 *
5515 * After waking up from the low-power mode, the interrupt lockout state will
5516 * be restored as if by irq_unlock(key).
5517 *
5518 * @param key Interrupt locking key obtained from irq_lock().
5519 *
5520 * @return N/A
5521 */
5522 static inline void k_cpu_atomic_idle(unsigned int key)
5523 {
5524 arch_cpu_atomic_idle(key);
5525 }
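
/* Usage sketch (illustrative only): an idle loop that checks a wake
 * condition with interrupts locked, then atomically re-enables interrupts
 * and idles when there is nothing to do, avoiding the race described above.
 * The wake flag and function name are hypothetical.
 *
 *    extern volatile bool work_pending;
 *
 *    void wait_for_work(void)
 *    {
 *            while (1) {
 *                    unsigned int key = irq_lock();
 *
 *                    if (work_pending) {
 *                            irq_unlock(key);
 *                            break;
 *                    }
 *
 *                    k_cpu_atomic_idle(key);
 *            }
 *    }
 */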
5526
5527 /**
5528 * @}
5529 */
5530
5531 /**
5532 * @internal
5533 */
5534 #ifdef ARCH_EXCEPT
5535 /* This architecture has direct support for triggering a CPU exception */
5536 #define z_except_reason(reason) ARCH_EXCEPT(reason)
5537 #else
5538
5539 #if !defined(CONFIG_ASSERT_NO_FILE_INFO)
5540 #define __EXCEPT_LOC() __ASSERT_PRINT("@ %s:%d\n", __FILE__, __LINE__)
5541 #else
5542 #define __EXCEPT_LOC()
5543 #endif
5544
5545 /* NOTE: This is the implementation for arches that do not implement
5546 * ARCH_EXCEPT() to generate a real CPU exception.
5547 *
5548 * We won't have a real exception frame to determine the PC value when
5549 * the oops occurred, so print file and line number before we jump into
5550 * the fatal error handler.
5551 */
5552 #define z_except_reason(reason) do { \
5553 __EXCEPT_LOC(); \
5554 z_fatal_error(reason, NULL); \
5555 } while (false)
5556
5557 #endif /* ARCH_EXCEPT */
5558
5559 /**
5560 * @brief Fatally terminate a thread
5561 *
5562 * This should be called when a thread has encountered an unrecoverable
5563 * runtime condition and needs to terminate. What this ultimately
5564 * means is determined by the _fatal_error_handler() implementation, which
5565 * will be called with reason code K_ERR_KERNEL_OOPS.
5566 *
5567 * If this is called from ISR context, the default system fatal error handler
5568 * will treat it as an unrecoverable system error, just like k_panic().
5569 */
5570 #define k_oops() z_except_reason(K_ERR_KERNEL_OOPS)
5571
5572 /**
5573 * @brief Fatally terminate the system
5574 *
5575 * This should be called when the Zephyr kernel has encountered an
5576 * unrecoverable runtime condition and needs to terminate. What this ultimately
5577 * means is determined by the _fatal_error_handler() implementation, which
5578 * will be called with reason code K_ERR_KERNEL_PANIC.
5579 */
5580 #define k_panic() z_except_reason(K_ERR_KERNEL_PANIC)
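
/* Usage sketch (illustrative only): terminating the current thread with
 * k_oops() for an unrecoverable, thread-local condition, versus taking down
 * the whole system with k_panic() for a kernel-wide failure. The parameter
 * and the kernel_state_is_sane() check are hypothetical.
 *
 *    void handle_request(void *req)
 *    {
 *            if (req == NULL) {
 *                    k_oops();
 *            }
 *
 *            if (!kernel_state_is_sane()) {
 *                    k_panic();
 *            }
 *    }
 */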
5581
5582 /*
5583 * private APIs that are utilized by one or more public APIs
5584 */
5585
5586 /**
5587 * @internal
5588 */
5589 extern void z_init_thread_base(struct _thread_base *thread_base,
5590 int priority, uint32_t initial_state,
5591 unsigned int options);
5592
5593 #ifdef CONFIG_MULTITHREADING
5594 /**
5595 * @internal
5596 */
5597 extern void z_init_static_threads(void);
5598 #else
5599 /**
5600 * @internal
5601 */
5602 #define z_init_static_threads() do { } while (false)
5603 #endif
5604
5605 /**
5606 * @internal
5607 */
5608 extern bool z_is_thread_essential(void);
5609
5610 #ifdef CONFIG_SMP
5611 void z_smp_thread_init(void *arg, struct k_thread *thread);
5612 void z_smp_thread_swap(void);
5613 #endif
5614
5615 /**
5616 * @internal
5617 */
5618 extern void z_timer_expiration_handler(struct _timeout *t);
5619
5620 #ifdef CONFIG_PRINTK
5621 /**
5622 * @brief Emit a character buffer to the console device
5623 *
5624 * @param c String of characters to print
5625 * @param n The length of the string
5626 *
5627 */
5628 __syscall void k_str_out(char *c, size_t n);
5629 #endif
5630
5631 /**
5632 * @brief Disable preservation of floating point context information.
5633 *
5634 * This routine informs the kernel that the specified thread
5635 * will no longer be using the floating point registers.
5636 *
5637 * @warning
5638 * Some architectures apply restrictions on how the disabling of floating
5639 * point preservation may be requested, see arch_float_disable.
5640 *
5641 * @warning
5642 * This routine should only be used to disable floating point support for
5643 * a thread that currently has such support enabled.
5644 *
5645 * @param thread ID of thread.
5646 *
5647 * @retval 0 On success.
5648 * @retval -ENOTSUP If the floating point disabling is not implemented.
5649 * @retval -EINVAL If the floating point disabling could not be performed.
5650 */
5651 __syscall int k_float_disable(struct k_thread *thread);
5652
5653 /**
5654 * @brief Enable preservation of floating point context information.
5655 *
5656 * This routine informs the kernel that the specified thread
5657 * will use the floating point registers.
5658 *
5659 * Invoking this routine initializes the thread's floating point context info
5660 * to that of an FPU that has been reset. The next time the thread is scheduled
5661 * by z_swap() it will either inherit an FPU that is guaranteed to be in a
5662 * "sane" state (if the most recent user of the FPU was cooperatively swapped
5663 * out) or the thread's own floating point context will be loaded (if the most
5664 * recent user of the FPU was preempted, or if this thread is the first user
5665 * of the FPU). Thereafter, the kernel will protect the thread's FP context
5666 * so that it is not altered during a preemptive context switch.
5667 *
5668 * The @a options parameter indicates which floating point register sets will
5669 * be used by the specified thread.
5670 *
5671 * For x86 options:
5672 *
5673 * - K_FP_REGS indicates x87 FPU and MMX registers only
5674 * - K_SSE_REGS indicates SSE registers (and also x87 FPU and MMX registers)
5675 *
5676 * @warning
5677 * Some architectures apply restrictions on how the enabling of floating
5678 * point preservation may be requested, see arch_float_enable.
5679 *
5680 * @warning
5681 * This routine should only be used to enable floating point support for
5682 * a thread that does not currently have such support enabled.
5683 *
5684 * @param thread ID of thread.
5685 * @param options architecture dependent options
5686 *
5687 * @retval 0 On success.
5688 * @retval -ENOTSUP If the floating point enabling is not implemented.
5689 * @retval -EINVAL If the floating point enabling could not be performed.
5690 */
5691 __syscall int k_float_enable(struct k_thread *thread, unsigned int options);
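
/* Usage sketch (illustrative only): enabling FP register preservation for
 * the current thread before doing floating point work, then disabling it
 * again. K_FP_REGS is the basic FP register-set option on architectures that
 * define it; its availability and behavior are architecture dependent, and
 * the function name below is hypothetical.
 *
 *    void fp_work(void)
 *    {
 *            volatile float acc = 0.0f;
 *
 *            if (k_float_enable(k_current_get(), K_FP_REGS) == 0) {
 *                    acc += 1.5f;
 *                    k_float_disable(k_current_get());
 *            }
 *    }
 */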
5692
5693 #ifdef CONFIG_THREAD_RUNTIME_STATS
5694
5695 /**
5696 * @brief Get the runtime statistics of a thread
5697 *
5698 * @param thread ID of thread.
5699 * @param stats Pointer to struct to copy statistics into.
5700 * @return -EINVAL if a null pointer was passed, otherwise 0
5701 */
5702 int k_thread_runtime_stats_get(k_tid_t thread,
5703 k_thread_runtime_stats_t *stats);
5704
5705 /**
5706 * @brief Get the runtime statistics of all threads
5707 *
5708 * @param stats Pointer to struct to copy statistics into.
5709 * @return -EINVAL if a null pointer was passed, otherwise 0
5710 */
5711 int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats);
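
/* Usage sketch (illustrative only): measuring the execution cycles consumed
 * by the current thread across a workload, using the cumulative
 * execution_cycles counter of k_thread_runtime_stats_t. Assumes
 * CONFIG_THREAD_RUNTIME_STATS is enabled; heavy_work() and the function
 * name are hypothetical.
 *
 *    void measure_cycles(void)
 *    {
 *            k_thread_runtime_stats_t stats;
 *            uint64_t before;
 *            uint64_t spent;
 *
 *            k_thread_runtime_stats_get(k_current_get(), &stats);
 *            before = stats.execution_cycles;
 *
 *            heavy_work();
 *
 *            k_thread_runtime_stats_get(k_current_get(), &stats);
 *            spent = stats.execution_cycles - before;
 *
 *            (void)spent;
 *    }
 */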
5712
5713 #endif
5714
5715 #ifdef __cplusplus
5716 }
5717 #endif
5718
5719 #include <tracing/tracing.h>
5720 #include <syscalls/kernel.h>
5721
5722 #endif /* !_ASMLANGUAGE */
5723
5724 #endif /* ZEPHYR_INCLUDE_KERNEL_H_ */
5725