/*
 * Copyright (c) 2016, Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Public kernel APIs.
 */

#ifndef ZEPHYR_INCLUDE_KERNEL_H_
#define ZEPHYR_INCLUDE_KERNEL_H_

#if !defined(_ASMLANGUAGE)
#include <zephyr/kernel_includes.h>
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <zephyr/toolchain.h>
#include <zephyr/tracing/tracing_macros.h>
#include <zephyr/sys/mem_stats.h>
#include <zephyr/sys/iterable_sections.h>
#include <zephyr/sys/ring_buffer.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Zephyr currently assumes the size of a couple standard types to simplify
 * print string formats. Let's make sure this doesn't change without notice.
 */
BUILD_ASSERT(sizeof(int32_t) == sizeof(int));
BUILD_ASSERT(sizeof(int64_t) == sizeof(long long));
BUILD_ASSERT(sizeof(intptr_t) == sizeof(long));

/**
 * @brief Kernel APIs
 * @defgroup kernel_apis Kernel APIs
 * @since 1.0
 * @version 1.0.0
 * @{
 * @}
 */

#define K_ANY NULL

#if (CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES) == 0
#error Zero available thread priorities defined!
#endif

#define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
#define K_PRIO_PREEMPT(x) (x)

#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
#define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
#define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
#define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
#define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)
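
/*
 * Worked example (an illustrative sketch, assuming the common defaults of
 * CONFIG_NUM_COOP_PRIORITIES=16 and CONFIG_NUM_PREEMPT_PRIORITIES=15):
 *
 *	K_PRIO_COOP(0)     == -16  (highest cooperative priority)
 *	K_PRIO_COOP(15)    == -1   (lowest cooperative priority)
 *	K_PRIO_PREEMPT(0)  == 0    (highest preemptible priority)
 *	K_PRIO_PREEMPT(14) == 14   (lowest application preemptible priority)
 *	K_IDLE_PRIO        == 15   (reserved for the idle thread)
 *
 * Numerically lower values always denote higher scheduling priority.
 */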

#ifdef CONFIG_POLL
#define Z_POLL_EVENT_OBJ_INIT(obj) \
	.poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
#define Z_DECL_POLL_EVENT sys_dlist_t poll_events;
#else
#define Z_POLL_EVENT_OBJ_INIT(obj)
#define Z_DECL_POLL_EVENT
#endif

struct k_thread;
struct k_mutex;
struct k_sem;
struct k_msgq;
struct k_mbox;
struct k_pipe;
struct k_queue;
struct k_fifo;
struct k_lifo;
struct k_stack;
struct k_mem_slab;
struct k_timer;
struct k_poll_event;
struct k_poll_signal;
struct k_mem_domain;
struct k_mem_partition;
struct k_futex;
struct k_event;

enum execution_context_types {
	K_ISR = 0,
	K_COOP_THREAD,
	K_PREEMPT_THREAD,
};

/* private, used by k_poll and k_work_poll */
struct k_work_poll;
typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);

/**
 * @addtogroup thread_apis
 * @{
 */

/**
 * @brief Reset the longest-frame usage statistic for the specified thread
 *
 * This routine resets a thread's longest-frame usage statistic to zero,
 * typically after it has been reported. This enables observation of the
 * longest frame from the most recent interval, rather than the longest
 * frame since startup.
 *
 * @param thread Pointer to the thread whose counter is reset.
 *
 * @note @kconfig{CONFIG_THREAD_ANALYZER_LONG_FRAME_PER_INTERVAL} must
 * be set for this function to be effective.
 */
static inline void
k_thread_runtime_stats_longest_frame_reset(__maybe_unused struct k_thread *thread)
{
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	thread->base.usage.longest = 0ULL;
#endif
}

typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
				   void *user_data);

/**
 * @brief Iterate over all the threads in the system.
 *
 * This routine iterates over all the threads in the system and
 * calls the user_cb function for each thread.
 *
 * @param user_cb Pointer to the user callback function.
 * @param user_data Pointer to user data.
 *
 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
 * to be effective.
 * @note This API uses @ref k_spin_lock to protect the _kernel.threads
 * list, which means creation of new threads and termination of existing
 * threads are blocked until this API returns.
 */
void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);
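
/*
 * Usage sketch (count_cb and thread_count are hypothetical application
 * code, not part of this header):
 *
 *	static void count_cb(const struct k_thread *thread, void *user_data)
 *	{
 *		unsigned int *count = user_data;
 *
 *		(*count)++;
 *	}
 *
 *	unsigned int thread_count = 0;
 *
 *	k_thread_foreach(count_cb, &thread_count);
 */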

/**
 * @brief Iterate over all the threads running on the specified CPU.
 *
 * This function does the same thing as k_thread_foreach(), except that
 * it only loops through the threads running on the specified CPU.
 * If CONFIG_SMP is not defined, the implementation is the same as
 * k_thread_foreach(), with an assert that cpu == 0.
 *
 * @param cpu The filtered cpu number
 * @param user_cb Pointer to the user callback function.
 * @param user_data Pointer to user data.
 *
 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
 * to be effective.
 * @note This API uses @ref k_spin_lock to protect the _kernel.threads
 * list, which means creation of new threads and termination of existing
 * threads are blocked until this API returns.
 */
#ifdef CONFIG_SMP
void k_thread_foreach_filter_by_cpu(unsigned int cpu,
				    k_thread_user_cb_t user_cb, void *user_data);
#else
static inline
void k_thread_foreach_filter_by_cpu(unsigned int cpu,
				    k_thread_user_cb_t user_cb, void *user_data)
{
	__ASSERT(cpu == 0, "cpu filter out of bounds");
	ARG_UNUSED(cpu);
	k_thread_foreach(user_cb, user_data);
}
#endif

/**
 * @brief Iterate over all the threads in the system without locking.
 *
 * This routine works exactly the same as @ref k_thread_foreach
 * but unlocks interrupts when user_cb is executed.
 *
 * @param user_cb Pointer to the user callback function.
 * @param user_data Pointer to user data.
 *
 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
 * to be effective.
 * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
 * queue elements. It unlocks it during user callback function processing.
 * If a new task is created while this @c foreach function is in progress,
 * the newly added task will not be included in the enumeration.
 * If a task is aborted during this enumeration, there is a race and it is
 * possible that the aborted task will be included in the enumeration.
 * @note If a task is aborted and the memory occupied by its @c k_thread
 * structure is reused while @c k_thread_foreach_unlocked is in progress,
 * the system may behave unstably. This function may even never return,
 * as it would follow @c next task pointers, treating the given pointer
 * as a pointer to a k_thread structure while it is now something different.
 * Do not reuse the memory occupied by the k_thread structure of an aborted
 * task if it was aborted after this function was called in any context.
 */
void k_thread_foreach_unlocked(
	k_thread_user_cb_t user_cb, void *user_data);

/**
 * @brief Iterate over the threads running on the current CPU without locking.
 *
 * This function does the same thing as k_thread_foreach_unlocked(),
 * except that it only loops through the threads running on the specified
 * CPU. If CONFIG_SMP is not defined, the implementation is the same as
 * k_thread_foreach_unlocked(), with an assert requiring cpu == 0.
 *
 * @param cpu The filtered cpu number
 * @param user_cb Pointer to the user callback function.
 * @param user_data Pointer to user data.
 *
 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
 * to be effective.
 * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
 * queue elements. It unlocks it during user callback function processing.
 * If a new task is created while this @c foreach function is in progress,
 * the newly added task will not be included in the enumeration.
 * If a task is aborted during this enumeration, there is a race and it is
 * possible that the aborted task will be included in the enumeration.
 * @note If a task is aborted and the memory occupied by its @c k_thread
 * structure is reused while @c k_thread_foreach_unlocked is in progress,
 * the system may behave unstably. This function may even never return,
 * as it would follow @c next task pointers, treating the given pointer
 * as a pointer to a k_thread structure while it is now something different.
 * Do not reuse the memory occupied by the k_thread structure of an aborted
 * task if it was aborted after this function was called in any context.
 */
#ifdef CONFIG_SMP
void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu,
					     k_thread_user_cb_t user_cb, void *user_data);
#else
static inline
void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu,
					     k_thread_user_cb_t user_cb, void *user_data)
{
	__ASSERT(cpu == 0, "cpu filter out of bounds");
	ARG_UNUSED(cpu);
	k_thread_foreach_unlocked(user_cb, user_data);
}
#endif

/** @} */

/**
 * @defgroup thread_apis Thread APIs
 * @ingroup kernel_apis
 * @{
 */

#endif /* !_ASMLANGUAGE */

/*
 * Thread user options. May be needed by assembly code. Common part uses low
 * bits, arch-specific use high bits.
 */

/**
 * @brief system thread that must not abort
 */
#define K_ESSENTIAL (BIT(0))

#define K_FP_IDX 1
/**
 * @brief FPU registers are managed by context switch
 *
 * @details
 * This option indicates that the thread uses the CPU's floating point
 * registers. This instructs the kernel to take additional steps to save
 * and restore the contents of these registers when scheduling the thread.
 * No effect if @kconfig{CONFIG_FPU_SHARING} is not enabled.
 */
#define K_FP_REGS (BIT(K_FP_IDX))

/**
 * @brief user mode thread
 *
 * This thread has dropped from supervisor mode to user mode and consequently
 * has additional restrictions
 */
#define K_USER (BIT(2))

/**
 * @brief Inherit Permissions
 *
 * @details
 * Indicates that the thread being created should inherit all kernel object
 * permissions from the thread that created it. No effect if
 * @kconfig{CONFIG_USERSPACE} is not enabled.
 */
#define K_INHERIT_PERMS (BIT(3))

/**
 * @brief Callback item state
 *
 * @details
 * This is a single bit of state reserved for "callback manager"
 * utilities (p4wq initially) that need to track operations invoked
 * from within a user-provided callback.
 * Effectively it serves as a tiny bit of zero-overhead TLS data.
 */
#define K_CALLBACK_STATE (BIT(4))

/**
 * @brief DSP registers are managed by context switch
 *
 * @details
 * This option indicates that the thread uses the CPU's DSP registers.
 * This instructs the kernel to take additional steps to save and
 * restore the contents of these registers when scheduling the thread.
 * No effect if @kconfig{CONFIG_DSP_SHARING} is not enabled.
 */
#define K_DSP_IDX 6
#define K_DSP_REGS (BIT(K_DSP_IDX))

/**
 * @brief AGU registers are managed by context switch
 *
 * @details
 * This option indicates that the thread uses the ARC processor's XY
 * memory and DSP feature. Often used with @kconfig{CONFIG_ARC_AGU_SHARING}.
 * No effect if @kconfig{CONFIG_ARC_AGU_SHARING} is not enabled.
 */
#define K_AGU_IDX 7
#define K_AGU_REGS (BIT(K_AGU_IDX))

/**
 * @brief FP and SSE registers are managed by context switch on x86
 *
 * @details
 * This option indicates that the thread uses the x86 CPU's floating point
 * and SSE registers. This instructs the kernel to take additional steps to
 * save and restore the contents of these registers when scheduling
 * the thread. No effect if @kconfig{CONFIG_X86_SSE} is not enabled.
 */
#define K_SSE_REGS (BIT(7))

/* end - thread options */

#if !defined(_ASMLANGUAGE)
/**
 * @brief Dynamically allocate a thread stack.
 *
 * Dynamically allocate a thread stack either from a pool of thread stacks of
 * size @kconfig{CONFIG_DYNAMIC_THREAD_POOL_SIZE}, or from the system heap.
 * Order is determined by the @kconfig{CONFIG_DYNAMIC_THREAD_PREFER_ALLOC} and
 * @kconfig{CONFIG_DYNAMIC_THREAD_PREFER_POOL} options. Thread stacks from the
 * pool are of maximum size @kconfig{CONFIG_DYNAMIC_THREAD_STACK_SIZE}.
 *
 * @note When no longer required, thread stacks allocated with
 * `k_thread_stack_alloc()` must be freed with @ref k_thread_stack_free to
 * avoid leaking memory.
 *
 * @param size Stack size in bytes.
 * @param flags Stack creation flags, or 0.
 *
 * @retval the allocated thread stack on success.
 * @retval NULL on failure.
 *
 * Relevant stack creation flags include:
 * - @ref K_USER allocate a userspace thread (requires @kconfig{CONFIG_USERSPACE})
 *
 * @see @kconfig{CONFIG_DYNAMIC_THREAD}
 */
__syscall k_thread_stack_t *k_thread_stack_alloc(size_t size, int flags);

/**
 * @brief Free a dynamically allocated thread stack.
 *
 * @param stack Pointer to the thread stack.
 *
 * @retval 0 on success.
 * @retval -EBUSY if the thread stack is in use.
 * @retval -EINVAL if @p stack is invalid.
 * @retval -ENOSYS if dynamic thread stack allocation is disabled
 *
 * @see @kconfig{CONFIG_DYNAMIC_THREAD}
 */
__syscall int k_thread_stack_free(k_thread_stack_t *stack);
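
/*
 * Usage sketch (assumes CONFIG_DYNAMIC_THREAD is enabled; error handling
 * is application-specific):
 *
 *	k_thread_stack_t *stack = k_thread_stack_alloc(2048, 0);
 *
 *	if (stack == NULL) {
 *		return -ENOMEM;
 *	}
 *
 *	... create a thread on the stack, join it when done ...
 *
 *	(void)k_thread_stack_free(stack);
 */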

/**
 * @brief Create a thread.
 *
 * This routine initializes a thread, then schedules it for execution.
 *
 * The new thread may be scheduled for immediate execution or a delayed start.
 * If the newly spawned thread does not have a delayed start the kernel
 * scheduler may preempt the current thread to allow the new thread to
 * execute.
 *
 * Thread options are architecture-specific, and can include K_ESSENTIAL,
 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
 * them using "|" (the logical OR operator).
 *
 * Stack objects passed to this function may be statically allocated with
 * either of these macros in order to be portable:
 *
 * - K_THREAD_STACK_DEFINE() - For stacks that may support either user or
 *   supervisor threads.
 * - K_KERNEL_STACK_DEFINE() - For stacks that may support supervisor
 *   threads only. These stacks use less memory if CONFIG_USERSPACE is
 *   enabled.
 *
 * Alternatively, the stack may be dynamically allocated using
 * @ref k_thread_stack_alloc.
 *
 * The stack_size parameter has constraints. It must either be:
 *
 * - The original size value passed to K_THREAD_STACK_DEFINE() or
 *   K_KERNEL_STACK_DEFINE()
 * - The return value of K_THREAD_STACK_SIZEOF(stack) if the stack was
 *   defined with K_THREAD_STACK_DEFINE()
 * - The return value of K_KERNEL_STACK_SIZEOF(stack) if the stack was
 *   defined with K_KERNEL_STACK_DEFINE().
 *
 * Using other values, or sizeof(stack) may produce undefined behavior.
 *
 * @param new_thread Pointer to uninitialized struct k_thread
 * @param stack Pointer to the stack space.
 * @param stack_size Stack size in bytes.
 * @param entry Thread entry function.
 * @param p1 1st entry point parameter.
 * @param p2 2nd entry point parameter.
 * @param p3 3rd entry point parameter.
 * @param prio Thread priority.
 * @param options Thread options.
 * @param delay Scheduling delay, or K_NO_WAIT (for no delay).
 *
 * @return ID of new thread.
 *
 */
__syscall k_tid_t k_thread_create(struct k_thread *new_thread,
				  k_thread_stack_t *stack,
				  size_t stack_size,
				  k_thread_entry_t entry,
				  void *p1, void *p2, void *p3,
				  int prio, uint32_t options, k_timeout_t delay);
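
/*
 * Usage sketch (my_entry, my_stack, and my_thread are hypothetical
 * application symbols, not part of this header):
 *
 *	#define MY_STACK_SIZE 1024
 *
 *	K_THREAD_STACK_DEFINE(my_stack, MY_STACK_SIZE);
 *	static struct k_thread my_thread;
 *
 *	static void my_entry(void *p1, void *p2, void *p3)
 *	{
 *		...
 *	}
 *
 *	k_tid_t tid = k_thread_create(&my_thread, my_stack,
 *				      K_THREAD_STACK_SIZEOF(my_stack),
 *				      my_entry, NULL, NULL, NULL,
 *				      K_PRIO_PREEMPT(5), 0, K_NO_WAIT);
 */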

/**
 * @brief Drop a thread's privileges permanently to user mode
 *
 * This allows a supervisor thread to be re-used as a user thread.
 * This function does not return, but control will transfer to the provided
 * entry point as if this was a new user thread.
 *
 * The implementation ensures that the stack buffer contents are erased.
 * Any thread-local storage will be reverted to a pristine state.
 *
 * Memory domain membership, resource pool assignment, kernel object
 * permissions, priority, and thread options are preserved.
 *
 * A common use of this function is to re-use the main thread as a user thread
 * once all supervisor mode-only tasks have been completed.
 *
 * @param entry Function to start executing from
 * @param p1 1st entry point parameter
 * @param p2 2nd entry point parameter
 * @param p3 3rd entry point parameter
 */
FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
					    void *p1, void *p2,
					    void *p3);

/**
 * @brief Grant a thread access to a set of kernel objects
 *
 * This is a convenience function. For the provided thread, grant access to
 * the remaining arguments, which must be pointers to kernel objects.
 *
 * The thread object must be initialized (i.e. running). The objects don't
 * need to be.
 * Note that NULL shouldn't be passed as an argument.
 *
 * @param thread Thread to grant access to objects
 * @param ... list of kernel object pointers
 */
#define k_thread_access_grant(thread, ...) \
	FOR_EACH_FIXED_ARG(k_object_access_grant, (;), (thread), __VA_ARGS__)
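
/*
 * Usage sketch (child_tid, my_sem, and my_queue are hypothetical kernel
 * objects the target user thread needs to use):
 *
 *	k_thread_access_grant(child_tid, &my_sem, &my_queue);
 */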

/**
 * @brief Assign a resource memory pool to a thread
 *
 * By default, threads have no resource pool assigned unless their parent
 * thread has a resource pool, in which case it is inherited. Multiple
 * threads may be assigned to the same memory pool.
 *
 * Changing a thread's resource pool will not migrate allocations from the
 * previous pool.
 *
 * @param thread Target thread to assign a memory pool for resource requests.
 * @param heap Heap object to use for resources,
 *             or NULL if the thread should no longer have a memory pool.
 */
static inline void k_thread_heap_assign(struct k_thread *thread,
					struct k_heap *heap)
{
	thread->resource_pool = heap;
}
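
/*
 * Usage sketch (app_heap is a hypothetical application heap):
 *
 *	K_HEAP_DEFINE(app_heap, 4096);
 *
 *	k_thread_heap_assign(&my_thread, &app_heap);
 */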

#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
/**
 * @brief Obtain stack usage information for the specified thread
 *
 * User threads will need to have permission on the target thread object.
 *
 * Some hardware may prevent inspection of a stack buffer currently in use.
 * If this API is called from supervisor mode, on the currently running thread,
 * on a platform which selects @kconfig{CONFIG_NO_UNUSED_STACK_INSPECTION}, an
 * error will be generated.
 *
 * @param thread Thread to inspect stack information
 * @param unused_ptr Output parameter, filled in with the unused stack space
 *	of the target thread in bytes.
 * @return 0 on success
 * @return -EBADF Bad thread object (user mode only)
 * @return -EPERM No permissions on thread object (user mode only)
 * @return -ENOTSUP Forbidden by hardware policy
 * @return -EINVAL Thread is uninitialized or exited (user mode only)
 * @return -EFAULT Bad memory address for unused_ptr (user mode only)
 */
__syscall int k_thread_stack_space_get(const struct k_thread *thread,
				       size_t *unused_ptr);

/**
 * @brief Set the unused stack threshold for a thread as a percentage
 *
 * This function sets the unused stack safety usage threshold for a thread as a
 * percentage of the specified thread's total stack size. When performing a
 * runtime stack safety usage check, if the thread's unused stack is detected
 * to be below this threshold, then a runtime stack safety usage hook will be
 * invoked. Setting this threshold to 0% disables the hook.
 *
 * @param thread Thread on which to set the threshold
 * @param pct Percentage of total stack size to use as threshold
 *
 * @retval 0 on success
 * @retval -EINVAL if @p pct exceeds 99%
 */
__syscall int k_thread_runtime_stack_unused_threshold_pct_set(struct k_thread *thread,
							      uint32_t pct);

/**
 * @brief Set the unused stack threshold for a thread as a number of bytes
 *
 * This function sets the unused stack safety usage threshold for a thread as a
 * number of bytes. When performing a runtime stack safety usage check, if the
 * thread's unused stack is detected to be below this threshold, then a runtime
 * stack safety usage hook will be invoked. Setting this threshold to 0 bytes
 * disables the hook.
 *
 * @param thread Thread on which to set the threshold
 * @param threshold Number of bytes to use as threshold
 *
 * @retval 0 on success
 * @retval -EINVAL if @p threshold exceeds stack size
 */
__syscall int k_thread_runtime_stack_unused_threshold_set(struct k_thread *thread,
							  size_t threshold);

/**
 * @brief Get the unused stack usage threshold (in bytes)
 *
 * This function retrieves the unused stack usage threshold for a thread as a
 * number of bytes. A value of 0 bytes indicates the thread does not have an
 * unused stack usage threshold and that the runtime stack safety usage hook is
 * disabled for this thread.
 *
 * @param thread Thread from which to retrieve the threshold
 *
 * @retval Unused stack threshold (in bytes)
 */
__syscall size_t k_thread_runtime_stack_unused_threshold_get(struct k_thread *thread);

/**
 * @brief Thread stack safety handler type
 *
 * This type defines the prototype for a custom thread stack safety handler.
 * The handler is invoked when a thread's unused stack space is detected to
 * have crossed below its configured threshold.
 *
 * @param thread Thread whose stack has crossed the safety threshold
 * @param unused_space Amount of unused stack space remaining
 * @param arg Pointer to user defined argument passed to the handler
 */
typedef void (*k_thread_stack_safety_handler_t)(const struct k_thread *thread,
						size_t unused_space, void *arg);

/**
 * @brief Run the full stack safety check on a thread
 *
 * This function scans the specified thread's stack to determine how much of it
 * remains unused. If the unused stack space is found to be less than the
 * thread's configured threshold then the specified handler is executed.
 *
 * @param thread Thread whose stack to check
 * @param unused_ptr Amount of unused stack space remaining
 * @param handler Custom handler to invoke if threshold crossed
 * @param arg Argument to pass to handler
 *
 * @return 0 on success, -ENOTSUP if forbidden by hardware policy
 */
int k_thread_runtime_stack_safety_full_check(const struct k_thread *thread,
					     size_t *unused_ptr,
					     k_thread_stack_safety_handler_t handler,
					     void *arg);

/**
 * @brief Run an abbreviated stack safety check on a thread
 *
 * This function scans the specified thread's stack for evidence that it has
 * crossed its configured threshold of unused stack space. If this evidence is
 * found, the specified handler is executed.
 *
 * @param thread Thread whose stack to check
 * @param unused_ptr Amount of unused stack space remaining
 * @param handler Custom handler to invoke if threshold crossed
 * @param arg Argument to pass to handler
 *
 * @return 0 on success, -ENOTSUP if forbidden by hardware policy
 */
int k_thread_runtime_stack_safety_threshold_check(const struct k_thread *thread,
						  size_t *unused_ptr,
						  k_thread_stack_safety_handler_t handler,
						  void *arg);
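
/*
 * Usage sketch (my_safety_handler is hypothetical application code; the
 * 128-byte threshold is an arbitrary illustrative value):
 *
 *	static void my_safety_handler(const struct k_thread *thread,
 *				      size_t unused_space, void *arg)
 *	{
 *		printk("thread %p: only %zu unused stack bytes left\n",
 *		       (void *)thread, unused_space);
 *	}
 *
 *	size_t unused;
 *
 *	(void)k_thread_runtime_stack_unused_threshold_set(&my_thread, 128);
 *	(void)k_thread_runtime_stack_safety_full_check(&my_thread, &unused,
 *						       my_safety_handler, NULL);
 */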
#endif

#if (K_HEAP_MEM_POOL_SIZE > 0)
/**
 * @brief Assign the system heap as a thread's resource pool
 *
 * Similar to k_thread_heap_assign(), but the thread will use
 * the kernel heap to draw memory.
 *
 * Use with caution, as a malicious thread could perform DoS attacks on the
 * kernel heap.
 *
 * @param thread Target thread to assign the system heap for resource requests
 *
 */
void k_thread_system_pool_assign(struct k_thread *thread);
#endif /* (K_HEAP_MEM_POOL_SIZE > 0) */

/**
 * @brief Sleep until a thread exits
 *
 * The caller will be put to sleep until the target thread exits, either due
 * to being aborted, self-exiting, or taking a fatal error. This API returns
 * immediately if the thread isn't running.
 *
 * This API may only be called from ISRs with a K_NO_WAIT timeout,
 * where it can be useful as a predicate to detect when a thread has
 * aborted.
 *
 * @param thread Thread to wait to exit
 * @param timeout upper bound time to wait for the thread to exit.
 * @retval 0 success, target thread has exited or wasn't running
 * @retval -EBUSY returned without waiting
 * @retval -EAGAIN waiting period timed out
 * @retval -EDEADLK target thread is joining on the caller, or target thread
 *	is the caller
 */
__syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);
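
/*
 * Usage sketch: wait up to one second for a worker thread to exit.
 *
 *	int ret = k_thread_join(&my_thread, K_MSEC(1000));
 *
 *	if (ret == -EAGAIN) {
 *		... thread is still running after the timeout ...
 *	}
 */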

/**
 * @brief Put the current thread to sleep.
 *
 * This routine puts the current thread to sleep for @a duration,
 * specified as a k_timeout_t object.
 *
 * @param timeout Desired duration of sleep.
 *
 * @return Zero if the requested time has elapsed or the time left to
 * sleep rounded up to the nearest millisecond (e.g. if the thread was
 * awoken by the \ref k_wakeup call). Will be clamped to INT_MAX in
 * the case where the remaining time is unrepresentable in an int32_t.
 */
__syscall int32_t k_sleep(k_timeout_t timeout);

/**
 * @brief Put the current thread to sleep.
 *
 * This routine puts the current thread to sleep for @a duration milliseconds.
 *
 * @param ms Number of milliseconds to sleep.
 *
 * @return Zero if the requested time has elapsed or if the thread was woken up
 * by the \ref k_wakeup call, the time left to sleep rounded up to the nearest
 * millisecond.
 */
static inline int32_t k_msleep(int32_t ms)
{
	return k_sleep(Z_TIMEOUT_MS(ms));
}

/**
 * @brief Put the current thread to sleep with microsecond resolution.
 *
 * This function is unlikely to work as expected without kernel tuning.
 * In particular, because the lower bound on the duration of a sleep is
 * the duration of a tick, @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} must be
 * adjusted to achieve the resolution desired. The implications of doing
 * this must be understood before attempting to use k_usleep(). Use with
 * caution.
 *
 * @param us Number of microseconds to sleep.
 *
 * @return Zero if the requested time has elapsed or if the thread was woken up
 * by the \ref k_wakeup call, the time left to sleep rounded up to the nearest
 * microsecond.
 */
__syscall int32_t k_usleep(int32_t us);

/**
 * @brief Cause the current thread to busy wait.
 *
 * This routine causes the current thread to execute a "do nothing" loop for
 * @a usec_to_wait microseconds.
 *
 * @note The clock used for the microsecond-resolution delay here may
 * be skewed relative to the clock used for system timeouts like
 * k_sleep(). For example k_busy_wait(1000) may take slightly more or
 * less time than k_sleep(K_MSEC(1)), with the offset dependent on
 * clock tolerances.
 *
 * @note In case when @kconfig{CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE} and
 * @kconfig{CONFIG_PM} options are enabled, this function may not work.
 * The timer/clock used for delay processing may be disabled/inactive.
 */
__syscall void k_busy_wait(uint32_t usec_to_wait);

/**
 * @brief Check whether it is possible to yield in the current context.
 *
 * This routine checks whether the kernel is in a state where it is possible to
 * yield or call blocking API's. It should be used by code that needs to yield
 * to perform correctly, but can feasibly be called from contexts where that
 * is not possible. For example in the PRE_KERNEL initialization step, or when
 * being run from the idle thread.
 *
 * @return True if it is possible to yield in the current context, false otherwise.
 */
bool k_can_yield(void);

/**
 * @brief Yield the current thread.
 *
 * This routine causes the current thread to yield execution to another
 * thread of the same or higher priority. If there are no other ready threads
 * of the same or higher priority, the routine returns immediately.
 */
__syscall void k_yield(void);

/**
 * @brief Wake up a sleeping thread.
 *
 * This routine prematurely wakes up @a thread from sleeping.
 *
 * If @a thread is not currently sleeping, the routine has no effect.
 *
 * @param thread ID of thread to wake.
 */
__syscall void k_wakeup(k_tid_t thread);

/**
 * @brief Query thread ID of the current thread.
 *
 * This unconditionally queries the kernel via a system call.
 *
 * @note Use k_current_get() unless absolutely sure this is necessary.
 * This should only be used directly where the thread local
 * variable cannot be used or may contain invalid values
 * if thread local storage (TLS) is enabled. If TLS is not
 * enabled, this is the same as k_current_get().
 *
 * @return ID of current thread.
 */
__attribute_const__
__syscall k_tid_t k_sched_current_thread_query(void);

/**
 * @brief Test whether startup is in the before-main-task phase.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it is being invoked before the kernel is fully active.
 *
 * @funcprops \isr_ok
 *
 * @return true if invoked before post-kernel initialization
 * @return false if invoked during/after post-kernel initialization
 */
static inline bool k_is_pre_kernel(void)
{
	extern bool z_sys_post_kernel; /* in init.c */

	/*
	 * If called from userspace, it must be post kernel.
	 * This guard is necessary because z_sys_post_kernel memory
	 * is not accessible to user threads.
	 */
	if (k_is_user_context()) {
		return false;
	}

	/*
	 * Some compilers might optimize by pre-reading
	 * z_sys_post_kernel. This is absolutely not desirable.
	 * We are trying to avoid reading it if we are in user
	 * context, as reading z_sys_post_kernel in user context
	 * will result in an access fault. So add a compiler barrier
	 * here to stop that kind of optimization.
	 */
	compiler_barrier();

	return !z_sys_post_kernel;
}
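
/*
 * Usage sketch: choose a safe code path during early boot.
 *
 *	if (k_is_pre_kernel()) {
 *		... scheduler not yet running: avoid blocking calls ...
 *	} else {
 *		... normal, possibly blocking behavior ...
 *	}
 */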

/**
 * @brief Get thread ID of the current thread.
 *
 * @return ID of current thread.
 *
 */
__attribute_const__
static inline k_tid_t k_current_get(void)
{
	__ASSERT(!k_is_pre_kernel(), "k_current_get called pre-kernel");

#ifdef CONFIG_CURRENT_THREAD_USE_TLS

	/* Thread-local cache of current thread ID, set in z_thread_entry() */
	extern Z_THREAD_LOCAL k_tid_t z_tls_current;

	return z_tls_current;
#else
	return k_sched_current_thread_query();
#endif
}

/**
 * @brief Abort a thread.
 *
 * This routine permanently stops execution of @a thread. The thread is taken
 * off all kernel queues it is part of (i.e. the ready queue, the timeout
 * queue, or a kernel object wait queue). However, any kernel resources the
 * thread might currently own (such as mutexes or memory blocks) are not
 * released. It is the responsibility of the caller of this routine to ensure
 * all necessary cleanup is performed.
 *
 * After k_thread_abort() returns, the thread is guaranteed not to be
 * running or to become runnable anywhere on the system. Normally
 * this is done via blocking the caller (in the same manner as
 * k_thread_join()), but in interrupt context on SMP systems the
 * implementation is required to spin for threads that are running on
 * other CPUs.
 *
 * @param thread ID of thread to abort.
 */
__syscall void k_thread_abort(k_tid_t thread);

k_ticks_t z_timeout_expires(const struct _timeout *timeout);
k_ticks_t z_timeout_remaining(const struct _timeout *timeout);

#ifdef CONFIG_SYS_CLOCK_EXISTS

/**
 * @brief Get time when a thread wakes up, in system ticks
 *
 * This routine computes the system uptime when a waiting thread next
 * executes, in units of system ticks. If the thread is not waiting,
 * it returns current system time.
 */
__syscall k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *thread);

static inline k_ticks_t z_impl_k_thread_timeout_expires_ticks(
	const struct k_thread *thread)
{
	return z_timeout_expires(&thread->base.timeout);
}

/**
 * @brief Get time remaining before a thread wakes up, in system ticks
 *
 * This routine computes the time remaining before a waiting thread
 * next executes, in units of system ticks. If the thread is not
 * waiting, it returns zero.
 */
__syscall k_ticks_t k_thread_timeout_remaining_ticks(const struct k_thread *thread);

static inline k_ticks_t z_impl_k_thread_timeout_remaining_ticks(
	const struct k_thread *thread)
{
	return z_timeout_remaining(&thread->base.timeout);
}

#endif /* CONFIG_SYS_CLOCK_EXISTS */

/**
 * @cond INTERNAL_HIDDEN
 */

struct _static_thread_data {
	struct k_thread *init_thread;
	k_thread_stack_t *init_stack;
	unsigned int init_stack_size;
	k_thread_entry_t init_entry;
	void *init_p1;
	void *init_p2;
	void *init_p3;
	int init_prio;
	uint32_t init_options;
	const char *init_name;
#ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
	int32_t init_delay_ms;
#else
	k_timeout_t init_delay;
#endif
};

#ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
#define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay_ms = (ms)
#define Z_THREAD_INIT_DELAY(thread) SYS_TIMEOUT_MS((thread)->init_delay_ms)
#else
#define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay = SYS_TIMEOUT_MS_INIT(ms)
#define Z_THREAD_INIT_DELAY(thread) (thread)->init_delay
#endif

#define Z_THREAD_INITIALIZER(thread, stack, stack_size,	\
			     entry, p1, p2, p3,		\
			     prio, options, delay, tname)	\
	{							\
	.init_thread = (thread),				\
	.init_stack = (stack),					\
	.init_stack_size = (stack_size),			\
	.init_entry = (k_thread_entry_t)entry,			\
	.init_p1 = (void *)p1,					\
	.init_p2 = (void *)p2,					\
	.init_p3 = (void *)p3,					\
	.init_prio = (prio),					\
	.init_options = (options),				\
	.init_name = STRINGIFY(tname),				\
	Z_THREAD_INIT_DELAY_INITIALIZER(delay)			\
	}

/*
 * Refer to K_THREAD_DEFINE() and K_KERNEL_THREAD_DEFINE() for
 * information on arguments.
 */
#define Z_THREAD_COMMON_DEFINE(name, stack_size,			\
			       entry, p1, p2, p3,			\
			       prio, options, delay)			\
	struct k_thread _k_thread_obj_##name;				\
	const STRUCT_SECTION_ITERABLE(_static_thread_data,		\
				      _k_thread_data_##name) =		\
		Z_THREAD_INITIALIZER(&_k_thread_obj_##name,		\
				     _k_thread_stack_##name, stack_size,\
				     entry, p1, p2, p3, prio, options,	\
				     delay, name);			\
	__maybe_unused const k_tid_t name = (k_tid_t)&_k_thread_obj_##name

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @brief Statically define and initialize a thread.
 *
 * The thread may be scheduled for immediate execution or a delayed start.
 *
 * Thread options are architecture-specific, and can include K_ESSENTIAL,
 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
 * them using "|" (the logical OR operator).
 *
 * The ID of the thread can be accessed using:
 *
 * @code extern const k_tid_t <name>; @endcode
 *
 * @param name Name of the thread.
 * @param stack_size Stack size in bytes.
 * @param entry Thread entry function.
 * @param p1 1st entry point parameter.
 * @param p2 2nd entry point parameter.
 * @param p3 3rd entry point parameter.
 * @param prio Thread priority.
 * @param options Thread options.
 * @param delay Scheduling delay (in milliseconds), zero for no delay.
 *
 * @note Static threads with zero delay should not normally have
 * MetaIRQ priority levels. This can preempt the system
 * initialization handling (depending on the priority of the main
 * thread) and cause surprising ordering side effects. It will not
 * affect anything in the OS per se, but consider it bad practice.
 * Use a SYS_INIT() callback if you need to run code before entrance
 * to the application main().
 */
#define K_THREAD_DEFINE(name, stack_size,				\
			entry, p1, p2, p3,				\
			prio, options, delay)				\
	K_THREAD_STACK_DEFINE(_k_thread_stack_##name, stack_size);	\
	Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3,	\
			       prio, options, delay)
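
/*
 * Usage sketch (blink_entry is hypothetical application code):
 *
 *	static void blink_entry(void *p1, void *p2, void *p3)
 *	{
 *		...
 *	}
 *
 *	K_THREAD_DEFINE(blink, 1024, blink_entry, NULL, NULL, NULL,
 *			K_PRIO_PREEMPT(7), 0, 0);
 *
 * Elsewhere, the thread ID is available as:
 *
 *	extern const k_tid_t blink;
 */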

/**
 * @brief Statically define and initialize a thread intended to run only in kernel mode.
 *
 * The thread may be scheduled for immediate execution or a delayed start.
 *
 * Thread options are architecture-specific, and can include K_ESSENTIAL,
 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
 * them using "|" (the logical OR operator).
 *
 * The ID of the thread can be accessed using:
 *
 * @code extern const k_tid_t <name>; @endcode
 *
 * @note Threads defined by this can only run in kernel mode, and cannot be
 *       transformed into a user thread via k_thread_user_mode_enter().
 *
 * @warning Depending on the architecture, the stack size (@p stack_size)
 *          may need to be a multiple of CONFIG_MMU_PAGE_SIZE (if MMU)
 *          or a power-of-two size (if MPU).
 *
 * @param name Name of the thread.
 * @param stack_size Stack size in bytes.
 * @param entry Thread entry function.
 * @param p1 1st entry point parameter.
 * @param p2 2nd entry point parameter.
 * @param p3 3rd entry point parameter.
 * @param prio Thread priority.
 * @param options Thread options.
 * @param delay Scheduling delay (in milliseconds), zero for no delay.
 */
#define K_KERNEL_THREAD_DEFINE(name, stack_size,			\
			       entry, p1, p2, p3,			\
			       prio, options, delay)			\
	K_KERNEL_STACK_DEFINE(_k_thread_stack_##name, stack_size);	\
	Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3,	\
			       prio, options, delay)

/**
 * @brief Get a thread's priority.
 *
 * This routine gets the priority of @a thread.
 *
 * @param thread ID of thread whose priority is needed.
 *
 * @return Priority of @a thread.
 */
__syscall int k_thread_priority_get(k_tid_t thread);

/**
 * @brief Set a thread's priority.
 *
 * This routine immediately changes the priority of @a thread.
 *
 * Rescheduling can occur immediately depending on the priority @a thread is
 * set to:
 *
 * - If its priority is raised above the priority of a currently scheduled
 * preemptible thread, @a thread will be scheduled in.
 *
 * - If the caller lowers the priority of a currently scheduled preemptible
 * thread below that of other threads in the system, the thread of the highest
 * priority will be scheduled in.
 *
 * Priority can be assigned in the range of -CONFIG_NUM_COOP_PRIORITIES to
 * CONFIG_NUM_PREEMPT_PRIORITIES-1, where -CONFIG_NUM_COOP_PRIORITIES is the
 * highest priority.
 *
 * @param thread ID of thread whose priority is to be set.
 * @param prio New priority.
 *
 * @warning Changing the priority of a thread currently involved in mutex
 * priority inheritance may result in undefined behavior.
 */
__syscall void k_thread_priority_set(k_tid_t thread, int prio);


#ifdef CONFIG_SCHED_DEADLINE
/**
 * @brief Set relative deadline expiration time for scheduler
 *
 * This sets the "deadline" expiration as a time delta from the
 * current time, in the same units used by k_cycle_get_32(). The
 * scheduler (when deadline scheduling is enabled) will choose the
 * next expiring thread when selecting between threads at the same
 * static priority. Threads at different priorities will be scheduled
 * according to their static priority.
 *
 * @note Deadlines are stored internally using 32 bit unsigned
 * integers. The number of cycles between the "first" deadline in the
 * scheduler queue and the "last" deadline must be less than 2^31 (i.e.
 * a signed non-negative quantity). Failure to adhere to this rule
 * may result in scheduled threads running in an incorrect deadline
 * order.
 *
 * @note Despite the API naming, the scheduler makes no guarantees
 * the thread WILL be scheduled within that deadline, nor does it take
 * extra metadata (like e.g. the "runtime" and "period" parameters in
 * Linux sched_setattr()) that allows the kernel to validate the
 * scheduling for achievability. Such features could be implemented
 * above this call, which is simply input to the priority selection
 * logic.
 *
 * @kconfig_dep{CONFIG_SCHED_DEADLINE}
 *
 * @param thread A thread on which to set the deadline
 * @param deadline A time delta, in cycle units
 *
 */
__syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
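
/*
 * Usage sketch (assumes CONFIG_SCHED_DEADLINE=y): give a thread an
 * earliest-deadline-first hint of roughly one millisecond from now.
 *
 *	uint32_t cyc_per_ms = sys_clock_hw_cycles_per_sec() / 1000U;
 *
 *	k_thread_deadline_set(my_tid, (int)cyc_per_ms);
 */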

/**
 * @brief Set absolute deadline expiration time for scheduler
 *
 * This sets the "deadline" expiration as a timestamp in the same
 * units used by k_cycle_get_32(). The scheduler (when deadline scheduling
 * is enabled) will choose the next expiring thread when selecting between
 * threads at the same static priority. Threads at different priorities
 * will be scheduled according to their static priority.
 *
 * Unlike @ref k_thread_deadline_set which sets a relative timestamp to a
 * "now" implicitly determined during its call, this routine sets an
 * absolute timestamp that is computed from a timestamp relative to
 * an explicit "now" that was determined before this routine is called.
 * This allows the caller to specify deadlines for multiple threads
 * using a common "now".
 *
 * @note Deadlines are stored internally using 32 bit unsigned
 * integers. The number of cycles between the "first" deadline in the
 * scheduler queue and the "last" deadline must be less than 2^31 (i.e.
 * a signed non-negative quantity). Failure to adhere to this rule
 * may result in scheduled threads running in an incorrect deadline
 * order.
 *
 * @note Even if a provided timestamp is in the past, the kernel will
 * still schedule threads with deadlines in order from the earliest to
 * the latest.
 *
 * @note Despite the API naming, the scheduler makes no guarantees
 * the thread WILL be scheduled within that deadline, nor does it take
 * extra metadata (like e.g. the "runtime" and "period" parameters in
 * Linux sched_setattr()) that allows the kernel to validate the
 * scheduling for achievability. Such features could be implemented
 * above this call, which is simply input to the priority selection
 * logic.
 *
 * @kconfig_dep{CONFIG_SCHED_DEADLINE}
 *
 * @param thread A thread on which to set the deadline
 * @param deadline A timestamp, in cycle units
 */
__syscall void k_thread_absolute_deadline_set(k_tid_t thread, int deadline);
#endif

/**
 * @brief Invoke the scheduler
 *
 * This routine invokes the scheduler to force a schedule point on the current
 * CPU. If invoked from within a thread, the scheduler will be invoked
 * immediately (provided interrupts were not locked when invoked). If invoked
 * from within an ISR, the scheduler will be invoked upon exiting the ISR.
 *
 * Invoking the scheduler allows the kernel to make an immediate determination
 * as to what the next thread to execute should be. Unlike yielding, this
 * routine is not guaranteed to switch to a thread of equal or higher priority
 * if any are available. For example, if the current thread is cooperative and
 * there is a still higher priority cooperative thread that is ready, then
 * yielding will switch to that higher priority thread whereas this routine
 * will not.
 *
 * Most applications will never use this routine.
 */
__syscall void k_reschedule(void);

#ifdef CONFIG_SCHED_CPU_MASK
/**
 * @brief Sets all CPU enable masks to zero
 *
 * After this returns, the thread will no longer be schedulable on any
 * CPUs. The thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_clear(k_tid_t thread);

/**
 * @brief Sets all CPU enable masks to one
 *
 * After this returns, the thread will be schedulable on any CPU. The
 * thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_enable_all(k_tid_t thread);

/**
 * @brief Enable thread to run on specified CPU
 *
 * The thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @param cpu CPU index
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_enable(k_tid_t thread, int cpu);

/**
 * @brief Prevent thread from running on specified CPU
 *
 * The thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @param cpu CPU index
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_disable(k_tid_t thread, int cpu);

/**
 * @brief Pin a thread to a CPU
 *
 * Pin a thread to a CPU by first clearing the cpu mask and then enabling the
 * thread on the selected CPU.
 *
 * @param thread Thread to operate upon
 * @param cpu CPU index
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_pin(k_tid_t thread, int cpu);
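
/*
 * Usage sketch (assumes CONFIG_SCHED_CPU_MASK=y and a thread created with
 * a K_FOREVER delay, so it is not yet runnable when the mask is changed):
 *
 *	int ret = k_thread_cpu_pin(my_tid, 1);
 *
 *	if (ret == 0) {
 *		k_thread_start(my_tid);
 *	}
 */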
#endif

/**
 * @brief Suspend a thread.
 *
 * This routine prevents the kernel scheduler from making @a thread
 * the current thread. All other internal operations on @a thread are
 * still performed; for example, kernel objects it is waiting on are
 * still handed to it. Thread suspension does not impact any timeout
 * upon which the thread may be waiting (such as a timeout from a call
 * to k_sem_take() or k_sleep()). Thus if the timeout expires while the
 * thread is suspended, it is still suspended until k_thread_resume()
 * is called.
 *
 * When the target thread is active on another CPU, the caller will block until
 * the target thread is halted (suspended or aborted). But if the caller is in
 * an interrupt context, it will spin waiting for that target thread active on
 * another CPU to halt.
 *
 * If @a thread is already suspended, the routine has no effect.
 *
 * @param thread ID of thread to suspend.
 */
__syscall void k_thread_suspend(k_tid_t thread);

/**
 * @brief Resume a suspended thread.
 *
 * This routine reverses the thread suspension from k_thread_suspend()
 * and allows the kernel scheduler to make @a thread the current thread
 * when it is next eligible for that role.
 *
 * If @a thread is not currently suspended, the routine has no effect.
 *
 * @param thread ID of thread to resume.
 */
__syscall void k_thread_resume(k_tid_t thread);

/**
 * @brief Start an inactive thread
 *
 * If a thread was created with K_FOREVER in the delay parameter, it will
 * not be added to the scheduling queue until this function is called
 * on it.
 *
 * @note This is a legacy API for compatibility. Modern Zephyr
 * threads are initialized in the "sleeping" state and do not need
 * special handling for "start".
 *
 * @param thread thread to start
 */
static inline void k_thread_start(k_tid_t thread)
{
	k_wakeup(thread);
}

/**
 * @brief Set time-slicing period and scope.
 *
 * This routine specifies how the scheduler will perform time slicing of
 * preemptible threads.
 *
 * To enable time slicing, @a slice must be non-zero. The scheduler
 * ensures that no thread runs for more than the specified time limit
 * before other threads of that priority are given a chance to execute.
 * Any thread whose priority is higher than @a prio is exempted, and may
 * execute as long as desired without being preempted due to time slicing.
 *
 * Time slicing only limits the maximum amount of time a thread may continuously
 * execute. Once the scheduler selects a thread for execution, there is no
 * minimum guaranteed time the thread will execute before threads of greater or
 * equal priority are scheduled.
 *
 * When the current thread is the only one of that priority eligible
 * for execution, this routine has no effect; the thread is immediately
 * rescheduled after the slice period expires.
 *
 * To disable timeslicing, set both @a slice and @a prio to zero.
 *
 * @param slice Maximum time slice length (in milliseconds).
 * @param prio Highest thread priority level eligible for time slicing.
 */
void k_sched_time_slice_set(int32_t slice, int prio);

/**
 * @brief Set thread time slice
 *
 * As for k_sched_time_slice_set, but (when
 * CONFIG_TIMESLICE_PER_THREAD=y) sets the timeslice for a specific
 * thread. When non-zero, this timeslice will take precedence over
 * the global value.
 *
 * When such a thread's timeslice expires, the configured callback
 * will be called before the thread is removed/re-added to the run
 * queue. This callback will occur in interrupt context, and the
 * specified thread is guaranteed to have been preempted by the
 * currently-executing ISR. Such a callback is free to, for example,
 * modify the thread priority or slice time for future execution,
 * suspend the thread, etc...
 *
 * @note Unlike the older API, the time slice parameter here is
 * specified in ticks, not milliseconds. Ticks have always been the
 * internal unit, and not all platforms have integer conversions
 * between the two.
 *
 * @note Threads with a non-zero slice time set will be timesliced
 * always, even if they are higher priority than the maximum timeslice
 * priority set via k_sched_time_slice_set().
 *
 * @note The callback notification for slice expiration happens, as it
 * must, while the thread is still "current", and thus it happens
 * before any registered timeouts at this tick. This has the somewhat
 * confusing side effect that the tick time (c.f. k_uptime_get()) does
 * not yet reflect the expired ticks. Applications wishing to make
 * fine-grained timing decisions within this callback should use the
 * cycle API, or derived facilities like k_thread_runtime_stats_get().
 *
 * @param th A valid, initialized thread
 * @param slice_ticks Maximum timeslice, in ticks
 * @param expired Callback function called on slice expiration
 * @param data Parameter for the expiration handler
 */
void k_thread_time_slice_set(struct k_thread *th, int32_t slice_ticks,
			     k_thread_timeslice_fn_t expired, void *data);
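
/*
 * Usage sketch (assumes CONFIG_TIMESLICE_PER_THREAD=y; slice_expired is
 * hypothetical application code): 10 ms global slices for all preemptible
 * threads, with a 5 ms per-thread override for my_thread.
 *
 *	static void slice_expired(struct k_thread *th, void *data)
 *	{
 *		... runs in interrupt context when the slice is used up ...
 *	}
 *
 *	k_sched_time_slice_set(10, 0);
 *	k_thread_time_slice_set(&my_thread, k_ms_to_ticks_ceil32(5),
 *				slice_expired, NULL);
 */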

/** @} */

/**
 * @addtogroup isr_apis
 * @{
 */

/**
 * @brief Determine if code is running at interrupt level.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it is a thread or an ISR.
 *
 * @funcprops \isr_ok
 *
 * @return false if invoked by a thread.
 * @return true if invoked by an ISR.
 */
bool k_is_in_isr(void);

/**
 * @brief Determine if code is running in a preemptible thread.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it can be preempted by another thread. The routine returns a 'true'
 * value if all of the following conditions are met:
 *
 * - The code is running in a thread, not at ISR.
 * - The thread's priority is in the preemptible range.
 * - The thread has not locked the scheduler.
 *
 * @funcprops \isr_ok
 *
 * @return 0 if invoked by an ISR or by a cooperative thread.
 * @return Non-zero if invoked by a preemptible thread.
 */
__syscall int k_is_preempt_thread(void);

/**
 * @}
 */

/**
 * @addtogroup thread_apis
 * @{
 */

/**
 * @brief Lock the scheduler.
 *
 * This routine prevents the current thread from being preempted by another
 * thread by instructing the scheduler to treat it as a cooperative thread.
 * If the thread subsequently performs an operation that makes it unready,
 * it will be context switched out in the normal manner. When the thread
 * again becomes the current thread, its non-preemptible status is maintained.
 *
 * This routine can be called recursively.
 *
 * Owing to clever implementation details, scheduler locks are
 * extremely fast for non-userspace threads (just one byte
 * inc/decrement in the thread struct).
 *
 * @note This works by elevating the thread priority temporarily to a
 * cooperative priority, allowing cheap synchronization vs. other
 * preemptible or cooperative threads running on the current CPU. It
 * does not prevent preemption or asynchrony of other types. It does
 * not prevent threads from running on other CPUs when CONFIG_SMP=y.
 * It does not prevent interrupts from happening, nor does it prevent
 * threads with MetaIRQ priorities from preempting the current thread.
 * In general this is a historical API not well-suited to modern
 * applications, use with care.
 */
void k_sched_lock(void);

/**
 * @brief Unlock the scheduler.
 *
 * This routine reverses the effect of a previous call to k_sched_lock().
 * A thread must call the routine once for each time it called k_sched_lock()
 * before the thread becomes preemptible.
 */
void k_sched_unlock(void);
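
/*
 * Usage sketch: briefly protect a critical section from preemption by
 * other threads on this CPU (see the @note above for what this does
 * NOT protect against).
 *
 *	k_sched_lock();
 *	... touch data shared with other threads ...
 *	k_sched_unlock();
 */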
1462
1463 /**
1464 * @brief Set current thread's custom data.
1465 *
1466 * This routine sets the custom data for the current thread to @ value.
1467 *
1468 * Custom data is not used by the kernel itself, and is freely available
1469 * for a thread to use as it sees fit. It can be used as a framework
1470 * upon which to build thread-local storage.
1471 *
1472 * @param value New custom data value.
1473 *
1474 */
1475 __syscall void k_thread_custom_data_set(void *value);
1476
1477 /**
1478 * @brief Get current thread's custom data.
1479 *
1480 * This routine returns the custom data for the current thread.
1481 *
1482 * @return Current custom data value.
1483 */
1484 __syscall void *k_thread_custom_data_get(void);
1485
1486 /**
1487 * @brief Set current thread name
1488 *
1489 * Set the name of the thread to be used when @kconfig{CONFIG_THREAD_MONITOR}
1490 * is enabled for tracing and debugging.
1491 *
1492  * @param thread Thread whose name to set, or NULL to set the name of the current thread
1493 * @param str Name string
1494 * @retval 0 on success
1495 * @retval -EFAULT Memory access error with supplied string
1496 * @retval -ENOSYS Thread name configuration option not enabled
1497 * @retval -EINVAL Thread name too long
1498 */
1499 __syscall int k_thread_name_set(k_tid_t thread, const char *str);
1500
1501 /**
1502 * @brief Get thread name
1503 *
1504 * Get the name of a thread
1505 *
1506 * @param thread Thread ID
1507  * @return Thread name, or NULL if the configuration option is not enabled
1508 */
1509 const char *k_thread_name_get(k_tid_t thread);
1510
1511 /**
1512 * @brief Copy the thread name into a supplied buffer
1513 *
1514 * @param thread Thread to obtain name information
1515 * @param buf Destination buffer
1516 * @param size Destination buffer size
1517 * @retval -ENOSPC Destination buffer too small
1518 * @retval -EFAULT Memory access error
1519 * @retval -ENOSYS Thread name feature not enabled
1520 * @retval 0 Success
1521 */
1522 __syscall int k_thread_name_copy(k_tid_t thread, char *buf,
1523 size_t size);
1524
1525 /**
1526 * @brief Get thread state string
1527 *
1528 * This routine generates a human friendly string containing the thread's
1529 * state, and copies as much of it as possible into @a buf.
1530 *
1531 * @param thread_id Thread ID
1532 * @param buf Buffer into which to copy state strings
1533 * @param buf_size Size of the buffer
1534 *
1535  * @return Pointer to @a buf if data was copied, else a pointer to "".
1536 */
1537 const char *k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size);
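
/*
 * Illustrative sketch, assuming CONFIG_THREAD_NAME=y and a valid thread
 * ID in my_tid (hypothetical): naming a thread, then reporting its name
 * and state. Buffer sizes are arbitrary.
 *
 *     char name[16] = "";
 *     char state[32];
 *
 *     (void)k_thread_name_set(my_tid, "worker");
 *     (void)k_thread_name_copy(my_tid, name, sizeof(name));
 *     printk("%s is %s\n", name,
 *            k_thread_state_str(my_tid, state, sizeof(state)));
 */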
1538
1539 /**
1540 * @}
1541 */
1542
1543 /**
1544 * @addtogroup clock_apis
1545 * @{
1546 */
1547
1548 /**
1549 * @brief Generate null timeout delay.
1550 *
1551 * This macro generates a timeout delay that instructs a kernel API
1552 * not to wait if the requested operation cannot be performed immediately.
1553 *
1554 * @return Timeout delay value.
1555 */
1556 #define K_NO_WAIT Z_TIMEOUT_NO_WAIT
1557
1558 /**
1559 * @brief Generate timeout delay from nanoseconds.
1560 *
1561 * This macro generates a timeout delay that instructs a kernel API to
1562 * wait up to @a t nanoseconds to perform the requested operation.
1563 * Note that timer precision is limited to the tick rate, not the
1564 * requested value.
1565 *
1566 * @param t Duration in nanoseconds.
1567 *
1568 * @return Timeout delay value.
1569 */
1570 #define K_NSEC(t) Z_TIMEOUT_NS(t)
1571
1572 /**
1573 * @brief Generate timeout delay from microseconds.
1574 *
1575 * This macro generates a timeout delay that instructs a kernel API
1576 * to wait up to @a t microseconds to perform the requested operation.
1577 * Note that timer precision is limited to the tick rate, not the
1578 * requested value.
1579 *
1580 * @param t Duration in microseconds.
1581 *
1582 * @return Timeout delay value.
1583 */
1584 #define K_USEC(t) Z_TIMEOUT_US(t)
1585
1586 /**
1587 * @brief Generate timeout delay from cycles.
1588 *
1589 * This macro generates a timeout delay that instructs a kernel API
1590 * to wait up to @a t cycles to perform the requested operation.
1591 *
1592 * @param t Duration in cycles.
1593 *
1594 * @return Timeout delay value.
1595 */
1596 #define K_CYC(t) Z_TIMEOUT_CYC(t)
1597
1598 /**
1599 * @brief Generate timeout delay from system ticks.
1600 *
1601 * This macro generates a timeout delay that instructs a kernel API
1602 * to wait up to @a t ticks to perform the requested operation.
1603 *
1604 * @param t Duration in system ticks.
1605 *
1606 * @return Timeout delay value.
1607 */
1608 #define K_TICKS(t) Z_TIMEOUT_TICKS(t)
1609
1610 /**
1611 * @brief Generate timeout delay from milliseconds.
1612 *
1613 * This macro generates a timeout delay that instructs a kernel API
1614 * to wait up to @a ms milliseconds to perform the requested operation.
1615 *
1616 * @param ms Duration in milliseconds.
1617 *
1618 * @return Timeout delay value.
1619 */
1620 #define K_MSEC(ms) Z_TIMEOUT_MS(ms)
1621
1622 /**
1623 * @brief Generate timeout delay from seconds.
1624 *
1625 * This macro generates a timeout delay that instructs a kernel API
1626 * to wait up to @a s seconds to perform the requested operation.
1627 *
1628 * @param s Duration in seconds.
1629 *
1630 * @return Timeout delay value.
1631 */
1632 #define K_SECONDS(s) K_MSEC((s) * MSEC_PER_SEC)
1633
1634 /**
1635 * @brief Generate timeout delay from minutes.
1636  *
1637 * This macro generates a timeout delay that instructs a kernel API
1638 * to wait up to @a m minutes to perform the requested operation.
1639 *
1640 * @param m Duration in minutes.
1641 *
1642 * @return Timeout delay value.
1643 */
1644 #define K_MINUTES(m) K_SECONDS((m) * 60)
1645
1646 /**
1647 * @brief Generate timeout delay from hours.
1648 *
1649 * This macro generates a timeout delay that instructs a kernel API
1650 * to wait up to @a h hours to perform the requested operation.
1651 *
1652 * @param h Duration in hours.
1653 *
1654 * @return Timeout delay value.
1655 */
1656 #define K_HOURS(h) K_MINUTES((h) * 60)
1657
1658 /**
1659 * @brief Generate infinite timeout delay.
1660 *
1661 * This macro generates a timeout delay that instructs a kernel API
1662 * to wait as long as necessary to perform the requested operation.
1663 *
1664 * @return Timeout delay value.
1665 */
1666 #define K_FOREVER Z_FOREVER
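
/*
 * Illustrative sketch: each of the macros above yields a k_timeout_t, so
 * any of them can be passed where a kernel API expects a timeout. The
 * semaphore is assumed to be initialized elsewhere.
 *
 *     extern struct k_sem my_sem;
 *
 *     (void)k_sem_take(&my_sem, K_NO_WAIT);     // poll, never block
 *     (void)k_sem_take(&my_sem, K_MSEC(100));   // wait up to 100 ms
 *     (void)k_sem_take(&my_sem, K_FOREVER);     // wait indefinitely
 */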
1667
1668 #ifdef CONFIG_TIMEOUT_64BIT
1669
1670 /**
1671 * @brief Generates an absolute/uptime timeout value from system ticks
1672 *
1673 * This macro generates a timeout delay that represents an expiration
1674 * at the absolute uptime value specified, in system ticks. That is, the
1675 * timeout will expire immediately after the system uptime reaches the
1676 * specified tick count. Value is clamped to the range 0 to INT64_MAX-1.
1677 *
1678 * @param t Tick uptime value
1679 * @return Timeout delay value
1680 */
1681 #define K_TIMEOUT_ABS_TICKS(t) \
1682 Z_TIMEOUT_TICKS(Z_TICK_ABS((k_ticks_t)CLAMP(t, 0, (INT64_MAX - 1))))
1683
1684 /**
1685 * @brief Generates an absolute/uptime timeout value from seconds
1686 *
1687 * This macro generates a timeout delay that represents an expiration
1688 * at the absolute uptime value specified, in seconds. That is, the
1689 * timeout will expire immediately after the system uptime reaches the
1690  * specified time.
1691 *
1692 * @param t Second uptime value
1693 * @return Timeout delay value
1694 */
1695 #define K_TIMEOUT_ABS_SEC(t) K_TIMEOUT_ABS_TICKS(k_sec_to_ticks_ceil64(t))
1696
1697 /**
1698 * @brief Generates an absolute/uptime timeout value from milliseconds
1699 *
1700 * This macro generates a timeout delay that represents an expiration
1701 * at the absolute uptime value specified, in milliseconds. That is,
1702 * the timeout will expire immediately after the system uptime reaches
1703  * the specified time.
1704 *
1705 * @param t Millisecond uptime value
1706 * @return Timeout delay value
1707 */
1708 #define K_TIMEOUT_ABS_MS(t) K_TIMEOUT_ABS_TICKS(k_ms_to_ticks_ceil64(t))
1709
1710 /**
1711 * @brief Generates an absolute/uptime timeout value from microseconds
1712 *
1713 * This macro generates a timeout delay that represents an expiration
1714 * at the absolute uptime value specified, in microseconds. That is,
1715 * the timeout will expire immediately after the system uptime reaches
1716 * the specified time. Note that timer precision is limited by the
1717 * system tick rate and not the requested timeout value.
1718 *
1719 * @param t Microsecond uptime value
1720 * @return Timeout delay value
1721 */
1722 #define K_TIMEOUT_ABS_US(t) K_TIMEOUT_ABS_TICKS(k_us_to_ticks_ceil64(t))
1723
1724 /**
1725 * @brief Generates an absolute/uptime timeout value from nanoseconds
1726 *
1727 * This macro generates a timeout delay that represents an expiration
1728 * at the absolute uptime value specified, in nanoseconds. That is,
1729 * the timeout will expire immediately after the system uptime reaches
1730 * the specified time. Note that timer precision is limited by the
1731 * system tick rate and not the requested timeout value.
1732 *
1733 * @param t Nanosecond uptime value
1734 * @return Timeout delay value
1735 */
1736 #define K_TIMEOUT_ABS_NS(t) K_TIMEOUT_ABS_TICKS(k_ns_to_ticks_ceil64(t))
1737
1738 /**
1739 * @brief Generates an absolute/uptime timeout value from system cycles
1740 *
1741 * This macro generates a timeout delay that represents an expiration
1742 * at the absolute uptime value specified, in cycles. That is, the
1743 * timeout will expire immediately after the system uptime reaches the
1744 * specified time. Note that timer precision is limited by the system
1745 * tick rate and not the requested timeout value.
1746 *
1747 * @param t Cycle uptime value
1748 * @return Timeout delay value
1749 */
1750 #define K_TIMEOUT_ABS_CYC(t) K_TIMEOUT_ABS_TICKS(k_cyc_to_ticks_ceil64(t))
1751
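/*
 * Illustrative sketch, assuming CONFIG_TIMEOUT_64BIT=y: unlike the
 * relative macros above, the K_TIMEOUT_ABS_* macros pin expiration to an
 * absolute uptime, which avoids cumulative drift in periodic loops. The
 * work function is hypothetical.
 *
 *     int64_t next = k_uptime_ticks();
 *
 *     for (;;) {
 *         next += k_ms_to_ticks_ceil64(10);
 *         k_sleep(K_TIMEOUT_ABS_TICKS(next));   // wake at, not after, 'next'
 *         do_periodic_work();
 *     }
 */
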
1752 #endif /* CONFIG_TIMEOUT_64BIT */
1753
1754 /**
1755 * @}
1756 */
1757
1758 /**
1759 * @brief Kernel timer structure
1760 *
1761 * This structure is used to represent a kernel timer.
1762 * All the members are internal and should not be accessed directly.
1763 */
1764 struct k_timer {
1765 /**
1766 * @cond INTERNAL_HIDDEN
1767 */
1768
1769 /*
1770 * _timeout structure must be first here if we want to use
1771 * dynamic timer allocation. timeout.node is used in the double-linked
1772 * list of free timers
1773 */
1774 struct _timeout timeout;
1775
1776 /* wait queue for the (single) thread waiting on this timer */
1777 _wait_q_t wait_q;
1778
1779 /* runs in ISR context */
1780 void (*expiry_fn)(struct k_timer *timer);
1781
1782 /* runs in the context of the thread that calls k_timer_stop() */
1783 void (*stop_fn)(struct k_timer *timer);
1784
1785 /* timer period */
1786 k_timeout_t period;
1787
1788 /* timer status */
1789 uint32_t status;
1790
1791 /* user-specific data, also used to support legacy features */
1792 void *user_data;
1793
1794 SYS_PORT_TRACING_TRACKING_FIELD(k_timer)
1795
1796 #ifdef CONFIG_OBJ_CORE_TIMER
1797 struct k_obj_core obj_core;
1798 #endif
1799 /**
1800 * INTERNAL_HIDDEN @endcond
1801 */
1802 };
1803
1804 /**
1805 * @cond INTERNAL_HIDDEN
1806 */
1807 #define Z_TIMER_INITIALIZER(obj, expiry, stop) \
1808 { \
1809 .timeout = { \
1810 .node = {},\
1811 .fn = z_timer_expiration_handler, \
1812 .dticks = 0, \
1813 }, \
1814 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
1815 .expiry_fn = expiry, \
1816 .stop_fn = stop, \
1817 .period = {}, \
1818 .status = 0, \
1819 .user_data = 0, \
1820 }
1821
1822 /**
1823 * INTERNAL_HIDDEN @endcond
1824 */
1825
1826 /**
1827 * @defgroup timer_apis Timer APIs
1828 * @ingroup kernel_apis
1829 * @{
1830 */
1831
1832 /**
1833 * @typedef k_timer_expiry_t
1834 * @brief Timer expiry function type.
1835 *
1836 * A timer's expiry function is executed by the system clock interrupt handler
1837 * each time the timer expires. The expiry function is optional, and is only
1838 * invoked if the timer has been initialized with one.
1839 *
1840 * @param timer Address of timer.
1841 */
1842 typedef void (*k_timer_expiry_t)(struct k_timer *timer);
1843
1844 /**
1845 * @typedef k_timer_stop_t
1846 * @brief Timer stop function type.
1847 *
1848 * A timer's stop function is executed if the timer is stopped prematurely.
1849  * The function runs in the context of the call that stops the timer. As
1850 * k_timer_stop() can be invoked from an ISR, the stop function must be
1851 * callable from interrupt context (isr-ok).
1852 *
1853 * The stop function is optional, and is only invoked if the timer has been
1854 * initialized with one.
1855 *
1856 * @param timer Address of timer.
1857 */
1858 typedef void (*k_timer_stop_t)(struct k_timer *timer);
1859
1860 /**
1861 * @brief Statically define and initialize a timer.
1862 *
1863 * The timer can be accessed outside the module where it is defined using:
1864 *
1865 * @code extern struct k_timer <name>; @endcode
1866 *
1867 * @param name Name of the timer variable.
1868 * @param expiry_fn Function to invoke each time the timer expires.
1869 * @param stop_fn Function to invoke if the timer is stopped while running.
1870 */
1871 #define K_TIMER_DEFINE(name, expiry_fn, stop_fn) \
1872 STRUCT_SECTION_ITERABLE(k_timer, name) = \
1873 Z_TIMER_INITIALIZER(name, expiry_fn, stop_fn)
1874
1875 /**
1876 * @brief Initialize a timer.
1877 *
1878 * This routine initializes a timer, prior to its first use.
1879 *
1880 * @param timer Address of timer.
1881 * @param expiry_fn Function to invoke each time the timer expires.
1882 * @param stop_fn Function to invoke if the timer is stopped while running.
1883 */
1884 void k_timer_init(struct k_timer *timer,
1885 k_timer_expiry_t expiry_fn,
1886 k_timer_stop_t stop_fn);
1887
1888 /**
1889 * @brief Start a timer.
1890 *
1891 * This routine starts a timer, and resets its status to zero. The timer
1892 * begins counting down using the specified duration and period values.
1893 *
1894 * Attempting to start a timer that is already running is permitted.
1895 * The timer's status is reset to zero and the timer begins counting down
1896 * using the new duration and period values.
1897 *
1898 * This routine neither updates nor has any other effect on the specified
1899 * timer if @a duration is K_FOREVER.
1900 *
1901 * @param timer Address of timer.
1902 * @param duration Initial timer duration.
1903 * @param period Timer period.
1904 */
1905 __syscall void k_timer_start(struct k_timer *timer,
1906 k_timeout_t duration, k_timeout_t period);
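
/*
 * Illustrative sketch: a periodic timer that fires 100 ms after being
 * started and every 50 ms thereafter. The expiry function runs in ISR
 * context, so it must not block.
 *
 *     static void my_expiry(struct k_timer *timer)
 *     {
 *         // Runs at interrupt level; keep it short.
 *     }
 *
 *     K_TIMER_DEFINE(my_timer, my_expiry, NULL);
 *
 *     k_timer_start(&my_timer, K_MSEC(100), K_MSEC(50));
 */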
1907
1908 /**
1909 * @brief Stop a timer.
1910 *
1911 * This routine stops a running timer prematurely. The timer's stop function,
1912 * if one exists, is invoked by the caller.
1913 *
1914 * Attempting to stop a timer that is not running is permitted, but has no
1915 * effect on the timer.
1916 *
1917 * @note The stop handler has to be callable from ISRs if @a k_timer_stop is to
1918 * be called from ISRs.
1919 *
1920 * @funcprops \isr_ok
1921 *
1922 * @param timer Address of timer.
1923 */
1924 __syscall void k_timer_stop(struct k_timer *timer);
1925
1926 /**
1927 * @brief Read timer status.
1928 *
1929 * This routine reads the timer's status, which indicates the number of times
1930 * it has expired since its status was last read.
1931 *
1932 * Calling this routine resets the timer's status to zero.
1933 *
1934 * @param timer Address of timer.
1935 *
1936 * @return Timer status.
1937 */
1938 __syscall uint32_t k_timer_status_get(struct k_timer *timer);
1939
1940 /**
1941 * @brief Synchronize thread to timer expiration.
1942 *
1943 * This routine blocks the calling thread until the timer's status is non-zero
1944 * (indicating that it has expired at least once since it was last examined)
1945 * or the timer is stopped. If the timer status is already non-zero,
1946 * or the timer is already stopped, the caller continues without waiting.
1947 *
1948 * Calling this routine resets the timer's status to zero.
1949 *
1950 * This routine must not be used by interrupt handlers, since they are not
1951 * allowed to block.
1952 *
1953 * @param timer Address of timer.
1954 *
1955 * @return Timer status.
1956 */
1957 __syscall uint32_t k_timer_status_sync(struct k_timer *timer);
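
/*
 * Illustrative sketch: pacing a loop at a fixed period without
 * accumulating drift, since the timer keeps expiring while the work
 * runs. The work function is hypothetical.
 *
 *     K_TIMER_DEFINE(pace, NULL, NULL);
 *
 *     k_timer_start(&pace, K_MSEC(10), K_MSEC(10));
 *     for (;;) {
 *         do_step();
 *         (void)k_timer_status_sync(&pace);   // block until next expiry
 *     }
 */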
1958
1959 #ifdef CONFIG_SYS_CLOCK_EXISTS
1960
1961 /**
1962 * @brief Get next expiration time of a timer, in system ticks
1963 *
1964 * This routine returns the future system uptime reached at the next
1965 * time of expiration of the timer, in units of system ticks. If the
1966 * timer is not running, current system time is returned.
1967 *
1968 * @param timer The timer object
1969 * @return Uptime of expiration, in ticks
1970 */
1971 __syscall k_ticks_t k_timer_expires_ticks(const struct k_timer *timer);
1972
1973 static inline k_ticks_t z_impl_k_timer_expires_ticks(
1974 const struct k_timer *timer)
1975 {
1976 return z_timeout_expires(&timer->timeout);
1977 }
1978
1979 /**
1980 * @brief Get time remaining before a timer next expires, in system ticks
1981 *
1982 * This routine computes the time remaining before a running timer
1983 * next expires, in units of system ticks. If the timer is not
1984 * running, it returns zero.
1985 *
1986 * @param timer The timer object
1987 * @return Remaining time until expiration, in ticks
1988 */
1989 __syscall k_ticks_t k_timer_remaining_ticks(const struct k_timer *timer);
1990
1991 static inline k_ticks_t z_impl_k_timer_remaining_ticks(
1992 const struct k_timer *timer)
1993 {
1994 return z_timeout_remaining(&timer->timeout);
1995 }
1996
1997 /**
1998 * @brief Get time remaining before a timer next expires.
1999 *
2000 * This routine computes the (approximate) time remaining before a running
2001 * timer next expires. If the timer is not running, it returns zero.
2002 *
2003 * @param timer Address of timer.
2004 *
2005 * @return Remaining time (in milliseconds).
2006 */
2007 static inline uint32_t k_timer_remaining_get(struct k_timer *timer)
2008 {
2009 return k_ticks_to_ms_floor32(k_timer_remaining_ticks(timer));
2010 }
2011
2012 #endif /* CONFIG_SYS_CLOCK_EXISTS */
2013
2014 /**
2015 * @brief Associate user-specific data with a timer.
2016 *
2017 * This routine records the @a user_data with the @a timer, to be retrieved
2018 * later.
2019 *
2020 * It can be used e.g. in a timer handler shared across multiple subsystems to
2021 * retrieve data specific to the subsystem this timer is associated with.
2022 *
2023 * @param timer Address of timer.
2024 * @param user_data User data to associate with the timer.
2025 */
2026 __syscall void k_timer_user_data_set(struct k_timer *timer, void *user_data);
2027
2028 /**
2029 * @internal
2030 */
2031 static inline void z_impl_k_timer_user_data_set(struct k_timer *timer,
2032 void *user_data)
2033 {
2034 timer->user_data = user_data;
2035 }
2036
2037 /**
2038 * @brief Retrieve the user-specific data from a timer.
2039 *
2040 * @param timer Address of timer.
2041 *
2042 * @return The user data.
2043 */
2044 __syscall void *k_timer_user_data_get(const struct k_timer *timer);
2045
2046 static inline void *z_impl_k_timer_user_data_get(const struct k_timer *timer)
2047 {
2048 return timer->user_data;
2049 }
2050
2051 /** @} */
2052
2053 /**
2054 * @addtogroup clock_apis
2055 * @ingroup kernel_apis
2056 * @{
2057 */
2058
2059 /**
2060 * @brief Get system uptime, in system ticks.
2061 *
2062 * This routine returns the elapsed time since the system booted, in
2063 * ticks (c.f. @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC}), which is the
2064 * fundamental unit of resolution of kernel timekeeping.
2065 *
2066 * @return Current uptime in ticks.
2067 */
2068 __syscall int64_t k_uptime_ticks(void);
2069
2070 /**
2071 * @brief Get system uptime.
2072 *
2073 * This routine returns the elapsed time since the system booted,
2074 * in milliseconds.
2075 *
2076 * @note
2077 * While this function returns time in milliseconds, it does
2078 * not mean it has millisecond resolution. The actual resolution depends on
2079 * @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
2080 *
2081 * @return Current uptime in milliseconds.
2082 */
2083 static inline int64_t k_uptime_get(void)
2084 {
2085 return k_ticks_to_ms_floor64(k_uptime_ticks());
2086 }
2087
2088 /**
2089 * @brief Get system uptime (32-bit version).
2090 *
2091 * This routine returns the lower 32 bits of the system uptime in
2092 * milliseconds.
2093 *
2094 * Because correct conversion requires full precision of the system
2095 * clock there is no benefit to using this over k_uptime_get() unless
2096 * you know the application will never run long enough for the system
2097 * clock to approach 2^32 ticks. Calls to this function may involve
2098 * interrupt blocking and 64-bit math.
2099 *
2100 * @note
2101 * While this function returns time in milliseconds, it does
2102 * not mean it has millisecond resolution. The actual resolution depends on
2103 * @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option
2104 *
2105 * @return The low 32 bits of the current uptime, in milliseconds.
2106 */
2107 static inline uint32_t k_uptime_get_32(void)
2108 {
2109 return (uint32_t)k_uptime_get();
2110 }
2111
2112 /**
2113 * @brief Get system uptime in seconds.
2114 *
2115 * This routine returns the elapsed time since the system booted,
2116 * in seconds.
2117 *
2118 * @return Current uptime in seconds.
2119 */
2120 static inline uint32_t k_uptime_seconds(void)
2121 {
2122 return k_ticks_to_sec_floor32(k_uptime_ticks());
2123 }
2124
2125 /**
2126 * @brief Get elapsed time.
2127 *
2128 * This routine computes the elapsed time between the current system uptime
2129 * and an earlier reference time, in milliseconds.
2130 *
2131 * @param reftime Pointer to a reference time, which is updated to the current
2132 * uptime upon return.
2133 *
2134 * @return Elapsed time.
2135 */
2136 static inline int64_t k_uptime_delta(int64_t *reftime)
2137 {
2138 int64_t uptime, delta;
2139
2140 uptime = k_uptime_get();
2141 delta = uptime - *reftime;
2142 *reftime = uptime;
2143
2144 return delta;
2145 }
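
/*
 * Illustrative sketch: timing successive phases of work. Each
 * k_uptime_delta() call returns the time since the reference and also
 * advances it, so back-to-back measurements do not overlap. The phase
 * functions are hypothetical.
 *
 *     int64_t ref = k_uptime_get();
 *
 *     phase_one();
 *     printk("phase one took %lld ms\n", k_uptime_delta(&ref));
 *     phase_two();
 *     printk("phase two took %lld ms\n", k_uptime_delta(&ref));
 */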
2146
2147 /**
2148 * @brief Read the hardware clock.
2149 *
2150 * This routine returns the current time, as measured by the system's hardware
2151 * clock.
2152 *
2153 * @return Current hardware clock up-counter (in cycles).
2154 */
2155 static inline uint32_t k_cycle_get_32(void)
2156 {
2157 return arch_k_cycle_get_32();
2158 }
2159
2160 /**
2161 * @brief Read the 64-bit hardware clock.
2162 *
2163 * This routine returns the current time in 64-bits, as measured by the
2164 * system's hardware clock, if available.
2165 *
2166 * @see CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER
2167 *
2168 * @return Current hardware clock up-counter (in cycles).
2169 */
2170 static inline uint64_t k_cycle_get_64(void)
2171 {
2172 if (!IS_ENABLED(CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER)) {
2173 __ASSERT(0, "64-bit cycle counter not enabled on this platform. "
2174 "See CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER");
2175 return 0;
2176 }
2177
2178 return arch_k_cycle_get_64();
2179 }
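
/*
 * Illustrative sketch: the cycle counter is the finest-grained time
 * source available, commonly used for short measurements. Unsigned
 * subtraction handles counter wraparound across the measured interval.
 * The code under test is hypothetical.
 *
 *     uint32_t start = k_cycle_get_32();
 *
 *     code_under_test();
 *
 *     uint32_t cycles = k_cycle_get_32() - start;
 *
 *     printk("took %llu ns\n", k_cyc_to_ns_floor64(cycles));
 */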
2180
2181 /**
2182 * @}
2183 */
2184
2185 struct k_queue {
2186 sys_sflist_t data_q;
2187 struct k_spinlock lock;
2188 _wait_q_t wait_q;
2189
2190 Z_DECL_POLL_EVENT
2191
2192 SYS_PORT_TRACING_TRACKING_FIELD(k_queue)
2193 };
2194
2195 /**
2196 * @cond INTERNAL_HIDDEN
2197 */
2198
2199 #define Z_QUEUE_INITIALIZER(obj) \
2200 { \
2201 .data_q = SYS_SFLIST_STATIC_INIT(&obj.data_q), \
2202 .lock = { }, \
2203 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2204 Z_POLL_EVENT_OBJ_INIT(obj) \
2205 }
2206
2207 /**
2208 * INTERNAL_HIDDEN @endcond
2209 */
2210
2211 /**
2212 * @defgroup queue_apis Queue APIs
2213 * @ingroup kernel_apis
2214 * @{
2215 */
2216
2217 /**
2218 * @brief Initialize a queue.
2219 *
2220 * This routine initializes a queue object, prior to its first use.
2221 *
2222 * @param queue Address of the queue.
2223 */
2224 __syscall void k_queue_init(struct k_queue *queue);
2225
2226 /**
2227 * @brief Cancel waiting on a queue.
2228 *
2229  * This routine causes the first thread pending on @a queue, if any, to
2230  * return from its k_queue_get() call with a NULL value (as if its timeout expired).
2231 * If the queue is being waited on by k_poll(), it will return with
2232 * -EINTR and K_POLL_STATE_CANCELLED state (and per above, subsequent
2233 * k_queue_get() will return NULL).
2234 *
2235 * @funcprops \isr_ok
2236 *
2237 * @param queue Address of the queue.
2238 */
2239 __syscall void k_queue_cancel_wait(struct k_queue *queue);
2240
2241 /**
2242 * @brief Append an element to the end of a queue.
2243 *
2244 * This routine appends a data item to @a queue. A queue data item must be
2245 * aligned on a word boundary, and the first word of the item is reserved
2246 * for the kernel's use.
2247 *
2248 * @funcprops \isr_ok
2249 *
2250 * @param queue Address of the queue.
2251 * @param data Address of the data item.
2252 */
2253 void k_queue_append(struct k_queue *queue, void *data);
2254
2255 /**
2256 * @brief Append an element to a queue.
2257 *
2258 * This routine appends a data item to @a queue. There is an implicit memory
2259 * allocation to create an additional temporary bookkeeping data structure from
2260 * the calling thread's resource pool, which is automatically freed when the
2261 * item is removed. The data itself is not copied.
2262 *
2263 * @funcprops \isr_ok
2264 *
2265 * @param queue Address of the queue.
2266 * @param data Address of the data item.
2267 *
2268 * @retval 0 on success
2269 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2270 */
2271 __syscall int32_t k_queue_alloc_append(struct k_queue *queue, void *data);
2272
2273 /**
2274 * @brief Prepend an element to a queue.
2275 *
2276 * This routine prepends a data item to @a queue. A queue data item must be
2277 * aligned on a word boundary, and the first word of the item is reserved
2278 * for the kernel's use.
2279 *
2280 * @funcprops \isr_ok
2281 *
2282 * @param queue Address of the queue.
2283 * @param data Address of the data item.
2284 */
2285 void k_queue_prepend(struct k_queue *queue, void *data);
2286
2287 /**
2288 * @brief Prepend an element to a queue.
2289 *
2290 * This routine prepends a data item to @a queue. There is an implicit memory
2291 * allocation to create an additional temporary bookkeeping data structure from
2292 * the calling thread's resource pool, which is automatically freed when the
2293 * item is removed. The data itself is not copied.
2294 *
2295 * @funcprops \isr_ok
2296 *
2297 * @param queue Address of the queue.
2298 * @param data Address of the data item.
2299 *
2300 * @retval 0 on success
2301 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2302 */
2303 __syscall int32_t k_queue_alloc_prepend(struct k_queue *queue, void *data);
2304
2305 /**
2306 * @brief Inserts an element to a queue.
2307 *
2308  * This routine inserts a data item into @a queue after the previous item. A queue
2309 * data item must be aligned on a word boundary, and the first word of
2310 * the item is reserved for the kernel's use.
2311 *
2312 * @funcprops \isr_ok
2313 *
2314 * @param queue Address of the queue.
2315 * @param prev Address of the previous data item.
2316 * @param data Address of the data item.
2317 */
2318 void k_queue_insert(struct k_queue *queue, void *prev, void *data);
2319
2320 /**
2321 * @brief Atomically append a list of elements to a queue.
2322 *
2323 * This routine adds a list of data items to @a queue in one operation.
2324 * The data items must be in a singly-linked list, with the first word
2325 * in each data item pointing to the next data item; the list must be
2326 * NULL-terminated.
2327 *
2328 * @funcprops \isr_ok
2329 *
2330 * @param queue Address of the queue.
2331 * @param head Pointer to first node in singly-linked list.
2332 * @param tail Pointer to last node in singly-linked list.
2333 *
2334 * @retval 0 on success
2335 * @retval -EINVAL on invalid supplied data
2336 *
2337 */
2338 int k_queue_append_list(struct k_queue *queue, void *head, void *tail);
2339
2340 /**
2341 * @brief Atomically add a list of elements to a queue.
2342 *
2343 * This routine adds a list of data items to @a queue in one operation.
2344 * The data items must be in a singly-linked list implemented using a
2345 * sys_slist_t object. Upon completion, the original list is empty.
2346 *
2347 * @funcprops \isr_ok
2348 *
2349 * @param queue Address of the queue.
2350 * @param list Pointer to sys_slist_t object.
2351 *
2352 * @retval 0 on success
2353 * @retval -EINVAL on invalid data
2354 */
2355 int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list);
2356
2357 /**
2358 * @brief Get an element from a queue.
2359 *
2360  * This routine removes the first data item from @a queue. The first word of the
2361 * data item is reserved for the kernel's use.
2362 *
2363 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2364 *
2365 * @funcprops \isr_ok
2366 *
2367 * @param queue Address of the queue.
2368 * @param timeout Waiting period to obtain a data item, or one of the special
2369 * values K_NO_WAIT and K_FOREVER.
2370 *
2371 * @return Address of the data item if successful; NULL if returned
2372 * without waiting, or waiting period timed out.
2373 */
2374 __syscall void *k_queue_get(struct k_queue *queue, k_timeout_t timeout);
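
/*
 * Illustrative sketch of the "reserved first word" convention: an item
 * passed to k_queue_append() must begin with a pointer-sized field that
 * the kernel uses for linkage. The queue is assumed to be initialized
 * elsewhere.
 *
 *     struct msg {
 *         void *reserved;   // first word, reserved for the kernel
 *         uint32_t payload;
 *     };
 *
 *     extern struct k_queue my_queue;
 *     static struct msg m = { .payload = 42 };
 *
 *     k_queue_append(&my_queue, &m);
 *
 *     struct msg *rx = k_queue_get(&my_queue, K_FOREVER);
 */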
2375
2376 /**
2377 * @brief Remove an element from a queue.
2378 *
2379  * This routine removes a data item from @a queue. The first word of the
2380  * data item is reserved for the kernel's use. Removing elements from a k_queue
2381  * relies on sys_slist_find_and_remove(), which is not a constant-time operation.
2382 *
2383 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2384 *
2385 * @funcprops \isr_ok
2386 *
2387 * @param queue Address of the queue.
2388 * @param data Address of the data item.
2389 *
2390 * @return true if data item was removed
2391 */
2392 bool k_queue_remove(struct k_queue *queue, void *data);
2393
2394 /**
2395 * @brief Append an element to a queue only if it's not present already.
2396 *
2397  * This routine appends a data item to @a queue. The first word of the data
2398  * item is reserved for the kernel's use. Appending elements to a k_queue
2399  * relies on sys_slist_is_node_in_list(), which is not a constant-time operation.
2400 *
2401 * @funcprops \isr_ok
2402 *
2403 * @param queue Address of the queue.
2404 * @param data Address of the data item.
2405 *
2406 * @return true if data item was added, false if not
2407 */
2408 bool k_queue_unique_append(struct k_queue *queue, void *data);
2409
2410 /**
2411 * @brief Query a queue to see if it has data available.
2412 *
2413  * Note that the data might already be gone by the time this function returns
2414 * if other threads are also trying to read from the queue.
2415 *
2416 * @funcprops \isr_ok
2417 *
2418 * @param queue Address of the queue.
2419 *
2420 * @return Non-zero if the queue is empty.
2421 * @return 0 if data is available.
2422 */
2423 __syscall int k_queue_is_empty(struct k_queue *queue);
2424
2425 static inline int z_impl_k_queue_is_empty(struct k_queue *queue)
2426 {
2427 return sys_sflist_is_empty(&queue->data_q) ? 1 : 0;
2428 }
2429
2430 /**
2431 * @brief Peek element at the head of queue.
2432 *
2433 * Return element from the head of queue without removing it.
2434 *
2435 * @param queue Address of the queue.
2436 *
2437 * @return Head element, or NULL if queue is empty.
2438 */
2439 __syscall void *k_queue_peek_head(struct k_queue *queue);
2440
2441 /**
2442 * @brief Peek element at the tail of queue.
2443 *
2444 * Return element from the tail of queue without removing it.
2445 *
2446 * @param queue Address of the queue.
2447 *
2448 * @return Tail element, or NULL if queue is empty.
2449 */
2450 __syscall void *k_queue_peek_tail(struct k_queue *queue);
2451
2452 /**
2453 * @brief Statically define and initialize a queue.
2454 *
2455 * The queue can be accessed outside the module where it is defined using:
2456 *
2457 * @code extern struct k_queue <name>; @endcode
2458 *
2459 * @param name Name of the queue.
2460 */
2461 #define K_QUEUE_DEFINE(name) \
2462 STRUCT_SECTION_ITERABLE(k_queue, name) = \
2463 Z_QUEUE_INITIALIZER(name)
2464
2465 /** @} */
2466
2467 #ifdef CONFIG_USERSPACE
2468 /**
2469 * @brief futex structure
2470 *
2471 * A k_futex is a lightweight mutual exclusion primitive designed
2472 * to minimize kernel involvement. Uncontended operation relies
2473  * only on atomic access to shared memory. k_futex structures are tracked
2474  * as kernel objects and can live in user memory, so that any access
2475  * bypasses the kernel object permission management mechanism.
2476 */
2477 struct k_futex {
2478 atomic_t val;
2479 };
2480
2481 /**
2482 * @brief futex kernel data structure
2483 *
2484  * z_futex_data is the helper data structure used on the kernel side to
2485  * complete contended k_futex operations; the z_futex_data structure
2486  * of every futex object is invisible in user mode.
2487 */
2488 struct z_futex_data {
2489 _wait_q_t wait_q;
2490 struct k_spinlock lock;
2491 };
2492
2493 #define Z_FUTEX_DATA_INITIALIZER(obj) \
2494 { \
2495 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q) \
2496 }
2497
2498 /**
2499 * @defgroup futex_apis FUTEX APIs
2500 * @ingroup kernel_apis
2501 * @{
2502 */
2503
2504 /**
2505 * @brief Pend the current thread on a futex
2506 *
2507 * Tests that the supplied futex contains the expected value, and if so,
2508 * goes to sleep until some other thread calls k_futex_wake() on it.
2509 *
2510 * @param futex Address of the futex.
2511 * @param expected Expected value of the futex, if it is different the caller
2512 * will not wait on it.
2513 * @param timeout Waiting period on the futex, or one of the special values
2514 * K_NO_WAIT or K_FOREVER.
2515 * @retval -EACCES Caller does not have read access to futex address.
2516 * @retval -EAGAIN If the futex value did not match the expected parameter.
2517 * @retval -EINVAL Futex parameter address not recognized by the kernel.
2518 * @retval -ETIMEDOUT Thread woke up due to timeout and not a futex wakeup.
2519 * @retval 0 if the caller went to sleep and was woken up. The caller
2520 * should check the futex's value on wakeup to determine if it needs
2521 * to block again.
2522 */
2523 __syscall int k_futex_wait(struct k_futex *futex, int expected,
2524 k_timeout_t timeout);
2525
2526 /**
2527 * @brief Wake one/all threads pending on a futex
2528 *
2529  * Wake up the highest priority thread pending on the supplied futex, or
2530  * wake up all threads pending on the supplied futex, depending on
2531  * @a wake_all.
2532 *
2533 * @param futex Futex to wake up pending threads.
2534  * @param wake_all If true, wake up all pending threads; if false,
2535  * wake up only the highest priority thread.
2536 * @retval -EACCES Caller does not have access to the futex address.
2537 * @retval -EINVAL Futex parameter address not recognized by the kernel.
2538 * @retval Number of threads that were woken up.
2539 */
2540 __syscall int k_futex_wake(struct k_futex *futex, bool wake_all);
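
/*
 * Illustrative sketch (simplified, not a production lock): a flag where
 * waiters only enter the kernel on contention. k_futex_wait() returns
 * -EAGAIN without sleeping if the value no longer matches 'expected'.
 *
 *     static struct k_futex flag = { .val = ATOMIC_INIT(0) };
 *
 *     void wait_for_flag(void)
 *     {
 *         while (atomic_get(&flag.val) == 0) {
 *             (void)k_futex_wait(&flag, 0, K_FOREVER);
 *         }
 *     }
 *
 *     void set_flag(void)
 *     {
 *         atomic_set(&flag.val, 1);
 *         (void)k_futex_wake(&flag, true);   // release all waiters
 *     }
 */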
2541
2542 /** @} */
2543 #endif /* CONFIG_USERSPACE */
2544
2545 /**
2546 * @defgroup event_apis Event APIs
2547 * @ingroup kernel_apis
2548 * @{
2549 */
2550
2551 /**
2552 * Event Structure
2553 * @ingroup event_apis
2554 */
2555
2556 /**
2557 * @brief Kernel Event structure
2558 *
2559 * This structure is used to represent kernel events. All the members
2560 * are internal and should not be accessed directly.
2561 */
2562
2563 struct k_event {
2564 /**
2565 * @cond INTERNAL_HIDDEN
2566 */
2567 _wait_q_t wait_q;
2568 uint32_t events;
2569 struct k_spinlock lock;
2570
2571 SYS_PORT_TRACING_TRACKING_FIELD(k_event)
2572
2573 #ifdef CONFIG_OBJ_CORE_EVENT
2574 struct k_obj_core obj_core;
2575 #endif
2576 /**
2577 * INTERNAL_HIDDEN @endcond
2578 */
2579
2580 };
2581
2582 /**
2583 * @cond INTERNAL_HIDDEN
2584 */
2585
2586 #define Z_EVENT_INITIALIZER(obj) \
2587 { \
2588 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2589 .events = 0, \
2590 .lock = {}, \
2591 }
2592 /**
2593 * INTERNAL_HIDDEN @endcond
2594 */
2595
2596 /**
2597 * @brief Initialize an event object
2598 *
2599 * This routine initializes an event object, prior to its first use.
2600 *
2601 * @param event Address of the event object.
2602 */
2603 __syscall void k_event_init(struct k_event *event);
2604
2605 /**
2606 * @brief Post one or more events to an event object
2607 *
2608 * This routine posts one or more events to an event object. All tasks waiting
2609 * on the event object @a event whose waiting conditions become met by this
2610 * posting immediately unpend.
2611 *
2612 * Posting differs from setting in that posted events are merged together with
2613 * the current set of events tracked by the event object.
2614 *
2615 * @funcprops \isr_ok
2616 *
2617 * @param event Address of the event object
2618 * @param events Set of events to post to @a event
2619 *
2620 * @retval Previous value of the events in @a event
2621 */
2622 __syscall uint32_t k_event_post(struct k_event *event, uint32_t events);
2623
2624 /**
2625 * @brief Set the events in an event object
2626 *
2627 * This routine sets the events stored in event object to the specified value.
2628 * All tasks waiting on the event object @a event whose waiting conditions
2629 * become met by this immediately unpend.
2630 *
2631 * Setting differs from posting in that set events replace the current set of
2632 * events tracked by the event object.
2633 *
2634 * @funcprops \isr_ok
2635 *
2636 * @param event Address of the event object
2637 * @param events Set of events to set in @a event
2638 *
2639 * @retval Previous value of the events in @a event
2640 */
2641 __syscall uint32_t k_event_set(struct k_event *event, uint32_t events);
2642
2643 /**
2644 * @brief Set or clear the events in an event object
2645 *
2646 * This routine sets the events stored in event object to the specified value.
2647 * All tasks waiting on the event object @a event whose waiting conditions
2648 * become met by this immediately unpend. Unlike @ref k_event_set, this routine
2649 * allows specific event bits to be set and cleared as determined by the mask.
2650 *
2651 * @funcprops \isr_ok
2652 *
2653 * @param event Address of the event object
2654 * @param events Set of events to set/clear in @a event
2655 * @param events_mask Mask to be applied to @a events
2656 *
2657 * @retval Previous value of the events in @a events_mask
2658 */
2659 __syscall uint32_t k_event_set_masked(struct k_event *event, uint32_t events,
2660 uint32_t events_mask);
2661
2662 /**
2663 * @brief Clear the events in an event object
2664 *
2665 * This routine clears (resets) the specified events stored in an event object.
2666 *
2667 * @funcprops \isr_ok
2668 *
2669 * @param event Address of the event object
2670 * @param events Set of events to clear in @a event
2671 *
2672 * @retval Previous value of the events in @a event
2673 */
2674 __syscall uint32_t k_event_clear(struct k_event *event, uint32_t events);
2675
2676 /**
2677 * @brief Wait for any of the specified events
2678 *
2679 * This routine waits on event object @a event until any of the specified
2680 * events have been delivered to the event object, or the maximum wait time
2681 * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
2682 * events that are expressed as bits in a single 32-bit word.
2683 *
2684 * @note The caller must be careful when resetting if there are multiple threads
2685 * waiting for the event object @a event.
2686 *
2687 * @note This function may be called from ISR context only when @a timeout is
2688 * set to K_NO_WAIT.
2689 *
2690 * @param event Address of the event object
2691 * @param events Set of desired events on which to wait
2692 * @param reset If true, clear the set of events tracked by the event object
2693 * before waiting. If false, do not clear the events.
2694 * @param timeout Waiting period for the desired set of events or one of the
2695 * special values K_NO_WAIT and K_FOREVER.
2696 *
2697 * @retval set of matching events upon success
2698 * @retval 0 if matching events were not received within the specified time
2699 */
2700 __syscall uint32_t k_event_wait(struct k_event *event, uint32_t events,
2701 bool reset, k_timeout_t timeout);
2702
2703 /**
2704 * @brief Wait for all of the specified events
2705 *
2706 * This routine waits on event object @a event until all of the specified
2707 * events have been delivered to the event object, or the maximum wait time
2708 * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
2709 * events that are expressed as bits in a single 32-bit word.
2710 *
2711 * @note The caller must be careful when resetting if there are multiple threads
2712 * waiting for the event object @a event.
2713 *
2714 * @note This function may be called from ISR context only when @a timeout is
2715 * set to K_NO_WAIT.
2716 *
2717 * @param event Address of the event object
2718 * @param events Set of desired events on which to wait
2719 * @param reset If true, clear the set of events tracked by the event object
2720 * before waiting. If false, do not clear the events.
2721 * @param timeout Waiting period for the desired set of events or one of the
2722 * special values K_NO_WAIT and K_FOREVER.
2723 *
2724 * @retval set of matching events upon success
2725 * @retval 0 if matching events were not received within the specified time
2726 */
2727 __syscall uint32_t k_event_wait_all(struct k_event *event, uint32_t events,
2728 bool reset, k_timeout_t timeout);
2729
2730 /**
2731 * @brief Wait for any of the specified events (safe version)
2732 *
2733 * This call is nearly identical to @ref k_event_wait with the main difference
2734 * being that the safe version atomically clears received events from the
2735 * event object. This mitigates the need for calling @ref k_event_clear, or
2736 * passing a "reset" argument, since doing so may result in lost event
2737 * information.
2738 *
2739 * @param event Address of the event object
2740 * @param events Set of desired events on which to wait
2741 * @param reset If true, clear the set of events tracked by the event object
2742 * before waiting. If false, do not clear the events.
2743 * @param timeout Waiting period for the desired set of events or one of the
2744 * special values K_NO_WAIT and K_FOREVER.
2745 *
2746 * @retval set of matching events upon success
2747 * @retval 0 if no matching event was received within the specified time
2748 */
2749 __syscall uint32_t k_event_wait_safe(struct k_event *event, uint32_t events,
2750 bool reset, k_timeout_t timeout);
2751
2752 /**
2753 * @brief Wait for all of the specified events (safe version)
2754 *
2755 * This call is nearly identical to @ref k_event_wait_all with the main
2756 * difference being that the safe version atomically clears received events
2757 * from the event object. This mitigates the need for calling
2758 * @ref k_event_clear, or passing a "reset" argument, since doing so may
2759 * result in lost event information.
2760 *
2761 * @param event Address of the event object
2762 * @param events Set of desired events on which to wait
2763 * @param reset If true, clear the set of events tracked by the event object
2764 * before waiting. If false, do not clear the events.
2765 * @param timeout Waiting period for the desired set of events or one of the
2766 * special values K_NO_WAIT and K_FOREVER.
2767 *
2768 * @retval set of matching events upon success
2769  * @retval 0 if not all of the matching events were received within the specified time
2770 */
2771 __syscall uint32_t k_event_wait_all_safe(struct k_event *event, uint32_t events,
2772 bool reset, k_timeout_t timeout);
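
/*
 * Illustrative sketch: a producer (thread or ISR) posts distinct event
 * bits and a consumer waits for any of them. The bit assignments are
 * hypothetical.
 *
 *     #define EV_RX_DONE BIT(0)
 *     #define EV_TX_DONE BIT(1)
 *
 *     K_EVENT_DEFINE(io_events);
 *
 *     // producer
 *     k_event_post(&io_events, EV_RX_DONE);
 *
 *     // consumer
 *     uint32_t ev = k_event_wait(&io_events, EV_RX_DONE | EV_TX_DONE,
 *                                false, K_MSEC(100));
 *
 *     if (ev == 0) {
 *         // timed out without receiving either event
 *     }
 */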
2773
2774
2775
2776 /**
2777 * @brief Test the events currently tracked in the event object
2778 *
2779 * @funcprops \isr_ok
2780 *
2781 * @param event Address of the event object
2782 * @param events_mask Set of desired events to test
2783 *
2784 * @retval Current value of events in @a events_mask
2785 */
2786 static inline uint32_t k_event_test(struct k_event *event, uint32_t events_mask)
2787 {
2788 return k_event_wait(event, events_mask, false, K_NO_WAIT);
2789 }
2790
2791 /**
2792 * @brief Statically define and initialize an event object
2793 *
2794 * The event can be accessed outside the module where it is defined using:
2795 *
2796 * @code extern struct k_event <name>; @endcode
2797 *
2798 * @param name Name of the event object.
2799 */
2800 #define K_EVENT_DEFINE(name) \
2801 STRUCT_SECTION_ITERABLE(k_event, name) = \
2802 Z_EVENT_INITIALIZER(name);
2803
2804 /** @} */
2805
2806 struct k_fifo {
2807 struct k_queue _queue;
2808 #ifdef CONFIG_OBJ_CORE_FIFO
2809 struct k_obj_core obj_core;
2810 #endif
2811 };
2812
2813 /**
2814 * @cond INTERNAL_HIDDEN
2815 */
2816 #define Z_FIFO_INITIALIZER(obj) \
2817 { \
2818 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
2819 }
2820
2821 /**
2822 * INTERNAL_HIDDEN @endcond
2823 */
2824
2825 /**
2826 * @defgroup fifo_apis FIFO APIs
2827 * @ingroup kernel_apis
2828 * @{
2829 */
2830
2831 /**
2832 * @brief Initialize a FIFO queue.
2833 *
2834 * This routine initializes a FIFO queue, prior to its first use.
2835 *
2836 * @param fifo Address of the FIFO queue.
2837 */
2838 #define k_fifo_init(fifo) \
2839 ({ \
2840 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, init, fifo); \
2841 k_queue_init(&(fifo)->_queue); \
2842 K_OBJ_CORE_INIT(K_OBJ_CORE(fifo), _obj_type_fifo); \
2843 K_OBJ_CORE_LINK(K_OBJ_CORE(fifo)); \
2844 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, init, fifo); \
2845 })
2846
2847 /**
2848 * @brief Cancel waiting on a FIFO queue.
2849 *
2850 * This routine causes first thread pending on @a fifo, if any, to
2851 * return from k_fifo_get() call with NULL value (as if timeout
2852 * expired).
2853 *
2854 * @funcprops \isr_ok
2855 *
2856 * @param fifo Address of the FIFO queue.
2857 */
2858 #define k_fifo_cancel_wait(fifo) \
2859 ({ \
2860 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, cancel_wait, fifo); \
2861 k_queue_cancel_wait(&(fifo)->_queue); \
2862 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, cancel_wait, fifo); \
2863 })
2864
2865 /**
2866 * @brief Add an element to a FIFO queue.
2867 *
2868 * This routine adds a data item to @a fifo. A FIFO data item must be
2869 * aligned on a word boundary, and the first word of the item is reserved
2870 * for the kernel's use.
2871 *
2872 * @funcprops \isr_ok
2873 *
2874 * @param fifo Address of the FIFO.
2875 * @param data Address of the data item.
2876 */
2877 #define k_fifo_put(fifo, data) \
2878 ({ \
2879 void *_data = data; \
2880 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put, fifo, _data); \
2881 k_queue_append(&(fifo)->_queue, _data); \
2882 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put, fifo, _data); \
2883 })
2884
2885 /**
2886 * @brief Add an element to a FIFO queue.
2887 *
2888 * This routine adds a data item to @a fifo. There is an implicit memory
2889 * allocation to create an additional temporary bookkeeping data structure from
2890 * the calling thread's resource pool, which is automatically freed when the
2891 * item is removed. The data itself is not copied.
2892 *
2893 * @funcprops \isr_ok
2894 *
2895 * @param fifo Address of the FIFO.
2896 * @param data Address of the data item.
2897 *
2898 * @retval 0 on success
2899 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2900 */
2901 #define k_fifo_alloc_put(fifo, data) \
2902 ({ \
2903 void *_data = data; \
2904 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, alloc_put, fifo, _data); \
2905 int fap_ret = k_queue_alloc_append(&(fifo)->_queue, _data); \
2906 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, alloc_put, fifo, _data, fap_ret); \
2907 fap_ret; \
2908 })
2909
2910 /**
2911 * @brief Atomically add a list of elements to a FIFO.
2912 *
2913 * This routine adds a list of data items to @a fifo in one operation.
2914 * The data items must be in a singly-linked list, with the first word of
2915 * each data item pointing to the next data item; the list must be
2916 * NULL-terminated.
2917 *
2918 * @funcprops \isr_ok
2919 *
2920 * @param fifo Address of the FIFO queue.
2921 * @param head Pointer to first node in singly-linked list.
2922 * @param tail Pointer to last node in singly-linked list.
2923 */
2924 #define k_fifo_put_list(fifo, head, tail) \
2925 ({ \
2926 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_list, fifo, head, tail); \
2927 k_queue_append_list(&(fifo)->_queue, head, tail); \
2928 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_list, fifo, head, tail); \
2929 })
2930
2931 /**
2932 * @brief Atomically add a list of elements to a FIFO queue.
2933 *
2934 * This routine adds a list of data items to @a fifo in one operation.
2935 * The data items must be in a singly-linked list implemented using a
2936 * sys_slist_t object. Upon completion, the sys_slist_t object is invalid
2937 * and must be re-initialized via sys_slist_init().
2938 *
2939 * @funcprops \isr_ok
2940 *
2941 * @param fifo Address of the FIFO queue.
2942 * @param list Pointer to sys_slist_t object.
2943 */
2944 #define k_fifo_put_slist(fifo, list) \
2945 ({ \
2946 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_slist, fifo, list); \
2947 k_queue_merge_slist(&(fifo)->_queue, list); \
2948 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_slist, fifo, list); \
2949 })
2950
2951 /**
2952 * @brief Get an element from a FIFO queue.
2953 *
2954 * This routine removes a data item from @a fifo in a "first in, first out"
2955 * manner. The first word of the data item is reserved for the kernel's use.
2956 *
2957 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2958 *
2959 * @funcprops \isr_ok
2960 *
2961 * @param fifo Address of the FIFO queue.
2962 * @param timeout Waiting period to obtain a data item,
2963 * or one of the special values K_NO_WAIT and K_FOREVER.
2964 *
2965 * @return Address of the data item if successful; NULL if returned
2966 * without waiting, or waiting period timed out.
2967 */
2968 #define k_fifo_get(fifo, timeout) \
2969 ({ \
2970 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, get, fifo, timeout); \
2971 void *fg_ret = k_queue_get(&(fifo)->_queue, timeout); \
2972 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, get, fifo, timeout, fg_ret); \
2973 fg_ret; \
2974 })
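
/*
 * Illustrative sketch: the typical FIFO producer/consumer pattern,
 * showing the reserved first word on each item. Names are hypothetical.
 *
 *     struct work_item {
 *         void *reserved;   // first word, reserved for the kernel
 *         int op;
 *     };
 *
 *     K_FIFO_DEFINE(work_fifo);
 *
 *     // producer
 *     static struct work_item item = { .op = 1 };
 *     k_fifo_put(&work_fifo, &item);
 *
 *     // consumer
 *     struct work_item *w = k_fifo_get(&work_fifo, K_FOREVER);
 */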
2975
2976 /**
2977 * @brief Query a FIFO queue to see if it has data available.
2978 *
2979  * Note that the data might already be gone by the time this function returns
2980  * if other threads are also trying to read from the FIFO.
2981 *
2982 * @funcprops \isr_ok
2983 *
2984 * @param fifo Address of the FIFO queue.
2985 *
2986 * @return Non-zero if the FIFO queue is empty.
2987 * @return 0 if data is available.
2988 */
2989 #define k_fifo_is_empty(fifo) \
2990 k_queue_is_empty(&(fifo)->_queue)
2991
2992 /**
2993 * @brief Peek element at the head of a FIFO queue.
2994 *
2995  * Return an element from the head of the FIFO queue without removing it. A
2996  * use case for this is when elements of the FIFO queue are themselves
2997  * containers. Then on each iteration of processing, the head container is
2998  * peeked and some data is processed out of it; only when the container is
2999  * empty is it completely removed from the FIFO queue.
3000 *
3001 * @param fifo Address of the FIFO queue.
3002 *
3003 * @return Head element, or NULL if the FIFO queue is empty.
3004 */
3005 #define k_fifo_peek_head(fifo) \
3006 ({ \
3007 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_head, fifo); \
3008 void *fph_ret = k_queue_peek_head(&(fifo)->_queue); \
3009 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_head, fifo, fph_ret); \
3010 fph_ret; \
3011 })
3012
3013 /**
3014 * @brief Peek element at the tail of FIFO queue.
3015 *
3016  * Return an element from the tail of the FIFO queue (without removing it). A
3017  * use case for this is when elements of the FIFO queue are themselves
3018  * containers; then it may be useful to add more data to the last container.
3019 *
3020 * @param fifo Address of the FIFO queue.
3021 *
3022  * @return Tail element, or NULL if the FIFO queue is empty.
3023 */
3024 #define k_fifo_peek_tail(fifo) \
3025 ({ \
3026 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_tail, fifo); \
3027 void *fpt_ret = k_queue_peek_tail(&(fifo)->_queue); \
3028 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_tail, fifo, fpt_ret); \
3029 fpt_ret; \
3030 })
3031
3032 /**
3033 * @brief Statically define and initialize a FIFO queue.
3034 *
3035 * The FIFO queue can be accessed outside the module where it is defined using:
3036 *
3037 * @code extern struct k_fifo <name>; @endcode
3038 *
3039 * @param name Name of the FIFO queue.
3040 */
3041 #define K_FIFO_DEFINE(name) \
3042 STRUCT_SECTION_ITERABLE(k_fifo, name) = \
3043 Z_FIFO_INITIALIZER(name)
3044
3045 /** @} */
3046
3047 struct k_lifo {
3048 struct k_queue _queue;
3049 #ifdef CONFIG_OBJ_CORE_LIFO
3050 struct k_obj_core obj_core;
3051 #endif
3052 };
3053
3054 /**
3055 * @cond INTERNAL_HIDDEN
3056 */
3057
3058 #define Z_LIFO_INITIALIZER(obj) \
3059 { \
3060 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
3061 }
3062
3063 /**
3064 * INTERNAL_HIDDEN @endcond
3065 */
3066
3067 /**
3068 * @defgroup lifo_apis LIFO APIs
3069 * @ingroup kernel_apis
3070 * @{
3071 */
3072
3073 /**
3074 * @brief Initialize a LIFO queue.
3075 *
3076 * This routine initializes a LIFO queue object, prior to its first use.
3077 *
3078 * @param lifo Address of the LIFO queue.
3079 */
3080 #define k_lifo_init(lifo) \
3081 ({ \
3082 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, init, lifo); \
3083 k_queue_init(&(lifo)->_queue); \
3084 K_OBJ_CORE_INIT(K_OBJ_CORE(lifo), _obj_type_lifo); \
3085 K_OBJ_CORE_LINK(K_OBJ_CORE(lifo)); \
3086 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, init, lifo); \
3087 })
3088
3089 /**
3090 * @brief Add an element to a LIFO queue.
3091 *
3092 * This routine adds a data item to @a lifo. A LIFO queue data item must be
3093 * aligned on a word boundary, and the first word of the item is
3094 * reserved for the kernel's use.
3095 *
3096 * @funcprops \isr_ok
3097 *
3098 * @param lifo Address of the LIFO queue.
3099 * @param data Address of the data item.
3100 */
3101 #define k_lifo_put(lifo, data) \
3102 ({ \
3103 void *_data = data; \
3104 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, put, lifo, _data); \
3105 k_queue_prepend(&(lifo)->_queue, _data); \
3106 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, put, lifo, _data); \
3107 })
3108
3109 /**
3110 * @brief Add an element to a LIFO queue.
3111 *
3112 * This routine adds a data item to @a lifo. There is an implicit memory
3113 * allocation to create an additional temporary bookkeeping data structure from
3114 * the calling thread's resource pool, which is automatically freed when the
3115 * item is removed. The data itself is not copied.
3116 *
3117 * @funcprops \isr_ok
3118 *
3119 * @param lifo Address of the LIFO.
3120 * @param data Address of the data item.
3121 *
3122 * @retval 0 on success
3123 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
3124 */
3125 #define k_lifo_alloc_put(lifo, data) \
3126 ({ \
3127 void *_data = data; \
3128 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, alloc_put, lifo, _data); \
3129 int lap_ret = k_queue_alloc_prepend(&(lifo)->_queue, _data); \
3130 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, alloc_put, lifo, _data, lap_ret); \
3131 lap_ret; \
3132 })
3133
3134 /**
3135 * @brief Get an element from a LIFO queue.
3136 *
3137  * This routine removes a data item from @a lifo in a "last in, first out"
3138 * manner. The first word of the data item is reserved for the kernel's use.
3139 *
3140 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
3141 *
3142 * @funcprops \isr_ok
3143 *
3144 * @param lifo Address of the LIFO queue.
3145 * @param timeout Waiting period to obtain a data item,
3146 * or one of the special values K_NO_WAIT and K_FOREVER.
3147 *
3148 * @return Address of the data item if successful; NULL if returned
3149 * without waiting, or waiting period timed out.
3150 */
3151 #define k_lifo_get(lifo, timeout) \
3152 ({ \
3153 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, get, lifo, timeout); \
3154 void *lg_ret = k_queue_get(&(lifo)->_queue, timeout); \
3155 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, get, lifo, timeout, lg_ret); \
3156 lg_ret; \
3157 })
3158
3159 /**
3160 * @brief Statically define and initialize a LIFO queue.
3161 *
3162 * The LIFO queue can be accessed outside the module where it is defined using:
3163 *
3164 * @code extern struct k_lifo <name>; @endcode
3165 *
3166  * @param name Name of the LIFO queue.
3167 */
3168 #define K_LIFO_DEFINE(name) \
3169 STRUCT_SECTION_ITERABLE(k_lifo, name) = \
3170 Z_LIFO_INITIALIZER(name)
3171
3172 /** @} */
3173
3174 /**
3175 * @cond INTERNAL_HIDDEN
3176 */
3177 #define K_STACK_FLAG_ALLOC ((uint8_t)1) /* Buffer was allocated */
3178
3179 typedef uintptr_t stack_data_t;
3180
3181 struct k_stack {
3182 _wait_q_t wait_q;
3183 struct k_spinlock lock;
3184 stack_data_t *base, *next, *top;
3185
3186 uint8_t flags;
3187
3188 SYS_PORT_TRACING_TRACKING_FIELD(k_stack)
3189
3190 #ifdef CONFIG_OBJ_CORE_STACK
3191 struct k_obj_core obj_core;
3192 #endif
3193 };
3194
3195 #define Z_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
3196 { \
3197 .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
3198 .base = (stack_buffer), \
3199 .next = (stack_buffer), \
3200 .top = (stack_buffer) + (stack_num_entries), \
3201 }
3202
3203 /**
3204 * INTERNAL_HIDDEN @endcond
3205 */
3206
3207 /**
3208 * @defgroup stack_apis Stack APIs
3209 * @ingroup kernel_apis
3210 * @{
3211 */
3212
3213 /**
3214 * @brief Initialize a stack.
3215 *
3216 * This routine initializes a stack object, prior to its first use.
3217 *
3218 * @param stack Address of the stack.
3219 * @param buffer Address of array used to hold stacked values.
3220 * @param num_entries Maximum number of values that can be stacked.
3221 */
3222 void k_stack_init(struct k_stack *stack,
3223 stack_data_t *buffer, uint32_t num_entries);
3224
3225
3226 /**
3227 * @brief Initialize a stack.
3228 *
3229 * This routine initializes a stack object, prior to its first use. Internal
3230 * buffers will be allocated from the calling thread's resource pool.
3231 * This memory will be released if k_stack_cleanup() is called, or
3232 * userspace is enabled and the stack object loses all references to it.
3233 *
3234 * @param stack Address of the stack.
3235 * @param num_entries Maximum number of values that can be stacked.
3236 *
3237  * @return 0 on success, or -ENOMEM if memory couldn't be allocated
3238 */
3239
3240 __syscall int32_t k_stack_alloc_init(struct k_stack *stack,
3241 uint32_t num_entries);
3242
3243 /**
3244 * @brief Release a stack's allocated buffer
3245 *
3246 * If a stack object was given a dynamically allocated buffer via
3247 * k_stack_alloc_init(), this will free it. This function does nothing
3248 * if the buffer wasn't dynamically allocated.
3249 *
3250 * @param stack Address of the stack.
3251 * @retval 0 on success
3252 * @retval -EAGAIN when object is still in use
3253 */
3254 int k_stack_cleanup(struct k_stack *stack);
3255
3256 /**
3257 * @brief Push an element onto a stack.
3258 *
3259 * This routine adds a stack_data_t value @a data to @a stack.
3260 *
3261 * @funcprops \isr_ok
3262 *
3263 * @param stack Address of the stack.
3264 * @param data Value to push onto the stack.
3265 *
3266 * @retval 0 on success
3267 * @retval -ENOMEM if stack is full
3268 */
3269 __syscall int k_stack_push(struct k_stack *stack, stack_data_t data);
3270
3271 /**
3272 * @brief Pop an element from a stack.
3273 *
3274 * This routine removes a stack_data_t value from @a stack in a "last in,
3275 * first out" manner and stores the value in @a data.
3276 *
3277 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
3278 *
3279 * @funcprops \isr_ok
3280 *
3281 * @param stack Address of the stack.
3282 * @param data Address of area to hold the value popped from the stack.
3283 * @param timeout Waiting period to obtain a value,
3284 * or one of the special values K_NO_WAIT and
3285 * K_FOREVER.
3286 *
3287 * @retval 0 Element popped from stack.
3288 * @retval -EBUSY Returned without waiting.
3289 * @retval -EAGAIN Waiting period timed out.
3290 */
3291 __syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data,
3292 k_timeout_t timeout);
3293
3294 /**
3295 * @brief Statically define and initialize a stack
3296 *
3297 * The stack can be accessed outside the module where it is defined using:
3298 *
3299 * @code extern struct k_stack <name>; @endcode
3300 *
3301 * @param name Name of the stack.
3302 * @param stack_num_entries Maximum number of values that can be stacked.
3303 */
3304 #define K_STACK_DEFINE(name, stack_num_entries) \
3305 stack_data_t __noinit \
3306 _k_stack_buf_##name[stack_num_entries]; \
3307 STRUCT_SECTION_ITERABLE(k_stack, name) = \
3308 Z_STACK_INITIALIZER(name, _k_stack_buf_##name, \
3309 stack_num_entries)
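
/*
 * Usage sketch (hypothetical names): a statically defined stack shared
 * between a producer and a consumer. Entries are pointer-sized
 * (stack_data_t) values.
 *
 * @code
 * K_STACK_DEFINE(my_stack, 8);
 *
 * void producer(void)
 * {
 *     // fails with -ENOMEM if all 8 slots are in use
 *     (void)k_stack_push(&my_stack, (stack_data_t)0x1234);
 * }
 *
 * void consumer(void)
 * {
 *     stack_data_t value;
 *
 *     if (k_stack_pop(&my_stack, &value, K_MSEC(100)) == 0) {
 *         // value now holds the most recently pushed entry
 *     }
 * }
 * @endcode
 */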
3310
3311 /** @} */
3312
3313 /**
3314 * @cond INTERNAL_HIDDEN
3315 */
3316
3317 struct k_work;
3318 struct k_work_q;
3319 struct k_work_queue_config;
3320 extern struct k_work_q k_sys_work_q;
3321
3322 /**
3323 * INTERNAL_HIDDEN @endcond
3324 */
3325
3326 /**
3327 * @defgroup mutex_apis Mutex APIs
3328 * @ingroup kernel_apis
3329 * @{
3330 */
3331
3332 /**
3333 * Mutex Structure
3334 * @ingroup mutex_apis
3335 */
3336 struct k_mutex {
3337 /** Mutex wait queue */
3338 _wait_q_t wait_q;
3339 /** Mutex owner */
3340 struct k_thread *owner;
3341
3342 /** Current lock count */
3343 uint32_t lock_count;
3344
3345 /** Original thread priority */
3346 int owner_orig_prio;
3347
3348 SYS_PORT_TRACING_TRACKING_FIELD(k_mutex)
3349
3350 #ifdef CONFIG_OBJ_CORE_MUTEX
3351 struct k_obj_core obj_core;
3352 #endif
3353 };
3354
3355 /**
3356 * @cond INTERNAL_HIDDEN
3357 */
3358 #define Z_MUTEX_INITIALIZER(obj) \
3359 { \
3360 .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
3361 .owner = NULL, \
3362 .lock_count = 0, \
3363 .owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
3364 }
3365
3366 /**
3367 * INTERNAL_HIDDEN @endcond
3368 */
3369
3370 /**
3371 * @brief Statically define and initialize a mutex.
3372 *
3373 * The mutex can be accessed outside the module where it is defined using:
3374 *
3375 * @code extern struct k_mutex <name>; @endcode
3376 *
3377 * @param name Name of the mutex.
3378 */
3379 #define K_MUTEX_DEFINE(name) \
3380 STRUCT_SECTION_ITERABLE(k_mutex, name) = \
3381 Z_MUTEX_INITIALIZER(name)
3382
3383 /**
3384 * @brief Initialize a mutex.
3385 *
3386 * This routine initializes a mutex object, prior to its first use.
3387 *
3388 * Upon completion, the mutex is available and does not have an owner.
3389 *
3390 * @param mutex Address of the mutex.
3391 *
3392 * @retval 0 Mutex object created
3393 *
3394 */
3395 __syscall int k_mutex_init(struct k_mutex *mutex);
3396
3397
3398 /**
3399 * @brief Lock a mutex.
3400 *
3401 * This routine locks @a mutex. If the mutex is locked by another thread,
3402 * the calling thread waits until the mutex becomes available or until
3403 * a timeout occurs.
3404 *
3405 * A thread is permitted to lock a mutex it has already locked. The operation
3406 * completes immediately and the lock count is increased by 1.
3407 *
3408 * Mutexes may not be locked in ISRs.
3409 *
3410 * @param mutex Address of the mutex.
3411 * @param timeout Waiting period to lock the mutex,
3412 * or one of the special values K_NO_WAIT and
3413 * K_FOREVER.
3414 *
3415 * @retval 0 Mutex locked.
3416 * @retval -EBUSY Returned without waiting.
3417 * @retval -EAGAIN Waiting period timed out.
3418 */
3419 __syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout);
3420
3421 /**
3422 * @brief Unlock a mutex.
3423 *
3424 * This routine unlocks @a mutex. The mutex must already be locked by the
3425 * calling thread.
3426 *
3427 * The mutex cannot be claimed by another thread until it has been unlocked by
3428 * the calling thread as many times as it was previously locked by that
3429 * thread.
3430 *
3431 * Mutexes may not be unlocked in ISRs, as mutexes must only be manipulated
3432 * in thread context due to ownership and priority inheritance semantics.
3433 *
3434 * @param mutex Address of the mutex.
3435 *
3436 * @retval 0 Mutex unlocked.
3437 * @retval -EPERM The current thread does not own the mutex
3438 * @retval -EINVAL The mutex is not locked
3439 *
3440 */
3441 __syscall int k_mutex_unlock(struct k_mutex *mutex);
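
/*
 * Usage sketch (hypothetical names): the canonical lock/unlock pattern with
 * a bounded wait. Both k_mutex_lock() and k_mutex_unlock() must be invoked
 * from thread context by the same thread.
 *
 * @code
 * K_MUTEX_DEFINE(my_mutex);
 *
 * void update_shared_state(void)
 * {
 *     if (k_mutex_lock(&my_mutex, K_MSEC(100)) == 0) {
 *         // critical section: exclusive access to the shared state
 *         k_mutex_unlock(&my_mutex);
 *     } else {
 *         // -EAGAIN: could not acquire the mutex within 100 ms
 *     }
 * }
 * @endcode
 */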
3442
3443 /**
3444 * @}
3445 */
3446
3447
3448 struct k_condvar {
3449 _wait_q_t wait_q;
3450
3451 #ifdef CONFIG_OBJ_CORE_CONDVAR
3452 struct k_obj_core obj_core;
3453 #endif
3454 };
3455
3456 #define Z_CONDVAR_INITIALIZER(obj) \
3457 { \
3458 	.wait_q = Z_WAIT_Q_INIT(&(obj).wait_q),				\
3459 }
3460
3461 /**
3462 * @defgroup condvar_apis Condition Variables APIs
3463 * @ingroup kernel_apis
3464 * @{
3465 */
3466
3467 /**
3468 * @brief Initialize a condition variable
3469 *
3470 * @param condvar pointer to a @p k_condvar structure
3471 * @retval 0 Condition variable created successfully
3472 */
3473 __syscall int k_condvar_init(struct k_condvar *condvar);
3474
3475 /**
3476 * @brief Signals one thread that is pending on the condition variable
3477 *
3478 * @param condvar pointer to a @p k_condvar structure
3479 * @retval 0 On success
3480 */
3481 __syscall int k_condvar_signal(struct k_condvar *condvar);
3482
3483 /**
3484 * @brief Unblock all threads that are pending on the condition
3485 * variable
3486 *
3487 * @param condvar pointer to a @p k_condvar structure
3488  * @return The number of threads woken on success
3489 */
3490 __syscall int k_condvar_broadcast(struct k_condvar *condvar);
3491
3492 /**
3493  * @brief Waits on the condition variable, releasing the mutex lock
3494 *
3495 * Atomically releases the currently owned mutex, blocks the current thread
3496 * waiting on the condition variable specified by @a condvar,
3497 * and finally acquires the mutex again.
3498 *
3499 * The waiting thread unblocks only after another thread calls
3500  * k_condvar_signal() or k_condvar_broadcast() with the same condition variable.
3501 *
3502 * @param condvar pointer to a @p k_condvar structure
3503 * @param mutex Address of the mutex.
3504 * @param timeout Waiting period for the condition variable
3505 * or one of the special values K_NO_WAIT and K_FOREVER.
3506 * @retval 0 On success
3507 * @retval -EAGAIN Waiting period timed out.
3508 */
3509 __syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
3510 k_timeout_t timeout);
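
/*
 * Usage sketch (hypothetical names): the standard condition variable
 * pattern. The predicate is re-checked in a loop after each wakeup, and
 * both sides manipulate it only while holding the mutex.
 *
 * @code
 * K_MUTEX_DEFINE(my_mutex);
 * K_CONDVAR_DEFINE(my_condvar);
 * static bool data_ready;   // hypothetical predicate
 *
 * void waiter(void)
 * {
 *     k_mutex_lock(&my_mutex, K_FOREVER);
 *     while (!data_ready) {
 *         k_condvar_wait(&my_condvar, &my_mutex, K_FOREVER);
 *     }
 *     // consume the data while still holding the mutex
 *     k_mutex_unlock(&my_mutex);
 * }
 *
 * void signaler(void)
 * {
 *     k_mutex_lock(&my_mutex, K_FOREVER);
 *     data_ready = true;
 *     k_condvar_signal(&my_condvar);
 *     k_mutex_unlock(&my_mutex);
 * }
 * @endcode
 */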
3511
3512 /**
3513 * @brief Statically define and initialize a condition variable.
3514 *
3515 * The condition variable can be accessed outside the module where it is
3516 * defined using:
3517 *
3518 * @code extern struct k_condvar <name>; @endcode
3519 *
3520 * @param name Name of the condition variable.
3521 */
3522 #define K_CONDVAR_DEFINE(name) \
3523 STRUCT_SECTION_ITERABLE(k_condvar, name) = \
3524 Z_CONDVAR_INITIALIZER(name)
3525 /**
3526 * @}
3527 */
3528
3529 /**
3530 * @defgroup semaphore_apis Semaphore APIs
3531 * @ingroup kernel_apis
3532 * @{
3533 */
3534
3535 /**
3536 * @brief Semaphore structure
3537 *
3538 * This structure is used to represent a semaphore.
3539 * All the members are internal and should not be accessed directly.
3540 */
3541 struct k_sem {
3542 /**
3543 * @cond INTERNAL_HIDDEN
3544 */
3545 _wait_q_t wait_q;
3546 unsigned int count;
3547 unsigned int limit;
3548
3549 Z_DECL_POLL_EVENT
3550
3551 SYS_PORT_TRACING_TRACKING_FIELD(k_sem)
3552
3553 #ifdef CONFIG_OBJ_CORE_SEM
3554 struct k_obj_core obj_core;
3555 #endif
3556 /** @endcond */
3557 };
3558
3559 /**
3560 * @cond INTERNAL_HIDDEN
3561 */
3562
3563 #define Z_SEM_INITIALIZER(obj, initial_count, count_limit) \
3564 { \
3565 .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
3566 .count = (initial_count), \
3567 .limit = (count_limit), \
3568 Z_POLL_EVENT_OBJ_INIT(obj) \
3569 }
3570
3571 /**
3572 * @endcond
3573 */
3574
3575 /**
3576 * @brief Maximum limit value allowed for a semaphore.
3577 *
3578 * This is intended for use when a semaphore does not have
3579 * an explicit maximum limit, and instead is just used for
3580 * counting purposes.
3581 *
3582 */
3583 #define K_SEM_MAX_LIMIT UINT_MAX
3584
3585 /**
3586 * @brief Initialize a semaphore.
3587 *
3588 * This routine initializes a semaphore object, prior to its first use.
3589 *
3590 * @param sem Address of the semaphore.
3591 * @param initial_count Initial semaphore count.
3592 * @param limit Maximum permitted semaphore count.
3593 *
3594 * @see K_SEM_MAX_LIMIT
3595 *
3596 * @retval 0 Semaphore created successfully
3597 * @retval -EINVAL Invalid values
3598 *
3599 */
3600 __syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
3601 unsigned int limit);
3602
3603 /**
3604 * @brief Take a semaphore.
3605 *
3606 * This routine takes @a sem.
3607 *
3608 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
3609 *
3610 * @funcprops \isr_ok
3611 *
3612 * @param sem Address of the semaphore.
3613 * @param timeout Waiting period to take the semaphore,
3614 * or one of the special values K_NO_WAIT and K_FOREVER.
3615 *
3616 * @retval 0 Semaphore taken.
3617 * @retval -EBUSY Returned without waiting.
3618 * @retval -EAGAIN Waiting period timed out,
3619 * or the semaphore was reset during the waiting period.
3620 */
3621 __syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
3622
3623 /**
3624 * @brief Give a semaphore.
3625 *
3626 * This routine gives @a sem, unless the semaphore is already at its maximum
3627 * permitted count.
3628 *
3629 * @funcprops \isr_ok
3630 *
3631 * @param sem Address of the semaphore.
3632 */
3633 __syscall void k_sem_give(struct k_sem *sem);
3634
3635 /**
3636 * @brief Resets a semaphore's count to zero.
3637 *
3638 * This routine sets the count of @a sem to zero.
3639 * Any outstanding semaphore takes will be aborted
3640 * with -EAGAIN.
3641 *
3642 * @param sem Address of the semaphore.
3643 */
3644 __syscall void k_sem_reset(struct k_sem *sem);
3645
3646 /**
3647 * @brief Get a semaphore's count.
3648 *
3649 * This routine returns the current count of @a sem.
3650 *
3651 * @param sem Address of the semaphore.
3652 *
3653 * @return Current semaphore count.
3654 */
3655 __syscall unsigned int k_sem_count_get(struct k_sem *sem);
3656
3657 /**
3658 * @internal
3659 */
3660 static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
3661 {
3662 return sem->count;
3663 }
3664
3665 /**
3666 * @brief Statically define and initialize a semaphore.
3667 *
3668 * The semaphore can be accessed outside the module where it is defined using:
3669 *
3670 * @code extern struct k_sem <name>; @endcode
3671 *
3672 * @param name Name of the semaphore.
3673 * @param initial_count Initial semaphore count.
3674 * @param count_limit Maximum permitted semaphore count.
3675 */
3676 #define K_SEM_DEFINE(name, initial_count, count_limit) \
3677 STRUCT_SECTION_ITERABLE(k_sem, name) = \
3678 Z_SEM_INITIALIZER(name, initial_count, count_limit); \
3679 BUILD_ASSERT(((count_limit) != 0) && \
3680 		     ((initial_count) <= (count_limit)) && \
3681 ((count_limit) <= K_SEM_MAX_LIMIT));
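
/*
 * Usage sketch (hypothetical names): a binary semaphore signalled from an
 * ISR and consumed by a thread.
 *
 * @code
 * K_SEM_DEFINE(my_sem, 0, 1);   // initially unavailable, limit of 1
 *
 * void my_isr(const void *arg)
 * {
 *     k_sem_give(&my_sem);      // wakes the waiting thread
 * }
 *
 * void consumer_thread(void)
 * {
 *     while (1) {
 *         if (k_sem_take(&my_sem, K_MSEC(50)) == 0) {
 *             // event received
 *         } else {
 *             // -EAGAIN: timed out, or the semaphore was reset
 *         }
 *     }
 * }
 * @endcode
 */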
3682
3683 /** @} */
3684
3685 #if defined(CONFIG_SCHED_IPI_SUPPORTED) || defined(__DOXYGEN__)
3686 struct k_ipi_work;
3687
3688
3689 typedef void (*k_ipi_func_t)(struct k_ipi_work *work);
3690
3691 /**
3692 * @brief IPI work item structure
3693 *
3694 * This structure is used to represent an IPI work item.
3695 * All the members are internal and should not be accessed directly.
3696 */
3697 struct k_ipi_work {
3698 /**
3699 * @cond INTERNAL_HIDDEN
3700 */
3701 sys_dnode_t node[CONFIG_MP_MAX_NUM_CPUS]; /* Node in IPI work queue */
3702 k_ipi_func_t func; /* Function to execute on target CPU */
3703 struct k_event event; /* Event to signal when processed */
3704 uint32_t bitmask; /* Bitmask of targeted CPUs */
3705 /** INTERNAL_HIDDEN @endcond */
3706 };
3707
3708
3709 /**
3710 * @brief Initialize the specified IPI work item
3711 *
3712 * @kconfig_dep{CONFIG_SCHED_IPI_SUPPORTED}
3713 *
3714 * @param work Pointer to the IPI work item to be initialized
3715 */
3716 static inline void k_ipi_work_init(struct k_ipi_work *work)
3717 {
3718 k_event_init(&work->event);
3719 for (unsigned int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
3720 sys_dnode_init(&work->node[i]);
3721 }
3722 work->bitmask = 0;
3723 }
3724
3725 /**
3726 * @brief Add an IPI work item to the IPI work queue
3727 *
3728 * Adds the specified IPI work item to the IPI work queues of each CPU
3729 * identified by @a cpu_bitmask. The specified IPI work item will subsequently
3730 * execute at ISR level as those CPUs process their received IPIs. Do not
3731 * re-use the specified IPI work item until it has been processed by all of
3732 * the identified CPUs.
3733 *
3734 * @kconfig_dep{CONFIG_SCHED_IPI_SUPPORTED}
3735 *
3736 * @param work Pointer to the IPI work item
3737 * @param cpu_bitmask Set of CPUs to which the IPI work item will be sent
3738 * @param func Function to execute on the targeted CPU(s)
3739 *
3740 * @retval 0 on success
3741 * @retval -EBUSY if the specified IPI work item is still being processed
3742 */
3743 int k_ipi_work_add(struct k_ipi_work *work, uint32_t cpu_bitmask,
3744 k_ipi_func_t func);
3745
3746 /**
3747 * @brief Wait until the IPI work item has been processed by all targeted CPUs
3748 *
3749 * This routine waits until the IPI work item has been processed by all CPUs
3750 * to which it was sent. If called from an ISR, then @a timeout must be set to
3751 * K_NO_WAIT. To prevent deadlocks the caller must not have IRQs locked when
3752 * calling this function.
3753 *
3754 * @note It is not in general possible to poll safely for completion of this
3755 * function in ISR or locked contexts where the calling CPU cannot service IPIs
3756 * (because the targeted CPUs may themselves be waiting on the calling CPU).
3757 * Application code must be prepared for failure or to poll from a thread
3758 * context.
3759 *
3760 * @kconfig_dep{CONFIG_SCHED_IPI_SUPPORTED}
3761 *
3762 * @param work Pointer to the IPI work item
3763 * @param timeout Maximum time to wait for IPI work to be processed
3764 *
3765 * @retval -EAGAIN Waiting period timed out.
3766 * @retval 0 if processed by all targeted CPUs
3767 */
3768 int k_ipi_work_wait(struct k_ipi_work *work, k_timeout_t timeout);
3769
3770 /**
3771 * @brief Signal that there is one or more IPI work items to process
3772 *
3773 * This routine sends an IPI to the set of CPUs identified by calls to
3774 * k_ipi_work_add() since this CPU sent its last set of IPIs.
3775 *
3776 * @kconfig_dep{CONFIG_SCHED_IPI_SUPPORTED}
3777 */
3778 void k_ipi_work_signal(void);
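
/*
 * Usage sketch (illustrative; assumes CONFIG_SCHED_IPI_SUPPORTED): run a
 * function at ISR level on every CPU, then wait for completion. The names
 * are hypothetical; BIT_MASK() from sys/util.h is used here to target all
 * possible CPUs.
 *
 * @code
 * static struct k_ipi_work ipi;
 *
 * static void on_each_cpu(struct k_ipi_work *work)
 * {
 *     // executes at ISR level on every targeted CPU
 * }
 *
 * void run_on_all_cpus(void)
 * {
 *     k_ipi_work_init(&ipi);
 *     if (k_ipi_work_add(&ipi, BIT_MASK(CONFIG_MP_MAX_NUM_CPUS),
 *                        on_each_cpu) == 0) {
 *         k_ipi_work_signal();
 *         (void)k_ipi_work_wait(&ipi, K_FOREVER);
 *     }
 * }
 * @endcode
 */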
3779
3780 #endif /* CONFIG_SCHED_IPI_SUPPORTED */
3781
3782 /**
3783 * @cond INTERNAL_HIDDEN
3784 */
3785
3786 struct k_work_delayable;
3787 struct k_work_sync;
3788
3789 /**
3790 * INTERNAL_HIDDEN @endcond
3791 */
3792
3793 /**
3794 * @defgroup workqueue_apis Work Queue APIs
3795 * @ingroup kernel_apis
3796 * @{
3797 */
3798
3799 /** @brief The signature for a work item handler function.
3800 *
3801 * The function will be invoked by the thread animating a work queue.
3802 *
3803 * @param work the work item that provided the handler.
3804 */
3805 typedef void (*k_work_handler_t)(struct k_work *work);
3806
3807 /** @brief Initialize a (non-delayable) work structure.
3808 *
3809 * This must be invoked before submitting a work structure for the first time.
3810 * It need not be invoked again on the same work structure. It can be
3811 * re-invoked to change the associated handler, but this must be done when the
3812 * work item is idle.
3813 *
3814 * @funcprops \isr_ok
3815 *
3816 * @param work the work structure to be initialized.
3817 *
3818 * @param handler the handler to be invoked by the work item.
3819 */
3820 void k_work_init(struct k_work *work,
3821 k_work_handler_t handler);
3822
3823 /** @brief Busy state flags from the work item.
3824 *
3825 * A zero return value indicates the work item appears to be idle.
3826 *
3827 * @note This is a live snapshot of state, which may change before the result
3828 * is checked. Use locks where appropriate.
3829 *
3830 * @funcprops \isr_ok
3831 *
3832 * @param work pointer to the work item.
3833 *
3834 * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED,
3835 * K_WORK_RUNNING, K_WORK_CANCELING, and K_WORK_FLUSHING.
3836 */
3837 int k_work_busy_get(const struct k_work *work);
3838
3839 /** @brief Test whether a work item is currently pending.
3840 *
3841 * Wrapper to determine whether a work item is in a non-idle state.
3842 *
3843 * @note This is a live snapshot of state, which may change before the result
3844 * is checked. Use locks where appropriate.
3845 *
3846 * @funcprops \isr_ok
3847 *
3848 * @param work pointer to the work item.
3849 *
3850 * @return true if and only if k_work_busy_get() returns a non-zero value.
3851 */
3852 static inline bool k_work_is_pending(const struct k_work *work);
3853
3854 /** @brief Submit a work item to a queue.
3855 *
3856 * @param queue pointer to the work queue on which the item should run. If
3857 * NULL the queue from the most recent submission will be used.
3858 *
3859 * @funcprops \isr_ok
3860 *
3861 * @param work pointer to the work item.
3862 *
3863 * @retval 0 if work was already submitted to a queue
3864 * @retval 1 if work was not submitted and has been queued to @p queue
3865 * @retval 2 if work was running and has been queued to the queue that was
3866 * running it
3867 * @retval -EBUSY
3868 * * if work submission was rejected because the work item is cancelling; or
3869 * * @p queue is draining; or
3870 * * @p queue is plugged.
3871 * @retval -EINVAL if @p queue is null and the work item has never been run.
3872 * @retval -ENODEV if @p queue has not been started.
3873 */
3874 int k_work_submit_to_queue(struct k_work_q *queue,
3875 struct k_work *work);
3876
3877 /** @brief Submit a work item to the system queue.
3878 *
3879 * @funcprops \isr_ok
3880 *
3881 * @param work pointer to the work item.
3882 *
3883 * @return as with k_work_submit_to_queue().
3884 */
3885 int k_work_submit(struct k_work *work);
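
/*
 * Usage sketch (hypothetical names): deferring processing from an ISR to
 * the system work queue with a plain (non-delayable) work item.
 *
 * @code
 * static struct k_work my_work;
 *
 * static void my_handler(struct k_work *work)
 * {
 *     // runs in the system work queue thread, not in the ISR
 * }
 *
 * void init(void)
 * {
 *     k_work_init(&my_work, my_handler);
 * }
 *
 * void my_isr(const void *arg)
 * {
 *     // returns 0, 1, or 2 on success; negative on failure
 *     (void)k_work_submit(&my_work);
 * }
 * @endcode
 */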
3886
3887 /** @brief Wait for last-submitted instance to complete.
3888 *
3889 * Resubmissions may occur while waiting, including chained submissions (from
3890 * within the handler).
3891 *
3892 * @note Be careful of caller and work queue thread relative priority. If
3893 * this function sleeps it will not return until the work queue thread
3894 * completes the tasks that allow this thread to resume.
3895 *
3896 * @note Behavior is undefined if this function is invoked on @p work from a
3897 * work queue running @p work.
3898 *
3899 * @param work pointer to the work item.
3900 *
3901 * @param sync pointer to an opaque item containing state related to the
3902 * pending cancellation. The object must persist until the call returns, and
3903 * be accessible from both the caller thread and the work queue thread. The
3904 * object must not be used for any other flush or cancel operation until this
3905 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3906 * must be allocated in coherent memory.
3907 *
3908 * @retval true if call had to wait for completion
3909 * @retval false if work was already idle
3910 */
3911 bool k_work_flush(struct k_work *work,
3912 struct k_work_sync *sync);
3913
3914 /** @brief Cancel a work item.
3915 *
3916 * This attempts to prevent a pending (non-delayable) work item from being
3917 * processed by removing it from the work queue. If the item is being
3918 * processed, the work item will continue to be processed, but resubmissions
3919 * are rejected until cancellation completes.
3920 *
3921 * If this returns zero cancellation is complete, otherwise something
3922 * (probably a work queue thread) is still referencing the item.
3923 *
3924 * See also k_work_cancel_sync().
3925 *
3926 * @funcprops \isr_ok
3927 *
3928 * @param work pointer to the work item.
3929 *
3930 * @return the k_work_busy_get() status indicating the state of the item after all
3931 * cancellation steps performed by this call are completed.
3932 */
3933 int k_work_cancel(struct k_work *work);
3934
3935 /** @brief Cancel a work item and wait for it to complete.
3936 *
3937 * Same as k_work_cancel() but does not return until cancellation is complete.
3938 * This can be invoked by a thread after k_work_cancel() to synchronize with a
3939 * previous cancellation.
3940 *
3941 * On return the work structure will be idle unless something submits it after
3942 * the cancellation was complete.
3943 *
3944 * @note Be careful of caller and work queue thread relative priority. If
3945 * this function sleeps it will not return until the work queue thread
3946 * completes the tasks that allow this thread to resume.
3947 *
3948 * @note Behavior is undefined if this function is invoked on @p work from a
3949 * work queue running @p work.
3950 *
3951 * @param work pointer to the work item.
3952 *
3953 * @param sync pointer to an opaque item containing state related to the
3954 * pending cancellation. The object must persist until the call returns, and
3955 * be accessible from both the caller thread and the work queue thread. The
3956 * object must not be used for any other flush or cancel operation until this
3957 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3958 * must be allocated in coherent memory.
3959 *
3960 * @retval true if work was pending (call had to wait for cancellation of a
3961 * running handler to complete, or scheduled or submitted operations were
3962 * cancelled);
3963 * @retval false otherwise
3964 */
3965 bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync);
3966
3967 /** @brief Initialize a work queue structure.
3968 *
3969 * This must be invoked before starting a work queue structure for the first time.
3970 * It need not be invoked again on the same work queue structure.
3971 *
3972 * @funcprops \isr_ok
3973 *
3974 * @param queue the queue structure to be initialized.
3975 */
3976 void k_work_queue_init(struct k_work_q *queue);
3977
3978 /** @brief Initialize a work queue.
3979 *
3980 * This configures the work queue thread and starts it running. The function
3981 * should not be re-invoked on a queue.
3982 *
3983 * @param queue pointer to the queue structure. It must be initialized
3984 * in zeroed/bss memory or with @ref k_work_queue_init before
3985 * use.
3986 *
3987 * @param stack pointer to the work thread stack area.
3988 *
3989 * @param stack_size size of the work thread stack area, in bytes.
3990 *
3991 * @param prio initial thread priority
3992 *
3993 * @param cfg optional additional configuration parameters. Pass @c
3994 * NULL if not required, to use the defaults documented in
3995 * k_work_queue_config.
3996 */
3997 void k_work_queue_start(struct k_work_q *queue,
3998 k_thread_stack_t *stack, size_t stack_size,
3999 int prio, const struct k_work_queue_config *cfg);
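
/*
 * Usage sketch (hypothetical names): starting a dedicated work queue with a
 * named thread. The stack object is defined with K_THREAD_STACK_DEFINE()
 * and sized with K_THREAD_STACK_SIZEOF().
 *
 * @code
 * K_THREAD_STACK_DEFINE(my_wq_stack, 1024);
 * static struct k_work_q my_wq;
 *
 * static const struct k_work_queue_config my_wq_cfg = {
 *     .name = "my_wq",
 *     .no_yield = false,
 * };
 *
 * void start_my_wq(void)
 * {
 *     k_work_queue_start(&my_wq, my_wq_stack,
 *                        K_THREAD_STACK_SIZEOF(my_wq_stack),
 *                        K_PRIO_PREEMPT(5), &my_wq_cfg);
 * }
 * @endcode
 */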
4000
4001 /** @brief Run work queue using calling thread
4002 *
4003 * This will run the work queue forever unless stopped by @ref k_work_queue_stop.
4004 *
4005 * @param queue the queue to run
4006 *
4007 * @param cfg optional additional configuration parameters. Pass @c
4008 * NULL if not required, to use the defaults documented in
4009 * k_work_queue_config.
4010 */
4011 void k_work_queue_run(struct k_work_q *queue, const struct k_work_queue_config *cfg);
4012
4013 /** @brief Access the thread that animates a work queue.
4014 *
4015 * This is necessary to grant a work queue thread access to things the work
4016 * items it will process are expected to use.
4017 *
4018 * @param queue pointer to the queue structure.
4019 *
4020 * @return the thread associated with the work queue.
4021 */
4022 static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue);
4023
4024 /** @brief Wait until the work queue has drained, optionally plugging it.
4025 *
4026  * This blocks submission to the work queue except when coming from the queue
4027 * thread, and blocks the caller until no more work items are available in the
4028 * queue.
4029 *
4030 * If @p plug is true then submission will continue to be blocked after the
4031 * drain operation completes until k_work_queue_unplug() is invoked.
4032 *
4033 * Note that work items that are delayed are not yet associated with their
4034 * work queue. They must be cancelled externally if a goal is to ensure the
4035 * work queue remains empty. The @p plug feature can be used to prevent
4036 * delayed items from being submitted after the drain completes.
4037 *
4038 * @param queue pointer to the queue structure.
4039 *
4040 * @param plug if true the work queue will continue to block new submissions
4041 * after all items have drained.
4042 *
4043 * @retval 1 if call had to wait for the drain to complete
4044 * @retval 0 if call did not have to wait
4045 * @retval negative if wait was interrupted or failed
4046 */
4047 int k_work_queue_drain(struct k_work_q *queue, bool plug);
4048
4049 /** @brief Release a work queue to accept new submissions.
4050 *
4051 * This releases the block on new submissions placed when k_work_queue_drain()
4052 * is invoked with the @p plug option enabled. If this is invoked before the
4053 * drain completes new items may be submitted as soon as the drain completes.
4054 *
4055 * @funcprops \isr_ok
4056 *
4057 * @param queue pointer to the queue structure.
4058 *
4059 * @retval 0 if successfully unplugged
4060 * @retval -EALREADY if the work queue was not plugged.
4061 */
4062 int k_work_queue_unplug(struct k_work_q *queue);
4063
4064 /** @brief Stop a work queue.
4065 *
4066 * Stops the work queue thread and ensures that no further work will be processed.
4067 * This call is blocking and guarantees that the work queue thread has terminated
4068  * cleanly if successful; no work will be processed past this point.
4069 *
4070 * @param queue Pointer to the queue structure.
4071 * @param timeout Maximum time to wait for the work queue to stop.
4072 *
4073 * @retval 0 if the work queue was stopped
4074 * @retval -EALREADY if the work queue was not started (or already stopped)
4075 * @retval -EBUSY if the work queue is actively processing work items
4076 * @retval -ETIMEDOUT if the work queue did not stop within the stipulated timeout
4077  * @retval -ENOTSUP if the work queue is essential
4078 */
4079 int k_work_queue_stop(struct k_work_q *queue, k_timeout_t timeout);
4080
4081 /** @brief Initialize a delayable work structure.
4082 *
4083 * This must be invoked before scheduling a delayable work structure for the
4084 * first time. It need not be invoked again on the same work structure. It
4085 * can be re-invoked to change the associated handler, but this must be done
4086 * when the work item is idle.
4087 *
4088 * @funcprops \isr_ok
4089 *
4090 * @param dwork the delayable work structure to be initialized.
4091 *
4092 * @param handler the handler to be invoked by the work item.
4093 */
4094 void k_work_init_delayable(struct k_work_delayable *dwork,
4095 k_work_handler_t handler);
4096
4097 /**
4098 * @brief Get the parent delayable work structure from a work pointer.
4099 *
4100 * This function is necessary when a @c k_work_handler_t function is passed to
4101  * k_work_schedule_for_queue() and the handler needs to access data in the
4102  * containing `k_work_delayable` structure.
4103 *
4104 * @param work Address passed to the work handler
4105 *
4106 * @return Address of the containing @c k_work_delayable structure.
4107 */
4108 static inline struct k_work_delayable *
4109 k_work_delayable_from_work(struct k_work *work);
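
/*
 * Usage sketch (hypothetical names): recovering the containing object in a
 * delayable work handler via k_work_delayable_from_work() and
 * CONTAINER_OF().
 *
 * @code
 * struct my_device {
 *     struct k_work_delayable dwork;
 *     int state;
 * };
 *
 * static void my_dwork_handler(struct k_work *work)
 * {
 *     struct k_work_delayable *dwork = k_work_delayable_from_work(work);
 *     struct my_device *dev = CONTAINER_OF(dwork, struct my_device, dwork);
 *
 *     dev->state++;   // per-instance data is now accessible
 * }
 * @endcode
 */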
4110
4111 /** @brief Busy state flags from the delayable work item.
4112 *
4113 * @funcprops \isr_ok
4114 *
4115 * @note This is a live snapshot of state, which may change before the result
4116 * can be inspected. Use locks where appropriate.
4117 *
4118 * @param dwork pointer to the delayable work item.
4119 *
4120 * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED, K_WORK_RUNNING,
4121 * K_WORK_CANCELING, and K_WORK_FLUSHING. A zero return value indicates the
4122 * work item appears to be idle.
4123 */
4124 int k_work_delayable_busy_get(const struct k_work_delayable *dwork);
4125
4126 /** @brief Test whether a delayed work item is currently pending.
4127 *
4128 * Wrapper to determine whether a delayed work item is in a non-idle state.
4129 *
4130 * @note This is a live snapshot of state, which may change before the result
4131 * can be inspected. Use locks where appropriate.
4132 *
4133 * @funcprops \isr_ok
4134 *
4135 * @param dwork pointer to the delayable work item.
4136 *
4137 * @return true if and only if k_work_delayable_busy_get() returns a non-zero
4138 * value.
4139 */
4140 static inline bool k_work_delayable_is_pending(
4141 const struct k_work_delayable *dwork);
4142
4143 /** @brief Get the absolute tick count at which a scheduled delayable work
4144 * will be submitted.
4145 *
4146 * @note This is a live snapshot of state, which may change before the result
4147 * can be inspected. Use locks where appropriate.
4148 *
4149 * @funcprops \isr_ok
4150 *
4151 * @param dwork pointer to the delayable work item.
4152 *
4153 * @return the tick count when the timer that will schedule the work item will
4154 * expire, or the current tick count if the work is not scheduled.
4155 */
4156 static inline k_ticks_t k_work_delayable_expires_get(
4157 const struct k_work_delayable *dwork);
4158
4159 /** @brief Get the number of ticks until a scheduled delayable work will be
4160 * submitted.
4161 *
4162 * @note This is a live snapshot of state, which may change before the result
4163 * can be inspected. Use locks where appropriate.
4164 *
4165 * @funcprops \isr_ok
4166 *
4167 * @param dwork pointer to the delayable work item.
4168 *
4169 * @return the number of ticks until the timer that will schedule the work
4170 * item will expire, or zero if the item is not scheduled.
4171 */
4172 static inline k_ticks_t k_work_delayable_remaining_get(
4173 const struct k_work_delayable *dwork);
4174
4175 /** @brief Submit an idle work item to a queue after a delay.
4176 *
4177 * Unlike k_work_reschedule_for_queue() this is a no-op if the work item is
4178 * already scheduled or submitted, even if @p delay is @c K_NO_WAIT.
4179 *
4180 * @funcprops \isr_ok
4181 *
4182 * @param queue the queue on which the work item should be submitted after the
4183 * delay.
4184 *
4185 * @param dwork pointer to the delayable work item.
4186 *
4187 * @param delay the time to wait before submitting the work item. If @c
4188 * K_NO_WAIT and the work is not pending this is equivalent to
4189 * k_work_submit_to_queue().
4190 *
4191 * @retval 0 if work was already scheduled or submitted.
4192 * @retval 1 if work has been scheduled.
4193 * @retval 2 if @p delay is @c K_NO_WAIT and work
4194 * was running and has been queued to the queue that was running it.
4195 * @retval -EBUSY if @p delay is @c K_NO_WAIT and
4196 * k_work_submit_to_queue() fails with this code.
4197 * @retval -EINVAL if @p delay is @c K_NO_WAIT and
4198 * k_work_submit_to_queue() fails with this code.
4199 * @retval -ENODEV if @p delay is @c K_NO_WAIT and
4200 * k_work_submit_to_queue() fails with this code.
4201 */
4202 int k_work_schedule_for_queue(struct k_work_q *queue,
4203 struct k_work_delayable *dwork,
4204 k_timeout_t delay);
4205
4206 /** @brief Submit an idle work item to the system work queue after a
4207 * delay.
4208 *
4209 * This is a thin wrapper around k_work_schedule_for_queue(), with all the API
4210 * characteristics of that function.
4211 *
4212 * @param dwork pointer to the delayable work item.
4213 *
4214 * @param delay the time to wait before submitting the work item. If @c
4215 * K_NO_WAIT this is equivalent to k_work_submit_to_queue().
4216 *
4217 * @return as with k_work_schedule_for_queue().
4218 */
4219 int k_work_schedule(struct k_work_delayable *dwork,
4220 k_timeout_t delay);
4221
4222 /** @brief Reschedule a work item to a queue after a delay.
4223 *
4224 * Unlike k_work_schedule_for_queue() this function can change the deadline of
4225 * a scheduled work item, and will schedule a work item that is in any state
4226 * (e.g. is idle, submitted, or running). This function does not affect
4227 * ("unsubmit") a work item that has been submitted to a queue.
4228 *
4229 * @funcprops \isr_ok
4230 *
4231 * @param queue the queue on which the work item should be submitted after the
4232 * delay.
4233 *
4234 * @param dwork pointer to the delayable work item.
4235 *
4236 * @param delay the time to wait before submitting the work item. If @c
4237 * K_NO_WAIT this is equivalent to k_work_submit_to_queue() after canceling
4238 * any previous scheduled submission.
4239 *
4240 * @note If delay is @c K_NO_WAIT ("no delay") the return values are as with
4241 * k_work_submit_to_queue().
4242 *
4243 * @retval 0 if delay is @c K_NO_WAIT and work was already on a queue
4244 * @retval 1 if
4245 * * delay is @c K_NO_WAIT and work was not submitted but has now been queued
4246 * to @p queue; or
4247 * * delay not @c K_NO_WAIT and work has been scheduled
4248 * @retval 2 if delay is @c K_NO_WAIT and work was running and has been queued
4249 * to the queue that was running it
4250 * @retval -EBUSY if @p delay is @c K_NO_WAIT and
4251 * k_work_submit_to_queue() fails with this code.
4252 * @retval -EINVAL if @p delay is @c K_NO_WAIT and
4253 * k_work_submit_to_queue() fails with this code.
4254 * @retval -ENODEV if @p delay is @c K_NO_WAIT and
4255 * k_work_submit_to_queue() fails with this code.
4256 */
4257 int k_work_reschedule_for_queue(struct k_work_q *queue,
4258 struct k_work_delayable *dwork,
4259 k_timeout_t delay);
4260
4261 /** @brief Reschedule a work item to the system work queue after a
4262 * delay.
4263 *
4264 * This is a thin wrapper around k_work_reschedule_for_queue(), with all the
4265 * API characteristics of that function.
4266 *
4267 * @param dwork pointer to the delayable work item.
4268 *
4269 * @param delay the time to wait before submitting the work item.
4270 *
4271 * @return as with k_work_reschedule_for_queue().
4272 */
4273 int k_work_reschedule(struct k_work_delayable *dwork,
4274 k_timeout_t delay);
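
/*
 * Usage sketch (hypothetical names): debouncing a bursty event source.
 * Every call pushes the deadline out, so the handler runs once, 10 ms after
 * the last event.
 *
 * @code
 * static struct k_work_delayable debounce_work;
 *
 * static void debounce_handler(struct k_work *work)
 * {
 *     // runs only after events have been quiet for 10 ms
 * }
 *
 * void init(void)
 * {
 *     k_work_init_delayable(&debounce_work, debounce_handler);
 * }
 *
 * void on_input_event(void)
 * {
 *     (void)k_work_reschedule(&debounce_work, K_MSEC(10));
 * }
 * @endcode
 */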
4275
4276 /** @brief Flush delayable work.
4277 *
4278 * If the work is scheduled, it is immediately submitted. Then the caller
4279 * blocks until the work completes, as with k_work_flush().
4280 *
4281 * @note Be careful of caller and work queue thread relative priority. If
4282 * this function sleeps it will not return until the work queue thread
4283 * completes the tasks that allow this thread to resume.
4284 *
4285 * @note Behavior is undefined if this function is invoked on @p dwork from a
4286 * work queue running @p dwork.
4287 *
4288 * @param dwork pointer to the delayable work item.
4289 *
4290 * @param sync pointer to an opaque item containing state related to the
4291 * pending cancellation. The object must persist until the call returns, and
4292 * be accessible from both the caller thread and the work queue thread. The
4293 * object must not be used for any other flush or cancel operation until this
4294 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
4295 * must be allocated in coherent memory.
4296 *
4297 * @retval true if call had to wait for completion
4298 * @retval false if work was already idle
4299 */
4300 bool k_work_flush_delayable(struct k_work_delayable *dwork,
4301 struct k_work_sync *sync);
4302
4303 /** @brief Cancel delayable work.
4304 *
4305 * Similar to k_work_cancel() but for delayable work. If the work is
4306 * scheduled or submitted it is canceled. This function does not wait for the
4307 * cancellation to complete.
4308 *
4309 * @note The work may still be running when this returns. Use
4310 * k_work_flush_delayable() or k_work_cancel_delayable_sync() to ensure it is
4311 * not running.
4312 *
4313 * @note Canceling delayable work does not prevent rescheduling it. It does
4314 * prevent submitting it until the cancellation completes.
4315 *
4316 * @funcprops \isr_ok
4317 *
4318 * @param dwork pointer to the delayable work item.
4319 *
4320 * @return the k_work_delayable_busy_get() status indicating the state of the
4321 * item after all cancellation steps performed by this call are completed.
4322 */
4323 int k_work_cancel_delayable(struct k_work_delayable *dwork);
4324
4325 /** @brief Cancel delayable work and wait.
4326 *
4327 * Like k_work_cancel_delayable() but waits until the work becomes idle.
4328 *
4329 * @note Canceling delayable work does not prevent rescheduling it. It does
4330 * prevent submitting it until the cancellation completes.
4331 *
4332 * @note Be careful of caller and work queue thread relative priority. If
4333 * this function sleeps it will not return until the work queue thread
4334 * completes the tasks that allow this thread to resume.
4335 *
4336 * @note Behavior is undefined if this function is invoked on @p dwork from a
4337 * work queue running @p dwork.
4338 *
4339 * @param dwork pointer to the delayable work item.
4340 *
4341 * @param sync pointer to an opaque item containing state related to the
4342 * pending cancellation. The object must persist until the call returns, and
4343 * be accessible from both the caller thread and the work queue thread. The
4344 * object must not be used for any other flush or cancel operation until this
4345 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
4346 * must be allocated in coherent memory.
4347 *
4348 * @retval true if work was not idle (call had to wait for cancellation of a
4349 * running handler to complete, or scheduled or submitted operations were
4350 * cancelled);
4351 * @retval false otherwise
4352 */
4353 bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
4354 struct k_work_sync *sync);
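
/*
 * Usage sketch (hypothetical names): synchronously cancelling a delayable
 * work item during shutdown. The k_work_sync object is static because on
 * CONFIG_KERNEL_COHERENCE architectures it must not live on the stack.
 *
 * @code
 * static struct k_work_delayable my_dwork;
 * static struct k_work_sync my_sync;
 *
 * void shutdown(void)
 * {
 *     if (k_work_cancel_delayable_sync(&my_dwork, &my_sync)) {
 *         // had to wait: the item was scheduled, queued, or running
 *     }
 *     // my_dwork is now idle unless something resubmits it
 * }
 * @endcode
 */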
4355
4356 enum {
4357 /**
4358 * @cond INTERNAL_HIDDEN
4359 */
4360
4361 /* The atomic API is used for all work and queue flags fields to
4362 * enforce sequential consistency in SMP environments.
4363 */
4364
4365 /* Bits that represent the work item states. At least nine of the
4366 * combinations are distinct valid stable states.
4367 */
4368 K_WORK_RUNNING_BIT = 0,
4369 K_WORK_CANCELING_BIT = 1,
4370 K_WORK_QUEUED_BIT = 2,
4371 K_WORK_DELAYED_BIT = 3,
4372 K_WORK_FLUSHING_BIT = 4,
4373
4374 K_WORK_MASK = BIT(K_WORK_DELAYED_BIT) | BIT(K_WORK_QUEUED_BIT)
4375 | BIT(K_WORK_RUNNING_BIT) | BIT(K_WORK_CANCELING_BIT) | BIT(K_WORK_FLUSHING_BIT),
4376
4377 /* Static work flags */
4378 K_WORK_DELAYABLE_BIT = 8,
4379 K_WORK_DELAYABLE = BIT(K_WORK_DELAYABLE_BIT),
4380
4381 /* Dynamic work queue flags */
4382 K_WORK_QUEUE_STARTED_BIT = 0,
4383 K_WORK_QUEUE_STARTED = BIT(K_WORK_QUEUE_STARTED_BIT),
4384 K_WORK_QUEUE_BUSY_BIT = 1,
4385 K_WORK_QUEUE_BUSY = BIT(K_WORK_QUEUE_BUSY_BIT),
4386 K_WORK_QUEUE_DRAIN_BIT = 2,
4387 K_WORK_QUEUE_DRAIN = BIT(K_WORK_QUEUE_DRAIN_BIT),
4388 K_WORK_QUEUE_PLUGGED_BIT = 3,
4389 K_WORK_QUEUE_PLUGGED = BIT(K_WORK_QUEUE_PLUGGED_BIT),
4390 K_WORK_QUEUE_STOP_BIT = 4,
4391 K_WORK_QUEUE_STOP = BIT(K_WORK_QUEUE_STOP_BIT),
4392
4393 /* Static work queue flags */
4394 K_WORK_QUEUE_NO_YIELD_BIT = 8,
4395 K_WORK_QUEUE_NO_YIELD = BIT(K_WORK_QUEUE_NO_YIELD_BIT),
4396
4397 /**
4398 * INTERNAL_HIDDEN @endcond
4399 */
4400 /* Transient work flags */
4401
4402 /** @brief Flag indicating a work item that is running under a work
4403 * queue thread.
4404 *
4405 * Accessed via k_work_busy_get(). May co-occur with other flags.
4406 */
4407 K_WORK_RUNNING = BIT(K_WORK_RUNNING_BIT),
4408
4409 /** @brief Flag indicating a work item that is being canceled.
4410 *
4411 * Accessed via k_work_busy_get(). May co-occur with other flags.
4412 */
4413 K_WORK_CANCELING = BIT(K_WORK_CANCELING_BIT),
4414
4415 /** @brief Flag indicating a work item that has been submitted to a
4416 * queue but has not started running.
4417 *
4418 * Accessed via k_work_busy_get(). May co-occur with other flags.
4419 */
4420 K_WORK_QUEUED = BIT(K_WORK_QUEUED_BIT),
4421
4422 /** @brief Flag indicating a delayed work item that is scheduled for
4423 * submission to a queue.
4424 *
4425 * Accessed via k_work_busy_get(). May co-occur with other flags.
4426 */
4427 K_WORK_DELAYED = BIT(K_WORK_DELAYED_BIT),
4428
4429 /** @brief Flag indicating a synced work item that is being flushed.
4430 *
4431 * Accessed via k_work_busy_get(). May co-occur with other flags.
4432 */
4433 K_WORK_FLUSHING = BIT(K_WORK_FLUSHING_BIT),
4434 };
4435
4436 /** @brief A structure used to submit work. */
4437 struct k_work {
4438 /* All fields are protected by the work module spinlock. No fields
4439 * are to be accessed except through kernel API.
4440 */
4441
4442 /* Node to link into k_work_q pending list. */
4443 sys_snode_t node;
4444
4445 /* The function to be invoked by the work queue thread. */
4446 k_work_handler_t handler;
4447
4448 /* The queue on which the work item was last submitted. */
4449 struct k_work_q *queue;
4450
4451 /* State of the work item.
4452 *
4453 * The item can be DELAYED, QUEUED, and RUNNING simultaneously.
4454 *
4455 * It can be RUNNING and CANCELING simultaneously.
4456 */
4457 uint32_t flags;
4458 };
4459
4460 #define Z_WORK_INITIALIZER(work_handler) { \
4461 .handler = (work_handler), \
4462 }
4463
4464 /** @brief A structure used to submit work after a delay. */
4465 struct k_work_delayable {
4466 /* The work item. */
4467 struct k_work work;
4468
4469 /* Timeout used to submit work after a delay. */
4470 struct _timeout timeout;
4471
4472 /* The queue to which the work should be submitted. */
4473 struct k_work_q *queue;
4474 };
4475
4476 #define Z_WORK_DELAYABLE_INITIALIZER(work_handler) { \
4477 .work = { \
4478 .handler = (work_handler), \
4479 .flags = K_WORK_DELAYABLE, \
4480 }, \
4481 }
4482
4483 /**
4484 * @brief Initialize a statically-defined delayable work item.
4485 *
4486 * This macro can be used to initialize a statically-defined delayable
4487 * work item, prior to its first use. For example,
4488 *
4489 * @code static K_WORK_DELAYABLE_DEFINE(<dwork>, <work_handler>); @endcode
4490 *
4491  * Note that if the runtime dependencies permit initialization with
4492  * k_work_init_delayable(), using that instead will eliminate the
4493  * initialized object in ROM that is produced by this macro and copied
4494  * in at system startup.
4495 *
4496 * @param work Symbol name for delayable work item object
4497 * @param work_handler Function to invoke each time work item is processed.
4498 */
4499 #define K_WORK_DELAYABLE_DEFINE(work, work_handler) \
4500 struct k_work_delayable work \
4501 = Z_WORK_DELAYABLE_INITIALIZER(work_handler)
4502
4503 /**
4504 * @cond INTERNAL_HIDDEN
4505 */
4506
4507 /* Record used to wait for work to flush.
4508 *
4509 * The work item is inserted into the queue that will process (or is
4510 * processing) the item, and will be processed as soon as the item
4511 * completes. When the flusher is processed the semaphore will be
4512 * signaled, releasing the thread waiting for the flush.
4513 */
4514 struct z_work_flusher {
4515 struct k_work work;
4516 struct k_sem sem;
4517 };
4518
4519 /* Record used to wait for work to complete a cancellation.
4520 *
4521 * The work item is inserted into a global queue of pending cancels.
4522 * When a cancelling work item goes idle any matching waiters are
4523 * removed from pending_cancels and are woken.
4524 */
4525 struct z_work_canceller {
4526 sys_snode_t node;
4527 struct k_work *work;
4528 struct k_sem sem;
4529 };
4530
4531 /**
4532 * INTERNAL_HIDDEN @endcond
4533 */
4534
4535 /** @brief A structure holding internal state for a pending synchronous
4536 * operation on a work item or queue.
4537 *
4538 * Instances of this type are provided by the caller for invocation of
4539 * k_work_flush(), k_work_cancel_sync() and sibling flush and cancel APIs. A
4540 * referenced object must persist until the call returns, and be accessible
4541 * from both the caller thread and the work queue thread.
4542 *
4543 * @note If CONFIG_KERNEL_COHERENCE is enabled the object must be allocated in
4544  * coherent memory; see sys_cache_is_mem_coherent(). The stack on these
4545  * architectures is generally not coherent, so the object must not be
4546  * stack-allocated. Violations are detected by runtime assertion.
4547 */
4548 struct k_work_sync {
4549 union {
4550 struct z_work_flusher flusher;
4551 struct z_work_canceller canceller;
4552 };
4553 };
4554
4555 /** @brief A structure holding optional configuration items for a work
4556 * queue.
4557 *
4558 * This structure, and values it references, are not retained by
4559 * k_work_queue_start().
4560 */
4561 struct k_work_queue_config {
4562 /** The name to be given to the work queue thread.
4563 *
4564 * If left null the thread will not have a name.
4565 */
4566 const char *name;
4567
4568 /** Control whether the work queue thread should yield between
4569 * items.
4570 *
4571 * Yielding between items helps guarantee the work queue
4572 * thread does not starve other threads, including cooperative
4573 * ones released by a work item. This is the default behavior.
4574 *
4575 * Set this to @c true to prevent the work queue thread from
4576 * yielding between items. This may be appropriate when a
4577 * sequence of items should complete without yielding
4578 * control.
4579 */
4580 bool no_yield;
4581
4582 /** Control whether the work queue thread should be marked as
4583 * essential thread.
4584 */
4585 bool essential;
4586
4587 	/** Controls whether the work queue monitors work timeouts.
4588 *
4589 * If non-zero, and CONFIG_WORKQUEUE_WORK_TIMEOUT is enabled,
4590 * the work queue will monitor the duration of each work item.
4591 * If the work item handler takes longer than the specified
4592 * time to execute, the work queue thread will be aborted, and
4593 * an error will be logged if CONFIG_LOG is enabled.
4594 */
4595 uint32_t work_timeout_ms;
4596 };
4597
4598 /** @brief A structure used to hold work until it can be processed. */
4599 struct k_work_q {
4600 /* The thread that animates the work. */
4601 struct k_thread thread;
4602
4603 /* The thread ID that animates the work. This may be an external thread
4604 * if k_work_queue_run() is used.
4605 */
4606 k_tid_t thread_id;
4607
4608 /* All the following fields must be accessed only while the
4609 * work module spinlock is held.
4610 */
4611
4612 /* List of k_work items to be worked. */
4613 sys_slist_t pending;
4614
4615 /* Wait queue for idle work thread. */
4616 _wait_q_t notifyq;
4617
4618 /* Wait queue for threads waiting for the queue to drain. */
4619 _wait_q_t drainq;
4620
4621 /* Flags describing queue state. */
4622 uint32_t flags;
4623
4624 #if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
4625 struct _timeout work_timeout_record;
4626 struct k_work *work;
4627 k_timeout_t work_timeout;
4628 #endif /* defined(CONFIG_WORKQUEUE_WORK_TIMEOUT) */
4629 };
4630
4631 /* Provide the implementation for inline functions declared above */
4632
4633 static inline bool k_work_is_pending(const struct k_work *work)
4634 {
4635 return k_work_busy_get(work) != 0;
4636 }
4637
4638 static inline struct k_work_delayable *
4639 k_work_delayable_from_work(struct k_work *work)
4640 {
4641 return CONTAINER_OF(work, struct k_work_delayable, work);
4642 }
4643
4644 static inline bool k_work_delayable_is_pending(
4645 const struct k_work_delayable *dwork)
4646 {
4647 return k_work_delayable_busy_get(dwork) != 0;
4648 }
4649
4650 static inline k_ticks_t k_work_delayable_expires_get(
4651 const struct k_work_delayable *dwork)
4652 {
4653 return z_timeout_expires(&dwork->timeout);
4654 }
4655
4656 static inline k_ticks_t k_work_delayable_remaining_get(
4657 const struct k_work_delayable *dwork)
4658 {
4659 return z_timeout_remaining(&dwork->timeout);
4660 }
4661
4662 static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue)
4663 {
4664 return queue->thread_id;
4665 }
4666
4667 /** @} */
4668
4669 struct k_work_user;
4670
4671 /**
4672 * @addtogroup workqueue_apis
4673 * @{
4674 */
4675
4676 /**
4677 * @typedef k_work_user_handler_t
4678 * @brief Work item handler function type for user work queues.
4679 *
4680 * A work item's handler function is executed by a user workqueue's thread
4681 * when the work item is processed by the workqueue.
4682 *
4683 * @param work Address of the work item.
4684 */
4685 typedef void (*k_work_user_handler_t)(struct k_work_user *work);
4686
4687 /**
4688 * @cond INTERNAL_HIDDEN
4689 */
4690
4691 struct k_work_user_q {
4692 struct k_queue queue;
4693 struct k_thread thread;
4694 };
4695
4696 enum {
4697 K_WORK_USER_STATE_PENDING, /* Work item pending state */
4698 };
4699
4700 struct k_work_user {
4701 void *_reserved; /* Used by k_queue implementation. */
4702 k_work_user_handler_t handler;
4703 atomic_t flags;
4704 };
4705
4706 /**
4707 * INTERNAL_HIDDEN @endcond
4708 */
4709
4710 #if defined(__cplusplus) && ((__cplusplus - 0) < 202002L)
4711 #define Z_WORK_USER_INITIALIZER(work_handler) { NULL, work_handler, 0 }
4712 #else
4713 #define Z_WORK_USER_INITIALIZER(work_handler) \
4714 { \
4715 ._reserved = NULL, \
4716 .handler = (work_handler), \
4717 .flags = 0 \
4718 }
4719 #endif
4720
4721 /**
4722 * @brief Initialize a statically-defined user work item.
4723 *
4724 * This macro can be used to initialize a statically-defined user work
4725 * item, prior to its first use. For example,
4726 *
4727 * @code static K_WORK_USER_DEFINE(<work>, <work_handler>); @endcode
4728 *
4729 * @param work Symbol name for work item object
4730 * @param work_handler Function to invoke each time work item is processed.
4731 */
4732 #define K_WORK_USER_DEFINE(work, work_handler) \
4733 struct k_work_user work = Z_WORK_USER_INITIALIZER(work_handler)
4734
4735 /**
4736 * @brief Initialize a userspace work item.
4737 *
4738 * This routine initializes a user workqueue work item, prior to its
4739 * first use.
4740 *
4741 * @param work Address of work item.
4742 * @param handler Function to invoke each time work item is processed.
4743 */
4744 static inline void k_work_user_init(struct k_work_user *work,
4745 k_work_user_handler_t handler)
4746 {
4747 *work = (struct k_work_user)Z_WORK_USER_INITIALIZER(handler);
4748 }
4749
4750 /**
4751 * @brief Check if a userspace work item is pending.
4752 *
4753 * This routine indicates if user work item @a work is pending in a workqueue's
4754 * queue.
4755 *
4756 * @note Checking if the work is pending gives no guarantee that the
4757 * work will still be pending when this information is used. It is up to
4758 * the caller to make sure that this information is used in a safe manner.
4759 *
4760 * @funcprops \isr_ok
4761 *
4762 * @param work Address of work item.
4763 *
4764 * @return true if work item is pending, or false if it is not pending.
4765 */
4766 static inline bool k_work_user_is_pending(struct k_work_user *work)
4767 {
4768 return atomic_test_bit(&work->flags, K_WORK_USER_STATE_PENDING);
4769 }
4770
4771 /**
4772 * @brief Submit a work item to a user mode workqueue
4773 *
4774 * Submits a work item to a workqueue that runs in user mode. A temporary
4775 * memory allocation is made from the caller's resource pool which is freed
4776 * once the worker thread consumes the k_work item. The workqueue
4777 * thread must have memory access to the k_work item being submitted. The caller
4778 * must have permission granted on the work_q parameter's queue object.
4779 *
4780 * @funcprops \isr_ok
4781 *
4782 * @param work_q Address of workqueue.
4783 * @param work Address of work item.
4784 *
4785 * @retval -EBUSY if the work item was already in some workqueue
4786 * @retval -ENOMEM if no memory for thread resource pool allocation
4787 * @retval 0 Success
4788 */
4789 static inline int k_work_user_submit_to_queue(struct k_work_user_q *work_q,
4790 struct k_work_user *work)
4791 {
4792 int ret = -EBUSY;
4793
4794 if (!atomic_test_and_set_bit(&work->flags,
4795 K_WORK_USER_STATE_PENDING)) {
4796 ret = k_queue_alloc_append(&work_q->queue, work);
4797
4798 /* Couldn't insert into the queue. Clear the pending bit
4799 * so the work item can be submitted again
4800 */
4801 if (ret != 0) {
4802 atomic_clear_bit(&work->flags,
4803 K_WORK_USER_STATE_PENDING);
4804 }
4805 }
4806
4807 return ret;
4808 }
4809
4810 /**
4811 * @brief Start a workqueue in user mode
4812 *
4813 * This works identically to k_work_queue_start() except it is callable from
4814 * user mode, and the worker thread created will run in user mode. The caller
4815 * must have permissions granted on both the work_q parameter's thread and
4816 * queue objects, and the same restrictions on priority apply as
4817 * k_thread_create().
4818 *
4819 * @param work_q Address of workqueue.
4820 * @param stack Pointer to work queue thread's stack space, as defined by
4821 * K_THREAD_STACK_DEFINE()
4822 * @param stack_size Size of the work queue thread's stack (in bytes), which
4823 * should either be the same constant passed to
4824 * K_THREAD_STACK_DEFINE() or the value of K_THREAD_STACK_SIZEOF().
4825 * @param prio Priority of the work queue's thread.
4826 * @param name optional thread name. If not null a copy is made into the
4827 * thread's name buffer.
4828 */
4829 void k_work_user_queue_start(struct k_work_user_q *work_q,
4830 k_thread_stack_t *stack,
4831 size_t stack_size, int prio,
4832 const char *name);
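
/*
 * Usage sketch (hypothetical names): starting a user-mode work queue and
 * submitting a user work item to it. The caller needs permissions on the
 * queue's thread and queue objects, plus a resource pool for the temporary
 * allocation made by k_work_user_submit_to_queue().
 *
 * @code
 * K_THREAD_STACK_DEFINE(user_wq_stack, 2048);
 * static struct k_work_user_q user_wq;
 *
 * static void user_handler(struct k_work_user *work)
 * {
 *     // runs in the user-mode work queue thread
 * }
 *
 * K_WORK_USER_DEFINE(user_work, user_handler);
 *
 * void start_and_submit(void)
 * {
 *     k_work_user_queue_start(&user_wq, user_wq_stack,
 *                             K_THREAD_STACK_SIZEOF(user_wq_stack),
 *                             10, "user_wq");
 *     (void)k_work_user_submit_to_queue(&user_wq, &user_work);
 * }
 * @endcode
 */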
4833
4834 /**
4835 * @brief Access the user mode thread that animates a work queue.
4836 *
4837 * This is necessary to grant a user mode work queue thread access to things
4838 * the work items it will process are expected to use.
4839 *
4840 * @param work_q pointer to the user mode queue structure.
4841 *
4842 * @return the user mode thread associated with the work queue.
4843 */
4844 static inline k_tid_t k_work_user_queue_thread_get(struct k_work_user_q *work_q)
4845 {
4846 return &work_q->thread;
4847 }
4848
4849 /** @} */
4850
4851 /**
4852 * @cond INTERNAL_HIDDEN
4853 */
4854
4855 struct k_work_poll {
4856 struct k_work work;
4857 struct k_work_q *workq;
4858 struct z_poller poller;
4859 struct k_poll_event *events;
4860 int num_events;
4861 k_work_handler_t real_handler;
4862 struct _timeout timeout;
4863 int poll_result;
4864 };
4865
4866 /**
4867 * INTERNAL_HIDDEN @endcond
4868 */
4869
4870 /**
4871 * @addtogroup workqueue_apis
4872 * @{
4873 */
4874
4875 /**
4876 * @brief Initialize a statically-defined work item.
4877 *
4878 * This macro can be used to initialize a statically-defined workqueue work
4879 * item, prior to its first use. For example,
4880 *
4881 * @code static K_WORK_DEFINE(<work>, <work_handler>); @endcode
4882 *
4883 * @param work Symbol name for work item object
4884 * @param work_handler Function to invoke each time work item is processed.
4885 */
4886 #define K_WORK_DEFINE(work, work_handler) \
4887 struct k_work work = Z_WORK_INITIALIZER(work_handler)
4888
4889 /**
4890 * @brief Initialize a triggered work item.
4891 *
4892 * This routine initializes a workqueue triggered work item, prior to
4893 * its first use.
4894 *
4895 * @param work Address of triggered work item.
4896 * @param handler Function to invoke each time work item is processed.
4897 */
4898 void k_work_poll_init(struct k_work_poll *work,
4899 k_work_handler_t handler);
4900
4901 /**
4902 * @brief Submit a triggered work item.
4903 *
4904 * This routine schedules work item @a work to be processed by workqueue
4905 * @a work_q when one of the given @a events is signaled. The routine
4906 * initiates an internal poller for the work item and then returns to the
4907 * caller. Only when one of the watched events happens is the work item
4908 * actually submitted to the workqueue, at which point it becomes pending.
4909 *
4910 * Submitting a previously submitted triggered work item that is still
4911 * waiting for the event cancels the existing submission and reschedules it
4912 * using the new event list. Note that this behavior is inherently subject
4913 * to race conditions with the pre-existing triggered work item and work queue,
4914 * so care must be taken to synchronize such resubmissions externally.
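 *
 * For example, a minimal sketch (the signal, event array, work item,
 * handler, and workqueue names are hypothetical):
 *
 * @code
 * static struct k_poll_signal my_signal =
 *         K_POLL_SIGNAL_INITIALIZER(my_signal);
 * static struct k_poll_event my_events[] = {
 *     K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SIGNAL,
 *                                     K_POLL_MODE_NOTIFY_ONLY,
 *                                     &my_signal, 0),
 * };
 * static struct k_work_poll my_twork;
 *
 * k_work_poll_init(&my_twork, my_handler);
 * (void)k_work_poll_submit_to_queue(&my_workq, &my_twork,
 *                                   my_events, 1, K_FOREVER);
 * @endcode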
4915 *
4916 * @funcprops \isr_ok
4917 *
4918 * @warning
4919 * The provided array of events, as well as the triggered work item, must be
4920 * placed in persistent memory (valid until work handler execution or work
4921 * cancellation) and cannot be modified after submission.
4922 *
4923 * @param work_q Address of workqueue.
4924 * @param work Address of triggered work item.
4925 * @param events An array of events which trigger the work.
4926 * @param num_events The number of events in the array.
4927 * @param timeout Timeout after which the work will be scheduled
4928 * for execution even if not triggered.
4929 *
4930 *
4931 * @retval 0 Work item started watching for events.
4932 * @retval -EINVAL Work item is being processed or has completed its work.
4933 * @retval -EADDRINUSE Work item is pending on a different workqueue.
4934 */
4935 int k_work_poll_submit_to_queue(struct k_work_q *work_q,
4936 struct k_work_poll *work,
4937 struct k_poll_event *events,
4938 int num_events,
4939 k_timeout_t timeout);
4940
4941 /**
4942 * @brief Submit a triggered work item to the system workqueue.
4943 *
4944 * This routine schedules work item @a work to be processed by system
4945 * workqueue when one of the given @a events is signaled. The routine
4946 * initiates an internal poller for the work item and then returns to the
4947 * caller. Only when one of the watched events happens is the work item
4948 * actually submitted to the workqueue, at which point it becomes pending.
4949 *
4950 * Submitting a previously submitted triggered work item that is still
4951 * waiting for the event cancels the existing submission and reschedules it
4952 * using the new event list. Note that this behavior is inherently subject
4953 * to race conditions with the pre-existing triggered work item and work queue,
4954 * so care must be taken to synchronize such resubmissions externally.
4955 *
4956 * @funcprops \isr_ok
4957 *
4958 * @warning
4959 * The provided array of events, as well as the triggered work item, must not
4960 * be modified until the item has been processed by the workqueue.
4961 *
4962 * @param work Address of triggered work item.
4963 * @param events An array of events which trigger the work.
4964 * @param num_events The number of events in the array.
4965 * @param timeout Timeout after which the work will be scheduled
4966 * for execution even if not triggered.
4967 *
4968 * @retval 0 Work item started watching for events.
4969 * @retval -EINVAL Work item is being processed or has completed its work.
4970 * @retval -EADDRINUSE Work item is pending on a different workqueue.
4971 */
4972 int k_work_poll_submit(struct k_work_poll *work,
4973 struct k_poll_event *events,
4974 int num_events,
4975 k_timeout_t timeout);
4976
4977 /**
4978 * @brief Cancel a triggered work item.
4979 *
4980 * This routine cancels the submission of triggered work item @a work.
4981 * A triggered work item can only be canceled while no watched event has yet
4982 * triggered its submission to the workqueue.
4983 *
4984 * @funcprops \isr_ok
4985 *
4986 * @param work Address of triggered work item.
4987 *
4988 * @retval 0 Work item canceled.
4989 * @retval -EINVAL Work item is being processed or has completed its work.
4990 */
4991 int k_work_poll_cancel(struct k_work_poll *work);
4992
4993 /** @} */
4994
4995 /**
4996 * @defgroup msgq_apis Message Queue APIs
4997 * @ingroup kernel_apis
4998 * @{
4999 */
5000
5001 /**
5002 * @brief Message Queue Structure
5003 */
5004 struct k_msgq {
5005 /** Message queue wait queue */
5006 _wait_q_t wait_q;
5007 /** Lock */
5008 struct k_spinlock lock;
5009 /** Message size */
5010 size_t msg_size;
5011 /** Maximal number of messages */
5012 uint32_t max_msgs;
5013 /** Start of message buffer */
5014 char *buffer_start;
5015 /** End of message buffer */
5016 char *buffer_end;
5017 /** Read pointer */
5018 char *read_ptr;
5019 /** Write pointer */
5020 char *write_ptr;
5021 /** Number of used messages */
5022 uint32_t used_msgs;
5023
5024 Z_DECL_POLL_EVENT
5025
5026 /** Message queue flags */
5027 uint8_t flags;
5028
5029 SYS_PORT_TRACING_TRACKING_FIELD(k_msgq)
5030
5031 #ifdef CONFIG_OBJ_CORE_MSGQ
5032 struct k_obj_core obj_core;
5033 #endif
5034 };
5035 /**
5036 * @cond INTERNAL_HIDDEN
5037 */
5038
5039
5040 #define Z_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \
5041 { \
5042 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
5043 .lock = {}, \
5044 .msg_size = q_msg_size, \
5045 .max_msgs = q_max_msgs, \
5046 .buffer_start = q_buffer, \
5047 .buffer_end = q_buffer + (q_max_msgs * q_msg_size), \
5048 .read_ptr = q_buffer, \
5049 .write_ptr = q_buffer, \
5050 .used_msgs = 0, \
5051 Z_POLL_EVENT_OBJ_INIT(obj) \
5052 .flags = 0, \
5053 }
5054
5055 /**
5056 * INTERNAL_HIDDEN @endcond
5057 */
5058
5059
5060 #define K_MSGQ_FLAG_ALLOC BIT(0)
5061
5062 /**
5063 * @brief Message Queue Attributes
5064 */
5065 struct k_msgq_attrs {
5066 /** Message Size */
5067 size_t msg_size;
5068 /** Maximal number of messages */
5069 uint32_t max_msgs;
5070 /** Used messages */
5071 uint32_t used_msgs;
5072 };
5073
5074
5075 /**
5076 * @brief Statically define and initialize a message queue.
5077 *
5078 * The message queue's ring buffer contains space for @a q_max_msgs messages,
5079 * each of which is @a q_msg_size bytes long. Alignment of the message queue's
5080 * ring buffer is not necessary; setting @a q_align to 1 is sufficient.
5081 *
5082 * The message queue can be accessed outside the module where it is defined
5083 * using:
5084 *
5085 * @code extern struct k_msgq <name>; @endcode
5086 *
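 * For example, a minimal definition (the message type is hypothetical):
 *
 * @code
 * struct sensor_msg {
 *     int16_t temp;
 * };
 *
 * K_MSGQ_DEFINE(sensor_q, sizeof(struct sensor_msg), 16, 4);
 * @endcode
 *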
5087 * @param q_name Name of the message queue.
5088 * @param q_msg_size Message size (in bytes).
5089 * @param q_max_msgs Maximum number of messages that can be queued.
5090 * @param q_align Alignment of the message queue's ring buffer (power of 2).
5091 *
5092 */
5093 #define K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align) \
5094 static char __noinit __aligned(q_align) \
5095 _k_fifo_buf_##q_name[(q_max_msgs) * (q_msg_size)]; \
5096 STRUCT_SECTION_ITERABLE(k_msgq, q_name) = \
5097 Z_MSGQ_INITIALIZER(q_name, _k_fifo_buf_##q_name, \
5098 (q_msg_size), (q_max_msgs))
5099
5100 /**
5101 * @brief Initialize a message queue.
5102 *
5103 * This routine initializes a message queue object, prior to its first use.
5104 *
5105 * The message queue's ring buffer must contain space for @a max_msgs messages,
5106 * each of which is @a msg_size bytes long. Alignment of the message queue's
5107 * ring buffer is not necessary.
5108 *
5109 * @param msgq Address of the message queue.
5110 * @param buffer Pointer to ring buffer that holds queued messages.
5111 * @param msg_size Message size (in bytes).
5112 * @param max_msgs Maximum number of messages that can be queued.
5113 */
5114 void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
5115 uint32_t max_msgs);
5116
5117 /**
5118 * @brief Initialize a message queue.
5119 *
5120 * This routine initializes a message queue object, prior to its first use,
5121 * allocating its internal ring buffer from the calling thread's resource
5122 * pool.
5123 *
5124 * Memory allocated for the ring buffer can be released by calling
5125 * k_msgq_cleanup(), or if userspace is enabled and the msgq object loses
5126 * all of its references.
5127 *
5128 * @param msgq Address of the message queue.
5129 * @param msg_size Message size (in bytes).
5130 * @param max_msgs Maximum number of messages that can be queued.
5131 *
5132 * @return 0 on success, -ENOMEM if there was insufficient memory in the
5133 * thread's resource pool, or -EINVAL if the size parameters cause
5134 * an integer overflow.
5135 */
5136 __syscall int k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
5137 uint32_t max_msgs);
5138
5139 /**
5140 * @brief Release allocated buffer for a queue
5141 *
5142 * Releases memory allocated for the ring buffer.
5143 *
5144 * @param msgq message queue to cleanup
5145 *
5146 * @retval 0 on success
5147 * @retval -EBUSY Queue not empty
5148 */
5149 int k_msgq_cleanup(struct k_msgq *msgq);
5150
5151 /**
5152 * @brief Send a message to the end of a message queue.
5153 *
5154 * This routine sends a message to message queue @a msgq.
5155 *
5156 * @note The message content is copied from @a data into @a msgq and the @a data
5157 * pointer is not retained, so the message content will not be modified
5158 * by this function.
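 *
 * For example (reusing the hypothetical sensor_q queue from the
 * K_MSGQ_DEFINE() example above):
 *
 * @code
 * struct sensor_msg msg = { .temp = 21 };
 *
 * if (k_msgq_put(&sensor_q, &msg, K_NO_WAIT) != 0) {
 *     // queue was full: the message was not enqueued
 * }
 * @endcode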
5159 *
5160 * @funcprops \isr_ok
5161 *
5162 * @param msgq Address of the message queue.
5163 * @param data Pointer to the message.
5164 * @param timeout Waiting period to add the message, or one of the special
5165 * values K_NO_WAIT and K_FOREVER.
5166 *
5167 * @retval 0 Message sent.
5168 * @retval -ENOMSG Returned without waiting or queue purged.
5169 * @retval -EAGAIN Waiting period timed out.
5170 */
5171 __syscall int k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
5172
5173 /**
5174 * @brief Send a message to the front of a message queue.
5175 *
5176 * This routine sends a message to the beginning (head) of message queue @a msgq.
5177 * Messages sent with this method will be retrieved before any pre-existing
5178 * messages in the queue.
5179 *
5180 * @note If there is no space in the message queue, this function behaves
5181 * the same as k_msgq_put() with a K_NO_WAIT timeout.
5182 *
5183 * @note The message content is copied from @a data into @a msgq and the @a data
5184 * pointer is not retained, so the message content will not be modified
5185 * by this function.
5186 *
5187 * @note k_msgq_put_front() does not block.
5188 *
5189 * @funcprops \isr_ok
5190 *
5191 * @param msgq Address of the message queue.
5192 * @param data Pointer to the message.
5193 *
5194 * @retval 0 Message sent.
5195 * @retval -ENOMSG Returned without waiting or queue purged.
5196 */
5197 __syscall int k_msgq_put_front(struct k_msgq *msgq, const void *data);
5198
5199 /**
5200 * @brief Receive a message from a message queue.
5201 *
5202 * This routine receives a message from message queue @a msgq in a "first in,
5203 * first out" manner.
5204 *
5205 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
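 *
 * A typical receive loop, reusing the hypothetical names from the
 * examples above:
 *
 * @code
 * struct sensor_msg msg;
 *
 * while (k_msgq_get(&sensor_q, &msg, K_FOREVER) == 0) {
 *     // ... consume msg.temp ...
 * }
 * @endcode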
5206 *
5207 * @funcprops \isr_ok
5208 *
5209 * @param msgq Address of the message queue.
5210 * @param data Address of area to hold the received message.
5211 * @param timeout Waiting period to receive the message,
5212 * or one of the special values K_NO_WAIT and
5213 * K_FOREVER.
5214 *
5215 * @retval 0 Message received.
5216 * @retval -ENOMSG Returned without waiting or queue purged.
5217 * @retval -EAGAIN Waiting period timed out.
5218 */
5219 __syscall int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout);
5220
5221 /**
5222 * @brief Peek/read a message from a message queue.
5223 *
5224 * This routine reads a message from message queue @a msgq in a "first in,
5225 * first out" manner and leaves the message in the queue.
5226 *
5227 * @funcprops \isr_ok
5228 *
5229 * @param msgq Address of the message queue.
5230 * @param data Address of area to hold the message read from the queue.
5231 *
5232 * @retval 0 Message read.
5233 * @retval -ENOMSG Returned when the queue has no message.
5234 */
5235 __syscall int k_msgq_peek(struct k_msgq *msgq, void *data);
5236
5237 /**
5238 * @brief Peek/read a message from a message queue at the specified index
5239 *
5240 * This routine reads a message from message queue @a msgq at the specified
5241 * index and leaves the message in the queue.
5242 * k_msgq_peek_at(msgq, data, 0) is equivalent to k_msgq_peek(msgq, data).
5243 *
5244 * @funcprops \isr_ok
5245 *
5246 * @param msgq Address of the message queue.
5247 * @param data Address of area to hold the message read from the queue.
5248 * @param idx Message queue index at which to peek
5249 *
5250 * @retval 0 Message read.
5251 * @retval -ENOMSG Returned when the queue has no message at index.
5252 */
5253 __syscall int k_msgq_peek_at(struct k_msgq *msgq, void *data, uint32_t idx);
5254
5255 /**
5256 * @brief Purge a message queue.
5257 *
5258 * This routine discards all unreceived messages in a message queue's ring
5259 * buffer. Any threads that are blocked waiting to send a message to the
5260 * message queue are unblocked and see an -ENOMSG error code.
5261 *
5262 * @param msgq Address of the message queue.
5263 */
5264 __syscall void k_msgq_purge(struct k_msgq *msgq);
5265
5266 /**
5267 * @brief Get the amount of free space in a message queue.
5268 *
5269 * This routine returns the number of unused entries in a message queue's
5270 * ring buffer.
5271 *
5272 * @param msgq Address of the message queue.
5273 *
5274 * @return Number of unused ring buffer entries.
5275 */
5276 __syscall uint32_t k_msgq_num_free_get(struct k_msgq *msgq);
5277
5278 /**
5279 * @brief Get basic attributes of a message queue.
5280 *
5281 * This routine fetches the basic attributes of a message queue into the @a attrs argument.
5282 *
5283 * @param msgq Address of the message queue.
5284 * @param attrs pointer to message queue attribute structure.
5285 */
5286 __syscall void k_msgq_get_attrs(struct k_msgq *msgq,
5287 struct k_msgq_attrs *attrs);
5288
5289
5290 static inline uint32_t z_impl_k_msgq_num_free_get(struct k_msgq *msgq)
5291 {
5292 return msgq->max_msgs - msgq->used_msgs;
5293 }
5294
5295 /**
5296 * @brief Get the number of messages in a message queue.
5297 *
5298 * This routine returns the number of messages in a message queue's ring buffer.
5299 *
5300 * @param msgq Address of the message queue.
5301 *
5302 * @return Number of messages.
5303 */
5304 __syscall uint32_t k_msgq_num_used_get(struct k_msgq *msgq);
5305
5306 static inline uint32_t z_impl_k_msgq_num_used_get(struct k_msgq *msgq)
5307 {
5308 return msgq->used_msgs;
5309 }
5310
5311 /** @} */
5312
5313 /**
5314 * @defgroup mailbox_apis Mailbox APIs
5315 * @ingroup kernel_apis
5316 * @{
5317 */
5318
5319 /**
5320 * @brief Mailbox Message Structure
5321 *
5322 */
5323 struct k_mbox_msg {
5324 /** size of message (in bytes) */
5325 size_t size;
5326 /** application-defined information value */
5327 uint32_t info;
5328 /** sender's message data buffer */
5329 void *tx_data;
5330 /** source thread id */
5331 k_tid_t rx_source_thread;
5332 /** target thread id */
5333 k_tid_t tx_target_thread;
5334 /** internal use only - thread waiting on send (may be a dummy) */
5335 k_tid_t _syncing_thread;
5336 #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
5337 /** internal use only - semaphore used during asynchronous send */
5338 struct k_sem *_async_sem;
5339 #endif
5340 };
5341 /**
5342 * @brief Mailbox Structure
5343 *
5344 */
5345 struct k_mbox {
5346 /** Transmit messages queue */
5347 _wait_q_t tx_msg_queue;
5348 /** Receive message queue */
5349 _wait_q_t rx_msg_queue;
5350 struct k_spinlock lock;
5351
5352 SYS_PORT_TRACING_TRACKING_FIELD(k_mbox)
5353
5354 #ifdef CONFIG_OBJ_CORE_MAILBOX
5355 struct k_obj_core obj_core;
5356 #endif
5357 };
5358 /**
5359 * @cond INTERNAL_HIDDEN
5360 */
5361
5362 #define Z_MBOX_INITIALIZER(obj) \
5363 { \
5364 .tx_msg_queue = Z_WAIT_Q_INIT(&obj.tx_msg_queue), \
5365 .rx_msg_queue = Z_WAIT_Q_INIT(&obj.rx_msg_queue), \
5366 }
5367
5368 /**
5369 * INTERNAL_HIDDEN @endcond
5370 */
5371
5372 /**
5373 * @brief Statically define and initialize a mailbox.
5374 *
5375 * The mailbox is to be accessed outside the module where it is defined using:
5376 *
5377 * @code extern struct k_mbox <name>; @endcode
5378 *
5379 * @param name Name of the mailbox.
5380 */
5381 #define K_MBOX_DEFINE(name) \
5382 STRUCT_SECTION_ITERABLE(k_mbox, name) = \
5383 Z_MBOX_INITIALIZER(name) \
5384
5385 /**
5386 * @brief Initialize a mailbox.
5387 *
5388 * This routine initializes a mailbox object, prior to its first use.
5389 *
5390 * @param mbox Address of the mailbox.
5391 */
5392 void k_mbox_init(struct k_mbox *mbox);
5393
5394 /**
5395 * @brief Send a mailbox message in a synchronous manner.
5396 *
5397 * This routine sends a message to @a mbox and waits for a receiver to both
5398 * receive and process it. The message data may be in a buffer or non-existent
5399 * (i.e. an empty message).
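 *
 * For example, a minimal sketch (the mailbox name and payload are
 * hypothetical):
 *
 * @code
 * char payload[] = "ping";
 * struct k_mbox_msg tx = {
 *     .size = sizeof(payload),
 *     .tx_data = payload,
 *     .info = 0,
 *     .tx_target_thread = K_ANY,
 * };
 *
 * (void)k_mbox_put(&my_mbox, &tx, K_FOREVER);
 * @endcode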
5400 *
5401 * @param mbox Address of the mailbox.
5402 * @param tx_msg Address of the transmit message descriptor.
5403 * @param timeout Waiting period for the message to be received,
5404 * or one of the special values K_NO_WAIT
5405 * and K_FOREVER. Once the message has been received,
5406 * this routine waits as long as necessary for the message
5407 * to be completely processed.
5408 *
5409 * @retval 0 Message sent.
5410 * @retval -ENOMSG Returned without waiting.
5411 * @retval -EAGAIN Waiting period timed out.
5412 */
5413 int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
5414 k_timeout_t timeout);
5415
5416 /**
5417 * @brief Send a mailbox message in an asynchronous manner.
5418 *
5419 * This routine sends a message to @a mbox without waiting for a receiver
5420 * to process it. The message data may be in a buffer or non-existent
5421 * (i.e. an empty message). Optionally, the semaphore @a sem will be given
5422 * when the message has been both received and completely processed by
5423 * the receiver.
5424 *
5425 * @param mbox Address of the mailbox.
5426 * @param tx_msg Address of the transmit message descriptor.
5427 * @param sem Address of a semaphore, or NULL if none is needed.
5428 */
5429 void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
5430 struct k_sem *sem);
5431
5432 /**
5433 * @brief Receive a mailbox message.
5434 *
5435 * This routine receives a message from @a mbox, then optionally retrieves
5436 * its data and disposes of the message.
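 *
 * For example, a minimal sketch (the mailbox and buffer names are
 * hypothetical):
 *
 * @code
 * char buf[64];
 * struct k_mbox_msg rx = {
 *     .size = sizeof(buf),
 *     .rx_source_thread = K_ANY,
 * };
 *
 * if (k_mbox_get(&my_mbox, &rx, buf, K_FOREVER) == 0) {
 *     // rx.size now holds the number of bytes actually received
 * }
 * @endcode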
5437 *
5438 * @param mbox Address of the mailbox.
5439 * @param rx_msg Address of the receive message descriptor.
5440 * @param buffer Address of the buffer to receive data, or NULL to defer data
5441 * retrieval and message disposal until later.
5442 * @param timeout Waiting period for a message to be received,
5443 * or one of the special values K_NO_WAIT and K_FOREVER.
5444 *
5445 * @retval 0 Message received.
5446 * @retval -ENOMSG Returned without waiting.
5447 * @retval -EAGAIN Waiting period timed out.
5448 */
5449 int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg,
5450 void *buffer, k_timeout_t timeout);
5451
5452 /**
5453 * @brief Retrieve mailbox message data into a buffer.
5454 *
5455 * This routine completes the processing of a received message by retrieving
5456 * its data into a buffer, then disposing of the message.
5457 *
5458 * Alternatively, this routine can be used to dispose of a received message
5459 * without retrieving its data.
5460 *
5461 * @param rx_msg Address of the receive message descriptor.
5462 * @param buffer Address of the buffer to receive data, or NULL to discard
5463 * the data.
5464 */
5465 void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer);
5466
5467 /** @} */
5468
5469 /**
5470 * @defgroup pipe_apis Pipe APIs
5471 * @ingroup kernel_apis
5472 * @{
5473 */
5474
5475 /**
5476 * @brief Initialize a pipe
5477 *
5478 * This routine initializes a pipe object, prior to its first use.
5479 *
5480 * @param pipe Address of the pipe.
5481 * @param buffer Address of the pipe's buffer, or NULL if no ring buffer is used.
5482 * @param buffer_size Size of the pipe's buffer, or zero if no ring buffer is used.
5483 */
5484 __syscall void k_pipe_init(struct k_pipe *pipe, uint8_t *buffer, size_t buffer_size);
5485
5486 enum pipe_flags {
5487 PIPE_FLAG_OPEN = BIT(0),
5488 PIPE_FLAG_RESET = BIT(1),
5489 };
5490
5491 struct k_pipe {
5492 size_t waiting;
5493 struct ring_buf buf;
5494 struct k_spinlock lock;
5495 _wait_q_t data;
5496 _wait_q_t space;
5497 uint8_t flags;
5498
5499 Z_DECL_POLL_EVENT
5500 #ifdef CONFIG_OBJ_CORE_PIPE
5501 struct k_obj_core obj_core;
5502 #endif
5503 SYS_PORT_TRACING_TRACKING_FIELD(k_pipe)
5504 };
5505
5506 /**
5507 * @cond INTERNAL_HIDDEN
5508 */
5509 #define Z_PIPE_INITIALIZER(obj, pipe_buffer, pipe_buffer_size) \
5510 { \
5511 .waiting = 0, \
5512 .buf = RING_BUF_INIT(pipe_buffer, pipe_buffer_size), \
5513 .data = Z_WAIT_Q_INIT(&obj.data), \
5514 .space = Z_WAIT_Q_INIT(&obj.space), \
5515 .flags = PIPE_FLAG_OPEN, \
5516 Z_POLL_EVENT_OBJ_INIT(obj) \
5517 }
5518 /**
5519 * INTERNAL_HIDDEN @endcond
5520 */
5521
5522 /**
5523 * @brief Statically define and initialize a pipe.
5524 *
5525 * The pipe can be accessed outside the module where it is defined using:
5526 *
5527 * @code extern struct k_pipe <name>; @endcode
5528 *
5529 * @param name Name of the pipe.
5530 * @param pipe_buffer_size Size of the pipe's ring buffer (in bytes)
5531 * or zero if no ring buffer is used.
5532 * @param pipe_align Alignment of the pipe's ring buffer (power of 2).
5533 *
5534 */
5535 #define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align) \
5536 static unsigned char __noinit __aligned(pipe_align) \
5537 _k_pipe_buf_##name[pipe_buffer_size]; \
5538 STRUCT_SECTION_ITERABLE(k_pipe, name) = \
5539 Z_PIPE_INITIALIZER(name, _k_pipe_buf_##name, pipe_buffer_size)
5540
5541
5542 /**
5543 * @brief Write data to a pipe
5544 *
5545 * This routine writes up to @a len bytes of data to @a pipe.
5546 * If the pipe is full, the routine will block until the data can be written or the timeout expires.
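 *
 * For example, a minimal sketch assuming a pipe defined with
 * K_PIPE_DEFINE(my_pipe, 64, 4):
 *
 * @code
 * static const uint8_t tx[] = "hello";
 *
 * int n = k_pipe_write(&my_pipe, tx, sizeof(tx), K_MSEC(100));
 * if (n < 0) {
 *     // timed out (-EAGAIN), reset (-ECANCELED) or closed (-EPIPE)
 * }
 * @endcode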
5547 *
5548 * @param pipe Address of the pipe.
5549 * @param data Address of data to write.
5550 * @param len Size of data (in bytes).
5551 * @param timeout Waiting period to wait for the data to be written.
5552 *
5553 * @retval number of bytes written on success
5554 * @retval -EAGAIN if no data could be written before the timeout expired
5555 * @retval -ECANCELED if the write was interrupted by k_pipe_reset(..)
5556 * @retval -EPIPE if the pipe was closed
5557 */
5558 __syscall int k_pipe_write(struct k_pipe *pipe, const uint8_t *data, size_t len,
5559 k_timeout_t timeout);
5560
5561 /**
5562 * @brief Read data from a pipe
 *
5563 * This routine reads up to @a len bytes of data from @a pipe.
5564 * If the pipe is empty, the routine will block until the data can be read or the timeout expires.
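 *
 * For example, continuing the hypothetical my_pipe above:
 *
 * @code
 * uint8_t rx[16];
 *
 * int n = k_pipe_read(&my_pipe, rx, sizeof(rx), K_MSEC(100));
 * if (n > 0) {
 *     // n bytes were copied into rx
 * }
 * @endcode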
5565 *
5566 * @param pipe Address of the pipe.
5567 * @param data Address to place the data read from pipe.
5568 * @param len Requested number of bytes to read.
5569 * @param timeout Waiting period to wait for the data to be read.
5570 *
5571 * @retval number of bytes read on success
5572 * @retval -EAGAIN if no data could be read before the timeout expired
5573 * @retval -ECANCELED if the read was interrupted by k_pipe_reset(..)
5574 * @retval -EPIPE if the pipe was closed
5575 */
5576 __syscall int k_pipe_read(struct k_pipe *pipe, uint8_t *data, size_t len,
5577 k_timeout_t timeout);
5578
5579 /**
5580 * @brief Reset a pipe
 *
5581 * This routine resets the pipe, discarding any unread data and unblocking any threads waiting to
5582 * write or read, causing the waiting threads to return with -ECANCELED. Calling k_pipe_read(..) or
5583 * k_pipe_write(..) when the pipe is resetting but not yet reset will return -ECANCELED.
5584 * The pipe is left open after a reset and can be used as normal.
5585 *
5586 * @param pipe Address of the pipe.
5587 */
5588 __syscall void k_pipe_reset(struct k_pipe *pipe);
5589
5590 /**
5591 * @brief Close a pipe
5592 *
5593 * This routine closes a pipe. Any threads that were blocked on the pipe
5594 * will be unblocked and return with -EPIPE.
5595 *
5596 * @param pipe Address of the pipe.
5597 */
5598 __syscall void k_pipe_close(struct k_pipe *pipe);
5599 /** @} */
5600
5601 /**
5602 * @cond INTERNAL_HIDDEN
5603 */
5604 struct k_mem_slab_info {
5605 uint32_t num_blocks;
5606 size_t block_size;
5607 uint32_t num_used;
5608 #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
5609 uint32_t max_used;
5610 #endif
5611 };
5612
5613 struct k_mem_slab {
5614 _wait_q_t wait_q;
5615 struct k_spinlock lock;
5616 char *buffer;
5617 char *free_list;
5618 struct k_mem_slab_info info;
5619
5620 SYS_PORT_TRACING_TRACKING_FIELD(k_mem_slab)
5621
5622 #ifdef CONFIG_OBJ_CORE_MEM_SLAB
5623 struct k_obj_core obj_core;
5624 #endif
5625 };
5626
5627 #define Z_MEM_SLAB_INITIALIZER(_slab, _slab_buffer, _slab_block_size, \
5628 _slab_num_blocks) \
5629 { \
5630 .wait_q = Z_WAIT_Q_INIT(&(_slab).wait_q), \
5631 .lock = {}, \
5632 .buffer = _slab_buffer, \
5633 .free_list = NULL, \
5634 .info = {_slab_num_blocks, _slab_block_size, 0} \
5635 }
5636
5637
5638 /**
5639 * INTERNAL_HIDDEN @endcond
5640 */
5641
5642 /**
5643 * @defgroup mem_slab_apis Memory Slab APIs
5644 * @ingroup kernel_apis
5645 * @{
5646 */
5647
5648 /**
5649 * @brief Statically define and initialize a memory slab in a user-provided memory section with
5650 * public (non-static) scope.
5651 *
5652 * The memory slab's buffer contains @a slab_num_blocks memory blocks
5653 * that are @a slab_block_size bytes long. The buffer is aligned to a
5654 * @a slab_align -byte boundary. To ensure that each memory block is similarly
5655 * aligned to this boundary, @a slab_block_size must also be a multiple of
5656 * @a slab_align.
5657 *
5658 * The memory slab can be accessed outside the module where it is defined
5659 * using:
5660 *
5661 * @code extern struct k_mem_slab <name>; @endcode
5662 *
5663 * @note This macro cannot be used together with a static keyword.
5664 * If such a use-case is desired, use @ref K_MEM_SLAB_DEFINE_IN_SECT_STATIC
5665 * instead.
5666 *
5667 * @param name Name of the memory slab.
5668 * @param in_section Section attribute specifier such as Z_GENERIC_SECTION.
5669 * @param slab_block_size Size of each memory block (in bytes).
5670 * @param slab_num_blocks Number of memory blocks.
5671 * @param slab_align Alignment of the memory slab's buffer (power of 2).
5672 */
5673 #define K_MEM_SLAB_DEFINE_IN_SECT(name, in_section, slab_block_size, slab_num_blocks, slab_align) \
5674 BUILD_ASSERT(((slab_block_size) % (slab_align)) == 0, \
5675 "slab_block_size must be a multiple of slab_align"); \
5676 BUILD_ASSERT((((slab_align) & ((slab_align) - 1)) == 0), \
5677 "slab_align must be a power of 2"); \
5678 char in_section __aligned(WB_UP( \
5679 slab_align)) _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
5680 STRUCT_SECTION_ITERABLE(k_mem_slab, name) = Z_MEM_SLAB_INITIALIZER( \
5681 name, _k_mem_slab_buf_##name, WB_UP(slab_block_size), slab_num_blocks)
5682
5683 /**
5684 * @brief Statically define and initialize a memory slab in a public (non-static) scope.
5685 *
5686 * The memory slab's buffer contains @a slab_num_blocks memory blocks
5687 * that are @a slab_block_size bytes long. The buffer is aligned to a
5688 * @a slab_align -byte boundary. To ensure that each memory block is similarly
5689 * aligned to this boundary, @a slab_block_size must also be a multiple of
5690 * @a slab_align.
5691 *
5692 * The memory slab can be accessed outside the module where it is defined
5693 * using:
5694 *
5695 * @code extern struct k_mem_slab <name>; @endcode
5696 *
5697 * @note This macro cannot be used together with a static keyword.
5698 * If such a use-case is desired, use @ref K_MEM_SLAB_DEFINE_STATIC
5699 * instead.
5700 *
5701 * @param name Name of the memory slab.
5702 * @param slab_block_size Size of each memory block (in bytes).
5703 * @param slab_num_blocks Number of memory blocks.
5704 * @param slab_align Alignment of the memory slab's buffer (power of 2).
5705 */
5706 #define K_MEM_SLAB_DEFINE(name, slab_block_size, slab_num_blocks, slab_align) \
5707 K_MEM_SLAB_DEFINE_IN_SECT(name, __noinit_named(k_mem_slab_buf_##name), slab_block_size, \
5708 slab_num_blocks, slab_align)
5709
5710 /**
5711 * @brief Statically define and initialize a memory slab in a user-provided memory section with
5712 * private (static) scope.
5713 *
5714 * The memory slab's buffer contains @a slab_num_blocks memory blocks
5715 * that are @a slab_block_size bytes long. The buffer is aligned to a
5716 * @a slab_align -byte boundary. To ensure that each memory block is similarly
5717 * aligned to this boundary, @a slab_block_size must also be a multiple of
5718 * @a slab_align.
5719 *
5720 * @param name Name of the memory slab.
5721 * @param in_section Section attribute specifier such as Z_GENERIC_SECTION.
5722 * @param slab_block_size Size of each memory block (in bytes).
5723 * @param slab_num_blocks Number of memory blocks.
5724 * @param slab_align Alignment of the memory slab's buffer (power of 2).
5725 */
5726 #define K_MEM_SLAB_DEFINE_IN_SECT_STATIC(name, in_section, slab_block_size, slab_num_blocks, \
5727 slab_align) \
5728 BUILD_ASSERT(((slab_block_size) % (slab_align)) == 0, \
5729 "slab_block_size must be a multiple of slab_align"); \
5730 BUILD_ASSERT((((slab_align) & ((slab_align) - 1)) == 0), \
5731 "slab_align must be a power of 2"); \
5732 static char in_section __aligned(WB_UP( \
5733 slab_align)) _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
5734 static STRUCT_SECTION_ITERABLE(k_mem_slab, name) = Z_MEM_SLAB_INITIALIZER( \
5735 name, _k_mem_slab_buf_##name, WB_UP(slab_block_size), slab_num_blocks)
5736
5737 /**
5738 * @brief Statically define and initialize a memory slab in a private (static) scope.
5739 *
5740 * The memory slab's buffer contains @a slab_num_blocks memory blocks
5741 * that are @a slab_block_size bytes long. The buffer is aligned to a
5742 * @a slab_align -byte boundary. To ensure that each memory block is similarly
5743 * aligned to this boundary, @a slab_block_size must also be a multiple of
5744 * @a slab_align.
5745 *
5746 * @param name Name of the memory slab.
5747 * @param slab_block_size Size of each memory block (in bytes).
5748 * @param slab_num_blocks Number of memory blocks.
5749 * @param slab_align Alignment of the memory slab's buffer (power of 2).
5750 */
5751 #define K_MEM_SLAB_DEFINE_STATIC(name, slab_block_size, slab_num_blocks, slab_align) \
5752 K_MEM_SLAB_DEFINE_IN_SECT_STATIC(name, __noinit_named(k_mem_slab_buf_##name), \
5753 slab_block_size, slab_num_blocks, slab_align)
5754
5755 /**
5756 * @brief Initialize a memory slab.
5757 *
5758 * Initializes a memory slab, prior to its first use.
5759 *
5760 * The memory slab's buffer contains @a slab_num_blocks memory blocks
5761 * that are @a slab_block_size bytes long. The buffer must be aligned to an
5762 * N-byte boundary matching a word boundary, where N is a power of 2
5763 * (e.g. 4 on 32-bit systems, 8, 16, ...).
5764 * To ensure that each memory block is similarly aligned to this boundary,
5765 * @a slab_block_size must also be a multiple of N.
5766 *
5767 * @param slab Address of the memory slab.
5768 * @param buffer Pointer to buffer used for the memory blocks.
5769 * @param block_size Size of each memory block (in bytes).
5770 * @param num_blocks Number of memory blocks.
5771 *
5772 * @retval 0 on success
5773 * @retval -EINVAL invalid data supplied
5774 *
5775 */
5776 int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
5777 size_t block_size, uint32_t num_blocks);
5778
5779 /**
5780 * @brief Allocate memory from a memory slab.
5781 *
5782 * This routine allocates a memory block from a memory slab.
5783 *
5784 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5785 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
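 *
 * For example, a minimal sketch (the slab name and geometry are
 * illustrative):
 *
 * @code
 * K_MEM_SLAB_DEFINE_STATIC(my_slab, 64, 8, 8);
 *
 * void *block;
 *
 * if (k_mem_slab_alloc(&my_slab, &block, K_NO_WAIT) == 0) {
 *     // ... use the 64-byte block ...
 *     k_mem_slab_free(&my_slab, block);
 * }
 * @endcode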
5786 *
5787 * @funcprops \isr_ok
5788 *
5789 * @param slab Address of the memory slab.
5790 * @param mem Pointer to block address area.
5791 * @param timeout Waiting period to wait for operation to complete.
5792 * Use K_NO_WAIT to return without waiting,
5793 * or K_FOREVER to wait as long as necessary.
5794 *
5795 * @retval 0 Memory allocated. The block address area pointed at by @a mem
5796 * is set to the starting address of the memory block.
5797 * @retval -ENOMEM Returned without waiting.
5798 * @retval -EAGAIN Waiting period timed out.
5799 * @retval -EINVAL Invalid data supplied
5800 */
5801 int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem,
5802 k_timeout_t timeout);
5803
5804 /**
5805 * @brief Free memory allocated from a memory slab.
5806 *
5807 * This routine releases a previously allocated memory block back to its
5808 * associated memory slab.
5809 *
5810 * @funcprops \isr_ok
5811 *
5812 * @param slab Address of the memory slab.
5813 * @param mem Pointer to the memory block (as returned by k_mem_slab_alloc()).
5814 */
5815 void k_mem_slab_free(struct k_mem_slab *slab, void *mem);
5816
5817 /**
5818 * @brief Get the number of used blocks in a memory slab.
5819 *
5820 * This routine gets the number of memory blocks that are currently
5821 * allocated in @a slab.
5822 *
5823 * @funcprops \isr_ok
5824 *
5825 * @param slab Address of the memory slab.
5826 *
5827 * @return Number of allocated memory blocks.
5828 */
5829 static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
5830 {
5831 return slab->info.num_used;
5832 }
5833
5834 /**
5835 * @brief Get the number of maximum used blocks so far in a memory slab.
5836 *
5837 * This routine gets the maximum number of memory blocks that were
5838 * allocated in @a slab.
5839 *
5840 * @funcprops \isr_ok
5841 *
5842 * @param slab Address of the memory slab.
5843 *
5844 * @return Maximum number of allocated memory blocks.
5845 */
5846 static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
5847 {
5848 #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
5849 return slab->info.max_used;
5850 #else
5851 ARG_UNUSED(slab);
5852 return 0;
5853 #endif
5854 }
5855
5856 /**
5857 * @brief Get the number of unused blocks in a memory slab.
5858 *
5859 * This routine gets the number of memory blocks that are currently
5860 * unallocated in @a slab.
5861 *
5862 * @funcprops \isr_ok
5863 *
5864 * @param slab Address of the memory slab.
5865 *
5866 * @return Number of unallocated memory blocks.
5867 */
5868 static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
5869 {
5870 return slab->info.num_blocks - slab->info.num_used;
5871 }
5872
5873 /**
5874 * @brief Get the memory stats for a memory slab
5875 *
5876 * This routine gets the runtime memory usage stats for the slab @a slab.
5877 *
5878 * @funcprops \isr_ok
5879 *
5880 * @param slab Address of the memory slab
5881 * @param stats Pointer to memory into which to copy memory usage statistics
5882 *
5883 * @retval 0 Success
5884 * @retval -EINVAL Any parameter points to NULL
5885 */
5886
5887 int k_mem_slab_runtime_stats_get(struct k_mem_slab *slab, struct sys_memory_stats *stats);
5888
5889 /**
5890 * @brief Reset the maximum memory usage for a slab
5891 *
5892 * This routine resets the maximum memory usage for the slab @a slab to its
5893 * current usage.
5894 *
5895 * @funcprops \isr_ok
5896 *
5897 * @param slab Address of the memory slab
5898 *
5899 * @retval 0 Success
5900 * @retval -EINVAL Memory slab is NULL
5901 */
5902 int k_mem_slab_runtime_stats_reset_max(struct k_mem_slab *slab);
5903
5904 /** @} */
5905
5906 /**
5907 * @addtogroup heap_apis
5908 * @{
5909 */
5910
5911 /* kernel synchronized heap struct */
5912
5913 struct k_heap {
5914 struct sys_heap heap;
5915 _wait_q_t wait_q;
5916 struct k_spinlock lock;
5917 };
5918
5919 /**
5920 * @brief Initialize a k_heap
5921 *
5922 * This constructs a synchronized k_heap object over a memory region
5923 * specified by the user. Note that while any alignment and size can
5924 * be passed as valid parameters, internal alignment restrictions
5925 * inside the inner sys_heap mean that not all bytes may be usable as
5926 * allocated memory.
5927 *
5928 * @param h Heap struct to initialize
5929 * @param mem Pointer to memory.
5930 * @param bytes Size of memory region, in bytes
5931 */
5932 void k_heap_init(struct k_heap *h, void *mem,
5933 size_t bytes) __attribute_nonnull(1);
5934
5935 /**
5936 * @brief Allocate aligned memory from a k_heap
5937 *
5938 * Behaves in all ways like k_heap_alloc(), except that the returned
5939 * memory (if available) will have a starting address in memory which
5940 * is a multiple of the specified power-of-two alignment value in
5941 * bytes. The resulting memory can be returned to the heap using
5942 * k_heap_free().
5943 *
5944 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5945 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5946 *
5947 * @funcprops \isr_ok
5948 *
5949 * @param h Heap from which to allocate
5950 * @param align Alignment in bytes, must be a power of two
5951 * @param bytes Number of bytes requested
5952 * @param timeout How long to wait, or K_NO_WAIT
5953 * @return Pointer to memory the caller can now use
5954 */
5955 void *k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes,
5956 k_timeout_t timeout) __attribute_nonnull(1);
5957
5958 /**
5959 * @brief Allocate memory from a k_heap
5960 *
5961 * Allocates and returns a memory buffer from the memory region owned
5962 * by the heap. If no memory is available immediately, the call will
5963 * block for the specified timeout (constructed via the standard
5964 * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
5965 * freed. If the allocation cannot be performed by the expiration of
5966 * the timeout, NULL will be returned.
5967 * Allocated memory is aligned on a multiple of pointer sizes.
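 *
 * For example, a minimal sketch (the heap name and sizes are
 * illustrative):
 *
 * @code
 * K_HEAP_DEFINE(my_heap, 1024);
 *
 * void *mem = k_heap_alloc(&my_heap, 128, K_NO_WAIT);
 *
 * if (mem != NULL) {
 *     // ... use the buffer ...
 *     k_heap_free(&my_heap, mem);
 * }
 * @endcode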
5968 *
5969 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5970 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5971 *
5972 * @funcprops \isr_ok
5973 *
5974 * @param h Heap from which to allocate
5975 * @param bytes Desired size of block to allocate
5976 * @param timeout How long to wait, or K_NO_WAIT
5977 * @return A pointer to valid heap memory, or NULL
5978 */
5979 void *k_heap_alloc(struct k_heap *h, size_t bytes,
5980 k_timeout_t timeout) __attribute_nonnull(1);
5981
5982 /**
5983 * @brief Allocate and initialize memory for an array of objects from a k_heap
5984 *
5985 * Allocates memory for an array of @a num objects of @a size bytes each and
5986 * initializes all bytes in the allocated storage to zero. If no memory is available
5987 * immediately, the call will block for the specified timeout (constructed
5988 * via the standard timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory
5989 * to be freed. If the allocation cannot be performed by the expiration of
5990 * the timeout, NULL will be returned.
5991 * Allocated memory is aligned on a multiple of pointer sizes.
5992 *
5993 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5994 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5995 *
5996 * @funcprops \isr_ok
5997 *
5998 * @param h Heap from which to allocate
5999 * @param num Number of objects to allocate
6000 * @param size Desired size of each object to allocate
6001 * @param timeout How long to wait, or K_NO_WAIT
6002 * @return A pointer to valid heap memory, or NULL
6003 */
6004 void *k_heap_calloc(struct k_heap *h, size_t num, size_t size, k_timeout_t timeout)
6005 __attribute_nonnull(1);
6006
6007 /**
6008 * @brief Reallocate memory from a k_heap
6009 *
6010 * Reallocates and returns a memory buffer from the memory region owned
6011 * by the heap. If no memory is available immediately, the call will
6012 * block for the specified timeout (constructed via the standard
6013 * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
6014 * freed. If the allocation cannot be performed by the expiration of
6015 * the timeout, NULL will be returned.
6016 * Reallocated memory is aligned on a multiple of pointer sizes.
6017 *
6018 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
6019 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
6020 *
6021 * @funcprops \isr_ok
6022 *
6023 * @param h Heap from which to allocate
6024 * @param ptr Original pointer returned from a previous allocation
6025 * @param bytes Desired size of block to allocate
6026 * @param timeout How long to wait, or K_NO_WAIT
6027 *
6028 * @return Pointer to memory the caller can now use, or NULL
6029 */
6030 void *k_heap_realloc(struct k_heap *h, void *ptr, size_t bytes, k_timeout_t timeout)
6031 __attribute_nonnull(1);
6032
6033 /**
6034 * @brief Free memory allocated by k_heap_alloc()
6035 *
6036 * Returns the specified memory block, which must have been returned
6037 * from k_heap_alloc(), to the heap for use by other callers. Passing
6038 * a NULL block is legal, and has no effect.
6039 *
6040 * @param h Heap to which to return the memory
6041 * @param mem A valid memory block, or NULL
6042 */
6043 void k_heap_free(struct k_heap *h, void *mem) __attribute_nonnull(1);
6044
6045 /* Minimum heap sizes needed to return a successful 1-byte allocation.
6046 * Assumes a chunk-aligned (8-byte) memory buffer.
6047 */
6048 #ifdef CONFIG_SYS_HEAP_RUNTIME_STATS
6049 #define Z_HEAP_MIN_SIZE ((sizeof(void *) > 4) ? 80 : 52)
6050 #else
6051 #define Z_HEAP_MIN_SIZE ((sizeof(void *) > 4) ? 56 : 44)
6052 #endif /* CONFIG_SYS_HEAP_RUNTIME_STATS */
6053
6054 /**
6055 * @brief Define a static k_heap in the specified linker section
6056 *
6057 * This macro defines and initializes a static memory region and
6058 * k_heap of the requested size in the specified linker section.
6059 * After kernel start, &name can be used as if k_heap_init() had
6060 * been called.
6061 *
6062 * Note that this macro enforces a minimum size on the memory region
6063 * to accommodate metadata requirements. Very small heaps will be
6064 * padded to fit.
6065 *
6066 * @param name Symbol name for the struct k_heap object
6067 * @param bytes Size of memory region, in bytes
6068 * @param in_section Section attribute specifier such as Z_GENERIC_SECTION.
6069 */
6070 #define Z_HEAP_DEFINE_IN_SECT(name, bytes, in_section) \
6071 char in_section \
6072 __aligned(8) /* CHUNK_UNIT */ \
6073 kheap_##name[MAX(bytes, Z_HEAP_MIN_SIZE)]; \
6074 STRUCT_SECTION_ITERABLE(k_heap, name) = { \
6075 .heap = { \
6076 .init_mem = kheap_##name, \
6077 .init_bytes = MAX(bytes, Z_HEAP_MIN_SIZE), \
6078 }, \
6079 }
6080
6081 /**
6082 * @brief Define a static k_heap
6083 *
6084 * This macro defines and initializes a static memory region and
6085 * k_heap of the requested size. After kernel start, &name can be
6086 * used as if k_heap_init() had been called.
6087 *
6088 * Note that this macro enforces a minimum size on the memory region
6089 * to accommodate metadata requirements. Very small heaps will be
6090 * padded to fit.
6091 *
6092 * @param name Symbol name for the struct k_heap object
6093 * @param bytes Size of memory region, in bytes
6094 */
6095 #define K_HEAP_DEFINE(name, bytes) \
6096 Z_HEAP_DEFINE_IN_SECT(name, bytes, \
6097 __noinit_named(kheap_buf_##name))
6098
6099 /**
6100 * @brief Define a static k_heap in uncached memory
6101 *
6102 * This macro defines and initializes a static memory region and
6103 * k_heap of the requested size in uncached memory. After kernel
6104 * start, &name can be used as if k_heap_init() had been called.
6105 *
6106 * Note that this macro enforces a minimum size on the memory region
6107 * to accommodate metadata requirements. Very small heaps will be
6108 * padded to fit.
6109 *
6110 * @param name Symbol name for the struct k_heap object
6111 * @param bytes Size of memory region, in bytes
6112 */
6113 #define K_HEAP_DEFINE_NOCACHE(name, bytes) \
6114 Z_HEAP_DEFINE_IN_SECT(name, bytes, __nocache)
6115
6116 /** @brief Get the array of statically defined heaps
6117 *
6118 * Returns the pointer to the start of the static heap array.
6119 * Static heaps are those declared through one of the `K_HEAP_DEFINE`
6120 * macros.
6121 *
6122 * @param heap Pointer to location where heap array address is written
6123 * @return Number of static heaps
6124 */
6125 int k_heap_array_get(struct k_heap **heap);
6126
6127 /**
6128 * @}
6129 */
6130
6131 /**
6132 * @defgroup heap_apis Heap APIs
6133 * @brief Memory allocation from the Heap
6134 * @ingroup kernel_apis
6135 * @{
6136 */
6137
6138 /**
6139 * @brief Allocate memory from the heap with a specified alignment.
6140 *
6141 * This routine provides semantics similar to aligned_alloc(); memory is
6142 * allocated from the heap with a specified alignment. However, one minor
6143 * difference is that k_aligned_alloc() accepts any non-zero @p size,
6144 * whereas aligned_alloc() only accepts a @p size that is an integral
6145 * multiple of @p align.
6146 *
6147 * Above, aligned_alloc() refers to:
6148 * C11 standard (ISO/IEC 9899:2011): 7.22.3.1
6149 * The aligned_alloc function (p: 347-348)
6150 *
6151 * @param align Alignment of memory requested (in bytes).
6152 * @param size Amount of memory requested (in bytes).
6153 *
6154 * @return Address of the allocated memory if successful; otherwise NULL.
6155 */
6156 void *k_aligned_alloc(size_t align, size_t size);
6157
6158 /**
6159 * @brief Allocate memory from the heap.
6160 *
6161 * This routine provides traditional malloc() semantics. Memory is
6162 * allocated from the heap memory pool.
6163 * Allocated memory is aligned on a multiple of pointer sizes.
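 *
 * For example:
 *
 * @code
 * uint8_t *buf = k_malloc(64);
 *
 * if (buf != NULL) {
 *     // ... use the buffer ...
 *     k_free(buf);
 * }
 * @endcode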
6164 *
6165 * @param size Amount of memory requested (in bytes).
6166 *
6167 * @return Address of the allocated memory if successful; otherwise NULL.
6168 */
6169 void *k_malloc(size_t size);
6170
6171 /**
6172 * @brief Free memory allocated from heap.
6173 *
6174 * This routine provides traditional free() semantics. The memory being
6175 * returned must have been allocated from the heap memory pool.
6176 *
6177 * If @a ptr is NULL, no operation is performed.
6178 *
6179 * @param ptr Pointer to previously allocated memory.
6180 */
6181 void k_free(void *ptr);
6182
6183 /**
6184 * @brief Allocate memory from heap, array style
6185 *
6186 * This routine provides traditional calloc() semantics. Memory is
6187 * allocated from the heap memory pool and zeroed.
6188 *
6189 * @param nmemb Number of elements in the requested array
6190 * @param size Size of each array element (in bytes).
6191 *
6192 * @return Address of the allocated memory if successful; otherwise NULL.
6193 */
6194 void *k_calloc(size_t nmemb, size_t size);
6195
6196 /** @brief Expand the size of an existing allocation
6197 *
6198 * Returns a pointer to a new memory region with the same contents,
6199 * but a different allocated size. If the new allocation can be
6200 * expanded in place, the pointer returned will be identical.
6201 * Otherwise the data will be copied to a new block and the old one
6202 * will be freed as per sys_heap_free(). If the specified size is
6203 * smaller than the original, the block will be truncated in place and
6204 * the remaining memory returned to the heap. If the allocation of a
6205 * new block fails, then NULL will be returned and the old block will
6206 * not be freed or modified.
6207 *
6208 * @param ptr Original pointer returned from a previous allocation
6209 * @param size Amount of memory requested (in bytes).
6210 *
6211 * @return Pointer to memory the caller can now use, or NULL.
6212 */
6213 void *k_realloc(void *ptr, size_t size);
6214
6215 /** @} */
6216
6217 /* polling API - PRIVATE */
6218
6219 #ifdef CONFIG_POLL
6220 #define _INIT_OBJ_POLL_EVENT(obj) do { (obj)->poll_event = NULL; } while (false)
6221 #else
6222 #define _INIT_OBJ_POLL_EVENT(obj) do { } while (false)
6223 #endif
6224
6225 /* private - types bit positions */
6226 enum _poll_types_bits {
6227 /* can be used to ignore an event */
6228 _POLL_TYPE_IGNORE,
6229
6230 /* to be signaled by k_poll_signal_raise() */
6231 _POLL_TYPE_SIGNAL,
6232
6233 /* semaphore availability */
6234 _POLL_TYPE_SEM_AVAILABLE,
6235
6236 /* queue/FIFO/LIFO data availability */
6237 _POLL_TYPE_DATA_AVAILABLE,
6238
6239 /* msgq data availability */
6240 _POLL_TYPE_MSGQ_DATA_AVAILABLE,
6241
6242 /* pipe data availability */
6243 _POLL_TYPE_PIPE_DATA_AVAILABLE,
6244
6245 _POLL_NUM_TYPES
6246 };
6247
6248 #define Z_POLL_TYPE_BIT(type) (1U << ((type) - 1U))
6249
6250 /* private - states bit positions */
6251 enum _poll_states_bits {
6252 /* default state when creating event */
6253 _POLL_STATE_NOT_READY,
6254
6255 /* signaled by k_poll_signal_raise() */
6256 _POLL_STATE_SIGNALED,
6257
6258 /* semaphore is available */
6259 _POLL_STATE_SEM_AVAILABLE,
6260
6261 /* data is available to read on queue/FIFO/LIFO */
6262 _POLL_STATE_DATA_AVAILABLE,
6263
6264 /* queue/FIFO/LIFO wait was cancelled */
6265 _POLL_STATE_CANCELLED,
6266
6267 /* data is available to read on a message queue */
6268 _POLL_STATE_MSGQ_DATA_AVAILABLE,
6269
6270 /* data is available to read from a pipe */
6271 _POLL_STATE_PIPE_DATA_AVAILABLE,
6272
6273 _POLL_NUM_STATES
6274 };
6275
6276 #define Z_POLL_STATE_BIT(state) (1U << ((state) - 1U))
6277
6278 #define _POLL_EVENT_NUM_UNUSED_BITS \
6279 (32 - (0 \
6280 + 8 /* tag */ \
6281 + _POLL_NUM_TYPES \
6282 + _POLL_NUM_STATES \
6283 + 1 /* modes */ \
6284 ))
6285
6286 /* end of polling API - PRIVATE */
6287
6288
6289 /**
6290 * @defgroup poll_apis Async polling APIs
6291 * @brief An API to wait concurrently for any one of multiple conditions to be
6292 * fulfilled
6293 * @ingroup kernel_apis
6294 * @{
6295 */
6296
6297 /* Public polling API */
6298
6299 /* public - values for k_poll_event.type bitfield */
6300 #define K_POLL_TYPE_IGNORE 0
6301 #define K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
6302 #define K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
6303 #define K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
6304 #define K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE
6305 #define K_POLL_TYPE_MSGQ_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_MSGQ_DATA_AVAILABLE)
6306 #define K_POLL_TYPE_PIPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_PIPE_DATA_AVAILABLE)
6307
6308 /* public - polling modes */
6309 enum k_poll_modes {
6310 /* polling thread does not take ownership of objects when available */
6311 K_POLL_MODE_NOTIFY_ONLY = 0,
6312
6313 K_POLL_NUM_MODES
6314 };
6315
6316 /* public - values for k_poll_event.state bitfield */
6317 #define K_POLL_STATE_NOT_READY 0
6318 #define K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED)
6319 #define K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
6320 #define K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
6321 #define K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE
6322 #define K_POLL_STATE_MSGQ_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_MSGQ_DATA_AVAILABLE)
6323 #define K_POLL_STATE_PIPE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_PIPE_DATA_AVAILABLE)
6324 #define K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED)
6325
6326 /* public - poll signal object */
6327 struct k_poll_signal {
6328 /** PRIVATE - DO NOT TOUCH */
6329 sys_dlist_t poll_events;
6330
6331 /**
6332 * 1 if the event has been signaled, 0 otherwise. Stays set to 1 until
6333 * user resets it to 0.
6334 */
6335 unsigned int signaled;
6336
6337 /** custom result value passed to k_poll_signal_raise() if needed */
6338 int result;
6339 };
6340
6341 #define K_POLL_SIGNAL_INITIALIZER(obj) \
6342 { \
6343 .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events), \
6344 .signaled = 0, \
6345 .result = 0, \
6346 }
6347 /**
6348 * @brief Poll Event
6349 *
6350 */
6351 struct k_poll_event {
6352 /** PRIVATE - DO NOT TOUCH */
6353 sys_dnode_t _node;
6354
6355 /** PRIVATE - DO NOT TOUCH */
6356 struct z_poller *poller;
6357
6358 /** optional user-specified tag, opaque, untouched by the API */
6359 uint32_t tag:8;
6360
6361 /** bitfield of event types (bitwise-ORed K_POLL_TYPE_xxx values) */
6362 uint32_t type:_POLL_NUM_TYPES;
6363
6364 /** bitfield of event states (bitwise-ORed K_POLL_STATE_xxx values) */
6365 uint32_t state:_POLL_NUM_STATES;
6366
6367 /** mode of operation, from enum k_poll_modes */
6368 uint32_t mode:1;
6369
6370 /** unused bits in 32-bit word */
6371 uint32_t unused:_POLL_EVENT_NUM_UNUSED_BITS;
6372
6373 /** per-type data */
6374 union {
6375 /* The typed_* fields below are used by K_POLL_EVENT_*INITIALIZER() macros to ensure
6376 * type safety of polled objects.
6377 */
6378 void *obj, *typed_K_POLL_TYPE_IGNORE;
6379 struct k_poll_signal *signal, *typed_K_POLL_TYPE_SIGNAL;
6380 struct k_sem *sem, *typed_K_POLL_TYPE_SEM_AVAILABLE;
6381 struct k_fifo *fifo, *typed_K_POLL_TYPE_FIFO_DATA_AVAILABLE;
6382 struct k_queue *queue, *typed_K_POLL_TYPE_DATA_AVAILABLE;
6383 struct k_msgq *msgq, *typed_K_POLL_TYPE_MSGQ_DATA_AVAILABLE;
6384 struct k_pipe *pipe, *typed_K_POLL_TYPE_PIPE_DATA_AVAILABLE;
6385 };
6386 };
6387
6388 #define K_POLL_EVENT_INITIALIZER(_event_type, _event_mode, _event_obj) \
6389 { \
6390 .poller = NULL, \
6391 .type = _event_type, \
6392 .state = K_POLL_STATE_NOT_READY, \
6393 .mode = _event_mode, \
6394 .unused = 0, \
6395 { \
6396 .typed_##_event_type = _event_obj, \
6397 }, \
6398 }
6399
6400 #define K_POLL_EVENT_STATIC_INITIALIZER(_event_type, _event_mode, _event_obj, \
6401 event_tag) \
6402 { \
6403 .tag = event_tag, \
6404 .type = _event_type, \
6405 .state = K_POLL_STATE_NOT_READY, \
6406 .mode = _event_mode, \
6407 .unused = 0, \
6408 { \
6409 .typed_##_event_type = _event_obj, \
6410 }, \
6411 }
6412
6413 /**
6414 * @brief Initialize one struct k_poll_event instance
6415 *
6416 * After this routine is called on a poll event, the event is ready to be
6417 * placed in an event array to be passed to k_poll().
6418 *
6419 * @param event The event to initialize.
6420 * @param type A bitfield of the types of event, from the K_POLL_TYPE_xxx
6421 * values. Only values that apply to the same object being polled
6422 * can be used together. Choosing K_POLL_TYPE_IGNORE disables the
6423 * event.
6424 * @param mode Future. Use K_POLL_MODE_NOTIFY_ONLY.
6425 * @param obj Kernel object or poll signal.
6426 */
6427
6428 void k_poll_event_init(struct k_poll_event *event, uint32_t type,
6429 int mode, void *obj);
6430
/**
 * @brief Wait for one or many of multiple poll events to occur
 *
 * This routine allows a thread to wait concurrently on multiple poll events,
 * returning when one or more of them have occurred. Such an event can be a
 * kernel object becoming available, like a semaphore, or a poll signal event.
 *
 * When an event notifies that a kernel object is available, the kernel object
 * is not "given" to the thread calling k_poll(): it merely signals the fact
 * that the object was available when the k_poll() call was in effect. Also,
 * all threads trying to acquire an object the regular way, i.e. by pending on
 * the object, have precedence over the thread polling on the object. This
 * means that the polling thread will never get the poll event on an object
 * until the object becomes available and its pend queue is empty. For this
 * reason, the k_poll() call is most effective when the objects being polled
 * only have one thread, the polling thread, trying to acquire them.
 *
 * When k_poll() returns 0, the caller should loop over all the events that
 * were passed to k_poll(), check the state field for the expected values,
 * and take the associated actions.
 *
 * Before an event is reused for another call to k_poll(), the user has to
 * reset its state field to K_POLL_STATE_NOT_READY.
 *
 * When called from user mode, a temporary memory allocation is required from
 * the caller's resource pool.
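 *
 * For example, a sketch that waits on a semaphore and a FIFO at once
 * (my_sem and my_fifo are hypothetical, already-initialized objects):
 *
 * @code{.c}
 * struct k_poll_event events[2];
 *
 * k_poll_event_init(&events[0], K_POLL_TYPE_SEM_AVAILABLE,
 *		     K_POLL_MODE_NOTIFY_ONLY, &my_sem);
 * k_poll_event_init(&events[1], K_POLL_TYPE_FIFO_DATA_AVAILABLE,
 *		     K_POLL_MODE_NOTIFY_ONLY, &my_fifo);
 *
 * if (k_poll(events, 2, K_FOREVER) == 0) {
 *	if (events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
 *		k_sem_take(&my_sem, K_NO_WAIT);
 *	}
 *	if (events[1].state == K_POLL_STATE_FIFO_DATA_AVAILABLE) {
 *		void *msg = k_fifo_get(&my_fifo, K_NO_WAIT);
 *		// ... process msg ...
 *	}
 *	// Reset states before reusing the events array.
 *	events[0].state = K_POLL_STATE_NOT_READY;
 *	events[1].state = K_POLL_STATE_NOT_READY;
 * }
 * @endcode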
 *
 * @param events An array of events to be polled for.
 * @param num_events The number of events in the array.
 * @param timeout Waiting period for an event to be ready,
 *                or one of the special values K_NO_WAIT and K_FOREVER.
 *
 * @retval 0 One or more events are ready.
 * @retval -EAGAIN Waiting period timed out.
 * @retval -EINTR Polling has been interrupted, e.g. with
 *         k_queue_cancel_wait(). All output events are still set and valid,
 *         and cancelled event(s) will be set to K_POLL_STATE_CANCELLED. In
 *         other words, an -EINTR status means that at least one of the output
 *         events is K_POLL_STATE_CANCELLED.
 * @retval -ENOMEM Insufficient memory in the thread's resource pool
 *         (user mode only)
 * @retval -EINVAL Bad parameters (user mode only)
 */

__syscall int k_poll(struct k_poll_event *events, int num_events,
		     k_timeout_t timeout);

6476
6477 /**
6478 * @brief Initialize a poll signal object.
6479 *
6480 * Ready a poll signal object to be signaled via k_poll_signal_raise().
6481 *
6482 * @param sig A poll signal.
6483 */
6484
6485 __syscall void k_poll_signal_init(struct k_poll_signal *sig);
6486
/**
 * @brief Reset a poll signal object's state to unsignaled.
 *
 * @param sig A poll signal object
 */
__syscall void k_poll_signal_reset(struct k_poll_signal *sig);

/**
 * @brief Fetch the signaled state and result value of a poll signal
 *
 * @param sig A poll signal object
 * @param signaled An integer buffer which will be written nonzero if the
 *                 object was signaled
 * @param result An integer destination buffer which will be written with the
 *               result value if the object was signaled, or an undefined
 *               value if it was not.
 */
__syscall void k_poll_signal_check(struct k_poll_signal *sig,
				   unsigned int *signaled, int *result);

/**
 * @brief Signal a poll signal object.
 *
 * This routine makes ready a poll signal, which is basically a poll event of
 * type K_POLL_TYPE_SIGNAL. If a thread was polling on that event, it will be
 * made ready to run. A @a result value can be specified.
 *
 * The poll signal contains a 'signaled' field that, when set by
 * k_poll_signal_raise(), stays set until the user sets it back to 0 with
 * k_poll_signal_reset(). It thus has to be reset by the user before being
 * passed again to k_poll(), or k_poll() will consider it to be signaled and
 * will return immediately.
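 *
 * A minimal sketch of the signal lifecycle (raising may happen in another
 * thread or an ISR):
 *
 * @code{.c}
 * struct k_poll_signal sig;
 * struct k_poll_event events[1];
 *
 * k_poll_signal_init(&sig);
 * k_poll_event_init(&events[0], K_POLL_TYPE_SIGNAL,
 *		     K_POLL_MODE_NOTIFY_ONLY, &sig);
 *
 * // elsewhere: k_poll_signal_raise(&sig, 0x1337);
 *
 * k_poll(events, 1, K_FOREVER);
 *
 * // Reset both the signal and the event state before polling again.
 * k_poll_signal_reset(&sig);
 * events[0].state = K_POLL_STATE_NOT_READY;
 * @endcode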
 *
 * @note The result is stored and the 'signaled' field is set even if
 * this function returns an error indicating that an expiring poll was
 * not notified. The next k_poll() will detect the missed raise.
 *
 * @param sig A poll signal.
 * @param result The value to store in the result field of the signal.
 *
 * @retval 0 The signal was delivered successfully.
 * @retval -EAGAIN The polling thread's timeout is in the process of expiring.
 */

__syscall int k_poll_signal_raise(struct k_poll_signal *sig, int result);

/** @} */

/**
 * @defgroup cpu_idle_apis CPU Idling APIs
 * @ingroup kernel_apis
 * @{
 */
/**
 * @brief Make the CPU idle.
 *
 * This function makes the CPU idle until an event wakes it up.
 *
 * In a regular system, the idle thread should be the only thread responsible
 * for making the CPU idle and triggering any type of power management.
 * However, in some more constrained systems, such as a single-threaded system,
 * the only thread may need to take on this responsibility itself.
 *
 * @note On some architectures, before returning, this function unmasks
 * interrupts unconditionally.
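 *
 * For example, a sketch of a single-threaded main loop (work_pending() and
 * do_work() are hypothetical application helpers):
 *
 * @code{.c}
 * for (;;) {
 *	while (work_pending()) {
 *		do_work();
 *	}
 *	k_cpu_idle();
 * }
 * @endcode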
 */
static inline void k_cpu_idle(void)
{
	arch_cpu_idle();
}

/**
 * @brief Make the CPU idle in an atomic fashion.
 *
 * Similar to k_cpu_idle(), but must be called with interrupts locked.
 *
 * Enabling interrupts and entering a low-power mode will be atomic,
 * i.e. there will be no period of time where interrupts are enabled before
 * the processor enters a low-power mode.
 *
 * After waking up from the low-power mode, the interrupt lockout state will
 * be restored as if by irq_unlock(key).
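 *
 * A usage sketch of the classic wait pattern (wake_flag is a hypothetical
 * volatile flag set from an ISR):
 *
 * @code{.c}
 * unsigned int key = irq_lock();
 *
 * while (!wake_flag) {
 *	// Sleeps with the race window closed: interrupts are re-enabled and
 *	// the CPU idled atomically, then the lockout state is restored.
 *	k_cpu_atomic_idle(key);
 *	key = irq_lock();
 * }
 * irq_unlock(key);
 * @endcode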
 *
 * @param key Interrupt locking key obtained from irq_lock().
 */
static inline void k_cpu_atomic_idle(unsigned int key)
{
	arch_cpu_atomic_idle(key);
}

/**
 * @}
 */

/**
 * @cond INTERNAL_HIDDEN
 * @internal
 */
#ifdef ARCH_EXCEPT
/* This architecture has direct support for triggering a CPU exception */
#define z_except_reason(reason)	ARCH_EXCEPT(reason)
#else

#if !defined(CONFIG_ASSERT_NO_FILE_INFO)
#define __EXCEPT_LOC() __ASSERT_PRINT("@ %s:%d\n", __FILE__, __LINE__)
#else
#define __EXCEPT_LOC()
#endif

/* NOTE: This is the implementation for arches that do not implement
 * ARCH_EXCEPT() to generate a real CPU exception.
 *
 * We won't have a real exception frame to determine the PC value when
 * the oops occurred, so print file and line number before we jump into
 * the fatal error handler.
 */
#define z_except_reason(reason) do { \
		__EXCEPT_LOC(); \
		z_fatal_error(reason, NULL); \
	} while (false)

#endif /* ARCH_EXCEPT */
/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @brief Fatally terminate a thread
 *
 * This should be called when a thread has encountered an unrecoverable
 * runtime condition and needs to terminate. What this ultimately
 * means is determined by the _fatal_error_handler() implementation, which
 * will be called with reason code K_ERR_KERNEL_OOPS.
 *
 * If this is called from ISR context, the default system fatal error handler
 * will treat it as an unrecoverable system error, just like k_panic().
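 *
 * A hypothetical sketch of a fault path:
 *
 * @code{.c}
 * if (load_critical_config() != 0) {	// hypothetical helper
 *	k_oops();			// this thread cannot continue
 * }
 * @endcode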
 */
#define k_oops()	z_except_reason(K_ERR_KERNEL_OOPS)

/**
 * @brief Fatally terminate the system
 *
 * This should be called when the Zephyr kernel has encountered an
 * unrecoverable runtime condition and needs to terminate. What this ultimately
 * means is determined by the _fatal_error_handler() implementation, which
 * will be called with reason code K_ERR_KERNEL_PANIC.
 */
#define k_panic()	z_except_reason(K_ERR_KERNEL_PANIC)

/**
 * @cond INTERNAL_HIDDEN
 */

/*
 * private APIs that are utilized by one or more public APIs
 */

/**
 * @internal
 */
void z_timer_expiration_handler(struct _timeout *timeout);
/**
 * INTERNAL_HIDDEN @endcond
 */

#ifdef CONFIG_PRINTK
/**
 * @brief Emit a character buffer to the console device
 *
 * @param c Buffer of characters to print
 * @param n The number of characters to print
 */
__syscall void k_str_out(char *c, size_t n);
#endif

/**
 * @defgroup float_apis Floating Point APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * @brief Disable preservation of floating point context information.
 *
 * This routine informs the kernel that the specified thread
 * will no longer be using the floating point registers.
 *
 * @warning
 * Some architectures apply restrictions on how the disabling of floating
 * point preservation may be requested, see arch_float_disable.
 *
 * @warning
 * This routine should only be used to disable floating point support for
 * a thread that currently has such support enabled.
 *
 * @param thread ID of thread.
 *
 * @retval 0 On success.
 * @retval -ENOTSUP If the floating point disabling is not implemented.
 * @retval -EINVAL If the floating point disabling could not be performed.
 */
__syscall int k_float_disable(struct k_thread *thread);

/**
 * @brief Enable preservation of floating point context information.
 *
 * This routine informs the kernel that the specified thread
 * will use the floating point registers.
 *
 * Invoking this routine initializes the thread's floating point context info
 * to that of an FPU that has been reset. The next time the thread is scheduled
 * by z_swap() it will either inherit an FPU that is guaranteed to be in a
 * "sane" state (if the most recent user of the FPU was cooperatively swapped
 * out) or the thread's own floating point context will be loaded (if the most
 * recent user of the FPU was preempted, or if this thread is the first user
 * of the FPU). Thereafter, the kernel will protect the thread's FP context
 * so that it is not altered during a preemptive context switch.
 *
 * The @a options parameter indicates which floating point register sets will
 * be used by the specified thread.
 *
 * For x86 options:
 *
 * - K_FP_REGS indicates x87 FPU and MMX registers only
 * - K_SSE_REGS indicates SSE registers (and also x87 FPU and MMX registers)
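 *
 * For example, a sketch for x86 (tid is a hypothetical k_tid_t for a thread
 * that does not yet have floating point support enabled):
 *
 * @code{.c}
 * if (k_float_enable(tid, K_FP_REGS) != 0) {
 *	// FP context preservation not available for this thread
 * }
 * @endcode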
 *
 * @warning
 * Some architectures apply restrictions on how the enabling of floating
 * point preservation may be requested, see arch_float_enable.
 *
 * @warning
 * This routine should only be used to enable floating point support for
 * a thread that does not currently have such support enabled.
 *
 * @param thread ID of thread.
 * @param options architecture dependent options
 *
 * @retval 0 On success.
 * @retval -ENOTSUP If the floating point enabling is not implemented.
 * @retval -EINVAL If the floating point enabling could not be performed.
 */
__syscall int k_float_enable(struct k_thread *thread, unsigned int options);

/**
 * @}
 */

/**
 * @brief Get the runtime statistics of a thread
 *
 * @param thread ID of thread.
 * @param stats Pointer to struct to copy statistics into.
 * @return -EINVAL if either parameter is NULL, otherwise 0
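 *
 * A usage sketch, printing the thread's cumulative cycle count (tid is a
 * hypothetical thread ID; execution_cycles is assumed to be the cumulative
 * usage field of k_thread_runtime_stats_t):
 *
 * @code{.c}
 * k_thread_runtime_stats_t stats;
 *
 * if (k_thread_runtime_stats_get(tid, &stats) == 0) {
 *	printk("thread used %llu cycles\n",
 *	       (unsigned long long)stats.execution_cycles);
 * }
 * @endcode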
 */
int k_thread_runtime_stats_get(k_tid_t thread,
			       k_thread_runtime_stats_t *stats);

/**
 * @brief Get the runtime statistics of all threads
 *
 * @param stats Pointer to struct to copy statistics into.
 * @return -EINVAL if @a stats is NULL, otherwise 0
 */
int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats);

/**
 * @brief Get the runtime statistics of all threads on the specified CPU
 *
 * @param cpu The CPU number
 * @param stats Pointer to struct to copy statistics into.
 * @return -EINVAL if @a stats is NULL, otherwise 0
 */
int k_thread_runtime_stats_cpu_get(int cpu, k_thread_runtime_stats_t *stats);

/**
 * @brief Enable gathering of runtime statistics for specified thread
 *
 * This routine enables the gathering of runtime statistics for the specified
 * thread.
 *
 * @param thread ID of thread
 * @return -EINVAL if invalid thread ID, otherwise 0
 */
int k_thread_runtime_stats_enable(k_tid_t thread);

/**
 * @brief Disable gathering of runtime statistics for specified thread
 *
 * This routine disables the gathering of runtime statistics for the specified
 * thread.
 *
 * @param thread ID of thread
 * @return -EINVAL if invalid thread ID, otherwise 0
 */
int k_thread_runtime_stats_disable(k_tid_t thread);

/**
 * @brief Enable gathering of system runtime statistics
 *
 * This routine enables the gathering of system runtime statistics. Note that
 * it does not affect the gathering of similar statistics for individual
 * threads.
 */
void k_sys_runtime_stats_enable(void);

/**
 * @brief Disable gathering of system runtime statistics
 *
 * This routine disables the gathering of system runtime statistics. Note that
 * it does not affect the gathering of similar statistics for individual
 * threads.
 */
void k_sys_runtime_stats_disable(void);

#ifdef __cplusplus
}
#endif

#include <zephyr/tracing/tracing.h>
#include <zephyr/syscalls/kernel.h>

#endif /* !_ASMLANGUAGE */

#endif /* ZEPHYR_INCLUDE_KERNEL_H_ */