Lines Matching +full:trigger +full:- +full:conversion +full:- +full:via +full:- +full:int

4  * SPDX-License-Identifier: Apache-2.0
34 BUILD_ASSERT(sizeof(int32_t) == sizeof(int));
53 #define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
56 #define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
60 #define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)
98 typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);
144 void k_thread_foreach_filter_by_cpu(unsigned int cpu,
148 void k_thread_foreach_filter_by_cpu(unsigned int cpu, in k_thread_foreach_filter_by_cpu()
219 void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu,
223 void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu, in k_thread_foreach_unlocked_filter_by_cpu()
245 * bits, arch-specific use high bits.
289 * from within a user-provided callback once it has been invoked.
290 * Effectively it serves as a tiny bit of zero-overhead TLS data.
328 /* end - thread options */
335 * - @ref K_USER allocate a userspace thread (requires `CONFIG_USERSPACE=y`)
345 __syscall k_thread_stack_t *k_thread_stack_alloc(size_t size, int flags);
353 * @retval -EBUSY if the thread stack is in use.
354 * @retval -EINVAL if @p stack is invalid.
355 * @retval -ENOSYS if dynamic thread stack allocation is disabled
359 __syscall int k_thread_stack_free(k_thread_stack_t *stack);
371 * Thread options are architecture-specific, and can include K_ESSENTIAL,
378 * - K_THREAD_STACK_DEFINE() - For stacks that may support either user or
380 * - K_KERNEL_STACK_DEFINE() - For stacks that may support supervisor
386 * - The original size value passed to K_THREAD_STACK_DEFINE() or
388 * - The return value of K_THREAD_STACK_SIZEOF(stack) if the stack was
390 * - The return value of K_KERNEL_STACK_SIZEOF(stack) if the stack was
414 int prio, uint32_t options, k_timeout_t delay);
419 * This allows a supervisor thread to be re-used as a user thread.
424 * Any thread-local storage will be reverted to a pristine state.
429 * A common use of this function is to re-use the main thread as a user thread
430 * once all supervisor mode-only tasks have been completed.
474 thread->resource_pool = heap; in k_thread_heap_assign()
492 * @return -EBADF Bad thread object (user mode only)
493 * @return -EPERM No permissions on thread object (user mode only)
494 * @return -ENOTSUP Forbidden by hardware policy
495 * @return -EINVAL Thread is uninitialized or exited (user mode only)
496 * @return -EFAULT Bad memory address for unused_ptr (user mode only)
498 __syscall int k_thread_stack_space_get(const struct k_thread *thread,
522 * to being aborted, self-exiting, or taking a fatal error. This API returns
532 * @retval -EBUSY returned without waiting
533 * @retval -EAGAIN waiting period timed out
534 * @retval -EDEADLK target thread is joining on the caller, or target thread
537 __syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);
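Usage sketch (illustrative, assuming the usual #include <zephyr/kernel.h>; the stack size, priority, and worker_entry below are assumptions, not from this header): a parent creates a worker thread and waits for it to terminate.

    K_THREAD_STACK_DEFINE(worker_stack, 1024);
    static struct k_thread worker;

    static void worker_entry(void *p1, void *p2, void *p3)
    {
        ARG_UNUSED(p1); ARG_UNUSED(p2); ARG_UNUSED(p3);
        /* ... do work, then return to terminate the thread ... */
    }

    void spawn_and_join(void)
    {
        k_thread_create(&worker, worker_stack,
                        K_THREAD_STACK_SIZEOF(worker_stack),
                        worker_entry, NULL, NULL, NULL,
                        K_PRIO_PREEMPT(5), 0, K_NO_WAIT);

        /* Wait up to 100 ms for the worker to exit. */
        if (k_thread_join(&worker, K_MSEC(100)) == -EAGAIN) {
            /* timed out; the worker is still running */
        }
    }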
593 * @note The clock used for the microsecond-resolution delay here may
641 * This unconditionally queries the kernel via a system call.
665 /* Thread-local cache of current thread ID, set in z_thread_entry() */ in k_current_get()
686 * this is done by blocking the caller (in the same manner as
712 return z_timeout_expires(&thread->base.timeout); in z_impl_k_thread_timeout_expires_ticks()
727 return z_timeout_remaining(&thread->base.timeout); in z_impl_k_thread_timeout_remaining_ticks()
739 unsigned int init_stack_size;
744 int init_prio;
756 #define Z_THREAD_INIT_DELAY(thread) SYS_TIMEOUT_MS((thread)->init_delay_ms)
759 #define Z_THREAD_INIT_DELAY(thread) (thread)->init_delay
804 * Thread options are architecture-specific, and can include K_ESSENTIAL,
842 * Thread options are architecture-specific, and can include K_ESSENTIAL,
851 * transformed into a user thread via k_thread_user_mode_enter().
855 * or in power-of-two size (if MPU).
883 __syscall int k_thread_priority_get(k_tid_t thread);
893 * - If its priority is raised above the priority of a currently scheduled
896 * - If the caller lowers the priority of a currently scheduled preemptible
900 * Priority can be assigned in the range of -CONFIG_NUM_COOP_PRIORITIES to
901 * CONFIG_NUM_PREEMPT_PRIORITIES-1, where -CONFIG_NUM_COOP_PRIORITIES is the
910 __syscall void k_thread_priority_set(k_tid_t thread, int prio);
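A minimal sketch pairing the getter and setter, lowering the calling thread's own priority (recall that a numerically larger value means lower priority):

    void demote_self(void)
    {
        int prio = k_thread_priority_get(k_current_get());

        /* Numerically larger value = lower priority. */
        k_thread_priority_set(k_current_get(), prio + 1);
    }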
927 * a signed non-negative quantity). Failure to adhere to this rule
946 __syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
982 int k_thread_cpu_mask_clear(k_tid_t thread);
996 int k_thread_cpu_mask_enable_all(k_tid_t thread);
1010 int k_thread_cpu_mask_enable(k_tid_t thread, int cpu);
1024 int k_thread_cpu_mask_disable(k_tid_t thread, int cpu);
1036 int k_thread_cpu_pin(k_tid_t thread, int cpu);
1094 * @brief Set time-slicing period and scope.
1099 * To enable time slicing, @a slice must be non-zero. The scheduler
1119 void k_sched_time_slice_set(int32_t slice, int prio);
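For example (values illustrative), round-robin scheduling can be enabled for all preemptible threads whose priority is numerically >= 0, with a 10 ms slice:

    /* 10 ms time slice for preemptible threads at priority 0 or lower */
    k_sched_time_slice_set(10, 0);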
1126 * thread. When non-zero, this timeslice will take precedence over
1130 * will be called before the thread is removed/re-added to the run
1133 * currently-executing ISR. Such a callback is free to, for example,
1142 * @note Threads with a non-zero slice time set will be timesliced
1144 * priority set via k_sched_time_slice_set().
1151 * fine-grained timing decisions within this callback should use the
1189 * - The code is running in a thread, not at ISR.
1190 * - The thread's priority is in the preemptible range.
1191 * - The thread has not locked the scheduler.
1196 * @return Non-zero if invoked by a preemptible thread.
1198 __syscall int k_is_preempt_thread(void);
1201 * @brief Test whether startup is in the before-main-task phase.
1208 * @return true if invoked before post-kernel initialization
1209 * @return false if invoked during/after post-kernel initialization
1234 * again becomes the current thread, its non-preemptible status is maintained.
1239 * extremely fast for non-userspace threads (just one byte
1249 * In general this is a historical API not well-suited to modern
1270 * upon which to build thread-local storage.
1295 * @retval -EFAULT Memory access error with supplied string
1296 * @retval -ENOSYS Thread name configuration option not enabled
1297 * @retval -EINVAL Thread name too long
1299 __syscall int k_thread_name_set(k_tid_t thread, const char *str);
1317 * @retval -ENOSPC Destination buffer too small
1318 * @retval -EFAULT Memory access error
1319 * @retval -ENOSYS Thread name feature not enabled
1322 __syscall int k_thread_name_copy(k_tid_t thread, char *buf,
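A usage sketch, assuming CONFIG_THREAD_NAME=y (the name string is illustrative):

    void tag_current_thread(void)
    {
        char name[CONFIG_THREAD_MAX_NAME_LEN];

        k_thread_name_set(k_current_get(), "sensor_poll");

        if (k_thread_name_copy(k_current_get(), name, sizeof(name)) == 0) {
            /* name now holds "sensor_poll" */
        }
    }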
1552 * dynamic timer allocation. timeout.node is used in the double-linked
1572 /* user-specific data, also used to support legacy features */
1625 * callable from interrupt context (isr-ok).
1714 * This routine blocks the calling thread until the timer's status is non-zero
1716 * or the timer is stopped. If the timer status is already non-zero,
1747 return z_timeout_expires(&timer->timeout); in z_impl_k_timer_expires_ticks()
1765 return z_timeout_remaining(&timer->timeout); in z_impl_k_timer_remaining_ticks()
1786 * @brief Associate user-specific data with a timer.
1805 timer->user_data = user_data; in z_impl_k_timer_user_data_set()
1809 * @brief Retrieve the user-specific data from a timer.
1819 return timer->user_data; in z_impl_k_timer_user_data_get()
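A sketch tying the timer APIs together; the context struct is a hypothetical example:

    struct blink_ctx {
        int count;
    };

    static struct blink_ctx ctx;

    static void blink_expiry(struct k_timer *timer)
    {
        struct blink_ctx *c = k_timer_user_data_get(timer);

        c->count++;
    }

    K_TIMER_DEFINE(blink_timer, blink_expiry, NULL);

    void start_blinking(void)
    {
        k_timer_user_data_set(&blink_timer, &ctx);
        /* first expiry after 50 ms, then every 50 ms */
        k_timer_start(&blink_timer, K_MSEC(50), K_MSEC(50));
    }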
1860 * @brief Get system uptime (32-bit version).
1865 * Because correct conversion requires full precision of the system
1869 * interrupt blocking and 64-bit math.
1912 delta = uptime - *reftime; in k_uptime_delta()
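A short sketch of measuring elapsed time with the delta helper (do_work() is a placeholder):

    void time_it(void)
    {
        int64_t ref = k_uptime_get();

        do_work(); /* placeholder for the measured operation */

        /* Milliseconds elapsed since ref; also resets ref to "now". */
        int64_t elapsed_ms = k_uptime_delta(&ref);

        (void)elapsed_ms;
    }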
1924 * @return Current hardware clock up-counter (in cycles).
1932 * @brief Read the 64-bit hardware clock.
1934 * This routine returns the current time in 64-bits, as measured by the
1939 * @return Current hardware clock up-counter (in cycles).
1944 __ASSERT(0, "64-bit cycle counter not enabled on this platform. " in k_cycle_get_64()
2003 * -EINTR and K_POLL_STATE_CANCELLED state (and per above, subsequent
2040 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2072 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2095 * The data items must be in a singly-linked list, with the first word
2097 * NULL-terminated.
2102 * @param head Pointer to first node in singly-linked list.
2103 * @param tail Pointer to last node in singly-linked list.
2106 * @retval -EINVAL on invalid supplied data
2109 int k_queue_append_list(struct k_queue *queue, void *head, void *tail);
2115 * The data items must be in a singly-linked list implemented using a
2124 * @retval -EINVAL on invalid data
2126 int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list);
2191 * @return Non-zero if the queue is empty.
2194 __syscall int k_queue_is_empty(struct k_queue *queue);
2196 static inline int z_impl_k_queue_is_empty(struct k_queue *queue) in z_impl_k_queue_is_empty()
2198 return sys_sflist_is_empty(&queue->data_q) ? 1 : 0; in z_impl_k_queue_is_empty()
2286 * @retval -EACCES Caller does not have read access to futex address.
2287 * @retval -EAGAIN If the futex value did not match the expected parameter.
2288 * @retval -EINVAL Futex parameter address not recognized by the kernel.
2289 * @retval -ETIMEDOUT Thread woke up due to timeout and not a futex wakeup.
2294 __syscall int k_futex_wait(struct k_futex *futex, int expected,
2307 * @retval -EACCES Caller does not have access to the futex address.
2308 * @retval -EINVAL Futex parameter address not recognized by the kernel.
2311 __syscall int k_futex_wake(struct k_futex *futex, bool wake_all);
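A minimal wait/wake sketch, assuming CONFIG_USERSPACE=y (futexes are a user-mode primitive; the futex word must be accessible to the caller):

    static struct k_futex my_futex;

    void futex_waiter(void)
    {
        /* Blocks only while the futex word still holds 0. */
        k_futex_wait(&my_futex, 0, K_FOREVER);
    }

    void futex_waker(void)
    {
        atomic_set(&my_futex.val, 1);
        k_futex_wake(&my_futex, false); /* wake a single waiter */
    }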
2424 * events that are expressed as bits in a single 32-bit word.
2448 * events that are expressed as bits in a single 32-bit word.
2529 k_queue_init(&(fifo)->_queue); \
2549 k_queue_cancel_wait(&(fifo)->_queue); \
2569 k_queue_append(&(fifo)->_queue, _data); \
2587 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2593 int fap_ret = k_queue_alloc_append(&(fifo)->_queue, _data); \
2602 * The data items must be in a singly-linked list, with the first word of
2604 * NULL-terminated.
2609 * @param head Pointer to first node in singly-linked list.
2610 * @param tail Pointer to last node in singly-linked list.
2615 k_queue_append_list(&(fifo)->_queue, head, tail); \
2623 * The data items must be in a singly-linked list implemented using a
2625 * and must be re-initialized via sys_slist_init().
2635 k_queue_merge_slist(&(fifo)->_queue, list); \
2659 void *fg_ret = k_queue_get(&(fifo)->_queue, timeout); \
2674 * @return Non-zero if the FIFO queue is empty.
2678 k_queue_is_empty(&(fifo)->_queue)
2696 void *fph_ret = k_queue_peek_head(&(fifo)->_queue); \
2715 void *fpt_ret = k_queue_peek_tail(&(fifo)->_queue); \
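A producer/consumer sketch; the item struct is illustrative, and its first word is reserved for the kernel's list node:

    struct data_item {
        void *fifo_reserved; /* first word: reserved for the kernel */
        uint32_t payload;
    };

    K_FIFO_DEFINE(my_fifo);

    void fifo_producer(void)
    {
        static struct data_item item = { .payload = 42 };

        k_fifo_put(&my_fifo, &item);
    }

    void fifo_consumer(void)
    {
        struct data_item *rx = k_fifo_get(&my_fifo, K_FOREVER);

        /* use rx->payload */
    }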
2771 k_queue_init(&(lifo)->_queue); \
2793 k_queue_prepend(&(lifo)->_queue, _data); \
2811 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2817 int lap_ret = k_queue_alloc_prepend(&(lifo)->_queue, _data); \
2842 void *lg_ret = k_queue_get(&(lifo)->_queue, timeout); \
2925 * @return -ENOMEM if memory couldn't be allocated
2934 * If a stack object was given a dynamically allocated buffer via
2940 * @retval -EAGAIN when object is still in use
2942 int k_stack_cleanup(struct k_stack *stack);
2955 * @retval -ENOMEM if stack is full
2957 __syscall int k_stack_push(struct k_stack *stack, stack_data_t data);
2976 * @retval -EBUSY Returned without waiting.
2977 * @retval -EAGAIN Waiting period timed out.
2979 __syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data,
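A push/pop sketch (entry count and value are illustrative):

    K_STACK_DEFINE(my_kstack, 8); /* room for 8 stack_data_t entries */

    void push_pop(void)
    {
        stack_data_t out;

        k_stack_push(&my_kstack, (stack_data_t)0x1234);

        if (k_stack_pop(&my_kstack, &out, K_NO_WAIT) == 0) {
            /* out == 0x1234 */
        }
    }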
3034 int owner_orig_prio;
3083 __syscall int k_mutex_init(struct k_mutex *mutex);
3104 * @retval -EBUSY Returned without waiting.
3105 * @retval -EAGAIN Waiting period timed out.
3107 __syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout);
3125 * @retval -EPERM The current thread does not own the mutex
3126 * @retval -EINVAL The mutex is not locked
3129 __syscall int k_mutex_unlock(struct k_mutex *mutex);
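A typical lock/unlock pattern (the timeout is illustrative):

    K_MUTEX_DEFINE(my_mutex);

    void critical_section(void)
    {
        if (k_mutex_lock(&my_mutex, K_MSEC(100)) == 0) {
            /* ... access shared state ... */
            k_mutex_unlock(&my_mutex);
        }
        /* else: -EAGAIN on timeout, -EBUSY with K_NO_WAIT */
    }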
3161 __syscall int k_condvar_init(struct k_condvar *condvar);
3169 __syscall int k_condvar_signal(struct k_condvar *condvar);
3178 __syscall int k_condvar_broadcast(struct k_condvar *condvar);
3195 * @retval -EAGAIN Waiting period timed out.
3197 __syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
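The canonical wait-in-a-loop pattern, sketched with an illustrative ready flag:

    K_MUTEX_DEFINE(cv_lock);
    K_CONDVAR_DEFINE(cv);
    static bool ready;

    void cv_waiter(void)
    {
        k_mutex_lock(&cv_lock, K_FOREVER);
        while (!ready) {
            /* Releases cv_lock while waiting; reacquires it on wakeup. */
            k_condvar_wait(&cv, &cv_lock, K_FOREVER);
        }
        k_mutex_unlock(&cv_lock);
    }

    void cv_notifier(void)
    {
        k_mutex_lock(&cv_lock, K_FOREVER);
        ready = true;
        k_condvar_signal(&cv);
        k_mutex_unlock(&cv_lock);
    }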
3223 unsigned int count;
3224 unsigned int limit;
3275 * @retval -EINVAL Invalid values
3278 __syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
3279 unsigned int limit);
3295 * @retval -EBUSY Returned without waiting.
3296 * @retval -EAGAIN Waiting period timed out.
3299 __syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
3318 * with -EAGAIN.
3333 __syscall unsigned int k_sem_count_get(struct k_sem *sem);
3338 static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem) in z_impl_k_sem_count_get()
3340 return sem->count; in z_impl_k_sem_count_get()
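A classic ISR-to-thread handoff sketch (initial count 0, limit 1):

    K_SEM_DEFINE(data_sem, 0, 1);

    void my_isr(const void *arg)
    {
        ARG_UNUSED(arg);
        k_sem_give(&data_sem); /* never blocks; safe from ISRs */
    }

    void sem_consumer(void)
    {
        if (k_sem_take(&data_sem, K_MSEC(50)) == -EAGAIN) {
            /* timed out waiting for the ISR */
        }
    }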
3388 /** @brief Initialize a (non-delayable) work structure.
3392 * re-invoked to change the associated handler, but this must be done when the
3418 int k_work_busy_get(const struct k_work *work);
3422 * Wrapper to determine whether a work item is in a non-idle state.
3431 * @return true if and only if k_work_busy_get() returns a non-zero value.
3448 * @retval -EBUSY
3452 * @retval -EINVAL if @p queue is null and the work item has never been run.
3453 * @retval -ENODEV if @p queue has not been started.
3455 int k_work_submit_to_queue(struct k_work_q *queue,
3466 int k_work_submit(struct k_work *work);
3468 /** @brief Wait for last-submitted instance to complete.
3497 * This attempts to prevent a pending (non-delayable) work item from being
3514 int k_work_cancel(struct k_work *work);
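A minimal submission sketch; the handler runs later on the system work queue thread, not in the submitter's context:

    static void my_work_handler(struct k_work *work)
    {
        ARG_UNUSED(work);
        /* runs on the system work queue thread */
    }

    K_WORK_DEFINE(my_work, my_work_handler);

    void on_event(void)
    {
        /* Safe from ISRs; returns >= 0 on success. */
        k_work_submit(&my_work);
    }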
3562 * should not be re-invoked on a queue.
3580 int prio, const struct k_work_queue_config *cfg);
3616 int k_work_queue_drain(struct k_work_q *queue, bool plug);
3629 * @retval -EALREADY if the work queue was not plugged.
3631 int k_work_queue_unplug(struct k_work_q *queue);
3643 * @retval -EALREADY if the work queue was not started (or already stopped)
3644 * @retval -EBUSY if the work queue is actively processing work items
3645 * @retval -ETIMEDOUT if the work queue did not stop within the stipulated timeout
3647 int k_work_queue_stop(struct k_work_q *queue, k_timeout_t timeout);
3653 * can be re-invoked to change the associated handler, but this must be done
3692 int k_work_delayable_busy_get(const struct k_work_delayable *dwork);
3696 * Wrapper to determine whether a delayed work item is in a non-idle state.
3705 * @return true if and only if k_work_delayable_busy_get() returns a non-zero
3745 * Unlike k_work_reschedule_for_queue() this is a no-op if the work item is
3763 * @retval -EBUSY if @p delay is @c K_NO_WAIT and
3765 * @retval -EINVAL if @p delay is @c K_NO_WAIT and
3767 * @retval -ENODEV if @p delay is @c K_NO_WAIT and
3770 int k_work_schedule_for_queue(struct k_work_q *queue,
3787 int k_work_schedule(struct k_work_delayable *dwork,
3818 * @retval -EBUSY if @p delay is @c K_NO_WAIT and
3820 * @retval -EINVAL if @p delay is @c K_NO_WAIT and
3822 * @retval -ENODEV if @p delay is @c K_NO_WAIT and
3825 int k_work_reschedule_for_queue(struct k_work_q *queue,
3841 int k_work_reschedule(struct k_work_delayable *dwork,
3891 int k_work_cancel_delayable(struct k_work_delayable *dwork);
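A debouncing sketch with a delayable item; k_work_reschedule() restarts the countdown on every call (the 20 ms value is illustrative):

    static void debounce_handler(struct k_work *work)
    {
        struct k_work_delayable *dwork = k_work_delayable_from_work(work);

        ARG_UNUSED(dwork);
        /* input has been stable for 20 ms */
    }

    K_WORK_DELAYABLE_DEFINE(debounce_work, debounce_handler);

    void on_gpio_edge(void)
    {
        /* Each edge pushes the deadline out another 20 ms. */
        k_work_reschedule(&debounce_work, K_MSEC(20));
    }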
3973 * Accessed via k_work_busy_get(). May co-occur with other flags.
3979 * Accessed via k_work_busy_get(). May co-occur with other flags.
3986 * Accessed via k_work_busy_get(). May co-occur with other flags.
3993 * Accessed via k_work_busy_get(). May co-occur with other flags.
3999 * Accessed via k_work_busy_get(). May co-occur with other flags.
4052 * @brief Initialize a statically-defined delayable work item.
4054 * This macro can be used to initialize a statically-defined delayable
4113 * is generally not coherent, so the object must not be stack-allocated. Violations are detected by
4200 return z_timeout_expires(&dwork->timeout); in k_work_delayable_expires_get()
4206 return z_timeout_remaining(&dwork->timeout); in k_work_delayable_remaining_get()
4211 return &queue->thread; in k_work_queue_thread_get()
4257 #if defined(__cplusplus) && ((__cplusplus - 0) < 202002L)
4269 * @brief Initialize a statically-defined user work item.
4271 * This macro can be used to initialize a statically-defined user work
4315 return atomic_test_bit(&work->flags, K_WORK_USER_STATE_PENDING); in k_work_user_is_pending()
4332 * @retval -EBUSY if the work item was already in some workqueue
4333 * @retval -ENOMEM if no memory for thread resource pool allocation
4336 static inline int k_work_user_submit_to_queue(struct k_work_user_q *work_q, in k_work_user_submit_to_queue()
4339 int ret = -EBUSY; in k_work_user_submit_to_queue()
4341 if (!atomic_test_and_set_bit(&work->flags, in k_work_user_submit_to_queue()
4343 ret = k_queue_alloc_append(&work_q->queue, work); in k_work_user_submit_to_queue()
4349 atomic_clear_bit(&work->flags, in k_work_user_submit_to_queue()
4378 size_t stack_size, int prio,
4393 return &work_q->thread; in k_work_user_queue_thread_get()
4407 int num_events;
4410 int poll_result;
4423 * @brief Initialize a statically-defined work item.
4425 * This macro can be used to initialize a statically-defined workqueue work
4460 * to race conditions with the pre-existing triggered work item and work queue,
4472 * @param events An array of events which trigger the work.
4479 * @retval -EINVAL Work item is being processed or has completed its work.
4480 * @retval -EADDRINUSE Work item is pending on a different workqueue.
4482 int k_work_poll_submit_to_queue(struct k_work_q *work_q,
4485 int num_events,
4500 * to race conditions with the pre-existing triggered work item and work queue,
4510 * @param events An array of events which trigger the work.
4516 * @retval -EINVAL Work item is being processed or has completed its work.
4517 * @retval -EADDRINUSE Work item is pending on a different workqueue.
4519 int k_work_poll_submit(struct k_work_poll *work,
4521 int num_events,
4536 * @retval -EINVAL Work item is being processed or has completed its work.
4538 int k_work_poll_cancel(struct k_work_poll *work);
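A triggered-work sketch: the item is submitted to a work queue automatically once the polled semaphore becomes available (all names are illustrative):

    K_SEM_DEFINE(trig_sem, 0, 1);
    static struct k_work_poll trig_work;
    static struct k_poll_event trig_events[1];

    static void trig_handler(struct k_work *work)
    {
        ARG_UNUSED(work);
        k_sem_take(&trig_sem, K_NO_WAIT); /* consume the trigger */
    }

    void arm_triggered_work(void)
    {
        k_work_poll_init(&trig_work, trig_handler);
        k_poll_event_init(&trig_events[0], K_POLL_TYPE_SEM_AVAILABLE,
                          K_POLL_MODE_NOTIFY_ONLY, &trig_sem);
        k_work_poll_submit(&trig_work, trig_events, 1, K_FOREVER);
    }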
4677 * @return 0 on success, -ENOMEM if there was insufficient memory in the
4678 * thread's resource pool, or -EINVAL if the size parameters cause
4681 __syscall int k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
4692 * @retval -EBUSY Queue not empty
4694 int k_msgq_cleanup(struct k_msgq *msgq);
4713 * @retval -ENOMSG Returned without waiting or queue purged.
4714 * @retval -EAGAIN Waiting period timed out.
4716 __syscall int k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
4735 * @retval -ENOMSG Returned without waiting or queue purged.
4736 * @retval -EAGAIN Waiting period timed out.
4738 __syscall int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout);
4752 * @retval -ENOMSG Returned when the queue has no message.
4754 __syscall int k_msgq_peek(struct k_msgq *msgq, void *data);
4770 * @retval -ENOMSG Returned when the queue has no message at index.
4772 __syscall int k_msgq_peek_at(struct k_msgq *msgq, void *data, uint32_t idx);
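A producer/consumer sketch with a statically defined queue (message type and depth are illustrative):

    struct sensor_msg {
        uint32_t id;
        int32_t value;
    };

    K_MSGQ_DEFINE(sensor_q, sizeof(struct sensor_msg), 10, 4);

    void msgq_producer(void)
    {
        struct sensor_msg m = { .id = 1, .value = 42 };

        if (k_msgq_put(&sensor_q, &m, K_NO_WAIT) == -ENOMSG) {
            /* queue full (or purged) */
        }
    }

    void msgq_consumer(void)
    {
        struct sensor_msg m;

        k_msgq_get(&sensor_q, &m, K_FOREVER);
    }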
4779 * message queue are unblocked and see an -ENOMSG error code.
4811 return msgq->max_msgs - msgq->used_msgs; in z_impl_k_msgq_num_free_get()
4827 return msgq->used_msgs; in z_impl_k_msgq_num_used_get()
4845 /** application-defined information value */
4853 /** internal use only - thread waiting on send (may be a dummy) */
4856 /** internal use only - semaphore used during asynchronous send */
4917 * receive and process it. The message data may be in a buffer or non-existent
4929 * @retval -ENOMSG Returned without waiting.
4930 * @retval -EAGAIN Waiting period timed out.
4932 int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
4939 * to process it. The message data may be in a buffer or non-existent
4965 * @retval -ENOMSG Returned without waiting.
4966 * @retval -EAGAIN Waiting period timed out.
4968 int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg,
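A synchronous send/receive sketch (buffer sizes and the info value are illustrative):

    K_MBOX_DEFINE(my_mbox);

    void mbox_sender(void)
    {
        char data[4] = "hi!";
        struct k_mbox_msg msg = {
            .info = 123,               /* application-defined value */
            .size = sizeof(data),
            .tx_data = data,
            .tx_target_thread = K_ANY, /* any thread may receive */
        };

        k_mbox_put(&my_mbox, &msg, K_FOREVER);
    }

    void mbox_receiver(void)
    {
        char buf[4];
        struct k_mbox_msg msg = {
            .size = sizeof(buf),
            .rx_source_thread = K_ANY, /* accept from any sender */
        };

        k_mbox_get(&my_mbox, &msg, buf, K_FOREVER);
    }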
5079 * If a pipe object was given a dynamically allocated buffer via
5085 * @retval -EAGAIN nothing to cleanup
5087 int k_pipe_cleanup(struct k_pipe *pipe);
5102 * @retval -ENOMEM if memory couldn't be allocated
5104 __syscall int k_pipe_alloc_init(struct k_pipe *pipe, size_t size);
5120 * @retval -EIO Returned without waiting; zero data bytes were written.
5121 * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
5124 __syscall int k_pipe_put(struct k_pipe *pipe, const void *data,
5142 * @retval -EINVAL invalid parameters supplied
5143 * @retval -EIO Returned without waiting; zero data bytes were read.
5144 * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
5147 __syscall int k_pipe_get(struct k_pipe *pipe, void *data,
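A byte-stream sketch using the put/get pipe API shown above (buffer size and min_xfer values are illustrative; recent Zephyr releases have reworked the pipe API, so treat this as matching only the signatures in this header):

    K_PIPE_DEFINE(my_pipe, 64, 4); /* 64-byte ring buffer, 4-byte aligned */

    void pipe_writer(void)
    {
        uint8_t data[8] = { 0 };
        size_t written;

        /* Require all 8 bytes to be accepted (min_xfer == size). */
        k_pipe_put(&my_pipe, data, sizeof(data), &written,
                   sizeof(data), K_MSEC(10));
    }

    void pipe_reader(void)
    {
        uint8_t buf[8];
        size_t nread;

        /* Accept a partial read of at least 1 byte. */
        k_pipe_get(&my_pipe, buf, sizeof(buf), &nread, 1, K_FOREVER);
    }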
5247 * @brief Statically define and initialize a memory slab in a public (non-static) scope.
5251 * @a slab_align -byte boundary. To ensure that each memory block is similarly
5261 * If such a use-case is desired, use @ref K_MEM_SLAB_DEFINE_STATIC
5282 * @a slab_align -byte boundary. To ensure that each memory block is similarly
5306 * N-byte boundary matching a word boundary, where N is a power of 2
5307 * (i.e. 4 on 32-bit systems, 8, 16, ...).
5317 * @retval -EINVAL invalid data supplied
5320 int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
5341 * @retval -ENOMEM Returned without waiting.
5342 * @retval -EAGAIN Waiting period timed out.
5343 * @retval -EINVAL Invalid data supplied
5345 int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem,
5371 return slab->info.num_used; in k_mem_slab_num_used_get()
5387 return slab->info.max_used; in k_mem_slab_max_used_get()
5406 return slab->info.num_blocks - slab->info.num_used; in k_mem_slab_num_free_get()
5418 * @retval -EINVAL Any parameter points to NULL
5421 int k_mem_slab_runtime_stats_get(struct k_mem_slab *slab, struct sys_memory_stats *stats);
5432 * @retval -EINVAL Memory slab is NULL
5434 int k_mem_slab_runtime_stats_reset_max(struct k_mem_slab *slab);
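An allocate/free sketch (block size and count are illustrative; note that k_mem_slab_free() takes the block pointer directly in current releases, where very old ones took a void **):

    K_MEM_SLAB_DEFINE_STATIC(my_slab, 64, 8, 4); /* 8 blocks of 64 bytes */

    void use_block(void)
    {
        void *block;

        if (k_mem_slab_alloc(&my_slab, &block, K_NO_WAIT) == 0) {
            /* ... use the fixed-size block ... */
            k_mem_slab_free(&my_slab, block);
        }
    }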
5472 * is a multiple of the specified power-of-two alignment value in
5495 * block for the specified timeout (constructed via the standard
5520 * via the standard timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory
5544 * block for the specified timeout (constructed via the standard
5577 /* Hand-calculated minimum heap sizes needed to return a successful
5578 * 1-byte allocation. See details in lib/os/heap.[ch]
5659 * difference is that k_aligned_alloc() accepts any non-zero @p size,
5665 * The aligned_alloc function (p: 347-348)
5733 /* polling API - PRIVATE */
5736 #define _INIT_OBJ_POLL_EVENT(obj) do { (obj)->poll_event = NULL; } while (false)
5741 /* private - types bit positions */
5764 #define Z_POLL_TYPE_BIT(type) (1U << ((type) - 1U))
5766 /* private - states bit positions */
5792 #define Z_POLL_STATE_BIT(state) (1U << ((state) - 1U))
5795 (32 - (0 \
5802 /* end of polling API - PRIVATE */
5813 /* public - values for k_poll_event.type bitfield */
5822 /* public - polling modes */
5830 /* public - values for k_poll_event.state bitfield */
5840 /* public - poll signal object */
5842 /** PRIVATE - DO NOT TOUCH */
5849 unsigned int signaled;
5852 int result;
5866 /** PRIVATE - DO NOT TOUCH */
5869 /** PRIVATE - DO NOT TOUCH */
5872 /** optional user-specified tag, opaque, untouched by the API */
5875 /** bitfield of event types (bitwise-ORed K_POLL_TYPE_xxx values) */
5878 /** bitfield of event states (bitwise-ORed K_POLL_STATE_xxx values) */
5884 /** unused bits in 32-bit word */
5887 /** per-type data */
5945 int mode, void *obj);
5980 * @retval -EAGAIN Waiting period timed out.
5981 * @retval -EINTR Polling has been interrupted, e.g. with
5984 * words, -EINTR status means that at least one of output events is
5986 * @retval -ENOMEM Thread resource pool insufficient memory (user mode only)
5987 * @retval -EINVAL Bad parameters (user mode only)
5990 __syscall int k_poll(struct k_poll_event *events, int num_events,
5996 * Ready a poll signal object to be signaled via k_poll_signal_raise().
6021 unsigned int *signaled, int *result);
6044 * @retval -EAGAIN The polling thread's timeout is in the process of expiring.
6047 __syscall int k_poll_signal_raise(struct k_poll_signal *sig, int result);
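A sketch waiting on a semaphore and a poll signal at once (names and timeout are illustrative; event states must be reset before the array is reused):

    K_SEM_DEFINE(poll_sem, 0, 1);
    static struct k_poll_signal poll_sig;

    void poll_two_sources(void)
    {
        struct k_poll_event events[2];

        k_poll_signal_init(&poll_sig);
        k_poll_event_init(&events[0], K_POLL_TYPE_SEM_AVAILABLE,
                          K_POLL_MODE_NOTIFY_ONLY, &poll_sem);
        k_poll_event_init(&events[1], K_POLL_TYPE_SIGNAL,
                          K_POLL_MODE_NOTIFY_ONLY, &poll_sig);

        if (k_poll(events, 2, K_MSEC(100)) == 0) {
            if (events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
                k_sem_take(&poll_sem, K_NO_WAIT); /* k_poll doesn't consume */
            }
            if (events[1].state == K_POLL_STATE_SIGNALED) {
                /* poll_sig.result holds the value passed to raise() */
            }
            events[0].state = K_POLL_STATE_NOT_READY;
            events[1].state = K_POLL_STATE_NOT_READY;
        }
    }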
6063 * However, in some more constrained systems, such as a single-threaded system,
6079 * Enabling interrupts and entering a low-power mode will be atomic,
6081 * the processor enters a low-power mode.
6083 * After waking up from the low-power mode, the interrupt lockout state will
6088 static inline void k_cpu_atomic_idle(unsigned int key) in k_cpu_atomic_idle()
6202 * @retval -ENOTSUP If the floating point disabling is not implemented.
6203 * @retval -EINVAL If the floating point disabling could not be performed.
6205 __syscall int k_float_disable(struct k_thread *thread);
6227 * - K_FP_REGS indicates x87 FPU and MMX registers only
6228 * - K_SSE_REGS indicates SSE registers (and also x87 FPU and MMX registers)
6242 * @retval -ENOTSUP If the floating point enabling is not implemented.
6243 * @retval -EINVAL If the floating point enabling could not be performed.
6245 __syscall int k_float_enable(struct k_thread *thread, unsigned int options);
6256 * @return -EINVAL if null pointers, otherwise 0
6258 int k_thread_runtime_stats_get(k_tid_t thread,
6265 * @return -EINVAL if null pointers, otherwise 0
6267 int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats);
6274 * @return -EINVAL if null pointers, otherwise 0
6276 int k_thread_runtime_stats_cpu_get(int cpu, k_thread_runtime_stats_t *stats);
6285 * @return -EINVAL if invalid thread ID, otherwise 0
6287 int k_thread_runtime_stats_enable(k_tid_t thread);
6296 * @return -EINVAL if invalid thread ID, otherwise 0
6298 int k_thread_runtime_stats_disable(k_tid_t thread);