
Searched refs:thread (Results 26 – 50 of 920) sorted by relevance


/Zephyr-latest/tests/kernel/context/
README.txt
12 - start a helper thread to help with k_yield() tests
13 - start a thread to test thread related functionality
16 - Called by a higher priority thread when there is another thread
17 - Called by an equal priority thread when there is another thread
18 - Called by a lower priority thread when there is another thread
22 - Called from an ISR (interrupted a thread)
24 - Called from a thread
28 - Called from an ISR that interrupted a thread
30 - Called from a thread
81 tc_start() - Test kernel CPU and thread routines
[all …]
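
The k_yield() cases listed in this README come down to one rule: yielding only hands the CPU to a ready thread of equal or higher priority. A minimal sketch of that behaviour, not taken from the test itself (thread name, stack size, and priority are illustrative):

#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

#define HELPER_STACK_SIZE 1024

K_THREAD_STACK_DEFINE(helper_stack, HELPER_STACK_SIZE);
static struct k_thread helper_thread;

static void helper_entry(void *p1, void *p2, void *p3)
{
        printk("helper runs once the creator yields\n");
}

void run_yield_demo(void)
{
        /* Same priority as the caller: the helper cannot preempt it... */
        k_thread_create(&helper_thread, helper_stack,
                        K_THREAD_STACK_SIZEOF(helper_stack),
                        helper_entry, NULL, NULL, NULL,
                        k_thread_priority_get(k_current_get()), 0, K_NO_WAIT);

        /* ...so it only gets the CPU when the caller yields. */
        k_yield();
}
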
/Zephyr-latest/modules/lvgl/
lvgl_zephyr_osal.c
14 static void thread_entry(void *thread, void *cb, void *user_data);
16 lv_result_t lv_thread_init(lv_thread_t *thread, const char *const name, lv_thread_prio_t prio, in lv_thread_init() argument
21 thread->stack = k_thread_stack_alloc(stack_size, 0); in lv_thread_init()
22 if (thread->stack == NULL) { in lv_thread_init()
29 thread->tid = k_thread_create(&thread->thread, thread->stack, stack_size, thread_entry, in lv_thread_init()
30 thread, callback, user_data, thread_priority, 0, K_NO_WAIT); in lv_thread_init()
32 k_thread_name_set(thread->tid, name); in lv_thread_init()
37 lv_result_t lv_thread_delete(lv_thread_t *thread) in lv_thread_delete() argument
41 k_thread_abort(thread->tid); in lv_thread_delete()
42 ret = k_thread_stack_free(thread->stack); in lv_thread_delete()
[all …]
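
The LVGL OSAL shim above pairs k_thread_stack_alloc()/k_thread_stack_free() with k_thread_create()/k_thread_abort(). A hedged, stand-alone sketch of the same dynamic-stack pattern, assuming CONFIG_DYNAMIC_THREAD is enabled (function and thread names are illustrative):

#include <errno.h>
#include <zephyr/kernel.h>

static struct k_thread worker;
static k_thread_stack_t *worker_stack;

static void worker_entry(void *p1, void *p2, void *p3)
{
        /* ... thread body ... */
}

int start_worker(size_t stack_size, int prio)
{
        /* Allocate the stack at run time, as the OSAL shim above does. */
        worker_stack = k_thread_stack_alloc(stack_size, 0);
        if (worker_stack == NULL) {
                return -ENOMEM;
        }

        k_tid_t tid = k_thread_create(&worker, worker_stack, stack_size,
                                      worker_entry, NULL, NULL, NULL,
                                      prio, 0, K_NO_WAIT);
        k_thread_name_set(tid, "worker");
        return 0;
}

void stop_worker(void)
{
        /* Tear down in the same order as lv_thread_delete(): abort, then free. */
        k_thread_abort(&worker);
        (void)k_thread_stack_free(worker_stack);
}
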
/Zephyr-latest/arch/arm64/core/
thread.c
79 static bool is_user(struct k_thread *thread) in is_user() argument
81 return (thread->base.user_options & K_USER) != 0; in is_user()
85 void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, in arch_new_thread() argument
96 memset(&thread->arch, 0, sizeof(thread->arch)); in arch_new_thread()
123 if (is_user(thread)) { in arch_new_thread()
137 thread->arch.exception_depth = 1; in arch_new_thread()
144 thread->callee_saved.sp_elx = (uint64_t)pInitCtx; in arch_new_thread()
145 thread->callee_saved.lr = (uint64_t)z_arm64_exit_exc; in arch_new_thread()
147 thread->switch_handle = thread; in arch_new_thread()
149 thread->arch.stack_limit = (uint64_t)stack + Z_ARM64_STACK_GUARD_SIZE; in arch_new_thread()
[all …]
/Zephyr-latest/subsys/shell/modules/kernel_service/thread/
unwind.c
29 struct k_thread *thread; in cmd_kernel_thread_unwind() local
33 thread = _current; in cmd_kernel_thread_unwind()
35 thread = UINT_TO_POINTER(shell_strtoull(argv[1], 16, &err)); in cmd_kernel_thread_unwind()
41 if (!z_thread_is_valid(thread)) { in cmd_kernel_thread_unwind()
42 shell_error(sh, "Invalid thread id %p", (void *)thread); in cmd_kernel_thread_unwind()
46 shell_print(sh, "Unwinding %p %s", (void *)thread, thread->name); in cmd_kernel_thread_unwind()
48 arch_stack_walk(print_trace_address, (void *)sh, thread, NULL); in cmd_kernel_thread_unwind()
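
The shell handler above is a thin wrapper around arch_stack_walk(). A sketch of calling the walker directly on the current thread, assuming the declaration in <zephyr/arch/arch_interface.h> and a callback of the form bool (*)(void *cookie, unsigned long addr); it only works on architectures that implement the stack-walk support used here:

#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>
#include <zephyr/arch/arch_interface.h>

static bool dump_frame(void *cookie, unsigned long addr)
{
        int *depth = cookie;

        printk("  #%d: %#lx\n", (*depth)++, addr);
        return true;    /* keep walking */
}

void dump_current_stack(void)
{
        int depth = 0;

        /* NULL esf: walk the thread's saved context, not an exception frame. */
        arch_stack_walk(dump_frame, &depth, k_current_get(), NULL);
}
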
/Zephyr-latest/arch/xtensa/core/
thread.c
38 static void *init_stack(struct k_thread *thread, int *stack_top, in init_stack() argument
46 (struct xtensa_thread_stack_header *)thread->stack_obj; in init_stack()
48 thread->arch.psp = header->privilege_stack + in init_stack()
58 thread->arch.last_cpu = -1; in init_stack()
75 if ((thread->base.user_options & K_USER) == K_USER) { in init_stack()
98 frame->bsa.threadptr = thread->tls; in init_stack()
100 frame->bsa.threadptr = (uintptr_t)((thread->base.user_options & K_USER) ? thread : NULL); in init_stack()
128 void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, in arch_new_thread() argument
132 thread->switch_handle = init_stack(thread, (int *)stack_ptr, entry, in arch_new_thread()
142 int arch_float_disable(struct k_thread *thread) in arch_float_disable() argument
[all …]
/Zephyr-latest/tests/subsys/tracing/tracing_api/src/
main.c
126 struct k_thread thread; in ZTEST() local
137 sys_trace_k_thread_priority_set(&thread); in ZTEST()
138 sys_trace_k_thread_sched_set_priority(&thread, prio); in ZTEST()
139 sys_trace_k_thread_create(&thread, stack, prio); in ZTEST()
140 sys_trace_k_thread_start(&thread); in ZTEST()
141 sys_trace_k_thread_abort(&thread); in ZTEST()
142 sys_trace_k_thread_suspend(&thread); in ZTEST()
143 sys_trace_k_thread_resume(&thread); in ZTEST()
144 sys_trace_k_thread_ready(&thread); in ZTEST()
145 sys_trace_k_thread_sched_ready(&thread); in ZTEST()
[all …]
/Zephyr-latest/arch/riscv/core/
stacktrace.c
38 static inline bool in_kernel_thread_stack_bound(uintptr_t addr, const struct k_thread *const thread) in in_kernel_thread_stack_bound() argument
43 start = thread->stack_info.start; in in_kernel_thread_stack_bound()
44 end = Z_STACK_PTR_ALIGN(thread->stack_info.start + thread->stack_info.size); in in_kernel_thread_stack_bound()
49 ARG_UNUSED(thread); in in_kernel_thread_stack_bound()
56 static inline bool in_user_thread_stack_bound(uintptr_t addr, const struct k_thread *const thread) in in_user_thread_stack_bound() argument
62 start = thread->arch.priv_stack_start + Z_RISCV_STACK_GUARD_SIZE; in in_user_thread_stack_bound()
64 start = thread->stack_info.start - CONFIG_PRIVILEGED_STACK_SIZE; in in_user_thread_stack_bound()
66 end = Z_STACK_PTR_ALIGN(thread->arch.priv_stack_start + K_KERNEL_STACK_RESERVED + in in_user_thread_stack_bound()
73 static bool in_stack_bound(uintptr_t addr, const struct k_thread *const thread, in in_stack_bound() argument
83 if ((thread->base.user_options & K_USER) != 0) { in in_stack_bound()
[all …]
pmp.c
321 #define PMP_M_MODE(thread) \ argument
322 thread->arch.m_mode_pmpaddr_regs, \
323 thread->arch.m_mode_pmpcfg_regs, \
324 ARRAY_SIZE(thread->arch.m_mode_pmpaddr_regs)
330 #define PMP_U_MODE(thread) \ argument
331 thread->arch.u_mode_pmpaddr_regs, \
332 thread->arch.u_mode_pmpcfg_regs, \
333 ARRAY_SIZE(thread->arch.u_mode_pmpaddr_regs)
478 void z_riscv_pmp_stackguard_prepare(struct k_thread *thread) in z_riscv_pmp_stackguard_prepare() argument
480 unsigned int index = z_riscv_pmp_thread_init(PMP_M_MODE(thread)); in z_riscv_pmp_stackguard_prepare()
[all …]
/Zephyr-latest/doc/kernel/services/threads/
index.rst
16 A :dfn:`thread` is a kernel object that is used for application processing
20 available RAM). Each thread is referenced by a :dfn:`thread id` that is assigned
21 when the thread is spawned.
23 A thread has the following key properties:
25 * A **stack area**, which is a region of memory used for the thread's stack.
27 of the thread's processing. Special macros exist to create and work with
30 * A **thread control block** for private kernel bookkeeping of the thread's
33 * An **entry point function**, which is invoked when the thread is started.
37 allocate CPU time to the thread. (See :ref:`scheduling_v2`.)
39 * A set of **thread options**, which allow the thread to receive special
[all …]
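
The thread properties listed in this documentation excerpt map directly onto the arguments of k_thread_create(): a stack area, a thread control block, an entry point function, and a scheduling priority, with the returned thread id referencing the new thread. A minimal static-stack sketch (names, sizes, and priority are illustrative):

#include <zephyr/kernel.h>

#define BLINK_STACK_SIZE 1024
#define BLINK_PRIORITY   5

K_THREAD_STACK_DEFINE(blink_stack, BLINK_STACK_SIZE);  /* stack area */
static struct k_thread blink_thread;                   /* thread control block */

static void blink_entry(void *p1, void *p2, void *p3)  /* entry point function */
{
        while (1) {
                k_sleep(K_MSEC(500));
        }
}

void spawn_blink(void)
{
        k_tid_t tid = k_thread_create(&blink_thread, blink_stack,
                                      K_THREAD_STACK_SIZEOF(blink_stack),
                                      blink_entry, NULL, NULL, NULL,
                                      BLINK_PRIORITY, 0, K_NO_WAIT);  /* thread id */

        k_thread_name_set(tid, "blink");
}
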
/Zephyr-latest/samples/userspace/syscall_perf/
README.rst
10 user thread has to go through a system call compared to a supervisor thread that
17 This application creates a supervisor and a user thread.
19 current thread. The user thread has to go through a system call.
30 User thread: 18012 cycles 748 instructions
31 Supervisor thread: 7 cycles 4 instructions
32 User thread: 20136 cycles 748 instructions
33 Supervisor thread: 7 cycles 4 instructions
34 User thread: 18014 cycles 748 instructions
35 Supervisor thread: 7 cycles 4 instructions
/Zephyr-latest/arch/riscv/include/
pmp.h
11 void z_riscv_pmp_stackguard_prepare(struct k_thread *thread);
12 void z_riscv_pmp_stackguard_enable(struct k_thread *thread);
14 void z_riscv_pmp_usermode_init(struct k_thread *thread);
15 void z_riscv_pmp_usermode_prepare(struct k_thread *thread);
16 void z_riscv_pmp_usermode_enable(struct k_thread *thread);
/Zephyr-latest/kernel/
thread.c
124 int z_impl_k_thread_priority_get(k_tid_t thread) in z_impl_k_thread_priority_get() argument
126 return thread->base.prio; in z_impl_k_thread_priority_get()
130 static inline int z_vrfy_k_thread_priority_get(k_tid_t thread) in z_vrfy_k_thread_priority_get() argument
132 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD)); in z_vrfy_k_thread_priority_get()
133 return z_impl_k_thread_priority_get(thread); in z_vrfy_k_thread_priority_get()
138 int z_impl_k_thread_name_set(k_tid_t thread, const char *str) in z_impl_k_thread_name_set() argument
141 if (thread == NULL) { in z_impl_k_thread_name_set()
142 thread = _current; in z_impl_k_thread_name_set()
145 strncpy(thread->name, str, CONFIG_THREAD_MAX_NAME_LEN - 1); in z_impl_k_thread_name_set()
146 thread->name[CONFIG_THREAD_MAX_NAME_LEN - 1] = '\0'; in z_impl_k_thread_name_set()
[all …]
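
The kernel code above backs the public k_thread_priority_get() and k_thread_name_set() APIs; naming requires CONFIG_THREAD_NAME, which caps names at CONFIG_THREAD_MAX_NAME_LEN as the strncpy in the excerpt shows. A brief usage sketch under those assumptions:

#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

void tag_current_thread(void)
{
        k_tid_t self = k_current_get();

        /* Passing NULL instead of self also means "the current thread". */
        k_thread_name_set(self, "worker");

        printk("\"%s\" runs at priority %d\n",
               k_thread_name_get(self), k_thread_priority_get(self));
}
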
fatal.c
49 static const char *thread_name_get(struct k_thread *thread) in thread_name_get() argument
51 const char *thread_name = (thread != NULL) ? k_thread_name_get(thread) : NULL; in thread_name_get()
92 struct k_thread *thread = IS_ENABLED(CONFIG_MULTITHREADING) ? in z_fatal_error() local
114 LOG_ERR("Current thread: %p (%s)", thread, thread_name_get(thread)); in z_fatal_error()
117 coredump(reason, esf, thread); in z_fatal_error()
177 k_thread_abort(thread); in z_fatal_error()
events.c
101 static int event_walk_op(struct k_thread *thread, void *data) in event_walk_op() argument
106 wait_condition = thread->event_options & K_EVENT_WAIT_MASK; in event_walk_op()
108 if (are_wait_conditions_met(thread->events, event_data->events, in event_walk_op()
117 thread->no_wake_on_timeout = true; in event_walk_op()
123 thread->next_event_link = event_data->head; in event_walk_op()
124 event_data->head = thread; in event_walk_op()
125 z_abort_timeout(&thread->base.timeout); in event_walk_op()
135 struct k_thread *thread; in k_event_post_internal() local
163 thread = data.head; in k_event_post_internal()
166 arch_thread_return_value_set(thread, 0); in k_event_post_internal()
[all …]
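
event_walk_op() and k_event_post_internal() above are the wake-up machinery behind the public k_event API (CONFIG_EVENTS). A hedged sketch of the usual producer/consumer pairing; the event bits and timeout are illustrative:

#include <zephyr/kernel.h>

#define EVT_RX_DONE BIT(0)
#define EVT_TX_DONE BIT(1)

K_EVENT_DEFINE(io_events);

void producer(void)
{
        /* Posting the bit satisfies any matching wait condition. */
        k_event_post(&io_events, EVT_RX_DONE);
}

void consumer(void)
{
        /* Wait for either bit; reset=false keeps previously posted bits. */
        uint32_t got = k_event_wait(&io_events, EVT_RX_DONE | EVT_TX_DONE,
                                    false, K_MSEC(100));

        if (got == 0) {
                /* timed out */
        }
}
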
/Zephyr-latest/arch/x86/core/
userspace.c
71 void *z_x86_userspace_prepare_thread(struct k_thread *thread) in z_x86_userspace_prepare_thread() argument
75 if (z_stack_is_user_capable(thread->stack_obj)) { in z_x86_userspace_prepare_thread()
78 (struct z_x86_thread_stack_header *)thread->stack_info.mapped.addr; in z_x86_userspace_prepare_thread()
80 (struct z_x86_thread_stack_header *)thread->stack_obj; in z_x86_userspace_prepare_thread()
83 thread->arch.psp = header->privilege_stack + sizeof(header->privilege_stack); in z_x86_userspace_prepare_thread()
85 thread->arch.psp = NULL; in z_x86_userspace_prepare_thread()
93 thread->arch.ptables = (uintptr_t)NULL; in z_x86_userspace_prepare_thread()
96 if ((thread->base.user_options & K_USER) != 0U) { in z_x86_userspace_prepare_thread()
114 (struct z_x86_thread_stack_header *)thread->stack_obj; in z_x86_userspace_prepare_thread()
189 int arch_thread_priv_stack_space_get(const struct k_thread *thread, size_t *stack_size, in arch_thread_priv_stack_space_get() argument
[all …]
/Zephyr-latest/include/zephyr/debug/
stack.h
19 static inline void log_stack_usage(const struct k_thread *thread) in log_stack_usage() argument
22 size_t unused, size = thread->stack_info.size; in log_stack_usage()
28 if (k_thread_stack_space_get(thread, &unused) == 0) { in log_stack_usage()
32 tname = k_thread_name_get((k_tid_t)thread); in log_stack_usage()
38 thread, tname, unused, size - unused, size, in log_stack_usage()
42 ARG_UNUSED(thread); in log_stack_usage()
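
log_stack_usage() above is built on k_thread_stack_space_get(), which can also be called directly; it needs CONFIG_INIT_STACKS and CONFIG_THREAD_STACK_INFO. A small sketch along the same lines:

#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

void report_stack_headroom(const struct k_thread *thread)
{
        size_t unused;
        const char *tname = k_thread_name_get((k_tid_t)thread);

        if (k_thread_stack_space_get(thread, &unused) == 0) {
                printk("%s: %zu of %zu bytes unused\n",
                       tname != NULL ? tname : "unnamed",
                       unused, thread->stack_info.size);
        }
}
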
/Zephyr-latest/arch/arm/core/cortex_m/
thread_abort.c
26 void z_impl_k_thread_abort(k_tid_t thread) in z_impl_k_thread_abort() argument
28 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread); in z_impl_k_thread_abort()
30 if (_current == thread) { in z_impl_k_thread_abort()
53 z_thread_abort(thread); in z_impl_k_thread_abort()
55 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread); in z_impl_k_thread_abort()
/Zephyr-latest/subsys/sensing/
Kconfig
48 int "stack size for sensing subsystem runtime thread"
52 This is the stack size for sensing subsystem runtime thread
56 int "priority for sensing subsystem runtime thread"
60 This is the thread priority for the sensing subsystem runtime thread.
61 Ring buffer data is stored by the runtime thread, which then gives a semaphore
62 to notify the dispatch thread; the runtime thread priority should be lower than
63 the dispatch thread priority to ensure the dispatch thread can fetch data as
64 soon as the runtime thread gives the semaphore. For example, if the runtime
65 priority is higher than the dispatch thread's and the runtime thread runs at full
66 load with no sleep, then the dispatch thread has no chance to fetch
[all …]
/Zephyr-latest/kernel/include/
priority_q.h
106 static ALWAYS_INLINE void z_priq_simple_add(sys_dlist_t *pq, struct k_thread *thread) in z_priq_simple_add() argument
111 if (z_sched_prio_cmp(thread, t) > 0) { in z_priq_simple_add()
112 sys_dlist_insert(&t->base.qnode_dlist, &thread->base.qnode_dlist); in z_priq_simple_add()
117 sys_dlist_append(pq, &thread->base.qnode_dlist); in z_priq_simple_add()
120 static ALWAYS_INLINE void z_priq_simple_remove(sys_dlist_t *pq, struct k_thread *thread) in z_priq_simple_remove() argument
124 sys_dlist_remove(&thread->base.qnode_dlist); in z_priq_simple_remove()
160 struct k_thread *thread = NULL; in z_priq_simple_best() local
164 thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist); in z_priq_simple_best()
166 return thread; in z_priq_simple_best()
175 struct k_thread *thread; in z_priq_simple_mask_best() local
[all …]
/Zephyr-latest/tests/kernel/threads/thread_apis/src/
test_threads_cpu_mask.c
28 k_tid_t thread; in ZTEST() local
62 thread = k_thread_create(&child_thread, in ZTEST()
70 ret = k_thread_cpu_mask_clear(thread); in ZTEST()
73 ret = k_thread_cpu_mask_enable_all(thread); in ZTEST()
76 ret = k_thread_cpu_mask_disable(thread, 0); in ZTEST()
79 ret = k_thread_cpu_mask_enable(thread, 0); in ZTEST()
82 ret = k_thread_cpu_pin(thread, 0); in ZTEST()
90 k_thread_start(thread); in ZTEST()
99 k_thread_abort(thread); in ZTEST()
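
The test above exercises the CONFIG_SCHED_CPU_MASK API. A thread's CPU mask can generally only be changed while it is not runnable, which is why the test creates the child thread first and only calls k_thread_start() after adjusting the mask. A hedged sketch of that sequence (stack size and priority are illustrative):

#include <zephyr/kernel.h>

K_THREAD_STACK_DEFINE(pinned_stack, 1024);
static struct k_thread pinned_thread;

static void pinned_entry(void *p1, void *p2, void *p3)
{
        /* ... work that must stay on one CPU ... */
}

void start_pinned_on_cpu0(void)
{
        /* K_FOREVER: created but not started, so the mask may still change. */
        k_tid_t tid = k_thread_create(&pinned_thread, pinned_stack,
                                      K_THREAD_STACK_SIZEOF(pinned_stack),
                                      pinned_entry, NULL, NULL, NULL,
                                      5, 0, K_FOREVER);

        if (k_thread_cpu_pin(tid, 0) == 0) {    /* run only on CPU 0 */
                k_thread_start(tid);
        }
}
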
/Zephyr-latest/tests/benchmarks/sched/
README.rst
7 abstractions. It works very simply: a main thread creates a "partner"
8 thread at a higher priority, the partner then sleeps using
11 1. The main thread calls _unpend_first_thread()
12 2. The main thread calls _ready_thread()
13 3. The main thread calls k_yield()
14 (the kernel switches to the partner thread)
15 4. The partner thread then runs and calls _pend_curr_irqlock() again
16 (the kernel switches to the main thread)
17 5. The main thread returns from k_yield()
/Zephyr-latest/subsys/tracing/user/
tracing_user.c
13 void __weak sys_trace_thread_create_user(struct k_thread *thread) {} in sys_trace_thread_create_user() argument
14 void __weak sys_trace_thread_abort_user(struct k_thread *thread) {} in sys_trace_thread_abort_user() argument
15 void __weak sys_trace_thread_suspend_user(struct k_thread *thread) {} in sys_trace_thread_suspend_user() argument
16 void __weak sys_trace_thread_resume_user(struct k_thread *thread) {} in sys_trace_thread_resume_user() argument
17 void __weak sys_trace_thread_name_set_user(struct k_thread *thread) {} in sys_trace_thread_name_set_user() argument
20 void __weak sys_trace_thread_info_user(struct k_thread *thread) {} in sys_trace_thread_info_user() argument
21 void __weak sys_trace_thread_sched_ready_user(struct k_thread *thread) {} in sys_trace_thread_sched_ready_user() argument
22 void __weak sys_trace_thread_pend_user(struct k_thread *thread) {} in sys_trace_thread_pend_user() argument
23 void __weak sys_trace_thread_priority_set_user(struct k_thread *thread, int prio) {} in sys_trace_thread_priority_set_user() argument
79 void sys_trace_thread_create(struct k_thread *thread) in sys_trace_thread_create() argument
[all …]
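
The __weak stubs above are the CONFIG_TRACING_USER hook points: an application overrides whichever hooks it cares about with strong definitions of the same signatures. A minimal sketch, assuming the prototypes are pulled in via <zephyr/tracing/tracing.h> when CONFIG_TRACING_USER is enabled; keep the hooks short, as they can run in scheduling paths:

#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>
#include <zephyr/tracing/tracing.h>

/* Strong definition overrides the __weak stub in tracing_user.c. */
void sys_trace_thread_create_user(struct k_thread *thread)
{
        printk("created thread %p\n", (void *)thread);
}

void sys_trace_thread_abort_user(struct k_thread *thread)
{
        printk("aborted thread %p\n", (void *)thread);
}
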
/Zephyr-latest/doc/kernel/services/data_passing/
mailboxes.rst
31 A thread that sends a message is known as the **sending thread**,
32 while a thread that receives the message is known as the **receiving thread**.
33 Each message may be received by only one thread (i.e. point-to-multipoint and
38 (and even specify) the identity of the other thread.
45 Both the sending thread and the receiving thread supply a message descriptor
55 A **message buffer** is an area of memory provided by the thread that sends or
69 it is given to a mailbox by the sending thread. The message is then owned
70 by the mailbox until it is given to a receiving thread. The receiving thread
78 A sending thread can specify the address of the thread to which the message
79 is sent, or send it to any thread by specifying :c:macro:`K_ANY`.
[all …]
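
The sending/receiving roles described in this excerpt map onto k_mbox_put() and k_mbox_get(), with struct k_mbox_msg acting as the message descriptor on each side and K_ANY standing in for "any thread". A hedged sketch (payload and buffer sizes are illustrative):

#include <zephyr/kernel.h>

K_MBOX_DEFINE(my_mailbox);

void send_hello(void)
{
        char payload[] = "hello";
        struct k_mbox_msg send_msg = {
                .size = sizeof(payload),
                .tx_data = payload,
                .tx_target_thread = K_ANY,   /* any thread may receive it */
        };

        /* Synchronous send: blocks until a receiver takes the message. */
        k_mbox_put(&my_mailbox, &send_msg, K_FOREVER);
}

void receive_hello(void)
{
        char buffer[32];
        struct k_mbox_msg recv_msg = {
                .size = sizeof(buffer),
                .rx_source_thread = K_ANY,   /* accept from any sender */
        };

        k_mbox_get(&my_mailbox, &recv_msg, buffer, K_FOREVER);
}
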
/Zephyr-latest/subsys/debug/coredump/
coredump_core.c
41 __weak void arch_coredump_priv_stack_dump(struct k_thread *thread) in arch_coredump_priv_stack_dump() argument
44 ARG_UNUSED(thread); in arch_coredump_priv_stack_dump()
71 static void dump_thread(struct k_thread *thread) in dump_thread() argument
81 if (thread == NULL) { in dump_thread()
85 end_addr = POINTER_TO_UINT(thread) + sizeof(*thread); in dump_thread()
87 coredump_memory_dump(POINTER_TO_UINT(thread), end_addr); in dump_thread()
89 end_addr = thread->stack_info.start + thread->stack_info.size; in dump_thread()
91 coredump_memory_dump(thread->stack_info.start, end_addr); in dump_thread()
94 if ((thread->base.user_options & K_USER) == K_USER) { in dump_thread()
95 arch_coredump_priv_stack_dump(thread); in dump_thread()
[all …]
/Zephyr-latest/arch/arm/core/mpu/
arm_core_mpu.c
57 uint32_t z_arm_mpu_stack_guard_and_fpu_adjust(struct k_thread *thread);
182 void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread) in z_arm_configure_dynamic_mpu_regions() argument
211 LOG_DBG("configure thread %p's domain", thread); in z_arm_configure_dynamic_mpu_regions()
212 struct k_mem_domain *mem_domain = thread->mem_domain_info.mem_domain; in z_arm_configure_dynamic_mpu_regions()
247 LOG_DBG("configure user thread %p's context", thread); in z_arm_configure_dynamic_mpu_regions()
248 if (thread->arch.priv_stack_start) { in z_arm_configure_dynamic_mpu_regions()
250 uintptr_t base = (uintptr_t)thread->stack_obj; in z_arm_configure_dynamic_mpu_regions()
251 size_t size = thread->stack_info.size + in z_arm_configure_dynamic_mpu_regions()
252 (thread->stack_info.start - base); in z_arm_configure_dynamic_mpu_regions()
276 guard_size = z_arm_mpu_stack_guard_and_fpu_adjust(thread); in z_arm_configure_dynamic_mpu_regions()
[all …]
