/*
 * Copyright (c) 2010-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Kernel thread support
 *
 * This module provides general purpose thread support.
 */

#include <zephyr/kernel.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/sys_clock.h>
#include <ksched.h>
#include <kthread.h>
#include <wait_q.h>
#include <zephyr/internal/syscall_handler.h>
#include <kernel_internal.h>
#include <kswap.h>
#include <zephyr/init.h>
#include <zephyr/tracing/tracing.h>
#include <string.h>
#include <stdbool.h>
#include <zephyr/sys/check.h>
#include <zephyr/random/random.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/logging/log.h>
#include <zephyr/llext/symbol.h>
#include <zephyr/sys/iterable_sections.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#ifdef CONFIG_OBJ_CORE_THREAD
static struct k_obj_type obj_type_thread;

#ifdef CONFIG_OBJ_CORE_STATS_THREAD
static struct k_obj_core_stats_desc thread_stats_desc = {
	.raw_size = sizeof(struct k_cycle_stats),
	.query_size = sizeof(struct k_thread_runtime_stats),
	.raw = z_thread_stats_raw,
	.query = z_thread_stats_query,
	.reset = z_thread_stats_reset,
	.disable = z_thread_stats_disable,
	.enable = z_thread_stats_enable,
};
#endif /* CONFIG_OBJ_CORE_STATS_THREAD */

static int init_thread_obj_core_list(void)
{
	/* Initialize thread object type */

#ifdef CONFIG_OBJ_CORE_THREAD
	z_obj_type_init(&obj_type_thread, K_OBJ_TYPE_THREAD_ID,
			offsetof(struct k_thread, obj_core));
#endif /* CONFIG_OBJ_CORE_THREAD */

#ifdef CONFIG_OBJ_CORE_STATS_THREAD
	k_obj_type_stats_init(&obj_type_thread, &thread_stats_desc);
#endif /* CONFIG_OBJ_CORE_STATS_THREAD */

	return 0;
}

SYS_INIT(init_thread_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_THREAD */


#define _FOREACH_STATIC_THREAD(thread_data) \
	STRUCT_SECTION_FOREACH(_static_thread_data, thread_data)

bool k_is_in_isr(void)
{
	return arch_is_in_isr();
}
EXPORT_SYMBOL(k_is_in_isr);

#ifdef CONFIG_THREAD_CUSTOM_DATA
void z_impl_k_thread_custom_data_set(void *value)
{
	_current->custom_data = value;
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_custom_data_set(void *data)
{
	z_impl_k_thread_custom_data_set(data);
}
#include <zephyr/syscalls/k_thread_custom_data_set_mrsh.c>
#endif /* CONFIG_USERSPACE */

void *z_impl_k_thread_custom_data_get(void)
{
	return _current->custom_data;
}

#ifdef CONFIG_USERSPACE
static inline void *z_vrfy_k_thread_custom_data_get(void)
{
	return z_impl_k_thread_custom_data_get();
}
#include <zephyr/syscalls/k_thread_custom_data_get_mrsh.c>

#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_THREAD_CUSTOM_DATA */
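
/* Illustrative sketch (not part of the original file): with
 * CONFIG_THREAD_CUSTOM_DATA=y an application thread can stash a per-thread
 * pointer that the kernel treats as opaque. 'struct my_ctx' below is a
 * hypothetical application type.
 *
 *	static struct my_ctx ctx;
 *
 *	k_thread_custom_data_set(&ctx);
 *	...
 *	struct my_ctx *p = k_thread_custom_data_get();
 */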

int z_impl_k_is_preempt_thread(void)
{
	return !arch_is_in_isr() && thread_is_preemptible(_current);
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_is_preempt_thread(void)
{
	return z_impl_k_is_preempt_thread();
}
#include <zephyr/syscalls/k_is_preempt_thread_mrsh.c>
#endif /* CONFIG_USERSPACE */

int z_impl_k_thread_priority_get(k_tid_t thread)
{
	return thread->base.prio;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_priority_get(k_tid_t thread)
{
	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	return z_impl_k_thread_priority_get(thread);
}
#include <zephyr/syscalls/k_thread_priority_get_mrsh.c>
#endif /* CONFIG_USERSPACE */

int z_impl_k_thread_name_set(k_tid_t thread, const char *str)
{
#ifdef CONFIG_THREAD_NAME
	if (thread == NULL) {
		thread = _current;
	}

	strncpy(thread->name, str, CONFIG_THREAD_MAX_NAME_LEN - 1);
	thread->name[CONFIG_THREAD_MAX_NAME_LEN - 1] = '\0';

#ifdef CONFIG_ARCH_HAS_THREAD_NAME_HOOK
	arch_thread_name_set(thread, str);
#endif /* CONFIG_ARCH_HAS_THREAD_NAME_HOOK */

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, name_set, thread, 0);

	return 0;
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(str);

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, name_set, thread, -ENOSYS);

	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_name_set(k_tid_t thread, const char *str)
{
#ifdef CONFIG_THREAD_NAME
	char name[CONFIG_THREAD_MAX_NAME_LEN];

	if (thread != NULL) {
		if (K_SYSCALL_OBJ(thread, K_OBJ_THREAD) != 0) {
			return -EINVAL;
		}
	}

	/* In theory we could copy directly into thread->name, but
	 * the current z_vrfy / z_impl split does not provide a
	 * means of doing so.
	 */
	if (k_usermode_string_copy(name, str, sizeof(name)) != 0) {
		return -EFAULT;
	}

	return z_impl_k_thread_name_set(thread, name);
#else
	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
#include <zephyr/syscalls/k_thread_name_set_mrsh.c>
#endif /* CONFIG_USERSPACE */

const char *k_thread_name_get(k_tid_t thread)
{
#ifdef CONFIG_THREAD_NAME
	return (const char *)thread->name;
#else
	ARG_UNUSED(thread);
	return NULL;
#endif /* CONFIG_THREAD_NAME */
}

int z_impl_k_thread_name_copy(k_tid_t thread, char *buf, size_t size)
{
#ifdef CONFIG_THREAD_NAME
	strncpy(buf, thread->name, size);
	return 0;
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(buf);
	ARG_UNUSED(size);
	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
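
/* Illustrative sketch (not part of the original file): with
 * CONFIG_THREAD_NAME=y an application might label the current thread and
 * read the name back through the public wrappers of the handlers above:
 *
 *	char name[CONFIG_THREAD_MAX_NAME_LEN];
 *
 *	k_thread_name_set(k_current_get(), "worker");
 *	k_thread_name_copy(k_current_get(), name, sizeof(name));
 *
 * Names longer than CONFIG_THREAD_MAX_NAME_LEN - 1 characters are truncated.
 */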

static size_t copy_bytes(char *dest, size_t dest_size, const char *src, size_t src_size)
{
	size_t bytes_to_copy;

	bytes_to_copy = MIN(dest_size, src_size);
	memcpy(dest, src, bytes_to_copy);

	return bytes_to_copy;
}

const char *k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size)
{
	size_t off = 0;
	uint8_t bit;
	uint8_t thread_state = thread_id->base.thread_state;
#define SS_ENT(s) { Z_STATE_STR_##s, _THREAD_##s, sizeof(Z_STATE_STR_##s) - 1 }
	static const struct {
		const char *str;
		uint16_t bit;
		uint16_t len;
	} state_string[] = {
		SS_ENT(DUMMY),
		SS_ENT(PENDING),
		SS_ENT(SLEEPING),
		SS_ENT(DEAD),
		SS_ENT(SUSPENDED),
		SS_ENT(ABORTING),
		SS_ENT(SUSPENDING),
		SS_ENT(QUEUED),
	};
#undef SS_ENT

	if ((buf == NULL) || (buf_size == 0)) {
		return "";
	}

	buf_size--; /* Reserve 1 byte for end-of-string character */

	/*
	 * Loop through each bit in the thread_state. Stop once all have
	 * been processed. If more than one thread_state bit is set, then
	 * separate the descriptive strings with a '+'.
	 */

	for (unsigned int index = 0; thread_state != 0; index++) {
		bit = state_string[index].bit;
		if ((thread_state & bit) == 0) {
			continue;
		}

		off += copy_bytes(buf + off, buf_size - off,
				  state_string[index].str,
				  state_string[index].len);

		thread_state &= ~bit;

		if (thread_state != 0) {
			off += copy_bytes(buf + off, buf_size - off, "+", 1);
		}
	}

	buf[off] = '\0';

	return (const char *)buf;
}
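
/* Illustrative sketch (not part of the original file): a debug or shell path
 * could render a thread's state bits for logging, e.g.
 *
 *	char state_buf[32];
 *
 *	LOG_DBG("thread %p is %s", thread,
 *		k_thread_state_str(thread, state_buf, sizeof(state_buf)));
 *
 * When several state bits are set, the per-bit strings are joined with '+'.
 */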

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_name_copy(k_tid_t thread,
					    char *buf, size_t size)
{
#ifdef CONFIG_THREAD_NAME
	size_t len;
	struct k_object *ko = k_object_find(thread);

	/* Special case: we allow reading the names of initialized threads
	 * even if we don't have permission on them
	 */
	if ((thread == NULL) || (ko->type != K_OBJ_THREAD) ||
	    ((ko->flags & K_OBJ_FLAG_INITIALIZED) == 0)) {
		return -EINVAL;
	}
	if (K_SYSCALL_MEMORY_WRITE(buf, size) != 0) {
		return -EFAULT;
	}
	len = strlen(thread->name);
	if ((len + 1) > size) {
		return -ENOSPC;
	}

	return k_usermode_to_copy((void *)buf, thread->name, len + 1);
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(buf);
	ARG_UNUSED(size);
	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
#include <zephyr/syscalls/k_thread_name_copy_mrsh.c>
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_STACK_SENTINEL
/* Check that the stack sentinel is still present
 *
 * The stack sentinel feature writes a magic value to the lowest 4 bytes of
 * the thread's stack when the thread is initialized. This value gets checked
 * in a few places:
 *
 * 1) In k_yield() if the current thread is not swapped out
 * 2) After servicing a non-nested interrupt
 * 3) In z_swap(), check the sentinel in the outgoing thread
 *
 * Item 2 requires support in arch/ code.
 *
 * If the check fails, the thread will be terminated appropriately through
 * the system fatal error handler.
 */
void z_check_stack_sentinel(void)
{
	uint32_t *stack;

	if ((_current->base.thread_state & _THREAD_DUMMY) != 0) {
		return;
	}

	stack = (uint32_t *)_current->stack_info.start;
	if (*stack != STACK_SENTINEL) {
		/* Restore it so further checks don't trigger this same error */
		*stack = STACK_SENTINEL;
		z_except_reason(K_ERR_STACK_CHK_FAIL);
	}
}
#endif /* CONFIG_STACK_SENTINEL */

#if defined(CONFIG_STACK_POINTER_RANDOM) && (CONFIG_STACK_POINTER_RANDOM != 0)
int z_stack_adjust_initialized;

static size_t random_offset(size_t stack_size)
{
	size_t random_val;

	if (!z_stack_adjust_initialized) {
		z_early_rand_get((uint8_t *)&random_val, sizeof(random_val));
	} else {
		sys_rand_get((uint8_t *)&random_val, sizeof(random_val));
	}

	/* Don't need to worry about alignment of the size here,
	 * arch_new_thread() is required to do it.
	 *
	 * FIXME: Not the best way to get a random number in a range.
	 * See #6493
	 */
	const size_t fuzz = random_val % CONFIG_STACK_POINTER_RANDOM;

	if (unlikely(fuzz * 2 > stack_size)) {
		return 0;
	}

	return fuzz;
}
#if defined(CONFIG_STACK_GROWS_UP)
/* This is so rare not bothering for now */
#error "Stack pointer randomization not implemented for upward growing stacks"
#endif /* CONFIG_STACK_GROWS_UP */
#endif /* CONFIG_STACK_POINTER_RANDOM */

static char *setup_thread_stack(struct k_thread *new_thread,
				k_thread_stack_t *stack, size_t stack_size)
{
	size_t stack_obj_size, stack_buf_size;
	char *stack_ptr, *stack_buf_start;
	size_t delta = 0;

#ifdef CONFIG_USERSPACE
	if (z_stack_is_user_capable(stack)) {
		stack_obj_size = K_THREAD_STACK_LEN(stack_size);
		stack_buf_start = K_THREAD_STACK_BUFFER(stack);
		stack_buf_size = stack_obj_size - K_THREAD_STACK_RESERVED;
	} else
#endif /* CONFIG_USERSPACE */
	{
		/* Object cannot host a user mode thread */
		stack_obj_size = K_KERNEL_STACK_LEN(stack_size);
		stack_buf_start = K_KERNEL_STACK_BUFFER(stack);
		stack_buf_size = stack_obj_size - K_KERNEL_STACK_RESERVED;

#if defined(ARCH_KERNEL_STACK_RESERVED)
		/* Zephyr treats stack overflow as an app bug. But
		 * this particular overflow can be seen by static
		 * analysis so needs to be handled somehow.
		 */
		if (K_KERNEL_STACK_RESERVED > stack_obj_size) {
			k_panic();
		}
#endif
	}

#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
	/* Map the stack into virtual memory and use that as the base to
	 * calculate the initial stack pointer at the high end of the stack
	 * object. The stack pointer may be reduced later in this function
	 * by TLS or random offset.
	 *
	 * K_MEM_MAP_UNINIT is used to mimic the behavior of non-mapped
	 * stack. If CONFIG_INIT_STACKS is enabled, the stack will be
	 * cleared below.
	 */
	void *stack_mapped = k_mem_map_phys_guard((uintptr_t)stack, stack_obj_size,
						  K_MEM_PERM_RW | K_MEM_CACHE_WB | K_MEM_MAP_UNINIT,
						  false);

	__ASSERT_NO_MSG((uintptr_t)stack_mapped != 0);

#ifdef CONFIG_USERSPACE
	if (z_stack_is_user_capable(stack)) {
		stack_buf_start = K_THREAD_STACK_BUFFER(stack_mapped);
	} else
#endif /* CONFIG_USERSPACE */
	{
		stack_buf_start = K_KERNEL_STACK_BUFFER(stack_mapped);
	}

	stack_ptr = (char *)stack_mapped + stack_obj_size;

	/* Need to store the info on mapped stack so we can remove the mappings
	 * when the thread ends.
	 */
	new_thread->stack_info.mapped.addr = stack_mapped;
	new_thread->stack_info.mapped.sz = stack_obj_size;

#else /* CONFIG_THREAD_STACK_MEM_MAPPED */

	/* Initial stack pointer at the high end of the stack object, may
	 * be reduced later in this function by TLS or random offset
	 */
	stack_ptr = (char *)stack + stack_obj_size;

#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */

	LOG_DBG("stack %p for thread %p: obj_size=%zu buf_start=%p "
		" buf_size %zu stack_ptr=%p",
		stack, new_thread, stack_obj_size, (void *)stack_buf_start,
		stack_buf_size, (void *)stack_ptr);

#ifdef CONFIG_INIT_STACKS
	memset(stack_buf_start, 0xaa, stack_buf_size);
#endif /* CONFIG_INIT_STACKS */
#ifdef CONFIG_STACK_SENTINEL
	/* Put the stack sentinel at the lowest 4 bytes of the stack area.
	 * We periodically check that it's still present and kill the thread
	 * if it isn't.
	 */
	*((uint32_t *)stack_buf_start) = STACK_SENTINEL;
#endif /* CONFIG_STACK_SENTINEL */
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/* TLS is always last within the stack buffer */
	delta += arch_tls_stack_setup(new_thread, stack_ptr);
#endif /* CONFIG_THREAD_LOCAL_STORAGE */
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
	size_t tls_size = sizeof(struct _thread_userspace_local_data);

	/* reserve space on highest memory of stack buffer for local data */
	delta += tls_size;
	new_thread->userspace_local_data =
		(struct _thread_userspace_local_data *)(stack_ptr - delta);
#endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */
#if defined(CONFIG_STACK_POINTER_RANDOM) && (CONFIG_STACK_POINTER_RANDOM != 0)
	delta += random_offset(stack_buf_size);
#endif /* CONFIG_STACK_POINTER_RANDOM */
	delta = ROUND_UP(delta, ARCH_STACK_PTR_ALIGN);
#ifdef CONFIG_THREAD_STACK_INFO
	/* Initial values. Arches which implement MPU guards that "borrow"
	 * memory from the stack buffer (not tracked in K_THREAD_STACK_RESERVED)
	 * will need to appropriately update this.
	 *
	 * The bounds tracked here correspond to the area of the stack object
	 * that the thread can access, which includes TLS.
	 */
	new_thread->stack_info.start = (uintptr_t)stack_buf_start;
	new_thread->stack_info.size = stack_buf_size;
	new_thread->stack_info.delta = delta;
#endif /* CONFIG_THREAD_STACK_INFO */
	stack_ptr -= delta;

	return stack_ptr;
}

/*
 * The provided stack_size value is presumed to be either the result of
 * K_THREAD_STACK_SIZEOF(stack), or the size value passed to the instance
 * of K_THREAD_STACK_DEFINE() which defined 'stack'.
 */
char *z_setup_new_thread(struct k_thread *new_thread,
			 k_thread_stack_t *stack, size_t stack_size,
			 k_thread_entry_t entry,
			 void *p1, void *p2, void *p3,
			 int prio, uint32_t options, const char *name)
{
	char *stack_ptr;

	Z_ASSERT_VALID_PRIO(prio, entry);

#ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
	k_thread_abort_cleanup_check_reuse(new_thread);
#endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */

#ifdef CONFIG_OBJ_CORE_THREAD
	k_obj_core_init_and_link(K_OBJ_CORE(new_thread), &obj_type_thread);
#ifdef CONFIG_OBJ_CORE_STATS_THREAD
	k_obj_core_stats_register(K_OBJ_CORE(new_thread),
				  &new_thread->base.usage,
				  sizeof(new_thread->base.usage));
#endif /* CONFIG_OBJ_CORE_STATS_THREAD */
#endif /* CONFIG_OBJ_CORE_THREAD */

#ifdef CONFIG_USERSPACE
	__ASSERT((options & K_USER) == 0U || z_stack_is_user_capable(stack),
		 "user thread %p with kernel-only stack %p",
		 new_thread, stack);
	k_object_init(new_thread);
	k_object_init(stack);
	new_thread->stack_obj = stack;
	new_thread->syscall_frame = NULL;

	/* Any given thread has access to itself */
	k_object_access_grant(new_thread, new_thread);
#endif /* CONFIG_USERSPACE */
	z_waitq_init(&new_thread->join_queue);

	/* Initialize various struct k_thread members */
	z_init_thread_base(&new_thread->base, prio, _THREAD_SLEEPING, options);
	stack_ptr = setup_thread_stack(new_thread, stack, stack_size);

#ifdef CONFIG_KERNEL_COHERENCE
	/* Check that the thread object is safe, but that the stack is
	 * still cached!
	 */
	__ASSERT_NO_MSG(arch_mem_coherent(new_thread));

	/* When dynamic thread stacks are in use, the stack may come from
	 * an uncached area.
	 */
#ifndef CONFIG_DYNAMIC_THREAD
	__ASSERT_NO_MSG(!arch_mem_coherent(stack));
#endif /* CONFIG_DYNAMIC_THREAD */

#endif /* CONFIG_KERNEL_COHERENCE */

	arch_new_thread(new_thread, stack, stack_ptr, entry, p1, p2, p3);

	/* static threads overwrite it afterwards with real value */
	new_thread->init_data = NULL;

#ifdef CONFIG_USE_SWITCH
	/* switch_handle must be non-null except when inside z_swap()
	 * for synchronization reasons. Historically some notional
	 * USE_SWITCH architectures have actually ignored the field
	 */
	__ASSERT(new_thread->switch_handle != NULL,
		 "arch layer failed to initialize switch_handle");
#endif /* CONFIG_USE_SWITCH */
#ifdef CONFIG_THREAD_CUSTOM_DATA
	/* Initialize custom data field (value is opaque to kernel) */
	new_thread->custom_data = NULL;
#endif /* CONFIG_THREAD_CUSTOM_DATA */
#ifdef CONFIG_EVENTS
	new_thread->no_wake_on_timeout = false;
#endif /* CONFIG_EVENTS */
#ifdef CONFIG_THREAD_MONITOR
	new_thread->entry.pEntry = entry;
	new_thread->entry.parameter1 = p1;
	new_thread->entry.parameter2 = p2;
	new_thread->entry.parameter3 = p3;

	k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);

	new_thread->next_thread = _kernel.threads;
	_kernel.threads = new_thread;
	k_spin_unlock(&z_thread_monitor_lock, key);
#endif /* CONFIG_THREAD_MONITOR */
#ifdef CONFIG_THREAD_NAME
	if (name != NULL) {
		strncpy(new_thread->name, name,
			CONFIG_THREAD_MAX_NAME_LEN - 1);
		/* Ensure NULL termination, truncate if longer */
		new_thread->name[CONFIG_THREAD_MAX_NAME_LEN - 1] = '\0';
#ifdef CONFIG_ARCH_HAS_THREAD_NAME_HOOK
		arch_thread_name_set(new_thread, name);
#endif /* CONFIG_ARCH_HAS_THREAD_NAME_HOOK */
	} else {
		new_thread->name[0] = '\0';
	}
#endif /* CONFIG_THREAD_NAME */
#ifdef CONFIG_SCHED_CPU_MASK
	if (IS_ENABLED(CONFIG_SCHED_CPU_MASK_PIN_ONLY)) {
		new_thread->base.cpu_mask = 1; /* must specify only one cpu */
	} else {
		new_thread->base.cpu_mask = -1; /* allow all cpus */
	}
#endif /* CONFIG_SCHED_CPU_MASK */
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
	/* _current may be null if the dummy thread is not used */
	if (!_current) {
		new_thread->resource_pool = NULL;
		return stack_ptr;
	}
#endif /* CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN */
#ifdef CONFIG_USERSPACE
	z_mem_domain_init_thread(new_thread);

	if ((options & K_INHERIT_PERMS) != 0U) {
		k_thread_perms_inherit(_current, new_thread);
	}
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_SCHED_DEADLINE
	new_thread->base.prio_deadline = 0;
#endif /* CONFIG_SCHED_DEADLINE */
	new_thread->resource_pool = _current->resource_pool;

#ifdef CONFIG_SMP
	z_waitq_init(&new_thread->halt_queue);
#endif /* CONFIG_SMP */

#ifdef CONFIG_SCHED_THREAD_USAGE
	new_thread->base.usage = (struct k_cycle_stats) {};
	new_thread->base.usage.track_usage =
		CONFIG_SCHED_THREAD_USAGE_AUTO_ENABLE;
#endif /* CONFIG_SCHED_THREAD_USAGE */

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, create, new_thread);

	return stack_ptr;
}


k_tid_t z_impl_k_thread_create(struct k_thread *new_thread,
			       k_thread_stack_t *stack,
			       size_t stack_size, k_thread_entry_t entry,
			       void *p1, void *p2, void *p3,
			       int prio, uint32_t options, k_timeout_t delay)
{
	__ASSERT(!arch_is_in_isr(), "Threads may not be created in ISRs");

	z_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
			   prio, options, NULL);

	if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
		thread_schedule_new(new_thread, delay);
	}

	return new_thread;
}
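
/* Illustrative sketch (not part of the original file): typical static-stack
 * use of k_thread_create(); 'my_stack', 'my_thread' and 'my_entry' are
 * hypothetical application symbols.
 *
 *	K_THREAD_STACK_DEFINE(my_stack, 1024);
 *	static struct k_thread my_thread;
 *
 *	void my_entry(void *p1, void *p2, void *p3) { ... }
 *
 *	k_thread_create(&my_thread, my_stack, K_THREAD_STACK_SIZEOF(my_stack),
 *			my_entry, NULL, NULL, NULL,
 *			K_PRIO_PREEMPT(5), 0, K_NO_WAIT);
 *
 * Passing K_FOREVER as the delay defers scheduling until k_thread_start()
 * is called on the thread.
 */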

#ifdef CONFIG_USERSPACE
bool z_stack_is_user_capable(k_thread_stack_t *stack)
{
	return k_object_find(stack) != NULL;
}

k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
			       k_thread_stack_t *stack,
			       size_t stack_size, k_thread_entry_t entry,
			       void *p1, void *p2, void *p3,
			       int prio, uint32_t options, k_timeout_t delay)
{
	size_t total_size, stack_obj_size;
	struct k_object *stack_object;

	/* The thread and stack objects *must* be in an uninitialized state */
	K_OOPS(K_SYSCALL_OBJ_NEVER_INIT(new_thread, K_OBJ_THREAD));

	/* No need to check z_stack_is_user_capable(), it won't be in the
	 * object table if it isn't
	 */
	stack_object = k_object_find(stack);
	K_OOPS(K_SYSCALL_VERIFY_MSG(k_object_validation_check(stack_object, stack,
							      K_OBJ_THREAD_STACK_ELEMENT,
							      _OBJ_INIT_FALSE) == 0,
				    "bad stack object"));

	/* Verify that the stack size passed in is OK by computing the total
	 * size and comparing it with the size value in the object metadata
	 */
	K_OOPS(K_SYSCALL_VERIFY_MSG(!size_add_overflow(K_THREAD_STACK_RESERVED,
						       stack_size, &total_size),
				    "stack size overflow (%zu+%zu)",
				    stack_size,
				    K_THREAD_STACK_RESERVED));

	/* Testing less-than-or-equal since additional room may have been
	 * allocated for alignment constraints
	 */
#ifdef CONFIG_GEN_PRIV_STACKS
	stack_obj_size = stack_object->data.stack_data->size;
#else
	stack_obj_size = stack_object->data.stack_size;
#endif /* CONFIG_GEN_PRIV_STACKS */
	K_OOPS(K_SYSCALL_VERIFY_MSG(total_size <= stack_obj_size,
				    "stack size %zu is too big, max is %zu",
				    total_size, stack_obj_size));

	/* User threads may only create other user threads and they can't
	 * be marked as essential
	 */
	K_OOPS(K_SYSCALL_VERIFY(options & K_USER));
	K_OOPS(K_SYSCALL_VERIFY(!(options & K_ESSENTIAL)));

	/* Check validity of prio argument; must be the same or worse priority
	 * than the caller
	 */
	K_OOPS(K_SYSCALL_VERIFY(_is_valid_prio(prio, NULL)));
	K_OOPS(K_SYSCALL_VERIFY(z_is_prio_lower_or_equal(prio,
							 _current->base.prio)));

	z_setup_new_thread(new_thread, stack, stack_size,
			   entry, p1, p2, p3, prio, options, NULL);

	if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
		thread_schedule_new(new_thread, delay);
	}

	return new_thread;
}
#include <zephyr/syscalls/k_thread_create_mrsh.c>
#endif /* CONFIG_USERSPACE */

void z_init_thread_base(struct _thread_base *thread_base, int priority,
			uint32_t initial_state, unsigned int options)
{
	/* k_q_node is initialized upon first insertion in a list */
	thread_base->pended_on = NULL;
	thread_base->user_options = (uint8_t)options;
	thread_base->thread_state = (uint8_t)initial_state;

	thread_base->prio = priority;

	thread_base->sched_locked = 0U;

#ifdef CONFIG_SMP
	thread_base->is_idle = 0;
#endif /* CONFIG_SMP */

#ifdef CONFIG_TIMESLICE_PER_THREAD
	thread_base->slice_ticks = 0;
	thread_base->slice_expired = NULL;
#endif /* CONFIG_TIMESLICE_PER_THREAD */

	/* swap_data does not need to be initialized */

	z_init_thread_timeout(thread_base);
}

FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
					    void *p1, void *p2, void *p3)
{
	SYS_PORT_TRACING_FUNC(k_thread, user_mode_enter);

	_current->base.user_options |= K_USER;
	z_thread_essential_clear(_current);
#ifdef CONFIG_THREAD_MONITOR
	_current->entry.pEntry = entry;
	_current->entry.parameter1 = p1;
	_current->entry.parameter2 = p2;
	_current->entry.parameter3 = p3;
#endif /* CONFIG_THREAD_MONITOR */
#ifdef CONFIG_USERSPACE
	__ASSERT(z_stack_is_user_capable(_current->stack_obj),
		 "dropping to user mode with kernel-only stack object");
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
	memset(_current->userspace_local_data, 0,
	       sizeof(struct _thread_userspace_local_data));
#endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	arch_tls_stack_setup(_current,
			     (char *)(_current->stack_info.start +
				      _current->stack_info.size));
#endif /* CONFIG_THREAD_LOCAL_STORAGE */
	arch_user_mode_enter(entry, p1, p2, p3);
#else
	/* XXX In this case we do not reset the stack */
	z_thread_entry(entry, p1, p2, p3);
#endif /* CONFIG_USERSPACE */
}
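
/* Illustrative sketch (not part of the original file): a supervisor thread
 * that has finished privileged setup can drop to user mode and never return;
 * 'privileged_setup' and 'user_entry' are hypothetical application functions.
 *
 *	void supervisor_entry(void *p1, void *p2, void *p3)
 *	{
 *		privileged_setup();
 *		k_thread_user_mode_enter(user_entry, p1, p2, p3);
 *	}
 */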

#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
#ifdef CONFIG_STACK_GROWS_UP
#error "Unsupported configuration for stack analysis"
#endif /* CONFIG_STACK_GROWS_UP */

int z_stack_space_get(const uint8_t *stack_start, size_t size, size_t *unused_ptr)
{
	size_t unused = 0;
	const uint8_t *checked_stack = stack_start;
	/* Take the address of any local variable as a shallow bound for the
	 * stack pointer. Addresses above it are guaranteed to be
	 * accessible.
	 */
	const uint8_t *stack_pointer = (const uint8_t *)&stack_start;

	/* If we are currently running on the stack being analyzed, some
	 * memory management hardware will generate an exception if we
	 * read unused stack memory.
	 *
	 * This never happens when invoked from user mode, as user mode
	 * will always run this function on the privilege elevation stack.
	 */
	if ((stack_pointer > stack_start) && (stack_pointer <= (stack_start + size)) &&
	    IS_ENABLED(CONFIG_NO_UNUSED_STACK_INSPECTION)) {
		/* TODO: We could add an arch_ API call to temporarily
		 * disable the stack checking in the CPU, but this would
		 * need to be properly managed wrt context switches/interrupts
		 */
		return -ENOTSUP;
	}

	if (IS_ENABLED(CONFIG_STACK_SENTINEL)) {
		/* The first 4 bytes of the stack buffer are reserved for the
		 * sentinel value, so they won't be 0xAAAAAAAA for thread
		 * stacks.
		 *
		 * FIXME: thread->stack_info.start ought to reflect
		 * this!
		 */
		checked_stack += 4;
		size -= 4;
	}

	for (size_t i = 0; i < size; i++) {
		if ((checked_stack[i]) == 0xaaU) {
			unused++;
		} else {
			break;
		}
	}

	*unused_ptr = unused;

	return 0;
}

int z_impl_k_thread_stack_space_get(const struct k_thread *thread,
				    size_t *unused_ptr)
{
#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
	if (thread->stack_info.mapped.addr == NULL) {
		return -EINVAL;
	}
#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */

	return z_stack_space_get((const uint8_t *)thread->stack_info.start,
				 thread->stack_info.size, unused_ptr);
}

#ifdef CONFIG_USERSPACE
int z_vrfy_k_thread_stack_space_get(const struct k_thread *thread,
				    size_t *unused_ptr)
{
	size_t unused;
	int ret;

	ret = K_SYSCALL_OBJ(thread, K_OBJ_THREAD);
	CHECKIF(ret != 0) {
		return ret;
	}

	ret = z_impl_k_thread_stack_space_get(thread, &unused);
	CHECKIF(ret != 0) {
		return ret;
	}

	ret = k_usermode_to_copy(unused_ptr, &unused, sizeof(size_t));
	CHECKIF(ret != 0) {
		return ret;
	}

	return 0;
}
#include <zephyr/syscalls/k_thread_stack_space_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_INIT_STACKS && CONFIG_THREAD_STACK_INFO */
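
/* Illustrative sketch (not part of the original file): with CONFIG_INIT_STACKS
 * and CONFIG_THREAD_STACK_INFO enabled, an application can estimate a thread's
 * stack headroom:
 *
 *	size_t unused;
 *
 *	if (k_thread_stack_space_get(thread, &unused) == 0) {
 *		LOG_INF("thread %p: %zu stack bytes never written", thread, unused);
 *	}
 */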

#ifdef CONFIG_USERSPACE
static inline k_ticks_t z_vrfy_k_thread_timeout_remaining_ticks(
	const struct k_thread *thread)
{
	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	return z_impl_k_thread_timeout_remaining_ticks(thread);
}
#include <zephyr/syscalls/k_thread_timeout_remaining_ticks_mrsh.c>

static inline k_ticks_t z_vrfy_k_thread_timeout_expires_ticks(
	const struct k_thread *thread)
{
	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	return z_impl_k_thread_timeout_expires_ticks(thread);
}
#include <zephyr/syscalls/k_thread_timeout_expires_ticks_mrsh.c>
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
void z_thread_mark_switched_in(void)
{
#if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH)
	z_sched_usage_start(_current);
#endif /* CONFIG_SCHED_THREAD_USAGE && !CONFIG_USE_SWITCH */

#ifdef CONFIG_TRACING
	SYS_PORT_TRACING_FUNC(k_thread, switched_in);
#endif /* CONFIG_TRACING */
}

void z_thread_mark_switched_out(void)
{
#if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH)
	z_sched_usage_stop();
#endif /* CONFIG_SCHED_THREAD_USAGE && !CONFIG_USE_SWITCH */

#ifdef CONFIG_TRACING
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/* Dummy thread won't have TLS set up to run arbitrary code */
	if (!_current ||
	    (_current->base.thread_state & _THREAD_DUMMY) != 0) {
		return;
	}
#endif /* CONFIG_THREAD_LOCAL_STORAGE */
	SYS_PORT_TRACING_FUNC(k_thread, switched_out);
#endif /* CONFIG_TRACING */
}
#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */

int k_thread_runtime_stats_get(k_tid_t thread,
			       k_thread_runtime_stats_t *stats)
{
	if ((thread == NULL) || (stats == NULL)) {
		return -EINVAL;
	}

#ifdef CONFIG_SCHED_THREAD_USAGE
	z_sched_thread_usage(thread, stats);
#else
	*stats = (k_thread_runtime_stats_t) {};
#endif /* CONFIG_SCHED_THREAD_USAGE */

	return 0;
}

int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats)
{
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	k_thread_runtime_stats_t tmp_stats;
#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */

	if (stats == NULL) {
		return -EINVAL;
	}

	*stats = (k_thread_runtime_stats_t) {};

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	/* Retrieve the usage stats for each core and amalgamate them. */

	unsigned int num_cpus = arch_num_cpus();

	for (uint8_t i = 0; i < num_cpus; i++) {
		z_sched_cpu_usage(i, &tmp_stats);

		stats->execution_cycles += tmp_stats.execution_cycles;
		stats->total_cycles += tmp_stats.total_cycles;
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
		stats->current_cycles += tmp_stats.current_cycles;
		stats->peak_cycles += tmp_stats.peak_cycles;
		stats->average_cycles += tmp_stats.average_cycles;
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
		stats->idle_cycles += tmp_stats.idle_cycles;
	}
#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */

	return 0;
}

int k_thread_runtime_stats_cpu_get(int cpu, k_thread_runtime_stats_t *stats)
{
	if (stats == NULL) {
		return -EINVAL;
	}

	*stats = (k_thread_runtime_stats_t) {};

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
#ifdef CONFIG_SMP
	z_sched_cpu_usage(cpu, stats);
#else
	__ASSERT(cpu == 0, "cpu filter out of bounds");
	ARG_UNUSED(cpu);
	z_sched_cpu_usage(0, stats);
#endif /* CONFIG_SMP */
#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */

	return 0;
}
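
/* Illustrative sketch (not part of the original file): with
 * CONFIG_SCHED_THREAD_USAGE (and CONFIG_SCHED_THREAD_USAGE_ALL for the
 * system-wide variant) the accessors above can be used as:
 *
 *	k_thread_runtime_stats_t stats;
 *
 *	k_thread_runtime_stats_get(k_current_get(), &stats);
 *	k_thread_runtime_stats_all_get(&stats);
 *
 * stats.execution_cycles then holds the accumulated cycles for the queried
 * thread, or summed across all CPUs, respectively.
 */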

#ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
/** Pointer to thread which needs to be cleaned up. */
static struct k_thread *thread_to_cleanup;

/** Spinlock for thread abort cleanup. */
static struct k_spinlock thread_cleanup_lock;

#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
static void *thread_cleanup_stack_addr;
static size_t thread_cleanup_stack_sz;
#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */

void defer_thread_cleanup(struct k_thread *thread)
{
	/* Note when adding new deferred cleanup steps:
	 * - The thread object may have been overwritten by the time
	 *   the actual cleanup is being done (e.g. thread object
	 *   allocated on a stack). So stash any necessary data here
	 *   that will be used in the actual cleanup steps.
	 */
	thread_to_cleanup = thread;

#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
	/* Note that the permission of the stack should have been
	 * stripped of user thread access due to the thread having
	 * already exited from a memory domain. That is done via
	 * k_thread_abort().
	 */

	/* Stash the address and size so the region can be unmapped
	 * later.
	 */
	thread_cleanup_stack_addr = thread->stack_info.mapped.addr;
	thread_cleanup_stack_sz = thread->stack_info.mapped.sz;

	/* The stack is now considered un-usable. This should prevent any functions
	 * from looking directly into the mapped stack if they are made to be aware
	 * of memory mapped stacks, e.g., z_stack_space_get().
	 */
	thread->stack_info.mapped.addr = NULL;
	thread->stack_info.mapped.sz = 0;
#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
}

void do_thread_cleanup(struct k_thread *thread)
{
	/* Note when adding new actual cleanup steps:
	 * - The thread object may have been overwritten when this is
	 *   called. So avoid using any data from the thread object.
	 */
	ARG_UNUSED(thread);

#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
	if (thread_cleanup_stack_addr != NULL) {
		k_mem_unmap_phys_guard(thread_cleanup_stack_addr,
				       thread_cleanup_stack_sz, false);

		thread_cleanup_stack_addr = NULL;
	}
#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
}

void k_thread_abort_cleanup(struct k_thread *thread)
{
	K_SPINLOCK(&thread_cleanup_lock) {
		if (thread_to_cleanup != NULL) {
			/* Finish the pending one first. */
			do_thread_cleanup(thread_to_cleanup);
			thread_to_cleanup = NULL;
		}

		if (thread == _current) {
			/* Need to defer cleanup of the current running thread,
			 * as the cleanup might result in an exception. The
			 * actual cleanup will be done the next time
			 * k_thread_abort() is called, or at thread creation if
			 * the same thread object is being reused. This ensures
			 * the cleanup code no longer needs this thread's stack.
			 * It is not exactly ideal, as the stack may remain
			 * memory mapped for a while; however, it is a simple
			 * solution that a) avoids working around the scheduler
			 * lock during k_thread_abort(), b) does not create
			 * another thread to perform the cleanup, and c) does
			 * not require architecture support (e.g. via exception).
			 */
			defer_thread_cleanup(thread);
		} else {
			/* Not the current running thread, so we are safe to do
			 * cleanups.
			 */
			do_thread_cleanup(thread);
		}
	}
}

void k_thread_abort_cleanup_check_reuse(struct k_thread *thread)
{
	K_SPINLOCK(&thread_cleanup_lock) {
		/* This guards reuse of the same thread object: any pending
		 * cleanup of it must be finished before the thread object
		 * can be reused.
		 */
		if (thread_to_cleanup == thread) {
			do_thread_cleanup(thread_to_cleanup);
			thread_to_cleanup = NULL;
		}
	}
}

#endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */

void z_dummy_thread_init(struct k_thread *dummy_thread)
{
	dummy_thread->base.thread_state = _THREAD_DUMMY;
#ifdef CONFIG_SCHED_CPU_MASK
	dummy_thread->base.cpu_mask = -1;
#endif /* CONFIG_SCHED_CPU_MASK */
	dummy_thread->base.user_options = K_ESSENTIAL;
#ifdef CONFIG_THREAD_STACK_INFO
	dummy_thread->stack_info.start = 0U;
	dummy_thread->stack_info.size = 0U;
#endif /* CONFIG_THREAD_STACK_INFO */
#ifdef CONFIG_USERSPACE
	dummy_thread->mem_domain_info.mem_domain = &k_mem_domain_default;
#endif /* CONFIG_USERSPACE */
#if (K_HEAP_MEM_POOL_SIZE > 0)
	k_thread_system_pool_assign(dummy_thread);
#else
	dummy_thread->resource_pool = NULL;
#endif /* K_HEAP_MEM_POOL_SIZE */

#ifdef CONFIG_TIMESLICE_PER_THREAD
	dummy_thread->base.slice_ticks = 0;
#endif /* CONFIG_TIMESLICE_PER_THREAD */

	z_current_thread_set(dummy_thread);
}