1 /*
2  * Copyright (c) 2010-2014 Wind River Systems, Inc.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 /**
8  * @file
9  * @brief Kernel thread support
10  *
11  * This module provides general purpose thread support.
12  */
13 
14 #include <zephyr/kernel.h>
15 #include <zephyr/spinlock.h>
16 #include <zephyr/sys/math_extras.h>
17 #include <zephyr/sys_clock.h>
18 #include <ksched.h>
19 #include <kthread.h>
20 #include <wait_q.h>
21 #include <zephyr/internal/syscall_handler.h>
22 #include <kernel_internal.h>
23 #include <kswap.h>
24 #include <zephyr/init.h>
25 #include <zephyr/tracing/tracing.h>
26 #include <string.h>
27 #include <stdbool.h>
28 #include <zephyr/sys/check.h>
29 #include <zephyr/random/random.h>
30 #include <zephyr/sys/atomic.h>
31 #include <zephyr/logging/log.h>
32 #include <zephyr/llext/symbol.h>
33 #include <zephyr/sys/iterable_sections.h>
34 
35 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
36 
37 #ifdef CONFIG_OBJ_CORE_THREAD
38 static struct k_obj_type  obj_type_thread;
39 
40 #ifdef CONFIG_OBJ_CORE_STATS_THREAD
41 static struct k_obj_core_stats_desc  thread_stats_desc = {
42 	.raw_size = sizeof(struct k_cycle_stats),
43 	.query_size = sizeof(struct k_thread_runtime_stats),
44 	.raw   = z_thread_stats_raw,
45 	.query = z_thread_stats_query,
46 	.reset = z_thread_stats_reset,
47 	.disable = z_thread_stats_disable,
48 	.enable  = z_thread_stats_enable,
49 };
50 #endif /* CONFIG_OBJ_CORE_STATS_THREAD */
51 
52 static int init_thread_obj_core_list(void)
53 {
54 	/* Initialize the thread object type */
55 
56 #ifdef CONFIG_OBJ_CORE_THREAD
57 	z_obj_type_init(&obj_type_thread, K_OBJ_TYPE_THREAD_ID,
58 			offsetof(struct k_thread, obj_core));
59 #endif /* CONFIG_OBJ_CORE_THREAD */
60 
61 #ifdef CONFIG_OBJ_CORE_STATS_THREAD
62 	k_obj_type_stats_init(&obj_type_thread, &thread_stats_desc);
63 #endif /* CONFIG_OBJ_CORE_STATS_THREAD */
64 
65 	return 0;
66 }
67 
68 SYS_INIT(init_thread_obj_core_list, PRE_KERNEL_1,
69 	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
70 #endif /* CONFIG_OBJ_CORE_THREAD */
71 
72 
73 #define _FOREACH_STATIC_THREAD(thread_data)              \
74 	STRUCT_SECTION_FOREACH(_static_thread_data, thread_data)
75 
76 bool k_is_in_isr(void)
77 {
78 	return arch_is_in_isr();
79 }
80 EXPORT_SYMBOL(k_is_in_isr);
81 
82 #ifdef CONFIG_THREAD_CUSTOM_DATA
83 void z_impl_k_thread_custom_data_set(void *value)
84 {
85 	_current->custom_data = value;
86 }
87 
88 #ifdef CONFIG_USERSPACE
89 static inline void z_vrfy_k_thread_custom_data_set(void *data)
90 {
91 	z_impl_k_thread_custom_data_set(data);
92 }
93 #include <zephyr/syscalls/k_thread_custom_data_set_mrsh.c>
94 #endif /* CONFIG_USERSPACE */
95 
96 void *z_impl_k_thread_custom_data_get(void)
97 {
98 	return _current->custom_data;
99 }
100 
101 #ifdef CONFIG_USERSPACE
102 static inline void *z_vrfy_k_thread_custom_data_get(void)
103 {
104 	return z_impl_k_thread_custom_data_get();
105 }
106 #include <zephyr/syscalls/k_thread_custom_data_get_mrsh.c>
107 
108 #endif /* CONFIG_USERSPACE */
109 #endif /* CONFIG_THREAD_CUSTOM_DATA */
110 
111 int z_impl_k_is_preempt_thread(void)
112 {
113 	return !arch_is_in_isr() && thread_is_preemptible(_current);
114 }
115 
116 #ifdef CONFIG_USERSPACE
117 static inline int z_vrfy_k_is_preempt_thread(void)
118 {
119 	return z_impl_k_is_preempt_thread();
120 }
121 #include <zephyr/syscalls/k_is_preempt_thread_mrsh.c>
122 #endif /* CONFIG_USERSPACE */
123 
124 int z_impl_k_thread_priority_get(k_tid_t thread)
125 {
126 	return thread->base.prio;
127 }
128 
129 #ifdef CONFIG_USERSPACE
130 static inline int z_vrfy_k_thread_priority_get(k_tid_t thread)
131 {
132 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
133 	return z_impl_k_thread_priority_get(thread);
134 }
135 #include <zephyr/syscalls/k_thread_priority_get_mrsh.c>
136 #endif /* CONFIG_USERSPACE */
137 
138 int z_impl_k_thread_name_set(k_tid_t thread, const char *str)
139 {
140 #ifdef CONFIG_THREAD_NAME
141 	if (thread == NULL) {
142 		thread = _current;
143 	}
144 
145 	strncpy(thread->name, str, CONFIG_THREAD_MAX_NAME_LEN - 1);
146 	thread->name[CONFIG_THREAD_MAX_NAME_LEN - 1] = '\0';
147 
148 #ifdef CONFIG_ARCH_HAS_THREAD_NAME_HOOK
149 	arch_thread_name_set(thread, str);
150 #endif /* CONFIG_ARCH_HAS_THREAD_NAME_HOOK */
151 
152 	SYS_PORT_TRACING_OBJ_FUNC(k_thread, name_set, thread, 0);
153 
154 	return 0;
155 #else
156 	ARG_UNUSED(thread);
157 	ARG_UNUSED(str);
158 
159 	SYS_PORT_TRACING_OBJ_FUNC(k_thread, name_set, thread, -ENOSYS);
160 
161 	return -ENOSYS;
162 #endif /* CONFIG_THREAD_NAME */
163 }
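/* Illustrative usage sketch (not part of this file): the public
 * k_thread_name_set() wrapper resolves to the z_impl_ handler above.
 * Passing NULL names the calling thread; longer names are truncated to
 * CONFIG_THREAD_MAX_NAME_LEN - 1 characters. The thread object name
 * below is a hypothetical application variable.
 *
 *   k_thread_name_set(NULL, "worker");            // name the current thread
 *   k_thread_name_set(&rx_thread, "rx_handler");  // name another thread
 */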
164 
165 #ifdef CONFIG_USERSPACE
166 static inline int z_vrfy_k_thread_name_set(k_tid_t thread, const char *str)
167 {
168 #ifdef CONFIG_THREAD_NAME
169 	char name[CONFIG_THREAD_MAX_NAME_LEN];
170 
171 	if (thread != NULL) {
172 		if (K_SYSCALL_OBJ(thread, K_OBJ_THREAD) != 0) {
173 			return -EINVAL;
174 		}
175 	}
176 
177 	/* In theory we could copy directly into thread->name, but
178 	 * the current z_vrfy / z_impl split does not provide a
179 	 * means of doing so.
180 	 */
181 	if (k_usermode_string_copy(name, str, sizeof(name)) != 0) {
182 		return -EFAULT;
183 	}
184 
185 	return z_impl_k_thread_name_set(thread, name);
186 #else
187 	return -ENOSYS;
188 #endif /* CONFIG_THREAD_NAME */
189 }
190 #include <zephyr/syscalls/k_thread_name_set_mrsh.c>
191 #endif /* CONFIG_USERSPACE */
192 
193 const char *k_thread_name_get(k_tid_t thread)
194 {
195 #ifdef CONFIG_THREAD_NAME
196 	return (const char *)thread->name;
197 #else
198 	ARG_UNUSED(thread);
199 	return NULL;
200 #endif /* CONFIG_THREAD_NAME */
201 }
202 
203 int z_impl_k_thread_name_copy(k_tid_t thread, char *buf, size_t size)
204 {
205 #ifdef CONFIG_THREAD_NAME
206 	strncpy(buf, thread->name, size);
207 	return 0;
208 #else
209 	ARG_UNUSED(thread);
210 	ARG_UNUSED(buf);
211 	ARG_UNUSED(size);
212 	return -ENOSYS;
213 #endif /* CONFIG_THREAD_NAME */
214 }
215 
216 static size_t copy_bytes(char *dest, size_t dest_size, const char *src, size_t src_size)
217 {
218 	size_t  bytes_to_copy;
219 
220 	bytes_to_copy = MIN(dest_size, src_size);
221 	memcpy(dest, src, bytes_to_copy);
222 
223 	return bytes_to_copy;
224 }
225 
226 const char *k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size)
227 {
228 	size_t      off = 0;
229 	uint8_t     bit;
230 	uint8_t     thread_state = thread_id->base.thread_state;
231 #define SS_ENT(s) { Z_STATE_STR_##s, _THREAD_##s, sizeof(Z_STATE_STR_##s) - 1 }
232 	static const struct {
233 		const char *str;
234 		uint16_t    bit;
235 		uint16_t    len;
236 	} state_string[] = {
237 		SS_ENT(DUMMY),
238 		SS_ENT(PENDING),
239 		SS_ENT(SLEEPING),
240 		SS_ENT(DEAD),
241 		SS_ENT(SUSPENDED),
242 		SS_ENT(ABORTING),
243 		SS_ENT(SUSPENDING),
244 		SS_ENT(QUEUED),
245 	};
246 #undef SS_ENT
247 
248 	if ((buf == NULL) || (buf_size == 0)) {
249 		return "";
250 	}
251 
252 	buf_size--;   /* Reserve 1 byte for end-of-string character */
253 
254 	/*
255 	 * Loop through each bit in the thread_state. Stop once all have
256 	 * been processed. If more than one thread_state bit is set, then
257 	 * separate the descriptive strings with a '+'.
258 	 */
259 
260 
261 	for (unsigned int index = 0; thread_state != 0; index++) {
262 		bit = state_string[index].bit;
263 		if ((thread_state & bit) == 0) {
264 			continue;
265 		}
266 
267 		off += copy_bytes(buf + off, buf_size - off,
268 				  state_string[index].str,
269 				  state_string[index].len);
270 
271 		thread_state &= ~bit;
272 
273 		if (thread_state != 0) {
274 			off += copy_bytes(buf + off, buf_size - off, "+", 1);
275 		}
276 	}
277 
278 	buf[off] = '\0';
279 
280 	return (const char *)buf;
281 }
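/* Illustrative usage sketch (not part of this file): the caller provides the
 * output buffer, and multiple state bits are joined with '+'.
 *
 *   char state_buf[32];
 *
 *   printk("thread state: %s\n",
 *          k_thread_state_str(tid, state_buf, sizeof(state_buf)));
 *
 * A pended thread that is also suspended would print something like
 * "pending+suspended" (the exact strings come from the Z_STATE_STR_*
 * macros).
 */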
282 
283 #ifdef CONFIG_USERSPACE
284 static inline int z_vrfy_k_thread_name_copy(k_tid_t thread,
285 					    char *buf, size_t size)
286 {
287 #ifdef CONFIG_THREAD_NAME
288 	size_t len;
289 	struct k_object *ko = k_object_find(thread);
290 
291 	/* Special case: we allow reading the names of initialized threads
292 	 * even if we don't have permission on them
293 	 */
294 	if ((thread == NULL) || (ko->type != K_OBJ_THREAD) ||
295 		((ko->flags & K_OBJ_FLAG_INITIALIZED) == 0)) {
296 		return -EINVAL;
297 	}
298 	if (K_SYSCALL_MEMORY_WRITE(buf, size) != 0) {
299 		return -EFAULT;
300 	}
301 	len = strlen(thread->name);
302 	if ((len + 1) > size) {
303 		return -ENOSPC;
304 	}
305 
306 	return k_usermode_to_copy((void *)buf, thread->name, len + 1);
307 #else
308 	ARG_UNUSED(thread);
309 	ARG_UNUSED(buf);
310 	ARG_UNUSED(size);
311 	return -ENOSYS;
312 #endif /* CONFIG_THREAD_NAME */
313 }
314 #include <zephyr/syscalls/k_thread_name_copy_mrsh.c>
315 #endif /* CONFIG_USERSPACE */
316 
317 #ifdef CONFIG_STACK_SENTINEL
318 /* Check that the stack sentinel is still present
319  *
320  * The stack sentinel feature writes a magic value to the lowest 4 bytes of
321  * the thread's stack when the thread is initialized. This value gets checked
322  * in a few places:
323  *
324  * 1) In k_yield() if the current thread is not swapped out
325  * 2) After servicing a non-nested interrupt
326  * 3) In z_swap(), check the sentinel in the outgoing thread
327  *
328  * Item 2 requires support in arch/ code.
329  *
330  * If the check fails, the thread will be terminated appropriately through
331  * the system fatal error handler.
332  */
333 void z_check_stack_sentinel(void)
334 {
335 	uint32_t *stack;
336 
337 	if ((_current->base.thread_state & _THREAD_DUMMY) != 0) {
338 		return;
339 	}
340 
341 	stack = (uint32_t *)_current->stack_info.start;
342 	if (*stack != STACK_SENTINEL) {
343 		/* Restore it so further checks don't trigger this same error */
344 		*stack = STACK_SENTINEL;
345 		z_except_reason(K_ERR_STACK_CHK_FAIL);
346 	}
347 }
348 #endif /* CONFIG_STACK_SENTINEL */
349 
350 #if defined(CONFIG_STACK_POINTER_RANDOM) && (CONFIG_STACK_POINTER_RANDOM != 0)
351 int z_stack_adjust_initialized;
352 
353 static size_t random_offset(size_t stack_size)
354 {
355 	size_t random_val;
356 
357 	if (!z_stack_adjust_initialized) {
358 		z_early_rand_get((uint8_t *)&random_val, sizeof(random_val));
359 	} else {
360 		sys_rand_get((uint8_t *)&random_val, sizeof(random_val));
361 	}
362 
363 	/* Don't need to worry about alignment of the size here,
364 	 * arch_new_thread() is required to do it.
365 	 *
366 	 * FIXME: Not the best way to get a random number in a range.
367 	 * See #6493
368 	 */
369 	const size_t fuzz = random_val % CONFIG_STACK_POINTER_RANDOM;
370 
371 	if (unlikely(fuzz * 2 > stack_size)) {
372 		return 0;
373 	}
374 
375 	return fuzz;
376 }
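/* For example, with CONFIG_STACK_POINTER_RANDOM=256 the fuzz above falls in
 * the range [0, 255] (subject to the modulo bias noted in the FIXME), and it
 * is discarded entirely if it would consume more than half of the stack.
 */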
377 #if defined(CONFIG_STACK_GROWS_UP)
378 	/* This case is rare enough that it is not handled for now */
379 #error "Stack pointer randomization not implemented for upward growing stacks"
380 #endif /* CONFIG_STACK_GROWS_UP */
381 #endif /* CONFIG_STACK_POINTER_RANDOM */
382 
383 static char *setup_thread_stack(struct k_thread *new_thread,
384 				k_thread_stack_t *stack, size_t stack_size)
385 {
386 	size_t stack_obj_size, stack_buf_size;
387 	char *stack_ptr, *stack_buf_start;
388 	size_t delta = 0;
389 
390 #ifdef CONFIG_USERSPACE
391 	if (z_stack_is_user_capable(stack)) {
392 		stack_obj_size = K_THREAD_STACK_LEN(stack_size);
393 		stack_buf_start = K_THREAD_STACK_BUFFER(stack);
394 		stack_buf_size = stack_obj_size - K_THREAD_STACK_RESERVED;
395 	} else
396 #endif /* CONFIG_USERSPACE */
397 	{
398 		/* Object cannot host a user mode thread */
399 		stack_obj_size = K_KERNEL_STACK_LEN(stack_size);
400 		stack_buf_start = K_KERNEL_STACK_BUFFER(stack);
401 		stack_buf_size = stack_obj_size - K_KERNEL_STACK_RESERVED;
402 
403 #if defined(ARCH_KERNEL_STACK_RESERVED)
404 		/* Zephyr treats stack overflow as an app bug.  But
405 		 * this particular overflow can be seen by static
406 		 * analysis so needs to be handled somehow.
407 		 */
408 		if (K_KERNEL_STACK_RESERVED > stack_obj_size) {
409 			k_panic();
410 		}
411 #endif
412 	}
413 
414 #ifdef CONFIG_THREAD_STACK_MEM_MAPPED
415 	/* Map the stack into virtual memory and use that as the base to
416 	 * calculate the initial stack pointer at the high end of the stack
417 	 * object. The stack pointer may be reduced later in this function
418 	 * by TLS or random offset.
419 	 *
420 	 * K_MEM_MAP_UNINIT is used to mimic the behavior of non-mapped
421 	 * stack. If CONFIG_INIT_STACKS is enabled, the stack will be
422 	 * cleared below.
423 	 */
424 	void *stack_mapped = k_mem_map_phys_guard((uintptr_t)stack, stack_obj_size,
425 				K_MEM_PERM_RW | K_MEM_CACHE_WB | K_MEM_MAP_UNINIT,
426 				false);
427 
428 	__ASSERT_NO_MSG((uintptr_t)stack_mapped != 0);
429 
430 #ifdef CONFIG_USERSPACE
431 	if (z_stack_is_user_capable(stack)) {
432 		stack_buf_start = K_THREAD_STACK_BUFFER(stack_mapped);
433 	} else
434 #endif /* CONFIG_USERSPACE */
435 	{
436 		stack_buf_start = K_KERNEL_STACK_BUFFER(stack_mapped);
437 	}
438 
439 	stack_ptr = (char *)stack_mapped + stack_obj_size;
440 
441 	/* Need to store the info on mapped stack so we can remove the mappings
442 	 * when the thread ends.
443 	 */
444 	new_thread->stack_info.mapped.addr = stack_mapped;
445 	new_thread->stack_info.mapped.sz = stack_obj_size;
446 
447 #else /* CONFIG_THREAD_STACK_MEM_MAPPED */
448 
449 	/* Initial stack pointer at the high end of the stack object, may
450 	 * be reduced later in this function by TLS or random offset
451 	 */
452 	stack_ptr = (char *)stack + stack_obj_size;
453 
454 #endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
455 
456 	LOG_DBG("stack %p for thread %p: obj_size=%zu buf_start=%p "
457 		"buf_size=%zu stack_ptr=%p",
458 		stack, new_thread, stack_obj_size, (void *)stack_buf_start,
459 		stack_buf_size, (void *)stack_ptr);
460 
461 #ifdef CONFIG_INIT_STACKS
462 	memset(stack_buf_start, 0xaa, stack_buf_size);
463 #endif /* CONFIG_INIT_STACKS */
464 #ifdef CONFIG_STACK_SENTINEL
465 	/* Put the stack sentinel at the lowest 4 bytes of the stack area.
466 	 * We periodically check that it's still present and kill the thread
467 	 * if it isn't.
468 	 */
469 	*((uint32_t *)stack_buf_start) = STACK_SENTINEL;
470 #endif /* CONFIG_STACK_SENTINEL */
471 #ifdef CONFIG_THREAD_LOCAL_STORAGE
472 	/* TLS is always last within the stack buffer */
473 	delta += arch_tls_stack_setup(new_thread, stack_ptr);
474 #endif /* CONFIG_THREAD_LOCAL_STORAGE */
475 #ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
476 	size_t tls_size = sizeof(struct _thread_userspace_local_data);
477 
478 	/* reserve space on highest memory of stack buffer for local data */
479 	delta += tls_size;
480 	new_thread->userspace_local_data =
481 		(struct _thread_userspace_local_data *)(stack_ptr - delta);
482 #endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */
483 #if defined(CONFIG_STACK_POINTER_RANDOM) && (CONFIG_STACK_POINTER_RANDOM != 0)
484 	delta += random_offset(stack_buf_size);
485 #endif /* CONFIG_STACK_POINTER_RANDOM */
486 	delta = ROUND_UP(delta, ARCH_STACK_PTR_ALIGN);
487 #ifdef CONFIG_THREAD_STACK_INFO
488 	/* Initial values. Arches which implement MPU guards that "borrow"
489 	 * memory from the stack buffer (not tracked in K_THREAD_STACK_RESERVED)
490 	 * will need to appropriately update this.
491 	 *
492 	 * The bounds tracked here correspond to the area of the stack object
493 	 * that the thread can access, which includes TLS.
494 	 */
495 	new_thread->stack_info.start = (uintptr_t)stack_buf_start;
496 	new_thread->stack_info.size = stack_buf_size;
497 	new_thread->stack_info.delta = delta;
498 
499 #ifdef CONFIG_THREAD_RUNTIME_STACK_SAFETY
500 	new_thread->stack_info.usage.unused_threshold =
501 		(CONFIG_THREAD_RUNTIME_STACK_SAFETY_DEFAULT_UNUSED_THRESHOLD_PCT *
502 		 stack_buf_size) / 100;
503 #endif
504 #endif /* CONFIG_THREAD_STACK_INFO */
505 	stack_ptr -= delta;
506 
507 	return stack_ptr;
508 }
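/* Resulting layout of a downward-growing stack buffer after setup (a
 * simplified sketch; which regions exist depends on the kconfig options
 * handled above):
 *
 *   high addresses  +------------------------------+  <- top of stack object
 *                   | TLS area (if enabled)        |
 *                   | userspace local data         |
 *                   | random offset / alignment    |
 *                   +------------------------------+  <- returned stack_ptr
 *                   | usable stack, filled with    |
 *                   | 0xaa when CONFIG_INIT_STACKS |
 *                   +------------------------------+
 *   low addresses   | STACK_SENTINEL (4 bytes)     |  <- stack_info.start
 *                   +------------------------------+
 */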
509 
510 #ifdef CONFIG_HW_SHADOW_STACK
511 static void setup_shadow_stack(struct k_thread *new_thread,
512 			 k_thread_stack_t *stack)
513 {
514 	int ret = -ENOENT;
515 
516 	STRUCT_SECTION_FOREACH(_stack_to_hw_shadow_stack, stk_to_hw_shstk) {
517 		if (stk_to_hw_shstk->stack == stack) {
518 			ret = k_thread_hw_shadow_stack_attach(new_thread,
519 							      stk_to_hw_shstk->shstk_addr,
520 							      stk_to_hw_shstk->size);
521 			if (ret != 0) {
522 				LOG_ERR("Could not set thread %p shadow stack %p, got error %d",
523 					new_thread, stk_to_hw_shstk->shstk_addr, ret);
524 				k_panic();
525 			}
526 			break;
527 		}
528 	}
529 
530 	/* If no matching shadow stack was found above, check whether the
531 	 * stack belongs to a stack array and use the corresponding shadow stack.
532 	 */
533 	if (ret != -ENOENT) {
534 		return;
535 	}
536 
537 	STRUCT_SECTION_FOREACH(_stack_to_hw_shadow_stack_arr, stk_to_hw_shstk) {
538 		if ((uintptr_t)stack >= stk_to_hw_shstk->stack_addr &&
539 		    (uintptr_t)stack < stk_to_hw_shstk->stack_addr +
540 		    stk_to_hw_shstk->stack_size * stk_to_hw_shstk->nmemb) {
541 			/* Now we have to guess which index of the stack array is being used */
542 			uintptr_t stack_offset = (uintptr_t)stack - stk_to_hw_shstk->stack_addr;
543 			uintptr_t stack_index = stack_offset / stk_to_hw_shstk->stack_size;
544 			uintptr_t addr;
545 
546 			if (stack_index >= stk_to_hw_shstk->nmemb) {
547 				LOG_ERR("Could not find shadow stack for thread %p, stack %p",
548 					new_thread, stack);
549 				k_panic();
550 			}
551 
552 			addr = stk_to_hw_shstk->shstk_addr +
553 				stk_to_hw_shstk->shstk_size * stack_index;
554 			ret = k_thread_hw_shadow_stack_attach(new_thread,
555 							      (arch_thread_hw_shadow_stack_t *)addr,
556 							      stk_to_hw_shstk->shstk_size);
557 			if (ret != 0) {
558 				LOG_ERR("Could not set thread %p shadow stack 0x%lx, got error %d",
559 					new_thread, stk_to_hw_shstk->shstk_addr, ret);
560 				k_panic();
561 			}
562 			break;
563 		}
564 	}
565 
566 	if (ret == -ENOENT) {
567 		LOG_ERR("Could not find shadow stack for thread %p, stack %p",
568 			new_thread, stack);
569 		k_panic();
570 	}
571 }
572 #endif
573 
574 /*
575  * The provided stack_size value is presumed to be either the result of
576  * K_THREAD_STACK_SIZEOF(stack), or the size value passed to the instance
577  * of K_THREAD_STACK_DEFINE() which defined 'stack'.
578  */
579 char *z_setup_new_thread(struct k_thread *new_thread,
580 			 k_thread_stack_t *stack, size_t stack_size,
581 			 k_thread_entry_t entry,
582 			 void *p1, void *p2, void *p3,
583 			 int prio, uint32_t options, const char *name)
584 {
585 	char *stack_ptr;
586 
587 	Z_ASSERT_VALID_PRIO(prio, entry);
588 
589 #ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
590 	k_thread_abort_cleanup_check_reuse(new_thread);
591 #endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */
592 
593 #ifdef CONFIG_OBJ_CORE_THREAD
594 	k_obj_core_init_and_link(K_OBJ_CORE(new_thread), &obj_type_thread);
595 #ifdef CONFIG_OBJ_CORE_STATS_THREAD
596 	k_obj_core_stats_register(K_OBJ_CORE(new_thread),
597 				  &new_thread->base.usage,
598 				  sizeof(new_thread->base.usage));
599 #endif /* CONFIG_OBJ_CORE_STATS_THREAD */
600 #endif /* CONFIG_OBJ_CORE_THREAD */
601 
602 #ifdef CONFIG_USERSPACE
603 	__ASSERT((options & K_USER) == 0U || z_stack_is_user_capable(stack),
604 		 "user thread %p with kernel-only stack %p",
605 		 new_thread, stack);
606 	k_object_init(new_thread);
607 	k_object_init(stack);
608 	new_thread->stack_obj = stack;
609 	new_thread->syscall_frame = NULL;
610 
611 	/* Any given thread has access to itself */
612 	k_object_access_grant(new_thread, new_thread);
613 #endif /* CONFIG_USERSPACE */
614 	z_waitq_init(&new_thread->join_queue);
615 
616 	/* Initialize various struct k_thread members */
617 	z_init_thread_base(&new_thread->base, prio, _THREAD_SLEEPING, options);
618 	stack_ptr = setup_thread_stack(new_thread, stack, stack_size);
619 
620 #ifdef CONFIG_HW_SHADOW_STACK
621 	setup_shadow_stack(new_thread, stack);
622 #endif
623 
624 #ifdef CONFIG_KERNEL_COHERENCE
625 	/* Check that the thread object is safe, but that the stack is
626 	 * still cached!
627 	 */
628 	__ASSERT_NO_MSG(sys_cache_is_mem_coherent(new_thread));
629 
630 	/* When dynamic thread stack is available, the stack may come from
631 	 * uncached area.
632 	 */
633 #ifndef CONFIG_DYNAMIC_THREAD
634 	__ASSERT_NO_MSG(!sys_cache_is_mem_coherent(stack));
635 #endif  /* CONFIG_DYNAMIC_THREAD */
636 
637 #endif /* CONFIG_KERNEL_COHERENCE */
638 
639 	arch_new_thread(new_thread, stack, stack_ptr, entry, p1, p2, p3);
640 
641 	/* static threads overwrite it afterwards with real value */
642 	new_thread->init_data = NULL;
643 
644 #ifdef CONFIG_USE_SWITCH
645 	/* switch_handle must be non-null except when inside z_swap()
646 	 * for synchronization reasons.  Historically some notional
647 	 * USE_SWITCH architectures have actually ignored the field
648 	 */
649 	__ASSERT(new_thread->switch_handle != NULL,
650 		 "arch layer failed to initialize switch_handle");
651 #endif /* CONFIG_USE_SWITCH */
652 #ifdef CONFIG_THREAD_CUSTOM_DATA
653 	/* Initialize custom data field (value is opaque to kernel) */
654 	new_thread->custom_data = NULL;
655 #endif /* CONFIG_THREAD_CUSTOM_DATA */
656 #ifdef CONFIG_EVENTS
657 	new_thread->no_wake_on_timeout = false;
658 #endif /* CONFIG_EVENTS */
659 #ifdef CONFIG_THREAD_MONITOR
660 	new_thread->entry.pEntry = entry;
661 	new_thread->entry.parameter1 = p1;
662 	new_thread->entry.parameter2 = p2;
663 	new_thread->entry.parameter3 = p3;
664 
665 	k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);
666 
667 	new_thread->next_thread = _kernel.threads;
668 	_kernel.threads = new_thread;
669 	k_spin_unlock(&z_thread_monitor_lock, key);
670 #endif /* CONFIG_THREAD_MONITOR */
671 #ifdef CONFIG_THREAD_NAME
672 	if (name != NULL) {
673 		strncpy(new_thread->name, name,
674 			CONFIG_THREAD_MAX_NAME_LEN - 1);
675 		/* Ensure NULL termination, truncate if longer */
676 		new_thread->name[CONFIG_THREAD_MAX_NAME_LEN - 1] = '\0';
677 #ifdef CONFIG_ARCH_HAS_THREAD_NAME_HOOK
678 		arch_thread_name_set(new_thread, name);
679 #endif /* CONFIG_ARCH_HAS_THREAD_NAME_HOOK */
680 	} else {
681 		new_thread->name[0] = '\0';
682 	}
683 #endif /* CONFIG_THREAD_NAME */
684 #ifdef CONFIG_SCHED_CPU_MASK
685 	if (IS_ENABLED(CONFIG_SCHED_CPU_MASK_PIN_ONLY)) {
686 		new_thread->base.cpu_mask = 1; /* must specify only one cpu */
687 	} else {
688 		new_thread->base.cpu_mask = -1; /* allow all cpus */
689 	}
690 #endif /* CONFIG_SCHED_CPU_MASK */
691 #ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
692 	/* _current may be null if the dummy thread is not used */
693 	if (!_current) {
694 		new_thread->resource_pool = NULL;
695 		return stack_ptr;
696 	}
697 #endif /* CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN */
698 #ifdef CONFIG_USERSPACE
699 	z_mem_domain_init_thread(new_thread);
700 
701 	if ((options & K_INHERIT_PERMS) != 0U) {
702 		k_thread_perms_inherit(_current, new_thread);
703 	}
704 #endif /* CONFIG_USERSPACE */
705 #ifdef CONFIG_SCHED_DEADLINE
706 	new_thread->base.prio_deadline = 0;
707 #endif /* CONFIG_SCHED_DEADLINE */
708 	new_thread->resource_pool = _current->resource_pool;
709 
710 #ifdef CONFIG_SMP
711 	z_waitq_init(&new_thread->halt_queue);
712 #endif /* CONFIG_SMP */
713 
714 #ifdef CONFIG_SCHED_THREAD_USAGE
715 	new_thread->base.usage = (struct k_cycle_stats) {};
716 	new_thread->base.usage.track_usage =
717 		CONFIG_SCHED_THREAD_USAGE_AUTO_ENABLE;
718 #endif /* CONFIG_SCHED_THREAD_USAGE */
719 
720 	SYS_PORT_TRACING_OBJ_FUNC(k_thread, create, new_thread);
721 
722 	return stack_ptr;
723 }
724 
725 #ifdef CONFIG_THREAD_RUNTIME_STACK_SAFETY
726 int z_impl_k_thread_runtime_stack_safety_unused_threshold_pct_set(struct k_thread *thread,
727 								  uint32_t pct)
728 {
729 	size_t unused_threshold;
730 
731 	if (pct > 99) {
732 		return -EINVAL;   /* 100% unused stack and up is invalid */
733 	}
734 
735 	unused_threshold = (thread->stack_info.size * pct) / 100;
736 
737 	thread->stack_info.usage.unused_threshold = unused_threshold;
738 
739 	return 0;
740 }
741 
742 int z_impl_k_thread_runtime_stack_safety_unused_threshold_set(struct k_thread *thread,
743 							      size_t threshold)
744 {
745 	if (threshold > thread->stack_info.size) {
746 		return -EINVAL;
747 	}
748 
749 	thread->stack_info.usage.unused_threshold = threshold;
750 
751 	return 0;
752 }
753 
754 size_t z_impl_k_thread_runtime_stack_safety_unused_threshold_get(struct k_thread *thread)
755 {
756 	return thread->stack_info.usage.unused_threshold;
757 }
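/* Illustrative usage sketch (not part of this file), assuming the public
 * syscall wrappers generated for the z_impl_ handlers above; "worker" is a
 * hypothetical application thread. Flag the thread once less than 10% of
 * its stack remains unused:
 *
 *   k_thread_runtime_stack_safety_unused_threshold_pct_set(&worker, 10);
 *
 * The equivalent absolute form sets the threshold in bytes:
 *
 *   k_thread_runtime_stack_safety_unused_threshold_set(&worker,
 *                                                      worker.stack_info.size / 10);
 */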
758 
759 #ifdef CONFIG_USERSPACE
760 int z_vrfy_k_thread_runtime_stack_safety_unused_threshold_pct_set(struct k_thread *thread,
761 								  uint32_t pct)
762 {
763 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
764 
765 	return z_impl_k_thread_runtime_stack_safety_unused_threshold_pct_set(thread, pct);
766 }
767 #include <zephyr/syscalls/k_thread_runtime_stack_safety_unused_threshold_pct_set_mrsh.c>
768 
769 int z_vrfy_k_thread_runtime_stack_safety_unused_threshold_set(struct k_thread *thread,
770 							      size_t threshold)
771 {
772 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
773 
774 	return z_impl_k_thread_runtime_stack_safety_unused_threshold_set(thread, threshold);
775 }
776 #include <zephyr/syscalls/k_thread_runtime_stack_safety_unused_threshold_set_mrsh.c>
777 
778 size_t z_vrfy_k_thread_runtime_stack_safety_unused_threshold_get(struct k_thread *thread)
779 {
780 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
781 
782 	return z_impl_k_thread_runtime_stack_safety_unused_threshold_get(thread);
783 }
784 #include <zephyr/syscalls/k_thread_runtime_stack_safety_unused_threshold_get_mrsh.c>
785 #endif /* CONFIG_USERSPACE */
786 #endif /* CONFIG_THREAD_RUNTIME_STACK_SAFETY */
787 
788 k_tid_t z_impl_k_thread_create(struct k_thread *new_thread,
789 			      k_thread_stack_t *stack,
790 			      size_t stack_size, k_thread_entry_t entry,
791 			      void *p1, void *p2, void *p3,
792 			      int prio, uint32_t options, k_timeout_t delay)
793 {
794 	__ASSERT(!arch_is_in_isr(), "Threads may not be created in ISRs");
795 
796 	z_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
797 			  prio, options, NULL);
798 
799 	if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
800 		thread_schedule_new(new_thread, delay);
801 	}
802 
803 	return new_thread;
804 }
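/* Illustrative usage sketch (not part of this file): typical application
 * use of the public k_thread_create() API that lands in the handler above.
 * The stack, thread object, and entry function names are hypothetical.
 *
 *   #define MY_STACK_SIZE 1024
 *   #define MY_PRIORITY   5
 *
 *   K_THREAD_STACK_DEFINE(my_stack, MY_STACK_SIZE);
 *   static struct k_thread my_thread;
 *
 *   static void my_entry(void *p1, void *p2, void *p3) { ... }
 *
 *   k_thread_create(&my_thread, my_stack, K_THREAD_STACK_SIZEOF(my_stack),
 *                   my_entry, NULL, NULL, NULL,
 *                   MY_PRIORITY, 0, K_NO_WAIT);
 */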
805 
806 #ifdef CONFIG_USERSPACE
807 bool z_stack_is_user_capable(k_thread_stack_t *stack)
808 {
809 	return k_object_find(stack) != NULL;
810 }
811 
812 k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
813 			       k_thread_stack_t *stack,
814 			       size_t stack_size, k_thread_entry_t entry,
815 			       void *p1, void *p2, void *p3,
816 			       int prio, uint32_t options, k_timeout_t delay)
817 {
818 	size_t total_size, stack_obj_size;
819 	struct k_object *stack_object;
820 
821 	/* The thread and stack objects *must* be in an uninitialized state */
822 	K_OOPS(K_SYSCALL_OBJ_NEVER_INIT(new_thread, K_OBJ_THREAD));
823 
824 	/* No need to check z_stack_is_user_capable(), it won't be in the
825 	 * object table if it isn't
826 	 */
827 	stack_object = k_object_find(stack);
828 	K_OOPS(K_SYSCALL_VERIFY_MSG(k_object_validation_check(stack_object, stack,
829 						K_OBJ_THREAD_STACK_ELEMENT,
830 						_OBJ_INIT_FALSE) == 0,
831 				    "bad stack object"));
832 
833 	/* Verify that the stack size passed in is OK by computing the total
834 	 * size and comparing it with the size value in the object metadata
835 	 */
836 	K_OOPS(K_SYSCALL_VERIFY_MSG(!size_add_overflow(K_THREAD_STACK_RESERVED,
837 						       stack_size, &total_size),
838 				    "stack size overflow (%zu+%zu)",
839 				    stack_size,
840 				    K_THREAD_STACK_RESERVED));
841 
842 	/* Testing less-than-or-equal since additional room may have been
843 	 * allocated for alignment constraints
844 	 */
845 #ifdef CONFIG_GEN_PRIV_STACKS
846 	stack_obj_size = stack_object->data.stack_data->size;
847 #else
848 	stack_obj_size = stack_object->data.stack_size;
849 #endif /* CONFIG_GEN_PRIV_STACKS */
850 	K_OOPS(K_SYSCALL_VERIFY_MSG(total_size <= stack_obj_size,
851 				    "stack size %zu is too big, max is %zu",
852 				    total_size, stack_obj_size));
853 
854 	/* User threads may only create other user threads and they can't
855 	 * be marked as essential
856 	 */
857 	K_OOPS(K_SYSCALL_VERIFY(options & K_USER));
858 	K_OOPS(K_SYSCALL_VERIFY(!(options & K_ESSENTIAL)));
859 
860 	/* Check validity of prio argument; must be the same or worse priority
861 	 * than the caller
862 	 */
863 	K_OOPS(K_SYSCALL_VERIFY(_is_valid_prio(prio, NULL)));
864 	K_OOPS(K_SYSCALL_VERIFY(z_is_prio_lower_or_equal(prio,
865 							_current->base.prio)));
866 
867 	z_setup_new_thread(new_thread, stack, stack_size,
868 			   entry, p1, p2, p3, prio, options, NULL);
869 
870 	if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
871 		thread_schedule_new(new_thread, delay);
872 	}
873 
874 	return new_thread;
875 }
876 #include <zephyr/syscalls/k_thread_create_mrsh.c>
877 #endif /* CONFIG_USERSPACE */
878 
879 void z_init_thread_base(struct _thread_base *thread_base, int priority,
880 		       uint32_t initial_state, unsigned int options)
881 {
882 	/* k_q_node is initialized upon first insertion in a list */
883 	thread_base->pended_on = NULL;
884 	thread_base->user_options = (uint8_t)options;
885 	thread_base->thread_state = (uint8_t)initial_state;
886 
887 	thread_base->prio = priority;
888 
889 	thread_base->sched_locked = 0U;
890 
891 #ifdef CONFIG_SMP
892 	thread_base->is_idle = 0;
893 #endif /* CONFIG_SMP */
894 
895 #ifdef CONFIG_TIMESLICE_PER_THREAD
896 	thread_base->slice_ticks = 0;
897 	thread_base->slice_expired = NULL;
898 #endif /* CONFIG_TIMESLICE_PER_THREAD */
899 
900 	/* swap_data does not need to be initialized */
901 
902 	z_init_thread_timeout(thread_base);
903 }
904 
905 FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
906 					    void *p1, void *p2, void *p3)
907 {
908 	SYS_PORT_TRACING_FUNC(k_thread, user_mode_enter);
909 
910 	_current->base.user_options |= K_USER;
911 	z_thread_essential_clear(_current);
912 #ifdef CONFIG_THREAD_MONITOR
913 	_current->entry.pEntry = entry;
914 	_current->entry.parameter1 = p1;
915 	_current->entry.parameter2 = p2;
916 	_current->entry.parameter3 = p3;
917 #endif /* CONFIG_THREAD_MONITOR */
918 #ifdef CONFIG_USERSPACE
919 	__ASSERT(z_stack_is_user_capable(_current->stack_obj),
920 		 "dropping to user mode with kernel-only stack object");
921 #ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
922 	memset(_current->userspace_local_data, 0,
923 	       sizeof(struct _thread_userspace_local_data));
924 #endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */
925 #ifdef CONFIG_THREAD_LOCAL_STORAGE
926 	arch_tls_stack_setup(_current,
927 			     (char *)(_current->stack_info.start +
928 				      _current->stack_info.size));
929 #endif /* CONFIG_THREAD_LOCAL_STORAGE */
930 	arch_user_mode_enter(entry, p1, p2, p3);
931 #else
932 	/* XXX In this case we do not reset the stack */
933 	z_thread_entry(entry, p1, p2, p3);
934 #endif /* CONFIG_USERSPACE */
935 }
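/* Illustrative usage sketch (not part of this file): a thread started in
 * supervisor mode can finish its privileged setup and then drop to user
 * mode permanently; this call does not return. "user_entry" is a
 * hypothetical application function.
 *
 *   static void app_entry(void *p1, void *p2, void *p3)
 *   {
 *           // ... privileged initialization ...
 *           k_thread_user_mode_enter(user_entry, NULL, NULL, NULL);
 *   }
 */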
936 
937 #if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
938 #ifdef CONFIG_STACK_GROWS_UP
939 #error "Unsupported configuration for stack analysis"
940 #endif /* CONFIG_STACK_GROWS_UP */
941 
942 int z_stack_space_get(const uint8_t *stack_start, size_t size, size_t *unused_ptr)
943 {
944 	size_t unused = 0;
945 	const uint8_t *checked_stack = stack_start;
946 	/* Take the address of any local variable as a shallow bound for the
947 	 * stack pointer.  Addresses above it are guaranteed to be
948 	 * accessible.
949 	 */
950 	const uint8_t *stack_pointer = (const uint8_t *)&stack_start;
951 
952 	/* If we are currently running on the stack being analyzed, some
953 	 * memory management hardware will generate an exception if we
954 	 * read unused stack memory.
955 	 *
956 	 * This never happens when invoked from user mode, as user mode
957 	 * will always run this function on the privilege elevation stack.
958 	 */
959 	if ((stack_pointer > stack_start) && (stack_pointer <= (stack_start + size)) &&
960 	    IS_ENABLED(CONFIG_NO_UNUSED_STACK_INSPECTION)) {
961 		/* TODO: We could add an arch_ API call to temporarily
962 		 * disable the stack checking in the CPU, but this would
963 		 * need to be properly managed wrt context switches/interrupts
964 		 */
965 		return -ENOTSUP;
966 	}
967 
968 	if (IS_ENABLED(CONFIG_STACK_SENTINEL)) {
969 		/* The first 4 bytes of the stack buffer are reserved for the
970 		 * sentinel value, so they won't contain the 0xAA fill pattern
971 		 * on thread stacks.
972 		 *
973 		 * FIXME: thread->stack_info.start ought to reflect
974 		 * this!
975 		 */
976 		checked_stack += 4;
977 		size -= 4;
978 	}
979 
980 	for (size_t i = 0; i < size; i++) {
981 		if ((checked_stack[i]) == 0xaaU) {
982 			unused++;
983 		} else {
984 			break;
985 		}
986 	}
987 
988 	*unused_ptr = unused;
989 
990 	return 0;
991 }
992 
993 #ifdef CONFIG_THREAD_RUNTIME_STACK_SAFETY
994 int k_thread_runtime_stack_safety_full_check(const struct k_thread *thread,
995 					     size_t *unused_ptr,
996 					     k_thread_stack_safety_handler_t handler,
997 					     void *arg)
998 {
999 	int    rv;
1000 	size_t unused_space;
1001 
1002 	__ASSERT_NO_MSG(thread != NULL);
1003 
1004 	rv = z_stack_space_get((const uint8_t *)thread->stack_info.start,
1005 			       thread->stack_info.size, &unused_space);
1006 
1007 	if (rv != 0) {
1008 		return rv;
1009 	}
1010 
1011 	if (unused_ptr != NULL) {
1012 		*unused_ptr = unused_space;
1013 	}
1014 
1015 	if ((unused_space < thread->stack_info.usage.unused_threshold) &&
1016 	    (handler != NULL)) {
1017 		handler(thread, unused_space, arg);
1018 	}
1019 
1020 	return 0;
1021 }
1022 
1023 int k_thread_runtime_stack_safety_threshold_check(const struct k_thread *thread,
1024 						  size_t *unused_ptr,
1025 						  k_thread_stack_safety_handler_t handler,
1026 						  void *arg)
1027 {
1028 	int    rv;
1029 	size_t unused_space;
1030 
1031 	__ASSERT_NO_MSG(thread != NULL);
1032 
1033 	rv = z_stack_space_get((const uint8_t *)thread->stack_info.start,
1034 			       thread->stack_info.usage.unused_threshold,
1035 			       &unused_space);
1036 
1037 	if (rv != 0) {
1038 		return rv;
1039 	}
1040 
1041 	if (unused_ptr != NULL) {
1042 		*unused_ptr = unused_space;
1043 	}
1044 
1045 	if ((unused_space < thread->stack_info.usage.unused_threshold) &&
1046 	    (handler != NULL)) {
1047 		handler(thread, unused_space, arg);
1048 	}
1049 
1050 	return 0;
1051 }
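/* Illustrative usage sketch (not part of this file): periodically run the
 * full check and get a callback when a thread drops below its configured
 * unused-stack threshold. "worker" is a hypothetical application thread.
 *
 *   static void low_stack_handler(const struct k_thread *thread,
 *                                 size_t unused, void *arg)
 *   {
 *           printk("thread %p: only %zu unused stack bytes left\n",
 *                  thread, unused);
 *   }
 *
 *   k_thread_runtime_stack_safety_full_check(&worker, NULL,
 *                                            low_stack_handler, NULL);
 */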
1052 
1062 #endif /* CONFIG_THREAD_RUNTIME_STACK_SAFETY */
1063 
1064 int z_impl_k_thread_stack_space_get(const struct k_thread *thread,
1065 				    size_t *unused_ptr)
1066 {
1067 #ifdef CONFIG_THREAD_STACK_MEM_MAPPED
1068 	if (thread->stack_info.mapped.addr == NULL) {
1069 		return -EINVAL;
1070 	}
1071 #endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
1072 
1073 	return z_stack_space_get((const uint8_t *)thread->stack_info.start,
1074 				 thread->stack_info.size, unused_ptr);
1075 }
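/* Illustrative usage sketch (not part of this file): report the high-water
 * mark of a thread's stack via the public k_thread_stack_space_get() API.
 * "worker" is a hypothetical application thread with stack info enabled.
 *
 *   size_t unused;
 *
 *   if (k_thread_stack_space_get(&worker, &unused) == 0) {
 *           printk("worker stack: %zu of %zu bytes never used\n",
 *                  unused, worker.stack_info.size);
 *   }
 */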
1076 
1077 #ifdef CONFIG_USERSPACE
1078 int z_vrfy_k_thread_stack_space_get(const struct k_thread *thread,
1079 				    size_t *unused_ptr)
1080 {
1081 	size_t unused;
1082 	int ret;
1083 
1084 	ret = K_SYSCALL_OBJ(thread, K_OBJ_THREAD);
1085 	CHECKIF(ret != 0) {
1086 		return ret;
1087 	}
1088 
1089 	ret = z_impl_k_thread_stack_space_get(thread, &unused);
1090 	CHECKIF(ret != 0) {
1091 		return ret;
1092 	}
1093 
1094 	ret = k_usermode_to_copy(unused_ptr, &unused, sizeof(size_t));
1095 	CHECKIF(ret != 0) {
1096 		return ret;
1097 	}
1098 
1099 	return 0;
1100 }
1101 #include <zephyr/syscalls/k_thread_stack_space_get_mrsh.c>
1102 #endif /* CONFIG_USERSPACE */
1103 #endif /* CONFIG_INIT_STACKS && CONFIG_THREAD_STACK_INFO */
1104 
1105 #ifdef CONFIG_USERSPACE
1106 static inline k_ticks_t z_vrfy_k_thread_timeout_remaining_ticks(
1107 						    const struct k_thread *thread)
1108 {
1109 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1110 	return z_impl_k_thread_timeout_remaining_ticks(thread);
1111 }
1112 #include <zephyr/syscalls/k_thread_timeout_remaining_ticks_mrsh.c>
1113 
1114 static inline k_ticks_t z_vrfy_k_thread_timeout_expires_ticks(
1115 						  const struct k_thread *thread)
1116 {
1117 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1118 	return z_impl_k_thread_timeout_expires_ticks(thread);
1119 }
1120 #include <zephyr/syscalls/k_thread_timeout_expires_ticks_mrsh.c>
1121 #endif /* CONFIG_USERSPACE */
1122 
1123 #ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
1124 void z_thread_mark_switched_in(void)
1125 {
1126 #if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH)
1127 	z_sched_usage_start(_current);
1128 #endif /* CONFIG_SCHED_THREAD_USAGE && !CONFIG_USE_SWITCH */
1129 
1130 #ifdef CONFIG_TRACING
1131 	SYS_PORT_TRACING_FUNC(k_thread, switched_in);
1132 #endif /* CONFIG_TRACING */
1133 }
1134 
1135 void z_thread_mark_switched_out(void)
1136 {
1137 #if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH)
1138 	z_sched_usage_stop();
1139 #endif /*CONFIG_SCHED_THREAD_USAGE && !CONFIG_USE_SWITCH */
1140 
1141 #ifdef CONFIG_TRACING
1142 #ifdef CONFIG_THREAD_LOCAL_STORAGE
1143 	/* Dummy thread won't have TLS set up to run arbitrary code */
1144 	if (!_current ||
1145 	    (_current->base.thread_state & _THREAD_DUMMY) != 0) {
1146 		return;
1147 	}
1148 #endif /* CONFIG_THREAD_LOCAL_STORAGE */
1149 	SYS_PORT_TRACING_FUNC(k_thread, switched_out);
1150 #endif /* CONFIG_TRACING */
1151 }
1152 #endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */
1153 
1154 int k_thread_runtime_stats_get(k_tid_t thread,
1155 			       k_thread_runtime_stats_t *stats)
1156 {
1157 	if ((thread == NULL) || (stats == NULL)) {
1158 		return -EINVAL;
1159 	}
1160 
1161 #ifdef CONFIG_SCHED_THREAD_USAGE
1162 	z_sched_thread_usage(thread, stats);
1163 #else
1164 	*stats = (k_thread_runtime_stats_t) {};
1165 #endif /* CONFIG_SCHED_THREAD_USAGE */
1166 
1167 	return 0;
1168 }
1169 
1170 int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats)
1171 {
1172 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
1173 	k_thread_runtime_stats_t  tmp_stats;
1174 #endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
1175 
1176 	if (stats == NULL) {
1177 		return -EINVAL;
1178 	}
1179 
1180 	*stats = (k_thread_runtime_stats_t) {};
1181 
1182 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
1183 	/* Retrieve the usage stats for each core and amalgamate them. */
1184 
1185 	unsigned int num_cpus = arch_num_cpus();
1186 
1187 	for (uint8_t i = 0; i < num_cpus; i++) {
1188 		z_sched_cpu_usage(i, &tmp_stats);
1189 
1190 		stats->execution_cycles += tmp_stats.execution_cycles;
1191 		stats->total_cycles     += tmp_stats.total_cycles;
1192 #ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
1193 		stats->current_cycles   += tmp_stats.current_cycles;
1194 		stats->peak_cycles      += tmp_stats.peak_cycles;
1195 		stats->average_cycles   += tmp_stats.average_cycles;
1196 #endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
1197 		stats->idle_cycles      += tmp_stats.idle_cycles;
1198 	}
1199 #endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
1200 
1201 	return 0;
1202 }
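/* Illustrative usage sketch (not part of this file): with
 * CONFIG_SCHED_THREAD_USAGE_ALL enabled, overall CPU load can be derived
 * from the aggregated counters returned above.
 *
 *   k_thread_runtime_stats_t stats;
 *
 *   if ((k_thread_runtime_stats_all_get(&stats) == 0) &&
 *       (stats.total_cycles > 0)) {
 *           uint64_t busy = stats.total_cycles - stats.idle_cycles;
 *
 *           printk("load: %u%%\n",
 *                  (unsigned int)((busy * 100U) / stats.total_cycles));
 *   }
 */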
1203 
1204 int k_thread_runtime_stats_cpu_get(int cpu, k_thread_runtime_stats_t *stats)
1205 {
1206 	if (stats == NULL) {
1207 		return -EINVAL;
1208 	}
1209 
1210 	*stats = (k_thread_runtime_stats_t) {};
1211 
1212 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
1213 #ifdef CONFIG_SMP
1214 	z_sched_cpu_usage(cpu, stats);
1215 #else
1216 	__ASSERT(cpu == 0, "cpu filter out of bounds");
1217 	ARG_UNUSED(cpu);
1218 	z_sched_cpu_usage(0, stats);
1219 #endif
1220 #endif
1221 
1222 	return 0;
1223 }
1224 
1225 #ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
1226 /** Pointer to thread which needs to be cleaned up. */
1227 static struct k_thread *thread_to_cleanup;
1228 
1229 /** Spinlock for thread abort cleanup. */
1230 static struct k_spinlock thread_cleanup_lock;
1231 
1232 #ifdef CONFIG_THREAD_STACK_MEM_MAPPED
1233 static void *thread_cleanup_stack_addr;
1234 static size_t thread_cleanup_stack_sz;
1235 #endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
1236 
1237 void defer_thread_cleanup(struct k_thread *thread)
1238 {
1239 	/* Note when adding new deferred cleanup steps:
1240 	 * - The thread object may have been overwritten by the time
1241 	 *   the actual cleanup is being done (e.g. thread object
1242 	 *   allocated on a stack). So stash any necessary data here
1243 	 *   that will be used in the actual cleanup steps.
1244 	 */
1245 	thread_to_cleanup = thread;
1246 
1247 #ifdef CONFIG_THREAD_STACK_MEM_MAPPED
1248 	/* Note that the permission of the stack should have been
1249 	 * stripped of user thread access due to the thread having
1250 	 * already exited from a memory domain. That is done via
1251 	 * k_thread_abort().
1252 	 */
1253 
1254 	/* Stash the address and size so the region can be unmapped
1255 	 * later.
1256 	 */
1257 	thread_cleanup_stack_addr = thread->stack_info.mapped.addr;
1258 	thread_cleanup_stack_sz = thread->stack_info.mapped.sz;
1259 
1260 	/* The stack is now considered unusable. This should prevent any functions
1261 	 * from looking directly into the mapped stack if they are made to be aware
1262 	 * of memory mapped stacks, e.g., z_stack_space_get().
1263 	 */
1264 	thread->stack_info.mapped.addr = NULL;
1265 	thread->stack_info.mapped.sz = 0;
1266 #endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
1267 }
1268 
1269 void do_thread_cleanup(struct k_thread *thread)
1270 {
1271 	/* Note when adding new actual cleanup steps:
1272 	 * - The thread object may have been overwritten when this is
1273 	 *   called. So avoid using any data from the thread object.
1274 	 */
1275 	ARG_UNUSED(thread);
1276 
1277 #ifdef CONFIG_THREAD_STACK_MEM_MAPPED
1278 	if (thread_cleanup_stack_addr != NULL) {
1279 		k_mem_unmap_phys_guard(thread_cleanup_stack_addr,
1280 				       thread_cleanup_stack_sz, false);
1281 
1282 		thread_cleanup_stack_addr = NULL;
1283 	}
1284 #endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
1285 }
1286 
1287 void k_thread_abort_cleanup(struct k_thread *thread)
1288 {
1289 	K_SPINLOCK(&thread_cleanup_lock) {
1290 		if (thread_to_cleanup != NULL) {
1291 			/* Finish the pending one first. */
1292 			do_thread_cleanup(thread_to_cleanup);
1293 			thread_to_cleanup = NULL;
1294 		}
1295 
1296 		if (thread == _current) {
1297 			/* Need to defer for current running thread as the cleanup
1298 			 * might result in exception. Actual cleanup will be done
1299 			 * at the next time k_thread_abort() is called, or at thread
1300 			 * creation if the same thread object is being reused. This
1301 			 * is to make sure the cleanup code no longer needs this
1302 			 * thread's stack. This is not exactly ideal as the stack
1303 			 * may still be memory mapped for a while. However, this is
1304 			 * a simple solution that avoids a) working around the
1305 			 * scheduler lock during k_thread_abort(), b) creating
1306 			 * another thread to perform the cleanup, and c) requiring
1307 			 * architecture code support (e.g. via exception).
1308 			 */
1309 			defer_thread_cleanup(thread);
1310 		} else {
1311 			/* Not the current running thread, so we are safe to do
1312 			 * cleanups.
1313 			 */
1314 			do_thread_cleanup(thread);
1315 		}
1316 	}
1317 }
1318 
1319 void k_thread_abort_cleanup_check_reuse(struct k_thread *thread)
1320 {
1321 	K_SPINLOCK(&thread_cleanup_lock) {
1322 		/* This guards reuse of the same thread object and makes sure
1323 		 * any pending cleanup of it is finished before the thread
1324 		 * object can be reused.
1325 		 */
1326 		if (thread_to_cleanup == thread) {
1327 			do_thread_cleanup(thread_to_cleanup);
1328 			thread_to_cleanup = NULL;
1329 		}
1330 	}
1331 }
1332 
1333 #endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */
1334 
1335 void z_dummy_thread_init(struct k_thread *dummy_thread)
1336 {
1337 	dummy_thread->base.thread_state = _THREAD_DUMMY;
1338 #ifdef CONFIG_SCHED_CPU_MASK
1339 	dummy_thread->base.cpu_mask = -1;
1340 #endif /* CONFIG_SCHED_CPU_MASK */
1341 	dummy_thread->base.user_options = K_ESSENTIAL;
1342 #ifdef CONFIG_THREAD_STACK_INFO
1343 	dummy_thread->stack_info.start = 0U;
1344 	dummy_thread->stack_info.size = 0U;
1345 #endif /* CONFIG_THREAD_STACK_INFO */
1346 #ifdef CONFIG_USERSPACE
1347 	dummy_thread->mem_domain_info.mem_domain = &k_mem_domain_default;
1348 #endif /* CONFIG_USERSPACE */
1349 #if (K_HEAP_MEM_POOL_SIZE > 0)
1350 	k_thread_system_pool_assign(dummy_thread);
1351 #else
1352 	dummy_thread->resource_pool = NULL;
1353 #endif /* K_HEAP_MEM_POOL_SIZE */
1354 
1355 #ifdef CONFIG_USE_SWITCH
1356 	dummy_thread->switch_handle = NULL;
1357 #endif /* CONFIG_USE_SWITCH */
1358 
1359 #ifdef CONFIG_TIMESLICE_PER_THREAD
1360 	dummy_thread->base.slice_ticks = 0;
1361 #endif /* CONFIG_TIMESLICE_PER_THREAD */
1362 
1363 	z_current_thread_set(dummy_thread);
1364 }
1365