/*
 * Copyright (c) 2010-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Kernel thread support
 *
 * This module provides general purpose thread support.
 */

#include <zephyr/kernel.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/sys_clock.h>
#include <ksched.h>
#include <wait_q.h>
#include <zephyr/internal/syscall_handler.h>
#include <kernel_internal.h>
#include <kswap.h>
#include <zephyr/init.h>
#include <zephyr/tracing/tracing.h>
#include <string.h>
#include <stdbool.h>
#include <zephyr/irq_offload.h>
#include <zephyr/sys/check.h>
#include <zephyr/random/random.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/logging/log.h>
#include <zephyr/llext/symbol.h>
#include <zephyr/sys/iterable_sections.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#ifdef CONFIG_OBJ_CORE_THREAD
static struct k_obj_type  obj_type_thread;

#ifdef CONFIG_OBJ_CORE_STATS_THREAD
static struct k_obj_core_stats_desc  thread_stats_desc = {
	.raw_size = sizeof(struct k_cycle_stats),
	.query_size = sizeof(struct k_thread_runtime_stats),
	.raw   = z_thread_stats_raw,
	.query = z_thread_stats_query,
	.reset = z_thread_stats_reset,
	.disable = z_thread_stats_disable,
	.enable  = z_thread_stats_enable,
};
#endif

static int init_thread_obj_core_list(void)
{
	/* Initialize the thread object type */

#ifdef CONFIG_OBJ_CORE_THREAD
	z_obj_type_init(&obj_type_thread, K_OBJ_TYPE_THREAD_ID,
			offsetof(struct k_thread, obj_core));
#endif

#ifdef CONFIG_OBJ_CORE_STATS_THREAD
	k_obj_type_stats_init(&obj_type_thread, &thread_stats_desc);
#endif

	return 0;
}

SYS_INIT(init_thread_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif

#ifdef CONFIG_THREAD_MONITOR
/* This lock protects the linked list of active threads; i.e. the
 * initial _kernel.threads pointer and the linked list made up of
 * thread->next_thread (until NULL)
 */
static struct k_spinlock z_thread_monitor_lock;
#endif /* CONFIG_THREAD_MONITOR */

#define _FOREACH_STATIC_THREAD(thread_data)              \
	STRUCT_SECTION_FOREACH(_static_thread_data, thread_data)

void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
{
#if defined(CONFIG_THREAD_MONITOR)
	struct k_thread *thread;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb can not be NULL");

	/*
	 * Lock is needed to make sure that _kernel.threads is not being
	 * modified by the user_cb either directly or indirectly.
	 * The indirect ways are through calling k_thread_create and
	 * k_thread_abort from user_cb.
	 */
	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach);

	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		user_cb(thread, user_data);
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach);

	k_spin_unlock(&z_thread_monitor_lock, key);
#else
	ARG_UNUSED(user_cb);
	ARG_UNUSED(user_data);
#endif
}
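
/* Illustrative usage only (not part of this file; callback and counter
 * names are examples): counting active threads with k_thread_foreach().
 *
 *   static void count_cb(const struct k_thread *t, void *user_data)
 *   {
 *           (*(unsigned int *)user_data)++;
 *   }
 *
 *   unsigned int count = 0;
 *   k_thread_foreach(count_cb, &count);
 *
 * The callback runs with z_thread_monitor_lock held, so it must not create
 * or abort threads; k_thread_foreach_unlocked() below releases the lock
 * around each callback invocation for callbacks that need to do so.
 */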

void k_thread_foreach_unlocked(k_thread_user_cb_t user_cb, void *user_data)
{
#if defined(CONFIG_THREAD_MONITOR)
	struct k_thread *thread;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb can not be NULL");

	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach_unlocked);

	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		k_spin_unlock(&z_thread_monitor_lock, key);
		user_cb(thread, user_data);
		key = k_spin_lock(&z_thread_monitor_lock);
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach_unlocked);

	k_spin_unlock(&z_thread_monitor_lock, key);
#else
	ARG_UNUSED(user_cb);
	ARG_UNUSED(user_data);
#endif
}

bool k_is_in_isr(void)
{
	return arch_is_in_isr();
}
EXPORT_SYMBOL(k_is_in_isr);

/*
 * This function tags the current thread as essential to system operation.
 * Exceptions raised by this thread will be treated as a fatal system error.
 */
void z_thread_essential_set(void)
{
	_current->base.user_options |= K_ESSENTIAL;
}

/*
 * This function tags the current thread as not essential to system operation.
 * Exceptions raised by this thread may be recoverable.
 * (This is the default tag for a thread.)
 */
void z_thread_essential_clear(void)
{
	_current->base.user_options &= ~K_ESSENTIAL;
}

/*
 * This routine indicates if the current thread is an essential system thread.
 *
 * Returns true if the current thread is essential, false if it is not.
 */
bool z_is_thread_essential(void)
{
	return (_current->base.user_options & K_ESSENTIAL) == K_ESSENTIAL;
}

#ifdef CONFIG_THREAD_CUSTOM_DATA
void z_impl_k_thread_custom_data_set(void *value)
{
	_current->custom_data = value;
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_custom_data_set(void *data)
{
	z_impl_k_thread_custom_data_set(data);
}
#include <syscalls/k_thread_custom_data_set_mrsh.c>
#endif

void *z_impl_k_thread_custom_data_get(void)
{
	return _current->custom_data;
}

#ifdef CONFIG_USERSPACE
static inline void *z_vrfy_k_thread_custom_data_get(void)
{
	return z_impl_k_thread_custom_data_get();
}
#include <syscalls/k_thread_custom_data_get_mrsh.c>

#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_THREAD_CUSTOM_DATA */
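
/* Illustrative usage only (struct and variable names are examples): a
 * thread can stash an arbitrary pointer via the custom data API above and
 * retrieve it later; the value is opaque to the kernel.
 *
 *   struct my_ctx ctx;
 *
 *   k_thread_custom_data_set(&ctx);
 *   ...
 *   struct my_ctx *p = k_thread_custom_data_get();
 */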

#if defined(CONFIG_THREAD_MONITOR)
/*
 * Remove a thread from the kernel's list of active threads.
 */
void z_thread_monitor_exit(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);

	if (thread == _kernel.threads) {
		_kernel.threads = _kernel.threads->next_thread;
	} else {
		struct k_thread *prev_thread;

		prev_thread = _kernel.threads;
		while ((prev_thread != NULL) &&
			(thread != prev_thread->next_thread)) {
			prev_thread = prev_thread->next_thread;
		}
		if (prev_thread != NULL) {
			prev_thread->next_thread = thread->next_thread;
		}
	}

	k_spin_unlock(&z_thread_monitor_lock, key);
}
#endif

int z_impl_k_thread_name_set(struct k_thread *thread, const char *value)
{
#ifdef CONFIG_THREAD_NAME
	if (thread == NULL) {
		thread = _current;
	}

	strncpy(thread->name, value, CONFIG_THREAD_MAX_NAME_LEN - 1);
	thread->name[CONFIG_THREAD_MAX_NAME_LEN - 1] = '\0';

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, name_set, thread, 0);

	return 0;
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(value);

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, name_set, thread, -ENOSYS);

	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
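
/* Illustrative usage only: passing NULL names the calling thread; names
 * longer than CONFIG_THREAD_MAX_NAME_LEN - 1 characters are truncated.
 *
 *   k_thread_name_set(NULL, "worker");
 */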

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_name_set(struct k_thread *thread, const char *str)
{
#ifdef CONFIG_THREAD_NAME
	char name[CONFIG_THREAD_MAX_NAME_LEN];

	if (thread != NULL) {
		if (K_SYSCALL_OBJ(thread, K_OBJ_THREAD) != 0) {
			return -EINVAL;
		}
	}

	/* In theory we could copy directly into thread->name, but
	 * the current z_vrfy / z_impl split does not provide a
	 * means of doing so.
	 */
	if (k_usermode_string_copy(name, (char *)str, sizeof(name)) != 0) {
		return -EFAULT;
	}

	return z_impl_k_thread_name_set(thread, name);
#else
	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
#include <syscalls/k_thread_name_set_mrsh.c>
#endif /* CONFIG_USERSPACE */

const char *k_thread_name_get(k_tid_t thread)
{
#ifdef CONFIG_THREAD_NAME
	return (const char *)thread->name;
#else
	ARG_UNUSED(thread);
	return NULL;
#endif /* CONFIG_THREAD_NAME */
}

int z_impl_k_thread_name_copy(k_tid_t thread, char *buf, size_t size)
{
#ifdef CONFIG_THREAD_NAME
	strncpy(buf, thread->name, size);
	return 0;
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(buf);
	ARG_UNUSED(size);
	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}

static size_t copy_bytes(char *dest, size_t dest_size, const char *src, size_t src_size)
{
	size_t  bytes_to_copy;

	bytes_to_copy = MIN(dest_size, src_size);
	memcpy(dest, src, bytes_to_copy);

	return bytes_to_copy;
}

#define Z_STATE_STR_DUMMY       "dummy"
#define Z_STATE_STR_PENDING     "pending"
#define Z_STATE_STR_PRESTART    "prestart"
#define Z_STATE_STR_DEAD        "dead"
#define Z_STATE_STR_SUSPENDED   "suspended"
#define Z_STATE_STR_ABORTING    "aborting"
#define Z_STATE_STR_SUSPENDING  "suspending"
#define Z_STATE_STR_QUEUED      "queued"

const char *k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size)
{
	size_t      off = 0;
	uint8_t     bit;
	uint8_t     thread_state = thread_id->base.thread_state;
	static const struct {
		const char *str;
		size_t      len;
	} state_string[] = {
		{ Z_STATE_STR_DUMMY, sizeof(Z_STATE_STR_DUMMY) - 1},
		{ Z_STATE_STR_PENDING, sizeof(Z_STATE_STR_PENDING) - 1},
		{ Z_STATE_STR_PRESTART, sizeof(Z_STATE_STR_PRESTART) - 1},
		{ Z_STATE_STR_DEAD, sizeof(Z_STATE_STR_DEAD) - 1},
		{ Z_STATE_STR_SUSPENDED, sizeof(Z_STATE_STR_SUSPENDED) - 1},
		{ Z_STATE_STR_ABORTING, sizeof(Z_STATE_STR_ABORTING) - 1},
		{ Z_STATE_STR_SUSPENDING, sizeof(Z_STATE_STR_SUSPENDING) - 1},
		{ Z_STATE_STR_QUEUED, sizeof(Z_STATE_STR_QUEUED) - 1},
	};

	if ((buf == NULL) || (buf_size == 0)) {
		return "";
	}

	buf_size--;   /* Reserve 1 byte for end-of-string character */

	/*
	 * Loop through each bit in the thread_state. Stop once all have
	 * been processed. If more than one thread_state bit is set, then
	 * separate the descriptive strings with a '+'.
	 */

	for (unsigned int index = 0; thread_state != 0; index++) {
		bit = BIT(index);
		if ((thread_state & bit) == 0) {
			continue;
		}

		off += copy_bytes(buf + off, buf_size - off,
				  state_string[index].str,
				  state_string[index].len);

		thread_state &= ~bit;

		if (thread_state != 0) {
			off += copy_bytes(buf + off, buf_size - off, "+", 1);
		}
	}

	buf[off] = '\0';

	return (const char *)buf;
}
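
/* Illustrative usage only ('tid' and the buffer size are examples): the
 * caller supplies the buffer, and multiple set state bits are joined with
 * '+' (e.g. "suspended+queued").
 *
 *   char state_buf[32];
 *
 *   printk("state: %s\n",
 *          k_thread_state_str(tid, state_buf, sizeof(state_buf)));
 */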

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_name_copy(k_tid_t thread,
					    char *buf, size_t size)
{
#ifdef CONFIG_THREAD_NAME
	size_t len;
	struct k_object *ko = k_object_find(thread);

	/* Special case: we allow reading the names of initialized threads
	 * even if we don't have permission on them
	 */
	if (thread == NULL || ko->type != K_OBJ_THREAD ||
	    (ko->flags & K_OBJ_FLAG_INITIALIZED) == 0) {
		return -EINVAL;
	}
	if (K_SYSCALL_MEMORY_WRITE(buf, size) != 0) {
		return -EFAULT;
	}
	len = strlen(thread->name);
	if (len + 1 > size) {
		return -ENOSPC;
	}

	return k_usermode_to_copy((void *)buf, thread->name, len + 1);
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(buf);
	ARG_UNUSED(size);
	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
#include <syscalls/k_thread_name_copy_mrsh.c>
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_STACK_SENTINEL
/* Check that the stack sentinel is still present
 *
 * The stack sentinel feature writes a magic value to the lowest 4 bytes of
 * the thread's stack when the thread is initialized. This value gets checked
 * in a few places:
 *
 * 1) In k_yield() if the current thread is not swapped out
 * 2) After servicing a non-nested interrupt
 * 3) In z_swap(), check the sentinel in the outgoing thread
 *
 * Item 2 requires support in arch/ code.
 *
 * If the check fails, the thread will be terminated appropriately through
 * the system fatal error handler.
 */
void z_check_stack_sentinel(void)
{
	uint32_t *stack;

	if ((_current->base.thread_state & _THREAD_DUMMY) != 0) {
		return;
	}

	stack = (uint32_t *)_current->stack_info.start;
	if (*stack != STACK_SENTINEL) {
		/* Restore it so further checks don't trigger this same error */
		*stack = STACK_SENTINEL;
		z_except_reason(K_ERR_STACK_CHK_FAIL);
	}
}
#endif /* CONFIG_STACK_SENTINEL */

void z_impl_k_thread_start(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_thread, start, thread);

	z_sched_start(thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_start(struct k_thread *thread)
{
	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	return z_impl_k_thread_start(thread);
}
#include <syscalls/k_thread_start_mrsh.c>
#endif
#endif

#ifdef CONFIG_MULTITHREADING
static void schedule_new_thread(struct k_thread *thread, k_timeout_t delay)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
	if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
		k_thread_start(thread);
	} else {
		z_add_thread_timeout(thread, delay);
	}
#else
	ARG_UNUSED(delay);
	k_thread_start(thread);
#endif
}
#endif

#if CONFIG_STACK_POINTER_RANDOM
int z_stack_adjust_initialized;

static size_t random_offset(size_t stack_size)
{
	size_t random_val;

	if (!z_stack_adjust_initialized) {
		z_early_rand_get((uint8_t *)&random_val, sizeof(random_val));
	} else {
		sys_rand_get((uint8_t *)&random_val, sizeof(random_val));
	}

	/* Don't need to worry about alignment of the size here,
	 * arch_new_thread() is required to do it.
	 *
	 * FIXME: Not the best way to get a random number in a range.
	 * See #6493
	 */
	const size_t fuzz = random_val % CONFIG_STACK_POINTER_RANDOM;

	if (unlikely(fuzz * 2 > stack_size)) {
		return 0;
	}

	return fuzz;
}
#if defined(CONFIG_STACK_GROWS_UP)
	/* This case is rare enough that it is not handled for now */
#error "Stack pointer randomization not implemented for upward growing stacks"
#endif /* CONFIG_STACK_GROWS_UP */
#endif /* CONFIG_STACK_POINTER_RANDOM */

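/* Prepare a new thread's stack object and compute its initial stack pointer.
 * Descriptive summary (not normative): working downward from the high end of
 * the stack buffer, space is carved out for thread-local storage (if
 * CONFIG_THREAD_LOCAL_STORAGE), userspace local data (if
 * CONFIG_THREAD_USERSPACE_LOCAL_DATA) and an optional random offset
 * (CONFIG_STACK_POINTER_RANDOM), while the stack sentinel, if enabled,
 * occupies the lowest 4 bytes of the buffer.
 */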
static char *setup_thread_stack(struct k_thread *new_thread,
				k_thread_stack_t *stack, size_t stack_size)
{
	size_t stack_obj_size, stack_buf_size;
	char *stack_ptr, *stack_buf_start;
	size_t delta = 0;

#ifdef CONFIG_USERSPACE
	if (z_stack_is_user_capable(stack)) {
		stack_obj_size = Z_THREAD_STACK_SIZE_ADJUST(stack_size);
		stack_buf_start = Z_THREAD_STACK_BUFFER(stack);
		stack_buf_size = stack_obj_size - K_THREAD_STACK_RESERVED;
	} else
#endif
	{
		/* Object cannot host a user mode thread */
		stack_obj_size = Z_KERNEL_STACK_SIZE_ADJUST(stack_size);
		stack_buf_start = Z_KERNEL_STACK_BUFFER(stack);
		stack_buf_size = stack_obj_size - K_KERNEL_STACK_RESERVED;

		/* Zephyr treats stack overflow as an app bug.  But
		 * this particular overflow can be seen by static
		 * analysis so needs to be handled somehow.
		 */
		if (K_KERNEL_STACK_RESERVED > stack_obj_size) {
			k_panic();
		}

	}

	/* Initial stack pointer at the high end of the stack object, may
	 * be reduced later in this function by TLS or random offset
	 */
	stack_ptr = (char *)stack + stack_obj_size;

	LOG_DBG("stack %p for thread %p: obj_size=%zu buf_start=%p "
		" buf_size %zu stack_ptr=%p",
		stack, new_thread, stack_obj_size, (void *)stack_buf_start,
		stack_buf_size, (void *)stack_ptr);

#ifdef CONFIG_INIT_STACKS
	memset(stack_buf_start, 0xaa, stack_buf_size);
#endif
#ifdef CONFIG_STACK_SENTINEL
	/* Put the stack sentinel at the lowest 4 bytes of the stack area.
	 * We periodically check that it's still present and kill the thread
	 * if it isn't.
	 */
	*((uint32_t *)stack_buf_start) = STACK_SENTINEL;
#endif /* CONFIG_STACK_SENTINEL */
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/* TLS is always last within the stack buffer */
	delta += arch_tls_stack_setup(new_thread, stack_ptr);
#endif /* CONFIG_THREAD_LOCAL_STORAGE */
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
	size_t tls_size = sizeof(struct _thread_userspace_local_data);

	/* reserve space on highest memory of stack buffer for local data */
	delta += tls_size;
	new_thread->userspace_local_data =
		(struct _thread_userspace_local_data *)(stack_ptr - delta);
#endif
#if CONFIG_STACK_POINTER_RANDOM
	delta += random_offset(stack_buf_size);
#endif
	delta = ROUND_UP(delta, ARCH_STACK_PTR_ALIGN);
#ifdef CONFIG_THREAD_STACK_INFO
	/* Initial values. Arches which implement MPU guards that "borrow"
	 * memory from the stack buffer (not tracked in K_THREAD_STACK_RESERVED)
	 * will need to appropriately update this.
	 *
	 * The bounds tracked here correspond to the area of the stack object
	 * that the thread can access, which includes TLS.
	 */
	new_thread->stack_info.start = (uintptr_t)stack_buf_start;
	new_thread->stack_info.size = stack_buf_size;
	new_thread->stack_info.delta = delta;
#endif
	stack_ptr -= delta;

	return stack_ptr;
}

/*
 * The provided stack_size value is presumed to be either the result of
 * K_THREAD_STACK_SIZEOF(stack), or the size value passed to the instance
 * of K_THREAD_STACK_DEFINE() which defined 'stack'.
 */
char *z_setup_new_thread(struct k_thread *new_thread,
			 k_thread_stack_t *stack, size_t stack_size,
			 k_thread_entry_t entry,
			 void *p1, void *p2, void *p3,
			 int prio, uint32_t options, const char *name)
{
	char *stack_ptr;

	Z_ASSERT_VALID_PRIO(prio, entry);

#ifdef CONFIG_OBJ_CORE_THREAD
	k_obj_core_init_and_link(K_OBJ_CORE(new_thread), &obj_type_thread);
#ifdef CONFIG_OBJ_CORE_STATS_THREAD
	k_obj_core_stats_register(K_OBJ_CORE(new_thread),
				  &new_thread->base.usage,
				  sizeof(new_thread->base.usage));
#endif
#endif

#ifdef CONFIG_USERSPACE
	__ASSERT((options & K_USER) == 0U || z_stack_is_user_capable(stack),
		 "user thread %p with kernel-only stack %p",
		 new_thread, stack);
	k_object_init(new_thread);
	k_object_init(stack);
	new_thread->stack_obj = stack;
	new_thread->syscall_frame = NULL;

	/* Any given thread has access to itself */
	k_object_access_grant(new_thread, new_thread);
#endif
	z_waitq_init(&new_thread->join_queue);

	/* Initialize various struct k_thread members */
	z_init_thread_base(&new_thread->base, prio, _THREAD_PRESTART, options);
	stack_ptr = setup_thread_stack(new_thread, stack, stack_size);

#ifdef CONFIG_KERNEL_COHERENCE
	/* Check that the thread object is safe, but that the stack is
	 * still cached!
	 */
	__ASSERT_NO_MSG(arch_mem_coherent(new_thread));

	/* When dynamic thread stack is available, the stack may come from
	 * an uncached area.
	 */
#ifndef CONFIG_DYNAMIC_THREAD
	__ASSERT_NO_MSG(!arch_mem_coherent(stack));
#endif  /* CONFIG_DYNAMIC_THREAD */

#endif

	arch_new_thread(new_thread, stack, stack_ptr, entry, p1, p2, p3);

	/* static threads overwrite it afterwards with real value */
	new_thread->init_data = NULL;

#ifdef CONFIG_USE_SWITCH
	/* switch_handle must be non-null except when inside z_swap()
	 * for synchronization reasons.  Historically some notional
	 * USE_SWITCH architectures have actually ignored the field
	 */
	__ASSERT(new_thread->switch_handle != NULL,
		 "arch layer failed to initialize switch_handle");
#endif
#ifdef CONFIG_THREAD_CUSTOM_DATA
	/* Initialize custom data field (value is opaque to kernel) */
	new_thread->custom_data = NULL;
#endif
#ifdef CONFIG_EVENTS
	new_thread->no_wake_on_timeout = false;
#endif
#ifdef CONFIG_THREAD_MONITOR
	new_thread->entry.pEntry = entry;
	new_thread->entry.parameter1 = p1;
	new_thread->entry.parameter2 = p2;
	new_thread->entry.parameter3 = p3;

	k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);

	new_thread->next_thread = _kernel.threads;
	_kernel.threads = new_thread;
	k_spin_unlock(&z_thread_monitor_lock, key);
#endif
#ifdef CONFIG_THREAD_NAME
	if (name != NULL) {
		strncpy(new_thread->name, name,
			CONFIG_THREAD_MAX_NAME_LEN - 1);
		/* Ensure NULL termination, truncate if longer */
		new_thread->name[CONFIG_THREAD_MAX_NAME_LEN - 1] = '\0';
	} else {
		new_thread->name[0] = '\0';
	}
#endif
#ifdef CONFIG_SCHED_CPU_MASK
	if (IS_ENABLED(CONFIG_SCHED_CPU_MASK_PIN_ONLY)) {
		new_thread->base.cpu_mask = 1; /* must specify only one cpu */
	} else {
		new_thread->base.cpu_mask = -1; /* allow all cpus */
	}
#endif
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
	/* _current may be null if the dummy thread is not used */
	if (!_current) {
		new_thread->resource_pool = NULL;
		return stack_ptr;
	}
#endif
#ifdef CONFIG_USERSPACE
	z_mem_domain_init_thread(new_thread);

	if ((options & K_INHERIT_PERMS) != 0U) {
		k_thread_perms_inherit(_current, new_thread);
	}
#endif
#ifdef CONFIG_SCHED_DEADLINE
	new_thread->base.prio_deadline = 0;
#endif
	new_thread->resource_pool = _current->resource_pool;

#ifdef CONFIG_SMP
	z_waitq_init(&new_thread->halt_queue);
#endif

#ifdef CONFIG_SCHED_THREAD_USAGE
	new_thread->base.usage = (struct k_cycle_stats) {};
	new_thread->base.usage.track_usage =
		CONFIG_SCHED_THREAD_USAGE_AUTO_ENABLE;
#endif

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, create, new_thread);

	return stack_ptr;
}

#ifdef CONFIG_MULTITHREADING
k_tid_t z_impl_k_thread_create(struct k_thread *new_thread,
			      k_thread_stack_t *stack,
			      size_t stack_size, k_thread_entry_t entry,
			      void *p1, void *p2, void *p3,
			      int prio, uint32_t options, k_timeout_t delay)
{
	__ASSERT(!arch_is_in_isr(), "Threads may not be created in ISRs");

	z_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
			  prio, options, NULL);

	if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
		schedule_new_thread(new_thread, delay);
	}

	return new_thread;
}
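
/* Illustrative usage only (example names; not part of this file): creating
 * a thread at priority 5 with a statically defined stack and starting it
 * immediately.
 *
 *   K_THREAD_STACK_DEFINE(my_stack, 1024);
 *   static struct k_thread my_thread;
 *
 *   k_tid_t tid = k_thread_create(&my_thread, my_stack,
 *                                 K_THREAD_STACK_SIZEOF(my_stack),
 *                                 my_entry, NULL, NULL, NULL,
 *                                 5, 0, K_NO_WAIT);
 */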

#ifdef CONFIG_USERSPACE
bool z_stack_is_user_capable(k_thread_stack_t *stack)
{
	return k_object_find(stack) != NULL;
}

k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
			       k_thread_stack_t *stack,
			       size_t stack_size, k_thread_entry_t entry,
			       void *p1, void *p2, void *p3,
			       int prio, uint32_t options, k_timeout_t delay)
{
	size_t total_size, stack_obj_size;
	struct k_object *stack_object;

	/* The thread and stack objects *must* be in an uninitialized state */
	K_OOPS(K_SYSCALL_OBJ_NEVER_INIT(new_thread, K_OBJ_THREAD));

	/* No need to check z_stack_is_user_capable(), it won't be in the
	 * object table if it isn't
	 */
	stack_object = k_object_find(stack);
	K_OOPS(K_SYSCALL_VERIFY_MSG(k_object_validation_check(stack_object, stack,
						K_OBJ_THREAD_STACK_ELEMENT,
						_OBJ_INIT_FALSE) == 0,
				    "bad stack object"));

	/* Verify that the stack size passed in is OK by computing the total
	 * size and comparing it with the size value in the object metadata
	 */
	K_OOPS(K_SYSCALL_VERIFY_MSG(!size_add_overflow(K_THREAD_STACK_RESERVED,
						       stack_size, &total_size),
				    "stack size overflow (%zu+%zu)",
				    stack_size,
				    K_THREAD_STACK_RESERVED));

	/* Testing less-than-or-equal since additional room may have been
	 * allocated for alignment constraints
	 */
#ifdef CONFIG_GEN_PRIV_STACKS
	stack_obj_size = stack_object->data.stack_data->size;
#else
	stack_obj_size = stack_object->data.stack_size;
#endif
	K_OOPS(K_SYSCALL_VERIFY_MSG(total_size <= stack_obj_size,
				    "stack size %zu is too big, max is %zu",
				    total_size, stack_obj_size));

	/* User threads may only create other user threads and they can't
	 * be marked as essential
	 */
	K_OOPS(K_SYSCALL_VERIFY(options & K_USER));
	K_OOPS(K_SYSCALL_VERIFY(!(options & K_ESSENTIAL)));

	/* Check validity of prio argument; must be the same or worse priority
	 * than the caller
	 */
	K_OOPS(K_SYSCALL_VERIFY(_is_valid_prio(prio, NULL)));
	K_OOPS(K_SYSCALL_VERIFY(z_is_prio_lower_or_equal(prio,
							_current->base.prio)));

	z_setup_new_thread(new_thread, stack, stack_size,
			   entry, p1, p2, p3, prio, options, NULL);

	if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
		schedule_new_thread(new_thread, delay);
	}

	return new_thread;
}
#include <syscalls/k_thread_create_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_MULTITHREADING */

#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_USERSPACE

static void grant_static_access(void)
{
	STRUCT_SECTION_FOREACH(k_object_assignment, pos) {
		for (int i = 0; pos->objects[i] != NULL; i++) {
			k_object_access_grant(pos->objects[i],
					      pos->thread);
		}
	}
}
#endif /* CONFIG_USERSPACE */

void z_init_static_threads(void)
{
	_FOREACH_STATIC_THREAD(thread_data) {
		z_setup_new_thread(
			thread_data->init_thread,
			thread_data->init_stack,
			thread_data->init_stack_size,
			thread_data->init_entry,
			thread_data->init_p1,
			thread_data->init_p2,
			thread_data->init_p3,
			thread_data->init_prio,
			thread_data->init_options,
			thread_data->init_name);

		thread_data->init_thread->init_data = thread_data;
	}

#ifdef CONFIG_USERSPACE
	grant_static_access();
#endif

	/*
	 * Non-legacy static threads may be started immediately or
	 * after a previously specified delay. Even though the
	 * scheduler is locked, ticks can still be delivered and
	 * processed. Take a sched lock to prevent them from running
	 * until they are all started.
	 *
	 * Note that static threads defined using the legacy API have a
	 * delay of K_FOREVER.
	 */
	k_sched_lock();
	_FOREACH_STATIC_THREAD(thread_data) {
		k_timeout_t init_delay = Z_THREAD_INIT_DELAY(thread_data);

		if (!K_TIMEOUT_EQ(init_delay, K_FOREVER)) {
			schedule_new_thread(thread_data->init_thread,
					    init_delay);
		}
	}
	k_sched_unlock();
}
#endif

void z_init_thread_base(struct _thread_base *thread_base, int priority,
		       uint32_t initial_state, unsigned int options)
{
	/* k_q_node is initialized upon first insertion in a list */
	thread_base->pended_on = NULL;
	thread_base->user_options = (uint8_t)options;
	thread_base->thread_state = (uint8_t)initial_state;

	thread_base->prio = priority;

	thread_base->sched_locked = 0U;

#ifdef CONFIG_SMP
	thread_base->is_idle = 0;
#endif

#ifdef CONFIG_TIMESLICE_PER_THREAD
	thread_base->slice_ticks = 0;
	thread_base->slice_expired = NULL;
#endif

	/* swap_data does not need to be initialized */

	z_init_thread_timeout(thread_base);
}

FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
					    void *p1, void *p2, void *p3)
{
	SYS_PORT_TRACING_FUNC(k_thread, user_mode_enter);

	_current->base.user_options |= K_USER;
	z_thread_essential_clear();
#ifdef CONFIG_THREAD_MONITOR
	_current->entry.pEntry = entry;
	_current->entry.parameter1 = p1;
	_current->entry.parameter2 = p2;
	_current->entry.parameter3 = p3;
#endif
#ifdef CONFIG_USERSPACE
	__ASSERT(z_stack_is_user_capable(_current->stack_obj),
		 "dropping to user mode with kernel-only stack object");
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
	memset(_current->userspace_local_data, 0,
	       sizeof(struct _thread_userspace_local_data));
#endif
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	arch_tls_stack_setup(_current,
			     (char *)(_current->stack_info.start +
				      _current->stack_info.size));
#endif
	arch_user_mode_enter(entry, p1, p2, p3);
#else
	/* XXX In this case we do not reset the stack */
	z_thread_entry(entry, p1, p2, p3);
#endif
}
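
/* Illustrative usage only (example names): a thread started in supervisor
 * mode can perform privileged setup and then permanently drop to user mode.
 *
 *   static void my_entry(void *p1, void *p2, void *p3)
 *   {
 *           ... privileged setup here ...
 *           k_thread_user_mode_enter(my_user_entry, p1, p2, p3);
 *           ... never reached ...
 *   }
 */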

/* These spinlock assertion predicates are defined here because having
 * them in spinlock.h is a giant header ordering headache.
 */
#ifdef CONFIG_SPIN_VALIDATE
bool z_spin_lock_valid(struct k_spinlock *l)
{
	uintptr_t thread_cpu = l->thread_cpu;

	if (thread_cpu != 0U) {
		if ((thread_cpu & 3U) == _current_cpu->id) {
			return false;
		}
	}
	return true;
}

bool z_spin_unlock_valid(struct k_spinlock *l)
{
	if (l->thread_cpu != (_current_cpu->id | (uintptr_t)_current)) {
		return false;
	}
	l->thread_cpu = 0;
	return true;
}

void z_spin_lock_set_owner(struct k_spinlock *l)
{
	l->thread_cpu = _current_cpu->id | (uintptr_t)_current;
}

#ifdef CONFIG_KERNEL_COHERENCE
bool z_spin_lock_mem_coherent(struct k_spinlock *l)
{
	return arch_mem_coherent((void *)l);
}
#endif /* CONFIG_KERNEL_COHERENCE */

#endif /* CONFIG_SPIN_VALIDATE */

int z_impl_k_float_disable(struct k_thread *thread)
{
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	return arch_float_disable(thread);
#else
	ARG_UNUSED(thread);
	return -ENOTSUP;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
}

int z_impl_k_float_enable(struct k_thread *thread, unsigned int options)
{
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	return arch_float_enable(thread, options);
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(options);
	return -ENOTSUP;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_float_disable(struct k_thread *thread)
{
	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	return z_impl_k_float_disable(thread);
}
#include <syscalls/k_float_disable_mrsh.c>
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_IRQ_OFFLOAD
/* Keep offload_sem visible outside this file when testing, so that it can
 * be released externally if an error occurs.
 */
K_SEM_DEFINE(offload_sem, 1, 1);

void irq_offload(irq_offload_routine_t routine, const void *parameter)
{
#ifdef CONFIG_IRQ_OFFLOAD_NESTED
	arch_irq_offload(routine, parameter);
#else
	k_sem_take(&offload_sem, K_FOREVER);
	arch_irq_offload(routine, parameter);
	k_sem_give(&offload_sem);
#endif
}
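
/* Illustrative usage only (example names): running a routine in interrupt
 * context from a test, typically to exercise ISR code paths.
 *
 *   static void my_isr_work(const void *param)
 *   {
 *           ... runs in interrupt context ...
 *   }
 *
 *   irq_offload(my_isr_work, NULL);
 */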
#endif

#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
#ifdef CONFIG_STACK_GROWS_UP
#error "Unsupported configuration for stack analysis"
#endif

int z_stack_space_get(const uint8_t *stack_start, size_t size, size_t *unused_ptr)
{
	size_t unused = 0;
	const uint8_t *checked_stack = stack_start;
	/* Take the address of any local variable as a shallow bound for the
	 * stack pointer.  Addresses above it are guaranteed to be
	 * accessible.
	 */
	const uint8_t *stack_pointer = (const uint8_t *)&stack_start;

	/* If we are currently running on the stack being analyzed, some
	 * memory management hardware will generate an exception if we
	 * read unused stack memory.
	 *
	 * This never happens when invoked from user mode, as user mode
	 * will always run this function on the privilege elevation stack.
	 */
	if ((stack_pointer > stack_start) && (stack_pointer <= (stack_start + size)) &&
	    IS_ENABLED(CONFIG_NO_UNUSED_STACK_INSPECTION)) {
		/* TODO: We could add an arch_ API call to temporarily
		 * disable the stack checking in the CPU, but this would
		 * need to be properly managed wrt context switches/interrupts
		 */
		return -ENOTSUP;
	}

	if (IS_ENABLED(CONFIG_STACK_SENTINEL)) {
		/* The first 4 bytes of the stack buffer are reserved for the
		 * sentinel value, so they won't be 0xAAAAAAAA for thread
		 * stacks.
		 *
		 * FIXME: thread->stack_info.start ought to reflect
		 * this!
		 */
		checked_stack += 4;
		size -= 4;
	}

	for (size_t i = 0; i < size; i++) {
		if ((checked_stack[i]) == 0xaaU) {
			unused++;
		} else {
			break;
		}
	}

	*unused_ptr = unused;

	return 0;
}

int z_impl_k_thread_stack_space_get(const struct k_thread *thread,
				    size_t *unused_ptr)
{
	return z_stack_space_get((const uint8_t *)thread->stack_info.start,
				 thread->stack_info.size, unused_ptr);
}
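
/* Illustrative usage only: reporting how much of the current thread's stack
 * has never been written (requires CONFIG_INIT_STACKS and
 * CONFIG_THREAD_STACK_INFO).
 *
 *   size_t unused;
 *
 *   if (k_thread_stack_space_get(k_current_get(), &unused) == 0) {
 *           printk("%zu bytes unused\n", unused);
 *   }
 */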

#ifdef CONFIG_USERSPACE
int z_vrfy_k_thread_stack_space_get(const struct k_thread *thread,
				    size_t *unused_ptr)
{
	size_t unused;
	int ret;

	ret = K_SYSCALL_OBJ(thread, K_OBJ_THREAD);
	CHECKIF(ret != 0) {
		return ret;
	}

	ret = z_impl_k_thread_stack_space_get(thread, &unused);
	CHECKIF(ret != 0) {
		return ret;
	}

	ret = k_usermode_to_copy(unused_ptr, &unused, sizeof(size_t));
	CHECKIF(ret != 0) {
		return ret;
	}

	return 0;
}
#include <syscalls/k_thread_stack_space_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_INIT_STACKS && CONFIG_THREAD_STACK_INFO */

#ifdef CONFIG_USERSPACE
static inline k_ticks_t z_vrfy_k_thread_timeout_remaining_ticks(
						    const struct k_thread *t)
{
	K_OOPS(K_SYSCALL_OBJ(t, K_OBJ_THREAD));
	return z_impl_k_thread_timeout_remaining_ticks(t);
}
#include <syscalls/k_thread_timeout_remaining_ticks_mrsh.c>

static inline k_ticks_t z_vrfy_k_thread_timeout_expires_ticks(
						  const struct k_thread *t)
{
	K_OOPS(K_SYSCALL_OBJ(t, K_OBJ_THREAD));
	return z_impl_k_thread_timeout_expires_ticks(t);
}
#include <syscalls/k_thread_timeout_expires_ticks_mrsh.c>
#endif

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
void z_thread_mark_switched_in(void)
{
#if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH)
	z_sched_usage_start(_current);
#endif

#ifdef CONFIG_TRACING
	SYS_PORT_TRACING_FUNC(k_thread, switched_in);
#endif
}

void z_thread_mark_switched_out(void)
{
#if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH)
	z_sched_usage_stop();
#endif

#ifdef CONFIG_TRACING
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/* Dummy thread won't have TLS set up to run arbitrary code */
	if (!_current_cpu->current ||
	    (_current_cpu->current->base.thread_state & _THREAD_DUMMY) != 0)
		return;
#endif
	SYS_PORT_TRACING_FUNC(k_thread, switched_out);
#endif
}
#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */

int k_thread_runtime_stats_get(k_tid_t thread,
			       k_thread_runtime_stats_t *stats)
{
	if ((thread == NULL) || (stats == NULL)) {
		return -EINVAL;
	}

#ifdef CONFIG_SCHED_THREAD_USAGE
	z_sched_thread_usage(thread, stats);
#else
	*stats = (k_thread_runtime_stats_t) {};
#endif

	return 0;
}
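
/* Illustrative usage only: reading a thread's accumulated execution cycles
 * (non-zero values require CONFIG_SCHED_THREAD_USAGE).
 *
 *   k_thread_runtime_stats_t rt_stats;
 *
 *   if (k_thread_runtime_stats_get(k_current_get(), &rt_stats) == 0) {
 *           printk("execution_cycles: %llu\n", rt_stats.execution_cycles);
 *   }
 */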

int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats)
{
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	k_thread_runtime_stats_t  tmp_stats;
#endif

	if (stats == NULL) {
		return -EINVAL;
	}

	*stats = (k_thread_runtime_stats_t) {};

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	/* Retrieve the usage stats for each core and amalgamate them. */

	unsigned int num_cpus = arch_num_cpus();

	for (uint8_t i = 0; i < num_cpus; i++) {
		z_sched_cpu_usage(i, &tmp_stats);

		stats->execution_cycles += tmp_stats.execution_cycles;
		stats->total_cycles     += tmp_stats.total_cycles;
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
		stats->current_cycles   += tmp_stats.current_cycles;
		stats->peak_cycles      += tmp_stats.peak_cycles;
		stats->average_cycles   += tmp_stats.average_cycles;
#endif
		stats->idle_cycles      += tmp_stats.idle_cycles;
	}
#endif

	return 0;
}