/*
 * Copyright (c) 2010-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Kernel thread support
 *
 * This module provides general purpose thread support.
 */

#include <zephyr/kernel.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/sys_clock.h>
#include <ksched.h>
#include <wait_q.h>
#include <zephyr/syscall_handler.h>
#include <kernel_internal.h>
#include <kswap.h>
#include <zephyr/init.h>
#include <zephyr/tracing/tracing.h>
#include <string.h>
#include <stdbool.h>
#include <zephyr/irq_offload.h>
#include <zephyr/sys/check.h>
#include <zephyr/random/random.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/iterable_sections.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#ifdef CONFIG_OBJ_CORE_THREAD
static struct k_obj_type obj_type_thread;

#ifdef CONFIG_OBJ_CORE_STATS_THREAD
static struct k_obj_core_stats_desc thread_stats_desc = {
	.raw_size = sizeof(struct k_cycle_stats),
	.query_size = sizeof(struct k_thread_runtime_stats),
	.raw = z_thread_stats_raw,
	.query = z_thread_stats_query,
	.reset = z_thread_stats_reset,
	.disable = z_thread_stats_disable,
	.enable = z_thread_stats_enable,
};
#endif

static int init_thread_obj_core_list(void)
{
	/* Initialize thread object type */

#ifdef CONFIG_OBJ_CORE_THREAD
	z_obj_type_init(&obj_type_thread, K_OBJ_TYPE_THREAD_ID,
			offsetof(struct k_thread, obj_core));
#endif

#ifdef CONFIG_OBJ_CORE_STATS_THREAD
	k_obj_type_stats_init(&obj_type_thread, &thread_stats_desc);
#endif

	return 0;
}

SYS_INIT(init_thread_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif

#ifdef CONFIG_THREAD_MONITOR
/* This lock protects the linked list of active threads; i.e. the
 * initial _kernel.threads pointer and the linked list made up of
 * thread->next_thread (until NULL)
 */
static struct k_spinlock z_thread_monitor_lock;
#endif /* CONFIG_THREAD_MONITOR */

#define _FOREACH_STATIC_THREAD(thread_data) \
	STRUCT_SECTION_FOREACH(_static_thread_data, thread_data)

void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
{
#if defined(CONFIG_THREAD_MONITOR)
	struct k_thread *thread;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb must not be NULL");

	/*
	 * Lock is needed to make sure that the _kernel.threads is not being
	 * modified by the user_cb either directly or indirectly.
	 * The indirect ways are through calling k_thread_create and
	 * k_thread_abort from user_cb.
	 */
	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach);

	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		user_cb(thread, user_data);
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach);

	k_spin_unlock(&z_thread_monitor_lock, key);
#else
	ARG_UNUSED(user_cb);
	ARG_UNUSED(user_data);
#endif
}
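
/* Illustrative sketch (not part of the kernel): counting active threads
 * with k_thread_foreach(). The callback and counting criterion are
 * assumptions made for the example only; the callback must not create or
 * abort threads, since the monitor lock is held while it runs.
 *
 *	static void count_cb(const struct k_thread *thread, void *user_data)
 *	{
 *		size_t *count = user_data;
 *
 *		ARG_UNUSED(thread);
 *		(*count)++;
 *	}
 *
 *	size_t thread_count = 0;
 *
 *	k_thread_foreach(count_cb, &thread_count);
 */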

void k_thread_foreach_unlocked(k_thread_user_cb_t user_cb, void *user_data)
{
#if defined(CONFIG_THREAD_MONITOR)
	struct k_thread *thread;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb must not be NULL");

	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach_unlocked);

	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		k_spin_unlock(&z_thread_monitor_lock, key);
		user_cb(thread, user_data);
		key = k_spin_lock(&z_thread_monitor_lock);
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach_unlocked);

	k_spin_unlock(&z_thread_monitor_lock, key);
#else
	ARG_UNUSED(user_cb);
	ARG_UNUSED(user_data);
#endif
}

bool k_is_in_isr(void)
{
	return arch_is_in_isr();
}

/*
 * This function tags the current thread as essential to system operation.
 * Exceptions raised by this thread will be treated as a fatal system error.
 */
void z_thread_essential_set(void)
{
	_current->base.user_options |= K_ESSENTIAL;
}

/*
 * This function tags the current thread as not essential to system operation.
 * Exceptions raised by this thread may be recoverable.
 * (This is the default tag for a thread.)
 */
void z_thread_essential_clear(void)
{
	_current->base.user_options &= ~K_ESSENTIAL;
}

/*
 * This routine indicates if the current thread is an essential system thread.
 *
 * Returns true if current thread is essential, false if it is not.
 */
bool z_is_thread_essential(void)
{
	return (_current->base.user_options & K_ESSENTIAL) == K_ESSENTIAL;
}

#ifdef CONFIG_THREAD_CUSTOM_DATA
void z_impl_k_thread_custom_data_set(void *value)
{
	_current->custom_data = value;
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_custom_data_set(void *data)
{
	z_impl_k_thread_custom_data_set(data);
}
#include <syscalls/k_thread_custom_data_set_mrsh.c>
#endif

void *z_impl_k_thread_custom_data_get(void)
{
	return _current->custom_data;
}

#ifdef CONFIG_USERSPACE
static inline void *z_vrfy_k_thread_custom_data_get(void)
{
	return z_impl_k_thread_custom_data_get();
}
#include <syscalls/k_thread_custom_data_get_mrsh.c>

#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_THREAD_CUSTOM_DATA */
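
/* Illustrative sketch (not part of the kernel): stashing a per-thread
 * context pointer via the custom data API (requires
 * CONFIG_THREAD_CUSTOM_DATA=y). The struct and its field are assumptions
 * made for the example only; the kernel treats the value as opaque.
 *
 *	struct my_ctx {
 *		int error_code;
 *	};
 *
 *	static struct my_ctx ctx;
 *
 *	k_thread_custom_data_set(&ctx);
 *	...
 *	struct my_ctx *cur = k_thread_custom_data_get();
 */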

#if defined(CONFIG_THREAD_MONITOR)
/*
 * Remove a thread from the kernel's list of active threads.
 */
void z_thread_monitor_exit(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);

	if (thread == _kernel.threads) {
		_kernel.threads = _kernel.threads->next_thread;
	} else {
		struct k_thread *prev_thread;

		prev_thread = _kernel.threads;
		while ((prev_thread != NULL) &&
			(thread != prev_thread->next_thread)) {
			prev_thread = prev_thread->next_thread;
		}
		if (prev_thread != NULL) {
			prev_thread->next_thread = thread->next_thread;
		}
	}

	k_spin_unlock(&z_thread_monitor_lock, key);
}
#endif

int z_impl_k_thread_name_set(struct k_thread *thread, const char *value)
{
#ifdef CONFIG_THREAD_NAME
	if (thread == NULL) {
		thread = _current;
	}

	strncpy(thread->name, value, CONFIG_THREAD_MAX_NAME_LEN - 1);
	thread->name[CONFIG_THREAD_MAX_NAME_LEN - 1] = '\0';

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, name_set, thread, 0);

	return 0;
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(value);

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, name_set, thread, -ENOSYS);

	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
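
/* Illustrative sketch (not part of the kernel): naming the current thread
 * and reading the name back (requires CONFIG_THREAD_NAME=y). Names longer
 * than CONFIG_THREAD_MAX_NAME_LEN - 1 are silently truncated.
 *
 *	int ret = k_thread_name_set(NULL, "worker");
 *
 *	if (ret == 0) {
 *		printk("running as %s\n", k_thread_name_get(k_current_get()));
 *	}
 */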

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_name_set(struct k_thread *thread, const char *str)
{
#ifdef CONFIG_THREAD_NAME
	char name[CONFIG_THREAD_MAX_NAME_LEN];

	if (thread != NULL) {
		if (Z_SYSCALL_OBJ(thread, K_OBJ_THREAD) != 0) {
			return -EINVAL;
		}
	}

	/* In theory we could copy directly into thread->name, but
	 * the current z_vrfy / z_impl split does not provide a
	 * means of doing so.
	 */
	if (z_user_string_copy(name, (char *)str, sizeof(name)) != 0) {
		return -EFAULT;
	}

	return z_impl_k_thread_name_set(thread, name);
#else
	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
#include <syscalls/k_thread_name_set_mrsh.c>
#endif /* CONFIG_USERSPACE */

const char *k_thread_name_get(struct k_thread *thread)
{
#ifdef CONFIG_THREAD_NAME
	return (const char *)thread->name;
#else
	ARG_UNUSED(thread);
	return NULL;
#endif /* CONFIG_THREAD_NAME */
}

int z_impl_k_thread_name_copy(k_tid_t thread, char *buf, size_t size)
{
#ifdef CONFIG_THREAD_NAME
	strncpy(buf, thread->name, size);
	return 0;
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(buf);
	ARG_UNUSED(size);
	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}

static size_t copy_bytes(char *dest, size_t dest_size, const char *src, size_t src_size)
{
	size_t bytes_to_copy;

	bytes_to_copy = MIN(dest_size, src_size);
	memcpy(dest, src, bytes_to_copy);

	return bytes_to_copy;
}

const char *k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size)
{
	size_t off = 0;
	uint8_t bit;
	uint8_t thread_state = thread_id->base.thread_state;
	static const char *states_str[8] = {"dummy", "pending", "prestart",
					    "dead", "suspended", "aborting",
					    "", "queued"};
	static const size_t states_sz[8] = {5, 7, 8, 4, 9, 8, 0, 6};

	if ((buf == NULL) || (buf_size == 0)) {
		return "";
	}

	buf_size--; /* Reserve 1 byte for end-of-string character */

	/*
	 * Loop through each bit in the thread_state. Stop once all have
	 * been processed. If more than one thread_state bit is set, then
	 * separate the descriptive strings with a '+'.
	 */

	for (uint8_t index = 0; thread_state != 0; index++) {
		bit = BIT(index);
		if ((thread_state & bit) == 0) {
			continue;
		}

		off += copy_bytes(buf + off, buf_size - off,
				  states_str[index], states_sz[index]);

		thread_state &= ~bit;

		if (thread_state != 0) {
			off += copy_bytes(buf + off, buf_size - off, "+", 1);
		}
	}

	buf[off] = '\0';

	return (const char *)buf;
}
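
/* Illustrative sketch (not part of the kernel): printing a thread's state,
 * e.g. "suspended" or "pending+suspended" when more than one state bit is
 * set. The 32-byte buffer size is an assumption made for the example only.
 *
 *	char state_buf[32];
 *
 *	printk("state: %s\n",
 *	       k_thread_state_str(k_current_get(), state_buf,
 *				  sizeof(state_buf)));
 */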

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_name_copy(k_tid_t thread,
					    char *buf, size_t size)
{
#ifdef CONFIG_THREAD_NAME
	size_t len;
	struct z_object *ko = z_object_find(thread);

	/* Special case: we allow reading the names of initialized threads
	 * even if we don't have permission on them
	 */
	if (thread == NULL || ko->type != K_OBJ_THREAD ||
	    (ko->flags & K_OBJ_FLAG_INITIALIZED) == 0) {
		return -EINVAL;
	}
	if (Z_SYSCALL_MEMORY_WRITE(buf, size) != 0) {
		return -EFAULT;
	}
	len = strlen(thread->name);
	if (len + 1 > size) {
		return -ENOSPC;
	}

	return z_user_to_copy((void *)buf, thread->name, len + 1);
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(buf);
	ARG_UNUSED(size);
	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
#include <syscalls/k_thread_name_copy_mrsh.c>
#endif /* CONFIG_USERSPACE */


#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_STACK_SENTINEL
/* Check that the stack sentinel is still present
 *
 * The stack sentinel feature writes a magic value to the lowest 4 bytes of
 * the thread's stack when the thread is initialized. This value gets checked
 * in a few places:
 *
 * 1) In k_yield() if the current thread is not swapped out
 * 2) After servicing a non-nested interrupt
 * 3) In z_swap(), check the sentinel in the outgoing thread
 *
 * Item 2 requires support in arch/ code.
 *
 * If the check fails, the thread will be terminated appropriately through
 * the system fatal error handler.
 */
void z_check_stack_sentinel(void)
{
	uint32_t *stack;

	if ((_current->base.thread_state & _THREAD_DUMMY) != 0) {
		return;
	}

	stack = (uint32_t *)_current->stack_info.start;
	if (*stack != STACK_SENTINEL) {
		/* Restore it so further checks don't trigger this same error */
		*stack = STACK_SENTINEL;
		z_except_reason(K_ERR_STACK_CHK_FAIL);
	}
}
#endif /* CONFIG_STACK_SENTINEL */

void z_impl_k_thread_start(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_thread, start, thread);

	z_sched_start(thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_start(struct k_thread *thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	return z_impl_k_thread_start(thread);
}
#include <syscalls/k_thread_start_mrsh.c>
#endif
#endif

#ifdef CONFIG_MULTITHREADING
static void schedule_new_thread(struct k_thread *thread, k_timeout_t delay)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
	if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
		k_thread_start(thread);
	} else {
		z_add_thread_timeout(thread, delay);
	}
#else
	ARG_UNUSED(delay);
	k_thread_start(thread);
#endif
}
#endif

#if CONFIG_STACK_POINTER_RANDOM
int z_stack_adjust_initialized;

static size_t random_offset(size_t stack_size)
{
	size_t random_val;

	if (!z_stack_adjust_initialized) {
		z_early_rand_get((uint8_t *)&random_val, sizeof(random_val));
	} else {
		sys_rand_get((uint8_t *)&random_val, sizeof(random_val));
	}

	/* Don't need to worry about alignment of the size here,
	 * arch_new_thread() is required to do it.
	 *
	 * FIXME: Not the best way to get a random number in a range.
	 * See #6493
	 */
	const size_t fuzz = random_val % CONFIG_STACK_POINTER_RANDOM;

	if (unlikely(fuzz * 2 > stack_size)) {
		return 0;
	}

	return fuzz;
}
#if defined(CONFIG_STACK_GROWS_UP)
/* This case is rare enough that we don't bother supporting it for now */
#error "Stack pointer randomization not implemented for upward growing stacks"
#endif /* CONFIG_STACK_GROWS_UP */
#endif /* CONFIG_STACK_POINTER_RANDOM */

static char *setup_thread_stack(struct k_thread *new_thread,
				k_thread_stack_t *stack, size_t stack_size)
{
	size_t stack_obj_size, stack_buf_size;
	char *stack_ptr, *stack_buf_start;
	size_t delta = 0;

#ifdef CONFIG_USERSPACE
	if (z_stack_is_user_capable(stack)) {
		stack_obj_size = Z_THREAD_STACK_SIZE_ADJUST(stack_size);
		stack_buf_start = Z_THREAD_STACK_BUFFER(stack);
		stack_buf_size = stack_obj_size - K_THREAD_STACK_RESERVED;
	} else
#endif
	{
		/* Object cannot host a user mode thread */
		stack_obj_size = Z_KERNEL_STACK_SIZE_ADJUST(stack_size);
		stack_buf_start = Z_KERNEL_STACK_BUFFER(stack);
		stack_buf_size = stack_obj_size - K_KERNEL_STACK_RESERVED;
	}

	/* Initial stack pointer at the high end of the stack object, may
	 * be reduced later in this function by TLS or random offset
	 */
	stack_ptr = (char *)stack + stack_obj_size;

	LOG_DBG("stack %p for thread %p: obj_size=%zu buf_start=%p "
		" buf_size %zu stack_ptr=%p",
		stack, new_thread, stack_obj_size, (void *)stack_buf_start,
		stack_buf_size, (void *)stack_ptr);

#ifdef CONFIG_INIT_STACKS
	memset(stack_buf_start, 0xaa, stack_buf_size);
#endif
#ifdef CONFIG_STACK_SENTINEL
	/* Put the stack sentinel at the lowest 4 bytes of the stack area.
	 * We periodically check that it's still present and kill the thread
	 * if it isn't.
	 */
	*((uint32_t *)stack_buf_start) = STACK_SENTINEL;
#endif /* CONFIG_STACK_SENTINEL */
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/* TLS is always last within the stack buffer */
	delta += arch_tls_stack_setup(new_thread, stack_ptr);
#endif /* CONFIG_THREAD_LOCAL_STORAGE */
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
	size_t tls_size = sizeof(struct _thread_userspace_local_data);

	/* reserve space on highest memory of stack buffer for local data */
	delta += tls_size;
	new_thread->userspace_local_data =
		(struct _thread_userspace_local_data *)(stack_ptr - delta);
#endif
#if CONFIG_STACK_POINTER_RANDOM
	delta += random_offset(stack_buf_size);
#endif
	delta = ROUND_UP(delta, ARCH_STACK_PTR_ALIGN);
#ifdef CONFIG_THREAD_STACK_INFO
	/* Initial values. Arches which implement MPU guards that "borrow"
	 * memory from the stack buffer (not tracked in K_THREAD_STACK_RESERVED)
	 * will need to appropriately update this.
	 *
	 * The bounds tracked here correspond to the area of the stack object
	 * that the thread can access, which includes TLS.
	 */
	new_thread->stack_info.start = (uintptr_t)stack_buf_start;
	new_thread->stack_info.size = stack_buf_size;
	new_thread->stack_info.delta = delta;
#endif
	stack_ptr -= delta;

	return stack_ptr;
}
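
/* Rough layout sketch of a downward-growing stack object after
 * setup_thread_stack(), assuming all of the optional features above are
 * enabled; sizes are not to scale and reserved/guard regions are
 * arch-specific:
 *
 *	+--------------------------------------+ <- stack + stack_obj_size
 *	| TLS area (arch_tls_stack_setup)      |
 *	| _thread_userspace_local_data         |
 *	| random fuzz + alignment padding      |
 *	+--------------------------------------+ <- returned initial stack_ptr
 *	| usable stack, filled with 0xaa       |
 *	|   (grows downward at run time)       |
 *	+--------------------------------------+
 *	| STACK_SENTINEL (lowest 4 bytes)      |
 *	+--------------------------------------+ <- stack_buf_start
 *	| reserved area, if any                |
 *	+--------------------------------------+ <- stack object base
 */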

/*
 * The provided stack_size value is presumed to be either the result of
 * K_THREAD_STACK_SIZEOF(stack), or the size value passed to the instance
 * of K_THREAD_STACK_DEFINE() which defined 'stack'.
 */
char *z_setup_new_thread(struct k_thread *new_thread,
			 k_thread_stack_t *stack, size_t stack_size,
			 k_thread_entry_t entry,
			 void *p1, void *p2, void *p3,
			 int prio, uint32_t options, const char *name)
{
	char *stack_ptr;

	Z_ASSERT_VALID_PRIO(prio, entry);

#ifdef CONFIG_OBJ_CORE_THREAD
	k_obj_core_init_and_link(K_OBJ_CORE(new_thread), &obj_type_thread);
#ifdef CONFIG_OBJ_CORE_STATS_THREAD
	k_obj_core_stats_register(K_OBJ_CORE(new_thread),
				  &new_thread->base.usage,
				  sizeof(new_thread->base.usage));
#endif
#endif

#ifdef CONFIG_USERSPACE
	__ASSERT((options & K_USER) == 0U || z_stack_is_user_capable(stack),
		 "user thread %p with kernel-only stack %p",
		 new_thread, stack);
	z_object_init(new_thread);
	z_object_init(stack);
	new_thread->stack_obj = stack;
	new_thread->syscall_frame = NULL;

	/* Any given thread has access to itself */
	k_object_access_grant(new_thread, new_thread);
#endif
	z_waitq_init(&new_thread->join_queue);

	/* Initialize various struct k_thread members */
	z_init_thread_base(&new_thread->base, prio, _THREAD_PRESTART, options);
	stack_ptr = setup_thread_stack(new_thread, stack, stack_size);

#ifdef CONFIG_KERNEL_COHERENCE
	/* Check that the thread object is safe, but that the stack is
	 * still cached!
	 */
	__ASSERT_NO_MSG(arch_mem_coherent(new_thread));
	__ASSERT_NO_MSG(!arch_mem_coherent(stack));
#endif

	arch_new_thread(new_thread, stack, stack_ptr, entry, p1, p2, p3);

	/* static threads overwrite it afterwards with real value */
	new_thread->init_data = NULL;

#ifdef CONFIG_USE_SWITCH
	/* switch_handle must be non-null except when inside z_swap()
	 * for synchronization reasons. Historically some notional
	 * USE_SWITCH architectures have actually ignored the field
	 */
	__ASSERT(new_thread->switch_handle != NULL,
		 "arch layer failed to initialize switch_handle");
#endif
#ifdef CONFIG_THREAD_CUSTOM_DATA
	/* Initialize custom data field (value is opaque to kernel) */
	new_thread->custom_data = NULL;
#endif
#ifdef CONFIG_EVENTS
	new_thread->no_wake_on_timeout = false;
#endif
#ifdef CONFIG_THREAD_MONITOR
	new_thread->entry.pEntry = entry;
	new_thread->entry.parameter1 = p1;
	new_thread->entry.parameter2 = p2;
	new_thread->entry.parameter3 = p3;

	k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);

	new_thread->next_thread = _kernel.threads;
	_kernel.threads = new_thread;
	k_spin_unlock(&z_thread_monitor_lock, key);
#endif
#ifdef CONFIG_THREAD_NAME
	if (name != NULL) {
		strncpy(new_thread->name, name,
			CONFIG_THREAD_MAX_NAME_LEN - 1);
		/* Ensure NULL termination, truncate if longer */
		new_thread->name[CONFIG_THREAD_MAX_NAME_LEN - 1] = '\0';
	} else {
		new_thread->name[0] = '\0';
	}
#endif
#ifdef CONFIG_SCHED_CPU_MASK
	if (IS_ENABLED(CONFIG_SCHED_CPU_MASK_PIN_ONLY)) {
		new_thread->base.cpu_mask = 1; /* must specify only one cpu */
	} else {
		new_thread->base.cpu_mask = -1; /* allow all cpus */
	}
#endif
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
	/* _current may be null if the dummy thread is not used */
	if (!_current) {
		new_thread->resource_pool = NULL;
		return stack_ptr;
	}
#endif
#ifdef CONFIG_USERSPACE
	z_mem_domain_init_thread(new_thread);

	if ((options & K_INHERIT_PERMS) != 0U) {
		z_thread_perms_inherit(_current, new_thread);
	}
#endif
#ifdef CONFIG_SCHED_DEADLINE
	new_thread->base.prio_deadline = 0;
#endif
	new_thread->resource_pool = _current->resource_pool;

#ifdef CONFIG_SCHED_THREAD_USAGE
	new_thread->base.usage = (struct k_cycle_stats) {};
	new_thread->base.usage.track_usage =
		CONFIG_SCHED_THREAD_USAGE_AUTO_ENABLE;
#endif

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, create, new_thread);

	return stack_ptr;
}

#ifdef CONFIG_MULTITHREADING
k_tid_t z_impl_k_thread_create(struct k_thread *new_thread,
			       k_thread_stack_t *stack,
			       size_t stack_size, k_thread_entry_t entry,
			       void *p1, void *p2, void *p3,
			       int prio, uint32_t options, k_timeout_t delay)
{
	__ASSERT(!arch_is_in_isr(), "Threads may not be created in ISRs");

	z_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
			   prio, options, NULL);

	if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
		schedule_new_thread(new_thread, delay);
	}

	return new_thread;
}
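
/* Illustrative sketch (not part of the kernel): creating a thread at run
 * time. The stack size, priority and entry function are assumptions made
 * for the example only.
 *
 *	K_THREAD_STACK_DEFINE(worker_stack, 1024);
 *	static struct k_thread worker_thread;
 *
 *	static void worker_entry(void *p1, void *p2, void *p3)
 *	{
 *		...
 *	}
 *
 *	k_tid_t tid = k_thread_create(&worker_thread, worker_stack,
 *				      K_THREAD_STACK_SIZEOF(worker_stack),
 *				      worker_entry, NULL, NULL, NULL,
 *				      K_PRIO_PREEMPT(5), 0, K_NO_WAIT);
 */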


#ifdef CONFIG_USERSPACE
bool z_stack_is_user_capable(k_thread_stack_t *stack)
{
	return z_object_find(stack) != NULL;
}

k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
			       k_thread_stack_t *stack,
			       size_t stack_size, k_thread_entry_t entry,
			       void *p1, void *p2, void *p3,
			       int prio, uint32_t options, k_timeout_t delay)
{
	size_t total_size, stack_obj_size;
	struct z_object *stack_object;

	/* The thread and stack objects *must* be in an uninitialized state */
	Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(new_thread, K_OBJ_THREAD));

	/* No need to check z_stack_is_user_capable(), it won't be in the
	 * object table if it isn't
	 */
	stack_object = z_object_find(stack);
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(z_obj_validation_check(stack_object, stack,
						K_OBJ_THREAD_STACK_ELEMENT,
						_OBJ_INIT_FALSE) == 0,
				    "bad stack object"));

	/* Verify that the stack size passed in is OK by computing the total
	 * size and comparing it with the size value in the object metadata
	 */
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(!size_add_overflow(K_THREAD_STACK_RESERVED,
						       stack_size, &total_size),
				    "stack size overflow (%zu+%zu)",
				    stack_size,
				    K_THREAD_STACK_RESERVED));

	/* Testing less-than-or-equal since additional room may have been
	 * allocated for alignment constraints
	 */
#ifdef CONFIG_GEN_PRIV_STACKS
	stack_obj_size = stack_object->data.stack_data->size;
#else
	stack_obj_size = stack_object->data.stack_size;
#endif
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(total_size <= stack_obj_size,
				    "stack size %zu is too big, max is %zu",
				    total_size, stack_obj_size));

	/* User threads may only create other user threads and they can't
	 * be marked as essential
	 */
	Z_OOPS(Z_SYSCALL_VERIFY(options & K_USER));
	Z_OOPS(Z_SYSCALL_VERIFY(!(options & K_ESSENTIAL)));

	/* Check validity of prio argument; must be the same or worse priority
	 * than the caller
	 */
	Z_OOPS(Z_SYSCALL_VERIFY(_is_valid_prio(prio, NULL)));
	Z_OOPS(Z_SYSCALL_VERIFY(z_is_prio_lower_or_equal(prio,
							 _current->base.prio)));

	z_setup_new_thread(new_thread, stack, stack_size,
			   entry, p1, p2, p3, prio, options, NULL);

	if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
		schedule_new_thread(new_thread, delay);
	}

	return new_thread;
}
#include <syscalls/k_thread_create_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_MULTITHREADING */

#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_USERSPACE

static void grant_static_access(void)
{
	STRUCT_SECTION_FOREACH(z_object_assignment, pos) {
		for (int i = 0; pos->objects[i] != NULL; i++) {
			k_object_access_grant(pos->objects[i],
					      pos->thread);
		}
	}
}
#endif /* CONFIG_USERSPACE */

void z_init_static_threads(void)
{
	_FOREACH_STATIC_THREAD(thread_data) {
		z_setup_new_thread(
			thread_data->init_thread,
			thread_data->init_stack,
			thread_data->init_stack_size,
			thread_data->init_entry,
			thread_data->init_p1,
			thread_data->init_p2,
			thread_data->init_p3,
			thread_data->init_prio,
			thread_data->init_options,
			thread_data->init_name);

		thread_data->init_thread->init_data = thread_data;
	}

#ifdef CONFIG_USERSPACE
	grant_static_access();
#endif

	/*
	 * Non-legacy static threads may be started immediately or
	 * after a previously specified delay. Even though the
	 * scheduler is locked, ticks can still be delivered and
	 * processed. Take a sched lock to prevent them from running
	 * until they are all started.
	 *
	 * Note that static threads defined using the legacy API have a
	 * delay of K_FOREVER.
	 */
	k_sched_lock();
	_FOREACH_STATIC_THREAD(thread_data) {
		k_timeout_t init_delay = Z_THREAD_INIT_DELAY(thread_data);

		if (!K_TIMEOUT_EQ(init_delay, K_FOREVER)) {
			schedule_new_thread(thread_data->init_thread,
					    init_delay);
		}
	}
	k_sched_unlock();
}
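
/* Illustrative sketch (not part of the kernel): a static thread whose
 * metadata lands in the _static_thread_data section iterated above. The
 * stack size, priority and 0 ms start delay are assumptions made for the
 * example only.
 *
 *	static void blink_entry(void *p1, void *p2, void *p3)
 *	{
 *		...
 *	}
 *
 *	K_THREAD_DEFINE(blink_tid, 512, blink_entry, NULL, NULL, NULL,
 *			K_PRIO_PREEMPT(7), 0, 0);
 */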
#endif

void z_init_thread_base(struct _thread_base *thread_base, int priority,
			uint32_t initial_state, unsigned int options)
{
	/* k_q_node is initialized upon first insertion in a list */
	thread_base->pended_on = NULL;
	thread_base->user_options = (uint8_t)options;
	thread_base->thread_state = (uint8_t)initial_state;

	thread_base->prio = priority;

	thread_base->sched_locked = 0U;

#ifdef CONFIG_SMP
	thread_base->is_idle = 0;
#endif

#ifdef CONFIG_TIMESLICE_PER_THREAD
	thread_base->slice_ticks = 0;
	thread_base->slice_expired = NULL;
#endif

	/* swap_data does not need to be initialized */

	z_init_thread_timeout(thread_base);
}

FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
					    void *p1, void *p2, void *p3)
{
	SYS_PORT_TRACING_FUNC(k_thread, user_mode_enter);

	_current->base.user_options |= K_USER;
	z_thread_essential_clear();
#ifdef CONFIG_THREAD_MONITOR
	_current->entry.pEntry = entry;
	_current->entry.parameter1 = p1;
	_current->entry.parameter2 = p2;
	_current->entry.parameter3 = p3;
#endif
#ifdef CONFIG_USERSPACE
	__ASSERT(z_stack_is_user_capable(_current->stack_obj),
		 "dropping to user mode with kernel-only stack object");
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
	memset(_current->userspace_local_data, 0,
	       sizeof(struct _thread_userspace_local_data));
#endif
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	arch_tls_stack_setup(_current,
			     (char *)(_current->stack_info.start +
				      _current->stack_info.size));
#endif
	arch_user_mode_enter(entry, p1, p2, p3);
#else
	/* XXX In this case we do not reset the stack */
	z_thread_entry(entry, p1, p2, p3);
#endif
}
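
/* Illustrative sketch (not part of the kernel): a supervisor thread doing
 * privileged setup and then permanently dropping to user mode. The entry
 * functions are assumptions made for the example only; the call never
 * returns.
 *
 *	static void user_entry(void *p1, void *p2, void *p3)
 *	{
 *		... (runs in user mode)
 *	}
 *
 *	void supervisor_entry(void *p1, void *p2, void *p3)
 *	{
 *		... (privileged setup here)
 *		k_thread_user_mode_enter(user_entry, NULL, NULL, NULL);
 *	}
 */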

/* These spinlock assertion predicates are defined here because having
 * them in spinlock.h is a giant header ordering headache.
 */
#ifdef CONFIG_SPIN_VALIDATE
bool z_spin_lock_valid(struct k_spinlock *l)
{
	uintptr_t thread_cpu = l->thread_cpu;

	if (thread_cpu != 0U) {
		if ((thread_cpu & 3U) == _current_cpu->id) {
			return false;
		}
	}
	return true;
}

bool z_spin_unlock_valid(struct k_spinlock *l)
{
	if (l->thread_cpu != (_current_cpu->id | (uintptr_t)_current)) {
		return false;
	}
	l->thread_cpu = 0;
	return true;
}

void z_spin_lock_set_owner(struct k_spinlock *l)
{
	l->thread_cpu = _current_cpu->id | (uintptr_t)_current;
}

#ifdef CONFIG_KERNEL_COHERENCE
bool z_spin_lock_mem_coherent(struct k_spinlock *l)
{
	return arch_mem_coherent((void *)l);
}
#endif /* CONFIG_KERNEL_COHERENCE */

#endif /* CONFIG_SPIN_VALIDATE */

int z_impl_k_float_disable(struct k_thread *thread)
{
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	return arch_float_disable(thread);
#else
	ARG_UNUSED(thread);
	return -ENOTSUP;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
}

int z_impl_k_float_enable(struct k_thread *thread, unsigned int options)
{
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	return arch_float_enable(thread, options);
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(options);
	return -ENOTSUP;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_float_disable(struct k_thread *thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	return z_impl_k_float_disable(thread);
}
#include <syscalls/k_float_disable_mrsh.c>
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_IRQ_OFFLOAD
/* offload_sem is made visible outside this file for testing, so that it
 * can be released externally if an error occurs.
 */
K_SEM_DEFINE(offload_sem, 1, 1);

void irq_offload(irq_offload_routine_t routine, const void *parameter)
{
#ifdef CONFIG_IRQ_OFFLOAD_NESTED
	arch_irq_offload(routine, parameter);
#else
	k_sem_take(&offload_sem, K_FOREVER);
	arch_irq_offload(routine, parameter);
	k_sem_give(&offload_sem);
#endif
}
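
/* Illustrative sketch (not part of the kernel): running a routine in
 * interrupt context from a test, typically to exercise ISR-only code
 * paths. The routine and its argument are assumptions made for the
 * example only.
 *
 *	static void offload_fn(const void *param)
 *	{
 *		ARG_UNUSED(param);
 *		__ASSERT(k_is_in_isr(), "expected ISR context");
 *	}
 *
 *	irq_offload(offload_fn, NULL);
 */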
#endif

#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
#ifdef CONFIG_STACK_GROWS_UP
#error "Unsupported configuration for stack analysis"
#endif

int z_stack_space_get(const uint8_t *stack_start, size_t size, size_t *unused_ptr)
{
	size_t unused = 0;
	const uint8_t *checked_stack = stack_start;
	/* Take the address of any local variable as a shallow bound for the
	 * stack pointer. Addresses above it are guaranteed to be
	 * accessible.
	 */
	const uint8_t *stack_pointer = (const uint8_t *)&stack_start;

	/* If we are currently running on the stack being analyzed, some
	 * memory management hardware will generate an exception if we
	 * read unused stack memory.
	 *
	 * This never happens when invoked from user mode, as user mode
	 * will always run this function on the privilege elevation stack.
	 */
	if ((stack_pointer > stack_start) && (stack_pointer <= (stack_start + size)) &&
	    IS_ENABLED(CONFIG_NO_UNUSED_STACK_INSPECTION)) {
		/* TODO: We could add an arch_ API call to temporarily
		 * disable the stack checking in the CPU, but this would
		 * need to be properly managed wrt context switches/interrupts
		 */
		return -ENOTSUP;
	}

	if (IS_ENABLED(CONFIG_STACK_SENTINEL)) {
		/* The first 4 bytes of the stack buffer are reserved for
		 * the sentinel value, so they won't be 0xAAAAAAAA for
		 * thread stacks.
		 *
		 * FIXME: thread->stack_info.start ought to reflect
		 * this!
		 */
		checked_stack += 4;
		size -= 4;
	}

	for (size_t i = 0; i < size; i++) {
		if ((checked_stack[i]) == 0xaaU) {
			unused++;
		} else {
			break;
		}
	}

	*unused_ptr = unused;

	return 0;
}

int z_impl_k_thread_stack_space_get(const struct k_thread *thread,
				    size_t *unused_ptr)
{
	return z_stack_space_get((const uint8_t *)thread->stack_info.start,
				 thread->stack_info.size, unused_ptr);
}
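
/* Illustrative sketch (not part of the kernel): reporting how much of the
 * current thread's stack has never been touched (requires
 * CONFIG_INIT_STACKS and CONFIG_THREAD_STACK_INFO).
 *
 *	size_t unused;
 *
 *	if (k_thread_stack_space_get(k_current_get(), &unused) == 0) {
 *		printk("%zu bytes of stack never used\n", unused);
 *	}
 */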

#ifdef CONFIG_USERSPACE
int z_vrfy_k_thread_stack_space_get(const struct k_thread *thread,
				    size_t *unused_ptr)
{
	size_t unused;
	int ret;

	ret = Z_SYSCALL_OBJ(thread, K_OBJ_THREAD);
	CHECKIF(ret != 0) {
		return ret;
	}

	ret = z_impl_k_thread_stack_space_get(thread, &unused);
	CHECKIF(ret != 0) {
		return ret;
	}

	ret = z_user_to_copy(unused_ptr, &unused, sizeof(size_t));
	CHECKIF(ret != 0) {
		return ret;
	}

	return 0;
}
#include <syscalls/k_thread_stack_space_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_INIT_STACKS && CONFIG_THREAD_STACK_INFO */

#ifdef CONFIG_USERSPACE
static inline k_ticks_t z_vrfy_k_thread_timeout_remaining_ticks(
						const struct k_thread *t)
{
	Z_OOPS(Z_SYSCALL_OBJ(t, K_OBJ_THREAD));
	return z_impl_k_thread_timeout_remaining_ticks(t);
}
#include <syscalls/k_thread_timeout_remaining_ticks_mrsh.c>

static inline k_ticks_t z_vrfy_k_thread_timeout_expires_ticks(
						const struct k_thread *t)
{
	Z_OOPS(Z_SYSCALL_OBJ(t, K_OBJ_THREAD));
	return z_impl_k_thread_timeout_expires_ticks(t);
}
#include <syscalls/k_thread_timeout_expires_ticks_mrsh.c>
#endif

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
void z_thread_mark_switched_in(void)
{
#if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH)
	z_sched_usage_start(_current);
#endif

#ifdef CONFIG_TRACING
	SYS_PORT_TRACING_FUNC(k_thread, switched_in);
#endif
}

void z_thread_mark_switched_out(void)
{
#if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH)
	z_sched_usage_stop();
#endif

#ifdef CONFIG_TRACING
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/* Dummy thread won't have TLS set up to run arbitrary code */
	if (!_current_cpu->current ||
	    (_current_cpu->current->base.thread_state & _THREAD_DUMMY) != 0) {
		return;
	}
#endif
	SYS_PORT_TRACING_FUNC(k_thread, switched_out);
#endif
}
#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */

int k_thread_runtime_stats_get(k_tid_t thread,
			       k_thread_runtime_stats_t *stats)
{
	if ((thread == NULL) || (stats == NULL)) {
		return -EINVAL;
	}

#ifdef CONFIG_SCHED_THREAD_USAGE
	z_sched_thread_usage(thread, stats);
#else
	*stats = (k_thread_runtime_stats_t) {};
#endif

	return 0;
}
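
/* Illustrative sketch (not part of the kernel): sampling the current
 * thread's accumulated execution cycles (meaningful when
 * CONFIG_SCHED_THREAD_USAGE=y; otherwise the stats read back as zero).
 *
 *	k_thread_runtime_stats_t rt_stats;
 *
 *	if (k_thread_runtime_stats_get(k_current_get(), &rt_stats) == 0) {
 *		printk("execution cycles: %llu\n", rt_stats.execution_cycles);
 *	}
 */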

int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats)
{
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	k_thread_runtime_stats_t tmp_stats;
#endif

	if (stats == NULL) {
		return -EINVAL;
	}

	*stats = (k_thread_runtime_stats_t) {};

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	/* Retrieve the usage stats for each core and amalgamate them. */

	unsigned int num_cpus = arch_num_cpus();

	for (uint8_t i = 0; i < num_cpus; i++) {
		z_sched_cpu_usage(i, &tmp_stats);

		stats->execution_cycles += tmp_stats.execution_cycles;
		stats->total_cycles += tmp_stats.total_cycles;
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
		stats->current_cycles += tmp_stats.current_cycles;
		stats->peak_cycles += tmp_stats.peak_cycles;
		stats->average_cycles += tmp_stats.average_cycles;
#endif
		stats->idle_cycles += tmp_stats.idle_cycles;
	}
#endif

	return 0;
}