/*
 * Copyright (c) 2010-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Kernel thread support
 *
 * This module provides general purpose thread support.
 */

#include <zephyr/kernel.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/sys_clock.h>
#include <ksched.h>
#include <zephyr/wait_q.h>
#include <zephyr/syscall_handler.h>
#include <kernel_internal.h>
#include <kswap.h>
#include <zephyr/init.h>
#include <zephyr/tracing/tracing.h>
#include <string.h>
#include <stdbool.h>
#include <zephyr/irq_offload.h>
#include <zephyr/sys/check.h>
#include <zephyr/random/rand32.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/iterable_sections.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#ifdef CONFIG_THREAD_MONITOR
/* This lock protects the linked list of active threads; i.e. the
 * initial _kernel.threads pointer and the linked list made up of
 * thread->next_thread (until NULL)
 */
static struct k_spinlock z_thread_monitor_lock;
#endif /* CONFIG_THREAD_MONITOR */

#define _FOREACH_STATIC_THREAD(thread_data) \
	STRUCT_SECTION_FOREACH(_static_thread_data, thread_data)

void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
{
#if defined(CONFIG_THREAD_MONITOR)
	struct k_thread *thread;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb cannot be NULL");

	/*
	 * Lock is needed to make sure that the _kernel.threads is not being
	 * modified by the user_cb either directly or indirectly.
	 * The indirect ways are through calling k_thread_create and
	 * k_thread_abort from user_cb.
	 */
	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach);

	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		user_cb(thread, user_data);
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach);

	k_spin_unlock(&z_thread_monitor_lock, key);
#endif
}

void k_thread_foreach_unlocked(k_thread_user_cb_t user_cb, void *user_data)
{
#if defined(CONFIG_THREAD_MONITOR)
	struct k_thread *thread;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb cannot be NULL");

	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach_unlocked);

	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		k_spin_unlock(&z_thread_monitor_lock, key);
		user_cb(thread, user_data);
		key = k_spin_lock(&z_thread_monitor_lock);
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach_unlocked);

	k_spin_unlock(&z_thread_monitor_lock, key);
#endif
}
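
/* Illustrative usage sketch (application side, not part of this file):
 * iterate over all threads with k_thread_foreach() and print each one.
 * The callback and counter below are hypothetical example code, assuming
 * CONFIG_THREAD_MONITOR=y.
 *
 *	static void dump_thread(const struct k_thread *thread, void *user_data)
 *	{
 *		int *count = user_data;
 *
 *		(*count)++;
 *		printk("thread %p: %s\n", thread,
 *		       k_thread_name_get((struct k_thread *)thread));
 *	}
 *
 *	void list_threads(void)
 *	{
 *		int count = 0;
 *
 *		k_thread_foreach(dump_thread, &count);
 *		printk("%d threads\n", count);
 *	}
 */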

bool k_is_in_isr(void)
{
	return arch_is_in_isr();
}

/*
 * This function tags the current thread as essential to system operation.
 * Exceptions raised by this thread will be treated as a fatal system error.
 */
void z_thread_essential_set(void)
{
	_current->base.user_options |= K_ESSENTIAL;
}

/*
 * This function tags the current thread as not essential to system operation.
 * Exceptions raised by this thread may be recoverable.
 * (This is the default tag for a thread.)
 */
void z_thread_essential_clear(void)
{
	_current->base.user_options &= ~K_ESSENTIAL;
}

/*
 * This routine indicates if the current thread is an essential system thread.
 *
 * Returns true if current thread is essential, false if it is not.
 */
bool z_is_thread_essential(void)
{
	return (_current->base.user_options & K_ESSENTIAL) == K_ESSENTIAL;
}

#ifdef CONFIG_THREAD_CUSTOM_DATA
void z_impl_k_thread_custom_data_set(void *value)
{
	_current->custom_data = value;
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_custom_data_set(void *data)
{
	z_impl_k_thread_custom_data_set(data);
}
#include <syscalls/k_thread_custom_data_set_mrsh.c>
#endif

void *z_impl_k_thread_custom_data_get(void)
{
	return _current->custom_data;
}

#ifdef CONFIG_USERSPACE
static inline void *z_vrfy_k_thread_custom_data_get(void)
{
	return z_impl_k_thread_custom_data_get();
}
#include <syscalls/k_thread_custom_data_get_mrsh.c>

#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_THREAD_CUSTOM_DATA */
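
/* Illustrative usage sketch (application side, assumes
 * CONFIG_THREAD_CUSTOM_DATA=y); the per-thread error counter is a
 * hypothetical use case, the value is opaque to the kernel.
 *
 *	void record_error(void)
 *	{
 *		uintptr_t errors = (uintptr_t)k_thread_custom_data_get();
 *
 *		k_thread_custom_data_set((void *)(errors + 1));
 *	}
 */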

#if defined(CONFIG_THREAD_MONITOR)
/*
 * Remove a thread from the kernel's list of active threads.
 */
void z_thread_monitor_exit(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);

	if (thread == _kernel.threads) {
		_kernel.threads = _kernel.threads->next_thread;
	} else {
		struct k_thread *prev_thread;

		prev_thread = _kernel.threads;
		while ((prev_thread != NULL) &&
			(thread != prev_thread->next_thread)) {
			prev_thread = prev_thread->next_thread;
		}
		if (prev_thread != NULL) {
			prev_thread->next_thread = thread->next_thread;
		}
	}

	k_spin_unlock(&z_thread_monitor_lock, key);
}
#endif

int z_impl_k_thread_name_set(struct k_thread *thread, const char *value)
{
#ifdef CONFIG_THREAD_NAME
	if (thread == NULL) {
		thread = _current;
	}

	strncpy(thread->name, value, CONFIG_THREAD_MAX_NAME_LEN - 1);
	thread->name[CONFIG_THREAD_MAX_NAME_LEN - 1] = '\0';

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, name_set, thread, 0);

	return 0;
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(value);

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, name_set, thread, -ENOSYS);

	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
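
/* Illustrative usage sketch (application side, assumes CONFIG_THREAD_NAME=y);
 * the entry function and name are hypothetical. Passing NULL as the thread
 * argument names the calling thread itself.
 *
 *	static void worker_entry(void *p1, void *p2, void *p3)
 *	{
 *		int ret = k_thread_name_set(NULL, "worker");
 *
 *		if (ret != 0) {
 *			printk("could not set thread name (%d)\n", ret);
 *		}
 *	}
 */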

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_name_set(struct k_thread *thread, const char *str)
{
#ifdef CONFIG_THREAD_NAME
	char name[CONFIG_THREAD_MAX_NAME_LEN];

	if (thread != NULL) {
		if (Z_SYSCALL_OBJ(thread, K_OBJ_THREAD) != 0) {
			return -EINVAL;
		}
	}

	/* In theory we could copy directly into thread->name, but
	 * the current z_vrfy / z_impl split does not provide a
	 * means of doing so.
	 */
	if (z_user_string_copy(name, (char *)str, sizeof(name)) != 0) {
		return -EFAULT;
	}

	return z_impl_k_thread_name_set(thread, name);
#else
	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
#include <syscalls/k_thread_name_set_mrsh.c>
#endif /* CONFIG_USERSPACE */

const char *k_thread_name_get(struct k_thread *thread)
{
#ifdef CONFIG_THREAD_NAME
	return (const char *)thread->name;
#else
	ARG_UNUSED(thread);
	return NULL;
#endif /* CONFIG_THREAD_NAME */
}

int z_impl_k_thread_name_copy(k_tid_t thread, char *buf, size_t size)
{
#ifdef CONFIG_THREAD_NAME
	strncpy(buf, thread->name, size);
	return 0;
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(buf);
	ARG_UNUSED(size);
	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}

static size_t copy_bytes(char *dest, size_t dest_size, const char *src, size_t src_size)
{
	size_t bytes_to_copy;

	bytes_to_copy = MIN(dest_size, src_size);
	memcpy(dest, src, bytes_to_copy);

	return bytes_to_copy;
}

const char *k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size)
{
	size_t off = 0;
	uint8_t bit;
	uint8_t thread_state = thread_id->base.thread_state;
	static const char *states_str[8] = {"dummy", "pending", "prestart",
					    "dead", "suspended", "aborting",
					    "", "queued"};
	static const size_t states_sz[8] = {5, 7, 8, 4, 9, 8, 0, 6};

	if ((buf == NULL) || (buf_size == 0)) {
		return "";
	}

	buf_size--; /* Reserve 1 byte for end-of-string character */

	/*
	 * Loop through each bit in the thread_state. Stop once all have
	 * been processed. If more than one thread_state bit is set, then
	 * separate the descriptive strings with a '+'.
	 */

	for (uint8_t index = 0; thread_state != 0; index++) {
		bit = BIT(index);
		if ((thread_state & bit) == 0) {
			continue;
		}

		off += copy_bytes(buf + off, buf_size - off,
				  states_str[index], states_sz[index]);

		thread_state &= ~bit;

		if (thread_state != 0) {
			off += copy_bytes(buf + off, buf_size - off, "+", 1);
		}
	}

	buf[off] = '\0';

	return (const char *)buf;
}
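
/* Illustrative usage sketch (application side); the 32-byte buffer is an
 * arbitrary example size, large enough for several '+'-joined state names.
 *
 *	void show_state(k_tid_t tid)
 *	{
 *		char state_buf[32];
 *
 *		printk("thread %p state: %s\n", tid,
 *		       k_thread_state_str(tid, state_buf, sizeof(state_buf)));
 *	}
 */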

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_name_copy(k_tid_t thread,
					    char *buf, size_t size)
{
#ifdef CONFIG_THREAD_NAME
	size_t len;
	struct z_object *ko = z_object_find(thread);

	/* Special case: we allow reading the names of initialized threads
	 * even if we don't have permission on them
	 */
	if (thread == NULL || ko->type != K_OBJ_THREAD ||
	    (ko->flags & K_OBJ_FLAG_INITIALIZED) == 0) {
		return -EINVAL;
	}
	if (Z_SYSCALL_MEMORY_WRITE(buf, size) != 0) {
		return -EFAULT;
	}
	len = strlen(thread->name);
	if (len + 1 > size) {
		return -ENOSPC;
	}

	return z_user_to_copy((void *)buf, thread->name, len + 1);
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(buf);
	ARG_UNUSED(size);
	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
#include <syscalls/k_thread_name_copy_mrsh.c>
#endif /* CONFIG_USERSPACE */


#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_STACK_SENTINEL
/* Check that the stack sentinel is still present
 *
 * The stack sentinel feature writes a magic value to the lowest 4 bytes of
 * the thread's stack when the thread is initialized. This value gets checked
 * in a few places:
 *
 * 1) In k_yield() if the current thread is not swapped out
 * 2) After servicing a non-nested interrupt
 * 3) In z_swap(), check the sentinel in the outgoing thread
 *
 * Item 2 requires support in arch/ code.
 *
 * If the check fails, the thread will be terminated appropriately through
 * the system fatal error handler.
 */
void z_check_stack_sentinel(void)
{
	uint32_t *stack;

	if ((_current->base.thread_state & _THREAD_DUMMY) != 0) {
		return;
	}

	stack = (uint32_t *)_current->stack_info.start;
	if (*stack != STACK_SENTINEL) {
		/* Restore it so further checks don't trigger this same error */
		*stack = STACK_SENTINEL;
		z_except_reason(K_ERR_STACK_CHK_FAIL);
	}
}
#endif /* CONFIG_STACK_SENTINEL */

void z_impl_k_thread_start(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_thread, start, thread);

	z_sched_start(thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_start(struct k_thread *thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	return z_impl_k_thread_start(thread);
}
#include <syscalls/k_thread_start_mrsh.c>
#endif
#endif

#ifdef CONFIG_MULTITHREADING
static void schedule_new_thread(struct k_thread *thread, k_timeout_t delay)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
	if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
		k_thread_start(thread);
	} else {
		z_add_thread_timeout(thread, delay);
	}
#else
	ARG_UNUSED(delay);
	k_thread_start(thread);
#endif
}
#endif

#if CONFIG_STACK_POINTER_RANDOM
int z_stack_adjust_initialized;

static size_t random_offset(size_t stack_size)
{
	size_t random_val;

	if (!z_stack_adjust_initialized) {
		z_early_boot_rand_get((uint8_t *)&random_val, sizeof(random_val));
	} else {
		sys_rand_get((uint8_t *)&random_val, sizeof(random_val));
	}

	/* Don't need to worry about alignment of the size here,
	 * arch_new_thread() is required to do it.
	 *
	 * FIXME: Not the best way to get a random number in a range.
	 * See #6493
	 */
	const size_t fuzz = random_val % CONFIG_STACK_POINTER_RANDOM;

	if (unlikely(fuzz * 2 > stack_size)) {
		return 0;
	}

	return fuzz;
}
#if defined(CONFIG_STACK_GROWS_UP)
/* This case is rare enough that it is not handled for now */
#error "Stack pointer randomization not implemented for upward growing stacks"
#endif /* CONFIG_STACK_GROWS_UP */
#endif /* CONFIG_STACK_POINTER_RANDOM */

static char *setup_thread_stack(struct k_thread *new_thread,
				k_thread_stack_t *stack, size_t stack_size)
{
	size_t stack_obj_size, stack_buf_size;
	char *stack_ptr, *stack_buf_start;
	size_t delta = 0;

#ifdef CONFIG_USERSPACE
	if (z_stack_is_user_capable(stack)) {
		stack_obj_size = Z_THREAD_STACK_SIZE_ADJUST(stack_size);
		stack_buf_start = Z_THREAD_STACK_BUFFER(stack);
		stack_buf_size = stack_obj_size - K_THREAD_STACK_RESERVED;
	} else
#endif
	{
		/* Object cannot host a user mode thread */
		stack_obj_size = Z_KERNEL_STACK_SIZE_ADJUST(stack_size);
		stack_buf_start = Z_KERNEL_STACK_BUFFER(stack);
		stack_buf_size = stack_obj_size - K_KERNEL_STACK_RESERVED;
	}

	/* Initial stack pointer at the high end of the stack object, may
	 * be reduced later in this function by TLS or random offset
	 */
	stack_ptr = (char *)stack + stack_obj_size;

	LOG_DBG("stack %p for thread %p: obj_size=%zu buf_start=%p "
		" buf_size %zu stack_ptr=%p",
		stack, new_thread, stack_obj_size, (void *)stack_buf_start,
		stack_buf_size, (void *)stack_ptr);

#ifdef CONFIG_INIT_STACKS
	memset(stack_buf_start, 0xaa, stack_buf_size);
#endif
#ifdef CONFIG_STACK_SENTINEL
	/* Put the stack sentinel at the lowest 4 bytes of the stack area.
	 * We periodically check that it's still present and kill the thread
	 * if it isn't.
	 */
	*((uint32_t *)stack_buf_start) = STACK_SENTINEL;
#endif /* CONFIG_STACK_SENTINEL */
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/* TLS is always last within the stack buffer */
	delta += arch_tls_stack_setup(new_thread, stack_ptr);
#endif /* CONFIG_THREAD_LOCAL_STORAGE */
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
	size_t tls_size = sizeof(struct _thread_userspace_local_data);

	/* reserve space on highest memory of stack buffer for local data */
	delta += tls_size;
	new_thread->userspace_local_data =
		(struct _thread_userspace_local_data *)(stack_ptr - delta);
#endif
#if CONFIG_STACK_POINTER_RANDOM
	delta += random_offset(stack_buf_size);
#endif
	delta = ROUND_UP(delta, ARCH_STACK_PTR_ALIGN);
#ifdef CONFIG_THREAD_STACK_INFO
	/* Initial values. Arches which implement MPU guards that "borrow"
	 * memory from the stack buffer (not tracked in K_THREAD_STACK_RESERVED)
	 * will need to appropriately update this.
	 *
	 * The bounds tracked here correspond to the area of the stack object
	 * that the thread can access, which includes TLS.
	 */
	new_thread->stack_info.start = (uintptr_t)stack_buf_start;
	new_thread->stack_info.size = stack_buf_size;
	new_thread->stack_info.delta = delta;
#endif
	stack_ptr -= delta;

	return stack_ptr;
}

/*
 * The provided stack_size value is presumed to be either the result of
 * K_THREAD_STACK_SIZEOF(stack), or the size value passed to the instance
 * of K_THREAD_STACK_DEFINE() which defined 'stack'.
 */
char *z_setup_new_thread(struct k_thread *new_thread,
			 k_thread_stack_t *stack, size_t stack_size,
			 k_thread_entry_t entry,
			 void *p1, void *p2, void *p3,
			 int prio, uint32_t options, const char *name)
{
	char *stack_ptr;

	Z_ASSERT_VALID_PRIO(prio, entry);

#ifdef CONFIG_USERSPACE
	__ASSERT((options & K_USER) == 0U || z_stack_is_user_capable(stack),
		 "user thread %p with kernel-only stack %p",
		 new_thread, stack);
	z_object_init(new_thread);
	z_object_init(stack);
	new_thread->stack_obj = stack;
	new_thread->syscall_frame = NULL;

	/* Any given thread has access to itself */
	k_object_access_grant(new_thread, new_thread);
#endif
	z_waitq_init(&new_thread->join_queue);

	/* Initialize various struct k_thread members */
	z_init_thread_base(&new_thread->base, prio, _THREAD_PRESTART, options);
	stack_ptr = setup_thread_stack(new_thread, stack, stack_size);

#ifdef CONFIG_KERNEL_COHERENCE
	/* Check that the thread object is safe, but that the stack is
	 * still cached!
	 */
	__ASSERT_NO_MSG(arch_mem_coherent(new_thread));
	__ASSERT_NO_MSG(!arch_mem_coherent(stack));
#endif

	arch_new_thread(new_thread, stack, stack_ptr, entry, p1, p2, p3);

	/* static threads overwrite it afterwards with real value */
	new_thread->init_data = NULL;

#ifdef CONFIG_USE_SWITCH
	/* switch_handle must be non-null except when inside z_swap()
	 * for synchronization reasons. Historically some notional
	 * USE_SWITCH architectures have actually ignored the field
	 */
	__ASSERT(new_thread->switch_handle != NULL,
		 "arch layer failed to initialize switch_handle");
#endif
#ifdef CONFIG_THREAD_CUSTOM_DATA
	/* Initialize custom data field (value is opaque to kernel) */
	new_thread->custom_data = NULL;
#endif
#ifdef CONFIG_EVENTS
	new_thread->no_wake_on_timeout = false;
#endif
#ifdef CONFIG_THREAD_MONITOR
	new_thread->entry.pEntry = entry;
	new_thread->entry.parameter1 = p1;
	new_thread->entry.parameter2 = p2;
	new_thread->entry.parameter3 = p3;

	k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);

	new_thread->next_thread = _kernel.threads;
	_kernel.threads = new_thread;
	k_spin_unlock(&z_thread_monitor_lock, key);
#endif
#ifdef CONFIG_THREAD_NAME
	if (name != NULL) {
		strncpy(new_thread->name, name,
			CONFIG_THREAD_MAX_NAME_LEN - 1);
		/* Ensure NULL termination, truncate if longer */
		new_thread->name[CONFIG_THREAD_MAX_NAME_LEN - 1] = '\0';
	} else {
		new_thread->name[0] = '\0';
	}
#endif
#ifdef CONFIG_SCHED_CPU_MASK
	if (IS_ENABLED(CONFIG_SCHED_CPU_MASK_PIN_ONLY)) {
		new_thread->base.cpu_mask = 1; /* must specify only one cpu */
	} else {
		new_thread->base.cpu_mask = -1; /* allow all cpus */
	}
#endif
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
	/* _current may be null if the dummy thread is not used */
	if (!_current) {
		new_thread->resource_pool = NULL;
		return stack_ptr;
	}
#endif
#ifdef CONFIG_USERSPACE
	z_mem_domain_init_thread(new_thread);

	if ((options & K_INHERIT_PERMS) != 0U) {
		z_thread_perms_inherit(_current, new_thread);
	}
#endif
#ifdef CONFIG_SCHED_DEADLINE
	new_thread->base.prio_deadline = 0;
#endif
	new_thread->resource_pool = _current->resource_pool;

#ifdef CONFIG_SCHED_THREAD_USAGE
	new_thread->base.usage = (struct k_cycle_stats) {};
	new_thread->base.usage.track_usage =
		CONFIG_SCHED_THREAD_USAGE_AUTO_ENABLE;
#endif

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, create, new_thread);

	return stack_ptr;
}

#ifdef CONFIG_MULTITHREADING
k_tid_t z_impl_k_thread_create(struct k_thread *new_thread,
			       k_thread_stack_t *stack,
			       size_t stack_size, k_thread_entry_t entry,
			       void *p1, void *p2, void *p3,
			       int prio, uint32_t options, k_timeout_t delay)
{
	__ASSERT(!arch_is_in_isr(), "Threads may not be created in ISRs");

	z_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
			   prio, options, NULL);

	if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
		schedule_new_thread(new_thread, delay);
	}

	return new_thread;
}

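/* Illustrative usage sketch (application side); the stack size, priority,
 * and entry point below are arbitrary example values.
 *
 *	K_THREAD_STACK_DEFINE(worker_stack, 1024);
 *	static struct k_thread worker_thread;
 *
 *	static void worker(void *p1, void *p2, void *p3)
 *	{
 *		// ... thread body ...
 *	}
 *
 *	void start_worker(void)
 *	{
 *		k_tid_t tid = k_thread_create(&worker_thread, worker_stack,
 *					      K_THREAD_STACK_SIZEOF(worker_stack),
 *					      worker, NULL, NULL, NULL,
 *					      5, 0, K_NO_WAIT);
 *
 *		k_thread_name_set(tid, "worker");
 *	}
 */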

#ifdef CONFIG_USERSPACE
bool z_stack_is_user_capable(k_thread_stack_t *stack)
{
	return z_object_find(stack) != NULL;
}

k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
			       k_thread_stack_t *stack,
			       size_t stack_size, k_thread_entry_t entry,
			       void *p1, void *p2, void *p3,
			       int prio, uint32_t options, k_timeout_t delay)
{
	size_t total_size, stack_obj_size;
	struct z_object *stack_object;

	/* The thread and stack objects *must* be in an uninitialized state */
	Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(new_thread, K_OBJ_THREAD));

	/* No need to check z_stack_is_user_capable(), it won't be in the
	 * object table if it isn't
	 */
	stack_object = z_object_find(stack);
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(z_obj_validation_check(stack_object, stack,
						K_OBJ_THREAD_STACK_ELEMENT,
						_OBJ_INIT_FALSE) == 0,
				    "bad stack object"));

	/* Verify that the stack size passed in is OK by computing the total
	 * size and comparing it with the size value in the object metadata
	 */
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(!size_add_overflow(K_THREAD_STACK_RESERVED,
						       stack_size, &total_size),
				    "stack size overflow (%zu+%zu)",
				    stack_size,
				    K_THREAD_STACK_RESERVED));

	/* Testing less-than-or-equal since additional room may have been
	 * allocated for alignment constraints
	 */
#ifdef CONFIG_GEN_PRIV_STACKS
	stack_obj_size = stack_object->data.stack_data->size;
#else
	stack_obj_size = stack_object->data.stack_size;
#endif
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(total_size <= stack_obj_size,
				    "stack size %zu is too big, max is %zu",
				    total_size, stack_obj_size));

	/* User threads may only create other user threads and they can't
	 * be marked as essential
	 */
	Z_OOPS(Z_SYSCALL_VERIFY(options & K_USER));
	Z_OOPS(Z_SYSCALL_VERIFY(!(options & K_ESSENTIAL)));

	/* Check validity of prio argument; must be the same or worse priority
	 * than the caller
	 */
	Z_OOPS(Z_SYSCALL_VERIFY(_is_valid_prio(prio, NULL)));
	Z_OOPS(Z_SYSCALL_VERIFY(z_is_prio_lower_or_equal(prio,
							 _current->base.prio)));

	z_setup_new_thread(new_thread, stack, stack_size,
			   entry, p1, p2, p3, prio, options, NULL);

	if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
		schedule_new_thread(new_thread, delay);
	}

	return new_thread;
}
#include <syscalls/k_thread_create_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_MULTITHREADING */

#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_USERSPACE

static void grant_static_access(void)
{
	STRUCT_SECTION_FOREACH(z_object_assignment, pos) {
		for (int i = 0; pos->objects[i] != NULL; i++) {
			k_object_access_grant(pos->objects[i],
					      pos->thread);
		}
	}
}
#endif /* CONFIG_USERSPACE */

void z_init_static_threads(void)
{
	_FOREACH_STATIC_THREAD(thread_data) {
		z_setup_new_thread(
			thread_data->init_thread,
			thread_data->init_stack,
			thread_data->init_stack_size,
			thread_data->init_entry,
			thread_data->init_p1,
			thread_data->init_p2,
			thread_data->init_p3,
			thread_data->init_prio,
			thread_data->init_options,
			thread_data->init_name);

		thread_data->init_thread->init_data = thread_data;
	}

#ifdef CONFIG_USERSPACE
	grant_static_access();
#endif

	/*
	 * Non-legacy static threads may be started immediately or
	 * after a previously specified delay. Even though the
	 * scheduler is locked, ticks can still be delivered and
	 * processed. Take a sched lock to prevent them from running
	 * until they are all started.
	 *
	 * Note that static threads defined using the legacy API have a
	 * delay of K_FOREVER.
	 */
	k_sched_lock();
	_FOREACH_STATIC_THREAD(thread_data) {
		if (thread_data->init_delay != K_TICKS_FOREVER) {
			schedule_new_thread(thread_data->init_thread,
					    K_MSEC(thread_data->init_delay));
		}
	}
	k_sched_unlock();
}
#endif
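
/* Illustrative usage sketch (application side): a static thread declared at
 * build time ends up in the _static_thread_data section walked above; the
 * names and values are arbitrary examples.
 *
 *	static void blink(void *p1, void *p2, void *p3)
 *	{
 *		// ... thread body ...
 *	}
 *
 *	// 512-byte stack, priority 7, no options, started after a 100 ms delay.
 *	K_THREAD_DEFINE(blink_tid, 512, blink, NULL, NULL, NULL, 7, 0, 100);
 */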

void z_init_thread_base(struct _thread_base *thread_base, int priority,
			uint32_t initial_state, unsigned int options)
{
	/* k_q_node is initialized upon first insertion in a list */
	thread_base->pended_on = NULL;
	thread_base->user_options = (uint8_t)options;
	thread_base->thread_state = (uint8_t)initial_state;

	thread_base->prio = priority;

	thread_base->sched_locked = 0U;

#ifdef CONFIG_SMP
	thread_base->is_idle = 0;
#endif

#ifdef CONFIG_TIMESLICE_PER_THREAD
	thread_base->slice_ticks = 0;
	thread_base->slice_expired = NULL;
#endif

	/* swap_data does not need to be initialized */

	z_init_thread_timeout(thread_base);
}

FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
					    void *p1, void *p2, void *p3)
{
	SYS_PORT_TRACING_FUNC(k_thread, user_mode_enter);

	_current->base.user_options |= K_USER;
	z_thread_essential_clear();
#ifdef CONFIG_THREAD_MONITOR
	_current->entry.pEntry = entry;
	_current->entry.parameter1 = p1;
	_current->entry.parameter2 = p2;
	_current->entry.parameter3 = p3;
#endif
#ifdef CONFIG_USERSPACE
	__ASSERT(z_stack_is_user_capable(_current->stack_obj),
		 "dropping to user mode with kernel-only stack object");
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
	memset(_current->userspace_local_data, 0,
	       sizeof(struct _thread_userspace_local_data));
#endif
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	arch_tls_stack_setup(_current,
			     (char *)(_current->stack_info.start +
				      _current->stack_info.size));
#endif
	arch_user_mode_enter(entry, p1, p2, p3);
#else
	/* XXX In this case we do not reset the stack */
	z_thread_entry(entry, p1, p2, p3);
#endif
}
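
/* Illustrative usage sketch (application side, assumes CONFIG_USERSPACE=y):
 * a supervisor thread finishes its privileged setup and then drops itself to
 * user mode; the entry functions are hypothetical, and the call never returns.
 *
 *	static void user_entry(void *p1, void *p2, void *p3)
 *	{
 *		// Runs in user mode from this point on.
 *	}
 *
 *	static void supervisor_entry(void *p1, void *p2, void *p3)
 *	{
 *		// ... privileged setup: grant kernel objects, etc. ...
 *		k_thread_user_mode_enter(user_entry, NULL, NULL, NULL);
 *	}
 */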

/* These spinlock assertion predicates are defined here because having
 * them in spinlock.h is a giant header ordering headache.
 */
#ifdef CONFIG_SPIN_VALIDATE
bool z_spin_lock_valid(struct k_spinlock *l)
{
	uintptr_t thread_cpu = l->thread_cpu;

	if (thread_cpu != 0U) {
		if ((thread_cpu & 3U) == _current_cpu->id) {
			return false;
		}
	}
	return true;
}

bool z_spin_unlock_valid(struct k_spinlock *l)
{
	if (l->thread_cpu != (_current_cpu->id | (uintptr_t)_current)) {
		return false;
	}
	l->thread_cpu = 0;
	return true;
}

void z_spin_lock_set_owner(struct k_spinlock *l)
{
	l->thread_cpu = _current_cpu->id | (uintptr_t)_current;
}

#ifdef CONFIG_KERNEL_COHERENCE
bool z_spin_lock_mem_coherent(struct k_spinlock *l)
{
	return arch_mem_coherent((void *)l);
}
#endif /* CONFIG_KERNEL_COHERENCE */

#endif /* CONFIG_SPIN_VALIDATE */

int z_impl_k_float_disable(struct k_thread *thread)
{
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	return arch_float_disable(thread);
#else
	return -ENOTSUP;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
}

int z_impl_k_float_enable(struct k_thread *thread, unsigned int options)
{
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	return arch_float_enable(thread, options);
#else
	return -ENOTSUP;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
}
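
/* Illustrative usage sketch (application side, assumes CONFIG_FPU and
 * CONFIG_FPU_SHARING are enabled); K_FP_REGS is the generic FP option flag,
 * and the wrapper function is hypothetical.
 *
 *	void enable_fp_for(k_tid_t tid)
 *	{
 *		int ret = k_float_enable(tid, K_FP_REGS);
 *
 *		if (ret == -ENOTSUP) {
 *			printk("FP sharing not supported on this build\n");
 *		}
 *	}
 */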

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_float_disable(struct k_thread *thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	return z_impl_k_float_disable(thread);
}
#include <syscalls/k_float_disable_mrsh.c>
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_IRQ_OFFLOAD
/* Make offload_sem visible outside this file under testing, so that it can
 * be released externally when an error occurs.
 */
K_SEM_DEFINE(offload_sem, 1, 1);

void irq_offload(irq_offload_routine_t routine, const void *parameter)
{
#ifdef CONFIG_IRQ_OFFLOAD_NESTED
	arch_irq_offload(routine, parameter);
#else
	k_sem_take(&offload_sem, K_FOREVER);
	arch_irq_offload(routine, parameter);
	k_sem_give(&offload_sem);
#endif
}
#endif
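
/* Illustrative usage sketch (test code, assumes CONFIG_IRQ_OFFLOAD=y): run a
 * routine in interrupt context to exercise ISR paths; names are hypothetical.
 *
 *	static void isr_routine(const void *param)
 *	{
 *		// Executes with k_is_in_isr() == true.
 *		atomic_inc((atomic_t *)param);
 *	}
 *
 *	void trigger_from_isr(atomic_t *counter)
 *	{
 *		irq_offload(isr_routine, counter);
 *	}
 */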

#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
#ifdef CONFIG_STACK_GROWS_UP
#error "Unsupported configuration for stack analysis"
#endif

int z_stack_space_get(const uint8_t *stack_start, size_t size, size_t *unused_ptr)
{
	size_t unused = 0;
	const uint8_t *checked_stack = stack_start;
	/* Take the address of any local variable as a shallow bound for the
	 * stack pointer. Addresses above it are guaranteed to be
	 * accessible.
	 */
	const uint8_t *stack_pointer = (const uint8_t *)&stack_start;

	/* If we are currently running on the stack being analyzed, some
	 * memory management hardware will generate an exception if we
	 * read unused stack memory.
	 *
	 * This never happens when invoked from user mode, as user mode
	 * will always run this function on the privilege elevation stack.
	 */
	if ((stack_pointer > stack_start) && (stack_pointer <= (stack_start + size)) &&
	    IS_ENABLED(CONFIG_NO_UNUSED_STACK_INSPECTION)) {
		/* TODO: We could add an arch_ API call to temporarily
		 * disable the stack checking in the CPU, but this would
		 * need to be properly managed wrt context switches/interrupts
		 */
		return -ENOTSUP;
	}

	if (IS_ENABLED(CONFIG_STACK_SENTINEL)) {
		/* First 4 bytes of the stack buffer reserved for the
		 * sentinel value, it won't be 0xAAAAAAAA for thread
		 * stacks.
		 *
		 * FIXME: thread->stack_info.start ought to reflect
		 * this!
		 */
		checked_stack += 4;
		size -= 4;
	}

	for (size_t i = 0; i < size; i++) {
		if ((checked_stack[i]) == 0xaaU) {
			unused++;
		} else {
			break;
		}
	}

	*unused_ptr = unused;

	return 0;
}

int z_impl_k_thread_stack_space_get(const struct k_thread *thread,
				    size_t *unused_ptr)
{
	return z_stack_space_get((const uint8_t *)thread->stack_info.start,
				 thread->stack_info.size, unused_ptr);
}
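
/* Illustrative usage sketch (application side, assumes CONFIG_INIT_STACKS and
 * CONFIG_THREAD_STACK_INFO are enabled); the reporting function is
 * hypothetical.
 *
 *	void report_stack_headroom(k_tid_t tid)
 *	{
 *		size_t unused;
 *
 *		if (k_thread_stack_space_get(tid, &unused) == 0) {
 *			printk("thread %p: %zu bytes of stack never used\n",
 *			       tid, unused);
 *		}
 *	}
 */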

#ifdef CONFIG_USERSPACE
int z_vrfy_k_thread_stack_space_get(const struct k_thread *thread,
				    size_t *unused_ptr)
{
	size_t unused;
	int ret;

	ret = Z_SYSCALL_OBJ(thread, K_OBJ_THREAD);
	CHECKIF(ret != 0) {
		return ret;
	}

	ret = z_impl_k_thread_stack_space_get(thread, &unused);
	CHECKIF(ret != 0) {
		return ret;
	}

	ret = z_user_to_copy(unused_ptr, &unused, sizeof(size_t));
	CHECKIF(ret != 0) {
		return ret;
	}

	return 0;
}
#include <syscalls/k_thread_stack_space_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_INIT_STACKS && CONFIG_THREAD_STACK_INFO */

#ifdef CONFIG_USERSPACE
static inline k_ticks_t z_vrfy_k_thread_timeout_remaining_ticks(
						const struct k_thread *t)
{
	Z_OOPS(Z_SYSCALL_OBJ(t, K_OBJ_THREAD));
	return z_impl_k_thread_timeout_remaining_ticks(t);
}
#include <syscalls/k_thread_timeout_remaining_ticks_mrsh.c>

static inline k_ticks_t z_vrfy_k_thread_timeout_expires_ticks(
						const struct k_thread *t)
{
	Z_OOPS(Z_SYSCALL_OBJ(t, K_OBJ_THREAD));
	return z_impl_k_thread_timeout_expires_ticks(t);
}
#include <syscalls/k_thread_timeout_expires_ticks_mrsh.c>
#endif

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
void z_thread_mark_switched_in(void)
{
#if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH)
	z_sched_usage_start(_current);
#endif

#ifdef CONFIG_TRACING
	SYS_PORT_TRACING_FUNC(k_thread, switched_in);
#endif
}

void z_thread_mark_switched_out(void)
{
#if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH)
	z_sched_usage_stop();
#endif

#ifdef CONFIG_TRACING
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/* Dummy thread won't have TLS set up to run arbitrary code */
	if (!_current_cpu->current ||
	    (_current_cpu->current->base.thread_state & _THREAD_DUMMY) != 0)
		return;
#endif
	SYS_PORT_TRACING_FUNC(k_thread, switched_out);
#endif
}
#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */

int k_thread_runtime_stats_get(k_tid_t thread,
			       k_thread_runtime_stats_t *stats)
{
	if ((thread == NULL) || (stats == NULL)) {
		return -EINVAL;
	}

#ifdef CONFIG_SCHED_THREAD_USAGE
	z_sched_thread_usage(thread, stats);
#else
	*stats = (k_thread_runtime_stats_t) {};
#endif

	return 0;
}
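
/* Illustrative usage sketch (application side, assumes
 * CONFIG_SCHED_THREAD_USAGE=y); only execution_cycles is shown, and the
 * reporting function is hypothetical.
 *
 *	void report_thread_usage(k_tid_t tid)
 *	{
 *		k_thread_runtime_stats_t stats;
 *
 *		if (k_thread_runtime_stats_get(tid, &stats) == 0) {
 *			printk("thread %p used %llu cycles\n", tid,
 *			       (unsigned long long)stats.execution_cycles);
 *		}
 *	}
 */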

int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats)
{
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	k_thread_runtime_stats_t tmp_stats;
#endif

	if (stats == NULL) {
		return -EINVAL;
	}

	*stats = (k_thread_runtime_stats_t) {};

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	/* Retrieve the usage stats for each core and amalgamate them. */

	unsigned int num_cpus = arch_num_cpus();

	for (uint8_t i = 0; i < num_cpus; i++) {
		z_sched_cpu_usage(i, &tmp_stats);

		stats->execution_cycles += tmp_stats.execution_cycles;
		stats->total_cycles += tmp_stats.total_cycles;
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
		stats->current_cycles += tmp_stats.current_cycles;
		stats->peak_cycles += tmp_stats.peak_cycles;
		stats->average_cycles += tmp_stats.average_cycles;
#endif
		stats->idle_cycles += tmp_stats.idle_cycles;
	}
#endif

	return 0;
}