/*
 * Copyright (c) 2010-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Kernel thread support
 *
 * This module provides general purpose thread support.
 */

#include <kernel.h>
#include <spinlock.h>
#include <sys/math_extras.h>
#include <sys_clock.h>
#include <ksched.h>
#include <wait_q.h>
#include <syscall_handler.h>
#include <kernel_internal.h>
#include <kswap.h>
#include <init.h>
#include <tracing/tracing.h>
#include <string.h>
#include <stdbool.h>
#include <irq_offload.h>
#include <sys/check.h>
#include <random/rand32.h>
#include <sys/atomic.h>
#include <logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#ifdef CONFIG_THREAD_RUNTIME_STATS
k_thread_runtime_stats_t threads_runtime_stats;
#endif

#ifdef CONFIG_THREAD_MONITOR
/* This lock protects the linked list of active threads; i.e. the
 * initial _kernel.threads pointer and the linked list made up of
 * thread->next_thread (until NULL)
 */
static struct k_spinlock z_thread_monitor_lock;
#endif /* CONFIG_THREAD_MONITOR */

#define _FOREACH_STATIC_THREAD(thread_data) \
	STRUCT_SECTION_FOREACH(_static_thread_data, thread_data)

void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
{
#if defined(CONFIG_THREAD_MONITOR)
	struct k_thread *thread;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb cannot be NULL");

	/*
	 * The lock ensures that _kernel.threads is not modified by user_cb,
	 * either directly or indirectly (e.g. by calling k_thread_create()
	 * or k_thread_abort() from within user_cb).
	 */
	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach);

	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		user_cb(thread, user_data);
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach);

	k_spin_unlock(&z_thread_monitor_lock, key);
#endif
}
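
/* Usage sketch (illustrative, not part of this file): counting active
 * threads with k_thread_foreach(). The callback and counter names are
 * hypothetical.
 *
 *	static void count_cb(const struct k_thread *thread, void *user_data)
 *	{
 *		int *count = user_data;
 *
 *		(*count)++;
 *	}
 *
 *	int count = 0;
 *
 *	k_thread_foreach(count_cb, &count);
 */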

void k_thread_foreach_unlocked(k_thread_user_cb_t user_cb, void *user_data)
{
#if defined(CONFIG_THREAD_MONITOR)
	struct k_thread *thread;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb cannot be NULL");

	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach_unlocked);

	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		k_spin_unlock(&z_thread_monitor_lock, key);
		user_cb(thread, user_data);
		key = k_spin_lock(&z_thread_monitor_lock);
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach_unlocked);

	k_spin_unlock(&z_thread_monitor_lock, key);
#endif
}

bool k_is_in_isr(void)
{
	return arch_is_in_isr();
}

/*
 * This function tags the current thread as essential to system operation.
 * Exceptions raised by this thread will be treated as a fatal system error.
 */
void z_thread_essential_set(void)
{
	_current->base.user_options |= K_ESSENTIAL;
}

/*
 * This function tags the current thread as not essential to system operation.
 * Exceptions raised by this thread may be recoverable.
 * (This is the default tag for a thread.)
 */
void z_thread_essential_clear(void)
{
	_current->base.user_options &= ~K_ESSENTIAL;
}

/*
 * This routine indicates if the current thread is an essential system thread.
 *
 * Returns true if current thread is essential, false if it is not.
 */
bool z_is_thread_essential(void)
{
	return (_current->base.user_options & K_ESSENTIAL) == K_ESSENTIAL;
}

#ifdef CONFIG_THREAD_CUSTOM_DATA
void z_impl_k_thread_custom_data_set(void *value)
{
	_current->custom_data = value;
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_custom_data_set(void *data)
{
	z_impl_k_thread_custom_data_set(data);
}
#include <syscalls/k_thread_custom_data_set_mrsh.c>
#endif

void *z_impl_k_thread_custom_data_get(void)
{
	return _current->custom_data;
}

#ifdef CONFIG_USERSPACE
static inline void *z_vrfy_k_thread_custom_data_get(void)
{
	return z_impl_k_thread_custom_data_get();
}
#include <syscalls/k_thread_custom_data_get_mrsh.c>

#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_THREAD_CUSTOM_DATA */
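
/* Usage sketch (illustrative, not part of this file): a thread stashing a
 * pointer to its own context structure. 'struct my_ctx' is a hypothetical
 * application type.
 *
 *	struct my_ctx ctx;
 *
 *	k_thread_custom_data_set(&ctx);
 *	...
 *	struct my_ctx *p = k_thread_custom_data_get();
 */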

#if defined(CONFIG_THREAD_MONITOR)
/*
 * Remove a thread from the kernel's list of active threads.
 */
void z_thread_monitor_exit(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);

	if (thread == _kernel.threads) {
		_kernel.threads = _kernel.threads->next_thread;
	} else {
		struct k_thread *prev_thread;

		prev_thread = _kernel.threads;
		while ((prev_thread != NULL) &&
			(thread != prev_thread->next_thread)) {
			prev_thread = prev_thread->next_thread;
		}
		if (prev_thread != NULL) {
			prev_thread->next_thread = thread->next_thread;
		}
	}

	k_spin_unlock(&z_thread_monitor_lock, key);
}
#endif

int z_impl_k_thread_name_set(struct k_thread *thread, const char *value)
{
#ifdef CONFIG_THREAD_NAME
	if (thread == NULL) {
		thread = _current;
	}

	strncpy(thread->name, value, CONFIG_THREAD_MAX_NAME_LEN);
	thread->name[CONFIG_THREAD_MAX_NAME_LEN - 1] = '\0';

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, name_set, thread, 0);

	return 0;
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(value);

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, name_set, thread, -ENOSYS);

	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_name_set(struct k_thread *thread, const char *str)
{
#ifdef CONFIG_THREAD_NAME
	char name[CONFIG_THREAD_MAX_NAME_LEN];

	if (thread != NULL) {
		if (Z_SYSCALL_OBJ(thread, K_OBJ_THREAD) != 0) {
			return -EINVAL;
		}
	}

	/* In theory we could copy directly into thread->name, but
	 * the current z_vrfy / z_impl split does not provide a
	 * means of doing so.
	 */
	if (z_user_string_copy(name, (char *)str, sizeof(name)) != 0) {
		return -EFAULT;
	}

	return z_impl_k_thread_name_set(thread, name);
#else
	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
#include <syscalls/k_thread_name_set_mrsh.c>
#endif /* CONFIG_USERSPACE */

const char *k_thread_name_get(struct k_thread *thread)
{
#ifdef CONFIG_THREAD_NAME
	return (const char *)thread->name;
#else
	ARG_UNUSED(thread);
	return NULL;
#endif /* CONFIG_THREAD_NAME */
}

int z_impl_k_thread_name_copy(k_tid_t thread, char *buf, size_t size)
{
#ifdef CONFIG_THREAD_NAME
	strncpy(buf, thread->name, size);
	return 0;
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(buf);
	ARG_UNUSED(size);
	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
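
/* Usage sketch (illustrative, not part of this file): naming the current
 * thread, then reading the name back into a local buffer.
 *
 *	char buf[CONFIG_THREAD_MAX_NAME_LEN];
 *
 *	k_thread_name_set(NULL, "worker");
 *	k_thread_name_copy(k_current_get(), buf, sizeof(buf));
 */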

const char *k_thread_state_str(k_tid_t thread_id)
{
	switch (thread_id->base.thread_state) {
	case 0:
		return "";
	case _THREAD_DUMMY:
		return "dummy";
	case _THREAD_PENDING:
		return "pending";
	case _THREAD_PRESTART:
		return "prestart";
	case _THREAD_DEAD:
		return "dead";
	case _THREAD_SUSPENDED:
		return "suspended";
	case _THREAD_ABORTING:
		return "aborting";
	case _THREAD_QUEUED:
		return "queued";
	default:
		/* Keep this break: some day, when another case gets added
		 * after this one, this bit of defensive programming will
		 * be useful.
		 */
		break;
	}
	return "unknown";
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_name_copy(k_tid_t thread,
					    char *buf, size_t size)
{
#ifdef CONFIG_THREAD_NAME
	size_t len;
	struct z_object *ko = z_object_find(thread);

	/* Special case: we allow reading the names of initialized threads
	 * even if we don't have permission on them
	 */
	if (thread == NULL || ko->type != K_OBJ_THREAD ||
	    (ko->flags & K_OBJ_FLAG_INITIALIZED) == 0) {
		return -EINVAL;
	}
	if (Z_SYSCALL_MEMORY_WRITE(buf, size) != 0) {
		return -EFAULT;
	}
	len = strlen(thread->name);
	if (len + 1 > size) {
		return -ENOSPC;
	}

	return z_user_to_copy((void *)buf, thread->name, len + 1);
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(buf);
	ARG_UNUSED(size);
	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
#include <syscalls/k_thread_name_copy_mrsh.c>
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_STACK_SENTINEL
/* Check that the stack sentinel is still present
 *
 * The stack sentinel feature writes a magic value to the lowest 4 bytes of
 * the thread's stack when the thread is initialized. This value gets checked
 * in a few places:
 *
 * 1) In k_yield() if the current thread is not swapped out
 * 2) After servicing a non-nested interrupt
 * 3) In z_swap(), check the sentinel in the outgoing thread
 *
 * Item 2 requires support in arch/ code.
 *
 * If the check fails, the thread will be terminated appropriately through
 * the system fatal error handler.
 */
void z_check_stack_sentinel(void)
{
	uint32_t *stack;

	if ((_current->base.thread_state & _THREAD_DUMMY) != 0) {
		return;
	}

	stack = (uint32_t *)_current->stack_info.start;
	if (*stack != STACK_SENTINEL) {
		/* Restore it so further checks don't trigger this same error */
		*stack = STACK_SENTINEL;
		z_except_reason(K_ERR_STACK_CHK_FAIL);
	}
}
#endif /* CONFIG_STACK_SENTINEL */

void z_impl_k_thread_start(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_thread, start, thread);

	z_sched_start(thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_start(struct k_thread *thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	return z_impl_k_thread_start(thread);
}
#include <syscalls/k_thread_start_mrsh.c>
#endif
#endif

#ifdef CONFIG_MULTITHREADING
static void schedule_new_thread(struct k_thread *thread, k_timeout_t delay)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
	if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
		k_thread_start(thread);
	} else {
		z_add_thread_timeout(thread, delay);
	}
#else
	ARG_UNUSED(delay);
	k_thread_start(thread);
#endif
}
#endif

#if CONFIG_STACK_POINTER_RANDOM
int z_stack_adjust_initialized;

static size_t random_offset(size_t stack_size)
{
	size_t random_val;

	if (!z_stack_adjust_initialized) {
		z_early_boot_rand_get((uint8_t *)&random_val, sizeof(random_val));
	} else {
		sys_rand_get((uint8_t *)&random_val, sizeof(random_val));
	}

	/* Don't need to worry about alignment of the size here,
	 * arch_new_thread() is required to do it.
	 *
	 * FIXME: Not the best way to get a random number in a range.
	 * See #6493
	 */
	const size_t fuzz = random_val % CONFIG_STACK_POINTER_RANDOM;

	if (unlikely(fuzz * 2 > stack_size)) {
		return 0;
	}

	return fuzz;
}
#if defined(CONFIG_STACK_GROWS_UP)
/* This case is rare enough that we don't bother supporting it for now */
#error "Stack pointer randomization not implemented for upward growing stacks"
#endif /* CONFIG_STACK_GROWS_UP */
#endif /* CONFIG_STACK_POINTER_RANDOM */

static char *setup_thread_stack(struct k_thread *new_thread,
				k_thread_stack_t *stack, size_t stack_size)
{
	size_t stack_obj_size, stack_buf_size;
	char *stack_ptr, *stack_buf_start;
	size_t delta = 0;

#ifdef CONFIG_USERSPACE
	if (z_stack_is_user_capable(stack)) {
		stack_obj_size = Z_THREAD_STACK_SIZE_ADJUST(stack_size);
		stack_buf_start = Z_THREAD_STACK_BUFFER(stack);
		stack_buf_size = stack_obj_size - K_THREAD_STACK_RESERVED;
	} else
#endif
	{
		/* Object cannot host a user mode thread */
		stack_obj_size = Z_KERNEL_STACK_SIZE_ADJUST(stack_size);
		stack_buf_start = Z_KERNEL_STACK_BUFFER(stack);
		stack_buf_size = stack_obj_size - K_KERNEL_STACK_RESERVED;
	}

	/* The initial stack pointer is at the high end of the stack object;
	 * it may be moved down later in this function to make room for TLS
	 * or a random offset.
	 */
	stack_ptr = (char *)stack + stack_obj_size;

	LOG_DBG("stack %p for thread %p: obj_size=%zu buf_start=%p "
		"buf_size=%zu stack_ptr=%p",
		stack, new_thread, stack_obj_size, stack_buf_start,
		stack_buf_size, stack_ptr);

#ifdef CONFIG_INIT_STACKS
	memset(stack_buf_start, 0xaa, stack_buf_size);
#endif
#ifdef CONFIG_STACK_SENTINEL
	/* Put the stack sentinel at the lowest 4 bytes of the stack area.
	 * We periodically check that it's still present and kill the thread
	 * if it isn't.
	 */
	*((uint32_t *)stack_buf_start) = STACK_SENTINEL;
#endif /* CONFIG_STACK_SENTINEL */
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/* TLS is always last within the stack buffer */
	delta += arch_tls_stack_setup(new_thread, stack_ptr);
#endif /* CONFIG_THREAD_LOCAL_STORAGE */
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
	size_t tls_size = sizeof(struct _thread_userspace_local_data);

	/* Reserve space at the highest addresses of the stack buffer for
	 * the local data.
	 */
	delta += tls_size;
	new_thread->userspace_local_data =
		(struct _thread_userspace_local_data *)(stack_ptr - delta);
#endif
#if CONFIG_STACK_POINTER_RANDOM
	delta += random_offset(stack_buf_size);
#endif
	delta = ROUND_UP(delta, ARCH_STACK_PTR_ALIGN);
#ifdef CONFIG_THREAD_STACK_INFO
	/* Initial values. Arches which implement MPU guards that "borrow"
	 * memory from the stack buffer (not tracked in K_THREAD_STACK_RESERVED)
	 * will need to appropriately update this.
	 *
	 * The bounds tracked here correspond to the area of the stack object
	 * that the thread can access, which includes TLS.
	 */
	new_thread->stack_info.start = (uintptr_t)stack_buf_start;
	new_thread->stack_info.size = stack_buf_size;
	new_thread->stack_info.delta = delta;
#endif
	stack_ptr -= delta;

	return stack_ptr;
}
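
/* Resulting layout of a (downward-growing) stack object after the setup
 * above, sketched here for reference; the exact reserved areas depend on
 * the architecture and configuration:
 *
 *	+---------------------------------------+ <- stack + stack_obj_size
 *	| TLS area (if THREAD_LOCAL_STORAGE)     |
 *	| userspace local data (if enabled)      |
 *	| random offset + alignment padding      |
 *	+---------------------------------------+ <- returned stack_ptr
 *	| usable stack buffer                    |
 *	| (filled with 0xaa if INIT_STACKS)      |
 *	+---------------------------------------+
 *	| STACK_SENTINEL in the lowest 4 bytes   | <- stack_buf_start
 *	| (if STACK_SENTINEL)                    |
 *	+---------------------------------------+
 *	| reserved region, if any                | <- stack object base
 *	+---------------------------------------+
 */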

#define THREAD_COOKIE 0x1337C0D3

/*
 * The provided stack_size value is presumed to be either the result of
 * K_THREAD_STACK_SIZEOF(stack), or the size value passed to the instance
 * of K_THREAD_STACK_DEFINE() which defined 'stack'.
 */
char *z_setup_new_thread(struct k_thread *new_thread,
			 k_thread_stack_t *stack, size_t stack_size,
			 k_thread_entry_t entry,
			 void *p1, void *p2, void *p3,
			 int prio, uint32_t options, const char *name)
{
	char *stack_ptr;

	Z_ASSERT_VALID_PRIO(prio, entry);

#ifdef CONFIG_USERSPACE
	__ASSERT((options & K_USER) == 0U || z_stack_is_user_capable(stack),
		 "user thread %p with kernel-only stack %p",
		 new_thread, stack);
	z_object_init(new_thread);
	z_object_init(stack);
	new_thread->stack_obj = stack;
	new_thread->syscall_frame = NULL;

	/* Any given thread has access to itself */
	k_object_access_grant(new_thread, new_thread);
#endif
	z_waitq_init(&new_thread->join_queue);

	/* Initialize various struct k_thread members */
	z_init_thread_base(&new_thread->base, prio, _THREAD_PRESTART, options);
	stack_ptr = setup_thread_stack(new_thread, stack, stack_size);

#ifdef CONFIG_KERNEL_COHERENCE
	/* Check that the thread object is safe, but that the stack is
	 * still cached!
	 */
	__ASSERT_NO_MSG(arch_mem_coherent(new_thread));
	__ASSERT_NO_MSG(!arch_mem_coherent(stack));
#endif

	arch_new_thread(new_thread, stack, stack_ptr, entry, p1, p2, p3);

	/* static threads overwrite it afterwards with the real value */
	new_thread->init_data = NULL;

#ifdef CONFIG_USE_SWITCH
	/* switch_handle must be non-NULL except when inside z_swap(),
	 * for synchronization reasons. Historically, some notional
	 * USE_SWITCH architectures have actually ignored the field.
	 */
	__ASSERT(new_thread->switch_handle != NULL,
		 "arch layer failed to initialize switch_handle");
#endif
#ifdef CONFIG_THREAD_CUSTOM_DATA
	/* Initialize custom data field (value is opaque to kernel) */
	new_thread->custom_data = NULL;
#endif
#ifdef CONFIG_THREAD_MONITOR
	new_thread->entry.pEntry = entry;
	new_thread->entry.parameter1 = p1;
	new_thread->entry.parameter2 = p2;
	new_thread->entry.parameter3 = p3;

	k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);

	new_thread->next_thread = _kernel.threads;
	_kernel.threads = new_thread;
	k_spin_unlock(&z_thread_monitor_lock, key);
#endif
#ifdef CONFIG_THREAD_NAME
	if (name != NULL) {
		strncpy(new_thread->name, name,
			CONFIG_THREAD_MAX_NAME_LEN - 1);
		/* Ensure NUL termination; truncate if longer */
		new_thread->name[CONFIG_THREAD_MAX_NAME_LEN - 1] = '\0';
	} else {
		new_thread->name[0] = '\0';
	}
#endif
#ifdef CONFIG_SCHED_CPU_MASK
	new_thread->base.cpu_mask = -1;
#endif
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
	/* _current may be NULL if the dummy thread is not used */
	if (!_current) {
		new_thread->resource_pool = NULL;
		return stack_ptr;
	}
#endif
#ifdef CONFIG_USERSPACE
	z_mem_domain_init_thread(new_thread);

	if ((options & K_INHERIT_PERMS) != 0U) {
		z_thread_perms_inherit(_current, new_thread);
	}
#endif
#ifdef CONFIG_SCHED_DEADLINE
	new_thread->base.prio_deadline = 0;
#endif
	new_thread->resource_pool = _current->resource_pool;

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, create, new_thread);

#ifdef CONFIG_THREAD_RUNTIME_STATS
	memset(&new_thread->rt_stats, 0, sizeof(new_thread->rt_stats));
#endif

	return stack_ptr;
}

#ifdef CONFIG_MULTITHREADING
k_tid_t z_impl_k_thread_create(struct k_thread *new_thread,
			       k_thread_stack_t *stack,
			       size_t stack_size, k_thread_entry_t entry,
			       void *p1, void *p2, void *p3,
			       int prio, uint32_t options, k_timeout_t delay)
{
	__ASSERT(!arch_is_in_isr(), "Threads may not be created in ISRs");

	z_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
			   prio, options, NULL);

	if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
		schedule_new_thread(new_thread, delay);
	}

	return new_thread;
}
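
/* Usage sketch (illustrative, not part of this file): creating a thread
 * on a statically defined stack. The names my_stack, my_thread and
 * my_entry are hypothetical.
 *
 *	K_THREAD_STACK_DEFINE(my_stack, 1024);
 *	static struct k_thread my_thread;
 *
 *	static void my_entry(void *p1, void *p2, void *p3)
 *	{
 *		...
 *	}
 *
 *	k_tid_t tid = k_thread_create(&my_thread, my_stack,
 *				      K_THREAD_STACK_SIZEOF(my_stack),
 *				      my_entry, NULL, NULL, NULL,
 *				      K_PRIO_PREEMPT(7), 0, K_NO_WAIT);
 */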

#ifdef CONFIG_USERSPACE
bool z_stack_is_user_capable(k_thread_stack_t *stack)
{
	return z_object_find(stack) != NULL;
}

k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
			       k_thread_stack_t *stack,
			       size_t stack_size, k_thread_entry_t entry,
			       void *p1, void *p2, void *p3,
			       int prio, uint32_t options, k_timeout_t delay)
{
	size_t total_size, stack_obj_size;
	struct z_object *stack_object;

	/* The thread and stack objects *must* be in an uninitialized state */
	Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(new_thread, K_OBJ_THREAD));

	/* No need to check z_stack_is_user_capable(), it won't be in the
	 * object table if it isn't
	 */
	stack_object = z_object_find(stack);
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(z_obj_validation_check(stack_object, stack,
						K_OBJ_THREAD_STACK_ELEMENT,
						_OBJ_INIT_FALSE) == 0,
				    "bad stack object"));

	/* Verify that the stack size passed in is OK by computing the total
	 * size and comparing it with the size value in the object metadata
	 */
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(!size_add_overflow(K_THREAD_STACK_RESERVED,
						       stack_size, &total_size),
				    "stack size overflow (%zu+%zu)",
				    stack_size,
				    K_THREAD_STACK_RESERVED));

	/* Testing less-than-or-equal since additional room may have been
	 * allocated for alignment constraints
	 */
#ifdef CONFIG_GEN_PRIV_STACKS
	stack_obj_size = stack_object->data.stack_data->size;
#else
	stack_obj_size = stack_object->data.stack_size;
#endif
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(total_size <= stack_obj_size,
				    "stack size %zu is too big, max is %zu",
				    total_size, stack_obj_size));

	/* User threads may only create other user threads and they can't
	 * be marked as essential
	 */
	Z_OOPS(Z_SYSCALL_VERIFY(options & K_USER));
	Z_OOPS(Z_SYSCALL_VERIFY(!(options & K_ESSENTIAL)));

	/* Check validity of prio argument; must be the same or worse priority
	 * than the caller
	 */
	Z_OOPS(Z_SYSCALL_VERIFY(_is_valid_prio(prio, NULL)));
	Z_OOPS(Z_SYSCALL_VERIFY(z_is_prio_lower_or_equal(prio,
							 _current->base.prio)));

	z_setup_new_thread(new_thread, stack, stack_size,
			   entry, p1, p2, p3, prio, options, NULL);

	if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
		schedule_new_thread(new_thread, delay);
	}

	return new_thread;
}
#include <syscalls/k_thread_create_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_MULTITHREADING */

#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_USERSPACE

static void grant_static_access(void)
{
	STRUCT_SECTION_FOREACH(z_object_assignment, pos) {
		for (int i = 0; pos->objects[i] != NULL; i++) {
			k_object_access_grant(pos->objects[i],
					      pos->thread);
		}
	}
}
#endif /* CONFIG_USERSPACE */

void z_init_static_threads(void)
{
	_FOREACH_STATIC_THREAD(thread_data) {
		z_setup_new_thread(
			thread_data->init_thread,
			thread_data->init_stack,
			thread_data->init_stack_size,
			thread_data->init_entry,
			thread_data->init_p1,
			thread_data->init_p2,
			thread_data->init_p3,
			thread_data->init_prio,
			thread_data->init_options,
			thread_data->init_name);

		thread_data->init_thread->init_data = thread_data;
	}

#ifdef CONFIG_USERSPACE
	grant_static_access();
#endif

	/*
	 * Non-legacy static threads may be started immediately or
	 * after a previously specified delay. Take the scheduler lock to
	 * prevent any of them from running until they have all been
	 * started; even with the lock held, ticks can still be delivered
	 * and processed.
	 *
	 * Note that static threads defined using the legacy API have a
	 * delay of K_FOREVER.
	 */
	k_sched_lock();
	_FOREACH_STATIC_THREAD(thread_data) {
		if (thread_data->init_delay != K_TICKS_FOREVER) {
			schedule_new_thread(thread_data->init_thread,
					    K_MSEC(thread_data->init_delay));
		}
	}
	k_sched_unlock();
}
#endif
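
/* Usage sketch (illustrative, not part of this file): a static thread
 * that the loop above starts after a 500 ms delay. Names are
 * hypothetical; my_entry has the usual k_thread_entry_t shape.
 *
 *	K_THREAD_DEFINE(my_static, 1024, my_entry, NULL, NULL, NULL,
 *			K_PRIO_PREEMPT(7), 0, 500);
 */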

void z_init_thread_base(struct _thread_base *thread_base, int priority,
			uint32_t initial_state, unsigned int options)
{
	/* k_q_node is initialized upon first insertion in a list */
	thread_base->pended_on = NULL;
	thread_base->user_options = (uint8_t)options;
	thread_base->thread_state = (uint8_t)initial_state;

	thread_base->prio = priority;

	thread_base->sched_locked = 0U;

#ifdef CONFIG_SMP
	thread_base->is_idle = 0;
#endif

	/* swap_data does not need to be initialized */

	z_init_thread_timeout(thread_base);
}

FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
					    void *p1, void *p2, void *p3)
{
	SYS_PORT_TRACING_FUNC(k_thread, user_mode_enter);

	_current->base.user_options |= K_USER;
	z_thread_essential_clear();
#ifdef CONFIG_THREAD_MONITOR
	_current->entry.pEntry = entry;
	_current->entry.parameter1 = p1;
	_current->entry.parameter2 = p2;
	_current->entry.parameter3 = p3;
#endif
#ifdef CONFIG_USERSPACE
	__ASSERT(z_stack_is_user_capable(_current->stack_obj),
		 "dropping to user mode with kernel-only stack object");
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
	memset(_current->userspace_local_data, 0,
	       sizeof(struct _thread_userspace_local_data));
#endif
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	arch_tls_stack_setup(_current,
			     (char *)(_current->stack_info.start +
				      _current->stack_info.size));
#endif
	arch_user_mode_enter(entry, p1, p2, p3);
#else
	/* XXX In this case we do not reset the stack */
	z_thread_entry(entry, p1, p2, p3);
#endif
}
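
/* Usage sketch (illustrative, not part of this file): a supervisor thread
 * dropping to user mode once privileged setup is done. user_entry is a
 * hypothetical k_thread_entry_t function; the call never returns.
 *
 *	... privileged initialization ...
 *	k_thread_user_mode_enter(user_entry, NULL, NULL, NULL);
 */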

/* These spinlock assertion predicates are defined here because having
 * them in spinlock.h is a giant header ordering headache.
 */
#ifdef CONFIG_SPIN_VALIDATE
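/* Ownership is packed into the single thread_cpu word (a sketch of the
 * encoding, inferred from the accessors below): struct k_thread objects
 * are at least 4-byte aligned, so the low two bits hold the owning CPU id
 * and the remaining bits hold the owning thread pointer; zero means
 * "not locked".
 */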
bool z_spin_lock_valid(struct k_spinlock *l)
{
	uintptr_t thread_cpu = l->thread_cpu;

	if (thread_cpu != 0U) {
		if ((thread_cpu & 3U) == _current_cpu->id) {
			return false;
		}
	}
	return true;
}

bool z_spin_unlock_valid(struct k_spinlock *l)
{
	if (l->thread_cpu != (_current_cpu->id | (uintptr_t)_current)) {
		return false;
	}
	l->thread_cpu = 0;
	return true;
}

void z_spin_lock_set_owner(struct k_spinlock *l)
{
	l->thread_cpu = _current_cpu->id | (uintptr_t)_current;
}

#ifdef CONFIG_KERNEL_COHERENCE
bool z_spin_lock_mem_coherent(struct k_spinlock *l)
{
	return arch_mem_coherent((void *)l);
}
#endif /* CONFIG_KERNEL_COHERENCE */

#endif /* CONFIG_SPIN_VALIDATE */

int z_impl_k_float_disable(struct k_thread *thread)
{
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	return arch_float_disable(thread);
#else
	return -ENOTSUP;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
}

int z_impl_k_float_enable(struct k_thread *thread, unsigned int options)
{
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	return arch_float_enable(thread, options);
#else
	return -ENOTSUP;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_float_disable(struct k_thread *thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	return z_impl_k_float_disable(thread);
}
#include <syscalls/k_float_disable_mrsh.c>
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_IRQ_OFFLOAD
/* Make offload_sem visible outside this file when testing, so that it
 * can be released externally when an error occurs.
 */
K_SEM_DEFINE(offload_sem, 1, 1);

void irq_offload(irq_offload_routine_t routine, const void *parameter)
{
	k_sem_take(&offload_sem, K_FOREVER);
	arch_irq_offload(routine, parameter);
	k_sem_give(&offload_sem);
}
#endif
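
/* Usage sketch (illustrative, typically found in test code): running a
 * routine in interrupt context. The routine name is hypothetical.
 *
 *	static void my_isr_work(const void *parameter)
 *	{
 *		...
 *	}
 *
 *	irq_offload(my_isr_work, NULL);
 */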

#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
#ifdef CONFIG_STACK_GROWS_UP
#error "Unsupported configuration for stack analysis"
#endif

int z_impl_k_thread_stack_space_get(const struct k_thread *thread,
				    size_t *unused_ptr)
{
	const uint8_t *start = (uint8_t *)thread->stack_info.start;
	size_t size = thread->stack_info.size;
	size_t unused = 0;
	const uint8_t *checked_stack = start;
	/* Take the address of any local variable as a shallow bound for the
	 * stack pointer. Addresses above it are guaranteed to be
	 * accessible.
	 */
	const uint8_t *stack_pointer = (const uint8_t *)&start;

	/* If we are currently running on the stack being analyzed, some
	 * memory management hardware will generate an exception if we
	 * read unused stack memory.
	 *
	 * This never happens when invoked from user mode, as user mode
	 * will always run this function on the privilege elevation stack.
	 */
	if ((stack_pointer > start) && (stack_pointer <= (start + size)) &&
	    IS_ENABLED(CONFIG_NO_UNUSED_STACK_INSPECTION)) {
		/* TODO: We could add an arch_ API call to temporarily
		 * disable the stack checking in the CPU, but this would
		 * need to be properly managed wrt context switches/interrupts
		 */
		return -ENOTSUP;
	}

	if (IS_ENABLED(CONFIG_STACK_SENTINEL)) {
		/* The first 4 bytes of the stack buffer are reserved for
		 * the sentinel value, so they won't be 0xAAAAAAAA for
		 * thread stacks.
		 *
		 * FIXME: thread->stack_info.start ought to reflect
		 * this!
		 */
		checked_stack += 4;
		size -= 4;
	}

	for (size_t i = 0; i < size; i++) {
		if ((checked_stack[i]) == 0xaaU) {
			unused++;
		} else {
			break;
		}
	}

	*unused_ptr = unused;

	return 0;
}
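
/* Usage sketch (illustrative, not part of this file): reporting the
 * never-used portion of the current thread's stack.
 *
 *	size_t unused;
 *
 *	if (k_thread_stack_space_get(k_current_get(), &unused) == 0) {
 *		printk("%zu bytes of stack never used\n", unused);
 *	}
 */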

#ifdef CONFIG_USERSPACE
int z_vrfy_k_thread_stack_space_get(const struct k_thread *thread,
				    size_t *unused_ptr)
{
	size_t unused;
	int ret;

	ret = Z_SYSCALL_OBJ(thread, K_OBJ_THREAD);
	CHECKIF(ret != 0) {
		return ret;
	}

	ret = z_impl_k_thread_stack_space_get(thread, &unused);
	CHECKIF(ret != 0) {
		return ret;
	}

	ret = z_user_to_copy(unused_ptr, &unused, sizeof(size_t));
	CHECKIF(ret != 0) {
		return ret;
	}

	return 0;
}
#include <syscalls/k_thread_stack_space_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_INIT_STACKS && CONFIG_THREAD_STACK_INFO */

#ifdef CONFIG_USERSPACE
static inline k_ticks_t z_vrfy_k_thread_timeout_remaining_ticks(
						const struct k_thread *t)
{
	Z_OOPS(Z_SYSCALL_OBJ(t, K_OBJ_THREAD));
	return z_impl_k_thread_timeout_remaining_ticks(t);
}
#include <syscalls/k_thread_timeout_remaining_ticks_mrsh.c>

static inline k_ticks_t z_vrfy_k_thread_timeout_expires_ticks(
						const struct k_thread *t)
{
	Z_OOPS(Z_SYSCALL_OBJ(t, K_OBJ_THREAD));
	return z_impl_k_thread_timeout_expires_ticks(t);
}
#include <syscalls/k_thread_timeout_expires_ticks_mrsh.c>
#endif

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
void z_thread_mark_switched_in(void)
{
#ifdef CONFIG_TRACING
	SYS_PORT_TRACING_FUNC(k_thread, switched_in);
#endif

#ifdef CONFIG_THREAD_RUNTIME_STATS
	struct k_thread *thread;

	thread = z_current_get();
#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
	thread->rt_stats.last_switched_in = timing_counter_get();
#else
	thread->rt_stats.last_switched_in = k_cycle_get_32();
#endif /* CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS */

#endif /* CONFIG_THREAD_RUNTIME_STATS */
}

void z_thread_mark_switched_out(void)
{
#ifdef CONFIG_THREAD_RUNTIME_STATS
#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
	timing_t now;
#else
	uint32_t now;
#endif /* CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS */

	uint64_t diff;
	struct k_thread *thread;

	thread = z_current_get();

	if (unlikely(thread->rt_stats.last_switched_in == 0)) {
		/* Has not run before */
		return;
	}

	if (unlikely(thread->base.thread_state == _THREAD_DUMMY)) {
		/* dummy thread has no stat struct */
		return;
	}

#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
	now = timing_counter_get();
	diff = timing_cycles_get(&thread->rt_stats.last_switched_in, &now);
#else
	now = k_cycle_get_32();
	diff = (uint64_t)(now - thread->rt_stats.last_switched_in);
	thread->rt_stats.last_switched_in = 0;
#endif /* CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS */

	thread->rt_stats.stats.execution_cycles += diff;

	threads_runtime_stats.execution_cycles += diff;
#endif /* CONFIG_THREAD_RUNTIME_STATS */

#ifdef CONFIG_TRACING
	SYS_PORT_TRACING_FUNC(k_thread, switched_out);
#endif
}

#ifdef CONFIG_THREAD_RUNTIME_STATS
int k_thread_runtime_stats_get(k_tid_t thread,
			       k_thread_runtime_stats_t *stats)
{
	if ((thread == NULL) || (stats == NULL)) {
		return -EINVAL;
	}

	(void)memcpy(stats, &thread->rt_stats.stats,
		     sizeof(thread->rt_stats.stats));

	return 0;
}

int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats)
{
	if (stats == NULL) {
		return -EINVAL;
	}

	(void)memcpy(stats, &threads_runtime_stats,
		     sizeof(threads_runtime_stats));

	return 0;
}
#endif /* CONFIG_THREAD_RUNTIME_STATS */
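
/* Usage sketch (illustrative, not part of this file): reading the
 * accumulated execution cycles for the current thread.
 *
 *	k_thread_runtime_stats_t stats;
 *
 *	if (k_thread_runtime_stats_get(k_current_get(), &stats) == 0) {
 *		printk("ran for %llu cycles\n", stats.execution_cycles);
 *	}
 */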

#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */