/*
 * Copyright (c) 2022, Meta
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "kernel_internal.h"

#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/kernel/thread_stack.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/bitarray.h>
#include <zephyr/sys/kobject.h>
#include <zephyr/internal/syscall_handler.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

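/* A sys_bitarray definition needs at least one bit, so fall back to a
 * single-bit array when the pool is disabled; the bit array goes unused
 * in that configuration.
 */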
#if CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0
#define BA_SIZE CONFIG_DYNAMIC_THREAD_POOL_SIZE
#else
#define BA_SIZE 1
#endif /* CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0 */

struct dyn_cb_data {
	k_tid_t tid;
	k_thread_stack_t *stack;
};

static K_THREAD_STACK_ARRAY_DEFINE(dynamic_stack, CONFIG_DYNAMIC_THREAD_POOL_SIZE,
				   CONFIG_DYNAMIC_THREAD_STACK_SIZE);
SYS_BITARRAY_DEFINE_STATIC(dynamic_ba, BA_SIZE);

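/* Carve a stack out of the static pool, using dynamic_ba to track which
 * slots are in use. Returns NULL when the requested size exceeds
 * CONFIG_DYNAMIC_THREAD_STACK_SIZE or when every slot is taken.
 */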
static k_thread_stack_t *z_thread_stack_alloc_pool(size_t size)
{
	int rv;
	size_t offset;
	k_thread_stack_t *stack;

	if (size > CONFIG_DYNAMIC_THREAD_STACK_SIZE) {
		LOG_DBG("stack size %zu is > pool stack size %d", size,
			CONFIG_DYNAMIC_THREAD_STACK_SIZE);
		return NULL;
	}

	rv = sys_bitarray_alloc(&dynamic_ba, 1, &offset);
	if (rv < 0) {
		LOG_DBG("unable to allocate stack from pool");
		return NULL;
	}

	__ASSERT_NO_MSG(offset < CONFIG_DYNAMIC_THREAD_POOL_SIZE);

	stack = (k_thread_stack_t *)&dynamic_stack[offset];

	return stack;
}

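/* Allocate a stack from the heap. A user-mode (K_USER) stack must be a
 * kernel object so access to it can be validated, hence the dependency on
 * CONFIG_DYNAMIC_OBJECTS; kernel-only stacks just need suitable alignment
 * and size rounding.
 */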
static k_thread_stack_t *z_thread_stack_alloc_dyn(size_t size, int flags)
{
	if ((flags & K_USER) == K_USER) {
#ifdef CONFIG_DYNAMIC_OBJECTS
		return k_object_alloc_size(K_OBJ_THREAD_STACK_ELEMENT, size);
#else
		/* Dynamic user stack needs a kobject, so if this option is not
		 * enabled we can't proceed.
		 */
		return NULL;
#endif /* CONFIG_DYNAMIC_OBJECTS */
	}

	return z_thread_aligned_alloc(Z_KERNEL_STACK_OBJ_ALIGN, K_KERNEL_STACK_LEN(size));
}

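/* Implementation of k_thread_stack_alloc(). The Kconfig preference decides
 * the order: try the heap allocator first and fall back to the pool
 * (CONFIG_DYNAMIC_THREAD_PREFER_ALLOC), or the other way around
 * (CONFIG_DYNAMIC_THREAD_PREFER_POOL). Returns NULL when neither source
 * can satisfy the request.
 */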
k_thread_stack_t *z_impl_k_thread_stack_alloc(size_t size, int flags)
{
	k_thread_stack_t *stack = NULL;

	if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_ALLOC)) {
		stack = z_thread_stack_alloc_dyn(size, flags);
		if (stack == NULL && CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0) {
			stack = z_thread_stack_alloc_pool(size);
		}
	} else if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_POOL)) {
		if (CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0) {
			stack = z_thread_stack_alloc_pool(size);
		}

		if ((stack == NULL) && IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) {
			stack = z_thread_stack_alloc_dyn(size, flags);
		}
	}

	return stack;
}
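
/* Example usage (a minimal sketch, not part of this file; my_entry and the
 * chosen size, priority, and flags are illustrative):
 *
 *	k_thread_stack_t *stack = k_thread_stack_alloc(2048, 0);
 *
 *	if (stack != NULL) {
 *		static struct k_thread my_thread;
 *
 *		k_thread_create(&my_thread, stack, 2048, my_entry,
 *				NULL, NULL, NULL, K_PRIO_PREEMPT(1), 0,
 *				K_NO_WAIT);
 *		k_thread_join(&my_thread, K_FOREVER);
 *		(void)k_thread_stack_free(stack);
 *	}
 */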

#ifdef CONFIG_USERSPACE
static inline k_thread_stack_t *z_vrfy_k_thread_stack_alloc(size_t size, int flags)
{
	return z_impl_k_thread_stack_alloc(size, flags);
}
#include <zephyr/syscalls/k_thread_stack_alloc_mrsh.c>
#endif /* CONFIG_USERSPACE */

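/* k_thread_foreach() callback: record the thread, if any, whose stack base
 * matches the stack being freed.
 */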
static void dyn_cb(const struct k_thread *thread, void *user_data)
{
	struct dyn_cb_data *const data = (struct dyn_cb_data *)user_data;

	if (data->stack == (k_thread_stack_t *)thread->stack_info.start) {
		__ASSERT(data->tid == NULL, "stack %p is associated with more than one thread!",
			 (void *)thread->stack_info.start);
		data->tid = (k_tid_t)thread;
	}
}

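/* Release a stack obtained from k_thread_stack_alloc(). Refuses with -EBUSY
 * while a live thread is still using the stack, and with -EINVAL if the
 * stack cannot be identified as dynamically allocated.
 */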
int z_impl_k_thread_stack_free(k_thread_stack_t *stack)
{
	struct dyn_cb_data data = {.stack = stack};

	/* Get the tid, if any, associated with this stack */
	k_thread_foreach(dyn_cb, &data);

	if (data.tid != NULL) {
		if (!(z_is_thread_state_set(data.tid, _THREAD_DUMMY) ||
		      z_is_thread_state_set(data.tid, _THREAD_DEAD))) {
			LOG_ERR("tid %p is in use!", data.tid);
			return -EBUSY;
		}
	}

	if (CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0) {
		if (IS_ARRAY_ELEMENT(dynamic_stack, stack)) {
			if (sys_bitarray_free(&dynamic_ba, 1, ARRAY_INDEX(dynamic_stack, stack))) {
				LOG_ERR("stack %p is not allocated!", stack);
				return -EINVAL;
			}

			return 0;
		}
	}

	if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) {
#ifdef CONFIG_USERSPACE
		if (k_object_find(stack)) {
			k_object_free(stack);
		} else {
			k_free(stack);
		}
#else
		k_free(stack);
#endif /* CONFIG_USERSPACE */
	} else {
		LOG_DBG("Invalid stack %p", stack);
		return -EINVAL;
	}

	return 0;
}


#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_stack_free(k_thread_stack_t *stack)
{
	/* The thread stack object must not be in an initialized state.
	 *
	 * Thread stack objects are initialized when the thread is created
	 * and de-initialized when the thread is destroyed. Since we can't
	 * free a stack that is in use, we have to check that the caller
	 * has access to the object but that it is not in use anymore.
	 */
	K_OOPS(K_SYSCALL_OBJ_NEVER_INIT(stack, K_OBJ_THREAD_STACK_ELEMENT));

	return z_impl_k_thread_stack_free(stack);
}
#include <zephyr/syscalls/k_thread_stack_free_mrsh.c>
#endif /* CONFIG_USERSPACE */