1 /*
2  * Copyright (c) 2022, Meta
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include "kernel_internal.h"
8 
9 #include <zephyr/kernel.h>
10 #include <zephyr/kernel/thread_stack.h>
11 #include <zephyr/logging/log.h>
12 #include <zephyr/sys/bitarray.h>
13 #include <zephyr/sys/kobject.h>
14 #include <zephyr/syscall_handler.h>
15 
16 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
17 
18 #if CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0
19 #define BA_SIZE CONFIG_DYNAMIC_THREAD_POOL_SIZE
20 #else
21 #define BA_SIZE 1
22 #endif
23 
/* Context passed to dyn_cb() via k_thread_foreach(): searches for the
 * thread (if any) whose stack matches @stack.
 */
struct dyn_cb_data {
	k_tid_t tid;             /* thread found using @stack, or NULL */
	k_thread_stack_t *stack; /* stack buffer being searched for */
};
28 
29 static K_THREAD_STACK_ARRAY_DEFINE(dynamic_stack, CONFIG_DYNAMIC_THREAD_POOL_SIZE,
30 				   CONFIG_DYNAMIC_THREAD_STACK_SIZE);
31 SYS_BITARRAY_DEFINE_STATIC(dynamic_ba, BA_SIZE);
32 
z_thread_stack_alloc_dyn(size_t align,size_t size)33 static k_thread_stack_t *z_thread_stack_alloc_dyn(size_t align, size_t size)
34 {
35 	return z_thread_aligned_alloc(align, size);
36 }
37 
z_thread_stack_alloc_pool(size_t size)38 static k_thread_stack_t *z_thread_stack_alloc_pool(size_t size)
39 {
40 	int rv;
41 	size_t offset;
42 	k_thread_stack_t *stack;
43 
44 	if (size > CONFIG_DYNAMIC_THREAD_STACK_SIZE) {
45 		LOG_DBG("stack size %zu is > pool stack size %d", size,
46 			CONFIG_DYNAMIC_THREAD_STACK_SIZE);
47 		return NULL;
48 	}
49 
50 	rv = sys_bitarray_alloc(&dynamic_ba, 1, &offset);
51 	if (rv < 0) {
52 		LOG_DBG("unable to allocate stack from pool");
53 		return NULL;
54 	}
55 
56 	__ASSERT_NO_MSG(offset < CONFIG_DYNAMIC_THREAD_POOL_SIZE);
57 
58 	stack = (k_thread_stack_t *)&dynamic_stack[offset];
59 
60 	return stack;
61 }
62 
stack_alloc_dyn(size_t size,int flags)63 static k_thread_stack_t *stack_alloc_dyn(size_t size, int flags)
64 {
65 	if ((flags & K_USER) == K_USER) {
66 #ifdef CONFIG_DYNAMIC_OBJECTS
67 		return k_object_alloc_size(K_OBJ_THREAD_STACK_ELEMENT, size);
68 #else
69 		/* Dynamic user stack needs a kobject, so if this option is not
70 		 * enabled we can't proceed.
71 		 */
72 		return NULL;
73 #endif
74 	}
75 
76 	return z_thread_stack_alloc_dyn(Z_KERNEL_STACK_OBJ_ALIGN,
77 			Z_KERNEL_STACK_SIZE_ADJUST(size));
78 }
79 
z_impl_k_thread_stack_alloc(size_t size,int flags)80 k_thread_stack_t *z_impl_k_thread_stack_alloc(size_t size, int flags)
81 {
82 	k_thread_stack_t *stack = NULL;
83 
84 	if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_ALLOC)) {
85 		stack = stack_alloc_dyn(size, flags);
86 		if (stack == NULL && CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0) {
87 			stack = z_thread_stack_alloc_pool(size);
88 		}
89 	} else if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_POOL)) {
90 		if (CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0) {
91 			stack = z_thread_stack_alloc_pool(size);
92 		}
93 
94 		if ((stack == NULL) && IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) {
95 			stack = stack_alloc_dyn(size, flags);
96 		}
97 	}
98 
99 	return stack;
100 }
101 
102 #ifdef CONFIG_USERSPACE
/* Syscall verification handler: no arguments need validation, so this
 * forwards directly to the implementation.
 */
static inline k_thread_stack_t *z_vrfy_k_thread_stack_alloc(size_t size, int flags)
{
	return z_impl_k_thread_stack_alloc(size, flags);
}
107 #include <syscalls/k_thread_stack_alloc_mrsh.c>
108 #endif
109 
dyn_cb(const struct k_thread * thread,void * user_data)110 static void dyn_cb(const struct k_thread *thread, void *user_data)
111 {
112 	struct dyn_cb_data *const data = (struct dyn_cb_data *)user_data;
113 
114 	if (data->stack == (k_thread_stack_t *)thread->stack_info.start) {
115 		__ASSERT(data->tid == NULL, "stack %p is associated with more than one thread!",
116 			 (void *)thread->stack_info.start);
117 		data->tid = (k_tid_t)thread;
118 	}
119 }
120 
/* Free a stack obtained from k_thread_stack_alloc().
 *
 * Returns 0 on success, -EBUSY if a live thread is still using the stack,
 * or -EINVAL if the stack was not dynamically allocated (or its thread's
 * state cannot be read).
 */
int z_impl_k_thread_stack_free(k_thread_stack_t *stack)
{
	char state_buf[16] = {0};
	struct dyn_cb_data data = {.stack = stack};

	/* Get a possible tid associated with stack */
	k_thread_foreach(dyn_cb, &data);

	if (data.tid != NULL) {
		/* Check if thread is in use */
		if (k_thread_state_str(data.tid, state_buf, sizeof(state_buf)) != state_buf) {
			LOG_ERR("tid %p is invalid!", data.tid);
			return -EINVAL;
		}

		/* Freeing is only allowed once the owning thread is a dummy
		 * or has terminated ("dead"). The previous condition,
		 * !(strcmp("dummy", ...) == 0) || (strcmp("dead", ...) == 0),
		 * negated only the first comparison and so rejected dead
		 * threads as "in use" too.
		 */
		if ((strcmp("dummy", state_buf) != 0) && (strcmp("dead", state_buf) != 0)) {
			LOG_ERR("tid %p is in use!", data.tid);
			return -EBUSY;
		}
	}

	if (CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0) {
		/* Pool stacks are returned by releasing their bitmap slot */
		if (IS_ARRAY_ELEMENT(dynamic_stack, stack)) {
			if (sys_bitarray_free(&dynamic_ba, 1, ARRAY_INDEX(dynamic_stack, stack))) {
				LOG_ERR("stack %p is not allocated!", stack);
				return -EINVAL;
			}

			return 0;
		}
	}

	if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) {
#ifdef CONFIG_USERSPACE
		/* User stacks are kobjects and take the kobject free path */
		if (z_object_find(stack)) {
			k_object_free(stack);
		} else {
			k_free(stack);
		}
#else
		k_free(stack);
#endif
	} else {
		LOG_ERR("Invalid stack %p", stack);
		return -EINVAL;
	}

	return 0;
}
170 
171 #ifdef CONFIG_USERSPACE
/* Syscall verification handler: the implementation validates the stack
 * pointer itself, so this forwards directly.
 */
static inline int z_vrfy_k_thread_stack_free(k_thread_stack_t *stack)
{
	return z_impl_k_thread_stack_free(stack);
}
176 #include <syscalls/k_thread_stack_free_mrsh.c>
177 #endif
178