/*
 * Copyright (c) 2022, Meta
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/ztest.h>

#define TIMEOUT_MS 500

#define POOL_SIZE 28672

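/*
 * Size of one dynamically allocated stack object: user-capable stacks and
 * kernel-only stacks have different reserved-space and alignment
 * requirements, so the matching length macro is used for each configuration.
 */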
#ifdef CONFIG_USERSPACE
#define STACK_OBJ_SIZE K_THREAD_STACK_LEN(CONFIG_DYNAMIC_THREAD_STACK_SIZE)
#else
#define STACK_OBJ_SIZE K_KERNEL_STACK_LEN(CONFIG_DYNAMIC_THREAD_STACK_SIZE)
#endif

#define MAX_HEAP_STACKS (POOL_SIZE / STACK_OBJ_SIZE)

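/* Heap that backs heap-based stack allocations; handed to the test thread
 * via k_thread_heap_assign() in the suite setup below
 */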
K_HEAP_DEFINE(stack_heap, POOL_SIZE);

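/* One completion flag per thread, sized for whichever allocator yields more stacks */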
ZTEST_DMEM bool tflag[MAX(CONFIG_DYNAMIC_THREAD_POOL_SIZE, MAX_HEAP_STACKS)];

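/* Common thread entry point: set the completion flag passed via arg1 and exit */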
static void func(void *arg1, void *arg2, void *arg3)
{
	bool *flag = (bool *)arg1;

	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);

	printk("Hello, dynamic world!\n");

	*flag = true;
}

/** @brief Check we can create a thread from userspace, using dynamic objects */
ZTEST_USER(dynamic_thread_stack, test_dynamic_thread_stack_userspace_dyn_obj)
{
	k_tid_t tid;
	struct k_thread *th;
	k_thread_stack_t *stack;

	if (!IS_ENABLED(CONFIG_USERSPACE)) {
		ztest_test_skip();
	}

	if (!IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_ALLOC)) {
		ztest_test_skip();
	}

	if (!IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) {
		ztest_test_skip();
	}

	stack = k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE, K_USER);
	zassert_not_null(stack);

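	/* the thread object itself is allocated dynamically as well */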
	th = k_object_alloc(K_OBJ_THREAD);
	zassert_not_null(th);

	tid = k_thread_create(th, stack, CONFIG_DYNAMIC_THREAD_STACK_SIZE, func,
			      &tflag[0], NULL, NULL, 0,
			      K_USER | K_INHERIT_PERMS, K_NO_WAIT);

	zassert_not_null(tid);

	zassert_ok(k_thread_join(tid, K_MSEC(TIMEOUT_MS)));
	zassert_true(tflag[0]);
	zassert_ok(k_thread_stack_free(stack));
}

/** @brief Exercise the pool-based thread stack allocator */
ZTEST(dynamic_thread_stack, test_dynamic_thread_stack_pool)
{
	static k_tid_t tid[CONFIG_DYNAMIC_THREAD_POOL_SIZE];
	static struct k_thread th[CONFIG_DYNAMIC_THREAD_POOL_SIZE];
	static k_thread_stack_t *stack[CONFIG_DYNAMIC_THREAD_POOL_SIZE];

	if (!IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_POOL)) {
		ztest_test_skip();
	}

	/* allocate all thread stacks from the pool */
	for (size_t i = 0; i < CONFIG_DYNAMIC_THREAD_POOL_SIZE; ++i) {
		stack[i] = k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE,
						IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0);

		zassert_not_null(stack[i]);
	}

	if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) {
		/* ensure one more thread stack can be allocated from the heap when the pool is depleted */
		zassert_ok(k_thread_stack_free(
			k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE,
					     IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0)));
	} else {
		/* ensure that no more thread stacks can be allocated from the pool */
		zassert_is_null(k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE,
						     IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0));
	}

	/* spawn our threads */
	for (size_t i = 0; i < CONFIG_DYNAMIC_THREAD_POOL_SIZE; ++i) {
		tflag[i] = false;
		tid[i] = k_thread_create(&th[i], stack[i],
					 CONFIG_DYNAMIC_THREAD_STACK_SIZE, func,
					 &tflag[i], NULL, NULL, 0,
					 K_USER | K_INHERIT_PERMS, K_NO_WAIT);
	}

	/* join all threads and check that flags have been set */
	for (size_t i = 0; i < CONFIG_DYNAMIC_THREAD_POOL_SIZE; ++i) {
		zassert_ok(k_thread_join(tid[i], K_MSEC(TIMEOUT_MS)));
		zassert_true(tflag[i]);
	}

	/* clean up stacks allocated from the pool */
	for (size_t i = 0; i < CONFIG_DYNAMIC_THREAD_POOL_SIZE; ++i) {
		zassert_ok(k_thread_stack_free(stack[i]));
	}
}

/** @brief Exercise the heap-based thread stack allocator */
ZTEST(dynamic_thread_stack, test_dynamic_thread_stack_alloc)
{
	size_t N;
	static k_tid_t tid[MAX_HEAP_STACKS];
	static struct k_thread th[MAX_HEAP_STACKS];
	static k_thread_stack_t *stack[MAX_HEAP_STACKS];

	if (!IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_ALLOC)) {
		ztest_test_skip();
	}

	if (!IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) {
		ztest_test_skip();
	}

	/* allocate as many thread stacks as possible from the heap */
	for (N = 0; N < MAX_HEAP_STACKS; ++N) {
		stack[N] = k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE,
						IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0);
		if (stack[N] == NULL) {
			break;
		}
	}

	/* spawn our threads */
	for (size_t i = 0; i < N; ++i) {
		tflag[i] = false;
		tid[i] = k_thread_create(&th[i], stack[i],
					 CONFIG_DYNAMIC_THREAD_STACK_SIZE, func,
					 &tflag[i], NULL, NULL, 0,
					 K_USER | K_INHERIT_PERMS, K_NO_WAIT);
	}

	/* join all threads and check that flags have been set */
	for (size_t i = 0; i < N; ++i) {
		zassert_ok(k_thread_join(tid[i], K_MSEC(TIMEOUT_MS)));
		zassert_true(tflag[i]);
	}

	/* clean up stacks allocated from the heap */
	for (size_t i = 0; i < N; ++i) {
		zassert_ok(k_thread_stack_free(stack[i]));
	}
}

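/* Semaphore and fault bookkeeping shared with the stack-permission test below */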
K_SEM_DEFINE(perm_sem, 0, 1);
ZTEST_BMEM static volatile bool expect_fault;
ZTEST_BMEM static volatile unsigned int expected_reason;

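/* Arm the fatal-error handler with the fault reason the next operation is expected to raise */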
static void set_fault(unsigned int reason)
{
	expect_fault = true;
	expected_reason = reason;
	compiler_barrier();
}

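/* Override the default fatal-error handler: absorb the expected fault, fail the test otherwise */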
void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{
	if (expect_fault) {
		if (expected_reason == reason) {
			printk("System error was expected\n");
			expect_fault = false;
		} else {
			printk("Wrong fault reason, expecting %d\n",
			       expected_reason);
			TC_END_REPORT(TC_FAIL);
			k_fatal_halt(reason);
		}
	} else {
		printk("Unexpected fault during test\n");
		TC_END_REPORT(TC_FAIL);
		k_fatal_halt(reason);
	}
}

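/* Well-behaved entry point: block on the semaphore until the parent gives it */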
static void perm_func(void *arg1, void *arg2, void *arg3)
{
	k_sem_take((struct k_sem *)arg1, K_FOREVER);
}

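/* Violating entry point: free a stack this thread was never granted, which should fault */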
static void perm_func_violator(void *arg1, void *arg2, void *arg3)
{
	(void)k_thread_stack_free((k_thread_stack_t *)arg2);

	zassert_unreachable("should not reach here");
}

/** @brief Exercise stack permissions */
ZTEST(dynamic_thread_stack, test_dynamic_thread_stack_permission)
{
	static k_tid_t tid[2];
	static struct k_thread th[2];
	static k_thread_stack_t *stack[2];

	if (!IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_ALLOC)) {
		ztest_test_skip();
	}

	if (!IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) {
		ztest_test_skip();
	}

	if (!IS_ENABLED(CONFIG_USERSPACE)) {
		ztest_test_skip();
	}

	stack[0] = k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE, K_USER);
	zassert_not_null(stack[0]);

	stack[1] = k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE, K_USER);
	zassert_not_null(stack[1]);

	k_thread_access_grant(k_current_get(), &perm_sem);

	/* First thread inherits permissions */
	tid[0] = k_thread_create(&th[0], stack[0], CONFIG_DYNAMIC_THREAD_STACK_SIZE, perm_func,
				 &perm_sem, NULL, NULL, 0, K_USER | K_INHERIT_PERMS, K_NO_WAIT);
	zassert_not_null(tid[0]);

	/* Second thread will have access to specific kobjects only */
	tid[1] = k_thread_create(&th[1], stack[1], CONFIG_DYNAMIC_THREAD_STACK_SIZE,
				 perm_func_violator, &perm_sem, stack[0], NULL, 0, K_USER,
				 K_FOREVER);
	zassert_not_null(tid[1]);
	k_thread_access_grant(tid[1], &perm_sem);
	k_thread_access_grant(tid[1], &stack[1]);

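	/* the violator's k_thread_stack_free(stack[0]) call should trigger a kernel oops */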
	set_fault(K_ERR_KERNEL_OOPS);

	k_thread_start(tid[1]);

	/* join the violator thread; the expected oops terminates it */
	zassert_ok(k_thread_join(tid[1], K_MSEC(TIMEOUT_MS)));

	k_sem_give(&perm_sem);
	zassert_ok(k_thread_join(tid[0], K_MSEC(TIMEOUT_MS)));

	/* clean up stacks allocated from the heap */
	zassert_ok(k_thread_stack_free(stack[0]));
	zassert_ok(k_thread_stack_free(stack[1]));
}

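/* Suite setup: give the test thread a resource heap for dynamic allocations */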
static void *dynamic_thread_stack_setup(void)
{
	k_thread_heap_assign(k_current_get(), &stack_heap);
	return NULL;
}

ZTEST_SUITE(dynamic_thread_stack, NULL, dynamic_thread_stack_setup, NULL, NULL, NULL);