/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/ztest.h>
#include <zephyr/syscall_handler.h>
#include <kernel_internal.h>

#include "test_syscall.h"

/*
 * Stack testing
 */
struct k_thread test_thread;
#define NUM_STACKS 3
#define STEST_STACKSIZE (512 + CONFIG_TEST_EXTRA_STACK_SIZE)
K_THREAD_STACK_DEFINE(user_stack, STEST_STACKSIZE);
K_THREAD_STACK_ARRAY_DEFINE(user_stack_array, NUM_STACKS, STEST_STACKSIZE);
K_KERNEL_STACK_DEFINE(kern_stack, STEST_STACKSIZE);
K_KERNEL_STACK_ARRAY_DEFINE(kern_stack_array, NUM_STACKS, STEST_STACKSIZE);

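/* K_THREAD_STACK_DEFINE declares a stack object that may host user
 * threads, carrying extra alignment and reserved space for memory
 * protection; K_KERNEL_STACK_DEFINE declares a supervisor-only stack
 * with smaller overhead. The scenarios below exercise both flavors,
 * singly and as arrays.
 */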
struct foo {
	int bar;

	K_KERNEL_STACK_MEMBER(stack, STEST_STACKSIZE);
	int baz;
};

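/* __kstackmem places the containing object in the kernel stack memory
 * section, so the K_KERNEL_STACK_MEMBER above should be aligned and
 * mapped like a stack declared with K_KERNEL_STACK_DEFINE.
 */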
__kstackmem struct foo stest_member_stack;

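/* stack_info_get() is a test syscall (see test_syscall.h): z_impl_ is
 * the kernel-side implementation and, when userspace is enabled, the
 * z_vrfy_ wrapper below validates arguments from user threads before
 * the generated marshalling code dispatches to it.
 */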
void z_impl_stack_info_get(char **start_addr, size_t *size)
{
	*start_addr = (char *)k_current_get()->stack_info.start;
	*size = k_current_get()->stack_info.size;
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_stack_info_get(char **start_addr,
					 size_t *size)
{
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(start_addr, sizeof(uintptr_t)));
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(size, sizeof(size_t)));

	z_impl_stack_info_get(start_addr, size);
}
#include <syscalls/stack_info_get_mrsh.c>

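/* check_perms() reports whether the current thread may access a buffer:
 * it returns arch_buffer_validate()'s result, 0 when access of the given
 * type (write != 0 for write access) is permitted.
 */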
int z_impl_check_perms(void *addr, size_t size, int write)
{
	return arch_buffer_validate(addr, size, write);
}

static inline int z_vrfy_check_perms(void *addr, size_t size, int write)
{
	return z_impl_check_perms((void *)addr, size, write);
}
#include <syscalls/check_perms_mrsh.c>
#endif /* CONFIG_USERSPACE */

/* Global data structure with object information, used by
 * stack_buffer_scenarios
 */
ZTEST_BMEM struct scenario_data {
	k_thread_stack_t *stack;

	/* If this was declared with K_THREAD_STACK_DEFINE and not
	 * K_KERNEL_STACK_DEFINE
	 */
	bool is_user;

	/* Stack size stored in kernel object metadata if a user stack */
	size_t metadata_size;

	/* Return value of sizeof(stack) */
	size_t object_size;

	/* Return value of K_{THREAD|KERNEL}_STACK_SIZEOF(stack) */
	size_t reported_size;

	/* Original size argument passed to K_{THREAD|KERNEL}_STACK_DEFINE */
	size_t declared_size;

	/* Whether this stack is part of an array of thread stacks */
	bool is_array;
} scenario_data;

void stack_buffer_scenarios(void)
{
	k_thread_stack_t *stack_obj = scenario_data.stack;
	size_t obj_size = scenario_data.object_size;
	size_t stack_size, unused, carveout, reserved, alignment, adjusted;
	uint8_t val = 0;
	char *stack_start, *stack_ptr, *stack_end, *obj_start, *obj_end;
	char *stack_buf;
	volatile char *pos;
	int ret, expected;
	uintptr_t base = (uintptr_t)stack_obj;
	bool is_usermode;
	long int end_space;

#ifdef CONFIG_USERSPACE
	is_usermode = arch_is_user_context();
#else
	is_usermode = false;
#endif
	/* Dump interesting information */
	stack_info_get(&stack_start, &stack_size);
	printk(" - Thread reports buffer %p size %zu\n", stack_start,
	       stack_size);

#ifdef CONFIG_USERSPACE
	if (scenario_data.is_user) {
		reserved = K_THREAD_STACK_RESERVED;
		stack_buf = Z_THREAD_STACK_BUFFER(stack_obj);
		/* always use the original size here */
		alignment = Z_THREAD_STACK_OBJ_ALIGN(STEST_STACKSIZE);
	} else
#endif
	{
		reserved = K_KERNEL_STACK_RESERVED;
		stack_buf = Z_KERNEL_STACK_BUFFER(stack_obj);
		alignment = Z_KERNEL_STACK_OBJ_ALIGN;
	}

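	/* Derived layout of a stack object, as checked below:
	 *
	 *   obj_start                   start of the stack object
	 *   stack_buf                   obj_start + reserved
	 *   stack_buf .. stack_start    carve-out, if any
	 *   stack_start .. stack_end    region reported in stack_info
	 *   stack_end .. obj_end        rounding space (stack arrays only)
	 */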
	stack_end = stack_start + stack_size;
	obj_end = (char *)stack_obj + obj_size;
	obj_start = (char *)stack_obj;

	/* Assert that the created stack object, with the reserved data
	 * removed, can hold a thread buffer of STEST_STACKSIZE
	 */
	zassert_true(STEST_STACKSIZE <= (obj_size - reserved),
		     "bad stack size in object");

	/* Check that the stack info in the thread marks a region
	 * completely contained within the stack object
	 */
	zassert_true(stack_end <= obj_end,
		     "stack size in thread struct out of bounds (overflow)");
	zassert_true(stack_start >= obj_start,
		     "stack size in thread struct out of bounds (underflow)");

	/* Check that the base of the stack is aligned properly. */
	zassert_true(base % alignment == 0,
		     "stack base address %p not aligned to %zu",
		     stack_obj, alignment);

	/* Check that the entire stack buffer is read/writable */
	printk(" - check read/write to stack buffer\n");

	/* The address of this stack variable is guaranteed to be part of
	 * the active stack, and close to the actual stack pointer.
	 * Some CPUs have hardware stack overflow detection which
	 * faults on memory access within the stack buffer but below
	 * the stack pointer.
	 *
	 * The first test does a direct read & write starting at the
	 * estimated stack pointer, up to the highest addresses in the
	 * buffer; &val is close enough to the actual stack pointer.
	 */
	stack_ptr = &val;
	for (pos = stack_ptr; pos < stack_end; pos++) {
		/* pos is volatile so this doesn't get optimized out */
		val = *pos;
		*pos = val;
	}

#ifdef CONFIG_USERSPACE
	if (is_usermode) {
		/* If we're in user mode, check every byte in the stack buffer
		 * to ensure that the thread has permissions on it.
		 */
		for (pos = stack_start; pos < stack_end; pos++) {
			zassert_false(check_perms((void *)pos, 1, 1),
				      "bad MPU/MMU permission on stack buffer at address %p",
				      pos);
		}

		/* Bounds check the user accessible area; it shouldn't extend
		 * before or after the stack. Because of memory protection HW
		 * alignment constraints, we test the end of the stack object
		 * and not the buffer.
		 */
		zassert_true(check_perms(stack_start - 1, 1, 0),
			     "user mode access to memory %p before start of stack object",
			     stack_start - 1);
		zassert_true(check_perms(stack_end, 1, 0),
			     "user mode access to memory %p past end of stack object",
			     stack_end);

		/*
		 * The reserved area, when it exists, is dropped at run time
		 * when transitioning to user mode on RISC-V. Reinstate that
		 * reserved area here for the next tests to work properly
		 * with a static non-zero K_THREAD_STACK_RESERVED definition.
		 */
		if (IS_ENABLED(CONFIG_RISCV) &&
		    IS_ENABLED(CONFIG_GEN_PRIV_STACKS) &&
		    K_THREAD_STACK_RESERVED != 0) {
			stack_start += reserved;
			stack_size -= reserved;
		}

		zassert_true(stack_size <= obj_size - reserved,
			     "bad stack size %zu in thread struct",
			     stack_size);
	}
#endif
	carveout = stack_start - stack_buf;
	printk(" - Carved-out space in buffer: %zu\n", carveout);
	zassert_true(carveout < stack_size,
		     "Suspicious carve-out space reported");
	/* 0 unless this is a stack array */
	end_space = obj_end - stack_end;
	printk(" - Unused object space: %ld\n", end_space);

	/* For all stacks, when k_thread_create() is called with a stack
	 * object, it is equivalent to pass either the original requested
	 * stack size, or the return value of K_*_STACK_SIZEOF() for that
	 * stack object.
	 *
	 * When the stack is actually instantiated, the buffer expands to
	 * fill any space added by size rounding, except the per-member
	 * rounding space of stack array members.
	 */
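	/* A hypothetical illustration of the rounding: on an arch with no
	 * reserved space and 8-byte stack alignment, a kernel stack
	 * declared with size 500 would be adjusted up to 504, and
	 * K_KERNEL_STACK_SIZEOF() would report all 504 bytes as usable.
	 */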
	if (!scenario_data.is_array) {
		/* These should be exactly the same. We have an equivalence
		 * relation: for stacks declared with:
		 *
		 * K_THREAD_STACK_DEFINE(my_stack, X);
		 * Z_THREAD_STACK_SIZE_ADJUST(X) - K_THREAD_STACK_RESERVED ==
		 *	K_THREAD_STACK_SIZEOF(my_stack)
		 *
		 * K_KERNEL_STACK_DEFINE(my_kern_stack, Y);
		 * Z_KERNEL_STACK_SIZE_ADJUST(Y) - K_KERNEL_STACK_RESERVED ==
		 *	K_KERNEL_STACK_SIZEOF(my_kern_stack)
		 */
#ifdef CONFIG_USERSPACE
		/* Not defined if user mode disabled, all stacks are kernel stacks */
		if (scenario_data.is_user) {
			adjusted = Z_THREAD_STACK_SIZE_ADJUST(scenario_data.declared_size);
		} else
#endif
		{
			adjusted = Z_KERNEL_STACK_SIZE_ADJUST(scenario_data.declared_size);
		}
		adjusted -= reserved;

		zassert_equal(end_space, 0, "unexpected unused space\n");
	} else {
		/* For arrays there may be unused space per-object. This is
		 * because every single array member must be aligned to the
		 * value returned by Z_{KERNEL|THREAD}_STACK_OBJ_ALIGN.
		 *
		 * If we define:
		 *
		 * K_{THREAD|KERNEL}_STACK_ARRAY_DEFINE(my_stack_array, num_stacks, X);
		 *
		 * we do not auto-expand usable space to cover this unused
		 * area. Doing so would require some way for the kernel to
		 * know that a stack object pointer passed in is an array
		 * member, which is currently not possible.
		 *
		 * The equivalence here is computable with:
		 * K_THREAD_STACK_SIZEOF(my_stack_array[0]) ==
		 *	K_THREAD_STACK_LEN(X) - K_THREAD_STACK_RESERVED
		 */
		if (scenario_data.is_user) {
			adjusted = K_THREAD_STACK_LEN(scenario_data.declared_size);
		} else {
			adjusted = Z_KERNEL_STACK_LEN(scenario_data.declared_size);
		}
		adjusted -= reserved;

		/* At least make sure it's not negative; if it is, stack_info
		 * isn't set right
		 */
		zassert_true(end_space >= 0, "bad stack bounds in stack_info");
	}

	zassert_true(adjusted == scenario_data.reported_size,
		     "size mismatch: adjusted %zu vs. reported %zu",
		     adjusted, scenario_data.reported_size);

	ret = k_thread_stack_space_get(k_current_get(), &unused);
	if (!is_usermode && IS_ENABLED(CONFIG_NO_UNUSED_STACK_INSPECTION)) {
		expected = -ENOTSUP;
	} else {
		expected = 0;
	}

	zassert_equal(ret, expected, "unexpected return value %d", ret);
	if (ret == 0) {
		printk("self-reported unused stack space: %zu\n", unused);
	}
}

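/* Entry point for scenario threads. If p1 is true, re-enter this
 * function in user mode via k_thread_user_mode_enter(), which does not
 * return; the second pass then runs the scenario checks.
 */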
void stest_thread_entry(void *p1, void *p2, void *p3)
{
	bool drop = (bool)p1;

	if (drop) {
		k_thread_user_mode_enter(stest_thread_entry, (void *)false,
					 p2, p3);
	} else {
		stack_buffer_scenarios();
	}
}

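/* Run one scenario pass in a fresh thread at cooperative priority -1,
 * optionally dropping to user mode, and report the thread's unused
 * stack space after it joins.
 */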
void stest_thread_launch(uint32_t flags, bool drop)
{
	int ret;
	size_t unused;

	k_thread_create(&test_thread, scenario_data.stack, STEST_STACKSIZE,
			stest_thread_entry,
			(void *)drop, NULL, NULL,
			-1, flags, K_NO_WAIT);
	k_thread_join(&test_thread, K_FOREVER);

	ret = k_thread_stack_space_get(&test_thread, &unused);
	zassert_equal(ret, 0, "failed to calculate unused stack space\n");
	printk("target thread unused stack space: %zu\n", unused);
}

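/* Record the properties of one stack object in scenario_data, then run
 * the checks in supervisor mode and, when supported, user mode. A stack
 * declared with K_THREAD_STACK_DEFINE has kernel object metadata
 * findable via z_object_find(); kernel-only stacks do not, which is how
 * is_user is determined.
 */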
void scenario_entry(void *stack_obj, size_t obj_size, size_t reported_size,
		    size_t declared_size, bool is_array)
{
	bool is_user;
	size_t metadata_size;

#ifdef CONFIG_USERSPACE
	struct z_object *zo;

	zo = z_object_find(stack_obj);
	if (zo != NULL) {
		is_user = true;
#ifdef CONFIG_GEN_PRIV_STACKS
		metadata_size = zo->data.stack_data->size;
#else
		metadata_size = zo->data.stack_size;
#endif /* CONFIG_GEN_PRIV_STACKS */
		printk("stack may host user thread, size in metadata is %zu\n",
		       metadata_size);
	} else
#endif /* CONFIG_USERSPACE */
	{
		metadata_size = 0;
		is_user = false;
	}

	scenario_data.stack = stack_obj;
	scenario_data.object_size = obj_size;
	scenario_data.is_user = is_user;
	scenario_data.metadata_size = metadata_size;
	scenario_data.reported_size = reported_size;
	scenario_data.declared_size = declared_size;
	scenario_data.is_array = is_array;

	printk("Stack object %p[%zu]\n", stack_obj, obj_size);
	printk(" - Testing supervisor mode\n");
	stest_thread_launch(0, false);

#ifdef CONFIG_USERSPACE
	if (is_user) {
		printk(" - Testing user mode (direct launch)\n");
		stest_thread_launch(K_USER | K_INHERIT_PERMS, false);
		printk(" - Testing user mode (drop)\n");
		stest_thread_launch(K_INHERIT_PERMS, true);
	}
#endif /* CONFIG_USERSPACE */
}

/**
 * @brief Test kernel provides user thread read/write access to its own stack
 * memory buffer
 *
 * @details Thread can access its own stack memory buffer and perform
 * read/write operations.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_thread_stack, test_stack_buffer)
{
	printk("Reserved space (thread stacks): %zu\n",
	       K_THREAD_STACK_RESERVED);
	printk("Reserved space (kernel stacks): %zu\n",
	       K_KERNEL_STACK_RESERVED);

	printk("CONFIG_ISR_STACK_SIZE %zu\n", (size_t)CONFIG_ISR_STACK_SIZE);

	unsigned int num_cpus = arch_num_cpus();

	for (int i = 0; i < num_cpus; i++) {
		printk("irq stack %d: %p size %zu\n",
		       i, &z_interrupt_stacks[i],
		       sizeof(z_interrupt_stacks[i]));
	}

	printk("Provided stack size: %u\n", STEST_STACKSIZE);

	printk("\ntesting user_stack\n");
	scenario_entry(user_stack, sizeof(user_stack), K_THREAD_STACK_SIZEOF(user_stack),
		       STEST_STACKSIZE, false);

	for (int i = 0; i < NUM_STACKS; i++) {
		printk("\ntesting user_stack_array[%d]\n", i);
		scenario_entry(user_stack_array[i],
			       sizeof(user_stack_array[i]),
			       K_THREAD_STACK_SIZEOF(user_stack_array[i]),
			       STEST_STACKSIZE, true);
	}

	printk("\ntesting kern_stack\n");
	scenario_entry(kern_stack, sizeof(kern_stack), K_KERNEL_STACK_SIZEOF(kern_stack),
		       STEST_STACKSIZE, false);

	for (int i = 0; i < NUM_STACKS; i++) {
		printk("\ntesting kern_stack_array[%d]\n", i);
		scenario_entry(kern_stack_array[i],
			       sizeof(kern_stack_array[i]),
			       K_KERNEL_STACK_SIZEOF(kern_stack_array[i]),
			       STEST_STACKSIZE, true);
	}

	printk("\ntesting stest_member_stack\n");
	scenario_entry(&stest_member_stack.stack,
		       sizeof(stest_member_stack.stack),
		       K_KERNEL_STACK_SIZEOF(stest_member_stack.stack),
		       STEST_STACKSIZE, false);
}

void no_op_entry(void *p1, void *p2, void *p3)
{
	printk("hi! bye!\n");

#ifdef CONFIG_DYNAMIC_OBJECTS
	/* Allocate a dynamic kernel object, which gets freed on thread
	 * cleanup since this thread has the only reference.
	 */
	struct k_sem *dyn_sem = k_object_alloc(K_OBJ_SEM);

	k_sem_init(dyn_sem, 1, 1);
	printk("allocated semaphore %p\n", dyn_sem);
#endif
	/* thread self-aborts, triggering idle thread cleanup */
}

/**
 * @brief Show that the idle thread stack size is correct
 *
 * The idle thread has to occasionally clean up self-exiting threads.
 * Exercise this, show that we didn't overflow, and report stack usage.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_thread_stack, test_idle_stack)
{
	if (IS_ENABLED(CONFIG_KERNEL_COHERENCE)) {
		/* Stacks on coherence platforms aren't coherent, and
		 * the idle stack may have been initialized on a
		 * different CPU!
		 */
		ztest_test_skip();
	}

	int ret;
#ifdef CONFIG_SMP
	/* 1cpu test case, so all other CPUs are spinning with co-op
	 * threads blocking them. _current_cpu triggers an assertion.
	 */
	struct k_thread *idle = arch_curr_cpu()->idle_thread;
#else
	struct k_thread *idle = _current_cpu->idle_thread;
#endif
	size_t unused_bytes;

	/* Spawn a child thread which self-exits */
	k_thread_create(&test_thread, kern_stack, STEST_STACKSIZE,
			no_op_entry,
			NULL, NULL, NULL,
			-1, 0, K_NO_WAIT);

	k_thread_join(&test_thread, K_FOREVER);

	/* Also sleep for a bit, which exercises the idle thread in case
	 * any PM hooks run
	 */
	k_sleep(K_MSEC(1));

	/* Now measure idle thread stack usage */
	ret = k_thread_stack_space_get(idle, &unused_bytes);
	zassert_true(ret == 0, "failed to obtain stack space");
	zassert_true(unused_bytes > 0, "idle thread stack size %d too low",
		     CONFIG_IDLE_STACK_SIZE);
	printk("unused idle thread stack size: %zu/%d (%zu used)\n",
	       unused_bytes, CONFIG_IDLE_STACK_SIZE,
	       CONFIG_IDLE_STACK_SIZE - unused_bytes);
}

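/* Suite setup: assign the system resource pool to the ztest thread so
 * that child threads inherit it and dynamic object allocation (see
 * no_op_entry()) can succeed.
 */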
void *thread_setup(void)
{
	k_thread_system_pool_assign(k_current_get());

	return NULL;
}

ZTEST_SUITE(userspace_thread_stack, NULL, thread_setup,
	    ztest_simple_1cpu_before, ztest_simple_1cpu_after, NULL);