/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/ztest.h>
#include <zephyr/internal/syscall_handler.h>
#include <kernel_internal.h>

#include "test_syscall.h"

/*
 * Stack testing
 */
struct k_thread test_thread;
#define NUM_STACKS 3
#define STEST_STACKSIZE (512 + CONFIG_TEST_EXTRA_STACK_SIZE)
K_THREAD_STACK_DEFINE(user_stack, STEST_STACKSIZE);
K_THREAD_STACK_ARRAY_DEFINE(user_stack_array, NUM_STACKS, STEST_STACKSIZE);
K_KERNEL_STACK_DEFINE(kern_stack, STEST_STACKSIZE);
K_KERNEL_STACK_ARRAY_DEFINE(kern_stack_array, NUM_STACKS, STEST_STACKSIZE);

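/* Kernel stack embedded as a struct member, sandwiched between other fields,
 * to check that K_KERNEL_STACK_MEMBER stacks also work when they are not
 * standalone definitions.
 */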
struct foo {
	int bar;

	K_KERNEL_STACK_MEMBER(stack, STEST_STACKSIZE);
	int baz;
};

__kstackmem struct foo stest_member_stack;

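/* Syscall letting the test thread retrieve the stack buffer start and size
 * the kernel recorded for it in its thread struct (stack_info).
 */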
void z_impl_stack_info_get(char **start_addr, size_t *size)
{
	*start_addr = (char *)k_current_get()->stack_info.start;
	*size = k_current_get()->stack_info.size;
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_stack_info_get(char **start_addr, size_t *size)
{
	K_OOPS(K_SYSCALL_MEMORY_WRITE(start_addr, sizeof(uintptr_t)));
	K_OOPS(K_SYSCALL_MEMORY_WRITE(size, sizeof(size_t)));

	z_impl_stack_info_get(start_addr, size);
}
#include <zephyr/syscalls/stack_info_get_mrsh.c>

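/* Syscall wrapper around arch_buffer_validate(): returns 0 when the calling
 * thread may access the buffer (for write access if 'write' is nonzero),
 * nonzero otherwise.
 */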
int z_impl_check_perms(void *addr, size_t size, int write)
{
	return arch_buffer_validate(addr, size, write);
}

static inline int z_vrfy_check_perms(void *addr, size_t size, int write)
{
	return z_impl_check_perms((void *)addr, size, write);
}
#include <zephyr/syscalls/check_perms_mrsh.c>
#endif /* CONFIG_USERSPACE */

/* Global data structure with object information, used by
 * stack_buffer_scenarios
 */
ZTEST_BMEM struct scenario_data {
	k_thread_stack_t *stack;

#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
	k_thread_stack_t *stack_mapped;
#endif

	/* If this was declared with K_THREAD_STACK_DEFINE and not
	 * K_KERNEL_STACK_DEFINE
	 */
	bool is_user;

	/* Stack size stored in kernel object metadata if a user stack */
	size_t metadata_size;

	/* Return value of sizeof(stack) */
	size_t object_size;

	/* Return value of K_{THREAD|KERNEL}_STACK_SIZEOF(stack) */
	size_t reported_size;

	/* Original size argument passed to K_{THREAD|KERNEL}_STACK_DECLARE */
	size_t declared_size;

	/* Whether this stack is part of an array of thread stacks */
	bool is_array;
} scenario_data;

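/* Runs in the context of the thread under test: validates the relationship
 * between the stack object, the buffer reported via stack_info, and the
 * originally declared size, using the parameters in scenario_data.
 */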
void stack_buffer_scenarios(void)
{
	k_thread_stack_t *stack_obj;
	size_t obj_size = scenario_data.object_size;
	size_t stack_size, unused, carveout, reserved, alignment, adjusted;
	uint8_t val = 0;
	char *stack_start, *stack_ptr, *stack_end, *obj_start, *obj_end;
	char *stack_buf;
	volatile char *pos;
	int ret, expected;
	uintptr_t base;
	bool is_usermode;
	long int end_space;

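	/* When thread stacks are memory-mapped (CONFIG_THREAD_STACK_MEM_MAPPED),
	 * the checks below must use the mapped stack address recorded at thread
	 * creation rather than the original stack object.
	 */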
#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
	stack_obj = scenario_data.stack_mapped;
#else
	stack_obj = scenario_data.stack;
#endif

	base = (uintptr_t)stack_obj;

#ifdef CONFIG_USERSPACE
	is_usermode = arch_is_user_context();
#else
	is_usermode = false;
#endif
	/* Dump interesting information */
	stack_info_get(&stack_start, &stack_size);
	printk(" - Thread reports buffer %p size %zu\n", stack_start,
	       stack_size);

#ifdef CONFIG_USERSPACE
	if (scenario_data.is_user) {
		reserved = K_THREAD_STACK_RESERVED;
		stack_buf = K_THREAD_STACK_BUFFER(stack_obj);
		/* always use the original size here */
		alignment = Z_THREAD_STACK_OBJ_ALIGN(STEST_STACKSIZE);
	} else
#endif
	{
		reserved = K_KERNEL_STACK_RESERVED;
		stack_buf = K_KERNEL_STACK_BUFFER(stack_obj);
		alignment = Z_KERNEL_STACK_OBJ_ALIGN;
	}

	stack_end = stack_start + stack_size;
	obj_end = (char *)stack_obj + obj_size;
	obj_start = (char *)stack_obj;

	/* Assert that the created stack object, with the reserved data
	 * removed, can hold a thread buffer of STEST_STACKSIZE
	 */
	zassert_true(STEST_STACKSIZE <= (obj_size - reserved),
		     "bad stack size in object");

	/* Check that the stack info in the thread marks a region
	 * completely contained within the stack object
	 */
	zassert_true(stack_end <= obj_end,
		     "stack size in thread struct out of bounds (overflow)");
	zassert_true(stack_start >= obj_start,
		     "stack size in thread struct out of bounds (underflow)");

	/* Check that the base of the stack is aligned properly. */
	zassert_true(base % alignment == 0,
		     "stack base address %p not aligned to %zu",
		     stack_obj, alignment);

	/* Check that the entire stack buffer is read/writable */
	printk(" - check read/write to stack buffer\n");

	/* The address of this stack variable is guaranteed to be part of
	 * the active stack, and close to the actual stack pointer.
	 * Some CPUs have hardware stack overflow detection which
	 * faults on memory access within the stack buffer but below
	 * the stack pointer.
	 *
	 * The first test does a direct read & write from the estimated
	 * stack pointer up to the highest addresses in the buffer,
	 * starting from &val, which is close enough to the stack pointer.
	 */
	stack_ptr = &val;
	for (pos = stack_ptr; pos < stack_end; pos++) {
		/* pos is volatile so this doesn't get optimized out */
		val = *pos;
		*pos = val;
	}

#ifdef CONFIG_USERSPACE
	if (is_usermode) {
		/* If we're in user mode, check every byte in the stack buffer
		 * to ensure that the thread has permissions on it.
		 */
		for (pos = stack_start; pos < stack_end; pos++) {
			zassert_false(check_perms((void *)pos, 1, 1),
				      "bad MPU/MMU permission on stack buffer at address %p",
				      pos);
		}

		/* Bounds check the user accessible area; it shouldn't extend
		 * before or after the stack. Because of memory protection HW
		 * alignment constraints, we test the end of the stack object
		 * and not the buffer.
		 */
		zassert_true(check_perms(stack_start - 1, 1, 0),
			     "user mode access to memory %p before start of stack object",
			     obj_start - 1);
		zassert_true(check_perms(stack_end, 1, 0),
			     "user mode access to memory %p past end of stack object",
			     obj_end);

		/*
		 * The reserved area, when it exists, is dropped at run time
		 * when transitioning to user mode on RISC-V. Reinstate that
		 * reserved area here for the next tests to work properly
		 * with a static non-zero K_THREAD_STACK_RESERVED definition.
		 */
		if (IS_ENABLED(CONFIG_RISCV) &&
		    IS_ENABLED(CONFIG_GEN_PRIV_STACKS) &&
		    K_THREAD_STACK_RESERVED != 0) {
			stack_start += reserved;
			stack_size -= reserved;
		}

		zassert_true(stack_size <= obj_size - reserved,
			     "bad stack size %zu in thread struct",
			     stack_size);
	}
#endif

	carveout = stack_start - stack_buf;
	printk(" - Carved-out space in buffer: %zu\n", carveout);

	zassert_true(carveout < stack_size,
		     "Suspicious carve-out space reported");
	/* 0 unless this is a stack array */
	end_space = obj_end - stack_end;
	printk(" - Unused object space: %ld\n", end_space);

	/* For all stacks, when k_thread_create() is called with a stack object,
	 * it is equivalent to pass either the original requested stack size or
	 * the return value of K_*_STACK_SIZEOF() for that stack object.
	 *
	 * When the stack is actually instantiated, both expand to fill any
	 * rounded-up space, except for the rounding space of array members.
	 */
	if (!scenario_data.is_array) {
		/* These should be exactly the same. We have an equivalence relation:
		 * For some stack declared with:
		 *
		 * K_THREAD_STACK_DEFINE(my_stack, X);
		 * K_THREAD_STACK_LEN(X) - K_THREAD_STACK_RESERVED ==
		 *	K_THREAD_STACK_SIZEOF(my_stack)
		 *
		 * K_KERNEL_STACK_DEFINE(my_kern_stack, Y):
		 * K_KERNEL_STACK_LEN(Y) - K_KERNEL_STACK_RESERVED ==
		 *	K_KERNEL_STACK_SIZEOF(my_kern_stack)
		 */
#ifdef CONFIG_USERSPACE
		/* Not defined if user mode is disabled; all stacks are kernel stacks */
		if (scenario_data.is_user) {
			adjusted = K_THREAD_STACK_LEN(scenario_data.declared_size);
		} else
#endif
		{
			adjusted = K_KERNEL_STACK_LEN(scenario_data.declared_size);
		}
		adjusted -= reserved;

		zassert_equal(end_space, 0, "unexpected unused space\n");
	} else {
		/* For arrays there may be unused space per-object. This is because
		 * every single array member must be aligned to the value returned
		 * by Z_{KERNEL|THREAD}_STACK_OBJ_ALIGN.
		 *
		 * If we define:
		 *
		 * K_{THREAD|KERNEL}_STACK_ARRAY_DEFINE(my_stack_array, num_stacks, X);
		 *
		 * We do not auto-expand usable space to cover this unused area. Doing
		 * this would require some way for the kernel to know that a stack object
		 * pointer passed in is an array member, which is currently not possible.
		 *
		 * The equivalence here is computable with:
		 * K_THREAD_STACK_SIZEOF(my_stack_array[0]) ==
		 *	K_THREAD_STACK_LEN(X) - K_THREAD_STACK_RESERVED;
		 */

		if (scenario_data.is_user) {
			adjusted = K_THREAD_STACK_LEN(scenario_data.declared_size);
		} else {
			adjusted = K_KERNEL_STACK_LEN(scenario_data.declared_size);
		}
		adjusted -= reserved;

		/* At least make sure it's not negative; if it is, stack_info
		 * isn't set right
		 */
		zassert_true(end_space >= 0, "bad stack bounds in stack_info");
	}

	zassert_true(adjusted == scenario_data.reported_size,
		     "size mismatch: adjusted %zu vs. reported %zu",
		     adjusted, scenario_data.reported_size);

	ret = k_thread_stack_space_get(k_current_get(), &unused);
	if (!is_usermode && IS_ENABLED(CONFIG_NO_UNUSED_STACK_INSPECTION)) {
		expected = -ENOTSUP;
	} else {
		expected = 0;
	}

	zassert_equal(ret, expected, "unexpected return value %d", ret);
	if (ret == 0) {
		printk("self-reported unused stack space: %zu\n", unused);
	}
}

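/* Entry point for the thread under test. When p1 is true, the thread first
 * drops to user mode, re-entering itself with p1 cleared, before running the
 * scenario checks.
 */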
void stest_thread_entry(void *p1, void *p2, void *p3)
{
	bool drop = (bool)p1;

	if (drop) {
		k_thread_user_mode_enter(stest_thread_entry, (void *)false,
					 p2, p3);
	} else {
		stack_buffer_scenarios();
	}
}

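/* Create and start the test thread on scenario_data.stack with the given
 * thread flags, optionally exercising the user mode drop path, then report
 * the thread's unused stack space after it finishes.
 */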
void stest_thread_launch(uint32_t flags, bool drop)
{
	int ret;
	size_t unused;

	k_thread_create(&test_thread, scenario_data.stack, STEST_STACKSIZE,
			stest_thread_entry,
			(void *)drop, NULL, NULL,
			-1, flags, K_FOREVER);

#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
	scenario_data.stack_mapped = test_thread.stack_info.mapped.addr;

	printk(" - Memory mapped stack object %p\n", scenario_data.stack_mapped);
#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */

	k_thread_start(&test_thread);
	k_thread_join(&test_thread, K_FOREVER);

	ret = k_thread_stack_space_get(&test_thread, &unused);

#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
	if (ret == -EINVAL) {
		printk("! cannot report unused stack space because the stack is no longer mapped\n");
	} else
#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
	{
		zassert_equal(ret, 0, "failed to calculate unused stack space\n");
		printk("target thread unused stack space: %zu\n", unused);
	}
}

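/* Record the parameters for one stack object in scenario_data, then run the
 * scenario in supervisor mode and, if the object can host a user thread,
 * in user mode as well (direct launch and mode drop).
 */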
void scenario_entry(void *stack_obj, size_t obj_size, size_t reported_size,
		    size_t declared_size, bool is_array)
{
	bool is_user;
	size_t metadata_size;

#ifdef CONFIG_USERSPACE
	struct k_object *zo;

	zo = k_object_find(stack_obj);
	if (zo != NULL) {
		is_user = true;
#ifdef CONFIG_GEN_PRIV_STACKS
		metadata_size = zo->data.stack_data->size;
#else
		metadata_size = zo->data.stack_size;
#endif /* CONFIG_GEN_PRIV_STACKS */
		printk("stack may host user thread, size in metadata is %zu\n",
		       metadata_size);
	} else
#endif /* CONFIG_USERSPACE */
	{
		metadata_size = 0;
		is_user = false;
	}

	scenario_data.stack = stack_obj;
	scenario_data.object_size = obj_size;
	scenario_data.is_user = is_user;
	scenario_data.metadata_size = metadata_size;
	scenario_data.reported_size = reported_size;
	scenario_data.declared_size = declared_size;
	scenario_data.is_array = is_array;

	printk("Stack object %p[%zu]\n", stack_obj, obj_size);
	printk(" - Testing supervisor mode\n");
	stest_thread_launch(0, false);

#ifdef CONFIG_USERSPACE
	if (is_user) {
		printk(" - Testing user mode (direct launch)\n");
		stest_thread_launch(K_USER | K_INHERIT_PERMS, false);
		printk(" - Testing user mode (drop)\n");
		stest_thread_launch(K_INHERIT_PERMS, true);
	}
#endif /* CONFIG_USERSPACE */
}

/**
 * @brief Test that the kernel provides a user thread read/write access to its
 * own stack memory buffer
 *
 * @details A thread can access its own stack memory buffer and perform
 * read/write operations.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_thread_stack, test_stack_buffer)
{
	printk("Reserved space (thread stacks): %zu\n",
	       K_THREAD_STACK_RESERVED);
	printk("Reserved space (kernel stacks): %zu\n",
	       K_KERNEL_STACK_RESERVED);

	printk("CONFIG_ISR_STACK_SIZE %zu\n", (size_t)CONFIG_ISR_STACK_SIZE);

	unsigned int num_cpus = arch_num_cpus();

	for (int i = 0; i < num_cpus; i++) {
		printk("irq stack %d: %p size %zu\n",
		       i, &z_interrupt_stacks[i],
		       sizeof(z_interrupt_stacks[i]));
	}

	printk("Provided stack size: %u\n", STEST_STACKSIZE);

	printk("\ntesting user_stack\n");
	scenario_entry(user_stack, sizeof(user_stack), K_THREAD_STACK_SIZEOF(user_stack),
		       STEST_STACKSIZE, false);

	for (int i = 0; i < NUM_STACKS; i++) {
		printk("\ntesting user_stack_array[%d]\n", i);
		scenario_entry(user_stack_array[i],
			       sizeof(user_stack_array[i]),
			       K_THREAD_STACK_SIZEOF(user_stack_array[i]),
			       STEST_STACKSIZE, true);
	}

	printk("\ntesting kern_stack\n");
	scenario_entry(kern_stack, sizeof(kern_stack), K_KERNEL_STACK_SIZEOF(kern_stack),
		       STEST_STACKSIZE, false);

	for (int i = 0; i < NUM_STACKS; i++) {
		printk("\ntesting kern_stack_array[%d]\n", i);
		scenario_entry(kern_stack_array[i],
			       sizeof(kern_stack_array[i]),
			       K_KERNEL_STACK_SIZEOF(kern_stack_array[i]),
			       STEST_STACKSIZE, true);
	}

	printk("\ntesting stest_member_stack\n");
	scenario_entry(&stest_member_stack.stack,
		       sizeof(stest_member_stack.stack),
		       K_KERNEL_STACK_SIZEOF(stest_member_stack.stack),
		       STEST_STACKSIZE, false);
}

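/* Minimal entry point: the thread returns immediately and self-aborts,
 * which is what forces the idle thread to perform cleanup work.
 */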
void no_op_entry(void *p1, void *p2, void *p3)
{
	printk("hi! bye!\n");

#ifdef CONFIG_DYNAMIC_OBJECTS
	/* Allocate a dynamic kernel object, which gets freed on thread
	 * cleanup since this thread has the only reference.
	 */
	struct k_sem *dyn_sem = k_object_alloc(K_OBJ_SEM);

	k_sem_init(dyn_sem, 1, 1);
	printk("allocated semaphore %p\n", dyn_sem);
#endif
	/* thread self-aborts, triggering idle thread cleanup */
}

/**
 * @brief Show that the idle thread stack size is correct
 *
 * The idle thread has to occasionally clean up self-exiting threads.
 * Exercise this and show that we didn't overflow, reporting the stack
 * usage.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_thread_stack, test_idle_stack)
{
	if (IS_ENABLED(CONFIG_KERNEL_COHERENCE)) {
		/* Stacks on coherence platforms aren't coherent, and
		 * the idle stack may have been initialized on a
		 * different CPU!
		 */
		ztest_test_skip();
	}

	int ret;
#ifdef CONFIG_SMP
	/* 1cpu test case, so all other CPUs are spinning with co-op
	 * threads blocking them. _current_cpu triggers an assertion.
	 */
	struct k_thread *idle = arch_curr_cpu()->idle_thread;
#else
	struct k_thread *idle = _current_cpu->idle_thread;
#endif
	size_t unused_bytes;

	/* Spawn a child thread which self-exits */
	k_thread_create(&test_thread, kern_stack, STEST_STACKSIZE,
			no_op_entry,
			NULL, NULL, NULL,
			-1, 0, K_NO_WAIT);

	k_thread_join(&test_thread, K_FOREVER);

	/* Also sleep for a bit, which exercises the idle thread
	 * in case some PM hooks will run
	 */
	k_sleep(K_MSEC(1));

	/* Now measure idle thread stack usage */
	ret = k_thread_stack_space_get(idle, &unused_bytes);
	zassert_true(ret == 0, "failed to obtain stack space");
	zassert_true(unused_bytes > 0, "idle thread stack size %d too low",
		     CONFIG_IDLE_STACK_SIZE);
	printk("unused idle thread stack size: %zu/%d (%zu used)\n",
	       unused_bytes, CONFIG_IDLE_STACK_SIZE,
	       CONFIG_IDLE_STACK_SIZE - unused_bytes);
}

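/* Suite setup: assign the system resource pool to the test runner so that
 * dynamic kernel object allocation (as in no_op_entry()) has a heap to
 * draw from.
 */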
void *thread_setup(void)
{
	k_thread_system_pool_assign(k_current_get());

	return NULL;
}

ZTEST_SUITE(userspace_thread_stack, NULL, thread_setup,
	    ztest_simple_1cpu_before, ztest_simple_1cpu_after, NULL);