/*
 * Parts derived from tests/kernel/fatal/src/main.c, which has the
 * following copyright and license:
 *
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/ztest.h>
#include <zephyr/kernel_structs.h>
#include <string.h>
#include <stdlib.h>
#include <zephyr/app_memory/app_memdomain.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/barrier.h>
#include <zephyr/debug/stack.h>
#include <zephyr/internal/syscall_handler.h>
#include "test_syscall.h"
#include <zephyr/sys/libc-hooks.h> /* for z_libc_partition */

#if defined(CONFIG_XTENSA)
#include <zephyr/arch/xtensa/cache.h>
#if defined(CONFIG_XTENSA_MMU)
#include <zephyr/arch/xtensa/xtensa_mmu.h>
#endif
#if defined(CONFIG_XTENSA_MPU)
#include <zephyr/arch/xtensa/mpu.h>
#endif
#endif

#if defined(CONFIG_ARC)
#include <zephyr/arch/arc/v2/mpu/arc_core_mpu.h>
#endif

#if defined(CONFIG_ARM)
extern void arm_core_mpu_disable(void);
#endif

#define INFO(fmt, ...) printk(fmt, ##__VA_ARGS__)
#define PIPE_LEN 1
#define BYTES_TO_READ_WRITE 1
#define STACKSIZE (256 + CONFIG_TEST_EXTRA_STACK_SIZE)

K_SEM_DEFINE(test_revoke_sem, 0, 1);

/* Used for tests that switch between domains; we will switch between the
 * default domain and this one.
 */
struct k_mem_domain alternate_domain;

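/* Set by set_fault()/clear_fault() below and checked by
 * k_sys_fatal_error_handler(); kept in the ztest partition (ZTEST_BMEM)
 * so user mode test threads can update them.
 */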
ZTEST_BMEM static volatile bool expect_fault;
ZTEST_BMEM static volatile unsigned int expected_reason;

/* Partition unique to default domain */
K_APPMEM_PARTITION_DEFINE(default_part);
K_APP_BMEM(default_part) volatile bool default_bool;
/* Partition unique to alternate domain */
K_APPMEM_PARTITION_DEFINE(alt_part);
K_APP_BMEM(alt_part) volatile bool alt_bool;

static struct k_thread test_thread;
static K_THREAD_STACK_DEFINE(test_stack, STACKSIZE);

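/* Arm/disarm the fault expectation checked by k_sys_fatal_error_handler(). */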
static void clear_fault(void)
{
	expect_fault = false;
	compiler_barrier();
}

static void set_fault(unsigned int reason)
{
	expect_fault = true;
	expected_reason = reason;
	compiler_barrier();
}

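/* Custom fatal error handler: consume a fault the current test announced
 * via set_fault(); any unexpected fatal error fails the test run and halts.
 */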
void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{
	INFO("Caught system error -- reason %d\n", reason);

	if (expect_fault) {
		if (expected_reason == reason) {
			printk("System error was expected\n");
			clear_fault();
		} else {
			printk("Wrong fault reason, expecting %d\n",
			       expected_reason);
			TC_END_REPORT(TC_FAIL);
			k_fatal_halt(reason);
		}
	} else {
		printk("Unexpected fault during test\n");
		TC_END_REPORT(TC_FAIL);
		k_fatal_halt(reason);
	}
}

/**
 * @brief Test to check if the thread is in user mode
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_is_usermode)
{
	/* Confirm that we are in fact running in user mode. */
	clear_fault();

	zassert_true(k_is_user_context(), "thread left in kernel mode");
}

/**
 * @brief Test to write to a control register
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_write_control)
{
	/* Try to write to a control register. */
#if defined(CONFIG_X86)
	set_fault(K_ERR_CPU_EXCEPTION);

#ifdef CONFIG_X86_64
	__asm__ volatile (
		"movq $0xFFFFFFFF, %rax;\n\t"
		"movq %rax, %cr0;\n\t"
		);
#else
	__asm__ volatile (
		"mov %cr0, %eax;\n\t"
		"and $0xfffeffff, %eax;\n\t"
		"mov %eax, %cr0;\n\t"
		);
#endif
	zassert_unreachable("Write to control register did not fault");

#elif defined(CONFIG_ARM64)
	uint64_t val = SPSR_MODE_EL1T;

	set_fault(K_ERR_CPU_EXCEPTION);

	__asm__ volatile("msr spsr_el1, %0"
			 :
			 : "r" (val)
			 : "memory", "cc");

	zassert_unreachable("Write to control register did not fault");

#elif defined(CONFIG_ARM)
#if defined(CONFIG_CPU_CORTEX_M)
	unsigned int msr_value;

	clear_fault();

	msr_value = __get_CONTROL();
	msr_value &= ~(CONTROL_nPRIV_Msk);
	__set_CONTROL(msr_value);
	barrier_dsync_fence_full();
	barrier_isync_fence_full();
	msr_value = __get_CONTROL();
	zassert_true((msr_value & (CONTROL_nPRIV_Msk)),
		     "Write to control register was successful");
#else
	uint32_t val;

	set_fault(K_ERR_CPU_EXCEPTION);

	val = __get_SCTLR();
	val |= SCTLR_DZ_Msk;
	__set_SCTLR(val);

	zassert_unreachable("Write to control register did not fault");
#endif
#elif defined(CONFIG_ARC)
	unsigned int er_status;

	set_fault(K_ERR_CPU_EXCEPTION);

	/* _ARC_V2_ERSTATUS is a privileged aux register */
	__asm__ volatile (
		"lr %0, [0x402]\n"
		: "=r" (er_status)::
		);
#elif defined(CONFIG_RISCV)
	unsigned int status;

	set_fault(K_ERR_CPU_EXCEPTION);

	__asm__ volatile("csrr %0, mstatus" : "=r" (status));
#elif defined(CONFIG_XTENSA)
	unsigned int ps;

	set_fault(K_ERR_CPU_EXCEPTION);

	__asm__ volatile("rsr.ps %0" : "=r" (ps));
#else
#error "Not implemented for this architecture"
	zassert_unreachable("Write to control register did not fault");
#endif
}

/**
 * @brief Test to disable memory protection
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_disable_mmu_mpu)
{
	/* Try to disable memory protections. */
#if defined(CONFIG_X86)
	set_fault(K_ERR_CPU_EXCEPTION);

#ifdef CONFIG_X86_64
	__asm__ volatile (
		"movq %cr0, %rax;\n\t"
		"andq $0x7ffeffff, %rax;\n\t"
		"movq %rax, %cr0;\n\t"
		);
#else
	__asm__ volatile (
		"mov %cr0, %eax;\n\t"
		"and $0x7ffeffff, %eax;\n\t"
		"mov %eax, %cr0;\n\t"
		);
#endif
#elif defined(CONFIG_ARM64)
	uint64_t val;

	set_fault(K_ERR_CPU_EXCEPTION);

	__asm__ volatile("mrs %0, sctlr_el1" : "=r" (val));
	__asm__ volatile("msr sctlr_el1, %0"
			 :
			 : "r" (val & ~(SCTLR_M_BIT | SCTLR_C_BIT))
			 : "memory", "cc");

#elif defined(CONFIG_ARM)
#ifndef CONFIG_TRUSTED_EXECUTION_NONSECURE
	set_fault(K_ERR_CPU_EXCEPTION);

	arm_core_mpu_disable();
#else
	/* Disabling the MPU from unprivileged code generates a BusFault,
	 * which is not banked between Security states. Do not execute
	 * this scenario for Non-Secure Cortex-M.
	 */
	return;
#endif /* !CONFIG_TRUSTED_EXECUTION_NONSECURE */
#elif defined(CONFIG_ARC)
	set_fault(K_ERR_CPU_EXCEPTION);

	arc_core_mpu_disable();
#elif defined(CONFIG_RISCV)
	set_fault(K_ERR_CPU_EXCEPTION);

	/*
	 * Try to make everything accessible through PMP slot 3,
	 * which should not be locked.
	 */
	csr_write(pmpaddr3, LLONG_MAX);
	csr_write(pmpcfg0, (PMP_R|PMP_W|PMP_X|PMP_NAPOT) << 24);
#elif defined(CONFIG_XTENSA)
	set_fault(K_ERR_CPU_EXCEPTION);

#if defined(CONFIG_XTENSA_MMU)
	/* Reset way 6 to do identity mapping.
	 * The compiler would complain about addr going out of range if we
	 * simply did addr = i * 0x20000000 inside the loop, so we
	 * increment instead.
	 */
	uint32_t addr = 0U;

	for (int i = 0; i < 8; i++) {
		uint32_t attr = addr | XTENSA_MMU_PERM_WX;

		__asm__ volatile("wdtlb %0, %1; witlb %0, %1"
				 :: "r"(attr), "r"(addr));

		addr += 0x20000000;
	}
#endif

#if defined(CONFIG_XTENSA_MPU)
	/* Technically, simply clearing out all foreground MPU entries
	 * allows the background map to take over, so it is not exactly
	 * disabling the MPU. However, this test is about catching userspace
	 * trying to manipulate the MPU regions, so as long as there is
	 * a kernel OOPS we are fine.
	 */
	for (int i = 0; i < XTENSA_MPU_NUM_ENTRIES; i++) {
		__asm__ volatile("wptlb %0, %1\n\t" : : "a"(i), "a"(0));
	}
#endif

#else
#error "Not implemented for this architecture"
#endif
	zassert_unreachable("Disable MMU/MPU did not fault");
}

/**
 * @brief Test to read from kernel RAM
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_read_kernram)
{
	/* Try to read from kernel RAM. */
	void *p;

	set_fault(K_ERR_CPU_EXCEPTION);

	p = _current->init_data;
	printk("%p\n", p);
	zassert_unreachable("Read from kernel RAM did not fault");
}

/**
 * @brief Test to write to kernel RAM
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_write_kernram)
{
	/* Try to write to kernel RAM. */
	set_fault(K_ERR_CPU_EXCEPTION);

	_current->init_data = NULL;
	zassert_unreachable("Write to kernel RAM did not fault");
}

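/* _k_neg_eagain is a kernel constant (-EAGAIN) placed in rodata; it gives
 * us a known address inside the kernel read-only region to poke at.
 */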
extern int _k_neg_eagain;

#include <zephyr/linker/linker-defs.h>

/**
 * @brief Test to write kernel RO
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_write_kernro)
{
	bool in_rodata;

	/* Try to write to kernel RO. */
	const char *const ptr = (const char *const)&_k_neg_eagain;

	in_rodata = ptr < __rodata_region_end &&
		    ptr >= __rodata_region_start;

#ifdef CONFIG_LINKER_USE_PINNED_SECTION
	if (!in_rodata) {
		in_rodata = ptr < lnkr_pinned_rodata_end &&
			    ptr >= lnkr_pinned_rodata_start;
	}
#endif

	zassert_true(in_rodata,
		     "_k_neg_eagain is not in rodata");

	set_fault(K_ERR_CPU_EXCEPTION);

	_k_neg_eagain = -EINVAL;
	zassert_unreachable("Write to kernel RO did not fault");
}

/**
 * @brief Test to write to kernel text section
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_write_kerntext)
{
	/* Try to write to kernel text. */
	set_fault(K_ERR_CPU_EXCEPTION);

	memset(&k_current_get, 0, 4);
	zassert_unreachable("Write to kernel text did not fault");
}

static int kernel_data;

/**
 * @brief Test to read from kernel data section
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_read_kernel_data)
{
	set_fault(K_ERR_CPU_EXCEPTION);

	printk("%d\n", kernel_data);
	zassert_unreachable("Read from data did not fault");
}

/**
 * @brief Test to write to kernel data section
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_write_kernel_data)
{
	set_fault(K_ERR_CPU_EXCEPTION);

	kernel_data = 1;
	zassert_unreachable("Write to data did not fault");
}

/*
 * volatile to avoid compiler mischief.
 */
K_APP_DMEM(default_part) volatile char *priv_stack_ptr;
#if defined(CONFIG_ARC)
K_APP_DMEM(default_part) int32_t size = (0 - CONFIG_PRIVILEGED_STACK_SIZE -
					 Z_ARC_STACK_GUARD_SIZE);
#endif
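/* On ARC, the tests below derive priv_stack_ptr at run time from the address
 * of a stack local; on the other architectures it is resolved once in
 * userspace_setup().
 */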

/**
 * @brief Test to read privileged stack
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_read_priv_stack)
{
	/* Try to read from privileged stack. */
#if defined(CONFIG_ARC)
	int s[1];

	s[0] = 0;
	priv_stack_ptr = (char *)&s[0] - size;
#elif defined(CONFIG_ARM) || defined(CONFIG_X86) || defined(CONFIG_RISCV) || \
	defined(CONFIG_ARM64) || defined(CONFIG_XTENSA)
	/* priv_stack_ptr set by userspace_setup() */
#else
#error "Not implemented for this architecture"
#endif
	set_fault(K_ERR_CPU_EXCEPTION);

	printk("%c\n", *priv_stack_ptr);
	zassert_unreachable("Read from privileged stack did not fault");
}

/**
 * @brief Test to write to privileged stack
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_write_priv_stack)
{
	/* Try to write to privileged stack. */
#if defined(CONFIG_ARC)
	int s[1];

	s[0] = 0;
	priv_stack_ptr = (char *)&s[0] - size;
#elif defined(CONFIG_ARM) || defined(CONFIG_X86) || defined(CONFIG_RISCV) || \
	defined(CONFIG_ARM64) || defined(CONFIG_XTENSA)
	/* priv_stack_ptr set by userspace_setup() */
#else
#error "Not implemented for this architecture"
#endif
	set_fault(K_ERR_CPU_EXCEPTION);

	*priv_stack_ptr = 42;
	zassert_unreachable("Write to privileged stack did not fault");
}


K_APP_BMEM(default_part) static struct k_sem sem;

/**
 * @brief Test to pass a user object to system call
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_pass_user_object)
{
	/* Try to pass a user object to a system call. */
	set_fault(K_ERR_KERNEL_OOPS);

	k_sem_init(&sem, 0, 1);
	zassert_unreachable("Pass a user object to a syscall did not fault");
}

static struct k_sem ksem;

/**
 * @brief Test to pass object to a system call without permissions
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_pass_noperms_object)
{
	/* Try to pass an object to a system call w/o permissions. */
	set_fault(K_ERR_KERNEL_OOPS);

	k_sem_init(&ksem, 0, 1);
	zassert_unreachable("Pass an unauthorized object to a "
			    "syscall did not fault");
}


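/* Minimal thread entry point; the act of spawning it is what is under test. */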
void thread_body(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);
}

/**
 * @brief Test to start kernel thread from usermode
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_start_kernel_thread)
{
	/* Try to start a kernel thread from a usermode thread */
	set_fault(K_ERR_KERNEL_OOPS);
	k_thread_create(&test_thread, test_stack, STACKSIZE,
			thread_body, NULL, NULL, NULL,
			K_PRIO_PREEMPT(1), K_INHERIT_PERMS,
			K_NO_WAIT);
	zassert_unreachable("Create a kernel thread did not fault");
}

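/* Entry points for the stack isolation tests below: each receives a pointer
 * into the parent thread's stack and expects the access to fault.
 */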
static void uthread_read_body(void *p1, void *p2, void *p3)
{
	unsigned int *vptr = p1;

	set_fault(K_ERR_CPU_EXCEPTION);
	printk("%u\n", *vptr);
	zassert_unreachable("Read from other thread stack did not fault");
}

static void uthread_write_body(void *p1, void *p2, void *p3)
{
	unsigned int *vptr = p1;

	set_fault(K_ERR_CPU_EXCEPTION);
	*vptr = 2U;
	zassert_unreachable("Write to other thread stack did not fault");
}

/**
 * @brief Test to read from another thread's stack
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_read_other_stack)
{
	/* Try to read from another thread's stack. */
	unsigned int val;

#if !defined(CONFIG_MEM_DOMAIN_ISOLATED_STACKS)
	/* The minimal requirement for memory domain support permits
	 * threads of the same memory domain to access each other's stacks.
	 * Some architectures support further restricting access, which
	 * can be enabled via a Kconfig option. If that option is not
	 * enabled, skip the test.
	 */
	ztest_test_skip();
#endif

	k_thread_create(&test_thread, test_stack, STACKSIZE,
			uthread_read_body, &val, NULL, NULL,
			-1, K_USER | K_INHERIT_PERMS,
			K_NO_WAIT);

	k_thread_join(&test_thread, K_FOREVER);
}


/**
 * @brief Test to write to other thread's stack
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_write_other_stack)
{
	/* Try to write to another thread's stack. */
	unsigned int val;

#if !defined(CONFIG_MEM_DOMAIN_ISOLATED_STACKS)
	/* The minimal requirement for memory domain support permits
	 * threads of the same memory domain to access each other's stacks.
	 * Some architectures support further restricting access, which
	 * can be enabled via a Kconfig option. If that option is not
	 * enabled, skip the test.
	 */
	ztest_test_skip();
#endif

	k_thread_create(&test_thread, test_stack, STACKSIZE,
			uthread_write_body, &val, NULL, NULL,
			-1, K_USER | K_INHERIT_PERMS,
			K_NO_WAIT);
	k_thread_join(&test_thread, K_FOREVER);
}

/**
 * @brief Test to revoke access to kobject without permission
 *
 * @details A user thread can only revoke its own access to an object.
 * In this test the user thread revokes access to an unauthorized object;
 * as a result the kernel will oops.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_revoke_noperms_object)
{
	/* Attempt to revoke access to kobject w/o permissions */
	set_fault(K_ERR_KERNEL_OOPS);

	k_object_release(&ksem);

	zassert_unreachable("Revoke access to unauthorized object "
			    "did not fault");
}

/**
 * @brief Test to access object after revoking access
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_access_after_revoke)
{
	k_object_release(&test_revoke_sem);

	/* Try to access an object after revoking access to it */
	set_fault(K_ERR_KERNEL_OOPS);

	k_sem_take(&test_revoke_sem, K_NO_WAIT);

	zassert_unreachable("Using revoked object did not fault");
}

static void umode_enter_func(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	zassert_true(k_is_user_context(),
		     "Thread did not enter user mode");
}

/**
 * @brief Test that a supervisor thread can make a one-way switch to user mode
 *
 * @details A thread running in supervisor mode must be able to perform a
 * one-way operation that drops its privileges to user mode.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace, test_user_mode_enter)
{
	clear_fault();

	k_thread_user_mode_enter(umode_enter_func,
				 NULL, NULL, NULL);
}

/* Define and initialize pipe. */
K_PIPE_DEFINE(kpipe, PIPE_LEN, BYTES_TO_READ_WRITE);
/**
 * @brief Test to write to kobject using pipe
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_write_kobject_user_pipe)
{
	/*
	 * Attempt to use the k_pipe_read() system call to write over
	 * a kernel object.
	 */
	set_fault(K_ERR_KERNEL_OOPS);

	k_pipe_read(&kpipe, (uint8_t *)&test_revoke_sem, BYTES_TO_READ_WRITE, K_NO_WAIT);

	zassert_unreachable("System call memory write validation "
			    "did not fault");
}

/**
 * @brief Test to read from kobject using pipe
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_read_kobject_user_pipe)
{
	/*
	 * Attempt to use the k_pipe_write() system call to read from a
	 * kernel object.
	 */
	set_fault(K_ERR_KERNEL_OOPS);

	k_pipe_write(&kpipe, (uint8_t *)&test_revoke_sem, BYTES_TO_READ_WRITE, K_NO_WAIT);

	zassert_unreachable("System call memory read validation "
			    "did not fault");
}

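/* User mode half of the memory domain scenarios: touch the flag we were
 * handed, then fail the test if an expected fault never materialized.
 */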
static void user_half(void *arg1, void *arg2, void *arg3)
{
	volatile bool *bool_ptr = arg1;

	*bool_ptr = true;
	compiler_barrier();
	if (expect_fault) {
		printk("Expecting a fatal error %d but succeeded instead\n",
		       expected_reason);
		ztest_test_fail();
	}
}


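/* Run user_half() either in a fresh user thread (spawn_user) or by dropping
 * the current thread to user mode (drop_user).
 */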
static void spawn_user(volatile bool *to_modify)
{
	k_thread_create(&test_thread, test_stack, STACKSIZE, user_half,
			(void *)to_modify, NULL, NULL,
			-1, K_INHERIT_PERMS | K_USER, K_NO_WAIT);

	k_thread_join(&test_thread, K_FOREVER);
}

static void drop_user(volatile bool *to_modify)
{
	k_sleep(K_MSEC(1)); /* Force a context switch */
	k_thread_user_mode_enter(user_half, (void *)to_modify, NULL, NULL);
}

/**
 * @brief Test creation of new memory domains
 *
 * We initialize a new memory domain and show that its partition configuration
 * is correct. This new domain has "alt_part" in it, but not "default_part".
 * We then try to modify data in "default_part" and show it produces an
 * exception since that partition is not in the new domain.
 *
 * This caught a bug once where an MMU system copied page tables for the new
 * domain and accidentally copied memory partition permissions from the source
 * page tables, allowing the write to "default_part" to work.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_domain, test_1st_init_and_access_other_memdomain)
{
	struct k_mem_partition *parts[] = {
#if Z_LIBC_PARTITION_EXISTS
		&z_libc_partition,
#endif
		&ztest_mem_partition, &alt_part
	};

	zassert_equal(
		k_mem_domain_init(&alternate_domain, ARRAY_SIZE(parts), parts),
		0, "failed to initialize memory domain");

	/* Switch to alternate_domain, which does not have default_part that
	 * contains default_bool. This should fault when we try to write it.
	 */
	k_mem_domain_add_thread(&alternate_domain, k_current_get());
	set_fault(K_ERR_CPU_EXCEPTION);
	spawn_user(&default_bool);
}

#if (defined(CONFIG_ARM) || (defined(CONFIG_GEN_PRIV_STACKS) && defined(CONFIG_RISCV)))
extern uint8_t *z_priv_stack_find(void *obj);
#endif
extern k_thread_stack_t ztest_thread_stack[];

/**
 * Show that changing between memory domains and dropping to user mode works
 * as expected.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_domain, test_domain_add_thread_drop_to_user)
{
	clear_fault();
	k_mem_domain_add_thread(&alternate_domain, k_current_get());
	drop_user(&alt_bool);
}

/**
 * @brief Test adding application memory partition to memory domain
 *
 * @details Show that adding a partition to a domain and then dropping to user
 * mode works as expected.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_domain, test_domain_add_part_drop_to_user)
{
	clear_fault();

	zassert_equal(
		k_mem_domain_add_partition(&k_mem_domain_default, &alt_part),
		0, "failed to add memory partition");

	drop_user(&alt_bool);
}

/**
 * Show that self-removing a partition from a domain we are a member of,
 * and then dropping to user mode, faults as expected.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_domain, test_domain_remove_part_drop_to_user)
{
	/* We added alt_part to the default domain in the previous test;
	 * remove it, and then try to access it again.
	 */
	set_fault(K_ERR_CPU_EXCEPTION);

	zassert_equal(
		k_mem_domain_remove_partition(&k_mem_domain_default, &alt_part),
		0, "failed to remove partition");

	drop_user(&alt_bool);
}

/**
 * Show that changing between memory domains and then switching to another
 * thread in the same domain works as expected.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_domain_ctx, test_domain_add_thread_context_switch)
{
	clear_fault();
	k_mem_domain_add_thread(&alternate_domain, k_current_get());
	spawn_user(&alt_bool);
}

/**
 * Show that adding a partition to a domain and then switching to another
 * user thread in the same domain works as expected.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_domain_ctx, test_domain_add_part_context_switch)
{
	clear_fault();

	zassert_equal(
		k_mem_domain_add_partition(&k_mem_domain_default, &alt_part),
		0, "failed to add memory partition");

	spawn_user(&alt_bool);
}

/**
 * Show that self-removing a partition from a domain we are a member of,
 * and then switching to another user thread in the same domain, faults as
 * expected.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_domain_ctx, test_domain_remove_part_context_switch)
{
	/* We added alt_part to the default domain in the previous test;
	 * remove it, and then try to access it again.
	 */
	set_fault(K_ERR_CPU_EXCEPTION);

	zassert_equal(
		k_mem_domain_remove_partition(&k_mem_domain_default, &alt_part),
		0, "failed to remove memory partition");

	spawn_user(&alt_bool);
}

void z_impl_missing_syscall(void)
{
	/* Shouldn't ever get here; no handler function compiled */
	k_panic();
}

/**
 * @brief Test unimplemented system call
 *
 * @details Define a system call named missing_syscall() without a
 * verification function. The kernel shall safely handle invocations
 * of unimplemented system calls.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_unimplemented_syscall)
{
	set_fault(K_ERR_KERNEL_OOPS);

	missing_syscall();
}

/**
 * @brief Test bad syscall handler
 *
 * @details When a system call handler decides to terminate the calling
 * thread, the kernel will produce an error indicating the context where
 * the faulting system call was made from user code.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_bad_syscall)
{
	set_fault(K_ERR_KERNEL_OOPS);

	arch_syscall_invoke0(INT_MAX);

	set_fault(K_ERR_KERNEL_OOPS);

	arch_syscall_invoke0(UINT_MAX);
}

static struct k_sem recycle_sem;

/**
 * @brief Test recycle object
 *
 * @details Test recycling valid/invalid kernel objects and check that
 * perms_count changes as expected.
 *
 * @see k_object_recycle(), k_object_find()
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace, test_object_recycle)
{
	struct k_object *ko;
	int perms_count = 0;
	int dummy = 0;

	/* Validate recycling an invalid object: after recycling it,
	 * perms_count on the valid object below should still end up as 1.
	 */
	ko = k_object_find(&dummy);
	zassert_true(ko == NULL, "not an invalid object");

	k_object_recycle(&dummy);

	ko = k_object_find(&recycle_sem);
	zassert_true(ko != NULL, "kernel object not found");
	(void)memset(ko->perms, 0xFF, sizeof(ko->perms));

	k_object_recycle(&recycle_sem);
	zassert_true(ko->flags & K_OBJ_FLAG_INITIALIZED,
		     "object wasn't marked as initialized");

	for (int i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
		perms_count += POPCOUNT(ko->perms[i]);
	}

	zassert_true(perms_count == 1, "invalid number of thread permissions");
}

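/* Arm the fault expectation directly, then trigger a fatal error with the
 * provided reason via z_except_reason(); the fatal error handler verifies
 * that the reason reported matches 'expected'.
 */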
#define test_oops(provided, expected) do { \
	expect_fault = true; \
	expected_reason = expected; \
	z_except_reason(provided); \
} while (false)

ZTEST_USER(userspace, test_oops_panic)
{
	test_oops(K_ERR_KERNEL_PANIC, K_ERR_KERNEL_OOPS);
}

ZTEST_USER(userspace, test_oops_oops)
{
	test_oops(K_ERR_KERNEL_OOPS, K_ERR_KERNEL_OOPS);
}

ZTEST_USER(userspace, test_oops_exception)
{
	test_oops(K_ERR_CPU_EXCEPTION, K_ERR_KERNEL_OOPS);
}

ZTEST_USER(userspace, test_oops_maxint)
{
	test_oops(INT_MAX, K_ERR_KERNEL_OOPS);
}

ZTEST_USER(userspace, test_oops_stackcheck)
{
	test_oops(K_ERR_STACK_CHK_FAIL, K_ERR_STACK_CHK_FAIL);
}

void z_impl_check_syscall_context(void)
{
	unsigned int key = irq_lock();

	irq_unlock(key);

	/* Make sure that interrupts aren't locked when handling system calls;
	 * key has the previous locking state before the above irq_lock() call.
	 */
	zassert_true(arch_irq_unlocked(key), "irqs locked during syscall");

	/* The kernel should not think we are in ISR context either */
	zassert_false(k_is_in_isr(), "kernel reports irq context");
}

static inline void z_vrfy_check_syscall_context(void)
{
	z_impl_check_syscall_context();
}
#include <zephyr/syscalls/check_syscall_context_mrsh.c>

ZTEST_USER(userspace, test_syscall_context)
{
	check_syscall_context();
}

#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
static void tls_leakage_user_part(void *p1, void *p2, void *p3)
{
	char *tls_area = p1;

	for (int i = 0; i < sizeof(struct _thread_userspace_local_data); i++) {
		zassert_false(tls_area[i] == 0xff,
			      "TLS data leakage to user mode");
	}
}
#endif

ZTEST(userspace, test_tls_leakage)
{
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
	/* Tests two assertions:
	 *
	 * - That a user thread has full access to its TLS area
	 * - That dropping to user mode doesn't allow any TLS data set in
	 *   supervisor mode to be leaked
	 */

	memset(_current->userspace_local_data, 0xff,
	       sizeof(struct _thread_userspace_local_data));

	k_thread_user_mode_enter(tls_leakage_user_part,
				 _current->userspace_local_data, NULL, NULL);
#else
	ztest_test_skip();
#endif
}

#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
void tls_entry(void *p1, void *p2, void *p3)
{
	printk("tls_entry\n");
}
#endif

ZTEST(userspace, test_tls_pointer)
{
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
	char *stack_obj_ptr;
	size_t stack_obj_sz;

	k_thread_create(&test_thread, test_stack, STACKSIZE, tls_entry,
			NULL, NULL, NULL, 1, K_USER, K_FOREVER);

	printk("tls pointer for thread %p: %p\n",
	       &test_thread, (void *)test_thread.userspace_local_data);

	printk("stack buffer reported bounds: [%p, %p)\n",
	       (void *)test_thread.stack_info.start,
	       (void *)(test_thread.stack_info.start +
			test_thread.stack_info.size));

#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
	stack_obj_ptr = (char *)test_thread.stack_obj_mapped;
	stack_obj_sz = test_thread.stack_obj_size;
#else
	stack_obj_ptr = (char *)test_stack;
	stack_obj_sz = sizeof(test_stack);
#endif

	printk("stack object bounds: [%p, %p)\n",
	       stack_obj_ptr, stack_obj_ptr + stack_obj_sz);

	uintptr_t tls_start = (uintptr_t)test_thread.userspace_local_data;
	uintptr_t tls_end = tls_start +
			    sizeof(struct _thread_userspace_local_data);

	if ((tls_start < (uintptr_t)stack_obj_ptr) ||
	    (tls_end > (uintptr_t)stack_obj_ptr + stack_obj_sz)) {
		printk("tls area out of bounds\n");
		ztest_test_fail();
	}

	k_thread_abort(&test_thread);
#else
	ztest_test_skip();
#endif
}

K_APP_BMEM(default_part) volatile bool kernel_only_thread_ran;
K_APP_BMEM(default_part) volatile bool kernel_only_thread_user_ran;
static K_SEM_DEFINE(kernel_only_thread_run_sem, 0, 1);

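/* A thread defined with K_KERNEL_THREAD_DEFINE() must never reach user mode:
 * the entry function below attempts the transition, and the test expects a
 * panic (an oops on some architectures) instead of the user entry running.
 */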
void kernel_only_thread_user_entry(void *p1, void *p2, void *p3)
{
	printk("kernel only thread in user mode\n");

	kernel_only_thread_user_ran = true;
}

void kernel_only_thread_entry(void *p1, void *p2, void *p3)
{
	k_sem_take(&kernel_only_thread_run_sem, K_FOREVER);

	printk("kernel only thread in kernel mode\n");

	/* Some architectures emit a kernel OOPS instead of a panic. */
#if defined(CONFIG_ARM64)
	set_fault(K_ERR_KERNEL_OOPS);
#else
	set_fault(K_ERR_KERNEL_PANIC);
#endif

	kernel_only_thread_ran = true;

	k_thread_user_mode_enter(kernel_only_thread_user_entry, NULL, NULL, NULL);
}

#ifdef CONFIG_MMU
#define KERNEL_ONLY_THREAD_STACK_SIZE (ROUND_UP(1024, CONFIG_MMU_PAGE_SIZE))
#elif CONFIG_64BIT
#define KERNEL_ONLY_THREAD_STACK_SIZE (2048)
#else
#define KERNEL_ONLY_THREAD_STACK_SIZE (1024)
#endif

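/* Statically define the kernel-only thread; K_USER is not set and its stack
 * is a kernel-only stack object.
 */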
static K_KERNEL_THREAD_DEFINE(kernel_only_thread,
			      KERNEL_ONLY_THREAD_STACK_SIZE,
			      kernel_only_thread_entry, NULL, NULL, NULL,
			      0, 0, 0);

ZTEST(userspace, test_kernel_only_thread)
{
	kernel_only_thread_ran = false;
	kernel_only_thread_user_ran = false;

	k_sem_give(&kernel_only_thread_run_sem);

	k_sleep(K_MSEC(500));

	if (!kernel_only_thread_ran) {
		printk("kernel only thread not running in kernel mode!\n");
		ztest_test_fail();
	}

	if (kernel_only_thread_user_ran) {
		printk("kernel only thread should not have run in user mode!\n");
		ztest_test_fail();
	}
}

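/* Suite setup: add the default partition to the default domain, locate the
 * privileged stack for the priv-stack tests, and grant this thread access
 * to the kernel objects the scenarios above use.
 */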
void *userspace_setup(void)
{
	int ret;

	/* Most of these scenarios use the default domain */
	ret = k_mem_domain_add_partition(&k_mem_domain_default, &default_part);
	if (ret != 0) {
		printk("Failed to add default memory partition (%d)\n", ret);
		k_oops();
	}

#if defined(CONFIG_ARM64)
	struct z_arm64_thread_stack_header *hdr;
	void *vhdr = ((struct z_arm64_thread_stack_header *)ztest_thread_stack);

	hdr = vhdr;
	priv_stack_ptr = (((char *)&hdr->privilege_stack) +
			  (sizeof(hdr->privilege_stack) - 1));
#elif defined(CONFIG_ARM)
	priv_stack_ptr = (char *)z_priv_stack_find(ztest_thread_stack);
#elif defined(CONFIG_X86)
	struct z_x86_thread_stack_header *hdr;
	void *vhdr = ((struct z_x86_thread_stack_header *)ztest_thread_stack);

	hdr = vhdr;
	priv_stack_ptr = (((char *)&hdr->privilege_stack) +
			  (sizeof(hdr->privilege_stack) - 1));
#elif defined(CONFIG_RISCV)
#if defined(CONFIG_GEN_PRIV_STACKS)
	priv_stack_ptr = (char *)z_priv_stack_find(ztest_thread_stack);
#else
	priv_stack_ptr = (char *)((uintptr_t)ztest_thread_stack +
				  Z_RISCV_STACK_GUARD_SIZE);
#endif
#endif
	k_thread_access_grant(k_current_get(),
			      &test_thread, &test_stack,
			      &kernel_only_thread_run_sem,
			      &test_revoke_sem, &kpipe);
	return NULL;
}

ZTEST_SUITE(userspace, NULL, userspace_setup, NULL, NULL, NULL);

ZTEST_SUITE(userspace_domain, NULL, NULL, NULL, NULL, NULL);

ZTEST_SUITE(userspace_domain_ctx, NULL, NULL, NULL, NULL, NULL);