/*
 * Parts derived from tests/kernel/fatal/src/main.c, which has the
 * following copyright and license:
 *
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/ztest.h>
#include <zephyr/kernel_structs.h>
#include <string.h>
#include <stdlib.h>
#include <zephyr/app_memory/app_memdomain.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/barrier.h>
#include <zephyr/debug/stack.h>
#include <zephyr/internal/syscall_handler.h>
#include "test_syscall.h"
#include <zephyr/sys/libc-hooks.h> /* for z_libc_partition */

#if defined(CONFIG_XTENSA)
#include <zephyr/arch/xtensa/cache.h>
#if defined(CONFIG_XTENSA_MMU)
#include <zephyr/arch/xtensa/xtensa_mmu.h>
#endif
#if defined(CONFIG_XTENSA_MPU)
#include <zephyr/arch/xtensa/mpu.h>
#endif
#endif

#if defined(CONFIG_ARC)
#include <zephyr/arch/arc/v2/mpu/arc_core_mpu.h>
#endif

#if defined(CONFIG_ARM)
extern void arm_core_mpu_disable(void);
#endif

#define INFO(fmt, ...) printk(fmt, ##__VA_ARGS__)
#define PIPE_LEN 1
#define BYTES_TO_READ_WRITE 1
#define STACKSIZE (256 + CONFIG_TEST_EXTRA_STACK_SIZE)

K_SEM_DEFINE(test_revoke_sem, 0, 1);

/* Used for tests that switch between domains; we will switch between
 * the default domain and this one.
 */
struct k_mem_domain alternate_domain;

ZTEST_BMEM static volatile bool expect_fault;
ZTEST_BMEM static volatile unsigned int expected_reason;

/* Partition unique to default domain */
K_APPMEM_PARTITION_DEFINE(default_part);
K_APP_BMEM(default_part) volatile bool default_bool;
/* Partition unique to alternate domain */
K_APPMEM_PARTITION_DEFINE(alt_part);
K_APP_BMEM(alt_part) volatile bool alt_bool;

static struct k_thread test_thread;
static K_THREAD_STACK_DEFINE(test_stack, STACKSIZE);

void clear_fault(void)
{
	expect_fault = false;
	compiler_barrier();
}

static void set_fault(unsigned int reason)
{
	expect_fault = true;
	expected_reason = reason;
	compiler_barrier();
}

void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{
	INFO("Caught system error -- reason %d\n", reason);

	if (expect_fault) {
		if (expected_reason == reason) {
			printk("System error was expected\n");
			clear_fault();
		} else {
			printk("Wrong fault reason, expecting %d\n",
			       expected_reason);
			TC_END_REPORT(TC_FAIL);
			k_fatal_halt(reason);
		}
	} else {
		printk("Unexpected fault during test\n");
		TC_END_REPORT(TC_FAIL);
		k_fatal_halt(reason);
	}
}

/**
 * @brief Test to check if the thread is in user mode
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_is_usermode)
{
	/* Confirm that we are in fact running in user mode. */
	clear_fault();

	zassert_true(k_is_user_context(), "thread left in kernel mode");
}

/**
 * @brief Test to check if k_is_pre_kernel works from user mode
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_is_post_kernel)
{
	clear_fault();

	zassert_false(k_is_pre_kernel(), "still pre-kernel in user mode");
}

/**
 * @brief Test to write to a control register
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_write_control)
{
	/* Try to write to a control register. */
#if defined(CONFIG_X86)
	set_fault(K_ERR_CPU_EXCEPTION);

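	/* MOV to/from CR0 is a privileged instruction; executing it at
	 * CPL=3 must raise a general protection fault.
	 */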
#ifdef CONFIG_X86_64
	__asm__ volatile (
		"movq $0xFFFFFFFF, %rax;\n\t"
		"movq %rax, %cr0;\n\t"
		);
#else
	__asm__ volatile (
		"mov %cr0, %eax;\n\t"
		"and $0xfffeffff, %eax;\n\t"
		"mov %eax, %cr0;\n\t"
		);
#endif
	zassert_unreachable("Write to control register did not fault");

#elif defined(CONFIG_ARM64)
	uint64_t val = SPSR_MODE_EL1T;

	set_fault(K_ERR_CPU_EXCEPTION);

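	/* spsr_el1 is only accessible at EL1 or higher; this MSR from
	 * EL0 traps.
	 */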
	__asm__ volatile("msr spsr_el1, %0"
			:
			: "r" (val)
			: "memory", "cc");

	zassert_unreachable("Write to control register did not fault");

#elif defined(CONFIG_ARM)
#if defined(CONFIG_CPU_CORTEX_M)
	unsigned int msr_value;

	clear_fault();

	msr_value = __get_CONTROL();
	msr_value &= ~(CONTROL_nPRIV_Msk);
	__set_CONTROL(msr_value);
	barrier_dsync_fence_full();
	barrier_isync_fence_full();
	msr_value = __get_CONTROL();
	zassert_true((msr_value & (CONTROL_nPRIV_Msk)),
		     "Write to control register was successful");
#else
	uint32_t val;

	set_fault(K_ERR_CPU_EXCEPTION);

	val = __get_SCTLR();
	val |= SCTLR_DZ_Msk;
	__set_SCTLR(val);

	zassert_unreachable("Write to control register did not fault");
#endif
#elif defined(CONFIG_ARC)
	unsigned int er_status;

	set_fault(K_ERR_CPU_EXCEPTION);

	/* _ARC_V2_ERSTATUS is a privileged aux register */
	__asm__ volatile (
		"lr %0, [0x402]\n"
		: "=r" (er_status)::
	);
#elif defined(CONFIG_RISCV)
	unsigned int status;

	set_fault(K_ERR_CPU_EXCEPTION);

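	/* mstatus is a machine-level CSR; any access from U-mode
	 * raises an illegal instruction exception.
	 */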
	__asm__ volatile("csrr %0, mstatus" : "=r" (status));
#elif defined(CONFIG_XTENSA)
	unsigned int ps;

	set_fault(K_ERR_CPU_EXCEPTION);

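	/* PS is a privileged special register; rsr.ps from a user ring
	 * raises a privileged instruction exception.
	 */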
	__asm__ volatile("rsr.ps %0" : "=r" (ps));
#else
#error "Not implemented for this architecture"
	zassert_unreachable("Write to control register did not fault");
#endif
}

/**
 * @brief Test to disable memory protection
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_disable_mmu_mpu)
{
	/* Try to disable memory protections. */
#if defined(CONFIG_X86)
	set_fault(K_ERR_CPU_EXCEPTION);

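	/* The mask clears CR0.WP (bit 16) and CR0.PG (bit 31); writing
	 * CR0 is privileged and must fault from user mode.
	 */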
#ifdef CONFIG_X86_64
	__asm__ volatile (
		"movq %cr0, %rax;\n\t"
		"andq $0x7ffeffff, %rax;\n\t"
		"movq %rax, %cr0;\n\t"
		);
#else
	__asm__ volatile (
		"mov %cr0, %eax;\n\t"
		"and $0x7ffeffff, %eax;\n\t"
		"mov %eax, %cr0;\n\t"
		);
#endif
#elif defined(CONFIG_ARM64)
	uint64_t val;

	set_fault(K_ERR_CPU_EXCEPTION);

	__asm__ volatile("mrs %0, sctlr_el1" : "=r" (val));
	__asm__ volatile("msr sctlr_el1, %0"
			:
			: "r" (val & ~(SCTLR_M_BIT | SCTLR_C_BIT))
			: "memory", "cc");

#elif defined(CONFIG_ARM)
#ifndef CONFIG_TRUSTED_EXECUTION_NONSECURE
	set_fault(K_ERR_CPU_EXCEPTION);

	arm_core_mpu_disable();
#else
	/* Disabling the MPU from unprivileged code generates a
	 * BusFault, which is not banked between Security states.
	 * Do not execute this scenario for Non-Secure Cortex-M.
	 */
	return;
#endif /* !CONFIG_TRUSTED_EXECUTION_NONSECURE */
#elif defined(CONFIG_ARC)
	set_fault(K_ERR_CPU_EXCEPTION);

	arc_core_mpu_disable();
#elif defined(CONFIG_RISCV)
	set_fault(K_ERR_CPU_EXCEPTION);

	/*
	 * Try to make everything accessible through PMP slot 3,
	 * which should not be locked.
	 */
	csr_write(pmpaddr3, LLONG_MAX);
	csr_write(pmpcfg0, (PMP_R|PMP_W|PMP_X|PMP_NAPOT) << 24);
#elif defined(CONFIG_XTENSA)
	set_fault(K_ERR_CPU_EXCEPTION);

#if defined(CONFIG_XTENSA_MMU)
	/* Reset way 6 to do identity mapping.
	 * The compiler would complain about addr going out of range
	 * if we simply did addr = i * 0x20000000 inside the loop,
	 * so we increment instead.
	 */
	uint32_t addr = 0U;

	for (int i = 0; i < 8; i++) {
		uint32_t attr = addr | XTENSA_MMU_PERM_WX;

		__asm__ volatile("wdtlb %0, %1; witlb %0, %1"
				 :: "r"(attr), "r"(addr));

		addr += 0x20000000;
	}
#endif

#if defined(CONFIG_XTENSA_MPU)
	/* Technically, simply clearing out all foreground MPU entries
	 * allows the background map to take over, so it is not exactly
	 * disabling the MPU. However, this test is about catching
	 * userspace trying to manipulate the MPU regions, so as long
	 * as there is a kernel OOPS, we are fine.
	 */
	for (int i = 0; i < XTENSA_MPU_NUM_ENTRIES; i++) {
		__asm__ volatile("wptlb %0, %1\n\t" : : "a"(i), "a"(0));
	}
#endif

#else
#error "Not implemented for this architecture"
#endif
	zassert_unreachable("Disable MMU/MPU did not fault");
}

/**
 * @brief Test to read from kernel RAM
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_read_kernram)
{
	/* Try to read from kernel RAM. */
	void *p;

	set_fault(K_ERR_CPU_EXCEPTION);

	p = _current->init_data;
	printk("%p\n", p);
	zassert_unreachable("Read from kernel RAM did not fault");
}

/**
 * @brief Test to write to kernel RAM
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_write_kernram)
{
	/* Try to write to kernel RAM. */
	set_fault(K_ERR_CPU_EXCEPTION);

	_current->init_data = NULL;
	zassert_unreachable("Write to kernel RAM did not fault");
}

extern int _k_neg_eagain;

#include <zephyr/linker/linker-defs.h>

/**
 * @brief Test to write kernel RO
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_write_kernro)
{
	bool in_rodata;

	/* Try to write to kernel RO. */
	const char *const ptr = (const char *const)&_k_neg_eagain;

	in_rodata = ptr < __rodata_region_end &&
		    ptr >= __rodata_region_start;

#ifdef CONFIG_LINKER_USE_PINNED_SECTION
	if (!in_rodata) {
		in_rodata = ptr < lnkr_pinned_rodata_end &&
			    ptr >= lnkr_pinned_rodata_start;
	}
#endif

	zassert_true(in_rodata,
		     "_k_neg_eagain is not in rodata");

	set_fault(K_ERR_CPU_EXCEPTION);

	_k_neg_eagain = -EINVAL;
	zassert_unreachable("Write to kernel RO did not fault");
}

/**
 * @brief Test to write to kernel text section
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_write_kerntext)
{
	/* Try to write to kernel text. */
	set_fault(K_ERR_CPU_EXCEPTION);

	memset(&k_current_get, 0, 4);
	zassert_unreachable("Write to kernel text did not fault");
}

static int kernel_data;

/**
 * @brief Test to read from kernel data section
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_read_kernel_data)
{
	set_fault(K_ERR_CPU_EXCEPTION);

	printk("%d\n", kernel_data);
	zassert_unreachable("Read from kernel data did not fault");
}

/**
 * @brief Test to write to kernel data section
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_write_kernel_data)
{
	set_fault(K_ERR_CPU_EXCEPTION);

	kernel_data = 1;
	zassert_unreachable("Write to kernel data did not fault");
}

/*
 * volatile to avoid compiler mischief.
 */
K_APP_DMEM(default_part) volatile char *priv_stack_ptr;
#if defined(CONFIG_ARC)
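/* Negative byte offset spanning the privileged stack and the ARC
 * stack guard, used below to locate the privileged stack relative to
 * a variable on the user stack.
 */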
K_APP_DMEM(default_part) int32_t size = (0 - CONFIG_PRIVILEGED_STACK_SIZE -
				 Z_ARC_STACK_GUARD_SIZE);
#endif

/**
 * @brief Test to read privileged stack
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_read_priv_stack)
{
	/* Try to read from privileged stack. */
#if defined(CONFIG_ARC)
	int s[1];

	s[0] = 0;
	priv_stack_ptr = (char *)&s[0] - size;
#elif defined(CONFIG_ARM) || defined(CONFIG_X86) || defined(CONFIG_RISCV) || \
	defined(CONFIG_ARM64) || defined(CONFIG_XTENSA)
	/* priv_stack_ptr set by userspace_setup() */
#else
#error "Not implemented for this architecture"
#endif
	set_fault(K_ERR_CPU_EXCEPTION);

	printk("%c\n", *priv_stack_ptr);
	zassert_unreachable("Read from privileged stack did not fault");
}

/**
 * @brief Test to write to privileged stack
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_write_priv_stack)
{
	/* Try to write to privileged stack. */
#if defined(CONFIG_ARC)
	int s[1];

	s[0] = 0;
	priv_stack_ptr = (char *)&s[0] - size;
#elif defined(CONFIG_ARM) || defined(CONFIG_X86) || defined(CONFIG_RISCV) || \
	defined(CONFIG_ARM64) || defined(CONFIG_XTENSA)
	/* priv_stack_ptr set by userspace_setup() */
#else
#error "Not implemented for this architecture"
#endif
	set_fault(K_ERR_CPU_EXCEPTION);

	*priv_stack_ptr = 42;
	zassert_unreachable("Write to privileged stack did not fault");
}


K_APP_BMEM(default_part) static struct k_sem sem;

/**
 * @brief Test to pass a user object to system call
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_pass_user_object)
{
	/* Try to pass a user object to a system call. */
	set_fault(K_ERR_KERNEL_OOPS);

	k_sem_init(&sem, 0, 1);
	zassert_unreachable("Pass a user object to a syscall did not fault");
}

static struct k_sem ksem;

/**
 * @brief Test to pass object to a system call without permissions
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_pass_noperms_object)
{
	/* Try to pass an object to a system call w/o permissions. */
	set_fault(K_ERR_KERNEL_OOPS);

	k_sem_init(&ksem, 0, 1);
	zassert_unreachable("Pass an unauthorized object to a "
			    "syscall did not fault");
}


void thread_body(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);
}

/**
 * @brief Test to start kernel thread from usermode
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_start_kernel_thread)
{
	/* Try to start a kernel thread from a usermode thread */
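	/* Note: no K_USER flag, so this requests a kernel thread,
	 * which user code is not permitted to create.
	 */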
	set_fault(K_ERR_KERNEL_OOPS);
	k_thread_create(&test_thread, test_stack, STACKSIZE,
			thread_body, NULL, NULL, NULL,
			K_PRIO_PREEMPT(1), K_INHERIT_PERMS,
			K_NO_WAIT);
	zassert_unreachable("Create a kernel thread did not fault");
}

static void uthread_read_body(void *p1, void *p2, void *p3)
{
	unsigned int *vptr = p1;

	set_fault(K_ERR_CPU_EXCEPTION);
	printk("%u\n", *vptr);
	zassert_unreachable("Read from other thread stack did not fault");
}

static void uthread_write_body(void *p1, void *p2, void *p3)
{
	unsigned int *vptr = p1;

	set_fault(K_ERR_CPU_EXCEPTION);
	*vptr = 2U;
	zassert_unreachable("Write to other thread stack did not fault");
}

/**
 * @brief Test to read from another thread's stack
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_read_other_stack)
{
	/* Try to read from another thread's stack. */
	unsigned int val;

#if !defined(CONFIG_MEM_DOMAIN_ISOLATED_STACKS)
	/* The minimal requirement to support memory domains permits
	 * threads of the same memory domain to access each other's
	 * stacks. Some architectures support further restricting
	 * access, which can be enabled via a kconfig. If that kconfig
	 * is not enabled, skip the test.
	 */
	ztest_test_skip();
#endif

	k_thread_create(&test_thread, test_stack, STACKSIZE,
			uthread_read_body, &val, NULL, NULL,
			-1, K_USER | K_INHERIT_PERMS,
			K_NO_WAIT);

	k_thread_join(&test_thread, K_FOREVER);
}


/**
 * @brief Test to write to other thread's stack
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_write_other_stack)
{
	/* Try to write to another thread's stack. */
	unsigned int val;

#if !defined(CONFIG_MEM_DOMAIN_ISOLATED_STACKS)
	/* The minimal requirement to support memory domains permits
	 * threads of the same memory domain to access each other's
	 * stacks. Some architectures support further restricting
	 * access, which can be enabled via a kconfig. If that kconfig
	 * is not enabled, skip the test.
	 */
	ztest_test_skip();
#endif

	k_thread_create(&test_thread, test_stack, STACKSIZE,
			uthread_write_body, &val, NULL, NULL,
			-1, K_USER | K_INHERIT_PERMS,
			K_NO_WAIT);
	k_thread_join(&test_thread, K_FOREVER);
}

/**
 * @brief Test to revoke access to kobject without permission
 *
 * @details A user thread can only revoke its own access to an object.
 * In this test, a user thread tries to revoke access to an
 * unauthorized object; as a result, the system will oops.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_revoke_noperms_object)
{
	/* Attempt to revoke access to kobject w/o permissions */
	set_fault(K_ERR_KERNEL_OOPS);

	k_object_release(&ksem);

	zassert_unreachable("Revoke access to unauthorized object "
			    "did not fault");
}

/**
 * @brief Test to access object after revoking access
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_access_after_revoke)
{
	k_object_release(&test_revoke_sem);

	/* Try to access an object after revoking access to it */
	set_fault(K_ERR_KERNEL_OOPS);

	k_sem_take(&test_revoke_sem, K_NO_WAIT);

	zassert_unreachable("Using revoked object did not fault");
}

static void umode_enter_func(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	zassert_true(k_is_user_context(),
		     "Thread did not enter user mode");
}

/**
 * @brief Test that a supervisor thread can enter user mode one-way
 *
 * @details A thread running in supervisor mode must be able to make a
 * one-way transition that drops its privileges to user mode.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace, test_user_mode_enter)
{
	clear_fault();

	k_thread_user_mode_enter(umode_enter_func,
				 NULL, NULL, NULL);
}

/* Define and initialize pipe. */
K_PIPE_DEFINE(kpipe, PIPE_LEN, BYTES_TO_READ_WRITE);
/**
 * @brief Test to write to kobject using pipe
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_write_kobject_user_pipe)
{
	/*
	 * Attempt to use the k_pipe_read() system call to write over
	 * a kernel object.
	 */
	set_fault(K_ERR_KERNEL_OOPS);

	k_pipe_read(&kpipe, (uint8_t *)&test_revoke_sem, BYTES_TO_READ_WRITE, K_NO_WAIT);

	zassert_unreachable("System call memory write validation "
			    "did not fault");
}

/**
 * @brief Test to read from kobject using pipe
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_read_kobject_user_pipe)
{
	/*
	 * Attempt to use the k_pipe_write() system call to read a
	 * kernel object.
	 */
	set_fault(K_ERR_KERNEL_OOPS);

	k_pipe_write(&kpipe, (uint8_t *)&test_revoke_sem, BYTES_TO_READ_WRITE, K_NO_WAIT);

	zassert_unreachable("System call memory read validation "
			    "did not fault");
}

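/* Runs in user mode; sets the flag passed in arg1 and fails the test
 * if an expected fault never happened.
 */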
static void user_half(void *arg1, void *arg2, void *arg3)
{
	volatile bool *bool_ptr = arg1;

	*bool_ptr = true;
	compiler_barrier();
	if (expect_fault) {
		printk("Expecting a fatal error %d but succeeded instead\n",
		       expected_reason);
		ztest_test_fail();
	}
}


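/* Spawn a user thread in the current memory domain that writes to the
 * given partition variable, then join it.
 */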
static void spawn_user(volatile bool *to_modify)
{
	k_thread_create(&test_thread, test_stack, STACKSIZE, user_half,
			(void *)to_modify, NULL, NULL,
			-1, K_INHERIT_PERMS | K_USER, K_NO_WAIT);

	k_thread_join(&test_thread, K_FOREVER);
}

static void drop_user(volatile bool *to_modify)
{
	k_sleep(K_MSEC(1)); /* Force a context switch */
	k_thread_user_mode_enter(user_half, (void *)to_modify, NULL, NULL);
}

/**
 * @brief Test creation of new memory domains
 *
 * We initialize a new memory domain and show that its partition configuration
 * is correct. This new domain has "alt_part" in it, but not "default_part".
 * We then try to modify data in "default_part" and show it produces an
 * exception since that partition is not in the new domain.
 *
 * This caught a bug once where an MMU system copied page tables for the new
 * domain and accidentally copied memory partition permissions from the source
 * page tables, allowing the write to "default_part" to work.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_domain, test_1st_init_and_access_other_memdomain)
{
	struct k_mem_partition *parts[] = {
#if Z_LIBC_PARTITION_EXISTS
		&z_libc_partition,
#endif
		&ztest_mem_partition, &alt_part
	};

	zassert_equal(
		k_mem_domain_init(&alternate_domain, ARRAY_SIZE(parts), parts),
		0, "failed to initialize memory domain");

	/* Switch to alternate_domain which does not have default_part that
	 * contains default_bool. This should fault when we try to write it.
	 */
	k_mem_domain_add_thread(&alternate_domain, k_current_get());
	set_fault(K_ERR_CPU_EXCEPTION);
	spawn_user(&default_bool);
}

#if (defined(CONFIG_ARM) || (defined(CONFIG_GEN_PRIV_STACKS) && defined(CONFIG_RISCV)))
extern uint8_t *z_priv_stack_find(void *obj);
#endif
extern k_thread_stack_t ztest_thread_stack[];

/**
 * Show that changing between memory domains and dropping to user mode works
 * as expected.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_domain, test_domain_add_thread_drop_to_user)
{
	clear_fault();
	k_mem_domain_add_thread(&alternate_domain, k_current_get());
	drop_user(&alt_bool);
}

/**
 * @brief Test adding application memory partition to memory domain
 *
 * @details Show that adding a partition to a domain and then dropping to user
 * mode works as expected.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_domain, test_domain_add_part_drop_to_user)
{
	clear_fault();

	zassert_equal(
		k_mem_domain_add_partition(&k_mem_domain_default, &alt_part),
		0, "failed to add memory partition");

	drop_user(&alt_bool);
}

/**
 * Show that self-removing a partition from a domain we are a member of,
 * and then dropping to user mode faults as expected.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_domain, test_domain_remove_part_drop_to_user)
{
	/* We added alt_part to the default domain in the previous test;
	 * remove it, and then try to access it again.
	 */
	set_fault(K_ERR_CPU_EXCEPTION);

	zassert_equal(
		k_mem_domain_remove_partition(&k_mem_domain_default, &alt_part),
		0, "failed to remove partition");

	drop_user(&alt_bool);
}

/**
 * Show that changing between memory domains and then switching to another
 * thread in the same domain works as expected.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_domain_ctx, test_domain_add_thread_context_switch)
{
	clear_fault();
	k_mem_domain_add_thread(&alternate_domain, k_current_get());
	spawn_user(&alt_bool);
}

/**
 * Show that adding a partition to a domain and then switching to another
 * user thread in the same domain works as expected.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_domain_ctx, test_domain_add_part_context_switch)
{
	clear_fault();

	zassert_equal(
		k_mem_domain_add_partition(&k_mem_domain_default, &alt_part),
		0, "failed to add memory partition");

	spawn_user(&alt_bool);
}

/**
 * Show that self-removing a partition from a domain we are a member of,
 * and then switching to another user thread in the same domain faults as
 * expected.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_domain_ctx, test_domain_remove_part_context_switch)
{
	/* We added alt_part to the default domain in the previous test;
	 * remove it, and then try to access it again.
	 */
	set_fault(K_ERR_CPU_EXCEPTION);

	zassert_equal(
		k_mem_domain_remove_partition(&k_mem_domain_default, &alt_part),
		0, "failed to remove memory partition");

	spawn_user(&alt_bool);
}

void z_impl_missing_syscall(void)
{
	/* Shouldn't ever get here; no handler function compiled */
	k_panic();
}

/**
 * @brief Test unimplemented system call
 *
 * @details A syscall named missing_syscall() is declared without a
 * verification function. The kernel shall safely handle invocations
 * of unimplemented system calls.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_unimplemented_syscall)
{
	set_fault(K_ERR_KERNEL_OOPS);

	missing_syscall();
}

/**
 * @brief Test bad syscall handler
 *
 * @details When a system call handler decides to terminate the calling
 * thread, the kernel produces an error indicating that the faulting
 * system call was made from user code.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_bad_syscall)
{
	set_fault(K_ERR_KERNEL_OOPS);

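	/* Invoke syscalls with out-of-range IDs; the syscall dispatcher
	 * should reject each with a kernel oops.
	 */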
	arch_syscall_invoke0(INT_MAX);

	set_fault(K_ERR_KERNEL_OOPS);

	arch_syscall_invoke0(UINT_MAX);
}

static struct k_sem recycle_sem;

/**
 * @brief Test recycling of kernel objects
 *
 * @details Recycle valid and invalid kernel objects and check that
 * perms_count changes as expected.
 *
 * @see k_object_recycle(), k_object_find()
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace, test_object_recycle)
{
	struct k_object *ko;
	int perms_count = 0;
	int dummy = 0;

	/* Validate recycling an invalid object; after recycling this
	 * invalid object, perms_count should still end up at 1.
	 */
	ko = k_object_find(&dummy);
	zassert_true(ko == NULL, "not an invalid object");

	k_object_recycle(&dummy);

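	/* Set every permission bit on a valid object, then recycle it;
	 * recycling should reset permissions so that only the calling
	 * thread retains access (hence perms_count == 1 below).
	 */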
	ko = k_object_find(&recycle_sem);
	(void)memset(ko->perms, 0xFF, sizeof(ko->perms));

	k_object_recycle(&recycle_sem);
	zassert_true(ko != NULL, "kernel object not found");
	zassert_true(ko->flags & K_OBJ_FLAG_INITIALIZED,
		     "object wasn't marked as initialized");

	for (int i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
		perms_count += POPCOUNT(ko->perms[i]);
	}

	zassert_true(perms_count == 1, "invalid number of thread permissions");
}

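/* Trigger a fatal error with the given reason and verify that the
 * fatal error handler sees the expected one. As the tests below show,
 * user mode may only request an oops or a stack check failure; any
 * other reason is reported as K_ERR_KERNEL_OOPS.
 */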
#define test_oops(provided, expected) do { \
	expect_fault = true; \
	expected_reason = expected; \
	z_except_reason(provided); \
} while (false)

ZTEST_USER(userspace, test_oops_panic)
{
	test_oops(K_ERR_KERNEL_PANIC, K_ERR_KERNEL_OOPS);
}

ZTEST_USER(userspace, test_oops_oops)
{
	test_oops(K_ERR_KERNEL_OOPS, K_ERR_KERNEL_OOPS);
}

ZTEST_USER(userspace, test_oops_exception)
{
	test_oops(K_ERR_CPU_EXCEPTION, K_ERR_KERNEL_OOPS);
}

ZTEST_USER(userspace, test_oops_maxint)
{
	test_oops(INT_MAX, K_ERR_KERNEL_OOPS);
}

ZTEST_USER(userspace, test_oops_stackcheck)
{
	test_oops(K_ERR_STACK_CHK_FAIL, K_ERR_STACK_CHK_FAIL);
}

void z_impl_check_syscall_context(void)
{
	unsigned int key = irq_lock();

	irq_unlock(key);

	/* Make sure that interrupts aren't locked when handling system calls;
	 * key has the previous locking state before the above irq_lock() call.
	 */
	zassert_true(arch_irq_unlocked(key), "irqs locked during syscall");

	/* The kernel should not think we are in ISR context either */
	zassert_false(k_is_in_isr(), "kernel reports irq context");
}

static inline void z_vrfy_check_syscall_context(void)
{
	z_impl_check_syscall_context();
}
#include <zephyr/syscalls/check_syscall_context_mrsh.c>

ZTEST_USER(userspace, test_syscall_context)
{
	check_syscall_context();
}

#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
static void tls_leakage_user_part(void *p1, void *p2, void *p3)
{
	/* Use uint8_t so the comparison with 0xff works regardless of
	 * whether plain char is signed on this target.
	 */
	uint8_t *tls_area = p1;

	for (size_t i = 0; i < sizeof(struct _thread_userspace_local_data); i++) {
		zassert_false(tls_area[i] == 0xff,
			      "TLS data leakage to user mode");
	}
}
#endif

ZTEST(userspace, test_tls_leakage)
{
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
	/* Tests two assertions:
	 *
	 * - That a user thread has full access to its TLS area
	 * - That dropping to user mode doesn't allow any TLS data set in
	 *   supervisor mode to be leaked
	 */

	memset(_current->userspace_local_data, 0xff,
	       sizeof(struct _thread_userspace_local_data));

	k_thread_user_mode_enter(tls_leakage_user_part,
				 _current->userspace_local_data, NULL, NULL);
#else
	ztest_test_skip();
#endif
}

#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
void tls_entry(void *p1, void *p2, void *p3)
{
	printk("tls_entry\n");
}
#endif

ZTEST(userspace, test_tls_pointer)
{
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
	char *stack_obj_ptr;
	size_t stack_obj_sz;

	k_thread_create(&test_thread, test_stack, STACKSIZE, tls_entry,
			NULL, NULL, NULL, 1, K_USER, K_FOREVER);

	printk("tls pointer for thread %p: %p\n",
	       &test_thread, (void *)test_thread.userspace_local_data);

	printk("stack buffer reported bounds: [%p, %p)\n",
	       (void *)test_thread.stack_info.start,
	       (void *)(test_thread.stack_info.start +
			test_thread.stack_info.size));

#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
	stack_obj_ptr = (char *)test_thread.stack_obj_mapped;
	stack_obj_sz = test_thread.stack_obj_size;
#else
	stack_obj_ptr = (char *)test_stack;
	stack_obj_sz = sizeof(test_stack);
#endif

	printk("stack object bounds: [%p, %p)\n",
	       stack_obj_ptr, stack_obj_ptr + stack_obj_sz);

	uintptr_t tls_start = (uintptr_t)test_thread.userspace_local_data;
	uintptr_t tls_end = tls_start +
		sizeof(struct _thread_userspace_local_data);

	if ((tls_start < (uintptr_t)stack_obj_ptr) ||
	    (tls_end > (uintptr_t)stack_obj_ptr + stack_obj_sz)) {
		printk("tls area out of bounds\n");
		ztest_test_fail();
	}

	k_thread_abort(&test_thread);
#else
	ztest_test_skip();
#endif
}

K_APP_BMEM(default_part) volatile bool kernel_only_thread_ran;
K_APP_BMEM(default_part) volatile bool kernel_only_thread_user_ran;
static K_SEM_DEFINE(kernel_only_thread_run_sem, 0, 1);

void kernel_only_thread_user_entry(void *p1, void *p2, void *p3)
{
	printk("kernel only thread in user mode\n");

	kernel_only_thread_user_ran = true;
}

void kernel_only_thread_entry(void *p1, void *p2, void *p3)
{
	k_sem_take(&kernel_only_thread_run_sem, K_FOREVER);

	printk("kernel only thread in kernel mode\n");

	/* Some architectures emit a kernel OOPS instead of a panic. */
#if defined(CONFIG_ARM64)
	set_fault(K_ERR_KERNEL_OOPS);
#else
	set_fault(K_ERR_KERNEL_PANIC);
#endif

	kernel_only_thread_ran = true;

	k_thread_user_mode_enter(kernel_only_thread_user_entry, NULL, NULL, NULL);
}

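/* Size of the kernel-only thread's stack. On MMU targets it is
 * rounded up to a page multiple so memory permissions can be applied
 * at page granularity.
 */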
#ifdef CONFIG_MMU
#define KERNEL_ONLY_THREAD_STACK_SIZE (ROUND_UP(1024, CONFIG_MMU_PAGE_SIZE))
#elif CONFIG_64BIT
#define KERNEL_ONLY_THREAD_STACK_SIZE (2048)
#else
#define KERNEL_ONLY_THREAD_STACK_SIZE (1024)
#endif

static K_KERNEL_THREAD_DEFINE(kernel_only_thread,
			      KERNEL_ONLY_THREAD_STACK_SIZE,
			      kernel_only_thread_entry, NULL, NULL, NULL,
			      0, 0, 0);

ZTEST(userspace, test_kernel_only_thread)
{
	kernel_only_thread_ran = false;
	kernel_only_thread_user_ran = false;

	k_sem_give(&kernel_only_thread_run_sem);

	k_sleep(K_MSEC(500));

	if (!kernel_only_thread_ran) {
		printk("kernel only thread not running in kernel mode!\n");
		ztest_test_fail();
	}

	if (kernel_only_thread_user_ran) {
		printk("kernel only thread should not have run in user mode!\n");
		ztest_test_fail();
	}
}

void *userspace_setup(void)
{
	int ret;

	/* Most of these scenarios use the default domain */
	ret = k_mem_domain_add_partition(&k_mem_domain_default, &default_part);
	if (ret != 0) {
		printk("Failed to add default memory partition (%d)\n", ret);
		k_oops();
	}

#if defined(CONFIG_ARM64)
	struct z_arm64_thread_stack_header *hdr;
	void *vhdr = ((struct z_arm64_thread_stack_header *)ztest_thread_stack);

	hdr = vhdr;
	priv_stack_ptr = (((char *)&hdr->privilege_stack) +
			  (sizeof(hdr->privilege_stack) - 1));
#elif defined(CONFIG_ARM)
	priv_stack_ptr = (char *)z_priv_stack_find(ztest_thread_stack);
#elif defined(CONFIG_X86)
	struct z_x86_thread_stack_header *hdr;
	void *vhdr = ((struct z_x86_thread_stack_header *)ztest_thread_stack);

	hdr = vhdr;
	priv_stack_ptr = (((char *)&hdr->privilege_stack) +
			  (sizeof(hdr->privilege_stack) - 1));
#elif defined(CONFIG_RISCV)
#if defined(CONFIG_GEN_PRIV_STACKS)
	priv_stack_ptr = (char *)z_priv_stack_find(ztest_thread_stack);
#else
	priv_stack_ptr = (char *)((uintptr_t)ztest_thread_stack +
				  Z_RISCV_STACK_GUARD_SIZE);
#endif
#elif defined(CONFIG_XTENSA)
	struct xtensa_thread_stack_header *hdr;
	void *vhdr = ((struct xtensa_thread_stack_header *)ztest_thread_stack);

	hdr = vhdr;
	priv_stack_ptr = (((char *)&hdr->privilege_stack) + (sizeof(hdr->privilege_stack) - 1));
#endif
	k_thread_access_grant(k_current_get(),
			      &test_thread, &test_stack,
			      &kernel_only_thread_run_sem,
			      &test_revoke_sem, &kpipe);
	return NULL;
}

ZTEST_SUITE(userspace, NULL, userspace_setup, NULL, NULL, NULL);

ZTEST_SUITE(userspace_domain, NULL, NULL, NULL, NULL, NULL);

ZTEST_SUITE(userspace_domain_ctx, NULL, NULL, NULL, NULL, NULL);