/*
 * Parts derived from tests/kernel/fatal/src/main.c, which has the
 * following copyright and license:
 *
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/ztest.h>
#include <zephyr/kernel_structs.h>
#include <string.h>
#include <stdlib.h>
#include <zephyr/app_memory/app_memdomain.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/barrier.h>
#include <zephyr/debug/stack.h>
#include <zephyr/internal/syscall_handler.h>
#include "test_syscall.h"
#include <zephyr/sys/libc-hooks.h> /* for z_libc_partition */

#if defined(CONFIG_XTENSA)
#include <zephyr/arch/xtensa/cache.h>
#if defined(CONFIG_XTENSA_MMU)
#include <zephyr/arch/xtensa/xtensa_mmu.h>
#endif
#if defined(CONFIG_XTENSA_MPU)
#include <zephyr/arch/xtensa/mpu.h>
#endif
#endif

#if defined(CONFIG_ARC)
#include <zephyr/arch/arc/v2/mpu/arc_core_mpu.h>
#endif

#if defined(CONFIG_ARM)
extern void arm_core_mpu_disable(void);
#endif

#define INFO(fmt, ...) printk(fmt, ##__VA_ARGS__)
#define PIPE_LEN 1
#define BYTES_TO_READ_WRITE 1
#define STACKSIZE (256 + CONFIG_TEST_EXTRA_STACK_SIZE)

K_SEM_DEFINE(test_revoke_sem, 0, 1);

/* Used for tests that switch between memory domains: we switch between
 * the default domain and this one.
 */
struct k_mem_domain alternate_domain;

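/* Fault-expectation flags. ZTEST_BMEM places them in the ztest memory
 * partition so user-mode test threads can update them; volatile keeps
 * the compiler from caching their values across the faulting operations.
 */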
ZTEST_BMEM static volatile bool expect_fault;
ZTEST_BMEM static volatile unsigned int expected_reason;

/* Partition unique to default domain */
K_APPMEM_PARTITION_DEFINE(default_part);
K_APP_BMEM(default_part) volatile bool default_bool;
/* Partition unique to alternate domain */
K_APPMEM_PARTITION_DEFINE(alt_part);
K_APP_BMEM(alt_part) volatile bool alt_bool;

static struct k_thread test_thread;
static K_THREAD_STACK_DEFINE(test_stack, STACKSIZE);

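/* Arm/disarm the fault expectation checked by k_sys_fatal_error_handler()
 * below; the compiler barrier ensures the flags are committed to memory
 * before the faulting operation runs.
 */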
static void clear_fault(void)
{
	expect_fault = false;
	compiler_barrier();
}

static void set_fault(unsigned int reason)
{
	expect_fault = true;
	expected_reason = reason;
	compiler_barrier();
}

void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{
	INFO("Caught system error -- reason %d\n", reason);

	if (expect_fault) {
		if (expected_reason == reason) {
			printk("System error was expected\n");
			clear_fault();
		} else {
			printk("Wrong fault reason, expecting %d\n",
			       expected_reason);
			TC_END_REPORT(TC_FAIL);
			k_fatal_halt(reason);
		}
	} else {
		printk("Unexpected fault during test\n");
		TC_END_REPORT(TC_FAIL);
		k_fatal_halt(reason);
	}
}

/**
 * @brief Test to check if the thread is in user mode
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_is_usermode)
{
	/* Confirm that we are in fact running in user mode. */
	clear_fault();

	zassert_true(k_is_user_context(), "thread left in kernel mode");
}

/**
 * @brief Test to write to a control register
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_write_control)
{
	/* Try to write to a control register. */
#if defined(CONFIG_X86)
	set_fault(K_ERR_CPU_EXCEPTION);

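	/* Writing CR0 is a privileged operation; from ring 3 both of the
	 * sequences below raise a general protection fault, which Zephyr
	 * reports as K_ERR_CPU_EXCEPTION.
	 */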
#ifdef CONFIG_X86_64
	__asm__ volatile (
		"movq $0xFFFFFFFF, %rax;\n\t"
		"movq %rax, %cr0;\n\t"
		);
#else
	__asm__ volatile (
		"mov %cr0, %eax;\n\t"
		"and $0xfffeffff, %eax;\n\t"
		"mov %eax, %cr0;\n\t"
		);
#endif
	zassert_unreachable("Write to control register did not fault");

#elif defined(CONFIG_ARM64)
	uint64_t val = SPSR_MODE_EL1T;

	set_fault(K_ERR_CPU_EXCEPTION);

	__asm__ volatile("msr spsr_el1, %0"
			:
			: "r" (val)
			: "memory", "cc");

	zassert_unreachable("Write to control register did not fault");

#elif defined(CONFIG_ARM)
#if defined(CONFIG_CPU_CORTEX_M)
	unsigned int msr_value;

	clear_fault();

	msr_value = __get_CONTROL();
	msr_value &= ~(CONTROL_nPRIV_Msk);
	__set_CONTROL(msr_value);
	barrier_dsync_fence_full();
	barrier_isync_fence_full();
	msr_value = __get_CONTROL();
	zassert_true((msr_value & (CONTROL_nPRIV_Msk)),
		     "Write to control register was successful");
#else
	uint32_t val;

	set_fault(K_ERR_CPU_EXCEPTION);

	val = __get_SCTLR();
	val |= SCTLR_DZ_Msk;
	__set_SCTLR(val);

	zassert_unreachable("Write to control register did not fault");
#endif
#elif defined(CONFIG_ARC)
	unsigned int er_status;

	set_fault(K_ERR_CPU_EXCEPTION);

	/* _ARC_V2_ERSTATUS is privileged aux reg */
	__asm__ volatile (
		"lr %0, [0x402]\n"
		: "=r" (er_status)::
	);
#elif defined(CONFIG_RISCV)
	unsigned int status;

	set_fault(K_ERR_CPU_EXCEPTION);

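	/* mstatus is a machine-mode CSR; reading it from U-mode traps
	 * with an illegal instruction exception.
	 */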
	__asm__ volatile("csrr %0, mstatus" : "=r" (status));
#elif defined(CONFIG_XTENSA)
	unsigned int ps;

	set_fault(K_ERR_CPU_EXCEPTION);

	__asm__ volatile("rsr.ps %0" : "=r" (ps));
#else
#error "Not implemented for this architecture"
	zassert_unreachable("Write to control register did not fault");
#endif
}

/**
 * @brief Test to disable memory protection
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_disable_mmu_mpu)
{
	/* Try to disable memory protections. */
#if defined(CONFIG_X86)
	set_fault(K_ERR_CPU_EXCEPTION);

#ifdef CONFIG_X86_64
	__asm__ volatile (
		"movq %cr0, %rax;\n\t"
		"andq $0x7ffeffff, %rax;\n\t"
		"movq %rax, %cr0;\n\t"
		);
#else
	__asm__ volatile (
		"mov %cr0, %eax;\n\t"
		"and $0x7ffeffff, %eax;\n\t"
		"mov %eax, %cr0;\n\t"
		);
#endif
#elif defined(CONFIG_ARM64)
	uint64_t val;

	set_fault(K_ERR_CPU_EXCEPTION);

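	/* SCTLR_EL1.M enables the MMU and SCTLR_EL1.C the data cache;
	 * the MSR write itself is EL1-privileged and traps at EL0 before
	 * either bit can be cleared.
	 */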
	__asm__ volatile("mrs %0, sctlr_el1" : "=r" (val));
	__asm__ volatile("msr sctlr_el1, %0"
			:
			: "r" (val & ~(SCTLR_M_BIT | SCTLR_C_BIT))
			: "memory", "cc");

#elif defined(CONFIG_ARM)
#ifndef CONFIG_TRUSTED_EXECUTION_NONSECURE
	set_fault(K_ERR_CPU_EXCEPTION);

	arm_core_mpu_disable();
#else
	/* Disabling the MPU from unprivileged code generates a BusFault,
	 * which is not banked between Security states. Do not execute
	 * this scenario for Non-Secure Cortex-M.
	 */
	return;
#endif /* !CONFIG_TRUSTED_EXECUTION_NONSECURE */
#elif defined(CONFIG_ARC)
	set_fault(K_ERR_CPU_EXCEPTION);

	arc_core_mpu_disable();
#elif defined(CONFIG_RISCV)
	set_fault(K_ERR_CPU_EXCEPTION);

	/*
	 * Try to make everything accessible through PMP slot 3,
	 * which should not be locked.
	 */
	csr_write(pmpaddr3, LLONG_MAX);
	csr_write(pmpcfg0, (PMP_R|PMP_W|PMP_X|PMP_NAPOT) << 24);
#elif defined(CONFIG_XTENSA)
	set_fault(K_ERR_CPU_EXCEPTION);

#if defined(CONFIG_XTENSA_MMU)
	/* Reset way 6 to do identity mapping.
	 * The compiler would complain about addr going out of range if we
	 * simply did addr = i * 0x20000000 inside the loop, so we
	 * increment instead.
	 */
	uint32_t addr = 0U;

	for (int i = 0; i < 8; i++) {
		uint32_t attr = addr | XTENSA_MMU_PERM_WX;

		__asm__ volatile("wdtlb %0, %1; witlb %0, %1"
				 :: "r"(attr), "r"(addr));

		addr += 0x20000000;
	}
#endif

#if defined(CONFIG_XTENSA_MPU)
	/* Technically, simply clearing out all foreground MPU entries
	 * allows the background map to take over, so it is not exactly
	 * disabling the MPU. However, this test is about catching userspace
	 * trying to manipulate the MPU regions, so as long as there is
	 * a kernel OOPS we are fine.
	 */
	for (int i = 0; i < XTENSA_MPU_NUM_ENTRIES; i++) {
		__asm__ volatile("wptlb %0, %1\n\t" : : "a"(i), "a"(0));
	}
#endif

#else
#error "Not implemented for this architecture"
#endif
	zassert_unreachable("Disable MMU/MPU did not fault");
}

/**
 * @brief Test to read from kernel RAM
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_read_kernram)
{
	/* Try to read from kernel RAM. */
	void *p;

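	/* _current points at the running thread's struct k_thread, which
	 * lives in kernel memory; reading its fields from user mode must
	 * fault.
	 */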
	set_fault(K_ERR_CPU_EXCEPTION);

	p = _current->init_data;
	printk("%p\n", p);
	zassert_unreachable("Read from kernel RAM did not fault");
}

/**
 * @brief Test to write to kernel RAM
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_write_kernram)
{
	/* Try to write to kernel RAM. */
	set_fault(K_ERR_CPU_EXCEPTION);

	_current->init_data = NULL;
	zassert_unreachable("Write to kernel RAM did not fault");
}

extern int _k_neg_eagain;

#include <zephyr/linker/linker-defs.h>

/**
 * @brief Test to write kernel RO
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_write_kernro)
{
	bool in_rodata;

	/* Try to write to kernel RO. */
	const char *const ptr = (const char *const)&_k_neg_eagain;

	in_rodata = ptr < __rodata_region_end &&
		    ptr >= __rodata_region_start;

#ifdef CONFIG_LINKER_USE_PINNED_SECTION
	if (!in_rodata) {
		in_rodata = ptr < lnkr_pinned_rodata_end &&
			    ptr >= lnkr_pinned_rodata_start;
	}
#endif

	zassert_true(in_rodata,
		     "_k_neg_eagain is not in rodata");

	set_fault(K_ERR_CPU_EXCEPTION);

	_k_neg_eagain = -EINVAL;
	zassert_unreachable("Write to kernel RO did not fault");
}

/**
 * @brief Test to write to kernel text section
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_write_kerntext)
{
	/* Try to write to kernel text. */
	set_fault(K_ERR_CPU_EXCEPTION);

	memset(&k_current_get, 0, 4);
	zassert_unreachable("Write to kernel text did not fault");
}

static int kernel_data;

/**
 * @brief Test to read from kernel data section
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_read_kernel_data)
{
	set_fault(K_ERR_CPU_EXCEPTION);

	printk("%d\n", kernel_data);
	zassert_unreachable("Read from kernel data did not fault");
}

/**
 * @brief Test to write to kernel data section
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_write_kernel_data)
{
	set_fault(K_ERR_CPU_EXCEPTION);

	kernel_data = 1;
	zassert_unreachable("Write to kernel data did not fault");
}

/*
 * volatile to avoid compiler mischief.
 */
K_APP_DMEM(default_part) volatile char *priv_stack_ptr;
#if defined(CONFIG_ARC)
K_APP_DMEM(default_part) int32_t size = (0 - CONFIG_PRIVILEGED_STACK_SIZE -
				 Z_ARC_STACK_GUARD_SIZE);
#endif

/**
 * @brief Test to read privileged stack
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_read_priv_stack)
{
	/* Try to read from privileged stack. */
#if defined(CONFIG_ARC)
	int s[1];

	s[0] = 0;
	priv_stack_ptr = (char *)&s[0] - size;
#elif defined(CONFIG_ARM) || defined(CONFIG_X86) || defined(CONFIG_RISCV) || \
	defined(CONFIG_ARM64) || defined(CONFIG_XTENSA)
	/* priv_stack_ptr set by userspace_setup() */
#else
#error "Not implemented for this architecture"
#endif
	set_fault(K_ERR_CPU_EXCEPTION);

	printk("%c\n", *priv_stack_ptr);
	zassert_unreachable("Read from privileged stack did not fault");
}

/**
 * @brief Test to write to privileged stack
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_write_priv_stack)
{
	/* Try to write to privileged stack. */
#if defined(CONFIG_ARC)
	int s[1];

	s[0] = 0;
	priv_stack_ptr = (char *)&s[0] - size;
#elif defined(CONFIG_ARM) || defined(CONFIG_X86) || defined(CONFIG_RISCV) || \
	defined(CONFIG_ARM64) || defined(CONFIG_XTENSA)
	/* priv_stack_ptr set by userspace_setup() */
#else
#error "Not implemented for this architecture"
#endif
	set_fault(K_ERR_CPU_EXCEPTION);

	*priv_stack_ptr = 42;
	zassert_unreachable("Write to privileged stack did not fault");
}


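/* This semaphore lives in a user-accessible partition rather than in
 * kernel memory, so it can never be a valid kernel object: system calls
 * handed its address must reject it.
 */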
K_APP_BMEM(default_part) static struct k_sem sem;

/**
 * @brief Test to pass a user object to system call
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_pass_user_object)
{
	/* Try to pass a user object to a system call. */
	set_fault(K_ERR_KERNEL_OOPS);

	k_sem_init(&sem, 0, 1);
	zassert_unreachable("Pass a user object to a syscall did not fault");
}

static struct k_sem ksem;

/**
 * @brief Test to pass object to a system call without permissions
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_pass_noperms_object)
{
	/* Try to pass an object to a system call w/o permissions. */
	set_fault(K_ERR_KERNEL_OOPS);

	k_sem_init(&ksem, 0, 1);
	zassert_unreachable("Pass an unauthorized object to a "
			    "syscall did not fault");
}


void thread_body(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);
}

/**
 * @brief Test to start kernel thread from usermode
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_start_kernel_thread)
{
	/* Try to start a kernel thread from a usermode thread */
	set_fault(K_ERR_KERNEL_OOPS);
	k_thread_create(&test_thread, test_stack, STACKSIZE,
			thread_body, NULL, NULL, NULL,
			K_PRIO_PREEMPT(1), K_INHERIT_PERMS,
			K_NO_WAIT);
	zassert_unreachable("Create a kernel thread did not fault");
}

static void uthread_read_body(void *p1, void *p2, void *p3)
{
	unsigned int *vptr = p1;

	set_fault(K_ERR_CPU_EXCEPTION);
	printk("%u\n", *vptr);
	zassert_unreachable("Read from other thread stack did not fault");
}

static void uthread_write_body(void *p1, void *p2, void *p3)
{
	unsigned int *vptr = p1;

	set_fault(K_ERR_CPU_EXCEPTION);
	*vptr = 2U;
	zassert_unreachable("Write to other thread stack did not fault");
}

/**
 * @brief Test to read from another thread's stack
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_read_other_stack)
{
	/* Try to read from another thread's stack. */
	unsigned int val;

#if !defined(CONFIG_MEM_DOMAIN_ISOLATED_STACKS)
	/* The minimal requirement to support memory domains permits
	 * threads of the same memory domain to access each other's stacks.
	 * Some architectures support further restricting access, which
	 * can be enabled via a kconfig. If that kconfig is not enabled,
	 * skip the test.
	 */
	ztest_test_skip();
#endif

	k_thread_create(&test_thread, test_stack, STACKSIZE,
			uthread_read_body, &val, NULL, NULL,
			-1, K_USER | K_INHERIT_PERMS,
			K_NO_WAIT);

	k_thread_join(&test_thread, K_FOREVER);
}


/**
 * @brief Test to write to other thread's stack
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_write_other_stack)
{
	/* Try to write to another thread's stack. */
	unsigned int val;

#if !defined(CONFIG_MEM_DOMAIN_ISOLATED_STACKS)
	/* The minimal requirement to support memory domains permits
	 * threads of the same memory domain to access each other's stacks.
	 * Some architectures support further restricting access, which
	 * can be enabled via a kconfig. If that kconfig is not enabled,
	 * skip the test.
	 */
	ztest_test_skip();
#endif

	k_thread_create(&test_thread, test_stack, STACKSIZE,
			uthread_write_body, &val, NULL, NULL,
			-1, K_USER | K_INHERIT_PERMS,
			K_NO_WAIT);
	k_thread_join(&test_thread, K_FOREVER);
}

/**
 * @brief Test to revoke access to kobject without permission
 *
 * @details A user thread can only revoke its own access to an object.
 * In this test the user thread tries to revoke access to an unauthorized
 * object, and as a result the system faults.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_revoke_noperms_object)
{
	/* Attempt to revoke access to kobject w/o permissions */
	set_fault(K_ERR_KERNEL_OOPS);

	k_object_release(&ksem);

	zassert_unreachable("Revoke access to unauthorized object "
			    "did not fault");
}

/**
 * @brief Test to access object after revoking access
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_access_after_revoke)
{
	k_object_release(&test_revoke_sem);

	/* Try to access an object after revoking access to it */
	set_fault(K_ERR_KERNEL_OOPS);

	k_sem_take(&test_revoke_sem, K_NO_WAIT);

	zassert_unreachable("Using revoked object did not fault");
}

static void umode_enter_func(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	zassert_true(k_is_user_context(),
		     "Thread did not enter user mode");
}

/**
 * @brief Test that a supervisor thread can enter user mode one-way
 *
 * @details A thread running in supervisor mode must have a one-way
 * operation to drop privileges to user mode.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace, test_user_mode_enter)
{
	clear_fault();

	k_thread_user_mode_enter(umode_enter_func,
				 NULL, NULL, NULL);
}

/* Define and initialize pipe. */
K_PIPE_DEFINE(kpipe, PIPE_LEN, BYTES_TO_READ_WRITE);
K_APP_BMEM(default_part) static size_t bytes_written_read;

/**
 * @brief Test to write to kobject using pipe
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_write_kobject_user_pipe)
{
	/*
	 * Attempt to use system call from k_pipe_get to write over
	 * a kernel object.
	 */
	set_fault(K_ERR_KERNEL_OOPS);

	k_pipe_get(&kpipe, &test_revoke_sem, BYTES_TO_READ_WRITE,
		   &bytes_written_read, 1, K_NO_WAIT);

	zassert_unreachable("System call memory write validation "
			    "did not fault");
}

/**
 * @brief Test to read from kobject using pipe
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_read_kobject_user_pipe)
{
	/*
	 * Attempt to use system call from k_pipe_put to read a
	 * kernel object.
	 */
	set_fault(K_ERR_KERNEL_OOPS);

	k_pipe_put(&kpipe, &test_revoke_sem, BYTES_TO_READ_WRITE,
		   &bytes_written_read, 1, K_NO_WAIT);

	zassert_unreachable("System call memory read validation "
			    "did not fault");
}

static void user_half(void *arg1, void *arg2, void *arg3)
{
	volatile bool *bool_ptr = arg1;

	*bool_ptr = true;
	compiler_barrier();
	if (expect_fault) {
		printk("Expecting a fatal error %d but succeeded instead\n",
		       expected_reason);
		ztest_test_fail();
	}
}


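/* Two ways to run user_half() against a target boolean: spawn_user()
 * launches it in a fresh user thread and joins it, while drop_user()
 * makes the current thread permanently drop its privileges first.
 */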
static void spawn_user(volatile bool *to_modify)
{
	k_thread_create(&test_thread, test_stack, STACKSIZE, user_half,
			(void *)to_modify, NULL, NULL,
			-1, K_INHERIT_PERMS | K_USER, K_NO_WAIT);

	k_thread_join(&test_thread, K_FOREVER);
}

static void drop_user(volatile bool *to_modify)
{
	k_sleep(K_MSEC(1)); /* Force a context switch */
	k_thread_user_mode_enter(user_half, (void *)to_modify, NULL, NULL);
}

/**
 * @brief Test creation of new memory domains
 *
 * We initialize a new memory domain and show that its partition configuration
 * is correct. This new domain has "alt_part" in it, but not "default_part".
 * We then try to modify data in "default_part" and show it produces an
 * exception since that partition is not in the new domain.
 *
 * This caught a bug once where an MMU system copied page tables for the new
 * domain and accidentally copied memory partition permissions from the source
 * page tables, allowing the write to "default_part" to work.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_domain, test_1st_init_and_access_other_memdomain)
{
	struct k_mem_partition *parts[] = {
#if Z_LIBC_PARTITION_EXISTS
		&z_libc_partition,
#endif
		&ztest_mem_partition, &alt_part
	};

	zassert_equal(
		k_mem_domain_init(&alternate_domain, ARRAY_SIZE(parts), parts),
		0, "failed to initialize memory domain");

	/* Switch to alternate_domain, which does not have default_part that
	 * contains default_bool. This should fault when we try to write it.
	 */
	k_mem_domain_add_thread(&alternate_domain, k_current_get());
	set_fault(K_ERR_CPU_EXCEPTION);
	spawn_user(&default_bool);
}

#if (defined(CONFIG_ARM) || (defined(CONFIG_GEN_PRIV_STACKS) && defined(CONFIG_RISCV)))
extern uint8_t *z_priv_stack_find(void *obj);
#endif
extern k_thread_stack_t ztest_thread_stack[];

/**
 * Show that changing between memory domains and dropping to user mode works
 * as expected.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_domain, test_domain_add_thread_drop_to_user)
{
	clear_fault();
	k_mem_domain_add_thread(&alternate_domain, k_current_get());
	drop_user(&alt_bool);
}

/**
 * @brief Test adding application memory partition to memory domain
 *
 * @details Show that adding a partition to a domain and then dropping to user
 * mode works as expected.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_domain, test_domain_add_part_drop_to_user)
{
	clear_fault();

	zassert_equal(
		k_mem_domain_add_partition(&k_mem_domain_default, &alt_part),
		0, "failed to add memory partition");

	drop_user(&alt_bool);
}

/**
 * Show that self-removing a partition from a domain we are a member of,
 * and then dropping to user mode faults as expected.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_domain, test_domain_remove_part_drop_to_user)
{
	/* We added alt_part to the default domain in the previous test;
	 * remove it, and then try to access it again.
	 */
	set_fault(K_ERR_CPU_EXCEPTION);

	zassert_equal(
		k_mem_domain_remove_partition(&k_mem_domain_default, &alt_part),
		0, "failed to remove partition");

	drop_user(&alt_bool);
}

/**
 * Show that changing between memory domains and then switching to another
 * thread in the same domain works as expected.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_domain_ctx, test_domain_add_thread_context_switch)
{
	clear_fault();
	k_mem_domain_add_thread(&alternate_domain, k_current_get());
	spawn_user(&alt_bool);
}

/**
 * Show that adding a partition to a domain and then switching to another
 * user thread in the same domain works as expected.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_domain_ctx, test_domain_add_part_context_switch)
{
	clear_fault();

	zassert_equal(
		k_mem_domain_add_partition(&k_mem_domain_default, &alt_part),
		0, "failed to add memory partition");

	spawn_user(&alt_bool);
}

/**
 * Show that self-removing a partition from a domain we are a member of,
 * and then switching to another user thread in the same domain faults as
 * expected.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace_domain_ctx, test_domain_remove_part_context_switch)
{
	/* We added alt_part to the default domain in the previous test;
	 * remove it, and then try to access it again.
	 */
	set_fault(K_ERR_CPU_EXCEPTION);

	zassert_equal(
		k_mem_domain_remove_partition(&k_mem_domain_default, &alt_part),
		0, "failed to remove memory partition");

	spawn_user(&alt_bool);
}

void z_impl_missing_syscall(void)
{
	/* Shouldn't ever get here; no handler function compiled */
	k_panic();
}

/**
 * @brief Test unimplemented system call
 *
 * @details Declare a syscall named missing_syscall() without a verification
 * function. The kernel shall safely handle invocations of unimplemented
 * system calls.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_unimplemented_syscall)
{
	set_fault(K_ERR_KERNEL_OOPS);

	missing_syscall();
}

/**
 * @brief Test bad syscall handler
 *
 * @details When a system call handler decides to terminate the calling
 * thread, the kernel produces an error indicating that the faulting
 * system call was invoked from user code.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(userspace, test_bad_syscall)
{
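	/* Invoke raw syscall IDs that are out of range; the dispatcher
	 * should oops on both rather than index past the handler table.
	 */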
	set_fault(K_ERR_KERNEL_OOPS);

	arch_syscall_invoke0(INT_MAX);

	set_fault(K_ERR_KERNEL_OOPS);

	arch_syscall_invoke0(UINT_MAX);
}

static struct k_sem recycle_sem;

/**
 * @brief Test recycle object
 *
 * @details Test recycling valid and invalid kernel objects, and check
 * that perms_count changes as expected.
 *
 * @see k_object_recycle(), k_object_find()
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(userspace, test_object_recycle)
{
	struct k_object *ko;
	int perms_count = 0;
	int dummy = 0;

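	/* Each set bit in ko->perms grants one thread access to the
	 * object; recycling wipes the bitmap and re-grants only the
	 * calling thread, which is why exactly one bit is expected to
	 * survive below.
	 */
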
	/* Validate recycling an invalid object: after recycling this
	 * invalid object, perms_count should still end up at 1.
	 */
	ko = k_object_find(&dummy);
	zassert_true(ko == NULL, "not an invalid object");

	k_object_recycle(&dummy);

	ko = k_object_find(&recycle_sem);
	zassert_true(ko != NULL, "kernel object not found");
	(void)memset(ko->perms, 0xFF, sizeof(ko->perms));

	k_object_recycle(&recycle_sem);
	zassert_true(ko->flags & K_OBJ_FLAG_INITIALIZED,
		     "object wasn't marked as initialized");

	for (int i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
		perms_count += POPCOUNT(ko->perms[i]);
	}

	zassert_true(perms_count == 1, "invalid number of thread permissions");
}

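/* z_except_reason() raises a software-generated fatal error. As the tests
 * below demonstrate, every reason requested from user mode other than a
 * stack check failure is reported as K_ERR_KERNEL_OOPS, so user threads
 * cannot declare a kernel panic.
 */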
#define test_oops(provided, expected) do { \
	expect_fault = true; \
	expected_reason = expected; \
	z_except_reason(provided); \
} while (false)

ZTEST_USER(userspace, test_oops_panic)
{
	test_oops(K_ERR_KERNEL_PANIC, K_ERR_KERNEL_OOPS);
}

ZTEST_USER(userspace, test_oops_oops)
{
	test_oops(K_ERR_KERNEL_OOPS, K_ERR_KERNEL_OOPS);
}

ZTEST_USER(userspace, test_oops_exception)
{
	test_oops(K_ERR_CPU_EXCEPTION, K_ERR_KERNEL_OOPS);
}

ZTEST_USER(userspace, test_oops_maxint)
{
	test_oops(INT_MAX, K_ERR_KERNEL_OOPS);
}

ZTEST_USER(userspace, test_oops_stackcheck)
{
	test_oops(K_ERR_STACK_CHK_FAIL, K_ERR_STACK_CHK_FAIL);
}

void z_impl_check_syscall_context(void)
{
	unsigned int key = irq_lock();

	irq_unlock(key);

	/* Make sure that interrupts aren't locked when handling system calls;
	 * key has the previous locking state before the above irq_lock() call.
	 */
	zassert_true(arch_irq_unlocked(key), "irqs locked during syscall");

	/* The kernel should not think we are in ISR context either */
	zassert_false(k_is_in_isr(), "kernel reports irq context");
}

static inline void z_vrfy_check_syscall_context(void)
{
	return z_impl_check_syscall_context();
}
#include <zephyr/syscalls/check_syscall_context_mrsh.c>

ZTEST_USER(userspace, test_syscall_context)
{
	check_syscall_context();
}

#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
static void tls_leakage_user_part(void *p1, void *p2, void *p3)
{
	char *tls_area = p1;

	for (int i = 0; i < sizeof(struct _thread_userspace_local_data); i++) {
		zassert_false(tls_area[i] == 0xff,
			      "TLS data leakage to user mode");
	}
}
#endif

ZTEST(userspace, test_tls_leakage)
{
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
	/* Tests two assertions:
	 *
	 * - That a user thread has full access to its TLS area
	 * - That dropping to user mode doesn't allow any TLS data set in
	 *   supervisor mode to be leaked
	 */

	memset(_current->userspace_local_data, 0xff,
	       sizeof(struct _thread_userspace_local_data));

	k_thread_user_mode_enter(tls_leakage_user_part,
				 _current->userspace_local_data, NULL, NULL);
#else
	ztest_test_skip();
#endif
}

#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
void tls_entry(void *p1, void *p2, void *p3)
{
	printk("tls_entry\n");
}
#endif

ZTEST(userspace, test_tls_pointer)
{
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
	char *stack_obj_ptr;
	size_t stack_obj_sz;

	k_thread_create(&test_thread, test_stack, STACKSIZE, tls_entry,
			NULL, NULL, NULL, 1, K_USER, K_FOREVER);

	printk("tls pointer for thread %p: %p\n",
	       &test_thread, (void *)test_thread.userspace_local_data);

	printk("stack buffer reported bounds: [%p, %p)\n",
	       (void *)test_thread.stack_info.start,
	       (void *)(test_thread.stack_info.start +
			test_thread.stack_info.size));

#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
	stack_obj_ptr = (char *)test_thread.stack_obj_mapped;
	stack_obj_sz = test_thread.stack_obj_size;
#else
	stack_obj_ptr = (char *)test_stack;
	stack_obj_sz = sizeof(test_stack);
#endif

	printk("stack object bounds: [%p, %p)\n",
	       stack_obj_ptr, stack_obj_ptr + stack_obj_sz);

	uintptr_t tls_start = (uintptr_t)test_thread.userspace_local_data;
	uintptr_t tls_end = tls_start +
		sizeof(struct _thread_userspace_local_data);

	if ((tls_start < (uintptr_t)stack_obj_ptr) ||
	    (tls_end > (uintptr_t)stack_obj_ptr + stack_obj_sz)) {
		printk("tls area out of bounds\n");
		ztest_test_fail();
	}

	k_thread_abort(&test_thread);
#else
	ztest_test_skip();
#endif
}

K_APP_BMEM(default_part) volatile bool kernel_only_thread_ran;
K_APP_BMEM(default_part) volatile bool kernel_only_thread_user_ran;
static K_SEM_DEFINE(kernel_only_thread_run_sem, 0, 1);

void kernel_only_thread_user_entry(void *p1, void *p2, void *p3)
{
	printk("kernel only thread in user mode\n");

	kernel_only_thread_user_ran = true;
}

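/* This thread is defined with K_KERNEL_THREAD_DEFINE and so has no
 * user-mode setup; asking it to drop to user mode below is expected to
 * end in a fatal error instead of ever reaching the user entry point.
 */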
void kernel_only_thread_entry(void *p1, void *p2, void *p3)
{
	k_sem_take(&kernel_only_thread_run_sem, K_FOREVER);

	printk("kernel only thread in kernel mode\n");

	/* Some architectures emit kernel OOPS instead of panic. */
#if defined(CONFIG_ARM64)
	set_fault(K_ERR_KERNEL_OOPS);
#else
	set_fault(K_ERR_KERNEL_PANIC);
#endif

	kernel_only_thread_ran = true;

	k_thread_user_mode_enter(kernel_only_thread_user_entry, NULL, NULL, NULL);
}

#ifdef CONFIG_MMU
#define KERNEL_ONLY_THREAD_STACK_SIZE (ROUND_UP(1024, CONFIG_MMU_PAGE_SIZE))
#elif CONFIG_64BIT
#define KERNEL_ONLY_THREAD_STACK_SIZE (2048)
#else
#define KERNEL_ONLY_THREAD_STACK_SIZE (1024)
#endif

static K_KERNEL_THREAD_DEFINE(kernel_only_thread,
			      KERNEL_ONLY_THREAD_STACK_SIZE,
			      kernel_only_thread_entry, NULL, NULL, NULL,
			      0, 0, 0);

ZTEST(userspace, test_kernel_only_thread)
{
	kernel_only_thread_ran = false;
	kernel_only_thread_user_ran = false;

	k_sem_give(&kernel_only_thread_run_sem);

	k_sleep(K_MSEC(500));

	if (!kernel_only_thread_ran) {
		printk("kernel only thread not running in kernel mode!\n");
		ztest_test_fail();
	}

	if (kernel_only_thread_user_ran) {
		printk("kernel only thread should not have run in user mode!\n");
		ztest_test_fail();
	}
}

void *userspace_setup(void)
{
	int ret;

	/* Most of these scenarios use the default domain */
	ret = k_mem_domain_add_partition(&k_mem_domain_default, &default_part);
	if (ret != 0) {
		printk("Failed to add default memory partition (%d)\n", ret);
		k_oops();
	}

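	/* Locate the test thread's privilege-mode stack so the
	 * test_read/write_priv_stack scenarios can probe it. On most
	 * architectures we point at the last byte of the privilege stack
	 * area; on RISC-V without generated privilege stacks we point
	 * just past the stack guard.
	 */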
#if defined(CONFIG_ARM64)
	struct z_arm64_thread_stack_header *hdr;
	void *vhdr = ((struct z_arm64_thread_stack_header *)ztest_thread_stack);

	hdr = vhdr;
	priv_stack_ptr = (((char *)&hdr->privilege_stack) +
			  (sizeof(hdr->privilege_stack) - 1));
#elif defined(CONFIG_ARM)
	priv_stack_ptr = (char *)z_priv_stack_find(ztest_thread_stack);
#elif defined(CONFIG_X86)
	struct z_x86_thread_stack_header *hdr;
	void *vhdr = ((struct z_x86_thread_stack_header *)ztest_thread_stack);

	hdr = vhdr;
	priv_stack_ptr = (((char *)&hdr->privilege_stack) +
			  (sizeof(hdr->privilege_stack) - 1));
#elif defined(CONFIG_RISCV)
#if defined(CONFIG_GEN_PRIV_STACKS)
	priv_stack_ptr = (char *)z_priv_stack_find(ztest_thread_stack);
#else
	priv_stack_ptr = (char *)((uintptr_t)ztest_thread_stack +
				  Z_RISCV_STACK_GUARD_SIZE);
#endif
#endif
	k_thread_access_grant(k_current_get(),
			      &test_thread, &test_stack,
			      &kernel_only_thread_run_sem,
			      &test_revoke_sem, &kpipe);
	return NULL;
}

ZTEST_SUITE(userspace, NULL, userspace_setup, NULL, NULL, NULL);

ZTEST_SUITE(userspace_domain, NULL, NULL, NULL, NULL, NULL);

ZTEST_SUITE(userspace_domain_ctx, NULL, NULL, NULL, NULL, NULL);