/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/ztest.h>
#include "test_syscalls.h"

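/* Sentinel value the system call writes into the registers; the test
 * then checks that no register still holds it on return to user mode.
 */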
#define DB_VAL 0xDEADBEEF

void z_impl_test_cpu_write_reg(void)
{
	/* User-thread "CPU write registers" system call for testing.
	 *
	 * Verify the following:
	 * - Write the 0xDEADBEEF value into the registers during the
	 *   system call.
	 * - In the main test, read those registers back to verify that
	 *   all of them were scrubbed and do not contain any sensitive
	 *   data.
	 */

	/* The code below verifies that the kernel scrubs the CPU
	 * registers after returning from the system call.
	 */
#ifndef CONFIG_X86_64
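	/* IA-32: load the sentinel into the general-purpose registers.
	 * The clobber list tells the compiler these registers are
	 * overwritten, so it does not keep live values in them.
	 */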
	__asm__ volatile (
		"movl $0xDEADBEEF, %%eax;\n\t"
		"movl $0xDEADBEEF, %%ebx;\n\t"
		"movl $0xDEADBEEF, %%ecx;\n\t"
		"movl $0xDEADBEEF, %%edx;\n\t"
		"movl $0xDEADBEEF, %%edi;\n\t"
		: : : "eax", "ebx", "ecx", "edx", "edi"
		);
#else
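	/* x86-64: same idea for the caller-saved 64-bit registers, the
	 * ones the system call return path is expected to scrub.
	 */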
	__asm__ volatile (
		"movq $0xDEADBEEF, %%rax;\n\t"
		"movq $0xDEADBEEF, %%rcx;\n\t"
		"movq $0xDEADBEEF, %%rdx;\n\t"
		"movq $0xDEADBEEF, %%rsi;\n\t"
		"movq $0xDEADBEEF, %%rdi;\n\t"
		"movq $0xDEADBEEF, %%r8;\n\t"
		"movq $0xDEADBEEF, %%r9;\n\t"
		"movq $0xDEADBEEF, %%r10;\n\t"
		"movq $0xDEADBEEF, %%r11;\n\t"
		: : : "rax", "rcx", "rdx", "rsi", "rdi",
		      "r8",  "r9",  "r10", "r11"
		);
#endif
}

static inline void z_vrfy_test_cpu_write_reg(void)
{
	z_impl_test_cpu_write_reg();
}
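/* Pull in the generated marshalling code for this system call. */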
#include <zephyr/syscalls/test_cpu_write_reg_mrsh.c>

/**
 * @brief Test that the CPU scrubs registers after a system call
 *
 * @details - From user mode, invoke the test_cpu_write_reg() system
 * call, which writes the value 0xDEADBEEF into the registers.
 * - Then, in the main test function below, check the register values;
 * if no 0xDEADBEEF value is detected, the CPU scrubbed the registers
 * before exiting from the system call.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(x86_cpu_scrubs_regs, test_syscall_cpu_scrubs_regs)
{
#ifndef CONFIG_X86_64
	int x86_reg_val[5];

	test_cpu_write_reg();
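	/* Read each register back immediately after the syscall returns;
	 * any register still holding DB_VAL was not scrubbed.
	 */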
	__asm__ volatile (
		"\t movl %%eax,%0" : "=r"(x86_reg_val[0]));
	__asm__ volatile (
		"\t movl %%ebx,%0" : "=r"(x86_reg_val[1]));
	__asm__ volatile (
		"\t movl %%ecx,%0" : "=r"(x86_reg_val[2]));
	__asm__ volatile (
		"\t movl %%edx,%0" : "=r"(x86_reg_val[3]));
	__asm__ volatile (
		"\t movl %%edi,%0" : "=r"(x86_reg_val[4]));

	for (int i = 0; i < 5; i++) {
		zassert_not_equal(x86_reg_val[i], DB_VAL,
				"register value is 0xDEADBEEF, "
				"not scrubbed after system call.");
	}
#else
	long x86_64_reg_val[9];

	test_cpu_write_reg();

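	/* Same check for the x86-64 caller-saved registers. */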
	__asm__ volatile(
		"\t movq %%rax,%0" : "=r"(x86_64_reg_val[0]));
	__asm__ volatile(
		"\t movq %%rcx,%0" : "=r"(x86_64_reg_val[1]));
	__asm__ volatile(
		"\t movq %%rdx,%0" : "=r"(x86_64_reg_val[2]));
	__asm__ volatile(
		"\t movq %%rsi,%0" : "=r"(x86_64_reg_val[3]));
	__asm__ volatile(
		"\t movq %%rdi,%0" : "=r"(x86_64_reg_val[4]));
	__asm__ volatile(
		"\t movq %%r8,%0" : "=r"(x86_64_reg_val[5]));
	__asm__ volatile(
		"\t movq %%r9,%0" : "=r"(x86_64_reg_val[6]));
	__asm__ volatile(
		"\t movq %%r10,%0" : "=r"(x86_64_reg_val[7]));
	__asm__ volatile(
		"\t movq %%r11,%0" : "=r"(x86_64_reg_val[8]));

	for (int i = 0; i < 9; i++) {
		zassert_not_equal(x86_64_reg_val[i], DB_VAL,
				"register value is 0xDEADBEEF, "
				"not scrubbed after system call.");
	}
#endif
}

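/* Register the test suite; no setup or teardown hooks are required. */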
ZTEST_SUITE(x86_cpu_scrubs_regs, NULL, NULL, NULL, NULL, NULL);