/*
 * Copyright (c) 2020 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * x86-specific tests for MMU features and page tables
 */

#include <zephyr/kernel.h>
#include <zephyr/ztest.h>
#include <zephyr/tc_util.h>
#include <zephyr/arch/x86/mmustructs.h>
#include <x86_mmu.h>
#include <zephyr/linker/linker-defs.h>
#include <mmu.h>
#include "main.h"

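/* Expected leaf level of a 4K page mapping as reported by
 * z_x86_pentry_get(): level 3 under x86_64 4-level paging, level 2
 * under PAE, level 1 under 32-bit 2-level paging. A mapping found at
 * a shallower level is a big page.
 */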
#ifdef CONFIG_X86_64
#define PT_LEVEL 3
#elif CONFIG_X86_PAE
#define PT_LEVEL 2
#else
#define PT_LEVEL 1
#endif

/* Set of flags whose state we will check. Ignore Accessed/Dirty.
 * At leaf level the PS bit indicates PAT, but regardless we don't set it
 */
#define FLAGS_MASK	(MMU_P | MMU_RW | MMU_US | MMU_PWT | MMU_PCD | \
			 MMU_G | MMU_PS | MMU_XD)

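/* Helpers for checking linker-defined regions: LPTR()/LSIZE() turn the
 * linker symbols <name>_start and <name>_size into a pointer and a byte
 * count, and IN_REGION() tests whether a virtual address falls within
 * the named region.
 */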
#define LPTR(name, suffix) ((uint8_t *)&_CONCAT(name, suffix))
#define LSIZE(name, suffix) ((size_t)&_CONCAT(name, suffix))
#define IN_REGION(name, virt) \
	(virt >= LPTR(name, _start) && \
	 virt < (LPTR(name, _start) + LSIZE(name, _size)))

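/* Linker-provided bounds of the x86_64 low-memory (locore) sections;
 * their permissions are checked separately at the end of
 * test_ram_perms().
 */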
#ifdef CONFIG_X86_64
extern char _locore_start[];
extern char _locore_size[];
extern char _lorodata_start[];
extern char _lorodata_size[];
extern char _lodata_end[];

#define LOCORE_START	((uint8_t *)&_locore_start)
#define LOCORE_END	((uint8_t *)&_lodata_end)
#endif

#ifdef CONFIG_COVERAGE_GCOV
extern char __gcov_bss_start[];
extern char __gcov_bss_size[];
#endif

#include <zephyr/sys/libc-hooks.h>
#ifdef Z_LIBC_PARTITION_EXISTS
extern char z_data_smem_z_libc_partition_part_start[];
extern char z_data_smem_z_libc_partition_part_size[];
#endif

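/* Look up the paging structure entry for a virtual address in the
 * currently active page tables. Asserts that the entry is present and
 * mapped at the expected leaf level (not a big page), and returns the
 * flag bits selected by FLAGS_MASK through *flags.
 */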
static pentry_t get_entry(pentry_t *flags, void *addr)
{
	int level;
	pentry_t entry;

	z_x86_pentry_get(&level, &entry, z_x86_page_tables_get(), addr);

	zassert_true((entry & MMU_P) != 0,
		     "non-present RAM entry");
	zassert_equal(level, PT_LEVEL, "bigpage found");
	*flags = entry & FLAGS_MASK;

	return entry;
}

/**
 * Test that MMU flags on RAM virtual address range are set properly
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(x86_pagetables, test_ram_perms)
{
	uint8_t *pos;

	pentry_t entry, flags, expected;

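	/* When generic sections are not guaranteed to be present at boot
	 * (e.g. demand paging configurations), only scan up to the end of
	 * the pinned region; anything beyond it may not be mapped yet.
	 */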
#ifdef CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT
	const uint8_t *mem_range_end = K_MEM_KERNEL_VIRT_END;
#else
	const uint8_t *mem_range_end = (uint8_t *)lnkr_pinned_end;
#endif /* CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */

	for (pos = K_MEM_KERNEL_VIRT_START; pos < mem_range_end;
	     pos += CONFIG_MMU_PAGE_SIZE) {
		if (pos == NULL) {
			/* We have another test specifically for NULL page */
			continue;
		}

		entry = get_entry(&flags, pos);

		if (!IS_ENABLED(CONFIG_SRAM_REGION_PERMISSIONS)) {
			expected = MMU_P | MMU_RW;
		} else if (IN_REGION(__text_region, pos)) {
			expected = MMU_P | MMU_US;
		} else if (IN_REGION(__rodata_region, pos)) {
			expected = MMU_P | MMU_US | MMU_XD;
#ifdef CONFIG_COVERAGE_GCOV
		} else if (IN_REGION(__gcov_bss, pos)) {
			expected = MMU_P | MMU_RW | MMU_US | MMU_XD;
#endif
#if defined(CONFIG_LINKER_USE_PINNED_SECTION) && \
	!defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
		} else if (IN_REGION(_app_smem_pinned, pos)) {
			expected = MMU_P | MMU_RW | MMU_US | MMU_XD;
#endif
#if !defined(CONFIG_X86_KPTI) && !defined(CONFIG_X86_COMMON_PAGE_TABLE) && \
	defined(CONFIG_USERSPACE)
		} else if (IN_REGION(_app_smem, pos)) {
			/* If KPTI is not enabled, then the default memory
			 * domain affects our page tables even though we are
			 * in supervisor mode. We'd expect everything in
			 * the _app_smem region to have US set since all the
			 * partitions within it would be active in
			 * k_mem_domain_default (ztest_partition and any libc
			 * partitions).
			 *
			 * If we have a common page table, no thread has
			 * entered user mode yet and no domain regions
			 * will be programmed.
			 */
			expected = MMU_P | MMU_US | MMU_RW | MMU_XD;
#endif /* !CONFIG_X86_KPTI && !CONFIG_X86_COMMON_PAGE_TABLE && CONFIG_USERSPACE */
#ifdef CONFIG_LINKER_USE_BOOT_SECTION
		} else if (IN_REGION(lnkr_boot_text, pos)) {
			expected = MMU_P | MMU_US;
		} else if (IN_REGION(lnkr_boot_rodata, pos)) {
			expected = MMU_P | MMU_US | MMU_XD;
#endif
#ifdef CONFIG_LINKER_USE_PINNED_SECTION
		} else if (IN_REGION(lnkr_pinned_text, pos)) {
			expected = MMU_P | MMU_US;
		} else if (IN_REGION(lnkr_pinned_rodata, pos)) {
			expected = MMU_P | MMU_US | MMU_XD;
#endif
#ifdef Z_LIBC_PARTITION_EXISTS
		} else if (IN_REGION(z_data_smem_z_libc_partition_part, pos)) {
			expected = MMU_P | MMU_RW | MMU_XD;
#endif
		} else {
			/* We forced CONFIG_HW_STACK_PROTECTION off, otherwise
			 * guard pages would have RW cleared. We can relax this
			 * once we start memory-mapping stacks.
			 */
			expected = MMU_P | MMU_RW | MMU_XD;
		}
		zassert_equal(flags, expected,
			      "bad flags " PRI_ENTRY " at %p, expected "
			      PRI_ENTRY, flags, pos, expected);
	}

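	/* On x86_64, also verify the low-memory (locore) sections bounded by
	 * LOCORE_START/LOCORE_END; the expected permissions depend on
	 * whether KPTI is enabled.
	 */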
#ifdef CONFIG_X86_64
	/* Check the locore too */
	for (pos = LOCORE_START; pos < LOCORE_END;
	     pos += CONFIG_MMU_PAGE_SIZE) {
		if (pos == NULL) {
			/* We have another test specifically for NULL page */
			continue;
		}

		entry = get_entry(&flags, pos);

		if (IN_REGION(_locore, pos)) {
			if (IS_ENABLED(CONFIG_X86_KPTI)) {
				expected = MMU_P | MMU_US;
			} else {
				expected = MMU_P;
			}
		} else if (IN_REGION(_lorodata, pos)) {
			if (IS_ENABLED(CONFIG_X86_KPTI)) {
				expected = MMU_P | MMU_US | MMU_XD;
			} else {
				expected = MMU_P | MMU_XD;
			}
		} else {
			expected = MMU_P | MMU_RW | MMU_XD;
		}
		zassert_equal(flags, expected,
			      "bad flags " PRI_ENTRY " at %p, expected "
			      PRI_ENTRY, flags, pos, expected);
	}
#endif /* CONFIG_X86_64 */

#ifdef CONFIG_ARCH_MAPS_ALL_RAM
	/* All RAM page frame entries aside from 0x0 must have a mapping.
	 * We currently identity-map on x86, no conversion necessary other
	 * than a cast.
	 */
	for (pos = (uint8_t *)K_MEM_PHYS_RAM_START;
	     pos < (uint8_t *)K_MEM_PHYS_RAM_END;
	     pos += CONFIG_MMU_PAGE_SIZE) {
		if (pos == NULL) {
			continue;
		}

		entry = get_entry(&flags, pos);
		zassert_true((flags & MMU_P) != 0,
			     "address %p isn't mapped", pos);
	}
#endif
}

/**
 * Test that the NULL virtual page is always non-present
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(x86_pagetables, test_null_map)
{
	int level;
	pentry_t entry;

	/* The NULL page must always be non-present */
	z_x86_pentry_get(&level, &entry, z_x86_page_tables_get(), NULL);
	zassert_true((entry & MMU_P) == 0, "present NULL entry");
}

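/* Implementation of the dump_my_ptables() system call: print and dump
 * the page tables in effect for the calling thread.
 */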
void z_impl_dump_my_ptables(void)
{
	struct k_thread *cur = k_current_get();

	printk("Page tables for thread %p\n", cur);
	z_x86_dump_page_tables(z_x86_thread_page_tables_get(cur));
}

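/* System call verification handler: there are no arguments to
 * validate, so just forward to the implementation.
 */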
#ifdef CONFIG_USERSPACE
void z_vrfy_dump_my_ptables(void)
{
	z_impl_dump_my_ptables();
}
#include <zephyr/syscalls/dump_my_ptables_mrsh.c>
#endif /* CONFIG_USERSPACE */

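/* Helper shared by the supervisor- and user-mode dump tests below. */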
void dump_pagetables(void)
{
#if CONFIG_SRAM_SIZE > (32 << 10)
	/*
	 * Dumping the page tables takes too long, so skip it
	 * if the memory size is larger than 32MB.
	 */
	ztest_test_skip();
#else
	dump_my_ptables();
#endif
}

/**
 * Dump kernel's page tables to console
 *
 * We don't verify any specific output, but this shouldn't crash
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST_USER(x86_pagetables, test_dump_ptables_user)
{
	dump_pagetables();
}

ZTEST(x86_pagetables, test_dump_ptables)
{
	dump_pagetables();
}

ZTEST_SUITE(x86_pagetables, NULL, NULL, NULL, NULL, NULL);