/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/ztest.h>
#include <zephyr/sys/mem_manage.h>
#include <zephyr/toolchain.h>
#include <mmu.h>
#include <zephyr/linker/sections.h>

/* 32-bit IA32 page tables have no mechanism to restrict execution */
#if defined(CONFIG_X86) && !defined(CONFIG_X86_64) && !defined(CONFIG_X86_PAE)
#define SKIP_EXECUTE_TESTS
#endif

#define BASE_FLAGS	(K_MEM_CACHE_WB)
volatile bool expect_fault;

/* z_phys_map() doesn't have alignment requirements, so any oddly-sized
 * buffer can get mapped. BUF_SIZE has an odd size to make sure the mapped
 * buffer spans multiple pages.
 */
#define BUF_SIZE	(CONFIG_MMU_PAGE_SIZE + 907)
#define BUF_OFFSET	1238
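/* Note: BUF_OFFSET is deliberately not a multiple of the page size, so the
 * physical address passed to z_phys_map() in the tests below is not
 * page-aligned.
 */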

#define TEST_PAGE_SZ	ROUND_UP(BUF_OFFSET + BUF_SIZE, CONFIG_MMU_PAGE_SIZE)

__pinned_noinit
static uint8_t __aligned(CONFIG_MMU_PAGE_SIZE) test_page[TEST_PAGE_SZ];

void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
{
	printk("Caught system error -- reason %d\n", reason);

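	/* Reason 0 is assumed here to be K_ERR_CPU_EXCEPTION, which is what
	 * the MMU access violations deliberately triggered by these tests
	 * are expected to report.
	 */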
	if (expect_fault && reason == 0) {
		expect_fault = false;
		ztest_test_pass();
	} else {
		printk("Unexpected fault during test\n");
		printk("PROJECT EXECUTION FAILED\n");
		k_fatal_halt(reason);
	}
}

/**
 * Show that mapping an irregularly-sized buffer works and that the RW flag
 * is respected
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(mem_map, test_z_phys_map_rw)
{
	uint8_t *mapped_rw, *mapped_ro;
	uint8_t *buf = test_page + BUF_OFFSET;

	expect_fault = false;

	/* Map in a page that allows writes */
	z_phys_map(&mapped_rw, z_mem_phys_addr(buf),
		   BUF_SIZE, BASE_FLAGS | K_MEM_PERM_RW);

	/* Map again this time only allowing reads */
	z_phys_map(&mapped_ro, z_mem_phys_addr(buf),
		   BUF_SIZE, BASE_FLAGS);

	/* Initialize read-write buf with some bytes */
	for (int i = 0; i < BUF_SIZE; i++) {
		mapped_rw[i] = (uint8_t)(i % 256);
	}

	/* Check that the backing buffer contains the expected data. */
	for (int i = 0; i < BUF_SIZE; i++) {
		uint8_t expected_val = (uint8_t)(i % 256);

		zassert_equal(expected_val, buf[i],
			      "unexpected byte at buffer index %d (%u != %u)",
			      i, expected_val, buf[i]);

		zassert_equal(buf[i], mapped_rw[i],
			      "unequal byte at RW index %d (%u != %u)",
			      i, buf[i], mapped_rw[i]);
	}

	/* Check that the read-only mapped area contains the expected data. */
	for (int i = 0; i < BUF_SIZE; i++) {
		uint8_t expected_val = (uint8_t)(i % 256);

		zassert_equal(expected_val, mapped_ro[i],
			      "unexpected byte at RO index %d (%u != %u)",
			      i, expected_val, mapped_ro[i]);

		zassert_equal(buf[i], mapped_ro[i],
			      "unequal byte at RO index %d (%u != %u)",
			      i, buf[i], mapped_ro[i]);
	}

	/* This should explode since writes are forbidden */
	expect_fault = true;
	mapped_ro[0] = 42;

	printk("shouldn't get here\n");
	ztest_test_fail();
}

#ifndef SKIP_EXECUTE_TESTS
extern char __test_mem_map_start[];
extern char __test_mem_map_end[];

__in_section_unique(test_mem_map) __used
static void transplanted_function(bool *executed)
{
	*executed = true;
}
#endif

/**
 * Show that mapping with/without K_MEM_PERM_EXEC works as expected
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(mem_map, test_z_phys_map_exec)
{
#ifndef SKIP_EXECUTE_TESTS
	uint8_t *mapped_exec, *mapped_ro;
	bool executed = false;
	void (*func)(bool *executed);

	expect_fault = false;

	/*
	 * Need to reference the function, or else the linker would
	 * garbage-collect it.
	 */
	func = transplanted_function;

	/* Now map with execution enabled and try to run the copied fn */
	z_phys_map(&mapped_exec, z_mem_phys_addr(__test_mem_map_start),
		   (uintptr_t)(__test_mem_map_end - __test_mem_map_start),
		   BASE_FLAGS | K_MEM_PERM_EXEC);

	func = (void (*)(bool *executed))mapped_exec;
	func(&executed);
	zassert_true(executed, "function did not execute");

	/* Now map without execute permission; running from it should fault */
	z_phys_map(&mapped_ro, z_mem_phys_addr(__test_mem_map_start),
		   (uintptr_t)(__test_mem_map_end - __test_mem_map_start), BASE_FLAGS);

	func = (void (*)(bool *executed))mapped_ro;
	expect_fault = true;
	func(&executed);

	printk("shouldn't get here\n");
	ztest_test_fail();
#else
	ztest_test_skip();
#endif /* SKIP_EXECUTE_TESTS */
}

/**
 * Show that memory mapping doesn't have unintended side effects
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(mem_map, test_z_phys_map_side_effect)
{
	uint8_t *mapped;

	expect_fault = false;

	/* z_phys_map() is supposed to always create fresh mappings.
	 * Show that by mapping test_page to an RO region, we can still
	 * modify test_page.
	 */
	z_phys_map(&mapped, z_mem_phys_addr(test_page),
		   sizeof(test_page), BASE_FLAGS);

	/* Should NOT fault */
	test_page[0] = 42;

	/* Should fault */
	expect_fault = true;
	mapped[0] = 42;
	printk("shouldn't get here\n");
	ztest_test_fail();
}

/**
 * Test that z_phys_unmap() unmaps the memory and it is no longer
 * accessible afterwards.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(mem_map, test_z_phys_unmap)
{
	uint8_t *mapped;

	expect_fault = false;

	/* Map in a page that allows writes */
	z_phys_map(&mapped, z_mem_phys_addr(test_page),
		   sizeof(test_page), BASE_FLAGS | K_MEM_PERM_RW);

	/* Should NOT fault */
	mapped[0] = 42;

	/* Unmap the memory */
	z_phys_unmap(mapped, sizeof(test_page));

	/* Should fault since the mapping is no longer accessible */
	expect_fault = true;
	mapped[0] = 42;
	printk("shouldn't get here\n");
	ztest_test_fail();
}

/**
 * Show that z_phys_unmap() can reclaim the virtual region correctly.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(mem_map, test_z_phys_map_unmap_reclaim_addr)
{
	uint8_t *mapped, *mapped_old;
	uint8_t *buf = test_page + BUF_OFFSET;

	/* Map the buffer the first time. */
	z_phys_map(&mapped, z_mem_phys_addr(buf),
		   BUF_SIZE, BASE_FLAGS);

	printk("Mapped (1st time): %p\n", mapped);

	/* Store the pointer for later comparison. */
	mapped_old = mapped;

	/*
	 * Unmap the buffer.
	 * This should reclaim the bits in virtual region tracking,
	 * so that the next time z_phys_map() is called with
	 * the same arguments, it will return the same address.
	 */
	z_phys_unmap(mapped, BUF_SIZE);

	/*
	 * Map the same buffer again using the same parameters.
	 * It should give us back the same virtual address
	 * as when it was mapped the first time.
	 */
	z_phys_map(&mapped, z_mem_phys_addr(buf),
		   BUF_SIZE, BASE_FLAGS);

	printk("Mapped (2nd time): %p\n", mapped);

	zassert_equal(mapped, mapped_old, "Virtual memory region not reclaimed!");
}

/**
 * Basic k_mem_map() and k_mem_unmap() functionality
 *
 * Does not exercise K_MEM_MAP_* control flags, just default behavior
 */
ZTEST(mem_map_api, test_k_mem_map_unmap)
{
	size_t free_mem, free_mem_after_map, free_mem_after_unmap;
	char *mapped, *last_mapped;
	int i, repeat;

	expect_fault = false;
	last_mapped = NULL;

	free_mem = k_mem_free_get();
	zassert_not_equal(free_mem, 0, "no free memory");
	printk("Free memory: %zu\n", free_mem);

	/* Repeat a couple times to make sure everything still works */
	for (repeat = 1; repeat <= 10; repeat++) {
		mapped = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);
		zassert_not_null(mapped, "failed to map memory");
		printk("mapped a page to %p\n", mapped);

		if (last_mapped != NULL) {
			zassert_equal(mapped, last_mapped,
				      "should have mapped at same address");
		}
		last_mapped = mapped;

		/* Page should be zeroed */
		for (i = 0; i < CONFIG_MMU_PAGE_SIZE; i++) {
			zassert_equal(mapped[i], '\x00', "page not zeroed");
		}

		free_mem_after_map = k_mem_free_get();
		printk("Free memory after mapping: %zu\n", free_mem_after_map);
		zassert_equal(free_mem, free_mem_after_map + CONFIG_MMU_PAGE_SIZE,
			"incorrect free memory accounting");

		/* Show we can write to page without exploding */
		(void)memset(mapped, '\xFF', CONFIG_MMU_PAGE_SIZE);
		for (i = 0; i < CONFIG_MMU_PAGE_SIZE; i++) {
			zassert_true(mapped[i] == '\xFF',
				"incorrect value 0x%hhx read at index %d",
				mapped[i], i);
		}

		k_mem_unmap(mapped, CONFIG_MMU_PAGE_SIZE);

		free_mem_after_unmap = k_mem_free_get();
		printk("Free memory after unmapping: %zu\n", free_mem_after_unmap);
		zassert_equal(free_mem, free_mem_after_unmap,
			"k_mem_unmap has not freed physical memory");

		if (repeat == 10) {
			/* Should fault since mapped is no longer accessible */
			expect_fault = true;
			mapped[0] = 42;
			printk("shouldn't get here\n");
			ztest_test_fail();
		}
	}
}

/**
 * Test that the "before" guard page is in place for k_mem_map().
 */
ZTEST(mem_map_api, test_k_mem_map_guard_before)
{
	uint8_t *mapped;

	expect_fault = false;

	mapped = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);
	zassert_not_null(mapped, "failed to map memory");
	printk("mapped a page: %p - %p\n", mapped,
		mapped + CONFIG_MMU_PAGE_SIZE);

	/* Should NOT fault */
	mapped[0] = 42;

	/* Should fault here in the guard page location */
	expect_fault = true;
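	/* Step back just before the start of the mapping, into the
	 * preceding guard page.
	 */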
	mapped -= sizeof(void *);

	printk("trying to access %p\n", mapped);

	mapped[0] = 42;
	printk("shouldn't get here\n");
	ztest_test_fail();
}

/**
 * Test that the "after" guard page is in place for k_mem_map().
 */
ZTEST(mem_map_api, test_k_mem_map_guard_after)
{
	uint8_t *mapped;

	expect_fault = false;

	mapped = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);
	zassert_not_null(mapped, "failed to map memory");
	printk("mapped a page: %p - %p\n", mapped,
		mapped + CONFIG_MMU_PAGE_SIZE);

	/* Should NOT fault */
	mapped[0] = 42;

	/* Should fault here in the guard page location */
	expect_fault = true;
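	/* Step just past the end of the mapping, into the trailing
	 * guard page.
	 */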
	mapped += CONFIG_MMU_PAGE_SIZE + sizeof(void *);

	printk("trying to access %p\n", mapped);

	mapped[0] = 42;
	printk("shouldn't get here\n");
	ztest_test_fail();
}

ZTEST(mem_map_api, test_k_mem_map_exhaustion)
{
	/* With demand paging enabled, there is backing store
	 * which extends available memory. However, we don't
	 * have a way to figure out how much extra memory
	 * is available. So skip for now.
	 */
#if !defined(CONFIG_DEMAND_PAGING)
	uint8_t *addr;
	size_t free_mem, free_mem_now, free_mem_expected;
	size_t cnt, expected_cnt;
	uint8_t *last_mapped = NULL;

	free_mem = k_mem_free_get();
	printk("Free memory: %zu\n", free_mem);
	zassert_not_equal(free_mem, 0, "no free memory");

	/* Determine how many times we can map */
	expected_cnt = free_mem / CONFIG_MMU_PAGE_SIZE;

	/* Figure out how many pages we can map within
	 * the remaining virtual address space by:
	 *
	 * 1. Finding the top of the available space. This can be
	 *    done by mapping one page and using the returned
	 *    virtual address (plus the page itself and its guard
	 *    page) to obtain the end address.
	 * 2. Calculating how big this region is from
	 *    Z_FREE_VM_START to the end address.
	 * 3. Calculating how many times we can call k_mem_map().
	 *    Remember there are two guard pages for every
	 *    mapping call (hence 1 + 2 == 3).
	 */
	addr = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);
	zassert_not_null(addr, "failed to map memory");
	k_mem_unmap(addr, CONFIG_MMU_PAGE_SIZE);

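	/* End of available virtual space = returned address + the page
	 * itself + its trailing guard page; each k_mem_map() call below
	 * consumes three pages of that space (guard + page + guard).
	 */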
	cnt = POINTER_TO_UINT(addr) + CONFIG_MMU_PAGE_SIZE * 2;
	cnt -= POINTER_TO_UINT(Z_FREE_VM_START);
	cnt /= CONFIG_MMU_PAGE_SIZE * 3;

	/* If we are limited by virtual address space... */
	if (cnt < expected_cnt) {
		expected_cnt = cnt;
	}

	/* Now k_mem_map() until it fails */
	free_mem_expected = free_mem - (expected_cnt * CONFIG_MMU_PAGE_SIZE);
	cnt = 0;
	do {
		addr = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);

		if (addr != NULL) {
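			/* Chain the mapped pages into a singly-linked list by
			 * storing the previous mapping's address at the start
			 * of each new page, so they can all be unmapped below.
			 */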
			*((uintptr_t *)addr) = POINTER_TO_UINT(last_mapped);
			last_mapped = addr;
			cnt++;
		}
	} while (addr != NULL);

	printk("Mapped %zu pages\n", cnt);
	zassert_equal(cnt, expected_cnt,
		      "number of pages mapped: expected %zu, got %zu",
		      expected_cnt, cnt);

	free_mem_now = k_mem_free_get();
	printk("Free memory now: %zu\n", free_mem_now);
	zassert_equal(free_mem_now, free_mem_expected,
		      "free memory should be %zu", free_mem_expected);

	/* Now free all of them */
	cnt = 0;
	while (last_mapped != NULL) {
		addr = last_mapped;
		last_mapped = UINT_TO_POINTER(*((uintptr_t *)addr));
		k_mem_unmap(addr, CONFIG_MMU_PAGE_SIZE);

		cnt++;
	}

	printk("Unmapped %zu pages\n", cnt);
	zassert_equal(cnt, expected_cnt,
		      "number of pages unmapped: expected %zu, got %zu",
		      expected_cnt, cnt);

	free_mem_now = k_mem_free_get();
	printk("Free memory now: %zu\n", free_mem_now);
	zassert_equal(free_mem_now, free_mem,
		      "free memory should be %zu", free_mem);
#else
	ztest_test_skip();
#endif /* !CONFIG_DEMAND_PAGING */
}

#ifdef CONFIG_USERSPACE
#define USER_STACKSIZE	(128)

struct k_thread user_thread;
K_THREAD_STACK_DEFINE(user_stack, USER_STACKSIZE);

K_APPMEM_PARTITION_DEFINE(default_part);
K_APP_DMEM(default_part) uint8_t *mapped;

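/* Runs in user mode and touches the shared mapping; whether this faults
 * depends on whether the mapping below was created with K_MEM_PERM_USER.
 */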
static void user_function(void *p1, void *p2, void *p3)
{
	mapped[0] = 42;
}
#endif /* CONFIG_USERSPACE */

/**
 * Test that a mapped region is accessible from userspace only when
 * K_MEM_PERM_USER is used.
 */
ZTEST(mem_map_api, test_k_mem_map_user)
{
#ifdef CONFIG_USERSPACE
	int ret;

	ret = k_mem_domain_add_partition(&k_mem_domain_default, &default_part);
	if (ret != 0) {
		printk("Failed to add default memory partition (%d)\n", ret);
		k_oops();
	}

	/*
	 * Map the region using K_MEM_PERM_USER and try to access it from
	 * userspace
	 */
	expect_fault = false;

	z_phys_map(&mapped, z_mem_phys_addr(test_page), sizeof(test_page),
		   BASE_FLAGS | K_MEM_PERM_RW | K_MEM_PERM_USER);

	printk("mapped a page: %p - %p (with K_MEM_PERM_USER)\n", mapped,
		mapped + CONFIG_MMU_PAGE_SIZE);
	printk("trying to access %p from userspace\n", mapped);

	k_thread_create(&user_thread, user_stack, USER_STACKSIZE,
			user_function, NULL, NULL, NULL,
			-1, K_USER, K_NO_WAIT);
	k_thread_join(&user_thread, K_FOREVER);

	/* Unmap the memory */
	z_phys_unmap(mapped, sizeof(test_page));

	/*
	 * Map the region without using K_MEM_PERM_USER and try to access it
	 * from userspace. This should fault and fail.
	 */
	expect_fault = true;

	z_phys_map(&mapped, z_mem_phys_addr(test_page), sizeof(test_page),
		   BASE_FLAGS | K_MEM_PERM_RW);

	printk("mapped a page: %p - %p (without K_MEM_PERM_USER)\n", mapped,
		mapped + CONFIG_MMU_PAGE_SIZE);
	printk("trying to access %p from userspace\n", mapped);

	k_thread_create(&user_thread, user_stack, USER_STACKSIZE,
			user_function, NULL, NULL, NULL,
			-1, K_USER, K_NO_WAIT);
	k_thread_join(&user_thread, K_FOREVER);

	printk("shouldn't get here\n");
	ztest_test_fail();
#else
	ztest_test_skip();
#endif /* CONFIG_USERSPACE */
}

/* ztest suite setup */
void *mem_map_env_setup(void)
{
#ifdef CONFIG_DEMAND_PAGING
	/* This test sets up multiple mappings of RAM pages, which is only
	 * allowed for pinned memory
	 */
	k_mem_pin(test_page, sizeof(test_page));
#endif
	return NULL;
}

ZTEST_SUITE(mem_map, NULL, NULL, NULL, NULL, NULL);
ZTEST_SUITE(mem_map_api, NULL, mem_map_env_setup, NULL, NULL, NULL);