/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/ztest.h>
#include <zephyr/toolchain.h>
#include <mmu.h>
#include <zephyr/linker/sections.h>
#include <zephyr/cache.h>

#ifdef CONFIG_DEMAND_PAGING
#include <zephyr/kernel/mm/demand_paging.h>
#endif /* CONFIG_DEMAND_PAGING */

/* 32-bit IA32 page tables have no mechanism to restrict execution */
#if defined(CONFIG_X86) && !defined(CONFIG_X86_64) && !defined(CONFIG_X86_PAE)
#define SKIP_EXECUTE_TESTS
#endif

#define BASE_FLAGS	(K_MEM_CACHE_WB)
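/* Set by a test right before an access that is expected to fault;
 * cleared by the fatal error handler when the expected fault arrives.
 */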
volatile bool expect_fault;

/* k_mem_map_phys_bare() doesn't have alignment requirements; any oddly-sized buffer
 * can get mapped. BUF_SIZE has an odd size to make sure the mapped buffer
 * spans multiple pages.
 */
#define BUF_SIZE	(CONFIG_MMU_PAGE_SIZE + 907)
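/* Deliberately not page-aligned so the buffer under test starts in the
 * middle of test_page.
 */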
#define BUF_OFFSET	1238

#define TEST_PAGE_SZ	ROUND_UP(BUF_OFFSET + BUF_SIZE, CONFIG_MMU_PAGE_SIZE)

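/* Page-aligned, pinned backing buffer whose physical pages are remapped
 * over and over by the tests below.
 */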
__pinned_noinit
static uint8_t __aligned(CONFIG_MMU_PAGE_SIZE) test_page[TEST_PAGE_SZ];

void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{
	printk("Caught system error -- reason %d\n", reason);

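	/* reason 0 (K_ERR_CPU_EXCEPTION) is the only fault these tests
	 * intentionally trigger.
	 */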
	if (expect_fault && reason == 0) {
		expect_fault = false;
		ztest_test_pass();
	} else {
		printk("Unexpected fault during test\n");
		TC_END_REPORT(TC_FAIL);
		k_fatal_halt(reason);
	}
}

/**
 * Show that mapping an irregularly-sized buffer works and the RW flag is
 * respected
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(mem_map, test_k_mem_map_phys_bare_rw)
{
	uint8_t *mapped_rw, *mapped_ro;
	uint8_t *buf = test_page + BUF_OFFSET;
	uintptr_t aligned_addr;
	size_t aligned_size;
	size_t aligned_offset;

	expect_fault = false;

	if (IS_ENABLED(CONFIG_DCACHE)) {
		/* Flush and invalidate the entire data cache to prepare
		 * for the comparison tests below.
		 */
		sys_cache_data_flush_and_invd_all();
	}

	/* Map in a page that allows writes */
	k_mem_map_phys_bare(&mapped_rw, k_mem_phys_addr(buf),
			    BUF_SIZE, BASE_FLAGS | K_MEM_PERM_RW);

	/* Map again this time only allowing reads */
	k_mem_map_phys_bare(&mapped_ro, k_mem_phys_addr(buf),
			    BUF_SIZE, BASE_FLAGS);

	/* Initialize read-write buf with some bytes */
	for (int i = 0; i < BUF_SIZE; i++) {
		mapped_rw[i] = (uint8_t)(i % 256);
	}

	if (IS_ENABLED(CONFIG_DCACHE)) {
		/* Flush the data to memory after write. */
		aligned_offset =
			k_mem_region_align(&aligned_addr, &aligned_size, (uintptr_t)mapped_rw,
					   BUF_SIZE, CONFIG_MMU_PAGE_SIZE);
		zassert_equal(aligned_offset, BUF_OFFSET,
			      "unexpected mapped_rw aligned offset: %u != %u", aligned_offset,
			      BUF_OFFSET);
		sys_cache_data_flush_and_invd_range((void *)aligned_addr, aligned_size);
	}

	/* Check that the backing buffer contains the expected data. */
	for (int i = 0; i < BUF_SIZE; i++) {
		uint8_t expected_val = (uint8_t)(i % 256);

		zassert_equal(expected_val, buf[i],
			      "unexpected byte at buffer index %d (%u != %u)",
			      i, expected_val, buf[i]);

		zassert_equal(buf[i], mapped_rw[i],
			      "unequal byte at RW index %d (%u != %u)",
			      i, buf[i], mapped_rw[i]);
	}

	/* Check that the read-only mapped area contains the expected data. */
	for (int i = 0; i < BUF_SIZE; i++) {
		uint8_t expected_val = (uint8_t)(i % 256);

		zassert_equal(expected_val, mapped_ro[i],
			      "unexpected byte at RO index %d (%u != %u)",
			      i, expected_val, mapped_ro[i]);

		zassert_equal(buf[i], mapped_ro[i],
			      "unequal byte at RO index %d (%u != %u)",
			      i, buf[i], mapped_ro[i]);
	}

	/* This should explode since writes are forbidden */
	expect_fault = true;
	mapped_ro[0] = 42;

	printk("shouldn't get here\n");
	ztest_test_fail();
}

#ifndef SKIP_EXECUTE_TESTS
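/* Bounds of the test_mem_map linker section that contains
 * transplanted_function() below.
 */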
extern char __test_mem_map_start[];
extern char __test_mem_map_end[];

__in_section_unique(test_mem_map) __used
static void transplanted_function(bool *executed)
{
	*executed = true;
}
#endif

/**
 * Show that mapping with/without K_MEM_PERM_EXEC works as expected
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(mem_map, test_k_mem_map_phys_bare_exec)
{
#ifndef SKIP_EXECUTE_TESTS
	uint8_t *mapped_exec, *mapped_ro;
	bool executed = false;
	void (*func)(bool *executed);

	expect_fault = false;

	/*
	 * Need to reference the function, or else the linker would
	 * garbage-collect it.
	 */
	func = transplanted_function;

	/* Now map with execution enabled and try to run the copied fn */
	k_mem_map_phys_bare(&mapped_exec, k_mem_phys_addr(__test_mem_map_start),
			    (uintptr_t)(__test_mem_map_end - __test_mem_map_start),
			    BASE_FLAGS | K_MEM_PERM_EXEC);

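	/* Call the code through the executable alias mapping */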
	func = (void (*)(bool *executed))mapped_exec;
	func(&executed);
	zassert_true(executed, "function did not execute");

	/* Now map without execute permission; execution should fault */
	k_mem_map_phys_bare(&mapped_ro, k_mem_phys_addr(__test_mem_map_start),
			    (uintptr_t)(__test_mem_map_end - __test_mem_map_start),
			    BASE_FLAGS);

	func = (void (*)(bool *executed))mapped_ro;
	expect_fault = true;
	func(&executed);

	printk("shouldn't get here\n");
	ztest_test_fail();
#else
	ztest_test_skip();
#endif /* SKIP_EXECUTE_TESTS */
}

/**
 * Show that memory mapping doesn't have unintended side effects
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(mem_map, test_k_mem_map_phys_bare_side_effect)
{
	uint8_t *mapped;

	expect_fault = false;

	/* k_mem_map_phys_bare() is supposed to always create fresh mappings.
	 * Show that by mapping test_page to an RO region, we can still
	 * modify test_page.
	 */
	k_mem_map_phys_bare(&mapped, k_mem_phys_addr(test_page),
			    sizeof(test_page), BASE_FLAGS);

	/* Should NOT fault */
	test_page[0] = 42;

	/* Should fault */
	expect_fault = true;
	mapped[0] = 42;
	printk("shouldn't get here\n");
	ztest_test_fail();
}

/**
 * Test that k_mem_unmap_phys_bare() unmaps the memory and it is no longer
 * accessible afterwards.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(mem_map, test_k_mem_unmap_phys_bare)
{
	uint8_t *mapped;

	expect_fault = false;

	/* Map in a page that allows writes */
	k_mem_map_phys_bare(&mapped, k_mem_phys_addr(test_page),
			    sizeof(test_page), BASE_FLAGS | K_MEM_PERM_RW);

	/* Should NOT fault */
	mapped[0] = 42;

	/* Unmap the memory */
	k_mem_unmap_phys_bare(mapped, sizeof(test_page));

	/* Should fault since mapped is no longer accessible */
	expect_fault = true;
	mapped[0] = 42;
	printk("shouldn't get here\n");
	ztest_test_fail();
}

/**
 * Show that k_mem_unmap_phys_bare() can reclaim the virtual region correctly.
 *
 * @ingroup kernel_memprotect_tests
 */
ZTEST(mem_map, test_k_mem_map_phys_bare_unmap_reclaim_addr)
{
	uint8_t *mapped, *mapped_old;
	uint8_t *buf = test_page + BUF_OFFSET;

	/* Map the buffer the first time. */
	k_mem_map_phys_bare(&mapped, k_mem_phys_addr(buf),
			    BUF_SIZE, BASE_FLAGS);

	printk("Mapped (1st time): %p\n", mapped);

	/* Store the pointer for later comparison. */
	mapped_old = mapped;

	/*
	 * Unmap the buffer.
	 * This should reclaim the bits in virtual region tracking,
	 * so that the next time k_mem_map_phys_bare() is called with
	 * the same arguments, it will return the same address.
	 */
	k_mem_unmap_phys_bare(mapped, BUF_SIZE);

	/*
	 * Map the same buffer again using the same parameters.
	 * It should give back the same virtual address as the
	 * first mapping above.
	 */
	k_mem_map_phys_bare(&mapped, k_mem_phys_addr(buf), BUF_SIZE, BASE_FLAGS);

	printk("Mapped (2nd time): %p\n", mapped);

	zassert_equal(mapped, mapped_old, "Virtual memory region not reclaimed!");
}

/**
 * Basic k_mem_map() and k_mem_unmap() functionality
 *
 * Does not exercise K_MEM_MAP_* control flags, just default behavior
 */
ZTEST(mem_map_api, test_k_mem_map_unmap)
{
	size_t free_mem, free_mem_after_map, free_mem_after_unmap;
	char *mapped, *last_mapped;
	int i, repeat;

	expect_fault = false;
	last_mapped = NULL;

	free_mem = k_mem_free_get();
	zassert_not_equal(free_mem, 0, "no free memory");
	printk("Free memory: %zu\n", free_mem);

	/* Repeat several times to make sure everything still works */
	for (repeat = 1; repeat <= 10; repeat++) {
		mapped = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);
		zassert_not_null(mapped, "failed to map memory");
		printk("mapped a page to %p\n", mapped);

		if (last_mapped != NULL) {
			zassert_equal(mapped, last_mapped,
				      "should have mapped at same address");
		}
		last_mapped = mapped;

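		/* Make sure the zero-check below reads the freshly mapped
		 * page from memory rather than stale cache lines.
		 */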
		if (IS_ENABLED(CONFIG_DCACHE)) {
			sys_cache_data_flush_and_invd_range((void *)mapped, CONFIG_MMU_PAGE_SIZE);
		}

		/* Page should be zeroed */
		for (i = 0; i < CONFIG_MMU_PAGE_SIZE; i++) {
			zassert_equal(mapped[i], '\x00', "page not zeroed");
		}

		free_mem_after_map = k_mem_free_get();
		printk("Free memory after mapping: %zu\n", free_mem_after_map);
		zassert_equal(free_mem, free_mem_after_map + CONFIG_MMU_PAGE_SIZE,
			"incorrect free memory accounting");

		/* Show we can write to page without exploding */
		(void)memset(mapped, '\xFF', CONFIG_MMU_PAGE_SIZE);

		if (IS_ENABLED(CONFIG_DCACHE)) {
			sys_cache_data_flush_and_invd_range((void *)mapped, CONFIG_MMU_PAGE_SIZE);
		}

		for (i = 0; i < CONFIG_MMU_PAGE_SIZE; i++) {
			zassert_true(mapped[i] == '\xFF',
				"incorrect value 0x%hhx read at index %d",
				mapped[i], i);
		}

		k_mem_unmap(mapped, CONFIG_MMU_PAGE_SIZE);

		free_mem_after_unmap = k_mem_free_get();
		printk("Free memory after unmapping: %zu\n", free_mem_after_unmap);
		zassert_equal(free_mem, free_mem_after_unmap,
			"k_mem_unmap has not freed physical memory");

		if (repeat == 10) {
			/* Should fault since mapped is no longer accessible */
			expect_fault = true;
			mapped[0] = 42;
			printk("shouldn't get here\n");
			ztest_test_fail();
		}
	}
}

/**
 * Test that the "before" guard page is in place for k_mem_map().
 */
ZTEST(mem_map_api, test_k_mem_map_guard_before)
{
	uint8_t *mapped;

	expect_fault = false;

	mapped = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);
	zassert_not_null(mapped, "failed to map memory");
	printk("mapped a page: %p - %p\n", mapped,
		mapped + CONFIG_MMU_PAGE_SIZE);

	/* Should NOT fault */
	mapped[0] = 42;

	/* Should fault here in the guard page location */
	expect_fault = true;
	mapped -= sizeof(void *);

	printk("trying to access %p\n", mapped);

	mapped[0] = 42;
	printk("shouldn't get here\n");
	ztest_test_fail();
}

/**
 * Test that the "after" guard page is in place for k_mem_map().
 */
ZTEST(mem_map_api, test_k_mem_map_guard_after)
{
	uint8_t *mapped;

	expect_fault = false;

	mapped = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);
	zassert_not_null(mapped, "failed to map memory");
	printk("mapped a page: %p - %p\n", mapped,
		mapped + CONFIG_MMU_PAGE_SIZE);

	/* Should NOT fault */
	mapped[0] = 42;

	/* Should fault here in the guard page location */
	expect_fault = true;
	mapped += CONFIG_MMU_PAGE_SIZE + sizeof(void *);

	printk("trying to access %p\n", mapped);

	mapped[0] = 42;
	printk("shouldn't get here\n");
	ztest_test_fail();
}

ZTEST(mem_map_api, test_k_mem_map_exhaustion)
{
	/* With demand paging enabled, there is backing store
	 * which extends available memory. However, we don't
	 * have a way to figure out how much extra memory
	 * is available. So skip for now.
	 */
#if !defined(CONFIG_DEMAND_PAGING)
	uint8_t *addr;
	size_t free_mem, free_mem_now, free_mem_expected;
	size_t cnt, expected_cnt;
	uint8_t *last_mapped = NULL;

	free_mem = k_mem_free_get();
	printk("Free memory: %zu\n", free_mem);
	zassert_not_equal(free_mem, 0, "no free memory");

	/* Determine how many times we can map */
	expected_cnt = free_mem / CONFIG_MMU_PAGE_SIZE;

	/* Figure out how many pages we can map within
	 * the remaining virtual address space by:
	 *
	 * 1. Find out the top of available space. This can be
	 *    done by mapping one page and using the returned
	 *    virtual address (plus the page itself and its guard
	 *    page) to obtain the end address.
	 * 2. Calculate how big this region is from
	 *    K_MEM_VM_FREE_START to end address.
	 * 3. Calculate how many times we can call k_mem_map().
	 *    Remember there are two guard pages for every
	 *    mapping call (hence 1 + 2 == 3).
	 */
	addr = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);
	zassert_not_null(addr, "failed to map memory");
	k_mem_unmap(addr, CONFIG_MMU_PAGE_SIZE);

	cnt = POINTER_TO_UINT(addr) + CONFIG_MMU_PAGE_SIZE * 2;
	cnt -= POINTER_TO_UINT(K_MEM_VM_FREE_START);
	cnt /= CONFIG_MMU_PAGE_SIZE * 3;

	/* If we are limited by virtual address space... */
	if (cnt < expected_cnt) {
		expected_cnt = cnt;
	}

	/* Now k_mem_map() until it fails */
	free_mem_expected = free_mem - (expected_cnt * CONFIG_MMU_PAGE_SIZE);
	cnt = 0;
	do {
		addr = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);

		if (addr != NULL) {
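			/* Chain the mapped pages into a singly linked list
			 * so they can all be unmapped afterwards.
			 */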
			*((uintptr_t *)addr) = POINTER_TO_UINT(last_mapped);
			last_mapped = addr;
			cnt++;
		}
	} while (addr != NULL);

	printk("Mapped %zu pages\n", cnt);
	zassert_equal(cnt, expected_cnt,
		      "number of pages mapped: expected %u, got %u",
		      expected_cnt, cnt);

	free_mem_now = k_mem_free_get();
	printk("Free memory now: %zu\n", free_mem_now);
	zassert_equal(free_mem_now, free_mem_expected,
		      "free memory should be %zu", free_mem_expected);

	/* Now free all of them */
	cnt = 0;
	while (last_mapped != NULL) {
		addr = last_mapped;
		last_mapped = UINT_TO_POINTER(*((uintptr_t *)addr));
		k_mem_unmap(addr, CONFIG_MMU_PAGE_SIZE);

		cnt++;
	}

	printk("Unmapped %zu pages\n", cnt);
	zassert_equal(cnt, expected_cnt,
		      "number of pages unmapped: expected %u, got %u",
		      expected_cnt, cnt);

	free_mem_now = k_mem_free_get();
	printk("Free memory now: %zu\n", free_mem_now);
	zassert_equal(free_mem_now, free_mem,
		      "free memory should be %zu", free_mem);
#else
	ztest_test_skip();
#endif /* !CONFIG_DEMAND_PAGING */
}

#ifdef CONFIG_USERSPACE
#define USER_STACKSIZE	(128)

struct k_thread user_thread;
K_THREAD_STACK_DEFINE(user_stack, USER_STACKSIZE);

K_APPMEM_PARTITION_DEFINE(default_part);
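/* Place the pointer in an app memory partition so the user thread can
 * dereference it.
 */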
K_APP_DMEM(default_part) uint8_t *mapped;

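/* Runs in user mode; the write only succeeds if the mapping was created
 * with K_MEM_PERM_USER.
 */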
static void user_function(void *p1, void *p2, void *p3)
{
	mapped[0] = 42;
}
#endif /* CONFIG_USERSPACE */

/**
 * Test that a mapped region is accessible from userspace only when
 * K_MEM_PERM_USER is used.
 */
ZTEST(mem_map_api, test_k_mem_map_user)
{
#ifdef CONFIG_USERSPACE
	int ret;

	ret = k_mem_domain_add_partition(&k_mem_domain_default, &default_part);
	if (ret != 0) {
		printk("Failed to add default memory partition (%d)\n", ret);
		k_oops();
	}

	/*
	 * Map the region using K_MEM_PERM_USER and try to access it from
	 * userspace
	 */
	expect_fault = false;

	k_mem_map_phys_bare(&mapped, k_mem_phys_addr(test_page), sizeof(test_page),
			    BASE_FLAGS | K_MEM_PERM_RW | K_MEM_PERM_USER);

	printk("mapped a page: %p - %p (with K_MEM_PERM_USER)\n", mapped,
		mapped + CONFIG_MMU_PAGE_SIZE);
	printk("trying to access %p from userspace\n", mapped);

	k_thread_create(&user_thread, user_stack, USER_STACKSIZE,
			user_function, NULL, NULL, NULL,
			-1, K_USER, K_NO_WAIT);
	k_thread_join(&user_thread, K_FOREVER);

	/* Unmap the memory */
	k_mem_unmap_phys_bare(mapped, sizeof(test_page));

	/*
	 * Map the region without using K_MEM_PERM_USER and try to access it
	 * from userspace. This should fault and fail.
	 */
	expect_fault = true;

	k_mem_map_phys_bare(&mapped, k_mem_phys_addr(test_page), sizeof(test_page),
			    BASE_FLAGS | K_MEM_PERM_RW);

	printk("mapped a page: %p - %p (without K_MEM_PERM_USER)\n", mapped,
		mapped + CONFIG_MMU_PAGE_SIZE);
	printk("trying to access %p from userspace\n", mapped);

	k_thread_create(&user_thread, user_stack, USER_STACKSIZE,
			user_function, NULL, NULL, NULL,
			-1, K_USER, K_NO_WAIT);
	k_thread_join(&user_thread, K_FOREVER);

	printk("shouldn't get here\n");
	ztest_test_fail();
#else
	ztest_test_skip();
#endif /* CONFIG_USERSPACE */
}

/* ztest suite setup */
void *mem_map_env_setup(void)
{
#ifdef CONFIG_DEMAND_PAGING
	/* This test sets up multiple mappings of RAM pages, which is only
	 * allowed for pinned memory
	 */
	k_mem_pin(test_page, sizeof(test_page));
#endif
	return NULL;
}

/* For CPUs with incoherent cache under SMP, the tests that read/write
 * the buffer (the majority of tests here) may not work correctly if
 * the test thread migrates between CPUs. So use the test infrastructure
 * to limit the tests to one CPU.
 */
#ifdef CONFIG_CPU_CACHE_INCOHERENT
#define FUNC_BEFORE ztest_simple_1cpu_before
#define FUNC_AFTER  ztest_simple_1cpu_after
#else
#define FUNC_BEFORE NULL
#define FUNC_AFTER  NULL
#endif

ZTEST_SUITE(mem_map, NULL, NULL, FUNC_BEFORE, FUNC_AFTER, NULL);
ZTEST_SUITE(mem_map_api, NULL, mem_map_env_setup, FUNC_BEFORE, FUNC_AFTER, NULL);