/*
 * Copyright (c) 2024 BayLibre, SAS
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/ztest.h>
#include <kernel_arch_interface.h>

/*
 * Virtual and physical addresses used to exercise MMU page table recycling.
 * These are completely arbitrary addresses away from any existing mappings
 * (don't worry: the test will fail if they ever clash). The addresses don't
 * have to be backed by actual memory as we never access the mapped range.
 */
#define TEST_VIRT_ADDR 0x456560000
#define TEST_PHYS_ADDR 0x123230000

/* test hooks implemented in arch/arm64/core/mmu.c for this test suite */
extern int arm64_mmu_nb_free_tables(void);
extern int arm64_mmu_tables_total_usage(void);

/* initial states to compare against */
static int initial_nb_free_tables;
static int initial_tables_usage;

static void *arm64_mmu_test_init(void)
{
	/* get initial states */
	initial_nb_free_tables = arm64_mmu_nb_free_tables();
	initial_tables_usage = arm64_mmu_tables_total_usage();

	TC_PRINT(" Total page tables: %d\n", CONFIG_MAX_XLAT_TABLES);
	TC_PRINT(" Initial free tables: %d\n", initial_nb_free_tables);
	TC_PRINT(" Initial total table usage: %#x\n", initial_tables_usage);

	zassert_true(initial_nb_free_tables > 1,
		     "initial_nb_free_tables = %d", initial_nb_free_tables);
	zassert_true(initial_tables_usage > 1,
		     "initial_tables_usage = %d", initial_tables_usage);

	return NULL;
}

static int mem_map_test(uintptr_t virt_addr, uintptr_t phys_addr, size_t size)
{
	/*
	 * arch_mem_map() is not defined to return any error but the
	 * implementation will call k_panic() if something goes wrong.
	 * The memory attribute is arbitrary: the mapping is never accessed.
	 */
	arch_mem_map((void *)virt_addr, phys_addr, size, K_MEM_ARM_NORMAL_NC);

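	/*
	 * Snapshot the table pool state while the mapping is live: the
	 * mapping must have consumed at least one table and increased the
	 * total usage count.
	 */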
	int mapped_nb_free_tables = arm64_mmu_nb_free_tables();
	int mapped_tables_usage = arm64_mmu_tables_total_usage();

	TC_PRINT(" After arch_mem_map:\n");
	TC_PRINT(" current free tables: %d\n", mapped_nb_free_tables);
	TC_PRINT(" current total table usage: %#x\n", mapped_tables_usage);

	zassert_true(mapped_nb_free_tables < initial_nb_free_tables,
		     "%d vs %d", mapped_nb_free_tables, initial_nb_free_tables);
	zassert_true(mapped_tables_usage > initial_tables_usage,
		     "%#x vs %#x", mapped_tables_usage, initial_tables_usage);

	arch_mem_unmap((void *)virt_addr, size);

	int unmapped_nb_free_tables = arm64_mmu_nb_free_tables();
	int unmapped_tables_usage = arm64_mmu_tables_total_usage();

	TC_PRINT(" After arch_mem_unmap:\n");
	TC_PRINT(" current free tables: %d\n", unmapped_nb_free_tables);
	TC_PRINT(" current total table usage: %#x\n", unmapped_tables_usage);

	zassert_true(unmapped_nb_free_tables == initial_nb_free_tables,
		     "%d vs %d", unmapped_nb_free_tables, initial_nb_free_tables);
	zassert_true(unmapped_tables_usage == initial_tables_usage,
		     "%#x vs %#x", unmapped_tables_usage, initial_tables_usage);

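	/*
	 * Unmapping must have recycled every table the mapping allocated,
	 * so the free-table delta against the mapped state is the number
	 * of tables this mapping consumed.
	 */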
	int tables_used = unmapped_nb_free_tables - mapped_nb_free_tables;
	return tables_used;
}

ZTEST(arm64_mmu, test_arm64_mmu_01_single_page)
{
	/*
	 * Let's map a single page to start with. This will allocate
	 * multiple tables to reach the deepest level.
	 */
	uintptr_t virt = TEST_VIRT_ADDR;
	uintptr_t phys = TEST_PHYS_ADDR;
	size_t size = CONFIG_MMU_PAGE_SIZE;

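	/*
	 * The top-level table is preallocated, so this is expected to
	 * allocate one new table per remaining translation level down to
	 * the page level (2 with the tested configuration).
	 */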
	int tables_used = mem_map_test(virt, phys, size);

	zassert_true(tables_used == 2, "used %d tables", tables_used);
}

ZTEST(arm64_mmu, test_arm64_mmu_02_single_block)
{
	/*
	 * Same thing as above, except that we expect a block mapping
	 * this time. Both addresses and the size must be properly aligned.
	 * Table allocation won't go as deep as for a page.
	 */
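	/* e.g. with a 4 KiB granule: 512 entries * 4 KiB = one 2 MiB block */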
	int table_entries = CONFIG_MMU_PAGE_SIZE / sizeof(uint64_t);
	size_t block_size = table_entries * CONFIG_MMU_PAGE_SIZE;
	uintptr_t virt = TEST_VIRT_ADDR & ~(block_size - 1);
	uintptr_t phys = TEST_PHYS_ADDR & ~(block_size - 1);

	int tables_used = mem_map_test(virt, phys, block_size);

	zassert_true(tables_used == 1, "used %d tables", tables_used);
}

ZTEST(arm64_mmu, test_arm64_mmu_03_block_and_page)
{
	/*
	 * Same thing as above, except that we expect a block mapping
	 * followed by a page mapping to exercise range splitting.
	 * To achieve that we simply increase the size by one page and keep
	 * the starting addresses aligned to a block.
	 */
	int table_entries = CONFIG_MMU_PAGE_SIZE / sizeof(uint64_t);
	size_t block_size = table_entries * CONFIG_MMU_PAGE_SIZE;
	uintptr_t virt = TEST_VIRT_ADDR & ~(block_size - 1);
	uintptr_t phys = TEST_PHYS_ADDR & ~(block_size - 1);
	size_t size = block_size + CONFIG_MMU_PAGE_SIZE;

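	/*
	 * Expected: one table holding the block descriptor plus one
	 * page-level table for the trailing page.
	 */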
	int tables_used = mem_map_test(virt, phys, size);

	zassert_true(tables_used == 2, "used %d tables", tables_used);
}

ZTEST(arm64_mmu, test_arm64_mmu_04_page_and_block)
{
	/*
	 * Same thing as above, except that we expect a page mapping
	 * followed by a block mapping to exercise range splitting.
	 * To achieve that we increase the size by one page and move the
	 * starting addresses one page below block alignment.
	 */
	int table_entries = CONFIG_MMU_PAGE_SIZE / sizeof(uint64_t);
	size_t block_size = table_entries * CONFIG_MMU_PAGE_SIZE;
	uintptr_t virt = (TEST_VIRT_ADDR & ~(block_size - 1)) - CONFIG_MMU_PAGE_SIZE;
	uintptr_t phys = (TEST_PHYS_ADDR & ~(block_size - 1)) - CONFIG_MMU_PAGE_SIZE;
	size_t size = block_size + CONFIG_MMU_PAGE_SIZE;

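	/*
	 * Symmetric to the previous test: again one page-level table for
	 * the leading page plus one table for the block descriptor.
	 */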
	int tables_used = mem_map_test(virt, phys, size);

	zassert_true(tables_used == 2, "used %d tables", tables_used);
}

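/*
 * arm64_mmu_test_init() runs once as the suite setup function, recording
 * the initial page table pool state before any test executes.
 */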
ZTEST_SUITE(arm64_mmu, NULL, arm64_mmu_test_init, NULL, NULL, NULL);