1 /*
2 * ARMv7 MMU support
3 *
4 * This implementation supports the Short-descriptor translation
5  * table format. The standard page size is 4 kB; 1 MB sections
6 * are only used for mapping the code and data of the Zephyr image.
7  * Secure mode and PL1 are always assumed. LPAE and PXN extensions
8 * as well as TEX remapping are not supported. The AP[2:1] plus
9 * Access flag permissions model is used, as the AP[2:0] model is
10 * deprecated. As the AP[2:1] model can only disable write access,
11 * the read permission flag is always implied.
12 *
13 * Reference documentation:
14 * ARM Architecture Reference Manual, ARMv7-A and ARMv7-R edition,
15 * ARM document ID DDI0406C Rev. d, March 2018
16 *
17 * Copyright (c) 2021 Weidmueller Interface GmbH & Co. KG
18 * SPDX-License-Identifier: Apache-2.0
19 */
20
21 #include <zephyr/device.h>
22 #include <zephyr/init.h>
23 #include <zephyr/kernel.h>
24
25 #include <zephyr/linker/linker-defs.h>
26 #include <zephyr/logging/log.h>
27 #include <zephyr/sys/__assert.h>
28 #include <zephyr/sys/util.h>
29 #include <zephyr/kernel/mm.h>
30 #include <zephyr/sys/barrier.h>
31
32 #include <cmsis_core.h>
33
34 #include <zephyr/arch/arm/mmu/arm_mmu.h>
35 #include "arm_mmu_priv.h"
36
37 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
38
39 /* Level 1 page table: always required, must be 16k-aligned */
40 static struct arm_mmu_l1_page_table
41 l1_page_table __aligned(KB(16)) = {0};
42 /*
43 * Array of level 2 page tables with 4k granularity:
44 * each table covers a range of 1 MB, the number of L2 tables
45 * is configurable.
46 */
47 static struct arm_mmu_l2_page_table
48 l2_page_tables[CONFIG_ARM_MMU_NUM_L2_TABLES] __aligned(KB(1)) = {0};
49 /*
50 * For each level 2 page table, a separate dataset tracks
51 * if the respective table is in use, if so, to which 1 MB
52 * virtual address range it is assigned, and how many entries,
53 * each mapping a 4 kB page, it currently contains.
54 */
55 static struct arm_mmu_l2_page_table_status
56 l2_page_tables_status[CONFIG_ARM_MMU_NUM_L2_TABLES] = {0};
57
58 /* Available L2 tables count & next free index for an L2 table request */
59 static uint32_t arm_mmu_l2_tables_free = CONFIG_ARM_MMU_NUM_L2_TABLES;
60 static uint32_t arm_mmu_l2_next_free_table;
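
/*
 * Capacity note: each L2 table holds 256 entries of 4 kB each and thus
 * spans exactly 1 MB of virtual address space, so at most
 * CONFIG_ARM_MMU_NUM_L2_TABLES separate 1 MB ranges can hold 4 kB
 * page mappings at any given time (e.g. 64 tables -> 64 MB).
 */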
61
62 /*
63 * Static definition of all code & data memory regions of the
64 * current Zephyr image. This information must be available &
65 * processed upon MMU initialization.
66 */
67 static const struct arm_mmu_flat_range mmu_zephyr_ranges[] = {
68 /*
69 * Mark the zephyr execution regions (data, bss, noinit, etc.)
70 * cacheable, read / write and non-executable
71 */
72 { .name = "zephyr_data",
73 .start = (uint32_t)_image_ram_start,
74 .end = (uint32_t)_image_ram_end,
75 .attrs = MT_NORMAL | MATTR_SHARED |
76 MPERM_R | MPERM_W |
77 MATTR_CACHE_OUTER_WB_WA | MATTR_CACHE_INNER_WB_WA},
78
79 /* Mark text segment cacheable, read only and executable */
80 { .name = "zephyr_code",
81 .start = (uint32_t)__text_region_start,
82 .end = (uint32_t)__text_region_end,
83 .attrs = MT_NORMAL | MATTR_SHARED |
84 /* The code needs to have write permission in order for
85 * software breakpoints (which modify instructions) to work
86 */
87 #if defined(CONFIG_GDBSTUB)
88 MPERM_R | MPERM_X | MPERM_W |
89 #else
90 MPERM_R | MPERM_X |
91 #endif
92 MATTR_CACHE_OUTER_WB_nWA | MATTR_CACHE_INNER_WB_nWA |
93 MATTR_MAY_MAP_L1_SECTION},
94
95 /* Mark rodata segment cacheable, read only and non-executable */
96 { .name = "zephyr_rodata",
97 .start = (uint32_t)__rodata_region_start,
98 .end = (uint32_t)__rodata_region_end,
99 .attrs = MT_NORMAL | MATTR_SHARED |
100 MPERM_R |
101 MATTR_CACHE_OUTER_WB_nWA | MATTR_CACHE_INNER_WB_nWA |
102 MATTR_MAY_MAP_L1_SECTION},
103 #ifdef CONFIG_NOCACHE_MEMORY
104 /* Mark nocache segment read / write and non-executable */
105 { .name = "nocache",
106 .start = (uint32_t)_nocache_ram_start,
107 .end = (uint32_t)_nocache_ram_end,
108 .attrs = MT_STRONGLY_ORDERED |
109 MPERM_R | MPERM_W},
110 #endif
111 };
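
/*
 * For comparison: SoC peripheral mappings are not part of this static array;
 * they come in via mmu_config/mmu_regions, which z_arm_mmu_init() processes
 * below. A minimal sketch of such a SoC-level definition (the device name,
 * address and the MMU_REGION_FLAT_ENTRY helper are illustrative assumptions,
 * not taken from this file):
 *
 *   static const struct arm_mmu_region mmu_regions[] = {
 *       MMU_REGION_FLAT_ENTRY("periph", 0xF8000000, KB(4),
 *                             MT_DEVICE | MATTR_SHARED |
 *                             MPERM_R | MPERM_W),
 *   };
 *
 *   const struct arm_mmu_config mmu_config = {
 *       .num_regions = ARRAY_SIZE(mmu_regions),
 *       .mmu_regions = mmu_regions,
 *   };
 */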
112
113 static void arm_mmu_l2_map_page(uint32_t va, uint32_t pa,
114 struct arm_mmu_perms_attrs perms_attrs);
115
116 /**
117 * @brief Invalidates the TLB
118 * Helper function which invalidates the entire TLB. This action
119 * is performed whenever the MMU is (re-)enabled or changes to the
120 * page tables are made at run-time, as the TLB might contain entries
121 * which are no longer valid once the changes are applied.
122 */
123 static void invalidate_tlb_all(void)
124 {
125 __set_TLBIALL(0); /* 0 = opc2 = invalidate entire TLB */
126 barrier_dsync_fence_full();
127 barrier_isync_fence_full();
128 }
129
130 /**
131 * @brief Returns a free level 2 page table
132 * Initializes and returns the next free L2 page table whenever
133 * a page is to be mapped in a 1 MB virtual address range that
134 * is not yet covered by a level 2 page table.
135 *
136 * @param va 32-bit virtual address to be mapped.
137 * @retval pointer to the L2 table now assigned to the 1 MB
138 * address range the target virtual address is in.
139 */
140 static struct arm_mmu_l2_page_table *arm_mmu_assign_l2_table(uint32_t va)
141 {
142 struct arm_mmu_l2_page_table *l2_page_table;
143
144 __ASSERT(arm_mmu_l2_tables_free > 0,
145 "Cannot set up L2 page table for VA 0x%08X: "
146 "no more free L2 page tables available\n",
147 va);
148 __ASSERT(l2_page_tables_status[arm_mmu_l2_next_free_table].entries == 0,
149 "Cannot set up L2 page table for VA 0x%08X: "
150 "expected empty L2 table at index [%u], but the "
151 "entries value is %u\n",
152 va, arm_mmu_l2_next_free_table,
153 l2_page_tables_status[arm_mmu_l2_next_free_table].entries);
154
155 /*
156 * Store in the status dataset of the L2 table to be returned
157 * which 1 MB virtual address range it is being assigned to.
158 * Set the current page table entry count to 0.
159 */
160 l2_page_tables_status[arm_mmu_l2_next_free_table].l1_index =
161 ((va >> ARM_MMU_PTE_L1_INDEX_PA_SHIFT) & ARM_MMU_PTE_L1_INDEX_MASK);
162 l2_page_tables_status[arm_mmu_l2_next_free_table].entries = 0;
163 l2_page_table = &l2_page_tables[arm_mmu_l2_next_free_table];
164
165 /*
166 * Decrement the available L2 page table count. As long as at
167 * least one more L2 table is available afterwards, update the
168 * L2 next free table index. If we're about to return the last
169 * available L2 table, calculating a next free table index is
170 * impossible.
171 */
172 --arm_mmu_l2_tables_free;
173 if (arm_mmu_l2_tables_free > 0) {
174 do {
175 arm_mmu_l2_next_free_table = (arm_mmu_l2_next_free_table + 1) %
176 CONFIG_ARM_MMU_NUM_L2_TABLES;
177 } while (l2_page_tables_status[arm_mmu_l2_next_free_table].entries != 0);
178 }
179
180 return l2_page_table;
181 }
182
183 /**
184 * @brief Releases a level 2 page table
185 * Releases a level 2 page table, marking it as no longer in use.
186 * From that point on, it can be re-used for mappings in another
187 * 1 MB virtual address range. This function is called whenever
188 * it is determined during an unmap call at run-time that the page
189 * table entry count in the respective page table has reached 0.
190 *
191 * @param l2_page_table Pointer to L2 page table to be released.
192 */
193 static void arm_mmu_release_l2_table(struct arm_mmu_l2_page_table *l2_page_table)
194 {
195 uint32_t l2_page_table_index = ARM_MMU_L2_PT_INDEX(l2_page_table);
196
197 l2_page_tables_status[l2_page_table_index].l1_index = 0;
198 if (arm_mmu_l2_tables_free == 0) {
199 arm_mmu_l2_next_free_table = l2_page_table_index;
200 }
201 ++arm_mmu_l2_tables_free;
202 }
203
204 /**
205 * @brief Increments the page table entry counter of a L2 page table
206 * Increments the page table entry counter of a level 2 page table.
207 * Contains a check to ensure that no attempts are made to set up
208 * more page table entries than the table can hold.
209 *
210 * @param l2_page_table Pointer to the L2 page table whose entry
211 * counter shall be incremented.
212 */
213 static void arm_mmu_inc_l2_table_entries(struct arm_mmu_l2_page_table *l2_page_table)
214 {
215 uint32_t l2_page_table_index = ARM_MMU_L2_PT_INDEX(l2_page_table);
216
217 __ASSERT(l2_page_tables_status[l2_page_table_index].entries < ARM_MMU_PT_L2_NUM_ENTRIES,
218 "Cannot increment entry count of the L2 page table at index "
219 "[%u] / addr %p / ref L1[%u]: maximum entry count already reached",
220 l2_page_table_index, l2_page_table,
221 l2_page_tables_status[l2_page_table_index].l1_index);
222
223 ++l2_page_tables_status[l2_page_table_index].entries;
224 }
225
226 /**
227 * @brief Decrements the page table entry counter of a L2 page table
228 * Decrements the page table entry counter of a level 2 page table.
229 * Contains a check to ensure that no attempts are made to remove
230 * entries from the respective table that aren't actually there.
231 *
232 * @param l2_page_table Pointer to the L2 page table whose entry
233 * counter shall be decremented.
234 */
235 static void arm_mmu_dec_l2_table_entries(struct arm_mmu_l2_page_table *l2_page_table)
236 {
237 uint32_t l2_page_table_index = ARM_MMU_L2_PT_INDEX(l2_page_table);
238
239 __ASSERT(l2_page_tables_status[l2_page_table_index].entries > 0,
240 "Cannot decrement entry count of the L2 page table at index "
241 "[%u] / addr %p / ref L1[%u]: entry count is already zero",
242 l2_page_table_index, l2_page_table,
243 l2_page_tables_status[l2_page_table_index].l1_index);
244
245 if (--l2_page_tables_status[l2_page_table_index].entries == 0) {
246 arm_mmu_release_l2_table(l2_page_table);
247 }
248 }
249
250 /**
251 * @brief Converts memory attributes and permissions to MMU format
252 * Converts memory attributes and permissions as used in the boot-
253 * time memory mapping configuration data array (MT_..., MATTR_...,
254 * MPERM_...) to the equivalent bit (field) values used in the MMU's
255 * L1 and L2 page table entries. Contains plausibility checks.
256 *
257 * @param attrs type/attribute/permissions flags word obtained from
258 * an entry of the mmu_config mapping data array.
259 * @retval A struct containing the information from the input flags
260 * word converted to the bits / bit fields used in L1 and
261 * L2 page table entries.
262 */
263 static struct arm_mmu_perms_attrs arm_mmu_convert_attr_flags(uint32_t attrs)
264 {
265 struct arm_mmu_perms_attrs perms_attrs = {0};
266
267 __ASSERT(((attrs & MT_MASK) > 0),
268 "Cannot convert attrs word to PTE control bits: no "
269 "memory type specified");
270 __ASSERT(!((attrs & MPERM_W) && !(attrs & MPERM_R)),
271 "attrs must not define write permission without read "
272 "permission");
273 __ASSERT(!((attrs & MPERM_W) && (attrs & MPERM_X)),
274 "attrs must not define executable memory with write "
275 "permission");
276
277 /*
278 * The translation of the memory type / permissions / attributes
279 * flags in the attrs word to the TEX, C, B, S and AP bits of the
280 * target PTE is based on the reference manual:
281 * TEX, C, B, S: Table B3-10, chap. B3.8.2, p. B3-1363f.
282 * AP : Table B3-6, chap. B3.7.1, p. B3-1353.
283 * Device / strongly ordered memory is always assigned to a domain
284 * other than that used for normal memory. Assuming that userspace
285 * support utilizing the MMU is eventually implemented, a single
286 * modification of the DACR register when entering/leaving unprivi-
287 * leged mode could be used in order to enable/disable all device
288 * memory access without having to modify any PTs/PTEs.
289 */
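
/*
 * Worked example (informational only, based on the tables referenced
 * above): for the typical RAM mapping
 * MT_NORMAL | MATTR_SHARED | MPERM_R | MPERM_W |
 * MATTR_CACHE_OUTER_WB_WA | MATTR_CACHE_INNER_WB_WA,
 * the branches below yield TEX[2] = 1 with TEX[1:0] = outer WB-WA,
 * C/B = inner WB-WA, S = 1, domain = ARM_MMU_DOMAIN_OS, XN = 1 (no
 * MPERM_X), AP[2] = 0 (writable), AP[1] = 0 (PL1 only), and id_mask
 * stays 0x3 so the PTE ID bits remain valid.
 */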
290
291 if (attrs & MT_STRONGLY_ORDERED) {
292 /* Strongly ordered is always shareable, S bit is ignored */
293 perms_attrs.tex = 0;
294 perms_attrs.cacheable = 0;
295 perms_attrs.bufferable = 0;
296 perms_attrs.shared = 0;
297 perms_attrs.domain = ARM_MMU_DOMAIN_DEVICE;
298 } else if (attrs & MT_DEVICE) {
299 /*
300 * Shareability of device memory is determined by TEX, C, B.
301 * The S bit is ignored. C is always 0 for device memory.
302 */
303 perms_attrs.shared = 0;
304 perms_attrs.cacheable = 0;
305 perms_attrs.domain = ARM_MMU_DOMAIN_DEVICE;
306
307 if (attrs & MATTR_SHARED) {
308 perms_attrs.tex = 0;
309 perms_attrs.bufferable = 1;
310 } else {
311 perms_attrs.tex = 2;
312 perms_attrs.bufferable = 0;
313 }
314 } else if (attrs & MT_NORMAL) {
315 /*
316 * TEX[2] is always 1. TEX[1:0] contain the outer cache attri-
317 * butes encoding, C and B contain the inner cache attributes
318 * encoding.
319 */
320 perms_attrs.tex |= ARM_MMU_TEX2_CACHEABLE_MEMORY;
321 perms_attrs.domain = ARM_MMU_DOMAIN_OS;
322
323 /* For normal memory, shareability depends on the S bit */
324 if (attrs & MATTR_SHARED) {
325 perms_attrs.shared = 1;
326 }
327
328 if (attrs & MATTR_CACHE_OUTER_WB_WA) {
329 perms_attrs.tex |= ARM_MMU_TEX_CACHE_ATTRS_WB_WA;
330 } else if (attrs & MATTR_CACHE_OUTER_WT_nWA) {
331 perms_attrs.tex |= ARM_MMU_TEX_CACHE_ATTRS_WT_nWA;
332 } else if (attrs & MATTR_CACHE_OUTER_WB_nWA) {
333 perms_attrs.tex |= ARM_MMU_TEX_CACHE_ATTRS_WB_nWA;
334 }
335
336 if (attrs & MATTR_CACHE_INNER_WB_WA) {
337 perms_attrs.cacheable = ARM_MMU_C_CACHE_ATTRS_WB_WA;
338 perms_attrs.bufferable = ARM_MMU_B_CACHE_ATTRS_WB_WA;
339 } else if (attrs & MATTR_CACHE_INNER_WT_nWA) {
340 perms_attrs.cacheable = ARM_MMU_C_CACHE_ATTRS_WT_nWA;
341 perms_attrs.bufferable = ARM_MMU_B_CACHE_ATTRS_WT_nWA;
342 } else if (attrs & MATTR_CACHE_INNER_WB_nWA) {
343 perms_attrs.cacheable = ARM_MMU_C_CACHE_ATTRS_WB_nWA;
344 perms_attrs.bufferable = ARM_MMU_B_CACHE_ATTRS_WB_nWA;
345 }
346 }
347
348 if (attrs & MATTR_NON_SECURE) {
349 perms_attrs.non_sec = 1;
350 }
351 if (attrs & MATTR_NON_GLOBAL) {
352 perms_attrs.not_global = 1;
353 }
354
355 /*
356 * Up next is the consideration of the case that a PTE shall be configured
357 * for a page that shall not be accessible at all (e.g. guard pages), and
358 * therefore has neither read nor write permissions. In the AP[2:1] access
359 * permission specification model, the only way to indicate this is to
360 * actually mask out the PTE's identifier bits, as otherwise, read permission
361 * is always granted for any valid PTE, it can't be revoked explicitly,
362 * unlike the write permission.
363 */
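/*
 * Example: a guard page request with neither MPERM_R nor MPERM_W set ends
 * up with id_mask = 0x0, which forces the resulting PTE's ID bits to the
 * 'invalid/fault' encoding and thereby makes the page inaccessible.
 */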
364 if (!((attrs & MPERM_R) || (attrs & MPERM_W))) {
365 perms_attrs.id_mask = 0x0;
366 } else {
367 perms_attrs.id_mask = 0x3;
368 }
369 if (!(attrs & MPERM_W)) {
370 perms_attrs.acc_perms |= ARM_MMU_PERMS_AP2_DISABLE_WR;
371 }
372 if (attrs & MPERM_UNPRIVILEGED) {
373 perms_attrs.acc_perms |= ARM_MMU_PERMS_AP1_ENABLE_PL0;
374 }
375 if (!(attrs & MPERM_X)) {
376 perms_attrs.exec_never = 1;
377 }
378
379 return perms_attrs;
380 }
381
382 /**
383 * @brief Maps a 1 MB memory range via a level 1 page table entry
384 * Maps a 1 MB memory range using a level 1 page table entry of type
385 * 'section'. This type of entry saves a level 2 page table, but has
386 * two pre-conditions: the memory area to be mapped must contain at
387 * least 1 MB of contiguous memory, starting at an address with suit-
388 * able alignment. This mapping method should only be used for map-
389 * pings for which it is unlikely that the attributes of those mappings
390  * will change at run-time (e.g. code sections will al-
391 * ways be read-only and executable). Should the case occur that the
392 * permissions or attributes of a subset of a 1 MB section entry shall
393 * be re-configured at run-time, a L1 section entry will be broken
394 * down into 4k segments using a L2 table with identical attributes
395 * before any modifications are performed for the subset of the affec-
396  * ted 1 MB range. This comes with a non-deterministic performance
397  * penalty at the time of re-configuration; therefore, any mappings
398 * for which L1 section entries are a valid option, shall be marked in
399 * their declaration with the MATTR_MAY_MAP_L1_SECTION flag.
400 *
401 * @param va 32-bit target virtual address to be mapped.
402 * @param pa 32-bit physical address to be mapped.
403 * @param perms_attrs Permission and attribute bits in the format
404 * used in the MMU's L1 page table entries.
405 */
406 static void arm_mmu_l1_map_section(uint32_t va, uint32_t pa,
407 struct arm_mmu_perms_attrs perms_attrs)
408 {
409 uint32_t l1_index = (va >> ARM_MMU_PTE_L1_INDEX_PA_SHIFT) &
410 ARM_MMU_PTE_L1_INDEX_MASK;
411
412 __ASSERT(l1_page_table.entries[l1_index].undefined.id == ARM_MMU_PTE_ID_INVALID,
413 "Unexpected non-zero L1 PTE ID %u for VA 0x%08X / PA 0x%08X",
414 l1_page_table.entries[l1_index].undefined.id,
415 va, pa);
416
417 l1_page_table.entries[l1_index].l1_section_1m.id =
418 (ARM_MMU_PTE_ID_SECTION & perms_attrs.id_mask);
419 l1_page_table.entries[l1_index].l1_section_1m.bufferable = perms_attrs.bufferable;
420 l1_page_table.entries[l1_index].l1_section_1m.cacheable = perms_attrs.cacheable;
421 l1_page_table.entries[l1_index].l1_section_1m.exec_never = perms_attrs.exec_never;
422 l1_page_table.entries[l1_index].l1_section_1m.domain = perms_attrs.domain;
423 l1_page_table.entries[l1_index].l1_section_1m.impl_def = 0;
424 l1_page_table.entries[l1_index].l1_section_1m.acc_perms10 =
425 ((perms_attrs.acc_perms & 0x1) << 1) | 0x1;
426 l1_page_table.entries[l1_index].l1_section_1m.tex = perms_attrs.tex;
427 l1_page_table.entries[l1_index].l1_section_1m.acc_perms2 =
428 (perms_attrs.acc_perms >> 1) & 0x1;
429 l1_page_table.entries[l1_index].l1_section_1m.shared = perms_attrs.shared;
430 l1_page_table.entries[l1_index].l1_section_1m.not_global = perms_attrs.not_global;
431 l1_page_table.entries[l1_index].l1_section_1m.zero = 0;
432 l1_page_table.entries[l1_index].l1_section_1m.non_sec = perms_attrs.non_sec;
433 l1_page_table.entries[l1_index].l1_section_1m.base_address =
434 (pa >> ARM_MMU_PTE_L1_INDEX_PA_SHIFT);
435 }
436
437 /**
438 * @brief Converts a L1 1 MB section mapping to a full L2 table
439 * When this function is called, something has happened that shouldn't
440 * happen for the sake of run-time performance and determinism: the
441 * attributes and/or permissions of a subset of a 1 MB memory range
442 * currently represented by a level 1 page table entry of type 'section'
443 * shall be modified so that they differ from the rest of the 1 MB
444 * range's attributes/permissions. Therefore, the single L1 page table
445 * entry has to be broken down to the full 256 4k-wide entries of a
446 * L2 page table with identical properties so that afterwards, the
447 * modification of the subset can be performed with a 4k granularity.
448 * The risk at this point is that all L2 tables are already in use,
449 * which will result in an assertion failure in the first contained
450 * #arm_mmu_l2_map_page() call.
451 * @warning While the conversion is being performed, interrupts are
452 * locked globally and the MMU is disabled (the required
453 * Zephyr code & data are still accessible in this state as
454 * those are identity mapped). Expect non-deterministic be-
455 * haviour / interrupt latencies while the conversion is in
456 * progress!
457 *
458 * @param va 32-bit virtual address within the 1 MB range that shall
459 * be converted from L1 1 MB section mapping to L2 4 kB page
460 * mappings.
461 * @param l2_page_table Pointer to an empty L2 page table allocated
462 * for the purpose of replacing the L1 section
463 * mapping.
464 */
465 static void arm_mmu_remap_l1_section_to_l2_table(uint32_t va,
466 struct arm_mmu_l2_page_table *l2_page_table)
467 {
468 struct arm_mmu_perms_attrs perms_attrs = {0};
469 uint32_t l1_index = (va >> ARM_MMU_PTE_L1_INDEX_PA_SHIFT) &
470 ARM_MMU_PTE_L1_INDEX_MASK;
471 uint32_t rem_size = MB(1);
472 uint32_t reg_val;
473 int lock_key;
474
475 /*
476 * Extract the permissions and attributes from the current 1 MB section entry.
477 * This data will be carried over to the resulting L2 page table.
478 */
479
480 perms_attrs.acc_perms = (l1_page_table.entries[l1_index].l1_section_1m.acc_perms2 << 1) |
481 ((l1_page_table.entries[l1_index].l1_section_1m.acc_perms10 >> 1) & 0x1);
482 perms_attrs.bufferable = l1_page_table.entries[l1_index].l1_section_1m.bufferable;
483 perms_attrs.cacheable = l1_page_table.entries[l1_index].l1_section_1m.cacheable;
484 perms_attrs.domain = l1_page_table.entries[l1_index].l1_section_1m.domain;
485 perms_attrs.id_mask = (l1_page_table.entries[l1_index].l1_section_1m.id ==
486 ARM_MMU_PTE_ID_INVALID) ? 0x0 : 0x3;
487 perms_attrs.not_global = l1_page_table.entries[l1_index].l1_section_1m.not_global;
488 perms_attrs.non_sec = l1_page_table.entries[l1_index].l1_section_1m.non_sec;
489 perms_attrs.shared = l1_page_table.entries[l1_index].l1_section_1m.shared;
490 perms_attrs.tex = l1_page_table.entries[l1_index].l1_section_1m.tex;
491 perms_attrs.exec_never = l1_page_table.entries[l1_index].l1_section_1m.exec_never;
492
493 /*
494 * Disable interrupts - no interrupts shall occur before the L2 table has
495 * been set up in place of the former L1 section entry.
496 */
497
498 lock_key = arch_irq_lock();
499
500 /*
501 * Disable the MMU. The L1 PTE array and the L2 PT array may actually be
502 * covered by the L1 PTE we're about to replace, so access to this data
503 * must remain functional during the entire remap process. Yet, the only
504 * memory areas for which L1 1 MB section entries are even considered are
505 * those belonging to the Zephyr image. Those areas are *always* identity
506 * mapped, so the MMU can be turned off and the relevant data will still
507 * be available.
508 */
509
510 reg_val = __get_SCTLR();
511 __set_SCTLR(reg_val & (~ARM_MMU_SCTLR_MMU_ENABLE_BIT));
512
513 /*
514 * Clear the entire L1 PTE & re-configure it as a L2 PT reference
515 * -> already sets the correct values for: zero0, zero1, impl_def.
516 */
517 l1_page_table.entries[l1_index].word = 0;
518
519 l1_page_table.entries[l1_index].l2_page_table_ref.id = ARM_MMU_PTE_ID_L2_PT;
520 l1_page_table.entries[l1_index].l2_page_table_ref.domain = perms_attrs.domain;
521 l1_page_table.entries[l1_index].l2_page_table_ref.non_sec = perms_attrs.non_sec;
522 l1_page_table.entries[l1_index].l2_page_table_ref.l2_page_table_address =
523 (((uint32_t)l2_page_table >> ARM_MMU_PT_L2_ADDR_SHIFT) &
524 ARM_MMU_PT_L2_ADDR_MASK);
525
526 /* Align the target VA to the base address of the section we're converting */
527 va &= ~(MB(1) - 1);
528 while (rem_size > 0) {
529 arm_mmu_l2_map_page(va, va, perms_attrs);
530 rem_size -= KB(4);
531 va += KB(4);
532 }
533
534 /* Remap complete, re-enable the MMU, unlock the interrupts. */
535
536 invalidate_tlb_all();
537 __set_SCTLR(reg_val);
538
539 arch_irq_unlock(lock_key);
540 }
541
542 /**
543 * @brief Maps a 4 kB memory page using a L2 page table entry
544 * Maps a single 4 kB page of memory from the specified physical
545 * address to the specified virtual address, using the provided
546 * attributes and permissions which have already been converted
547 * from the system's format provided to arch_mem_map() to the
548 * bits / bit masks used in the L2 page table entry.
549 *
550 * @param va 32-bit target virtual address.
551 * @param pa 32-bit physical address.
552 * @param perms_attrs Permission and attribute bits in the format
553 * used in the MMU's L2 page table entries.
554 */
555 static void arm_mmu_l2_map_page(uint32_t va, uint32_t pa,
556 struct arm_mmu_perms_attrs perms_attrs)
557 {
558 struct arm_mmu_l2_page_table *l2_page_table = NULL;
559 uint32_t l1_index = (va >> ARM_MMU_PTE_L1_INDEX_PA_SHIFT) &
560 ARM_MMU_PTE_L1_INDEX_MASK;
561 uint32_t l2_index = (va >> ARM_MMU_PTE_L2_INDEX_PA_SHIFT) &
562 ARM_MMU_PTE_L2_INDEX_MASK;
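
/*
 * Index arithmetic for reference (assuming the standard short-descriptor
 * shift/mask values): for VA 0x20123456, l1_index = VA[31:20] = 0x201,
 * l2_index = VA[19:12] = 0x23, and VA[11:0] = 0x456 is the offset within
 * the 4 kB page.
 */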
563
564 /*
565 * Use the calculated L1 index in order to determine if a L2 page
566 * table is required in order to complete the current mapping.
567 * -> See below for an explanation of the possible scenarios.
568 */
569
570 if (l1_page_table.entries[l1_index].undefined.id == ARM_MMU_PTE_ID_INVALID ||
571 (l1_page_table.entries[l1_index].undefined.id & ARM_MMU_PTE_ID_SECTION) != 0) {
572 l2_page_table = arm_mmu_assign_l2_table(pa);
573 __ASSERT(l2_page_table != NULL,
574 "Unexpected L2 page table NULL pointer for VA 0x%08X",
575 va);
576 }
577
578 /*
579 * Check what is currently present at the corresponding L1 table entry.
580 * The following scenarios are possible:
581 * 1) The L1 PTE's ID bits are zero, as is the rest of the entry.
582 * In this case, the L1 PTE is currently unused. A new L2 PT to
583 * refer to in this entry has already been allocated above.
584 * 2) The L1 PTE's ID bits indicate a L2 PT reference entry (01).
585 * The corresponding L2 PT's address will be resolved using this
586 * entry.
587 * 3) The L1 PTE's ID bits may or may not be zero, and the rest of
588 * the descriptor contains some non-zero data. This always indicates
589 * an existing 1 MB section entry in this place. Checking only the
590 * ID bits wouldn't be enough, as the only way to indicate a section
591 * with neither R nor W permissions is to set the ID bits to 00 in
592 * the AP[2:1] permissions model. As we're now about to map a single
593 * page overlapping with the 1 MB section, the section has to be
594 * converted into a L2 table. Afterwards, the current page mapping
595 * can be added/modified.
596 */
597
598 if (l1_page_table.entries[l1_index].word == 0) {
599 /* The matching L1 PT entry is currently unused */
600 l1_page_table.entries[l1_index].l2_page_table_ref.id = ARM_MMU_PTE_ID_L2_PT;
601 l1_page_table.entries[l1_index].l2_page_table_ref.zero0 = 0;
602 l1_page_table.entries[l1_index].l2_page_table_ref.zero1 = 0;
603 l1_page_table.entries[l1_index].l2_page_table_ref.impl_def = 0;
604 l1_page_table.entries[l1_index].l2_page_table_ref.domain = 0; /* TODO */
605 l1_page_table.entries[l1_index].l2_page_table_ref.non_sec =
606 perms_attrs.non_sec;
607 l1_page_table.entries[l1_index].l2_page_table_ref.l2_page_table_address =
608 (((uint32_t)l2_page_table >> ARM_MMU_PT_L2_ADDR_SHIFT) &
609 ARM_MMU_PT_L2_ADDR_MASK);
610 } else if (l1_page_table.entries[l1_index].undefined.id == ARM_MMU_PTE_ID_L2_PT) {
611 /* The matching L1 PT entry already points to a L2 PT */
612 l2_page_table = (struct arm_mmu_l2_page_table *)
613 ((l1_page_table.entries[l1_index].word &
614 (ARM_MMU_PT_L2_ADDR_MASK << ARM_MMU_PT_L2_ADDR_SHIFT)));
615 /*
616 * The only configuration bit contained in the L2 PT entry is the
617 * NS bit. Set it according to the attributes passed to this function,
618 * warn if there is a mismatch between the current page's NS attribute
619 * value and the value currently contained in the L2 PT entry.
620 */
621 if (l1_page_table.entries[l1_index].l2_page_table_ref.non_sec !=
622 perms_attrs.non_sec) {
623 LOG_WRN("NS bit mismatch in L2 PT reference at L1 index [%u], "
624 "re-configuring from %u to %u",
625 l1_index,
626 l1_page_table.entries[l1_index].l2_page_table_ref.non_sec,
627 perms_attrs.non_sec);
628 l1_page_table.entries[l1_index].l2_page_table_ref.non_sec =
629 perms_attrs.non_sec;
630 }
631 } else if (l1_page_table.entries[l1_index].undefined.reserved != 0) {
632 /*
633 * The matching L1 PT entry currently holds a 1 MB section entry
634 * in order to save a L2 table (as it's neither completely blank
635 * nor a L2 PT reference), but now we have to map an overlapping
636 * 4 kB page, so the section entry must be converted to a L2 table
637 * first before the individual L2 entry for the page to be mapped is
638 * accessed. A blank L2 PT has already been assigned above.
639 */
640 arm_mmu_remap_l1_section_to_l2_table(va, l2_page_table);
641 }
642
643 /*
644 * If the matching L2 PTE is blank, increment the number of used entries
645 * in the L2 table. If the L2 PTE already contains some data, we're re-
646  * placing the entry's data instead; the used entry count remains unchanged.
647 * Once again, checking the ID bits might be misleading if the PTE declares
648 * a page which has neither R nor W permissions.
649 */
650 if (l2_page_table->entries[l2_index].word == 0) {
651 arm_mmu_inc_l2_table_entries(l2_page_table);
652 }
653
654 l2_page_table->entries[l2_index].l2_page_4k.id =
655 (ARM_MMU_PTE_ID_SMALL_PAGE & perms_attrs.id_mask);
656 l2_page_table->entries[l2_index].l2_page_4k.id |= perms_attrs.exec_never; /* XN in [0] */
657 l2_page_table->entries[l2_index].l2_page_4k.bufferable = perms_attrs.bufferable;
658 l2_page_table->entries[l2_index].l2_page_4k.cacheable = perms_attrs.cacheable;
659 l2_page_table->entries[l2_index].l2_page_4k.acc_perms10 =
660 ((perms_attrs.acc_perms & 0x1) << 1) | 0x1;
661 l2_page_table->entries[l2_index].l2_page_4k.tex = perms_attrs.tex;
662 l2_page_table->entries[l2_index].l2_page_4k.acc_perms2 =
663 ((perms_attrs.acc_perms >> 1) & 0x1);
664 l2_page_table->entries[l2_index].l2_page_4k.shared = perms_attrs.shared;
665 l2_page_table->entries[l2_index].l2_page_4k.not_global = perms_attrs.not_global;
666 l2_page_table->entries[l2_index].l2_page_4k.pa_base =
667 ((pa >> ARM_MMU_PTE_L2_SMALL_PAGE_ADDR_SHIFT) &
668 ARM_MMU_PTE_L2_SMALL_PAGE_ADDR_MASK);
669 }
670
671 /**
672 * @brief Unmaps a 4 kB memory page by clearing its L2 page table entry
673 * Unmaps a single 4 kB page of memory from the specified virtual
674 * address by clearing its respective L2 page table entry.
675 *
676 * @param va 32-bit virtual address to be unmapped.
677 */
678 static void arm_mmu_l2_unmap_page(uint32_t va)
679 {
680 struct arm_mmu_l2_page_table *l2_page_table;
681 uint32_t l1_index = (va >> ARM_MMU_PTE_L1_INDEX_PA_SHIFT) &
682 ARM_MMU_PTE_L1_INDEX_MASK;
683 uint32_t l2_index = (va >> ARM_MMU_PTE_L2_INDEX_PA_SHIFT) &
684 ARM_MMU_PTE_L2_INDEX_MASK;
685
686 if (l1_page_table.entries[l1_index].undefined.id != ARM_MMU_PTE_ID_L2_PT) {
687 /*
688 * No L2 PT currently exists for the given VA - this should be
689 * tolerated without an error, just as in the case that while
690 * a L2 PT exists, the corresponding PTE is blank - see explanation
691 * below, the same applies here.
692 */
693 return;
694 }
695
696 l2_page_table = (struct arm_mmu_l2_page_table *)
697 ((l1_page_table.entries[l1_index].word &
698 (ARM_MMU_PT_L2_ADDR_MASK << ARM_MMU_PT_L2_ADDR_SHIFT)));
699
700 if (l2_page_table->entries[l2_index].word == 0) {
701 /*
702 * We're supposed to unmap a page at the given VA, but there currently
703 * isn't anything mapped at this address, the L2 PTE is blank.
704 * -> This is normal if a memory area is being mapped via k_mem_map,
705 * which contains two calls to arch_mem_unmap (which effectively end up
706 * here) in order to unmap the leading and trailing guard pages.
707 * Therefore, it has to be expected that unmap calls are made for unmapped
708 * memory which hasn't been in use before.
709 * -> Just return, don't decrement the entry counter of the corresponding
710 * L2 page table, as we're not actually clearing any PTEs.
711 */
712 return;
713 }
714
715 if ((l2_page_table->entries[l2_index].undefined.id & ARM_MMU_PTE_ID_SMALL_PAGE) !=
716 ARM_MMU_PTE_ID_SMALL_PAGE) {
717 LOG_ERR("Cannot unmap virtual memory at 0x%08X: invalid "
718 "page table entry type in level 2 page table at "
719 "L1 index [%u], L2 index [%u]", va, l1_index, l2_index);
720 return;
721 }
722
723 l2_page_table->entries[l2_index].word = 0;
724
725 arm_mmu_dec_l2_table_entries(l2_page_table);
726 }
727
728 /**
729 * @brief MMU boot-time initialization function
730 * Initializes the MMU at boot time. Sets up the page tables and
731 * applies any specified memory mappings for either the different
732 * sections of the Zephyr binary image, or for device memory as
733 * specified at the SoC level.
734 *
735 * @retval Always 0, errors are handled by assertions.
736 */
737 int z_arm_mmu_init(void)
738 {
739 uint32_t mem_range;
740 uint32_t pa;
741 uint32_t va;
742 uint32_t attrs;
743 uint32_t pt_attrs = 0;
744 uint32_t rem_size;
745 uint32_t reg_val = 0;
746 struct arm_mmu_perms_attrs perms_attrs;
747
748 __ASSERT(KB(4) == CONFIG_MMU_PAGE_SIZE,
749 "MMU_PAGE_SIZE value %u is invalid, only 4 kB pages are supported\n",
750 CONFIG_MMU_PAGE_SIZE);
751
752 /* Set up the memory regions pre-defined by the image */
753 for (mem_range = 0; mem_range < ARRAY_SIZE(mmu_zephyr_ranges); mem_range++) {
754 pa = mmu_zephyr_ranges[mem_range].start;
755 rem_size = mmu_zephyr_ranges[mem_range].end - pa;
756 attrs = mmu_zephyr_ranges[mem_range].attrs;
757 perms_attrs = arm_mmu_convert_attr_flags(attrs);
758
759 /*
760 * Check if the L1 page table is within the region currently
761 * being mapped. If so, store the permissions and attributes
762 * of the current section. This information is required when
763 * writing to the TTBR0 register.
764 */
765 if (((uint32_t)&l1_page_table >= pa) &&
766 ((uint32_t)&l1_page_table < (pa + rem_size))) {
767 pt_attrs = attrs;
768 }
769
770 while (rem_size > 0) {
771 if (rem_size >= MB(1) && (pa & 0xFFFFF) == 0 &&
772 (attrs & MATTR_MAY_MAP_L1_SECTION)) {
773 /*
774  * Remaining area size >= 1 MB & matching alignment
775 * -> map a 1 MB section instead of individual 4 kB
776 * pages with identical configuration.
777 */
778 arm_mmu_l1_map_section(pa, pa, perms_attrs);
779 rem_size -= MB(1);
780 pa += MB(1);
781 } else {
782 arm_mmu_l2_map_page(pa, pa, perms_attrs);
783 rem_size -= (rem_size >= KB(4)) ? KB(4) : rem_size;
784 pa += KB(4);
785 }
786 }
787 }
788
789 /* Set up the memory regions defined at the SoC level */
790 for (mem_range = 0; mem_range < mmu_config.num_regions; mem_range++) {
791 pa = (uint32_t)(mmu_config.mmu_regions[mem_range].base_pa);
792 va = (uint32_t)(mmu_config.mmu_regions[mem_range].base_va);
793 rem_size = (uint32_t)(mmu_config.mmu_regions[mem_range].size);
794 attrs = mmu_config.mmu_regions[mem_range].attrs;
795 perms_attrs = arm_mmu_convert_attr_flags(attrs);
796
797 while (rem_size > 0) {
798 arm_mmu_l2_map_page(va, pa, perms_attrs);
799 rem_size -= (rem_size >= KB(4)) ? KB(4) : rem_size;
800 va += KB(4);
801 pa += KB(4);
802 }
803 }
804
805 /* Clear TTBR1 */
806 __asm__ __volatile__("mcr p15, 0, %0, c2, c0, 1" : : "r"(reg_val));
807
808 /* Write TTBCR: EAE = 0 (short-descriptor format), security not yet relevant, N[2:0] = 0 */
809 __asm__ __volatile__("mcr p15, 0, %0, c2, c0, 2"
810 : : "r"(reg_val));
811
812 /* Write TTBR0 */
813 reg_val = ((uint32_t)&l1_page_table.entries[0] & ~0x3FFF);
814
815 /*
816 * Set IRGN, RGN, S in TTBR0 based on the configuration of the
817 * memory area the actual page tables are located in.
818 */
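/*
 * For reference (DDI0406C, TTBR0 format with the Multiprocessing
 * Extensions): RGN and IRGN use the encoding 00 = non-cacheable,
 * 01 = WB-WA, 10 = WT, 11 = WB-nWA; IRGN is split across two
 * non-adjacent TTBR0 bits, which is why two separate IRGN bit
 * defines are set below.
 */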
819 if (pt_attrs & MATTR_SHARED) {
820 reg_val |= ARM_MMU_TTBR_SHAREABLE_BIT;
821 }
822
823 if (pt_attrs & MATTR_CACHE_OUTER_WB_WA) {
824 reg_val |= (ARM_MMU_TTBR_RGN_OUTER_WB_WA_CACHEABLE <<
825 ARM_MMU_TTBR_RGN_SHIFT);
826 } else if (pt_attrs & MATTR_CACHE_OUTER_WT_nWA) {
827 reg_val |= (ARM_MMU_TTBR_RGN_OUTER_WT_CACHEABLE <<
828 ARM_MMU_TTBR_RGN_SHIFT);
829 } else if (pt_attrs & MATTR_CACHE_OUTER_WB_nWA) {
830 reg_val |= (ARM_MMU_TTBR_RGN_OUTER_WB_nWA_CACHEABLE <<
831 ARM_MMU_TTBR_RGN_SHIFT);
832 }
833
834 if (pt_attrs & MATTR_CACHE_INNER_WB_WA) {
835 reg_val |= ARM_MMU_TTBR_IRGN0_BIT_MP_EXT_ONLY;
836 } else if (pt_attrs & MATTR_CACHE_INNER_WT_nWA) {
837 reg_val |= ARM_MMU_TTBR_IRGN1_BIT_MP_EXT_ONLY;
838 } else if (pt_attrs & MATTR_CACHE_INNER_WB_nWA) {
839 reg_val |= ARM_MMU_TTBR_IRGN0_BIT_MP_EXT_ONLY;
840 reg_val |= ARM_MMU_TTBR_IRGN1_BIT_MP_EXT_ONLY;
841 }
842
843 __set_TTBR0(reg_val);
844
845 /* Write DACR -> all domains to client = 01b. */
846 reg_val = ARM_MMU_DACR_ALL_DOMAINS_CLIENT;
847 __set_DACR(reg_val);
848
849 invalidate_tlb_all();
850
851 /* Enable the MMU and Cache in SCTLR */
852 reg_val = __get_SCTLR();
853 reg_val |= ARM_MMU_SCTLR_AFE_BIT;
854 reg_val |= ARM_MMU_SCTLR_ICACHE_ENABLE_BIT;
855 reg_val |= ARM_MMU_SCTLR_DCACHE_ENABLE_BIT;
856 reg_val |= ARM_MMU_SCTLR_MMU_ENABLE_BIT;
857 __set_SCTLR(reg_val);
858
859 return 0;
860 }
861
862 /**
863 * @brief ARMv7-specific implementation of memory mapping at run-time
864 * Maps memory according to the parameters provided by the caller
865 * at run-time.
866 *
867 * @param virt 32-bit target virtual address.
868 * @param phys 32-bit physical address.
869 * @param size Size (in bytes) of the memory area to map.
870 * @param flags Memory attributes & permissions. Comp. K_MEM_...
871 * flags in kernel/mm.h.
872 * @retval 0 on success, -EINVAL if an invalid parameter is detected.
873 */
874 static int __arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
875 {
876 uint32_t va = (uint32_t)virt;
877 uint32_t pa = (uint32_t)phys;
878 uint32_t rem_size = (uint32_t)size;
879 uint32_t conv_flags = MPERM_R;
880 struct arm_mmu_perms_attrs perms_attrs;
881 int key;
882
883 if (size == 0) {
884 LOG_ERR("Cannot map physical memory at 0x%08X: invalid "
885 "zero size", (uint32_t)phys);
886 return -EINVAL;
887 }
888
889 switch (flags & K_MEM_CACHE_MASK) {
890
891 case K_MEM_CACHE_NONE:
892 default:
893 conv_flags |= MT_DEVICE;
894 break;
895 case K_MEM_CACHE_WB:
896 conv_flags |= MT_NORMAL;
897 conv_flags |= MATTR_SHARED;
898 if (flags & K_MEM_PERM_RW) {
899 conv_flags |= MATTR_CACHE_OUTER_WB_WA;
900 conv_flags |= MATTR_CACHE_INNER_WB_WA;
901 } else {
902 conv_flags |= MATTR_CACHE_OUTER_WB_nWA;
903 conv_flags |= MATTR_CACHE_INNER_WB_nWA;
904 }
905 break;
906 case K_MEM_CACHE_WT:
907 conv_flags |= MT_NORMAL;
908 conv_flags |= MATTR_SHARED;
909 conv_flags |= MATTR_CACHE_OUTER_WT_nWA;
910 conv_flags |= MATTR_CACHE_INNER_WT_nWA;
911 break;
912
913 }
914
915 if (flags & K_MEM_PERM_RW) {
916 conv_flags |= MPERM_W;
917 }
918 if (flags & K_MEM_PERM_EXEC) {
919 conv_flags |= MPERM_X;
920 }
921
922 perms_attrs = arm_mmu_convert_attr_flags(conv_flags);
923
924 key = arch_irq_lock();
925
926 while (rem_size > 0) {
927 arm_mmu_l2_map_page(va, pa, perms_attrs);
928 rem_size -= (rem_size >= KB(4)) ? KB(4) : rem_size;
929 va += KB(4);
930 pa += KB(4);
931 }
932
933 arch_irq_unlock(key);
934
935 return 0;
936 }
937
938 /**
939 * @brief Arch-specific wrapper function for memory mapping at run-time
940 * Maps memory according to the parameters provided by the caller
941 * at run-time. This function wraps the ARMv7 MMU specific implementation
942 * #__arch_mem_map() for the upper layers of the memory management.
943 * If the map operation fails, a kernel panic will be triggered.
944 *
945 * @param virt 32-bit target virtual address.
946 * @param phys 32-bit physical address.
947 * @param size Size (in bytes) of the memory area to map.
948 * @param flags Memory attributes & permissions. Comp. K_MEM_...
949 * flags in kernel/mm.h.
950 */
951 void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
952 {
953 int ret = __arch_mem_map(virt, phys, size, flags);
954
955 if (ret) {
956 LOG_ERR("__arch_mem_map() returned %d", ret);
957 k_panic();
958 } else {
959 invalidate_tlb_all();
960 }
961 }
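
/*
 * Illustrative usage only (address and size are hypothetical): the upper
 * memory management layers normally reach this function via k_mem_map() or
 * the device MMIO helpers, but a direct call mapping an 8 kB register
 * window uncached and read/write would look like:
 *
 *   arch_mem_map((void *)0xE0001000, 0xE0001000, KB(8),
 *                K_MEM_PERM_RW | K_MEM_CACHE_NONE);
 */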
962
963 /**
964 * @brief ARMv7-specific implementation of memory unmapping at run-time
965 * Unmaps memory according to the parameters provided by the caller
966 * at run-time.
967 *
968 * @param addr 32-bit virtual address to unmap.
969 * @param size Size (in bytes) of the memory area to unmap.
970 * @retval 0 on success, -EINVAL if an invalid parameter is detected.
971 */
972 static int __arch_mem_unmap(void *addr, size_t size)
973 {
974 uint32_t va = (uint32_t)addr;
975 uint32_t rem_size = (uint32_t)size;
976 int key;
977
978 if (addr == NULL) {
979 LOG_ERR("Cannot unmap virtual memory: invalid NULL pointer");
980 return -EINVAL;
981 }
982
983 if (size == 0) {
984 LOG_ERR("Cannot unmap virtual memory at 0x%08X: invalid "
985 "zero size", (uint32_t)addr);
986 return -EINVAL;
987 }
988
989 key = arch_irq_lock();
990
991 while (rem_size > 0) {
992 arm_mmu_l2_unmap_page(va);
993 rem_size -= (rem_size >= KB(4)) ? KB(4) : rem_size;
994 va += KB(4);
995 }
996
997 arch_irq_unlock(key);
998
999 return 0;
1000 }
1001
1002 /**
1003 * @brief Arch-specific wrapper function for memory unmapping at run-time
1004 * Unmaps memory according to the parameters provided by the caller
1005 * at run-time. This function wraps the ARMv7 MMU specific implementation
1006 * #__arch_mem_unmap() for the upper layers of the memory management.
1007 *
1008 * @param addr 32-bit virtual address to unmap.
1009 * @param size Size (in bytes) of the memory area to unmap.
1010 */
1011 void arch_mem_unmap(void *addr, size_t size)
1012 {
1013 int ret = __arch_mem_unmap(addr, size);
1014
1015 if (ret) {
1016 LOG_ERR("__arch_mem_unmap() returned %d", ret);
1017 } else {
1018 invalidate_tlb_all();
1019 }
1020 }
1021
1022 /**
1023 * @brief Arch-specific virtual-to-physical address resolver function
1024 * ARMv7 MMU specific implementation of a function that resolves the
1025 * physical address corresponding to the given virtual address.
1026 *
1027 * @param virt 32-bit target virtual address to resolve.
1028 * @param phys Pointer to a variable to which the resolved physical
1029 * address will be written. May be NULL if this information
1030 * is not actually required by the caller.
1031 * @retval 0 if the physical address corresponding to the specified
1032 * virtual address could be resolved successfully, -EFAULT
1033 * if the specified virtual address is not currently mapped.
1034 */
1035 int arch_page_phys_get(void *virt, uintptr_t *phys)
1036 {
1037 uint32_t l1_index = ((uint32_t)virt >> ARM_MMU_PTE_L1_INDEX_PA_SHIFT) &
1038 ARM_MMU_PTE_L1_INDEX_MASK;
1039 uint32_t l2_index = ((uint32_t)virt >> ARM_MMU_PTE_L2_INDEX_PA_SHIFT) &
1040 ARM_MMU_PTE_L2_INDEX_MASK;
1041 struct arm_mmu_l2_page_table *l2_page_table;
1042
1043 uint32_t pa_resolved = 0;
1044 uint32_t l2_pt_resolved;
1045
1046 int rc = 0;
1047 int key;
1048
1049 key = arch_irq_lock();
1050
1051 if (l1_page_table.entries[l1_index].undefined.id == ARM_MMU_PTE_ID_SECTION) {
1052 /*
1053 * If the virtual address points to a level 1 PTE whose ID bits
1054 * identify it as a 1 MB section entry rather than a level 2 PT
1055 * entry, the given VA belongs to a memory region used by the
1056 * Zephyr image itself - it is only for those static regions that
1057 * L1 Section entries are used to save L2 tables if a sufficient-
1058 * ly large block of memory is specified. The memory regions be-
1059 * longing to the Zephyr image are identity mapped -> just return
1060 * the value of the VA as the value of the PA.
1061 */
1062 pa_resolved = (uint32_t)virt;
1063 } else if (l1_page_table.entries[l1_index].undefined.id == ARM_MMU_PTE_ID_L2_PT) {
1064 /*
1065 * The VA points to a level 1 PTE which re-directs to a level 2
1066 * PT. -> Assemble the level 2 PT pointer and resolve the PA for
1067 * the specified VA from there.
1068 */
1069 l2_pt_resolved =
1070 l1_page_table.entries[l1_index].l2_page_table_ref.l2_page_table_address;
1071 l2_pt_resolved <<= ARM_MMU_PT_L2_ADDR_SHIFT;
1072 l2_page_table = (struct arm_mmu_l2_page_table *)l2_pt_resolved;
1073
1074 /*
1075 * Check if the PTE for the specified VA is actually in use before
1076 * assembling & returning the corresponding PA. k_mem_unmap will
1077 * call this function for the leading & trailing guard pages when
1078 * unmapping a VA. As those guard pages were explicitly unmapped
1079 * when the VA was originally mapped, their L2 PTEs will be empty.
1080 * In that case, the return code of this function must not be 0.
1081 */
1082 if (l2_page_table->entries[l2_index].word == 0) {
1083 rc = -EFAULT;
1084 }
1085
1086 pa_resolved = l2_page_table->entries[l2_index].l2_page_4k.pa_base;
1087 pa_resolved <<= ARM_MMU_PTE_L2_SMALL_PAGE_ADDR_SHIFT;
1088 pa_resolved |= ((uint32_t)virt & ARM_MMU_ADDR_BELOW_PAGE_GRAN_MASK);
1089 } else {
1090 /* The level 1 PTE is invalid -> the specified VA is not mapped */
1091 rc = -EFAULT;
1092 }
1093
1094 arch_irq_unlock(key);
1095
1096 if (phys) {
1097 *phys = (uintptr_t)pa_resolved;
1098 }
1099 return rc;
1100 }
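
/*
 * Illustrative usage only: resolving the physical address behind a mapped
 * virtual address, e.g. before handing a buffer to a DMA engine:
 *
 *   uintptr_t pa;
 *
 *   if (arch_page_phys_get(buf, &pa) == 0) {
 *       ... 'buf' is mapped, 'pa' holds the physical address ...
 *   } else {
 *       ... -EFAULT: 'buf' is not currently mapped ...
 *   }
 */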
1101