/*
 * ARMv7 MMU support
 *
 * This implementation supports the Short-descriptor translation
 * table format. The standard page size is 4 kB; 1 MB sections
 * are only used for mapping the code and data of the Zephyr image.
 * Secure mode and PL1 are always assumed. LPAE and PXN extensions
 * as well as TEX remapping are not supported. The AP[2:1] plus
 * Access flag permissions model is used, as the AP[2:0] model is
 * deprecated. As the AP[2:1] model can only disable write access,
 * the read permission flag is always implied.
 *
 * Reference documentation:
 * ARM Architecture Reference Manual, ARMv7-A and ARMv7-R edition,
 * ARM document ID DDI0406C Rev. d, March 2018
 *
 * Copyright (c) 2021 Weidmueller Interface GmbH & Co. KG
 * SPDX-License-Identifier: Apache-2.0
 */
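
/*
 * Quick reference for the simplified AP[2:1] + Access flag permissions
 * model used throughout this file (SCTLR.AFE = 1, cf. DDI0406C):
 *
 *   AP[2] AP[1]   resulting access
 *     0     0     read/write, PL1 only
 *     0     1     read/write, PL1 and PL0
 *     1     0     read-only,  PL1 only
 *     1     1     read-only,  PL1 and PL0
 *
 * AP[0] serves as the Access flag and is always set when a PTE is written.
 */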

#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/kernel.h>

#include <zephyr/linker/linker-defs.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/util.h>
#include <zephyr/kernel/mm.h>
#include <zephyr/sys/barrier.h>

#include <cmsis_core.h>

#include <zephyr/arch/arm/mmu/arm_mmu.h>
#include "arm_mmu_priv.h"

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

/* Level 1 page table: always required, must be 16k-aligned */
static struct arm_mmu_l1_page_table
	l1_page_table __aligned(KB(16)) = {0};
/*
 * Array of level 2 page tables with 4k granularity:
 * each table covers a range of 1 MB, the number of L2 tables
 * is configurable.
 */
static struct arm_mmu_l2_page_table
	l2_page_tables[CONFIG_ARM_MMU_NUM_L2_TABLES] __aligned(KB(1)) = {0};
/*
 * For each level 2 page table, a separate dataset tracks
 * if the respective table is in use, if so, to which 1 MB
 * virtual address range it is assigned, and how many entries,
 * each mapping a 4 kB page, it currently contains.
 */
static struct arm_mmu_l2_page_table_status
	l2_page_tables_status[CONFIG_ARM_MMU_NUM_L2_TABLES] = {0};

/* Available L2 tables count & next free index for an L2 table request */
static uint32_t arm_mmu_l2_tables_free = CONFIG_ARM_MMU_NUM_L2_TABLES;
static uint32_t arm_mmu_l2_next_free_table;
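
/*
 * Note on address decomposition (short-descriptor format, TTBCR.N = 0):
 * VA[31:20] selects one of the 4096 L1 entries (1 MB each),
 * VA[19:12] selects one of the 256 entries of an L2 table (4 kB each),
 * VA[11:0]  is the byte offset within the 4 kB page.
 */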

/*
 * Static definition of all code & data memory regions of the
 * current Zephyr image. This information must be available &
 * processed upon MMU initialization.
 */
static const struct arm_mmu_flat_range mmu_zephyr_ranges[] = {
	/*
	 * Mark the zephyr execution regions (data, bss, noinit, etc.)
	 * cacheable, read / write and non-executable
	 */
	{ .name  = "zephyr_data",
	  .start = (uint32_t)_image_ram_start,
	  .end   = (uint32_t)_image_ram_end,
	  .attrs = MT_NORMAL | MATTR_SHARED |
		   MPERM_R | MPERM_W |
		   MATTR_CACHE_OUTER_WB_WA | MATTR_CACHE_INNER_WB_WA},

	/* Mark text segment cacheable, read only and executable */
	{ .name  = "zephyr_code",
	  .start = (uint32_t)__text_region_start,
	  .end   = (uint32_t)__text_region_end,
	  .attrs = MT_NORMAL | MATTR_SHARED |
	  /* The code needs to have write permission in order for
	   * software breakpoints (which modify instructions) to work
	   */
#if defined(CONFIG_GDBSTUB)
		   MPERM_R | MPERM_X | MPERM_W |
#else
		   MPERM_R | MPERM_X |
#endif
		   MATTR_CACHE_OUTER_WB_nWA | MATTR_CACHE_INNER_WB_nWA |
		   MATTR_MAY_MAP_L1_SECTION},

	/* Mark rodata segment cacheable, read only and non-executable */
	{ .name  = "zephyr_rodata",
	  .start = (uint32_t)__rodata_region_start,
	  .end   = (uint32_t)__rodata_region_end,
	  .attrs = MT_NORMAL | MATTR_SHARED |
		   MPERM_R |
		   MATTR_CACHE_OUTER_WB_nWA | MATTR_CACHE_INNER_WB_nWA |
		   MATTR_MAY_MAP_L1_SECTION},
#ifdef CONFIG_NOCACHE_MEMORY
	/* Mark nocache segment read / write and non-executable */
	{ .name  = "nocache",
	  .start = (uint32_t)_nocache_ram_start,
	  .end   = (uint32_t)_nocache_ram_end,
	  .attrs = MT_STRONGLY_ORDERED |
		   MPERM_R | MPERM_W},
#endif
};
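
/*
 * Device/peripheral memory regions are not part of the static ranges
 * above; they are supplied by the SoC via the mmu_config structure
 * consumed in z_arm_mmu_init(). A minimal sketch of such a definition
 * (hypothetical region, addresses and size; the region struct layout is
 * declared in arm_mmu.h) could look like this:
 *
 *   static const struct arm_mmu_region mmu_regions[] = {
 *       {
 *           .base_pa = 0xE0000000,
 *           .base_va = 0xE0000000,
 *           .size    = KB(4),
 *           .attrs   = MT_DEVICE | MATTR_SHARED | MPERM_R | MPERM_W,
 *       },
 *   };
 *
 *   const struct arm_mmu_config mmu_config = {
 *       .num_regions = ARRAY_SIZE(mmu_regions),
 *       .mmu_regions = mmu_regions,
 *   };
 */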

static void arm_mmu_l2_map_page(uint32_t va, uint32_t pa,
				struct arm_mmu_perms_attrs perms_attrs);

/**
 * @brief Invalidates the TLB
 * Helper function which invalidates the entire TLB. This action
 * is performed whenever the MMU is (re-)enabled or changes to the
 * page tables are made at run-time, as the TLB might contain entries
 * which are no longer valid once the changes are applied.
 */
static void invalidate_tlb_all(void)
{
	__set_TLBIALL(0); /* 0 = opc2 = invalidate entire TLB */
	barrier_dsync_fence_full();
	barrier_isync_fence_full();
}

/**
 * @brief Returns a free level 2 page table
 * Initializes and returns the next free L2 page table whenever
 * a page is to be mapped in a 1 MB virtual address range that
 * is not yet covered by a level 2 page table.
 *
 * @param va 32-bit virtual address to be mapped.
 * @retval pointer to the L2 table now assigned to the 1 MB
 *         address range the target virtual address is in.
 */
static struct arm_mmu_l2_page_table *arm_mmu_assign_l2_table(uint32_t va)
{
	struct arm_mmu_l2_page_table *l2_page_table;

	__ASSERT(arm_mmu_l2_tables_free > 0,
		 "Cannot set up L2 page table for VA 0x%08X: "
		 "no more free L2 page tables available\n",
		 va);
	__ASSERT(l2_page_tables_status[arm_mmu_l2_next_free_table].entries == 0,
		 "Cannot set up L2 page table for VA 0x%08X: "
		 "expected empty L2 table at index [%u], but the "
		 "entries value is %u\n",
		 va, arm_mmu_l2_next_free_table,
		 l2_page_tables_status[arm_mmu_l2_next_free_table].entries);

	/*
	 * Store in the status dataset of the L2 table to be returned
	 * which 1 MB virtual address range it is being assigned to.
	 * Set the current page table entry count to 0.
	 */
	l2_page_tables_status[arm_mmu_l2_next_free_table].l1_index =
		((va >> ARM_MMU_PTE_L1_INDEX_PA_SHIFT) & ARM_MMU_PTE_L1_INDEX_MASK);
	l2_page_tables_status[arm_mmu_l2_next_free_table].entries = 0;
	l2_page_table = &l2_page_tables[arm_mmu_l2_next_free_table];

	/*
	 * Decrement the available L2 page table count. As long as at
	 * least one more L2 table is available afterwards, update the
	 * L2 next free table index. If we're about to return the last
	 * available L2 table, calculating a next free table index is
	 * impossible.
	 */
	--arm_mmu_l2_tables_free;
	if (arm_mmu_l2_tables_free > 0) {
		do {
			arm_mmu_l2_next_free_table = (arm_mmu_l2_next_free_table + 1) %
						      CONFIG_ARM_MMU_NUM_L2_TABLES;
		} while (l2_page_tables_status[arm_mmu_l2_next_free_table].entries != 0);
	}

	return l2_page_table;
}

/**
 * @brief Releases a level 2 page table
 * Releases a level 2 page table, marking it as no longer in use.
 * From that point on, it can be re-used for mappings in another
 * 1 MB virtual address range. This function is called whenever
 * it is determined during an unmap call at run-time that the page
 * table entry count in the respective page table has reached 0.
 *
 * @param l2_page_table Pointer to L2 page table to be released.
 */
static void arm_mmu_release_l2_table(struct arm_mmu_l2_page_table *l2_page_table)
{
	uint32_t l2_page_table_index = ARM_MMU_L2_PT_INDEX(l2_page_table);

	l2_page_tables_status[l2_page_table_index].l1_index = 0;
	if (arm_mmu_l2_tables_free == 0) {
		arm_mmu_l2_next_free_table = l2_page_table_index;
	}
	++arm_mmu_l2_tables_free;
}

/**
 * @brief Increments the page table entry counter of a L2 page table
 * Increments the page table entry counter of a level 2 page table.
 * Contains a check to ensure that no attempts are made to set up
 * more page table entries than the table can hold.
 *
 * @param l2_page_table Pointer to the L2 page table whose entry
 *                      counter shall be incremented.
 */
static void arm_mmu_inc_l2_table_entries(struct arm_mmu_l2_page_table *l2_page_table)
{
	uint32_t l2_page_table_index = ARM_MMU_L2_PT_INDEX(l2_page_table);

	__ASSERT(l2_page_tables_status[l2_page_table_index].entries < ARM_MMU_PT_L2_NUM_ENTRIES,
		 "Cannot increment entry count of the L2 page table at index "
		 "[%u] / addr %p / ref L1[%u]: maximum entry count already reached",
		 l2_page_table_index, l2_page_table,
		 l2_page_tables_status[l2_page_table_index].l1_index);

	++l2_page_tables_status[l2_page_table_index].entries;
}

/**
 * @brief Decrements the page table entry counter of a L2 page table
 * Decrements the page table entry counter of a level 2 page table.
 * Contains a check to ensure that no attempts are made to remove
 * entries from the respective table that aren't actually there.
 *
 * @param l2_page_table Pointer to the L2 page table whose entry
 *                      counter shall be decremented.
 */
static void arm_mmu_dec_l2_table_entries(struct arm_mmu_l2_page_table *l2_page_table)
{
	uint32_t l2_page_table_index = ARM_MMU_L2_PT_INDEX(l2_page_table);

	__ASSERT(l2_page_tables_status[l2_page_table_index].entries > 0,
		 "Cannot decrement entry count of the L2 page table at index "
		 "[%u] / addr %p / ref L1[%u]: entry count is already zero",
		 l2_page_table_index, l2_page_table,
		 l2_page_tables_status[l2_page_table_index].l1_index);

	if (--l2_page_tables_status[l2_page_table_index].entries == 0) {
		arm_mmu_release_l2_table(l2_page_table);
	}
}

/**
 * @brief Converts memory attributes and permissions to MMU format
 * Converts memory attributes and permissions as used in the boot-
 * time memory mapping configuration data array (MT_..., MATTR_...,
 * MPERM_...) to the equivalent bit (field) values used in the MMU's
 * L1 and L2 page table entries. Contains plausibility checks.
 *
 * @param attrs type/attribute/permissions flags word obtained from
 *              an entry of the mmu_config mapping data array.
 * @retval A struct containing the information from the input flags
 *         word converted to the bits / bit fields used in L1 and
 *         L2 page table entries.
 */
static struct arm_mmu_perms_attrs arm_mmu_convert_attr_flags(uint32_t attrs)
{
	struct arm_mmu_perms_attrs perms_attrs = {0};

	__ASSERT(((attrs & MT_MASK) > 0),
		 "Cannot convert attrs word to PTE control bits: no "
		 "memory type specified");
	__ASSERT(!((attrs & MPERM_W) && !(attrs & MPERM_R)),
		 "attrs must not define write permission without read "
		 "permission");
	__ASSERT(!((attrs & MPERM_W) && (attrs & MPERM_X)),
		 "attrs must not define executable memory with write "
		 "permission");

	/*
	 * The translation of the memory type / permissions / attributes
	 * flags in the attrs word to the TEX, C, B, S and AP bits of the
	 * target PTE is based on the reference manual:
	 * TEX, C, B, S: Table B3-10, chap. B3.8.2, p. B3-1363f.
	 * AP          : Table B3-6,  chap. B3.7.1, p. B3-1353.
	 * Device / strongly ordered memory is always assigned to a domain
	 * other than that used for normal memory. Assuming that userspace
	 * support utilizing the MMU is eventually implemented, a single
	 * modification of the DACR register when entering/leaving unprivi-
	 * leged mode could be used in order to enable/disable all device
	 * memory access without having to modify any PTs/PTEs.
	 */
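	/*
	 * Worked example: the "zephyr_data" entry in mmu_zephyr_ranges
	 * (MT_NORMAL | MATTR_SHARED | MPERM_R | MPERM_W | outer/inner WB-WA)
	 * results in TEX[2] = 1 plus the outer WB-WA encoding in TEX[1:0],
	 * the inner WB-WA encoding in C/B, S = 1, domain = ARM_MMU_DOMAIN_OS,
	 * AP[2] = 0 (write enabled), AP[1] = 0 (PL1 only), id_mask = 0x3 and
	 * exec_never = 1, since MPERM_X is not set.
	 */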

	if (attrs & MT_STRONGLY_ORDERED) {
		/* Strongly ordered is always shareable, S bit is ignored */
		perms_attrs.tex        = 0;
		perms_attrs.cacheable  = 0;
		perms_attrs.bufferable = 0;
		perms_attrs.shared     = 0;
		perms_attrs.domain     = ARM_MMU_DOMAIN_DEVICE;
	} else if (attrs & MT_DEVICE) {
		/*
		 * Shareability of device memory is determined by TEX, C, B.
		 * The S bit is ignored. C is always 0 for device memory.
		 */
		perms_attrs.shared    = 0;
		perms_attrs.cacheable = 0;
		perms_attrs.domain    = ARM_MMU_DOMAIN_DEVICE;

		/*
		 * ARM deprecates the marking of Device memory with a
		 * shareability attribute other than Outer Shareable
		 * or Shareable. This means ARM strongly recommends
		 * that Device memory is never assigned a shareability
		 * attribute of Non-shareable or Inner Shareable.
		 */
		perms_attrs.tex        = 0;
		perms_attrs.bufferable = 1;
	} else if (attrs & MT_NORMAL) {
		/*
		 * TEX[2] is always 1. TEX[1:0] contain the outer cache attri-
		 * butes encoding, C and B contain the inner cache attributes
		 * encoding.
		 */
		perms_attrs.tex |= ARM_MMU_TEX2_CACHEABLE_MEMORY;
		perms_attrs.domain = ARM_MMU_DOMAIN_OS;

		/* For normal memory, shareability depends on the S bit */
		if (attrs & MATTR_SHARED) {
			perms_attrs.shared = 1;
		}

		if (attrs & MATTR_CACHE_OUTER_WB_WA) {
			perms_attrs.tex |= ARM_MMU_TEX_CACHE_ATTRS_WB_WA;
		} else if (attrs & MATTR_CACHE_OUTER_WT_nWA) {
			perms_attrs.tex |= ARM_MMU_TEX_CACHE_ATTRS_WT_nWA;
		} else if (attrs & MATTR_CACHE_OUTER_WB_nWA) {
			perms_attrs.tex |= ARM_MMU_TEX_CACHE_ATTRS_WB_nWA;
		}

		if (attrs & MATTR_CACHE_INNER_WB_WA) {
			perms_attrs.cacheable  = ARM_MMU_C_CACHE_ATTRS_WB_WA;
			perms_attrs.bufferable = ARM_MMU_B_CACHE_ATTRS_WB_WA;
		} else if (attrs & MATTR_CACHE_INNER_WT_nWA) {
			perms_attrs.cacheable  = ARM_MMU_C_CACHE_ATTRS_WT_nWA;
			perms_attrs.bufferable = ARM_MMU_B_CACHE_ATTRS_WT_nWA;
		} else if (attrs & MATTR_CACHE_INNER_WB_nWA) {
			perms_attrs.cacheable  = ARM_MMU_C_CACHE_ATTRS_WB_nWA;
			perms_attrs.bufferable = ARM_MMU_B_CACHE_ATTRS_WB_nWA;
		}
	}

	if (attrs & MATTR_NON_SECURE) {
		perms_attrs.non_sec = 1;
	}
	if (attrs & MATTR_NON_GLOBAL) {
		perms_attrs.not_global = 1;
	}

	/*
	 * Handle the case of a PTE that shall be configured for a page which
	 * shall not be accessible at all (e.g. guard pages) and therefore has
	 * neither read nor write permission. In the AP[2:1] access permission
	 * model, the only way to express this is to mask out the PTE's
	 * identifier bits: unlike the write permission, read permission is
	 * always granted for any valid PTE and cannot be revoked explicitly.
	 */
	if (!((attrs & MPERM_R) || (attrs & MPERM_W))) {
		perms_attrs.id_mask = 0x0;
	} else {
		perms_attrs.id_mask = 0x3;
	}
	if (!(attrs & MPERM_W)) {
		perms_attrs.acc_perms |= ARM_MMU_PERMS_AP2_DISABLE_WR;
	}
	if (attrs & MPERM_UNPRIVILEGED) {
		perms_attrs.acc_perms |= ARM_MMU_PERMS_AP1_ENABLE_PL0;
	}
	if (!(attrs & MPERM_X)) {
		perms_attrs.exec_never = 1;
	}

	return perms_attrs;
}

/**
 * @brief Maps a 1 MB memory range via a level 1 page table entry
 * Maps a 1 MB memory range using a level 1 page table entry of type
 * 'section'. This type of entry saves a level 2 page table, but has
 * two pre-conditions: the memory area to be mapped must contain at
 * least 1 MB of contiguous memory, starting at an address with
 * suitable alignment. This mapping method should only be used for
 * mappings whose attributes are unlikely to change at run-time
 * (e.g. code sections will always be read-only and executable).
 * Should the case occur that the permissions or attributes of a
 * subset of a 1 MB section entry shall be re-configured at run-time,
 * the L1 section entry will be broken down into 4k segments using a
 * L2 table with identical attributes before any modifications are
 * performed for the subset of the affected 1 MB range. This comes
 * with a non-deterministic performance penalty at the time of
 * re-configuration; therefore, any mappings for which L1 section
 * entries are a valid option shall be marked in their declaration
 * with the MATTR_MAY_MAP_L1_SECTION flag.
 *
 * @param va 32-bit target virtual address to be mapped.
 * @param pa 32-bit physical address to be mapped.
 * @param perms_attrs Permission and attribute bits in the format
 *                    used in the MMU's L1 page table entries.
 */
static void arm_mmu_l1_map_section(uint32_t va, uint32_t pa,
				   struct arm_mmu_perms_attrs perms_attrs)
{
	uint32_t l1_index = (va >> ARM_MMU_PTE_L1_INDEX_PA_SHIFT) &
			    ARM_MMU_PTE_L1_INDEX_MASK;

	__ASSERT(l1_page_table.entries[l1_index].undefined.id == ARM_MMU_PTE_ID_INVALID,
		 "Unexpected non-zero L1 PTE ID %u for VA 0x%08X / PA 0x%08X",
		 l1_page_table.entries[l1_index].undefined.id,
		 va, pa);

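	/*
	 * Layout of the short-descriptor 'section' entry written below
	 * (cf. DDI0406C, short-descriptor first-level descriptor formats):
	 * [1:0] ID, [2] B, [3] C, [4] XN, [8:5] domain, [9] impl. defined,
	 * [11:10] AP[1:0], [14:12] TEX, [15] AP[2], [16] S, [17] nG,
	 * [18] 0 (section), [19] NS, [31:20] section base address.
	 */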
	l1_page_table.entries[l1_index].l1_section_1m.id =
		(ARM_MMU_PTE_ID_SECTION & perms_attrs.id_mask);
	l1_page_table.entries[l1_index].l1_section_1m.bufferable = perms_attrs.bufferable;
	l1_page_table.entries[l1_index].l1_section_1m.cacheable = perms_attrs.cacheable;
	l1_page_table.entries[l1_index].l1_section_1m.exec_never = perms_attrs.exec_never;
	l1_page_table.entries[l1_index].l1_section_1m.domain = perms_attrs.domain;
	l1_page_table.entries[l1_index].l1_section_1m.impl_def = 0;
	l1_page_table.entries[l1_index].l1_section_1m.acc_perms10 =
		((perms_attrs.acc_perms & 0x1) << 1) | 0x1;
	l1_page_table.entries[l1_index].l1_section_1m.tex = perms_attrs.tex;
	l1_page_table.entries[l1_index].l1_section_1m.acc_perms2 =
		(perms_attrs.acc_perms >> 1) & 0x1;
	l1_page_table.entries[l1_index].l1_section_1m.shared = perms_attrs.shared;
	l1_page_table.entries[l1_index].l1_section_1m.not_global = perms_attrs.not_global;
	l1_page_table.entries[l1_index].l1_section_1m.zero = 0;
	l1_page_table.entries[l1_index].l1_section_1m.non_sec = perms_attrs.non_sec;
	l1_page_table.entries[l1_index].l1_section_1m.base_address =
		(pa >> ARM_MMU_PTE_L1_INDEX_PA_SHIFT);
}

/**
 * @brief Converts a L1 1 MB section mapping to a full L2 table
 * When this function is called, something has happened that shouldn't
 * happen for the sake of run-time performance and determinism: the
 * attributes and/or permissions of a subset of a 1 MB memory range
 * currently represented by a level 1 page table entry of type 'section'
 * shall be modified so that they differ from the rest of the 1 MB
 * range's attributes/permissions. Therefore, the single L1 page table
 * entry has to be broken down to the full 256 4k-wide entries of a
 * L2 page table with identical properties so that afterwards, the
 * modification of the subset can be performed with a 4k granularity.
 * The risk at this point is that all L2 tables are already in use,
 * which will result in an assertion failure in the first contained
 * #arm_mmu_l2_map_page() call.
 * @warning While the conversion is being performed, interrupts are
 *          locked globally and the MMU is disabled (the required
 *          Zephyr code & data are still accessible in this state as
 *          those are identity mapped). Expect non-deterministic be-
 *          haviour / interrupt latencies while the conversion is in
 *          progress!
 *
 * @param va 32-bit virtual address within the 1 MB range that shall
 *           be converted from L1 1 MB section mapping to L2 4 kB page
 *           mappings.
 * @param l2_page_table Pointer to an empty L2 page table allocated
 *                      for the purpose of replacing the L1 section
 *                      mapping.
 */
static void arm_mmu_remap_l1_section_to_l2_table(uint32_t va,
						 struct arm_mmu_l2_page_table *l2_page_table)
{
	struct arm_mmu_perms_attrs perms_attrs = {0};
	uint32_t l1_index = (va >> ARM_MMU_PTE_L1_INDEX_PA_SHIFT) &
			    ARM_MMU_PTE_L1_INDEX_MASK;
	uint32_t rem_size = MB(1);
	uint32_t reg_val;
	int lock_key;

	/*
	 * Extract the permissions and attributes from the current 1 MB section entry.
	 * This data will be carried over to the resulting L2 page table.
	 */

	perms_attrs.acc_perms = (l1_page_table.entries[l1_index].l1_section_1m.acc_perms2 << 1) |
		((l1_page_table.entries[l1_index].l1_section_1m.acc_perms10 >> 1) & 0x1);
	perms_attrs.bufferable = l1_page_table.entries[l1_index].l1_section_1m.bufferable;
	perms_attrs.cacheable = l1_page_table.entries[l1_index].l1_section_1m.cacheable;
	perms_attrs.domain = l1_page_table.entries[l1_index].l1_section_1m.domain;
	perms_attrs.id_mask = (l1_page_table.entries[l1_index].l1_section_1m.id ==
			      ARM_MMU_PTE_ID_INVALID) ? 0x0 : 0x3;
	perms_attrs.not_global = l1_page_table.entries[l1_index].l1_section_1m.not_global;
	perms_attrs.non_sec = l1_page_table.entries[l1_index].l1_section_1m.non_sec;
	perms_attrs.shared = l1_page_table.entries[l1_index].l1_section_1m.shared;
	perms_attrs.tex = l1_page_table.entries[l1_index].l1_section_1m.tex;
	perms_attrs.exec_never = l1_page_table.entries[l1_index].l1_section_1m.exec_never;

	/*
	 * Disable interrupts - no interrupts shall occur before the L2 table has
	 * been set up in place of the former L1 section entry.
	 */

	lock_key = arch_irq_lock();

	/*
	 * Disable the MMU. The L1 PTE array and the L2 PT array may actually be
	 * covered by the L1 PTE we're about to replace, so access to this data
	 * must remain functional during the entire remap process. Yet, the only
	 * memory areas for which L1 1 MB section entries are even considered are
	 * those belonging to the Zephyr image. Those areas are *always* identity
	 * mapped, so the MMU can be turned off and the relevant data will still
	 * be available.
	 */

	reg_val = __get_SCTLR();
	__set_SCTLR(reg_val & (~ARM_MMU_SCTLR_MMU_ENABLE_BIT));

	/*
	 * Clear the entire L1 PTE & re-configure it as a L2 PT reference
	 * -> already sets the correct values for: zero0, zero1, impl_def.
	 */
	l1_page_table.entries[l1_index].word = 0;

	l1_page_table.entries[l1_index].l2_page_table_ref.id = ARM_MMU_PTE_ID_L2_PT;
	l1_page_table.entries[l1_index].l2_page_table_ref.domain = perms_attrs.domain;
	l1_page_table.entries[l1_index].l2_page_table_ref.non_sec = perms_attrs.non_sec;
	l1_page_table.entries[l1_index].l2_page_table_ref.l2_page_table_address =
		(((uint32_t)l2_page_table >> ARM_MMU_PT_L2_ADDR_SHIFT) &
		ARM_MMU_PT_L2_ADDR_MASK);

	/* Align the target VA to the base address of the section we're converting */
	va &= ~(MB(1) - 1);
	while (rem_size > 0) {
		arm_mmu_l2_map_page(va, va, perms_attrs);
		rem_size -= KB(4);
		va += KB(4);
	}

	/* Remap complete, re-enable the MMU, unlock the interrupts. */

	invalidate_tlb_all();
	__set_SCTLR(reg_val);

	arch_irq_unlock(lock_key);
}

/**
 * @brief Maps a 4 kB memory page using a L2 page table entry
 * Maps a single 4 kB page of memory from the specified physical
 * address to the specified virtual address, using the provided
 * attributes and permissions which have already been converted
 * from the system's format provided to arch_mem_map() to the
 * bits / bit masks used in the L2 page table entry.
 *
 * @param va 32-bit target virtual address.
 * @param pa 32-bit physical address.
 * @param perms_attrs Permission and attribute bits in the format
 *                    used in the MMU's L2 page table entries.
 */
static void arm_mmu_l2_map_page(uint32_t va, uint32_t pa,
				struct arm_mmu_perms_attrs perms_attrs)
{
	struct arm_mmu_l2_page_table *l2_page_table = NULL;
	uint32_t l1_index = (va >> ARM_MMU_PTE_L1_INDEX_PA_SHIFT) &
			    ARM_MMU_PTE_L1_INDEX_MASK;
	uint32_t l2_index = (va >> ARM_MMU_PTE_L2_INDEX_PA_SHIFT) &
			    ARM_MMU_PTE_L2_INDEX_MASK;

	/*
	 * Use the calculated L1 index in order to determine if a L2 page
	 * table is required in order to complete the current mapping.
	 * -> See below for an explanation of the possible scenarios.
	 */

	if (l1_page_table.entries[l1_index].undefined.id == ARM_MMU_PTE_ID_INVALID ||
	    (l1_page_table.entries[l1_index].undefined.id & ARM_MMU_PTE_ID_SECTION) != 0) {
		l2_page_table = arm_mmu_assign_l2_table(pa);
		__ASSERT(l2_page_table != NULL,
			 "Unexpected L2 page table NULL pointer for VA 0x%08X",
			 va);
	}

	/*
	 * Check what is currently present at the corresponding L1 table entry.
	 * The following scenarios are possible:
	 * 1) The L1 PTE's ID bits are zero, as is the rest of the entry.
	 *    In this case, the L1 PTE is currently unused. A new L2 PT to
	 *    refer to in this entry has already been allocated above.
	 * 2) The L1 PTE's ID bits indicate a L2 PT reference entry (01).
	 *    The corresponding L2 PT's address will be resolved using this
	 *    entry.
	 * 3) The L1 PTE's ID bits may or may not be zero, and the rest of
	 *    the descriptor contains some non-zero data. This always indicates
	 *    an existing 1 MB section entry in this place. Checking only the
	 *    ID bits wouldn't be enough, as the only way to indicate a section
	 *    with neither R nor W permissions is to set the ID bits to 00 in
	 *    the AP[2:1] permissions model. As we're now about to map a single
	 *    page overlapping with the 1 MB section, the section has to be
	 *    converted into a L2 table. Afterwards, the current page mapping
	 *    can be added/modified.
	 */

	if (l1_page_table.entries[l1_index].word == 0) {
		/* The matching L1 PT entry is currently unused */
		l1_page_table.entries[l1_index].l2_page_table_ref.id = ARM_MMU_PTE_ID_L2_PT;
		l1_page_table.entries[l1_index].l2_page_table_ref.zero0 = 0;
		l1_page_table.entries[l1_index].l2_page_table_ref.zero1 = 0;
		l1_page_table.entries[l1_index].l2_page_table_ref.impl_def = 0;
		l1_page_table.entries[l1_index].l2_page_table_ref.domain = 0; /* TODO */
		l1_page_table.entries[l1_index].l2_page_table_ref.non_sec =
			perms_attrs.non_sec;
		l1_page_table.entries[l1_index].l2_page_table_ref.l2_page_table_address =
			(((uint32_t)l2_page_table >> ARM_MMU_PT_L2_ADDR_SHIFT) &
			ARM_MMU_PT_L2_ADDR_MASK);
	} else if (l1_page_table.entries[l1_index].undefined.id == ARM_MMU_PTE_ID_L2_PT) {
		/* The matching L1 PT entry already points to a L2 PT */
		l2_page_table = (struct arm_mmu_l2_page_table *)
				((l1_page_table.entries[l1_index].word &
				(ARM_MMU_PT_L2_ADDR_MASK << ARM_MMU_PT_L2_ADDR_SHIFT)));
		/*
		 * The only configuration bit contained in the L2 PT entry is the
		 * NS bit. Set it according to the attributes passed to this function,
		 * warn if there is a mismatch between the current page's NS attribute
		 * value and the value currently contained in the L2 PT entry.
		 */
		if (l1_page_table.entries[l1_index].l2_page_table_ref.non_sec !=
		    perms_attrs.non_sec) {
			LOG_WRN("NS bit mismatch in L2 PT reference at L1 index [%u], "
				"re-configuring from %u to %u",
				l1_index,
				l1_page_table.entries[l1_index].l2_page_table_ref.non_sec,
				perms_attrs.non_sec);
			l1_page_table.entries[l1_index].l2_page_table_ref.non_sec =
				perms_attrs.non_sec;
		}
	} else if (l1_page_table.entries[l1_index].undefined.reserved != 0) {
		/*
		 * The matching L1 PT entry currently holds a 1 MB section entry
		 * in order to save a L2 table (as it's neither completely blank
		 * nor a L2 PT reference), but now we have to map an overlapping
		 * 4 kB page, so the section entry must be converted to a L2 table
		 * first before the individual L2 entry for the page to be mapped is
		 * accessed. A blank L2 PT has already been assigned above.
		 */
		arm_mmu_remap_l1_section_to_l2_table(va, l2_page_table);
	}

	/*
	 * If the matching L2 PTE is blank, increment the number of used entries
	 * in the L2 table. If the L2 PTE already contains some data, we're re-
	 * placing the entry's data instead, the used entry count remains unchanged.
	 * Once again, checking the ID bits might be misleading if the PTE declares
	 * a page which has neither R nor W permissions.
	 */
	if (l2_page_table->entries[l2_index].word == 0) {
		arm_mmu_inc_l2_table_entries(l2_page_table);
	}

	l2_page_table->entries[l2_index].l2_page_4k.id =
		(ARM_MMU_PTE_ID_SMALL_PAGE & perms_attrs.id_mask);
	l2_page_table->entries[l2_index].l2_page_4k.id |= perms_attrs.exec_never; /* XN in [0] */
	l2_page_table->entries[l2_index].l2_page_4k.bufferable = perms_attrs.bufferable;
	l2_page_table->entries[l2_index].l2_page_4k.cacheable = perms_attrs.cacheable;
	l2_page_table->entries[l2_index].l2_page_4k.acc_perms10 =
		((perms_attrs.acc_perms & 0x1) << 1) | 0x1;
	l2_page_table->entries[l2_index].l2_page_4k.tex = perms_attrs.tex;
	l2_page_table->entries[l2_index].l2_page_4k.acc_perms2 =
		((perms_attrs.acc_perms >> 1) & 0x1);
	l2_page_table->entries[l2_index].l2_page_4k.shared = perms_attrs.shared;
	l2_page_table->entries[l2_index].l2_page_4k.not_global = perms_attrs.not_global;
	l2_page_table->entries[l2_index].l2_page_4k.pa_base =
		((pa >> ARM_MMU_PTE_L2_SMALL_PAGE_ADDR_SHIFT) &
		ARM_MMU_PTE_L2_SMALL_PAGE_ADDR_MASK);
}

/**
 * @brief Unmaps a 4 kB memory page by clearing its L2 page table entry
 * Unmaps a single 4 kB page of memory from the specified virtual
 * address by clearing its respective L2 page table entry.
 *
 * @param va 32-bit virtual address to be unmapped.
 */
static void arm_mmu_l2_unmap_page(uint32_t va)
{
	struct arm_mmu_l2_page_table *l2_page_table;
	uint32_t l1_index = (va >> ARM_MMU_PTE_L1_INDEX_PA_SHIFT) &
			    ARM_MMU_PTE_L1_INDEX_MASK;
	uint32_t l2_index = (va >> ARM_MMU_PTE_L2_INDEX_PA_SHIFT) &
			    ARM_MMU_PTE_L2_INDEX_MASK;

	if (l1_page_table.entries[l1_index].undefined.id != ARM_MMU_PTE_ID_L2_PT) {
		/*
		 * No L2 PT currently exists for the given VA - this should be
		 * tolerated without an error, just as in the case that while
		 * a L2 PT exists, the corresponding PTE is blank - see explanation
		 * below, the same applies here.
		 */
		return;
	}

	l2_page_table = (struct arm_mmu_l2_page_table *)
			((l1_page_table.entries[l1_index].word &
			(ARM_MMU_PT_L2_ADDR_MASK << ARM_MMU_PT_L2_ADDR_SHIFT)));

	if (l2_page_table->entries[l2_index].word == 0) {
		/*
		 * We're supposed to unmap a page at the given VA, but there currently
		 * isn't anything mapped at this address, the L2 PTE is blank.
		 * -> This is normal if a memory area is being mapped via k_mem_map,
		 * which contains two calls to arch_mem_unmap (which effectively end up
		 * here) in order to unmap the leading and trailing guard pages.
		 * Therefore, it has to be expected that unmap calls are made for unmapped
		 * memory which hasn't been in use before.
		 * -> Just return, don't decrement the entry counter of the corresponding
		 * L2 page table, as we're not actually clearing any PTEs.
		 */
		return;
	}

	if ((l2_page_table->entries[l2_index].undefined.id & ARM_MMU_PTE_ID_SMALL_PAGE) !=
			ARM_MMU_PTE_ID_SMALL_PAGE) {
		LOG_ERR("Cannot unmap virtual memory at 0x%08X: invalid "
			"page table entry type in level 2 page table at "
			"L1 index [%u], L2 index [%u]", va, l1_index, l2_index);
		return;
	}

	l2_page_table->entries[l2_index].word = 0;

	arm_mmu_dec_l2_table_entries(l2_page_table);
}

/**
 * @brief MMU boot-time initialization function
 * Initializes the MMU at boot time. Sets up the page tables and
 * applies any specified memory mappings for either the different
 * sections of the Zephyr binary image, or for device memory as
 * specified at the SoC level.
 *
 * @retval Always 0, errors are handled by assertions.
 */
int z_arm_mmu_init(void)
{
	uint32_t mem_range;
	uint32_t pa;
	uint32_t va;
	uint32_t attrs;
	uint32_t pt_attrs = 0;
	uint32_t rem_size;
	uint32_t reg_val = 0;
	struct arm_mmu_perms_attrs perms_attrs;

	__ASSERT(KB(4) == CONFIG_MMU_PAGE_SIZE,
		 "MMU_PAGE_SIZE value %u is invalid, only 4 kB pages are supported\n",
		 CONFIG_MMU_PAGE_SIZE);

	/* Set up the memory regions pre-defined by the image */
	for (mem_range = 0; mem_range < ARRAY_SIZE(mmu_zephyr_ranges); mem_range++) {
		pa          = mmu_zephyr_ranges[mem_range].start;
		rem_size    = mmu_zephyr_ranges[mem_range].end - pa;
		attrs       = mmu_zephyr_ranges[mem_range].attrs;
		perms_attrs = arm_mmu_convert_attr_flags(attrs);

		/*
		 * Check if the L1 page table is within the region currently
		 * being mapped. If so, store the permissions and attributes
		 * of the current section. This information is required when
		 * writing to the TTBR0 register.
		 */
		if (((uint32_t)&l1_page_table >= pa) &&
				((uint32_t)&l1_page_table < (pa + rem_size))) {
			pt_attrs = attrs;
		}

		while (rem_size > 0) {
			if (rem_size >= MB(1) && (pa & 0xFFFFF) == 0 &&
			    (attrs & MATTR_MAY_MAP_L1_SECTION)) {
				/*
				 * Remaining area size >= 1 MB & matching alignment
				 * -> map a 1 MB section instead of individual 4 kB
				 * pages with identical configuration.
				 */
				arm_mmu_l1_map_section(pa, pa, perms_attrs);
				rem_size -= MB(1);
				pa += MB(1);
			} else {
				arm_mmu_l2_map_page(pa, pa, perms_attrs);
				rem_size -= (rem_size >= KB(4)) ? KB(4) : rem_size;
				pa += KB(4);
			}
		}
	}

	/* Set up the memory regions defined at the SoC level */
	for (mem_range = 0; mem_range < mmu_config.num_regions; mem_range++) {
		pa          = (uint32_t)(mmu_config.mmu_regions[mem_range].base_pa);
		va          = (uint32_t)(mmu_config.mmu_regions[mem_range].base_va);
		rem_size    = (uint32_t)(mmu_config.mmu_regions[mem_range].size);
		attrs       = mmu_config.mmu_regions[mem_range].attrs;
		perms_attrs = arm_mmu_convert_attr_flags(attrs);

		while (rem_size > 0) {
			arm_mmu_l2_map_page(va, pa, perms_attrs);
			rem_size -= (rem_size >= KB(4)) ? KB(4) : rem_size;
			va += KB(4);
			pa += KB(4);
		}
	}

	/* Clear TTBR1 */
	__asm__ volatile("mcr p15, 0, %0, c2, c0, 1" : : "r"(reg_val));

	/* Write TTBCR: EAE = 0 (short-descriptor format), security not yet relevant, N[2:0] = 0 */
	__asm__ volatile("mcr p15, 0, %0, c2, c0, 2"
			     : : "r"(reg_val));

	/* Write TTBR0 */
	reg_val = ((uint32_t)&l1_page_table.entries[0] & ~0x3FFF);

	/*
	 * Set IRGN, RGN, S in TTBR0 based on the configuration of the
	 * memory area the actual page tables are located in.
	 */
	if (pt_attrs & MATTR_SHARED) {
		reg_val |= ARM_MMU_TTBR_SHAREABLE_BIT;
	}

	if (pt_attrs & MATTR_CACHE_OUTER_WB_WA) {
		reg_val |= (ARM_MMU_TTBR_RGN_OUTER_WB_WA_CACHEABLE <<
			    ARM_MMU_TTBR_RGN_SHIFT);
	} else if (pt_attrs & MATTR_CACHE_OUTER_WT_nWA) {
		reg_val |= (ARM_MMU_TTBR_RGN_OUTER_WT_CACHEABLE <<
			    ARM_MMU_TTBR_RGN_SHIFT);
	} else if (pt_attrs & MATTR_CACHE_OUTER_WB_nWA) {
		reg_val |= (ARM_MMU_TTBR_RGN_OUTER_WB_nWA_CACHEABLE <<
			    ARM_MMU_TTBR_RGN_SHIFT);
	}

	if (pt_attrs & MATTR_CACHE_INNER_WB_WA) {
		reg_val |= ARM_MMU_TTBR_IRGN0_BIT_MP_EXT_ONLY;
	} else if (pt_attrs & MATTR_CACHE_INNER_WT_nWA) {
		reg_val |= ARM_MMU_TTBR_IRGN1_BIT_MP_EXT_ONLY;
	} else if (pt_attrs & MATTR_CACHE_INNER_WB_nWA) {
		reg_val |= ARM_MMU_TTBR_IRGN0_BIT_MP_EXT_ONLY;
		reg_val |= ARM_MMU_TTBR_IRGN1_BIT_MP_EXT_ONLY;
	}

	__set_TTBR0(reg_val);

	/* Write DACR -> all domains to client = 01b. */
	reg_val = ARM_MMU_DACR_ALL_DOMAINS_CLIENT;
	__set_DACR(reg_val);

	invalidate_tlb_all();

	/* Enable the MMU and Cache in SCTLR */
	reg_val  = __get_SCTLR();
	reg_val |= ARM_MMU_SCTLR_AFE_BIT;
	reg_val |= ARM_MMU_SCTLR_ICACHE_ENABLE_BIT;
	reg_val |= ARM_MMU_SCTLR_DCACHE_ENABLE_BIT;
	reg_val |= ARM_MMU_SCTLR_MMU_ENABLE_BIT;
	__set_SCTLR(reg_val);

	return 0;
}

/**
 * @brief ARMv7-specific implementation of memory mapping at run-time
 * Maps memory according to the parameters provided by the caller
 * at run-time.
 *
 * @param virt 32-bit target virtual address.
 * @param phys 32-bit physical address.
 * @param size Size (in bytes) of the memory area to map.
 * @param flags Memory attributes & permissions. Comp. K_MEM_...
 *              flags in kernel/mm.h.
 * @retval 0 on success, -EINVAL if an invalid parameter is detected.
 */
static int __arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
{
	uint32_t va = (uint32_t)virt;
	uint32_t pa = (uint32_t)phys;
	uint32_t rem_size = (uint32_t)size;
	uint32_t conv_flags = MPERM_R;
	struct arm_mmu_perms_attrs perms_attrs;
	int key;

	if (size == 0) {
		LOG_ERR("Cannot map physical memory at 0x%08X: invalid "
			"zero size", (uint32_t)phys);
		return -EINVAL;
	}

	switch (flags & K_MEM_CACHE_MASK) {

	case K_MEM_CACHE_NONE:
	default:
		conv_flags |= MT_DEVICE;
		break;
	case K_MEM_CACHE_WB:
		conv_flags |= MT_NORMAL;
		conv_flags |= MATTR_SHARED;
		if (flags & K_MEM_PERM_RW) {
			conv_flags |= MATTR_CACHE_OUTER_WB_WA;
			conv_flags |= MATTR_CACHE_INNER_WB_WA;
		} else {
			conv_flags |= MATTR_CACHE_OUTER_WB_nWA;
			conv_flags |= MATTR_CACHE_INNER_WB_nWA;
		}
		break;
	case K_MEM_CACHE_WT:
		conv_flags |= MT_NORMAL;
		conv_flags |= MATTR_SHARED;
		conv_flags |= MATTR_CACHE_OUTER_WT_nWA;
		conv_flags |= MATTR_CACHE_INNER_WT_nWA;
		break;

	}

	if (flags & K_MEM_PERM_RW) {
		conv_flags |= MPERM_W;
	}
	if (flags & K_MEM_PERM_EXEC) {
		conv_flags |= MPERM_X;
	}

	perms_attrs = arm_mmu_convert_attr_flags(conv_flags);

	key = arch_irq_lock();

	while (rem_size > 0) {
		arm_mmu_l2_map_page(va, pa, perms_attrs);
		rem_size -= (rem_size >= KB(4)) ? KB(4) : rem_size;
		va += KB(4);
		pa += KB(4);
	}

	arch_irq_unlock(key);

	return 0;
}

/**
 * @brief Arch-specific wrapper function for memory mapping at run-time
 * Maps memory according to the parameters provided by the caller
 * at run-time. This function wraps the ARMv7 MMU specific implementation
 * #__arch_mem_map() for the upper layers of the memory management.
 * If the map operation fails, a kernel panic will be triggered.
 *
 * @param virt 32-bit target virtual address.
 * @param phys 32-bit physical address.
 * @param size Size (in bytes) of the memory area to map.
 * @param flags Memory attributes & permissions. Comp. K_MEM_...
 *              flags in kernel/mm.h.
 */
void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
{
	int ret = __arch_mem_map(virt, phys, size, flags);

	if (ret) {
		LOG_ERR("__arch_mem_map() returned %d", ret);
		k_panic();
	} else {
		invalidate_tlb_all();
	}
}
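
/*
 * Illustrative use only (hypothetical MMIO address): an identity mapping
 * of a single 4 kB device memory page, read/write and non-executable,
 * could be established via
 *
 *   arch_mem_map((void *)0xE0001000, 0xE0001000, KB(4),
 *                K_MEM_CACHE_NONE | K_MEM_PERM_RW);
 *
 * In practice, callers are expected to go through the kernel's memory
 * management / device MMIO layers rather than calling arch_mem_map()
 * directly.
 */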

/**
 * @brief ARMv7-specific implementation of memory unmapping at run-time
 * Unmaps memory according to the parameters provided by the caller
 * at run-time.
 *
 * @param addr 32-bit virtual address to unmap.
 * @param size Size (in bytes) of the memory area to unmap.
 * @retval 0 on success, -EINVAL if an invalid parameter is detected.
 */
static int __arch_mem_unmap(void *addr, size_t size)
{
	uint32_t va = (uint32_t)addr;
	uint32_t rem_size = (uint32_t)size;
	int key;

	if (addr == NULL) {
		LOG_ERR("Cannot unmap virtual memory: invalid NULL pointer");
		return -EINVAL;
	}

	if (size == 0) {
		LOG_ERR("Cannot unmap virtual memory at 0x%08X: invalid "
			"zero size", (uint32_t)addr);
		return -EINVAL;
	}

	key = arch_irq_lock();

	while (rem_size > 0) {
		arm_mmu_l2_unmap_page(va);
		rem_size -= (rem_size >= KB(4)) ? KB(4) : rem_size;
		va += KB(4);
	}

	arch_irq_unlock(key);

	return 0;
}

/**
 * @brief Arch-specific wrapper function for memory unmapping at run-time
 * Unmaps memory according to the parameters provided by the caller
 * at run-time. This function wraps the ARMv7 MMU specific implementation
 * #__arch_mem_unmap() for the upper layers of the memory management.
 *
 * @param addr 32-bit virtual address to unmap.
 * @param size Size (in bytes) of the memory area to unmap.
 */
void arch_mem_unmap(void *addr, size_t size)
{
	int ret = __arch_mem_unmap(addr, size);

	if (ret) {
		LOG_ERR("__arch_mem_unmap() returned %d", ret);
	} else {
		invalidate_tlb_all();
	}
}

/**
 * @brief Arch-specific virtual-to-physical address resolver function
 * ARMv7 MMU specific implementation of a function that resolves the
 * physical address corresponding to the given virtual address.
 *
 * @param virt 32-bit target virtual address to resolve.
 * @param phys Pointer to a variable to which the resolved physical
 *             address will be written. May be NULL if this information
 *             is not actually required by the caller.
 * @retval 0 if the physical address corresponding to the specified
 *         virtual address could be resolved successfully, -EFAULT
 *         if the specified virtual address is not currently mapped.
 */
int arch_page_phys_get(void *virt, uintptr_t *phys)
{
	uint32_t l1_index = ((uint32_t)virt >> ARM_MMU_PTE_L1_INDEX_PA_SHIFT) &
			    ARM_MMU_PTE_L1_INDEX_MASK;
	uint32_t l2_index = ((uint32_t)virt >> ARM_MMU_PTE_L2_INDEX_PA_SHIFT) &
			    ARM_MMU_PTE_L2_INDEX_MASK;
	struct arm_mmu_l2_page_table *l2_page_table;

	uint32_t pa_resolved = 0;
	uint32_t l2_pt_resolved;

	int rc = 0;
	int key;

	key = arch_irq_lock();

	if (l1_page_table.entries[l1_index].undefined.id == ARM_MMU_PTE_ID_SECTION) {
		/*
		 * If the virtual address points to a level 1 PTE whose ID bits
		 * identify it as a 1 MB section entry rather than a level 2 PT
		 * entry, the given VA belongs to a memory region used by the
		 * Zephyr image itself - it is only for those static regions that
		 * L1 Section entries are used to save L2 tables if a sufficient-
		 * ly large block of memory is specified. The memory regions be-
		 * longing to the Zephyr image are identity mapped -> just return
		 * the value of the VA as the value of the PA.
		 */
		pa_resolved = (uint32_t)virt;
	} else if (l1_page_table.entries[l1_index].undefined.id == ARM_MMU_PTE_ID_L2_PT) {
		/*
		 * The VA points to a level 1 PTE which re-directs to a level 2
		 * PT. -> Assemble the level 2 PT pointer and resolve the PA for
		 * the specified VA from there.
		 */
		l2_pt_resolved =
			l1_page_table.entries[l1_index].l2_page_table_ref.l2_page_table_address;
		l2_pt_resolved <<= ARM_MMU_PT_L2_ADDR_SHIFT;
		l2_page_table = (struct arm_mmu_l2_page_table *)l2_pt_resolved;

		/*
		 * Check if the PTE for the specified VA is actually in use before
		 * assembling & returning the corresponding PA. k_mem_unmap will
		 * call this function for the leading & trailing guard pages when
		 * unmapping a VA. As those guard pages were explicitly unmapped
		 * when the VA was originally mapped, their L2 PTEs will be empty.
		 * In that case, the return code of this function must not be 0.
		 */
		if (l2_page_table->entries[l2_index].word == 0) {
			rc = -EFAULT;
		}

		pa_resolved = l2_page_table->entries[l2_index].l2_page_4k.pa_base;
		pa_resolved <<= ARM_MMU_PTE_L2_SMALL_PAGE_ADDR_SHIFT;
		pa_resolved |= ((uint32_t)virt & ARM_MMU_ADDR_BELOW_PAGE_GRAN_MASK);
	} else {
		/* The level 1 PTE is invalid -> the specified VA is not mapped */
		rc = -EFAULT;
	}

	arch_irq_unlock(key);

	if (phys) {
		*phys = (uintptr_t)pa_resolved;
	}
	return rc;
}
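
/*
 * Illustrative use only (sketch): resolve the physical address backing a
 * mapped virtual address, tolerating unmapped addresses:
 *
 *   uintptr_t pa;
 *
 *   if (arch_page_phys_get(vaddr, &pa) == 0) {
 *       // vaddr is mapped, pa now holds the corresponding physical address
 *   } else {
 *       // -EFAULT: vaddr is not currently mapped
 *   }
 */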