/**************************************************************************//**
 * @file     mmu_armv8a.c
 * @brief    CMSIS Cortex-Axx MMU Source file
 * @version  V1.0.0
 * @date     20. October 2021
 ******************************************************************************/
/*
 * Copyright 2019 Broadcom
 * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 * Copyright (c) 2021 Arm Limited. All rights reserved.
 * Copyright 2021 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdbool.h>
#include <stdint.h>

#include "core_ca53.h"
#include "mmu_armv8a.h"


/*******************************************************************************
 * Definitions
 ******************************************************************************/

#define __ASSERT(op, fmt, ...) \
  do { \
    if (!(op)) { \
      while(1) \
        /* wait here */; \
    } \
  } while (0)

#ifndef MAX
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif

#ifndef KB
#define KB(x)                  ((x) << 10)
#endif

#ifndef CONFIG_MMU_PAGE_SIZE
#define CONFIG_MMU_PAGE_SIZE                   4096
#endif
#ifndef CONFIG_MAX_XLAT_TABLES
#define CONFIG_MAX_XLAT_TABLES                 32
#endif
#ifndef CONFIG_ARM64_PA_BITS
#define CONFIG_ARM64_PA_BITS                   48
#endif
#ifndef CONFIG_ARM64_VA_BITS
#define CONFIG_ARM64_VA_BITS                   48
#endif

#define LOG_ERR(fmt, ...)                      (void)(fmt)
#define ARG_UNUSED(x)                          (void)(x)

#define BITS_PER_LONG                          (__CHAR_BIT__ * __SIZEOF_LONG__)
#define GENMASK(h, l) \
  (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

/*******************************************************************************
 * from zephyr:/arch/arm/core/aarch64/mmu/arm_mmu.h:
 ******************************************************************************/

/* Define the flag below as 1 to get debug prints */
//#define MMU_DEBUG_PRINTS                       1

#if defined (MMU_DEBUG_PRINTS) && (MMU_DEBUG_PRINTS == 1)
  /* To also dump page table entries while filling them, set DUMP_PTE to 1 */
  #define DUMP_PTE                               0
  #define MMU_DEBUG(fmt, ...)                    PRINTF(fmt, ##__VA_ARGS__)
#else
  #define MMU_DEBUG(...)
#endif

/*
 * 48-bit address with 4KB granule size:
 *
 * +------------+------------+------------+------------+-----------+
 * | VA [47:39] | VA [38:30] | VA [29:21] | VA [20:12] | VA [11:0] |
 * +---------------------------------------------------------------+
 * |     L0     |     L1     |     L2     |     L3     | block off |
 * +------------+------------+------------+------------+-----------+
 */

/* Only 4K granule is supported */
#define PAGE_SIZE_SHIFT                        12U

/* 48-bit VA address */
#define VA_SIZE_SHIFT_MAX                      48U

/* Maximum 4 XLAT table levels (L0 - L3) */
#define XLAT_LAST_LEVEL                        3U

/* The VA shift of L3 depends on the granule size */
#define L3_XLAT_VA_SIZE_SHIFT                  PAGE_SIZE_SHIFT

/* Number of VA bits to assign to each table (9 bits) */
#define Ln_XLAT_VA_SIZE_SHIFT                  (PAGE_SIZE_SHIFT - 3)

/* Starting bit in the VA address for each level */
#define L2_XLAT_VA_SIZE_SHIFT                  (L3_XLAT_VA_SIZE_SHIFT + Ln_XLAT_VA_SIZE_SHIFT)
#define L1_XLAT_VA_SIZE_SHIFT                  (L2_XLAT_VA_SIZE_SHIFT + Ln_XLAT_VA_SIZE_SHIFT)
#define L0_XLAT_VA_SIZE_SHIFT                  (L1_XLAT_VA_SIZE_SHIFT + Ln_XLAT_VA_SIZE_SHIFT)

#define LEVEL_TO_VA_SIZE_SHIFT(level)			\
	(PAGE_SIZE_SHIFT + (Ln_XLAT_VA_SIZE_SHIFT *	\
	(XLAT_LAST_LEVEL - (level))))
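
/*
 * For the 4 KB granule this resolves to VA size shifts of 39 (L0), 30 (L1),
 * 21 (L2) and 12 (L3), i.e. block/page sizes of 512 GB, 1 GB, 2 MB and 4 KB.
 */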

/* Number of entries for each table (512) */
#define Ln_XLAT_NUM_ENTRIES                    ((1U << PAGE_SIZE_SHIFT) / 8U)

/* Virtual Address Index within a given translation table level */
#define XLAT_TABLE_VA_IDX(va_addr, level) \
	(((va_addr) >> LEVEL_TO_VA_SIZE_SHIFT(level)) & (Ln_XLAT_NUM_ENTRIES - 1))
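
/*
 * Worked example (illustrative): VA 0x008040201000 has bits 39, 30, 21 and 12
 * set, so XLAT_TABLE_VA_IDX() yields index 1 at L0, L1, L2 and L3, with a
 * zero offset within the 4 KB page.
 */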

/*
 * Calculate the initial translation table level from CONFIG_ARM64_VA_BITS
 * For a 4 KB page size:
 *
 * (va_bits <= 21)       - base level 3
 * (22 <= va_bits <= 30) - base level 2
 * (31 <= va_bits <= 39) - base level 1
 * (40 <= va_bits <= 48) - base level 0
 */
#define GET_BASE_XLAT_LEVEL(va_bits)				\
	 ((va_bits > L0_XLAT_VA_SIZE_SHIFT) ? 0U		\
	: (va_bits > L1_XLAT_VA_SIZE_SHIFT) ? 1U		\
	: (va_bits > L2_XLAT_VA_SIZE_SHIFT) ? 2U : 3U)

/* Level for the base XLAT */
#define BASE_XLAT_LEVEL	GET_BASE_XLAT_LEVEL(CONFIG_ARM64_VA_BITS)
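/* With the default CONFIG_ARM64_VA_BITS of 48 this resolves to level 0, so the walk uses all four levels (L0-L3). */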

#if (CONFIG_ARM64_PA_BITS == 48)
#define TCR_PS_BITS TCR_PS_BITS_256TB
#elif (CONFIG_ARM64_PA_BITS == 44)
#define TCR_PS_BITS TCR_PS_BITS_16TB
#elif (CONFIG_ARM64_PA_BITS == 42)
#define TCR_PS_BITS TCR_PS_BITS_4TB
#elif (CONFIG_ARM64_PA_BITS == 40)
#define TCR_PS_BITS TCR_PS_BITS_1TB
#elif (CONFIG_ARM64_PA_BITS == 36)
#define TCR_PS_BITS TCR_PS_BITS_64GB
#else
#define TCR_PS_BITS TCR_PS_BITS_4GB
#endif

/* Upper and lower attributes mask for page/block descriptor */
#define DESC_ATTRS_UPPER_MASK                  GENMASK(63, 51)
#define DESC_ATTRS_LOWER_MASK                  GENMASK(11, 2)

#define DESC_ATTRS_MASK		(DESC_ATTRS_UPPER_MASK | DESC_ATTRS_LOWER_MASK)

/******************************************************************************/

static uint64_t xlat_tables[CONFIG_MAX_XLAT_TABLES * Ln_XLAT_NUM_ENTRIES]
		__aligned(Ln_XLAT_NUM_ENTRIES * sizeof(uint64_t));
static uint16_t xlat_use_count[CONFIG_MAX_XLAT_TABLES];
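
/*
 * All translation tables are carved out of the statically allocated pool
 * above; xlat_use_count[i] == 0 marks table i as free.
 */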

/* Returns a reference to a free table */
static uint64_t *new_table(void)
{
	unsigned int i;

	/* Look for a free table. */
	for (i = 0; i < CONFIG_MAX_XLAT_TABLES; i++) {
		if (xlat_use_count[i] == 0) {
			xlat_use_count[i] = 1;
			return &xlat_tables[i * Ln_XLAT_NUM_ENTRIES];
		}
	}

	LOG_ERR("CONFIG_MAX_XLAT_TABLES too small");
	return NULL;
}

static inline unsigned int table_index(uint64_t *pte)
{
	unsigned int i = (pte - xlat_tables) / Ln_XLAT_NUM_ENTRIES;

	__ASSERT(i < CONFIG_MAX_XLAT_TABLES, "table %p out of range", pte);
	return i;
}

/* Makes a table free for reuse. */
static void free_table(uint64_t *table)
{
	unsigned int i = table_index(table);

	MMU_DEBUG("freeing table [%d]%p\r\n", i, table);
	__ASSERT(xlat_use_count[i] == 1, "table still in use");
	xlat_use_count[i] = 0;
}

/* Adjusts usage count and returns current count. */
static int table_usage(uint64_t *table, int adjustment)
{
	unsigned int i = table_index(table);

	xlat_use_count[i] += adjustment;
	__ASSERT(xlat_use_count[i] > 0, "usage count underflow");
	return xlat_use_count[i];
}

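/*
 * xlat_use_count[] bookkeeping: the count is raised whenever one of a table's
 * entries becomes non-free and lowered when an entry is erased; free_table()
 * above expects it to have dropped back to 1 before a table is recycled.
 */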
static inline bool is_table_unused(uint64_t *table)
{
	return table_usage(table, 0) == 1;
}

static inline bool is_free_desc(uint64_t desc)
{
	return (desc & PTE_DESC_TYPE_MASK) == PTE_INVALID_DESC;
}

static inline bool is_table_desc(uint64_t desc, unsigned int level)
{
	return level != XLAT_LAST_LEVEL &&
	       (desc & PTE_DESC_TYPE_MASK) == PTE_TABLE_DESC;
}

static inline bool is_block_desc(uint64_t desc)
{
	return (desc & PTE_DESC_TYPE_MASK) == PTE_BLOCK_DESC;
}

static inline uint64_t *pte_desc_table(uint64_t desc)
{
	uint64_t address = desc & GENMASK(47, PAGE_SIZE_SHIFT);

	return (uint64_t *)address;
}
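
/*
 * With a 4 KB granule the next-level table address in a table descriptor
 * occupies bits [47:12], which is what GENMASK(47, PAGE_SIZE_SHIFT)
 * extracts above.
 */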

static inline bool is_desc_superset(uint64_t desc1, uint64_t desc2,
				    unsigned int level)
{
	uint64_t mask = DESC_ATTRS_MASK | GENMASK(47, LEVEL_TO_VA_SIZE_SHIFT(level));

	return (desc1 & mask) == (desc2 & mask);
}

#if DUMP_PTE
static void debug_show_pte(uint64_t *pte, unsigned int level)
{
	MMU_DEBUG("%.*s", level * 2, ". . . ");
	MMU_DEBUG("[%d]%p: ", table_index(pte), pte);

	if (is_free_desc(*pte)) {
		MMU_DEBUG("---\r\n");
		return;
	}

	if (is_table_desc(*pte, level)) {
		uint64_t *table = pte_desc_table(*pte);

		MMU_DEBUG("[Table] [%d]%p\r\n", table_index(table), table);
		return;
	}

	if (is_block_desc(*pte)) {
		MMU_DEBUG("[Block] ");
	} else {
		MMU_DEBUG("[Page] ");
	}

	uint8_t mem_type = (*pte >> 2) & MT_TYPE_MASK;

	MMU_DEBUG((mem_type == MT_NORMAL) ? "MEM" :
		  ((mem_type == MT_NORMAL_NC) ? "NC" : "DEV"));
	MMU_DEBUG((*pte & PTE_BLOCK_DESC_AP_RO) ? "-RO" : "-RW");
	MMU_DEBUG((*pte & PTE_BLOCK_DESC_NS) ? "-NS" : "-S");
	MMU_DEBUG((*pte & PTE_BLOCK_DESC_AP_ELx) ? "-ELx" : "-ELh");
	MMU_DEBUG((*pte & PTE_BLOCK_DESC_PXN) ? "-PXN" : "-PX");
	MMU_DEBUG((*pte & PTE_BLOCK_DESC_UXN) ? "-UXN" : "-UX");
	MMU_DEBUG("\r\n");
}
#else
static inline void debug_show_pte(uint64_t *pte, unsigned int level) { }
#endif

static void set_pte_table_desc(uint64_t *pte, uint64_t *table, unsigned int level)
{
	/* Point pte to new table */
	*pte = PTE_TABLE_DESC | (uint64_t)table;
	debug_show_pte(pte, level);
}

static void set_pte_block_desc(uint64_t *pte, uint64_t desc, unsigned int level)
{
	if (desc) {
		desc |= (level == XLAT_LAST_LEVEL) ? PTE_PAGE_DESC : PTE_BLOCK_DESC;
	}
	*pte = desc;
	debug_show_pte(pte, level);
}

static uint64_t *expand_to_table(uint64_t *pte, unsigned int level)
{
	uint64_t *table;

	__ASSERT(level < XLAT_LAST_LEVEL, "can't expand last level");

	table = new_table();
	if (!table) {
		return NULL;
	}

	if (!is_free_desc(*pte)) {
		/*
		 * If entry at current level was already populated
		 * then we need to reflect that in the new table.
		 */
		uint64_t desc = *pte;
		unsigned int i, stride_shift;

		MMU_DEBUG("expanding PTE 0x%016llx into table [%d]%p\r\n",
			  desc, table_index(table), table);
		__ASSERT(is_block_desc(desc), "");

		if (level + 1 == XLAT_LAST_LEVEL) {
			desc |= PTE_PAGE_DESC;
		}

		stride_shift = LEVEL_TO_VA_SIZE_SHIFT(level + 1);
		for (i = 0; i < Ln_XLAT_NUM_ENTRIES; i++) {
			/* Use a 64-bit shift so large strides cannot overflow */
			table[i] = desc | ((uint64_t)i << stride_shift);
		}
		table_usage(table, Ln_XLAT_NUM_ENTRIES);
	} else {
		/*
		 * Adjust usage count for parent table's entry
		 * that will no longer be free.
		 */
		table_usage(pte, 1);
	}

	/* Link the new table in place of the pte it replaces */
	set_pte_table_desc(pte, table, level);
	table_usage(table, 1);

	return table;
}

static int set_mapping(struct ARM_MMU_ptables *ptables,
		       uintptr_t virt, size_t size,
		       uint64_t desc, bool may_overwrite)
{
	uint64_t *pte, *ptes[XLAT_LAST_LEVEL + 1];
	uint64_t level_size;
	uint64_t *table = ptables->base_xlat_table;
	unsigned int level = BASE_XLAT_LEVEL;
	int ret = 0;

	while (size) {
		__ASSERT(level <= XLAT_LAST_LEVEL,
			 "max translation table level exceeded\r\n");

		/* Locate PTE for given virtual address and page table level */
		pte = &table[XLAT_TABLE_VA_IDX(virt, level)];
		ptes[level] = pte;

		if (is_table_desc(*pte, level)) {
			/* Move to the next translation table level */
			level++;
			table = pte_desc_table(*pte);
			continue;
		}

		if (!may_overwrite && !is_free_desc(*pte)) {
			/* the entry is already allocated */
			LOG_ERR("entry already in use: "
				"level %d pte %p *pte 0x%016llx",
				level, pte, *pte);
			ret = -1;
			break;
		}

		level_size = 1ULL << LEVEL_TO_VA_SIZE_SHIFT(level);

		if (is_desc_superset(*pte, desc, level)) {
			/* This block already covers our range */
			level_size -= (virt & (level_size - 1));
			if (level_size > size) {
				level_size = size;
			}
			goto move_on;
		}

		if ((size < level_size) || (virt & (level_size - 1))) {
			/* Range doesn't fit, create subtable */
			table = expand_to_table(pte, level);
			if (!table) {
				ret = -1;
				break;
			}
			level++;
			continue;
		}

		/* Adjust usage count for corresponding table */
		if (is_free_desc(*pte)) {
			table_usage(pte, 1);
		}
		if (!desc) {
			table_usage(pte, -1);
		}
		/* Create (or erase) block/page descriptor */
		set_pte_block_desc(pte, desc, level);

		/* recursively free unused tables if any */
		while (level != BASE_XLAT_LEVEL &&
		       is_table_unused(pte)) {
			free_table(pte);
			pte = ptes[--level];
			set_pte_block_desc(pte, 0, level);
			table_usage(pte, -1);
		}

move_on:
		virt += level_size;
		desc += desc ? level_size : 0;
		size -= level_size;

		/* Range is mapped, start again for next range */
		table = ptables->base_xlat_table;
		level = BASE_XLAT_LEVEL;
	}

	return ret;
}
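
/*
 * Illustrative walk-through of set_mapping() above: a 2 MB region that is
 * 2 MB-aligned ends up as a single L2 block descriptor (level_size == 1 << 21),
 * while a 4 KB region descends through expand_to_table() until it can be
 * written as an L3 page descriptor.
 */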

static uint64_t get_region_desc(uint32_t attrs)
{
	unsigned int mem_type;
	uint64_t desc = 0;

	/* NS bit for non-secure memory access from secure state */
	desc |= (attrs & MT_NS) ? PTE_BLOCK_DESC_NS : 0;

	/*
	 * AP bits for EL0 / ELh Data access permission
	 *
	 *   AP[2:1]   ELh  EL0
	 * +--------------------+
	 *     00      RW   NA
	 *     01      RW   RW
	 *     10      RO   NA
	 *     11      RO   RO
	 */

	/* AP bits for Data access permission */
	desc |= (attrs & MT_RW) ? PTE_BLOCK_DESC_AP_RW : PTE_BLOCK_DESC_AP_RO;

	/* Mirror permissions to EL0 */
	desc |= (attrs & MT_RW_AP_ELx) ?
		 PTE_BLOCK_DESC_AP_ELx : PTE_BLOCK_DESC_AP_EL_HIGHER;

	/* the access flag */
	desc |= PTE_BLOCK_DESC_AF;

	/* memory attribute index field */
	mem_type = MT_TYPE(attrs);
	desc |= PTE_BLOCK_DESC_MEMTYPE(mem_type);

	switch (mem_type) {
	case MT_DEVICE_nGnRnE:
	case MT_DEVICE_nGnRE:
	case MT_DEVICE_GRE:
		/* Access to Device memory and non-cacheable memory is coherent
		 * for all observers in the system and is treated as
		 * Outer Shareable, so, for these 2 types of memory,
		 * it is not strictly needed to set the shareability field
		 */
		desc |= PTE_BLOCK_DESC_OUTER_SHARE;
		/* Map device memory as execute-never */
		desc |= PTE_BLOCK_DESC_PXN;
		desc |= PTE_BLOCK_DESC_UXN;
		break;
	case MT_NORMAL_NC:
	case MT_NORMAL:
		/* Mark normal RW memory as execute-never */
		if ((attrs & MT_RW) || (attrs & MT_P_EXECUTE_NEVER))
			desc |= PTE_BLOCK_DESC_PXN;

		if (((attrs & MT_RW) && (attrs & MT_RW_AP_ELx)) ||
		     (attrs & MT_U_EXECUTE_NEVER))
			desc |= PTE_BLOCK_DESC_UXN;

		if (mem_type == MT_NORMAL)
			desc |= PTE_BLOCK_DESC_INNER_SHARE;
		else
			desc |= PTE_BLOCK_DESC_OUTER_SHARE;
		break;
	default:
		break;
	}

	return desc;
}
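
/*
 * Example (illustrative): MT_NORMAL | MT_RW yields a descriptor with the
 * access flag set, AP = RW for the higher EL only, Inner Shareable, and PXN
 * set (writable normal memory is never privileged-executable here); UXN is
 * added only when the region is also EL0-writable or MT_U_EXECUTE_NEVER is
 * requested.
 */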

static int add_map(struct ARM_MMU_ptables *ptables, const char *name,
		   uintptr_t phys, uintptr_t virt, size_t size, uint32_t attrs)
{
	uint64_t desc = get_region_desc(attrs);
	bool may_overwrite = !(attrs & MT_NO_OVERWRITE);

	MMU_DEBUG("mmap [%s]: virt %lx phys %lx size %lx attr %llx\r\n",
		  name, virt, phys, size, desc);
	__ASSERT(((virt | phys | size) & (CONFIG_MMU_PAGE_SIZE - 1)) == 0,
		 "address/size are not page aligned\r\n");
	desc |= phys;
	return set_mapping(ptables, virt, size, desc, may_overwrite);
}

/* OS execution regions with appropriate attributes */

static inline void add_ARM_MMU_flat_range(struct ARM_MMU_ptables *ptables,
					  const struct ARM_MMU_flat_range *range,
					  uint32_t extra_flags)
{
	uintptr_t address = (uintptr_t)range->start;
	size_t size = (uintptr_t)range->end - address;

	if (size) {
		add_map(ptables, range->name, address, address,
			size, range->attrs | extra_flags);
	}
}

static inline void add_ARM_MMU_region(struct ARM_MMU_ptables *ptables,
				      const struct ARM_MMU_region *region,
				      uint32_t extra_flags)
{
	if (region->size || region->attrs) {
		add_map(ptables, region->name, region->base_pa, region->base_va,
			region->size, region->attrs | extra_flags);
	}
}

static void setup_page_tables(const struct ARM_MMU_config *MMU_config,
		struct ARM_MMU_ptables *ptables)
{
	unsigned int index;
	const struct ARM_MMU_flat_range *range;
	const struct ARM_MMU_region *region;
	uintptr_t max_va = 0, max_pa = 0;

	MMU_DEBUG("xlat tables:\r\n");
	for (index = 0; index < CONFIG_MAX_XLAT_TABLES; index++)
		MMU_DEBUG("%d: %p\r\n", index, xlat_tables + index * Ln_XLAT_NUM_ENTRIES);

	for (index = 0; index < MMU_config->num_regions; index++) {
		region = &MMU_config->mmu_regions[index];
		max_va = MAX(max_va, region->base_va + region->size);
		max_pa = MAX(max_pa, region->base_pa + region->size);
	}

	__ASSERT(max_va <= (1ULL << CONFIG_ARM64_VA_BITS),
		 "Maximum VA not supported\r\n");
	__ASSERT(max_pa <= (1ULL << CONFIG_ARM64_PA_BITS),
		 "Maximum PA not supported\r\n");

	/* setup translation table for OS execution regions */
	for (index = 0; index < MMU_config->num_os_ranges; index++) {
		range = &MMU_config->mmu_os_ranges[index];
		add_ARM_MMU_flat_range(ptables, range, 0);
	}

	/*
	 * Create translation tables for user provided platform regions.
	 * Those must not conflict with our default mapping.
	 */
	for (index = 0; index < MMU_config->num_regions; index++) {
		region = &MMU_config->mmu_regions[index];
		add_ARM_MMU_region(ptables, region, MT_NO_OVERWRITE);
	}

	ARM_MMU_InvalidateTLB();
}

/* Translation table control register settings */
static uint64_t get_tcr(int el)
{
	uint64_t tcr;
	uint64_t va_bits = CONFIG_ARM64_VA_BITS;
	uint64_t tcr_ps_bits;

	tcr_ps_bits = TCR_PS_BITS;

	if (el == 1) {
		tcr = (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
		/*
		 * TCR_EL1.EPD1: Disable translation table walk for addresses
		 * that are translated using TTBR1_EL1.
		 */
		tcr |= TCR_EPD1_DISABLE;
	} else
		tcr = (tcr_ps_bits << TCR_EL3_PS_SHIFT);

	tcr |= TCR_T0SZ(va_bits);
	/*
	 * Translation table walk is cacheable, inner/outer WBWA
	 */
	tcr |= TCR_TG0_4K | TCR_ORGN_WBWA | TCR_IRGN_WBWA;

	return tcr;
}
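
/*
 * With the 48-bit VA/PA defaults this requests a 256 TB output range,
 * T0SZ = 64 - 48 = 16, a 4 KB granule and write-back write-allocate
 * cacheable table walks; TTBR1_EL1 walks remain disabled.
 */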

static void enable_mmu_el1(struct ARM_MMU_ptables *ptables, unsigned int flags)
{
	ARG_UNUSED(flags);
	uint64_t val;

	/* Set MAIR, TCR and TTBR registers */
	__MSR(MAIR_EL1, MEMORY_ATTRIBUTES);
	__MSR(TCR_EL1, get_tcr(1));
	__MSR(TTBR0_EL1, (uint64_t)ptables->base_xlat_table);

	/* Ensure these changes are seen before MMU is enabled */
	__ISB();

	/* Enable the MMU and caches */
	__MRS(SCTLR_EL1, &val);
	__MSR(SCTLR_EL1, val | SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT);

	/* Ensure the MMU enable takes effect immediately */
	__ISB();

	MMU_DEBUG("MMU enabled with caches\r\n");
}

/* ARM MMU Driver Initial Setup */

static struct ARM_MMU_ptables kernel_ptables;

/*
 * @brief MMU default configuration
 *
 * This function initializes the Memory Management Unit (MMU) with the
 * provided configuration and enables it for EL1.
 */
void ARM_MMU_Initialize(const struct ARM_MMU_config *MMU_config,
			bool is_primary_core)
{
	unsigned int flags = 0;
	uint64_t val;

	__ASSERT(CONFIG_MMU_PAGE_SIZE == KB(4),
		 "Only 4K page size is supported\r\n");

	__MRS(CURRENTEL, &val);
	__ASSERT(GET_EL(val) == MODE_EL1,
		 "Exception level not EL1, MMU not enabled!\r\n");

	/* Ensure that the MMU is not already enabled */
	__MRS(SCTLR_EL1, &val);
	__ASSERT((val & SCTLR_M_BIT) == 0, "MMU is already enabled\r\n");

	/*
	 * Only the booting (primary) core sets up the page tables.
	 */
	if (is_primary_core) {
		kernel_ptables.base_xlat_table = new_table();
		setup_page_tables(MMU_config, &kernel_ptables);
	}

	/* currently only EL1 is supported */
	enable_mmu_el1(&kernel_ptables, flags);
}
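
/*
 * Illustrative usage sketch (not part of the driver): a hypothetical board
 * configuration mapping one flat RAM region and one device region, assuming
 * the ARM_MMU_region/ARM_MMU_config field names used above and placeholder
 * addresses:
 *
 *   static const struct ARM_MMU_region board_regions[] = {
 *       { .name = "ram",  .base_pa = 0x80000000U, .base_va = 0x80000000U,
 *         .size = 0x10000000U, .attrs = MT_NORMAL | MT_RW },
 *       { .name = "uart", .base_pa = 0x09000000U, .base_va = 0x09000000U,
 *         .size = 0x00001000U, .attrs = MT_DEVICE_nGnRnE | MT_RW },
 *   };
 *
 *   static const struct ARM_MMU_config board_MMU_config = {
 *       .mmu_regions   = board_regions,
 *       .num_regions   = 2U,
 *       .mmu_os_ranges = NULL,
 *       .num_os_ranges = 0U,
 *   };
 *
 *   ARM_MMU_Initialize(&board_MMU_config, true);
 */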

/*
 * @brief MMU mapping setup
 *
 * This function sets up a new MMU region mapping
 */
int ARM_MMU_AddMap(const char *name, uintptr_t phys, uintptr_t virt, size_t size, uint32_t attrs)
{
	int ret = -1;

	if ((virt + size) > (1ULL << CONFIG_ARM64_VA_BITS))
		goto exit;

	if ((phys + size) > (1ULL << CONFIG_ARM64_PA_BITS))
		goto exit;

	if (size) {
		ret = add_map(&kernel_ptables, name, phys, virt, size, attrs);

		ARM_MMU_InvalidateTLB();
	}

exit:
	return ret;
}
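
/*
 * Example (illustrative, placeholder addresses): map a 2 MB non-cacheable
 * DMA window after the MMU is up:
 *
 *   ARM_MMU_AddMap("dma", 0x90000000U, 0x90000000U, 0x00200000U,
 *                  MT_NORMAL_NC | MT_RW);
 */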

/*
 * @brief MMU TLB invalidation
 *
 * This function invalidates the entire unified TLB
 */
void ARM_MMU_InvalidateTLB(void)
{
	__DSB();
	__ASM volatile("tlbi vmalle1");
	__DSB();
	__ISB();
}