/*
 * Copyright (c) 2017 Linaro Limited.
 * Copyright (c) 2021 Arm Limited (or its affiliates). All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <kernel_arch_func.h>
#include <zephyr/arch/arm64/mm.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/check.h>
#include <zephyr/sys/barrier.h>
#include <zephyr/cache.h>
#include <kernel_internal.h>
#include <zephyr/mem_mgmt/mem_attr.h>
#include <zephyr/dt-bindings/memory-attr/memory-attr-arm.h>

LOG_MODULE_REGISTER(mpu, CONFIG_MPU_LOG_LEVEL);

#define NODE_HAS_PROP_AND_OR(node_id, prop) \
	DT_NODE_HAS_PROP(node_id, prop) ||

BUILD_ASSERT((DT_FOREACH_STATUS_OKAY_NODE_VARGS(
	      NODE_HAS_PROP_AND_OR, zephyr_memory_region_mpu) false) == false,
	      "`zephyr,memory-region-mpu` was deprecated in favor of `zephyr,memory-attr`");

#define MPU_DYNAMIC_REGION_AREAS_NUM	3

#if defined(CONFIG_USERSPACE) || defined(CONFIG_ARM64_STACK_PROTECTION)
static struct dynamic_region_info
	      sys_dyn_regions[CONFIG_MP_MAX_NUM_CPUS][MPU_DYNAMIC_REGION_AREAS_NUM];
static int sys_dyn_regions_num[CONFIG_MP_MAX_NUM_CPUS];

static void dynamic_regions_init(void);
static int dynamic_areas_init(uintptr_t start, size_t size);
static int flush_dynamic_regions_to_mpu(struct dynamic_region_info *dyn_regions,
					uint8_t region_num);

#if defined(CONFIG_USERSPACE)
#define MPU_DYNAMIC_REGIONS_AREA_START ((uintptr_t)&_app_smem_start)
#else
#define MPU_DYNAMIC_REGIONS_AREA_START ((uintptr_t)&__kernel_ram_start)
#endif

#define MPU_DYNAMIC_REGIONS_AREA_SIZE  ((size_t)((uintptr_t)&__kernel_ram_end - \
	MPU_DYNAMIC_REGIONS_AREA_START))
#endif

/*
 * AArch64 Memory Model Feature Register 0
 * Provides information about the implemented memory model and memory
 * management support in AArch64 state.
 * See Arm Architecture Reference Manual Supplement
 *  Armv8, for Armv8-R AArch64 architecture profile, G1.3.7
 *
 * ID_AA64MMFR0_MSA_FRAC, bits[55:52]
 * ID_AA64MMFR0_MSA, bits [51:48]
 */
#define ID_AA64MMFR0_MSA_msk		(0xFFUL << 48U)
#define ID_AA64MMFR0_PMSA_EN		(0x1FUL << 48U)
#define ID_AA64MMFR0_PMSA_VMSA_EN	(0x2FUL << 48U)

/*
 * Global status variable holding the number of HW MPU region indices, which
 * have been reserved by the MPU driver to program the static (fixed) memory
 * regions.
 */
static uint8_t static_regions_num;

/* Get the number of supported MPU regions. */
static ALWAYS_INLINE uint8_t get_num_regions(void)
{
	uint64_t type;

	type = read_mpuir_el1();
	type = type & MPU_IR_REGION_Msk;

	return (uint8_t)type;
}

/* ARM Core MPU Driver API Implementation for ARM MPU */

/**
 * @brief enable the MPU
 *
 * On an SMP system, the function that enables the MPU must not contain stack
 * protector code: due to cache coherence issues, the canary value a secondary
 * CPU reads before enabling the MPU may differ from the one read afterwards.
 */
FUNC_NO_STACK_PROTECTOR void arm_core_mpu_enable(void)
{
	uint64_t val;

	val = read_sctlr_el1();
	val |= SCTLR_M_BIT;
	write_sctlr_el1(val);
	barrier_dsync_fence_full();
	barrier_isync_fence_full();
}

/**
 * @brief disable the MPU
 */
void arm_core_mpu_disable(void)
{
	uint64_t val;

	/* Force any outstanding transfers to complete before disabling MPU */
	barrier_dmem_fence_full();

	val = read_sctlr_el1();
	val &= ~SCTLR_M_BIT;
	write_sctlr_el1(val);
	barrier_dsync_fence_full();
	barrier_isync_fence_full();
}

/* ARM MPU Driver Initial Setup
 *
 * Configure the cache-ability attributes for all the
 * different types of memory regions.
 */
static void mpu_init(void)
{
	/* Device region(s): Attribute-0
	 * Flash region(s): Attribute-1
	 * SRAM region(s): Attribute-2
	 * SRAM non-cacheable region(s): Attribute-3
	 */
	uint64_t mair = MPU_MAIR_ATTRS;

	write_mair_el1(mair);
	barrier_dsync_fence_full();
	barrier_isync_fence_full();
}

/*
 * Changing an MPU region may change cache-related attributes and cause cache
 * coherence issues, so no functions should be invoked in this critical scope,
 * to avoid memory accesses before all the MPU regions are configured.
 */
static ALWAYS_INLINE void mpu_set_region(uint32_t rnr, uint64_t rbar,
				  uint64_t rlar)
{
	write_prselr_el1(rnr);
	barrier_dsync_fence_full();
	write_prbar_el1(rbar);
	write_prlar_el1(rlar);
	barrier_dsync_fence_full();
	barrier_isync_fence_full();
}

static ALWAYS_INLINE void mpu_clr_region(uint32_t rnr)
{
	write_prselr_el1(rnr);
	barrier_dsync_fence_full();
	/*
	 * Have to set limit register first as the enable/disable bit of the
	 * region is in the limit register.
	 */
	write_prlar_el1(0);
	write_prbar_el1(0);
	barrier_dsync_fence_full();
	barrier_isync_fence_full();
}

/*
 * This internal function performs MPU region initialization.
 *
 * Changing an MPU region may change cache-related attributes and cause cache
 * coherence issues, so no functions should be invoked in this critical scope,
 * to avoid memory accesses before all the MPU regions are configured.
 */
static ALWAYS_INLINE void region_init(const uint32_t index,
			const struct arm_mpu_region *region_conf)
{
	uint64_t rbar = region_conf->base & MPU_RBAR_BASE_Msk;
	uint64_t rlar = (region_conf->limit - 1) & MPU_RLAR_LIMIT_Msk;

	rbar |= region_conf->attr.rbar &
		(MPU_RBAR_XN_Msk | MPU_RBAR_AP_Msk | MPU_RBAR_SH_Msk);
	rlar |= (region_conf->attr.mair_idx << MPU_RLAR_AttrIndx_Pos) &
		MPU_RLAR_AttrIndx_Msk;
	rlar |= MPU_RLAR_EN_Msk;

	mpu_set_region(index, rbar, rlar);
}

#define _BUILD_REGION_CONF(reg, _ATTR)						\
	(struct arm_mpu_region) { .name  = (reg).dt_name,			\
				  .base  = (reg).dt_addr,			\
				  .limit = (reg).dt_addr + (reg).dt_size,	\
				  .attr  = _ATTR,				\
				}

/* This internal function programs the MPU regions defined in the DT when using
 * the `zephyr,memory-attr = <( DT_MEM_ARM(...) )>` property.
 */
static int mpu_configure_regions_from_dt(uint8_t *reg_index)
{
	const struct mem_attr_region_t *region;
	size_t num_regions;

	num_regions = mem_attr_get_regions(&region);

	for (size_t idx = 0; idx < num_regions; idx++) {
		struct arm_mpu_region region_conf;

		switch (DT_MEM_ARM_GET(region[idx].dt_attr)) {
		case DT_MEM_ARM_MPU_RAM:
			region_conf = _BUILD_REGION_CONF(region[idx], REGION_RAM_ATTR);
			break;
#ifdef REGION_RAM_NOCACHE_ATTR
		case DT_MEM_ARM_MPU_RAM_NOCACHE:
			region_conf = _BUILD_REGION_CONF(region[idx], REGION_RAM_NOCACHE_ATTR);
			__ASSERT(!(region[idx].dt_attr & DT_MEM_CACHEABLE),
				 "RAM_NOCACHE with DT_MEM_CACHEABLE attribute\n");
			break;
#endif
#ifdef REGION_FLASH_ATTR
		case DT_MEM_ARM_MPU_FLASH:
			region_conf = _BUILD_REGION_CONF(region[idx], REGION_FLASH_ATTR);
			break;
#endif
#ifdef REGION_IO_ATTR
		case DT_MEM_ARM_MPU_IO:
			region_conf = _BUILD_REGION_CONF(region[idx], REGION_IO_ATTR);
			break;
#endif
		default:
			/* Either the specified `ATTR_MPU_*` attribute does not
			 * exist or the `REGION_*_ATTR` macro is not defined
			 * for that attribute.
			 */
			LOG_ERR("Invalid attribute for the region\n");
			return -EINVAL;
		}

		region_init((*reg_index), (const struct arm_mpu_region *) &region_conf);

		(*reg_index)++;
	}

	return 0;
}

/*
 * @brief MPU default configuration
 *
 * This function provides the default configuration mechanism for the Memory
 * Protection Unit (MPU).
 *
 * On an SMP system, the function that enables the MPU must not contain stack
 * protector code: due to cache coherence issues, the canary value a secondary
 * CPU reads before enabling the MPU may differ from the one read afterwards.
 */
FUNC_NO_STACK_PROTECTOR void z_arm64_mm_init(bool is_primary_core)
{
	uint64_t val;
	uint32_t r_index;
	uint8_t tmp_static_num;

	/* Current MPU code supports only EL1 */
	val = read_currentel();
	__ASSERT(GET_EL(val) == MODE_EL1,
		 "Exception level not EL1, MPU not enabled!\n");

	/* Check whether the processor supports MPU */
	val = read_id_aa64mmfr0_el1() & ID_AA64MMFR0_MSA_msk;
	if ((val != ID_AA64MMFR0_PMSA_EN) &&
	    (val != ID_AA64MMFR0_PMSA_VMSA_EN)) {
		__ASSERT(0, "MPU not supported!\n");
		return;
	}

	if (mpu_config.num_regions > get_num_regions()) {
		/* Attempt to configure more MPU regions than
		 * what is supported by hardware. As this operation
		 * is executed during system (pre-kernel) initialization,
		 * we want to ensure we can detect an attempt to
		 * perform invalid configuration.
		 */
		__ASSERT(0,
			 "Request to configure: %u regions (supported: %u)\n",
			 mpu_config.num_regions,
			 get_num_regions());
		return;
	}

	arm_core_mpu_disable();

	/* Architecture-specific configuration */
	mpu_init();

	/* Program fixed regions configured at SOC definition. */
	for (r_index = 0U; r_index < mpu_config.num_regions; r_index++) {
		region_init(r_index, &mpu_config.mpu_regions[r_index]);
	}

	/* Update the number of programmed MPU regions. */
	tmp_static_num = mpu_config.num_regions;

	/* DT-defined MPU regions. */
	if (mpu_configure_regions_from_dt(&tmp_static_num) == -EINVAL) {
		__ASSERT(0, "Failed to allocate MPU regions from DT\n");
		return;
	}

	arm_core_mpu_enable();

	if (!is_primary_core) {
		/*
		 * The primary core might reprogram the sys regions, so
		 * secondary cores only need to re-flush them below.
		 */
		goto out;
	}

	/* Only the primary core initializes static_regions_num */
	static_regions_num = tmp_static_num;

#if defined(CONFIG_USERSPACE) || defined(CONFIG_ARM64_STACK_PROTECTION)
	dynamic_regions_init();
	/* Only the primary core does the dynamic_areas_init. */
	int rc = dynamic_areas_init(MPU_DYNAMIC_REGIONS_AREA_START,
				    MPU_DYNAMIC_REGIONS_AREA_SIZE);
	if (rc < 0) {
		__ASSERT(0, "Dynamic areas init failed");
		return;
	}
#endif

out:
#if defined(CONFIG_ARM64_STACK_PROTECTION)
	(void)flush_dynamic_regions_to_mpu(sys_dyn_regions[arch_curr_cpu()->id],
					   sys_dyn_regions_num[arch_curr_cpu()->id]);
#endif
	return;
}

#if defined(CONFIG_USERSPACE) || defined(CONFIG_ARM64_STACK_PROTECTION)
static int insert_region(struct dynamic_region_info *dyn_regions, uint8_t region_num,
			 uintptr_t start, size_t size, struct arm_mpu_region_attr *attr);

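/*
 * Toggle the background region (SCTLR_EL1.BR). While it is enabled,
 * privileged accesses that hit no programmed MPU region fall back to the
 * default memory map, which lets us safely rewrite the dynamic regions.
 */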
static void arm_core_mpu_background_region_enable(void)
{
	uint64_t val;

	val = read_sctlr_el1();
	val |= SCTLR_BR_BIT;
	write_sctlr_el1(val);
	barrier_dsync_fence_full();
	barrier_isync_fence_full();
}

static void arm_core_mpu_background_region_disable(void)
{
	uint64_t val;

	/* Force any outstanding transfers to complete before disabling MPU */
	barrier_dmem_fence_full();
	val = read_sctlr_el1();
	val &= ~SCTLR_BR_BIT;
	write_sctlr_el1(val);
	barrier_dsync_fence_full();
	barrier_isync_fence_full();
}

static void dynamic_regions_init(void)
{
	for (int cpuid = 0; cpuid < arch_num_cpus(); cpuid++) {
		for (int i = 0; i < MPU_DYNAMIC_REGION_AREAS_NUM; i++) {
			sys_dyn_regions[cpuid][i].index = -1;
		}
	}
}

static int dynamic_areas_init(uintptr_t start, size_t size)
{
	const struct arm_mpu_region *region;
	struct dynamic_region_info *tmp_info;
	int ret = -ENOENT;

	uint64_t base = start;
	uint64_t limit = base + size;

	for (int cpuid = 0; cpuid < arch_num_cpus(); cpuid++) {
		/* Make sure the following search does not overflow the array */
		if (sys_dyn_regions_num[cpuid] + 1 > MPU_DYNAMIC_REGION_AREAS_NUM) {
			return -ENOSPC;
		}

		ret = -ENOENT;

		for (int i = 0; i < mpu_config.num_regions; i++) {
			region = &mpu_config.mpu_regions[i];
			tmp_info = &sys_dyn_regions[cpuid][sys_dyn_regions_num[cpuid]];

			if (base >= region->base && limit <= region->limit) {
				tmp_info->index = i;
				tmp_info->region_conf = *region;
				sys_dyn_regions_num[cpuid] += 1;
				/* Found the region, reset ret to no error */
				ret = 0;
				break;
			}
		}
#if defined(CONFIG_ARM64_STACK_PROTECTION)
		ret = insert_region(sys_dyn_regions[cpuid],
				    MPU_DYNAMIC_REGION_AREAS_NUM,
				    (uintptr_t)z_interrupt_stacks[cpuid],
				    Z_ARM64_STACK_GUARD_SIZE,
				    NULL /* delete this region */);
		if (ret < 0) {
			break;
		}
		/*
		 * No need to check here if (sys_dyn_regions_num[cpuid] + ret)
		 * overflows, because insert_region() has already checked it.
		 */
		sys_dyn_regions_num[cpuid] += ret;
#endif
	}

	return ret < 0 ? ret : 0;
}

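/*
 * Fill in an MPU region descriptor. A NULL attr clears the attributes,
 * which is how clear_region() marks a region slot as unused.
 */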
static void set_region(struct arm_mpu_region *region,
		       uint64_t base, uint64_t limit,
		       struct arm_mpu_region_attr *attr)
{
	region->base = base;
	region->limit = limit;
	if (attr != NULL) {
		region->attr = *attr;
	} else {
		memset(&region->attr, 0, sizeof(struct arm_mpu_region_attr));
	}
}

static void clear_region(struct arm_mpu_region *region)
{
	set_region(region, 0, 0, NULL);
}

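/*
 * Copy the current CPU's system dynamic regions into dst and clear the
 * remaining slots. Returns the number of valid entries copied, or -EINVAL
 * if they would not fit with room to spare.
 */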
static int dup_dynamic_regions(struct dynamic_region_info *dst, int len)
{
	size_t i;
	int num = sys_dyn_regions_num[arch_curr_cpu()->id];

	if (num >= len) {
		LOG_ERR("Number of system dynamic regions is too large.");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		dst[i] = sys_dyn_regions[arch_curr_cpu()->id][i];
	}
	for (; i < len; i++) {
		clear_region(&dst[i].region_conf);
		dst[i].index = -1;
	}

	return num;
}

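/*
 * Find the dynamic region whose range fully contains [base, limit).
 * Passing base == limit == 0 matches a cleared (unused) slot, which is how
 * find_available_region() below locates a free entry.
 */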
static struct dynamic_region_info *get_underlying_region(struct dynamic_region_info *dyn_regions,
							 uint8_t region_num, uint64_t base,
							 uint64_t limit)
{
	for (int idx = 0; idx < region_num; idx++) {
		struct arm_mpu_region *region = &(dyn_regions[idx].region_conf);

		if (base >= region->base && limit <= region->limit) {
			return &(dyn_regions[idx]);
		}
	}

	return NULL;
}

static struct dynamic_region_info *find_available_region(struct dynamic_region_info *dyn_regions,
							 uint8_t region_num)
{
	return get_underlying_region(dyn_regions, region_num, 0, 0);
}

/*
 * return -ENOENT if there is no more available region
 * do nothing if attr is NULL
 */
static int _insert_region(struct dynamic_region_info *dyn_regions, uint8_t region_num,
			  uint64_t base, uint64_t limit, struct arm_mpu_region_attr *attr)
{
	struct dynamic_region_info *tmp_region;

	if (attr == NULL) {
		return 0;
	}

	tmp_region = find_available_region(dyn_regions, region_num);

	if (tmp_region == NULL) {
		return -ENOENT;
	}

	set_region(&tmp_region->region_conf, base, limit, attr);

	return 0;
}

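/*
 * Insert the range [start, start + size) on top of the underlying region
 * that contains it, splitting that region as needed, e.g.:
 *
 *   underlying: |----------------|
 *   new:              |-----|
 *   result:     |-----|-----|----|
 *
 * A NULL attr deletes the range instead: it is carved out of the underlying
 * region and no new region is programmed for it. Returns the net change in
 * the number of used regions, or -ENOENT on failure.
 */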
static int insert_region(struct dynamic_region_info *dyn_regions, uint8_t region_num,
			 uintptr_t start, size_t size, struct arm_mpu_region_attr *attr)
{

	int ret = 0;
	/* base: inclusive, limit: exclusive */
	uint64_t base = (uint64_t)start;
	uint64_t limit = base + size;
	struct dynamic_region_info *u_region;
	uint64_t u_base;
	uint64_t u_limit;
	struct arm_mpu_region_attr u_attr;

	int count = 0;

	u_region = get_underlying_region(dyn_regions, region_num, base, limit);

	if (u_region == NULL) {
		return -ENOENT;
	}

	/* restore the underlying region range and attr */
	u_base = u_region->region_conf.base;
	u_limit = u_region->region_conf.limit;
	u_attr = u_region->region_conf.attr;

	clear_region(&u_region->region_conf);
	count--;

	/* if attr is NULL, meaning we are going to delete a region */
	if (base == u_base && limit == u_limit) {
		/*
		 * The new region overlaps entirely with the
		 * underlying region. Simply update the attr.
		 */
		ret += _insert_region(dyn_regions, region_num, base, limit, attr);
		count++;
	} else if (base == u_base) {
		ret += _insert_region(dyn_regions, region_num, limit, u_limit, &u_attr);
		count++;
		ret += _insert_region(dyn_regions, region_num, base, limit, attr);
		count++;
	} else if (limit == u_limit) {
		ret += _insert_region(dyn_regions, region_num, u_base, base, &u_attr);
		count++;
		ret += _insert_region(dyn_regions, region_num, base, limit, attr);
		count++;
	} else {
		ret += _insert_region(dyn_regions, region_num, u_base, base, &u_attr);
		count++;
		ret += _insert_region(dyn_regions, region_num, base, limit, attr);
		count++;
		ret += _insert_region(dyn_regions, region_num, limit, u_limit, &u_attr);
		count++;
	}

	if (ret < 0) {
		return -ENOENT;
	}

	if (attr == NULL) {
		/* meaning we removed a region, so fix up the count by decreasing it by 1 */
		count--;
	}

	return count;
}

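/*
 * Program the given dynamic region set into the hardware MPU. Fixed
 * background regions keep their static index; the others are placed at
 * indices from static_regions_num upward. Must be called with IRQs
 * disabled, as asserted below.
 */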
static int flush_dynamic_regions_to_mpu(struct dynamic_region_info *dyn_regions,
					uint8_t region_num)
{
	__ASSERT(read_daif() & DAIF_IRQ_BIT, "mpu flushing must be called with IRQs disabled");

	int reg_avail_idx = static_regions_num;

	if (region_num >= get_num_regions()) {
		LOG_ERR("Out-of-bounds error for mpu regions. "
			"region num: %d, total mpu regions: %d",
			region_num, get_num_regions());
		return -ENOENT;
	}

	arm_core_mpu_background_region_enable();

	/*
	 * Clear the dynamic regions.
	 * Before clearing them, flush dyn_regions to memory, because it will
	 * be read back when updating the MPU regions.
	 */
	sys_cache_data_flush_range(dyn_regions, sizeof(struct dynamic_region_info) * region_num);
	for (size_t i = reg_avail_idx; i < get_num_regions(); i++) {
		mpu_clr_region(i);
	}

	/*
	 * Flush the dyn_regions to the MPU.
	 */
	for (size_t i = 0; i < region_num; i++) {
		int region_idx = dyn_regions[i].index;
		/*
		 * dyn_regions holds two types of regions:
		 * 1) The fixed dyn background region, which has a real index.
		 * 2) The normal region, whose index accumulates from
		 *    static_regions_num.
		 *
		 * region_idx < 0 means it is not the fixed dyn background
		 * region. In this case, region_idx should be reg_avail_idx,
		 * which is accumulated from static_regions_num.
		 */
		if (region_idx < 0) {
			region_idx = reg_avail_idx++;
		}

		region_init(region_idx, &(dyn_regions[i].region_conf));
	}
	arm_core_mpu_background_region_disable();

	return 0;
}

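/*
 * Rebuild a thread's dynamic region set: start from the per-CPU system
 * regions, then overlay the memory domain partitions, the user stack and
 * the stack guard, as configured. The result is flushed to the MPU
 * immediately only if the thread is the current one.
 */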
static int configure_dynamic_mpu_regions(struct k_thread *thread)
{
	__ASSERT(read_daif() & DAIF_IRQ_BIT, "must be called with IRQs disabled");

	struct dynamic_region_info *dyn_regions = thread->arch.regions;
	const uint8_t max_region_num = ARM64_MPU_MAX_DYNAMIC_REGIONS;
	int region_num;
	int ret = 0;

	/* Busy wait if it is flushing somewhere else */
	while (!atomic_cas(&thread->arch.flushing, 0, 1)) {
	}

	thread->arch.region_num = 0;

	ret = dup_dynamic_regions(dyn_regions, max_region_num);

	if (ret < 0) {
		goto out;
	}

	region_num = ret;

#if defined(CONFIG_USERSPACE)
	struct k_mem_domain *mem_domain = thread->mem_domain_info.mem_domain;

	if (mem_domain) {
		LOG_DBG("configure domain: %p", mem_domain);

		uint32_t num_parts = mem_domain->num_partitions;
		uint32_t max_parts = CONFIG_MAX_DOMAIN_PARTITIONS;
		struct k_mem_partition *partition;

		for (size_t i = 0; i < max_parts && num_parts > 0; i++, num_parts--) {
			partition = &mem_domain->partitions[i];
			if (partition->size == 0) {
				continue;
			}
			LOG_DBG("set region 0x%lx 0x%lx\n",
				partition->start, partition->size);
			ret = insert_region(dyn_regions,
					    max_region_num,
					    partition->start,
					    partition->size,
					    &partition->attr);

			if (ret < 0) {
				goto out;
			}

			region_num += ret;
		}
	}

	LOG_DBG("configure user thread %p's context", thread);
	if ((thread->base.user_options & K_USER) != 0) {
		/* K_USER thread stack needs a region */
		ret = insert_region(dyn_regions,
				    max_region_num,
				    thread->stack_info.start,
				    thread->stack_info.size,
				    &K_MEM_PARTITION_P_RW_U_RW);
		if (ret < 0) {
			goto out;
		}

		region_num += ret;
	}
#endif

#if defined(CONFIG_ARM64_STACK_PROTECTION)
	uintptr_t guard_start;

	if (thread->arch.stack_limit != 0) {
		guard_start = (uintptr_t)thread->arch.stack_limit - Z_ARM64_STACK_GUARD_SIZE;
		ret = insert_region(dyn_regions,
				    max_region_num,
				    guard_start,
				    Z_ARM64_STACK_GUARD_SIZE,
				    NULL);
		if (ret < 0) {
			goto out;
		}
		region_num += ret;
	}
#endif

	/*
	 * There is no need to check whether region_num overflows a uint8_t,
	 * because insert_region() makes sure there is enough room to store a
	 * region, otherwise it returns a negative error number.
	 */
	thread->arch.region_num = (uint8_t)region_num;

	if (thread == _current) {
		ret = flush_dynamic_regions_to_mpu(dyn_regions, region_num);
	}

out:
	atomic_clear(&thread->arch.flushing);
	return ret < 0 ? ret : 0;
}
#endif /* defined(CONFIG_USERSPACE) || defined(CONFIG_ARM64_STACK_PROTECTION) */

#if defined(CONFIG_USERSPACE)
int arch_mem_domain_max_partitions_get(void)
{
	int remaining_regions = get_num_regions() - static_regions_num + 1;

	/*
	 * Check the remaining regions: there should be at least
	 * ARM64_MPU_MAX_DYNAMIC_REGIONS, which equals
	 * CONFIG_MAX_DOMAIN_PARTITIONS + the necessary regions (stack, guard).
	 */
	if (remaining_regions < ARM64_MPU_MAX_DYNAMIC_REGIONS) {
		LOG_WRN("Not enough MPU regions, demand: %d, available: %d",
			ARM64_MPU_MAX_DYNAMIC_REGIONS, remaining_regions);
		return remaining_regions;
	}

	return CONFIG_MAX_DOMAIN_PARTITIONS;
}

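/*
 * Reconfigure the dynamic regions of every thread attached to the domain.
 * On SMP, an IPI is sent so that CPUs currently running one of these
 * threads reload their MPU configuration.
 */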
static int configure_domain_partitions(struct k_mem_domain *domain)
{
	struct k_thread *thread;
	int ret;

	SYS_DLIST_FOR_EACH_CONTAINER(&domain->mem_domain_q, thread,
				     mem_domain_info.mem_domain_q_node) {
		ret = configure_dynamic_mpu_regions(thread);
		if (ret != 0) {
			return ret;
		}
	}
#ifdef CONFIG_SMP
	/* the thread could be running on another CPU right now */
	z_arm64_mem_cfg_ipi();
#endif

	return 0;
}

int arch_mem_domain_partition_add(struct k_mem_domain *domain, uint32_t partition_id)
{
	ARG_UNUSED(partition_id);

	return configure_domain_partitions(domain);
}

int arch_mem_domain_partition_remove(struct k_mem_domain *domain, uint32_t partition_id)
{
	ARG_UNUSED(partition_id);

	return configure_domain_partitions(domain);
}

int arch_mem_domain_thread_add(struct k_thread *thread)
{
	int ret = 0;

	ret = configure_dynamic_mpu_regions(thread);
#ifdef CONFIG_SMP
	if (ret == 0 && thread != _current) {
		/* the thread could be running on another CPU right now */
		z_arm64_mem_cfg_ipi();
	}
#endif

	return ret;
}

int arch_mem_domain_thread_remove(struct k_thread *thread)
{
	int ret = 0;

	ret = configure_dynamic_mpu_regions(thread);
#ifdef CONFIG_SMP
	if (ret == 0 && thread != _current) {
		/* the thread could be running on another CPU right now */
		z_arm64_mem_cfg_ipi();
	}
#endif

	return ret;
}
#endif /* CONFIG_USERSPACE */

#if defined(CONFIG_USERSPACE) || defined(CONFIG_ARM64_STACK_PROTECTION)
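/* Program a thread's dynamic MPU regions with interrupts locked. */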
void z_arm64_thread_mem_domains_init(struct k_thread *thread)
{
	unsigned int key = arch_irq_lock();

	configure_dynamic_mpu_regions(thread);
	arch_irq_unlock(key);
}

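/*
 * Switch the MPU to the incoming thread's dynamic regions, falling back to
 * the per-CPU system regions when the thread has none configured.
 */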
void z_arm64_swap_mem_domains(struct k_thread *thread)
{
	int cpuid = arch_curr_cpu()->id;

	/* Busy wait if it is configuring somewhere else */
	while (!atomic_cas(&thread->arch.flushing, 0, 1)) {
	}

	if (thread->arch.region_num == 0) {
		(void)flush_dynamic_regions_to_mpu(sys_dyn_regions[cpuid],
						   sys_dyn_regions_num[cpuid]);
	} else {
		(void)flush_dynamic_regions_to_mpu(thread->arch.regions,
						   thread->arch.region_num);
	}

	atomic_clear(&thread->arch.flushing);
}
#endif