1 /*
2  * Copyright (c) 2017 Linaro Limited.
3  * Copyright (c) 2018 Nordic Semiconductor ASA.
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #ifndef ZEPHYR_ARCH_ARM_CORE_AARCH32_MPU_ARM_MPU_V8_INTERNAL_H_
9 #define ZEPHYR_ARCH_ARM_CORE_AARCH32_MPU_ARM_MPU_V8_INTERNAL_H_
10 
11 #include <cortex_m/cmse.h>
12 #define LOG_LEVEL CONFIG_MPU_LOG_LEVEL
13 #include <zephyr/logging/log.h>
14 #include <zephyr/sys/math_extras.h>
15 #include <zephyr/sys/barrier.h>
16 
/**
 * @brief internal structure holding information of
 *        memory areas where dynamic MPU programming
 *        is allowed.
 */
struct dynamic_region_info {
	/* HW MPU region index of the underlying area (-EINVAL if unset). */
	int index;
	/* Default (static) configuration of that region, stored so it can
	 * be restored before dynamic regions are re-programmed.
	 */
	struct arm_mpu_region region_conf;
};

/**
 * Global array, holding the MPU region index and the default
 * configuration of each memory region inside which dynamic
 * memory regions may be configured.
 */
static struct dynamic_region_info dyn_reg_info[MPU_DYNAMIC_REGION_AREAS_NUM];
33 #if defined(CONFIG_CPU_CORTEX_M23) || defined(CONFIG_CPU_CORTEX_M33) || \
34 	defined(CONFIG_CPU_CORTEX_M55) || defined(CONFIG_CPU_CORTEX_M85)
/* Write the MPU Memory Attribute Indirection Register 0 (MAIR0). */
static inline void mpu_set_mair0(uint32_t mair0)
{
	MPU->MAIR0 = mair0;
}
39 
/* Select the MPU region that subsequent RBAR/RLAR accesses address. */
static inline void mpu_set_rnr(uint32_t rnr)
{
	MPU->RNR = rnr;
}
44 
/* Write the Region Base Address Register of the selected region. */
static inline void mpu_set_rbar(uint32_t rbar)
{
	MPU->RBAR = rbar;
}
49 
/* Read the Region Base Address Register of the selected region. */
static inline uint32_t mpu_get_rbar(void)
{
	return MPU->RBAR;
}
54 
/* Write the Region Limit Address Register of the selected region. */
static inline void mpu_set_rlar(uint32_t rlar)
{
	MPU->RLAR = rlar;
}
59 
/* Read the Region Limit Address Register of the selected region. */
static inline uint32_t mpu_get_rlar(void)
{
	return MPU->RLAR;
}
64 
mpu_get_num_regions(void)65 static inline uint8_t mpu_get_num_regions(void)
66 {
67 	uint32_t type = MPU->TYPE;
68 
69 	type = (type & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos;
70 
71 	return (uint8_t)type;
72 }
73 
/* Disable (clear) the given MPU region via the CMSIS helper. */
static inline void mpu_clear_region(uint32_t rnr)
{
	ARM_MPU_ClrRegion(rnr);
}
78 
79 #elif defined(CONFIG_AARCH32_ARMV8_R)
/* Write MAIR0 via system register; barriers ensure the new memory
 * attributes take effect before subsequent instructions execute.
 */
static inline void mpu_set_mair0(uint32_t mair0)
{
	write_mair0(mair0);
	barrier_dsync_fence_full();
	barrier_isync_fence_full();
}
86 
/* Select the protection region (PRSELR) that subsequent PRBAR/PRLAR
 * accesses address; DSB orders the selection before those accesses.
 */
static inline void mpu_set_rnr(uint32_t rnr)
{
	write_prselr(rnr);
	barrier_dsync_fence_full();
}
92 
/* Write PRBAR of the selected region; barriers make the new base
 * address visible before subsequent instructions execute.
 */
static inline void mpu_set_rbar(uint32_t rbar)
{
	write_prbar(rbar);
	barrier_dsync_fence_full();
	barrier_isync_fence_full();
}
99 
/* Read PRBAR of the selected region. */
static inline uint32_t mpu_get_rbar(void)
{
	return read_prbar();
}
104 
/* Write PRLAR of the selected region; barriers make the new limit
 * address visible before subsequent instructions execute.
 */
static inline void mpu_set_rlar(uint32_t rlar)
{
	write_prlar(rlar);
	barrier_dsync_fence_full();
	barrier_isync_fence_full();
}
111 
/* Read PRLAR of the selected region. */
static inline uint32_t mpu_get_rlar(void)
{
	return read_prlar();
}
116 
mpu_get_num_regions(void)117 static inline uint8_t mpu_get_num_regions(void)
118 {
119 	uint32_t type = read_mpuir();
120 
121 	type = (type >> MPU_IR_REGION_Pos) & MPU_IR_REGION_Msk;
122 
123 	return (uint8_t)type;
124 }
125 
/* Disable a region by zeroing its base and limit registers
 * (clearing PRLAR.EN disables the region).
 */
static inline void mpu_clear_region(uint32_t rnr)
{
	mpu_set_rnr(rnr);
	mpu_set_rbar(0);
	mpu_set_rlar(0);
}
132 
133 #else
134 #error "Unsupported ARM CPU"
135 #endif
136 
137 /* Global MPU configuration at system initialization. */
/* Global MPU configuration at system initialization. */
static void mpu_init(void)
{
	/* Configure the cache-ability attributes for all the
	 * different types of memory regions (MAIR0 attribute table).
	 */
	mpu_set_mair0(MPU_MAIR_ATTRS);
}
145 
/* Program one MPU region: RNR must be written first, since RBAR/RLAR
 * accesses operate on the currently selected region.
 */
static void mpu_set_region(uint32_t rnr, uint32_t rbar, uint32_t rlar)
{
	mpu_set_rnr(rnr);
	mpu_set_rbar(rbar);
	mpu_set_rlar(rlar);
}
152 
153 /* This internal function performs MPU region initialization.
154  *
155  * Note:
156  *   The caller must provide a valid region index.
157  */
region_init(const uint32_t index,const struct arm_mpu_region * region_conf)158 static void region_init(const uint32_t index,
159 	const struct arm_mpu_region *region_conf)
160 {
161 	mpu_set_region(
162 		/* RNR */
163 		index,
164 		/* RBAR */
165 		(region_conf->base & MPU_RBAR_BASE_Msk)
166 		| (region_conf->attr.rbar &
167 			(MPU_RBAR_XN_Msk | MPU_RBAR_AP_Msk | MPU_RBAR_SH_Msk)),
168 		/* RLAR */
169 		(region_conf->attr.r_limit & MPU_RLAR_LIMIT_Msk)
170 		| ((region_conf->attr.mair_idx << MPU_RLAR_AttrIndx_Pos)
171 			& MPU_RLAR_AttrIndx_Msk)
172 		| MPU_RLAR_EN_Msk
173 	);
174 
175 	LOG_DBG("[%d] 0x%08x 0x%08x 0x%08x 0x%08x",
176 			index, region_conf->base, region_conf->attr.rbar,
177 			region_conf->attr.mair_idx, region_conf->attr.r_limit);
178 }
179 
180 /* @brief Partition sanity check
181  *
182  * This internal function performs run-time sanity check for
183  * MPU region start address and size.
184  *
185  * @param part Pointer to the data structure holding the partition
186  *             information (must be valid).
187  * */
mpu_partition_is_valid(const struct z_arm_mpu_partition * part)188 static int mpu_partition_is_valid(const struct z_arm_mpu_partition *part)
189 {
190 	/* Partition size must be a multiple of the minimum MPU region
191 	 * size. Start address of the partition must align with the
192 	 * minimum MPU region size.
193 	 */
194 	int partition_is_valid =
195 		(part->size >= CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE)
196 		&&
197 		((part->size &
198 			(~(CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE - 1)))
199 			== part->size)
200 		&&
201 		((part->start &
202 			(CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE - 1)) == 0U);
203 
204 	return partition_is_valid;
205 }
206 
207 /**
208  * This internal function returns the MPU region, in which a
209  * buffer, specified by its start address and size, lies. If
210  * a valid MPU region cannot be derived the function returns
211  * -EINVAL.
212  *
213  * Note that, for the function to work properly, the ARM MPU
214  * needs to be enabled.
215  *
216  */
217 #if defined(CONFIG_AARCH32_ARMV8_R)
get_region_index(uint32_t start,uint32_t size)218 static inline int get_region_index(uint32_t start, uint32_t size)
219 {
220 	uint32_t limit = (start + size - 1) & MPU_RLAR_LIMIT_Msk;
221 
222 	for (uint8_t idx = 0; idx < mpu_get_num_regions(); idx++) {
223 		mpu_set_rnr(idx);
224 		if (start >= (mpu_get_rbar() & MPU_RBAR_BASE_Msk) &&
225 		    limit <= (mpu_get_rlar() & MPU_RLAR_LIMIT_Msk)) {
226 			return idx;
227 		}
228 	}
229 	return -EINVAL;
230 }
231 #else
get_region_index(uint32_t start,uint32_t size)232 static inline int get_region_index(uint32_t start, uint32_t size)
233 {
234 	uint32_t region_start_addr = arm_cmse_mpu_region_get(start);
235 	uint32_t region_end_addr = arm_cmse_mpu_region_get(start + size - 1);
236 
237 	/* MPU regions are contiguous so return the region number,
238 	 * if both start and end address are in the same region.
239 	 */
240 	if (region_start_addr == region_end_addr) {
241 		return region_start_addr;
242 	}
243 	return -EINVAL;
244 }
245 #endif
246 
/* Return the base address of the given MPU region. */
static inline uint32_t mpu_region_get_base(const uint32_t index)
{
	mpu_set_rnr(index);
	return mpu_get_rbar() & MPU_RBAR_BASE_Msk;
}
252 
/* Update only the base-address field of the given region's RBAR,
 * preserving its access attribute bits (read-modify-write).
 */
static inline void mpu_region_set_base(const uint32_t index, const uint32_t base)
{
	mpu_set_rnr(index);
	mpu_set_rbar((mpu_get_rbar() & (~MPU_RBAR_BASE_Msk))
		     | (base & MPU_RBAR_BASE_Msk));
}
259 
/* Return the last (inclusive) address of the given MPU region: the
 * RLAR limit field with the low, non-limit bits set to all-ones.
 */
static inline uint32_t mpu_region_get_last_addr(const uint32_t index)
{
	mpu_set_rnr(index);
	return (mpu_get_rlar() & MPU_RLAR_LIMIT_Msk) | (~MPU_RLAR_LIMIT_Msk);
}
265 
/* Update only the limit-address field of the given region's RLAR,
 * preserving its attribute-index and enable bits (read-modify-write).
 */
static inline void mpu_region_set_limit(const uint32_t index, const uint32_t limit)
{
	mpu_set_rnr(index);
	mpu_set_rlar((mpu_get_rlar() & (~MPU_RLAR_LIMIT_Msk))
		     | (limit & MPU_RLAR_LIMIT_Msk));
}
272 
/* Read back the access attributes (XN/AP/SH from RBAR, and the MAIR
 * attribute index from RLAR) of the given MPU region into @p attr.
 */
static inline void mpu_region_get_access_attr(const uint32_t index,
	arm_mpu_region_attr_t *attr)
{
	mpu_set_rnr(index);

	attr->rbar = mpu_get_rbar() &
		(MPU_RBAR_XN_Msk | MPU_RBAR_AP_Msk | MPU_RBAR_SH_Msk);
	attr->mair_idx = (mpu_get_rlar() & MPU_RLAR_AttrIndx_Msk) >>
		MPU_RLAR_AttrIndx_Pos;
}
283 
/* Read back the full configuration (attributes, base and limit
 * addresses) of the given MPU region into @p region_conf.
 */
static inline void mpu_region_get_conf(const uint32_t index,
	struct arm_mpu_region *region_conf)
{
	mpu_set_rnr(index);

	/* Region attribution:
	 * - Cache-ability
	 * - Share-ability
	 * - Access Permissions
	 */
	mpu_region_get_access_attr(index, &region_conf->attr);

	/* Region base address */
	region_conf->base = mpu_get_rbar() & MPU_RBAR_BASE_Msk;

	/* Region limit address */
	region_conf->attr.r_limit = mpu_get_rlar() & MPU_RLAR_LIMIT_Msk;
}
302 
303 /**
304  * This internal function is utilized by the MPU driver to combine a given
305  * region attribute configuration and size and fill-in a driver-specific
306  * structure with the correct MPU region configuration.
307  */
/**
 * This internal function is utilized by the MPU driver to combine a given
 * region attribute configuration and size and fill-in a driver-specific
 * structure with the correct MPU region configuration.
 */
static inline void get_region_attr_from_mpu_partition_info(
	arm_mpu_region_attr_t *p_attr,
	const k_mem_partition_attr_t *attr, uint32_t base, uint32_t size)
{
	/* Keep only the XN/AP/SH attribute bits of the partition's RBAR. */
	p_attr->rbar = attr->rbar &
		(MPU_RBAR_XN_Msk | MPU_RBAR_AP_Msk | MPU_RBAR_SH_Msk);
	p_attr->mair_idx = attr->mair_idx;
	/* Derive the region limit from the base address and size. */
	p_attr->r_limit = REGION_LIMIT_ADDR(base, size);
}
317 
318 #if defined(CONFIG_USERSPACE)
319 
320 /**
321  * This internal function returns the minimum HW MPU region index
322  * that may hold the configuration of a dynamic memory region.
323  *
324  * Browse through the memory areas marked for dynamic MPU programming,
325  * pick the one with the minimum MPU region index. Return that index.
326  *
327  * The function is optimized for the (most common) use-case of a single
328  * marked area for dynamic memory regions.
329  */
get_dyn_region_min_index(void)330 static inline int get_dyn_region_min_index(void)
331 {
332 	int dyn_reg_min_index = dyn_reg_info[0].index;
333 #if MPU_DYNAMIC_REGION_AREAS_NUM > 1
334 	for (int i = 1; i < MPU_DYNAMIC_REGION_AREAS_NUM; i++) {
335 		if ((dyn_reg_info[i].index != -EINVAL) &&
336 			(dyn_reg_info[i].index < dyn_reg_min_index)
337 		) {
338 			dyn_reg_min_index = dyn_reg_info[i].index;
339 		}
340 	}
341 #endif
342 	return dyn_reg_min_index;
343 }
344 
/* Return the size, in bytes, of the given MPU region
 * (last address is inclusive, hence the +1).
 */
static inline uint32_t mpu_region_get_size(uint32_t index)
{
	return mpu_region_get_last_addr(index) + 1
		- mpu_region_get_base(index);
}
350 
351 /**
352  * This internal function checks if region is enabled or not.
353  *
354  * Note:
355  *   The caller must provide a valid region number.
356  */
/**
 * This internal function checks if region is enabled or not
 * (RLAR.EN bit).
 *
 * Note:
 *   The caller must provide a valid region number.
 */
static inline int is_enabled_region(uint32_t index)
{
	mpu_set_rnr(index);

	return (mpu_get_rlar() & MPU_RLAR_EN_Msk) ? 1 : 0;
}
363 
364 #if defined(CONFIG_AARCH32_ARMV8_R)
365 /**
366  * This internal function checks if the given buffer is in the region.
367  *
368  * Note:
369  *   The caller must provide a valid region number.
370  */
/**
 * This internal function checks if the given buffer is in the region.
 *
 * Note:
 *   The caller must provide a valid region number.
 */
static inline int is_in_region(uint32_t rnr, uint32_t start, uint32_t size)
{
	uint32_t end;
	/* Inclusive span: an empty buffer occupies its start address only. */
	uint32_t span = (size == 0U) ? 0U : (size - 1U);

	/* Reject buffers whose end address would wrap around. */
	if (u32_add_overflow(start, span, &end)) {
		return 0;
	}

	uint32_t region_start = mpu_region_get_base(rnr);
	uint32_t region_end = mpu_region_get_last_addr(rnr);

	return ((start >= region_start) && (end <= region_end)) ? 1 : 0;
}
391 
is_user_accessible_region(uint32_t rnr,int write)392 static inline int is_user_accessible_region(uint32_t rnr, int write)
393 {
394 	uint32_t r_ap;
395 
396 	mpu_set_rnr(rnr);
397 
398 	r_ap = (mpu_get_rbar() & MPU_RBAR_AP_Msk) >> MPU_RBAR_AP_Pos;
399 
400 	if (write != 0) {
401 		return r_ap == P_RW_U_RW;
402 	}
403 
404 	return ((r_ap == P_RW_U_RW) ||  (r_ap == P_RO_U_RO));
405 }
406 
407 /**
408  * This internal function validates whether a given memory buffer
409  * is user accessible or not.
410  */
/**
 * This internal function validates whether a given memory buffer
 * is user accessible or not.
 *
 * Returns 0 if the buffer lies entirely within at least one enabled,
 * user-accessible MPU region; -EPERM otherwise.
 */
static inline int mpu_buffer_validate(const void *addr, size_t size, int write)
{
	int32_t rnr;
	int rc = -EPERM;

	/* Lock interrupts: the RNR-based region inspection below must
	 * not be interleaved with other MPU register accesses.
	 */
	int key = arch_irq_lock();

	/* Iterate over all MPU regions. */
	for (rnr = 0; rnr < mpu_get_num_regions(); rnr++) {
		/* Skip regions that are disabled or do not fully
		 * contain the buffer.
		 */
		if (!is_enabled_region(rnr) ||
		    !is_in_region(rnr, (uint32_t)addr, size)) {
			continue;
		}

		if (is_user_accessible_region(rnr, write)) {
			rc = 0;
		}
	}

	arch_irq_unlock(key);
	return rc;
}
433 
434 #else
435 /**
436  * This internal function validates whether a given memory buffer
437  * is user accessible or not.
438  *
439  * Note: [Doc. number: ARM-ECM-0359818]
440  * "Some SAU, IDAU, and MPU configurations block the efficient implementation
441  * of an address range check. The CMSE intrinsic operates under the assumption
442  * that the configuration of the SAU, IDAU, and MPU is constrained as follows:
443  * - An object is allocated in a single MPU/SAU/IDAU region.
444  * - A stack is allocated in a single region.
445  *
446  * These points imply that the memory buffer does not span across multiple MPU,
447  * SAU, or IDAU regions."
448  *
449  * MPU regions are configurable, however, some platforms might have fixed-size
450  * SAU or IDAU regions. So, even if a buffer is allocated inside a single MPU
451  * region, it might span across multiple SAU/IDAU regions, which will make the
452  * TT-based address range check fail.
453  *
454  * Therefore, the function performs a second check, which is based on MPU only,
455  * in case the fast address range check fails.
456  *
457  */
/**
 * This internal function validates whether a given memory buffer
 * is user accessible or not. Returns 0 if accessible, -EPERM otherwise.
 *
 * Note: [Doc. number: ARM-ECM-0359818]
 * "Some SAU, IDAU, and MPU configurations block the efficient implementation
 * of an address range check. The CMSE intrinsic operates under the assumption
 * that the configuration of the SAU, IDAU, and MPU is constrained as follows:
 * - An object is allocated in a single MPU/SAU/IDAU region.
 * - A stack is allocated in a single region.
 *
 * These points imply that the memory buffer does not span across multiple MPU,
 * SAU, or IDAU regions."
 *
 * MPU regions are configurable, however, some platforms might have fixed-size
 * SAU or IDAU regions. So, even if a buffer is allocated inside a single MPU
 * region, it might span across multiple SAU/IDAU regions, which will make the
 * TT-based address range check fail.
 *
 * Therefore, the function performs a second check, which is based on MPU only,
 * in case the fast address range check fails.
 *
 */
static inline int mpu_buffer_validate(const void *addr, size_t size, int write)
{
	uint32_t _addr = (uint32_t)addr;
	uint32_t _size = (uint32_t)size;

	/* Fast path: CMSE TT-based address range check (unprivileged). */
	if (write) {
		if (arm_cmse_addr_range_readwrite_ok(_addr, _size, 1)) {
			return 0;
		}
	} else {
		if (arm_cmse_addr_range_read_ok(_addr, _size, 1)) {
			return 0;
		}
	}

#if defined(CONFIG_CPU_HAS_TEE)
	/*
	 * Validation failure may be due to SAU/IDAU presence.
	 * We re-check user accessibility based on MPU only.
	 */
	int32_t r_index_base = arm_cmse_mpu_region_get(_addr);
	int32_t r_index_last = arm_cmse_mpu_region_get(_addr + _size - 1);

	/* Buffer must lie entirely within a single, valid MPU region. */
	if ((r_index_base != -EINVAL) && (r_index_base == r_index_last)) {
		/* Valid MPU region, check permissions on base address only. */
		if (write) {
			if (arm_cmse_addr_readwrite_ok(_addr, 1)) {
				return 0;
			}
		} else {
			if (arm_cmse_addr_read_ok(_addr, 1)) {
				return 0;
			}
		}
	}
#endif /* CONFIG_CPU_HAS_TEE */
	return -EPERM;
}
496 #endif /* CONFIG_AARCH32_ARMV8_R */
497 
498 #endif /* CONFIG_USERSPACE */
499 
/* Forward declarations; presumably defined in the MPU driver source
 * that includes this header — TODO confirm against arm_mpu.c.
 */
static int region_allocate_and_init(const uint8_t index,
	const struct arm_mpu_region *region_conf);

static int mpu_configure_region(const uint8_t index,
	const struct z_arm_mpu_partition *new_region);

#if !defined(CONFIG_MPU_GAP_FILLING)
/* Used only when partitions are programmed without gap filling. */
static int mpu_configure_regions(const struct z_arm_mpu_partition
	regions[], uint8_t regions_num, uint8_t start_reg_index,
	bool do_sanity_check);
#endif
511 
512 /* This internal function programs a set of given MPU regions
513  * over a background memory area, optionally performing a
514  * sanity check of the memory regions to be programmed.
515  *
516  * The function performs a full partition of the background memory
517  * area, effectively, leaving no space in this area uncovered by MPU.
518  */
mpu_configure_regions_and_partition(const struct z_arm_mpu_partition regions[],uint8_t regions_num,uint8_t start_reg_index,bool do_sanity_check)519 static int mpu_configure_regions_and_partition(const struct z_arm_mpu_partition
520 	regions[], uint8_t regions_num, uint8_t start_reg_index,
521 	bool do_sanity_check)
522 {
523 	int i;
524 	int reg_index = start_reg_index;
525 
526 	for (i = 0; i < regions_num; i++) {
527 		if (regions[i].size == 0U) {
528 			continue;
529 		}
530 		/* Non-empty region. */
531 
532 		if (do_sanity_check &&
533 			(!mpu_partition_is_valid(&regions[i]))) {
534 			LOG_ERR("Partition %u: sanity check failed.", i);
535 			return -EINVAL;
536 		}
537 
538 		/* Derive the index of the underlying MPU region,
539 		 * inside which the new region will be configured.
540 		 */
541 		int u_reg_index =
542 			get_region_index(regions[i].start, regions[i].size);
543 
544 		if ((u_reg_index == -EINVAL) ||
545 			(u_reg_index > (reg_index - 1))) {
546 			LOG_ERR("Invalid underlying region index %u",
547 				u_reg_index);
548 			return -EINVAL;
549 		}
550 
551 		/*
552 		 * The new memory region is to be placed inside the underlying
553 		 * region, possibly splitting the underlying region into two.
554 		 */
555 		uint32_t u_reg_base = mpu_region_get_base(u_reg_index);
556 		uint32_t u_reg_last = mpu_region_get_last_addr(u_reg_index);
557 		uint32_t reg_last = regions[i].start + regions[i].size - 1;
558 
559 		if ((regions[i].start == u_reg_base) &&
560 			(reg_last == u_reg_last)) {
561 			/* The new region overlaps entirely with the
562 			 * underlying region. In this case we simply
563 			 * update the partition attributes of the
564 			 * underlying region with those of the new
565 			 * region.
566 			 */
567 			mpu_configure_region(u_reg_index, &regions[i]);
568 		} else if (regions[i].start == u_reg_base) {
569 			/* The new region starts exactly at the start of the
570 			 * underlying region; the start of the underlying
571 			 * region needs to be set to the end of the new region.
572 			 */
573 			mpu_region_set_base(u_reg_index,
574 				regions[i].start + regions[i].size);
575 
576 			reg_index =
577 				mpu_configure_region(reg_index, &regions[i]);
578 
579 			if (reg_index == -EINVAL) {
580 				return reg_index;
581 			}
582 
583 			reg_index++;
584 		} else if (reg_last == u_reg_last) {
585 			/* The new region ends exactly at the end of the
586 			 * underlying region; the end of the underlying
587 			 * region needs to be set to the start of the
588 			 * new region.
589 			 */
590 			mpu_region_set_limit(u_reg_index,
591 				regions[i].start - 1);
592 
593 			reg_index =
594 				mpu_configure_region(reg_index, &regions[i]);
595 
596 			if (reg_index == -EINVAL) {
597 				return reg_index;
598 			}
599 
600 			reg_index++;
601 		} else {
602 			/* The new regions lies strictly inside the
603 			 * underlying region, which needs to split
604 			 * into two regions.
605 			 */
606 			mpu_region_set_limit(u_reg_index,
607 				regions[i].start - 1);
608 
609 			reg_index =
610 				mpu_configure_region(reg_index, &regions[i]);
611 
612 			if (reg_index == -EINVAL) {
613 				return reg_index;
614 			}
615 			reg_index++;
616 
617 			/* The additional region shall have the same
618 			 * access attributes as the initial underlying
619 			 * region.
620 			 */
621 			struct arm_mpu_region fill_region;
622 
623 			mpu_region_get_access_attr(u_reg_index,
624 				&fill_region.attr);
625 			fill_region.base = regions[i].start +
626 				regions[i].size;
627 			fill_region.attr.r_limit =
628 			REGION_LIMIT_ADDR((regions[i].start +
629 				regions[i].size), (u_reg_last - reg_last));
630 
631 			reg_index =
632 				region_allocate_and_init(reg_index,
633 					(const struct arm_mpu_region *)
634 						&fill_region);
635 
636 			if (reg_index == -EINVAL) {
637 				return reg_index;
638 			}
639 
640 			reg_index++;
641 		}
642 	}
643 
644 	return reg_index;
645 }
646 
647 /* This internal function programs the static MPU regions.
648  *
649  * It returns the number of MPU region indices configured.
650  *
651  * Note:
652  * If the static MPU regions configuration has not been successfully
653  * performed, the error signal is propagated to the caller of the function.
654  */
/* Program the static MPU regions; returns the number of MPU region
 * indices configured, or propagates -EINVAL on failure.
 *
 * NOTE(review): static_regions_num is a file-scope counter defined in
 * the source file that includes this header — it is read as the first
 * free region index and updated with the new count on success.
 */
static int mpu_configure_static_mpu_regions(const struct z_arm_mpu_partition
	static_regions[], const uint8_t regions_num,
	const uint32_t background_area_base,
	const uint32_t background_area_end)
{
	int mpu_reg_index = static_regions_num;

	/* In ARMv8-M architecture the static regions are programmed on SRAM,
	 * forming a full partition of the background area, specified by the
	 * given boundaries.
	 */
	ARG_UNUSED(background_area_base);
	ARG_UNUSED(background_area_end);

	mpu_reg_index = mpu_configure_regions_and_partition(static_regions,
		regions_num, mpu_reg_index, true);

	/* Record the new count (or the -EINVAL error value, which the
	 * caller is expected to detect from the return value).
	 */
	static_regions_num = mpu_reg_index;

	return mpu_reg_index;
}
676 
677 /* This internal function marks and stores the configuration of memory areas
678  * where dynamic region programming is allowed. Return zero on success, or
679  * -EINVAL on error.
680  */
mpu_mark_areas_for_dynamic_regions(const struct z_arm_mpu_partition dyn_region_areas[],const uint8_t dyn_region_areas_num)681 static int mpu_mark_areas_for_dynamic_regions(
682 		const struct z_arm_mpu_partition dyn_region_areas[],
683 		const uint8_t dyn_region_areas_num)
684 {
685 	/* In ARMv8-M architecture we need to store the index values
686 	 * and the default configuration of the MPU regions, inside
687 	 * which dynamic memory regions may be programmed at run-time.
688 	 */
689 	for (int i = 0; i < dyn_region_areas_num; i++) {
690 		if (dyn_region_areas[i].size == 0U) {
691 			continue;
692 		}
693 		/* Non-empty area */
694 
695 		/* Retrieve HW MPU region index */
696 		dyn_reg_info[i].index =
697 			get_region_index(dyn_region_areas[i].start,
698 					dyn_region_areas[i].size);
699 
700 		if (dyn_reg_info[i].index == -EINVAL) {
701 
702 			return -EINVAL;
703 		}
704 
705 		if (dyn_reg_info[i].index >= static_regions_num) {
706 
707 			return -EINVAL;
708 		}
709 
710 		/* Store default configuration */
711 		mpu_region_get_conf(dyn_reg_info[i].index,
712 			&dyn_reg_info[i].region_conf);
713 	}
714 
715 	return 0;
716 }
717 
718 /**
719  *  Get the number of supported MPU regions.
720  */
/**
 *  Get the number of supported MPU regions
 *  (thin wrapper over the architecture-specific accessor).
 */
static inline uint8_t get_num_regions(void)
{
	return mpu_get_num_regions();
}
725 
726 /* This internal function programs the dynamic MPU regions.
727  *
728  * It returns the number of MPU region indices configured.
729  *
730  * Note:
731  * If the dynamic MPU regions configuration has not been successfully
732  * performed, the error signal is propagated to the caller of the function.
733  */
/* Program the dynamic MPU regions on top of the static configuration.
 *
 * Returns the number of MPU region indices configured; any error from
 * the underlying configuration routines is propagated to the caller.
 */
static int mpu_configure_dynamic_mpu_regions(const struct z_arm_mpu_partition
	dynamic_regions[], uint8_t regions_num)
{
	/* First free region index: everything below it is static. */
	int mpu_reg_index = static_regions_num;

	/* Disable all MPU regions except for the static ones. */
	for (int i = mpu_reg_index; i < get_num_regions(); i++) {
		mpu_clear_region(i);
	}

#if defined(CONFIG_MPU_GAP_FILLING)
	/* Reset MPU regions inside which dynamic memory regions may
	 * be programmed, restoring their stored default configuration.
	 */
	for (int i = 0; i < MPU_DYNAMIC_REGION_AREAS_NUM; i++) {
		region_init(dyn_reg_info[i].index,
			&dyn_reg_info[i].region_conf);
	}

	/* In ARMv8-M architecture the dynamic regions are programmed on SRAM,
	 * forming a full partition of the background area, specified by the
	 * given boundaries.
	 */
	mpu_reg_index = mpu_configure_regions_and_partition(dynamic_regions,
		regions_num, mpu_reg_index, true);
#else

	/* We are going to skip the full partition of the background areas.
	 * So we can disable MPU regions inside which dynamic memory regions
	 * may be programmed.
	 */
	for (int i = 0; i < MPU_DYNAMIC_REGION_AREAS_NUM; i++) {
		mpu_clear_region(dyn_reg_info[i].index);
	}

	/* The dynamic regions are now programmed on top of
	 * existing SRAM region configuration.
	 */
	mpu_reg_index = mpu_configure_regions(dynamic_regions,
		regions_num, mpu_reg_index, true);

#endif /* CONFIG_MPU_GAP_FILLING */
	return mpu_reg_index;
}
778 
779 #endif	/* ZEPHYR_ARCH_ARM_CORE_AARCH32_MPU_ARM_MPU_V8_INTERNAL_H_ */
780