1 /*
2 * Copyright (c) 2017 Linaro Limited.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/device.h>
8 #include <zephyr/init.h>
9 #include <zephyr/kernel.h>
10 #include <zephyr/sys/barrier.h>
11 #include "arm_core_mpu_dev.h"
12 #include <zephyr/linker/linker-defs.h>
13 #include <kernel_arch_data.h>
14 #include <zephyr/mem_mgmt/mem_attr.h>
15 #include <zephyr/dt-bindings/memory-attr/memory-attr-arm.h>
16
17 #define LOG_LEVEL CONFIG_MPU_LOG_LEVEL
18 #include <zephyr/logging/log.h>
19 LOG_MODULE_DECLARE(mpu);
20
21 #if defined(CONFIG_ARMV8_M_BASELINE) || defined(CONFIG_ARMV8_M_MAINLINE)
22 /* The order here is on purpose since ARMv8-M SoCs may define
23 * CONFIG_ARMV6_M_ARMV8_M_BASELINE or CONFIG_ARMV7_M_ARMV8_M_MAINLINE
24 * so we want to check for ARMv8-M first.
25 */
26 #define MPU_NODEID DT_INST(0, arm_armv8m_mpu)
27 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
28 #define MPU_NODEID DT_INST(0, arm_armv7m_mpu)
29 #elif defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
30 #define MPU_NODEID DT_INST(0, arm_armv6m_mpu)
31 #endif
32
33 #define NODE_HAS_PROP_AND_OR(node_id, prop) \
34 DT_NODE_HAS_PROP(node_id, prop) ||
35
36 BUILD_ASSERT((DT_FOREACH_STATUS_OKAY_NODE_VARGS(
37 NODE_HAS_PROP_AND_OR, zephyr_memory_region_mpu) false) == false,
38 "`zephyr,memory-region-mpu` was deprecated in favor of `zephyr,memory-attr`");
39
40 #define NULL_PAGE_DETECT_NODE_FINDER(node_id, prop) \
41 (DT_NODE_HAS_PROP(node_id, prop) && (DT_REG_ADDR(node_id) == 0) && \
42 (DT_REG_SIZE(node_id) >= CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE)) ||
43
44 #define DT_NULL_PAGE_DETECT_NODE_EXIST \
45 (DT_FOREACH_STATUS_OKAY_VARGS(zephyr_memory_region, NULL_PAGE_DETECT_NODE_FINDER, \
46 zephyr_memory_attr) false)
47
48 /*
49 * Global status variable holding the number of HW MPU region indices, which
50 * have been reserved by the MPU driver to program the static (fixed) memory
51 * regions.
52 */
53 static uint8_t static_regions_num;
54
55 /* Include architecture-specific internal headers. */
56 #if defined(CONFIG_CPU_CORTEX_M0PLUS) || \
57 defined(CONFIG_CPU_CORTEX_M3) || \
58 defined(CONFIG_CPU_CORTEX_M4) || \
59 defined(CONFIG_CPU_CORTEX_M7) || \
60 defined(CONFIG_ARMV7_R)
61 #include "arm_mpu_v7_internal.h"
62 #elif defined(CONFIG_CPU_CORTEX_M23) || \
63 defined(CONFIG_CPU_CORTEX_M33) || \
64 defined(CONFIG_CPU_CORTEX_M55) || \
65 defined(CONFIG_CPU_CORTEX_M85) || \
66 defined(CONFIG_AARCH32_ARMV8_R)
67 #include "arm_mpu_v8_internal.h"
68 #else
69 #error "Unsupported ARM CPU"
70 #endif
71
region_allocate_and_init(const uint8_t index,const struct arm_mpu_region * region_conf)72 static int region_allocate_and_init(const uint8_t index,
73 const struct arm_mpu_region *region_conf)
74 {
75 /* Attempt to allocate new region index. */
76 if (index > (get_num_regions() - 1U)) {
77
78 /* No available MPU region index. */
79 LOG_ERR("Failed to allocate new MPU region %u\n", index);
80 return -EINVAL;
81 }
82
83 LOG_DBG("Program MPU region at index 0x%x", index);
84
85 /* Program region */
86 region_init(index, region_conf);
87
88 return index;
89 }
90
91 #define _BUILD_REGION_CONF(reg, _ATTR) \
92 (struct arm_mpu_region) ARM_MPU_REGION_INIT((reg).dt_name, \
93 (reg).dt_addr, \
94 (reg).dt_size, \
95 _ATTR)
96 #ifdef CONFIG_MEM_ATTR
/* This internal function programs the MPU regions defined in the DT when using
 * the `zephyr,memory-attr = <( DT_MEM_ARM(...) )>` property.
 *
 * Every region returned by mem_attr_get_regions() whose ARM attribute maps
 * to one of the supported MPU attribute classes (RAM, RAM_NOCACHE, FLASH,
 * PPB, IO, EXTMEM) is programmed at the next free HW index; regions with
 * any other attribute are skipped.
 *
 * @param reg_index on entry, first free MPU region index; on exit, advanced
 *                  past every region programmed here.
 *
 * @return 0 on success, -EINVAL if a region index could not be allocated.
 */
static int mpu_configure_regions_from_dt(uint8_t *reg_index)
{
	const struct mem_attr_region_t *region;
	size_t num_regions;

	/* Fetch the array of `zephyr,memory-attr` regions from devicetree. */
	num_regions = mem_attr_get_regions(&region);

	for (size_t idx = 0; idx < num_regions; idx++) {
		struct arm_mpu_region region_conf;

		/* Map the ARM-specific DT attribute onto an MPU attribute
		 * set. Each case is compiled in only when the architecture
		 * internal header defines the matching REGION_*_ATTR macro.
		 */
		switch (DT_MEM_ARM_GET(region[idx].dt_attr)) {
		case DT_MEM_ARM_MPU_RAM:
			region_conf = _BUILD_REGION_CONF(region[idx], REGION_RAM_ATTR);
			break;
#ifdef REGION_RAM_NOCACHE_ATTR
		case DT_MEM_ARM_MPU_RAM_NOCACHE:
			region_conf = _BUILD_REGION_CONF(region[idx], REGION_RAM_NOCACHE_ATTR);
			/* A non-cacheable MPU mapping contradicts a cacheable
			 * generic attribute on the same DT node.
			 */
			__ASSERT(!(region[idx].dt_attr & DT_MEM_CACHEABLE),
				 "RAM_NOCACHE with DT_MEM_CACHEABLE attribute\n");
			break;
#endif
#ifdef REGION_FLASH_ATTR
		case DT_MEM_ARM_MPU_FLASH:
			region_conf = _BUILD_REGION_CONF(region[idx], REGION_FLASH_ATTR);
			break;
#endif
#ifdef REGION_PPB_ATTR
		case DT_MEM_ARM_MPU_PPB:
			region_conf = _BUILD_REGION_CONF(region[idx], REGION_PPB_ATTR);
			break;
#endif
#ifdef REGION_IO_ATTR
		case DT_MEM_ARM_MPU_IO:
			region_conf = _BUILD_REGION_CONF(region[idx], REGION_IO_ATTR);
			break;
#endif
#ifdef REGION_EXTMEM_ATTR
		case DT_MEM_ARM_MPU_EXTMEM:
			region_conf = _BUILD_REGION_CONF(region[idx], REGION_EXTMEM_ATTR);
			break;
#endif
		default:
			/* Attribute other than ARM-specific is set.
			 * This region should not be configured in MPU.
			 */
			continue;
		}
#if defined(CONFIG_ARMV7_R)
		/* ARMv7-R encodes the region size as a RASR field value. */
		region_conf.size = size_to_mpu_rasr_size(region[idx].dt_size);
#endif

		if (region_allocate_and_init((*reg_index),
					     (const struct arm_mpu_region *) &region_conf) < 0) {
			return -EINVAL;
		}

		(*reg_index)++;
	}

	return 0;
}
161 #endif /* CONFIG_MEM_ATTR */
162 /* This internal function programs an MPU region
163 * of a given configuration at a given MPU index.
164 */
mpu_configure_region(const uint8_t index,const struct z_arm_mpu_partition * new_region)165 static int mpu_configure_region(const uint8_t index,
166 const struct z_arm_mpu_partition *new_region)
167 {
168 struct arm_mpu_region region_conf;
169
170 LOG_DBG("Configure MPU region at index 0x%x", index);
171
172 /* Populate internal ARM MPU region configuration structure. */
173 region_conf.base = new_region->start;
174 #if defined(CONFIG_ARMV7_R)
175 region_conf.size = size_to_mpu_rasr_size(new_region->size);
176 #endif
177 get_region_attr_from_mpu_partition_info(®ion_conf.attr,
178 &new_region->attr, new_region->start, new_region->size);
179
180 /* Allocate and program region */
181 return region_allocate_and_init(index,
182 (const struct arm_mpu_region *)®ion_conf);
183 }
184
185 #if !defined(CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS) || \
186 !defined(CONFIG_MPU_GAP_FILLING)
187 /* This internal function programs a set of given MPU regions
188 * over a background memory area, optionally performing a
189 * sanity check of the memory regions to be programmed.
190 */
mpu_configure_regions(const struct z_arm_mpu_partition regions[],uint8_t regions_num,uint8_t start_reg_index,bool do_sanity_check)191 static int mpu_configure_regions(const struct z_arm_mpu_partition
192 regions[], uint8_t regions_num, uint8_t start_reg_index,
193 bool do_sanity_check)
194 {
195 int i;
196 int reg_index = start_reg_index;
197
198 for (i = 0; i < regions_num; i++) {
199 if (regions[i].size == 0U) {
200 continue;
201 }
202 /* Non-empty region. */
203
204 if (do_sanity_check &&
205 (!mpu_partition_is_valid(®ions[i]))) {
206 LOG_ERR("Partition %u: sanity check failed.", i);
207 return -EINVAL;
208 }
209
210 reg_index = mpu_configure_region(reg_index, ®ions[i]);
211
212 if (reg_index == -EINVAL) {
213 return reg_index;
214 }
215
216 /* Increment number of programmed MPU indices. */
217 reg_index++;
218 }
219
220 return reg_index;
221 }
222 #endif
223
224 /* ARM Core MPU Driver API Implementation for ARM MPU */
225
226
227 #if defined(CONFIG_CPU_AARCH32_CORTEX_R)
228 /**
229 * @brief enable the MPU by setting bit in SCTRL register
230 */
arm_core_mpu_enable(void)231 void arm_core_mpu_enable(void)
232 {
233 uint32_t val;
234
235 val = __get_SCTLR();
236 val |= SCTLR_MPU_ENABLE;
237 __set_SCTLR(val);
238
239 /* Make sure that all the registers are set before proceeding */
240 barrier_dsync_fence_full();
241 barrier_isync_fence_full();
242 }
243
244 /**
245 * @brief disable the MPU by clearing bit in SCTRL register
246 */
arm_core_mpu_disable(void)247 void arm_core_mpu_disable(void)
248 {
249 uint32_t val;
250
251 /* Force any outstanding transfers to complete before disabling MPU */
252 barrier_dsync_fence_full();
253
254 val = __get_SCTLR();
255 val &= ~SCTLR_MPU_ENABLE;
256 __set_SCTLR(val);
257
258 /* Make sure that all the registers are set before proceeding */
259 barrier_dsync_fence_full();
260 barrier_isync_fence_full();
261 }
262 #else
263 /**
264 * @brief enable the MPU
265 */
arm_core_mpu_enable(void)266 void arm_core_mpu_enable(void)
267 {
268 /* Enable MPU and use the default memory map as a
269 * background region for privileged software access if desired.
270 */
271 #if defined(CONFIG_MPU_DISABLE_BACKGROUND_MAP)
272 MPU->CTRL = MPU_CTRL_ENABLE_Msk;
273 #else
274 MPU->CTRL = MPU_CTRL_ENABLE_Msk | MPU_CTRL_PRIVDEFENA_Msk;
275 #endif
276
277 /* Make sure that all the registers are set before proceeding */
278 barrier_dsync_fence_full();
279 barrier_isync_fence_full();
280 }
281
/**
 * @brief disable the MPU
 *
 * Clears MPU_CTRL entirely (ENABLE and, if set, PRIVDEFENA); the MPU
 * must be re-enabled via arm_core_mpu_enable() afterwards.
 */
void arm_core_mpu_disable(void)
{
	/* Force any outstanding transfers to complete before disabling MPU */
	barrier_dmem_fence_full();

	/* Disable MPU */
	MPU->CTRL = 0;
}
293 #endif
294
295 #if defined(CONFIG_USERSPACE)
/**
 * @brief update configuration of an active memory partition
 *
 * Locates the already-programmed MPU region whose base and size match
 * @p partition, then reprograms it with @p new_attr. ASSERTs (and, in
 * non-assert builds, reprograms the region at index get_num_regions(),
 * which region_allocate_and_init rejects) if no matching region exists.
 */
void arm_core_mpu_mem_partition_config_update(
	struct z_arm_mpu_partition *partition,
	k_mem_partition_attr_t *new_attr)
{
	/* Find the partition. ASSERT if not found. */
	uint8_t i;
	/* get_num_regions() is one past any valid index, so it doubles as
	 * the "not found" sentinel for the search below.
	 */
	uint8_t reg_index = get_num_regions();

	/* Only the dynamic region range can hold memory-domain partitions;
	 * indices below get_dyn_region_min_index() are static regions.
	 */
	for (i = get_dyn_region_min_index(); i < get_num_regions(); i++) {
		if (!is_enabled_region(i)) {
			continue;
		}

		uint32_t base = mpu_region_get_base(i);

		if (base != partition->start) {
			continue;
		}

		uint32_t size = mpu_region_get_size(i);

		if (size != partition->size) {
			continue;
		}

		/* Region found */
		reg_index = i;
		break;
	}
	__ASSERT(reg_index != get_num_regions(),
		 "Memory domain partition %p size %zu not found\n",
		 (void *)partition->start, partition->size);

	/* Modify the permissions */
	partition->attr = *new_attr;
	mpu_configure_region(reg_index, partition);
}
336
337 /**
338 * @brief get the maximum number of available (free) MPU region indices
339 * for configuring dynamic MPU partitions
340 */
arm_core_mpu_get_max_available_dyn_regions(void)341 int arm_core_mpu_get_max_available_dyn_regions(void)
342 {
343 return get_num_regions() - static_regions_num;
344 }
345
/**
 * @brief validate the given buffer is user accessible or not
 *
 * Presumes the background mapping is NOT user accessible.
 */
int arm_core_mpu_buffer_validate(const void *addr, size_t size, int write)
{
	/* Delegate to the architecture-specific permission check. */
	int rc = mpu_buffer_validate(addr, size, write);

	return rc;
}
355
356 #endif /* CONFIG_USERSPACE */
357
358 /**
359 * @brief configure fixed (static) MPU regions.
360 */
arm_core_mpu_configure_static_mpu_regions(const struct z_arm_mpu_partition * static_regions,const uint8_t regions_num,const uint32_t background_area_start,const uint32_t background_area_end)361 void arm_core_mpu_configure_static_mpu_regions(const struct z_arm_mpu_partition
362 *static_regions, const uint8_t regions_num,
363 const uint32_t background_area_start, const uint32_t background_area_end)
364 {
365 if (mpu_configure_static_mpu_regions(static_regions, regions_num,
366 background_area_start, background_area_end) == -EINVAL) {
367
368 __ASSERT(0, "Configuring %u static MPU regions failed\n",
369 regions_num);
370 }
371 }
372
373 #if defined(CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS)
374 /**
375 * @brief mark memory areas for dynamic region configuration
376 */
arm_core_mpu_mark_areas_for_dynamic_regions(const struct z_arm_mpu_partition dyn_region_areas[],const uint8_t dyn_region_areas_num)377 void arm_core_mpu_mark_areas_for_dynamic_regions(
378 const struct z_arm_mpu_partition dyn_region_areas[],
379 const uint8_t dyn_region_areas_num)
380 {
381 if (mpu_mark_areas_for_dynamic_regions(dyn_region_areas,
382 dyn_region_areas_num) == -EINVAL) {
383
384 __ASSERT(0, "Marking %u areas for dynamic regions failed\n",
385 dyn_region_areas_num);
386 }
387 }
388 #endif /* CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS */
389
390 /**
391 * @brief configure dynamic MPU regions.
392 */
arm_core_mpu_configure_dynamic_mpu_regions(const struct z_arm_mpu_partition * dynamic_regions,uint8_t regions_num)393 void arm_core_mpu_configure_dynamic_mpu_regions(const struct z_arm_mpu_partition
394 *dynamic_regions, uint8_t regions_num)
395 {
396 if (mpu_configure_dynamic_mpu_regions(dynamic_regions, regions_num)
397 == -EINVAL) {
398
399 __ASSERT(0, "Configuring %u dynamic MPU regions failed\n",
400 regions_num);
401 }
402 }
403
404 /* ARM MPU Driver Initial Setup */
405
/*
 * @brief MPU default configuration
 *
 * This function provides the default configuration mechanism for the Memory
 * Protection Unit (MPU). Sequence: validate the SoC region table fits the
 * hardware, disable the MPU, program the fixed SoC regions (plus DT-derived
 * regions when CONFIG_MEM_ATTR is enabled), clear all remaining HW regions,
 * re-enable the MPU and, optionally, reserve one extra region for
 * null-pointer dereference detection.
 */
int z_arm_mpu_init(void)
{
	uint32_t r_index;

	if (mpu_config.num_regions > get_num_regions()) {
		/* Attempt to configure more MPU regions than
		 * what is supported by hardware. As this operation
		 * is executed during system (pre-kernel) initialization,
		 * we want to ensure we can detect an attempt to
		 * perform invalid configuration.
		 */
		__ASSERT(0,
			"Request to configure: %u regions (supported: %u)\n",
			mpu_config.num_regions,
			get_num_regions()
		);
		return -1;
	}

	LOG_DBG("total region count: %d", get_num_regions());

	arm_core_mpu_disable();

#if defined(CONFIG_NOCACHE_MEMORY)
	/* Clean and invalidate data cache if it is enabled and
	 * that was not already done at boot
	 */
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
	if (__get_SCTLR() & SCTLR_C_Msk) {
		L1C_CleanInvalidateDCacheAll();
	}
#else
#if !defined(CONFIG_INIT_ARCH_HW_AT_BOOT)
	if (SCB->CCR & SCB_CCR_DC_Msk) {
		SCB_CleanInvalidateDCache();
	}
#endif
#endif
#endif /* CONFIG_NOCACHE_MEMORY */

	/* Architecture-specific configuration */
	mpu_init();

	/* Program fixed regions configured at SOC definition. */
	for (r_index = 0U; r_index < mpu_config.num_regions; r_index++) {
		region_init(r_index, &mpu_config.mpu_regions[r_index]);
	}

	/* Update the number of programmed MPU regions. */
	static_regions_num = mpu_config.num_regions;
#ifdef CONFIG_MEM_ATTR
	/* DT-defined MPU regions (advances static_regions_num). */
	if (mpu_configure_regions_from_dt(&static_regions_num) == -EINVAL) {
		__ASSERT(0, "Failed to allocate MPU regions from DT\n");
		return -EINVAL;
	}
#endif /* CONFIG_MEM_ATTR */
	/* Clear all regions before enabling MPU */
	for (int i = static_regions_num; i < get_num_regions(); i++) {
		mpu_clear_region(i);
	}

	arm_core_mpu_enable();

	/* Program additional fixed flash region for null-pointer
	 * dereferencing detection (debug feature)
	 */
#if defined(CONFIG_NULL_POINTER_EXCEPTION_DETECTION_MPU)
#if (defined(CONFIG_ARMV8_M_BASELINE) || defined(CONFIG_ARMV8_M_MAINLINE)) && \
	(CONFIG_FLASH_BASE_ADDRESS > CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE) && \
	(!DT_NULL_PAGE_DETECT_NODE_EXIST)

#pragma message "Null-Pointer exception detection cannot be configured on un-mapped flash areas"
#else
	const struct z_arm_mpu_partition unmap_region = {
		.start = 0x0,
		.size = CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE,
#if defined(CONFIG_ARMV8_M_BASELINE) || defined(CONFIG_ARMV8_M_MAINLINE)
		/* Overlapping region (with any permissions)
		 * will result in fault generation
		 */
		.attr = K_MEM_PARTITION_P_RO_U_NA,
#else
		/* Explicit no-access policy */
		.attr = K_MEM_PARTITION_P_NA_U_NA,
#endif
	};

	/* The flash region for null pointer dereferencing detection shall
	 * comply with the regular MPU partition definition restrictions
	 * (size and alignment).
	 */
	_ARCH_MEM_PARTITION_ALIGN_CHECK(0x0,
		CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE);

#if defined(CONFIG_ARMV8_M_BASELINE) || defined(CONFIG_ARMV8_M_MAINLINE)
	/* ARMv8-M requires that the area:
	 * 0x0 - CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE
	 * is not unmapped (belongs to a valid MPU region already).
	 */
	if ((arm_cmse_mpu_region_get(0x0) == -EINVAL) ||
		(arm_cmse_mpu_region_get(
			CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE - 1)
		== -EINVAL)) {
		__ASSERT(0,
			"Null pointer detection page unmapped\n");
	}
#endif

	if (mpu_configure_region(static_regions_num, &unmap_region) == -EINVAL) {

		__ASSERT(0,
			"Programming null-pointer detection region failed\n");
		return -EINVAL;
	}

	static_regions_num++;

#endif
#endif /* CONFIG_NULL_POINTER_EXCEPTION_DETECTION_MPU */

	/* Sanity check for number of regions in Cortex-M0+, M3, and M4. */
#if defined(CONFIG_CPU_CORTEX_M0PLUS) || \
	defined(CONFIG_CPU_CORTEX_M3) || \
	defined(CONFIG_CPU_CORTEX_M4)
	__ASSERT(
		(MPU->TYPE & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos == 8,
		"Invalid number of MPU regions\n");
#endif /* CORTEX_M0PLUS || CPU_CORTEX_M3 || CPU_CORTEX_M4 */

	return 0;
}
544