1 /*
2 * Copyright (c) 2017 Linaro Limited.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/device.h>
8 #include <zephyr/init.h>
9 #include <zephyr/kernel.h>
10 #include <zephyr/sys/barrier.h>
11 #include "arm_core_mpu_dev.h"
12 #include <zephyr/linker/linker-defs.h>
13 #include <kernel_arch_data.h>
14 #include <zephyr/mem_mgmt/mem_attr.h>
15 #include <zephyr/dt-bindings/memory-attr/memory-attr-arm.h>
16
17 #define LOG_LEVEL CONFIG_MPU_LOG_LEVEL
18 #include <zephyr/logging/log.h>
19 LOG_MODULE_DECLARE(mpu);
20
21 #if defined(CONFIG_ARMV8_M_BASELINE) || defined(CONFIG_ARMV8_M_MAINLINE)
22 /* The order here is on purpose since ARMv8-M SoCs may define
23 * CONFIG_ARMV6_M_ARMV8_M_BASELINE or CONFIG_ARMV7_M_ARMV8_M_MAINLINE
24 * so we want to check for ARMv8-M first.
25 */
26 #define MPU_NODEID DT_INST(0, arm_armv8m_mpu)
27 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
28 #define MPU_NODEID DT_INST(0, arm_armv7m_mpu)
29 #elif defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
30 #define MPU_NODEID DT_INST(0, arm_armv6m_mpu)
31 #endif
32
/* Expands to `DT_NODE_HAS_PROP(node_id, prop) ||` so that
 * DT_FOREACH_STATUS_OKAY_NODE_VARGS can chain one term per node into a
 * single boolean OR expression (the caller appends a trailing `false`
 * to terminate the chain).
 */
#define NODE_HAS_PROP_AND_OR(node_id, prop) \
	DT_NODE_HAS_PROP(node_id, prop) ||

/* Fail the build if any enabled devicetree node still uses the
 * deprecated `zephyr,memory-region-mpu` property.
 */
BUILD_ASSERT((DT_FOREACH_STATUS_OKAY_NODE_VARGS(
	NODE_HAS_PROP_AND_OR, zephyr_memory_region_mpu) false) == false,
	"`zephyr,memory-region-mpu` was deprecated in favor of `zephyr,memory-attr`");

/* True iff `node_id` has `prop`, starts at address 0x0, and is at least as
 * large as the null-pointer detection page. Expands with a trailing `||`
 * for chaining (see DT_NULL_PAGE_DETECT_NODE_EXIST below).
 */
#define NULL_PAGE_DETECT_NODE_FINDER(node_id, prop) \
	(DT_NODE_HAS_PROP(node_id, prop) && (DT_REG_ADDR(node_id) == 0) && \
	 (DT_REG_SIZE(node_id) >= CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE)) ||

/* True when some enabled node with `zephyr,memory-attr` covers the address
 * range used for null-pointer dereference detection.
 */
#define DT_NULL_PAGE_DETECT_NODE_EXIST \
	(DT_FOREACH_STATUS_OKAY_NODE_VARGS(NULL_PAGE_DETECT_NODE_FINDER, zephyr_memory_attr) false)
46
47 /*
48 * Global status variable holding the number of HW MPU region indices, which
49 * have been reserved by the MPU driver to program the static (fixed) memory
50 * regions.
51 */
52 static uint8_t static_regions_num;
53
54 /* Include architecture-specific internal headers. */
55 #if defined(CONFIG_CPU_CORTEX_M0PLUS) || \
56 defined(CONFIG_CPU_CORTEX_M3) || \
57 defined(CONFIG_CPU_CORTEX_M4) || \
58 defined(CONFIG_CPU_CORTEX_M7) || \
59 defined(CONFIG_ARMV7_R)
60 #include "arm_mpu_v7_internal.h"
61 #elif defined(CONFIG_CPU_CORTEX_M23) || \
62 defined(CONFIG_CPU_CORTEX_M33) || \
63 defined(CONFIG_CPU_CORTEX_M55) || \
64 defined(CONFIG_CPU_CORTEX_M85) || \
65 defined(CONFIG_AARCH32_ARMV8_R)
66 #include "arm_mpu_v8_internal.h"
67 #else
68 #error "Unsupported ARM CPU"
69 #endif
70
region_allocate_and_init(const uint8_t index,const struct arm_mpu_region * region_conf)71 static int region_allocate_and_init(const uint8_t index,
72 const struct arm_mpu_region *region_conf)
73 {
74 /* Attempt to allocate new region index. */
75 if (index > (get_num_regions() - 1U)) {
76
77 /* No available MPU region index. */
78 LOG_ERR("Failed to allocate new MPU region %u\n", index);
79 return -EINVAL;
80 }
81
82 LOG_DBG("Program MPU region at index 0x%x", index);
83
84 /* Program region */
85 region_init(index, region_conf);
86
87 return index;
88 }
89
/* Build an arm_mpu_region compound literal from a mem_attr_region_t DT
 * entry `reg` (name/address/size fields) and an attribute macro `_ATTR`.
 */
#define _BUILD_REGION_CONF(reg, _ATTR) \
	(struct arm_mpu_region) ARM_MPU_REGION_INIT((reg).dt_name, \
						    (reg).dt_addr, \
						    (reg).dt_size, \
						    _ATTR)
95
/* This internal function programs the MPU regions defined in the DT when using
 * the `zephyr,memory-attr = <( DT_MEM_ARM(...) )>` property.
 *
 * Walks every region reported by the mem_mgmt subsystem and, for each one
 * carrying an ARM-specific MPU attribute, allocates the next region index
 * (starting at *reg_index) and programs it. *reg_index is advanced past
 * each region that was programmed.
 *
 * Returns 0 on success, -EINVAL if an MPU region index could not be
 * allocated.
 */
static int mpu_configure_regions_from_dt(uint8_t *reg_index)
{
	const struct mem_attr_region_t *region;
	size_t num_regions;

	num_regions = mem_attr_get_regions(&region);

	for (size_t idx = 0; idx < num_regions; idx++) {
		struct arm_mpu_region region_conf;

		/* Map the DT attribute to the matching REGION_*_ATTR macro;
		 * each case is compiled in only when the architecture-specific
		 * internal header defines that attribute.
		 */
		switch (DT_MEM_ARM_GET(region[idx].dt_attr)) {
		case DT_MEM_ARM_MPU_RAM:
			region_conf = _BUILD_REGION_CONF(region[idx], REGION_RAM_ATTR);
			break;
#ifdef REGION_RAM_NOCACHE_ATTR
		case DT_MEM_ARM_MPU_RAM_NOCACHE:
			region_conf = _BUILD_REGION_CONF(region[idx], REGION_RAM_NOCACHE_ATTR);
			__ASSERT(!(region[idx].dt_attr & DT_MEM_CACHEABLE),
				 "RAM_NOCACHE with DT_MEM_CACHEABLE attribute\n");
			break;
#endif
#ifdef REGION_FLASH_ATTR
		case DT_MEM_ARM_MPU_FLASH:
			region_conf = _BUILD_REGION_CONF(region[idx], REGION_FLASH_ATTR);
			break;
#endif
#ifdef REGION_PPB_ATTR
		case DT_MEM_ARM_MPU_PPB:
			region_conf = _BUILD_REGION_CONF(region[idx], REGION_PPB_ATTR);
			break;
#endif
#ifdef REGION_IO_ATTR
		case DT_MEM_ARM_MPU_IO:
			region_conf = _BUILD_REGION_CONF(region[idx], REGION_IO_ATTR);
			break;
#endif
#ifdef REGION_EXTMEM_ATTR
		case DT_MEM_ARM_MPU_EXTMEM:
			region_conf = _BUILD_REGION_CONF(region[idx], REGION_EXTMEM_ATTR);
			break;
#endif
		default:
			/* Attribute other than ARM-specific is set.
			 * This region should not be configured in MPU.
			 */
			continue;
		}
#if defined(CONFIG_ARMV7_R)
		/* ARMv7-R expects the size pre-encoded in RASR format. */
		region_conf.size = size_to_mpu_rasr_size(region[idx].dt_size);
#endif

		if (region_allocate_and_init((*reg_index),
					     (const struct arm_mpu_region *) &region_conf) < 0) {
			return -EINVAL;
		}

		/* Advance to the next free MPU region index. */
		(*reg_index)++;
	}

	return 0;
}
160
161 /* This internal function programs an MPU region
162 * of a given configuration at a given MPU index.
163 */
mpu_configure_region(const uint8_t index,const struct z_arm_mpu_partition * new_region)164 static int mpu_configure_region(const uint8_t index,
165 const struct z_arm_mpu_partition *new_region)
166 {
167 struct arm_mpu_region region_conf;
168
169 LOG_DBG("Configure MPU region at index 0x%x", index);
170
171 /* Populate internal ARM MPU region configuration structure. */
172 region_conf.base = new_region->start;
173 #if defined(CONFIG_ARMV7_R)
174 region_conf.size = size_to_mpu_rasr_size(new_region->size);
175 #endif
176 get_region_attr_from_mpu_partition_info(®ion_conf.attr,
177 &new_region->attr, new_region->start, new_region->size);
178
179 /* Allocate and program region */
180 return region_allocate_and_init(index,
181 (const struct arm_mpu_region *)®ion_conf);
182 }
183
184 #if !defined(CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS) || \
185 !defined(CONFIG_MPU_GAP_FILLING)
186 /* This internal function programs a set of given MPU regions
187 * over a background memory area, optionally performing a
188 * sanity check of the memory regions to be programmed.
189 */
mpu_configure_regions(const struct z_arm_mpu_partition regions[],uint8_t regions_num,uint8_t start_reg_index,bool do_sanity_check)190 static int mpu_configure_regions(const struct z_arm_mpu_partition
191 regions[], uint8_t regions_num, uint8_t start_reg_index,
192 bool do_sanity_check)
193 {
194 int i;
195 int reg_index = start_reg_index;
196
197 for (i = 0; i < regions_num; i++) {
198 if (regions[i].size == 0U) {
199 continue;
200 }
201 /* Non-empty region. */
202
203 if (do_sanity_check &&
204 (!mpu_partition_is_valid(®ions[i]))) {
205 LOG_ERR("Partition %u: sanity check failed.", i);
206 return -EINVAL;
207 }
208
209 reg_index = mpu_configure_region(reg_index, ®ions[i]);
210
211 if (reg_index == -EINVAL) {
212 return reg_index;
213 }
214
215 /* Increment number of programmed MPU indices. */
216 reg_index++;
217 }
218
219 return reg_index;
220 }
221 #endif
222
223 /* ARM Core MPU Driver API Implementation for ARM MPU */
224
225
226 #if defined(CONFIG_CPU_AARCH32_CORTEX_R)
227 /**
228 * @brief enable the MPU by setting bit in SCTRL register
229 */
arm_core_mpu_enable(void)230 void arm_core_mpu_enable(void)
231 {
232 uint32_t val;
233
234 val = __get_SCTLR();
235 val |= SCTLR_MPU_ENABLE;
236 __set_SCTLR(val);
237
238 /* Make sure that all the registers are set before proceeding */
239 barrier_dsync_fence_full();
240 barrier_isync_fence_full();
241 }
242
243 /**
244 * @brief disable the MPU by clearing bit in SCTRL register
245 */
arm_core_mpu_disable(void)246 void arm_core_mpu_disable(void)
247 {
248 uint32_t val;
249
250 /* Force any outstanding transfers to complete before disabling MPU */
251 barrier_dsync_fence_full();
252
253 val = __get_SCTLR();
254 val &= ~SCTLR_MPU_ENABLE;
255 __set_SCTLR(val);
256
257 /* Make sure that all the registers are set before proceeding */
258 barrier_dsync_fence_full();
259 barrier_isync_fence_full();
260 }
261 #else
262 /**
263 * @brief enable the MPU
264 */
arm_core_mpu_enable(void)265 void arm_core_mpu_enable(void)
266 {
267 /* Enable MPU and use the default memory map as a
268 * background region for privileged software access if desired.
269 */
270 #if defined(CONFIG_MPU_DISABLE_BACKGROUND_MAP)
271 MPU->CTRL = MPU_CTRL_ENABLE_Msk;
272 #else
273 MPU->CTRL = MPU_CTRL_ENABLE_Msk | MPU_CTRL_PRIVDEFENA_Msk;
274 #endif
275
276 /* Make sure that all the registers are set before proceeding */
277 barrier_dsync_fence_full();
278 barrier_isync_fence_full();
279 }
280
281 /**
282 * @brief disable the MPU
283 */
arm_core_mpu_disable(void)284 void arm_core_mpu_disable(void)
285 {
286 /* Force any outstanding transfers to complete before disabling MPU */
287 barrier_dmem_fence_full();
288
289 /* Disable MPU */
290 MPU->CTRL = 0;
291 }
292 #endif
293
294 #if defined(CONFIG_USERSPACE)
295 /**
296 * @brief update configuration of an active memory partition
297 */
arm_core_mpu_mem_partition_config_update(struct z_arm_mpu_partition * partition,k_mem_partition_attr_t * new_attr)298 void arm_core_mpu_mem_partition_config_update(
299 struct z_arm_mpu_partition *partition,
300 k_mem_partition_attr_t *new_attr)
301 {
302 /* Find the partition. ASSERT if not found. */
303 uint8_t i;
304 uint8_t reg_index = get_num_regions();
305
306 for (i = get_dyn_region_min_index(); i < get_num_regions(); i++) {
307 if (!is_enabled_region(i)) {
308 continue;
309 }
310
311 uint32_t base = mpu_region_get_base(i);
312
313 if (base != partition->start) {
314 continue;
315 }
316
317 uint32_t size = mpu_region_get_size(i);
318
319 if (size != partition->size) {
320 continue;
321 }
322
323 /* Region found */
324 reg_index = i;
325 break;
326 }
327 __ASSERT(reg_index != get_num_regions(),
328 "Memory domain partition %p size %zu not found\n",
329 (void *)partition->start, partition->size);
330
331 /* Modify the permissions */
332 partition->attr = *new_attr;
333 mpu_configure_region(reg_index, partition);
334 }
335
336 /**
337 * @brief get the maximum number of available (free) MPU region indices
338 * for configuring dynamic MPU partitions
339 */
arm_core_mpu_get_max_available_dyn_regions(void)340 int arm_core_mpu_get_max_available_dyn_regions(void)
341 {
342 return get_num_regions() - static_regions_num;
343 }
344
345 /**
346 * @brief validate the given buffer is user accessible or not
347 *
348 * Presumes the background mapping is NOT user accessible.
349 */
arm_core_mpu_buffer_validate(const void * addr,size_t size,int write)350 int arm_core_mpu_buffer_validate(const void *addr, size_t size, int write)
351 {
352 return mpu_buffer_validate(addr, size, write);
353 }
354
355 #endif /* CONFIG_USERSPACE */
356
357 /**
358 * @brief configure fixed (static) MPU regions.
359 */
arm_core_mpu_configure_static_mpu_regions(const struct z_arm_mpu_partition * static_regions,const uint8_t regions_num,const uint32_t background_area_start,const uint32_t background_area_end)360 void arm_core_mpu_configure_static_mpu_regions(const struct z_arm_mpu_partition
361 *static_regions, const uint8_t regions_num,
362 const uint32_t background_area_start, const uint32_t background_area_end)
363 {
364 if (mpu_configure_static_mpu_regions(static_regions, regions_num,
365 background_area_start, background_area_end) == -EINVAL) {
366
367 __ASSERT(0, "Configuring %u static MPU regions failed\n",
368 regions_num);
369 }
370 }
371
372 #if defined(CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS)
373 /**
374 * @brief mark memory areas for dynamic region configuration
375 */
arm_core_mpu_mark_areas_for_dynamic_regions(const struct z_arm_mpu_partition dyn_region_areas[],const uint8_t dyn_region_areas_num)376 void arm_core_mpu_mark_areas_for_dynamic_regions(
377 const struct z_arm_mpu_partition dyn_region_areas[],
378 const uint8_t dyn_region_areas_num)
379 {
380 if (mpu_mark_areas_for_dynamic_regions(dyn_region_areas,
381 dyn_region_areas_num) == -EINVAL) {
382
383 __ASSERT(0, "Marking %u areas for dynamic regions failed\n",
384 dyn_region_areas_num);
385 }
386 }
387 #endif /* CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS */
388
389 /**
390 * @brief configure dynamic MPU regions.
391 */
arm_core_mpu_configure_dynamic_mpu_regions(const struct z_arm_mpu_partition * dynamic_regions,uint8_t regions_num)392 void arm_core_mpu_configure_dynamic_mpu_regions(const struct z_arm_mpu_partition
393 *dynamic_regions, uint8_t regions_num)
394 {
395 if (mpu_configure_dynamic_mpu_regions(dynamic_regions, regions_num)
396 == -EINVAL) {
397
398 __ASSERT(0, "Configuring %u dynamic MPU regions failed\n",
399 regions_num);
400 }
401 }
402
403 /* ARM MPU Driver Initial Setup */
404
405 /*
406 * @brief MPU default configuration
407 *
408 * This function provides the default configuration mechanism for the Memory
409 * Protection Unit (MPU).
410 */
int z_arm_mpu_init(void)
{
	uint32_t r_index;

	/* Sanity-check the SoC-provided static configuration against the
	 * number of regions the hardware actually implements.
	 */
	if (mpu_config.num_regions > get_num_regions()) {
		/* Attempt to configure more MPU regions than
		 * what is supported by hardware. As this operation
		 * is executed during system (pre-kernel) initialization,
		 * we want to ensure we can detect an attempt to
		 * perform invalid configuration.
		 */
		__ASSERT(0,
			"Request to configure: %u regions (supported: %u)\n",
			mpu_config.num_regions,
			get_num_regions()
		);
		return -1;
	}

	LOG_DBG("total region count: %d", get_num_regions());

	/* MPU must be disabled while its regions are (re)programmed. */
	arm_core_mpu_disable();

#if defined(CONFIG_NOCACHE_MEMORY)
	/* Clean and invalidate data cache if it is enabled and
	 * that was not already done at boot
	 */
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
	if (__get_SCTLR() & SCTLR_C_Msk) {
		L1C_CleanInvalidateDCacheAll();
	}
#else
#if !defined(CONFIG_INIT_ARCH_HW_AT_BOOT)
	if (SCB->CCR & SCB_CCR_DC_Msk) {
		SCB_CleanInvalidateDCache();
	}
#endif
#endif
#endif /* CONFIG_NOCACHE_MEMORY */

	/* Architecture-specific configuration */
	mpu_init();

	/* Program fixed regions configured at SOC definition. */
	for (r_index = 0U; r_index < mpu_config.num_regions; r_index++) {
		region_init(r_index, &mpu_config.mpu_regions[r_index]);
	}

	/* Update the number of programmed MPU regions. */
	static_regions_num = mpu_config.num_regions;

	/* DT-defined MPU regions: appended after the SoC-fixed ones;
	 * static_regions_num is advanced accordingly.
	 */
	if (mpu_configure_regions_from_dt(&static_regions_num) == -EINVAL) {
		__ASSERT(0, "Failed to allocate MPU regions from DT\n");
		return -EINVAL;
	}

	/* Clear all regions before enabling MPU */
	for (int i = static_regions_num; i < get_num_regions(); i++) {
		mpu_clear_region(i);
	}

	arm_core_mpu_enable();

	/* Program additional fixed flash region for null-pointer
	 * dereferencing detection (debug feature)
	 */
#if defined(CONFIG_NULL_POINTER_EXCEPTION_DETECTION_MPU)
#if (defined(CONFIG_ARMV8_M_BASELINE) || defined(CONFIG_ARMV8_M_MAINLINE)) && \
	(CONFIG_FLASH_BASE_ADDRESS > CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE) && \
	(!DT_NULL_PAGE_DETECT_NODE_EXIST)

#pragma message "Null-Pointer exception detection cannot be configured on un-mapped flash areas"
#else
	const struct z_arm_mpu_partition unmap_region =	{
		.start = 0x0,
		.size = CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE,
#if defined(CONFIG_ARMV8_M_BASELINE) || defined(CONFIG_ARMV8_M_MAINLINE)
		/* Overlapping region (with any permissions)
		 * will result in fault generation
		 */
		.attr = K_MEM_PARTITION_P_RO_U_NA,
#else
		/* Explicit no-access policy */
		.attr = K_MEM_PARTITION_P_NA_U_NA,
#endif
	};

	/* The flash region for null pointer dereferencing detection shall
	 * comply with the regular MPU partition definition restrictions
	 * (size and alignment).
	 */
	_ARCH_MEM_PARTITION_ALIGN_CHECK(0x0,
		CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE);

#if defined(CONFIG_ARMV8_M_BASELINE) || defined(CONFIG_ARMV8_M_MAINLINE)
	/* ARMv8-M requires that the area:
	 * 0x0 - CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE
	 * is not unmapped (belongs to a valid MPU region already).
	 */
	if ((arm_cmse_mpu_region_get(0x0) == -EINVAL) ||
			(arm_cmse_mpu_region_get(
				CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE - 1)
			== -EINVAL)) {
		__ASSERT(0,
			"Null pointer detection page unmapped\n");
	}
#endif

	/* Consume one more region index for the null-pointer page. */
	if (mpu_configure_region(static_regions_num, &unmap_region) == -EINVAL) {

		__ASSERT(0,
			"Programming null-pointer detection region failed\n");
		return -EINVAL;
	}

	static_regions_num++;

#endif
#endif /* CONFIG_NULL_POINTER_EXCEPTION_DETECTION_MPU */

	/* Sanity check for number of regions in Cortex-M0+, M3, and M4. */
#if defined(CONFIG_CPU_CORTEX_M0PLUS) || \
	defined(CONFIG_CPU_CORTEX_M3) || \
	defined(CONFIG_CPU_CORTEX_M4)
	__ASSERT(
		(MPU->TYPE & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos == 8,
		"Invalid number of MPU regions\n");
#endif /* CORTEX_M0PLUS || CPU_CORTEX_M3 || CPU_CORTEX_M4 */

	return 0;
}
543