/*
 * Copyright (c) 2019 Synopsys.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V4_INTERNAL_H_
#define ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V4_INTERNAL_H_

#define AUX_MPU_RPER_SID1 0x10000
/* valid mask: SID1 + secure + valid */
#define AUX_MPU_RPER_VALID_MASK ((0x1) | AUX_MPU_RPER_SID1 | AUX_MPU_ATTR_S)

#define AUX_MPU_RPER_ATTR_MASK (0x1FF)

/* For MPU version 4, the minimum protection region size is 32 bytes */
#define ARC_FEATURE_MPU_ALIGNMENT_BITS 5

#define CALC_REGION_END_ADDR(start, size) \
	(start + size - (1 << ARC_FEATURE_MPU_ALIGNMENT_BITS))
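
/*
 * A worked example (hypothetical values): CALC_REGION_END_ADDR yields the
 * base address of the last 32-byte granule of the region, which is the
 * value programmed into the REND auxiliary register below:
 *
 *	CALC_REGION_END_ADDR(0x40000000, 0x1000)
 *		= 0x40000000 + 0x1000 - 32 = 0x40000FE0
 */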

/* ARC MPU version 4 does not support MPU region overlap in hardware,
 * so to allocate MPU regions dynamically (e.g., for a thread stack or a
 * memory domain) out of a background region, a dynamic region splitting
 * approach is used; see the comments in
 * _dynamic_region_allocate_and_init.
 * This approach costs thread-switch performance. As a trade-off, the
 * default MPU region can serve as the background region instead,
 * avoiding the dynamic region splitting. This grants more privilege to
 * kernel-mode code, which can then access memory not covered by any
 * explicit MPU entry. Since memory protection is mainly used to isolate
 * malicious code in user mode, trading this privilege for better
 * thread-switch performance is reasonable.
 * CONFIG_MPU_GAP_FILLING turns the splitting approach on and off.
 */
#if defined(CONFIG_MPU_GAP_FILLING)

#if defined(CONFIG_USERSPACE) && defined(CONFIG_MPU_STACK_GUARD)
/* 1 for stack guard, 1 for user thread stack, 1 for split */
#define MPU_REGION_NUM_FOR_THREAD 3
#elif defined(CONFIG_USERSPACE) || defined(CONFIG_MPU_STACK_GUARD)
/* 1 for stack guard or user thread stack, 1 for split */
#define MPU_REGION_NUM_FOR_THREAD 2
#else
#define MPU_REGION_NUM_FOR_THREAD 0
#endif

#define MPU_DYNAMIC_REGION_AREAS_NUM 2

/**
 * @brief internal structure holding information of
 * memory areas where dynamic MPU programming is allowed.
 */
struct dynamic_region_info {
	uint8_t index;
	uint32_t base;
	uint32_t size;
	uint32_t attr;
};

static uint8_t dynamic_regions_num;
static uint8_t dynamic_region_index;

/**
 * Global array holding the MPU region indexes of
 * the memory regions inside which dynamic memory
 * regions may be configured.
 */
static struct dynamic_region_info dyn_reg_info[MPU_DYNAMIC_REGION_AREAS_NUM];
#endif /* CONFIG_MPU_GAP_FILLING */

static uint8_t static_regions_num;

#ifdef CONFIG_ARC_NORMAL_FIRMWARE
/* TODO: access the MPU through the secure service */
static inline void _region_init(uint32_t index, uint32_t region_addr, uint32_t size,
				uint32_t region_attr)
{
}

static inline void _region_set_attr(uint32_t index, uint32_t attr)
{
}

static inline uint32_t _region_get_attr(uint32_t index)
{
	return 0;
}

static inline uint32_t _region_get_start(uint32_t index)
{
	return 0;
}

static inline void _region_set_start(uint32_t index, uint32_t start)
{
}

static inline uint32_t _region_get_end(uint32_t index)
{
	return 0;
}

static inline void _region_set_end(uint32_t index, uint32_t end)
{
}

/**
 * This internal function probes the MPU region index of the given
 * address; if the address is not covered by any MPU region, it
 * returns an error.
 */
static inline int _mpu_probe(uint32_t addr)
{
	return -EINVAL;
}

/**
 * This internal function checks whether an MPU region is enabled.
 */
static inline bool _is_enabled_region(uint32_t r_index)
{
	return false;
}

/**
 * This internal function checks whether a region is user accessible.
 */
static inline bool _is_user_accessible_region(uint32_t r_index, int write)
{
	return false;
}
#else /* CONFIG_ARC_NORMAL_FIRMWARE */
/* the following functions are for secure firmware */
static inline void _region_init(uint32_t index, uint32_t region_addr, uint32_t size,
				uint32_t region_attr)
{
	/* a region must span at least one 32-byte granule */
	if (size < (1 << ARC_FEATURE_MPU_ALIGNMENT_BITS)) {
		size = (1 << ARC_FEATURE_MPU_ALIGNMENT_BITS);
	}

	if (region_attr) {
		region_attr &= AUX_MPU_RPER_ATTR_MASK;
		region_attr |= AUX_MPU_RPER_VALID_MASK;
	}

	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_RSTART, region_addr);
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_REND,
			       CALC_REGION_END_ADDR(region_addr, size));
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_RPER, region_attr);
}
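
/*
 * For illustration (hypothetical values): _region_init(2, 0x40000000,
 * 0x2000, REGION_KERNEL_RAM_ATTR) selects MPU entry 2 and programs
 * RSTART = 0x40000000, REND = 0x40001FE0 (the base of the last 32-byte
 * granule) and RPER = the attribute bits ORed with valid | SID1 | secure.
 */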

static inline void _region_set_attr(uint32_t index, uint32_t attr)
{
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_RPER, attr |
			       AUX_MPU_RPER_VALID_MASK);
}

static inline uint32_t _region_get_attr(uint32_t index)
{
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);

	return z_arc_v2_aux_reg_read(_ARC_V2_MPU_RPER);
}

static inline uint32_t _region_get_start(uint32_t index)
{
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);

	return z_arc_v2_aux_reg_read(_ARC_V2_MPU_RSTART);
}

static inline void _region_set_start(uint32_t index, uint32_t start)
{
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_RSTART, start);
}

static inline uint32_t _region_get_end(uint32_t index)
{
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);

	return z_arc_v2_aux_reg_read(_ARC_V2_MPU_REND) +
	       (1 << ARC_FEATURE_MPU_ALIGNMENT_BITS);
}

static inline void _region_set_end(uint32_t index, uint32_t end)
{
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_REND, end -
			       (1 << ARC_FEATURE_MPU_ALIGNMENT_BITS));
}

/**
 * This internal function probes the MPU region index of the given
 * address; if the address is not covered by any MPU region, it
 * returns an error.
 */
static inline int _mpu_probe(uint32_t addr)
{
	uint32_t val;

	z_arc_v2_aux_reg_write(_ARC_V2_MPU_PROBE, addr);
	val = z_arc_v2_aux_reg_read(_ARC_V2_MPU_INDEX);

	/* if no match or multiple regions match, return error */
	if (val & 0xC0000000) {
		return -EINVAL;
	} else {
		return val;
	}
}

/**
 * This internal function checks whether an MPU region is enabled.
 */
static inline bool _is_enabled_region(uint32_t r_index)
{
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, r_index);
	return ((z_arc_v2_aux_reg_read(_ARC_V2_MPU_RPER) &
		 AUX_MPU_RPER_VALID_MASK) == AUX_MPU_RPER_VALID_MASK);
}

/**
 * This internal function checks whether a region is user accessible,
 * i.e. whether both the user and kernel permission bits for the
 * requested access are set.
 */
static inline bool _is_user_accessible_region(uint32_t r_index, int write)
{
	uint32_t r_ap;

	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, r_index);
	r_ap = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RPER);
	r_ap &= AUX_MPU_RPER_ATTR_MASK;

	if (write) {
		return ((r_ap & (AUX_MPU_ATTR_UW | AUX_MPU_ATTR_KW)) ==
			(AUX_MPU_ATTR_UW | AUX_MPU_ATTR_KW));
	}

	return ((r_ap & (AUX_MPU_ATTR_UR | AUX_MPU_ATTR_KR)) ==
		(AUX_MPU_ATTR_UR | AUX_MPU_ATTR_KR));
}

#endif /* CONFIG_ARC_NORMAL_FIRMWARE */

/**
 * This internal function checks the area given by (start, size)
 * and returns the MPU entry index if the whole area falls within a
 * single MPU entry.
 */
static inline int _get_region_index(uint32_t start, uint32_t size)
{
	int index = _mpu_probe(start);

	if (index > 0 && index == _mpu_probe(start + size - 1)) {
		return index;
	}

	return -EINVAL;
}
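
/*
 * Usage sketch (hypothetical values): if MPU entry 3 covers
 * 0x1000..0x2000, then _get_region_index(0x1200, 0x100) returns 3,
 * while _get_region_index(0x1F00, 0x200) returns -EINVAL because the
 * area crosses the region boundary.
 */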

#if defined(CONFIG_MPU_GAP_FILLING)
/**
 * This internal function allocates a dynamic MPU region and returns
 * the index, or an error
 */
static inline int _dynamic_region_allocate_index(void)
{
	if (dynamic_region_index >= get_num_regions()) {
		LOG_ERR("not enough MPU entries %d", dynamic_region_index);
		return -EINVAL;
	}

	return dynamic_region_index++;
}

/* @brief allocate and init a dynamic MPU region
 *
 * This internal function performs the allocation and initialization of
 * a dynamic MPU region
 *
 * @param base region base
 * @param size region size
 * @param attr region attribute
 * @return <0 failure, >0 allocated dynamic region index
 */
static int _dynamic_region_allocate_and_init(uint32_t base, uint32_t size,
					     uint32_t attr)
{
	int u_region_index = _get_region_index(base, size);
	int region_index;

	LOG_DBG("Region info: base 0x%x size 0x%x attr 0x%x", base, size, attr);

	if (u_region_index == -EINVAL) {
		/* no underlying region */
		region_index = _dynamic_region_allocate_index();

		if (region_index > 0) {
			/* a new region */
			_region_init(region_index, base, size, attr);
		}

		return region_index;
	}

	/*
	 * The new memory region is to be placed inside the underlying
	 * region, possibly splitting the underlying region into two.
	 */
	uint32_t u_region_start = _region_get_start(u_region_index);
	uint32_t u_region_end = _region_get_end(u_region_index);
	uint32_t u_region_attr = _region_get_attr(u_region_index);
	uint32_t end = base + size;

	if ((base == u_region_start) && (end == u_region_end)) {
		/* The new region coincides exactly with the
		 * underlying region. In this case we simply
		 * update the partition attributes of the
		 * underlying region with those of the new
		 * region.
		 */
		_region_init(u_region_index, base, size, attr);
		region_index = u_region_index;
	} else if (base == u_region_start) {
		/* The new region starts exactly at the start of the
		 * underlying region; the start of the underlying
		 * region needs to be set to the end of the new region.
		 */
		_region_set_start(u_region_index, base + size);
		_region_set_attr(u_region_index, u_region_attr);

		region_index = _dynamic_region_allocate_index();

		if (region_index > 0) {
			_region_init(region_index, base, size, attr);
		}
	} else if (end == u_region_end) {
		/* The new region ends exactly at the end of the
		 * underlying region; the end of the underlying
		 * region needs to be set to the start of the
		 * new region.
		 */
		_region_set_end(u_region_index, base);
		_region_set_attr(u_region_index, u_region_attr);

		region_index = _dynamic_region_allocate_index();

		if (region_index > 0) {
			_region_init(region_index, base, size, attr);
		}
	} else {
		/* The new region lies strictly inside the
		 * underlying region, which needs to be split
		 * into two regions.
		 */
		_region_set_end(u_region_index, base);
		_region_set_attr(u_region_index, u_region_attr);

		region_index = _dynamic_region_allocate_index();

		if (region_index > 0) {
			_region_init(region_index, base, size, attr);

			region_index = _dynamic_region_allocate_index();

			if (region_index > 0) {
				_region_init(region_index, base + size,
					     u_region_end - end, u_region_attr);
			}
		}
	}

	return region_index;
}
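
/*
 * Illustration of the worst (split) case above, with hypothetical
 * addresses: carving a new region [0x1400, 0x1800) out of an underlying
 * region [0x1000, 0x2000) consumes two extra MPU entries:
 *
 *	before:	|<---------- underlying 0x1000..0x2000 ---------->|
 *	after:	|<- 0x1000..0x1400 ->|<- new ->|<- 0x1800..0x2000 ->|
 *
 * The underlying entry is shrunk to end at 0x1400, the new region takes
 * one freshly allocated entry, and the remainder 0x1800..0x2000 takes
 * another. This is why MPU_REGION_NUM_FOR_THREAD reserves one extra
 * entry for the split.
 */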

/* @brief reset the dynamic MPU regions
 *
 * This internal function performs the reset of dynamic MPU regions
 */
static void _mpu_reset_dynamic_regions(void)
{
	uint32_t i;
	uint32_t num_regions = get_num_regions();

	for (i = static_regions_num; i < num_regions; i++) {
		_region_init(i, 0, 0, 0);
	}

	/* re-program the areas inside which dynamic allocation is allowed */
	for (i = 0U; i < dynamic_regions_num; i++) {
		_region_init(
			dyn_reg_info[i].index,
			dyn_reg_info[i].base,
			dyn_reg_info[i].size,
			dyn_reg_info[i].attr);
	}

	/* dynamic regions are allocated after the static regions */
	dynamic_region_index = static_regions_num;
}

/**
 * @brief configure the base address and size for an MPU region
 *
 * @param type MPU region type
 * @param base base address in RAM
 * @param size size of the region
 */
static inline int _mpu_configure(uint8_t type, uint32_t base, uint32_t size)
{
	uint32_t region_attr = get_region_attr_by_type(type);

	return _dynamic_region_allocate_and_init(base, size, region_attr);
}
#else
/**
 * This internal function is utilized by the MPU driver to parse the
 * intent type (i.e. THREAD_STACK_REGION) and return the correct
 * region index.
 */
static inline int get_region_index_by_type(uint32_t type)
{
	/*
	 * The new MPU regions are allocated per type after the statically
	 * configured regions. The type is one-indexed rather than
	 * zero-indexed.
	 *
	 * Unlike ARC MPU v2, where the smaller index has higher priority
	 * and indexes therefore had to be allocated in reverse order,
	 * MPU v4 regions cannot overlap, so regions are simply allocated
	 * in increasing order after the static regions.
	 */
	switch (type) {
	case THREAD_STACK_USER_REGION:
		return static_regions_num + THREAD_STACK_REGION;
	case THREAD_STACK_REGION:
	case THREAD_APP_DATA_REGION:
	case THREAD_STACK_GUARD_REGION:
		return static_regions_num + type;
	case THREAD_DOMAIN_PARTITION_REGION:
#if defined(CONFIG_MPU_STACK_GUARD)
		return static_regions_num + type;
#else
		/*
		 * Start the domain partition region from the stack guard
		 * region's slot, since the stack guard is not enabled.
		 */
		return static_regions_num + type - 1;
#endif
	default:
		__ASSERT(0, "Unsupported type");
		return -EINVAL;
	}
}
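
/*
 * Example (hypothetical): with 4 static regions configured, a thread
 * stack region lands at MPU entry 4 + THREAD_STACK_REGION. When
 * CONFIG_MPU_STACK_GUARD is disabled, THREAD_DOMAIN_PARTITION_REGION
 * slides down by one to reuse the unused stack guard slot.
 */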

/**
 * @brief configure the base address and size for an MPU region
 *
 * @param type MPU region type
 * @param base base address in RAM
 * @param size size of the region
 */
static inline int _mpu_configure(uint8_t type, uint32_t base, uint32_t size)
{
	int region_index = get_region_index_by_type(type);
	uint32_t region_attr = get_region_attr_by_type(type);

	LOG_DBG("Region info: 0x%x 0x%x", base, size);

	if (region_attr == 0U || region_index < 0) {
		return -EINVAL;
	}

	_region_init(region_index, base, size, region_attr);

	return 0;
}
#endif

/* ARC Core MPU Driver API Implementation for ARC MPUv4 */

/**
 * @brief enable the MPU
 */
void arc_core_mpu_enable(void)
{
#ifdef CONFIG_ARC_SECURE_FIRMWARE
/* the default region attributes: secure (0x8000) | SID1 (0x10000) |
 * KW (0x100) | KR (0x80)
 */
#define MPU_ENABLE_ATTR 0x18180
#else
#define MPU_ENABLE_ATTR 0
#endif
	arc_core_mpu_default(MPU_ENABLE_ATTR);
}

/**
 * @brief disable the MPU
 */
void arc_core_mpu_disable(void)
{
	/* the MPU is always enabled; use the default region to
	 * simulate an MPU disable
	 */
	arc_core_mpu_default(REGION_ALL_ATTR | AUX_MPU_ATTR_S |
			     AUX_MPU_RPER_SID1);
}

/**
 * @brief configure the thread's MPU regions
 *
 * @param thread the target thread
 */
void arc_core_mpu_configure_thread(struct k_thread *thread)
{
#if defined(CONFIG_MPU_GAP_FILLING)
	/* the MPU entries of ARC MPUv4 are divided into 2 parts:
	 * static entries: global MPU entries, not changed on context switch
	 * dynamic entries: MPU entries changed on context switch and
	 * memory domain configuration, including:
	 *	MPU entries for the user thread stack
	 *	MPU entries for the stack guard
	 *	MPU entries for the memory domain
	 *	MPU entries for other thread-specific regions
	 * before configuring thread-specific MPU entries, the dynamic
	 * entries need to be reset
	 */
	_mpu_reset_dynamic_regions();
#endif
#if defined(CONFIG_MPU_STACK_GUARD)
	uint32_t guard_start;

	/* Set the location of the guard area when the thread is running in
	 * supervisor mode. For a supervisor thread, this is just low
	 * memory in the stack buffer. A user thread only runs in
	 * supervisor mode when handling a system call on the privilege
	 * elevation stack.
	 */
#if defined(CONFIG_USERSPACE)
	if ((thread->base.user_options & K_USER) != 0U) {
		guard_start = thread->arch.priv_stack_start;
	} else
#endif
	{
		guard_start = thread->stack_info.start;
	}
	guard_start -= Z_ARC_STACK_GUARD_SIZE;

	if (_mpu_configure(THREAD_STACK_GUARD_REGION, guard_start,
			   Z_ARC_STACK_GUARD_SIZE) < 0) {
		LOG_ERR("thread %p's stack guard failed", thread);
		return;
	}
#endif /* CONFIG_MPU_STACK_GUARD */

#if defined(CONFIG_USERSPACE)
	/* configure the stack region of a user thread */
	if (thread->base.user_options & K_USER) {
		LOG_DBG("configure user thread %p's stack", thread);
		if (_mpu_configure(THREAD_STACK_USER_REGION,
				   (uint32_t)thread->stack_info.start,
				   thread->stack_info.size) < 0) {
			LOG_ERR("thread %p's stack failed", thread);
			return;
		}
	}

#if defined(CONFIG_MPU_GAP_FILLING)
	uint32_t num_partitions;
	struct k_mem_partition *pparts;
	struct k_mem_domain *mem_domain = thread->mem_domain_info.mem_domain;

	/* configure the thread's memory domain */
	if (mem_domain) {
		LOG_DBG("configure thread %p's domain: %p",
			thread, mem_domain);
		num_partitions = mem_domain->num_partitions;
		pparts = mem_domain->partitions;
	} else {
		num_partitions = 0U;
		pparts = NULL;
	}

	for (uint32_t i = 0; i < num_partitions; i++) {
		if (pparts->size) {
			if (_dynamic_region_allocate_and_init(pparts->start,
							      pparts->size,
							      pparts->attr) < 0) {
				LOG_ERR("thread %p's mem region: %p failed",
					thread, pparts);
				return;
			}
		}
		pparts++;
	}
#else
	arc_core_mpu_configure_mem_domain(thread);
#endif
#endif
}

/**
 * @brief configure the default region
 *
 * @param region_attr region attribute of default region
 */
void arc_core_mpu_default(uint32_t region_attr)
{
#ifdef CONFIG_ARC_NORMAL_FIRMWARE
	/* TODO: access the MPU through the secure service */
#else
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_EN, region_attr);
#endif
}

/**
 * @brief configure the MPU region
 *
 * @param index MPU region index
 * @param base base address
 * @param size region size
 * @param region_attr region attribute
 */
int arc_core_mpu_region(uint32_t index, uint32_t base, uint32_t size,
			uint32_t region_attr)
{
	if (index >= get_num_regions()) {
		return -EINVAL;
	}

	region_attr &= AUX_MPU_RPER_ATTR_MASK;

	_region_init(index, base, size, region_attr);

	return 0;
}
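
/*
 * Usage sketch (hypothetical values): program entry 5 to cover a 4 KiB
 * window with kernel read/write access only:
 *
 *	arc_core_mpu_region(5, 0xF0000000, 0x1000,
 *			    AUX_MPU_ATTR_KR | AUX_MPU_ATTR_KW);
 */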

#if defined(CONFIG_USERSPACE)
/**
 * @brief configure MPU regions for the memory partitions of the memory domain
 *
 * @param thread the thread which has the memory domain
 */
#if defined(CONFIG_MPU_GAP_FILLING)
void arc_core_mpu_configure_mem_domain(struct k_thread *thread)
{
	arc_core_mpu_configure_thread(thread);
}
#else
void arc_core_mpu_configure_mem_domain(struct k_thread *thread)
{
	uint32_t region_index;
	uint32_t num_partitions;
	uint32_t num_regions;
	struct k_mem_partition *pparts;
	struct k_mem_domain *mem_domain = NULL;

	if (thread) {
		mem_domain = thread->mem_domain_info.mem_domain;
	}

	if (mem_domain) {
		LOG_DBG("configure domain: %p", mem_domain);
		num_partitions = mem_domain->num_partitions;
		pparts = mem_domain->partitions;
	} else {
		LOG_DBG("disable domain partition regions");
		num_partitions = 0U;
		pparts = NULL;
	}

	num_regions = get_num_regions();
	region_index = get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);

	while (num_partitions && region_index < num_regions) {
		if (pparts->size > 0) {
			LOG_DBG("set region 0x%x 0x%lx 0x%x",
				region_index, pparts->start, pparts->size);
			_region_init(region_index, pparts->start,
				     pparts->size, pparts->attr);
			region_index++;
		}
		pparts++;
		num_partitions--;
	}

	/* clear the remaining MPU entries */
	while (region_index < num_regions) {
		_region_init(region_index, 0, 0, 0);
		region_index++;
	}
}
#endif

/**
 * @brief remove MPU regions for the memory partitions of the memory domain
 *
 * @param mem_domain the target memory domain
 */
void arc_core_mpu_remove_mem_domain(struct k_mem_domain *mem_domain)
{
	uint32_t num_partitions;
	struct k_mem_partition *pparts;
	int index;

	if (mem_domain) {
		LOG_DBG("configure domain: %p", mem_domain);
		num_partitions = mem_domain->num_partitions;
		pparts = mem_domain->partitions;
	} else {
		LOG_DBG("disable domain partition regions");
		num_partitions = 0U;
		pparts = NULL;
	}

	for (uint32_t i = 0; i < num_partitions; i++) {
		if (pparts->size) {
			index = _get_region_index(pparts->start,
						  pparts->size);
			if (index > 0) {
#if defined(CONFIG_MPU_GAP_FILLING)
				/* revert the entry to kernel-only RAM
				 * attributes
				 */
				_region_set_attr(index,
						 REGION_KERNEL_RAM_ATTR);
#else
				_region_init(index, 0, 0, 0);
#endif
			}
		}
		pparts++;
	}
}

/**
 * @brief reset the MPU region for a single memory partition
 *
 * @param domain the target memory domain
 * @param partition_id memory partition id
 */
void arc_core_mpu_remove_mem_partition(struct k_mem_domain *domain,
				       uint32_t partition_id)
{
	struct k_mem_partition *partition = &domain->partitions[partition_id];

	int region_index = _get_region_index(partition->start,
					     partition->size);

	if (region_index < 0) {
		return;
	}

	LOG_DBG("remove region 0x%x", region_index);
#if defined(CONFIG_MPU_GAP_FILLING)
	_region_set_attr(region_index, REGION_KERNEL_RAM_ATTR);
#else
	_region_init(region_index, 0, 0, 0);
#endif
}

/**
 * @brief get the maximum number of free regions for memory domain partitions
 */
int arc_core_mpu_get_max_domain_partition_regions(void)
{
#if defined(CONFIG_MPU_GAP_FILLING)
	/* consider the worst case: each partition requires a split */
	return (get_num_regions() - MPU_REGION_NUM_FOR_THREAD) / 2;
#else
	return get_num_regions() -
	       get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION) - 1;
#endif
}
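
/*
 * For illustration: on a core with 16 MPU entries and
 * MPU_REGION_NUM_FOR_THREAD = 3 (userspace plus stack guard), gap
 * filling leaves (16 - 3) / 2 = 6 usable partitions in the worst case,
 * since each partition may split an underlying region.
 */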

/**
 * @brief validate whether the given buffer is user accessible
 */
int arc_core_mpu_buffer_validate(const void *addr, size_t size, int write)
{
	int r_index;
	int key = arch_irq_lock();

	/*
	 * For ARC MPU v4, regions cannot overlap, so probing the start
	 * and end addresses is enough to decide whether access is
	 * granted or denied.
	 */
	r_index = _mpu_probe((uint32_t)addr);
	/* the whole area must lie within a single region */
	if (r_index >= 0 && r_index == _mpu_probe((uint32_t)addr + (size - 1))) {
		if (_is_user_accessible_region(r_index, write)) {
			r_index = 0;
		} else {
			r_index = -EPERM;
		}
	} else {
		r_index = -EPERM;
	}

	arch_irq_unlock(key);

	return r_index;
}
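
/*
 * Usage sketch: a system call handler validating a user buffer, e.g.
 * arc_core_mpu_buffer_validate(buf, len, 1), gets 0 back only when the
 * whole buffer lies within a single region granting both user and
 * kernel write permission, and -EPERM otherwise.
 */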
#endif /* CONFIG_USERSPACE */

/* ARC MPU Driver Initial Setup */
/*
 * @brief MPU default initialization and configuration
 *
 * This function provides the default configuration mechanism for the Memory
 * Protection Unit (MPU).
 */
void arc_mpu_init(void)
{
	uint32_t num_regions;
	uint32_t i;

	num_regions = get_num_regions();

	/* the ARC MPU supports up to 16 regions */
	if (mpu_config.num_regions > num_regions) {
		__ASSERT(0,
			 "Request to configure: %u regions (supported: %u)\n",
			 mpu_config.num_regions, num_regions);
		return;
	}

	static_regions_num = 0U;

	/* disable the MPU */
	arc_core_mpu_disable();

	for (i = 0U; i < mpu_config.num_regions; i++) {
		/* skip empty regions */
		if (mpu_config.mpu_regions[i].size == 0) {
			continue;
		}
#if defined(CONFIG_MPU_GAP_FILLING)
		_region_init(static_regions_num,
			     mpu_config.mpu_regions[i].base,
			     mpu_config.mpu_regions[i].size,
			     mpu_config.mpu_regions[i].attr);

		/* record the static regions which can be split */
		if (mpu_config.mpu_regions[i].attr & REGION_DYNAMIC) {
			if (dynamic_regions_num >=
			    MPU_DYNAMIC_REGION_AREAS_NUM) {
				LOG_ERR("not enough dynamic regions %d",
					dynamic_regions_num);
				return;
			}

			/* record the MPU entry programmed above; this is
			 * static_regions_num, which can lag i when empty
			 * config regions are skipped
			 */
			dyn_reg_info[dynamic_regions_num].index =
				static_regions_num;
			dyn_reg_info[dynamic_regions_num].base =
				mpu_config.mpu_regions[i].base;
			dyn_reg_info[dynamic_regions_num].size =
				mpu_config.mpu_regions[i].size;
			dyn_reg_info[dynamic_regions_num].attr =
				mpu_config.mpu_regions[i].attr;

			dynamic_regions_num++;
		}
		static_regions_num++;
#else
		/* dynamic regions are covered by the default MPU setting,
		 * so there is no need to configure them here
		 */
		if (!(mpu_config.mpu_regions[i].attr & REGION_DYNAMIC)) {
			_region_init(static_regions_num,
				     mpu_config.mpu_regions[i].base,
				     mpu_config.mpu_regions[i].size,
				     mpu_config.mpu_regions[i].attr);
			static_regions_num++;
		}
#endif
	}

	/* clear the remaining MPU entries */
	for (i = static_regions_num; i < num_regions; i++) {
		_region_init(i, 0, 0, 0);
	}

	/* enable the MPU */
	arc_core_mpu_enable();
}


#endif /* ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V4_INTERNAL_H_ */