1 /*
2 * Copyright (c) 2023 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <stdint.h>
8 #include <stdlib.h>
9 #include <string.h>
10
11 #include <zephyr/kernel.h>
12 #include <zephyr/spinlock.h>
13 #include <zephyr/toolchain.h>
14 #include <zephyr/arch/xtensa/arch_inlines.h>
15 #include <zephyr/arch/xtensa/mpu.h>
16 #include <zephyr/linker/linker-defs.h>
17 #include <zephyr/sys/__assert.h>
18 #include <zephyr/sys/util_macro.h>
19
20 #include <xtensa/corebits.h>
21 #include <xtensa/config/core-matmap.h>
22 #include <xtensa/config/core-isa.h>
23 #include <xtensa_mpu_priv.h>
24
25 #ifdef CONFIG_USERSPACE
26 BUILD_ASSERT((CONFIG_PRIVILEGED_STACK_SIZE > 0) &&
27 (CONFIG_PRIVILEGED_STACK_SIZE % XCHAL_MPU_ALIGN) == 0);
28 #endif
29
30 extern char _heap_end[];
31 extern char _heap_start[];
32
33 /** MPU foreground map for kernel mode. */
34 static struct xtensa_mpu_map xtensa_mpu_map_fg_kernel;
35
/** Make sure writes to the MPU regions are atomic. */
37 static struct k_spinlock xtensa_mpu_lock;
38
/*
 * Additional information about the MPU maps: foreground and background
 * maps.
 *
 * Some things to keep in mind:
 * - Each MPU region is described by TWO entries:
 *   [entry_a_address, entry_b_address). For contiguous memory regions,
 *   this should not be much of an issue. However, disjoint memory regions
 *   "waste" another entry to describe the end of those regions.
 *   We might run out of available entries in the MPU map because of
 *   this.
 *   - The last entry is a special case as there is no "next"
 *     entry in the map. In this case, the end of memory is
 *     the implicit boundary. In other words, the last entry
 *     describes the region between the start address of this entry
 *     and the end of memory.
 * - The current implementation has the following limitations:
 *   - All enabled entries are grouped towards the end of the map.
 *     - Except for the last entry, which can be disabled. This marks
 *       the end of the last foreground region. With a disabled
 *       entry, memory after it falls back to the background map
 *       for access control.
 *   - No disabled MPU entries are allowed in between enabled ones.
 *
 * For the foreground map to be valid, its entries must follow these rules:
 * - The start addresses must always be in non-descending order.
 * - The access rights and memory type fields must contain valid values.
 * - The segment field needs to be correct for each entry.
 * - MBZ fields must contain only zeroes.
 * - Although the start address occupies 27 bits of the register,
 *   it does not mean all 27 bits are usable. The macro
 *   XCHAL_MPU_ALIGN_BITS provided by the toolchain indicates
 *   that only bits at and above that position are valid. This
 *   corresponds to the minimum segment size (MINSEGMENTSIZE)
 *   defined in the processor configuration.
 */
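/*
 * To make the two-entries-per-region rule above concrete, here is a hedged
 * sketch of how one region [0x20000000, 0x20010000) could be placed at the
 * end of an otherwise empty map, mirroring what mpu_map_region_add() below
 * does. The addresses and access rights are made up for illustration only.
 */
#if 0 /* Illustrative sketch; not compiled. */
static void example_one_region(struct xtensa_mpu_map *map)
{
	/* Start entry: enabled, carries the region's attributes. */
	xtensa_mpu_entry_set(&map->entries[XTENSA_MPU_NUM_ENTRIES - 2],
			     0x20000000U, true,
			     XTENSA_MPU_ACCESS_P_RW_U_NA,
			     CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE);

	/* End entry: disabled, it only marks where the region stops.
	 * Memory at and above 0x20010000 falls back to the background map.
	 */
	xtensa_mpu_entry_set(&map->entries[XTENSA_MPU_NUM_ENTRIES - 1],
			     0x20010000U, false,
			     XTENSA_MPU_ACCESS_P_NA_U_NA,
			     CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE);
}
#endif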
77
78 #ifndef CONFIG_XTENSA_MPU_ONLY_SOC_RANGES
79 /**
80 * Static definition of all code and data memory regions of the
81 * current Zephyr image. This information must be available and
 * needs to be processed upon MPU initialization.
83 */
84 static const struct xtensa_mpu_range mpu_zephyr_ranges[] = {
85 /* Region for vector handlers. */
86 {
87 .start = (uintptr_t)XCHAL_VECBASE_RESET_VADDR,
88 /*
89 * There is nothing from the Xtensa overlay about how big
90 * the vector handler region is. So we make an assumption
91 * that vecbase and .text are contiguous.
92 *
93 * SoC can override as needed if this is not the case,
94 * especially if the SoC reset/startup code relocates
95 * vecbase.
96 */
97 .end = (uintptr_t)__text_region_start,
98 .access_rights = XTENSA_MPU_ACCESS_P_RX_U_RX,
99 .memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
100 },
101 /*
102 * Mark the zephyr execution regions (data, bss, noinit, etc.)
103 * cacheable, read / write and non-executable
104 */
105 {
106 /* This includes .data, .bss and various kobject sections. */
107 .start = (uintptr_t)_image_ram_start,
108 .end = (uintptr_t)_image_ram_end,
109 .access_rights = XTENSA_MPU_ACCESS_P_RW_U_NA,
110 .memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
111 },
112 #if K_HEAP_MEM_POOL_SIZE > 0
113 /* System heap memory */
114 {
115 .start = (uintptr_t)_heap_start,
116 .end = (uintptr_t)_heap_end,
117 .access_rights = XTENSA_MPU_ACCESS_P_RW_U_NA,
118 .memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
119 },
120 #endif
121 /* Mark text segment cacheable, read only and executable */
122 {
123 .start = (uintptr_t)__text_region_start,
124 .end = (uintptr_t)__text_region_end,
125 .access_rights = XTENSA_MPU_ACCESS_P_RX_U_RX,
126 .memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
127 },
128 /* Mark rodata segment cacheable, read only and non-executable */
129 {
130 .start = (uintptr_t)__rodata_region_start,
131 .end = (uintptr_t)__rodata_region_end,
132 .access_rights = XTENSA_MPU_ACCESS_P_RO_U_RO,
133 .memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
134 },
135 };
136 #endif /* !CONFIG_XTENSA_MPU_ONLY_SOC_RANGES */
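/*
 * The ranges above only cover the base Zephyr image. SoCs supply their own
 * table through xtensa_soc_mpu_ranges/xtensa_soc_mpu_ranges_num, which
 * xtensa_mpu_init() below walks the same way. A hedged sketch of what such a
 * table might look like follows; the addresses, the file location and the
 * exact type of the count variable are assumptions, only the symbol and field
 * names come from this file.
 */
#if 0 /* Illustrative sketch; not compiled. Addresses are made up. */
/* e.g. in the SoC's own sources */
const struct xtensa_mpu_range xtensa_soc_mpu_ranges[] = {
	{
		/* Hypothetical memory-mapped peripheral window. */
		.start = 0x60000000U,
		.end   = 0x60100000U,
		.access_rights = XTENSA_MPU_ACCESS_P_RW_U_NA,
		.memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
	},
};

const int xtensa_soc_mpu_ranges_num = ARRAY_SIZE(xtensa_soc_mpu_ranges);
#endif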
137
138 /**
139 * Return the pointer to the entry encompassing @a addr out of an array of MPU entries.
140 *
 * This returns the entry whose start address is less than or equal to @a addr,
 * and where @a addr is less than the start address of the next entry.
143 *
144 * @param[in] entries Array of MPU entries.
145 * @param[in] addr Address to be matched to one background entry.
146 * @param[in] first_enabled_idx The index of the first enabled entry.
147 * Use 0 if not sure.
 * @param[out] exact Set to true if the address matches an entry's start address exactly.
 *                   Pass NULL if not needed.
 * @param[out] entry_idx Set to the index of the matching entry if one is found.
 *                       Pass NULL if not needed.
152 *
153 * @return Pointer to the map entry encompassing @a addr, or NULL if no such entry found.
154 */
155 static const
struct xtensa_mpu_entry *check_addr_in_mpu_entries(const struct xtensa_mpu_entry *entries,
157 uintptr_t addr, uint8_t first_enabled_idx,
158 bool *exact, uint8_t *entry_idx)
159 {
160 const struct xtensa_mpu_entry *ret = NULL;
161 uintptr_t s_addr, e_addr;
162 uint8_t idx;
163
164 if (first_enabled_idx >= XTENSA_MPU_NUM_ENTRIES) {
165 goto out_null;
166 }
167
168 if (addr < xtensa_mpu_entry_start_address_get(&entries[first_enabled_idx])) {
/* Before the start address of the very first enabled entry, so no match. */
170 goto out_null;
171 }
172
173 /* Loop through the map except the last entry (which is a special case). */
174 for (idx = first_enabled_idx; idx < (XTENSA_MPU_NUM_ENTRIES - 1); idx++) {
175 s_addr = xtensa_mpu_entry_start_address_get(&entries[idx]);
176 e_addr = xtensa_mpu_entry_start_address_get(&entries[idx + 1]);
177
178 if ((addr >= s_addr) && (addr < e_addr)) {
179 ret = &entries[idx];
180 goto out;
181 }
182 }
183
184 idx = XTENSA_MPU_NUM_ENTRIES - 1;
185 s_addr = xtensa_mpu_entry_start_address_get(&entries[idx]);
186 if (addr >= s_addr) {
187 /* Last entry encompasses the start address to end of memory. */
188 ret = &entries[idx];
189 }
190
191 out:
192 if (ret != NULL) {
193 if (exact != NULL) {
194 if (addr == s_addr) {
195 *exact = true;
196 } else {
197 *exact = false;
198 }
199 }
200
201 if (entry_idx != NULL) {
202 *entry_idx = idx;
203 }
204 }
205
206 out_null:
207 return ret;
208 }
209
210 /**
211 * Find the first enabled MPU entry.
212 *
213 * @param entries Array of MPU entries with XTENSA_MPU_NUM_ENTRIES elements.
214 *
215 * @return Index of the first enabled entry.
216 * @retval XTENSA_MPU_NUM_ENTRIES if no entry is enabled.
217 */
static inline uint8_t find_first_enabled_entry(const struct xtensa_mpu_entry *entries)
219 {
220 int first_enabled_idx;
221
222 for (first_enabled_idx = 0; first_enabled_idx < XTENSA_MPU_NUM_ENTRIES;
223 first_enabled_idx++) {
224 if (entries[first_enabled_idx].as.p.enable) {
225 break;
226 }
227 }
228
229 return first_enabled_idx;
230 }
231
232 /**
233 * Compare two MPU entries.
234 *
235 * This is used by qsort to compare two MPU entries on their ordering
236 * based on starting address.
237 *
238 * @param a First MPU entry.
239 * @param b Second MPU entry.
240 *
241 * @retval -1 First address is less than second address.
242 * @retval 0 First address is equal to second address.
 * @retval 1 First address is greater than second address.
244 */
static int compare_entries(const void *a, const void *b)
246 {
247 struct xtensa_mpu_entry *e_a = (struct xtensa_mpu_entry *)a;
248 struct xtensa_mpu_entry *e_b = (struct xtensa_mpu_entry *)b;
249
250 uintptr_t addr_a = xtensa_mpu_entry_start_address_get(e_a);
251 uintptr_t addr_b = xtensa_mpu_entry_start_address_get(e_b);
252
253 if (addr_a < addr_b) {
254 return -1;
255 } else if (addr_a == addr_b) {
256 return 0;
257 } else {
258 return 1;
259 }
260 }
261
262 /**
 * Sort the MPU entries based on starting address.
264 *
265 * This sorts the MPU entries in ascending order of starting address.
266 * After sorting, it rewrites the segment numbers of all entries.
267 */
static void sort_entries(struct xtensa_mpu_entry *entries)
269 {
270 qsort(entries, XTENSA_MPU_NUM_ENTRIES, sizeof(entries[0]), compare_entries);
271
272 for (uint32_t idx = 0; idx < XTENSA_MPU_NUM_ENTRIES; idx++) {
273 /* Segment value must correspond to the index. */
274 entries[idx].at.p.segment = idx;
275 }
276 }
277
278 /**
279 * Consolidate the MPU entries.
280 *
281 * This removes consecutive entries where the attributes are the same.
282 *
283 * @param entries Array of MPU entries with XTENSA_MPU_NUM_ENTRIES elements.
284 * @param first_enabled_idx Index of first enabled entry.
285 *
286 * @return Index of the first enabled entry after consolidation.
287 */
static uint8_t consolidate_entries(struct xtensa_mpu_entry *entries,
289 uint8_t first_enabled_idx)
290 {
291 uint8_t new_first;
292 uint8_t idx_0 = first_enabled_idx;
293 uint8_t idx_1 = first_enabled_idx + 1;
294 bool to_consolidate = false;
295
/* For each pair of consecutive entries... */
297 while (idx_1 < XTENSA_MPU_NUM_ENTRIES) {
298 struct xtensa_mpu_entry *entry_0 = &entries[idx_0];
299 struct xtensa_mpu_entry *entry_1 = &entries[idx_1];
300 bool mark_disable_0 = false;
301 bool mark_disable_1 = false;
302
303 if (xtensa_mpu_entries_has_same_attributes(entry_0, entry_1)) {
304 /*
 * If both entries have the same attributes (access rights and
 * memory type), they can be consolidated into one by removing
 * the higher-indexed one.
308 */
309 mark_disable_1 = true;
310 } else if (xtensa_mpu_entries_has_same_address(entry_0, entry_1)) {
311 /*
 * If both entries have the same start address, the higher-indexed
 * one always overrides the lower one, so remove the lower-indexed
 * one.
315 */
316 mark_disable_0 = true;
317 }
318
319 /*
320 * Marking an entry as disabled here so it can be removed later.
321 *
322 * The MBZ field of the AS register is re-purposed to indicate that
323 * this is an entry to be removed.
324 */
325 if (mark_disable_1) {
326 /* Remove the higher indexed entry. */
327 to_consolidate = true;
328
329 entry_1->as.p.mbz = 1U;
330
331 /* Skip ahead for next comparison. */
332 idx_1++;
333 continue;
334 } else if (mark_disable_0) {
335 /* Remove the lower indexed entry. */
336 to_consolidate = true;
337
338 entry_0->as.p.mbz = 1U;
339 }
340
341 idx_0 = idx_1;
342 idx_1++;
343 }
344
345 if (to_consolidate) {
346 uint8_t read_idx = XTENSA_MPU_NUM_ENTRIES - 1;
347 uint8_t write_idx = XTENSA_MPU_NUM_ENTRIES;
348
349 /* Go through the map from the end and copy enabled entries in place. */
350 while (read_idx >= first_enabled_idx) {
351 struct xtensa_mpu_entry *entry_rd = &entries[read_idx];
352
353 if (entry_rd->as.p.mbz != 1U) {
354 struct xtensa_mpu_entry *entry_wr;
355
356 write_idx--;
357 entry_wr = &entries[write_idx];
358
359 *entry_wr = *entry_rd;
360 entry_wr->at.p.segment = write_idx;
361 }
362
363 read_idx--;
364 }
365
366 /* New first enabled entry is where the last written entry is. */
367 new_first = write_idx;
368
369 for (idx_0 = 0; idx_0 < new_first; idx_0++) {
370 struct xtensa_mpu_entry *e = &entries[idx_0];
371
372 /* Shortcut to zero out address and enabled bit. */
373 e->as.raw = 0U;
374
375 /* Segment value must correspond to the index. */
376 e->at.p.segment = idx_0;
377
378 /* No access at all for both kernel and user modes. */
379 e->at.p.access_rights = XTENSA_MPU_ACCESS_P_NA_U_NA;
380
381 /* Use default memory type for disabled entries. */
382 e->at.p.memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE;
383 }
384 } else {
/* No need to consolidate entries. Map is the same as before. */
386 new_first = first_enabled_idx;
387 }
388
389 return new_first;
390 }
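/*
 * A hedged worked example of the consolidation above (addresses and
 * attributes made up): given enabled entries
 *   (0x20000000, P_RW_U_NA), (0x20008000, P_RW_U_NA), (0x20010000, P_RO_U_RO)
 * the second entry repeats its predecessor's attributes, so it is marked via
 * its MBZ bit and dropped; the remaining entries are packed towards the end
 * of the map, their segment fields are rewritten, and the returned first
 * enabled index moves up by one.
 */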
391
392 /**
393 * Add a memory region to the MPU map.
394 *
395 * This adds a memory region to the MPU map, by setting the appropriate
396 * start and end entries. This may re-use existing entries or add new
397 * entries to the map.
398 *
399 * @param[in,out] map Pointer to MPU map.
400 * @param[in] start_addr Start address of the region.
401 * @param[in] end_addr End address of the region.
402 * @param[in] access_rights Access rights of this region.
403 * @param[in] memory_type Memory type of this region.
404 * @param[out] first_idx Return index of first enabled entry if not NULL.
405 *
406 * @retval 0 Successful in adding the region.
407 * @retval -EINVAL Invalid values in function arguments.
408 */
static int mpu_map_region_add(struct xtensa_mpu_map *map,
410 uintptr_t start_addr, uintptr_t end_addr,
411 uint32_t access_rights, uint32_t memory_type,
412 uint8_t *first_idx)
413 {
414 int ret;
415 bool exact_s, exact_e;
416 uint8_t idx_s, idx_e, first_enabled_idx;
417 struct xtensa_mpu_entry *entry_slot_s, *entry_slot_e, prev_entry;
418
419 struct xtensa_mpu_entry *entries = map->entries;
420
421 if (start_addr >= end_addr) {
422 ret = -EINVAL;
423 goto out;
424 }
425
426 first_enabled_idx = find_first_enabled_entry(entries);
427 if (first_enabled_idx >= XTENSA_MPU_NUM_ENTRIES) {
428
429 /*
 * If the last entry in the map is not enabled and its start
 * address is zero, we can assume the map has not been populated
 * at all. This is because we group all enabled entries towards
 * the end of the map.
434 */
435 struct xtensa_mpu_entry *last_entry = &entries[XTENSA_MPU_NUM_ENTRIES - 1];
436
437 if (!xtensa_mpu_entry_enable_get(last_entry) &&
438 (xtensa_mpu_entry_start_address_get(last_entry) == 0U)) {
439 /* Empty table, so populate the entries as-is. */
440 if (end_addr == 0xFFFFFFFFU) {
441 /*
442 * Region goes to end of memory, so only need to
443 * program one entry.
444 */
445 entry_slot_s = &entries[XTENSA_MPU_NUM_ENTRIES - 1];
446
447 xtensa_mpu_entry_set(entry_slot_s, start_addr, true,
448 access_rights, memory_type);
449 first_enabled_idx = XTENSA_MPU_NUM_ENTRIES - 1;
450 goto end;
451 } else {
452 /*
453 * Populate the last two entries to indicate
454 * a memory region. Notice that the second entry
455 * is not enabled as it is merely marking the end of
 * a region and is not the start of another
457 * enabled MPU region.
458 */
459 entry_slot_s = &entries[XTENSA_MPU_NUM_ENTRIES - 2];
460 entry_slot_e = &entries[XTENSA_MPU_NUM_ENTRIES - 1];
461
462 xtensa_mpu_entry_set(entry_slot_s, start_addr, true,
463 access_rights, memory_type);
464 xtensa_mpu_entry_set(entry_slot_e, end_addr, false,
465 XTENSA_MPU_ACCESS_P_NA_U_NA,
466 CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE);
467 first_enabled_idx = XTENSA_MPU_NUM_ENTRIES - 2;
468 goto end;
469 }
470
471 ret = 0;
472 goto out;
473 }
474
475 first_enabled_idx = consolidate_entries(entries, first_enabled_idx);
476
477 if (first_enabled_idx >= XTENSA_MPU_NUM_ENTRIES) {
478 ret = -EINVAL;
479 goto out;
480 }
481 }
482
483 entry_slot_s = (struct xtensa_mpu_entry *)
484 check_addr_in_mpu_entries(entries, start_addr, first_enabled_idx,
485 &exact_s, &idx_s);
486 entry_slot_e = (struct xtensa_mpu_entry *)
487 check_addr_in_mpu_entries(entries, end_addr, first_enabled_idx,
488 &exact_e, &idx_e);
489
490 __ASSERT_NO_MSG(entry_slot_s != NULL);
491 __ASSERT_NO_MSG(entry_slot_e != NULL);
492 __ASSERT_NO_MSG(start_addr < end_addr);
493
494 if ((entry_slot_s == NULL) || (entry_slot_e == NULL)) {
495 ret = -EINVAL;
496 goto out;
497 }
498
499 /*
 * Figure out if we need to add new slots for either address.
 * If the addresses exactly match addresses already in the map,
 * we can reuse those entries without adding new ones.
503 */
504 if (!exact_s || !exact_e) {
505 uint8_t needed = (exact_s ? 0 : 1) + (exact_e ? 0 : 1);
506
507 /* Check if there are enough empty slots. */
508 if (first_enabled_idx < needed) {
509 ret = -ENOMEM;
510 goto out;
511 }
512 }
513
514 /*
515 * Need to keep track of the attributes of the memory region before
516 * we start adding entries, as we will need to apply the same
 * attributes to the "ending address" entry to preserve the attributes
 * of the existing map.
519 */
520 prev_entry = *entry_slot_e;
521
522 /*
523 * Entry for beginning of new region.
524 *
525 * - Use existing entry if start addresses are the same for existing
526 * and incoming region. We can simply reuse the entry.
527 * - Add an entry if incoming region is within existing region.
528 */
529 if (!exact_s) {
530 /*
531 * Put a new entry before the first enabled entry.
532 * We will sort the entries later.
533 */
534 first_enabled_idx--;
535
536 entry_slot_s = &entries[first_enabled_idx];
537 }
538
539 xtensa_mpu_entry_set(entry_slot_s, start_addr, true, access_rights, memory_type);
540
541 /*
542 * Entry for ending of region.
543 *
544 * - Add an entry if incoming region is within existing region.
545 * - If the end address matches exactly to existing entry, there is
546 * no need to do anything.
547 */
548 if (!exact_e) {
549 /*
550 * Put a new entry before the first enabled entry.
551 * We will sort the entries later.
552 */
553 first_enabled_idx--;
554
555 entry_slot_e = &entries[first_enabled_idx];
556
557 /*
558 * Since we are going to punch a hole in the map,
 * we need to preserve the attributes of the existing region
 * between the end address and the next entry.
561 */
562 *entry_slot_e = prev_entry;
563 xtensa_mpu_entry_start_address_set(entry_slot_e, end_addr);
564 }
565
566 /* Sort the entries in ascending order of starting address */
567 sort_entries(entries);
568
569 /*
570 * Need to figure out where the start and end entries are as sorting
571 * may change their positions.
572 */
573 entry_slot_s = (struct xtensa_mpu_entry *)
574 check_addr_in_mpu_entries(entries, start_addr, first_enabled_idx,
575 &exact_s, &idx_s);
576 entry_slot_e = (struct xtensa_mpu_entry *)
577 check_addr_in_mpu_entries(entries, end_addr, first_enabled_idx,
578 &exact_e, &idx_e);
579
580 /* Both must be exact match. */
581 __ASSERT_NO_MSG(exact_s);
582 __ASSERT_NO_MSG(exact_e);
583
584 if (end_addr == 0xFFFFFFFFU) {
585 /*
586 * If end_addr = 0xFFFFFFFFU, entry_slot_e and idx_e both
587 * point to the last slot. Because the incoming region goes
588 * to the end of memory, we simply cheat by including
589 * the last entry by incrementing idx_e so the loop to
590 * update entries will change the attribute of last entry
591 * in map.
592 */
593 idx_e++;
594 }
595
596 /*
 * Any existing entries between the "newly" populated start and
598 * end entries must bear the same attributes. So modify them
599 * here.
600 */
601 for (int idx = idx_s + 1; idx < idx_e; idx++) {
602 xtensa_mpu_entry_attributes_set(&entries[idx], access_rights, memory_type);
603 }
604
605 end:
606 if (first_idx != NULL) {
607 *first_idx = first_enabled_idx;
608 }
609
610 ret = 0;
611
612 out:
613 return ret;
614 }
615
616 /**
617 * Write the MPU map to hardware.
618 *
619 * @param map Pointer to foreground MPU map.
620 */
621 #ifdef CONFIG_USERSPACE
/* With userspace enabled, the pointer to the per memory domain MPU map is
 * stashed inside the thread struct. If we still only took struct xtensa_mpu_map
 * as the argument, a wrapper function would be needed. To avoid the cost of
 * calling that wrapper, take the thread pointer directly as the argument when
 * userspace is enabled (see the sketch after this function). Writing the map
 * to hardware is already a costly operation per context switch, so every
 * little bit helps.
 */
void xtensa_mpu_map_write(struct k_thread *thread)
630 #else
631 void xtensa_mpu_map_write(struct xtensa_mpu_map *map)
632 #endif
633 {
634 int entry;
635 k_spinlock_key_t key;
636
637 key = k_spin_lock(&xtensa_mpu_lock);
638
639 #ifdef CONFIG_USERSPACE
640 struct xtensa_mpu_map *map = thread->arch.mpu_map;
641 #endif
642
643 /*
644 * Clear MPU entries first, then write MPU entries in reverse order.
645 *
646 * Remember that the boundary of each memory region is marked by
647 * two consecutive entries, and that the addresses of all entries
648 * must not be in descending order (i.e. equal or increasing).
649 * To ensure this, we clear out the entries first then write them
650 * in reverse order. This avoids any intermediate invalid
651 * configuration with regard to ordering.
652 */
653 for (entry = 0; entry < XTENSA_MPU_NUM_ENTRIES; entry++) {
654 __asm__ volatile("wptlb %0, %1\n\t" : : "a"(entry), "a"(0));
655 }
656
657 for (entry = XTENSA_MPU_NUM_ENTRIES - 1; entry >= 0; entry--) {
658 __asm__ volatile("wptlb %0, %1\n\t"
659 : : "a"(map->entries[entry].at), "a"(map->entries[entry].as));
660 }
661
662 k_spin_unlock(&xtensa_mpu_lock, key);
663 }
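/*
 * A hedged sketch of the wrapper that the thread-pointer signature above
 * avoids under CONFIG_USERSPACE; the function name is hypothetical and
 * nothing like it exists in this file.
 */
#if 0 /* Illustrative sketch; not compiled. */
static inline void xtensa_mpu_map_write_thread(struct k_thread *thread)
{
	/* Extra call and dereference on every context switch. */
	xtensa_mpu_map_write(thread->arch.mpu_map);
}
#endif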
664
665 /**
666 * Perform necessary steps to enable MPU.
667 */
void xtensa_mpu_init(void)
669 {
670 unsigned int entry;
671 uint8_t first_enabled_idx;
672
673 /* Disable all foreground segments before we start configuration. */
674 xtensa_mpu_mpuenb_write(0);
675
676 /*
677 * Clear the foreground MPU map so we can populate it later with valid entries.
678 * Note that we still need to make sure the map is valid, and cannot be totally
679 * zeroed.
680 */
681 for (entry = 0; entry < XTENSA_MPU_NUM_ENTRIES; entry++) {
682 /* Make sure to zero out everything as a start, especially the MBZ fields. */
683 struct xtensa_mpu_entry ent = {0};
684
685 /* Segment value must correspond to the index. */
686 ent.at.p.segment = entry;
687
688 /* No access at all for both kernel and user modes. */
689 ent.at.p.access_rights = XTENSA_MPU_ACCESS_P_NA_U_NA;
690
691 /* Use default memory type for disabled entries. */
692 ent.at.p.memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE;
693
694 xtensa_mpu_map_fg_kernel.entries[entry] = ent;
695 }
696
697 #ifndef CONFIG_XTENSA_MPU_ONLY_SOC_RANGES
698 /*
699 * Add necessary MPU entries for the memory regions of base Zephyr image.
700 */
701 for (entry = 0; entry < ARRAY_SIZE(mpu_zephyr_ranges); entry++) {
702 const struct xtensa_mpu_range *range = &mpu_zephyr_ranges[entry];
703
704 int ret = mpu_map_region_add(&xtensa_mpu_map_fg_kernel,
705 range->start, range->end,
706 range->access_rights, range->memory_type,
707 &first_enabled_idx);
708
709 ARG_UNUSED(ret);
710 __ASSERT(ret == 0, "Unable to add region [0x%08x, 0x%08x): %d",
711 (unsigned int)range->start,
712 (unsigned int)range->end,
713 ret);
714 }
715 #endif /* !CONFIG_XTENSA_MPU_ONLY_SOC_RANGES */
716
717 /*
 * Now add the entries for memory regions needed by the SoC.
719 */
720 for (entry = 0; entry < xtensa_soc_mpu_ranges_num; entry++) {
721 const struct xtensa_mpu_range *range = &xtensa_soc_mpu_ranges[entry];
722
723 int ret = mpu_map_region_add(&xtensa_mpu_map_fg_kernel,
724 range->start, range->end,
725 range->access_rights, range->memory_type,
726 &first_enabled_idx);
727
728 ARG_UNUSED(ret);
729 __ASSERT(ret == 0, "Unable to add region [0x%08x, 0x%08x): %d",
730 (unsigned int)range->start,
731 (unsigned int)range->end,
732 ret);
733 }
734
735 /* Consolidate entries so we have a compact map at boot. */
736 consolidate_entries(xtensa_mpu_map_fg_kernel.entries, first_enabled_idx);
737
738 /* Write the map into hardware. There is no turning back now. */
739 #ifdef CONFIG_USERSPACE
740 struct k_thread dummy_map_thread;
741
742 dummy_map_thread.arch.mpu_map = &xtensa_mpu_map_fg_kernel;
743 xtensa_mpu_map_write(&dummy_map_thread);
744 #else
745 xtensa_mpu_map_write(&xtensa_mpu_map_fg_kernel);
746 #endif
747 }
748
749 #ifdef CONFIG_USERSPACE
int arch_mem_domain_init(struct k_mem_domain *domain)
751 {
752 domain->arch.mpu_map = xtensa_mpu_map_fg_kernel;
753
754 return 0;
755 }
756
int arch_mem_domain_max_partitions_get(void)
758 {
/*
 * Because each memory region requires 2 MPU entries to describe,
 * it is hard to figure out how many partitions are available.
 * For example, if all the partitions are contiguous, they only
 * need 2 entries (or 1 if the end of the region already has an
 * entry). If they are all disjoint, they will need (2 * n) entries
 * to describe all of them. So just use CONFIG_MAX_DOMAIN_PARTITIONS
 * here and let the application set this instead (see the worked
 * example after this function).
 */
768 return CONFIG_MAX_DOMAIN_PARTITIONS;
769 }
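/*
 * A hedged worked example of the entry accounting described above, assuming
 * a core with 32 MPU entries (the real number is fixed by the processor
 * configuration): if the kernel ranges already occupy, say, 10 entries, then
 * 11 fully disjoint partitions (2 entries each) would use up the remaining
 * 22 entries, while 11 contiguous partitions sharing the same attributes
 * could collapse to as few as 2 entries after consolidation. This is why the
 * effective maximum is left to CONFIG_MAX_DOMAIN_PARTITIONS rather than
 * derived from the hardware.
 */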
770
int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
772 uint32_t partition_id)
773 {
774 int ret;
775 uint32_t perm;
776 struct k_thread *cur_thread;
777 struct xtensa_mpu_map *map = &domain->arch.mpu_map;
778 struct k_mem_partition *partition = &domain->partitions[partition_id];
779 uintptr_t end_addr = partition->start + partition->size;
780
781 if (end_addr <= partition->start) {
782 ret = -EINVAL;
783 goto out;
784 }
785
786 /*
 * This is simply to get rid of the user permissions and retain
 * whatever the kernel permissions are, so that we won't set
 * the memory region permissions incorrectly, for example,
 * marking a read-only region writable.
791 *
792 * Note that Zephyr does not do RWX partitions so we can treat it
793 * as invalid.
794 */
795 switch (partition->attr) {
796 case XTENSA_MPU_ACCESS_P_RO_U_NA:
797 __fallthrough;
798 case XTENSA_MPU_ACCESS_P_RX_U_NA:
799 __fallthrough;
800 case XTENSA_MPU_ACCESS_P_RO_U_RO:
801 __fallthrough;
802 case XTENSA_MPU_ACCESS_P_RX_U_RX:
803 perm = XTENSA_MPU_ACCESS_P_RO_U_NA;
804 break;
805
806 case XTENSA_MPU_ACCESS_P_RW_U_NA:
807 __fallthrough;
808 case XTENSA_MPU_ACCESS_P_RWX_U_NA:
809 __fallthrough;
810 case XTENSA_MPU_ACCESS_P_RW_U_RWX:
811 __fallthrough;
812 case XTENSA_MPU_ACCESS_P_RW_U_RO:
813 __fallthrough;
814 case XTENSA_MPU_ACCESS_P_RWX_U_RX:
815 __fallthrough;
816 case XTENSA_MPU_ACCESS_P_RW_U_RW:
817 __fallthrough;
818 case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
819 perm = XTENSA_MPU_ACCESS_P_RW_U_NA;
820 break;
821
822 default:
/* _P_X_U_NA is not a valid permission for userspace, so ignore it.
 * _P_NA_U_X becomes _P_NA_U_NA when removing user permissions.
 * _P_WO_U_WO has no kernel-only counterpart, so just force no access.
 * If we get here with _P_NA_U_NA, there is something seriously
 * wrong with the userspace and/or application code.
828 */
829 perm = XTENSA_MPU_ACCESS_P_NA_U_NA;
830 break;
831 }
832
833 /*
834 * Reset the memory region attributes by simply "adding"
835 * a region with default attributes. If entries already
836 * exist for the region, the corresponding entries will
837 * be updated with the default attributes. Or new entries
838 * will be added to carve a hole in existing regions.
839 */
840 ret = mpu_map_region_add(map, partition->start, end_addr,
841 perm,
842 CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
843 NULL);
844
845 /*
 * Need to update the hardware MPU regions if we are removing
 * a partition from the domain of the currently running thread.
848 */
849 cur_thread = _current_cpu->current;
850 if (cur_thread->mem_domain_info.mem_domain == domain) {
851 xtensa_mpu_map_write(cur_thread);
852 }
853
854 out:
855 return ret;
856 }
857
int arch_mem_domain_partition_add(struct k_mem_domain *domain,
859 uint32_t partition_id)
860 {
861 int ret;
862 struct k_thread *cur_thread;
863 struct xtensa_mpu_map *map = &domain->arch.mpu_map;
864 struct k_mem_partition *partition = &domain->partitions[partition_id];
865 uintptr_t end_addr = partition->start + partition->size;
866
867 if (end_addr <= partition->start) {
868 ret = -EINVAL;
869 goto out;
870 }
871
872 ret = mpu_map_region_add(map, partition->start, end_addr,
873 (uint8_t)partition->attr,
874 CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
875 NULL);
876
877 /*
 * Need to update the hardware MPU regions if we are adding
 * a partition to the domain of the currently running thread.
880 *
881 * Note that this function can be called with dummy thread
882 * at boot so we need to avoid writing MPU regions to
883 * hardware.
884 */
885 cur_thread = _current_cpu->current;
886 if (((cur_thread->base.thread_state & _THREAD_DUMMY) != _THREAD_DUMMY) &&
887 (cur_thread->mem_domain_info.mem_domain == domain)) {
888 xtensa_mpu_map_write(cur_thread);
889 }
890
891 out:
892 return ret;
893 }
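/*
 * A hedged sketch of the application-side path that ends up in
 * arch_mem_domain_partition_add() above, via the standard memory domain API.
 * The buffer, its size/alignment handling and the K_MEM_PARTITION_P_RW_U_RW
 * attribute are assumptions here; only the partition_add hook itself is
 * defined in this file.
 */
#if 0 /* Illustrative sketch; not compiled. */
static uint8_t app_buf[XCHAL_MPU_ALIGN] __aligned(XCHAL_MPU_ALIGN);

K_MEM_PARTITION_DEFINE(app_part, app_buf, sizeof(app_buf),
		       K_MEM_PARTITION_P_RW_U_RW);

static struct k_mem_domain app_domain;

static void example_domain_setup(void)
{
	struct k_mem_partition *parts[] = { &app_part };

	/* Initializing the domain with this partition eventually calls
	 * into arch_mem_domain_partition_add() on this architecture.
	 */
	(void)k_mem_domain_init(&app_domain, ARRAY_SIZE(parts), parts);
}
#endif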
894
int arch_mem_domain_thread_add(struct k_thread *thread)
896 {
897 int ret = 0;
898
899 /* New memory domain we are being added to */
900 struct k_mem_domain *domain = thread->mem_domain_info.mem_domain;
901
902 /*
 * This is only set for threads that are migrating from some other
 * memory domain; for new threads this is NULL.
905 */
906 struct xtensa_mpu_map *old_map = thread->arch.mpu_map;
907
908 bool is_user = (thread->base.user_options & K_USER) != 0;
909 bool is_migration = (old_map != NULL) && is_user;
910
911 uintptr_t stack_end_addr = thread->stack_info.start + thread->stack_info.size;
912
913 if (stack_end_addr < thread->stack_info.start) {
914 /* Account for wrapping around back to 0. */
915 stack_end_addr = 0xFFFFFFFFU;
916 }
917
918 /*
919 * Allow USER access to the thread's stack in its new domain if
920 * we are migrating. If we are not migrating this is done in
921 * xtensa_user_stack_perms().
922 */
923 if (is_migration) {
924 /* Add stack to new domain's MPU map. */
925 ret = mpu_map_region_add(&domain->arch.mpu_map,
926 thread->stack_info.start, stack_end_addr,
927 XTENSA_MPU_ACCESS_P_RW_U_RW,
928 CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
929 NULL);
930
/* If this fails, it is probably due to no more available slots in the MPU map. */
932 __ASSERT_NO_MSG(ret == 0);
933 }
934
935 thread->arch.mpu_map = &domain->arch.mpu_map;
936
937 /*
 * Remove the thread stack from the old memory domain if we are
 * migrating away from it. This is done by simply removing
 * USER access from the region.
941 */
942 if (is_migration) {
943 /*
944 * Remove stack from old MPU map by...
945 * "adding" a new memory region to the map
946 * as this carves a hole in the existing map.
947 */
948 ret = mpu_map_region_add(old_map,
949 thread->stack_info.start, stack_end_addr,
950 XTENSA_MPU_ACCESS_P_RW_U_NA,
951 CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
952 NULL);
953 }
954
955 /*
 * Need to switch to the new MPU map if this is the currently
 * running thread.
958 */
959 if (thread == _current_cpu->current) {
960 xtensa_mpu_map_write(thread);
961 }
962
963 return ret;
964 }
965
int arch_mem_domain_thread_remove(struct k_thread *thread)
967 {
968 uintptr_t stack_end_addr;
969 int ret;
970
971 struct k_mem_domain *domain = thread->mem_domain_info.mem_domain;
972
973 if ((thread->base.user_options & K_USER) == 0) {
974 ret = 0;
975 goto out;
976 }
977
978 if ((thread->base.thread_state & _THREAD_DEAD) == 0) {
979 /* Thread is migrating to another memory domain and not
980 * exiting for good; we weren't called from
981 * z_thread_abort(). Resetting the stack region will
982 * take place in the forthcoming thread_add() call.
983 */
984 ret = 0;
985 goto out;
986 }
987
988 stack_end_addr = thread->stack_info.start + thread->stack_info.size;
989 if (stack_end_addr < thread->stack_info.start) {
990 /* Account for wrapping around back to 0. */
991 stack_end_addr = 0xFFFFFFFFU;
992 }
993
994 /*
995 * Restore permissions on the thread's stack area since it is no
996 * longer a member of the domain.
997 */
998 ret = mpu_map_region_add(&domain->arch.mpu_map,
999 thread->stack_info.start, stack_end_addr,
1000 XTENSA_MPU_ACCESS_P_RW_U_NA,
1001 CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
1002 NULL);
1003
1004 xtensa_mpu_map_write(thread);
1005
1006 out:
1007 return ret;
1008 }
1009
int arch_buffer_validate(const void *addr, size_t size, int write)
1011 {
1012 uintptr_t aligned_addr;
1013 size_t aligned_size, addr_offset;
1014 int ret = 0;
1015
1016 /* addr/size arbitrary, fix this up into an aligned region */
1017 aligned_addr = ROUND_DOWN((uintptr_t)addr, XCHAL_MPU_ALIGN);
1018 addr_offset = (uintptr_t)addr - aligned_addr;
1019 aligned_size = ROUND_UP(size + addr_offset, XCHAL_MPU_ALIGN);
1020
1021 for (size_t offset = 0; offset < aligned_size;
1022 offset += XCHAL_MPU_ALIGN) {
1023 uint32_t probed = xtensa_pptlb_probe(aligned_addr + offset);
1024
1025 if ((probed & XTENSA_MPU_PROBE_VALID_ENTRY_MASK) == 0U) {
1026 /* There is no foreground or background entry associated
1027 * with the region.
1028 */
1029 ret = -EPERM;
1030 goto out;
1031 }
1032
1033 uint8_t access_rights = (probed & XTENSA_MPU_PPTLB_ACCESS_RIGHTS_MASK)
1034 >> XTENSA_MPU_PPTLB_ACCESS_RIGHTS_SHIFT;
1035
1036 if (write) {
1037 /* Need to check write permission. */
1038 switch (access_rights) {
1039 case XTENSA_MPU_ACCESS_P_WO_U_WO:
1040 __fallthrough;
1041 case XTENSA_MPU_ACCESS_P_RW_U_RWX:
1042 __fallthrough;
1043 case XTENSA_MPU_ACCESS_P_RW_U_RW:
1044 __fallthrough;
1045 case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
1046 /* These permissions are okay. */
1047 break;
1048 default:
1049 ret = -EPERM;
1050 goto out;
1051 }
1052 } else {
1053 /* Only check read permission. */
1054 switch (access_rights) {
1055 case XTENSA_MPU_ACCESS_P_RW_U_RWX:
1056 __fallthrough;
1057 case XTENSA_MPU_ACCESS_P_RW_U_RO:
1058 __fallthrough;
1059 case XTENSA_MPU_ACCESS_P_RWX_U_RX:
1060 __fallthrough;
1061 case XTENSA_MPU_ACCESS_P_RO_U_RO:
1062 __fallthrough;
1063 case XTENSA_MPU_ACCESS_P_RX_U_RX:
1064 __fallthrough;
1065 case XTENSA_MPU_ACCESS_P_RW_U_RW:
1066 __fallthrough;
1067 case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
1068 /* These permissions are okay. */
1069 break;
1070 default:
1071 ret = -EPERM;
1072 goto out;
1073 }
1074 }
1075 }
1076
1077 out:
1078 return ret;
1079 }
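/*
 * A hedged usage sketch for arch_buffer_validate() above, with a worked
 * example of the alignment math assuming XCHAL_MPU_ALIGN == 0x1000 (the real
 * value is fixed by the core configuration): addr = 0x20001234 and size = 0x20
 * give aligned_addr = 0x20001000, addr_offset = 0x234 and aligned_size =
 * 0x1000, so a single xtensa_pptlb_probe() at 0x20001000 decides the result.
 * The buffer address below is made up.
 */
#if 0 /* Illustrative sketch; not compiled. */
static void example_validate(void)
{
	const void *user_buf = (const void *)0x20001234;

	if (arch_buffer_validate(user_buf, 0x20, 1) == 0) {
		/* Every MPU-aligned block covering the buffer is writable
		 * from user mode, so it is safe to copy into it.
		 */
	}
}
#endif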
1080
bool xtensa_mem_kernel_has_access(void *addr, size_t size, int write)
1082 {
1083 uintptr_t aligned_addr;
1084 size_t aligned_size, addr_offset;
1085 bool ret = true;
1086
1087 /* addr/size arbitrary, fix this up into an aligned region */
1088 aligned_addr = ROUND_DOWN((uintptr_t)addr, XCHAL_MPU_ALIGN);
1089 addr_offset = (uintptr_t)addr - aligned_addr;
1090 aligned_size = ROUND_UP(size + addr_offset, XCHAL_MPU_ALIGN);
1091
1092 for (size_t offset = 0; offset < aligned_size;
1093 offset += XCHAL_MPU_ALIGN) {
1094 uint32_t probed = xtensa_pptlb_probe(aligned_addr + offset);
1095
1096 if ((probed & XTENSA_MPU_PROBE_VALID_ENTRY_MASK) == 0U) {
1097 /* There is no foreground or background entry associated
1098 * with the region.
1099 */
1100 ret = false;
1101 goto out;
1102 }
1103
1104 uint8_t access_rights = (probed & XTENSA_MPU_PPTLB_ACCESS_RIGHTS_MASK)
1105 >> XTENSA_MPU_PPTLB_ACCESS_RIGHTS_SHIFT;
1106
1107
1108 if (write != 0) {
1109 /* Need to check write permission. */
1110 switch (access_rights) {
1111 case XTENSA_MPU_ACCESS_P_RW_U_NA:
1112 __fallthrough;
1113 case XTENSA_MPU_ACCESS_P_RWX_U_NA:
1114 __fallthrough;
1115 case XTENSA_MPU_ACCESS_P_WO_U_WO:
1116 __fallthrough;
1117 case XTENSA_MPU_ACCESS_P_RW_U_RWX:
1118 __fallthrough;
1119 case XTENSA_MPU_ACCESS_P_RW_U_RO:
1120 __fallthrough;
1121 case XTENSA_MPU_ACCESS_P_RWX_U_RX:
1122 __fallthrough;
1123 case XTENSA_MPU_ACCESS_P_RW_U_RW:
1124 __fallthrough;
1125 case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
1126 /* These permissions are okay. */
1127 break;
1128 default:
1129 ret = false;
1130 goto out;
1131 }
1132 } else {
1133 /* Only check read permission. */
1134 switch (access_rights) {
1135 case XTENSA_MPU_ACCESS_P_RO_U_NA:
1136 __fallthrough;
1137 case XTENSA_MPU_ACCESS_P_RX_U_NA:
1138 __fallthrough;
1139 case XTENSA_MPU_ACCESS_P_RW_U_NA:
1140 __fallthrough;
1141 case XTENSA_MPU_ACCESS_P_RWX_U_NA:
1142 __fallthrough;
1143 case XTENSA_MPU_ACCESS_P_RW_U_RWX:
1144 __fallthrough;
1145 case XTENSA_MPU_ACCESS_P_RW_U_RO:
1146 __fallthrough;
1147 case XTENSA_MPU_ACCESS_P_RWX_U_RX:
1148 __fallthrough;
1149 case XTENSA_MPU_ACCESS_P_RO_U_RO:
1150 __fallthrough;
1151 case XTENSA_MPU_ACCESS_P_RX_U_RX:
1152 __fallthrough;
1153 case XTENSA_MPU_ACCESS_P_RW_U_RW:
1154 __fallthrough;
1155 case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
1156 /* These permissions are okay. */
1157 break;
1158 default:
1159 ret = false;
1160 goto out;
1161 }
1162 }
1163 }
1164
1165 out:
1166 return ret;
1167 }
1168
1169
void xtensa_user_stack_perms(struct k_thread *thread)
1171 {
1172 int ret;
1173
1174 uintptr_t stack_end_addr = thread->stack_info.start + thread->stack_info.size;
1175
1176 if (stack_end_addr < thread->stack_info.start) {
1177 /* Account for wrapping around back to 0. */
1178 stack_end_addr = 0xFFFFFFFFU;
1179 }
1180
1181 (void)memset((void *)thread->stack_info.start,
1182 (IS_ENABLED(CONFIG_INIT_STACKS)) ? 0xAA : 0x00,
1183 thread->stack_info.size - thread->stack_info.delta);
1184
1185 /* Add stack to new domain's MPU map. */
1186 ret = mpu_map_region_add(thread->arch.mpu_map,
1187 thread->stack_info.start, stack_end_addr,
1188 XTENSA_MPU_ACCESS_P_RW_U_RW,
1189 CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
1190 NULL);
1191
1192 xtensa_mpu_map_write(thread);
1193
/* If this fails, it is probably due to no more available slots in the MPU map. */
1195 ARG_UNUSED(ret);
1196 __ASSERT_NO_MSG(ret == 0);
1197 }
1198
1199 #endif /* CONFIG_USERSPACE */
1200