1 /*
2  * Copyright (c) 2023 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <stdint.h>
8 #include <stdlib.h>
9 #include <string.h>
10 
11 #include <zephyr/kernel.h>
12 #include <zephyr/spinlock.h>
13 #include <zephyr/toolchain.h>
14 #include <zephyr/arch/xtensa/arch_inlines.h>
15 #include <zephyr/arch/xtensa/mpu.h>
16 #include <zephyr/linker/linker-defs.h>
17 #include <zephyr/sys/__assert.h>
18 #include <zephyr/sys/util_macro.h>
19 
20 #include <xtensa/corebits.h>
21 #include <xtensa/config/core-matmap.h>
22 #include <xtensa/config/core-isa.h>
23 #include <xtensa_mpu_priv.h>
24 
25 #ifdef CONFIG_USERSPACE
26 BUILD_ASSERT((CONFIG_PRIVILEGED_STACK_SIZE > 0) &&
27 	     (CONFIG_PRIVILEGED_STACK_SIZE % XCHAL_MPU_ALIGN) == 0);
28 #endif
29 
30 extern char _heap_end[];
31 extern char _heap_start[];
32 
33 /** MPU foreground map for kernel mode. */
34 static struct xtensa_mpu_map xtensa_mpu_map_fg_kernel;
35 
36 /*
37  * Additional information about the MPU maps: foreground and background
38  * maps.
39  *
40  *
41  * Some things to keep in mind:
42  * - Each MPU region is described by TWO entries:
43  *   [entry_a_address, entry_b_address). For contiguous memory regions,
44  *   this should not be much of an issue. However, disjoint memory regions
45  *   "waste" another entry to describe the end of those regions.
46  *   We might run out of available entries in the MPU map because of
47  *   this.
48  *   - The last entry is a special case as there is no more "next"
49  *     entry in the map. In this case, the end of memory is
50  *     the implicit boundary. In other words, the last entry
51  *     describes the region between the start address of this entry
52  *     and the end of memory.
53  * - The current implementation has the following limitations:
54  *   - All enabled entries are grouped towards the end of the map.
55  *     - Except for the last entry, which can be disabled. This marks
56  *       the end of the last foreground region. With a disabled
57  *       entry, memory after this will use the background map
58  *       for access control.
59  *   - No disabled MPU entries allowed in between.
60  *
61  *
62  * For a foreground map to be valid, its entries must follow these rules:
63  * - The start addresses must always be in non-descending order.
64  * - The access rights and memory type fields must contain valid values.
65  * - The segment field needs to be correct for each entry.
66  * - MBZ fields must contain only zeroes.
67  * - Although the start address field occupies 27 bits of the register,
68  *   not all 27 bits are necessarily usable. The macro
69  *   XCHAL_MPU_ALIGN_BITS provided by the toolchain indicates
70  *   that only bits at and to the left of that bit position are valid.
71  *   This corresponds to the minimum segment size (MINSEGMENTSIZE)
72  *   defined in the processor configuration.
73  */
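
/*
 * Illustrative sketch (not normative, addresses chosen purely for example):
 * assuming an 8-entry map and a single foreground region
 * [0x60000000, 0x60100000) that is kernel RWX, the last two entries might
 * look like:
 *
 *   entry[6]: start = 0x60000000, enabled,  P_RWX_U_NA  <- region start
 *   entry[7]: start = 0x60100000, disabled, P_NA_U_NA   <- region end
 *
 * Entries 0..5 are disabled placeholders grouped at the front. Everything
 * at or above 0x60100000 falls back to the background map because the
 * terminating entry is disabled.
 */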
74 
75 #ifndef CONFIG_XTENSA_MPU_ONLY_SOC_RANGES
76 /**
77  * Static definition of all code and data memory regions of the
78  * current Zephyr image. This information must be available and
79  * needs to be processed upon MPU initialization.
80  */
81 static const struct xtensa_mpu_range mpu_zephyr_ranges[] = {
82 	/* Region for vector handlers. */
83 	{
84 		.start = (uintptr_t)XCHAL_VECBASE_RESET_VADDR,
85 		/*
86 		 * There is nothing from the Xtensa overlay about how big
87 		 * the vector handler region is. So we make an assumption
88 		 * that vecbase and .text are contiguous.
89 		 *
90 		 * SoC can override as needed if this is not the case,
91 		 * especially if the SoC reset/startup code relocates
92 		 * vecbase.
93 		 */
94 		.end   = (uintptr_t)__text_region_start,
95 		.access_rights = XTENSA_MPU_ACCESS_P_RX_U_RX,
96 		.memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
97 	},
98 	/*
99 	 * Mark the zephyr execution regions (data, bss, noinit, etc.)
100 	 * cacheable, read / write and non-executable
101 	 */
102 	{
103 		/* This includes .data, .bss and various kobject sections. */
104 		.start = (uintptr_t)_image_ram_start,
105 		.end   = (uintptr_t)_image_ram_end,
106 		.access_rights = XTENSA_MPU_ACCESS_P_RW_U_NA,
107 		.memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
108 	},
109 #if K_HEAP_MEM_POOL_SIZE > 0
110 	/* System heap memory */
111 	{
112 		.start = (uintptr_t)_heap_start,
113 		.end   = (uintptr_t)_heap_end,
114 		.access_rights = XTENSA_MPU_ACCESS_P_RW_U_NA,
115 		.memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
116 	},
117 #endif
118 	/* Mark text segment cacheable, read only and executable */
119 	{
120 		.start = (uintptr_t)__text_region_start,
121 		.end   = (uintptr_t)__text_region_end,
122 		.access_rights = XTENSA_MPU_ACCESS_P_RX_U_RX,
123 		.memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
124 	},
125 	/* Mark rodata segment cacheable, read only and non-executable */
126 	{
127 		.start = (uintptr_t)__rodata_region_start,
128 		.end   = (uintptr_t)__rodata_region_end,
129 		.access_rights = XTENSA_MPU_ACCESS_P_RO_U_RO,
130 		.memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
131 	},
132 };
133 #endif /* !CONFIG_XTENSA_MPU_ONLY_SOC_RANGES */
134 
135 /**
136  * Return the pointer to the entry encompassing @a addr out of an array of MPU entries.
137  *
138  * Returns the entry where @a addr is greater than or equal to the entry's start address,
139  * and where @a addr is less than the start address of the next entry.
140  *
141  * @param[in]  entries Array of MPU entries.
142  * @param[in]  addr Address to be matched to one background entry.
143  * @param[in]  first_enabled_idx The index of the first enabled entry.
144  *                               Use 0 if not sure.
145  * @param[out] exact Set to true if address matches exactly.
146  *                   Pass NULL if not needed.
147  * @param[out] entry_idx Set to the index of the matching entry in the array.
148  *                       Pass NULL if not needed.
149  *
150  * @return Pointer to the map entry encompassing @a addr, or NULL if no such entry found.
151  */
152 static const
153 struct xtensa_mpu_entry *check_addr_in_mpu_entries(const struct xtensa_mpu_entry *entries,
154 						   uintptr_t addr, uint8_t first_enabled_idx,
155 						   bool *exact, uint8_t *entry_idx)
156 {
157 	const struct xtensa_mpu_entry *ret = NULL;
158 	uintptr_t s_addr, e_addr;
159 	uint8_t idx;
160 
161 	if (first_enabled_idx >= XTENSA_MPU_NUM_ENTRIES) {
162 		goto out_null;
163 	}
164 
165 	if (addr < xtensa_mpu_entry_start_address_get(&entries[first_enabled_idx])) {
166 		/* Before the start address of very first entry. So no match. */
167 		goto out_null;
168 	}
169 
170 	/* Loop through the map except the last entry (which is a special case). */
171 	for (idx = first_enabled_idx; idx < (XTENSA_MPU_NUM_ENTRIES - 1); idx++) {
172 		s_addr = xtensa_mpu_entry_start_address_get(&entries[idx]);
173 		e_addr = xtensa_mpu_entry_start_address_get(&entries[idx + 1]);
174 
175 		if ((addr >= s_addr) && (addr < e_addr)) {
176 			ret = &entries[idx];
177 			goto out;
178 		}
179 	}
180 
181 	idx = XTENSA_MPU_NUM_ENTRIES - 1;
182 	s_addr = xtensa_mpu_entry_start_address_get(&entries[idx]);
183 	if (addr >= s_addr) {
184 		/* Last entry encompasses the start address to end of memory. */
185 		ret = &entries[idx];
186 	}
187 
188 out:
189 	if (ret != NULL) {
190 		if (exact != NULL) {
191 			if (addr == s_addr) {
192 				*exact = true;
193 			} else {
194 				*exact = false;
195 			}
196 		}
197 
198 		if (entry_idx != NULL) {
199 			*entry_idx = idx;
200 		}
201 	}
202 
203 out_null:
204 	return ret;
205 }
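
/*
 * Usage sketch (assumption, for illustration only): a caller that wants to
 * know which entry covers an address, and whether the entry starts exactly
 * there, might do:
 *
 *   bool exact;
 *   uint8_t idx;
 *   const struct xtensa_mpu_entry *e =
 *           check_addr_in_mpu_entries(map->entries, addr,
 *                                     find_first_enabled_entry(map->entries),
 *                                     &exact, &idx);
 *   if (e == NULL) {
 *           // addr lies before the first enabled entry
 *   }
 *
 * This mirrors how mpu_map_region_add() below locates the slots for the
 * start and end addresses of an incoming region.
 */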
206 
207 /**
208  * Find the first enabled MPU entry.
209  *
210  * @param entries Array of MPU entries with XTENSA_MPU_NUM_ENTRIES elements.
211  *
212  * @return Index of the first enabled entry.
213  * @retval XTENSA_MPU_NUM_ENTRIES if no entry is enabled.
214  */
215 static inline uint8_t find_first_enabled_entry(const struct xtensa_mpu_entry *entries)
216 {
217 	int first_enabled_idx;
218 
219 	for (first_enabled_idx = 0; first_enabled_idx < XTENSA_MPU_NUM_ENTRIES;
220 	     first_enabled_idx++) {
221 		if (entries[first_enabled_idx].as.p.enable) {
222 			break;
223 		}
224 	}
225 
226 	return first_enabled_idx;
227 }
228 
229 /**
230  * Compare two MPU entries.
231  *
232  * This is used by qsort to compare two MPU entries on their ordering
233  * based on starting address.
234  *
235  * @param a First MPU entry.
236  * @param b Second MPU entry.
237  *
238  * @retval -1 First address is less than second address.
239  * @retval  0 First address is equal to second address.
240  * @retval  1 First address is greater than second address.
241  */
242 static int compare_entries(const void *a, const void *b)
243 {
244 	struct xtensa_mpu_entry *e_a = (struct xtensa_mpu_entry *)a;
245 	struct xtensa_mpu_entry *e_b = (struct xtensa_mpu_entry *)b;
246 
247 	uintptr_t addr_a = xtensa_mpu_entry_start_address_get(e_a);
248 	uintptr_t addr_b = xtensa_mpu_entry_start_address_get(e_b);
249 
250 	if (addr_a < addr_b) {
251 		return -1;
252 	} else if (addr_a == addr_b) {
253 		return 0;
254 	} else {
255 		return 1;
256 	}
257 }
258 
259 /**
260  * Sort the MPU entries based on starting address.
261  *
262  * This sorts the MPU entries in ascending order of starting address.
263  * After sorting, it rewrites the segment numbers of all entries.
264  */
265 static void sort_entries(struct xtensa_mpu_entry *entries)
266 {
267 	qsort(entries, XTENSA_MPU_NUM_ENTRIES, sizeof(entries[0]), compare_entries);
268 
269 	for (uint32_t idx = 0; idx < XTENSA_MPU_NUM_ENTRIES; idx++) {
270 		/* Segment value must correspond to the index. */
271 		entries[idx].at.p.segment = idx;
272 	}
273 }
274 
275 /**
276  * Consolidate the MPU entries.
277  *
278  * This removes consecutive entries where the attributes are the same.
279  *
280  * @param entries Array of MPU entries with XTENSA_MPU_NUM_ENTRIES elements.
281  * @param first_enabled_idx Index of first enabled entry.
282  *
283  * @return Index of the first enabled entry after consolidation.
284  */
285 static uint8_t consolidate_entries(struct xtensa_mpu_entry *entries,
286 				   uint8_t first_enabled_idx)
287 {
288 	uint8_t new_first;
289 	uint8_t idx_0 = first_enabled_idx;
290 	uint8_t idx_1 = first_enabled_idx + 1;
291 	bool to_consolidate = false;
292 
293 	/* For each pair of entries... */
294 	while (idx_1 < XTENSA_MPU_NUM_ENTRIES) {
295 		struct xtensa_mpu_entry *entry_0 = &entries[idx_0];
296 		struct xtensa_mpu_entry *entry_1 = &entries[idx_1];
297 		bool mark_disable_0 = false;
298 		bool mark_disable_1 = false;
299 
300 		if (xtensa_mpu_entries_has_same_attributes(entry_0, entry_1)) {
301 			/*
302 			 * If both entries have the same attributes (access_rights and memory type),
303 			 * they can be consolidated into one by removing the higher indexed
304 			 * one.
305 			 */
306 			mark_disable_1 = true;
307 		} else if (xtensa_mpu_entries_has_same_address(entry_0, entry_1)) {
308 			/*
309 			 * If both entries have the same address, the higher indexed
310 			 * one always overrides the lower one. So remove the lower indexed
311 			 * one.
312 			 */
313 			mark_disable_0 = true;
314 		}
315 
316 		/*
317 		 * Marking an entry as disabled here so it can be removed later.
318 		 *
319 		 * The MBZ field of the AS register is re-purposed to indicate that
320 		 * this is an entry to be removed.
321 		 */
322 		if (mark_disable_1) {
323 			/* Remove the higher indexed entry. */
324 			to_consolidate = true;
325 
326 			entry_1->as.p.mbz = 1U;
327 
328 			/* Skip ahead for next comparison. */
329 			idx_1++;
330 			continue;
331 		} else if (mark_disable_0) {
332 			/* Remove the lower indexed entry. */
333 			to_consolidate = true;
334 
335 			entry_0->as.p.mbz = 1U;
336 		}
337 
338 		idx_0 = idx_1;
339 		idx_1++;
340 	}
341 
342 	if (to_consolidate) {
343 		uint8_t read_idx = XTENSA_MPU_NUM_ENTRIES - 1;
344 		uint8_t write_idx = XTENSA_MPU_NUM_ENTRIES;
345 
346 		/* Go through the map from the end and copy enabled entries in place. */
347 		while (read_idx >= first_enabled_idx) {
348 			struct xtensa_mpu_entry *entry_rd = &entries[read_idx];
349 
350 			if (entry_rd->as.p.mbz != 1U) {
351 				struct xtensa_mpu_entry *entry_wr;
352 
353 				write_idx--;
354 				entry_wr = &entries[write_idx];
355 
356 				*entry_wr = *entry_rd;
357 				entry_wr->at.p.segment = write_idx;
358 			}
359 
360 			read_idx--;
361 		}
362 
363 		/* New first enabled entry is where the last written entry is. */
364 		new_first = write_idx;
365 
366 		for (idx_0 = 0; idx_0 < new_first; idx_0++) {
367 			struct xtensa_mpu_entry *e = &entries[idx_0];
368 
369 			/* Shortcut to zero out address and enabled bit. */
370 			e->as.raw = 0U;
371 
372 			/* Segment value must correspond to the index. */
373 			e->at.p.segment = idx_0;
374 
375 			/* No access at all for both kernel and user modes. */
376 			e->at.p.access_rights =	XTENSA_MPU_ACCESS_P_NA_U_NA;
377 
378 			/* Use default memory type for disabled entries. */
379 			e->at.p.memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE;
380 		}
381 	} else {
382 		/* No need to consolidate entries. Map is the same as before. */
383 		new_first = first_enabled_idx;
384 	}
385 
386 	return new_first;
387 }
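
/*
 * Consolidation example (hypothetical values): two adjacent regions that
 * happen to share the same access rights and memory type
 *
 *   entry[5]: 0x60000000, P_RW_U_NA     entry[5]: (disabled, zeroed)
 *   entry[6]: 0x60010000, P_RW_U_NA  -> entry[6]: 0x60000000, P_RW_U_NA
 *   entry[7]: 0x60020000, P_NA_U_NA     entry[7]: 0x60020000, P_NA_U_NA
 *
 * collapse into one, and the surviving entries are repacked towards the end
 * of the map with their segment fields rewritten to match their new indices.
 */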
388 
389 /**
390  * Add a memory region to the MPU map.
391  *
392  * This adds a memory region to the MPU map, by setting the appropriate
393  * start and end entries. This may re-use existing entries or add new
394  * entries to the map.
395  *
396  * @param[in,out] map Pointer to MPU map.
397  * @param[in] start_addr Start address of the region.
398  * @param[in] end_addr End address of the region.
399  * @param[in] access_rights Access rights of this region.
400  * @param[in] memory_type Memory type of this region.
401  * @param[out] first_idx Return index of first enabled entry if not NULL.
402  *
403  * @retval 0 Successful in adding the region.
404  * @retval -EINVAL Invalid values in function arguments.
405  */
406 static int mpu_map_region_add(struct xtensa_mpu_map *map,
407 			      uintptr_t start_addr, uintptr_t end_addr,
408 			      uint32_t access_rights, uint32_t memory_type,
409 			      uint8_t *first_idx)
410 {
411 	int ret;
412 	bool exact_s, exact_e;
413 	uint8_t idx_s, idx_e, first_enabled_idx;
414 	struct xtensa_mpu_entry *entry_slot_s, *entry_slot_e, prev_entry;
415 
416 	struct xtensa_mpu_entry *entries = map->entries;
417 
418 	if (start_addr >= end_addr) {
419 		ret = -EINVAL;
420 		goto out;
421 	}
422 
423 	first_enabled_idx = find_first_enabled_entry(entries);
424 	if (first_enabled_idx >= XTENSA_MPU_NUM_ENTRIES) {
425 
426 		/*
427 		 * If the last entry in the map is not enabled and its start
428 		 * address is zero, we can assume the map has not been populated
429 		 * at all. This is because we group all enabled entries at
430 		 * the end of map.
431 		 */
432 		struct xtensa_mpu_entry *last_entry = &entries[XTENSA_MPU_NUM_ENTRIES - 1];
433 
434 		if (!xtensa_mpu_entry_enable_get(last_entry) &&
435 		    (xtensa_mpu_entry_start_address_get(last_entry) == 0U)) {
436 			/* Empty table, so populate the entries as-is. */
437 			if (end_addr == 0xFFFFFFFFU) {
438 				/*
439 				 * Region goes to end of memory, so only need to
440 				 * program one entry.
441 				 */
442 				entry_slot_s = &entries[XTENSA_MPU_NUM_ENTRIES - 1];
443 
444 				xtensa_mpu_entry_set(entry_slot_s, start_addr, true,
445 						     access_rights, memory_type);
446 				first_enabled_idx = XTENSA_MPU_NUM_ENTRIES - 1;
447 				goto end;
448 			} else {
449 				/*
450 				 * Populate the last two entries to indicate
451 				 * a memory region. Notice that the second entry
452 				 * is not enabled as it is merely marking the end of
453 				 * a region and is not the start of another
454 				 * enabled MPU region.
455 				 */
456 				entry_slot_s = &entries[XTENSA_MPU_NUM_ENTRIES - 2];
457 				entry_slot_e = &entries[XTENSA_MPU_NUM_ENTRIES - 1];
458 
459 				xtensa_mpu_entry_set(entry_slot_s, start_addr, true,
460 						     access_rights, memory_type);
461 				xtensa_mpu_entry_set(entry_slot_e, end_addr, false,
462 						     XTENSA_MPU_ACCESS_P_NA_U_NA,
463 						     CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE);
464 				first_enabled_idx = XTENSA_MPU_NUM_ENTRIES - 2;
465 				goto end;
466 			}
467 
468 			ret = 0;
469 			goto out;
470 		}
471 
472 		first_enabled_idx = consolidate_entries(entries, first_enabled_idx);
473 
474 		if (first_enabled_idx >= XTENSA_MPU_NUM_ENTRIES) {
475 			ret = -EINVAL;
476 			goto out;
477 		}
478 	}
479 
480 	entry_slot_s = (struct xtensa_mpu_entry *)
481 		       check_addr_in_mpu_entries(entries, start_addr, first_enabled_idx,
482 						 &exact_s, &idx_s);
483 	entry_slot_e = (struct xtensa_mpu_entry *)
484 		       check_addr_in_mpu_entries(entries, end_addr, first_enabled_idx,
485 						 &exact_e, &idx_e);
486 
487 	__ASSERT_NO_MSG(entry_slot_s != NULL);
488 	__ASSERT_NO_MSG(entry_slot_e != NULL);
489 	__ASSERT_NO_MSG(start_addr < end_addr);
490 
491 	if ((entry_slot_s == NULL) || (entry_slot_e == NULL)) {
492 		ret = -EINVAL;
493 		goto out;
494 	}
495 
496 	/*
497 	 * Figure out if we need to add new slots for either address.
498 	 * If the addresses exactly match addresses currently in the map,
499 	 * we can reuse those entries without adding new ones.
500 	 */
501 	if (!exact_s || !exact_e) {
502 		uint8_t needed = (exact_s ? 0 : 1) + (exact_e ? 0 : 1);
503 
504 		/* Check if there are enough empty slots. */
505 		if (first_enabled_idx < needed) {
506 			ret = -ENOMEM;
507 			goto out;
508 		}
509 	}
510 
511 	/*
512 	 * Need to keep track of the attributes of the memory region before
513 	 * we start adding entries, as we will need to apply the same
514 	 * attributes to the "ending address" entry to preserve the attributes
515 	 * of the existing map.
516 	 */
517 	prev_entry = *entry_slot_e;
518 
519 	/*
520 	 * Entry for beginning of new region.
521 	 *
522 	 * - Use existing entry if start addresses are the same for existing
523 	 *   and incoming region. We can simply reuse the entry.
524 	 * - Add an entry if incoming region is within existing region.
525 	 */
526 	if (!exact_s) {
527 		/*
528 		 * Put a new entry before the first enabled entry.
529 		 * We will sort the entries later.
530 		 */
531 		first_enabled_idx--;
532 
533 		entry_slot_s = &entries[first_enabled_idx];
534 	}
535 
536 	xtensa_mpu_entry_set(entry_slot_s, start_addr, true, access_rights, memory_type);
537 
538 	/*
539 	 * Entry for ending of region.
540 	 *
541 	 * - Add an entry if incoming region is within existing region.
542 	 * - If the end address matches exactly to existing entry, there is
543 	 *   no need to do anything.
544 	 */
545 	if (!exact_e) {
546 		/*
547 		 * Put a new entry before the first enabled entry.
548 		 * We will sort the entries later.
549 		 */
550 		first_enabled_idx--;
551 
552 		entry_slot_e = &entries[first_enabled_idx];
553 
554 		/*
555 		 * Since we are going to punch a hole in the map,
556 		 * we need to preserve the attributes of the existing region
557 		 * between the end address and the next entry.
558 		 */
559 		*entry_slot_e = prev_entry;
560 		xtensa_mpu_entry_start_address_set(entry_slot_e, end_addr);
561 	}
562 
563 	/* Sort the entries in ascending order of starting address */
564 	sort_entries(entries);
565 
566 	/*
567 	 * Need to figure out where the start and end entries are as sorting
568 	 * may change their positions.
569 	 */
570 	entry_slot_s = (struct xtensa_mpu_entry *)
571 		       check_addr_in_mpu_entries(entries, start_addr, first_enabled_idx,
572 						 &exact_s, &idx_s);
573 	entry_slot_e = (struct xtensa_mpu_entry *)
574 		       check_addr_in_mpu_entries(entries, end_addr, first_enabled_idx,
575 						 &exact_e, &idx_e);
576 
577 	/* Both must be exact match. */
578 	__ASSERT_NO_MSG(exact_s);
579 	__ASSERT_NO_MSG(exact_e);
580 
581 	if (end_addr == 0xFFFFFFFFU) {
582 		/*
583 		 * If end_addr = 0xFFFFFFFFU, entry_slot_e and idx_e both
584 		 * point to the last slot. Because the incoming region goes
585 		 * to the end of memory, we simply cheat by including
586 		 * the last entry by incrementing idx_e so the loop to
587 		 * update entries will change the attribute of the last entry
588 		 * in the map.
589 		 */
590 		idx_e++;
591 	}
592 
593 	/*
594 	 * Any existing entries between the "newly" populated start and
595 	 * end entries must bear the same attributes. So modify them
596 	 * here.
597 	 */
598 	for (int idx = idx_s + 1; idx < idx_e; idx++) {
599 		xtensa_mpu_entry_attributes_set(&entries[idx], access_rights, memory_type);
600 	}
601 
602 end:
603 	if (first_idx != NULL) {
604 		*first_idx = first_enabled_idx;
605 	}
606 
607 	ret = 0;
608 
609 out:
610 	return ret;
611 }
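
/*
 * Sketch of how a region is carved out (illustrative addresses only):
 * adding [0x60001000, 0x60002000) with P_RW_U_RW into a map that only
 * contains [0x60000000, 0x60004000) with P_RW_U_NA ends up with these
 * foreground boundaries after sorting:
 *
 *   0x60000000 P_RW_U_NA   (unchanged prefix)
 *   0x60001000 P_RW_U_RW   (new start entry)
 *   0x60002000 P_RW_U_NA   (new end entry, copied from prev_entry)
 *   0x60004000 ...         (original terminating entry)
 *
 * which is why up to two free slots may be consumed per disjoint region.
 */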
612 
613 /**
614  * Write the MPU map to hardware.
615  *
616  * @param map Pointer to foreground MPU map.
617  */
618 #ifdef CONFIG_USERSPACE
619 /* With userspace enabled, the pointer to the per-memory-domain MPU map is
620  * stashed inside the thread struct. If this function only took struct
621  * xtensa_mpu_map as argument, a wrapper function would be needed. To avoid
622  * the cost of calling that wrapper, take the thread pointer directly as
623  * argument when userspace is enabled. Writing the map to hardware is already
624  * a costly operation per context switch, so every little bit helps.
625  */
626 void xtensa_mpu_map_write(struct k_thread *thread)
627 #else
628 void xtensa_mpu_map_write(struct xtensa_mpu_map *map)
629 #endif
630 {
631 	int entry;
632 
633 #ifdef CONFIG_USERSPACE
634 	struct xtensa_mpu_map *map = thread->arch.mpu_map;
635 #endif
636 
637 	/*
638 	 * Clear MPU entries first, then write MPU entries in reverse order.
639 	 *
640 	 * Remember that the boundary of each memory region is marked by
641 	 * two consecutive entries, and that the addresses of all entries
642 	 * must be in non-descending order (i.e. equal or increasing).
643 	 * To ensure this, we clear out the entries first then write them
644 	 * in reverse order. This avoids any intermediate invalid
645 	 * configuration with regard to ordering.
646 	 */
647 	for (entry = 0; entry < XTENSA_MPU_NUM_ENTRIES; entry++) {
648 		__asm__ volatile("wptlb %0, %1\n\t" : : "a"(entry), "a"(0));
649 	}
650 
651 	for (entry = XTENSA_MPU_NUM_ENTRIES - 1; entry >= 0; entry--) {
652 		__asm__ volatile("wptlb %0, %1\n\t"
653 				 : : "a"(map->entries[entry].at), "a"(map->entries[entry].as));
654 	}
655 }
656 
657 /**
658  * Perform necessary steps to enable MPU.
659  */
660 void xtensa_mpu_init(void)
661 {
662 	unsigned int entry;
663 	uint8_t first_enabled_idx;
664 
665 	/* Disable all foreground segments before we start configuration. */
666 	xtensa_mpu_mpuenb_write(0);
667 
668 	/*
669 	 * Clear the foreground MPU map so we can populate it later with valid entries.
670 	 * Note that we still need to make sure the map is valid, and cannot be totally
671 	 * zeroed.
672 	 */
673 	for (entry = 0; entry < XTENSA_MPU_NUM_ENTRIES; entry++) {
674 		/* Make sure to zero out everything as a start, especially the MBZ fields. */
675 		struct xtensa_mpu_entry ent = {0};
676 
677 		/* Segment value must correspond to the index. */
678 		ent.at.p.segment = entry;
679 
680 		/* No access at all for both kernel and user modes. */
681 		ent.at.p.access_rights = XTENSA_MPU_ACCESS_P_NA_U_NA;
682 
683 		/* Use default memory type for disabled entries. */
684 		ent.at.p.memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE;
685 
686 		xtensa_mpu_map_fg_kernel.entries[entry] = ent;
687 	}
688 
689 #ifndef CONFIG_XTENSA_MPU_ONLY_SOC_RANGES
690 	/*
691 	 * Add necessary MPU entries for the memory regions of base Zephyr image.
692 	 */
693 	for (entry = 0; entry < ARRAY_SIZE(mpu_zephyr_ranges); entry++) {
694 		const struct xtensa_mpu_range *range = &mpu_zephyr_ranges[entry];
695 
696 		int ret = mpu_map_region_add(&xtensa_mpu_map_fg_kernel,
697 					     range->start, range->end,
698 					     range->access_rights, range->memory_type,
699 					     &first_enabled_idx);
700 
701 		ARG_UNUSED(ret);
702 		__ASSERT(ret == 0, "Unable to add region [0x%08x, 0x%08x): %d",
703 				   (unsigned int)range->start,
704 				   (unsigned int)range->end,
705 				   ret);
706 	}
707 #endif /* !CONFIG_XTENSA_MPU_ONLY_SOC_RANGES */
708 
709 	/*
710 	 * Now for the entries for memory regions needed by SoC.
711 	 */
712 	for (entry = 0; entry < xtensa_soc_mpu_ranges_num; entry++) {
713 		const struct xtensa_mpu_range *range = &xtensa_soc_mpu_ranges[entry];
714 
715 		int ret = mpu_map_region_add(&xtensa_mpu_map_fg_kernel,
716 					     range->start, range->end,
717 					     range->access_rights, range->memory_type,
718 					     &first_enabled_idx);
719 
720 		ARG_UNUSED(ret);
721 		__ASSERT(ret == 0, "Unable to add region [0x%08x, 0x%08x): %d",
722 				   (unsigned int)range->start,
723 				   (unsigned int)range->end,
724 				   ret);
725 	}
726 
727 	/* Consolidate entries so we have a compact map at boot. */
728 	consolidate_entries(xtensa_mpu_map_fg_kernel.entries, first_enabled_idx);
729 
730 	/* Write the map into hardware. There is no turning back now. */
731 #ifdef CONFIG_USERSPACE
732 	struct k_thread dummy_map_thread;
733 
734 	dummy_map_thread.arch.mpu_map = &xtensa_mpu_map_fg_kernel;
735 	xtensa_mpu_map_write(&dummy_map_thread);
736 #else
737 	xtensa_mpu_map_write(&xtensa_mpu_map_fg_kernel);
738 #endif
739 }
740 
741 #ifdef CONFIG_USERSPACE
742 int arch_mem_domain_init(struct k_mem_domain *domain)
743 {
744 	domain->arch.mpu_map = xtensa_mpu_map_fg_kernel;
745 
746 	return 0;
747 }
748 
749 int arch_mem_domain_max_partitions_get(void)
750 {
751 	/*
752 	 * Because each memory region requires 2 MPU entries to describe,
753 	 * it is hard to figure out how many partitions are available.
754 	 * For example, if all those partitions are contiguous, it only
755 	 * needs 2 entries (1 if the end of region already has an entry).
756 	 * If they are all disjoint, it will need (2 * n) entries to
757 	 * describe all of them. So just use CONFIG_MAX_DOMAIN_PARTITIONS
758 	 * here and let the application set this instead.
759 	 */
760 	return CONFIG_MAX_DOMAIN_PARTITIONS;
761 }
762 
763 int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
764 				     uint32_t partition_id)
765 {
766 	int ret;
767 	uint32_t perm;
768 	struct xtensa_mpu_map *map = &domain->arch.mpu_map;
769 	struct k_mem_partition *partition = &domain->partitions[partition_id];
770 	uintptr_t end_addr = partition->start + partition->size;
771 
772 	if (end_addr <= partition->start) {
773 		ret = -EINVAL;
774 		goto out;
775 	}
776 
777 	/*
778 	 * This is simply to get rid of the user permissions and retain
779 	 * whatever the kernel permissions are, so that we won't set
780 	 * the memory region permissions incorrectly, for example, by
781 	 * marking a read-only region writable.
782 	 *
783 	 * Note that Zephyr does not do RWX partitions so we can treat it
784 	 * as invalid.
785 	 */
786 	switch (partition->attr) {
787 	case XTENSA_MPU_ACCESS_P_RO_U_NA:
788 		__fallthrough;
789 	case XTENSA_MPU_ACCESS_P_RX_U_NA:
790 		__fallthrough;
791 	case XTENSA_MPU_ACCESS_P_RO_U_RO:
792 		__fallthrough;
793 	case XTENSA_MPU_ACCESS_P_RX_U_RX:
794 		perm = XTENSA_MPU_ACCESS_P_RO_U_NA;
795 		break;
796 
797 	case XTENSA_MPU_ACCESS_P_RW_U_NA:
798 		__fallthrough;
799 	case XTENSA_MPU_ACCESS_P_RWX_U_NA:
800 		__fallthrough;
801 	case XTENSA_MPU_ACCESS_P_RW_U_RWX:
802 		__fallthrough;
803 	case XTENSA_MPU_ACCESS_P_RW_U_RO:
804 		__fallthrough;
805 	case XTENSA_MPU_ACCESS_P_RWX_U_RX:
806 		__fallthrough;
807 	case XTENSA_MPU_ACCESS_P_RW_U_RW:
808 		__fallthrough;
809 	case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
810 		perm = XTENSA_MPU_ACCESS_P_RW_U_NA;
811 		break;
812 
813 	default:
814 		/* _P_X_U_NA is not a valid permission for userspace, so ignore.
815 		 * _P_NA_U_X becomes _P_NA_U_NA when removing user permissions.
816 		 * _P_WO_U_WO has no kernel-only counterpart so just force no access.
817 		 * If we get here with _P_NA_U_NA, there is something seriously
818 		 * wrong with the userspace and/or application code.
819 		 */
820 		perm = XTENSA_MPU_ACCESS_P_NA_U_NA;
821 		break;
822 	}
823 
824 	/*
825 	 * Reset the memory region attributes by simply "adding"
826 	 * a region with default attributes. If entries already
827 	 * exist for the region, the corresponding entries will
828 	 * be updated with the default attributes. Or new entries
829 	 * will be added to carve a hole in existing regions.
830 	 */
831 	ret = mpu_map_region_add(map, partition->start, end_addr,
832 				 perm,
833 				 CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
834 				 NULL);
835 
836 out:
837 	return ret;
838 }
839 
840 int arch_mem_domain_partition_add(struct k_mem_domain *domain,
841 				  uint32_t partition_id)
842 {
843 	int ret;
844 	struct xtensa_mpu_map *map = &domain->arch.mpu_map;
845 	struct k_mem_partition *partition = &domain->partitions[partition_id];
846 	uintptr_t end_addr = partition->start + partition->size;
847 
848 	if (end_addr <= partition->start) {
849 		ret = -EINVAL;
850 		goto out;
851 	}
852 
853 	ret = mpu_map_region_add(map, partition->start, end_addr,
854 				 (uint8_t)partition->attr,
855 				 CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
856 				 NULL);
857 
858 out:
859 	return ret;
860 }
861 
862 int arch_mem_domain_thread_add(struct k_thread *thread)
863 {
864 	int ret = 0;
865 
866 	/* New memory domain we are being added to */
867 	struct k_mem_domain *domain = thread->mem_domain_info.mem_domain;
868 
869 	/*
870 	 * This is only set for threads that are migrating from some other
871 	 * memory domain; for new threads this is NULL.
872 	 */
873 	struct xtensa_mpu_map *old_map = thread->arch.mpu_map;
874 
875 	bool is_user = (thread->base.user_options & K_USER) != 0;
876 	bool is_migration = (old_map != NULL) && is_user;
877 
878 	uintptr_t stack_end_addr = thread->stack_info.start + thread->stack_info.size;
879 
880 	if (stack_end_addr < thread->stack_info.start) {
881 		/* Account for wrapping around back to 0. */
882 		stack_end_addr = 0xFFFFFFFFU;
883 	}
884 
885 	/*
886 	 * Allow USER access to the thread's stack in its new domain if
887 	 * we are migrating. If we are not migrating this is done in
888 	 * xtensa_user_stack_perms().
889 	 */
890 	if (is_migration) {
891 		/* Add stack to new domain's MPU map. */
892 		ret = mpu_map_region_add(&domain->arch.mpu_map,
893 					 thread->stack_info.start, stack_end_addr,
894 					 XTENSA_MPU_ACCESS_P_RW_U_RW,
895 					 CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
896 					 NULL);
897 
898 		/* If this fails, it is probably due to no more available slots in the MPU map. */
899 		__ASSERT_NO_MSG(ret == 0);
900 	}
901 
902 	thread->arch.mpu_map = &domain->arch.mpu_map;
903 
904 	/*
905 	 * Remove the thread stack from the old memory domain if we are
906 	 * migrating away from it. This is done by simply removing
907 	 * USER access from the region.
908 	 */
909 	if (is_migration) {
910 		/*
911 		 * Remove stack from old MPU map by...
912 		 * "adding" a new memory region to the map
913 		 * as this carves a hole in the existing map.
914 		 */
915 		ret = mpu_map_region_add(old_map,
916 					 thread->stack_info.start, stack_end_addr,
917 					 XTENSA_MPU_ACCESS_P_RW_U_NA,
918 					 CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
919 					 NULL);
920 	}
921 
922 	/*
923 	 * Need to switch to new MPU map if this is the current
924 	 * running thread.
925 	 */
926 	if (thread == _current_cpu->current) {
927 		xtensa_mpu_map_write(thread);
928 	}
929 
930 	return ret;
931 }
932 
933 int arch_mem_domain_thread_remove(struct k_thread *thread)
934 {
935 	uintptr_t stack_end_addr;
936 	int ret;
937 
938 	struct k_mem_domain *domain = thread->mem_domain_info.mem_domain;
939 
940 	if ((thread->base.user_options & K_USER) == 0) {
941 		ret = 0;
942 		goto out;
943 	}
944 
945 	if ((thread->base.thread_state & _THREAD_DEAD) == 0) {
946 		/* Thread is migrating to another memory domain and not
947 		 * exiting for good; we weren't called from
948 		 * z_thread_abort().  Resetting the stack region will
949 		 * take place in the forthcoming thread_add() call.
950 		 */
951 		ret = 0;
952 		goto out;
953 	}
954 
955 	stack_end_addr = thread->stack_info.start + thread->stack_info.size;
956 	if (stack_end_addr < thread->stack_info.start) {
957 		/* Account for wrapping around back to 0. */
958 		stack_end_addr = 0xFFFFFFFFU;
959 	}
960 
961 	/*
962 	 * Restore permissions on the thread's stack area since it is no
963 	 * longer a member of the domain.
964 	 */
965 	ret = mpu_map_region_add(&domain->arch.mpu_map,
966 				 thread->stack_info.start, stack_end_addr,
967 				 XTENSA_MPU_ACCESS_P_RW_U_NA,
968 				 CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
969 				 NULL);
970 
971 	xtensa_mpu_map_write(thread);
972 
973 out:
974 	return ret;
975 }
976 
977 int arch_buffer_validate(const void *addr, size_t size, int write)
978 {
979 	uintptr_t aligned_addr;
980 	size_t aligned_size, addr_offset;
981 	int ret = 0;
982 
983 	/* addr/size arbitrary, fix this up into an aligned region */
984 	aligned_addr = ROUND_DOWN((uintptr_t)addr, XCHAL_MPU_ALIGN);
985 	addr_offset = (uintptr_t)addr - aligned_addr;
986 	aligned_size = ROUND_UP(size + addr_offset, XCHAL_MPU_ALIGN);
987 
988 	for (size_t offset = 0; offset < aligned_size;
989 	     offset += XCHAL_MPU_ALIGN) {
990 		uint32_t probed = xtensa_pptlb_probe(aligned_addr + offset);
991 
992 		if ((probed & XTENSA_MPU_PROBE_VALID_ENTRY_MASK) == 0U) {
993 			/* There is no foreground or background entry associated
994 			 * with the region.
995 			 */
996 			ret = -EPERM;
997 			goto out;
998 		}
999 
1000 		uint8_t access_rights = (probed & XTENSA_MPU_PPTLB_ACCESS_RIGHTS_MASK)
1001 					>> XTENSA_MPU_PPTLB_ACCESS_RIGHTS_SHIFT;
1002 
1003 		if (write) {
1004 			/* Need to check write permission. */
1005 			switch (access_rights) {
1006 			case XTENSA_MPU_ACCESS_P_WO_U_WO:
1007 				__fallthrough;
1008 			case XTENSA_MPU_ACCESS_P_RW_U_RWX:
1009 				__fallthrough;
1010 			case XTENSA_MPU_ACCESS_P_RW_U_RW:
1011 				__fallthrough;
1012 			case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
1013 				/* These permissions are okay. */
1014 				break;
1015 			default:
1016 				ret = -EPERM;
1017 				goto out;
1018 			}
1019 		} else {
1020 			/* Only check read permission. */
1021 			switch (access_rights) {
1022 			case XTENSA_MPU_ACCESS_P_RW_U_RWX:
1023 				__fallthrough;
1024 			case XTENSA_MPU_ACCESS_P_RW_U_RO:
1025 				__fallthrough;
1026 			case XTENSA_MPU_ACCESS_P_RWX_U_RX:
1027 				__fallthrough;
1028 			case XTENSA_MPU_ACCESS_P_RO_U_RO:
1029 				__fallthrough;
1030 			case XTENSA_MPU_ACCESS_P_RX_U_RX:
1031 				__fallthrough;
1032 			case XTENSA_MPU_ACCESS_P_RW_U_RW:
1033 				__fallthrough;
1034 			case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
1035 				/* These permissions are okay. */
1036 				break;
1037 			default:
1038 				ret = -EPERM;
1039 				goto out;
1040 			}
1041 		}
1042 	}
1043 
1044 out:
1045 	return ret;
1046 }
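
/*
 * Alignment example for the probing loop above (XCHAL_MPU_ALIGN assumed to
 * be 0x1000 here purely for illustration): validating addr = 0x60001234,
 * size = 0x20 gives
 *
 *   aligned_addr = 0x60001000, addr_offset = 0x234,
 *   aligned_size = ROUND_UP(0x254, 0x1000) = 0x1000,
 *
 * so exactly one MPU-aligned block is probed via the PPTLB instruction.
 */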
1047 
1048 bool xtensa_mem_kernel_has_access(void *addr, size_t size, int write)
1049 {
1050 	uintptr_t aligned_addr;
1051 	size_t aligned_size, addr_offset;
1052 	bool ret = true;
1053 
1054 	/* addr/size arbitrary, fix this up into an aligned region */
1055 	aligned_addr = ROUND_DOWN((uintptr_t)addr, XCHAL_MPU_ALIGN);
1056 	addr_offset = (uintptr_t)addr - aligned_addr;
1057 	aligned_size = ROUND_UP(size + addr_offset, XCHAL_MPU_ALIGN);
1058 
1059 	for (size_t offset = 0; offset < aligned_size;
1060 	     offset += XCHAL_MPU_ALIGN) {
1061 		uint32_t probed = xtensa_pptlb_probe(aligned_addr + offset);
1062 
1063 		if ((probed & XTENSA_MPU_PROBE_VALID_ENTRY_MASK) == 0U) {
1064 			/* There is no foreground or background entry associated
1065 			 * with the region.
1066 			 */
1067 			ret = false;
1068 			goto out;
1069 		}
1070 
1071 		uint8_t access_rights = (probed & XTENSA_MPU_PPTLB_ACCESS_RIGHTS_MASK)
1072 					>> XTENSA_MPU_PPTLB_ACCESS_RIGHTS_SHIFT;
1073 
1074 
1075 		if (write != 0) {
1076 			/* Need to check write permission. */
1077 			switch (access_rights) {
1078 			case XTENSA_MPU_ACCESS_P_RW_U_NA:
1079 				__fallthrough;
1080 			case XTENSA_MPU_ACCESS_P_RWX_U_NA:
1081 				__fallthrough;
1082 			case XTENSA_MPU_ACCESS_P_WO_U_WO:
1083 				__fallthrough;
1084 			case XTENSA_MPU_ACCESS_P_RW_U_RWX:
1085 				__fallthrough;
1086 			case XTENSA_MPU_ACCESS_P_RW_U_RO:
1087 				__fallthrough;
1088 			case XTENSA_MPU_ACCESS_P_RWX_U_RX:
1089 				__fallthrough;
1090 			case XTENSA_MPU_ACCESS_P_RW_U_RW:
1091 				__fallthrough;
1092 			case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
1093 				/* These permissions are okay. */
1094 				break;
1095 			default:
1096 				ret = false;
1097 				goto out;
1098 			}
1099 		} else {
1100 			/* Only check read permission. */
1101 			switch (access_rights) {
1102 			case XTENSA_MPU_ACCESS_P_RO_U_NA:
1103 				__fallthrough;
1104 			case XTENSA_MPU_ACCESS_P_RX_U_NA:
1105 				__fallthrough;
1106 			case XTENSA_MPU_ACCESS_P_RW_U_NA:
1107 				__fallthrough;
1108 			case XTENSA_MPU_ACCESS_P_RWX_U_NA:
1109 				__fallthrough;
1110 			case XTENSA_MPU_ACCESS_P_RW_U_RWX:
1111 				__fallthrough;
1112 			case XTENSA_MPU_ACCESS_P_RW_U_RO:
1113 				__fallthrough;
1114 			case XTENSA_MPU_ACCESS_P_RWX_U_RX:
1115 				__fallthrough;
1116 			case XTENSA_MPU_ACCESS_P_RO_U_RO:
1117 				__fallthrough;
1118 			case XTENSA_MPU_ACCESS_P_RX_U_RX:
1119 				__fallthrough;
1120 			case XTENSA_MPU_ACCESS_P_RW_U_RW:
1121 				__fallthrough;
1122 			case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
1123 				/* These permissions are okay. */
1124 				break;
1125 			default:
1126 				ret = false;
1127 				goto out;
1128 			}
1129 		}
1130 	}
1131 
1132 out:
1133 	return ret;
1134 }
1135 
1136 
1137 void xtensa_user_stack_perms(struct k_thread *thread)
1138 {
1139 	int ret;
1140 
1141 	uintptr_t stack_end_addr = thread->stack_info.start + thread->stack_info.size;
1142 
1143 	if (stack_end_addr < thread->stack_info.start) {
1144 		/* Account for wrapping around back to 0. */
1145 		stack_end_addr = 0xFFFFFFFFU;
1146 	}
1147 
1148 	(void)memset((void *)thread->stack_info.start,
1149 		     (IS_ENABLED(CONFIG_INIT_STACKS)) ? 0xAA : 0x00,
1150 		     thread->stack_info.size - thread->stack_info.delta);
1151 
1152 	/* Add stack to new domain's MPU map. */
1153 	ret = mpu_map_region_add(thread->arch.mpu_map,
1154 				 thread->stack_info.start, stack_end_addr,
1155 				 XTENSA_MPU_ACCESS_P_RW_U_RW,
1156 				 CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
1157 				 NULL);
1158 
1159 	xtensa_mpu_map_write(thread);
1160 
1161 	/* If this fails, it is probably due to no more available slots in the MPU map. */
1162 	ARG_UNUSED(ret);
1163 	__ASSERT_NO_MSG(ret == 0);
1164 }
1165 
1166 #endif /* CONFIG_USERSPACE */
1167