/*
 * Copyright (c) 2022 BayLibre, SAS
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Physical Memory Protection (PMP) is RISC-V parlance for an MPU.
 *
 * The PMP is comprised of a number of entries or slots. This number depends
 * on the hardware design. For each slot there is an address register and
 * a configuration register. While each address register is matched to an
 * actual CSR register, configuration registers are small and therefore
 * several of them are bundled in a few additional CSR registers.
 *
 * PMP slot configurations are updated in memory to avoid read-modify-write
 * cycles on corresponding CSR registers. Relevant CSR registers are always
 * written in batch from their shadow copy in RAM for better efficiency.
 *
 * In the stackguard case we keep an m-mode copy for each thread. Each
 * user-mode thread also has a u-mode copy. This makes context switching
 * faster as the precomputed content just has to be written to the actual
 * registers with no additional processing.
 *
 * Thread-specific m-mode and u-mode PMP entries start from the PMP slot
 * indicated by global_pmp_end_index. Lower slots are used by global entries
 * which are never modified.
 */
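
/*
 * For example (per the RISC-V privileged spec): on RV32 each pmpcfgN CSR
 * bundles the 8-bit configurations of 4 slots (pmpcfg0 covers slots 0-3),
 * while on RV64 each even-numbered pmpcfgN bundles 8 of them (pmpcfg0
 * covers slots 0-7) and the odd-numbered ones don't exist.
 */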

#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <zephyr/linker/linker-defs.h>
#include <pmp.h>
#include <zephyr/arch/arch_interface.h>
#include <zephyr/arch/riscv/csr.h>

#define LOG_LEVEL CONFIG_MPU_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(mpu);

#define PMP_DEBUG_DUMP 0

#ifdef CONFIG_64BIT
# define PR_ADDR "0x%016lx"
#else
# define PR_ADDR "0x%08lx"
#endif

#define PMP_TOR_SUPPORTED	!IS_ENABLED(CONFIG_PMP_NO_TOR)
#define PMP_NA4_SUPPORTED	!IS_ENABLED(CONFIG_PMP_NO_NA4)
#define PMP_NAPOT_SUPPORTED	!IS_ENABLED(CONFIG_PMP_NO_NAPOT)

#define PMPCFG_STRIDE sizeof(unsigned long)

#define PMP_ADDR(addr)			((addr) >> 2)
#define NAPOT_RANGE(size)		(((size) - 1) >> 1)
#define PMP_ADDR_NAPOT(addr, size)	PMP_ADDR((addr) | NAPOT_RANGE(size))
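
/*
 * Worked example (illustrative values): covering a 4 KB region at
 * 0x80000000 with NAPOT gives
 *   NAPOT_RANGE(0x1000)                = 0x7ff
 *   PMP_ADDR_NAPOT(0x80000000, 0x1000) = 0x800007ff >> 2 = 0x200001ff
 * i.e. the trailing ones in the low address bits encode the region size.
 */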

#define PMP_NONE 0

static void print_pmp_entries(unsigned int pmp_start, unsigned int pmp_end,
			      unsigned long *pmp_addr, unsigned long *pmp_cfg,
			      const char *banner)
{
	uint8_t *pmp_n_cfg = (uint8_t *)pmp_cfg;
	unsigned int index;

	LOG_DBG("PMP %s:", banner);
	for (index = pmp_start; index < pmp_end; index++) {
		unsigned long start, end, tmp;

		switch (pmp_n_cfg[index] & PMP_A) {
		case PMP_TOR:
			start = (index == 0) ? 0 : (pmp_addr[index - 1] << 2);
			end = (pmp_addr[index] << 2) - 1;
			break;
		case PMP_NA4:
			start = pmp_addr[index] << 2;
			end = start + 3;
			break;
		case PMP_NAPOT:
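			/*
			 * Restore the 2 low address bits dropped by the
			 * encoding; tmp then ends with the NAPOT run of
			 * trailing ones. Since tmp + 1 carries past that
			 * run, ANDing clears the trailing ones (region
			 * start) and ORing yields the inclusive end.
			 */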
			tmp = (pmp_addr[index] << 2) | 0x3;
			start = tmp & (tmp + 1);
			end   = tmp | (tmp + 1);
			break;
		default:
			start = 0;
			end = 0;
			break;
		}

		if (end == 0) {
			LOG_DBG("%3d: "PR_ADDR" 0x%02x", index,
				pmp_addr[index],
				pmp_n_cfg[index]);
		} else {
			LOG_DBG("%3d: "PR_ADDR" 0x%02x --> "
				PR_ADDR"-"PR_ADDR" %c%c%c%s",
				index, pmp_addr[index], pmp_n_cfg[index],
				start, end,
				(pmp_n_cfg[index] & PMP_R) ? 'R' : '-',
				(pmp_n_cfg[index] & PMP_W) ? 'W' : '-',
				(pmp_n_cfg[index] & PMP_X) ? 'X' : '-',
				(pmp_n_cfg[index] & PMP_L) ? " LOCKED" : "");
		}
	}
}

static void dump_pmp_regs(const char *banner)
{
	unsigned long pmp_addr[CONFIG_PMP_SLOTS];
	unsigned long pmp_cfg[CONFIG_PMP_SLOTS / PMPCFG_STRIDE];

#define PMPADDR_READ(x) pmp_addr[x] = csr_read(pmpaddr##x)

	FOR_EACH(PMPADDR_READ, (;), 0, 1, 2, 3, 4, 5, 6, 7);
#if CONFIG_PMP_SLOTS > 8
	FOR_EACH(PMPADDR_READ, (;), 8, 9, 10, 11, 12, 13, 14, 15);
#endif

#undef PMPADDR_READ

#ifdef CONFIG_64BIT
	pmp_cfg[0] = csr_read(pmpcfg0);
#if CONFIG_PMP_SLOTS > 8
	pmp_cfg[1] = csr_read(pmpcfg2);
#endif
#else
	pmp_cfg[0] = csr_read(pmpcfg0);
	pmp_cfg[1] = csr_read(pmpcfg1);
#if CONFIG_PMP_SLOTS > 8
	pmp_cfg[2] = csr_read(pmpcfg2);
	pmp_cfg[3] = csr_read(pmpcfg3);
#endif
#endif

	print_pmp_entries(0, CONFIG_PMP_SLOTS, pmp_addr, pmp_cfg, banner);
}

/**
 * @brief Set PMP shadow register values in memory
 *
 * This function builds the register contents, automatically selecting the
 * most appropriate address-matching mode. Note that the special case
 * start=0 size=0 is valid and means the whole address range.
 *
 * @param index_p Location of the current PMP slot index to use. This index
 *                will be updated according to the number of slots used.
 * @param perm PMP permission flags
 * @param start Start address of the memory area to cover
 * @param size Size of the memory area to cover
 * @param pmp_addr Array of pmpaddr values (starting at entry 0).
 * @param pmp_cfg Array of pmpcfg values (starting at entry 0).
 * @param index_limit Index value representing the size of the provided arrays.
 * @return true on success, false when out of free PMP slots.
 */
static bool set_pmp_entry(unsigned int *index_p, uint8_t perm,
			  uintptr_t start, size_t size,
			  unsigned long *pmp_addr, unsigned long *pmp_cfg,
			  unsigned int index_limit)
{
	uint8_t *pmp_n_cfg = (uint8_t *)pmp_cfg;
	unsigned int index = *index_p;
	bool ok = true;

	__ASSERT((start & (CONFIG_PMP_GRANULARITY - 1)) == 0, "misaligned start address");
	__ASSERT((size & (CONFIG_PMP_GRANULARITY - 1)) == 0, "misaligned size");

	if (index >= index_limit) {
		LOG_ERR("out of PMP slots");
		ok = false;
	} else if (PMP_TOR_SUPPORTED &&
		   ((index == 0 && start == 0) ||
		    (index != 0 && pmp_addr[index - 1] == PMP_ADDR(start)))) {
		/* We can use TOR using only one additional slot */
		pmp_addr[index] = PMP_ADDR(start + size);
		pmp_n_cfg[index] = perm | PMP_TOR;
		index += 1;
	} else if (PMP_NA4_SUPPORTED && size == 4) {
		pmp_addr[index] = PMP_ADDR(start);
		pmp_n_cfg[index] = perm | PMP_NA4;
		index += 1;
	} else if (PMP_NAPOT_SUPPORTED &&
		   ((size  & (size - 1)) == 0) /* power of 2 */ &&
		   ((start & (size - 1)) == 0) /* naturally aligned */ &&
		   (PMP_NA4_SUPPORTED || (size != 4))) {
		pmp_addr[index] = PMP_ADDR_NAPOT(start, size);
		pmp_n_cfg[index] = perm | PMP_NAPOT;
		index += 1;
	} else if (PMP_TOR_SUPPORTED && index + 1 >= index_limit) {
		LOG_ERR("out of PMP slots");
		ok = false;
	} else if (PMP_TOR_SUPPORTED) {
		pmp_addr[index] = PMP_ADDR(start);
		pmp_n_cfg[index] = 0;
		index += 1;
		pmp_addr[index] = PMP_ADDR(start + size);
		pmp_n_cfg[index] = perm | PMP_TOR;
		index += 1;
	} else {
		LOG_ERR("inappropriate PMP range (start=%#lx size=%#zx)", start, size);
		ok = false;
	}

	*index_p = index;
	return ok;
}
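
/*
 * Usage sketch (illustrative only, not part of the build): mapping a
 * hypothetical 256-byte, naturally aligned u-mode buffer would consume
 * a single NAPOT slot:
 *
 *	unsigned int index = 0;
 *	bool ok = set_pmp_entry(&index, PMP_R | PMP_W,
 *				0x80001000, 256,
 *				pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
 *
 * An unaligned or non-power-of-two region would instead fall back to a
 * TOR pair and consume up to two slots.
 */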

static inline bool set_pmp_mprv_catchall(unsigned int *index_p,
					 unsigned long *pmp_addr, unsigned long *pmp_cfg,
					 unsigned int index_limit)
{
	/*
	 * We'll be using MPRV. Make a fallback entry with everything
	 * accessible as if no PMP entries were matched, which is otherwise
	 * the default behavior for m-mode without MPRV.
	 */
	bool ok = set_pmp_entry(index_p, PMP_R | PMP_W | PMP_X,
				0, 0, pmp_addr, pmp_cfg, index_limit);

#ifdef CONFIG_QEMU_TARGET
	if (ok) {
		/*
		 * Workaround: The above produced 0x1fffffff which is correct.
		 * But there is a QEMU bug that prevents it from interpreting
		 * this value correctly. Hardcode the special case used by
		 * QEMU to bypass this bug for now. The QEMU fix is here:
		 * https://lists.gnu.org/archive/html/qemu-devel/2022-04/msg00961.html
		 */
		pmp_addr[*index_p - 1] = -1L;
	}
#endif

	return ok;
}

/**
 * @brief Write a range of PMP entries to corresponding PMP registers
 *
 * PMP registers are accessed with csr instructions, which can only encode
 * the actual register as an immediate value. This is performed more
 * efficiently in assembly code (pmp.S) than what is possible with C code.
 *
 * Requirement: start < end && end <= CONFIG_PMP_SLOTS
 *
 * @param start Start of the PMP range to be written
 * @param end End (exclusive) of the PMP range to be written
 * @param clear_trailing_entries True if trailing entries must be turned off
 * @param pmp_addr Array of pmpaddr values (starting at entry 0).
 * @param pmp_cfg Array of pmpcfg values (starting at entry 0).
 */
extern void z_riscv_write_pmp_entries(unsigned int start, unsigned int end,
				      bool clear_trailing_entries,
				      const unsigned long *pmp_addr,
				      const unsigned long *pmp_cfg);
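
/*
 * Conceptual sketch of what the assembly in pmp.S has to do (not valid C:
 * a CSR can only be named at compile time, so the real code dispatches on
 * the index instead of using it as a subscript):
 *
 *	for (unsigned int i = start; i < end; i++) {
 *		csr_write(pmpaddr<i>, pmp_addr[i]);
 *	}
 *	for each pmpcfg<n> register covered by [start, end) {
 *		csr_write(pmpcfg<n>, pmp_cfg[n]);
 *	}
 */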

/**
 * @brief Write a range of PMP entries to corresponding PMP registers
 *
 * This performs some sanity checks before calling z_riscv_write_pmp_entries().
 *
 * @param start Start of the PMP range to be written
 * @param end End (exclusive) of the PMP range to be written
 * @param clear_trailing_entries True if trailing entries must be turned off
 * @param pmp_addr Array of pmpaddr values (starting at entry 0).
 * @param pmp_cfg Array of pmpcfg values (starting at entry 0).
 * @param index_limit Index value representing the size of the provided arrays.
 */
static void write_pmp_entries(unsigned int start, unsigned int end,
			      bool clear_trailing_entries,
			      unsigned long *pmp_addr, unsigned long *pmp_cfg,
			      unsigned int index_limit)
{
	__ASSERT(start < end && end <= index_limit &&
		 index_limit <= CONFIG_PMP_SLOTS,
		 "bad PMP range (start=%u end=%u)", start, end);

	/* Be extra paranoid in case assertions are disabled */
	if (start >= end || end > index_limit) {
		k_panic();
	}

	if (clear_trailing_entries) {
		/*
		 * There are many config entries per pmpcfg register.
		 * Make sure to clear trailing garbage in the last
		 * register to be written if any. Remaining registers
		 * will be cleared in z_riscv_write_pmp_entries().
		 */
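		/*
		 * For example (RV64, PMPCFG_STRIDE == 8): with end == 5,
		 * config bytes 5 to 7 share pmpcfg0 with entries 0 to 4
		 * and are zeroed here so that no stale values get written
		 * back alongside the live ones.
		 */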
		uint8_t *pmp_n_cfg = (uint8_t *)pmp_cfg;
		unsigned int index;

		for (index = end; index % PMPCFG_STRIDE != 0; index++) {
			pmp_n_cfg[index] = 0;
		}
	}

	print_pmp_entries(start, end, pmp_addr, pmp_cfg, "register write");

#ifdef CONFIG_QEMU_TARGET
	/*
	 * A QEMU bug may create bad transient PMP representations causing
	 * false access faults to be reported. Work around it by setting
	 * pmp registers to zero from the update start point to the end
	 * before updating them with new values.
	 * The QEMU fix is here with more details about this bug:
	 * https://lists.gnu.org/archive/html/qemu-devel/2022-06/msg02800.html
	 */
	static const unsigned long pmp_zero[CONFIG_PMP_SLOTS] = { 0, };

	z_riscv_write_pmp_entries(start, CONFIG_PMP_SLOTS, false,
				  pmp_zero, pmp_zero);
#endif

	z_riscv_write_pmp_entries(start, end, clear_trailing_entries,
				  pmp_addr, pmp_cfg);
}

/**
 * @brief Abstract the last 3 arguments to set_pmp_entry() and
 *        write_pmp_entries() for m-mode.
 */
#define PMP_M_MODE(thread) \
	thread->arch.m_mode_pmpaddr_regs, \
	thread->arch.m_mode_pmpcfg_regs, \
	ARRAY_SIZE(thread->arch.m_mode_pmpaddr_regs)

/**
 * @brief Abstract the last 3 arguments to set_pmp_entry() and
 *        write_pmp_entries() for u-mode.
 */
#define PMP_U_MODE(thread) \
	thread->arch.u_mode_pmpaddr_regs, \
	thread->arch.u_mode_pmpcfg_regs, \
	ARRAY_SIZE(thread->arch.u_mode_pmpaddr_regs)
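
/*
 * For example, set_pmp_entry(&index, perm, start, size, PMP_U_MODE(thread))
 * expands to a call passing the thread's u-mode shadow arrays and their
 * size as the last 3 arguments.
 */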

/*
 * This is used to seed thread PMP copies with global m-mode cfg entries
 * sharing the same cfg register. Locked entries aren't modifiable but
 * we could have non-locked entries here too.
 */
static unsigned long global_pmp_cfg[1];
static unsigned long global_pmp_last_addr;

/* End of global PMP entry range */
static unsigned int global_pmp_end_index;

/**
 * @brief Initialize the PMP with global entries on each CPU
 */
void z_riscv_pmp_init(void)
{
	unsigned long pmp_addr[CONFIG_PMP_SLOTS];
	unsigned long pmp_cfg[CONFIG_PMP_SLOTS / PMPCFG_STRIDE];
	unsigned int index = 0;

	/* The read-only area is always there for every mode */
	set_pmp_entry(&index, PMP_R | PMP_X | PMP_L,
		      (uintptr_t)__rom_region_start,
		      (size_t)__rom_region_size,
		      pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));

#ifdef CONFIG_NULL_POINTER_EXCEPTION_DETECTION_PMP
	/*
	 * Use a PMP slot to make the region starting at address 0x0
	 * inaccessible for detecting null pointer dereferences.
	 */
	set_pmp_entry(&index, PMP_NONE | PMP_L,
		      0,
		      CONFIG_NULL_POINTER_EXCEPTION_REGION_SIZE,
		      pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
#endif

#ifdef CONFIG_PMP_STACK_GUARD
#ifdef CONFIG_MULTITHREADING
	/*
	 * Set the stack guard for this CPU's IRQ stack by making the bottom
	 * addresses inaccessible. This will never change so we do it here
	 * and lock it too.
	 */
	set_pmp_entry(&index, PMP_NONE | PMP_L,
		      (uintptr_t)z_interrupt_stacks[_current_cpu->id],
		      Z_RISCV_STACK_GUARD_SIZE,
		      pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));

	/*
	 * This early, the kernel init code uses the IRQ stack and we want to
	 * safeguard it as soon as possible. But we need a temporary default
	 * "catch all" PMP entry for MPRV to work. Later on, this entry will
	 * be set for each thread by z_riscv_pmp_stackguard_prepare().
	 */
	set_pmp_mprv_catchall(&index, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));

	/* Write those entries to PMP regs. */
	write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));

	/* Activate our non-locked PMP entries for m-mode */
	csr_set(mstatus, MSTATUS_MPRV);

	/* And forget about that last entry as we won't need it later */
	index--;
#else
	/* Without multithreading, set up stack guards for the IRQ and main stacks */
	set_pmp_entry(&index, PMP_NONE | PMP_L,
		      (uintptr_t)z_interrupt_stacks,
		      Z_RISCV_STACK_GUARD_SIZE,
		      pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));

	set_pmp_entry(&index, PMP_NONE | PMP_L,
		      (uintptr_t)z_main_stack,
		      Z_RISCV_STACK_GUARD_SIZE,
		      pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));

	/* Write those entries to PMP regs. */
	write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
#endif /* CONFIG_MULTITHREADING */
#else
	/* Write those entries to PMP regs. */
	write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
#endif

#ifdef CONFIG_SMP
#ifdef CONFIG_PMP_STACK_GUARD
	/*
	 * The IRQ stack guard area is different for each CPU.
	 * Make sure TOR entry sharing won't be attempted with it by
	 * remembering a bogus address for those entries.
	 */
	pmp_addr[index - 1] = -1L;
#endif

	/* Make sure secondary CPUs produced the same values */
	if (global_pmp_end_index != 0) {
		__ASSERT(global_pmp_end_index == index, "");
		__ASSERT(global_pmp_cfg[0] == pmp_cfg[0], "");
		__ASSERT(global_pmp_last_addr == pmp_addr[index - 1], "");
	}
#endif

	global_pmp_cfg[0] = pmp_cfg[0];
	global_pmp_last_addr = pmp_addr[index - 1];
	global_pmp_end_index = index;

	if (PMP_DEBUG_DUMP) {
		dump_pmp_regs("initial register dump");
	}
}

/**
 * @brief Initialize the per-thread PMP register copy with global values.
 */
static inline unsigned int z_riscv_pmp_thread_init(unsigned long *pmp_addr,
						   unsigned long *pmp_cfg,
						   unsigned int index_limit)
{
	ARG_UNUSED(index_limit);

	/*
	 * Retrieve pmpcfg0 partial content from global entries.
	 */
	pmp_cfg[0] = global_pmp_cfg[0];

	/*
	 * Retrieve the pmpaddr value matching the last global PMP slot.
	 * This is so that set_pmp_entry() can safely attempt TOR with it.
	 */
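	/*
	 * For example, if the last global entry is a TOR entry whose end
	 * address matches the start of the first thread-specific region,
	 * set_pmp_entry() can cover that region with a single additional
	 * TOR slot instead of two.
	 */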
	pmp_addr[global_pmp_end_index - 1] = global_pmp_last_addr;

	return global_pmp_end_index;
}

#ifdef CONFIG_PMP_STACK_GUARD

#ifdef CONFIG_MULTITHREADING
/**
 * @brief Prepare the PMP stackguard content for the given thread.
 *
 * This is called once during new thread creation.
 */
void z_riscv_pmp_stackguard_prepare(struct k_thread *thread)
{
	unsigned int index = z_riscv_pmp_thread_init(PMP_M_MODE(thread));
	uintptr_t stack_bottom;

	/* make the bottom addresses of our stack inaccessible */
	stack_bottom = thread->stack_info.start - K_KERNEL_STACK_RESERVED;
#ifdef CONFIG_USERSPACE
	if (thread->arch.priv_stack_start != 0) {
		stack_bottom = thread->arch.priv_stack_start;
	} else if (z_stack_is_user_capable(thread->stack_obj)) {
		stack_bottom = thread->stack_info.start - K_THREAD_STACK_RESERVED;
	}
#endif
	set_pmp_entry(&index, PMP_NONE,
		      stack_bottom, Z_RISCV_STACK_GUARD_SIZE,
		      PMP_M_MODE(thread));
	set_pmp_mprv_catchall(&index, PMP_M_MODE(thread));

	/* remember how many entries we use */
	thread->arch.m_mode_pmp_end_index = index;
}

/**
 * @brief Write PMP stackguard content to actual PMP registers
 *
 * This is called on every context switch.
 */
void z_riscv_pmp_stackguard_enable(struct k_thread *thread)
{
	LOG_DBG("pmp_stackguard_enable for thread %p", thread);

	/*
	 * Disable (non-locked) PMP entries for m-mode while we update them.
	 * While at it, also clear MSTATUS_MPP as it must be cleared for
	 * MSTATUS_MPRV to be effective later.
	 */
	csr_clear(mstatus, MSTATUS_MPRV | MSTATUS_MPP);

	/* Write our m-mode PMP entries */
	write_pmp_entries(global_pmp_end_index, thread->arch.m_mode_pmp_end_index,
			  false /* no need to clear to the end */,
			  PMP_M_MODE(thread));

	if (PMP_DEBUG_DUMP) {
		dump_pmp_regs("m-mode register dump");
	}

	/* Activate our non-locked PMP entries in m-mode */
	csr_set(mstatus, MSTATUS_MPRV);
}

#endif /* CONFIG_MULTITHREADING */

/**
 * @brief Remove PMP stackguard content from actual PMP registers
 */
void z_riscv_pmp_stackguard_disable(void)
{
	unsigned long pmp_addr[PMP_M_MODE_SLOTS];
	unsigned long pmp_cfg[PMP_M_MODE_SLOTS / sizeof(unsigned long)];
	unsigned int index = global_pmp_end_index;

	/* Retrieve the pmpaddr value matching the last global PMP slot. */
	pmp_addr[global_pmp_end_index - 1] = global_pmp_last_addr;

	/* Disable (non-locked) PMP entries for m-mode while we update them. */
	csr_clear(mstatus, MSTATUS_MPRV);

	/*
	 * Set a temporary default "catch all" PMP entry for MPRV to work,
	 * except for the global locked entries.
	 */
	set_pmp_mprv_catchall(&index, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));

	/* Write "catch all" entry and clear unlocked entries to PMP regs. */
	write_pmp_entries(global_pmp_end_index, index,
			  true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));

	if (PMP_DEBUG_DUMP) {
		dump_pmp_regs("catch all register dump");
	}
}

#endif /* CONFIG_PMP_STACK_GUARD */

#ifdef CONFIG_USERSPACE

/**
 * @brief Initialize the usermode portion of the PMP configuration.
 *
 * This is called once during new thread creation.
 */
void z_riscv_pmp_usermode_init(struct k_thread *thread)
{
	/* Only indicate that the u-mode PMP is not prepared yet */
	thread->arch.u_mode_pmp_end_index = 0;
}

/**
 * @brief Prepare the u-mode PMP content for the given thread.
 *
 * This is called once before making the transition to usermode.
 */
void z_riscv_pmp_usermode_prepare(struct k_thread *thread)
{
	unsigned int index = z_riscv_pmp_thread_init(PMP_U_MODE(thread));

	LOG_DBG("pmp_usermode_prepare for thread %p", thread);

	/* Map the usermode stack */
	set_pmp_entry(&index, PMP_R | PMP_W,
		      thread->stack_info.start, thread->stack_info.size,
		      PMP_U_MODE(thread));

	thread->arch.u_mode_pmp_domain_offset = index;
	thread->arch.u_mode_pmp_end_index = index;
	thread->arch.u_mode_pmp_update_nr = 0;
}

/**
 * @brief Convert partition information into PMP entries
 */
static void resync_pmp_domain(struct k_thread *thread,
			      struct k_mem_domain *domain)
{
	unsigned int index = thread->arch.u_mode_pmp_domain_offset;
	int p_idx, remaining_partitions;
	bool ok;

	k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);

	remaining_partitions = domain->num_partitions;
	for (p_idx = 0; remaining_partitions > 0; p_idx++) {
		struct k_mem_partition *part = &domain->partitions[p_idx];

		if (part->size == 0) {
			/* skip empty partition */
			continue;
		}

		remaining_partitions--;

		if (part->size < 4) {
			/* 4 bytes is the minimum we can map */
			LOG_ERR("non-empty partition too small");
			__ASSERT(false, "");
			continue;
		}

		ok = set_pmp_entry(&index, part->attr.pmp_attr,
				   part->start, part->size, PMP_U_MODE(thread));
		__ASSERT(ok,
			 "no PMP slot left for %d remaining partitions in domain %p",
			 remaining_partitions + 1, domain);
	}

	thread->arch.u_mode_pmp_end_index = index;
	thread->arch.u_mode_pmp_update_nr = domain->arch.pmp_update_nr;

	k_spin_unlock(&z_mem_domain_lock, key);
}

/**
 * @brief Write PMP usermode content to actual PMP registers
 *
 * This is called on every context switch.
 */
void z_riscv_pmp_usermode_enable(struct k_thread *thread)
{
	struct k_mem_domain *domain = thread->mem_domain_info.mem_domain;

	LOG_DBG("pmp_usermode_enable for thread %p with domain %p", thread, domain);

	if (thread->arch.u_mode_pmp_end_index == 0) {
		/* z_riscv_pmp_usermode_prepare() has not been called yet */
		return;
	}

	if (thread->arch.u_mode_pmp_update_nr != domain->arch.pmp_update_nr) {
		/*
		 * Resynchronize our PMP entries with
		 * the latest domain partition information.
		 */
		resync_pmp_domain(thread, domain);
	}

#ifdef CONFIG_PMP_STACK_GUARD
	/* Make sure m-mode PMP usage is disabled before we reprogram it */
	csr_clear(mstatus, MSTATUS_MPRV);
#endif

	/* Write our u-mode PMP entries */
	write_pmp_entries(global_pmp_end_index, thread->arch.u_mode_pmp_end_index,
			  true /* must clear to the end */,
			  PMP_U_MODE(thread));

	if (PMP_DEBUG_DUMP) {
		dump_pmp_regs("u-mode register dump");
	}
}

int arch_mem_domain_max_partitions_get(void)
{
	int available_pmp_slots = CONFIG_PMP_SLOTS;

	/* remove those slots dedicated to global entries */
	available_pmp_slots -= global_pmp_end_index;

	/*
	 * User thread stack mapping:
	 * 1 slot if CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT=y,
	 * most likely 2 slots otherwise.
	 */
	available_pmp_slots -=
		IS_ENABLED(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT) ? 1 : 2;

	/*
	 * Each partition may require either 1 or 2 PMP slots depending
	 * on a couple factors that are not known in advance. Even when
	 * arch_mem_domain_partition_add() is called, we can't tell if a
	 * given partition will fit in the remaining PMP slots of an
	 * affected thread if it hasn't executed in usermode yet.
	 *
	 * Give the most optimistic answer here (which should be pretty
	 * accurate if CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT=y) and be
	 * prepared to deny availability in resync_pmp_domain() if this
	 * estimate was too high.
	 */
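	/*
	 * Worked example (hypothetical numbers): with CONFIG_PMP_SLOTS=16,
	 * 3 global entries and a 2-slot stack mapping, this returns 11.
	 */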
	return available_pmp_slots;
}

int arch_mem_domain_init(struct k_mem_domain *domain)
{
	domain->arch.pmp_update_nr = 0;
	return 0;
}

int arch_mem_domain_partition_add(struct k_mem_domain *domain,
				  uint32_t partition_id)
{
	/* Force resynchronization for every thread using this domain */
	domain->arch.pmp_update_nr += 1;
	return 0;
}

int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
				     uint32_t partition_id)
{
	/* Force resynchronization for every thread using this domain */
	domain->arch.pmp_update_nr += 1;
	return 0;
}

int arch_mem_domain_thread_add(struct k_thread *thread)
{
	/* Force resynchronization for this thread */
	thread->arch.u_mode_pmp_update_nr = 0;
	return 0;
}

int arch_mem_domain_thread_remove(struct k_thread *thread)
{
	return 0;
}

#define IS_WITHIN(inner_start, inner_size, outer_start, outer_size) \
	((inner_start) >= (outer_start) && (inner_size) <= (outer_size) && \
	 ((inner_start) - (outer_start)) <= ((outer_size) - (inner_size)))
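
/*
 * Note the last clause: it is (inner_start + inner_size) <=
 * (outer_start + outer_size) rewritten with subtractions, which cannot
 * underflow given the first two clauses, so ranges reaching the very top
 * of the address space are still checked correctly.
 */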

int arch_buffer_validate(const void *addr, size_t size, int write)
{
	uintptr_t start = (uintptr_t)addr;
	int ret = -1;

	/* Check if this is on the stack */
	if (IS_WITHIN(start, size, arch_current_thread()->stack_info.start,
		      arch_current_thread()->stack_info.size)) {
		return 0;
	}

	/* Check if this is within the global read-only area */
	if (!write) {
		uintptr_t ro_start = (uintptr_t)__rom_region_start;
		size_t ro_size = (size_t)__rom_region_size;

		if (IS_WITHIN(start, size, ro_start, ro_size)) {
			return 0;
		}
	}

	/* Look for a matching partition in our memory domain */
	struct k_mem_domain *domain = arch_current_thread()->mem_domain_info.mem_domain;
	int p_idx, remaining_partitions;
	k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);

	remaining_partitions = domain->num_partitions;
	for (p_idx = 0; remaining_partitions > 0; p_idx++) {
		struct k_mem_partition *part = &domain->partitions[p_idx];

		if (part->size == 0) {
			/* unused partition */
			continue;
		}

		remaining_partitions--;

		if (!IS_WITHIN(start, size, part->start, part->size)) {
			/* unmatched partition */
			continue;
		}

		/* partition matched: determine access result */
		if ((part->attr.pmp_attr & (write ? PMP_W : PMP_R)) != 0) {
			ret = 0;
		}
		break;
	}

	k_spin_unlock(&z_mem_domain_lock, key);
	return ret;
}

#endif /* CONFIG_USERSPACE */