/*
 * Copyright (c) 2020 BayLibre, SAS
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <kernel_internal.h>
#include <sys/__assert.h>
#include "core_pmp.h"
#include <arch/riscv/csr.h>
#include <stdio.h>

#define PMP_SLOT_NUMBER	CONFIG_PMP_SLOT

#ifdef CONFIG_USERSPACE
extern ulong_t is_user_mode;
#endif

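/*
 * Symbolic indices for the PMP CSRs. In order, they map to the CSR
 * addresses used in the helpers below: pmpcfg0..pmpcfg3 (0x3A0..0x3A3)
 * and pmpaddr0..pmpaddr15 (0x3B0..0x3BF), as assigned by the RISC-V
 * privileged specification.
 */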
enum {
	CSR_PMPCFG0,
	CSR_PMPCFG1,
	CSR_PMPCFG2,
	CSR_PMPCFG3,
	CSR_PMPADDR0,
	CSR_PMPADDR1,
	CSR_PMPADDR2,
	CSR_PMPADDR3,
	CSR_PMPADDR4,
	CSR_PMPADDR5,
	CSR_PMPADDR6,
	CSR_PMPADDR7,
	CSR_PMPADDR8,
	CSR_PMPADDR9,
	CSR_PMPADDR10,
	CSR_PMPADDR11,
	CSR_PMPADDR12,
	CSR_PMPADDR13,
	CSR_PMPADDR14,
	CSR_PMPADDR15
};

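/*
 * The CSR number is encoded as a 12-bit immediate in the csrr/csrw
 * instructions, so a CSR cannot be selected by a run-time value
 * directly. These helpers dispatch on a compile-time constant for each
 * PMP CSR instead.
 */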
ulong_t csr_read_enum(int pmp_csr_enum)
{
	ulong_t res = -1;

	switch (pmp_csr_enum) {
	case CSR_PMPCFG0:
		res = csr_read(0x3A0); break;
	case CSR_PMPCFG1:
		res = csr_read(0x3A1); break;
	case CSR_PMPCFG2:
		res = csr_read(0x3A2); break;
	case CSR_PMPCFG3:
		res = csr_read(0x3A3); break;
	case CSR_PMPADDR0:
		res = csr_read(0x3B0); break;
	case CSR_PMPADDR1:
		res = csr_read(0x3B1); break;
	case CSR_PMPADDR2:
		res = csr_read(0x3B2); break;
	case CSR_PMPADDR3:
		res = csr_read(0x3B3); break;
	case CSR_PMPADDR4:
		res = csr_read(0x3B4); break;
	case CSR_PMPADDR5:
		res = csr_read(0x3B5); break;
	case CSR_PMPADDR6:
		res = csr_read(0x3B6); break;
	case CSR_PMPADDR7:
		res = csr_read(0x3B7); break;
	case CSR_PMPADDR8:
		res = csr_read(0x3B8); break;
	case CSR_PMPADDR9:
		res = csr_read(0x3B9); break;
	case CSR_PMPADDR10:
		res = csr_read(0x3BA); break;
	case CSR_PMPADDR11:
		res = csr_read(0x3BB); break;
	case CSR_PMPADDR12:
		res = csr_read(0x3BC); break;
	case CSR_PMPADDR13:
		res = csr_read(0x3BD); break;
	case CSR_PMPADDR14:
		res = csr_read(0x3BE); break;
	case CSR_PMPADDR15:
		res = csr_read(0x3BF); break;
	default:
		break;
	}
	return res;
}

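/* Counterpart of csr_read_enum(): write @value to the selected PMP CSR. */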
void csr_write_enum(int pmp_csr_enum, ulong_t value)
{
	switch (pmp_csr_enum) {
	case CSR_PMPCFG0:
		csr_write(0x3A0, value); break;
	case CSR_PMPCFG1:
		csr_write(0x3A1, value); break;
	case CSR_PMPCFG2:
		csr_write(0x3A2, value); break;
	case CSR_PMPCFG3:
		csr_write(0x3A3, value); break;
	case CSR_PMPADDR0:
		csr_write(0x3B0, value); break;
	case CSR_PMPADDR1:
		csr_write(0x3B1, value); break;
	case CSR_PMPADDR2:
		csr_write(0x3B2, value); break;
	case CSR_PMPADDR3:
		csr_write(0x3B3, value); break;
	case CSR_PMPADDR4:
		csr_write(0x3B4, value); break;
	case CSR_PMPADDR5:
		csr_write(0x3B5, value); break;
	case CSR_PMPADDR6:
		csr_write(0x3B6, value); break;
	case CSR_PMPADDR7:
		csr_write(0x3B7, value); break;
	case CSR_PMPADDR8:
		csr_write(0x3B8, value); break;
	case CSR_PMPADDR9:
		csr_write(0x3B9, value); break;
	case CSR_PMPADDR10:
		csr_write(0x3BA, value); break;
	case CSR_PMPADDR11:
		csr_write(0x3BB, value); break;
	case CSR_PMPADDR12:
		csr_write(0x3BC, value); break;
	case CSR_PMPADDR13:
		csr_write(0x3BD, value); break;
	case CSR_PMPADDR14:
		csr_write(0x3BE, value); break;
	case CSR_PMPADDR15:
		csr_write(0x3BF, value); break;
	default:
		break;
	}
}

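/*
 * Program a single PMP slot: write pmpaddr<index> and replace the
 * slot's 8-bit configuration field inside the matching pmpcfg CSR.
 * @addr_val is a byte address, converted to the encoded form
 * internally. Returns 0 on success, -1 if @index is out of range.
 *
 * Example with hypothetical values, granting read-only access to one
 * 4-byte word through slot 2:
 *
 *	z_riscv_pmp_set(2, PMP_NA4 | PMP_R, 0x80001000);
 */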
int z_riscv_pmp_set(unsigned int index, ulong_t cfg_val, ulong_t addr_val)
{
	ulong_t reg_val;
	ulong_t shift, mask;
	int pmpcfg_csr;
	int pmpaddr_csr;

	if (index >= PMP_SLOT_NUMBER) {
		return -1;
	}

	/*
	 * Calculate PMP config/addr register, shift and mask. On RV64
	 * only the even-numbered pmpcfg CSRs exist, each holding eight
	 * 8-bit configuration fields.
	 */
#ifdef CONFIG_64BIT
	pmpcfg_csr = CSR_PMPCFG0 + ((index >> 3) << 1);
	shift = (index & 0x7) << 3;
#else
	pmpcfg_csr = CSR_PMPCFG0 + (index >> 2);
	shift = (index & 0x3) << 3;
#endif /* CONFIG_64BIT */
	pmpaddr_csr = CSR_PMPADDR0 + index;

	/* Mask for this slot's 8-bit configuration field */
	mask = (ulong_t)0xFF << shift;

	cfg_val = cfg_val << shift;
	addr_val = TO_PMP_ADDR(addr_val);

	reg_val = csr_read_enum(pmpcfg_csr);
	reg_val = reg_val & ~mask;
	reg_val = reg_val | cfg_val;

	csr_write_enum(pmpaddr_csr, addr_val);
	csr_write_enum(pmpcfg_csr, reg_val);
	return 0;
}

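/*
 * Read back the configuration byte and the decoded byte address of PMP
 * slot @index. Returns 0 on success, -1 if @index is out of range.
 */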
int pmp_get(unsigned int index, ulong_t *cfg_val, ulong_t *addr_val)
{
	ulong_t shift;
	int pmpcfg_csr;
	int pmpaddr_csr;

	if (index >= PMP_SLOT_NUMBER) {
		return -1;
	}

	/* Calculate PMP config/addr register and shift, as in z_riscv_pmp_set() */
#ifdef CONFIG_64BIT
	pmpcfg_csr = CSR_PMPCFG0 + ((index >> 3) << 1);
	shift = (index & 0x0007) << 3;
#else
	pmpcfg_csr = CSR_PMPCFG0 + (index >> 2);
	shift = (index & 0x0003) << 3;
#endif /* CONFIG_64BIT */
	pmpaddr_csr = CSR_PMPADDR0 + index;

	*cfg_val = (csr_read_enum(pmpcfg_csr) >> shift) & 0xFF;
	*addr_val = FROM_PMP_ADDR(csr_read_enum(pmpaddr_csr));

	return 0;
}

void z_riscv_pmp_clear_config(void)
{
	for (unsigned int i = 0; i < RISCV_PMP_CFG_NUM; i++) {
		csr_write_enum(CSR_PMPCFG0 + i, 0);
	}
}

/* Helper to dump one PMP slot when debugging */
void z_riscv_pmp_print(unsigned int index)
{
	ulong_t cfg_val;
	ulong_t addr_val;

	if (pmp_get(index, &cfg_val, &addr_val)) {
		return;
	}
#ifdef CONFIG_64BIT
	printf("PMP[%u] :\t%02lX %16lX\n", index, cfg_val, addr_val);
#else
	printf("PMP[%u] :\t%02lX %08lX\n", index, cfg_val, addr_val);
#endif /* CONFIG_64BIT */
}

#if defined(CONFIG_USERSPACE)
#include <linker/linker-defs.h>
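
/*
 * Build the static part of a user thread's PMP context in
 * thread->arch.u_pmpaddr/u_pmpcfg: read access to the is_user_mode
 * flag, read/execute access to the ROM region (code and read-only
 * data), and read/write access to the thread stack. Slot 0 is skipped
 * when it is reserved for the PMP stack guard.
 */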
void z_riscv_init_user_accesses(struct k_thread *thread)
{
	unsigned char index;
	unsigned char *uchar_pmpcfg;
	ulong_t rom_start = (ulong_t) __rom_region_start;
#if defined(CONFIG_PMP_POWER_OF_TWO_ALIGNMENT)
	ulong_t rom_size = (ulong_t) __rom_region_size;
#else /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */
	ulong_t rom_end = (ulong_t) __rom_region_end;
#endif /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */
	index = 0U;
	uchar_pmpcfg = (unsigned char *) thread->arch.u_pmpcfg;

#ifdef CONFIG_PMP_STACK_GUARD
	index++;
#endif /* CONFIG_PMP_STACK_GUARD */

	/* MCU state: is_user_mode flag, read-only for user mode */
	thread->arch.u_pmpaddr[index] = TO_PMP_ADDR((ulong_t) &is_user_mode);
	uchar_pmpcfg[index++] = PMP_NA4 | PMP_R;
#if defined(CONFIG_PMP_POWER_OF_TWO_ALIGNMENT)
	/* Program and RO data */
	thread->arch.u_pmpaddr[index] = TO_PMP_NAPOT(rom_start, rom_size);
	uchar_pmpcfg[index++] = PMP_NAPOT | PMP_R | PMP_X;

	/* RAM */
	thread->arch.u_pmpaddr[index] = TO_PMP_NAPOT(thread->stack_info.start,
					thread->stack_info.size);
	uchar_pmpcfg[index++] = PMP_NAPOT | PMP_R | PMP_W;
#else /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */
	/* Program and RO data */
	thread->arch.u_pmpaddr[index] = TO_PMP_ADDR(rom_start);
	uchar_pmpcfg[index++] = PMP_NA4 | PMP_R | PMP_X;
	thread->arch.u_pmpaddr[index] = TO_PMP_ADDR(rom_end);
	uchar_pmpcfg[index++] = PMP_TOR | PMP_R | PMP_X;

	/* RAM */
	thread->arch.u_pmpaddr[index] = TO_PMP_ADDR(thread->stack_info.start);
	uchar_pmpcfg[index++] = PMP_NA4 | PMP_R | PMP_W;
	thread->arch.u_pmpaddr[index] = TO_PMP_ADDR(thread->stack_info.start +
		thread->stack_info.size);
	uchar_pmpcfg[index++] = PMP_TOR | PMP_R | PMP_W;
#endif /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */
}

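/*
 * Load the thread's pre-computed user-mode PMP context into the
 * hardware PMP CSRs, clearing any previous configuration first.
 */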
void z_riscv_configure_user_allowed_stack(struct k_thread *thread)
{
	unsigned int i;

	z_riscv_pmp_clear_config();

	for (i = 0U; i < CONFIG_PMP_SLOT; i++) {
		csr_write_enum(CSR_PMPADDR0 + i, thread->arch.u_pmpaddr[i]);
	}

	for (i = 0U; i < RISCV_PMP_CFG_NUM; i++) {
		csr_write_enum(CSR_PMPCFG0 + i, thread->arch.u_pmpcfg[i]);
	}
}

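/*
 * Allocate the next free PMP slot(s) of @thread for a dynamic region,
 * picking the cheapest encoding: NA4 for a single word, NAPOT for a
 * naturally aligned power-of-two region, and otherwise an NA4/TOR pair
 * spanning two slots (only available when
 * CONFIG_PMP_POWER_OF_TWO_ALIGNMENT is disabled).
 */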
void z_riscv_pmp_add_dynamic(struct k_thread *thread,
			ulong_t addr,
			ulong_t size,
			unsigned char flags)
{
	unsigned char index = 0U;
	unsigned char *uchar_pmpcfg;

	/* Check that address and size are 4-byte aligned and size is non-zero */
	__ASSERT(((addr & 0x3) == 0) && ((size & 0x3) == 0) && size,
		 "address/size are not 4-byte aligned\n");

	/* Get next free entry */
	uchar_pmpcfg = (unsigned char *) thread->arch.u_pmpcfg;

	index = PMP_REGION_NUM_FOR_U_THREAD;

	while ((index < CONFIG_PMP_SLOT) && uchar_pmpcfg[index]) {
		index++;
	}

	__ASSERT((index < CONFIG_PMP_SLOT), "no free PMP entry\n");

	/* Select the best type */
	if (size == 4) {
		thread->arch.u_pmpaddr[index] = TO_PMP_ADDR(addr);
		uchar_pmpcfg[index] = flags | PMP_NA4;
	}
#if !defined(CONFIG_PMP_POWER_OF_TWO_ALIGNMENT)
	else if ((addr & (size - 1)) || (size & (size - 1))) {
		__ASSERT(((index + 1) < CONFIG_PMP_SLOT),
			"not enough free PMP entries\n");
		thread->arch.u_pmpaddr[index] = TO_PMP_ADDR(addr);
		uchar_pmpcfg[index++] = flags | PMP_NA4;
		thread->arch.u_pmpaddr[index] = TO_PMP_ADDR(addr + size);
		uchar_pmpcfg[index++] = flags | PMP_TOR;
	}
#endif /* !CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */
	else {
		thread->arch.u_pmpaddr[index] = TO_PMP_NAPOT(addr, size);
		uchar_pmpcfg[index] = flags | PMP_NAPOT;
	}
}

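/*
 * Check whether the current thread may access [addr, addr + size) with
 * the requested access type. Walks the thread's user PMP entries,
 * decodes each NA4/NAPOT (and trailing TOR) region back to a byte
 * range, and returns 0 when the buffer is fully contained in a region
 * granting the access, 1 otherwise.
 */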
int arch_buffer_validate(void *addr, size_t size, int write)
{
	uint32_t index, i;
	ulong_t pmp_type, pmp_addr_start, pmp_addr_stop;
	unsigned char *uchar_pmpcfg;
	struct k_thread *thread = _current;
	ulong_t start = (ulong_t) addr;
	ulong_t access_type = PMP_R;
	ulong_t napot_mask;
#ifdef CONFIG_64BIT
	ulong_t max_bit = 64;
#else
	ulong_t max_bit = 32;
#endif /* CONFIG_64BIT */

	if (write) {
		access_type |= PMP_W;
	}

	uchar_pmpcfg = (unsigned char *) thread->arch.u_pmpcfg;

#ifdef CONFIG_PMP_STACK_GUARD
	index = 1U;
#else
	index = 0U;
#endif /* CONFIG_PMP_STACK_GUARD */

#if !defined(CONFIG_PMP_POWER_OF_TWO_ALIGNMENT) || defined(CONFIG_PMP_STACK_GUARD)
	__ASSERT((uchar_pmpcfg[index] & PMP_TYPE_MASK) != PMP_TOR,
		 "The 1st PMP entry shouldn't be configured as TOR");
#endif /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT || CONFIG_PMP_STACK_GUARD */

	for (; (index < CONFIG_PMP_SLOT) && uchar_pmpcfg[index]; index++) {
		if ((uchar_pmpcfg[index] & access_type) != access_type) {
			continue;
		}

		pmp_type = uchar_pmpcfg[index] & PMP_TYPE_MASK;

#if !defined(CONFIG_PMP_POWER_OF_TWO_ALIGNMENT) || defined(CONFIG_PMP_STACK_GUARD)
		if (pmp_type == PMP_TOR) {
			continue;
		}
#endif /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT || CONFIG_PMP_STACK_GUARD */

		if (pmp_type == PMP_NA4) {
			pmp_addr_start =
				FROM_PMP_ADDR(thread->arch.u_pmpaddr[index]);

			if ((index == CONFIG_PMP_SLOT - 1) ||
				((uchar_pmpcfg[index + 1U] & PMP_TYPE_MASK)
					!= PMP_TOR)) {
				pmp_addr_stop = pmp_addr_start + 4;
			} else {
				pmp_addr_stop = FROM_PMP_ADDR(
					thread->arch.u_pmpaddr[index + 1U]);
				index++;
			}
		} else { /* pmp_type == PMP_NAPOT */
			/* Count the trailing 1 bits encoding the region size */
			for (i = 0U; i < max_bit; i++) {
				if (!(thread->arch.u_pmpaddr[index] &
				      (1UL << i))) {
					break;
				}
			}

			napot_mask = (1UL << i) - 1;
			pmp_addr_start = FROM_PMP_ADDR(
				thread->arch.u_pmpaddr[index] & ~napot_mask);
			pmp_addr_stop = pmp_addr_start + (1UL << (i + 3));
		}

		if ((start >= pmp_addr_start) && ((start + size - 1) <
			pmp_addr_stop)) {
			return 0;
		}
	}

	return 1;
}

int arch_mem_domain_max_partitions_get(void)
{
	return PMP_MAX_DYNAMIC_REGION;
}

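/*
 * Remove a partition from every thread of a memory domain: recompute
 * the PMP encoding the partition was registered with, locate it among
 * the threads' dynamic PMP entries, and compact the remaining entries
 * down so the slot array stays contiguous.
 */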
void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
				      uint32_t partition_id)
{
	sys_dnode_t *node, *next_node;
	uint32_t index, i, num;
	ulong_t pmp_type, pmp_addr;
	unsigned char *uchar_pmpcfg;
	struct k_thread *thread;
	ulong_t size = (ulong_t) domain->partitions[partition_id].size;
	ulong_t start = (ulong_t) domain->partitions[partition_id].start;

	if (size == 4) {
		pmp_type = PMP_NA4;
		pmp_addr = TO_PMP_ADDR(start);
		num = 1U;
	}
#if !defined(CONFIG_PMP_POWER_OF_TWO_ALIGNMENT) || defined(CONFIG_PMP_STACK_GUARD)
	else if ((start & (size - 1)) || (size & (size - 1))) {
		pmp_type = PMP_TOR;
		pmp_addr = TO_PMP_ADDR(start + size);
		num = 2U;
	}
#endif /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT || CONFIG_PMP_STACK_GUARD */
	else {
		pmp_type = PMP_NAPOT;
		pmp_addr = TO_PMP_NAPOT(start, size);
		num = 1U;
	}

	node = sys_dlist_peek_head(&domain->mem_domain_q);
	if (!node) {
		return;
	}

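	/*
	 * Dynamic partitions are applied domain-wide, so the slot layout
	 * is the same for every member thread: locate the entry once on
	 * the first thread and reuse the index for all of them below.
	 */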
	thread = CONTAINER_OF(node, struct k_thread, mem_domain_info);

	uchar_pmpcfg = (unsigned char *) thread->arch.u_pmpcfg;
	for (index = PMP_REGION_NUM_FOR_U_THREAD;
		index < CONFIG_PMP_SLOT;
		index++) {
		if (((uchar_pmpcfg[index] & PMP_TYPE_MASK) == pmp_type) &&
			(pmp_addr == thread->arch.u_pmpaddr[index])) {
			break;
		}
	}

	__ASSERT((index < CONFIG_PMP_SLOT), "partition not found\n");

#if !defined(CONFIG_PMP_POWER_OF_TWO_ALIGNMENT) || defined(CONFIG_PMP_STACK_GUARD)
	if (pmp_type == PMP_TOR) {
		index--;
	}
#endif /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT || CONFIG_PMP_STACK_GUARD */

	SYS_DLIST_FOR_EACH_NODE_SAFE(&domain->mem_domain_q, node, next_node) {
		thread = CONTAINER_OF(node, struct k_thread, mem_domain_info);

		uchar_pmpcfg = (unsigned char *) thread->arch.u_pmpcfg;

		for (i = index + num; i < CONFIG_PMP_SLOT; i++) {
			uchar_pmpcfg[i - num] = uchar_pmpcfg[i];
			thread->arch.u_pmpaddr[i - num] =
				thread->arch.u_pmpaddr[i];
		}

		uchar_pmpcfg[CONFIG_PMP_SLOT - 1] = 0U;
		if (num == 2U) {
			uchar_pmpcfg[CONFIG_PMP_SLOT - 2] = 0U;
		}
	}
}

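/*
 * Apply every non-empty partition of the thread's new memory domain to
 * the thread's dynamic PMP entries.
 */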
void arch_mem_domain_thread_add(struct k_thread *thread)
{
	struct k_mem_partition *partition;

	for (int i = 0, pcount = 0;
		pcount < thread->mem_domain_info.mem_domain->num_partitions;
		i++) {
		partition = &thread->mem_domain_info.mem_domain->partitions[i];
		if (partition->size == 0) {
			continue;
		}
		pcount++;

		z_riscv_pmp_add_dynamic(thread, (ulong_t) partition->start,
			(ulong_t) partition->size, partition->attr.pmp_attr);
	}
}

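/*
 * Apply a newly added partition to every thread already attached to
 * the memory domain.
 */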
void arch_mem_domain_partition_add(struct k_mem_domain *domain,
				   uint32_t partition_id)
{
	sys_dnode_t *node, *next_node;
	struct k_thread *thread;
	struct k_mem_partition *partition;

	partition = &domain->partitions[partition_id];

	SYS_DLIST_FOR_EACH_NODE_SAFE(&domain->mem_domain_q, node, next_node) {
		thread = CONTAINER_OF(node, struct k_thread, mem_domain_info);

		z_riscv_pmp_add_dynamic(thread, (ulong_t) partition->start,
			(ulong_t) partition->size, partition->attr.pmp_attr);
	}
}

void arch_mem_domain_thread_remove(struct k_thread *thread)
{
	uint32_t i;
	unsigned char *uchar_pmpcfg;

	uchar_pmpcfg = (unsigned char *) thread->arch.u_pmpcfg;

	for (i = PMP_REGION_NUM_FOR_U_THREAD; i < CONFIG_PMP_SLOT; i++) {
		uchar_pmpcfg[i] = 0U;
	}
}

#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_PMP_STACK_GUARD

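/*
 * Build the machine-mode PMP context implementing the stack guards: a
 * no-access NA4/TOR pair over the base of the thread stack, another
 * pair over the privileged stack when userspace is enabled, then RW
 * access to all of SRAM and RWX access to everything else. Entry 0 is
 * left alone: it holds the locked interrupt stack guard set by
 * z_riscv_configure_interrupt_stack_guard(), which is why the config
 * byte pointer is advanced by one below.
 */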
void z_riscv_init_stack_guard(struct k_thread *thread)
{
	unsigned char index = 0U;
	unsigned char *uchar_pmpcfg;
	ulong_t stack_guard_addr;

	uchar_pmpcfg = (unsigned char *) thread->arch.s_pmpcfg;

	/* Skip the config byte of slot 0 (interrupt stack guard) */
	uchar_pmpcfg++;

	/* Thread stack guard: no access */
	thread->arch.s_pmpaddr[index] = TO_PMP_ADDR(thread->stack_info.start);
	uchar_pmpcfg[index++] = PMP_NA4;
	thread->arch.s_pmpaddr[index] =
		TO_PMP_ADDR(thread->stack_info.start +
			PMP_GUARD_ALIGN_AND_SIZE);
	uchar_pmpcfg[index++] = PMP_TOR;

#ifdef CONFIG_USERSPACE
	if (thread->arch.priv_stack_start) {
#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
		stack_guard_addr = thread->arch.priv_stack_start;
#else
		stack_guard_addr = (ulong_t) thread->stack_obj;
#endif /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */
		/* Privileged stack guard: no access */
		thread->arch.s_pmpaddr[index] =
			TO_PMP_ADDR(stack_guard_addr);
		uchar_pmpcfg[index++] = PMP_NA4;
		thread->arch.s_pmpaddr[index] =
			TO_PMP_ADDR(stack_guard_addr +
				PMP_GUARD_ALIGN_AND_SIZE);
		uchar_pmpcfg[index++] = PMP_TOR;
	}
#endif /* CONFIG_USERSPACE */

	/* RAM: RW */
	thread->arch.s_pmpaddr[index] = TO_PMP_ADDR(CONFIG_SRAM_BASE_ADDRESS |
				TO_NAPOT_RANGE(KB(CONFIG_SRAM_SIZE)));
	uchar_pmpcfg[index++] = (PMP_NAPOT | PMP_R | PMP_W);

	/* All other memory: RWX */
#ifdef CONFIG_64BIT
	thread->arch.s_pmpaddr[index] = 0x1FFFFFFFFFFFFFFF;
#else
	thread->arch.s_pmpaddr[index] = 0x1FFFFFFF;
#endif /* CONFIG_64BIT */
	uchar_pmpcfg[index] = PMP_NAPOT | PMP_R | PMP_W | PMP_X;
}

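/*
 * Load the thread's stack guard context into the hardware PMP CSRs.
 * MSTATUS_MPRV is cleared while the entries are rewritten and set
 * again afterwards, so the guard only binds machine-mode accesses once
 * the configuration is complete.
 */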
void z_riscv_configure_stack_guard(struct k_thread *thread)
{
	unsigned int i;

	/* Disable PMP for machine mode */
	csr_clear(mstatus, MSTATUS_MPRV);

	z_riscv_pmp_clear_config();

	/* Slot 0 holds the locked interrupt stack guard: start at pmpaddr1 */
	for (i = 0U; i < PMP_REGION_NUM_FOR_STACK_GUARD; i++) {
		csr_write_enum(CSR_PMPADDR1 + i, thread->arch.s_pmpaddr[i]);
	}

	for (i = 0U; i < PMP_CFG_CSR_NUM_FOR_STACK_GUARD; i++) {
		csr_write_enum(CSR_PMPCFG0 + i, thread->arch.s_pmpcfg[i]);
	}

	/* Enable PMP for machine mode */
	csr_set(mstatus, MSTATUS_MPRV);
}

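/*
 * Program PMP slot 0 as a locked (PMP_L) no-access region over the
 * base of the interrupt stack, so overflows are caught even in machine
 * mode. NAPOT is used when the guard is larger than one word.
 */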
void z_riscv_configure_interrupt_stack_guard(void)
{
	if (PMP_GUARD_ALIGN_AND_SIZE > 4) {
		z_riscv_pmp_set(0, PMP_NAPOT | PMP_L,
			(ulong_t) z_interrupt_stacks[0] |
			TO_NAPOT_RANGE(PMP_GUARD_ALIGN_AND_SIZE));
	} else {
		z_riscv_pmp_set(0, PMP_NA4 | PMP_L,
			(ulong_t) z_interrupt_stacks[0]);
	}
}
#endif /* CONFIG_PMP_STACK_GUARD */

#if defined(CONFIG_PMP_STACK_GUARD) || defined(CONFIG_USERSPACE)

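/* Zero a new thread's PMP context so no stale entries are inherited. */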
void z_riscv_pmp_init_thread(struct k_thread *thread)
{
	unsigned char i;
	ulong_t *pmpcfg;

#if defined(CONFIG_PMP_STACK_GUARD)
	pmpcfg = thread->arch.s_pmpcfg;
	for (i = 0U; i < PMP_CFG_CSR_NUM_FOR_STACK_GUARD; i++) {
		pmpcfg[i] = 0;
	}
#endif /* CONFIG_PMP_STACK_GUARD */

#if defined(CONFIG_USERSPACE)
	pmpcfg = thread->arch.u_pmpcfg;
	for (i = 0U; i < RISCV_PMP_CFG_NUM; i++) {
		pmpcfg[i] = 0;
	}
#endif /* CONFIG_USERSPACE */
}
#endif /* CONFIG_PMP_STACK_GUARD || CONFIG_USERSPACE */