/*
 * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
 * Contributors: 2018 Antmicro <www.antmicro.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief RISCV specific kernel interface header
 * This header contains the RISCV specific kernel interface. It is
 * included by the generic kernel interface header (arch/cpu.h).
 */

#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_

#include <arch/riscv/thread.h>
#include <arch/riscv/exp.h>
#include <arch/common/sys_bitops.h>
#include <arch/common/sys_io.h>
#include <arch/common/ffs.h>
#if defined(CONFIG_USERSPACE)
#include <arch/riscv/syscall.h>
#endif /* CONFIG_USERSPACE */
#include <irq.h>
#include <sw_isr_table.h>
#include <soc.h>
#include <devicetree.h>
#include <arch/riscv/csr.h>

/* Stacks: the RISC-V architecture requires the stack pointer to be
 * 16-byte aligned.
 */
#define ARCH_STACK_PTR_ALIGN  16

#ifdef CONFIG_PMP_STACK_GUARD
#define Z_RISCV_PMP_ALIGN CONFIG_PMP_STACK_GUARD_MIN_SIZE
#define Z_RISCV_STACK_GUARD_SIZE  Z_RISCV_PMP_ALIGN
#else
#define Z_RISCV_PMP_ALIGN 4
#define Z_RISCV_STACK_GUARD_SIZE  0
#endif

/* Kernel-only stacks have the following layout if a stack guard is enabled:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+ <- thread.stack_info.start
 * | Kernel     |
 * | stack      |
 * |            |
 * +............+
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#ifdef CONFIG_PMP_STACK_GUARD
#define ARCH_KERNEL_STACK_RESERVED	Z_RISCV_STACK_GUARD_SIZE
#define ARCH_KERNEL_STACK_OBJ_ALIGN \
		MAX(Z_RISCV_PMP_ALIGN, ARCH_STACK_PTR_ALIGN)
#endif

#ifdef CONFIG_USERSPACE
/* Any thread running in user mode will have full access to the region
 * denoted by thread.stack_info.
 *
 * Thread-local storage is at the very highest memory locations of this
 * area. Memory for TLS and any initial random stack pointer offset is
 * captured in thread.stack_info.delta.
 */
#ifdef CONFIG_PMP_STACK_GUARD
#ifdef CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT
/* Use defaults for everything. The privilege elevation stack is located
 * in another area of memory generated at build time by gen_kobject_list.py.
 *
 * +------------+ <- thread.arch.priv_stack_start
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+
 * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE - Z_RISCV_STACK_GUARD_SIZE
 * +------------+ <- thread.arch.priv_stack_start +
 *                   CONFIG_PRIVILEGED_STACK_SIZE
 *
 * +------------+ <- thread.stack_obj = thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............+
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
		Z_POW2_CEIL(ROUND_UP((size), Z_RISCV_PMP_ALIGN))
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) \
		ARCH_THREAD_STACK_SIZE_ADJUST(size)
#define ARCH_THREAD_STACK_RESERVED		0
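
/* A worked example, under the hypothetical assumption that
 * Z_RISCV_PMP_ALIGN is 64: a requested stack size of 1000 bytes becomes
 *
 *   ROUND_UP(1000, 64) -> 1024   (round up to the PMP granularity)
 *   Z_POW2_CEIL(1024)  -> 1024   (already a power of two, kept as-is)
 *
 * while a 1100-byte request becomes ROUND_UP(1100, 64) = 1152, then
 * Z_POW2_CEIL(1152) = 2048, because NAPOT-style PMP regions must be
 * power-of-two sized and aligned.
 */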
#else /* !CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT */
/* The stack object will contain the PMP guard, the privilege stack, and then
 * the stack buffer in that order:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+ <- thread.arch.priv_stack_start
 * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
 * +------------+ <- thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............+
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_THREAD_STACK_RESERVED	(Z_RISCV_STACK_GUARD_SIZE + \
					 CONFIG_PRIVILEGED_STACK_SIZE)
#define ARCH_THREAD_STACK_OBJ_ALIGN(size)	Z_RISCV_PMP_ALIGN
/* We need to be able to exactly cover the stack buffer with a PMP region,
 * so round its size up to the required granularity of the PMP.
 */
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
		(ROUND_UP((size), Z_RISCV_PMP_ALIGN))

#endif /* CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT */
#else /* !CONFIG_PMP_STACK_GUARD */
#ifdef CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT
/* Use defaults for everything. The privilege elevation stack is located
 * in another area of memory generated at build time by gen_kobject_list.py.
 *
 * +------------+ <- thread.arch.priv_stack_start
 * | Priv Stack | } Z_KERNEL_STACK_LEN(CONFIG_PRIVILEGED_STACK_SIZE)
 * +------------+
 *
 * +------------+ <- thread.stack_obj = thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............+
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
		Z_POW2_CEIL(ROUND_UP((size), Z_RISCV_PMP_ALIGN))
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) \
		ARCH_THREAD_STACK_SIZE_ADJUST(size)
#define ARCH_THREAD_STACK_RESERVED		0
#else /* !CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT */
/* Userspace enabled, but supervisor stack guards are not in use */

/* The reserved area of the thread object just contains the privilege stack:
 *
 * +------------+ <- thread.stack_obj = thread.arch.priv_stack_start
 * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
 * +------------+ <- thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............+
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_THREAD_STACK_RESERVED		CONFIG_PRIVILEGED_STACK_SIZE
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
		(ROUND_UP((size), Z_RISCV_PMP_ALIGN))
#define ARCH_THREAD_STACK_OBJ_ALIGN(size)	Z_RISCV_PMP_ALIGN

#endif /* CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT */
#endif /* CONFIG_PMP_STACK_GUARD */

#else /* !CONFIG_USERSPACE */

#ifdef CONFIG_PMP_STACK_GUARD
/* Reserve some memory for the stack guard.
 * This is just a minimally-sized region at the beginning of the stack
 * object, which is programmed to produce an exception if written to.
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+ <- thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............+
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_THREAD_STACK_RESERVED		Z_RISCV_STACK_GUARD_SIZE
#define ARCH_THREAD_STACK_OBJ_ALIGN(size)	Z_RISCV_PMP_ALIGN
/* Default for ARCH_THREAD_STACK_SIZE_ADJUST */
#else /* !CONFIG_PMP_STACK_GUARD */
/* No stack guard and no userspace: use defaults for everything. */
#endif /* CONFIG_PMP_STACK_GUARD */
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_64BIT
#define RV_OP_LOADREG ld
#define RV_OP_STOREREG sd
#define RV_REGSIZE 8
#define RV_REGSHIFT 3
#else
#define RV_OP_LOADREG lw
#define RV_OP_STOREREG sw
#define RV_REGSIZE 4
#define RV_REGSHIFT 2
#endif
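
/* These macros keep assembly code that saves and restores CPU context
 * word-size agnostic. A minimal sketch (illustrative only, not taken from
 * the actual context-switch code) of spilling and reloading two
 * callee-saved registers:
 *
 *   addi sp, sp, -(2 * RV_REGSIZE)
 *   RV_OP_STOREREG s0, (0 * RV_REGSIZE)(sp)
 *   RV_OP_STOREREG s1, (1 * RV_REGSIZE)(sp)
 *   ...
 *   RV_OP_LOADREG  s0, (0 * RV_REGSIZE)(sp)
 *   RV_OP_LOADREG  s1, (1 * RV_REGSIZE)(sp)
 *   addi sp, sp, (2 * RV_REGSIZE)
 *
 * On RV32 these expand to sw/lw with 4-byte slots; on RV64, to sd/ld with
 * 8-byte slots. RV_REGSHIFT is log2(RV_REGSIZE), for shift-based indexing.
 */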

#ifdef CONFIG_CPU_HAS_FPU_DOUBLE_PRECISION
#define RV_OP_LOADFPREG fld
#define RV_OP_STOREFPREG fsd
#else
#define RV_OP_LOADFPREG flw
#define RV_OP_STOREFPREG fsw
#endif

/* Common mstatus bits. All supported cores today have the same
 * layouts.
 */

#define MSTATUS_IEN     (1UL << 3)
#define MSTATUS_MPP_M   (3UL << 11)
#define MSTATUS_MPIE_EN (1UL << 7)
#define MSTATUS_FS_INIT (1UL << 13)
#define MSTATUS_FS_MASK ((1UL << 13) | (1UL << 14))

/* This comes from openisa_rv32m1, but doesn't seem to hurt on other
 * platforms:
 * - Preserve machine privileges in MPP. If you see any documentation
 *   telling you that MPP is read-only on this SoC, don't believe its
 *   lies.
 * - Enable interrupts when exiting from exception into a new thread
 *   by setting MPIE now, so it will be copied into IE on mret.
 */
#define MSTATUS_DEF_RESTORE (MSTATUS_MPP_M | MSTATUS_MPIE_EN)
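
/* A sketch of how a new thread's initial saved mstatus value might be
 * composed from these bits (illustrative only; the authoritative code
 * lives in the arch thread-setup path):
 *
 *   ulong_t mstatus = MSTATUS_DEF_RESTORE;  // run in M-mode, MPIE set
 *
 * #ifdef CONFIG_FPU
 *   mstatus |= MSTATUS_FS_INIT;             // mark FPU state "Initial"
 * #endif
 *
 * On mret, MPIE is copied into MIE, so the thread starts with interrupts
 * enabled.
 */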

#ifndef _ASMLANGUAGE
#include <sys/util.h>

#ifdef __cplusplus
extern "C" {
#endif

#define STACK_ROUND_UP(x) ROUND_UP(x, ARCH_STACK_PTR_ALIGN)

/* These macros convert the value of their argument to a string */
#define DO_TOSTR(s) #s
#define TOSTR(s) DO_TOSTR(s)

/* These macros concatenate the values of their arguments into one token */
#define DO_CONCAT(x, y) x ## y
#define CONCAT(x, y) DO_CONCAT(x, y)
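
/* Illustrative expansions (the operands here are made up for the example):
 *
 *   #define VAL 42
 *   TOSTR(VAL)      -> "42"     (argument expanded, then stringized)
 *   CONCAT(irq_, 5) -> irq_5    (tokens pasted into a single identifier)
 *
 * The two-level indirection (DO_TOSTR/DO_CONCAT) is what forces the
 * preprocessor to expand the argument before stringizing or pasting it.
 */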

/* Kernel macros for memory attribution
 * (access permissions and cacheability).
 *
 * The macros are to be stored in k_mem_partition_attr_t
 * objects. The format of a k_mem_partition_attr_t object
 * is a uint8_t composed of the PMP configuration register
 * flags located in arch/riscv/include/core_pmp.h.
 */

/* Read-Write access permission attributes */
#define K_MEM_PARTITION_P_RW_U_RW ((k_mem_partition_attr_t) \
	{PMP_R | PMP_W})
#define K_MEM_PARTITION_P_RW_U_RO ((k_mem_partition_attr_t) \
	{PMP_R})
#define K_MEM_PARTITION_P_RW_U_NA ((k_mem_partition_attr_t) \
	{0})
#define K_MEM_PARTITION_P_RO_U_RO ((k_mem_partition_attr_t) \
	{PMP_R})
#define K_MEM_PARTITION_P_RO_U_NA ((k_mem_partition_attr_t) \
	{0})
#define K_MEM_PARTITION_P_NA_U_NA ((k_mem_partition_attr_t) \
	{0})

/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RWX_U_RWX ((k_mem_partition_attr_t) \
	{PMP_R | PMP_W | PMP_X})
#define K_MEM_PARTITION_P_RX_U_RX ((k_mem_partition_attr_t) \
	{PMP_R | PMP_X})

/* Typedef for the k_mem_partition attribute */
typedef struct {
	uint8_t pmp_attr;
} k_mem_partition_attr_t;
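
/* A sketch of how these attributes are consumed through the generic memory
 * domain API (buffer name and size are hypothetical; the partition must
 * respect the PMP alignment and granularity rules above):
 *
 *   static uint8_t __aligned(Z_RISCV_PMP_ALIGN) shared_buf[256];
 *
 *   K_MEM_PARTITION_DEFINE(shared_part, shared_buf, sizeof(shared_buf),
 *                          K_MEM_PARTITION_P_RW_U_RW);
 */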

void arch_irq_enable(unsigned int irq);
void arch_irq_disable(unsigned int irq);
int arch_irq_is_enabled(unsigned int irq);
void arch_irq_priority_set(unsigned int irq, unsigned int prio);
void z_irq_spurious(const void *unused);

#if defined(CONFIG_RISCV_HAS_PLIC)
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
{ \
	Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
	arch_irq_priority_set(irq_p, priority_p); \
}
#else
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
{ \
	Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
}
#endif
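
/* ARCH_IRQ_CONNECT is normally reached through the generic IRQ_CONNECT()
 * macro. A sketch of a driver hooking up a hypothetical IRQ line 5 at
 * priority 1 (my_isr is a made-up name):
 *
 *   static void my_isr(const void *arg)
 *   {
 *           ARG_UNUSED(arg);
 *           // acknowledge and handle the device interrupt here
 *   }
 *
 *   IRQ_CONNECT(5, 1, my_isr, NULL, 0);
 *   irq_enable(5);
 *
 * On SoCs with a PLIC, the priority is programmed into the interrupt
 * controller via arch_irq_priority_set(); otherwise it is ignored.
 */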

/*
 * Use the atomic csrrc instruction to lock out global interrupts.
 * csrrc: atomically read a CSR and clear the given bits in it.
 */
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
	unsigned int key;
	ulong_t mstatus;

	__asm__ volatile ("csrrc %0, mstatus, %1"
			  : "=r" (mstatus)
			  : "r" (MSTATUS_IEN)
			  : "memory");

	key = (mstatus & MSTATUS_IEN);
	return key;
}

/*
 * Use the atomic csrrs instruction to unlock global interrupts.
 * csrrs: atomically read a CSR and set the given bits in it.
 */
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
	ulong_t mstatus;

	__asm__ volatile ("csrrs %0, mstatus, %1"
			  : "=r" (mstatus)
			  : "r" (key & MSTATUS_IEN)
			  : "memory");
}
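
/* The canonical critical-section pattern built on this pair (usually via
 * the generic irq_lock()/irq_unlock() wrappers, sketched here directly):
 *
 *   unsigned int key = arch_irq_lock();
 *   // critical section: MSTATUS_IEN (mstatus.MIE) is clear
 *   arch_irq_unlock(key);
 *
 * Because the key captures the previous interrupt-enable state, nested
 * lock/unlock pairs re-enable interrupts only at the outermost unlock.
 */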

static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
	/* FIXME: looking at arch_irq_lock, this should be reducible
	 * to just testing that key is nonzero (because it should only
	 * have the single bit set).  But there is a mask applied to
	 * the argument in arch_irq_unlock() that has me worried
	 * that something elsewhere might try to set a bit?  Do it
	 * the safe way for now.
	 */
	return (key & MSTATUS_IEN) == MSTATUS_IEN;
}

static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile("nop");
}

extern uint32_t sys_clock_cycle_get_32(void);

static inline uint32_t arch_k_cycle_get_32(void)
{
	return sys_clock_cycle_get_32();
}
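
/* A minimal sketch of measuring elapsed cycles with this API (do_work() is
 * a hypothetical workload; unsigned subtraction handles a single wrap of
 * the 32-bit counter correctly):
 *
 *   uint32_t start = arch_k_cycle_get_32();
 *   do_work();
 *   uint32_t cycles = arch_k_cycle_get_32() - start;
 */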

#ifdef CONFIG_USERSPACE
#include <arch/riscv/error.h>
#endif /* CONFIG_USERSPACE */

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#if defined(CONFIG_SOC_FAMILY_RISCV_PRIVILEGE)
#include <arch/riscv/riscv-privilege/asm_inline.h>
#endif

#endif /* ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_ */