/*
 * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
 * Copyright (c) 2018 Foundries.io Ltd
 * Copyright (c) 2020 BayLibre, SAS
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/sys/util.h>
#include <zephyr/kernel.h>
#include <zephyr/syscall.h>
#include <zephyr/arch/riscv/csr.h>
#include <zephyr/arch/riscv/irq.h>
#include <zephyr/arch/riscv/syscall.h>
#include "asm_macros.inc"

#ifdef CONFIG_RISCV_SOC_HAS_ISR_STACKING
#include <soc_isr_stacking.h>
#endif

/* Convenience macro for loading/storing register states. */
#define DO_CALLER_SAVED(op) \
	RV_E(	op t0, __struct_arch_esf_t0_OFFSET(sp)	);\
	RV_E(	op t1, __struct_arch_esf_t1_OFFSET(sp)	);\
	RV_E(	op t2, __struct_arch_esf_t2_OFFSET(sp)	);\
	RV_I(	op t3, __struct_arch_esf_t3_OFFSET(sp)	);\
	RV_I(	op t4, __struct_arch_esf_t4_OFFSET(sp)	);\
	RV_I(	op t5, __struct_arch_esf_t5_OFFSET(sp)	);\
	RV_I(	op t6, __struct_arch_esf_t6_OFFSET(sp)	);\
	RV_E(	op a0, __struct_arch_esf_a0_OFFSET(sp)	);\
	RV_E(	op a1, __struct_arch_esf_a1_OFFSET(sp)	);\
	RV_E(	op a2, __struct_arch_esf_a2_OFFSET(sp)	);\
	RV_E(	op a3, __struct_arch_esf_a3_OFFSET(sp)	);\
	RV_E(	op a4, __struct_arch_esf_a4_OFFSET(sp)	);\
	RV_E(	op a5, __struct_arch_esf_a5_OFFSET(sp)	);\
	RV_I(	op a6, __struct_arch_esf_a6_OFFSET(sp)	);\
	RV_I(	op a7, __struct_arch_esf_a7_OFFSET(sp)	);\
	RV_E(	op ra, __struct_arch_esf_ra_OFFSET(sp)	)
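/*
 * Note: RV_E() lines cover registers present in both the full I and the
 * reduced E register files, while RV_I() lines cover registers (t3-t6,
 * a6-a7) that do not exist on RV32E and are therefore skipped in that
 * configuration (see asm_macros.inc).
 */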

#ifdef CONFIG_EXCEPTION_DEBUG
/* Convenience macro for storing callee-saved registers [s0 - s11]. */
#define STORE_CALLEE_SAVED() \
	RV_E(	sr s0, ___callee_saved_t_s0_OFFSET(sp)		);\
	RV_E(	sr s1, ___callee_saved_t_s1_OFFSET(sp)		);\
	RV_I(	sr s2, ___callee_saved_t_s2_OFFSET(sp)		);\
	RV_I(	sr s3, ___callee_saved_t_s3_OFFSET(sp)		);\
	RV_I(	sr s4, ___callee_saved_t_s4_OFFSET(sp)		);\
	RV_I(	sr s5, ___callee_saved_t_s5_OFFSET(sp)		);\
	RV_I(	sr s6, ___callee_saved_t_s6_OFFSET(sp)		);\
	RV_I(	sr s7, ___callee_saved_t_s7_OFFSET(sp)		);\
	RV_I(	sr s8, ___callee_saved_t_s8_OFFSET(sp)		);\
	RV_I(	sr s9, ___callee_saved_t_s9_OFFSET(sp)		);\
	RV_I(	sr s10, ___callee_saved_t_s10_OFFSET(sp)	);\
	RV_I(	sr s11, ___callee_saved_t_s11_OFFSET(sp)	)
#endif /* CONFIG_EXCEPTION_DEBUG */

	.macro get_current_cpu dst
#if defined(CONFIG_SMP) || defined(CONFIG_USERSPACE)
	csrr \dst, mscratch
#else
	la \dst, _kernel + ___kernel_t_cpus_OFFSET
#endif
	.endm
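
/*
 * With SMP or userspace enabled, mscratch is expected to hold the address
 * of the current CPU's _cpu_t structure; otherwise only CPU0 exists and its
 * structure lives at a fixed offset inside _kernel.
 */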

/* imports */
GDATA(_sw_isr_table)
#ifdef CONFIG_RISCV_SOC_EXCEPTION_FROM_IRQ
GTEXT(__soc_is_irq)
#endif
GTEXT(__soc_handle_irq)
GTEXT(z_riscv_fault)
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
GTEXT(__soc_save_context)
GTEXT(__soc_restore_context)
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */

#ifdef CONFIG_EXCEPTION_DEBUG
GTEXT(z_riscv_fatal_error_csf)
#else
GTEXT(z_riscv_fatal_error)
#endif /* CONFIG_EXCEPTION_DEBUG */

GTEXT(z_get_next_switch_handle)
GTEXT(z_riscv_switch)
GTEXT(z_riscv_thread_start)

#ifdef CONFIG_TRACING
GTEXT(sys_trace_isr_enter)
GTEXT(sys_trace_isr_exit)
#endif

#ifdef CONFIG_USERSPACE
GDATA(_k_syscall_table)
#endif

#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_HANDLING
GTEXT(__soc_handle_all_irqs)
#endif

/* exports */
GTEXT(_isr_wrapper)

/* use ABI names of registers for the sake of simplicity */

/*
 * Generic architecture-level IRQ handling, along with callouts to
 * SoC-specific routines.
 *
 * Architecture level IRQ handling includes basic context save/restore
 * of standard registers and calling ISRs registered at Zephyr's driver
 * level.
 *
 * Since RISC-V does not completely prescribe IRQ handling behavior,
 * implementations vary (and some deviate from the behavior that is
 * defined). Hence, the arch-level code expects the following functions
 * to be provided at the SOC level:
 *
 *     - __soc_is_irq (optional): decide if we're handling an interrupt or an
 *       exception
 *     - __soc_handle_irq: handle SoC-specific details for a pending IRQ
 *       (e.g. clear a pending bit in a SoC-specific register)
 *
 * If CONFIG_RISCV_SOC_CONTEXT_SAVE=y, calls to SoC-level context save/restore
 * routines are also made here. For details, see the Kconfig help text.
 */
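
/*
 * Illustrative C prototypes for those SoC hooks (a sketch, not normative;
 * the exact signatures are defined by the SoC layer):
 *
 *     int __soc_is_irq(void);                  // nonzero if handling an IRQ
 *     void __soc_handle_irq(unsigned long irq); // IRQ number, passed in a0
 */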

/*
 * Handler called upon each exception/interrupt/fault
 */
SECTION_FUNC(exception.entry, _isr_wrapper)

/* Provide requested alignment, which depends e.g. on MTVEC format */
.balign CONFIG_RISCV_TRAP_HANDLER_ALIGNMENT

#ifdef CONFIG_USERSPACE
	/* retrieve address of _current_cpu preserving s0 */
	csrrw s0, mscratch, s0

	/* preserve t0 and t1 temporarily */
	sr t0, _curr_cpu_arch_user_exc_tmp0(s0)
	sr t1, _curr_cpu_arch_user_exc_tmp1(s0)

	/* determine if we come from user space */
	csrr t0, mstatus
	li t1, MSTATUS_MPP
	and t0, t0, t1
	bnez t0, 1f

	/* we came from user space: switch to our privileged stack */
	mv t0, sp
	lr sp, _curr_cpu_arch_user_exc_sp(s0)

	/* Save user stack value. Coming from user space, we know this
	 * can't overflow the privileged stack. The esf will be allocated
	 * later, but it is safe to store our saved user sp here.
	 */
	sr t0, (-__struct_arch_esf_SIZEOF + __struct_arch_esf_sp_OFFSET)(sp)

	/* Make sure tls pointer is sane */
	lr t0, ___cpu_t_current_OFFSET(s0)
	lr tp, _thread_offset_to_tls(t0)

	/* Make sure global pointer is sane */
#ifdef CONFIG_RISCV_GP
	.option push
	.option norelax
	la gp, __global_pointer$
	.option pop
#elif defined(CONFIG_RISCV_CURRENT_VIA_GP)
	lr gp, ___cpu_t_current_OFFSET(s0)
#endif /* CONFIG_RISCV_GP / CONFIG_RISCV_CURRENT_VIA_GP */

	/* Clear our per-thread usermode flag */
	lui t0, %tprel_hi(is_user_mode)
	add t0, t0, tp, %tprel_add(is_user_mode)
	sb zero, %tprel_lo(is_user_mode)(t0)
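	/*
	 * The three instructions above are the local-exec TLS access sequence;
	 * they are roughly equivalent to the C statement "is_user_mode = false;"
	 * for a __thread variable, with tp as the thread pointer.
	 */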
1:
	/* retrieve original t0/t1 values */
	lr t0, _curr_cpu_arch_user_exc_tmp0(s0)
	lr t1, _curr_cpu_arch_user_exc_tmp1(s0)

	/* retrieve original s0 and restore _current_cpu in mscratch */
	csrrw s0, mscratch, s0
#endif

#ifdef CONFIG_RISCV_SOC_HAS_ISR_STACKING
	SOC_ISR_SW_STACKING
#else
	/* Save caller-saved registers on current thread stack. */
	addi sp, sp, -__struct_arch_esf_SIZEOF
	DO_CALLER_SAVED(sr)		;
#endif /* CONFIG_RISCV_SOC_HAS_ISR_STACKING */

	/* Save s0 in the esf and load it with &_current_cpu. */
	sr s0, __struct_arch_esf_s0_OFFSET(sp)
	get_current_cpu s0

#ifdef CONFIG_CLIC_SUPPORT_INTERRUPT_LEVEL
	/* Save mcause register */
	csrr t0, mcause
	sr t0, __struct_arch_esf_mcause_OFFSET(sp)
#endif /* CONFIG_CLIC_SUPPORT_INTERRUPT_LEVEL */

	/* Save MEPC register */
	csrr t0, mepc
	sr t0, __struct_arch_esf_mepc_OFFSET(sp)

	/* Save MSTATUS register */
	csrr t2, mstatus
	sr t2, __struct_arch_esf_mstatus_OFFSET(sp)

#if defined(CONFIG_FPU_SHARING)
	/* determine if FPU access was disabled */
	li t1, MSTATUS_FS
	and t1, t1, t2
	bnez t1, no_fp
	/* determine if this is an Illegal Instruction exception */
	csrr t2, mcause
	li t1, CONFIG_RISCV_MCAUSE_EXCEPTION_MASK
	and t2, t2, t1
	li t1, 2		/* 2 = illegal instruction */
	bne t1, t2, no_fp
	/* determine if we trapped on an FP instruction. */
	csrr t2, mtval		/* get faulting instruction */
#ifdef CONFIG_QEMU_TARGET
	/*
	 * Some implementations may not support MTVAL in this capacity.
	 * Notably QEMU when a CSR instruction is involved.
	 */
	bnez t2, 1f
	lw t2, 0(t0)		/* t0 = mepc */
1:
#endif
	andi t0, t2, 0x7f	/* keep only the opcode bits */
	/*
	 * Major FP opcodes:
	 * 0000111 = LOAD-FP
	 * 0100111 = STORE-FP
	 * 1000011 = MADD
	 * 1000111 = MSUB
	 * 1001011 = NMSUB
	 * 1001111 = NMADD
	 * 1010011 = OP-FP
	 */
	xori t1, t0, 0b1010011	/* OP-FP */
	beqz t1, is_fp
	ori  t1, t0, 0b0100000
	xori t1, t1, 0b0100111	/* LOAD-FP / STORE-FP */
	beqz t1, is_fp
	ori  t1, t0, 0b0001100
	xori t1, t1, 0b1001111	/* MADD / MSUB / NMSUB / NMADD */
	beqz t1, is_fp
	/*
	 * The FRCSR, FSCSR, FRRM, FSRM, FSRMI, FRFLAGS, FSFLAGS and FSFLAGSI
	 * are in fact CSR instructions targeting the fcsr, frm and fflags
	 * registers. They should be caught as FPU instructions as well.
	 *
	 * CSR format: csr#[31-20] src[19-15] op[14-12] dst[11-7] SYSTEM[6-0]
	 * SYSTEM = 0b1110011, op = 0b.xx where xx is never 0
	 * The csr# of interest are: 1=fflags, 2=frm, 3=fcsr
	 */
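	/*
	 * Example (for illustration): "frcsr a0" is the canonical alias for
	 * "csrrs a0, fcsr, zero", i.e. csr# = 3 and op = 0b010, which the
	 * checks below classify as an FP instruction.
	 */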
	xori t1, t0, 0b1110011	/* SYSTEM opcode */
	bnez t1, 2f		/* not a CSR insn */
	srli t0, t2, 12
	andi t0, t0, 0x3
	beqz t0, 2f		/* not a CSR insn */
	srli t0, t2, 20		/* isolate the csr register number */
	beqz t0, 2f		/* 0=ustatus */
	andi t0, t0, ~0x3	/* 1=fflags, 2=frm, 3=fcsr */
#if !defined(CONFIG_RISCV_ISA_EXT_C)
	bnez t0, no_fp
#else
	beqz t0, is_fp
2:	/* remaining non-RVC (0b11) and RVC quadrant 0b01 insns are not FP instructions */
	andi t1, t2, 1
	bnez t1, no_fp
	/*
	 * 001...........00 = C.FLD    RV32/64  (RV128 = C.LQ)
	 * 001...........10 = C.FLDSP  RV32/64  (RV128 = C.LQSP)
	 * 011...........00 = C.FLW    RV32     (RV64/128 = C.LD)
	 * 011...........10 = C.FLWSP  RV32     (RV64/128 = C.LDSP)
	 * 101...........00 = C.FSD    RV32/64  (RV128 = C.SQ)
	 * 101...........10 = C.FSDSP  RV32/64  (RV128 = C.SQSP)
	 * 111...........00 = C.FSW    RV32     (RV64/128 = C.SD)
	 * 111...........10 = C.FSWSP  RV32     (RV64/128 = C.SDSP)
	 *
	 * so must be .01............. on RV64 and ..1............. on RV32.
	 */
	srli t0, t2, 8
#if defined(CONFIG_64BIT)
	andi t1, t0, 0b01100000
	xori t1, t1, 0b00100000
	bnez t1, no_fp
#else
	andi t1, t0, 0b00100000
	beqz t1, no_fp
#endif
#endif /* CONFIG_RISCV_ISA_EXT_C */

is_fp:	/* Process the FP trap and quickly return from exception */
	la ra, fp_trap_exit
	mv a0, sp
	tail z_riscv_fpu_trap
2:
no_fp:	/* increment _current->arch.exception_depth */
	lr t0, ___cpu_t_current_OFFSET(s0)
	lb t1, _thread_offset_to_exception_depth(t0)
	add t1, t1, 1
	sb t1, _thread_offset_to_exception_depth(t0)

	/* configure the FPU for exception mode */
	call z_riscv_fpu_enter_exc
#endif /* CONFIG_FPU_SHARING */

#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
	/* Handle context saving at SOC level. */
	addi a0, sp, __struct_arch_esf_soc_context_OFFSET
	jal ra, __soc_save_context
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */

	/*
	 * Check if the exception is the result of an interrupt or not
	 * (SOC dependent). Following the RISC-V architecture spec, the MSB
	 * of the mcause register is used to indicate whether an exception
	 * is the result of an interrupt or an exception/fault. But for some
	 * SOCs (like pulpino or riscv-qemu), the MSB is never set to indicate
	 * an interrupt. Hence, check for interrupt/exception via the __soc_is_irq
	 * function (that needs to be implemented by each SOC). The result is
	 * returned via register a0 (1: interrupt, 0: exception).
	 */
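	/*
	 * For the spec-compliant case this boils down to (illustrative C):
	 *     bool is_interrupt = mcause >> (XLEN - 1);
	 * where XLEN is the register width.
	 */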
#ifdef CONFIG_RISCV_SOC_EXCEPTION_FROM_IRQ
	jal ra, __soc_is_irq
	bnez a0, is_interrupt
#else
	csrr t0, mcause
	srli t0, t0, RISCV_MCAUSE_IRQ_POS
	bnez t0, is_interrupt
#endif

	/*
	 * If the exception is the result of an ECALL, check whether to
	 * perform a context-switch or an IRQ offload. Otherwise call z_riscv_fault
	 * to report the exception.
	 */
	csrr t0, mcause
	li t2, CONFIG_RISCV_MCAUSE_EXCEPTION_MASK
	and t0, t0, t2

	/*
	 * If mcause == RISCV_EXC_ECALLM, handle system call from
	 * kernel thread.
	 */
	li t1, RISCV_EXC_ECALLM
	beq t0, t1, is_kernel_syscall

#ifdef CONFIG_USERSPACE
	/*
	 * If mcause == RISCV_EXC_ECALLU, handle system call
	 * for user mode thread.
	 */
	li t1, RISCV_EXC_ECALLU
	beq t0, t1, is_user_syscall

#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)
	/*
	 * Determine if we come from user space. If so, reconfigure the PMP for
	 * kernel mode stack guard.
	 */
	csrr t0, mstatus
	li t1, MSTATUS_MPP
	and t0, t0, t1
	bnez t0, 1f
	lr a0, ___cpu_t_current_OFFSET(s0)
	call z_riscv_pmp_stackguard_enable
1:
#endif /* CONFIG_PMP_STACK_GUARD */

#endif /* CONFIG_USERSPACE */

	/*
	 * Call z_riscv_fault to handle the exception.
	 * The stack pointer points to a struct_arch_esf structure; pass it
	 * to z_riscv_fault via register a0.
	 * In case z_riscv_fault returns, set the return address to
	 * no_reschedule so the stack is restored.
	 */
	mv a0, sp
	la ra, no_reschedule
	tail z_riscv_fault

is_kernel_syscall:
	/*
	 * A syscall is the result of an ecall instruction, in which case the
	 * MEPC will contain the address of the ecall instruction.
	 * Increment saved MEPC by 4 to prevent triggering the same ecall
	 * again upon exiting the ISR.
	 *
	 * It's safe to always increment by 4, even with compressed
	 * instructions, because the ecall instruction is always 4 bytes.
	 */
	lr t0, __struct_arch_esf_mepc_OFFSET(sp)
	addi t0, t0, 4
	sr t0, __struct_arch_esf_mepc_OFFSET(sp)

#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)
	/* Re-activate PMP for m-mode */
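	/*
	 * Clearing MPP and setting MPRV makes M-mode loads/stores be checked
	 * with U-mode privileges, so the PMP stack guard entries apply again
	 * while we keep executing in M-mode.
	 */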
	li t1, MSTATUS_MPP
	csrc mstatus, t1
	li t1, MSTATUS_MPRV
	csrs mstatus, t1
#endif

	/* Determine what to do. Operation code is in t0. */
	lr t0, __struct_arch_esf_t0_OFFSET(sp)

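	/*
	 * The beqz below relies on RV_ECALL_RUNTIME_EXCEPT being 0;
	 * the .if directive turns that assumption into a build-time assertion.
	 */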
	.if RV_ECALL_RUNTIME_EXCEPT != 0; .err; .endif
	beqz t0, do_fault

#if defined(CONFIG_IRQ_OFFLOAD)
	li t1, RV_ECALL_IRQ_OFFLOAD
	beq t0, t1, do_irq_offload
#endif

#ifdef CONFIG_RISCV_ALWAYS_SWITCH_THROUGH_ECALL
	li t1, RV_ECALL_SCHEDULE
	bne t0, t1, skip_schedule
	lr a0, __struct_arch_esf_a0_OFFSET(sp)
	lr a1, __struct_arch_esf_a1_OFFSET(sp)

#ifdef CONFIG_FPU_SHARING
	/*
	 * When an ECALL is used for a context-switch, the current thread has
	 * been updated to the next thread.
	 * Add the exception_depth back to the previous thread.
	 */
	lb t1, _thread_offset_to_exception_depth(a0)
	add t1, t1, -1
	sb t1, _thread_offset_to_exception_depth(a0)

	lb t1, _thread_offset_to_exception_depth(a1)
	add t1, t1, 1
	sb t1, _thread_offset_to_exception_depth(a1)
#endif

	j reschedule
skip_schedule:
#endif

	/* default fault code is K_ERR_KERNEL_OOPS */
	li a0, 3
	j 1f

do_fault:
	/* Handle RV_ECALL_RUNTIME_EXCEPT. Retrieve reason in a0, esf in a1. */
	lr a0, __struct_arch_esf_a0_OFFSET(sp)
1:	mv a1, sp

#ifdef CONFIG_EXCEPTION_DEBUG
	/*
	 * Restore the s0 we saved early in ISR entry
	 * so it shows up properly in the CSF.
	 */
	lr s0, __struct_arch_esf_s0_OFFSET(sp)

	/* Allocate space for callee-saved registers on current thread stack */
	addi sp, sp, -__callee_saved_t_SIZEOF

	/* Save callee-saved registers to be passed as 3rd arg */
	STORE_CALLEE_SAVED()		;
	mv a2, sp

#ifdef CONFIG_EXTRA_EXCEPTION_INFO
	/* Store csf's addr into esf (a1 still holds the pointer to the esf at this point) */
	sr a2, __struct_arch_esf_csf_OFFSET(a1)
#endif /* CONFIG_EXTRA_EXCEPTION_INFO */

	tail z_riscv_fatal_error_csf
#else
	tail z_riscv_fatal_error
#endif /* CONFIG_EXCEPTION_DEBUG */

#if defined(CONFIG_IRQ_OFFLOAD)
do_irq_offload:
	/*
	 * Retrieve the provided routine and argument from the stack.
	 * The routine pointer is in saved a0 and its argument in saved a1,
	 * so we load them into a1/a0 (reversed).
	 */
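	/*
	 * This is the kernel-mode half of IRQ offloading: the requesting side
	 * (see irq_offload()) is expected to issue an RV_ECALL_IRQ_OFFLOAD
	 * ecall with the routine in a0 and its parameter in a1, which is what
	 * lands in the saved esf slots read below.
	 */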
	lr a1, __struct_arch_esf_a0_OFFSET(sp)
	lr a0, __struct_arch_esf_a1_OFFSET(sp)

	/* Increment _current_cpu->nested */
	lw t1, ___cpu_t_nested_OFFSET(s0)
	addi t2, t1, 1
	sw t2, ___cpu_t_nested_OFFSET(s0)
	bnez t1, 1f

	/* Switch to interrupt stack */
	mv t0, sp
	lr sp, ___cpu_t_irq_stack_OFFSET(s0)

	/* Save thread stack pointer on interrupt stack */
	addi sp, sp, -16
	sr t0, 0(sp)
1:
	/* Execute provided routine (argument is in a0 already). */
	jalr ra, a1, 0

	/* Leave through the regular IRQ exit path */
	j irq_done
#endif /* CONFIG_IRQ_OFFLOAD */

#ifdef CONFIG_USERSPACE
is_user_syscall:

#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)
	/*
	 * We came from userspace and need to reconfigure the
	 * PMP for kernel mode stack guard.
	 */
	lr a0, ___cpu_t_current_OFFSET(s0)
	call z_riscv_pmp_stackguard_enable
#endif

	/* It is safe to re-enable IRQs now */
	csrs mstatus, MSTATUS_IEN

	/*
	 * Same as for is_kernel_syscall: increment saved MEPC by 4 to
	 * prevent triggering the same ecall again upon exiting the ISR.
	 */
	lr t1, __struct_arch_esf_mepc_OFFSET(sp)
	addi t1, t1, 4
	sr t1, __struct_arch_esf_mepc_OFFSET(sp)

	/* Restore argument registers from user stack */
	lr a0, __struct_arch_esf_a0_OFFSET(sp)
	lr a1, __struct_arch_esf_a1_OFFSET(sp)
	lr a2, __struct_arch_esf_a2_OFFSET(sp)
	lr a3, __struct_arch_esf_a3_OFFSET(sp)
	lr a4, __struct_arch_esf_a4_OFFSET(sp)
	lr a5, __struct_arch_esf_a5_OFFSET(sp)
	lr t0, __struct_arch_esf_t0_OFFSET(sp)
#if defined(CONFIG_RISCV_ISA_RV32E)
	/* Stack alignment for RV32E is 4 bytes */
	addi sp, sp, -4
	mv t1, sp
	sw t1, 0(sp)
#else
	mv a6, sp
#endif /* CONFIG_RISCV_ISA_RV32E */

	/* validate syscall limit */
	li t1, K_SYSCALL_LIMIT
	bltu t0, t1, valid_syscall_id

	/* bad syscall id: pass the bad id as the first argument and redirect to K_SYSCALL_BAD */
	mv a0, t0
	li t0, K_SYSCALL_BAD

valid_syscall_id:

	la t2, _k_syscall_table

	slli t1, t0, RV_REGSHIFT	# Determine offset from index value
	add t2, t2, t1			# Table addr + offset = function addr
	lr t2, 0(t2)			# Load function address
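	/*
	 * Roughly equivalent C for the dispatch below (illustrative only):
	 *     handler = _k_syscall_table[id];
	 *     a0 = handler(a0, a1, a2, a3, a4, a5, ssf);
	 * where "ssf" is the esf pointer passed in a6 (or on the stack for
	 * RV32E) and "id" was validated against K_SYSCALL_LIMIT above.
	 */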

	/* Execute syscall function */
	jalr ra, t2, 0

#if defined(CONFIG_RISCV_ISA_RV32E)
	addi sp, sp, 4
#endif /* CONFIG_RISCV_ISA_RV32E */

	/* Update a0 (return value) on the stack */
	sr a0, __struct_arch_esf_a0_OFFSET(sp)

	/* Disable IRQs again before leaving */
	csrc mstatus, MSTATUS_IEN
	j might_have_rescheduled
#endif /* CONFIG_USERSPACE */

is_interrupt:

#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)
#ifdef CONFIG_USERSPACE
	/*
	 * If we came from userspace then we need to reconfigure the
	 * PMP for kernel mode stack guard.
	 */
	lr t0, __struct_arch_esf_mstatus_OFFSET(sp)
	li t1, MSTATUS_MPP
	and t0, t0, t1
	bnez t0, 1f
	lr a0, ___cpu_t_current_OFFSET(s0)
	call z_riscv_pmp_stackguard_enable
	j 2f
#endif /* CONFIG_USERSPACE */
1:	/* Re-activate PMP for m-mode */
	li t1, MSTATUS_MPP
	csrc mstatus, t1
	li t1, MSTATUS_MPRV
	csrs mstatus, t1
2:
#endif

	/* Increment _current_cpu->nested */
	lw t1, ___cpu_t_nested_OFFSET(s0)
	addi t2, t1, 1
	sw t2, ___cpu_t_nested_OFFSET(s0)
	bnez t1, on_irq_stack

	/* Switch to interrupt stack */
	mv t0, sp
	lr sp, ___cpu_t_irq_stack_OFFSET(s0)

	/*
	 * Save the thread stack pointer on the interrupt stack.
	 * In RISC-V, the stack pointer needs to be 16-byte aligned.
	 */
	addi sp, sp, -16
	sr t0, 0(sp)

on_irq_stack:

#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_HANDLING
	call __soc_handle_all_irqs
#else

#ifdef CONFIG_TRACING_ISR
	call sys_trace_isr_enter
#endif

	/* Get IRQ causing interrupt */
	csrr a0, mcause
	li t0, CONFIG_RISCV_MCAUSE_EXCEPTION_MASK
	and a0, a0, t0

	/*
	 * Clear the pending IRQ generating the interrupt at SOC level.
	 * Pass the IRQ number to __soc_handle_irq via register a0.
	 */
	jal ra, __soc_handle_irq

	/*
	 * Call the corresponding registered function in _sw_isr_table
	 * (each table entry is two words wide, so scale the index accordingly).
	 */
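	/*
	 * Each _sw_isr_table entry is { const void *arg; void (*isr)(const void *); },
	 * so the lookup below is roughly (illustrative C):
	 *     _sw_isr_table[irq].isr(_sw_isr_table[irq].arg);
	 */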
	la t0, _sw_isr_table
	slli a0, a0, (RV_REGSHIFT + 1)
	add t0, t0, a0

	/* Load argument in a0 register */
	lr a0, 0(t0)

	/* Load ISR function address in register t1 */
	lr t1, RV_REGSIZE(t0)

	/* Call ISR function */
	jalr ra, t1, 0

#ifdef CONFIG_TRACING_ISR
	call sys_trace_isr_exit
#endif

#endif

irq_done:
	/* Decrement _current_cpu->nested */
	lw t2, ___cpu_t_nested_OFFSET(s0)
	addi t2, t2, -1
	sw t2, ___cpu_t_nested_OFFSET(s0)
	bnez t2, no_reschedule

	/* nested count is back to 0: return to the thread stack */
	lr sp, 0(sp)

#ifdef CONFIG_STACK_SENTINEL
	call z_check_stack_sentinel
#endif

check_reschedule:

#ifdef CONFIG_MULTITHREADING

	/* Get pointer to current thread on this CPU */
	lr a1, ___cpu_t_current_OFFSET(s0)

	/*
	 * Get next thread to schedule with z_get_next_switch_handle().
	 * We pass it NULL as we didn't save the whole thread context yet.
	 * If no scheduling is necessary then NULL will be returned.
	 */
	addi sp, sp, -16
	sr a1, 0(sp)
	mv a0, zero
	call z_get_next_switch_handle
	lr a1, 0(sp)
	addi sp, sp, 16
	beqz a0, no_reschedule

reschedule:

	/*
	 * Perform context switch:
	 * a0 = new thread
	 * a1 = old thread
	 */
	call z_riscv_switch

z_riscv_thread_start:
might_have_rescheduled:
	/* reload s0 with &_current_cpu as it might have changed or be unset */
	get_current_cpu s0

#endif /* CONFIG_MULTITHREADING */

no_reschedule:

#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
	/* Restore context at SOC level */
	addi a0, sp, __struct_arch_esf_soc_context_OFFSET
	jal ra, __soc_restore_context
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */

#if defined(CONFIG_FPU_SHARING)
	/* FPU handling upon exception mode exit */
	mv a0, sp
	call z_riscv_fpu_exit_exc

	/* decrement _current->arch.exception_depth */
	lr t0, ___cpu_t_current_OFFSET(s0)
	lb t1, _thread_offset_to_exception_depth(t0)
	add t1, t1, -1
	sb t1, _thread_offset_to_exception_depth(t0)
fp_trap_exit:
#endif

	/* Restore MEPC and MSTATUS registers */
	lr t0, __struct_arch_esf_mepc_OFFSET(sp)
	lr t2, __struct_arch_esf_mstatus_OFFSET(sp)

#ifdef CONFIG_CLIC_SUPPORT_INTERRUPT_LEVEL
	/* Restore MCAUSE register for previous interrupt level. */
	lr t1, __struct_arch_esf_mcause_OFFSET(sp)
	csrw mcause, t1
#endif /* CONFIG_CLIC_SUPPORT_INTERRUPT_LEVEL */

	csrw mepc, t0
	csrw mstatus, t2

#ifdef CONFIG_USERSPACE
	/*
	 * Check if we are returning to user mode. If so then we must
	 * set is_user_mode to true and preserve our kernel mode stack for
	 * the next exception to come.
	 */
	li t1, MSTATUS_MPP
	and t0, t2, t1
	bnez t0, 1f

#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)
	/* Remove the kernel stack guard and reconfigure the PMP for user mode */
	lr a0, ___cpu_t_current_OFFSET(s0)
	call z_riscv_pmp_usermode_enable
#endif

	/* Set our per-thread usermode flag */
	li t1, 1
	lui t0, %tprel_hi(is_user_mode)
	add t0, t0, tp, %tprel_add(is_user_mode)
	sb t1, %tprel_lo(is_user_mode)(t0)

	/* preserve stack pointer for next exception entry */
	add t0, sp, __struct_arch_esf_SIZEOF
	sr t0, _curr_cpu_arch_user_exc_sp(s0)

	j 2f
1:
	/*
	 * We are returning to kernel mode. Store the stack pointer to
	 * be re-loaded further down.
	 */
	addi t0, sp, __struct_arch_esf_SIZEOF
	sr t0, __struct_arch_esf_sp_OFFSET(sp)
2:
#endif

	/* Restore s0 (it is no longer ours) */
	lr s0, __struct_arch_esf_s0_OFFSET(sp)

#ifdef CONFIG_RISCV_SOC_HAS_ISR_STACKING
	SOC_ISR_SW_UNSTACKING
#else
	/* Restore caller-saved registers from thread stack */
	DO_CALLER_SAVED(lr)

#ifdef CONFIG_USERSPACE
	/* retrieve saved stack pointer */
	lr sp, __struct_arch_esf_sp_OFFSET(sp)
#else
	/* remove esf from the stack */
	addi sp, sp, __struct_arch_esf_SIZEOF
#endif

#endif /* CONFIG_RISCV_SOC_HAS_ISR_STACKING */

	mret