/*
 * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
 * Copyright (c) 2018 Foundries.io Ltd
 * Copyright (c) 2020 BayLibre, SAS
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/sys/util.h>
#include <zephyr/kernel.h>
#include <zephyr/syscall.h>
#include <zephyr/arch/riscv/csr.h>
#include <zephyr/arch/riscv/irq.h>
#include <zephyr/arch/riscv/syscall.h>
#include "asm_macros.inc"

#ifdef CONFIG_RISCV_SOC_HAS_ISR_STACKING
#include <soc_isr_stacking.h>
#endif

/* Convenience macro for loading/storing register states. */
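/*
 * RV_E() lines cover registers that exist in both the RV32E and full
 * integer register files; RV_I() lines are only emitted when the full
 * register file is available.
 */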
#define DO_CALLER_SAVED(op) \
	RV_E(	op t0, __struct_arch_esf_t0_OFFSET(sp)	);\
	RV_E(	op t1, __struct_arch_esf_t1_OFFSET(sp)	);\
	RV_E(	op t2, __struct_arch_esf_t2_OFFSET(sp)	);\
	RV_I(	op t3, __struct_arch_esf_t3_OFFSET(sp)	);\
	RV_I(	op t4, __struct_arch_esf_t4_OFFSET(sp)	);\
	RV_I(	op t5, __struct_arch_esf_t5_OFFSET(sp)	);\
	RV_I(	op t6, __struct_arch_esf_t6_OFFSET(sp)	);\
	RV_E(	op a0, __struct_arch_esf_a0_OFFSET(sp)	);\
	RV_E(	op a1, __struct_arch_esf_a1_OFFSET(sp)	);\
	RV_E(	op a2, __struct_arch_esf_a2_OFFSET(sp)	);\
	RV_E(	op a3, __struct_arch_esf_a3_OFFSET(sp)	);\
	RV_E(	op a4, __struct_arch_esf_a4_OFFSET(sp)	);\
	RV_E(	op a5, __struct_arch_esf_a5_OFFSET(sp)	);\
	RV_I(	op a6, __struct_arch_esf_a6_OFFSET(sp)	);\
	RV_I(	op a7, __struct_arch_esf_a7_OFFSET(sp)	);\
	RV_E(	op ra, __struct_arch_esf_ra_OFFSET(sp)	)

#ifdef CONFIG_EXCEPTION_DEBUG
/* Convenience macro for storing the callee-saved registers [s0 - s11]. */
#define STORE_CALLEE_SAVED() \
	RV_E(	sr s0, ___callee_saved_t_s0_OFFSET(sp)		);\
	RV_E(	sr s1, ___callee_saved_t_s1_OFFSET(sp)		);\
	RV_I(	sr s2, ___callee_saved_t_s2_OFFSET(sp)		);\
	RV_I(	sr s3, ___callee_saved_t_s3_OFFSET(sp)		);\
	RV_I(	sr s4, ___callee_saved_t_s4_OFFSET(sp)		);\
	RV_I(	sr s5, ___callee_saved_t_s5_OFFSET(sp)		);\
	RV_I(	sr s6, ___callee_saved_t_s6_OFFSET(sp)		);\
	RV_I(	sr s7, ___callee_saved_t_s7_OFFSET(sp)		);\
	RV_I(	sr s8, ___callee_saved_t_s8_OFFSET(sp)		);\
	RV_I(	sr s9, ___callee_saved_t_s9_OFFSET(sp)		);\
	RV_I(	sr s10, ___callee_saved_t_s10_OFFSET(sp)	);\
	RV_I(	sr s11, ___callee_saved_t_s11_OFFSET(sp)	)
#endif /* CONFIG_EXCEPTION_DEBUG */

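/*
 * With SMP or userspace enabled, mscratch holds the address of the
 * current CPU's _cpu_t structure; otherwise there is a single CPU and
 * _kernel.cpus[0] can be addressed directly.
 */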
	.macro get_current_cpu dst
#if defined(CONFIG_SMP) || defined(CONFIG_USERSPACE)
	csrr \dst, mscratch
#else
	la \dst, _kernel + ___kernel_t_cpus_OFFSET
#endif
	.endm

/* imports */
GDATA(_sw_isr_table)
#ifdef CONFIG_RISCV_SOC_EXCEPTION_FROM_IRQ
GTEXT(__soc_is_irq)
#endif
GTEXT(__soc_handle_irq)
GTEXT(_Fault)
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
GTEXT(__soc_save_context)
GTEXT(__soc_restore_context)
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */

#ifdef CONFIG_EXCEPTION_DEBUG
GTEXT(z_riscv_fatal_error_csf)
#else
GTEXT(z_riscv_fatal_error)
#endif /* CONFIG_EXCEPTION_DEBUG */

GTEXT(z_get_next_switch_handle)
GTEXT(z_riscv_switch)
GTEXT(z_riscv_thread_start)

#ifdef CONFIG_TRACING
GTEXT(sys_trace_isr_enter)
GTEXT(sys_trace_isr_exit)
#endif

#ifdef CONFIG_USERSPACE
GDATA(_k_syscall_table)
#endif

#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_HANDLING
GTEXT(__soc_handle_all_irqs)
#endif

/* exports */
GTEXT(_isr_wrapper)

/* use ABI names of registers for the sake of simplicity */

/*
 * Generic architecture-level IRQ handling, along with callouts to
 * SoC-specific routines.
 *
 * Architecture-level IRQ handling includes basic context save/restore
 * of standard registers and calling ISRs registered at Zephyr's driver
 * level.
 *
 * Since RISC-V does not completely prescribe IRQ handling behavior,
 * implementations vary (some implementations also deviate from the
 * standard behavior that is defined). Hence, the arch level code expects
 * the following functions to be provided at the SOC level:
 *
 *     - __soc_is_irq (optional): decide if we're handling an interrupt or an
 *       exception
 *     - __soc_handle_irq: handle SoC-specific details for a pending IRQ
 *       (e.g. clear a pending bit in a SoC-specific register)
 *
 * If CONFIG_RISCV_SOC_CONTEXT_SAVE=y, calls to SoC-level context save/restore
 * routines are also made here. For details, see the Kconfig help text.
 */

/*
 * Handler called upon each exception/interrupt/fault
 */
SECTION_FUNC(exception.entry, _isr_wrapper)

/* Provide requested alignment, which depends e.g. on MTVEC format */
.balign CONFIG_RISCV_TRAP_HANDLER_ALIGNMENT

#ifdef CONFIG_USERSPACE
	/* retrieve address of _current_cpu preserving s0 */
	csrrw s0, mscratch, s0

	/* preserve t0 and t1 temporarily */
	sr t0, _curr_cpu_arch_user_exc_tmp0(s0)
	sr t1, _curr_cpu_arch_user_exc_tmp1(s0)

	/* determine if we come from user space */
	csrr t0, mstatus
	li t1, MSTATUS_MPP
	and t0, t0, t1
	bnez t0, 1f

	/* we were in user space: switch to our privileged stack */
	mv t0, sp
	lr sp, _curr_cpu_arch_user_exc_sp(s0)

	/* Save user stack value. Coming from user space, we know this
	 * can't overflow the privileged stack. The esf will be allocated
	 * later but it is safe to store our saved user sp here. */
	sr t0, (-__struct_arch_esf_SIZEOF + __struct_arch_esf_sp_OFFSET)(sp)

	/* Make sure tls pointer is sane */
	lr t0, ___cpu_t_current_OFFSET(s0)
	lr tp, _thread_offset_to_tls(t0)

	/* Make sure global pointer is sane */
#ifdef CONFIG_RISCV_GP
	.option push
	.option norelax
	la gp, __global_pointer$
	.option pop
#elif defined(CONFIG_RISCV_CURRENT_VIA_GP)
	lr gp, ___cpu_t_current_OFFSET(s0)
#endif /* CONFIG_RISCV_GP / CONFIG_RISCV_CURRENT_VIA_GP */

	/* Clear our per-thread usermode flag */
	lui t0, %tprel_hi(is_user_mode)
	add t0, t0, tp, %tprel_add(is_user_mode)
	sb zero, %tprel_lo(is_user_mode)(t0)
1:
	/* retrieve original t0/t1 values */
	lr t0, _curr_cpu_arch_user_exc_tmp0(s0)
	lr t1, _curr_cpu_arch_user_exc_tmp1(s0)

	/* retrieve original s0 and restore _current_cpu in mscratch */
	csrrw s0, mscratch, s0
#endif

#ifdef CONFIG_RISCV_SOC_HAS_ISR_STACKING
	SOC_ISR_SW_STACKING
#else
	/* Save caller-saved registers on current thread stack. */
	addi sp, sp, -__struct_arch_esf_SIZEOF
	DO_CALLER_SAVED(sr)		;
#endif /* CONFIG_RISCV_SOC_HAS_ISR_STACKING */

	/* Save s0 in the esf and load it with &_current_cpu. */
	sr s0, __struct_arch_esf_s0_OFFSET(sp)
	get_current_cpu s0

	/* Save MEPC register */
	csrr t0, mepc
	sr t0, __struct_arch_esf_mepc_OFFSET(sp)

	/* Save MSTATUS register */
	csrr t2, mstatus
	sr t2, __struct_arch_esf_mstatus_OFFSET(sp)

#if defined(CONFIG_FPU_SHARING)
	/* determine if FPU access was disabled */
	li t1, MSTATUS_FS
	and t1, t1, t2
	bnez t1, no_fp
	/* determine if this is an Illegal Instruction exception */
	csrr t2, mcause
	li t1, CONFIG_RISCV_MCAUSE_EXCEPTION_MASK
	and t2, t2, t1
	li t1, 2		/* 2 = illegal instruction */
	bne t1, t2, no_fp
	/* determine if we trapped on an FP instruction. */
	csrr t2, mtval		/* get faulting instruction */
#ifdef CONFIG_QEMU_TARGET
	/*
	 * Some implementations may not report the faulting instruction in
	 * mtval, notably QEMU when a CSR instruction is involved.
	 */
	bnez t2, 1f
	lw t2, 0(t0)		/* t0 = mepc */
1:
#endif
	andi t0, t2, 0x7f	/* keep only the opcode bits */
	/*
	 * Major FP opcodes:
	 * 0000111 = LOAD-FP
	 * 0100111 = STORE-FP
	 * 1000011 = MADD
	 * 1000111 = MSUB
	 * 1001011 = NMSUB
	 * 1001111 = NMADD
	 * 1010011 = OP-FP
	 */
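	/*
	 * The ori/xori pairs below fold several of these opcodes into a
	 * single comparison: ori forces the bits in which the opcodes of a
	 * group differ, so a subsequent xori against the group's common
	 * value yields zero for any member of that group.
	 */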
	xori t1, t0, 0b1010011	/* OP-FP */
	beqz t1, is_fp
	ori  t1, t0, 0b0100000
	xori t1, t1, 0b0100111	/* LOAD-FP / STORE-FP */
	beqz t1, is_fp
	ori  t1, t0, 0b0001100
	xori t1, t1, 0b1001111	/* MADD / MSUB / NMSUB / NMADD */
	beqz t1, is_fp
	/*
	 * The FRCSR, FSCSR, FRRM, FSRM, FSRMI, FRFLAGS, FSFLAGS and FSFLAGSI
	 * are in fact CSR instructions targeting the fcsr, frm and fflags
	 * registers. They should be caught as FPU instructions as well.
	 *
	 * CSR format: csr#[31-20] src[19-15] op[14-12] dst[11-7] SYSTEM[6-0]
	 * SYSTEM = 0b1110011, op = 0b.xx where xx is never 0
	 * The csr# of interest are: 1=fflags, 2=frm, 3=fcsr
	 */
	xori t1, t0, 0b1110011	/* SYSTEM opcode */
	bnez t1, 2f		/* not a CSR insn */
	srli t0, t2, 12
	andi t0, t0, 0x3
	beqz t0, 2f		/* not a CSR insn */
	srli t0, t2, 20		/* isolate the csr register number */
	beqz t0, 2f		/* 0=ustatus */
	andi t0, t0, ~0x3	/* 1=fflags, 2=frm, 3=fcsr */
#if !defined(CONFIG_RISCV_ISA_EXT_C)
	bnez t0, no_fp
#else
	beqz t0, is_fp
2:	/* remaining non RVC (0b11) and RVC with 0b01 are not FP instructions */
	andi t1, t2, 1
	bnez t1, no_fp
	/*
	 * 001...........00 = C.FLD    RV32/64  (RV128 = C.LQ)
	 * 001...........10 = C.FLDSP  RV32/64  (RV128 = C.LQSP)
	 * 011...........00 = C.FLW    RV32     (RV64/128 = C.LD)
	 * 011...........10 = C.FLWSP  RV32     (RV64/128 = C.LDSP)
	 * 101...........00 = C.FSD    RV32/64  (RV128 = C.SQ)
	 * 101...........10 = C.FSDSP  RV32/64  (RV128 = C.SQSP)
	 * 111...........00 = C.FSW    RV32     (RV64/128 = C.SD)
	 * 111...........10 = C.FSWSP  RV32     (RV64/128 = C.SDSP)
	 *
	 * so must be .01............. on RV64 and ..1............. on RV32.
	 */
	srli t0, t2, 8
#if defined(CONFIG_64BIT)
	andi t1, t0, 0b01100000
	xori t1, t1, 0b00100000
	bnez t1, no_fp
#else
	andi t1, t0, 0b00100000
	beqz t1, no_fp
#endif
#endif /* CONFIG_RISCV_ISA_EXT_C */

is_fp:	/* Process the FP trap and quickly return from exception */
	la ra, fp_trap_exit
	mv a0, sp
	tail z_riscv_fpu_trap
2:
no_fp:	/* increment arch_current_thread()->arch.exception_depth */
	lr t0, ___cpu_t_current_OFFSET(s0)
	lb t1, _thread_offset_to_exception_depth(t0)
	add t1, t1, 1
	sb t1, _thread_offset_to_exception_depth(t0)

	/* configure the FPU for exception mode */
	call z_riscv_fpu_enter_exc
#endif /* CONFIG_FPU_SHARING */

#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
	/* Handle context saving at SOC level. */
	addi a0, sp, __struct_arch_esf_soc_context_OFFSET
	jal ra, __soc_save_context
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */

	/*
	 * Check if the exception is the result of an interrupt or not
	 * (SOC dependent). Following the RISC-V architecture spec, the MSB
	 * of the mcause register is used to indicate whether an exception
	 * is the result of an interrupt or an exception/fault. But for some
	 * SOCs (like pulpino or riscv-qemu), the MSB is never set to indicate
	 * an interrupt. Hence, check for interrupt/exception via the __soc_is_irq
	 * function (that needs to be implemented by each SOC). The result is
	 * returned via register a0 (1: interrupt, 0: exception).
	 */
#ifdef CONFIG_RISCV_SOC_EXCEPTION_FROM_IRQ
	jal ra, __soc_is_irq
	bnez a0, is_interrupt
#else
	csrr t0, mcause
	srli t0, t0, RISCV_MCAUSE_IRQ_POS
	bnez t0, is_interrupt
#endif

	/*
	 * If the exception is the result of an ECALL, check whether to
	 * perform a context-switch or an IRQ offload. Otherwise call _Fault
	 * to report the exception.
	 */
	csrr t0, mcause
	li t2, CONFIG_RISCV_MCAUSE_EXCEPTION_MASK
	and t0, t0, t2

	/*
	 * If mcause == RISCV_EXC_ECALLM, handle system call from
	 * kernel thread.
	 */
	li t1, RISCV_EXC_ECALLM
	beq t0, t1, is_kernel_syscall

#ifdef CONFIG_USERSPACE
	/*
	 * If mcause == RISCV_EXC_ECALLU, handle system call
	 * for user mode thread.
	 */
	li t1, RISCV_EXC_ECALLU
	beq t0, t1, is_user_syscall

#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)
	/*
	 * Determine if we come from user space. If so, reconfigure the PMP for
	 * kernel mode stack guard.
	 */
	csrr t0, mstatus
	li t1, MSTATUS_MPP
	and t0, t0, t1
	bnez t0, 1f
	lr a0, ___cpu_t_current_OFFSET(s0)
	call z_riscv_pmp_stackguard_enable
1:
#endif /* CONFIG_PMP_STACK_GUARD */

#endif /* CONFIG_USERSPACE */

	/*
	 * Call _Fault to handle exception.
	 * Stack pointer is pointing to a struct_arch_esf structure, pass it
	 * to _Fault (via register a0).
	 * If _Fault shall return, set return address to
	 * no_reschedule to restore stack.
	 */
	mv a0, sp
	la ra, no_reschedule
	tail _Fault

is_kernel_syscall:
	/*
	 * A syscall is the result of an ecall instruction, in which case the
	 * MEPC will contain the address of the ecall instruction.
	 * Increment saved MEPC by 4 to prevent triggering the same ecall
	 * again upon exiting the ISR.
	 *
	 * It's safe to always increment by 4, even with compressed
	 * instructions, because the ecall instruction is always 4 bytes.
	 */
	lr t0, __struct_arch_esf_mepc_OFFSET(sp)
	addi t0, t0, 4
	sr t0, __struct_arch_esf_mepc_OFFSET(sp)

#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)
	/* Re-activate PMP for m-mode */
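	/*
	 * PMP is normally not enforced on m-mode accesses. Clearing MPP and
	 * setting MPRV makes loads/stores be checked with u-mode privileges,
	 * so the PMP stack guard entries apply while we keep executing in
	 * m-mode.
	 */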
	li t1, MSTATUS_MPP
	csrc mstatus, t1
	li t1, MSTATUS_MPRV
	csrs mstatus, t1
#endif

	/* Determine what to do. Operation code is in t0. */
	lr t0, __struct_arch_esf_t0_OFFSET(sp)

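	/* Assembly-time check: the beqz below relies on RV_ECALL_RUNTIME_EXCEPT being 0 */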
	.if RV_ECALL_RUNTIME_EXCEPT != 0; .err; .endif
	beqz t0, do_fault

#if defined(CONFIG_IRQ_OFFLOAD)
	li t1, RV_ECALL_IRQ_OFFLOAD
	beq t0, t1, do_irq_offload
#endif

#ifdef CONFIG_RISCV_ALWAYS_SWITCH_THROUGH_ECALL
	li t1, RV_ECALL_SCHEDULE
	bne t0, t1, skip_schedule
	lr a0, __struct_arch_esf_a0_OFFSET(sp)
	lr a1, __struct_arch_esf_a1_OFFSET(sp)

#ifdef CONFIG_FPU_SHARING
	/*
	 * When an ECALL is used for a context-switch, the current thread has
	 * been updated to the next thread.
	 * Add the exception_depth back to the previous thread.
	 */
	lb t1, _thread_offset_to_exception_depth(a0)
	add t1, t1, -1
	sb t1, _thread_offset_to_exception_depth(a0)

	lb t1, _thread_offset_to_exception_depth(a1)
	add t1, t1, 1
	sb t1, _thread_offset_to_exception_depth(a1)
#endif

	j reschedule
skip_schedule:
#endif

	/* default fault code is K_ERR_KERNEL_OOPS */
	li a0, 3
	j 1f

do_fault:
	/* Handle RV_ECALL_RUNTIME_EXCEPT. Retrieve reason in a0, esf in a1. */
	lr a0, __struct_arch_esf_a0_OFFSET(sp)
1:	mv a1, sp

#ifdef CONFIG_EXCEPTION_DEBUG
	/*
	 * Restore the s0 we saved early in ISR entry
	 * so it shows up properly in the CSF.
	 */
	lr s0, __struct_arch_esf_s0_OFFSET(sp)

	/* Allocate space for callee-saved registers on current thread stack */
	addi sp, sp, -__callee_saved_t_SIZEOF

	/* Save callee-saved registers to be passed as 3rd arg */
	STORE_CALLEE_SAVED()		;
	mv a2, sp

#ifdef CONFIG_EXTRA_EXCEPTION_INFO
	/* Store csf's addr into esf (a1 still holds the pointer to the esf at this point) */
	sr a2, __struct_arch_esf_csf_OFFSET(a1)
#endif /* CONFIG_EXTRA_EXCEPTION_INFO */

	tail z_riscv_fatal_error_csf
#else
	tail z_riscv_fatal_error
#endif /* CONFIG_EXCEPTION_DEBUG */

#if defined(CONFIG_IRQ_OFFLOAD)
do_irq_offload:
	/*
	 * Retrieve provided routine and argument from the stack.
	 * Routine pointer is in saved a0, argument in saved a1
	 * so we load them with a1/a0 (reversed).
	 */
	lr a1, __struct_arch_esf_a0_OFFSET(sp)
	lr a0, __struct_arch_esf_a1_OFFSET(sp)

	/* Increment _current_cpu->nested */
	lw t1, ___cpu_t_nested_OFFSET(s0)
	addi t2, t1, 1
	sw t2, ___cpu_t_nested_OFFSET(s0)
	bnez t1, 1f

	/* Switch to interrupt stack */
	mv t0, sp
	lr sp, ___cpu_t_irq_stack_OFFSET(s0)

	/* Save thread stack pointer on interrupt stack */
	addi sp, sp, -16
	sr t0, 0(sp)
1:
	/* Execute provided routine (argument is in a0 already). */
	jalr ra, a1, 0

	/* Leave through the regular IRQ exit path */
	j irq_done
#endif /* CONFIG_IRQ_OFFLOAD */

#ifdef CONFIG_USERSPACE
is_user_syscall:

#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)
	/*
	 * We came from userspace and need to reconfigure the
	 * PMP for kernel mode stack guard.
	 */
	lr a0, ___cpu_t_current_OFFSET(s0)
	call z_riscv_pmp_stackguard_enable
#endif

	/* It is safe to re-enable IRQs now */
	csrs mstatus, MSTATUS_IEN

	/*
	 * Same as for is_kernel_syscall: increment saved MEPC by 4 to
	 * prevent triggering the same ecall again upon exiting the ISR.
	 */
	lr t1, __struct_arch_esf_mepc_OFFSET(sp)
	addi t1, t1, 4
	sr t1, __struct_arch_esf_mepc_OFFSET(sp)

	/* Restore argument registers from user stack */
	lr a0, __struct_arch_esf_a0_OFFSET(sp)
	lr a1, __struct_arch_esf_a1_OFFSET(sp)
	lr a2, __struct_arch_esf_a2_OFFSET(sp)
	lr a3, __struct_arch_esf_a3_OFFSET(sp)
	lr a4, __struct_arch_esf_a4_OFFSET(sp)
	lr a5, __struct_arch_esf_a5_OFFSET(sp)
	lr t0, __struct_arch_esf_t0_OFFSET(sp)
#if defined(CONFIG_RISCV_ISA_RV32E)
	/* Stack alignment for RV32E is 4 bytes */
	addi sp, sp, -4
	mv t1, sp
	sw t1, 0(sp)
#else
	mv a6, sp
#endif /* CONFIG_RISCV_ISA_RV32E */

	/* validate syscall limit */
	li t1, K_SYSCALL_LIMIT
	bltu t0, t1, valid_syscall_id

	/* bad syscall id.  Set arg1 to bad id and set call_id to SYSCALL_BAD */
	mv a0, t0
	li t0, K_SYSCALL_BAD

valid_syscall_id:

	la t2, _k_syscall_table

	slli t1, t0, RV_REGSHIFT	# Determine offset from index value
	add t2, t2, t1			# Table addr + offset = function addr
	lr t2, 0(t2)			# Load function address

	/* Execute syscall function */
	jalr ra, t2, 0

#if defined(CONFIG_RISCV_ISA_RV32E)
	addi sp, sp, 4
#endif /* CONFIG_RISCV_ISA_RV32E */

	/* Update a0 (return value) on the stack */
	sr a0, __struct_arch_esf_a0_OFFSET(sp)

	/* Disable IRQs again before leaving */
	csrc mstatus, MSTATUS_IEN
	j might_have_rescheduled
#endif /* CONFIG_USERSPACE */

is_interrupt:

#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)
#ifdef CONFIG_USERSPACE
	/*
	 * If we came from userspace then we need to reconfigure the
	 * PMP for kernel mode stack guard.
	 */
	lr t0, __struct_arch_esf_mstatus_OFFSET(sp)
	li t1, MSTATUS_MPP
	and t0, t0, t1
	bnez t0, 1f
	lr a0, ___cpu_t_current_OFFSET(s0)
	call z_riscv_pmp_stackguard_enable
	j 2f
#endif /* CONFIG_USERSPACE */
1:	/* Re-activate PMP for m-mode */
	li t1, MSTATUS_MPP
	csrc mstatus, t1
	li t1, MSTATUS_MPRV
	csrs mstatus, t1
2:
#endif

	/* Increment _current_cpu->nested */
	lw t1, ___cpu_t_nested_OFFSET(s0)
	addi t2, t1, 1
	sw t2, ___cpu_t_nested_OFFSET(s0)
	bnez t1, on_irq_stack

	/* Switch to interrupt stack */
	mv t0, sp
	lr sp, ___cpu_t_irq_stack_OFFSET(s0)

	/*
	 * Save thread stack pointer on interrupt stack
	 * In RISC-V, stack pointer needs to be 16-byte aligned
	 */
	addi sp, sp, -16
	sr t0, 0(sp)

on_irq_stack:

#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_HANDLING
	call __soc_handle_all_irqs
#else

#ifdef CONFIG_TRACING_ISR
	call sys_trace_isr_enter
#endif

	/* Get IRQ causing interrupt */
	csrr a0, mcause
	li t0, CONFIG_RISCV_MCAUSE_EXCEPTION_MASK
	and a0, a0, t0

	/*
	 * Clear pending IRQ generating the interrupt at SOC level
	 * Pass IRQ number to __soc_handle_irq via register a0
	 */
	jal ra, __soc_handle_irq

	/*
	 * Call corresponding registered function in _sw_isr_table.
	 * (table entries are 2 words wide, so shift the index accordingly)
	 */
	la t0, _sw_isr_table
	slli a0, a0, (RV_REGSHIFT + 1)
	add t0, t0, a0

	/* Load argument in a0 register */
	lr a0, 0(t0)

	/* Load ISR function address in register t1 */
	lr t1, RV_REGSIZE(t0)

	/* Call ISR function */
	jalr ra, t1, 0

#ifdef CONFIG_TRACING_ISR
	call sys_trace_isr_exit
#endif

#endif

irq_done:
	/* Decrement _current_cpu->nested */
	lw t2, ___cpu_t_nested_OFFSET(s0)
	addi t2, t2, -1
	sw t2, ___cpu_t_nested_OFFSET(s0)
	bnez t2, no_reschedule

	/* nested count is back to 0: Return to thread stack */
	lr sp, 0(sp)

#ifdef CONFIG_STACK_SENTINEL
	call z_check_stack_sentinel
#endif

check_reschedule:

#ifdef CONFIG_MULTITHREADING

	/* Get pointer to current thread on this CPU */
	lr a1, ___cpu_t_current_OFFSET(s0)

	/*
	 * Get next thread to schedule with z_get_next_switch_handle().
	 * We pass it NULL as we haven't saved the whole thread context yet.
	 * If no scheduling is necessary then NULL will be returned.
	 */
	addi sp, sp, -16
	sr a1, 0(sp)
	mv a0, zero
	call z_get_next_switch_handle
	lr a1, 0(sp)
	addi sp, sp, 16
	beqz a0, no_reschedule

reschedule:

	/*
	 * Perform context switch:
	 * a0 = new thread
	 * a1 = old thread
	 */
	call z_riscv_switch

z_riscv_thread_start:
might_have_rescheduled:
	/* reload s0 with &_current_cpu as it might have changed or be unset */
	get_current_cpu s0

#endif /* CONFIG_MULTITHREADING */

no_reschedule:

#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
	/* Restore context at SOC level */
	addi a0, sp, __struct_arch_esf_soc_context_OFFSET
	jal ra, __soc_restore_context
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */

#if defined(CONFIG_FPU_SHARING)
	/* FPU handling upon exception mode exit */
	mv a0, sp
	call z_riscv_fpu_exit_exc

	/* decrement arch_current_thread()->arch.exception_depth */
	lr t0, ___cpu_t_current_OFFSET(s0)
	lb t1, _thread_offset_to_exception_depth(t0)
	add t1, t1, -1
	sb t1, _thread_offset_to_exception_depth(t0)
fp_trap_exit:
#endif

	/* Restore MEPC and MSTATUS registers */
	lr t0, __struct_arch_esf_mepc_OFFSET(sp)
	lr t2, __struct_arch_esf_mstatus_OFFSET(sp)
	csrw mepc, t0
	csrw mstatus, t2
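	/*
	 * Writing mstatus restores the saved MPP/MPIE bits, so the mret at
	 * the end returns to the interrupted privilege level with the
	 * interrupt-enable state it had on entry.
	 */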

#ifdef CONFIG_USERSPACE
	/*
	 * Check if we are returning to user mode. If so then we must
	 * set is_user_mode to true and preserve our kernel mode stack for
	 * the next exception to come.
	 */
	li t1, MSTATUS_MPP
	and t0, t2, t1
	bnez t0, 1f

#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)
	/* Remove kernel stack guard and reconfigure PMP for user mode */
	lr a0, ___cpu_t_current_OFFSET(s0)
	call z_riscv_pmp_usermode_enable
#endif

	/* Set our per-thread usermode flag */
	li t1, 1
	lui t0, %tprel_hi(is_user_mode)
	add t0, t0, tp, %tprel_add(is_user_mode)
	sb t1, %tprel_lo(is_user_mode)(t0)

	/* preserve stack pointer for next exception entry */
	add t0, sp, __struct_arch_esf_SIZEOF
	sr t0, _curr_cpu_arch_user_exc_sp(s0)
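	/* (this is the value loaded back into sp at the top of _isr_wrapper on the next trap from user mode) */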

	j 2f
1:
	/*
	 * We are returning to kernel mode. Store the stack pointer to
	 * be re-loaded further down.
	 */
	addi t0, sp, __struct_arch_esf_SIZEOF
	sr t0, __struct_arch_esf_sp_OFFSET(sp)
2:
#endif

	/* Restore s0 (it is no longer ours) */
	lr s0, __struct_arch_esf_s0_OFFSET(sp)

#ifdef CONFIG_RISCV_SOC_HAS_ISR_STACKING
	SOC_ISR_SW_UNSTACKING
#else
	/* Restore caller-saved registers from thread stack */
	DO_CALLER_SAVED(lr)

#ifdef CONFIG_USERSPACE
	/* retrieve saved stack pointer */
	lr sp, __struct_arch_esf_sp_OFFSET(sp)
#else
	/* remove esf from the stack */
	addi sp, sp, __struct_arch_esf_SIZEOF
#endif

#endif /* CONFIG_RISCV_SOC_HAS_ISR_STACKING */

	mret
