/*
 * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
 * Copyright (c) 2018 Foundries.io Ltd
 * Copyright (c) 2020 BayLibre, SAS
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/sys/util.h>
#include <zephyr/kernel.h>
#include <zephyr/syscall.h>
#include <zephyr/arch/riscv/csr.h>
#include <zephyr/arch/riscv/irq.h>
#include <zephyr/arch/riscv/syscall.h>
#include "asm_macros.inc"

#ifdef CONFIG_RISCV_SOC_HAS_ISR_STACKING
#include <soc_isr_stacking.h>
#endif

/* Convenience macro for loading/storing register states. */
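/*
 * RV_E() lines cover the RV32E register subset and are always emitted;
 * RV_I() lines cover registers that only exist with the full I register
 * set (see asm_macros.inc).
 */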
#define DO_CALLER_SAVED(op) \
	RV_E(	op t0, __struct_arch_esf_t0_OFFSET(sp)	);\
	RV_E(	op t1, __struct_arch_esf_t1_OFFSET(sp)	);\
	RV_E(	op t2, __struct_arch_esf_t2_OFFSET(sp)	);\
	RV_I(	op t3, __struct_arch_esf_t3_OFFSET(sp)	);\
	RV_I(	op t4, __struct_arch_esf_t4_OFFSET(sp)	);\
	RV_I(	op t5, __struct_arch_esf_t5_OFFSET(sp)	);\
	RV_I(	op t6, __struct_arch_esf_t6_OFFSET(sp)	);\
	RV_E(	op a0, __struct_arch_esf_a0_OFFSET(sp)	);\
	RV_E(	op a1, __struct_arch_esf_a1_OFFSET(sp)	);\
	RV_E(	op a2, __struct_arch_esf_a2_OFFSET(sp)	);\
	RV_E(	op a3, __struct_arch_esf_a3_OFFSET(sp)	);\
	RV_E(	op a4, __struct_arch_esf_a4_OFFSET(sp)	);\
	RV_E(	op a5, __struct_arch_esf_a5_OFFSET(sp)	);\
	RV_I(	op a6, __struct_arch_esf_a6_OFFSET(sp)	);\
	RV_I(	op a7, __struct_arch_esf_a7_OFFSET(sp)	);\
	RV_E(	op ra, __struct_arch_esf_ra_OFFSET(sp)	)

#ifdef CONFIG_EXCEPTION_DEBUG
/* Convenience macro for storing callee-saved register [s0 - s11] states. */
#define STORE_CALLEE_SAVED() \
	RV_E(	sr s0, ___callee_saved_t_s0_OFFSET(sp)		);\
	RV_E(	sr s1, ___callee_saved_t_s1_OFFSET(sp)		);\
	RV_I(	sr s2, ___callee_saved_t_s2_OFFSET(sp)		);\
	RV_I(	sr s3, ___callee_saved_t_s3_OFFSET(sp)		);\
	RV_I(	sr s4, ___callee_saved_t_s4_OFFSET(sp)		);\
	RV_I(	sr s5, ___callee_saved_t_s5_OFFSET(sp)		);\
	RV_I(	sr s6, ___callee_saved_t_s6_OFFSET(sp)		);\
	RV_I(	sr s7, ___callee_saved_t_s7_OFFSET(sp)		);\
	RV_I(	sr s8, ___callee_saved_t_s8_OFFSET(sp)		);\
	RV_I(	sr s9, ___callee_saved_t_s9_OFFSET(sp)		);\
	RV_I(	sr s10, ___callee_saved_t_s10_OFFSET(sp)	);\
	RV_I(	sr s11, ___callee_saved_t_s11_OFFSET(sp)	)
#endif /* CONFIG_EXCEPTION_DEBUG */

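/*
 * Load &_current_cpu into \dst. With SMP or userspace enabled it is kept
 * in the mscratch CSR; otherwise the single CPU entry in _kernel is used.
 */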
	.macro get_current_cpu dst
#if defined(CONFIG_SMP) || defined(CONFIG_USERSPACE)
	csrr \dst, mscratch
#else
	la \dst, _kernel + ___kernel_t_cpus_OFFSET
#endif
	.endm

/* imports */
GDATA(_sw_isr_table)
#ifdef CONFIG_RISCV_SOC_EXCEPTION_FROM_IRQ
GTEXT(__soc_is_irq)
#endif
GTEXT(__soc_handle_irq)
GTEXT(_Fault)
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
GTEXT(__soc_save_context)
GTEXT(__soc_restore_context)
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */

#ifdef CONFIG_EXCEPTION_DEBUG
GTEXT(z_riscv_fatal_error_csf)
#else
GTEXT(z_riscv_fatal_error)
#endif /* CONFIG_EXCEPTION_DEBUG */

GTEXT(z_get_next_switch_handle)
GTEXT(z_riscv_switch)
GTEXT(z_riscv_thread_start)

#ifdef CONFIG_TRACING
GTEXT(sys_trace_isr_enter)
GTEXT(sys_trace_isr_exit)
#endif

#ifdef CONFIG_USERSPACE
GDATA(_k_syscall_table)
#endif

#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_HANDLING
GTEXT(__soc_handle_all_irqs)
#endif

/* exports */
GTEXT(_isr_wrapper)

/* use ABI names of registers for the sake of simplicity */

/*
 * Generic architecture-level IRQ handling, along with callouts to
 * SoC-specific routines.
 *
 * Architecture level IRQ handling includes basic context save/restore
 * of standard registers and calling ISRs registered at Zephyr's driver
 * level.
 *
 * Since RISC-V does not completely prescribe IRQ handling behavior,
 * implementations vary (some implementations also deviate from the
 * standard behavior that is defined). Hence, the arch-level code expects
 * the following functions to be provided at the SoC level:
 *
 *     - __soc_is_irq (optional): decide if we're handling an interrupt or
 *       an exception
 *     - __soc_handle_irq: handle SoC-specific details for a pending IRQ
 *       (e.g. clear a pending bit in a SoC-specific register)
 *
 * If CONFIG_RISCV_SOC_CONTEXT_SAVE=y, calls to SoC-level context save/restore
 * routines are also made here. For details, see the Kconfig help text.
 */

/*
 * Handler called upon each exception/interrupt/fault
 */
SECTION_FUNC(exception.entry, _isr_wrapper)

/* Provide requested alignment, which depends e.g. on MTVEC format */
.balign CONFIG_RISCV_TRAP_HANDLER_ALIGNMENT

#ifdef CONFIG_USERSPACE
	/* retrieve address of _current_cpu preserving s0 */
	csrrw s0, mscratch, s0

	/* preserve t0 and t1 temporarily */
	sr t0, _curr_cpu_arch_user_exc_tmp0(s0)
	sr t1, _curr_cpu_arch_user_exc_tmp1(s0)

	/* determine if we come from user space */
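	/* mstatus.MPP == 0 means the trap was taken from U-mode */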
	csrr t0, mstatus
	li t1, MSTATUS_MPP
	and t0, t0, t1
	bnez t0, 1f

	/* we were in user space: switch to our privileged stack */
	mv t0, sp
	lr sp, _curr_cpu_arch_user_exc_sp(s0)

	/* Save user stack value. Coming from user space, we know this
	 * can't overflow the privileged stack. The esf will be allocated
	 * later but it is safe to store our saved user sp here. */
	sr t0, (-__struct_arch_esf_SIZEOF + __struct_arch_esf_sp_OFFSET)(sp)

	/* Make sure tls pointer is sane */
	lr t0, ___cpu_t_current_OFFSET(s0)
	lr tp, _thread_offset_to_tls(t0)

	/* Clear our per-thread usermode flag */
	lui t0, %tprel_hi(is_user_mode)
	add t0, t0, tp, %tprel_add(is_user_mode)
	sb zero, %tprel_lo(is_user_mode)(t0)
1:
	/* retrieve original t0/t1 values */
	lr t0, _curr_cpu_arch_user_exc_tmp0(s0)
	lr t1, _curr_cpu_arch_user_exc_tmp1(s0)

	/* retrieve original s0 and restore _current_cpu in mscratch */
	csrrw s0, mscratch, s0
#endif

#ifdef CONFIG_RISCV_SOC_HAS_ISR_STACKING
	SOC_ISR_SW_STACKING
#else
	/* Save caller-saved registers on current thread stack. */
	addi sp, sp, -__struct_arch_esf_SIZEOF
	DO_CALLER_SAVED(sr)		;
#endif /* CONFIG_RISCV_SOC_HAS_ISR_STACKING */

	/* Save s0 in the esf and load it with &_current_cpu. */
	sr s0, __struct_arch_esf_s0_OFFSET(sp)
	get_current_cpu s0

	/* Save MEPC register */
	csrr t0, mepc
	sr t0, __struct_arch_esf_mepc_OFFSET(sp)

	/* Save MSTATUS register */
	csrr t2, mstatus
	sr t2, __struct_arch_esf_mstatus_OFFSET(sp)

#if defined(CONFIG_FPU_SHARING)
	/* determine if FPU access was disabled */
	li t1, MSTATUS_FS
	and t1, t1, t2
	bnez t1, no_fp
	/* determine if this is an Illegal Instruction exception */
	csrr t2, mcause
	li t1, CONFIG_RISCV_MCAUSE_EXCEPTION_MASK
	and t2, t2, t1
	li t1, 2		/* 2 = illegal instruction */
	bne t1, t2, no_fp
	/* determine if we trapped on an FP instruction. */
	csrr t2, mtval		/* get faulting instruction */
#ifdef CONFIG_QEMU_TARGET
	/*
	 * Some implementations may not support MTVAL in this capacity.
	 * Notably QEMU when a CSR instruction is involved.
	 */
	bnez t2, 1f
	lw t2, 0(t0)		/* t0 = mepc */
1:
#endif
	andi t0, t2, 0x7f	/* keep only the opcode bits */
	/*
	 * Major FP opcodes:
	 * 0000111 = LOAD-FP
	 * 0100111 = STORE-FP
	 * 1000011 = MADD
	 * 1000111 = MSUB
	 * 1001011 = NMSUB
	 * 1001111 = NMADD
	 * 1010011 = OP-FP
	 */
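	/*
	 * The ori/xori pairs below fold opcodes that differ only in the forced
	 * bits into a single comparison (e.g. LOAD-FP and STORE-FP both match
	 * 0100111 once bit 5 is set).
	 */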
	xori t1, t0, 0b1010011	/* OP-FP */
	beqz t1, is_fp
	ori  t1, t0, 0b0100000
	xori t1, t1, 0b0100111	/* LOAD-FP / STORE-FP */
	beqz t1, is_fp
	ori  t1, t0, 0b0001100
	xori t1, t1, 0b1001111	/* MADD / MSUB / NMSUB / NMADD */
	beqz t1, is_fp
	/*
	 * The FRCSR, FSCSR, FRRM, FSRM, FSRMI, FRFLAGS, FSFLAGS and FSFLAGSI
	 * are in fact CSR instructions targeting the fcsr, frm and fflags
	 * registers. They should be caught as FPU instructions as well.
	 *
	 * CSR format: csr#[31-20] src[19-15] op[14-12] dst[11-7] SYSTEM[6-0]
	 * SYSTEM = 0b1110011, op = 0b.xx where xx is never 0
	 * The csr# of interest are: 1=fflags, 2=frm, 3=fcsr
	 */
	xori t1, t0, 0b1110011	/* SYSTEM opcode */
	bnez t1, 2f		/* not a CSR insn */
	srli t0, t2, 12
	andi t0, t0, 0x3
	beqz t0, 2f		/* not a CSR insn */
	srli t0, t2, 20		/* isolate the csr register number */
	beqz t0, 2f		/* 0=ustatus */
	andi t0, t0, ~0x3	/* 1=fflags, 2=frm, 3=fcsr */
#if !defined(CONFIG_RISCV_ISA_EXT_C)
	bnez t0, no_fp
#else
	beqz t0, is_fp
2:	/* remaining non-RVC (0b11) and RVC with 0b01 are not FP instructions */
	andi t1, t2, 1
	bnez t1, no_fp
	/*
	 * 001...........00 = C.FLD    RV32/64  (RV128 = C.LQ)
	 * 001...........10 = C.FLDSP  RV32/64  (RV128 = C.LQSP)
	 * 011...........00 = C.FLW    RV32     (RV64/128 = C.LD)
	 * 011...........10 = C.FLWSP  RV32     (RV64/128 = C.LDSP)
	 * 101...........00 = C.FSD    RV32/64  (RV128 = C.SQ)
	 * 101...........10 = C.FSDSP  RV32/64  (RV128 = C.SQSP)
	 * 111...........00 = C.FSW    RV32     (RV64/128 = C.SD)
	 * 111...........10 = C.FSWSP  RV32     (RV64/128 = C.SDSP)
	 *
	 * so must be .01............. on RV64 and ..1............. on RV32.
	 */
	srli t0, t2, 8
#if defined(CONFIG_64BIT)
	andi t1, t0, 0b01100000
	xori t1, t1, 0b00100000
	bnez t1, no_fp
#else
	andi t1, t0, 0b00100000
	beqz t1, no_fp
#endif
#endif /* CONFIG_RISCV_ISA_EXT_C */

is_fp:	/* Process the FP trap and quickly return from exception */
	la ra, fp_trap_exit
	mv a0, sp
	tail z_riscv_fpu_trap
2:
no_fp:	/* increment _current->arch.exception_depth */
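	/* (exception_depth is used by the FPU-sharing logic to track nesting) */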
	lr t0, ___cpu_t_current_OFFSET(s0)
	lb t1, _thread_offset_to_exception_depth(t0)
	add t1, t1, 1
	sb t1, _thread_offset_to_exception_depth(t0)

	/* configure the FPU for exception mode */
	call z_riscv_fpu_enter_exc
#endif /* CONFIG_FPU_SHARING */

#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
	/* Handle context saving at SOC level. */
	addi a0, sp, __struct_arch_esf_soc_context_OFFSET
	jal ra, __soc_save_context
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */

	/*
	 * Check if exception is the result of an interrupt or not.
	 * (SOC dependent). Following the RISC-V architecture spec, the MSB
	 * of the mcause register is used to indicate whether an exception
	 * is the result of an interrupt or an exception/fault. But for some
	 * SOCs (like pulpino or riscv-qemu), the MSB is never set to indicate
	 * interrupt. Hence, check for interrupt/exception via the __soc_is_irq
	 * function (that needs to be implemented by each SOC). The result is
	 * returned via register a0 (1: interrupt, 0: exception)
	 */
#ifdef CONFIG_RISCV_SOC_EXCEPTION_FROM_IRQ
	jal ra, __soc_is_irq
	bnez a0, is_interrupt
#else
	csrr t0, mcause
	srli t0, t0, RISCV_MCAUSE_IRQ_POS
	bnez t0, is_interrupt
#endif

	/*
	 * If the exception is the result of an ECALL, check whether to
	 * perform a context-switch or an IRQ offload. Otherwise call _Fault
	 * to report the exception.
	 */
	csrr t0, mcause
	li t2, CONFIG_RISCV_MCAUSE_EXCEPTION_MASK
	and t0, t0, t2

	/*
	 * If mcause == RISCV_EXC_ECALLM, handle system call from
	 * kernel thread.
	 */
	li t1, RISCV_EXC_ECALLM
	beq t0, t1, is_kernel_syscall

#ifdef CONFIG_USERSPACE
	/*
	 * If mcause == RISCV_EXC_ECALLU, handle system call
	 * for user mode thread.
	 */
	li t1, RISCV_EXC_ECALLU
	beq t0, t1, is_user_syscall

#ifdef CONFIG_PMP_STACK_GUARD
	/*
	 * Determine if we come from user space. If so, reconfigure the PMP for
	 * kernel mode stack guard.
	 */
	csrr t0, mstatus
	li t1, MSTATUS_MPP
	and t0, t0, t1
	bnez t0, 1f
	lr a0, ___cpu_t_current_OFFSET(s0)
	call z_riscv_pmp_stackguard_enable
1:
#endif /* CONFIG_PMP_STACK_GUARD */

#endif /* CONFIG_USERSPACE */

	/*
	 * Call _Fault to handle exception.
	 * Stack pointer is pointing to a struct_arch_esf structure, pass it
	 * to _Fault (via register a0).
	 * If _Fault shall return, set return address to
	 * no_reschedule to restore stack.
	 */
	mv a0, sp
	la ra, no_reschedule
	tail _Fault

is_kernel_syscall:
	/*
	 * A syscall is the result of an ecall instruction, in which case the
	 * MEPC will contain the address of the ecall instruction.
	 * Increment saved MEPC by 4 to prevent triggering the same ecall
	 * again upon exiting the ISR.
	 *
	 * It's safe to always increment by 4, even with compressed
	 * instructions, because the ecall instruction is always 4 bytes.
	 */
	lr t0, __struct_arch_esf_mepc_OFFSET(sp)
	addi t0, t0, 4
	sr t0, __struct_arch_esf_mepc_OFFSET(sp)

#ifdef CONFIG_PMP_STACK_GUARD
	/* Re-activate PMP for m-mode */
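	/*
	 * Clearing mstatus.MPP (i.e. setting it to U-mode) and setting
	 * mstatus.MPRV makes M-mode loads/stores subject to PMP checks as if
	 * they were issued from U-mode, so the stack guard applies again.
	 */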
	li t1, MSTATUS_MPP
	csrc mstatus, t1
	li t1, MSTATUS_MPRV
	csrs mstatus, t1
#endif

	/* Determine what to do. Operation code is in t0. */
	lr t0, __struct_arch_esf_t0_OFFSET(sp)

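	/*
	 * Build-time check: RV_ECALL_RUNTIME_EXCEPT must be 0 for the beqz
	 * below to dispatch it directly.
	 */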
	.if RV_ECALL_RUNTIME_EXCEPT != 0; .err; .endif
	beqz t0, do_fault

#if defined(CONFIG_IRQ_OFFLOAD)
	li t1, RV_ECALL_IRQ_OFFLOAD
	beq t0, t1, do_irq_offload
#endif

#ifdef CONFIG_RISCV_ALWAYS_SWITCH_THROUGH_ECALL
	li t1, RV_ECALL_SCHEDULE
	bne t0, t1, skip_schedule
	lr a0, __struct_arch_esf_a0_OFFSET(sp)
	lr a1, __struct_arch_esf_a1_OFFSET(sp)

#ifdef CONFIG_FPU_SHARING
	/*
	 * When an ECALL is used for a context-switch, the current thread has
	 * been updated to the next thread.
	 * Add the exception_depth back to the previous thread.
	 */
	lb t1, _thread_offset_to_exception_depth(a0)
	add t1, t1, -1
	sb t1, _thread_offset_to_exception_depth(a0)

	lb t1, _thread_offset_to_exception_depth(a1)
	add t1, t1, 1
	sb t1, _thread_offset_to_exception_depth(a1)
#endif

	j reschedule
skip_schedule:
#endif

	/* default fault code is K_ERR_KERNEL_OOPS */
	li a0, 3
	j 1f

do_fault:
	/* Handle RV_ECALL_RUNTIME_EXCEPT. Retrieve reason in a0, esf in a1. */
	lr a0, __struct_arch_esf_a0_OFFSET(sp)
1:	mv a1, sp

#ifdef CONFIG_EXCEPTION_DEBUG
	/* Allocate space for callee-saved registers on current thread stack */
	addi sp, sp, -__callee_saved_t_SIZEOF

	/* Save callee-saved registers to be passed as 3rd arg */
	STORE_CALLEE_SAVED()		;
	mv a2, sp

	tail z_riscv_fatal_error_csf
#else
	tail z_riscv_fatal_error
#endif

#if defined(CONFIG_IRQ_OFFLOAD)
do_irq_offload:
	/*
	 * Retrieve provided routine and argument from the stack.
	 * Routine pointer is in saved a0, argument in saved a1
	 * so we load them with a1/a0 (reversed).
	 */
	lr a1, __struct_arch_esf_a0_OFFSET(sp)
	lr a0, __struct_arch_esf_a1_OFFSET(sp)

	/* Increment _current_cpu->nested */
	lw t1, ___cpu_t_nested_OFFSET(s0)
	addi t2, t1, 1
	sw t2, ___cpu_t_nested_OFFSET(s0)
	bnez t1, 1f

	/* Switch to interrupt stack */
	mv t0, sp
	lr sp, ___cpu_t_irq_stack_OFFSET(s0)

	/* Save thread stack pointer on interrupt stack */
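	/* (reserve 16 bytes to keep the stack pointer 16-byte aligned) */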
	addi sp, sp, -16
	sr t0, 0(sp)
1:
	/* Execute provided routine (argument is in a0 already). */
	jalr ra, a1, 0

	/* Leave through the regular IRQ exit path */
	j irq_done
#endif /* CONFIG_IRQ_OFFLOAD */

#ifdef CONFIG_USERSPACE
is_user_syscall:

#ifdef CONFIG_PMP_STACK_GUARD
	/*
	 * We came from userspace and need to reconfigure the
	 * PMP for kernel mode stack guard.
	 */
	lr a0, ___cpu_t_current_OFFSET(s0)
	call z_riscv_pmp_stackguard_enable
#endif

	/* It is safe to re-enable IRQs now */
	csrs mstatus, MSTATUS_IEN

	/*
	 * Same as for is_kernel_syscall: increment saved MEPC by 4 to
	 * prevent triggering the same ecall again upon exiting the ISR.
	 */
	lr t1, __struct_arch_esf_mepc_OFFSET(sp)
	addi t1, t1, 4
	sr t1, __struct_arch_esf_mepc_OFFSET(sp)

	/* Restore syscall argument registers from the esf */
	lr a0, __struct_arch_esf_a0_OFFSET(sp)
	lr a1, __struct_arch_esf_a1_OFFSET(sp)
	lr a2, __struct_arch_esf_a2_OFFSET(sp)
	lr a3, __struct_arch_esf_a3_OFFSET(sp)
	lr a4, __struct_arch_esf_a4_OFFSET(sp)
	lr a5, __struct_arch_esf_a5_OFFSET(sp)
	lr t0, __struct_arch_esf_t0_OFFSET(sp)
#if defined(CONFIG_RISCV_ISA_RV32E)
	/* Stack alignment for RV32E is 4 bytes */
	addi sp, sp, -4
	mv t1, sp
	sw t1, 0(sp)
#else
	mv a6, sp
#endif /* CONFIG_RISCV_ISA_RV32E */

	/* validate syscall limit */
	li t1, K_SYSCALL_LIMIT
	bltu t0, t1, valid_syscall_id

	/* bad syscall id: set the first argument to the bad id and call_id to K_SYSCALL_BAD */
	mv a0, t0
	li t0, K_SYSCALL_BAD

valid_syscall_id:

	la t2, _k_syscall_table

	slli t1, t0, RV_REGSHIFT	# Determine offset from index value
	add t2, t2, t1			# Table addr + offset = function addr
	lr t2, 0(t2)			# Load function address

	/* Execute syscall function */
	jalr ra, t2, 0

#if defined(CONFIG_RISCV_ISA_RV32E)
	addi sp, sp, 4
#endif /* CONFIG_RISCV_ISA_RV32E */

	/* Update a0 (return value) on the stack */
	sr a0, __struct_arch_esf_a0_OFFSET(sp)

	/* Disable IRQs again before leaving */
	csrc mstatus, MSTATUS_IEN
	j might_have_rescheduled
#endif /* CONFIG_USERSPACE */

is_interrupt:

#ifdef CONFIG_PMP_STACK_GUARD
#ifdef CONFIG_USERSPACE
	/*
	 * If we came from userspace then we need to reconfigure the
	 * PMP for kernel mode stack guard.
	 */
	lr t0, __struct_arch_esf_mstatus_OFFSET(sp)
	li t1, MSTATUS_MPP
	and t0, t0, t1
	bnez t0, 1f
	lr a0, ___cpu_t_current_OFFSET(s0)
	call z_riscv_pmp_stackguard_enable
	j 2f
#endif /* CONFIG_USERSPACE */
1:	/* Re-activate PMP for m-mode */
	li t1, MSTATUS_MPP
	csrc mstatus, t1
	li t1, MSTATUS_MPRV
	csrs mstatus, t1
2:
#endif

	/* Increment _current_cpu->nested */
	lw t1, ___cpu_t_nested_OFFSET(s0)
	addi t2, t1, 1
	sw t2, ___cpu_t_nested_OFFSET(s0)
	bnez t1, on_irq_stack

	/* Switch to interrupt stack */
	mv t0, sp
	lr sp, ___cpu_t_irq_stack_OFFSET(s0)

	/*
	 * Save thread stack pointer on interrupt stack
	 * In RISC-V, the stack pointer needs to be 16-byte aligned
	 */
	addi sp, sp, -16
	sr t0, 0(sp)

on_irq_stack:

#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_HANDLING
	call __soc_handle_all_irqs
#else

#ifdef CONFIG_TRACING_ISR
	call sys_trace_isr_enter
#endif

	/* Get IRQ causing interrupt */
	csrr a0, mcause
	li t0, CONFIG_RISCV_MCAUSE_EXCEPTION_MASK
	and a0, a0, t0

	/*
	 * Clear pending IRQ generating the interrupt at SOC level
	 * Pass IRQ number to __soc_handle_irq via register a0
	 */
	jal ra, __soc_handle_irq

	/*
	 * Call corresponding registered function in _sw_isr_table.
	 * (table entries are two words wide, so shift the index accordingly)
	 */
	la t0, _sw_isr_table
	slli a0, a0, (RV_REGSHIFT + 1)
	add t0, t0, a0

	/* Load argument in a0 register */
	lr a0, 0(t0)

	/* Load ISR function address in register t1 */
	lr t1, RV_REGSIZE(t0)

	/* Call ISR function */
	jalr ra, t1, 0

#ifdef CONFIG_TRACING_ISR
	call sys_trace_isr_exit
#endif

#endif

irq_done:
	/* Decrement _current_cpu->nested */
	lw t2, ___cpu_t_nested_OFFSET(s0)
	addi t2, t2, -1
	sw t2, ___cpu_t_nested_OFFSET(s0)
	bnez t2, no_reschedule

	/* nested count is back to 0: Return to thread stack */
	lr sp, 0(sp)

#ifdef CONFIG_STACK_SENTINEL
	call z_check_stack_sentinel
#endif

check_reschedule:

#ifdef CONFIG_MULTITHREADING

	/* Get pointer to current thread on this CPU */
	lr a1, ___cpu_t_current_OFFSET(s0)

	/*
	 * Get next thread to schedule with z_get_next_switch_handle().
	 * We pass it NULL as we didn't save the whole thread context yet.
	 * If no scheduling is necessary then NULL will be returned.
	 */
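	/* a1 (old thread) is caller-saved, so keep it on the stack across the call */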
	addi sp, sp, -16
	sr a1, 0(sp)
	mv a0, zero
	call z_get_next_switch_handle
	lr a1, 0(sp)
	addi sp, sp, 16
	beqz a0, no_reschedule

reschedule:

	/*
	 * Perform context switch:
	 * a0 = new thread
	 * a1 = old thread
	 */
	call z_riscv_switch

z_riscv_thread_start:
might_have_rescheduled:
	/* reload s0 with &_current_cpu as it might have changed or be unset */
	get_current_cpu s0

#endif /* CONFIG_MULTITHREADING */

no_reschedule:

#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
	/* Restore context at SOC level */
	addi a0, sp, __struct_arch_esf_soc_context_OFFSET
	jal ra, __soc_restore_context
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */

#if defined(CONFIG_FPU_SHARING)
	/* FPU handling upon exception mode exit */
	mv a0, sp
	call z_riscv_fpu_exit_exc

	/* decrement _current->arch.exception_depth */
	lr t0, ___cpu_t_current_OFFSET(s0)
	lb t1, _thread_offset_to_exception_depth(t0)
	add t1, t1, -1
	sb t1, _thread_offset_to_exception_depth(t0)
fp_trap_exit:
#endif

	/* Restore MEPC and MSTATUS registers */
	lr t0, __struct_arch_esf_mepc_OFFSET(sp)
	lr t2, __struct_arch_esf_mstatus_OFFSET(sp)
	csrw mepc, t0
	csrw mstatus, t2

#ifdef CONFIG_USERSPACE
	/*
	 * Check if we are returning to user mode. If so then we must
	 * set is_user_mode to true and preserve our kernel mode stack for
	 * the next exception to come.
	 */
	li t1, MSTATUS_MPP
	and t0, t2, t1
	bnez t0, 1f

#ifdef CONFIG_PMP_STACK_GUARD
	/* Remove kernel stack guard and reconfigure PMP for user mode */
	lr a0, ___cpu_t_current_OFFSET(s0)
	call z_riscv_pmp_usermode_enable
#endif

	/* Set our per-thread usermode flag */
	li t1, 1
	lui t0, %tprel_hi(is_user_mode)
	add t0, t0, tp, %tprel_add(is_user_mode)
	sb t1, %tprel_lo(is_user_mode)(t0)

	/* preserve stack pointer for next exception entry */
	add t0, sp, __struct_arch_esf_SIZEOF
	sr t0, _curr_cpu_arch_user_exc_sp(s0)

	j 2f
1:
	/*
	 * We are returning to kernel mode. Store the stack pointer to
	 * be re-loaded further down.
	 */
	addi t0, sp, __struct_arch_esf_SIZEOF
	sr t0, __struct_arch_esf_sp_OFFSET(sp)
2:
#endif

	/* Restore s0 (it is no longer ours) */
	lr s0, __struct_arch_esf_s0_OFFSET(sp)

#ifdef CONFIG_RISCV_SOC_HAS_ISR_STACKING
	SOC_ISR_SW_UNSTACKING
#else
	/* Restore caller-saved registers from thread stack */
	DO_CALLER_SAVED(lr)

#ifdef CONFIG_USERSPACE
	/* retrieve saved stack pointer */
	lr sp, __struct_arch_esf_sp_OFFSET(sp)
#else
	/* remove esf from the stack */
	addi sp, sp, __struct_arch_esf_SIZEOF
#endif

#endif /* CONFIG_RISCV_SOC_HAS_ISR_STACKING */

	mret