/*
 * Copyright (c) 2021 Antony Pavlov <antonynpavlov@gmail.com>
 *
 * based on arch/riscv/core/isr.S and arch/nios2/core/exception.S
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/toolchain.h>
#include <zephyr/kernel_structs.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>

#include <mips/regdef.h>
#include <mips/mipsregs.h>

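/*
 * ESF_O() resolves to a byte offset into struct arch_esf and THREAD_O() to a
 * byte offset into struct k_thread; both come from the offset headers
 * generated at build time and pulled in through offsets_short.h.
 */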
#define ESF_O(FIELD)	__struct_arch_esf_##FIELD##_OFFSET
#define THREAD_O(FIELD)	_thread_offset_to_##FIELD

/* Convenience macros for loading/storing register states. */

#define DO_CALLEE_SAVED(op, reg) \
	op s0, THREAD_O(s0)(reg)		;\
	op s1, THREAD_O(s1)(reg)		;\
	op s2, THREAD_O(s2)(reg)		;\
	op s3, THREAD_O(s3)(reg)		;\
	op s4, THREAD_O(s4)(reg)		;\
	op s5, THREAD_O(s5)(reg)		;\
	op s6, THREAD_O(s6)(reg)		;\
	op s7, THREAD_O(s7)(reg)		;\
	op s8, THREAD_O(s8)(reg)		;

#define STORE_CALLEE_SAVED(reg) \
	DO_CALLEE_SAVED(OP_STOREREG, reg)

#define LOAD_CALLEE_SAVED(reg) \
	DO_CALLEE_SAVED(OP_LOADREG, reg)

#define DO_CALLER_SAVED(op) \
	op ra, ESF_O(ra)(sp)		;\
	op gp, ESF_O(gp)(sp)		;\
	op AT, ESF_O(at)(sp)		;\
	op t0, ESF_O(t0)(sp)		;\
	op t1, ESF_O(t1)(sp)		;\
	op t2, ESF_O(t2)(sp)		;\
	op t3, ESF_O(t3)(sp)		;\
	op t4, ESF_O(t4)(sp)		;\
	op t5, ESF_O(t5)(sp)		;\
	op t6, ESF_O(t6)(sp)		;\
	op t7, ESF_O(t7)(sp)		;\
	op t8, ESF_O(t8)(sp)		;\
	op t9, ESF_O(t9)(sp)		;\
	op a0, ESF_O(a0)(sp)		;\
	op a1, ESF_O(a1)(sp)		;\
	op a2, ESF_O(a2)(sp)		;\
	op a3, ESF_O(a3)(sp)		;\
	op v0, ESF_O(v0)(sp)		;\
	op v1, ESF_O(v1)(sp)		;

#define STORE_CALLER_SAVED() \
	addi sp, sp, -__struct_arch_esf_SIZEOF	;\
	DO_CALLER_SAVED(OP_STOREREG)		;

#define LOAD_CALLER_SAVED() \
	DO_CALLER_SAVED(OP_LOADREG)		;\
	addi sp, sp, __struct_arch_esf_SIZEOF	;
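
/*
 * Caller-saved registers only need to survive the exception itself, so they
 * live in the exception stack frame (struct arch_esf) pushed onto the
 * interrupted thread's stack. Callee-saved registers are only touched on a
 * context switch and are saved directly into the thread's struct k_thread.
 */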

/* imports */
GTEXT(_Fault)

GTEXT(_k_neg_eagain)
GTEXT(z_thread_mark_switched_in)
GTEXT(z_thread_mark_switched_out)

/* exports */
GTEXT(__isr_vec)

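/*
 * __isr_vec is the code installed at the CPU exception vector. It only
 * trampolines to the full handler below; k0 is one of the two registers the
 * MIPS ABI reserves for kernel/exception use, so it is safe to clobber here
 * before any state has been saved.
 */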
SECTION_FUNC(exception.entry, __isr_vec)
	la k0, _mips_interrupt
	jr k0

SECTION_FUNC(exception.other, _mips_interrupt)
	/* $at is saved/restored by hand below, so keep the assembler off it */
	.set noat
	/*
	 * Save caller-saved registers on current thread stack.
	 */
	STORE_CALLER_SAVED()

	/* save HI/LO and CP0 registers */
	mfhi t0
	mflo t1
	OP_STOREREG t0, ESF_O(hi)(sp)
	OP_STOREREG t1, ESF_O(lo)(sp)
	mfc0 t0, CP0_EPC
	OP_STOREREG t0, ESF_O(epc)(sp)
	mfc0 t1, CP0_BADVADDR
	OP_STOREREG t1, ESF_O(badvaddr)(sp)
	mfc0 t0, CP0_STATUS
	OP_STOREREG t0, ESF_O(status)(sp)
	mfc0 t1, CP0_CAUSE
	OP_STOREREG t1, ESF_O(cause)(sp)

	/*
	 * Check if the exception is the result of an interrupt or not.
	 */
	li k0, CAUSE_EXP_MASK
	and k1, k0, t1
	srl k1, k1, CAUSE_EXP_SHIFT

	/* ExcCode == 8 (SYSCALL) ? */
	li k0, 8
	beq k0, k1, is_kernel_syscall

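	/*
	 * AND-ing Cause with Status first means a0 only reports interrupt
	 * lines that are both pending (Cause.IP) and unmasked (Status.IM).
	 */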
	/* a0 = ((cause & status) & CAUSE_IP_MASK) >> CAUSE_IP_SHIFT */
	and t1, t1, t0
	li a0, CAUSE_IP_MASK
	and a0, a0, t1
	srl a0, a0, CAUSE_IP_SHIFT

	/* ExcCode == 0 (INTERRUPT) ? If not, go to unhandled */
	bnez k1, unhandled

	/* any interrupt lines pending in a0 ? */
	bnez a0, is_interrupt

unhandled:
	move a0, sp
	jal _Fault
	eret

is_kernel_syscall:
	/*
	 * A syscall is the result of a syscall instruction, in which case
	 * EPC contains the address of that syscall instruction.
	 * Increment the saved EPC by 4 to prevent triggering the same syscall
	 * again upon exiting the ISR.
	 */
	OP_LOADREG k0, ESF_O(epc)(sp)
	addi k0, k0, 4
	OP_STOREREG k0, ESF_O(epc)(sp)

#ifdef CONFIG_IRQ_OFFLOAD
	/*
	 * Determine whether the system call is the result of IRQ offloading,
	 * by checking whether _offload_routine is non-NULL.
	 * If it is NULL, fall through and jump to reschedule to perform a
	 * context switch; otherwise jump to is_interrupt to handle the IRQ
	 * offload.
	 */
	la t0, _offload_routine
	OP_LOADREG t1, 0(t0)
	/*
	 * Put 0 into a0: call z_mips_enter_irq() with ipending==0
	 * to prevent a spurious interrupt.
	 */
	move a0, zero
	bnez t1, is_interrupt
#endif /* CONFIG_IRQ_OFFLOAD */

	/*
	 * Go to reschedule to handle the context switch
	 */
	j reschedule

is_interrupt:
	/*
	 * Save the current thread stack pointer and switch
	 * the stack pointer to the interrupt stack.
	 */

	/* Save thread stack pointer to temp register k0 */
	move k0, sp

	/* Switch to interrupt stack */
	la k1, _kernel
	OP_LOADREG sp, _kernel_offset_to_irq_stack(k1)

	/*
	 * Carve out 16 bytes on the interrupt stack (which keeps sp aligned)
	 * and save the thread stack pointer in the first word.
	 */
	addi sp, sp, -16
	OP_STOREREG k0, 0(sp)

on_irq_stack:
	/*
	 * Enter the C interrupt handling code; a0 already holds ipending,
	 * which becomes the function's argument.
	 */
	jal z_mips_enter_irq

on_thread_stack:
	/* Restore thread stack pointer */
	OP_LOADREG sp, 0(sp)

#ifdef CONFIG_PREEMPT_ENABLED
	/*
	 * Check if we need to perform a reschedule
	 */

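	/*
	 * k1 is assumed to still hold &_kernel from is_interrupt: compiled C
	 * code does not use k0/k1, so the value survives the call into
	 * z_mips_enter_irq and the ISRs it runs.
	 */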
	/* Get pointer to _kernel.current */
	OP_LOADREG t2, _kernel_offset_to_current(k1)

	/*
	 * Check whether the next thread to schedule is the current thread.
	 * If so, do not perform a reschedule.
	 */
	OP_LOADREG t3, _kernel_offset_to_ready_q_cache(k1)
	beq t3, t2, no_reschedule
#else
	j no_reschedule
#endif /* CONFIG_PREEMPT_ENABLED */

reschedule:
#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	jal z_thread_mark_switched_out
#endif
	/*
	 * Check if the current thread is the same as the thread on the ready Q.
	 * If so, do not reschedule.
	 * Note:
	 *   Sometimes this code is executed back-to-back before the target
	 *   thread has a chance to run. If this happens, the current thread
	 *   and the target thread will be the same.
	 */
	la t0, _kernel
	OP_LOADREG t2, _kernel_offset_to_current(t0)
	OP_LOADREG t3, _kernel_offset_to_ready_q_cache(t0)
	beq t2, t3, no_reschedule

	/* Get reference to _kernel */
	la t0, _kernel

	/* Get pointer to _kernel.current */
	OP_LOADREG t1, _kernel_offset_to_current(t0)

	/*
	 * Save callee-saved registers of the current thread
	 * before switching context.
	 */
	STORE_CALLEE_SAVED(t1)

skip_callee_saved_reg:

	/*
	 * Save the stack pointer of the current thread and set the default
	 * return value of z_swap to _k_neg_eagain for the thread.
	 */
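	/*
	 * -EAGAIN is what z_swap() reports if this thread is switched back in
	 * without an explicit return value having been set in the meantime.
	 */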
	OP_STOREREG sp, _thread_offset_to_sp(t1)
	la t2, _k_neg_eagain
	lw t3, 0(t2)
	sw t3, _thread_offset_to_swap_return_value(t1)

	/* Get next thread to schedule. */
	OP_LOADREG t1, _kernel_offset_to_ready_q_cache(t0)

	/*
	 * Set _kernel.current to the new thread loaded in t1
	 */
	OP_STOREREG t1, _kernel_offset_to_current(t0)

	/* Switch to new thread stack */
	OP_LOADREG sp, _thread_offset_to_sp(t1)

	/* Restore callee-saved registers of new thread */
	LOAD_CALLEE_SAVED(t1)

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	jal z_thread_mark_switched_in
#endif

	/* fallthrough */

no_reschedule:
	/* restore HI/LO and CP0 registers */
	OP_LOADREG t1, ESF_O(hi)(sp)
	OP_LOADREG t2, ESF_O(lo)(sp)
	mthi t1
	mtlo t2

	OP_LOADREG k0, ESF_O(epc)(sp)
	mtc0 k0, CP0_EPC
	OP_LOADREG k1, ESF_O(status)(sp)
	mtc0 k1, CP0_STATUS
	ehb
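	/*
	 * ehb is an execution hazard barrier: it guarantees the CP0 writes to
	 * EPC and STATUS above have taken effect before eret consumes them.
	 */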

	/* Restore caller-saved registers from thread stack */
	LOAD_CALLER_SAVED()

	/* exit ISR: return to EPC and clear Status.EXL */
	eret
