/*
 * Copyright (c) 2020 Stephanos Ioannidis <root@stephanos.io>
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief ARM Cortex-A and Cortex-R exception/interrupt exit API
 *
 * Provides functions for performing kernel housekeeping when exiting
 * exceptions or interrupts that are installed directly in the vector table
 * (i.e. not wrapped by _isr_wrapper()).
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include "macro_priv.inc"

_ASM_FILE_PROLOGUE

GTEXT(z_arm_exc_exit)
GTEXT(z_arm_int_exit)
GTEXT(z_arm_do_swap)
GDATA(_kernel)

.macro userspace_exc_exit
#if defined(CONFIG_USERSPACE)
	cps #MODE_SVC
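	/* Reserve svc stack space for the copied return state and save scratch regs */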
	sub sp, #8
	push {r0-r1}

	/*
	 * Copy the return state from the sys/usr stack onto the svc stack.
	 * We have to put $sp_usr back into $sp since we switched to
	 * the privileged stack on exception entry.  The return state
	 * is on the privileged stack so it needs to be copied to the
	 * svc stack since we cannot trust the usr stack.
	 */
	cps #MODE_SYS
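	/* r0 = return address, r1 = saved SPSR, popped from the privileged stack */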
	pop {r0-r1}

	cps #MODE_SVC
	str r0, [sp, #8]
	str r1, [sp, #12]

	/* Only switch the stacks if returning to a user thread */
	and r1, #MODE_MASK
	cmp r1, #MODE_USR
	bne system_thread_exit\@

	/* Restore user stack pointer */
	get_cpu r0
	ldr r0, [r0, #___cpu_t_current_OFFSET]
	cps #MODE_SYS
	ldr sp, [r0, #_thread_offset_to_sp_usr] /* sp_usr */
	cps #MODE_SVC
system_thread_exit\@:
	pop {r0-r1}
#endif
.endm

.macro fpu_exc_exit
#if defined(CONFIG_FPU_SHARING)
	/*
	 * If the floating point context pointer is null, then a context was
	 * saved, so restore the floating point context from the exception
	 * stack frame.
	 */
	get_cpu r2
	ldr r1, [r2, #___cpu_t_fp_ctx_OFFSET]
	cmp r1, #0
	beq vfp_restore\@

	/*
	 * If leaving the last interrupt context, remove the floating point
	 * context pointer.
	 */
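	/* r0 holds the exception/interrupt nesting count, already decremented by the caller */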
	cmp r0, #0
	moveq r1, #0
	streq r1, [r2, #___cpu_t_fp_ctx_OFFSET]
	b vfp_exit\@

vfp_restore\@:
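	/* Load the saved FPSCR (r1) and FPEXC (r2) from the exception stack frame */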
	add r3, sp, #___fpu_sf_t_fpscr_OFFSET
	ldm r3, {r1, r2}
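	/* Nothing to restore if the VFP was disabled in the saved context */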
	tst r2, #FPEXC_EN
	beq vfp_exit\@

	vmsr fpexc, r2
	vmsr fpscr, r1
	mov r3, sp
	vldmia r3!, {s0-s15}
#ifdef CONFIG_VFP_FEATURE_REGS_S64_D32
	vldmia r3!, {d16-d31}
#endif

vfp_exit\@:
	/* Leave the VFP disabled on exit */
	mov r1, #0
	vmsr fpexc, r1

	add sp, sp, #___fpu_t_SIZEOF	/* Remove the FPU save area from the stack */
#endif
.endm

/**
 * @brief Kernel housekeeping when exiting interrupt handler installed directly
 *        in the vector table
 *
 * The kernel allows installing interrupt handlers (ISRs) directly into the
 * vector table to achieve the lowest possible interrupt latency. This allows
 * the ISR to be invoked directly without going through a software interrupt
 * table. However, upon exiting the ISR, some kernel work must still be
 * performed, namely possible context switching. While ISRs connected in the
 * software interrupt table do this automatically via a wrapper, ISRs connected
 * directly in the vector table must invoke z_arm_int_exit() as the *very last*
 * action before returning.
 *
 * e.g.
 *
 * void myISR(void)
 * {
 * 	printk("in %s\n", __FUNCTION__);
 * 	doStuff();
 * 	z_arm_int_exit();
 * }
 */
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_int_exit)

#ifdef CONFIG_STACK_SENTINEL
	bl z_check_stack_sentinel
#endif /* CONFIG_STACK_SENTINEL */

	/* Disable nested interrupts while exiting. This must also happen
	 * before any context switch, to ensure interrupts remain disabled.
	 */
	cpsid i

#ifdef CONFIG_PREEMPT_ENABLED
	/* Do not context switch if exiting a nested interrupt */
	get_cpu r3
	ldr r0, [r3, #___cpu_t_nested_OFFSET]
	cmp r0, #1
	bhi __EXIT_INT

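	/* Context switch only if the next ready thread is not the current thread */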
	ldr r1, [r3, #___cpu_t_current_OFFSET]
	ldr r2, =_kernel
	ldr r0, [r2, #_kernel_offset_to_ready_q_cache]
	cmp r0, r1
	blne z_arm_do_swap
__EXIT_INT:
#endif /* CONFIG_PREEMPT_ENABLED */

	/* Decrement interrupt nesting count */
	get_cpu r2
	ldr r0, [r2, #___cpu_t_nested_OFFSET]
	sub r0, r0, #1
	str r0, [r2, #___cpu_t_nested_OFFSET]

	/* Restore the previous stack pointer using the saved adjustment in r3 */
	pop {r2, r3}
	add sp, sp, r3

	/*
	 * Restore lr_svc stored into the SVC mode stack by the mode entry
	 * function. This ensures that the return address of the interrupted
	 * context is preserved in case of interrupt nesting.
	 */
	pop {lr}

	/*
	 * Restore r0-r3, r12 and lr_irq stored into the process stack by the
	 * mode entry function. These registers are saved by _isr_wrapper for
	 * IRQ mode and z_arm_svc for SVC mode.
	 *
	 * r0-r3 are either the values from the thread before it was switched
	 * out or they are the args to _new_thread for a new thread.
	 */
	cps #MODE_SYS

#if defined(CONFIG_FPU_SHARING)
	fpu_exc_exit
#endif

	pop {r0-r3, r12, lr}
	userspace_exc_exit
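	/* Return from exception: reload PC and CPSR from the stack */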
	rfeia sp!

/**
 * @brief Kernel housekeeping when exiting exception handler
 *
 * The exception exit routine performs appropriate housekeeping tasks depending
 * on the mode of exit:
 *
 * If exiting a nested or non-fatal exception, the exit routine restores the
 * saved exception stack frame to resume the excepted context.
 *
 * If exiting a non-nested fatal exception, the exit routine, assuming that the
 * current faulting thread is aborted, discards the saved exception stack
 * frame containing the aborted thread context and switches to the next
 * scheduled thread.
 *
 * void z_arm_exc_exit(bool fatal)
 *
 * @param fatal True if exiting from a fatal fault; otherwise, false
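 *
 * e.g. (illustrative sketch; myFaultHandler is a hypothetical C handler that
 * returns true when the fault is fatal, and the calling assembly passes that
 * return value to z_arm_exc_exit in r0)
 *
 * 	bl myFaultHandler
 * 	b z_arm_exc_exit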
 */
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_exc_exit)
	/* Do not context switch if exiting a nested exception */
	get_cpu r3
	ldr r1, [r3, #___cpu_t_nested_OFFSET]
	cmp r1, #1
	bhi __EXIT_EXC

	/* If the fault is not fatal, return to the current thread context */
	cmp r0, #0
	beq __EXIT_EXC

	/*
	 * If the fault is fatal, the current thread must have been aborted by
	 * the exception handler. Clean up the exception stack frame and switch
	 * to the next scheduled thread.
	 */

	/* Clean up exception stack frame */
#if defined(CONFIG_FPU_SHARING)
	add sp, sp, #___fpu_t_SIZEOF
#endif
	add sp, #32	/* r0-r3, r12, lr, plus the saved return state (lr, spsr) */

	/*
	 * Switch in the next scheduled thread.
	 *
	 * Note that z_arm_do_swap must be called in the SVC mode because it
	 * switches to the SVC mode during context switch and returns to the
	 * caller using lr_svc.
	 */
	cps #MODE_SVC
	bl z_arm_do_swap

	/* Decrement exception nesting count */
	get_cpu r3
	ldr r0, [r3, #___cpu_t_nested_OFFSET]
	sub r0, r0, #1
	str r0, [r3, #___cpu_t_nested_OFFSET]

	/* Return to the switched thread */
	cps #MODE_SYS
#if defined(CONFIG_FPU_SHARING)
	fpu_exc_exit
#endif
	pop {r0-r3, r12, lr}
	userspace_exc_exit
	rfeia sp!

__EXIT_EXC:
	/* Decrement exception nesting count */
	ldr r0, [r3, #___cpu_t_nested_OFFSET]
	sub r0, r0, #1
	str r0, [r3, #___cpu_t_nested_OFFSET]

#if defined(CONFIG_FPU_SHARING)
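	/* Skip over the FPU save area in the exception stack frame */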
	add sp, sp, #___fpu_t_SIZEOF
#endif
	/*
	 * Restore r0-r3, r12, lr, lr_und and spsr_und from the exception stack
	 * and return to the current thread.
	 */
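	/* The '^' form loads the banked user/sys lr rather than the current mode's lr */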
	ldmia sp, {r0-r3, r12, lr}^
	add sp, #24
	rfeia sp!