/*
 * Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * Populated vector table
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/offsets.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/arch/arm64/tpidrro_el0.h>
#include <offsets_short.h>
#include "macro_priv.inc"


_ASM_FILE_PROLOGUE

/*
 * Save volatile registers, LR, SPSR_EL1 and ELR_EL1
 *
 * Save the volatile registers and LR on the process stack. This is needed
 * in case the thread is switched out, since those registers can be
 * clobbered by the ISR and/or the context switch.
 */
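/*
 * The portion of the exception stack frame (esf) built here covers, per the
 * ___esf_t_* offsets used below: x0-x18 and LR, optionally the frame pointer
 * x29 (CONFIG_FRAME_POINTER), SPSR_EL1/ELR_EL1, and optionally the saved
 * SP_EL0 (CONFIG_ARM64_SAFE_EXCEPTION_STACK).
 */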

.macro z_arm64_enter_exc xreg0, xreg1, el
	/*
	 * Two things can happen to the remaining registers:
	 *
	 * - No context-switch: in this case x19-x28 are callee-saved
	 *   registers, so we can be sure they are not going to be clobbered
	 *   by the ISR.
	 * - Context-switch: the callee-saved registers are saved by
	 *   z_arm64_context_switch() in the kernel structure.
	 */

	sub	sp, sp, ___esf_t_SIZEOF

#ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK
	.if	\el == el1
	/*
	 * SP_EL1 cannot be read via MRS from EL1, and once SPSel is cleared
	 * (EL1t) the sp alias refers to SP_EL0. So stash the current SP_EL1
	 * (which points at the esf just reserved above) in x0 first, using an
	 * add/sub trick that does not corrupt any other register; x0 itself
	 * is recovered later by z_arm64_quick_stack_check.
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = sp
	msr	SPSel, #0
	stp	x16, x17, [sp, -(___esf_t_SIZEOF - ___esf_t_x16_x17_OFFSET)]
	stp	x18, lr,  [sp, -(___esf_t_SIZEOF - ___esf_t_x18_lr_OFFSET)]
	bl	z_arm64_quick_stack_check
	.endif
#endif
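	/*
	 * With CONFIG_ARM64_SAFE_EXCEPTION_STACK, z_arm64_quick_stack_check
	 * returns with sp and x0 restored to their original values (or with
	 * sp switched to the safe exception stack if an overflow was
	 * detected), and with the original x16, x17, x18 and lr already
	 * sitting at their esf offsets; this is why the stores below skip
	 * them for exceptions taken from EL1.
	 */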

	stp	x0, x1, [sp, ___esf_t_x0_x1_OFFSET]
	stp	x2, x3, [sp, ___esf_t_x2_x3_OFFSET]
	stp	x4, x5, [sp, ___esf_t_x4_x5_OFFSET]
	stp	x6, x7, [sp, ___esf_t_x6_x7_OFFSET]
	stp	x8, x9, [sp, ___esf_t_x8_x9_OFFSET]
	stp	x10, x11, [sp, ___esf_t_x10_x11_OFFSET]
	stp	x12, x13, [sp, ___esf_t_x12_x13_OFFSET]
	stp	x14, x15, [sp, ___esf_t_x14_x15_OFFSET]
#ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK
	/* An exception from EL1 does not need to save x16, x17, x18 and lr */
	.if	\el == el0
#endif
	stp	x16, x17, [sp, ___esf_t_x16_x17_OFFSET]
	stp	x18, lr, [sp, ___esf_t_x18_lr_OFFSET]
#ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK
	.endif
#endif

#ifdef CONFIG_FRAME_POINTER
	str	x29, [sp, ___esf_t_fp_OFFSET]
#endif

	mrs	\xreg0, spsr_el1
	mrs	\xreg1, elr_el1
	stp	\xreg0, \xreg1, [sp, ___esf_t_spsr_elr_OFFSET]

#ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK
	.if	\el == el0
	mrs	x0, sp_el0
	str	x0, [sp, ___esf_t_sp_el0_OFFSET]

	/* Retrieve this CPU's safe exception stack */
	get_cpu	x0
	ldr	x1, [x0, #_cpu_offset_to_safe_exception_stack]
	msr	sp_el0, x1
	.endif
#endif
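	/*
	 * With CONFIG_ARM64_SAFE_EXCEPTION_STACK, sp_el0 now points at this
	 * CPU's safe exception stack for the rest of the exception handling:
	 * the EL1 entry path above relies on this when it switches SPSel to 0
	 * to stash x16, x17, x18 and lr.
	 */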

	/* Clear usermode flag and increment exception depth */
	mrs	\xreg0, tpidrro_el0
	mov	\xreg1, #TPIDRROEL0_EXC_UNIT
	bic	\xreg0, \xreg0, #TPIDRROEL0_IN_EL0
	add	\xreg0, \xreg0, \xreg1
	msr	tpidrro_el0, \xreg0
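	/*
	 * tpidrro_el0 is used by this port as a status word: TPIDRROEL0_IN_EL0
	 * records whether the interrupted context was running in EL0, while
	 * the exception-nesting depth lives in a separate bitfield counted in
	 * TPIDRROEL0_EXC_UNIT steps (see zephyr/arch/arm64/tpidrro_el0.h), so
	 * a plain add/sub is enough to adjust the depth.
	 */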

#ifdef CONFIG_FPU_SHARING
	bl	z_arm64_fpu_enter_exc
#endif
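	/*
	 * With CONFIG_FPU_SHARING, z_arm64_fpu_enter_exc lets the FPU-sharing
	 * code handle the FPU context on exception entry; it is balanced by
	 * the z_arm64_fpu_exit_exc call in z_arm64_exit_exc below.
	 */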

.endm

/*
 * Four types of exceptions:
 * - synchronous: aborts from MMU, SP/PC alignment checking, unallocated
 *   instructions, SVCs/SMCs/HVCs, ...
 * - IRQ: group 1 (normal) interrupts
 * - FIQ: group 0 or secure interrupts
 * - SError: fatal system errors
 *
 * Four different contexts:
 * - from same exception level, when using the SP_EL0 stack pointer
 * - from same exception level, when using the SP_ELx stack pointer
 * - from lower exception level, when this is AArch64
 * - from lower exception level, when this is AArch32
 *
 * +------------------+------------------+-------------------------+
 * |     Address      |  Exception type  |       Description       |
 * +------------------+------------------+-------------------------+
 * | VBAR_ELn + 0x000 | Synchronous      | Current EL with SP0     |
 * |          + 0x080 | IRQ / vIRQ       |                         |
 * |          + 0x100 | FIQ / vFIQ       |                         |
 * |          + 0x180 | SError / vSError |                         |
 * +------------------+------------------+-------------------------+
 * |          + 0x200 | Synchronous      | Current EL with SPx     |
 * |          + 0x280 | IRQ / vIRQ       |                         |
 * |          + 0x300 | FIQ / vFIQ       |                         |
 * |          + 0x380 | SError / vSError |                         |
 * +------------------+------------------+-------------------------+
 * |          + 0x400 | Synchronous      | Lower EL using AArch64  |
 * |          + 0x480 | IRQ / vIRQ       |                         |
 * |          + 0x500 | FIQ / vFIQ       |                         |
 * |          + 0x580 | SError / vSError |                         |
 * +------------------+------------------+-------------------------+
 * |          + 0x600 | Synchronous      | Lower EL using AArch32  |
 * |          + 0x680 | IRQ / vIRQ       |                         |
 * |          + 0x700 | FIQ / vFIQ       |                         |
 * |          + 0x780 | SError / vSError |                         |
 * +------------------+------------------+-------------------------+
 */
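/*
 * The CPU locates this table through VBAR_EL1, which must hold a 2 KiB
 * aligned base address. A minimal sketch of how the table is installed
 * (this is done elsewhere during early boot, not in this file):
 *
 *	ldr	x0, =_vector_table
 *	msr	vbar_el1, x0
 *	isb
 */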

GDATA(_vector_table)
SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,_vector_table)

	/* The whole table must be 2K aligned */
	.align 11
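	/* Each vector entry below occupies a fixed 0x80-byte slot (.align 7) */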

	/* Current EL with SP0 / Synchronous */
	.align 7
	z_arm64_enter_exc x0, x1, el1
	b	z_arm64_sync_exc

	/* Current EL with SP0 / IRQ */
	.align 7
	z_arm64_enter_exc x0, x1, el1
#ifdef CONFIG_GEN_SW_ISR_TABLE
	b	_isr_wrapper
#else
	b	z_irq_spurious
#endif

	/* Current EL with SP0 / FIQ */
	.align 7
	b	.
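	/*
	 * FIQs are not expected here: an unexpected FIQ parks the CPU in this
	 * spin loop (the same applies to the other FIQ slots below).
	 */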

	/* Current EL with SP0 / SError */
	.align 7
	z_arm64_enter_exc x0, x1, el1
	b	z_arm64_serror

	/* Current EL with SPx / Synchronous */
	.align 7
	z_arm64_enter_exc x0, x1, el1
	b	z_arm64_sync_exc

	/* Current EL with SPx / IRQ */
	.align 7
	z_arm64_enter_exc x0, x1, el1
#ifdef CONFIG_GEN_SW_ISR_TABLE
	b	_isr_wrapper
#else
	b	z_irq_spurious
#endif

	/* Current EL with SPx / FIQ */
	.align 7
	b	.

	/* Current EL with SPx / SError */
	.align 7
	z_arm64_enter_exc x0, x1, el1
	b	z_arm64_serror

	/* Lower EL using AArch64 / Synchronous */
	.align 7
	z_arm64_enter_exc x0, x1, el0
	b	z_arm64_sync_exc

	/* Lower EL using AArch64 / IRQ */
	.align 7
	z_arm64_enter_exc x0, x1, el0
#ifdef CONFIG_GEN_SW_ISR_TABLE
	b	_isr_wrapper
#else
	b	z_irq_spurious
#endif

	/* Lower EL using AArch64 / FIQ */
	.align 7
	b	.

	/* Lower EL using AArch64 / SError */
	.align 7
	z_arm64_enter_exc x0, x1, el0
	b	z_arm64_serror

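	/*
	 * Exceptions taken from a lower EL running AArch32 are not supported:
	 * all four AArch32 slots below simply park the CPU.
	 */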
	/* Lower EL using AArch32 / Synchronous */
	.align 7
	b	.

	/* Lower EL using AArch32 / IRQ */
	.align 7
	b	.

	/* Lower EL using AArch32 / FIQ */
	.align 7
	b	.

	/* Lower EL using AArch32 / SError */
	.align 7
	b	.

GTEXT(z_arm64_serror)
SECTION_FUNC(TEXT, z_arm64_serror)

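	/*
	 * Call z_arm64_fatal_error() with the reason code in x0
	 * (K_ERR_CPU_EXCEPTION) and a pointer to the esf built on the
	 * current stack in x1.
	 */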
	mov	x1, sp
	mov	x0, #0 /* K_ERR_CPU_EXCEPTION */

	bl	z_arm64_fatal_error
	/* Return here only in case of recoverable error */
	b	z_arm64_exit_exc

#ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK
GTEXT(z_arm64_quick_stack_check)
SECTION_FUNC(TEXT, z_arm64_quick_stack_check)
	/*
	 * On entry x0 holds the privileged stack pointer (SP_EL1) value from
	 * after the esf was reserved, i.e. the esf base on the privileged
	 * stack, while sp is the safe exception stack (SPSel == 0) where
	 * x16, x17, x18 and lr have been stashed by z_arm64_enter_exc.
	 *
	 * Retrieve the current stack limit.
	 */
	get_cpu	x16
	ldr	x17, [x16, #_cpu_offset_to_current_stack_limit]
	/*
	 * If the privileged sp is at or below the stack limit the stack has
	 * overflowed: branch below and keep using the safe exception stack.
	 */
	cmp	x0, x17
	/* Switch back to SP_EL1 and restore the original x0 and sp */
	msr	SPSel, #1			// switch sp to sp_el1
	sub	x0, sp, x0			// x0'' = sp' - x0' = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = sp
	ble	1f
	/*
	 * If the stack has not overflowed, keep using SP_EL1 and copy the
	 * original x16, x17, x18 and lr from sp_el0 (safe_exception_stack)
	 * into the esf on SP_EL1, so the four registers can be restored
	 * directly from SP_EL1 without another stack mode switch.
	 */
	mrs	x18, sp_el0
	ldp	x16, x17, [x18, -(___esf_t_SIZEOF - ___esf_t_x16_x17_OFFSET)]
	stp	x16, x17, [sp, ___esf_t_x16_x17_OFFSET]
	ldp	x16, x17, [x18, -(___esf_t_SIZEOF - ___esf_t_x18_lr_OFFSET)]
	stp	x16, x17, [sp, ___esf_t_x18_lr_OFFSET]
	ret
1:	/*
	 * The stack has overflowed: record the corrupted sp, then switch sp
	 * to the safe exception stack.
	 * x16 still holds the current _cpu pointer.
	 */
	mrs	x18, sp_el0
	mov	x17, sp
	str	x17, [x16, #_cpu_offset_to_corrupted_sp]
	/*
	 * Switch sp to the safe exception stack: the fatal error is handled
	 * on the safe exception stack.
	 */
	sub	sp, x18, ___esf_t_SIZEOF
	ret
#endif

/*
 * Restore volatile registers, LR, SPSR_EL1 and ELR_EL1
 *
 * This is the common exit point for z_arm64_sync_exc() and _isr_wrapper().
 */

GTEXT(z_arm64_exit_exc)
SECTION_FUNC(TEXT, z_arm64_exit_exc)

#ifdef CONFIG_FPU_SHARING
	bl	z_arm64_fpu_exit_exc

GTEXT(z_arm64_exit_exc_fpu_done)
z_arm64_exit_exc_fpu_done:
#endif

	ldp	x0, x1, [sp, ___esf_t_spsr_elr_OFFSET]
	msr	spsr_el1, x0
	msr	elr_el1, x1

	/* Restore the kernel/user mode flag and decrement exception depth */
	tst	x0, #SPSR_MODE_MASK	/* EL0 == 0 */
	mrs	x0, tpidrro_el0
	mov	x1, #TPIDRROEL0_EXC_UNIT
	orr	x2, x0, #TPIDRROEL0_IN_EL0
	csel	x0, x2, x0, eq
	sub	x0, x0, x1
	msr	tpidrro_el0, x0
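	/*
	 * The csel above re-sets TPIDRROEL0_IN_EL0 only when the saved SPSR
	 * mode field is 0 (returning to EL0), while the sub always drops the
	 * exception depth by one TPIDRROEL0_EXC_UNIT. The flags from the tst
	 * are also reused by the bne below (CONFIG_ARM64_SAFE_EXCEPTION_STACK):
	 * the saved sp_el0 is restored only when actually returning to EL0,
	 * since while handling exceptions taken from EL1 sp_el0 keeps
	 * pointing at the safe exception stack.
	 */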

#ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK
	bne	1f
	ldr	x0, [sp, ___esf_t_sp_el0_OFFSET]
	msr	sp_el0, x0
1:
#endif

	ldp	x0, x1, [sp, ___esf_t_x0_x1_OFFSET]
	ldp	x2, x3, [sp, ___esf_t_x2_x3_OFFSET]
	ldp	x4, x5, [sp, ___esf_t_x4_x5_OFFSET]
	ldp	x6, x7, [sp, ___esf_t_x6_x7_OFFSET]
	ldp	x8, x9, [sp, ___esf_t_x8_x9_OFFSET]
	ldp	x10, x11, [sp, ___esf_t_x10_x11_OFFSET]
	ldp	x12, x13, [sp, ___esf_t_x12_x13_OFFSET]
	ldp	x14, x15, [sp, ___esf_t_x14_x15_OFFSET]
	ldp	x16, x17, [sp, ___esf_t_x16_x17_OFFSET]
	ldp	x18, lr, [sp, ___esf_t_x18_lr_OFFSET]

#ifdef CONFIG_FRAME_POINTER
	ldr	x29, [sp, ___esf_t_fp_OFFSET]
#endif

	add	sp, sp, ___esf_t_SIZEOF

	/*
	 * In general, ELR_EL1 at this point holds one of:
	 *
	 * - the address of the ret in z_arm64_call_svc(),
	 * - the address of the next instruction at the time of the IRQ when
	 *   the thread was switched out,
	 * - the address of z_thread_entry() for new threads (see thread.c).
	 */
	eret