/*
 * Copyright (c) 2013-2014 Wind River Systems, Inc.
 * Copyright (c) 2020 Stephanos Ioannidis <root@stephanos.io>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief ARM Cortex-A and Cortex-R wrapper for ISRs with parameter
 *
 * Wrapper installed in vector table for handling dynamic interrupts that accept
 * a parameter.
 */
/*
 * Tell armclang that stack alignment is ensured.
 */
.eabi_attribute Tag_ABI_align_preserved, 1

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/sw_isr_table.h>
#include "macro_priv.inc"


_ASM_FILE_PROLOGUE

GDATA(_sw_isr_table)

GTEXT(_isr_wrapper)
GTEXT(z_arm_int_exit)

#ifndef CONFIG_USE_SWITCH
/**
 *
 * @brief Wrapper around ISRs when inserted in software ISR table
 *
 * When inserted in the vector table, _isr_wrapper() demuxes the ISR table
 * using the running interrupt number as the index, and invokes the registered
 * ISR with its corresponding argument. When returning from the ISR, it
 * determines if a context switch needs to happen and, if so, invokes
 * z_arm_pendsv() (see its documentation), which performs the context switch
 * itself.
 *
 */
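/*
 * Each _sw_isr_table entry is 8 bytes on AArch32: as declared in
 * zephyr/sw_isr_table.h it holds the ISR argument followed by the ISR
 * function pointer,
 *
 *     struct _isr_table_entry {
 *         const void *arg;
 *         void (*isr)(const void *);
 *     };
 *
 * which is why the active IRQ number is shifted left by 3 below before being
 * used as a byte offset into the table.
 */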
SECTION_FUNC(TEXT, _isr_wrapper)

#if defined(CONFIG_USERSPACE)
	/* See comment below about svc stack usage */
	cps #MODE_SVC
	push {r0}

	/* Determine if interrupted thread was in user context */
	cps #MODE_IRQ
	mrs r0, spsr
	and r0, #MODE_MASK
	cmp r0, #MODE_USR
	bne isr_system_thread

	get_cpu r0
	ldr r0, [r0, #___cpu_t_current_OFFSET]

	/* Save away user stack pointer */
	cps #MODE_SYS
	str sp, [r0, #_thread_offset_to_sp_usr] /* sp_usr */

	/* Switch to privileged stack */
	ldr sp, [r0, #_thread_offset_to_priv_stack_end] /* priv stack end */

isr_system_thread:
	cps #MODE_SVC
	pop {r0}
	cps #MODE_IRQ
#endif

	/*
	 * Save away r0-r3, r12 and lr_irq for the previous context to the
	 * process stack since they are clobbered here.  Also, save away lr
	 * and spsr_irq since we may swap processes and return to a different
	 * thread.
	 */
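	/*
	 * lr_irq points one instruction past the preferred return address of
	 * an IRQ exception, so adjust it by 4 before srsdb stores it
	 * (together with spsr_irq) onto the SYS mode stack.
	 */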
	sub lr, lr, #4
	srsdb #MODE_SYS!
	cps #MODE_SYS
	push {r0-r3, r12, lr}

#if defined(CONFIG_FPU_SHARING)
	sub sp, sp, #___fpu_t_SIZEOF

	/*
	 * Note whether this handler was entered with the VFP unit enabled.
	 * The undefined instruction handler uses this to know that it
	 * needs to save the current floating context.
	 */
	vmrs r0, fpexc
	str r0, [sp, #___fpu_t_SIZEOF - 4]
	tst r0, #FPEXC_EN
	beq _vfp_not_enabled
	vmrs r0, fpscr
	str r0, [sp, #___fpu_t_SIZEOF - 8]

	/* Disable VFP */
	mov r0, #0
	vmsr fpexc, r0

_vfp_not_enabled:
	/*
	 * Mark where to store the floating context for the undefined
	 * instruction handler
	 */
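	/*
	 * Only claim the save area if cpu->fp_ctx is not already set; the
	 * store below is conditional on it being NULL.
	 */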
	get_cpu r2
	ldr r0, [r2, #___cpu_t_fp_ctx_OFFSET]
	cmp r0, #0
	streq sp, [r2, #___cpu_t_fp_ctx_OFFSET]
#endif /* CONFIG_FPU_SHARING */

	/*
	 * Use SVC mode stack for predictable interrupt behaviour; running ISRs
	 * in the SYS/USR mode stack (i.e. interrupted thread stack) leaves the
	 * ISR stack usage at the mercy of the interrupted thread and this can
	 * be prone to stack overflows if any of the ISRs and/or preemptible
	 * threads have high stack usage.
	 *
	 * When userspace is enabled, this also prevents leaking privileged
	 * information to the user mode.
	 */
	cps #MODE_SVC

	/*
	 * Preserve lr_svc which may contain the branch return address of the
	 * interrupted context in case of a nested interrupt. This value will
	 * be restored prior to exiting the interrupt in z_arm_int_exit.
	 */
	push {lr}

	/* Align stack at double-word boundary */
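	/* r3 keeps the adjustment so it can be undone when unwinding */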
	and r3, sp, #4
	sub sp, sp, r3
	push {r2, r3}

	/* Increment interrupt nesting count */
	get_cpu r2
	ldr r0, [r2, #___cpu_t_nested_OFFSET]
	add r0, r0, #1
	str r0, [r2, #___cpu_t_nested_OFFSET]

#ifdef CONFIG_TRACING_ISR
	bl sys_trace_isr_enter
#endif

#ifdef CONFIG_PM
	/*
	 * All interrupts are disabled when handling idle wakeup.  For tickless
	 * idle, this ensures that the calculation and programming of the
	 * device for the next timer deadline is not interrupted.  For
	 * non-tickless idle, this ensures that the clearing of the kernel idle
	 * state is not interrupted.  In each case, pm_system_resume
	 * is called with interrupts disabled.
	 */

	/* is this a wakeup from idle ? */
	ldr r2, =_kernel
	/* requested idle duration, in ticks */
	ldr r0, [r2, #_kernel_offset_to_idle]
	cmp r0, #0

	beq _idle_state_cleared
	movs r1, #0
	/* clear kernel idle state */
	str r1, [r2, #_kernel_offset_to_idle]
	bl pm_system_resume
_idle_state_cleared:

#endif /* CONFIG_PM */

	/* Get active IRQ number from the interrupt controller */
#if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
	bl arm_gic_get_active
#else
	bl z_soc_irq_get_active
#endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */
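	/*
	 * Keep the active IRQ number (r0) for the end-of-interrupt call at
	 * spurious_continue; pushing r1 alongside keeps the stack
	 * double-word aligned.
	 */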
	push {r0, r1}
	lsl r0, r0, #3	/* table is 8-byte wide */

	/*
	 * Enable interrupts to allow nesting.
	 *
	 * Note that interrupts are disabled up to this point on the ARM
	 * architecture variants other than the Cortex-M. It is also important
	 * to note that most interrupt controllers require that the nested
	 * interrupts are handled after the active interrupt is acknowledged;
	 * this is done through the `get_active` interrupt controller
	 * interface function.
	 */
	cpsie i

	/*
	 * Skip calling the ISR if it is a spurious interrupt.
	 */
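	/*
	 * The interrupt controller may return an out-of-range ID for a
	 * spurious interrupt (e.g. the GIC reports the special INTID 1023),
	 * so anything at or above CONFIG_NUM_IRQS skips straight to the
	 * end-of-interrupt handling.
	 */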
	mov r1, #CONFIG_NUM_IRQS
	lsl r1, r1, #3
	cmp r0, r1
	bge spurious_continue

	ldr r1, =_sw_isr_table
	add r1, r1, r0	/* table entry: ISRs must have their LSB set to stay
			 * in thumb mode */

	ldm r1!,{r0,r3}	/* arg in r0, ISR in r3 */
	blx r3		/* call ISR */

spurious_continue:
	/* Signal end-of-interrupt */
	pop {r0, r1}
#if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
	bl arm_gic_eoi
#else
	bl z_soc_irq_eoi
#endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */

#ifdef CONFIG_TRACING_ISR
	bl sys_trace_isr_exit
#endif

	/* Use 'bx' instead of 'b' because 'bx' can jump further, and use
	 * 'bx' instead of 'blx' because exception return is done in
	 * z_arm_int_exit() */
	ldr r1, =z_arm_int_exit
	bx r1

#else
/**
 *
 * @brief Wrapper around ISRs when inserted in software ISR table
 *
 * When inserted in the vector table, _isr_wrapper() demuxes the ISR table
 * using the running interrupt number as the index, and invokes the registered
 * ISR with its corresponding argument. When returning from the ISR, it
 * determines if a context switch needs to happen and invokes the arch_switch
 * function if so.
 *
 */
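/*
 * In this configuration the wrapper performs any required context switch
 * directly on the way out, via z_get_next_switch_handle() and
 * z_arm_context_switch() below.
 */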
SECTION_FUNC(TEXT, _isr_wrapper)
	sub lr, #4
	z_arm_cortex_ar_enter_exc

	/* Increment interrupt nesting count */
	get_cpu r2
	ldr r0, [r2, #___cpu_t_nested_OFFSET]
	add r0, #1
	str r0, [r2, #___cpu_t_nested_OFFSET]

	/* If not nested: switch to IRQ stack and save current sp on it. */
	cmp r0, #1
	bhi 1f
	mov r0, sp
	cps #MODE_IRQ
	push {r0}
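	/*
	 * The saved sp is popped from the IRQ stack and restored in
	 * z_arm_cortex_ar_irq_done once the nesting count drops back to zero.
	 */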
1:
#ifdef CONFIG_TRACING_ISR
	bl sys_trace_isr_enter
#endif /* CONFIG_TRACING_ISR */

#ifdef CONFIG_PM
	/*
	 * All interrupts are disabled when handling idle wakeup.  For tickless
	 * idle, this ensures that the calculation and programming of the
	 * device for the next timer deadline is not interrupted.  For
	 * non-tickless idle, this ensures that the clearing of the kernel idle
	 * state is not interrupted.  In each case, pm_system_resume
	 * is called with interrupts disabled.
	 */

	/* is this a wakeup from idle ? */
	ldr r2, =_kernel
	/* requested idle duration, in ticks */
	ldr r0, [r2, #_kernel_offset_to_idle]
	cmp r0, #0

	beq _idle_state_cleared
	movs r1, #0
	/* clear kernel idle state */
	str r1, [r2, #_kernel_offset_to_idle]
	bl pm_system_resume
_idle_state_cleared:
#endif /* CONFIG_PM */

	/* Get active IRQ number from the interrupt controller */
#if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
	bl arm_gic_get_active
#else
	bl z_soc_irq_get_active
#endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */

	push {r0, r1}
	lsl r0, r0, #3	/* table is 8-byte wide */

	/*
	 * Skip calling the ISR if it is a spurious interrupt.
	 */
	mov r1, #CONFIG_NUM_IRQS
	lsl r1, r1, #3
	cmp r0, r1
	bge spurious_continue

	ldr r1, =_sw_isr_table
	add r1, r1, r0	/* table entry: ISRs must have their LSB set to stay
			 * in thumb mode */
	ldm r1!,{r0,r3}	/* arg in r0, ISR in r3 */

	/*
	 * Enable interrupts while the ISR runs to allow nesting, then
	 * disable them again before signalling the end of interrupt.
	 */
	cpsie i
	blx r3		/* call ISR */
	cpsid i

spurious_continue:
	/* Signal end-of-interrupt */
	pop {r0, r1}
#if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
	bl arm_gic_eoi
#else
	bl z_soc_irq_eoi
#endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */

#ifdef CONFIG_TRACING_ISR
	bl sys_trace_isr_exit
#endif

GTEXT(z_arm_cortex_ar_irq_done)
z_arm_cortex_ar_irq_done:
	/* Decrement interrupt nesting count */
	get_cpu r2
	ldr r0, [r2, #___cpu_t_nested_OFFSET]
	sub r0, r0, #1
	str r0, [r2, #___cpu_t_nested_OFFSET]
	/* Do not context switch if exiting a nested interrupt */
	cmp r0, #0
	/* Note that this function is only called from `z_arm_svc`,
	 *	while handling irq_offload, with the modes set as follows:
	 *	```
	 *		if (cpu interrupts are nested)
	 *			mode=MODE_SYS
	 *		else
	 *			mode=MODE_IRQ
	 *	```
	 */
	bhi __EXIT_INT

	/* Restore the SYS mode stack pointer saved on the IRQ stack at entry */
	pop {r0}
	cps #MODE_SYS
	mov sp, r0
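	/*
	 * Ask the scheduler for the next thread to run; a NULL switch handle
	 * means no context switch is needed and the interrupted thread
	 * resumes.
	 */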

	/* retrieve pointer to the current thread */
	ldr r1, [r2, #___cpu_t_current_OFFSET]
	push {r1}
	mov r0, #0
	bl z_get_next_switch_handle

	pop {r1}
	cmp r0, #0
	beq __EXIT_INT

	/*
	 * Switch thread
	 * r0: new thread
	 * r1: old thread
	 */
	bl z_arm_context_switch

__EXIT_INT:

#ifdef CONFIG_STACK_SENTINEL
	bl z_check_stack_sentinel
#endif /* CONFIG_STACK_SENTINEL */

	b z_arm_cortex_ar_exit_exc

#endif