/*
 * Copyright (c) 2013-2014 Wind River Systems, Inc.
 * Copyright (c) 2020 Stephanos Ioannidis <root@stephanos.io>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief ARM Cortex-M and Cortex-R wrapper for ISRs with parameter
 *
 * Wrapper installed in vector table for handling dynamic interrupts that accept
 * a parameter.
 */
/*
 * Tell armclang that stack alignment is ensured.
 */
.eabi_attribute Tag_ABI_align_preserved, 1

#include <toolchain.h>
#include <linker/sections.h>
#include <offsets_short.h>
#include <arch/cpu.h>
#include <sw_isr_table.h>


_ASM_FILE_PROLOGUE

GDATA(_sw_isr_table)

GTEXT(_isr_wrapper)
GTEXT(z_arm_int_exit)

/**
 *
 * @brief Wrapper around ISRs when inserted in software ISR table
 *
 * When inserted in the vector table, _isr_wrapper() demuxes the ISR table
 * using the running interrupt number as the index, and invokes the registered
 * ISR with its corresponding argument. When returning from the ISR, it
 * determines if a context switch needs to happen (see documentation for
 * z_arm_pendsv()) and pends the PendSV exception if so: the latter will
 * perform the context switch itself.
 *
 * @return N/A
 */
SECTION_FUNC(TEXT, _isr_wrapper)

#if defined(CONFIG_CPU_CORTEX_M)
	push {r0,lr}		/* r0, lr are now the first items on the stack */
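	/*
	 * Note: on exception entry the Cortex-M hardware has already stacked
	 * r0-r3, r12, lr, pc and xPSR. Here lr holds the EXC_RETURN value,
	 * which must survive the ISR call below; r0 is pushed alongside it
	 * primarily to keep the stack 8-byte aligned per the AAPCS.
	 */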
#elif defined(CONFIG_CPU_CORTEX_R)

#if defined(CONFIG_USERSPACE)
	/* See comment below about svc stack usage */
	cps #MODE_SVC
	push {r0}

	/* Determine if interrupted thread was in user context */
	cps #MODE_IRQ
	mrs r0, spsr
	and r0, #MODE_MASK
	cmp r0, #MODE_USR
	bne isr_system_thread

	ldr r0, =_kernel
	ldr r0, [r0, #_kernel_offset_to_current]

	/* Save away user stack pointer */
	cps #MODE_SYS
	str sp, [r0, #_thread_offset_to_sp_usr] /* sp_usr */

	/* Switch to privileged stack */
	ldr sp, [r0, #_thread_offset_to_priv_stack_end] /* priv stack end */
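	/*
	 * The interrupted user thread's sp_usr is saved in the thread struct
	 * so that the interrupt exit path can restore it before dropping back
	 * to user mode; meanwhile the handler runs on the thread's privileged
	 * stack.
	 */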

isr_system_thread:
	cps #MODE_SVC
	pop {r0}
	cps #MODE_IRQ
#endif

	/*
	 * Save away r0-r3, r12 and lr_irq for the previous context to the
	 * process stack since they are clobbered here.  Also, save away lr
	 * and spsr_irq since we may swap processes and return to a different
	 * thread.
	 */
	sub lr, lr, #4
	srsdb #MODE_SYS!
	cps #MODE_SYS
	push {r0-r3, r12, lr}
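	/*
	 * Note: on IRQ entry, lr_irq holds the interrupted instruction's
	 * address plus 4, so it is adjusted by -4 above to form the actual
	 * return address. SRSDB then stores that lr together with spsr_irq
	 * onto the SYS-mode stack (with writeback), and the remaining
	 * caller-saved registers are pushed there as well.
	 */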

	/*
	 * Use SVC mode stack for predictable interrupt behaviour; running ISRs
	 * in the SYS/USR mode stack (i.e. interrupted thread stack) leaves the
	 * ISR stack usage at the mercy of the interrupted thread and this can
	 * be prone to stack overflows if any of the ISRs and/or preemptible
	 * threads have high stack usage.
	 *
	 * When userspace is enabled, this also prevents leaking privileged
	 * information to the user mode.
	 */
	cps #MODE_SVC

	/*
	 * Preserve lr_svc which may contain the branch return address of the
	 * interrupted context in case of a nested interrupt. This value will
	 * be restored prior to exiting the interrupt in z_arm_int_exit.
	 */
	push {lr}

	/* Align stack at double-word boundary */
	and r3, sp, #4
	sub sp, sp, r3
	push {r2, r3}
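	/*
	 * Alignment trick: 'and r3, sp, #4' extracts bit 2 of sp, so r3 is 4
	 * when sp is only word aligned and 0 when it is already double-word
	 * aligned. Subtracting r3 forces 8-byte alignment, and pushing r3
	 * (with r2 as padding) records the adjustment so it can be undone on
	 * exit.
	 */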

	/* Increment interrupt nesting count */
	ldr r2, =_kernel
	ldr r0, [r2, #_kernel_offset_to_nested]
	add r0, r0, #1
	str r0, [r2, #_kernel_offset_to_nested]
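	/*
	 * The nesting counter lets the exit path tell an outermost interrupt
	 * apart from a nested one; rescheduling is only considered once the
	 * count drops back to zero.
	 */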
#endif /* CONFIG_CPU_CORTEX_M */

#ifdef CONFIG_TRACING_ISR
	bl sys_trace_isr_enter
#endif

#ifdef CONFIG_PM
	/*
	 * All interrupts are disabled when handling idle wakeup.  For tickless
	 * idle, this ensures that the calculation and programming of the
	 * device for the next timer deadline is not interrupted.  For
	 * non-tickless idle, this ensures that the clearing of the kernel idle
	 * state is not interrupted.  In each case, z_pm_save_idle_exit
	 * is called with interrupts disabled.
	 */

#if defined(CONFIG_CPU_CORTEX_M)
	/*
	 * Disable interrupts to prevent nesting while exiting idle state. This
	 * is only necessary for the Cortex-M because it is the only ARM
	 * architecture variant that automatically enables interrupts when
	 * entering an ISR.
	 */
	cpsid i  /* PRIMASK = 1 */
#endif

	/* is this a wakeup from idle? */
	ldr r2, =_kernel
	/* requested idle duration, in ticks */
	ldr r0, [r2, #_kernel_offset_to_idle]
	cmp r0, #0

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	beq _idle_state_cleared
	movs.n r1, #0
	/* clear kernel idle state */
	str r1, [r2, #_kernel_offset_to_idle]
	bl z_pm_save_idle_exit
_idle_state_cleared:

#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	ittt ne
	movne	r1, #0
	/* clear kernel idle state */
	strne	r1, [r2, #_kernel_offset_to_idle]
	blne	z_pm_save_idle_exit
#elif defined(CONFIG_ARMV7_R)
	beq _idle_state_cleared
	movs r1, #0
	/* clear kernel idle state */
	str r1, [r2, #_kernel_offset_to_idle]
	bl z_pm_save_idle_exit
_idle_state_cleared:
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */

#if defined(CONFIG_CPU_CORTEX_M)
	cpsie i		/* re-enable interrupts (PRIMASK = 0) */
#endif

#endif /* CONFIG_PM */

#if defined(CONFIG_CPU_CORTEX_M)
	mrs r0, IPSR	/* get exception number */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	ldr r1, =16
	subs r0, r1	/* get IRQ number */
	lsls r0, #3	/* table is 8-byte wide */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	sub r0, r0, #16	/* get IRQ number */
	lsl r0, r0, #3	/* table is 8-byte wide */
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
#elif defined(CONFIG_CPU_CORTEX_R)
	/* Get active IRQ number from the interrupt controller */
#if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
	bl arm_gic_get_active
#else
	bl z_soc_irq_get_active
#endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */
	push {r0, r1}
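	/*
	 * The active IRQ id returned in r0 is pushed so it is still available
	 * for the end-of-interrupt call once the ISR returns (r1 is pushed
	 * alongside it as padding to keep the stack 8-byte aligned); the
	 * working copy in r0 is then scaled into a table byte offset below.
	 */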
	lsl r0, r0, #3	/* table is 8-byte wide */
#else
#error Unknown ARM architecture
#endif /* CONFIG_CPU_CORTEX_M */

#if !defined(CONFIG_CPU_CORTEX_M)
	/*
	 * Enable interrupts to allow nesting.
	 *
	 * Note that interrupts are disabled up to this point on the ARM
	 * architecture variants other than the Cortex-M. It is also important
	 * to note that most interrupt controllers require that nested
	 * interrupts are handled after the active interrupt is acknowledged;
	 * this is done through the `get_active` interrupt controller
	 * interface function.
	 */
	cpsie i

	/*
	 * Skip calling the isr if it is a spurious interrupt.
	 */
	mov r1, #CONFIG_NUM_IRQS
	lsl r1, r1, #3
	cmp r0, r1
	bge spurious_continue
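	/*
	 * r0 already holds the table byte offset (IRQ id * 8), so it is
	 * compared against CONFIG_NUM_IRQS scaled by the same factor; any id
	 * at or beyond the table size is treated as spurious.
	 */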
#endif /* !CONFIG_CPU_CORTEX_M */

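	/*
	 * Each _sw_isr_table entry is two 32-bit words, hence the 8-byte
	 * scaling above. Conceptually (see sw_isr_table.h):
	 *
	 *   struct _isr_table_entry {
	 *       const void *arg;           -- loaded into r0 below
	 *       void (*isr)(const void *); -- loaded into r3 and called
	 *   };
	 */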
	ldr r1, =_sw_isr_table
	add r1, r1, r0	/* table entry: ISRs must have their LSB set to stay
			 * in thumb mode */

	ldm r1!,{r0,r3}	/* arg in r0, ISR in r3 */
	blx r3		/* call ISR */

#if defined(CONFIG_CPU_CORTEX_R)
spurious_continue:
	/* Signal end-of-interrupt */
	pop {r0, r1}
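	/*
	 * The pop restores the active IRQ id saved earlier into r0, which the
	 * end-of-interrupt call below takes as its argument so the controller
	 * can mark the interrupt as handled.
	 */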
#if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
	bl arm_gic_eoi
#else
	bl z_soc_irq_eoi
#endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */
#endif /* CONFIG_CPU_CORTEX_R */

#ifdef CONFIG_TRACING_ISR
	bl sys_trace_isr_exit
#endif

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	pop {r0, r3}
	mov lr, r3
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	pop {r0, lr}
#elif defined(CONFIG_ARMV7_R)
	/*
	 * r0 and lr_irq were saved on the process stack since a swap could
	 * happen.  z_arm_int_exit will handle getting those values back
	 * from the process stack to return to the correct location
	 * so there is no need to do anything here.
	 */
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */

	/* Use 'bx' instead of 'b' because 'bx' can jump further, and use
	 * 'bx' instead of 'blx' because exception return is done in
	 * z_arm_int_exit() */
	ldr r1, =z_arm_int_exit
	bx r1