1 /*
2 * Copyright (c) 2013-2014 Wind River Systems, Inc.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 /**
8 * @file
9 * @brief Exception/interrupt context helpers for Cortex-M CPUs
10 *
11 * Exception/interrupt context helpers.
12 */
13
14 #ifndef ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_M_EXCEPTION_H_
15 #define ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_M_EXCEPTION_H_
16
17 #include <zephyr/arch/cpu.h>
18
19 #ifdef _ASMLANGUAGE
20
21 /* nothing */
22
23 #else
24
25 #include <cmsis_core.h>
26 #include <zephyr/arch/arm/exception.h>
27 #include <zephyr/irq_offload.h>
28
29 #ifdef __cplusplus
30 extern "C" {
31 #endif
32
33 #ifdef CONFIG_IRQ_OFFLOAD
34 extern volatile irq_offload_routine_t offload_routine;
35 #endif
36
37 /* Writes to the AIRCR must be accompanied by a write of the value 0x05FA
38 * to the Vector Key field, otherwise the writes are ignored.
39 */
40 #define AIRCR_VECT_KEY_PERMIT_WRITE 0x05FAUL
41
42 /* Exception Return (EXC_RETURN) is provided in LR upon exception entry.
43 * It is used to perform an exception return and to detect possible state
44 * transition upon exception.
45 */
46
47 /* Prefix. Indicates that this is an EXC_RETURN value.
48 * This field reads as 0b11111111.
49 */
50 #define EXC_RETURN_INDICATOR_PREFIX (0xFFU << 24)
51 /* bit[0]: Exception Secure. The security domain the exception was taken to. */
52 #define EXC_RETURN_EXCEPTION_SECURE_Pos 0
53 #define EXC_RETURN_EXCEPTION_SECURE_Msk BIT(EXC_RETURN_EXCEPTION_SECURE_Pos)
54 #define EXC_RETURN_EXCEPTION_SECURE_Non_Secure 0
55 #define EXC_RETURN_EXCEPTION_SECURE_Secure EXC_RETURN_EXCEPTION_SECURE_Msk
56 /* bit[2]: Stack Pointer selection. */
57 #define EXC_RETURN_SPSEL_Pos 2
58 #define EXC_RETURN_SPSEL_Msk BIT(EXC_RETURN_SPSEL_Pos)
59 #define EXC_RETURN_SPSEL_MAIN 0
60 #define EXC_RETURN_SPSEL_PROCESS EXC_RETURN_SPSEL_Msk
61 /* bit[3]: Mode. Indicates the Mode that was stacked from. */
62 #define EXC_RETURN_MODE_Pos 3
63 #define EXC_RETURN_MODE_Msk BIT(EXC_RETURN_MODE_Pos)
64 #define EXC_RETURN_MODE_HANDLER 0
65 #define EXC_RETURN_MODE_THREAD EXC_RETURN_MODE_Msk
66 /* bit[4]: Stack frame type. Indicates whether the stack frame is a standard
67 * integer only stack frame or an extended floating-point stack frame.
68 */
69 #define EXC_RETURN_STACK_FRAME_TYPE_Pos 4
70 #define EXC_RETURN_STACK_FRAME_TYPE_Msk BIT(EXC_RETURN_STACK_FRAME_TYPE_Pos)
71 #define EXC_RETURN_STACK_FRAME_TYPE_EXTENDED 0
72 #define EXC_RETURN_STACK_FRAME_TYPE_STANDARD EXC_RETURN_STACK_FRAME_TYPE_Msk
73 /* bit[5]: Default callee register stacking. Indicates whether the default
74 * stacking rules apply, or whether the callee registers are already on the
75 * stack.
76 */
77 #define EXC_RETURN_CALLEE_STACK_Pos 5
78 #define EXC_RETURN_CALLEE_STACK_Msk BIT(EXC_RETURN_CALLEE_STACK_Pos)
79 #define EXC_RETURN_CALLEE_STACK_SKIPPED 0
80 #define EXC_RETURN_CALLEE_STACK_DEFAULT EXC_RETURN_CALLEE_STACK_Msk
81 /* bit[6]: Secure or Non-secure stack. Indicates whether a Secure or
82 * Non-secure stack is used to restore stack frame on exception return.
83 */
84 #define EXC_RETURN_RETURN_STACK_Pos 6
85 #define EXC_RETURN_RETURN_STACK_Msk BIT(EXC_RETURN_RETURN_STACK_Pos)
86 #define EXC_RETURN_RETURN_STACK_Non_Secure 0
87 #define EXC_RETURN_RETURN_STACK_Secure EXC_RETURN_RETURN_STACK_Msk
88
89 /*
90 * The current executing vector is found in the IPSR register. All
91 * IRQs and system exceptions are considered as interrupt context.
92 */
arch_is_in_isr(void)93 static ALWAYS_INLINE bool arch_is_in_isr(void)
94 {
95 return (__get_IPSR()) ? (true) : (false);
96 }
97
98 /**
99 * @brief Find out if we were in ISR context
100 * before the current exception occurred.
101 *
102 * A function that determines, based on inspecting the current
103 * ESF, whether the processor was in handler mode before entering
104 * the current exception state (i.e. nested exception) or not.
105 *
106 * Notes:
107 * - The function shall only be called from ISR context.
108 * - We do not use ARM processor state flags to determine
109 * whether we are in a nested exception; we rely on the
110 * RETPSR value stacked on the ESF. Hence, the function
111 * assumes that the ESF stack frame has a valid RETPSR
112 * value.
113 *
114 * @param esf the exception stack frame (cannot be NULL)
115 * @return true if execution state was in handler mode, before
116 * the current exception occurred, otherwise false.
117 */
arch_is_in_nested_exception(const struct arch_esf * esf)118 static ALWAYS_INLINE bool arch_is_in_nested_exception(const struct arch_esf *esf)
119 {
120 return (esf->basic.xpsr & IPSR_ISR_Msk) ? (true) : (false);
121 }
122
123 #if defined(CONFIG_USERSPACE)
124 /**
125 * @brief Is the thread in unprivileged mode
126 *
127 * @param esf the exception stack frame (unused)
128 * @return true if the current thread was in unprivileged mode
129 */
z_arm_preempted_thread_in_user_mode(const struct arch_esf * esf)130 static ALWAYS_INLINE bool z_arm_preempted_thread_in_user_mode(const struct arch_esf *esf)
131 {
132 return z_arm_thread_is_in_user_mode();
133 }
134 #endif
135
136 /**
137 * @brief Setup system exceptions
138 *
139 * Set exception priorities to conform with the BASEPRI locking mechanism.
140 * Set PendSV priority to lowest possible.
141 *
142 * Enable fault exceptions.
143 */
static ALWAYS_INLINE void z_arm_exc_setup(void)
{
	/* PendSV is set to lowest priority, regardless of it being used.
	 * This is done as the IRQ is always enabled.
	 */
	NVIC_SetPriority(PendSV_IRQn, _EXC_PENDSV_PRIO);

#ifdef CONFIG_CPU_CORTEX_M_HAS_BASEPRI
	/* Note: SVCall IRQ priority level is left to default (0)
	 * for Cortex-M variants without BASEPRI (e.g. ARMv6-M).
	 */
	NVIC_SetPriority(SVCall_IRQn, _EXC_SVC_PRIO);
#endif

#ifdef CONFIG_CPU_CORTEX_M_HAS_PROGRAMMABLE_FAULT_PRIOS
	/* All fault exceptions share the same (highest kernel-visible)
	 * priority so no fault can be preempted by a regular IRQ.
	 */
	NVIC_SetPriority(MemoryManagement_IRQn, _EXC_FAULT_PRIO);
	NVIC_SetPriority(BusFault_IRQn, _EXC_FAULT_PRIO);
	NVIC_SetPriority(UsageFault_IRQn, _EXC_FAULT_PRIO);
#if defined(CONFIG_CORTEX_M_DEBUG_MONITOR_HOOK)
	/* A user-provided debug monitor hook runs at the lowest priority so
	 * it never masks kernel interrupts.
	 */
	NVIC_SetPriority(DebugMonitor_IRQn, IRQ_PRIO_LOWEST);
#elif defined(CONFIG_CPU_CORTEX_M_HAS_DWT)
	/* DWT-based debugging treats DebugMonitor like a fault. */
	NVIC_SetPriority(DebugMonitor_IRQn, _EXC_FAULT_PRIO);
#endif
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
	NVIC_SetPriority(SecureFault_IRQn, _EXC_FAULT_PRIO);
#endif /* CONFIG_ARM_SECURE_FIRMWARE */

	/* Enable Usage, Mem, & Bus Faults */
	SCB->SHCSR |=
		SCB_SHCSR_USGFAULTENA_Msk | SCB_SHCSR_MEMFAULTENA_Msk | SCB_SHCSR_BUSFAULTENA_Msk;
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
	/* Enable Secure Fault */
	SCB->SHCSR |= SCB_SHCSR_SECUREFAULTENA_Msk;
	/* Clear BFAR before setting BusFaults to target Non-Secure state. */
	SCB->BFAR = 0;
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
#endif /* CONFIG_CPU_CORTEX_M_HAS_PROGRAMMABLE_FAULT_PRIOS */

#if defined(CONFIG_ARM_SECURE_FIRMWARE) && !defined(CONFIG_ARM_SECURE_BUSFAULT_HARDFAULT_NMI)
	/* Set NMI, Hard, and Bus Faults as Non-Secure.
	 * NMI and Bus Faults targeting the Secure state will
	 * escalate to a SecureFault or SecureHardFault.
	 * The read-modify-write preserves the other AIRCR fields; the
	 * VECTKEY value must be written back on every AIRCR write or the
	 * write is ignored by hardware.
	 */
	SCB->AIRCR =
		(SCB->AIRCR & (~(SCB_AIRCR_VECTKEY_Msk))) | SCB_AIRCR_BFHFNMINS_Msk |
		((AIRCR_VECT_KEY_PERMIT_WRITE << SCB_AIRCR_VECTKEY_Pos) & SCB_AIRCR_VECTKEY_Msk);
	/* Note: Fault conditions that would generate a SecureFault
	 * in a PE with the Main Extension instead generate a
	 * SecureHardFault in a PE without the Main Extension.
	 */
#endif /* ARM_SECURE_FIRMWARE && !ARM_SECURE_BUSFAULT_HARDFAULT_NMI */

#if defined(CONFIG_CPU_CORTEX_M_HAS_SYSTICK) && !defined(CONFIG_CORTEX_M_SYSTICK)
	/* SoC implements SysTick, but the system does not use it
	 * as driver for system timing. However, the SysTick IRQ is
	 * always enabled, so we must ensure the interrupt priority
	 * is set to a level lower than the kernel interrupts (for
	 * the assert mechanism to work properly) in case the SysTick
	 * interrupt is accidentally raised.
	 */
	NVIC_SetPriority(SysTick_IRQn, _EXC_IRQ_DEFAULT_PRIO);
#endif /* CPU_CORTEX_M_HAS_SYSTICK && ! CORTEX_M_SYSTICK */
}
207
208 /**
209 * @brief Clear Fault exceptions
210 *
211 * Clear out exceptions for Mem, Bus, Usage and Hard Faults
212 */
static ALWAYS_INLINE void z_arm_clear_faults(void)
{
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* Baseline (ARMv6-M / ARMv8-M Baseline) implementations have no
	 * CFSR/HFSR fault status registers, so there is nothing to clear.
	 */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	/* Reset all faults */
	/* CFSR status bits are write-one-to-clear; writing the full
	 * Usage/Mem/Bus fault masks clears every pending status flag.
	 */
	SCB->CFSR = SCB_CFSR_USGFAULTSR_Msk | SCB_CFSR_MEMFAULTSR_Msk | SCB_CFSR_BUSFAULTSR_Msk;

	/* Clear all Hard Faults - HFSR is write-one-to-clear */
	SCB->HFSR = 0xffffffff;
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
}
226
227 /**
228 * @brief Set z_arm_coredump_fault_sp to stack pointer value expected by GDB
229 *
230 * @param esf exception frame
231 * @param exc_return EXC_RETURN value present in LR after exception entry.
232 */
static ALWAYS_INLINE void z_arm_set_fault_sp(const struct arch_esf *esf, uint32_t exc_return)
{
#ifdef CONFIG_DEBUG_COREDUMP
	/* Start from the address of the stacked exception frame itself. */
	z_arm_coredump_fault_sp = POINTER_TO_UINT(esf);
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) || defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* Gdb expects a stack pointer that does not include the exception stack frame in order to
	 * unwind. So adjust the stack pointer accordingly.
	 */
	z_arm_coredump_fault_sp += sizeof(esf->basic);

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	/* Assess whether thread had been using the FP registers and add size of additional
	 * registers if necessary
	 */
	/* EXC_RETURN bit[4] == 0 indicates an extended (FP) stack frame
	 * was pushed on exception entry.
	 */
	if ((exc_return & EXC_RETURN_STACK_FRAME_TYPE_STANDARD) ==
	    EXC_RETURN_STACK_FRAME_TYPE_EXTENDED) {
		z_arm_coredump_fault_sp += sizeof(esf->fpu);
	}
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

#if !(defined(CONFIG_ARMV8_M_MAINLINE) || defined(CONFIG_ARMV8_M_BASELINE))
	/* NOTE(review): SCB_CCR_STKALIGN_Msk (bit 9) is reused here to test
	 * bit[9] of the stacked xPSR, which flags that the hardware inserted
	 * a 4-byte alignment padding word on exception entry.
	 */
	if ((esf->basic.xpsr & SCB_CCR_STKALIGN_Msk) == SCB_CCR_STKALIGN_Msk) {
		/* Adjust stack alignment after PSR bit[9] detected */
		z_arm_coredump_fault_sp |= 0x4;
	}
#endif /* !CONFIG_ARMV8_M_MAINLINE */

#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE || CONFIG_ARMV6_M_ARMV8_M_BASELINE */
#endif /* CONFIG_DEBUG_COREDUMP */
}
263
264 #ifdef __cplusplus
265 }
266 #endif
267
268 #endif /* _ASMLANGUAGE */
269
270 #endif /* ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_M_EXCEPTION_H_ */
271