1 /*
2  * Copyright (c) 2013-2014 Wind River Systems, Inc.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 /**
8  * @file
9  * @brief Exception/interrupt context helpers for Cortex-M CPUs
10  *
11  * Exception/interrupt context helpers.
12  */
13 
14 #ifndef ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_M_EXCEPTION_H_
15 #define ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_M_EXCEPTION_H_
16 
17 #include <zephyr/arch/cpu.h>
18 
19 #ifdef _ASMLANGUAGE
20 
21 /* nothing */
22 
23 #else
24 
25 #include <cmsis_core.h>
26 #include <zephyr/arch/arm/exception.h>
27 #include <zephyr/irq_offload.h>
28 
29 #ifdef __cplusplus
30 extern "C" {
31 #endif
32 
33 #ifdef CONFIG_IRQ_OFFLOAD
34 extern volatile irq_offload_routine_t offload_routine;
35 #endif
36 
37 /* Writes to the AIRCR must be accompanied by a write of the value 0x05FA
38  * to the Vector Key field, otherwise the writes are ignored.
39  */
40 #define AIRCR_VECT_KEY_PERMIT_WRITE 0x05FAUL
41 
42 /* Exception Return (EXC_RETURN) is provided in LR upon exception entry.
43  * It is used to perform an exception return and to detect possible state
44  * transition upon exception.
45  */
46 
47 /* Prefix. Indicates that this is an EXC_RETURN value.
48  * This field reads as 0b11111111.
49  */
50 #define EXC_RETURN_INDICATOR_PREFIX     (0xFF << 24)
51 /* bit[0]: Exception Secure. The security domain the exception was taken to. */
52 #define EXC_RETURN_EXCEPTION_SECURE_Pos 0
53 #define EXC_RETURN_EXCEPTION_SECURE_Msk \
54 		BIT(EXC_RETURN_EXCEPTION_SECURE_Pos)
55 #define EXC_RETURN_EXCEPTION_SECURE_Non_Secure 0
56 #define EXC_RETURN_EXCEPTION_SECURE_Secure EXC_RETURN_EXCEPTION_SECURE_Msk
57 /* bit[2]: Stack Pointer selection. */
58 #define EXC_RETURN_SPSEL_Pos 2
59 #define EXC_RETURN_SPSEL_Msk BIT(EXC_RETURN_SPSEL_Pos)
60 #define EXC_RETURN_SPSEL_MAIN 0
61 #define EXC_RETURN_SPSEL_PROCESS EXC_RETURN_SPSEL_Msk
62 /* bit[3]: Mode. Indicates the Mode that was stacked from. */
63 #define EXC_RETURN_MODE_Pos 3
64 #define EXC_RETURN_MODE_Msk BIT(EXC_RETURN_MODE_Pos)
65 #define EXC_RETURN_MODE_HANDLER 0
66 #define EXC_RETURN_MODE_THREAD EXC_RETURN_MODE_Msk
67 /* bit[4]: Stack frame type. Indicates whether the stack frame is a standard
68  * integer only stack frame or an extended floating-point stack frame.
69  */
70 #define EXC_RETURN_STACK_FRAME_TYPE_Pos 4
71 #define EXC_RETURN_STACK_FRAME_TYPE_Msk BIT(EXC_RETURN_STACK_FRAME_TYPE_Pos)
72 #define EXC_RETURN_STACK_FRAME_TYPE_EXTENDED 0
73 #define EXC_RETURN_STACK_FRAME_TYPE_STANDARD EXC_RETURN_STACK_FRAME_TYPE_Msk
74 /* bit[5]: Default callee register stacking. Indicates whether the default
75  * stacking rules apply, or whether the callee registers are already on the
76  * stack.
77  */
78 #define EXC_RETURN_CALLEE_STACK_Pos 5
79 #define EXC_RETURN_CALLEE_STACK_Msk BIT(EXC_RETURN_CALLEE_STACK_Pos)
80 #define EXC_RETURN_CALLEE_STACK_SKIPPED 0
81 #define EXC_RETURN_CALLEE_STACK_DEFAULT EXC_RETURN_CALLEE_STACK_Msk
82 /* bit[6]: Secure or Non-secure stack. Indicates whether a Secure or
83  * Non-secure stack is used to restore stack frame on exception return.
84  */
85 #define EXC_RETURN_RETURN_STACK_Pos 6
86 #define EXC_RETURN_RETURN_STACK_Msk BIT(EXC_RETURN_RETURN_STACK_Pos)
87 #define EXC_RETURN_RETURN_STACK_Non_Secure 0
88 #define EXC_RETURN_RETURN_STACK_Secure EXC_RETURN_RETURN_STACK_Msk
89 
90 /*
91  * The current executing vector is found in the IPSR register. All
92  * IRQs and system exceptions are considered as interrupt context.
93  */
arch_is_in_isr(void)94 static ALWAYS_INLINE bool arch_is_in_isr(void)
95 {
96 	return (__get_IPSR()) ? (true) : (false);
97 }
98 
99 /**
100  * @brief Find out if we were in ISR context
101  *        before the current exception occurred.
102  *
103  * A function that determines, based on inspecting the current
104  * ESF, whether the processor was in handler mode before entering
105  * the current exception state (i.e. nested exception) or not.
106  *
107  * Notes:
108  * - The function shall only be called from ISR context.
109  * - We do not use ARM processor state flags to determine
110  *   whether we are in a nested exception; we rely on the
111  *   RETPSR value stacked on the ESF. Hence, the function
112  *   assumes that the ESF stack frame has a valid RETPSR
113  *   value.
114  *
115  * @param esf the exception stack frame (cannot be NULL)
116  * @return true if execution state was in handler mode, before
117  *              the current exception occurred, otherwise false.
118  */
arch_is_in_nested_exception(const struct arch_esf * esf)119 static ALWAYS_INLINE bool arch_is_in_nested_exception(const struct arch_esf *esf)
120 {
121 	return (esf->basic.xpsr & IPSR_ISR_Msk) ? (true) : (false);
122 }
123 
124 #if defined(CONFIG_USERSPACE)
125 /**
126  * @brief Is the thread in unprivileged mode
127  *
128  * @param esf the exception stack frame (unused)
129  * @return true if the current thread was in unprivileged mode
130  */
z_arm_preempted_thread_in_user_mode(const struct arch_esf * esf)131 static ALWAYS_INLINE bool z_arm_preempted_thread_in_user_mode(const struct arch_esf *esf)
132 {
133 	return z_arm_thread_is_in_user_mode();
134 }
135 #endif
136 
137 /**
138  * @brief Setup system exceptions
139  *
140  * Set exception priorities to conform with the BASEPRI locking mechanism.
141  * Set PendSV priority to lowest possible.
142  *
143  * Enable fault exceptions.
144  */
static ALWAYS_INLINE void z_arm_exc_setup(void)
{
	/* PendSV is set to lowest priority, regardless of it being used.
	 * This is done as the IRQ is always enabled.
	 */
	NVIC_SetPriority(PendSV_IRQn, _EXC_PENDSV_PRIO);

#ifdef CONFIG_CPU_CORTEX_M_HAS_BASEPRI
	/* Note: SVCall IRQ priority level is left to default (0)
	 * for Cortex-M variants without BASEPRI (e.g. ARMv6-M).
	 */
	NVIC_SetPriority(SVCall_IRQn, _EXC_SVC_PRIO);
#endif

#ifdef CONFIG_CPU_CORTEX_M_HAS_PROGRAMMABLE_FAULT_PRIOS
	/* All fault exceptions share the same priority level, so no fault
	 * handler can preempt another.
	 */
	NVIC_SetPriority(MemoryManagement_IRQn, _EXC_FAULT_PRIO);
	NVIC_SetPriority(BusFault_IRQn, _EXC_FAULT_PRIO);
	NVIC_SetPriority(UsageFault_IRQn, _EXC_FAULT_PRIO);
#if defined(CONFIG_CORTEX_M_DEBUG_MONITOR_HOOK)
	/* When a custom debug-monitor hook is in use, demote the Debug
	 * Monitor exception to the lowest priority so it never preempts
	 * kernel-level interrupts.
	 */
	NVIC_SetPriority(DebugMonitor_IRQn, IRQ_PRIO_LOWEST);
#elif defined(CONFIG_CPU_CORTEX_M_HAS_DWT)
	NVIC_SetPriority(DebugMonitor_IRQn, _EXC_FAULT_PRIO);
#endif
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
	NVIC_SetPriority(SecureFault_IRQn, _EXC_FAULT_PRIO);
#endif /* CONFIG_ARM_SECURE_FIRMWARE */

	/* Enable Usage, Mem, & Bus Faults */
	SCB->SHCSR |= SCB_SHCSR_USGFAULTENA_Msk | SCB_SHCSR_MEMFAULTENA_Msk |
		      SCB_SHCSR_BUSFAULTENA_Msk;
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
	/* Enable Secure Fault */
	SCB->SHCSR |= SCB_SHCSR_SECUREFAULTENA_Msk;
	/* Clear BFAR before setting BusFaults to target Non-Secure state. */
	SCB->BFAR = 0;
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
#endif /* CONFIG_CPU_CORTEX_M_HAS_PROGRAMMABLE_FAULT_PRIOS */

#if defined(CONFIG_ARM_SECURE_FIRMWARE) && \
	!defined(CONFIG_ARM_SECURE_BUSFAULT_HARDFAULT_NMI)
	/* Set NMI, Hard, and Bus Faults as Non-Secure.
	 * NMI and Bus Faults targeting the Secure state will
	 * escalate to a SecureFault or SecureHardFault.
	 *
	 * The read-modify-write must re-supply the VECTKEY value
	 * (0x05FA) on every write, or the AIRCR write is ignored.
	 */
	SCB->AIRCR =
		(SCB->AIRCR & (~(SCB_AIRCR_VECTKEY_Msk)))
		| SCB_AIRCR_BFHFNMINS_Msk
		| ((AIRCR_VECT_KEY_PERMIT_WRITE << SCB_AIRCR_VECTKEY_Pos) &
			SCB_AIRCR_VECTKEY_Msk);
	/* Note: Fault conditions that would generate a SecureFault
	 * in a PE with the Main Extension instead generate a
	 * SecureHardFault in a PE without the Main Extension.
	 */
#endif /* ARM_SECURE_FIRMWARE && !ARM_SECURE_BUSFAULT_HARDFAULT_NMI */

#if defined(CONFIG_CPU_CORTEX_M_HAS_SYSTICK) && \
	!defined(CONFIG_CORTEX_M_SYSTICK)
	/* SoC implements SysTick, but the system does not use it
	 * as driver for system timing. However, the SysTick IRQ is
	 * always enabled, so we must ensure the interrupt priority
	 * is set to a level lower than the kernel interrupts (for
	 * the assert mechanism to work properly) in case the SysTick
	 * interrupt is accidentally raised.
	 */
	NVIC_SetPriority(SysTick_IRQn, _EXC_IRQ_DEFAULT_PRIO);
#endif /* CPU_CORTEX_M_HAS_SYSTICK && ! CORTEX_M_SYSTICK */

}
213 
214 /**
215  * @brief Clear Fault exceptions
216  *
217  * Clear out exceptions for Mem, Bus, Usage and Hard Faults
218  */
static ALWAYS_INLINE void z_arm_clear_faults(void)
{
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* Baseline implementations have no configurable fault status
	 * registers (CFSR/HFSR) to clear; intentionally empty.
	 */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	/* Reset all faults - CFSR sub-registers are write-one-to-clear */
	SCB->CFSR = SCB_CFSR_USGFAULTSR_Msk |
		    SCB_CFSR_MEMFAULTSR_Msk |
		    SCB_CFSR_BUSFAULTSR_Msk;

	/* Clear all Hard Faults - HFSR is write-one-to-clear */
	SCB->HFSR = 0xffffffff;
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
}
234 
235 /**
236  * @brief Set z_arm_coredump_fault_sp to stack pointer value expected by GDB
237  *
238  * @param esf exception frame
239  * @param exc_return EXC_RETURN value present in LR after exception entry.
240  */
static ALWAYS_INLINE void z_arm_set_fault_sp(const struct arch_esf *esf, uint32_t exc_return)
{
#ifdef CONFIG_DEBUG_COREDUMP
	/* Start from the address of the exception stack frame itself. */
	z_arm_coredump_fault_sp = POINTER_TO_UINT(esf);
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) || defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* Gdb expects a stack pointer that does not include the exception stack frame in order to
	 * unwind. So adjust the stack pointer accordingly.
	 */
	z_arm_coredump_fault_sp += sizeof(esf->basic);

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	/* Assess whether thread had been using the FP registers and add size of additional
	 * registers if necessary.
	 * EXC_RETURN bit[4] cleared (EXTENDED) means the hardware pushed an
	 * FP-extended frame on exception entry.
	 */
	if ((exc_return & EXC_RETURN_STACK_FRAME_TYPE_STANDARD) ==
			EXC_RETURN_STACK_FRAME_TYPE_EXTENDED) {
		z_arm_coredump_fault_sp += sizeof(esf->fpu);
	}
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

#if !(defined(CONFIG_ARMV8_M_MAINLINE) || defined(CONFIG_ARMV8_M_BASELINE))
	/* xPSR bit[9] indicates the stacker inserted a 4-byte alignment pad;
	 * SCB_CCR_STKALIGN_Msk is reused here purely as a bit[9] mask.
	 */
	if ((esf->basic.xpsr & SCB_CCR_STKALIGN_Msk) == SCB_CCR_STKALIGN_Msk) {
		/* Adjust stack alignment after PSR bit[9] detected */
		z_arm_coredump_fault_sp |= 0x4;
	}
#endif /* !CONFIG_ARMV8_M_MAINLINE */

#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE || CONFIG_ARMV6_M_ARMV8_M_BASELINE */
#endif /* CONFIG_DEBUG_COREDUMP */
}
271 
272 /**
273  * @brief Assess whether a debug monitor event should be treated as an error
274  *
275  * This routine checks the status of a debug_monitor() exception, and
276  * evaluates whether this needs to be considered as a processor error.
277  *
278  * @return true if the DM exception is a processor error, otherwise false
279  */
280 bool z_arm_debug_monitor_event_error_check(void);
281 
282 #ifdef __cplusplus
283 }
284 #endif
285 
286 #endif /* _ASMLANGUAGE */
287 
288 #endif /* ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_M_EXCEPTION_H_ */
289