/*
 * Copyright (c) 2013-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief ARM AArch32 specific kernel interface header
 *
 * This header contains the ARM AArch32 specific kernel interface.  It is
 * included by the kernel interface architecture-abstraction header
 * (include/zephyr/arch/cpu.h).
 */

#ifndef ZEPHYR_INCLUDE_ARCH_ARM_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_ARCH_H_

/* Add include for DTS generated information */
#include <zephyr/devicetree.h>

/* ARM GPRs are often designated by two different names */
#define sys_define_gpr_with_alias(name1, name2) union { uint32_t name1, name2; }
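
/*
 * Illustrative sketch (not part of this header's API): the alias macro is
 * intended for register-context structures, so that the same 32-bit slot can
 * be referred to by either architectural name. A hypothetical context struct
 * using it could look like:
 *
 *	struct example_gpr_context {
 *		sys_define_gpr_with_alias(sp, r13);
 *		sys_define_gpr_with_alias(lr, r14);
 *	};
 *
 * Here 'sp' and 'r13' name the same storage (likewise 'lr' and 'r14');
 * 'example_gpr_context' is a made-up name used only for illustration.
 */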

#include <zephyr/arch/arm/thread.h>
#include <zephyr/arch/arm/exception.h>
#include <zephyr/arch/arm/irq.h>
#include <zephyr/arch/arm/error.h>
#include <zephyr/arch/arm/misc.h>
#include <zephyr/arch/common/addr_types.h>
#include <zephyr/arch/common/ffs.h>
#include <zephyr/arch/arm/nmi.h>
#include <zephyr/arch/arm/asm_inline.h>
#include <zephyr/arch/common/sys_bitops.h>
#if defined(CONFIG_GDBSTUB)
#include <zephyr/arch/arm/gdbstub.h>
#endif

#ifdef CONFIG_CPU_CORTEX_M
#include <zephyr/arch/arm/cortex_m/cpu.h>
#include <zephyr/arch/arm/cortex_m/memory_map.h>
#include <zephyr/arch/common/sys_io.h>
#elif defined(CONFIG_CPU_AARCH32_CORTEX_R) || defined(CONFIG_CPU_AARCH32_CORTEX_A)
#include <zephyr/arch/arm/cortex_a_r/cpu.h>
#include <zephyr/arch/arm/cortex_a_r/sys_io.h>
#if defined(CONFIG_AARCH32_ARMV8_R)
#include <zephyr/arch/arm/cortex_a_r/lib_helpers.h>
#include <zephyr/arch/arm/cortex_a_r/armv8_timer.h>
#else
#include <zephyr/arch/arm/cortex_a_r/timer.h>
#endif
#endif

#ifdef __cplusplus
extern "C" {
#endif

#ifndef _ASMLANGUAGE

#include <zephyr/fatal_types.h>

enum k_fatal_error_reason_arch {
	/* Cortex-M MEMFAULT exceptions */
	K_ERR_ARM_MEM_GENERIC = K_ERR_ARCH_START,
	K_ERR_ARM_MEM_STACKING,
	K_ERR_ARM_MEM_UNSTACKING,
	K_ERR_ARM_MEM_DATA_ACCESS,
	K_ERR_ARM_MEM_INSTRUCTION_ACCESS,
	K_ERR_ARM_MEM_FP_LAZY_STATE_PRESERVATION,

	/* Cortex-M BUSFAULT exceptions */
	K_ERR_ARM_BUS_GENERIC,
	K_ERR_ARM_BUS_STACKING,
	K_ERR_ARM_BUS_UNSTACKING,
	K_ERR_ARM_BUS_PRECISE_DATA_BUS,
	K_ERR_ARM_BUS_IMPRECISE_DATA_BUS,
	K_ERR_ARM_BUS_INSTRUCTION_BUS,
	K_ERR_ARM_BUS_FP_LAZY_STATE_PRESERVATION,

	/* Cortex-M USAGEFAULT exceptions */
	K_ERR_ARM_USAGE_GENERIC,
	K_ERR_ARM_USAGE_DIV_0,
	K_ERR_ARM_USAGE_UNALIGNED_ACCESS,
	K_ERR_ARM_USAGE_STACK_OVERFLOW,
	K_ERR_ARM_USAGE_NO_COPROCESSOR,
	K_ERR_ARM_USAGE_ILLEGAL_EXC_RETURN,
	K_ERR_ARM_USAGE_ILLEGAL_EPSR,
	K_ERR_ARM_USAGE_UNDEFINED_INSTRUCTION,

	/* Cortex-M SECURE exceptions */
	K_ERR_ARM_SECURE_GENERIC,
	K_ERR_ARM_SECURE_ENTRY_POINT,
	K_ERR_ARM_SECURE_INTEGRITY_SIGNATURE,
	K_ERR_ARM_SECURE_EXCEPTION_RETURN,
	K_ERR_ARM_SECURE_ATTRIBUTION_UNIT,
	K_ERR_ARM_SECURE_TRANSITION,
	K_ERR_ARM_SECURE_LAZY_STATE_PRESERVATION,
	K_ERR_ARM_SECURE_LAZY_STATE_ERROR,

	/* Cortex-A/R exceptions */
	K_ERR_ARM_UNDEFINED_INSTRUCTION,
	K_ERR_ARM_ALIGNMENT_FAULT,
	K_ERR_ARM_BACKGROUND_FAULT,
	K_ERR_ARM_PERMISSION_FAULT,
	K_ERR_ARM_SYNC_EXTERNAL_ABORT,
	K_ERR_ARM_ASYNC_EXTERNAL_ABORT,
	K_ERR_ARM_SYNC_PARITY_ERROR,
	K_ERR_ARM_ASYNC_PARITY_ERROR,
	K_ERR_ARM_DEBUG_EVENT,
	K_ERR_ARM_TRANSLATION_FAULT,
	K_ERR_ARM_UNSUPPORTED_EXCLUSIVE_ACCESS_FAULT
};
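
/*
 * Illustrative sketch (the exact handler signature and esf type name vary
 * between Zephyr releases; this is an assumption, not a definition made by
 * this header): an application-provided fatal error handler may match on
 * these architecture-specific reason codes in addition to the generic
 * k_fatal_error_reason values, e.g.
 *
 *	void k_sys_fatal_error_handler(unsigned int reason,
 *				       const struct arch_esf *esf)
 *	{
 *		if (reason == K_ERR_ARM_USAGE_DIV_0) {
 *			printk("thread faulted on a divide by zero\n");
 *		}
 *		k_fatal_halt(reason);
 *	}
 */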

#endif /* _ASMLANGUAGE */

/**
 * @brief Declare the ARCH_STACK_PTR_ALIGN
 *
 * Denotes the required alignment of the stack pointer on public API
 * boundaries.
 */
#ifdef CONFIG_STACK_ALIGN_DOUBLE_WORD
#define ARCH_STACK_PTR_ALIGN 8
#else
#define ARCH_STACK_PTR_ALIGN 4
#endif
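
/*
 * For illustration only: code that derives a stack pointer or stack size at
 * a public API boundary is expected to round to this alignment, e.g. with
 * the ROUND_UP() helper from <zephyr/sys/util.h>:
 *
 *	size_t adjusted = ROUND_UP(requested_size, ARCH_STACK_PTR_ALIGN);
 *
 * ('requested_size' is a hypothetical variable name used here only as an
 * example.)
 */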

/**
 * @brief Declare the minimum alignment for a thread stack
 *
 * Denotes the minimum required alignment of a thread stack.
 *
 * Note:
 * User thread stacks must respect the minimum MPU region
 * alignment requirement.
 */
#if defined(CONFIG_USERSPACE)
#define Z_THREAD_MIN_STACK_ALIGN CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE
#elif defined(CONFIG_ARM_AARCH32_MMU)
#define Z_THREAD_MIN_STACK_ALIGN CONFIG_ARM_MMU_REGION_MIN_ALIGN_AND_SIZE
#else
#define Z_THREAD_MIN_STACK_ALIGN ARCH_STACK_PTR_ALIGN
#endif

/**
 * @brief Declare a minimum MPU guard alignment and size
 *
 * This specifies the minimum alignment and size of an MPU guard region.
 * It is used to denote the guard section of the stack, if one exists.
 *
 * Note that this guard adds extra bytes to the stack object. APIs that
 * report the stack pointer and stack size take this guard size into
 * account.
 *
 * The stack is allocated, but the initial stack pointer is at the end
 * (highest address). The stack grows down to the actual allocation
 * address (lowest address). The stack guard, if present, comprises
 * the lowest MPU_GUARD_ALIGN_AND_SIZE bytes of the stack object.
 *
 * The guard region must include enough space for an exception frame
 * below the trapping region: a stack fault stores the exception data
 * (0x20 bytes) onto the stack below wherever the stack pointer refers,
 * even if that is within the guard region. The region is therefore made
 * strictly larger than this size by setting it to 0x40, which also
 * respects any power-of-two requirements.
 *
 * As the stack grows down, it will reach the end of the stack when it
 * encounters either the stack guard region, or the stack allocation
 * address.
 *
 * ----------------------- <---- Stack allocation address + stack size +
 * |                     |            MPU_GUARD_ALIGN_AND_SIZE
 * |  Some thread data   | <---- Defined when thread is created
 * |        ...          |
 * |---------------------| <---- Actual initial stack ptr
 * |  Initial Stack Ptr  |       aligned to ARCH_STACK_PTR_ALIGN
 * |        ...          |
 * |        ...          |
 * |        ...          |
 * |        ...          |
 * |        ...          |
 * |        ...          |
 * |        ...          |
 * |        ...          |
 * |  Stack Ends         |
 * |---------------------| <---- Stack Buffer Ptr from API
 * |  MPU Guard,         |
 * |     if present      |
 * ----------------------- <---- Stack Allocation address
 *
 */
#if defined(CONFIG_MPU_STACK_GUARD)
/* make sure there's more than enough space for an exception frame */
#if CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE <= 0x20
#define MPU_GUARD_ALIGN_AND_SIZE 0x40
#else
#define MPU_GUARD_ALIGN_AND_SIZE CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE
#endif
#else
#define MPU_GUARD_ALIGN_AND_SIZE 0
#endif
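
/*
 * Worked example (hypothetical configuration values): with
 * CONFIG_MPU_STACK_GUARD=y and CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE=32
 * (0x20), the guard is widened to 0x40 bytes so that a 0x20-byte exception
 * frame pushed below the stack pointer still lands inside the guard region.
 * With a minimum region size of 0x80, the guard would simply be 0x80 bytes.
 */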

/**
 * @brief Declare the MPU guard alignment and size for a thread stack
 *        that is using the Floating Point services.
 *
 * For threads that use the Floating Point services in shared registers
 * mode (CONFIG_FPU_SHARING=y), the exception stack frame may contain
 * both the basic stack frame and the FP caller-saved context upon
 * exception entry. Therefore, a wider guard region is required to
 * guarantee that stack-overflow detection will always be successful.
 */
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) \
	&& defined(CONFIG_MPU_STACK_GUARD)
#if CONFIG_MPU_STACK_GUARD_MIN_SIZE_FLOAT <= 0x20
#define MPU_GUARD_ALIGN_AND_SIZE_FLOAT 0x40
#else
#define MPU_GUARD_ALIGN_AND_SIZE_FLOAT CONFIG_MPU_STACK_GUARD_MIN_SIZE_FLOAT
#endif
#else
#define MPU_GUARD_ALIGN_AND_SIZE_FLOAT 0
#endif

/**
 * @brief Define alignment of an MPU guard
 *
 * Minimum alignment of the start address of an MPU guard, depending on
 * whether the MPU architecture enforces a size (and power-of-two) alignment
 * requirement.
 */
#if defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
#define Z_MPU_GUARD_ALIGN (MAX(MPU_GUARD_ALIGN_AND_SIZE, \
	MPU_GUARD_ALIGN_AND_SIZE_FLOAT))
#else
#define Z_MPU_GUARD_ALIGN MPU_GUARD_ALIGN_AND_SIZE
#endif

#if defined(CONFIG_USERSPACE) && \
	defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
/* This MPU requires regions to be sized to a power of two, and aligned to
 * their own size. Since an MPU region must be able to cover the entire
 * user-accessible stack buffer, we size/align to match. The privilege
 * mode stack is generated elsewhere in memory.
 */
#define ARCH_THREAD_STACK_OBJ_ALIGN(size)	Z_POW2_CEIL(size)
#define ARCH_THREAD_STACK_SIZE_ADJUST(size)	Z_POW2_CEIL(size)
#else
#define ARCH_THREAD_STACK_OBJ_ALIGN(size)	MAX(Z_THREAD_MIN_STACK_ALIGN, \
						    Z_MPU_GUARD_ALIGN)
#ifdef CONFIG_USERSPACE
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
	ROUND_UP(size, CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE)
#endif
#endif
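
/*
 * Worked example (hypothetical numbers): on an MPU that requires
 * power-of-two sizing (CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT=y) with
 * CONFIG_USERSPACE=y, a request for a 1000-byte user stack is adjusted to
 * Z_POW2_CEIL(1000) = 1024 bytes and the stack object is aligned to 1024
 * bytes, so a single MPU region can cover the entire user-accessible
 * buffer. On other MPUs, the request is only rounded up to the minimum
 * region size/alignment, e.g. ROUND_UP(1000, 32) = 1024 when
 * CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE is 32.
 */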

#ifdef CONFIG_MPU_STACK_GUARD
/* Kernel-only stacks need an MPU guard region programmed at the beginning of
 * the stack object, so align the object appropriately.
 */
#define ARCH_KERNEL_STACK_RESERVED	MPU_GUARD_ALIGN_AND_SIZE
#define ARCH_KERNEL_STACK_OBJ_ALIGN	Z_MPU_GUARD_ALIGN
#endif

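/*
 * Illustrative note (the stack name and size below are hypothetical): with
 * CONFIG_MPU_STACK_GUARD=y, a kernel-only stack such as
 *
 *	K_KERNEL_STACK_DEFINE(my_kernel_stack, 1024);
 *
 * reserves the lowest MPU_GUARD_ALIGN_AND_SIZE bytes of the stack object for
 * the guard region; that reservation is added on top of the requested usable
 * size via ARCH_KERNEL_STACK_RESERVED, in contrast to the thread-stack
 * carve-out behaviour noted below.
 */
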
/* On ARM, all MPU guards are carve-outs. */
#define ARCH_THREAD_STACK_RESERVED 0

/* Legacy case: keep the following includes inside the enclosing extern "C"
 * block when compiling as C++.
 */
#ifdef CONFIG_ARM_MPU
#ifdef CONFIG_CPU_HAS_ARM_MPU
#include <zephyr/arch/arm/mpu/arm_mpu.h>
#endif /* CONFIG_CPU_HAS_ARM_MPU */
#ifdef CONFIG_CPU_HAS_NXP_MPU
#include <zephyr/arch/arm/mpu/nxp_mpu.h>
#endif /* CONFIG_CPU_HAS_NXP_MPU */
#endif /* CONFIG_ARM_MPU */
#ifdef CONFIG_ARM_AARCH32_MMU
#include <zephyr/arch/arm/mmu/arm_mmu.h>
#endif /* CONFIG_ARM_AARCH32_MMU */

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_ARCH_ARM_ARCH_H_ */