/*
 * Copyright (c) 2013-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief ARM AArch32 specific kernel interface header
 *
 * This header contains the ARM AArch32 specific kernel interface. It is
 * included by the kernel interface architecture-abstraction header
 * (include/arm/cpu.h)
 */

#ifndef ZEPHYR_INCLUDE_ARCH_ARM_AARCH32_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_AARCH32_ARCH_H_

/* Add include for DTS generated information */
#include <zephyr/devicetree.h>

/* ARM GPRs are often designated by two different names */
#define sys_define_gpr_with_alias(name1, name2) union { uint32_t name1, name2; }

#include <zephyr/arch/arm/thread.h>
#include <zephyr/arch/arm/exc.h>
#include <zephyr/arch/arm/irq.h>
#include <zephyr/arch/arm/error.h>
#include <zephyr/arch/arm/misc.h>
#include <zephyr/arch/common/addr_types.h>
#include <zephyr/arch/common/ffs.h>
#include <zephyr/arch/arm/nmi.h>
#include <zephyr/arch/arm/asm_inline.h>
#include <zephyr/arch/common/sys_bitops.h>

#ifdef CONFIG_CPU_CORTEX_M
#include <zephyr/arch/arm/cortex_m/cpu.h>
#include <zephyr/arch/arm/cortex_m/memory_map.h>
#include <zephyr/arch/common/sys_io.h>
#elif defined(CONFIG_CPU_AARCH32_CORTEX_R) || defined(CONFIG_CPU_AARCH32_CORTEX_A)
#include <zephyr/arch/arm/cortex_a_r/cpu.h>
#include <zephyr/arch/arm/cortex_a_r/sys_io.h>
#if defined(CONFIG_AARCH32_ARMV8_R)
#include <zephyr/arch/arm/cortex_a_r/lib_helpers.h>
#include <zephyr/arch/arm/cortex_a_r/armv8_timer.h>
#else
#include <zephyr/arch/arm/cortex_a_r/timer.h>
#endif
#endif

#ifdef __cplusplus
extern "C" {
#endif

#ifndef _ASMLANGUAGE

#include <zephyr/fatal_types.h>

enum k_fatal_error_reason_arch {
	/* Cortex-M MEMFAULT exceptions */
	K_ERR_ARM_MEM_GENERIC = K_ERR_ARCH_START,
	K_ERR_ARM_MEM_STACKING,
	K_ERR_ARM_MEM_UNSTACKING,
	K_ERR_ARM_MEM_DATA_ACCESS,
	K_ERR_ARM_MEM_INSTRUCTION_ACCESS,
	K_ERR_ARM_MEM_FP_LAZY_STATE_PRESERVATION,

	/* Cortex-M BUSFAULT exceptions */
	K_ERR_ARM_BUS_GENERIC,
	K_ERR_ARM_BUS_STACKING,
	K_ERR_ARM_BUS_UNSTACKING,
	K_ERR_ARM_BUS_PRECISE_DATA_BUS,
	K_ERR_ARM_BUS_IMPRECISE_DATA_BUS,
	K_ERR_ARM_BUS_INSTRUCTION_BUS,
	K_ERR_ARM_BUS_FP_LAZY_STATE_PRESERVATION,

	/* Cortex-M USAGEFAULT exceptions */
	K_ERR_ARM_USAGE_GENERIC,
	K_ERR_ARM_USAGE_DIV_0,
	K_ERR_ARM_USAGE_UNALIGNED_ACCESS,
	K_ERR_ARM_USAGE_STACK_OVERFLOW,
	K_ERR_ARM_USAGE_NO_COPROCESSOR,
	K_ERR_ARM_USAGE_ILLEGAL_EXC_RETURN,
	K_ERR_ARM_USAGE_ILLEGAL_EPSR,
	K_ERR_ARM_USAGE_UNDEFINED_INSTRUCTION,

	/* Cortex-M SECURE exceptions */
	K_ERR_ARM_SECURE_GENERIC,
	K_ERR_ARM_SECURE_ENTRY_POINT,
	K_ERR_ARM_SECURE_INTEGRITY_SIGNATURE,
	K_ERR_ARM_SECURE_EXCEPTION_RETURN,
	K_ERR_ARM_SECURE_ATTRIBUTION_UNIT,
	K_ERR_ARM_SECURE_TRANSITION,
	K_ERR_ARM_SECURE_LAZY_STATE_PRESERVATION,
	K_ERR_ARM_SECURE_LAZY_STATE_ERROR,

	/* Cortex-A/R exceptions */
	K_ERR_ARM_UNDEFINED_INSTRUCTION,
	K_ERR_ARM_ALIGNMENT_FAULT,
	K_ERR_ARM_BACKGROUND_FAULT,
	K_ERR_ARM_PERMISSION_FAULT,
	K_ERR_ARM_SYNC_EXTERNAL_ABORT,
	K_ERR_ARM_ASYNC_EXTERNAL_ABORT,
	K_ERR_ARM_SYNC_PARITY_ERROR,
	K_ERR_ARM_ASYNC_PARITY_ERROR,
	K_ERR_ARM_DEBUG_EVENT,
	K_ERR_ARM_TRANSLATION_FAULT,
	K_ERR_ARM_UNSUPPORTED_EXCLUSIVE_ACCESS_FAULT
};

#endif /* _ASMLANGUAGE */
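
/*
 * Illustrative sketch (an assumption for documentation purposes, not part of
 * the API defined by this header): an application can react to the
 * architecture-specific reason codes above by overriding the weak
 * fatal-error hook declared in <zephyr/fatal.h>. The exception stack frame
 * parameter type differs between Zephyr versions (z_arch_esf_t in older
 * trees, struct arch_esf in newer ones), so check the signature against the
 * tree in use.
 *
 *	#include <zephyr/fatal.h>
 *	#include <zephyr/sys/printk.h>
 *
 *	void k_sys_fatal_error_handler(unsigned int reason,
 *				       const struct arch_esf *esf)
 *	{
 *		if (reason == K_ERR_ARM_USAGE_DIV_0) {
 *			printk("integer division by zero trapped\n");
 *		}
 *		// Halt, as the default handler would; never returns.
 *		k_fatal_halt(reason);
 *	}
 */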

/**
 * @brief Declare the ARCH_STACK_PTR_ALIGN
 *
 * Denotes the required alignment of the stack pointer on public API
 * boundaries.
 */
#ifdef CONFIG_STACK_ALIGN_DOUBLE_WORD
#define ARCH_STACK_PTR_ALIGN 8
#else
#define ARCH_STACK_PTR_ALIGN 4
#endif

/**
 * @brief Declare the minimum alignment for a thread stack
 *
 * Denotes the minimum required alignment of a thread stack.
 *
 * Note:
 * User thread stacks must respect the minimum MPU region
 * alignment requirement.
 */
#if defined(CONFIG_USERSPACE)
#define Z_THREAD_MIN_STACK_ALIGN CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE
#elif defined(CONFIG_ARM_AARCH32_MMU)
#define Z_THREAD_MIN_STACK_ALIGN CONFIG_ARM_MMU_REGION_MIN_ALIGN_AND_SIZE
#else
#define Z_THREAD_MIN_STACK_ALIGN ARCH_STACK_PTR_ALIGN
#endif

/**
 * @brief Declare a minimum MPU guard alignment and size
 *
 * This specifies the minimum MPU guard alignment/size for the MPU. This
 * will be used to denote the guard section of the stack, if it exists.
 *
 * One key note is that this guard results in extra bytes being added to
 * the stack. APIs which give the stack ptr and stack size will take this
 * guard size into account.
 *
 * Stack is allocated, but initial stack pointer is at the end
 * (highest address). Stack grows down to the actual allocation
 * address (lowest address). Stack guard, if present, will comprise
 * the lowest MPU_GUARD_ALIGN_AND_SIZE bytes of the stack.
 *
 * The guard region must include enough space for an exception frame
 * below the trapping region, as a stack fault will end up storing
 * the exception data (0x20 bytes) onto the stack below wherever
 * the stack pointer refers, even if that is within the guard region,
 * so we make sure the region is strictly larger than this size by
 * setting it to 0x40 (to respect any power-of-two requirements).
 *
 * As the stack grows down, it will reach the end of the stack when it
 * encounters either the stack guard region, or the stack allocation
 * address.
 *
 * ----------------------- <---- Stack allocation address + stack size +
 * |                     |             MPU_GUARD_ALIGN_AND_SIZE
 * |  Some thread data   | <---- Defined when thread is created
 * |        ...          |
 * |---------------------| <---- Actual initial stack ptr
 * |  Initial Stack Ptr  |       aligned to ARCH_STACK_PTR_ALIGN
 * |        ...          |
 * |        ...          |
 * |        ...          |
 * |        ...          |
 * |        ...          |
 * |        ...          |
 * |        ...          |
 * |        ...          |
 * |  Stack Ends         |
 * |---------------------- <---- Stack Buffer Ptr from API
 * |  MPU Guard,         |
 * |     if present      |
 * ----------------------- <---- Stack Allocation address
 */
#if defined(CONFIG_MPU_STACK_GUARD)
/* Make sure there is more than enough space for an exception frame. */
#if CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE <= 0x20
#define MPU_GUARD_ALIGN_AND_SIZE 0x40
#else
#define MPU_GUARD_ALIGN_AND_SIZE CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE
#endif
#else
#define MPU_GUARD_ALIGN_AND_SIZE 0
#endif
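
/*
 * Worked example (illustrative; the build-time check below is an assumption,
 * not something this header provides): with a 32-byte minimum MPU region
 * (CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE == 0x20), the guard is widened
 * to 0x40 so that the 0x20-byte basic exception frame pushed below the
 * faulting stack pointer still lands inside the guard region. The invariant
 * could be made explicit with:
 *
 *	#include <zephyr/toolchain.h>
 *
 *	BUILD_ASSERT((MPU_GUARD_ALIGN_AND_SIZE == 0) ||
 *		     (MPU_GUARD_ALIGN_AND_SIZE > 0x20),
 *		     "MPU stack guard must exceed a basic exception frame");
 */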

/**
 * @brief Declare the MPU guard alignment and size for a thread stack
 *        that is using the Floating Point services.
 *
 * For threads that are using the Floating Point services under Shared
 * Registers (CONFIG_FPU_SHARING=y) mode, the exception stack frame may
 * contain both the basic stack frame and the FP caller-saved context,
 * upon exception entry. Therefore, a wide guard region is required to
 * guarantee that stack-overflow detection will always be successful.
 */
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) \
	&& defined(CONFIG_MPU_STACK_GUARD)
#if CONFIG_MPU_STACK_GUARD_MIN_SIZE_FLOAT <= 0x20
#define MPU_GUARD_ALIGN_AND_SIZE_FLOAT 0x40
#else
#define MPU_GUARD_ALIGN_AND_SIZE_FLOAT CONFIG_MPU_STACK_GUARD_MIN_SIZE_FLOAT
#endif
#else
#define MPU_GUARD_ALIGN_AND_SIZE_FLOAT 0
#endif

/**
 * @brief Define alignment of an MPU guard
 *
 * Minimum alignment of the start address of an MPU guard, depending on
 * whether the MPU architecture enforces a size (and power-of-two) alignment
 * requirement.
 */
#if defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
#define Z_MPU_GUARD_ALIGN (MAX(MPU_GUARD_ALIGN_AND_SIZE, \
	MPU_GUARD_ALIGN_AND_SIZE_FLOAT))
#else
#define Z_MPU_GUARD_ALIGN MPU_GUARD_ALIGN_AND_SIZE
#endif

#if defined(CONFIG_USERSPACE) && \
	defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
/* This MPU requires regions to be sized to a power of two, and aligned to
 * their own size. Since an MPU region must be able to cover the entire
 * user-accessible stack buffer, we size/align to match. The privilege
 * mode stack is generated elsewhere in memory.
 */
#define ARCH_THREAD_STACK_OBJ_ALIGN(size)	Z_POW2_CEIL(size)
#define ARCH_THREAD_STACK_SIZE_ADJUST(size)	Z_POW2_CEIL(size)
#else
#define ARCH_THREAD_STACK_OBJ_ALIGN(size)	MAX(Z_THREAD_MIN_STACK_ALIGN, \
						    Z_MPU_GUARD_ALIGN)
#ifdef CONFIG_USERSPACE
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
	ROUND_UP(size, CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE)
#endif
#endif

#ifdef CONFIG_MPU_STACK_GUARD
/* Kernel-only stacks need an MPU guard region programmed at the beginning of
 * the stack object, so align the object appropriately.
 */
#define ARCH_KERNEL_STACK_RESERVED	MPU_GUARD_ALIGN_AND_SIZE
#define ARCH_KERNEL_STACK_OBJ_ALIGN	Z_MPU_GUARD_ALIGN
#endif

/* On ARM, all MPU guards are carve-outs. */
#define ARCH_THREAD_STACK_RESERVED 0

/* Legacy case: retain containing extern "C" with C++ */
#ifdef CONFIG_ARM_MPU
#ifdef CONFIG_CPU_HAS_ARM_MPU
#include <zephyr/arch/arm/mpu/arm_mpu.h>
#endif /* CONFIG_CPU_HAS_ARM_MPU */
#ifdef CONFIG_CPU_HAS_NXP_MPU
#include <zephyr/arch/arm/mpu/nxp_mpu.h>
#endif /* CONFIG_CPU_HAS_NXP_MPU */
#endif /* CONFIG_ARM_MPU */
#ifdef CONFIG_ARM_AARCH32_MMU
#include <zephyr/arch/arm/mmu/arm_mmu.h>
#endif /* CONFIG_ARM_AARCH32_MMU */

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_ARCH_ARM_AARCH32_ARCH_H_ */
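
/*
 * Usage note (illustrative sketch under the assumption of
 * CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT=y and CONFIG_USERSPACE=y): a
 * user-thread stack declared through the portable kernel macros is both
 * sized and aligned to the next power of two by ARCH_THREAD_STACK_OBJ_ALIGN()
 * and ARCH_THREAD_STACK_SIZE_ADJUST() above, so a single MPU region can
 * cover the whole buffer. For example:
 *
 *	#include <zephyr/kernel.h>
 *
 *	// 1500 bytes requested; the stack object is rounded up and aligned
 *	// to 2048 bytes under the configuration assumed above.
 *	K_THREAD_STACK_DEFINE(app_stack, 1500);
 */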