1 /*
2 * Copyright (c) 2009-2023 ARM Limited. All rights reserved.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 *
6 * Licensed under the Apache License, Version 2.0 (the License); you may
7 * not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 */
18
19 /*
20 * CMSIS Cortex-A Core Peripheral Access Layer Header File
21 */
22
23 #ifndef __CORE_CA_H_GENERIC
24 #define __CORE_CA_H_GENERIC
25
26 #if defined ( __ICCARM__ )
27 #pragma system_include /* treat file as system include file for MISRA check */
28 #elif defined (__clang__)
29 #pragma clang system_header /* treat file as system include file */
30 #endif
31
32 #ifdef __cplusplus
33 extern "C" {
34 #endif
35
36 /*******************************************************************************
37 * CMSIS definitions
38 ******************************************************************************/
39
40 #include "cmsis_version.h"
41
42 /* CMSIS CA definitions */
43
44 #if defined ( __CC_ARM )
45 #if defined (__TARGET_FPU_VFP)
46 #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)
47 #define __FPU_USED 1U
48 #else
49 #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
50 #define __FPU_USED 0U
51 #endif
52 #else
53 #define __FPU_USED 0U
54 #endif
55
56 #elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
57 #if defined (__ARM_FP)
58 #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)
59 #define __FPU_USED 1U
60 #else
61 #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
62 #define __FPU_USED 0U
63 #endif
64 #else
65 #define __FPU_USED 0U
66 #endif
67
68 #elif defined ( __ICCARM__ )
69 #if defined (__ARMVFP__)
70 #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)
71 #define __FPU_USED 1U
72 #else
73 #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
74 #define __FPU_USED 0U
75 #endif
76 #else
77 #define __FPU_USED 0U
78 #endif
79
80 #elif defined ( __TMS470__ )
81 #if defined __TI_VFP_SUPPORT__
82 #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)
83 #define __FPU_USED 1U
84 #else
85 #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
86 #define __FPU_USED 0U
87 #endif
88 #else
89 #define __FPU_USED 0U
90 #endif
91
92 #elif defined ( __GNUC__ )
93 #if defined (__VFP_FP__) && !defined(__SOFTFP__)
94 #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)
95 #define __FPU_USED 1U
96 #else
97 #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
98 #define __FPU_USED 0U
99 #endif
100 #else
101 #define __FPU_USED 0U
102 #endif
103
104 #elif defined ( __TASKING__ )
105 #if defined (__FPU_VFP__)
106 #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)
107 #define __FPU_USED 1U
108 #else
109 #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
110 #define __FPU_USED 0U
111 #endif
112 #else
113 #define __FPU_USED 0U
114 #endif
115 #endif
116
117 #include "cmsis_compiler.h" /* CMSIS compiler specific defines */
118
119 #ifdef __cplusplus
120 }
121 #endif
122
123 #endif /* __CORE_CA_H_GENERIC */
124
125 #ifndef __CMSIS_GENERIC
126
127 #ifndef __CORE_CA_H_DEPENDANT
128 #define __CORE_CA_H_DEPENDANT
129
130 #if defined ( __ICCARM__ )
131 #pragma system_include /* treat file as system include file for MISRA check */
132 #elif defined (__clang__)
133 #pragma clang system_header /* treat file as system include file */
134 #endif
135
136 #ifdef __cplusplus
137 extern "C" {
138 #endif
139
140 /* check device defines and use defaults */
141 #if defined __CHECK_DEVICE_DEFINES
142 #ifndef __CA_REV
143 #define __CA_REV 0x0000U /*!< \brief Contains the core revision for a Cortex-A class device */
144 #warning "__CA_REV not defined in device header file; using default!"
145 #endif
146
147 #ifndef __FPU_PRESENT
148 #define __FPU_PRESENT 0U
149 #warning "__FPU_PRESENT not defined in device header file; using default!"
150 #endif
151
152 #ifndef __GIC_PRESENT
153 #define __GIC_PRESENT 1U
154 #warning "__GIC_PRESENT not defined in device header file; using default!"
155 #endif
156
157 #ifndef __TIM_PRESENT
158 #define __TIM_PRESENT 1U
159 #warning "__TIM_PRESENT not defined in device header file; using default!"
160 #endif
161
162 #ifndef __L2C_PRESENT
163 #define __L2C_PRESENT 0U
164 #warning "__L2C_PRESENT not defined in device header file; using default!"
165 #endif
166 #endif
167
168 /* IO definitions (access restrictions to peripheral registers) */
169 #ifdef __cplusplus
170 #define __I volatile /*!< \brief Defines 'read only' permissions */
171 #else
172 #define __I volatile const /*!< \brief Defines 'read only' permissions */
173 #endif
174 #define __O volatile /*!< \brief Defines 'write only' permissions */
175 #define __IO volatile /*!< \brief Defines 'read / write' permissions */
176
177 /* The following defines should be used for structure members */
178 #define __IM volatile const /*!< \brief Defines 'read only' structure member permissions */
179 #define __OM volatile /*!< \brief Defines 'write only' structure member permissions */
180 #define __IOM volatile /*!< \brief Defines 'read / write' structure member permissions */
181 #define RESERVED(N, T) T RESERVED##N; // placeholder struct members used for "reserved" areas
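
/* For illustration: RESERVED(0:4, uint32_t) expands to "uint32_t RESERVED0:4;", a 4-bit padding
   bit field, while RESERVED(1[11], uint32_t) expands to "uint32_t RESERVED1[11];", an 11-word
   padding array. The first argument only has to keep the generated member names unique within
   the enclosing structure. */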
182
183 /*******************************************************************************
184 * Register Abstraction
185 Core registers contain:
186 - CPSR
187 - CP15 Registers
188 - L2C-310 Cache Controller
189 - Generic Interrupt Controller Distributor
190 - Generic Interrupt Controller Interface
191 ******************************************************************************/
192
193 /* Core Register CPSR */
194 typedef union
195 {
196 struct
197 {
198 uint32_t M:5; /*!< \brief bit: 0.. 4 Mode field */
199 uint32_t T:1; /*!< \brief bit: 5 Thumb execution state bit */
200 uint32_t F:1; /*!< \brief bit: 6 FIQ mask bit */
201 uint32_t I:1; /*!< \brief bit: 7 IRQ mask bit */
202 uint32_t A:1; /*!< \brief bit: 8 Asynchronous abort mask bit */
203 uint32_t E:1; /*!< \brief bit: 9 Endianness execution state bit */
204 uint32_t IT1:6; /*!< \brief bit: 10..15 If-Then execution state bits 2-7 */
205 uint32_t GE:4; /*!< \brief bit: 16..19 Greater than or Equal flags */
206 RESERVED(0:4, uint32_t)
207 uint32_t J:1; /*!< \brief bit: 24 Jazelle bit */
208 uint32_t IT0:2; /*!< \brief bit: 25..26 If-Then execution state bits 0-1 */
209 uint32_t Q:1; /*!< \brief bit: 27 Saturation condition flag */
210 uint32_t V:1; /*!< \brief bit: 28 Overflow condition code flag */
211 uint32_t C:1; /*!< \brief bit: 29 Carry condition code flag */
212 uint32_t Z:1; /*!< \brief bit: 30 Zero condition code flag */
213 uint32_t N:1; /*!< \brief bit: 31 Negative condition code flag */
214 } b; /*!< \brief Structure used for bit access */
215 uint32_t w; /*!< \brief Type used for word access */
216 } CPSR_Type;
217
218
219
220 /* CPSR Register Definitions */
221 #define CPSR_N_Pos 31U /*!< \brief CPSR: N Position */
222 #define CPSR_N_Msk (1UL << CPSR_N_Pos) /*!< \brief CPSR: N Mask */
223
224 #define CPSR_Z_Pos 30U /*!< \brief CPSR: Z Position */
225 #define CPSR_Z_Msk (1UL << CPSR_Z_Pos) /*!< \brief CPSR: Z Mask */
226
227 #define CPSR_C_Pos 29U /*!< \brief CPSR: C Position */
228 #define CPSR_C_Msk (1UL << CPSR_C_Pos) /*!< \brief CPSR: C Mask */
229
230 #define CPSR_V_Pos 28U /*!< \brief CPSR: V Position */
231 #define CPSR_V_Msk (1UL << CPSR_V_Pos) /*!< \brief CPSR: V Mask */
232
233 #define CPSR_Q_Pos 27U /*!< \brief CPSR: Q Position */
234 #define CPSR_Q_Msk (1UL << CPSR_Q_Pos) /*!< \brief CPSR: Q Mask */
235
236 #define CPSR_IT0_Pos 25U /*!< \brief CPSR: IT0 Position */
237 #define CPSR_IT0_Msk (3UL << CPSR_IT0_Pos) /*!< \brief CPSR: IT0 Mask */
238
239 #define CPSR_J_Pos 24U /*!< \brief CPSR: J Position */
240 #define CPSR_J_Msk (1UL << CPSR_J_Pos) /*!< \brief CPSR: J Mask */
241
242 #define CPSR_GE_Pos 16U /*!< \brief CPSR: GE Position */
243 #define CPSR_GE_Msk (0xFUL << CPSR_GE_Pos) /*!< \brief CPSR: GE Mask */
244
245 #define CPSR_IT1_Pos 10U /*!< \brief CPSR: IT1 Position */
246 #define CPSR_IT1_Msk (0x3FUL << CPSR_IT1_Pos) /*!< \brief CPSR: IT1 Mask */
247
248 #define CPSR_E_Pos 9U /*!< \brief CPSR: E Position */
249 #define CPSR_E_Msk (1UL << CPSR_E_Pos) /*!< \brief CPSR: E Mask */
250
251 #define CPSR_A_Pos 8U /*!< \brief CPSR: A Position */
252 #define CPSR_A_Msk (1UL << CPSR_A_Pos) /*!< \brief CPSR: A Mask */
253
254 #define CPSR_I_Pos 7U /*!< \brief CPSR: I Position */
255 #define CPSR_I_Msk (1UL << CPSR_I_Pos) /*!< \brief CPSR: I Mask */
256
257 #define CPSR_F_Pos 6U /*!< \brief CPSR: F Position */
258 #define CPSR_F_Msk (1UL << CPSR_F_Pos) /*!< \brief CPSR: F Mask */
259
260 #define CPSR_T_Pos 5U /*!< \brief CPSR: T Position */
261 #define CPSR_T_Msk (1UL << CPSR_T_Pos) /*!< \brief CPSR: T Mask */
262
263 #define CPSR_M_Pos 0U /*!< \brief CPSR: M Position */
264 #define CPSR_M_Msk (0x1FUL << CPSR_M_Pos) /*!< \brief CPSR: M Mask */
265
266 #define CPSR_M_USR 0x10U /*!< \brief CPSR: M User mode (PL0) */
267 #define CPSR_M_FIQ 0x11U /*!< \brief CPSR: M Fast Interrupt mode (PL1) */
268 #define CPSR_M_IRQ 0x12U /*!< \brief CPSR: M Interrupt mode (PL1) */
269 #define CPSR_M_SVC 0x13U /*!< \brief CPSR: M Supervisor mode (PL1) */
270 #define CPSR_M_MON 0x16U /*!< \brief CPSR: M Monitor mode (PL1) */
271 #define CPSR_M_ABT 0x17U /*!< \brief CPSR: M Abort mode (PL1) */
272 #define CPSR_M_HYP 0x1AU /*!< \brief CPSR: M Hypervisor mode (PL2) */
273 #define CPSR_M_UND 0x1BU /*!< \brief CPSR: M Undefined mode (PL1) */
274 #define CPSR_M_SYS 0x1FU /*!< \brief CPSR: M System mode (PL1) */
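
/* Usage sketch (illustration only, not part of the original register definitions): decoding the
   current mode through the CPSR_Type bit-field union. Assumes the __get_CPSR() intrinsic from the
   CMSIS compiler headers is available.

     CPSR_Type cpsr;
     cpsr.w = __get_CPSR();              // read the current program status register
     if (cpsr.b.M == CPSR_M_SVC)         // mode field, compare against the CPSR_M_* values above
     {
       // executing in Supervisor mode (PL1)
     }
     uint32_t irq_masked = cpsr.b.I;     // 1 if IRQs are currently masked
*/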
275
276 /* CP15 Register SCTLR */
277 typedef union
278 {
279 struct
280 {
281 uint32_t M:1; /*!< \brief bit: 0 MMU enable */
282 uint32_t A:1; /*!< \brief bit: 1 Alignment check enable */
283 uint32_t C:1; /*!< \brief bit: 2 Cache enable */
284 RESERVED(0:2, uint32_t)
285 uint32_t CP15BEN:1; /*!< \brief bit: 5 CP15 barrier enable */
286 RESERVED(1:1, uint32_t)
287 uint32_t B:1; /*!< \brief bit: 7 Endianness model */
288 RESERVED(2:2, uint32_t)
289 uint32_t SW:1; /*!< \brief bit: 10 SWP and SWPB enable */
290 uint32_t Z:1; /*!< \brief bit: 11 Branch prediction enable */
291 uint32_t I:1; /*!< \brief bit: 12 Instruction cache enable */
292 uint32_t V:1; /*!< \brief bit: 13 Vectors bit */
293 uint32_t RR:1; /*!< \brief bit: 14 Round Robin select */
294 RESERVED(3:2, uint32_t)
295 uint32_t HA:1; /*!< \brief bit: 17 Hardware Access flag enable */
296 RESERVED(4:1, uint32_t)
297 uint32_t WXN:1; /*!< \brief bit: 19 Write permission implies XN */
298 uint32_t UWXN:1; /*!< \brief bit: 20 Unprivileged write permission implies PL1 XN */
299 uint32_t FI:1; /*!< \brief bit: 21 Fast interrupts configuration enable */
300 uint32_t U:1; /*!< \brief bit: 22 Alignment model */
301 RESERVED(5:1, uint32_t)
302 uint32_t VE:1; /*!< \brief bit: 24 Interrupt Vectors Enable */
303 uint32_t EE:1; /*!< \brief bit: 25 Exception Endianness */
304 RESERVED(6:1, uint32_t)
305 uint32_t NMFI:1; /*!< \brief bit: 27 Non-maskable FIQ (NMFI) support */
306 uint32_t TRE:1; /*!< \brief bit: 28 TEX remap enable. */
307 uint32_t AFE:1; /*!< \brief bit: 29 Access flag enable */
308 uint32_t TE:1; /*!< \brief bit: 30 Thumb Exception enable */
309 RESERVED(7:1, uint32_t)
310 } b; /*!< \brief Structure used for bit access */
311 uint32_t w; /*!< \brief Type used for word access */
312 } SCTLR_Type;
313
314 #define SCTLR_TE_Pos 30U /*!< \brief SCTLR: TE Position */
315 #define SCTLR_TE_Msk (1UL << SCTLR_TE_Pos) /*!< \brief SCTLR: TE Mask */
316
317 #define SCTLR_AFE_Pos 29U /*!< \brief SCTLR: AFE Position */
318 #define SCTLR_AFE_Msk (1UL << SCTLR_AFE_Pos) /*!< \brief SCTLR: AFE Mask */
319
320 #define SCTLR_TRE_Pos 28U /*!< \brief SCTLR: TRE Position */
321 #define SCTLR_TRE_Msk (1UL << SCTLR_TRE_Pos) /*!< \brief SCTLR: TRE Mask */
322
323 #define SCTLR_NMFI_Pos 27U /*!< \brief SCTLR: NMFI Position */
324 #define SCTLR_NMFI_Msk (1UL << SCTLR_NMFI_Pos) /*!< \brief SCTLR: NMFI Mask */
325
326 #define SCTLR_EE_Pos 25U /*!< \brief SCTLR: EE Position */
327 #define SCTLR_EE_Msk (1UL << SCTLR_EE_Pos) /*!< \brief SCTLR: EE Mask */
328
329 #define SCTLR_VE_Pos 24U /*!< \brief SCTLR: VE Position */
330 #define SCTLR_VE_Msk (1UL << SCTLR_VE_Pos) /*!< \brief SCTLR: VE Mask */
331
332 #define SCTLR_U_Pos 22U /*!< \brief SCTLR: U Position */
333 #define SCTLR_U_Msk (1UL << SCTLR_U_Pos) /*!< \brief SCTLR: U Mask */
334
335 #define SCTLR_FI_Pos 21U /*!< \brief SCTLR: FI Position */
336 #define SCTLR_FI_Msk (1UL << SCTLR_FI_Pos) /*!< \brief SCTLR: FI Mask */
337
338 #define SCTLR_UWXN_Pos 20U /*!< \brief SCTLR: UWXN Position */
339 #define SCTLR_UWXN_Msk (1UL << SCTLR_UWXN_Pos) /*!< \brief SCTLR: UWXN Mask */
340
341 #define SCTLR_WXN_Pos 19U /*!< \brief SCTLR: WXN Position */
342 #define SCTLR_WXN_Msk (1UL << SCTLR_WXN_Pos) /*!< \brief SCTLR: WXN Mask */
343
344 #define SCTLR_HA_Pos 17U /*!< \brief SCTLR: HA Position */
345 #define SCTLR_HA_Msk (1UL << SCTLR_HA_Pos) /*!< \brief SCTLR: HA Mask */
346
347 #define SCTLR_RR_Pos 14U /*!< \brief SCTLR: RR Position */
348 #define SCTLR_RR_Msk (1UL << SCTLR_RR_Pos) /*!< \brief SCTLR: RR Mask */
349
350 #define SCTLR_V_Pos 13U /*!< \brief SCTLR: V Position */
351 #define SCTLR_V_Msk (1UL << SCTLR_V_Pos) /*!< \brief SCTLR: V Mask */
352
353 #define SCTLR_I_Pos 12U /*!< \brief SCTLR: I Position */
354 #define SCTLR_I_Msk (1UL << SCTLR_I_Pos) /*!< \brief SCTLR: I Mask */
355
356 #define SCTLR_Z_Pos 11U /*!< \brief SCTLR: Z Position */
357 #define SCTLR_Z_Msk (1UL << SCTLR_Z_Pos) /*!< \brief SCTLR: Z Mask */
358
359 #define SCTLR_SW_Pos 10U /*!< \brief SCTLR: SW Position */
360 #define SCTLR_SW_Msk (1UL << SCTLR_SW_Pos) /*!< \brief SCTLR: SW Mask */
361
362 #define SCTLR_B_Pos 7U /*!< \brief SCTLR: B Position */
363 #define SCTLR_B_Msk (1UL << SCTLR_B_Pos) /*!< \brief SCTLR: B Mask */
364
365 #define SCTLR_CP15BEN_Pos 5U /*!< \brief SCTLR: CP15BEN Position */
366 #define SCTLR_CP15BEN_Msk (1UL << SCTLR_CP15BEN_Pos) /*!< \brief SCTLR: CP15BEN Mask */
367
368 #define SCTLR_C_Pos 2U /*!< \brief SCTLR: C Position */
369 #define SCTLR_C_Msk (1UL << SCTLR_C_Pos) /*!< \brief SCTLR: C Mask */
370
371 #define SCTLR_A_Pos 1U /*!< \brief SCTLR: A Position */
372 #define SCTLR_A_Msk (1UL << SCTLR_A_Pos) /*!< \brief SCTLR: A Mask */
373
374 #define SCTLR_M_Pos 0U /*!< \brief SCTLR: M Position */
375 #define SCTLR_M_Msk (1UL << SCTLR_M_Pos) /*!< \brief SCTLR: M Mask */
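
/* Usage sketch (illustration only): enabling the MMU, instruction cache and data cache by
   read-modify-write of SCTLR. Assumes the CP15 accessors __get_SCTLR()/__set_SCTLR() from
   cmsis_cp15.h and the __ISB() barrier are available; the cache/TLB maintenance normally done
   before enabling is omitted here.

     uint32_t sctlr = __get_SCTLR();
     sctlr |= (SCTLR_I_Msk | SCTLR_C_Msk | SCTLR_M_Msk);   // I-cache, D-cache, MMU enable
     __set_SCTLR(sctlr);
     __ISB();                                              // make the new configuration visible
*/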
376
377 /* CP15 Register ACTLR */
378 typedef union
379 {
380 #if __CORTEX_A == 5 || defined(DOXYGEN)
381 /** \brief Structure used for bit access on Cortex-A5 */
382 struct
383 {
384 uint32_t FW:1; /*!< \brief bit: 0 Cache and TLB maintenance broadcast */
385 RESERVED(0:5, uint32_t)
386 uint32_t SMP:1; /*!< \brief bit: 6 Enables coherent requests to the processor */
387 uint32_t EXCL:1; /*!< \brief bit: 7 Exclusive L1/L2 cache control */
388 RESERVED(1:2, uint32_t)
389 uint32_t DODMBS:1; /*!< \brief bit: 10 Disable optimized data memory barrier behavior */
390 uint32_t DWBST:1; /*!< \brief bit: 11 AXI data write bursts to Normal memory */
391 uint32_t RADIS:1; /*!< \brief bit: 12 L1 Data Cache read-allocate mode disable */
392 uint32_t L1PCTL:2; /*!< \brief bit:13..14 L1 Data prefetch control */
393 uint32_t BP:2; /*!< \brief bit:15..16 Branch prediction policy */
394 uint32_t RSDIS:1; /*!< \brief bit: 17 Disable return stack operation */
395 uint32_t BTDIS:1; /*!< \brief bit: 18 Disable indirect Branch Target Address Cache (BTAC) */
396 RESERVED(3:9, uint32_t)
397 uint32_t DBDI:1; /*!< \brief bit: 28 Disable branch dual issue */
398 RESERVED(7:3, uint32_t)
399 } b;
400 #endif
401 #if __CORTEX_A == 7 || defined(DOXYGEN)
402 /** \brief Structure used for bit access on Cortex-A7 */
403 struct
404 {
405 RESERVED(0:6, uint32_t)
406 uint32_t SMP:1; /*!< \brief bit: 6 Enables coherent requests to the processor */
407 RESERVED(1:3, uint32_t)
408 uint32_t DODMBS:1; /*!< \brief bit: 10 Disable optimized data memory barrier behavior */
409 uint32_t L2RADIS:1; /*!< \brief bit: 11 L2 Data Cache read-allocate mode disable */
410 uint32_t L1RADIS:1; /*!< \brief bit: 12 L1 Data Cache read-allocate mode disable */
411 uint32_t L1PCTL:2; /*!< \brief bit:13..14 L1 Data prefetch control */
412 uint32_t DDVM:1; /*!< \brief bit: 15 Disable Distributed Virtual Memory (DVM) transactions */
413 RESERVED(3:12, uint32_t)
414 uint32_t DDI:1; /*!< \brief bit: 28 Disable dual issue */
415 RESERVED(7:3, uint32_t)
416 } b;
417 #endif
418 #if __CORTEX_A == 9 || defined(DOXYGEN)
419 /** \brief Structure used for bit access on Cortex-A9 */
420 struct
421 {
422 uint32_t FW:1; /*!< \brief bit: 0 Cache and TLB maintenance broadcast */
423 RESERVED(0:1, uint32_t)
424 uint32_t L1PE:1; /*!< \brief bit: 2 Dside prefetch */
425 uint32_t WFLZM:1; /*!< \brief bit: 3 Enable write full line of zeros mode */
426 RESERVED(1:2, uint32_t)
427 uint32_t SMP:1; /*!< \brief bit: 6 Enables coherent requests to the processor */
428 uint32_t EXCL:1; /*!< \brief bit: 7 Exclusive L1/L2 cache control */
429 uint32_t AOW:1; /*!< \brief bit: 8 Enable allocation in one cache way only */
430 uint32_t PARITY:1; /*!< \brief bit: 9 Support for parity checking, if implemented */
431 RESERVED(7:22, uint32_t)
432 } b;
433 #endif
434 uint32_t w; /*!< \brief Type used for word access */
435 } ACTLR_Type;
436
437 #define ACTLR_DDI_Pos 28U /*!< \brief ACTLR: DDI Position */
438 #define ACTLR_DDI_Msk (1UL << ACTLR_DDI_Pos) /*!< \brief ACTLR: DDI Mask */
439
440 #define ACTLR_DBDI_Pos 28U /*!< \brief ACTLR: DBDI Position */
441 #define ACTLR_DBDI_Msk (1UL << ACTLR_DBDI_Pos) /*!< \brief ACTLR: DBDI Mask */
442
443 #define ACTLR_BTDIS_Pos 18U /*!< \brief ACTLR: BTDIS Position */
444 #define ACTLR_BTDIS_Msk (1UL << ACTLR_BTDIS_Pos) /*!< \brief ACTLR: BTDIS Mask */
445
446 #define ACTLR_RSDIS_Pos 17U /*!< \brief ACTLR: RSDIS Position */
447 #define ACTLR_RSDIS_Msk (1UL << ACTLR_RSDIS_Pos) /*!< \brief ACTLR: RSDIS Mask */
448
449 #define ACTLR_BP_Pos 15U /*!< \brief ACTLR: BP Position */
450 #define ACTLR_BP_Msk (3UL << ACTLR_BP_Pos) /*!< \brief ACTLR: BP Mask */
451
452 #define ACTLR_DDVM_Pos 15U /*!< \brief ACTLR: DDVM Position */
453 #define ACTLR_DDVM_Msk (1UL << ACTLR_DDVM_Pos) /*!< \brief ACTLR: DDVM Mask */
454
455 #define ACTLR_L1PCTL_Pos 13U /*!< \brief ACTLR: L1PCTL Position */
456 #define ACTLR_L1PCTL_Msk (3UL << ACTLR_L1PCTL_Pos) /*!< \brief ACTLR: L1PCTL Mask */
457
458 #define ACTLR_RADIS_Pos 12U /*!< \brief ACTLR: RADIS Position */
459 #define ACTLR_RADIS_Msk (1UL << ACTLR_RADIS_Pos) /*!< \brief ACTLR: RADIS Mask */
460
461 #define ACTLR_L1RADIS_Pos 12U /*!< \brief ACTLR: L1RADIS Position */
462 #define ACTLR_L1RADIS_Msk (1UL << ACTLR_L1RADIS_Pos) /*!< \brief ACTLR: L1RADIS Mask */
463
464 #define ACTLR_DWBST_Pos 11U /*!< \brief ACTLR: DWBST Position */
465 #define ACTLR_DWBST_Msk (1UL << ACTLR_DWBST_Pos) /*!< \brief ACTLR: DWBST Mask */
466
467 #define ACTLR_L2RADIS_Pos 11U /*!< \brief ACTLR: L2RADIS Position */
468 #define ACTLR_L2RADIS_Msk (1UL << ACTLR_L2RADIS_Pos) /*!< \brief ACTLR: L2RADIS Mask */
469
470 #define ACTLR_DODMBS_Pos 10U /*!< \brief ACTLR: DODMBS Position */
471 #define ACTLR_DODMBS_Msk (1UL << ACTLR_DODMBS_Pos) /*!< \brief ACTLR: DODMBS Mask */
472
473 #define ACTLR_PARITY_Pos 9U /*!< \brief ACTLR: PARITY Position */
474 #define ACTLR_PARITY_Msk (1UL << ACTLR_PARITY_Pos) /*!< \brief ACTLR: PARITY Mask */
475
476 #define ACTLR_AOW_Pos 8U /*!< \brief ACTLR: AOW Position */
477 #define ACTLR_AOW_Msk (1UL << ACTLR_AOW_Pos) /*!< \brief ACTLR: AOW Mask */
478
479 #define ACTLR_EXCL_Pos 7U /*!< \brief ACTLR: EXCL Position */
480 #define ACTLR_EXCL_Msk (1UL << ACTLR_EXCL_Pos) /*!< \brief ACTLR: EXCL Mask */
481
482 #define ACTLR_SMP_Pos 6U /*!< \brief ACTLR: SMP Position */
483 #define ACTLR_SMP_Msk (1UL << ACTLR_SMP_Pos) /*!< \brief ACTLR: SMP Mask */
484
485 #define ACTLR_WFLZM_Pos 3U /*!< \brief ACTLR: WFLZM Position */
486 #define ACTLR_WFLZM_Msk (1UL << ACTLR_WFLZM_Pos) /*!< \brief ACTLR: WFLZM Mask */
487
488 #define ACTLR_L1PE_Pos 2U /*!< \brief ACTLR: L1PE Position */
489 #define ACTLR_L1PE_Msk (1UL << ACTLR_L1PE_Pos) /*!< \brief ACTLR: L1PE Mask */
490
491 #define ACTLR_FW_Pos 0U /*!< \brief ACTLR: FW Position */
492 #define ACTLR_FW_Msk (1UL << ACTLR_FW_Pos) /*!< \brief ACTLR: FW Mask */
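
/* Usage sketch (illustration only): setting the SMP bit, which is typically required before the
   caches are enabled on multi-core Cortex-A devices. Assumes __get_ACTLR()/__set_ACTLR() from
   cmsis_cp15.h are available and that the core runs at a privilege level allowed to write ACTLR.

     uint32_t actlr = __get_ACTLR();
     actlr |= ACTLR_SMP_Msk;            // take part in hardware cache coherency
     __set_ACTLR(actlr);
*/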
493
494 /* CP15 Register CPACR */
495 typedef union
496 {
497 struct
498 {
499 uint32_t CP0:2; /*!< \brief bit: 0..1 Access rights for coprocessor 0 */
500 uint32_t CP1:2; /*!< \brief bit: 2..3 Access rights for coprocessor 1 */
501 uint32_t CP2:2; /*!< \brief bit: 4..5 Access rights for coprocessor 2 */
502 uint32_t CP3:2; /*!< \brief bit: 6..7 Access rights for coprocessor 3 */
503 uint32_t CP4:2; /*!< \brief bit: 8..9 Access rights for coprocessor 4 */
504 uint32_t CP5:2; /*!< \brief bit:10..11 Access rights for coprocessor 5 */
505 uint32_t CP6:2; /*!< \brief bit:12..13 Access rights for coprocessor 6 */
506 uint32_t CP7:2; /*!< \brief bit:14..15 Access rights for coprocessor 7 */
507 uint32_t CP8:2; /*!< \brief bit:16..17 Access rights for coprocessor 8 */
508 uint32_t CP9:2; /*!< \brief bit:18..19 Access rights for coprocessor 9 */
509 uint32_t CP10:2; /*!< \brief bit:20..21 Access rights for coprocessor 10 */
510 uint32_t CP11:2; /*!< \brief bit:22..23 Access rights for coprocessor 11 */
511 uint32_t CP12:2; /*!< \brief bit:24..25 Access rights for coprocessor 12 */
512 uint32_t CP13:2; /*!< \brief bit:26..27 Access rights for coprocessor 13 */
513 uint32_t TRCDIS:1; /*!< \brief bit: 28 Disable CP14 access to trace registers */
514 RESERVED(0:1, uint32_t)
515 uint32_t D32DIS:1; /*!< \brief bit: 30 Disable use of registers D16-D31 of the VFP register file */
516 uint32_t ASEDIS:1; /*!< \brief bit: 31 Disable Advanced SIMD Functionality */
517 } b; /*!< \brief Structure used for bit access */
518 uint32_t w; /*!< \brief Type used for word access */
519 } CPACR_Type;
520
521 #define CPACR_ASEDIS_Pos 31U /*!< \brief CPACR: ASEDIS Position */
522 #define CPACR_ASEDIS_Msk (1UL << CPACR_ASEDIS_Pos) /*!< \brief CPACR: ASEDIS Mask */
523
524 #define CPACR_D32DIS_Pos 30U /*!< \brief CPACR: D32DIS Position */
525 #define CPACR_D32DIS_Msk (1UL << CPACR_D32DIS_Pos) /*!< \brief CPACR: D32DIS Mask */
526
527 #define CPACR_TRCDIS_Pos 28U /*!< \brief CPACR: TRCDIS Position */
528 #define CPACR_TRCDIS_Msk (1UL << CPACR_TRCDIS_Pos) /*!< \brief CPACR: TRCDIS Mask */
529
530 #define CPACR_CP_Pos_(n) ((n)*2U) /*!< \brief CPACR: CPn Position */
531 #define CPACR_CP_Msk_(n) (3UL << CPACR_CP_Pos_(n)) /*!< \brief CPACR: CPn Mask */
532
533 #define CPACR_CP_NA 0U /*!< \brief CPACR CPn field: Access denied. */
534 #define CPACR_CP_PL1 1U /*!< \brief CPACR CPn field: Accessible from PL1 only. */
535 #define CPACR_CP_FA 3U /*!< \brief CPACR CPn field: Full access. */
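
/* Usage sketch (illustration only): granting full access to coprocessors 10 and 11 (the VFP/NEON
   register file) before the FPU is enabled. Assumes __get_CPACR()/__set_CPACR() from cmsis_cp15.h
   are available.

     uint32_t cpacr = __get_CPACR();
     cpacr &= ~(CPACR_CP_Msk_(10) | CPACR_CP_Msk_(11));      // clear the CP10/CP11 fields
     cpacr |=  (CPACR_CP_FA << CPACR_CP_Pos_(10)) |
               (CPACR_CP_FA << CPACR_CP_Pos_(11));           // full access from PL0 and PL1
     __set_CPACR(cpacr);
     __ISB();
*/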
536
537 /* CP15 Register DFSR */
538 typedef union
539 {
540 struct
541 {
542 uint32_t FS0:4; /*!< \brief bit: 0.. 3 Fault Status bits 0-3 */
543 uint32_t Domain:4; /*!< \brief bit: 4.. 7 Fault on which domain */
544 RESERVED(0:1, uint32_t)
545 uint32_t LPAE:1; /*!< \brief bit: 9 Large Physical Address Extension */
546 uint32_t FS1:1; /*!< \brief bit: 10 Fault Status bit 4 */
547 uint32_t WnR:1; /*!< \brief bit: 11 Write not Read bit */
548 uint32_t ExT:1; /*!< \brief bit: 12 External abort type */
549 uint32_t CM:1; /*!< \brief bit: 13 Cache maintenance fault */
550 RESERVED(1:18, uint32_t)
551 } s; /*!< \brief Structure used for bit access in short format */
552 struct
553 {
554 uint32_t STATUS:6; /*!< \brief bit: 0.. 5 Fault Status bits */
555 RESERVED(0:3, uint32_t)
556 uint32_t LPAE:1; /*!< \brief bit: 9 Large Physical Address Extension */
557 RESERVED(1:1, uint32_t)
558 uint32_t WnR:1; /*!< \brief bit: 11 Write not Read bit */
559 uint32_t ExT:1; /*!< \brief bit: 12 External abort type */
560 uint32_t CM:1; /*!< \brief bit: 13 Cache maintenance fault */
561 RESERVED(2:18, uint32_t)
562 } l; /*!< \brief Structure used for bit access in long format */
563 uint32_t w; /*!< \brief Type used for word access */
564 } DFSR_Type;
565
566 #define DFSR_CM_Pos 13U /*!< \brief DFSR: CM Position */
567 #define DFSR_CM_Msk (1UL << DFSR_CM_Pos) /*!< \brief DFSR: CM Mask */
568
569 #define DFSR_Ext_Pos 12U /*!< \brief DFSR: Ext Position */
570 #define DFSR_Ext_Msk (1UL << DFSR_Ext_Pos) /*!< \brief DFSR: Ext Mask */
571
572 #define DFSR_WnR_Pos 11U /*!< \brief DFSR: WnR Position */
573 #define DFSR_WnR_Msk (1UL << DFSR_WnR_Pos) /*!< \brief DFSR: WnR Mask */
574
575 #define DFSR_FS1_Pos 10U /*!< \brief DFSR: FS1 Position */
576 #define DFSR_FS1_Msk (1UL << DFSR_FS1_Pos) /*!< \brief DFSR: FS1 Mask */
577
578 #define DFSR_LPAE_Pos 9U /*!< \brief DFSR: LPAE Position */
579 #define DFSR_LPAE_Msk (1UL << DFSR_LPAE_Pos) /*!< \brief DFSR: LPAE Mask */
580
581 #define DFSR_Domain_Pos 4U /*!< \brief DFSR: Domain Position */
582 #define DFSR_Domain_Msk (0xFUL << DFSR_Domain_Pos) /*!< \brief DFSR: Domain Mask */
583
584 #define DFSR_FS0_Pos 0U /*!< \brief DFSR: FS0 Position */
585 #define DFSR_FS0_Msk (0xFUL << DFSR_FS0_Pos) /*!< \brief DFSR: FS0 Mask */
586
587 #define DFSR_STATUS_Pos 0U /*!< \brief DFSR: STATUS Position */
588 #define DFSR_STATUS_Msk (0x3FUL << DFSR_STATUS_Pos) /*!< \brief DFSR: STATUS Mask */
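
/* Usage sketch (illustration only): decoding a data abort. The LPAE bit selects whether the
   short-descriptor ('s') or long-descriptor ('l') view of the fault status applies. Assumes
   __get_DFSR() from cmsis_cp15.h is available.

     DFSR_Type dfsr;
     dfsr.w = __get_DFSR();
     uint32_t status = dfsr.s.LPAE ? dfsr.l.STATUS
                                   : ((dfsr.s.FS1 << 4) | dfsr.s.FS0);   // FS[4] and FS[3:0] combined
     uint32_t on_write = dfsr.s.WnR;                                     // 1: the abort was on a write
*/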
589
590 /* CP15 Register IFSR */
591 typedef union
592 {
593 struct
594 {
595 uint32_t FS0:4; /*!< \brief bit: 0.. 3 Fault Status bits 0-3 */
596 RESERVED(0:5, uint32_t)
597 uint32_t LPAE:1; /*!< \brief bit: 9 Large Physical Address Extension */
598 uint32_t FS1:1; /*!< \brief bit: 10 Fault Status bit 4 */
599 RESERVED(1:1, uint32_t)
600 uint32_t ExT:1; /*!< \brief bit: 12 External abort type */
601 RESERVED(2:19, uint32_t)
602 } s; /*!< \brief Structure used for bit access in short format */
603 struct
604 {
605 uint32_t STATUS:6; /*!< \brief bit: 0.. 5 Fault Status bits */
606 RESERVED(0:3, uint32_t)
607 uint32_t LPAE:1; /*!< \brief bit: 9 Large Physical Address Extension */
608 RESERVED(1:2, uint32_t)
609 uint32_t ExT:1; /*!< \brief bit: 12 External abort type */
610 RESERVED(2:19, uint32_t)
611 } l; /*!< \brief Structure used for bit access in long format */
612 uint32_t w; /*!< \brief Type used for word access */
613 } IFSR_Type;
614
615 #define IFSR_ExT_Pos 12U /*!< \brief IFSR: ExT Position */
616 #define IFSR_ExT_Msk (1UL << IFSR_ExT_Pos) /*!< \brief IFSR: ExT Mask */
617
618 #define IFSR_FS1_Pos 10U /*!< \brief IFSR: FS1 Position */
619 #define IFSR_FS1_Msk (1UL << IFSR_FS1_Pos) /*!< \brief IFSR: FS1 Mask */
620
621 #define IFSR_LPAE_Pos 9U /*!< \brief IFSR: LPAE Position */
622 #define IFSR_LPAE_Msk (0x1UL << IFSR_LPAE_Pos) /*!< \brief IFSR: LPAE Mask */
623
624 #define IFSR_FS0_Pos 0U /*!< \brief IFSR: FS0 Position */
625 #define IFSR_FS0_Msk (0xFUL << IFSR_FS0_Pos) /*!< \brief IFSR: FS0 Mask */
626
627 #define IFSR_STATUS_Pos 0U /*!< \brief IFSR: STATUS Position */
628 #define IFSR_STATUS_Msk (0x3FUL << IFSR_STATUS_Pos) /*!< \brief IFSR: STATUS Mask */
629
630 /* CP15 Register ISR */
631 typedef union
632 {
633 struct
634 {
635 RESERVED(0:6, uint32_t)
636 uint32_t F:1; /*!< \brief bit: 6 FIQ pending bit */
637 uint32_t I:1; /*!< \brief bit: 7 IRQ pending bit */
638 uint32_t A:1; /*!< \brief bit: 8 External abort pending bit */
639 RESERVED(1:23, uint32_t)
640 } b; /*!< \brief Structure used for bit access */
641 uint32_t w; /*!< \brief Type used for word access */
642 } ISR_Type;
643
644 #define ISR_A_Pos 8U /*!< \brief ISR: A Position */
645 #define ISR_A_Msk (1UL << ISR_A_Pos) /*!< \brief ISR: A Mask */
646
647 #define ISR_I_Pos 7U /*!< \brief ISR: I Position */
648 #define ISR_I_Msk (1UL << ISR_I_Pos) /*!< \brief ISR: I Mask */
649
650 #define ISR_F_Pos 6U /*!< \brief ISR: F Position */
651 #define ISR_F_Msk (1UL << ISR_F_Pos) /*!< \brief ISR: F Mask */
652
653 /* DACR Register */
654 #define DACR_D_Pos_(n) (2U*(n)) /*!< \brief DACR: Dn Position */
655 #define DACR_D_Msk_(n) (3UL << DACR_D_Pos_(n)) /*!< \brief DACR: Dn Mask */
656 #define DACR_Dn_NOACCESS 0U /*!< \brief DACR Dn field: No access */
657 #define DACR_Dn_CLIENT 1U /*!< \brief DACR Dn field: Client */
658 #define DACR_Dn_MANAGER 3U /*!< \brief DACR Dn field: Manager */
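
/* Usage sketch (illustration only): configuring MMU domain 0 as Client (access permissions are
   checked) and domain 1 as Manager (permissions are not checked). Assumes __set_DACR() from
   cmsis_cp15.h is available.

     uint32_t dacr = (DACR_Dn_CLIENT  << DACR_D_Pos_(0)) |
                     (DACR_Dn_MANAGER << DACR_D_Pos_(1));
     __set_DACR(dacr);
*/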
659
660 /**
661 \brief Mask and shift a bit field value for use in a register bit range.
662 \param [in] field Name of the register bit field.
663 \param [in] value Value of the bit field. This parameter is interpreted as a uint32_t type.
664 \return Masked and shifted value.
665 */
666 #define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk)
667
668 /**
669 \brief Mask and shift a register value to extract a bit field value.
670 \param [in] field Name of the register bit field.
671 \param [in] value Value of the register. This parameter is interpreted as a uint32_t type.
672 \return Masked and shifted bit field value.
673 */
674 #define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos)
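
/* Usage sketch (illustration only): the two helpers are inverses of each other for any field that
   has a matching <field>_Pos / <field>_Msk pair in this file, e.g. (assuming the __get_CPSR()
   intrinsic from the CMSIS compiler headers is available):

     uint32_t cpsr = __get_CPSR();
     uint32_t mode = _FLD2VAL(CPSR_M, cpsr);       // extract the 5-bit mode field
     uint32_t ge   = _VAL2FLD(CPSR_GE, 0xFU);      // build a GE field value ready to OR into a register
*/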
675
676
677 /**
678 \brief Union type to access the L2C_310 Cache Controller.
679 */
680 #if (defined(__L2C_PRESENT) && (__L2C_PRESENT == 1U)) || \
681 defined(DOXYGEN)
682 typedef struct
683 {
684 __IM uint32_t CACHE_ID; /*!< \brief Offset: 0x0000 (R/ ) Cache ID Register */
685 __IM uint32_t CACHE_TYPE; /*!< \brief Offset: 0x0004 (R/ ) Cache Type Register */
686 RESERVED(0[0x3e], uint32_t)
687 __IOM uint32_t CONTROL; /*!< \brief Offset: 0x0100 (R/W) Control Register */
688 __IOM uint32_t AUX_CNT; /*!< \brief Offset: 0x0104 (R/W) Auxiliary Control */
689 RESERVED(1[0x3e], uint32_t)
690 __IOM uint32_t EVENT_CONTROL; /*!< \brief Offset: 0x0200 (R/W) Event Counter Control */
691 __IOM uint32_t EVENT_COUNTER1_CONF; /*!< \brief Offset: 0x0204 (R/W) Event Counter 1 Configuration */
692 __IOM uint32_t EVENT_COUNTER0_CONF; /*!< \brief Offset: 0x0208 (R/W) Event Counter 0 Configuration */
693 RESERVED(2[0x2], uint32_t)
694 __IOM uint32_t INTERRUPT_MASK; /*!< \brief Offset: 0x0214 (R/W) Interrupt Mask */
695 __IM uint32_t MASKED_INT_STATUS; /*!< \brief Offset: 0x0218 (R/ ) Masked Interrupt Status */
696 __IM uint32_t RAW_INT_STATUS; /*!< \brief Offset: 0x021c (R/ ) Raw Interrupt Status */
697 __OM uint32_t INTERRUPT_CLEAR; /*!< \brief Offset: 0x0220 ( /W) Interrupt Clear */
698 RESERVED(3[0x143], uint32_t)
699 __IOM uint32_t CACHE_SYNC; /*!< \brief Offset: 0x0730 (R/W) Cache Sync */
700 RESERVED(4[0xf], uint32_t)
701 __IOM uint32_t INV_LINE_PA; /*!< \brief Offset: 0x0770 (R/W) Invalidate Line By PA */
702 RESERVED(6[2], uint32_t)
703 __IOM uint32_t INV_WAY; /*!< \brief Offset: 0x077c (R/W) Invalidate by Way */
704 RESERVED(5[0xc], uint32_t)
705 __IOM uint32_t CLEAN_LINE_PA; /*!< \brief Offset: 0x07b0 (R/W) Clean Line by PA */
706 RESERVED(7[1], uint32_t)
707 __IOM uint32_t CLEAN_LINE_INDEX_WAY; /*!< \brief Offset: 0x07b8 (R/W) Clean Line by Index/Way */
708 __IOM uint32_t CLEAN_WAY; /*!< \brief Offset: 0x07bc (R/W) Clean by Way */
709 RESERVED(8[0xc], uint32_t)
710 __IOM uint32_t CLEAN_INV_LINE_PA; /*!< \brief Offset: 0x07f0 (R/W) Clean and Invalidate Line by PA */
711 RESERVED(9[1], uint32_t)
712 __IOM uint32_t CLEAN_INV_LINE_INDEX_WAY; /*!< \brief Offset: 0x07f8 (R/W) Clean and Invalidate Line by Index/Way */
713 __IOM uint32_t CLEAN_INV_WAY; /*!< \brief Offset: 0x07fc (R/W) Clean and Invalidate by Way */
714 RESERVED(10[0x40], uint32_t)
715 __IOM uint32_t DATA_LOCK_0_WAY; /*!< \brief Offset: 0x0900 (R/W) Data Lockdown 0 by Way */
716 __IOM uint32_t INST_LOCK_0_WAY; /*!< \brief Offset: 0x0904 (R/W) Instruction Lockdown 0 by Way */
717 __IOM uint32_t DATA_LOCK_1_WAY; /*!< \brief Offset: 0x0908 (R/W) Data Lockdown 1 by Way */
718 __IOM uint32_t INST_LOCK_1_WAY; /*!< \brief Offset: 0x090c (R/W) Instruction Lockdown 1 by Way */
719 __IOM uint32_t DATA_LOCK_2_WAY; /*!< \brief Offset: 0x0910 (R/W) Data Lockdown 2 by Way */
720 __IOM uint32_t INST_LOCK_2_WAY; /*!< \brief Offset: 0x0914 (R/W) Instruction Lockdown 2 by Way */
721 __IOM uint32_t DATA_LOCK_3_WAY; /*!< \brief Offset: 0x0918 (R/W) Data Lockdown 3 by Way */
722 __IOM uint32_t INST_LOCK_3_WAY; /*!< \brief Offset: 0x091c (R/W) Instruction Lockdown 3 by Way */
723 __IOM uint32_t DATA_LOCK_4_WAY; /*!< \brief Offset: 0x0920 (R/W) Data Lockdown 4 by Way */
724 __IOM uint32_t INST_LOCK_4_WAY; /*!< \brief Offset: 0x0924 (R/W) Instruction Lockdown 4 by Way */
725 __IOM uint32_t DATA_LOCK_5_WAY; /*!< \brief Offset: 0x0928 (R/W) Data Lockdown 5 by Way */
726 __IOM uint32_t INST_LOCK_5_WAY; /*!< \brief Offset: 0x092c (R/W) Instruction Lockdown 5 by Way */
727 __IOM uint32_t DATA_LOCK_6_WAY; /*!< \brief Offset: 0x0930 (R/W) Data Lockdown 6 by Way */
728 __IOM uint32_t INST_LOCK_6_WAY; /*!< \brief Offset: 0x0934 (R/W) Instruction Lockdown 6 by Way */
729 __IOM uint32_t DATA_LOCK_7_WAY; /*!< \brief Offset: 0x0938 (R/W) Data Lockdown 7 by Way */
730 __IOM uint32_t INST_LOCK_7_WAY; /*!< \brief Offset: 0x093c (R/W) Instruction Lockdown 7 by Way */
731 RESERVED(11[0x4], uint32_t)
732 __IOM uint32_t LOCK_LINE_EN; /*!< \brief Offset: 0x0950 (R/W) Lockdown by Line Enable */
733 __IOM uint32_t UNLOCK_ALL_BY_WAY; /*!< \brief Offset: 0x0954 (R/W) Unlock All Lines by Way */
734 RESERVED(12[0xaa], uint32_t)
735 __IOM uint32_t ADDRESS_FILTER_START; /*!< \brief Offset: 0x0c00 (R/W) Address Filtering Start */
736 __IOM uint32_t ADDRESS_FILTER_END; /*!< \brief Offset: 0x0c04 (R/W) Address Filtering End */
737 RESERVED(13[0xce], uint32_t)
738 __IOM uint32_t DEBUG_CONTROL; /*!< \brief Offset: 0x0f40 (R/W) Debug Control Register */
739 } L2C_310_TypeDef;
740
741 #define L2C_310 ((L2C_310_TypeDef *)L2C_310_BASE) /*!< \brief L2C_310 register set access pointer */
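
/* Usage sketch (illustration only): clean and invalidate all ways of the L2C-310, assuming an
   8-way configuration (way mask 0xFF); L2C_310_BASE must be provided by the device header. A
   background way operation is complete when the way bits that were written read back as zero.

     L2C_310->CLEAN_INV_WAY = 0x00FFU;                         // start clean & invalidate by way
     while ((L2C_310->CLEAN_INV_WAY & 0x00FFU) != 0U) { }      // wait for the operation to finish
     L2C_310->CACHE_SYNC = 0U;                                 // drain the controller's buffers
*/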
742 #endif
743
744 #if (defined(__GIC_PRESENT) && (__GIC_PRESENT == 1U)) || \
745 defined(DOXYGEN)
746
747 /** \brief Structure type to access the Generic Interrupt Controller Distributor (GICD)
748 */
749 typedef struct
750 {
751 __IOM uint32_t CTLR; /*!< \brief Offset: 0x000 (R/W) Distributor Control Register */
752 __IM uint32_t TYPER; /*!< \brief Offset: 0x004 (R/ ) Interrupt Controller Type Register */
753 __IM uint32_t IIDR; /*!< \brief Offset: 0x008 (R/ ) Distributor Implementer Identification Register */
754 RESERVED(0, uint32_t)
755 __IOM uint32_t STATUSR; /*!< \brief Offset: 0x010 (R/W) Error Reporting Status Register, optional */
756 RESERVED(1[11], uint32_t)
757 __OM uint32_t SETSPI_NSR; /*!< \brief Offset: 0x040 ( /W) Set SPI Register */
758 RESERVED(2, uint32_t)
759 __OM uint32_t CLRSPI_NSR; /*!< \brief Offset: 0x048 ( /W) Clear SPI Register */
760 RESERVED(3, uint32_t)
761 __OM uint32_t SETSPI_SR; /*!< \brief Offset: 0x050 ( /W) Set SPI, Secure Register */
762 RESERVED(4, uint32_t)
763 __OM uint32_t CLRSPI_SR; /*!< \brief Offset: 0x058 ( /W) Clear SPI, Secure Register */
764 RESERVED(5[9], uint32_t)
765 __IOM uint32_t IGROUPR[32]; /*!< \brief Offset: 0x080 (R/W) Interrupt Group Registers */
766 __IOM uint32_t ISENABLER[32]; /*!< \brief Offset: 0x100 (R/W) Interrupt Set-Enable Registers */
767 __IOM uint32_t ICENABLER[32]; /*!< \brief Offset: 0x180 (R/W) Interrupt Clear-Enable Registers */
768 __IOM uint32_t ISPENDR[32]; /*!< \brief Offset: 0x200 (R/W) Interrupt Set-Pending Registers */
769 __IOM uint32_t ICPENDR[32]; /*!< \brief Offset: 0x280 (R/W) Interrupt Clear-Pending Registers */
770 __IOM uint32_t ISACTIVER[32]; /*!< \brief Offset: 0x300 (R/W) Interrupt Set-Active Registers */
771 __IOM uint32_t ICACTIVER[32]; /*!< \brief Offset: 0x380 (R/W) Interrupt Clear-Active Registers */
772 __IOM uint32_t IPRIORITYR[255]; /*!< \brief Offset: 0x400 (R/W) Interrupt Priority Registers */
773 RESERVED(6, uint32_t)
774 __IOM uint32_t ITARGETSR[255]; /*!< \brief Offset: 0x800 (R/W) Interrupt Targets Registers */
775 RESERVED(7, uint32_t)
776 __IOM uint32_t ICFGR[64]; /*!< \brief Offset: 0xC00 (R/W) Interrupt Configuration Registers */
777 __IOM uint32_t IGRPMODR[32]; /*!< \brief Offset: 0xD00 (R/W) Interrupt Group Modifier Registers */
778 RESERVED(8[32], uint32_t)
779 __IOM uint32_t NSACR[64]; /*!< \brief Offset: 0xE00 (R/W) Non-secure Access Control Registers */
780 __OM uint32_t SGIR; /*!< \brief Offset: 0xF00 ( /W) Software Generated Interrupt Register */
781 RESERVED(9[3], uint32_t)
782 __IOM uint32_t CPENDSGIR[4]; /*!< \brief Offset: 0xF10 (R/W) SGI Clear-Pending Registers */
783 __IOM uint32_t SPENDSGIR[4]; /*!< \brief Offset: 0xF20 (R/W) SGI Set-Pending Registers */
784 RESERVED(10[5236], uint32_t)
785 __IOM uint64_t IROUTER[988]; /*!< \brief Offset: 0x6100 (R/W) Interrupt Routing Registers */
786 } GICDistributor_Type;
787
788 #define GICDistributor ((GICDistributor_Type *) GIC_DISTRIBUTOR_BASE ) /*!< \brief GIC Distributor register set access pointer */
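
/* Usage sketch (illustration only): enabling forwarding of group 0 interrupts and unmasking one
   shared peripheral interrupt in the distributor. GIC_DISTRIBUTOR_BASE must be provided by the
   device header; `id` is a hypothetical interrupt number (SPIs start at ID 32).

     uint32_t id = 42U;                                           // example SPI number
     GICDistributor->ISENABLER[id / 32U] = 1UL << (id % 32U);     // set-enable registers are write-1-to-enable
     GICDistributor->CTLR |= GICDistributor_CTLR_EnableGrp0_Msk;  // forward group 0 interrupts
*/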
789
790 /* GICDistributor CTLR Register */
791 #define GICDistributor_CTLR_EnableGrp0_Pos 0U /*!< GICDistributor CTLR: EnableGrp0 Position */
792 #define GICDistributor_CTLR_EnableGrp0_Msk (0x1U /*<< GICDistributor_CTLR_EnableGrp0_Pos*/) /*!< GICDistributor CTLR: EnableGrp0 Mask */
793 #define GICDistributor_CTLR_EnableGrp0(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_CTLR_EnableGrp0_Pos*/)) & GICDistributor_CTLR_EnableGrp0_Msk)
794
795 #define GICDistributor_CTLR_EnableGrp1_Pos 1U /*!< GICDistributor CTLR: EnableGrp1 Position */
796 #define GICDistributor_CTLR_EnableGrp1_Msk (0x1U << GICDistributor_CTLR_EnableGrp1_Pos) /*!< GICDistributor CTLR: EnableGrp1 Mask */
797 #define GICDistributor_CTLR_EnableGrp1(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_CTLR_EnableGrp1_Pos)) & GICDistributor_CTLR_EnableGrp1_Msk)
798
799 #define GICDistributor_CTLR_ARE_Pos 4U /*!< GICDistributor CTLR: ARE Position */
800 #define GICDistributor_CTLR_ARE_Msk (0x1U << GICDistributor_CTLR_ARE_Pos) /*!< GICDistributor CTLR: ARE Mask */
801 #define GICDistributor_CTLR_ARE(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_CTLR_ARE_Pos)) & GICDistributor_CTLR_ARE_Msk)
802
803 #define GICDistributor_CTLR_DC_Pos 6U /*!< GICDistributor CTLR: DC Position */
804 #define GICDistributor_CTLR_DC_Msk (0x1U << GICDistributor_CTLR_DC_Pos) /*!< GICDistributor CTLR: DC Mask */
805 #define GICDistributor_CTLR_DC(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_CTLR_DC_Pos)) & GICDistributor_CTLR_DC_Msk)
806
807 #define GICDistributor_CTLR_EINWF_Pos 7U /*!< GICDistributor CTLR: EINWF Position */
808 #define GICDistributor_CTLR_EINWF_Msk (0x1U << GICDistributor_CTLR_EINWF_Pos) /*!< GICDistributor CTLR: EINWF Mask */
809 #define GICDistributor_CTLR_EINWF(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_CTLR_EINWF_Pos)) & GICDistributor_CTLR_EINWF_Msk)
810
811 #define GICDistributor_CTLR_RWP_Pos 31U /*!< GICDistributor CTLR: RWP Position */
812 #define GICDistributor_CTLR_RWP_Msk (0x1U << GICDistributor_CTLR_RWP_Pos) /*!< GICDistributor CTLR: RWP Mask */
813 #define GICDistributor_CTLR_RWP(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_CTLR_RWP_Pos)) & GICDistributor_CTLR_RWP_Msk)
814
815 /* GICDistributor TYPER Register */
816 #define GICDistributor_TYPER_ITLinesNumber_Pos 0U /*!< GICDistributor TYPER: ITLinesNumber Position */
817 #define GICDistributor_TYPER_ITLinesNumber_Msk (0x1FU /*<< GICDistributor_TYPER_ITLinesNumber_Pos*/) /*!< GICDistributor TYPER: ITLinesNumber Mask */
818 #define GICDistributor_TYPER_ITLinesNumber(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_TYPER_ITLinesNumber_Pos*/)) & GICDistributor_TYPER_ITLinesNumber_Msk)
819
820 #define GICDistributor_TYPER_CPUNumber_Pos 5U /*!< GICDistributor TYPER: CPUNumber Position */
821 #define GICDistributor_TYPER_CPUNumber_Msk (0x7U << GICDistributor_TYPER_CPUNumber_Pos) /*!< GICDistributor TYPER: CPUNumber Mask */
822 #define GICDistributor_TYPER_CPUNumber(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_TYPER_CPUNumber_Pos)) & GICDistributor_TYPER_CPUNumber_Msk)
823
824 #define GICDistributor_TYPER_SecurityExtn_Pos 10U /*!< GICDistributor TYPER: SecurityExtn Position */
825 #define GICDistributor_TYPER_SecurityExtn_Msk (0x1U << GICDistributor_TYPER_SecurityExtn_Pos) /*!< GICDistributor TYPER: SecurityExtn Mask */
826 #define GICDistributor_TYPER_SecurityExtn(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_TYPER_SecurityExtn_Pos)) & GICDistributor_TYPER_SecurityExtn_Msk)
827
828 #define GICDistributor_TYPER_LSPI_Pos 11U /*!< GICDistributor TYPER: LSPI Position */
829 #define GICDistributor_TYPER_LSPI_Msk (0x1FU << GICDistributor_TYPER_LSPI_Pos) /*!< GICDistributor TYPER: LSPI Mask */
830 #define GICDistributor_TYPER_LSPI(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_TYPER_LSPI_Pos)) & GICDistributor_TYPER_LSPI_Msk)
831
832 /* GICDistributor IIDR Register */
833 #define GICDistributor_IIDR_Implementer_Pos 0U /*!< GICDistributor IIDR: Implementer Position */
834 #define GICDistributor_IIDR_Implementer_Msk (0xFFFU /*<< GICDistributor_IIDR_Implementer_Pos*/) /*!< GICDistributor IIDR: Implementer Mask */
835 #define GICDistributor_IIDR_Implementer(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_IIDR_Implementer_Pos*/)) & GICDistributor_IIDR_Implementer_Msk)
836
837 #define GICDistributor_IIDR_Revision_Pos 12U /*!< GICDistributor IIDR: Revision Position */
838 #define GICDistributor_IIDR_Revision_Msk (0xFU << GICDistributor_IIDR_Revision_Pos) /*!< GICDistributor IIDR: Revision Mask */
839 #define GICDistributor_IIDR_Revision(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_IIDR_Revision_Pos)) & GICDistributor_IIDR_Revision_Msk)
840
841 #define GICDistributor_IIDR_Variant_Pos 16U /*!< GICDistributor IIDR: Variant Position */
842 #define GICDistributor_IIDR_Variant_Msk (0xFU << GICDistributor_IIDR_Variant_Pos) /*!< GICDistributor IIDR: Variant Mask */
843 #define GICDistributor_IIDR_Variant(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_IIDR_Variant_Pos)) & GICDistributor_IIDR_Variant_Msk)
844
845 #define GICDistributor_IIDR_ProductID_Pos 24U /*!< GICDistributor IIDR: ProductID Position */
846 #define GICDistributor_IIDR_ProductID_Msk (0xFFU << GICDistributor_IIDR_ProductID_Pos) /*!< GICDistributor IIDR: ProductID Mask */
847 #define GICDistributor_IIDR_ProductID(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_IIDR_ProductID_Pos)) & GICDistributor_IIDR_ProductID_Msk)
848
849 /* GICDistributor STATUSR Register */
850 #define GICDistributor_STATUSR_RRD_Pos 0U /*!< GICDistributor STATUSR: RRD Position */
851 #define GICDistributor_STATUSR_RRD_Msk (0x1U /*<< GICDistributor_STATUSR_RRD_Pos*/) /*!< GICDistributor STATUSR: RRD Mask */
852 #define GICDistributor_STATUSR_RRD(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_STATUSR_RRD_Pos*/)) & GICDistributor_STATUSR_RRD_Msk)
853
854 #define GICDistributor_STATUSR_WRD_Pos 1U /*!< GICDistributor STATUSR: WRD Position */
855 #define GICDistributor_STATUSR_WRD_Msk (0x1U << GICDistributor_STATUSR_WRD_Pos) /*!< GICDistributor STATUSR: WRD Mask */
856 #define GICDistributor_STATUSR_WRD(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_STATUSR_WRD_Pos)) & GICDistributor_STATUSR_WRD_Msk)
857
858 #define GICDistributor_STATUSR_RWOD_Pos 2U /*!< GICDistributor STATUSR: RWOD Position */
859 #define GICDistributor_STATUSR_RWOD_Msk (0x1U << GICDistributor_STATUSR_RWOD_Pos) /*!< GICDistributor STATUSR: RWOD Mask */
860 #define GICDistributor_STATUSR_RWOD(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_STATUSR_RWOD_Pos)) & GICDistributor_STATUSR_RWOD_Msk)
861
862 #define GICDistributor_STATUSR_WROD_Pos 3U /*!< GICDistributor STATUSR: WROD Position */
863 #define GICDistributor_STATUSR_WROD_Msk (0x1U << GICDistributor_STATUSR_WROD_Pos) /*!< GICDistributor STATUSR: WROD Mask */
864 #define GICDistributor_STATUSR_WROD(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_STATUSR_WROD_Pos)) & GICDistributor_STATUSR_WROD_Msk)
865
866 /* GICDistributor SETSPI_NSR Register */
867 #define GICDistributor_SETSPI_NSR_INTID_Pos 0U /*!< GICDistributor SETSPI_NSR: INTID Position */
868 #define GICDistributor_SETSPI_NSR_INTID_Msk (0x3FFU /*<< GICDistributor_SETSPI_NSR_INTID_Pos*/) /*!< GICDistributor SETSPI_NSR: INTID Mask */
869 #define GICDistributor_SETSPI_NSR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_SETSPI_NSR_INTID_Pos*/)) & GICDistributor_SETSPI_NSR_INTID_Msk)
870
871 /* GICDistributor CLRSPI_NSR Register */
872 #define GICDistributor_CLRSPI_NSR_INTID_Pos 0U /*!< GICDistributor CLRSPI_NSR: INTID Position */
873 #define GICDistributor_CLRSPI_NSR_INTID_Msk (0x3FFU /*<< GICDistributor_CLRSPI_NSR_INTID_Pos*/) /*!< GICDistributor CLRSPI_NSR: INTID Mask */
874 #define GICDistributor_CLRSPI_NSR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_CLRSPI_NSR_INTID_Pos*/)) & GICDistributor_CLRSPI_NSR_INTID_Msk)
875
876 /* GICDistributor SETSPI_SR Register */
877 #define GICDistributor_SETSPI_SR_INTID_Pos 0U /*!< GICDistributor SETSPI_SR: INTID Position */
878 #define GICDistributor_SETSPI_SR_INTID_Msk (0x3FFU /*<< GICDistributor_SETSPI_SR_INTID_Pos*/) /*!< GICDistributor SETSPI_SR: INTID Mask */
879 #define GICDistributor_SETSPI_SR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_SETSPI_SR_INTID_Pos*/)) & GICDistributor_SETSPI_SR_INTID_Msk)
880
881 /* GICDistributor CLRSPI_SR Register */
882 #define GICDistributor_CLRSPI_SR_INTID_Pos 0U /*!< GICDistributor CLRSPI_SR: INTID Position */
883 #define GICDistributor_CLRSPI_SR_INTID_Msk (0x3FFU /*<< GICDistributor_CLRSPI_SR_INTID_Pos*/) /*!< GICDistributor CLRSPI_SR: INTID Mask */
884 #define GICDistributor_CLRSPI_SR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_CLRSPI_SR_INTID_Pos*/)) & GICDistributor_CLRSPI_SR_INTID_Msk)
885
886 /* GICDistributor ITARGETSR Register */
887 #define GICDistributor_ITARGETSR_CPU0_Pos 0U /*!< GICDistributor ITARGETSR: CPU0 Position */
888 #define GICDistributor_ITARGETSR_CPU0_Msk (0x1U /*<< GICDistributor_ITARGETSR_CPU0_Pos*/) /*!< GICDistributor ITARGETSR: CPU0 Mask */
889 #define GICDistributor_ITARGETSR_CPU0(x) (((uint8_t)(((uint8_t)(x)) /*<< GICDistributor_ITARGETSR_CPU0_Pos*/)) & GICDistributor_ITARGETSR_CPU0_Msk)
890
891 #define GICDistributor_ITARGETSR_CPU1_Pos 1U /*!< GICDistributor ITARGETSR: CPU1 Position */
892 #define GICDistributor_ITARGETSR_CPU1_Msk (0x1U << GICDistributor_ITARGETSR_CPU1_Pos) /*!< GICDistributor ITARGETSR: CPU1 Mask */
893 #define GICDistributor_ITARGETSR_CPU1(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU1_Pos)) & GICDistributor_ITARGETSR_CPU1_Msk)
894
895 #define GICDistributor_ITARGETSR_CPU2_Pos 2U /*!< GICDistributor ITARGETSR: CPU2 Position */
896 #define GICDistributor_ITARGETSR_CPU2_Msk (0x1U << GICDistributor_ITARGETSR_CPU2_Pos) /*!< GICDistributor ITARGETSR: CPU2 Mask */
897 #define GICDistributor_ITARGETSR_CPU2(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU2_Pos)) & GICDistributor_ITARGETSR_CPU2_Msk)
898
899 #define GICDistributor_ITARGETSR_CPU3_Pos 3U /*!< GICDistributor ITARGETSR: CPU3 Position */
900 #define GICDistributor_ITARGETSR_CPU3_Msk (0x1U << GICDistributor_ITARGETSR_CPU3_Pos) /*!< GICDistributor ITARGETSR: CPU3 Mask */
901 #define GICDistributor_ITARGETSR_CPU3(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU3_Pos)) & GICDistributor_ITARGETSR_CPU3_Msk)
902
903 #define GICDistributor_ITARGETSR_CPU4_Pos 4U /*!< GICDistributor ITARGETSR: CPU4 Position */
904 #define GICDistributor_ITARGETSR_CPU4_Msk (0x1U << GICDistributor_ITARGETSR_CPU4_Pos) /*!< GICDistributor ITARGETSR: CPU4 Mask */
905 #define GICDistributor_ITARGETSR_CPU4(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU4_Pos)) & GICDistributor_ITARGETSR_CPU4_Msk)
906
907 #define GICDistributor_ITARGETSR_CPU5_Pos 5U /*!< GICDistributor ITARGETSR: CPU5 Position */
908 #define GICDistributor_ITARGETSR_CPU5_Msk (0x1U << GICDistributor_ITARGETSR_CPU5_Pos) /*!< GICDistributor ITARGETSR: CPU5 Mask */
909 #define GICDistributor_ITARGETSR_CPU5(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU5_Pos)) & GICDistributor_ITARGETSR_CPU5_Msk)
910
911 #define GICDistributor_ITARGETSR_CPU6_Pos 6U /*!< GICDistributor ITARGETSR: CPU6 Position */
912 #define GICDistributor_ITARGETSR_CPU6_Msk (0x1U << GICDistributor_ITARGETSR_CPU6_Pos) /*!< GICDistributor ITARGETSR: CPU6 Mask */
913 #define GICDistributor_ITARGETSR_CPU6(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU6_Pos)) & GICDistributor_ITARGETSR_CPU6_Msk)
914
915 #define GICDistributor_ITARGETSR_CPU7_Pos 7U /*!< GICDistributor ITARGETSR: CPU7 Position */
916 #define GICDistributor_ITARGETSR_CPU7_Msk (0x1U << GICDistributor_ITARGETSR_CPU7_Pos) /*!< GICDistributor ITARGETSR: CPU7 Mask */
917 #define GICDistributor_ITARGETSR_CPU7(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU7_Pos)) & GICDistributor_ITARGETSR_CPU7_Msk)
918
919 /* GICDistributor SGIR Register */
920 #define GICDistributor_SGIR_INTID_Pos 0U /*!< GICDistributor SGIR: INTID Position */
921 #define GICDistributor_SGIR_INTID_Msk (0xFU /*<< GICDistributor_SGIR_INTID_Pos*/) /*!< GICDistributor SGIR: INTID Mask */
922 #define GICDistributor_SGIR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_SGIR_INTID_Pos*/)) & GICDistributor_SGIR_INTID_Msk)
923
924 #define GICDistributor_SGIR_NSATT_Pos 15U /*!< GICDistributor SGIR: NSATT Position */
925 #define GICDistributor_SGIR_NSATT_Msk (0x1U << GICDistributor_SGIR_NSATT_Pos) /*!< GICDistributor SGIR: NSATT Mask */
926 #define GICDistributor_SGIR_NSATT(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_SGIR_NSATT_Pos)) & GICDistributor_SGIR_NSATT_Msk)
927
928 #define GICDistributor_SGIR_CPUTargetList_Pos 16U /*!< GICDistributor SGIR: CPUTargetList Position */
929 #define GICDistributor_SGIR_CPUTargetList_Msk (0xFFU << GICDistributor_SGIR_CPUTargetList_Pos) /*!< GICDistributor SGIR: CPUTargetList Mask */
930 #define GICDistributor_SGIR_CPUTargetList(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_SGIR_CPUTargetList_Pos)) & GICDistributor_SGIR_CPUTargetList_Msk)
931
932 #define GICDistributor_SGIR_TargetFilterList_Pos 24U /*!< GICDistributor SGIR: TargetFilterList Position */
933 #define GICDistributor_SGIR_TargetFilterList_Msk (0x3U << GICDistributor_SGIR_TargetFilterList_Pos) /*!< GICDistributor SGIR: TargetFilterList Mask */
934 #define GICDistributor_SGIR_TargetFilterList(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_SGIR_TargetFilterList_Pos)) & GICDistributor_SGIR_TargetFilterList_Msk)
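
/* Usage sketch (illustration only): composing a GICD_SGIR write that sends software generated
   interrupt 3 to the CPU interfaces named in the target list (filter value 0 = use CPUTargetList).

     GICDistributor->SGIR = GICDistributor_SGIR_TargetFilterList(0U) |    // route via the target list
                            GICDistributor_SGIR_CPUTargetList(0x02U)   |  // CPU interface 1
                            GICDistributor_SGIR_INTID(3U);                // SGI number 3
*/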
935
936 /* GICDistributor IROUTER Register */
937 #define GICDistributor_IROUTER_Aff0_Pos 0UL /*!< GICDistributor IROUTER: Aff0 Position */
938 #define GICDistributor_IROUTER_Aff0_Msk (0xFFUL /*<< GICDistributor_IROUTER_Aff0_Pos*/) /*!< GICDistributor IROUTER: Aff0 Mask */
939 #define GICDistributor_IROUTER_Aff0(x) (((uint64_t)(((uint64_t)(x)) /*<< GICDistributor_IROUTER_Aff0_Pos*/)) & GICDistributor_IROUTER_Aff0_Msk)
940
941 #define GICDistributor_IROUTER_Aff1_Pos 8UL /*!< GICDistributor IROUTER: Aff1 Position */
942 #define GICDistributor_IROUTER_Aff1_Msk (0xFFUL << GICDistributor_IROUTER_Aff1_Pos) /*!< GICDistributor IROUTER: Aff1 Mask */
943 #define GICDistributor_IROUTER_Aff1(x) (((uint64_t)(((uint64_t)(x)) << GICDistributor_IROUTER_Aff1_Pos)) & GICDistributor_IROUTER_Aff1_Msk)
944
945 #define GICDistributor_IROUTER_Aff2_Pos 16UL /*!< GICDistributor IROUTER: Aff2 Position */
946 #define GICDistributor_IROUTER_Aff2_Msk (0xFFUL << GICDistributor_IROUTER_Aff2_Pos) /*!< GICDistributor IROUTER: Aff2 Mask */
947 #define GICDistributor_IROUTER_Aff2(x) (((uint64_t)(((uint64_t)(x)) << GICDistributor_IROUTER_Aff2_Pos)) & GICDistributor_IROUTER_Aff2_Msk)
948
949 #define GICDistributor_IROUTER_IRM_Pos 31UL /*!< GICDistributor IROUTER: IRM Position */
950 #define GICDistributor_IROUTER_IRM_Msk (0x1UL << GICDistributor_IROUTER_IRM_Pos) /*!< GICDistributor IROUTER: IRM Mask */
951 #define GICDistributor_IROUTER_IRM(x) (((uint64_t)(((uint64_t)(x)) << GICDistributor_IROUTER_IRM_Pos)) & GICDistributor_IROUTER_IRM_Msk)
952
953 #define GICDistributor_IROUTER_Aff3_Pos 32UL /*!< GICDistributor IROUTER: Aff3 Position */
954 #define GICDistributor_IROUTER_Aff3_Msk (0xFFUL << GICDistributor_IROUTER_Aff3_Pos) /*!< GICDistributor IROUTER: Aff3 Mask */
955 #define GICDistributor_IROUTER_Aff3(x) (((uint64_t)(((uint64_t)(x)) << GICDistributor_IROUTER_Aff3_Pos)) & GICDistributor_IROUTER_Aff3_Msk)
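
/* Usage sketch (illustration only): building an affinity routing value for GICD_IROUTER when GICv3
   affinity routing is enabled. The affinity values below are hypothetical examples.

     uint64_t route = GICDistributor_IROUTER_Aff3(0U) |
                      GICDistributor_IROUTER_Aff2(0U) |
                      GICDistributor_IROUTER_Aff1(1U) |
                      GICDistributor_IROUTER_Aff0(0U);        // core 0 of cluster 1
     GICDistributor->IROUTER[42U - 32U] = route;               // IROUTER[n] serves INTID n + 32
*/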
956
957
958
959 /** \brief Structure type to access the Generic Interrupt Controller Interface (GICC)
960 */
961 typedef struct
962 {
963 __IOM uint32_t CTLR; /*!< \brief Offset: 0x000 (R/W) CPU Interface Control Register */
964 __IOM uint32_t PMR; /*!< \brief Offset: 0x004 (R/W) Interrupt Priority Mask Register */
965 __IOM uint32_t BPR; /*!< \brief Offset: 0x008 (R/W) Binary Point Register */
966 __IM uint32_t IAR; /*!< \brief Offset: 0x00C (R/ ) Interrupt Acknowledge Register */
967 __OM uint32_t EOIR; /*!< \brief Offset: 0x010 ( /W) End Of Interrupt Register */
968 __IM uint32_t RPR; /*!< \brief Offset: 0x014 (R/ ) Running Priority Register */
969 __IM uint32_t HPPIR; /*!< \brief Offset: 0x018 (R/ ) Highest Priority Pending Interrupt Register */
970 __IOM uint32_t ABPR; /*!< \brief Offset: 0x01C (R/W) Aliased Binary Point Register */
971 __IM uint32_t AIAR; /*!< \brief Offset: 0x020 (R/ ) Aliased Interrupt Acknowledge Register */
972 __OM uint32_t AEOIR; /*!< \brief Offset: 0x024 ( /W) Aliased End Of Interrupt Register */
973 __IM uint32_t AHPPIR; /*!< \brief Offset: 0x028 (R/ ) Aliased Highest Priority Pending Interrupt Register */
974 __IOM uint32_t STATUSR; /*!< \brief Offset: 0x02C (R/W) Error Reporting Status Register, optional */
975 RESERVED(1[40], uint32_t)
976 __IOM uint32_t APR[4]; /*!< \brief Offset: 0x0D0 (R/W) Active Priority Register */
977 __IOM uint32_t NSAPR[4]; /*!< \brief Offset: 0x0E0 (R/W) Non-secure Active Priority Register */
978 RESERVED(2[3], uint32_t)
979 __IM uint32_t IIDR; /*!< \brief Offset: 0x0FC (R/ ) CPU Interface Identification Register */
980 RESERVED(3[960], uint32_t)
981 __OM uint32_t DIR; /*!< \brief Offset: 0x1000( /W) Deactivate Interrupt Register */
982 } GICInterface_Type;
983
984 #define GICInterface ((GICInterface_Type *) GIC_INTERFACE_BASE ) /*!< \brief GIC Interface register set access pointer */
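
/* Usage sketch (illustration only): the minimal acknowledge / end-of-interrupt handshake on the
   CPU interface. GIC_INTERFACE_BASE must be provided by the device header.

     GICInterface->PMR  = 0xFFU;                              // unmask all priority levels
     GICInterface->CTLR |= GICInterface_CTLR_Enable_Msk;      // enable signalling to this CPU

     // later, inside the IRQ handler:
     uint32_t iar   = GICInterface->IAR;                      // acknowledge the pending interrupt
     uint32_t intid = iar & 0x3FFUL;                          // INTID is in bits [9:0] on GICv1/v2
     if (intid < 1020U)                                       // IDs 1020..1023 are special
     {
       // ... service the interrupt, then signal completion:
       GICInterface->EOIR = iar;                              // write back the acknowledged value
     }
*/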
985
986 /* GICInterface CTLR Register */
987 #define GICInterface_CTLR_Enable_Pos 0U /*!< GICInterface CTLR: Enable Position */
988 #define GICInterface_CTLR_Enable_Msk (0x1U /*<< GICInterface_CTLR_Enable_Pos*/) /*!< GICInterface CTLR: Enable Mask */
989 #define GICInterface_CTLR_Enable(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_CTLR_Enable_Pos*/)) & GICInterface_CTLR_Enable_Msk)
990
991 /* GICInterface PMR Register */
992 #define GICInterface_PMR_Priority_Pos 0U /*!< GICInterface PMR: Priority Position */
993 #define GICInterface_PMR_Priority_Msk (0xFFU /*<< GICInterface_PMR_Priority_Pos*/) /*!< GICInterface PMR: Priority Mask */
994 #define GICInterface_PMR_Priority(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_PMR_Priority_Pos*/)) & GICInterface_PMR_Priority_Msk)
995
996 /* GICInterface BPR Register */
997 #define GICInterface_BPR_Binary_Point_Pos 0U /*!< GICInterface BPR: Binary_Point Position */
998 #define GICInterface_BPR_Binary_Point_Msk (0x7U /*<< GICInterface_BPR_Binary_Point_Pos*/) /*!< GICInterface BPR: Binary_Point Mask */
999 #define GICInterface_BPR_Binary_Point(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_BPR_Binary_Point_Pos*/)) & GICInterface_BPR_Binary_Point_Msk)
1000
1001 /* GICInterface IAR Register */
1002 #define GICInterface_IAR_INTID_Pos 0U /*!< GICInterface IAR: INTID Position */
1003 #define GICInterface_IAR_INTID_Msk (0xFFFFFFU /*<< GICInterface_IAR_INTID_Pos*/) /*!< GICInterface IAR: INTID Mask */
1004 #define GICInterface_IAR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_IAR_INTID_Pos*/)) & GICInterface_IAR_INTID_Msk)
1005
1006 /* GICInterface EOIR Register */
1007 #define GICInterface_EOIR_INTID_Pos 0U /*!< GICInterface EOIR: INTID Position */
1008 #define GICInterface_EOIR_INTID_Msk (0xFFFFFFU /*<< GICInterface_EOIR_INTID_Pos*/) /*!< GICInterface EOIR: INTID Mask */
1009 #define GICInterface_EOIR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_EOIR_INTID_Pos*/)) & GICInterface_EOIR_INTID_Msk)
1010
1011 /* GICInterface RPR Register */
1012 #define GICInterface_RPR_INTID_Pos 0U /*!< GICInterface RPR: INTID Position */
1013 #define GICInterface_RPR_INTID_Msk (0xFFU /*<< GICInterface_RPR_INTID_Pos*/) /*!< GICInterface RPR: INTID Mask */
1014 #define GICInterface_RPR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_RPR_INTID_Pos*/)) & GICInterface_RPR_INTID_Msk)
1015
1016 /* GICInterface HPPIR Register */
1017 #define GICInterface_HPPIR_INTID_Pos 0U /*!< GICInterface HPPIR: INTID Position */
1018 #define GICInterface_HPPIR_INTID_Msk (0xFFFFFFU /*<< GICInterface_HPPIR_INTID_Pos*/) /*!< GICInterface HPPIR: INTID Mask */
1019 #define GICInterface_HPPIR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_HPPIR_INTID_Pos*/)) & GICInterface_HPPIR_INTID_Msk)
1020
1021 /* GICInterface ABPR Register */
1022 #define GICInterface_ABPR_Binary_Point_Pos 0U /*!< GICInterface ABPR: Binary_Point Position */
1023 #define GICInterface_ABPR_Binary_Point_Msk (0x7U /*<< GICInterface_ABPR_Binary_Point_Pos*/) /*!< GICInterface ABPR: Binary_Point Mask */
1024 #define GICInterface_ABPR_Binary_Point(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_ABPR_Binary_Point_Pos*/)) & GICInterface_ABPR_Binary_Point_Msk)
1025
1026 /* GICInterface AIAR Register */
1027 #define GICInterface_AIAR_INTID_Pos 0U /*!< GICInterface AIAR: INTID Position */
1028 #define GICInterface_AIAR_INTID_Msk (0xFFFFFFU /*<< GICInterface_AIAR_INTID_Pos*/) /*!< GICInterface AIAR: INTID Mask */
1029 #define GICInterface_AIAR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_AIAR_INTID_Pos*/)) & GICInterface_AIAR_INTID_Msk)
1030
1031 /* GICInterface AEOIR Register */
1032 #define GICInterface_AEOIR_INTID_Pos 0U /*!< GICInterface AEOIR: INTID Position */
1033 #define GICInterface_AEOIR_INTID_Msk (0xFFFFFFU /*<< GICInterface_AEOIR_INTID_Pos*/) /*!< GICInterface AEOIR: INTID Mask */
1034 #define GICInterface_AEOIR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_AEOIR_INTID_Pos*/)) & GICInterface_AEOIR_INTID_Msk)
1035
1036 /* GICInterface AHPPIR Register */
1037 #define GICInterface_AHPPIR_INTID_Pos 0U /*!< GICInterface AHPPIR: INTID Position */
1038 #define GICInterface_AHPPIR_INTID_Msk (0xFFFFFFU /*<< GICInterface_AHPPIR_INTID_Pos*/) /*!< GICInterface AHPPIR: INTID Mask */
1039 #define GICInterface_AHPPIR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_AHPPIR_INTID_Pos*/)) & GICInterface_AHPPIR_INTID_Msk)
1040
1041 /* GICInterface STATUSR Register */
1042 #define GICInterface_STATUSR_RRD_Pos 0U /*!< GICInterface STATUSR: RRD Position */
1043 #define GICInterface_STATUSR_RRD_Msk (0x1U /*<< GICInterface_STATUSR_RRD_Pos*/) /*!< GICInterface STATUSR: RRD Mask */
1044 #define GICInterface_STATUSR_RRD(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_STATUSR_RRD_Pos*/)) & GICInterface_STATUSR_RRD_Msk)
1045
1046 #define GICInterface_STATUSR_WRD_Pos 1U /*!< GICInterface STATUSR: WRD Position */
1047 #define GICInterface_STATUSR_WRD_Msk (0x1U << GICInterface_STATUSR_WRD_Pos) /*!< GICInterface STATUSR: WRD Mask */
1048 #define GICInterface_STATUSR_WRD(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_STATUSR_WRD_Pos)) & GICInterface_STATUSR_WRD_Msk)
1049
1050 #define GICInterface_STATUSR_RWOD_Pos 2U /*!< GICInterface STATUSR: RWOD Position */
1051 #define GICInterface_STATUSR_RWOD_Msk (0x1U << GICInterface_STATUSR_RWOD_Pos) /*!< GICInterface STATUSR: RWOD Mask */
1052 #define GICInterface_STATUSR_RWOD(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_STATUSR_RWOD_Pos)) & GICInterface_STATUSR_RWOD_Msk)
1053
1054 #define GICInterface_STATUSR_WROD_Pos 3U /*!< GICInterface STATUSR: WROD Position */
1055 #define GICInterface_STATUSR_WROD_Msk (0x1U << GICInterface_STATUSR_WROD_Pos) /*!< GICInterface STATUSR: WROD Mask */
1056 #define GICInterface_STATUSR_WROD(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_STATUSR_WROD_Pos)) & GICInterface_STATUSR_WROD_Msk)
1057
1058 #define GICInterface_STATUSR_ASV_Pos 4U /*!< GICInterface STATUSR: ASV Position */
1059 #define GICInterface_STATUSR_ASV_Msk (0x1U << GICInterface_STATUSR_ASV_Pos) /*!< GICInterface STATUSR: ASV Mask */
1060 #define GICInterface_STATUSR_ASV(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_STATUSR_ASV_Pos)) & GICInterface_STATUSR_ASV_Msk)
1061
1062 /* GICInterface IIDR Register */
1063 #define GICInterface_IIDR_Implementer_Pos 0U /*!< GICInterface IIDR: Implementer Position */
1064 #define GICInterface_IIDR_Implementer_Msk (0xFFFU /*<< GICInterface_IIDR_Implementer_Pos*/) /*!< GICInterface IIDR: Implementer Mask */
1065 #define GICInterface_IIDR_Implementer(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_IIDR_Implementer_Pos*/)) & GICInterface_IIDR_Implementer_Msk)
1066
1067 #define GICInterface_IIDR_Revision_Pos 12U /*!< GICInterface IIDR: Revision Position */
1068 #define GICInterface_IIDR_Revision_Msk (0xFU << GICInterface_IIDR_Revision_Pos) /*!< GICInterface IIDR: Revision Mask */
1069 #define GICInterface_IIDR_Revision(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_IIDR_Revision_Pos)) & GICInterface_IIDR_Revision_Msk)
1070
1071 #define GICInterface_IIDR_Arch_version_Pos 16U /*!< GICInterface IIDR: Arch_version Position */
1072 #define GICInterface_IIDR_Arch_version_Msk (0xFU << GICInterface_IIDR_Arch_version_Pos) /*!< GICInterface IIDR: Arch_version Mask */
1073 #define GICInterface_IIDR_Arch_version(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_IIDR_Arch_version_Pos)) & GICInterface_IIDR_Arch_version_Msk)
1074
1075 #define GICInterface_IIDR_ProductID_Pos 20U /*!< GICInterface IIDR: ProductID Position */
1076 #define GICInterface_IIDR_ProductID_Msk (0xFFFU << GICInterface_IIDR_ProductID_Pos) /*!< GICInterface IIDR: ProductID Mask */
1077 #define GICInterface_IIDR_ProductID(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_IIDR_ProductID_Pos)) & GICInterface_IIDR_ProductID_Msk)
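
/* Illustrative sketch (not part of the original header): the _Pos/_Msk field
   macros above are typically used to decode packed register values; here the
   CPU interface IIDR is split into its fields. report_gic_cpu_interface() is a
   hypothetical helper name. */
#if 0 /* usage sketch only - not compiled */
static void report_gic_cpu_interface(void)
{
  uint32_t iidr = GICInterface->IIDR;                                        /* raw register value      */
  uint32_t impl = iidr & GICInterface_IIDR_Implementer_Msk;                  /* JEP106 implementer code */
  uint32_t arch = (iidr & GICInterface_IIDR_Arch_version_Msk) >> GICInterface_IIDR_Arch_version_Pos;
  uint32_t prod = (iidr & GICInterface_IIDR_ProductID_Msk)    >> GICInterface_IIDR_ProductID_Pos;
  (void)impl; (void)arch; (void)prod;                                        /* e.g. print or log them  */
}
#endif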
1078
1079 /* GICInterface DIR Register */
1080 #define GICInterface_DIR_INTID_Pos 0U /*!< GICInterface DIR: INTID Position */
1081 #define GICInterface_DIR_INTID_Msk (0xFFFFFFU /*<< GICInterface_DIR_INTID_Pos*/) /*!< GICInterface DIR: INTID Mask */
1082 #define GICInterface_DIR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_DIR_INTID_Pos*/)) & GICInterface_DIR_INTID_Msk)
1083 #endif /* (__GIC_PRESENT == 1U) || defined(DOXYGEN) */
1084
1085 #if (defined(__TIM_PRESENT) && (__TIM_PRESENT == 1U)) || \
1086 defined(DOXYGEN)
1087 #if ((__CORTEX_A == 5U) || (__CORTEX_A == 9U)) || defined(DOXYGEN)
1088 /** \brief Structure type to access the Private Timer
1089 */
1090 typedef struct
1091 {
1092 __IOM uint32_t LOAD; //!< \brief Offset: 0x000 (R/W) Private Timer Load Register
1093 __IOM uint32_t COUNTER; //!< \brief Offset: 0x004 (R/W) Private Timer Counter Register
1094 __IOM uint32_t CONTROL; //!< \brief Offset: 0x008 (R/W) Private Timer Control Register
1095 __IOM uint32_t ISR; //!< \brief Offset: 0x00C (R/W) Private Timer Interrupt Status Register
1096 RESERVED(0[4], uint32_t)
1097 __IOM uint32_t WLOAD; //!< \brief Offset: 0x020 (R/W) Watchdog Load Register
1098 __IOM uint32_t WCOUNTER; //!< \brief Offset: 0x024 (R/W) Watchdog Counter Register
1099 __IOM uint32_t WCONTROL; //!< \brief Offset: 0x028 (R/W) Watchdog Control Register
1100 __IOM uint32_t WISR; //!< \brief Offset: 0x02C (R/W) Watchdog Interrupt Status Register
1101 __IOM uint32_t WRESET; //!< \brief Offset: 0x030 (R/W) Watchdog Reset Status Register
1102 __OM uint32_t WDISABLE; //!< \brief Offset: 0x034 ( /W) Watchdog Disable Register
1103 } Timer_Type;
1104 #define PTIM ((Timer_Type *) TIMER_BASE ) /*!< \brief Timer register struct */
1105
1106 /* PTIM Control Register */
1107 #define PTIM_CONTROL_Enable_Pos 0U /*!< PTIM CONTROL: Enable Position */
1108 #define PTIM_CONTROL_Enable_Msk (0x1U /*<< PTIM_CONTROL_Enable_Pos*/) /*!< PTIM CONTROL: Enable Mask */
1109 #define PTIM_CONTROL_Enable(x) (((uint32_t)(((uint32_t)(x)) /*<< PTIM_CONTROL_Enable_Pos*/)) & PTIM_CONTROL_Enable_Msk)
1110
1111 #define PTIM_CONTROL_AutoReload_Pos 1U /*!< PTIM CONTROL: Auto Reload Position */
1112 #define PTIM_CONTROL_AutoReload_Msk (0x1U << PTIM_CONTROL_AutoReload_Pos) /*!< PTIM CONTROL: Auto Reload Mask */
1113 #define PTIM_CONTROL_AutoReload(x) (((uint32_t)(((uint32_t)(x)) << PTIM_CONTROL_AutoReload_Pos)) & PTIM_CONTROL_AutoReload_Msk)
1114
1115 #define PTIM_CONTROL_IRQenable_Pos 2U /*!< PTIM CONTROL: IRQ Enable Position */
1116 #define PTIM_CONTROL_IRQenable_Msk (0x1U << PTIM_CONTROL_IRQenable_Pos) /*!< PTIM CONTROL: IRQ Enable Mask */
1117 #define PTIM_CONTROL_IRQenable(x) (((uint32_t)(((uint32_t)(x)) << PTIM_CONTROL_IRQenable_Pos)) & PTIM_CONTROL_IRQenable_Msk)
1118
1119 #define PTIM_CONTROL_Prescaler_Pos 8U /*!< PTIM CONTROL: Prescaler Position */
1120 #define PTIM_CONTROL_Prescaler_Msk (0xFFU << PTIM_CONTROL_Prescaler_Pos) /*!< PTIM CONTROL: Prescaler Mask */
1121 #define PTIM_CONTROL_Prescaler(x) (((uint32_t)(((uint32_t)(x)) << PTIM_CONTROL_Prescaler_Pos)) & PTIM_CONTROL_Prescaler_Msk)
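
/* Illustrative sketch (not part of the original header): the field macros above
   compose a private timer CONTROL value without manual shifting. The prescaler
   value 99U is an arbitrary example (the counter is clocked at
   PERIPHCLK / (Prescaler + 1)). */
#if 0 /* usage sketch only - not compiled */
static void ptim_control_example(void)
{
  PTIM->CONTROL = PTIM_CONTROL_Enable(1U)     |   /* start the timer                    */
                  PTIM_CONTROL_AutoReload(1U) |   /* reload from LOAD on underflow      */
                  PTIM_CONTROL_IRQenable(1U)  |   /* assert the timer interrupt         */
                  PTIM_CONTROL_Prescaler(99U);    /* divide the peripheral clock by 100 */
}
#endif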
1122
1123 /* WCONTROL Watchdog Control Register */
1124 #define PTIM_WCONTROL_Enable_Pos 0U /*!< PTIM WCONTROL: Enable Position */
1125 #define PTIM_WCONTROL_Enable_Msk (0x1U /*<< PTIM_WCONTROL_Enable_Pos*/) /*!< PTIM WCONTROL: Enable Mask */
1126 #define PTIM_WCONTROL_Enable(x) (((uint32_t)(((uint32_t)(x)) /*<< PTIM_WCONTROL_Enable_Pos*/)) & PTIM_WCONTROL_Enable_Msk)
1127
1128 #define PTIM_WCONTROL_AutoReload_Pos 1U /*!< PTIM WCONTROL: Auto Reload Position */
1129 #define PTIM_WCONTROL_AutoReload_Msk (0x1U << PTIM_WCONTROL_AutoReload_Pos) /*!< PTIM WCONTROL: Auto Reload Mask */
1130 #define PTIM_WCONTROL_AutoReload(x) (((uint32_t)(((uint32_t)(x)) << PTIM_WCONTROL_AutoReload_Pos)) & PTIM_WCONTROL_AutoReload_Msk)
1131
1132 #define PTIM_WCONTROL_IRQenable_Pos 2U /*!< PTIM WCONTROL: IRQ Enable Position */
1133 #define PTIM_WCONTROL_IRQenable_Msk (0x1U << PTIM_WCONTROL_IRQenable_Pos) /*!< PTIM WCONTROL: IRQ Enable Mask */
1134 #define PTIM_WCONTROL_IRQenable(x) (((uint32_t)(((uint32_t)(x)) << PTIM_WCONTROL_IRQenable_Pos)) & PTIM_WCONTROL_IRQenable_Msk)
1135
1136 #define PTIM_WCONTROL_Mode_Pos 3U /*!< PTIM WCONTROL: Watchdog Mode Position */
1137 #define PTIM_WCONTROL_Mode_Msk (0x1U << PTIM_WCONTROL_Mode_Pos) /*!< PTIM WCONTROL: Watchdog Mode Mask */
1138 #define PTIM_WCONTROL_Mode(x) (((uint32_t)(((uint32_t)(x)) << PTIM_WCONTROL_Mode_Pos)) & PTIM_WCONTROL_Mode_Msk)
1139
1140 #define PTIM_WCONTROL_Prescaler_Pos 8U /*!< PTIM WCONTROL: Prescaler Position */
1141 #define PTIM_WCONTROL_Prescaler_Msk (0xFFU << PTIM_WCONTROL_Prescaler_Pos) /*!< PTIM WCONTROL: Prescaler Mask */
1142 #define PTIM_WCONTROL_Prescaler(x) (((uint32_t)(((uint32_t)(x)) << PTIM_WCONTROL_Prescaler_Pos)) & PTIM_WCONTROL_Prescaler_Msk)
1143
1144 /* WISR Watchdog Interrupt Status Register */
1145 #define PTIM_WISR_EventFlag_Pos 0U /*!< PTIM WISR: Event Flag Position */
1146 #define PTIM_WISR_EventFlag_Msk (0x1U /*<< PTIM_WISR_EventFlag_Pos*/) /*!< PTIM WISR: Event Flag Mask */
1147 #define PTIM_WISR_EventFlag(x) (((uint32_t)(((uint32_t)(x)) /*<< PTIM_WISR_EventFlag_Pos*/)) & PTIM_WISR_EventFlag_Msk)
1148
1149 /* WRESET Watchdog Reset Status */
1150 #define PTIM_WRESET_ResetFlag_Pos 0U /*!< PTIM WRESET: Reset Flag Position */
1151 #define PTIM_WRESET_ResetFlag_Msk (0x1U /*<< PTIM_WRESET_ResetFlag_Pos*/) /*!< PTIM WRESET: Reset Flag Mask */
1152 #define PTIM_WRESET_ResetFlag(x) (((uint32_t)(((uint32_t)(x)) /*<< PTIM_WRESET_ResetFlag_Pos*/)) & PTIM_WRESET_ResetFlag_Msk)
1153
1154 #endif /* ((__CORTEX_A == 5U) || (__CORTEX_A == 9U)) || defined(DOXYGEN) */
1155 #endif /* (__TIM_PRESENT == 1U) || defined(DOXYGEN) */
1156
1157 /*******************************************************************************
1158 * Hardware Abstraction Layer
1159 Core Function Interface contains:
1160 - L1 Cache Functions
1161 - L2C-310 Cache Controller Functions
1162 - PL1 Timer Functions
1163 - GIC Functions
1164 - MMU Functions
1165 ******************************************************************************/
1166
1167 /* ########################## L1 Cache functions ################################# */
1168
1169 /** \brief Enable Caches by setting I and C bits in SCTLR register.
1170 */
1171 __STATIC_FORCEINLINE void L1C_EnableCaches(void) {
1172 __set_SCTLR( __get_SCTLR() | SCTLR_I_Msk | SCTLR_C_Msk);
1173 __ISB();
1174 }
1175
1176 /** \brief Disable Caches by clearing I and C bits in SCTLR register.
1177 */
1178 __STATIC_FORCEINLINE void L1C_DisableCaches(void) {
1179 __set_SCTLR( __get_SCTLR() & (~SCTLR_I_Msk) & (~SCTLR_C_Msk));
1180 __ISB();
1181 }
1182
1183 /** \brief Enable Branch Prediction by setting Z bit in SCTLR register.
1184 */
1185 __STATIC_FORCEINLINE void L1C_EnableBTAC(void) {
1186 __set_SCTLR( __get_SCTLR() | SCTLR_Z_Msk);
1187 __ISB();
1188 }
1189
1190 /** \brief Disable Branch Prediction by clearing Z bit in SCTLR register.
1191 */
1192 __STATIC_FORCEINLINE void L1C_DisableBTAC(void) {
1193 __set_SCTLR( __get_SCTLR() & (~SCTLR_Z_Msk));
1194 __ISB();
1195 }
1196
1197 /** \brief Invalidate entire branch predictor array
1198 */
1199 __STATIC_FORCEINLINE void L1C_InvalidateBTAC(void) {
1200 __set_BPIALL(0);
1201 __DSB(); //ensure completion of the invalidation
1202 __ISB(); //ensure instruction fetch path sees new state
1203 }
1204
1205 /** \brief Invalidate instruction cache line by address.
1206 * \param [in] va Pointer to instructions to invalidate the cache for.
1207 */
1208 __STATIC_FORCEINLINE void L1C_InvalidateICacheMVA(void *va) {
1209 __set_ICIMVAC((uint32_t)va);
1210 __DSB(); //ensure completion of the invalidation
1211 __ISB(); //ensure instruction fetch path sees new I cache state
1212 }
1213
1214 /** \brief Invalidate the whole instruction cache
1215 */
1216 __STATIC_FORCEINLINE void L1C_InvalidateICacheAll(void) {
1217 __set_ICIALLU(0);
1218 __DSB(); //ensure completion of the invalidation
1219 __ISB(); //ensure instruction fetch path sees new I cache state
1220 }
1221
1222 /** \brief Clean data cache line by address.
1223 * \param [in] va Pointer to data to clean the cache for.
1224 */
1225 __STATIC_FORCEINLINE void L1C_CleanDCacheMVA(void *va) {
1226 __set_DCCMVAC((uint32_t)va);
1227 __DMB(); //ensure the ordering of data cache maintenance operations and their effects
1228 }
1229
1230 /** \brief Invalidate data cache line by address.
1231 * \param [in] va Pointer to data to invalidate the cache for.
1232 */
1233 __STATIC_FORCEINLINE void L1C_InvalidateDCacheMVA(void *va) {
1234 __set_DCIMVAC((uint32_t)va);
1235 __DMB(); //ensure the ordering of data cache maintenance operations and their effects
1236 }
1237
1238 /** \brief Clean and Invalidate data cache by address.
1239 * \param [in] va Pointer to data to invalidate the cache for.
1240 */
1241 __STATIC_FORCEINLINE void L1C_CleanInvalidateDCacheMVA(void *va) {
1242 __set_DCCIMVAC((uint32_t)va);
1243 __DMB(); //ensure the ordering of data cache maintenance operations and their effects
1244 }
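
/* Illustrative sketch (not part of the original header): cleaning a CPU-written
   buffer line by line before handing it to a DMA master. The buffer, its size
   and the 32-byte cache line size are assumptions made for the example. */
#if 0 /* usage sketch only - not compiled */
#define EXAMPLE_DCACHE_LINE_SIZE 32U           /* L1 data cache line size assumed here */
static uint8_t dma_buffer[256];

static void clean_buffer_for_dma(void)
{
  for (uint32_t offs = 0U; offs < sizeof(dma_buffer); offs += EXAMPLE_DCACHE_LINE_SIZE) {
    L1C_CleanDCacheMVA(&dma_buffer[offs]);     /* push each dirty line out to memory */
  }
  __DSB();                                     /* complete maintenance before the DMA starts */
}
#endif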
1245
1246 /** \brief Calculate log2 rounded up
1247 * - log(0) => 0
1248 * - log(1) => 0
1249 * - log(2) => 1
1250 * - log(3) => 2
1251 * - log(4) => 2
1252 * - log(5) => 3
1253 * : :
1254 * - log(16) => 4
1255 * - log(32) => 5
1256 * : :
1257 * \param [in] n input value parameter
1258 * \return log2(n)
1259 */
1260 __STATIC_FORCEINLINE uint8_t __log2_up(uint32_t n)
1261 {
1262 if (n < 2U) {
1263 return 0U;
1264 }
1265 uint8_t log = 0U;
1266 uint32_t t = n;
1267 while(t > 1U)
1268 {
1269 log++;
1270 t >>= 1U;
1271 }
1272 if ((n & (n - 1U)) != 0U) { log++; } /* round up when n is not an exact power of two */
1273 return log;
1274 }
1275
1276 /** \brief Apply cache maintenance to given cache level.
1277 * \param [in] level cache level to be maintained
1278 * \param [in] maint 0 - invalidate, 1 - clean, otherwise - invalidate and clean
1279 */
1280 __STATIC_FORCEINLINE void __L1C_MaintainDCacheSetWay(uint32_t level, uint32_t maint)
1281 {
1282 uint32_t Dummy;
1283 uint32_t ccsidr;
1284 uint32_t num_sets;
1285 uint32_t num_ways;
1286 uint32_t shift_way;
1287 uint32_t log2_linesize;
1288 uint8_t log2_num_ways;
1289
1290 Dummy = level << 1U;
1291 /* set csselr, select ccsidr register */
1292 __set_CSSELR(Dummy);
1293 /* get current ccsidr register */
1294 ccsidr = __get_CCSIDR();
1295 num_sets = ((ccsidr & 0x0FFFE000U) >> 13U) + 1U;
1296 num_ways = ((ccsidr & 0x00001FF8U) >> 3U) + 1U;
1297 log2_linesize = (ccsidr & 0x00000007U) + 2U + 2U;
1298 log2_num_ways = __log2_up(num_ways);
1299 if (log2_num_ways > 32U) {
1300 return; // FATAL ERROR
1301 }
1302 shift_way = 32U - log2_num_ways;
1303 for(int32_t way = num_ways-1; way >= 0; way--)
1304 {
1305 for(int32_t set = num_sets-1; set >= 0; set--)
1306 {
1307 Dummy = (level << 1U) | (((uint32_t)set) << log2_linesize) | (((uint32_t)way) << shift_way);
1308 switch (maint)
1309 {
1310 case 0U: __set_DCISW(Dummy); break;
1311 case 1U: __set_DCCSW(Dummy); break;
1312 default: __set_DCCISW(Dummy); break;
1313 }
1314 }
1315 }
1316 __DMB();
1317 }
1318
1319 /** \brief Clean and Invalidate the entire data or unified cache
1320 * \param [in] op 0 - invalidate, 1 - clean, otherwise - invalidate and clean
1321 */
1322 __STATIC_FORCEINLINE void L1C_CleanInvalidateCache(uint32_t op) {
1323 uint32_t clidr;
1324 uint32_t cache_type;
1325 clidr = __get_CLIDR();
1326 for(uint32_t i = 0U; i<7U; i++)
1327 {
1328 cache_type = (clidr >> i*3U) & 0x7UL;
1329 if ((cache_type >= 2U) && (cache_type <= 4U))
1330 {
1331 __L1C_MaintainDCacheSetWay(i, op);
1332 }
1333 }
1334 }
1335
1336 /** \brief Invalidate the whole data cache.
1337 */
1338 __STATIC_FORCEINLINE void L1C_InvalidateDCacheAll(void) {
1339 L1C_CleanInvalidateCache(0);
1340 }
1341
1342 /** \brief Clean the whole data cache.
1343 */
1344 __STATIC_FORCEINLINE void L1C_CleanDCacheAll(void) {
1345 L1C_CleanInvalidateCache(1);
1346 }
1347
1348 /** \brief Clean and invalidate the whole data cache.
1349 */
1350 __STATIC_FORCEINLINE void L1C_CleanInvalidateDCacheAll(void) {
1351 L1C_CleanInvalidateCache(2);
1352 }
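
/* Illustrative sketch (not part of the original header): a typical boot-time
   bring-up of the L1 caches using the helpers above - invalidate any stale
   contents first, then enable the caches and branch prediction. */
#if 0 /* usage sketch only - not compiled */
static void l1_cache_bringup(void)
{
  L1C_InvalidateDCacheAll();   /* discard whatever a previous run left behind */
  L1C_InvalidateICacheAll();
  L1C_InvalidateBTAC();
  L1C_EnableCaches();          /* set SCTLR.I and SCTLR.C */
  L1C_EnableBTAC();            /* set SCTLR.Z             */
}
#endif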
1353
1354 /* ########################## L2 Cache functions ################################# */
1355 #if (defined(__L2C_PRESENT) && (__L2C_PRESENT == 1U)) || \
1356 defined(DOXYGEN)
1357 /** \brief Cache Sync operation by writing CACHE_SYNC register.
1358 */
1359 __STATIC_INLINE void L2C_Sync(void)
1360 {
1361 L2C_310->CACHE_SYNC = 0x0;
1362 }
1363
1364 /** \brief Read cache controller cache ID from CACHE_ID register.
1365 * \return L2C_310_TypeDef::CACHE_ID
1366 */
1367 __STATIC_INLINE int L2C_GetID (void)
1368 {
1369 return L2C_310->CACHE_ID;
1370 }
1371
1372 /** \brief Read cache controller cache type from CACHE_TYPE register.
1373 * \return L2C_310_TypeDef::CACHE_TYPE
1374 */
1375 __STATIC_INLINE int L2C_GetType (void)
1376 {
1377 return L2C_310->CACHE_TYPE;
1378 }
1379
1380 /** \brief Invalidate all cache by way
1381 */
1382 __STATIC_INLINE void L2C_InvAllByWay (void)
1383 {
1384 unsigned int assoc;
1385
1386 if (L2C_310->AUX_CNT & (1U << 16U)) {
1387 assoc = 16U;
1388 } else {
1389 assoc = 8U;
1390 }
1391
1392 L2C_310->INV_WAY = (1U << assoc) - 1U;
1393 while(L2C_310->INV_WAY & ((1U << assoc) - 1U)); //poll invalidate
1394
1395 L2C_Sync();
1396 }
1397
1398 /** \brief Clean and Invalidate all cache by way
1399 */
1400 __STATIC_INLINE void L2C_CleanInvAllByWay (void)
1401 {
1402 unsigned int assoc;
1403
1404 if (L2C_310->AUX_CNT & (1U << 16U)) {
1405 assoc = 16U;
1406 } else {
1407 assoc = 8U;
1408 }
1409
1410 L2C_310->CLEAN_INV_WAY = (1U << assoc) - 1U;
1411 while(L2C_310->CLEAN_INV_WAY & ((1U << assoc) - 1U)); //poll clean & invalidate
1412
1413 L2C_Sync();
1414 }
1415
1416 /** \brief Enable Level 2 Cache
1417 */
1418 __STATIC_INLINE void L2C_Enable(void)
1419 {
1420 L2C_310->CONTROL = 0;
1421 L2C_310->INTERRUPT_CLEAR = 0x000001FFuL;
1422 L2C_310->DEBUG_CONTROL = 0;
1423 L2C_310->DATA_LOCK_0_WAY = 0;
1424 L2C_310->CACHE_SYNC = 0;
1425 L2C_310->CONTROL = 0x01;
1426 L2C_Sync();
1427 }
1428
1429 /** \brief Disable Level 2 Cache
1430 */
1431 __STATIC_INLINE void L2C_Disable(void)
1432 {
1433 L2C_310->CONTROL = 0x00;
1434 L2C_Sync();
1435 }
1436
1437 /** \brief Invalidate cache by physical address
1438 * \param [in] pa Pointer to data to invalidate cache for.
1439 */
1440 __STATIC_INLINE void L2C_InvPa (void *pa)
1441 {
1442 L2C_310->INV_LINE_PA = (unsigned int)pa;
1443 L2C_Sync();
1444 }
1445
1446 /** \brief Clean cache by physical address
1447 * \param [in] pa Pointer to data to clean the cache for.
1448 */
1449 __STATIC_INLINE void L2C_CleanPa (void *pa)
1450 {
1451 L2C_310->CLEAN_LINE_PA = (unsigned int)pa;
1452 L2C_Sync();
1453 }
1454
1455 /** \brief Clean and invalidate cache by physical address
1456 * \param [in] pa Pointer to data to invalidate cache for.
1457 */
1458 __STATIC_INLINE void L2C_CleanInvPa (void *pa)
1459 {
1460 L2C_310->CLEAN_INV_LINE_PA = (unsigned int)pa;
1461 L2C_Sync();
1462 }
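
/* Illustrative sketch (not part of the original header): enabling the L2C-310
   after the L1 caches, on the common assumption that the controller is still
   disabled and its RAM contents are stale after reset. */
#if 0 /* usage sketch only - not compiled */
static void l2_cache_bringup(void)
{
  if ((L2C_310->CONTROL & 1U) == 0U) {  /* only reprogram the controller while it is disabled */
    L2C_InvAllByWay();                  /* discard stale lines in every way                   */
    L2C_Enable();                       /* clear interrupts/locks and set the enable bit      */
  }
}
#endif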
1463 #endif
1464
1465 /* ########################## GIC functions ###################################### */
1466 #if (defined(__GIC_PRESENT) && (__GIC_PRESENT == 1U)) || \
1467 defined(DOXYGEN)
1468
1469 /** \brief Enable the interrupt distributor using the GIC's CTLR register.
1470 */
1471 __STATIC_INLINE void GIC_EnableDistributor(void)
1472 {
1473 GICDistributor->CTLR |= 1U;
1474 }
1475
1476 /** \brief Disable the interrupt distributor using the GIC's CTLR register.
1477 */
1478 __STATIC_INLINE void GIC_DisableDistributor(void)
1479 {
1480 GICDistributor->CTLR &=~1U;
1481 }
1482
1483 /** \brief Read the GIC's TYPER register.
1484 * \return GICDistributor_Type::TYPER
1485 */
1486 __STATIC_INLINE uint32_t GIC_DistributorInfo(void)
1487 {
1488 return (GICDistributor->TYPER);
1489 }
1490
1491 /** \brief Reads the GIC's IIDR register.
1492 * \return GICDistributor_Type::IIDR
1493 */
1494 __STATIC_INLINE uint32_t GIC_DistributorImplementer(void)
1495 {
1496 return (GICDistributor->IIDR);
1497 }
1498
1499 /** \brief Sets the GIC's ITARGETSR register for the given interrupt.
1500 * \param [in] IRQn Interrupt to be configured.
1501 * \param [in] cpu_target CPU interfaces to assign this interrupt to.
1502 */
1503 __STATIC_INLINE void GIC_SetTarget(IRQn_Type IRQn, uint32_t cpu_target)
1504 {
1505 uint32_t mask = GICDistributor->ITARGETSR[IRQn / 4U] & ~(0xFFUL << ((IRQn % 4U) * 8U));
1506 GICDistributor->ITARGETSR[IRQn / 4U] = mask | ((cpu_target & 0xFFUL) << ((IRQn % 4U) * 8U));
1507 }
1508
1509 /** \brief Read the GIC's ITARGETSR register.
1510 * \param [in] IRQn Interrupt to acquire the configuration for.
1511 * \return GICDistributor_Type::ITARGETSR
1512 */
1513 __STATIC_INLINE uint32_t GIC_GetTarget(IRQn_Type IRQn)
1514 {
1515 return (GICDistributor->ITARGETSR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL;
1516 }
1517
1518 /** \brief Enable the CPU's interrupt interface.
1519 */
1520 __STATIC_INLINE void GIC_EnableInterface(void)
1521 {
1522 GICInterface->CTLR |= 1U; //enable interface
1523 }
1524
1525 /** \brief Disable the CPU's interrupt interface.
1526 */
1527 __STATIC_INLINE void GIC_DisableInterface(void)
1528 {
1529 GICInterface->CTLR &=~1U; //disable interface
1530 }
1531
1532 /** \brief Read the CPU's IAR register.
1533 * \return GICInterface_Type::IAR
1534 */
1535 __STATIC_INLINE IRQn_Type GIC_AcknowledgePending(void)
1536 {
1537 return (IRQn_Type)(GICInterface->IAR);
1538 }
1539
1540 /** \brief Writes the given interrupt number to the CPU's EOIR register.
1541 * \param [in] IRQn The interrupt to be signaled as finished.
1542 */
1543 __STATIC_INLINE void GIC_EndInterrupt(IRQn_Type IRQn)
1544 {
1545 GICInterface->EOIR = IRQn;
1546 }
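
/* Illustrative sketch (not part of the original header): the acknowledge /
   end-of-interrupt handshake as it is typically used from the IRQ exception
   handler. dispatch_irq() is a hypothetical application routine. */
#if 0 /* usage sketch only - not compiled */
extern void dispatch_irq(IRQn_Type irq);

static void irq_exception_handler(void)
{
  IRQn_Type irq = GIC_AcknowledgePending();   /* read IAR: highest priority pending INTID        */
  if ((uint32_t)irq < 1020U) {                /* 1020..1023 are special INTIDs (1023 = spurious) */
    dispatch_irq(irq);
    GIC_EndInterrupt(irq);                    /* write EOIR to drop the running priority         */
  }
}
#endif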
1547
1548 /** \brief Enables the given interrupt using GIC's ISENABLER register.
1549 * \param [in] IRQn The interrupt to be enabled.
1550 */
1551 __STATIC_INLINE void GIC_EnableIRQ(IRQn_Type IRQn)
1552 {
1553 GICDistributor->ISENABLER[IRQn / 32U] = 1U << (IRQn % 32U);
1554 }
1555
1556 /** \brief Get interrupt enable status using GIC's ISENABLER register.
1557 * \param [in] IRQn The interrupt to be queried.
1558 * \return 0 - interrupt is not enabled, 1 - interrupt is enabled.
1559 */
1560 __STATIC_INLINE uint32_t GIC_GetEnableIRQ(IRQn_Type IRQn)
1561 {
1562 return (GICDistributor->ISENABLER[IRQn / 32U] >> (IRQn % 32U)) & 1UL;
1563 }
1564
1565 /** \brief Disables the given interrupt using GIC's ICENABLER register.
1566 * \param [in] IRQn The interrupt to be disabled.
1567 */
1568 __STATIC_INLINE void GIC_DisableIRQ(IRQn_Type IRQn)
1569 {
1570 GICDistributor->ICENABLER[IRQn / 32U] = 1U << (IRQn % 32U);
1571 }
1572
1573 /** \brief Get interrupt pending status from GIC's ISPENDR register.
1574 * \param [in] IRQn The interrupt to be queried.
1575 * \return 0 - interrupt is not pending, 1 - interrupt is pending.
1576 */
1577 __STATIC_INLINE uint32_t GIC_GetPendingIRQ(IRQn_Type IRQn)
1578 {
1579 uint32_t pend;
1580
1581 if (IRQn >= 16U) {
1582 pend = (GICDistributor->ISPENDR[IRQn / 32U] >> (IRQn % 32U)) & 1UL;
1583 } else {
1584 // INTID 0-15 Software Generated Interrupt
1585 pend = (GICDistributor->SPENDSGIR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL;
1586 // No CPU identification offered
1587 if (pend != 0U) {
1588 pend = 1U;
1589 } else {
1590 pend = 0U;
1591 }
1592 }
1593
1594 return (pend);
1595 }
1596
1597 /** \brief Sets the given interrupt as pending using GIC's ISPENDR register.
1598 * \param [in] IRQn The interrupt to be set pending.
1599 */
1600 __STATIC_INLINE void GIC_SetPendingIRQ(IRQn_Type IRQn)
1601 {
1602 if (IRQn >= 16U) {
1603 GICDistributor->ISPENDR[IRQn / 32U] = 1U << (IRQn % 32U);
1604 } else {
1605 // INTID 0-15 Software Generated Interrupt
1606 // Forward the interrupt to the CPU interface that requested it
1607 GICDistributor->SGIR = (IRQn | 0x02000000U);
1608 }
1609 }
1610
1611 /** \brief Clears the given interrupt from being pending using GIC's ICPENDR register.
1612 * \param [in] IRQn The interrupt whose pending state is to be cleared.
1613 */
1614 __STATIC_INLINE void GIC_ClearPendingIRQ(IRQn_Type IRQn)
1615 {
1616 if (IRQn >= 16U) {
1617 GICDistributor->ICPENDR[IRQn / 32U] = 1U << (IRQn % 32U);
1618 } else {
1619 // INTID 0-15 Software Generated Interrupt
1620 GICDistributor->CPENDSGIR[IRQn / 4U] = 1U << ((IRQn % 4U) * 8U);
1621 }
1622 }
1623
1624 /** \brief Sets the interrupt configuration using GIC's ICFGR register.
1625 * \param [in] IRQn The interrupt to be configured.
1626 * \param [in] int_config Int_config field value. Bit 0: Reserved (0 - N-N model, 1 - 1-N model for some GIC before v1)
1627 * Bit 1: 0 - level sensitive, 1 - edge triggered
1628 */
1629 __STATIC_INLINE void GIC_SetConfiguration(IRQn_Type IRQn, uint32_t int_config)
1630 {
1631 uint32_t icfgr = GICDistributor->ICFGR[IRQn / 16U]; /* read current register content */
1632 uint32_t shift = (IRQn % 16U) << 1U; /* calculate shift value */
1633
1634 int_config &= 3U; /* only 2 bits are valid */
1635 icfgr &= (~(3U << shift)); /* clear bits to change */
1636 icfgr |= ( int_config << shift); /* set new configuration */
1637
1638 GICDistributor->ICFGR[IRQn / 16U] = icfgr; /* write new register content */
1639 }
1640
1641 /** \brief Get the interrupt configuration from the GIC's ICFGR register.
1642 * \param [in] IRQn Interrupt to acquire the configuration for.
1643 * \return Int_config field value. Bit 0: Reserved (0 - N-N model, 1 - 1-N model for some GIC before v1)
1644 * Bit 1: 0 - level sensitive, 1 - edge triggered
1645 */
1646 __STATIC_INLINE uint32_t GIC_GetConfiguration(IRQn_Type IRQn)
1647 {
1648 return ((GICDistributor->ICFGR[IRQn / 16U] >> ((IRQn % 16U) << 1U)) & 3UL); /* two configuration bits per interrupt */
1649 }
1650
1651 /** \brief Set the priority for the given interrupt in the GIC's IPRIORITYR register.
1652 * \param [in] IRQn The interrupt to be configured.
1653 * \param [in] priority The priority for the interrupt, lower values denote higher priorities.
1654 */
1655 __STATIC_INLINE void GIC_SetPriority(IRQn_Type IRQn, uint32_t priority)
1656 {
1657 uint32_t mask = GICDistributor->IPRIORITYR[IRQn / 4U] & ~(0xFFUL << ((IRQn % 4U) * 8U));
1658 GICDistributor->IPRIORITYR[IRQn / 4U] = mask | ((priority & 0xFFUL) << ((IRQn % 4U) * 8U));
1659 }
1660
1661 /** \brief Read the current interrupt priority from GIC's IPRIORITYR register.
1662 * \param [in] IRQn The interrupt to be queried.
1663 */
1664 __STATIC_INLINE uint32_t GIC_GetPriority(IRQn_Type IRQn)
1665 {
1666 return (GICDistributor->IPRIORITYR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL;
1667 }
1668
1669 /** \brief Set the interrupt priority mask using CPU's PMR register.
1670 * \param [in] priority Priority mask to be set.
1671 */
1672 __STATIC_INLINE void GIC_SetInterfacePriorityMask(uint32_t priority)
1673 {
1674 GICInterface->PMR = priority & 0xFFUL; //set priority mask
1675 }
1676
1677 /** \brief Read the current interrupt priority mask from CPU's PMR register.
1678 * \result GICInterface_Type::PMR
1679 */
1680 __STATIC_INLINE uint32_t GIC_GetInterfacePriorityMask(void)
1681 {
1682 return GICInterface->PMR;
1683 }
1684
1685 /** \brief Configures the group priority and subpriority split point using CPU's BPR register.
1686 * \param [in] binary_point Amount of bits used as subpriority.
1687 */
1688 __STATIC_INLINE void GIC_SetBinaryPoint(uint32_t binary_point)
1689 {
1690 GICInterface->BPR = binary_point & 7U; //set binary point
1691 }
1692
1693 /** \brief Read the current group priority and subpriority split point from CPU's BPR register.
1694 * \return GICInterface_Type::BPR
1695 */
1696 __STATIC_INLINE uint32_t GIC_GetBinaryPoint(void)
1697 {
1698 return GICInterface->BPR;
1699 }
1700
1701 /** \brief Get the status for a given interrupt.
1702 * \param [in] IRQn The interrupt to get status for.
1703 * \return 0 - not pending/active, 1 - pending, 2 - active, 3 - pending and active
1704 */
1705 __STATIC_INLINE uint32_t GIC_GetIRQStatus(IRQn_Type IRQn)
1706 {
1707 uint32_t pending, active;
1708
1709 active = ((GICDistributor->ISACTIVER[IRQn / 32U]) >> (IRQn % 32U)) & 1UL;
1710 pending = ((GICDistributor->ISPENDR[IRQn / 32U]) >> (IRQn % 32U)) & 1UL;
1711
1712 return ((active<<1U) | pending);
1713 }
1714
1715 /** \brief Generate a software interrupt using GIC's SGIR register.
1716 * \param [in] IRQn Software interrupt to be generated.
1717 * \param [in] target_list List of CPUs the software interrupt should be forwarded to.
1718 * \param [in] filter_list Filter to be applied to determine interrupt receivers.
1719 */
1720 __STATIC_INLINE void GIC_SendSGI(IRQn_Type IRQn, uint32_t target_list, uint32_t filter_list)
1721 {
1722 GICDistributor->SGIR = ((filter_list & 3U) << 24U) | ((target_list & 0xFFUL) << 16U) | (IRQn & 0x0FUL);
1723 }
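
/* Illustrative sketch (not part of the original header): raising software
   generated interrupt 0 on all other cores, e.g. as a scheduler kick. On
   GICv1/v2 a filter_list of 1U means "all CPU interfaces except the requesting
   one", so the target_list argument is ignored. */
#if 0 /* usage sketch only - not compiled */
static void kick_other_cores(void)
{
  GIC_SendSGI((IRQn_Type)0, 0U, 1U);
}
#endif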
1724
1725 /** \brief Get the interrupt number of the highest interrupt pending from CPU's HPPIR register.
1726 * \return GICInterface_Type::HPPIR
1727 */
1728 __STATIC_INLINE uint32_t GIC_GetHighPendingIRQ(void)
1729 {
1730 return GICInterface->HPPIR;
1731 }
1732
1733 /** \brief Provides information about the implementer and revision of the CPU interface.
1734 * \return GICInterface_Type::IIDR
1735 */
1736 __STATIC_INLINE uint32_t GIC_GetInterfaceId(void)
1737 {
1738 return GICInterface->IIDR;
1739 }
1740
1741 /** \brief Set the interrupt group in the GIC's IGROUPR register.
1742 * \param [in] IRQn The interrupt to be configured.
1743 * \param [in] group Interrupt group number: 0 - Group 0, 1 - Group 1
1744 */
1745 __STATIC_INLINE void GIC_SetGroup(IRQn_Type IRQn, uint32_t group)
1746 {
1747 uint32_t igroupr = GICDistributor->IGROUPR[IRQn / 32U];
1748 uint32_t shift = (IRQn % 32U);
1749
1750 igroupr &= (~(1U << shift));
1751 igroupr |= ( (group & 1U) << shift);
1752
1753 GICDistributor->IGROUPR[IRQn / 32U] = igroupr;
1754 }
1755 #define GIC_SetSecurity GIC_SetGroup
1756
1757 /** \brief Get the interrupt group from the GIC's IGROUPR register.
1758 * \param [in] IRQn The interrupt to be queried.
1759 * \return 0 - Group 0, 1 - Group 1
1760 */
1761 __STATIC_INLINE uint32_t GIC_GetGroup(IRQn_Type IRQn)
1762 {
1763 return (GICDistributor->IGROUPR[IRQn / 32U] >> (IRQn % 32U)) & 1UL;
1764 }
1765 #define GIC_GetSecurity GIC_GetGroup
1766
1767 /** \brief Initialize the interrupt distributor.
1768 */
1769 __STATIC_INLINE void GIC_DistInit(void)
1770 {
1771 uint32_t i;
1772 uint32_t num_irq = 0U;
1773 uint32_t priority_field;
1774
1775 //A reset sets all bits in the IGROUPRs corresponding to the SPIs to 0,
1776 //configuring all of the interrupts as Secure.
1777
1778 //Disable interrupt forwarding
1779 GIC_DisableDistributor();
1780 //Get the maximum number of interrupts that the GIC supports
1781 num_irq = 32U * ((GIC_DistributorInfo() & 0x1FU) + 1U);
1782
1783 /* Priority level is implementation defined.
1784 To determine the number of priority bits implemented write 0xFF to an IPRIORITYR
1785 priority field and read back the value stored.*/
1786 GIC_SetPriority((IRQn_Type)0U, 0xFFU);
1787 priority_field = GIC_GetPriority((IRQn_Type)0U);
1788
1789 for (i = 32U; i < num_irq; i++)
1790 {
1791 //Disable the SPI interrupt
1792 GIC_DisableIRQ((IRQn_Type)i);
1793 //Set level-sensitive (and N-N model)
1794 GIC_SetConfiguration((IRQn_Type)i, 0U);
1795 //Set priority
1796 GIC_SetPriority((IRQn_Type)i, priority_field/2U);
1797 //Set target list to CPU0
1798 GIC_SetTarget((IRQn_Type)i, 1U);
1799 }
1800 //Enable distributor
1801 GIC_EnableDistributor();
1802 }
1803
1804 /** \brief Initialize the CPU's interrupt interface
1805 */
1806 __STATIC_INLINE void GIC_CPUInterfaceInit(void)
1807 {
1808 uint32_t i;
1809 uint32_t priority_field;
1810
1811 //A reset sets all bits in the IGROUPRs corresponding to the SPIs to 0,
1812 //configuring all of the interrupts as Secure.
1813
1814 //Disable interrupt forwarding
1815 GIC_DisableInterface();
1816
1817 /* Priority level is implementation defined.
1818 To determine the number of priority bits implemented write 0xFF to an IPRIORITYR
1819 priority field and read back the value stored.*/
1820 GIC_SetPriority((IRQn_Type)0U, 0xFFU);
1821 priority_field = GIC_GetPriority((IRQn_Type)0U);
1822
1823 //SGI and PPI
1824 for (i = 0U; i < 32U; i++)
1825 {
1826 if(i > 15U) {
1827 //Set level-sensitive (and N-N model) for PPI
1828 GIC_SetConfiguration((IRQn_Type)i, 0U);
1829 }
1830 //Disable SGI and PPI interrupts
1831 GIC_DisableIRQ((IRQn_Type)i);
1832 //Set priority
1833 GIC_SetPriority((IRQn_Type)i, priority_field/2U);
1834 }
1835 //Enable interface
1836 GIC_EnableInterface();
1837 //Set binary point to 0
1838 GIC_SetBinaryPoint(0U);
1839 //Set priority mask
1840 GIC_SetInterfacePriorityMask(0xFFU);
1841 }
1842
1843 /** \brief Initialize and enable the GIC
1844 */
1845 __STATIC_INLINE void GIC_Enable(void)
1846 {
1847 GIC_DistInit();
1848 GIC_CPUInterfaceInit(); //per CPU
1849 }
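
/* Illustrative sketch (not part of the original header): bringing the GIC up on
   the primary core and routing one shared peripheral interrupt to it. The INTID
   42 and priority 0xA0 are arbitrary, device-specific example values. */
#if 0 /* usage sketch only - not compiled */
static void example_gic_setup(void)
{
  GIC_Enable();                              /* distributor + this CPU's interface */
  GIC_SetConfiguration((IRQn_Type)42, 2U);   /* bit 1 set: edge triggered          */
  GIC_SetPriority((IRQn_Type)42, 0xA0U);     /* lower value = higher priority      */
  GIC_SetTarget((IRQn_Type)42, 1U);          /* forward to CPU interface 0         */
  GIC_EnableIRQ((IRQn_Type)42);
}
#endif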
1850 #endif
1851
1852 /* ########################## Generic Timer functions ############################ */
1853 #if (defined(__TIM_PRESENT) && (__TIM_PRESENT == 1U)) || \
1854 defined(DOXYGEN)
1855
1856 /* PL1 Physical Timer */
1857 #if (__CORTEX_A == 7U) || defined(DOXYGEN)
1858
1859 /** \brief Physical Timer Control register */
1860 typedef union
1861 {
1862 struct
1863 {
1864 uint32_t ENABLE:1; /*!< \brief bit: 0 Enables the timer. */
1865 uint32_t IMASK:1; /*!< \brief bit: 1 Timer output signal mask bit. */
1866 uint32_t ISTATUS:1; /*!< \brief bit: 2 The status of the timer. */
1867 RESERVED(0:29, uint32_t)
1868 } b; /*!< \brief Structure used for bit access */
1869 uint32_t w; /*!< \brief Type used for word access */
1870 } CNTP_CTL_Type;
1871
1872 /** \brief Configures the frequency the timer shall run at.
1873 * \param [in] value The timer frequency in Hz.
1874 */
1875 __STATIC_INLINE void PL1_SetCounterFrequency(uint32_t value)
1876 {
1877 __set_CNTFRQ(value);
1878 __ISB();
1879 }
1880
1881 /** \brief Sets the reset value of the timer.
1882 * \param [in] value The value the timer is loaded with.
1883 */
1884 __STATIC_INLINE void PL1_SetLoadValue(uint32_t value)
1885 {
1886 __set_CNTP_TVAL(value);
1887 __ISB();
1888 }
1889
1890 /** \brief Get the current counter value.
1891 * \return Current counter value.
1892 */
1893 __STATIC_INLINE uint32_t PL1_GetCurrentValue(void)
1894 {
1895 return(__get_CNTP_TVAL());
1896 }
1897
1898 /** \brief Get the current physical counter value.
1899 * \return Current physical counter value.
1900 */
1901 __STATIC_INLINE uint64_t PL1_GetCurrentPhysicalValue(void)
1902 {
1903 return(__get_CNTPCT());
1904 }
1905
1906 /** \brief Set the physical compare value.
1907 * \param [in] value New physical timer compare value.
1908 */
1909 __STATIC_INLINE void PL1_SetPhysicalCompareValue(uint64_t value)
1910 {
1911 __set_CNTP_CVAL(value);
1912 __ISB();
1913 }
1914
1915 /** \brief Get the physical compare value.
1916 * \return Physical compare value.
1917 */
1918 __STATIC_INLINE uint64_t PL1_GetPhysicalCompareValue(void)
1919 {
1920 return(__get_CNTP_CVAL());
1921 }
1922
1923 /** \brief Configure the timer by setting the control value.
1924 * \param [in] value New timer control value.
1925 */
1926 __STATIC_INLINE void PL1_SetControl(uint32_t value)
1927 {
1928 __set_CNTP_CTL(value);
1929 __ISB();
1930 }
1931
1932 /** \brief Get the control value.
1933 * \return Control value.
1934 */
1935 __STATIC_INLINE uint32_t PL1_GetControl(void)
1936 {
1937 return(__get_CNTP_CTL());
1938 }
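
/* Illustrative sketch (not part of the original header): programming the PL1
   physical timer for a 10 ms downcount using the CNTP_CTL_Type bit-field above.
   The 12 MHz counter frequency is an assumption of the example; CNTFRQ is
   normally programmed once by secure boot firmware. */
#if 0 /* usage sketch only - not compiled */
static void pl1_timer_start_10ms(void)
{
  CNTP_CTL_Type ctl;

  PL1_SetCounterFrequency(12000000U);   /* only if running at a privilege level allowed to write CNTFRQ */
  PL1_SetLoadValue(120000U);            /* 12 MHz x 10 ms                                               */

  ctl.w = 0U;
  ctl.b.ENABLE = 1U;                    /* start the timer              */
  ctl.b.IMASK  = 0U;                    /* do not mask the timer output */
  PL1_SetControl(ctl.w);
}
#endif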
1939
1940 /******************************* VIRTUAL TIMER *******************************/
1941 /** \brief Virtual Timer Control register */
1942
1943 /** \brief Sets the reset value of the virtual timer.
1944 * \param [in] value The value the virtual timer is loaded with.
1945 */
1946 __STATIC_INLINE void VL1_SetCurrentTimerValue(uint32_t value)
1947 {
1948 __set_CNTV_TVAL(value);
1949 __ISB();
1950 }
1951
1952 /** \brief Get the current virtual timer value.
1953 * \return Current virtual timer value.
1954 */
1955 __STATIC_INLINE uint32_t VL1_GetCurrentTimerValue(void)
1956 {
1957 return(__get_CNTV_TVAL());
1958 }
1959
1960 /** \brief Get the current virtual count value.
1961 * \return Current virtual count value.
1962 */
1963 __STATIC_INLINE uint64_t VL1_GetCurrentCountValue(void)
1964 {
1965 return(__get_CNTVCT());
1966 }
1967
1968 /** \brief Set the virtual timer compare value.
1969 * \param [in] value New virtual timer compare value.
1970 */
1971 __STATIC_INLINE void VL1_SetTimerCompareValue(uint64_t value)
1972 {
1973 __set_CNTV_CVAL(value);
1974 __ISB();
1975 }
1976
1977 /** \brief Get the virtual timer compare value.
1978 * \return Virtual timer compare value.
1979 */
1980 __STATIC_INLINE uint64_t VL1_GetTimerCompareValue(void)
1981 {
1982 return(__get_CNTV_CVAL());
1983 }
1984
1985 /** \brief Configure the virtual timer by setting the control value.
1986 * \param [in] value New virtual timer control value.
1987 */
1988 __STATIC_INLINE void VL1_SetControl(uint32_t value)
1989 {
1990 __set_CNTV_CTL(value);
1991 __ISB();
1992 }
1993
1994 /** \brief Get the virtual timer control value.
1995 * \return Virtual timer control value.
1996 */
1997 __STATIC_INLINE uint32_t VL1_GetControl(void)
1998 {
1999 return(__get_CNTV_CTL());
2000 }
2001 /***************************** VIRTUAL TIMER END *****************************/
2002 #endif
2003
2004 /* Private Timer */
2005 #if ((__CORTEX_A == 5U) || (__CORTEX_A == 9U)) || defined(DOXYGEN)
2006 /** \brief Set the load value in the timer's LOAD register.
2007 * \param [in] value The load value to be set.
2008 */
2009 __STATIC_INLINE void PTIM_SetLoadValue(uint32_t value)
2010 {
2011 PTIM->LOAD = value;
2012 }
2013
2014 /** \brief Get the load value from the timer's LOAD register.
2015 * \return Timer_Type::LOAD
2016 */
2017 __STATIC_INLINE uint32_t PTIM_GetLoadValue(void)
2018 {
2019 return(PTIM->LOAD);
2020 }
2021
2022 /** \brief Set the current counter value in the timer's COUNTER register.
2023 */
2024 __STATIC_INLINE void PTIM_SetCurrentValue(uint32_t value)
2025 {
2026 PTIM->COUNTER = value;
2027 }
2028
2029 /** \brief Get the current counter value from the timer's COUNTER register.
2030 * \result Timer_Type::COUNTER
2031 */
2032 __STATIC_INLINE uint32_t PTIM_GetCurrentValue(void)
2033 {
2034 return(PTIM->COUNTER);
2035 }
2036
2037 /** \brief Configure the timer using its CONTROL register.
2038 * \param [in] value The new configuration value to be set.
2039 */
2040 __STATIC_INLINE void PTIM_SetControl(uint32_t value)
2041 {
2042 PTIM->CONTROL = value;
2043 }
2044
2045 /** \brief Get the current timer configuration from the timer's CONTROL register.
2046 * \return Timer_Type::CONTROL
2047 */
2048 __STATIC_INLINE uint32_t PTIM_GetControl(void)
2049 {
2050 return(PTIM->CONTROL);
2051 }
2052
2053 /** \brief Get the event flag from the timer's ISR register.
2054 * \return 0 - flag is not set, 1- flag is set
2055 */
2056 __STATIC_INLINE uint32_t PTIM_GetEventFlag(void)
2057 {
2058 return (PTIM->ISR & 1UL);
2059 }
2060
2061 /** \brief Clear the event flag in the timer's ISR register.
2062 */
2063 __STATIC_INLINE void PTIM_ClearEventFlag(void)
2064 {
2065 PTIM->ISR = 1;
2066 }
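
/* Illustrative sketch (not part of the original header): a periodic private
   timer driven by polling the event flag with the helpers above. timer_tick()
   and the load value are arbitrary example choices. */
#if 0 /* usage sketch only - not compiled */
extern void timer_tick(void);

static void ptim_poll_example(void)
{
  PTIM_SetLoadValue(500000U);                     /* reload value for the downcounter   */
  PTIM_SetControl(PTIM_CONTROL_Enable(1U) |
                  PTIM_CONTROL_AutoReload(1U));   /* free-running periodic mode, no IRQ */

  for (;;) {
    if (PTIM_GetEventFlag() != 0U) {              /* counter reached zero               */
      PTIM_ClearEventFlag();
      timer_tick();
    }
  }
}
#endif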
2067 #endif
2068 #endif
2069
2070 /* ########################## MMU functions ###################################### */
2071
2072 #define SECTION_DESCRIPTOR (0x2)
2073 #define SECTION_MASK (0xFFFFFFFC)
2074
2075 #define SECTION_TEXCB_MASK (0xFFFF8FF3)
2076 #define SECTION_B_SHIFT (2)
2077 #define SECTION_C_SHIFT (3)
2078 #define SECTION_TEX0_SHIFT (12)
2079 #define SECTION_TEX1_SHIFT (13)
2080 #define SECTION_TEX2_SHIFT (14)
2081
2082 #define SECTION_XN_MASK (0xFFFFFFEF)
2083 #define SECTION_XN_SHIFT (4)
2084
2085 #define SECTION_DOMAIN_MASK (0xFFFFFE1F)
2086 #define SECTION_DOMAIN_SHIFT (5)
2087
2088 #define SECTION_P_MASK (0xFFFFFDFF)
2089 #define SECTION_P_SHIFT (9)
2090
2091 #define SECTION_AP_MASK (0xFFFF73FF)
2092 #define SECTION_AP_SHIFT (10)
2093 #define SECTION_AP2_SHIFT (15)
2094
2095 #define SECTION_S_MASK (0xFFFEFFFF)
2096 #define SECTION_S_SHIFT (16)
2097
2098 #define SECTION_NG_MASK (0xFFFDFFFF)
2099 #define SECTION_NG_SHIFT (17)
2100
2101 #define SECTION_NS_MASK (0xFFF7FFFF)
2102 #define SECTION_NS_SHIFT (19)
2103
2104 #define PAGE_L1_DESCRIPTOR (0x1)
2105 #define PAGE_L1_MASK (0xFFFFFFFC)
2106
2107 #define PAGE_L2_4K_DESC (0x2)
2108 #define PAGE_L2_4K_MASK (0xFFFFFFFD)
2109
2110 #define PAGE_L2_64K_DESC (0x1)
2111 #define PAGE_L2_64K_MASK (0xFFFFFFFC)
2112
2113 #define PAGE_4K_TEXCB_MASK (0xFFFFFE33)
2114 #define PAGE_4K_B_SHIFT (2)
2115 #define PAGE_4K_C_SHIFT (3)
2116 #define PAGE_4K_TEX0_SHIFT (6)
2117 #define PAGE_4K_TEX1_SHIFT (7)
2118 #define PAGE_4K_TEX2_SHIFT (8)
2119
2120 #define PAGE_64K_TEXCB_MASK (0xFFFF8FF3)
2121 #define PAGE_64K_B_SHIFT (2)
2122 #define PAGE_64K_C_SHIFT (3)
2123 #define PAGE_64K_TEX0_SHIFT (12)
2124 #define PAGE_64K_TEX1_SHIFT (13)
2125 #define PAGE_64K_TEX2_SHIFT (14)
2126
2127 #define PAGE_TEXCB_MASK (0xFFFF8FF3)
2128 #define PAGE_B_SHIFT (2)
2129 #define PAGE_C_SHIFT (3)
2130 #define PAGE_TEX_SHIFT (12)
2131
2132 #define PAGE_XN_4K_MASK (0xFFFFFFFE)
2133 #define PAGE_XN_4K_SHIFT (0)
2134 #define PAGE_XN_64K_MASK (0xFFFF7FFF)
2135 #define PAGE_XN_64K_SHIFT (15)
2136
2137 #define PAGE_DOMAIN_MASK (0xFFFFFE1F)
2138 #define PAGE_DOMAIN_SHIFT (5)
2139
2140 #define PAGE_P_MASK (0xFFFFFDFF)
2141 #define PAGE_P_SHIFT (9)
2142
2143 #define PAGE_AP_MASK (0xFFFFFDCF)
2144 #define PAGE_AP_SHIFT (4)
2145 #define PAGE_AP2_SHIFT (9)
2146
2147 #define PAGE_S_MASK (0xFFFFFBFF)
2148 #define PAGE_S_SHIFT (10)
2149
2150 #define PAGE_NG_MASK (0xFFFFF7FF)
2151 #define PAGE_NG_SHIFT (11)
2152
2153 #define PAGE_NS_MASK (0xFFFFFFF7)
2154 #define PAGE_NS_SHIFT (3)
2155
2156 #define OFFSET_1M (0x00100000)
2157 #define OFFSET_64K (0x00010000)
2158 #define OFFSET_4K (0x00001000)
2159
2160 #define DESCRIPTOR_FAULT (0x00000000)
2161
2162 /* Attributes enumerations */
2163
2164 /* Region size attributes */
2165 typedef enum
2166 {
2167 SECTION,
2168 PAGE_4k,
2169 PAGE_64k,
2170 } mmu_region_size_Type;
2171
2172 /* Region type attributes */
2173 typedef enum
2174 {
2175 NORMAL,
2176 DEVICE,
2177 SHARED_DEVICE,
2178 NON_SHARED_DEVICE,
2179 STRONGLY_ORDERED
2180 } mmu_memory_Type;
2181
2182 /* Region cacheability attributes */
2183 typedef enum
2184 {
2185 NON_CACHEABLE,
2186 WB_WA,
2187 WT,
2188 WB_NO_WA,
2189 } mmu_cacheability_Type;
2190
2191 /* Region parity check attributes */
2192 typedef enum
2193 {
2194 ECC_DISABLED,
2195 ECC_ENABLED,
2196 } mmu_ecc_check_Type;
2197
2198 /* Region execution attributes */
2199 typedef enum
2200 {
2201 EXECUTE,
2202 NON_EXECUTE,
2203 } mmu_execute_Type;
2204
2205 /* Region global attributes */
2206 typedef enum
2207 {
2208 GLOBAL,
2209 NON_GLOBAL,
2210 } mmu_global_Type;
2211
2212 /* Region shareability attributes */
2213 typedef enum
2214 {
2215 NON_SHARED,
2216 SHARED,
2217 } mmu_shared_Type;
2218
2219 /* Region security attributes */
2220 typedef enum
2221 {
2222 SECURE,
2223 NON_SECURE,
2224 } mmu_secure_Type;
2225
2226 /* Region access attributes */
2227 typedef enum
2228 {
2229 NO_ACCESS,
2230 RW,
2231 READ,
2232 } mmu_access_Type;
2233
2234 /* Memory Region definition */
2235 typedef struct RegionStruct {
2236 mmu_region_size_Type rg_t;
2237 mmu_memory_Type mem_t;
2238 uint8_t domain;
2239 mmu_cacheability_Type inner_norm_t;
2240 mmu_cacheability_Type outer_norm_t;
2241 mmu_ecc_check_Type e_t;
2242 mmu_execute_Type xn_t;
2243 mmu_global_Type g_t;
2244 mmu_secure_Type sec_t;
2245 mmu_access_Type priv_t;
2246 mmu_access_Type user_t;
2247 mmu_shared_Type sh_t;
2248
2249 } mmu_region_attributes_Type;
2250
2251 //Following macros define the descriptors and attributes
2252 //Sect_Normal. Outer & inner wb/wa, non-shareable, executable, rw, domain 0
2253 #define section_normal(descriptor_l1, region) region.rg_t = SECTION; \
2254 region.domain = 0x0; \
2255 region.e_t = ECC_DISABLED; \
2256 region.g_t = GLOBAL; \
2257 region.inner_norm_t = WB_WA; \
2258 region.outer_norm_t = WB_WA; \
2259 region.mem_t = NORMAL; \
2260 region.sec_t = SECURE; \
2261 region.xn_t = EXECUTE; \
2262 region.priv_t = RW; \
2263 region.user_t = RW; \
2264 region.sh_t = NON_SHARED; \
2265 MMU_GetSectionDescriptor(&descriptor_l1, region);
2266
2267 //Sect_Normal_NC. Outer & inner non-cacheable, non-shareable, executable, rw, domain 0
2268 #define section_normal_nc(descriptor_l1, region) region.rg_t = SECTION; \
2269 region.domain = 0x0; \
2270 region.e_t = ECC_DISABLED; \
2271 region.g_t = GLOBAL; \
2272 region.inner_norm_t = NON_CACHEABLE; \
2273 region.outer_norm_t = NON_CACHEABLE; \
2274 region.mem_t = NORMAL; \
2275 region.sec_t = SECURE; \
2276 region.xn_t = EXECUTE; \
2277 region.priv_t = RW; \
2278 region.user_t = RW; \
2279 region.sh_t = NON_SHARED; \
2280 MMU_GetSectionDescriptor(&descriptor_l1, region);
2281
2282 //Sect_Normal_Cod. Outer & inner wb/wa, non-shareable, executable, ro, domain 0
2283 #define section_normal_cod(descriptor_l1, region) region.rg_t = SECTION; \
2284 region.domain = 0x0; \
2285 region.e_t = ECC_DISABLED; \
2286 region.g_t = GLOBAL; \
2287 region.inner_norm_t = WB_WA; \
2288 region.outer_norm_t = WB_WA; \
2289 region.mem_t = NORMAL; \
2290 region.sec_t = SECURE; \
2291 region.xn_t = EXECUTE; \
2292 region.priv_t = READ; \
2293 region.user_t = READ; \
2294 region.sh_t = NON_SHARED; \
2295 MMU_GetSectionDescriptor(&descriptor_l1, region);
2296
2297 //Sect_Normal_RO. Sect_Normal_Cod, but not executable
2298 #define section_normal_ro(descriptor_l1, region) region.rg_t = SECTION; \
2299 region.domain = 0x0; \
2300 region.e_t = ECC_DISABLED; \
2301 region.g_t = GLOBAL; \
2302 region.inner_norm_t = WB_WA; \
2303 region.outer_norm_t = WB_WA; \
2304 region.mem_t = NORMAL; \
2305 region.sec_t = SECURE; \
2306 region.xn_t = NON_EXECUTE; \
2307 region.priv_t = READ; \
2308 region.user_t = READ; \
2309 region.sh_t = NON_SHARED; \
2310 MMU_GetSectionDescriptor(&descriptor_l1, region);
2311
2312 //Sect_Normal_RW. Sect_Normal_Cod, but writeable and not executable
2313 #define section_normal_rw(descriptor_l1, region) region.rg_t = SECTION; \
2314 region.domain = 0x0; \
2315 region.e_t = ECC_DISABLED; \
2316 region.g_t = GLOBAL; \
2317 region.inner_norm_t = WB_WA; \
2318 region.outer_norm_t = WB_WA; \
2319 region.mem_t = NORMAL; \
2320 region.sec_t = SECURE; \
2321 region.xn_t = NON_EXECUTE; \
2322 region.priv_t = RW; \
2323 region.user_t = RW; \
2324 region.sh_t = NON_SHARED; \
2325 MMU_GetSectionDescriptor(&descriptor_l1, region);
2326 //Sect_SO. Strongly-ordered (therefore shareable), not executable, rw, domain 0, base addr 0
2327 #define section_so(descriptor_l1, region) region.rg_t = SECTION; \
2328 region.domain = 0x0; \
2329 region.e_t = ECC_DISABLED; \
2330 region.g_t = GLOBAL; \
2331 region.inner_norm_t = NON_CACHEABLE; \
2332 region.outer_norm_t = NON_CACHEABLE; \
2333 region.mem_t = STRONGLY_ORDERED; \
2334 region.sec_t = SECURE; \
2335 region.xn_t = NON_EXECUTE; \
2336 region.priv_t = RW; \
2337 region.user_t = RW; \
2338 region.sh_t = NON_SHARED; \
2339 MMU_GetSectionDescriptor(&descriptor_l1, region);
2340
2341 //Sect_Device_RO. Device, non-shareable, non-executable, ro, domain 0, base addr 0
2342 #define section_device_ro(descriptor_l1, region) region.rg_t = SECTION; \
2343 region.domain = 0x0; \
2344 region.e_t = ECC_DISABLED; \
2345 region.g_t = GLOBAL; \
2346 region.inner_norm_t = NON_CACHEABLE; \
2347 region.outer_norm_t = NON_CACHEABLE; \
2348 region.mem_t = STRONGLY_ORDERED; \
2349 region.sec_t = SECURE; \
2350 region.xn_t = NON_EXECUTE; \
2351 region.priv_t = READ; \
2352 region.user_t = READ; \
2353 region.sh_t = NON_SHARED; \
2354 MMU_GetSectionDescriptor(&descriptor_l1, region);
2355
2356 //Sect_Device_RW. Sect_Device_RO, but writeable
2357 #define section_device_rw(descriptor_l1, region) region.rg_t = SECTION; \
2358 region.domain = 0x0; \
2359 region.e_t = ECC_DISABLED; \
2360 region.g_t = GLOBAL; \
2361 region.inner_norm_t = NON_CACHEABLE; \
2362 region.outer_norm_t = NON_CACHEABLE; \
2363 region.mem_t = STRONGLY_ORDERED; \
2364 region.sec_t = SECURE; \
2365 region.xn_t = NON_EXECUTE; \
2366 region.priv_t = RW; \
2367 region.user_t = RW; \
2368 region.sh_t = NON_SHARED; \
2369 MMU_GetSectionDescriptor(&descriptor_l1, region);
2370 //Page_4k_Device_RW. Shared device, not executable, rw, domain 0
2371 #define page4k_device_rw(descriptor_l1, descriptor_l2, region) region.rg_t = PAGE_4k; \
2372 region.domain = 0x0; \
2373 region.e_t = ECC_DISABLED; \
2374 region.g_t = GLOBAL; \
2375 region.inner_norm_t = NON_CACHEABLE; \
2376 region.outer_norm_t = NON_CACHEABLE; \
2377 region.mem_t = SHARED_DEVICE; \
2378 region.sec_t = SECURE; \
2379 region.xn_t = NON_EXECUTE; \
2380 region.priv_t = RW; \
2381 region.user_t = RW; \
2382 region.sh_t = NON_SHARED; \
2383 MMU_GetPageDescriptor(&descriptor_l1, &descriptor_l2, region);
2384
2385 //Page_64k_Device_RW. Shared device, not executable, rw, domain 0
2386 #define page64k_device_rw(descriptor_l1, descriptor_l2, region) region.rg_t = PAGE_64k; \
2387 region.domain = 0x0; \
2388 region.e_t = ECC_DISABLED; \
2389 region.g_t = GLOBAL; \
2390 region.inner_norm_t = NON_CACHEABLE; \
2391 region.outer_norm_t = NON_CACHEABLE; \
2392 region.mem_t = SHARED_DEVICE; \
2393 region.sec_t = SECURE; \
2394 region.xn_t = NON_EXECUTE; \
2395 region.priv_t = RW; \
2396 region.user_t = RW; \
2397 region.sh_t = NON_SHARED; \
2398 MMU_GetPageDescriptor(&descriptor_l1, &descriptor_l2, region);
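
/* Illustrative sketch (not part of the original header): using one of the
   attribute macros above to build a single L1 section descriptor. ttb[] stands
   for the level 1 translation table, the section index 0x123 is arbitrary, and
   MMU_GetSectionDescriptor is the helper (provided further down in this header)
   that the macro expands to. */
#if 0 /* usage sketch only - not compiled */
extern uint32_t ttb[4096];                       /* 4096 x 1 MB sections cover 4 GB   */

static void map_one_normal_section(void)
{
  mmu_region_attributes_Type region;
  uint32_t descriptor_l1;

  section_normal(descriptor_l1, region);         /* WB/WA, executable, RW, domain 0   */
  ttb[0x123] = (0x123U << 20) | descriptor_l1;   /* section base address + attributes */
}
#endif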
2399
2400 /** \brief Set section execution-never attribute
2401
2402 \param [out] descriptor_l1 L1 descriptor.
2403 \param [in] xn Section execution-never attribute : EXECUTE , NON_EXECUTE.
2404
2405 \return 0
2406 */
2407 __STATIC_INLINE int MMU_XNSection(uint32_t *descriptor_l1, mmu_execute_Type xn)
2408 {
2409 *descriptor_l1 &= SECTION_XN_MASK;
2410 *descriptor_l1 |= ((xn & 0x1) << SECTION_XN_SHIFT);
2411 return 0;
2412 }
2413
2414 /** \brief Set section domain
2415
2416 \param [out] descriptor_l1 L1 descriptor.
2417 \param [in] domain Section domain
2418
2419 \return 0
2420 */
2421 __STATIC_INLINE int MMU_DomainSection(uint32_t *descriptor_l1, uint8_t domain)
2422 {
2423 *descriptor_l1 &= SECTION_DOMAIN_MASK;
2424 *descriptor_l1 |= ((domain & 0xF) << SECTION_DOMAIN_SHIFT);
2425 return 0;
2426 }
2427
2428 /** \brief Set section parity check
2429
2430 \param [out] descriptor_l1 L1 descriptor.
2431 \param [in] p_bit Parity check: ECC_DISABLED, ECC_ENABLED
2432
2433 \return 0
2434 */
2435 __STATIC_INLINE int MMU_PSection(uint32_t *descriptor_l1, mmu_ecc_check_Type p_bit)
2436 {
2437 *descriptor_l1 &= SECTION_P_MASK;
2438 *descriptor_l1 |= ((p_bit & 0x1) << SECTION_P_SHIFT);
2439 return 0;
2440 }
2441
2442 /** \brief Set section access privileges
2443
2444 \param [out] descriptor_l1 L1 descriptor.
2445 \param [in] user User Level Access: NO_ACCESS, RW, READ
2446 \param [in] priv Privilege Level Access: NO_ACCESS, RW, READ
2447 \param [in] afe Access flag enable
2448
2449 \return 0
2450 */
2451 __STATIC_INLINE int MMU_APSection(uint32_t *descriptor_l1, mmu_access_Type user, mmu_access_Type priv, uint32_t afe)
2452 {
2453 uint32_t ap = 0;
2454
2455 if (afe == 0) { //full access
2456 if ((priv == NO_ACCESS) && (user == NO_ACCESS)) { ap = 0x0; }
2457 else if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; }
2458 else if ((priv == RW) && (user == READ)) { ap = 0x2; }
2459 else if ((priv == RW) && (user == RW)) { ap = 0x3; }
2460 else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; }
2461 else if ((priv == READ) && (user == READ)) { ap = 0x7; }
2462 }
2463
2464 else { //Simplified access
2465 if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; }
2466 else if ((priv == RW) && (user == RW)) { ap = 0x3; }
2467 else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; }
2468 else if ((priv == READ) && (user == READ)) { ap = 0x7; }
2469 }
2470
2471 *descriptor_l1 &= SECTION_AP_MASK;
2472 *descriptor_l1 |= (ap & 0x3) << SECTION_AP_SHIFT;
2473 *descriptor_l1 |= ((ap & 0x4)>>2) << SECTION_AP2_SHIFT;
2474
2475 return 0;
2476 }
2477
2478 /** \brief Set section shareability
2479
2480 \param [out] descriptor_l1 L1 descriptor.
2481 \param [in] s_bit Section shareability: NON_SHARED, SHARED
2482
2483 \return 0
2484 */
2485 __STATIC_INLINE int MMU_SharedSection(uint32_t *descriptor_l1, mmu_shared_Type s_bit)
2486 {
2487 *descriptor_l1 &= SECTION_S_MASK;
2488 *descriptor_l1 |= ((s_bit & 0x1) << SECTION_S_SHIFT);
2489 return 0;
2490 }
2491
2492 /** \brief Set section Global attribute
2493
2494 \param [out] descriptor_l1 L1 descriptor.
2495 \param [in] g_bit Section attribute: GLOBAL, NON_GLOBAL
2496
2497 \return 0
2498 */
2499 __STATIC_INLINE int MMU_GlobalSection(uint32_t *descriptor_l1, mmu_global_Type g_bit)
2500 {
2501 *descriptor_l1 &= SECTION_NG_MASK;
2502 *descriptor_l1 |= ((g_bit & 0x1) << SECTION_NG_SHIFT);
2503 return 0;
2504 }
2505
2506 /** \brief Set section Security attribute
2507
2508 \param [out] descriptor_l1 L1 descriptor.
2509 \param [in] s_bit Section Security attribute: SECURE, NON_SECURE
2510
2511 \return 0
2512 */
2513 __STATIC_INLINE int MMU_SecureSection(uint32_t *descriptor_l1, mmu_secure_Type s_bit)
2514 {
2515 *descriptor_l1 &= SECTION_NS_MASK;
2516 *descriptor_l1 |= ((s_bit & 0x1) << SECTION_NS_SHIFT);
2517 return 0;
2518 }
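
/* Illustrative sketch (not part of the original header): adjusting individual
   attributes of an existing L1 section descriptor with the setters above, here
   revoking execute permission, making it read-only and marking it shareable. */
#if 0 /* usage sketch only - not compiled */
static void harden_section(uint32_t *descriptor_l1)
{
  MMU_XNSection(descriptor_l1, NON_EXECUTE);      /* set the XN bit            */
  MMU_APSection(descriptor_l1, READ, READ, 0U);   /* user/privileged read-only */
  MMU_SharedSection(descriptor_l1, SHARED);       /* set the S bit             */
}
#endif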

/* Page 4k or 64k */
/** \brief Set 4k/64k page execution-never attribute

  \param [out] descriptor_l2 L2 descriptor.
  \param [in] xn Page execution-never attribute: EXECUTE, NON_EXECUTE
  \param [in] page Page size: PAGE_4k, PAGE_64k

  \return 0
*/
__STATIC_INLINE int MMU_XNPage(uint32_t *descriptor_l2, mmu_execute_Type xn, mmu_region_size_Type page)
{
  if (page == PAGE_4k)
  {
    *descriptor_l2 &= PAGE_XN_4K_MASK;
    *descriptor_l2 |= ((xn & 0x1) << PAGE_XN_4K_SHIFT);
  }
  else
  {
    *descriptor_l2 &= PAGE_XN_64K_MASK;
    *descriptor_l2 |= ((xn & 0x1) << PAGE_XN_64K_SHIFT);
  }
  return 0;
}

/** \brief Set 4k/64k page domain

  \param [out] descriptor_l1 L1 descriptor.
  \param [in] domain Page domain

  \return 0
*/
__STATIC_INLINE int MMU_DomainPage(uint32_t *descriptor_l1, uint8_t domain)
{
  *descriptor_l1 &= PAGE_DOMAIN_MASK;
  *descriptor_l1 |= ((domain & 0xf) << PAGE_DOMAIN_SHIFT);
  return 0;
}

/** \brief Set 4k/64k page parity check

  \param [out] descriptor_l1 L1 descriptor.
  \param [in] p_bit Parity check: ECC_DISABLED, ECC_ENABLED

  \return 0
*/
__STATIC_INLINE int MMU_PPage(uint32_t *descriptor_l1, mmu_ecc_check_Type p_bit)
{
  *descriptor_l1 &= SECTION_P_MASK;
  *descriptor_l1 |= ((p_bit & 0x1) << SECTION_P_SHIFT);
  return 0;
}

/** \brief Set 4k/64k page access privileges

  \param [out] descriptor_l2 L2 descriptor.
  \param [in] user User Level Access: NO_ACCESS, RW, READ
  \param [in] priv Privilege Level Access: NO_ACCESS, RW, READ
  \param [in] afe Access flag enable

  \return 0
*/
__STATIC_INLINE int MMU_APPage(uint32_t *descriptor_l2, mmu_access_Type user, mmu_access_Type priv, uint32_t afe)
{
  uint32_t ap = 0;

  if (afe == 0) { // full access (legacy AP[2:0] model)
    if ((priv == NO_ACCESS) && (user == NO_ACCESS)) { ap = 0x0; }
    else if ((priv == RW) && (user == NO_ACCESS))   { ap = 0x1; }
    else if ((priv == RW) && (user == READ))        { ap = 0x2; }
    else if ((priv == RW) && (user == RW))          { ap = 0x3; }
    else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; }
    else if ((priv == READ) && (user == READ))      { ap = 0x6; }
  }

  else { // simplified access model (AFE = 1)
    if ((priv == RW) && (user == NO_ACCESS))        { ap = 0x1; }
    else if ((priv == RW) && (user == RW))          { ap = 0x3; }
    else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; }
    else if ((priv == READ) && (user == READ))      { ap = 0x7; }
  }

  *descriptor_l2 &= PAGE_AP_MASK;
  *descriptor_l2 |= (ap & 0x3) << PAGE_AP_SHIFT;
  *descriptor_l2 |= ((ap & 0x4) >> 2) << PAGE_AP2_SHIFT;

  return 0;
}

/** \brief Set 4k/64k page shareability

  \param [out] descriptor_l2 L2 descriptor.
  \param [in] s_bit 4k/64k page shareability: NON_SHARED, SHARED

  \return 0
*/
__STATIC_INLINE int MMU_SharedPage(uint32_t *descriptor_l2, mmu_shared_Type s_bit)
{
  *descriptor_l2 &= PAGE_S_MASK;
  *descriptor_l2 |= ((s_bit & 0x1) << PAGE_S_SHIFT);
  return 0;
}

/** \brief Set 4k/64k page Global attribute

  \param [out] descriptor_l2 L2 descriptor.
  \param [in] g_bit 4k/64k page attribute: GLOBAL, NON_GLOBAL

  \return 0
*/
__STATIC_INLINE int MMU_GlobalPage(uint32_t *descriptor_l2, mmu_global_Type g_bit)
{
  *descriptor_l2 &= PAGE_NG_MASK;
  *descriptor_l2 |= ((g_bit & 0x1) << PAGE_NG_SHIFT);
  return 0;
}

/** \brief Set 4k/64k page Security attribute

  \param [out] descriptor_l1 L1 descriptor.
  \param [in] s_bit 4k/64k page Security attribute: SECURE, NON_SECURE

  \return 0
*/
__STATIC_INLINE int MMU_SecurePage(uint32_t *descriptor_l1, mmu_secure_Type s_bit)
{
  *descriptor_l1 &= PAGE_NS_MASK;
  *descriptor_l1 |= ((s_bit & 0x1) << PAGE_NS_SHIFT);
  return 0;
}

/** \brief Set Section memory attributes

  \param [out] descriptor_l1 L1 descriptor.
  \param [in] mem Section memory type: NORMAL, DEVICE, SHARED_DEVICE, NON_SHARED_DEVICE, STRONGLY_ORDERED
  \param [in] outer Outer cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA
  \param [in] inner Inner cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA

  \return 0
*/
__STATIC_INLINE int MMU_MemorySection(uint32_t *descriptor_l1, mmu_memory_Type mem, mmu_cacheability_Type outer, mmu_cacheability_Type inner)
{
  *descriptor_l1 &= SECTION_TEXCB_MASK;

  if (STRONGLY_ORDERED == mem)
  {
    return 0;
  }
  else if (SHARED_DEVICE == mem)
  {
    *descriptor_l1 |= (1 << SECTION_B_SHIFT);
  }
  else if (NON_SHARED_DEVICE == mem)
  {
    *descriptor_l1 |= (1 << SECTION_TEX1_SHIFT);
  }
  else if (NORMAL == mem)
  {
    *descriptor_l1 |= 1 << SECTION_TEX2_SHIFT;
    switch(inner)
    {
      case NON_CACHEABLE:
        break;
      case WB_WA:
        *descriptor_l1 |= (1 << SECTION_B_SHIFT);
        break;
      case WT:
        *descriptor_l1 |= 1 << SECTION_C_SHIFT;
        break;
      case WB_NO_WA:
        *descriptor_l1 |= (1 << SECTION_B_SHIFT) | (1 << SECTION_C_SHIFT);
        break;
    }
    switch(outer)
    {
      case NON_CACHEABLE:
        break;
      case WB_WA:
        *descriptor_l1 |= (1 << SECTION_TEX0_SHIFT);
        break;
      case WT:
        *descriptor_l1 |= 1 << SECTION_TEX1_SHIFT;
        break;
      case WB_NO_WA:
        // outer write-back, no write-allocate: TEX[1:0] = 0b11
        *descriptor_l1 |= (1 << SECTION_TEX0_SHIFT) | (1 << SECTION_TEX1_SHIFT);
        break;
    }
  }
  return 0;
}
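
/* Usage sketch (illustrative): Normal memory, inner and outer write-back
   write-allocate. With TEX remap disabled (MMU_Enable() clears SCTLR.TRE),
   this sets TEX[2]=1, C/B for the inner policy and TEX[1:0] for the outer one:

     uint32_t l1_desc = 0;                          // hypothetical local descriptor
     MMU_MemorySection(&l1_desc, NORMAL, WB_WA, WB_WA);
*/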

/** \brief Set 4k/64k page memory attributes

  \param [out] descriptor_l2 L2 descriptor.
  \param [in] mem 4k/64k page memory type: NORMAL, DEVICE, SHARED_DEVICE, NON_SHARED_DEVICE, STRONGLY_ORDERED
  \param [in] outer Outer cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA
  \param [in] inner Inner cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA
  \param [in] page Page size

  \return 0
*/
__STATIC_INLINE int MMU_MemoryPage(uint32_t *descriptor_l2, mmu_memory_Type mem, mmu_cacheability_Type outer, mmu_cacheability_Type inner, mmu_region_size_Type page)
{
  *descriptor_l2 &= PAGE_4K_TEXCB_MASK;

  if (page == PAGE_64k)
  {
    // same TEX/C/B layout as a section
    MMU_MemorySection(descriptor_l2, mem, outer, inner);
  }
  else
  {
    if (STRONGLY_ORDERED == mem)
    {
      return 0;
    }
    else if (SHARED_DEVICE == mem)
    {
      *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT);
    }
    else if (NON_SHARED_DEVICE == mem)
    {
      *descriptor_l2 |= (1 << PAGE_4K_TEX1_SHIFT);
    }
    else if (NORMAL == mem)
    {
      *descriptor_l2 |= 1 << PAGE_4K_TEX2_SHIFT;
      switch(inner)
      {
        case NON_CACHEABLE:
          break;
        case WB_WA:
          *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT);
          break;
        case WT:
          *descriptor_l2 |= 1 << PAGE_4K_C_SHIFT;
          break;
        case WB_NO_WA:
          *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT) | (1 << PAGE_4K_C_SHIFT);
          break;
      }
      switch(outer)
      {
        case NON_CACHEABLE:
          break;
        case WB_WA:
          *descriptor_l2 |= (1 << PAGE_4K_TEX0_SHIFT);
          break;
        case WT:
          *descriptor_l2 |= 1 << PAGE_4K_TEX1_SHIFT;
          break;
        case WB_NO_WA:
          // outer write-back, no write-allocate: TEX[1:0] = 0b11
          *descriptor_l2 |= (1 << PAGE_4K_TEX0_SHIFT) | (1 << PAGE_4K_TEX1_SHIFT);
          break;
      }
    }
  }

  return 0;
}

/** \brief Create a L1 section descriptor

  \param [out] descriptor L1 descriptor
  \param [in] reg Section attributes

  \return 0
*/
__STATIC_INLINE int MMU_GetSectionDescriptor(uint32_t *descriptor, mmu_region_attributes_Type reg)
{
  *descriptor = 0;

  MMU_MemorySection(descriptor, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t);
  MMU_XNSection(descriptor, reg.xn_t);
  MMU_DomainSection(descriptor, reg.domain);
  MMU_PSection(descriptor, reg.e_t);
  MMU_APSection(descriptor, reg.user_t, reg.priv_t, 1);
  MMU_SharedSection(descriptor, reg.sh_t);
  MMU_GlobalSection(descriptor, reg.g_t);
  MMU_SecureSection(descriptor, reg.sec_t);
  *descriptor &= SECTION_MASK;
  *descriptor |= SECTION_DESCRIPTOR;

  return 0;
}
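
/* Usage sketch (illustrative attribute values): populate a
   mmu_region_attributes_Type for a global, executable, non-shared Normal
   1 MB section and derive its L1 descriptor:

     mmu_region_attributes_Type region;
     uint32_t sect_normal;            // hypothetical local for the resulting descriptor

     region.rg_t         = SECTION;
     region.domain       = 0x0;
     region.e_t          = ECC_DISABLED;
     region.g_t          = GLOBAL;
     region.inner_norm_t = WB_WA;
     region.outer_norm_t = WB_WA;
     region.mem_t        = NORMAL;
     region.sec_t        = SECURE;
     region.xn_t         = EXECUTE;
     region.priv_t       = RW;
     region.user_t       = RW;
     region.sh_t         = NON_SHARED;

     MMU_GetSectionDescriptor(&sect_normal, region);
*/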


/** \brief Create a L1 and L2 4k/64k page descriptor

  \param [out] descriptor L1 descriptor
  \param [out] descriptor2 L2 descriptor
  \param [in] reg 4k/64k page attributes

  \return 0
*/
__STATIC_INLINE int MMU_GetPageDescriptor(uint32_t *descriptor, uint32_t *descriptor2, mmu_region_attributes_Type reg)
{
  *descriptor  = 0;
  *descriptor2 = 0;

  switch (reg.rg_t)
  {
    case PAGE_4k:
      MMU_MemoryPage(descriptor2, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t, PAGE_4k);
      MMU_XNPage(descriptor2, reg.xn_t, PAGE_4k);
      MMU_DomainPage(descriptor, reg.domain);
      MMU_PPage(descriptor, reg.e_t);
      MMU_APPage(descriptor2, reg.user_t, reg.priv_t, 1);
      MMU_SharedPage(descriptor2, reg.sh_t);
      MMU_GlobalPage(descriptor2, reg.g_t);
      MMU_SecurePage(descriptor, reg.sec_t);
      *descriptor  &= PAGE_L1_MASK;
      *descriptor  |= PAGE_L1_DESCRIPTOR;
      *descriptor2 &= PAGE_L2_4K_MASK;
      *descriptor2 |= PAGE_L2_4K_DESC;
      break;

    case PAGE_64k:
      MMU_MemoryPage(descriptor2, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t, PAGE_64k);
      MMU_XNPage(descriptor2, reg.xn_t, PAGE_64k);
      MMU_DomainPage(descriptor, reg.domain);
      MMU_PPage(descriptor, reg.e_t);
      MMU_APPage(descriptor2, reg.user_t, reg.priv_t, 1);
      MMU_SharedPage(descriptor2, reg.sh_t);
      MMU_GlobalPage(descriptor2, reg.g_t);
      MMU_SecurePage(descriptor, reg.sec_t);
      *descriptor  &= PAGE_L1_MASK;
      *descriptor  |= PAGE_L1_DESCRIPTOR;
      *descriptor2 &= PAGE_L2_64K_MASK;
      *descriptor2 |= PAGE_L2_64K_DESC;
      break;

    case SECTION:
      //error
      break;
  }

  return 0;
}
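
/* Usage sketch (illustrative attribute values): derive the L1 page-table entry
   and the L2 small-page entry for a 4 kB non-executable, privileged-only
   read/write data page:

     mmu_region_attributes_Type region;
     uint32_t page_l1, page_l2;       // hypothetical locals for the resulting descriptors

     region.rg_t         = PAGE_4k;
     region.domain       = 0x0;
     region.e_t          = ECC_DISABLED;
     region.g_t          = GLOBAL;
     region.inner_norm_t = WB_WA;
     region.outer_norm_t = WB_WA;
     region.mem_t        = NORMAL;
     region.sec_t        = SECURE;
     region.xn_t         = NON_EXECUTE;
     region.priv_t       = RW;
     region.user_t       = NO_ACCESS;
     region.sh_t         = NON_SHARED;

     MMU_GetPageDescriptor(&page_l1, &page_l2, region);
*/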

/** \brief Create a 1MB Section

  \param [in] ttb Translation table base address
  \param [in] base_address Section base address
  \param [in] count Number of sections to create
  \param [in] descriptor_l1 L1 descriptor (region attributes)

*/
__STATIC_INLINE void MMU_TTSection(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1)
{
  uint32_t offset;
  uint32_t entry;
  uint32_t i;

  offset = base_address >> 20;
  entry  = (base_address & 0xFFF00000) | descriptor_l1;

  // each L1 entry is 4 bytes wide; index the table by the 1 MB section number
  ttb = ttb + offset;

  for (i = 0; i < count; i++ )
  {
    *ttb++ = entry;
    entry += OFFSET_1M;
  }
}
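
/* Usage sketch: assuming `ttb` points to the 16 kB L1 translation table and
   `sect_normal` is a section descriptor built with MMU_GetSectionDescriptor()
   (names are illustrative), map 64 MB of RAM at 0x80000000 as 64 consecutive
   1 MB sections:

     MMU_TTSection(ttb, 0x80000000U, 64U, sect_normal);
*/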

/** \brief Create a 4k page entry

  \param [in] ttb L1 table base address
  \param [in] base_address 4k base address
  \param [in] count Number of 4k pages to create
  \param [in] descriptor_l1 L1 descriptor (region attributes)
  \param [in] ttb_l2 L2 table base address
  \param [in] descriptor_l2 L2 descriptor (region attributes)

*/
__STATIC_INLINE void MMU_TTPage4k(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1, uint32_t *ttb_l2, uint32_t descriptor_l2)
{
  uint32_t offset, offset2;
  uint32_t entry, entry2;
  uint32_t i;

  offset = base_address >> 20;
  entry  = ((int)ttb_l2 & 0xFFFFFC00) | descriptor_l1;

  // point the L1 entry for this 1 MB region at the L2 table
  ttb += offset;
  *ttb = entry;

  // index the L2 table: one 4-byte entry per 4 kB page
  offset2 = (base_address & 0xff000) >> 12;
  ttb_l2 += offset2;
  entry2 = (base_address & 0xFFFFF000) | descriptor_l2;
  for (i = 0; i < count; i++ )
  {
    *ttb_l2++ = entry2;
    entry2 += OFFSET_4K;
  }
}
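
/* Usage sketch: assuming `ttb` is the L1 table, `ttb_l2` points to a 1 kB L2
   table covering this 1 MB region, and `page_l1`/`page_l2` were produced by
   MMU_GetPageDescriptor() above (names are illustrative), map 256 x 4 kB pages
   starting at 0x80100000:

     MMU_TTPage4k(ttb, 0x80100000U, 256U, page_l1, ttb_l2, page_l2);
*/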

/** \brief Create a 64k page entry

  \param [in] ttb L1 table base address
  \param [in] base_address 64k base address
  \param [in] count Number of 64k pages to create
  \param [in] descriptor_l1 L1 descriptor (region attributes)
  \param [in] ttb_l2 L2 table base address
  \param [in] descriptor_l2 L2 descriptor (region attributes)

*/
__STATIC_INLINE void MMU_TTPage64k(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1, uint32_t *ttb_l2, uint32_t descriptor_l2)
{
  uint32_t offset, offset2;
  uint32_t entry, entry2;
  uint32_t i, j;

  offset = base_address >> 20;
  entry  = ((int)ttb_l2 & 0xFFFFFC00) | descriptor_l1;

  // point the L1 entry for this 1 MB region at the L2 table
  ttb += offset;
  *ttb = entry;

  offset2 = (base_address & 0xff000) >> 12;
  ttb_l2 += offset2;
  entry2 = (base_address & 0xFFFF0000) | descriptor_l2;
  for (i = 0; i < count; i++ )
  {
    // a 64 kB large page is replicated in 16 consecutive L2 entries
    for (j = 0; j < 16; j++)
    {
      *ttb_l2++ = entry2;
    }
    entry2 += OFFSET_64K;
  }
}

/** \brief Enable MMU
*/
__STATIC_INLINE void MMU_Enable(void)
{
  // Set M bit 0 to enable the MMU
  // Set AFE bit to enable simplified access permissions model
  // Clear TRE bit to disable TEX remap and A bit to disable strict alignment fault checking
  __set_SCTLR( (__get_SCTLR() & ~(1 << 28) & ~(1 << 1)) | 1 | (1 << 29));
  __ISB();
}

/** \brief Disable MMU
*/
__STATIC_INLINE void MMU_Disable(void)
{
  // Clear M bit 0 to disable the MMU
  __set_SCTLR( __get_SCTLR() & ~1);
  __ISB();
}

/** \brief Invalidate entire unified TLB
*/
__STATIC_INLINE void MMU_InvalidateTLB(void)
{
  __set_TLBIALL(0);
  __DSB();     // ensure completion of the invalidation
  __ISB();     // ensure instruction fetch path sees new state
}
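
/* Bring-up sketch (assumptions: a populated L1 table at `ttb`, the CP15 helpers
   __set_TTBR0()/__set_DACR() provided by cmsis_cp15.h, and all domains set to
   "client" so the AP bits are checked; exact TTBR0 attribute bits depend on the
   system and are omitted here):

     __set_TTBR0((uint32_t)ttb);   // translation table base; OR in cacheability bits if required
     __set_DACR(0x55555555U);      // every domain: client
     MMU_InvalidateTLB();          // discard stale translations
     MMU_Enable();                 // SCTLR: M=1, AFE=1, TRE=0, A=0
*/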


#ifdef __cplusplus
}
#endif

#endif /* __CORE_CA_H_DEPENDANT */

#endif /* __CMSIS_GENERIC */