1 /*
2 * Copyright (c) 2015-2016, Freescale Semiconductor, Inc.
3 * Copyright 2016-2022, 2024 NXP
4 * All rights reserved.
5 *
6 * SPDX-License-Identifier: BSD-3-Clause
7 */
8
9 #ifndef FSL_COMMON_ARM_H_
10 #define FSL_COMMON_ARM_H_
11
12 /*
13 * For CMSIS pack RTE.
 * CMSIS pack RTE generates "RTE_Components.h" which contains the statements
15 * of the related <RTE_Components_h> element for all selected software components.
16 */
17 #ifdef _RTE_
18 #include "RTE_Components.h"
19 #endif
20
21 /*!
22 * @addtogroup ksdk_common
23 * @{
24 */
25
26 /*! @name Atomic modification
27 *
28 * These macros are used for atomic access, such as read-modify-write
29 * to the peripheral registers.
30 *
31 * - SDK_ATOMIC_LOCAL_ADD
32 * - SDK_ATOMIC_LOCAL_SET
33 * - SDK_ATOMIC_LOCAL_CLEAR
34 * - SDK_ATOMIC_LOCAL_TOGGLE
35 * - SDK_ATOMIC_LOCAL_CLEAR_AND_SET
36 *
37 * Take SDK_ATOMIC_LOCAL_CLEAR_AND_SET as an example: the parameter @c addr
38 * means the address of the peripheral register or variable you want to modify
39 * atomically, the parameter @c clearBits is the bits to clear, the parameter
 * @c setBits is the bits to set.
41 * For example, to set a 32-bit register bit1:bit0 to 0b10, use like this:
42 *
43 * @code
44 volatile uint32_t * reg = (volatile uint32_t *)REG_ADDR;
45
46 SDK_ATOMIC_LOCAL_CLEAR_AND_SET(reg, 0x03, 0x02);
47 @endcode
48 *
49 * In this example, the register bit1:bit0 are cleared and bit1 is set, as a result,
50 * register bit1:bit0 = 0b10.
51 *
 * @note For platforms that don't support exclusive load and store, these macros
 * disable the global interrupt to protect the modification.
54 *
55 * @note These macros only guarantee the local processor atomic operations. For
56 * the multi-processor devices, use hardware semaphore such as SEMA42 to
57 * guarantee exclusive access if necessary.
58 *
59 * @{
60 */
61
62 /* clang-format off */
63 #if ((defined(__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
64 (defined(__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
65 (defined(__ARM_ARCH_8M_MAIN__) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
66 (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ == 1)))
67 /* clang-format on */
68
69 /* If the LDREX and STREX are supported, use them. */
/* 8-bit read-modify-write: exclusive-load *addr into val, apply ops, then
 * retry the whole sequence until the exclusive store succeeds, i.e. until no
 * other context touched the location in between. */
#define _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, val, ops) \
    do \
    { \
        (val) = __LDREXB(addr); \
        (ops); \
    } while (0UL != __STREXB((val), (addr)))

/* 16-bit variant of the exclusive-access read-modify-write retry loop. */
#define _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, val, ops) \
    do \
    { \
        (val) = __LDREXH(addr); \
        (ops); \
    } while (0UL != __STREXH((val), (addr)))

/* 32-bit variant of the exclusive-access read-modify-write retry loop. */
#define _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, val, ops) \
    do \
    { \
        (val) = __LDREXW(addr); \
        (ops); \
    } while (0UL != __STREXW((val), (addr)))
90
/* Atomically add @p val to the 8-bit value at @p addr. */
static inline void _SDK_AtomicLocalAdd1Byte(volatile uint8_t *addr, uint8_t val)
{
    uint8_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, tmp, tmp += val);
}
97
/* Atomically add @p val to the 16-bit value at @p addr. */
static inline void _SDK_AtomicLocalAdd2Byte(volatile uint16_t *addr, uint16_t val)
{
    uint16_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, tmp, tmp += val);
}
104
/* Atomically add @p val to the 32-bit value at @p addr. */
static inline void _SDK_AtomicLocalAdd4Byte(volatile uint32_t *addr, uint32_t val)
{
    uint32_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, tmp, tmp += val);
}
111
/* Atomically subtract @p val from the 8-bit value at @p addr. */
static inline void _SDK_AtomicLocalSub1Byte(volatile uint8_t *addr, uint8_t val)
{
    uint8_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, tmp, tmp -= val);
}
118
/* Atomically subtract @p val from the 16-bit value at @p addr. */
static inline void _SDK_AtomicLocalSub2Byte(volatile uint16_t *addr, uint16_t val)
{
    uint16_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, tmp, tmp -= val);
}
125
/* Atomically subtract @p val from the 32-bit value at @p addr. */
static inline void _SDK_AtomicLocalSub4Byte(volatile uint32_t *addr, uint32_t val)
{
    uint32_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, tmp, tmp -= val);
}
132
/* Atomically OR @p bits into the 8-bit value at @p addr. */
static inline void _SDK_AtomicLocalSet1Byte(volatile uint8_t *addr, uint8_t bits)
{
    uint8_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, tmp, tmp |= bits);
}
139
/* Atomically OR @p bits into the 16-bit value at @p addr. */
static inline void _SDK_AtomicLocalSet2Byte(volatile uint16_t *addr, uint16_t bits)
{
    uint16_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, tmp, tmp |= bits);
}
146
/* Atomically OR @p bits into the 32-bit value at @p addr. */
static inline void _SDK_AtomicLocalSet4Byte(volatile uint32_t *addr, uint32_t bits)
{
    uint32_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, tmp, tmp |= bits);
}
153
/* Atomically clear @p bits in the 8-bit value at @p addr. */
static inline void _SDK_AtomicLocalClear1Byte(volatile uint8_t *addr, uint8_t bits)
{
    uint8_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, tmp, tmp &= ~bits);
}
160
/* Atomically clear @p bits in the 16-bit value at @p addr. */
static inline void _SDK_AtomicLocalClear2Byte(volatile uint16_t *addr, uint16_t bits)
{
    uint16_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, tmp, tmp &= ~bits);
}
167
/* Atomically clear @p bits in the 32-bit value at @p addr. */
static inline void _SDK_AtomicLocalClear4Byte(volatile uint32_t *addr, uint32_t bits)
{
    uint32_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, tmp, tmp &= ~bits);
}
174
/* Atomically toggle @p bits in the 8-bit value at @p addr. */
static inline void _SDK_AtomicLocalToggle1Byte(volatile uint8_t *addr, uint8_t bits)
{
    uint8_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, tmp, tmp ^= bits);
}
181
/* Atomically toggle @p bits in the 16-bit value at @p addr. */
static inline void _SDK_AtomicLocalToggle2Byte(volatile uint16_t *addr, uint16_t bits)
{
    uint16_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, tmp, tmp ^= bits);
}
188
/* Atomically toggle @p bits in the 32-bit value at @p addr. */
static inline void _SDK_AtomicLocalToggle4Byte(volatile uint32_t *addr, uint32_t bits)
{
    uint32_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, tmp, tmp ^= bits);
}
195
/* Atomically clear @p clearBits then OR in @p setBits on the 8-bit value at @p addr. */
static inline void _SDK_AtomicLocalClearAndSet1Byte(volatile uint8_t *addr, uint8_t clearBits, uint8_t setBits)
{
    uint8_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, tmp, tmp = (tmp & ~clearBits) | setBits);
}
202
/* Atomically clear @p clearBits then OR in @p setBits on the 16-bit value at @p addr. */
static inline void _SDK_AtomicLocalClearAndSet2Byte(volatile uint16_t *addr, uint16_t clearBits, uint16_t setBits)
{
    uint16_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, tmp, tmp = (tmp & ~clearBits) | setBits);
}
209
/* Atomically clear @p clearBits then OR in @p setBits on the 32-bit value at @p addr. */
static inline void _SDK_AtomicLocalClearAndSet4Byte(volatile uint32_t *addr, uint32_t clearBits, uint32_t setBits)
{
    uint32_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, tmp, tmp = (tmp & ~clearBits) | setBits);
}
216
/*
 * Size dispatch: each SDK_ATOMIC_LOCAL_* macro selects the 1-, 2- or 4-byte
 * helper based on sizeof(*(addr)); any width other than 1 or 2 falls through
 * to the 4-byte variant.
 */
#define SDK_ATOMIC_LOCAL_ADD(addr, val) \
    ((1UL == sizeof(*(addr))) ? \
        _SDK_AtomicLocalAdd1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(val)) : \
        ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalAdd2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(val)) : \
            _SDK_AtomicLocalAdd4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(val))))

#define SDK_ATOMIC_LOCAL_SUB(addr, val) \
    ((1UL == sizeof(*(addr))) ? \
        _SDK_AtomicLocalSub1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(val)) : \
        ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalSub2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(val)) : \
            _SDK_AtomicLocalSub4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(val))))

#define SDK_ATOMIC_LOCAL_SET(addr, bits) \
    ((1UL == sizeof(*(addr))) ? \
        _SDK_AtomicLocalSet1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(bits)) : \
        ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalSet2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(bits)) : \
            _SDK_AtomicLocalSet4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(bits))))

#define SDK_ATOMIC_LOCAL_CLEAR(addr, bits) \
    ((1UL == sizeof(*(addr))) ? \
        _SDK_AtomicLocalClear1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(bits)) : \
        ((2UL == sizeof(*(addr))) ? \
            _SDK_AtomicLocalClear2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(bits)) : \
            _SDK_AtomicLocalClear4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(bits))))

#define SDK_ATOMIC_LOCAL_TOGGLE(addr, bits) \
    ((1UL == sizeof(*(addr))) ? \
        _SDK_AtomicLocalToggle1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(bits)) : \
        ((2UL == sizeof(*(addr))) ? \
            _SDK_AtomicLocalToggle2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(bits)) : \
            _SDK_AtomicLocalToggle4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(bits))))

#define SDK_ATOMIC_LOCAL_CLEAR_AND_SET(addr, clearBits, setBits) \
    ((1UL == sizeof(*(addr))) ? \
        _SDK_AtomicLocalClearAndSet1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(clearBits), (uint8_t)(setBits)) : \
        ((2UL == sizeof(*(addr))) ? \
            _SDK_AtomicLocalClearAndSet2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(clearBits), (uint16_t)(setBits)) : \
            _SDK_AtomicLocalClearAndSet4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(clearBits), (uint32_t)(setBits))))
255 #else
256
/*
 * Exclusive load/store is not available on this architecture: fall back to
 * briefly masking the global interrupt around the read-modify-write.
 * DisableGlobalIRQ()/EnableGlobalIRQ() save and restore the caller's
 * interrupt-mask state, so nesting is safe.
 */
#define SDK_ATOMIC_LOCAL_ADD(addr, val) \
    do \
    { \
        uint32_t s_atomicOldInt; \
        s_atomicOldInt = DisableGlobalIRQ(); \
        *(addr) += (val); \
        EnableGlobalIRQ(s_atomicOldInt); \
    } while (false)

#define SDK_ATOMIC_LOCAL_SUB(addr, val) \
    do \
    { \
        uint32_t s_atomicOldInt; \
        s_atomicOldInt = DisableGlobalIRQ(); \
        *(addr) -= (val); \
        EnableGlobalIRQ(s_atomicOldInt); \
    } while (false)

#define SDK_ATOMIC_LOCAL_SET(addr, bits) \
    do \
    { \
        uint32_t s_atomicOldInt; \
        s_atomicOldInt = DisableGlobalIRQ(); \
        *(addr) |= (bits); \
        EnableGlobalIRQ(s_atomicOldInt); \
    } while (false)

#define SDK_ATOMIC_LOCAL_CLEAR(addr, bits) \
    do \
    { \
        uint32_t s_atomicOldInt; \
        s_atomicOldInt = DisableGlobalIRQ(); \
        *(addr) &= ~(bits); \
        EnableGlobalIRQ(s_atomicOldInt); \
    } while (false)

#define SDK_ATOMIC_LOCAL_TOGGLE(addr, bits) \
    do \
    { \
        uint32_t s_atomicOldInt; \
        s_atomicOldInt = DisableGlobalIRQ(); \
        *(addr) ^= (bits); \
        EnableGlobalIRQ(s_atomicOldInt); \
    } while (false)

#define SDK_ATOMIC_LOCAL_CLEAR_AND_SET(addr, clearBits, setBits) \
    do \
    { \
        uint32_t s_atomicOldInt; \
        s_atomicOldInt = DisableGlobalIRQ(); \
        *(addr) = (*(addr) & ~(clearBits)) | (setBits); \
        EnableGlobalIRQ(s_atomicOldInt); \
    } while (false)
310
311 #endif
312 /* @} */
313
314 /*! @name Timer utilities */
315 /* @{ */
316 /*! Macro to convert a microsecond period to raw count value */
317 #define USEC_TO_COUNT(us, clockFreqInHz) (uint64_t)(((uint64_t)(us) * (clockFreqInHz)) / 1000000U)
318 /*! Macro to convert a raw count value to microsecond */
319 #define COUNT_TO_USEC(count, clockFreqInHz) (uint64_t)((uint64_t)(count)*1000000U / (clockFreqInHz))
320
321 /*! Macro to convert a millisecond period to raw count value */
322 #define MSEC_TO_COUNT(ms, clockFreqInHz) (uint64_t)((uint64_t)(ms) * (clockFreqInHz) / 1000U)
323 /*! Macro to convert a raw count value to millisecond */
324 #define COUNT_TO_MSEC(count, clockFreqInHz) (uint64_t)((uint64_t)(count)*1000U / (clockFreqInHz))
325 /* @} */
326
327 /*! @name ISR exit barrier
328 * @{
329 *
330 * ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping
331 * exception return operation might vector to incorrect interrupt.
332 * For Cortex-M7, if core speed much faster than peripheral register write speed,
 * the peripheral interrupt flags may still be set after exiting the ISR; this
 * results in an error similar to erratum 838869.
335 */
336 #if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U))
337 #define SDK_ISR_EXIT_BARRIER __DSB()
338 #else
339 #define SDK_ISR_EXIT_BARRIER
340 #endif
341
342 /* @} */
343
344 /*! @name Alignment variable definition macros */
345 /* @{ */
346 #if (defined(__ICCARM__))
347 /*
348 * Workaround to disable MISRA C message suppress warnings for IAR compiler.
 * http://supp.iar.com/Support/?note=24725
350 */
351 _Pragma("diag_suppress=Pm120")
352 #define SDK_PRAGMA(x) _Pragma(#x)
353 _Pragma("diag_error=Pm120")
354 /*! Macro to define a variable with alignbytes alignment */
355 #define SDK_ALIGN(var, alignbytes) SDK_PRAGMA(data_alignment = alignbytes) var
356 #elif defined(__CC_ARM) || defined(__ARMCC_VERSION)
357 /*! Macro to define a variable with alignbytes alignment */
358 #define SDK_ALIGN(var, alignbytes) __attribute__((aligned(alignbytes))) var
359 #elif defined(__GNUC__)
360 /*! Macro to define a variable with alignbytes alignment */
361 #define SDK_ALIGN(var, alignbytes) var __attribute__((aligned(alignbytes)))
362 #else
363 #error Toolchain not supported
364 #endif
365
366 /*! Macro to define a variable with L1 d-cache line size alignment */
367 #if defined(FSL_FEATURE_L1DCACHE_LINESIZE_BYTE)
368 #define SDK_L1DCACHE_ALIGN(var) SDK_ALIGN(var, FSL_FEATURE_L1DCACHE_LINESIZE_BYTE)
369 #endif
370 /*! Macro to define a variable with L2 cache line size alignment */
371 #if defined(FSL_FEATURE_L2CACHE_LINESIZE_BYTE)
372 #define SDK_L2CACHE_ALIGN(var) SDK_ALIGN(var, FSL_FEATURE_L2CACHE_LINESIZE_BYTE)
373 #endif
374
375 /*! Macro to change a value to a given size aligned value */
376 #define SDK_SIZEALIGN(var, alignbytes) \
377 ((unsigned int)((var) + ((alignbytes)-1U)) & (unsigned int)(~(unsigned int)((alignbytes)-1U)))
378 /* @} */
379
380 /*! @name Non-cacheable region definition macros */
381 /* For initialized non-zero non-cacheable variables, please using "AT_NONCACHEABLE_SECTION_INIT(var) ={xx};" or
382 * "AT_NONCACHEABLE_SECTION_ALIGN_INIT(var) ={xx};" in your projects to define them, for zero-inited non-cacheable
383 * variables, please using "AT_NONCACHEABLE_SECTION(var);" or "AT_NONCACHEABLE_SECTION_ALIGN(var);" to define them,
384 * these zero-inited variables will be initialized to zero in system startup.
385 */
386 /* @{ */
387
388 #if ((!(defined(FSL_FEATURE_HAS_NO_NONCACHEABLE_SECTION) && FSL_FEATURE_HAS_NO_NONCACHEABLE_SECTION)) && \
389 defined(FSL_FEATURE_L1ICACHE_LINESIZE_BYTE))
390
391 #if (defined(__ICCARM__))
392 #define AT_NONCACHEABLE_SECTION(var) var @"NonCacheable"
393 #define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) SDK_PRAGMA(data_alignment = alignbytes) var @"NonCacheable"
394 #define AT_NONCACHEABLE_SECTION_INIT(var) var @"NonCacheable.init"
395 #define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
396 SDK_PRAGMA(data_alignment = alignbytes) var @"NonCacheable.init"
397
398 #elif (defined(__CC_ARM) || defined(__ARMCC_VERSION))
399 #define AT_NONCACHEABLE_SECTION_INIT(var) __attribute__((section("NonCacheable.init"))) var
400 #define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
401 __attribute__((section("NonCacheable.init"))) __attribute__((aligned(alignbytes))) var
402 #if (defined(__CC_ARM))
403 #define AT_NONCACHEABLE_SECTION(var) __attribute__((section("NonCacheable"), zero_init)) var
404 #define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
405 __attribute__((section("NonCacheable"), zero_init)) __attribute__((aligned(alignbytes))) var
406 #else
407 #define AT_NONCACHEABLE_SECTION(var) __attribute__((section(".bss.NonCacheable"))) var
408 #define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
409 __attribute__((section(".bss.NonCacheable"))) __attribute__((aligned(alignbytes))) var
410 #endif
411
412 #elif (defined(__GNUC__))
413 #if defined(__ARM_ARCH_8A__) /* This macro is ARMv8-A specific */
414 #define __CS "//"
415 #else
416 #define __CS "@"
417 #endif
418
419 /* For GCC, when the non-cacheable section is required, please define "__STARTUP_INITIALIZE_NONCACHEDATA"
420 * in your projects to make sure the non-cacheable section variables will be initialized in system startup.
421 */
422 #define AT_NONCACHEABLE_SECTION_INIT(var) __attribute__((section("NonCacheable.init"))) var
423 #define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
424 __attribute__((section("NonCacheable.init"))) var __attribute__((aligned(alignbytes)))
425 #define AT_NONCACHEABLE_SECTION(var) __attribute__((section("NonCacheable,\"aw\",%nobits " __CS))) var
426 #define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
427 __attribute__((section("NonCacheable,\"aw\",%nobits " __CS))) var __attribute__((aligned(alignbytes)))
428 #else
429 #error Toolchain not supported.
430 #endif
431
432 #else
433
434 #define AT_NONCACHEABLE_SECTION(var) var
435 #define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) SDK_ALIGN(var, alignbytes)
436 #define AT_NONCACHEABLE_SECTION_INIT(var) var
437 #define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) SDK_ALIGN(var, alignbytes)
438
439 #endif
440
441 /* @} */
442
443 /*!
444 * @name Time sensitive region
445 * @{
446 */
447 #if (defined(__ICCARM__))
448 #define AT_QUICKACCESS_SECTION_CODE(func) func @"CodeQuickAccess"
449 #define AT_QUICKACCESS_SECTION_DATA(var) var @"DataQuickAccess"
450 #define AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes) \
451 SDK_PRAGMA(data_alignment = alignbytes) var @"DataQuickAccess"
452 #elif (defined(__CC_ARM) || defined(__ARMCC_VERSION))
453 #define AT_QUICKACCESS_SECTION_CODE(func) __attribute__((section("CodeQuickAccess"), __noinline__)) func
454 #define AT_QUICKACCESS_SECTION_DATA(var) __attribute__((section("DataQuickAccess"))) var
455 #define AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes) \
456 __attribute__((section("DataQuickAccess"))) __attribute__((aligned(alignbytes))) var
457 #elif (defined(__GNUC__))
458 #define AT_QUICKACCESS_SECTION_CODE(func) __attribute__((section("CodeQuickAccess"), __noinline__)) func
459 #define AT_QUICKACCESS_SECTION_DATA(var) __attribute__((section("DataQuickAccess"))) var
460 #define AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes) \
461 __attribute__((section("DataQuickAccess"))) var __attribute__((aligned(alignbytes)))
462 #else
463 #error Toolchain not supported.
464 #endif /* defined(__ICCARM__) */
465
466 /*! @name Ram Function */
467 #if (defined(__ICCARM__))
468 #define RAMFUNCTION_SECTION_CODE(func) func @"RamFunction"
469 #elif (defined(__CC_ARM) || defined(__ARMCC_VERSION))
470 #define RAMFUNCTION_SECTION_CODE(func) __attribute__((section("RamFunction"))) func
471 #elif (defined(__GNUC__))
472 #define RAMFUNCTION_SECTION_CODE(func) __attribute__((section("RamFunction"))) func
473 #else
474 #error Toolchain not supported.
475 #endif /* defined(__ICCARM__) */
476 /* @} */
477
478 /*!
479 * @def MSDK_REG_SECURE_ADDR(x)
480 * Convert the register address to the one used in secure mode.
481 *
482 * @def MSDK_REG_NONSECURE_ADDR(x)
483 * Convert the register address to the one used in non-secure mode.
484 */
485
486 #if (defined(__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE & 0x2))
487 #define MSDK_REG_SECURE_ADDR(x) ((uintptr_t)(x) | (0x1UL << 28))
488 #define MSDK_REG_NONSECURE_ADDR(x) ((uintptr_t)(x) & ~(0x1UL << 28))
489 #else
490 #define MSDK_REG_SECURE_ADDR(x) (x)
491 #define MSDK_REG_NONSECURE_ADDR(x) (x)
492 #endif
493
494 #if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
495 void DefaultISR(void);
496 #endif
497
498 /*
499 * The fsl_clock.h is included here because it needs MAKE_VERSION/MAKE_STATUS/status_t
500 * defined in previous of this file.
501 */
502 #include "fsl_clock.h"
503
504 /*
505 * Chip level peripheral reset API, for MCUs that implement peripheral reset control external to a peripheral
506 */
507 #if ((defined(FSL_FEATURE_SOC_SYSCON_COUNT) && (FSL_FEATURE_SOC_SYSCON_COUNT > 0)) || \
508 (defined(FSL_FEATURE_SOC_ASYNC_SYSCON_COUNT) && (FSL_FEATURE_SOC_ASYNC_SYSCON_COUNT > 0)))
509 #include "fsl_reset.h"
510 #endif
511
512 #if defined(FSL_FEATURE_IRQSTEER_EXT_INT_MAX_NUM) && (FSL_FEATURE_IRQSTEER_EXT_INT_MAX_NUM > 0) && defined(FSL_FEATURE_IRQSTEER_IRQ_START_INDEX) && (FSL_FEATURE_IRQSTEER_IRQ_START_INDEX > 0)
513 void IRQSTEER_EnableInterrupt(int32_t instIdx, IRQn_Type irq);
514 void IRQSTEER_DisableInterrupt(int32_t instIdx, IRQn_Type irq);
515 #endif
516
517 /*******************************************************************************
518 * API
519 ******************************************************************************/
520
521 #if defined(__cplusplus)
522 extern "C" {
523 #endif /* __cplusplus*/
524
525 /*!
526 * @brief Enable specific interrupt.
527 *
528 * Enable LEVEL1 interrupt. For some devices, there might be multiple interrupt
529 * levels. For example, there are NVIC and intmux. Here the interrupts connected
530 * to NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
531 * The interrupts connected to intmux are the LEVEL2 interrupts, they are routed
532 * to NVIC first then routed to core.
533 *
534 * This function only enables the LEVEL1 interrupts. The number of LEVEL1 interrupts
535 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
536 *
537 * @param interrupt The IRQ number.
538 * @retval kStatus_Success Interrupt enabled successfully
539 * @retval kStatus_Fail Failed to enable the interrupt
540 */
EnableIRQ(IRQn_Type interrupt)541 static inline status_t EnableIRQ(IRQn_Type interrupt)
542 {
543 status_t status = kStatus_Success;
544
545 if (NotAvail_IRQn == interrupt)
546 {
547 status = kStatus_Fail;
548 }
549
550 #if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
551 else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
552 {
553 #if defined(FSL_FEATURE_IRQSTEER_EXT_INT_MAX_NUM) && (FSL_FEATURE_IRQSTEER_EXT_INT_MAX_NUM > 0) && defined(FSL_FEATURE_IRQSTEER_IRQ_START_INDEX) && (FSL_FEATURE_IRQSTEER_IRQ_START_INDEX > 0)
554 int32_t irqsteerInstIdx = (int32_t)((interrupt + 1 - FSL_FEATURE_IRQSTEER_IRQ_START_INDEX) / FSL_FEATURE_IRQSTEER_EXT_INT_MAX_NUM);
555
556 IRQSTEER_EnableInterrupt(irqsteerInstIdx, interrupt);
557 #else
558 status = kStatus_Fail;
559 #endif
560 }
561 #endif
562
563 else
564 {
565 #if defined(__GIC_PRIO_BITS)
566 GIC_EnableIRQ(interrupt);
567 #else
568 NVIC_EnableIRQ(interrupt);
569 #endif
570 }
571
572 return status;
573 }
574
575 /*!
576 * @brief Disable specific interrupt.
577 *
578 * Disable LEVEL1 interrupt. For some devices, there might be multiple interrupt
579 * levels. For example, there are NVIC and intmux. Here the interrupts connected
580 * to NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
581 * The interrupts connected to intmux are the LEVEL2 interrupts, they are routed
582 * to NVIC first then routed to core.
583 *
584 * This function only disables the LEVEL1 interrupts. The number of LEVEL1 interrupts
585 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
586 *
587 * @param interrupt The IRQ number.
588 * @retval kStatus_Success Interrupt disabled successfully
589 * @retval kStatus_Fail Failed to disable the interrupt
590 */
static inline status_t DisableIRQ(IRQn_Type interrupt)
{
    status_t status = kStatus_Success;

    /* NotAvail_IRQn marks an interrupt that does not exist on this device. */
    if (NotAvail_IRQn == interrupt)
    {
        status = kStatus_Fail;
    }

#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
    /* IRQ numbers beyond the LEVEL1 vector range are routed through IRQSTEER. */
    else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
    {
#if defined(FSL_FEATURE_IRQSTEER_EXT_INT_MAX_NUM) && (FSL_FEATURE_IRQSTEER_EXT_INT_MAX_NUM > 0) && defined(FSL_FEATURE_IRQSTEER_IRQ_START_INDEX) && (FSL_FEATURE_IRQSTEER_IRQ_START_INDEX > 0)
        /* Instance index = offset from the first steered IRQ divided by the
         * number of inputs each IRQSTEER instance handles. */
        int32_t irqsteerInstIdx = (int32_t)((interrupt - FSL_FEATURE_IRQSTEER_IRQ_START_INDEX) / FSL_FEATURE_IRQSTEER_EXT_INT_MAX_NUM);

        IRQSTEER_DisableInterrupt(irqsteerInstIdx, interrupt);
#else
        /* No IRQSTEER support: the IRQ cannot be reached from this core. */
        status = kStatus_Fail;
#endif
    }
#endif

    else
    {
#if defined(__GIC_PRIO_BITS)
        GIC_DisableIRQ(interrupt);
#else
        NVIC_DisableIRQ(interrupt);
#endif
    }

    return status;
}
624
625 #if defined(__GIC_PRIO_BITS)
626 #define NVIC_SetPriority(irq, prio) do {} while(0)
627 #endif
628
629 /*!
630 * @brief Enable the IRQ, and also set the interrupt priority.
631 *
632 * Only handle LEVEL1 interrupt. For some devices, there might be multiple interrupt
633 * levels. For example, there are NVIC and intmux. Here the interrupts connected
634 * to NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
635 * The interrupts connected to intmux are the LEVEL2 interrupts, they are routed
636 * to NVIC first then routed to core.
637 *
638 * This function only handles the LEVEL1 interrupts. The number of LEVEL1 interrupts
639 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
640 *
641 * @param interrupt The IRQ to Enable.
642 * @param priNum Priority number set to interrupt controller register.
643 * @retval kStatus_Success Interrupt priority set successfully
644 * @retval kStatus_Fail Failed to set the interrupt priority.
645 */
static inline status_t EnableIRQWithPriority(IRQn_Type interrupt, uint8_t priNum)
{
    status_t status = kStatus_Success;

    /* NotAvail_IRQn marks an interrupt that does not exist on this device. */
    if (NotAvail_IRQn == interrupt)
    {
        status = kStatus_Fail;
    }

#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
    /* LEVEL2 (intmux/IRQSTEER-routed) interrupts cannot be handled here. */
    else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
    {
        status = kStatus_Fail;
    }
#endif

    else
    {
#if defined(__GIC_PRIO_BITS)
        /* Set the priority before enabling so the IRQ never fires at a stale priority. */
        GIC_SetPriority(interrupt, priNum);
        GIC_EnableIRQ(interrupt);
#else
        /* Set the priority before enabling so the IRQ never fires at a stale priority. */
        NVIC_SetPriority(interrupt, priNum);
        NVIC_EnableIRQ(interrupt);
#endif
    }

    return status;
}
675
676 /*!
677 * @brief Set the IRQ priority.
678 *
679 * Only handle LEVEL1 interrupt. For some devices, there might be multiple interrupt
680 * levels. For example, there are NVIC and intmux. Here the interrupts connected
681 * to NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
682 * The interrupts connected to intmux are the LEVEL2 interrupts, they are routed
683 * to NVIC first then routed to core.
684 *
685 * This function only handles the LEVEL1 interrupts. The number of LEVEL1 interrupts
686 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
687 *
688 * @param interrupt The IRQ to set.
689 * @param priNum Priority number set to interrupt controller register.
690 *
691 * @retval kStatus_Success Interrupt priority set successfully
692 * @retval kStatus_Fail Failed to set the interrupt priority.
693 */
static inline status_t IRQ_SetPriority(IRQn_Type interrupt, uint8_t priNum)
{
    status_t status = kStatus_Success;

    /* NotAvail_IRQn marks an interrupt that does not exist on this device. */
    if (NotAvail_IRQn == interrupt)
    {
        status = kStatus_Fail;
    }

#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
    /* LEVEL2 (intmux/IRQSTEER-routed) interrupts have no per-IRQ priority here. */
    else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
    {
        status = kStatus_Fail;
    }
#endif

    else
    {
#if defined(__GIC_PRIO_BITS)
        GIC_SetPriority(interrupt, priNum);
#else
        NVIC_SetPriority(interrupt, priNum);
#endif
    }

    return status;
}
721
722 /*!
723 * @brief Clear the pending IRQ flag.
724 *
725 * Only handle LEVEL1 interrupt. For some devices, there might be multiple interrupt
726 * levels. For example, there are NVIC and intmux. Here the interrupts connected
727 * to NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
728 * The interrupts connected to intmux are the LEVEL2 interrupts, they are routed
729 * to NVIC first then routed to core.
730 *
731 * This function only handles the LEVEL1 interrupts. The number of LEVEL1 interrupts
732 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
733 *
734 * @param interrupt The flag which IRQ to clear.
735 *
 * @retval kStatus_Success The pending flag of the IRQ is cleared successfully.
 * @retval kStatus_Fail Failed to clear the pending IRQ flag.
738 */
static inline status_t IRQ_ClearPendingIRQ(IRQn_Type interrupt)
{
    status_t status = kStatus_Success;

    /* NotAvail_IRQn marks an interrupt that does not exist on this device. */
    if (NotAvail_IRQn == interrupt)
    {
        status = kStatus_Fail;
    }

#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
    /* LEVEL2 (intmux/IRQSTEER-routed) interrupts cannot be handled here. */
    else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
    {
        status = kStatus_Fail;
    }
#endif

    else
    {
        /* Clear the pending flag in the interrupt controller. */
#if defined(__GIC_PRIO_BITS)
        GIC_ClearPendingIRQ(interrupt);
#else
        NVIC_ClearPendingIRQ(interrupt);
#endif
    }

    return status;
}
766
767 /*!
768 * @brief Disable the global IRQ
769 *
770 * Disable the global interrupt and return the current primask register. User is required to provided the primask
771 * register for the EnableGlobalIRQ().
772 *
773 * @return Current primask value.
774 */
static inline uint32_t DisableGlobalIRQ(void)
{
    uint32_t mask;

#if defined(CPSR_I_Msk)
    /* AArch32 Cortex-A: capture the current IRQ-disable bit from CPSR. */
    mask = __get_CPSR() & CPSR_I_Msk;
#elif defined(DAIF_I_BIT)
    /* AArch64: capture the current IRQ-mask bit from DAIF. */
    mask = __get_DAIF() & DAIF_I_BIT;
#else
    /* Cortex-M: capture the whole PRIMASK register. */
    mask = __get_PRIMASK();
#endif
    /* Mask interrupts only after the previous state is saved. */
    __disable_irq();

    return mask;
}
790
791 /*!
792 * @brief Enable the global IRQ
793 *
794 * Set the primask register with the provided primask value but not just enable the primask. The idea is for the
795 * convenience of integration of RTOS. some RTOS get its own management mechanism of primask. User is required to
796 * use the EnableGlobalIRQ() and DisableGlobalIRQ() in pair.
797 *
798 * @param primask value of primask register to be restored. The primask value is supposed to be provided by the
799 * DisableGlobalIRQ().
800 */
static inline void EnableGlobalIRQ(uint32_t primask)
{
#if defined(CPSR_I_Msk)
    /* AArch32 Cortex-A: restore only the IRQ-disable bit, leaving the other
     * CPSR bits untouched. */
    __set_CPSR((__get_CPSR() & ~CPSR_I_Msk) | primask);
#elif defined(DAIF_I_BIT)
    /* AArch64: re-enable IRQs only if they were enabled when the paired
     * DisableGlobalIRQ() ran; otherwise leave them masked. */
    if (0UL == primask)
    {
        __enable_irq();
    }
#else
    /* Cortex-M: restore the saved PRIMASK value as-is (this is restore, not
     * an unconditional enable, so nested critical sections compose). */
    __set_PRIMASK(primask);
#endif
}
814
815 #if defined(ENABLE_RAM_VECTOR_TABLE)
816 /*!
817 * @brief install IRQ handler
818 *
819 * @param irq IRQ number
820 * @param irqHandler IRQ handler address
821 * @return The old IRQ handler address
822 */
823 uint32_t InstallIRQHandler(IRQn_Type irq, uint32_t irqHandler);
824 #endif /* ENABLE_RAM_VECTOR_TABLE. */
825
826 #if (defined(FSL_FEATURE_SOC_SYSCON_COUNT) && (FSL_FEATURE_SOC_SYSCON_COUNT > 0))
827
828 /*
829 * When FSL_FEATURE_POWERLIB_EXTEND is defined to non-zero value,
830 * powerlib should be used instead of these functions.
831 */
832 #if !(defined(FSL_FEATURE_POWERLIB_EXTEND) && (FSL_FEATURE_POWERLIB_EXTEND != 0))
833 /*!
834 * @brief Enable specific interrupt for wake-up from deep-sleep mode.
835 *
836 * Enable the interrupt for wake-up from deep sleep mode.
837 * Some interrupts are typically used in sleep mode only and will not occur during
838 * deep-sleep mode because relevant clocks are stopped. However, it is possible to enable
839 * those clocks (significantly increasing power consumption in the reduced power mode),
840 * making these wake-ups possible.
841 *
 * @note This function also enables the interrupt in the NVIC (EnableIRQ() is called internally).
843 *
844 * @param interrupt The IRQ number.
845 */
846 void EnableDeepSleepIRQ(IRQn_Type interrupt);
847
848 /*!
849 * @brief Disable specific interrupt for wake-up from deep-sleep mode.
850 *
851 * Disable the interrupt for wake-up from deep sleep mode.
852 * Some interrupts are typically used in sleep mode only and will not occur during
853 * deep-sleep mode because relevant clocks are stopped. However, it is possible to enable
854 * those clocks (significantly increasing power consumption in the reduced power mode),
855 * making these wake-ups possible.
856 *
 * @note This function also disables the interrupt in the NVIC (DisableIRQ() is called internally).
858 *
859 * @param interrupt The IRQ number.
860 */
861 void DisableDeepSleepIRQ(IRQn_Type interrupt);
862 #endif /* FSL_FEATURE_POWERLIB_EXTEND */
863 #endif /* FSL_FEATURE_SOC_SYSCON_COUNT */
864
865 #if defined(DWT)
866 /*!
867 * @brief Enable the counter to get CPU cycles.
868 */
869 void MSDK_EnableCpuCycleCounter(void);
870
871 /*!
872 * @brief Get the current CPU cycle count.
873 *
874 * @return Current CPU cycle count.
875 */
876 uint32_t MSDK_GetCpuCycleCount(void);
877 #endif
878
879 #if defined(__cplusplus)
880 }
881 #endif /* __cplusplus*/
882
883 /*! @} */
884
885 #endif /* FSL_COMMON_ARM_H_ */
886