/*
 * Copyright (c) 2015-2016, Freescale Semiconductor, Inc.
 * Copyright 2016-2022 NXP
 * All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef FSL_COMMON_ARM_H_
#define FSL_COMMON_ARM_H_

/*
 * For CMSIS pack RTE.
 * CMSIS pack RTE generates "RTE_Components.h", which contains the statements
 * of the related <RTE_Components_h> element for all selected software components.
 */
#ifdef _RTE_
#include "RTE_Components.h"
#endif

/*!
 * @addtogroup ksdk_common
 * @{
 */
/*! @name Atomic modification
 *
 * These macros are used for atomic access, such as read-modify-write
 * of peripheral registers.
 *
 * - SDK_ATOMIC_LOCAL_ADD
 * - SDK_ATOMIC_LOCAL_SET
 * - SDK_ATOMIC_LOCAL_CLEAR
 * - SDK_ATOMIC_LOCAL_TOGGLE
 * - SDK_ATOMIC_LOCAL_CLEAR_AND_SET
 *
 * Take SDK_ATOMIC_LOCAL_CLEAR_AND_SET as an example: the parameter @c addr
 * is the address of the peripheral register or variable you want to modify
 * atomically, the parameter @c clearBits is the bits to clear, and the parameter
 * @c setBits is the bits to set.
 * For example, to set bit1:bit0 of a 32-bit register to 0b10, use it like this:
 *
 * @code
   volatile uint32_t * reg = (volatile uint32_t *)REG_ADDR;

   SDK_ATOMIC_LOCAL_CLEAR_AND_SET(reg, 0x03, 0x02);
   @endcode
 *
 * In this example, bit1:bit0 of the register are cleared and bit1 is set; as a result,
 * register bit1:bit0 = 0b10.
 *
 * @note For platforms that do not support exclusive load and store, these macros
 * disable the global interrupt to protect the modification.
 *
 * @note These macros only guarantee atomic operations on the local processor. For
 * multi-processor devices, use a hardware semaphore such as SEMA42 to
 * guarantee exclusive access if necessary.
 *
 * @{
 */

/* clang-format off */
#if ((defined(__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
     (defined(__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
     (defined(__ARM_ARCH_8M_MAIN__) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
     (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ == 1)))
/* clang-format on */

/* If the LDREX and STREX are supported, use them. */
#define _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, val, ops) \
    do                                              \
    {                                               \
        (val) = __LDREXB(addr);                     \
        (ops);                                      \
    } while (0UL != __STREXB((val), (addr)))

#define _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, val, ops) \
    do                                              \
    {                                               \
        (val) = __LDREXH(addr);                     \
        (ops);                                      \
    } while (0UL != __STREXH((val), (addr)))

#define _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, val, ops) \
    do                                              \
    {                                               \
        (val) = __LDREXW(addr);                     \
        (ops);                                      \
    } while (0UL != __STREXW((val), (addr)))

static inline void _SDK_AtomicLocalAdd1Byte(volatile uint8_t *addr, uint8_t val)
{
    uint8_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val += val);
}

static inline void _SDK_AtomicLocalAdd2Byte(volatile uint16_t *addr, uint16_t val)
{
    uint16_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val += val);
}

static inline void _SDK_AtomicLocalAdd4Byte(volatile uint32_t *addr, uint32_t val)
{
    uint32_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val += val);
}

static inline void _SDK_AtomicLocalSub1Byte(volatile uint8_t *addr, uint8_t val)
{
    uint8_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val -= val);
}

static inline void _SDK_AtomicLocalSub2Byte(volatile uint16_t *addr, uint16_t val)
{
    uint16_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val -= val);
}

static inline void _SDK_AtomicLocalSub4Byte(volatile uint32_t *addr, uint32_t val)
{
    uint32_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val -= val);
}

static inline void _SDK_AtomicLocalSet1Byte(volatile uint8_t *addr, uint8_t bits)
{
    uint8_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val |= bits);
}

static inline void _SDK_AtomicLocalSet2Byte(volatile uint16_t *addr, uint16_t bits)
{
    uint16_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val |= bits);
}

static inline void _SDK_AtomicLocalSet4Byte(volatile uint32_t *addr, uint32_t bits)
{
    uint32_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val |= bits);
}

static inline void _SDK_AtomicLocalClear1Byte(volatile uint8_t *addr, uint8_t bits)
{
    uint8_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val &= ~bits);
}

static inline void _SDK_AtomicLocalClear2Byte(volatile uint16_t *addr, uint16_t bits)
{
    uint16_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val &= ~bits);
}

static inline void _SDK_AtomicLocalClear4Byte(volatile uint32_t *addr, uint32_t bits)
{
    uint32_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val &= ~bits);
}

static inline void _SDK_AtomicLocalToggle1Byte(volatile uint8_t *addr, uint8_t bits)
{
    uint8_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val ^= bits);
}

static inline void _SDK_AtomicLocalToggle2Byte(volatile uint16_t *addr, uint16_t bits)
{
    uint16_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val ^= bits);
}

static inline void _SDK_AtomicLocalToggle4Byte(volatile uint32_t *addr, uint32_t bits)
{
    uint32_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val ^= bits);
}

static inline void _SDK_AtomicLocalClearAndSet1Byte(volatile uint8_t *addr, uint8_t clearBits, uint8_t setBits)
{
    uint8_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val = (s_val & ~clearBits) | setBits);
}

static inline void _SDK_AtomicLocalClearAndSet2Byte(volatile uint16_t *addr, uint16_t clearBits, uint16_t setBits)
{
    uint16_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val = (s_val & ~clearBits) | setBits);
}

static inline void _SDK_AtomicLocalClearAndSet4Byte(volatile uint32_t *addr, uint32_t clearBits, uint32_t setBits)
{
    uint32_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val = (s_val & ~clearBits) | setBits);
}

#define SDK_ATOMIC_LOCAL_ADD(addr, val) \
    ((1UL == sizeof(*(addr))) ? \
         _SDK_AtomicLocalAdd1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(val)) : \
         ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalAdd2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(val)) : \
              _SDK_AtomicLocalAdd4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(val))))

#define SDK_ATOMIC_LOCAL_SUB(addr, val) \
    ((1UL == sizeof(*(addr))) ? \
         _SDK_AtomicLocalSub1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(val)) : \
         ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalSub2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(val)) : \
              _SDK_AtomicLocalSub4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(val))))

#define SDK_ATOMIC_LOCAL_SET(addr, bits) \
    ((1UL == sizeof(*(addr))) ? \
         _SDK_AtomicLocalSet1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(bits)) : \
         ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalSet2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(bits)) : \
              _SDK_AtomicLocalSet4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(bits))))

#define SDK_ATOMIC_LOCAL_CLEAR(addr, bits) \
    ((1UL == sizeof(*(addr))) ? \
         _SDK_AtomicLocalClear1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(bits)) : \
         ((2UL == sizeof(*(addr))) ? \
              _SDK_AtomicLocalClear2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(bits)) : \
              _SDK_AtomicLocalClear4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(bits))))

#define SDK_ATOMIC_LOCAL_TOGGLE(addr, bits) \
    ((1UL == sizeof(*(addr))) ? \
         _SDK_AtomicLocalToggle1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(bits)) : \
         ((2UL == sizeof(*(addr))) ? \
              _SDK_AtomicLocalToggle2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(bits)) : \
              _SDK_AtomicLocalToggle4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(bits))))

#define SDK_ATOMIC_LOCAL_CLEAR_AND_SET(addr, clearBits, setBits) \
    ((1UL == sizeof(*(addr))) ? \
         _SDK_AtomicLocalClearAndSet1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(clearBits), (uint8_t)(setBits)) : \
         ((2UL == sizeof(*(addr))) ? \
              _SDK_AtomicLocalClearAndSet2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(clearBits), (uint16_t)(setBits)) : \
              _SDK_AtomicLocalClearAndSet4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(clearBits), (uint32_t)(setBits))))
#else

#define SDK_ATOMIC_LOCAL_ADD(addr, val)      \
    do                                       \
    {                                        \
        uint32_t s_atomicOldInt;             \
        s_atomicOldInt = DisableGlobalIRQ(); \
        *(addr) += (val);                    \
        EnableGlobalIRQ(s_atomicOldInt);     \
    } while (false)

#define SDK_ATOMIC_LOCAL_SUB(addr, val)      \
    do                                       \
    {                                        \
        uint32_t s_atomicOldInt;             \
        s_atomicOldInt = DisableGlobalIRQ(); \
        *(addr) -= (val);                    \
        EnableGlobalIRQ(s_atomicOldInt);     \
    } while (false)

#define SDK_ATOMIC_LOCAL_SET(addr, bits)     \
    do                                       \
    {                                        \
        uint32_t s_atomicOldInt;             \
        s_atomicOldInt = DisableGlobalIRQ(); \
        *(addr) |= (bits);                   \
        EnableGlobalIRQ(s_atomicOldInt);     \
    } while (false)

#define SDK_ATOMIC_LOCAL_CLEAR(addr, bits)   \
    do                                       \
    {                                        \
        uint32_t s_atomicOldInt;             \
        s_atomicOldInt = DisableGlobalIRQ(); \
        *(addr) &= ~(bits);                  \
        EnableGlobalIRQ(s_atomicOldInt);     \
    } while (false)

#define SDK_ATOMIC_LOCAL_TOGGLE(addr, bits)  \
    do                                       \
    {                                        \
        uint32_t s_atomicOldInt;             \
        s_atomicOldInt = DisableGlobalIRQ(); \
        *(addr) ^= (bits);                   \
        EnableGlobalIRQ(s_atomicOldInt);     \
    } while (false)

#define SDK_ATOMIC_LOCAL_CLEAR_AND_SET(addr, clearBits, setBits) \
    do                                                           \
    {                                                            \
        uint32_t s_atomicOldInt;                                 \
        s_atomicOldInt = DisableGlobalIRQ();                     \
        *(addr) = (*(addr) & ~(clearBits)) | (setBits);          \
        EnableGlobalIRQ(s_atomicOldInt);                         \
    } while (false)

#endif
/* @} */

/*! @name Timer utilities */
/* @{ */
/*! Macro to convert a microsecond period to a raw count value */
#define USEC_TO_COUNT(us, clockFreqInHz) (uint64_t)(((uint64_t)(us) * (clockFreqInHz)) / 1000000U)
/*! Macro to convert a raw count value to microseconds */
#define COUNT_TO_USEC(count, clockFreqInHz) (uint64_t)((uint64_t)(count) * 1000000U / (clockFreqInHz))

/*! Macro to convert a millisecond period to a raw count value */
#define MSEC_TO_COUNT(ms, clockFreqInHz) (uint64_t)((uint64_t)(ms) * (clockFreqInHz) / 1000U)
/*! Macro to convert a raw count value to milliseconds */
#define COUNT_TO_MSEC(count, clockFreqInHz) (uint64_t)((uint64_t)(count) * 1000U / (clockFreqInHz))
/* @} */
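
/*
 * A usage sketch of the conversion macros above; the 48 MHz clock frequency
 * is an assumed example value, not a requirement:
 *
 * @code
   uint64_t count = USEC_TO_COUNT(100U, 48000000U);  // 100 us * 48 MHz / 1000000 = 4800 counts
   uint64_t us    = COUNT_TO_USEC(4800U, 48000000U); // 4800 counts at 48 MHz -> 100 us
   @endcode
 */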

/*! @name ISR exit barrier
 * @{
 *
 * Arm errata 838869 (affects Cortex-M4 and Cortex-M4F): a store immediate overlapping
 * an exception return operation might vector to an incorrect interrupt.
 * For Cortex-M7, if the core runs much faster than the peripheral register write,
 * the peripheral interrupt flag may still be set after the ISR exits; this results in
 * an error similar to errata 838869.
 */
#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U))
#define SDK_ISR_EXIT_BARRIER __DSB()
#else
#define SDK_ISR_EXIT_BARRIER
#endif

/* @} */
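
/*
 * A minimal sketch of placing SDK_ISR_EXIT_BARRIER at the end of an ISR; the
 * handler name and the flag-clearing write are hypothetical placeholders:
 *
 * @code
   void DEMO_IRQHandler(void)
   {
       DEMO_PERIPH->STATUS = DEMO_STATUS_FLAG_MASK; // clear the interrupt flag (write may be buffered)
       SDK_ISR_EXIT_BARRIER; // ensure the write completes before the exception return
   }
   @endcode
 */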

/*! @name Alignment variable definition macros */
/* @{ */
#if (defined(__ICCARM__))
/*
 * Workaround to disable MISRA C message suppress warnings for the IAR compiler.
 * http://supp.iar.com/Support/?note=24725
 */
_Pragma("diag_suppress=Pm120")
#define SDK_PRAGMA(x) _Pragma(#x)
_Pragma("diag_error=Pm120")
/*! Macro to define a variable with alignbytes alignment */
#define SDK_ALIGN(var, alignbytes) SDK_PRAGMA(data_alignment = alignbytes) var
#elif defined(__CC_ARM) || defined(__ARMCC_VERSION)
/*! Macro to define a variable with alignbytes alignment */
#define SDK_ALIGN(var, alignbytes) __attribute__((aligned(alignbytes))) var
#elif defined(__GNUC__)
/*! Macro to define a variable with alignbytes alignment */
#define SDK_ALIGN(var, alignbytes) var __attribute__((aligned(alignbytes)))
#else
#error Toolchain not supported
#endif
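
/*
 * A usage sketch for SDK_ALIGN; the buffer name, size, and 16-byte alignment
 * are arbitrary example values:
 *
 * @code
   SDK_ALIGN(static uint8_t s_dmaBuffer[64], 16U); // 64-byte buffer aligned to a 16-byte boundary
   @endcode
 */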

/*! Macro to define a variable with L1 d-cache line size alignment */
#if defined(FSL_FEATURE_L1DCACHE_LINESIZE_BYTE)
#define SDK_L1DCACHE_ALIGN(var) SDK_ALIGN(var, FSL_FEATURE_L1DCACHE_LINESIZE_BYTE)
#endif
/*! Macro to define a variable with L2 cache line size alignment */
#if defined(FSL_FEATURE_L2CACHE_LINESIZE_BYTE)
#define SDK_L2CACHE_ALIGN(var) SDK_ALIGN(var, FSL_FEATURE_L2CACHE_LINESIZE_BYTE)
#endif

/*! Macro to change a value to a given size aligned value */
#define SDK_SIZEALIGN(var, alignbytes) \
    ((unsigned int)((var) + ((alignbytes)-1U)) & (unsigned int)(~(unsigned int)((alignbytes)-1U)))
/* @} */
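
/*
 * SDK_SIZEALIGN rounds a value up to the next multiple of alignbytes, for example:
 *
 * @code
   uint32_t alignedSize = SDK_SIZEALIGN(100U, 32U); // (100 + 31) & ~31 = 128
   @endcode
 */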

/*! @name Non-cacheable region definition macros */
/* For initialized, non-zero, non-cacheable variables, please use "AT_NONCACHEABLE_SECTION_INIT(var) = {xx};" or
 * "AT_NONCACHEABLE_SECTION_ALIGN_INIT(var) = {xx};" in your projects to define them. For zero-initialized
 * non-cacheable variables, please use "AT_NONCACHEABLE_SECTION(var);" or "AT_NONCACHEABLE_SECTION_ALIGN(var);"
 * to define them; these zero-initialized variables are initialized to zero during system startup.
 */
/* @{ */

#if ((!(defined(FSL_FEATURE_HAS_NO_NONCACHEABLE_SECTION) && FSL_FEATURE_HAS_NO_NONCACHEABLE_SECTION)) && \
     defined(FSL_FEATURE_L1ICACHE_LINESIZE_BYTE))

#if (defined(__ICCARM__))
#define AT_NONCACHEABLE_SECTION(var) var @"NonCacheable"
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) SDK_PRAGMA(data_alignment = alignbytes) var @"NonCacheable"
#define AT_NONCACHEABLE_SECTION_INIT(var) var @"NonCacheable.init"
#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
    SDK_PRAGMA(data_alignment = alignbytes) var @"NonCacheable.init"

#elif (defined(__CC_ARM) || defined(__ARMCC_VERSION))
#define AT_NONCACHEABLE_SECTION_INIT(var) __attribute__((section("NonCacheable.init"))) var
#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
    __attribute__((section("NonCacheable.init"))) __attribute__((aligned(alignbytes))) var
#if (defined(__CC_ARM))
#define AT_NONCACHEABLE_SECTION(var) __attribute__((section("NonCacheable"), zero_init)) var
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
    __attribute__((section("NonCacheable"), zero_init)) __attribute__((aligned(alignbytes))) var
#else
#define AT_NONCACHEABLE_SECTION(var) __attribute__((section(".bss.NonCacheable"))) var
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
    __attribute__((section(".bss.NonCacheable"))) __attribute__((aligned(alignbytes))) var
#endif

#elif (defined(__GNUC__))
#if defined(__ARM_ARCH_8A__) /* This macro is ARMv8-A specific */
#define __CS "//"
#else
#define __CS "@"
#endif

/* For GCC, when the non-cacheable section is required, please define "__STARTUP_INITIALIZE_NONCACHEDATA"
 * in your projects to make sure the non-cacheable section variables are initialized during system startup.
 */
#define AT_NONCACHEABLE_SECTION_INIT(var) __attribute__((section("NonCacheable.init"))) var
#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
    __attribute__((section("NonCacheable.init"))) var __attribute__((aligned(alignbytes)))
#define AT_NONCACHEABLE_SECTION(var) __attribute__((section("NonCacheable,\"aw\",%nobits " __CS))) var
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
    __attribute__((section("NonCacheable,\"aw\",%nobits " __CS))) var __attribute__((aligned(alignbytes)))
#else
#error Toolchain not supported.
#endif

#else

#define AT_NONCACHEABLE_SECTION(var) var
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) SDK_ALIGN(var, alignbytes)
#define AT_NONCACHEABLE_SECTION_INIT(var) var
#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) SDK_ALIGN(var, alignbytes)

#endif

/* @} */
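
/*
 * Definition sketches following the guidance above; the variable names, sizes,
 * and initializers are illustrative only:
 *
 * @code
   AT_NONCACHEABLE_SECTION_INIT(uint32_t g_dmaDescriptor[4]) = {1U, 2U, 3U, 4U}; // initialized, non-zero
   AT_NONCACHEABLE_SECTION_ALIGN(uint8_t g_rxBuffer[512], 32U); // zero-initialized, 32-byte aligned
   @endcode
 */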

/*!
 * @name Time sensitive region
 * @{
 */
#if (defined(__ICCARM__))
#define AT_QUICKACCESS_SECTION_CODE(func) func @"CodeQuickAccess"
#define AT_QUICKACCESS_SECTION_DATA(var) var @"DataQuickAccess"
#define AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes) \
    SDK_PRAGMA(data_alignment = alignbytes) var @"DataQuickAccess"
#elif (defined(__CC_ARM) || defined(__ARMCC_VERSION))
#define AT_QUICKACCESS_SECTION_CODE(func) __attribute__((section("CodeQuickAccess"), __noinline__)) func
#define AT_QUICKACCESS_SECTION_DATA(var) __attribute__((section("DataQuickAccess"))) var
#define AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes) \
    __attribute__((section("DataQuickAccess"))) __attribute__((aligned(alignbytes))) var
#elif (defined(__GNUC__))
#define AT_QUICKACCESS_SECTION_CODE(func) __attribute__((section("CodeQuickAccess"), __noinline__)) func
#define AT_QUICKACCESS_SECTION_DATA(var) __attribute__((section("DataQuickAccess"))) var
#define AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes) \
    __attribute__((section("DataQuickAccess"))) var __attribute__((aligned(alignbytes)))
#else
#error Toolchain not supported.
#endif /* defined(__ICCARM__) */
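
/*
 * Declaration sketches for the time-sensitive-region macros; the function and
 * variable names are hypothetical:
 *
 * @code
   AT_QUICKACCESS_SECTION_CODE(void DEMO_TimeCriticalFunc(void));
   AT_QUICKACCESS_SECTION_DATA(uint32_t g_fastTable[16]);
   @endcode
 */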

/*! @name Ram Function */
#if (defined(__ICCARM__))
#define RAMFUNCTION_SECTION_CODE(func) func @"RamFunction"
#elif (defined(__CC_ARM) || defined(__ARMCC_VERSION))
#define RAMFUNCTION_SECTION_CODE(func) __attribute__((section("RamFunction"))) func
#elif (defined(__GNUC__))
#define RAMFUNCTION_SECTION_CODE(func) __attribute__((section("RamFunction"))) func
#else
#error Toolchain not supported.
#endif /* defined(__ICCARM__) */
/*! @} */
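
/*
 * A declaration sketch for a function placed in the RamFunction section; the
 * function name is illustrative, and the linker file must provide the section:
 *
 * @code
   RAMFUNCTION_SECTION_CODE(void DEMO_FlashOperation(uint32_t addr));
   @endcode
 */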

/*!
 * @def MSDK_REG_SECURE_ADDR(x)
 * Convert the register address to the one used in secure mode.
 *
 * @def MSDK_REG_NONSECURE_ADDR(x)
 * Convert the register address to the one used in non-secure mode.
 */

#if (defined(__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE & 0x2))
#define MSDK_REG_SECURE_ADDR(x) ((uintptr_t)(x) | (0x1UL << 28))
#define MSDK_REG_NONSECURE_ADDR(x) ((uintptr_t)(x) & ~(0x1UL << 28))
#else
#define MSDK_REG_SECURE_ADDR(x) (x)
#define MSDK_REG_NONSECURE_ADDR(x) (x)
#endif
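
/*
 * As encoded above, the secure alias of a register address is selected by address
 * bit 28 on TrustZone-M devices. An illustrative sketch with a hypothetical base
 * address:
 *
 * @code
   volatile uint32_t *secReg = (volatile uint32_t *)MSDK_REG_SECURE_ADDR(0x40001000UL);
   @endcode
 */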

#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
void DefaultISR(void);
#endif

/*
 * fsl_clock.h is included here because it needs MAKE_VERSION/MAKE_STATUS/status_t,
 * which are defined before this point.
 */
#include "fsl_clock.h"

/*
 * Chip-level peripheral reset API, for MCUs that implement peripheral reset control outside the peripheral itself.
 */
#if ((defined(FSL_FEATURE_SOC_SYSCON_COUNT) && (FSL_FEATURE_SOC_SYSCON_COUNT > 0)) || \
     (defined(FSL_FEATURE_SOC_ASYNC_SYSCON_COUNT) && (FSL_FEATURE_SOC_ASYNC_SYSCON_COUNT > 0)))
#include "fsl_reset.h"
#endif

/*******************************************************************************
 * API
 ******************************************************************************/

#if defined(__cplusplus)
extern "C" {
#endif /* __cplusplus */

/*!
 * @brief Enable a specific interrupt.
 *
 * Enable LEVEL1 interrupts. For some devices, there might be multiple interrupt
 * levels. For example, there are NVIC and intmux. Here the interrupts connected
 * to the NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
 * The interrupts connected to the intmux are the LEVEL2 interrupts; they are routed
 * to the NVIC first and then to the core.
 *
 * This function only enables the LEVEL1 interrupts. The number of LEVEL1 interrupts
 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
 *
 * @param interrupt The IRQ number.
 * @retval kStatus_Success Interrupt enabled successfully
 * @retval kStatus_Fail Failed to enable the interrupt
 */
static inline status_t EnableIRQ(IRQn_Type interrupt)
{
    status_t status = kStatus_Success;

    if (NotAvail_IRQn == interrupt)
    {
        status = kStatus_Fail;
    }

#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
    else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
    {
        status = kStatus_Fail;
    }
#endif

    else
    {
#if defined(__GIC_PRIO_BITS)
        GIC_EnableIRQ(interrupt);
#else
        NVIC_EnableIRQ(interrupt);
#endif
    }

    return status;
}

/*!
 * @brief Disable a specific interrupt.
 *
 * Disable LEVEL1 interrupts. For some devices, there might be multiple interrupt
 * levels. For example, there are NVIC and intmux. Here the interrupts connected
 * to the NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
 * The interrupts connected to the intmux are the LEVEL2 interrupts; they are routed
 * to the NVIC first and then to the core.
 *
 * This function only disables the LEVEL1 interrupts. The number of LEVEL1 interrupts
 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
 *
 * @param interrupt The IRQ number.
 * @retval kStatus_Success Interrupt disabled successfully
 * @retval kStatus_Fail Failed to disable the interrupt
 */
static inline status_t DisableIRQ(IRQn_Type interrupt)
{
    status_t status = kStatus_Success;

    if (NotAvail_IRQn == interrupt)
    {
        status = kStatus_Fail;
    }

#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
    else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
    {
        status = kStatus_Fail;
    }
#endif

    else
    {
#if defined(__GIC_PRIO_BITS)
        GIC_DisableIRQ(interrupt);
#else
        NVIC_DisableIRQ(interrupt);
#endif
    }

    return status;
}
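
/*
 * A minimal sketch of enabling and later disabling a LEVEL1 interrupt; the IRQ
 * name is a hypothetical placeholder for a device-specific IRQn_Type value:
 *
 * @code
   (void)EnableIRQ(DEMO_IRQn);
   // ... interrupt is serviced while enabled ...
   (void)DisableIRQ(DEMO_IRQn);
   @endcode
 */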

#if defined(__GIC_PRIO_BITS)
#define NVIC_SetPriority(irq, prio) do {} while(0)
#endif

/*!
 * @brief Enable the IRQ and set the interrupt priority.
 *
 * Only handles LEVEL1 interrupts. For some devices, there might be multiple interrupt
 * levels. For example, there are NVIC and intmux. Here the interrupts connected
 * to the NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
 * The interrupts connected to the intmux are the LEVEL2 interrupts; they are routed
 * to the NVIC first and then to the core.
 *
 * This function only handles the LEVEL1 interrupts. The number of LEVEL1 interrupts
 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
 *
 * @param interrupt The IRQ to enable.
 * @param priNum Priority number to set in the interrupt controller register.
 * @retval kStatus_Success Interrupt enabled and priority set successfully
 * @retval kStatus_Fail Failed to enable the interrupt or set the priority.
 */
static inline status_t EnableIRQWithPriority(IRQn_Type interrupt, uint8_t priNum)
{
    status_t status = kStatus_Success;

    if (NotAvail_IRQn == interrupt)
    {
        status = kStatus_Fail;
    }

#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
    else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
    {
        status = kStatus_Fail;
    }
#endif

    else
    {
#if defined(__GIC_PRIO_BITS)
        GIC_SetPriority(interrupt, priNum);
        GIC_EnableIRQ(interrupt);
#else
        NVIC_SetPriority(interrupt, priNum);
        NVIC_EnableIRQ(interrupt);
#endif
    }

    return status;
}

/*!
 * @brief Set the IRQ priority.
 *
 * Only handles LEVEL1 interrupts. For some devices, there might be multiple interrupt
 * levels. For example, there are NVIC and intmux. Here the interrupts connected
 * to the NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
 * The interrupts connected to the intmux are the LEVEL2 interrupts; they are routed
 * to the NVIC first and then to the core.
 *
 * This function only handles the LEVEL1 interrupts. The number of LEVEL1 interrupts
 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
 *
 * @param interrupt The IRQ to set.
 * @param priNum Priority number to set in the interrupt controller register.
 *
 * @retval kStatus_Success Interrupt priority set successfully
 * @retval kStatus_Fail Failed to set the interrupt priority.
 */
static inline status_t IRQ_SetPriority(IRQn_Type interrupt, uint8_t priNum)
{
    status_t status = kStatus_Success;

    if (NotAvail_IRQn == interrupt)
    {
        status = kStatus_Fail;
    }

#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
    else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
    {
        status = kStatus_Fail;
    }
#endif

    else
    {
#if defined(__GIC_PRIO_BITS)
        GIC_SetPriority(interrupt, priNum);
#else
        NVIC_SetPriority(interrupt, priNum);
#endif
    }

    return status;
}

/*!
 * @brief Clear the pending IRQ flag.
 *
 * Only handles LEVEL1 interrupts. For some devices, there might be multiple interrupt
 * levels. For example, there are NVIC and intmux. Here the interrupts connected
 * to the NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
 * The interrupts connected to the intmux are the LEVEL2 interrupts; they are routed
 * to the NVIC first and then to the core.
 *
 * This function only handles the LEVEL1 interrupts. The number of LEVEL1 interrupts
 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
 *
 * @param interrupt The IRQ whose pending flag to clear.
 *
 * @retval kStatus_Success Pending interrupt flag cleared successfully
 * @retval kStatus_Fail Failed to clear the pending interrupt flag.
 */
static inline status_t IRQ_ClearPendingIRQ(IRQn_Type interrupt)
{
    status_t status = kStatus_Success;

    if (NotAvail_IRQn == interrupt)
    {
        status = kStatus_Fail;
    }

#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
    else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
    {
        status = kStatus_Fail;
    }
#endif

    else
    {
#if defined(__GIC_PRIO_BITS)
        GIC_ClearPendingIRQ(interrupt);
#else
        NVIC_ClearPendingIRQ(interrupt);
#endif
    }

    return status;
}

/*!
 * @brief Disable the global IRQ
 *
 * Disable the global interrupt and return the current primask register value. The user is
 * required to provide this primask value to the paired EnableGlobalIRQ().
 *
 * @return Current primask value.
 */
static inline uint32_t DisableGlobalIRQ(void)
{
    uint32_t mask;

#if defined(CPSR_I_Msk)
    mask = __get_CPSR() & CPSR_I_Msk;
#elif defined(DAIF_I_BIT)
    mask = __get_DAIF() & DAIF_I_BIT;
#else
    mask = __get_PRIMASK();
#endif
    __disable_irq();

    return mask;
}

/*!
 * @brief Enable the global IRQ
 *
 * Set the primask register with the provided primask value, rather than simply enabling
 * interrupts. This is for convenient RTOS integration, because some RTOSes have their own
 * primask management mechanism. EnableGlobalIRQ() and DisableGlobalIRQ() must be used
 * in pairs.
 *
 * @param primask value of the primask register to be restored. The primask value is supposed
 * to be provided by the paired DisableGlobalIRQ().
 */
static inline void EnableGlobalIRQ(uint32_t primask)
{
#if defined(CPSR_I_Msk)
    __set_CPSR((__get_CPSR() & ~CPSR_I_Msk) | primask);
#elif defined(DAIF_I_BIT)
    if (0UL == primask)
    {
        __enable_irq();
    }
#else
    __set_PRIMASK(primask);
#endif
}
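
/*
 * A critical-section sketch using the pair of functions above:
 *
 * @code
   uint32_t primask = DisableGlobalIRQ(); // save the current mask state and mask interrupts
   // ... code that must not be preempted by interrupts ...
   EnableGlobalIRQ(primask); // restore the saved mask state
   @endcode
 */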

#if defined(ENABLE_RAM_VECTOR_TABLE)
/*!
 * @brief Install an IRQ handler.
 *
 * @param irq IRQ number
 * @param irqHandler IRQ handler address
 * @return The old IRQ handler address
 */
uint32_t InstallIRQHandler(IRQn_Type irq, uint32_t irqHandler);
#endif /* ENABLE_RAM_VECTOR_TABLE. */
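
/*
 * A usage sketch, assuming ENABLE_RAM_VECTOR_TABLE is defined; the IRQ number
 * and handler name are hypothetical:
 *
 * @code
   void DEMO_IRQHandler(void);
   uint32_t oldHandler = InstallIRQHandler(DEMO_IRQn, (uint32_t)DEMO_IRQHandler);
   @endcode
 */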

#if (defined(FSL_FEATURE_SOC_SYSCON_COUNT) && (FSL_FEATURE_SOC_SYSCON_COUNT > 0))

/*
 * When FSL_FEATURE_POWERLIB_EXTEND is defined to a non-zero value,
 * powerlib should be used instead of these functions.
 */
#if !(defined(FSL_FEATURE_POWERLIB_EXTEND) && (FSL_FEATURE_POWERLIB_EXTEND != 0))
/*!
 * @brief Enable a specific interrupt for wake-up from deep-sleep mode.
 *
 * Enable the interrupt for wake-up from deep-sleep mode.
 * Some interrupts are typically used in sleep mode only and will not occur during
 * deep-sleep mode because the relevant clocks are stopped. However, it is possible to enable
 * those clocks (significantly increasing power consumption in the reduced power mode),
 * making these wake-ups possible.
 *
 * @note This function also enables the interrupt in the NVIC (EnableIRQ() is called internally).
 *
 * @param interrupt The IRQ number.
 */
void EnableDeepSleepIRQ(IRQn_Type interrupt);

/*!
 * @brief Disable a specific interrupt for wake-up from deep-sleep mode.
 *
 * Disable the interrupt for wake-up from deep-sleep mode.
 * Some interrupts are typically used in sleep mode only and will not occur during
 * deep-sleep mode because the relevant clocks are stopped. However, it is possible to enable
 * those clocks (significantly increasing power consumption in the reduced power mode),
 * making these wake-ups possible.
 *
 * @note This function also disables the interrupt in the NVIC (DisableIRQ() is called internally).
 *
 * @param interrupt The IRQ number.
 */
void DisableDeepSleepIRQ(IRQn_Type interrupt);
#endif /* FSL_FEATURE_POWERLIB_EXTEND */
#endif /* FSL_FEATURE_SOC_SYSCON_COUNT */

#if defined(DWT)
/*!
 * @brief Enable the counter to get CPU cycles.
 */
void MSDK_EnableCpuCycleCounter(void);

/*!
 * @brief Get the current CPU cycle count.
 *
 * @return Current CPU cycle count.
 */
uint32_t MSDK_GetCpuCycleCount(void);
#endif
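
/*
 * A cycle-measurement sketch using the DWT-based counter API above; the DWT
 * cycle counter is 32 bits wide and wraps, so measure intervals shorter than
 * 2^32 cycles:
 *
 * @code
   MSDK_EnableCpuCycleCounter();
   uint32_t start = MSDK_GetCpuCycleCount();
   // ... code under measurement ...
   uint32_t cycles = MSDK_GetCpuCycleCount() - start;
   @endcode
 */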

#if defined(__cplusplus)
}
#endif /* __cplusplus */

/*! @} */

#endif /* FSL_COMMON_ARM_H_ */