/*
 * Copyright (c) 2015-2016, Freescale Semiconductor, Inc.
 * Copyright 2016-2022 NXP
 * All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef _FSL_COMMON_ARM_H_
#define _FSL_COMMON_ARM_H_

/*
 * For CMSIS pack RTE.
 * CMSIS pack RTE generates "RTE_Components.h" which contains the statements
 * of the related <RTE_Components_h> element for all selected software components.
 */
#ifdef _RTE_
#include "RTE_Components.h"
#endif

/*!
 * @addtogroup ksdk_common
 * @{
 */

/*! @name Atomic modification
 *
 * These macros are used for atomic access, such as read-modify-write
 * of peripheral registers:
 *
 * - SDK_ATOMIC_LOCAL_ADD
 * - SDK_ATOMIC_LOCAL_SET
 * - SDK_ATOMIC_LOCAL_CLEAR
 * - SDK_ATOMIC_LOCAL_TOGGLE
 * - SDK_ATOMIC_LOCAL_CLEAR_AND_SET
 *
 * Take SDK_ATOMIC_LOCAL_CLEAR_AND_SET as an example: the parameter @c addr
 * is the address of the peripheral register or variable to modify
 * atomically, the parameter @c clearBits is the bits to clear, and the
 * parameter @c setBits is the bits to set.
 * For example, to set bit1:bit0 of a 32-bit register to 0b10, use it like this:
 *
 * @code
   volatile uint32_t * reg = (volatile uint32_t *)REG_ADDR;

   SDK_ATOMIC_LOCAL_CLEAR_AND_SET(reg, 0x03, 0x02);
   @endcode
 *
 * In this example, bit1:bit0 of the register are cleared and bit1 is set; as a
 * result, the register's bit1:bit0 = 0b10.
 *
 * @note On platforms that do not support exclusive load and store, these macros
 * disable the global interrupt to protect the modification.
 *
 * @note These macros only guarantee atomicity on the local processor. For
 * multi-processor devices, use a hardware semaphore such as SEMA42 to
 * guarantee exclusive access if necessary.
 *
 * @{
 */

/* clang-format off */
#if ((defined(__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
     (defined(__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
     (defined(__ARM_ARCH_8M_MAIN__) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
     (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ == 1)))
/* clang-format on */

/* If the LDREX and STREX are supported, use them. */
#define _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, val, ops) \
    do                                              \
    {                                               \
        (val) = __LDREXB(addr);                     \
        (ops);                                      \
    } while (0UL != __STREXB((val), (addr)))

#define _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, val, ops) \
    do                                              \
    {                                               \
        (val) = __LDREXH(addr);                     \
        (ops);                                      \
    } while (0UL != __STREXH((val), (addr)))

#define _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, val, ops) \
    do                                              \
    {                                               \
        (val) = __LDREXW(addr);                     \
        (ops);                                      \
    } while (0UL != __STREXW((val), (addr)))

static inline void _SDK_AtomicLocalAdd1Byte(volatile uint8_t *addr, uint8_t val)
{
    uint8_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val += val);
}

static inline void _SDK_AtomicLocalAdd2Byte(volatile uint16_t *addr, uint16_t val)
{
    uint16_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val += val);
}

static inline void _SDK_AtomicLocalAdd4Byte(volatile uint32_t *addr, uint32_t val)
{
    uint32_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val += val);
}

static inline void _SDK_AtomicLocalSub1Byte(volatile uint8_t *addr, uint8_t val)
{
    uint8_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val -= val);
}

static inline void _SDK_AtomicLocalSub2Byte(volatile uint16_t *addr, uint16_t val)
{
    uint16_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val -= val);
}

static inline void _SDK_AtomicLocalSub4Byte(volatile uint32_t *addr, uint32_t val)
{
    uint32_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val -= val);
}

static inline void _SDK_AtomicLocalSet1Byte(volatile uint8_t *addr, uint8_t bits)
{
    uint8_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val |= bits);
}

static inline void _SDK_AtomicLocalSet2Byte(volatile uint16_t *addr, uint16_t bits)
{
    uint16_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val |= bits);
}

static inline void _SDK_AtomicLocalSet4Byte(volatile uint32_t *addr, uint32_t bits)
{
    uint32_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val |= bits);
}

static inline void _SDK_AtomicLocalClear1Byte(volatile uint8_t *addr, uint8_t bits)
{
    uint8_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val &= ~bits);
}

static inline void _SDK_AtomicLocalClear2Byte(volatile uint16_t *addr, uint16_t bits)
{
    uint16_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val &= ~bits);
}

static inline void _SDK_AtomicLocalClear4Byte(volatile uint32_t *addr, uint32_t bits)
{
    uint32_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val &= ~bits);
}

static inline void _SDK_AtomicLocalToggle1Byte(volatile uint8_t *addr, uint8_t bits)
{
    uint8_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val ^= bits);
}

static inline void _SDK_AtomicLocalToggle2Byte(volatile uint16_t *addr, uint16_t bits)
{
    uint16_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val ^= bits);
}

static inline void _SDK_AtomicLocalToggle4Byte(volatile uint32_t *addr, uint32_t bits)
{
    uint32_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val ^= bits);
}

static inline void _SDK_AtomicLocalClearAndSet1Byte(volatile uint8_t *addr, uint8_t clearBits, uint8_t setBits)
{
    uint8_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val = (s_val & ~clearBits) | setBits);
}

static inline void _SDK_AtomicLocalClearAndSet2Byte(volatile uint16_t *addr, uint16_t clearBits, uint16_t setBits)
{
    uint16_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val = (s_val & ~clearBits) | setBits);
}

static inline void _SDK_AtomicLocalClearAndSet4Byte(volatile uint32_t *addr, uint32_t clearBits, uint32_t setBits)
{
    uint32_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val = (s_val & ~clearBits) | setBits);
}

#define SDK_ATOMIC_LOCAL_ADD(addr, val)                                                                 \
    ((1UL == sizeof(*(addr))) ?                                                                         \
         _SDK_AtomicLocalAdd1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(val)) :        \
         ((2UL == sizeof(*(addr))) ?                                                                    \
              _SDK_AtomicLocalAdd2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(val)) : \
              _SDK_AtomicLocalAdd4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(val))))

#define SDK_ATOMIC_LOCAL_SET(addr, bits)                                                                 \
    ((1UL == sizeof(*(addr))) ?                                                                          \
         _SDK_AtomicLocalSet1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(bits)) :        \
         ((2UL == sizeof(*(addr))) ?                                                                     \
              _SDK_AtomicLocalSet2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(bits)) : \
              _SDK_AtomicLocalSet4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(bits))))

#define SDK_ATOMIC_LOCAL_CLEAR(addr, bits)                                                                 \
    ((1UL == sizeof(*(addr))) ?                                                                            \
         _SDK_AtomicLocalClear1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(bits)) :        \
         ((2UL == sizeof(*(addr))) ?                                                                       \
              _SDK_AtomicLocalClear2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(bits)) : \
              _SDK_AtomicLocalClear4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(bits))))

#define SDK_ATOMIC_LOCAL_TOGGLE(addr, bits)                                                                 \
    ((1UL == sizeof(*(addr))) ?                                                                             \
         _SDK_AtomicLocalToggle1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(bits)) :        \
         ((2UL == sizeof(*(addr))) ?                                                                        \
              _SDK_AtomicLocalToggle2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(bits)) : \
              _SDK_AtomicLocalToggle4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(bits))))

#define SDK_ATOMIC_LOCAL_CLEAR_AND_SET(addr, clearBits, setBits)                                                                           \
    ((1UL == sizeof(*(addr))) ?                                                                                                            \
         _SDK_AtomicLocalClearAndSet1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(clearBits), (uint8_t)(setBits)) :         \
         ((2UL == sizeof(*(addr))) ?                                                                                                       \
              _SDK_AtomicLocalClearAndSet2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(clearBits), (uint16_t)(setBits)) : \
              _SDK_AtomicLocalClearAndSet4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(clearBits), (uint32_t)(setBits))))
#else

#define SDK_ATOMIC_LOCAL_ADD(addr, val)      \
    do                                       \
    {                                        \
        uint32_t s_atomicOldInt;             \
        s_atomicOldInt = DisableGlobalIRQ(); \
        *(addr) += (val);                    \
        EnableGlobalIRQ(s_atomicOldInt);     \
    } while (0)

#define SDK_ATOMIC_LOCAL_SET(addr, bits)     \
    do                                       \
    {                                        \
        uint32_t s_atomicOldInt;             \
        s_atomicOldInt = DisableGlobalIRQ(); \
        *(addr) |= (bits);                   \
        EnableGlobalIRQ(s_atomicOldInt);     \
    } while (0)

#define SDK_ATOMIC_LOCAL_CLEAR(addr, bits)   \
    do                                       \
    {                                        \
        uint32_t s_atomicOldInt;             \
        s_atomicOldInt = DisableGlobalIRQ(); \
        *(addr) &= ~(bits);                  \
        EnableGlobalIRQ(s_atomicOldInt);     \
    } while (0)

#define SDK_ATOMIC_LOCAL_TOGGLE(addr, bits)  \
    do                                       \
    {                                        \
        uint32_t s_atomicOldInt;             \
        s_atomicOldInt = DisableGlobalIRQ(); \
        *(addr) ^= (bits);                   \
        EnableGlobalIRQ(s_atomicOldInt);     \
    } while (0)

#define SDK_ATOMIC_LOCAL_CLEAR_AND_SET(addr, clearBits, setBits) \
    do                                                           \
    {                                                            \
        uint32_t s_atomicOldInt;                                 \
        s_atomicOldInt = DisableGlobalIRQ();                     \
        *(addr) = (*(addr) & ~(clearBits)) | (setBits);          \
        EnableGlobalIRQ(s_atomicOldInt);                         \
    } while (0)

#endif
/* @} */
/*! @name Timer utilities */
/* @{ */
/*! Macro to convert a microsecond period to a raw count value */
#define USEC_TO_COUNT(us, clockFreqInHz) (uint64_t)(((uint64_t)(us) * (clockFreqInHz)) / 1000000U)
/*! Macro to convert a raw count value to microseconds */
#define COUNT_TO_USEC(count, clockFreqInHz) (uint64_t)((uint64_t)(count) * 1000000U / (clockFreqInHz))

/*! Macro to convert a millisecond period to a raw count value */
#define MSEC_TO_COUNT(ms, clockFreqInHz) (uint64_t)((uint64_t)(ms) * (clockFreqInHz) / 1000U)
/*! Macro to convert a raw count value to milliseconds */
#define COUNT_TO_MSEC(count, clockFreqInHz) (uint64_t)((uint64_t)(count) * 1000U / (clockFreqInHz))
/* @} */
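
/*
 * Example (illustrative): convert a 100 us period to timer ticks for a counter
 * clocked at 48 MHz, then convert back. The 48 MHz frequency is an assumed
 * value for this sketch.
 *
 * @code
   uint64_t ticks = USEC_TO_COUNT(100U, 48000000U);  // 4800 ticks
   uint64_t us    = COUNT_TO_USEC(ticks, 48000000U); // 100 us
   @endcode
 */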

/*! @name ISR exit barrier
 * @{
 *
 * ARM errata 838869 affects Cortex-M4 and Cortex-M4F: a store immediate overlapping
 * an exception return operation might vector to an incorrect interrupt.
 * For Cortex-M7, if the core clock is much faster than the peripheral register write
 * speed, a peripheral's interrupt flags may still be set after exiting the ISR; this
 * results in an error similar to errata 838869.
 */
#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U))
#define SDK_ISR_EXIT_BARRIER __DSB()
#else
#define SDK_ISR_EXIT_BARRIER
#endif
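
/*
 * Typical usage (a sketch): make the barrier the last statement of the ISR,
 * after clearing the peripheral interrupt flags, so the flag-clear write
 * completes before the exception return. DEMO_IRQHandler and
 * DEMO_ClearStatusFlags() are hypothetical placeholders.
 *
 * @code
   void DEMO_IRQHandler(void)
   {
       DEMO_ClearStatusFlags(); // hypothetical peripheral flag-clear call
       SDK_ISR_EXIT_BARRIER;
   }
   @endcode
 */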

/* @} */

/*! @name Alignment variable definition macros */
/* @{ */
#if (defined(__ICCARM__))
/*
 * Workaround to disable MISRA C message suppress warnings for IAR compiler.
 * http://supp.iar.com/Support/?note=24725
 */
_Pragma("diag_suppress=Pm120")
#define SDK_PRAGMA(x) _Pragma(#x)
_Pragma("diag_error=Pm120")
/*! Macro to define a variable with alignbytes alignment */
#define SDK_ALIGN(var, alignbytes) SDK_PRAGMA(data_alignment = alignbytes) var
#elif defined(__CC_ARM) || defined(__ARMCC_VERSION)
/*! Macro to define a variable with alignbytes alignment */
#define SDK_ALIGN(var, alignbytes) __attribute__((aligned(alignbytes))) var
#elif defined(__GNUC__)
/*! Macro to define a variable with alignbytes alignment */
#define SDK_ALIGN(var, alignbytes) var __attribute__((aligned(alignbytes)))
#else
#error Toolchain not supported
#endif
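
/*
 * Example (illustrative): define a 16-byte aligned buffer, e.g. for DMA. The
 * buffer name and size are assumed for this sketch.
 *
 * @code
   SDK_ALIGN(uint8_t s_dmaBuffer[64], 16);
   @endcode
 */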

/*! Macro to define a variable with L1 d-cache line size alignment */
#if defined(FSL_FEATURE_L1DCACHE_LINESIZE_BYTE)
#define SDK_L1DCACHE_ALIGN(var) SDK_ALIGN(var, FSL_FEATURE_L1DCACHE_LINESIZE_BYTE)
#endif
/*! Macro to define a variable with L2 cache line size alignment */
#if defined(FSL_FEATURE_L2CACHE_LINESIZE_BYTE)
#define SDK_L2CACHE_ALIGN(var) SDK_ALIGN(var, FSL_FEATURE_L2CACHE_LINESIZE_BYTE)
#endif

/*! Macro to round a value up to the given size alignment */
#define SDK_SIZEALIGN(var, alignbytes) \
    ((unsigned int)((var) + ((alignbytes)-1U)) & (unsigned int)(~(unsigned int)((alignbytes)-1U)))
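
/*
 * Example (illustrative): round a size up to the next multiple of the
 * alignment; alignbytes must be a power of two for this bit trick to work.
 *
 * @code
   unsigned int alignedSize = SDK_SIZEALIGN(100U, 32U); // yields 128
   @endcode
 */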
/* @} */

/*! @name Non-cacheable region definition macros */
/* For initialized, non-zero, non-cacheable variables, use
 * "AT_NONCACHEABLE_SECTION_INIT(var) = {xx};" or
 * "AT_NONCACHEABLE_SECTION_ALIGN_INIT(var) = {xx};" in your projects to define them. For zero-initialized
 * non-cacheable variables, use "AT_NONCACHEABLE_SECTION(var);" or "AT_NONCACHEABLE_SECTION_ALIGN(var);";
 * these variables are initialized to zero during system startup.
 */
/* @{ */

#if ((!(defined(FSL_FEATURE_HAS_NO_NONCACHEABLE_SECTION) && FSL_FEATURE_HAS_NO_NONCACHEABLE_SECTION)) && \
     defined(FSL_FEATURE_L1ICACHE_LINESIZE_BYTE))

#if (defined(__ICCARM__))
#define AT_NONCACHEABLE_SECTION(var) var @"NonCacheable"
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) SDK_PRAGMA(data_alignment = alignbytes) var @"NonCacheable"
#define AT_NONCACHEABLE_SECTION_INIT(var) var @"NonCacheable.init"
#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
    SDK_PRAGMA(data_alignment = alignbytes) var @"NonCacheable.init"

#elif (defined(__CC_ARM) || defined(__ARMCC_VERSION))
#define AT_NONCACHEABLE_SECTION_INIT(var) __attribute__((section("NonCacheable.init"))) var
#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
    __attribute__((section("NonCacheable.init"))) __attribute__((aligned(alignbytes))) var
#if (defined(__CC_ARM))
#define AT_NONCACHEABLE_SECTION(var) __attribute__((section("NonCacheable"), zero_init)) var
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
    __attribute__((section("NonCacheable"), zero_init)) __attribute__((aligned(alignbytes))) var
#else
#define AT_NONCACHEABLE_SECTION(var) __attribute__((section(".bss.NonCacheable"))) var
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
    __attribute__((section(".bss.NonCacheable"))) __attribute__((aligned(alignbytes))) var
#endif

#elif (defined(__GNUC__))
/* For GCC, when the non-cacheable section is required, define "__STARTUP_INITIALIZE_NONCACHEDATA"
 * in your projects to make sure the non-cacheable section variables are initialized during system startup.
 */
#define AT_NONCACHEABLE_SECTION_INIT(var) __attribute__((section("NonCacheable.init"))) var
#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
    __attribute__((section("NonCacheable.init"))) var __attribute__((aligned(alignbytes)))
#define AT_NONCACHEABLE_SECTION(var) __attribute__((section("NonCacheable,\"aw\",%nobits @"))) var
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
    __attribute__((section("NonCacheable,\"aw\",%nobits @"))) var __attribute__((aligned(alignbytes)))
#else
#error Toolchain not supported.
#endif

#else

#define AT_NONCACHEABLE_SECTION(var) var
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) SDK_ALIGN(var, alignbytes)
#define AT_NONCACHEABLE_SECTION_INIT(var) var
#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) SDK_ALIGN(var, alignbytes)

#endif
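
/*
 * Example usage (a sketch): a zero-initialized, 4-byte aligned DMA buffer and
 * an initialized control word placed in the non-cacheable section. The names
 * and values are assumed for illustration.
 *
 * @code
   AT_NONCACHEABLE_SECTION_ALIGN(uint8_t s_rxBuffer[256], 4);
   AT_NONCACHEABLE_SECTION_INIT(uint32_t s_dmaControl) = 0x01UL;
   @endcode
 */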

/* @} */

/*!
 * @name Time sensitive region
 * @{
 */
#if (defined(__ICCARM__))
#define AT_QUICKACCESS_SECTION_CODE(func) func @"CodeQuickAccess"
#define AT_QUICKACCESS_SECTION_DATA(var) var @"DataQuickAccess"
#define AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes) \
    SDK_PRAGMA(data_alignment = alignbytes) var @"DataQuickAccess"
#elif (defined(__CC_ARM) || defined(__ARMCC_VERSION))
#define AT_QUICKACCESS_SECTION_CODE(func) __attribute__((section("CodeQuickAccess"), __noinline__)) func
#define AT_QUICKACCESS_SECTION_DATA(var) __attribute__((section("DataQuickAccess"))) var
#define AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes) \
    __attribute__((section("DataQuickAccess"))) __attribute__((aligned(alignbytes))) var
#elif (defined(__GNUC__))
#define AT_QUICKACCESS_SECTION_CODE(func) __attribute__((section("CodeQuickAccess"), __noinline__)) func
#define AT_QUICKACCESS_SECTION_DATA(var) __attribute__((section("DataQuickAccess"))) var
#define AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes) \
    __attribute__((section("DataQuickAccess"))) var __attribute__((aligned(alignbytes)))
#else
#error Toolchain not supported.
#endif /* defined(__ICCARM__) */
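
/*
 * Example (illustrative): place a time-critical routine and its state in the
 * quick-access sections, typically mapped to TCM. DEMO_ProgramFlash and
 * s_programState are hypothetical names.
 *
 * @code
   AT_QUICKACCESS_SECTION_CODE(void DEMO_ProgramFlash(uint32_t addr));
   AT_QUICKACCESS_SECTION_DATA(uint32_t s_programState);
   @endcode
 */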

/*! @name RAM function */
#if (defined(__ICCARM__))
#define RAMFUNCTION_SECTION_CODE(func) func @"RamFunction"
#elif (defined(__CC_ARM) || defined(__ARMCC_VERSION))
#define RAMFUNCTION_SECTION_CODE(func) __attribute__((section("RamFunction"))) func
#elif (defined(__GNUC__))
#define RAMFUNCTION_SECTION_CODE(func) __attribute__((section("RamFunction"))) func
#else
#error Toolchain not supported.
#endif /* defined(__ICCARM__) */
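
/*
 * Example (illustrative): declare a function that must run from RAM, such as
 * code executing while flash is being erased. DEMO_EraseSector is a
 * hypothetical name.
 *
 * @code
   RAMFUNCTION_SECTION_CODE(void DEMO_EraseSector(uint32_t sectorAddr));
   @endcode
 */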
/* @} */

#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
void DefaultISR(void);
#endif

/*
 * fsl_clock.h is included here because it needs MAKE_VERSION/MAKE_STATUS/status_t,
 * which are defined earlier in this file.
 */
#include "fsl_clock.h"

/*
 * Chip-level peripheral reset API, for MCUs that implement peripheral reset control external to a peripheral.
 */
#if ((defined(FSL_FEATURE_SOC_SYSCON_COUNT) && (FSL_FEATURE_SOC_SYSCON_COUNT > 0)) || \
     (defined(FSL_FEATURE_SOC_ASYNC_SYSCON_COUNT) && (FSL_FEATURE_SOC_ASYNC_SYSCON_COUNT > 0)))
#include "fsl_reset.h"
#endif

/*******************************************************************************
 * API
 ******************************************************************************/

#if defined(__cplusplus)
extern "C" {
#endif /* __cplusplus*/

/*!
 * @brief Enable a specific interrupt.
 *
 * Enable a LEVEL1 interrupt. Some devices have multiple interrupt levels; for
 * example, there are the NVIC and the INTMUX. Here, the interrupts connected
 * to the NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
 * The interrupts connected to the INTMUX are the LEVEL2 interrupts; they are routed
 * to the NVIC first and then to the core.
 *
 * This function only enables the LEVEL1 interrupts. The number of LEVEL1 interrupts
 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
 *
 * @param interrupt The IRQ number.
 * @retval kStatus_Success Interrupt enabled successfully
 * @retval kStatus_Fail Failed to enable the interrupt
 */
static inline status_t EnableIRQ(IRQn_Type interrupt)
{
    status_t status = kStatus_Success;

    if (NotAvail_IRQn == interrupt)
    {
        status = kStatus_Fail;
    }

#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
    else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
    {
        status = kStatus_Fail;
    }
#endif

    else
    {
#if defined(__GIC_PRIO_BITS)
        GIC_EnableIRQ(interrupt);
#else
        NVIC_EnableIRQ(interrupt);
#endif
    }

    return status;
}
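
/*
 * Example (a sketch): enable a peripheral interrupt and check the result.
 * DEMO_IRQn stands in for any device-specific IRQ number.
 *
 * @code
   if (kStatus_Success != EnableIRQ(DEMO_IRQn))
   {
       // IRQ not available on this core; handle the error.
   }
   @endcode
 */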

/*!
 * @brief Disable a specific interrupt.
 *
 * Disable a LEVEL1 interrupt. Some devices have multiple interrupt levels; for
 * example, there are the NVIC and the INTMUX. Here, the interrupts connected
 * to the NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
 * The interrupts connected to the INTMUX are the LEVEL2 interrupts; they are routed
 * to the NVIC first and then to the core.
 *
 * This function only disables the LEVEL1 interrupts. The number of LEVEL1 interrupts
 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
 *
 * @param interrupt The IRQ number.
 * @retval kStatus_Success Interrupt disabled successfully
 * @retval kStatus_Fail Failed to disable the interrupt
 */
static inline status_t DisableIRQ(IRQn_Type interrupt)
{
    status_t status = kStatus_Success;

    if (NotAvail_IRQn == interrupt)
    {
        status = kStatus_Fail;
    }

#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
    else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
    {
        status = kStatus_Fail;
    }
#endif

    else
    {
#if defined(__GIC_PRIO_BITS)
        GIC_DisableIRQ(interrupt);
#else
        NVIC_DisableIRQ(interrupt);
#endif
    }

    return status;
}

/*!
 * @brief Enable the IRQ, and also set the interrupt priority.
 *
 * Only handles LEVEL1 interrupts. Some devices have multiple interrupt levels; for
 * example, there are the NVIC and the INTMUX. Here, the interrupts connected
 * to the NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
 * The interrupts connected to the INTMUX are the LEVEL2 interrupts; they are routed
 * to the NVIC first and then to the core.
 *
 * This function only handles the LEVEL1 interrupts. The number of LEVEL1 interrupts
 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
 *
 * @param interrupt The IRQ to enable.
 * @param priNum Priority number set to the interrupt controller register.
 * @retval kStatus_Success Interrupt priority set successfully
 * @retval kStatus_Fail Failed to set the interrupt priority.
 */
static inline status_t EnableIRQWithPriority(IRQn_Type interrupt, uint8_t priNum)
{
    status_t status = kStatus_Success;

    if (NotAvail_IRQn == interrupt)
    {
        status = kStatus_Fail;
    }

#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
    else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
    {
        status = kStatus_Fail;
    }
#endif

    else
    {
#if defined(__GIC_PRIO_BITS)
        GIC_SetPriority(interrupt, priNum);
        GIC_EnableIRQ(interrupt);
#else
        NVIC_SetPriority(interrupt, priNum);
        NVIC_EnableIRQ(interrupt);
#endif
    }

    return status;
}

/*!
 * @brief Set the IRQ priority.
 *
 * Only handles LEVEL1 interrupts. Some devices have multiple interrupt levels; for
 * example, there are the NVIC and the INTMUX. Here, the interrupts connected
 * to the NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
 * The interrupts connected to the INTMUX are the LEVEL2 interrupts; they are routed
 * to the NVIC first and then to the core.
 *
 * This function only handles the LEVEL1 interrupts. The number of LEVEL1 interrupts
 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
 *
 * @param interrupt The IRQ to set.
 * @param priNum Priority number set to the interrupt controller register.
 *
 * @retval kStatus_Success Interrupt priority set successfully
 * @retval kStatus_Fail Failed to set the interrupt priority.
 */
static inline status_t IRQ_SetPriority(IRQn_Type interrupt, uint8_t priNum)
{
    status_t status = kStatus_Success;

    if (NotAvail_IRQn == interrupt)
    {
        status = kStatus_Fail;
    }

#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
    else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
    {
        status = kStatus_Fail;
    }
#endif

    else
    {
#if defined(__GIC_PRIO_BITS)
        GIC_SetPriority(interrupt, priNum);
#else
        NVIC_SetPriority(interrupt, priNum);
#endif
    }

    return status;
}

/*!
 * @brief Clear the pending IRQ flag.
 *
 * Only handles LEVEL1 interrupts. Some devices have multiple interrupt levels; for
 * example, there are the NVIC and the INTMUX. Here, the interrupts connected
 * to the NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
 * The interrupts connected to the INTMUX are the LEVEL2 interrupts; they are routed
 * to the NVIC first and then to the core.
 *
 * This function only handles the LEVEL1 interrupts. The number of LEVEL1 interrupts
 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
 *
 * @param interrupt The IRQ whose pending flag to clear.
 *
 * @retval kStatus_Success Pending flag cleared successfully
 * @retval kStatus_Fail Failed to clear the pending flag.
 */
static inline status_t IRQ_ClearPendingIRQ(IRQn_Type interrupt)
{
    status_t status = kStatus_Success;

    if (NotAvail_IRQn == interrupt)
    {
        status = kStatus_Fail;
    }

#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
    else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
    {
        status = kStatus_Fail;
    }
#endif

    else
    {
#if defined(__GIC_PRIO_BITS)
        GIC_ClearPendingIRQ(interrupt);
#else
        NVIC_ClearPendingIRQ(interrupt);
#endif
    }

    return status;
}

/*!
 * @brief Disable the global IRQ
 *
 * Disable the global interrupt and return the current primask register value. The user
 * is required to pass this primask value to EnableGlobalIRQ().
 *
 * @return Current primask value.
 */
static inline uint32_t DisableGlobalIRQ(void)
{
    uint32_t mask;

#if defined(CPSR_I_Msk)
    mask = __get_CPSR() & CPSR_I_Msk;
#elif defined(DAIF_I_BIT)
    mask = __get_DAIF() & DAIF_I_BIT;
#else
    mask = __get_PRIMASK();
#endif
    __disable_irq();

    return mask;
}

/*!
 * @brief Enable the global IRQ
 *
 * Set the primask register to the provided value, rather than unconditionally enabling
 * interrupts. This eases RTOS integration, since some RTOSes have their own primask
 * management. The user is required to use EnableGlobalIRQ() and DisableGlobalIRQ()
 * in pairs.
 *
 * @param primask value of the primask register to be restored. The primask value is
 * supposed to be provided by DisableGlobalIRQ().
 */
static inline void EnableGlobalIRQ(uint32_t primask)
{
#if defined(CPSR_I_Msk)
    __set_CPSR((__get_CPSR() & ~CPSR_I_Msk) | primask);
#elif defined(DAIF_I_BIT)
    if (0UL == primask)
    {
        __enable_irq();
    }
#else
    __set_PRIMASK(primask);
#endif
}
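
/*
 * Typical pairing (a sketch): form a short critical section by saving the
 * interrupt mask, performing the protected access, then restoring the mask.
 *
 * @code
   uint32_t primask = DisableGlobalIRQ();
   // ... access data shared with ISRs ...
   EnableGlobalIRQ(primask);
   @endcode
 */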

#if defined(ENABLE_RAM_VECTOR_TABLE)
/*!
 * @brief Install an IRQ handler.
 *
 * @param irq IRQ number
 * @param irqHandler IRQ handler address
 * @return The old IRQ handler address
 */
uint32_t InstallIRQHandler(IRQn_Type irq, uint32_t irqHandler);
#endif /* ENABLE_RAM_VECTOR_TABLE. */
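
/*
 * Example (illustrative, requires ENABLE_RAM_VECTOR_TABLE): install a handler
 * into the RAM vector table, keeping the previous handler address.
 * DEMO_IRQn and DEMO_IRQHandler are hypothetical names.
 *
 * @code
   uint32_t oldHandler = InstallIRQHandler(DEMO_IRQn, (uint32_t)DEMO_IRQHandler);
   @endcode
 */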

#if (defined(FSL_FEATURE_SOC_SYSCON_COUNT) && (FSL_FEATURE_SOC_SYSCON_COUNT > 0))

/*
 * When FSL_FEATURE_POWERLIB_EXTEND is defined to a non-zero value,
 * the power library should be used instead of these functions.
 */
#if !(defined(FSL_FEATURE_POWERLIB_EXTEND) && (FSL_FEATURE_POWERLIB_EXTEND != 0))
/*!
 * @brief Enable a specific interrupt for wake-up from deep-sleep mode.
 *
 * Enable the interrupt for wake-up from deep-sleep mode.
 * Some interrupts are typically used in sleep mode only and will not occur during
 * deep-sleep mode because the relevant clocks are stopped. However, it is possible to enable
 * those clocks (significantly increasing power consumption in the reduced power mode),
 * making these wake-ups possible.
 *
 * @note This function also enables the interrupt in the NVIC (EnableIRQ() is called internally).
 *
 * @param interrupt The IRQ number.
 */
void EnableDeepSleepIRQ(IRQn_Type interrupt);

/*!
 * @brief Disable a specific interrupt for wake-up from deep-sleep mode.
 *
 * Disable the interrupt for wake-up from deep-sleep mode.
 * Some interrupts are typically used in sleep mode only and will not occur during
 * deep-sleep mode because the relevant clocks are stopped. However, it is possible to enable
 * those clocks (significantly increasing power consumption in the reduced power mode),
 * making these wake-ups possible.
 *
 * @note This function also disables the interrupt in the NVIC (DisableIRQ() is called internally).
 *
 * @param interrupt The IRQ number.
 */
void DisableDeepSleepIRQ(IRQn_Type interrupt);
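
/*
 * Example (a sketch): allow a peripheral interrupt to wake the device from
 * deep sleep; per the note above, this also enables it in the NVIC.
 *
 * @code
   EnableDeepSleepIRQ(DEMO_IRQn); // DEMO_IRQn is a placeholder IRQ number
   @endcode
 */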
#endif /* FSL_FEATURE_POWERLIB_EXTEND */
#endif /* FSL_FEATURE_SOC_SYSCON_COUNT */

#if defined(DWT)
/*!
 * @brief Enable the counter that counts CPU cycles.
 */
void MSDK_EnableCpuCycleCounter(void);

/*!
 * @brief Get the current CPU cycle count.
 *
 * @return Current CPU cycle count.
 */
uint32_t MSDK_GetCpuCycleCount(void);
#endif
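
/*
 * Example (a sketch): measure the cycle cost of a code block with the DWT
 * cycle counter; note the counter is 32-bit and wraps accordingly.
 *
 * @code
   MSDK_EnableCpuCycleCounter();
   uint32_t start  = MSDK_GetCpuCycleCount();
   // ... code under measurement ...
   uint32_t cycles = MSDK_GetCpuCycleCount() - start;
   @endcode
 */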

#if defined(__cplusplus)
}
#endif /* __cplusplus*/

/*! @} */

#endif /* _FSL_COMMON_ARM_H_ */