1 /*
2 * Copyright (c) 2015-2016, Freescale Semiconductor, Inc.
3 * Copyright 2016-2021 NXP
4 * All rights reserved.
5 *
6 * SPDX-License-Identifier: BSD-3-Clause
7 */
8
9 #ifndef _FSL_COMMON_ARM_H_
10 #define _FSL_COMMON_ARM_H_
11
12 /*
13 * For CMSIS pack RTE.
 * CMSIS pack RTE generates "RTE_Components.h", which contains the statements
15 * of the related <RTE_Components_h> element for all selected software components.
16 */
17 #ifdef _RTE_
18 #include "RTE_Components.h"
19 #endif
20
21 /*!
22 * @addtogroup ksdk_common
23 * @{
24 */
25
26 /*! @name Atomic modification
27 *
28 * These macros are used for atomic access, such as read-modify-write
29 * to the peripheral registers.
30 *
31 * - SDK_ATOMIC_LOCAL_ADD
32 * - SDK_ATOMIC_LOCAL_SET
33 * - SDK_ATOMIC_LOCAL_CLEAR
34 * - SDK_ATOMIC_LOCAL_TOGGLE
35 * - SDK_ATOMIC_LOCAL_CLEAR_AND_SET
36 *
37 * Take SDK_ATOMIC_LOCAL_CLEAR_AND_SET as an example: the parameter @c addr
38 * means the address of the peripheral register or variable you want to modify
39 * atomically, the parameter @c clearBits is the bits to clear, the parameter
 * @c setBits is the bits to set.
41 * For example, to set a 32-bit register bit1:bit0 to 0b10, use like this:
42 *
43 * @code
44 volatile uint32_t * reg = (volatile uint32_t *)REG_ADDR;
45
46 SDK_ATOMIC_LOCAL_CLEAR_AND_SET(reg, 0x03, 0x02);
47 @endcode
48 *
49 * In this example, the register bit1:bit0 are cleared and bit1 is set, as a result,
50 * register bit1:bit0 = 0b10.
51 *
 * @note For the platforms that don't support exclusive load and store, these macros
 * disable the global interrupt to protect the modification.
54 *
55 * @note These macros only guarantee the local processor atomic operations. For
56 * the multi-processor devices, use hardware semaphore such as SEMA42 to
57 * guarantee exclusive access if necessary.
58 *
59 * @{
60 */
61
62 /* clang-format off */
63 #if ((defined(__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
64 (defined(__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
65 (defined(__ARM_ARCH_8M_MAIN__) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
66 (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ == 1)))
67 /* clang-format on */
68
69 /* If the LDREX and STREX are supported, use them. */
/*
 * Atomic read-modify-write of one byte: load-exclusive *addr into val,
 * apply ops (an expression that updates val), then store-exclusive val
 * back. The loop retries until the exclusive store succeeds, i.e. until
 * no other access broke the exclusive monitor between load and store.
 */
#define _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, val, ops) \
    do                                              \
    {                                               \
        (val) = __LDREXB(addr);                     \
        (ops);                                      \
    } while (0UL != __STREXB((val), (addr)))

/* Half-word (16-bit) variant of _SDK_ATOMIC_LOCAL_OPS_1BYTE. */
#define _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, val, ops) \
    do                                              \
    {                                               \
        (val) = __LDREXH(addr);                     \
        (ops);                                      \
    } while (0UL != __STREXH((val), (addr)))

/* Word (32-bit) variant of _SDK_ATOMIC_LOCAL_OPS_1BYTE. */
#define _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, val, ops) \
    do                                              \
    {                                               \
        (val) = __LDREXW(addr);                     \
        (ops);                                      \
    } while (0UL != __STREXW((val), (addr)))
90
/* Atomically add val to the byte at addr (local-core atomicity only). */
static inline void _SDK_AtomicLocalAdd1Byte(volatile uint8_t *addr, uint8_t val)
{
    uint8_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, tmp, tmp += val);
}
97
/* Atomically add val to the half word at addr (local-core atomicity only). */
static inline void _SDK_AtomicLocalAdd2Byte(volatile uint16_t *addr, uint16_t val)
{
    uint16_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, tmp, tmp += val);
}
104
/* Atomically add val to the word at addr (local-core atomicity only). */
static inline void _SDK_AtomicLocalAdd4Byte(volatile uint32_t *addr, uint32_t val)
{
    uint32_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, tmp, tmp += val);
}
111
/* Atomically subtract val from the byte at addr (local-core atomicity only). */
static inline void _SDK_AtomicLocalSub1Byte(volatile uint8_t *addr, uint8_t val)
{
    uint8_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, tmp, tmp -= val);
}
118
/* Atomically subtract val from the half word at addr (local-core atomicity only). */
static inline void _SDK_AtomicLocalSub2Byte(volatile uint16_t *addr, uint16_t val)
{
    uint16_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, tmp, tmp -= val);
}
125
/* Atomically subtract val from the word at addr (local-core atomicity only). */
static inline void _SDK_AtomicLocalSub4Byte(volatile uint32_t *addr, uint32_t val)
{
    uint32_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, tmp, tmp -= val);
}
132
/* Atomically OR bits into the byte at addr (local-core atomicity only). */
static inline void _SDK_AtomicLocalSet1Byte(volatile uint8_t *addr, uint8_t bits)
{
    uint8_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, tmp, tmp |= bits);
}
139
/* Atomically OR bits into the half word at addr (local-core atomicity only). */
static inline void _SDK_AtomicLocalSet2Byte(volatile uint16_t *addr, uint16_t bits)
{
    uint16_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, tmp, tmp |= bits);
}
146
/* Atomically OR bits into the word at addr (local-core atomicity only). */
static inline void _SDK_AtomicLocalSet4Byte(volatile uint32_t *addr, uint32_t bits)
{
    uint32_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, tmp, tmp |= bits);
}
153
/* Atomically clear bits in the byte at addr (local-core atomicity only). */
static inline void _SDK_AtomicLocalClear1Byte(volatile uint8_t *addr, uint8_t bits)
{
    uint8_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, tmp, tmp &= ~bits);
}
160
/* Atomically clear bits in the half word at addr (local-core atomicity only). */
static inline void _SDK_AtomicLocalClear2Byte(volatile uint16_t *addr, uint16_t bits)
{
    uint16_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, tmp, tmp &= ~bits);
}
167
/* Atomically clear bits in the word at addr (local-core atomicity only). */
static inline void _SDK_AtomicLocalClear4Byte(volatile uint32_t *addr, uint32_t bits)
{
    uint32_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, tmp, tmp &= ~bits);
}
174
/* Atomically toggle bits in the byte at addr (local-core atomicity only). */
static inline void _SDK_AtomicLocalToggle1Byte(volatile uint8_t *addr, uint8_t bits)
{
    uint8_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, tmp, tmp ^= bits);
}
181
/* Atomically toggle bits in the half word at addr (local-core atomicity only). */
static inline void _SDK_AtomicLocalToggle2Byte(volatile uint16_t *addr, uint16_t bits)
{
    uint16_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, tmp, tmp ^= bits);
}
188
/* Atomically toggle bits in the word at addr (local-core atomicity only). */
static inline void _SDK_AtomicLocalToggle4Byte(volatile uint32_t *addr, uint32_t bits)
{
    uint32_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, tmp, tmp ^= bits);
}
195
/* Atomically clear clearBits then set setBits in the byte at addr (local-core atomicity only). */
static inline void _SDK_AtomicLocalClearAndSet1Byte(volatile uint8_t *addr, uint8_t clearBits, uint8_t setBits)
{
    uint8_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, tmp, tmp = (tmp & ~clearBits) | setBits);
}
202
/* Atomically clear clearBits then set setBits in the half word at addr (local-core atomicity only). */
static inline void _SDK_AtomicLocalClearAndSet2Byte(volatile uint16_t *addr, uint16_t clearBits, uint16_t setBits)
{
    uint16_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, tmp, tmp = (tmp & ~clearBits) | setBits);
}
209
/* Atomically clear clearBits then set setBits in the word at addr (local-core atomicity only). */
static inline void _SDK_AtomicLocalClearAndSet4Byte(volatile uint32_t *addr, uint32_t clearBits, uint32_t setBits)
{
    uint32_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, tmp, tmp = (tmp & ~clearBits) | setBits);
}
216
/*
 * Width-dispatch wrappers: pick the 1/2/4-byte helper from sizeof(*(addr)).
 * The (volatile void*) intermediate cast suppresses alignment/type warnings;
 * sizes other than 1 and 2 fall through to the 4-byte helper.
 */
#define SDK_ATOMIC_LOCAL_ADD(addr, val)                                                                       \
    ((1UL == sizeof(*(addr))) ? _SDK_AtomicLocalAdd1Byte((volatile uint8_t*)(volatile void*)(addr), (val)) :  \
    ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalAdd2Byte((volatile uint16_t*)(volatile void*)(addr), (val)) : \
    _SDK_AtomicLocalAdd4Byte((volatile uint32_t *)(volatile void*)(addr), (val))))

/* Atomically set (OR in) bits at *(addr). */
#define SDK_ATOMIC_LOCAL_SET(addr, bits)                                                                       \
    ((1UL == sizeof(*(addr))) ? _SDK_AtomicLocalSet1Byte((volatile uint8_t*)(volatile void*)(addr), (bits)) :  \
    ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalSet2Byte((volatile uint16_t*)(volatile void*)(addr), (bits)) : \
    _SDK_AtomicLocalSet4Byte((volatile uint32_t *)(volatile void*)(addr), (bits))))

/* Atomically clear bits at *(addr). */
#define SDK_ATOMIC_LOCAL_CLEAR(addr, bits)                                                                       \
    ((1UL == sizeof(*(addr))) ? _SDK_AtomicLocalClear1Byte((volatile uint8_t*)(volatile void*)(addr), (bits)) :  \
    ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalClear2Byte((volatile uint16_t*)(volatile void*)(addr), (bits)) : \
    _SDK_AtomicLocalClear4Byte((volatile uint32_t *)(volatile void*)(addr), (bits))))

/* Atomically toggle (XOR) bits at *(addr). */
#define SDK_ATOMIC_LOCAL_TOGGLE(addr, bits)                                                                       \
    ((1UL == sizeof(*(addr))) ? _SDK_AtomicLocalToggle1Byte((volatile uint8_t*)(volatile void*)(addr), (bits)) :  \
    ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalToggle2Byte((volatile uint16_t*)(volatile void*)(addr), (bits)) : \
    _SDK_AtomicLocalToggle4Byte((volatile uint32_t *)(volatile void*)(addr), (bits))))

/* Atomically clear clearBits then set setBits at *(addr) in a single read-modify-write. */
#define SDK_ATOMIC_LOCAL_CLEAR_AND_SET(addr, clearBits, setBits)                                                                       \
    ((1UL == sizeof(*(addr))) ? _SDK_AtomicLocalClearAndSet1Byte((volatile uint8_t*)(volatile void*)(addr), (clearBits), (setBits)) :  \
    ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalClearAndSet2Byte((volatile uint16_t*)(volatile void*)(addr), (clearBits), (setBits)) : \
    _SDK_AtomicLocalClearAndSet4Byte((volatile uint32_t *)(volatile void*)(addr), (clearBits), (setBits))))
241 #else
242
/*
 * No exclusive load/store on this architecture: fall back to briefly masking
 * the global interrupt around the plain read-modify-write sequence.
 */
#define SDK_ATOMIC_LOCAL_ADD(addr, val)          \
    do {                                         \
        uint32_t s_atomicOldInt;                 \
        s_atomicOldInt = DisableGlobalIRQ();     \
        *(addr) += (val);                        \
        EnableGlobalIRQ(s_atomicOldInt);         \
    } while (0)

/* Atomically set (OR in) bits at *(addr) with interrupts masked. */
#define SDK_ATOMIC_LOCAL_SET(addr, bits)         \
    do {                                         \
        uint32_t s_atomicOldInt;                 \
        s_atomicOldInt = DisableGlobalIRQ();     \
        *(addr) |= (bits);                       \
        EnableGlobalIRQ(s_atomicOldInt);         \
    } while (0)

/* Atomically clear bits at *(addr) with interrupts masked. */
#define SDK_ATOMIC_LOCAL_CLEAR(addr, bits)       \
    do {                                         \
        uint32_t s_atomicOldInt;                 \
        s_atomicOldInt = DisableGlobalIRQ();     \
        *(addr) &= ~(bits);                      \
        EnableGlobalIRQ(s_atomicOldInt);         \
    } while (0)

/* Atomically toggle (XOR) bits at *(addr) with interrupts masked. */
#define SDK_ATOMIC_LOCAL_TOGGLE(addr, bits)      \
    do {                                         \
        uint32_t s_atomicOldInt;                 \
        s_atomicOldInt = DisableGlobalIRQ();     \
        *(addr) ^= (bits);                       \
        EnableGlobalIRQ(s_atomicOldInt);         \
    } while (0)

/* Atomically clear clearBits then set setBits at *(addr) with interrupts masked. */
#define SDK_ATOMIC_LOCAL_CLEAR_AND_SET(addr, clearBits, setBits) \
    do {                                                         \
        uint32_t s_atomicOldInt;                                 \
        s_atomicOldInt = DisableGlobalIRQ();                     \
        *(addr) = (*(addr) & ~(clearBits)) | (setBits);          \
        EnableGlobalIRQ(s_atomicOldInt);                         \
    } while (0)
282
283 #endif
284 /* @} */
285
286 /*! @name Timer utilities */
287 /* @{ */
/*! Macro to convert a microsecond period to raw count value.
 *  All arithmetic is done in uint64_t to avoid intermediate overflow. */
#define USEC_TO_COUNT(us, clockFreqInHz) (uint64_t)(((uint64_t)(us) * (clockFreqInHz)) / 1000000U)
/*! Macro to convert a raw count value to microseconds */
#define COUNT_TO_USEC(count, clockFreqInHz) (uint64_t)((uint64_t)(count) * 1000000U / (clockFreqInHz))

/*! Macro to convert a millisecond period to raw count value */
#define MSEC_TO_COUNT(ms, clockFreqInHz) (uint64_t)((uint64_t)(ms) * (clockFreqInHz) / 1000U)
/*! Macro to convert a raw count value to milliseconds */
#define COUNT_TO_MSEC(count, clockFreqInHz) (uint64_t)((uint64_t)(count) * 1000U / (clockFreqInHz))
297 /* @} */
298
299 /*! @name ISR exit barrier
300 * @{
301 *
302 * ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping
303 * exception return operation might vector to incorrect interrupt.
 * For Cortex-M7, if the core speed is much faster than the peripheral register write speed,
 * the peripheral interrupt flags may still be set after exiting the ISR; this results in
 * the same error as errata 838869.
307 */
/* On Cortex-M4/M7, issue a DSB at ISR exit (see errata note above); elsewhere it expands to nothing. */
#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U))
#define SDK_ISR_EXIT_BARRIER __DSB()
#else
#define SDK_ISR_EXIT_BARRIER
#endif
313
314 /* @} */
315
316 /*! @name Alignment variable definition macros */
317 /* @{ */
#if (defined(__ICCARM__))
/*
 * Workaround to disable MISRA C message suppress warnings for IAR compiler.
 * http:/ /supp.iar.com/Support/?note=24725
 */
_Pragma("diag_suppress=Pm120")
#define SDK_PRAGMA(x) _Pragma(#x)
_Pragma("diag_error=Pm120")
/*! Macro to define a variable with alignbytes alignment (IAR: data_alignment pragma). */
#define SDK_ALIGN(var, alignbytes) SDK_PRAGMA(data_alignment = alignbytes) var
#elif defined(__CC_ARM) || defined(__ARMCC_VERSION)
/*! Macro to define a variable with alignbytes alignment (Arm Compiler: attribute precedes the declaration). */
#define SDK_ALIGN(var, alignbytes) __attribute__((aligned(alignbytes))) var
#elif defined(__GNUC__)
/*! Macro to define a variable with alignbytes alignment (GCC: attribute follows the declaration). */
#define SDK_ALIGN(var, alignbytes) var __attribute__((aligned(alignbytes)))
#else
#error Toolchain not supported
#endif
337
338 /*! Macro to define a variable with L1 d-cache line size alignment */
339 #if defined(FSL_FEATURE_L1DCACHE_LINESIZE_BYTE)
340 #define SDK_L1DCACHE_ALIGN(var) SDK_ALIGN(var, FSL_FEATURE_L1DCACHE_LINESIZE_BYTE)
341 #endif
342 /*! Macro to define a variable with L2 cache line size alignment */
343 #if defined(FSL_FEATURE_L2CACHE_LINESIZE_BYTE)
344 #define SDK_L2CACHE_ALIGN(var) SDK_ALIGN(var, FSL_FEATURE_L2CACHE_LINESIZE_BYTE)
345 #endif
346
/*! Macro to round var up to the next multiple of alignbytes.
 *  alignbytes must be a power of two for the mask arithmetic to be valid. */
#define SDK_SIZEALIGN(var, alignbytes) \
    ((unsigned int)((var) + ((alignbytes)-1U)) & (unsigned int)(~(unsigned int)((alignbytes)-1U)))
350 /* @} */
351
352 /*! @name Non-cacheable region definition macros */
/* For initialized non-zero non-cacheable variables, please use "AT_NONCACHEABLE_SECTION_INIT(var) ={xx};" or
 * "AT_NONCACHEABLE_SECTION_ALIGN_INIT(var) ={xx};" in your projects to define them. For zero-initialized non-cacheable
 * variables, please use "AT_NONCACHEABLE_SECTION(var);" or "AT_NONCACHEABLE_SECTION_ALIGN(var);" to define them; these
 * zero-initialized variables will be initialized to zero in system startup.
 */
358 /* @{ */
359
#if ((!(defined(FSL_FEATURE_HAS_NO_NONCACHEABLE_SECTION) && FSL_FEATURE_HAS_NO_NONCACHEABLE_SECTION)) && defined(FSL_FEATURE_L1ICACHE_LINESIZE_BYTE))

/* Device provides a non-cacheable section: route the marked variables there. */
#if (defined(__ICCARM__))
#define AT_NONCACHEABLE_SECTION(var) var @"NonCacheable"
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) SDK_PRAGMA(data_alignment = alignbytes) var @"NonCacheable"
#define AT_NONCACHEABLE_SECTION_INIT(var) var @"NonCacheable.init"
#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) SDK_PRAGMA(data_alignment = alignbytes) var @"NonCacheable.init"

#elif(defined(__CC_ARM) || defined(__ARMCC_VERSION))
#define AT_NONCACHEABLE_SECTION_INIT(var) __attribute__((section("NonCacheable.init"))) var
#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
    __attribute__((section("NonCacheable.init"))) __attribute__((aligned(alignbytes))) var
#if(defined(__CC_ARM))
/* Arm Compiler 5: zero_init keeps the variable out of the image (zero-initialized at startup). */
#define AT_NONCACHEABLE_SECTION(var) __attribute__((section("NonCacheable"), zero_init)) var
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
    __attribute__((section("NonCacheable"), zero_init)) __attribute__((aligned(alignbytes))) var
#else
/* Arm Compiler 6: a ".bss."-prefixed section name marks the data as zero-initialized. */
#define AT_NONCACHEABLE_SECTION(var) __attribute__((section(".bss.NonCacheable"))) var
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
    __attribute__((section(".bss.NonCacheable"))) __attribute__((aligned(alignbytes))) var
#endif

#elif(defined(__GNUC__))
/* For GCC, when the non-cacheable section is required, please define "__STARTUP_INITIALIZE_NONCACHEDATA"
 * in your projects to make sure the non-cacheable section variables will be initialized in system startup.
 */
#define AT_NONCACHEABLE_SECTION_INIT(var) __attribute__((section("NonCacheable.init"))) var
#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
    __attribute__((section("NonCacheable.init"))) var __attribute__((aligned(alignbytes)))
#define AT_NONCACHEABLE_SECTION(var) __attribute__((section("NonCacheable,\"aw\",%nobits @"))) var
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
    __attribute__((section("NonCacheable,\"aw\",%nobits @"))) var __attribute__((aligned(alignbytes)))
#else
#error Toolchain not supported.
#endif

#else

/* No non-cacheable section on this device: the macros degrade to plain (aligned) definitions. */
#define AT_NONCACHEABLE_SECTION(var) var
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) SDK_ALIGN(var, alignbytes)
#define AT_NONCACHEABLE_SECTION_INIT(var) var
#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) SDK_ALIGN(var, alignbytes)

#endif
404
405 /* @} */
406
407 /*!
408 * @name Time sensitive region
409 * @{
410 */
#if (defined(FSL_SDK_DRIVER_QUICK_ACCESS_ENABLE) && FSL_SDK_DRIVER_QUICK_ACCESS_ENABLE)

/* Quick access enabled: place the marked code/data in the "CodeQuickAccess"/"DataQuickAccess" sections. */
#if (defined(__ICCARM__))
#define AT_QUICKACCESS_SECTION_CODE(func) func @"CodeQuickAccess"
#define AT_QUICKACCESS_SECTION_DATA(func) func @"DataQuickAccess"
#elif(defined(__CC_ARM) || defined(__ARMCC_VERSION))
/* __noinline__ keeps the function body in the section instead of being inlined elsewhere. */
#define AT_QUICKACCESS_SECTION_CODE(func) __attribute__((section("CodeQuickAccess"), __noinline__)) func
#define AT_QUICKACCESS_SECTION_DATA(func) __attribute__((section("DataQuickAccess"))) func
#elif(defined(__GNUC__))
#define AT_QUICKACCESS_SECTION_CODE(func) __attribute__((section("CodeQuickAccess"), __noinline__)) func
#define AT_QUICKACCESS_SECTION_DATA(func) __attribute__((section("DataQuickAccess"))) func
#else
#error Toolchain not supported.
#endif /* defined(__ICCARM__) */

#else /* __FSL_SDK_DRIVER_QUICK_ACCESS_ENABLE */

/* Quick access disabled: the macros are no-ops. */
#define AT_QUICKACCESS_SECTION_CODE(func) func
#define AT_QUICKACCESS_SECTION_DATA(func) func

#endif /* __FSL_SDK_DRIVER_QUICK_ACCESS_ENABLE */
432 /* @} */
433
434 /*! @name Ram Function */
/* Place the marked function in the "RamFunction" section (per-toolchain syntax). */
#if (defined(__ICCARM__))
#define RAMFUNCTION_SECTION_CODE(func) func @"RamFunction"
#elif(defined(__CC_ARM) || defined(__ARMCC_VERSION))
#define RAMFUNCTION_SECTION_CODE(func) __attribute__((section("RamFunction"))) func
#elif(defined(__GNUC__))
#define RAMFUNCTION_SECTION_CODE(func) __attribute__((section("RamFunction"))) func
#else
#error Toolchain not supported.
#endif /* defined(__ICCARM__) */
444 /* @} */
445
446 #if defined ( __ARMCC_VERSION ) && ( __ARMCC_VERSION >= 6010050 )
447 void DefaultISR(void);
448 #endif
449
450 /*
451 * The fsl_clock.h is included here because it needs MAKE_VERSION/MAKE_STATUS/status_t
452 * defined in previous of this file.
453 */
454 #include "fsl_clock.h"
455
456 /*
457 * Chip level peripheral reset API, for MCUs that implement peripheral reset control external to a peripheral
458 */
459 #if ((defined(FSL_FEATURE_SOC_SYSCON_COUNT) && (FSL_FEATURE_SOC_SYSCON_COUNT > 0)) || \
460 (defined(FSL_FEATURE_SOC_ASYNC_SYSCON_COUNT) && (FSL_FEATURE_SOC_ASYNC_SYSCON_COUNT > 0)))
461 #include "fsl_reset.h"
462 #endif
463
464 /*******************************************************************************
465 * API
466 ******************************************************************************/
467
468 #if defined(__cplusplus)
469 extern "C" {
470 #endif /* __cplusplus*/
471
472 /*!
473 * @brief Enable specific interrupt.
474 *
475 * Enable LEVEL1 interrupt. For some devices, there might be multiple interrupt
476 * levels. For example, there are NVIC and intmux. Here the interrupts connected
477 * to NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
478 * The interrupts connected to intmux are the LEVEL2 interrupts, they are routed
479 * to NVIC first then routed to core.
480 *
481 * This function only enables the LEVEL1 interrupts. The number of LEVEL1 interrupts
482 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
483 *
484 * @param interrupt The IRQ number.
485 * @retval kStatus_Success Interrupt enabled successfully
486 * @retval kStatus_Fail Failed to enable the interrupt
487 */
EnableIRQ(IRQn_Type interrupt)488 static inline status_t EnableIRQ(IRQn_Type interrupt)
489 {
490 status_t status = kStatus_Success;
491
492 if (NotAvail_IRQn == interrupt)
493 {
494 status = kStatus_Fail;
495 }
496
497 #if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
498 else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
499 {
500 status = kStatus_Fail;
501 }
502 #endif
503
504 else
505 {
506 #if defined(__GIC_PRIO_BITS)
507 GIC_EnableIRQ(interrupt);
508 #else
509 NVIC_EnableIRQ(interrupt);
510 #endif
511 }
512
513 return status;
514 }
515
516 /*!
517 * @brief Disable specific interrupt.
518 *
519 * Disable LEVEL1 interrupt. For some devices, there might be multiple interrupt
520 * levels. For example, there are NVIC and intmux. Here the interrupts connected
521 * to NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
522 * The interrupts connected to intmux are the LEVEL2 interrupts, they are routed
523 * to NVIC first then routed to core.
524 *
525 * This function only disables the LEVEL1 interrupts. The number of LEVEL1 interrupts
526 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
527 *
528 * @param interrupt The IRQ number.
529 * @retval kStatus_Success Interrupt disabled successfully
530 * @retval kStatus_Fail Failed to disable the interrupt
531 */
DisableIRQ(IRQn_Type interrupt)532 static inline status_t DisableIRQ(IRQn_Type interrupt)
533 {
534 status_t status = kStatus_Success;
535
536 if (NotAvail_IRQn == interrupt)
537 {
538 status = kStatus_Fail;
539 }
540
541 #if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
542 else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
543 {
544 status = kStatus_Fail;
545 }
546 #endif
547
548 else
549 {
550 #if defined(__GIC_PRIO_BITS)
551 GIC_DisableIRQ(interrupt);
552 #else
553 NVIC_DisableIRQ(interrupt);
554 #endif
555 }
556
557 return status;
558 }
559
560 /*!
561 * @brief Disable the global IRQ
562 *
563 * Disable the global interrupt and return the current primask register. User is required to provided the primask
564 * register for the EnableGlobalIRQ().
565 *
566 * @return Current primask value.
567 */
static inline uint32_t DisableGlobalIRQ(void)
{
#if defined(CPSR_I_Msk)
    /* Cortex-A path: snapshot the CPSR I bit before masking IRQs. */
    uint32_t irqMaskState = __get_CPSR() & CPSR_I_Msk;

    __disable_irq();

    return irqMaskState;
#else
    /* Cortex-M path: snapshot PRIMASK before masking IRQs. */
    uint32_t priorPrimask = __get_PRIMASK();

    __disable_irq();

    return priorPrimask;
#endif
}
584
585 /*!
586 * @brief Enable the global IRQ
587 *
588 * Set the primask register with the provided primask value but not just enable the primask. The idea is for the
589 * convenience of integration of RTOS. some RTOS get its own management mechanism of primask. User is required to
590 * use the EnableGlobalIRQ() and DisableGlobalIRQ() in pair.
591 *
592 * @param primask value of primask register to be restored. The primask value is supposed to be provided by the
593 * DisableGlobalIRQ().
594 */
static inline void EnableGlobalIRQ(uint32_t primask)
{
#if defined(CPSR_I_Msk)
    /* Restore only the CPSR I bit; all other CPSR flags are preserved. */
    __set_CPSR((__get_CPSR() & ~CPSR_I_Msk) | primask);
#else
    /* Restore the PRIMASK value captured by DisableGlobalIRQ(). */
    __set_PRIMASK(primask);
#endif
}
603
604 #if defined(ENABLE_RAM_VECTOR_TABLE)
605 /*!
606 * @brief install IRQ handler
607 *
608 * @param irq IRQ number
609 * @param irqHandler IRQ handler address
610 * @return The old IRQ handler address
611 */
612 uint32_t InstallIRQHandler(IRQn_Type irq, uint32_t irqHandler);
613 #endif /* ENABLE_RAM_VECTOR_TABLE. */
614
615 #if (defined(FSL_FEATURE_SOC_SYSCON_COUNT) && (FSL_FEATURE_SOC_SYSCON_COUNT > 0))
616
617 /*
618 * When FSL_FEATURE_POWERLIB_EXTEND is defined to non-zero value,
619 * powerlib should be used instead of these functions.
620 */
621 #if !(defined(FSL_FEATURE_POWERLIB_EXTEND) && (FSL_FEATURE_POWERLIB_EXTEND != 0))
622 /*!
623 * @brief Enable specific interrupt for wake-up from deep-sleep mode.
624 *
625 * Enable the interrupt for wake-up from deep sleep mode.
626 * Some interrupts are typically used in sleep mode only and will not occur during
627 * deep-sleep mode because relevant clocks are stopped. However, it is possible to enable
628 * those clocks (significantly increasing power consumption in the reduced power mode),
629 * making these wake-ups possible.
630 *
 * @note This function also enables the interrupt in the NVIC (EnableIRQ() is called internally).
632 *
633 * @param interrupt The IRQ number.
634 */
635 void EnableDeepSleepIRQ(IRQn_Type interrupt);
636
637 /*!
638 * @brief Disable specific interrupt for wake-up from deep-sleep mode.
639 *
640 * Disable the interrupt for wake-up from deep sleep mode.
641 * Some interrupts are typically used in sleep mode only and will not occur during
642 * deep-sleep mode because relevant clocks are stopped. However, it is possible to enable
643 * those clocks (significantly increasing power consumption in the reduced power mode),
644 * making these wake-ups possible.
645 *
 * @note This function also disables the interrupt in the NVIC (DisableIRQ() is called internally).
647 *
648 * @param interrupt The IRQ number.
649 */
650 void DisableDeepSleepIRQ(IRQn_Type interrupt);
651 #endif /* FSL_FEATURE_POWERLIB_EXTEND */
652 #endif /* FSL_FEATURE_SOC_SYSCON_COUNT */
653
654 #if defined(__cplusplus)
655 }
656 #endif /* __cplusplus*/
657
658 /*! @} */
659
660 #endif /* _FSL_COMMON_ARM_H_ */
661