1 /*
2 * Copyright (c) 2015-2016, Freescale Semiconductor, Inc.
3 * Copyright 2016-2022 NXP
4 * All rights reserved.
5 *
6 * SPDX-License-Identifier: BSD-3-Clause
7 */
8
9 #ifndef FSL_COMMON_ARM_H_
10 #define FSL_COMMON_ARM_H_
11
12 /*
13 * For CMSIS pack RTE.
 * CMSIS pack RTE generates "RTE_Components.h" which contains the statements
15 * of the related <RTE_Components_h> element for all selected software components.
16 */
17 #ifdef _RTE_
18 #include "RTE_Components.h"
19 #endif
20
21 /*!
22 * @addtogroup ksdk_common
23 * @{
24 */
25
26 /*! @name Atomic modification
27 *
28 * These macros are used for atomic access, such as read-modify-write
29 * to the peripheral registers.
30 *
31 * Take @ref SDK_ATOMIC_LOCAL_CLEAR_AND_SET as an example: the parameter @c addr
32 * means the address of the peripheral register or variable you want to modify
33 * atomically, the parameter @c clearBits is the bits to clear, the parameter
 * @c setBits is the bits to set.
35 * For example, to set a 32-bit register bit1:bit0 to 0b10, use like this:
36 *
37 * @code
38 volatile uint32_t * reg = (volatile uint32_t *)REG_ADDR;
39
40 SDK_ATOMIC_LOCAL_CLEAR_AND_SET(reg, 0x03, 0x02);
41 @endcode
42 *
43 * In this example, the register bit1:bit0 are cleared and bit1 is set, as a result,
44 * register bit1:bit0 = 0b10.
45 *
 * @note For the platforms that don't support exclusive load and store, these
 * macros disable the global interrupt to protect the modification.
48 *
49 * @note These macros only guarantee the local processor atomic operations. For
50 * the multi-processor devices, use hardware semaphore such as SEMA42 to
51 * guarantee exclusive access if necessary.
52 *
53 * @{
54 */
55
56 /*!
57 * @def SDK_ATOMIC_LOCAL_ADD(addr, val)
 * Add value \a val to the variable at address \a addr.
59 *
60 * @def SDK_ATOMIC_LOCAL_SUB(addr, val)
 * Subtract value \a val from the variable at address \a addr.
62 *
63 * @def SDK_ATOMIC_LOCAL_SET(addr, bits)
 * Set the bits specified by \a bits in the variable at address \a addr.
65 *
66 * @def SDK_ATOMIC_LOCAL_CLEAR(addr, bits)
 * Clear the bits specified by \a bits in the variable at address \a addr.
68 *
69 * @def SDK_ATOMIC_LOCAL_TOGGLE(addr, bits)
 * Toggle the bits specified by \a bits in the variable at address \a addr.
71 *
72 * @def SDK_ATOMIC_LOCAL_CLEAR_AND_SET(addr, clearBits, setBits)
 * For the variable at address \a addr, clear the bits specified by \a clearBits
 * and set the bits specified by \a setBits.
75 */
76
77 /* clang-format off */
78 #if ((defined(__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
79 (defined(__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
80 (defined(__ARM_ARCH_8M_MAIN__) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
81 (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ == 1)))
82 /* clang-format on */
83
84 /* If the LDREX and STREX are supported, use them. */
/* If the LDREX and STREX are supported, use them. */
/*
 * Exclusive-access read-modify-write helpers. Each macro loads the value at
 * @p addr into @p val with an exclusive load (LDREX), applies the caller
 * supplied expression @p ops (which must update @p val), then attempts an
 * exclusive store (STREX). STREX returns non-zero when the exclusive monitor
 * was lost (another access touched the location), in which case the whole
 * sequence is retried until the store succeeds.
 */
#define _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, val, ops) \
    do                                              \
    {                                               \
        (val) = __LDREXB(addr);                     \
        (ops);                                      \
    } while (0UL != __STREXB((val), (addr)))

#define _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, val, ops) \
    do                                              \
    {                                               \
        (val) = __LDREXH(addr);                     \
        (ops);                                      \
    } while (0UL != __STREXH((val), (addr)))

#define _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, val, ops) \
    do                                              \
    {                                               \
        (val) = __LDREXW(addr);                     \
        (ops);                                      \
    } while (0UL != __STREXW((val), (addr)))
105
/*! @brief Atomically perform *addr += val on an 8-bit variable. */
static inline void _SDK_AtomicLocalAdd1Byte(volatile uint8_t *addr, uint8_t val)
{
    uint8_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, tmp, tmp += val);
}
112
/*! @brief Atomically perform *addr += val on a 16-bit variable. */
static inline void _SDK_AtomicLocalAdd2Byte(volatile uint16_t *addr, uint16_t val)
{
    uint16_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, tmp, tmp += val);
}
119
/*! @brief Atomically perform *addr += val on a 32-bit variable. */
static inline void _SDK_AtomicLocalAdd4Byte(volatile uint32_t *addr, uint32_t val)
{
    uint32_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, tmp, tmp += val);
}
126
/*! @brief Atomically perform *addr -= val on an 8-bit variable. */
static inline void _SDK_AtomicLocalSub1Byte(volatile uint8_t *addr, uint8_t val)
{
    uint8_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, tmp, tmp -= val);
}
133
/*! @brief Atomically perform *addr -= val on a 16-bit variable. */
static inline void _SDK_AtomicLocalSub2Byte(volatile uint16_t *addr, uint16_t val)
{
    uint16_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, tmp, tmp -= val);
}
140
/*! @brief Atomically perform *addr -= val on a 32-bit variable. */
static inline void _SDK_AtomicLocalSub4Byte(volatile uint32_t *addr, uint32_t val)
{
    uint32_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, tmp, tmp -= val);
}
147
/*! @brief Atomically perform *addr |= bits on an 8-bit variable. */
static inline void _SDK_AtomicLocalSet1Byte(volatile uint8_t *addr, uint8_t bits)
{
    uint8_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, tmp, tmp |= bits);
}
154
/*! @brief Atomically perform *addr |= bits on a 16-bit variable. */
static inline void _SDK_AtomicLocalSet2Byte(volatile uint16_t *addr, uint16_t bits)
{
    uint16_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, tmp, tmp |= bits);
}
161
/*! @brief Atomically perform *addr |= bits on a 32-bit variable. */
static inline void _SDK_AtomicLocalSet4Byte(volatile uint32_t *addr, uint32_t bits)
{
    uint32_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, tmp, tmp |= bits);
}
168
/*! @brief Atomically perform *addr &= ~bits on an 8-bit variable. */
static inline void _SDK_AtomicLocalClear1Byte(volatile uint8_t *addr, uint8_t bits)
{
    uint8_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, tmp, tmp &= ~bits);
}
175
/*! @brief Atomically perform *addr &= ~bits on a 16-bit variable. */
static inline void _SDK_AtomicLocalClear2Byte(volatile uint16_t *addr, uint16_t bits)
{
    uint16_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, tmp, tmp &= ~bits);
}
182
/*! @brief Atomically perform *addr &= ~bits on a 32-bit variable. */
static inline void _SDK_AtomicLocalClear4Byte(volatile uint32_t *addr, uint32_t bits)
{
    uint32_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, tmp, tmp &= ~bits);
}
189
/*! @brief Atomically perform *addr ^= bits on an 8-bit variable. */
static inline void _SDK_AtomicLocalToggle1Byte(volatile uint8_t *addr, uint8_t bits)
{
    uint8_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, tmp, tmp ^= bits);
}
196
/*! @brief Atomically perform *addr ^= bits on a 16-bit variable. */
static inline void _SDK_AtomicLocalToggle2Byte(volatile uint16_t *addr, uint16_t bits)
{
    uint16_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, tmp, tmp ^= bits);
}
203
/*! @brief Atomically perform *addr ^= bits on a 32-bit variable. */
static inline void _SDK_AtomicLocalToggle4Byte(volatile uint32_t *addr, uint32_t bits)
{
    uint32_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, tmp, tmp ^= bits);
}
210
/*! @brief Atomically clear clearBits then set setBits in an 8-bit variable. */
static inline void _SDK_AtomicLocalClearAndSet1Byte(volatile uint8_t *addr, uint8_t clearBits, uint8_t setBits)
{
    uint8_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, tmp, tmp = (tmp & ~clearBits) | setBits);
}
217
/*! @brief Atomically clear clearBits then set setBits in a 16-bit variable. */
static inline void _SDK_AtomicLocalClearAndSet2Byte(volatile uint16_t *addr, uint16_t clearBits, uint16_t setBits)
{
    uint16_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, tmp, tmp = (tmp & ~clearBits) | setBits);
}
224
/*! @brief Atomically clear clearBits then set setBits in a 32-bit variable. */
static inline void _SDK_AtomicLocalClearAndSet4Byte(volatile uint32_t *addr, uint32_t clearBits, uint32_t setBits)
{
    uint32_t tmp;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, tmp, tmp = (tmp & ~clearBits) | setBits);
}
231
/*
 * Public atomic macros: select the 1-, 2-, or 4-byte helper based on
 * sizeof(*(addr)) at compile time, casting the address and the operands to
 * the matching width. sizeof is a constant expression, so the compiler
 * discards the untaken branches.
 */
#define SDK_ATOMIC_LOCAL_ADD(addr, val)                                                                                \
    ((1UL == sizeof(*(addr))) ?                                                                                        \
         _SDK_AtomicLocalAdd1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(val)) :                       \
         ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalAdd2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(val)) : \
                                     _SDK_AtomicLocalAdd4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(val))))

#define SDK_ATOMIC_LOCAL_SUB(addr, val)                                                                                \
    ((1UL == sizeof(*(addr))) ?                                                                                        \
         _SDK_AtomicLocalSub1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(val)) :                       \
         ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalSub2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(val)) : \
                                     _SDK_AtomicLocalSub4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(val))))

#define SDK_ATOMIC_LOCAL_SET(addr, bits)                                                                               \
    ((1UL == sizeof(*(addr))) ?                                                                                        \
         _SDK_AtomicLocalSet1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(bits)) :                      \
         ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalSet2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(bits)) : \
                                     _SDK_AtomicLocalSet4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(bits))))

#define SDK_ATOMIC_LOCAL_CLEAR(addr, bits)                                                                             \
    ((1UL == sizeof(*(addr))) ?                                                                                        \
         _SDK_AtomicLocalClear1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(bits)) :                    \
         ((2UL == sizeof(*(addr))) ?                                                                                   \
              _SDK_AtomicLocalClear2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(bits)) :             \
              _SDK_AtomicLocalClear4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(bits))))

#define SDK_ATOMIC_LOCAL_TOGGLE(addr, bits)                                                                            \
    ((1UL == sizeof(*(addr))) ?                                                                                        \
         _SDK_AtomicLocalToggle1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(bits)) :                   \
         ((2UL == sizeof(*(addr))) ?                                                                                   \
              _SDK_AtomicLocalToggle2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(bits)) :            \
              _SDK_AtomicLocalToggle4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(bits))))

#define SDK_ATOMIC_LOCAL_CLEAR_AND_SET(addr, clearBits, setBits)                                                       \
    ((1UL == sizeof(*(addr))) ?                                                                                        \
         _SDK_AtomicLocalClearAndSet1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(clearBits), (uint8_t)(setBits)) : \
         ((2UL == sizeof(*(addr))) ?                                                                                   \
              _SDK_AtomicLocalClearAndSet2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(clearBits), (uint16_t)(setBits)) : \
              _SDK_AtomicLocalClearAndSet4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(clearBits), (uint32_t)(setBits))))
270 #else
271
/*
 * Fallback for cores without LDREX/STREX: make the read-modify-write
 * sequence atomic (with respect to local interrupts) by masking the global
 * interrupt around it and restoring the previous mask state afterwards.
 */
#define SDK_ATOMIC_LOCAL_ADD(addr, val)           \
    do                                            \
    {                                             \
        uint32_t s_atomicOldInt;                  \
        s_atomicOldInt = DisableGlobalIRQ();      \
        *(addr) += (val);                         \
        EnableGlobalIRQ(s_atomicOldInt);          \
    } while (false)

#define SDK_ATOMIC_LOCAL_SUB(addr, val)           \
    do                                            \
    {                                             \
        uint32_t s_atomicOldInt;                  \
        s_atomicOldInt = DisableGlobalIRQ();      \
        *(addr) -= (val);                         \
        EnableGlobalIRQ(s_atomicOldInt);          \
    } while (false)

#define SDK_ATOMIC_LOCAL_SET(addr, bits)          \
    do                                            \
    {                                             \
        uint32_t s_atomicOldInt;                  \
        s_atomicOldInt = DisableGlobalIRQ();      \
        *(addr) |= (bits);                        \
        EnableGlobalIRQ(s_atomicOldInt);          \
    } while (false)

#define SDK_ATOMIC_LOCAL_CLEAR(addr, bits)        \
    do                                            \
    {                                             \
        uint32_t s_atomicOldInt;                  \
        s_atomicOldInt = DisableGlobalIRQ();      \
        *(addr) &= ~(bits);                       \
        EnableGlobalIRQ(s_atomicOldInt);          \
    } while (false)

#define SDK_ATOMIC_LOCAL_TOGGLE(addr, bits)       \
    do                                            \
    {                                             \
        uint32_t s_atomicOldInt;                  \
        s_atomicOldInt = DisableGlobalIRQ();      \
        *(addr) ^= (bits);                        \
        EnableGlobalIRQ(s_atomicOldInt);          \
    } while (false)

#define SDK_ATOMIC_LOCAL_CLEAR_AND_SET(addr, clearBits, setBits) \
    do                                                           \
    {                                                            \
        uint32_t s_atomicOldInt;                                 \
        s_atomicOldInt = DisableGlobalIRQ();                     \
        *(addr) = (*(addr) & ~(clearBits)) | (setBits);          \
        EnableGlobalIRQ(s_atomicOldInt);                         \
    } while (false)
325
326 #endif
327 /*! @} */
328
329 /*! @name Timer utilities */
330 /*! @{ */
331 /*! Macro to convert a microsecond period to raw count value */
332 #define USEC_TO_COUNT(us, clockFreqInHz) (uint64_t)(((uint64_t)(us) * (clockFreqInHz)) / 1000000U)
333 /*! Macro to convert a raw count value to microsecond */
334 #define COUNT_TO_USEC(count, clockFreqInHz) (uint64_t)((uint64_t)(count)*1000000U / (clockFreqInHz))
335
336 /*! Macro to convert a millisecond period to raw count value */
337 #define MSEC_TO_COUNT(ms, clockFreqInHz) (uint64_t)((uint64_t)(ms) * (clockFreqInHz) / 1000U)
338 /*! Macro to convert a raw count value to millisecond */
339 #define COUNT_TO_MSEC(count, clockFreqInHz) (uint64_t)((uint64_t)(count)*1000U / (clockFreqInHz))
340 /*! @} */
341
342 /*! @name ISR exit barrier
343 * @{
344 *
345 * ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping
346 * exception return operation might vector to incorrect interrupt.
 * For Cortex-M7, if the core speed is much faster than the peripheral register
 * write speed, the peripheral interrupt flags may still be set after exiting the
 * ISR; this results in the same failure as errata 838869.
350 */
#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U))
/* DSB makes sure the write that clears the peripheral IRQ flag completes before exception return. */
#define SDK_ISR_EXIT_BARRIER __DSB()
#else
/* Other cores are not affected; the barrier expands to nothing. */
#define SDK_ISR_EXIT_BARRIER
#endif
356
357 /*! @} */
358
359 /*! @name Alignment variable definition macros */
360 /*! @{ */
361 #if (defined(__ICCARM__))
362 /*
363 * Workaround to disable MISRA C message suppress warnings for IAR compiler.
364 * http:/ /supp.iar.com/Support/?note=24725
365 */
366 _Pragma("diag_suppress=Pm120")
367 #define SDK_PRAGMA(x) _Pragma(#x)
368 _Pragma("diag_error=Pm120")
369 /*! Macro to define a variable with alignbytes alignment */
370 #define SDK_ALIGN(var, alignbytes) SDK_PRAGMA(data_alignment = alignbytes) var
371 #elif defined(__CC_ARM) || defined(__ARMCC_VERSION)
372 /*! Macro to define a variable with alignbytes alignment */
373 #define SDK_ALIGN(var, alignbytes) __attribute__((aligned(alignbytes))) var
374 #elif defined(__GNUC__) || defined(DOXYGEN_OUTPUT)
375 /*! Macro to define a variable with alignbytes alignment */
376 #define SDK_ALIGN(var, alignbytes) var __attribute__((aligned(alignbytes)))
377 #else
378 #error Toolchain not supported
379 #endif
380
381 /*! Macro to define a variable with L1 d-cache line size alignment */
382 #if defined(FSL_FEATURE_L1DCACHE_LINESIZE_BYTE)
383 #define SDK_L1DCACHE_ALIGN(var) SDK_ALIGN(var, FSL_FEATURE_L1DCACHE_LINESIZE_BYTE)
384 #endif
385 /*! Macro to define a variable with L2 cache line size alignment */
386 #if defined(FSL_FEATURE_L2CACHE_LINESIZE_BYTE)
387 #define SDK_L2CACHE_ALIGN(var) SDK_ALIGN(var, FSL_FEATURE_L2CACHE_LINESIZE_BYTE)
388 #endif
389
390 /*! Macro to change a value to a given size aligned value */
391 #define SDK_SIZEALIGN(var, alignbytes) \
392 ((unsigned int)((var) + ((alignbytes)-1U)) & (unsigned int)(~(unsigned int)((alignbytes)-1U)))
393 /*! @} */
394
395 /*!
396 * @name Non-cacheable region definition macros
397 *
398 * For initialized non-zero non-cacheable variables, please use "AT_NONCACHEABLE_SECTION_INIT(var) ={xx};" or
399 * "AT_NONCACHEABLE_SECTION_ALIGN_INIT(var) ={xx};" in your projects to define them. For zero-inited non-cacheable
400 * variables, please use "AT_NONCACHEABLE_SECTION(var);" or "AT_NONCACHEABLE_SECTION_ALIGN(var);" to define them,
401 * these zero-inited variables will be initialized to zero in system startup.
402 *
403 * @note For GCC, when the non-cacheable section is required, please define "__STARTUP_INITIALIZE_NONCACHEDATA"
404 * in your projects to make sure the non-cacheable section variables will be initialized in system startup.
405 *
406 * @{
407 */
408
409 /*!
410 * @def AT_NONCACHEABLE_SECTION(var)
411 * Define a variable \a var, and place it in non-cacheable section.
412 *
413 * @def AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes)
414 * Define a variable \a var, and place it in non-cacheable section, the start address
415 * of the variable is aligned to \a alignbytes.
416 *
417 * @def AT_NONCACHEABLE_SECTION_INIT(var)
418 * Define a variable \a var with initial value, and place it in non-cacheable section.
419 *
420 * @def AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes)
421 * Define a variable \a var with initial value, and place it in non-cacheable section,
422 * the start address of the variable is aligned to \a alignbytes.
423 */
424
425 #if ((!(defined(FSL_FEATURE_HAS_NO_NONCACHEABLE_SECTION) && FSL_FEATURE_HAS_NO_NONCACHEABLE_SECTION)) && \
426 defined(FSL_FEATURE_L1ICACHE_LINESIZE_BYTE))
427
428 #if (defined(__ICCARM__))
429 #define AT_NONCACHEABLE_SECTION(var) var @"NonCacheable"
430 #define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) SDK_PRAGMA(data_alignment = alignbytes) var @"NonCacheable"
431 #define AT_NONCACHEABLE_SECTION_INIT(var) var @"NonCacheable.init"
432 #define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
433 SDK_PRAGMA(data_alignment = alignbytes) var @"NonCacheable.init"
434
435 #elif (defined(__CC_ARM) || defined(__ARMCC_VERSION))
436 #define AT_NONCACHEABLE_SECTION_INIT(var) __attribute__((section("NonCacheable.init"))) var
437 #define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
438 __attribute__((section("NonCacheable.init"))) __attribute__((aligned(alignbytes))) var
439 #if (defined(__CC_ARM))
440 #define AT_NONCACHEABLE_SECTION(var) __attribute__((section("NonCacheable"), zero_init)) var
441 #define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
442 __attribute__((section("NonCacheable"), zero_init)) __attribute__((aligned(alignbytes))) var
443 #else
444 #define AT_NONCACHEABLE_SECTION(var) __attribute__((section(".bss.NonCacheable"))) var
445 #define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
446 __attribute__((section(".bss.NonCacheable"))) __attribute__((aligned(alignbytes))) var
447 #endif
448
449 #elif (defined(__GNUC__)) || defined(DOXYGEN_OUTPUT)
450 /* For GCC, when the non-cacheable section is required, please define "__STARTUP_INITIALIZE_NONCACHEDATA"
451 * in your projects to make sure the non-cacheable section variables will be initialized in system startup.
452 */
453 #define AT_NONCACHEABLE_SECTION_INIT(var) __attribute__((section("NonCacheable.init"))) var
454 #define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
455 __attribute__((section("NonCacheable.init"))) var __attribute__((aligned(alignbytes)))
456 #define AT_NONCACHEABLE_SECTION(var) __attribute__((section("NonCacheable,\"aw\",%nobits @"))) var
457 #define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
458 __attribute__((section("NonCacheable,\"aw\",%nobits @"))) var __attribute__((aligned(alignbytes)))
459 #else
460 #error Toolchain not supported.
461 #endif
462
463 #else
464
465 #define AT_NONCACHEABLE_SECTION(var) var
466 #define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) SDK_ALIGN(var, alignbytes)
467 #define AT_NONCACHEABLE_SECTION_INIT(var) var
468 #define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) SDK_ALIGN(var, alignbytes)
469
470 #endif
471
472 /*! @} */
473
474 /*!
475 * @name Time sensitive region
476 * @{
477 */
478
479 /*!
480 * @def AT_QUICKACCESS_SECTION_CODE(func)
481 * Place function in a section which can be accessed quickly by core.
482 *
483 * @def AT_QUICKACCESS_SECTION_DATA(var)
484 * Place data in a section which can be accessed quickly by core.
485 *
486 * @def AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes)
487 * Place data in a section which can be accessed quickly by core, and the variable
488 * address is set to align with \a alignbytes.
489 */
490 #if (defined(__ICCARM__))
491 #define AT_QUICKACCESS_SECTION_CODE(func) func @"CodeQuickAccess"
492 #define AT_QUICKACCESS_SECTION_DATA(var) var @"DataQuickAccess"
493 #define AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes) \
494 SDK_PRAGMA(data_alignment = alignbytes) var @"DataQuickAccess"
495 #elif (defined(__CC_ARM) || defined(__ARMCC_VERSION))
496 #define AT_QUICKACCESS_SECTION_CODE(func) __attribute__((section("CodeQuickAccess"), __noinline__)) func
497 #define AT_QUICKACCESS_SECTION_DATA(var) __attribute__((section("DataQuickAccess"))) var
498 #define AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes) \
499 __attribute__((section("DataQuickAccess"))) __attribute__((aligned(alignbytes))) var
500 #elif (defined(__GNUC__)) || defined(DOXYGEN_OUTPUT)
501 #define AT_QUICKACCESS_SECTION_CODE(func) __attribute__((section("CodeQuickAccess"), __noinline__)) func
502 #define AT_QUICKACCESS_SECTION_DATA(var) __attribute__((section("DataQuickAccess"))) var
503 #define AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes) \
504 __attribute__((section("DataQuickAccess"))) var __attribute__((aligned(alignbytes)))
505 #else
506 #error Toolchain not supported.
507 #endif /* defined(__ICCARM__) */
508 /*! @} */
509
510 /*!
511 * @name Ram Function
512 * @{
513 *
514 * @def RAMFUNCTION_SECTION_CODE(func)
515 * Place function in ram.
516 */
517 #if (defined(__ICCARM__))
518 #define RAMFUNCTION_SECTION_CODE(func) func @"RamFunction"
519 #elif (defined(__CC_ARM) || defined(__ARMCC_VERSION))
520 #define RAMFUNCTION_SECTION_CODE(func) __attribute__((section("RamFunction"))) func
521 #elif (defined(__GNUC__)) || defined(DOXYGEN_OUTPUT)
522 #define RAMFUNCTION_SECTION_CODE(func) __attribute__((section("RamFunction"))) func
523 #else
524 #error Toolchain not supported.
525 #endif /* defined(__ICCARM__) */
526 /*! @} */
527
528 #if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
529 void DefaultISR(void);
530 #endif
531
532 /*
533 * The fsl_clock.h is included here because it needs MAKE_VERSION/MAKE_STATUS/status_t
534 * defined in previous of this file.
535 */
536 #include "fsl_clock.h"
537
538 /*
539 * Chip level peripheral reset API, for MCUs that implement peripheral reset control external to a peripheral
540 */
541 #if ((defined(FSL_FEATURE_SOC_SYSCON_COUNT) && (FSL_FEATURE_SOC_SYSCON_COUNT > 0)) || \
542 (defined(FSL_FEATURE_SOC_ASYNC_SYSCON_COUNT) && (FSL_FEATURE_SOC_ASYNC_SYSCON_COUNT > 0)))
543 #include "fsl_reset.h"
544 #endif
545
546 /*******************************************************************************
547 * API
548 ******************************************************************************/
549
550 #if defined(__cplusplus)
551 extern "C" {
552 #endif /* __cplusplus*/
553
554 /*!
555 * @brief Enable specific interrupt.
556 *
557 * Enable LEVEL1 interrupt. For some devices, there might be multiple interrupt
558 * levels. For example, there are NVIC and intmux. Here the interrupts connected
559 * to NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
560 * The interrupts connected to intmux are the LEVEL2 interrupts, they are routed
561 * to NVIC first then routed to core.
562 *
563 * This function only enables the LEVEL1 interrupts. The number of LEVEL1 interrupts
564 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
565 *
566 * @param interrupt The IRQ number.
567 * @retval kStatus_Success Interrupt enabled successfully
568 * @retval kStatus_Fail Failed to enable the interrupt
569 */
static inline status_t EnableIRQ(IRQn_Type interrupt)
{
    status_t status = kStatus_Success;

    /* Reject the "interrupt not available" placeholder. */
    if (NotAvail_IRQn == interrupt)
    {
        status = kStatus_Fail;
    }

#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
    /* Reject LEVEL2 (e.g. intmux) interrupt numbers; only LEVEL1 IRQs are handled here. */
    else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
    {
        status = kStatus_Fail;
    }
#endif

    else
    {
#if defined(__GIC_PRIO_BITS)
        /* Cortex-A core: enable through the GIC. */
        GIC_EnableIRQ(interrupt);
#else
        /* Cortex-M core: enable through the NVIC. */
        NVIC_EnableIRQ(interrupt);
#endif
    }

    return status;
}
597
598 /*!
599 * @brief Disable specific interrupt.
600 *
601 * Disable LEVEL1 interrupt. For some devices, there might be multiple interrupt
602 * levels. For example, there are NVIC and intmux. Here the interrupts connected
603 * to NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
604 * The interrupts connected to intmux are the LEVEL2 interrupts, they are routed
605 * to NVIC first then routed to core.
606 *
607 * This function only disables the LEVEL1 interrupts. The number of LEVEL1 interrupts
608 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
609 *
610 * @param interrupt The IRQ number.
611 * @retval kStatus_Success Interrupt disabled successfully
612 * @retval kStatus_Fail Failed to disable the interrupt
613 */
static inline status_t DisableIRQ(IRQn_Type interrupt)
{
    status_t status = kStatus_Success;

    /* Reject the "interrupt not available" placeholder. */
    if (NotAvail_IRQn == interrupt)
    {
        status = kStatus_Fail;
    }

#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
    /* Reject LEVEL2 (e.g. intmux) interrupt numbers; only LEVEL1 IRQs are handled here. */
    else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
    {
        status = kStatus_Fail;
    }
#endif

    else
    {
#if defined(__GIC_PRIO_BITS)
        /* Cortex-A core: disable through the GIC. */
        GIC_DisableIRQ(interrupt);
#else
        /* Cortex-M core: disable through the NVIC. */
        NVIC_DisableIRQ(interrupt);
#endif
    }

    return status;
}
641
642 /*!
643 * @brief Enable the IRQ, and also set the interrupt priority.
644 *
645 * Only handle LEVEL1 interrupt. For some devices, there might be multiple interrupt
646 * levels. For example, there are NVIC and intmux. Here the interrupts connected
647 * to NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
648 * The interrupts connected to intmux are the LEVEL2 interrupts, they are routed
649 * to NVIC first then routed to core.
650 *
651 * This function only handles the LEVEL1 interrupts. The number of LEVEL1 interrupts
652 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
653 *
654 * @param interrupt The IRQ to Enable.
655 * @param priNum Priority number set to interrupt controller register.
656 * @retval kStatus_Success Interrupt priority set successfully
657 * @retval kStatus_Fail Failed to set the interrupt priority.
658 */
static inline status_t EnableIRQWithPriority(IRQn_Type interrupt, uint8_t priNum)
{
    status_t status = kStatus_Success;

    /* Reject the "interrupt not available" placeholder. */
    if (NotAvail_IRQn == interrupt)
    {
        status = kStatus_Fail;
    }

#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
    /* Reject LEVEL2 (e.g. intmux) interrupt numbers; only LEVEL1 IRQs are handled here. */
    else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
    {
        status = kStatus_Fail;
    }
#endif

    else
    {
        /* Set the priority first so the IRQ cannot fire at a stale priority once enabled. */
#if defined(__GIC_PRIO_BITS)
        GIC_SetPriority(interrupt, priNum);
        GIC_EnableIRQ(interrupt);
#else
        NVIC_SetPriority(interrupt, priNum);
        NVIC_EnableIRQ(interrupt);
#endif
    }

    return status;
}
688
689 /*!
690 * @brief Set the IRQ priority.
691 *
692 * Only handle LEVEL1 interrupt. For some devices, there might be multiple interrupt
693 * levels. For example, there are NVIC and intmux. Here the interrupts connected
694 * to NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
695 * The interrupts connected to intmux are the LEVEL2 interrupts, they are routed
696 * to NVIC first then routed to core.
697 *
698 * This function only handles the LEVEL1 interrupts. The number of LEVEL1 interrupts
699 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
700 *
701 * @param interrupt The IRQ to set.
702 * @param priNum Priority number set to interrupt controller register.
703 *
704 * @retval kStatus_Success Interrupt priority set successfully
705 * @retval kStatus_Fail Failed to set the interrupt priority.
706 */
static inline status_t IRQ_SetPriority(IRQn_Type interrupt, uint8_t priNum)
{
    status_t status = kStatus_Success;

    /* Reject the "interrupt not available" placeholder. */
    if (NotAvail_IRQn == interrupt)
    {
        status = kStatus_Fail;
    }

#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
    /* Reject LEVEL2 (e.g. intmux) interrupt numbers; only LEVEL1 IRQs are handled here. */
    else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
    {
        status = kStatus_Fail;
    }
#endif

    else
    {
#if defined(__GIC_PRIO_BITS)
        /* Cortex-A core: set the priority in the GIC. */
        GIC_SetPriority(interrupt, priNum);
#else
        /* Cortex-M core: set the priority in the NVIC. */
        NVIC_SetPriority(interrupt, priNum);
#endif
    }

    return status;
}
734
735 /*!
736 * @brief Clear the pending IRQ flag.
737 *
738 * Only handle LEVEL1 interrupt. For some devices, there might be multiple interrupt
739 * levels. For example, there are NVIC and intmux. Here the interrupts connected
740 * to NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
741 * The interrupts connected to intmux are the LEVEL2 interrupts, they are routed
742 * to NVIC first then routed to core.
743 *
744 * This function only handles the LEVEL1 interrupts. The number of LEVEL1 interrupts
745 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
746 *
747 * @param interrupt The flag which IRQ to clear.
748 *
 * @retval kStatus_Success Interrupt pending flag cleared successfully
 * @retval kStatus_Fail Failed to clear the interrupt pending flag.
751 */
static inline status_t IRQ_ClearPendingIRQ(IRQn_Type interrupt)
{
    status_t status = kStatus_Success;

    /* Reject the "interrupt not available" placeholder. */
    if (NotAvail_IRQn == interrupt)
    {
        status = kStatus_Fail;
    }

#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
    /* Reject LEVEL2 (e.g. intmux) interrupt numbers; only LEVEL1 IRQs are handled here. */
    else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
    {
        status = kStatus_Fail;
    }
#endif

    else
    {
#if defined(__GIC_PRIO_BITS)
        /* Cortex-A core: clear the pending flag in the GIC. */
        GIC_ClearPendingIRQ(interrupt);
#else
        /* Cortex-M core: clear the pending flag in the NVIC. */
        NVIC_ClearPendingIRQ(interrupt);
#endif
    }

    return status;
}
779
780 /*!
781 * @brief Disable the global IRQ
782 *
783 * Disable the global interrupt and return the current primask register. User is required to provided the primask
784 * register for the EnableGlobalIRQ().
785 *
786 * @return Current primask value.
787 */
static inline uint32_t DisableGlobalIRQ(void)
{
    uint32_t mask;

    /* Capture the current interrupt-mask state before masking, so the caller
       can restore it with EnableGlobalIRQ(); this makes the pair nestable. */
#if defined(CPSR_I_Msk)
    /* Cortex-A (AArch32): I bit of CPSR. */
    mask = __get_CPSR() & CPSR_I_Msk;
#elif defined(DAIF_I_BIT)
    /* AArch64: IRQ mask bit of DAIF. */
    mask = __get_DAIF() & DAIF_I_BIT;
#else
    /* Cortex-M: PRIMASK register. */
    mask = __get_PRIMASK();
#endif
    __disable_irq();

    return mask;
}
803
804 /*!
805 * @brief Enable the global IRQ
806 *
807 * Set the primask register with the provided primask value but not just enable the primask. The idea is for the
808 * convenience of integration of RTOS. some RTOS get its own management mechanism of primask. User is required to
809 * use the EnableGlobalIRQ() and DisableGlobalIRQ() in pair.
810 *
811 * @param primask value of primask register to be restored. The primask value is supposed to be provided by the
812 * DisableGlobalIRQ().
813 */
static inline void EnableGlobalIRQ(uint32_t primask)
{
#if defined(CPSR_I_Msk)
    /* Cortex-A (AArch32): restore only the I bit that DisableGlobalIRQ() captured. */
    __set_CPSR((__get_CPSR() & ~CPSR_I_Msk) | primask);
#elif defined(DAIF_I_BIT)
    /* AArch64: interrupts were enabled before only if the saved mask bit is clear. */
    if (0UL == primask)
    {
        __enable_irq();
    }
#else
    /* Cortex-M: write the saved PRIMASK back (restores, not unconditionally enables). */
    __set_PRIMASK(primask);
#endif
}
827
828 #if defined(ENABLE_RAM_VECTOR_TABLE)
829 /*!
830 * @brief install IRQ handler
831 *
832 * @param irq IRQ number
833 * @param irqHandler IRQ handler address
834 * @return The old IRQ handler address
835 */
836 uint32_t InstallIRQHandler(IRQn_Type irq, uint32_t irqHandler);
837 #endif /* ENABLE_RAM_VECTOR_TABLE. */
838
839 #if (defined(FSL_FEATURE_SOC_SYSCON_COUNT) && (FSL_FEATURE_SOC_SYSCON_COUNT > 0))
840
841 /*
842 * When FSL_FEATURE_POWERLIB_EXTEND is defined to non-zero value,
843 * powerlib should be used instead of these functions.
844 */
845 #if !(defined(FSL_FEATURE_POWERLIB_EXTEND) && (FSL_FEATURE_POWERLIB_EXTEND != 0))
846 /*!
847 * @brief Enable specific interrupt for wake-up from deep-sleep mode.
848 *
849 * Enable the interrupt for wake-up from deep sleep mode.
850 * Some interrupts are typically used in sleep mode only and will not occur during
851 * deep-sleep mode because relevant clocks are stopped. However, it is possible to enable
852 * those clocks (significantly increasing power consumption in the reduced power mode),
853 * making these wake-ups possible.
854 *
 * @note This function also enables the interrupt in the NVIC (EnableIRQ() is called internally).
856 *
857 * @param interrupt The IRQ number.
858 */
859 void EnableDeepSleepIRQ(IRQn_Type interrupt);
860
861 /*!
862 * @brief Disable specific interrupt for wake-up from deep-sleep mode.
863 *
864 * Disable the interrupt for wake-up from deep sleep mode.
865 * Some interrupts are typically used in sleep mode only and will not occur during
866 * deep-sleep mode because relevant clocks are stopped. However, it is possible to enable
867 * those clocks (significantly increasing power consumption in the reduced power mode),
868 * making these wake-ups possible.
869 *
 * @note This function also disables the interrupt in the NVIC (DisableIRQ() is called internally).
871 *
872 * @param interrupt The IRQ number.
873 */
874 void DisableDeepSleepIRQ(IRQn_Type interrupt);
875 #endif /* FSL_FEATURE_POWERLIB_EXTEND */
876 #endif /* FSL_FEATURE_SOC_SYSCON_COUNT */
877
878 #if defined(DWT)
879 /*!
880 * @brief Enable the counter to get CPU cycles.
881 */
882 void MSDK_EnableCpuCycleCounter(void);
883
884 /*!
885 * @brief Get the current CPU cycle count.
886 *
887 * @return Current CPU cycle count.
888 */
889 uint32_t MSDK_GetCpuCycleCount(void);
890 #endif
891
892 #if defined(__cplusplus)
893 }
894 #endif /* __cplusplus*/
895
896 /*! @} */
897
898 #endif /* FSL_COMMON_ARM_H_ */
899