/**************************************************************************//**
 * @file     cmsis_gcc.h for CMSIS-Core(R)
 * @brief    CMSIS compiler specific macros, functions, instructions
 * @version  V1.1.0
 * @date     13. April 2020
 ******************************************************************************/
/*
 * Copyright (c) 2009-2020 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __CMSIS_GCC_H
#define __CMSIS_GCC_H

/* ignore some GCC warnings */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-conversion"
#pragma GCC diagnostic ignored "-Wconversion"
#pragma GCC diagnostic ignored "-Wunused-parameter"

/* Fallback for __has_builtin */
#ifndef __has_builtin
  #define __has_builtin(x) (0)
#endif

/* CMSIS compiler specific defines */

#ifndef   __ASM
  #define __ASM                     __asm
#endif
#ifndef   __INLINE
  #define __INLINE                  inline
#endif
#ifndef   __FORCEINLINE
  #define __FORCEINLINE             __attribute__((always_inline))
#endif
#ifndef   __STATIC_INLINE
  #define __STATIC_INLINE           static inline
#endif
#ifndef   __STATIC_FORCEINLINE
  #define __STATIC_FORCEINLINE      __attribute__((always_inline)) static inline
#endif
#ifndef   __NO_RETURN
  #define __NO_RETURN               __attribute__((__noreturn__))
#endif
#ifndef   CMSIS_DEPRECATED
  #define CMSIS_DEPRECATED          __attribute__((deprecated))
#endif
#ifndef   __USED
  #define __USED                    __attribute__((used))
#endif
#ifndef   __WEAK
  #define __WEAK                    __attribute__((weak))
#endif
#ifndef   __PACKED
  #define __PACKED                  __attribute__((packed, aligned(1)))
#endif
#ifndef   __PACKED_STRUCT
  #define __PACKED_STRUCT           struct __attribute__((packed, aligned(1)))
#endif
#ifndef   __UNALIGNED_UINT16_WRITE
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
  /*lint -esym(9058, T_UINT16_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_WRITE */
  __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT16_WRITE(addr, val)    (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef   __UNALIGNED_UINT16_READ
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
  /*lint -esym(9058, T_UINT16_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_READ */
  __PACKED_STRUCT T_UINT16_READ { uint16_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT16_READ(addr)          (((const struct T_UINT16_READ *)(const void *)(addr))->v)
#endif
#ifndef   __UNALIGNED_UINT32_WRITE
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
  /*lint -esym(9058, T_UINT32_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_WRITE */
  __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT32_WRITE(addr, val)    (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef   __UNALIGNED_UINT32_READ
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
  __PACKED_STRUCT T_UINT32_READ { uint32_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT32_READ(addr)          (((const struct T_UINT32_READ *)(const void *)(addr))->v)
#endif
#ifndef   __ALIGNED
  #define __ALIGNED(x)              __attribute__((aligned(x)))
#endif
#ifndef   __COMPILER_BARRIER
  #define __COMPILER_BARRIER()      __ASM volatile("":::"memory")
#endif
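
/* Usage sketch (illustrative, not part of the CMSIS API): the __UNALIGNED_* macros
   above let the compiler generate safe accesses to addresses that may not be
   naturally aligned, e.g. multi-byte fields inside a byte-oriented protocol buffer.
   The function name and parameters are hypothetical. */
__STATIC_INLINE uint32_t example_read_u32_unaligned(const uint8_t *buf, uint32_t offset)
{
  /* reads a 32-bit value (native byte order) from an arbitrary byte offset */
  return __UNALIGNED_UINT32_READ(buf + offset);
}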


__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
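
/* Usage sketch (illustrative, not part of the CMSIS API): the SIMD intrinsics above
   operate on packed sub-words. __QADD16, for instance, adds the two signed halfwords
   of each operand independently and saturates each lane to the signed 16-bit range.
   The helper name and lane packing below are hypothetical. */
__STATIC_INLINE uint32_t example_qadd16_lanes(int16_t a_lo, int16_t a_hi, int16_t b_lo, int16_t b_hi)
{
  uint32_t a = ((uint32_t)(uint16_t)a_hi << 16) | (uint32_t)(uint16_t)a_lo;
  uint32_t b = ((uint32_t)(uint16_t)b_hi << 16) | (uint32_t)(uint16_t)b_lo;
  return __QADD16(a, b);   /* each halfword lane saturates independently */
}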

__STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2)
{
  int32_t result;

  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}
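
/* Usage sketch (illustrative, not part of the CMSIS API): __SMLALD performs a dual
   signed 16x16 multiply-accumulate into a 64-bit accumulator, i.e. one step of a
   fixed-point dot product. The function name is hypothetical. */
__STATIC_INLINE uint64_t example_smlald_step(uint32_t packed_a, uint32_t packed_b, uint64_t acc)
{
  /* acc += (a.low16 * b.low16) + (a.high16 * b.high16), both products signed */
  return __SMLALD(packed_a, packed_b, acc);
}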

__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2)
{
  int32_t result;

  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}



#define __PKHBT(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) |  \
                                           ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )

#define __PKHTB(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0xFFFF0000UL) |  \
                                           ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)  )
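
/* Worked example (illustrative, not part of the CMSIS API): __PKHBT keeps the bottom
   halfword of ARG1 and places the shifted ARG2 in the top halfword, so
   __PKHBT(0x00001111UL, 0x00002222UL, 16) yields 0x22221111UL, and
   __PKHTB(0x33330000UL, 0x44440000UL, 16) yields 0x33334444UL.
   The helper name below is hypothetical. */
__STATIC_INLINE uint32_t example_pack_halfwords(uint16_t lo, uint16_t hi)
{
  return __PKHBT((uint32_t)lo, (uint32_t)hi, 16);   /* result: (hi << 16) | lo */
}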

__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
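
/* Worked example (illustrative, not part of the CMSIS API): with op1 = 0x00030002
   (halfwords 3 and 2) and op2 = 0x00050004 (halfwords 5 and 4), __SMUAD returns
   2*4 + 3*5 = 23, and __SMLAD with op3 = 100 returns 123. A minimal dual-MAC step
   might look like this; the function name is hypothetical. */
__STATIC_INLINE uint32_t example_smlad_step(uint32_t packed_a, uint32_t packed_b, uint32_t acc)
{
  /* acc + a.low16*b.low16 + a.high16*b.high16 (signed 16-bit products) */
  return __SMLAD(packed_a, packed_b, acc);
}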

__STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
{
  int32_t result;

  __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}



/* ##########################  Core Instruction Access  ######################### */
/**
  \brief   No Operation
 */
#define __NOP()                             __ASM volatile ("nop")

/**
  \brief   Wait For Interrupt
 */
#define __WFI()                             __ASM volatile ("wfi":::"memory")

/**
  \brief   Wait For Event
 */
#define __WFE()                             __ASM volatile ("wfe":::"memory")

/**
  \brief   Send Event
 */
#define __SEV()                             __ASM volatile ("sev")

/**
  \brief   Instruction Synchronization Barrier
  \details Instruction Synchronization Barrier flushes the pipeline in the processor,
           so that all instructions following the ISB are fetched from cache or memory,
           after the instruction has been completed.
 */
__STATIC_FORCEINLINE void __ISB(void)
{
  __ASM volatile ("isb 0xF":::"memory");
}


/**
  \brief   Data Synchronization Barrier
  \details Acts as a special kind of Data Memory Barrier.
           It completes when all explicit memory accesses before this instruction complete.
 */
__STATIC_FORCEINLINE void __DSB(void)
{
  __ASM volatile ("dsb 0xF":::"memory");
}

/**
  \brief   Data Memory Barrier
  \details Ensures the apparent order of the explicit memory operations before
           and after the instruction, without ensuring their completion.
 */
__STATIC_FORCEINLINE void __DMB(void)
{
  __ASM volatile ("dmb 0xF":::"memory");
}
/**
  \brief   Reverse byte order (32 bit)
  \details Reverses the byte order in an unsigned integer value. For example, 0x12345678 becomes 0x78563412.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__STATIC_FORCEINLINE uint32_t __REV(uint32_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
  return __builtin_bswap32(value);
#else
  uint32_t result;

  __ASM ("rev %0, %1" : "=r" (result) : "r" (value) );
  return result;
#endif
}

/**
  \brief   Reverse byte order (16 bit)
  \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value)
{
  uint32_t result;
  __ASM ("rev16 %0, %1" : "=r" (result) : "r" (value));
  return result;
}

/**
  \brief   Reverse byte order (16 bit)
  \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__STATIC_FORCEINLINE int16_t __REVSH(int16_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  return (int16_t)__builtin_bswap16(value);
#else
  int16_t result;

  __ASM ("revsh %0, %1" : "=r" (result) : "r" (value) );
  return result;
#endif
}
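
/* Usage sketch (illustrative, not part of the CMSIS API): __REV is commonly used to
   convert a 32-bit big-endian wire value to host order on a little-endian core.
   The function name is hypothetical. */
__STATIC_INLINE uint32_t example_be32_to_host(uint32_t wire_value)
{
  return __REV(wire_value);   /* e.g. 0x12345678 <-> 0x78563412 */
}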

/**
  \brief   Rotate Right in unsigned value (32 bit)
  \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
  \param [in]    op1  Value to rotate
  \param [in]    op2  Number of Bits to rotate
  \return               Rotated value
 */
__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
{
  op2 %= 32U;
  if (op2 == 0U) {
    return op1;
  }
  return (op1 >> op2) | (op1 << (32U - op2));
}
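
/* Worked example (illustrative, not part of the CMSIS API): __ROR(0x80000001UL, 1U)
   rotates both set bits right by one position and yields 0xC0000000UL; a rotate
   count of 0 (or any multiple of 32) returns the value unchanged. */
__STATIC_INLINE uint32_t example_ror_demo(void)
{
  return __ROR(0x80000001UL, 1U);   /* == 0xC0000000UL */
}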


/**
  \brief   Breakpoint
  \param [in]    value  is ignored by the processor.
                        If required, a debugger can use it to store additional information about the breakpoint.
 */
#define __BKPT(value)   __ASM volatile ("bkpt "#value)

/**
  \brief   Reverse bit order of value
  \details Reverses the bit order of the given value.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value)
{
  uint32_t result;

#if ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
     (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
     (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    )
  __ASM ("rbit %0, %1" : "=r" (result) : "r" (value) );
#else
  int32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */

  result = value;                      /* r will be reversed bits of v; first get LSB of v */
  for (value >>= 1U; value; value >>= 1U)
  {
    result <<= 1U;
    result |= value & 1U;
    s--;
  }
  result <<= s;                        /* shift when v's highest bits are zero */
#endif
  return result;
}

/**
  \brief   Count leading zeros
  \param [in]  value  Value to count the leading zeros
  \return             number of leading zeros in value
 */
__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)
{
  /* Even though __builtin_clz produces a CLZ instruction on ARM, formally
     __builtin_clz(0) is undefined behaviour, so handle this case specially.
     This guarantees ARM-compatible results if happening to compile on a non-ARM
     target, and ensures the compiler doesn't decide to activate any
     optimisations using the logic "value was passed to __builtin_clz, so it
     is non-zero".
     ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a
     single CLZ instruction.
   */
  if (value == 0U)
  {
    return 32U;
  }
  return __builtin_clz(value);
}
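
/* Usage sketch (illustrative, not part of the CMSIS API): combining __RBIT and __CLZ
   gives the index of the lowest set bit, a common pattern when scanning status or
   interrupt masks; it returns 32 when no bit is set. The function name is hypothetical. */
__STATIC_INLINE uint8_t example_lowest_set_bit(uint32_t mask)
{
  return __CLZ(__RBIT(mask));   /* e.g. 0x00000008U -> 3 */
}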

/**
  \brief   LDR Exclusive (8 bit)
  \details Executes an exclusive LDR instruction for an 8-bit value.
  \param [in]    ptr  Pointer to data
  \return             value of type uint8_t at (*ptr)
 */
__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr)
{
  uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
#else
  /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
     accepted by the assembler, so the following, less efficient pattern
     has to be used instead.
  */
  __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
  return ((uint8_t) result);    /* Add explicit type cast here */
}


/**
  \brief   LDR Exclusive (16 bit)
  \details Executes an exclusive LDR instruction for 16-bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint16_t at (*ptr)
 */
__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr)
{
  uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
#else
  /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
     accepted by the assembler, so the following, less efficient pattern
     has to be used instead.
  */
  __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
  return ((uint16_t) result);    /* Add explicit type cast here */
}


/**
  \brief   LDR Exclusive (32 bit)
  \details Executes an exclusive LDR instruction for 32-bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint32_t at (*ptr)
 */
__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr)
{
  uint32_t result;

  __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
  return(result);
}


/**
  \brief   STR Exclusive (8 bit)
  \details Executes an exclusive STR instruction for 8-bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
{
  uint32_t result;

  __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
  return(result);
}


/**
  \brief   STR Exclusive (16 bit)
  \details Executes an exclusive STR instruction for 16-bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
{
  uint32_t result;

  __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
  return(result);
}


/**
  \brief   STR Exclusive (32 bit)
  \details Executes an exclusive STR instruction for 32-bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
{
  uint32_t result;

  __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
  return(result);
}


/**
  \brief   Remove the exclusive lock
  \details Removes the exclusive lock which is created by LDREX.
 */
__STATIC_FORCEINLINE void __CLREX(void)
{
  __ASM volatile ("clrex" ::: "memory");
}
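
/* Usage sketch (illustrative, not part of the CMSIS API): a lock-free read-modify-write
   using the exclusive access pair above. The retry loop is the standard LDREX/STREX
   pattern; the barrier placement is a conservative choice and may need to be adapted
   to the surrounding code's ordering requirements. The function name is hypothetical. */
__STATIC_INLINE uint32_t example_atomic_add(volatile uint32_t *addr, uint32_t inc)
{
  uint32_t newval;
  do {
    newval = __LDREXW(addr) + inc;          /* load-exclusive, then modify */
  } while (__STREXW(newval, addr) != 0U);   /* retry until the store-exclusive succeeds */
  __DMB();                                  /* make the update observable in order */
  return newval;
}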

/**
  \brief   Signed Saturate
  \details Saturates a signed value.
  \param [in]  value  Value to be saturated
  \param [in]    sat  Bit position to saturate to (1..32)
  \return             Saturated value
 */
#define __SSAT(ARG1, ARG2) \
__extension__ \
({                          \
  int32_t __RES, __ARG1 = (ARG1); \
  __ASM volatile ("ssat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) : "cc" ); \
  __RES; \
 })


/**
  \brief   Unsigned Saturate
  \details Saturates an unsigned value.
  \param [in]  value  Value to be saturated
  \param [in]    sat  Bit position to saturate to (0..31)
  \return             Saturated value
 */
#define __USAT(ARG1, ARG2) \
__extension__ \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM volatile ("usat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) : "cc" ); \
  __RES; \
 })
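
/* Usage sketch (illustrative, not part of the CMSIS API): clamping a 32-bit intermediate
   result to a signed 16-bit sample with __SSAT. Note that the saturation position must be
   a compile-time constant, since it is encoded as an immediate. The function name is
   hypothetical. */
__STATIC_INLINE int16_t example_clamp_to_int16(int32_t value)
{
  return (int16_t)__SSAT(value, 16);   /* saturates to -32768..32767 */
}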

/* ###########################  Core Function Access  ########################### */

/**
  \brief   Enable IRQ Interrupts
  \details Enables IRQ interrupts by clearing the I-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __enable_irq(void)
{
  __ASM volatile ("cpsie i" : : : "memory");
}

/**
  \brief   Disable IRQ Interrupts
  \details Disables IRQ interrupts by setting the I-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __disable_irq(void)
{
  __ASM volatile ("cpsid i" : : : "memory");
}
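
/* Usage sketch (illustrative, not part of the CMSIS API): a minimal critical section
   around a shared counter. This simple form unconditionally re-enables IRQs, so it
   assumes interrupts were enabled on entry; production code usually saves and restores
   the previous mask instead. The function name is hypothetical. */
__STATIC_INLINE void example_counter_increment(volatile uint32_t *counter)
{
  __disable_irq();       /* mask IRQs around the read-modify-write */
  *counter += 1U;
  __enable_irq();        /* note: unconditionally re-enables IRQs */
}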

/**
  \brief   Get FPSCR
  \details Returns the current value of the Floating Point Status/Control register.
  \return               Floating Point Status/Control register value
 */
__STATIC_FORCEINLINE uint32_t __get_FPSCR(void)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
     (defined (__FPU_USED   ) && (__FPU_USED    == 1U))     )
#if __has_builtin(__builtin_arm_get_fpscr)
// Re-enable using built-in when GCC has been fixed
// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
  /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
  return __builtin_arm_get_fpscr();
#else
  uint32_t result;

  __ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
  return(result);
#endif
#else
  return(0U);
#endif
}

/**
  \brief   Set FPSCR
  \details Assigns the given value to the Floating Point Status/Control register.
  \param [in]    fpscr  Floating Point Status/Control value to set
 */
__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
     (defined (__FPU_USED   ) && (__FPU_USED    == 1U))     )
#if __has_builtin(__builtin_arm_set_fpscr)
// Re-enable using built-in when GCC has been fixed
// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
  /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
  __builtin_arm_set_fpscr(fpscr);
#else
  __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory");
#endif
#else
  (void)fpscr;
#endif
}

/** \brief  Get CPSR Register
    \return               CPSR Register value
 */
__STATIC_FORCEINLINE uint32_t __get_CPSR(void)
{
  uint32_t result;
  __ASM volatile("MRS %0, cpsr" : "=r" (result) );
  return(result);
}

/** \brief  Set CPSR Register
    \param [in]    cpsr  CPSR value to set
 */
__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr)
{
  __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory");
}

/** \brief  Get Mode
    \return               Processor Mode
 */
__STATIC_FORCEINLINE uint32_t __get_mode(void)
{
  return (__get_CPSR() & 0x1FU);
}

/** \brief  Set Mode
    \param [in]    mode  Mode value to set
 */
__STATIC_FORCEINLINE void __set_mode(uint32_t mode)
{
  __ASM volatile("MSR cpsr_c, %0" : : "r" (mode) : "memory");
}
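
/* Usage sketch (illustrative, not part of the CMSIS API): checking the current processor
   mode, e.g. to detect execution in IRQ mode (CPSR.M == 0b10010 == 0x12). The function
   name is hypothetical. */
__STATIC_INLINE uint32_t example_in_irq_mode(void)
{
  return (__get_mode() == 0x12U) ? 1U : 0U;
}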

/** \brief  Get Stack Pointer
    \return Stack Pointer value
 */
__STATIC_FORCEINLINE uint32_t __get_SP(void)
{
  uint32_t result;
  __ASM volatile("MOV %0, sp" : "=r" (result) : : "memory");
  return result;
}

/** \brief  Set Stack Pointer
    \param [in]    stack  Stack Pointer value to set
 */
__STATIC_FORCEINLINE void __set_SP(uint32_t stack)
{
  __ASM volatile("MOV sp, %0" : : "r" (stack) : "memory");
}

/** \brief  Get USR/SYS Stack Pointer
    \return USR/SYS Stack Pointer value
 */
__STATIC_FORCEINLINE uint32_t __get_SP_usr(void)
{
  uint32_t cpsr = __get_CPSR();
  uint32_t result;
  __ASM volatile(
    "CPS     #0x1F  \n"
    "MOV     %0, sp   " : "=r"(result) : : "memory"
   );
  __set_CPSR(cpsr);
  __ISB();
  return result;
}

/** \brief  Set USR/SYS Stack Pointer
    \param [in]    topOfProcStack  USR/SYS Stack Pointer value to set
 */
__STATIC_FORCEINLINE void __set_SP_usr(uint32_t topOfProcStack)
{
  uint32_t cpsr = __get_CPSR();
  __ASM volatile(
    "CPS     #0x1F  \n"
    "MOV     sp, %0   " : : "r" (topOfProcStack) : "memory"
   );
  __set_CPSR(cpsr);
  __ISB();
}

/** \brief  Get FPEXC
    \return               Floating Point Exception Control register value
 */
__STATIC_FORCEINLINE uint32_t __get_FPEXC(void)
{
#if (__FPU_PRESENT == 1)
  uint32_t result;
  __ASM volatile("VMRS %0, fpexc" : "=r" (result) );
  return(result);
#else
  return(0);
#endif
}

/** \brief  Set FPEXC
    \param [in]    fpexc  Floating Point Exception Control value to set
 */
__STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc)
{
#if (__FPU_PRESENT == 1)
  __ASM volatile ("VMSR fpexc, %0" : : "r" (fpexc) : "memory");
#endif
}

/*
 * Include common core functions to access Coprocessor 15 registers
 */

#define __get_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" )
#define __set_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" )
#define __get_CP64(cp, op1, Rt, CRm)         __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" )
#define __set_CP64(cp, op1, Rt, CRm)         __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" )
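
/* Usage sketch (illustrative, not part of the CMSIS API): the coprocessor access macros
   expand to MRC/MCR instructions. Reading the Main ID Register (MIDR), encoded as
   MRC p15, 0, <Rt>, c0, c0, 0, looks like this; cmsis_cp15.h (included below) already
   provides named accessors for the commonly used CP15 registers. The function name is
   hypothetical. */
__STATIC_INLINE uint32_t example_read_midr(void)
{
  uint32_t midr;
  __get_CP(15, 0, midr, 0, 0, 0);
  return midr;
}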

#include "cmsis_cp15.h"

/** \brief  Enable Floating Point Unit

  Critical section, called from undef handler, so systick is disabled
 */
__STATIC_INLINE void __FPU_Enable(void)
{
  __ASM volatile(
    //Permit access to VFP/NEON registers by modifying CPACR
847 " MRC p15,0,R1,c1,c0,2 \n"
848 " ORR R1,R1,#0x00F00000 \n"
849 " MCR p15,0,R1,c1,c0,2 \n"
850
851 //Ensure that subsequent instructions occur in the context of VFP/NEON access permitted
852 " ISB \n"
853
854 //Enable VFP/NEON
855 " VMRS R1,FPEXC \n"
856 " ORR R1,R1,#0x40000000 \n"
857 " VMSR FPEXC,R1 \n"
858
859 //Initialise VFP/NEON registers to 0
860 " MOV R2,#0 \n"
861
862 //Initialise D16 registers to 0
863 " VMOV D0, R2,R2 \n"
864 " VMOV D1, R2,R2 \n"
865 " VMOV D2, R2,R2 \n"
866 " VMOV D3, R2,R2 \n"
867 " VMOV D4, R2,R2 \n"
868 " VMOV D5, R2,R2 \n"
869 " VMOV D6, R2,R2 \n"
870 " VMOV D7, R2,R2 \n"
871 " VMOV D8, R2,R2 \n"
872 " VMOV D9, R2,R2 \n"
873 " VMOV D10,R2,R2 \n"
874 " VMOV D11,R2,R2 \n"
875 " VMOV D12,R2,R2 \n"
876 " VMOV D13,R2,R2 \n"
877 " VMOV D14,R2,R2 \n"
878 " VMOV D15,R2,R2 \n"
879
880 #if (defined(__ARM_NEON) && (__ARM_NEON == 1))
881 //Initialise D32 registers to 0
882 " VMOV D16,R2,R2 \n"
883 " VMOV D17,R2,R2 \n"
884 " VMOV D18,R2,R2 \n"
885 " VMOV D19,R2,R2 \n"
886 " VMOV D20,R2,R2 \n"
887 " VMOV D21,R2,R2 \n"
888 " VMOV D22,R2,R2 \n"
889 " VMOV D23,R2,R2 \n"
890 " VMOV D24,R2,R2 \n"
891 " VMOV D25,R2,R2 \n"
892 " VMOV D26,R2,R2 \n"
893 " VMOV D27,R2,R2 \n"
894 " VMOV D28,R2,R2 \n"
895 " VMOV D29,R2,R2 \n"
896 " VMOV D30,R2,R2 \n"
897 " VMOV D31,R2,R2 \n"
898 #endif
899
900 //Initialise FPSCR to a known state
901 " VMRS R1,FPSCR \n"
902 " LDR R2,=0x00086060 \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
903 " AND R1,R1,R2 \n"
904 " VMSR FPSCR,R1 "
905 : : : "cc", "r1", "r2"
906 );
907 }
908
909 #pragma GCC diagnostic pop
910
911 #endif /* __CMSIS_GCC_H */
912