1 /**************************************************************************//**
2  * @file     cmsis_armclang.h
3  * @brief    CMSIS compiler specific macros, functions, instructions
4  * @version  V1.2.1
5  * @date     05. May 2021
6  ******************************************************************************/
7 /*
8  * Copyright (c) 2009-2021 Arm Limited. All rights reserved.
9  *
10  * SPDX-License-Identifier: Apache-2.0
11  *
12  * Licensed under the Apache License, Version 2.0 (the License); you may
13  * not use this file except in compliance with the License.
14  * You may obtain a copy of the License at
15  *
16  * www.apache.org/licenses/LICENSE-2.0
17  *
18  * Unless required by applicable law or agreed to in writing, software
19  * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21  * See the License for the specific language governing permissions and
22  * limitations under the License.
23  */
24 
25 #ifndef __CMSIS_ARMCLANG_H
26 #define __CMSIS_ARMCLANG_H
27 
28 #pragma clang system_header   /* treat file as system include file */
29 
30 /* CMSIS compiler specific defines */
/* CMSIS compiler specific defines */
/* NOTE(review): this header assumes <stdint.h> is already included by the
   core header (uint16_t/uint32_t are used below) — confirm include order. */
#ifndef   __ASM
  #define __ASM                                  __asm
#endif
#ifndef   __INLINE
  #define __INLINE                               __inline
#endif
#ifndef   __FORCEINLINE
  #define __FORCEINLINE                          __attribute__((always_inline))
#endif
#ifndef   __STATIC_INLINE
  #define __STATIC_INLINE                        static __inline
#endif
#ifndef   __STATIC_FORCEINLINE
  #define __STATIC_FORCEINLINE                   __attribute__((always_inline)) static __inline
#endif
#ifndef   __NO_RETURN
  #define __NO_RETURN                            __attribute__((__noreturn__))
#endif
#ifndef   CMSIS_DEPRECATED
  #define CMSIS_DEPRECATED                       __attribute__((deprecated))
#endif
#ifndef   __USED
  #define __USED                                 __attribute__((used))
#endif
#ifndef   __WEAK
  #define __WEAK                                 __attribute__((weak))
#endif
/* aligned(1) in addition to packed: also removes alignment requirements on
   the struct object itself, so the unaligned-access helpers below are safe. */
#ifndef   __PACKED
  #define __PACKED                               __attribute__((packed, aligned(1)))
#endif
#ifndef   __PACKED_STRUCT
  #define __PACKED_STRUCT                        struct __attribute__((packed, aligned(1)))
#endif
#ifndef   __UNALIGNED_UINT16_WRITE
  #pragma clang diagnostic push
  #pragma clang diagnostic ignored "-Wpacked"
/*lint -esym(9058, T_UINT16_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_WRITE */
  __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
  #pragma clang diagnostic pop
  /* Access through a packed struct lets the compiler emit code that is legal
     for unaligned addresses instead of a plain (possibly faulting) access. */
  #define __UNALIGNED_UINT16_WRITE(addr, val)    (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef   __UNALIGNED_UINT16_READ
  #pragma clang diagnostic push
  #pragma clang diagnostic ignored "-Wpacked"
/*lint -esym(9058, T_UINT16_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_READ */
  __PACKED_STRUCT T_UINT16_READ { uint16_t v; };
  #pragma clang diagnostic pop
  #define __UNALIGNED_UINT16_READ(addr)          (((const struct T_UINT16_READ *)(const void *)(addr))->v)
#endif
#ifndef   __UNALIGNED_UINT32_WRITE
  #pragma clang diagnostic push
  #pragma clang diagnostic ignored "-Wpacked"
/*lint -esym(9058, T_UINT32_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_WRITE */
  __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
  #pragma clang diagnostic pop
  #define __UNALIGNED_UINT32_WRITE(addr, val)    (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef   __UNALIGNED_UINT32_READ
  #pragma clang diagnostic push
  #pragma clang diagnostic ignored "-Wpacked"
/*lint -esym(9058, T_UINT32_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_READ */
  __PACKED_STRUCT T_UINT32_READ { uint32_t v; };
  #pragma clang diagnostic pop
  #define __UNALIGNED_UINT32_READ(addr)          (((const struct T_UINT32_READ *)(const void *)(addr))->v)
#endif
#ifndef   __ALIGNED
  #define __ALIGNED(x)                           __attribute__((aligned(x)))
#endif
/* A second "#ifndef __PACKED" block (plain packed, no aligned(1)) used to
   follow here; it was unreachable because __PACKED is always defined above,
   so it has been removed. */
#ifndef   __COMPILER_BARRIER
  #define __COMPILER_BARRIER()                   __ASM volatile("":::"memory")
#endif
104 
/* ##########################  Core Instruction Access  ######################### */
/**
  \brief   No Operation
 */
#define __NOP                             __builtin_arm_nop

/**
  \brief   Wait For Interrupt
 */
#define __WFI                             __builtin_arm_wfi

/**
  \brief   Wait For Event
 */
#define __WFE                             __builtin_arm_wfe

/**
  \brief   Send Event
 */
#define __SEV                             __builtin_arm_sev

/**
  \brief   Instruction Synchronization Barrier
  \note    0xF selects the full-system (SY) barrier option.
 */
#define __ISB()                           __builtin_arm_isb(0xF)

/**
  \brief   Data Synchronization Barrier
  \note    0xF selects the full-system (SY) barrier option.
 */
#define __DSB()                           __builtin_arm_dsb(0xF)

/**
  \brief   Data Memory Barrier
  \note    0xF selects the full-system (SY) barrier option.
 */
#define __DMB()                           __builtin_arm_dmb(0xF)

/**
  \brief   Reverse byte order (32 bit)
  \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
#define __REV(value)   __builtin_bswap32(value)

/**
  \brief   Reverse byte order (16 bit)
  \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
  \param [in]    value  Value to reverse
  \return               Reversed value
  \note    Implemented as REV followed by a 16-bit rotate; relies on __ROR
           defined later in this file.
 */
#define __REV16(value) __ROR(__REV(value), 16)


/**
  \brief   Reverse byte order (16 bit)
  \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
#define __REVSH(value) (int16_t)__builtin_bswap16(value)
165 
166 
167 /**
168   \brief   Rotate Right in unsigned value (32 bit)
169   \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
170   \param [in]    op1  Value to rotate
171   \param [in]    op2  Number of Bits to rotate
172   \return               Rotated value
173  */
__ROR(uint32_t op1,uint32_t op2)174 __STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
175 {
176   op2 %= 32U;
177   if (op2 == 0U)
178   {
179     return op1;
180   }
181   return (op1 >> op2) | (op1 << (32U - op2));
182 }
183 
184 
/**
  \brief   Breakpoint
  \param [in]    value  is ignored by the processor.
                 If required, a debugger can use it to store additional information about the breakpoint.
  \note    value must be a literal/constant expression: it is stringized
           directly into the BKPT instruction's immediate field.
 */
#define __BKPT(value)   __ASM volatile ("bkpt "#value)

/**
  \brief   Reverse bit order of value
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
#define __RBIT          __builtin_arm_rbit
198 
199 /**
200   \brief   Count leading zeros
201   \param [in]  value  Value to count the leading zeros
202   \return             number of leading zeros in value
203  */
__CLZ(uint32_t value)204 __STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)
205 {
206   /* Even though __builtin_clz produces a CLZ instruction on ARM, formally
207      __builtin_clz(0) is undefined behaviour, so handle this case specially.
208      This guarantees ARM-compatible results if happening to compile on a non-ARM
209      target, and ensures the compiler doesn't decide to activate any
210      optimisations using the logic "value was passed to __builtin_clz, so it
211      is non-zero".
212      ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a
213      single CLZ instruction.
214    */
215   if (value == 0U)
216   {
217     return 32U;
218   }
219   return __builtin_clz(value);
220 }
221 
/**
  \brief   LDR Exclusive (8 bit)
  \details Executes a exclusive LDR instruction for 8 bit value.
  \param [in]    ptr  Pointer to data
  \return             value of type uint8_t at (*ptr)
  \note    __builtin_arm_ldrex is type-generic: the instruction width is
           presumably selected from the pointee type — the leading cast only
           narrows the returned value.
 */
#define __LDREXB        (uint8_t)__builtin_arm_ldrex


/**
  \brief   LDR Exclusive (16 bit)
  \details Executes a exclusive LDR instruction for 16 bit values.
  \param [in]    ptr  Pointer to data
  \return        value of type uint16_t at (*ptr)
 */
#define __LDREXH        (uint16_t)__builtin_arm_ldrex

/**
  \brief   LDR Exclusive (32 bit)
  \details Executes a exclusive LDR instruction for 32 bit values.
  \param [in]    ptr  Pointer to data
  \return        value of type uint32_t at (*ptr)
 */
#define __LDREXW        (uint32_t)__builtin_arm_ldrex

/**
  \brief   STR Exclusive (8 bit)
  \details Executes a exclusive STR instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
#define __STREXB        (uint32_t)__builtin_arm_strex

/**
  \brief   STR Exclusive (16 bit)
  \details Executes a exclusive STR instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
#define __STREXH        (uint32_t)__builtin_arm_strex

/**
  \brief   STR Exclusive (32 bit)
  \details Executes a exclusive STR instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
#define __STREXW        (uint32_t)__builtin_arm_strex

/**
  \brief   Remove the exclusive lock
  \details Removes the exclusive lock which is created by LDREX.
 */
#define __CLREX             __builtin_arm_clrex

/**
  \brief   Signed Saturate
  \details Saturates a signed value.
  \param [in]  value  Value to be saturated
  \param [in]    sat  Bit position to saturate to (1..32)
  \return             Saturated value
  \note    sat must be a compile-time constant for the builtin.
 */
#define __SSAT             __builtin_arm_ssat

/**
  \brief   Unsigned Saturate
  \details Saturates an unsigned value.
  \param [in]  value  Value to be saturated
  \param [in]    sat  Bit position to saturate to (0..31)
  \return             Saturated value
  \note    sat must be a compile-time constant for the builtin.
 */
#define __USAT             __builtin_arm_usat
300 
301 /* ###################  Compiler specific Intrinsics  ########################### */
302 /** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
303   Access to dedicated SIMD instructions
304   @{
305 */
306 
307 #if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1))
308 
309 #define     __SADD8                 __builtin_arm_sadd8
310 #define     __SADD16                __builtin_arm_sadd16
311 #define     __QADD8                 __builtin_arm_qadd8
312 #define     __QSUB8                 __builtin_arm_qsub8
313 #define     __QADD16                __builtin_arm_qadd16
314 #define     __SHADD16               __builtin_arm_shadd16
315 #define     __QSUB16                __builtin_arm_qsub16
316 #define     __SHSUB16               __builtin_arm_shsub16
317 #define     __QASX                  __builtin_arm_qasx
318 #define     __SHASX                 __builtin_arm_shasx
319 #define     __QSAX                  __builtin_arm_qsax
320 #define     __SHSAX                 __builtin_arm_shsax
321 #define     __SXTB16                __builtin_arm_sxtb16
322 #define     __SMUAD                 __builtin_arm_smuad
323 #define     __SMUADX                __builtin_arm_smuadx
324 #define     __SMLAD                 __builtin_arm_smlad
325 #define     __SMLADX                __builtin_arm_smladx
326 #define     __SMLALD                __builtin_arm_smlald
327 #define     __SMLALDX               __builtin_arm_smlaldx
328 #define     __SMUSD                 __builtin_arm_smusd
329 #define     __SMUSDX                __builtin_arm_smusdx
330 #define     __SMLSDX                __builtin_arm_smlsdx
331 #define     __USAT16                __builtin_arm_usat16
332 #define     __SSUB8                 __builtin_arm_ssub8
333 #define     __SXTB16                __builtin_arm_sxtb16
334 #define     __SXTAB16               __builtin_arm_sxtab16
335 
336 
/**
  \brief   Saturating Add
  \details Emits a QADD instruction: signed 32-bit addition with saturation.
  \param [in]    op1  first operand
  \param [in]    op2  second operand
  \return             saturated sum of op1 and op2
 */
__STATIC_FORCEINLINE  int32_t __QADD( int32_t op1,  int32_t op2)
{
  int32_t result;

  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
344 
/**
  \brief   Saturating Subtract
  \details Emits a QSUB instruction: signed 32-bit subtraction with saturation.
  \param [in]    op1  minuend
  \param [in]    op2  subtrahend
  \return             saturated difference op1 - op2
 */
__STATIC_FORCEINLINE  int32_t __QSUB( int32_t op1,  int32_t op2)
{
  int32_t result;

  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
352 
/**
  \brief   Pack Halfword Bottom-Top:
           result[15:0] = ARG1[15:0], result[31:16] = (ARG2 << ARG3)[31:16].
  \note    Function-like macro: arguments may be evaluated more than once;
           ARG3 should be a shift amount in 0..31.
 */
#define __PKHBT(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) |  \
                                           ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )

/**
  \brief   Pack Halfword Top-Bottom:
           result[31:16] = ARG1[31:16], result[15:0] = (ARG2 >> ARG3)[15:0].
 */
#define __PKHTB(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0xFFFF0000UL) |  \
                                           ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)  )
358 
/**
  \brief   Signed Most significant word Multiply Accumulate
  \details Emits SMMLA: op3 + the most-significant 32 bits of (op1 * op2).
  \param [in]    op1  first multiplicand
  \param [in]    op2  second multiplicand
  \param [in]    op3  accumulator
  \return             op3 + (((int64_t)op1 * op2) >> 32)
 */
__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
{
  int32_t result;

  __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r"  (op1), "r" (op2), "r" (op3) );
  return(result);
}
366 
367 #endif /* (__ARM_FEATURE_DSP == 1) */
368 
369 /* ###########################  Core Function Access  ########################### */
370 
/**
  \brief   Enable IRQ Interrupts
  \details Enables IRQ interrupts by clearing the I-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __enable_irq(void)
{
  /* "memory" clobber: stop the compiler from reordering memory accesses
     across the interrupt-state change. */
  __ASM volatile ("cpsie i" : : : "memory");
}

/**
  \brief   Disable IRQ Interrupts
  \details Disables IRQ interrupts by setting the I-bit in the CPSR.
  Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __disable_irq(void)
{
  __ASM volatile ("cpsid i" : : : "memory");
}

/**
  \brief   Enable FIQ
  \details Enables FIQ interrupts by clearing the F-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __enable_fault_irq(void)
{
  __ASM volatile ("cpsie f" : : : "memory");
}

/**
  \brief   Disable FIQ
  \details Disables FIQ interrupts by setting the F-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __disable_fault_irq(void)
{
  __ASM volatile ("cpsid f" : : : "memory");
}
410 
/**
  \brief   Get FPSCR
  \details Returns the current value of the Floating Point Status/Control register.
  \return               Floating Point Status/Control register value
  \note    Requires FP access to be enabled (see __FPU_Enable below).
 */
#define __get_FPSCR      __builtin_arm_get_fpscr

/**
  \brief   Set FPSCR
  \details Assigns the given value to the Floating Point Status/Control register.
  \param [in]    fpscr  Floating Point Status/Control value to set
 */
#define __set_FPSCR      __builtin_arm_set_fpscr
424 
/** \brief  Get CPSR Register
    \return               CPSR Register value
 */
__STATIC_FORCEINLINE uint32_t __get_CPSR(void)
{
  uint32_t result;
  __ASM volatile("MRS %0, cpsr" : "=r" (result) );
  return(result);
}

/** \brief  Set CPSR Register
    \param [in]    cpsr  CPSR value to set
    \note   Writing the CPSR can change condition flags and interrupt masks,
            hence the "cc" and "memory" clobbers.
 */
__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr)
{
  __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory");
}
442 
/** \brief  Get Mode
    \return                Processor Mode
 */
__STATIC_FORCEINLINE uint32_t __get_mode(void)
{
  /* Processor mode is the low 5 bits (M[4:0]) of the CPSR. */
  return (__get_CPSR() & 0x1FU);
}

/** \brief  Set Mode
    \param [in]    mode  Mode value to set
    \note   cpsr_c writes only the control field of the CPSR (mode bits and
            I/F masks), leaving the condition flags untouched.
 */
__STATIC_FORCEINLINE void __set_mode(uint32_t mode)
{
  __ASM volatile("MSR  cpsr_c, %0" : : "r" (mode) : "memory");
}
458 
/** \brief  Get Stack Pointer
    \return Stack Pointer value (SP of the current mode)
 */
__STATIC_FORCEINLINE uint32_t __get_SP(void)
{
  uint32_t result;
  __ASM volatile("MOV  %0, sp" : "=r" (result) : : "memory");
  return result;
}

/** \brief  Set Stack Pointer
    \param [in]    stack  Stack Pointer value to set (SP of the current mode)
 */
__STATIC_FORCEINLINE void __set_SP(uint32_t stack)
{
  __ASM volatile("MOV  sp, %0" : : "r" (stack) : "memory");
}
476 
/** \brief  Get USR/SYS Stack Pointer
    \return USR/SYS Stack Pointer value
    \note   Temporarily switches to SYS mode (CPS #0x1F), which banks the same
            SP as USR mode, reads SP, then restores the saved mode.
 */
__STATIC_FORCEINLINE uint32_t __get_SP_usr(void)
{
  uint32_t cpsr;
  uint32_t result;
  __ASM volatile(
    "MRS     %0, cpsr   \n"
    "CPS     #0x1F      \n" // switch to SYS mode; no effect in USR mode
    "MOV     %1, sp     \n"
    "MSR     cpsr_c, %0 \n" // restore original mode; no effect in USR mode
    "ISB" :  "=r"(cpsr), "=r"(result) : : "memory"
   );
  return result;
}

/** \brief  Set USR/SYS Stack Pointer
    \param [in]    topOfProcStack  USR/SYS Stack Pointer value to set
    \note   Same temporary SYS-mode switch as __get_SP_usr.
 */
__STATIC_FORCEINLINE void __set_SP_usr(uint32_t topOfProcStack)
{
  uint32_t cpsr;
  __ASM volatile(
    "MRS     %0, cpsr   \n"
    "CPS     #0x1F      \n" // switch to SYS mode; no effect in USR mode
    "MOV     sp, %1     \n"
    "MSR     cpsr_c, %0 \n" // restore original mode; no effect in USR mode
    "ISB" : "=r"(cpsr) : "r" (topOfProcStack) : "memory"
   );
}
508 
509 /** \brief  Get FPEXC
510     \return               Floating Point Exception Control register value
511  */
__get_FPEXC(void)512 __STATIC_FORCEINLINE uint32_t __get_FPEXC(void)
513 {
514 #if (__FPU_PRESENT == 1)
515   uint32_t result;
516   __ASM volatile("VMRS %0, fpexc" : "=r" (result) : : "memory");
517   return(result);
518 #else
519   return(0);
520 #endif
521 }
522 
523 /** \brief  Set FPEXC
524     \param [in]    fpexc  Floating Point Exception Control value to set
525  */
__set_FPEXC(uint32_t fpexc)526 __STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc)
527 {
528 #if (__FPU_PRESENT == 1)
529   __ASM volatile ("VMSR fpexc, %0" : : "r" (fpexc) : "memory");
530 #endif
531 }
532 
/*
 * Include common core functions to access Coprocessor 15 registers
 */

/* MRC: read coprocessor register (coproc cp, opcodes op1/op2, registers CRn/CRm) into Rt */
#define __get_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" )
/* MCR: write Rt to a coprocessor register */
#define __set_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" )
/* MRRC/MCRR: 64-bit coprocessor transfers; %Q0/%R0 select the low/high word of Rt */
#define __get_CP64(cp, op1, Rt, CRm)         __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm  : "=r" (Rt) : : "memory" )
#define __set_CP64(cp, op1, Rt, CRm)         __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm  : : "r" (Rt) : "memory" )
541 
542 #include "cmsis_cp15.h"
543 
/** \brief  Enable Floating Point Unit

  Critical section, called from undef handler, so systick is disabled

  \note NOTE(review): unconditionally accesses CPACR and FPEXC; must only be
        called on cores where an FPU is actually present.
 */
__STATIC_INLINE void __FPU_Enable(void)
{
  __ASM volatile(
    //Permit access to VFP/NEON, registers by modifying CPACR (CP10/CP11 full access)
    "        MRC     p15,0,R1,c1,c0,2  \n"
    "        ORR     R1,R1,#0x00F00000 \n"
    "        MCR     p15,0,R1,c1,c0,2  \n"

    //Ensure that subsequent instructions occur in the context of VFP/NEON access permitted
    "        ISB                       \n"

    //Enable VFP/NEON (set FPEXC.EN, bit 30)
    "        VMRS    R1,FPEXC          \n"
    "        ORR     R1,R1,#0x40000000 \n"
    "        VMSR    FPEXC,R1          \n"

    //Initialise VFP/NEON registers to 0
    "        MOV     R2,#0             \n"

    //Initialise D16 registers to 0
    "        VMOV    D0, R2,R2         \n"
    "        VMOV    D1, R2,R2         \n"
    "        VMOV    D2, R2,R2         \n"
    "        VMOV    D3, R2,R2         \n"
    "        VMOV    D4, R2,R2         \n"
    "        VMOV    D5, R2,R2         \n"
    "        VMOV    D6, R2,R2         \n"
    "        VMOV    D7, R2,R2         \n"
    "        VMOV    D8, R2,R2         \n"
    "        VMOV    D9, R2,R2         \n"
    "        VMOV    D10,R2,R2         \n"
    "        VMOV    D11,R2,R2         \n"
    "        VMOV    D12,R2,R2         \n"
    "        VMOV    D13,R2,R2         \n"
    "        VMOV    D14,R2,R2         \n"
    "        VMOV    D15,R2,R2         \n"

#if (defined(__ARM_NEON) && (__ARM_NEON == 1))
    //Initialise D32 registers to 0 (upper bank only exists with Advanced SIMD)
    "        VMOV    D16,R2,R2         \n"
    "        VMOV    D17,R2,R2         \n"
    "        VMOV    D18,R2,R2         \n"
    "        VMOV    D19,R2,R2         \n"
    "        VMOV    D20,R2,R2         \n"
    "        VMOV    D21,R2,R2         \n"
    "        VMOV    D22,R2,R2         \n"
    "        VMOV    D23,R2,R2         \n"
    "        VMOV    D24,R2,R2         \n"
    "        VMOV    D25,R2,R2         \n"
    "        VMOV    D26,R2,R2         \n"
    "        VMOV    D27,R2,R2         \n"
    "        VMOV    D28,R2,R2         \n"
    "        VMOV    D29,R2,R2         \n"
    "        VMOV    D30,R2,R2         \n"
    "        VMOV    D31,R2,R2         \n"
#endif

    //Initialise FPSCR to a known state
    "        VMRS    R1,FPSCR          \n"
    "        LDR     R2,=0x00086060    \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
    "        AND     R1,R1,R2          \n"
    "        VMSR    FPSCR,R1            "
    : : : "cc", "r1", "r2"
  );
}
613 
614 #endif /* __CMSIS_ARMCLANG_H */
615