/**************************************************************************//**
 * @file     cmsis_gcc.h
 * @brief    CMSIS compiler GCC header file
 * @version  V5.3.0
 * @date     26. March 2020
 ******************************************************************************/
/*
 * Copyright (c) 2009-2020 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __CMSIS_GCC_H
#define __CMSIS_GCC_H

/* ignore some GCC warnings */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-conversion"
#pragma GCC diagnostic ignored "-Wconversion"
#pragma GCC diagnostic ignored "-Wunused-parameter"

/* Fallback for __has_builtin */
#ifndef __has_builtin
#define __has_builtin(x) (0)
#endif

/* CMSIS compiler specific defines */
#ifndef __ASM
#define __ASM __asm
#endif
#ifndef __INLINE
#define __INLINE inline
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline
#endif
#ifndef __NO_RETURN
#define __NO_RETURN __attribute__((__noreturn__))
#endif
#ifndef __USED
#define __USED __attribute__((used))
#endif
#ifndef __WEAK
#define __WEAK __attribute__((weak))
#endif
#ifndef __PACKED
#define __PACKED __attribute__((packed, aligned(1)))
#endif
#ifndef __PACKED_STRUCT
#define __PACKED_STRUCT struct __attribute__((packed, aligned(1)))
#endif
#ifndef __PACKED_UNION
#define __PACKED_UNION union __attribute__((packed, aligned(1)))
#endif
#ifndef __UNALIGNED_UINT32 /* deprecated */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpacked"
#pragma GCC diagnostic ignored "-Wattributes"
struct __attribute__((packed)) T_UINT32 { uint32_t v; };
#pragma GCC diagnostic pop
#define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v)
#endif
#ifndef __UNALIGNED_UINT16_WRITE
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpacked"
#pragma GCC diagnostic ignored "-Wattributes"
__PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
#pragma GCC diagnostic pop
#define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef __UNALIGNED_UINT16_READ
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpacked"
#pragma GCC diagnostic ignored "-Wattributes"
__PACKED_STRUCT T_UINT16_READ { uint16_t v; };
#pragma GCC diagnostic pop
#define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v)
#endif
#ifndef __UNALIGNED_UINT32_WRITE
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpacked"
#pragma GCC diagnostic ignored "-Wattributes"
__PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
#pragma GCC diagnostic pop
#define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef __UNALIGNED_UINT32_READ
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpacked"
#pragma GCC diagnostic ignored "-Wattributes"
__PACKED_STRUCT T_UINT32_READ { uint32_t v; };
#pragma GCC diagnostic pop
#define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v)
#endif
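
/*
  Usage sketch (illustrative only, not part of CMSIS): reading and writing
  values at arbitrarily aligned addresses via the packed-struct accessors,
  which avoids unaligned-access faults on cores that do not support them.
  The buffer layout is an assumption for the example.

    uint8_t  frame[64];
    uint16_t len = __UNALIGNED_UINT16_READ(&frame[1]);   // safe at an odd offset
    __UNALIGNED_UINT32_WRITE(&frame[3], 0x12345678u);    // packed 32-bit store
*/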
#ifndef __ALIGNED
#define __ALIGNED(x) __attribute__((aligned(x)))
#endif
#ifndef __RESTRICT
#define __RESTRICT __restrict
#endif
#ifndef __COMPILER_BARRIER
#define __COMPILER_BARRIER() __ASM volatile("":::"memory")
#endif

/* #########################  Startup and Lowlevel Init  ######################## */

#ifndef __PROGRAM_START

/**
  \brief   Initializes data and bss sections
  \details This default implementation initializes all data and bss sections,
           relying on .copy.table and .zero.table being specified properly
           in the linker script that is used.
 */
__STATIC_FORCEINLINE __NO_RETURN void __cmsis_start(void)
{
  extern void _start(void) __NO_RETURN;

  typedef struct {
    uint32_t const* src;
    uint32_t* dest;
    uint32_t wlen;
  } __copy_table_t;

  typedef struct {
    uint32_t* dest;
    uint32_t wlen;
  } __zero_table_t;

  extern const __copy_table_t __copy_table_start__;
  extern const __copy_table_t __copy_table_end__;
  extern const __zero_table_t __zero_table_start__;
  extern const __zero_table_t __zero_table_end__;

  for (__copy_table_t const* pTable = &__copy_table_start__; pTable < &__copy_table_end__; ++pTable) {
    for(uint32_t i=0u; i<pTable->wlen; ++i) {
      pTable->dest[i] = pTable->src[i];
    }
  }

  for (__zero_table_t const* pTable = &__zero_table_start__; pTable < &__zero_table_end__; ++pTable) {
    for(uint32_t i=0u; i<pTable->wlen; ++i) {
      pTable->dest[i] = 0u;
    }
  }

  _start();
}

#define __PROGRAM_START __cmsis_start
#endif
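
/*
  Usage sketch (illustrative only; the Reset_Handler and SystemInit wiring
  follow the typical CMSIS startup pattern and are assumptions, not part of
  this file):

    void Reset_Handler(void)
    {
      SystemInit();          // device-specific low-level initialization
      __PROGRAM_START();     // copy .data, zero .bss, then branch to _start()
    }
*/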

#ifndef __INITIAL_SP
#define __INITIAL_SP __StackTop
#endif

#ifndef __STACK_LIMIT
#define __STACK_LIMIT __StackLimit
#endif

#ifndef __VECTOR_TABLE
#define __VECTOR_TABLE __Vectors
#endif

#ifndef __VECTOR_TABLE_ATTRIBUTE
#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section(".vectors")))
#endif

/* ###########################  Core Function Access  ########################### */
/** \ingroup  CMSIS_Core_FunctionInterface
    \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
  @{
 */

/**
  \brief   Enable IRQ Interrupts
  \details Enables IRQ interrupts by clearing the I-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __enable_irq(void)
{
  __ASM volatile ("cpsie i" : : : "memory");
}


/**
  \brief   Disable IRQ Interrupts
  \details Disables IRQ interrupts by setting the I-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __disable_irq(void)
{
  __ASM volatile ("cpsid i" : : : "memory");
}


/**
  \brief   Get Control Register
  \details Returns the content of the Control Register.
  \return  Control Register value
 */
__STATIC_FORCEINLINE uint32_t __get_CONTROL(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, control" : "=r" (result) );
  return(result);
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Get Control Register (non-secure)
  \details Returns the content of the non-secure Control Register when in secure state.
  \return  non-secure Control Register value
 */
__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, control_ns" : "=r" (result) );
  return(result);
}
#endif


/**
  \brief   Set Control Register
  \details Writes the given value to the Control Register.
  \param [in]    control  Control Register value to set
 */
__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control)
{
  __ASM volatile ("MSR control, %0" : : "r" (control) : "memory");
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Set Control Register (non-secure)
  \details Writes the given value to the non-secure Control Register when in secure state.
  \param [in]    control  Control Register value to set
 */
__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control)
{
  __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory");
}
#endif


/**
  \brief   Get IPSR Register
  \details Returns the content of the IPSR Register.
  \return  IPSR Register value
 */
__STATIC_FORCEINLINE uint32_t __get_IPSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, ipsr" : "=r" (result) );
  return(result);
}


/**
  \brief   Get APSR Register
  \details Returns the content of the APSR Register.
  \return  APSR Register value
 */
__STATIC_FORCEINLINE uint32_t __get_APSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, apsr" : "=r" (result) );
  return(result);
}


/**
  \brief   Get xPSR Register
  \details Returns the content of the xPSR Register.
  \return  xPSR Register value
 */
__STATIC_FORCEINLINE uint32_t __get_xPSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, xpsr" : "=r" (result) );
  return(result);
}


/**
  \brief   Get Process Stack Pointer
  \details Returns the current value of the Process Stack Pointer (PSP).
  \return  PSP Register value
 */
__STATIC_FORCEINLINE uint32_t __get_PSP(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, psp" : "=r" (result) );
  return(result);
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Get Process Stack Pointer (non-secure)
  \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state.
  \return  PSP Register value
 */
__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, psp_ns" : "=r" (result) );
  return(result);
}
#endif


/**
  \brief   Set Process Stack Pointer
  \details Assigns the given value to the Process Stack Pointer (PSP).
  \param [in]    topOfProcStack  Process Stack Pointer value to set
 */
__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack)
{
  __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : );
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Set Process Stack Pointer (non-secure)
  \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state.
  \param [in]    topOfProcStack  Process Stack Pointer value to set
 */
__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack)
{
  __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : );
}
#endif


/**
  \brief   Get Main Stack Pointer
  \details Returns the current value of the Main Stack Pointer (MSP).
  \return  MSP Register value
 */
__STATIC_FORCEINLINE uint32_t __get_MSP(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, msp" : "=r" (result) );
  return(result);
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Get Main Stack Pointer (non-secure)
  \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state.
  \return  MSP Register value
 */
__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, msp_ns" : "=r" (result) );
  return(result);
}
#endif


/**
  \brief   Set Main Stack Pointer
  \details Assigns the given value to the Main Stack Pointer (MSP).
  \param [in]    topOfMainStack  Main Stack Pointer value to set
 */
__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack)
{
  __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : );
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Set Main Stack Pointer (non-secure)
  \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state.
  \param [in]    topOfMainStack  Main Stack Pointer value to set
 */
__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack)
{
  __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : );
}
#endif


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Get Stack Pointer (non-secure)
  \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state.
  \return  SP Register value
 */
__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, sp_ns" : "=r" (result) );
  return(result);
}


/**
  \brief   Set Stack Pointer (non-secure)
  \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state.
  \param [in]    topOfStack  Stack Pointer value to set
 */
__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack)
{
  __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : );
}
#endif


/**
  \brief   Get Priority Mask
  \details Returns the current state of the priority mask bit from the Priority Mask Register.
  \return  Priority Mask value
 */
__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, primask" : "=r" (result) );
  return(result);
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Get Priority Mask (non-secure)
  \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state.
  \return  Priority Mask value
 */
__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, primask_ns" : "=r" (result) );
  return(result);
}
#endif


/**
  \brief   Set Priority Mask
  \details Assigns the given value to the Priority Mask Register.
  \param [in]    priMask  Priority Mask
 */
__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask)
{
  __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory");
}
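
/*
  Usage sketch (illustrative only): a nesting-safe critical section that saves
  the current PRIMASK state, masks IRQs, and restores the saved state instead
  of unconditionally re-enabling interrupts.

    uint32_t primask = __get_PRIMASK();
    __disable_irq();
    // ... short critical section ...
    __set_PRIMASK(primask);   // restore; stays masked if it was masked before
*/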


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Set Priority Mask (non-secure)
  \details Assigns the given value to the non-secure Priority Mask Register when in secure state.
  \param [in]    priMask  Priority Mask
 */
__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask)
{
  __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory");
}
#endif


#if ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
     (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
     (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    )
/**
  \brief   Enable FIQ
  \details Enables FIQ interrupts by clearing the F-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __enable_fault_irq(void)
{
  __ASM volatile ("cpsie f" : : : "memory");
}


/**
  \brief   Disable FIQ
  \details Disables FIQ interrupts by setting the F-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __disable_fault_irq(void)
{
  __ASM volatile ("cpsid f" : : : "memory");
}


/**
  \brief   Get Base Priority
  \details Returns the current value of the Base Priority register.
  \return  Base Priority register value
 */
__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, basepri" : "=r" (result) );
  return(result);
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Get Base Priority (non-secure)
  \details Returns the current value of the non-secure Base Priority register when in secure state.
  \return  Base Priority register value
 */
__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) );
  return(result);
}
#endif


/**
  \brief   Set Base Priority
  \details Assigns the given value to the Base Priority register.
  \param [in]    basePri  Base Priority value to set
 */
__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri)
{
  __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory");
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Set Base Priority (non-secure)
  \details Assigns the given value to the non-secure Base Priority register when in secure state.
  \param [in]    basePri  Base Priority value to set
 */
__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri)
{
  __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory");
}
#endif


/**
  \brief   Set Base Priority with condition
  \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled,
           or the new value increases the BASEPRI priority level.
  \param [in]    basePri  Base Priority value to set
 */
__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri)
{
  __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory");
}
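
/*
  Usage sketch (illustrative only): masking only interrupts below a given
  priority instead of all interrupts. __NVIC_PRIO_BITS is the device-defined
  number of implemented priority bits; the threshold value 5 is an assumption
  for the example.

    uint32_t basepri = __get_BASEPRI();
    __set_BASEPRI_MAX(5u << (8u - __NVIC_PRIO_BITS));  // only ever raises the mask
    // ... section protected against interrupts with priority value >= 5 ...
    __set_BASEPRI(basepri);                            // restore previous mask
*/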


/**
  \brief   Get Fault Mask
  \details Returns the current value of the Fault Mask register.
  \return  Fault Mask register value
 */
__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, faultmask" : "=r" (result) );
  return(result);
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Get Fault Mask (non-secure)
  \details Returns the current value of the non-secure Fault Mask register when in secure state.
  \return  Fault Mask register value
 */
__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) );
  return(result);
}
#endif


/**
  \brief   Set Fault Mask
  \details Assigns the given value to the Fault Mask register.
  \param [in]    faultMask  Fault Mask value to set
 */
__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask)
{
  __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory");
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Set Fault Mask (non-secure)
  \details Assigns the given value to the non-secure Fault Mask register when in secure state.
  \param [in]    faultMask  Fault Mask value to set
 */
__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask)
{
  __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory");
}
#endif

#endif /* ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
           (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
           (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    ) */


#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
     (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    )

/**
  \brief   Get Process Stack Pointer Limit
  Devices without ARMv8-M Main Extensions (e.g. Cortex-M23) lack the non-secure
  Stack Pointer Limit register, hence zero is always returned in non-secure
  state.

  \details Returns the current value of the Process Stack Pointer Limit (PSPLIM).
  \return  PSPLIM Register value
 */
__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
     (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
  // without main extensions, the non-secure PSPLIM is RAZ/WI
  return 0U;
#else
  uint32_t result;
  __ASM volatile ("MRS %0, psplim" : "=r" (result) );
  return result;
#endif
}

#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Get Process Stack Pointer Limit (non-secure)
  Devices without ARMv8-M Main Extensions (e.g. Cortex-M23) lack the non-secure
  Stack Pointer Limit register, hence zero is always returned.

  \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state.
  \return  PSPLIM Register value
 */
__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
  // without main extensions, the non-secure PSPLIM is RAZ/WI
  return 0U;
#else
  uint32_t result;
  __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) );
  return result;
#endif
}
#endif


/**
  \brief   Set Process Stack Pointer Limit
  Devices without ARMv8-M Main Extensions (e.g. Cortex-M23) lack the non-secure
  Stack Pointer Limit register, hence the write is silently ignored in non-secure
  state.

  \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM).
  \param [in]    ProcStackPtrLimit  Process Stack Pointer Limit value to set
 */
__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
     (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
  // without main extensions, the non-secure PSPLIM is RAZ/WI
  (void)ProcStackPtrLimit;
#else
  __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit));
#endif
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Set Process Stack Pointer Limit (non-secure)
  Devices without ARMv8-M Main Extensions (e.g. Cortex-M23) lack the non-secure
  Stack Pointer Limit register, hence the write is silently ignored.

  \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state.
  \param [in]    ProcStackPtrLimit  Process Stack Pointer Limit value to set
 */
__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
  // without main extensions, the non-secure PSPLIM is RAZ/WI
  (void)ProcStackPtrLimit;
#else
  __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit));
#endif
}
#endif


/**
  \brief   Get Main Stack Pointer Limit
  Devices without ARMv8-M Main Extensions (e.g. Cortex-M23) lack the non-secure
  Stack Pointer Limit register, hence zero is always returned in non-secure
  state.

  \details Returns the current value of the Main Stack Pointer Limit (MSPLIM).
  \return  MSPLIM Register value
 */
__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
     (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
  // without main extensions, the non-secure MSPLIM is RAZ/WI
  return 0U;
#else
  uint32_t result;
  __ASM volatile ("MRS %0, msplim" : "=r" (result) );
  return result;
#endif
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Get Main Stack Pointer Limit (non-secure)
  Devices without ARMv8-M Main Extensions (e.g. Cortex-M23) lack the non-secure
  Stack Pointer Limit register, hence zero is always returned.

  \details Returns the current value of the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state.
  \return  MSPLIM Register value
 */
__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
  // without main extensions, the non-secure MSPLIM is RAZ/WI
  return 0U;
#else
  uint32_t result;
  __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) );
  return result;
#endif
}
#endif


/**
  \brief   Set Main Stack Pointer Limit
  Devices without ARMv8-M Main Extensions (e.g. Cortex-M23) lack the non-secure
  Stack Pointer Limit register, hence the write is silently ignored in non-secure
  state.

  \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM).
  \param [in]    MainStackPtrLimit  Main Stack Pointer Limit value to set
 */
__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
     (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
  // without main extensions, the non-secure MSPLIM is RAZ/WI
  (void)MainStackPtrLimit;
#else
  __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit));
#endif
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Set Main Stack Pointer Limit (non-secure)
  Devices without ARMv8-M Main Extensions (e.g. Cortex-M23) lack the non-secure
  Stack Pointer Limit register, hence the write is silently ignored.

  \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state.
  \param [in]    MainStackPtrLimit  Main Stack Pointer Limit value to set
 */
__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
  // without main extensions, the non-secure MSPLIM is RAZ/WI
  (void)MainStackPtrLimit;
#else
  __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit));
#endif
}
#endif

#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
           (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    ) */


/**
  \brief   Get FPSCR
  \details Returns the current value of the Floating Point Status/Control register.
  \return  Floating Point Status/Control register value
 */
__STATIC_FORCEINLINE uint32_t __get_FPSCR(void)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
     (defined (__FPU_USED   ) && (__FPU_USED    == 1U))    )
#if __has_builtin(__builtin_arm_get_fpscr)
// Re-enable using built-in when GCC has been fixed
// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
  /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
  return __builtin_arm_get_fpscr();
#else
  uint32_t result;

  __ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
  return(result);
#endif
#else
  return(0U);
#endif
}


/**
  \brief   Set FPSCR
  \details Assigns the given value to the Floating Point Status/Control register.
  \param [in]    fpscr  Floating Point Status/Control value to set
 */
__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
     (defined (__FPU_USED   ) && (__FPU_USED    == 1U))    )
#if __has_builtin(__builtin_arm_set_fpscr)
// Re-enable using built-in when GCC has been fixed
// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
  /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
  __builtin_arm_set_fpscr(fpscr);
#else
  __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory");
#endif
#else
  (void)fpscr;
#endif
}
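
/*
  Usage sketch (illustrative only; the bit positions are the Arm FPSCR layout,
  where the cumulative exception flags IOC/DZC/OFC/UFC/IXC occupy bits [4:0]):

    uint32_t fpscr = __get_FPSCR();
    __set_FPSCR(fpscr & ~0x1Fu);   // clear the sticky exception flags only
*/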


/*@} end of CMSIS_Core_RegAccFunctions */


/* ##########################  Core Instruction Access  ######################### */
/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
  Access to dedicated instructions
  @{
*/

/* Define macros for porting to both thumb1 and thumb2.
 * For thumb1, use low registers (r0-r7), specified by constraint "l".
 * Otherwise, use general registers, specified by constraint "r". */
#if defined (__thumb__) && !defined (__thumb2__)
#define __CMSIS_GCC_OUT_REG(r) "=l" (r)
#define __CMSIS_GCC_RW_REG(r) "+l" (r)
#define __CMSIS_GCC_USE_REG(r) "l" (r)
#else
#define __CMSIS_GCC_OUT_REG(r) "=r" (r)
#define __CMSIS_GCC_RW_REG(r) "+r" (r)
#define __CMSIS_GCC_USE_REG(r) "r" (r)
#endif

/**
  \brief   No Operation
  \details No Operation does nothing. This instruction can be used for code alignment purposes.
 */
#define __NOP()                             __ASM volatile ("nop")

/**
  \brief   Wait For Interrupt
  \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
 */
#define __WFI()                             __ASM volatile ("wfi":::"memory")


/**
  \brief   Wait For Event
  \details Wait For Event is a hint instruction that permits the processor to enter
           a low-power state until one of a number of events occurs.
 */
#define __WFE()                             __ASM volatile ("wfe":::"memory")


/**
  \brief   Send Event
  \details Send Event is a hint instruction. It causes an event to be signaled to the CPU.
 */
#define __SEV()                             __ASM volatile ("sev")


/**
  \brief   Instruction Synchronization Barrier
  \details Instruction Synchronization Barrier flushes the pipeline in the processor,
           so that all instructions following the ISB are fetched from cache or memory,
           after the instruction has been completed.
 */
__STATIC_FORCEINLINE void __ISB(void)
{
  __ASM volatile ("isb 0xF":::"memory");
}


/**
  \brief   Data Synchronization Barrier
  \details Acts as a special kind of Data Memory Barrier.
           It completes when all explicit memory accesses before this instruction complete.
 */
__STATIC_FORCEINLINE void __DSB(void)
{
  __ASM volatile ("dsb 0xF":::"memory");
}


/**
  \brief   Data Memory Barrier
  \details Ensures the apparent order of the explicit memory operations before
           and after the instruction, without ensuring their completion.
 */
__STATIC_FORCEINLINE void __DMB(void)
{
  __ASM volatile ("dmb 0xF":::"memory");
}
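
/*
  Usage sketch (illustrative only): a producer writing data to a buffer and
  then setting a ready flag. The DMB ensures the flag cannot be observed
  before the data write; the variable names are assumptions for the example.

    shared_buffer[0] = payload;   // 1. write the data
    __DMB();                      // 2. order the data write before the flag write
    data_ready = 1u;              // 3. publish
*/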


/**
  \brief   Reverse byte order (32 bit)
  \details Reverses the byte order in an unsigned integer value. For example, 0x12345678 becomes 0x78563412.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__STATIC_FORCEINLINE uint32_t __REV(uint32_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
  return __builtin_bswap32(value);
#else
  uint32_t result;

  __ASM ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return result;
#endif
}


/**
  \brief   Reverse byte order (16 bit)
  \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value)
{
  uint32_t result;

  __ASM ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return result;
}


/**
  \brief   Reverse byte order (16 bit)
  \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__STATIC_FORCEINLINE int16_t __REVSH(int16_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  return (int16_t)__builtin_bswap16(value);
#else
  int16_t result;

  __ASM ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return result;
#endif
}


/**
  \brief   Rotate Right in unsigned value (32 bit)
  \details Rotate Right provides the value of the contents of a register rotated by a variable number of bits.
  \param [in]    op1  Value to rotate
  \param [in]    op2  Number of Bits to rotate
  \return             Rotated value
 */
__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
{
  op2 %= 32U;
  if (op2 == 0U)
  {
    return op1;
  }
  return (op1 >> op2) | (op1 << (32U - op2));
}
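
/* Worked example (illustrative): __ROR(0x12345678u, 8u) yields 0x78123456u,
   and __ROR(x, 0u) returns x unchanged thanks to the explicit zero check
   (a shift by 32 would be undefined behaviour in C). */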


/**
  \brief   Breakpoint
  \details Causes the processor to enter Debug state.
           Debug tools can use this to investigate system state when the instruction at a particular address is reached.
  \param [in]    value  is ignored by the processor.
                 If required, a debugger can use it to store additional information about the breakpoint.
 */
#define __BKPT(value)                       __ASM volatile ("bkpt "#value)


/**
  \brief   Reverse bit order of value
  \details Reverses the bit order of the given value.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value)
{
  uint32_t result;

#if ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
     (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
     (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    )
  __ASM ("rbit %0, %1" : "=r" (result) : "r" (value) );
#else
  uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */

  result = value;                      /* r will be reversed bits of v; first get LSB of v */
  for (value >>= 1U; value != 0U; value >>= 1U)
  {
    result <<= 1U;
    result |= value & 1U;
    s--;
  }
  result <<= s;                        /* shift when v's highest bits are zero */
#endif
  return result;
}


/**
  \brief   Count leading zeros
  \details Counts the number of leading zeros of a data value.
  \param [in]  value  Value to count the leading zeros
  \return             number of leading zeros in value
 */
__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)
{
  /* Even though __builtin_clz produces a CLZ instruction on ARM, formally
     __builtin_clz(0) is undefined behaviour, so handle this case specially.
     This guarantees ARM-compatible results if happening to compile on a non-ARM
     target, and ensures the compiler doesn't decide to activate any
     optimisations using the logic "value was passed to __builtin_clz, so it
     is non-zero".
     ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a
     single CLZ instruction.
   */
  if (value == 0U)
  {
    return 32U;
  }
  return __builtin_clz(value);
}
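
/* Worked examples (illustrative): __CLZ(0x00010000u) returns 15 (bit 16 is
   the highest set bit), __CLZ(0x80000000u) returns 0, and __CLZ(0u) returns
   32, matching the Arm CLZ instruction rather than __builtin_clz(0). */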


#if ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
     (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
     (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
     (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    )
/**
  \brief   LDR Exclusive (8 bit)
  \details Executes an exclusive LDR instruction for 8 bit values.
  \param [in]    addr  Pointer to data
  \return              value of type uint8_t at (*addr)
 */
__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr)
{
  uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
#else
  /* Prior to GCC 4.8, the "Q" constraint is expanded to [rx, #0], which is
     not accepted by the assembler, so the following less efficient pattern
     has to be used instead.
   */
  __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
  return ((uint8_t) result); /* Add explicit type cast here */
}


/**
  \brief   LDR Exclusive (16 bit)
  \details Executes an exclusive LDR instruction for 16 bit values.
  \param [in]    addr  Pointer to data
  \return              value of type uint16_t at (*addr)
 */
__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr)
{
  uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
#else
  /* Prior to GCC 4.8, the "Q" constraint is expanded to [rx, #0], which is
     not accepted by the assembler, so the following less efficient pattern
     has to be used instead.
   */
  __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
  return ((uint16_t) result); /* Add explicit type cast here */
}


/**
  \brief   LDR Exclusive (32 bit)
  \details Executes an exclusive LDR instruction for 32 bit values.
  \param [in]    addr  Pointer to data
  \return              value of type uint32_t at (*addr)
 */
__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr)
{
  uint32_t result;

  __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
  return(result);
}


/**
  \brief   STR Exclusive (8 bit)
  \details Executes an exclusive STR instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]  addr   Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
{
  uint32_t result;

  __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
  return(result);
}


/**
  \brief   STR Exclusive (16 bit)
  \details Executes an exclusive STR instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]  addr   Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
{
  uint32_t result;

  __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
  return(result);
}


/**
  \brief   STR Exclusive (32 bit)
  \details Executes an exclusive STR instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]  addr   Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
{
  uint32_t result;

  __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
  return(result);
}


/**
  \brief   Remove the exclusive lock
  \details Removes the exclusive lock which is created by LDREX.
 */
__STATIC_FORCEINLINE void __CLREX(void)
{
  __ASM volatile ("clrex" ::: "memory");
}
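
/*
  Usage sketch (illustrative only): a lock-free 32-bit increment built from
  the exclusive access pair. The store returns 1 if the reservation was lost
  (e.g. to an interrupt), so the loop retries until the update is atomic.

    uint32_t atomic_increment_sketch(volatile uint32_t *counter)
    {
      uint32_t val;
      do {
        val = __LDREXW(counter) + 1u;     // load with exclusive reservation
      } while (__STREXW(val, counter) != 0u);  // retry if the store failed
      return val;
    }
*/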

#endif /* ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
           (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
           (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
           (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    ) */


#if ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
     (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
     (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    )
/**
  \brief   Signed Saturate
  \details Saturates a signed value.
  \param [in]  ARG1  Value to be saturated
  \param [in]  ARG2  Bit position to saturate to (1..32)
  \return            Saturated value
 */
#define __SSAT(ARG1, ARG2) \
__extension__ \
({                          \
  int32_t __RES, __ARG1 = (ARG1); \
  __ASM volatile ("ssat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) : "cc" ); \
  __RES; \
 })


/**
  \brief   Unsigned Saturate
  \details Saturates an unsigned value.
  \param [in]  ARG1  Value to be saturated
  \param [in]  ARG2  Bit position to saturate to (0..31)
  \return            Saturated value
 */
#define __USAT(ARG1, ARG2) \
__extension__ \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM volatile ("usat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) : "cc" ); \
  __RES; \
 })
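
/* Worked examples (illustrative): __SSAT(200, 8) returns 127 and
   __SSAT(-200, 8) returns -128 (clamped to the signed 8-bit range),
   while __USAT(-5, 8) returns 0 and __USAT(300, 8) returns 255. */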


/**
  \brief   Rotate Right with Extend (32 bit)
  \details Moves each bit of a bitstring right by one bit.
           The carry input is shifted in at the left end of the bitstring.
  \param [in]    value  Value to rotate
  \return               Rotated value
 */
__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value)
{
  uint32_t result;

  __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return(result);
}


/**
  \brief   LDRT Unprivileged (8 bit)
  \details Executes an unprivileged LDRT instruction for 8 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint8_t at (*ptr)
 */
__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr)
{
  uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) );
#else
  /* Prior to GCC 4.8, the "Q" constraint is expanded to [rx, #0], which is
     not accepted by the assembler, so the following less efficient pattern
     has to be used instead.
   */
  __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" );
#endif
  return ((uint8_t) result); /* Add explicit type cast here */
}


/**
  \brief   LDRT Unprivileged (16 bit)
  \details Executes an unprivileged LDRT instruction for 16 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint16_t at (*ptr)
 */
__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr)
{
  uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) );
#else
  /* Prior to GCC 4.8, the "Q" constraint is expanded to [rx, #0], which is
     not accepted by the assembler, so the following less efficient pattern
     has to be used instead.
   */
  __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" );
#endif
  return ((uint16_t) result); /* Add explicit type cast here */
}


/**
  \brief   LDRT Unprivileged (32 bit)
  \details Executes an unprivileged LDRT instruction for 32 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint32_t at (*ptr)
 */
__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr)
{
  uint32_t result;

  __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) );
  return(result);
}


/**
  \brief   STRT Unprivileged (8 bit)
  \details Executes an unprivileged STRT instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]  ptr    Pointer to location
 */
__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr)
{
  __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
}


/**
  \brief   STRT Unprivileged (16 bit)
  \details Executes an unprivileged STRT instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]  ptr    Pointer to location
 */
__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr)
{
  __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
}


/**
  \brief   STRT Unprivileged (32 bit)
  \details Executes an unprivileged STRT instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]  ptr    Pointer to location
 */
__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr)
{
  __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) );
}

#else /* ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
          (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
          (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    ) */

/**
  \brief   Signed Saturate
  \details Saturates a signed value.
  \param [in]  val  Value to be saturated
  \param [in]  sat  Bit position to saturate to (1..32)
  \return           Saturated value
 */
__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat)
{
  if ((sat >= 1U) && (sat <= 32U))
  {
    const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U);
    const int32_t min = -1 - max ;
    if (val > max)
    {
      return max;
    }
    else if (val < min)
    {
      return min;
    }
  }
  return val;
}

/**
  \brief   Unsigned Saturate
  \details Saturates an unsigned value.
  \param [in]  val  Value to be saturated
  \param [in]  sat  Bit position to saturate to (0..31)
  \return           Saturated value
 */
__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat)
{
  if (sat <= 31U)
  {
    const uint32_t max = ((1U << sat) - 1U);
    if (val > (int32_t)max)
    {
      return max;
    }
    else if (val < 0)
    {
      return 0U;
    }
  }
  return (uint32_t)val;
}

#endif /* ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
           (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
           (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    ) */


#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
     (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    )
/**
  \brief   Load-Acquire (8 bit)
  \details Executes an LDAB instruction for 8 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint8_t at (*ptr)
 */
__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr)
{
  uint32_t result;

  __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
  return ((uint8_t) result);
}


/**
  \brief   Load-Acquire (16 bit)
  \details Executes an LDAH instruction for 16 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint16_t at (*ptr)
 */
__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr)
{
  uint32_t result;

  __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
  return ((uint16_t) result);
}


/**
  \brief   Load-Acquire (32 bit)
  \details Executes an LDA instruction for 32 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint32_t at (*ptr)
 */
__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr)
{
  uint32_t result;

  __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
  return(result);
}


/**
  \brief   Store-Release (8 bit)
  \details Executes an STLB instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]  ptr    Pointer to location
 */
__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr)
{
  __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
}


/**
  \brief   Store-Release (16 bit)
  \details Executes an STLH instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]  ptr    Pointer to location
 */
__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr)
{
  __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
}


/**
  \brief   Store-Release (32 bit)
  \details Executes an STL instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]  ptr    Pointer to location
 */
__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr)
{
  __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
}


/**
  \brief   Load-Acquire Exclusive (8 bit)
  \details Executes an LDAEXB instruction for 8 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint8_t at (*ptr)
 */
__STATIC_FORCEINLINE uint8_t __LDAEXB(volatile uint8_t *ptr)
{
  uint32_t result;

  __ASM volatile ("ldaexb %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
  return ((uint8_t) result);
}


/**
  \brief   Load-Acquire Exclusive (16 bit)
  \details Executes an LDAEXH instruction for 16 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint16_t at (*ptr)
 */
__STATIC_FORCEINLINE uint16_t __LDAEXH(volatile uint16_t *ptr)
{
  uint32_t result;

  __ASM volatile ("ldaexh %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
  return ((uint16_t) result);
}


/**
  \brief   Load-Acquire Exclusive (32 bit)
  \details Executes an LDAEX instruction for 32 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint32_t at (*ptr)
 */
__STATIC_FORCEINLINE uint32_t __LDAEX(volatile uint32_t *ptr)
{
  uint32_t result;

  __ASM volatile ("ldaex %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
  return(result);
}


/**
  \brief   Store-Release Exclusive (8 bit)
  \details Executes an STLEXB instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]  ptr    Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr)
{
  uint32_t result;

  __ASM volatile ("stlexb %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
  return(result);
}


/**
  \brief   Store-Release Exclusive (16 bit)
  \details Executes an STLEXH instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]  ptr    Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr)
{
  uint32_t result;

  __ASM volatile ("stlexh %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
  return(result);
}


/**
  \brief   Store-Release Exclusive (32 bit)
  \details Executes an STLEX instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]  ptr    Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr)
{
  uint32_t result;

  __ASM volatile ("stlex %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
  return(result);
}
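
/*
  Usage sketch (illustrative only): a minimal spinlock built from the
  load-acquire/store-release exclusive pair. The acquire/release semantics
  order the protected accesses, so no extra barriers are needed.

    void spin_lock_sketch(volatile uint32_t *lock)
    {
      // retry until the lock reads 0 and the exclusive store succeeds
      while ((__LDAEX(lock) != 0u) || (__STLEX(1u, lock) != 0u)) { }
    }

    void spin_unlock_sketch(volatile uint32_t *lock)
    {
      __STL(0u, lock);   // store-release makes prior writes visible first
    }
*/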

#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
           (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    ) */

/*@}*/ /* end of group CMSIS_Core_InstructionInterface */


/* ###################  Compiler specific Intrinsics  ########################### */
/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
  Access to dedicated SIMD instructions
  @{
*/

#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1))

__STATIC_FORCEINLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
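
/* Worked example (illustrative): the SIMD intrinsics operate on four packed
   bytes (or two packed halfwords) per 32-bit word. For instance
   __UQADD8(0x80FF0102u, 0x80010203u) yields 0xFFFF0305u: each byte lane is
   added with unsigned saturation, so 0x80+0x80 and 0xFF+0x01 clamp to 0xFF. */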
1668
1669
__SSUB8(uint32_t op1,uint32_t op2)1670 __STATIC_FORCEINLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
1671 {
1672 uint32_t result;
1673
1674 __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1675 return(result);
1676 }
1677
__QSUB8(uint32_t op1,uint32_t op2)1678 __STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
1679 {
1680 uint32_t result;
1681
1682 __ASM ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1683 return(result);
1684 }
1685
__SHSUB8(uint32_t op1,uint32_t op2)1686 __STATIC_FORCEINLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
1687 {
1688 uint32_t result;
1689
1690 __ASM ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1691 return(result);
1692 }
1693
__USUB8(uint32_t op1,uint32_t op2)1694 __STATIC_FORCEINLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
1695 {
1696 uint32_t result;
1697
1698 __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1699 return(result);
1700 }
1701
__UQSUB8(uint32_t op1,uint32_t op2)1702 __STATIC_FORCEINLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
1703 {
1704 uint32_t result;
1705
1706 __ASM ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1707 return(result);
1708 }
1709
__UHSUB8(uint32_t op1,uint32_t op2)1710 __STATIC_FORCEINLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
1711 {
1712 uint32_t result;
1713
1714 __ASM ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1715 return(result);
1716 }

/* SADD16: adds the two signed halfword lanes; updates the APSR.GE flags */
__STATIC_FORCEINLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* QADD16: saturating signed halfword addition (clamped to -32768..32767) */
__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* SHADD16: signed halving addition, (op1 + op2) >> 1 in each halfword lane */
__STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UADD16: adds the two unsigned halfword lanes; updates the APSR.GE flags */
__STATIC_FORCEINLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UQADD16: saturating unsigned halfword addition (clamped to 0..65535) */
__STATIC_FORCEINLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UHADD16: unsigned halving halfword addition */
__STATIC_FORCEINLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
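
/* Usage sketch (illustrative only): mixes two packed pairs of Q15 audio
   samples; saturation replaces the wrap-around distortion that plain
   addition would produce on clipping. */
#if 0
static uint32_t mix_q15_pair(uint32_t samples_a, uint32_t samples_b)
{
  return __QADD16(samples_a, samples_b);
}
#endif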

/* SSUB16: subtracts the two signed halfword lanes; updates the APSR.GE flags */
__STATIC_FORCEINLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* QSUB16: saturating subtraction of two signed halfword lanes */
__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* SHSUB16: signed halving halfword subtraction */
__STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* USUB16: subtracts the two unsigned halfword lanes; updates the APSR.GE flags */
__STATIC_FORCEINLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UQSUB16: saturating unsigned halfword subtraction (lanes clamp at 0) */
__STATIC_FORCEINLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UHSUB16: unsigned halving halfword subtraction */
__STATIC_FORCEINLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* SASX: exchanges the halfwords of op2, then hi = op1_hi + op2_lo and
   lo = op1_lo - op2_hi; updates the APSR.GE flags */
__STATIC_FORCEINLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* QASX: saturating variant of SASX */
__STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* SHASX: halving variant of SASX */
__STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UASX: unsigned add/subtract with exchange; updates the APSR.GE flags */
__STATIC_FORCEINLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UQASX: unsigned saturating variant of UASX */
__STATIC_FORCEINLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UHASX: unsigned halving variant of UASX */
__STATIC_FORCEINLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* SSAX: exchanges the halfwords of op2, then hi = op1_hi - op2_lo and
   lo = op1_lo + op2_hi; updates the APSR.GE flags */
__STATIC_FORCEINLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* QSAX: saturating variant of SSAX */
__STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* SHSAX: halving variant of SSAX */
__STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* USAX: unsigned subtract/add with exchange; updates the APSR.GE flags */
__STATIC_FORCEINLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UQSAX: unsigned saturating variant of USAX */
__STATIC_FORCEINLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UHSAX: unsigned halving variant of USAX */
__STATIC_FORCEINLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
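
/* Usage sketch (illustrative only, assuming the low halfword holds the real
   part and the high halfword the imaginary part of a Q15 complex sample):
   multiplying by +j maps (re, im) to (-im, re), which is a single QASX
   against zero. */
#if 0
static uint32_t cmplx_mul_j(uint32_t x)
{
  return __QASX(0U, x);   /* hi = 0 + re, lo = 0 - im, both saturated */
}
#endif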

/* USAD8: sum of the absolute differences of the four unsigned byte lanes */
__STATIC_FORCEINLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* USADA8: USAD8 of op1 and op2, accumulated into op3 */
__STATIC_FORCEINLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
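
/* Usage sketch (illustrative only): accumulates the sum of absolute byte
   differences over a run of packed pixels, the core of SAD-based motion
   estimation. The pointer/word-count interface is an assumption. */
#if 0
static uint32_t row_sad(const uint32_t *a, const uint32_t *b, uint32_t words)
{
  uint32_t acc = 0U;
  uint32_t i;
  for (i = 0U; i < words; i++)
  {
    acc = __USADA8(a[i], b[i], acc);   /* acc += |a0-b0|+|a1-b1|+|a2-b2|+|a3-b3| */
  }
  return acc;
}
#endif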

/* SSAT16/USAT16 saturate both halfword lanes to a signed/unsigned width.
   The bound ARG2 feeds the "I" immediate operand constraint, so it must be
   a compile-time constant. */
#define __SSAT16(ARG1, ARG2) \
({                          \
  int32_t __RES, __ARG1 = (ARG1); \
  __ASM volatile ("ssat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) : "cc" ); \
  __RES; \
 })

#define __USAT16(ARG1, ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM volatile ("usat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) : "cc" ); \
  __RES; \
 })
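
/* Usage sketch (illustrative only): clamps both halfword lanes to the 12-bit
   unsigned range 0..4095, e.g. before writing packed samples to a 12-bit DAC. */
#if 0
static uint32_t clamp_to_12bit(uint32_t packed)
{
  return __USAT16(packed, 12);   /* the width must be a literal constant */
}
#endif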

/* UXTB16: zero-extends bytes 0 and 2 of op1 into the two halfword lanes */
__STATIC_FORCEINLINE uint32_t __UXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

/* UXTAB16: UXTB16 of op2, added per halfword lane to op1 */
__STATIC_FORCEINLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* SXTB16: sign-extends bytes 0 and 2 of op1 into the two halfword lanes */
__STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

/* SXTB16_RORn: SXTB16 applied after rotating op1 right by `rotate` bits.
   The rotate amount feeds the "i" immediate constraint, so after inlining
   it must fold to a constant the instruction can encode (typically 8, 16
   or 24). */
__STATIC_FORCEINLINE uint32_t __SXTB16_RORn(uint32_t op1, uint32_t rotate)
{
  uint32_t result;

  __ASM ("sxtb16 %0, %1, ROR %2" : "=r" (result) : "r" (op1), "i" (rotate) );

  return result;
}

/* SXTAB16: SXTB16 of op2, added per halfword lane to op1 */
__STATIC_FORCEINLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
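
/* Usage sketch (illustrative only): de-interleaves four packed int8 samples
   into two packed int16 pairs, a common prologue to the dual 16-bit MACs
   below. The helper name and out-parameter shape are assumptions. */
#if 0
static void split_q7_to_q15(uint32_t packed, uint32_t *even, uint32_t *odd)
{
  *even = __SXTB16(packed);           /* sign-extends bytes 0 and 2 */
  *odd  = __SXTB16_RORn(packed, 8);   /* sign-extends bytes 1 and 3 */
}
#endif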

/* SMUAD: dual signed 16x16 multiply, products summed (may set Q on overflow) */
__STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* SMUADX: as SMUAD, with the halfwords of op2 exchanged first */
__STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* SMLAD: dual 16x16 multiply with both products and op3 accumulated */
__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

/* SMLADX: as SMLAD, with the halfwords of op2 exchanged first */
__STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
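
/* Usage sketch (illustrative only): a packed Q15 dot product, two 16x16
   multiply-accumulates per __SMLAD. The pointer/pair-count interface is an
   assumption. */
#if 0
static uint32_t dot_q15(const uint32_t *x, const uint32_t *y, uint32_t pairs)
{
  uint32_t acc = 0U;
  uint32_t i;
  for (i = 0U; i < pairs; i++)
  {
    acc = __SMLAD(x[i], y[i], acc);   /* acc += x_lo*y_lo + x_hi*y_hi */
  }
  return acc;
}
#endif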

/* SMLALD: dual 16x16 multiply with both products added into a 64-bit
   accumulator. The union maps the accumulator onto the two 32-bit registers
   the instruction expects; the word order depends on endianness. */
__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

/* SMLALDX: as SMLALD, with the halfwords of op2 exchanged first */
__STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}
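
/* Usage sketch (illustrative only): a Q15 dot product with a 64-bit
   accumulator, so even long vectors cannot overflow the running sum. */
#if 0
static uint64_t dot_q15_64(const uint32_t *x, const uint32_t *y, uint32_t pairs)
{
  uint64_t acc = 0ULL;
  uint32_t i;
  for (i = 0U; i < pairs; i++)
  {
    acc = __SMLALD(x[i], y[i], acc);  /* acc += x_lo*y_lo + x_hi*y_hi, 64-bit */
  }
  return acc;
}
#endif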

/* SMUSD: dual signed 16x16 multiply, the high-lane product subtracted from
   the low-lane product: op1_lo*op2_lo - op1_hi*op2_hi */
__STATIC_FORCEINLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* SMUSDX: as SMUSD, with the halfwords of op2 exchanged first */
__STATIC_FORCEINLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* SMLSD: op3 + (op1_lo*op2_lo - op1_hi*op2_hi); may set Q on overflow */
__STATIC_FORCEINLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

/* SMLSDX: as SMLSD, with the halfwords of op2 exchanged first */
__STATIC_FORCEINLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
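
/* Usage sketch (illustrative only, low halfword = real part, high halfword =
   imaginary part): a full Q15 complex multiply built from the dual-MAC
   primitives; the caller rescales the Q30 products as needed. */
#if 0
static void cmplx_mul_q15(uint32_t x, uint32_t y, int32_t *re, int32_t *im)
{
  *re = (int32_t)__SMUSD(x, y);    /* xr*yr - xi*yi */
  *im = (int32_t)__SMUADX(x, y);   /* xr*yi + xi*yr */
}
#endif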

/* SMLSLD: dual 16x16 multiply with the product difference added into a
   64-bit accumulator (see the register-pair note at __SMLALD) */
__STATIC_FORCEINLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

/* SMLSLDX: as SMLSLD, with the halfwords of op2 exchanged first */
__STATIC_FORCEINLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

/* SEL: picks each result byte from op1 where the matching APSR.GE flag is
   set, and from op2 otherwise */
__STATIC_FORCEINLINE uint32_t __SEL (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
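
/* Usage sketch (illustrative only): the classic per-byte maximum idiom.
   __USUB8 sets GE[i] exactly where a's byte is >= b's byte, and __SEL then
   routes each byte from the winning operand. */
#if 0
static uint32_t max_u8x4(uint32_t a, uint32_t b)
{
  (void)__USUB8(a, b);   /* executed only for its effect on the GE flags */
  return __SEL(a, b);    /* a-bytes where GE is set, b-bytes elsewhere   */
}
#endif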

/* QADD/QSUB: 32-bit saturating add and subtract; set the Q flag on saturation */
__STATIC_FORCEINLINE  int32_t __QADD( int32_t op1,  int32_t op2)
{
  int32_t result;

  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE  int32_t __QSUB( int32_t op1,  int32_t op2)
{
  int32_t result;

  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* The inline-asm forms of __PKHBT/__PKHTB below are disabled in favor of the
   plain C macros that follow, which compute the same packing and let the
   compiler fold constants and schedule freely. */
#if 0
#define __PKHBT(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
  __RES; \
 })

#define __PKHTB(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  if (ARG3 == 0) \
    __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2)  ); \
  else \
    __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
  __RES; \
 })
#endif

/* PKHBT: bottom halfword of ARG1 combined with ARG2 shifted left by ARG3 for
   the top half; PKHTB: top halfword of ARG1 combined with ARG2 shifted right
   by ARG3 for the bottom half */
#define __PKHBT(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) |  \
                                           ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )

#define __PKHTB(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0xFFFF0000UL) |  \
                                           ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)  )
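
/* Usage sketch (illustrative only): packs two 16-bit values into one word;
   __PKHBT takes the bottom halfword from its first argument and the shifted
   second argument for the top. */
#if 0
static uint32_t pack_halves(uint32_t lo16, uint32_t hi16)
{
  return __PKHBT(lo16, hi16, 16);   /* result: (hi16 << 16) | (lo16 & 0xFFFF) */
}
#endif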

/* SMMLA: op3 + the most significant 32 bits of the 64-bit product op1*op2 */
__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
{
  int32_t result;

  __ASM ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
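
/* Usage sketch (illustrative only): a truncating high-word multiply-accumulate,
   acc + (int32_t)(((int64_t)a * b) >> 32), without materializing the 64-bit
   product in C. */
#if 0
static int32_t mac_high(int32_t acc, int32_t a, int32_t b)
{
  return __SMMLA(a, b, acc);
}
#endif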

#endif /* (__ARM_FEATURE_DSP == 1) */
/*@} end of group CMSIS_SIMD_intrinsics */


#pragma GCC diagnostic pop

#endif /* __CMSIS_GCC_H */