/*
 * Copyright (c) 2017-2024 IAR Systems
 * Copyright (c) 2017-2024 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * CMSIS-Core(Generic) Compiler ICCARM (IAR Compiler for Arm) Header File
 */

#ifndef __CMSIS_ICCARM_H
#define __CMSIS_ICCARM_H

#pragma system_include

#if (__VER__ >= 8000000)
  #define __ICCARM_V8 1
#else
  #define __ICCARM_V8 0
#endif

#define __IAR_FT _Pragma("inline=forced") __intrinsic

#ifndef __ASM
  #define __ASM __asm
#endif

#ifndef __INLINE
  #define __INLINE inline
#endif

#ifndef   __STATIC_INLINE
  #define __STATIC_INLINE       static inline
#endif

#ifndef   __FORCEINLINE
  #define __FORCEINLINE         _Pragma("inline=forced")
#endif

#ifndef   __STATIC_FORCEINLINE
  #define __STATIC_FORCEINLINE  __FORCEINLINE __STATIC_INLINE
#endif

#ifndef   __NO_RETURN
  #if defined(__cplusplus) && __cplusplus >= 201103L
    #define __NO_RETURN [[noreturn]]
  #elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
    #define __NO_RETURN _Noreturn
  #else
    #define __NO_RETURN _Pragma("object_attribute=__noreturn")
  #endif
#endif

#ifndef   CMSIS_DEPRECATED
  #define CMSIS_DEPRECATED      __attribute__((deprecated))
#endif

#ifndef   __USED
  #if __ICCARM_V8
    #define __USED __attribute__((used))
  #else
    #define __USED _Pragma("__root")
  #endif
#endif

#undef __WEAK                           /* undo the definition from DLib_Defaults.h */
#ifndef   __WEAK
  #if __ICCARM_V8
    #define __WEAK __attribute__((weak))
  #else
    #define __WEAK _Pragma("__weak")
  #endif
#endif

#ifndef   __PACKED
  #if __ICCARM_V8
    #define __PACKED __attribute__((packed, aligned(1)))
  #else
    /* Needs IAR language extensions */
    #define __PACKED __packed
  #endif
#endif

#ifndef   __PACKED_STRUCT
  #if __ICCARM_V8
    #define __PACKED_STRUCT struct __attribute__((packed, aligned(1)))
  #else
    /* Needs IAR language extensions */
    #define __PACKED_STRUCT __packed struct
  #endif
#endif

#ifndef   __PACKED_UNION
  #if __ICCARM_V8
    #define __PACKED_UNION union __attribute__((packed, aligned(1)))
  #else
    /* Needs IAR language extensions */
    #define __PACKED_UNION __packed union
  #endif
#endif

#ifndef __UNALIGNED_UINT16_READ
#pragma language=save
#pragma language=extended
__IAR_FT uint16_t __iar_uint16_read(void const *ptr)
{
  return *(__packed uint16_t*)(ptr);
}
#pragma language=restore
#define __UNALIGNED_UINT16_READ(PTR) __iar_uint16_read(PTR)
#endif


#ifndef __UNALIGNED_UINT16_WRITE
#pragma language=save
#pragma language=extended
__IAR_FT void __iar_uint16_write(void const *ptr, uint16_t val)
{
  *(__packed uint16_t*)(ptr) = val;
}
#pragma language=restore
#define __UNALIGNED_UINT16_WRITE(PTR,VAL) __iar_uint16_write(PTR,VAL)
#endif

#ifndef __UNALIGNED_UINT32_READ
#pragma language=save
#pragma language=extended
__IAR_FT uint32_t __iar_uint32_read(void const *ptr)
{
  return *(__packed uint32_t*)(ptr);
}
#pragma language=restore
#define __UNALIGNED_UINT32_READ(PTR) __iar_uint32_read(PTR)
#endif

#ifndef __UNALIGNED_UINT32_WRITE
#pragma language=save
#pragma language=extended
__IAR_FT void __iar_uint32_write(void const *ptr, uint32_t val)
{
  *(__packed uint32_t*)(ptr) = val;
}
#pragma language=restore
#define __UNALIGNED_UINT32_WRITE(PTR,VAL) __iar_uint32_write(PTR,VAL)
#endif
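
/*
 * Usage sketch (illustrative, not part of this header): the unaligned
 * helpers access memory through a __packed pointer, so they are safe on
 * byte buffers of arbitrary alignment, e.g. when parsing wire formats:
 *
 *   uint8_t frame[6] = { 0x11U, 0x22U, 0x33U, 0x44U, 0x55U, 0x66U };
 *   uint32_t word = __UNALIGNED_UINT32_READ(&frame[1]);   // no alignment fault
 *   __UNALIGNED_UINT16_WRITE(&frame[3], 0xBEEFU);
 */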

#ifndef __ALIGNED
  #if __ICCARM_V8
    #define __ALIGNED(x) __attribute__((aligned(x)))
  #elif (__VER__ >= 7080000)
    /* Needs IAR language extensions */
    #define __ALIGNED(x) __attribute__((aligned(x)))
  #else
    #warning No compiler specific solution for __ALIGNED. __ALIGNED is ignored.
    #define __ALIGNED(x)
  #endif
#endif

#ifndef   __RESTRICT
  #if __ICCARM_V8
    #define __RESTRICT            __restrict
  #else
    /* Needs IAR language extensions */
    #define __RESTRICT            restrict
  #endif
#endif

#ifndef   __COMPILER_BARRIER
  #define __COMPILER_BARRIER() __ASM volatile("":::"memory")
#endif

#ifndef __NO_INIT
  #define __NO_INIT __attribute__ ((section (".noinit")))
#endif

#ifndef __ALIAS
  #define __ALIAS(x) __attribute__ ((alias(x)))
#endif

#if defined(__CLZ)
  #undef __CLZ
#endif
#if defined(__REVSH)
  #undef __REVSH
#endif
#if defined(__RBIT)
  #undef __RBIT
#endif
#if defined(__SSAT)
  #undef __SSAT
#endif
#if defined(__USAT)
  #undef __USAT
#endif

#include "iccarm_builtin.h"

#define __disable_irq       __iar_builtin_disable_interrupt
#define __enable_irq        __iar_builtin_enable_interrupt
#define __arm_rsr           __iar_builtin_rsr
#define __arm_wsr           __iar_builtin_wsr

#define __NOP     __iar_builtin_no_operation
#define __WFI     __iar_builtin_WFI
#define __WFE     __iar_builtin_WFE
#define __ISB     __iar_builtin_ISB
#define __SEV     __iar_builtin_SEV
#define __DSB     __iar_builtin_DSB
#define __DMB     __iar_builtin_DMB
#define __REV     __iar_builtin_REV
#define __REV16   __iar_builtin_REV16
#define __ROR     __iar_builtin_ROR
#define __RBIT    __iar_builtin_RBIT
#define __CLZ     __iar_builtin_CLZ

__IAR_FT int16_t __REVSH(int16_t val)
{
  return (int16_t) __iar_builtin_REVSH(val);
}
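
/*
 * Worked example (illustrative): __REVSH reverses the byte order of a
 * 16-bit value and returns the sign-extended result, while __REV reverses
 * all four bytes of a word:
 *   __REVSH((int16_t)0x0080) == (int16_t)0x8000   (negative)
 *   __REV(0x12345678U)       == 0x78563412U
 */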


#define __BKPT(value)    __asm volatile ("BKPT     %0" : : "i"(value))

#if (__ARM_FEATURE_LDREX >= 1)

/*
 * __iar_builtin_CLREX can be reordered w.r.t. STREX during high optimizations.
 * As a workaround we use inline assembly and a memory barrier.
 * (IAR issue EWARM-11901)
 * Fixed in EWARM 9.50.2
 */

__IAR_FT void __CLREX(void) {
  __ASM volatile("CLREX" ::: "memory");
}


#define __LDREXB  __iar_builtin_LDREXB
#define __STREXB  __iar_builtin_STREXB

#endif /* (__ARM_FEATURE_LDREX >= 1) */

#if (__ARM_FEATURE_LDREX >= 2)

#define __LDREXH  __iar_builtin_LDREXH
#define __STREXH  __iar_builtin_STREXH

#endif /* (__ARM_FEATURE_LDREX >= 2) */

#if (__ARM_FEATURE_LDREX >= 4)

#define __LDREXW  __iar_builtin_LDREX
#define __STREXW  __iar_builtin_STREX

#endif /* (__ARM_FEATURE_LDREX >= 4) */
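
/*
 * Usage sketch (illustrative, assumes __ARM_FEATURE_LDREX >= 4): a lock-free
 * increment built from an exclusive load/store retry loop. __STREXW returns
 * 0 on success and 1 if the exclusive monitor was lost, in which case the
 * read-modify-write is retried.
 *
 *   static void atomic_inc(volatile uint32_t *counter)
 *   {
 *     uint32_t v;
 *     do {
 *       v = __LDREXW(counter);                      // exclusive read
 *     } while (__STREXW(v + 1U, counter) != 0U);    // retry if monitor lost
 *   }
 */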

#if ((__ARM_FEATURE_SAT    >= 1) && \
     (__ARM_ARCH_ISA_THUMB >= 2)    )
/* __ARM_FEATURE_SAT is wrong for Armv8-M Baseline devices */
/**
  \brief   Signed Saturate
  \details Saturates a signed value.
  \param [in]  value  Value to be saturated
  \param [in]    sat  Bit position to saturate to (1..32)
  \return             Saturated value
 */
#define __SSAT    __iar_builtin_SSAT


/**
  \brief   Unsigned Saturate
  \details Saturates an unsigned value.
  \param [in]  value  Value to be saturated
  \param [in]    sat  Bit position to saturate to (0..31)
  \return             Saturated value
*/
#define __USAT    __iar_builtin_USAT

#else /* (__ARM_FEATURE_SAT >= 1) */
/**
  \brief   Signed Saturate
  \details Saturates a signed value.
  \param [in]  value  Value to be saturated
  \param [in]    sat  Bit position to saturate to (1..32)
  \return             Saturated value
 */
__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat)
{
  if ((sat >= 1U) && (sat <= 32U))
  {
    const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U);
    const int32_t min = -1 - max;
    if (val > max)
    {
      return (max);
    }
    else if (val < min)
    {
      return (min);
    }
  }
  return (val);
}


/**
  \brief   Unsigned Saturate
  \details Saturates an unsigned value.
  \param [in]  value  Value to be saturated
  \param [in]    sat  Bit position to saturate to (0..31)
  \return             Saturated value
 */
__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat)
{
  if (sat <= 31U)
  {
    const uint32_t max = ((1U << sat) - 1U);
    if (val > (int32_t)max)
    {
      return (max);
    }
    else if (val < 0)
    {
      return (0U);
    }
  }
  return ((uint32_t)val);
}
#endif /* (__ARM_FEATURE_SAT >= 1) */
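
/*
 * Worked example (illustrative): saturating to 8 bits clamps signed values
 * to [-128, 127] and unsigned values to [0, 255]:
 *   __SSAT(1000, 8)  ->  127
 *   __SSAT(-1000, 8) -> -128
 *   __USAT(-5, 8)    ->  0
 *   __USAT(300, 8)   ->  255
 */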

#if (__ARM_ARCH_ISA_THUMB >= 2)

#define __RRX     __iar_builtin_RRX

__IAR_FT uint8_t __LDRBT(volatile uint8_t *addr)
{
  uint32_t res;
  __ASM volatile ("LDRBT %0, [%1]" : "=r" (res) : "r" (addr) : "memory");
  return ((uint8_t)res);
}

__IAR_FT uint16_t __LDRHT(volatile uint16_t *addr)
{
  uint32_t res;
  __ASM volatile ("LDRHT %0, [%1]" : "=r" (res) : "r" (addr) : "memory");
  return ((uint16_t)res);
}

__IAR_FT uint32_t __LDRT(volatile uint32_t *addr)
{
  uint32_t res;
  __ASM volatile ("LDRT %0, [%1]" : "=r" (res) : "r" (addr) : "memory");
  return res;
}

__IAR_FT void __STRBT(uint8_t value, volatile uint8_t *addr)
{
  __ASM volatile ("STRBT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory");
}

__IAR_FT void __STRHT(uint16_t value, volatile uint16_t *addr)
{
  __ASM volatile ("STRHT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory");
}

__IAR_FT void __STRT(uint32_t value, volatile uint32_t *addr)
{
  __ASM volatile ("STRT %1, [%0]" : : "r" (addr), "r" (value) : "memory");
}
#endif

#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1))
  #define __SADD8   __iar_builtin_SADD8
  #define __QADD8   __iar_builtin_QADD8
  #define __SHADD8  __iar_builtin_SHADD8
  #define __UADD8   __iar_builtin_UADD8
  #define __UQADD8  __iar_builtin_UQADD8
  #define __UHADD8  __iar_builtin_UHADD8
  #define __SSUB8   __iar_builtin_SSUB8
  #define __QSUB8   __iar_builtin_QSUB8
  #define __SHSUB8  __iar_builtin_SHSUB8
  #define __USUB8   __iar_builtin_USUB8
  #define __UQSUB8  __iar_builtin_UQSUB8
  #define __UHSUB8  __iar_builtin_UHSUB8
  #define __SADD16  __iar_builtin_SADD16
  #define __QADD16  __iar_builtin_QADD16
  #define __SHADD16 __iar_builtin_SHADD16
  #define __UADD16  __iar_builtin_UADD16
  #define __UQADD16 __iar_builtin_UQADD16
  #define __UHADD16 __iar_builtin_UHADD16
  #define __SSUB16  __iar_builtin_SSUB16
  #define __QSUB16  __iar_builtin_QSUB16
  #define __SHSUB16 __iar_builtin_SHSUB16
  #define __USUB16  __iar_builtin_USUB16
  #define __UQSUB16 __iar_builtin_UQSUB16
  #define __UHSUB16 __iar_builtin_UHSUB16
  #define __SASX    __iar_builtin_SASX
  #define __QASX    __iar_builtin_QASX
  #define __SHASX   __iar_builtin_SHASX
  #define __UASX    __iar_builtin_UASX
  #define __UQASX   __iar_builtin_UQASX
  #define __UHASX   __iar_builtin_UHASX
  #define __SSAX    __iar_builtin_SSAX
  #define __QSAX    __iar_builtin_QSAX
  #define __SHSAX   __iar_builtin_SHSAX
  #define __USAX    __iar_builtin_USAX
  #define __UQSAX   __iar_builtin_UQSAX
  #define __UHSAX   __iar_builtin_UHSAX
  #define __USAD8   __iar_builtin_USAD8
  #define __USADA8  __iar_builtin_USADA8
  #define __SSAT16  __iar_builtin_SSAT16
  #define __USAT16  __iar_builtin_USAT16
  #define __UXTB16  __iar_builtin_UXTB16
  #define __UXTAB16 __iar_builtin_UXTAB16
  #define __SXTB16  __iar_builtin_SXTB16
  #define __SXTAB16 __iar_builtin_SXTAB16
  #define __SMUAD   __iar_builtin_SMUAD
  #define __SMUADX  __iar_builtin_SMUADX
  #define __SMMLA   __iar_builtin_SMMLA
  #define __SMLAD   __iar_builtin_SMLAD
  #define __SMLADX  __iar_builtin_SMLADX
  #define __SMLALD  __iar_builtin_SMLALD
  #define __SMLALDX __iar_builtin_SMLALDX
  #define __SMUSD   __iar_builtin_SMUSD
  #define __SMUSDX  __iar_builtin_SMUSDX
  #define __SMLSD   __iar_builtin_SMLSD
  #define __SMLSDX  __iar_builtin_SMLSDX
  #define __SMLSLD  __iar_builtin_SMLSLD
  #define __SMLSLDX __iar_builtin_SMLSLDX
  #define __SEL     __iar_builtin_SEL
  #define __QADD    __iar_builtin_QADD
  #define __QSUB    __iar_builtin_QSUB
  #define __PKHBT   __iar_builtin_PKHBT
  #define __PKHTB   __iar_builtin_PKHTB

  /* Note: these are suboptimal because the compiler lacks the features needed to express them directly */

  #define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2))
  #define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3))

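  /*
   * Worked example (illustrative): __SXTB16_RORn(x, 8) rotates x right by 8
   * bits, then sign-extends bytes 0 and 2 of the rotated value into two
   * 16-bit lanes, i.e. it extracts and sign-extends bytes 1 and 3 of x:
   *   __SXTB16_RORn(0x80017F02U, 8) -> 0xFF80007FU
   */
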
#endif /* (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) */

#if (defined (__ARM_FP)      && (__ARM_FP >= 1))
  #define __get_FPSCR()             (__arm_rsr("FPSCR"))
  #define __set_FPSCR(VALUE)        (__arm_wsr("FPSCR", (VALUE)))
#else
  #define __get_FPSCR()             ( 0 )
  #define __set_FPSCR(VALUE)        ((void)VALUE)
#endif
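
/*
 * Usage sketch (illustrative): a read-modify-write of the FPU status and
 * control register, here clearing the cumulative exception flags in the
 * low bits of FPSCR. On targets without an FPU this collapses to a no-op,
 * since __get_FPSCR() is 0 and __set_FPSCR() discards its argument.
 *
 *   uint32_t fpscr = __get_FPSCR();
 *   __set_FPSCR(fpscr & ~0xFFU);   // clear IOC/DZC/OFC/UFC/IXC/IDC flags
 */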

#if (defined(__ARM_ARCH_ISA_THUMB) && __ARM_ARCH_ISA_THUMB >= 2)
  // Note: CPSID/CPSIE F does not control a dedicated fault interrupt on
  // non-Cortex-M profiles, but existing code assumes these names.
  __IAR_FT void __disable_fault_irq(void)
  {
    __ASM volatile ("CPSID F" ::: "memory");
  }

  __IAR_FT void __enable_fault_irq(void)
  {
    __ASM volatile ("CPSIE F" ::: "memory");
  }
#endif

#if (__ARM_ARCH >= 8)

  __IAR_FT uint8_t __LDAB(volatile uint8_t *ptr)
  {
    uint32_t res;
    __ASM volatile ("LDAB %0, [%1]" : "=r" (res) : "r" (ptr) : "memory");
    return ((uint8_t)res);
  }

  __IAR_FT uint16_t __LDAH(volatile uint16_t *ptr)
  {
    uint32_t res;
    __ASM volatile ("LDAH %0, [%1]" : "=r" (res) : "r" (ptr) : "memory");
    return ((uint16_t)res);
  }

  __IAR_FT uint32_t __LDA(volatile uint32_t *ptr)
  {
    uint32_t res;
    __ASM volatile ("LDA %0, [%1]" : "=r" (res) : "r" (ptr) : "memory");
    return res;
  }

  __IAR_FT void __STLB(uint8_t value, volatile uint8_t *ptr)
  {
    __ASM volatile ("STLB %1, [%0]" :: "r" (ptr), "r" (value) : "memory");
  }

  __IAR_FT void __STLH(uint16_t value, volatile uint16_t *ptr)
  {
    __ASM volatile ("STLH %1, [%0]" :: "r" (ptr), "r" (value) : "memory");
  }

  __IAR_FT void __STL(uint32_t value, volatile uint32_t *ptr)
  {
    __ASM volatile ("STL %1, [%0]" :: "r" (ptr), "r" (value) : "memory");
  }

  __IAR_FT uint8_t __LDAEXB(volatile uint8_t *ptr)
  {
    uint32_t res;
    __ASM volatile ("LDAEXB %0, [%1]" : "=r" (res) : "r" (ptr) : "memory");
    return ((uint8_t)res);
  }

  __IAR_FT uint16_t __LDAEXH(volatile uint16_t *ptr)
  {
    uint32_t res;
    __ASM volatile ("LDAEXH %0, [%1]" : "=r" (res) : "r" (ptr) : "memory");
    return ((uint16_t)res);
  }

  __IAR_FT uint32_t __LDAEX(volatile uint32_t *ptr)
  {
    uint32_t res;
    __ASM volatile ("LDAEX %0, [%1]" : "=r" (res) : "r" (ptr) : "memory");
    return res;
  }

  __IAR_FT uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr)
  {
    uint32_t res;
    __ASM volatile ("STLEXB %0, %2, [%1]" : "=&r" (res) : "r" (ptr), "r" (value) : "memory");
    return res;
  }

  __IAR_FT uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr)
  {
    uint32_t res;
    __ASM volatile ("STLEXH %0, %2, [%1]" : "=&r" (res) : "r" (ptr), "r" (value) : "memory");
    return res;
  }

  __IAR_FT uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr)
  {
    uint32_t res;
    __ASM volatile ("STLEX %0, %2, [%1]" : "=&r" (res) : "r" (ptr), "r" (value) : "memory");
    return res;
  }

#endif /* __ARM_ARCH >= 8 */
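
/*
 * Usage sketch (illustrative, assumes __ARM_ARCH >= 8): a minimal spinlock
 * built from the load-acquire/store-release exclusives. __STLEXB returns 0
 * on success; the acquire/release semantics make explicit barriers around
 * the critical section unnecessary.
 *
 *   static void lock(volatile uint8_t *l)
 *   {
 *     do {
 *       while (__LDAEXB(l) != 0U) { }   // wait until lock observed free
 *     } while (__STLEXB(1U, l) != 0U);  // claim it; retry if monitor lost
 *   }
 *
 *   static void unlock(volatile uint8_t *l)
 *   {
 *     __STLB(0U, l);                    // store-release frees the lock
 *   }
 */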

#if __ARM_ARCH_PROFILE == 'A'
  #include "a-profile/cmsis_iccarm_a.h"
#elif __ARM_ARCH_PROFILE == 'R'
  #include "r-profile/cmsis_iccarm_r.h"
#elif __ARM_ARCH_PROFILE == 'M'
  #include "m-profile/cmsis_iccarm_m.h"
#else
  #error "Unknown Arm architecture profile"
#endif

#endif /* __CMSIS_ICCARM_H */