/**************************************************************************//**
 * @file     cmsis_iccarm.h
 * @brief    CMSIS compiler ICCARM (IAR Compiler for Arm) header file
 * @version  V5.0.7
 * @date     15. May 2019
 ******************************************************************************/

//------------------------------------------------------------------------------
//
// Copyright (c) 2017-2018 IAR Systems
// Copyright (c) 2018-2019 Arm Limited
//
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//------------------------------------------------------------------------------


#ifndef __CMSIS_ICCARM_H__
#define __CMSIS_ICCARM_H__

#ifndef __ICCARM__
  #error This file should only be compiled by ICCARM
#endif

#pragma system_include

#define __IAR_FT _Pragma("inline=forced") __intrinsic

#if (__VER__ >= 8000000)
  #define __ICCARM_V8 1
#else
  #define __ICCARM_V8 0
#endif

#pragma language=extended

#ifndef __ALIGNED
  #if __ICCARM_V8
    #define __ALIGNED(x) __attribute__((aligned(x)))
  #elif (__VER__ >= 7080000)
    /* Needs IAR language extensions */
    #define __ALIGNED(x) __attribute__((aligned(x)))
  #else
    #warning No compiler-specific solution for __ALIGNED. __ALIGNED is ignored.
    #define __ALIGNED(x)
  #endif
#endif
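
/* Usage sketch (illustrative, not part of this header): __ALIGNED places an
 * object on a given byte boundary, e.g. for a DMA-friendly buffer:
 *
 *   __ALIGNED(32) static uint8_t dma_buffer[64];   // 32-byte aligned
 */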


/* Define compiler macros for CPU architecture, used in CMSIS 5.
 */
#if __ARM_ARCH_7A__
/* Macro already defined */
#else
  #if defined(__ARM7A__)
    #define __ARM_ARCH_7A__ 1
  #endif
#endif

#ifndef __ASM
  #define __ASM __asm
#endif

#ifndef __COMPILER_BARRIER
  #define __COMPILER_BARRIER() __ASM volatile("":::"memory")
#endif
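
/* Usage sketch (illustrative): a compiler barrier only stops the compiler
 * from reordering memory accesses across it; it emits no instruction and is
 * no substitute for __DMB/__DSB when other bus masters observe the memory.
 *
 *   flag = 1U;               // 'flag' is hypothetical
 *   __COMPILER_BARRIER();    // the store above is not moved past this point
 *   data = shared_value;
 */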

#ifndef __INLINE
  #define __INLINE inline
#endif

#ifndef __NO_RETURN
  #if __ICCARM_V8
    #define __NO_RETURN __attribute__((__noreturn__))
  #else
    #define __NO_RETURN _Pragma("object_attribute=__noreturn")
  #endif
#endif

#ifndef __PACKED
  /* Needs IAR language extensions */
  #if __ICCARM_V8
    #define __PACKED __attribute__((packed, aligned(1)))
  #else
    #define __PACKED __packed
  #endif
#endif

#ifndef __PACKED_STRUCT
  /* Needs IAR language extensions */
  #if __ICCARM_V8
    #define __PACKED_STRUCT struct __attribute__((packed, aligned(1)))
  #else
    #define __PACKED_STRUCT __packed struct
  #endif
#endif

#ifndef __PACKED_UNION
  /* Needs IAR language extensions */
  #if __ICCARM_V8
    #define __PACKED_UNION union __attribute__((packed, aligned(1)))
  #else
    #define __PACKED_UNION __packed union
  #endif
#endif
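
/* Usage sketch (illustrative): __PACKED_STRUCT removes padding so the layout
 * matches a wire or register format exactly; sizeof(msg_header_t) is 7 here,
 * not 8. The type and field names are hypothetical.
 *
 *   typedef __PACKED_STRUCT {
 *     uint8_t  type;
 *     uint32_t length;   // unaligned 32-bit member, no padding before it
 *     uint16_t crc;
 *   } msg_header_t;
 */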

#ifndef __RESTRICT
  #if __ICCARM_V8
    #define __RESTRICT __restrict
  #else
    /* Needs IAR language extensions */
    #define __RESTRICT restrict
  #endif
#endif

#ifndef __STATIC_INLINE
  #define __STATIC_INLINE static inline
#endif

#ifndef __FORCEINLINE
  #define __FORCEINLINE _Pragma("inline=forced")
#endif

#ifndef __STATIC_FORCEINLINE
  #define __STATIC_FORCEINLINE __FORCEINLINE __STATIC_INLINE
#endif

#ifndef CMSIS_DEPRECATED
  #define CMSIS_DEPRECATED __attribute__((deprecated))
#endif

#ifndef __UNALIGNED_UINT16_READ
  #pragma language=save
  #pragma language=extended
  __IAR_FT uint16_t __iar_uint16_read(void const *ptr)
  {
    return *(__packed uint16_t*)(ptr);
  }
  #pragma language=restore
  #define __UNALIGNED_UINT16_READ(PTR) __iar_uint16_read(PTR)
#endif


#ifndef __UNALIGNED_UINT16_WRITE
  #pragma language=save
  #pragma language=extended
  __IAR_FT void __iar_uint16_write(void const *ptr, uint16_t val)
  {
    *(__packed uint16_t*)(ptr) = val;
  }
  #pragma language=restore
  #define __UNALIGNED_UINT16_WRITE(PTR,VAL) __iar_uint16_write(PTR,VAL)
#endif

#ifndef __UNALIGNED_UINT32_READ
  #pragma language=save
  #pragma language=extended
  __IAR_FT uint32_t __iar_uint32_read(void const *ptr)
  {
    return *(__packed uint32_t*)(ptr);
  }
  #pragma language=restore
  #define __UNALIGNED_UINT32_READ(PTR) __iar_uint32_read(PTR)
#endif

#ifndef __UNALIGNED_UINT32_WRITE
  #pragma language=save
  #pragma language=extended
  __IAR_FT void __iar_uint32_write(void const *ptr, uint32_t val)
  {
    *(__packed uint32_t*)(ptr) = val;
  }
  #pragma language=restore
  #define __UNALIGNED_UINT32_WRITE(PTR,VAL) __iar_uint32_write(PTR,VAL)
#endif
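
/* Usage sketch (illustrative): the accessors above go through a __packed
 * pointer, so the compiler emits an access sequence that is safe regardless
 * of the pointer's alignment.
 *
 *   uint8_t  frame[16];   // hypothetical byte buffer
 *   uint32_t seq = __UNALIGNED_UINT32_READ(&frame[1]);   // offset 1: unaligned
 *   __UNALIGNED_UINT16_WRITE(&frame[5], 0x1234U);
 */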

#if 0
#ifndef __UNALIGNED_UINT32   /* deprecated */
  #pragma language=save
  #pragma language=extended
  __packed struct __iar_u32 { uint32_t v; };
  #pragma language=restore
  #define __UNALIGNED_UINT32(PTR) (((struct __iar_u32 *)(PTR))->v)
#endif
#endif

#ifndef __USED
  #if __ICCARM_V8
    #define __USED __attribute__((used))
  #else
    #define __USED _Pragma("__root")
  #endif
#endif

#ifndef __WEAK
  #if __ICCARM_V8
    #define __WEAK __attribute__((weak))
  #else
    #define __WEAK _Pragma("__weak")
  #endif
#endif


#ifndef __ICCARM_INTRINSICS_VERSION__
  #define __ICCARM_INTRINSICS_VERSION__ 0
#endif

#if __ICCARM_INTRINSICS_VERSION__ == 2

  #if defined(__CLZ)
    #undef __CLZ
  #endif
  #if defined(__REVSH)
    #undef __REVSH
  #endif
  #if defined(__RBIT)
    #undef __RBIT
  #endif
  #if defined(__SSAT)
    #undef __SSAT
  #endif
  #if defined(__USAT)
    #undef __USAT
  #endif

  #include "iccarm_builtin.h"

  #define __enable_irq        __iar_builtin_enable_interrupt
  #define __disable_irq       __iar_builtin_disable_interrupt
  #define __enable_fault_irq  __iar_builtin_enable_fiq
  #define __disable_fault_irq __iar_builtin_disable_fiq
  #define __arm_rsr           __iar_builtin_rsr
  #define __arm_wsr           __iar_builtin_wsr

  #if __FPU_PRESENT
    #define __get_FPSCR()     (__arm_rsr("FPSCR"))
  #else
    #define __get_FPSCR()     ( 0 )
  #endif

  #define __set_FPSCR(VALUE)  (__arm_wsr("FPSCR", VALUE))

  #define __get_CPSR()        (__arm_rsr("CPSR"))
  #define __get_mode()        (__get_CPSR() & 0x1FU)

  #define __set_CPSR(VALUE)   (__arm_wsr("CPSR", (VALUE)))
  #define __set_mode(VALUE)   (__arm_wsr("CPSR_c", (VALUE)))
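
  /* Usage sketch (illustrative): the low five CPSR bits encode the current
   * processor mode; 0x13 is Supervisor (SVC) on ARMv7-A, for example.
   *
   *   if (__get_mode() == 0x13U) {
   *     ...   // running in SVC mode
   *   }
   */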


  #define __get_FPEXC()       (__arm_rsr("FPEXC"))
  #define __set_FPEXC(VALUE)  (__arm_wsr("FPEXC", VALUE))

  #define __get_CP(cp, op1, RT, CRn, CRm, op2) \
    ((RT) = __arm_rsr("p" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2))

  #define __set_CP(cp, op1, RT, CRn, CRm, op2) \
    (__arm_wsr("p" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2, (RT)))

  #define __get_CP64(cp, op1, Rt, CRm) \
    __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" )

  #define __set_CP64(cp, op1, Rt, CRm) \
    __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" )
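
  /* Usage sketch (illustrative): the arguments name a coprocessor register by
   * its (cp, op1, CRn, CRm, op2) encoding. Reading the Main ID Register
   * (MIDR: p15, 0, c0, c0, 0) on ARMv7-A, for example:
   *
   *   uint32_t midr;
   *   __get_CP(15, 0, midr, 0, 0, 0);
   */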

  #include "cmsis_cp15.h"

  #define __NOP     __iar_builtin_no_operation

  #define __CLZ     __iar_builtin_CLZ
  #define __CLREX   __iar_builtin_CLREX

  #define __DMB     __iar_builtin_DMB
  #define __DSB     __iar_builtin_DSB
  #define __ISB     __iar_builtin_ISB

  #define __LDREXB  __iar_builtin_LDREXB
  #define __LDREXH  __iar_builtin_LDREXH
  #define __LDREXW  __iar_builtin_LDREX

  #define __RBIT    __iar_builtin_RBIT
  #define __REV     __iar_builtin_REV
  #define __REV16   __iar_builtin_REV16

  __IAR_FT int16_t __REVSH(int16_t val)
  {
    return (int16_t) __iar_builtin_REVSH(val);
  }

  #define __ROR     __iar_builtin_ROR
  #define __RRX     __iar_builtin_RRX

  #define __SEV     __iar_builtin_SEV

  #define __SSAT    __iar_builtin_SSAT

  #define __STREXB  __iar_builtin_STREXB
  #define __STREXH  __iar_builtin_STREXH
  #define __STREXW  __iar_builtin_STREX
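
  /* Usage sketch (illustrative): LDREX/STREX form an atomic read-modify-write
   * loop. __STREXW returns 0 on success and 1 if the exclusive monitor was
   * lost, in which case the update is retried. 'atomic_increment' is a
   * hypothetical helper, not part of CMSIS.
   *
   *   static void atomic_increment(uint32_t volatile *counter)
   *   {
   *     uint32_t v;
   *     do {
   *       v = __LDREXW(counter);
   *     } while (__STREXW(v + 1U, counter) != 0U);
   *   }
   */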

  #define __USAT    __iar_builtin_USAT

  #define __WFE     __iar_builtin_WFE
  #define __WFI     __iar_builtin_WFI

  #define __SADD8   __iar_builtin_SADD8
  #define __QADD8   __iar_builtin_QADD8
  #define __SHADD8  __iar_builtin_SHADD8
  #define __UADD8   __iar_builtin_UADD8
  #define __UQADD8  __iar_builtin_UQADD8
  #define __UHADD8  __iar_builtin_UHADD8
  #define __SSUB8   __iar_builtin_SSUB8
  #define __QSUB8   __iar_builtin_QSUB8
  #define __SHSUB8  __iar_builtin_SHSUB8
  #define __USUB8   __iar_builtin_USUB8
  #define __UQSUB8  __iar_builtin_UQSUB8
  #define __UHSUB8  __iar_builtin_UHSUB8
  #define __SADD16  __iar_builtin_SADD16
  #define __QADD16  __iar_builtin_QADD16
  #define __SHADD16 __iar_builtin_SHADD16
  #define __UADD16  __iar_builtin_UADD16
  #define __UQADD16 __iar_builtin_UQADD16
  #define __UHADD16 __iar_builtin_UHADD16
  #define __SSUB16  __iar_builtin_SSUB16
  #define __QSUB16  __iar_builtin_QSUB16
  #define __SHSUB16 __iar_builtin_SHSUB16
  #define __USUB16  __iar_builtin_USUB16
  #define __UQSUB16 __iar_builtin_UQSUB16
  #define __UHSUB16 __iar_builtin_UHSUB16
  #define __SASX    __iar_builtin_SASX
  #define __QASX    __iar_builtin_QASX
  #define __SHASX   __iar_builtin_SHASX
  #define __UASX    __iar_builtin_UASX
  #define __UQASX   __iar_builtin_UQASX
  #define __UHASX   __iar_builtin_UHASX
  #define __SSAX    __iar_builtin_SSAX
  #define __QSAX    __iar_builtin_QSAX
  #define __SHSAX   __iar_builtin_SHSAX
  #define __USAX    __iar_builtin_USAX
  #define __UQSAX   __iar_builtin_UQSAX
  #define __UHSAX   __iar_builtin_UHSAX
  #define __USAD8   __iar_builtin_USAD8
  #define __USADA8  __iar_builtin_USADA8
  #define __SSAT16  __iar_builtin_SSAT16
  #define __USAT16  __iar_builtin_USAT16
  #define __UXTB16  __iar_builtin_UXTB16
  #define __UXTAB16 __iar_builtin_UXTAB16
  #define __SXTB16  __iar_builtin_SXTB16
  #define __SXTAB16 __iar_builtin_SXTAB16
  #define __SMUAD   __iar_builtin_SMUAD
  #define __SMUADX  __iar_builtin_SMUADX
  #define __SMMLA   __iar_builtin_SMMLA
  #define __SMLAD   __iar_builtin_SMLAD
  #define __SMLADX  __iar_builtin_SMLADX
  #define __SMLALD  __iar_builtin_SMLALD
  #define __SMLALDX __iar_builtin_SMLALDX
  #define __SMUSD   __iar_builtin_SMUSD
  #define __SMUSDX  __iar_builtin_SMUSDX
  #define __SMLSD   __iar_builtin_SMLSD
  #define __SMLSDX  __iar_builtin_SMLSDX
  #define __SMLSLD  __iar_builtin_SMLSLD
  #define __SMLSLDX __iar_builtin_SMLSLDX
  #define __SEL     __iar_builtin_SEL
  #define __QADD    __iar_builtin_QADD
  #define __QSUB    __iar_builtin_QSUB
  #define __PKHBT   __iar_builtin_PKHBT
  #define __PKHTB   __iar_builtin_PKHTB
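
  /* Usage sketch (illustrative): the SIMD intrinsics operate lane-wise on
   * packed sub-words. __UADD8 adds four unsigned bytes with no carry between
   * lanes:
   *
   *   uint32_t sum = __UADD8(0x01020304U, 0x10203040U);   // 0x11223344
   */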

#else /* __ICCARM_INTRINSICS_VERSION__ == 2 */

  #if !__FPU_PRESENT
    #define __get_FPSCR __cmsis_iar_get_FPSR_not_active
  #endif

  #ifdef __INTRINSICS_INCLUDED
    #error intrinsics.h is already included previously!
  #endif

  #include <intrinsics.h>

  #if !__FPU_PRESENT
    #define __get_FPSCR() (0)
  #endif

  #pragma diag_suppress=Pe940
  #pragma diag_suppress=Pe177

  #define __enable_irq        __enable_interrupt
  #define __disable_irq       __disable_interrupt
  #define __enable_fault_irq  __enable_fiq
  #define __disable_fault_irq __disable_fiq
  #define __NOP               __no_operation

  #define __get_xPSR          __get_PSR

  __IAR_FT void __set_mode(uint32_t mode)
  {
    __ASM volatile("MSR cpsr_c, %0" : : "r" (mode) : "memory");
  }

  __IAR_FT uint32_t __LDREXW(uint32_t volatile *ptr)
  {
    return __LDREX((unsigned long *)ptr);
  }

  __IAR_FT uint32_t __STREXW(uint32_t value, uint32_t volatile *ptr)
  {
    return __STREX(value, (unsigned long *)ptr);
  }


  __IAR_FT uint32_t __RRX(uint32_t value)
  {
    uint32_t result;
    __ASM("RRX %0, %1" : "=r"(result) : "r" (value) : "cc");
    return(result);
  }


  __IAR_FT uint32_t __ROR(uint32_t op1, uint32_t op2)
  {
    /* A rotate count of 0 (mod 32) would make the left shift below a shift
       by 32, which is undefined in C, so return the operand unchanged. */
    op2 %= 32U;
    if (op2 == 0U)
    {
      return op1;
    }
    return (op1 >> op2) | (op1 << (32U - op2));
  }

  __IAR_FT uint32_t __get_FPEXC(void)
  {
  #if (__FPU_PRESENT == 1)
    uint32_t result;
    __ASM volatile("VMRS %0, fpexc" : "=r" (result) : : "memory");
    return(result);
  #else
    return(0);
  #endif
  }

  __IAR_FT void __set_FPEXC(uint32_t fpexc)
  {
  #if (__FPU_PRESENT == 1)
    __ASM volatile ("VMSR fpexc, %0" : : "r" (fpexc) : "memory");
  #endif
  }


  #define __get_CP(cp, op1, Rt, CRn, CRm, op2) \
    __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" )
  #define __set_CP(cp, op1, Rt, CRn, CRm, op2) \
    __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" )
  #define __get_CP64(cp, op1, Rt, CRm) \
    __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" )
  #define __set_CP64(cp, op1, Rt, CRm) \
    __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" )

  #include "cmsis_cp15.h"

#endif /* __ICCARM_INTRINSICS_VERSION__ == 2 */

#define __BKPT(value) __asm volatile ("BKPT %0" : : "i"(value))


__IAR_FT uint32_t __get_SP_usr(void)
{
  uint32_t cpsr;
  uint32_t result;
  __ASM volatile(
    "MRS     %0, cpsr   \n"
    "CPS     #0x1F      \n" // no effect in USR mode
    "MOV     %1, sp     \n"
    "MSR     cpsr_c, %2 \n" // no effect in USR mode
    "ISB" :  "=r"(cpsr), "=r"(result) : "r"(cpsr) : "memory"
  );
  return result;
}

__IAR_FT void __set_SP_usr(uint32_t topOfProcStack)
{
  uint32_t cpsr;
  __ASM volatile(
    "MRS     %0, cpsr   \n"
    "CPS     #0x1F      \n" // no effect in USR mode
    "MOV     sp, %1     \n"
    "MSR     cpsr_c, %2 \n" // no effect in USR mode
    "ISB" : "=r"(cpsr) : "r" (topOfProcStack), "r"(cpsr) : "memory"
  );
}
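
/* CPS #0x1F switches to System mode, which shares its banked SP with User
   mode; restoring the saved CPSR afterwards returns to the caller's mode.
   From User mode both instructions have no effect, hence the comments above,
   so these helpers read or write the USR/SYS stack pointer from any mode. */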

#define __get_mode()  (__get_CPSR() & 0x1FU)

__STATIC_INLINE
void __FPU_Enable(void)
{
  __ASM volatile(
    //Permit access to VFP/NEON registers by modifying CPACR
    "  MRC     p15,0,R1,c1,c0,2  \n"
    "  ORR     R1,R1,#0x00F00000 \n"
    "  MCR     p15,0,R1,c1,c0,2  \n"

    //Ensure that subsequent instructions occur in the context of VFP/NEON access permitted
    "  ISB                       \n"

    //Enable VFP/NEON
    "  VMRS    R1,FPEXC          \n"
    "  ORR     R1,R1,#0x40000000 \n"
    "  VMSR    FPEXC,R1          \n"

    //Initialise VFP/NEON registers to 0
    "  MOV     R2,#0             \n"

    //Initialise D0-D15 registers to 0
    "  VMOV    D0, R2,R2         \n"
    "  VMOV    D1, R2,R2         \n"
    "  VMOV    D2, R2,R2         \n"
    "  VMOV    D3, R2,R2         \n"
    "  VMOV    D4, R2,R2         \n"
    "  VMOV    D5, R2,R2         \n"
    "  VMOV    D6, R2,R2         \n"
    "  VMOV    D7, R2,R2         \n"
    "  VMOV    D8, R2,R2         \n"
    "  VMOV    D9, R2,R2         \n"
    "  VMOV    D10,R2,R2         \n"
    "  VMOV    D11,R2,R2         \n"
    "  VMOV    D12,R2,R2         \n"
    "  VMOV    D13,R2,R2         \n"
    "  VMOV    D14,R2,R2         \n"
    "  VMOV    D15,R2,R2         \n"

#ifdef __ARM_ADVANCED_SIMD__
    //Initialise D16-D31 registers to 0
    "  VMOV    D16,R2,R2         \n"
    "  VMOV    D17,R2,R2         \n"
    "  VMOV    D18,R2,R2         \n"
    "  VMOV    D19,R2,R2         \n"
    "  VMOV    D20,R2,R2         \n"
    "  VMOV    D21,R2,R2         \n"
    "  VMOV    D22,R2,R2         \n"
    "  VMOV    D23,R2,R2         \n"
    "  VMOV    D24,R2,R2         \n"
    "  VMOV    D25,R2,R2         \n"
    "  VMOV    D26,R2,R2         \n"
    "  VMOV    D27,R2,R2         \n"
    "  VMOV    D28,R2,R2         \n"
    "  VMOV    D29,R2,R2         \n"
    "  VMOV    D30,R2,R2         \n"
    "  VMOV    D31,R2,R2         \n"
#endif

    //Initialise FPSCR to a known state
    "  VMRS    R1,FPSCR          \n"
    "  MOV32   R2,#0x00086060    \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
    "  AND     R1,R1,R2          \n"
    "  VMSR    FPSCR,R1          \n"
    : : : "cc", "r1", "r2"
  );
}
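
/* Usage sketch (illustrative): on a core with VFP/NEON, __FPU_Enable() must
 * run in a privileged mode before any floating-point instruction executes,
 * typically early in the reset handler:
 *
 *   #if (__FPU_PRESENT == 1)
 *     __FPU_Enable();
 *   #endif
 */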



#undef __IAR_FT
#undef __ICCARM_V8

#pragma diag_default=Pe940
#pragma diag_default=Pe177

#endif /* __CMSIS_ICCARM_H__ */