/**************************************************************************//**
 * @file     cmsis_gcc.h
 * @brief    CMSIS compiler specific macros, functions, instructions
 * @version  V1.0.0
 * @date     05. October 2021
 ******************************************************************************/
/*
 * Copyright (c) 2021 Arm Limited. All rights reserved.
 * Copyright 2021-2022 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __CMSIS_GCC_H
#define __CMSIS_GCC_H

#include <stdint.h>   /* uint32_t/uint64_t used by the definitions below */

/* CMSIS compiler specific defines */
#ifndef __ASM
#define __ASM __asm
#endif
#ifndef __FORCEINLINE
#define __FORCEINLINE __attribute__((always_inline))
#endif
#ifndef __INLINE
#define __INLINE inline
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline
#endif
#ifndef __WEAK
#define __WEAK __attribute__((weak))
#endif

#ifndef __STRINGIFY
#define __STRINGIFY(x) #x
#endif

#ifndef __MSR
#define __MSR(sysreg, val) \
    __asm volatile ("msr "__STRINGIFY(sysreg)", %0\n" : : "r"((uint64_t)(val)))
#endif

#ifndef __MRS
#define __MRS(sysreg, pVal) \
    __asm volatile ("mrs %0, "__STRINGIFY(sysreg)"\n" : "=r"((*pVal)))
#endif
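
/* Usage sketch (illustrative, not part of this header's API): reading and
   writing an AArch64 system register through the macros above. CNTFRQ_EL0,
   the generic timer frequency register, serves purely as an example here.

       uint64_t freq;
       __MRS(CNTFRQ_EL0, &freq);   // read the counter frequency in Hz
       __MSR(CNTFRQ_EL0, freq);    // write it back (normally done only by early firmware)
*/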

#ifndef __WFI
#define __WFI() \
    __asm volatile ("wfi")
#endif


/* ########################### Core Function Access ########################### */
/** \ingroup  CMSIS_Core_FunctionInterface
    \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
  @{
 */


/**
  \brief   Get Interrupt Mask Bits
  \details Returns the current state of the interrupt mask bits from the DAIF register.
  \return  Interrupt Mask value
 */
__STATIC_FORCEINLINE uint64_t __get_DAIF(void)
{
    uint64_t result;
    __MRS(DAIF, &result);
    return result;
}


/**
  \brief   Enable IRQ Interrupts
  \details Enables IRQ interrupts by clearing the I-bit in the DAIF.
 */
__STATIC_FORCEINLINE void __enable_irq(void)
{
    __ASM volatile ("msr daifclr, #2" : : : "memory");
}


/**
  \brief   Disable IRQ Interrupts
  \details Disables IRQ interrupts by setting the I-bit in the DAIF.
 */
__STATIC_FORCEINLINE void __disable_irq(void)
{
    __ASM volatile ("msr daifset, #2" : : : "memory");
}
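
/* Usage sketch (illustrative): an interrupt-safe critical section built from
   the primitives above. Saving and restoring the whole DAIF state, rather
   than unconditionally re-enabling, keeps nested critical sections correct.

       uint64_t daif = __get_DAIF();   // remember the current mask state
       __disable_irq();                // set the I-bit: IRQs masked
       // ... critical work ...
       __MSR(DAIF, daif);              // restore the previous mask state
*/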


/*@} end of CMSIS_Core_RegAccFunctions */


/* ########################## Core Instruction Access ######################### */
/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
  Access to dedicated instructions
  @{
*/

/**
  \brief   Hypervisor call with 2 arguments
  \details Makes a hypervisor call with two arguments stored in x0 and x1.
 */
#define HVC_2(imm, x0, x1) __asm volatile ( \
    "mov x0, %0 \n\t"                       \
    "mov x1, %1 \n\t"                       \
    "hvc #" __STRINGIFY(imm) "\n\t"         \
    : : "r" (x0), "r" (x1) : "x0", "x1", "memory")
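
/* Usage sketch (illustrative): the immediate is stringified into the
   instruction text, so it must be a literal; the function ID and argument
   values below are hypothetical, not defined by this header.

       HVC_2(0, 0x84000000U, 0U);   // e.g. an SMCCC-style call: ID in x0, argument in x1
*/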

/**
  \brief   Multiprocessor Affinity
  \details Indicates the core number in the Cortex-Axx processor.
 */
__STATIC_FORCEINLINE uint64_t __get_MPIDR_EL1(void)
{
    uint64_t result;
    __MRS(MPIDR_EL1, &result);
    return result;
}

#define MPIDR_GetCoreID() \
    ({ uint64_t mpidr = __get_MPIDR_EL1(); \
       (mpidr >> (8 * MPIDR_SUPPORT_MT(mpidr))) & MPIDR_AFFLVL_MASK; })
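
/* Usage sketch (illustrative): MPIDR_SUPPORT_MT() and MPIDR_AFFLVL_MASK are
   expected to come from a device-specific header; on a multithreaded (MT)
   part the core number sits in affinity level 1 rather than level 0, which
   is what the shift above selects.

       if (MPIDR_GetCoreID() == 0U)
       {
           // primary-core-only initialization
       }
*/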

/**
  \brief   Instruction Synchronization Barrier
  \details Instruction Synchronization Barrier flushes the pipeline in the processor,
           so that all instructions following the ISB are fetched from cache or memory,
           after the instruction has been completed.
 */
#ifndef __ISB
__STATIC_FORCEINLINE void __ISB(void)
{
    __ASM volatile ("isb":::"memory");
}
#endif

/**
  \brief   Data Synchronization Barrier
  \details Acts as a special kind of Data Memory Barrier.
           It completes when all explicit memory accesses before this instruction complete.
 */
#ifndef __DSB
__STATIC_FORCEINLINE void __DSB(void)
{
    __ASM volatile ("dsb sy":::"memory");
}
#endif

/**
  \brief   Data Memory Barrier
  \details Ensures the apparent order of the explicit memory operations before
           and after the instruction, without ensuring their completion.
 */
#ifndef __DMB
__STATIC_FORCEINLINE void __DMB(void)
{
    __ASM volatile ("dmb sy":::"memory");
}
#endif
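
/* Usage sketch (illustrative): a classic message-passing pattern. The DMB
   orders the payload store before the flag store; the consumer needs a
   matching barrier between reading the flag and reading the data. The
   variables are hypothetical.

       shared_data = value;   // produce the payload
       __DMB();               // order the payload store before the flag store
       shared_flag = 1U;      // publish
*/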

/**
  \brief   Reverse byte order (32 bit)
  \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412.
  \param [in] value  Value to reverse
  \return            Reversed value
 */
__STATIC_FORCEINLINE uint32_t __REV(uint32_t value)
{
    return __builtin_bswap32(value);
}


/**
  \brief   Reverse byte order (16 bit)
  \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
  \param [in] value  Value to reverse
  \return            Reversed value
 */
__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value)
{
    /* __builtin_bswap16() would truncate the argument to 16 bits; swap the
       bytes within each halfword instead, matching the REV16 instruction. */
    return ((value & 0xFF00FF00U) >> 8) | ((value & 0x00FF00FFU) << 8);
}
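
/* Worked example (illustrative): byte reversal is the usual endianness swap,
   e.g. when parsing big-endian network or file headers on a little-endian core.

       uint32_t be = 0x12345678U;
       uint32_t le = __REV(be);     // 0x78563412: all four bytes reversed
       uint32_t hw = __REV16(be);   // 0x34127856: bytes swapped within each halfword
*/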

/**
  \brief   Breakpoint
  \details Causes the processor to enter Debug state.
           Debug tools can use this to investigate system state when the instruction at a particular address is reached.
  \param [in] value  is ignored by the processor.
                     If required, a debugger can use it to store additional information about the breakpoint.
 */
#define __BKPT(value) __ASM volatile ("brk "#value)
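
/* Usage sketch (illustrative): because the argument is pasted into the
   instruction text, it must be an integer literal that fits the 16-bit BRK
   immediate.

       __BKPT(0);   // traps to the debugger with immediate #0
*/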

/**
  \brief   No Operation
  \details No Operation does nothing. This instruction can be used for code alignment purposes.
 */
__STATIC_FORCEINLINE void __NOP(void)
{
    __ASM volatile ("nop");
}

/**
  \brief   Count leading zeros
  \details Counts the number of leading zeros of a data value.
  \param [in] value  Value to count the leading zeros
  \return            number of leading zeros in value
 */
__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)
{
    /* Even though __builtin_clz produces a CLZ instruction on ARM, formally
       __builtin_clz(0) is undefined behaviour, so handle this case specially.
       This guarantees ARM-compatible results if happening to compile on a non-ARM
       target, and ensures the compiler doesn't decide to activate any
       optimisations using the logic "value was passed to __builtin_clz, so it
       is non-zero".
       ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a
       single CLZ instruction. */
    if (value == 0U)
    {
        return 32U;
    }
    return __builtin_clz(value);
}
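
/* Usage sketch (illustrative): a common CLZ idiom is finding the index of the
   highest set bit, i.e. floor(log2(x)) for x > 0.

       uint32_t x   = 0x00400000U;                 // 1U << 22
       uint8_t  msb = (uint8_t)(31U - __CLZ(x));   // 22
*/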

/**
  \brief   likely/unlikely() branch prediction
  \details Gives hints to the compiler to favor either side of a jump instruction
  \param [in] expr  Boolean expression under evaluation
  \return           The same boolean value
 */
#ifndef unlikely
__STATIC_FORCEINLINE long unlikely(long expr)
{
    return __builtin_expect(expr, 0L);
}
#endif

#ifndef likely
__STATIC_FORCEINLINE long likely(long expr)
{
    return __builtin_expect(expr, 1L);
}
#endif
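
/* Usage sketch (illustrative): annotate the expected direction of a branch so
   the compiler keeps the hot path on the fall-through side; 'status' and
   'handle_error' are hypothetical.

       if (unlikely(status != 0))
       {
           handle_error(status);
       }
*/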

/*@}*/ /* end of group CMSIS_Core_InstructionInterface */


#endif /* __CMSIS_GCC_H */