/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright © 2020 Keith Packard
 * Copyright (c) 2017 embedded brains GmbH. All rights reserved
 * Copyright (c) 2011, 2012 ARM Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MACHINE_MATH_H_
#define _MACHINE_MATH_H_

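/*
 * ACLE describes the floating-point hardware through __ARM_FP: bit 3
 * (0x8) is set when double precision is supported and bit 2 (0x4)
 * when single precision is supported. __ARM_FEATURE_FMA is defined
 * when fused multiply-add instructions are available, so these tests
 * enable the vfma-based fma()/fmaf() inlines only when they map to a
 * single instruction.
 */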
#if (__ARM_FEATURE_FMA && (__ARM_FP & 8))
#define _HAVE_FAST_FMA 1
#endif

#if (__ARM_FEATURE_FMA && (__ARM_FP & 4))
#define _HAVE_FAST_FMAF 1
#endif

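/*
 * Each inline below is declared "extern __inline" with the GNU
 * gnu_inline and always_inline attributes: callers get the body
 * inlined and no out-of-line copy is emitted, so the regular library
 * definition still provides the external symbol. These inlines are
 * only enabled when the compiler is known to support both attributes.
 */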
#if defined(_HAVE_ATTRIBUTE_ALWAYS_INLINE) && defined(_HAVE_ATTRIBUTE_GNU_INLINE)
#define __declare_arm_macro(type) extern __inline type __attribute((gnu_inline, always_inline))

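/*
 * _WANT_MATH_ERRNO is the library configuration option under which
 * sqrt() and sqrtf() below report domain errors through errno.
 */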
#ifdef _WANT_MATH_ERRNO
#include <errno.h>
#endif

#if (__ARM_FP & 0x8) && !defined(__SOFTFP__)

/*
 * Double precision routines
 */

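/*
 * The "w" constraint asks the compiler for a VFP register and the %P
 * operand modifier prints its double-precision (d<n>) name. Most of
 * the asm statements are volatile so they cannot be optimized away,
 * presumably because they may raise floating-point exception flags
 * that the operand constraints do not describe.
 */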
__declare_arm_macro(double)
sqrt(double x)
{
    double result;
#ifdef _WANT_MATH_ERRNO
    if (isless(x, 0.0))
        errno = EDOM;
#endif
#if __ARM_ARCH >= 6
    __asm__ volatile ("vsqrt.f64 %P0, %P1" : "=w" (result) : "w" (x));
#else
    /* VFP9 Erratum 760019, see GCC sources "gcc/config/arm/vfp.md" */
    __asm__ volatile ("vsqrt.f64 %P0, %P1" : "=&w" (result) : "w" (x));
#endif
    return result;
}

__declare_arm_macro(double)
fabs(double x)
{
    double result;
    __asm__ ("vabs.f64\t%P0, %P1" : "=w" (result) : "w" (x));
    return result;
}

#if __ARM_ARCH >= 8
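/*
 * The VRINT round-to-integral instructions are only available from
 * ARMv8 on: vrintp rounds toward +infinity (ceil), vrintm toward
 * -infinity (floor), vrinta to nearest with ties away from zero
 * (round), vrintz toward zero (trunc), while vrintr and vrintx follow
 * the current FPSCR rounding mode, vrintx additionally raising the
 * inexact exception as rint() requires.
 */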
__declare_arm_macro(double)
ceil (double x)
{
    double result;
    __asm__ volatile ("vrintp.f64\t%P0, %P1" : "=w" (result) : "w" (x));
    return result;
}

__declare_arm_macro(double)
floor (double x)
{
    double result;
    __asm__ volatile ("vrintm.f64\t%P0, %P1" : "=w" (result) : "w" (x));
    return result;
}

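/*
 * nearbyint() rounds using the current mode but must not raise the
 * inexact exception, so when FE_INEXACT is defined (i.e. <fenv.h> was
 * included before this header) the floating-point environment is
 * saved and restored around the instruction to keep any newly raised
 * flags from escaping. A NaN argument is returned as x + x, which
 * also quiets a signaling NaN.
 */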
__declare_arm_macro(double)
nearbyint (double x)
{
    if (isnan(x)) return x + x;
#if defined(FE_INEXACT)
    fenv_t env;
    fegetenv(&env);
#endif
    __asm__ volatile ("vrintr.f64\t%P0, %P1" : "=w" (x) : "w" (x));
#if defined(FE_INEXACT)
    fesetenv(&env);
#endif
    return x;
}

__declare_arm_macro(double)
rint (double x)
{
    double result;
    __asm__ volatile ("vrintx.f64\t%P0, %P1" : "=w" (result) : "w" (x));
    return result;
}

__declare_arm_macro(double)
round (double x)
{
    double result;
    __asm__ volatile ("vrinta.f64\t%P0, %P1" : "=w" (result) : "w" (x));
    return result;
}

__declare_arm_macro(double)
trunc (double x)
{
    double result;
    __asm__ volatile ("vrintz.f64\t%P0, %P1" : "=w" (result) : "w" (x));
    return result;
}
#endif /* __ARM_ARCH >= 8 */

#if _HAVE_FAST_FMA

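/*
 * vfma is a fused multiply-add: z + x * y is computed with a single
 * rounding at the end, which is exactly what fma() requires. The
 * accumulator is a read/write ("+w") operand because the instruction
 * adds into its destination register.
 */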
__declare_arm_macro(double)
fma (double x, double y, double z)
{
    __asm__ volatile ("vfma.f64 %P0, %P1, %P2" : "+w" (z) : "w" (x), "w" (y));
    return z;
}

#endif

#endif /* (__ARM_FP & 0x8) && !defined(__SOFTFP__) */

#if (__ARM_FP & 0x4) && !defined(__SOFTFP__)

/*
 * Single precision functions
 */

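/*
 * The single precision versions mirror the double precision routines
 * above, operating on the s<n> registers: operands are referenced as
 * plain %0/%1 and mostly use the "t" (single-precision VFP register)
 * constraint.
 */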
__declare_arm_macro(float)
sqrtf(float x)
{
    float result;
#ifdef _WANT_MATH_ERRNO
    if (isless(x, 0.0f))
        errno = EDOM;
#endif
#if __ARM_ARCH >= 6
    __asm__ volatile ("vsqrt.f32 %0, %1" : "=w" (result) : "w" (x));
#else
    /* VFP9 Erratum 760019, see GCC sources "gcc/config/arm/vfp.md" */
    __asm__ volatile ("vsqrt.f32 %0, %1" : "=&w" (result) : "w" (x));
#endif
    return result;
}

__declare_arm_macro(float)
fabsf(float x)
{
    float result;
    __asm__ ("vabs.f32\t%0, %1" : "=t" (result) : "t" (x));
    return result;
}

#if __ARM_ARCH >= 8
__declare_arm_macro(float)
ceilf (float x)
{
    float result;
    __asm__ volatile ("vrintp.f32\t%0, %1" : "=t" (result) : "t" (x));
    return result;
}

__declare_arm_macro(float)
floorf (float x)
{
    float result;
    __asm__ volatile ("vrintm.f32\t%0, %1" : "=t" (result) : "t" (x));
    return result;
}

__declare_arm_macro(float)
nearbyintf (float x)
{
    if (isnan(x)) return x + x;
#if defined(FE_INEXACT)
    fenv_t env;
    fegetenv(&env);
#endif
    __asm__ volatile ("vrintr.f32\t%0, %1" : "=t" (x) : "t" (x));
#if defined(FE_INEXACT)
    fesetenv(&env);
#endif
    return x;
}

__declare_arm_macro(float)
rintf (float x)
{
    float result;
    __asm__ volatile ("vrintx.f32\t%0, %1" : "=t" (result) : "t" (x));
    return result;
}

__declare_arm_macro(float)
roundf (float x)
{
    float result;
    __asm__ volatile ("vrinta.f32\t%0, %1" : "=t" (result) : "t" (x));
    return result;
}

__declare_arm_macro(float)
truncf (float x)
{
    float result;
    __asm__ volatile ("vrintz.f32\t%0, %1" : "=t" (result) : "t" (x));
    return result;
}
#endif /* __ARM_ARCH >= 8 */

#if _HAVE_FAST_FMAF

__declare_arm_macro(float)
fmaf (float x, float y, float z)
{
    __asm__ volatile ("vfma.f32 %0, %1, %2" : "+t" (z) : "t" (x), "t" (y));
    return z;
}

#endif

#endif /* (__ARM_FP & 0x4) && !defined(__SOFTFP__) */

#undef __declare_arm_macro

#endif /* have attributes */

#endif /* _MACHINE_MATH_H_ */