/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright © 2020 Keith Packard
 * Copyright (c) 2017 embedded brains GmbH. All rights reserved
 * Copyright (c) 2011, 2012 ARM Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MACHINE_MATH_H_
#define _MACHINE_MATH_H_

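/*
 * Background note: ACLE's __ARM_FP is a bit mask of the FP widths the
 * hardware supports, with 0x4 meaning single precision and 0x8 double
 * precision.  When the compiler also defines __ARM_FEATURE_FMA, a
 * fused multiply-add is a single instruction, so fma()/fmaf() are
 * worth inlining below.
 */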
#if (__ARM_FEATURE_FMA && (__ARM_FP & 8))
#define _HAVE_FAST_FMA  1
#endif

#if (__ARM_FEATURE_FMA && (__ARM_FP & 4))
#define _HAVE_FAST_FMAF 1
#endif

#ifdef __declare_extern_inline

#ifdef _WANT_MATH_ERRNO
#include <errno.h>
#endif
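/*
 * With _WANT_MATH_ERRNO, the inlines below report domain errors
 * (such as sqrt of a negative number) through errno, matching the
 * MATH_ERRNO reporting of C's math_errhandling.
 */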

#if (__ARM_FP & 0x8) && !defined(__SOFTFP__)

/*
 * Double precision routines
 */

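/*
 * Inline-asm conventions used here: the "w" constraint with the "%P"
 * template modifier names a double-precision VFP register (d0-d31);
 * the single-precision routines further down use the "t" constraint,
 * which names an s register (s0-s31).
 */
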
__declare_extern_inline(double)
sqrt(double x)
{
    double result;
#ifdef _WANT_MATH_ERRNO
    /* isless() is quiet on NaN, so only a real negative sets errno */
    if (isless(x, 0.0))
        errno = EDOM;
#endif
#if __ARM_ARCH >= 6
    __asm__ volatile ("vsqrt.f64 %P0, %P1" : "=w" (result) : "w" (x));
#else
    /*
     * VFP9 Erratum 760019 (see GCC sources "gcc/config/arm/vfp.md"):
     * it is potentially unsafe for the destination of vsqrt to
     * overlap a source register, hence the "&" early-clobber.
     */
    __asm__ volatile ("vsqrt.f64 %P0, %P1" : "=&w" (result) : "w" (x));
#endif
    return result;
}

__declare_extern_inline(double)
fabs(double x)
{
    double result;
    __asm__ ("vabs.f64\t%P0, %P1" : "=w" (result) : "w" (x));
    return result;
}

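/*
 * ARMv8 adds the VRINT family, which maps directly onto the C
 * rounding functions: P rounds toward +Inf (ceil), M toward -Inf
 * (floor), A to nearest with ties away from zero (round), X uses the
 * current mode and raises inexact (rint), Z rounds toward zero
 * (trunc), and R uses the current mode (nearbyint, with the
 * environment saved so the inexact flag is not left behind).
 */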
#if __ARM_ARCH >= 8
__declare_extern_inline(double)
ceil (double x)
{
  double result;
  __asm__ volatile ("vrintp.f64\t%P0, %P1" : "=w" (result) : "w" (x));
  return result;
}

__declare_extern_inline(double)
floor (double x)
{
  double result;
  __asm__ volatile ("vrintm.f64\t%P0, %P1" : "=w" (result) : "w" (x));
  return result;
}

__declare_extern_inline(double)
nearbyint (double x)
{
    if (isnan(x)) return x + x;
#if defined(FE_INEXACT)
    /*
     * nearbyint must not raise FE_INEXACT, but vrintr may set it, so
     * save and restore the environment around it.  FE_INEXACT is
     * only visible here if the consumer included <fenv.h> first.
     */
    fenv_t env;
    fegetenv(&env);
#endif
    __asm__ volatile ("vrintr.f64\t%P0, %P1" : "=w" (x) : "w" (x));
#if defined(FE_INEXACT)
    fesetenv(&env);
#endif
    return x;
}

__declare_extern_inline(double)
rint (double x)
{
  double result;
  __asm__ volatile ("vrintx.f64\t%P0, %P1" : "=w" (result) : "w" (x));
  return result;
}

__declare_extern_inline(double)
round (double x)
{
  double result;
  __asm__ volatile ("vrinta.f64\t%P0, %P1" : "=w" (result) : "w" (x));
  return result;
}

__declare_extern_inline(double)
trunc (double x)
{
  double result;
  __asm__ volatile ("vrintz.f64\t%P0, %P1" : "=w" (result) : "w" (x));
  return result;
}
#endif /* __ARM_ARCH >= 8 */

#if _HAVE_FAST_FMA

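/*
 * vfma accumulates into its destination (z += x * y with a single
 * rounding), which is exactly fma(x, y, z); the "+w" read-write
 * constraint on z reflects that.
 */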
__declare_extern_inline(double)
fma (double x, double y, double z)
{
  __asm__ volatile ("vfma.f64 %P0, %P1, %P2" : "+w" (z) : "w" (x), "w" (y));
  return z;
}

#endif

#endif /* (__ARM_FP & 0x8) && !defined(__SOFTFP__) */

#if (__ARM_FP & 0x4) && !defined(__SOFTFP__)

/*
 * Single precision functions
 */

__declare_extern_inline(float)
sqrtf(float x)
{
    float result;
#ifdef _WANT_MATH_ERRNO
    if (isless(x, 0.0f))
        errno = EDOM;
#endif
#if __ARM_ARCH >= 6
    __asm__ volatile ("vsqrt.f32 %0, %1" : "=w" (result) : "w" (x));
#else
    /* VFP9 Erratum 760019, as in sqrt() above: the early-clobber
       keeps the destination off the source register.  vsqrt touches
       neither condition codes nor memory, so no other clobbers are
       needed. */
    __asm__ volatile ("vsqrt.f32 %0, %1" : "=&w" (result) : "w" (x));
#endif
    return result;
}

__declare_extern_inline(float)
fabsf(float x)
{
    float result;
    __asm__ ("vabs.f32\t%0, %1" : "=t" (result) : "t" (x));
    return result;
}

#if __ARM_ARCH >= 8
__declare_extern_inline(float)
ceilf (float x)
{
  float result;
  __asm__ volatile ("vrintp.f32\t%0, %1" : "=t" (result) : "t" (x));
  return result;
}

__declare_extern_inline(float)
floorf (float x)
{
  float result;
  __asm__ volatile ("vrintm.f32\t%0, %1" : "=t" (result) : "t" (x));
  return result;
}

__declare_extern_inline(float)
nearbyintf (float x)
{
    if (isnan(x)) return x + x;
#if defined(FE_INEXACT)
    /* As in nearbyint(): keep vrintr from leaving FE_INEXACT set */
    fenv_t env;
    fegetenv(&env);
#endif
    __asm__ volatile ("vrintr.f32\t%0, %1" : "=t" (x) : "t" (x));
#if defined(FE_INEXACT)
    fesetenv(&env);
#endif
    return x;
}

__declare_extern_inline(float)
rintf (float x)
{
  float result;
  __asm__ volatile ("vrintx.f32\t%0, %1" : "=t" (result) : "t" (x));
  return result;
}

__declare_extern_inline(float)
roundf (float x)
{
  float result;
  __asm__ volatile ("vrinta.f32\t%0, %1" : "=t" (result) : "t" (x));
  return result;
}

__declare_extern_inline(float)
truncf (float x)
{
  float result;
  __asm__ volatile ("vrintz.f32\t%0, %1" : "=t" (result) : "t" (x));
  return result;
}
#endif /* __ARM_ARCH >= 8 */

#if _HAVE_FAST_FMAF

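/* Same single-rounding accumulate as fma() above, on s registers */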
__declare_extern_inline(float)
fmaf (float x, float y, float z)
{
  __asm__ volatile ("vfma.f32 %0, %1, %2" : "+t" (z) : "t" (x), "t" (y));
  return z;
}

#endif

#endif /* (__ARM_FP & 0x4) && !defined(__SOFTFP__) */
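
/*
 * Illustration (a hypothetical build, not part of this header): GCC
 * targeting a core with a double-precision VFP unit and
 * -mfloat-abi=hard defines __ARM_FP with both 0x4 and 0x8 set, so
 * sqrt(), fabs() and fma() above inline to single VFP instructions
 * instead of libm calls; the VRINT-based rounding functions
 * additionally require __ARM_ARCH >= 8.
 */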

#endif /* __declare_extern_inline */

#endif /* _MACHINE_MATH_H_ */