/*
 * Copyright (c) 2016, 2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_NEMATH_H__
#define __ARM_COMPUTE_NEMATH_H__

#if defined(ARM_MATH_NEON)

#include <arm_neon.h>

#if defined(__aarch64__)

/** Perform a 7th degree polynomial approximation using Estrin's method.
 *
 * @param[in] x      Input vector value in F64 format.
 * @param[in] coeffs Polynomial coefficients table. (array of flattened float64x2_t vectors)
 *
 * @return The calculated approximation.
 */
static inline float64x2_t vtaylor_polyq_f64(float64x2_t x, const float64_t *coeffs);

/** Calculate reciprocal.
 *
 * @param[in] x Input value.
 *
 * @return The calculated reciprocal.
 */
static inline float64x2_t vinvq_f64(float64x2_t x);

#endif /* #if defined(__aarch64__) */

/** Calculate floor of a vector.
 *
 * @param[in] val Input vector value in F32 format.
 *
 * @return The calculated floor vector.
 */
static inline float32x4_t vfloorq_f32(float32x4_t val);

/** Calculate inverse square root.
 *
 * @param[in] x Input value.
 *
 * @return The calculated inverse square root.
 */
static inline float32x2_t vinvsqrt_f32(float32x2_t x);

/** Calculate inverse square root.
 *
 * @param[in] x Input value.
 *
 * @return The calculated inverse square root.
 */
static inline float32x4_t vinvsqrtq_f32(float32x4_t x);

/** Calculate reciprocal.
 *
 * @param[in] x Input value.
 *
 * @return The calculated reciprocal.
 */
static inline float32x2_t vinv_f32(float32x2_t x);

/** Calculate reciprocal.
 *
 * @param[in] x Input value.
 *
 * @return The calculated reciprocal.
 */
static inline float32x4_t vinvq_f32(float32x4_t x);

/** Perform a 7th degree polynomial approximation using Estrin's method.
 *
 * @param[in] x      Input vector value in F32 format.
 * @param[in] coeffs Polynomial coefficients table. (array of flattened float32x4_t vectors)
 *
 * @return The calculated approximation.
 */
static inline float32x4_t vtaylor_polyq_f32(float32x4_t x, const float32_t *coeffs);

/** Calculate exponential.
 *
 * @param[in] x Input vector value in F32 format.
 *
 * @return The calculated exponential.
 */
static inline float32x4_t vexpq_f32(float32x4_t x);

/** Calculate natural logarithm.
 *
 * @param[in] x Input vector value in F32 format.
 *
 * @return The calculated logarithm.
 */
static inline float32x4_t vlogq_f32(float32x4_t x);

/** Calculate hyperbolic tangent.
 *
 * tanh(x) = (e^2x - 1)/(e^2x + 1)
 *
 * @note We clamp x to [-10,10] to avoid overflow issues.
 *
 * @param[in] val Input vector value in F32 format.
 *
 * @return The calculated Hyperbolic Tangent.
 */
static inline float32x4_t vtanhq_f32(float32x4_t val);

/** Calculate n-th power of a number.
 *
 * pow(x,n) = e^(n*log(x))
 *
 * @param[in] val Input vector value in F32 format.
 * @param[in] n   Powers to raise the input to.
 *
 * @return The calculated power.
 */
static inline float32x4_t vpowq_f32(float32x4_t val, float32x4_t n);
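
/* Illustrative usage (not part of the library API): the helpers declared above
 * compose like ordinary NEON intrinsics. A rough, hypothetical sketch of a
 * numerically stable exponential over a buffer whose length is a multiple of 4,
 * with a caller-provided maximum max_val:
 *
 *     const float32x4_t vmax = vdupq_n_f32(max_val);
 *     for(size_t i = 0; i < len; i += 4)
 *     {
 *         float32x4_t v = vld1q_f32(&src[i]);
 *         v             = vexpq_f32(vsubq_f32(v, vmax)); // exp(x - max) avoids overflow
 *         vst1q_f32(&dst[i], v);
 *     }
 */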

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
/** Calculate hyperbolic tangent.
 *
 * tanh(x) = (e^2x - 1)/(e^2x + 1)
 *
 * @note We clamp x to [-10,10] to avoid overflow issues.
 *
 * @param[in] val Input vector value in F16 format.
 *
 * @return The calculated Hyperbolic Tangent.
 */
static inline float16x8_t vtanhq_f16(float16x8_t val);

/** Calculate reciprocal.
 *
 * @param[in] x Input value.
 *
 * @return The calculated reciprocal.
 */
static inline float16x4_t vinv_f16(float16x4_t x);

/** Calculate reciprocal.
 *
 * @param[in] x Input value.
 *
 * @return The calculated reciprocal.
 */
static inline float16x8_t vinvq_f16(float16x8_t x);

/** Calculate inverse square root.
 *
 * @param[in] x Input value.
 *
 * @return The calculated inverse square root.
 */
static inline float16x4_t vinvsqrt_f16(float16x4_t x);

/** Calculate inverse square root.
 *
 * @param[in] x Input value.
 *
 * @return The calculated inverse square root.
 */
static inline float16x8_t vinvsqrtq_f16(float16x8_t x);

/** Calculate exponential.
 *
 * @param[in] x Input vector value in F16 format.
 *
 * @return The calculated exponential.
 */
static inline float16x8_t vexpq_f16(float16x8_t x);

/** Calculate n-th power of a number.
 *
 * pow(x,n) = e^(n*log(x))
 *
 * @param[in] val Input vector value in F16 format.
 * @param[in] n   Powers to raise the input to.
 *
 * @return The calculated power.
 */
static inline float16x8_t vpowq_f16(float16x8_t val, float16x8_t n);
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

/** Exponent polynomial coefficients */
extern const float32_t exp_tab[4*8];

/** Exponent polynomial coefficients (F64) */
extern const float64_t exp_tab_64[2*8];

/** Logarithm polynomial coefficients */
extern const float32_t log_tab[4*8];

/** Logarithm polynomial coefficients (F64) */
extern const float64_t log_tab_64[2*8];

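/* Layout note: each table above holds eight coefficient vectors flattened
 * back-to-back, so &coeffs[4*i] (F32) or &coeffs[2*i] (F64) is the start of the
 * i-th vector, presumably with the same scalar coefficient broadcast across its
 * lanes. vtaylor_polyq_f32()/vtaylor_polyq_f64() below reload them exactly that
 * way, e.g. vld1q_f32(&coeffs[4*0]) ... vld1q_f32(&coeffs[4*7]).
 */
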
#ifndef DOXYGEN_SKIP_THIS
static inline float32x4_t vfloorq_f32(float32x4_t val)
{
    static const float32_t CONST_1[4] = {1.f,1.f,1.f,1.f};

    const int32x4_t   z = vcvtq_s32_f32(val);
    const float32x4_t r = vcvtq_f32_s32(z);

    return vbslq_f32(vcgtq_f32(r, val), vsubq_f32(r, vld1q_f32(CONST_1)), r);
}
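
/* The reciprocal and reciprocal-square-root helpers below refine a hardware
 * estimate with two Newton-Raphson steps. Per the ACLE definitions,
 * vrecps(a, x) returns 2 - a*x and vrsqrts(a, x) returns (3 - a*x)/2, so
 *
 *     x <- x * vrecps(a, x)          converges towards 1/a
 *     x <- x * vrsqrts(a * x, x)     converges towards 1/sqrt(a)
 *
 * and each step roughly doubles the number of correct bits of the estimate.
 */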

static inline float32x2_t vinvsqrt_f32(float32x2_t x)
{
    float32x2_t sqrt_reciprocal = vrsqrte_f32(x);
    sqrt_reciprocal             = vmul_f32(vrsqrts_f32(vmul_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    sqrt_reciprocal             = vmul_f32(vrsqrts_f32(vmul_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);

    return sqrt_reciprocal;
}

static inline float32x4_t vinvsqrtq_f32(float32x4_t x)
{
    float32x4_t sqrt_reciprocal = vrsqrteq_f32(x);
    sqrt_reciprocal             = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    sqrt_reciprocal             = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);

    return sqrt_reciprocal;
}

static inline float32x2_t vinv_f32(float32x2_t x)
{
    float32x2_t recip = vrecpe_f32(x);
    recip             = vmul_f32(vrecps_f32(x, recip), recip);
    recip             = vmul_f32(vrecps_f32(x, recip), recip);
    return recip;
}

static inline float32x4_t vinvq_f32(float32x4_t x)
{
    float32x4_t recip = vrecpeq_f32(x);
    recip             = vmulq_f32(vrecpsq_f32(x, recip), recip);
    recip             = vmulq_f32(vrecpsq_f32(x, recip), recip);
    return recip;
}

#if defined(__aarch64__)

static inline float64x2_t vinvq_f64(float64x2_t x)
{
    float64x2_t recip = vrecpeq_f64(x);
    recip             = vmulq_f64(vrecpsq_f64(x, recip), recip);
    recip             = vmulq_f64(vrecpsq_f64(x, recip), recip);
    return recip;
}

#endif /* #if defined(__aarch64__) */

static inline float32x4_t vtaylor_polyq_f32(float32x4_t x, const float32_t *coeffs)
{
    float32x4_t A   = vmlaq_f32(vld1q_f32(&coeffs[4*0]), vld1q_f32(&coeffs[4*4]), x);
    float32x4_t B   = vmlaq_f32(vld1q_f32(&coeffs[4*2]), vld1q_f32(&coeffs[4*6]), x);
    float32x4_t C   = vmlaq_f32(vld1q_f32(&coeffs[4*1]), vld1q_f32(&coeffs[4*5]), x);
    float32x4_t D   = vmlaq_f32(vld1q_f32(&coeffs[4*3]), vld1q_f32(&coeffs[4*7]), x);
    float32x4_t x2  = vmulq_f32(x, x);
    float32x4_t x4  = vmulq_f32(x2, x2);
    float32x4_t res = vmlaq_f32(vmlaq_f32(A, B, x2), vmlaq_f32(C, D, x2), x4);
    return res;
}
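
/* For reference, expanding the Estrin evaluation above (with c_i the i-th
 * coefficient vector of the table) gives
 *
 *     poly(x) = c0 + c4*x + c2*x^2 + c6*x^3 + c1*x^4 + c5*x^5 + c3*x^6 + c7*x^7
 *
 * so the table is not stored in ascending power order. Grouping the terms as
 * (A + B*x^2) + (C + D*x^2)*x^4 keeps the four multiply-accumulate chains
 * independent, which lets them issue in parallel.
 */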

#if defined(__aarch64__)

static inline float64x2_t vtaylor_polyq_f64(float64x2_t x, const float64_t *coeffs)
{
    float64x2_t A   = vmlaq_f64(vld1q_f64(&coeffs[2*0]), vld1q_f64(&coeffs[2*4]), x);
    float64x2_t B   = vmlaq_f64(vld1q_f64(&coeffs[2*2]), vld1q_f64(&coeffs[2*6]), x);
    float64x2_t C   = vmlaq_f64(vld1q_f64(&coeffs[2*1]), vld1q_f64(&coeffs[2*5]), x);
    float64x2_t D   = vmlaq_f64(vld1q_f64(&coeffs[2*3]), vld1q_f64(&coeffs[2*7]), x);
    float64x2_t x2  = vmulq_f64(x, x);
    float64x2_t x4  = vmulq_f64(x2, x2);
    float64x2_t res = vmlaq_f64(vmlaq_f64(A, B, x2), vmlaq_f64(C, D, x2), x4);
    return res;
}

#endif /* #if defined(__aarch64__) */

static inline float32x4_t vexpq_f32(float32x4_t x)
{
    static const float32_t CONST_LN2[4]          = {0.6931471805f,0.6931471805f,0.6931471805f,0.6931471805f}; // ln(2)
    static const float32_t CONST_INV_LN2[4]      = {1.4426950408f,1.4426950408f,1.4426950408f,1.4426950408f}; // 1/ln(2)
    static const float32_t CONST_0[4]            = {0.f,0.f,0.f,0.f};
    static const int32_t   CONST_NEGATIVE_126[4] = {-126,-126,-126,-126};

    // Perform range reduction [-log(2),log(2)]
    int32x4_t   m   = vcvtq_s32_f32(vmulq_f32(x, vld1q_f32(CONST_INV_LN2)));
    float32x4_t val = vmlsq_f32(x, vcvtq_f32_s32(m), vld1q_f32(CONST_LN2));

    // Polynomial Approximation
    float32x4_t poly = vtaylor_polyq_f32(val, exp_tab);

    // Reconstruct
    poly = vreinterpretq_f32_s32(vqaddq_s32(vreinterpretq_s32_f32(poly), vqshlq_n_s32(m, 23)));
    poly = vbslq_f32(vcltq_s32(m, vld1q_s32(CONST_NEGATIVE_126)), vld1q_f32(CONST_0), poly);

    return poly;
}
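
/* Sketch of the scheme used above: with m = (int)(x / ln(2)) and r = x - m*ln(2),
 *
 *     exp(x) = 2^m * exp(r),    r roughly within (-ln(2), ln(2))
 *
 * exp(r) is approximated with the exp_tab polynomial, and the 2^m scaling is
 * applied by adding m to the biased exponent field (m << 23) with a saturating
 * integer add. Inputs with m < -126 would fall into the underflow/denormal
 * range, so the final select flushes them to zero.
 */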

static inline float32x4_t vlogq_f32(float32x4_t x)
{
    static const int32_t   CONST_127[4] = {127,127,127,127};                                          // 127
    static const float32_t CONST_LN2[4] = {0.6931471805f,0.6931471805f,0.6931471805f,0.6931471805f}; // ln(2)

    // Extract exponent
    int32x4_t   m   = vsubq_s32(vreinterpretq_s32_u32(vshrq_n_u32(vreinterpretq_u32_f32(x), 23)), vld1q_s32(CONST_127));
    float32x4_t val = vreinterpretq_f32_s32(vsubq_s32(vreinterpretq_s32_f32(x), vshlq_n_s32(m, 23)));

    // Polynomial Approximation
    float32x4_t poly = vtaylor_polyq_f32(val, log_tab);

    // Reconstruct
    poly = vmlaq_f32(poly, vcvtq_f32_s32(m), vld1q_f32(CONST_LN2));

    return poly;
}
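
/* Sketch of the scheme used above: the exponent m is peeled off the IEEE-754
 * encoding so that x = 2^m * v with v remapped into [1, 2), then
 *
 *     log(x) = m * ln(2) + log(v)
 *
 * where log(v) is approximated with the log_tab polynomial. As written, the
 * helper assumes positive, finite, normal inputs.
 */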

static inline float32x4_t vtanhq_f32(float32x4_t val)
{
    static const float32_t CONST_1[4]        = {1.f,1.f,1.f,1.f};
    static const float32_t CONST_2[4]        = {2.f,2.f,2.f,2.f};
    static const float32_t CONST_MIN_TANH[4] = {-10.f,-10.f,-10.f,-10.f};
    static const float32_t CONST_MAX_TANH[4] = {10.f,10.f,10.f,10.f};

    float32x4_t x     = vminq_f32(vmaxq_f32(val, vld1q_f32(CONST_MIN_TANH)), vld1q_f32(CONST_MAX_TANH));
    float32x4_t exp2x = vexpq_f32(vmulq_f32(vld1q_f32(CONST_2), x));
    float32x4_t num   = vsubq_f32(exp2x, vld1q_f32(CONST_1));
    float32x4_t den   = vaddq_f32(exp2x, vld1q_f32(CONST_1));
    float32x4_t tanh  = vmulq_f32(num, vinvq_f32(den));
    return tanh;
}
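
/* Note on the clamp above: tanh saturates very quickly; 1 - tanh(10) is on the
 * order of 4e-9, which is already below single-precision resolution around 1.0
 * (~1.2e-7). Clamping the argument to [-10, 10] therefore does not change the
 * representable result, while keeping exp(2x) well away from overflow for large
 * inputs.
 */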

static inline float32x4_t vpowq_f32(float32x4_t val, float32x4_t n)
{
    return vexpq_f32(vmulq_f32(n, vlogq_f32(val)));
}
#endif /* DOXYGEN_SKIP_THIS */

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
#ifndef DOXYGEN_SKIP_THIS
static inline float16x8_t vfloorq_f16(float16x8_t val)
{
    static const float16_t CONST_1[8] = {1.f,1.f,1.f,1.f,1.f,1.f,1.f,1.f};

    const int16x8_t   z = vcvtq_s16_f16(val);
    const float16x8_t r = vcvtq_f16_s16(z);

    return vbslq_f16(vcgtq_f16(r, val), vsubq_f16(r, vld1q_f16(CONST_1)), r);
}

static inline float16x4_t vinvsqrt_f16(float16x4_t x)
{
    float16x4_t sqrt_reciprocal = vrsqrte_f16(x);
    sqrt_reciprocal             = vmul_f16(vrsqrts_f16(vmul_f16(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    sqrt_reciprocal             = vmul_f16(vrsqrts_f16(vmul_f16(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    return sqrt_reciprocal;
}

static inline float16x8_t vinvsqrtq_f16(float16x8_t x)
{
    float16x8_t sqrt_reciprocal = vrsqrteq_f16(x);
    sqrt_reciprocal             = vmulq_f16(vrsqrtsq_f16(vmulq_f16(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    sqrt_reciprocal             = vmulq_f16(vrsqrtsq_f16(vmulq_f16(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    return sqrt_reciprocal;
}

static inline float16x4_t vinv_f16(float16x4_t x)
{
    float16x4_t recip = vrecpe_f16(x);
    recip             = vmul_f16(vrecps_f16(x, recip), recip);
    recip             = vmul_f16(vrecps_f16(x, recip), recip);
    return recip;
}

static inline float16x8_t vinvq_f16(float16x8_t x)
{
    float16x8_t recip = vrecpeq_f16(x);
    recip             = vmulq_f16(vrecpsq_f16(x, recip), recip);
    recip             = vmulq_f16(vrecpsq_f16(x, recip), recip);
    return recip;
}

static inline float16x8_t vtanhq_f16(float16x8_t val)
{
    const float16_t CONST_1[8]        = {1.f,1.f,1.f,1.f,1.f,1.f,1.f,1.f};
    const float16_t CONST_2[8]        = {2.f,2.f,2.f,2.f,2.f,2.f,2.f,2.f};
    const float16_t CONST_MIN_TANH[8] = {-10.f,-10.f,-10.f,-10.f,-10.f,-10.f,-10.f,-10.f};
    const float16_t CONST_MAX_TANH[8] = {10.f,10.f,10.f,10.f,10.f,10.f,10.f,10.f};

    const float16x8_t x     = vminq_f16(vmaxq_f16(val, vld1q_f16(CONST_MIN_TANH)), vld1q_f16(CONST_MAX_TANH));
    const float16x8_t exp2x = vexpq_f16(vmulq_f16(vld1q_f16(CONST_2), x));
    const float16x8_t num   = vsubq_f16(exp2x, vld1q_f16(CONST_1));
    const float16x8_t den   = vaddq_f16(exp2x, vld1q_f16(CONST_1));
    const float16x8_t tanh  = vmulq_f16(num, vinvq_f16(den));
    return tanh;
}

static inline float16x8_t vtaylor_polyq_f16(float16x8_t x, const float16_t *coeffs)
{
    const float16x8_t A   = vaddq_f16(vld1q_f16(&coeffs[8*0]), vmulq_f16(vld1q_f16(&coeffs[8*4]), x));
    const float16x8_t B   = vaddq_f16(vld1q_f16(&coeffs[8*2]), vmulq_f16(vld1q_f16(&coeffs[8*6]), x));
    const float16x8_t C   = vaddq_f16(vld1q_f16(&coeffs[8*1]), vmulq_f16(vld1q_f16(&coeffs[8*5]), x));
    const float16x8_t D   = vaddq_f16(vld1q_f16(&coeffs[8*3]), vmulq_f16(vld1q_f16(&coeffs[8*7]), x));
    const float16x8_t x2  = vmulq_f16(x, x);
    const float16x8_t x4  = vmulq_f16(x2, x2);
    const float16x8_t res = vaddq_f16(vaddq_f16(A, vmulq_f16(B, x2)), vmulq_f16(vaddq_f16(C, vmulq_f16(D, x2)), x4));
    return res;
}

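/* The exp/log/pow helpers below do not approximate in half precision directly:
 * each input is widened to two float32x4_t halves, evaluated with the F32
 * routines above, and narrowed back to float16x8_t, trading some speed for the
 * accuracy of the F32 polynomials (see the COMPMID-1535 TODOs).
 */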
static inline float16x8_t vexpq_f16(float16x8_t x)
{
    // TODO (COMPMID-1535) : Revisit FP16 approximations
    const float32x4_t x_high = vcvt_f32_f16(vget_high_f16(x));
    const float32x4_t x_low  = vcvt_f32_f16(vget_low_f16(x));

    const float16x8_t res = vcvt_high_f16_f32(vcvt_f16_f32(vexpq_f32(x_low)), vexpq_f32(x_high));
    return res;
}

static inline float16x8_t vlogq_f16(float16x8_t x)
{
    // TODO (COMPMID-1535) : Revisit FP16 approximations
    const float32x4_t x_high = vcvt_f32_f16(vget_high_f16(x));
    const float32x4_t x_low  = vcvt_f32_f16(vget_low_f16(x));

    const float16x8_t res = vcvt_high_f16_f32(vcvt_f16_f32(vlogq_f32(x_low)), vlogq_f32(x_high));
    return res;
}

static inline float16x8_t vpowq_f16(float16x8_t val, float16x8_t n)
{
    // TODO (giaiod01) - COMPMID-1535
    float32x4_t n0_f32   = vcvt_f32_f16(vget_low_f16(n));
    float32x4_t n1_f32   = vcvt_f32_f16(vget_high_f16(n));
    float32x4_t val0_f32 = vcvt_f32_f16(vget_low_f16(val));
    float32x4_t val1_f32 = vcvt_f32_f16(vget_high_f16(val));

    float32x4_t res0_f32 = vexpq_f32(vmulq_f32(n0_f32, vlogq_f32(val0_f32)));
    float32x4_t res1_f32 = vexpq_f32(vmulq_f32(n1_f32, vlogq_f32(val1_f32)));

    return vcombine_f16(vcvt_f16_f32(res0_f32), vcvt_f16_f32(res1_f32));
}
#endif /* DOXYGEN_SKIP_THIS */
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
#endif /* ARM_MATH_NEON */
#endif /* __ARM_COMPUTE_NEMATH_H__ */