1 /* Double-precision e^x function.
2    Copyright (c) 2018 Arm Ltd.  All rights reserved.
3 
4    SPDX-License-Identifier: BSD-3-Clause
5 
6    Redistribution and use in source and binary forms, with or without
7    modification, are permitted provided that the following conditions
8    are met:
9    1. Redistributions of source code must retain the above copyright
10       notice, this list of conditions and the following disclaimer.
11    2. Redistributions in binary form must reproduce the above copyright
12       notice, this list of conditions and the following disclaimer in the
13       documentation and/or other materials provided with the distribution.
14    3. The name of the company may not be used to endorse or promote
15       products derived from this software without specific prior written
16       permission.
17 
18    THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
19    WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20    MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21    IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
23    TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
24    PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
25    LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
26    NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
27    SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
28 
29 #include "fdlibm.h"
30 #if !__OBSOLETE_MATH_DOUBLE
31 
32 #include <math.h>
33 #include <stdint.h>
34 #include "math_config.h"
35 
36 #define N (1 << EXP_TABLE_BITS)
37 #define InvLn2N __exp_data.invln2N
38 #define NegLn2hiN __exp_data.negln2hiN
39 #define NegLn2loN __exp_data.negln2loN
40 #define Shift __exp_data.shift
41 #define T __exp_data.tab
42 #define C2 __exp_data.poly[5 - EXP_POLY_ORDER]
43 #define C3 __exp_data.poly[6 - EXP_POLY_ORDER]
44 #define C4 __exp_data.poly[7 - EXP_POLY_ORDER]
45 #define C5 __exp_data.poly[8 - EXP_POLY_ORDER]
46 #define C6 __exp_data.poly[9 - EXP_POLY_ORDER]
47 
48 /* Handle cases that may overflow or underflow when computing the result that
49    is scale*(1+TMP) without intermediate rounding.  The bit representation of
50    scale is in SBITS, however it has a computed exponent that may have
51    overflown into the sign bit so that needs to be adjusted before using it as
52    a double.  (int32_t)KI is the k used in the argument reduction and exponent
53    adjustment of scale, positive k here means the result may overflow and
54    negative k means the result may underflow.  */
static inline double
specialcase (double_t tmp, uint64_t sbits, uint64_t ki)
{
  double_t scale, y;

  /* Bit 31 of ki is the sign of (int32_t)ki: clear means k > 0 (possible
     overflow), set means k < 0 (possible underflow).  */
  if ((ki & 0x80000000) == 0)
    {
      /* k > 0, the exponent of scale might have overflowed by <= 460.  */
      /* Pull the biased exponent down by 1009 so sbits is a finite double,
	 then undo the adjustment with an exact 2^1009 multiply; the final
	 rounding (and any overflow) happens in that single multiply.  */
      sbits -= 1009ull << 52;
      scale = asfloat64 (sbits);
      y = 0x1p1009 * (scale + scale * tmp);
      return check_oflow (y);
    }
  /* k < 0, need special care in the subnormal range.  */
  /* Push the biased exponent up by 1022 so scale is a normal double;
     the result is scaled back down by 2^-1022 at the end.  */
  sbits += 1022ull << 52;
  scale = asfloat64 (sbits);
  y = scale + scale * tmp;
  /* y < 1.0 here means the true result 2^-1022 * y is subnormal.  */
  if (y < 1.0)
    {
      /* Round y to the right precision before scaling it into the subnormal
	 range to avoid double rounding that can cause 0.5+E/2 ulp error where
	 E is the worst-case ulp error outside the subnormal range.  So this
	 is only useful if the goal is better than 1 ulp worst-case error.  */
      double_t hi, lo;
      /* lo recovers the rounding error of y = scale + scale*tmp; adding
	 1.0 forces hi + lo to round at the bit position the subnormal
	 result will have, then the 1.0 is subtracted back out exactly.  */
      lo = scale - y + scale * tmp;
      hi = 1.0 + y;
      lo = 1.0 - hi + y + lo;
      y = eval_as_double (hi + lo) - 1.0;
      /* Avoid -0.0 with downward rounding.  */
      if (WANT_ROUNDING && y == 0.0)
	y = 0.0;
      /* The underflow exception needs to be signaled explicitly.  */
      force_eval_double (opt_barrier_double (0x1p-1022) * 0x1p-1022);
    }
  y = 0x1p-1022 * y;
  return check_uflow (y);
}
92 
/* Extract the top 12 bits (sign and biased exponent) of the IEEE-754
   representation of X.  */
static inline uint32_t
top12 (double x)
{
  uint64_t ix = asuint64 (x);
  return (uint32_t) (ix >> 52);
}
99 
double
exp (double x)
{
  uint32_t abstop;
  uint64_t ki, idx, top, sbits;
  /* double_t for better performance on targets with FLT_EVAL_METHOD==2.  */
  double_t kd, z, r, r2, scale, tail, tmp;

  /* abstop is the biased exponent field of x with the sign masked off.  */
  abstop = top12 (x) & 0x7ff;
  /* Single unsigned-wraparound compare covering both |x| < 0x1p-54 (the
     subtraction wraps past 0x80000000) and |x| >= 512.  */
  if (unlikely (abstop - top12 (0x1p-54) >= top12 (512.0) - top12 (0x1p-54)))
    {
      if (abstop - top12 (0x1p-54) >= 0x80000000)
	/* Avoid spurious underflow for tiny x.  */
	/* Note: 0 is common input.  */
	return WANT_ROUNDING ? 1.0 + x : 1.0;
      /* |x| >= 1024: guaranteed overflow/underflow, or x is inf/nan.  */
      if (abstop >= top12 (1024.0))
	{
	  if (asuint64 (x) == asuint64 ((double) -INFINITY))
	    return 0.0;
	  /* inf (returns inf) or nan (1.0 + nan propagates the nan).  */
	  if (abstop >= top12 ((double) INFINITY))
	    return 1.0 + x;
	  /* Sign bit set: exp underflows; otherwise it overflows.  */
	  if (asuint64 (x) >> 63)
	    return __math_uflow (0);
	  else
	    return __math_oflow (0);
	}
      /* Large x is special cased below.  */
      abstop = 0;
    }

  /* exp(x) = 2^(k/N) * exp(r), with exp(r) in [2^(-1/2N),2^(1/2N)].  */
  /* x = ln2/N*k + r, with int k and r in [-ln2/2N, ln2/2N].  */
  z = InvLn2N * x;
#if TOINT_INTRINSICS
  kd = roundtoint (z);
  ki = converttoint (z);
#elif EXP_USE_TOINT_NARROW
  /* z - kd is in [-0.5-2^-16, 0.5] in all rounding modes.  */
  kd = eval_as_double (z + Shift);
  ki = asuint64 (kd) >> 16;
  kd = (double_t) (int32_t) ki;
#else
  /* z - kd is in [-1, 1] in non-nearest rounding modes.  */
  /* Adding the large constant Shift forces rounding so that the low bits
     of the bit pattern of z + Shift hold the integer k.  */
  kd = eval_as_double (z + Shift);
  ki = asuint64 (kd);
  kd -= Shift;
#endif
  /* r = x - k*ln2/N, with ln2/N split into hi and lo parts for accuracy.  */
  r = x + kd * NegLn2hiN + kd * NegLn2loN;
  /* 2^(k/N) ~= scale * (1 + tail).  */
  /* Table entries are pairs, hence the factor of 2 in the index.  */
  idx = 2 * (ki % N);
  /* k/N contributes the exponent of scale; merge it into the table value.  */
  top = ki << (52 - EXP_TABLE_BITS);
  tail = asfloat64 (T[idx]);
  /* This is only a valid scale when -1023*N < k < 1024*N.  */
  sbits = T[idx + 1] + top;
  /* exp(x) = 2^(k/N) * exp(r) ~= scale + scale * (tail + exp(r) - 1).  */
  /* Evaluation is optimized assuming superscalar pipelined execution.  */
  r2 = r * r;
  /* Without fma the worst case error is 0.25/N ulp larger.  */
  /* Worst case error is less than 0.5+1.11/N+(abs poly error * 2^53) ulp.  */
#if EXP_POLY_ORDER == 4
  tmp = tail + r + r2 * C2 + r * r2 * (C3 + r * C4);
#elif EXP_POLY_ORDER == 5
  tmp = tail + r + r2 * (C2 + r * C3) + r2 * r2 * (C4 + r * C5);
#elif EXP_POLY_ORDER == 6
  tmp = tail + r + r2 * (0.5 + r * C3) + r2 * r2 * (C4 + r * C5 + r2 * C6);
#endif
  /* abstop was zeroed above for large finite |x|: the scale bit pattern
     may have over/underflowed, so finish out of line.  */
  if (unlikely (abstop == 0))
    return specialcase (tmp, sbits, ki);
  scale = asfloat64 (sbits);
  /* Note: tmp == 0 or |tmp| > 2^-65 and scale > 2^-739, so there
     is no spurious underflow here even without fma.  */
  return scale + scale * tmp;
}
173 
/* NOTE(review): _MATH_ALIAS_d_d presumably emits alias symbols for exp
   (d_d = double argument, double return); the macro is defined outside
   this file (fdlibm.h/math_config.h) — confirm there.  */
_MATH_ALIAS_d_d(exp)
175 
176 #endif
177