/* --------------------------------------------------------------  */
/* (C)Copyright 2007,2008,                                         */
/* International Business Machines Corporation                     */
/* All Rights Reserved.                                            */
/*                                                                 */
/* Redistribution and use in source and binary forms, with or      */
/* without modification, are permitted provided that the           */
/* following conditions are met:                                   */
/*                                                                 */
/* - Redistributions of source code must retain the above copyright*/
/*   notice, this list of conditions and the following disclaimer. */
/*                                                                 */
/* - Redistributions in binary form must reproduce the above       */
/*   copyright notice, this list of conditions and the following   */
/*   disclaimer in the documentation and/or other materials        */
/*   provided with the distribution.                               */
/*                                                                 */
/* - Neither the name of IBM Corporation nor the names of its      */
/*   contributors may be used to endorse or promote products       */
/*   derived from this software without specific prior written     */
/*   permission.                                                   */
/*                                                                 */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND          */
/* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,     */
/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF        */
/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE        */
/* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR            */
/* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,    */
/* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT    */
/* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;    */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)        */
/* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN       */
/* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR    */
/* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,  */
/* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.              */
/* --------------------------------------------------------------  */
/* PROLOG END TAG zYx                                              */
#ifdef __SPU__
#ifndef _TGAMMAF4_H_
#define _TGAMMAF4_H_	1

#include <spu_intrinsics.h>
#include "simdmath.h"

#include "recipf4.h"
#include "truncf4.h"
#include "expf4.h"
#include "logf4.h"
#include "divf4.h"
#include "sinf4.h"
#include "powf4.h"
#include "tgammad2.h"

/*
 * FUNCTION
 *  vector float _tgammaf4(vector float x)
 *
 * DESCRIPTION
 *  The tgammaf4 function returns a vector containing tgamma for each
 *  element of x.
 *
 *	We take a fairly standard approach - break the domain into 5 separate regions:
 *
 *	1. [-infinity, 0)  - use the reflection formula gamma(x) = pi/(gamma(1-x)*sin(pi*x))
 *	2. [0, 1)          - push x into [1,2), then adjust the
 *	                     result.
 *	3. [1, 2)          - use a rational approximation.
 *	4. [2, 10)         - pull back into [1, 2), then adjust
 *	                     the result.
 *	5. [10, +infinity] - use Stirling's Approximation.
 *
 *
 * Special Cases:
 *	- tgamma(+/- 0) returns +/- infinity
 *	- tgamma(negative integer) returns NaN
 *	- tgamma(-infinity) returns NaN
 *	- tgamma(infinity) returns infinity
 *
 */
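
/*
 * Usage sketch (illustrative only, not part of the original interface):
 * the function evaluates tgamma in all four float slots at once.  The
 * values below are approximate reference results, assuming an SPU build
 * with the simdmath headers on the include path.
 *
 *   vector float x = (vector float){ 0.5f, 1.0f, 4.0f, -2.5f };
 *   vector float g = _tgammaf4(x);
 *   // g ~= { 1.7724539f, 1.0f, 6.0f, -0.9453087f }
 */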

/*
 * Coefficients for Stirling's Series for Gamma() are defined in
 * tgammad2.h
 */

/*
 * Rational Approximation Coefficients for the
 * domain [1, 2) are defined in tgammad2.h
 */


static __inline vector float _tgammaf4(vector float x)
{
    vector float signbit = spu_splats(-0.0f);
    vector float zerof   = spu_splats(0.0f);
    vector float halff   = spu_splats(0.5f);
    vector float onef    = spu_splats(1.0f);
    vector float ninep9f = (vector float)spu_splats(0x411FFFFF); /* Largest float less than 10.0 */
    vector float t38f    = spu_splats(38.0f);
    vector float pi      = spu_splats((float)SM_PI);
    vector float sqrt2pi = spu_splats(2.506628274631000502415765284811f);
    vector float inf     = (vec_float4)spu_splats(0x7F800000);
    vector float nan     = (vec_float4)spu_splats(0x7FFFFFFF);

    vector float xabs;
    vector float xscaled;
    vector float xtrunc;
    vector float xinv;
    vector float nresult; /* Negative x result */
    vector float rresult; /* Rational Approx result */
    vector float sresult; /* Stirling's result */
    vector float result;
    vector float pr,qr;

    vector unsigned int gt0   = spu_cmpgt(x, zerof);
    vector unsigned int gt1   = spu_cmpgt(x, onef);
    vector unsigned int gt9p9 = spu_cmpgt(x, ninep9f);
    vector unsigned int gt38  = spu_cmpgt(x, t38f);

    xabs    = spu_andc(x, signbit);

    /*
     * For x in [0, 1], add 1 to x, use rational
     * approximation, then use:
     *
     * gamma(x) = gamma(x+1)/x
     *
     */
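    /*
     * A worked instance of the identity above:
     * gamma(0.5) = gamma(1.5)/0.5 ~= 0.8862269/0.5 ~= 1.7724539.
     */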
    xabs = spu_sel(spu_add(xabs, onef), xabs, gt1);
    xtrunc = _truncf4(xabs);


    /*
     * For x in [2, 10), pull xabs back into [1, 2) by keeping only its
     * fractional part: xscaled = 1 + (xabs - trunc(xabs)).  For xabs
     * already in [1, 2) this leaves the value unchanged.
     */
    xscaled = spu_add(onef, spu_sub(xabs, xtrunc));

    /*
     * For x in [1,2), use a rational approximation.
     */
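    /*
     * gamma(xscaled) is approximated as P(xscaled)/Q(xscaled); both
     * polynomials are evaluated by Horner's rule from the TGD2_P* and
     * TGD2_Q* coefficients shared with the double-precision tgammad2.
     */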
    pr = spu_madd(xscaled, spu_splats((float)TGD2_P07), spu_splats((float)TGD2_P06));
    pr = spu_madd(pr, xscaled, spu_splats((float)TGD2_P05));
    pr = spu_madd(pr, xscaled, spu_splats((float)TGD2_P04));
    pr = spu_madd(pr, xscaled, spu_splats((float)TGD2_P03));
    pr = spu_madd(pr, xscaled, spu_splats((float)TGD2_P02));
    pr = spu_madd(pr, xscaled, spu_splats((float)TGD2_P01));
    pr = spu_madd(pr, xscaled, spu_splats((float)TGD2_P00));

    qr = spu_madd(xscaled, spu_splats((float)TGD2_Q07), spu_splats((float)TGD2_Q06));
    qr = spu_madd(qr, xscaled, spu_splats((float)TGD2_Q05));
    qr = spu_madd(qr, xscaled, spu_splats((float)TGD2_Q04));
    qr = spu_madd(qr, xscaled, spu_splats((float)TGD2_Q03));
    qr = spu_madd(qr, xscaled, spu_splats((float)TGD2_Q02));
    qr = spu_madd(qr, xscaled, spu_splats((float)TGD2_Q01));
    qr = spu_madd(qr, xscaled, spu_splats((float)TGD2_Q00));

    rresult = _divf4(pr, qr);
    rresult = spu_sel(_divf4(rresult, x), rresult, gt1);

    /*
     * If x was in [2,10) and we pulled it into [1,2), we need to push
     * it back out again.
     */
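    /*
     * Each step applies gamma(y+1) = y*gamma(y) once.  For example:
     * gamma(4.5) = 3.5 * 2.5 * 1.5 * gamma(1.5).
     */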
    rresult = spu_sel(rresult, spu_mul(rresult, xscaled), spu_cmpgt(xabs, xscaled)); /* [2,3) */
    xscaled = spu_add(xscaled, onef);
    rresult = spu_sel(rresult, spu_mul(rresult, xscaled), spu_cmpgt(xabs, xscaled)); /* [3,4) */
    xscaled = spu_add(xscaled, onef);
    rresult = spu_sel(rresult, spu_mul(rresult, xscaled), spu_cmpgt(xabs, xscaled)); /* [4,5) */
    xscaled = spu_add(xscaled, onef);
    rresult = spu_sel(rresult, spu_mul(rresult, xscaled), spu_cmpgt(xabs, xscaled)); /* [5,6) */
    xscaled = spu_add(xscaled, onef);
    rresult = spu_sel(rresult, spu_mul(rresult, xscaled), spu_cmpgt(xabs, xscaled)); /* [6,7) */
    xscaled = spu_add(xscaled, onef);
    rresult = spu_sel(rresult, spu_mul(rresult, xscaled), spu_cmpgt(xabs, xscaled)); /* [7,8) */
    xscaled = spu_add(xscaled, onef);
    rresult = spu_sel(rresult, spu_mul(rresult, xscaled), spu_cmpgt(xabs, xscaled)); /* [8,9) */
    xscaled = spu_add(xscaled, onef);
    rresult = spu_sel(rresult, spu_mul(rresult, xscaled), spu_cmpgt(xabs, xscaled)); /* [9,10) */


    /*
     * For x >= 10, we use Stirling's Approximation
     */
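    /*
     *   gamma(x) ~= sqrt(2*pi) * x^(x - 0.5) * e^(-x) * S(1/x)
     *
     * where S is the truncated Stirling series evaluated below by
     * Horner's rule from the STIRLING_* coefficients in tgammad2.h.
     */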
    vector float sum;
    xinv    = _recipf4(xabs);
    sum = spu_madd(xinv, spu_splats((float)STIRLING_16), spu_splats((float)STIRLING_15));
    sum = spu_madd(sum, xinv, spu_splats((float)STIRLING_14));
    sum = spu_madd(sum, xinv, spu_splats((float)STIRLING_13));
    sum = spu_madd(sum, xinv, spu_splats((float)STIRLING_12));
    sum = spu_madd(sum, xinv, spu_splats((float)STIRLING_11));
    sum = spu_madd(sum, xinv, spu_splats((float)STIRLING_10));
    sum = spu_madd(sum, xinv, spu_splats((float)STIRLING_09));
    sum = spu_madd(sum, xinv, spu_splats((float)STIRLING_08));
    sum = spu_madd(sum, xinv, spu_splats((float)STIRLING_07));
    sum = spu_madd(sum, xinv, spu_splats((float)STIRLING_06));
    sum = spu_madd(sum, xinv, spu_splats((float)STIRLING_05));
    sum = spu_madd(sum, xinv, spu_splats((float)STIRLING_04));
    sum = spu_madd(sum, xinv, spu_splats((float)STIRLING_03));
    sum = spu_madd(sum, xinv, spu_splats((float)STIRLING_02));
    sum = spu_madd(sum, xinv, spu_splats((float)STIRLING_01));
    sum = spu_madd(sum, xinv, spu_splats((float)STIRLING_00));

    sum = spu_mul(sum, sqrt2pi);
    sum = spu_mul(sum, _powf4(x, spu_sub(x, halff)));
    sresult = spu_mul(sum, _expf4(spu_or(x, signbit)));

    /*
     * Choose rational approximation or Stirling's result.
     */
    result = spu_sel(rresult, sresult, gt9p9);

    result = spu_sel(result, inf, gt38);

    /* For x < 0, use Euler's reflection formula:
     *
     * gamma(x) = pi/(gamma(1-x)*sin(pi*x))
     *
     * For negative x, the evaluation above was performed on xabs = 1 - x
     * and divided by x, so x*result below supplies gamma(1-x).
     */
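    /*
     * For example: gamma(-0.5) = pi/(gamma(1.5)*sin(-pi/2))
     *                          = pi/(0.8862269*(-1)) ~= -3.5449077.
     */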
    nresult = _divf4(pi, spu_mul(x, spu_mul(result, _sinf4(spu_mul(x, pi)))));
    result = spu_sel(nresult, result, gt0);

    /*
     * x = non-positive integer, return NaN.
     */
    result = spu_sel(result, nan, spu_andc(spu_cmpeq(xabs, xtrunc), gt0));

    return result;
}

#endif /* _TGAMMAF4_H_ */
#endif /* __SPU__ */
