/* --------------------------------------------------------------- */
/* (C)Copyright 2001,2008,                                          */
/* International Business Machines Corporation,                     */
/* Sony Computer Entertainment, Incorporated,                       */
/* Toshiba Corporation,                                             */
/*                                                                  */
/* All Rights Reserved.                                             */
/*                                                                  */
/* Redistribution and use in source and binary forms, with or       */
/* without modification, are permitted provided that the            */
/* following conditions are met:                                    */
/*                                                                  */
/* - Redistributions of source code must retain the above copyright */
/*   notice, this list of conditions and the following disclaimer.  */
/*                                                                  */
/* - Redistributions in binary form must reproduce the above        */
/*   copyright notice, this list of conditions and the following    */
/*   disclaimer in the documentation and/or other materials         */
/*   provided with the distribution.                                */
/*                                                                  */
/* - Neither the name of IBM Corporation nor the names of its       */
/*   contributors may be used to endorse or promote products        */
/*   derived from this software without specific prior written      */
/*   permission.                                                    */
/*                                                                  */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND           */
/* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,      */
/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF         */
/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE         */
/* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR             */
/* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,     */
/* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT     */
/* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;     */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)         */
/* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN        */
/* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR     */
/* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,   */
/* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.               */
/* --------------------------------------------------------------- */
/* PROLOG END TAG zYx                                               */
#ifdef __SPU__
#ifndef _DIVF4_H_
#define _DIVF4_H_ 1

#include <spu_intrinsics.h>

/*
 * FUNCTION
 *	vector float _divf4(vector float dividend, vector float divisor)
 *
 * DESCRIPTION
 *	The _divf4 function divides the vector dividend by the vector divisor
 *	and returns the resulting vector quotient.
 *
 */
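/*
 * EXAMPLE
 *	A minimal usage sketch (illustrative only; the variable names below
 *	are not part of this header):
 *
 *	    vector float num  = spu_splats(6.0f);
 *	    vector float den  = spu_splats(3.0f);
 *	    vector float quot = _divf4(num, den);    -- each element of quot is 2.0f
 */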
static __inline vector float _divf4(vector float a, vector float b)
{

  /* This function has been designed to provide a
   * full function operation that precisely computes
   * the quotient for the entire range of extended
   * single precision inputs <a> and <b>. This includes:
   *
   *  1) Computing the quotient to full single precision
   *     floating point accuracy.
   *  2) Rounding the result consistently with the rounding
   *     mode of the processor - truncation toward zero.
   *  3) Clamping underflow and overflow results to Smin and
   *     Smax and flagging them with the appropriate UNF or
   *     OVF exception in the FPSCR.
   *  4) Producing a Divide By Zero (DBZ) exception when the
   *     divisor <b> has a zero exponent. A quotient of
   *     correctly signed Smax is produced.
   *  5) Denorm/zero divided by a denorm/zero generates
   *     a DBZ with the results undefined.
   *  6) Resulting denorm quotients are coerced to +0.
   *  7) If a non-compliant IEEE result is produced, a
   *     DIFF exception is generated.
   */

  vector float inv_b, err, q0, q1, q2;
  vector float mult;
  vector float mant_a, mant_b;
  vector float one = spu_splats(1.0f);
  vector unsigned int exp, exp_a, exp_b, overflow;
  vector unsigned int exp_mask = (vec_uint4)spu_splats(0x7F800000);

  /* If b has a zero exponent, then set the divide by zero
   * (DBZ) exception flag. The estimate result is discarded.
   * Note: This must be implemented as inline assembly. Otherwise
   * the optimizer removes it.
   */
  (void)si_frest((qword)(b));

  /* For computing the quotient, force the divisor and
   * dividend into the range [1.0, 2.0).
   */
  mant_a = spu_sel(a, one, exp_mask);
  mant_b = spu_sel(b, one, exp_mask);

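  /* spu_sel() takes bits from its second operand wherever the mask bit
   * is set, so the two selects above keep each input's sign and
   * fraction but substitute the exponent field of 1.0f (biased 127),
   * leaving mantissa values whose magnitudes lie in [1.0, 2.0).
   */
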
  /* Compute the quotient using the reciprocal estimate
   * followed by one Newton-Raphson iteration.
   */
  inv_b = spu_re(mant_b);
  q0 = spu_mul(mant_a, inv_b);
  q1 = spu_nmsub(mant_b, q0, mant_a);
  q1 = spu_madd(inv_b, q1, q0);

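  /* Written out, the refinement above computes
   *
   *   q1 = q0 + inv_b * (mant_a - mant_b * q0)
   *
   * which roughly squares the relative error of the initial
   * reciprocal estimate produced by spu_re().
   */
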
  /* Due to truncation error, the quotient result may be low by
   * 1 ulp (unit in the last place). Conditionally add one if the
   * estimate is too small.
   */
  q2 = (vector float)spu_add((vector unsigned int)(q1), 1);
  err = spu_nmsub(mant_b, q2, mant_a);
  q2 = spu_sel(q1, q2, spu_cmpgt((vector signed int)err, -1));

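  /* Here q2 is q1 with its integer representation incremented by one
   * (one ulp up), and err = mant_a - mant_b * q2 is the remainder for
   * that larger candidate. The signed compare against -1 simply tests
   * the sign bit of err, so q2 is kept only when it does not overshoot
   * the true quotient.
   */
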
  /* Compute the quotient's expected exponent. If the exponent
   * is out of range, then force the resulting exponent to 0
   * (127 with the bias). We correct for the out of range
   * values by computing a multiplier (mult) that will force the
   * result to the correct out of range value and set the
   * correct exception flag (UNF, OVF, or neither). The multiplier
   * is also conditioned to generate correctly signed Smax if the
   * divisor b is a denorm or zero.
   */
  exp_a = spu_and((vector unsigned int)a, exp_mask);
  exp_b = spu_and((vector unsigned int)b, exp_mask);
  exp   = spu_add(spu_sub(spu_add(exp_a, (vector unsigned int)one), exp_b), spu_cmpabsgt(mant_b, mant_a));

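  /* The arithmetic above operates directly on the raw exponent fields:
   * (vector unsigned int)one is the bit pattern 0x3F800000, so
   * exp_a + one - exp_b leaves (exp_a - exp_b + 127) in the biased
   * exponent field. spu_cmpabsgt() contributes all-ones (-1) when
   * |mant_b| > |mant_a|, i.e. when the mantissa quotient falls below
   * 1.0; the -1 borrows through the zeroed fraction bits and thereby
   * decrements the exponent field by one. The stray fraction bits are
   * discarded later when the exponent is inserted with spu_sel().
   */
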
  /* The default multiplier is 1.0. If an underflow is detected (i.e.,
   * either the dividend <a> is a denorm/zero, or the computed exponent
   * is less than or equal to a biased 0), force the multiplier to 0.0.
   */
  mult = spu_and(one, (vector float)spu_cmpgt((vector signed int)exp, 0));

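  /* spu_cmpgt() returns an all-ones mask where the comparison holds
   * and all-zeros elsewhere, so the AND above leaves mult at 1.0f for
   * in-range exponents and clears it to 0.0f where underflow was
   * detected.
   */
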
  /* Force the multiplier to positive Smax (0x7FFFFFFF) and the biased
   * exponent to 127, if the divisor is denorm/zero or the computed
   * biased exponent is greater than 255.
   */
  overflow = spu_or(spu_cmpeq(exp_b, 0), spu_cmpeq(spu_rlmask(exp, -30), 2));
  exp = spu_sel(exp, (vector unsigned int)one, overflow);

  mult = spu_or(mult, (vector float)spu_rlmask(overflow, -1));
  mult = spu_andc(mult, (vector float)spu_cmpeq(exp_a, 0));

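  /* How the multiplier encodes the clamp: overflow lanes hold the
   * all-ones mask, and spu_rlmask(overflow, -1) shifts in a zero sign
   * bit to give 0x7FFFFFFF, the bit pattern of Smax. OR'ing that into
   * mult makes the final multiply saturate those lanes to a correctly
   * signed Smax, while lanes whose dividend is a denorm/zero are
   * cleared to 0.0f by the spu_andc().
   */
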
  /* Insert the exponent into the result and perform the
   * final multiplication.
   */
  q2 = spu_sel(q2, (vector float)exp, exp_mask);
  q2 = spu_mul(q2, mult);

  return (q2);
}

#endif /* _DIVF4_H_ */
#endif /* __SPU__ */