1 /* --------------------------------------------------------------  */
2 /* (C)Copyright 2001,2008,                                         */
3 /* International Business Machines Corporation,                    */
4 /* Sony Computer Entertainment, Incorporated,                      */
5 /* Toshiba Corporation,                                            */
6 /*                                                                 */
7 /* All Rights Reserved.                                            */
8 /*                                                                 */
9 /* Redistribution and use in source and binary forms, with or      */
10 /* without modification, are permitted provided that the           */
11 /* following conditions are met:                                   */
12 /*                                                                 */
13 /* - Redistributions of source code must retain the above copyright*/
14 /*   notice, this list of conditions and the following disclaimer. */
15 /*                                                                 */
16 /* - Redistributions in binary form must reproduce the above       */
17 /*   copyright notice, this list of conditions and the following   */
18 /*   disclaimer in the documentation and/or other materials        */
19 /*   provided with the distribution.                               */
20 /*                                                                 */
21 /* - Neither the name of IBM Corporation nor the names of its      */
22 /*   contributors may be used to endorse or promote products       */
23 /*   derived from this software without specific prior written     */
24 /*   permission.                                                   */
25 /*                                                                 */
26 /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND          */
27 /* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,     */
28 /* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF        */
29 /* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE        */
30 /* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR            */
31 /* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,    */
32 /* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT    */
33 /* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;    */
34 /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)        */
35 /* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN       */
36 /* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR    */
37 /* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,  */
38 /* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.              */
39 /* --------------------------------------------------------------  */
40 /* PROLOG END TAG zYx                                              */
41 #ifdef __SPU__
42 
43 #ifndef _FLOORD2_H_
44 #define _FLOORD2_H_	1
45 
46 #include <spu_intrinsics.h>
47 
48 
/*
 * FUNCTION
 *	vector double _floord2(vector double x)
 *
 * DESCRIPTION
 *	The _floord2 function rounds the elements of an vector double
 *      input vector downwards to their nearest integer representable
 *      as a double.
 *
 */
static __inline vector double _floord2(vector double in)
{
  /* Shuffle pattern that swaps the two 32-bit words within each 64-bit
   * doubleword; used to propagate a per-word compare result across the
   * whole doubleword.
   */
  vec_uchar16 swap_words = (vec_uchar16) { 4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11 };
  /* Shuffle pattern that replicates the high word (sign/exponent/top
   * fraction bits) of each doubleword into both word slots.
   */
  vec_uchar16 splat_hi = (vec_uchar16)   { 0,1,2,3, 0,1,2,3, 8,9,10,11,   8,9,10,11 };
  /* The 64-bit constant 1 in each doubleword, expressed as word pairs. */
  vec_uint4 one = (vec_uint4) { 0, 1, 0, 1 };
  vec_int4 exp, shift;
  vec_uint4 mask, mask_1, frac_mask, addend, insert, pos, equal0;
  vec_ullong2 sign = spu_splats(0x8000000000000000ULL);
  vec_double2 in_hi, out;
  vec_double2 minus_one = spu_splats(-1.0);

  /* This function generates the following components
   * based upon the inputs.
   *
   *   mask = bits of the input that need to be replaced.
   *   insert = value of the bits that need to be replaced
   *   addend = value to be added to perform function.
   *
   * These are applied as follows:
   *
   *   out = ((in & mask) | insert) + addend
   */
  in_hi = spu_shuffle(in, in, splat_hi);
  /* pos = all ones in both words of a doubleword whose sign bit is
   * clear (element >= +0.0); all zeros for negative elements.
   */
  pos = spu_cmpgt((vec_int4)in_hi, -1);
  /* Extract the 11-bit biased exponent (shift right 20, mask 0x7FF). */
  exp = spu_and(spu_rlmask((vec_int4)in_hi, -20), 0x7FF);
  /* Number of fraction bits below the binary point, per word:
   * 1023 - exp for the 20 fraction bits in the high word, and
   * 1043 (= 1023 + 20) - exp for the 32 bits in the low word.
   */
  shift = spu_sub(((vec_int4) { 1023, 1043, 1023, 1043 } ), exp);

  /* clamp shift to the range 0 to -31.
   * (Positive shifts are forced to 0; shifts <= -32 are pinned at -32,
   * for which the rlmask below produces an all-zero frac_mask, i.e.
   * the value already has no fractional bits.)
   */
  shift = spu_sel(spu_splats(-32), spu_andc(shift, (vec_int4)spu_cmpgt(shift, 0)), spu_cmpgt(shift, -32));

  /* frac_mask covers the fraction bits that lie below the binary
   * point.  mask widens this to ALL bits whenever exp <= 0x3FE
   * (|in| < 1), since then the entire value must be replaced.
   */
  frac_mask = spu_rlmask(((vec_uint4) { 0xFFFFF, -1, 0xFFFFF, -1 } ), shift);
  mask = spu_orc(frac_mask, spu_cmpgt(exp, 0x3FE));

  /* addend = ((in & mask) && (in < 0)) ? mask+1 : 0
   *
   * NOTE(review): the original comment here said (in >= 0), but the
   * code (spu_andc(mask_1, pos)) keeps the addend only for NEGATIVE
   * elements — adding mask+1 to a negative element's sign-magnitude
   * bit pattern increases its magnitude, stepping down to the next
   * lower integer, which is floor's behavior for negative
   * non-integers.
   *
   * mask_1 = mask + 1 as a full 64-bit add: spu_genc generates the
   * word carries, which are rotated into the adjacent word and folded
   * in with spu_addx.
   */
  mask_1 = spu_addx(mask, one, spu_rlqwbyte(spu_genc(mask, one), 4));

  /* equal0 flags (per word) elements whose fractional bits are all
   * zero; the swap_words shuffle ANDs the two word results so a
   * doubleword counts as "no fraction" only if BOTH words agree.
   */
  equal0 = spu_cmpeq(spu_and((vec_uint4)in, mask), 0);
  addend = spu_andc(spu_andc(mask_1, pos), spu_and(equal0, spu_shuffle(equal0, equal0, swap_words)));

  /* insert = the bit pattern of -1.0 for negative elements whose
   * biased exponent is in [1, 1023]; 0 otherwise.  The unsigned
   * compare of (exp - 1) against 1022 also rejects exp == 0
   * (zeros/denormals wrap to a huge unsigned value).  Only the bits
   * selected by mask are actually inserted below, so for -1 < in < 0
   * (mask = all bits) this forces the result toward -1.0.
   */
  insert = spu_andc(spu_andc((vec_uint4)minus_one, pos),
		    spu_cmpgt((vec_uint4)spu_add(exp, -1), 1022));

  /* Replace the masked bits of in with insert (preserving the sign
   * bit), then add addend as a full 64-bit add with carry propagated
   * between words, as for mask_1 above.
   */
  in = spu_sel(in, (vec_double2)insert, spu_andc((vec_ullong2)mask, sign));
  out = (vec_double2)spu_addx((vec_uint4)in, addend, spu_rlqwbyte(spu_genc((vec_uint4)in, addend), 4));

  return (out);
}
108 
109 #endif /* _FLOORD2_H_ */
110 #endif /* __SPU__ */
111