
Searched refs:abs_y (Results 1 – 6 of 6) sorted by relevance

/picolibc-3.7.0-3.6.0/newlib/libm/machine/spu/headers/
fmodf.h
     65  float abs_y;  in _fmodf() local
     68  abs_y = _fabsf(y);  in _fmodf()
     69  quotient = x/abs_y;  in _fmodf()
     71  return (abs_y*(quotient - ((float)((int)quotient))));  in _fmodf()
     78  vec_uint4 abs_x, abs_y;  in _fmodf()
     96  abs_y = spu_andc(vy, sign_mask);  in _fmodf()
    104  exp_y = spu_rlmask(abs_y, -23);  in _fmodf()
    106  resultx = spu_cmpgt(abs_y, abs_x);  in _fmodf()
    115  mant_y = spu_andc(spu_sel(implied_1, abs_y, mant_mask), zero_y);  in _fmodf()
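
The first group of hits above (fmodf.h lines 65-71) is the scalar path, which forms the remainder from a truncated quotient. A minimal stand-alone C sketch of that idea, with illustrative names that are not the header's own:

#include <math.h>

/* fmod(x, y) recovered as |y| * frac(x/|y|), mirroring fmodf.h lines 65-71.
 * The (int) cast truncates toward zero, so this sketch is only valid while
 * the quotient fits in an int; the real header handles the general case. */
static float fmodf_by_quotient(float x, float y)
{
    float abs_y    = fabsf(y);
    float quotient = x / abs_y;
    return abs_y * (quotient - (float)(int)quotient);
}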
remquof.h
     46  vec_uint4 abs_x, abs_y, abs_2x, abs_8y;  in _remquof() local
     61  abs_y = spu_andc(vy, sign_mask);  in _remquof()
     63  abs_8y = spu_add(abs_y, VEC_SPLAT_U32(0x01800000)); /* abs_2y = 8 * abs_y */  in _remquof()
     79  resultx = spu_or(spu_cmpgt(abs_8y, abs_x), spu_cmpgt(abs_y, VEC_SPLAT_U32(0x7E7FFFFF)));  in _remquof()
    136  y4 = spu_andc(spu_add(abs_y, VEC_SPLAT_U32(0x01000000)), zero_y);  in _remquof()
    138  overflow = spu_cmpgt(abs_y, VEC_SPLAT_U32(0x7EFFFFFF));  in _remquof()
    148  y2 = spu_andc(spu_add(abs_y, implied_1), zero_y);  in _remquof()
    159  bias = spu_cmpgt(abs_2x, abs_y);  in _remquof()
    160  abs_x = spu_sel(abs_x, (vec_uint4)spu_sub((vec_float4)abs_x, (vec_float4)abs_y), bias);  in _remquof()
    163  … spu_rlmaska((vec_uint4)spu_msub((vec_float4)abs_x, VEC_SPLAT_F32(2.0f), (vec_float4)abs_y), -31));  in _remquof()
    [all …]
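
Every SIMD variant listed here forms abs_y the same way: abs_y = spu_andc(vy, sign_mask), i.e. the raw bits ANDed with the complement of the sign mask. A scalar C sketch of the same trick, assuming single precision (identifiers are illustrative):

#include <stdint.h>
#include <string.h>

/* spu_andc(a, b) computes a & ~b, so AND-ing a float's bit pattern with the
 * complement of 0x80000000 clears bit 31 (the IEEE-754 sign bit) and yields
 * |y| without a branch or an FP instruction. */
static float abs_via_sign_mask(float y)
{
    uint32_t bits;
    memcpy(&bits, &y, sizeof bits);   /* reinterpret the float as raw bits */
    bits &= ~UINT32_C(0x80000000);    /* clear the sign bit                */
    memcpy(&y, &bits, sizeof y);
    return y;
}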
remainderf.h
     43  vec_uint4 abs_x, abs_y, abs_2x, abs_2y;  in _remainderf() local
     58  abs_y = spu_andc(vy, sign_mask);  in _remainderf()
     60  abs_2y = spu_add(abs_y, implied_1); /* abs_2y = 2 * abs_y */  in _remainderf()
     74  resultx = spu_or(spu_cmpgt(abs_2y, abs_x), spu_cmpgt(abs_y, VEC_SPLAT_U32(0x7F7FFFFF)));  in _remainderf()
    130  bias = spu_cmpgt(abs_2x, abs_y);  in _remainderf()
    131  abs_x = spu_sel(abs_x, (vec_uint4)spu_sub((vec_float4)abs_x, (vec_float4)abs_y), bias);  in _remainderf()
    132  …bias = spu_andc(bias, spu_rlmaska((vec_uint4)spu_msub((vec_float4)abs_x, two, (vec_float4)abs_y), …  in _remainderf()
    133  abs_x = spu_sel(abs_x, (vec_uint4)spu_sub((vec_float4)abs_x, (vec_float4)abs_y), bias);  in _remainderf()
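
The source comments above (abs_2y = 2 * abs_y in remainderf.h, 8 * abs_y in remquof.h) point at a second bit-level trick: scaling by a power of two by adding to the exponent field, since implied_1 (0x00800000) is exactly 1 << 23 and 0x01800000 is 3 << 23. A hedged scalar sketch, valid only for normal single-precision values and ignoring the overflow/zero guards the headers wrap around these adds:

#include <stdint.h>
#include <string.h>

/* Multiply a normal float by 2^k by adding k to its biased exponent field,
 * the same integer-add scaling used for abs_2y and abs_8y above. */
static float scale_pow2_bits(float y, unsigned k)
{
    uint32_t bits;
    memcpy(&bits, &y, sizeof bits);
    bits += (uint32_t)k << 23;        /* bump the exponent field by k */
    memcpy(&y, &bits, sizeof y);
    return y;                         /* y * 2^k, barring overflow    */
}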
remquo.h
     50  vec_uint4 abs_x, abs_y, abs_2x, abs_2y, abs_8y;  in _remquo() local
     70  abs_y = spu_andc(vy, sign_mask);  in _remquo()
     72  abs_2y = spu_add(abs_y, implied_1);  in _remquo()
     73  abs_8y = spu_add(abs_y, VEC_LITERAL(vec_uint4, 0x00300000, 0, 0x00300000, 0));  in _remquo()
     85  y_lo = spu_shuffle(abs_y, abs_y, splat_lo);  in _remquo()
     86  y_hi = spu_shuffle(abs_y, abs_y, splat_hi);  in _remquo()
    181  y4 = spu_andc(spu_add(abs_y, spu_rl(implied_1, 1)), zero_y);  in _remquo()
    197  y2 = spu_andc(spu_add(abs_y, implied_1), zero_y);  in _remquo()
    216  gt = spu_cmpgt(abs_2x, abs_y);  in _remquo()
    217  eq = spu_cmpeq(abs_2x, abs_y);  in _remquo()
    [all …]
remainder.h
     48  vec_uint4 abs_x, abs_y, abs_2x, abs_2y;  in _remainder() local
     68  abs_y = spu_andc(vy, sign_mask);  in _remainder()
     70  abs_2y = spu_add(abs_y, implied_1);  in _remainder()
     80  y_lo = spu_shuffle(abs_y, abs_y, splat_lo);  in _remainder()
     81  y_hi = spu_shuffle(abs_y, abs_y, splat_hi);  in _remainder()
    176  gt = spu_cmpgt(abs_2x, abs_y);  in _remainder()
    177  eq = spu_cmpeq(abs_2x, abs_y);  in _remainder()
    180  abs_x = spu_sel(abs_x, (vec_uint4)spu_sub((vec_double2)abs_x, (vec_double2)abs_y), bias);  in _remainder()
    182  …spu_rlmaska((vec_uint4)spu_msub((vec_double2)abs_x, VEC_SPLAT_F64(2.0), (vec_double2)abs_y), -31));  in _remainder()
    184  abs_x = spu_sel(abs_x, (vec_uint4)spu_sub((vec_double2)abs_x, (vec_double2)abs_y), bias);  in _remainder()
fmod.h
     50  vec_uint4 abs_x, abs_y;  in _fmod() local
     70  abs_y = spu_andc(vy, sign_mask);  in _fmod()
     75  y_hi = spu_shuffle(abs_y, abs_y, splat_hi);  in _fmod()
     92  mant_y = spu_andc(spu_sel(implied_1, abs_y, mant_mask), zero_y);  in _fmod()
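
The fmod variants also split y into its fields: exp_y = spu_rlmask(abs_y, -23) is a logical right shift that isolates the biased exponent, and mant_y = spu_andc(spu_sel(implied_1, abs_y, mant_mask), zero_y) keeps the stored fraction bits, inserts the implicit leading 1, and forces the result to zero when y is zero. A scalar single-precision sketch of that decomposition (names are illustrative, not the header's):

#include <stdint.h>
#include <string.h>

/* Split |y| into biased exponent and mantissa-with-hidden-bit, zeroing the
 * mantissa when y == 0, as the fmodf.h/fmod.h hits above do with masks. */
static void split_float(float y, uint32_t *exp_out, uint32_t *mant_out)
{
    uint32_t bits;
    memcpy(&bits, &y, sizeof bits);

    uint32_t abs_y  = bits & ~UINT32_C(0x80000000);            /* drop sign       */
    uint32_t exp_y  = abs_y >> 23;                             /* biased exponent */
    uint32_t mant_y = (abs_y & UINT32_C(0x007FFFFF))
                    | UINT32_C(0x00800000);                    /* add implied 1   */

    if (abs_y == 0)            /* the headers select zero via the zero_y mask */
        mant_y = 0;

    *exp_out  = exp_y;
    *mant_out = mant_y;
}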