43 set _off_bsun, 0x00
44 set _off_snan, 0x04
45 set _off_operr, 0x08
46 set _off_ovfl, 0x0c
47 set _off_unfl, 0x10
48 set _off_dz, 0x14
49 set _off_inex, 0x18
50 set _off_fline, 0x1c
51 set _off_fpu_dis, 0x20
52 set _off_trap, 0x24
53 set _off_trace, 0x28
54 set _off_access, 0x2c
55 set _off_done, 0x30
57 set _off_imr, 0x40
58 set _off_dmr, 0x44
59 set _off_dmw, 0x48
60 set _off_irw, 0x4c
61 set _off_irl, 0x50
62 set _off_drb, 0x54
63 set _off_drw, 0x58
64 set _off_drl, 0x5c
65 set _off_dwb, 0x60
66 set _off_dww, 0x64
67 set _off_dwl, 0x68
75 short 0x0000
77 short 0x0000
79 short 0x0000
81 short 0x0000
83 short 0x0000
85 short 0x0000
87 short 0x0000
89 short 0x0000
91 short 0x0000
99 mov.l (_060FPSP_TABLE-0x80+_off_done,%pc),%d0
100 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
101 mov.l 0x4(%sp),%d0
102 rtd &0x4
107 mov.l (_060FPSP_TABLE-0x80+_off_ovfl,%pc),%d0
108 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
109 mov.l 0x4(%sp),%d0
110 rtd &0x4
115 mov.l (_060FPSP_TABLE-0x80+_off_unfl,%pc),%d0
116 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
117 mov.l 0x4(%sp),%d0
118 rtd &0x4
123 mov.l (_060FPSP_TABLE-0x80+_off_inex,%pc),%d0
124 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
125 mov.l 0x4(%sp),%d0
126 rtd &0x4
131 mov.l (_060FPSP_TABLE-0x80+_off_bsun,%pc),%d0
132 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
133 mov.l 0x4(%sp),%d0
134 rtd &0x4
139 mov.l (_060FPSP_TABLE-0x80+_off_operr,%pc),%d0
140 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
141 mov.l 0x4(%sp),%d0
142 rtd &0x4
147 mov.l (_060FPSP_TABLE-0x80+_off_snan,%pc),%d0
148 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
149 mov.l 0x4(%sp),%d0
150 rtd &0x4
155 mov.l (_060FPSP_TABLE-0x80+_off_dz,%pc),%d0
156 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
157 mov.l 0x4(%sp),%d0
158 rtd &0x4
163 mov.l (_060FPSP_TABLE-0x80+_off_fline,%pc),%d0
164 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
165 mov.l 0x4(%sp),%d0
166 rtd &0x4
171 mov.l (_060FPSP_TABLE-0x80+_off_fpu_dis,%pc),%d0
172 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
173 mov.l 0x4(%sp),%d0
174 rtd &0x4
179 mov.l (_060FPSP_TABLE-0x80+_off_trap,%pc),%d0
180 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
181 mov.l 0x4(%sp),%d0
182 rtd &0x4
187 mov.l (_060FPSP_TABLE-0x80+_off_trace,%pc),%d0
188 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
189 mov.l 0x4(%sp),%d0
190 rtd &0x4
195 mov.l (_060FPSP_TABLE-0x80+_off_access,%pc),%d0
196 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
197 mov.l 0x4(%sp),%d0
198 rtd &0x4
205 mov.l (_060FPSP_TABLE-0x80+_off_imr,%pc),%d0
206 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
207 mov.l 0x4(%sp),%d0
208 rtd &0x4
213 mov.l (_060FPSP_TABLE-0x80+_off_dmr,%pc),%d0
214 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
215 mov.l 0x4(%sp),%d0
216 rtd &0x4
221 mov.l (_060FPSP_TABLE-0x80+_off_dmw,%pc),%d0
222 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
223 mov.l 0x4(%sp),%d0
224 rtd &0x4
229 mov.l (_060FPSP_TABLE-0x80+_off_irw,%pc),%d0
230 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
231 mov.l 0x4(%sp),%d0
232 rtd &0x4
237 mov.l (_060FPSP_TABLE-0x80+_off_irl,%pc),%d0
238 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
239 mov.l 0x4(%sp),%d0
240 rtd &0x4
245 mov.l (_060FPSP_TABLE-0x80+_off_drb,%pc),%d0
246 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
247 mov.l 0x4(%sp),%d0
248 rtd &0x4
253 mov.l (_060FPSP_TABLE-0x80+_off_drw,%pc),%d0
254 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
255 mov.l 0x4(%sp),%d0
256 rtd &0x4
261 mov.l (_060FPSP_TABLE-0x80+_off_drl,%pc),%d0
262 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
263 mov.l 0x4(%sp),%d0
264 rtd &0x4
269 mov.l (_060FPSP_TABLE-0x80+_off_dwb,%pc),%d0
270 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
271 mov.l 0x4(%sp),%d0
272 rtd &0x4
277 mov.l (_060FPSP_TABLE-0x80+_off_dww,%pc),%d0
278 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
279 mov.l 0x4(%sp),%d0
280 rtd &0x4
285 mov.l (_060FPSP_TABLE-0x80+_off_dwl,%pc),%d0
286 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
287 mov.l 0x4(%sp),%d0
288 rtd &0x4
298 set EXC_SR, 0x4 # stack status register
299 set EXC_PC, 0x6 # stack pc
300 set EXC_VOFF, 0xa # stacked vector offset
301 set EXC_EA, 0xc # stacked <ea>
303 set EXC_FP, 0x0 # frame pointer
317 set EXC_A0, EXC_AREGS+(0*4)
325 set EXC_D0, EXC_DREGS+(0*4)
327 set EXC_FP0, EXC_FPREGS+(0*12) # offset of saved fp0
332 set FP_SCR1_EX, FP_SCR1+0
337 set FP_SCR0, LV+68 # fp scratch 0
338 set FP_SCR0_EX, FP_SCR0+0
344 set FP_DST_EX, FP_DST+0
350 set FP_SRC_EX, FP_SRC+0
358 set FPSR_CC, USER_FPSR+0 # FPSR condition codes
385 set EXC_OPWORD, LV+0 # saved operation word
391 set FTEMP, 0 # offsets within an
392 set FTEMP_EX, 0 # extended precision
398 set LOCAL, 0 # offsets within an
399 set LOCAL_EX, 0 # extended precision
405 set DST, 0 # offsets within an
406 set DST_EX, 0 # extended precision
410 set SRC, 0 # offsets within an
411 set SRC_EX, 0 # extended precision
415 set SGL_LO, 0x3f81 # min sgl prec exponent
416 set SGL_HI, 0x407e # max sgl prec exponent
417 set DBL_LO, 0x3c01 # min dbl prec exponent
418 set DBL_HI, 0x43fe # max dbl prec exponent
419 set EXT_LO, 0x0 # min ext prec exponent
420 set EXT_HI, 0x7ffe # max ext prec exponent
422 set EXT_BIAS, 0x3fff # extended precision bias
423 set SGL_BIAS, 0x007f # single precision bias
424 set DBL_BIAS, 0x03ff # double precision bias
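The bias constants above are what the later handlers use to re-bias single- and double-precision exponents into the extended-precision 0x3fff bias; that is where values such as 0x3f81, 0x3c01 and the +0x3f80/+0x3c00 adjustments seen further down come from. A minimal C illustration of the arithmetic (not FPSP code, just the re-biasing it implies):

#include <float.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Re-bias a single-precision exponent field into the extended-precision
 * bias: ext_exp = sgl_exp - SGL_BIAS + EXT_BIAS = sgl_exp + 0x3f80.
 * The smallest normal single exponent (1) therefore maps to 0x3f81,
 * matching SGL_LO/sgl_thresh; doubles map the same way via +0x3c00. */
int main(void)
{
    float f = FLT_MIN;                       /* smallest normal single */
    uint32_t bits;
    memcpy(&bits, &f, sizeof(bits));         /* raw IEEE encoding */

    unsigned sgl_exp = (bits >> 23) & 0xff;
    unsigned ext_exp = sgl_exp + (0x3fff - 0x007f);  /* + 0x3f80 */

    printf("sgl_exp = 0x%02x -> ext_exp = 0x%04x\n", sgl_exp, ext_exp);
    return 0;
}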
426 set NORM, 0x00 # operand type for STAG/DTAG
427 set ZERO, 0x01 # operand type for STAG/DTAG
428 set INF, 0x02 # operand type for STAG/DTAG
429 set QNAN, 0x03 # operand type for STAG/DTAG
430 set DENORM, 0x04 # operand type for STAG/DTAG
431 set SNAN, 0x05 # operand type for STAG/DTAG
432 set UNNORM, 0x06 # operand type for STAG/DTAG
437 set neg_bit, 0x3 # negative result
438 set z_bit, 0x2 # zero result
439 set inf_bit, 0x1 # infinite result
440 set nan_bit, 0x0 # NAN result
442 set q_sn_bit, 0x7 # sign bit of quotient byte
451 set inex1_bit, 0 # inexact result 1
462 set neg_mask, 0x08000000 # negative bit mask (lw)
463 set inf_mask, 0x02000000 # infinity bit mask (lw)
464 set z_mask, 0x04000000 # zero bit mask (lw)
465 set nan_mask, 0x01000000 # nan bit mask (lw)
467 set neg_bmask, 0x08 # negative bit mask (byte)
468 set inf_bmask, 0x02 # infinity bit mask (byte)
469 set z_bmask, 0x04 # zero bit mask (byte)
470 set nan_bmask, 0x01 # nan bit mask (byte)
472 set bsun_mask, 0x00008000 # bsun exception mask
473 set snan_mask, 0x00004000 # snan exception mask
474 set operr_mask, 0x00002000 # operr exception mask
475 set ovfl_mask, 0x00001000 # overflow exception mask
476 set unfl_mask, 0x00000800 # underflow exception mask
477 set dz_mask, 0x00000400 # dz exception mask
478 set inex2_mask, 0x00000200 # inex2 exception mask
479 set inex1_mask, 0x00000100 # inex1 exception mask
481 set aiop_mask, 0x00000080 # accrued illegal operation
482 set aovfl_mask, 0x00000040 # accrued overflow
483 set aunfl_mask, 0x00000020 # accrued underflow
484 set adz_mask, 0x00000010 # accrued divide by zero
485 set ainex_mask, 0x00000008 # accrued inexact
492 set nzi_mask, 0x01ffffff # clears N, Z, and I
513 set sign_bit, 0x7 # sign bit
514 set signan_bit, 0x6 # signalling nan bit
516 set sgl_thresh, 0x3f81 # minimum sgl exponent
517 set dbl_thresh, 0x3c01 # minimum dbl exponent
519 set x_mode, 0x0 # extended precision
520 set s_mode, 0x4 # single precision
521 set d_mode, 0x8 # double precision
523 set rn_mode, 0x0 # round-to-nearest
524 set rz_mode, 0x1 # round-to-zero
525 set rm_mode, 0x2 # round-to-minus-infinity
526 set rp_mode, 0x3 # round-to-plus-infinity
534 set BSUN_VEC, 0xc0 # bsun vector offset
535 set INEX_VEC, 0xc4 # inexact vector offset
536 set DZ_VEC, 0xc8 # dz vector offset
537 set UNFL_VEC, 0xcc # unfl vector offset
538 set OPERR_VEC, 0xd0 # operr vector offset
539 set OVFL_VEC, 0xd4 # ovfl vector offset
540 set SNAN_VEC, 0xd8 # snan vector offset
545 set ftrapcc_flg, 0x01 # flag bit: ftrapcc exception
546 set fbsun_flg, 0x02 # flag bit: bsun exception
547 set mia7_flg, 0x04 # flag bit: (a7)+ <ea>
548 set mda7_flg, 0x08 # flag bit: -(a7) <ea>
549 set fmovm_flg, 0x40 # flag bit: fmovm instruction
550 set immed_flg, 0x80 # flag bit: &<data> <ea>
552 set ftrapcc_bit, 0x0
553 set fbsun_bit, 0x1
554 set mia7_bit, 0x2
555 set mda7_bit, 0x3
556 set immed_bit, 0x7
561 set FMUL_OP, 0x0 # fmul instr performed last
562 set FDIV_OP, 0x1 # fdiv performed last
563 set FADD_OP, 0x2 # fadd performed last
564 set FMOV_OP, 0x3 # fmov performed last
569 T1: long 0x40C62D38,0xD3D64634 # 16381 LOG2 LEAD
570 T2: long 0x3D6F90AE,0xB1E75CC7 # 16381 LOG2 TRAIL
572 PI: long 0x40000000,0xC90FDAA2,0x2168C235,0x00000000
573 PIBY2: long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
576 long 0x3FE45F30,0x6DC9C883
589 # store_fpreg() - store opclass 0 or 2 result to FP regfile #
593 # tbl_unsupp - addr of table of emulation routines for opclass 0,2 #
606 # - The fsave frame contains the adjusted src op for opclass 0,2 #
645 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
647 fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
652 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
658 btst &0x5,EXC_CMDREG(%a6) # is instr an fmove out?
674 btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
701 andi.w &0x007f,%d1 # extract extension
703 andi.l &0x00ff01ff,USER_FPSR(%a6) # zero all but accrued field
705 fmov.l &0x0,%fpcr # zero current control regs
706 fmov.l &0x0,%fpsr
731 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
733 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
742 fmovm.x &0x40,FP_SRC(%a6) # save EXOP (fp1) to stack
744 mov.w &0xe005,2+FP_SRC(%a6) # save exc status
746 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
748 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
760 fmovm.x &0x40,FP_SRC(%a6) # save EXOP (fp1) to stack
762 mov.b &0xc4,1+EXC_VOFF(%a6) # vector offset = 0xc4
763 mov.w &0xe001,2+FP_SRC(%a6) # save exc status
765 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
767 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
789 and.l &0xffff00ff,USER_FPSR(%a6) # zero all but accrued field
791 fmov.l &0x0,%fpcr # zero current control regs
792 fmov.l &0x0,%fpsr
804 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
806 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
811 btst &0x7,(%sp) # is trace on?
814 fmov.l %fpiar,0x8(%sp) # "Current PC" is in FPIAR
815 mov.w &0x2024,0x6(%sp) # stk fmt = 0x2; voff = 0x024
829 # store_fpreg() - store opclass 0 or 2 result to FP regfile #
833 # tbl_unsupp - addr of table of emulation routines for opclass 0,2 #
846 # - The fsave frame contains the adjusted src op for opclass 0,2 #
885 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
887 fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
892 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
898 btst &0x5,EXC_CMDREG(%a6) # is instr an fmove out?
912 btst &0x5,1+EXC_CMDREG(%a6) # is op monadic or dyadic?
917 btst &0x4,1+EXC_CMDREG(%a6) # is op an fsincos?
944 andi.w &0x007f,%d1 # extract extension
946 andi.l &0x00ff01ff,USER_FPSR(%a6)
948 fmov.l &0x0,%fpcr # zero current control regs
949 fmov.l &0x0,%fpsr
963 # (0x00000000_80000000_00000000), then the machine will take an
981 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
983 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
996 # (0x00000000_80000000_00000000), then the machine will take an
1004 fmovm.x &0x40,FP_SRC(%a6) # save EXOP (fp1) to stack
1006 mov.w &0xe003,2+FP_SRC(%a6) # save exc status
1008 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
1010 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1024 # (0x00000000_80000000_00000000), then the machine will take an
1034 fmovm.x &0x40,FP_SRC(%a6) # save EXOP to stack
1036 mov.b &0xc4,1+EXC_VOFF(%a6) # vector offset = 0xc4
1037 mov.w &0xe001,2+FP_SRC(%a6) # save exc status
1039 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
1041 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1063 and.l &0xffff00ff,USER_FPSR(%a6) # zero all but accrued field
1065 fmov.l &0x0,%fpcr # zero current control regs
1066 fmov.l &0x0,%fpsr
1078 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
1080 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1085 btst &0x7,(%sp) # is trace on?
1088 fmov.l %fpiar,0x8(%sp) # "Current PC" is in FPIAR
1089 mov.w &0x2024,0x6(%sp) # stk fmt = 0x2; voff = 0x024
1104 # store_fpreg() - store opclass 0 or 2 result to FP regfile #
1109 # tbl_unsupp - addr of table of emulation routines for opclass 0,2 #
1142 # unimplemented data types. These can be either opclass 0,2 or 3 #
1144 # also of opclasses 0,2, or 3. #
1145 # For UNNORM/DENORM opclass 0 and 2, the handler fetches the src #
1155 # PACKED opclass 0 and 2 is similar in how the instruction is #
1182 # * 0x0 * 0x0dc * * 0x3 * 0x0dc *
1195 # * 0x2 * 0x0dc *
1210 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
1212 fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
1214 btst &0x5,EXC_SR(%a6) # user or supervisor mode?
1224 lea 0x4+EXC_EA(%a6),%a0 # load old a7'
1234 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
1244 btst &0x5,EXC_CMDREG(%a6) # is it an fmove out?
1248 bfextu EXC_CMDREG(%a6){&0:&6},%d0
1249 cmpi.b %d0,&0x13
1255 andi.l &0x00ff00ff,USER_FPSR(%a6) # zero exception field
1257 fmov.l &0x0,%fpcr # zero current control regs
1258 fmov.l &0x0,%fpsr
1282 btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
1284 cmpi.b 1+EXC_CMDREG(%a6),&0x3a # is operation an ftst?
1329 andi.b &0x38,%d0 # extract bits 3-5
1330 cmpi.b %d0,&0x38 # is instr fcmp or ftst?
1338 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1340 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1385 subi.l &24,%d0 # fix offset to be 0-8
1386 cmpi.b %d0,&0x6 # is exception INEX? (6)
1405 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1407 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1416 short 0xe000,0xe006,0xe004,0xe005
1417 short 0xe003,0xe002,0xe001,0xe001
1420 mov.w &0x4,%d0
1423 mov.w &0x03,%d0
1432 bfextu EXC_CMDREG(%a6){&0:&6},%d0 # extract opclass,src fmt
1433 cmpi.b %d0,&0x11 # is class = 2 & fmt = sgl?
1435 cmpi.b %d0,&0x15 # is class = 2 & fmt = dbl?
1441 andi.w &0x7fff,%d0 # strip sign
1442 cmpi.w %d0,&0x3f80 # is |exp| == $3f80?
1444 cmpi.w %d0,&0x407f # no; is |exp| == $407f?
1449 andi.l &0x7fffffff,LOCAL_HI(%a0) # clear j-bit
1455 addi.w &0x3f81,%d0 # adjust new exponent
1456 andi.w &0x8000,LOCAL_EX(%a0) # clear old exponent
1461 andi.w &0x8000,LOCAL_EX(%a0) # clear bogus exponent
1465 andi.b &0x7f,LOCAL_HI(%a0) # clear j-bit
1466 ori.w &0x7fff,LOCAL_EX(%a0) # make exponent = $7fff
1471 andi.w &0x7fff,%d0 # strip sign
1472 cmpi.w %d0,&0x3c00 # is |exp| == $3c00?
1474 cmpi.w %d0,&0x43ff # no; is |exp| == $43ff?
1479 andi.l &0x7fffffff,LOCAL_HI(%a0) # clear j-bit
1487 addi.w &0x3c01,%d0 # adjust new exponent
1488 andi.w &0x8000,LOCAL_EX(%a0) # clear old exponent
1501 cmpi.b %d0,&0x3
1503 cmpi.b %d0,&0x7
1510 and.l &0xffff00ff,USER_FPSR(%a6) # zero exception field
1512 fmov.l &0x0,%fpcr # zero current control regs
1513 fmov.l &0x0,%fpsr
1518 andi.w &0x7fff,%d0 # strip sign
1563 btst &0x5,EXC_SR(%a6)
1570 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1572 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1576 btst &0x7,(%sp) # is trace on?
1592 fmovm.x &0x80,FP_SRC(%a6) # put answer on stack
1594 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1596 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1600 mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
1601 mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
1604 mov.l LOCAL_SIZE+FP_SRC_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
1605 mov.l LOCAL_SIZE+FP_SRC_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
1606 mov.l LOCAL_SIZE+FP_SRC_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
1608 add.l &LOCAL_SIZE-0x8,%sp
1610 btst &0x7,(%sp)
1641 # * 0x3 * 0x0dc * * 0x2 * 0x024 *
1650 mov.w &0x2024,0x6(%sp)
1651 fmov.l %fpiar,0x8(%sp)
1656 subi.l &24,%d0 # fix offset to be 0-8
1663 swbeg &0x8
1677 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1679 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1681 mov.w &0x30d8,EXC_VOFF(%a6) # vector offset = 0xd8
1682 mov.w &0xe006,2+FP_SRC(%a6)
1692 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1694 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1696 mov.w &0x30d0,EXC_VOFF(%a6) # vector offset = 0xd0
1697 mov.w &0xe004,2+FP_SRC(%a6)
1707 fmovm.x &0x40,FP_SRC(%a6) # save EXOP to the stack
1709 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1711 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1713 mov.w &0x30d4,EXC_VOFF(%a6) # vector offset = 0xd4
1714 mov.w &0xe005,2+FP_SRC(%a6)
1729 btst &0x5,EXC_SR(%a6)
1736 fmovm.x &0x40,FP_SRC(%a6) # save EXOP to the stack
1738 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1740 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1742 mov.w &0x30cc,EXC_VOFF(%a6) # vector offset = 0xcc
1743 mov.w &0xe003,2+FP_SRC(%a6)
1759 fmovm.x &0x80,FP_SRC(%a6) # put answer on stack
1760 fmovm.x &0x40,FP_DST(%a6) # put EXOP on stack
1762 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1764 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1766 mov.w &0x30cc,EXC_VOFF(%a6) # vector offset = 0xcc
1767 mov.w &0xe003,2+FP_DST(%a6)
1773 mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
1774 mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
1775 mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
1778 mov.l LOCAL_SIZE+FP_SRC_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
1779 mov.l LOCAL_SIZE+FP_SRC_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
1780 mov.l LOCAL_SIZE+FP_SRC_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
1782 add.l &LOCAL_SIZE-0x8,%sp
1788 fmovm.x &0x40,FP_SRC(%a6) # save EXOP to the stack
1790 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1792 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1794 mov.w &0x30c4,EXC_VOFF(%a6) # vector offset = 0xc4
1795 mov.w &0xe001,2+FP_SRC(%a6)
1811 andi.l &0x0ff00ff,USER_FPSR(%a6) # zero exception field
1813 fmov.l &0x0,%fpcr # zero current control regs
1814 fmov.l &0x0,%fpsr
1827 btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
1829 cmpi.b 1+EXC_CMDREG(%a6),&0x3a # is operation an ftst?
1874 andi.b &0x38,%d0 # extract bits 3-5
1875 cmpi.b %d0,&0x38 # is instr fcmp or ftst?
1883 btst &0x5,EXC_SR(%a6) # user or supervisor?
1890 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1892 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1896 btst &0x7,(%sp) # is trace on?
1908 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1910 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1915 mov.l 0x4(%sp),0x10(%sp)
1916 mov.l 0x0(%sp),0xc(%sp)
1917 add.l &0xc,%sp
1919 btst &0x7,(%sp) # is trace on?
1963 subi.l &24,%d0 # fix offset to be 0-8
1964 cmpi.b %d0,&0x6 # is exception INEX? (6 or 7)
1980 btst &0x5,EXC_SR(%a6) # user or supervisor?
1989 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1991 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1997 btst &0x7,(%sp) # is trace enabled?
2003 short 0xe000,0xe006,0xe004,0xe005
2004 short 0xe003,0xe002,0xe001,0xe001
2007 mov.w &0x3,%d0
2011 mov.w &0x4,%d0
2020 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
2022 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2029 mov.l 0x4(%sp),0x10(%sp)
2030 mov.l 0x0(%sp),0xc(%sp)
2031 add.l &0xc,%sp
2033 btst &0x7,(%sp) # is trace on?
2048 # * 0x2 * 0x0dc * * 0x2 * 0x024 *
2056 mov.w &0x2024,0x6(%sp)
2057 fmov.l %fpiar,0x8(%sp)
2069 and.l &0xffff00ff,USER_FPSR(%a6) # zero exception field
2071 fmov.l &0x0,%fpcr # zero current control regs
2072 fmov.l &0x0,%fpsr
2114 btst &0x5,EXC_SR(%a6) # user or supervisor?
2121 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
2123 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2127 btst &0x7,(%sp) # is trace on?
2139 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
2141 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2145 mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
2146 mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
2149 mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
2150 mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
2151 mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
2153 add.l &LOCAL_SIZE-0x8,%sp
2155 btst &0x7,(%sp)
2170 cmpi.b %d0,&0x1a
2175 btst &0x5,EXC_SR(%a6)
2189 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
2191 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2193 mov.w &0x30d8,EXC_VOFF(%a6) # vector offset = 0xd8
2194 mov.w &0xe006,2+FP_SRC(%a6) # set fsave status
2200 mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
2201 mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
2202 mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
2205 mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
2206 mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
2207 mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
2209 add.l &LOCAL_SIZE-0x8,%sp
2215 btst &0x5,EXC_SR(%a6)
2229 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
2231 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2233 mov.w &0x30d0,EXC_VOFF(%a6) # vector offset = 0xd0
2234 mov.w &0xe004,2+FP_SRC(%a6) # set fsave status
2240 mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
2241 mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
2242 mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
2245 mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
2246 mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
2247 mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
2249 add.l &LOCAL_SIZE-0x8,%sp
2255 btst &0x5,EXC_SR(%a6)
2269 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
2271 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2273 mov.w &0x30c4,EXC_VOFF(%a6) # vector offset = 0xc4
2274 mov.w &0xe001,2+FP_SRC(%a6) # set fsave status
2280 mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
2281 mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
2282 mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
2285 mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
2286 mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
2287 mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
2289 add.l &LOCAL_SIZE-0x8,%sp
2304 cmpi.b %d0,&0x1 # was src sgl?
2306 cmpi.b %d0,&0x5 # was src dbl?
2312 andi.w &0x7fff,%d0 # strip sign
2314 cmpi.w %d0,&0x3f80
2317 addi.w &0x3f81,%d0 # find amt to shift
2322 andi.w &0x8000,FP_SRC_EX(%a6) # clear old exponent
2323 ori.w &0x3f80,FP_SRC_EX(%a6) # insert new "skewed" exponent
2329 andi.w &0x7fff,%d0 # strip sign
2331 cmpi.w %d0,&0x3c00
2335 smi.b 0x2+FP_SRC(%a6)
2339 mov.w &0x3c01,%d1 # pass denorm threshold
2341 mov.w &0x3c00,%d0 # new exponent
2342 tst.b 0x2+FP_SRC(%a6) # is sign set?
2346 bset &0x7,FP_SRC_HI(%a6) # set j-bit
2354 btst &0x5,EXC_SR(%a6)
2356 mov.l 0x0(%a0),FP_DST_EX(%a6)
2357 mov.l 0x4(%a0),FP_DST_HI(%a6)
2358 mov.l 0x8(%a0),FP_DST_LO(%a6)
2375 # store_fpreg() - store opclass 0 or 2 result to FP regfile #
2378 # tbl_unsupp - addr of table of emulation routines for opclass 0,2 #
2460 btst &0x1,%d0 # is FPU disabled?
2466 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
2468 fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
2474 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
2503 andi.l &0x00ff00ff,USER_FPSR(%a6)
2505 btst &0xa,%d0 # is src fmt x or p?
2511 mov.l &0xc,%d0 # pass: 12 bytes
2523 mov.l &0xc,%d0 # pass: 12 bytes
2531 cmpi.w %d0,&0x7fff # INF or NAN?
2537 andi.b &0x0f,%d0 # clear all but last nybble
2546 fmovm.x &0x80,FP_SRC(%a6) # make this the srcop
2549 addi.l &0xc,EXC_EXTWPTR(%a6) # update extension word pointer
2562 btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
2564 btst &0x4,1+EXC_CMDREG(%a6) # is operation fsincos,ftst,fcmp?
2582 btst &0x3,1+EXC_CMDREG(%a6) # is operation fsincos?
2587 btst &0x1,1+EXC_CMDREG(%a6) # is operation fcmp?
2595 andi.w &0x007f,%d1 # extract extension
2597 fmov.l &0x0,%fpcr
2598 fmov.l &0x0,%fpsr
2637 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
2639 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2643 btst &0x7,(%sp) # is trace on?
2666 subi.l &24,%d0 # fix offset to be 0-8
2667 cmpi.b %d0,&0x6 # is exception INEX?
2683 short 0xe002, 0xe006, 0xe004, 0xe005
2684 short 0xe003, 0xe002, 0xe001, 0xe001
2687 mov.w &0xe005,2+FP_SRC(%a6)
2691 mov.w &0xe003,2+FP_SRC(%a6)
2697 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
2699 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2705 btst &0x7,(%sp) # is trace on?
2717 # * 0x0 * 0x0f0 * * Current *
2720 # * PC * * 0x2 * 0x024 *
2729 mov.w 0x8(%sp),0x4(%sp)
2730 mov.w &0x2024,0x6(%sp) # stk fmt = 0x2; voff = 0x024
2731 fmov.l %fpiar,0x8(%sp) # "Current PC" is in FPIAR
2742 btst &0x5,EXC_SR(%a6) # user or supervisor mode
2755 lea 0x2+EXC_VOFF(%a6),%a0
2769 btst &0x7,EXC_SR(%a6)
2774 mov.w &0x00f0,(EXC_VOFF,%a6,%d0)
2779 fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
2781 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2788 mov.w EXC_SR(%a6),(EXC_SR-0x4,%a6,%d0)
2789 mov.l EXC_EXTWPTR(%a6),(EXC_PC-0x4,%a6,%d0)
2790 mov.w &0x2024,(EXC_VOFF-0x4,%a6,%d0)
2791 mov.l EXC_PC(%a6),(EXC_VOFF+0x2-0x4,%a6,%d0)
2793 lea (EXC_SR-0x4,%a6,%d0),%a0
2796 fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
2798 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2807 mov.b %d0,0x1+EXC_VOFF(%a6) # store size
2809 fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
2811 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2819 mov.b 0x1+EXC_VOFF(%a6),%d0 # fetch size
2822 btst &0x7,EXC_SR(%a6) # is trace enabled?
2825 mov.w EXC_SR(%a6),(EXC_SR-0x4,%a6,%d0)
2826 mov.l EXC_PC(%a6),(EXC_VOFF-0x2,%a6,%d0)
2827 mov.l (%sp)+,(EXC_PC-0x4,%a6,%d0)
2828 mov.w &0x2024,(EXC_VOFF-0x4,%a6,%d0)
2836 mov.w &0x00f0,(EXC_VOFF,%a6,%d0)
2838 pea (0x4,%a6,%d0) # create final sp
2846 fmovm.x &0x80,(0x4+0x8,%a6,%d0)
2847 addi.l &0xc,%d0
2849 lsl.b &0x1,%d1
2851 fmovm.x &0x40,(0x4+0x8,%a6,%d0)
2852 addi.l &0xc,%d0
2854 lsl.b &0x1,%d1
2856 fmovm.x &0x20,(0x4+0x8,%a6,%d0)
2857 addi.l &0xc,%d0
2859 lsl.b &0x1,%d1
2861 fmovm.x &0x10,(0x4+0x8,%a6,%d0)
2862 addi.l &0xc,%d0
2864 lsl.b &0x1,%d1
2866 fmovm.x &0x08,(0x4+0x8,%a6,%d0)
2867 addi.l &0xc,%d0
2869 lsl.b &0x1,%d1
2871 fmovm.x &0x04,(0x4+0x8,%a6,%d0)
2872 addi.l &0xc,%d0
2874 lsl.b &0x1,%d1
2876 fmovm.x &0x02,(0x4+0x8,%a6,%d0)
2877 addi.l &0xc,%d0
2879 lsl.b &0x1,%d1
2881 fmovm.x &0x01,(0x4+0x8,%a6,%d0)
2883 mov.l 0x4(%sp),%d1
2884 mov.l 0x8(%sp),%d0
2885 mov.l 0xc(%sp),%a6
2888 btst &0x7,(%sp) # is trace enabled?
2898 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
2900 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2902 btst &0x7,EXC_SR(%a6) # is trace on?
2919 # * 0x0 * 0x0f0 * * Current *
2922 # * PC * * 0x2 * 0x024 *
2935 mov.w EXC_SR+LOCAL_SIZE(%sp),0x0+LOCAL_SIZE(%sp)
2936 mov.l EXC_PC+LOCAL_SIZE(%sp),0x8+LOCAL_SIZE(%sp)
2937 mov.l EXC_EXTWPTR+LOCAL_SIZE(%sp),0x2+LOCAL_SIZE(%sp)
2938 mov.w &0x2024,0x6+LOCAL_SIZE(%sp) # stk fmt = 0x2; voff = 0x024
2955 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
2960 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
2969 mov.l &0x10,%d0 # 16 bytes of instruction
2972 btst &0xe,%d0 # is instr fmovm ctrl
2976 mov.l &0xc,%d0
2977 cmpi.b %d1,&0x7 # move all regs?
2979 addq.l &0x4,%d0
2993 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3000 subq.l &0x8,%sp # make room for new stack
3002 mov.w 0xc(%sp),0x4(%sp) # move SR
3003 mov.l 0xe(%sp),0x6(%sp) # move Current PC
3005 mov.w 0x12(%sp),%d0
3006 mov.l 0x6(%sp),0x10(%sp) # move Current PC
3007 add.l %d0,0x6(%sp) # make Next PC
3008 mov.w &0x402c,0xa(%sp) # insert offset,frame format
3017 btst &0x1,%d0
3020 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1 on stack
3022 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3026 subq.w &0x8,%sp # make stack frame bigger
3027 mov.l 0x8(%sp),(%sp) # store SR,hi(PC)
3028 mov.w 0xc(%sp),0x4(%sp) # store lo(PC)
3029 mov.w &0x4008,0x6(%sp) # store voff
3030 mov.l 0x2(%sp),0x8(%sp) # store ea
3031 mov.l &0x09428001,0xc(%sp) # store fslw
3034 btst &0x5,(%sp) # user or supervisor mode?
3036 bset &0x2,0xd(%sp) # set supervisor TM bit
3045 btst &0x1,%d1
3047 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1 on stack
3052 mov.l 0x4+LOCAL_SIZE(%sp),-0x8+0x4+LOCAL_SIZE(%sp)
3053 mov.w 0x8+LOCAL_SIZE(%sp),-0x8+0x8+LOCAL_SIZE(%sp)
3054 mov.w &0x4008,-0x8+0xa+LOCAL_SIZE(%sp)
3055 mov.l %a0,-0x8+0xc+LOCAL_SIZE(%sp)
3056 mov.w %d0,-0x8+0x10+LOCAL_SIZE(%sp)
3057 mov.w &0x0001,-0x8+0x12+LOCAL_SIZE(%sp)
3059 movm.l LOCAL_SIZE+EXC_DREGS(%sp),&0x0303 # restore d0-d1/a0-a1
3060 add.w &LOCAL_SIZE-0x4,%sp
3086 # - The fsave frame contains the adjusted src op for opclass 0,2 #
3113 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
3115 fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
3121 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
3139 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
3141 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3168 andi.w &0x7fff,%d1
3169 cmpi.w %d1,&0x7fff
3175 andi.l &0x7fffffff,%d1
3182 mov.l &0x7fffffff,%d1
3185 addq.l &0x1,%d1
3207 cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3217 andi.w &0x0007,%d1
3223 cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3233 andi.w &0x0007,%d1
3239 cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3249 andi.w &0x0007,%d1
3276 # - The fsave frame contains the adjusted src op for opclass 0,2 #
3309 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
3311 fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
3317 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
3335 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
3337 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3378 cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3388 andi.w &0x0007,%d1
3395 cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3405 andi.w &0x0007,%d1
3412 cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3422 andi.w &0x0007,%d1
3427 cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3430 andi.l &0x80000000,%d0 # keep sign
3431 ori.l &0x7fc00000,%d0 # insert new exponent,SNAN bit
3433 lsr.l &0x8,%d1 # shift mantissa for sgl
3444 andi.l &0x80000000,%d0 # keep sign
3445 ori.l &0x7fc00000,%d0 # insert new exponent,SNAN bit
3448 lsr.l &0x8,%d1 # shift mantissa for sgl
3451 andi.w &0x0007,%d1
3457 andi.l &0x80000000,%d0 # keep sign
3458 ori.l &0x7ff80000,%d0 # insert new exponent,SNAN bit
3465 andi.l &0x000007ff,%d1
3473 movq.l &0x8,%d0 # pass: size of 8 bytes
3494 btst &0x5,EXC_SR(%a6) # supervisor mode exception?
3511 movq.l &0xc,%d0 # pass: size of extended
3532 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
3534 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3540 mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
3541 mov.l LOCAL_SIZE+EXC_PC+0x2(%sp),LOCAL_SIZE+EXC_PC+0x2-0xc(%sp)
3542 mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
3545 mov.l LOCAL_SIZE+FP_SCR0_HI(%sp),LOCAL_SIZE+EXC_PC+0x2(%sp)
3548 add.l &LOCAL_SIZE-0x8,%sp
3563 # store_fpreg() - store opclass 0 or 2 result to FP regfile #
3568 # tbl_unsupp - addr of table of emulation routines for opclass 0,2 #
3577 # - The fsave frame contains the adjusted src op for opclass 0,2 #
3601 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
3603 fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
3609 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
3621 # w/ an exponent value of 0x401e. we convert this to extended precision here.
3624 cmpi.w FP_SRC_EX(%a6),&0x401e # is exponent 0x401e?
3626 fmov.l &0x0,%fpcr
3629 mov.w &0xe001,0x2+FP_SRC(%a6)
3640 andi.l &0x00ff01ff,USER_FPSR(%a6) # zero all but accrued field
3642 fmov.l &0x0,%fpcr # zero current control regs
3643 fmov.l &0x0,%fpsr
3645 bfextu EXC_EXTWORD(%a6){&0:&6},%d1 # extract upper 6 of cmdreg
3646 cmpi.b %d1,&0x17 # is op an fmovecr?
3656 btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
3659 btst &0x4,1+EXC_CMDREG(%a6) # is operation an fsincos?
3678 andi.w &0x007f,%d1 # extract extension
3692 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
3694 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3705 andi.l &0x0000007f,%d1 # pass rom offset
3730 andi.l &0xffff00ff,USER_FPSR(%a6) # zero exception field
3775 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
3777 fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
3783 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
3797 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
3799 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3848 cmpi.w 0x6(%sp),&0x202c
3853 cmpi.w 0x6(%sp),&0x402c
3863 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
3867 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
3870 bfextu %d0{&0:&10},%d1 # is it an fmovecr?
3871 cmpi.w %d1,&0x03c8
3875 cmpi.b %d1,&0x17
3887 btst &0x1,%d0
3890 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3894 sub.l &0x8,%sp # make room for "Next PC", <ea>
3895 mov.w 0x8(%sp),(%sp)
3896 mov.l 0xa(%sp),0x2(%sp) # move "Current PC"
3897 mov.w &0x402c,0x6(%sp)
3898 mov.l 0x2(%sp),0xc(%sp)
3899 addq.l &0x4,0x2(%sp) # set "Next PC"
3904 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3908 fmov.l 0x2(%sp),%fpiar # set current PC
3909 addq.l &0x4,0x2(%sp) # set Next PC
3912 mov.l 0x8(%sp),0x4(%sp)
3913 mov.b &0x20,0x6(%sp)
3918 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3935 # store_fpreg() - store opclass 0 or 2 result to FP regfile #
4003 # * 0x2 * 0x02c * => frame format and vector offset(vector #11)
4021 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
4023 fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1
4025 btst &0x5,EXC_SR(%a6) # user mode exception?
4047 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
4053 fmov.l &0x0,%fpcr # clear FPCR
4054 fmov.l &0x0,%fpsr # clear FPSR
4062 btst &22,%d0 # type 0 or 1 ?
4066 # TYPE == 0: General instructions #
4073 andi.l &0x00ff00ff,USER_FPSR(%a6)
4076 cmpi.b %d1,&0x17 # is op an fmovecr?
4086 andi.w &0x003f,%d1 # extract extension bits
4087 lsl.w &0x3,%d1 # shift left 3 bits
4105 fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
4107 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
4120 btst &0x7,(%sp) # is trace on?
4128 fmov.l %fpiar,0x14(%sp) # "Current PC" is in FPIAR
4130 mov.w &0x2024,0x6(%sp) # stk fmt = 0x2; voff = 0x24
4134 btst &0x5,EXC_SR(%a6) # supervisor or user mode?
4151 mov.l 0x2+EXC_PC(%a6),(0x2+EXC_PC,%a6,%d0) # shift stack frame
4168 andi.l &0x0000007f,%d1 # pass rom offset in d1
4200 subi.l &24,%d0 # fix offset to be 0-8
4201 cmpi.b %d0,&0x6 # is exception INEX?
4226 short 0xe002, 0xe006, 0xe004, 0xe005
4227 short 0xe003, 0xe002, 0xe001, 0xe001
4232 mov.w &0xe005,2+FP_SRC(%a6)
4238 mov.w &0xe003,2+FP_SRC(%a6)
4243 fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
4245 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
4261 cmpi.b %d1,&0x1 # is it an fdb<cc>?
4263 cmpi.b %d1,&0x7 # is it an fs<cc>?
4266 cmpi.b %d1,&0x2 # is it an fs<cc>?
4288 # * 0x2 * 0x02c * * 0x2 * 0x01c *
4301 mov.w &0x201c,EXC_VOFF(%a6) # Vector Offset = 0x01c
4303 fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
4305 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
4316 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
4343 btst &0x5,EXC_SR(%a6) # yes; is it a user mode exception?
4362 fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
4364 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
4368 btst &0x7,(%sp) # is trace enabled?
4371 subq.l &0x2,%sp
4372 mov.l 0x2(%sp),(%sp) # shift SR,hi(PC) "down"
4373 mov.l 0x6(%sp),0x4(%sp) # shift lo(PC),voff "down"
4377 subq.l &0x2,%sp
4378 mov.l 0x2(%sp),(%sp) # shift SR,hi(PC) "down"
4379 mov.w 0x6(%sp),0x4(%sp) # shift lo(PC)
4380 mov.w &0x2024,0x6(%sp) # fmt/voff = $2024
4381 fmov.l %fpiar,0x8(%sp) # insert "current PC"
4393 # ** <EA> ** * 0x0 * 0x0c0 *
4395 # * 0x2 * 0x02c * ** Current PC **
4404 mov.w &0x00c0,2+EXC_EA(%a6) # Fmt = 0x0; Vector Offset = 0x0c0
4408 mov.w &0xe000,2+FP_SRC(%a6) # bsun exception enabled
4410 fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
4412 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
4418 addq.l &0x4,%sp # erase sludge
4431 fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
4433 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
4437 btst &0x7,(%sp) # is trace enabled?
4446 # * 0x2 * 0x02c * * 0x2 * 0x024 *
4458 fmov.l %fpiar,0x8(%sp) # current PC is in fpiar
4459 mov.b &0x24,0x7(%sp) # vector offset = 0x024
4466 swbeg &0x1c0
4468 short tbl_trans - tbl_trans # $00-0 fmovecr all
4477 short tbl_trans - tbl_trans # $01-0 fint norm
4486 short ssinh - tbl_trans # $02-0 fsinh norm
4495 short tbl_trans - tbl_trans # $03-0 fintrz norm
4504 short tbl_trans - tbl_trans # $04-0 fsqrt norm
4513 short tbl_trans - tbl_trans # $05-0 ERROR
4522 short slognp1 - tbl_trans # $06-0 flognp1 norm
4531 short tbl_trans - tbl_trans # $07-0 ERROR
4540 short setoxm1 - tbl_trans # $08-0 fetoxm1 norm
4549 short stanh - tbl_trans # $09-0 ftanh norm
4558 short satan - tbl_trans # $0a-0 fatan norm
4559 short src_zero - tbl_trans # $0a-1 fatan zero
4560 short spi_2 - tbl_trans # $0a-2 fatan inf
4561 short src_qnan - tbl_trans # $0a-3 fatan qnan
4562 short satand - tbl_trans # $0a-4 fatan denorm
4563 short src_snan - tbl_trans # $0a-5 fatan snan
4564 short tbl_trans - tbl_trans # $0a-6 fatan unnorm
4565 short tbl_trans - tbl_trans # $0a-7 ERROR
4567 short tbl_trans - tbl_trans # $0b-0 ERROR
4568 short tbl_trans - tbl_trans # $0b-1 ERROR
4569 short tbl_trans - tbl_trans # $0b-2 ERROR
4570 short tbl_trans - tbl_trans # $0b-3 ERROR
4571 short tbl_trans - tbl_trans # $0b-4 ERROR
4572 short tbl_trans - tbl_trans # $0b-5 ERROR
4573 short tbl_trans - tbl_trans # $0b-6 ERROR
4574 short tbl_trans - tbl_trans # $0b-7 ERROR
4576 short sasin - tbl_trans # $0c-0 fasin norm
4577 short src_zero - tbl_trans # $0c-1 fasin zero
4578 short t_operr - tbl_trans # $0c-2 fasin inf
4579 short src_qnan - tbl_trans # $0c-3 fasin qnan
4580 short sasind - tbl_trans # $0c-4 fasin denorm
4581 short src_snan - tbl_trans # $0c-5 fasin snan
4582 short tbl_trans - tbl_trans # $0c-6 fasin unnorm
4583 short tbl_trans - tbl_trans # $0c-7 ERROR
4585 short satanh - tbl_trans # $0d-0 fatanh norm
4586 short src_zero - tbl_trans # $0d-1 fatanh zero
4587 short t_operr - tbl_trans # $0d-2 fatanh inf
4588 short src_qnan - tbl_trans # $0d-3 fatanh qnan
4589 short satanhd - tbl_trans # $0d-4 fatanh denorm
4590 short src_snan - tbl_trans # $0d-5 fatanh snan
4591 short tbl_trans - tbl_trans # $0d-6 fatanh unnorm
4592 short tbl_trans - tbl_trans # $0d-7 ERROR
4594 short ssin - tbl_trans # $0e-0 fsin norm
4595 short src_zero - tbl_trans # $0e-1 fsin zero
4596 short t_operr - tbl_trans # $0e-2 fsin inf
4597 short src_qnan - tbl_trans # $0e-3 fsin qnan
4598 short ssind - tbl_trans # $0e-4 fsin denorm
4599 short src_snan - tbl_trans # $0e-5 fsin snan
4600 short tbl_trans - tbl_trans # $0e-6 fsin unnorm
4601 short tbl_trans - tbl_trans # $0e-7 ERROR
4603 short stan - tbl_trans # $0f-0 ftan norm
4604 short src_zero - tbl_trans # $0f-1 ftan zero
4605 short t_operr - tbl_trans # $0f-2 ftan inf
4606 short src_qnan - tbl_trans # $0f-3 ftan qnan
4607 short stand - tbl_trans # $0f-4 ftan denorm
4608 short src_snan - tbl_trans # $0f-5 ftan snan
4609 short tbl_trans - tbl_trans # $0f-6 ftan unnorm
4610 short tbl_trans - tbl_trans # $0f-7 ERROR
4612 short setox - tbl_trans # $10-0 fetox norm
4621 short stwotox - tbl_trans # $11-0 ftwotox norm
4630 short stentox - tbl_trans # $12-0 ftentox norm
4639 short tbl_trans - tbl_trans # $13-0 ERROR
4648 short slogn - tbl_trans # $14-0 flogn norm
4657 short slog10 - tbl_trans # $15-0 flog10 norm
4666 short slog2 - tbl_trans # $16-0 flog2 norm
4675 short tbl_trans - tbl_trans # $17-0 ERROR
4684 short tbl_trans - tbl_trans # $18-0 fabs norm
4693 short scosh - tbl_trans # $19-0 fcosh norm
4702 short tbl_trans - tbl_trans # $1a-0 fneg norm
4711 short tbl_trans - tbl_trans # $1b-0 ERROR
4720 short sacos - tbl_trans # $1c-0 facos norm
4729 short scos - tbl_trans # $1d-0 fcos norm
4738 short sgetexp - tbl_trans # $1e-0 fgetexp norm
4747 short sgetman - tbl_trans # $1f-0 fgetman norm
4756 short tbl_trans - tbl_trans # $20-0 fdiv norm
4765 short smod_snorm - tbl_trans # $21-0 fmod norm
4774 short tbl_trans - tbl_trans # $22-0 fadd norm
4783 short tbl_trans - tbl_trans # $23-0 fmul norm
4792 short tbl_trans - tbl_trans # $24-0 fsgldiv norm
4801 short srem_snorm - tbl_trans # $25-0 frem norm
4810 short sscale_snorm - tbl_trans # $26-0 fscale norm
4819 short tbl_trans - tbl_trans # $27-0 fsglmul norm
4828 short tbl_trans - tbl_trans # $28-0 fsub norm
4837 short tbl_trans - tbl_trans # $29-0 ERROR
4846 short tbl_trans - tbl_trans # $2a-0 ERROR
4855 short tbl_trans - tbl_trans # $2b-0 ERROR
4864 short tbl_trans - tbl_trans # $2c-0 ERROR
4873 short tbl_trans - tbl_trans # $2d-0 ERROR
4882 short tbl_trans - tbl_trans # $2e-0 ERROR
4891 short tbl_trans - tbl_trans # $2f-0 ERROR
4900 short ssincos - tbl_trans # $30-0 fsincos norm
4909 short ssincos - tbl_trans # $31-0 fsincos norm
4918 short ssincos - tbl_trans # $32-0 fsincos norm
4927 short ssincos - tbl_trans # $33-0 fsincos norm
4936 short ssincos - tbl_trans # $34-0 fsincos norm
4945 short ssincos - tbl_trans # $35-0 fsincos norm
4954 short ssincos - tbl_trans # $36-0 fsincos norm
4963 short ssincos - tbl_trans # $37-0 fsincos norm
4978 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
4980 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
4987 mov.w 0x8(%sp),0x4(%sp) # store lo(PC)
4988 mov.w &0x4008,0x6(%sp) # store voff
4989 mov.l 0x2(%sp),0x8(%sp) # store EA
4990 mov.l &0x09428001,0xc(%sp) # store FSLW
4992 btst &0x5,(%sp) # user or supervisor mode?
4994 bset &0x2,0xd(%sp) # set supervisor TM bit
5027 # 1. If SIN is invoked, set AdjN := 0; otherwise, set AdjN := 1. #
5032 # k = N mod 4, so in particular, k = 0,1,2,or 3. #
5060 # k = N mod 4, so in particular, k = 0,1,2,or 3. #
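The ssin/scos description quoted here is the usual quarter-period reduction: N = round(X*2/pi), r = X - N*pi/2, and k = (N + AdjN) mod 4 selects between +-sin(r) and +-cos(r), with AdjN = 0 for fsin and 1 for fcos. A rough double-precision C sketch of just that selection logic, assuming libm's sin/cos in place of the FPSP's extended-precision polynomial kernels and split pi/2 constants:

#include <math.h>
#include <stdio.h>

/* Quarter-period reduction sketch: N = round(X*2/pi), r = X - N*pi/2,
 * k = (N + AdjN) mod 4 (AdjN = 0 for sin, 1 for cos). */
static double quarter_wave(double x, int adjn)
{
    const double pi = 3.141592653589793;
    double n = nearbyint(x * (2.0 / pi));    /* N */
    double r = x - n * (pi / 2.0);           /* reduced argument */
    int k = (int)(((long)n + adjn) & 3);     /* k = (N + AdjN) mod 4 */

    switch (k) {
    case 0:  return  sin(r);
    case 1:  return  cos(r);
    case 2:  return -sin(r);
    default: return -cos(r);
    }
}

int main(void)
{
    printf("sin(10.0) ~ %f\n", quarter_wave(10.0, 0));
    printf("cos(10.0) ~ %f\n", quarter_wave(10.0, 1));
    return 0;
}

The naive r = X - N*pi/2 subtraction only holds up for modest arguments, which is why the table-driven path in the listing is guarded by the "|X| < 15 PI?" test.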
5085 SINA7: long 0xBD6AAA77,0xCCC994F5
5086 SINA6: long 0x3DE61209,0x7AAE8DA1
5087 SINA5: long 0xBE5AE645,0x2A118AE4
5088 SINA4: long 0x3EC71DE3,0xA5341531
5089 SINA3: long 0xBF2A01A0,0x1A018B59,0x00000000,0x00000000
5090 SINA2: long 0x3FF80000,0x88888888,0x888859AF,0x00000000
5091 SINA1: long 0xBFFC0000,0xAAAAAAAA,0xAAAAAA99,0x00000000
5093 COSB8: long 0x3D2AC4D0,0xD6011EE3
5094 COSB7: long 0xBDA9396F,0x9F45AC19
5095 COSB6: long 0x3E21EED9,0x0612C972
5096 COSB5: long 0xBE927E4F,0xB79D9FCF
5097 COSB4: long 0x3EFA01A0,0x1A01D423,0x00000000,0x00000000
5098 COSB3: long 0xBFF50000,0xB60B60B6,0x0B61D438,0x00000000
5099 COSB2: long 0x3FFA0000,0xAAAAAAAA,0xAAAAAB5E
5100 COSB1: long 0xBF000000
5122 mov.l &0,ADJN(%a6) # yes; SET ADJN TO 0
5140 and.l &0x7FFFFFFF,%d1 # strip sign
5142 cmpi.l %d1,&0x3FD78000 # is |X| >= 2**(-40)?
5147 cmp.l %d1,&0x4004BC7E # is |X| < 15 PI?
5157 lea PITBL+0x200(%pc),%a1 # TABLE OF N*PI/2, N = -32,...,32
5177 cmp.l %d1,&0
5189 fmovm.x &0x0c,-(%sp) # save fp2/fp3
5201 and.l &0x80000000,%d1
5227 fmovm.x (%sp)+,&0x30 # restore fp2/fp3
5243 fmovm.x &0x0c,-(%sp) # save fp2/fp3
5255 and.l &0x80000000,%d1
5261 and.l &0x80000000,%d1
5265 or.l &0x3F800000,%d1 # D1 IS SGN IN SINGLE
5289 fmovm.x (%sp)+,&0x30 # restore fp2/fp3
5301 cmp.l %d1,&0x3FFF8000
5306 cmp.l %d1,&0
5312 # mov.w &0x0000,XDCARE(%a6) # JUST IN CASE
5320 fmov.s &0x3F800000,%fp0 # fp0 = 1.0
5322 fadd.s &0x80800000,%fp0 # last inst - possible exception set
5335 fmov.s &0x3F800000,%fp0 # fp0 = 1.0
5350 and.l &0x7FFFFFFF,%d1 # COMPACTIFY X
5352 cmp.l %d1,&0x3FD78000 # |X| >= 2**(-40)?
5357 cmp.l %d1,&0x4004BC7E # |X| < 15 PI?
5369 lea PITBL+0x200(%pc),%a1 # TABLE OF N*PI/2, N = -32,...,32
5385 cmp.l %d1,&0 # D1 < 0 IFF N IS ODD
5390 fmovm.x &0x04,-(%sp) # save fp2
5402 and.l &0x80000000,%d2
5404 and.l &0x80000000,%d2
5414 and.l &0x80000000,%d1
5415 mov.l &0x3F800000,POSNEG1(%a6)
5454 fmovm.x (%sp)+,&0x20 # restore fp2
5464 fmovm.x &0x04,-(%sp) # save fp2
5477 and.l &0x80000000,%d1
5487 or.l &0x3F800000,%d1
5527 fmovm.x (%sp)+,&0x20 # restore fp2
5538 cmp.l %d1,&0x3FFF8000
5544 # mov.w &0x0000,XDCARE(%a6)
5545 fmov.s &0x3F800000,%fp1
5548 fsub.s &0x00800000,%fp1
5561 fmov.s &0x3F800000,%fp1
5572 fmovm.x &0x3c,-(%sp) # save {fp2-fp5}
5574 fmov.s &0x00000000,%fp1 # fp1 = 0
5580 cmp.l %d1,&0x7ffeffff # is arg dangerously large?
5584 mov.w &0x7ffe,FP_SCR0_EX(%a6)
5585 mov.l &0xc90fdaa2,FP_SCR0_HI(%a6)
5589 mov.w &0x7fdc,FP_SCR1_EX(%a6)
5590 mov.l &0x85a308d3,FP_SCR1_HI(%a6)
5596 or.b &0x80,FP_SCR0_EX(%a6) # positive arg
5597 or.b &0x80,FP_SCR1_EX(%a6)
5612 and.l &0x00007FFF,%d1
5613 sub.l &0x00003FFF,%d1 # d1 = K
5618 mov.b &0,ENDFLAG(%a6)
5621 clr.l %d1 # d1 = L := 0
5631 mov.l &0x00003FFE,%d2 # BIASED EXP OF 2/PI
5634 mov.l &0xA2F9836E,FP_SCR0_HI(%a6)
5635 mov.l &0x4E44152A,FP_SCR0_LO(%a6)
5648 and.l &0x80000000,%d2
5649 or.l &0x5F000000,%d2 # d2 = SIGN(INARG)*2**63 IN SGL
5658 add.l &0x00003FFF,%d2 # BIASED EXP OF 2**L * (PI/2)
5660 mov.l &0xC90FDAA2,FP_SCR0_HI(%a6)
5663 add.l &0x00003FDD,%d1
5665 mov.l &0x85A308D3,FP_SCR1_HI(%a6)
5693 cmp.b %d1,&0
5704 fmovm.x (%sp)+,&0x3c # restore {fp2-fp5}
5734 # k = N mod 2, so in particular, k = 0 or 1. #
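The stan fragment needs only the parity of N: with r = X - N*pi/2, tan(X) = tan(r) when N is even and -1/tan(r) (i.e. -cot(r)) when N is odd. A hedged C sketch of that case split, again borrowing libm in place of the FPSP's rational kernels:

#include <math.h>
#include <stdio.h>

/* tan via the k = N mod 2 selection: even N -> tan(r), odd N -> -cot(r). */
static double tan_reduced(double x)
{
    const double pi = 3.141592653589793;
    double n = nearbyint(x * (2.0 / pi));
    double r = x - n * (pi / 2.0);

    return (((long)n & 1) == 0) ? tan(r) : -1.0 / tan(r);
}

int main(void)
{
    printf("tan(10.0) ~ %f (libm: %f)\n", tan_reduced(10.0), tan(10.0));
    return 0;
}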
5760 long 0x3EA0B759,0xF50F8688
5762 long 0xBEF2BAA5,0xA8924F04
5765 long 0xBF346F59,0xB39BA65F,0x00000000,0x00000000
5768 long 0x3FF60000,0xE073D3FC,0x199C4A00,0x00000000
5771 long 0x3FF90000,0xD23CD684,0x15D95FA1,0x00000000
5774 long 0xBFFC0000,0x8895A6C5,0xFB423BCA,0x00000000
5777 long 0xBFFD0000,0xEEF57E0D,0xA84BC8CE,0x00000000
5780 long 0x3FFC0000,0xA2F9836E,0x4E44152A,0x00000000
5783 long 0x40010000,0xC90FDAA2,0x00000000,0x00000000
5785 long 0x3FDF0000,0x85A308D4,0x00000000,0x00000000
5792 long 0xC0040000,0xC90FDAA2,0x2168C235,0x21800000
5793 long 0xC0040000,0xC2C75BCD,0x105D7C23,0xA0D00000
5794 long 0xC0040000,0xBC7EDCF7,0xFF523611,0xA1E80000
5795 long 0xC0040000,0xB6365E22,0xEE46F000,0x21480000
5796 long 0xC0040000,0xAFEDDF4D,0xDD3BA9EE,0xA1200000
5797 long 0xC0040000,0xA9A56078,0xCC3063DD,0x21FC0000
5798 long 0xC0040000,0xA35CE1A3,0xBB251DCB,0x21100000
5799 long 0xC0040000,0x9D1462CE,0xAA19D7B9,0xA1580000
5800 long 0xC0040000,0x96CBE3F9,0x990E91A8,0x21E00000
5801 long 0xC0040000,0x90836524,0x88034B96,0x20B00000
5802 long 0xC0040000,0x8A3AE64F,0x76F80584,0xA1880000
5803 long 0xC0040000,0x83F2677A,0x65ECBF73,0x21C40000
5804 long 0xC0030000,0xFB53D14A,0xA9C2F2C2,0x20000000
5805 long 0xC0030000,0xEEC2D3A0,0x87AC669F,0x21380000
5806 long 0xC0030000,0xE231D5F6,0x6595DA7B,0xA1300000
5807 long 0xC0030000,0xD5A0D84C,0x437F4E58,0x9FC00000
5808 long 0xC0030000,0xC90FDAA2,0x2168C235,0x21000000
5809 long 0xC0030000,0xBC7EDCF7,0xFF523611,0xA1680000
5810 long 0xC0030000,0xAFEDDF4D,0xDD3BA9EE,0xA0A00000
5811 long 0xC0030000,0xA35CE1A3,0xBB251DCB,0x20900000
5812 long 0xC0030000,0x96CBE3F9,0x990E91A8,0x21600000
5813 long 0xC0030000,0x8A3AE64F,0x76F80584,0xA1080000
5814 long 0xC0020000,0xFB53D14A,0xA9C2F2C2,0x1F800000
5815 long 0xC0020000,0xE231D5F6,0x6595DA7B,0xA0B00000
5816 long 0xC0020000,0xC90FDAA2,0x2168C235,0x20800000
5817 long 0xC0020000,0xAFEDDF4D,0xDD3BA9EE,0xA0200000
5818 long 0xC0020000,0x96CBE3F9,0x990E91A8,0x20E00000
5819 long 0xC0010000,0xFB53D14A,0xA9C2F2C2,0x1F000000
5820 long 0xC0010000,0xC90FDAA2,0x2168C235,0x20000000
5821 long 0xC0010000,0x96CBE3F9,0x990E91A8,0x20600000
5822 long 0xC0000000,0xC90FDAA2,0x2168C235,0x1F800000
5823 long 0xBFFF0000,0xC90FDAA2,0x2168C235,0x1F000000
5824 long 0x00000000,0x00000000,0x00000000,0x00000000
5825 long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x9F000000
5826 long 0x40000000,0xC90FDAA2,0x2168C235,0x9F800000
5827 long 0x40010000,0x96CBE3F9,0x990E91A8,0xA0600000
5828 long 0x40010000,0xC90FDAA2,0x2168C235,0xA0000000
5829 long 0x40010000,0xFB53D14A,0xA9C2F2C2,0x9F000000
5830 long 0x40020000,0x96CBE3F9,0x990E91A8,0xA0E00000
5831 long 0x40020000,0xAFEDDF4D,0xDD3BA9EE,0x20200000
5832 long 0x40020000,0xC90FDAA2,0x2168C235,0xA0800000
5833 long 0x40020000,0xE231D5F6,0x6595DA7B,0x20B00000
5834 long 0x40020000,0xFB53D14A,0xA9C2F2C2,0x9F800000
5835 long 0x40030000,0x8A3AE64F,0x76F80584,0x21080000
5836 long 0x40030000,0x96CBE3F9,0x990E91A8,0xA1600000
5837 long 0x40030000,0xA35CE1A3,0xBB251DCB,0xA0900000
5838 long 0x40030000,0xAFEDDF4D,0xDD3BA9EE,0x20A00000
5839 long 0x40030000,0xBC7EDCF7,0xFF523611,0x21680000
5840 long 0x40030000,0xC90FDAA2,0x2168C235,0xA1000000
5841 long 0x40030000,0xD5A0D84C,0x437F4E58,0x1FC00000
5842 long 0x40030000,0xE231D5F6,0x6595DA7B,0x21300000
5843 long 0x40030000,0xEEC2D3A0,0x87AC669F,0xA1380000
5844 long 0x40030000,0xFB53D14A,0xA9C2F2C2,0xA0000000
5845 long 0x40040000,0x83F2677A,0x65ECBF73,0xA1C40000
5846 long 0x40040000,0x8A3AE64F,0x76F80584,0x21880000
5847 long 0x40040000,0x90836524,0x88034B96,0xA0B00000
5848 long 0x40040000,0x96CBE3F9,0x990E91A8,0xA1E00000
5849 long 0x40040000,0x9D1462CE,0xAA19D7B9,0x21580000
5850 long 0x40040000,0xA35CE1A3,0xBB251DCB,0xA1100000
5851 long 0x40040000,0xA9A56078,0xCC3063DD,0xA1FC0000
5852 long 0x40040000,0xAFEDDF4D,0xDD3BA9EE,0x21200000
5853 long 0x40040000,0xB6365E22,0xEE46F000,0xA1480000
5854 long 0x40040000,0xBC7EDCF7,0xFF523611,0x21E80000
5855 long 0x40040000,0xC2C75BCD,0x105D7C23,0x20D00000
5856 long 0x40040000,0xC90FDAA2,0x2168C235,0xA1800000
5870 and.l &0x7FFFFFFF,%d1
5872 cmp.l %d1,&0x3FD78000 # |X| >= 2**(-40)?
5876 cmp.l %d1,&0x4004BC7E # |X| < 15 PI?
5886 lea.l PITBL+0x200(%pc),%a1 # TABLE OF N*PI/2, N = -32,...,32
5898 and.l &0x80000000,%d1 # D1 WAS ODD IFF D1 < 0
5901 fmovm.x &0x0c,-(%sp) # save fp2,fp3
5903 cmp.l %d1,&0
5934 fadd.s &0x3F800000,%fp1 # 1+S(Q1+...)
5936 fmovm.x (%sp)+,&0x30 # restore fp2,fp3
5970 fadd.s &0x3F800000,%fp0 # 1+S(Q1+...)
5972 fmovm.x (%sp)+,&0x30 # restore fp2,fp3
5975 eor.l &0x80000000,(%sp)
5984 cmp.l %d1,&0x3FFF8000
6003 fmovm.x &0x3c,-(%sp) # save {fp2-fp5}
6005 fmov.s &0x00000000,%fp1 # fp1 = 0
6011 cmp.l %d1,&0x7ffeffff # is arg dangerously large?
6015 mov.w &0x7ffe,FP_SCR0_EX(%a6)
6016 mov.l &0xc90fdaa2,FP_SCR0_HI(%a6)
6020 mov.w &0x7fdc,FP_SCR1_EX(%a6)
6021 mov.l &0x85a308d3,FP_SCR1_HI(%a6)
6027 or.b &0x80,FP_SCR0_EX(%a6) # positive arg
6028 or.b &0x80,FP_SCR1_EX(%a6)
6043 and.l &0x00007FFF,%d1
6044 sub.l &0x00003FFF,%d1 # d1 = K
6049 mov.b &0,ENDFLAG(%a6)
6052 clr.l %d1 # d1 = L := 0
6062 mov.l &0x00003FFE,%d2 # BIASED EXP OF 2/PI
6065 mov.l &0xA2F9836E,FP_SCR0_HI(%a6)
6066 mov.l &0x4E44152A,FP_SCR0_LO(%a6)
6079 and.l &0x80000000,%d2
6080 or.l &0x5F000000,%d2 # d2 = SIGN(INARG)*2**63 IN SGL
6089 add.l &0x00003FFF,%d2 # BIASED EXP OF 2**L * (PI/2)
6091 mov.l &0xC90FDAA2,FP_SCR0_HI(%a6)
6094 add.l &0x00003FDD,%d1
6096 mov.l &0x85A308D3,FP_SCR1_HI(%a6)
6124 cmp.b %d1,&0
6135 fmovm.x (%sp)+,&0x3c # restore {fp2-fp5}
6183 ATANA3: long 0xBFF6687E,0x314987D8
6184 ATANA2: long 0x4002AC69,0x34A26DB3
6185 ATANA1: long 0xBFC2476F,0x4E1DA28E
6187 ATANB6: long 0x3FB34444,0x7F876989
6188 ATANB5: long 0xBFB744EE,0x7FAF45DB
6189 ATANB4: long 0x3FBC71C6,0x46940220
6190 ATANB3: long 0xBFC24924,0x921872F9
6191 ATANB2: long 0x3FC99999,0x99998FA9
6192 ATANB1: long 0xBFD55555,0x55555555
6194 ATANC5: long 0xBFB70BF3,0x98539E6A
6195 ATANC4: long 0x3FBC7187,0x962D1D7D
6196 ATANC3: long 0xBFC24924,0x827107B8
6197 ATANC2: long 0x3FC99999,0x9996263E
6198 ATANC1: long 0xBFD55555,0x55555536
6200 PPIBY2: long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
6201 NPIBY2: long 0xBFFF0000,0xC90FDAA2,0x2168C235,0x00000000
6203 PTINY: long 0x00010000,0x80000000,0x00000000,0x00000000
6204 NTINY: long 0x80010000,0x80000000,0x00000000,0x00000000
6207 long 0x3FFB0000,0x83D152C5,0x060B7A51,0x00000000
6208 long 0x3FFB0000,0x8BC85445,0x65498B8B,0x00000000
6209 long 0x3FFB0000,0x93BE4060,0x17626B0D,0x00000000
6210 long 0x3FFB0000,0x9BB3078D,0x35AEC202,0x00000000
6211 long 0x3FFB0000,0xA3A69A52,0x5DDCE7DE,0x00000000
6212 long 0x3FFB0000,0xAB98E943,0x62765619,0x00000000
6213 long 0x3FFB0000,0xB389E502,0xF9C59862,0x00000000
6214 long 0x3FFB0000,0xBB797E43,0x6B09E6FB,0x00000000
6215 long 0x3FFB0000,0xC367A5C7,0x39E5F446,0x00000000
6216 long 0x3FFB0000,0xCB544C61,0xCFF7D5C6,0x00000000
6217 long 0x3FFB0000,0xD33F62F8,0x2488533E,0x00000000
6218 long 0x3FFB0000,0xDB28DA81,0x62404C77,0x00000000
6219 long 0x3FFB0000,0xE310A407,0x8AD34F18,0x00000000
6220 long 0x3FFB0000,0xEAF6B0A8,0x188EE1EB,0x00000000
6221 long 0x3FFB0000,0xF2DAF194,0x9DBE79D5,0x00000000
6222 long 0x3FFB0000,0xFABD5813,0x61D47E3E,0x00000000
6223 long 0x3FFC0000,0x8346AC21,0x0959ECC4,0x00000000
6224 long 0x3FFC0000,0x8B232A08,0x304282D8,0x00000000
6225 long 0x3FFC0000,0x92FB70B8,0xD29AE2F9,0x00000000
6226 long 0x3FFC0000,0x9ACF476F,0x5CCD1CB4,0x00000000
6227 long 0x3FFC0000,0xA29E7630,0x4954F23F,0x00000000
6228 long 0x3FFC0000,0xAA68C5D0,0x8AB85230,0x00000000
6229 long 0x3FFC0000,0xB22DFFFD,0x9D539F83,0x00000000
6230 long 0x3FFC0000,0xB9EDEF45,0x3E900EA5,0x00000000
6231 long 0x3FFC0000,0xC1A85F1C,0xC75E3EA5,0x00000000
6232 long 0x3FFC0000,0xC95D1BE8,0x28138DE6,0x00000000
6233 long 0x3FFC0000,0xD10BF300,0x840D2DE4,0x00000000
6234 long 0x3FFC0000,0xD8B4B2BA,0x6BC05E7A,0x00000000
6235 long 0x3FFC0000,0xE0572A6B,0xB42335F6,0x00000000
6236 long 0x3FFC0000,0xE7F32A70,0xEA9CAA8F,0x00000000
6237 long 0x3FFC0000,0xEF888432,0x64ECEFAA,0x00000000
6238 long 0x3FFC0000,0xF7170A28,0xECC06666,0x00000000
6239 long 0x3FFD0000,0x812FD288,0x332DAD32,0x00000000
6240 long 0x3FFD0000,0x88A8D1B1,0x218E4D64,0x00000000
6241 long 0x3FFD0000,0x9012AB3F,0x23E4AEE8,0x00000000
6242 long 0x3FFD0000,0x976CC3D4,0x11E7F1B9,0x00000000
6243 long 0x3FFD0000,0x9EB68949,0x3889A227,0x00000000
6244 long 0x3FFD0000,0xA5EF72C3,0x4487361B,0x00000000
6245 long 0x3FFD0000,0xAD1700BA,0xF07A7227,0x00000000
6246 long 0x3FFD0000,0xB42CBCFA,0xFD37EFB7,0x00000000
6247 long 0x3FFD0000,0xBB303A94,0x0BA80F89,0x00000000
6248 long 0x3FFD0000,0xC22115C6,0xFCAEBBAF,0x00000000
6249 long 0x3FFD0000,0xC8FEF3E6,0x86331221,0x00000000
6250 long 0x3FFD0000,0xCFC98330,0xB4000C70,0x00000000
6251 long 0x3FFD0000,0xD6807AA1,0x102C5BF9,0x00000000
6252 long 0x3FFD0000,0xDD2399BC,0x31252AA3,0x00000000
6253 long 0x3FFD0000,0xE3B2A855,0x6B8FC517,0x00000000
6254 long 0x3FFD0000,0xEA2D764F,0x64315989,0x00000000
6255 long 0x3FFD0000,0xF3BF5BF8,0xBAD1A21D,0x00000000
6256 long 0x3FFE0000,0x801CE39E,0x0D205C9A,0x00000000
6257 long 0x3FFE0000,0x8630A2DA,0xDA1ED066,0x00000000
6258 long 0x3FFE0000,0x8C1AD445,0xF3E09B8C,0x00000000
6259 long 0x3FFE0000,0x91DB8F16,0x64F350E2,0x00000000
6260 long 0x3FFE0000,0x97731420,0x365E538C,0x00000000
6261 long 0x3FFE0000,0x9CE1C8E6,0xA0B8CDBA,0x00000000
6262 long 0x3FFE0000,0xA22832DB,0xCADAAE09,0x00000000
6263 long 0x3FFE0000,0xA746F2DD,0xB7602294,0x00000000
6264 long 0x3FFE0000,0xAC3EC0FB,0x997DD6A2,0x00000000
6265 long 0x3FFE0000,0xB110688A,0xEBDC6F6A,0x00000000
6266 long 0x3FFE0000,0xB5BCC490,0x59ECC4B0,0x00000000
6267 long 0x3FFE0000,0xBA44BC7D,0xD470782F,0x00000000
6268 long 0x3FFE0000,0xBEA94144,0xFD049AAC,0x00000000
6269 long 0x3FFE0000,0xC2EB4ABB,0x661628B6,0x00000000
6270 long 0x3FFE0000,0xC70BD54C,0xE602EE14,0x00000000
6271 long 0x3FFE0000,0xCD000549,0xADEC7159,0x00000000
6272 long 0x3FFE0000,0xD48457D2,0xD8EA4EA3,0x00000000
6273 long 0x3FFE0000,0xDB948DA7,0x12DECE3B,0x00000000
6274 long 0x3FFE0000,0xE23855F9,0x69E8096A,0x00000000
6275 long 0x3FFE0000,0xE8771129,0xC4353259,0x00000000
6276 long 0x3FFE0000,0xEE57C16E,0x0D379C0D,0x00000000
6277 long 0x3FFE0000,0xF3E10211,0xA87C3779,0x00000000
6278 long 0x3FFE0000,0xF919039D,0x758B8D41,0x00000000
6279 long 0x3FFE0000,0xFE058B8F,0x64935FB3,0x00000000
6280 long 0x3FFF0000,0x8155FB49,0x7B685D04,0x00000000
6281 long 0x3FFF0000,0x83889E35,0x49D108E1,0x00000000
6282 long 0x3FFF0000,0x859CFA76,0x511D724B,0x00000000
6283 long 0x3FFF0000,0x87952ECF,0xFF8131E7,0x00000000
6284 long 0x3FFF0000,0x89732FD1,0x9557641B,0x00000000
6285 long 0x3FFF0000,0x8B38CAD1,0x01932A35,0x00000000
6286 long 0x3FFF0000,0x8CE7A8D8,0x301EE6B5,0x00000000
6287 long 0x3FFF0000,0x8F46A39E,0x2EAE5281,0x00000000
6288 long 0x3FFF0000,0x922DA7D7,0x91888487,0x00000000
6289 long 0x3FFF0000,0x94D19FCB,0xDEDF5241,0x00000000
6290 long 0x3FFF0000,0x973AB944,0x19D2A08B,0x00000000
6291 long 0x3FFF0000,0x996FF00E,0x08E10B96,0x00000000
6292 long 0x3FFF0000,0x9B773F95,0x12321DA7,0x00000000
6293 long 0x3FFF0000,0x9D55CC32,0x0F935624,0x00000000
6294 long 0x3FFF0000,0x9F100575,0x006CC571,0x00000000
6295 long 0x3FFF0000,0xA0A9C290,0xD97CC06C,0x00000000
6296 long 0x3FFF0000,0xA22659EB,0xEBC0630A,0x00000000
6297 long 0x3FFF0000,0xA388B4AF,0xF6EF0EC9,0x00000000
6298 long 0x3FFF0000,0xA4D35F10,0x61D292C4,0x00000000
6299 long 0x3FFF0000,0xA60895DC,0xFBE3187E,0x00000000
6300 long 0x3FFF0000,0xA72A51DC,0x7367BEAC,0x00000000
6301 long 0x3FFF0000,0xA83A5153,0x0956168F,0x00000000
6302 long 0x3FFF0000,0xA93A2007,0x7539546E,0x00000000
6303 long 0x3FFF0000,0xAA9E7245,0x023B2605,0x00000000
6304 long 0x3FFF0000,0xAC4C84BA,0x6FE4D58F,0x00000000
6305 long 0x3FFF0000,0xADCE4A4A,0x606B9712,0x00000000
6306 long 0x3FFF0000,0xAF2A2DCD,0x8D263C9C,0x00000000
6307 long 0x3FFF0000,0xB0656F81,0xF22265C7,0x00000000
6308 long 0x3FFF0000,0xB1846515,0x0F71496A,0x00000000
6309 long 0x3FFF0000,0xB28AAA15,0x6F9ADA35,0x00000000
6310 long 0x3FFF0000,0xB37B44FF,0x3766B895,0x00000000
6311 long 0x3FFF0000,0xB458C3DC,0xE9630433,0x00000000
6312 long 0x3FFF0000,0xB525529D,0x562246BD,0x00000000
6313 long 0x3FFF0000,0xB5E2CCA9,0x5F9D88CC,0x00000000
6314 long 0x3FFF0000,0xB692CADA,0x7ACA1ADA,0x00000000
6315 long 0x3FFF0000,0xB736AEA7,0xA6925838,0x00000000
6316 long 0x3FFF0000,0xB7CFAB28,0x7E9F7B36,0x00000000
6317 long 0x3FFF0000,0xB85ECC66,0xCB219835,0x00000000
6318 long 0x3FFF0000,0xB8E4FD5A,0x20A593DA,0x00000000
6319 long 0x3FFF0000,0xB99F41F6,0x4AFF9BB5,0x00000000
6320 long 0x3FFF0000,0xBA7F1E17,0x842BBE7B,0x00000000
6321 long 0x3FFF0000,0xBB471285,0x7637E17D,0x00000000
6322 long 0x3FFF0000,0xBBFABE8A,0x4788DF6F,0x00000000
6323 long 0x3FFF0000,0xBC9D0FAD,0x2B689D79,0x00000000
6324 long 0x3FFF0000,0xBD306A39,0x471ECD86,0x00000000
6325 long 0x3FFF0000,0xBDB6C731,0x856AF18A,0x00000000
6326 long 0x3FFF0000,0xBE31CAC5,0x02E80D70,0x00000000
6327 long 0x3FFF0000,0xBEA2D55C,0xE33194E2,0x00000000
6328 long 0x3FFF0000,0xBF0B10B7,0xC03128F0,0x00000000
6329 long 0x3FFF0000,0xBF6B7A18,0xDACB778D,0x00000000
6330 long 0x3FFF0000,0xBFC4EA46,0x63FA18F6,0x00000000
6331 long 0x3FFF0000,0xC0181BDE,0x8B89A454,0x00000000
6332 long 0x3FFF0000,0xC065B066,0xCFBF6439,0x00000000
6333 long 0x3FFF0000,0xC0AE345F,0x56340AE6,0x00000000
6334 long 0x3FFF0000,0xC0F22291,0x9CB9E6A7,0x00000000
6353 and.l &0x7FFFFFFF,%d1
6355 cmp.l %d1,&0x3FFB8000 # |X| >= 1/16?
6360 cmp.l %d1,&0x4002FFFF # |X| < 16 ?
6387 and.l &0xF8000000,XFRAC(%a6) # FIRST 5 BITS
6388 or.l &0x04000000,XFRAC(%a6) # SET 6-TH BIT TO 1
6389 mov.l &0x00000000,XFRACLO(%a6) # LOCATION OF X IS NOW F
6392 fmul.x X(%a6),%fp1 # FP1 IS X*F, NOTE THAT X*F > 0
6394 fadd.s &0x3F800000,%fp1 # FP1 IS 1 + X*F
6403 and.l &0x00007800,%d1 # 4 VARYING BITS OF F'S FRACTION
6404 and.l &0x7FFF0000,%d2 # EXPONENT OF F
6405 sub.l &0x3FFB0000,%d2 # K+4
6415 and.l &0x80000000,%d1 # SIGN(F)
6430 fmovm.x &0x04,-(%sp) # save fp2
6443 fmovm.x (%sp)+,&0x20 # restore fp2
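#
# A minimal C sketch (not the FPSP code) of the reduction the comments above
# describe: F keeps only the leading fraction bits of X with one extra bit
# forced on, a table supplies atan(F), and the residue uses the exact
# identity atan(X) = atan(F) + atan((X-F)/(1+X*F)).  libm atan() stands in
# here for both the table lookup and the short correction polynomial.
#
#	#include <math.h>
#	static double atan_via_reduction(double x)
#	{
#	    double f = ldexp(floor(ldexp(x, 5)), -5); /* coarse truncation of X */
#	    double u = (x - f) / (1.0 + x * f);       /* small residual angle   */
#	    return atan(f) + atan(u);                 /* table + correction     */
#	}
#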
6452 cmp.l %d1,&0x3FFF8000
6462 cmp.l %d1,&0x3FD78000
6466 fmovm.x &0x0c,-(%sp) # save fp2/fp3
6495 fmovm.x (%sp)+,&0x30 # restore fp2/fp3
6513 cmp.l %d1,&0x40638000
6521 fmovm.x &0x0c,-(%sp) # save fp2/fp3
6523 fmov.s &0xBF800000,%fp1 # LOAD -1
6556 fmovm.x (%sp)+,&0x30 # restore fp2/fp3
6623 # 5. (|X| > 1) Generate an invalid operation by 0 * infinity. #
6634 and.l &0x7FFFFFFF,%d1
6635 cmp.l %d1,&0x3FFF8000
6643 cmp.l %d1,&0x3FD78000
6650 fmov.s &0x3F800000,%fp1
6652 fmovm.x &0x4,-(%sp) # {fp2}
6653 fmov.s &0x3F800000,%fp2
6656 fmovm.x (%sp)+,&0x20 # {fp2}
6659 fmovm.x &0x01,-(%sp) # save X/SQRT(...)
6662 add.l &0xc,%sp # clear X/SQRT(...) from stack
6667 fcmp.s %fp0,&0x3F800000
6674 and.l &0x80000000,%d1 # SIGN BIT OF X
6675 or.l &0x3F800000,%d1 # +-1 IN SGL FORMAT
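#
# A hedged C sketch of the identity the asin fragments above rely on (the
# X/SQRT(...) value fed into the atan kernel); the |X| = 1 and |X| > 1
# special cases are handled separately, as the header comment notes.
#
#	#include <math.h>
#	static double asin_via_atan(double x)   /* valid for |x| < 1 */
#	{
#	    return atan(x / sqrt((1.0 - x) * (1.0 + x)));
#	}
#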
6722 # 4. (|X| = 1) If X > 0, return 0. Otherwise, return Pi. Exit. #
6724 # 5. (|X| > 1) Generate an invalid operation by 0 * infinity. #
6735 and.l &0x7FFFFFFF,%d1
6736 cmp.l %d1,&0x3FFF8000
6743 fmov.s &0x3F800000,%fp1
6746 fadd.s &0x3F800000,%fp0 # 1-X
6751 fmovm.x &0x01,-(%sp) # save SQRT(...) to stack
6754 add.l &0xc,%sp # clear SQRT(...) from stack
6762 fcmp.s %fp0,&0x3F800000
6765 #--|X| = 1, ACOS(X) = 0 OR PI
6774 fadd.s &0x00800000,%fp0 # add a small value
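#
# Sketch (C, not the FPSP code) of the form the acos path above uses: the
# 1-X, SQRT(...) and final doubling correspond to
# acos(X) = 2*atan(sqrt((1-X)/(1+X))), with |X| = 1 returning 0 or pi and
# |X| > 1 raising the invalid operation as documented.
#
#	#include <math.h>
#	static double acos_via_atan(double x)   /* valid for |x| <= 1 */
#	{
#	    return 2.0 * atan(sqrt((1.0 - x) / (1.0 + x)));
#	}
#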
6840 # 2.1 Set AdjFlag := 0 (indicates the branch 1.3 -> 2 #
6843 # 2.3 Calculate J = N mod 64; so J = 0,1,2,..., #
6919 # 6.1 If AdjFlag = 0, go to 6.3 #
6923 # Notes: If AdjFlag = 0, we have X = Mlog2 + Jlog2/64 + R, #
6954 # 8.3 Calculate J = N mod 64, J = 0,1,...,63 #
6964 # 9.1 If X < 0, go to 9.3 #
6978 # Step 1. Set ans := 0 #
7000 # 2.2 Calculate J = N mod 64; so J = 0,1,2,..., #
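#
# A small C sketch of the reduction outlined in the setox/setoxm1 step
# comments above.  N = round(X*64/log2), X = N*(log2/64) + R, N = 64*M + J,
# so exp(X) = 2^M * 2^(J/64) * exp(R); the FPSP fetches 2^(J/64) from the
# tables that follow and evaluates exp(R)-1 with a short polynomial, while
# this sketch leans on libm for both (function name invented here).
#
#	#include <math.h>
#	static double exp_by_table(double x)
#	{
#	    const double ln2 = 0.69314718055994530942;
#	    int    n = (int)nearbyint(x * 64.0 / ln2);
#	    int    j = n & 0x3f, m = (n - j) / 64;    /* N = 64*M + J         */
#	    double r = x - n * (ln2 / 64.0);          /* |R| <= log2/128      */
#	    double t = exp2(j / 64.0);                /* table value 2^(J/64) */
#	    return ldexp(t + t * expm1(r), m);        /* 2^M*(T + T*(e^R-1))  */
#	}
#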
7108 L2: long 0x3FDC0000,0x82E30865,0x4361C4C6,0x00000000
7110 EEXPA3: long 0x3FA55555,0x55554CC1
7111 EEXPA2: long 0x3FC55555,0x55554A54
7113 EM1A4: long 0x3F811111,0x11174385
7114 EM1A3: long 0x3FA55555,0x55554F5A
7116 EM1A2: long 0x3FC55555,0x55555555,0x00000000,0x00000000
7118 EM1B8: long 0x3EC71DE3,0xA5774682
7119 EM1B7: long 0x3EFA01A0,0x19D7CB68
7121 EM1B6: long 0x3F2A01A0,0x1A019DF3
7122 EM1B5: long 0x3F56C16C,0x16C170E2
7124 EM1B4: long 0x3F811111,0x11111111
7125 EM1B3: long 0x3FA55555,0x55555555
7127 EM1B2: long 0x3FFC0000,0xAAAAAAAA,0xAAAAAAAB
7128 long 0x00000000
7130 TWO140: long 0x48B00000,0x00000000
7132 long 0x37300000,0x00000000
7135 long 0x3FFF0000,0x80000000,0x00000000,0x00000000
7136 long 0x3FFF0000,0x8164D1F3,0xBC030774,0x9F841A9B
7137 long 0x3FFF0000,0x82CD8698,0xAC2BA1D8,0x9FC1D5B9
7138 long 0x3FFF0000,0x843A28C3,0xACDE4048,0xA0728369
7139 long 0x3FFF0000,0x85AAC367,0xCC487B14,0x1FC5C95C
7140 long 0x3FFF0000,0x871F6196,0x9E8D1010,0x1EE85C9F
7141 long 0x3FFF0000,0x88980E80,0x92DA8528,0x9FA20729
7142 long 0x3FFF0000,0x8A14D575,0x496EFD9C,0xA07BF9AF
7143 long 0x3FFF0000,0x8B95C1E3,0xEA8BD6E8,0xA0020DCF
7144 long 0x3FFF0000,0x8D1ADF5B,0x7E5BA9E4,0x205A63DA
7145 long 0x3FFF0000,0x8EA4398B,0x45CD53C0,0x1EB70051
7146 long 0x3FFF0000,0x9031DC43,0x1466B1DC,0x1F6EB029
7147 long 0x3FFF0000,0x91C3D373,0xAB11C338,0xA0781494
7148 long 0x3FFF0000,0x935A2B2F,0x13E6E92C,0x9EB319B0
7149 long 0x3FFF0000,0x94F4EFA8,0xFEF70960,0x2017457D
7150 long 0x3FFF0000,0x96942D37,0x20185A00,0x1F11D537
7151 long 0x3FFF0000,0x9837F051,0x8DB8A970,0x9FB952DD
7152 long 0x3FFF0000,0x99E04593,0x20B7FA64,0x1FE43087
7153 long 0x3FFF0000,0x9B8D39B9,0xD54E5538,0x1FA2A818
7154 long 0x3FFF0000,0x9D3ED9A7,0x2CFFB750,0x1FDE494D
7155 long 0x3FFF0000,0x9EF53260,0x91A111AC,0x20504890
7156 long 0x3FFF0000,0xA0B0510F,0xB9714FC4,0xA073691C
7157 long 0x3FFF0000,0xA2704303,0x0C496818,0x1F9B7A05
7158 long 0x3FFF0000,0xA43515AE,0x09E680A0,0xA0797126
7159 long 0x3FFF0000,0xA5FED6A9,0xB15138EC,0xA071A140
7160 long 0x3FFF0000,0xA7CD93B4,0xE9653568,0x204F62DA
7161 long 0x3FFF0000,0xA9A15AB4,0xEA7C0EF8,0x1F283C4A
7162 long 0x3FFF0000,0xAB7A39B5,0xA93ED338,0x9F9A7FDC
7163 long 0x3FFF0000,0xAD583EEA,0x42A14AC8,0xA05B3FAC
7164 long 0x3FFF0000,0xAF3B78AD,0x690A4374,0x1FDF2610
7165 long 0x3FFF0000,0xB123F581,0xD2AC2590,0x9F705F90
7166 long 0x3FFF0000,0xB311C412,0xA9112488,0x201F678A
7167 long 0x3FFF0000,0xB504F333,0xF9DE6484,0x1F32FB13
7168 long 0x3FFF0000,0xB6FD91E3,0x28D17790,0x20038B30
7169 long 0x3FFF0000,0xB8FBAF47,0x62FB9EE8,0x200DC3CC
7170 long 0x3FFF0000,0xBAFF5AB2,0x133E45FC,0x9F8B2AE6
7171 long 0x3FFF0000,0xBD08A39F,0x580C36C0,0xA02BBF70
7172 long 0x3FFF0000,0xBF1799B6,0x7A731084,0xA00BF518
7173 long 0x3FFF0000,0xC12C4CCA,0x66709458,0xA041DD41
7174 long 0x3FFF0000,0xC346CCDA,0x24976408,0x9FDF137B
7175 long 0x3FFF0000,0xC5672A11,0x5506DADC,0x201F1568
7176 long 0x3FFF0000,0xC78D74C8,0xABB9B15C,0x1FC13A2E
7177 long 0x3FFF0000,0xC9B9BD86,0x6E2F27A4,0xA03F8F03
7178 long 0x3FFF0000,0xCBEC14FE,0xF2727C5C,0x1FF4907D
7179 long 0x3FFF0000,0xCE248C15,0x1F8480E4,0x9E6E53E4
7180 long 0x3FFF0000,0xD06333DA,0xEF2B2594,0x1FD6D45C
7181 long 0x3FFF0000,0xD2A81D91,0xF12AE45C,0xA076EDB9
7182 long 0x3FFF0000,0xD4F35AAB,0xCFEDFA20,0x9FA6DE21
7183 long 0x3FFF0000,0xD744FCCA,0xD69D6AF4,0x1EE69A2F
7184 long 0x3FFF0000,0xD99D15C2,0x78AFD7B4,0x207F439F
7185 long 0x3FFF0000,0xDBFBB797,0xDAF23754,0x201EC207
7186 long 0x3FFF0000,0xDE60F482,0x5E0E9124,0x9E8BE175
7187 long 0x3FFF0000,0xE0CCDEEC,0x2A94E110,0x20032C4B
7188 long 0x3FFF0000,0xE33F8972,0xBE8A5A50,0x2004DFF5
7189 long 0x3FFF0000,0xE5B906E7,0x7C8348A8,0x1E72F47A
7190 long 0x3FFF0000,0xE8396A50,0x3C4BDC68,0x1F722F22
7191 long 0x3FFF0000,0xEAC0C6E7,0xDD243930,0xA017E945
7192 long 0x3FFF0000,0xED4F301E,0xD9942B84,0x1F401A5B
7193 long 0x3FFF0000,0xEFE4B99B,0xDCDAF5CC,0x9FB9A9E3
7194 long 0x3FFF0000,0xF281773C,0x59FFB138,0x20744C05
7195 long 0x3FFF0000,0xF5257D15,0x2486CC2C,0x1F773A19
7196 long 0x3FFF0000,0xF7D0DF73,0x0AD13BB8,0x1FFE90D5
7197 long 0x3FFF0000,0xFA83B2DB,0x722A033C,0xA041ED22
7198 long 0x3FFF0000,0xFD3E0C0C,0xF486C174,0x1F853F3A
7212 and.l &0x7FFF0000,%d1 # biased expo. of X
7213 cmp.l %d1,&0x3FBE0000 # 2^(-65)
7220 cmp.l %d1,&0x400CB167 # 16380 log2 trunc. 16 bits
7230 fmul.s &0x42B8AA3B,%fp0 # 64/log2 * X
7231 fmovm.x &0xc,-(%sp) # save fp2 {%fp2/%fp3}
7232 mov.l &0,ADJFLAG(%a6)
7238 and.l &0x3F,%d1 # D0 is J = N mod 64
7243 add.w &0x3FFF,%d1 # biased expo. of 2^(M)
7251 fmul.s &0xBC317218,%fp0 # N * L1, L1 = lead(-log2/64)
7265 fmov.s &0x3AB60B70,%fp2 # fp2 IS A5
7269 fmul.s &0x3C088895,%fp3 # fp3 IS S*A4
7276 mov.l &0x80000000,SCALE+4(%a6)
7281 fadd.s &0x3F000000,%fp2 # fp2 IS A1+S*(A3+S*A5)
7295 fmovm.x (%sp)+,&0x30 # fp2 restored {%fp2/%fp3}
7314 fmovm.x (%a0),&0x80 # load X
7316 fadd.s &0x3F800000,%fp0 # 1+X in user mode
7321 cmp.l %d1,&0x400CB27C # 16480 log2
7327 fmul.s &0x42B8AA3B,%fp0 # 64/log2 * X
7328 fmovm.x &0xc,-(%sp) # save fp2 {%fp2/%fp3}
7334 and.l &0x3F,%d1 # D0 is J = N mod 64
7342 add.w &0x3FFF,%d1 # biased expo. of 2^(M1)
7344 mov.l &0x80000000,ADJSCALE+4(%a6)
7347 add.w &0x3FFF,%d1 # biased expo. of 2^(M)
7360 andi.l &0x80000000,(%sp)
7361 ori.l &0x00800000,(%sp) # sign(X)*2^(-126)
7363 fmov.s &0x3F800000,%fp0
7376 and.l &0x7FFF0000,%d1 # biased expo. of X
7377 cmp.l %d1,&0x3FFD0000 # 1/4
7385 cmp.l %d1,&0x4004C215 # 70log2 rounded up to 16 bits
7395 fmul.s &0x42B8AA3B,%fp0 # 64/log2 * X
7396 fmovm.x &0xc,-(%sp) # save fp2 {%fp2/%fp3}
7402 and.l &0x3F,%d1 # D0 is J = N mod 64
7413 fmul.s &0xBC317218,%fp0 # N * L1, L1 = lead(-log2/64)
7417 add.w &0x3FFF,%d1 # D0 is biased expo. of 2^M
7428 fmov.s &0x3950097B,%fp2 # fp2 IS a6
7432 fmul.s &0x3AB60B6A,%fp3 # fp3 IS S*A5
7437 mov.l &0x80000000,SC+4(%a6)
7444 add.w &0x3FFF,%d1 # biased expo. of 2^(-M)
7446 fadd.s &0x3F000000,%fp3 # fp3 IS A1+S*(A3+S*A5)
7449 or.w &0x8000,%d1 # signed/expo. of -2^(-M)
7451 mov.l &0x80000000,ONEBYSC+4(%a6)
7460 fmovm.x (%sp)+,&0x30 # fp2 restored {%fp2/%fp3}
7503 cmp.l %d1,&0x3FBE0000 # 2^(-65)
7508 cmp.l %d1,&0x00330000 # 2^(-16312)
7511 mov.l &0x80010000,SC(%a6) # SC is -2^(-16382)
7512 mov.l &0x80000000,SC+4(%a6)
7524 mov.l &0x80010000,SC(%a6)
7525 mov.l &0x80000000,SC+4(%a6)
7537 fmovm.x &0xc,-(%sp) # save fp2 {%fp2/%fp3}
7538 fmov.s &0x2F30CAA8,%fp1 # fp1 is B12
7540 fmov.s &0x310F8290,%fp2 # fp2 is B11
7541 fadd.s &0x32D73220,%fp1 # fp1 is B10+S*B12
7546 fadd.s &0x3493F281,%fp2 # fp2 is B9+S*...
7573 fmul.s &0x3F000000,%fp0 # fp0 is S*B1
7576 fmovm.x (%sp)+,&0x30 # fp2 restored {%fp2/%fp3}
7587 cmp.l %d1,&0
7590 fmov.s &0xBF800000,%fp0 # fp0 is -1
7592 fadd.s &0x00800000,%fp0 # -1 + 2^(-126)
7598 #--Step 0.
7624 bclr &0xf,%d0 # clear the sign bit
7625 subi.w &0x3fff,%d0 # subtract off the bias
7638 subi.w &0x3fff,%d0 # subtract off the bias
7646 ori.w &0x7fff,%d0 # set exp field to all ones
7647 bclr &0xe,%d0 # make it the new exp +-3fff
7717 long 0x7FFB0000,0x80000000,0x00000000,0x00000000
7725 and.l &0x7FFFFFFF,%d1
7726 cmp.l %d1,&0x400CB167
7736 fmovm.x &0x01,-(%sp) # save |X| to stack
7739 add.l &0xc,%sp # erase |X| from stack
7740 fmul.s &0x3F000000,%fp0 # (1/2)EXP(|X|)
7743 fmov.s &0x3E800000,%fp1 # (1/4)
7752 cmp.l %d1,&0x400CB2B3
7761 fmovm.x &0x01,-(%sp) # save fp0 to stack
7764 add.l &0xc,%sp # clear fp0 from stack
7778 fmov.s &0x3F800000,%fp0
7781 fadd.s &0x00800000,%fp0
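#
# C sketch of the cosh form the fragments above implement: with
# W = (1/2)EXP(|X|) the result is W + 1/(4W); the very large |X| branch
# simply manufactures the overflow instead.
#
#	#include <math.h>
#	static double cosh_via_exp(double x)
#	{
#	    double w = 0.5 * exp(fabs(x));   /* (1/2)EXP(|X|)          */
#	    return w + 0.25 / w;             /* = (e^|x| + e^-|x|) / 2 */
#	}
#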
7838 and.l &0x7FFFFFFF,%d1
7839 cmp.l %d1,&0x400CB167
7847 movm.l &0x8040,-(%sp) # {a1/d0}
7848 fmovm.x &0x01,-(%sp) # save Y on stack
7852 add.l &0xc,%sp # clear Y from stack
7853 fmov.l &0,%fpcr
7854 movm.l (%sp)+,&0x0201 # {a1/d0}
7857 fadd.s &0x3F800000,%fp1 # 1+Z
7861 and.l &0x80000000,%d1
7862 or.l &0x3F000000,%d1
7872 cmp.l %d1,&0x400CB2B3
7876 mov.l &0,-(%sp)
7877 mov.l &0x80000000,-(%sp)
7879 and.l &0x80000000,%d1
7880 or.l &0x7FFB0000,%d1
7886 fmovm.x &0x01,-(%sp) # save fp0 on stack
7889 add.l &0xc,%sp # clear fp0 from stack
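#
# C sketch of the sinh form the fragments above implement: with
# Z = EXPM1(|X|), sinh(X) = sign(X) * (Z + Z/(1+Z)) / 2; the huge-|X| path
# rescales the exponential instead to avoid intermediate overflow.
#
#	#include <math.h>
#	static double sinh_via_expm1(double x)
#	{
#	    double z = expm1(fabs(x));                     /* e^|x| - 1 */
#	    return copysign(0.5 * (z + z / (1.0 + z)), x);
#	}
#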
7964 and.l &0x7FFFFFFF,%d1
7965 cmp.l %d1, &0x3fd78000 # is |X| < 2^(-40)?
7967 cmp.l %d1, &0x3fffddce # is |X| > (5/2)LOG2?
7975 and.l &0x7FFF0000,%d1
7976 add.l &0x00010000,%d1 # EXPONENT OF 2|X|
7978 and.l &0x80000000,SGN(%a6)
7983 fmovm.x &0x1,-(%sp) # save Y on stack
7986 add.l &0xc,%sp # clear Y from stack
7990 fadd.s &0x40000000,%fp1 # Z+2
8000 cmp.l %d1,&0x3FFF8000
8003 cmp.l %d1,&0x40048AA1
8012 and.l &0x7FFF0000,%d1
8013 add.l &0x00010000,%d1 # EXPO OF 2|X|
8015 and.l &0x80000000,SGN(%a6)
8021 fmovm.x &0x01,-(%sp) # save Y on stack
8024 add.l &0xc,%sp # clear Y from stack
8027 fadd.s &0x3F800000,%fp0 # EXP(Y)+1
8029 eor.l &0xC0000000,%d1 # -SIGN(X)*2
8034 or.l &0x3F800000,%d1 # SGN
8051 and.l &0x80000000,%d1
8052 or.l &0x3F800000,%d1
8054 and.l &0x80000000,%d1
8055 eor.l &0x80800000,%d1 # -SIGN(X)*EPS
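#
# C sketch of the tanh forms used above.  For moderate |X|, Z = EXPM1(2|X|)
# and tanh(X) = sign(X)*Z/(Z+2); for larger |X| the code switches to the
# equivalent sign(X)*(1 - 2/(EXP(2|X|)+1)) so Z cannot overflow, and for
# tiny |X| it just returns X plus an inexact nudge.
#
#	#include <math.h>
#	static double tanh_via_expm1(double x)
#	{
#	    double z = expm1(2.0 * fabs(x));          /* e^(2|x|) - 1   */
#	    return copysign(z / (z + 2.0), x);        /* moderate range */
#	}
#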
8130 long 0x3FFE0000,0xB17217F7,0xD1CF79AC,0x00000000
8133 long 0x3F800000
8135 long 0x00000000
8137 long 0x7F800000
8139 long 0xBF800000
8142 long 0x3FC2499A,0xB5E4040B
8144 long 0xBFC555B5,0x848CB7DB
8147 long 0x3FC99999,0x987D8730
8149 long 0xBFCFFFFF,0xFF6F7E97
8152 long 0x3FD55555,0x555555A4
8154 long 0xBFE00000,0x00000008
8157 long 0x3F175496,0xADD7DAD6
8159 long 0x3F3C71C2,0xFE80C7E0
8162 long 0x3F624924,0x928BCCFF
8164 long 0x3F899999,0x999995EC
8167 long 0x3FB55555,0x55555555
8169 long 0x40000000,0x00000000
8172 long 0x3f990000,0x80000000,0x00000000,0x00000000
8175 long 0x3FFE0000,0xFE03F80F,0xE03F80FE,0x00000000
8176 long 0x3FF70000,0xFF015358,0x833C47E2,0x00000000
8177 long 0x3FFE0000,0xFA232CF2,0x52138AC0,0x00000000
8178 long 0x3FF90000,0xBDC8D83E,0xAD88D549,0x00000000
8179 long 0x3FFE0000,0xF6603D98,0x0F6603DA,0x00000000
8180 long 0x3FFA0000,0x9CF43DCF,0xF5EAFD48,0x00000000
8181 long 0x3FFE0000,0xF2B9D648,0x0F2B9D65,0x00000000
8182 long 0x3FFA0000,0xDA16EB88,0xCB8DF614,0x00000000
8183 long 0x3FFE0000,0xEF2EB71F,0xC4345238,0x00000000
8184 long 0x3FFB0000,0x8B29B775,0x1BD70743,0x00000000
8185 long 0x3FFE0000,0xEBBDB2A5,0xC1619C8C,0x00000000
8186 long 0x3FFB0000,0xA8D839F8,0x30C1FB49,0x00000000
8187 long 0x3FFE0000,0xE865AC7B,0x7603A197,0x00000000
8188 long 0x3FFB0000,0xC61A2EB1,0x8CD907AD,0x00000000
8189 long 0x3FFE0000,0xE525982A,0xF70C880E,0x00000000
8190 long 0x3FFB0000,0xE2F2A47A,0xDE3A18AF,0x00000000
8191 long 0x3FFE0000,0xE1FC780E,0x1FC780E2,0x00000000
8192 long 0x3FFB0000,0xFF64898E,0xDF55D551,0x00000000
8193 long 0x3FFE0000,0xDEE95C4C,0xA037BA57,0x00000000
8194 long 0x3FFC0000,0x8DB956A9,0x7B3D0148,0x00000000
8195 long 0x3FFE0000,0xDBEB61EE,0xD19C5958,0x00000000
8196 long 0x3FFC0000,0x9B8FE100,0xF47BA1DE,0x00000000
8197 long 0x3FFE0000,0xD901B203,0x6406C80E,0x00000000
8198 long 0x3FFC0000,0xA9372F1D,0x0DA1BD17,0x00000000
8199 long 0x3FFE0000,0xD62B80D6,0x2B80D62C,0x00000000
8200 long 0x3FFC0000,0xB6B07F38,0xCE90E46B,0x00000000
8201 long 0x3FFE0000,0xD3680D36,0x80D3680D,0x00000000
8202 long 0x3FFC0000,0xC3FD0329,0x06488481,0x00000000
8203 long 0x3FFE0000,0xD0B69FCB,0xD2580D0B,0x00000000
8204 long 0x3FFC0000,0xD11DE0FF,0x15AB18CA,0x00000000
8205 long 0x3FFE0000,0xCE168A77,0x25080CE1,0x00000000
8206 long 0x3FFC0000,0xDE1433A1,0x6C66B150,0x00000000
8207 long 0x3FFE0000,0xCB8727C0,0x65C393E0,0x00000000
8208 long 0x3FFC0000,0xEAE10B5A,0x7DDC8ADD,0x00000000
8209 long 0x3FFE0000,0xC907DA4E,0x871146AD,0x00000000
8210 long 0x3FFC0000,0xF7856E5E,0xE2C9B291,0x00000000
8211 long 0x3FFE0000,0xC6980C69,0x80C6980C,0x00000000
8212 long 0x3FFD0000,0x82012CA5,0xA68206D7,0x00000000
8213 long 0x3FFE0000,0xC4372F85,0x5D824CA6,0x00000000
8214 long 0x3FFD0000,0x882C5FCD,0x7256A8C5,0x00000000
8215 long 0x3FFE0000,0xC1E4BBD5,0x95F6E947,0x00000000
8216 long 0x3FFD0000,0x8E44C60B,0x4CCFD7DE,0x00000000
8217 long 0x3FFE0000,0xBFA02FE8,0x0BFA02FF,0x00000000
8218 long 0x3FFD0000,0x944AD09E,0xF4351AF6,0x00000000
8219 long 0x3FFE0000,0xBD691047,0x07661AA3,0x00000000
8220 long 0x3FFD0000,0x9A3EECD4,0xC3EAA6B2,0x00000000
8221 long 0x3FFE0000,0xBB3EE721,0xA54D880C,0x00000000
8222 long 0x3FFD0000,0xA0218434,0x353F1DE8,0x00000000
8223 long 0x3FFE0000,0xB92143FA,0x36F5E02E,0x00000000
8224 long 0x3FFD0000,0xA5F2FCAB,0xBBC506DA,0x00000000
8225 long 0x3FFE0000,0xB70FBB5A,0x19BE3659,0x00000000
8226 long 0x3FFD0000,0xABB3B8BA,0x2AD362A5,0x00000000
8227 long 0x3FFE0000,0xB509E68A,0x9B94821F,0x00000000
8228 long 0x3FFD0000,0xB1641795,0xCE3CA97B,0x00000000
8229 long 0x3FFE0000,0xB30F6352,0x8917C80B,0x00000000
8230 long 0x3FFD0000,0xB7047551,0x5D0F1C61,0x00000000
8231 long 0x3FFE0000,0xB11FD3B8,0x0B11FD3C,0x00000000
8232 long 0x3FFD0000,0xBC952AFE,0xEA3D13E1,0x00000000
8233 long 0x3FFE0000,0xAF3ADDC6,0x80AF3ADE,0x00000000
8234 long 0x3FFD0000,0xC2168ED0,0xF458BA4A,0x00000000
8235 long 0x3FFE0000,0xAD602B58,0x0AD602B6,0x00000000
8236 long 0x3FFD0000,0xC788F439,0xB3163BF1,0x00000000
8237 long 0x3FFE0000,0xAB8F69E2,0x8359CD11,0x00000000
8238 long 0x3FFD0000,0xCCECAC08,0xBF04565D,0x00000000
8239 long 0x3FFE0000,0xA9C84A47,0xA07F5638,0x00000000
8240 long 0x3FFD0000,0xD2420487,0x2DD85160,0x00000000
8241 long 0x3FFE0000,0xA80A80A8,0x0A80A80B,0x00000000
8242 long 0x3FFD0000,0xD7894992,0x3BC3588A,0x00000000
8243 long 0x3FFE0000,0xA655C439,0x2D7B73A8,0x00000000
8244 long 0x3FFD0000,0xDCC2C4B4,0x9887DACC,0x00000000
8245 long 0x3FFE0000,0xA4A9CF1D,0x96833751,0x00000000
8246 long 0x3FFD0000,0xE1EEBD3E,0x6D6A6B9E,0x00000000
8247 long 0x3FFE0000,0xA3065E3F,0xAE7CD0E0,0x00000000
8248 long 0x3FFD0000,0xE70D785C,0x2F9F5BDC,0x00000000
8249 long 0x3FFE0000,0xA16B312E,0xA8FC377D,0x00000000
8250 long 0x3FFD0000,0xEC1F392C,0x5179F283,0x00000000
8251 long 0x3FFE0000,0x9FD809FD,0x809FD80A,0x00000000
8252 long 0x3FFD0000,0xF12440D3,0xE36130E6,0x00000000
8253 long 0x3FFE0000,0x9E4CAD23,0xDD5F3A20,0x00000000
8254 long 0x3FFD0000,0xF61CCE92,0x346600BB,0x00000000
8255 long 0x3FFE0000,0x9CC8E160,0xC3FB19B9,0x00000000
8256 long 0x3FFD0000,0xFB091FD3,0x8145630A,0x00000000
8257 long 0x3FFE0000,0x9B4C6F9E,0xF03A3CAA,0x00000000
8258 long 0x3FFD0000,0xFFE97042,0xBFA4C2AD,0x00000000
8259 long 0x3FFE0000,0x99D722DA,0xBDE58F06,0x00000000
8260 long 0x3FFE0000,0x825EFCED,0x49369330,0x00000000
8261 long 0x3FFE0000,0x9868C809,0x868C8098,0x00000000
8262 long 0x3FFE0000,0x84C37A7A,0xB9A905C9,0x00000000
8263 long 0x3FFE0000,0x97012E02,0x5C04B809,0x00000000
8264 long 0x3FFE0000,0x87224C2E,0x8E645FB7,0x00000000
8265 long 0x3FFE0000,0x95A02568,0x095A0257,0x00000000
8266 long 0x3FFE0000,0x897B8CAC,0x9F7DE298,0x00000000
8267 long 0x3FFE0000,0x94458094,0x45809446,0x00000000
8268 long 0x3FFE0000,0x8BCF55DE,0xC4CD05FE,0x00000000
8269 long 0x3FFE0000,0x92F11384,0x0497889C,0x00000000
8270 long 0x3FFE0000,0x8E1DC0FB,0x89E125E5,0x00000000
8271 long 0x3FFE0000,0x91A2B3C4,0xD5E6F809,0x00000000
8272 long 0x3FFE0000,0x9066E68C,0x955B6C9B,0x00000000
8273 long 0x3FFE0000,0x905A3863,0x3E06C43B,0x00000000
8274 long 0x3FFE0000,0x92AADE74,0xC7BE59E0,0x00000000
8275 long 0x3FFE0000,0x8F1779D9,0xFDC3A219,0x00000000
8276 long 0x3FFE0000,0x94E9BFF6,0x15845643,0x00000000
8277 long 0x3FFE0000,0x8DDA5202,0x37694809,0x00000000
8278 long 0x3FFE0000,0x9723A1B7,0x20134203,0x00000000
8279 long 0x3FFE0000,0x8CA29C04,0x6514E023,0x00000000
8280 long 0x3FFE0000,0x995899C8,0x90EB8990,0x00000000
8281 long 0x3FFE0000,0x8B70344A,0x139BC75A,0x00000000
8282 long 0x3FFE0000,0x9B88BDAA,0x3A3DAE2F,0x00000000
8283 long 0x3FFE0000,0x8A42F870,0x5669DB46,0x00000000
8284 long 0x3FFE0000,0x9DB4224F,0xFFE1157C,0x00000000
8285 long 0x3FFE0000,0x891AC73A,0xE9819B50,0x00000000
8286 long 0x3FFE0000,0x9FDADC26,0x8B7A12DA,0x00000000
8287 long 0x3FFE0000,0x87F78087,0xF78087F8,0x00000000
8288 long 0x3FFE0000,0xA1FCFF17,0xCE733BD4,0x00000000
8289 long 0x3FFE0000,0x86D90544,0x7A34ACC6,0x00000000
8290 long 0x3FFE0000,0xA41A9E8F,0x5446FB9F,0x00000000
8291 long 0x3FFE0000,0x85BF3761,0x2CEE3C9B,0x00000000
8292 long 0x3FFE0000,0xA633CD7E,0x6771CD8B,0x00000000
8293 long 0x3FFE0000,0x84A9F9C8,0x084A9F9D,0x00000000
8294 long 0x3FFE0000,0xA8489E60,0x0B435A5E,0x00000000
8295 long 0x3FFE0000,0x83993052,0x3FBE3368,0x00000000
8296 long 0x3FFE0000,0xAA59233C,0xCCA4BD49,0x00000000
8297 long 0x3FFE0000,0x828CBFBE,0xB9A020A3,0x00000000
8298 long 0x3FFE0000,0xAC656DAE,0x6BCC4985,0x00000000
8299 long 0x3FFE0000,0x81848DA8,0xFAF0D277,0x00000000
8300 long 0x3FFE0000,0xAE6D8EE3,0x60BB2468,0x00000000
8301 long 0x3FFE0000,0x80808080,0x80808081,0x00000000
8302 long 0x3FFE0000,0xB07197A2,0x3C46C654,0x00000000
8321 mov.l &0x00000000,ADJK(%a6)
8334 cmp.l %d1,&0 # CHECK IF X IS NEGATIVE
8337 cmp.l %d1,&0x3ffef07d # IS X < 15/16?
8339 cmp.l %d1,&0x3fff8841 # IS X > 17/16?
8357 sub.l &0x3FFF,%d1 # THIS IS K
8363 mov.l &0x3FFF0000,X(%a6) # X IS NOW Y, I.E. 2^(-K)*X
8365 and.l &0xFE000000,FFRAC(%a6) # FIRST 7 BITS OF Y
8366 or.l &0x01000000,FFRAC(%a6) # GET F: ATTACH A 1 AT THE EIGHTH BIT
8368 and.l &0x7E000000,%d1
8375 mov.l &0x3fff0000,F(%a6)
8378 fmovm.x &0xc,-(%sp) # SAVE FP2-3 WHILE FP0 IS NOT READY
8417 fmovm.x (%sp)+,&0x30 # RESTORE FP2-3
8430 fcmp.b %fp0,&0x1 # is it equal to one?
8444 fmovm.x &0xc,-(%sp) # SAVE FP2-3
8472 fmovm.x (%sp)+,&0x30 # FP2-3 RESTORED
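#
# C sketch of the logarithm reduction the comments above describe:
# X = 2^K * Y with Y in [1,2), F is Y truncated to 7 fraction bits with an
# eighth bit forced on, and one valid way to finish is
#     log(X) = K*log2 + log(F) + log(1 + (Y-F)/F),
# where 1/F and the stored log term come from the paired table entries
# listed earlier; libm stands in here for the table and the polynomial, and
# the FPSP's exact polynomial form differs.
#
#	#include <math.h>
#	static double log_by_table(double x)                /* x > 0 */
#	{
#	    const double ln2 = 0.69314718055994530942;
#	    int k; double y = 2.0 * frexp(x, &k); k -= 1;   /* Y in [1,2) */
#	    double f = (floor(y * 128.0) + 0.5) / 128.0;    /* 7 bits + 1 */
#	    double u = (y - f) / f;                         /* |u| small  */
#	    return k * ln2 + log(f) + log1p(u);
#	}
#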
8495 movm.l &0x3f00,-(%sp) # save some registers {d2-d7}
8509 bfffo %d4{&0:&32},%d6
8519 movm.l (%sp)+,&0xfc # restore registers {d2-d7}
8525 bfffo %d4{&0:&32},%d6 # find first 1
8541 movm.l (%sp)+,&0xfc # restore registers {d2-d7}
8559 mov.l &0x00000000,ADJK(%a6)
8565 cmp.l %d1,&0
8567 cmp.l %d1,&0x3ffe8000 # IS BOUNDS [1/2,3/2]?
8569 cmp.l %d1,&0x3fffc000
8577 cmp.l %d1,&0x3ffef07d
8579 cmp.l %d1,&0x3fff8841
8596 #--CASE 2: 1+Z > 1, THEN K = 0 AND Y-F = (1-F) + Z
8601 and.l &0xFE000000,FFRAC(%a6)
8602 or.l &0x01000000,FFRAC(%a6) # F OBTAINED
8603 cmp.l %d1,&0x3FFF8000 # SEE IF 1+Z > 1
8608 mov.l &0x3fff0000,F(%a6)
8612 and.l &0x7E000000,%d1
8617 fmovm.x &0xc,-(%sp) # SAVE FP2 {%fp2/%fp3}
8626 mov.l &0x3fff0000,F(%a6)
8630 and.l &0x7E000000,%d1
8635 fmovm.x &0xc,-(%sp) # FP2 SAVED {%fp2/%fp3}
8638 fmov.s zero(%pc),%fp1 # FP1 IS K = 0
8643 cmp.l %d1,&0
8697 # atan(X) := sgn / (+0). #
8700 # 5. (|X| > 1) Generate an invalid operation by 0 * infinity. #
8709 and.l &0x7FFFFFFF,%d1
8710 cmp.l %d1,&0x3FFF8000
8720 fadd.s &0x3F800000,%fp1 # 1-Y
8723 and.l &0x80000000,%d1
8724 or.l &0x3F000000,%d1 # SIGN(X)*HALF
8729 fmovm.x &0x01,-(%sp) # save Z on stack
8732 add.l &0xc,%sp # clear Z from stack
8742 fcmp.s %fp0,&0x3F800000
8774 # Step 0. If X < 0, create a NaN and raise the invalid operation #
8788 # Step 0. If X < 0, create a NaN and raise the invalid operation #
8801 # Step 0. If X < 0, create a NaN and raise the invalid operation #
8815 # Step 0. If X < 0, create a NaN and raise the invalid operation #
8837 long 0x3FFD0000,0xDE5BD8A9,0x37287195,0x00000000
8840 long 0x3FFF0000,0xB8AA3B29,0x5C17F0BC,0x00000000
8845 fmov.b &0x1,%fp0
8880 and.l &0x7FFFFFFF,%d1
8885 and.l &0x00007FFF,%d1
8886 sub.l &0x3FFF,%d1
8943 # N = 64(M + M') + j, j = 0,1,2,...,63. #
8956 # N = 64(M + M') + j, j = 0,1,2,...,63. #
8981 # 1. Generate overflow by Huge * Huge if X > 0; otherwise, #
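#
# C sketch of the decomposition quoted above for stwotox/stentox.  For 2^X:
# N = round(64*X), N = 64*M + j, r = X - N/64, and
# 2^X = 2^M * 2^(j/64) * 2^r with 2^r = exp(r*log2) from a short
# polynomial; 10^X goes through the same machinery using the 64LOG10/LOG2
# scaling constants that follow.  Huge |X| falls through to the
# overflow/underflow generation mentioned in the header.
#
#	#include <math.h>
#	static double two_to_x(double x)
#	{
#	    const double ln2 = 0.69314718055994530942;
#	    int    n = (int)nearbyint(x * 64.0);
#	    int    j = n & 0x3f, m = (n - j) / 64;     /* N = 64*M + j */
#	    double r = x - n / 64.0;                   /* |r| <= 1/128 */
#	    return ldexp(exp2(j / 64.0) * exp(r * ln2), m);
#	}
#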
8990 long 0x406A934F,0x0979A371 # 64LOG10/LOG2
8992 long 0x3F734413,0x509F8000 # LOG2/64LOG10
8995 long 0xBFCD0000,0xC0219DC1,0xDA994FD2,0x00000000
8997 LOG10: long 0x40000000,0x935D8DDD,0xAAA8AC17,0x00000000
8999 LOG2: long 0x3FFE0000,0xB17217F7,0xD1CF79AC,0x00000000
9001 EXPA5: long 0x3F56C16D,0x6F7BD0B2
9002 EXPA4: long 0x3F811112,0x302C712C
9003 EXPA3: long 0x3FA55555,0x55554CC1
9004 EXPA2: long 0x3FC55555,0x55554A54
9005 EXPA1: long 0x3FE00000,0x00000000,0x00000000,0x00000000
9008 long 0x3FFF0000,0x80000000,0x00000000,0x3F738000
9009 long 0x3FFF0000,0x8164D1F3,0xBC030773,0x3FBEF7CA
9010 long 0x3FFF0000,0x82CD8698,0xAC2BA1D7,0x3FBDF8A9
9011 long 0x3FFF0000,0x843A28C3,0xACDE4046,0x3FBCD7C9
9012 long 0x3FFF0000,0x85AAC367,0xCC487B15,0xBFBDE8DA
9013 long 0x3FFF0000,0x871F6196,0x9E8D1010,0x3FBDE85C
9014 long 0x3FFF0000,0x88980E80,0x92DA8527,0x3FBEBBF1
9015 long 0x3FFF0000,0x8A14D575,0x496EFD9A,0x3FBB80CA
9016 long 0x3FFF0000,0x8B95C1E3,0xEA8BD6E7,0xBFBA8373
9017 long 0x3FFF0000,0x8D1ADF5B,0x7E5BA9E6,0xBFBE9670
9018 long 0x3FFF0000,0x8EA4398B,0x45CD53C0,0x3FBDB700
9019 long 0x3FFF0000,0x9031DC43,0x1466B1DC,0x3FBEEEB0
9020 long 0x3FFF0000,0x91C3D373,0xAB11C336,0x3FBBFD6D
9021 long 0x3FFF0000,0x935A2B2F,0x13E6E92C,0xBFBDB319
9022 long 0x3FFF0000,0x94F4EFA8,0xFEF70961,0x3FBDBA2B
9023 long 0x3FFF0000,0x96942D37,0x20185A00,0x3FBE91D5
9024 long 0x3FFF0000,0x9837F051,0x8DB8A96F,0x3FBE8D5A
9025 long 0x3FFF0000,0x99E04593,0x20B7FA65,0xBFBCDE7B
9026 long 0x3FFF0000,0x9B8D39B9,0xD54E5539,0xBFBEBAAF
9027 long 0x3FFF0000,0x9D3ED9A7,0x2CFFB751,0xBFBD86DA
9028 long 0x3FFF0000,0x9EF53260,0x91A111AE,0xBFBEBEDD
9029 long 0x3FFF0000,0xA0B0510F,0xB9714FC2,0x3FBCC96E
9030 long 0x3FFF0000,0xA2704303,0x0C496819,0xBFBEC90B
9031 long 0x3FFF0000,0xA43515AE,0x09E6809E,0x3FBBD1DB
9032 long 0x3FFF0000,0xA5FED6A9,0xB15138EA,0x3FBCE5EB
9033 long 0x3FFF0000,0xA7CD93B4,0xE965356A,0xBFBEC274
9034 long 0x3FFF0000,0xA9A15AB4,0xEA7C0EF8,0x3FBEA83C
9035 long 0x3FFF0000,0xAB7A39B5,0xA93ED337,0x3FBECB00
9036 long 0x3FFF0000,0xAD583EEA,0x42A14AC6,0x3FBE9301
9037 long 0x3FFF0000,0xAF3B78AD,0x690A4375,0xBFBD8367
9038 long 0x3FFF0000,0xB123F581,0xD2AC2590,0xBFBEF05F
9039 long 0x3FFF0000,0xB311C412,0xA9112489,0x3FBDFB3C
9040 long 0x3FFF0000,0xB504F333,0xF9DE6484,0x3FBEB2FB
9041 long 0x3FFF0000,0xB6FD91E3,0x28D17791,0x3FBAE2CB
9042 long 0x3FFF0000,0xB8FBAF47,0x62FB9EE9,0x3FBCDC3C
9043 long 0x3FFF0000,0xBAFF5AB2,0x133E45FB,0x3FBEE9AA
9044 long 0x3FFF0000,0xBD08A39F,0x580C36BF,0xBFBEAEFD
9045 long 0x3FFF0000,0xBF1799B6,0x7A731083,0xBFBCBF51
9046 long 0x3FFF0000,0xC12C4CCA,0x66709456,0x3FBEF88A
9047 long 0x3FFF0000,0xC346CCDA,0x24976407,0x3FBD83B2
9048 long 0x3FFF0000,0xC5672A11,0x5506DADD,0x3FBDF8AB
9049 long 0x3FFF0000,0xC78D74C8,0xABB9B15D,0xBFBDFB17
9050 long 0x3FFF0000,0xC9B9BD86,0x6E2F27A3,0xBFBEFE3C
9051 long 0x3FFF0000,0xCBEC14FE,0xF2727C5D,0xBFBBB6F8
9052 long 0x3FFF0000,0xCE248C15,0x1F8480E4,0xBFBCEE53
9053 long 0x3FFF0000,0xD06333DA,0xEF2B2595,0xBFBDA4AE
9054 long 0x3FFF0000,0xD2A81D91,0xF12AE45A,0x3FBC9124
9055 long 0x3FFF0000,0xD4F35AAB,0xCFEDFA1F,0x3FBEB243
9056 long 0x3FFF0000,0xD744FCCA,0xD69D6AF4,0x3FBDE69A
9057 long 0x3FFF0000,0xD99D15C2,0x78AFD7B6,0xBFB8BC61
9058 long 0x3FFF0000,0xDBFBB797,0xDAF23755,0x3FBDF610
9059 long 0x3FFF0000,0xDE60F482,0x5E0E9124,0xBFBD8BE1
9060 long 0x3FFF0000,0xE0CCDEEC,0x2A94E111,0x3FBACB12
9061 long 0x3FFF0000,0xE33F8972,0xBE8A5A51,0x3FBB9BFE
9062 long 0x3FFF0000,0xE5B906E7,0x7C8348A8,0x3FBCF2F4
9063 long 0x3FFF0000,0xE8396A50,0x3C4BDC68,0x3FBEF22F
9064 long 0x3FFF0000,0xEAC0C6E7,0xDD24392F,0xBFBDBF4A
9065 long 0x3FFF0000,0xED4F301E,0xD9942B84,0x3FBEC01A
9066 long 0x3FFF0000,0xEFE4B99B,0xDCDAF5CB,0x3FBE8CAC
9067 long 0x3FFF0000,0xF281773C,0x59FFB13A,0xBFBCBB3F
9068 long 0x3FFF0000,0xF5257D15,0x2486CC2C,0x3FBEF73A
9069 long 0x3FFF0000,0xF7D0DF73,0x0AD13BB9,0xBFB8B795
9070 long 0x3FFF0000,0xFA83B2DB,0x722A033A,0x3FBEF84B
9071 long 0x3FFF0000,0xFD3E0C0C,0xF486C175,0xBFBEF581
9092 fmovm.x (%a0),&0x80 # LOAD INPUT
9097 and.l &0x7FFFFFFF,%d1
9099 cmp.l %d1,&0x3FB98000 # |X| >= 2**(-70)?
9104 cmp.l %d1,&0x400D80C0 # |X| > 16480?
9112 fmul.s &0x42800000,%fp1 # 64 * X
9119 and.l &0x3F,%d1 # D0 IS J
9126 add.l &0x3FFF,%d2
9133 fmovm.x &0x0c,-(%sp) # save fp2/fp3
9135 fmul.s &0x3C800000,%fp1 # (1/64)*N
9154 cmp.l %d1,&0x3FFF8000
9160 fadd.s &0x3F800000,%fp0 # RETURN 1 + X
9164 #--|X| IS LARGE, GENERATE OVERFLOW IF X > 0; ELSE GENERATE UNDERFLOW
9167 cmp.l %d1,&0
9180 fmov.s &0x3F800000,%fp0 # RETURN 1 + X
9182 or.l &0x00800001,%d1
9189 fmovm.x (%a0),&0x80 # LOAD INPUT
9194 and.l &0x7FFFFFFF,%d1
9196 cmp.l %d1,&0x3FB98000 # |X| >= 2**(-70)?
9201 cmp.l %d1,&0x400B9B07 # |X| <= 16480*log2/log10 ?
9216 and.l &0x3F,%d1 # D0 IS J
9223 add.l &0x3FFF,%d2
9229 fmovm.x &0x0c,-(%sp) # save fp2/fp3
9281 fmovm.x (%sp)+,&0x30 # restore fp2/fp3
9284 #--EXP(X) = 2^M*2^(J/64) + 2^M*2^(J/64)*(EXP(R)-1) - (1 OR 0)
9293 mov.l &0x80000000,ADJFACT+4(%a6)
9304 fmov.s &0x3F800000,%fp0 # RETURN 1 + X
9306 or.l &0x00800001,%d1
9327 lsr.b &0x4,%d0 # shift ctrl bits to lo
9329 andi.w &0x3,%d1 # extract rnd mode
9330 andi.w &0xc,%d0 # extract rnd prec
9341 cmpi.b %d1,&0x0a # check range $01 - $0a
9343 cmpi.b %d1,&0x0e # check range $0b - $0e
9345 cmpi.b %d1,&0x2f # check range $10 - $2f
9347 cmpi.b %d1,&0x3f # check range $30 - $3f
9377 # $0B log10(2) (inexact)
9378 # $0C e (inexact)
9379 # $0D log2(e) (inexact)
9380 # $0E log10(e) (exact)
9386 subi.b &0xb,%d1 # make offset in 0-4 range
9392 cmpi.b %d1,&0x2 # is result log10(e)?
9409 # $32 10^0 (exact)
9428 subi.b &0x30,%d1 # make offset in 0-f range
9434 cmpi.b %d1,&0x1 # is offset <= $31?
9436 cmpi.b %d1,&0x7 # is $32 <= offset <= $37?
9453 mulu.w &0xc,%d1 # offset points into tables
9460 fmovm.x (%a0,%d1.w),&0x80 # return result in fp0
9470 mov.w 0x0(%a0,%d1.w),FP_SCR1_EX(%a6) # load first word
9471 mov.l 0x4(%a0,%d1.w),FP_SCR1_HI(%a6) # load second word
9472 mov.l 0x8(%a0,%d1.w),FP_SCR1_LO(%a6) # load third word
9479 fmovm.x (%a0),&0x80 # return rounded result in fp0
9482 align 0x4
9484 PIRN: long 0x40000000,0xc90fdaa2,0x2168c235 # pi
9485 PIRZRM: long 0x40000000,0xc90fdaa2,0x2168c234 # pi
9486 PIRP: long 0x40000000,0xc90fdaa2,0x2168c235 # pi
9488 SMALRN: long 0x3ffd0000,0x9a209a84,0xfbcff798 # log10(2)
9489 long 0x40000000,0xadf85458,0xa2bb4a9a # e
9490 long 0x3fff0000,0xb8aa3b29,0x5c17f0bc # log2(e)
9491 long 0x3ffd0000,0xde5bd8a9,0x37287195 # log10(e)
9492 long 0x00000000,0x00000000,0x00000000 # 0.0
9495 long 0x3ffd0000,0x9a209a84,0xfbcff798 # log10(2)
9496 long 0x40000000,0xadf85458,0xa2bb4a9a # e
9497 long 0x3fff0000,0xb8aa3b29,0x5c17f0bb # log2(e)
9498 long 0x3ffd0000,0xde5bd8a9,0x37287195 # log10(e)
9499 long 0x00000000,0x00000000,0x00000000 # 0.0
9501 SMALRP: long 0x3ffd0000,0x9a209a84,0xfbcff799 # log10(2)
9502 long 0x40000000,0xadf85458,0xa2bb4a9b # e
9503 long 0x3fff0000,0xb8aa3b29,0x5c17f0bc # log2(e)
9504 long 0x3ffd0000,0xde5bd8a9,0x37287195 # log10(e)
9505 long 0x00000000,0x00000000,0x00000000 # 0.0
9507 BIGRN: long 0x3ffe0000,0xb17217f7,0xd1cf79ac # ln(2)
9508 long 0x40000000,0x935d8ddd,0xaaa8ac17 # ln(10)
9510 long 0x3fff0000,0x80000000,0x00000000 # 10 ^ 0
9511 long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
9512 long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
9513 long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
9514 long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
9515 long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
9516 long 0x40690000,0x9DC5ADA8,0x2B70B59E # 10 ^ 32
9517 long 0x40D30000,0xC2781F49,0xFFCFA6D5 # 10 ^ 64
9518 long 0x41A80000,0x93BA47C9,0x80E98CE0 # 10 ^ 128
9519 long 0x43510000,0xAA7EEBFB,0x9DF9DE8E # 10 ^ 256
9520 long 0x46A30000,0xE319A0AE,0xA60E91C7 # 10 ^ 512
9521 long 0x4D480000,0xC9767586,0x81750C17 # 10 ^ 1024
9522 long 0x5A920000,0x9E8B3B5D,0xC53D5DE5 # 10 ^ 2048
9523 long 0x75250000,0xC4605202,0x8A20979B # 10 ^ 4096
9526 long 0x3ffe0000,0xb17217f7,0xd1cf79ab # ln(2)
9527 long 0x40000000,0x935d8ddd,0xaaa8ac16 # ln(10)
9529 long 0x3fff0000,0x80000000,0x00000000 # 10 ^ 0
9530 long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
9531 long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
9532 long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
9533 long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
9534 long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
9535 long 0x40690000,0x9DC5ADA8,0x2B70B59D # 10 ^ 32
9536 long 0x40D30000,0xC2781F49,0xFFCFA6D5 # 10 ^ 64
9537 long 0x41A80000,0x93BA47C9,0x80E98CDF # 10 ^ 128
9538 long 0x43510000,0xAA7EEBFB,0x9DF9DE8D # 10 ^ 256
9539 long 0x46A30000,0xE319A0AE,0xA60E91C6 # 10 ^ 512
9540 long 0x4D480000,0xC9767586,0x81750C17 # 10 ^ 1024
9541 long 0x5A920000,0x9E8B3B5D,0xC53D5DE4 # 10 ^ 2048
9542 long 0x75250000,0xC4605202,0x8A20979A # 10 ^ 4096
9545 long 0x3ffe0000,0xb17217f7,0xd1cf79ac # ln(2)
9546 long 0x40000000,0x935d8ddd,0xaaa8ac17 # ln(10)
9548 long 0x3fff0000,0x80000000,0x00000000 # 10 ^ 0
9549 long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
9550 long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
9551 long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
9552 long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
9553 long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
9554 long 0x40690000,0x9DC5ADA8,0x2B70B59E # 10 ^ 32
9555 long 0x40D30000,0xC2781F49,0xFFCFA6D6 # 10 ^ 64
9556 long 0x41A80000,0x93BA47C9,0x80E98CE0 # 10 ^ 128
9557 long 0x43510000,0xAA7EEBFB,0x9DF9DE8E # 10 ^ 256
9558 long 0x46A30000,0xE319A0AE,0xA60E91C7 # 10 ^ 512
9559 long 0x4D480000,0xC9767586,0x81750C18 # 10 ^ 1024
9560 long 0x5A920000,0x9E8B3B5D,0xC53D5DE5 # 10 ^ 2048
9561 long 0x75250000,0xC4605202,0x8A20979B # 10 ^ 4096
9585 andi.l &0x00007fff,%d1 # strip sign from dst exp
9588 andi.w &0x7fff,%d0 # clr src sign bit
9589 cmpi.w %d0,&0x3fff # is src ~ ZERO?
9591 cmpi.w %d0,&0x400c # no; is src too big?
9602 fmov.l &0x0,%fpsr
9621 fmovm.x FP_SCR0(%a6),&0x80 # load normalized DENORM
9623 cmpi.w %d0,&-0x3fff # is the shft amt really low?
9633 mov.l &0x80000000,%d1 # load normalized mantissa
9634 subi.l &-0x3fff,%d0 # how many should we shift?
9636 cmpi.b %d0,&0x20 # is it > 32?
9644 subi.b &0x20,%d0 # get shift count
9654 fmovm.x DST(%a1),&0x80 # load fp0 with normalized src
9658 addi.w &0x3fff,%d0 # turn src amt into exp value
9661 mov.l &0x80000000,-(%sp) # insert new high mantissa
9721 # Step 2. Set L := expo(X)-expo(Y), k := 0, Q := 0. #
9722 # If (L < 0) then #
9731 # 3.3 If j = 0, go to Step 4. #
9755 # R := 0. Return signQ, last 7 bits of Q, and R. #
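#
# C sketch of the result the step comments above build with the
# compare/subtract/shift loop: an fmod-style remainder plus the sign and
# low seven bits of the quotient.  q7/sign_q are names invented for this
# sketch, and the exact-arithmetic bookkeeping of the real loop is ignored.
#
#	#include <math.h>
#	static double rem_with_quotient(double x, double y,
#	                                unsigned *q7, int *sign_q)
#	{
#	    double q = trunc(fabs(x) / fabs(y));     /* chopped |quotient| */
#	    double r = fmod(fabs(x), fabs(y));       /* R = |X| - Q*|Y|    */
#	    *q7     = (unsigned)fmod(q, 128.0);      /* last 7 bits of Q   */
#	    *sign_q = (signbit(x) != 0) != (signbit(y) != 0);
#	    return copysign(r, x);                   /* R takes sign of X  */
#	}
#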
9775 long 0x00010000,0x80000000,0x00000000,0x00000000
9788 mov.b &0x1,Mod_Flag(%a6)
9792 movm.l &0x3f00,-(%sp) # save data registers
9795 and.l &0x00007FFF,%d3 # Y := |Y|
9804 mov.l &0x00003FFE,%d3 # $3FFD + 1
9813 bfffo %d4{&0:&32},%d6
9821 bfffo %d4{&0:&32},%d6
9834 add.l &0x00003FFE,%d3 # (D3,D4,D5) normalized
9842 and.l &0x00008000,%d1
9844 and.l &0x00007FFF,%d0
9849 mov.l &0x00003FFE,%d0
9858 bfffo %d1{&0:&32},%d6
9866 bfffo %d1{&0:&32},%d6
9879 add.l &0x00003FFE,%d0 # (D0,D1,D2) normalized
9888 clr.l %d6 # D6 := carry <- 0
9890 mov.l &0,%a1 # A1 is k; j+k=L, Q=0
9902 addq.l &0x4,%sp # erase exp(X)
9903 #..At this point R = 2^(-L)X; Q = 0; k = 0; and k+j = L
9908 #..At this point carry = 0, R = (D1,D2), Y = (D4,D5)
9922 #..If Carry is set, then Y < (Carry,D1,D2) < 2Y. Otherwise, Carry = 0
9930 #..At this point, Carry=0, R < Y. R = 2^(k-L)X - QY; k+j = L; j >= 0.
9931 tst.l %d0 # see if j = 0.
9940 #..At this point, R=(Carry,D1,D2) = 2^(k-L)X - QY, j+k=L, j >= 0, R < 2Y.
9945 #..k = L, j = 0, Carry = 0, R = (D1,D2) = X - QY, R < Y.
9957 bfffo %d1{&0:&32},%d6
9965 bfffo %d1{&0:&32},%d6
9978 cmp.l %d0,&0x000041FE
9994 sub.l &0x3FFE,%d0
9997 sub.l &0x3FFE,%d6
10044 and.l &0x0000007F,%d3 # 7 bits of Q
10048 # and.l &0xFF00FFFF,%d6
10055 movm.l (%sp)+,&0xfc # {%d2-%d7}
10076 #..R = 2^(-j)X - Q Y = Y, thus R = 0 and quotient = 2^j (Q+1)
10088 fmov.s &0x00000000,%fp0
10095 and.l &0x00000001,%d6
10102 eor.l &0x00008000,%d6
10106 qnan: long 0x7fff0000, 0xffffffff, 0xffffffff
10137 fmov.s &0x7f800000,%fp0 # return +INF in fp0
10143 fmov.s &0xff800000,%fp0 # return -INF in fp0
10155 fmovm.x qnan(%pc),&0x80 # return default NAN in fp0
10180 andi.b &0xc0,%d1 # extended precision?
10198 fmovm.x (%a0),&0x80 # return default result in fp0
10201 andi.b &0x0a,%d0 # is UNFL or INEX enabled?
10209 # so, normalize the mantissa, add 0x6000 to the new exponent,
10218 addi.l &0x6000,%d0 # add extra bias
10219 andi.w &0x8000,FP_SCR0_EX(%a6) # keep old sign
10222 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
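#
# C sketch of the exponent adjustment described above for building the EXOP
# (exceptional operand): keep the sign and mantissa, and bias the 15-bit
# exponent by +0x6000 for underflow or -0x6000 for overflow, wrapping
# inside the field just as the addi/andi pair does.
#
#	#include <stdint.h>
#	static uint16_t exop_sign_exp(uint16_t se, int underflow)
#	{
#	    unsigned e = se & 0x7fffu;                            /* strip sign */
#	    e = (e + (underflow ? 0x6000u : -0x6000u)) & 0x7fffu;
#	    return (uint16_t)((se & 0x8000u) | e);                /* old sign   */
#	}
#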
10243 fmovm.x (%a0),&0x80 # return default result in fp0
10245 fmov.s &0x00000000,%fp1 # return EXOP in fp1
10255 fmovm.x (%a0),&0x80 # return default result in fp0
10257 fmov.s &0x00000000,%fp1 # return EXOP in fp1
10279 andi.b &0xc0,%d1 # extract rnd prec
10292 movm.l &0xc080,-(%sp) # save d0-d1/a0
10294 movm.l (%sp)+,&0x0103 # restore d0-d1/a0
10297 cmpi.b %d1,&0x40 # is prec dbl?
10307 andi.l &0x7ff,%d1 # dbl mantissa set?
10322 fmovm.x (%a0),&0x80 # return default result in fp0
10324 fmov.s &0x00000000,%fp1 # return EXOP in fp1
10335 fmovm.x (%a0),&0x80 # return default result in fp0
10337 fmov.s &0x00000000,%fp1 # return EXOP in fp1
10353 tst.b 0x2(%sp)
10355 add.l &0xc,%sp
10387 add.l &0xc,%sp
10395 tst.b 0x2(%sp)
10397 add.l &0xc,%sp
10418 andi.w &0x10,%d1 # keep sign bit in 4th spot
10420 lsr.b &0x4,%d0 # shift rnd prec,mode to lo bits
10421 andi.b &0xf,%d0 # strip hi rnd mode bit
10425 lsl.b &0x1,%d1 # mult index 2 by 2
10432 byte 0x4, 0x4, 0x4, 0x0
10433 byte 0x4, 0x4, 0x4, 0x0
10434 byte 0x4, 0x4, 0x4, 0x0
10435 byte 0x0, 0x0, 0x0, 0x0
10436 byte 0x8+0x4, 0x8+0x4, 0x8, 0x8+0x4
10437 byte 0x8+0x4, 0x8+0x4, 0x8, 0x8+0x4
10438 byte 0x8+0x4, 0x8+0x4, 0x8, 0x8+0x4
10441 long 0x00000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
10442 long 0x00000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
10443 long 0x00000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
10444 long 0x00000000, 0x00000000, 0x00000001, 0x0 # MIN; ext
10446 long 0x3f810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
10447 long 0x3f810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
10448 long 0x3f810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
10449 long 0x3f810000, 0x00000100, 0x00000000, 0x0 # MIN; sgl
10451 long 0x3c010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
10452 long 0x3c010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
10453 long 0x3c010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
10454 long 0x3c010000, 0x00000000, 0x00000800, 0x0 # MIN; dbl
10456 long 0x0,0x0,0x0,0x0
10457 long 0x0,0x0,0x0,0x0
10458 long 0x0,0x0,0x0,0x0
10459 long 0x0,0x0,0x0,0x0
10461 long 0x80000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
10462 long 0x80000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
10463 long 0x80000000, 0x00000000, 0x00000001, 0x0 # MIN; ext
10464 long 0x80000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
10466 long 0xbf810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
10467 long 0xbf810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
10468 long 0xbf810000, 0x00000100, 0x00000000, 0x0 # MIN; sgl
10469 long 0xbf810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
10471 long 0xbc010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
10472 long 0xbc010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
10473 long 0xbc010000, 0x00000000, 0x00000800, 0x0 # MIN; dbl
10474 long 0xbc010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
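#
# The tables above encode the default result for a disabled underflow at
# each rounding precision: +-0 in every case except that rounding toward
# plus infinity yields the smallest denorm (MIN) for positive results and
# rounding toward minus infinity yields -MIN for negative ones.  A C sketch
# of the same policy (enum order matches the table rows, RN/RZ/RM/RP):
#
#	enum rnd { RN, RZ, RM, RP };
#	static double unfl_default(enum rnd mode, int neg, double min_denorm)
#	{
#	    if (!neg && mode == RP) return  min_denorm;   /* MIN; + result */
#	    if ( neg && mode == RM) return -min_denorm;   /* MIN; - result */
#	    return neg ? -0.0 : 0.0;                      /* ZERO rows     */
#	}
#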
10491 fmov.s &0x00000000,%fp0 # load +0
10498 fmov.s &0x80000000,%fp0 # load -0
10524 fmov.s &0x7f800000,%fp0 # load +INF
10533 fmov.s &0xff800000,%fp0 # load -INF
10592 fmov.s &0x3f800000,%fp0 # load +1
10601 fmov.s &0xbf800000,%fp0 # load -1
10605 ppiby2: long 0x3fff0000, 0xc90fdaa2, 0x2168c235
10606 mpiby2: long 0xbfff0000, 0xc90fdaa2, 0x2168c235
10645 fmov.s &0x3f800000,%fp1
10648 fmov.s &0x80000000,%fp0 # return sin result in fp0
10652 fmov.s &0x00000000,%fp0 # return sin result in fp0
10701 andi.w &0x7,%d0
10716 fmovm.x &0x40,EXC_FP0(%a6)
10719 fmovm.x &0x40,EXC_FP1(%a6)
10790 andi.b &0x80,%d1
10803 andi.b &0x80,%d1
11362 cmpi.b 0x3(%sp),&0x7 # is exception UNSUPP?
11378 fmovm.x &0x80,FP_DST(%a6) # dst op is in fp0
11380 lea 0x4(%sp),%a0 # pass: ptr to src op
11421 long fatan - tbl_unsupp # 0a: fatan
11423 long fasin - tbl_unsupp # 0c: fasin
11424 long fatanh - tbl_unsupp # 0d: fatanh
11425 long fsine - tbl_unsupp # 0e: fsin
11426 long ftan - tbl_unsupp # 0f: ftan
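#
# The tbl_unsupp entries above store each handler as a difference from the
# table base ('fatan - tbl_unsupp', ...), so the table is position
# independent: the emulation fetches the offset selected by the opcode's
# extension field and adds the table's run-time address back on.  A C
# sketch of the same computation (names are illustrative only):
#
#	#include <stdint.h>
#	static uintptr_t pick_handler(uintptr_t tbl_base,
#	                              const int32_t *offsets, unsigned index)
#	{
#	    /* offsets[index] holds 'handler - tbl_base' from the assembler */
#	    return tbl_base + (uintptr_t)(intptr_t)offsets[index];
#	}
#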
11556 align 0x10
11558 long 0x3fff - 0x7ffe # ext_max
11559 long 0x3fff - 0x407e # sgl_max
11560 long 0x3fff - 0x43fe # dbl_max
11562 long 0x3fff + 0x0001 # ext_unfl
11563 long 0x3fff - 0x3f80 # sgl_unfl
11564 long 0x3fff - 0x3c00 # dbl_unfl
11568 andi.b &0x30,%d0 # clear rnd prec
11569 ori.b &s_mode*0x10,%d0 # insert sgl prec
11574 andi.b &0x30,%d0
11575 ori.b &d_mode*0x10,%d0 # insert dbl prec
11583 lsl.b &0x3,%d1
11604 lsr.b &0x6,%d1 # shift to lo bits
11623 fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
11626 fmov.l &0x0,%fpsr # clear FPSR
11631 fmov.l &0x0,%fpcr # clear FPCR
11636 fmovm.x &0x80,FP_SCR0(%a6) # store out result
11640 andi.l &0x7fff,%d1 # strip sign
11641 andi.w &0x8000,%d2 # keep old sign
11646 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
11659 # of this operation then has its exponent scaled by -0x6000 to create the
11663 fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
11666 fmov.l &0x0,%fpsr # clear FPSR
11671 fmov.l &0x0,%fpcr # clear FPCR
11680 andi.b &0x13,%d1 # is OVFL or INEX enabled?
11690 fmovm.x (%a0),&0x80 # return default result in fp0
11696 # with an extra -0x6000. if the precision is single or double, we need to
11701 andi.b &0xc0,%d1 # test the rnd prec
11705 fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
11710 andi.l &0x7fff,%d1 # strip sign
11712 subi.l &0x6000,%d1 # subtract bias
11713 andi.w &0x7fff,%d1 # clear sign bit
11714 andi.w &0x8000,%d2 # keep old sign
11718 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
11722 fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
11725 andi.b &0x30,%d1 # keep rnd mode only
11730 fmov.l &0x0,%fpcr # clear FPCR
11741 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
11744 fmov.l &0x0,%fpsr # clear FPSR
11749 fmov.l &0x0,%fpcr # clear FPCR
11754 fcmp.b %fp1,&0x2 # is |result| >= 2.b?
11770 # of this operation then has its exponent scaled by -0x6000 to create the
11779 fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
11781 fmov.l &rz_mode*0x10,%fpcr # set FPCR
11782 fmov.l &0x0,%fpsr # clear FPSR
11787 fmov.l &0x0,%fpcr # clear FPCR
11792 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
11796 fmovm.x &0x80,FP_SCR0(%a6) # store out result
11802 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
11809 fmovm.x FP_SCR1(%a6),&0x40 # load dst op
11812 andi.b &0xc0,%d1 # is precision extended?
11820 fmov.l &0x0,%fpsr # clear FPSR
11824 fmov.l &0x0,%fpcr # clear FPCR
11826 fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
11830 andi.l &0x7fff,%d1 # strip sign
11831 andi.w &0x8000,%d2 # keep old sign
11833 addi.l &0x6000,%d1 # add bias
11834 andi.w &0x7fff,%d1
11838 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
11843 andi.b &0x30,%d1 # use only rnd mode
11852 fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
11855 fmov.l &0x0,%fpsr # clear FPSR
11860 fmov.l &0x0,%fpcr # clear FPCR
11865 fcmp.b %fp1,&0x2 # is |result| > 2.b?
11876 fmovm.x FP_SCR1(%a6),&0x40 # load dst operand
11879 andi.b &0xc0,%d1 # keep rnd prec
11880 ori.b &rz_mode*0x10,%d1 # insert RZ
11883 fmov.l &0x0,%fpsr # clear FPSR
11887 fmov.l &0x0,%fpcr # clear FPCR
11889 fcmp.b %fp1,&0x2 # is |result| < 2.b?
11975 fmov.s &0x80000000,%fp0 # load -ZERO
11979 fmov.s &0x00000000,%fp0 # load +ZERO
11993 fmovm.x DST(%a1),&0x80 # return INF result in fp0
12010 fmovm.x SRC(%a0),&0x80 # return INF result in fp0
12053 andi.b &0x30,%d0 # clear rnd prec
12054 ori.b &s_mode*0x10,%d0 # insert sgl precision
12059 andi.b &0x30,%d0 # clear rnd prec
12060 ori.b &d_mode*0x10,%d0 # insert dbl precision
12073 andi.b &0xc0,%d0 # is precision extended?
12085 fmovm.x SRC(%a0),&0x80 # return result in fp0
12093 andi.b &0xc0,%d0 # is precision extended?
12101 fmovm.x SRC(%a0),&0x80 # return result in fp0
12108 # normalize the mantissa and add the bias of 0x6000 to the resulting negative
12118 addi.w &0x6000,%d0 # add new bias to exponent
12120 andi.w &0x8000,%d1 # keep old sign
12121 andi.w &0x7fff,%d0 # clear sign position
12124 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
12131 cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
12143 cmpi.l %d0,&0x3fff-0x3f80 # will move in underflow?
12145 cmpi.l %d0,&0x3fff-0x407e # will move in overflow?
12153 fmov.l &0x0,%fpsr # clear FPSR
12159 fmov.l &0x0,%fpcr # clear FPCR
12165 fmovm.x &0x80,FP_SCR0(%a6) # store out result
12168 andi.l &0x7fff,%d1 # strip sign
12170 andi.w &0x8000,%d2 # keep old sign
12174 fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
12186 cmpi.l %d0,&0x3fff-0x3c00 # will move in underflow?
12188 cmpi.l %d0,&0x3fff-0x43fe # will move in overflow?
12206 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
12214 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
12228 andi.l &0x7fff,%d1 # strip sign
12230 andi.w &0x8000,%d2 # extract old sign
12231 addi.l &0x6000,%d1 # add new bias
12232 andi.w &0x7fff,%d1
12235 fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
12243 fmov.l &0x0,%fpsr # clear FPSR
12248 fmov.l &0x0,%fpcr # clear FPCR
12257 andi.b &0x13,%d1 # is OVFL or INEX enabled?
12270 fmovm.x (%a0),&0x80 # return default result in fp0
12282 andi.l &0x7fff,%d1 # strip sign
12283 andi.w &0x8000,%d2 # keep old sign
12285 sub.l &0x6000,%d1 # subtract bias
12286 andi.w &0x7fff,%d1
12290 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
12297 fmov.l &0x0,%fpsr # clear FPSR
12303 fmov.l &0x0,%fpcr # clear FPCR
12308 fcmp.b %fp1,&0x2 # is |result| >= 2.b?
12334 rol.l &0x8,%d0 # put ccodes in lo byte
12373 align 0x10
12375 long 0x3fff - 0x0000 # ext_unfl
12376 long 0x3fff - 0x3f81 # sgl_unfl
12377 long 0x3fff - 0x3c01 # dbl_unfl
12380 long 0x3fff - 0x7ffe # ext overflow exponent
12381 long 0x3fff - 0x407e # sgl overflow exponent
12382 long 0x3fff - 0x43fe # dbl overflow exponent
12386 andi.b &0x30,%d0 # clear rnd prec
12387 ori.b &s_mode*0x10,%d0 # insert sgl prec
12392 andi.b &0x30,%d0 # clear rnd prec
12393 ori.b &d_mode*0x10,%d0 # insert dbl prec
12401 lsl.b &0x3,%d1
12427 lsr.b &0x6,%d1 # shift to lo bits
12437 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
12440 fmov.l &0x0,%fpsr # clear FPSR
12445 fmov.l &0x0,%fpcr # clear FPCR
12450 fmovm.x &0x80,FP_SCR0(%a6) # store result on stack
12454 andi.l &0x7fff,%d1 # strip sign
12455 andi.w &0x8000,%d2 # keep old sign
12460 fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
12464 long 0x7fff
12465 long 0x407f
12466 long 0x43ff
12475 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
12478 fmov.l &0x0,%fpsr # clear FPSR
12483 fmov.l &0x0,%fpcr
12487 fmovm.x &0x01,-(%sp) # save result to stack
12489 add.l &0xc,%sp # clear result from stack
12490 andi.l &0x7fff,%d0 # strip sign
12500 andi.b &0x13,%d1 # is OVFL or INEX enabled?
12509 fmovm.x (%a0),&0x80 # return default result in fp0
12514 andi.b &0xc0,%d1 # is precision extended?
12518 fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
12523 andi.l &0x7fff,%d1 # strip sign
12525 subi.l &0x6000,%d1 # subtract bias
12526 andi.w &0x7fff,%d1 # clear sign bit
12527 andi.w &0x8000,%d2 # keep old sign
12531 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
12535 fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
12538 andi.b &0x30,%d1 # keep rnd mode
12543 fmov.l &0x0,%fpcr # clear FPCR
12549 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
12551 fmov.l &rz_mode*0x10,%fpcr # set FPCR
12552 fmov.l &0x0,%fpsr # clear FPSR
12557 fmov.l &0x0,%fpcr # clear FPCR
12562 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
12566 fmovm.x &0x80,FP_SCR0(%a6) # store out result
12572 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
12579 fmovm.x FP_SCR1(%a6),&0x40 # load dst op
12582 andi.b &0xc0,%d1 # is precision extended?
12588 fmov.l &0x0,%fpsr # clear FPSR
12592 fmov.l &0x0,%fpcr # clear FPCR
12594 fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
12598 andi.l &0x7fff,%d1 # strip sign
12599 andi.w &0x8000,%d2 # keep old sign
12601 addi.l &0x6000,%d1 # add bias
12602 andi.w &0x7fff,%d1
12606 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
12611 andi.b &0x30,%d1 # use only rnd mode
12620 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
12623 fmov.l &0x0,%fpsr # clear FPSR
12628 fmov.l &0x0,%fpcr # clear FPCR
12633 fcmp.b %fp1,&0x1 # is |result| > 1.b?
12644 fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
12647 andi.b &0xc0,%d1 # keep rnd prec
12648 ori.b &rz_mode*0x10,%d1 # insert RZ
12651 fmov.l &0x0,%fpsr # clear FPSR
12655 fmov.l &0x0,%fpcr # clear FPCR
12657 fcmp.b %fp1,&0x1 # is |result| < 1.b?
12739 fmov.s &0x80000000,%fp0 # load a -ZERO
12743 fmov.s &0x00000000,%fp0 # load a +ZERO
12759 fmov.s &0xff800000,%fp0 # make result -INF
12763 fmov.s &0x7f800000,%fp0 # make result +INF
12780 fmovm.x DST(%a1),&0x80 # return result in fp0
12787 fmovm.x DST(%a1),&0x80 # return result in fp0
12827 andi.b &0x30,%d0 # clear rnd prec
12828 ori.b &s_mode*0x10,%d0 # insert sgl precision
12833 andi.b &0x30,%d0 # clear rnd prec
12834 ori.b &d_mode*0x10,%d0 # insert dbl prec
12846 andi.b &0xc0,%d0 # is precision extended?
12857 eori.w &0x8000,%d0 # negate sign
12862 fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
12870 andi.b &0xc0,%d0 # is precision extended?
12878 eori.w &0x8000,%d0 # negate sign
12883 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
12891 # normalize the mantissa and add the bias of 0x6000 to the resulting negative
12898 addi.w &0x6000,%d0 # add new bias to exponent
12900 andi.w &0x8000,%d1 # keep old sign
12901 andi.w &0x7fff,%d0 # clear sign position
12904 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
12911 cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
12923 cmpi.l %d0,&0x3fff-0x3f80 # will move in underflow?
12925 cmpi.l %d0,&0x3fff-0x407e # will move in overflow?
12933 fmov.l &0x0,%fpsr # clear FPSR
12939 fmov.l &0x0,%fpcr # clear FPCR
12945 fmovm.x &0x80,FP_SCR0(%a6) # store out result
12948 andi.l &0x7fff,%d1 # strip sign
12950 andi.w &0x8000,%d2 # keep old sign
12954 fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
12966 cmpi.l %d0,&0x3fff-0x3c00 # will move in underflow?
12968 cmpi.l %d0,&0x3fff-0x43fe # will move in overflow?
12979 eori.b &0x80,FP_SCR0_EX(%a6) # negate sign
12986 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
12994 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
13008 andi.l &0x7fff,%d1 # strip sign
13009 andi.w &0x8000,%d2 # keep old sign
13011 addi.l &0x6000,%d1 # add new bias
13012 andi.w &0x7fff,%d1
13015 fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
13023 fmov.l &0x0,%fpsr # clear FPSR
13028 fmov.l &0x0,%fpcr # clear FPCR
13037 andi.b &0x13,%d1 # is OVFL or INEX enabled?
13050 fmovm.x (%a0),&0x80 # return default result in fp0
13062 andi.l &0x7fff,%d1 # strip sign
13063 andi.w &0x8000,%d2 # keep old sign
13065 subi.l &0x6000,%d1 # subtract bias
13066 andi.w &0x7fff,%d1
13069 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
13077 fmov.l &0x0,%fpsr # clear FPSR
13083 fmov.l &0x0,%fpcr # clear FPCR
13088 fcmp.b %fp1,&0x2 # is |result| >= 2.b?
13114 rol.l &0x8,%d0 # put ccodes in lo byte
13238 andi.b &0x30,%d0 # set prec = ext
13241 fmov.l &0x0,%fpsr # clear FPSR
13245 fmov.l &0x0,%fpcr # clear FPCR
13276 mov.b &0x80,FP_SCR0_HI(%a6) # force DENORM ==> small NORM
13287 fmov.s &0x00000000,%fp0 # return +ZERO in fp0
13291 fmov.s &0x80000000,%fp0 # return -ZERO in fp0
13299 fmovm.x SRC(%a0),&0x80 # return result in fp0
13344 fmov.l &0x0,%fpsr # clear FPSR
13378 mov.b &0x80,FP_SCR0_HI(%a6) # force DENORM ==> small NORM
13389 fmov.s &0x00000000,%fp0 # return +ZERO in fp0
13393 fmov.s &0x80000000,%fp0 # return -ZERO in fp0
13401 fmovm.x SRC(%a0),&0x80 # return result in fp0
13419 # scale_to_zero_src() - make exponent = 0; get scale factor #
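#
# A hedged C sketch of what the scale_to_zero_src() helper named above is
# documented to do: force the operand's biased exponent to 0x3fff so the
# operation cannot overflow or underflow, and hand back the amount by which
# the true exponent was moved so the caller can re-scale and range-check
# the result afterwards.
#
#	#include <stdint.h>
#	static int32_t scale_to_zero(uint16_t *sign_exp)
#	{
#	    int32_t moved = 0x3fff - (int32_t)(*sign_exp & 0x7fff);
#	    *sign_exp = (uint16_t)((*sign_exp & 0x8000) | 0x3fff);
#	    return moved;   /* scale factor to undo on the way out */
#	}
#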
13450 andi.b &0x30,%d0 # clear rnd prec
13451 ori.b &s_mode*0x10,%d0 # insert sgl precision
13456 andi.b &0x30,%d0 # clear rnd prec
13457 ori.b &d_mode*0x10,%d0 # insert dbl precision
13469 andi.b &0xc0,%d0 # is precision extended?
13482 fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
13490 andi.b &0xc0,%d0 # is precision extended?
13501 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
13509 # normalize the mantissa and add the bias of 0x6000 to the resulting negative
13516 addi.w &0x6000,%d0 # add new bias to exponent
13518 andi.w &0x8000,%d1 # keep old sign
13519 andi.w &0x7fff,%d0 # clear sign position
13522 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
13529 cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
13541 cmpi.l %d0,&0x3fff-0x3f80 # will move in underflow?
13543 cmpi.l %d0,&0x3fff-0x407e # will move in overflow?
13551 fmov.l &0x0,%fpsr # clear FPSR
13557 fmov.l &0x0,%fpcr # clear FPCR
13563 fmovm.x &0x80,FP_SCR0(%a6) # store out result
13566 andi.l &0x7fff,%d1 # strip sign
13568 andi.w &0x8000,%d2 # keep old sign
13572 fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
13584 cmpi.l %d0,&0x3fff-0x3c00 # will move in underflow?
13586 cmpi.l %d0,&0x3fff-0x43fe # will move in overflow?
13597 bclr &0x7,FP_SCR0_EX(%a6) # force absolute value
13601 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
13609 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
13623 andi.l &0x7fff,%d1 # strip sign
13624 andi.w &0x8000,%d2 # keep old sign
13626 addi.l &0x6000,%d1 # add new bias
13627 andi.w &0x7fff,%d1
13630 fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
13638 fmov.l &0x0,%fpsr # clear FPSR
13643 fmov.l &0x0,%fpcr # clear FPCR
13652 andi.b &0x13,%d1 # is OVFL or INEX enabled?
13665 fmovm.x (%a0),&0x80 # return default result in fp0
13677 andi.l &0x7fff,%d1 # strip sign
13678 andi.w &0x8000,%d2 # keep old sign
13680 subi.l &0x6000,%d1 # subtract bias
13681 andi.w &0x7fff,%d1
13684 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
13692 fmov.l &0x0,%fpsr # clear FPSR
13698 fmov.l &0x0,%fpcr # clear FPCR
13703 fcmp.b %fp1,&0x2 # is |result| >= 2.b?
13760 lsl.b &0x3,%d1
13768 fmovm.x DST(%a1),&0x80 # load dst op
13773 rol.l &0x8,%d0 # extract ccode bits
13845 andi.b &0xf7,FPSR_CC(%a6)
13849 andi.b &0xf7,FPSR_CC(%a6)
13964 lsl.b &0x3,%d1
13985 cmpi.l %d0,&0x3fff-0x7ffe # would result ovfl?
13989 cmpi.l %d0,&0x3fff+0x0001 # would result unfl?
13994 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
13997 fmov.l &0x0,%fpsr # clear FPSR
14002 fmov.l &0x0,%fpcr # clear FPCR
14007 fmovm.x &0x80,FP_SCR0(%a6) # store out result
14011 andi.l &0x7fff,%d1 # strip sign
14012 andi.w &0x8000,%d2 # keep old sign
14017 fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
14021 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
14024 fmov.l &0x0,%fpsr # clear FPSR
14029 fmov.l &0x0,%fpcr # clear FPCR
14039 andi.b &0x13,%d1 # is OVFL or INEX enabled?
14046 andi.b &0x30,%d0 # force prec = ext
14049 fmovm.x (%a0),&0x80 # return default result in fp0
14053 fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
14058 andi.l &0x7fff,%d1 # strip sign
14060 subi.l &0x6000,%d1 # subtract bias
14061 andi.w &0x7fff,%d1
14062 andi.w &0x8000,%d2 # keep old sign
14066 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
14070 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
14073 fmov.l &0x0,%fpsr # clear FPSR
14078 fmov.l &0x0,%fpcr # clear FPCR
14083 fcmp.b %fp1,&0x2 # is |result| >= 2.b?
14092 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
14094 fmov.l &rz_mode*0x10,%fpcr # set FPCR
14095 fmov.l &0x0,%fpsr # clear FPSR
14100 fmov.l &0x0,%fpcr # clear FPCR
14105 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
14109 fmovm.x &0x80,FP_SCR0(%a6) # store out result
14115 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
14122 fmovm.x FP_SCR1(%a6),&0x40 # load dst op
14125 fmov.l &0x0,%fpsr # clear FPSR
14129 fmov.l &0x0,%fpcr # clear FPCR
14131 fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
14135 andi.l &0x7fff,%d1 # strip sign
14136 andi.w &0x8000,%d2 # keep old sign
14138 addi.l &0x6000,%d1 # add bias
14139 andi.w &0x7fff,%d1
14143 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
14147 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
14150 fmov.l &0x0,%fpsr # clear FPSR
14155 fmov.l &0x0,%fpcr # clear FPCR
14160 fcmp.b %fp1,&0x2 # is |result| > 2.b?
14171 fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
14174 andi.b &0xc0,%d1 # keep rnd prec
14175 ori.b &rz_mode*0x10,%d1 # insert RZ
14178 fmov.l &0x0,%fpsr # clear FPSR
14182 fmov.l &0x0,%fpcr # clear FPCR
14184 fcmp.b %fp1,&0x2 # is |result| < 2.b?
14305 lsl.b &0x3,%d1
14331 lsr.b &0x6,%d1
14333 cmpi.l %d0,&0x3fff-0x7ffe
14336 cmpi.l %d0,&0x3fff-0x0000 # will result underflow?
14341 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
14344 fmov.l &0x0,%fpsr # clear FPSR
14349 fmov.l &0x0,%fpcr # clear FPCR
14354 fmovm.x &0x80,FP_SCR0(%a6) # store result on stack
14358 andi.l &0x7fff,%d1 # strip sign
14359 andi.w &0x8000,%d2 # keep old sign
14364 fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
14368 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
14371 fmov.l &0x0,%fpsr # clear FPSR
14376 fmov.l &0x0,%fpcr
14380 fmovm.x &0x01,-(%sp) # save result to stack
14382 add.l &0xc,%sp # clear result
14383 andi.l &0x7fff,%d1 # strip sign
14385 cmp.l %d1,&0x7fff # did divide overflow?
14392 andi.b &0x13,%d1 # is OVFL or INEX enabled?
14399 andi.b &0x30,%d0 # kill precision
14402 fmovm.x (%a0),&0x80 # return default result in fp0
14406 fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
14411 andi.l &0x7fff,%d1 # strip sign
14412 andi.w &0x8000,%d2 # keep old sign
14414 subi.l &0x6000,%d1 # subtract new bias
14415 andi.w &0x7fff,%d1 # clear ms bit
14419 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
14425 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
14427 fmov.l &rz_mode*0x10,%fpcr # set FPCR
14428 fmov.l &0x0,%fpsr # clear FPSR
14433 fmov.l &0x0,%fpcr # clear FPCR
14438 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
14442 fmovm.x &0x80,FP_SCR0(%a6) # store out result
14448 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
14455 fmovm.x FP_SCR1(%a6),&0x40 # load dst op
14458 fmov.l &0x0,%fpsr # clear FPSR
14462 fmov.l &0x0,%fpcr # clear FPCR
14464 fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
14468 andi.l &0x7fff,%d1 # strip sign
14469 andi.w &0x8000,%d2 # keep old sign
14471 addi.l &0x6000,%d1 # add bias
14472 andi.w &0x7fff,%d1 # clear top bit
14476 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
14483 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
14486 fmov.l &0x0,%fpsr # clear FPSR
14491 fmov.l &0x0,%fpcr # clear FPCR
14496 fcmp.b %fp1,&0x1 # is |result| > 1.b?
14507 fmovm.x FP_SCR1(%a6),&0x40 # load dst op into %fp1
14510 ori.b &rz_mode*0x10,%d1 # force RZ rnd mode
14513 fmov.l &0x0,%fpsr # clear FPSR
14517 fmov.l &0x0,%fpcr # clear FPCR
14519 fcmp.b %fp1,&0x1 # is |result| < 1.b?
14637 andi.b &0x30,%d0 # clear rnd prec
14638 ori.b &s_mode*0x10,%d0 # insert sgl prec
14643 andi.b &0x30,%d0 # clear rnd prec
14644 ori.b &d_mode*0x10,%d0 # insert dbl prec
14652 lsl.b &0x3,%d1
14664 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
14666 fmov.l &0x0,%fpsr # clear FPSR
14671 fmov.l &0x0,%fpcr # clear FPCR
14680 fmovm.x &0x01,-(%sp) # save result to stack
14683 lsr.b &0x6,%d1
14686 andi.l &0x7fff,%d2 # strip sign
14698 andi.w &0x8000,%d1 # keep sign
14702 fmovm.x (%sp)+,&0x80 # return result in fp0
14708 # fmov.s &0x00000000,%fp0 # return zero in fp0
14712 long 0x7fff # ext ovfl
14713 long 0x407f # sgl ovfl
14714 long 0x43ff # dbl ovfl
14717 long 0x0000 # ext unfl
14718 long 0x3f81 # sgl unfl
14719 long 0x3c01 # dbl unfl
14725 andi.b &0x13,%d1 # is OVFL or INEX enabled?
14728 add.l &0xc,%sp
14735 fmovm.x (%a0),&0x80 # return default result in fp0
14741 andi.b &0xc0,%d1 # is precision extended?
14746 andi.w &0x8000,%d1 # keep sign
14747 subi.l &0x6000,%d2 # subtract new bias
14748 andi.w &0x7fff,%d2
14752 fmovm.x (%sp)+,&0x40 # return EXOP in fp1
14756 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
14759 andi.b &0x30,%d1 # keep rnd mode
14764 fmov.l &0x0,%fpcr # clear FPCR
14766 add.l &0xc,%sp # clear result from stack
14767 fmovm.x &0x01,-(%sp) # save result to stack
14773 add.l &0xc,%sp # clear result from stack
14775 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
14777 fmov.l &rz_mode*0x10,%fpcr # set FPCR
14778 fmov.l &0x0,%fpsr # clear FPSR
14782 fmov.l &0x0,%fpcr # clear FPCR
14788 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
14792 fmovm.x &0x80,FP_SCR0(%a6) # store out result
14798 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
14803 fmovm.x FP_SCR1(%a6),&0x40 # load dst op
14806 andi.b &0xc0,%d1 # is precision extended?
14812 fmov.l &0x0,%fpsr # clear FPSR
14816 fmov.l &0x0,%fpcr # clear FPCR
14818 fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
14821 andi.l &0x7fff,%d1 # strip sign
14822 andi.w &0x8000,%d2 # keep old sign
14824 addi.l &0x6000,%d1 # add new bias
14825 andi.w &0x7fff,%d1 # clear top bit
14828 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
14833 andi.b &0x30,%d1 # use only rnd mode
14845 andi.b &0xc0,%d1
14848 mov.l 0x4(%sp),%d1 # extract hi(man)
14849 cmpi.l %d1,&0x80000000 # is hi(man) = 0x80000000?
14852 tst.l 0x8(%sp) # is lo(man) = 0x0?
14861 # 0x8000000000000000 and this mantissa is the result of rounding non-zero
14868 fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
14871 andi.b &0xc0,%d1 # keep rnd prec
14872 ori.b &rz_mode*0x10,%d1 # insert rnd mode
14874 fmov.l &0x0,%fpsr # clear FPSR
14878 fmov.l &0x0,%fpcr # clear FPCR
14970 fmov.s &0x00000000,%fp0 # return +ZERO
14981 andi.b &0x30,%d1 # extract rnd mode
14982 cmpi.b %d1,&rm_mode*0x10 # is rnd mode == RM?
14984 fmov.s &0x00000000,%fp0 # return +ZERO
14989 fmov.s &0x80000000,%fp0 # return -ZERO
15034 fmovm.x SRC(%a0),&0x80 # return src INF
15044 fmovm.x DST(%a1),&0x80 # return dst INF
15090 andi.b &0x30,%d0 # clear rnd prec
15091 ori.b &s_mode*0x10,%d0 # insert sgl prec
15096 andi.b &0x30,%d0 # clear rnd prec
15097 ori.b &d_mode*0x10,%d0 # insert dbl prec
15105 lsl.b &0x3,%d1
15117 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
15119 fmov.l &0x0,%fpsr # clear FPSR
15124 fmov.l &0x0,%fpcr # clear FPCR
15133 fmovm.x &0x01,-(%sp) # save result to stack
15136 lsr.b &0x6,%d1
15139 andi.l &0x7fff,%d2 # strip sign
15151 andi.w &0x8000,%d1 # keep sign
15155 fmovm.x (%sp)+,&0x80 # return result in fp0
15161 # fmov.s &0x00000000,%fp0 # return zero in fp0
15165 long 0x7fff # ext ovfl
15166 long 0x407f # sgl ovfl
15167 long 0x43ff # dbl ovfl
15170 long 0x0000 # ext unfl
15171 long 0x3f81 # sgl unfl
15172 long 0x3c01 # dbl unfl
15178 andi.b &0x13,%d1 # is OVFL or INEX enabled?
15181 add.l &0xc,%sp
15188 fmovm.x (%a0),&0x80 # return default result in fp0
15194 andi.b &0xc0,%d1 # is precision extended?
15199 andi.w &0x8000,%d1 # keep sign
15200 subi.l &0x6000,%d2 # subtract new bias
15201 andi.w &0x7fff,%d2 # clear top bit
15205 fmovm.x (%sp)+,&0x40 # return EXOP in fp1
15209 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
15212 andi.b &0x30,%d1 # clear rnd prec
15217 fmov.l &0x0,%fpcr # clear FPCR
15219 add.l &0xc,%sp # clear result from stack
15220 fmovm.x &0x01,-(%sp) # save result to stack
15226 add.l &0xc,%sp # clear result from stack
15228 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
15230 fmov.l &rz_mode*0x10,%fpcr # set FPCR
15231 fmov.l &0x0,%fpsr # clear FPSR
15235 fmov.l &0x0,%fpcr # clear FPCR
15241 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
15245 fmovm.x &0x80,FP_SCR0(%a6) # store out result
15251 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
15256 fmovm.x FP_SCR1(%a6),&0x40
15259 andi.b &0xc0,%d1 # is precision extended?
15265 fmov.l &0x0,%fpsr # clear FPSR
15269 fmov.l &0x0,%fpcr # clear FPCR
15271 fmovm.x &0x40,FP_SCR0(%a6) # store result to stack
15274 andi.l &0x7fff,%d1 # strip sign
15275 andi.w &0x8000,%d2 # keep old sign
15277 addi.l &0x6000,%d1 # add new bias
15278 andi.w &0x7fff,%d1 # clear top bit
15281 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
15286 andi.b &0x30,%d1 # clear rnd prec
15298 andi.b &0xc0,%d1 # fetch rnd prec
15301 mov.l 0x4(%sp),%d1
15302 cmpi.l %d1,&0x80000000 # is hi(man) = 0x80000000?
15305 tst.l 0x8(%sp) # is lo(man) = 0x0?
15314 # 0x8000000000000000 and this mantissa is the result of rounding non-zero
15321 fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
15324 andi.b &0xc0,%d1 # keep rnd prec
15325 ori.b &rz_mode*0x10,%d1 # insert rnd mode
15327 fmov.l &0x0,%fpsr # clear FPSR
15331 fmov.l &0x0,%fpcr # clear FPCR
15422 fmov.s &0x00000000,%fp0 # no; return +ZERO
15433 andi.b &0x30,%d1 # extract rnd mode
15434 cmpi.b %d1,&rm_mode*0x10 # is rnd mode = RM?
15436 fmov.s &0x00000000,%fp0 # no; return +ZERO
15441 fmov.s &0x80000000,%fp0 # return -ZERO
15483 fmovm.x SRC(%a0),&0x80 # return src INF
15490 fmovm.x DST(%a1),&0x80 # return dst INF
15535 andi.b &0x30,%d0 # clear rnd prec
15536 ori.b &s_mode*0x10,%d0 # insert sgl precision
15541 andi.b &0x30,%d0 # clear rnd prec
15542 ori.b &d_mode*0x10,%d0 # insert dbl precision
15558 andi.b &0xc0,%d0 # is precision extended?
15562 fmov.l &0x0,%fpsr # clear FPSR
15575 andi.b &0xc0,%d0 # is precision extended?
15590 cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
15603 cmpi.l %d0,&0x3fff-0x3f81 # will move in underflow?
15606 cmpi.l %d0,&0x3fff-0x407f # will move in overflow?
15614 fmov.l &0x0,%fpsr # clear FPSR
15620 fmov.l &0x0,%fpcr # clear FPCR
15626 fmovm.x &0x80,FP_SCR0(%a6) # store out result
15629 andi.l &0x7fff,%d1 # strip sign
15631 andi.w &0x8000,%d2 # keep old sign
15635 fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
15648 cmpi.l %d0,&0x3fff-0x3c01 # will move in underflow?
15651 cmpi.l %d0,&0x3fff-0x43ff # will move in overflow?
15660 btst &0x0,1+FP_SCR0_EX(%a6) # is exponent 0x3fff?
15669 fmov.l &rz_mode*0x10,%fpcr # set FPCR
15670 fmov.l &0x0,%fpsr # clear FPSR
15675 fmov.l &0x0,%fpcr # clear FPCR
15681 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
15685 fmovm.x &0x80,FP_SCR0(%a6) # store out result
15691 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
15705 andi.l &0x7fff,%d1 # strip sign
15706 andi.w &0x8000,%d2 # keep old sign
15708 addi.l &0x6000,%d1 # add new bias
15709 andi.w &0x7fff,%d1
15712 fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
15720 fmov.l &0x0,%fpsr # clear FPSR
15725 fmov.l &0x0,%fpcr # clear FPCR
15734 andi.b &0x13,%d1 # is OVFL or INEX enabled?
15747 fmovm.x (%a0),&0x80 # return default result in fp0
15759 andi.l &0x7fff,%d1 # strip sign
15760 andi.w &0x8000,%d2 # keep old sign
15762 subi.l &0x6000,%d1 # subtract bias
15763 andi.w &0x7fff,%d1
15766 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
15774 btst &0x0,1+FP_SCR0_EX(%a6) # is exponent 0x3fff?
15777 fmov.l &0x0,%fpsr # clear FPSR
15783 fmov.l &0x0,%fpcr # clear FPCR
15788 fcmp.b %fp1,&0x1 # is |result| >= 1.b?
15811 # fsqrt(+0) = +0
15812 # fsqrt(-0) = -0
15820 fmov.s &0x00000000,%fp0 # return +ZERO
15824 fmov.s &0x80000000,%fp0 # return -ZERO
15832 fmovm.x SRC(%a0),&0x80 # return +INF in fp0
15857 # equal to 0x3fff and scale the SRC exponent by the value that the #
15877 andi.w &0x7fff,%d0
15878 andi.w &0x7fff,%d1
15885 # dst exp is > src exp; scale dst to exp = 0x3fff
15906 add.w 0x2(%sp),%d0 # scale src exponent by scale factor
15908 and.w &0x8000,%d1
15916 andi.w &0x8000,FP_SCR0_EX(%a6) # zero src exponent
15917 bset &0x0,1+FP_SCR0_EX(%a6) # set exp = 1
15922 # src exp is >= dst exp; scale src to exp = 0x3fff
15942 add.w 0x2(%sp),%d0 # scale dst exponent by scale factor
15944 andi.w &0x8000,%d1
15952 andi.w &0x8000,FP_SCR1_EX(%a6) # zero dst exponent
15953 bset &0x0,1+FP_SCR1_EX(%a6) # set exp = 1
15976 # Set the exponent of the input operand to 0x3fff. Save the value #
15988 andi.l &0x7fff,%d1 # extract operand's exponent
15990 andi.w &0x8000,%d0 # extract operand's sgn
15991 or.w &0x3fff,%d0 # insert new operand's exponent(=0)
15999 mov.l &0x3fff,%d0
16031 # to 0x3ffe and return a scale factor of "(exp-0x3ffe)/2". If the #
16033 # return a scale factor of "(exp-0x3fff)/2". #
16043 andi.l &0x7fff,%d1 # extract operand's exponent
16045 andi.w &0x8000,FP_SCR0_EX(%a6) # extract operand's sgn
16047 btst &0x0,%d1 # is exp even or odd?
16050 ori.w &0x3fff,FP_SCR0_EX(%a6) # insert new operand's exponent(=0)
16052 mov.l &0x3fff,%d0
16054 asr.l &0x1,%d0 # divide scale factor by 2
16058 ori.w &0x3ffe,FP_SCR0_EX(%a6) # insert new operand's exponent(=-1)
16060 mov.l &0x3ffe,%d0
16062 asr.l &0x1,%d0 # divide scale factor by 2
16069 btst &0x0,%d0 # is exp even or odd?
16072 ori.w &0x3fff,FP_SCR0_EX(%a6) # insert new operand's exponent(=0)
16074 add.l &0x3fff,%d0
16075 asr.l &0x1,%d0 # divide scale factor by 2
16079 ori.w &0x3ffe,FP_SCR0_EX(%a6) # insert new operand's exponent(=-1)
16081 add.l &0x3ffe,%d0
16082 asr.l &0x1,%d0 # divide scale factor by 2
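The even/odd test above is the usual square-root range reduction: the biased exponent is replaced by 0x3fff (unbiased 0) or 0x3ffe (unbiased -1) so that the amount removed is even, and the root is later rescaled by two raised to half that amount. A C sketch of the same idea using double precision (illustrative only):

    #include <math.h>
    #include <stdio.h>

    /* Square root by exponent halving: split x into f * 2^e, force e
     * even (folding one factor of 2 into f if needed), take sqrt of the
     * reduced operand and rescale by 2^(e/2). Same idea as the
     * 0x3fff/0x3ffe exponent substitution above, shown with C doubles. */
    static double sqrt_by_scaling(double x)
    {
        int e;
        double f = frexp(x, &e);      /* x = f * 2^e, f in [0.5, 1)  */
        if (e & 1) {                  /* odd exponent: use 2f, e - 1 */
            f *= 2.0;
            e -= 1;
        }
        return ldexp(sqrt(f), e / 2); /* sqrt(f) * 2^(e/2)           */
    }

    int main(void)
    {
        printf("%g %g\n", sqrt_by_scaling(12.25), sqrt(12.25)); /* 3.5 3.5 */
        return 0;
    }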
16103 # Set the exponent of the input operand to 0x3fff. Save the value #
16115 andi.l &0x7fff,%d1 # extract operand's exponent
16117 andi.w &0x8000,%d0 # extract operand's sgn
16118 or.w &0x3fff,%d0 # insert new operand's exponent(=0)
16126 mov.l &0x3fff,%d0
16186 bset &0x6, FP_SRC_HI(%a6) # set SNAN bit
16198 bset &0x6, FP_DST_HI(%a6) # set SNAN bit
16209 btst &0x7, FTEMP_EX(%a0) # is NAN neg?
16213 fmovm.x (%a0), &0x80
16241 fmovm.x nan_return(%pc), &0x80
16245 long 0x7fff0000, 0xffffffff, 0xffffffff
16286 ror.l &0x8,%d1 # rotate to top byte
16849 andi.w &0x7, %d1 # extract count register
16854 subq.w &0x1, %d0 # Dn - 1 -> Dn
16858 cmpi.w %d0, &-0x1 # is (Dn == -1)?
16865 addq.l &0x4,%d0 # add instruction length
16914 ror.l &0x8,%d1 # rotate to top byte
17484 ror.l &0x8,%d1 # rotate to top byte
18066 andi.b &0x38,%d1 # extract src mode
18071 andi.w &0x7,%d1 # pass index in d1
18084 cmpi.b %d1,&0x18 # is <ea> (An)+ ?
18086 cmpi.b %d1,&0x20 # is <ea> -(An) ?
18109 mov.b 0x1+EXC_OPWORD(%a6),%d1 # fetch opword
18110 andi.w &0x7,%d1 # pass index in d1
18111 movq.l &0x1,%d0 # pass amt to inc by
18127 mov.b 0x1+EXC_OPWORD(%a6),%d1 # fetch opword
18128 andi.w &0x7,%d1 # pass index in d1
18129 movq.l &0x1,%d0 # pass amt to dec by
18145 mov.w &0x00a1,EXC_VOFF(%a6) # set FSLW
18201 # 1111 0010 00 |<ea>| 11@& 1000 0$$$ 0000 #
18203 # & = (0): predecrement addressing mode #
18205 # @ = (0): move listed regs from memory to the FPU #
18220 andi.w &0x70,%d1 # extract reg bits
18221 lsr.b &0x4,%d1 # shift into lo bits
18226 andi.l &0x000000ff,%d0 # keep only lo byte
18240 btst &0x5,EXC_EXTWORD(%a6) # is it a move in or out?
18247 btst &0x4,EXC_EXTWORD(%a6) # control or predecrement?
18257 btst &0x5,EXC_SR(%a6) # user or supervisor mode?
18279 mov.l 0x0+EXC_FP0(%a6),(%a0)+ # yes
18280 mov.l 0x4+EXC_FP0(%a6),(%a0)+
18281 mov.l 0x8+EXC_FP0(%a6),(%a0)+
18284 lsl.b &0x1,%d1 # should FP1 be moved?
18287 mov.l 0x0+EXC_FP1(%a6),(%a0)+ # yes
18288 mov.l 0x4+EXC_FP1(%a6),(%a0)+
18289 mov.l 0x8+EXC_FP1(%a6),(%a0)+
18292 lsl.b &0x1,%d1 # should FP2 be moved?
18295 fmovm.x &0x20,(%a0) # yes
18296 add.l &0xc,%a0
18299 lsl.b &0x1,%d1 # should FP3 be moved?
18302 fmovm.x &0x10,(%a0) # yes
18303 add.l &0xc,%a0
18306 lsl.b &0x1,%d1 # should FP4 be moved?
18309 fmovm.x &0x08,(%a0) # yes
18310 add.l &0xc,%a0
18313 lsl.b &0x1,%d1 # should FP5 be moved?
18316 fmovm.x &0x04,(%a0) # yes
18317 add.l &0xc,%a0
18320 lsl.b &0x1,%d1 # should FP6 be moved?
18323 fmovm.x &0x02,(%a0) # yes
18324 add.l &0xc,%a0
18327 lsl.b &0x1,%d1 # should FP7 be moved?
18330 fmovm.x &0x01,(%a0) # yes
18331 add.l &0xc,%a0
18374 mov.l (%a0)+,0x0+EXC_FP0(%a6) # yes
18375 mov.l (%a0)+,0x4+EXC_FP0(%a6)
18376 mov.l (%a0)+,0x8+EXC_FP0(%a6)
18379 lsl.b &0x1,%d1 # should FP1 be moved?
18382 mov.l (%a0)+,0x0+EXC_FP1(%a6) # yes
18383 mov.l (%a0)+,0x4+EXC_FP1(%a6)
18384 mov.l (%a0)+,0x8+EXC_FP1(%a6)
18387 lsl.b &0x1,%d1 # should FP2 be moved?
18390 fmovm.x (%a0)+,&0x20 # yes
18393 lsl.b &0x1,%d1 # should FP3 be moved?
18396 fmovm.x (%a0)+,&0x10 # yes
18399 lsl.b &0x1,%d1 # should FP4 be moved?
18402 fmovm.x (%a0)+,&0x08 # yes
18405 lsl.b &0x1,%d1 # should FP5 be moved?
18408 fmovm.x (%a0)+,&0x04 # yes
18411 lsl.b &0x1,%d1 # should FP6 be moved?
18414 fmovm.x (%a0)+,&0x02 # yes
18417 lsl.b &0x1,%d1 # should FP7 be moved?
18420 fmovm.x (%a0)+,&0x01 # yes
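Both unrolled sequences above walk the fmovm register mask one bit at a time, shifting it left so the next register appears in the most significant bit, and transfer 12 bytes of extended-precision data per set bit; fp0 and fp1 come from their exception-frame images while fp2 through fp7 are moved directly. A C sketch of the mask walk (the names and the 8x12 buffer layout are assumptions made for the example):

    #include <stdint.h>
    #include <string.h>

    /* Walk an 8-bit fmovm-style register mask msb-first (bit 7 = fp0)
     * and copy 12 bytes of extended-precision data per selected
     * register, as the unrolled sequences above do. */
    static uint8_t *copy_selected(uint8_t mask, const uint8_t regs[8][12],
                                  uint8_t *dst)
    {
        for (int i = 0; i < 8; i++) {
            if (mask & 0x80) {            /* should FPi be moved?        */
                memcpy(dst, regs[i], 12); /* move 12 bytes (x precision) */
                dst += 12;
            }
            mask <<= 1;                   /* expose next register bit    */
        }
        return dst;
    }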
18440 byte 0x00,0x0c,0x0c,0x18,0x0c,0x18,0x18,0x24
18441 byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
18442 byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
18443 byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
18444 byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
18445 byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
18446 byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
18447 byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
18448 byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
18449 byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
18450 byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
18451 byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
18452 byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
18453 byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
18454 byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
18455 byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
18456 byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
18457 byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
18458 byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
18459 byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
18460 byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
18461 byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
18462 byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
18463 byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
18464 byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
18465 byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
18466 byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
18467 byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
18468 byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
18469 byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
18470 byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
18471 byte 0x3c,0x48,0x48,0x54,0x48,0x54,0x54,0x60
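Each entry of the 256-byte table above equals 0x0c times the population count of its index, which is evidently the number of bytes (12 per extended-precision register) transferred by an fmovm whose 8-bit register mask is that index. A C program that regenerates the table (not from the source):

    #include <stdio.h>

    /* Regenerate the size table above: entry[mask] = 12 * popcount(mask),
     * the byte count moved by an fmovm.x with that 8-bit register mask. */
    int main(void)
    {
        for (int mask = 0; mask < 256; mask++) {
            int count = 0;
            for (int bit = 0; bit < 8; bit++)
                count += (mask >> bit) & 1;
            printf("0x%02x%s", 12 * count, (mask % 8 == 7) ? "\n" : ",");
        }
        return 0;
    }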
18476 # ex: 0x00 ==> 0x00
18477 # 0x01 ==> 0x80
18478 # 0x02 ==> 0x40
18481 # 0xfd ==> 0xbf
18482 # 0xfe ==> 0x7f
18483 # 0xff ==> 0xff
18486 byte 0x00,0x80,0x40,0xc0,0x20,0xa0,0x60,0xe0
18487 byte 0x10,0x90,0x50,0xd0,0x30,0xb0,0x70,0xf0
18488 byte 0x08,0x88,0x48,0xc8,0x28,0xa8,0x68,0xe8
18489 byte 0x18,0x98,0x58,0xd8,0x38,0xb8,0x78,0xf8
18490 byte 0x04,0x84,0x44,0xc4,0x24,0xa4,0x64,0xe4
18491 byte 0x14,0x94,0x54,0xd4,0x34,0xb4,0x74,0xf4
18492 byte 0x0c,0x8c,0x4c,0xcc,0x2c,0xac,0x6c,0xec
18493 byte 0x1c,0x9c,0x5c,0xdc,0x3c,0xbc,0x7c,0xfc
18494 byte 0x02,0x82,0x42,0xc2,0x22,0xa2,0x62,0xe2
18495 byte 0x12,0x92,0x52,0xd2,0x32,0xb2,0x72,0xf2
18496 byte 0x0a,0x8a,0x4a,0xca,0x2a,0xaa,0x6a,0xea
18497 byte 0x1a,0x9a,0x5a,0xda,0x3a,0xba,0x7a,0xfa
18498 byte 0x06,0x86,0x46,0xc6,0x26,0xa6,0x66,0xe6
18499 byte 0x16,0x96,0x56,0xd6,0x36,0xb6,0x76,0xf6
18500 byte 0x0e,0x8e,0x4e,0xce,0x2e,0xae,0x6e,0xee
18501 byte 0x1e,0x9e,0x5e,0xde,0x3e,0xbe,0x7e,0xfe
18502 byte 0x01,0x81,0x41,0xc1,0x21,0xa1,0x61,0xe1
18503 byte 0x11,0x91,0x51,0xd1,0x31,0xb1,0x71,0xf1
18504 byte 0x09,0x89,0x49,0xc9,0x29,0xa9,0x69,0xe9
18505 byte 0x19,0x99,0x59,0xd9,0x39,0xb9,0x79,0xf9
18506 byte 0x05,0x85,0x45,0xc5,0x25,0xa5,0x65,0xe5
18507 byte 0x15,0x95,0x55,0xd5,0x35,0xb5,0x75,0xf5
18508 byte 0x0d,0x8d,0x4d,0xcd,0x2d,0xad,0x6d,0xed
18509 byte 0x1d,0x9d,0x5d,0xdd,0x3d,0xbd,0x7d,0xfd
18510 byte 0x03,0x83,0x43,0xc3,0x23,0xa3,0x63,0xe3
18511 byte 0x13,0x93,0x53,0xd3,0x33,0xb3,0x73,0xf3
18512 byte 0x0b,0x8b,0x4b,0xcb,0x2b,0xab,0x6b,0xeb
18513 byte 0x1b,0x9b,0x5b,0xdb,0x3b,0xbb,0x7b,0xfb
18514 byte 0x07,0x87,0x47,0xc7,0x27,0xa7,0x67,0xe7
18515 byte 0x17,0x97,0x57,0xd7,0x37,0xb7,0x77,0xf7
18516 byte 0x0f,0x8f,0x4f,0xcf,0x2f,0xaf,0x6f,0xef
18517 byte 0x1f,0x9f,0x5f,0xdf,0x3f,0xbf,0x7f,0xff
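This second 256-byte table maps a byte to its bit-reversed image, exactly as the examples in the comment above it show (0x01 ==> 0x80, 0xfe ==> 0x7f), presumably to flip the register mask between the two fmovm mask orderings. A C program that regenerates it (not from the source):

    #include <stdio.h>

    /* Regenerate the bit-reverse table above: entry[i] is byte i with
     * its bit order reversed (bit 0 <-> bit 7, bit 1 <-> bit 6, ...). */
    int main(void)
    {
        for (int i = 0; i < 256; i++) {
            int rev = 0;
            for (int bit = 0; bit < 8; bit++)
                if (i & (1 << bit))
                    rev |= 0x80 >> bit;
            printf("0x%02x%s", rev, (i % 8 == 7) ? "\n" : ",");
        }
        return 0;
    }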
18531 andi.w &0x3f,%d0 # extract mode field
18532 andi.l &0x7,%d1 # extract reg field
18616 mov.l EXC_DREGS+0x8(%a6),%a0 # Get current a0
18620 mov.l EXC_DREGS+0xc(%a6),%a0 # Get current a1
18651 mov.l EXC_DREGS+0x8(%a6),%d0 # Get current a0
18654 mov.l %d1,EXC_DREGS+0x8(%a6) # Save incr value
18659 mov.l EXC_DREGS+0xc(%a6),%d0 # Get current a1
18662 mov.l %d1,EXC_DREGS+0xc(%a6) # Save incr value
18720 mov.l EXC_DREGS+0x8(%a6),%d0 # Get current a0
18722 mov.l %d0,EXC_DREGS+0x8(%a6) # Save decr value
18727 mov.l EXC_DREGS+0xc(%a6),%d0 # Get current a1
18729 mov.l %d0,EXC_DREGS+0xc(%a6) # Save decr value
18782 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
18790 add.l EXC_DREGS+0x8(%a6),%a0 # a0 + d16
18795 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
18803 add.l EXC_DREGS+0xc(%a6),%a0 # a1 + d16
18808 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
18821 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
18834 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
18847 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
18860 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
18873 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
18891 addq.l &0x8,%d1
18896 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
18904 btst &0x8,%d0 # is disp only 8 bits?
18910 rol.w &0x4,%d1
18911 andi.w &0xf,%d1 # extract index regno
18919 btst &0xb,%d2 # is it word or long?
18924 rol.w &0x7,%d1
18925 andi.l &0x3,%d1 # extract scale value
18941 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
18955 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
18969 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
18980 subq.l &0x2,%a0 # adjust <ea>
18991 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
18998 subq.l &0x2,%a0 # adjust base
19000 btst &0x8,%d0 # is disp only 8 bits?
19006 rol.w &0x4,%d1 # rotate reg num into place
19007 andi.w &0xf,%d1 # extract register number
19015 btst &0xb,%d2 # is index word or long?
19020 rol.w &0x7,%d1 # rotate scale value into place
19021 andi.l &0x3,%d1 # extract scale value
19037 btst &0x6,%d0 # is the index suppressed?
19040 movm.l &0x3c00,-(%sp) # save d2-d5
19045 clr.l %d2 # yes, so index = 0
19054 movm.l &0x3c00,-(%sp) # save d2-d5
19059 btst &0xb,%d5 # is index word or long?
19070 btst &0x7,%d5 # is the bd suppressed?
19077 # beq.l fmovm_error # if (size == 0) it's reserved
19079 cmpi.b %d0,&0x2
19084 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
19094 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
19110 cmpi.b %d0,&0x2
19115 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
19125 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
19140 btst &0x2,%d5 # pre or post indexing?
19170 movm.l (%sp)+,&0x003c # restore d2-d5
19177 movm.l (%sp)+,&0x003c # restore d2-d5
19178 mov.w &0x0101,%d0
19182 movm.l (%sp)+,&0x003c # restore d2-d5
19187 mov.w &0x00e1,%d0
19192 mov.w &0x0161,%d0
19238 cmpi.b %d0,&0x9c # fpcr & fpsr & fpiar ?
19240 cmpi.b %d0,&0x98 # fpcr & fpsr ?
19242 cmpi.b %d0,&0x94 # fpcr & fpiar ?
19248 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
19256 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
19268 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
19276 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
19288 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
19296 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
19308 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
19316 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
19324 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
19369 andi.w &0x38, %d0 # extract mode field
19370 andi.l &0x7, %d1 # extract reg field
19372 cmpi.b %d0,&0x18 # is mode (An)+ ?
19375 cmpi.b %d0,&0x20 # is mode -(An) ?
19379 cmpi.b %d0,&0x3c # is mode #<data>?
19390 lea ([USER_FPIAR,%a6],0x4),%a0 # no; return <ea>
19412 cmpi.b %d0,&0xc # is opsize ext or packed?
19416 sub.l &0x8,%a0 # correct <ea>
19452 andi.w &0x38,%d0 # extract mode field
19453 andi.l &0x7,%d1 # extract reg field
19455 cmpi.b %d0,&0x18 # is mode (An)+ ?
19458 cmpi.b %d0,&0x20 # is mode -(An) ?
19472 swbeg &0x8
19484 addi.l &0xc,EXC_DREGS+0x8(%a6)
19487 addi.l &0xc,EXC_DREGS+0xc(%a6)
19490 add.l &0xc,%a2
19493 add.l &0xc,%a3
19496 add.l &0xc,%a4
19499 add.l &0xc,%a5
19502 addi.l &0xc,EXC_A6(%a6)
19506 addi.l &0xc,EXC_A7(%a6)
19515 sub.l &0x8,%a0
19516 sub.l &0x8,EXC_EA(%a6)
19519 swbeg &0x8
19531 mov.l %a0,EXC_DREGS+0x8(%a6)
19534 mov.l %a0,EXC_DREGS+0xc(%a6)
19609 # 15 13 12 10 9 7 6 0
19616 # bfextu EXC_CMDREG(%a6){&0:&3}, %d0 # extract opclass
19617 # cmpi.b %d0, &0x2 # which class is it? ('000,'010,'011)
19622 btst &0x6,EXC_CMDREG(%a6)
19630 btst &0x5,%d0 # testing extension bits
19631 beq.b op000_src # (bit 5 == 0) => monadic
19632 btst &0x4,%d0 # (bit 5 == 1)
19633 beq.b op000_dst # (bit 4 == 0) => dyadic
19634 and.w &0x007f,%d0 # extract extension bits {6:0}
19635 cmpi.w %d0,&0x0038 # is it an fcmp (dyadic) ?
19673 btst &0x5,%d0 # testing extension bits
19674 beq.b op010_src # (bit 5 == 0) => monadic
19675 btst &0x4,%d0 # (bit 5 == 1)
19676 beq.b op010_dst # (bit 4 == 0) => dyadic
19677 and.w &0x007f,%d0 # extract extension bits {6:0}
19678 cmpi.w %d0,&0x0038 # is it an fcmp (dyadic) ?
19709 swbeg &0x8
19726 fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
19739 fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
19752 fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
19780 fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
19803 swbeg &0x8
19822 movq.l &0x4, %d0 # pass: 4 (bytes)
19835 fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
19858 movq.l &0x2, %d0 # pass: 2 (bytes)
19871 fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
19894 movq.l &0x1, %d0 # pass: 1 (byte)
19907 fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
19930 movq.l &0x4, %d0 # pass: 4 (bytes)
19954 fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
19970 lsl.l &0x8, %d0
19975 btst &0x7, (%a0) # is sgn bit set?
19977 bset &0x7, FP_SRC_EX(%a6) # set sgn of xprec value
19982 mov.w &0x3f81, %d1 # xprec exp = 0x3f81
19983 sub.w %d0, %d1 # exp = 0x3f81 - shft amt.
19992 mov.w &0x7fff, FP_SRC_EX(%a6) # set exp of SNAN
19994 lsl.l &0x8, %d0 # extract and insert hi(man)
19998 btst &0x7, (%a0) # see if sign of SNAN is set
20000 bset &0x7, FP_SRC_EX(%a6)
20012 movq.l &0x8, %d0 # pass: 8 (bytes)
20019 movq.l &0x8, %d0 # pass: # bytes to read
20037 fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
20042 movq.l &0x8, %d0 # pass: # bytes to read
20057 mov.l &0xb, %d1
20061 btst &0x7, (%a0) # is sgn bit set?
20063 bset &0x7, FP_SRC_EX(%a6) # set sgn of xprec value
20068 mov.w &0x3c01, %d1 # xprec exp = 0x3c01
20069 sub.w %d0, %d1 # exp = 0x3c01 - shft amt.
20078 mov.w &0x7fff, FP_SRC_EX(%a6) # set exp of SNAN
20083 mov.l &0xb, %d1
20087 btst &0x7, (%a0) # see if sign of SNAN is set
20089 bset &0x7, FP_SRC_EX(%a6)
20101 mov.l &0xc, %d0 # pass: 12 (bytes)
20105 mov.l &0xc, %d0 # pass: # of bytes to read
20205 swbeg &0x8
20226 fmovm.x SRC(%a0),&0x80 # load value
20233 fmov.l &0x0,%fpcr # clear FPCR
20238 andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
20251 andi.w &0x7,%d1
20257 andi.l &0x80000000,%d1 # keep DENORM sign
20258 ori.l &0x00800000,%d1 # make smallest sgl
20272 fmovm.x SRC(%a0),&0x80 # load value
20279 fmov.l &0x0,%fpcr # clear FPCR
20284 andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
20297 andi.w &0x7,%d1
20303 andi.l &0x80000000,%d1 # keep DENORM sign
20304 ori.l &0x00800000,%d1 # make smallest sgl
20318 fmovm.x SRC(%a0),&0x80 # load value
20325 fmov.l &0x0,%fpcr # clear FPCR
20331 andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
20344 andi.w &0x7,%d1
20350 andi.l &0x80000000,%d1 # keep DENORM sign
20351 ori.l &0x00800000,%d1 # make smallest sgl
20372 fmovm.x SRC(%a0),&0x80 # return result
20378 mov.l &0xc,%d0 # pass: opsize is 12 bytes
20400 andi.b &0x0a,%d0 # is UNFL or INEX enabled?
20420 andi.w &0x7fff,%d0
20421 andi.w &0x8000,FP_SCR0_EX(%a6) # keep only old sign
20423 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
20434 andi.b &0x30,%d0 # clear rnd prec
20435 ori.b &s_mode*0x10,%d0 # insert sgl prec
20445 andi.w &0x7fff,%d0 # strip sign
20458 fmovm.x SRC(%a0),&0x80 # fetch fop from stack
20461 fmov.l &0x0,%fpsr # clear FPSR
20465 fmov.l &0x0,%fpcr # clear FPCR
20472 andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
20485 andi.w &0x7,%d1
20502 clr.l %d0 # pass: S.F. = 0
20519 andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
20532 andi.w &0x7,%d1
20537 andi.b &0x0a,%d1 # is UNFL or INEX enabled?
20539 addq.l &0x4,%sp
20565 fmovm.x (%a0),&0x80 # load default overflow result
20569 andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
20582 andi.w &0x7,%d1
20587 andi.b &0x0a,%d1 # is UNFL or INEX enabled?
20589 addq.l &0x4,%sp
20594 # (1) force the exp to 0x3fff
20602 andi.w &0x8000,%d1 # keep it,clear exp
20603 ori.w &0x3fff,%d1 # insert exp = 0
20611 fmov.l &0x0,%fpcr # clear FPCR
20614 fcmp.b %fp0,&0x2 # did exponent increase?
20633 andi.w &0x7fff,%d0
20646 bclr &0x7,FP_SCR0_EX(%a6) # clear sign bit
20651 lsr.b &0x4,%d1
20652 andi.w &0x0c,%d1
20655 lsr.b &0x4,%d1
20656 andi.w &0x03,%d1
20662 bset &0x7,FP_SCR0_EX(%a6) # yes
20665 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
20672 andi.b &0x30,%d0 # clear rnd prec
20673 ori.b &d_mode*0x10,%d0 # insert dbl prec
20683 andi.w &0x7fff,%d0 # strip sign
20696 fmovm.x SRC(%a0),&0x80 # fetch fop from stack
20699 fmov.l &0x0,%fpsr # clear FPSR
20703 fmov.l &0x0,%fpcr # clear FPCR
20710 movq.l &0x8,%d0 # pass: opsize is 8 bytes
20731 clr.l %d0 # pass: S.F. = 0
20751 movq.l &0x8,%d0 # pass: opsize is 8 bytes
20758 andi.b &0x0a,%d1 # is UNFL or INEX enabled?
20760 addq.l &0x4,%sp
20768 andi.w &0x7ff,%d0
20786 fmovm.x (%a0),&0x80 # load default overflow result
20791 movq.l &0x8,%d0 # pass: opsize is 8 bytes
20798 andi.b &0x0a,%d1 # is UNFL or INEX enabled?
20800 addq.l &0x4,%sp
20805 # (1) force the exp to 0x3fff
20813 andi.w &0x8000,%d1 # keep it,clear exp
20814 ori.w &0x3fff,%d1 # insert exp = 0
20822 fmov.l &0x0,%fpcr # clear FPCR
20825 fcmp.b %fp0,&0x2 # did exponent increase?
20855 # 95 64 63 62 32 31 11 0 #
20863 # 63 51 32 31 0 #
20874 subq.w &0x1,%d0 # yes; denorm bias = DBL_BIAS - 1
20877 lsl.l &0x4,%d0 # d0 in proper place for dbl prec exp
20880 bset &0x1f,%d0 # if negative, set sign
20891 bfextu %d1{&0:&21},%d0 # get ls 21 bits of double
20920 # 95 64 63 62 40 32 31 12 0 #
20928 # 31 22 0 #
20939 subq.w &0x1,%d0 # yes; denorm bias = SGL_BIAS - 1
20942 lsl.l &0x7,%d0 # shift it into single exp bits
20945 bset &0x1f,%d0 # if negative, put in sign first
20948 andi.l &0x7fffff00,%d1 # get upper 23 bits of ms
20949 lsr.l &0x8,%d1 # and put them flush right
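The fragments above repack an extended-precision value (15-bit biased exponent, explicit integer bit, 64-bit mantissa) into the IEEE single-precision layout of 1 sign bit, 8 exponent bits and 23 fraction bits, with extra care for results that become single-precision denormals. A C sketch of the straightforward normalized case only (illustrative; it does not handle the denormal path shown above):

    #include <stdint.h>

    /* Repack a normalized extended-precision value into IEEE single
     * format. 'man' carries the explicit integer bit in bit 63, so the
     * 23 fraction bits sit in bits 62..40. Field names are illustrative. */
    static uint32_t ext_to_sgl_bits(int sign, uint32_t ext_exp, uint64_t man)
    {
        uint32_t exp  = ext_exp - 0x3fff + 127;             /* re-bias 15 -> 8 bits */
        uint32_t frac = (uint32_t)((man >> 40) & 0x7fffff); /* 23 bits below j-bit  */

        return ((uint32_t)sign << 31) | (exp << 23) | frac;
    }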
20962 btst &0x4,EXC_CMDREG(%a6) # static or dynamic?
20967 lsr.b &0x4,%d1
20968 andi.w &0x7,%d1
20986 # andi.l &0xcfff000f,FP_SCR0(%a6) # clear unused fields
20987 andi.l &0xcffff00f,FP_SCR0(%a6) # clear unused fields
21007 andi.w &0xf000,FP_SCR0(%a6)
21015 mov.l &0xc,%d0 # pass: opsize is 12 bytes
21048 bset &0x6,FP_SRC_HI(%a6) # set snan bit
21097 mov.l EXC_DREGS+0x0(%a6),%d0
21100 mov.l EXC_DREGS+0x4(%a6),%d0
21121 mov.l EXC_DREGS+0x8(%a6),%d0
21124 mov.l EXC_DREGS+0xc(%a6),%d0
21182 mov.l %d0,EXC_DREGS+0x0(%a6)
21185 mov.l %d0,EXC_DREGS+0x4(%a6)
21243 mov.w %d0,2+EXC_DREGS+0x0(%a6)
21246 mov.w %d0,2+EXC_DREGS+0x4(%a6)
21304 mov.b %d0,3+EXC_DREGS+0x0(%a6)
21307 mov.b %d0,3+EXC_DREGS+0x4(%a6)
21369 iareg0: add.l %d0,EXC_DREGS+0x8(%a6)
21371 iareg1: add.l %d0,EXC_DREGS+0xc(%a6)
21384 cmpi.b %d0,&0x1
21389 addq.l &0x2,EXC_A7(%a6)
21433 dareg0: sub.l %d0,EXC_DREGS+0x8(%a6)
21435 dareg1: sub.l %d0,EXC_DREGS+0xc(%a6)
21448 cmpi.b %d0,&0x1
21453 subq.l &0x2,EXC_A7(%a6)
21493 mov.l 0+EXC_FP0(%a6), 0+FP_SRC(%a6)
21499 mov.l 0+EXC_FP1(%a6), 0+FP_SRC(%a6)
21505 fmovm.x &0x20, FP_SRC(%a6)
21509 fmovm.x &0x10, FP_SRC(%a6)
21513 fmovm.x &0x08, FP_SRC(%a6)
21517 fmovm.x &0x04, FP_SRC(%a6)
21521 fmovm.x &0x02, FP_SRC(%a6)
21525 fmovm.x &0x01, FP_SRC(%a6)
21566 mov.l 0+EXC_FP0(%a6), 0+FP_DST(%a6)
21572 mov.l 0+EXC_FP1(%a6), 0+FP_DST(%a6)
21578 fmovm.x &0x20, FP_DST(%a6)
21582 fmovm.x &0x10, FP_DST(%a6)
21586 fmovm.x &0x08, FP_DST(%a6)
21590 fmovm.x &0x04, FP_DST(%a6)
21594 fmovm.x &0x02, FP_DST(%a6)
21598 fmovm.x &0x01, FP_DST(%a6)
21641 fmovm.x &0x80, EXC_FP0(%a6)
21644 fmovm.x &0x80, EXC_FP1(%a6)
21647 fmovm.x &0x01, -(%sp)
21648 fmovm.x (%sp)+, &0x20
21651 fmovm.x &0x01, -(%sp)
21652 fmovm.x (%sp)+, &0x10
21655 fmovm.x &0x01, -(%sp)
21656 fmovm.x (%sp)+, &0x08
21659 fmovm.x &0x01, -(%sp)
21660 fmovm.x (%sp)+, &0x04
21663 fmovm.x &0x01, -(%sp)
21664 fmovm.x (%sp)+, &0x02
21667 fmovm.x &0x01, -(%sp)
21668 fmovm.x (%sp)+, &0x01
21706 short 0x0
21718 lsr.b &0x2, %d0 # shift prec to lo bits
21739 mov.l &0x20000000, %d0 # set sticky bit in return value
21741 clr.l FTEMP_HI(%a0) # hi(man) = 0
21742 clr.l FTEMP_LO(%a0) # lo(man) = 0
21751 # %d1{15:0} : denormalization threshold #
21777 ble.b dnrm_no_lp # d1 <= 0
21778 cmpi.w %d1, &0x20 # is ( 0 <= d1 < 32) ?
21780 cmpi.w %d1, &0x40 # is (32 <= d1 < 64) ?
21792 # case (0<d1<32)
21811 # |0.....0| NEW_HI | NEW_FTEMP_LO |grs |
21827 bfextu FTEMP_HI(%a0){&0:%d0}, %d2 # %d2 = new FTEMP_HI
21839 and.l &0xe0000000, %d0 # clear all but G,R,S
21863 # |0...............0|0....0| NEW_LO |grs |
21870 subi.w &0x20, %d1 # %d1 now between 0 and 32
21871 mov.l &0x20, %d0
21880 bfextu FTEMP_HI(%a0){&0:%d0}, %d2 # %d2 = new FTEMP_LO
21896 clr.l FTEMP_HI(%a0) # store FTEMP_HI = 0
21898 and.l &0xe0000000, %d0 # clear all but G,R,S
21924 mov.l &0x20000000, %d0 # set sticky bit
21944 # |0...............0|0................0|grs |
21950 and.l &0xc0000000, %d0 # extract G,R
21951 and.l &0x3fffffff, %d1 # extract other bits
21972 # |0...............0|0................0|0rs |
21977 and.l &0x80000000, %d0 # extract R bit
21978 lsr.l &0x1, %d0 # shift high bit into R bit
21979 and.l &0x7fffffff, %d1 # extract other bits
22048 # If (G,R,S == 0) then result is exact and round is done, else set
22060 # All of the following assumes grs != 0.
22074 # If sign of fp number = 0 (positive), then add 1 to l. #
22080 mov.l &0xffffffff, %d0 # force g,r,s to be all f's
22097 mov.l &0xffffffff, %d0 # force g,r,s to be all f's
22108 # If (g=1), then add 1 to l and if (r=s=0), then clear l #
22112 asl.l &0x1, %d0 # shift g-bit to c-bit
22123 set ad_1_sgl, 0x00000100 # constant to add 1 to l-bit in sgl prec
22124 set ad_1_dbl, 0x00000800 # constant to add 1 to l-bit in dbl prec
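The comments above spell out round-to-nearest-even in guard/round/sticky terms: if the guard bit is clear the truncated mantissa is already correct, otherwise one is added at the lsb, and on an exact tie (round and sticky both clear) the lsb is cleared so the result is even; a carry out of the mantissa is handled by the surrounding code by bumping the exponent. A C sketch of just the rounding decision (not the FPSP code itself):

    #include <stdint.h>

    /* Round-to-nearest-even on an integer mantissa, given the guard,
     * round and sticky bits collected below it. Mantissa carry-out is
     * left to the caller, as in the assembly above. */
    static uint64_t round_nearest_even(uint64_t man, int g, int r, int s)
    {
        if (g) {
            man += 1;                    /* add 1 to l                 */
            if (!r && !s)
                man &= ~(uint64_t)1;     /* tie: clear l, result even  */
        }
        return man;
    }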
22134 add.w &0x1, FTEMP_EX(%a0) # and incr exponent
22136 tst.l %d0 # test for rs = 0
22138 and.w &0xfe00, FTEMP_HI+2(%a0) # clear the l-bit
22140 and.l &0xffffff00, FTEMP_HI(%a0) # truncate bits beyond sgl limit
22152 roxr.w FTEMP_HI(%a0) # mant is 0 so restore v-bit
22153 roxr.w FTEMP_HI+2(%a0) # mant is 0 so restore v-bit
22156 add.w &0x1,FTEMP_EX(%a0) # and inc exp
22158 tst.l %d0 # test rs = 0
22160 and.b &0xfe,FTEMP_LO+3(%a0) # clear the l bit
22170 addq.l &0x1, FTEMP_HI(%a0) # propagate carry
22173 roxr.w FTEMP_HI(%a0) # mant is 0 so restore v-bit
22174 roxr.w FTEMP_HI+2(%a0) # mant is 0 so restore v-bit
22177 addq.w &0x1, FTEMP_EX(%a0) # incr exponent
22179 tst.l %d0 # test for rs = 0
22181 and.w &0xf000, FTEMP_LO+2(%a0) # clear the l-bit
22184 and.l &0xfffff800,FTEMP_LO(%a0) # truncate bits beyond dbl limit
22234 movm.l &0x3000, -(%sp) # make some temp registers {d2/d3}
22241 # 96 64 40 32 0
22256 and.l &0x0000003f, %d2 # s bit is the or of all other
22266 # 96 64 32 11 0
22281 and.l &0x000001ff, %d2 # s bit is the or-ing of all
22292 movm.l (%sp)+, &0xc # restore scratch registers {d2/d3}
22324 bfffo %d0{&0:&32}, %d2 # how many places to shift?
22329 bfextu %d1{&0:%d2}, %d3 # extract lo bits
22345 bfffo %d1{&0:&32}, %d2 # how many places to shift?
22381 bfffo FTEMP_HI(%a0){&0:&32}, %d0 # how many shifts are needed?
22388 bfffo FTEMP_LO(%a0){&0:&32}, %d0 # is operand really a zero?
22399 and.w &0x7fff, %d1 # strip off sgn
22401 cmp.w %d0, %d1 # will denorm push exp < 0?
22402 bgt.b unnorm_nrm_zero # yes; denorm only until exp = 0
22405 # exponent would not go < 0. Therefore, number stays normalized
22409 and.w &0x8000, %d0 # save old sign
22419 # exponent would go < 0, so only denormalize until exp = 0
22432 and.w &0x8000, FTEMP_EX(%a0) # set exp = 0
22447 clr.l FTEMP_LO(%a0) # lo(man) = 0
22449 and.w &0x8000, FTEMP_EX(%a0) # set exp = 0
22458 and.w &0x8000, FTEMP_EX(%a0) # force exponent to zero
22488 andi.w &0x7fff, %d0 # strip off sign
22489 cmpi.w %d0, &0x7fff # is (EXP == MAX)?
22492 btst &0x7,FTEMP_HI(%a0)
22498 tst.w %d0 # is exponent = 0?
22519 andi.w &0x8000,FTEMP_EX(%a0) # clear exponent
22529 and.l &0x7fffffff, %d0 # msb is a don't care!
22535 btst &0x6, FTEMP_HI(%a0)
22568 andi.l &0x7ff00000, %d0
22571 cmpi.l %d0, &0x7ff00000
22578 and.l &0x000fffff, %d1
22589 and.l &0x000fffff, %d1
22631 andi.l &0x7f800000, %d0
22634 cmpi.l %d0, &0x7f800000
22641 and.l &0x007fffff, %d1
22650 and.l &0x007fffff, %d1
22703 btst &0x7, FTEMP_EX(%a0) # make "internal" format
22707 and.w &0x7fff, %d1
22713 mov.l 0x4(%sp),%d0 # pass rnd prec.
22714 andi.w &0x00c0,%d0
22715 lsr.w &0x4,%d0
22719 mov.w 0x6(%sp),%d1 # load prec:mode into %d1
22720 andi.w &0xc0,%d1 # extract rnd prec
22721 lsr.w &0x4,%d1
22723 mov.w 0x6(%sp),%d1
22724 andi.w &0x30,%d1
22725 lsr.w &0x4,%d1
22731 bclr &0x7, FTEMP_EX(%a0) # clear sgn first; may have residue
22734 bset &0x7, FTEMP_EX(%a0) # set result sgn
22759 add.l &0x4, %sp # clear stack
22767 btst &0x7,FTEMP_EX(%a0) # make "internal" format
22771 and.w &0x7fff,%d1
22783 mov.w 0x6(%sp),%d1 # load rnd mode
22784 andi.w &0x30,%d1 # extract rnd prec
22785 lsr.w &0x4,%d1
22791 bclr &0x7,FTEMP_EX(%a0) # clear sgn first; may have residue
22794 bset &0x7,FTEMP_EX(%a0) # set result sgn
22819 add.l &0x4,%sp # clear stack
22833 # d1.b = '-1' => (-); '0' => (+) #
22856 andi.w &0x10,%d1 # keep result sign
22857 lsr.b &0x4,%d0 # shift prec/mode
22860 lsl.b &0x1,%d1 # multiply d1 by 2
22865 and.w &0x10, %d1 # keep result sign
22870 lsl.b &0x1, %d1 # shift left by 1
22883 byte 0x2, 0x0, 0x0, 0x2
22884 byte 0x2, 0x0, 0x0, 0x2
22885 byte 0x2, 0x0, 0x0, 0x2
22886 byte 0x0, 0x0, 0x0, 0x0
22887 byte 0x2+0x8, 0x8, 0x2+0x8, 0x8
22888 byte 0x2+0x8, 0x8, 0x2+0x8, 0x8
22889 byte 0x2+0x8, 0x8, 0x2+0x8, 0x8
22892 long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
22893 long 0x7ffe0000,0xffffffff,0xffffffff,0x00000000 # +EXT; RZ
22894 long 0x7ffe0000,0xffffffff,0xffffffff,0x00000000 # +EXT; RM
22895 long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
22897 long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
22898 long 0x407e0000,0xffffff00,0x00000000,0x00000000 # +SGL; RZ
22899 long 0x407e0000,0xffffff00,0x00000000,0x00000000 # +SGL; RM
22900 long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
22902 long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
22903 long 0x43fe0000,0xffffffff,0xfffff800,0x00000000 # +DBL; RZ
22904 long 0x43fe0000,0xffffffff,0xfffff800,0x00000000 # +DBL; RM
22905 long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
22907 long 0x00000000,0x00000000,0x00000000,0x00000000
22908 long 0x00000000,0x00000000,0x00000000,0x00000000
22909 long 0x00000000,0x00000000,0x00000000,0x00000000
22910 long 0x00000000,0x00000000,0x00000000,0x00000000
22912 long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
22913 long 0xfffe0000,0xffffffff,0xffffffff,0x00000000 # -EXT; RZ
22914 long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
22915 long 0xfffe0000,0xffffffff,0xffffffff,0x00000000 # -EXT; RP
22917 long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
22918 long 0xc07e0000,0xffffff00,0x00000000,0x00000000 # -SGL; RZ
22919 long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
22920 long 0xc07e0000,0xffffff00,0x00000000,0x00000000 # -SGL; RP
22922 long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
22923 long 0xc3fe0000,0xffffffff,0xfffff800,0x00000000 # -DBL; RZ
22924 long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
22925 long 0xc3fe0000,0xffffffff,0xfffff800,0x00000000 # -DBL; RP
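These blocks are the IEEE default overflow results selected by sign and rounding mode: round-to-nearest returns infinity, round-to-zero returns the largest finite number of the rounding precision, and the directed modes return infinity only when that direction points away from zero. A C sketch of the selection rule (illustrative):

    #include <stdbool.h>

    enum rnd_mode    { RND_RN, RND_RZ, RND_RM, RND_RP };
    enum ovfl_result { OVFL_INF, OVFL_MAXNORM };

    /* IEEE default overflow result by sign and rounding mode, the rule
     * the result tables above encode for ext, sgl and dbl precision. */
    static enum ovfl_result ovfl_default(bool negative, enum rnd_mode mode)
    {
        switch (mode) {
        case RND_RN: return OVFL_INF;                      /* +/- inf        */
        case RND_RZ: return OVFL_MAXNORM;                  /* largest finite */
        case RND_RM: return negative ? OVFL_INF : OVFL_MAXNORM;
        case RND_RP: return negative ? OVFL_MAXNORM : OVFL_INF;
        }
        return OVFL_INF;  /* not reached */
    }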
22960 mov.l &0xc,%d0 # packed is 12 bytes
22964 mov.l &0xc,%d0 # pass: 12 bytes
22972 cmpi.w %d0,&0x7fff # INF or NAN?
22980 andi.b &0x0f,%d0 # clear all but last nybble
22990 fmovm.x &0x80,FP_SRC(%a6) # make this the srcop
23024 # added if SM = 1 and subtracted if SM = 0. Scale the #
23027 # SM = 0 a non-zero digit in the integer position #
23059 byte 0,0,0,0
23065 set FSTRT,0
23072 mov.l 0x0(%a0),FP_SCR0_EX(%a6) # make a copy of input
23073 mov.l 0x4(%a0),FP_SCR0_HI(%a6) # so we don't alter it
23074 mov.l 0x8(%a0),FP_SCR0_LO(%a6)
23078 movm.l &0x3c00,-(%sp) # save d2-d5
23079 fmovm.x &0x1,-(%sp) # save fp1
23107 mulu.l &0xa,%d1 # mul partial product by one digit place
23119 or.l &0x40000000,%d4 # set SE in d4,
23120 or.l &0x40000000,(%a0) # and in working bcd
23146 fmov.s &0x00000000,%fp0 # accumulator
23164 fmul.s &0x41200000,%fp0 # fp0 = fp0 * 10
23169 # If all the digits (8) in that long word have been converted (d2=0),
23170 # then inc d1 (=2) to point to the next long word and reset d3 to 0
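The digit loop sketched by the lines above converts the packed-decimal mantissa by Horner's rule: the accumulator starts at zero and each decimal digit is folded in as acc = acc*10 + digit, most significant digit first, while the decimal exponent digits are combined the same way in integer form (mulu.l &0xa). A C sketch of the accumulation (illustrative only):

    #include <stdint.h>

    /* Horner-style accumulation of decimal digits, most significant
     * first, as the loop above does with fp0 and fmul.s &0x41200000
     * (10.0). 'digits' holds one already-unpacked BCD digit per byte. */
    static double decimal_digits_to_double(const uint8_t *digits, int ndigits)
    {
        double acc = 0.0;                  /* fmov.s &0x00000000,%fp0 */
        for (int i = 0; i < ndigits; i++)
            acc = acc * 10.0 + digits[i];  /* acc = acc*10 + digit    */
        return acc;
    }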
23270 or.l &0x40000000,%d4 # and set SE in d4
23271 or.l &0x40000000,(%a0) # and in memory
23279 fmov.s &0x3f800000,%fp1 # init fp1 to 1
23318 and.l &0xbfffffff,%d4 # and clr SE in d4
23319 and.l &0xbfffffff,(%a0) # and in memory
23327 fmov.s &0x3f800000,%fp1 # init fp1 to 1
23387 bfextu %d4{&0:&2},%d0 # {FPCR[6],FPCR[5],SM,SE}
23409 or.l &0x40000000,(%a0) # and set SE bit
23412 fmov.s &0x3f800000,%fp1 # init fp1 to 1
23454 add.l &0x4,%sp # clear 1 lw param
23455 fmovm.x (%sp)+,&0x40 # restore fp1
23456 movm.l (%sp)+,&0x3c # restore d2-d5
23457 fmov.l &0x0,%fpcr
23458 fmov.l &0x0,%fpsr
23485 # approximated by adding e + 0.f when the original #
23492 # A5. Set ICTR = 0; #
23567 long 0x3FFD0000,0x9A209A84,0xFBCFF798,0x00000000
23569 long 0x3FFD0000,0x9A209A84,0xFBCFF799,0x00000000
23573 long 0x3F800000,0x00000000,0x00000000,0x00000000
23575 long 0x40000000,0x00000000,0x00000000,0x00000000
23577 long 0x41200000,0x00000000,0x00000000,0x00000000
23579 long 0x459A2800,0x00000000,0x00000000,0x00000000
23582 byte 0,0,0,0
23612 movm.l &0x3f20,-(%sp) # {%d2-%d7/%a2}
23613 fmovm.x &0x7,-(%sp) # {%fp0-%fp2}
23622 fmov.l &rm_mode*0x10,%fpcr # set RM and ext
23635 and.w &0x7fff,%d0 # strip sign of normalized exp
23651 and.w &0x7fff,%d0 # strip sign of normalized exp
23662 and.l &0x7fffffff,FP_SCR1(%a6) # create abs(X)
23666 # imated by adding e + 0.f when the original value is viewed
23696 mov.w &0x3fff,FP_SCR1(%a6) # replace exponent with 0x3fff
23698 sub.w &0x3fff,%d0 # strip off bias
23714 fmov.l &0,%fpsr # zero all of fpsr - nothing needed
23717 # A5. Set ICTR = 0;
23754 ble.b k_neg # if k <= 0, LEN = ILOG + 1 - k
23755 mov.l %d7,%d4 # if k > 0, LEN = k
23787 # RN 00 0 0 00/0 RN
23788 # RN 00 0 1 00/0 RN
23789 # RN 00 1 0 00/0 RN
23790 # RN 00 1 1 00/0 RN
23791 # RZ 01 0 0 11/3 RP
23792 # RZ 01 0 1 11/3 RP
23793 # RZ 01 1 0 10/2 RM
23795 # RM 10 0 0 11/3 RP
23796 # RM 10 0 1 10/2 RM
23797 # RM 10 1 0 10/2 RM
23799 # RP 11 0 0 10/2 RM
23800 # RP 11 0 1 11/3 RP
23801 # RP 11 1 0 11/3 RP
23806 # d0: exponent/scratch - final is 0
23807 # d2: x/0 or 24 for A9
23810 # d5: 0/ICTR:LAMBDA
23811 # d6: ILOG/ILOG or k if ((k<=0)&(ILOG<k))
23826 bgt.b k_pos # if pos and > 0, skip this
23829 mov.l %d7,%d6 # if ((k<0) & (ILOG < k)) ILOG = k
23840 cmp.l %d0,&0xffffecd4 # test iscale <= -4908
23853 bge.b x_pos # if pos, don't set bit 0
23854 addq.l &1,%d1 # if neg, set bit 0
23892 fmov.l &0,%fpsr # clr INEX
23893 fmov.l &rz_mode*0x10,%fpcr # set RZ rounding mode
23910 # d2: 0 or 24/unchanged
23945 fmovm.x &0x2,-(%sp) # save 10^ISCALE to stack
23948 andi.w &0x7fff,%d3 # clear sign
23949 ori.w &0x8000,(%a0) # make DENORM exp negative
23951 subi.w &0x3fff,%d3 # subtract BIAS
23953 subi.w &0x3fff,%d3 # subtract BIAS
23955 subi.w &0x3fff,%d3 # subtract BIAS
23959 andi.w &0x8000,(%sp) # keep sign
23961 andi.w &0x7fff,(%a0) # clear sign bit on DENORM again
23962 mov.l 0x8(%a0),-(%sp) # put input op mantissa on stk
23963 mov.l 0x4(%a0),-(%sp)
23964 mov.l &0x3fff0000,-(%sp) # force exp to zero
23965 fmovm.x (%sp)+,&0x80 # load normalized DENORM into fp0
23972 mov.l &0x3fff0000,-(%sp) # force exp to zero
23975 mov.l &0x3fff0000,-(%sp)# force exp to zero
24029 and.l &0x00000030,USER_FPCR(%a6) # set size to ext,
24058 movm.l &0xc0c0,-(%sp) # save regs used by sintd0 {%d0-%d1/%a0-%a1}
24066 or.l &0x80000000,(%a0) # if neg, use -Y
24072 fmov.l &0x0,%fpsr # clear the AEXC bits!!!
24074 ## andi.l &0x00000030,%d0
24079 ## fmov.l &0x0,%fpcr
24088 movm.l (%sp)+,&0x303 # restore regs used by sint {%d0-%d1/%a0-%a1}
24106 # d0: FPCR with size set to ext/scratch final = 0
24126 tst.w %d5 # check if ICTR = 0
24160 fmov.l &rm_mode*0x10,%fpcr # set rmode to RM
24174 fmov.l &rm_mode*0x10,%fpcr # set rmode to RM
24177 # Since ICTR <> 0, we have already been through one adjustment,
24213 # d0: x/LEN call to binstr - final is 0
24214 # d1: x/0
24234 fmov.l &rz_mode*0x10,%fpcr # force rz for conversion
24245 sub.l &0x3ffd,%d0 # sub bias less 2 to make fract
24250 lsr.l &1,%d2 # shift d2:d3 right, add 0s
24260 add.l &0x00000080,%d3 # inc at bit 7
24262 and.l &0xffffff80,%d3 # strip off lsb not used by 882
24275 # 32 16 15 0
24277 # | 0 | e3 | e2 | e1 | e4 | X | X | X |
24286 # d0: x/LEN call to binstr - final is 0
24287 # d1: x/scratch (0);shift count for final exponent packing
24337 sub.w &0x3ffd,%d0 # subtract off bias
24345 add.l &0x00000080,%d3 # inc at bit 7
24347 and.l &0xffffff80,%d3 # strip off lsb not used by 882
24387 and.b &0x0f,FP_SCR0(%a6) # clear first nibble of FP_SCR0
24394 addq.l &1,%d0 # set bit 0 in d0 for SE
24396 bfins %d0,FP_SCR0(%a6){&0:&2} # insert SM and SE into FP_SCR0
24400 fmov.l &0,%fpsr # clear possible inex2/ainex bits
24401 fmovm.x (%sp)+,&0xe0 # {%fp0-%fp2}
24402 movm.l (%sp)+,&0x4fc # {%d2-%d7/%a2}
24407 long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
24408 long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
24409 long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
24410 long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
24411 long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
24412 long 0x40690000,0x9DC5ADA8,0x2B70B59E # 10 ^ 32
24413 long 0x40D30000,0xC2781F49,0xFFCFA6D5 # 10 ^ 64
24414 long 0x41A80000,0x93BA47C9,0x80E98CE0 # 10 ^ 128
24415 long 0x43510000,0xAA7EEBFB,0x9DF9DE8E # 10 ^ 256
24416 long 0x46A30000,0xE319A0AE,0xA60E91C7 # 10 ^ 512
24417 long 0x4D480000,0xC9767586,0x81750C17 # 10 ^ 1024
24418 long 0x5A920000,0x9E8B3B5D,0xC53D5DE5 # 10 ^ 2048
24419 long 0x75250000,0xC4605202,0x8A20979B # 10 ^ 4096
24423 long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
24424 long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
24425 long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
24426 long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
24427 long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
24428 long 0x40690000,0x9DC5ADA8,0x2B70B59E # 10 ^ 32
24429 long 0x40D30000,0xC2781F49,0xFFCFA6D6 # 10 ^ 64
24430 long 0x41A80000,0x93BA47C9,0x80E98CE0 # 10 ^ 128
24431 long 0x43510000,0xAA7EEBFB,0x9DF9DE8E # 10 ^ 256
24432 long 0x46A30000,0xE319A0AE,0xA60E91C7 # 10 ^ 512
24433 long 0x4D480000,0xC9767586,0x81750C18 # 10 ^ 1024
24434 long 0x5A920000,0x9E8B3B5D,0xC53D5DE5 # 10 ^ 2048
24435 long 0x75250000,0xC4605202,0x8A20979B # 10 ^ 4096
24439 long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
24440 long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
24441 long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
24442 long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
24443 long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
24444 long 0x40690000,0x9DC5ADA8,0x2B70B59D # 10 ^ 32
24445 long 0x40D30000,0xC2781F49,0xFFCFA6D5 # 10 ^ 64
24446 long 0x41A80000,0x93BA47C9,0x80E98CDF # 10 ^ 128
24447 long 0x43510000,0xAA7EEBFB,0x9DF9DE8D # 10 ^ 256
24448 long 0x46A30000,0xE319A0AE,0xA60E91C6 # 10 ^ 512
24449 long 0x4D480000,0xC9767586,0x81750C17 # 10 ^ 1024
24450 long 0x5A920000,0x9E8B3B5D,0xC53D5DE4 # 10 ^ 2048
24451 long 0x75250000,0xC4605202,0x8A20979A # 10 ^ 4096
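The three tables hold 10^1, 10^2, 10^4, ... 10^4096 in extended precision, differing only in the rounding of the low mantissa word, so 10^n can be formed by multiplying together the entries selected by the set bits of n, with the table variant presumably chosen to control the direction of the accumulated rounding error. A C sketch of the binary decomposition using double, which only reaches about 1e308 rather than the extended range (illustrative):

    #include <stdio.h>

    /* Build 10^n by binary decomposition of n over a table of 10^(2^k),
     * the same shape as the extended-precision tables above. The double
     * table stops at 1e256 because double overflows near 1.8e308. */
    static double pow10_by_bits(unsigned n)
    {
        static const double tbl[] = {
            1e1, 1e2, 1e4, 1e8, 1e16, 1e32, 1e64, 1e128, 1e256
        };
        double result = 1.0;
        for (unsigned k = 0; n != 0 && k < 9; k++, n >>= 1)
            if (n & 1)
                result *= tbl[k];   /* multiply in 10^(2^k) for set bits */
        return result;
    }

    int main(void)
    {
        printf("%g\n", pow10_by_bits(37));   /* prints 1e+37 */
        return 0;
    }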
24475 # to force the first byte formed to have a 0 in the upper 4 bits. #
24509 # d7: byte digit formation word;digit count {0,1}
24515 movm.l &0xff00,-(%sp) # {%d0-%d7}
24531 bfextu %d2{&0:&3},%d1 # copy 3 msbs of d2 into d1
24533 bfextu %d3{&0:&3},%d6 # copy 3 msbs of d3 into d6
24541 swap %d6 # put 0 in d6 lower word
24551 swap %d6 # with d6 = 0; put 0 in upper word
24579 movm.l (%sp)+,&0xff # {%d0-%d7}
24616 movq.l &0x1,%d0 # one byte
24619 mov.w &0x0121,EXC_VOFF(%a6) # set FSLW
24623 movq.l &0x2,%d0 # two bytes
24626 mov.w &0x0141,EXC_VOFF(%a6) # set FSLW
24630 movq.l &0x4,%d0 # four bytes
24633 mov.w &0x0101,EXC_VOFF(%a6) # set FSLW
24637 movq.l &0x8,%d0 # eight bytes
24640 mov.w &0x0161,EXC_VOFF(%a6) # set FSLW
24644 movq.l &0xc,%d0 # twelve bytes
24647 mov.w &0x0161,EXC_VOFF(%a6) # set FSLW
24653 movq.l &0x1,%d0 # one byte
24656 mov.w &0x00a1,EXC_VOFF(%a6) # set FSLW
24660 movq.l &0x2,%d0 # two bytes
24663 mov.w &0x00c1,EXC_VOFF(%a6) # set FSLW
24667 movq.l &0x4,%d0 # four bytes
24670 mov.w &0x0081,EXC_VOFF(%a6) # set FSLW
24674 movq.l &0x8,%d0 # eight bytes
24677 mov.w &0x00e1,EXC_VOFF(%a6) # set FSLW
24681 mov.l &0xc,%d0 # twelve bytes
24684 mov.w &0x00e1,EXC_VOFF(%a6) # set FSLW
24691 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
24693 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
24698 mov.l 0x8(%sp),0x4(%sp) # store lo(PC)
24699 mov.l 0xc(%sp),0x8(%sp) # store EA
24700 mov.l &0x00000001,0xc(%sp) # store FSLW
24701 mov.w 0x6(%sp),0xc(%sp) # fix FSLW (size)
24702 mov.w &0x4008,0x6(%sp) # store voff
24704 btst &0x5,(%sp) # supervisor or user mode?
24706 bset &0x2,0xd(%sp) # set supervisor TM bit
24719 mov.b EXC_OPWORD+0x1(%a6),%d1
24720 andi.b &0x38,%d1 # extract opmode
24721 cmpi.b %d1,&0x18 # postinc?
24723 cmpi.b %d1,&0x20 # predec?
24728 mov.b EXC_OPWORD+0x1(%a6),%d1
24729 andi.w &0x0007,%d1 # fetch An
24745 sub.l %d0,EXC_DREGS+0x8(%a6) # fix stacked a0
24748 sub.l %d0,EXC_DREGS+0xc(%a6) # fix stacked a1
24771 cmpi.b EXC_VOFF(%a6),&0x30 # move in or out?
24774 btst &0x5,EXC_SR(%a6) # user or supervisor?