Lines Matching full:d1
645 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
700 mov.b 1+EXC_CMDREG(%a6),%d1
701 andi.w &0x007f,%d1 # extract extension
712 mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
713 jsr (tbl_unsupp.l,%pc,%d1.l*1)
733 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
748 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
767 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
806 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
885 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
943 mov.b 1+EXC_CMDREG(%a6),%d1
944 andi.w &0x007f,%d1 # extract extension
955 mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
956 jsr (tbl_unsupp.l,%pc,%d1.l*1)
983 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1010 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1041 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1080 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1210 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
1301 bfextu 1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension
1306 mov.l (tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr
1307 jsr (tbl_unsupp.l,%pc,%d1.l*1)
1340 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1407 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1572 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1596 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1679 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1694 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1711 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1740 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1764 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1792 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1846 bfextu 1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension
1851 mov.l (tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr
1852 jsr (tbl_unsupp.l,%pc,%d1.l*1)
1892 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1910 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1991 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2022 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2123 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2141 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2191 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2231 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2271 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2318 mov.l FP_SRC_HI(%a6),%d1 # fetch DENORM hi(man)
2319 lsr.l %d0,%d1 # shift it
2320 bset &31,%d1 # set j-bit
2321 mov.l %d1,FP_SRC_HI(%a6) # insert new hi(man)
2339 mov.w &0x3c01,%d1 # pass denorm threshold
2359 clr.l %d1
2466 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
2514 tst.l %d1 # did ifetch fail?
2526 tst.l %d1 # did ifetch fail?
2594 mov.b 1+EXC_CMDREG(%a6),%d1
2595 andi.w &0x007f,%d1 # extract extension
2603 mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
2604 jsr (tbl_unsupp.l,%pc,%d1.l*1)
2639 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2699 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2781 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2798 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2804 # right now, d1 = size and d0 = the strg.
2806 mov.b %d1,EXC_VOFF(%a6) # store strg
2811 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2815 mov.l %d1,-(%sp) # save d1
2841 clr.l %d1
2842 mov.b EXC_VOFF(%a6),%d1 # fetch strg
2844 tst.b %d1
2849 lsl.b &0x1,%d1
2854 lsl.b &0x1,%d1
2859 lsl.b &0x1,%d1
2864 lsl.b &0x1,%d1
2869 lsl.b &0x1,%d1
2874 lsl.b &0x1,%d1
2879 lsl.b &0x1,%d1
2883 mov.l 0x4(%sp),%d1
2900 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2955 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
2975 bfextu %d0{&19:&3},%d1
2977 cmpi.b %d1,&0x7 # move all regs?
2993 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3022 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3044 movc %pcr,%d1
3045 btst &0x1,%d1
3059 movm.l LOCAL_SIZE+EXC_DREGS(%sp),&0x0303 # restore d0-d1/a0-a1
3113 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
3141 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3167 mov.w FP_SRC_EX(%a6),%d1 # fetch exponent
3168 andi.w &0x7fff,%d1
3169 cmpi.w %d1,&0x7fff
3174 mov.l FP_SRC_HI(%a6),%d1
3175 andi.l &0x7fffffff,%d1
3182 mov.l &0x7fffffff,%d1
3185 addq.l &0x1,%d1
3187 mov.l %d1,L_SCR1(%a6)
3191 mov.b 1+EXC_OPWORD(%a6),%d1 # extract <ea> mode,reg
3207 cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3212 tst.l %d1 # did dstore fail?
3217 andi.w &0x0007,%d1
3223 cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3228 tst.l %d1 # did dstore fail?
3233 andi.w &0x0007,%d1
3239 cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3244 tst.l %d1 # did dstore fail?
3249 andi.w &0x0007,%d1
3309 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
3337 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3361 mov.b 1+EXC_OPWORD(%a6),%d1 # extract <ea> mode,reg
3378 cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3383 tst.l %d1 # did dstore fail?
3388 andi.w &0x0007,%d1
3395 cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3400 tst.l %d1 # did dstore fail?
3405 andi.w &0x0007,%d1
3412 cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3417 tst.l %d1 # did dstore fail?
3422 andi.w &0x0007,%d1
3427 cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3432 mov.l FP_SRC_HI(%a6),%d1 # load mantissa
3433 lsr.l &0x8,%d1 # shift mantissa for sgl
3434 or.l %d1,%d0 # create sgl SNAN
3438 tst.l %d1 # did dstore fail?
3446 mov.l %d1,-(%sp)
3447 mov.l FP_SRC_HI(%a6),%d1 # load mantissa
3448 lsr.l &0x8,%d1 # shift mantissa for sgl
3449 or.l %d1,%d0 # create sgl SNAN
3450 mov.l (%sp)+,%d1
3451 andi.w &0x0007,%d1
3459 mov.l FP_SRC_HI(%a6),%d1 # load hi mantissa
3462 lsr.l %d0,%d1
3463 or.l %d1,FP_SCR0_EX(%a6) # create dbl hi
3464 mov.l FP_SRC_HI(%a6),%d1 # load hi mantissa
3465 andi.l &0x000007ff,%d1
3466 ror.l %d0,%d1
3467 mov.l %d1,FP_SCR0_HI(%a6) # store to temp space
3468 mov.l FP_SRC_LO(%a6),%d1 # load lo mantissa
3469 lsr.l %d0,%d1
3470 or.l %d1,FP_SCR0_HI(%a6) # create dbl lo
3476 tst.l %d1 # did dstore fail?
3514 tst.l %d1 # did dstore fail?
3534 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3601 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
3645 bfextu EXC_EXTWORD(%a6){&0:&6},%d1 # extract upper 6 of cmdreg
3646 cmpi.b %d1,&0x17 # is op an fmovecr?
3677 mov.b 1+EXC_CMDREG(%a6),%d1
3678 andi.w &0x007f,%d1 # extract extension
3683 mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
3684 jsr (tbl_unsupp.l,%pc,%d1.l*1)
3694 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3704 mov.b 1+EXC_CMDREG(%a6),%d1
3705 andi.l &0x0000007f,%d1 # pass rom offset
3775 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
3799 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3863 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
3870 bfextu %d0{&0:&10},%d1 # is it an fmovecr?
3871 cmpi.w %d1,&0x03c8
3874 bfextu %d0{&16:&6},%d1 # is it an fmovecr?
3875 cmpi.b %d1,&0x17
3890 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3904 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3918 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
4021 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
4075 bfextu %d0{&16:&6},%d1 # extract upper 6 of cmdreg
4076 cmpi.b %d1,&0x17 # is op an fmovecr?
4085 mov.b 1+EXC_CMDREG(%a6),%d1
4086 andi.w &0x003f,%d1 # extract extension bits
4087 lsl.w &0x3,%d1 # shift right 3 bits
4088 or.b STAG(%a6),%d1 # insert src optag bits
4093 mov.w (tbl_trans.w,%pc,%d1.w*2),%d1
4094 jsr (tbl_trans.w,%pc,%d1.w*1) # emulate
4107 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
4167 mov.b 1+EXC_CMDREG(%a6),%d1
4168 andi.l &0x0000007f,%d1 # pass rom offset in d1
4245 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
4260 bfextu %d0{&10:&3},%d1 # extract mode field
4261 cmpi.b %d1,&0x1 # is it an fdb<cc>?
4263 cmpi.b %d1,&0x7 # is it an fs<cc>?
4265 bfextu %d0{&13:&3},%d1
4266 cmpi.b %d1,&0x2 # is it an fs<cc>?
4305 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
4319 tst.l %d1 # did ifetch fail?
4364 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
4412 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
4433 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
4978 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
5138 mov.l (%a0),%d1 # put exp in hi word
5139 mov.w 4(%a0),%d1 # fetch hi(man)
5140 and.l &0x7FFFFFFF,%d1 # strip sign
5142 cmpi.l %d1,&0x3FD78000 # is |X| >= 2**(-40)?
5147 cmp.l %d1,&0x4004BC7E # is |X| < 15 PI?
5161 mov.l INT(%a6),%d1 # make a copy of N
5162 asl.l &4,%d1 # N *= 16
5163 add.l %d1,%a1 # tbl_addr = a1 + (N*16)
5174 mov.l INT(%a6),%d1
5175 add.l ADJN(%a6),%d1 # SEE IF D0 IS ODD OR EVEN
5176 ror.l &1,%d1 # D0 WAS ODD IFF D0 IS NEGATIVE
5177 cmp.l %d1,&0
5200 ror.l &1,%d1
5201 and.l &0x80000000,%d1
5203 eor.l %d1,X(%a6) # X IS NOW R'= SGN*R
5254 ror.l &1,%d1
5255 and.l &0x80000000,%d1
5260 eor.l %d1,X(%a6) # X IS NOW S'= SGN*S
5261 and.l &0x80000000,%d1
5265 or.l &0x3F800000,%d1 # D0 IS SGN IN SINGLE
5266 mov.l %d1,POSNEG1(%a6)
5301 cmp.l %d1,&0x3FFF8000
5305 mov.l ADJN(%a6),%d1
5306 cmp.l %d1,&0
5315 mov.b &FMOV_OP,%d1 # last inst is MOVE
5348 mov.l (%a0),%d1
5349 mov.w 4(%a0),%d1
5350 and.l &0x7FFFFFFF,%d1 # COMPACTIFY X
5352 cmp.l %d1,&0x3FD78000 # |X| >= 2**(-40)?
5357 cmp.l %d1,&0x4004BC7E # |X| < 15 PI?
5373 mov.l INT(%a6),%d1
5374 asl.l &4,%d1
5375 add.l %d1,%a1 # ADDRESS OF N*PIBY2, IN Y1, Y2
5383 mov.l INT(%a6),%d1
5384 ror.l &1,%d1
5385 cmp.l %d1,&0 # D0 < 0 IFF N IS ODD
5400 mov.l %d1,%d2
5403 eor.l %d1,%d2
5413 ror.l &1,%d1
5414 and.l &0x80000000,%d1
5416 eor.l %d1,POSNEG1(%a6)
5426 eor.l %d1,SPRIME(%a6)
5476 ror.l &1,%d1
5477 and.l &0x80000000,%d1
5482 eor.l %d1,RPRIME(%a6)
5483 eor.l %d1,SPRIME(%a6)
5487 or.l &0x3F800000,%d1
5488 mov.l %d1,POSNEG1(%a6)
5538 cmp.l %d1,&0x3FFF8000
5551 mov.b &FMOV_OP,%d1 # last inst is MOVE
5580 cmp.l %d1,&0x7ffeffff # is arg dangerously large?
5610 mov.w INARG(%a6),%d1
5611 mov.l %d1,%a1 # save a copy of D0
5612 and.l &0x00007FFF,%d1
5613 sub.l &0x00003FFF,%d1 # d0 = K
5614 cmp.l %d1,&28
5617 sub.l &27,%d1 # d0 = L := K-27
5621 clr.l %d1 # d0 = L := 0
5632 sub.l %d1,%d2 # BIASED EXP OF 2**(-L)*(2/PI)
5656 mov.l %d1,%d2 # d2 = L
5663 add.l &0x00003FDD,%d1
5664 mov.w %d1,FP_SCR1_EX(%a6)
5668 mov.b ENDFLAG(%a6),%d1
5693 cmp.b %d1,&0
5706 mov.l ADJN(%a6),%d1
5707 cmp.l %d1,&4
5868 mov.l (%a0),%d1
5869 mov.w 4(%a0),%d1
5870 and.l &0x7FFFFFFF,%d1
5872 cmp.l %d1,&0x3FD78000 # |X| >= 2**(-40)?
5876 cmp.l %d1,&0x4004BC7E # |X| < 15 PI?
5888 fmov.l %fp1,%d1 # CONVERT TO INTEGER
5890 asl.l &4,%d1
5891 add.l %d1,%a1 # ADDRESS N*PIBY2 IN Y1, Y2
5897 ror.l &5,%d1
5898 and.l &0x80000000,%d1 # D0 WAS ODD IFF D0 < 0
5903 cmp.l %d1,&0
5984 cmp.l %d1,&0x3FFF8000
5990 mov.b &FMOV_OP,%d1 # last inst is MOVE
6011 cmp.l %d1,&0x7ffeffff # is arg dangerously large?
6041 mov.w INARG(%a6),%d1
6042 mov.l %d1,%a1 # save a copy of D0
6043 and.l &0x00007FFF,%d1
6044 sub.l &0x00003FFF,%d1 # d0 = K
6045 cmp.l %d1,&28
6048 sub.l &27,%d1 # d0 = L := K-27
6052 clr.l %d1 # d0 = L := 0
6063 sub.l %d1,%d2 # BIASED EXP OF 2**(-L)*(2/PI)
6087 mov.l %d1,%d2 # d2 = L
6094 add.l &0x00003FDD,%d1
6095 mov.w %d1,FP_SCR1_EX(%a6)
6099 mov.b ENDFLAG(%a6),%d1
6124 cmp.b %d1,&0
6137 mov.l INT(%a6),%d1
6138 ror.l &1,%d1
6350 mov.l (%a0),%d1
6351 mov.w 4(%a0),%d1
6353 and.l &0x7FFFFFFF,%d1
6355 cmp.l %d1,&0x3FFB8000 # |X| >= 1/16?
6360 cmp.l %d1,&0x4002FFFF # |X| < 16 ?
6402 mov.l %d1,%d2 # THE EXP AND 16 BITS OF X
6403 and.l &0x00007800,%d1 # 4 VARYING BITS OF F'S FRACTION
6407 add.l %d2,%d1 # THE 7 BITS IDENTIFYING F
6408 asr.l &7,%d1 # INDEX INTO TBL OF ATAN(|F|)
6410 add.l %d1,%a1 # ADDRESS OF ATAN(|F|)
6414 mov.l X(%a6),%d1 # LOAD SIGN AND EXPO. AGAIN
6415 and.l &0x80000000,%d1 # SIGN(F)
6416 or.l %d1,ATANF(%a6) # ATANF IS NOW SIGN(F)*ATAN(|F|)
6452 cmp.l %d1,&0x3FFF8000
6462 cmp.l %d1,&0x3FD78000
6505 mov.b &FMOV_OP,%d1 # last inst is MOVE
6513 cmp.l %d1,&0x40638000
6632 mov.l (%a0),%d1
6633 mov.w 4(%a0),%d1
6634 and.l &0x7FFFFFFF,%d1
6635 cmp.l %d1,&0x3FFF8000
6643 cmp.l %d1,&0x3FD78000
6673 mov.l (%a0),%d1
6674 and.l &0x80000000,%d1 # SIGN BIT OF X
6675 or.l &0x3F800000,%d1 # +-1 IN SGL FORMAT
6676 mov.l %d1,-(%sp) # push SIGN(X) IN SGL-FMT
6684 mov.b &FMOV_OP,%d1 # last inst is MOVE
6733 mov.l (%a0),%d1 # pack exp w/ upper 16 fraction
6734 mov.w 4(%a0),%d1
6735 and.l &0x7FFFFFFF,%d1
6736 cmp.l %d1,&0x3FFF8000
7211 mov.l (%a0),%d1 # load part of input X
7212 and.l &0x7FFF0000,%d1 # biased expo. of X
7213 cmp.l %d1,&0x3FBE0000 # 2^(-65)
7219 mov.w 4(%a0),%d1 # expo. and partial sig. of |X|
7220 cmp.l %d1,&0x400CB167 # 16380 log2 trunc. 16 bits
7233 fmov.l %fp0,%d1 # N = int( X * 64/log2 )
7235 fmov.l %d1,%fp0 # convert to floating-format
7237 mov.l %d1,L_SCR1(%a6) # save N temporarily
7238 and.l &0x3F,%d1 # D0 is J = N mod 64
7239 lsl.l &4,%d1
7240 add.l %d1,%a1 # address of 2^(J/64)
7241 mov.l L_SCR1(%a6),%d1
7242 asr.l &6,%d1 # D0 is M
7243 add.w &0x3FFF,%d1 # biased expo. of 2^(M)
7275 mov.w %d1,SCALE(%a6) # SCALE is 2^(M) in extended
7299 mov.l ADJFLAG(%a6),%d1
7302 tst.l %d1
7308 mov.b &FMUL_OP,%d1 # last inst is MUL
7321 cmp.l %d1,&0x400CB27C # 16480 log2
7330 fmov.l %fp0,%d1 # N = int( X * 64/log2 )
7332 fmov.l %d1,%fp0 # convert to floating-format
7333 mov.l %d1,L_SCR1(%a6) # save N temporarily
7334 and.l &0x3F,%d1 # D0 is J = N mod 64
7335 lsl.l &4,%d1
7336 add.l %d1,%a1 # address of 2^(J/64)
7337 mov.l L_SCR1(%a6),%d1
7338 asr.l &6,%d1 # D0 is K
7339 mov.l %d1,L_SCR1(%a6) # save K temporarily
7340 asr.l &1,%d1 # D0 is M1
7341 sub.l %d1,L_SCR1(%a6) # a1 is M
7342 add.w &0x3FFF,%d1 # biased expo. of 2^(M1)
7343 mov.w %d1,ADJSCALE(%a6) # ADJSCALE := 2^(M1)
7346 mov.l L_SCR1(%a6),%d1 # D0 is M
7347 add.w &0x3FFF,%d1 # biased expo. of 2^(M)
7375 mov.l (%a0),%d1 # load part of input X
7376 and.l &0x7FFF0000,%d1 # biased expo. of X
7377 cmp.l %d1,&0x3FFD0000 # 1/4
7384 mov.w 4(%a0),%d1 # expo. and partial sig. of |X|
7385 cmp.l %d1,&0x4004C215 # 70log2 rounded up to 16 bits
7397 fmov.l %fp0,%d1 # N = int( X * 64/log2 )
7399 fmov.l %d1,%fp0 # convert to floating-format
7401 mov.l %d1,L_SCR1(%a6) # save N temporarily
7402 and.l &0x3F,%d1 # D0 is J = N mod 64
7403 lsl.l &4,%d1
7404 add.l %d1,%a1 # address of 2^(J/64)
7405 mov.l L_SCR1(%a6),%d1
7406 asr.l &6,%d1 # D0 is M
7407 mov.l %d1,L_SCR1(%a6) # save a copy of M
7417 add.w &0x3FFF,%d1 # D0 is biased expo. of 2^M
7436 mov.w %d1,SC(%a6) # SC is 2^(M) in extended
7441 mov.l L_SCR1(%a6),%d1 # D0 is M
7442 neg.w %d1 # D0 is -M
7444 add.w &0x3FFF,%d1 # biased expo. of 2^(-M)
7449 or.w &0x8000,%d1 # signed/expo. of -2^(-M)
7450 mov.w %d1,ONEBYSC(%a6) # OnebySc is -2^(-M)
7469 mov.l L_SCR1(%a6),%d1 # retrieve M
7470 cmp.l %d1,&63
7480 cmp.l %d1,&-3
7503 cmp.l %d1,&0x3FBE0000 # 2^(-65)
7508 cmp.l %d1,&0x00330000 # 2^(-16312)
7516 mov.b &FADD_OP,%d1 # last inst is ADD
7529 mov.b &FMUL_OP,%d1 # last inst is MUL
7586 mov.l (%a0),%d1
7587 cmp.l %d1,&0
7723 mov.l (%a0),%d1
7724 mov.w 4(%a0),%d1
7725 and.l &0x7FFFFFFF,%d1
7726 cmp.l %d1,&0x400CB167
7747 mov.b &FADD_OP,%d1 # last inst is ADD
7752 cmp.l %d1,&0x400CB2B3
7768 mov.b &FMUL_OP,%d1 # last inst is MUL
7835 mov.l (%a0),%d1
7836 mov.w 4(%a0),%d1
7837 mov.l %d1,%a1 # save (compacted) operand
7838 and.l &0x7FFFFFFF,%d1
7839 cmp.l %d1,&0x400CB167
7860 mov.l %a1,%d1
7861 and.l &0x80000000,%d1
7862 or.l &0x3F000000,%d1
7864 mov.l %d1,-(%sp)
7867 mov.b &FMUL_OP,%d1 # last inst is MUL
7872 cmp.l %d1,&0x400CB2B3
7878 mov.l %a1,%d1
7879 and.l &0x80000000,%d1
7880 or.l &0x7FFB0000,%d1
7881 mov.l %d1,-(%sp) # EXTENDED FMT
7893 mov.b &FMUL_OP,%d1 # last inst is MUL
7961 mov.l (%a0),%d1
7962 mov.w 4(%a0),%d1
7963 mov.l %d1,X(%a6)
7964 and.l &0x7FFFFFFF,%d1
7965 cmp.l %d1, &0x3fd78000 # is |X| < 2^(-40)?
7967 cmp.l %d1, &0x3fffddce # is |X| > (5/2)LOG2?
7973 mov.l X(%a6),%d1
7974 mov.l %d1,SGN(%a6)
7975 and.l &0x7FFF0000,%d1
7976 add.l &0x00010000,%d1 # EXPONENT OF 2|X|
7977 mov.l %d1,X(%a6)
7991 mov.l SGN(%a6),%d1
7993 eor.l %d1,V(%a6)
8000 cmp.l %d1,&0x3FFF8000
8003 cmp.l %d1,&0x40048AA1
8010 mov.l X(%a6),%d1
8011 mov.l %d1,SGN(%a6)
8012 and.l &0x7FFF0000,%d1
8013 add.l &0x00010000,%d1 # EXPO OF 2|X|
8014 mov.l %d1,X(%a6) # Y = 2|X|
8016 mov.l SGN(%a6),%d1
8026 mov.l SGN(%a6),%d1
8029 eor.l &0xC0000000,%d1 # -SIGN(X)*2
8030 fmov.s %d1,%fp1 # -SIGN(X)*2 IN SGL FMT
8033 mov.l SGN(%a6),%d1
8034 or.l &0x3F800000,%d1 # SGN
8035 fmov.s %d1,%fp0 # SGN IN SGL FMT
8038 mov.b &FADD_OP,%d1 # last inst is ADD
8044 mov.b &FMOV_OP,%d1 # last inst is MOVE
8050 mov.l X(%a6),%d1
8051 and.l &0x80000000,%d1
8052 or.l &0x3F800000,%d1
8053 fmov.s %d1,%fp0
8054 and.l &0x80000000,%d1
8055 eor.l &0x80800000,%d1 # -SIGN(X)*EPS
8058 fadd.s %d1,%fp0
8327 mov.l (%a0),%d1
8328 mov.w 4(%a0),%d1
8334 cmp.l %d1,&0 # CHECK IF X IS NEGATIVE
8337 cmp.l %d1,&0x3ffef07d # IS X < 15/16?
8339 cmp.l %d1,&0x3fff8841 # IS X > 17/16?
8355 asr.l &8,%d1
8356 asr.l &8,%d1 # SHIFTED 16 BITS, BIASED EXPO. OF X
8357 sub.l &0x3FFF,%d1 # THIS IS K
8358 add.l ADJK(%a6),%d1 # ADJUST K, ORIGINAL INPUT MAY BE DENORM.
8360 fmov.l %d1,%fp1 # CONVERT K TO FLOATING-POINT FORMAT
8367 mov.l FFRAC(%a6),%d1 # READY TO GET ADDRESS OF 1/F
8368 and.l &0x7E000000,%d1
8369 asr.l &8,%d1
8370 asr.l &8,%d1
8371 asr.l &4,%d1 # SHIFTED 20, D0 IS THE DISPLACEMENT
8372 add.l %d1,%a0 # A0 IS THE ADDRESS FOR 1/F
8553 mov.b &FMOV_OP,%d1 # last inst is MOVE
8564 mov.l X(%a6),%d1
8565 cmp.l %d1,&0
8567 cmp.l %d1,&0x3ffe8000 # IS BOUNDS [1/2,3/2]?
8569 cmp.l %d1,&0x3fffc000
8577 cmp.l %d1,&0x3ffef07d
8579 cmp.l %d1,&0x3fff8841
8603 cmp.l %d1,&0x3FFF8000 # SEE IF 1+Z > 1
8611 mov.l FFRAC(%a6),%d1
8612 and.l &0x7E000000,%d1
8613 asr.l &8,%d1
8614 asr.l &8,%d1
8615 asr.l &4,%d1 # D0 CONTAINS DISPLACEMENT FOR 1/F
8620 add.l %d1,%a0
8629 mov.l FFRAC(%a6),%d1
8630 and.l &0x7E000000,%d1
8631 asr.l &8,%d1
8632 asr.l &8,%d1
8633 asr.l &4,%d1
8637 add.l %d1,%a0 # A0 IS ADDRESS OF 1/F
8643 cmp.l %d1,&0
8707 mov.l (%a0),%d1
8708 mov.w 4(%a0),%d1
8709 and.l &0x7FFFFFFF,%d1
8710 cmp.l %d1,&0x3FFF8000
8722 mov.l (%a0),%d1
8723 and.l &0x80000000,%d1
8724 or.l &0x3F000000,%d1 # SIGN(X)*HALF
8725 mov.l %d1,-(%sp)
8736 mov.b &FMUL_OP,%d1 # last inst is MUL
8775 # flag. Otherwise, save FPCR in D1; set FpCR to default. #
8789 # flag. Otherwise, save FPCR in D1; set FpCR to default. #
8802 # flag. Otherwise, save FPCR in D1; set FpCR to default. #
8816 # flag. Otherwise, save FPCR in D1; set FpCR to default. #
8849 mov.l (%a0),%d1
8861 mov.l (%a0),%d1
8873 mov.l (%a0),%d1
8876 mov.l 8(%a0),%d1
8879 mov.l 4(%a0),%d1
8880 and.l &0x7FFFFFFF,%d1
8884 mov.w (%a0),%d1
8885 and.l &0x00007FFF,%d1
8886 sub.l &0x3FFF,%d1
8889 fmov.l %d1,%fp0
8906 mov.l (%a0),%d1
9094 mov.l (%a0),%d1
9095 mov.w 4(%a0),%d1
9097 and.l &0x7FFFFFFF,%d1
9099 cmp.l %d1,&0x3FB98000 # |X| >= 2**(-70)?
9104 cmp.l %d1,&0x400D80C0 # |X| > 16480?
9117 mov.l INT(%a6),%d1
9118 mov.l %d1,%d2
9119 and.l &0x3F,%d1 # D0 IS J
9120 asl.l &4,%d1 # DISPLACEMENT FOR 2^(J/64)
9121 add.l %d1,%a1 # ADDRESS FOR 2^(J/64)
9123 mov.l %d2,%d1
9124 asr.l &1,%d1 # D0 IS M
9125 sub.l %d1,%d2 # d2 IS M', N = 64(M+M') + J
9146 add.w %d1,FACT1(%a6)
9148 add.w %d1,FACT2(%a6)
9154 cmp.l %d1,&0x3FFF8000
9166 mov.l X(%a6),%d1
9167 cmp.l %d1,&0
9181 mov.l (%a0),%d1
9182 or.l &0x00800001,%d1
9183 fadd.s %d1,%fp0
9191 mov.l (%a0),%d1
9192 mov.w 4(%a0),%d1
9194 and.l &0x7FFFFFFF,%d1
9196 cmp.l %d1,&0x3FB98000 # |X| >= 2**(-70)?
9201 cmp.l %d1,&0x400B9B07 # |X| <= 16480*log2/log10 ?
9214 mov.l INT(%a6),%d1
9215 mov.l %d1,%d2
9216 and.l &0x3F,%d1 # D0 IS J
9217 asl.l &4,%d1 # DISPLACEMENT FOR 2^(J/64)
9218 add.l %d1,%a1 # ADDRESS FOR 2^(J/64)
9220 mov.l %d2,%d1
9221 asr.l &1,%d1 # D0 IS M
9222 sub.l %d1,%d2 # d2 IS M', N = 64(M+M') + J
9250 add.w %d1,FACT1(%a6)
9251 add.w %d1,FACT2(%a6)
9295 mov.b &FMUL_OP,%d1 # last inst is MUL
9305 mov.l (%a0),%d1
9306 or.l &0x00800001,%d1
9307 fadd.s %d1,%fp0
9311 # smovcr(): returns the ROM constant at the offset specified in d1 #
9316 # d1 = ROM offset #
9325 mov.l %d1,-(%sp) # save rom offset for a sec
9328 mov.l %d0,%d1 # make a copy
9329 andi.w &0x3,%d1 # extract rnd mode
9332 mov.w %d1,%d0 # put rnd mode in lo
9334 mov.l (%sp)+,%d1 # get rom offset
9339 tst.b %d1 # if zero, offset is to pi
9341 cmpi.b %d1,&0x0a # check range $01 - $0a
9343 cmpi.b %d1,&0x0e # check range $0b - $0e
9345 cmpi.b %d1,&0x2f # check range $10 - $2f
9347 cmpi.b %d1,&0x3f # check range $30 - $3f
9386 subi.b &0xb,%d1 # make offset in 0-4 range
9392 cmpi.b %d1,&0x2 # is result log10(e)?
9428 subi.b &0x30,%d1 # make offset in 0-f range
9434 cmpi.b %d1,&0x1 # is offset <= $31?
9436 cmpi.b %d1,&0x7 # is $32 <= offset <= $37?
9453 mulu.w &0xc,%d1 # offset points into tables
9460 fmovm.x (%a0,%d1.w),&0x80 # return result in fp0
9470 mov.w 0x0(%a0,%d1.w),FP_SCR1_EX(%a6) # load first word
9471 mov.l 0x4(%a0,%d1.w),FP_SCR1_HI(%a6) # load second word
9472 mov.l 0x8(%a0,%d1.w),FP_SCR1_LO(%a6) # load third word
9473 mov.l %d0,%d1
9583 mov.w DST_EX(%a1),%d1 # get dst exponent
9585 andi.l &0x00007fff,%d1 # strip sign from dst exp
9633 mov.l &0x80000000,%d1 # load normalized mantissa
9638 lsr.l %d0,%d1 # no; bit stays in upper lw
9640 mov.l %d1,-(%sp) # insert new high mantissa
9645 lsr.l %d0,%d1 # make low mantissa longword
9646 mov.l %d1,-(%sp) # insert new low mantissa
9666 mov.b &FMUL_OP,%d1 # last inst is MUL
9691 mov.b &FMOV_OP,%d1 # last inst is MOVE
9695 mov.l (%sp)+,%d0 # load control bits into d1
9840 mov.w SignY(%a6),%d1
9841 eor.l %d0,%d1
9842 and.l &0x00008000,%d1
9843 mov.w %d1,SignQ(%a6) # sign(Q) obtained
9845 mov.l DST_HI(%a1),%d1
9846 mov.l DST_LO(%a1),%d2 # (D0,D1,D2) is |X|
9850 tst.l %d1
9854 mov.l %d2,%d1
9858 bfffo %d1{&0:&32},%d6
9859 lsl.l %d6,%d1
9860 sub.l %d6,%d0 # (D0,D1,D2) is normalized
9866 bfffo %d1{&0:&32},%d6
9868 lsl.l %d6,%d1
9874 or.l %d7,%d1 # (D0,D1,D2) normalized
9879 add.l &0x00003FFE,%d0 # (D0,D1,D2) normalized
9892 #..(Carry,D1,D2) is R
9908 #..At this point carry = 0, R = (D1,D2), Y = (D4,D5)
9909 cmp.l %d1,%d4 # compare hi(R) and hi(Y)
9922 #..If Carry is set, then Y < (Carry,D1,D2) < 2Y. Otherwise, Carry = 0
9923 #..and Y < (D1,D2) < 2Y. Either way, perform R - Y
9925 subx.l %d4,%d1 # hi(R) - hi(Y)
9936 roxl.l &1,%d1 # hi(R) = 2hi(R) + carry
9940 #..At this point, R=(Carry,D1,D2) = 2^(k-L)X - QY, j+k=L, j >= 0, R < 2Y.
9945 #..k = L, j = 0, Carry = 0, R = (D1,D2) = X - QY, R < Y.
9949 tst.l %d1
9953 mov.l %d2,%d1
9957 bfffo %d1{&0:&32},%d6
9958 lsl.l %d6,%d1
9959 sub.l %d6,%d0 # (D0,D1,D2) is normalized
9965 bfffo %d1{&0:&32},%d6
9968 lsl.l %d6,%d1
9974 or.l %d7,%d1 # (D0,D1,D2) normalized
9982 mov.l %d1,R_Hi(%a6)
9992 mov.l %d1,R_Hi(%a6)
10016 cmp.l %d1,%d4
10060 mov.b &FMUL_OP,%d1 # last inst is MUL
10071 mov.b &FMOV_OP,%d1 # last inst is MOVE
10179 mov.l %d0,%d1 # make copy of rnd prec,mode
10180 andi.b &0xc0,%d1 # extended precision?
10194 smi.b %d1 # set d0 accordingly
10241 smi.b %d1 # set d1 accordingly
10253 sf.b %d1 # set d0 to represent positive
10278 mov.b %d0,%d1 # fetch rnd mode/prec
10279 andi.b &0xc0,%d1 # extract rnd prec
10292 movm.l &0xc080,-(%sp) # save d0-d1/a0
10294 movm.l (%sp)+,&0x0103 # restore d0-d1/a0
10297 cmpi.b %d1,&0x40 # is prec dbl?
10306 mov.l LOCAL_LO(%a0),%d1 # are any of lo 11 bits of
10307 andi.l &0x7ff,%d1 # dbl mantissa set?
10319 smi.b %d1 # set d1 accordingly
10332 sf.b %d1 # clear sign flag for positive
10411 # d1.b : sign bit of result ('11111111 = (-) ; '00000000 = (+)) #
10418 andi.w &0x10,%d1 # keep sign bit in 4th spot
10422 or.b %d1,%d0 # concat {sgn,mode,prec}
10424 mov.l %d0,%d1 # make a copy
10425 lsl.b &0x1,%d1 # mult index 2 by 2
10428 lea (tbl_unf_result.b,%pc,%d1.w*8),%a0 # grab result ptr
10745 mov.b DTAG(%a6),%d1
10747 cmpi.b %d1,&ZERO
10749 cmpi.b %d1,&INF
10751 cmpi.b %d1,&DENORM
10753 cmpi.b %d1,&SNAN
10759 mov.b DTAG(%a6),%d1
10761 cmpi.b %d1,&ZERO
10763 cmpi.b %d1,&INF
10765 cmpi.b %d1,&DENORM
10767 cmpi.b %d1,&QNAN
10773 mov.b DTAG(%a6),%d1
10775 cmpi.b %d1,&ZERO
10777 cmpi.b %d1,&INF
10779 cmpi.b %d1,&DENORM
10781 cmpi.b %d1,&QNAN
10787 mov.b SRC_EX(%a0),%d1 # get src sign
10789 eor.b %d0,%d1 # get qbyte sign
10790 andi.b &0x80,%d1
10791 mov.b %d1,FPSR_QBYTE(%a6)
10800 mov.b SRC_EX(%a0),%d1 # get src sign
10802 eor.b %d0,%d1 # get qbyte sign
10803 andi.b &0x80,%d1
10804 mov.b %d1,FPSR_QBYTE(%a6)
10826 mov.b DTAG(%a6),%d1
10828 cmpi.b %d1,&ZERO
10830 cmpi.b %d1,&INF
10832 cmpi.b %d1,&DENORM
10834 cmpi.b %d1,&QNAN
10840 mov.b DTAG(%a6),%d1
10842 cmpi.b %d1,&ZERO
10844 cmpi.b %d1,&INF
10846 cmpi.b %d1,&DENORM
10848 cmpi.b %d1,&QNAN
10854 mov.b DTAG(%a6),%d1
10856 cmpi.b %d1,&ZERO
10858 cmpi.b %d1,&INF
10860 cmpi.b %d1,&DENORM
10862 cmpi.b %d1,&QNAN
10871 mov.b DTAG(%a6),%d1
10873 cmpi.b %d1,&ZERO
10875 cmpi.b %d1,&INF
10877 cmpi.b %d1,&DENORM
10879 cmpi.b %d1,&QNAN
10885 mov.b DTAG(%a6),%d1
10887 cmpi.b %d1,&ZERO
10889 cmpi.b %d1,&INF
10891 cmpi.b %d1,&DENORM
10893 cmpi.b %d1,&QNAN
10899 mov.b DTAG(%a6),%d1
10901 cmpi.b %d1,&QNAN
10903 cmpi.b %d1,&SNAN
10914 mov.b DTAG(%a6),%d1
10915 cmpi.b %d1,&QNAN
10917 cmpi.b %d1,&SNAN
10926 mov.b DTAG(%a6),%d1
10927 cmpi.b %d1,&QNAN
10929 cmpi.b %d1,&SNAN
10998 mov.b STAG(%a6),%d1
11000 cmpi.b %d1,&ZERO
11002 cmpi.b %d1,&INF
11004 cmpi.b %d1,&DENORM
11006 cmpi.b %d1,&QNAN
11012 mov.b STAG(%a6),%d1
11014 cmpi.b %d1,&ZERO
11016 cmpi.b %d1,&INF
11018 cmpi.b %d1,&DENORM
11020 cmpi.b %d1,&QNAN
11026 mov.b STAG(%a6),%d1
11028 cmpi.b %d1,&ZERO
11030 cmpi.b %d1,&INF
11032 cmpi.b %d1,&DENORM
11034 cmpi.b %d1,&QNAN
11040 mov.b STAG(%a6),%d1
11042 cmpi.b %d1,&ZERO
11044 cmpi.b %d1,&INF
11046 cmpi.b %d1,&DENORM
11048 cmpi.b %d1,&QNAN
11054 mov.b STAG(%a6),%d1
11056 cmpi.b %d1,&ZERO
11058 cmpi.b %d1,&INF
11060 cmpi.b %d1,&DENORM
11062 cmpi.b %d1,&QNAN
11068 mov.b STAG(%a6),%d1
11070 cmpi.b %d1,&ZERO
11072 cmpi.b %d1,&INF
11074 cmpi.b %d1,&DENORM
11076 cmpi.b %d1,&QNAN
11082 mov.b STAG(%a6),%d1
11084 cmpi.b %d1,&ZERO
11086 cmpi.b %d1,&INF
11088 cmpi.b %d1,&DENORM
11090 cmpi.b %d1,&QNAN
11096 mov.b STAG(%a6),%d1
11098 cmpi.b %d1,&ZERO
11100 cmpi.b %d1,&INF
11102 cmpi.b %d1,&DENORM
11104 cmpi.b %d1,&QNAN
11110 mov.b STAG(%a6),%d1
11112 cmpi.b %d1,&ZERO
11114 cmpi.b %d1,&INF
11116 cmpi.b %d1,&DENORM
11118 cmpi.b %d1,&QNAN
11124 mov.b STAG(%a6),%d1
11126 cmpi.b %d1,&ZERO
11128 cmpi.b %d1,&INF
11130 cmpi.b %d1,&DENORM
11132 cmpi.b %d1,&QNAN
11138 mov.b STAG(%a6),%d1
11140 cmpi.b %d1,&ZERO
11142 cmpi.b %d1,&INF
11144 cmpi.b %d1,&DENORM
11146 cmpi.b %d1,&QNAN
11152 mov.b STAG(%a6),%d1
11154 cmpi.b %d1,&ZERO
11156 cmpi.b %d1,&INF
11158 cmpi.b %d1,&DENORM
11160 cmpi.b %d1,&QNAN
11166 mov.b STAG(%a6),%d1
11168 cmpi.b %d1,&ZERO
11170 cmpi.b %d1,&INF
11172 cmpi.b %d1,&DENORM
11174 cmpi.b %d1,&QNAN
11180 mov.b STAG(%a6),%d1
11182 cmpi.b %d1,&ZERO
11184 cmpi.b %d1,&INF
11186 cmpi.b %d1,&DENORM
11188 cmpi.b %d1,&QNAN
11194 mov.b STAG(%a6),%d1
11196 cmpi.b %d1,&ZERO
11198 cmpi.b %d1,&INF
11200 cmpi.b %d1,&DENORM
11202 cmpi.b %d1,&QNAN
11208 mov.b STAG(%a6),%d1
11210 cmpi.b %d1,&ZERO
11212 cmpi.b %d1,&INF
11214 cmpi.b %d1,&DENORM
11216 cmpi.b %d1,&QNAN
11222 mov.b STAG(%a6),%d1
11224 cmpi.b %d1,&ZERO
11226 cmpi.b %d1,&INF
11228 cmpi.b %d1,&DENORM
11230 cmpi.b %d1,&QNAN
11236 mov.b STAG(%a6),%d1
11238 cmpi.b %d1,&ZERO
11240 cmpi.b %d1,&INF
11242 cmpi.b %d1,&DENORM
11244 cmpi.b %d1,&QNAN
11250 mov.b STAG(%a6),%d1
11252 cmpi.b %d1,&ZERO
11254 cmpi.b %d1,&INF
11256 cmpi.b %d1,&DENORM
11258 cmpi.b %d1,&QNAN
11264 mov.b STAG(%a6),%d1
11266 cmpi.b %d1,&ZERO
11268 cmpi.b %d1,&INF
11270 cmpi.b %d1,&DENORM
11272 cmpi.b %d1,&QNAN
11278 mov.b STAG(%a6),%d1
11280 cmpi.b %d1,&ZERO
11282 cmpi.b %d1,&INF
11284 cmpi.b %d1,&DENORM
11286 cmpi.b %d1,&QNAN
11292 mov.b STAG(%a6),%d1
11294 cmpi.b %d1,&ZERO
11296 cmpi.b %d1,&INF
11298 cmpi.b %d1,&DENORM
11300 cmpi.b %d1,&QNAN
11306 mov.b STAG(%a6),%d1
11308 cmpi.b %d1,&ZERO
11310 cmpi.b %d1,&INF
11312 cmpi.b %d1,&DENORM
11314 cmpi.b %d1,&QNAN
11320 mov.b STAG(%a6),%d1
11322 cmpi.b %d1,&ZERO
11324 cmpi.b %d1,&INF
11326 cmpi.b %d1,&DENORM
11328 cmpi.b %d1,&QNAN
11383 cmpi.b %d1,&FMOV_OP
11385 cmpi.b %d1,&FADD_OP
11581 clr.w %d1
11582 mov.b DTAG(%a6),%d1
11583 lsl.b &0x3,%d1
11584 or.b STAG(%a6),%d1 # combine src tags
11603 mov.w 2+L_SCR3(%a6),%d1 # fetch precision
11604 lsr.b &0x6,%d1 # shift to lo bits
11606 cmp.l %d0,(tbl_fmul_ovfl.w,%pc,%d1.w*4) # would result ovfl?
11610 cmp.l %d0,(tbl_fmul_unfl.w,%pc,%d1.w*4) # would result unfl?
11630 fmov.l %fpsr,%d1 # save status
11633 or.l %d1,USER_FPSR(%a6) # save INEX2,N
11638 mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
11639 mov.l %d1,%d2 # make a copy
11640 andi.l &0x7fff,%d1 # strip sign
11642 sub.l %d0,%d1 # add scale factor
11643 or.w %d2,%d1 # concat old sign,new exp
11644 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
11670 fmov.l %fpsr,%d1 # save status
11673 or.l %d1,USER_FPSR(%a6) # save INEX2,N
11679 mov.b FPCR_ENABLE(%a6),%d1
11680 andi.b &0x13,%d1 # is OVFL or INEX enabled?
11686 sne %d1 # set sign param accordingly
11700 mov.l L_SCR3(%a6),%d1
11701 andi.b &0xc0,%d1 # test the rnd prec
11708 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
11709 mov.w %d1,%d2 # make a copy
11710 andi.l &0x7fff,%d1 # strip sign
11711 sub.l %d0,%d1 # add scale factor
11712 subi.l &0x6000,%d1 # subtract bias
11713 andi.w &0x7fff,%d1 # clear sign bit
11715 or.w %d2,%d1 # concat old sign,new exp
11716 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
11724 mov.l L_SCR3(%a6),%d1
11725 andi.b &0x30,%d1 # keep rnd mode only
11726 fmov.l %d1,%fpcr # set FPCR
11748 fmov.l %fpsr,%d1 # save status
11751 or.l %d1,USER_FPSR(%a6) # save INEX2,N
11786 fmov.l %fpsr,%d1 # save status
11789 or.l %d1,USER_FPSR(%a6) # save INEX2,N
11791 mov.b FPCR_ENABLE(%a6),%d1
11792 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
11799 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
11811 mov.l L_SCR3(%a6),%d1
11812 andi.b &0xc0,%d1 # is precision extended?
11828 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
11829 mov.l %d1,%d2 # make a copy
11830 andi.l &0x7fff,%d1 # strip sign
11832 sub.l %d0,%d1 # add scale factor
11833 addi.l &0x6000,%d1 # add bias
11834 andi.w &0x7fff,%d1
11835 or.w %d2,%d1 # concat old sign,new exp
11836 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
11842 mov.l L_SCR3(%a6),%d1
11843 andi.b &0x30,%d1 # use only rnd mode
11844 fmov.l %d1,%fpcr # set FPCR
11859 fmov.l %fpsr,%d1 # save status
11862 or.l %d1,USER_FPSR(%a6) # save INEX2,N
11878 mov.l L_SCR3(%a6),%d1
11879 andi.b &0xc0,%d1 # keep rnd prec
11880 ori.b &rz_mode*0x10,%d1 # insert RZ
11882 fmov.l %d1,%fpcr # set FPCR
11899 mov.w (tbl_fmul_op.b,%pc,%d1.w*2),%d1
11900 jmp (tbl_fmul_op.b,%pc,%d1.w)
11971 mov.b DST_EX(%a1),%d1
11972 eor.b %d0,%d1
11995 mov.b DST_EX(%a1),%d1
11996 eor.b %d0,%d1
12012 mov.b DST_EX(%a1),%d1
12013 eor.b %d0,%d1
12066 mov.b STAG(%a6),%d1 # fetch src optype tag
12119 mov.w FP_SCR0_EX(%a6),%d1 # fetch old sign,exp
12120 andi.w &0x8000,%d1 # keep old sign
12122 or.w %d1,%d0 # concat new exo,old sign
12158 fmov.l %fpsr,%d1 # save FPSR
12161 or.l %d1,USER_FPSR(%a6) # save INEX2,N
12166 mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
12167 mov.w %d1,%d2 # make a copy
12168 andi.l &0x7fff,%d1 # strip sign
12169 sub.l %d0,%d1 # add scale factor
12171 or.w %d1,%d2 # concat old sign,new exponent
12205 mov.b FPCR_ENABLE(%a6),%d1
12206 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
12211 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
12224 mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
12227 mov.w %d1,%d2 # make a copy
12228 andi.l &0x7fff,%d1 # strip sign
12229 sub.l %d0,%d1 # subtract scale factor
12231 addi.l &0x6000,%d1 # add new bias
12232 andi.w &0x7fff,%d1
12233 or.w %d1,%d2 # concat old sign,new exp
12249 fmov.l %fpsr,%d1 # save FPSR
12251 or.l %d1,USER_FPSR(%a6) # save INEX2,N
12256 mov.b FPCR_ENABLE(%a6),%d1
12257 andi.b &0x13,%d1 # is OVFL or INEX enabled?
12266 sne %d1 # set sign param accordingly
12280 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
12281 mov.l %d1,%d2 # make a copy
12282 andi.l &0x7fff,%d1 # strip sign
12284 sub.l %d0,%d1 # add scale factor
12285 sub.l &0x6000,%d1 # subtract bias
12286 andi.w &0x7fff,%d1
12287 or.w %d2,%d1
12288 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
12302 fmov.l %fpsr,%d1 # save status
12305 or.l %d1,USER_FPSR(%a6) # save INEX2,N
12320 cmpi.b %d1,&DENORM # weed out DENORM
12322 cmpi.b %d1,&SNAN # weed out SNANs
12324 cmpi.b %d1,&QNAN # weed out QNANs
12399 clr.w %d1
12400 mov.b DTAG(%a6),%d1
12401 lsl.b &0x3,%d1
12402 or.b STAG(%a6),%d1 # combine src tags
12426 mov.w 2+L_SCR3(%a6),%d1 # fetch precision
12427 lsr.b &0x6,%d1 # shift to lo bits
12429 cmp.l %d0,(tbl_fdiv_ovfl.b,%pc,%d1.w*4) # will result overflow?
12432 cmp.l %d0,(tbl_fdiv_unfl.w,%pc,%d1.w*4) # will result underflow?
12444 fmov.l %fpsr,%d1 # save FPSR
12447 or.l %d1,USER_FPSR(%a6) # save INEX2,N
12452 mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
12453 mov.l %d1,%d2 # make a copy
12454 andi.l &0x7fff,%d1 # strip sign
12456 sub.l %d0,%d1 # add scale factor
12457 or.w %d2,%d1 # concat old sign,new exp
12458 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
12492 cmp.l %d0,(tbl_fdiv_ovfl2.b,%pc,%d1.w*4)
12499 mov.b FPCR_ENABLE(%a6),%d1
12500 andi.b &0x13,%d1 # is OVFL or INEX enabled?
12505 sne %d1 # set sign param accordingly
12513 mov.l L_SCR3(%a6),%d1
12514 andi.b &0xc0,%d1 # is precision extended?
12521 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
12522 mov.w %d1,%d2 # make a copy
12523 andi.l &0x7fff,%d1 # strip sign
12524 sub.l %d0,%d1 # add scale factor
12525 subi.l &0x6000,%d1 # subtract bias
12526 andi.w &0x7fff,%d1 # clear sign bit
12528 or.w %d2,%d1 # concat old sign,new exp
12529 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
12537 mov.l L_SCR3(%a6),%d1
12538 andi.b &0x30,%d1 # keep rnd mode
12539 fmov.l %d1,%fpcr # set FPCR
12556 fmov.l %fpsr,%d1 # save status
12559 or.l %d1,USER_FPSR(%a6) # save INEX2,N
12561 mov.b FPCR_ENABLE(%a6),%d1
12562 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
12569 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
12581 mov.l L_SCR3(%a6),%d1
12582 andi.b &0xc0,%d1 # is precision extended?
12596 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
12597 mov.l %d1,%d2 # make a copy
12598 andi.l &0x7fff,%d1 # strip sign
12600 sub.l %d0,%d1 # add scale factor
12601 addi.l &0x6000,%d1 # add bias
12602 andi.w &0x7fff,%d1
12603 or.w %d2,%d1 # concat old sign,new exp
12604 mov.w %d1,FP_SCR0_EX(%a6) # insert new exp
12610 mov.l L_SCR3(%a6),%d1
12611 andi.b &0x30,%d1 # use only rnd mode
12612 fmov.l %d1,%fpcr # set FPCR
12627 fmov.l %fpsr,%d1 # save status
12630 or.l %d1,USER_FPSR(%a6) # save INEX2,N
12646 mov.l L_SCR3(%a6),%d1
12647 andi.b &0xc0,%d1 # keep rnd prec
12648 ori.b &rz_mode*0x10,%d1 # insert RZ
12650 fmov.l %d1,%fpcr # set FPCR
12667 mov.w (tbl_fdiv_op.b,%pc,%d1.w*2),%d1
12668 jmp (tbl_fdiv_op.b,%pc,%d1.w*1)
12736 mov.b DST_EX(%a1),%d1 # or of input signs.
12737 eor.b %d0,%d1
12756 mov.b DST_EX(%a1),%d1
12757 eor.b %d0,%d1
12776 mov.b SRC_EX(%a0),%d1
12777 eor.b %d0,%d1
12839 mov.b STAG(%a6),%d1
12899 mov.w FP_SCR0_EX(%a6),%d1 # fetch old sign,exp
12900 andi.w &0x8000,%d1 # keep old sign
12902 or.w %d1,%d0 # concat old sign, new exponent
12938 fmov.l %fpsr,%d1 # save FPSR
12941 or.l %d1,USER_FPSR(%a6) # save INEX2,N
12946 mov.w FP_SCR0_EX(%a6),%d1 # load sgn,exp
12947 mov.w %d1,%d2 # make a copy
12948 andi.l &0x7fff,%d1 # strip sign
12949 sub.l %d0,%d1 # add scale factor
12951 or.w %d1,%d2 # concat old sign,new exp
12985 mov.b FPCR_ENABLE(%a6),%d1
12986 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
12991 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
13004 mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
13007 mov.l %d1,%d2 # make a copy
13008 andi.l &0x7fff,%d1 # strip sign
13010 sub.l %d0,%d1 # subtract scale factor
13011 addi.l &0x6000,%d1 # add new bias
13012 andi.w &0x7fff,%d1
13013 or.w %d2,%d1 # concat new sign,new exp
13014 mov.w %d1,FP_SCR1_EX(%a6) # insert new exp
13029 fmov.l %fpsr,%d1 # save FPSR
13031 or.l %d1,USER_FPSR(%a6) # save INEX2,N
13036 mov.b FPCR_ENABLE(%a6),%d1
13037 andi.b &0x13,%d1 # is OVFL or INEX enabled?
13046 sne %d1 # set sign param accordingly
13060 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
13061 mov.l %d1,%d2 # make a copy
13062 andi.l &0x7fff,%d1 # strip sign
13064 sub.l %d0,%d1 # add scale factor
13065 subi.l &0x6000,%d1 # subtract bias
13066 andi.w &0x7fff,%d1
13067 or.w %d2,%d1 # concat sign,exp
13068 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
13082 fmov.l %fpsr,%d1 # save status
13085 or.l %d1,USER_FPSR(%a6) # save INEX2,N
13100 cmpi.b %d1,&DENORM # weed out DENORM
13102 cmpi.b %d1,&SNAN # weed out SNAN
13104 cmpi.b %d1,&QNAN # weed out QNAN
13139 mov.b STAG(%a6),%d1
13157 cmpi.b %d1,&ZERO # weed out ZERO
13159 cmpi.b %d1,&INF # weed out INF
13161 cmpi.b %d1,&SNAN # weed out SNAN
13163 cmpi.b %d1,&QNAN # weed out QNAN
13231 mov.b STAG(%a6),%d1
13255 cmpi.b %d1,&ZERO # weed out ZERO
13257 cmpi.b %d1,&INF # weed out INF
13259 cmpi.b %d1,&DENORM # weed out DENORM
13261 cmpi.b %d1,&SNAN # weed out SNAN
13337 mov.b STAG(%a6),%d1
13357 cmpi.b %d1,&ZERO # weed out ZERO
13359 cmpi.b %d1,&INF # weed out INF
13361 cmpi.b %d1,&DENORM # weed out DENORM
13363 cmpi.b %d1,&SNAN # weed out SNAN
13462 mov.b STAG(%a6),%d1
13479 mov.w SRC_EX(%a0),%d1
13480 bclr &15,%d1 # force absolute value
13481 mov.w %d1,FP_SCR0_EX(%a6) # insert exponent
13517 mov.w FP_SCR0_EX(%a6),%d1 # fetch old sign,exp
13518 andi.w &0x8000,%d1 # keep old sign
13520 or.w %d1,%d0 # concat old sign, new exponent
13556 fmov.l %fpsr,%d1 # save FPSR
13559 or.l %d1,USER_FPSR(%a6) # save INEX2,N
13564 mov.w FP_SCR0_EX(%a6),%d1 # load sgn,exp
13565 mov.l %d1,%d2 # make a copy
13566 andi.l &0x7fff,%d1 # strip sign
13567 sub.l %d0,%d1 # add scale factor
13569 or.w %d1,%d2 # concat old sign,new exp
13600 mov.b FPCR_ENABLE(%a6),%d1
13601 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
13606 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
13619 mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
13622 mov.l %d1,%d2 # make a copy
13623 andi.l &0x7fff,%d1 # strip sign
13625 sub.l %d0,%d1 # subtract scale factor
13626 addi.l &0x6000,%d1 # add new bias
13627 andi.w &0x7fff,%d1
13628 or.w %d2,%d1 # concat new sign,new exp
13629 mov.w %d1,FP_SCR1_EX(%a6) # insert new exp
13644 fmov.l %fpsr,%d1 # save FPSR
13646 or.l %d1,USER_FPSR(%a6) # save INEX2,N
13651 mov.b FPCR_ENABLE(%a6),%d1
13652 andi.b &0x13,%d1 # is OVFL or INEX enabled?
13661 sne %d1 # set sign param accordingly
13675 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
13676 mov.l %d1,%d2 # make a copy
13677 andi.l &0x7fff,%d1 # strip sign
13679 sub.l %d0,%d1 # add scale factor
13680 subi.l &0x6000,%d1 # subtract bias
13681 andi.w &0x7fff,%d1
13682 or.w %d2,%d1 # concat sign,exp
13683 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
13697 fmov.l %fpsr,%d1 # save status
13700 or.l %d1,USER_FPSR(%a6) # save INEX2,N
13715 cmpi.b %d1,&DENORM # weed out DENORM
13717 cmpi.b %d1,&SNAN # weed out SNAN
13719 cmpi.b %d1,&QNAN # weed out QNAN
13724 cmpi.b %d1,&INF # weed out INF
13758 clr.w %d1
13759 mov.b DTAG(%a6),%d1
13760 lsl.b &0x3,%d1
13761 or.b STAG(%a6),%d1
13782 mov.w (tbl_fcmp_op.b,%pc,%d1.w*2),%d1
13783 jmp (tbl_fcmp_op.b,%pc,%d1.w*1)
13899 mov.b DST_EX(%a1),%d1
13900 eor.b %d0,%d1
13913 mov.b DST_EX(%a1),%d1
13914 eor.b %d0,%d1
13962 clr.w %d1
13963 mov.b DTAG(%a6),%d1
13964 lsl.b &0x3,%d1
13965 or.b STAG(%a6),%d1
14001 fmov.l %fpsr,%d1 # save status
14004 or.l %d1,USER_FPSR(%a6) # save INEX2,N
14009 mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
14010 mov.l %d1,%d2 # make a copy
14011 andi.l &0x7fff,%d1 # strip sign
14013 sub.l %d0,%d1 # add scale factor
14014 or.w %d2,%d1 # concat old sign,new exp
14015 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
14028 fmov.l %fpsr,%d1 # save status
14031 or.l %d1,USER_FPSR(%a6) # save INEX2,N
14038 mov.b FPCR_ENABLE(%a6),%d1
14039 andi.b &0x13,%d1 # is OVFL or INEX enabled?
14044 sne %d1 # set sign param accordingly
14056 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
14057 mov.l %d1,%d2 # make a copy
14058 andi.l &0x7fff,%d1 # strip sign
14059 sub.l %d0,%d1 # add scale factor
14060 subi.l &0x6000,%d1 # subtract bias
14061 andi.w &0x7fff,%d1
14063 or.w %d2,%d1 # concat old sign,new exp
14064 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
14077 fmov.l %fpsr,%d1 # save status
14080 or.l %d1,USER_FPSR(%a6) # save INEX2,N
14099 fmov.l %fpsr,%d1 # save status
14102 or.l %d1,USER_FPSR(%a6) # save INEX2,N
14104 mov.b FPCR_ENABLE(%a6),%d1
14105 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
14112 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
14133 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
14134 mov.l %d1,%d2 # make a copy
14135 andi.l &0x7fff,%d1 # strip sign
14137 sub.l %d0,%d1 # add scale factor
14138 addi.l &0x6000,%d1 # add bias
14139 andi.w &0x7fff,%d1
14140 or.w %d2,%d1 # concat old sign,new exp
14141 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
14154 fmov.l %fpsr,%d1 # save status
14157 or.l %d1,USER_FPSR(%a6) # save INEX2,N
14173 mov.l L_SCR3(%a6),%d1
14174 andi.b &0xc0,%d1 # keep rnd prec
14175 ori.b &rz_mode*0x10,%d1 # insert RZ
14177 fmov.l %d1,%fpcr # set FPCR
14194 mov.w (tbl_fsglmul_op.b,%pc,%d1.w*2),%d1
14195 jmp (tbl_fsglmul_op.b,%pc,%d1.w*1)
14303 clr.w %d1
14304 mov.b DTAG(%a6),%d1
14305 lsl.b &0x3,%d1
14306 or.b STAG(%a6),%d1 # combine src tags
14330 mov.w 2+L_SCR3(%a6),%d1 # fetch precision,mode
14331 lsr.b &0x6,%d1
14348 fmov.l %fpsr,%d1 # save FPSR
14351 or.l %d1,USER_FPSR(%a6) # save INEX2,N
14356 mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
14357 mov.l %d1,%d2 # make a copy
14358 andi.l &0x7fff,%d1 # strip sign
14360 sub.l %d0,%d1 # add scale factor
14361 or.w %d2,%d1 # concat old sign,new exp
14362 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
14375 fmov.l %fpsr,%d1
14378 or.l %d1,USER_FPSR(%a6) # save INEX,N
14381 mov.w (%sp),%d1 # fetch new exponent
14383 andi.l &0x7fff,%d1 # strip sign
14384 sub.l %d0,%d1 # add scale factor
14385 cmp.l %d1,&0x7fff # did divide overflow?
14391 mov.b FPCR_ENABLE(%a6),%d1
14392 andi.b &0x13,%d1 # is OVFL or INEX enabled?
14397 sne %d1 # set sign param accordingly
14409 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
14410 mov.l %d1,%d2 # make a copy
14411 andi.l &0x7fff,%d1 # strip sign
14413 sub.l %d0,%d1 # add scale factor
14414 subi.l &0x6000,%d1 # subtract new bias
14415 andi.w &0x7fff,%d1 # clear ms bit
14416 or.w %d2,%d1 # concat old sign,new exp
14417 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
14432 fmov.l %fpsr,%d1 # save status
14435 or.l %d1,USER_FPSR(%a6) # save INEX2,N
14437 mov.b FPCR_ENABLE(%a6),%d1
14438 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
14445 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
14466 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
14467 mov.l %d1,%d2 # make a copy
14468 andi.l &0x7fff,%d1 # strip sign
14470 sub.l %d0,%d1 # add scale factor
14471 addi.l &0x6000,%d1 # add bias
14472 andi.w &0x7fff,%d1 # clear top bit
14473 or.w %d2,%d1 # concat old sign, new exp
14474 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
14490 fmov.l %fpsr,%d1 # save status
14493 or.l %d1,USER_FPSR(%a6) # save INEX2,N
14509 clr.l %d1 # clear scratch register
14510 ori.b &rz_mode*0x10,%d1 # force RZ rnd mode
14512 fmov.l %d1,%fpcr # set FPCR
14529 mov.w (tbl_fsgldiv_op.b,%pc,%d1.w*2),%d1
14530 jmp (tbl_fsgldiv_op.b,%pc,%d1.w*1)
14650 clr.w %d1
14651 mov.b DTAG(%a6),%d1
14652 lsl.b &0x3,%d1
14653 or.b STAG(%a6),%d1 # combine src tags
14672 fmov.l %fpsr,%d1 # fetch INEX2,N,Z
14674 or.l %d1,USER_FPSR(%a6) # save exc and ccode bits
14682 mov.w 2+L_SCR3(%a6),%d1
14683 lsr.b &0x6,%d1
14689 cmp.l %d2,(tbl_fadd_ovfl.b,%pc,%d1.w*4) # is it an overflow?
14692 cmp.l %d2,(tbl_fadd_unfl.b,%pc,%d1.w*4) # is it an underflow?
14697 mov.w (%sp),%d1
14698 andi.w &0x8000,%d1 # keep sign
14699 or.w %d2,%d1 # concat sign,new exp
14700 mov.w %d1,(%sp) # insert new exponent
14724 mov.b FPCR_ENABLE(%a6),%d1
14725 andi.b &0x13,%d1 # is OVFL or INEX enabled?
14731 sne %d1 # set sign param accordingly
14740 mov.b L_SCR3(%a6),%d1
14741 andi.b &0xc0,%d1 # is precision extended?
14745 mov.w (%sp),%d1
14746 andi.w &0x8000,%d1 # keep sign
14749 or.w %d2,%d1 # concat sign,new exp
14750 mov.w %d1,(%sp) # insert new exponent
14758 mov.l L_SCR3(%a6),%d1
14759 andi.b &0x30,%d1 # keep rnd mode
14760 fmov.l %d1,%fpcr # set FPCR
14783 fmov.l %fpsr,%d1 # save status
14785 or.l %d1,USER_FPSR(%a6) # save INEX,N
14787 mov.b FPCR_ENABLE(%a6),%d1
14788 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
14795 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
14805 mov.l L_SCR3(%a6),%d1
14806 andi.b &0xc0,%d1 # is precision extended?
14819 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
14820 mov.l %d1,%d2 # make a copy
14821 andi.l &0x7fff,%d1 # strip sign
14823 sub.l %d0,%d1 # add scale factor
14824 addi.l &0x6000,%d1 # add new bias
14825 andi.w &0x7fff,%d1 # clear top bit
14826 or.w %d2,%d1 # concat sign,new exp
14827 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
14832 mov.l L_SCR3(%a6),%d1
14833 andi.b &0x30,%d1 # use only rnd mode
14834 fmov.l %d1,%fpcr # set FPCR
14844 mov.l L_SCR3(%a6),%d1
14845 andi.b &0xc0,%d1
14848 mov.l 0x4(%sp),%d1 # extract hi(man)
14849 cmpi.l %d1,&0x80000000 # is hi(man) = 0x80000000?
14870 mov.l L_SCR3(%a6),%d1
14871 andi.b &0xc0,%d1 # keep rnd prec
14872 ori.b &rz_mode*0x10,%d1 # insert rnd mode
14873 fmov.l %d1,%fpcr # set FPCR
14893 mov.w (tbl_fadd_op.b,%pc,%d1.w*2),%d1
14894 jmp (tbl_fadd_op.b,%pc,%d1.w*1)
14962 mov.b DST_EX(%a1),%d1
14963 eor.b %d0,%d1
14980 mov.b 3+L_SCR3(%a6),%d1
14981 andi.b &0x30,%d1 # extract rnd mode
14982 cmpi.b %d1,&rm_mode*0x10 # is rnd mode == RM?
15023 mov.b DST_EX(%a1),%d1
15024 eor.b %d1,%d0
15103 clr.w %d1
15104 mov.b DTAG(%a6),%d1
15105 lsl.b &0x3,%d1
15106 or.b STAG(%a6),%d1 # combine src tags
15125 fmov.l %fpsr,%d1 # fetch INEX2, N, Z
15127 or.l %d1,USER_FPSR(%a6) # save exc and ccode bits
15135 mov.w 2+L_SCR3(%a6),%d1
15136 lsr.b &0x6,%d1
15142 cmp.l %d2,(tbl_fsub_ovfl.b,%pc,%d1.w*4) # is it an overflow?
15145 cmp.l %d2,(tbl_fsub_unfl.b,%pc,%d1.w*4) # is it an underflow?
15150 mov.w (%sp),%d1
15151 andi.w &0x8000,%d1 # keep sign
15152 or.w %d2,%d1 # concat sign,new exp
15153 mov.w %d1,(%sp) # insert new exponent
15177 mov.b FPCR_ENABLE(%a6),%d1
15178 andi.b &0x13,%d1 # is OVFL or INEX enabled?
15184 sne %d1 # set sign param accordingly
15193 mov.b L_SCR3(%a6),%d1
15194 andi.b &0xc0,%d1 # is precision extended?
15198 mov.w (%sp),%d1 # fetch {sgn,exp}
15199 andi.w &0x8000,%d1 # keep sign
15202 or.w %d2,%d1 # concat sign,exp
15203 mov.w %d1,(%sp) # insert new exponent
15211 mov.l L_SCR3(%a6),%d1
15212 andi.b &0x30,%d1 # clear rnd prec
15213 fmov.l %d1,%fpcr # set FPCR
15236 fmov.l %fpsr,%d1 # save status
15238 or.l %d1,USER_FPSR(%a6)
15240 mov.b FPCR_ENABLE(%a6),%d1
15241 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
15248 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
15258 mov.l L_SCR3(%a6),%d1
15259 andi.b &0xc0,%d1 # is precision extended?
15272 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
15273 mov.l %d1,%d2 # make a copy
15274 andi.l &0x7fff,%d1 # strip sign
15276 sub.l %d0,%d1 # add scale factor
15277 addi.l &0x6000,%d1 # add new bias
15278 andi.w &0x7fff,%d1 # clear top bit
15279 or.w %d2,%d1 # concat sgn,exp
15280 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
15285 mov.l L_SCR3(%a6),%d1
15286 andi.b &0x30,%d1 # clear rnd prec
15287 fmov.l %d1,%fpcr # set FPCR
15297 mov.l L_SCR3(%a6),%d1
15298 andi.b &0xc0,%d1 # fetch rnd prec
15301 mov.l 0x4(%sp),%d1
15302 cmpi.l %d1,&0x80000000 # is hi(man) = 0x80000000?
15323 mov.l L_SCR3(%a6),%d1
15324 andi.b &0xc0,%d1 # keep rnd prec
15325 ori.b &rz_mode*0x10,%d1 # insert rnd mode
15326 fmov.l %d1,%fpcr # set FPCR
15346 mov.w (tbl_fsub_op.b,%pc,%d1.w*2),%d1
15347 jmp (tbl_fsub_op.b,%pc,%d1.w*1)
15415 mov.b DST_EX(%a1),%d1
15416 eor.b %d1,%d0
15432 mov.b 3+L_SCR3(%a6),%d1
15433 andi.b &0x30,%d1 # extract rnd mode
15434 cmpi.b %d1,&rm_mode*0x10 # is rnd mode = RM?
15475 mov.b DST_EX(%a1),%d1
15476 eor.b %d1,%d0
15547 clr.w %d1
15548 mov.b STAG(%a6),%d1
15566 fmov.l %fpsr,%d1
15567 or.l %d1,USER_FPSR(%a6) # set N,INEX
15619 fmov.l %fpsr,%d1 # save FPSR
15622 or.l %d1,USER_FPSR(%a6) # save INEX2,N
15627 mov.w FP_SCR0_EX(%a6),%d1 # load sgn,exp
15628 mov.l %d1,%d2 # make a copy
15629 andi.l &0x7fff,%d1 # strip sign
15630 sub.l %d0,%d1 # add scale factor
15632 or.w %d1,%d2 # concat old sign,new exp
15674 fmov.l %fpsr,%d1 # save status
15677 or.l %d1,USER_FPSR(%a6) # save INEX2,N
15680 mov.b FPCR_ENABLE(%a6),%d1
15681 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
15688 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
15701 mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
15704 mov.l %d1,%d2 # make a copy
15705 andi.l &0x7fff,%d1 # strip sign
15707 sub.l %d0,%d1 # subtract scale factor
15708 addi.l &0x6000,%d1 # add new bias
15709 andi.w &0x7fff,%d1
15710 or.w %d2,%d1 # concat new sign,new exp
15711 mov.w %d1,FP_SCR1_EX(%a6) # insert new exp
15726 fmov.l %fpsr,%d1 # save FPSR
15728 or.l %d1,USER_FPSR(%a6) # save INEX2,N
15733 mov.b FPCR_ENABLE(%a6),%d1
15734 andi.b &0x13,%d1 # is OVFL or INEX enabled?
15743 sne %d1 # set sign param accordingly
15757 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
15758 mov.l %d1,%d2 # make a copy
15759 andi.l &0x7fff,%d1 # strip sign
15761 sub.l %d0,%d1 # add scale factor
15762 subi.l &0x6000,%d1 # subtract bias
15763 andi.w &0x7fff,%d1
15764 or.w %d2,%d1 # concat sign,exp
15765 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
15782 fmov.l %fpsr,%d1 # save status
15785 or.l %d1,USER_FPSR(%a6) # save INEX2,N
15800 cmpi.b %d1,&DENORM # weed out DENORM
15802 cmpi.b %d1,&ZERO # weed out ZERO
15804 cmpi.b %d1,&INF # weed out INF
15806 cmpi.b %d1,&SNAN # weed out SNAN
15873 mov.w DST_EX(%a1),%d1
15875 mov.w %d1,FP_SCR1_EX(%a6)
15878 andi.w &0x7fff,%d1
15880 mov.w %d1,2+L_SCR1(%a6) # store dst exponent
15882 cmp.w %d0, %d1 # is src exp >= dst exp?
15907 mov.w FP_SCR0_EX(%a6),%d1
15908 and.w &0x8000,%d1
15909 or.w %d1,%d0 # concat {sgn,new exp}
15943 mov.w FP_SCR1_EX(%a6),%d1
15944 andi.w &0x8000,%d1
15945 or.w %d1,%d0 # concat {sgn,new exp}
15985 mov.w FP_SCR0_EX(%a6),%d1 # extract operand's {sgn,exp}
15986 mov.w %d1,%d0 # make a copy
15988 andi.l &0x7fff,%d1 # extract operand's exponent
16000 sub.l %d1,%d0 # scale = BIAS + (-exp)
16008 mov.l %d0,%d1 # prepare for op_norm call
16042 mov.w FP_SCR0_EX(%a6),%d1 # extract operand's {sgn,exp}
16043 andi.l &0x7fff,%d1 # extract operand's exponent
16047 btst &0x0,%d1 # is exp even or odd?
16053 sub.l %d1,%d0 # scale = BIAS + (-exp)
16061 sub.l %d1,%d0 # scale = BIAS + (-exp)
16112 mov.w FP_SCR1_EX(%a6),%d1 # extract operand's {sgn,exp}
16113 mov.w %d1,%d0 # make a copy
16115 andi.l &0x7fff,%d1 # extract operand's exponent
16127 sub.l %d1,%d0 # scale = BIAS + (-exp)
16134 mov.l %d0,%d1 # prepare for op_norm call
16284 clr.l %d1 # clear scratch reg
16285 mov.b FPSR_CC(%a6),%d1 # fetch fp ccodes
16286 ror.l &0x8,%d1 # rotate to top byte
16287 fmov.l %d1,%fpsr # insert into FPSR
16289 mov.w (tbl_fdbcc.b,%pc,%d0.w*2),%d1 # load table
16290 jmp (tbl_fdbcc.b,%pc,%d1.w) # jump to fdbcc routine
16848 mov.b 1+EXC_OPWORD(%a6), %d1 # fetch lo opword
16849 andi.w &0x7, %d1 # extract count register
16912 clr.l %d1 # clear scratch reg
16913 mov.b FPSR_CC(%a6),%d1 # fetch fp ccodes
16914 ror.l &0x8,%d1 # rotate to top byte
16915 fmov.l %d1,%fpsr # insert into FPSR
16917 mov.w (tbl_ftrapcc.b,%pc,%d0.w*2), %d1 # load table
16918 jmp (tbl_ftrapcc.b,%pc,%d1.w) # jump to ftrapcc routine
17482 clr.l %d1 # clear scratch reg
17483 mov.b FPSR_CC(%a6),%d1 # fetch fp ccodes
17484 ror.l &0x8,%d1 # rotate to top byte
17485 fmov.l %d1,%fpsr # insert into FPSR
17487 mov.w (tbl_fscc.b,%pc,%d0.w*2),%d1 # load table
17488 jmp (tbl_fscc.b,%pc,%d1.w) # jump to fscc routine
18064 mov.b 1+EXC_OPWORD(%a6),%d1 # fetch lo opword
18065 mov.l %d1,%d0 # make a copy
18066 andi.b &0x38,%d1 # extract src mode
18070 mov.l %d0,%d1
18071 andi.w &0x7,%d1 # pass index in d1
18084 cmpi.b %d1,&0x18 # is <ea> (An)+ ?
18086 cmpi.b %d1,&0x20 # is <ea> -(An) ?
18093 tst.l %d1 # did dstore fail?
18106 tst.l %d1 # did dstore fail?
18109 mov.b 0x1+EXC_OPWORD(%a6),%d1 # fetch opword
18110 andi.w &0x7,%d1 # pass index in d1
18124 tst.l %d1 # did dstore fail?
18127 mov.b 0x1+EXC_OPWORD(%a6),%d1 # fetch opword
18128 andi.w &0x7,%d1 # pass index in d1
18166 # d1 = Dn #
18219 mov.b 1+EXC_EXTWORD(%a6),%d1 # fetch extword
18220 andi.w &0x70,%d1 # extract reg bits
18221 lsr.b &0x4,%d1 # shift into lo bits
18233 mov.l (%sp)+,%d1 # restore strg
18255 mov.b (tbl_fmovm_convert.w,%pc,%d1.w*1),%d1
18276 tst.b %d1 # should FP0 be moved?
18284 lsl.b &0x1,%d1 # should FP1 be moved?
18292 lsl.b &0x1,%d1 # should FP2 be moved?
18299 lsl.b &0x1,%d1 # should FP3 be moved?
18306 lsl.b &0x1,%d1 # should FP4 be moved?
18313 lsl.b &0x1,%d1 # should FP5 be moved?
18320 lsl.b &0x1,%d1 # should FP6 be moved?
18327 lsl.b &0x1,%d1 # should FP7 be moved?
18343 tst.l %d1 # did dstore err?
18357 mov.l %d1,-(%sp) # save bit string for later
18364 tst.l %d1 # did dfetch fail?
18367 mov.l (%sp)+,%d1 # load bit string
18371 tst.b %d1 # should FP0 be moved?
18379 lsl.b &0x1,%d1 # should FP1 be moved?
18387 lsl.b &0x1,%d1 # should FP2 be moved?
18393 lsl.b &0x1,%d1 # should FP3 be moved?
18399 lsl.b &0x1,%d1 # should FP4 be moved?
18405 lsl.b &0x1,%d1 # should FP5 be moved?
18411 lsl.b &0x1,%d1 # should FP6 be moved?
18417 lsl.b &0x1,%d1 # should FP7 be moved?
18529 mov.w %d0,%d1 # make a copy
18532 andi.l &0x7,%d1 # extract reg field
18652 mov.l %d0,%d1
18653 add.l %a0,%d1 # Increment
18654 mov.l %d1,EXC_DREGS+0x8(%a6) # Save incr value
18660 mov.l %d0,%d1
18661 add.l %a0,%d1 # Increment
18662 mov.l %d1,EXC_DREGS+0xc(%a6) # Save incr value
18668 mov.l %d0,%d1
18669 add.l %a0,%d1 # Increment
18670 mov.l %d1,%a2 # Save incr value
18676 mov.l %d0,%d1
18677 add.l %a0,%d1 # Increment
18678 mov.l %d1,%a3 # Save incr value
18684 mov.l %d0,%d1
18685 add.l %a0,%d1 # Increment
18686 mov.l %d1,%a4 # Save incr value
18692 mov.l %d0,%d1
18693 add.l %a0,%d1 # Increment
18694 mov.l %d1,%a5 # Save incr value
18700 mov.l %d0,%d1
18701 add.l %a0,%d1 # Increment
18702 mov.l %d1,(%a6) # Save incr value
18710 mov.l %d0,%d1
18711 add.l %a0,%d1 # Increment
18712 mov.l %d1,EXC_A7(%a6) # Save incr value
18785 tst.l %d1 # did ifetch fail?
18798 tst.l %d1 # did ifetch fail?
18811 tst.l %d1 # did ifetch fail?
18824 tst.l %d1 # did ifetch fail?
18837 tst.l %d1 # did ifetch fail?
18850 tst.l %d1 # did ifetch fail?
18863 tst.l %d1 # did ifetch fail?
18876 tst.l %d1 # did ifetch fail?
18891 addq.l &0x8,%d1
18899 tst.l %d1 # did ifetch fail?
18909 mov.l %d0,%d1
18910 rol.w &0x4,%d1
18911 andi.w &0xf,%d1 # extract index regno
18923 mov.l %d2,%d1
18924 rol.w &0x7,%d1
18925 andi.l &0x3,%d1 # extract scale value
18927 lsl.l %d1,%d0 # shift index by scale
18944 tst.l %d1 # did ifetch fail?
18958 tst.l %d1 # did ifetch fail?
18972 tst.l %d1 # did ifetch fail?
18994 tst.l %d1 # did ifetch fail?
19005 mov.l %d0,%d1 # make extword copy
19006 rol.w &0x4,%d1 # rotate reg num into place
19007 andi.w &0xf,%d1 # extract register number
19019 mov.l %d2,%d1
19020 rol.w &0x7,%d1 # rotate scale value into place
19021 andi.l &0x3,%d1 # extract scale value
19023 lsl.l %d1,%d0 # shift index by scale
19051 bfextu %d0{&16:&4},%d1 # fetch dreg index
19087 tst.l %d1 # did ifetch fail?
19097 tst.l %d1 # did ifetch fail?
19118 tst.l %d1 # did ifetch fail?
19128 tst.l %d1 # did ifetch fail?
19146 tst.l %d1 # did dfetch fail?
19158 tst.l %d1 # did dfetch fail?
19251 tst.l %d1 # did ifetch fail?
19259 tst.l %d1 # did ifetch fail?
19271 tst.l %d1 # did ifetch fail?
19279 tst.l %d1 # did ifetch fail?
19291 tst.l %d1 # did ifetch fail?
19299 tst.l %d1 # did ifetch fail?
19311 tst.l %d1 # did ifetch fail?
19319 tst.l %d1 # did ifetch fail?
19327 tst.l %d1 # did ifetch fail?
19367 mov.l %d0, %d1 # make a copy
19370 andi.l &0x7, %d1 # extract reg field
19378 or.w %d1,%d0 # concat mode,reg
19450 mov.l %d0,%d1 # make a copy
19453 andi.l &0x7,%d1 # extract reg field
19468 mov.w (tbl_ceaf_pi.b,%pc,%d1.w*2),%d1
19470 jmp (tbl_ceaf_pi.b,%pc,%d1.w*1)
19513 mov.w (tbl_ceaf_pd.b,%pc,%d1.w*2),%d1
19517 jmp (tbl_ceaf_pd.b,%pc,%d1.w*1)
19695 bfextu EXC_OPWORD(%a6){&10:&3}, %d1 # extract <ea> mode field
19700 bfextu EXC_OPWORD(%a6){&13:&3}, %d1 # extract src reg field
19830 tst.l %d1 # did dfetch fail?
19846 tst.l %d1 # did ifetch fail?
19866 tst.l %d1 # did dfetch fail?
19882 tst.l %d1 # did ifetch fail?
19902 tst.l %d1 # did dfetch fail?
19918 tst.l %d1 # did ifetch fail?
19939 tst.l %d1 # did dfetch fail?
19960 tst.l %d1 # did ifetch fail?
19982 mov.w &0x3f81, %d1 # xprec exp = 0x3f81
19983 sub.w %d0, %d1 # exp = 0x3f81 - shft amt.
19984 or.w %d1, FP_SRC_EX(%a6) # {sgn,exp}
20022 tst.l %d1 # did dfetch fail?
20045 tst.l %d1 # did ifetch fail?
20057 mov.l &0xb, %d1
20058 lsl.l %d1, %d0
20068 mov.w &0x3c01, %d1 # xprec exp = 0x3c01
20069 sub.w %d0, %d1 # exp = 0x3c01 - shft amt.
20070 or.w %d1, FP_SRC_EX(%a6) # {sgn,exp}
20083 mov.l &0xb, %d1
20084 lsl.l %d1, %d0
20108 tst.l %d1 # did dfetch fail?
20201 bfextu EXC_CMDREG(%a6){&3:&3},%d1 # extract dst fmt
20202 mov.w (tbl_fout.b,%pc,%d1.w*2),%a1 # use as index
20234 fmov.l %fpsr,%d1 # fetch FPSR
20235 or.w %d1,2+USER_FPSR(%a6) # save new exc,accrued bits
20237 mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
20238 andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
20244 tst.l %d1 # did dstore fail?
20250 mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
20251 andi.w &0x7,%d1
20256 mov.l SRC_EX(%a0),%d1
20257 andi.l &0x80000000,%d1 # keep DENORM sign
20258 ori.l &0x00800000,%d1 # make smallest sgl
20259 fmov.s %d1,%fp0
20280 fmov.l %fpsr,%d1 # fetch FPSR
20281 or.w %d1,2+USER_FPSR(%a6) # save new exc,accrued bits
20283 mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
20284 andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
20290 tst.l %d1 # did dstore fail?
20296 mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
20297 andi.w &0x7,%d1
20302 mov.l SRC_EX(%a0),%d1
20303 andi.l &0x80000000,%d1 # keep DENORM sign
20304 ori.l &0x00800000,%d1 # make smallest sgl
20305 fmov.s %d1,%fp0
20326 fmov.l %fpsr,%d1 # fetch FPSR
20327 or.w %d1,2+USER_FPSR(%a6) # save new exc,accrued bits
20330 mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
20331 andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
20337 tst.l %d1 # did dstore fail?
20343 mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
20344 andi.w &0x7,%d1
20349 mov.l SRC_EX(%a0),%d1
20350 andi.l &0x80000000,%d1 # keep DENORM sign
20351 ori.l &0x00800000,%d1 # make smallest sgl
20352 fmov.s %d1,%fp0
20388 tst.l %d1 # did dstore fail?
20409 tst.l %d1 # did dstore fail?
20466 fmov.l %fpsr,%d1 # save FPSR
20468 or.w %d1,2+USER_FPSR(%a6) # set possible inex2/ainex
20471 mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
20472 andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
20478 tst.l %d1 # did dstore fail?
20484 mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
20485 andi.w &0x7,%d1
20512 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
20518 mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
20519 andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
20525 tst.l %d1 # did dstore fail?
20531 mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
20532 andi.w &0x7,%d1
20536 mov.b FPCR_ENABLE(%a6),%d1
20537 andi.b &0x0a,%d1 # is UNFL or INEX enabled?
20562 smi %d1 # set if so
20568 mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
20569 andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
20575 tst.l %d1 # did dstore fail?
20581 mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
20582 andi.w &0x7,%d1
20586 mov.b FPCR_ENABLE(%a6),%d1
20587 andi.b &0x0a,%d1 # is UNFL or INEX enabled?
20601 mov.w SRC_EX(%a0),%d1 # fetch current sign
20602 andi.w &0x8000,%d1 # keep it,clear exp
20603 ori.w &0x3fff,%d1 # insert exp = 0
20604 mov.w %d1,FP_SCR0_EX(%a6) # insert scaled exp
20650 mov.b 3+L_SCR3(%a6),%d1
20651 lsr.b &0x4,%d1
20652 andi.w &0x0c,%d1
20653 swap %d1
20654 mov.b 3+L_SCR3(%a6),%d1
20655 lsr.b &0x4,%d1
20656 andi.w &0x03,%d1
20713 tst.l %d1 # did dstore fail?
20741 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
20747 mov.l %d1,L_SCR2(%a6)
20754 tst.l %d1 # did dstore fail?
20757 mov.b FPCR_ENABLE(%a6),%d1
20758 andi.b &0x0a,%d1 # is UNFL or INEX enabled?
20783 smi %d1 # set if so
20794 tst.l %d1 # did dstore fail?
20797 mov.b FPCR_ENABLE(%a6),%d1
20798 andi.b &0x0a,%d1 # is UNFL or INEX enabled?
20812 mov.w SRC_EX(%a0),%d1 # fetch current sign
20813 andi.w &0x8000,%d1 # keep it,clear exp
20814 ori.w &0x3fff,%d1 # insert exp = 0
20815 mov.w %d1,FP_SCR0_EX(%a6) # insert scaled exp
20841 # d1 = lo(double precision result) #
20882 mov.l FTEMP_HI(%a0),%d1 # get ms mantissa
20883 bfextu %d1{&1:&20},%d1 # get upper 20 bits of ms
20884 or.l %d1,%d0 # put these bits in ms word of double
20886 mov.l FTEMP_HI(%a0),%d1 # get ms mantissa
20888 lsl.l %d0,%d1 # put lower 11 bits in upper bits
20889 mov.l %d1,L_SCR2(%a6) # build lower lword in memory
20890 mov.l FTEMP_LO(%a0),%d1 # get ls mantissa
20891 bfextu %d1{&0:&21},%d0 # get ls 21 bits of double
20892 mov.l L_SCR2(%a6),%d1
20893 or.l %d0,%d1 # put them in double result
20947 mov.l FTEMP_HI(%a0),%d1 # get ms mantissa
20948 andi.l &0x7fffff00,%d1 # get upper 23 bits of ms
20949 lsr.l &0x8,%d1 # and put them flush right
20950 or.l %d1,%d0 # put these bits in ms word of single
20966 mov.b 1+EXC_CMDREG(%a6),%d1 # fetch dynamic reg
20967 lsr.b &0x4,%d1
20968 andi.w &0x7,%d1
21022 tst.l %d1 # did dstore fail?
21032 tst.l %d1 # did dstore fail?
21053 # fetch_dreg(): fetch register according to index in d1 #
21059 # d1 = index of register to fetch from #
21065 # According to the index value in d1 which can range from zero #
21067 # address register indexes start at 8). D0/D1/A0/A1/A6/A7 are on the #
21072 # this routine leaves d1 intact for subsequent store_dreg calls. #
21075 mov.w (tbl_fdreg.b,%pc,%d1.w*2),%d0
21147 # store_dreg_l(): store longword to data register specified by d1 #
21154 # d1 = index of register to fetch from #
21160 # According to the index value in d1, store the longword value #
21161 # in d0 to the corresponding data register. D0/D1 are on the stack #
21168 mov.w (tbl_sdregl.b,%pc,%d1.w*2),%d1
21169 jmp (tbl_sdregl.b,%pc,%d1.w*1)
21208 # store_dreg_w(): store word to data register specified by d1 #
21215 # d1 = index of register to fetch from #
21221 # According to the index value in d1, store the word value #
21222 # in d0 to the corresponding data register. D0/D1 are on the stack #
21229 mov.w (tbl_sdregw.b,%pc,%d1.w*2),%d1
21230 jmp (tbl_sdregw.b,%pc,%d1.w*1)
21269 # store_dreg_b(): store byte to data register specified by d1 #
21276 # d1 = index of register to fetch from #
21282 # According to the index value in d1, store the byte value #
21283 # in d0 to the corresponding data register. D0/D1 are on the stack #
21290 mov.w (tbl_sdregb.b,%pc,%d1.w*2),%d1
21291 jmp (tbl_sdregb.b,%pc,%d1.w*1)
21337 # d1 = index of address register to increment #
21345 # specified by d1. A0/A1/A6/A7 reside on the stack. The rest reside #
21356 mov.w (tbl_iareg.b,%pc,%d1.w*2),%d1
21357 jmp (tbl_iareg.b,%pc,%d1.w*1)
21401 # d1 = index of address register to decrement #
21409 # specified by d1. A0/A1/A6/A7 reside on the stack. The rest reside #
21420 mov.w (tbl_dareg.b,%pc,%d1.w*2),%d1
21421 jmp (tbl_dareg.b,%pc,%d1.w*1)
21719 mov.w (tbl_thresh.b,%pc,%d0.w*2), %d1 # load prec threshold
21720 mov.w %d1, %d0 # copy d1 into d0
21740 mov.w %d1, FTEMP_EX(%a0) # load exp with threshold
21741 clr.l FTEMP_HI(%a0) # set d1 = 0 (ms mantissa)
21751 # %d1{15:0} : denormalization threshold #
21775 mov.l %d1, %d0 # copy the denorm threshold
21776 sub.w FTEMP_EX(%a0), %d1 # d1 = threshold - uns exponent
21777 ble.b dnrm_no_lp # d1 <= 0
21778 cmpi.w %d1, &0x20 # is ( 0 <= d1 < 32) ?
21780 cmpi.w %d1, &0x40 # is (32 <= d1 < 64) ?
21782 bra.w case_3 # (d1 >= 64)
21792 # case (0<d1<32)
21795 # %d1 = "n" = amt to shift
21819 sub.w %d1, %d0 # %d0 = 32 - %d1
21821 cmpi.w %d1, &29 # is shft amt >= 29
21828 bfextu FTEMP_HI(%a0){%d0:&32}, %d1 # %d1 = new FTEMP_LO
21832 mov.l %d1, FTEMP_LO(%a0) # store new FTEMP_LO
21844 # case (32<=d1<64)
21847 # %d1 = "n" = amt to shift
21870 subi.w &0x20, %d1 # %d1 now between 0 and 32
21872 sub.w %d1, %d0 # %d0 = 32 - %d1
21881 bfextu FTEMP_HI(%a0){%d0:&32}, %d1 # %d1 = new G,R,S
21883 bftst %d1{&2:&30} # were any bits shifted off?
21888 mov.l %d1, %d0 # move new G,R,S to %d0
21892 mov.l %d1, %d0 # move new G,R,S to %d0
21904 # case (d1>=64)
21907 # %d1 = amt to shift
21912 cmpi.w %d1, &65 # is shift amt > 65?
21917 # case (d1>65)
21928 # case (d1 == 64)
21949 mov.l %d0, %d1 # make a copy
21951 and.l &0x3fffffff, %d1 # extract other bits
21956 # case (d1 == 65)
21979 and.l &0x7fffffff, %d1 # extract other bits
22018 # d1(hi) = contains rounding precision: #
22022 # d1(lo) = contains rounding mode: #
22062 mov.w (tbl_mode.b,%pc,%d1.w*2), %a1 # load jump offset
22081 swap %d1 # set up d1 for round prec.
22083 cmpi.b %d1, &s_mode # is prec = sgl?
22098 swap %d1 # set up d1 for round prec.
22100 cmpi.b %d1, &s_mode # is prec = sgl?
22115 swap %d1 # set up d1 for round prec.
22117 cmpi.b %d1, &s_mode # is prec = sgl?
22191 swap %d1 # select rnd prec
22193 cmpi.b %d1, &s_mode # is prec sgl?
22205 # d1 = {PREC,ROUND}
22214 # Notes: the ext_grs uses the round PREC, and therefore has to swap d1
22215 # prior to usage, and needs to restore d1 to original. this
22221 swap %d1 # have d1.w point to round precision
22222 tst.b %d1 # is rnd prec = extended?
22230 swap %d1 # yes; return to correct positions
22236 cmpi.b %d1, &s_mode # is rnd prec = sgl?
22294 swap %d1 # restore d1 to original
22322 mov.l FTEMP_LO(%a0), %d1 # load lo(mantissa)
22329 bfextu %d1{&0:%d2}, %d3 # extract lo bits
22332 lsl.l %d2, %d1 # create lo(man)
22335 mov.l %d1, FTEMP_LO(%a0) # store new lo(man)
22345 bfffo %d1{&0:&32}, %d2 # how many places to shift?
22346 lsl.l %d2, %d1 # shift lo(man)
22349 mov.l %d1, FTEMP_HI(%a0) # store hi(man)
22397 clr.l %d1 # clear top word
22398 mov.w FTEMP_EX(%a0), %d1 # extract exponent
22399 and.w &0x7fff, %d1 # strip off sgn
22401 cmp.w %d0, %d1 # will denorm push exp < 0?
22407 sub.w %d0, %d1 # shift exponent value
22410 or.w %d0, %d1 # {sgn,new exp}
22411 mov.w %d1, FTEMP_EX(%a0) # insert new exponent
22422 cmp.b %d1, &32 # is exp <= 32?
22425 bfextu FTEMP_HI(%a0){%d1:&32}, %d0 # extract new hi(man)
22429 lsl.l %d1, %d0 # extract new lo(man)
22441 sub.w &32, %d1 # adjust shft amt by 32
22444 lsl.l %d1, %d0 # left shift lo(man)
22566 mov.l %d0, %d1
22578 and.l &0x000fffff, %d1
22589 and.l &0x000fffff, %d1
22597 btst &19, %d1
22629 mov.l %d0, %d1
22641 and.l &0x007fffff, %d1
22650 and.l &0x007fffff, %d1
22656 btst &22, %d1
22680 # d1 = rounding precision/mode #
22701 mov.l %d1, -(%sp) # save rnd prec,mode on stack
22706 mov.w FTEMP_EX(%a0), %d1 # extract exponent
22707 and.w &0x7fff, %d1
22708 sub.w %d0, %d1
22709 mov.w %d1, FTEMP_EX(%a0) # insert 16 bit exponent
22719 mov.w 0x6(%sp),%d1 # load prec:mode into %d1
22720 andi.w &0xc0,%d1 # extract rnd prec
22721 lsr.w &0x4,%d1
22722 swap %d1
22723 mov.w 0x6(%sp),%d1
22724 andi.w &0x30,%d1
22725 lsr.w &0x4,%d1
22765 mov.l %d1,-(%sp) # save rnd prec,mode on stack
22770 mov.w FTEMP_EX(%a0),%d1 # extract exponent
22771 and.w &0x7fff,%d1
22772 sub.w %d0,%d1
22773 mov.w %d1,FTEMP_EX(%a0) # insert 16 bit exponent
22781 mov.w &s_mode,%d1 # force rnd prec = sgl
22782 swap %d1
22783 mov.w 0x6(%sp),%d1 # load rnd mode
22784 andi.w &0x30,%d1 # extract rnd prec
22785 lsr.w &0x4,%d1
22833 # d1.b = '-1' => (-); '0' => (+) #
22856 andi.w &0x10,%d1 # keep result sign
22858 or.b %d0,%d1 # concat the two
22859 mov.w %d1,%d0 # make a copy
22860 lsl.b &0x1,%d1 # multiply d1 by 2
22865 and.w &0x10, %d1 # keep result sign
22866 or.b %d0, %d1 # insert rnd mode
22868 or.b %d0, %d1 # insert rnd prec
22869 mov.w %d1, %d0 # make a copy
22870 lsl.b &0x1, %d1 # shift left by 1
22878 lea (tbl_ovfl_result.b,%pc,%d1.w*8), %a0 # return result ptr
22967 tst.l %d1 # did dfetch fail?
23083 # 2. Calculate absolute value of exponent in d1 by mul and add.
23092 # (*) d1: accumulator for binary exponent
23105 clr.l %d1 # zero d1 for accumulator
23107 mulu.l &0xa,%d1 # mul partial product by one digit place
23109 add.l %d0,%d1 # d1 = d1 + d0
23114 neg.l %d1 # negate before subtracting
23116 sub.l &16,%d1 # sub to compensate for shift of mant
23118 neg.l %d1 # now negative, make pos and set SE
23122 mov.l %d1,-(%sp) # save exp on stack
23134 # (*) d1: lword counter
23145 mov.l &1,%d1 # word counter, init to 1
23160 mov.l (%a0,%d1.L*4),%d4 # load mantissa lonqword into d4
23170 # then inc d1 (=2) to point to the next long word and reset d3 to 0
23177 addq.l &1,%d1 # inc lw pointer in mantissa
23178 cmp.l %d1,&2 # test for last lw
23222 # (*) d1: zero count
23238 mov.l (%sp),%d1 # load expA for range test
23239 cmp.l %d1,&27 # test is with 27
23243 clr.l %d1 # zero count reg
23247 addq.l &1,%d1 # inc zero count
23251 addq.l &8,%d1 # and inc count by 8
23261 addq.l &1,%d1 # inc digit counter
23264 mov.l %d1,%d0 # copy counter to d2
23265 mov.l (%sp),%d1 # get adjusted exp from memory
23266 sub.l %d0,%d1 # subtract count from exp
23268 neg.l %d1 # now its neg; get abs
23295 clr.l %d1 # clr counter
23300 addq.l &8,%d1 # inc counter by 8
23309 addq.l &1,%d1 # inc digit counter
23312 mov.l %d1,%d0 # copy counter to d0
23313 mov.l (%sp),%d1 # get adjusted exp from memory
23314 sub.l %d0,%d1 # subtract count from exp
23316 neg.l %d1 # take abs of exp and clr SE
23346 # ( ) d1: exponent
23353 # ( ) d1: exponent
23406 mov.l %d1,%d0 # copy exp to d0;use d0
23592 # d1: scratch
23636 mov.l 4(%a0),%d1
23641 roxl.l &1,%d1
23642 tst.l %d1
23653 mov.l %d1,4(%a0)
23848 bfextu USER_FPCR(%a6){&26:&2},%d1 # get initial rmode bits
23849 lsl.w &1,%d1 # put them in bits 2:1
23850 add.w %d5,%d1 # add in LAMBDA
23851 lsl.w &1,%d1 # put them in bits 3:1
23854 addq.l &1,%d1 # if neg, set bit 0
23857 mov.b (%a2,%d1),%d3 # load d3 with new rmode
24058 movm.l &0xc0c0,-(%sp) # save regs used by sintd0 {%d0-%d1/%a0-%a1}
24088 movm.l (%sp)+,&0x303 # restore regs used by sint {%d0-%d1/%a0-%a1}
24214 # d1: x/0
24259 clr.l %d1 # put zero in d1 for addx
24261 addx.l %d1,%d2 # continue inc
24287 # d1: x/scratch (0);shift count for final exponent packing
24344 clr.l %d1 # put zero in d1 for addx
24346 addx.l %d1,%d2 # continue inc
24352 mov.l &12,%d1 # use d1 for shift count
24353 lsr.l %d1,%d0 # shift d0 right by 12
24355 lsr.l %d1,%d0 # shift d0 right by 12
24481 # extracts and shifts. The three msbs from d2 will go into d1. #
24487 # into d2:d3. D1 will contain the bcd digit formed. #
24503 # d1: temp used to form the digit
24529 # A3. Multiply d2:d3 by 8; extract msbs into d1.
24531 bfextu %d2{&0:&3},%d1 # copy 3 msbs of d2 into d1
24537 # A4. Multiply d4:d5 by 2; add carry out to d1.
24542 addx.w %d6,%d1 # add in extend from mul by 2
24544 # A5. Add mul by 8 to mul by 2. D1 contains the digit formed.
24550 addx.w %d6,%d1 # add in extend from add to d1
24560 add.w %d1,%d7 # add in ls digit to d7b
24568 mov.w %d1,%d7 # put new digit in d7b
24693 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
24719 mov.b EXC_OPWORD+0x1(%a6),%d1
24720 andi.b &0x38,%d1 # extract opmode
24721 cmpi.b %d1,&0x18 # postinc?
24723 cmpi.b %d1,&0x20 # predec?
24728 mov.b EXC_OPWORD+0x1(%a6),%d1
24729 andi.w &0x0007,%d1 # fetch An
24731 mov.w (tbl_rest_inc.b,%pc,%d1.w*2),%d1
24732 jmp (tbl_rest_inc.b,%pc,%d1.w*1)