Lines Matching +full:32 +full:k
160 addl \disp(%rsp, SRND), h # h = k + w + h # --
174 add h, d # d = k + w + h + d # --
188 vpslld $(32-7), XTMP1, XTMP3
190 add y1, h # h = k + w + h + S0 # --
192 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
196 add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
208 addl offset(%rsp, SRND), h # h = k + w + h # --
223 add h, d # d = k + w + h + d # --
225 vpslld $(32-18), XTMP1, XTMP1
242 add y1, h # h = k + w + h + S0 # --
245 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
246 add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
259 addl offset(%rsp, SRND), h # h = k + w + h # --
274 add h, d # d = k + w + h + d # --
294 add y1,h # h = k + w + h + S0 # --
295 add y2,d # d = k + w + h + d + S1 + CH = d + t1 # --
296 add y2,h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
309 addl offset(%rsp, SRND), h # h = k + w + h # --
323 add h, d # d = k + w + h + d # --
336 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
347 add y1, h # h = k + w + h + S0 # --
348 add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
375 addl \disp(%rsp, SRND), h # h = k + w + h # --
385 add h, d # d = k + w + h + d # --
387 add y1, h # h = k + w + h + S0 # --
388 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
394 add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
414 addl offset(%rsp, SRND), h # h = k + w + h # --
424 add h, d # d = k + w + h + d # --
426 add y1, h # h = k + w + h + S0 # --
428 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
434 add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
454 addl offset(%rsp, SRND), h # h = k + w + h # --
464 add h, d # d = k + w + h + d # --
466 add y1, h # h = k + w + h + S0 # --
468 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
474 add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
494 addl offset(%rsp, SRND), h # h = k + w + h # --
504 add h, d # d = k + w + h + d # --
506 add y1, h # h = k + w + h + S0 # --
508 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
511 add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
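
File lines 375-511 repeat the same additions without any interleaved schedule work, which matches the DO_4ROUNDS macro used once the message schedule is exhausted. The running sums in the comments are the standard round computation split across separate adds: the stack slot addressed through \disp/offset(%rsp, SRND) already holds K[t] + W[t] (stored by the vpaddd/vmovdqa pairs below), y1 holds S0, y2 holds S1 + CH, and the old_h destinations appear because a round's MAJ term is only folded into h during the following round. A scalar model of the accumulation, with illustrative parameter names:

#include <stdint.h>

/* Mirrors the comment trail above: k_plus_w is the precomputed
 * K[t] + W[t] read from the stack, s0 is y1, s1_plus_ch is y2. */
void round_adds(uint32_t *h, uint32_t *d,
                uint32_t k_plus_w, uint32_t s0, uint32_t s1_plus_ch)
{
        *h += k_plus_w;         /* h = k + w + h                         */
        *d += *h;               /* d = k + w + h + d                     */
        *h += s0;               /* h = k + w + h + S0                    */
        *d += s1_plus_ch;       /* d = d + t1, the next round's e        */
        *h += s1_plus_ch;       /* h = t1 + S0; MAJ is added next round  */
}
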
527 .align 32
538 and $-32, %rsp # align rsp to 32 byte boundary
566 VMOVDQ 0*32(INP),XTMP0
567 VMOVDQ 1*32(INP),XTMP1
568 VMOVDQ 2*32(INP),XTMP2
569 VMOVDQ 3*32(INP),XTMP3
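
In the prologue, and $-32, %rsp rounds the stack pointer down to a 32-byte boundary so the vmovdqa stores into the _XFER area below are aligned, and the four VMOVDQ loads bring in 4*32 = 128 bytes of input, i.e. two 64-byte message blocks per pass of the outer loop. A small sketch of the alignment arithmetic, with arbitrary example values:

#include <stdint.h>
#include <assert.h>

/* -32 is ...11100000 in two's complement, so ANDing with it clears
 * the low five bits and rounds the address down to a multiple of 32. */
static uintptr_t align_down_32(uintptr_t p)
{
        return p & (uintptr_t)-32;      /* same as p & ~(uintptr_t)31 */
}

int main(void)
{
        assert(align_down_32(0xe2c8u) == 0xe2c0u);
        assert(align_down_32(0x2000u) == 0x2000u);   /* already aligned */
        return 0;
}
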
592 vpaddd K256+0*32(SRND), X0, XFER
593 vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
594 FOUR_ROUNDS_AND_SCHED _XFER + 0*32
596 vpaddd K256+1*32(SRND), X0, XFER
597 vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
598 FOUR_ROUNDS_AND_SCHED _XFER + 1*32
600 vpaddd K256+2*32(SRND), X0, XFER
601 vmovdqa XFER, 2*32+_XFER(%rsp, SRND)
602 FOUR_ROUNDS_AND_SCHED _XFER + 2*32
604 vpaddd K256+3*32(SRND), X0, XFER
605 vmovdqa XFER, 3*32+_XFER(%rsp, SRND)
606 FOUR_ROUNDS_AND_SCHED _XFER + 3*32
608 add $4*32, SRND
609 cmp $3*4*32, SRND
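
This is the body of the scheduling loop: SRND doubles as the offset into the K256 table and into the per-iteration _XFER slots, each vpaddd/vmovdqa pair parks one 32-byte slot of W + K on the stack for the FOUR_ROUNDS_AND_SCHED call that follows, and the add/cmp pair advances SRND by 4*32 and (assuming the usual jb back edge) loops while it is below 3*4*32. That works out to three passes of four macro calls, four rounds each, i.e. rounds 0-47 of the 64-round compression. A trip-count sketch using those constants:

#include <stdio.h>

int main(void)
{
        int rounds = 0;

        /* SRND advances by 4*32 per pass; loop while SRND < 3*4*32. */
        for (int srnd = 0; srnd < 3 * 4 * 32; srnd += 4 * 32)
                rounds += 4 * 4;        /* four macro calls, four rounds each */

        printf("scheduled rounds: %d\n", rounds);    /* prints 48 */
        return 0;
}
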
614 vpaddd K256+0*32(SRND), X0, XFER
615 vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
616 DO_4ROUNDS _XFER + 0*32
618 vpaddd K256+1*32(SRND), X1, XFER
619 vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
620 DO_4ROUNDS _XFER + 1*32
621 add $2*32, SRND
626 cmp $4*4*32, SRND
648 DO_4ROUNDS _XFER + 0*32 + 16
649 DO_4ROUNDS _XFER + 1*32 + 16
650 add $2*32, SRND
651 cmp $4*4*32, SRND
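
Once the schedule is used up, the remaining rounds go through DO_4ROUNDS only: SRND, entering at 3*4*32 from the loop above, now advances by 2*32 per pass until it reaches 4*4*32, which covers rounds 48-63. The + 16 forms at file lines 648-649 belong to a separate loop that appears to replay the stored rounds for the second message block, reading the W + K values parked in the upper 16 bytes of each 32-byte _XFER slot. A sketch of that offset arithmetic, with a hypothetical helper name:

#include <stddef.h>

/* Each 32-byte _XFER slot holds two 16-byte lanes, one per block. */
size_t xfer_offset(int slot, int block)         /* block: 0 or 1 */
{
        return (size_t)slot * 32 + (size_t)block * 16;
}
/* xfer_offset(1, 1) == 48, matching "_XFER + 1*32 + 16" above. */
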
752 .section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
753 .align 32
758 .section .rodata.cst32._SHUF_00BA, "aM", @progbits, 32
759 .align 32
764 .section .rodata.cst32._SHUF_DC00, "aM", @progbits, 32
765 .align 32
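
The trailing matches declare the shuffle-mask constants: each one gets its own mergeable ("aM") read-only section with a 32-byte entity size, and the .align 32 keeps the data 32-byte aligned so aligned 256-bit loads of the masks are valid. A rough C analogue of such a constant; the zero bytes are placeholders, not the actual mask values from the source:

#include <stdint.h>

/* 32-byte aligned read-only table; the section merging expressed by
 * the "aM" flags above has no direct equivalent in plain C. */
__attribute__((aligned(32)))
const uint8_t shuffle_mask_placeholder[32] = { 0 };
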