Lines Matching +full:0 +full:x6
8 #define load_16way(src, x0, x1, x2, x3, x4, x5, x6, x7) \
9 vmovdqu (0*32)(src), x0; \
15 vmovdqu (6*32)(src), x6; \
18 #define store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \
19 vmovdqu x0, (0*32)(dst); \
25 vmovdqu x6, (6*32)(dst); \
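The two macros above move sixteen 16-byte cipher blocks between memory and eight 256-bit registers, two blocks per register, at 32-byte strides. A minimal C sketch of the same data movement (ymm_t and the *_c helper names are illustrative, not from this file):

#include <stdint.h>
#include <string.h>

/* Stand-in for one 256-bit ymm register, i.e. two 16-byte cipher blocks. */
typedef struct { uint8_t b[32]; } ymm_t;

static void load_16way_c(const uint8_t *src, ymm_t x[8])
{
	for (int i = 0; i < 8; i++)
		memcpy(&x[i], src + i * 32, 32);	/* vmovdqu (i*32)(src), xi */
}

static void store_16way_c(uint8_t *dst, const ymm_t x[8])
{
	for (int i = 0; i < 8; i++)
		memcpy(dst + i * 32, &x[i], 32);	/* vmovdqu xi, (i*32)(dst) */
}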
28 #define store_cbc_16way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7, t0) \
32 vpxor (0*32+16)(src), x1, x1; \
37 vpxor (5*32+16)(src), x6, x6; \
39 store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
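store_cbc_16way appears to be the CBC-decryption chaining step: each decrypted block is XORed with the ciphertext block 16 bytes before it (hence the +16 offsets) and the result is written out through store_16way; the first block's IV XOR is presumably done by the caller, since the offsets here start at src+16. A scalar sketch, with a hypothetical helper name:

#include <stdint.h>

/*
 * CBC decryption chaining for 16 consecutive 16-byte blocks:
 * plaintext[i] = decrypted[i] XOR ciphertext[i-1].  Block 0, which needs
 * the IV, is assumed to be handled outside this helper.
 */
static void cbc_chain_16way(const uint8_t *src /* ciphertext */,
			    uint8_t *blocks /* decrypted, updated in place */)
{
	for (int i = 1; i < 16; i++)
		for (int j = 0; j < 16; j++)
			blocks[i * 16 + j] ^= src[(i - 1) * 16 + j];
}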
55 #define load_ctr_16way(iv, bswap, x0, x1, x2, x3, x4, x5, x6, x7, t0, t0x, t1, \
58 vpsrldq $8, t0, t0; /* ab: -1:0 ; cd: -1:0 */ \
59 vpaddq t0, t0, t4; /* ab: -2:0 ; cd: -2:0 */ \
81 vpshufb t1, t2, x6; \
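load_ctr_16way constructs the 16 counter blocks: the IV is kept as a 128-bit little-endian value, incremented with carry between the two quadwords, and each counter is byte-swapped to big-endian with vpshufb (the bswap operand) before being handed to the cipher. A scalar sketch of that logic, assuming a two-quadword little-endian layout (names are illustrative):

#include <stdint.h>

/* 128-bit counter held as two little-endian 64-bit halves. */
struct le128 { uint64_t lo, hi; };

/* Increment by one, carrying into the high quadword. */
static void inc_le128_c(struct le128 *v)
{
	if (++v->lo == 0)
		v->hi++;
}

/* Byte-reverse the whole 128-bit value into the big-endian counter block
 * actually fed to the cipher (the vpshufb byte-swap step). */
static void ctr_block(const struct le128 *v, uint8_t out[16])
{
	for (int i = 0; i < 8; i++) {
		out[i]     = (uint8_t)(v->hi >> (56 - 8 * i));
		out[8 + i] = (uint8_t)(v->lo >> (56 - 8 * i));
	}
}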
88 #define store_ctr_16way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \
89 vpxor (0*32)(src), x0, x0; \
95 vpxor (6*32)(src), x6, x6; \
97 store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
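store_ctr_16way XORs the encrypted counter blocks (the keystream) into the source data and stores the result, i.e. the usual CTR combine step over 512 bytes. Sketch, with an invented helper name:

#include <stdint.h>

/* ciphertext = plaintext XOR keystream, for sixteen 16-byte blocks. */
static void ctr_xor_16way(const uint8_t *src, const uint8_t *keystream,
			  uint8_t *dst)
{
	for (int i = 0; i < 16 * 16; i++)
		dst[i] = src[i] ^ keystream[i];
}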
102 vpshufd $0x13, tmp, tmp; \
110 vpshufd $0x13, tmp0, tmp0; \
113 vpshufd $0x13, tmp1, tmp1; \
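The vpshufd $0x13 lines come from the GF(2^128) doubling helpers used to derive successive XTS tweaks: the 128-bit value is shifted left by one (or two) bits, and the shuffle/mask/xor sequence folds the carried-out bits back in as the reduction constant. A scalar sketch of a single doubling, assuming the usual little-endian XTS tweak layout and the 0x87 reduction byte:

#include <stdint.h>

struct le128 { uint64_t lo, hi; };

/* Multiply an XTS tweak by x in GF(2^128): shift left by one bit and, if a
 * bit fell off the top, xor the reduction constant 0x87 into the low byte. */
static void gf128mul_x_ble_c(struct le128 *t)
{
	uint64_t carry = t->hi >> 63;		/* bit shifted out of bit 127 */

	t->hi = (t->hi << 1) | (t->lo >> 63);	/* 128-bit left shift by one */
	t->lo = (t->lo << 1) ^ (carry ? 0x87 : 0);
}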
118 #define load_xts_16way(iv, src, dst, x0, x1, x2, x3, x4, x5, x6, x7, tiv, \
130 vpxor (0*32)(src), tiv, x0; \
131 vmovdqu tiv, (0*32)(dst); \
155 vpxor (6*32)(src), tiv, x6; \
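load_xts_16way looks like the XTS pre-whitening pass: each source block is XORed with its tweak, and the tweak itself is stored at the matching offset in dst so the post-whitening pass can pick it up again (successive tweaks come from the doubling helper above). A hedged C sketch with illustrative names, tweak derivation omitted and tweaks passed in as plain byte blocks:

#include <stdint.h>
#include <string.h>

/* x[i] = src block i XOR tweak i; park each tweak in dst for the 2nd pass. */
static void xts_prewhiten_16way(const uint8_t tweak[16][16],
				const uint8_t *src, uint8_t *dst,
				uint8_t x[16][16])
{
	for (int i = 0; i < 16; i++) {
		memcpy(dst + i * 16, tweak[i], 16);
		for (int j = 0; j < 16; j++)
			x[i][j] = src[i * 16 + j] ^ tweak[i][j];
	}
}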
166 #define store_xts_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \
167 vpxor (0*32)(dst), x0, x0; \
173 vpxor (6*32)(dst), x6, x6; \
175 store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
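store_xts_16way then XORs the cipher output with the tweaks previously parked in dst and writes the result back through store_16way, completing the post-whitening. Scalar sketch (XORing into dst in place ends up with the same bytes as the register XOR followed by the store):

#include <stdint.h>

/* dst holds the tweaks from the pre-whitening pass; xoring the cipher
 * output into it yields the final XTS output. */
static void xts_postwhiten_16way(uint8_t *dst, const uint8_t x[16][16])
{
	for (int i = 0; i < 16; i++)
		for (int j = 0; j < 16; j++)
			dst[i * 16 + j] ^= x[i][j];
}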