Lines Matching +full:0 +full:x7
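The matches below appear to be fragments of the Linux kernel's AVX2 glue-helper assembly macros for the x86 crypto code (16-way load/store helpers for plain, CBC, CTR and XTS processing). Only the lines that match the query are shown, so each macro below is elided; the gaps are marked with comments.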
#define load_16way(src, x0, x1, x2, x3, x4, x5, x6, x7) \
	vmovdqu (0*32)(src), x0; \
	/* ... matching loads of x1..x6 omitted from this listing ... */ \
	vmovdqu (7*32)(src), x7;
#define store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \
	vmovdqu x0, (0*32)(dst); \
	/* ... matching stores of x1..x6 omitted from this listing ... */ \
	vmovdqu x7, (7*32)(dst);
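For orientation, here is a minimal C sketch of the data layout these load/store macros assume: sixteen 16-byte cipher blocks handled as eight 32-byte (256-bit) chunks, one per ymm register, at the (n*32) offsets seen above. The names struct blocks16, blocks16_load and blocks16_store are illustrative, not taken from the source.

#include <stdint.h>
#include <string.h>

/* Illustrative only: 16 blocks x 16 bytes = 8 chunks x 32 bytes,
 * mirroring the (n*32) offsets used by load_16way/store_16way. */
struct blocks16 {
	uint8_t chunk[8][32];	/* chunk[n] stands in for register xN */
};

static void blocks16_load(struct blocks16 *regs, const uint8_t *src)
{
	for (int n = 0; n < 8; n++)
		memcpy(regs->chunk[n], src + n * 32, 32);
}

static void blocks16_store(const struct blocks16 *regs, uint8_t *dst)
{
	for (int n = 0; n < 8; n++)
		memcpy(dst + n * 32, regs->chunk[n], 32);
}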
#define store_cbc_16way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7, t0) \
	/* ... handling of x0 via t0 omitted from this listing ... */ \
	vpxor (0*32+16)(src), x1, x1; \
	/* ... matching vpxor lines for x2..x6 omitted from this listing ... */ \
	vpxor (6*32+16)(src), x7, x7; \
	store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
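The (n*32+16)(src) offsets xor each decrypted block with the ciphertext block that precedes it in memory, which is exactly CBC decryption's output chaining. A hedged scalar sketch of that step follows; the function name cbc_decrypt_chain and the separate iv argument are illustrative assumptions, not taken from the source.

#include <stddef.h>
#include <stdint.h>

/* After the block cipher has been run on every ciphertext block, CBC
 * recovers the plaintext by xoring each result with the previous
 * ciphertext block (the IV for block 0). */
static void cbc_decrypt_chain(uint8_t *decrypted, const uint8_t *ciphertext,
			      const uint8_t *iv, size_t nblocks)
{
	for (size_t i = 0; i < nblocks; i++) {
		const uint8_t *prev = (i == 0) ? iv : ciphertext + (i - 1) * 16;

		for (int j = 0; j < 16; j++)
			decrypted[i * 16 + j] ^= prev[j];
	}
}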
#define load_ctr_16way(iv, bswap, x0, x1, x2, x3, x4, x5, x6, x7, t0, t0x, t1, \
	/* ... parameter list continues; counter setup omitted from this listing ... */ \
	vpsrldq $8, t0, t0; /* ab: -1:0 ; cd: -1:0 */ \
	vpaddq t0, t0, t4; /* ab: -2:0 ; cd: -2:0 */ \
	/* ... construction of the intermediate counter values omitted ... */ \
	vpshufb t1, t2, x7; \
	/* ... remainder of the macro omitted from this listing ... */
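load_ctr_16way builds sixteen consecutive counter blocks from the IV; the vpshufb against the bswap mask appears to be there because the 128-bit counter is big-endian on the wire while the increments are done on little-endian quadwords. A hedged scalar sketch of the same idea; ctr_make_blocks and its arguments are illustrative, not from the source.

#include <stdint.h>
#include <string.h>

/* Treat the 16-byte IV as a 128-bit big-endian counter and emit
 * nblocks consecutive counter values, one per 16-byte output block. */
static void ctr_make_blocks(uint8_t *out, uint8_t iv[16], unsigned nblocks)
{
	for (unsigned i = 0; i < nblocks; i++) {
		memcpy(out + i * 16, iv, 16);

		/* increment the big-endian counter, propagating the carry */
		for (int j = 15; j >= 0; j--) {
			if (++iv[j] != 0)
				break;
		}
	}
}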
#define store_ctr_16way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \
	vpxor (0*32)(src), x0, x0; \
	/* ... matching vpxor lines for x1..x6 omitted from this listing ... */ \
	vpxor (7*32)(src), x7, x7; \
	store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
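store_ctr_16way xors the encrypted counter blocks (the keystream held in x0..x7) with the source data and writes the result out; in CTR mode the same operation serves for both encryption and decryption. A minimal scalar equivalent; xor_keystream is an illustrative name.

#include <stddef.h>
#include <stdint.h>

/* dst = src ^ keystream, byte by byte. */
static void xor_keystream(uint8_t *dst, const uint8_t *src,
			  const uint8_t *keystream, size_t len)
{
	for (size_t i = 0; i < len; i++)
		dst[i] = src[i] ^ keystream[i];
}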
	vpshufd $0x13, tmp, tmp; \
	/* ... intervening lines omitted from this listing ... */ \
	vpshufd $0x13, tmp0, tmp0; \
	/* ... */ \
	vpshufd $0x13, tmp1, tmp1; \
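These vpshufd $0x13 lines appear to belong to the tweak-doubling helpers used by the XTS macros below: the shuffle moves the sign-extended top dwords into position so that an AND with a constant mask turns the carries out of bits 63 and 127 into the conditional xors that multiplication by x in GF(2^128) requires. A hedged scalar sketch of that tweak update, assuming the usual XTS little-endian ("ble") convention; the function name gf128mul_x_ble_scalar is illustrative.

#include <stdint.h>

/* Multiply the 128-bit XTS tweak by x in GF(2^128): shift left by one
 * bit and fold the carry out of bit 127 back in as the reduction
 * constant 0x87. */
static void gf128mul_x_ble_scalar(uint64_t t[2])
{
	uint64_t carry_lo = t[0] >> 63;	/* carry from bit 63 into bit 64 */
	uint64_t carry_hi = t[1] >> 63;	/* carry out of bit 127          */

	t[1] = (t[1] << 1) | carry_lo;
	t[0] = (t[0] << 1) ^ (carry_hi ? 0x87 : 0);
}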
#define load_xts_16way(iv, src, dst, x0, x1, x2, x3, x4, x5, x6, x7, tiv, \
	/* ... parameter list continues; tweak setup omitted from this listing ... */ \
	vpxor (0*32)(src), tiv, x0; \
	vmovdqu tiv, (0*32)(dst); \
	/* ... intervening lines for x1..x6 omitted from this listing ... */ \
	vpxor (7*32)(src), tiv, x7; \
	/* ... remainder of the macro omitted from this listing ... */
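load_xts_16way performs XTS's first whitening pass: each source chunk is xored with the running tweak, and the tweak itself is parked in the destination buffer so the store macro can reuse it. A hedged scalar sketch of that idea; xts_prewhiten and its arguments are illustrative, the byte view of the tweak assumes a little-endian host, and running the block cipher on the whitened blocks happens elsewhere.

#include <stdint.h>
#include <string.h>

/* First XTS whitening pass: xor each 16-byte block with the current
 * tweak (into the buffer standing in for x0..x7), park the tweak in
 * dst for the second pass, then advance the tweak by multiplying it
 * by x in GF(2^128). */
static void xts_prewhiten(uint8_t *blocks, uint8_t *dst, const uint8_t *src,
			  uint64_t tweak[2], unsigned nblocks)
{
	for (unsigned i = 0; i < nblocks; i++) {
		const uint8_t *t = (const uint8_t *)tweak;
		uint64_t carry_lo, carry_hi;

		for (int j = 0; j < 16; j++)
			blocks[i * 16 + j] = src[i * 16 + j] ^ t[j];
		memcpy(dst + i * 16, t, 16);	/* park tweak for the 2nd pass */

		/* advance the tweak: multiply by x in GF(2^128) */
		carry_lo = tweak[0] >> 63;
		carry_hi = tweak[1] >> 63;
		tweak[1] = (tweak[1] << 1) | carry_lo;
		tweak[0] = (tweak[0] << 1) ^ (carry_hi ? 0x87 : 0);
	}
}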
#define store_xts_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \
	vpxor (0*32)(dst), x0, x0; \
	/* ... matching vpxor lines for x1..x6 omitted from this listing ... */ \
	vpxor (7*32)(dst), x7, x7; \
	store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
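store_xts_16way is the mirror image: the tweaks that load_xts_16way parked in dst are xored into the cipher output before the final store, completing the second whitening pass. A scalar sketch of that step; xts_postwhiten is an illustrative name.

#include <stdint.h>

/* Second XTS whitening pass: dst currently holds the per-block tweaks
 * saved by the first pass; xor them into the cipher output so dst ends
 * up holding the finished result. */
static void xts_postwhiten(uint8_t *dst, const uint8_t *blocks, unsigned nblocks)
{
	for (unsigned i = 0; i < nblocks; i++)
		for (int j = 0; j < 16; j++)
			dst[i * 16 + j] ^= blocks[i * 16 + j];
}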