#define a_offset	0
#define b_offset	4
#define c_offset	8
#define d_offset	12
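These offsets select the four 32-bit words a, b, c and d within the 16-byte Twofish block. A minimal C sketch of the same layout; load_block is an illustrative name, not part of this file:

	#include <stdint.h>
	#include <string.h>

	/* Split a 16-byte block into the words the macros call a, b, c, d,
	 * using the byte offsets defined above (little-endian, as on x86). */
	static void load_block(const uint8_t in[16], uint32_t w[4])
	{
		memcpy(&w[0], in + 0,  4);	/* a_offset */
		memcpy(&w[1], in + 4,  4);	/* b_offset */
		memcpy(&w[2], in + 8,  4);	/* c_offset */
		memcpy(&w[3], in + 12, 4);	/* d_offset */
	}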
/*
 * a input register containing a (rotated 16)
 * b input register containing b
 * c input register containing c
 * d input register containing d (already rol $1)
 * operations on a and b are interleaved to increase performance
 */
#define encrypt_round(a,b,c,d,round)\
	movzx	b ## B,		%edi;\
	mov	s1(%r11,%rdi,4),%r8d;\
	movzx	a ## B,		%edi;\
	mov	s2(%r11,%rdi,4),%r9d;\
	movzx	b ## H,		%edi;\
	ror	$16,		b ## D;\
	xor	s2(%r11,%rdi,4),%r8d;\
	movzx	a ## H,		%edi;\
	ror	$16,		a ## D;\
	xor	s3(%r11,%rdi,4),%r9d;\
	movzx	b ## B,		%edi;\
	xor	s3(%r11,%rdi,4),%r8d;\
	movzx	a ## B,		%edi;\
	xor	(%r11,%rdi,4),	%r9d;\
	movzx	b ## H,		%edi;\
	ror	$15,		b ## D;\
	xor	(%r11,%rdi,4),	%r8d;\
	movzx	a ## H,		%edi;\
	xor	s1(%r11,%rdi,4),%r9d;\
	add	%r8d,		%r9d;\
	add	%r9d,		%r8d;\
	add	k+round(%r11),	%r9d;\
	xor	%r9d,		c ## D;\
	rol	$15,		c ## D;\
	add	k+4+round(%r11),%r8d;\
	xor	%r8d,		d ## D;
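In Twofish terms, %r9d accumulates t0 = g(a) and %r8d accumulates t1 = g(rol(b,8)) from the four key-dependent s-box tables, the two adds form the pseudo-Hadamard transform, and k+round / k+4+round are the round's two 32-bit subkeys. A C sketch of the same round on unrotated values; struct ctx, enc_round and their field names are assumptions standing in for the context the asm reaches through %r11:

	#include <stdint.h>

	static inline uint32_t ror32(uint32_t x, unsigned n)	/* 0 < n < 32 */
	{
		return (x >> n) | (x << (32 - n));
	}

	static inline uint32_t rol32(uint32_t x, unsigned n)	/* 0 < n < 32 */
	{
		return (x << n) | (x >> (32 - n));
	}

	struct ctx {
		uint32_t s[4][256];	/* key-dependent s-box tables s0..s3 */
		uint32_t k[40];		/* subkeys; round r uses k[2r+8], k[2r+9] */
	};

	static void enc_round(const struct ctx *ctx, uint32_t *a, uint32_t *b,
			      uint32_t *c, uint32_t *d, int r)
	{
		uint32_t t0, t1;

		t0 = ctx->s[0][*a & 0xff] ^ ctx->s[1][(*a >> 8) & 0xff] ^
		     ctx->s[2][(*a >> 16) & 0xff] ^ ctx->s[3][*a >> 24];
		t1 = ctx->s[1][*b & 0xff] ^ ctx->s[2][(*b >> 8) & 0xff] ^
		     ctx->s[3][(*b >> 16) & 0xff] ^ ctx->s[0][*b >> 24];

		/* pseudo-Hadamard transform, subkey add, 1-bit rotates */
		*c = ror32(*c ^ (t0 + t1 + ctx->k[2 * r + 8]), 1);
		*d = rol32(*d, 1) ^ (t0 + 2 * t1 + ctx->k[2 * r + 9]);
	}

The asm never performs rol(b,8) or the 1-bit rotates directly: keeping a rotated by 16, rotating b by 16 mid-round, and folding the leftover rotation amounts into the ror $15 / rol $15 steps keeps every s-box index in the low 16 bits of a register, where the B and H byte sub-registers are addressable.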
/*
 * a input register containing a (rotated 16)
 * b input register containing b
 * c input register containing c
 * d input register containing d (already rol $1)
 * operations on a and b are interleaved to increase performance
 * during the round a and b are prepared for the output whitening
 */
#define encrypt_last_round(a,b,c,d,round)\
	mov	b ## D,		%r10d;\
	shl	$32,		%r10;\
	movzx	b ## B,		%edi;\
	mov	s1(%r11,%rdi,4),%r8d;\
	movzx	a ## B,		%edi;\
	mov	s2(%r11,%rdi,4),%r9d;\
	movzx	b ## H,		%edi;\
	ror	$16,		b ## D;\
	xor	s2(%r11,%rdi,4),%r8d;\
	movzx	a ## H,		%edi;\
	ror	$16,		a ## D;\
	xor	s3(%r11,%rdi,4),%r9d;\
	movzx	b ## B,		%edi;\
	xor	s3(%r11,%rdi,4),%r8d;\
	movzx	a ## B,		%edi;\
	xor	(%r11,%rdi,4),	%r9d;\
	xor	a,		%r10;\
	movzx	b ## H,		%edi;\
	xor	(%r11,%rdi,4),	%r8d;\
	movzx	a ## H,		%edi;\
	xor	s1(%r11,%rdi,4),%r9d;\
	add	%r8d,		%r9d;\
	add	%r9d,		%r8d;\
	add	k+round(%r11),	%r9d;\
	xor	%r9d,		c ## D;\
	ror	$1,		c ## D;\
	add	k+4+round(%r11),%r8d;\
	xor	%r8d,		d ## D;
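The %r10 staging (mov b##D; shl $32; xor a) builds the 64-bit word (b << 32) | a so the output whitening can be applied with a single 64-bit XOR and store. A C sketch of that packing; whiten_pair and its parameter names are illustrative, with w_lo/w_hi standing for the two whitening subkeys the asm reads relative to w:

	#include <stdint.h>

	/* Pack two 32-bit words and whiten them in one 64-bit operation,
	 * as encrypt_last_round prepares a and b for output whitening. */
	static uint64_t whiten_pair(uint32_t lo, uint32_t hi,
				    uint32_t w_lo, uint32_t w_hi)
	{
		uint64_t packed = ((uint64_t)hi << 32) | lo;	/* shl $32; xor */
		return packed ^ (((uint64_t)w_hi << 32) | w_lo);
	}

The xor a, %r10 acts as an OR only because the 32-bit operations on a have already zeroed its upper 32 bits.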
/*
 * a input register containing a
 * b input register containing b (rotated 16)
 * c input register containing c (already rol $1)
 * d input register containing d
 * operations on a and b are interleaved to increase performance
 */
#define decrypt_round(a,b,c,d,round)\
	movzx	a ## B,		%edi;\
	mov	(%r11,%rdi,4),	%r9d;\
	movzx	b ## B,		%edi;\
	mov	s3(%r11,%rdi,4),%r8d;\
	movzx	a ## H,		%edi;\
	ror	$16,		a ## D;\
	xor	s1(%r11,%rdi,4),%r9d;\
	movzx	b ## H,		%edi;\
	ror	$16,		b ## D;\
	xor	(%r11,%rdi,4),	%r8d;\
	movzx	a ## B,		%edi;\
	xor	s2(%r11,%rdi,4),%r9d;\
	movzx	b ## B,		%edi;\
	xor	s1(%r11,%rdi,4),%r8d;\
	movzx	a ## H,		%edi;\
	ror	$15,		a ## D;\
	xor	s3(%r11,%rdi,4),%r9d;\
	movzx	b ## H,		%edi;\
	xor	s2(%r11,%rdi,4),%r8d;\
	add	%r8d,		%r9d;\
	add	%r9d,		%r8d;\
	add	k+round(%r11),	%r9d;\
	xor	%r9d,		c ## D;\
	add	k+4+round(%r11),%r8d;\
	xor	%r8d,		d ## D;\
	rol	$15,		d ## D;
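Decryption inverts the rotation order: c takes its 1-bit rotate before the round XOR (hence the macro expects it "already rol $1") and d takes it after. A C sketch of the inverse round, reusing ror32, rol32, struct ctx and the naming assumptions from the encryption sketch above:

	static void dec_round(const struct ctx *ctx, uint32_t *a, uint32_t *b,
			      uint32_t *c, uint32_t *d, int r)
	{
		uint32_t t0, t1;

		t0 = ctx->s[0][*a & 0xff] ^ ctx->s[1][(*a >> 8) & 0xff] ^
		     ctx->s[2][(*a >> 16) & 0xff] ^ ctx->s[3][*a >> 24];
		t1 = ctx->s[1][*b & 0xff] ^ ctx->s[2][(*b >> 8) & 0xff] ^
		     ctx->s[3][(*b >> 16) & 0xff] ^ ctx->s[0][*b >> 24];

		/* exact inverse of enc_round: rol before the XOR on c,
		 * ror after it on d */
		*c = rol32(*c, 1) ^ (t0 + t1 + ctx->k[2 * r + 8]);
		*d = ror32(*d ^ (t0 + 2 * t1 + ctx->k[2 * r + 9]), 1);
	}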
/*
 * a input register containing a
 * b input register containing b (rotated 16)
 * c input register containing c (already rol $1)
 * d input register containing d
 * operations on a and b are interleaved to increase performance
 * during the round a and b are prepared for the output whitening
 */
#define decrypt_last_round(a,b,c,d,round)\
	movzx	a ## B,		%edi;\
	mov	(%r11,%rdi,4),	%r9d;\
	movzx	b ## B,		%edi;\
	mov	s3(%r11,%rdi,4),%r8d;\
	movzx	b ## H,		%edi;\
	ror	$16,		b ## D;\
	xor	(%r11,%rdi,4),	%r8d;\
	movzx	a ## H,		%edi;\
	mov	b ## D,		%r10d;\
	shl	$32,		%r10;\
	xor	a,		%r10;\
	ror	$16,		a ## D;\
	xor	s1(%r11,%rdi,4),%r9d;\
	movzx	b ## B,		%edi;\
	xor	s1(%r11,%rdi,4),%r8d;\
	movzx	a ## B,		%edi;\
	xor	s2(%r11,%rdi,4),%r9d;\
	movzx	b ## H,		%edi;\
	xor	s2(%r11,%rdi,4),%r8d;\
	movzx	a ## H,		%edi;\
	xor	s3(%r11,%rdi,4),%r9d;\
	add	%r8d,		%r9d;\
	add	%r9d,		%r8d;\
	add	k+round(%r11),	%r9d;\
	xor	%r9d,		c ## D;\
	add	k+4+round(%r11),%r8d;\
	xor	%r8d,		d ## D;\
	ror	$1,		d ## D;
	encrypt_round(R0,R1,R2,R3,4*8);

	decrypt_round(R2,R3,R0,R1,4*8);
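The round argument is a byte offset into the subkey array: each round consumes two 4-byte subkeys (k+round and k+4+round), so round r passes 8*r, and 4*8 in the calls above selects round 4. Consecutive calls swap the a/b and c/d roles instead of moving data between registers. A sketch of the schedule the unrolled asm follows, reusing enc_round from the earlier sketch; encrypt_body is an illustrative name, and the real code ends with encrypt_last_round for round 15 to fold in the whitening preparation:

	static void encrypt_body(const struct ctx *ctx, uint32_t w[4])
	{
		for (int r = 0; r < 16; r += 2) {
			enc_round(ctx, &w[0], &w[1], &w[2], &w[3], r);		/* round arg 8*r */
			enc_round(ctx, &w[2], &w[3], &w[0], &w[1], r + 1);	/* round arg 8*(r+1) */
		}
	}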