Lines Matching +full:4 +full:- +full:ch

2 # Implement fast SHA-256 with AVX1 instructions. (x86_64)
21 # - Redistributions of source code must retain the above
25 # - Redistributions in binary form must reproduce the above
40 # This code is described in an Intel White-Paper:
41 # "Fast SHA-256 Implementations on Intel Architecture Processors"
47 # This code schedules 1 block at a time, with 4 lanes per block
58 # Add reg to mem using reg-mem add and store
66 shld $(32-(\p1)), \p2, \p2
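The MY_ROR macro above emulates a rotate-right by n with shld: shifting a register into itself by (32 - n) is a rotate-left by (32 - n), which is the same as a rotate-right by n. A minimal C sketch of the same operation (rotr32 is my name, not from the source):

    #include <stdint.h>

    /* Rotate right by n bits (1 <= n <= 31); MY_ROR n, reg does the same
     * thing via shld $(32-n), reg, reg, i.e. a rotate-left by 32-n. */
    static inline uint32_t rotr32(uint32_t x, unsigned n)
    {
            return (x >> n) | (x << (32 - n));
    }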
93 SHUF_00BA = %xmm10 # shuffle xBxA -> 00BA
94 SHUF_DC00 = %xmm12 # shuffle xDxC -> DC00
155 ## compute W[-16] + W[-7] 4 at a time
158 MY_ROR (25-11), y0 # y0 = e >> (25-11)
160 vpalignr $4, X2, X3, XTMP0 # XTMP0 = W[-7]
161 MY_ROR (22-13), y1 # y1 = a >> (22-13)
162 xor e, y0 # y0 = e ^ (e >> (25-11))
164 MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
165 xor a, y1 # y1 = a ^ (a >> (22-13))
167 vpaddd X0, XTMP0, XTMP0 # XTMP0 = W[-7] + W[-16]
168 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
170 MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
172 vpalignr $4, X0, X1, XTMP1 # XTMP1 = W[-15]
173 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
175 xor g, y2 # y2 = CH = ((f^g)&e)^g
177 add y0, y2 # y2 = S1 + CH
178 add _XFER(%rsp), y2 # y2 = k + w + S1 + CH
180 add y2, h # h = h + S1 + CH + k + w
184 add h, d # d = d + h + S1 + CH + k + w
186 vpslld $(32-7), XTMP1, XTMP3
188 add y1, h # h = h + S1 + CH + k + w + S0
189 vpor XTMP2, XTMP3, XTMP3 # XTMP3 = W[-15] MY_ROR 7
191 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
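The group of instructions that just ended is one SHA-256 compression round interleaved with message-schedule work: y0 accumulates Sigma1(e), y1 accumulates Sigma0(a), y2 is Ch(e,f,g) built as ((f^g)&e)^g, and the _XFER stack slot holds the precomputed K[t] + W[t]. A hedged scalar C sketch of one round per FIPS 180-4 (rotr32 is the helper sketched above; variable roles follow the register comments, not the exact instruction order):

    /* One SHA-256 round: 'kw' plays the role of the K[t] + W[t] word the
     * assembly reads from _XFER(%rsp). State order is a..h. */
    static void sha256_round(uint32_t s[8], uint32_t kw)
    {
            uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
            uint32_t e = s[4], f = s[5], g = s[6], h = s[7];

            uint32_t S1  = rotr32(e, 6) ^ rotr32(e, 11) ^ rotr32(e, 25); /* y0 */
            uint32_t ch  = ((f ^ g) & e) ^ g;                            /* y2 */
            uint32_t t1  = h + S1 + ch + kw;    /* h = h + S1 + CH + k + w     */
            uint32_t S0  = rotr32(a, 2) ^ rotr32(a, 13) ^ rotr32(a, 22); /* y1 */
            uint32_t maj = (a & (b | c)) | (b & c);
            uint32_t t2  = S0 + maj;

            s[7] = g; s[6] = f; s[5] = e;
            s[4] = d + t1;                      /* d = d + h + S1 + CH + k + w */
            s[3] = c; s[2] = b; s[1] = a;
            s[0] = t1 + t2;                     /* h = ... + S0 + MAJ          */
    }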
195 MY_ROR (25-11), y0 # y0 = e >> (25-11)
196 xor e, y0 # y0 = e ^ (e >> (25-11))
198 MY_ROR (22-13), y1 # y1 = a >> (22-13)
200 xor a, y1 # y1 = a ^ (a >> (22-13))
201 MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
203 vpsrld $3, XTMP1, XTMP4 # XTMP4 = W[-15] >> 3
204 MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
205 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
208 vpslld $(32-18), XTMP1, XTMP1
209 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
210 xor g, y2 # y2 = CH = ((f^g)&e)^g
212 add y0, y2 # y2 = S1 + CH
213 add (1*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
215 vpxor XTMP2, XTMP3, XTMP3 # XTMP3 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR 18
217 add y2, h # h = h + S1 + CH + k + w
221 add h, d # d = d + h + S1 + CH + k + w
224 vpshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA}
226 add y1, h # h = h + S1 + CH + k + w + S0
227 vpaddd XTMP1, XTMP0, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
229 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
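The vector instructions threaded through these two rounds compute sigma0 of four W[-15] words at once: the vpslld/vpsrld/vpor pair builds the 7-bit rotate, the $(32-18) shift plus vpxor folds in the 18-bit rotate, and vpsrld $3 supplies the plain shift, after which the result is added to W[-16] + W[-7]. Scalar equivalent, assuming the rotr32 helper above:

    /* sigma0 per FIPS 180-4; the XMM code evaluates this for four
     * consecutive W[-15] values in one register. */
    static inline uint32_t sigma0(uint32_t w15)
    {
            return rotr32(w15, 7) ^ rotr32(w15, 18) ^ (w15 >> 3);
    }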
233 MY_ROR (25-11), y0 # y0 = e >> (25-11)
234 xor e, y0 # y0 = e ^ (e >> (25-11))
235 MY_ROR (22-13), y1 # y1 = a >> (22-13)
237 xor a, y1 # y1 = a ^ (a >> (22-13))
238 MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
239 vpsrld $10, XTMP2, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA}
241 vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] MY_ROR 19 {xBxA}
242 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
244 vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] MY_ROR 17 {xBxA}
245 MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
246 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
247 xor g, y2 # y2 = CH = ((f^g)&e)^g
250 add y0, y2 # y2 = S1 + CH
252 add (2*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
255 add y2, h # h = h + S1 + CH + k + w
259 add h, d # d = d + h + S1 + CH + k + w
263 add y1, h # h = h + S1 + CH + k + w + S0
265 vpshufd $0b01010000, XTMP0, XTMP2 # XTMP2 = W[-2] {DDCC}
267 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
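The vector work in this round evaluates sigma1(W[-2]) for the {BBAA} lanes: vpsrld $10 gives the plain shift, while the 64-bit vpsrlq shifts by 19 and 17 leave the corresponding 32-bit rotates in the even lanes. Scalar equivalent, again assuming rotr32:

    /* sigma1 per FIPS 180-4; the assembly computes it two lanes at a time
     * ({BBAA}, then {DDCC}) because the vpsrlq trick only yields a valid
     * 32-bit rotate in the even lanes. */
    static inline uint32_t sigma1(uint32_t w2)
    {
            return rotr32(w2, 17) ^ rotr32(w2, 19) ^ (w2 >> 10);
    }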
270 MY_ROR (25-11), y0 # y0 = e >> (25-11)
272 MY_ROR (22-13), y1 # y1 = a >> (22-13)
273 xor e, y0 # y0 = e ^ (e >> (25-11))
275 MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
276 vpsrld $10, XTMP2, XTMP5 # XTMP5 = W[-2] >> 10 {DDCC}
277 xor a, y1 # y1 = a ^ (a >> (22-13))
279 vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] MY_ROR 19 {xDxC}
280 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
282 MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
283 vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] MY_ROR 17 {xDxC}
284 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
286 xor g, y2 # y2 = CH = ((f^g)&e)^g
289 add y0, y2 # y2 = S1 + CH
290 add (3*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
293 add y2, h # h = h + S1 + CH + k + w
297 add h, d # d = d + h + S1 + CH + k + w
301 add y1, h # h = h + S1 + CH + k + w + S0
303 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
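Putting the schedule pieces together: each FOUR_ROUNDS_AND_SCHED pass retires four rounds and produces four new schedule words, vectorizing the scalar recurrence below (helpers from the sketches above):

    /* Scalar form of the message-schedule recurrence the macro evaluates
     * four words at a time:
     *   W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16] */
    static void sha256_schedule(uint32_t W[64])
    {
            for (int t = 16; t < 64; t++)
                    W[t] = sigma1(W[t - 2]) + W[t - 7]
                         + sigma0(W[t - 15]) + W[t - 16];
    }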
308 ## input is [rsp + _XFER + %1 * 4]
311 MY_ROR (25-11), y0 # y0 = e >> (25-11)
313 xor e, y0 # y0 = e ^ (e >> (25-11))
314 MY_ROR (22-13), y1 # y1 = a >> (22-13)
316 xor a, y1 # y1 = a ^ (a >> (22-13))
317 MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
319 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
320 MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
322 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
324 xor g, y2 # y2 = CH = ((f^g)&e)^g
325 add y0, y2 # y2 = S1 + CH
327 offset = \round * 4 + _XFER #
328 add offset(%rsp), y2 # y2 = k + w + S1 + CH
330 add y2, h # h = h + S1 + CH + k + w
333 add h, d # d = d + h + S1 + CH + k + w
336 add y1, h # h = h + S1 + CH + k + w + S0
338 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
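DO_ROUND is the same round body without schedule work; the final rounds only need the K[t] + W[t] words already staged in the _XFER area, indexed by offset = round * 4 + _XFER. In scalar terms (sha256_round from the sketch above; kw[] is a stand-in for those staged words, which the real code refreshes a few at a time):

    /* Rounds with no message-schedule work, as in DO_ROUND: each one just
     * consumes a precomputed K[t] + W[t] word. */
    static void sha256_tail_rounds(uint32_t state[8], const uint32_t kw[16])
    {
            for (int round = 0; round < 16; round++)
                    sha256_round(state, kw[round]);
    }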
368 mov 4*0(CTX), a
369 mov 4*1(CTX), b
370 mov 4*2(CTX), c
371 mov 4*3(CTX), d
372 mov 4*4(CTX), e
373 mov 4*5(CTX), f
374 mov 4*6(CTX), g
375 mov 4*7(CTX), h
409 add $4*16, TBL
438 addm (4*0)(CTX),a
439 addm (4*1)(CTX),b
440 addm (4*2)(CTX),c
441 addm (4*3)(CTX),d
442 addm (4*4)(CTX),e
443 addm (4*5)(CTX),f
444 addm (4*6)(CTX),g
445 addm (4*7)(CTX),h
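The mov block loads the eight 4-byte digest words from CTX into a..h before the rounds, and the addm block performs the feed-forward afterwards: each working variable is added back into its slot with a single reg-to-mem add (the addm macro noted near the top). A scalar sketch, assuming CTX is a plain uint32_t[8] digest:

    /* Feed-forward: fold the working variables a..h back into the digest,
     * mirroring the eight addm (4*i)(CTX) lines. */
    static void sha256_feed_forward(uint32_t digest[8], const uint32_t s[8])
    {
            for (int i = 0; i < 8; i++)
                    digest[i] += s[i];
    }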
491 # shuffle xBxA -> 00BA
497 # shuffle xDxC -> DC00
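These two byte-shuffle masks stitch the half-vectors of sigma1 results back into one register: SHUF_00BA moves the valid even lanes of the {xBxA} result into the low half and zeroes the rest, while SHUF_DC00 moves the {xDxC} lanes into the high half, so combining the two yields the four new schedule words {D,C,B,A}. A plain C sketch of that lane bookkeeping (array indices are lane positions, not the actual mask bytes):

    /* What the byte shuffles accomplish: gather the valid even lanes of
     * the xBxA and xDxC sigma1 results into one {D,C,B,A} vector. */
    static void combine_lanes(const uint32_t xBxA[4], const uint32_t xDxC[4],
                              uint32_t out[4])
    {
            out[0] = xBxA[0];   /* A: SHUF_00BA keeps the low half (00BA)  */
            out[1] = xBxA[2];   /* B                                       */
            out[2] = xDxC[0];   /* C: SHUF_DC00 fills the high half (DC00) */
            out[3] = xDxC[2];   /* D                                       */
    }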