Lines Matching +full:4 +full:- +full:ch

2 # Implement fast SHA-256 with AVX1 instructions. (x86_64)
21 # - Redistributions of source code must retain the above
25 # - Redistributions in binary form must reproduce the above
40 # This code is described in an Intel White-Paper:
41 # "Fast SHA-256 Implementations on Intel Architecture Processors"
47 # This code schedules 1 block at a time, with 4 lanes per block
59 # Add reg to mem using reg-mem add and store
67 shld $(32-(\p1)), \p2, \p2 # rotate \p2 left by (32-\p1), i.e. right by \p1
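
The shld line above is the body of the MY_ROR macro: shifting a register left while refilling it from its own top bits is a rotate left, and a left rotate by (32-n) equals a right rotate by n. A minimal C sketch of that identity (ror32/rol32 are illustrative names, not from the source):

    #include <assert.h>
    #include <stdint.h>

    /* rotate right / rotate left by n bits, 0 < n < 32 */
    static inline uint32_t ror32(uint32_t x, unsigned n)
    {
        return (x >> n) | (x << (32 - n));
    }

    static inline uint32_t rol32(uint32_t x, unsigned n)
    {
        return (x << n) | (x >> (32 - n));
    }

    int main(void)
    {
        /* MY_ROR n, reg == shld $(32-n), reg, reg */
        uint32_t x = 0x12345678;
        assert(rol32(x, 32 - 7) == ror32(x, 7));
        return 0;
    }
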
94 SHUF_00BA = %xmm10 # shuffle xBxA -> 00BA
95 SHUF_DC00 = %xmm12 # shuffle xDxC -> DC00
156 ## compute W[-16] + W[-7] 4 at a time
159 MY_ROR (25-11), y0 # y0 = e >> (25-11)
161 vpalignr $4, X2, X3, XTMP0 # XTMP0 = W[-7]
162 MY_ROR (22-13), y1 # y1 = a >> (22-13)
163 xor e, y0 # y0 = e ^ (e >> (25-11))
165 MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
166 xor a, y1 # y1 = a ^ (a >> (22-13))
168 vpaddd X0, XTMP0, XTMP0 # XTMP0 = W[-7] + W[-16]
169 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
171 MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
173 vpalignr $4, X0, X1, XTMP1 # XTMP1 = W[-15]
174 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
176 xor g, y2 # y2 = CH = ((f^g)&e)^g
178 add y0, y2 # y2 = S1 + CH
179 add _XFER(%rsp), y2 # y2 = k + w + S1 + CH
181 add y2, h # h = h + S1 + CH + k + w
185 add h, d # d = d + h + S1 + CH + k + w
187 vpslld $(32-7), XTMP1, XTMP3 # XTMP3 = W[-15] << (32-7)
189 add y1, h # h = h + S1 + CH + k + w + S0
190 vpor XTMP2, XTMP3, XTMP3 # XTMP3 = W[-15] MY_ROR 7
192 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
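
Each of these blocks interleaves one scalar SHA-256 round with a slice of the vectorized message schedule; the staggered MY_ROR/xor pairs on y0 and y1 evaluate the usual S1 and S0. The round itself follows the comments exactly: h first absorbs S1 + CH + k + w, d then absorbs the updated h, and h finally adds S0 + MAJ. A scalar C sketch of that ordering (the function and state array are illustrative; the assembly renames registers between rounds rather than moving values):

    #include <stdint.h>

    static inline uint32_t ror32(uint32_t x, unsigned n)
    {
        return (x >> n) | (x << (32 - n));
    }

    /* One round; kw is the precomputed K[t] + W[t] from the _XFER slot. */
    static void sha256_round(uint32_t s[8], uint32_t kw)
    {
        uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
        uint32_t e = s[4], f = s[5], g = s[6], h = s[7];

        uint32_t S1  = ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25);
        uint32_t CH  = ((f ^ g) & e) ^ g;      /* == (e & f) ^ (~e & g) */
        uint32_t S0  = ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22);
        uint32_t MAJ = (a & b) ^ (a & c) ^ (b & c);

        h += S1 + CH + kw;   /* h = h + S1 + CH + k + w            */
        d += h;              /* d = d + h + S1 + CH + k + w        */
        h += S0 + MAJ;       /* h = h + S1 + CH + k + w + S0 + MAJ */

        /* rotate the working variables: new a is the updated h,
         * new e is the updated d */
        s[0] = h; s[1] = a; s[2] = b; s[3] = c;
        s[4] = d; s[5] = e; s[6] = f; s[7] = g;
    }
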
196 MY_ROR (25-11), y0 # y0 = e >> (25-11)
197 xor e, y0 # y0 = e ^ (e >> (25-11))
199 MY_ROR (22-13), y1 # y1 = a >> (22-13)
201 xor a, y1 # y1 = a ^ (a >> (22-13))
202 MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
204 vpsrld $3, XTMP1, XTMP4 # XTMP4 = W[-15] >> 3
205 MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
206 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
209 vpslld $(32-18), XTMP1, XTMP1 # XTMP1 = W[-15] << (32-18)
210 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
211 xor g, y2 # y2 = CH = ((f^g)&e)^g
213 add y0, y2 # y2 = S1 + CH
214 add (1*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
216 vpxor XTMP2, XTMP3, XTMP3 # XTMP3 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR 18
218 add y2, h # h = h + S1 + CH + k + w
222 add h, d # d = d + h + S1 + CH + k + w
225 vpshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA}
227 add y1, h # h = h + S1 + CH + k + w + S0
228 vpaddd XTMP1, XTMP0, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
230 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
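
The vector work in this second block finishes s0 of W[-15] and folds it into W[-16] + W[-7]. AVX1 has no packed rotate, so each 32-bit rotate is assembled for all four lanes at once from a left shift by (32-n), a right shift by n and an OR/XOR, exactly as the vpslld/vpsrld/vpor/vpxor sequence does. A per-lane scalar sketch (names illustrative):

    #include <stdint.h>

    /* rotate built the same way as the vpslld/vpsrld/vpor sequence */
    static inline uint32_t ror_by_shifts(uint32_t x, unsigned n)
    {
        return (x << (32 - n)) | (x >> n);
    }

    /* s0(W[-15]) = (W[-15] ror 7) ^ (W[-15] ror 18) ^ (W[-15] >> 3) */
    static inline uint32_t sigma0(uint32_t w15)
    {
        return ror_by_shifts(w15, 7) ^ ror_by_shifts(w15, 18) ^ (w15 >> 3);
    }
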
234 MY_ROR (25-11), y0 # y0 = e >> (25-11)
235 xor e, y0 # y0 = e ^ (e >> (25-11))
236 MY_ROR (22-13), y1 # y1 = a >> (22-13)
238 xor a, y1 # y1 = a ^ (a >> (22-13))
239 MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
240 vpsrld $10, XTMP2, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA}
242 vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] MY_ROR 19 {xBxA}
243 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
245 vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] MY_ROR 17 {xBxA}
246 MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
247 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
248 xor g, y2 # y2 = CH = ((f^g)&e)^g
251 add y0, y2 # y2 = S1 + CH
253 add (2*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
256 add y2, h # h = h + S1 + CH + k + w
260 add h, d # d = d + h + S1 + CH + k + w
264 add y1, h # h = h + S1 + CH + k + w + S0
266 vpshufd $0b01010000, XTMP0, XTMP2 # XTMP2 = W[-2] {DDCC}
268 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
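
The third block computes s1 of W[-2] for the first two lanes. vpshufd $0b11111010 copies each useful dword into both halves of a qword ({BBAA}), so a 64-bit vpsrlq by 17 or 19 leaves the 32-bit rotate in the low dword of each qword; the junk in the high dwords is dropped later by the 00BA shuffle. A per-lane scalar sketch of that trick (names illustrative):

    #include <stdint.h>

    /* 32-bit rotate via a 64-bit shift of a duplicated dword, as the
     * vpsrlq $17 / $19 on the {BBAA}/{DDCC} layout does per lane */
    static inline uint32_t ror_via_qword_shift(uint32_t x, unsigned n)
    {
        uint64_t twice = ((uint64_t)x << 32) | x;   /* x:x in one qword */
        return (uint32_t)(twice >> n);              /* low dword = x ror n */
    }

    /* s1(W[-2]) = (W[-2] ror 17) ^ (W[-2] ror 19) ^ (W[-2] >> 10) */
    static inline uint32_t sigma1(uint32_t w2)
    {
        return ror_via_qword_shift(w2, 17) ^
               ror_via_qword_shift(w2, 19) ^
               (w2 >> 10);
    }
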
271 MY_ROR (25-11), y0 # y0 = e >> (25-11)
273 MY_ROR (22-13), y1 # y1 = a >> (22-13)
274 xor e, y0 # y0 = e ^ (e >> (25-11))
276 MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
277 vpsrld $10, XTMP2, XTMP5 # XTMP5 = W[-2] >> 10 {DDCC}
278 xor a, y1 # y1 = a ^ (a >> (22-13))
280 vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] MY_ROR 19 {xDxC}
281 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
283 MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
284 vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] MY_ROR 17 {xDxC}
285 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
287 xor g, y2 # y2 = CH = ((f^g)&e)^g
290 add y0, y2 # y2 = S1 + CH
291 add (3*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
294 add y2, h # h = h + S1 + CH + k + w
298 add h, d # d = d + h + S1 + CH + k + w
302 add y1, h # h = h + S1 + CH + k + w + S0
304 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
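
The fourth block repeats s1 for the upper two lanes; vpshufd $0b01010000 of XTMP0 works because W[t] and W[t+1], produced just above, are the W[-2] inputs of W[t+2] and W[t+3]. That dependency is why one pass over these four rounds extends the schedule by exactly four words, in two s1 halves. A scalar reference for the four words it produces (schedule_4 is an illustrative name):

    #include <stdint.h>

    static inline uint32_t ror32(uint32_t x, unsigned n)
    {
        return (x >> n) | (x << (32 - n));
    }

    /* W[i] = W[i-16] + s0(W[i-15]) + W[i-7] + s1(W[i-2]), four at a time */
    static void schedule_4(uint32_t W[64], int t)
    {
        for (int i = t; i < t + 4; i++) {
            uint32_t s0 = ror32(W[i - 15], 7) ^ ror32(W[i - 15], 18) ^ (W[i - 15] >> 3);
            uint32_t s1 = ror32(W[i - 2], 17) ^ ror32(W[i - 2], 19) ^ (W[i - 2] >> 10);
            W[i] = W[i - 16] + s0 + W[i - 7] + s1;
        }
    }
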
309 ## input is [rsp + _XFER + \round * 4]
312 MY_ROR (25-11), y0 # y0 = e >> (25-11)
314 xor e, y0 # y0 = e ^ (e >> (25-11))
315 MY_ROR (22-13), y1 # y1 = a >> (22-13)
317 xor a, y1 # y1 = a ^ (a >> (22-13))
318 MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
320 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
321 MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
323 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
325 xor g, y2 # y2 = CH = ((f^g)&e)^g
326 add y0, y2 # y2 = S1 + CH
328 offset = \round * 4 + _XFER #
329 add offset(%rsp), y2 # y2 = k + w + S1 + CH
331 add y2, h # h = h + S1 + CH + k + w
334 add h, d # d = d + h + S1 + CH + k + w
337 add y1, h # h = h + S1 + CH + k + w + S0
339 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
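
DO_ROUND never adds K[t] and W[t] separately at round time: as the comments show, each round loads a single precomputed "k + w" dword from the _XFER stack area at byte offset \round * 4, presumably filled by a vector add of four round constants to the four message words before each group of four rounds. A sketch of that staging under those assumptions (K256_slice, W_slice and xfer are illustrative names):

    #include <stdint.h>

    /* Stage four K[t] + W[t] sums, one dword per round. */
    static void fill_xfer(uint32_t xfer[4], const uint32_t K256_slice[4],
                          const uint32_t W_slice[4])
    {
        for (int i = 0; i < 4; i++)
            xfer[i] = K256_slice[i] + W_slice[i];
    }

    /* Round r then consumes xfer[r], i.e. the dword at r * 4 + _XFER. */
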
368 mov 4*0(CTX), a
369 mov 4*1(CTX), b
370 mov 4*2(CTX), c
371 mov 4*3(CTX), d
372 mov 4*4(CTX), e
373 mov 4*5(CTX), f
374 mov 4*6(CTX), g
375 mov 4*7(CTX), h
409 add $4*16, TBL # advance TBL past the 16 round constants just consumed
438 addm (4*0)(CTX),a
439 addm (4*1)(CTX),b
440 addm (4*2)(CTX),c
441 addm (4*3)(CTX),d
442 addm (4*4)(CTX),e
443 addm (4*5)(CTX),f
444 addm (4*6)(CTX),g
445 addm (4*7)(CTX),h
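
The mov block at lines 368-375 loads the eight 32-bit chaining words from CTX into a..h, and the addm block above folds the final working variables back into the same words, which is the feed-forward the addm macro (add reg to mem, store back) exists for. A C sketch of that frame, with an illustrative context struct and a placeholder for the round/schedule code shown earlier:

    #include <stdint.h>

    struct sha256_ctx {            /* illustrative; CTX points at 8 dwords */
        uint32_t state[8];         /* offsets 4*0 .. 4*7 */
    };

    static void compress_block_frame(struct sha256_ctx *ctx,
                                     void (*do_64_rounds)(uint32_t v[8]))
    {
        uint32_t v[8];

        for (int i = 0; i < 8; i++)    /* mov 4*i(CTX), reg */
            v[i] = ctx->state[i];

        do_64_rounds(v);               /* the rounds and schedule above */

        for (int i = 0; i < 8; i++)    /* addm (4*i)(CTX), reg */
            ctx->state[i] += v[i];
    }
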
491 # shuffle xBxA -> 00BA
497 # shuffle xDxC -> DC00
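
SHUF_00BA and SHUF_DC00 are the byte-shuffle masks that merge the two s1 halves: the halves arrive as {xBxA} and {xDxC} (useful dwords in the even lanes, junk in the odd ones), and the shuffles compact them to {0,0,B,A} and {D,C,0,0} so each half only touches its own two lanes when added into XTMP0. A lane-level sketch, modelling one 128-bit register as four dwords (array names are illustrative):

    #include <stdint.h>

    /* lane 0 is the lowest dword; "xBxA" means A in lane 0, B in lane 2 */
    static void shuf_00ba(const uint32_t xBxA[4], uint32_t out[4])
    {
        out[0] = xBxA[0];   /* A */
        out[1] = xBxA[2];   /* B */
        out[2] = 0;
        out[3] = 0;
    }

    static void shuf_dc00(const uint32_t xDxC[4], uint32_t out[4])
    {
        out[0] = 0;
        out[1] = 0;
        out[2] = xDxC[0];   /* C */
        out[3] = xDxC[2];   /* D */
    }
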