Lines Matching +full:6 +full:e +full:- +full:7
2 # Implement fast SHA-256 with AVX2 instructions. (x86_64)
21 # - Redistributions of source code must retain the above
25 # - Redistributions in binary form must reproduce the above
41 # This code is described in an Intel White-Paper:
42 # "Fast SHA-256 Implementations on Intel Architecture Processors"
59 # Add reg to mem using reg-mem add and store
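The helper described by this comment folds a register into a 32-bit word in memory. A minimal sketch of such a macro in GAS syntax, assuming AT&T operand order (the argument names p1/p2 are illustrative, not taken from this listing):

# addm  mem, reg : mem and reg both end up holding mem + reg
.macro addm p1 p2
        add     \p1, \p2        # reg += mem  (the reg-mem form of add)
        mov     \p2, \p1        # store the sum back to mem
.endm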
86 SHUF_00BA = %ymm10 # shuffle xBxA -> 00BA
87 SHUF_DC00 = %ymm12 # shuffle xDxC -> DC00
97 e = %edx # clobbers NUM_BLKS
145 f = e
146 e = d
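The f = e and e = d lines above are GAS symbol assignments, not data moves: after every round the names a..h are rebound one register down, so the round code never has to copy registers. A sketch of such a renaming macro (the macro name and the TMP_ symbol are illustrative; old_h matches the symbol used later in this listing):

.macro ROTATE_ARGS
old_h = h       # remember which register held h, for the deferred h update
TMP_  = h
h = g           # every working variable shifts down one register ...
g = f
f = e
e = d
d = c
c = b
b = a
a = TMP_        # ... and the old h register becomes the new a
.endm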
157 rorx $25, e, y0 # y0 = e >> 25 # S1A
158 rorx $11, e, y1 # y1 = e >> 11 # S1B
160 addl \disp(%rsp, SRND), h # h = k + w + h # --
162 vpalignr $4, X2, X3, XTMP0 # XTMP0 = W[-7]
166 xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
168 vpaddd X0, XTMP0, XTMP0 # XTMP0 = W[-7] + W[-16]
169 rorx $6, e, y1 # y1 = (e >> 6) # S1
171 and e, y2 # y2 = (f^g)&e # CH
172 xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
174 add h, d # d = k + w + h + d # --
177 vpalignr $4, X0, X1, XTMP1 # XTMP1 = W[-15]
181 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
182 vpsrld $7, XTMP1, XTMP2 # XTMP2 = W[-15] >> 7
187 add y0, y2 # y2 = S1 + CH # --
188 vpslld $(32-7), XTMP1, XTMP3 # XTMP3 = W[-15] << (32-7)
190 add y1, h # h = k + w + h + S0 # --
192 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
193 vpor XTMP2, XTMP3, XTMP3 # XTMP3 = W[-15] ror 7
196 add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
197 add y3, h # h = t1 + S0 + MAJ # --
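In the round body above, the three rorx results xored into y0 form Sigma1(e) = (e ror 6) ^ (e ror 11) ^ (e ror 25); the ">>" in the comments is shorthand for a rotate, which is what rorx (BMI2) computes without touching the flags. y2 builds CH(e, f, g) as ((f ^ g) & e) ^ g, which needs only one and. A standalone sketch of the same two computations, with register choices picked here purely for illustration (SysV arguments: e in %edi, f in %esi, g in %edx):

        .text
        .globl  sigma1_ch               # uint32_t sigma1_ch(uint32_t e, uint32_t f, uint32_t g)
sigma1_ch:                              # returns Sigma1(e) + CH(e, f, g) in %eax
        rorx    $25, %edi, %eax         # e ror 25
        rorx    $11, %edi, %ecx         # e ror 11
        xor     %ecx, %eax
        rorx    $6, %edi, %ecx          # e ror 6
        xor     %ecx, %eax              # %eax = Sigma1(e)
        mov     %esi, %ecx              # f
        xor     %edx, %ecx              # f ^ g
        and     %edi, %ecx              # (f ^ g) & e
        xor     %edx, %ecx              # CH = ((f ^ g) & e) ^ g
        add     %ecx, %eax              # Sigma1(e) + CH(e, f, g)
        ret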
205 rorx $25, e, y0 # y0 = e >> 25 # S1A
206 rorx $11, e, y1 # y1 = e >> 11 # S1B
208 addl offset(%rsp, SRND), h # h = k + w + h # --
212 vpsrld $3, XTMP1, XTMP4 # XTMP4 = W[-15] >> 3
215 xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
219 rorx $6, e, y1 # y1 = (e >> 6) # S1
220 xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
222 and e, y2 # y2 = (f^g)&e # CH
223 add h, d # d = k + w + h + d # --
225 vpslld $(32-18), XTMP1, XTMP1 # XTMP1 = W[-15] << (32-18)
231 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
233 vpxor XTMP2, XTMP3, XTMP3 # XTMP3 = W[-15] ror 7 ^ W[-15] ror 18
237 add y0, y2 # y2 = S1 + CH # --
240 vpshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA}
242 add y1, h # h = k + w + h + S0 # --
244 vpaddd XTMP1, XTMP0, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
245 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
246 add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
247 add y3, h # h = t1 + S0 + MAJ # --
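AVX2 has no 32-bit vector rotate, so W[-15] ror 7 above is assembled from a right shift by 7, a left shift by 32-7 and an or; W[-15] ror 18 is built the same way from the $(32-18) shift and xors, and sigma0(x) = (x ror 7) ^ (x ror 18) ^ (x >> 3) is completed with the >> 3 term before being added into W[-16] + W[-7]. A self-contained sketch of that shift-and-combine idea as a macro (macro and parameter names are invented here; \x is input and output, \t1 and \t2 are scratch ymm registers):

# sigma0(x) = (x ror 7) ^ (x ror 18) ^ (x >> 3), on eight dwords at once
.macro vec_sigma0 x t1 t2
        vpsrld  $7, \x, \t1             # x >> 7
        vpslld  $(32-7), \x, \t2        # x << 25
        vpor    \t2, \t1, \t1           # x ror 7
        vpsrld  $18, \x, \t2            # low half of x ror 18
        vpxor   \t2, \t1, \t1           # (bits are disjoint, so xor == or)
        vpslld  $(32-18), \x, \t2       # high half of x ror 18
        vpxor   \t2, \t1, \t1           # (x ror 7) ^ (x ror 18)
        vpsrld  $3, \x, \t2             # x >> 3
        vpxor   \t2, \t1, \x            # \x = sigma0(x)
.endm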
249 vpsrld $10, XTMP2, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA}
257 rorx $25, e, y0 # y0 = e >> 25 # S1A
259 addl offset(%rsp, SRND), h # h = k + w + h # --
261 vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] ror 19 {xBxA}
262 rorx $11, e, y1 # y1 = e >> 11 # S1B
268 xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
269 vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] ror 17 {xBxA}
270 and e, y2 # y2 = (f^g)&e # CH
272 rorx $6, e, y1 # y1 = (e >> 6) # S1
274 add h, d # d = k + w + h + d # --
277 xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
280 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
290 add y0, y2 # y2 = S1 + CH # --
291 vpshufd $0b01010000, XTMP0, XTMP2 # XTMP2 = W[-2] {DDCC}
294 add y1, h # h = k + w + h + S0 # --
295 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
296 add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
298 add y3, h # h = t1 + S0 + MAJ # --
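For sigma1(x) = (x ror 17) ^ (x ror 19) ^ (x >> 10), only two new schedule words are handled per half, so the vpshufd above first copies each 32-bit value across a 64-bit lane (the {BBAA} layout); after that, vpsrlq $17 and vpsrlq $19 on the duplicated qword deliver the 32-bit rotate in the even dword of each lane, while the odd dwords (the "x" slots) hold garbage that is discarded later. A sketch of that trick (macro and parameter names invented here; \v must already hold the duplicated {B B A A} layout):

# sigma1 on two dwords A, B presented as {B B A A}
# (each value copied across its 64-bit lane); the result is
# valid only in the even dword positions: {x s1(B) x s1(A)}.
.macro vec_sigma1_xbxa v t1 t2
        vpsrld  $10, \v, \t1            # x >> 10 in every dword
        vpsrlq  $19, \v, \t2            # x ror 19 in the even dwords
        vpxor   \t2, \t1, \t1
        vpsrlq  $17, \v, \t2            # x ror 17 in the even dwords
        vpxor   \t2, \t1, \v            # sigma1(x) in the even dwords
.endm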
306 rorx $25, e, y0 # y0 = e >> 25 # S1A
307 rorx $11, e, y1 # y1 = e >> 11 # S1B
309 addl offset(%rsp, SRND), h # h = k + w + h # --
313 vpsrld $10, XTMP2, XTMP5 # XTMP5 = W[-2] >> 10 {DDCC}
316 xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
320 vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] ror 19 {xDxC}
321 rorx $6, e, y1 # y1 = (e >> 6) # S1
322 and e, y2 # y2 = (f^g)&e # CH
323 add h, d # d = k + w + h + d # --
326 vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] ror 17 {xDxC}
327 xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
328 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
332 add y0, y2 # y2 = S1 + CH # --
336 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
347 add y1, h # h = k + w + h + S0 # --
348 add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
349 add y3, h # h = t1 + S0 + MAJ # --
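The two passes above ({BBAA}, then {DDCC}) exist because the schedule is produced four words at a time while sigma1 reads W[-2]: the two newer of the four words cannot be scheduled until the two older ones have been computed. For reference, the scalar recurrence being vectorized is the standard SHA-256 message schedule:

# W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]   (mod 2^32)
#   sigma0(x) = (x ror  7) ^ (x ror 18) ^ (x >>  3)
#   sigma1(x) = (x ror 17) ^ (x ror 19) ^ (x >> 10)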
359 rorx $25, e, y0 # y0 = e >> 25 # S1A
360 rorx $11, e, y1 # y1 = e >> 11 # S1B
363 xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
364 rorx $6, e, y1 # y1 = (e >> 6) # S1
365 and e, y2 # y2 = (f^g)&e # CH
367 xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
369 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
375 addl \disp(%rsp, SRND), h # h = k + w + h # --
382 add y0, y2 # y2 = S1 + CH # --
385 add h, d # d = k + w + h + d # --
387 add y1, h # h = k + w + h + S0 # --
388 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
394 add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
396 rorx $25, e, y0 # y0 = e >> 25 # S1A
397 rorx $11, e, y1 # y1 = e >> 11 # S1B
400 xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
401 rorx $6, e, y1 # y1 = (e >> 6) # S1
402 and e, y2 # y2 = (f^g)&e # CH
403 add y3, old_h # h = t1 + S0 + MAJ # --
405 xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
407 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
414 addl offset(%rsp, SRND), h # h = k + w + h # --
421 add y0, y2 # y2 = S1 + CH # --
424 add h, d # d = k + w + h + d # --
426 add y1, h # h = k + w + h + S0 # --
428 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
434 add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
436 rorx $25, e, y0 # y0 = e >> 25 # S1A
437 rorx $11, e, y1 # y1 = e >> 11 # S1B
440 xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
441 rorx $6, e, y1 # y1 = (e >> 6) # S1
442 and e, y2 # y2 = (f^g)&e # CH
443 add y3, old_h # h = t1 + S0 + MAJ # --
445 xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
447 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
454 addl offset(%rsp, SRND), h # h = k + w + h # --
461 add y0, y2 # y2 = S1 + CH # --
464 add h, d # d = k + w + h + d # --
466 add y1, h # h = k + w + h + S0 # --
468 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
474 add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
476 rorx $25, e, y0 # y0 = e >> 25 # S1A
477 rorx $11, e, y1 # y1 = e >> 11 # S1B
480 xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
481 rorx $6, e, y1 # y1 = (e >> 6) # S1
482 and e, y2 # y2 = (f^g)&e # CH
483 add y3, old_h # h = t1 + S0 + MAJ # --
485 xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
487 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
494 addl offset(%rsp, SRND), h # h = k + w + h # --
501 add y0, y2 # y2 = S1 + CH # --
504 add h, d # d = k + w + h + d # --
506 add y1, h # h = k + w + h + S0 # --
508 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
511 add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
513 add y3, h # h = t1 + S0 + MAJ # --
538 and $-32, %rsp # align rsp to 32 byte boundary
540 shl $6, NUM_BLKS # convert to bytes
542 lea -64(INP, NUM_BLKS), NUM_BLKS # pointer to last block
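shl $6 turns the block count into a byte count (one SHA-256 block is 64 bytes), and the lea then points NUM_BLKS at the first byte of the last block, giving the main loop a simple end pointer to compare against. A minimal skeleton of that pattern (label names are invented; NUM_BLKS = %rdx is implied by the "clobbers NUM_BLKS" note on e = %edx earlier in this listing, while INP = %rsi is an assumption):

INP      = %rsi                         # input pointer (assumed 2nd argument)
NUM_BLKS = %rdx                         # number of 64-byte blocks (3rd argument)
        shl     $6, NUM_BLKS            # blocks -> bytes
        jz      .Ldone                  # zero blocks: nothing to hash
        lea     -64(INP, NUM_BLKS), NUM_BLKS    # address of the last block
.Lloop:
        # ... hash the 64-byte block at (INP) ...
        add     $64, INP                # advance to the next block
        cmp     NUM_BLKS, INP
        jbe     .Lloop                  # keep going while INP <= last block
.Ldone: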
553 mov 4*4(CTX), e
555 mov 4*6(CTX), g
556 mov 4*7(CTX), h
636 addm (4*4)(CTX), e
638 addm (4*6)(CTX), g
639 addm (4*7)(CTX), h
662 addm (4*4)(CTX), e
664 addm (4*6)(CTX), g
665 addm (4*7)(CTX), h
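The digest is eight consecutive 32-bit words in CTX, so e, g and h live at byte offsets 4*4, 4*6 and 4*7; after a block is processed, addm folds each working register back into its slot. Assuming addm behaves as sketched near the top of this listing, the last line above expands to:

        add     (4*7)(CTX), h           # h += CTX[7]
        mov     h, (4*7)(CTX)           # CTX[7] = CTX[7] + h, the per-block state feed-forward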
691 mov (4*4)(CTX), e
693 mov (4*6)(CTX), g
694 mov (4*7)(CTX), h
757 # shuffle xBxA -> 00BA
763 # shuffle xDxC -> DC00
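These comments label the vpshufb masks that compact the schedule results: sigma1 leaves its values in the even dword slots only (the "x" positions are garbage), so one mask moves B and A into the low half and zeroes the rest (00BA), and the other moves D and C into the high half (DC00). A sketch of how such 256-bit constants can be declared (label names and byte values here are derived from the comments, not copied from the file; a vpshufb index byte with the high bit set, e.g. 0xFF, writes a zero):

.section .rodata
.align 32
shuf_00BA:                              # {x B x A} -> {0 0 B A}: keep dwords 0 and 2, zero dwords 1 and 3
        .octa 0xFFFFFFFFFFFFFFFF0b0a090803020100
        .octa 0xFFFFFFFFFFFFFFFF0b0a090803020100        # same mask for the upper 128-bit lane
shuf_DC00:                              # {x D x C} -> {D C 0 0}: move dwords 0 and 2 to the top, zero the bottom
        .octa 0x0b0a090803020100FFFFFFFFFFFFFFFF
        .octa 0x0b0a090803020100FFFFFFFFFFFFFFFF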