Lines Matching +full:4 +full:- +full:ch
2 # Implement fast SHA-256 with SSSE3 instructions. (x86_64)
21 # - Redistributions of source code must retain the above
25 # - Redistributions in binary form must reproduce the above
41 # This code is described in an Intel White-Paper:
42 # "Fast SHA-256 Implementations on Intel Architecture Processors"
57 # Add reg to mem using reg-mem add and store
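The addm macro body is not among the matched lines; a hypothetical C model of the add-then-store pairing the comment describes, with the sum formed in the register and written back to memory:

#include <stdint.h>

static inline void addm(uint32_t *mem, uint32_t *reg)
{
	*reg += *mem;	/* add (mem), reg -- reg-mem add */
	*mem  = *reg;	/* mov reg, (mem) -- store back */
}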
86 SHUF_00BA = %xmm10 # shuffle xBxA -> 00BA
87 SHUF_DC00 = %xmm11 # shuffle xDxC -> DC00
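A sketch of what these pshufb masks do, in SSSE3 intrinsics; the byte patterns are assumed to match the SHUF_00BA/SHUF_DC00 constants described near the end of this listing (helper names are mine):

#include <tmmintrin.h>		/* SSSE3: _mm_shuffle_epi8 == pshufb */

/* {x,B,x,A} -> {0,0,B,A}: keep dwords 2 and 0, zero the upper half */
static __m128i shuf_00ba(__m128i xBxA)
{
	const __m128i m = _mm_set_epi8(-1, -1, -1, -1, -1, -1, -1, -1,
				       11, 10,  9,  8,  3,  2,  1,  0);
	return _mm_shuffle_epi8(xBxA, m);
}

/* {x,D,x,C} -> {D,C,0,0}: same dwords, moved to the upper half */
static __m128i shuf_dc00(__m128i xDxC)
{
	const __m128i m = _mm_set_epi8(11, 10,  9,  8,  3,  2,  1,  0,
				       -1, -1, -1, -1, -1, -1, -1, -1);
	return _mm_shuffle_epi8(xDxC, m);
}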
149 ## compute W[-16] + W[-7] 4 at a time
152 ror $(25-11), y0 # y0 = e >> (25-11)
154 palignr $4, X2, XTMP0 # XTMP0 = W[-7]
155 ror $(22-13), y1 # y1 = a >> (22-13)
156 xor e, y0 # y0 = e ^ (e >> (25-11))
158 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
160 xor a, y1 # y1 = a ^ (a >> (22-13))
162 paddd X0, XTMP0 # XTMP0 = W[-7] + W[-16]
163 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
165 ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
167 palignr $4, X0, XTMP1 # XTMP1 = W[-15]
168 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
170 xor g, y2 # y2 = CH = ((f^g)&e)^g
171 movdqa XTMP1, XTMP2 # XTMP2 = W[-15]
173 add y0, y2 # y2 = S1 + CH
174 add _XFER(%rsp), y2 # y2 = k + w + S1 + CH
175 movdqa XTMP1, XTMP3 # XTMP3 = W[-15]
177 add y2, h # h = h + S1 + CH + k + w
179 pslld $(32-7), XTMP1 # XTMP1 = W[-15] << (32-7)
181 add h, d # d = d + h + S1 + CH + k + w
185 add y1, h # h = h + S1 + CH + k + w + S0
186 por XTMP2, XTMP1 # XTMP1 = W[-15] ror 7
188 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
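The W[-7] and W[-15] operands above come out of palignr, which concatenates two adjacent 4-word schedule blocks and takes a 16-byte window one dword in. A sketch in intrinsics terms (helper name is mine):

#include <tmmintrin.h>		/* SSSE3: _mm_alignr_epi8 == palignr */

/* (hi:lo) >> 4 bytes: result dword i is dword i+1 of the 8-dword
 * pair, i.e. {W[n+4],W[n+3],W[n+2],W[n+1]} from two 4-word blocks. */
static __m128i dword_window(__m128i hi, __m128i lo)
{
	return _mm_alignr_epi8(hi, lo, 4);
}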
191 movdqa XTMP3, XTMP2 # XTMP2 = W[-15]
194 movdqa XTMP3, XTMP4 # XTMP4 = W[-15]
195 ror $(25-11), y0 # y0 = e >> (25-11)
196 xor e, y0 # y0 = e ^ (e >> (25-11))
198 ror $(22-13), y1 # y1 = a >> (22-13)
199 pslld $(32-18), XTMP3 # XTMP3 = W[-15] << (32-18)
200 xor a, y1 # y1 = a ^ (a >> (22-13))
201 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
204 ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
205 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
209 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
210 xor g, y2 # y2 = CH = ((f^g)&e)^g
211 psrld $3, XTMP4 # XTMP4 = W[-15] >> 3
212 add y0, y2 # y2 = S1 + CH
213 add (1*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
215 pxor XTMP2, XTMP1 # XTMP1 = W[-15] ror 7 ^ W[-15] ror 18
217 add y2, h # h = h + S1 + CH + k + w
221 add h, d # d = d + h + S1 + CH + k + w
224 pshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA}
226 add y1, h # h = h + S1 + CH + k + w + S0
227 paddd XTMP1, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
229 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
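SSE2 has no packed 32-bit rotate, which is why the s0 rotations above are assembled from pslld/psrld/por triples (e.g. the pslld $(32-7) and por lines). The idiom in C intrinsics:

#include <emmintrin.h>		/* SSE2 */

/* ror r == (x >> r) | (x << (32 - r)): one psrld, one pslld, one por */
static __m128i rotr_epi32(__m128i x, const int r)
{
	return _mm_or_si128(_mm_srli_epi32(x, r),
			    _mm_slli_epi32(x, 32 - r));
}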
232 movdqa XTMP2, XTMP3 # XTMP3 = W[-2] {BBAA}
235 ror $(25-11), y0 # y0 = e >> (25-11)
236 movdqa XTMP2, XTMP4 # XTMP4 = W[-2] {BBAA}
237 xor e, y0 # y0 = e ^ (e >> (25-11))
238 ror $(22-13), y1 # y1 = a >> (22-13)
240 xor a, y1 # y1 = a ^ (a >> (22-13))
241 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
242 psrlq $17, XTMP2 # XTMP2 = W[-2] ror 17 {xBxA}
244 psrlq $19, XTMP3 # XTMP3 = W[-2] ror 19 {xBxA}
245 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
247 psrld $10, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA}
248 ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
249 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
250 xor g, y2 # y2 = CH = ((f^g)&e)^g
253 add y0, y2 # y2 = S1 + CH
255 add (2*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
258 add y2, h # h = h + S1 + CH + k + w
262 add h, d # d = d + h + S1 + CH + k + w
266 add y1, h # h = h + S1 + CH + k + w + S0
268 pshufd $0b01010000, XTMP0, XTMP2 # XTMP2 = W[-2] {DDCC}
270 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
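For s1 the code uses a different rotate trick: pshufd duplicated each W[-2] dword into both halves of a qword ({BBAA}/{DDCC}), so a single 64-bit psrlq leaves the 32-bit rotation in the low dword. A scalar sketch of why that works:

#include <stdint.h>

/* With dword a duplicated into both halves of a qword, a plain
 * 64-bit shift right leaves (a ror r) in the low 32 bits. */
static uint32_t ror_via_qword(uint32_t a, unsigned r)
{
	uint64_t q = ((uint64_t)a << 32) | a;
	return (uint32_t)(q >> r);
}

/* ...so psrlq $17, psrlq $19 and psrld $10 yield s1 directly: */
static uint32_t s1(uint32_t w2)		/* w2 = W[-2] */
{
	return ror_via_qword(w2, 17) ^ ror_via_qword(w2, 19) ^ (w2 >> 10);
}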
273 movdqa XTMP2, XTMP3 # XTMP3 = W[-2] {DDCC}
275 ror $(25-11), y0 # y0 = e >> (25-11)
277 movdqa XTMP2, X0 # X0 = W[-2] {DDCC}
278 ror $(22-13), y1 # y1 = a >> (22-13)
279 xor e, y0 # y0 = e ^ (e >> (25-11))
281 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
282 psrlq $17, XTMP2 # XTMP2 = W[-2] ror 17 {xDxC}
283 xor a, y1 # y1 = a ^ (a >> (22-13))
285 psrlq $19, XTMP3 # XTMP3 = W[-2] ror 19 {xDxC}
286 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
288 ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
289 psrld $10, X0 # X0 = W[-2] >> 10 {DDCC}
290 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
292 xor g, y2 # y2 = CH = ((f^g)&e)^g
295 add y0, y2 # y2 = S1 + CH
296 add (3*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
299 add y2, h # h = h + S1 + CH + k + w
303 add h, d # d = d + h + S1 + CH + k + w
307 add y1, h # h = h + S1 + CH + k + w + S0
309 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
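Putting the four interleaved chunks together, one FOUR_ROUNDS_AND_SCHED pass extends the message schedule by four words. The scalar equivalent; note that lanes C/D consume w values the A/B lanes just produced, which is why the asm computes s1 in the two {xBxA} and {xDxC} halves:

#include <stdint.h>

static inline uint32_t rotr32(uint32_t x, unsigned n)
{
	return (x >> n) | (x << (32 - n));
}

/* Extend the schedule by four words, i = 16, 20, ..., 60 */
static void sha256_schedule4(uint32_t w[64], int i)
{
	for (int j = i; j < i + 4; j++) {
		uint32_t s0 = rotr32(w[j-15], 7) ^ rotr32(w[j-15], 18) ^ (w[j-15] >> 3);
		uint32_t s1 = rotr32(w[j-2], 17) ^ rotr32(w[j-2], 19) ^ (w[j-2] >> 10);
		w[j] = w[j-16] + s0 + w[j-7] + s1;
	}
}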
315 ## input is [rsp + _XFER + \round * 4]
318 ror $(25-11), y0 # y0 = e >> (25-11)
320 xor e, y0 # y0 = e ^ (e >> (25-11))
321 ror $(22-13), y1 # y1 = a >> (22-13)
323 xor a, y1 # y1 = a ^ (a >> (22-13))
324 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
326 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
327 ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
329 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
331 xor g, y2 # y2 = CH = ((f^g)&e)^g
332 add y0, y2 # y2 = S1 + CH
334 offset = \round * 4 + _XFER
335 add offset(%rsp), y2 # y2 = k + w + S1 + CH
337 add y2, h # h = h + S1 + CH + k + w
340 add h, d # d = d + h + S1 + CH + k + w
343 add y1, h # h = h + S1 + CH + k + w + S0
345 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
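DO_ROUND is the same round function without the schedule work. A C sketch of one round, assuming the closing ror $6 / ror $2 steps and the MAJ lines that do not appear among the matched lines; the interleaved ror/xor pairs above evaluate Sigma1 with a single temporary instead of three rotated copies of e:

#include <stdint.h>

static inline uint32_t rotr32(uint32_t x, unsigned n)
{
	return (x >> n) | (x << (32 - n));
}

/* One round.  S1 = rotr(rotr(rotr(e,14) ^ e, 5) ^ e, 6)
 *                = rotr(e,6) ^ rotr(e,11) ^ rotr(e,25);
 * S0 is built the same way from a with 9/11/2.  k_plus_w is the
 * precomputed K[t]+W[t] word read from the _XFER stack slot. */
static void sha256_round(uint32_t s[8], uint32_t k_plus_w)
{
	uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
	uint32_t e = s[4], f = s[5], g = s[6], h = s[7];

	uint32_t S1  = rotr32(rotr32(rotr32(e, 14) ^ e, 5) ^ e, 6);
	uint32_t ch  = ((f ^ g) & e) ^ g;	/* CH, per the comments above */
	uint32_t S0  = rotr32(rotr32(rotr32(a, 9) ^ a, 11) ^ a, 2);
	uint32_t maj = ((a | c) & b) | (a & c);	/* one equivalent MAJ form */
	uint32_t t1  = h + S1 + ch + k_plus_w;

	s[7] = g; s[6] = f; s[5] = e; s[4] = d + t1;
	s[3] = c; s[2] = b; s[1] = a; s[0] = t1 + S0 + maj;
}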
377 mov 4*0(CTX), a
378 mov 4*1(CTX), b
379 mov 4*2(CTX), c
380 mov 4*3(CTX), d
381 mov 4*4(CTX), e
382 mov 4*5(CTX), f
383 mov 4*6(CTX), g
384 mov 4*7(CTX), h
423 add $4*16, TBL
451 addm (4*0)(CTX),a
452 addm (4*1)(CTX),b
453 addm (4*2)(CTX),c
454 addm (4*3)(CTX),d
455 addm (4*4)(CTX),e
456 addm (4*5)(CTX),f
457 addm (4*6)(CTX),g
458 addm (4*7)(CTX),h
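These addm lines are the Davies-Meyer feed-forward, mirroring the eight loads at 377-384. In C (the u32[8] digest layout at the start of CTX is an assumption of this sketch):

#include <stdint.h>

/* Fold the working variables a..h back into the digest words. */
static void sha256_feed_forward(uint32_t state[8], const uint32_t v[8])
{
	for (int i = 0; i < 8; i++)
		state[i] += v[i];	/* addm (4*i)(CTX), reg */
}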
505 # shuffle xBxA -> 00BA
511 # shuffle xDxC -> DC00
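Because the two shuffles leave their live dwords in disjoint lanes, a single POR assembles the four finished schedule words:

#include <emmintrin.h>		/* SSE2 */

static __m128i merge_00ba_dc00(__m128i ba /* {0,0,B,A} */,
			       __m128i dc /* {D,C,0,0} */)
{
	return _mm_or_si128(ba, dc);	/* {D,C,B,A} */
}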