Lines Matching +full:4 +full:- +full:ch

2 # Implement fast SHA-256 with SSSE3 instructions. (x86_64)
21 # - Redistributions of source code must retain the above
25 # - Redistributions in binary form must reproduce the above
41 # This code is described in an Intel White-Paper:
42 # "Fast SHA-256 Implementations on Intel Architecture Processors"
58 # Add reg to mem using reg-mem add and store
87 SHUF_00BA = %xmm10 # shuffle xBxA -> 00BA
88 SHUF_DC00 = %xmm11 # shuffle xDxC -> DC00
150 ## compute W[-16] + W[-7] 4 at a time
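The block below is the vectorized message schedule. As a reference for what the SIMD code computes four lanes at a time, here is a minimal scalar C sketch of the FIPS 180-4 recurrence (rotr32 and sha256_schedule are illustrative names, not from the kernel source):

#include <stdint.h>

static inline uint32_t rotr32(uint32_t x, unsigned n)
{
        return (x >> n) | (x << (32 - n));
}

/* W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16] */
static void sha256_schedule(uint32_t W[64])
{
        for (int t = 16; t < 64; t++) {
                uint32_t s0 = rotr32(W[t-15], 7) ^ rotr32(W[t-15], 18) ^ (W[t-15] >> 3);
                uint32_t s1 = rotr32(W[t-2], 17) ^ rotr32(W[t-2], 19) ^ (W[t-2] >> 10);
                W[t] = W[t-16] + s0 + W[t-7] + s1;
        }
}

Note the W[t-2] dependency: when four words are produced at once, the last two depend on the first two, which is why the sigma1 half of the schedule below is done in two passes ({xBxA}, then {xDxC}).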
153 ror $(25-11), y0 # y0 = e >> (25-11)
155 palignr $4, X2, XTMP0 # XTMP0 = W[-7]
156 ror $(22-13), y1 # y1 = a >> (22-13)
157 xor e, y0 # y0 = e ^ (e >> (25-11))
159 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
161 xor a, y1 # y1 = a ^ (a >> (22-13))
163 paddd X0, XTMP0 # XTMP0 = W[-7] + W[-16]
164 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
166 ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
168 palignr $4, X0, XTMP1 # XTMP1 = W[-15]
169 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
171 xor g, y2 # y2 = CH = ((f^g)&e)^g
172 movdqa XTMP1, XTMP2 # XTMP2 = W[-15]
174 add y0, y2 # y2 = S1 + CH
175 add _XFER(%rsp), y2 # y2 = k + w + S1 + CH
176 movdqa XTMP1, XTMP3 # XTMP3 = W[-15]
178 add y2, h # h = h + S1 + CH + k + w
180 pslld $(32-7), XTMP1 # XTMP1 = W[-15] << (32-7)
182 add h, d # d = d + h + S1 + CH + k + w
186 add y1, h # h = h + S1 + CH + k + w + S0
187 por XTMP2, XTMP1 # XTMP1 = W[-15] ror 7
189 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
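The ror/xor pairs above use rotate distances of the form (25-11) and (11-6) so that Sigma1(e) = (e ror 6) ^ (e ror 11) ^ (e ror 25) falls out of just three rotates; the final ror $6 sits on lines that did not match this search. A small C sketch of the equivalence (function names are illustrative):

#include <stdint.h>

static inline uint32_t rotr32(uint32_t x, unsigned n)
{
        return (x >> n) | (x << (32 - n));
}

/* Interleaved form used by the assembly above. */
static uint32_t big_sigma1_interleaved(uint32_t e)
{
        uint32_t y0 = rotr32(e, 25 - 11);  /* e ror 14                        */
        y0 ^= e;
        y0 = rotr32(y0, 11 - 6);           /* (e ror 19) ^ (e ror 5)          */
        y0 ^= e;
        return rotr32(y0, 6);              /* (e ror 25)^(e ror 11)^(e ror 6) */
}

/* Direct definition; both functions agree for all inputs. */
static uint32_t big_sigma1_direct(uint32_t e)
{
        return rotr32(e, 6) ^ rotr32(e, 11) ^ rotr32(e, 25);
}

Sigma0(a) is built the same way from (22-13), (13-2) and a final ror $2. Interleaving the xors between rotates lets the scalar round work hide latency between the vector schedule instructions.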
192 movdqa XTMP3, XTMP2 # XTMP2 = W[-15]
195 movdqa XTMP3, XTMP4 # XTMP4 = W[-15]
196 ror $(25-11), y0 # y0 = e >> (25-11)
197 xor e, y0 # y0 = e ^ (e >> (25-11))
199 ror $(22-13), y1 # y1 = a >> (22-13)
200 pslld $(32-18), XTMP3 # XTMP3 = W[-15] << (32-18)
201 xor a, y1 # y1 = a ^ (a >> (22-13))
202 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
205 ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
206 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
210 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
211 xor g, y2 # y2 = CH = ((f^g)&e)^g
212 psrld $3, XTMP4 # XTMP4 = W[-15] >> 3
213 add y0, y2 # y2 = S1 + CH
214 add (1*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
216 pxor XTMP2, XTMP1 # XTMP1 = W[-15] ror 7 ^ W[-15] ror 18
218 add y2, h # h = h + S1 + CH + k + w
222 add h, d # d = d + h + S1 + CH + k + w
225 pshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA}
227 add y1, h # h = h + S1 + CH + k + w + S0
228 paddd XTMP1, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
230 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
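SSE has no packed 32-bit rotate, so W[-15] ror 7 and ror 18 are assembled from a left shift by 32-n (the pslld lines above), a right shift by n (psrld, on lines not matched by this search) and a por. An SSE2 intrinsics sketch of s0 computed this way (sigma0x4 is an illustrative name):

#include <emmintrin.h> /* SSE2 */

/* sigma0 on four lanes: (w ror 7) ^ (w ror 18) ^ (w >> 3) */
static inline __m128i sigma0x4(__m128i w)
{
        __m128i r7  = _mm_or_si128(_mm_srli_epi32(w, 7),
                                   _mm_slli_epi32(w, 32 - 7));
        __m128i r18 = _mm_or_si128(_mm_srli_epi32(w, 18),
                                   _mm_slli_epi32(w, 32 - 18));
        return _mm_xor_si128(_mm_xor_si128(r7, r18),
                             _mm_srli_epi32(w, 3));
}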
233 movdqa XTMP2, XTMP3 # XTMP3 = W[-2] {BBAA}
236 ror $(25-11), y0 # y0 = e >> (25-11)
237 movdqa XTMP2, XTMP4 # XTMP4 = W[-2] {BBAA}
238 xor e, y0 # y0 = e ^ (e >> (25-11))
239 ror $(22-13), y1 # y1 = a >> (22-13)
241 xor a, y1 # y1 = a ^ (a >> (22-13))
242 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
243 psrlq $17, XTMP2 # XTMP2 = W[-2] ror 17 {xBxA}
245 psrlq $19, XTMP3 # XTMP3 = W[-2] ror 19 {xBxA}
246 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
248 psrld $10, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA}
249 ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
250 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
251 xor g, y2 # y2 = CH = ((f^g)&e)^g
254 add y0, y2 # y2 = S1 + CH
256 add (2*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
259 add y2, h # h = h + S1 + CH + k + w
263 add h, d # d = d + h + S1 + CH + k + w
267 add y1, h # h = h + S1 + CH + k + w + S0
269 pshufd $0b01010000, XTMP0, XTMP2 # XTMP2 = W[-2] {DDCC}
271 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
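For sigma1 the code avoids the three-instruction rotate: pshufd duplicates each needed W[-2] dword into both halves of a 64-bit lane (the {BBAA} layout), after which a single 64-bit logical shift (psrlq $17 / psrlq $19) leaves a 32-bit rotate in the low dword of each lane; the high dword is garbage, the 'x' in the {xBxA} comments, and is discarded later by the 00BA/DC00 shuffles. Scalar sketch of why this works (rotr_via_qword is an illustrative name):

#include <stdint.h>

/* {v, v} packed in one qword, then one logical right shift: the low
 * 32 bits of the result are exactly v ror n, for 0 < n < 32.
 */
static uint32_t rotr_via_qword(uint32_t v, unsigned n)
{
        uint64_t lane = ((uint64_t)v << 32) | v;
        return (uint32_t)(lane >> n); /* == (v >> n) | (v << (32 - n)) */
}

The >> 10 term of sigma1 needs no rotate, so it is applied directly with psrld $10 to the duplicated copy.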
274 movdqa XTMP2, XTMP3 # XTMP3 = W[-2] {DDCC}
276 ror $(25-11), y0 # y0 = e >> (25-11)
278 movdqa XTMP2, X0 # X0 = W[-2] {DDCC}
279 ror $(22-13), y1 # y1 = a >> (22-13)
280 xor e, y0 # y0 = e ^ (e >> (25-11))
282 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
283 psrlq $17, XTMP2 # XTMP2 = W[-2] ror 17 {xDxC}
284 xor a, y1 # y1 = a ^ (a >> (22-13))
286 psrlq $19, XTMP3 # XTMP3 = W[-2] ror 19 {xDxC}
287 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
289 ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
290 psrld $10, X0 # X0 = W[-2] >> 10 {DDCC}
291 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
293 xor g, y2 # y2 = CH = ((f^g)&e)^g
296 add y0, y2 # y2 = S1 + CH
297 add (3*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
300 add y2, h # h = h + S1 + CH + k + w
304 add h, d # d = d + h + S1 + CH + k + w
308 add y1, h # h = h + S1 + CH + k + w + S0
310 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
316 ## input is [rsp + _XFER + \round * 4]
319 ror $(25-11), y0 # y0 = e >> (25-11)
321 xor e, y0 # y0 = e ^ (e >> (25-11))
322 ror $(22-13), y1 # y1 = a >> (22-13)
324 xor a, y1 # y1 = a ^ (a >> (22-13))
325 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
327 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
328 ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
330 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
332 xor g, y2 # y2 = CH = ((f^g)&e)^g
333 add y0, y2 # y2 = S1 + CH
335 offset = \round * 4 + _XFER
336 add offset(%rsp), y2 # y2 = k + w + S1 + CH
338 add y2, h # h = h + S1 + CH + k + w
341 add h, d # d = d + h + S1 + CH + k + w
344 add y1, h # h = h + S1 + CH + k + w + S0
346 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
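The DO_ROUND macro mirrors the scalar halves of the scheduled rounds above: S1 and CH fold into h, h folds into d, then S0 and MAJ complete h. A plain C sketch of one round in the same update order (sha256_round is an illustrative name; k_plus_w stands for the precomputed K[t]+W[t] value kept in the _XFER stack slots):

#include <stdint.h>

static inline uint32_t rotr32(uint32_t x, unsigned n)
{
        return (x >> n) | (x << (32 - n));
}

static void sha256_round(uint32_t s[8], uint32_t k_plus_w)
{
        uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
        uint32_t e = s[4], f = s[5], g = s[6], h = s[7];

        uint32_t S1  = rotr32(e, 6) ^ rotr32(e, 11) ^ rotr32(e, 25);
        uint32_t CH  = ((f ^ g) & e) ^ g;   /* same trick as the asm */
        uint32_t S0  = rotr32(a, 2) ^ rotr32(a, 13) ^ rotr32(a, 22);
        uint32_t MAJ = (a & b) ^ (a & c) ^ (b & c);

        h += S1 + CH + k_plus_w; /* h = h + S1 + CH + k + w     */
        d += h;                  /* d = d + h + S1 + CH + k + w */
        h += S0 + MAJ;           /* h = h + ... + S0 + MAJ      */

        /* rotate working variables; the asm does this by renaming
         * registers from round to round instead of moving data */
        s[0] = h; s[1] = a; s[2] = b; s[3] = c;
        s[4] = d; s[5] = e; s[6] = f; s[7] = g;
}

The CH form ((f^g)&e)^g equals the textbook (e&f)^(~e&g) but needs no NOT and only one temporary, which is why the assembly uses it.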
377 mov 4*0(CTX), a
378 mov 4*1(CTX), b
379 mov 4*2(CTX), c
380 mov 4*3(CTX), d
381 mov 4*4(CTX), e
382 mov 4*5(CTX), f
383 mov 4*6(CTX), g
384 mov 4*7(CTX), h
423 add $4*16, TBL
451 addm (4*0)(CTX),a
452 addm (4*1)(CTX),b
453 addm (4*2)(CTX),c
454 addm (4*3)(CTX),d
455 addm (4*4)(CTX),e
456 addm (4*5)(CTX),f
457 addm (4*6)(CTX),g
458 addm (4*7)(CTX),h
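addm (described by the comment at line 58) adds a register into memory and writes the sum back, so the eight lines above are the Davies-Meyer feed-forward: each saved digest word absorbs its final working variable. Scalar equivalent (sha256_feedforward is an illustrative name):

#include <stdint.h>

static void sha256_feedforward(uint32_t digest[8], const uint32_t s[8])
{
        for (int i = 0; i < 8; i++)
                digest[i] += s[i]; /* addm (4*i)(CTX), reg */
}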
505 # shuffle xBxA -> 00BA
511 # shuffle xDxC -> DC00
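SHUF_00BA and SHUF_DC00 (loaded into %xmm10/%xmm11 per the aliases at lines 87-88) are pshufb masks that extract the two valid sigma1 dwords from the {xBxA} and {xDxC} results and position them so the two halves can simply be added into the partial W sums. An SSSE3 intrinsics sketch; the byte patterns here are constructed to match the described behavior, not copied from the kernel's .rodata constants:

#include <tmmintrin.h> /* SSSE3: pshufb */

/* {x,B,x,A} -> {0,0,B,A}: keep dwords 0 and 2, zero the rest
 * (-1/0x80 mask bytes make pshufb emit zero bytes). */
static inline __m128i shuf_00ba(__m128i xbxa)
{
        const __m128i mask = _mm_setr_epi8(0, 1, 2, 3, 8, 9, 10, 11,
                                           -1, -1, -1, -1, -1, -1, -1, -1);
        return _mm_shuffle_epi8(xbxa, mask);
}

/* {x,D,x,C} -> {D,C,0,0}: same dwords, moved to the high half. */
static inline __m128i shuf_dc00(__m128i xdxc)
{
        const __m128i mask = _mm_setr_epi8(-1, -1, -1, -1, -1, -1, -1, -1,
                                           0, 1, 2, 3, 8, 9, 10, 11);
        return _mm_shuffle_epi8(xdxc, mask);
}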