Lines matching "16" (full-word match):
118 eor perm1.16b, perm1.16b, perm4.16b
120 ushr perm3.2d, perm1.2d, #16
128 tbl bd1.16b, {\bd\().16b}, perm1.16b
129 tbl bd2.16b, {\bd\().16b}, perm2.16b
130 tbl bd3.16b, {\bd\().16b}, perm3.16b
131 tbl bd4.16b, {\bd\().16b}, perm4.16b
150 tbl t4.16b, {ad.16b}, perm1.16b // A1
151 tbl t5.16b, {ad.16b}, perm2.16b // A2
152 tbl t6.16b, {ad.16b}, perm3.16b // A3
154 pmull2 t4.8h, t4.16b, fold_consts.16b // F = A1*B
155 pmull2 t8.8h, ad.16b, bd1.16b // E = A*B1
156 pmull2 t5.8h, t5.16b, fold_consts.16b // H = A2*B
157 pmull2 t7.8h, ad.16b, bd2.16b // G = A*B2
158 pmull2 t6.8h, t6.16b, fold_consts.16b // J = A3*B
159 pmull2 t9.8h, ad.16b, bd3.16b // I = A*B3
160 pmull2 t3.8h, ad.16b, bd4.16b // K = A*B4
162 0: eor t4.16b, t4.16b, t8.16b // L = E + F
163 eor t5.16b, t5.16b, t7.16b // M = G + H
164 eor t6.16b, t6.16b, t9.16b // N = I + J
172 // t5 = (M) (P2 + P3) << 16
173 eor t8.16b, t8.16b, t4.16b
174 and t4.16b, t4.16b, k32_48.16b
178 eor t7.16b, t7.16b, t6.16b
179 and t6.16b, t6.16b, k00_16.16b
181 eor t8.16b, t8.16b, t4.16b
182 eor t7.16b, t7.16b, t6.16b
189 ext t4.16b, t4.16b, t4.16b, #15
190 ext t5.16b, t5.16b, t5.16b, #14
191 ext t6.16b, t6.16b, t6.16b, #13
192 ext t3.16b, t3.16b, t3.16b, #12
194 eor t4.16b, t4.16b, t5.16b
195 eor t6.16b, t6.16b, t3.16b
203 mov ad.16b, \ad\().16b
207 pmull2 \rq\().8h, \ad\().16b, \bd\().16b // D = A*B
212 eor \rq\().16b, \rq\().16b, t4.16b
213 eor \rq\().16b, \rq\().16b, t6.16b
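
The __pmull_p8 path above assembles a 64-bit polynomial product out of byte-wise PMULL partial products (the A1*B ... A*B4 terms recombined by the eor/ext steps). For reference, a single 8-bit PMULL lane performs carryless multiplication in GF(2)[x]; a minimal C sketch of that per-lane operation (illustrative only, not kernel code):

	#include <stdint.h>

	/* One PMULL lane: carryless (polynomial) multiply of two 8-bit
	 * values into a 16-bit product. */
	static inline uint16_t clmul8(uint8_t a, uint8_t b)
	{
		uint16_t r = 0;

		for (int i = 0; i < 8; i++)
			if (b & (1u << i))
				r ^= (uint16_t)a << i;
		return r;
	}
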
224 CPU_LE( rev64 v11.16b, v11.16b )
225 CPU_LE( rev64 v12.16b, v12.16b )
230 CPU_LE( ext v11.16b, v11.16b, v11.16b, #8 )
231 CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 )
233 eor \reg1\().16b, \reg1\().16b, v8.16b
234 eor \reg2\().16b, \reg2\().16b, v9.16b
235 eor \reg1\().16b, \reg1\().16b, v11.16b
236 eor \reg2\().16b, \reg2\().16b, v12.16b
244 ld1 {fold_consts.2d}, [fold_consts_ptr], #16
247 eor \dst_reg\().16b, \dst_reg\().16b, v8.16b
248 eor \dst_reg\().16b, \dst_reg\().16b, \src_reg\().16b
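
At the polynomial level, the fold step around lines 244-248 is one carryless multiply-and-XOR per 64-bit half of the source chunk. A conceptual C sketch, assuming constants k1 = x^E1 mod G(x) and k2 = x^E2 mod G(x) chosen for the distance being folded across (the exact exponents live in the fold_consts data, which is not among the matched lines):

	#include <stdint.h>

	typedef struct { uint64_t lo, hi; } u128;

	/* 64x64 -> 128-bit carryless multiply, i.e. the effect of a full
	 * PMULL/PMULL2 pair (or of the __pmull_p8 emulation above). */
	static u128 clmul64(uint64_t a, uint64_t b)
	{
		u128 r = { 0, 0 };

		for (int i = 0; i < 64; i++) {
			if (b >> i & 1) {
				r.lo ^= a << i;
				if (i)
					r.hi ^= a >> (64 - i);
			}
		}
		return r;
	}

	/* One fold step: multiply the halves of 'src' by the constants and
	 * XOR the products into 'dst', the chunk that sits 16 bytes later
	 * in the message.  k1/k2 stand in for the loaded fold_consts. */
	static u128 fold_16_bytes(u128 dst, u128 src, uint64_t k1, uint64_t k2)
	{
		u128 p1 = clmul64(src.hi, k1);
		u128 p2 = clmul64(src.lo, k2);

		dst.lo ^= p1.lo ^ p2.lo;
		dst.hi ^= p1.hi ^ p2.hi;
		return dst;
	}
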
275 CPU_LE( rev64 v0.16b, v0.16b )
276 CPU_LE( rev64 v1.16b, v1.16b )
277 CPU_LE( rev64 v2.16b, v2.16b )
278 CPU_LE( rev64 v3.16b, v3.16b )
279 CPU_LE( rev64 v4.16b, v4.16b )
280 CPU_LE( rev64 v5.16b, v5.16b )
281 CPU_LE( rev64 v6.16b, v6.16b )
282 CPU_LE( rev64 v7.16b, v7.16b )
283 CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
284 CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 )
285 CPU_LE( ext v2.16b, v2.16b, v2.16b, #8 )
286 CPU_LE( ext v3.16b, v3.16b, v3.16b, #8 )
287 CPU_LE( ext v4.16b, v4.16b, v4.16b, #8 )
288 CPU_LE( ext v5.16b, v5.16b, v5.16b, #8 )
289 CPU_LE( ext v6.16b, v6.16b, v6.16b, #8 )
290 CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
292 // XOR the first 16 data *bits* with the initial CRC value.
293 movi v8.16b, #0
295 eor v0.16b, v0.16b, v8.16b
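
The movi/eor pair above (the mov of the initial CRC into a lane of v8 falls on a non-matching line) uses the standard seeding identity for an MSB-first CRC: XORing the 16-bit initial value into the first 16 message bits is equivalent to starting the CRC register at that value. A byte-level sketch, assuming the message bytes are still in natural order:

	#include <stdint.h>

	/* Fold the initial CRC into the first two message bytes
	 * (MSB-first CRC convention, as used by CRC-T10DIF). */
	static void seed_initial_crc(uint8_t msg[2], uint16_t init_crc)
	{
		msg[0] ^= init_crc >> 8;
		msg[1] ^= init_crc & 0xff;
	}
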
316 // Now fold the 112 bytes in v0-v6 into the 16 bytes in v7.
319 add fold_consts_ptr, fold_consts_ptr, #16
320 ld1 {fold_consts.2d}, [fold_consts_ptr], #16
329 // Fold across 16 bytes.
334 // Then subtract 16 to simplify the termination condition of the
336 adds len, len, #(128-16)
338 // While >= 16 data bytes remain (not counting v7), fold the 16 bytes v7
344 eor v7.16b, v7.16b, v8.16b
345 ldr q0, [buf], #16
346 CPU_LE( rev64 v0.16b, v0.16b )
347 CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
348 eor v7.16b, v7.16b, v0.16b
349 subs len, len, #16
353 // Add 16 to get the correct number of data bytes remaining in 0...15
354 // (not counting v7), following the previous extra subtraction by 16.
355 adds len, len, #16
359 // Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first
360 // 16 bytes are in v7 and the rest are the remaining data in 'buf'. To
363 // chunk of 16 bytes, then fold the first chunk into the second.
365 // v0 = last 16 original data bytes
367 ldr q0, [buf, #-16]
368 CPU_LE( rev64 v0.16b, v0.16b )
369 CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
372 adr_l x4, .Lbyteshift_table + 16
374 ld1 {v2.16b}, [x4]
375 tbl v1.16b, {v7.16b}, v2.16b
377 // v3 = first chunk: v7 right-shifted by '16-len' bytes.
378 movi v3.16b, #0x80
379 eor v2.16b, v2.16b, v3.16b
380 tbl v3.16b, {v7.16b}, v2.16b
382 // Convert to 8-bit masks: 'len' 0x00 bytes, then '16-len' 0xff bytes.
383 sshr v2.16b, v2.16b, #7
386 // then '16-len' bytes from v1 (high-order bytes).
387 bsl v2.16b, v1.16b, v0.16b
392 eor v7.16b, v7.16b, v0.16b
393 eor v7.16b, v7.16b, v2.16b
396 // Reduce the 128-bit value M(x), stored in v7, to the final 16-bit CRC.
398 movi v2.16b, #0 // init zero register
401 ld1 {fold_consts.2d}, [fold_consts_ptr], #16
407 ext v0.16b, v2.16b, v7.16b, #8
409 eor v0.16b, v0.16b, v7.16b // + low bits * x^64
413 ext v1.16b, v0.16b, v2.16b, #12 // extract high 32 bits
416 eor v0.16b, v0.16b, v1.16b // + low bits
427 eor v0.16b, v0.16b, v1.16b // + low 16 nonzero bits
428 // Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of v0.
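
The pmull-based steps above compute (x^16 * M(x)) mod G(x) without any bitwise loop; as a cross-check, the same reduction modulo G(x) = 0x18BB7 (the polynomial listed in the comment at line 488) can be written as a plain bit-at-a-time C reference, here for values that fit in 64 bits:

	#include <stdint.h>

	/* Reduce a polynomial of up to 'bits' bits modulo
	 * G(x) = x^16 + x^15 + x^11 + ... + 1  (0x18BB7). */
	static uint16_t poly_mod_g(uint64_t p, int bits)
	{
		for (int i = bits - 1; i >= 16; i--)
			if (p & (1ULL << i))
				p ^= 0x18BB7ULL << (i - 16);
		return (uint16_t)p;
	}
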
432 ldp x29, x30, [sp], #16
437 // Checksumming a buffer of length 16...255 bytes
441 // Load the first 16 data bytes.
443 CPU_LE( rev64 v7.16b, v7.16b )
444 CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
446 // XOR the first 16 data *bits* with the initial CRC value.
447 movi v0.16b, #0
449 eor v7.16b, v7.16b, v0.16b
451 // Load the fold-across-16-bytes constants.
452 ld1 {fold_consts.2d}, [fold_consts_ptr], #16
455 cmp len, #16
456 b.eq .Lreduce_final_16_bytes_\@ // len == 16
459 add len, len, #16
466 // Assumes len >= 16.
469 stp x29, x30, [sp, #-16]!
478 // Assumes len >= 16.
488 // G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
508 // For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 -
510 // ..., 0x80} XOR the index vector to shift right by '16 - len' bytes.
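
The trick described above relies on how TBL treats out-of-range indices; a minimal C emulation of a single-register TBL lookup (illustrative, not kernel code):

	#include <stdint.h>

	/* Single-register TBL: each index byte selects one of the 16 table
	 * bytes; any index outside 0..15 (e.g. one with 0x80 set) yields 0,
	 * which is what turns the index vector into a byte shift. */
	static void tbl16(uint8_t dst[16], const uint8_t tbl[16],
			  const uint8_t idx[16])
	{
		for (int i = 0; i < 16; i++)
			dst[i] = idx[i] < 16 ? tbl[idx[i]] : 0;
	}
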