Lines Matching +full:v1 +full:- +full:v6 (arch/arm64/crypto/sm4-ce-gcm-core.S, Linux kernel)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 * SM4-GCM AEAD Algorithm using ARMv8 Crypto Extensions
14 #include "sm4-ce-asm.h"
16 .arch armv8-a+crypto
37 * output: r0:r1 (low 128 bits in r0, high in r1)
97 * input: r0:r1 (low 128 bits in r0, high in r1)
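The two comments above (file lines 37 and 97) pin down the register contract used by the GHASH helpers: a 128x128-bit carry-less multiply whose 256-bit product travels as a low half in r0 and a high half in r1. A minimal C model of that contract, built only from what the comments say; clmul64 and pmul_128x128 are illustrative names, not the kernel's:

    #include <stdint.h>

    typedef struct { uint64_t lo, hi; } u128;

    /* 64x64 -> 128 carry-less multiply: what a single PMULL does. */
    static u128 clmul64(uint64_t a, uint64_t b)
    {
        u128 r = { 0, 0 };
        for (int i = 0; i < 64; i++) {
            if ((b >> i) & 1) {
                r.lo ^= a << i;
                if (i)
                    r.hi ^= a >> (64 - i);
            }
        }
        return r;
    }

    /* 128x128 -> 256: four 64-bit partial products, schoolbook-combined.
     * *r0 gets the low 128 bits of the product, *r1 the high 128 bits. */
    static void pmul_128x128(u128 a, u128 b, u128 *r0, u128 *r1)
    {
        u128 ll = clmul64(a.lo, b.lo);
        u128 lh = clmul64(a.lo, b.hi);
        u128 hl = clmul64(a.hi, b.lo);
        u128 hh = clmul64(a.hi, b.hi);

        r0->lo = ll.lo;
        r0->hi = ll.hi ^ lh.lo ^ hl.lo;  /* middle terms land here */
        r1->lo = hh.lo ^ lh.hi ^ hl.hi;
        r1->hi = hh.hi;
    }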
218 /* the lower 32 bits of the initial IV are always be32(1) */ \
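File line 218 states GCM's standard counter convention (NIST SP 800-38D): with the usual 96-bit IV, the initial counter block J0 is IV || 0^31 || 1, so its last four bytes are always the big-endian 32-bit value 1. A short sketch of that construction; gcm_build_j0 is an illustrative name:

    #include <stdint.h>
    #include <string.h>

    /* For a 96-bit IV, J0 = IV || 0^31 || 1; the tag uses E(K, J0) and
     * the first data block uses inc32(J0), i.e. a counter of 2. */
    static void gcm_build_j0(uint8_t j0[16], const uint8_t iv[12])
    {
        memcpy(j0, iv, 12);            /* bytes 0..11: the IV itself */
        j0[12] = j0[13] = j0[14] = 0;  /* bytes 12..14: zero         */
        j0[15] = 1;                    /* low 32 bits == be32(1)     */
    }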
236 /* can be the same as input v0-v3 */
238 #define RR3 v1
244 #define RR4 v6
293 st1 {RH1.16b-RH4.16b}, [x1]
306 ld1 {RH1.16b-RH4.16b}, [x0]
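RH1-RH4 being stored and reloaded as a group (file lines 293/306) suggests a table of precomputed hash-key powers, the usual trick behind 4-blocks-at-a-time GHASH: with H^1..H^4 on hand, four blocks fold with independent multiplies instead of a serial chain. A sketch of that aggregation, assuming RH1..RH4 hold successive powers of H; the bit-serial gf128_mul below is a deliberately naive stand-in for the PMULL-based multiply:

    #include <stdint.h>

    typedef struct { uint64_t lo, hi; } u128;

    /* Naive GF(2^128) multiply, reduced by x^128 + x^7 + x^2 + x + 1.
     * (Plain polynomial bit order; GHASH proper uses a reflected
     * representation, which the asm handles with rbit.) */
    static u128 gf128_mul(u128 a, u128 b)
    {
        u128 r = { 0, 0 };
        for (int i = 0; i < 128; i++) {
            uint64_t bit = i < 64 ? (b.lo >> i) & 1
                                  : (b.hi >> (i - 64)) & 1;
            if (bit) {
                r.lo ^= a.lo;
                r.hi ^= a.hi;
            }
            uint64_t carry = a.hi >> 63;        /* a = a * x mod p(x) */
            a.hi = (a.hi << 1) | (a.lo >> 63);
            a.lo = (a.lo << 1) ^ (carry ? 0x87 : 0);
        }
        return r;
    }

    static u128 xor128(u128 a, u128 b)
    {
        return (u128){ a.lo ^ b.lo, a.hi ^ b.hi };
    }

    /* Fold four blocks at once: equal to four serial y = (y^x)*H steps,
     * but each product is independent. h[0..3] = H^1..H^4. */
    static u128 ghash_4way(u128 y, const u128 x[4], const u128 h[4])
    {
        u128 acc = gf128_mul(xor128(y, x[0]), h[3]);  /* (y^x1) * H^4 */
        acc = xor128(acc, gf128_mul(x[1], h[2]));     /*  x2    * H^3 */
        acc = xor128(acc, gf128_mul(x[2], h[1]));     /*  x3    * H^2 */
        acc = xor128(acc, gf128_mul(x[3], h[0]));     /*  x4    * H^1 */
        return acc;
    }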
322 ld1 {v0.16b-v3.16b}, [x2], #64
325 rbit v1.16b, v1.16b
338 RR2, RR3, v1, RH3, RTMP2, RTMP3,
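The rbit instructions on the hash inputs reconcile two bit orders: GHASH defines its field elements bit-reflected, while PMULL multiplies in natural polynomial order, and reversing the bits of each operand byte is the cheap half of converting between the two. A C model of what one "rbit v.16b, v.16b" does; rbit_16b is an illustrative name:

    #include <stdint.h>

    /* Equivalent of "rbit v.16b, v.16b": reverse the bit order inside
     * each of the 16 bytes (the byte order itself is untouched). */
    static void rbit_16b(uint8_t v[16])
    {
        for (int i = 0; i < 16; i++) {
            uint8_t b = v[i];
            b = (uint8_t)(((b & 0xF0) >> 4) | ((b & 0x0F) << 4));
            b = (uint8_t)(((b & 0xCC) >> 2) | ((b & 0x33) << 2));
            b = (uint8_t)(((b & 0xAA) >> 1) | ((b & 0x55) << 1));
            v[i] = b;
        }
    }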
391 ld1 {RH1.16b-RH4.16b}, [x6]
411 inc32_le128(v1) /* +1 */
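inc32_le128 implements GCM's inc32 step: only the low 32 bits of the counter block advance, big-endian and wrapping mod 2^32, while the IV portion stays fixed; the /* +1 */ comments track each block's offset from the running counter. The byte-level semantics, as a sketch (gcm_inc32 is an illustrative name):

    #include <stdint.h>

    /* inc32 per SP 800-38D: bump the big-endian counter in the last
     * four bytes; bytes 0..11 (the IV part) never change. */
    static void gcm_inc32(uint8_t ctr[16])
    {
        for (int i = 15; i >= 12; i--)
            if (++ctr[i] != 0)   /* stop once a byte doesn't wrap */
                break;
    }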
415 ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64
417 SM4_CRYPT_BLK4(v0, v1, v2, v3)
420 eor v1.16b, v1.16b, RTMP1.16b
423 st1 {v0.16b-v3.16b}, [x1], #64
428 rbit v1.16b, v1.16b
441 RR2, RR3, v1, RH3, RTMP2, RTMP3,
520 GTAG_HASH_LENGTHS(v1, v3)
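GTAG_HASH_LENGTHS folds in GHASH's final block: the AAD length and the ciphertext length, each as a 64-bit big-endian count of bits; hashing this block (then encrypting with J0) completes the authentication tag. A sketch of the block's layout; gcm_length_block is an illustrative name:

    #include <stdint.h>

    /* Final GHASH block: [len(AAD)]_64 || [len(C)]_64, lengths in
     * bits, big-endian, per SP 800-38D. */
    static void gcm_length_block(uint8_t blk[16],
                                 uint64_t aad_bytes, uint64_t ct_bytes)
    {
        uint64_t abits = aad_bytes * 8;
        uint64_t cbits = ct_bytes * 8;

        for (int i = 0; i < 8; i++) {
            blk[7 - i]  = (uint8_t)(abits >> (8 * i));
            blk[15 - i] = (uint8_t)(cbits >> (8 * i));
        }
    }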
563 /* v0-v2 for building CTRs, v3-v5 for saving inputs */
565 #define RR1 v6
602 ld1 {RH1.16b-RH3.16b}, [x6]
620 ld1 {v3.16b-v5.16b}, [x2], #(3 * 16)
624 rbit v6.16b, v3.16b
625 inc32_le128(v1) /* +1 */
630 eor RHASH.16b, RHASH.16b, v6.16b
633 SM4_CRYPT_PMUL_128x128_BLK3(v0, v1, v2,
639 eor v1.16b, v1.16b, v4.16b
644 st1 {v0.16b-v2.16b}, [x1], #(3 * 16)
659 rbit v6.16b, v3.16b
661 eor RHASH.16b, RHASH.16b, v6.16b
712 GTAG_HASH_LENGTHS(v1, v3)