Lines Matching "+full:1 +full:- +full:v0" in linux/arch/arm64/crypto/aes-modes.S
1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * linux/arch/arm64/crypto/aes-modes.S - chaining mode wrappers for AES
5 * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
8 /* included by aes-ce.S and aes-neon.S */
26 encrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
31 decrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
37 encrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7
42 decrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7
55 stp x29, x30, [sp, #-16]!
63 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
67 st1 {v0.16b-v3.16b}, [x0], #64
74 ld1 {v0.16b}, [x1], #16 /* get next pt block */
75 encrypt_block v0, w3, x2, x5, w6
76 st1 {v0.16b}, [x0], #16
77 subs w4, w4, #1
86 stp x29, x30, [sp, #-16]!
94 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
98 st1 {v0.16b-v3.16b}, [x0], #64
105 ld1 {v0.16b}, [x1], #16 /* get next ct block */
106 decrypt_block v0, w3, x2, x5, w6
107 st1 {v0.16b}, [x0], #16
108 subs w4, w4, #1
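
The fragments from lines 55 through 108 are the ECB entry points: a bulk loop that loads and stores four blocks at a time, plus a single-block tail. ECB applies the cipher to each 16-byte block independently, so there is no chaining state at all. A minimal C sketch of the tail loop, with a hypothetical aes_encrypt_block() standing in for the encrypt_block macro (not the kernel's API):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical single-block helper standing in for encrypt_block. */
void aes_encrypt_block(const uint8_t *rk, int rounds,
                       uint8_t dst[16], const uint8_t src[16]);

/* ECB: every 16-byte block is encrypted independently, no chaining. */
static void ecb_encrypt(const uint8_t *rk, int rounds,
                        uint8_t *out, const uint8_t *in, size_t blocks)
{
    while (blocks--) {
        aes_encrypt_block(rk, rounds, out, in);
        in  += 16;
        out += 16;
    }
}

Decryption is the same loop with the block decryption primitive; the independence of the blocks is what allows the 4-way (or 5-way) bulk path above.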
132 mov w8, #14 /* AES-256: 14 rounds */
145 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
146 eor v0.16b, v0.16b, v4.16b /* ..and xor with iv */
147 encrypt_block v0, w3, x2, x6, w7
148 eor v1.16b, v1.16b, v0.16b
154 st1 {v0.16b-v3.16b}, [x0], #64
161 ld1 {v0.16b}, [x1], #16 /* get next pt block */
162 eor v4.16b, v4.16b, v0.16b /* ..and xor with iv */
165 subs w4, w4, #1
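
Lines 145 through 165 show the CBC encryption chain: each plaintext block is XORed with the previous ciphertext block (the IV for the first one) and then encrypted, as the eor/encrypt_block/eor sequence at 146-148 shows. A C sketch of the same recurrence (hypothetical aes_encrypt_block() helper):

#include <stddef.h>
#include <stdint.h>

void aes_encrypt_block(const uint8_t *rk, int rounds,
                       uint8_t dst[16], const uint8_t src[16]);

/* CBC encryption: C[i] = E(P[i] ^ C[i-1]), with C[-1] = IV.  The chain
 * makes encryption inherently serial, which is why the assembly above
 * still runs one encrypt_block at a time even though it loads four
 * plaintext blocks per iteration. */
static void cbc_encrypt(const uint8_t *rk, int rounds, uint8_t iv[16],
                        uint8_t *out, const uint8_t *in, size_t blocks)
{
    while (blocks--) {
        for (int i = 0; i < 16; i++)
            iv[i] ^= in[i];                 /* P[i] ^ C[i-1] */
        aes_encrypt_block(rk, rounds, iv, iv);
        for (int i = 0; i < 16; i++)
            out[i] = iv[i];                 /* iv now holds C[i] */
        in  += 16;
        out += 16;
    }
}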
174 stp x29, x30, [sp, #-16]!
179 mov w8, #14 /* AES-256: 14 rounds */
185 stp x29, x30, [sp, #-16]!
195 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
197 ld1 {v4.16b}, [x1], #16 /* get 1 ct block */
198 mov v5.16b, v0.16b
203 eor v0.16b, v0.16b, cbciv.16b
205 ld1 {v5.16b}, [x1], #16 /* reload 1 ct block */
206 ld1 {cbciv.16b}, [x1], #16 /* reload 1 ct block */
211 mov v4.16b, v0.16b
216 eor v0.16b, v0.16b, cbciv.16b
218 ld1 {cbciv.16b}, [x1], #16 /* reload 1 ct block */
222 st1 {v0.16b-v3.16b}, [x0], #64
230 mov v0.16b, v1.16b /* ...and copy to v0 */
231 decrypt_block v0, w3, x2, x6, w7
232 eor v0.16b, v0.16b, cbciv.16b /* xor with iv => pt */
234 st1 {v0.16b}, [x0], #16
235 subs w4, w4, #1
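
The CBC decryption fragments (lines 195 through 235) can process blocks in parallel, because P[i] = D(C[i]) ^ C[i-1] depends only on ciphertext. The mov/reload traffic around cbciv at 198-218 keeps a copy of each ciphertext block before it is overwritten, which matters when decrypting in place. A C sketch of the same recurrence (hypothetical aes_decrypt_block() helper):

#include <stddef.h>
#include <stdint.h>

void aes_decrypt_block(const uint8_t *rk, int rounds,
                       uint8_t dst[16], const uint8_t src[16]);

/* CBC decryption: P[i] = D(C[i]) ^ C[i-1].  Save each ciphertext block
 * before writing the plaintext, since in and out may alias for
 * in-place operation (the mov/reload dance in the assembly above). */
static void cbc_decrypt(const uint8_t *rk, int rounds, uint8_t iv[16],
                        uint8_t *out, const uint8_t *in, size_t blocks)
{
    uint8_t saved_ct[16];

    while (blocks--) {
        for (int i = 0; i < 16; i++)
            saved_ct[i] = in[i];            /* keep C[i] */
        aes_decrypt_block(rk, rounds, out, in);
        for (int i = 0; i < 16; i++) {
            out[i] ^= iv[i];                /* ^ C[i-1] */
            iv[i] = saved_ct[i];            /* C[i] is the next "iv" */
        }
        in  += 16;
        out += 16;
    }
}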
261 ld1 {v0.16b}, [x1], x4 /* overlapping loads */
267 eor v0.16b, v0.16b, v5.16b /* xor with iv */
269 encrypt_block v0, w3, x2, x6, w7
271 eor v1.16b, v1.16b, v0.16b
272 tbl v0.16b, {v0.16b}, v3.16b
276 st1 {v0.16b}, [x4] /* overlapping stores */
290 ld1 {v0.16b}, [x1], x4 /* overlapping loads */
296 decrypt_block v0, w3, x2, x6, w7
297 tbl v2.16b, {v0.16b}, v3.16b
300 tbx v0.16b, {v1.16b}, v4.16b
301 decrypt_block v0, w3, x2, x6, w7
302 eor v0.16b, v0.16b, v5.16b /* xor with iv */
306 st1 {v0.16b}, [x0]
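
Lines 261 through 306 are the CBC ciphertext-stealing paths: the overlapping loads and stores plus the tbl/tbx byte permutes splice the final partial block into the last full one so no padding is needed. A sketch of the encrypt-side tail following the usual CS3 description, where the last two output blocks come out swapped (hypothetical aes_encrypt_block() helper; out is assumed to have room for 16 + len bytes):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

void aes_encrypt_block(const uint8_t *rk, int rounds,
                       uint8_t dst[16], const uint8_t src[16]);

/* CBC-CS3 tail: p1 is the last full plaintext block, p2 the final
 * short block of len bytes (1..16), iv the running CBC state. */
static void cbc_cts_encrypt_tail(const uint8_t *rk, int rounds,
                                 uint8_t iv[16], uint8_t *out,
                                 const uint8_t *p1, const uint8_t *p2,
                                 size_t len)
{
    uint8_t e[16], d[16];
    int i;

    for (i = 0; i < 16; i++)                /* E = E(P1 ^ C[-1]) */
        e[i] = p1[i] ^ iv[i];
    aes_encrypt_block(rk, rounds, e, e);

    memcpy(d, e, 16);                        /* D = E ^ (P2 || 0..0) */
    for (i = 0; i < (int)len; i++)
        d[i] ^= p2[i];

    memcpy(out + 16, e, len);                /* short ct = head of E */
    aes_encrypt_block(rk, rounds, out, d);   /* full ct = E(D), output first */
}

Decryption inverts this: decrypt the full block, steal its tail to rebuild the stolen bytes, then decrypt again, which is what the two decrypt_block calls at 296 and 301 with the tbl/tbx shuffle in between correspond to.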
340 stp x29, x30, [sp, #-16]!
349 * the 64-bit counter with the IV.
355 umov IV_PART, vctr.d[1]
368 * Set up the counter values in v0-v{MAX_STRIDE-1}.
372 * v{MAX_STRIDE-1}. For example: if encrypting two blocks with
380 mov v0.16b, vctr.16b
386 sub x6, CTR, #MAX_STRIDE - 1
387 sub x7, CTR, #MAX_STRIDE - 2
388 sub x8, CTR, #MAX_STRIDE - 3
389 sub x9, CTR, #MAX_STRIDE - 4
390 ST5( sub x10, CTR, #MAX_STRIDE - 5 )
396 mov v0.d[0], x6
403 .subsection 1
416 add x8, x8, #1
428 adr x16, 1f
432 mov v0.d[0], vctr.d[0]
441 1: b 2f
445 ins vctr.d[1], x7
446 sub x7, IV_PART, #MAX_STRIDE - 1
447 sub x8, IV_PART, #MAX_STRIDE - 2
448 sub x9, IV_PART, #MAX_STRIDE - 3
451 mov v1.d[1], x7
453 ST5( sub x10, IV_PART, #MAX_STRIDE - 4 )
454 mov v2.d[1], x8
456 mov v3.d[1], x9
457 ST5( mov v4.d[1], x10 )
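
Lines 340 through 457 build the per-block counter values for CTR mode: the low 64 bits of the big-endian counter are kept in a general-purpose register (the umov at 355), successive counter values are placed in v0-v{MAX_STRIDE-1}, and the out-of-line subsection starting at 403 handles the rare carry out of the low half into the high half. A C sketch of that split 128-bit big-endian increment, assuming the counter is held as 16 big-endian bytes (GCC/Clang __builtin_bswap64):

#include <stdint.h>
#include <string.h>

/* Increment a 128-bit big-endian counter by n, split into 64-bit
 * halves the way the assembly keeps the low half in IV_PART: the fast
 * path only bumps the low half; propagating a carry into the high
 * half is the rare, out-of-line case. */
static void ctr128_add(uint8_t ctr[16], uint64_t n)
{
    uint64_t hi, lo;

    memcpy(&hi, ctr, 8);
    memcpy(&lo, ctr + 8, 8);
    hi = __builtin_bswap64(hi);             /* bytes are big-endian */
    lo = __builtin_bswap64(lo);

    if (lo + n < lo)                        /* unsigned wrap => carry */
        hi++;
    lo += n;

    hi = __builtin_bswap64(hi);
    lo = __builtin_bswap64(lo);
    memcpy(ctr, &hi, 8);
    memcpy(ctr + 8, &lo, 8);
}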
465 ld1 {v5.16b-v7.16b}, [IN], #48
468 eor v0.16b, v5.16b, v0.16b
471 ST5( ld1 {v5.16b-v6.16b}, [IN], #32 )
475 st1 {v0.16b-v3.16b}, [OUT], #64
489 * Handle up to MAX_STRIDE * 16 - 1 bytes of plaintext
491 * This code expects the last keystream block to be in v{MAX_STRIDE-1}.
504 ST5( cmp BYTES_W, #64 - (MAX_STRIDE << 4))
506 cmp BYTES_W, #48 - (MAX_STRIDE << 4)
508 cmp BYTES_W, #32 - (MAX_STRIDE << 4)
510 cmp BYTES_W, #16 - (MAX_STRIDE << 4)
527 ST4( eor v6.16b, v6.16b, v0.16b )
533 ST5( eor v5.16b, v5.16b, v0.16b )
571 ld1 {v10.16b-v11.16b}, [x9]
602 * to be at the end of this 16-byte temporary buffer rather than the
617 * to be at the end of this 16-byte temporary buffer rather than the
622 ctr_encrypt 1
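
The tail handling (lines 489-533 and the comments at 602/617) covers up to MAX_STRIDE * 16 - 1 trailing bytes by staging them at the end of a 16-byte temporary buffer so that whole-block loads and stores still work. The scalar effect is simply: generate one more keystream block and XOR only the bytes that remain. A C sketch of the whole mode, using a plain bytewise big-endian increment instead of the buffer trick (hypothetical aes_encrypt_block() helper; encryption and decryption are the same operation in CTR):

#include <stddef.h>
#include <stdint.h>

void aes_encrypt_block(const uint8_t *rk, int rounds,
                       uint8_t dst[16], const uint8_t src[16]);

/* CTR: out = in ^ E(counter), counter incremented per block; a final
 * partial block consumes only the leading bytes of one more keystream
 * block. */
static void ctr_crypt(const uint8_t *rk, int rounds, uint8_t ctr[16],
                      uint8_t *out, const uint8_t *in, size_t len)
{
    uint8_t ks[16];

    while (len) {
        size_t n = len < 16 ? len : 16;

        aes_encrypt_block(rk, rounds, ks, ctr);
        for (int i = 15; i >= 0; i--)       /* big-endian increment */
            if (++ctr[i])
                break;
        for (size_t i = 0; i < n; i++)
            out[i] = in[i] ^ ks[i];
        in  += n;
        out += n;
        len -= n;
    }
}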
648 stp x29, x30, [sp, #-16]!
668 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
670 eor v0.16b, v0.16b, v4.16b
678 eor v0.16b, v0.16b, v4.16b
681 st1 {v0.16b-v3.16b}, [x0], #64
692 ld1 {v0.16b}, [x1], #16
694 eor v0.16b, v0.16b, v4.16b
695 encrypt_block v0, w3, x2, x8, w7
696 eor v0.16b, v0.16b, v4.16b
701 st1 {v0.16b}, [x0], #16
704 st1 {v0.16b}, [x0]
711 mov v0.16b, v3.16b
727 tbl v2.16b, {v0.16b}, v2.16b
728 tbx v0.16b, {v1.16b}, v3.16b
735 stp x29, x30, [sp, #-16]!
761 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
763 eor v0.16b, v0.16b, v4.16b
771 eor v0.16b, v0.16b, v4.16b
774 st1 {v0.16b-v3.16b}, [x0], #64
784 ld1 {v0.16b}, [x1], #16
787 eor v0.16b, v0.16b, v4.16b
788 decrypt_block v0, w3, x2, x8, w7
789 eor v0.16b, v0.16b, v4.16b
790 st1 {v0.16b}, [x0], #16
816 eor v0.16b, v0.16b, v5.16b
817 decrypt_block v0, w3, x2, x8, w7
818 eor v0.16b, v0.16b, v5.16b
820 tbl v2.16b, {v0.16b}, v2.16b
821 tbx v0.16b, {v1.16b}, v3.16b
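
Lines 648 through 821 are the XTS paths. Each block is processed xor-encrypt-xor against the current tweak in v4 (the eor/encrypt_block/eor sandwich at 694-696, and its decrypt twin at 787-789), and between blocks the tweak is multiplied by x in GF(2^128); the tbl/tbx permutes at 727-728 and 820-821 handle ciphertext stealing for a trailing partial block. A C sketch of one full block plus the little-endian tweak update (hypothetical aes_encrypt_block() helper):

#include <stdint.h>

void aes_encrypt_block(const uint8_t *rk, int rounds,
                       uint8_t dst[16], const uint8_t src[16]);

/* Multiply the XTS tweak by x in GF(2^128), little-endian convention:
 * shift the 128-bit value left one bit and fold the carry back in
 * with the reduction polynomial 0x87. */
static void xts_tweak_next(uint8_t t[16])
{
    uint8_t carry = t[15] >> 7;

    for (int i = 15; i > 0; i--)
        t[i] = (uint8_t)(t[i] << 1) | (t[i - 1] >> 7);
    t[0] = (uint8_t)(t[0] << 1) ^ (carry ? 0x87 : 0);
}

/* One XTS block: xor-encrypt-xor with the current tweak. */
static void xts_encrypt_block(const uint8_t *rk, int rounds,
                              uint8_t t[16], uint8_t out[16],
                              const uint8_t in[16])
{
    uint8_t b[16];

    for (int i = 0; i < 16; i++)
        b[i] = in[i] ^ t[i];
    aes_encrypt_block(rk, rounds, b, b);
    for (int i = 0; i < 16; i++)
        out[i] = b[i] ^ t[i];
    xts_tweak_next(t);
}

Decryption uses the exact same tweak sequence, only with block decryption between the two XORs.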
833 ld1 {v0.16b}, [x4] /* get dg */
837 encrypt_block v0, w2, x1, x7, w8
842 ld1 {v1.16b-v4.16b}, [x0], #64 /* get next pt block */
843 eor v0.16b, v0.16b, v1.16b /* ..and xor with dg */
844 encrypt_block v0, w2, x1, x7, w8
845 eor v0.16b, v0.16b, v2.16b
846 encrypt_block v0, w2, x1, x7, w8
847 eor v0.16b, v0.16b, v3.16b
848 encrypt_block v0, w2, x1, x7, w8
849 eor v0.16b, v0.16b, v4.16b
853 encrypt_block v0, w2, x1, x7, w8
854 st1 {v0.16b}, [x4] /* return dg */
862 eor v0.16b, v0.16b, v1.16b /* ..and xor with dg */
864 subs w3, w3, #1
869 encrypt_block v0, w2, x1, x7, w8
873 st1 {v0.16b}, [x4] /* return dg */
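
The final fragments (lines 833 through 873) are a CBC-MAC-style digest update: the running digest is XORed with each plaintext block and re-encrypted, with the loop at 842-849 unrolled four blocks deep. The chain through the digest keeps the computation serial regardless of unrolling. A C sketch (hypothetical aes_encrypt_block() helper):

#include <stddef.h>
#include <stdint.h>

void aes_encrypt_block(const uint8_t *rk, int rounds,
                       uint8_t dst[16], const uint8_t src[16]);

/* CBC-MAC update: dg = E(dg ^ P[i]) for each block; the unrolled
 * assembly loop performs exactly this, four blocks per iteration. */
static void mac_update(const uint8_t *rk, int rounds, uint8_t dg[16],
                       const uint8_t *in, size_t blocks)
{
    while (blocks--) {
        for (int i = 0; i < 16; i++)
            dg[i] ^= in[i];
        aes_encrypt_block(rk, rounds, dg, dg);
        in += 16;
    }
}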