Lines matching refs:b (search hits over the arm64 AES modes assembly, arch/arm64/crypto/aes-modes.S)

63 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
65 ST5( ld1 {v4.16b}, [x1], #16 )
67 st1 {v0.16b-v3.16b}, [x0], #64
68 ST5( st1 {v4.16b}, [x0], #16 )
69 b .LecbencloopNx
74 ld1 {v0.16b}, [x1], #16 /* get next pt block */
76 st1 {v0.16b}, [x0], #16
94 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
96 ST5( ld1 {v4.16b}, [x1], #16 )
98 st1 {v0.16b-v3.16b}, [x0], #64
99 ST5( st1 {v4.16b}, [x0], #16 )
100 b .LecbdecloopNx
105 ld1 {v0.16b}, [x1], #16 /* get next ct block */
107 st1 {v0.16b}, [x0], #16
130 ld1 {v4.16b}, [x5] /* get iv */
136 b .Lcbcencloop4x
139 ld1 {v4.16b}, [x5] /* get iv */
145 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
146 eor v0.16b, v0.16b, v4.16b /* ..and xor with iv */
148 eor v1.16b, v1.16b, v0.16b
150 eor v2.16b, v2.16b, v1.16b
152 eor v3.16b, v3.16b, v2.16b
154 st1 {v0.16b-v3.16b}, [x0], #64
155 mov v4.16b, v3.16b
156 b .Lcbcencloop4x
161 ld1 {v0.16b}, [x1], #16 /* get next pt block */
162 eor v4.16b, v4.16b, v0.16b /* ..and xor with iv */
164 st1 {v4.16b}, [x0], #16
168 st1 {v4.16b}, [x5] /* return iv */
177 ld1 {cbciv.16b}, [x5] /* get iv */
182 b .Lessivcbcdecstart
188 ld1 {cbciv.16b}, [x5] /* get iv */
195 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
197 ld1 {v4.16b}, [x1], #16 /* get 1 ct block */
198 mov v5.16b, v0.16b
199 mov v6.16b, v1.16b
200 mov v7.16b, v2.16b
203 eor v0.16b, v0.16b, cbciv.16b
204 eor v1.16b, v1.16b, v5.16b
205 ld1 {v5.16b}, [x1], #16 /* reload 1 ct block */
206 ld1 {cbciv.16b}, [x1], #16 /* reload 1 ct block */
207 eor v2.16b, v2.16b, v6.16b
208 eor v3.16b, v3.16b, v7.16b
209 eor v4.16b, v4.16b, v5.16b
211 mov v4.16b, v0.16b
212 mov v5.16b, v1.16b
213 mov v6.16b, v2.16b
216 eor v0.16b, v0.16b, cbciv.16b
217 eor v1.16b, v1.16b, v4.16b
218 ld1 {cbciv.16b}, [x1], #16 /* reload 1 ct block */
219 eor v2.16b, v2.16b, v5.16b
220 eor v3.16b, v3.16b, v6.16b
222 st1 {v0.16b-v3.16b}, [x0], #64
223 ST5( st1 {v4.16b}, [x0], #16 )
224 b .LcbcdecloopNx
229 ld1 {v1.16b}, [x1], #16 /* get next ct block */
230 mov v0.16b, v1.16b /* ...and copy to v0 */
232 eor v0.16b, v0.16b, cbciv.16b /* xor with iv => pt */
233 mov cbciv.16b, v1.16b /* ct is next iv */
234 st1 {v0.16b}, [x0], #16
238 st1 {cbciv.16b}, [x5] /* return iv */
258 ld1 {v3.16b}, [x8]
259 ld1 {v4.16b}, [x9]
261 ld1 {v0.16b}, [x1], x4 /* overlapping loads */
262 ld1 {v1.16b}, [x1]
264 ld1 {v5.16b}, [x5] /* get iv */
267 eor v0.16b, v0.16b, v5.16b /* xor with iv */
268 tbl v1.16b, {v1.16b}, v4.16b
271 eor v1.16b, v1.16b, v0.16b
272 tbl v0.16b, {v0.16b}, v3.16b
276 st1 {v0.16b}, [x4] /* overlapping stores */
277 st1 {v1.16b}, [x0]
287 ld1 {v3.16b}, [x8]
288 ld1 {v4.16b}, [x9]
290 ld1 {v0.16b}, [x1], x4 /* overlapping loads */
291 ld1 {v1.16b}, [x1]
293 ld1 {v5.16b}, [x5] /* get iv */
297 tbl v2.16b, {v0.16b}, v3.16b
298 eor v2.16b, v2.16b, v1.16b
300 tbx v0.16b, {v1.16b}, v4.16b
302 eor v0.16b, v0.16b, v5.16b /* xor with iv */
305 st1 {v2.16b}, [x4] /* overlapping stores */
306 st1 {v0.16b}, [x0]
344 ld1 {vctr.16b}, [IV]
380 mov v0.16b, vctr.16b
381 mov v1.16b, vctr.16b
382 mov v2.16b, vctr.16b
383 mov v3.16b, vctr.16b
384 ST5( mov v4.16b, vctr.16b )
441 1: b 2f
465 ld1 {v5.16b-v7.16b}, [IN], #48
468 eor v0.16b, v5.16b, v0.16b
469 ST4( ld1 {v5.16b}, [IN], #16 )
470 eor v1.16b, v6.16b, v1.16b
471 ST5( ld1 {v5.16b-v6.16b}, [IN], #32 )
472 eor v2.16b, v7.16b, v2.16b
473 eor v3.16b, v5.16b, v3.16b
474 ST5( eor v4.16b, v6.16b, v4.16b )
475 st1 {v0.16b-v3.16b}, [OUT], #64
476 ST5( st1 {v4.16b}, [OUT], #16 )
478 b .LctrloopNx\xctr
482 st1 {vctr.16b}, [IV] /* return next CTR value */
516 ST5( ld1 {v5.16b}, [IN], x14 )
517 ld1 {v6.16b}, [IN], x15
518 ld1 {v7.16b}, [IN], x16
523 ld1 {v8.16b}, [IN], x13
524 ld1 {v9.16b}, [IN]
525 ld1 {v10.16b}, [x9]
527 ST4( eor v6.16b, v6.16b, v0.16b )
528 ST4( eor v7.16b, v7.16b, v1.16b )
529 ST4( tbl v3.16b, {v3.16b}, v10.16b )
530 ST4( eor v8.16b, v8.16b, v2.16b )
531 ST4( eor v9.16b, v9.16b, v3.16b )
533 ST5( eor v5.16b, v5.16b, v0.16b )
534 ST5( eor v6.16b, v6.16b, v1.16b )
535 ST5( tbl v4.16b, {v4.16b}, v10.16b )
536 ST5( eor v7.16b, v7.16b, v2.16b )
537 ST5( eor v8.16b, v8.16b, v3.16b )
538 ST5( eor v9.16b, v9.16b, v4.16b )
540 ST5( st1 {v5.16b}, [OUT], x14 )
541 st1 {v6.16b}, [OUT], x15
542 st1 {v7.16b}, [OUT], x16
544 st1 {v9.16b}, [x13] // overlapping stores
545 st1 {v8.16b}, [OUT]
546 b .Lctrout\xctr
567 ld1 {v5.16b}, [IN]
568 ld1 {v6.16b}, [OUT]
569 ST5( mov v3.16b, v4.16b )
571 ld1 {v10.16b-v11.16b}, [x9]
572 tbl v3.16b, {v3.16b}, v10.16b
573 sshr v11.16b, v11.16b, #7
574 eor v5.16b, v5.16b, v3.16b
575 bif v5.16b, v6.16b, v11.16b
576 st1 {v5.16b}, [OUT]
577 b .Lctrout\xctr
635 and \tmp\().16b, \tmp\().16b, xtsmask.16b
637 ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8
638 eor \out\().16b, \out\().16b, \tmp\().16b
651 ld1 {v4.16b}, [x6]
659 b .LxtsencNx
668 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
670 eor v0.16b, v0.16b, v4.16b
672 eor v1.16b, v1.16b, v5.16b
673 eor v2.16b, v2.16b, v6.16b
675 eor v3.16b, v3.16b, v7.16b
677 eor v3.16b, v3.16b, v7.16b
678 eor v0.16b, v0.16b, v4.16b
679 eor v1.16b, v1.16b, v5.16b
680 eor v2.16b, v2.16b, v6.16b
681 st1 {v0.16b-v3.16b}, [x0], #64
682 mov v4.16b, v7.16b
685 b .LxtsencloopNx
692 ld1 {v0.16b}, [x1], #16
694 eor v0.16b, v0.16b, v4.16b
696 eor v0.16b, v0.16b, v4.16b
701 st1 {v0.16b}, [x0], #16
702 b .Lxtsencloop
704 st1 {v0.16b}, [x0]
706 st1 {v4.16b}, [x6]
711 mov v0.16b, v3.16b
723 ld1 {v1.16b}, [x1] /* load final block */
724 ld1 {v2.16b}, [x8]
725 ld1 {v3.16b}, [x9]
727 tbl v2.16b, {v0.16b}, v2.16b
728 tbx v0.16b, {v1.16b}, v3.16b
729 st1 {v2.16b}, [x4] /* overlapping stores */
731 b .Lxtsencctsout
743 ld1 {v4.16b}, [x6]
752 b .LxtsdecNx
761 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
763 eor v0.16b, v0.16b, v4.16b
765 eor v1.16b, v1.16b, v5.16b
766 eor v2.16b, v2.16b, v6.16b
768 eor v3.16b, v3.16b, v7.16b
770 eor v3.16b, v3.16b, v7.16b
771 eor v0.16b, v0.16b, v4.16b
772 eor v1.16b, v1.16b, v5.16b
773 eor v2.16b, v2.16b, v6.16b
774 st1 {v0.16b-v3.16b}, [x0], #64
775 mov v4.16b, v7.16b
778 b .LxtsdecloopNx
784 ld1 {v0.16b}, [x1], #16
787 eor v0.16b, v0.16b, v4.16b
789 eor v0.16b, v0.16b, v4.16b
790 st1 {v0.16b}, [x0], #16
794 b .Lxtsdecloop
796 st1 {v4.16b}, [x6]
812 ld1 {v1.16b}, [x1] /* load final block */
813 ld1 {v2.16b}, [x8]
814 ld1 {v3.16b}, [x9]
816 eor v0.16b, v0.16b, v5.16b
818 eor v0.16b, v0.16b, v5.16b
820 tbl v2.16b, {v0.16b}, v2.16b
821 tbx v0.16b, {v1.16b}, v3.16b
823 st1 {v2.16b}, [x4] /* overlapping stores */
825 b .Lxtsdecctsout
833 ld1 {v0.16b}, [x4] /* get dg */
842 ld1 {v1.16b-v4.16b}, [x0], #64 /* get next pt block */
843 eor v0.16b, v0.16b, v1.16b /* ..and xor with dg */
845 eor v0.16b, v0.16b, v2.16b
847 eor v0.16b, v0.16b, v3.16b
849 eor v0.16b, v0.16b, v4.16b
854 st1 {v0.16b}, [x4] /* return dg */
856 b .Lmacloop4x
861 ld1 {v1.16b}, [x0], #16 /* get next pt block */
862 eor v0.16b, v0.16b, v1.16b /* ..and xor with dg */
870 b .Lmacloop
873 st1 {v0.16b}, [x4] /* return dg */
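
Most of the hits above are the chaining and whitening steps of the various modes: each eor mixes an IV, the previous ciphertext block, or an XTS tweak into a block before or after the block cipher call, and the trailing mov/st1 of v4 or cbciv carries the chaining value into the next iteration or back to the caller. As a rough reference only, below is a minimal C sketch of the plain CBC-encrypt chaining that the lines around 145-156 implement in unrolled form. aes_encrypt_block is a hypothetical stand-in for the single-block primitive (encrypt_block in the assembly), not an actual kernel API, and the sketch is not how the kernel driver itself is structured.

  #include <stddef.h>
  #include <stdint.h>
  #include <string.h>

  #define AES_BLOCK_SIZE 16

  /* Hypothetical single-block AES primitive; stands in for encrypt_block. */
  void aes_encrypt_block(const void *key, uint8_t dst[AES_BLOCK_SIZE],
                         const uint8_t src[AES_BLOCK_SIZE]);

  /*
   * CBC encryption: XOR each plaintext block with the previous ciphertext
   * block (the IV for the first block), encrypt it, and keep the resulting
   * ciphertext block as the next chaining value; this corresponds to the
   * "xor with iv", "mov v4.16b, v3.16b", and "return iv" steps listed above.
   */
  static void cbc_encrypt(const void *key, uint8_t *dst, const uint8_t *src,
                          size_t blocks, uint8_t iv[AES_BLOCK_SIZE])
  {
          uint8_t buf[AES_BLOCK_SIZE];

          while (blocks--) {
                  for (int i = 0; i < AES_BLOCK_SIZE; i++)
                          buf[i] = src[i] ^ iv[i];  /* eor with iv / prev ct */
                  aes_encrypt_block(key, dst, buf); /* encrypt_block */
                  memcpy(iv, dst, AES_BLOCK_SIZE);  /* ct becomes next iv */
                  src += AES_BLOCK_SIZE;
                  dst += AES_BLOCK_SIZE;
          }
  }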