Lines matching "64-byte"
1 /* SPDX-License-Identifier: GPL-2.0 */
23 #define SAVE_AMOUNT 64
165 .align 64
171 save %sp, -SAVE_AMOUNT, %sp
186 * the destination to a 64-byte boundary which can chew up
187 * to (64 - 1) bytes from the length before we perform the
190 cmp %i2, (2 * 64)
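
The (2 * 64) comparison is the guard the comment above describes: aligning the destination may consume up to (64 - 1) bytes, so only lengths of at least two full blocks are guaranteed to leave one complete 64-byte iteration for the block loop. A minimal C sketch of that gate (the function name is illustrative, not from the kernel):

#include <stddef.h>

/* Sketch, not the kernel's code: gate for the 64-byte block loop.
 * Aligning the destination may consume up to (64 - 1) bytes, and the
 * loop itself needs one whole 64-byte block, so anything shorter than
 * 2 * 64 bytes takes the short-copy path (label 70) instead. */
static int wants_block_loop(size_t len)
{
	return len >= 2 * 64;	/* the "cmp %i2, (2 * 64)" test */
}
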
205 /* Align destination on 64-byte boundary. */
206 andcc %o0, (64 - 1), %i4
208 sub %i4, 64, %i4
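
The andcc/sub pair computes how many leading bytes must be copied before the destination reaches the next 64-byte boundary. In C terms (a hypothetical helper, not the kernel's code):

#include <stdint.h>

/* Sketch of the alignment arithmetic: the low six bits of dst give the
 * misalignment, and the bytes needed to reach the next 64-byte boundary
 * are 64 minus that (zero when already aligned). */
static unsigned bytes_to_align64(uintptr_t dst)
{
	unsigned mis = dst & (64 - 1);	/* andcc %o0, (64 - 1), %i4 */
	return mis ? 64 - mis : 0;	/* sub %i4, 64, %i4, then negated */
}
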
218 /* If the source is on a 16-byte boundary we can do
219 * the direct block copy loop. If it is 8-byte aligned
220 * we can do the 16-byte loads offset by -8 bytes and the
223 * If the source is not even 8-byte aligned, we need to do
229 * contents. Since the loop works on 64-bytes of 64-byte
233 andcc %i1, (16 - 1), %i4
234 andn %i2, (64 - 1), %g1 ! block copy loop iterator
236 sub %i2, %g1, %i2 ! final sub-block copy bytes
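
Taken together, these lines split the work: andn rounds the length down to whole 64-byte blocks for the main loop, the subtraction keeps the remainder for the tail, and the andcc on the source pointer selects one of the three loop flavours the comment describes. A rough C rendering of that dispatch (identifiers invented for illustration):

#include <stddef.h>
#include <stdint.h>

/* Rough C rendering of the split and dispatch; names are made up. */
static void split_and_dispatch(uintptr_t src, size_t len)
{
	size_t block = len & ~(size_t)(64 - 1);	/* andn %i2, (64 - 1), %g1 */
	size_t tail  = len - block;		/* sub  %i2, %g1, %i2 */

	if ((src & (16 - 1)) == 0) {
		/* 16-byte aligned source: direct block copy (label 50). */
	} else if ((src & (8 - 1)) == 0) {
		/* 8-byte aligned: 16-byte loads offset by -8 (label 10). */
	} else {
		/* not even 8-byte aligned: shift and mask. */
	}
	(void)block;
	(void)tail;
}
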
242 /* Neither 8-byte nor 16-byte aligned, shift and mask. */
245 mov 64, %i5
251 mov 64, %i3
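
The two "mov 64" instructions build the complementary shift for the shift-and-mask path: with the source misaligned by (src & 7) bytes, each output word is assembled from two adjacent aligned loads, one shifted left by 8 * (src & 7) bits and the other shifted right by 64 minus that. A hedged C sketch of the merge (assuming the big-endian byte order SPARC uses; the helper name is invented):

#include <stdint.h>

/* Sketch of "integer faligndata" for a source misaligned by mis bytes
 * (mis is 1..7 here; the aligned cases take the other paths).  lo is
 * the aligned word holding the first wanted byte, hi the next word;
 * big-endian order as on SPARC. */
static uint64_t merge_shifted(uint64_t lo, uint64_t hi, unsigned mis)
{
	unsigned lshift = mis * 8;	/* bits to drop from lo */
	unsigned rshift = 64 - lshift;	/* the "mov 64, %i5/%i3" pattern */

	return (lo << lshift) | (hi >> rshift);
}
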
284 add %i1, 64, %i1
290 subcc %g1, 64, %g1
292 add %o0, 64, %o0
317 add %i1, 64, %i1
323 subcc %g1, 64, %g1
325 add %o0, 64, %o0
330 10: /* Destination is 64-byte aligned, source was only 8-byte
339 mov 64, %o1
351 add %i1, 64, %i1
354 subcc %g1, 64, %g1
356 add %o0, 64, %o0
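
The comment at label 10 refers to a small software-pipelining trick: backing the source up by 8 bytes makes the 16-byte twin loads aligned again, at the cost of always reading one word ahead. The following C is only a loose model of the pointer bookkeeping, not of the actual twin-load/init-store sequence:

#include <stdint.h>
#include <stddef.h>

/* Loose model of the label-10 loop: src8 points one 8-byte word before
 * the real source ("subtracted by 8"), so every read is one word ahead
 * and the 16-byte twin loads in the real code stay aligned.  blocks is
 * the number of 64-byte blocks; plain C stand-in only. */
static void block_loop_8aligned(uint64_t *dst, const uint64_t *src8,
				size_t blocks)
{
	while (blocks--) {
		for (int i = 0; i < 8; i++)	/* 8 x 8 = 64 bytes */
			dst[i] = src8[i + 1];	/* one word ahead */
		src8 += 8;			/* add %i1, 64, %i1 */
		dst  += 8;			/* add %o0, 64, %o0 */
	}
}
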
361 50: /* Destination is 64-byte aligned, and source is 16-byte
367 mov 64, %o1
377 add %i1, 64, %i1
382 subcc %g1, 64, %g1
384 add %o0, 64, %o0
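
Label 50 is the easy case: with both pointers suitably aligned, the loop streams one 64-byte block per iteration, which is why every arithmetic step here moves in units of 64. An illustrative C equivalent (the kernel actually issues 16-byte loads and cache-initializing stores per block):

#include <stdint.h>
#include <stddef.h>

/* Illustrative C for the label-50 loop; bytes is the andn result and
 * therefore a multiple of 64. */
static void block_loop_16aligned(uint64_t *dst, const uint64_t *src,
				 size_t bytes)
{
	while (bytes) {
		for (int i = 0; i < 8; i++)	/* one 64-byte block */
			dst[i] = src[i];
		src   += 8;	/* add   %i1, 64, %i1 */
		dst   += 8;	/* add   %o0, 64, %o0 */
		bytes -= 64;	/* subcc %g1, 64, %g1 */
	}
}
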
391 * over. If anything is left, we copy it one byte at a time.
399 .align 64
400 70: /* 16 < len <= 64 */
460 8: mov 64, %i3
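
After the block loop, anything the 64-byte iterations could not cover is drained in progressively smaller units, ending, as the comment says, one byte at a time; label 70 is the entry point for copies that were too short for the block loop in the first place, and the "8: mov 64, %i3" line is the same complementary-shift setup reused for the short unaligned path. A simplified C sketch of the cleanup (hypothetical helper, not the kernel's tail code):

#include <stddef.h>

/* Simplified cleanup sketch: drain what the 64-byte loop left over in
 * 8-byte steps while possible, then one byte at a time. */
static void copy_tail(unsigned char *dst, const unsigned char *src,
		      size_t len)
{
	while (len >= 8) {
		__builtin_memcpy(dst, src, 8);	/* one 8-byte chunk */
		dst += 8; src += 8; len -= 8;
	}
	while (len--)				/* byte-at-a-time finish */
		*dst++ = *src++;
}
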
483 .align 64
509 .size FUNC_NAME, .-FUNC_NAME