Lines matching "64-byte"
1 /* SPDX-License-Identifier: GPL-2.0-only */
11 #include <asm/asm-offsets.h>
45 * copy_user_generic_unrolled - memory copy with exception handling.
60 jb 20f /* less than 8 bytes, go to byte copy loop */
82 leaq 64(%rsi),%rsi
83 leaq 64(%rdi),%rdi
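The two leaq's above close one pass of the unrolled loop in copy_user_generic_unrolled: eight 8-byte moves, then both pointers advance by 64. A minimal C sketch of that shape (hypothetical helper; no exception tables or fault fixup, and it assumes pointers suitably aligned for 8-byte loads):

	#include <stddef.h>
	#include <stdint.h>

	static void copy_unrolled_sketch(void *dst, const void *src, size_t len)
	{
		uint64_t *d = dst;
		const uint64_t *s = src;
		unsigned char *db;
		const unsigned char *sb;

		while (len >= 64) {	/* eight 8-byte moves per pass */
			d[0] = s[0]; d[1] = s[1]; d[2] = s[2]; d[3] = s[3];
			d[4] = s[4]; d[5] = s[5]; d[6] = s[6]; d[7] = s[7];
			s += 8;		/* leaq 64(%rsi),%rsi */
			d += 8;		/* leaq 64(%rdi),%rdi */
			len -= 64;
		}
		db = (unsigned char *)d;
		sb = (const unsigned char *)s;
		while (len--)		/* byte copy loop for the tail */
			*db++ = *sb++;
	}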
164 jb 2f /* less than 8 bytes, go to byte copy loop */
203 cmpl $64,%edx
204 jb .L_copy_short_string /* less than 64 bytes, avoid the costly 'rep' */
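The cmpl/jb pair above is the size dispatch: under 64 bytes the startup overhead of the string instructions dominates, so control falls into .L_copy_short_string instead of 'rep'. A hedged C sketch of that decision (GCC/Clang inline asm on x86-64; the helper name is made up, and a plain byte loop stands in for the short-string path):

	#include <stddef.h>

	static void copy_dispatch_sketch(void *dst, const void *src, size_t len)
	{
		if (len < 64) {		/* short: byte loop stands in here */
			unsigned char *d = dst;
			const unsigned char *s = src;

			while (len--)
				*d++ = *s++;
			return;
		}
		/* long: let the microcoded fast-string copy do the work */
		asm volatile("rep movsb"
			     : "+D" (dst), "+S" (src), "+c" (len)
			     : : "memory");
	}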
246 * is counter-intuitive, but needed to prevent the code
259 * copy_user_nocache - Uncached memory copy with exception handling
264 * - Require 8-byte alignment when size is 8 bytes or larger.
265 * - Require 4-byte alignment when size is 4 bytes.
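Read as a caller-side contract, those two requirements amount to a simple check; a sketch in C (hypothetical helper — the assembly itself handles misaligned cases by falling back to cached copies, as the lines below show):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdbool.h>

	static bool nocache_alignment_ok(const void *dst, size_t size)
	{
		uintptr_t a = (uintptr_t)dst;

		if (size >= 8)		/* 8-byte alignment required */
			return (a & 7) == 0;
		if (size == 4)		/* 4-byte alignment required */
			return (a & 3) == 0;
		return true;		/* no requirement stated otherwise */
	}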
270 /* If size is less than 8 bytes, go to 4-byte copy */
274 /* If destination is not 8-byte aligned, "cache" copy to align it */
277 /* Set 4x8-byte copy count and remainder */
283 /* Perform 4x8-byte nocache loop-copy */
301 leaq 64(%rsi),%rsi
302 leaq 64(%rdi),%rdi
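This is the 4x8-byte non-temporal loop: 64 bytes per pass via movnti stores, then the pointer advance shown by the two leaq's. A sketch with SSE2 intrinsics (hypothetical helper; the real code precomputes an iteration count as len >> 6 with remainder len & 63 rather than testing len each pass, and it assumes an 8-byte-aligned destination):

	#include <stddef.h>
	#include <emmintrin.h>	/* _mm_stream_si64 emits movnti */

	static void nocache_loop_sketch(long long *dst, const long long *src,
					size_t len)
	{
		while (len >= 64) {	/* one 4x8-byte (64-byte) pass */
			_mm_stream_si64(&dst[0], src[0]);
			_mm_stream_si64(&dst[1], src[1]);
			_mm_stream_si64(&dst[2], src[2]);
			_mm_stream_si64(&dst[3], src[3]);
			_mm_stream_si64(&dst[4], src[4]);
			_mm_stream_si64(&dst[5], src[5]);
			_mm_stream_si64(&dst[6], src[6]);
			_mm_stream_si64(&dst[7], src[7]);
			src += 8;	/* leaq 64(%rsi),%rsi */
			dst += 8;	/* leaq 64(%rdi),%rdi */
			len -= 64;
		}
	}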
306 /* Set 8-byte copy count and remainder */
313 /* Perform 8-byte nocache loop-copy */
322 /* If no bytes are left, we're done */
327 /* If destination is not 4-byte aligned, go to byte copy: */
332 /* Set 4-byte copy count (1 or 0) and remainder */
338 /* Perform 4-byte nocache copy: */
348 /* Perform byte "cache" loop-copy for the remainder */
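Putting the last three steps together: at most one 4-byte non-temporal store (the "1 or 0" count above), then an ordinary cached byte loop for whatever remains. A sketch in C (hypothetical helper; the closing sfence is an assumption about where the ordering barrier goes — non-temporal stores are weakly ordered and must be fenced before the copy can be treated as globally visible):

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>
	#include <emmintrin.h>	/* _mm_stream_si32 (movnti), _mm_sfence */

	static void nocache_tail_sketch(unsigned char *dst,
					const unsigned char *src, size_t len)
	{
		if (len >= 4 && ((uintptr_t)dst & 3) == 0) {
			int v;

			memcpy(&v, src, sizeof(v));	/* unaligned-safe load */
			_mm_stream_si32((int *)dst, v);	/* 4-byte nocache store */
			dst += 4;
			src += 4;
			len -= 4;
		}
		while (len--)		/* byte "cache" loop-copy */
			*dst++ = *src++;
		_mm_sfence();		/* order the non-temporal stores */
	}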