17 #include <asm/asm-offsets.h>
22 #define src a1
28 * memcpy copies len bytes from src to dst and sets v0 to dst.
30 * - src and dst don't overlap
31 * - src is readable
32 * - dst is writable
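In C terms this is the standard memcpy contract. A minimal reference sketch of the semantics documented above (illustrative only; the function name is ours, not the kernel's):

#include <stddef.h>

/* memcpy semantics as documented above: copy len bytes from src to
 * dst and return dst (the assembly returns it in v0). Assumes the
 * regions don't overlap, src is readable and dst is writable. */
static void *memcpy_ref(void *dst, const void *src, size_t len)
{
        unsigned char *d = dst;
        const unsigned char *s = src;

        while (len--)
                *d++ = *s++;
        return dst;
}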
35 * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
37 * __copy_user assumes that src and dst don't overlap, and that the call is
40 * - src is readable (no exceptions when reading src)
42 * - dst is writable (no exceptions when writing dst)
43 * __copy_user uses a non-standard calling convention; see
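A sketch of the __copy_user return convention described above: it stops at the first fault and reports how many bytes it did not copy (the assembly leaves that count in a2/len). Here fault_addr is a hypothetical stand-in for the faulting address that the exception handler would supply:

#include <stddef.h>

static size_t copy_user_sketch(void *dst, const void *src, size_t len,
                               const void *fault_addr)
{
        unsigned char *d = dst;
        const unsigned char *s = src;
        size_t done = 0;

        while (done < len && (const void *)s != fault_addr) {
                *d++ = *s++;    /* a real fault would stop the loop here */
                done++;
        }
        return len - done;      /* bytes left uncopied, i.e. the new a2 */
}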
57 * 1- AT contain the address of the byte just past the end of the source
59 * 2- src_entry <= src < AT, and
60 * 3- (dst - src) == (dst_entry - src_entry),
64 * (2) is met by incrementing src by the number of bytes copied
65 * (3) is met by not doing loads between a pair of increments of dst and src
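These invariants are what make the fault fixup computable: (1) and (2) bound how much source remains, and (3) lets the handler recover the matching dst. A sketch of the arithmetic, with at/src/src_entry/dst_entry as hypothetical snapshots of the corresponding registers:

#include <stddef.h>
#include <stdint.h>

/* Invariant (2) guarantees src < at, so this never underflows. */
static size_t src_bytes_left(uintptr_t at, uintptr_t src)
{
        return at - src;
}

/* Invariant (3): dst - src is constant, so dst can be recomputed. */
static uintptr_t cur_dst(uintptr_t src, uintptr_t src_entry,
                         uintptr_t dst_entry)
{
        return dst_entry + (src - src_entry);
}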
81 * Only on the 64-bit kernel can we make use of 64-bit registers.
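Concretely, the LOAD/STORE and NBYTES macros used below are selected per configuration; a sketch of that selection (take the #ifdef guard as an assumption — the exact condition varies across kernel versions):

#ifdef CONFIG_64BIT             /* assumed guard; some versions use USE_DOUBLE */
#define LOAD    ld              /* 64-bit loads/stores: 8 bytes per unit */
#define STORE   sd
#define NBYTES  8
#else
#define LOAD    lw              /* 32-bit loads/stores: 4 bytes per unit */
#define STORE   sw
#define NBYTES  4
#endif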
133 #define REST(unit) (FIRST(unit)+NBYTES-1)
136 #define ADDRMASK (NBYTES-1)
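With NBYTES == 8, FIRST(unit) and REST(unit) give the offsets of the first and last byte of a unit — exactly the address pair an unaligned ldl/ldr (LDFIRST/LDREST) load needs — and ADDRMASK extracts the misalignment. A worked expansion, where the FIRST() definition is an assumption matching how the offsets are used below:

#define NBYTES  8                       /* 64-bit units */
#define FIRST(unit) ((unit)*NBYTES)     /* assumed definition */
#define REST(unit)  (FIRST(unit)+NBYTES-1)
#define ADDRMASK (NBYTES-1)
/* FIRST(2) == 16, REST(2) == 23; (addr & ADDRMASK) != 0 means addr
 * is not unit-aligned, which is what the alignment check below tests. */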
149 LEAF(memcpy) /* a0=dst a1=src a2=len */
158 * Note: dst & src may be unaligned, len may be 0
165 pref 0, 0(src)
168 and t0, src, ADDRMASK # Check if src unaligned
180 pref 0, 128(src) # We must not prefetch invalid addresses
183 2: pref 0, 256(src) # We must not prefetch invalid addresses
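The pref instructions are software prefetch hints issued well ahead of the copy, and only for addresses known to lie inside the buffer (hence the "must not prefetch invalid addresses" comments). The closest C equivalent is the GCC/Clang builtin:

/* Prefetch-for-read, analogous to pref 0, 256(src) above. The second
 * argument selects read (0) vs write (1); the third is a temporal
 * locality hint from 0 to 3. */
static inline void prefetch_src(const void *p)
{
        __builtin_prefetch(p, 0, 1);
}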
187 EXC( LOAD t0, UNIT(0)(src), l_exc)
188 EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
189 EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
190 EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
194 EXC( STORE t2, UNIT(2)(dst), s_exc_p14u)
196 EXC( LOAD t0, UNIT(4)(src), l_exc_copy)
197 EXC( LOAD t1, UNIT(5)(src), l_exc_copy)
198 EXC( LOAD t2, UNIT(6)(src), l_exc_copy)
199 EXC( LOAD t3, UNIT(7)(src), l_exc_copy)
203 ADD src, src, 16*NBYTES
206 EXC( LOAD t0, UNIT(-8)(src), l_exc_copy_rewind16)
207 EXC( LOAD t1, UNIT(-7)(src), l_exc_copy_rewind16)
208 EXC( LOAD t2, UNIT(-6)(src), l_exc_copy_rewind16)
209 EXC( LOAD t3, UNIT(-5)(src), l_exc_copy_rewind16)
210 EXC( STORE t0, UNIT(-8)(dst), s_exc_p8u)
211 EXC( STORE t1, UNIT(-7)(dst), s_exc_p7u)
212 EXC( STORE t2, UNIT(-6)(dst), s_exc_p6u)
213 EXC( STORE t3, UNIT(-5)(dst), s_exc_p5u)
214 EXC( LOAD t0, UNIT(-4)(src), l_exc_copy_rewind16)
215 EXC( LOAD t1, UNIT(-3)(src), l_exc_copy_rewind16)
216 EXC( LOAD t2, UNIT(-2)(src), l_exc_copy_rewind16)
217 EXC( LOAD t3, UNIT(-1)(src), l_exc_copy_rewind16)
218 EXC( STORE t0, UNIT(-4)(dst), s_exc_p4u)
219 EXC( STORE t1, UNIT(-3)(dst), s_exc_p3u)
220 EXC( STORE t2, UNIT(-2)(dst), s_exc_p2u)
221 EXC( STORE t3, UNIT(-1)(dst), s_exc_p1u)
223 beqz t0, 2b
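A C sketch of the aligned fast path above: 16 units per iteration, with each group of four loads issued before the corresponding stores so the loop tolerates load latency. Note how the assembly bumps src halfway through (ADD src, src, 16*NBYTES) and addresses the second half with negative UNIT offsets; the sketch flattens that detail:

#include <stddef.h>

typedef unsigned long unit_t;   /* assumption: one LOAD/STORE unit */

static void copy_16_units(unit_t *dst, const unit_t *src, size_t units)
{
        while (units >= 16) {
                unit_t t0 = src[0], t1 = src[1], t2 = src[2], t3 = src[3];
                dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;
                t0 = src[4]; t1 = src[5]; t2 = src[6]; t3 = src[7];
                dst[4] = t0; dst[5] = t1; dst[6] = t2; dst[7] = t3;
                t0 = src[8]; t1 = src[9]; t2 = src[10]; t3 = src[11];
                dst[8] = t0; dst[9] = t1; dst[10] = t2; dst[11] = t3;
                t0 = src[12]; t1 = src[13]; t2 = src[14]; t3 = src[15];
                dst[12] = t0; dst[13] = t1; dst[14] = t2; dst[15] = t3;
                src += 16;
                dst += 16;
                units -= 16;
        }
}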
235 EXC( LOAD t0, UNIT(0)(src), l_exc)
236 EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
237 EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
238 EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
242 EXC( STORE t2, UNIT(2)(dst), s_exc_p6u)
244 EXC( LOAD t0, UNIT(4)(src), l_exc_copy)
245 EXC( LOAD t1, UNIT(5)(src), l_exc_copy)
246 EXC( LOAD t2, UNIT(6)(src), l_exc_copy)
247 EXC( LOAD t3, UNIT(7)(src), l_exc_copy)
252 ADD src, src, 8*NBYTES
262 EXC( LOAD t0, UNIT(0)(src), l_exc)
263 EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
264 EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
265 EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
269 EXC( STORE t2, UNIT(2)(dst), s_exc_p2u)
271 ADD src, src, 4*NBYTES
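After the 16-unit loop, the remaining length falls through a cascade: at most one 8-unit block, then at most one 4-unit block, then single units. A sketch of that cleanup structure:

#include <stddef.h>

typedef unsigned long unit_t;   /* assumption: one LOAD/STORE unit */

static void copy_cleanup(unit_t *dst, const unit_t *src, size_t units)
{
        int i;

        if (units >= 8) {               /* the 8*NBYTES block above */
                for (i = 0; i < 8; i++)
                        dst[i] = src[i];
                src += 8; dst += 8; units -= 8;
        }
        if (units >= 4) {               /* the 4*NBYTES block above */
                for (i = 0; i < 4; i++)
                        dst[i] = src[i];
                src += 4; dst += 4; units -= 4;
        }
        while (units--)                 /* one unit at a time */
                *dst++ = *src++;
}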
285 EXC( LOAD t0, 0(src), l_exc)
289 ADD src, src, NBYTES
293 # 2) Copy NBYTES, then check length again
295 EXC( LOAD t0, 0(src), l_exc)
299 ADD src, src, NBYTES
305 EXC( LOAD t0, 0(src), l_exc)
307 ADD src, src, NBYTES
310 EXC( STORE t0, -8(dst), s_exc_p1u)
314 SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
316 and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
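The shift/mask pair above is just a division: SRL by LOG_NBYTES+2 divides len by 4*NBYTES to get the iteration count, and the AND keeps the remainder. Worked through for NBYTES == 8 (LOG_NBYTES == 3):

#include <stddef.h>

/* len = 100: iters = 100 >> 5 = 3 full 32-byte iterations,
 * rem = 100 & 31 = 4 bytes left over. */
static void split_len(size_t len, size_t *iters, size_t *rem)
{
        *iters = len >> (3 + 2);        /* SRL t0, len, LOG_NBYTES+2  */
        *rem   = len & (4 * 8 - 1);     /* and rem, len, (4*NBYTES-1) */
}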
322 * are to the same unit (unless src is aligned, but it's not).
324 EXC( LDFIRST t0, FIRST(0)(src), l_exc)
325 EXC( LDFIRST t1, FIRST(1)(src), l_exc_copy)
327 EXC( LDREST t0, REST(0)(src), l_exc_copy)
328 EXC( LDREST t1, REST(1)(src), l_exc_copy)
329 EXC( LDFIRST t2, FIRST(2)(src), l_exc_copy)
330 EXC( LDFIRST t3, FIRST(3)(src), l_exc_copy)
331 EXC( LDREST t2, REST(2)(src), l_exc_copy)
332 EXC( LDREST t3, REST(3)(src), l_exc_copy)
333 ADD src, src, 4*NBYTES
336 EXC( STORE t2, UNIT(2)(dst), s_exc_p2u)
343 and rem, len, NBYTES-1 # rem = len % NBYTES
347 EXC( LDFIRST t0, FIRST(0)(src), l_exc)
348 EXC( LDREST t0, REST(0)(src), l_exc_copy)
351 ADD src, src, NBYTES
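The LDFIRST/LDREST pairs above are the classic MIPS lwl/lwr (or ldl/ldr) idiom: two partial loads assemble one register from an unaligned source, while the store side stays aligned because dst was aligned earlier. A portable C sketch of the same shape, using memcpy for the unaligned load:

#include <stddef.h>
#include <string.h>

typedef unsigned long unit_t;   /* assumption: one LOAD/STORE unit */

static void copy_units_unaligned(unit_t *dst, const unsigned char *src,
                                 size_t units)
{
        while (units--) {
                unit_t t;

                memcpy(&t, src, sizeof(t));     /* LDFIRST + LDREST */
                *dst++ = t;                     /* aligned STORE    */
                src += sizeof(t);
        }
}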
361 EXC( lb t0, N(src), l_exc); \
368 COPY_BYTE(2)
372 EXC( lb t0, NBYTES-2(src), l_exc)
375 EXC( sb t0, NBYTES-2(dst), s_exc_p1)
382 /* Rewind src and dst by 16*NBYTES for l_exc_copy */
383 SUB src, src, 16*NBYTES
387 * Copy bytes from src until faulting load address (or until a
394 * Assumes src < THREAD_BUADDR($28)
399 EXC( lb t1, 0(src), l_exc)
400 ADD src, src, 1
401 sb t1, 0(dst) # can't fault -- we're copy_from_user
402 bne src, t0, 1b
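A sketch of this fixup path: after a faulting load, copy byte-by-byte up to (but not including) the faulting address, which the kernel exposes via THREAD_BUADDR; the stores can't fault because this is the copy_from_user direction, and AT (the end-of-source address from invariant (1)) gives the uncopied count. fault and src_end are hypothetical stand-ins for those two registers:

#include <stddef.h>

static size_t fixup_copy(unsigned char *dst, const unsigned char *src,
                         const unsigned char *fault,
                         const unsigned char *src_end /* AT */)
{
        while (src != fault)
                *dst++ = *src++;        /* the lb/sb loop above */
        return src_end - src;           /* uncopied bytes -> len/a2 */
}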
431 SEXC(2)
446 sltu t0, a1, t0 # dst + len <= src -> memcpy
447 sltu t1, a0, t1 # dst >= src + len -> memcpy
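The two sltu tests implement the standard no-overlap predicate: if dst + len <= src or dst >= src + len, the regions are disjoint and plain memcpy is safe; otherwise control falls through to the reverse copy. In C:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool no_overlap(uintptr_t dst, uintptr_t src, size_t len)
{
        return dst + len <= src ||      /* sltu t0, a1, t0 */
               dst >= src + len;        /* sltu t1, a0, t1 */
}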
455 LEAF(__rmemcpy) /* a0=dst a1=src a2=len */
457 beqz t0, r_end_bytes_up # src >= dst
460 ADD a1, a2 # src = src + len
463 lb t0, -1(a1)
465 sb t0, -1(a0)
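__rmemcpy first advances both pointers past the end (ADD a1, a2 above), then steps backwards with -1 offsets, so an overlapping tail is read before it is overwritten; the src >= dst case branches to a forward byte loop (r_end_bytes_up) instead. A sketch of the backward path:

#include <stddef.h>

static void *rmemcpy_sketch(void *dst, const void *src, size_t len)
{
        unsigned char *d = (unsigned char *)dst + len;
        const unsigned char *s = (const unsigned char *)src + len;

        while (len--)
                *--d = *--s;    /* lb t0, -1(a1); sb t0, -1(a0) */
        return dst;
}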