/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Unified implementation of memcpy, memmove and the __copy_user backend.
 *
 * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
 * Copyright (C) 2002 Broadcom, Inc.
 *   memcpy/copy_user author: Mark Vandevoorde
 * Copyright (C) 2007 Maciej W. Rozycki
 * Copyright (C) 2014 Imagination Technologies Ltd.
 *
 * Mnemonic names for arguments to memcpy/__copy_user
 */

/*
 * Hack to resolve longstanding prefetch issue
 *
 * Prefetching may be fatal on some systems if we're prefetching beyond the
 * end of memory.  It's also a seriously bad idea on non-dma-coherent
 * systems.
 */
#ifdef CONFIG_DMA_NONCOHERENT
#undef CONFIG_CPU_HAS_PREFETCH
#endif
#ifdef CONFIG_MIPS_MALTA
#undef CONFIG_CPU_HAS_PREFETCH
#endif

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>

#define dst a0
#define src a1
#define len a2

/*
 * Spec
 *
 * memcpy copies len bytes from src to dst and sets v0 to dst.
 * It assumes that
 *   - src and dst don't overlap
 *   - src is readable
 *   - dst is writable
 * memcpy uses the standard calling convention
 *
 * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
 * the number of uncopied bytes due to an exception caused by a read or write.
 * __copy_user assumes that src and dst don't overlap, and that the call is
 * implementing one of the following:
 *   copy_to_user
 *     - src is readable  (no exceptions when reading src)
 *   copy_from_user
 *     - dst is writable  (no exceptions when writing dst)
 * __copy_user uses a non-standard calling convention; see
 * include/asm-mips/uaccess.h
 *
 * When an exception happens on a load, the handler must
 * ensure that all of the destination buffer is overwritten to prevent
 * leaking information to user mode programs.
 */
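
/*
 * Illustrative C-level sketch of the __copy_user contract above.  This is
 * an assumption-laden paraphrase, not the implementation: the real entry
 * point is the assembly below, and faults are caught via the exception
 * table rather than tested for up front.
 *
 *	size_t __copy_user(void *dst, const void *src, size_t len)
 *	{
 *		size_t done = 0;
 *		while (done < len) {
 *			if (the next access would fault)
 *				return len - done;	// residue in a2
 *			((u8 *)dst)[done] = ((const u8 *)src)[done];
 *			done++;
 *		}
 *		return 0;	// success: a2 becomes 0
 *	}
 */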

/*
 * Implementation
 */

/*
 * The exception handler for loads requires that:
 *  1- AT contain the address of the byte just past the end of the source
 *     of the copy,
 *  2- src_entry <= src < AT, and
 *  3- (dst - src) == (dst_entry - src_entry),
 * The _entry suffix denotes values when __copy_user was called.
 *
 * (1) is set up by uaccess.h and maintained by not writing AT in copy_user
 * (2) is met by incrementing src by the number of bytes copied
 * (3) is met by not doing loads between a pair of increments of dst and src
 *
 * The exception handlers for stores adjust len (if necessary) and return.
 * These handlers do not need to overwrite any data.
 *
 * For __rmemcpy and memmove an exception is always a kernel bug, therefore
 * they're not protected.
 */
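
/*
 * Worked example of the load invariants (hypothetical addresses):
 * entering __copy_user with src_entry = 0x1000, dst_entry = 0x2000 and
 * len = 0x40 gives AT = 0x1040.  If a load faults after 0x18 bytes have
 * been copied, src = 0x1018 and dst = 0x2018, so (2) holds
 * (0x1000 <= 0x1018 < 0x1040) and (3) holds (dst - src == 0x1000
 * throughout).
 */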

/* Instruction type */
#define LD_INSN 1
#define ST_INSN 2
/* Prefetch type */
#define SRC_PREFETCH 1
#define DST_PREFETCH 2

/*
 * Wrapper to add an entry in the exception table
 * in case the insn causes a memory exception.
 * Arguments:
 *	insn	:	Load/store instruction
 *	type	:	Instruction type
 *	reg	:	Register
 *	addr	:	Address
 *	handler :	Exception handler
 */
#define EXC(insn, type, reg, addr, handler)		\
9:	insn reg, addr;					\
	.section __ex_table, "a";			\
	PTR	9b, handler;				\
	.previous
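
/*
 * Illustrative expansion: EXC(lw, LD_INSN, t0, 0(src), .Ll_exc) emits
 *
 *	9:	lw	t0, 0(src)
 *		.section __ex_table, "a"
 *		PTR	9b, .Ll_exc
 *		.previous
 *
 * so a fault at the load makes the kernel's fault fixup jump to .Ll_exc
 * instead of oopsing.  The type argument is unused in this variant.
 */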

/*
 * Only on the 64-bit kernel can we make use of 64-bit registers.
 */
#ifdef CONFIG_64BIT
#define USE_DOUBLE
#endif

#ifdef USE_DOUBLE

#define LOADK ld /* No exception */
#define LOAD(reg, addr, handler)	EXC(ld, LD_INSN, reg, addr, handler)
#define LOADL(reg, addr, handler)	EXC(ldl, LD_INSN, reg, addr, handler)
#define LOADR(reg, addr, handler)	EXC(ldr, LD_INSN, reg, addr, handler)
#define STOREL(reg, addr, handler)	EXC(sdl, ST_INSN, reg, addr, handler)
#define STORER(reg, addr, handler)	EXC(sdr, ST_INSN, reg, addr, handler)
#define STORE(reg, addr, handler)	EXC(sd, ST_INSN, reg, addr, handler)
#define ADD    daddu
#define SUB    dsubu
#define SRL    dsrl
#define SRA    dsra
#define SLL    dsll
#define SLLV   dsllv
#define SRLV   dsrlv
#define NBYTES 8
#define LOG_NBYTES 3

/*
 * As we are sharing code base with the mips32 tree (which uses the o32 ABI
 * register definitions), we need to redefine the register definitions from
 * the n64 ABI register naming to the o32 ABI register naming.
 */
#undef t0
#undef t1
#undef t2
#undef t3
#define t0	$8
#define t1	$9
#define t2	$10
#define t3	$11
#define t4	$12
#define t5	$13
#define t6	$14
#define t7	$15

#else

#define LOADK lw /* No exception */
#define LOAD(reg, addr, handler)	EXC(lw, LD_INSN, reg, addr, handler)
#define LOADL(reg, addr, handler)	EXC(lwl, LD_INSN, reg, addr, handler)
#define LOADR(reg, addr, handler)	EXC(lwr, LD_INSN, reg, addr, handler)
#define STOREL(reg, addr, handler)	EXC(swl, ST_INSN, reg, addr, handler)
#define STORER(reg, addr, handler)	EXC(swr, ST_INSN, reg, addr, handler)
#define STORE(reg, addr, handler)	EXC(sw, ST_INSN, reg, addr, handler)
#define ADD    addu
#define SUB    subu
#define SRL    srl
#define SLL    sll
#define SRA    sra
#define SLLV   sllv
#define SRLV   srlv
#define NBYTES 4
#define LOG_NBYTES 2

#endif /* USE_DOUBLE */

#define LOADB(reg, addr, handler)	EXC(lb, LD_INSN, reg, addr, handler)
#define STOREB(reg, addr, handler)	EXC(sb, ST_INSN, reg, addr, handler)

#define _PREF(hint, addr, type)		PREF(hint, addr)

#define PREFS(hint, addr)		_PREF(hint, addr, SRC_PREFETCH)
#define PREFD(hint, addr)		_PREF(hint, addr, DST_PREFETCH)

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define LDREST	LOADL
#define STFIRST STORER
#define STREST	STOREL
#define SHIFT_DISCARD SLLV
#else
#define LDFIRST LOADL
#define LDREST	LOADR
#define STFIRST STOREL
#define STREST	STORER
#define SHIFT_DISCARD SRLV
#endif

#define FIRST(unit) ((unit)*NBYTES)
#define REST(unit)  (FIRST(unit)+NBYTES-1)
#define UNIT(unit)  FIRST(unit)

#define ADDRMASK (NBYTES-1)

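/*
 * Worked example (assuming USE_DOUBLE, so NBYTES == 8): FIRST(1) == 8
 * and REST(1) == 15, so the pair
 *
 *	LDFIRST(t1, FIRST(1)(src), handler)
 *	LDREST(t1, REST(1)(src), handler)
 *
 * becomes ldr/ldl (little-endian) or ldl/ldr (big-endian) and loads the
 * possibly misaligned doubleword at src+8..src+15 into t1.
 */
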
	.text
	.set	noreorder
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	.set	noat
#else
	.set	at=v1
#endif

/*
 * t6 is used as a flag to note inatomic mode.
 */
LEAF(__copy_user_inatomic)
	b	__copy_user_common
	li	t6, 1
END(__copy_user_inatomic)
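
/*
 * With t6 != 0 (the inatomic entry above), a faulting load skips the
 * destination-zeroing step in .Ll_exc below and only reports the
 * residue in len.
 */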

/*
 * A combined memcpy/__copy_user
 * __copy_user sets len to 0 for success; else to an upper bound of
 * the number of uncopied bytes.
 * memcpy sets v0 to dst.
 */
	.align	5
LEAF(memcpy)				/* a0=dst a1=src a2=len */
	move	v0, dst			/* return value */
.L__memcpy:
FEXPORT(__copy_user)
	li	t6, 0			/* not inatomic */
__copy_user_common:
	/*
	 * Note: dst & src may be unaligned, len may be 0
	 * Temps
	 */
#define rem t8

	R10KCBARRIER(0(ra))
	/*
	 * The "issue break"s below are very approximate.
	 * Issue delays for dcache fills will perturb the schedule, as will
	 * load queue full replay traps, etc.
	 *
	 * If len < NBYTES use byte operations.
	 */
	PREFS(	0, 0(src) )
	PREFD(	1, 0(dst) )
	sltu	t2, len, NBYTES
	and	t1, dst, ADDRMASK
	PREFS(	0, 1*32(src) )
	PREFD(	1, 1*32(dst) )
	bnez	t2, .Lcopy_bytes_checklen
	and	t0, src, ADDRMASK
	PREFS(	0, 2*32(src) )
	PREFD(	1, 2*32(dst) )
	bnez	t1, .Ldst_unaligned
	nop
	bnez	t0, .Lsrc_unaligned_dst_aligned
	/*
	 * use delay slot for fall-through
	 * src and dst are aligned; need to compute rem
	 */
.Lboth_aligned:
	SRL	t0, len, LOG_NBYTES+3		# +3 for 8 units/iter
	beqz	t0, .Lcleanup_both_aligned	# len < 8*NBYTES
	and	rem, len, (8*NBYTES-1)		# rem = len % (8*NBYTES)
	PREFS(	0, 3*32(src) )
	PREFD(	1, 3*32(dst) )
	.align	4
1:
	R10KCBARRIER(0(ra))
	LOAD(t0, UNIT(0)(src), .Ll_exc)
	LOAD(t1, UNIT(1)(src), .Ll_exc_copy)
	LOAD(t2, UNIT(2)(src), .Ll_exc_copy)
	LOAD(t3, UNIT(3)(src), .Ll_exc_copy)
	SUB	len, len, 8*NBYTES
	LOAD(t4, UNIT(4)(src), .Ll_exc_copy)
	LOAD(t7, UNIT(5)(src), .Ll_exc_copy)
	STORE(t0, UNIT(0)(dst), .Ls_exc_p8u)
	STORE(t1, UNIT(1)(dst), .Ls_exc_p7u)
	LOAD(t0, UNIT(6)(src), .Ll_exc_copy)
	LOAD(t1, UNIT(7)(src), .Ll_exc_copy)
	ADD	src, src, 8*NBYTES
	ADD	dst, dst, 8*NBYTES
	STORE(t2, UNIT(-6)(dst), .Ls_exc_p6u)
	STORE(t3, UNIT(-5)(dst), .Ls_exc_p5u)
	STORE(t4, UNIT(-4)(dst), .Ls_exc_p4u)
	STORE(t7, UNIT(-3)(dst), .Ls_exc_p3u)
	STORE(t0, UNIT(-2)(dst), .Ls_exc_p2u)
	STORE(t1, UNIT(-1)(dst), .Ls_exc_p1u)
	PREFS(	0, 8*32(src) )
	PREFD(	1, 8*32(dst) )
	bne	len, rem, 1b
	nop

	/*
	 * len == rem == the number of bytes left to copy < 8*NBYTES
	 */
.Lcleanup_both_aligned:
	beqz	len, .Ldone
	sltu	t0, len, 4*NBYTES
	bnez	t0, .Lless_than_4units
	and	rem, len, (NBYTES-1)	# rem = len % NBYTES
	/*
	 * len >= 4*NBYTES
	 */
	LOAD( t0, UNIT(0)(src), .Ll_exc)
	LOAD( t1, UNIT(1)(src), .Ll_exc_copy)
	LOAD( t2, UNIT(2)(src), .Ll_exc_copy)
	LOAD( t3, UNIT(3)(src), .Ll_exc_copy)
	SUB	len, len, 4*NBYTES
	ADD	src, src, 4*NBYTES
	R10KCBARRIER(0(ra))
	STORE(t0, UNIT(0)(dst), .Ls_exc_p4u)
	STORE(t1, UNIT(1)(dst), .Ls_exc_p3u)
	STORE(t2, UNIT(2)(dst), .Ls_exc_p2u)
	STORE(t3, UNIT(3)(dst), .Ls_exc_p1u)
	.set	reorder			/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
	beqz	len, .Ldone
	.set	noreorder
.Lless_than_4units:
	/*
	 * rem = len % NBYTES
	 */
	beq	rem, len, .Lcopy_bytes
	nop
1:
	R10KCBARRIER(0(ra))
	LOAD(t0, 0(src), .Ll_exc)
	ADD	src, src, NBYTES
	SUB	len, len, NBYTES
	STORE(t0, 0(dst), .Ls_exc_p1u)
	.set	reorder			/* DADDI_WAR */
	ADD	dst, dst, NBYTES
	bne	rem, len, 1b
	.set	noreorder

	/*
	 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
	 * A loop would do only a byte at a time with possible branch
	 * mispredicts.  Can't do an explicit LOAD dst,mask,or,STORE
	 * because can't assume read-access to dst.  Instead, use
	 * STREST dst, which doesn't require read access to dst.
	 *
	 * This code should perform better than a simple loop on modern,
	 * wide-issue mips processors because the code has fewer branches and
	 * more instruction-level parallelism.
	 */
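	/*
	 * Worked example (assuming USE_DOUBLE and little-endian, len == 3):
	 * rem = 3*8 = 24 bits to keep, bits = 64 - 24 = 40 bits to discard.
	 * SHIFT_DISCARD (sllv here) moves the three valid bytes of t0 to
	 * the most-significant end, and STREST (sdl here) at -1(t1) = dst+2
	 * then stores exactly those three bytes at dst..dst+2 without ever
	 * reading dst.
	 */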
#define bits t2
	beqz	len, .Ldone
	ADD	t1, dst, len	# t1 is just past last byte of dst
	li	bits, 8*NBYTES
	SLL	rem, len, 3	# rem = number of bits to keep
	LOAD(t0, 0(src), .Ll_exc)
	SUB	bits, bits, rem # bits = number of bits to discard
	SHIFT_DISCARD t0, t0, bits
	STREST(t0, -1(t1), .Ls_exc)
	jr	ra
	move	len, zero
.Ldst_unaligned:
	/*
	 * dst is unaligned
	 * t0 = src & ADDRMASK
	 * t1 = dst & ADDRMASK; T1 > 0
	 * len >= NBYTES
	 *
	 * Copy enough bytes to align dst
	 * Set match = (src and dst have same alignment)
	 */
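	/*
	 * Worked example (NBYTES == 8): if t1 = dst & ADDRMASK = 3, then
	 * t2 = 8 - 3 = 5 bytes are copied with one LDFIRST/LDREST + STFIRST
	 * sequence, leaving dst 8-byte aligned.  match = t0 ^ t1 is zero
	 * iff src had the same misalignment, in which case the copy can
	 * continue at .Lboth_aligned.
	 */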
#define match rem
	LDFIRST(t3, FIRST(0)(src), .Ll_exc)
	ADD	t2, zero, NBYTES
	LDREST(t3, REST(0)(src), .Ll_exc_copy)
	SUB	t2, t2, t1	# t2 = number of bytes copied
	xor	match, t0, t1
	R10KCBARRIER(0(ra))
	STFIRST(t3, FIRST(0)(dst), .Ls_exc)
	beq	len, t2, .Ldone
	SUB	len, len, t2
	ADD	dst, dst, t2
	beqz	match, .Lboth_aligned
	ADD	src, src, t2

.Lsrc_unaligned_dst_aligned:
	SRL	t0, len, LOG_NBYTES+2	# +2 for 4 units/iter
	PREFS(	0, 3*32(src) )
	beqz	t0, .Lcleanup_src_unaligned
	and	rem, len, (4*NBYTES-1)	# rem = len % 4*NBYTES
	PREFD(	1, 3*32(dst) )
1:
	/*
	 * Avoid consecutive LD*'s to the same register since some mips
	 * implementations can't issue them in the same cycle.
	 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
	 * are to the same unit (unless src is aligned, but it's not).
	 */
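	/*
	 * Hence the interleaved order below: LDFIRST t0, LDFIRST t1,
	 * LDREST t0, LDREST t1, ... instead of issuing LDFIRST t0
	 * immediately followed by LDREST t0.
	 */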
	R10KCBARRIER(0(ra))
	LDFIRST(t0, FIRST(0)(src), .Ll_exc)
	LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy)
	SUB	len, len, 4*NBYTES
	LDREST(t0, REST(0)(src), .Ll_exc_copy)
	LDREST(t1, REST(1)(src), .Ll_exc_copy)
	LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy)
	LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy)
	LDREST(t2, REST(2)(src), .Ll_exc_copy)
	LDREST(t3, REST(3)(src), .Ll_exc_copy)
	PREFS(	0, 9*32(src) )		# 0 is PREF_LOAD  (not streamed)
	ADD	src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
	nop				# improves slotting
#endif
	STORE(t0, UNIT(0)(dst), .Ls_exc_p4u)
	STORE(t1, UNIT(1)(dst), .Ls_exc_p3u)
	STORE(t2, UNIT(2)(dst), .Ls_exc_p2u)
	STORE(t3, UNIT(3)(dst), .Ls_exc_p1u)
	PREFD(	1, 9*32(dst) )		# 1 is PREF_STORE (not streamed)
	.set	reorder			/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
	bne	len, rem, 1b
	.set	noreorder

.Lcleanup_src_unaligned:
	beqz	len, .Ldone
	and	rem, len, NBYTES-1	# rem = len % NBYTES
	beq	rem, len, .Lcopy_bytes
	nop
1:
	R10KCBARRIER(0(ra))
	LDFIRST(t0, FIRST(0)(src), .Ll_exc)
	LDREST(t0, REST(0)(src), .Ll_exc_copy)
	ADD	src, src, NBYTES
	SUB	len, len, NBYTES
	STORE(t0, 0(dst), .Ls_exc_p1u)
	.set	reorder			/* DADDI_WAR */
	ADD	dst, dst, NBYTES
	bne	len, rem, 1b
	.set	noreorder

.Lcopy_bytes_checklen:
	beqz	len, .Ldone
	nop
.Lcopy_bytes:
	/* 0 < len < NBYTES */
	R10KCBARRIER(0(ra))
#define COPY_BYTE(N)			\
	LOADB(t0, N(src), .Ll_exc);	\
	SUB	len, len, 1;		\
	beqz	len, .Ldone;		\
	STOREB(t0, N(dst), .Ls_exc_p1)
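
/*
 * COPY_BYTE(N) copies one byte and falls through to .Ldone the moment
 * len reaches 0; the expansions below plus the final LOADB/STOREB pair
 * handle the at most NBYTES-1 remaining bytes one at a time.
 */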

	COPY_BYTE(0)
	COPY_BYTE(1)
#ifdef USE_DOUBLE
	COPY_BYTE(2)
	COPY_BYTE(3)
	COPY_BYTE(4)
	COPY_BYTE(5)
#endif
	LOADB(t0, NBYTES-2(src), .Ll_exc)
	SUB	len, len, 1
	jr	ra
	STOREB(t0, NBYTES-2(dst), .Ls_exc_p1)
.Ldone:
	jr	ra
	nop
	END(memcpy)

.Ll_exc_copy:
	/*
	 * Copy bytes from src until faulting load address (or until a
	 * lb faults)
	 *
	 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
	 * may be more than a byte beyond the last address.
	 * Hence, the lb below may get an exception.
	 *
	 * Assumes src < THREAD_BUADDR($28)
	 */
	LOADK	t0, TI_TASK($28)
	nop
	LOADK	t0, THREAD_BUADDR(t0)
1:
	LOADB(t1, 0(src), .Ll_exc)
	ADD	src, src, 1
	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
	.set	reorder			/* DADDI_WAR */
	ADD	dst, dst, 1
	bne	src, t0, 1b
	.set	noreorder
.Ll_exc:
	LOADK	t0, TI_TASK($28)
	nop
	LOADK	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
	nop
	SUB	len, AT, t0		# len = number of uncopied bytes
	bnez	t6, .Ldone	/* Skip the zeroing part if inatomic */
	/*
	 * Here's where we rely on src and dst being incremented in tandem,
	 * See (3) above.
	 * dst += (fault addr - src) to put dst at first byte to clear
	 */
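	/*
	 * Hypothetical example: a fault at t0 = src + 0x18 means the first
	 * destination byte that may not have been written is
	 * dst + (t0 - src) = dst + 0x18, and len = AT - t0 bytes from
	 * there must be cleared.
	 */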
	ADD	dst, t0			# compute start address in a1
	SUB	dst, src
	/*
	 * Clear len bytes starting at dst.  Can't call __bzero because it
	 * might modify len.  An inefficient loop for these rare times...
	 */
	.set	reorder			/* DADDI_WAR */
	SUB	src, len, 1
	beqz	len, .Ldone
	.set	noreorder
1:	sb	zero, 0(dst)
	ADD	dst, dst, 1
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	bnez	src, 1b
	SUB	src, src, 1
#else
	.set	push
	.set	noat
	li	v1, 1
	bnez	src, 1b
	SUB	src, src, v1
	.set	pop
#endif
	jr	ra
	nop


#define SEXC(n)						\
	.set	reorder;		/* DADDI_WAR */	\
.Ls_exc_p ## n ## u:					\
	ADD	len, len, n*NBYTES;			\
	jr	ra;					\
	.set	noreorder
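
/*
 * Illustrative expansion: SEXC(4) defines .Ls_exc_p4u, the fixup used
 * by a store that still had 4 units left to go; it adds 4*NBYTES back
 * into len (the residue reported to the caller) and returns.
 */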

SEXC(8)
SEXC(7)
SEXC(6)
SEXC(5)
SEXC(4)
SEXC(3)
SEXC(2)
SEXC(1)

.Ls_exc_p1:
	.set	reorder			/* DADDI_WAR */
	ADD	len, len, 1
	jr	ra
	.set	noreorder
.Ls_exc:
	jr	ra
	nop

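/*
 * memmove: when [src, src+len) and [dst, dst+len) do not overlap this
 * defers to memcpy; otherwise it copies via __rmemcpy.  Hypothetical
 * example: dst = src + 4, len = 16 makes both sltu tests below true
 * (src < dst+len and dst < src+len), so the regions overlap and a
 * plain forward copy would clobber bytes not yet read.
 */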
	.align	5
LEAF(memmove)
	ADD	t0, a0, a2
	ADD	t1, a1, a2
	sltu	t0, a1, t0		# dst + len <= src -> memcpy
	sltu	t1, a0, t1		# dst >= src + len -> memcpy
	and	t0, t1
	beqz	t0, .L__memcpy
	move	v0, a0			/* return value */
	beqz	a2, .Lr_out
	END(memmove)

	/* fall through to __rmemcpy */
LEAF(__rmemcpy)				/* a0=dst a1=src a2=len */
	sltu	t0, a1, a0
	beqz	t0, .Lr_end_bytes_up	# src >= dst
	nop
	ADD	a0, a2			# dst = dst + len
	ADD	a1, a2			# src = src + len

.Lr_end_bytes:
	R10KCBARRIER(0(ra))
	lb	t0, -1(a1)
	SUB	a2, a2, 0x1
	sb	t0, -1(a0)
	SUB	a1, a1, 0x1
	.set	reorder			/* DADDI_WAR */
	SUB	a0, a0, 0x1
	bnez	a2, .Lr_end_bytes
	.set	noreorder

.Lr_out:
	jr	ra
	move	a2, zero

.Lr_end_bytes_up:
	R10KCBARRIER(0(ra))
	lb	t0, (a1)
	SUB	a2, a2, 0x1
	sb	t0, (a0)
	ADD	a1, a1, 0x1
	.set	reorder			/* DADDI_WAR */
	ADD	a0, a0, 0x1
	bnez	a2, .Lr_end_bytes_up
	.set	noreorder

	jr	ra
	move	a2, zero
	END(__rmemcpy)