/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Unified implementation of memcpy, memmove and the __copy_user backend.
 *
 * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
 * Copyright (C) 2002 Broadcom, Inc.
 *   memcpy/copy_user author: Mark Vandevoorde
 * Copyright (C) 2007 Maciej W. Rozycki
 * Copyright (C) 2014 Imagination Technologies Ltd.
 *
 * Mnemonic names for arguments to memcpy/__copy_user
 */

/*
 * Hack to resolve longstanding prefetch issue
 *
 * Prefetching may be fatal on some systems if we're prefetching beyond the
 * end of memory.  It's also a seriously bad idea on non-DMA-coherent
 * systems.
 */
#ifdef CONFIG_DMA_NONCOHERENT
#undef CONFIG_CPU_HAS_PREFETCH
#endif
#ifdef CONFIG_MIPS_MALTA
#undef CONFIG_CPU_HAS_PREFETCH
#endif

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>

#define dst a0
#define src a1
#define len a2

/*
 * Spec
 *
 * memcpy copies len bytes from src to dst and sets v0 to dst.
 * It assumes that
 *   - src and dst don't overlap
 *   - src is readable
 *   - dst is writable
 * memcpy uses the standard calling convention
 *
 * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
 * the number of uncopied bytes due to an exception caused by a read or write.
 * __copy_user assumes that src and dst don't overlap, and that the call is
 * implementing one of the following:
 *   copy_to_user
 *     - src is readable  (no exceptions when reading src)
 *   copy_from_user
 *     - dst is writable  (no exceptions when writing dst)
 * __copy_user uses a non-standard calling convention; see
 * include/asm-mips/uaccess.h
 *
 * When an exception happens on a load, the handler must
 * ensure that all of the destination buffer is overwritten to prevent
 * leaking information to user mode programs.
 */
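
/*
 * Editor's illustration (not part of the build): the __copy_user
 * contract above, sketched in C.  copy_one_byte() is a hypothetical
 * helper standing in for a single faultable byte transfer.
 *
 *	size_t __copy_user(void *dst, const void *src, size_t len)
 *	{
 *		size_t done;
 *
 *		for (done = 0; done < len; done++)
 *			if (!copy_one_byte(dst + done, src + done))
 *				return len - done;	// a2 = uncopied bytes
 *		return 0;				// a2 = 0 on success
 *	}
 */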

/*
 * Implementation
 */

/*
 * The exception handler for loads requires that:
 *  1- AT contain the address of the byte just past the end of the source
 *     of the copy,
 *  2- src_entry <= src < AT, and
 *  3- (dst - src) == (dst_entry - src_entry),
 * The _entry suffix denotes values when __copy_user was called.
 *
 * (1) is set up by uaccess.h and maintained by not writing AT in copy_user
 * (2) is met by incrementing src by the number of bytes copied
 * (3) is met by not doing loads between a pair of increments of dst and src
 *
 * The exception handlers for stores adjust len (if necessary) and return.
 * These handlers do not need to overwrite any data.
 *
 * For __rmemcpy and memmove an exception is always a kernel bug, therefore
 * they're not protected.
 */
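
/*
 * Editor's note: a worked instance of (1)-(3).  Suppose __copy_user was
 * entered with src_entry = 0x1000, dst_entry = 0x5000 and len = 0x100,
 * so AT = 0x1100 by (1).  If a load then faults at source address
 * 0x10c0, the fixup code (.Ll_exc below) recovers the results from the
 * invariants alone:
 *
 *	len = AT - 0x10c0 = 0x40	// uncopied bytes
 *	dst += 0x10c0 - src		// first dst byte to clear, by (3)
 *
 * No per-iteration bookkeeping is needed beyond keeping (2) and (3) true.
 */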

/* Instruction type */
#define LD_INSN 1
#define ST_INSN 2
/* Prefetch type */
#define SRC_PREFETCH 1
#define DST_PREFETCH 2
#define LEGACY_MODE 1
#define EVA_MODE    2
#define USEROP   1
#define KERNELOP 2

/*
 * Wrapper to add an entry in the exception table
 * in case the insn causes a memory exception.
 * Arguments:
 * insn    : Load/store instruction
 * type    : Instruction type
 * reg     : Register
 * addr    : Address
 * handler : Exception handler
 */

#define EXC(insn, type, reg, addr, handler)	\
	.if \mode == LEGACY_MODE;		\
9:		insn reg, addr;			\
		.section __ex_table, "a";	\
		PTR	9b, handler;		\
		.previous;			\
	.endif
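
/*
 * Editor's note: under LEGACY_MODE, an instance such as
 * LOAD(t0, 0(src), .Ll_exc\@) (defined below; ld on 64-bit kernels)
 * expands to roughly
 *
 *	9:	ld	t0, 0(src)
 *		.section __ex_table, "a"
 *		PTR	9b, .Ll_exc\@
 *		.previous
 *
 * i.e. the access itself plus an exception-table entry that redirects
 * a fault at label 9 to the fixup code.  (\@ is the assembler's
 * per-macro-invocation counter, which keeps the labels unique.)
 */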
/*
 * Only the 64-bit kernel can make use of 64-bit registers.
 */
#ifdef CONFIG_64BIT
#define USE_DOUBLE
#endif

#ifdef USE_DOUBLE

#define LOADK ld /* No exception */
#define LOAD(reg, addr, handler)	EXC(ld, LD_INSN, reg, addr, handler)
#define LOADL(reg, addr, handler)	EXC(ldl, LD_INSN, reg, addr, handler)
#define LOADR(reg, addr, handler)	EXC(ldr, LD_INSN, reg, addr, handler)
#define STOREL(reg, addr, handler)	EXC(sdl, ST_INSN, reg, addr, handler)
#define STORER(reg, addr, handler)	EXC(sdr, ST_INSN, reg, addr, handler)
#define STORE(reg, addr, handler)	EXC(sd, ST_INSN, reg, addr, handler)
#define ADD    daddu
#define SUB    dsubu
#define SRL    dsrl
#define SRA    dsra
#define SLL    dsll
#define SLLV   dsllv
#define SRLV   dsrlv
#define NBYTES 8
#define LOG_NBYTES 3

/*
 * As we are sharing the code base with the mips32 tree (which uses the o32
 * ABI register definitions), we need to redefine the register names from
 * the n64 ABI naming to the o32 ABI naming.
 */
#undef t0
#undef t1
#undef t2
#undef t3
#define t0	$8
#define t1	$9
#define t2	$10
#define t3	$11
#define t4	$12
#define t5	$13
#define t6	$14
#define t7	$15

#else

#define LOADK lw /* No exception */
#define LOAD(reg, addr, handler)	EXC(lw, LD_INSN, reg, addr, handler)
#define LOADL(reg, addr, handler)	EXC(lwl, LD_INSN, reg, addr, handler)
#define LOADR(reg, addr, handler)	EXC(lwr, LD_INSN, reg, addr, handler)
#define STOREL(reg, addr, handler)	EXC(swl, ST_INSN, reg, addr, handler)
#define STORER(reg, addr, handler)	EXC(swr, ST_INSN, reg, addr, handler)
#define STORE(reg, addr, handler)	EXC(sw, ST_INSN, reg, addr, handler)
#define ADD    addu
#define SUB    subu
#define SRL    srl
#define SLL    sll
#define SRA    sra
#define SLLV   sllv
#define SRLV   srlv
#define NBYTES 4
#define LOG_NBYTES 2

#endif /* USE_DOUBLE */

#define LOADB(reg, addr, handler)	EXC(lb, LD_INSN, reg, addr, handler)
#define STOREB(reg, addr, handler)	EXC(sb, ST_INSN, reg, addr, handler)

#define _PREF(hint, addr, type)		\
	.if \mode == LEGACY_MODE;	\
		PREF(hint, addr);	\
	.endif

#define PREFS(hint, addr)	_PREF(hint, addr, SRC_PREFETCH)
#define PREFD(hint, addr)	_PREF(hint, addr, DST_PREFETCH)

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define LDREST	LOADL
#define STFIRST STORER
#define STREST	STOREL
#define SHIFT_DISCARD SLLV
#else
#define LDFIRST LOADL
#define LDREST	LOADR
#define STFIRST STOREL
#define STREST	STORER
#define SHIFT_DISCARD SRLV
#endif
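
/*
 * Editor's note on the pairs above: lwl/lwr (and ldl/ldr) each move
 * only part of a register, and together implement one unaligned load.
 * For example, on a little-endian CPU with NBYTES == 4, an unaligned
 * word load from src is
 *
 *	LDFIRST(t0, FIRST(0)(src), ...)	->	lwr	t0, 0(src)
 *	LDREST(t0, REST(0)(src), ...)	->	lwl	t0, 3(src)
 *
 * where lwr fills the low-order bytes of t0 from src up to the next
 * word boundary and lwl supplies the remaining high-order bytes.
 */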
207 | ||
208 | #define FIRST(unit) ((unit)*NBYTES) | |
209 | #define REST(unit) (FIRST(unit)+NBYTES-1) | |
210 | #define UNIT(unit) FIRST(unit) | |
211 | ||
212 | #define ADDRMASK (NBYTES-1) | |
213 | ||
214 | .text | |
215 | .set noreorder | |
619b6e18 | 216 | #ifndef CONFIG_CPU_DADDI_WORKAROUNDS |
1da177e4 | 217 | .set noat |
619b6e18 MR |
218 | #else |
219 | .set at=v1 | |
220 | #endif | |
1da177e4 | 221 | |
	.align	5

/*
 * Macro to build the __copy_user common code
 * Arguments:
 * mode : LEGACY_MODE or EVA_MODE
 * from : Source operand. USEROP or KERNELOP
 * to   : Destination operand. USEROP or KERNELOP
 */
	.macro __BUILD_COPY_USER mode, from, to

	/* initialize __memcpy if this is the first time we execute this macro */
	.ifnotdef __memcpy
	.set __memcpy, 1
	.hidden __memcpy /* make sure it does not leak */
	.endif

	/*
	 * Note: dst & src may be unaligned, len may be 0
	 * Temps
	 */
#define rem t8

	R10KCBARRIER(0(ra))
	/*
	 * The "issue break"s below are very approximate.
	 * Issue delays for dcache fills will perturb the schedule, as will
	 * load queue full replay traps, etc.
	 *
	 * If len < NBYTES use byte operations.
	 */
	PREFS(	0, 0(src) )
	PREFD(	1, 0(dst) )
	sltu	t2, len, NBYTES
	and	t1, dst, ADDRMASK
	PREFS(	0, 1*32(src) )
	PREFD(	1, 1*32(dst) )
	bnez	t2, .Lcopy_bytes_checklen\@
	and	t0, src, ADDRMASK
	PREFS(	0, 2*32(src) )
	PREFD(	1, 2*32(dst) )
	bnez	t1, .Ldst_unaligned\@
	nop
	bnez	t0, .Lsrc_unaligned_dst_aligned\@
	/*
	 * use delay slot for fall-through
	 * src and dst are aligned; need to compute rem
	 */
.Lboth_aligned\@:
	SRL	t0, len, LOG_NBYTES+3		# +3 for 8 units/iter
	beqz	t0, .Lcleanup_both_aligned\@	# len < 8*NBYTES
	and	rem, len, (8*NBYTES-1)		# rem = len % (8*NBYTES)
	PREFS(	0, 3*32(src) )
	PREFD(	1, 3*32(dst) )
	.align	4
1:
	R10KCBARRIER(0(ra))
	LOAD(t0, UNIT(0)(src), .Ll_exc\@)
	LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
	LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
	LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
	SUB	len, len, 8*NBYTES
	LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
	LOAD(t7, UNIT(5)(src), .Ll_exc_copy\@)
	STORE(t0, UNIT(0)(dst), .Ls_exc_p8u\@)
	STORE(t1, UNIT(1)(dst), .Ls_exc_p7u\@)
	LOAD(t0, UNIT(6)(src), .Ll_exc_copy\@)
	LOAD(t1, UNIT(7)(src), .Ll_exc_copy\@)
	ADD	src, src, 8*NBYTES
	ADD	dst, dst, 8*NBYTES
	STORE(t2, UNIT(-6)(dst), .Ls_exc_p6u\@)
	STORE(t3, UNIT(-5)(dst), .Ls_exc_p5u\@)
	STORE(t4, UNIT(-4)(dst), .Ls_exc_p4u\@)
	STORE(t7, UNIT(-3)(dst), .Ls_exc_p3u\@)
	STORE(t0, UNIT(-2)(dst), .Ls_exc_p2u\@)
	STORE(t1, UNIT(-1)(dst), .Ls_exc_p1u\@)
	PREFS(	0, 8*32(src) )
	PREFD(	1, 8*32(dst) )
	bne	len, rem, 1b
	nop

	/*
	 * len == rem == the number of bytes left to copy < 8*NBYTES
	 */
.Lcleanup_both_aligned\@:
	beqz	len, .Ldone\@
	sltu	t0, len, 4*NBYTES
	bnez	t0, .Lless_than_4units\@
	and	rem, len, (NBYTES-1)	# rem = len % NBYTES
	/*
	 * len >= 4*NBYTES
	 */
	LOAD( t0, UNIT(0)(src), .Ll_exc\@)
	LOAD( t1, UNIT(1)(src), .Ll_exc_copy\@)
	LOAD( t2, UNIT(2)(src), .Ll_exc_copy\@)
	LOAD( t3, UNIT(3)(src), .Ll_exc_copy\@)
	SUB	len, len, 4*NBYTES
	ADD	src, src, 4*NBYTES
	R10KCBARRIER(0(ra))
	STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@)
	STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@)
	STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@)
	STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@)
	.set	reorder			/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
	beqz	len, .Ldone\@
	.set	noreorder
.Lless_than_4units\@:
	/*
	 * rem = len % NBYTES
	 */
	beq	rem, len, .Lcopy_bytes\@
	nop
1:
	R10KCBARRIER(0(ra))
	LOAD(t0, 0(src), .Ll_exc\@)
	ADD	src, src, NBYTES
	SUB	len, len, NBYTES
	STORE(t0, 0(dst), .Ls_exc_p1u\@)
	.set	reorder			/* DADDI_WAR */
	ADD	dst, dst, NBYTES
	bne	rem, len, 1b
	.set	noreorder

	/*
	 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
	 * A loop would do only a byte at a time with possible branch
	 * mispredicts.  Can't do an explicit LOAD, mask, OR, STORE
	 * because we can't assume read access to dst.  Instead, use
	 * STREST dst, which doesn't require read access to dst.
	 *
	 * This code should perform better than a simple loop on modern,
	 * wide-issue MIPS processors because the code has fewer branches and
	 * more instruction-level parallelism.
	 */
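	/*
	 * Editor's note, a worked instance of this trick (32-bit,
	 * little-endian, len == 3, dst aligned): rem = len << 3 = 24 bits
	 * to keep, bits = 32 - 24 = 8 bits to discard.  SHIFT_DISCARD
	 * (sllv) shifts the loaded word up by 8 so the three source bytes
	 * occupy the top of t0, and STREST (swl) at dst + len - 1 writes
	 * exactly those three bytes, never touching byte dst + 3.
	 */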
#define bits t2
	beqz	len, .Ldone\@
	ADD	t1, dst, len	# t1 is just past last byte of dst
	li	bits, 8*NBYTES
	SLL	rem, len, 3	# rem = number of bits to keep
	LOAD(t0, 0(src), .Ll_exc\@)
	SUB	bits, bits, rem	# bits = number of bits to discard
	SHIFT_DISCARD t0, t0, bits
	STREST(t0, -1(t1), .Ls_exc\@)
	jr	ra
	move	len, zero
.Ldst_unaligned\@:
	/*
	 * dst is unaligned
	 * t0 = src & ADDRMASK
	 * t1 = dst & ADDRMASK; t1 > 0
	 * len >= NBYTES
	 *
	 * Copy enough bytes to align dst
	 * Set match = (src and dst have same alignment)
	 */
#define match rem
	LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
	ADD	t2, zero, NBYTES
	LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
	SUB	t2, t2, t1	# t2 = number of bytes copied
	xor	match, t0, t1
	R10KCBARRIER(0(ra))
	STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
	beq	len, t2, .Ldone\@
	SUB	len, len, t2
	ADD	dst, dst, t2
	beqz	match, .Lboth_aligned\@
	ADD	src, src, t2

.Lsrc_unaligned_dst_aligned\@:
	SRL	t0, len, LOG_NBYTES+2	# +2 for 4 units/iter
	PREFS(	0, 3*32(src) )
	beqz	t0, .Lcleanup_src_unaligned\@
	and	rem, len, (4*NBYTES-1)	# rem = len % 4*NBYTES
	PREFD(	1, 3*32(dst) )
1:
	/*
	 * Avoid consecutive LD*'s to the same register since some mips
	 * implementations can't issue them in the same cycle.
	 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
	 * are to the same unit (unless src is aligned, but it's not).
	 */
	R10KCBARRIER(0(ra))
	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
	LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
	SUB	len, len, 4*NBYTES
	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
	LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
	LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
	LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
	LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
	LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
	PREFS(	0, 9*32(src) )		# 0 is PREF_LOAD  (not streamed)
	ADD	src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
	nop				# improves slotting
#endif
	STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@)
	STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@)
	STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@)
	STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@)
	PREFD(	1, 9*32(dst) )		# 1 is PREF_STORE (not streamed)
	.set	reorder			/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
	bne	len, rem, 1b
	.set	noreorder

.Lcleanup_src_unaligned\@:
	beqz	len, .Ldone\@
	and	rem, len, NBYTES-1	# rem = len % NBYTES
	beq	rem, len, .Lcopy_bytes\@
	nop
1:
	R10KCBARRIER(0(ra))
	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
	ADD	src, src, NBYTES
	SUB	len, len, NBYTES
	STORE(t0, 0(dst), .Ls_exc_p1u\@)
	.set	reorder			/* DADDI_WAR */
	ADD	dst, dst, NBYTES
	bne	len, rem, 1b
	.set	noreorder

.Lcopy_bytes_checklen\@:
	beqz	len, .Ldone\@
	nop
.Lcopy_bytes\@:
	/* 0 < len < NBYTES */
	R10KCBARRIER(0(ra))
#define COPY_BYTE(N)			\
	LOADB(t0, N(src), .Ll_exc\@);	\
	SUB	len, len, 1;		\
	beqz	len, .Ldone\@;		\
	STOREB(t0, N(dst), .Ls_exc_p1\@)

	COPY_BYTE(0)
	COPY_BYTE(1)
#ifdef USE_DOUBLE
	COPY_BYTE(2)
	COPY_BYTE(3)
	COPY_BYTE(4)
	COPY_BYTE(5)
#endif
	LOADB(t0, NBYTES-2(src), .Ll_exc\@)
	SUB	len, len, 1
	jr	ra
	STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@)
.Ldone\@:
	jr	ra
	nop
	.if __memcpy == 1
	END(memcpy)
	.set __memcpy, 0
	.hidden __memcpy
	.endif

.Ll_exc_copy\@:
	/*
	 * Copy bytes from src until faulting load address (or until a
	 * lb faults)
	 *
	 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
	 * may be more than a byte beyond the last address.
	 * Hence, the lb below may get an exception.
	 *
	 * Assumes src < THREAD_BUADDR($28)
	 */
	LOADK	t0, TI_TASK($28)
	nop
	LOADK	t0, THREAD_BUADDR(t0)
1:
	LOADB(t1, 0(src), .Ll_exc\@)
	ADD	src, src, 1
	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
	.set	reorder		/* DADDI_WAR */
	ADD	dst, dst, 1
	bne	src, t0, 1b
	.set	noreorder
.Ll_exc\@:
	LOADK	t0, TI_TASK($28)
	nop
	LOADK	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
	nop
	SUB	len, AT, t0		# len = number of uncopied bytes
	bnez	t6, .Ldone\@		/* Skip the zeroing part if inatomic */
	/*
	 * Here's where we rely on src and dst being incremented in tandem,
	 * See (3) above.
	 * dst += (fault addr - src) to put dst at first byte to clear
	 */
	ADD	dst, t0			# compute start address in dst (a0)
	SUB	dst, src
	/*
	 * Clear len bytes starting at dst.  Can't call __bzero because it
	 * might modify len.  An inefficient loop for these rare times...
	 */
	.set	reorder			/* DADDI_WAR */
	SUB	src, len, 1
	beqz	len, .Ldone\@
	.set	noreorder
1:	sb	zero, 0(dst)
	ADD	dst, dst, 1
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	bnez	src, 1b
	SUB	src, src, 1
#else
	.set	push
	.set	noat
	li	v1, 1
	bnez	src, 1b
	SUB	src, src, v1
	.set	pop
#endif
	jr	ra
	nop


#define SEXC(n)					\
	.set	reorder;	/* DADDI_WAR */	\
.Ls_exc_p ## n ## u\@:				\
	ADD	len, len, n*NBYTES;		\
	jr	ra;				\
	.set	noreorder

SEXC(8)
SEXC(7)
SEXC(6)
SEXC(5)
SEXC(4)
SEXC(3)
SEXC(2)
SEXC(1)

.Ls_exc_p1\@:
	.set	reorder			/* DADDI_WAR */
	ADD	len, len, 1
	jr	ra
	.set	noreorder
.Ls_exc\@:
	jr	ra
	nop
	.endm

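/*
 * Editor's note: memmove only needs the byte-reversed copy when the
 * buffers overlap; the overlap test after LEAF(memmove) implements the
 * equivalent of this C sketch (illustrative only):
 *
 *	if (src + len <= dst || dst + len <= src)
 *		return memcpy(dst, src, len);	// no overlap: fast path
 *	// otherwise fall through to __rmemcpy, which picks a copy
 *	// direction that is safe for the overlap
 */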
	.align	5
LEAF(memmove)
	ADD	t0, a0, a2
	ADD	t1, a1, a2
	sltu	t0, a1, t0		# dst + len <= src -> memcpy
	sltu	t1, a0, t1		# dst >= src + len -> memcpy
	and	t0, t1
	beqz	t0, .L__memcpy
	move	v0, a0			/* return value */
	beqz	a2, .Lr_out
	END(memmove)

	/* fall through to __rmemcpy */
LEAF(__rmemcpy)				/* a0=dst a1=src a2=len */
	sltu	t0, a1, a0
	beqz	t0, .Lr_end_bytes_up	# src >= dst
	nop
	ADD	a0, a2			# dst = dst + len
	ADD	a1, a2			# src = src + len

.Lr_end_bytes:
	R10KCBARRIER(0(ra))
	lb	t0, -1(a1)
	SUB	a2, a2, 0x1
	sb	t0, -1(a0)
	SUB	a1, a1, 0x1
	.set	reorder			/* DADDI_WAR */
	SUB	a0, a0, 0x1
	bnez	a2, .Lr_end_bytes
	.set	noreorder

.Lr_out:
	jr	ra
	move	a2, zero

.Lr_end_bytes_up:
	R10KCBARRIER(0(ra))
	lb	t0, (a1)
	SUB	a2, a2, 0x1
	sb	t0, (a0)
	ADD	a1, a1, 0x1
	.set	reorder			/* DADDI_WAR */
	ADD	a0, a0, 0x1
	bnez	a2, .Lr_end_bytes_up
	.set	noreorder

	jr	ra
	move	a2, zero
	END(__rmemcpy)

/*
 * t6 is used as a flag to note inatomic mode.
 */
LEAF(__copy_user_inatomic)
	b	__copy_user_common
	li	t6, 1
	END(__copy_user_inatomic)

/*
 * A combined memcpy/__copy_user
 * __copy_user sets len to 0 for success; else to an upper bound of
 * the number of uncopied bytes.
 * memcpy sets v0 to dst.
 */
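
/*
 * Editor's note: from C, the __copy_user return convention surfaces
 * through the uaccess wrappers; a typical (illustrative) caller is
 *
 *	if (copy_from_user(kbuf, ubuf, n))	// nonzero: bytes NOT copied
 *		return -EFAULT;
 *
 * while memcpy() keeps the usual C semantics of returning dst (v0).
 */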
	.align	5
LEAF(memcpy)				/* a0=dst a1=src a2=len */
	move	v0, dst			/* return value */
.L__memcpy:
FEXPORT(__copy_user)
	li	t6, 0			/* not inatomic */
__copy_user_common:
	/* Legacy Mode, user <-> user */
	__BUILD_COPY_USER LEGACY_MODE USEROP USEROP