/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Unified implementation of memcpy, memmove and the __copy_user backend.
 *
 * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
 * Copyright (C) 2002 Broadcom, Inc.
 *   memcpy/copy_user author: Mark Vandevoorde
 * Copyright (C) 2007  Maciej W. Rozycki
 * Copyright (C) 2014 Imagination Technologies Ltd.
 *
 * Mnemonic names for arguments to memcpy/__copy_user
 */

/*
 * Hack to resolve longstanding prefetch issue
 *
 * Prefetching may be fatal on some systems if we're prefetching beyond the
 * end of memory.  It's also a seriously bad idea on non dma-coherent
 * systems.
 */
#ifdef CONFIG_DMA_NONCOHERENT
#undef CONFIG_CPU_HAS_PREFETCH
#endif
#ifdef CONFIG_MIPS_MALTA
#undef CONFIG_CPU_HAS_PREFETCH
#endif

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/regdef.h>

#define dst a0
#define src a1
#define len a2

/*
 * Spec
 *
 * memcpy copies len bytes from src to dst and sets v0 to dst.
 * It assumes that
 *   - src and dst don't overlap
 *   - src is readable
 *   - dst is writable
 * memcpy uses the standard calling convention
 *
 * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
 * the number of uncopied bytes due to an exception caused by a read or write.
 * __copy_user assumes that src and dst don't overlap, and that the call is
 * implementing one of the following:
 *   copy_to_user
 *     - src is readable  (no exceptions when reading src)
 *   copy_from_user
 *     - dst is writable  (no exceptions when writing dst)
 * __copy_user uses a non-standard calling convention; see
 * include/asm-mips/uaccess.h
 *
 * When an exception happens on a load, the handler must
 * ensure that all of the destination buffer is overwritten to prevent
 * leaking information to user mode programs.
 */
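
/*
 * Roughly, as seen from C (illustrative sketch only; the real declarations
 * live in the uaccess/string headers):
 *
 *	void *memcpy(void *dst, const void *src, size_t len);
 *		- returns dst (in v0)
 *	__copy_user(dst, src, len)
 *		- returns in a2 the number of uncopied bytes: 0 on full
 *		  success, otherwise an upper bound of what was not copied
 */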

/*
 * Implementation
 */

/*
 * The exception handler for loads requires that:
 *  1- AT contain the address of the byte just past the end of the source
 *     of the copy,
 *  2- src_entry <= src < AT, and
 *  3- (dst - src) == (dst_entry - src_entry),
 * The _entry suffix denotes values when __copy_user was called.
 *
 * (1) is set up by uaccess.h and maintained by not writing AT in copy_user
 * (2) is met by incrementing src by the number of bytes copied
 * (3) is met by not doing loads between a pair of increments of dst and src
 *
 * The exception handlers for stores adjust len (if necessary) and return.
 * These handlers do not need to overwrite any data.
 *
 * For __rmemcpy and memmove an exception is always a kernel bug, therefore
 * they're not protected.
 */
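
/*
 * Worked example of the invariants above (illustrative numbers only):
 * say __copy_user is entered with src = 0x1000, dst = 0x5000, len = 0x40,
 * so AT = 0x1040.  If a load faults once 0x10 bytes have been copied,
 * src = 0x1010 and dst = 0x5010.  The handler recovers the uncopied count
 * as AT - (first bad address) and, because dst - src is unchanged, it can
 * also locate the destination bytes that still have to be zeroed.
 */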

/* Instruction type */
#define LD_INSN 1
#define ST_INSN 2
/* Prefetch type */
#define SRC_PREFETCH 1
#define DST_PREFETCH 2
#define LEGACY_MODE 1
#define EVA_MODE    2
#define USEROP   1
#define KERNELOP 2

/*
 * Wrapper to add an entry in the exception table
 * in case the insn causes a memory exception.
 * Arguments:
 * insn    : Load/store instruction
 * type    : Instruction type
 * reg     : Register
 * addr    : Address
 * handler : Exception handler
 */

#define EXC(insn, type, reg, addr, handler)			\
	.if \mode == LEGACY_MODE;				\
9:		insn reg, addr;					\
		.section __ex_table,"a";			\
		PTR	9b, handler;				\
		.previous;					\
	/* This is assembled in EVA mode */			\
	.else;							\
		/* If loading from user or storing to user */	\
		.if ((\from == USEROP) && (type == LD_INSN)) || \
		    ((\to == USEROP) && (type == ST_INSN));	\
9:			__BUILD_EVA_INSN(insn##e, reg, addr);	\
			.section __ex_table,"a";		\
			PTR	9b, handler;			\
			.previous;				\
		.else;						\
			/*					\
			 * Still in EVA, but no need for	\
			 * exception handler or EVA insn	\
			 */					\
			insn reg, addr;				\
		.endif;						\
	.endif

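/*
 * Illustrative expansion (LEGACY_MODE assumed): EXC(lw, LD_INSN, t0,
 * 0(src), .Ll_exc\@) emits
 *
 *	9:	lw	t0, 0(src)
 *		.section __ex_table,"a"
 *		PTR	9b, .Ll_exc\@
 *		.previous
 *
 * i.e. the access itself plus an exception-table entry that points the
 * fault handler at the supplied label.
 */
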
/*
 * Only on the 64-bit kernel can we make use of 64-bit registers.
 */
#ifdef CONFIG_64BIT
#define USE_DOUBLE
#endif

#ifdef USE_DOUBLE

#define LOADK ld /* No exception */
#define LOAD(reg, addr, handler)	EXC(ld, LD_INSN, reg, addr, handler)
#define LOADL(reg, addr, handler)	EXC(ldl, LD_INSN, reg, addr, handler)
#define LOADR(reg, addr, handler)	EXC(ldr, LD_INSN, reg, addr, handler)
#define STOREL(reg, addr, handler)	EXC(sdl, ST_INSN, reg, addr, handler)
#define STORER(reg, addr, handler)	EXC(sdr, ST_INSN, reg, addr, handler)
#define STORE(reg, addr, handler)	EXC(sd, ST_INSN, reg, addr, handler)
#define ADD    daddu
#define SUB    dsubu
#define SRL    dsrl
#define SRA    dsra
#define SLL    dsll
#define SLLV   dsllv
#define SRLV   dsrlv
#define NBYTES 8
#define LOG_NBYTES 3

/*
 * As we are sharing the code base with the mips32 tree (which uses the o32
 * ABI register definitions), we need to redefine the register definitions
 * from the n64 ABI register naming to the o32 ABI register naming.
 */
#undef t0
#undef t1
#undef t2
#undef t3
#define t0	$8
#define t1	$9
#define t2	$10
#define t3	$11
#define t4	$12
#define t5	$13
#define t6	$14
#define t7	$15

#else

#define LOADK lw /* No exception */
#define LOAD(reg, addr, handler)	EXC(lw, LD_INSN, reg, addr, handler)
#define LOADL(reg, addr, handler)	EXC(lwl, LD_INSN, reg, addr, handler)
#define LOADR(reg, addr, handler)	EXC(lwr, LD_INSN, reg, addr, handler)
#define STOREL(reg, addr, handler)	EXC(swl, ST_INSN, reg, addr, handler)
#define STORER(reg, addr, handler)	EXC(swr, ST_INSN, reg, addr, handler)
#define STORE(reg, addr, handler)	EXC(sw, ST_INSN, reg, addr, handler)
#define ADD    addu
#define SUB    subu
#define SRL    srl
#define SLL    sll
#define SRA    sra
#define SLLV   sllv
#define SRLV   srlv
#define NBYTES 4
#define LOG_NBYTES 2

#endif /* USE_DOUBLE */

#define LOADB(reg, addr, handler)	EXC(lb, LD_INSN, reg, addr, handler)
#define STOREB(reg, addr, handler)	EXC(sb, ST_INSN, reg, addr, handler)

#define _PREF(hint, addr, type)						\
	.if \mode == LEGACY_MODE;					\
		PREF(hint, addr);					\
	.else;								\
		.if ((\from == USEROP) && (type == SRC_PREFETCH)) ||	\
		    ((\to == USEROP) && (type == DST_PREFETCH));	\
			/*						\
			 * PREFE has only 9 bits for the offset		\
			 * compared to PREF which has 16, so it may	\
			 * need to use the $at register but this	\
			 * register should remain intact because it's	\
			 * used later on.  Therefore use $v1.		\
			 */						\
			.set at=v1;					\
			PREFE(hint, addr);				\
			.set noat;					\
		.else;							\
			PREF(hint, addr);				\
		.endif;							\
	.endif

#define PREFS(hint, addr) _PREF(hint, addr, SRC_PREFETCH)
#define PREFD(hint, addr) _PREF(hint, addr, DST_PREFETCH)

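/*
 * For example (illustrative): PREFS(0, 1*32(src)) expands to
 * PREF(0, 32(src)) in LEGACY_MODE -- a hint to fetch the cache line
 * 32 bytes ahead of src for loading -- while in EVA_MODE with a
 * user-space source the same call emits PREFE so the hint is applied
 * to the user address space.
 */
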
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define LDREST	LOADL
#define STFIRST STORER
#define STREST	STOREL
#define SHIFT_DISCARD SLLV
#else
#define LDFIRST LOADL
#define LDREST	LOADR
#define STFIRST STOREL
#define STREST	STORER
#define SHIFT_DISCARD SRLV
#endif
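
/*
 * Illustrative example (32-bit little-endian assumed): to pick up the
 * unaligned word at src, LDFIRST(t0, FIRST(0)(src), handler) emits
 * lwr t0, 0(src), which fills the low-order bytes of t0 from src up to
 * the end of its aligned word, and LDREST(t0, REST(0)(src), handler)
 * emits lwl t0, 3(src), which supplies the remaining high-order bytes.
 * STFIRST/STREST perform the mirror-image partial stores.
 */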

#define FIRST(unit) ((unit)*NBYTES)
#define REST(unit)  (FIRST(unit)+NBYTES-1)
#define UNIT(unit)  FIRST(unit)

#define ADDRMASK (NBYTES-1)

	.text
	.set	noreorder
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	.set	noat
#else
	.set	at=v1
#endif

	.align	5

	/*
	 * Macro to build the __copy_user common code
	 * Arguments:
	 * mode : LEGACY_MODE or EVA_MODE
	 * from : Source operand. USEROP or KERNELOP
	 * to   : Destination operand. USEROP or KERNELOP
	 */
	.macro __BUILD_COPY_USER mode, from, to

	/* initialize __memcpy if this is the first time we execute this macro */
	.ifnotdef __memcpy
	.set __memcpy, 1
	.hidden __memcpy /* make sure it does not leak */
	.endif

	/*
	 * Note: dst & src may be unaligned, len may be 0
	 * Temps
	 */
#define rem t8

	R10KCBARRIER(0(ra))
	/*
	 * The "issue break"s below are very approximate.
	 * Issue delays for dcache fills will perturb the schedule, as will
	 * load queue full replay traps, etc.
	 *
	 * If len < NBYTES use byte operations.
	 */
	PREFS(	0, 0(src) )
	PREFD(	1, 0(dst) )
	sltu	t2, len, NBYTES
	and	t1, dst, ADDRMASK
	PREFS(	0, 1*32(src) )
	PREFD(	1, 1*32(dst) )
	bnez	t2, .Lcopy_bytes_checklen\@
	 and	t0, src, ADDRMASK
	PREFS(	0, 2*32(src) )
	PREFD(	1, 2*32(dst) )
#ifndef CONFIG_CPU_MIPSR6
	bnez	t1, .Ldst_unaligned\@
	 nop
	bnez	t0, .Lsrc_unaligned_dst_aligned\@
#else
	or	t0, t0, t1
	bnez	t0, .Lcopy_unaligned_bytes\@
#endif
	/*
	 * use delay slot for fall-through
	 * src and dst are aligned; need to compute rem
	 */
.Lboth_aligned\@:
	SRL	t0, len, LOG_NBYTES+3		# +3 for 8 units/iter
	beqz	t0, .Lcleanup_both_aligned\@	# len < 8*NBYTES
	 and	rem, len, (8*NBYTES-1)		# rem = len % (8*NBYTES)
	PREFS(	0, 3*32(src) )
	PREFD(	1, 3*32(dst) )
	.align	4
1:
	R10KCBARRIER(0(ra))
	LOAD(t0, UNIT(0)(src), .Ll_exc\@)
	LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
	LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
	LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
	SUB	len, len, 8*NBYTES
	LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
	LOAD(t7, UNIT(5)(src), .Ll_exc_copy\@)
	STORE(t0, UNIT(0)(dst), .Ls_exc_p8u\@)
	STORE(t1, UNIT(1)(dst), .Ls_exc_p7u\@)
	LOAD(t0, UNIT(6)(src), .Ll_exc_copy\@)
	LOAD(t1, UNIT(7)(src), .Ll_exc_copy\@)
	ADD	src, src, 8*NBYTES
	ADD	dst, dst, 8*NBYTES
	STORE(t2, UNIT(-6)(dst), .Ls_exc_p6u\@)
	STORE(t3, UNIT(-5)(dst), .Ls_exc_p5u\@)
	STORE(t4, UNIT(-4)(dst), .Ls_exc_p4u\@)
	STORE(t7, UNIT(-3)(dst), .Ls_exc_p3u\@)
	STORE(t0, UNIT(-2)(dst), .Ls_exc_p2u\@)
	STORE(t1, UNIT(-1)(dst), .Ls_exc_p1u\@)
	PREFS(	0, 8*32(src) )
	PREFD(	1, 8*32(dst) )
	bne	len, rem, 1b
	 nop

	/*
	 * len == rem == the number of bytes left to copy < 8*NBYTES
	 */
.Lcleanup_both_aligned\@:
	beqz	len, .Ldone\@
	 sltu	t0, len, 4*NBYTES
	bnez	t0, .Lless_than_4units\@
	 and	rem, len, (NBYTES-1)	# rem = len % NBYTES
	/*
	 * len >= 4*NBYTES
	 */
	LOAD( t0, UNIT(0)(src), .Ll_exc\@)
	LOAD( t1, UNIT(1)(src), .Ll_exc_copy\@)
	LOAD( t2, UNIT(2)(src), .Ll_exc_copy\@)
	LOAD( t3, UNIT(3)(src), .Ll_exc_copy\@)
	SUB	len, len, 4*NBYTES
	ADD	src, src, 4*NBYTES
	R10KCBARRIER(0(ra))
	STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@)
	STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@)
	STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@)
	STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
	beqz	len, .Ldone\@
	.set	noreorder
.Lless_than_4units\@:
	/*
	 * rem = len % NBYTES
	 */
	beq	rem, len, .Lcopy_bytes\@
	 nop
1:
	R10KCBARRIER(0(ra))
	LOAD(t0, 0(src), .Ll_exc\@)
	ADD	src, src, NBYTES
	SUB	len, len, NBYTES
	STORE(t0, 0(dst), .Ls_exc_p1u\@)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, NBYTES
	bne	rem, len, 1b
	.set	noreorder

#ifndef CONFIG_CPU_MIPSR6
	/*
	 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
	 * A loop would do only a byte at a time with possible branch
	 * mispredicts.  Can't do an explicit LOAD dst,mask,or,STORE
	 * because can't assume read-access to dst.  Instead, use
	 * STREST dst, which doesn't require read access to dst.
	 *
	 * This code should perform better than a simple loop on modern,
	 * wide-issue mips processors because the code has fewer branches and
	 * more instruction-level parallelism.
	 */
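	/*
	 * Worked example (illustrative, 32-bit little-endian assumed):
	 * with rem = len = 3, bits = 32 - 24 = 8, so SHIFT_DISCARD (sllv)
	 * moves the three valid low-order bytes of t0 into its top three
	 * bytes, and STREST (swl) at dst + len - 1 then writes exactly
	 * those three bytes without touching the byte after the buffer.
	 */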
#define bits t2
	beqz	len, .Ldone\@
	 ADD	t1, dst, len	# t1 is just past last byte of dst
	li	bits, 8*NBYTES
	SLL	rem, len, 3	# rem = number of bits to keep
	LOAD(t0, 0(src), .Ll_exc\@)
	SUB	bits, bits, rem	# bits = number of bits to discard
	SHIFT_DISCARD t0, t0, bits
	STREST(t0, -1(t1), .Ls_exc\@)
	jr	ra
	 move	len, zero
.Ldst_unaligned\@:
	/*
	 * dst is unaligned
	 * t0 = src & ADDRMASK
	 * t1 = dst & ADDRMASK; T1 > 0
	 * len >= NBYTES
	 *
	 * Copy enough bytes to align dst
	 * Set match = (src and dst have same alignment)
	 */
#define match rem
	LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
	ADD	t2, zero, NBYTES
	LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
	SUB	t2, t2, t1	# t2 = number of bytes copied
	xor	match, t0, t1
	R10KCBARRIER(0(ra))
	STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
	beq	len, t2, .Ldone\@
	 SUB	len, len, t2
	ADD	dst, dst, t2
	beqz	match, .Lboth_aligned\@
	 ADD	src, src, t2

.Lsrc_unaligned_dst_aligned\@:
	SRL	t0, len, LOG_NBYTES+2	# +2 for 4 units/iter
	PREFS(	0, 3*32(src) )
	beqz	t0, .Lcleanup_src_unaligned\@
	 and	rem, len, (4*NBYTES-1)	# rem = len % 4*NBYTES
	PREFD(	1, 3*32(dst) )
1:
/*
 * Avoid consecutive LD*'s to the same register since some mips
 * implementations can't issue them in the same cycle.
 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
 * are to the same unit (unless src is aligned, but it's not).
 */
	R10KCBARRIER(0(ra))
	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
	LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
	SUB	len, len, 4*NBYTES
	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
	LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
	LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
	LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
	LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
	LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
	PREFS(	0, 9*32(src) )		# 0 is PREF_LOAD  (not streamed)
	ADD	src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
	nop				# improves slotting
#endif
	STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@)
	STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@)
	STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@)
	STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@)
	PREFD(	1, 9*32(dst) )		# 1 is PREF_STORE (not streamed)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
	bne	len, rem, 1b
	.set	noreorder

.Lcleanup_src_unaligned\@:
	beqz	len, .Ldone\@
	 and	rem, len, NBYTES-1	# rem = len % NBYTES
	beq	rem, len, .Lcopy_bytes\@
	 nop
1:
	R10KCBARRIER(0(ra))
	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
	ADD	src, src, NBYTES
	SUB	len, len, NBYTES
	STORE(t0, 0(dst), .Ls_exc_p1u\@)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, NBYTES
	bne	len, rem, 1b
	.set	noreorder

#endif /* !CONFIG_CPU_MIPSR6 */
.Lcopy_bytes_checklen\@:
	beqz	len, .Ldone\@
	 nop
.Lcopy_bytes\@:
	/* 0 < len < NBYTES */
	R10KCBARRIER(0(ra))
#define COPY_BYTE(N)			\
	LOADB(t0, N(src), .Ll_exc\@);	\
	SUB	len, len, 1;		\
	beqz	len, .Ldone\@;		\
	STOREB(t0, N(dst), .Ls_exc_p1\@)

	COPY_BYTE(0)
	COPY_BYTE(1)
#ifdef USE_DOUBLE
	COPY_BYTE(2)
	COPY_BYTE(3)
	COPY_BYTE(4)
	COPY_BYTE(5)
#endif
	LOADB(t0, NBYTES-2(src), .Ll_exc\@)
	SUB	len, len, 1
	jr	ra
	 STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@)
.Ldone\@:
	jr	ra
	 nop

#ifdef CONFIG_CPU_MIPSR6
.Lcopy_unaligned_bytes\@:
1:
	COPY_BYTE(0)
	COPY_BYTE(1)
	COPY_BYTE(2)
	COPY_BYTE(3)
	COPY_BYTE(4)
	COPY_BYTE(5)
	COPY_BYTE(6)
	COPY_BYTE(7)
	ADD	src, src, 8
	b	1b
	 ADD	dst, dst, 8
#endif /* CONFIG_CPU_MIPSR6 */
	.if __memcpy == 1
	END(memcpy)
	.set __memcpy, 0
	.hidden __memcpy
	.endif

.Ll_exc_copy\@:
	/*
	 * Copy bytes from src until faulting load address (or until a
	 * lb faults)
	 *
	 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
	 * may be more than a byte beyond the last address.
	 * Hence, the lb below may get an exception.
	 *
	 * Assumes src < THREAD_BUADDR($28)
	 */
	LOADK	t0, TI_TASK($28)
	 nop
	LOADK	t0, THREAD_BUADDR(t0)
1:
	LOADB(t1, 0(src), .Ll_exc\@)
	ADD	src, src, 1
	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 1
	bne	src, t0, 1b
	.set	noreorder
.Ll_exc\@:
	LOADK	t0, TI_TASK($28)
	 nop
	LOADK	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
	 nop
	SUB	len, AT, t0		# len = number of uncopied bytes
	bnez	t6, .Ldone\@	/* Skip the zeroing part if inatomic */
	/*
	 * Here's where we rely on src and dst being incremented in tandem,
	 *   See (3) above.
	 * dst += (fault addr - src) to put dst at first byte to clear
	 */
	ADD	dst, t0			# compute start address in a1
	SUB	dst, src
	/*
	 * Clear len bytes starting at dst.  Can't call __bzero because it
	 * might modify len.  An inefficient loop for these rare times...
	 */
	.set	reorder				/* DADDI_WAR */
	SUB	src, len, 1
	beqz	len, .Ldone\@
	.set	noreorder
1:	sb	zero, 0(dst)
	ADD	dst, dst, 1
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	bnez	src, 1b
	 SUB	src, src, 1
#else
	.set	push
	.set	noat
	li	v1, 1
	bnez	src, 1b
	 SUB	src, src, v1
	.set	pop
#endif
	jr	ra
	 nop


#define SEXC(n)							\
	.set	reorder;			/* DADDI_WAR */	\
.Ls_exc_p ## n ## u\@:						\
	ADD	len, len, n*NBYTES;				\
	jr	ra;						\
	.set	noreorder

SEXC(8)
SEXC(7)
SEXC(6)
SEXC(5)
SEXC(4)
SEXC(3)
SEXC(2)
SEXC(1)

.Ls_exc_p1\@:
	.set	reorder				/* DADDI_WAR */
	ADD	len, len, 1
	jr	ra
	.set	noreorder
.Ls_exc\@:
	jr	ra
	 nop
	.endm

	.align	5
LEAF(memmove)
EXPORT_SYMBOL(memmove)
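	/*
	 * Illustrative note: the four instructions below test whether
	 * [dst, dst+len) and [src, src+len) overlap.  t0 = (src < dst+len)
	 * and t1 = (dst < src+len) are both true only for overlapping
	 * buffers; non-overlapping buffers take the plain memcpy path,
	 * everything else falls through to the byte-wise __rmemcpy below,
	 * which picks a safe copy direction.
	 */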
	ADD	t0, a0, a2
	ADD	t1, a1, a2
	sltu	t0, a1, t0			# dst + len <= src -> memcpy
	sltu	t1, a0, t1			# dst >= src + len -> memcpy
	and	t0, t1
	beqz	t0, .L__memcpy
	 move	v0, a0				/* return value */
	beqz	a2, .Lr_out
	END(memmove)

	/* fall through to __rmemcpy */
LEAF(__rmemcpy)					/* a0=dst a1=src a2=len */
	sltu	t0, a1, a0
	beqz	t0, .Lr_end_bytes_up		# src >= dst
	 nop
	ADD	a0, a2				# dst = dst + len
	ADD	a1, a2				# src = src + len

.Lr_end_bytes:
	R10KCBARRIER(0(ra))
	lb	t0, -1(a1)
	SUB	a2, a2, 0x1
	sb	t0, -1(a0)
	SUB	a1, a1, 0x1
	.set	reorder				/* DADDI_WAR */
	SUB	a0, a0, 0x1
	bnez	a2, .Lr_end_bytes
	.set	noreorder

.Lr_out:
	jr	ra
	 move	a2, zero

.Lr_end_bytes_up:
	R10KCBARRIER(0(ra))
	lb	t0, (a1)
	SUB	a2, a2, 0x1
	sb	t0, (a0)
	ADD	a1, a1, 0x1
	.set	reorder				/* DADDI_WAR */
	ADD	a0, a0, 0x1
	bnez	a2, .Lr_end_bytes_up
	.set	noreorder

	jr	ra
	 move	a2, zero
	END(__rmemcpy)

/*
 * t6 is used as a flag to note inatomic mode.
 */
LEAF(__copy_user_inatomic)
EXPORT_SYMBOL(__copy_user_inatomic)
	b	__copy_user_common
	li	t6, 1
	END(__copy_user_inatomic)

/*
 * A combined memcpy/__copy_user
 * __copy_user sets len to 0 for success; else to an upper bound of
 * the number of uncopied bytes.
 * memcpy sets v0 to dst.
 */
	.align	5
LEAF(memcpy)					/* a0=dst a1=src a2=len */
EXPORT_SYMBOL(memcpy)
	move	v0, dst				/* return value */
.L__memcpy:
FEXPORT(__copy_user)
EXPORT_SYMBOL(__copy_user)
	li	t6, 0	/* not inatomic */
__copy_user_common:
	/* Legacy Mode, user <-> user */
	__BUILD_COPY_USER LEGACY_MODE USEROP USEROP

#ifdef CONFIG_EVA

/*
 * For EVA we need distinct symbols for reading and writing to user space.
 * This is because we need to use specific EVA instructions to perform the
 * virtual <-> physical translation when a virtual address is actually in user
 * space
 */

LEAF(__copy_user_inatomic_eva)
EXPORT_SYMBOL(__copy_user_inatomic_eva)
	b	__copy_from_user_common
	li	t6, 1
	END(__copy_user_inatomic_eva)

/*
 * __copy_from_user (EVA)
 */

LEAF(__copy_from_user_eva)
EXPORT_SYMBOL(__copy_from_user_eva)
	li	t6, 0	/* not inatomic */
__copy_from_user_common:
	__BUILD_COPY_USER EVA_MODE USEROP KERNELOP
END(__copy_from_user_eva)



/*
 * __copy_to_user (EVA)
 */

LEAF(__copy_to_user_eva)
EXPORT_SYMBOL(__copy_to_user_eva)
__BUILD_COPY_USER EVA_MODE KERNELOP USEROP
END(__copy_to_user_eva)

/*
 * __copy_in_user (EVA)
 */

LEAF(__copy_in_user_eva)
EXPORT_SYMBOL(__copy_in_user_eva)
__BUILD_COPY_USER EVA_MODE USEROP USEROP
END(__copy_in_user_eva)

#endif