/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Unified implementation of memcpy, memmove and the __copy_user backend.
 *
 * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
 * Copyright (C) 2002 Broadcom, Inc.
 *   memcpy/copy_user author: Mark Vandevoorde
 * Copyright (C) 2007 Maciej W. Rozycki
 * Copyright (C) 2014 Imagination Technologies Ltd.
 *
 * Mnemonic names for arguments to memcpy/__copy_user
 */

/*
 * Hack to resolve longstanding prefetch issue
 *
 * Prefetching may be fatal on some systems if we're prefetching beyond the
 * end of memory. It's also a seriously bad idea on non dma-coherent systems.
 */
#ifdef CONFIG_DMA_NONCOHERENT
#undef CONFIG_CPU_HAS_PREFETCH
#endif
#ifdef CONFIG_MIPS_MALTA
#undef CONFIG_CPU_HAS_PREFETCH
#endif

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>

#define dst a0
#define src a1
#define len a2

/*
 * Spec
 *
 * memcpy copies len bytes from src to dst and sets v0 to dst.
 * It assumes that
 *   - src and dst don't overlap
 *   - src is readable
 *   - dst is writable
 * memcpy uses the standard calling convention
 *
 * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
 * the number of uncopied bytes due to an exception caused by a read or write.
 * __copy_user assumes that src and dst don't overlap, and that the call is
 * implementing one of the following:
 *   copy_to_user
 *     - src is readable (no exceptions when reading src)
 *   copy_from_user
 *     - dst is writable (no exceptions when writing dst)
 * __copy_user uses a non-standard calling convention; see
 * include/asm-mips/uaccess.h
 *
 * When an exception happens on a load, the handler must
 * ensure that all of the destination buffer is overwritten to prevent
 * leaking information to user mode programs.
 */

/*
 * Implementation
 */

/*
 * The exception handler for loads requires that:
 *  1- AT contain the address of the byte just past the end of the source
 *     of the copy,
 *  2- src_entry <= src < AT, and
 *  3- (dst - src) == (dst_entry - src_entry),
 * The _entry suffix denotes values when __copy_user was called.
 *
 * (1) is set up by uaccess.h and maintained by not writing AT in copy_user
 * (2) is met by incrementing src by the number of bytes copied
 * (3) is met by not doing loads between a pair of increments of dst and src
 *
 * The exception handlers for stores adjust len (if necessary) and return.
 * These handlers do not need to overwrite any data.
 *
 * For __rmemcpy and memmove an exception is always a kernel bug, therefore
 * they're not protected.
 */

/* Instruction type */
#define LD_INSN 1
#define ST_INSN 2
/* Prefetch type */
#define SRC_PREFETCH 1
#define DST_PREFETCH 2
#define LEGACY_MODE 1
#define EVA_MODE 2
#define USEROP 1
#define KERNELOP 2

/*
 * Wrapper to add an entry in the exception table
 * in case the insn causes a memory exception.
 * Arguments:
 * insn    : Load/store instruction
 * type    : Instruction type
 * reg     : Register
 * addr    : Address
 * handler : Exception handler
 */

#define EXC(insn, type, reg, addr, handler)                     \
        .if \mode == LEGACY_MODE;                               \
9:              insn reg, addr;                                 \
                .section __ex_table,"a";                        \
                PTR     9b, handler;                            \
                .previous;                                      \
        /* This is assembled in EVA mode */                     \
        .else;                                                  \
                /* If loading from user or storing to user */   \
                .if ((\from == USEROP) && (type == LD_INSN)) || \
                    ((\to == USEROP) && (type == ST_INSN));     \
9:                      __BUILD_EVA_INSN(insn##e, reg, addr);   \
                        .section __ex_table,"a";                \
                        PTR     9b, handler;                    \
                        .previous;                              \
                .else;                                          \
                        /*                                      \
                         * Still in EVA, but no need for        \
                         * exception handler or EVA insn        \
                         */                                     \
                        insn reg, addr;                         \
                .endif;                                         \
        .endif

/*
 * Only the 64-bit kernel can make use of 64-bit registers.
 */
#ifdef CONFIG_64BIT
#define USE_DOUBLE
#endif

#ifdef USE_DOUBLE

#define LOADK ld /* No exception */
#define LOAD(reg, addr, handler) EXC(ld, LD_INSN, reg, addr, handler)
#define LOADL(reg, addr, handler) EXC(ldl, LD_INSN, reg, addr, handler)
#define LOADR(reg, addr, handler) EXC(ldr, LD_INSN, reg, addr, handler)
#define STOREL(reg, addr, handler) EXC(sdl, ST_INSN, reg, addr, handler)
#define STORER(reg, addr, handler) EXC(sdr, ST_INSN, reg, addr, handler)
#define STORE(reg, addr, handler) EXC(sd, ST_INSN, reg, addr, handler)
#define ADD daddu
#define SUB dsubu
#define SRL dsrl
#define SRA dsra
#define SLL dsll
#define SLLV dsllv
#define SRLV dsrlv
#define NBYTES 8
#define LOG_NBYTES 3

/*
 * As we share the code base with the mips32 tree (which uses the o32 ABI
 * register definitions), we need to redefine the register definitions from
 * the n64 ABI register naming to the o32 ABI register naming.
 */
#undef t0
#undef t1
#undef t2
#undef t3
#define t0 $8
#define t1 $9
#define t2 $10
#define t3 $11
#define t4 $12
#define t5 $13
#define t6 $14
#define t7 $15

#else

#define LOADK lw /* No exception */
#define LOAD(reg, addr, handler) EXC(lw, LD_INSN, reg, addr, handler)
#define LOADL(reg, addr, handler) EXC(lwl, LD_INSN, reg, addr, handler)
#define LOADR(reg, addr, handler) EXC(lwr, LD_INSN, reg, addr, handler)
#define STOREL(reg, addr, handler) EXC(swl, ST_INSN, reg, addr, handler)
#define STORER(reg, addr, handler) EXC(swr, ST_INSN, reg, addr, handler)
#define STORE(reg, addr, handler) EXC(sw, ST_INSN, reg, addr, handler)
#define ADD addu
#define SUB subu
#define SRL srl
#define SLL sll
#define SRA sra
#define SLLV sllv
#define SRLV srlv
#define NBYTES 4
#define LOG_NBYTES 2

#endif /* USE_DOUBLE */

#define LOADB(reg, addr, handler) EXC(lb, LD_INSN, reg, addr, handler)
#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)

#define _PREF(hint, addr, type)                                         \
        .if \mode == LEGACY_MODE;                                       \
                PREF(hint, addr);                                       \
        .else;                                                          \
                .if ((\from == USEROP) && (type == SRC_PREFETCH)) ||    \
                    ((\to == USEROP) && (type == DST_PREFETCH));        \
                        /*                                              \
                         * PREFE has only 9 bits for the offset         \
                         * compared to PREF which has 16, so it may     \
                         * need to use the $at register but this        \
                         * register should remain intact because it's   \
                         * used later on. Therefore use $v1.            \
                         */                                             \
                        .set at=v1;                                     \
                        PREFE(hint, addr);                              \
                        .set noat;                                      \
                .else;                                                  \
                        PREF(hint, addr);                               \
                .endif;                                                 \
        .endif

#define PREFS(hint, addr) _PREF(hint, addr, SRC_PREFETCH)
#define PREFD(hint, addr) _PREF(hint, addr, DST_PREFETCH)

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define LDREST  LOADL
#define STFIRST STORER
#define STREST  STOREL
#define SHIFT_DISCARD SLLV
#else
#define LDFIRST LOADL
#define LDREST  LOADR
#define STFIRST STOREL
#define STREST  STORER
#define SHIFT_DISCARD SRLV
#endif

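/*
 * FIRST(unit) and REST(unit) are the two offsets handed to the
 * LDFIRST/LDREST (and STFIRST/STREST) pair that together move one
 * possibly-unaligned word of unit "unit": its first and last byte.
 */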
#define FIRST(unit) ((unit)*NBYTES)
#define REST(unit)  (FIRST(unit)+NBYTES-1)
#define UNIT(unit)  FIRST(unit)

#define ADDRMASK (NBYTES-1)

        .text
        .set    noreorder
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
        .set    noat
#else
        .set    at=v1
#endif

        .align  5

        /*
         * Macro to build the __copy_user common code
         * Arguments:
         * mode : LEGACY_MODE or EVA_MODE
         * from : Source operand. USEROP or KERNELOP
         * to   : Destination operand. USEROP or KERNELOP
         */
        .macro __BUILD_COPY_USER mode, from, to

        /* initialize __memcpy if this is the first time we execute this macro */
        .ifnotdef __memcpy
        .set __memcpy, 1
        .hidden __memcpy /* make sure it does not leak */
        .endif

        /*
         * Note: dst & src may be unaligned, len may be 0
         * Temps
         */
#define rem t8

        R10KCBARRIER(0(ra))
        /*
         * The "issue break"s below are very approximate.
         * Issue delays for dcache fills will perturb the schedule, as will
         * load queue full replay traps, etc.
         *
         * If len < NBYTES use byte operations.
         */
        PREFS(  0, 0(src) )
        PREFD(  1, 0(dst) )
        sltu    t2, len, NBYTES
        and     t1, dst, ADDRMASK
        PREFS(  0, 1*32(src) )
        PREFD(  1, 1*32(dst) )
        bnez    t2, .Lcopy_bytes_checklen\@
        and     t0, src, ADDRMASK
        PREFS(  0, 2*32(src) )
        PREFD(  1, 2*32(dst) )
        bnez    t1, .Ldst_unaligned\@
        nop
        bnez    t0, .Lsrc_unaligned_dst_aligned\@
        /*
         * use delay slot for fall-through
         * src and dst are aligned; need to compute rem
         */
.Lboth_aligned\@:
        SRL     t0, len, LOG_NBYTES+3   # +3 for 8 units/iter
        beqz    t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES
        and     rem, len, (8*NBYTES-1)  # rem = len % (8*NBYTES)
        PREFS(  0, 3*32(src) )
        PREFD(  1, 3*32(dst) )
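        /*
         * Main loop: 8*NBYTES per iteration. Loads are grouped ahead of
         * the matching stores; t0 and t1 are reused for units 6 and 7
         * once units 0 and 1 have been stored.
         */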
        .align  4
1:
        R10KCBARRIER(0(ra))
        LOAD(t0, UNIT(0)(src), .Ll_exc\@)
        LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
        LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
        LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
        SUB     len, len, 8*NBYTES
        LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
        LOAD(t7, UNIT(5)(src), .Ll_exc_copy\@)
        STORE(t0, UNIT(0)(dst), .Ls_exc_p8u\@)
        STORE(t1, UNIT(1)(dst), .Ls_exc_p7u\@)
        LOAD(t0, UNIT(6)(src), .Ll_exc_copy\@)
        LOAD(t1, UNIT(7)(src), .Ll_exc_copy\@)
        ADD     src, src, 8*NBYTES
        ADD     dst, dst, 8*NBYTES
        STORE(t2, UNIT(-6)(dst), .Ls_exc_p6u\@)
        STORE(t3, UNIT(-5)(dst), .Ls_exc_p5u\@)
        STORE(t4, UNIT(-4)(dst), .Ls_exc_p4u\@)
        STORE(t7, UNIT(-3)(dst), .Ls_exc_p3u\@)
        STORE(t0, UNIT(-2)(dst), .Ls_exc_p2u\@)
        STORE(t1, UNIT(-1)(dst), .Ls_exc_p1u\@)
        PREFS(  0, 8*32(src) )
        PREFD(  1, 8*32(dst) )
        bne     len, rem, 1b
        nop

        /*
         * len == rem == the number of bytes left to copy < 8*NBYTES
         */
.Lcleanup_both_aligned\@:
        beqz    len, .Ldone\@
        sltu    t0, len, 4*NBYTES
        bnez    t0, .Lless_than_4units\@
        and     rem, len, (NBYTES-1)    # rem = len % NBYTES
        /*
         * len >= 4*NBYTES
         */
        LOAD( t0, UNIT(0)(src), .Ll_exc\@)
        LOAD( t1, UNIT(1)(src), .Ll_exc_copy\@)
        LOAD( t2, UNIT(2)(src), .Ll_exc_copy\@)
        LOAD( t3, UNIT(3)(src), .Ll_exc_copy\@)
        SUB     len, len, 4*NBYTES
        ADD     src, src, 4*NBYTES
        R10KCBARRIER(0(ra))
        STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@)
        STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@)
        STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@)
        STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@)
        .set    reorder                 /* DADDI_WAR */
        ADD     dst, dst, 4*NBYTES
        beqz    len, .Ldone\@
        .set    noreorder
.Lless_than_4units\@:
        /*
         * rem = len % NBYTES
         */
        beq     rem, len, .Lcopy_bytes\@
        nop
1:
        R10KCBARRIER(0(ra))
        LOAD(t0, 0(src), .Ll_exc\@)
        ADD     src, src, NBYTES
        SUB     len, len, NBYTES
        STORE(t0, 0(dst), .Ls_exc_p1u\@)
        .set    reorder                 /* DADDI_WAR */
        ADD     dst, dst, NBYTES
        bne     rem, len, 1b
        .set    noreorder

        /*
         * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
         * A loop would do only a byte at a time with possible branch
         * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
         * because we can't assume read access to dst. Instead, use
         * STREST dst, which doesn't require read access to dst.
         *
         * This code should perform better than a simple loop on modern,
         * wide-issue MIPS processors because the code has fewer branches and
         * more instruction-level parallelism.
         */
#define bits t2
        beqz    len, .Ldone\@
        ADD     t1, dst, len    # t1 is just past last byte of dst
        li      bits, 8*NBYTES
        SLL     rem, len, 3     # rem = number of bits to keep
        LOAD(t0, 0(src), .Ll_exc\@)
        SUB     bits, bits, rem # bits = number of bits to discard
        SHIFT_DISCARD t0, t0, bits
        STREST(t0, -1(t1), .Ls_exc\@)
        jr      ra
        move    len, zero
.Ldst_unaligned\@:
        /*
         * dst is unaligned
         * t0 = src & ADDRMASK
         * t1 = dst & ADDRMASK; t1 > 0
         * len >= NBYTES
         *
         * Copy enough bytes to align dst
         * Set match = (src and dst have same alignment)
         */
#define match rem
        LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
        ADD     t2, zero, NBYTES
        LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
        SUB     t2, t2, t1      # t2 = number of bytes copied
        xor     match, t0, t1
        R10KCBARRIER(0(ra))
        STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
        beq     len, t2, .Ldone\@
        SUB     len, len, t2
        ADD     dst, dst, t2
        beqz    match, .Lboth_aligned\@
        ADD     src, src, t2

.Lsrc_unaligned_dst_aligned\@:
        SRL     t0, len, LOG_NBYTES+2   # +2 for 4 units/iter
        PREFS(  0, 3*32(src) )
        beqz    t0, .Lcleanup_src_unaligned\@
        and     rem, len, (4*NBYTES-1)  # rem = len % 4*NBYTES
        PREFD(  1, 3*32(dst) )
1:
/*
 * Avoid consecutive LD*'s to the same register since some mips
 * implementations can't issue them in the same cycle.
 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
 * are to the same unit (unless src is aligned, but it's not).
 */
        R10KCBARRIER(0(ra))
        LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
        LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
        SUB     len, len, 4*NBYTES
        LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
        LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
        LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
        LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
        LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
        LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
        PREFS(  0, 9*32(src) )          # 0 is PREF_LOAD (not streamed)
        ADD     src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
        nop                             # improves slotting
#endif
        STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@)
        STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@)
        STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@)
        STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@)
        PREFD(  1, 9*32(dst) )          # 1 is PREF_STORE (not streamed)
        .set    reorder                 /* DADDI_WAR */
        ADD     dst, dst, 4*NBYTES
        bne     len, rem, 1b
        .set    noreorder

.Lcleanup_src_unaligned\@:
        beqz    len, .Ldone\@
        and     rem, len, NBYTES-1      # rem = len % NBYTES
        beq     rem, len, .Lcopy_bytes\@
        nop
1:
        R10KCBARRIER(0(ra))
        LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
        LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
        ADD     src, src, NBYTES
        SUB     len, len, NBYTES
        STORE(t0, 0(dst), .Ls_exc_p1u\@)
        .set    reorder                 /* DADDI_WAR */
        ADD     dst, dst, NBYTES
        bne     len, rem, 1b
        .set    noreorder

.Lcopy_bytes_checklen\@:
        beqz    len, .Ldone\@
        nop
.Lcopy_bytes\@:
        /* 0 < len < NBYTES */
        R10KCBARRIER(0(ra))
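        /*
         * COPY_BYTE(N) moves one byte and returns through .Ldone as soon
         * as len reaches zero; the store sits in the branch delay slot,
         * so byte N is written either way.
         */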
#define COPY_BYTE(N)                    \
        LOADB(t0, N(src), .Ll_exc\@);   \
        SUB     len, len, 1;            \
        beqz    len, .Ldone\@;          \
        STOREB(t0, N(dst), .Ls_exc_p1\@)

        COPY_BYTE(0)
        COPY_BYTE(1)
#ifdef USE_DOUBLE
        COPY_BYTE(2)
        COPY_BYTE(3)
        COPY_BYTE(4)
        COPY_BYTE(5)
#endif
        LOADB(t0, NBYTES-2(src), .Ll_exc\@)
        SUB     len, len, 1
        jr      ra
        STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@)
.Ldone\@:
        jr      ra
        nop
        .if __memcpy == 1
        END(memcpy)
        .set __memcpy, 0
        .hidden __memcpy
        .endif

.Ll_exc_copy\@:
        /*
         * Copy bytes from src until faulting load address (or until a
         * lb faults)
         *
         * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
         * may be more than a byte beyond the last address.
         * Hence, the lb below may get an exception.
         *
         * Assumes src < THREAD_BUADDR($28)
         */
        LOADK   t0, TI_TASK($28)
        nop
        LOADK   t0, THREAD_BUADDR(t0)
1:
        LOADB(t1, 0(src), .Ll_exc\@)
        ADD     src, src, 1
        sb      t1, 0(dst)      # can't fault -- we're copy_from_user
        .set    reorder                 /* DADDI_WAR */
        ADD     dst, dst, 1
        bne     src, t0, 1b
        .set    noreorder
.Ll_exc\@:
        LOADK   t0, TI_TASK($28)
        nop
        LOADK   t0, THREAD_BUADDR(t0)   # t0 is just past last good address
        nop
        SUB     len, AT, t0             # len = number of uncopied bytes
        bnez    t6, .Ldone\@    /* Skip the zeroing part if inatomic */
        /*
         * Here's where we rely on src and dst being incremented in tandem;
         * see (3) above.
         * dst += (fault addr - src) to put dst at first byte to clear
         */
        ADD     dst, t0         # compute start address in a1
        SUB     dst, src
        /*
         * Clear len bytes starting at dst. Can't call __bzero because it
         * might modify len. An inefficient loop for these rare times...
         */
        .set    reorder                 /* DADDI_WAR */
        SUB     src, len, 1
        beqz    len, .Ldone\@
        .set    noreorder
1:      sb      zero, 0(dst)
        ADD     dst, dst, 1
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
        bnez    src, 1b
        SUB     src, src, 1
#else
        .set    push
        .set    noat
        li      v1, 1
        bnez    src, 1b
        SUB     src, src, v1
        .set    pop
#endif
        jr      ra
        nop

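
/*
 * SEXC(n) generates the store-fault handler .Ls_exc_p<n>u: n units had
 * not yet been written when the store faulted, so add n*NBYTES back to
 * len before returning.
 */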
#define SEXC(n)                                 \
        .set    reorder;        /* DADDI_WAR */ \
.Ls_exc_p ## n ## u\@:                          \
        ADD     len, len, n*NBYTES;             \
        jr      ra;                             \
        .set    noreorder

SEXC(8)
SEXC(7)
SEXC(6)
SEXC(5)
SEXC(4)
SEXC(3)
SEXC(2)
SEXC(1)

.Ls_exc_p1\@:
        .set    reorder                 /* DADDI_WAR */
        ADD     len, len, 1
        jr      ra
        .set    noreorder
.Ls_exc\@:
        jr      ra
        nop
        .endm
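
/*
 * memmove: if the source and destination regions do not overlap, fall
 * straight through to memcpy; otherwise copy via __rmemcpy, which handles
 * the overlap safely.
 */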
        .align  5
LEAF(memmove)
        ADD     t0, a0, a2
        ADD     t1, a1, a2
        sltu    t0, a1, t0              # dst + len <= src -> memcpy
        sltu    t1, a0, t1              # dst >= src + len -> memcpy
        and     t0, t1
        beqz    t0, .L__memcpy
        move    v0, a0                  /* return value */
        beqz    a2, .Lr_out
        END(memmove)

        /* fall through to __rmemcpy */
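        /*
         * __rmemcpy copies a byte at a time: backwards from the end when
         * src < dst (so an overlapping forward copy can't clobber unread
         * source bytes), forwards otherwise.
         */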
LEAF(__rmemcpy)                                 /* a0=dst a1=src a2=len */
        sltu    t0, a1, a0
        beqz    t0, .Lr_end_bytes_up            # src >= dst
        nop
        ADD     a0, a2                          # dst = dst + len
        ADD     a1, a2                          # src = src + len

.Lr_end_bytes:
        R10KCBARRIER(0(ra))
        lb      t0, -1(a1)
        SUB     a2, a2, 0x1
        sb      t0, -1(a0)
        SUB     a1, a1, 0x1
        .set    reorder                         /* DADDI_WAR */
        SUB     a0, a0, 0x1
        bnez    a2, .Lr_end_bytes
        .set    noreorder

.Lr_out:
        jr      ra
        move    a2, zero

.Lr_end_bytes_up:
        R10KCBARRIER(0(ra))
        lb      t0, (a1)
        SUB     a2, a2, 0x1
        sb      t0, (a0)
        ADD     a1, a1, 0x1
        .set    reorder                         /* DADDI_WAR */
        ADD     a0, a0, 0x1
        bnez    a2, .Lr_end_bytes_up
        .set    noreorder

        jr      ra
        move    a2, zero
        END(__rmemcpy)

/*
 * t6 is used as a flag to note inatomic mode.
 */
LEAF(__copy_user_inatomic)
        b       __copy_user_common
        li      t6, 1
        END(__copy_user_inatomic)

/*
 * A combined memcpy/__copy_user
 * __copy_user sets len to 0 for success; else to an upper bound of
 * the number of uncopied bytes.
 * memcpy sets v0 to dst.
 */
        .align  5
LEAF(memcpy)                                    /* a0=dst a1=src a2=len */
        move    v0, dst                         /* return value */
.L__memcpy:
FEXPORT(__copy_user)
        li      t6, 0                           /* not inatomic */
__copy_user_common:
        /* Legacy Mode, user <-> user */
        __BUILD_COPY_USER LEGACY_MODE USEROP USEROP

#ifdef CONFIG_EVA

/*
 * For EVA we need distinct symbols for reading and writing to user space.
 * This is because we need to use specific EVA instructions to perform the
 * virtual <-> physical translation when a virtual address is actually in
 * user space.
 */

LEAF(__copy_user_inatomic_eva)
        b       __copy_from_user_common
        li      t6, 1
        END(__copy_user_inatomic_eva)

/*
 * __copy_from_user (EVA)
 */

LEAF(__copy_from_user_eva)
        li      t6, 0                           /* not inatomic */
__copy_from_user_common:
        __BUILD_COPY_USER EVA_MODE USEROP KERNELOP
END(__copy_from_user_eva)

/*
 * __copy_to_user (EVA)
 */

LEAF(__copy_to_user_eva)
        __BUILD_COPY_USER EVA_MODE KERNELOP USEROP
END(__copy_to_user_eva)

/*
 * __copy_in_user (EVA)
 */

LEAF(__copy_in_user_eva)
        __BUILD_COPY_USER EVA_MODE USEROP USEROP
END(__copy_in_user_eva)

#endif