/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Itanium 2-optimized version of the memcpy and copy_user functions
 *
 * Inputs:
 *	in0:	destination address
 *	in1:	source address
 *	in2:	number of bytes to copy
 * Output:
 *	for memcpy:    return dest
 *	for copy_user: return 0 if success,
 *		       or the number of bytes NOT copied if an error occurred.
 *
 * Copyright (C) 2002 Intel Corp.
 * Copyright (C) 2002 Ken Chen <kenneth.w.chen@intel.com>
 */
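
/*
 * For reference, the calling contract above corresponds to this C-level
 * model (an illustrative sketch only; the authoritative prototypes live
 * in the kernel's string/uaccess headers):
 *
 *	void *memcpy(void *dest, const void *src, size_t len);
 *		// always returns dest
 *	unsigned long __copy_user(void *dest, const void *src,
 *				  unsigned long len);
 *		// 0 on success, else number of bytes NOT copied
 */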
#include <asm/asmmacro.h>
#include <asm/page.h>
#include <asm/export.h>

#define EK(y...)	EX(y)

/* McKinley specific optimization */

#define retval		r8
#define saved_pfs	r31
#define saved_lc	r10
#define saved_pr	r11
#define saved_in0	r14
#define saved_in1	r15
#define saved_in2	r16

#define src0		r2
#define src1		r3
#define dst0		r17
#define dst1		r18
#define cnt		r9

/* r19-r30 are temporaries for each code section */
#define PREFETCH_DIST	8
#define src_pre_mem	r19
#define dst_pre_mem	r20
#define src_pre_l2	r21
#define dst_pre_l2	r22
#define t1		r23
#define t2		r24
#define t3		r25
#define t4		r26
#define t5		t1	// alias!
#define t6		t2	// alias!
#define t7		t3	// alias!
#define n8		r27
#define t9		t5	// alias!
#define t10		t4	// alias!
#define t11		t7	// alias!
#define t12		t6	// alias!
#define t14		t10	// alias!
#define t13		r28
#define t15		r29
#define tmp		r30

/* defines for long_copy block */
#define A	0
#define B	(PREFETCH_DIST)
#define C	(B + PREFETCH_DIST)
#define D	(C + 1)
#define N	(D + 1)
#define Nrot	((N + 7) & ~7)
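
/*
 * With PREFETCH_DIST = 8 these work out to A = 0, B = 8, C = 16,
 * D = 17, N = 18, and Nrot = 24 (N rounded up to a multiple of 8,
 * as required for the rotating register allocation).
 */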

/* alias */
#define in0		r32
#define in1		r33
#define in2		r34

GLOBAL_ENTRY(memcpy)
	and	r28=0x7,in0
	and	r29=0x7,in1
	mov	f6=f0
	mov	retval=in0
	br.cond.sptk .common_code
	;;
END(memcpy)
EXPORT_SYMBOL(memcpy)
GLOBAL_ENTRY(__copy_user)
	.prologue
// check dest alignment
	and	r28=0x7,in0
	and	r29=0x7,in1
	mov	f6=f1
	mov	saved_in0=in0	// save dest pointer
	mov	saved_in1=in1	// save src pointer
	mov	retval=r0	// initialize return value
	;;
.common_code:
	cmp.gt	p15,p0=8,in2	// check for small size
	cmp.ne	p13,p0=0,r28	// check dest alignment
	cmp.ne	p14,p0=0,r29	// check src alignment
	add	src0=0,in1
	sub	r30=8,r28	// for .align_dest
	mov	saved_in2=in2	// save len
	;;
	add	dst0=0,in0
	add	dst1=1,in0	// dest odd index
	cmp.le	p6,p0=1,r30	// for .align_dest
(p15)	br.cond.dpnt .memcpy_short
(p13)	br.cond.dpnt .align_dest
(p14)	br.cond.dpnt .unaligned_src
	;;
// both dest and src are aligned on 8-byte boundary
.aligned_src:
	.save ar.pfs, saved_pfs
	alloc	saved_pfs=ar.pfs,3,Nrot-3,0,Nrot
	.save pr, saved_pr
	mov	saved_pr=pr

	shr.u	cnt=in2,7	// this many cache lines
	;;
	cmp.lt	p6,p0=2*PREFETCH_DIST,cnt
	cmp.lt	p7,p8=1,cnt
	.save ar.lc, saved_lc
	mov	saved_lc=ar.lc
	.body
	add	cnt=-1,cnt
	add	src_pre_mem=0,in1	// prefetch src pointer
	add	dst_pre_mem=0,in0	// prefetch dest pointer
	;;
(p7)	mov	ar.lc=cnt	// prefetch count
(p8)	mov	ar.lc=r0
(p6)	br.cond.dpnt .long_copy
	;;

.prefetch:
	lfetch.fault	  [src_pre_mem], 128
	lfetch.fault.excl [dst_pre_mem], 128
	br.cloop.dptk.few .prefetch
	;;

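/*
 * A rough C model of the .medium_copy pipelined loop below (an
 * illustrative sketch only; the real loop uses rotating registers with
 * ar.ec=2 so the loads run one stage ahead of the stores):
 *
 *	while (len >= 32) {	// two ld8/st8 pairs per cycle
 *		*(u64 *)(dst +  0) = *(u64 *)(src +  0);
 *		*(u64 *)(dst +  8) = *(u64 *)(src +  8);
 *		*(u64 *)(dst + 16) = *(u64 *)(src + 16);
 *		*(u64 *)(dst + 24) = *(u64 *)(src + 24);
 *		src += 32; dst += 32; len -= 32;
 *	}
 *	// .aligned_src_tail then handles the final 0-31 bytes
 */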
.medium_copy:
	and	tmp=31,in2	// copy length after iteration
	shr.u	r29=in2,5	// number of 32-byte iterations
	add	dst1=8,dst0	// 2nd dest pointer
	;;
	add	cnt=-1,r29	// ctop iteration adjustment
	cmp.eq	p10,p0=r29,r0	// do we really need to loop?
	add	src1=8,src0	// 2nd src pointer
	cmp.le	p6,p0=8,tmp
	;;
	cmp.le	p7,p0=16,tmp
	mov	ar.lc=cnt	// loop setup
	cmp.eq	p16,p17=r0,r0
	mov	ar.ec=2
(p10)	br.dpnt.few .aligned_src_tail
	;;
	TEXT_ALIGN(32)
1:
EX(.ex_handler, (p16)	ld8	r34=[src0],16)
EK(.ex_handler, (p16)	ld8	r38=[src1],16)
EX(.ex_handler, (p17)	st8	[dst0]=r33,16)
EK(.ex_handler, (p17)	st8	[dst1]=r37,16)
	;;
EX(.ex_handler, (p16)	ld8	r32=[src0],16)
EK(.ex_handler, (p16)	ld8	r36=[src1],16)
EX(.ex_handler, (p16)	st8	[dst0]=r34,16)
EK(.ex_handler, (p16)	st8	[dst1]=r38,16)
	br.ctop.dptk.few 1b
	;;

.aligned_src_tail:
EX(.ex_handler, (p6)	ld8	t1=[src0])
	mov	ar.lc=saved_lc
	mov	ar.pfs=saved_pfs
EX(.ex_hndlr_s, (p7)	ld8	t2=[src1],8)
	cmp.le	p8,p0=24,tmp
	and	r21=-8,tmp
	;;
EX(.ex_hndlr_s, (p8)	ld8	t3=[src1])
EX(.ex_handler, (p6)	st8	[dst0]=t1)	// store qword 1
	and	in2=7,tmp	// remaining length
EX(.ex_hndlr_d, (p7)	st8	[dst1]=t2,8)	// store qword 2
	add	src0=src0,r21	// setting up src pointer
	add	dst0=dst0,r21	// setting up dest pointer
	;;
EX(.ex_handler, (p8)	st8	[dst1]=t3)	// store qword 3
	mov	pr=saved_pr,-1
	br.dptk.many .memcpy_short
	;;

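/*
 * Shape of .long_copy below, as C-like pseudocode (an illustrative
 * sketch only; in reality all streams run in one modulo-scheduled loop
 * with N pipeline stages and rotating registers):
 *
 *	for (i = 0; i < 2*PREFETCH_DIST; i++)	// warm-up: start the
 *		touch_line(i);			// ld8/st8 streams, 128B apart
 *	for (each remaining 128-byte line) {
 *		prefetch src and dst lines at the memory and L2 distances;
 *		copy 16 qwords via two ld/st streams (src0/src1, dst0/dst1);
 *	}
 *	// .medium_copy then handles the last tmp = len % 128 bytes
 */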
/* code taken from copy_page_mck */
.long_copy:
	.rotr v[2*PREFETCH_DIST]
	.rotp p[N]

	mov	src_pre_mem = src0
	mov	pr.rot = 0x10000
	mov	ar.ec = 1		// special unrolled loop

	mov	dst_pre_mem = dst0

	add	src_pre_l2 = 8*8, src0
	add	dst_pre_l2 = 8*8, dst0
	;;
	add	src0 = 8, src_pre_mem	// first t1 src
	mov	ar.lc = 2*PREFETCH_DIST - 1
	shr.u	cnt=in2,7		// number of lines
	add	src1 = 3*8, src_pre_mem	// first t3 src
	add	dst0 = 8, dst_pre_mem	// first t1 dst
	add	dst1 = 3*8, dst_pre_mem	// first t3 dst
	;;
	and	tmp=127,in2		// remaining bytes after this block
	add	cnt = -(2*PREFETCH_DIST) - 1, cnt
	// same as .line_copy loop, but with all predicated-off instructions removed:
.prefetch_loop:
EX(.ex_hndlr_lcpy_1, (p[A])	ld8 v[A] = [src_pre_mem], 128)	// M0
EK(.ex_hndlr_lcpy_1, (p[B])	st8 [dst_pre_mem] = v[B], 128)	// M2
	br.ctop.sptk .prefetch_loop
	;;
	cmp.eq	p16, p0 = r0, r0	// reset p16 to 1
	mov	ar.lc = cnt
	mov	ar.ec = N		// # of stages in pipeline
	;;
.line_copy:
EX(.ex_handler,	(p[D])	ld8 t2 = [src0], 3*8)			// M0
EK(.ex_handler,	(p[D])	ld8 t4 = [src1], 3*8)			// M1
EX(.ex_handler_lcpy,	(p[B])	st8 [dst_pre_mem] = v[B], 128)	// M2 prefetch dst from memory
EK(.ex_handler_lcpy,	(p[D])	st8 [dst_pre_l2] = n8, 128)	// M3 prefetch dst from L2
	;;
EX(.ex_handler_lcpy,	(p[A])	ld8 v[A] = [src_pre_mem], 128)	// M0 prefetch src from memory
EK(.ex_handler_lcpy,	(p[C])	ld8 n8 = [src_pre_l2], 128)	// M1 prefetch src from L2
EX(.ex_handler,	(p[D])	st8 [dst0] = t1, 8)			// M2
EK(.ex_handler,	(p[D])	st8 [dst1] = t3, 8)			// M3
	;;
EX(.ex_handler,	(p[D])	ld8 t5 = [src0], 8)
EK(.ex_handler,	(p[D])	ld8 t7 = [src1], 3*8)
EX(.ex_handler,	(p[D])	st8 [dst0] = t2, 3*8)
EK(.ex_handler,	(p[D])	st8 [dst1] = t4, 3*8)
	;;
EX(.ex_handler,	(p[D])	ld8 t6 = [src0], 3*8)
EK(.ex_handler,	(p[D])	ld8 t10 = [src1], 8)
EX(.ex_handler,	(p[D])	st8 [dst0] = t5, 8)
EK(.ex_handler,	(p[D])	st8 [dst1] = t7, 3*8)
	;;
EX(.ex_handler,	(p[D])	ld8 t9 = [src0], 3*8)
EK(.ex_handler,	(p[D])	ld8 t11 = [src1], 3*8)
EX(.ex_handler,	(p[D])	st8 [dst0] = t6, 3*8)
EK(.ex_handler,	(p[D])	st8 [dst1] = t10, 8)
	;;
EX(.ex_handler,	(p[D])	ld8 t12 = [src0], 8)
EK(.ex_handler,	(p[D])	ld8 t14 = [src1], 8)
EX(.ex_handler,	(p[D])	st8 [dst0] = t9, 3*8)
EK(.ex_handler,	(p[D])	st8 [dst1] = t11, 3*8)
	;;
EX(.ex_handler,	(p[D])	ld8 t13 = [src0], 4*8)
EK(.ex_handler,	(p[D])	ld8 t15 = [src1], 4*8)
EX(.ex_handler,	(p[D])	st8 [dst0] = t12, 8)
EK(.ex_handler,	(p[D])	st8 [dst1] = t14, 8)
	;;
EX(.ex_handler,	(p[C])	ld8 t1 = [src0], 8)
EK(.ex_handler,	(p[C])	ld8 t3 = [src1], 8)
EX(.ex_handler,	(p[D])	st8 [dst0] = t13, 4*8)
EK(.ex_handler,	(p[D])	st8 [dst1] = t15, 4*8)
	br.ctop.sptk .line_copy
	;;

	add	dst0=-8,dst0
	add	src0=-8,src0
	mov	in2=tmp
	.restore sp
	br.sptk.many .medium_copy
	;;

#define BLOCK_SIZE	128*32
#define blocksize	r23
#define curlen		r24

// dest is on 8-byte boundary, src is not. We need to do
// ld8-ld8, shrp, then st8. Max 8 bytes copied per cycle.
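//
// The copy proceeds in BLOCK_SIZE (128*32 = 4096 byte) chunks. In C
// terms the ld8-ld8/shrp step is roughly the following (an illustrative
// sketch; shift = 8*(src & 7), and the real loop keeps the two aligned
// words in rotating registers so only one new ld8 is needed per st8):
//
//	u64 lo = *(u64 *)(src & ~7);		// aligned word covering src
//	u64 hi = *(u64 *)((src & ~7) + 8);	// next aligned word
//	*(u64 *)dst = (lo >> shift) | (hi << (64 - shift));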
.unaligned_src:
	.prologue
	.save ar.pfs, saved_pfs
	alloc	saved_pfs=ar.pfs,3,5,0,8
	.save ar.lc, saved_lc
	mov	saved_lc=ar.lc
	.save pr, saved_pr
	mov	saved_pr=pr
	.body
.4k_block:
	mov	saved_in0=dst0	// need to save all input arguments
	mov	saved_in2=in2
	mov	blocksize=BLOCK_SIZE
	;;
	cmp.lt	p6,p7=blocksize,in2
	mov	saved_in1=src0
	;;
(p6)	mov	in2=blocksize
	;;
	shr.u	r21=in2,7	// this many cache lines
	shr.u	r22=in2,4	// number of 16-byte iterations
	and	curlen=15,in2	// copy length after iteration
	and	r30=7,src0	// source alignment
	;;
	cmp.lt	p7,p8=1,r21
	add	cnt=-1,r21
	;;

	add	src_pre_mem=0,src0	// prefetch src pointer
	add	dst_pre_mem=0,dst0	// prefetch dest pointer
	and	src0=-8,src0		// 1st src pointer
(p7)	mov	ar.lc = cnt
(p8)	mov	ar.lc = r0
	;;
	TEXT_ALIGN(32)
1:	lfetch.fault	  [src_pre_mem], 128
	lfetch.fault.excl [dst_pre_mem], 128
	br.cloop.dptk.few 1b
	;;

	shladd	dst1=r22,3,dst0	// 2nd dest pointer
	shladd	src1=r22,3,src0	// 2nd src pointer
	cmp.eq	p8,p9=r22,r0	// do we really need to loop?
	cmp.le	p6,p7=8,curlen	// have at least 8 bytes remaining?
	add	cnt=-1,r22	// ctop iteration adjustment
	;;
EX(.ex_handler, (p9)	ld8	r33=[src0],8)	// loop primer
EK(.ex_handler, (p9)	ld8	r37=[src1],8)
(p8)	br.dpnt.few .noloop
	;;

// The jump address is calculated based on src alignment. The COPYU
// macro below needs to confine its size to a power of two, so an entry
// can be calculated using shl instead of an expensive multiply. The
// size is then hard-coded by the following #define to match the
// actual size. This makes it somewhat tedious: whenever the COPYU
// macro changes, this needs to be adjusted to match.
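//
// Worked example: each COPYU expansion is 2^LOOP_SIZE = 64 bytes and
// .jmp1 labels the entry after the first one, so the computed target is
//
//	entry = .jump_table - (.jmp1 - .jump_table) + (r30 << LOOP_SIZE)
//	      = .jump_table + (r30 - 1) * 64
//
// mapping src alignment r30 = 1..7 onto COPYU(8)..COPYU(56).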
#define LOOP_SIZE 6
1:
	mov	r29=ip		// jmp_table thread
	mov	ar.lc=cnt
	;;
	add	r29=.jump_table - 1b - (.jmp1-.jump_table), r29
	shl	r28=r30, LOOP_SIZE	// jmp_table thread
	mov	ar.ec=2		// loop setup
	;;
	add	r29=r29,r28	// jmp_table thread
	cmp.eq	p16,p17=r0,r0
	;;
	mov	b6=r29		// jmp_table thread
	;;
	br.cond.sptk.few b6

// for the 8-15 byte case
// We will skip the loop, but need to replicate the side effects
// that the loop produces.
.noloop:
EX(.ex_handler, (p6)	ld8	r37=[src1],8)
	add	src0=8,src0
(p6)	shl	r25=r30,3
	;;
EX(.ex_handler, (p6)	ld8	r27=[src1])
(p6)	shr.u	r28=r37,r25
(p6)	sub	r26=64,r25
	;;
(p6)	shl	r27=r27,r26
	;;
(p6)	or	r21=r28,r27

.unaligned_src_tail:
/* check if we have more than blocksize to copy; if so, go back */
	cmp.gt	p8,p0=saved_in2,blocksize
	;;
(p8)	add	dst0=saved_in0,blocksize
(p8)	add	src0=saved_in1,blocksize
(p8)	sub	in2=saved_in2,blocksize
(p8)	br.dpnt .4k_block
	;;

/* We have up to 15 bytes to copy in the tail. Part of the work is
 * already done in the jump table code; we are in the following state:
 * src side:
 *
 *   xxxxxx xx                   <----- r21 has xxxxxxxx already
 * -------- -------- --------
 * 0        8        16
 *          ^
 *          |
 *          src1
 *
 * dst
 * -------- -------- --------
 *          ^
 *          |
 *          dst1
 */
EX(.ex_handler, (p6)	st8	[dst1]=r21,8)	// more than 8 bytes to copy
(p6)	add	curlen=-8,curlen		// update length
	mov	ar.pfs=saved_pfs
	;;
	mov	ar.lc=saved_lc
	mov	pr=saved_pr,-1
	mov	in2=curlen	// remaining length
	mov	dst0=dst1	// dest pointer
	add	src0=src1,r30	// forward by src alignment
	;;

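/*
 * Roughly, in C (an illustrative sketch): the short copy below moves
 * one byte per pointer per cycle through two interleaved byte pointers:
 *
 *	for (i = 0; i < len; i += 2)	// even bytes via src0/dst0
 *		dst[i] = src[i];
 *	for (i = 1; i < len; i += 2)	// odd bytes via src1/dst1
 *		dst[i] = src[i];
 */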
// 7 bytes or fewer.
.memcpy_short:
	cmp.le	p8,p9   = 1,in2
	cmp.le	p10,p11 = 2,in2
	cmp.le	p12,p13 = 3,in2
	cmp.le	p14,p15 = 4,in2
	add	src1=1,src0	// second src pointer
	add	dst1=1,dst0	// second dest pointer
	;;

EX(.ex_handler_short, (p8)	ld1	t1=[src0],2)
EK(.ex_handler_short, (p10)	ld1	t2=[src1],2)
(p9)	br.ret.dpnt rp		// 0 byte copy
	;;

EX(.ex_handler_short, (p8)	st1	[dst0]=t1,2)
EK(.ex_handler_short, (p10)	st1	[dst1]=t2,2)
(p11)	br.ret.dpnt rp		// 1 byte copy

EX(.ex_handler_short, (p12)	ld1	t3=[src0],2)
EK(.ex_handler_short, (p14)	ld1	t4=[src1],2)
(p13)	br.ret.dpnt rp		// 2 byte copy
	;;

	cmp.le	p6,p7   = 5,in2
	cmp.le	p8,p9   = 6,in2
	cmp.le	p10,p11 = 7,in2

EX(.ex_handler_short, (p12)	st1	[dst0]=t3,2)
EK(.ex_handler_short, (p14)	st1	[dst1]=t4,2)
(p15)	br.ret.dpnt rp		// 3 byte copy
	;;

EX(.ex_handler_short, (p6)	ld1	t5=[src0],2)
EK(.ex_handler_short, (p8)	ld1	t6=[src1],2)
(p7)	br.ret.dpnt rp		// 4 byte copy
	;;

EX(.ex_handler_short, (p6)	st1	[dst0]=t5,2)
EK(.ex_handler_short, (p8)	st1	[dst1]=t6,2)
(p9)	br.ret.dptk rp		// 5 byte copy

EX(.ex_handler_short, (p10)	ld1	t7=[src0],2)
(p11)	br.ret.dptk rp		// 6 byte copy
	;;

EX(.ex_handler_short, (p10)	st1	[dst0]=t7,2)
	br.ret.dptk rp		// done all cases

/* Align dest to the nearest 8-byte boundary. We know we have at
 * least 7 bytes to copy, enough to crawl to an 8-byte boundary.
 * The actual number of bytes to crawl depends on the dest alignment.
 * 7 bytes or fewer are taken care of at .memcpy_short.
 *
 * src0 - source even index
 * src1 - source odd index
 * dst0 - dest even index
 * dst1 - dest odd index
 * r30  - distance to 8-byte boundary
 */
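
/*
 * In C terms (an illustrative sketch): r30 = 8 - (dst & 7) bytes are
 * copied one at a time, then the main path restarts on the now-aligned
 * destination:
 *
 *	n = 8 - ((uintptr_t)dst & 7);	// 1..7 here
 *	len -= n;
 *	while (n--)
 *		*dst++ = *src++;
 *	// then continue at .aligned_src or .unaligned_src
 */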

.align_dest:
	add	src1=1,in1		// source odd index
	cmp.le	p7,p0 = 2,r30		// for .align_dest
	cmp.le	p8,p0 = 3,r30		// for .align_dest
EX(.ex_handler_short, (p6)	ld1	t1=[src0],2)
	cmp.le	p9,p0 = 4,r30		// for .align_dest
	cmp.le	p10,p0 = 5,r30
	;;
EX(.ex_handler_short, (p7)	ld1	t2=[src1],2)
EK(.ex_handler_short, (p8)	ld1	t3=[src0],2)
	cmp.le	p11,p0 = 6,r30
EX(.ex_handler_short, (p6)	st1	[dst0] = t1,2)
	cmp.le	p12,p0 = 7,r30
	;;
EX(.ex_handler_short, (p9)	ld1	t4=[src1],2)
EK(.ex_handler_short, (p10)	ld1	t5=[src0],2)
EX(.ex_handler_short, (p7)	st1	[dst1] = t2,2)
EK(.ex_handler_short, (p8)	st1	[dst0] = t3,2)
	;;
EX(.ex_handler_short, (p11)	ld1	t6=[src1],2)
EK(.ex_handler_short, (p12)	ld1	t7=[src0],2)
	cmp.eq	p6,p7=r28,r29
EX(.ex_handler_short, (p9)	st1	[dst1] = t4,2)
EK(.ex_handler_short, (p10)	st1	[dst0] = t5,2)
	sub	in2=in2,r30
	;;
EX(.ex_handler_short, (p11)	st1	[dst1] = t6,2)
EK(.ex_handler_short, (p12)	st1	[dst0] = t7)
	add	dst0=in0,r30		// setup arguments
	add	src0=in1,r30
(p6)	br.cond.dptk .aligned_src
(p7)	br.cond.dpnt .unaligned_src
	;;

/* main loop body in jump table format */
#define COPYU(shift)							\
1:									\
EX(.ex_handler,  (p16)	ld8	r32=[src0],8);		/* 1 */		\
EK(.ex_handler,  (p16)	ld8	r36=[src1],8);				\
		 (p17)	shrp	r35=r33,r34,shift;;	/* 1 */		\
EX(.ex_handler,  (p6)	ld8	r22=[src1]);	/* common, prime for tail section */ \
		 nop.m	0;						\
		 (p16)	shrp	r38=r36,r37,shift;			\
EX(.ex_handler,  (p17)	st8	[dst0]=r35,8);		/* 1 */		\
EK(.ex_handler,  (p17)	st8	[dst1]=r39,8);				\
		 br.ctop.dptk.few 1b;;					\
		 (p7)	add	src1=-8,src1;	/* back out for <8 byte case */ \
		 shrp	r21=r22,r38,shift;	/* speculative work */	\
		 br.sptk.few .unaligned_src_tail /* branch out of jump table */ \
		 ;;
	TEXT_ALIGN(32)
.jump_table:
	COPYU(8)	// unaligned cases
.jmp1:
	COPYU(16)
	COPYU(24)
	COPYU(32)
	COPYU(40)
	COPYU(48)
	COPYU(56)

#undef A
#undef B
#undef C
#undef D

/*
 * Due to lack of local tag support in the gcc 2.x assembler, it is not
 * clear which instruction failed in the bundle. The exception algorithm
 * is that we first figure out the faulting address, then detect if any
 * progress has been made on the copy; if so, we redo the copy from the
 * last known copied location up to the faulting address (exclusive). In
 * the copy_from_user case, the remaining bytes in the kernel buffer
 * will be zeroed.
 *
 * Take copy_from_user as an example: there are multiple loads in a
 * bundle, and those loads could span two pages, so the faulting address
 * is calculated as page_round_down(max(src0, src1)). This is based on
 * the knowledge that if we can access one byte in a page, we can access
 * any byte in that page.
 *
 * predicates used in the exception handler:
 * p6-p7:   direction
 * p10-p11: src faulting addr calculation
 * p12-p13: dst faulting addr calculation
 */
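
/*
 * page_round_down() above just clears the page-offset bits; in C terms
 * (an illustrative sketch, using the kernel's PAGE_SIZE/PAGE_SHIFT):
 *
 *	fault_addr = max(src0, src1) & ~(PAGE_SIZE - 1);
 *
 * which is what the "dep F = r0,...,0,PAGE_SHIFT" below computes.
 */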

#define A	r19
#define B	r20
#define C	r21
#define D	r22
#define F	r28

#define saved_retval		loc0
#define saved_rtlink		loc1
#define saved_pfs_stack		loc2

.ex_hndlr_s:
	add	src0=8,src0
	br.sptk .ex_handler
	;;
.ex_hndlr_d:
	add	dst0=8,dst0
	br.sptk .ex_handler
	;;
.ex_hndlr_lcpy_1:
	mov	src1=src_pre_mem
	mov	dst1=dst_pre_mem
	cmp.gtu	p10,p11=src_pre_mem,saved_in1
	cmp.gtu	p12,p13=dst_pre_mem,saved_in0
	;;
(p10)	add	src0=8,saved_in1
(p11)	mov	src0=saved_in1
(p12)	add	dst0=8,saved_in0
(p13)	mov	dst0=saved_in0
	br.sptk .ex_handler
.ex_handler_lcpy:
	// in the line_copy block, the preload addresses should always be
	// ahead of the other two src/dst pointers. Furthermore, src1/dst1
	// should always be ahead of src0/dst0.
	mov	src1=src_pre_mem
	mov	dst1=dst_pre_mem
.ex_handler:
	mov	pr=saved_pr,-1		// first restore pr, lc, and pfs
	mov	ar.lc=saved_lc
	mov	ar.pfs=saved_pfs
	;;
.ex_handler_short: // faults in these sections didn't change pr, lc, pfs
	cmp.ltu	p6,p7=saved_in0, saved_in1	// get the copy direction
	cmp.ltu	p10,p11=src0,src1
	cmp.ltu	p12,p13=dst0,dst1
	fcmp.eq	p8,p0=f6,f0		// is it memcpy?
	mov	tmp = dst0
	;;
(p11)	mov	src1 = src0		// pick the larger of the two
(p13)	mov	dst0 = dst1		// make dst0 the smaller one
(p13)	mov	dst1 = tmp		// and dst1 the larger one
	;;
(p6)	dep	F = r0,dst1,0,PAGE_SHIFT // usr dst rounded down to page boundary
(p7)	dep	F = r0,src1,0,PAGE_SHIFT // usr src rounded down to page boundary
	;;
(p6)	cmp.le	p14,p0=dst0,saved_in0	// no progress has been made on the store
(p7)	cmp.le	p14,p0=src0,saved_in1	// no progress has been made on the load
	mov	retval=saved_in2
(p8)	ld1	tmp=[src1]		// force an oops for a memcpy call
(p8)	st1	[dst1]=r0		// force an oops for a memcpy call
(p14)	br.ret.sptk.many rp

/*
 * The remaining bytes to copy are calculated as:
 *
 * A = (faulting_addr - orig_src)	-> len to the faulting ld address
 *	or
 *     (faulting_addr - orig_dst)	-> len to the faulting st address
 * B = (cur_dst - orig_dst)		-> len copied so far
 * C = A - B				-> len that needs to be copied
 * D = orig_len - A			-> len that needs to be left alone
 */
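
/*
 * For example (hypothetical numbers): for a 200-byte copy where the
 * rounded-down fault address lands A = 150 bytes into the source and
 * B = 128 bytes have already been stored, C = 22 bytes are re-copied
 * by the recursive __copy_user call below and D = 50 bytes are
 * reported back as not copied.
 */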
(p6)	sub	A = F, saved_in0
(p7)	sub	A = F, saved_in1
	clrrrb
	;;
	alloc	saved_pfs_stack=ar.pfs,3,3,3,0
	cmp.lt	p8,p0=A,r0
	sub	B = dst0, saved_in0	// how many bytes copied so far
	;;
(p8)	mov	A = 0			// A shouldn't be negative, cap it
	;;
	sub	C = A, B
	sub	D = saved_in2, A
	;;
	cmp.gt	p8,p0=C,r0		// more than 1 byte?
	mov	r8=0
	mov	saved_retval = D
	mov	saved_rtlink = b0

	add	out0=saved_in0, B
	add	out1=saved_in1, B
	mov	out2=C
(p8)	br.call.sptk.few b0=__copy_user	// recursive call
	;;

	add	saved_retval=saved_retval,r8	// the call above might return a non-zero value
	;;

	mov	retval=saved_retval
	mov	ar.pfs=saved_pfs_stack
	mov	b0=saved_rtlink
	br.ret.sptk.many rp

/* end of McKinley specific optimization */
END(__copy_user)
EXPORT_SYMBOL(__copy_user)