/* NG4memcpy.S: Niagara-4 optimized memcpy.
 *
 * Copyright (C) 2012 David S. Miller (davem@davemloft.net)
 */
#ifdef __KERNEL__
#include <asm/visasm.h>
#define GLOBAL_SPARE	%g7
#else
#define ASI_BLK_INIT_QUAD_LDD_P 0xe2
#define FPRS_FEF		0x04

/* On T4 it is very expensive to access ASRs like %fprs and
 * %asi; avoiding a read or a write can save ~50 cycles.
 */
#define FPU_ENTER			\
	rd	%fprs, %o5;		\
	andcc	%o5, FPRS_FEF, %g0;	\
	be,a,pn	%icc, 999f;		\
	 wr	%g0, FPRS_FEF, %fprs;	\
	999:
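
/* FPU_ENTER enables the FPU only when FPRS_FEF is not already set,
 * and leaves the original %fprs value in %o5 so that VISExitHalf
 * can restore it without a second read.
 */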

#ifdef MEMCPY_DEBUG
#define VISEntryHalf FPU_ENTER; \
		     clr %g1; clr %g2; clr %g3; clr %g5; subcc %g0, %g0, %g0;
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#else
#define VISEntryHalf FPU_ENTER
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#endif

#define GLOBAL_SPARE	%g5
#endif

#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA
#define STORE_ASI	ASI_BLK_INIT_QUAD_LDD_P
#else
#define STORE_ASI	0x80		/* ASI_P */
#endif
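
/* Initializing block stores exist only on Niagara-family chips;
 * elsewhere we fall back to plain stores through the primary
 * address space (ASI_P).
 */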

#if !defined(EX_LD) && !defined(EX_ST)
#define EX_LD(x)	x
#define EX_ST(x)	x
#endif

#ifndef EX_RETVAL
#define EX_RETVAL(x)	x
#endif
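
/* The EX_* hooks default to bare accesses; when this file is included
 * by the copy_{from,to}_user wrappers they are pre-defined to versions
 * that carry exception-table entries for fault handling.
 */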

#define LOAD(type,addr,dest)	type [addr], dest

#ifndef MEMCPY_DEBUG
#define STORE(type,src,addr)	type src, [addr]
#else
#define STORE(type,src,addr)	type##a src, [addr] %asi
#endif

#define STORE_INIT(src,addr)	stxa src, [addr] STORE_ASI

#define FUNC_NAME	NG4memcpy

	.register	%g2,#scratch
	.register	%g3,#scratch

	.type	FUNC_NAME,#function
FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */

.Llarge:	/* len >= 0x80 */
	/* First get dest 8-byte aligned.  */
1:	EX_LD(LOAD(ldub, %o1 + 0x00, %g2))
	EX_ST(STORE(stb, %g2, %o0 - 0x01))
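
	/* Stream eight prefetches, one per 64-byte line, so that
	 * %o1 + 0x40 through %o1 + 0x200 are inbound before the copy
	 * loops below touch them.
	 */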
51:	LOAD(prefetch, %o1 + 0x040, #n_reads_strong)
	LOAD(prefetch, %o1 + 0x080, #n_reads_strong)
	LOAD(prefetch, %o1 + 0x0c0, #n_reads_strong)
	LOAD(prefetch, %o1 + 0x100, #n_reads_strong)
	LOAD(prefetch, %o1 + 0x140, #n_reads_strong)
	LOAD(prefetch, %o1 + 0x180, #n_reads_strong)
	LOAD(prefetch, %o1 + 0x1c0, #n_reads_strong)
	LOAD(prefetch, %o1 + 0x200, #n_reads_strong)

	/* Check if we can use the straight fully aligned
	 * loop, or whether we need the alignaddr/faligndata variant.
	 */
	bne,pn	%icc, .Llarge_src_unaligned

	/* Legitimize the use of initializing stores by getting dest
	 * to be 64-byte aligned.
	 */
	brz,pt	%g1, .Llarge_aligned

1:	EX_LD(LOAD(ldx, %o1 + 0x00, %g2))
	EX_ST(STORE(stx, %g2, %o0 - 0x08))

.Llarge_aligned:
	/* len >= 0x80 && src 8-byte aligned && dest 8-byte aligned */
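	/* Each iteration below moves 64 bytes.  STORE_INIT goes through
	 * the block-init ASI, which allocates the destination cache line
	 * without first fetching its old contents from memory.
	 */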
1:	EX_LD(LOAD(ldx, %o1 + 0x00, %g1))
	EX_LD(LOAD(ldx, %o1 - 0x38, %g2))
	EX_LD(LOAD(ldx, %o1 - 0x30, %g3))
	EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE))
	EX_LD(LOAD(ldx, %o1 - 0x20, %o5))
	EX_ST(STORE_INIT(%g1, %o0))
	EX_ST(STORE_INIT(%g2, %o0))
	EX_LD(LOAD(ldx, %o1 - 0x18, %g2))
	EX_ST(STORE_INIT(%g3, %o0))
	EX_LD(LOAD(ldx, %o1 - 0x10, %g3))
	EX_ST(STORE_INIT(GLOBAL_SPARE, %o0))
	EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE))
	EX_ST(STORE_INIT(%o5, %o0))
	EX_ST(STORE_INIT(%g2, %o0))
	EX_ST(STORE_INIT(%g3, %o0))
	EX_ST(STORE_INIT(GLOBAL_SPARE, %o0))
	LOAD(prefetch, %o1 + 0x200, #n_reads_strong)
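
	/* Initializing stores are weakly ordered, so synchronize them
	 * with later loads and stores before leaving the block-init loop.
	 */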
	membar		#StoreLoad | #StoreStore
	ble,pn	%icc, .Lsmall_unaligned
	 nop
	ba,a,pt	%icc, .Lmedium_noprefetch

.Lexit:	retl
	 mov	EX_RETVAL(%o3), %o0

.Llarge_src_unaligned:
	VISEntryHalfFast(.Lmedium_vis_entry_fail)
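	/* VISEntryHalfFast bails to .Lmedium_vis_entry_fail, an integer
	 * copy path, when VIS registers cannot be grabbed without first
	 * saving existing FPU state.
	 */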
	alignaddr	%o1, %g0, %g1
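	/* alignaddr rounds %o1 down to an 8-byte boundary (into %g1) and
	 * records the misalignment in %gsr; each faligndata below then
	 * extracts one aligned 8-byte word from a pair of adjacent
	 * source doublewords.
	 */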
	EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0))
1:	EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2))
	EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4))
	EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6))
	EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8))
	EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10))
	EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12))
	EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14))
	faligndata	%f0, %f2, %f16
	EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0))
	faligndata	%f2, %f4, %f18
	faligndata	%f4, %f6, %f20
	faligndata	%f6, %f8, %f22
	faligndata	%f8, %f10, %f24
	faligndata	%f10, %f12, %f26
	faligndata	%f12, %f14, %f28
	faligndata	%f14, %f0, %f30
	EX_ST_FP(STORE(std, %f16, %o0 + 0x00))
	EX_ST_FP(STORE(std, %f18, %o0 + 0x08))
	EX_ST_FP(STORE(std, %f20, %o0 + 0x10))
	EX_ST_FP(STORE(std, %f22, %o0 + 0x18))
	EX_ST_FP(STORE(std, %f24, %o0 + 0x20))
	EX_ST_FP(STORE(std, %f26, %o0 + 0x28))
	EX_ST_FP(STORE(std, %f28, %o0 + 0x30))
	EX_ST_FP(STORE(std, %f30, %o0 + 0x38))
	LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
	ble,pn	%icc, .Lsmall_unaligned
	ba,a,pt	%icc, .Lmedium_unaligned

.Lmedium_vis_entry_fail:
	LOAD(prefetch, %o1 + 0x40, #n_reads_strong)
	bne,pn	%icc, .Lmedium_unaligned
	 nop
.Lmedium_noprefetch:
	andncc	%o2, 0x20 - 1, %o5
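	/* %o5 = len rounded down to a multiple of 32; the loop below
	 * moves four 8-byte words per iteration, and the leftovers are
	 * mopped up starting at 2:.
	 */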
1:	EX_LD(LOAD(ldx, %o1 + 0x00, %g1))
	EX_LD(LOAD(ldx, %o1 + 0x08, %g2))
	EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE))
	EX_LD(LOAD(ldx, %o1 + 0x18, %o4))
	EX_ST(STORE(stx, %g1, %o0 + 0x00))
	EX_ST(STORE(stx, %g2, %o0 + 0x08))
	EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10))
	EX_ST(STORE(stx, %o4, %o0 + 0x18))
2:	andcc	%o2, 0x18, %o5
1:	EX_LD(LOAD(ldx, %o1 + 0x00, %g1))
	EX_ST(STORE(stx, %g1, %o0 - 0x08))
3:	brz,pt	%o2, .Lexit
	EX_LD(LOAD(lduw, %o1 + 0x00, %g1))
	EX_ST(STORE(stw, %g1, %o0 - 0x04))

.Lmedium_unaligned:
	/* First get dest 8-byte aligned.  */
1:	EX_LD(LOAD(ldub, %o1 + 0x00, %g2))
	EX_ST(STORE(stb, %g2, %o0 - 0x01))
	brz,pn	%g1, .Lmedium_noprefetch
	EX_LD(LOAD(ldx, %o1 + 0x00, %o4))
	andn	%o2, 0x08 - 1, %o5
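	/* Shift-and-merge: %o4 holds the previous source doubleword,
	 * already shifted into place; OR in the top bits of each newly
	 * loaded word to produce one aligned 8-byte store per load.
	 */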
1:	EX_LD(LOAD(ldx, %o1 + 0x08, %g3))
	srlx	%g3, %g2, GLOBAL_SPARE
	or	GLOBAL_SPARE, %o4, GLOBAL_SPARE
	EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00))
	ba,pt	%icc, .Lsmall_unaligned
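
	/* Tiny copies (at most three bytes) are fully unrolled;
	 * each byte is stored as soon as it is loaded.
	 */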
.Ltiny:
	EX_LD(LOAD(ldub, %o1 + 0x00, %g1))
	EX_ST(STORE(stb, %g1, %o0 + 0x00))
	EX_LD(LOAD(ldub, %o1 + 0x01, %g1))
	EX_ST(STORE(stb, %g1, %o0 + 0x01))
	EX_LD(LOAD(ldub, %o1 + 0x02, %g1))
	EX_ST(STORE(stb, %g1, %o0 + 0x02))

	bne,pn	%icc, .Lsmall_unaligned
	 andn	%o2, 0x4 - 1, %o5
1:	EX_LD(LOAD(lduw, %o1 + 0x00, %g1))
	EX_ST(STORE(stw, %g1, %o0 - 0x04))

.Lsmall_unaligned:
1:	EX_LD(LOAD(ldub, %o1 + 0x00, %g1))
	EX_ST(STORE(stb, %g1, %o0 - 0x01))
	.size	FUNC_NAME, .-FUNC_NAME