/* U3memcpy.S: UltraSparc-III optimized memcpy.
 *
 * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
 */
#ifdef __KERNEL__
#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#define GLOBAL_SPARE	%g7
#else
#define ASI_BLK_P 0xf0
#define FPRS_FEF  0x04
#ifdef MEMCPY_DEBUG
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
		     clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#else
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#endif
#define GLOBAL_SPARE	%g5
#endif
#ifndef EX_LD
#define EX_LD(x,y)	x
#endif
#ifndef EX_LD_FP
#define EX_LD_FP(x,y)	x
#endif

#ifndef EX_ST
#define EX_ST(x,y)	x
#endif
#ifndef EX_ST_FP
#define EX_ST_FP(x,y)	x
#endif

#ifndef LOAD
#define LOAD(type,addr,dest)	type [addr], dest
#endif

#ifndef STORE
#define STORE(type,src,addr)	type src, [addr]
#endif

#ifndef STORE_BLK
#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_P
#endif

#ifndef FUNC_NAME
#define FUNC_NAME	U3memcpy
#endif

#ifndef PREAMBLE
#define PREAMBLE
#endif

#ifndef XCC
#define XCC xcc
#endif
	.register	%g2,#scratch
	.register	%g3,#scratch
	/* Special/non-trivial issues of this code:
	 *
	 * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
	 * 2) Only low 32 FPU registers are used so that only the
	 *    lower half of the FPU register set is dirtied by this
	 *    code.  This is especially important in the kernel.
	 * 3) This code never prefetches cachelines past the end
	 *    of the source buffer.
	 */
	.text
#ifndef EX_RETVAL
#define EX_RETVAL(x)	x
__restore_fp:
	VISExitHalf
	retl
	 nop
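	/* Exception-return stubs (a note on the convention, derived from
	 * the code below, not original commentary): on a fault the handler
	 * branches to the stub named in the EX_LD/EX_ST macro, and each
	 * stub's name spells out the expression it leaves in %o0, the
	 * number of bytes not copied.  E.g. the 16-byte loop decrements
	 * GLOBAL_SPARE by 0x10 before its loads issue, so a fault there
	 * reports those 16 bytes via U3_retl_o2_plus_GS_plus_0x10.
	 */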
ENTRY(U3_retl_o2_plus_g2_plus_g1_plus_1_fp)
	add	%g1, 1, %g1
	add	%g2, %g1, %g2
	ba,pt	%xcc, __restore_fp
	 add	%o2, %g2, %o0
ENDPROC(U3_retl_o2_plus_g2_plus_g1_plus_1_fp)
ENTRY(U3_retl_o2_plus_g2_fp)
	ba,pt	%xcc, __restore_fp
	 add	%o2, %g2, %o0
ENDPROC(U3_retl_o2_plus_g2_fp)
ENTRY(U3_retl_o2_plus_g2_plus_8_fp)
	add	%g2, 8, %g2
	ba,pt	%xcc, __restore_fp
	 add	%o2, %g2, %o0
ENDPROC(U3_retl_o2_plus_g2_plus_8_fp)
ENTRY(U3_retl_o2)
	retl
	 mov	%o2, %o0
ENDPROC(U3_retl_o2)
ENTRY(U3_retl_o2_plus_1)
	retl
	 add	%o2, 1, %o0
ENDPROC(U3_retl_o2_plus_1)
ENTRY(U3_retl_o2_plus_4)
	retl
	 add	%o2, 4, %o0
ENDPROC(U3_retl_o2_plus_4)
ENTRY(U3_retl_o2_plus_8)
	retl
	 add	%o2, 8, %o0
ENDPROC(U3_retl_o2_plus_8)
ENTRY(U3_retl_o2_plus_g1_plus_1)
	add	%g1, 1, %g1
	retl
	 add	%o2, %g1, %o0
ENDPROC(U3_retl_o2_plus_g1_plus_1)
ENTRY(U3_retl_o2_fp)
	ba,pt	%xcc, __restore_fp
	 mov	%o2, %o0
ENDPROC(U3_retl_o2_fp)
ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
	sll	%o3, 6, %o3
	add	%o3, 0x80, %o3
	ba,pt	%xcc, __restore_fp
	 add	%o2, %o3, %o0
ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
	sll	%o3, 6, %o3
	add	%o3, 0x40, %o3
	ba,pt	%xcc, __restore_fp
	 add	%o2, %o3, %o0
ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
ENTRY(U3_retl_o2_plus_GS_plus_0x10)
	add	GLOBAL_SPARE, 0x10, GLOBAL_SPARE
	retl
	 add	%o2, GLOBAL_SPARE, %o0
ENDPROC(U3_retl_o2_plus_GS_plus_0x10)
ENTRY(U3_retl_o2_plus_GS_plus_0x08)
	add	GLOBAL_SPARE, 0x08, GLOBAL_SPARE
	retl
	 add	%o2, GLOBAL_SPARE, %o0
ENDPROC(U3_retl_o2_plus_GS_plus_0x08)
ENTRY(U3_retl_o2_and_7_plus_GS)
	and	%o2, 7, %o2
	retl
	 add	%o2, GLOBAL_SPARE, %o0
ENDPROC(U3_retl_o2_and_7_plus_GS)
ENTRY(U3_retl_o2_and_7_plus_GS_plus_8)
	add	GLOBAL_SPARE, 8, GLOBAL_SPARE
	and	%o2, 7, %o2
	retl
	 add	%o2, GLOBAL_SPARE, %o0
ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
#endif

	.align		64
	/* The cheetah's flexible spine, oversized liver, enlarged heart,
	 * slender muscular body, and claws make it the swiftest hunter
	 * in Africa and the fastest animal on land.  Can reach speeds
	 * of up to 2.4GB per second.
	 */
	.globl		FUNC_NAME
	.type		FUNC_NAME,#function
FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
	srlx		%o2, 31, %g2
	cmp		%g2, 0
	/* software trap 5 "Range Check" if len >= 0x80000000 */
	tne		%xcc, 5
	PREAMBLE
	mov		%o0, %o4

	cmp		%o2, 0
	be,pn		%XCC, end_return
	 or		%o0, %o1, %o3

	cmp		%o2, 16
	blu,a,pn	%XCC, less_than_16
	 or		%o3, %o2, %o3

	cmp		%o2, (3 * 64)
	blu,pt		%XCC, less_than_192
	 andcc		%o3, 0x7, %g0
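	/* Size dispatch, as the branches above read: len == 0 returns
	 * immediately, len < 16 takes less_than_16, 16 <= len < 192
	 * takes the integer-register loops at less_than_192, and only
	 * copies of 192 bytes or more pay for the VIS block-move setup
	 * below.
	 */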
	/* Clobbers o5/g1/g2/g3/g7/icc/xcc.  We must preserve
	 * o5 from here until we hit VISExitHalf.
	 */
	VISEntryHalf

	/* Is 'dst' already aligned on a 64-byte boundary? */
	andcc		%o0, 0x3f, %g2
	be,pt		%XCC, 2f
	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
	 * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
	 * subtract this from 'len'.
	 */
	 sub		%o0, %o1, GLOBAL_SPARE
	sub		%g2, 0x40, %g2
	sub		%g0, %g2, %g2
	sub		%o2, %g2, %o2
	andcc		%g2, 0x7, %g1
	be,pt		%icc, 2f
	 and		%g2, 0x38, %g2
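	/* Worked example (not part of the original comments): if
	 * dst & 0x3f == 0x15, then 0x15 - 0x40 = -0x2b, so %g2 = 0x2b:
	 * 43 bytes are needed to reach the next 64-byte boundary.  The
	 * low bits (%g1 = 0x2b & 0x7 = 3) are byte-copied just below,
	 * and the 8-byte-aligned part (%g2 = 0x2b & 0x38 = 0x28) goes
	 * through the 8-byte faligndata loop.
	 */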
1:	subcc		%g1, 0x1, %g1
	EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U3_retl_o2_plus_g2_plus_g1_plus_1_fp)
	EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE), U3_retl_o2_plus_g2_plus_g1_plus_1_fp)
	bgu,pt		%XCC, 1b
	 add		%o1, 0x1, %o1

	add		%o1, GLOBAL_SPARE, %o0
2:	cmp		%g2, 0x0
	and		%o1, 0x7, %g1
	be,pt		%icc, 3f
	 alignaddr	%o1, %g0, %o1

	EX_LD_FP(LOAD(ldd, %o1, %f4), U3_retl_o2_plus_g2_fp)
1:	EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U3_retl_o2_plus_g2_fp)
	add		%o1, 0x8, %o1
	subcc		%g2, 0x8, %g2
	faligndata	%f4, %f6, %f0
	EX_ST_FP(STORE(std, %f0, %o0), U3_retl_o2_plus_g2_plus_8_fp)
	be,pn		%icc, 3f
	 add		%o0, 0x8, %o0

	EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U3_retl_o2_plus_g2_fp)
	add		%o1, 0x8, %o1
	subcc		%g2, 0x8, %g2
	faligndata	%f6, %f4, %f2
	EX_ST_FP(STORE(std, %f2, %o0), U3_retl_o2_plus_g2_plus_8_fp)
	bne,pt		%icc, 1b
	 add		%o0, 0x8, %o0
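	/* A sketch of the VIS idiom used above (a reading of the code,
	 * not original commentary): alignaddr rounded %o1 down to an
	 * 8-byte boundary and latched the low three source bits in
	 * %gsr, so each faligndata extracts the destination-aligned
	 * 8 bytes straddling two consecutive ldd results.  The loop
	 * ping-pongs %f4/%f6 so each store needs only one new load.
	 */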
3:	LOAD(prefetch, %o1 + 0x000, #one_read)
	LOAD(prefetch, %o1 + 0x040, #one_read)
	andn		%o2, (0x40 - 1), GLOBAL_SPARE
	LOAD(prefetch, %o1 + 0x080, #one_read)
	LOAD(prefetch, %o1 + 0x0c0, #one_read)
	LOAD(prefetch, %o1 + 0x100, #one_read)
	EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0), U3_retl_o2_fp)
	LOAD(prefetch, %o1 + 0x140, #one_read)
	EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_fp)
	LOAD(prefetch, %o1 + 0x180, #one_read)
	EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_fp)
	LOAD(prefetch, %o1 + 0x1c0, #one_read)
	faligndata	%f0, %f2, %f16
	EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_fp)
	faligndata	%f2, %f4, %f18
	EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_fp)
	faligndata	%f4, %f6, %f20
	EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_fp)
	faligndata	%f6, %f8, %f22

	EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_fp)
	faligndata	%f8, %f10, %f24
	EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_fp)
	faligndata	%f10, %f12, %f26
	EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_fp)
	subcc		GLOBAL_SPARE, 0x80, GLOBAL_SPARE
	add		%o1, 0x40, %o1
	bgu,pt		%XCC, 1f
	 srl		GLOBAL_SPARE, 6, %o3
	ba,pt		%xcc, 2f
	 nop
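	/* Bookkeeping, as read from the code: GLOBAL_SPARE is len
	 * rounded down to a multiple of 64.  Subtracting 0x80 accounts
	 * for the block already staged in %f0-%f14 plus the final block
	 * copied after the loop, and %o3 counts the remaining
	 * iterations, which is why the fault stubs rebuild the uncopied
	 * length as (%o3 << 6) + 0x80 or (%o3 << 6) + 0x40.
	 */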
	.align		64
1:
	EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
	faligndata	%f12, %f14, %f28
	EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
	faligndata	%f14, %f0, %f30
	EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
	EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
	faligndata	%f0, %f2, %f16
	add		%o0, 0x40, %o0

	EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
	faligndata	%f2, %f4, %f18
	EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
	faligndata	%f4, %f6, %f20
	EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
	subcc		%o3, 0x01, %o3
	faligndata	%f6, %f8, %f22
	EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)

	faligndata	%f8, %f10, %f24
	EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
	LOAD(prefetch, %o1 + 0x1c0, #one_read)
	faligndata	%f10, %f12, %f26
	bg,pt		%XCC, 1b
	 add		%o1, 0x40, %o1
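	/* The loop above is software pipelined: each pass block-stores
	 * the 64 destination-aligned bytes assembled on the previous
	 * pass (%f16-%f30) while loading and realigning the next 64
	 * source bytes, overlapping the stda with the ldd/faligndata
	 * stream.
	 */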
	/* Finally we copy the last full 64-byte block. */
2:
	EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
	faligndata	%f12, %f14, %f28
	EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
	faligndata	%f14, %f0, %f30
	EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
	EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
	faligndata	%f0, %f2, %f16
	EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
	faligndata	%f2, %f4, %f18
	EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
	faligndata	%f4, %f6, %f20
	EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
	faligndata	%f6, %f8, %f22
	EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
	faligndata	%f8, %f10, %f24
	cmp		%g1, 0
	be,pt		%XCC, 1f
	 add		%o0, 0x40, %o0
	EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
1:	faligndata	%f10, %f12, %f26
	faligndata	%f12, %f14, %f28
	faligndata	%f14, %f0, %f30
	EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
	add		%o0, 0x40, %o0
	add		%o1, 0x40, %o1
	/* Now we copy the (len modulo 64) bytes at the end.
	 * Note how we borrow the %f0 loaded above.
	 *
	 * Also notice how this code is careful not to perform a
	 * load past the end of the src buffer.
	 */
	and		%o2, 0x3f, %o2
	andcc		%o2, 0x38, %g2
	be,pn		%XCC, 2f
	 subcc		%g2, 0x8, %g2
	be,pn		%XCC, 2f
	 cmp		%g1, 0

	sub		%o2, %g2, %o2
	be,a,pt		%XCC, 1f
	 EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0), U3_retl_o2_plus_g2_fp)
1:	EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2), U3_retl_o2_plus_g2_fp)
	add		%o1, 0x8, %o1
	subcc		%g2, 0x8, %g2
	faligndata	%f0, %f2, %f8
	EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8_fp)
	be,pn		%XCC, 2f
	 add		%o0, 0x8, %o0
	EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0), U3_retl_o2_plus_g2_fp)
	add		%o1, 0x8, %o1
	subcc		%g2, 0x8, %g2
	faligndata	%f2, %f0, %f8
	EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8_fp)
	bne,pn		%XCC, 1b
	 add		%o0, 0x8, %o0
	/* If anything is left, we copy it one byte at a time.
	 * Note that %g1 is (src & 0x7) saved above before the
	 * alignaddr was performed.
	 */
2:	cmp		%o2, 0
	add		%o1, %g1, %o1
	VISExitHalf
	be,pn		%XCC, end_return
	 sub		%o0, %o1, %o3

	andcc		%g1, 0x7, %g0
	bne,pn		%icc, 90f
	 andcc		%o2, 0x8, %g0
	be,pt		%icc, 1f
	 nop
	EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2)
	EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2)
	add		%o1, 0x8, %o1
	sub		%o2, 0x8, %o2

1:	andcc		%o2, 0x4, %g0
	be,pt		%icc, 1f
	 nop
	EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2)
	EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2)
	add		%o1, 0x4, %o1
	sub		%o2, 0x4, %o2

1:	andcc		%o2, 0x2, %g0
	be,pt		%icc, 1f
	 nop
	EX_LD(LOAD(lduh, %o1, %o5), U3_retl_o2)
	EX_ST(STORE(sth, %o5, %o1 + %o3), U3_retl_o2)
	add		%o1, 0x2, %o1
	sub		%o2, 0x2, %o2

1:	andcc		%o2, 0x1, %g0
	be,pt		%icc, end_return
	 nop
	EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2)
	ba,pt		%xcc, end_return
	 EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2)
	.align		64
	/* 16 <= len < 192 */
less_than_192:
	bne,pn		%XCC, 75f
	 sub		%o0, %o1, %o3

72:
	andn		%o2, 0xf, GLOBAL_SPARE
	and		%o2, 0xf, %o2
1:	subcc		GLOBAL_SPARE, 0x10, GLOBAL_SPARE
	EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U3_retl_o2_plus_GS_plus_0x10)
	EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U3_retl_o2_plus_GS_plus_0x10)
	EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x10)
	add		%o1, 0x8, %o1
	EX_ST(STORE(stx, %g1, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x08)
	bgu,pt		%XCC, 1b
	 add		%o1, 0x8, %o1
73:	andcc		%o2, 0x8, %g0
	be,pt		%XCC, 1f
	 nop
	sub		%o2, 0x8, %o2
	EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2_plus_8)
	EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_8)
	add		%o1, 0x8, %o1
1:	andcc		%o2, 0x4, %g0
	be,pt		%XCC, 1f
	 nop
	sub		%o2, 0x4, %o2
	EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2_plus_4)
	EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2_plus_4)
	add		%o1, 0x4, %o1
1:	cmp		%o2, 0
	be,pt		%XCC, end_return
	 nop
	ba,pt		%xcc, 90f
	 nop

75:	andcc		%o0, 0x7, %g1
	sub		%g1, 0x8, %g1
	be,pn		%icc, 2f
	 sub		%g0, %g1, %g1
	sub		%o2, %g1, %o2

1:	subcc		%g1, 1, %g1
	EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2_plus_g1_plus_1)
	EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2_plus_g1_plus_1)
	bgu,pt		%icc, 1b
	 add		%o1, 1, %o1

2:	add		%o1, %o3, %o0
	andcc		%o1, 0x7, %g1
	bne,pt		%icc, 8f
	 sll		%g1, 3, %g1

	cmp		%o2, 16
	bgeu,pt		%icc, 72b
	 nop
	ba,a,pt		%xcc, 73b

8:	mov		64, %o3
	andn		%o1, 0x7, %o1
	EX_LD(LOAD(ldx, %o1, %g2), U3_retl_o2)
	sub		%o3, %g1, %o3
	andn		%o2, 0x7, GLOBAL_SPARE
	sllx		%g2, %g1, %g2

1:	EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U3_retl_o2_and_7_plus_GS)
	subcc		GLOBAL_SPARE, 0x8, GLOBAL_SPARE
	add		%o1, 0x8, %o1
	srlx		%g3, %o3, %o5
	or		%o5, %g2, %o5
	EX_ST(STORE(stx, %o5, %o0), U3_retl_o2_and_7_plus_GS_plus_8)
	add		%o0, 0x8, %o0
	bgu,pt		%icc, 1b
	 sllx		%g3, %g1, %g2
	srl		%g1, 3, %g1
	andcc		%o2, 0x7, %o2
	be,pn		%icc, end_return
	 add		%o1, %g1, %o1
	ba,pt		%xcc, 90f
	 sub		%o0, %o1, %o3
	.align		64
	/* 0 < len < 16 */
less_than_16:
	andcc		%o3, 0x3, %g0
	bne,pn		%XCC, 90f
	 sub		%o0, %o1, %o3

1:	subcc		%o2, 4, %o2
	EX_LD(LOAD(lduw, %o1, %g1), U3_retl_o2_plus_4)
	EX_ST(STORE(stw, %g1, %o1 + %o3), U3_retl_o2_plus_4)
	bgu,pt		%XCC, 1b
	 add		%o1, 4, %o1

end_return:
	retl
	 mov		EX_RETVAL(%o4), %o0
	.align		32
90:	subcc		%o2, 1, %o2
	EX_LD(LOAD(ldub, %o1, %g1), U3_retl_o2_plus_1)
	EX_ST(STORE(stb, %g1, %o1 + %o3), U3_retl_o2_plus_1)
	bgu,pt		%XCC, 90b
	 add		%o1, 1, %o1
	retl
	 mov		EX_RETVAL(%o4), %o0
	.size		FUNC_NAME, .-FUNC_NAME