/* Optimized memcpy implementation for PowerPC A2.
   Copyright (C) 2010-2013 Free Software Foundation, Inc.
   Contributed by Michael Brutman <brutman@us.ibm.com>.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include <bp-sym.h>
#include <bp-asm.h>
#define PREFETCH_AHEAD 4	/* Number of cache lines to prefetch ahead on SRC.  */
#define ZERO_AHEAD 2		/* Number of cache lines to zero ahead on DST.  */
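
/* Rough C sketch of how these two knobs shape the main line-copy loop
   below (illustrative only; 64-byte lines assumed, __builtin_prefetch
   standing in for dcbt and memset for dcbz):

     #include <string.h>

     static void
     line_copy (char *dst, const char *src, size_t lines)
     {
       for (size_t i = 0; i < lines; i++)
         {
           __builtin_prefetch (src + 64 * PREFETCH_AHEAD);  // dcbt ahead
           memset (dst + 64 * ZERO_AHEAD, 0, 64);           // dcbz ahead
           memcpy (dst, src, 64);                           // 16B chunks
           dst += 64;
           src += 64;
         }
     }
*/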
	.machine  a2
EALIGN (BP_SYM (memcpy), 5, 0)
	dcbt    0,r4		/* Prefetch ONE SRC cacheline.  */
	cmplwi  cr1,r5,16	/* Is size < 16?  */
	mr      r6,r3		/* Copy dest reg to r6.  */
	blt+    cr1,L(shortcopy)
	/* Big copy (16 bytes or more)

	   Figure out how far to the nearest quadword boundary, or if we are
	   on one already.

	   r3 - return value (always)
	   r4 - current source addr
	   r5 - copy length
	   r6 - current dest addr
	*/
	neg     r8,r3		/* LS 4 bits = # bytes to 16-byte dest bdry  */
	clrlwi  r8,r8,32-4	/* align to 16-byte boundary  */
	sub     r7,r4,r3	/* compute offset to src from dest  */
	cmplwi  cr0,r8,0	/* Were we aligned on a 16 byte bdy?  */
	beq+    L(dst_aligned)
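
	/* In C terms the block above computes (sketch, not the exact code):

	     #include <stddef.h>
	     #include <stdint.h>

	     size_t    to_bdy = (-(uintptr_t) dst) & 15;  // neg + clrlwi
	     ptrdiff_t off    = src - dst;                // reach src as dst + off
	     if (to_bdy == 0)
	       goto dst_aligned;                          // already 16-byte aligned
	*/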
	/* Destination is not aligned on quadword boundary.  Get us to one.

	   r3 - return value (always)
	   r4 - current source addr
	   r5 - copy length
	   r6 - current dest addr
	   r7 - offset to src from dest
	   r8 - number of bytes to quadword boundary
	*/
	mtcrf   0x01,r8		/* put #bytes to boundary into cr7  */
	subf    r5,r8,r5	/* adjust remaining len  */

	bf      cr7*4+3,1f
	lbzx    r0,r7,r6	/* copy 1 byte  */
	stb     r0,0(r6)
	addi    r6,r6,1
1:
	bf      cr7*4+2,2f
	lhzx    r0,r7,r6	/* copy 2 bytes  */
	sth     r0,0(r6)
	addi    r6,r6,2
2:
	bf      cr7*4+1,4f
	lwzx    r0,r7,r6	/* copy 4 bytes  */
	stw     r0,0(r6)
	addi    r6,r6,4
4:
	bf      cr7*4+0,8f
	lfdx    fp0,r7,r6	/* copy 8 bytes  */
	stfd    fp0,0(r6)
	addi    r6,r6,8
8:
	add     r4,r7,r6	/* update src addr  */
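
	/* Equivalent C for the dribble above (sketch; each load/store pair
	   copies through the src-dst offset in r7 while only dst advances):

	     if (to_bdy & 1) { memcpy (dst, dst + off, 1); dst += 1; }
	     if (to_bdy & 2) { memcpy (dst, dst + off, 2); dst += 2; }
	     if (to_bdy & 4) { memcpy (dst, dst + off, 4); dst += 4; }
	     if (to_bdy & 8) { memcpy (dst, dst + off, 8); dst += 8; }
	     src = dst + off;                             // add r4,r7,r6
	*/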
	/* Dest is quadword aligned now.

	   Lots of decisions to make.  If we are copying less than a cache
	   line we won't be here long.  If we are not on a cache line
	   boundary we need to get there.  And then we need to figure out
	   how many cache lines ahead to pre-touch.

	   r3 - return value (always)
	   r4 - current source addr
	   r5 - copy length
	   r6 - current dest addr
	*/

L(dst_aligned):
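
/* The decisions from here on, as a C sketch (helper names are
   hypothetical, not functions in this file):

     unsigned line = __cache_line_size;      // set from the aux vector
     if (line == 0)
       byte_copy (dst, src, len);            // generic fallback below
     else if (len < line)
       tail_copy (dst, src, len);            // L(lessthancacheline)
     else
       line_copy_64_or_128 (dst, src, len);  // dcbt/dcbz line loops
*/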
#ifdef SHARED
	mflr    r0
/* Establishes GOT addressability so we can load __cache_line_size
   from static.  This value was set from the aux vector during startup.  */
	SETUP_GOT_ACCESS(r9,got_label)
	addis   r9,r9,__cache_line_size-got_label@ha
	lwz     r9,__cache_line_size-got_label@l(r9)
	mtlr    r0
#else
/* Load __cache_line_size from static.  This value was set from the
   aux vector during startup.  */
	lis     r9,__cache_line_size@ha
	lwz     r9,__cache_line_size@l(r9)
#endif
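
/* Both paths amount to the same C (sketch):

     extern int __cache_line_size;   // filled in from AT_DCACHEBSIZE
     int line = __cache_line_size;   // 0 if the kernel did not say
*/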
	cmplwi  cr5,r9,0	/* Is the line size known yet?  */
	bne+    cr5,L(cachelineset)
/* __cache_line_size not set: generic byte copy without much optimization.  */
	andi.   r0,r5,1		/* If length is odd copy one byte.  */
	beq     L(cachelinenotset_align)
	lbz     r7,0(r4)	/* Read one byte from source.  */
	addi    r5,r5,-1	/* Update length.  */
	addi    r4,r4,1		/* Update source pointer address.  */
	stb     r7,0(r6)	/* Store one byte to dest.  */
	addi    r6,r6,1		/* Update dest pointer address.  */
L(cachelinenotset_align):
	cmpwi   cr7,r5,0	/* If length is 0 return.  */
	beqlr   cr7
	ori     r2,r2,0		/* Force a new dispatch group.  */
L(cachelinenotset_loop):
	addic.  r5,r5,-2	/* Update length.  */
	lbz     r7,0(r4)	/* Load two bytes from source...  */
	lbz     r8,1(r4)
	addi    r4,r4,2		/* Update source pointer address.  */
	stb     r7,0(r6)	/* ...and store them to dest.  */
	stb     r8,1(r6)
	addi    r6,r6,2		/* Update dest pointer address.  */
	bne     L(cachelinenotset_loop)
	blr
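
/* The fallback above is equivalent to (C sketch):

     if (len & 1)                    // odd length: copy one byte first
       { *dst++ = *src++; len--; }
     while (len != 0)                // then two bytes per iteration
       {
         dst[0] = src[0];
         dst[1] = src[1];
         dst += 2; src += 2; len -= 2;
       }
*/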
L(cachelineset):
	cmplwi  cr0,r9,128	/* Is the line size 128 bytes?  */
	mr      r10,r9		/* Keep the line size in r10 too.  */

	cmpw    cr5,r5,r10	/* Less than a cacheline to go?  */

	neg     r7,r6		/* How far to next cacheline bdy?  */

	addi    r6,r6,-8	/* prepare for stfdu  */
	addi    r4,r4,-8	/* prepare for lfdu  */

	ble+    cr5,L(lessthancacheline)
	beq-    cr0,L(big_lines) /* 128 byte line code  */
	/* More than a cacheline left to go, and using 64 byte cachelines.  */

	clrlwi  r7,r7,32-6	/* How far to next cacheline bdy?  */

	cmplwi  cr6,r7,0	/* Are we on a cacheline bdy already?  */

	/* Reduce total len by what it takes to get to the next cache line.  */
	sub     r5,r5,r7
	srwi    r7,r7,4		/* How many qws to get to the line bdy?  */

	/* How many full cache lines to copy after getting to a line bdy?  */
	srwi    r10,r5,6

	cmplwi  r10,0		/* If no full cache lines to copy ...  */
	li      r11,0		/* number of cachelines to copy with prefetch  */
	beq     L(nocacheprefetch)
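
	/* The arithmetic above, in C (sketch; 64-byte lines):

	     size_t to_line = (-(uintptr_t) dst) & 63;  // bytes to line bdy
	     len -= to_line;
	     size_t qws   = to_line >> 4;               // 16-byte steps to bdy
	     size_t lines = len >> 6;                   // full lines after that
	*/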
	/* We are here because we have at least one full cache line to copy,
	   and therefore some pre-touching to do.  */

	cmplwi  r10,PREFETCH_AHEAD
	li      r12,64+8	/* prefetch distance  */
	ble     L(lessthanmaxprefetch)
	/* We can only do so much pre-fetching.  R11 will have the count of
	   lines left to prefetch after the initial batch of prefetches
	   are executed.  */

	subi    r11,r10,PREFETCH_AHEAD
	li      r10,PREFETCH_AHEAD
L(lessthanmaxprefetch):
	mtctr   r10

	/* At this point r10/ctr hold the number of lines to prefetch in this
	   initial batch, and r11 holds any remainder.  */

L(prefetchSRC):
	dcbt    r12,r4
	addi    r12,r12,64
	bdnz    L(prefetchSRC)
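
	/* Batching in C terms (sketch; r12 is the dcbt offset):

	     size_t batch = lines, rest = 0;
	     if (lines > PREFETCH_AHEAD)
	       { batch = PREFETCH_AHEAD; rest = lines - PREFETCH_AHEAD; }
	     for (size_t i = 0; i < batch; i++)    // initial dcbt batch
	       __builtin_prefetch (src + (64 + 8) + 64 * i);
	*/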
	/* Prefetching is done, or was not needed.

	   cr6 - are we on a cacheline boundary already?
	   r7 - number of quadwords to the next cacheline boundary
	*/
L(nocacheprefetch):
	mtctr   r7

	cmplwi  cr1,r5,64	/* Less than a cache line to copy?  */

	/* How many bytes are left after we copy whatever full
	   cache lines we can get?  */
	clrlwi  r5,r5,32-6

	beq     cr6,L(cachelinealigned)
	/* Copy quadwords up to the next cacheline boundary.  */

L(aligntocacheline):
	lfd     fp9,0x08(r4)
	lfdu    fp10,0x10(r4)
	stfd    fp9,0x08(r6)
	stfdu   fp10,0x10(r6)
	bdnz    L(aligntocacheline)
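
	/* In C, one 16-byte step per iteration until the line boundary
	   (sketch; the asm biases src/dst by -8 so it can use update-form
	   loads and stores):

	     while (qws--)
	       { memcpy (dst, src, 16); dst += 16; src += 16; }
	*/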
L(cachelinealigned):		/* copy while cache lines  */

	blt-    cr1,L(lessthancacheline) /* size < 64  */

	li      r11,64*ZERO_AHEAD+8	/* DCBZ dist  */
	/* Copy whole cachelines, optimized by prefetching SRC cacheline.  */
L(loop):			/* Copy aligned body  */
	dcbt    r12,r4		/* PREFETCH SOURCE some cache lines ahead  */
L(loop2):			/* Copy aligned body  */
L(lessthancacheline):		/* Was there less than cache to do?  */
	cmplwi  cr0,r5,16
	srwi    r7,r5,4		/* divide size by 16  */
	blt-    L(do_lt16)
	mtctr   r7

L(copy_remaining):
	lfd     fp9,0x08(r4)
	lfdu    fp10,0x10(r4)
	stfd    fp9,0x08(r6)
	stfdu   fp10,0x10(r6)
	bdnz    L(copy_remaining)

L(do_lt16):			/* less than 16?  */
	cmplwi  cr0,r5,0	/* copy remaining bytes (0-15)  */
	beqlr+			/* no rest to copy  */
	addi    r4,r4,8		/* update src (pointer was biased by -8)  */
	addi    r6,r6,8		/* update dst  */
L(shortcopy):			/* SIMPLE COPY to handle size =< 15 bytes  */
	mtcrf   0x01,r5		/* put #bytes into cr7  */
	sub     r7,r4,r6	/* offset to src from dest  */
	bf-     cr7*4+0,8f
	lfdx    fp9,r7,r6	/* copy 8 bytes  */
	stfd    fp9,0(r6)
	addi    r6,r6,8
8:
	bf      cr7*4+1,4f
	lwzx    r0,r7,r6	/* copy 4 bytes  */
	stw     r0,0(r6)
	addi    r6,r6,4
4:
	bf      cr7*4+2,2f
	lhzx    r0,r7,r6	/* copy 2 bytes  */
	sth     r0,0(r6)
	addi    r6,r6,2
2:
	bf      cr7*4+3,1f
	lbzx    r0,r7,r6	/* copy 1 byte  */
	stb     r0,0(r6)
1:
	blr
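
	/* i.e., in C (sketch; off = src - dst as in the alignment code):

	     if (len & 8) { memcpy (dst, dst + off, 8); dst += 8; }
	     if (len & 4) { memcpy (dst, dst + off, 4); dst += 4; }
	     if (len & 2) { memcpy (dst, dst + off, 2); dst += 2; }
	     if (len & 1) memcpy (dst, dst + off, 1);
	*/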
	/* Similar to above, but for use with 128 byte lines.  */

L(big_lines):

	clrlwi  r7,r7,32-7	/* How far to next cacheline bdy?  */

	cmplwi  cr6,r7,0	/* Are we on a cacheline bdy already?  */

	/* Reduce total len by what it takes to get to the next cache line.  */
	sub     r5,r5,r7
	srwi    r7,r7,4		/* How many qws to get to the line bdy?  */

	/* How many full cache lines to copy after getting to a line bdy?  */
	srwi    r10,r5,7

	cmplwi  r10,0		/* If no full cache lines to copy ...  */
	li      r11,0		/* number of cachelines to copy with prefetch  */
	beq     L(nocacheprefetch_128)
	/* We are here because we have at least one full cache line to copy,
	   and therefore some pre-touching to do.  */

	cmplwi  r10,PREFETCH_AHEAD
	li      r12,128+8	/* prefetch distance  */
	ble     L(lessthanmaxprefetch_128)
	/* We can only do so much pre-fetching.  R11 will have the count of
	   lines left to prefetch after the initial batch of prefetches
	   are executed.  */

	subi    r11,r10,PREFETCH_AHEAD
	li      r10,PREFETCH_AHEAD
L(lessthanmaxprefetch_128):
	mtctr   r10

	/* At this point r10/ctr hold the number of lines to prefetch in this
	   initial batch, and r11 holds any remainder.  */

L(prefetchSRC_128):
	dcbt    r12,r4
	addi    r12,r12,128
	bdnz    L(prefetchSRC_128)
	/* Prefetching is done, or was not needed.

	   cr6 - are we on a cacheline boundary already?
	   r7 - number of quadwords to the next cacheline boundary
	*/
L(nocacheprefetch_128):
	mtctr   r7

	cmplwi  cr1,r5,128	/* Less than a cache line to copy?  */

	/* How many bytes are left after we copy whatever full
	   cache lines we can get?  */
	clrlwi  r5,r5,32-7

	beq     cr6,L(cachelinealigned_128)
	/* Copy quadwords up to the next cacheline boundary.  */

L(aligntocacheline_128):
	lfd     fp9,0x08(r4)
	lfdu    fp10,0x10(r4)
	stfd    fp9,0x08(r6)
	stfdu   fp10,0x10(r6)
	bdnz    L(aligntocacheline_128)
L(cachelinealigned_128):	/* copy while cache lines  */

	blt-    cr1,L(lessthancacheline) /* size < 128  */

	li      r11,128*ZERO_AHEAD+8	/* DCBZ dist  */
	/* Copy whole cachelines, optimized by prefetching SRC cacheline.  */
L(loop_128):			/* Copy aligned body  */
	dcbt    r12,r4		/* PREFETCH SOURCE some cache lines ahead  */
L(loop2_128):			/* Copy aligned body  */
	b       L(lessthancacheline)
END (BP_SYM (memcpy))
libc_hidden_builtin_def (memcpy)