author     Ulrich Drepper <drepper@redhat.com>    2004-07-31 17:14:51 +0000
committer  Ulrich Drepper <drepper@redhat.com>    2004-07-31 17:14:51 +0000
commit     3ee3a002229b7f6607c3945deb5ad9de66abf82f (patch)
tree       dcfd79a40634b937e83e22e2a30af5815b9ff599 /sysdeps/sparc
parent     80574c92d70da99607ba25f734b30852c64d4208 (diff)
(memcpy): Optimize better for copies smaller than 256 bytes. Also, use only one unrolled loop instead of two for the large copy case.
Diffstat (limited to 'sysdeps/sparc')
-rw-r--r--   sysdeps/sparc/sparc64/sparcv9b/memcpy.S   567
1 file changed, 231 insertions(+), 336 deletions(-)
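
For orientation before the patch itself: the rewritten memcpy picks one of three paths up front, based on the length and on the combined low bits of 'dst' and 'src' (the labels small_copy, medium_copy, and enter/begin/loop in the new code). The C sketch below only illustrates that dispatch using the thresholds visible in the assembly; sketch_memcpy and its simplified loop bodies are hypothetical stand-ins, and the real large-copy path uses prefetch, faligndata, and 64-byte stda block stores that plain C cannot express.

#include <stddef.h>
#include <stdint.h>

/* Illustrative only: a C rendering of the new dispatch.  The loop bodies
   are simplified stand-ins for the assembly at the labels named in the
   comments.  */
void *sketch_memcpy(void *dstv, const void *srcv, size_t len)
{
	char *dst = dstv;
	const char *src = srcv;
	uintptr_t both = (uintptr_t) dst | (uintptr_t) src;
	size_t i = 0;

	if (len == 0)			/* "be,pn %XCC, out" */
		return dstv;

	if (len <= 16) {
		if (((both | len) & 0x3) == 0) {
			/* small_copy_aligned: whole 4-byte words.  */
			for (; i < len; i += 4)
				*(uint32_t *) (dst + i) =
					*(const uint32_t *) (src + i);
		} else {
			/* small_copy_unaligned: one byte at a time.  */
			for (; i < len; i++)
				dst[i] = src[i];
		}
	} else if (len < 256) {
		if ((both & 0x7) == 0) {
			/* medium_copy_aligned: 8-byte loads/stores,
			   then the 4/2/1-byte remainder below.  */
			for (; i + 8 <= len; i += 8)
				*(uint64_t *) (dst + i) =
					*(const uint64_t *) (src + i);
		}
		/* Remainder, or the whole copy when dst/src are not
		   8-byte aligned (small_copy_unaligned).  */
		for (; i < len; i++)
			dst[i] = src[i];
	} else {
		/* enter/begin/loop: byte-copy until dst is 64-byte aligned,
		   then a single unrolled loop of 64-byte VIS block stores
		   with prefetching, then the (len % 64) tail.  Reduced to a
		   plain loop here.  */
		for (; i < len; i++)
			dst[i] = src[i];
	}
	return dstv;			/* original dst, kept in %g5 */
}

The old code kept two unrolled large-copy loops (one that issued prefetches, one that did not, to avoid prefetching past the end of src); the new code has the single 'loop' plus 'loopfini' for the last full block, which is the "one unrolled loop instead of two" change the commit message describes.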
diff --git a/sysdeps/sparc/sparc64/sparcv9b/memcpy.S b/sysdeps/sparc/sparc64/sparcv9b/memcpy.S
index ccbc3fd..8b70b0a 100644
--- a/sysdeps/sparc/sparc64/sparcv9b/memcpy.S
+++ b/sysdeps/sparc/sparc64/sparcv9b/memcpy.S
@@ -20,32 +20,36 @@
02111-1307 USA. */
#include <sysdep.h>
+
#define ASI_BLK_P 0xf0
#define FPRS_FEF 0x04
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
-#define SMALL_COPY_USES_FPU
#ifndef XCC
#define USE_BPR
#define XCC xcc
#endif
+ .register %g2,#scratch
+ .register %g3,#scratch
+ .register %g6,#scratch
+
.text
.align 32
ENTRY(bcopy)
- sub %o1, %o0, %o4 /* IEU0 Group */
- mov %o0, %g3 /* IEU1 */
- cmp %o4, %o2 /* IEU1 Group */
- mov %o1, %o0 /* IEU0 */
- bgeu,pt %XCC, 100f /* CTI */
- mov %g3, %o1 /* IEU0 Group */
+ sub %o1, %o0, %o4
+ mov %o0, %g4
+ cmp %o4, %o2
+ mov %o1, %o0
+ bgeu,pt %XCC, 100f
+ mov %g4, %o1
#ifndef USE_BPR
- srl %o2, 0, %o2 /* IEU1 */
+ srl %o2, 0, %o2
#endif
- brnz,pn %o2, 220f /* CTI Group */
- add %o0, %o2, %o0 /* IEU0 */
+ brnz,pn %o2, 220f
+ add %o0, %o2, %o0
retl
nop
END(bcopy)
@@ -68,373 +72,264 @@ END(bcopy)
ENTRY(memcpy)
100: /* %o0=dst, %o1=src, %o2=len */
-#ifndef __KERNEL__
- /* Save away original 'dst' for memcpy return value. */
- mov %o0, %g3 ! A0 Group
-#endif
- /* Anything to copy at all? */
- cmp %o2, 0 ! A1
- ble,pn %XCC, 102f ! BR
-
- /* Extremely small copy? */
-218: cmp %o2, 31 ! A0 Group
- ble,pn %XCC, 101f ! BR
-
- /* Large enough to use unrolled prefetch loops? */
- cmp %o2, 0x100 ! A1
- bge,a,pt %XCC, 103f ! BR Group
- andcc %o0, 0x3f, %g2 ! A0
+ mov %o0, %g5
+ cmp %o2, 0
+ be,pn %XCC, out
+218: or %o0, %o1, %o3
+ cmp %o2, 16
+ bleu,a,pn %XCC, small_copy
+ or %o3, %o2, %o3
- ba,pt %XCC, 108f ! BR Group
- andcc %o0, 0x7, %g2 ! A0
+ cmp %o2, 256
+ blu,pt %XCC, medium_copy
+ andcc %o3, 0x7, %g0
- .align 32
-101:
- /* Copy %o2 bytes from src to dst, one byte at a time. */
- ldub [%o1 + 0x00], %o3 ! MS Group
- add %o1, 0x1, %o1 ! A0
- add %o0, 0x1, %o0 ! A1
- subcc %o2, 1, %o2 ! A0 Group
-
- bg,pt %XCC, 101b ! BR
- stb %o3, [%o0 + -1] ! MS Group (1-cycle stall)
-
-102:
-#ifdef __KERNEL__
- retl ! BR Group (0-4 cycle stall)
- clr %o0 ! A0
-#else
- retl ! BR Group (0-4 cycle stall)
- mov %g3, %o0 ! A0
-#endif
+ ba,pt %xcc, enter
+ andcc %o0, 0x3f, %g2
- /* Here len >= (6 * 64) and condition codes reflect execution
+ /* Here len >= 256 and condition codes reflect execution
* of "andcc %o0, 0x7, %g2", done by caller.
*/
.align 64
-103:
+enter:
/* Is 'dst' already aligned on an 64-byte boundary? */
- be,pt %XCC, 2f ! BR
+ be,pt %XCC, 2f
/* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
* of bytes to copy to make 'dst' 64-byte aligned. We pre-
* subtract this from 'len'.
*/
- sub %g2, 0x40, %g2 ! A0 Group
- sub %g0, %g2, %g2 ! A0 Group
- sub %o2, %g2, %o2 ! A0 Group
+ sub %g2, 0x40, %g2
+ sub %g0, %g2, %g2
+ sub %o2, %g2, %o2
/* Copy %g2 bytes from src to dst, one byte at a time. */
-1: ldub [%o1 + 0x00], %o3 ! MS (Group)
- add %o1, 0x1, %o1 ! A1
- add %o0, 0x1, %o0 ! A0 Group
- subcc %g2, 0x1, %g2 ! A1
+1: ldub [%o1 + 0x00], %o3
+ add %o1, 0x1, %o1
+ add %o0, 0x1, %o0
+ subcc %g2, 0x1, %g2
- bg,pt %XCC, 1b ! BR Group
- stb %o3, [%o0 + -1] ! MS Group
+ bg,pt %XCC, 1b
+ stb %o3, [%o0 + -1]
-2: VISEntryHalf ! MS+MS
- and %o1, 0x7, %g1 ! A1
- ba,pt %XCC, 104f ! BR
- alignaddr %o1, %g0, %o1 ! MS (Break-after)
+2: VISEntryHalf
+ and %o1, 0x7, %g1
+ ba,pt %xcc, begin
+ alignaddr %o1, %g0, %o1
.align 64
-104:
- prefetch [%o1 + 0x000], #one_read ! MS Group1
- prefetch [%o1 + 0x040], #one_read ! MS Group2
- andn %o2, (0x40 - 1), %o4 ! A0
- prefetch [%o1 + 0x080], #one_read ! MS Group3
- cmp %o4, 0x140 ! A0
- prefetch [%o1 + 0x0c0], #one_read ! MS Group4
- ldd [%o1 + 0x000], %f0 ! MS Group5 (%f0 results at G8)
- bge,a,pt %XCC, 1f ! BR
-
- prefetch [%o1 + 0x100], #one_read ! MS Group6
-1: ldd [%o1 + 0x008], %f2 ! AX (%f2 results at G9)
- cmp %o4, 0x180 ! A1
- bge,a,pt %XCC, 1f ! BR
- prefetch [%o1 + 0x140], #one_read ! MS Group7
-1: ldd [%o1 + 0x010], %f4 ! AX (%f4 results at G10)
- cmp %o4, 0x1c0 ! A1
- bge,a,pt %XCC, 1f ! BR
-
- prefetch [%o1 + 0x180], #one_read ! MS Group8
-1: faligndata %f0, %f2, %f16 ! FGA Group9 (%f16 at G12)
- ldd [%o1 + 0x018], %f6 ! AX (%f6 results at G12)
- faligndata %f2, %f4, %f18 ! FGA Group10 (%f18 results at G13)
- ldd [%o1 + 0x020], %f8 ! MS (%f8 results at G13)
- faligndata %f4, %f6, %f20 ! FGA Group12 (1-cycle stall,%f20 at G15)
- ldd [%o1 + 0x028], %f10 ! MS (%f10 results at G15)
- faligndata %f6, %f8, %f22 ! FGA Group13 (%f22 results at G16)
-
- ldd [%o1 + 0x030], %f12 ! MS (%f12 results at G16)
- faligndata %f8, %f10, %f24 ! FGA Group15 (1-cycle stall,%f24 at G18)
- ldd [%o1 + 0x038], %f14 ! MS (%f14 results at G18)
- faligndata %f10, %f12, %f26 ! FGA Group16 (%f26 results at G19)
- ldd [%o1 + 0x040], %f0 ! MS (%f0 results at G19)
-
- /* We only use the first loop if len > (7 * 64). */
- subcc %o4, 0x1c0, %o4 ! A0 Group17
- bg,pt %XCC, 105f ! BR
- add %o1, 0x40, %o1 ! A1
-
- add %o4, 0x140, %o4 ! A0 Group18
- ba,pt %XCC, 106f ! BR
- srl %o4, 6, %o3 ! A0 Group19
- nop
- nop
- nop
- nop
- nop
-
- nop
- nop
-
- /* This loop performs the copy and queues new prefetches.
- * We drop into the second loop when len <= (5 * 64). Note
- * that this (5 * 64) factor has been subtracted from len
- * already.
- */
-105:
- ldd [%o1 + 0x008], %f2 ! MS Group2 (%f2 results at G5)
- faligndata %f12, %f14, %f28 ! FGA (%f28 results at G5)
- ldd [%o1 + 0x010], %f4 ! MS Group3 (%f4 results at G6)
- faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall, %f30 at G7)
- stda %f16, [%o0] ASI_BLK_P ! MS
- ldd [%o1 + 0x018], %f6 ! AX (%f6 results at G7)
-
- faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
- ldd [%o1 + 0x020], %f8 ! MS (%f8 results at G15)
- faligndata %f2, %f4, %f18 ! FGA Group13 (%f18 results at G16)
- ldd [%o1 + 0x028], %f10 ! MS (%f10 results at G16)
- faligndata %f4, %f6, %f20 ! FGA Group14 (%f20 results at G17)
- ldd [%o1 + 0x030], %f12 ! MS (%f12 results at G17)
- faligndata %f6, %f8, %f22 ! FGA Group15 (%f22 results at G18)
- ldd [%o1 + 0x038], %f14 ! MS (%f14 results at G18)
-
- faligndata %f8, %f10, %f24 ! FGA Group16 (%f24 results at G19)
- ldd [%o1 + 0x040], %f0 ! AX (%f0 results at G19)
- prefetch [%o1 + 0x180], #one_read ! MS
- faligndata %f10, %f12, %f26 ! FGA Group17 (%f26 results at G20)
- subcc %o4, 0x40, %o4 ! A0
- add %o1, 0x40, %o1 ! A1
- bg,pt %XCC, 105b ! BR
- add %o0, 0x40, %o0 ! A0 Group18
-
- mov 5, %o3 ! A1
-
- /* This loop performs on the copy, no new prefetches are
- * queued. We do things this way so that we do not perform
- * any spurious prefetches past the end of the src buffer.
- */
-106:
- ldd [%o1 + 0x008], %f2 ! MS
- faligndata %f12, %f14, %f28 ! FGA Group2
- ldd [%o1 + 0x010], %f4 ! MS
- faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall)
- stda %f16, [%o0] ASI_BLK_P ! MS
- ldd [%o1 + 0x018], %f6 ! AX
- faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
-
- ldd [%o1 + 0x020], %f8 ! MS
- faligndata %f2, %f4, %f18 ! FGA Group13
- ldd [%o1 + 0x028], %f10 ! MS
- faligndata %f4, %f6, %f20 ! FGA Group14
- ldd [%o1 + 0x030], %f12 ! MS
- faligndata %f6, %f8, %f22 ! FGA Group15
- ldd [%o1 + 0x038], %f14 ! MS
- faligndata %f8, %f10, %f24 ! FGA Group16
-
- ldd [%o1 + 0x040], %f0 ! AX
- faligndata %f10, %f12, %f26 ! FGA Group17
- subcc %o3, 0x01, %o3 ! A0
- add %o1, 0x40, %o1 ! A1
- bg,pt %XCC, 106b ! BR
- add %o0, 0x40, %o0 ! A0 Group18
+begin:
+ prefetch [%o1 + 0x000], #one_read
+ prefetch [%o1 + 0x040], #one_read
+ andn %o2, (0x40 - 1), %o4
+ prefetch [%o1 + 0x080], #one_read
+ prefetch [%o1 + 0x0c0], #one_read
+ ldd [%o1 + 0x000], %f0
+ prefetch [%o1 + 0x100], #one_read
+ ldd [%o1 + 0x008], %f2
+ prefetch [%o1 + 0x140], #one_read
+ ldd [%o1 + 0x010], %f4
+ prefetch [%o1 + 0x180], #one_read
+ faligndata %f0, %f2, %f16
+ ldd [%o1 + 0x018], %f6
+ faligndata %f2, %f4, %f18
+ ldd [%o1 + 0x020], %f8
+ faligndata %f4, %f6, %f20
+ ldd [%o1 + 0x028], %f10
+ faligndata %f6, %f8, %f22
+
+ ldd [%o1 + 0x030], %f12
+ faligndata %f8, %f10, %f24
+ ldd [%o1 + 0x038], %f14
+ faligndata %f10, %f12, %f26
+ ldd [%o1 + 0x040], %f0
+
+ sub %o4, 0x80, %o4
+ add %o1, 0x40, %o1
+ ba,pt %xcc, loop
+ srl %o4, 6, %o3
+
+ .align 64
+loop:
+ ldd [%o1 + 0x008], %f2
+ faligndata %f12, %f14, %f28
+ ldd [%o1 + 0x010], %f4
+ faligndata %f14, %f0, %f30
+ stda %f16, [%o0] ASI_BLK_P
+ ldd [%o1 + 0x018], %f6
+ faligndata %f0, %f2, %f16
+
+ ldd [%o1 + 0x020], %f8
+ faligndata %f2, %f4, %f18
+ ldd [%o1 + 0x028], %f10
+ faligndata %f4, %f6, %f20
+ ldd [%o1 + 0x030], %f12
+ faligndata %f6, %f8, %f22
+ ldd [%o1 + 0x038], %f14
+ faligndata %f8, %f10, %f24
+
+ ldd [%o1 + 0x040], %f0
+ prefetch [%o1 + 0x180], #one_read
+ faligndata %f10, %f12, %f26
+ subcc %o3, 0x01, %o3
+ add %o1, 0x40, %o1
+ bg,pt %XCC, loop
+ add %o0, 0x40, %o0
/* Finally we copy the last full 64-byte block. */
- ldd [%o1 + 0x008], %f2 ! MS
- faligndata %f12, %f14, %f28 ! FGA
- ldd [%o1 + 0x010], %f4 ! MS Group19
- faligndata %f14, %f0, %f30 ! FGA
- stda %f16, [%o0] ASI_BLK_P ! MS Group20
- ldd [%o1 + 0x018], %f6 ! AX
- faligndata %f0, %f2, %f16 ! FGA Group11 (7-cycle stall)
- ldd [%o1 + 0x020], %f8 ! MS
- faligndata %f2, %f4, %f18 ! FGA Group12
- ldd [%o1 + 0x028], %f10 ! MS
- faligndata %f4, %f6, %f20 ! FGA Group13
- ldd [%o1 + 0x030], %f12 ! MS
- faligndata %f6, %f8, %f22 ! FGA Group14
- ldd [%o1 + 0x038], %f14 ! MS
- faligndata %f8, %f10, %f24 ! FGA Group15
- cmp %g1, 0 ! A0
- be,pt %XCC, 1f ! BR
- add %o0, 0x40, %o0 ! A1
- ldd [%o1 + 0x040], %f0 ! MS
-1: faligndata %f10, %f12, %f26 ! FGA Group16
- faligndata %f12, %f14, %f28 ! FGA Group17
- faligndata %f14, %f0, %f30 ! FGA Group18
- stda %f16, [%o0] ASI_BLK_P ! MS
- add %o0, 0x40, %o0 ! A0
- add %o1, 0x40, %o1 ! A1
- membar #Sync ! MS Group26 (7-cycle stall)
+loopfini:
+ ldd [%o1 + 0x008], %f2
+ faligndata %f12, %f14, %f28
+ ldd [%o1 + 0x010], %f4
+ faligndata %f14, %f0, %f30
+ stda %f16, [%o0] ASI_BLK_P
+ ldd [%o1 + 0x018], %f6
+ faligndata %f0, %f2, %f16
+ ldd [%o1 + 0x020], %f8
+ faligndata %f2, %f4, %f18
+ ldd [%o1 + 0x028], %f10
+ faligndata %f4, %f6, %f20
+ ldd [%o1 + 0x030], %f12
+ faligndata %f6, %f8, %f22
+ ldd [%o1 + 0x038], %f14
+ faligndata %f8, %f10, %f24
+ cmp %g1, 0
+ be,pt %XCC, 1f
+ add %o0, 0x40, %o0
+ ldd [%o1 + 0x040], %f0
+1: faligndata %f10, %f12, %f26
+ faligndata %f12, %f14, %f28
+ faligndata %f14, %f0, %f30
+ stda %f16, [%o0] ASI_BLK_P
+ add %o0, 0x40, %o0
+ add %o1, 0x40, %o1
+ membar #Sync
/* Now we copy the (len modulo 64) bytes at the end.
* Note how we borrow the %f0 loaded above.
*
* Also notice how this code is careful not to perform a
- * load past the end of the src buffer just like similar
- * code found in 'toosmall' processing.
+ * load past the end of the src buffer.
*/
- and %o2, 0x3f, %o2 ! A0 Group
- andcc %o2, 0x38, %g2 ! A0 Group
- be,pn %XCC, 107f ! BR
- subcc %g2, 0x8, %g2 ! A1
- be,pn %XCC, 107f ! BR Group
- cmp %g1, 0 ! A0
-
- be,a,pt %XCC, 1f ! BR Group
- ldd [%o1 + 0x00], %f0 ! MS
-
-1: ldd [%o1 + 0x08], %f2 ! MS Group
- add %o1, 0x8, %o1 ! A0
- sub %o2, 0x8, %o2 ! A1
- subcc %g2, 0x8, %g2 ! A0 Group
- faligndata %f0, %f2, %f8 ! FGA Group
- std %f8, [%o0 + 0x00] ! MS (XXX does it stall here? XXX)
- be,pn %XCC, 107f ! BR
- add %o0, 0x8, %o0 ! A0
- ldd [%o1 + 0x08], %f0 ! MS Group
- add %o1, 0x8, %o1 ! A0
- sub %o2, 0x8, %o2 ! A1
- subcc %g2, 0x8, %g2 ! A0 Group
- faligndata %f2, %f0, %f8 ! FGA
- std %f8, [%o0 + 0x00] ! MS (XXX does it stall here? XXX)
- bne,pn %XCC, 1b ! BR
- add %o0, 0x8, %o0 ! A0 Group
+loopend:
+ and %o2, 0x3f, %o2
+ andcc %o2, 0x38, %g2
+ be,pn %XCC, endcruft
+ subcc %g2, 0x8, %g2
+ be,pn %XCC, endcruft
+ cmp %g1, 0
+
+ be,a,pt %XCC, 1f
+ ldd [%o1 + 0x00], %f0
+
+1: ldd [%o1 + 0x08], %f2
+ add %o1, 0x8, %o1
+ sub %o2, 0x8, %o2
+ subcc %g2, 0x8, %g2
+ faligndata %f0, %f2, %f8
+ std %f8, [%o0 + 0x00]
+ be,pn %XCC, endcruft
+ add %o0, 0x8, %o0
+ ldd [%o1 + 0x08], %f0
+ add %o1, 0x8, %o1
+ sub %o2, 0x8, %o2
+ subcc %g2, 0x8, %g2
+ faligndata %f2, %f0, %f8
+ std %f8, [%o0 + 0x00]
+ bne,pn %XCC, 1b
+ add %o0, 0x8, %o0
/* If anything is left, we copy it one byte at a time.
* Note that %g1 is (src & 0x3) saved above before the
* alignaddr was performed.
*/
-107:
+endcruft:
cmp %o2, 0
add %o1, %g1, %o1
VISExitHalf
- be,pn %XCC, 102b
- nop
- ba,a,pt %XCC, 101b
-
- /* If we get here, then 32 <= len < (6 * 64) */
-108:
+ be,pn %XCC, out
+ sub %o0, %o1, %o3
-#ifdef SMALL_COPY_USES_FPU
-
- /* Is 'dst' already aligned on an 8-byte boundary? */
- be,pt %XCC, 2f ! BR Group
-
- /* Compute abs((dst & 7) - 8) into %g2. This is the number
- * of bytes to copy to make 'dst' 8-byte aligned. We pre-
- * subtract this from 'len'.
- */
- sub %g2, 0x8, %g2 ! A0
- sub %g0, %g2, %g2 ! A0 Group (reg-dep)
- sub %o2, %g2, %o2 ! A0 Group (reg-dep)
-
- /* Copy %g2 bytes from src to dst, one byte at a time. */
-1: ldub [%o1 + 0x00], %o3 ! MS (Group) (%o3 in 3 cycles)
- add %o1, 0x1, %o1 ! A1
- add %o0, 0x1, %o0 ! A0 Group
- subcc %g2, 0x1, %g2 ! A1
-
- bg,pt %XCC, 1b ! BR Group
- stb %o3, [%o0 + -1] ! MS Group
+ andcc %g1, 0x7, %g0
+ bne,pn %icc, small_copy_unaligned
+ andcc %o2, 0x8, %g0
+ be,pt %icc, 1f
+ nop
+ ldx [%o1], %o5
+ stx %o5, [%o1 + %o3]
+ add %o1, 0x8, %o1
-2: VISEntryHalf ! MS+MS
+1: andcc %o2, 0x4, %g0
+ be,pt %icc, 1f
+ nop
+ lduw [%o1], %o5
+ stw %o5, [%o1 + %o3]
+ add %o1, 0x4, %o1
- /* Compute (len - (len % 8)) into %g2. This is guarenteed
- * to be nonzero.
- */
- andn %o2, 0x7, %g2 ! A0 Group
-
- /* You may read this and believe that it allows reading
- * one 8-byte longword past the end of src. It actually
- * does not, as %g2 is subtracted as loads are done from
- * src, so we always stop before running off the end.
- * Also, we are guarenteed to have at least 0x10 bytes
- * to move here.
- */
- sub %g2, 0x8, %g2 ! A0 Group (reg-dep)
- alignaddr %o1, %g0, %g1 ! MS (Break-after)
- ldd [%g1 + 0x00], %f0 ! MS Group (1-cycle stall)
- add %g1, 0x8, %g1 ! A0
-
-1: ldd [%g1 + 0x00], %f2 ! MS Group
- add %g1, 0x8, %g1 ! A0
- sub %o2, 0x8, %o2 ! A1
- subcc %g2, 0x8, %g2 ! A0 Group
-
- faligndata %f0, %f2, %f8 ! FGA Group (1-cycle stall)
- std %f8, [%o0 + 0x00] ! MS Group (2-cycle stall)
- add %o1, 0x8, %o1 ! A0
- be,pn %XCC, 2f ! BR
-
- add %o0, 0x8, %o0 ! A1
- ldd [%g1 + 0x00], %f0 ! MS Group
- add %g1, 0x8, %g1 ! A0
- sub %o2, 0x8, %o2 ! A1
-
- subcc %g2, 0x8, %g2 ! A0 Group
- faligndata %f2, %f0, %f8 ! FGA Group (1-cycle stall)
- std %f8, [%o0 + 0x00] ! MS Group (2-cycle stall)
- add %o1, 0x8, %o1 ! A0
-
- bne,pn %XCC, 1b ! BR
- add %o0, 0x8, %o0 ! A1
-
- /* Nothing left to copy? */
-2: cmp %o2, 0 ! A0 Group
- VISExitHalf ! A0+MS
- be,pn %XCC, 102b ! BR Group
- nop ! A0
- ba,a,pt %XCC, 101b ! BR Group
-
-#else /* !(SMALL_COPY_USES_FPU) */
-
- xor %o1, %o0, %g2
- andcc %g2, 0x7, %g0
- bne,pn %XCC, 101b
- andcc %o1, 0x7, %g2
+1: andcc %o2, 0x2, %g0
+ be,pt %icc, 1f
+ nop
+ lduh [%o1], %o5
+ sth %o5, [%o1 + %o3]
+ add %o1, 0x2, %o1
- be,pt %XCC, 2f
- sub %g2, 0x8, %g2
- sub %g0, %g2, %g2
- sub %o2, %g2, %o2
+1: andcc %o2, 0x1, %g0
+ be,pt %icc, out
+ nop
+ ldub [%o1], %o5
+ ba,pt %xcc, out
+ stb %o5, [%o1 + %o3]
+
+medium_copy: /* 16 < len <= 64 */
+ bne,pn %XCC, small_copy_unaligned
+ sub %o0, %o1, %o3
+
+medium_copy_aligned:
+ andn %o2, 0x7, %o4
+ and %o2, 0x7, %o2
+1: subcc %o4, 0x8, %o4
+ ldx [%o1], %o5
+ stx %o5, [%o1 + %o3]
+ bgu,pt %XCC, 1b
+ add %o1, 0x8, %o1
+ andcc %o2, 0x4, %g0
+ be,pt %XCC, 1f
+ nop
+ sub %o2, 0x4, %o2
+ lduw [%o1], %o5
+ stw %o5, [%o1 + %o3]
+ add %o1, 0x4, %o1
+1: cmp %o2, 0
+ be,pt %XCC, out
+ nop
+ ba,pt %xcc, small_copy_unaligned
+ nop
-1: ldub [%o1 + 0x00], %o3
- add %o1, 0x1, %o1
- add %o0, 0x1, %o0
- subcc %g2, 0x1, %g2
- bg,pt %XCC, 1b
- stb %o3, [%o0 + -1]
+small_copy: /* 0 < len <= 16 */
+ andcc %o3, 0x3, %g0
+ bne,pn %XCC, small_copy_unaligned
+ sub %o0, %o1, %o3
-2: andn %o2, 0x7, %g2
- sub %o2, %g2, %o2
+small_copy_aligned:
+ subcc %o2, 4, %o2
+ lduw [%o1], %g1
+ stw %g1, [%o1 + %o3]
+ bgu,pt %XCC, small_copy_aligned
+ add %o1, 4, %o1
-3: ldx [%o1 + 0x00], %o3
- add %o1, 0x8, %o1
- add %o0, 0x8, %o0
- subcc %g2, 0x8, %g2
- bg,pt %XCC, 3b
- stx %o3, [%o0 + -8]
+out: retl
+ mov %g5, %o0
- cmp %o2, 0
- bne,pn %XCC, 101b
- nop
- ba,a,pt %XCC, 102b
+ .align 32
+small_copy_unaligned:
+ subcc %o2, 1, %o2
+ ldub [%o1], %g1
+ stb %g1, [%o1 + %o3]
+ bgu,pt %XCC, small_copy_unaligned
+ add %o1, 1, %o1
+ retl
+ mov %g5, %o0
-#endif /* !(SMALL_COPY_USES_FPU) */
END(memcpy)
#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
@@ -511,11 +406,11 @@ END(memcpy)
.align 32
ENTRY(memmove)
- mov %o0, %g3
+ mov %o0, %g5
#ifndef USE_BPR
srl %o2, 0, %o2 /* IEU1 Group */
#endif
- brz,pn %o2, 102b /* CTI Group */
+ brz,pn %o2, out /* CTI Group */
sub %o0, %o1, %o4 /* IEU0 */
cmp %o4, %o2 /* IEU1 Group */
bgeu,pt %XCC, 218b /* CTI */