about summary refs log tree commit diff
path: root/sysdeps/sparc/sparc64
diff options
context:
space:
mode:
Diffstat (limited to 'sysdeps/sparc/sparc64')
-rw-r--r--  sysdeps/sparc/sparc64/hp-timing.h            4
-rw-r--r--  sysdeps/sparc/sparc64/memcpy.S             120
-rw-r--r--  sysdeps/sparc/sparc64/sparcv9b/memcpy.S     26
-rw-r--r--  sysdeps/sparc/sparc64/stpcpy.S              40
-rw-r--r--  sysdeps/sparc/sparc64/stpncpy.S             64
-rw-r--r--  sysdeps/sparc/sparc64/strcat.S              22
-rw-r--r--  sysdeps/sparc/sparc64/strchr.S              16
-rw-r--r--  sysdeps/sparc/sparc64/strcmp.S              38
-rw-r--r--  sysdeps/sparc/sparc64/strcpy.S              22
-rw-r--r--  sysdeps/sparc/sparc64/strncmp.S             20
-rw-r--r--  sysdeps/sparc/sparc64/strncpy.S             16
11 files changed, 194 insertions, 194 deletions
diff --git a/sysdeps/sparc/sparc64/hp-timing.h b/sysdeps/sparc/sparc64/hp-timing.h
index f6cb89b..1784dc1 100644
--- a/sysdeps/sparc/sparc64/hp-timing.h
+++ b/sysdeps/sparc/sparc64/hp-timing.h
@@ -1,5 +1,5 @@
/* High precision, low overhead timing functions. sparc64 version.
- Copyright (C) 2001, 2002 Free Software Foundation, Inc.
+ Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by David S. Miller <davem@redhat.com>, 2001.
@@ -63,7 +63,7 @@ do { \
" nop" \
: "=&r" (tmp1), "=&r" (tmp2) \
: "r" (__diff), "r" (&(Sum)) \
- : "memory", "g1", "g5", "g7"); \
+ : "memory", "g1", "g5", "g6"); \
} while(0)
#define HP_TIMING_ACCUM_NT(Sum, Diff) (Sum) += (Diff)
diff --git a/sysdeps/sparc/sparc64/memcpy.S b/sysdeps/sparc/sparc64/memcpy.S
index ede8dc4..e9cc004 100644
--- a/sysdeps/sparc/sparc64/memcpy.S
+++ b/sysdeps/sparc/sparc64/memcpy.S
@@ -1,6 +1,6 @@
/* Copy SIZE bytes from SRC to DEST.
For UltraSPARC.
- Copyright (C) 1996, 97, 98, 99 Free Software Foundation, Inc.
+ Copyright (C) 1996, 97, 98, 99, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by David S. Miller (davem@caip.rutgers.edu) and
Jakub Jelinek (jakub@redhat.com).
@@ -26,7 +26,7 @@
#define USE_BPR
.register %g2, #scratch
.register %g3, #scratch
- .register %g7, #scratch
+ .register %g6, #scratch
#define XCC xcc
#endif
#define FPRS_FEF 4
@@ -256,24 +256,24 @@ END(bcopy)
add %o0, 8, %o0 /* IEU0 */
202: membar #LoadStore | #StoreStore | #StoreLoad /* LSU Group */
wr %g0, ASI_BLK_P, %asi /* LSU Group */
- subcc %o2, 0x40, %g7 /* IEU1 Group */
+ subcc %o2, 0x40, %g6 /* IEU1 Group */
mov %o1, %g1 /* IEU0 */
- andncc %g7, (0x40 - 1), %g7 /* IEU1 Group */
+ andncc %g6, (0x40 - 1), %g6 /* IEU1 Group */
srl %g1, 3, %g2 /* IEU0 */
- sub %o2, %g7, %g3 /* IEU0 Group */
+ sub %o2, %g6, %g3 /* IEU0 Group */
andn %o1, (0x40 - 1), %o1 /* IEU1 */
and %g2, 7, %g2 /* IEU0 Group */
andncc %g3, 0x7, %g3 /* IEU1 */
fmovd %f0, %f2 /* FPU */
sub %g3, 0x10, %g3 /* IEU0 Group */
- sub %o2, %g7, %o2 /* IEU1 */
+ sub %o2, %g6, %o2 /* IEU1 */
alignaddr %g1, %g0, %g0 /* GRU Group */
- add %g1, %g7, %g1 /* IEU0 Group */
+ add %g1, %g6, %g1 /* IEU0 Group */
subcc %o2, %g3, %o2 /* IEU1 */
ldda [%o1 + 0x00] %asi, %f0 /* LSU Group */
add %g1, %g3, %g1 /* IEU0 */
ldda [%o1 + 0x40] %asi, %f16 /* LSU Group */
- sub %g7, 0x80, %g7 /* IEU0 */
+ sub %g6, 0x80, %g6 /* IEU0 */
ldda [%o1 + 0x80] %asi, %f32 /* LSU Group */
/* Clk1 Group 8-( */
/* Clk2 Group 8-( */
@@ -286,9 +286,9 @@ END(bcopy)
addcc %o1, 0xc0, %o1 /* IEU1 Group */
.align 512 /* OK, here comes the fun part... */
-300: FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16) LOOP_CHUNK1(o1, o0, g7, 301f)
- FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) LOOP_CHUNK2(o1, o0, g7, 302f)
- FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0) LOOP_CHUNK3(o1, o0, g7, 303f)
+300: FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16) LOOP_CHUNK1(o1, o0, g6, 301f)
+ FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) LOOP_CHUNK2(o1, o0, g6, 302f)
+ FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0) LOOP_CHUNK3(o1, o0, g6, 303f)
b,pt %xcc, 300b+4; faligndata %f0, %f2, %f48
301: FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0) STORE_JUMP(o0, f48, 400f) membar #Sync
@@ -297,9 +297,9 @@ END(bcopy)
303: FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) STORE_JUMP(o0, f48, 432f) membar #Sync
VISLOOP_PAD
-310: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) LOOP_CHUNK1(o1, o0, g7, 311f)
- FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) LOOP_CHUNK2(o1, o0, g7, 312f)
- FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2) LOOP_CHUNK3(o1, o0, g7, 313f)
+310: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) LOOP_CHUNK1(o1, o0, g6, 311f)
+ FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) LOOP_CHUNK2(o1, o0, g6, 312f)
+ FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2) LOOP_CHUNK3(o1, o0, g6, 313f)
b,pt %xcc, 310b+4; faligndata %f2, %f4, %f48
311: FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2) STORE_JUMP(o0, f48, 402f) membar #Sync
@@ -308,9 +308,9 @@ END(bcopy)
313: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) STORE_JUMP(o0, f48, 434f) membar #Sync
VISLOOP_PAD
-320: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) LOOP_CHUNK1(o1, o0, g7, 321f)
- FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) LOOP_CHUNK2(o1, o0, g7, 322f)
- FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4) LOOP_CHUNK3(o1, o0, g7, 323f)
+320: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) LOOP_CHUNK1(o1, o0, g6, 321f)
+ FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) LOOP_CHUNK2(o1, o0, g6, 322f)
+ FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4) LOOP_CHUNK3(o1, o0, g6, 323f)
b,pt %xcc, 320b+4; faligndata %f4, %f6, %f48
321: FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4) STORE_JUMP(o0, f48, 404f) membar #Sync
@@ -319,9 +319,9 @@ END(bcopy)
323: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) STORE_JUMP(o0, f48, 436f) membar #Sync
VISLOOP_PAD
-330: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) LOOP_CHUNK1(o1, o0, g7, 331f)
- FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) LOOP_CHUNK2(o1, o0, g7, 332f)
- FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) LOOP_CHUNK3(o1, o0, g7, 333f)
+330: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) LOOP_CHUNK1(o1, o0, g6, 331f)
+ FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) LOOP_CHUNK2(o1, o0, g6, 332f)
+ FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) LOOP_CHUNK3(o1, o0, g6, 333f)
b,pt %xcc, 330b+4; faligndata %f6, %f8, %f48
331: FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) STORE_JUMP(o0, f48, 406f) membar #Sync
@@ -330,9 +330,9 @@ END(bcopy)
333: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) STORE_JUMP(o0, f48, 438f) membar #Sync
VISLOOP_PAD
-340: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) LOOP_CHUNK1(o1, o0, g7, 341f)
- FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) LOOP_CHUNK2(o1, o0, g7, 342f)
- FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8) LOOP_CHUNK3(o1, o0, g7, 343f)
+340: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) LOOP_CHUNK1(o1, o0, g6, 341f)
+ FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) LOOP_CHUNK2(o1, o0, g6, 342f)
+ FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8) LOOP_CHUNK3(o1, o0, g6, 343f)
b,pt %xcc, 340b+4; faligndata %f8, %f10, %f48
341: FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8) STORE_JUMP(o0, f48, 408f) membar #Sync
@@ -341,9 +341,9 @@ END(bcopy)
343: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) STORE_JUMP(o0, f48, 440f) membar #Sync
VISLOOP_PAD
-350: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) LOOP_CHUNK1(o1, o0, g7, 351f)
- FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) LOOP_CHUNK2(o1, o0, g7, 352f)
- FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10) LOOP_CHUNK3(o1, o0, g7, 353f)
+350: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) LOOP_CHUNK1(o1, o0, g6, 351f)
+ FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) LOOP_CHUNK2(o1, o0, g6, 352f)
+ FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10) LOOP_CHUNK3(o1, o0, g6, 353f)
b,pt %xcc, 350b+4; faligndata %f10, %f12, %f48
351: FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10) STORE_JUMP(o0, f48, 410f) membar #Sync
@@ -352,9 +352,9 @@ END(bcopy)
353: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) STORE_JUMP(o0, f48, 442f) membar #Sync
VISLOOP_PAD
-360: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) LOOP_CHUNK1(o1, o0, g7, 361f)
- FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) LOOP_CHUNK2(o1, o0, g7, 362f)
- FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12) LOOP_CHUNK3(o1, o0, g7, 363f)
+360: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) LOOP_CHUNK1(o1, o0, g6, 361f)
+ FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) LOOP_CHUNK2(o1, o0, g6, 362f)
+ FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12) LOOP_CHUNK3(o1, o0, g6, 363f)
b,pt %xcc, 360b+4; faligndata %f12, %f14, %f48
361: FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12) STORE_JUMP(o0, f48, 412f) membar #Sync
@@ -363,9 +363,9 @@ END(bcopy)
363: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) STORE_JUMP(o0, f48, 444f) membar #Sync
VISLOOP_PAD
-370: FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) LOOP_CHUNK1(o1, o0, g7, 371f)
- FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) LOOP_CHUNK2(o1, o0, g7, 372f)
- FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14) LOOP_CHUNK3(o1, o0, g7, 373f)
+370: FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) LOOP_CHUNK1(o1, o0, g6, 371f)
+ FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) LOOP_CHUNK2(o1, o0, g6, 372f)
+ FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14) LOOP_CHUNK3(o1, o0, g6, 373f)
b,pt %xcc, 370b+4; faligndata %f14, %f16, %f48
371: FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14) STORE_JUMP(o0, f48, 414f) membar #Sync
@@ -476,11 +476,11 @@ ENTRY(__align_cpy_8)
cmp %o2, (64 * 6) /* IEU1 Group */
bgeu,pn %xcc, 201b /* CTI */
andcc %o0, 0x38, %g5 /* IEU1 Group */
- andcc %o2, -128, %g7 /* IEU1 Group */
+ andcc %o2, -128, %g6 /* IEU1 Group */
bne,a,pt %xcc, 82f + 4 /* CTI */
ldx [%o1], %g1 /* Load */
ba,pt %xcc, 41f /* CTI Group */
- andcc %o2, 0x70, %g7 /* IEU1 */
+ andcc %o2, 0x70, %g6 /* IEU1 */
END(__align_cpy_8)
/* void *__align_cpy_16(void *dest, void *src, size_t n)
@@ -494,11 +494,11 @@ ENTRY(__align_cpy_16)
cmp %o2, (64 * 6) /* IEU1 */
bgeu,pn %xcc, 201b /* CTI */
andcc %o0, 0x38, %g5 /* IEU1 Group */
- andcc %o2, -128, %g7 /* IEU1 Group */
+ andcc %o2, -128, %g6 /* IEU1 Group */
bne,a,pt %xcc, 82f + 4 /* CTI */
ldx [%o1], %g1 /* Load */
ba,pt %xcc, 41f /* CTI Group */
- andcc %o2, 0x70, %g7 /* IEU1 */
+ andcc %o2, 0x70, %g6 /* IEU1 */
END(__align_cpy_16)
#endif
@@ -538,13 +538,13 @@ ENTRY(memcpy)
sth %g2, [%o0 - 2] /* Store Group + bubble */
5: andcc %o1, 4, %g0 /* IEU1 */
216: be,a,pn %xcc, 2f /* CTI */
- andcc %o2, -128, %g7 /* IEU1 Group */
+ andcc %o2, -128, %g6 /* IEU1 Group */
lduw [%o1], %g5 /* Load Group */
add %o1, 4, %o1 /* IEU0 */
add %o0, 4, %o0 /* IEU1 */
sub %o2, 4, %o2 /* IEU0 Group */
stw %g5, [%o0 - 4] /* Store */
- andcc %o2, -128, %g7 /* IEU1 Group */
+ andcc %o2, -128, %g6 /* IEU1 Group */
2: be,pn %xcc, 215f /* CTI */
andcc %o0, 4, %g0 /* IEU1 Group */
be,pn %xcc, 82f + 4 /* CTI Group */
@@ -552,11 +552,11 @@ ENTRY(memcpy)
MOVE_BIGCHUNK(o1, o0, 0x20, g1, g3, g5, o5)
MOVE_BIGCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
MOVE_BIGCHUNK(o1, o0, 0x60, g1, g3, g5, o5)
-35: subcc %g7, 128, %g7 /* IEU1 Group */
+35: subcc %g6, 128, %g6 /* IEU1 Group */
add %o1, 128, %o1 /* IEU0 */
bne,pt %xcc, 5b /* CTI */
add %o0, 128, %o0 /* IEU0 Group */
-215: andcc %o2, 0x70, %g7 /* IEU1 Group */
+215: andcc %o2, 0x70, %g6 /* IEU1 Group */
41: be,pn %xcc, 80f /* CTI */
andcc %o2, 8, %g0 /* IEU1 Group */
/* Clk1 8-( */
@@ -564,11 +564,11 @@ ENTRY(memcpy)
/* Clk3 8-( */
/* Clk4 8-( */
79: rd %pc, %o5 /* PDU Group */
- sll %g7, 1, %g5 /* IEU0 Group */
- add %o1, %g7, %o1 /* IEU1 */
+ sll %g6, 1, %g5 /* IEU0 Group */
+ add %o1, %g6, %o1 /* IEU1 */
sub %o5, %g5, %o5 /* IEU0 Group */
jmpl %o5 + %lo(80f - 79b), %g0 /* CTI Group brk forced*/
- add %o0, %g7, %o0 /* IEU0 Group */
+ add %o0, %g6, %o0 /* IEU0 Group */
36: MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g5, o5)
MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g5, o5)
MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g5, o5)
@@ -605,11 +605,11 @@ ENTRY(memcpy)
82: MOVE_BIGALIGNCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
MOVE_BIGALIGNCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
-37: subcc %g7, 128, %g7 /* IEU1 Group */
+37: subcc %g6, 128, %g6 /* IEU1 Group */
add %o1, 128, %o1 /* IEU0 */
bne,pt %xcc, 82b /* CTI */
add %o0, 128, %o0 /* IEU0 Group */
- andcc %o2, 0x70, %g7 /* IEU1 */
+ andcc %o2, 0x70, %g6 /* IEU1 */
be,pn %xcc, 84f /* CTI */
andcc %o2, 8, %g0 /* IEU1 Group */
/* Clk1 8-( */
@@ -617,10 +617,10 @@ ENTRY(memcpy)
/* Clk3 8-( */
/* Clk4 8-( */
83: rd %pc, %o5 /* PDU Group */
- add %o1, %g7, %o1 /* IEU0 Group */
- sub %o5, %g7, %o5 /* IEU1 */
+ add %o1, %g6, %o1 /* IEU0 Group */
+ sub %o5, %g6, %o5 /* IEU1 */
jmpl %o5 + %lo(84f - 83b), %g0 /* CTI Group brk forced*/
- add %o0, %g7, %o0 /* IEU0 Group */
+ add %o0, %g6, %o0 /* IEU0 Group */
38: MOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3)
MOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3)
MOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3)
@@ -756,13 +756,13 @@ ENTRY(memmove)
sth %g2, [%o0] /* Store Group + bubble */
5: andcc %o1, 4, %g0 /* IEU1 */
236: be,a,pn %xcc, 2f /* CTI */
- andcc %o2, -128, %g7 /* IEU1 Group */
+ andcc %o2, -128, %g6 /* IEU1 Group */
lduw [%o1 - 4], %g5 /* Load Group */
sub %o1, 4, %o1 /* IEU0 */
sub %o0, 4, %o0 /* IEU1 */
sub %o2, 4, %o2 /* IEU0 Group */
stw %g5, [%o0] /* Store */
- andcc %o2, -128, %g7 /* IEU1 Group */
+ andcc %o2, -128, %g6 /* IEU1 Group */
2: be,pn %xcc, 235f /* CTI */
andcc %o0, 4, %g0 /* IEU1 Group */
be,pn %xcc, 282f + 4 /* CTI Group */
@@ -770,11 +770,11 @@ ENTRY(memmove)
RMOVE_BIGCHUNK(o1, o0, 0x20, g1, g3, g5, o5)
RMOVE_BIGCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
RMOVE_BIGCHUNK(o1, o0, 0x60, g1, g3, g5, o5)
- subcc %g7, 128, %g7 /* IEU1 Group */
+ subcc %g6, 128, %g6 /* IEU1 Group */
sub %o1, 128, %o1 /* IEU0 */
bne,pt %xcc, 5b /* CTI */
sub %o0, 128, %o0 /* IEU0 Group */
-235: andcc %o2, 0x70, %g7 /* IEU1 Group */
+235: andcc %o2, 0x70, %g6 /* IEU1 Group */
41: be,pn %xcc, 280f /* CTI */
andcc %o2, 8, %g0 /* IEU1 Group */
/* Clk1 8-( */
@@ -782,11 +782,11 @@ ENTRY(memmove)
/* Clk3 8-( */
/* Clk4 8-( */
279: rd %pc, %o5 /* PDU Group */
- sll %g7, 1, %g5 /* IEU0 Group */
- sub %o1, %g7, %o1 /* IEU1 */
+ sll %g6, 1, %g5 /* IEU0 Group */
+ sub %o1, %g6, %o1 /* IEU1 */
sub %o5, %g5, %o5 /* IEU0 Group */
jmpl %o5 + %lo(280f - 279b), %g0 /* CTI Group brk forced*/
- sub %o0, %g7, %o0 /* IEU0 Group */
+ sub %o0, %g6, %o0 /* IEU0 Group */
RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g5, o5)
RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g5, o5)
RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g5, o5)
@@ -823,11 +823,11 @@ ENTRY(memmove)
282: RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
- subcc %g7, 128, %g7 /* IEU1 Group */
+ subcc %g6, 128, %g6 /* IEU1 Group */
sub %o1, 128, %o1 /* IEU0 */
bne,pt %xcc, 282b /* CTI */
sub %o0, 128, %o0 /* IEU0 Group */
- andcc %o2, 0x70, %g7 /* IEU1 */
+ andcc %o2, 0x70, %g6 /* IEU1 */
be,pn %xcc, 284f /* CTI */
andcc %o2, 8, %g0 /* IEU1 Group */
/* Clk1 8-( */
@@ -835,10 +835,10 @@ ENTRY(memmove)
/* Clk3 8-( */
/* Clk4 8-( */
283: rd %pc, %o5 /* PDU Group */
- sub %o1, %g7, %o1 /* IEU0 Group */
- sub %o5, %g7, %o5 /* IEU1 */
+ sub %o1, %g6, %o1 /* IEU0 Group */
+ sub %o5, %g6, %o5 /* IEU1 */
jmpl %o5 + %lo(284f - 283b), %g0 /* CTI Group brk forced*/
- sub %o0, %g7, %o0 /* IEU0 Group */
+ sub %o0, %g6, %o0 /* IEU0 Group */
RMOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3)
RMOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3)
RMOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3)
diff --git a/sysdeps/sparc/sparc64/sparcv9b/memcpy.S b/sysdeps/sparc/sparc64/sparcv9b/memcpy.S
index 529e83a..704aee7 100644
--- a/sysdeps/sparc/sparc64/sparcv9b/memcpy.S
+++ b/sysdeps/sparc/sparc64/sparcv9b/memcpy.S
@@ -1,6 +1,6 @@
/* Copy SIZE bytes from SRC to DEST.
For UltraSPARC-III.
- Copyright (C) 2001 Free Software Foundation, Inc.
+ Copyright (C) 2001, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by David S. Miller (davem@redhat.com)
@@ -547,13 +547,13 @@ ENTRY(memmove)
sth %g2, [%o0] /* Store Group + bubble */
5: andcc %o1, 4, %g0 /* IEU1 */
236: be,a,pn %xcc, 2f /* CTI */
- andcc %o2, -128, %g7 /* IEU1 Group */
+ andcc %o2, -128, %g6 /* IEU1 Group */
lduw [%o1 - 4], %g5 /* Load Group */
sub %o1, 4, %o1 /* IEU0 */
sub %o0, 4, %o0 /* IEU1 */
sub %o2, 4, %o2 /* IEU0 Group */
stw %g5, [%o0] /* Store */
- andcc %o2, -128, %g7 /* IEU1 Group */
+ andcc %o2, -128, %g6 /* IEU1 Group */
2: be,pn %xcc, 235f /* CTI */
andcc %o0, 4, %g0 /* IEU1 Group */
be,pn %xcc, 282f + 4 /* CTI Group */
@@ -561,11 +561,11 @@ ENTRY(memmove)
RMOVE_BIGCHUNK(o1, o0, 0x20, g1, g3, g5, o5)
RMOVE_BIGCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
RMOVE_BIGCHUNK(o1, o0, 0x60, g1, g3, g5, o5)
- subcc %g7, 128, %g7 /* IEU1 Group */
+ subcc %g6, 128, %g6 /* IEU1 Group */
sub %o1, 128, %o1 /* IEU0 */
bne,pt %xcc, 5b /* CTI */
sub %o0, 128, %o0 /* IEU0 Group */
-235: andcc %o2, 0x70, %g7 /* IEU1 Group */
+235: andcc %o2, 0x70, %g6 /* IEU1 Group */
41: be,pn %xcc, 280f /* CTI */
andcc %o2, 8, %g0 /* IEU1 Group */
/* Clk1 8-( */
@@ -573,11 +573,11 @@ ENTRY(memmove)
/* Clk3 8-( */
/* Clk4 8-( */
279: rd %pc, %o5 /* PDU Group */
- sll %g7, 1, %g5 /* IEU0 Group */
- sub %o1, %g7, %o1 /* IEU1 */
+ sll %g6, 1, %g5 /* IEU0 Group */
+ sub %o1, %g6, %o1 /* IEU1 */
sub %o5, %g5, %o5 /* IEU0 Group */
jmpl %o5 + %lo(280f - 279b), %g0 /* CTI Group brk forced*/
- sub %o0, %g7, %o0 /* IEU0 Group */
+ sub %o0, %g6, %o0 /* IEU0 Group */
RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g5, o5)
RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g5, o5)
RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g5, o5)
@@ -614,11 +614,11 @@ ENTRY(memmove)
282: RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
- subcc %g7, 128, %g7 /* IEU1 Group */
+ subcc %g6, 128, %g6 /* IEU1 Group */
sub %o1, 128, %o1 /* IEU0 */
bne,pt %xcc, 282b /* CTI */
sub %o0, 128, %o0 /* IEU0 Group */
- andcc %o2, 0x70, %g7 /* IEU1 */
+ andcc %o2, 0x70, %g6 /* IEU1 */
be,pn %xcc, 284f /* CTI */
andcc %o2, 8, %g0 /* IEU1 Group */
/* Clk1 8-( */
@@ -626,10 +626,10 @@ ENTRY(memmove)
/* Clk3 8-( */
/* Clk4 8-( */
283: rd %pc, %o5 /* PDU Group */
- sub %o1, %g7, %o1 /* IEU0 Group */
- sub %o5, %g7, %o5 /* IEU1 */
+ sub %o1, %g6, %o1 /* IEU0 Group */
+ sub %o5, %g6, %o5 /* IEU1 */
jmpl %o5 + %lo(284f - 283b), %g0 /* CTI Group brk forced*/
- sub %o0, %g7, %o0 /* IEU0 Group */
+ sub %o0, %g6, %o0 /* IEU0 Group */
RMOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3)
RMOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3)
RMOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3)
diff --git a/sysdeps/sparc/sparc64/stpcpy.S b/sysdeps/sparc/sparc64/stpcpy.S
index e9617f4..bec0963 100644
--- a/sysdeps/sparc/sparc64/stpcpy.S
+++ b/sysdeps/sparc/sparc64/stpcpy.S
@@ -1,6 +1,6 @@
/* Copy SRC to DEST returning the address of the terminating '\0' in DEST.
For SPARC v9.
- Copyright (C) 1998, 1999, 2002 Free Software Foundation, Inc.
+ Copyright (C) 1998, 1999, 2002, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jan Vondrak <jvon4518@ss1000.ms.mff.cuni.cz> and
Jakub Jelinek <jj@ultra.linux.cz>.
@@ -25,7 +25,7 @@
#ifndef XCC
.register %g2, #scratch
.register %g3, #scratch
- .register %g7, #scratch
+ .register %g6, #scratch
#endif
/* Normally, this uses
@@ -111,8 +111,8 @@ ENTRY(__stpcpy)
.align 16
6: ba,pt %xcc, 23f /* CTI Group */
- sub %o0, 3, %g7 /* IEU0 */
-5: sub %o0, 2, %g7 /* IEU0 Group */
+ sub %o0, 3, %g6 /* IEU0 */
+5: sub %o0, 2, %g6 /* IEU0 Group */
stb %g5, [%o0 - 2] /* Store */
srlx %g3, 16, %g4 /* IEU0 Group */
@@ -121,27 +121,27 @@ ENTRY(__stpcpy)
stw %g4, [%o0 - 8] /* Store */
retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
8: ba,pt %xcc, 24f /* CTI Group */
- sub %o0, 5, %g7 /* IEU0 */
+ sub %o0, 5, %g6 /* IEU0 */
-7: sub %o0, 4, %g7 /* IEU0 Group */
+7: sub %o0, 4, %g6 /* IEU0 Group */
stb %g5, [%o0 - 4] /* Store */
srlx %g3, 32, %g4 /* IEU0 Group */
24: stw %g4, [%o0 - 8] /* Store */
retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
10: ba,pt %xcc, 25f /* CTI Group */
- sub %o0, 7, %g7 /* IEU0 */
+ sub %o0, 7, %g6 /* IEU0 */
-9: sub %o0, 6, %g7 /* IEU0 Group */
+9: sub %o0, 6, %g6 /* IEU0 Group */
stb %g5, [%o0 - 6] /* Store */
srlx %g3, 48, %g4 /* IEU0 */
25: sth %g4, [%o0 - 8] /* Store Group */
retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
11: stb %g5, [%o0 - 8] /* Store Group */
retl /* CTI+IEU1 Group */
@@ -230,13 +230,13 @@ ENTRY(__stpcpy)
.align 16
17: ba,pt %xcc, 26f /* CTI Group */
- subcc %o0, 3, %g7 /* IEU1 */
+ subcc %o0, 3, %g6 /* IEU1 */
18: ba,pt %xcc, 27f /* CTI Group */
- subcc %o0, 4, %g7 /* IEU1 */
+ subcc %o0, 4, %g6 /* IEU1 */
19: ba,pt %xcc, 28f /* CTI Group */
- subcc %o0, 5, %g7 /* IEU1 */
-16: subcc %o0, 2, %g7 /* IEU1 Group */
+ subcc %o0, 5, %g6 /* IEU1 */
+16: subcc %o0, 2, %g6 /* IEU1 Group */
srlx %o3, 8, %o4 /* IEU0 */
stb %o4, [%o0 - 2] /* Store */
@@ -249,15 +249,15 @@ ENTRY(__stpcpy)
stw %o4, [%o0 - 8] /* Store */
retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
.align 16
21: ba,pt %xcc, 29f /* CTI Group */
- subcc %o0, 7, %g7 /* IEU1 */
+ subcc %o0, 7, %g6 /* IEU1 */
22: ba,pt %xcc, 30f /* CTI Group */
- subcc %o0, 8, %g7 /* IEU1 */
+ subcc %o0, 8, %g6 /* IEU1 */
-20: subcc %o0, 6, %g7 /* IEU1 Group */
+20: subcc %o0, 6, %g6 /* IEU1 Group */
srlx %o3, 40, %o4 /* IEU0 */
stb %o4, [%o0 - 6] /* Store */
29: srlx %o3, 48, %o4 /* IEU0 Group */
@@ -267,7 +267,7 @@ ENTRY(__stpcpy)
stb %o4, [%o0 - 8] /* Store */
retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
END(__stpcpy)
weak_alias (__stpcpy, stpcpy)
diff --git a/sysdeps/sparc/sparc64/stpncpy.S b/sysdeps/sparc/sparc64/stpncpy.S
index ebd0025..c1ea820 100644
--- a/sysdeps/sparc/sparc64/stpncpy.S
+++ b/sysdeps/sparc/sparc64/stpncpy.S
@@ -2,7 +2,7 @@
SRC to DEST, returning the address of the terminating '\0' in
DEST, if any, or else DEST + N.
For SPARC v9.
- Copyright (C) 1998, 1999, 2002 Free Software Foundation, Inc.
+ Copyright (C) 1998, 1999, 2002, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jj@ultra.linux.cz> and
Jan Vondrak <jvon4518@ss1000.ms.mff.cuni.cz>.
@@ -29,7 +29,7 @@
#define USE_BPR
.register %g2, #scratch
.register %g3, #scratch
- .register %g7, #scratch
+ .register %g6, #scratch
#endif
/* Normally, this uses
@@ -112,7 +112,7 @@ ENTRY(__stpncpy)
srlx %g3, 8, %g5 /* IEU0 */
andcc %g5, 0xff, %g0 /* IEU1 Group */
be,pn %icc, 10f /* CTI */
- sub %o0, 1, %g7 /* IEU0 */
+ sub %o0, 1, %g6 /* IEU0 */
andcc %g3, 0xff, %g0 /* IEU1 Group */
bne,pt %icc, 2b /* CTI */
@@ -156,31 +156,31 @@ ENTRY(__stpncpy)
stb %g0, [%o0] /* Store */
9: retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
-10: subcc %o0, 2, %g7 /* IEU1 Group */
+ mov %g6, %o0 /* IEU0 */
+10: subcc %o0, 2, %g6 /* IEU1 Group */
ba,pt %xcc, 3b /* CTI */
sllx %g5, 8, %g3 /* IEU0 */
-11: subcc %o0, 3, %g7 /* IEU1 Group */
+11: subcc %o0, 3, %g6 /* IEU1 Group */
ba,pt %xcc, 3b /* CTI */
sllx %g4, 16, %g3 /* IEU0 */
-12: subcc %o0, 4, %g7 /* IEU1 Group */
+12: subcc %o0, 4, %g6 /* IEU1 Group */
ba,pt %xcc, 3b /* CTI */
sllx %g5, 24, %g3 /* IEU0 */
-13: subcc %o0, 5, %g7 /* IEU1 Group */
+13: subcc %o0, 5, %g6 /* IEU1 Group */
ba,pt %xcc, 3b /* CTI */
sllx %g4, 32, %g3 /* IEU0 */
-14: subcc %o0, 6, %g7 /* IEU1 Group */
+14: subcc %o0, 6, %g6 /* IEU1 Group */
ba,pt %xcc, 3b /* CTI */
sllx %g5, 40, %g3 /* IEU0 */
-15: subcc %o0, 7, %g7 /* IEU1 Group */
+15: subcc %o0, 7, %g6 /* IEU1 Group */
ba,pt %xcc, 3b /* CTI */
sllx %g4, 48, %g3 /* IEU0 */
-16: subcc %o0, 8, %g7 /* IEU1 Group */
+16: subcc %o0, 8, %g6 /* IEU1 Group */
ba,pt %xcc, 3b /* CTI */
clr %g3 /* IEU0 */
@@ -255,7 +255,7 @@ ENTRY(__stpncpy)
19: retl /* CTI+IEU1 Group */
nop /* IEU0 */
-20: mov %o0, %g7 /* IEU0 Group */
+20: mov %o0, %g6 /* IEU0 Group */
subcc %o2, 1, %o2 /* IEU1 */
be,pn %XCC, 51f /* CTI */
add %o0, 1, %o0 /* IEU0 Group */
@@ -266,7 +266,7 @@ ENTRY(__stpncpy)
add %o0, 1, %o0 /* IEU0 */
51: retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
.align 16
21: andcc %o2, 4, %g0 /* IEU1 Group */
@@ -295,7 +295,7 @@ ENTRY(__stpncpy)
add %o0, 1, %o0 /* IEU0 */
.align 16
-55: sub %o0, 1, %g7 /* IEU0 Group */
+55: sub %o0, 1, %g6 /* IEU0 Group */
25: andcc %o0, 7, %g0 /* IEU1 */
be,a,pn %icc, 4b /* CTI */
andncc %o2, 31, %g3 /* IEU1 Group */
@@ -306,7 +306,7 @@ ENTRY(__stpncpy)
add %o0, 1, %o0 /* IEU0 Group */
retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
.align 16
26: ldub [%o1], %o3 /* Load */
@@ -363,23 +363,23 @@ ENTRY(__stpncpy)
andcc %o4, 0xff, %g0 /* IEU1 Group */
be,pn %icc, 36f /* CTI */
- srlx %o3, 48, %g7 /* IEU0 */
- andcc %g7, 0xff, %g0 /* IEU1 Group */
+ srlx %o3, 48, %g6 /* IEU0 */
+ andcc %g6, 0xff, %g0 /* IEU1 Group */
be,pn %icc, 35f /* CTI */
srlx %o3, 40, %o4 /* IEU0 */
andcc %o4, 0xff, %g0 /* IEU1 Group */
be,pn %icc, 34f /* CTI */
- srlx %o3, 32, %g7 /* IEU0 */
+ srlx %o3, 32, %g6 /* IEU0 */
- andcc %g7, 0xff, %g0 /* IEU1 Group */
+ andcc %g6, 0xff, %g0 /* IEU1 Group */
be,pn %icc, 33f /* CTI */
srlx %o3, 24, %o4 /* IEU0 */
andcc %o4, 0xff, %g0 /* IEU1 Group */
be,pn %icc, 32f /* CTI */
- srlx %o3, 16, %g7 /* IEU0 */
- andcc %g7, 0xff, %g0 /* IEU1 Group */
+ srlx %o3, 16, %g6 /* IEU0 */
+ andcc %g6, 0xff, %g0 /* IEU1 Group */
be,pn %icc, 31f /* CTI */
srlx %o3, 8, %o4 /* IEU0 */
@@ -389,33 +389,33 @@ ENTRY(__stpncpy)
bne,pn %icc, 29b /* CTI */
stx %o3, [%o0-8] /* Store */
- sub %o0, 1, %g7 /* IEU0 Group */
+ sub %o0, 1, %g6 /* IEU0 Group */
ba,pt %xcc, 4b /* CTI */
andncc %o2, 31, %g3 /* IEU1 */
-30: subcc %o0, 2, %g7 /* IEU0 */
+30: subcc %o0, 2, %g6 /* IEU0 */
ba,pt %xcc, 3b /* CTI */
sllx %o4, 8, %g3 /* IEU0 Group */
-31: sllx %g7, 16, %g3 /* IEU0 Group */
+31: sllx %g6, 16, %g3 /* IEU0 Group */
ba,pt %xcc, 3b /* CTI */
- sub %o0, 3, %g7 /* IEU1 */
-32: subcc %o0, 4, %g7 /* IEU1 Group */
+ sub %o0, 3, %g6 /* IEU1 */
+32: subcc %o0, 4, %g6 /* IEU1 Group */
ba,pt %xcc, 3b /* CTI */
sllx %o4, 24, %g3 /* IEU0 */
-33: sllx %g7, 32, %g3 /* IEU0 Group */
+33: sllx %g6, 32, %g3 /* IEU0 Group */
ba,pt %xcc, 3b /* CTI */
- sub %o0, 5, %g7 /* IEU1 */
-34: subcc %o0, 6, %g7 /* IEU1 Group */
+ sub %o0, 5, %g6 /* IEU1 */
+34: subcc %o0, 6, %g6 /* IEU1 Group */
ba,pt %xcc, 3b /* CTI */
sllx %o4, 40, %g3 /* IEU0 */
-35: sllx %g7, 48, %g3 /* IEU0 Group */
+35: sllx %g6, 48, %g3 /* IEU0 Group */
ba,pt %xcc, 3b /* CTI */
- sub %o0, 7, %g7 /* IEU1 */
-36: subcc %o0, 8, %g7 /* IEU1 Group */
+ sub %o0, 7, %g6 /* IEU1 */
+36: subcc %o0, 8, %g6 /* IEU1 Group */
ba,pt %xcc, 3b /* CTI */
sllx %o4, 56, %g3 /* IEU0 */
diff --git a/sysdeps/sparc/sparc64/strcat.S b/sysdeps/sparc/sparc64/strcat.S
index 3bb27a9..85954a2 100644
--- a/sysdeps/sparc/sparc64/strcat.S
+++ b/sysdeps/sparc/sparc64/strcat.S
@@ -1,6 +1,6 @@
/* strcat (dest, src) -- Append SRC on the end of DEST.
For SPARC v9.
- Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+ Copyright (C) 1998, 1999, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jj@ultra.linux.cz> and
Jan Vondrak <jvon4518@ss1000.ms.mff.cuni.cz>.
@@ -27,7 +27,7 @@
#define USE_BPR
.register %g2, #scratch
.register %g3, #scratch
- .register %g7, #scratch
+ .register %g6, #scratch
#endif
/* Normally, this uses
@@ -52,7 +52,7 @@ ENTRY(strcat)
sethi %hi(0x01010101), %g1 /* IEU0 Group */
ldub [%o0], %o3 /* Load */
or %g1, %lo(0x01010101), %g1 /* IEU0 Group */
- mov %o0, %g7 /* IEU1 */
+ mov %o0, %g6 /* IEU1 */
sllx %g1, 32, %g2 /* IEU0 Group */
andcc %o0, 7, %g0 /* IEU1 */
@@ -192,7 +192,7 @@ ENTRY(strcat)
bne,pt %icc, 3b /* CTI */
mov %o3, %g3 /* IEU0 Group */
4: retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
.align 16
5: stb %g5, [%o0 - 2] /* Store Group */
@@ -202,23 +202,23 @@ ENTRY(strcat)
stw %g4, [%o0 - 8] /* Store Group */
retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
7: stb %g5, [%o0 - 4] /* Store Group */
srlx %g3, 32, %g4 /* IEU0 */
8: stw %g4, [%o0 - 8] /* Store Group */
retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
9: stb %g5, [%o0 - 6] /* Store Group */
srlx %g3, 48, %g4 /* IEU0 */
10: sth %g4, [%o0 - 8] /* Store Group */
retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
11: stb %g5, [%o0 - 8] /* Store Group */
retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
.align 16
32: andcc %o0, 7, %g0 /* IEU1 Group */
@@ -309,7 +309,7 @@ ENTRY(strcat)
stx %o3, [%o0 - 8] /* Store */
retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
.align 16
16: srlx %o3, 8, %o4 /* IEU0 Group */
@@ -323,7 +323,7 @@ ENTRY(strcat)
stw %o4, [%o0 - 8] /* Store */
retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
nop
nop
@@ -335,5 +335,5 @@ ENTRY(strcat)
22: srlx %o3, 56, %o4 /* IEU0 Group */
stb %o4, [%o0 - 8] /* Store */
retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
END(strcat)
diff --git a/sysdeps/sparc/sparc64/strchr.S b/sysdeps/sparc/sparc64/strchr.S
index 8349148..3c976eb 100644
--- a/sysdeps/sparc/sparc64/strchr.S
+++ b/sysdeps/sparc/sparc64/strchr.S
@@ -1,6 +1,6 @@
/* strchr (str, ch) -- Return pointer to first occurrence of CH in STR.
For SPARC v9.
- Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+ Copyright (C) 1998, 1999, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jan Vondrak <jvon4518@ss1000.ms.mff.cuni.cz> and
Jakub Jelinek <jj@ultra.linux.cz>.
@@ -27,7 +27,7 @@
#define USE_BPR
.register %g2, #scratch
.register %g3, #scratch
- .register %g7, #scratch
+ .register %g6, #scratch
#endif
/* Normally, this uses
@@ -86,10 +86,10 @@ ENTRY(strchr)
sub %o4, %g1, %o5 /* IEU1 */
#ifdef EIGHTBIT_NOT_RARE
- andn %o2, %o3, %g7 /* IEU0 Group */
+ andn %o2, %o3, %g6 /* IEU0 Group */
andn %o5, %o4, %o5 /* IEU1 */
ldxa [%o0] ASI_PNF, %o3 /* Load */
- or %o5, %g7, %o5 /* IEU0 Group */
+ or %o5, %g6, %o5 /* IEU0 Group */
#else
ldxa [%o0] ASI_PNF, %o3 /* Load */
or %o5, %o2, %o5 /* IEU0 Group */
@@ -240,9 +240,9 @@ ENTRY(strchr)
19: sub %o3, %g1, %o2 /* IEU0 Group */
#ifdef EIGHTBIT_NOT_RARE
- andn %o2, %o3, %g7 /* IEU0 Group */
+ andn %o2, %o3, %g6 /* IEU0 Group */
ldxa [%o0] ASI_PNF, %o3 /* Load */
- andcc %g7, %g2, %g0 /* IEU1 Group */
+ andcc %g6, %g2, %g0 /* IEU1 Group */
#else
ldxa [%o0] ASI_PNF, %o3 /* Load */
andcc %o2, %g2, %g0 /* IEU1 Group */
@@ -364,11 +364,11 @@ ENTRY(strrchr)
3: sub %o4, %g1, %o5 /* IEU1 */
#ifdef EIGHTBIT_NOT_RARE
- andn %o2, %o3, %g7 /* IEU0 Group */
+ andn %o2, %o3, %g6 /* IEU0 Group */
andn %o5, %o4, %o5 /* IEU1 */
ldxa [%o0] ASI_PNF, %o3 /* Load */
- or %o5, %g7, %o5 /* IEU0 Group */
+ or %o5, %g6, %o5 /* IEU0 Group */
#else
ldxa [%o0] ASI_PNF, %o3 /* Load */
diff --git a/sysdeps/sparc/sparc64/strcmp.S b/sysdeps/sparc/sparc64/strcmp.S
index 54fd0e5..a69368c 100644
--- a/sysdeps/sparc/sparc64/strcmp.S
+++ b/sysdeps/sparc/sparc64/strcmp.S
@@ -1,6 +1,6 @@
/* Compare two strings for differences.
For SPARC v9.
- Copyright (C) 1997, 1999 Free Software Foundation, Inc.
+ Copyright (C) 1997, 1999, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jan Vondrak <jvon4518@ss1000.ms.mff.cuni.cz> and
Jakub Jelinek <jj@ultra.linux.cz>.
@@ -25,7 +25,7 @@
#ifndef XCC
.register %g2, #scratch
.register %g3, #scratch
- .register %g7, #scratch
+ .register %g6, #scratch
#endif
/* Normally, this uses
@@ -117,7 +117,7 @@ ENTRY(strcmp)
clr %o0 /* IEU0 */
.align 32
-13: mov 0xff, %g7 /* IEU0 Group */
+13: mov 0xff, %g6 /* IEU0 Group */
#ifdef EIGHTBIT_NOT_RARE
andcc %g4, %g2, %g0 /* IEU1 */
#else
@@ -129,34 +129,34 @@ ENTRY(strcmp)
srlx %g3, 32, %g3 /* IEU0 */
andcc %g3, %g2, %g0 /* IEU1 Group */
be,pt %xcc, 23f /* CTI */
- sllx %g7, 56, %o5 /* IEU0 */
+ sllx %g6, 56, %o5 /* IEU0 */
andcc %o4, %o5, %g0 /* IEU1 Group */
be,pn %xcc, 24f /* CTI */
- sllx %g7, 48, %o5 /* IEU0 */
+ sllx %g6, 48, %o5 /* IEU0 */
andcc %o4, %o5, %g0 /* IEU1 Group */
be,pn %xcc, 24f /* CTI */
- sllx %g7, 40, %o5 /* IEU0 */
+ sllx %g6, 40, %o5 /* IEU0 */
andcc %o4, %o5, %g0 /* IEU1 Group */
be,pn %xcc, 24f /* CTI */
- sllx %g7, 32, %o5 /* IEU0 */
+ sllx %g6, 32, %o5 /* IEU0 */
andcc %o4, %o5, %g0 /* IEU1 Group */
be,pn %xcc, 24f /* CTI */
-23: sllx %g7, 24, %o5 /* IEU0 */
+23: sllx %g6, 24, %o5 /* IEU0 */
andcc %o4, %o5, %g0 /* IEU1 Group */
be,pn %icc, 24f /* CTI */
- sllx %g7, 16, %o5 /* IEU0 */
+ sllx %g6, 16, %o5 /* IEU0 */
andcc %o4, %o5, %g0 /* IEU1 Group */
be,pn %icc, 24f /* CTI */
- sllx %g7, 8, %o5 /* IEU0 */
+ sllx %g6, 8, %o5 /* IEU0 */
andcc %o4, %o5, %g0 /* IEU1 Group */
be,pn %icc, 24f /* CTI */
- mov %g7, %o5 /* IEU0 */
+ mov %g6, %o5 /* IEU0 */
25: cmp %o4, %o3 /* IEU1 Group */
5: mov -1, %o0 /* IEU0 */
retl /* CTI+IEU1 Group */
@@ -164,9 +164,9 @@ ENTRY(strcmp)
movgu %xcc, 1, %o0 /* Single Group */
.align 16
-24: sub %o5, 1, %g7 /* IEU0 Group */
+24: sub %o5, 1, %g6 /* IEU0 Group */
clr %o0 /* IEU1 */
- or %o5, %g7, %o5 /* IEU0 Group */
+ or %o5, %g6, %o5 /* IEU0 Group */
andn %o4, %o5, %o4 /* IEU0 Group */
andn %o3, %o5, %o3 /* IEU1 */
@@ -205,7 +205,7 @@ ENTRY(strcmp)
sub %o1, %g3, %o1 /* IEU0 Group */
sub %o5, %g5, %o5 /* IEU1 */
- ldxa [%o1] ASI_PNF, %g7 /* Load Group */
+ ldxa [%o1] ASI_PNF, %g6 /* Load Group */
or %g1, %g2, %g1 /* IEU0 */
sub %o1, %o0, %o1 /* IEU1 */
@@ -215,10 +215,10 @@ ENTRY(strcmp)
 * %g2 = 8080808080808080
* %g5 = number of bits to shift left
* %o5 = number of bits to shift right */
-10: sllx %g7, %g5, %o3 /* IEU0 Group */
- ldxa [%o1 + %o0] ASI_PNF, %g7 /* Load */
+10: sllx %g6, %g5, %o3 /* IEU0 Group */
+ ldxa [%o1 + %o0] ASI_PNF, %g6 /* Load */
-11: srlx %g7, %o5, %o4 /* IEU0 Group */
+11: srlx %g6, %o5, %o4 /* IEU0 Group */
ldxa [%o0] ASI_PNF, %o2 /* Load */
or %o3, %o4, %o3 /* IEU1 */
add %o0, 8, %o0 /* IEU0 Group */
@@ -272,8 +272,8 @@ ENTRY(strcmp)
be,pn %icc, 4b /* CTI */
andcc %o2, 0xff, %g0 /* IEU1 Group */
be,pn %icc, 4b /* CTI */
- sllx %g7, %g5, %o3 /* IEU0 */
+ sllx %g6, %g5, %o3 /* IEU0 */
ba,pt %xcc, 11b /* CTI Group */
- ldxa [%o1 + %o0] ASI_PNF, %g7 /* Load */
+ ldxa [%o1 + %o0] ASI_PNF, %g6 /* Load */
END(strcmp)
diff --git a/sysdeps/sparc/sparc64/strcpy.S b/sysdeps/sparc/sparc64/strcpy.S
index 170f041..f29fb2a 100644
--- a/sysdeps/sparc/sparc64/strcpy.S
+++ b/sysdeps/sparc/sparc64/strcpy.S
@@ -1,6 +1,6 @@
/* Copy SRC to DEST returning DEST.
For SPARC v9.
- Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+ Copyright (C) 1998, 1999, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jan Vondrak <jvon4518@ss1000.ms.mff.cuni.cz> and
Jakub Jelinek <jj@ultra.linux.cz>.
@@ -25,7 +25,7 @@
#ifndef XCC
.register %g2, #scratch
.register %g3, #scratch
- .register %g7, #scratch
+ .register %g6, #scratch
#endif
/* Normally, this uses
@@ -48,7 +48,7 @@
.align 32
ENTRY(strcpy)
sethi %hi(0x01010101), %g1 /* IEU0 Group */
- mov %o0, %g7 /* IEU1 */
+ mov %o0, %g6 /* IEU1 */
or %g1, %lo(0x01010101), %g1 /* IEU0 Group */
andcc %o0, 7, %g0 /* IEU1 */
@@ -108,7 +108,7 @@ ENTRY(strcpy)
bne,pt %icc, 3b /* CTI */
mov %o3, %g3 /* IEU0 Group */
4: retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
.align 16
5: stb %g5, [%o0 - 2] /* Store Group */
@@ -118,23 +118,23 @@ ENTRY(strcpy)
stw %g4, [%o0 - 8] /* Store Group */
retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
7: stb %g5, [%o0 - 4] /* Store Group */
srlx %g3, 32, %g4 /* IEU0 */
8: stw %g4, [%o0 - 8] /* Store Group */
retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
9: stb %g5, [%o0 - 6] /* Store Group */
srlx %g3, 48, %g4 /* IEU0 */
10: sth %g4, [%o0 - 8] /* Store Group */
retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
11: stb %g5, [%o0 - 8] /* Store Group */
retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
12: or %g1, %g2, %g1 /* IEU0 Group */
ldub [%o1], %o3 /* Load */
@@ -214,7 +214,7 @@ ENTRY(strcpy)
stx %o3, [%o0 - 8] /* Store */
retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
.align 16
16: srlx %o3, 8, %o4 /* IEU0 Group */
@@ -228,7 +228,7 @@ ENTRY(strcpy)
stw %o4, [%o0 - 8] /* Store */
retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
nop
nop
@@ -240,5 +240,5 @@ ENTRY(strcpy)
22: srlx %o3, 56, %o4 /* IEU0 Group */
stb %o4, [%o0 - 8] /* Store */
retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
END(strcpy)
diff --git a/sysdeps/sparc/sparc64/strncmp.S b/sysdeps/sparc/sparc64/strncmp.S
index 31fcfee..5a2c288 100644
--- a/sysdeps/sparc/sparc64/strncmp.S
+++ b/sysdeps/sparc/sparc64/strncmp.S
@@ -2,7 +2,7 @@
equal to or greater than zero if S1 is lexicographically less than,
equal to or greater than S2.
For SPARC v9.
- Copyright (C) 1997, 1999 Free Software Foundation, Inc.
+ Copyright (C) 1997, 1999, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jan Vondrak <jvon4518@ss1000.ms.mff.cuni.cz> and
Jakub Jelinek <jj@ultra.linux.cz>.
@@ -29,7 +29,7 @@
#define USE_BPR
.register %g2, #scratch
.register %g3, #scratch
- .register %g7, #scratch
+ .register %g6, #scratch
#endif
/* Normally, this uses
@@ -79,14 +79,14 @@ ENTRY(strncmp)
sub %g4, %g1, %g3 /* IEU0 Group */
subcc %g4, %o3, %o4 /* IEU1 */
#ifdef EIGHTBIT_NOT_RARE
- andn %g3, %g4, %g7 /* IEU0 Group */
+ andn %g3, %g4, %g6 /* IEU0 Group */
#endif
bne,pn %xcc, 6f /* CTI */
ldxa [%o0] ASI_PNF, %g4 /* Load Group */
add %o0, 8, %o0 /* IEU0 */
#ifdef EIGHTBIT_NOT_RARE
- andcc %g7, %g2, %g0 /* IEU1 */
+ andcc %g6, %g2, %g0 /* IEU1 */
#else
andcc %g3, %g2, %g0 /* IEU1 */
#endif
@@ -95,8 +95,8 @@ ENTRY(strncmp)
addcc %g3, %g1, %o4 /* IEU1 */
#ifdef EIGHTBIT_NOT_RARE
- srlx %g7, 32, %g7 /* IEU0 */
- andcc %g7, %g2, %g0 /* IEU1 Group */
+ srlx %g6, 32, %g6 /* IEU0 */
+ andcc %g6, %g2, %g0 /* IEU1 Group */
#else
srlx %g3, 32, %g3 /* IEU0 */
andcc %g3, %g2, %g0 /* IEU1 Group */
@@ -289,11 +289,11 @@ ENTRY(strncmp)
ldxa [%o0] ASI_PNF, %g4 /* Load */
11: sllx %g3, 3, %g5 /* IEU0 Group */
- mov 64, %g7 /* IEU1 */
+ mov 64, %g6 /* IEU1 */
or %g1, %g2, %g1 /* IEU0 Group */
sub %o1, %g3, %o1 /* IEU1 */
- sub %g7, %g5, %g7 /* IEU0 Group */
+ sub %g6, %g5, %g6 /* IEU0 Group */
ldxa [%o1] ASI_PNF, %o4 /* Load */
sllx %g1, 7, %g2 /* IEU1 */
add %o1, 8, %o1 /* IEU0 Group */
@@ -301,7 +301,7 @@ ENTRY(strncmp)
%g2 = 8080808080808080
%g3 = %o1 alignment
%g5 = number of bits to shift left
- %g7 = number of bits to shift right */
+ %g6 = number of bits to shift right */
12: sllx %o4, %g5, %o3 /* IEU0 Group */
ldxa [%o1] ASI_PNF, %o4 /* Load */
@@ -309,7 +309,7 @@ ENTRY(strncmp)
13: ldxa [%o0] ASI_PNF, %g4 /* Load Group */
addcc %o0, 8, %o0 /* IEU1 */
- srlx %o4, %g7, %o5 /* IEU0 */
+ srlx %o4, %g6, %o5 /* IEU0 */
subcc %o2, 8, %o2 /* IEU1 Group */
bl,pn %XCC, 5b /* CTI */
diff --git a/sysdeps/sparc/sparc64/strncpy.S b/sysdeps/sparc/sparc64/strncpy.S
index 1fec9b4..43ab5f6 100644
--- a/sysdeps/sparc/sparc64/strncpy.S
+++ b/sysdeps/sparc/sparc64/strncpy.S
@@ -2,7 +2,7 @@
null-terminated string from SRC to DST. If SRC does not cover all of
COUNT, the balance is zeroed.
For SPARC v9.
- Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+ Copyright (C) 1998, 1999, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jan Vondrak <jvon4518@ss1000.ms.mff.cuni.cz> and
Jakub Jelinek <jj@ultra.linux.cz>.
@@ -29,7 +29,7 @@
#define USE_BPR
.register %g2, #scratch
.register %g3, #scratch
- .register %g7, #scratch
+ .register %g6, #scratch
#endif
/* Normally, this uses
@@ -58,7 +58,7 @@ ENTRY(strncpy)
tst %o2 /* IEU1 */
be,pn %XCC, 19f /* CTI */
#endif
- mov %o0, %g7 /* IEU0 Group */
+ mov %o0, %g6 /* IEU0 Group */
or %g1, %lo(0x01010101), %g1 /* IEU1 */
andcc %o0, 7, %g0 /* IEU1 Group */
@@ -156,7 +156,7 @@ ENTRY(strncpy)
stb %g0, [%o0] /* Store */
9: retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
.align 16
10: ba,pt %xcc, 3b /* CTI */
@@ -240,14 +240,14 @@ ENTRY(strncpy)
stb %g5, [%o0] /* Store */
19: retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
50: stb %g0, [%o0] /* Store Group */
20: subcc %o2, 1, %o2 /* IEU1 Group */
bne,pt %XCC, 50b /* CTI */
add %o0, 1, %o0 /* IEU0 */
retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
21: andcc %o2, 4, %g0 /* IEU1 Group */
be,pn %icc, 22f /* CTI */
@@ -270,7 +270,7 @@ ENTRY(strncpy)
stb %g4, [%o0] /* Store Group */
24: retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
25: andcc %o0, 7, %g0 /* IEU1 Group */
be,a,pn %icc, 4b /* CTI */
@@ -281,7 +281,7 @@ ENTRY(strncpy)
add %o0, 1, %o0 /* IEU0 Group */
retl /* CTI+IEU1 Group */
- mov %g7, %o0 /* IEU0 */
+ mov %g6, %o0 /* IEU0 */
.align 16
26: ldub [%o1], %o3 /* Load */