Diffstat (limited to 'sysdeps/powerpc')

-rw-r--r--  sysdeps/powerpc/add_n.S          | 16 ++++++++--------
-rw-r--r--  sysdeps/powerpc/addmul_1.S       | 10 +++++-----
-rw-r--r--  sysdeps/powerpc/fpu/s_copysign.S |  6 +++---
-rw-r--r--  sysdeps/powerpc/lshift.S         | 10 +++++-----
-rw-r--r--  sysdeps/powerpc/memset.S         | 26 +++++++++++++-------------
-rw-r--r--  sysdeps/powerpc/mul_1.S          | 10 +++++-----
-rw-r--r--  sysdeps/powerpc/rshift.S         | 14 +++++++-------
-rw-r--r--  sysdeps/powerpc/stpcpy.S         | 26 +++++++++++++-------------
-rw-r--r--  sysdeps/powerpc/strcmp.S         | 28 ++++++++++++++--------------
-rw-r--r--  sysdeps/powerpc/strcpy.S         | 26 +++++++++++++-------------
-rw-r--r--  sysdeps/powerpc/sub_n.S          | 16 ++++++++--------
-rw-r--r--  sysdeps/powerpc/submul_1.S       | 10 +++++-----

12 files changed, 99 insertions(+), 99 deletions(-)
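The patch below is mechanical: every bare numeric local label (0:, 1:, 2:, referenced as 0f/0b for the next/previous definition) becomes a named local label built with glibc's L() macro, the copyright years gain 2000, and one apparently unreferenced 0: label in memset.S is dropped outright. As a rough sketch of the convention — the real definition lives in glibc's sysdep.h headers and may differ in detail — L() pastes its argument onto a .L prefix on ELF targets:

	/* Sketch only, modeled on glibc's sysdep.h; not copied from it.  */
	#ifdef __ELF__
	# define L(name) .L##name  /* GAS treats .L names as local and omits
	                              them from the symbol table */
	#else
	# define L(name) name
	#endif

	/* Before:                      After:
		bt	31,2f           	bt	31,L(2)
		...                     	...
	2:	lwz	r7,0(r5)        L(2):	lwz	r7,0(r5)
	*/

Besides reading better than 0f/1b chains, .L-prefixed labels stay out of the ELF symbol table, so debuggers and profilers will not mistake these loop-internal labels for entry points.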
diff --git a/sysdeps/powerpc/add_n.S b/sysdeps/powerpc/add_n.S
index 88f5b9c..7b683b7 100644
--- a/sysdeps/powerpc/add_n.S
+++ b/sysdeps/powerpc/add_n.S
@@ -1,5 +1,5 @@
 /* Add two limb vectors of equal, non-zero length for PowerPC.
-   Copyright (C) 1997, 1999 Free Software Foundation, Inc.
+   Copyright (C) 1997, 1999, 2000 Free Software Foundation, Inc.
    This file is part of the GNU C Library.

    The GNU C Library is free software; you can redistribute it and/or
@@ -33,7 +33,7 @@ EALIGN(__mpn_add_n,3,0)
 	srwi.	r7,r6,1
 	li	r10,0
 	mtctr	r7
-	bt	31,2f
+	bt	31,L(2)

 /* Clear the carry.  */
 	addic	r0,r0,0
@@ -41,19 +41,19 @@ EALIGN(__mpn_add_n,3,0)
 	addi	r3,r3,-4
 	addi	r4,r4,-4
 	addi	r5,r5,-4
-	b	0f
+	b	L(0)

-2:	lwz	r7,0(r5)
+L(2):	lwz	r7,0(r5)
 	lwz	r6,0(r4)
 	addc	r6,r6,r7
 	stw	r6,0(r3)
-	beq	1f
+	beq	L(1)

 /* The loop.  */

 /* Align start of loop to an odd word boundary to guarantee that the
    last two words can be fetched in one access (for 601).  */
-0:	lwz	r9,4(r4)
+L(0):	lwz	r9,4(r4)
 	lwz	r8,4(r5)
 	lwzu	r6,8(r4)
 	lwzu	r7,8(r5)
@@ -61,8 +61,8 @@ EALIGN(__mpn_add_n,3,0)
 	stw	r8,4(r3)
 	adde	r6,r6,r7
 	stwu	r6,8(r3)
-	bdnz	0b
+	bdnz	L(0)
 /* Return the carry.  */
-1:	addze	r3,r10
+L(1):	addze	r3,r10
 	blr
 END(__mpn_add_n)
diff --git a/sysdeps/powerpc/addmul_1.S b/sysdeps/powerpc/addmul_1.S
index b7d5066..2ce4fa2 100644
--- a/sysdeps/powerpc/addmul_1.S
+++ b/sysdeps/powerpc/addmul_1.S
@@ -1,5 +1,5 @@
 /* Multiply a limb vector by a single limb, for PowerPC.
-   Copyright (C) 1993, 1994, 1995, 1997, 1999 Free Software Foundation, Inc.
+   Copyright (C) 1993-1995, 1997, 1999, 2000 Free Software Foundation, Inc.
    This file is part of the GNU C Library.

    The GNU C Library is free software; you can redistribute it and/or
@@ -31,9 +31,9 @@ ENTRY(__mpn_addmul_1)
 	lwz	r9,0(r3)
 	addc	r8,r7,r9
 	addi	r3,r3,-4	/* adjust res_ptr */
-	bdz	1f
+	bdz	L(1)

-0:	lwzu	r0,4(r4)
+L(0):	lwzu	r0,4(r4)
 	stwu	r8,4(r3)
 	mullw	r8,r0,r6
 	adde	r7,r8,r10
@@ -41,9 +41,9 @@ ENTRY(__mpn_addmul_1)
 	lwz	r9,4(r3)
 	addze	r10,r10
 	addc	r8,r7,r9
-	bdnz	0b
+	bdnz	L(0)

-1:	stw	r8,4(r3)
+L(1):	stw	r8,4(r3)
 	addze	r3,r10
 	blr
 END(__mpn_addmul_1)
diff --git a/sysdeps/powerpc/fpu/s_copysign.S b/sysdeps/powerpc/fpu/s_copysign.S
index 9027697..6167112 100644
--- a/sysdeps/powerpc/fpu/s_copysign.S
+++ b/sysdeps/powerpc/fpu/s_copysign.S
@@ -1,5 +1,5 @@
 /* Copy a sign bit between floating-point values.
-   Copyright (C) 1997, 1999 Free Software Foundation, Inc.
+   Copyright (C) 1997, 1999, 2000 Free Software Foundation, Inc.
    This file is part of the GNU C Library.

    The GNU C Library is free software; you can redistribute it and/or
@@ -31,10 +31,10 @@ ENTRY(__copysign)
 	lwz	r3,8(r1)
 	cmpwi	r3,0
 	addi	r1,r1,16
-	blt	0f
+	blt	L(0)
 	fabs	fp1,fp1
 	blr

-0:	fnabs	fp1,fp1
+L(0):	fnabs	fp1,fp1
 	blr
 END (__copysign)
diff --git a/sysdeps/powerpc/lshift.S b/sysdeps/powerpc/lshift.S
index 2d3b996..832ee08 100644
--- a/sysdeps/powerpc/lshift.S
+++ b/sysdeps/powerpc/lshift.S
@@ -1,5 +1,5 @@
 /* Shift a limb left, low level routine.
-   Copyright (C) 1996, 1997, 1999 Free Software Foundation, Inc.
+   Copyright (C) 1996, 1997, 1999, 2000 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    The GNU C Library is free software; you can redistribute it and/or
@@ -35,7 +35,7 @@ EALIGN(__mpn_lshift,3,0)
 	bdz	L(end1)

-0:	lwzu	r10,-4(r4)
+L(0):	lwzu	r10,-4(r4)
 	slw	r9,r11,r6
 	srw	r12,r10,r8
 	or	r9,r9,r12
@@ -46,7 +46,7 @@ EALIGN(__mpn_lshift,3,0)
 	srw	r12,r11,r8
 	or	r9,r9,r12
 	stwu	r9,-4(r7)
-	bdnz	0b
+	bdnz	L(0)

 L(end1):slw	r0,r11,r6
 	stw	r0,-4(r7)
@@ -76,7 +76,7 @@ L(end2):slw	r0,r10,r6

 #define DO_LSHIFT(n) \
 	mtctr	r5;				\
-0:	lwzu	r10,-4(r4);			\
+L(n):	lwzu	r10,-4(r4);			\
 	slwi	r9,r11,n;			\
 	inslwi	r9,r10,n,32-n;			\
 	stwu	r9,-4(r7);			\
@@ -85,7 +85,7 @@ L(end2):slw	r0,r10,r6
 	slwi	r9,r10,n;			\
 	inslwi	r9,r11,n,32-n;			\
 	stwu	r9,-4(r7);			\
-	bdnz	0b;				\
+	bdnz	L(n);				\
 	b	L(end1)

 DO_LSHIFT(1)
diff --git a/sysdeps/powerpc/memset.S b/sysdeps/powerpc/memset.S
index 63c4c14..6dfe250 100644
--- a/sysdeps/powerpc/memset.S
+++ b/sysdeps/powerpc/memset.S
@@ -1,5 +1,5 @@
 /* Optimized memset implementation for PowerPC.
-   Copyright (C) 1997, 1999 Free Software Foundation, Inc.
+   Copyright (C) 1997, 1999, 2000 Free Software Foundation, Inc.
    This file is part of the GNU C Library.

    The GNU C Library is free software; you can redistribute it and/or
@@ -42,10 +42,10 @@ EALIGN(memset,5,1)
 	subfic	r7,r7,4
 	add	r6,r6,r7
 	sub	r5,r5,r7
-	bf+	31,0f
+	bf+	31,L(g0)
 	stb	r4,0(r3)
 	bt	30,L(aligned)
-0:	sth	r4,-2(r6)	#  16th instruction from .align
+L(g0):	sth	r4,-2(r6)	#  16th instruction from .align
 /* take care of case for size < 31 */
 L(aligned):
 	mtcrf	0x01,r5
@@ -60,15 +60,15 @@ L(aligned):
 	sub	r5,r5,r7
 	cmplwi	cr1,r7,0x10
 	mr	r8,r6
-	bf	28,1f
+	bf	28,L(a1)
 	stw	r4,-4(r8)
 	stwu	r4,-8(r8)
-1:	blt	cr1,2f
+L(a1):	blt	cr1,2f
 	stw	r4,-4(r8)	# 32nd instruction from .align
 	stw	r4,-8(r8)
 	stw	r4,-12(r8)
 	stwu	r4,-16(r8)
-2:	bf	29,L(caligned)
+L(a2):	bf	29,L(caligned)
 	stw	r4,-4(r8)
 /* now aligned to a cache line.  */
 L(caligned):
@@ -81,10 +81,10 @@ L(caligned):
 	beq	L(medium)	# we may not actually get to do a full line
 	clrlwi.	r5,r5,27
 	add	r6,r6,r7
-0:	li	r8,-0x40
+	li	r8,-0x40
 	bdz	L(cloopdone)	# 48th instruction from .align

-3:	dcbz	r8,r6
+L(c3):	dcbz	r8,r6
 	stw	r4,-4(r6)
 	stw	r4,-8(r6)
 	stw	r4,-12(r6)
@@ -95,7 +95,7 @@ L(caligned):
 	nop			# let 601 fetch first 8 instructions of loop
 	stw	r4,-28(r6)
 	stwu	r4,-32(r6)
-	bdnz	3b
+	bdnz	L(c3)
 L(cloopdone):
 	stw	r4,-4(r6)
 	stw	r4,-8(r6)
@@ -121,15 +121,15 @@ L(zloopstart):
 	li	r7,0x20
 	li	r8,-0x40
 	cmplwi	cr1,r5,16	# 8
-	bf	26,0f
+	bf	26,L(z0)
 	dcbz	0,r6
 	addi	r6,r6,0x20
-0:	li	r9,-0x20
-	bf	25,1f
+L(z0):	li	r9,-0x20
+	bf	25,L(z1)
 	dcbz	0,r6
 	dcbz	r7,r6
 	addi	r6,r6,0x40	# 16
-1:	cmplwi	cr5,r5,0
+L(z1):	cmplwi	cr5,r5,0
 	beq	L(medium)
 L(zloop):
 	dcbz	0,r6
diff --git a/sysdeps/powerpc/mul_1.S b/sysdeps/powerpc/mul_1.S
index f0d8f07..52565a6 100644
--- a/sysdeps/powerpc/mul_1.S
+++ b/sysdeps/powerpc/mul_1.S
@@ -1,5 +1,5 @@
 /* Multiply a limb vector by a limb, for PowerPC.
-   Copyright (C) 1993, 1994, 1995, 1997, 1999 Free Software Foundation, Inc.
+   Copyright (C) 1993-1995, 1997, 1999, 2000 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    The GNU C Library is free software; you can redistribute it and/or
@@ -31,16 +31,16 @@ ENTRY(__mpn_mul_1)
 	mulhwu	r10,r0,r6
 	addi	r3,r3,-4	# adjust res_ptr
 	addic	r5,r5,0		# clear cy with dummy insn
-	bdz	1f
+	bdz	L(1)

-0:	lwzu	r0,4(r4)
+L(0):	lwzu	r0,4(r4)
 	stwu	r7,4(r3)
 	mullw	r8,r0,r6
 	adde	r7,r8,r10
 	mulhwu	r10,r0,r6
-	bdnz	0b
+	bdnz	L(0)

-1:	stw	r7,4(r3)
+L(1):	stw	r7,4(r3)
 	addze	r3,r10
 	blr
 END(__mpn_mul_1)
diff --git a/sysdeps/powerpc/rshift.S b/sysdeps/powerpc/rshift.S
index c09a2a9..a02fb25 100644
--- a/sysdeps/powerpc/rshift.S
+++ b/sysdeps/powerpc/rshift.S
@@ -1,5 +1,5 @@
 /* Shift a limb right, low level routine.
-   Copyright (C) 1995, 1997, 1999 Free Software Foundation, Inc.
+   Copyright (C) 1995, 1997, 1999, 2000 Free Software Foundation, Inc.
    This file is part of the GNU C Library.

    The GNU C Library is free software; you can redistribute it and/or
@@ -31,26 +31,26 @@ ENTRY(__mpn_rshift)
 	subfic	r8,r6,32
 	lwz	r11,0(r4)	# load first s1 limb
 	slw	r3,r11,r8	# compute function return value
-	bdz	1f
+	bdz	L(1)

-0:	lwzu	r10,4(r4)
+L(0):	lwzu	r10,4(r4)
 	srw	r9,r11,r6
 	slw	r12,r10,r8
 	or	r9,r9,r12
 	stwu	r9,4(r7)
-	bdz	2f
+	bdz	L(2)
 	lwzu	r11,4(r4)
 	srw	r9,r10,r6
 	slw	r12,r11,r8
 	or	r9,r9,r12
 	stwu	r9,4(r7)
-	bdnz	0b
+	bdnz	L(0)

-1:	srw	r0,r11,r6
+L(1):	srw	r0,r11,r6
 	stw	r0,4(r7)
 	blr

-2:	srw	r0,r10,r6
+L(2):	srw	r0,r10,r6
 	stw	r0,4(r7)
 	blr
 END(__mpn_rshift)
diff --git a/sysdeps/powerpc/stpcpy.S b/sysdeps/powerpc/stpcpy.S
index e3d7c1e..eaea26b 100644
--- a/sysdeps/powerpc/stpcpy.S
+++ b/sysdeps/powerpc/stpcpy.S
@@ -1,5 +1,5 @@
 /* Optimized stpcpy implementation for PowerPC.
-   Copyright (C) 1997, 1999 Free Software Foundation, Inc.
+   Copyright (C) 1997, 1999, 2000 Free Software Foundation, Inc.
    This file is part of the GNU C Library.

    The GNU C Library is free software; you can redistribute it and/or
@@ -44,24 +44,24 @@ EALIGN(__stpcpy,4,0)
 	lwz	r6,0(r4)
 	addi	r7,r7,-0x101
 	addi	r8,r8,0x7f7f
-	b	2f
+	b	L(g2)

-0:	lwzu	r10,4(r4)
+L(g0):	lwzu	r10,4(r4)
 	stwu	r6,4(r3)
 	add	r0,r7,r10
 	nor	r9,r8,r10
 	and.	r0,r0,r9
-	bne-	1f
+	bne-	L(g1)
 	lwzu	r6,4(r4)
 	stwu	r10,4(r3)
-2:	add	r0,r7,r6
+L(g2):	add	r0,r7,r6
 	nor	r9,r8,r6
 	and.	r0,r0,r9
-	beq+	0b
+	beq+	L(g0)

 	mr	r10,r6
 /* We've hit the end of the string.  Do the rest byte-by-byte.  */
-1:	rlwinm.	r0,r10,8,24,31
+L(g1):	rlwinm.	r0,r10,8,24,31
 	stbu	r0,4(r3)
 	beqlr-
 	rlwinm.	r0,r10,16,24,31
@@ -80,20 +80,20 @@ L(unaligned):
 	lbz	r6,0(r4)
 	addi	r3,r3,3
 	cmpwi	r6,0
-	beq-	2f
+	beq-	L(u2)

-0:	lbzu	r10,1(r4)
+L(u0):	lbzu	r10,1(r4)
 	stbu	r6,1(r3)
 	cmpwi	r10,0
-	beq-	1f
+	beq-	L(u1)
 	nop	/* Let 601 load start of loop.  */
 	lbzu	r6,1(r4)
 	stbu	r10,1(r3)
 	cmpwi	r6,0
-	bne+	0b
-2:	stbu	r6,1(r3)
+	bne+	L(u0)
+L(u2):	stbu	r6,1(r3)
 	blr

-1:	stbu	r10,1(r3)
+L(u1):	stbu	r10,1(r3)
 	blr
 END(__stpcpy)
diff --git a/sysdeps/powerpc/strcmp.S b/sysdeps/powerpc/strcmp.S
index a4afead..92e9858 100644
--- a/sysdeps/powerpc/strcmp.S
+++ b/sysdeps/powerpc/strcmp.S
@@ -1,5 +1,5 @@
 /* Optimized strcmp implementation for PowerPC.
-   Copyright (C) 1997, 1999 Free Software Foundation, Inc.
+   Copyright (C) 1997, 1999, 2000 Free Software Foundation, Inc.
    This file is part of the GNU C Library.

    The GNU C Library is free software; you can redistribute it and/or
@@ -48,16 +48,16 @@ EALIGN(strcmp,4,0)
 	lis	r8,0x7f7f
 	addi	r7,r7,-0x101
 	addi	r8,r8,0x7f7f
-	b	1f
+	b	L(g1)

-0:	lwzu	r5,4(r3)
+L(g0):	lwzu	r5,4(r3)
 	bne	cr1,L(different)
 	lwzu	r6,4(r4)
-1:	add	r0,r7,r5
+L(g1):	add	r0,r7,r5
 	nor	r9,r8,r5
 	and.	r0,r0,r9
 	cmpw	cr1,r5,r6
-	beq+	0b
+	beq+	L(g0)
 L(endstring):
 /* OK.  We've hit the end of the string.  We need to be careful that
    we don't compare two strings as different because of gunk beyond
@@ -93,23 +93,23 @@ L(highbit):
 L(unaligned):
 	lbz	r5,0(r3)
 	lbz	r6,0(r4)
-	b	1f
+	b	L(u1)

-0:	lbzu	r5,1(r3)
-	bne-	4f
+L(u0):	lbzu	r5,1(r3)
+	bne-	L(u4)
 	lbzu	r6,1(r4)
-1:	cmpwi	cr1,r5,0
-	beq-	cr1,3f
+L(u1):	cmpwi	cr1,r5,0
+	beq-	cr1,L(u3)
 	cmpw	r5,r6
-	bne-	3f
+	bne-	L(u3)
 	lbzu	r5,1(r3)
 	lbzu	r6,1(r4)
 	cmpwi	cr1,r5,0
 	cmpw	r5,r6
-	bne+	cr1,0b
-3:	sub	r3,r5,r6
+	bne+	cr1,L(u0)
+L(u3):	sub	r3,r5,r6
 	blr
-4:	lbz	r5,-1(r3)
+L(u4):	lbz	r5,-1(r3)
 	sub	r3,r5,r6
 	blr
 END(strcmp)
diff --git a/sysdeps/powerpc/strcpy.S b/sysdeps/powerpc/strcpy.S
index 0a1d89c..0767921 100644
--- a/sysdeps/powerpc/strcpy.S
+++ b/sysdeps/powerpc/strcpy.S
@@ -1,5 +1,5 @@
 /* Optimized strcpy implementation for PowerPC.
-   Copyright (C) 1997, 1999 Free Software Foundation, Inc.
+   Copyright (C) 1997, 1999, 2000 Free Software Foundation, Inc.
    This file is part of the GNU C Library.

    The GNU C Library is free software; you can redistribute it and/or
@@ -45,24 +45,24 @@ EALIGN(strcpy,4,0)
 	lwz	r6,0(r4)
 	addi	r7,r7,-0x101
 	addi	r8,r8,0x7f7f
-	b	2f
+	b	L(g2)

-0:	lwzu	r10,4(r4)
+L(g0):	lwzu	r10,4(r4)
 	stwu	r6,4(r5)
 	add	r0,r7,r10
 	nor	r9,r8,r10
 	and.	r0,r0,r9
-	bne-	1f
+	bne-	L(g1)
 	lwzu	r6,4(r4)
 	stwu	r10,4(r5)
-2:	add	r0,r7,r6
+L(g2):	add	r0,r7,r6
 	nor	r9,r8,r6
 	and.	r0,r0,r9
-	beq+	0b
+	beq+	L(g0)

 	mr	r10,r6
 /* We've hit the end of the string.  Do the rest byte-by-byte.  */
-1:	rlwinm.	r0,r10,8,24,31
+L(g1):	rlwinm.	r0,r10,8,24,31
 	stb	r0,4(r5)
 	beqlr-
 	rlwinm.	r0,r10,16,24,31
@@ -81,20 +81,20 @@ L(unaligned):
 	lbz	r6,0(r4)
 	addi	r5,r3,-1
 	cmpwi	r6,0
-	beq-	2f
+	beq-	L(u2)

-0:	lbzu	r10,1(r4)
+L(u0):	lbzu	r10,1(r4)
 	stbu	r6,1(r5)
 	cmpwi	r10,0
-	beq-	1f
+	beq-	L(u1)
 	nop	/* Let 601 load start of loop.  */
 	lbzu	r6,1(r4)
 	stbu	r10,1(r5)
 	cmpwi	r6,0
-	bne+	0b
-2:	stb	r6,1(r5)
+	bne+	L(u0)
+L(u2):	stb	r6,1(r5)
 	blr

-1:	stb	r10,1(r5)
+L(u1):	stb	r10,1(r5)
 	blr
 END(strcpy)
diff --git a/sysdeps/powerpc/sub_n.S b/sysdeps/powerpc/sub_n.S
index d839a5f..244ee4b 100644
--- a/sysdeps/powerpc/sub_n.S
+++ b/sysdeps/powerpc/sub_n.S
@@ -1,5 +1,5 @@
 /* Subtract two limb vectors of equal, non-zero length for PowerPC.
-   Copyright (C) 1997, 1999 Free Software Foundation, Inc.
+   Copyright (C) 1997, 1999, 2000 Free Software Foundation, Inc.
    This file is part of the GNU C Library.

    The GNU C Library is free software; you can redistribute it and/or
@@ -32,7 +32,7 @@ EALIGN(__mpn_sub_n,3,1)
 	mtcrf	0x01,r6
 	srwi.	r7,r6,1
 	mtctr	r7
-	bt	31,2f
+	bt	31,L(2)

 /* Set the carry (clear the borrow).  */
 	subfc	r0,r0,r0
@@ -40,18 +40,18 @@ EALIGN(__mpn_sub_n,3,1)
 	addi	r3,r3,-4
 	addi	r4,r4,-4
 	addi	r5,r5,-4
-	b	0f
+	b	L(0)

-2:	lwz	r7,0(r5)
+L(2):	lwz	r7,0(r5)
 	lwz	r6,0(r4)
 	subfc	r6,r7,r6
 	stw	r6,0(r3)
-	beq	1f
+	beq	L(1)

 /* Align start of loop to an odd word boundary to guarantee that the
    last two words can be fetched in one access (for 601).  This turns
    out to be important.  */
-0:
+L(0):
 	lwz	r9,4(r4)
 	lwz	r8,4(r5)
 	lwzu	r6,8(r4)
@@ -60,9 +60,9 @@ EALIGN(__mpn_sub_n,3,1)
 	stw	r8,4(r3)
 	subfe	r6,r7,r6
 	stwu	r6,8(r3)
-	bdnz	0b
+	bdnz	L(0)
 /* Return the borrow.  */
-1:	subfe	r3,r3,r3
+L(1):	subfe	r3,r3,r3
 	neg	r3,r3
 	blr
 END(__mpn_sub_n)
diff --git a/sysdeps/powerpc/submul_1.S b/sysdeps/powerpc/submul_1.S
index d9472dd..e8c80af 100644
--- a/sysdeps/powerpc/submul_1.S
+++ b/sysdeps/powerpc/submul_1.S
@@ -1,5 +1,5 @@
 /* Multiply a limb vector by a single limb, for PowerPC.
-   Copyright (C) 1993, 1994, 1995, 1997, 1999 Free Software Foundation, Inc.
+   Copyright (C) 1993-1995, 1997, 1999, 2000 Free Software Foundation, Inc.
    This file is part of the GNU C Library.

    The GNU C Library is free software; you can redistribute it and/or
@@ -33,9 +33,9 @@ ENTRY(__mpn_submul_1)
 	subf	r8,r7,r9
 	addc	r7,r7,r8	# invert cy (r7 is junk)
 	addi	r3,r3,-4	# adjust res_ptr
-	bdz	1f
+	bdz	L(1)

-0:	lwzu	r0,4(r4)
+L(0):	lwzu	r0,4(r4)
 	stwu	r8,4(r3)
 	mullw	r8,r0,r6
 	adde	r7,r8,r10
@@ -44,9 +44,9 @@ ENTRY(__mpn_submul_1)
 	addze	r10,r10
 	subf	r8,r7,r9
 	addc	r7,r7,r8	# invert cy (r7 is junk)
-	bdnz	0b
+	bdnz	L(0)

-1:	stw	r8,4(r3)
+L(1):	stw	r8,4(r3)
 	addze	r3,r10
 	blr
 END(__mpn_submul_1)
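One subtlety in the lshift.S hunks above: inside the DO_LSHIFT(n) macro the loop label is written L(n), where n is the macro parameter. The preprocessor substitutes the argument before L() does its token paste, so each instance mints its own label; assuming the sketched .L##name definition:

	/* Hypothetical expansion of one instance: */
	DO_LSHIFT(2)
	/* ... mtctr r5; L(2): lwzu r10,-4(r4); ... bdnz L(2); b L(end1) */
	/* ... mtctr r5; .L2:  lwzu r10,-4(r4); ... bdnz .L2;  b L(end1) */

so DO_LSHIFT(1), DO_LSHIFT(2), and so on each branch back to their own distinct loop head, replacing the old shared numeric 0:/0b pair.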