author     Andrew Waterman <waterman@cs.berkeley.edu>   2014-10-26 21:29:09 -0700
committer  Andrew Waterman <waterman@cs.berkeley.edu>   2014-10-27 16:59:03 -0700
commit     de2fd4166d0e8ba7f9ec04cc70a1b854b4557858
tree       22ea2cfd88892bd37f96e9544be745bf110c979e /newlib
parent     c2086ad170c50d3e96e51dd6caf75d5ac433b4f2
binutils, gcc, glibc: new calling convention
The v0-1 registers are removed; return values are now passed in a0-1. Registers are renumbered so that s0-1 and a0-5 map to x8-15 to simplify the RVC ISA. (These are the eight most frequently used registers besides x0 and sp.)
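
To make the new convention concrete, here is a small hedged C sketch (not part of the commit; the exact x-register assignments are an assumption based on the mapping that later became the standard ABI, s0-s1 = x8-x9 and a0-a5 = x10-x15). It shows the practical effect of returning values in a0-1: a function's result already sits in the register its caller needs next.

    /* Assumed renumbering (illustration only, inferred from the message above):
     *   x8-x9   -> s0-s1  (callee-saved)
     *   x10-x15 -> a0-a5  (arguments; a0-a1 also carry return values)
     */
    extern long compute(long x);
    extern void consume(long v);

    void chain(long x)
    {
        /* compute()'s result comes back in a0, which is exactly where
         * consume() expects its first argument, so no extra move is needed. */
        consume(compute(x));
    }
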
Diffstat (limited to 'newlib')
-rw-r--r--  newlib/libgloss/riscv/crt0.S               |   4
-rw-r--r--  newlib/libgloss/riscv/machine/syscall.h    |   8
-rw-r--r--  newlib/libgloss/riscv/syscalls.c           |   5
-rw-r--r--  newlib/newlib/libc/machine/riscv/memset.S  | 125
-rw-r--r--  newlib/newlib/libc/machine/riscv/setjmp.S  |  41
-rw-r--r--  newlib/newlib/libc/machine/riscv/strcmp.S  |  46
6 files changed, 99 insertions(+), 130 deletions(-)
diff --git a/newlib/libgloss/riscv/crt0.S b/newlib/libgloss/riscv/crt0.S
index da7db15..fda6ad2 100644
--- a/newlib/libgloss/riscv/crt0.S
+++ b/newlib/libgloss/riscv/crt0.S
@@ -28,9 +28,7 @@ _start:
addi a1, sp, _RISCV_SZPTR/8 # a1 = argv
li a2, 0 # a2 = envp = NULL
call main
-
- move a0, v0 # Call exit function with return value
- jump exit # from main as exit code
+ jump exit
.global _init
.global _fini
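
The crt0.S hunk above can drop the old "move a0, v0" because main()'s return value now already arrives in a0, which is also exit()'s argument register. A hedged C rendering of that startup tail (start_c is a hypothetical name; the real file is assembly):

    #include <stdlib.h>

    extern int main(int argc, char **argv, char **envp);

    /* Illustrative only: with return values in a0, main()'s result is
     * already in place as exit()'s argument, so the assembly can simply
     * tail-jump to exit. */
    void start_c(int argc, char **argv, char **envp)
    {
        exit(main(argc, argv, envp));
    }
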
diff --git a/newlib/libgloss/riscv/machine/syscall.h b/newlib/libgloss/riscv/machine/syscall.h
index 029b1b0..c8e759f 100644
--- a/newlib/libgloss/riscv/machine/syscall.h
+++ b/newlib/libgloss/riscv/machine/syscall.h
@@ -46,17 +46,17 @@
static inline long
__internal_syscall(long n, long _a0, long _a1, long _a2, long _a3)
{
- register long v0 asm("v0") = n;
register long a0 asm("a0") = _a0;
register long a1 asm("a1") = _a1;
register long a2 asm("a2") = _a2;
register long a3 asm("a3") = _a3;
+ register long a7 asm("a7") = n;
asm volatile ("scall\n"
- "bltz v0, __syscall_error"
- : "+r"(v0) : "r"(a0), "r"(a1), "r"(a2), "r"(a3));
+ "bltz a0, __syscall_error"
+ : "+r"(a0) : "r"(a1), "r"(a2), "r"(a3), "r"(a7));
- return v0;
+ return a0;
}
#endif
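
Assembled from the + lines of the hunk above, the patched wrapper should read roughly as follows (a hedged reconstruction, not a copy of the resulting file): the syscall number now travels in a7, the result comes back in a0, and a negative result branches to __syscall_error with that value still in a0.

    static inline long
    __internal_syscall(long n, long _a0, long _a1, long _a2, long _a3)
    {
      register long a0 asm("a0") = _a0;
      register long a1 asm("a1") = _a1;
      register long a2 asm("a2") = _a2;
      register long a3 asm("a3") = _a3;
      register long a7 asm("a7") = n;   /* syscall number, no longer in v0 */

      asm volatile ("scall\n"
                    "bltz a0, __syscall_error"
                    : "+r"(a0) : "r"(a1), "r"(a2), "r"(a3), "r"(a7));
      return a0;                        /* result (or negative error) in a0 */
    }
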
diff --git a/newlib/libgloss/riscv/syscalls.c b/newlib/libgloss/riscv/syscalls.c
index 5d7b9d5..c838c18 100644
--- a/newlib/libgloss/riscv/syscalls.c
+++ b/newlib/libgloss/riscv/syscalls.c
@@ -91,10 +91,9 @@
#define syscall_errno(n, a, b, c, d) \
__internal_syscall(n, (long)(a), (long)(b), (long)(c), (long)(d))
-int __syscall_error()
+long __syscall_error(long a0)
{
- register int v0 asm("v0");
- errno = -v0;
+ errno = -a0;
return -1;
}
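
Together with the previous hunk, this removes the register-asm hack: the kernel's negative return stays in a0 across the bltz branch and lands as __syscall_error's first parameter. The observable behaviour, stated as a self-contained C illustration (hypothetical helper name, not code from the tree):

    #include <errno.h>

    /* Illustration of the error convention: a kernel return of -E is
     * reported to the caller as -1 with errno set to E. */
    static long illustrate_error_path(long kernel_return)
    {
        if (kernel_return < 0) {
            errno = -kernel_return;
            return -1;
        }
        return kernel_return;
    }
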
diff --git a/newlib/newlib/libc/machine/riscv/memset.S b/newlib/newlib/libc/machine/riscv/memset.S
index 85a16ce..4205511 100644
--- a/newlib/newlib/libc/machine/riscv/memset.S
+++ b/newlib/newlib/libc/machine/riscv/memset.S
@@ -5,73 +5,64 @@
.text
.global memset
memset:
- li a6, 7
- move v0, a0
- bleu a2, a6, .tiny
- and a5, a0, 7
- bnez a5, .misaligned
+ li a6, 15
+ move a4, a0
+ bleu a2, a6, .Ltiny
+ and a5, a4, 15
+ bnez a5, .Lmisaligned
-.aligned:
- bnez a1, .wordify
+.Laligned:
+ bnez a1, .Lwordify
-.wordified:
- add a3, a2, -16
- bltu a3, a2, .block16
-
-.8to15:
-#ifdef __riscv64
- sd a1, 0(a0)
-#else
- sw a1, 0(a0)
- sw a1, 4(a0)
-#endif
- add a0, a0, 8
- add a2, a2, -8
- bnez a2, .tiny
- ret
+.Lwordified:
+ and a3, a2, ~15
+ and a2, a2, 15
+ add a3, a3, a4
-.block16:
- add a5, a0, a2
- add a3, a3, a0
#ifdef __riscv64
-1:sd a1, 0(a0)
- sd a1, 8(a0)
+1:sd a1, 0(a4)
+ sd a1, 8(a4)
#else
-1:sw a1, 0(a0)
- sw a1, 4(a0)
- sw a1, 8(a0)
- sw a1, 12(a0)
+1:sw a1, 0(a4)
+ sw a1, 4(a4)
+ sw a1, 8(a4)
+ sw a1, 12(a4)
#endif
- add a0, a0, 16
- bleu a0, a3, 1b
+ add a4, a4, 16
+ bltu a4, a3, 1b
- bne a0, a5, .more
+ bnez a2, .Ltiny
ret
-.more:
- sub a2, a5, a0
- bgtu a2, a6, .8to15
- /* Fall through */
-.tiny:
+.Ltiny:
sub a3, a6, a2
sll a3, a3, 2
+1:auipc t0, %pcrel_hi(.Ltable)
+ add a3, a3, t0
.option push
.option norvc
- lui a4, %hi(1f)
- add a3, a3, a4
- jalr x0, a3, %lo(1f)
-
-1:sb a1, 6(a0)
- sb a1, 5(a0)
- sb a1, 4(a0)
- sb a1, 3(a0)
- sb a1, 2(a0)
- sb a1, 1(a0)
- sb a1, 0(a0)
- ret
+.Ltable_misaligned:
+ jr a3, %pcrel_lo(1b)
+.Ltable:
+ sb a1,14(a4)
+ sb a1,13(a4)
+ sb a1,12(a4)
+ sb a1,11(a4)
+ sb a1,10(a4)
+ sb a1, 9(a4)
+ sb a1, 8(a4)
+ sb a1, 7(a4)
+ sb a1, 6(a4)
+ sb a1, 5(a4)
+ sb a1, 4(a4)
+ sb a1, 3(a4)
+ sb a1, 2(a4)
+ sb a1, 1(a4)
+ sb a1, 0(a4)
.option pop
+ ret
-.wordify:
+.Lwordify:
and a1, a1, 0xFF
sll a3, a1, 8
or a1, a1, a3
@@ -81,26 +72,18 @@ memset:
sll a3, a1, 32
or a1, a1, a3
#endif
- j .wordified
+ j .Lwordified
-.misaligned:
+.Lmisaligned:
sll a3, a5, 2
-.option push
-.option norvc
- lui a4, %hi(1f)
- add a3, a3, a4
-1:jalr x0, a3, %lo(1b)
+1:auipc t0, %pcrel_hi(.Ltable_misaligned)
+ add a3, a3, t0
+ mv t0, ra
+ jalr a3, %pcrel_lo(1b)
+ mv ra, t0
- sb a1, 6(a0)
- sb a1, 5(a0)
- sb a1, 4(a0)
- sb a1, 3(a0)
- sb a1, 2(a0)
- sb a1, 1(a0)
- sb a1, 0(a0)
- add a5, a5, -8
- sub a0, a0, a5
+ add a5, a5, -16
+ sub a4, a4, a5
add a2, a2, a5
- bleu a2, a6, .tiny
- j .aligned
-.option pop
+ bleu a2, a6, .Ltiny
+ j .Laligned
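
The rewritten memset tail above replaces the old byte loop with a jump into a table of fifteen sb instructions: the code enters the table (15 - remainder) slots from the top, so exactly the remaining bytes are stored without any per-byte branch. A hedged C analogue of that idea, using a fall-through switch instead of a computed jump (illustrative, not a translation of the assembly):

    #include <stddef.h>

    /* Sketch of the .Ltiny/.Ltable idea: start partway into a run of
     * single-byte stores; the cases fall through on purpose. */
    static void store_tail(unsigned char *p, unsigned char c, size_t rem /* 0..15 */)
    {
        switch (rem) {
        case 15: p[14] = c; /* fall through */
        case 14: p[13] = c; /* fall through */
        case 13: p[12] = c; /* fall through */
        case 12: p[11] = c; /* fall through */
        case 11: p[10] = c; /* fall through */
        case 10: p[9]  = c; /* fall through */
        case 9:  p[8]  = c; /* fall through */
        case 8:  p[7]  = c; /* fall through */
        case 7:  p[6]  = c; /* fall through */
        case 6:  p[5]  = c; /* fall through */
        case 5:  p[4]  = c; /* fall through */
        case 4:  p[3]  = c; /* fall through */
        case 3:  p[2]  = c; /* fall through */
        case 2:  p[1]  = c; /* fall through */
        case 1:  p[0]  = c; /* fall through */
        case 0:  break;
        }
    }
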
diff --git a/newlib/newlib/libc/machine/riscv/setjmp.S b/newlib/newlib/libc/machine/riscv/setjmp.S
index 07d04ac..2ddc59f 100644
--- a/newlib/newlib/libc/machine/riscv/setjmp.S
+++ b/newlib/newlib/libc/machine/riscv/setjmp.S
@@ -42,16 +42,12 @@ setjmp:
fsd fs9, 16*SZREG+ 9*8(a0)
fsd fs10,16*SZREG+10*8(a0)
fsd fs11,16*SZREG+11*8(a0)
- fsd fs12,16*SZREG+12*8(a0)
- fsd fs13,16*SZREG+13*8(a0)
- fsd fs14,16*SZREG+14*8(a0)
- fsd fs15,16*SZREG+15*8(a0)
REG_S a3, 15*SZREG(a0)
#endif
- li v0, 0
- ret
+ li a0, 0
+ ret
/* volatile void longjmp (jmp_buf, int); */
.globl longjmp
@@ -75,26 +71,21 @@ longjmp:
#ifdef __riscv_hard_float
REG_L a3, 15*SZREG(a0)
- fld fs0, 16*SZREG+ 0*8(a0)
- fld fs1, 16*SZREG+ 1*8(a0)
- fld fs2, 16*SZREG+ 2*8(a0)
- fld fs3, 16*SZREG+ 3*8(a0)
- fld fs4, 16*SZREG+ 4*8(a0)
- fld fs5, 16*SZREG+ 5*8(a0)
- fld fs6, 16*SZREG+ 6*8(a0)
- fld fs7, 16*SZREG+ 7*8(a0)
- fld fs8, 16*SZREG+ 8*8(a0)
- fld fs9, 16*SZREG+ 9*8(a0)
- fld fs10,16*SZREG+10*8(a0)
- fld fs11,16*SZREG+11*8(a0)
- fld fs12,16*SZREG+12*8(a0)
- fld fs13,16*SZREG+13*8(a0)
- fld fs14,16*SZREG+14*8(a0)
- fld fs15,16*SZREG+15*8(a0)
+ fld fs0, 16*SZREG+ 0*8(a0)
+ fld fs1, 16*SZREG+ 1*8(a0)
+ fld fs2, 16*SZREG+ 2*8(a0)
+ fld fs3, 16*SZREG+ 3*8(a0)
+ fld fs4, 16*SZREG+ 4*8(a0)
+ fld fs5, 16*SZREG+ 5*8(a0)
+ fld fs6, 16*SZREG+ 6*8(a0)
+ fld fs7, 16*SZREG+ 7*8(a0)
+ fld fs8, 16*SZREG+ 8*8(a0)
+ fld fs9, 16*SZREG+ 9*8(a0)
+ fld fs10,16*SZREG+10*8(a0)
fssr a3
#endif
- sltiu v0, a1, 1 # v0 = (a1 == 0)
- add v0, v0, a1 # v0 = (a1 == 0) ? 1 : a1
- ret
+ seqz a0, a1
+ add a0, a0, a1 # a0 = (a1 == 0) ? 1 : a1
+ ret
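
The final setjmp.S hunk swaps the v0-based sequence for seqz/add on a0, keeping the usual guarantee that setjmp never appears to return 0 from a longjmp. The same rule in one line of C (illustration only):

    /* Matches the hunk's own comment "a0 = (a1 == 0) ? 1 : a1":
     * a longjmp value of 0 becomes 1, anything else passes through. */
    static long longjmp_return_value(long val)
    {
        return (val == 0) + val;   /* seqz a0, a1 ; add a0, a0, a1 */
    }
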
diff --git a/newlib/newlib/libc/machine/riscv/strcmp.S b/newlib/newlib/libc/machine/riscv/strcmp.S
index 65cbdf3..6004d9b 100644
--- a/newlib/newlib/libc/machine/riscv/strcmp.S
+++ b/newlib/newlib/libc/machine/riscv/strcmp.S
@@ -9,11 +9,10 @@
.text
.globl strcmp
strcmp:
- or v1, a0, a1
+ or a4, a0, a1
li t2, -1
- and v1, v1, SZREG-1
- li v0, 0
- bnez v1, .Lmisaligned
+ and a4, a4, SZREG-1
+ bnez a4, .Lmisaligned
#if SZREG == 4
li t3, 0x7f7f7f7f
@@ -50,16 +49,16 @@ strcmp:
.Lnull0:
.endif
bne a2, a3, .Lmisaligned
+ li a0, 0
ret
.endif
.endm
.Lloop:
# examine full words
- check_one_word 0 4
- check_one_word 1 4
- check_one_word 2 4
- check_one_word 3 4
+ check_one_word 0 3
+ check_one_word 1 3
+ check_one_word 2 3
# backwards branch to .Lloop contained above
.Lmismatch:
@@ -78,42 +77,41 @@ strcmp:
srl a0, a2, 8*SZREG-16
srl a1, a3, 8*SZREG-16
- sub v0, a0, a1
- and v1, v0, 0xff
- bnez v1, 1f
+ sub a0, a0, a1
+ and a4, a0, 0xff
+ bnez a4, 1f
ret
.Lmismatch_upper:
srl a0, a0, 8*SZREG-16
srl a1, a1, 8*SZREG-16
- sub v0, a0, a1
- and v1, v0, 0xff
- bnez v1, 1f
+ sub a2, a0, a1
+ and a3, a2, 0xff
+ bnez a3, 1f
ret
1:and a0, a0, 0xff
and a1, a1, 0xff
- sub v0, a0, a1
+ sub a0, a0, a1
ret
.Lmisaligned:
# misaligned
- lbu v0, 0(a0)
- lbu v1, 0(a1)
+ lbu a2, 0(a0)
+ lbu a3, 0(a1)
add a0, a0, 1
add a1, a1, 1
- bne v0, v1, 1f
- bnez v0, .Lmisaligned
+ bne a2, a3, 1f
+ bnez a2, .Lmisaligned
1:
- sub v0, v0, v1
+ sub a0, a2, a3
ret
# cases in which a null byte was detected
- foundnull 0, 4
- foundnull 1, 4
- foundnull 2, 4
- foundnull 3, 4
+ foundnull 0, 3
+ foundnull 1, 3
+ foundnull 2, 3
#if SZREG == 8
.section .srodata.cst8,"aM",@progbits,8
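
The strcmp changes above move every result that used to live in v0/v1 into a0 and the scratch registers a2-a4. The misaligned slow path is the easiest part to read back into C; a hedged equivalent (illustrative, not derived mechanically from the assembly):

    /* Byte-at-a-time comparison as in .Lmisaligned: stop at the first
     * mismatch or at a terminating NUL; the difference of the first
     * differing bytes (0 for equal strings) is the return value, now in a0. */
    static int strcmp_bytewise(const char *a, const char *b)
    {
        const unsigned char *pa = (const unsigned char *)a;
        const unsigned char *pb = (const unsigned char *)b;
        unsigned char ca, cb;

        do {
            ca = *pa++;
            cb = *pb++;
        } while (ca == cb && ca != 0);

        return (int)ca - (int)cb;
    }
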