author    Vitaly Buka <vitalybuka@google.com>  2024-01-08 11:48:45 -0800
committer Vitaly Buka <vitalybuka@google.com>  2024-01-08 11:48:45 -0800
commit    b2845d6945cf560e26f98128d06b22e85953612d (patch)
tree      9a868b16f59dd198c831a1e7f3921efd7dc7d745 /llvm/test/CodeGen/RISCV/double-convert.ll
parent    c7e4065aad78f77d61be1d1ac674546cc62208d1 (diff)
parent    e7655ad605d77e206ec94b2cef59c41a508edba7 (diff)
[𝘀𝗽𝗿] changes introduced through rebase (users/vitalybuka/spr/main.msan-unwind-stack-before-fatal-reports)
Created using spr 1.3.4 [skip ci]
Diffstat (limited to 'llvm/test/CodeGen/RISCV/double-convert.ll')
-rw-r--r--  llvm/test/CodeGen/RISCV/double-convert.ll  264
1 file changed, 132 insertions(+), 132 deletions(-)
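
Every hunk below makes the same mechanical change to this autogenerated test: the @plt suffix is dropped from libcall references, so lines such as "call __truncdfsf2@plt" become plain "call __truncdfsf2". Files like this are normally refreshed by rerunning LLVM's check updater rather than by editing CHECK lines by hand; a minimal sketch of that workflow, assuming a local build whose llc lives in build/bin (the build path is an assumption, not part of this commit):

    # Regenerate the autogenerated CHECK lines for this test against the local llc.
    llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
        llvm/test/CodeGen/RISCV/double-convert.ll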
diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll
index 39ac963..eb8ffe7 100644
--- a/llvm/test/CodeGen/RISCV/double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert.ll
@@ -38,7 +38,7 @@ define float @fcvt_s_d(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __truncdfsf2@plt
+; RV32I-NEXT: call __truncdfsf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -47,7 +47,7 @@ define float @fcvt_s_d(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __truncdfsf2@plt
+; RV64I-NEXT: call __truncdfsf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -81,7 +81,7 @@ define double @fcvt_d_s(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __extendsfdf2@plt
+; RV32I-NEXT: call __extendsfdf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -90,7 +90,7 @@ define double @fcvt_d_s(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __extendsfdf2@plt
+; RV64I-NEXT: call __extendsfdf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -124,7 +124,7 @@ define i32 @fcvt_w_d(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixdfsi@plt
+; RV32I-NEXT: call __fixdfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -133,7 +133,7 @@ define i32 @fcvt_w_d(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixdfsi@plt
+; RV64I-NEXT: call __fixdfsi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -189,17 +189,17 @@ define i32 @fcvt_w_d_sat(double %a) nounwind {
; RV32I-NEXT: lui a3, 269824
; RV32I-NEXT: addi a3, a3, -1
; RV32I-NEXT: lui a2, 1047552
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: lui a3, 794112
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __fixdfsi@plt
+; RV32I-NEXT: call __fixdfsi
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: lui a0, 524288
; RV32I-NEXT: bgez s4, .LBB3_2
@@ -214,7 +214,7 @@ define i32 @fcvt_w_d_sat(double %a) nounwind {
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
-; RV32I-NEXT: call __unorddf2@plt
+; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a0, a0, s3
@@ -238,10 +238,10 @@ define i32 @fcvt_w_d_sat(double %a) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: li a1, -497
; RV64I-NEXT: slli a1, a1, 53
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixdfdi@plt
+; RV64I-NEXT: call __fixdfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui s3, 524288
; RV64I-NEXT: bgez s2, .LBB3_2
@@ -253,14 +253,14 @@ define i32 @fcvt_w_d_sat(double %a) nounwind {
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: slli a1, a0, 22
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: blez a0, .LBB3_4
; RV64I-NEXT: # %bb.3: # %start
; RV64I-NEXT: addiw s1, s3, -1
; RV64I-NEXT: .LBB3_4: # %start
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unorddf2@plt
+; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1
@@ -305,7 +305,7 @@ define i32 @fcvt_wu_d(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixunsdfsi@plt
+; RV32I-NEXT: call __fixunsdfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -314,7 +314,7 @@ define i32 @fcvt_wu_d(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixunsdfsi@plt
+; RV64I-NEXT: call __fixunsdfsi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -356,7 +356,7 @@ define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixunsdfsi@plt
+; RV32I-NEXT: call __fixunsdfsi
; RV32I-NEXT: seqz a1, a0
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -367,7 +367,7 @@ define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixunsdfsi@plt
+; RV64I-NEXT: call __fixunsdfsi
; RV64I-NEXT: seqz a1, a0
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -439,19 +439,19 @@ define i32 @fcvt_wu_d_sat(double %a) nounwind {
; RV32I-NEXT: lui a3, 270080
; RV32I-NEXT: addi a3, a3, -1
; RV32I-NEXT: lui a2, 1048064
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: neg s2, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: addi s3, a0, -1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __fixunsdfsi@plt
+; RV32I-NEXT: call __fixunsdfsi
; RV32I-NEXT: and a0, s3, a0
; RV32I-NEXT: or a0, s2, a0
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
@@ -471,17 +471,17 @@ define i32 @fcvt_wu_d_sat(double %a) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __fixunsdfdi@plt
+; RV64I-NEXT: call __fixunsdfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: li a0, 1055
; RV64I-NEXT: slli a0, a0, 31
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: slli a1, a0, 21
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: blez a0, .LBB6_2
; RV64I-NEXT: # %bb.1: # %start
; RV64I-NEXT: li a0, -1
@@ -530,7 +530,7 @@ define double @fcvt_d_w(i32 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatsidf@plt
+; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -540,7 +540,7 @@ define double @fcvt_d_w(i32 %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: call __floatsidf@plt
+; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -578,7 +578,7 @@ define double @fcvt_d_w_load(ptr %p) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 0(a0)
-; RV32I-NEXT: call __floatsidf@plt
+; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -588,7 +588,7 @@ define double @fcvt_d_w_load(ptr %p) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: call __floatsidf@plt
+; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -623,7 +623,7 @@ define double @fcvt_d_wu(i32 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatunsidf@plt
+; RV32I-NEXT: call __floatunsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -633,7 +633,7 @@ define double @fcvt_d_wu(i32 %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: call __floatunsidf@plt
+; RV64I-NEXT: call __floatunsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -677,7 +677,7 @@ define double @fcvt_d_wu_load(ptr %p) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 0(a0)
-; RV32I-NEXT: call __floatunsidf@plt
+; RV32I-NEXT: call __floatunsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -687,7 +687,7 @@ define double @fcvt_d_wu_load(ptr %p) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: call __floatunsidf@plt
+; RV64I-NEXT: call __floatunsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -701,7 +701,7 @@ define i64 @fcvt_l_d(double %a) nounwind {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call __fixdfdi@plt
+; RV32IFD-NEXT: call __fixdfdi
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -715,7 +715,7 @@ define i64 @fcvt_l_d(double %a) nounwind {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -729,7 +729,7 @@ define i64 @fcvt_l_d(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixdfdi@plt
+; RV32I-NEXT: call __fixdfdi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -738,7 +738,7 @@ define i64 @fcvt_l_d(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixdfdi@plt
+; RV64I-NEXT: call __fixdfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -757,7 +757,7 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
; RV32IFD-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
; RV32IFD-NEXT: fle.d s0, fa5, fa0
-; RV32IFD-NEXT: call __fixdfdi@plt
+; RV32IFD-NEXT: call __fixdfdi
; RV32IFD-NEXT: lui a4, 524288
; RV32IFD-NEXT: lui a2, 524288
; RV32IFD-NEXT: beqz s0, .LBB12_2
@@ -804,7 +804,7 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI12_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI12_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI12_0)(a2)
@@ -861,17 +861,17 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
; RV32I-NEXT: lui a3, 278016
; RV32I-NEXT: addi a3, a3, -1
; RV32I-NEXT: li a2, -1
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: lui a3, 802304
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __fixdfdi@plt
+; RV32I-NEXT: call __fixdfdi
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: mv s5, a1
; RV32I-NEXT: lui a0, 524288
@@ -887,7 +887,7 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
-; RV32I-NEXT: call __unorddf2@plt
+; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a1, a0, s5
@@ -919,10 +919,10 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: li a1, -481
; RV64I-NEXT: slli a1, a1, 53
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixdfdi@plt
+; RV64I-NEXT: call __fixdfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: li s3, -1
; RV64I-NEXT: bgez s2, .LBB12_2
@@ -933,14 +933,14 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
; RV64I-NEXT: slli a0, a0, 53
; RV64I-NEXT: addi a1, a0, -1
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: blez a0, .LBB12_4
; RV64I-NEXT: # %bb.3: # %start
; RV64I-NEXT: srli s1, s3, 1
; RV64I-NEXT: .LBB12_4: # %start
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unorddf2@plt
+; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1
@@ -962,7 +962,7 @@ define i64 @fcvt_lu_d(double %a) nounwind {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call __fixunsdfdi@plt
+; RV32IFD-NEXT: call __fixunsdfdi
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -976,7 +976,7 @@ define i64 @fcvt_lu_d(double %a) nounwind {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -990,7 +990,7 @@ define i64 @fcvt_lu_d(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixunsdfdi@plt
+; RV32I-NEXT: call __fixunsdfdi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -999,7 +999,7 @@ define i64 @fcvt_lu_d(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixunsdfdi@plt
+; RV64I-NEXT: call __fixunsdfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1018,7 +1018,7 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
; RV32IFD-NEXT: fcvt.d.w fa5, zero
; RV32IFD-NEXT: fle.d a0, fa5, fa0
; RV32IFD-NEXT: neg s0, a0
-; RV32IFD-NEXT: call __fixunsdfdi@plt
+; RV32IFD-NEXT: call __fixunsdfdi
; RV32IFD-NEXT: lui a2, %hi(.LCPI14_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI14_0)(a2)
; RV32IFD-NEXT: and a0, s0, a0
@@ -1052,7 +1052,7 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
; RV32IZFINXZDINX-NEXT: lui a4, %hi(.LCPI14_0)
; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI14_0+4)(a4)
@@ -1093,19 +1093,19 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
; RV32I-NEXT: lui a3, 278272
; RV32I-NEXT: addi a3, a3, -1
; RV32I-NEXT: li a2, -1
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: neg s2, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: addi s3, a0, -1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __fixunsdfdi@plt
+; RV32I-NEXT: call __fixunsdfdi
; RV32I-NEXT: and a0, s3, a0
; RV32I-NEXT: or a0, s2, a0
; RV32I-NEXT: and a1, s3, a1
@@ -1126,17 +1126,17 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: addi s1, a0, -1
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixunsdfdi@plt
+; RV64I-NEXT: call __fixunsdfdi
; RV64I-NEXT: and s1, s1, a0
; RV64I-NEXT: li a0, 1087
; RV64I-NEXT: slli a0, a0, 52
; RV64I-NEXT: addi a1, a0, -1
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: neg a0, a0
; RV64I-NEXT: or a0, a0, s1
@@ -1196,7 +1196,7 @@ define i64 @fmv_x_d(double %a, double %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1205,7 +1205,7 @@ define i64 @fmv_x_d(double %a, double %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1219,7 +1219,7 @@ define double @fcvt_d_l(i64 %a) nounwind {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call __floatdidf@plt
+; RV32IFD-NEXT: call __floatdidf
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -1233,7 +1233,7 @@ define double @fcvt_d_l(i64 %a) nounwind {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call __floatdidf@plt
+; RV32IZFINXZDINX-NEXT: call __floatdidf
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1247,7 +1247,7 @@ define double @fcvt_d_l(i64 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatdidf@plt
+; RV32I-NEXT: call __floatdidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1256,7 +1256,7 @@ define double @fcvt_d_l(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatdidf@plt
+; RV64I-NEXT: call __floatdidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1269,7 +1269,7 @@ define double @fcvt_d_lu(i64 %a) nounwind {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call __floatundidf@plt
+; RV32IFD-NEXT: call __floatundidf
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -1283,7 +1283,7 @@ define double @fcvt_d_lu(i64 %a) nounwind {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call __floatundidf@plt
+; RV32IZFINXZDINX-NEXT: call __floatundidf
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1297,7 +1297,7 @@ define double @fcvt_d_lu(i64 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatundidf@plt
+; RV32I-NEXT: call __floatundidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1306,7 +1306,7 @@ define double @fcvt_d_lu(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatundidf@plt
+; RV64I-NEXT: call __floatundidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1364,7 +1364,7 @@ define double @fmv_d_x(i64 %a, i64 %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1373,7 +1373,7 @@ define double @fmv_d_x(i64 %a, i64 %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1409,7 +1409,7 @@ define double @fcvt_d_w_i8(i8 signext %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatsidf@plt
+; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1418,7 +1418,7 @@ define double @fcvt_d_w_i8(i8 signext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatsidf@plt
+; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1452,7 +1452,7 @@ define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatunsidf@plt
+; RV32I-NEXT: call __floatunsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1461,7 +1461,7 @@ define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatunsidf@plt
+; RV64I-NEXT: call __floatunsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1495,7 +1495,7 @@ define double @fcvt_d_w_i16(i16 signext %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatsidf@plt
+; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1504,7 +1504,7 @@ define double @fcvt_d_w_i16(i16 signext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatsidf@plt
+; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1538,7 +1538,7 @@ define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatunsidf@plt
+; RV32I-NEXT: call __floatunsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1547,7 +1547,7 @@ define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatunsidf@plt
+; RV64I-NEXT: call __floatunsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1597,7 +1597,7 @@ define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: addi s1, a0, 1
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __floatsidf@plt
+; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: sw a1, 4(s0)
; RV32I-NEXT: sw a0, 0(s0)
; RV32I-NEXT: mv a0, s1
@@ -1616,7 +1616,7 @@ define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: addiw s1, a0, 1
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __floatsidf@plt
+; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: sd a0, 0(s0)
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -1670,7 +1670,7 @@ define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: addi s1, a0, 1
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __floatunsidf@plt
+; RV32I-NEXT: call __floatunsidf
; RV32I-NEXT: sw a1, 4(s0)
; RV32I-NEXT: sw a0, 0(s0)
; RV32I-NEXT: mv a0, s1
@@ -1689,7 +1689,7 @@ define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: addiw s1, a0, 1
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __floatunsidf@plt
+; RV64I-NEXT: call __floatunsidf
; RV64I-NEXT: sd a0, 0(s0)
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -1734,7 +1734,7 @@ define signext i16 @fcvt_w_s_i16(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixdfsi@plt
+; RV32I-NEXT: call __fixdfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1743,7 +1743,7 @@ define signext i16 @fcvt_w_s_i16(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixdfdi@plt
+; RV64I-NEXT: call __fixdfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1831,17 +1831,17 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
; RV32I-NEXT: addi a3, a0, -64
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: lui a3, 790016
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __fixdfsi@plt
+; RV32I-NEXT: call __fixdfsi
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: bgez s4, .LBB26_2
; RV32I-NEXT: # %bb.1: # %start
@@ -1856,7 +1856,7 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
-; RV32I-NEXT: call __unorddf2@plt
+; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a0, a0, s3
@@ -1881,10 +1881,10 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: li a1, -505
; RV64I-NEXT: slli a1, a1, 53
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixdfdi@plt
+; RV64I-NEXT: call __fixdfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: bgez s2, .LBB26_2
; RV64I-NEXT: # %bb.1: # %start
@@ -1894,7 +1894,7 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: slli a1, a0, 38
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: blez a0, .LBB26_4
; RV64I-NEXT: # %bb.3: # %start
; RV64I-NEXT: lui s1, 8
@@ -1902,7 +1902,7 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
; RV64I-NEXT: .LBB26_4: # %start
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unorddf2@plt
+; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1
@@ -1951,7 +1951,7 @@ define zeroext i16 @fcvt_wu_s_i16(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixunsdfsi@plt
+; RV32I-NEXT: call __fixunsdfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1960,7 +1960,7 @@ define zeroext i16 @fcvt_wu_s_i16(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixunsdfdi@plt
+; RV64I-NEXT: call __fixunsdfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2028,17 +2028,17 @@ define zeroext i16 @fcvt_wu_s_sat_i16(double %a) nounwind {
; RV32I-NEXT: lui a3, 265984
; RV32I-NEXT: addi a3, a3, -32
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __fixunsdfsi@plt
+; RV32I-NEXT: call __fixunsdfsi
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: blez s3, .LBB28_2
@@ -2068,16 +2068,16 @@ define zeroext i16 @fcvt_wu_s_sat_i16(double %a) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __fixunsdfdi@plt
+; RV64I-NEXT: call __fixunsdfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui a0, 8312
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: slli a1, a0, 37
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: blez a0, .LBB28_2
@@ -2133,7 +2133,7 @@ define signext i8 @fcvt_w_s_i8(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixdfsi@plt
+; RV32I-NEXT: call __fixdfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2142,7 +2142,7 @@ define signext i8 @fcvt_w_s_i8(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixdfdi@plt
+; RV64I-NEXT: call __fixdfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2228,17 +2228,17 @@ define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind {
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: lui a3, 263676
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: lui a3, 787968
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __fixdfsi@plt
+; RV32I-NEXT: call __fixdfsi
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: bgez s4, .LBB30_2
; RV32I-NEXT: # %bb.1: # %start
@@ -2252,7 +2252,7 @@ define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind {
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
-; RV32I-NEXT: call __unorddf2@plt
+; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a0, a0, s3
@@ -2277,10 +2277,10 @@ define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: li a1, -509
; RV64I-NEXT: slli a1, a1, 53
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixdfdi@plt
+; RV64I-NEXT: call __fixdfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: bgez s2, .LBB30_2
; RV64I-NEXT: # %bb.1: # %start
@@ -2289,14 +2289,14 @@ define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind {
; RV64I-NEXT: lui a1, 65919
; RV64I-NEXT: slli a1, a1, 34
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: blez a0, .LBB30_4
; RV64I-NEXT: # %bb.3: # %start
; RV64I-NEXT: li s1, 127
; RV64I-NEXT: .LBB30_4: # %start
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unorddf2@plt
+; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1
@@ -2347,7 +2347,7 @@ define zeroext i8 @fcvt_wu_s_i8(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixunsdfsi@plt
+; RV32I-NEXT: call __fixunsdfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2356,7 +2356,7 @@ define zeroext i8 @fcvt_wu_s_i8(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixunsdfdi@plt
+; RV64I-NEXT: call __fixunsdfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2425,17 +2425,17 @@ define zeroext i8 @fcvt_wu_s_sat_i8(double %a) nounwind {
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: lui a3, 263934
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __fixunsdfsi@plt
+; RV32I-NEXT: call __fixunsdfsi
; RV32I-NEXT: blez s3, .LBB32_2
; RV32I-NEXT: # %bb.1: # %start
; RV32I-NEXT: li a0, 255
@@ -2463,15 +2463,15 @@ define zeroext i8 @fcvt_wu_s_sat_i8(double %a) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __fixunsdfdi@plt
+; RV64I-NEXT: call __fixunsdfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui a1, 131967
; RV64I-NEXT: slli a1, a1, 33
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: blez a0, .LBB32_2
; RV64I-NEXT: # %bb.1: # %start
; RV64I-NEXT: li a0, 255
@@ -2554,19 +2554,19 @@ define zeroext i32 @fcvt_wu_d_sat_zext(double %a) nounwind {
; RV32I-NEXT: lui a3, 270080
; RV32I-NEXT: addi a3, a3, -1
; RV32I-NEXT: lui a2, 1048064
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: neg s2, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: addi s3, a0, -1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __fixunsdfsi@plt
+; RV32I-NEXT: call __fixunsdfsi
; RV32I-NEXT: and a0, s3, a0
; RV32I-NEXT: or a0, s2, a0
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
@@ -2586,17 +2586,17 @@ define zeroext i32 @fcvt_wu_d_sat_zext(double %a) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __fixunsdfdi@plt
+; RV64I-NEXT: call __fixunsdfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: li a0, 1055
; RV64I-NEXT: slli a0, a0, 31
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: slli a1, a0, 21
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: blez a0, .LBB33_2
; RV64I-NEXT: # %bb.1: # %start
; RV64I-NEXT: li a0, -1
@@ -2668,17 +2668,17 @@ define signext i32 @fcvt_w_d_sat_sext(double %a) nounwind {
; RV32I-NEXT: lui a3, 269824
; RV32I-NEXT: addi a3, a3, -1
; RV32I-NEXT: lui a2, 1047552
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: lui a3, 794112
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __fixdfsi@plt
+; RV32I-NEXT: call __fixdfsi
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: lui a0, 524288
; RV32I-NEXT: bgez s4, .LBB34_2
@@ -2693,7 +2693,7 @@ define signext i32 @fcvt_w_d_sat_sext(double %a) nounwind {
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
-; RV32I-NEXT: call __unorddf2@plt
+; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a0, a0, s3
@@ -2717,10 +2717,10 @@ define signext i32 @fcvt_w_d_sat_sext(double %a) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: li a1, -497
; RV64I-NEXT: slli a1, a1, 53
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixdfdi@plt
+; RV64I-NEXT: call __fixdfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui s3, 524288
; RV64I-NEXT: bgez s2, .LBB34_2
@@ -2732,14 +2732,14 @@ define signext i32 @fcvt_w_d_sat_sext(double %a) nounwind {
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: slli a1, a0, 22
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: blez a0, .LBB34_4
; RV64I-NEXT: # %bb.3: # %start
; RV64I-NEXT: addi s1, s3, -1
; RV64I-NEXT: .LBB34_4: # %start
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unorddf2@plt
+; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1