Diffstat (limited to 'llvm/test/CodeGen/RISCV')
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/combine-reduce-add-to-vcpop.ll          18
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll                        38
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll            108
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll     19
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-deinterleave.ll   26
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/reproducer-pr146855.ll                  72
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll            161
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll        156
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll                           14
-rw-r--r--  llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll                 32
-rw-r--r--  llvm/test/CodeGen/RISCV/zdinx-spill.ll                              26
11 files changed, 435 insertions, 235 deletions
diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-reduce-add-to-vcpop.ll b/llvm/test/CodeGen/RISCV/rvv/combine-reduce-add-to-vcpop.ll
index 0d8aff3..2d4fce6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/combine-reduce-add-to-vcpop.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/combine-reduce-add-to-vcpop.ll
@@ -313,12 +313,12 @@ define i32 @test_nxv128i1(<vscale x 128 x i1> %x) {
; CHECK-NEXT: vslidedown.vx v0, v6, a0
; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v6, v7, a1
+; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v7, a0
; CHECK-NEXT: vslidedown.vx v5, v6, a0
-; CHECK-NEXT: vslidedown.vx v4, v7, a0
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
-; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vmv1r.v v0, v4
; CHECK-NEXT: vadd.vi v8, v8, 1, v0.t
; CHECK-NEXT: vmv1r.v v0, v5
; CHECK-NEXT: vadd.vi v16, v16, 1, v0.t
@@ -425,13 +425,15 @@ define i32 @test_nxv256i1(<vscale x 256 x i1> %x) {
; CHECK-NEXT: vmerge.vim v16, v8, 1, v0
; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v5, a1
-; CHECK-NEXT: vslidedown.vx v5, v7, a1
-; CHECK-NEXT: vslidedown.vx v4, v6, a1
-; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, ma
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vmv1r.v v0, v4
+; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v6, a1
+; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, 1, v0.t
-; CHECK-NEXT: vmv1r.v v0, v5
+; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v7, a1
+; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, mu
; CHECK-NEXT: vadd.vi v16, v16, 1, v0.t
; CHECK-NEXT: vadd.vv v8, v16, v8
; CHECK-NEXT: addi a2, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
index 796f8dd..15417da 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
@@ -139,21 +139,20 @@ define i1 @extractelt_nxv128i1(ptr %x, i64 %idx) nounwind {
; RV32-NEXT: slli a3, a3, 4
; RV32-NEXT: sub sp, sp, a3
; RV32-NEXT: andi sp, sp, -64
-; RV32-NEXT: addi a3, sp, 64
; RV32-NEXT: vl8r.v v8, (a0)
; RV32-NEXT: slli a2, a2, 3
; RV32-NEXT: add a0, a0, a2
-; RV32-NEXT: vl8r.v v24, (a0)
+; RV32-NEXT: vl8r.v v16, (a0)
; RV32-NEXT: vsetvli a0, zero, e8, m8, ta, ma
; RV32-NEXT: vmseq.vi v0, v8, 0
-; RV32-NEXT: vmv.v.i v16, 0
-; RV32-NEXT: add a1, a3, a1
-; RV32-NEXT: add a2, a3, a2
-; RV32-NEXT: vmseq.vi v8, v24, 0
-; RV32-NEXT: vmerge.vim v24, v16, 1, v0
-; RV32-NEXT: vs8r.v v24, (a3)
-; RV32-NEXT: vmv1r.v v0, v8
-; RV32-NEXT: vmerge.vim v8, v16, 1, v0
+; RV32-NEXT: vmv.v.i v8, 0
+; RV32-NEXT: vmerge.vim v24, v8, 1, v0
+; RV32-NEXT: vmseq.vi v0, v16, 0
+; RV32-NEXT: addi a0, sp, 64
+; RV32-NEXT: add a1, a0, a1
+; RV32-NEXT: add a2, a0, a2
+; RV32-NEXT: vs8r.v v24, (a0)
+; RV32-NEXT: vmerge.vim v8, v8, 1, v0
; RV32-NEXT: vs8r.v v8, (a2)
; RV32-NEXT: lbu a0, 0(a1)
; RV32-NEXT: addi sp, s0, -80
@@ -179,21 +178,20 @@ define i1 @extractelt_nxv128i1(ptr %x, i64 %idx) nounwind {
; RV64-NEXT: slli a3, a3, 4
; RV64-NEXT: sub sp, sp, a3
; RV64-NEXT: andi sp, sp, -64
-; RV64-NEXT: addi a3, sp, 64
; RV64-NEXT: vl8r.v v8, (a0)
; RV64-NEXT: slli a2, a2, 3
; RV64-NEXT: add a0, a0, a2
-; RV64-NEXT: vl8r.v v24, (a0)
+; RV64-NEXT: vl8r.v v16, (a0)
; RV64-NEXT: vsetvli a0, zero, e8, m8, ta, ma
; RV64-NEXT: vmseq.vi v0, v8, 0
-; RV64-NEXT: vmv.v.i v16, 0
-; RV64-NEXT: add a1, a3, a1
-; RV64-NEXT: add a2, a3, a2
-; RV64-NEXT: vmseq.vi v8, v24, 0
-; RV64-NEXT: vmerge.vim v24, v16, 1, v0
-; RV64-NEXT: vs8r.v v24, (a3)
-; RV64-NEXT: vmv1r.v v0, v8
-; RV64-NEXT: vmerge.vim v8, v16, 1, v0
+; RV64-NEXT: vmv.v.i v8, 0
+; RV64-NEXT: vmerge.vim v24, v8, 1, v0
+; RV64-NEXT: vmseq.vi v0, v16, 0
+; RV64-NEXT: addi a0, sp, 64
+; RV64-NEXT: add a1, a0, a1
+; RV64-NEXT: add a2, a0, a2
+; RV64-NEXT: vs8r.v v24, (a0)
+; RV64-NEXT: vmerge.vim v8, v8, 1, v0
; RV64-NEXT: vs8r.v v8, (a2)
; RV64-NEXT: lbu a0, 0(a1)
; RV64-NEXT: addi sp, s0, -80
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
index 2587411..fb070b2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
@@ -324,24 +324,23 @@ define i1 @extractelt_v256i1(ptr %x, i64 %idx) nounwind {
; RV32-NEXT: sw s0, 376(sp) # 4-byte Folded Spill
; RV32-NEXT: addi s0, sp, 384
; RV32-NEXT: andi sp, sp, -128
-; RV32-NEXT: zext.b a1, a1
-; RV32-NEXT: mv a2, sp
-; RV32-NEXT: li a3, 128
-; RV32-NEXT: vsetvli zero, a3, e8, m8, ta, ma
+; RV32-NEXT: li a2, 128
+; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; RV32-NEXT: vle8.v v8, (a0)
; RV32-NEXT: addi a0, a0, 128
; RV32-NEXT: vle8.v v16, (a0)
-; RV32-NEXT: add a1, a2, a1
; RV32-NEXT: vmseq.vi v0, v8, 0
-; RV32-NEXT: vmv.v.i v24, 0
-; RV32-NEXT: vmseq.vi v8, v16, 0
-; RV32-NEXT: vmerge.vim v16, v24, 1, v0
-; RV32-NEXT: vse8.v v16, (a2)
-; RV32-NEXT: vmv1r.v v0, v8
-; RV32-NEXT: vmerge.vim v8, v24, 1, v0
-; RV32-NEXT: addi a0, sp, 128
-; RV32-NEXT: vse8.v v8, (a0)
-; RV32-NEXT: lbu a0, 0(a1)
+; RV32-NEXT: vmv.v.i v8, 0
+; RV32-NEXT: vmerge.vim v24, v8, 1, v0
+; RV32-NEXT: vmseq.vi v0, v16, 0
+; RV32-NEXT: zext.b a0, a1
+; RV32-NEXT: mv a1, sp
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: vse8.v v24, (a1)
+; RV32-NEXT: vmerge.vim v8, v8, 1, v0
+; RV32-NEXT: addi a1, sp, 128
+; RV32-NEXT: vse8.v v8, (a1)
+; RV32-NEXT: lbu a0, 0(a0)
; RV32-NEXT: addi sp, s0, -384
; RV32-NEXT: lw ra, 380(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 376(sp) # 4-byte Folded Reload
@@ -355,24 +354,23 @@ define i1 @extractelt_v256i1(ptr %x, i64 %idx) nounwind {
; RV64-NEXT: sd s0, 368(sp) # 8-byte Folded Spill
; RV64-NEXT: addi s0, sp, 384
; RV64-NEXT: andi sp, sp, -128
-; RV64-NEXT: zext.b a1, a1
-; RV64-NEXT: mv a2, sp
-; RV64-NEXT: li a3, 128
-; RV64-NEXT: vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT: li a2, 128
+; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; RV64-NEXT: vle8.v v8, (a0)
; RV64-NEXT: addi a0, a0, 128
; RV64-NEXT: vle8.v v16, (a0)
-; RV64-NEXT: add a1, a2, a1
; RV64-NEXT: vmseq.vi v0, v8, 0
-; RV64-NEXT: vmv.v.i v24, 0
-; RV64-NEXT: vmseq.vi v8, v16, 0
-; RV64-NEXT: vmerge.vim v16, v24, 1, v0
-; RV64-NEXT: vse8.v v16, (a2)
-; RV64-NEXT: vmv1r.v v0, v8
-; RV64-NEXT: vmerge.vim v8, v24, 1, v0
-; RV64-NEXT: addi a0, sp, 128
-; RV64-NEXT: vse8.v v8, (a0)
-; RV64-NEXT: lbu a0, 0(a1)
+; RV64-NEXT: vmv.v.i v8, 0
+; RV64-NEXT: vmerge.vim v24, v8, 1, v0
+; RV64-NEXT: vmseq.vi v0, v16, 0
+; RV64-NEXT: zext.b a0, a1
+; RV64-NEXT: mv a1, sp
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: vse8.v v24, (a1)
+; RV64-NEXT: vmerge.vim v8, v8, 1, v0
+; RV64-NEXT: addi a1, sp, 128
+; RV64-NEXT: vse8.v v8, (a1)
+; RV64-NEXT: lbu a0, 0(a0)
; RV64-NEXT: addi sp, s0, -384
; RV64-NEXT: ld ra, 376(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 368(sp) # 8-byte Folded Reload
@@ -386,24 +384,23 @@ define i1 @extractelt_v256i1(ptr %x, i64 %idx) nounwind {
; RV32ZBS-NEXT: sw s0, 376(sp) # 4-byte Folded Spill
; RV32ZBS-NEXT: addi s0, sp, 384
; RV32ZBS-NEXT: andi sp, sp, -128
-; RV32ZBS-NEXT: zext.b a1, a1
-; RV32ZBS-NEXT: mv a2, sp
-; RV32ZBS-NEXT: li a3, 128
-; RV32ZBS-NEXT: vsetvli zero, a3, e8, m8, ta, ma
+; RV32ZBS-NEXT: li a2, 128
+; RV32ZBS-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; RV32ZBS-NEXT: vle8.v v8, (a0)
; RV32ZBS-NEXT: addi a0, a0, 128
; RV32ZBS-NEXT: vle8.v v16, (a0)
-; RV32ZBS-NEXT: add a1, a2, a1
; RV32ZBS-NEXT: vmseq.vi v0, v8, 0
-; RV32ZBS-NEXT: vmv.v.i v24, 0
-; RV32ZBS-NEXT: vmseq.vi v8, v16, 0
-; RV32ZBS-NEXT: vmerge.vim v16, v24, 1, v0
-; RV32ZBS-NEXT: vse8.v v16, (a2)
-; RV32ZBS-NEXT: vmv1r.v v0, v8
-; RV32ZBS-NEXT: vmerge.vim v8, v24, 1, v0
-; RV32ZBS-NEXT: addi a0, sp, 128
-; RV32ZBS-NEXT: vse8.v v8, (a0)
-; RV32ZBS-NEXT: lbu a0, 0(a1)
+; RV32ZBS-NEXT: vmv.v.i v8, 0
+; RV32ZBS-NEXT: vmerge.vim v24, v8, 1, v0
+; RV32ZBS-NEXT: vmseq.vi v0, v16, 0
+; RV32ZBS-NEXT: zext.b a0, a1
+; RV32ZBS-NEXT: mv a1, sp
+; RV32ZBS-NEXT: add a0, a1, a0
+; RV32ZBS-NEXT: vse8.v v24, (a1)
+; RV32ZBS-NEXT: vmerge.vim v8, v8, 1, v0
+; RV32ZBS-NEXT: addi a1, sp, 128
+; RV32ZBS-NEXT: vse8.v v8, (a1)
+; RV32ZBS-NEXT: lbu a0, 0(a0)
; RV32ZBS-NEXT: addi sp, s0, -384
; RV32ZBS-NEXT: lw ra, 380(sp) # 4-byte Folded Reload
; RV32ZBS-NEXT: lw s0, 376(sp) # 4-byte Folded Reload
@@ -417,24 +414,23 @@ define i1 @extractelt_v256i1(ptr %x, i64 %idx) nounwind {
; RV64ZBS-NEXT: sd s0, 368(sp) # 8-byte Folded Spill
; RV64ZBS-NEXT: addi s0, sp, 384
; RV64ZBS-NEXT: andi sp, sp, -128
-; RV64ZBS-NEXT: zext.b a1, a1
-; RV64ZBS-NEXT: mv a2, sp
-; RV64ZBS-NEXT: li a3, 128
-; RV64ZBS-NEXT: vsetvli zero, a3, e8, m8, ta, ma
+; RV64ZBS-NEXT: li a2, 128
+; RV64ZBS-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; RV64ZBS-NEXT: vle8.v v8, (a0)
; RV64ZBS-NEXT: addi a0, a0, 128
; RV64ZBS-NEXT: vle8.v v16, (a0)
-; RV64ZBS-NEXT: add a1, a2, a1
; RV64ZBS-NEXT: vmseq.vi v0, v8, 0
-; RV64ZBS-NEXT: vmv.v.i v24, 0
-; RV64ZBS-NEXT: vmseq.vi v8, v16, 0
-; RV64ZBS-NEXT: vmerge.vim v16, v24, 1, v0
-; RV64ZBS-NEXT: vse8.v v16, (a2)
-; RV64ZBS-NEXT: vmv1r.v v0, v8
-; RV64ZBS-NEXT: vmerge.vim v8, v24, 1, v0
-; RV64ZBS-NEXT: addi a0, sp, 128
-; RV64ZBS-NEXT: vse8.v v8, (a0)
-; RV64ZBS-NEXT: lbu a0, 0(a1)
+; RV64ZBS-NEXT: vmv.v.i v8, 0
+; RV64ZBS-NEXT: vmerge.vim v24, v8, 1, v0
+; RV64ZBS-NEXT: vmseq.vi v0, v16, 0
+; RV64ZBS-NEXT: zext.b a0, a1
+; RV64ZBS-NEXT: mv a1, sp
+; RV64ZBS-NEXT: add a0, a1, a0
+; RV64ZBS-NEXT: vse8.v v24, (a1)
+; RV64ZBS-NEXT: vmerge.vim v8, v8, 1, v0
+; RV64ZBS-NEXT: addi a1, sp, 128
+; RV64ZBS-NEXT: vse8.v v8, (a1)
+; RV64ZBS-NEXT: lbu a0, 0(a0)
; RV64ZBS-NEXT: addi sp, s0, -384
; RV64ZBS-NEXT: ld ra, 376(sp) # 8-byte Folded Reload
; RV64ZBS-NEXT: ld s0, 368(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
index dbc8e89..bdf344d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
@@ -1754,6 +1754,17 @@ define void @store_factor4_one_active(ptr %ptr, <4 x i32> %v) {
ret void
}
+define void @vpstore_factor4_one_active(ptr %ptr, <4 x i32> %v) {
+; CHECK-LABEL: vpstore_factor4_one_active:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vsseg4e32.v v8, (a0)
+; CHECK-NEXT: ret
+ %v0 = shufflevector <4 x i32> %v, <4 x i32> poison, <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 1, i32 undef, i32 undef, i32 undef, i32 2, i32 undef, i32 undef, i32 undef, i32 3, i32 undef, i32 undef, i32 undef>
+ tail call void @llvm.vp.store.v16i32.p0(<16 x i32> %v0, ptr %ptr, <16 x i1> splat (i1 true), i32 16)
+ ret void
+}
+
define void @store_factor4_one_active_idx1(ptr %ptr, <4 x i32> %v) {
; CHECK-LABEL: store_factor4_one_active_idx1:
; CHECK: # %bb.0:
@@ -1828,8 +1839,8 @@ define {<4 x i32>, <4 x i32>, <4 x i32>} @invalid_vp_mask(ptr %ptr) {
; RV32-NEXT: vle32.v v12, (a0), v0.t
; RV32-NEXT: li a0, 36
; RV32-NEXT: vmv.s.x v20, a1
-; RV32-NEXT: lui a1, %hi(.LCPI53_0)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI53_0)
+; RV32-NEXT: lui a1, %hi(.LCPI54_0)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI54_0)
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vle16.v v21, (a1)
; RV32-NEXT: vcompress.vm v8, v12, v11
@@ -1904,8 +1915,8 @@ define {<4 x i32>, <4 x i32>, <4 x i32>} @invalid_vp_evl(ptr %ptr) {
; RV32-NEXT: vmv.s.x v10, a0
; RV32-NEXT: li a0, 146
; RV32-NEXT: vmv.s.x v11, a0
-; RV32-NEXT: lui a0, %hi(.LCPI54_0)
-; RV32-NEXT: addi a0, a0, %lo(.LCPI54_0)
+; RV32-NEXT: lui a0, %hi(.LCPI55_0)
+; RV32-NEXT: addi a0, a0, %lo(.LCPI55_0)
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vle16.v v20, (a0)
; RV32-NEXT: li a0, 36
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-deinterleave.ll
index c11319f..67584ba 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-deinterleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-deinterleave.ll
@@ -143,16 +143,15 @@ define void @deinterleave6_0_i8(ptr %in, ptr %out) {
; CHECK-LABEL: deinterleave6_0_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmv.v.i v0, 2
-; CHECK-NEXT: vmv.v.i v8, 4
; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 8
+; CHECK-NEXT: vslidedown.vi v9, v8, 8
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT: vslidedown.vi v9, v9, 5, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vrgather.vi v9, v10, 4, v0.t
-; CHECK-NEXT: vse8.v v9, (a1)
+; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT: vmv.v.i v0, 4
+; CHECK-NEXT: vrgather.vi v8, v9, 4, v0.t
+; CHECK-NEXT: vse8.v v8, (a1)
; CHECK-NEXT: ret
entry:
%0 = load <16 x i8>, ptr %in, align 1
@@ -188,16 +187,15 @@ define void @deinterleave7_0_i8(ptr %in, ptr %out) {
; CHECK-LABEL: deinterleave7_0_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmv.v.i v0, 2
-; CHECK-NEXT: vmv.v.i v8, 4
; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 8
+; CHECK-NEXT: vslidedown.vi v9, v8, 8
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT: vslidedown.vi v9, v9, 6, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vrgather.vi v9, v10, 6, v0.t
-; CHECK-NEXT: vse8.v v9, (a1)
+; CHECK-NEXT: vslidedown.vi v8, v8, 6, v0.t
+; CHECK-NEXT: vmv.v.i v0, 4
+; CHECK-NEXT: vrgather.vi v8, v9, 6, v0.t
+; CHECK-NEXT: vse8.v v8, (a1)
; CHECK-NEXT: ret
entry:
%0 = load <16 x i8>, ptr %in, align 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/reproducer-pr146855.ll b/llvm/test/CodeGen/RISCV/rvv/reproducer-pr146855.ll
new file mode 100644
index 0000000..cca00bf
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/reproducer-pr146855.ll
@@ -0,0 +1,72 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s
+target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
+target triple = "riscv64-unknown-linux-gnu"
+
+define i32 @_ZN4Mesh12rezone_countESt6vectorIiSaIiEERiS3_(<vscale x 4 x i32> %wide.load, <vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, <vscale x 4 x i1> %3) #0 {
+; CHECK-LABEL: _ZN4Mesh12rezone_countESt6vectorIiSaIiEERiS3_:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: li a0, 0
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vmv.v.i v14, 0
+; CHECK-NEXT: .LBB0_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: vmv2r.v v16, v10
+; CHECK-NEXT: vle32.v v16, (a0), v0.t
+; CHECK-NEXT: vand.vi v16, v16, 1
+; CHECK-NEXT: vmsne.vi v9, v16, 0
+; CHECK-NEXT: vmand.mm v0, v8, v9
+; CHECK-NEXT: vmerge.vim v12, v12, -1, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vor.vi v14, v14, 1, v0.t
+; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: j .LBB0_1
+entry:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ 1, %vector.body ]
+ %vec.phi = phi <vscale x 4 x i32> [ zeroinitializer, %entry ], [ %predphi88, %vector.body ]
+ %vec.phi81 = phi <vscale x 4 x i32> [ zeroinitializer, %entry ], [ %predphi93, %vector.body ]
+ %wide.load1 = load <vscale x 4 x i32>, ptr null, align 4
+ %4 = icmp slt <vscale x 4 x i32> %wide.load, zeroinitializer
+ %5 = icmp sgt <vscale x 4 x i32> %wide.load, zeroinitializer
+ %wide.masked.load82 = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr null, i32 1, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+ %6 = icmp eq <vscale x 4 x i32> zeroinitializer, zeroinitializer
+ %7 = getelementptr i32, ptr null, i64 %index
+ %wide.masked.load83 = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr %7, i32 1, <vscale x 4 x i1> %0, <vscale x 4 x i32> zeroinitializer)
+ %8 = select <vscale x 4 x i1> %0, <vscale x 4 x i1> %0, <vscale x 4 x i1> zeroinitializer
+ %9 = trunc <vscale x 4 x i32> %wide.masked.load83 to <vscale x 4 x i1>
+ %narrow = select <vscale x 4 x i1> %0, <vscale x 4 x i1> %9, <vscale x 4 x i1> zeroinitializer
+ %10 = sext <vscale x 4 x i1> %narrow to <vscale x 4 x i32>
+ %predphi88 = or <vscale x 4 x i32> %vec.phi, %10
+ %11 = zext <vscale x 4 x i1> %0 to <vscale x 4 x i32>
+ %predphi93 = or <vscale x 4 x i32> %vec.phi81, %11
+ %index.next = add i64 0, 1
+ br i1 false, label %middle.block, label %vector.body
+
+middle.block: ; preds = %vector.body
+ %12 = tail call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %vec.phi)
+ %13 = tail call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %vec.phi81)
+ ret i32 %13
+}
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: read)
+declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr captures(none), i32 immarg, <vscale x 4 x i1>, <vscale x 4 x i32>) #1
+
+; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn memory(none)
+declare i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32>) #2
+
+; uselistorder directives
+uselistorder ptr @llvm.masked.load.nxv4i32.p0, { 1, 0 }
+uselistorder ptr @llvm.vector.reduce.add.nxv4i32, { 1, 0 }
+
+attributes #0 = { "target-features"="+v" }
+attributes #1 = { nocallback nofree nosync nounwind willreturn memory(argmem: read) }
+attributes #2 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
index 9af92aa..578b67e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
@@ -538,3 +538,164 @@ define { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x
%res7 = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } %res6, <vscale x 8 x i8> %t7, 7
ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } %res7
}
+
+define {<vscale x 16 x i8>, <vscale x 16 x i8>} @masked_load_factor2(ptr %p) {
+; CHECK-LABEL: masked_load_factor2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vl4r.v v12, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: vnsrl.wi v10, v12, 8
+; CHECK-NEXT: ret
+ %vec = call <vscale x 32 x i8> @llvm.masked.load(ptr %p, i32 4, <vscale x 32 x i1> splat (i1 true), <vscale x 32 x i8> poison)
+ %deinterleaved.results = call {<vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.vector.deinterleave2.nxv32i8(<vscale x 32 x i8> %vec)
+ ret {<vscale x 16 x i8>, <vscale x 16 x i8>} %deinterleaved.results
+}
+
+define {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} @masked_loat_factor4(ptr %p) {
+; CHECK-LABEL: masked_loat_factor4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 2
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
+; CHECK-NEXT: vl4r.v v8, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs4r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT: vlseg4e8.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %vec = call <vscale x 32 x i8> @llvm.masked.load(ptr %p, i32 4, <vscale x 32 x i1> splat (i1 true), <vscale x 32 x i8> poison)
+ %deinterleaved.results = call {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} @llvm.vector.deinterleave4.nxv32i8(<vscale x 32 x i8> %vec)
+ ret {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} %deinterleaved.results
+}
+
+define {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} @masked_loat_factor4_mask(ptr %p, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: masked_loat_factor4_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: add a3, a1, a2
+; CHECK-NEXT: vmv.v.v v9, v8
+; CHECK-NEXT: srli a4, a2, 2
+; CHECK-NEXT: vmv.v.v v10, v8
+; CHECK-NEXT: srli a5, a2, 3
+; CHECK-NEXT: vmv.v.v v11, v8
+; CHECK-NEXT: vsseg4e8.v v8, (a1)
+; CHECK-NEXT: vl1r.v v8, (a1)
+; CHECK-NEXT: add a1, a4, a5
+; CHECK-NEXT: vl1r.v v9, (a3)
+; CHECK-NEXT: add a3, a3, a2
+; CHECK-NEXT: add a2, a3, a2
+; CHECK-NEXT: vl1r.v v10, (a3)
+; CHECK-NEXT: vl1r.v v11, (a2)
+; CHECK-NEXT: vmsne.vi v9, v9, 0
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: vmsne.vi v8, v10, 0
+; CHECK-NEXT: vmsne.vi v10, v11, 0
+; CHECK-NEXT: vsetvli zero, a4, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vx v0, v9, a5
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vx v0, v8, a4
+; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslideup.vx v0, v10, a1
+; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vs4r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT: vlseg4e8.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %interleaved.mask = tail call <vscale x 32 x i1> @llvm.vector.interleave4.nxv32i1(<vscale x 8 x i1> %mask, <vscale x 8 x i1> %mask, <vscale x 8 x i1> %mask, <vscale x 8 x i1> %mask)
+ %vec = call <vscale x 32 x i8> @llvm.masked.load(ptr %p, i32 4, <vscale x 32 x i1> %interleaved.mask, <vscale x 32 x i8> poison)
+ %deinterleaved.results = call {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} @llvm.vector.deinterleave4.nxv32i8(<vscale x 32 x i8> %vec)
+ ret {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} %deinterleaved.results
+}
+
+; Negative test - some of the deinterleaved elements might come from the
+; passthru not the load
+define {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} @masked_loat_factor4_passthru(ptr %p, <vscale x 8 x i1> %mask, <vscale x 32 x i8> %passthru) {
+; CHECK-LABEL: masked_loat_factor4_passthru:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
+; CHECK-NEXT: add a3, a1, a2
+; CHECK-NEXT: vmv.v.v v13, v12
+; CHECK-NEXT: srli a4, a2, 2
+; CHECK-NEXT: vmv.v.v v14, v12
+; CHECK-NEXT: srli a5, a2, 3
+; CHECK-NEXT: vmv.v.v v15, v12
+; CHECK-NEXT: vsseg4e8.v v12, (a1)
+; CHECK-NEXT: vl1r.v v12, (a1)
+; CHECK-NEXT: add a1, a4, a5
+; CHECK-NEXT: vl1r.v v13, (a3)
+; CHECK-NEXT: add a3, a3, a2
+; CHECK-NEXT: add a2, a3, a2
+; CHECK-NEXT: vl1r.v v14, (a3)
+; CHECK-NEXT: vl1r.v v15, (a2)
+; CHECK-NEXT: vmsne.vi v13, v13, 0
+; CHECK-NEXT: vmsne.vi v0, v12, 0
+; CHECK-NEXT: vmsne.vi v12, v14, 0
+; CHECK-NEXT: vmsne.vi v14, v15, 0
+; CHECK-NEXT: vsetvli zero, a4, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vx v0, v13, a5
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vx v0, v12, a4
+; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslideup.vx v0, v14, a1
+; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
+; CHECK-NEXT: vle8.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vs4r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT: vlseg4e8.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %interleaved.mask = tail call <vscale x 32 x i1> @llvm.vector.interleave4.nxv32i1(<vscale x 8 x i1> %mask, <vscale x 8 x i1> %mask, <vscale x 8 x i1> %mask, <vscale x 8 x i1> %mask)
+ %vec = call <vscale x 32 x i8> @llvm.masked.load(ptr %p, i32 4, <vscale x 32 x i1> %interleaved.mask, <vscale x 32 x i8> %passthru)
+ %deinterleaved.results = call {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} @llvm.vector.deinterleave4.nxv32i8(<vscale x 32 x i8> %vec)
+ ret {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} %deinterleaved.results
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll b/llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll
index 2068389..ad2ed47 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll
@@ -153,20 +153,19 @@ define <vscale x 2 x i32> @vwop_vscale_sext_i1i32_multiple_users(ptr %x, ptr %y,
; NO_FOLDING: # %bb.0:
; NO_FOLDING-NEXT: vsetvli a3, zero, e32, m1, ta, mu
; NO_FOLDING-NEXT: vlm.v v8, (a0)
-; NO_FOLDING-NEXT: vlm.v v9, (a1)
-; NO_FOLDING-NEXT: vlm.v v10, (a2)
-; NO_FOLDING-NEXT: vmv.v.i v11, 0
+; NO_FOLDING-NEXT: vmv.v.i v10, 0
; NO_FOLDING-NEXT: vmv.v.v v0, v8
-; NO_FOLDING-NEXT: vmerge.vim v12, v11, -1, v0
+; NO_FOLDING-NEXT: vmerge.vim v11, v10, -1, v0
+; NO_FOLDING-NEXT: vlm.v v0, (a1)
+; NO_FOLDING-NEXT: vlm.v v9, (a2)
+; NO_FOLDING-NEXT: vmerge.vim v12, v10, -1, v0
; NO_FOLDING-NEXT: vmv.v.v v0, v9
-; NO_FOLDING-NEXT: vmerge.vim v9, v11, -1, v0
-; NO_FOLDING-NEXT: vmv.v.v v0, v10
-; NO_FOLDING-NEXT: vmerge.vim v10, v11, -1, v0
-; NO_FOLDING-NEXT: vmul.vv v9, v12, v9
-; NO_FOLDING-NEXT: vsub.vv v11, v12, v10
+; NO_FOLDING-NEXT: vmerge.vim v9, v10, -1, v0
+; NO_FOLDING-NEXT: vmul.vv v10, v11, v12
+; NO_FOLDING-NEXT: vsub.vv v11, v11, v9
; NO_FOLDING-NEXT: vmv.v.v v0, v8
-; NO_FOLDING-NEXT: vadd.vi v10, v10, -1, v0.t
-; NO_FOLDING-NEXT: vor.vv v8, v9, v10
+; NO_FOLDING-NEXT: vadd.vi v9, v9, -1, v0.t
+; NO_FOLDING-NEXT: vor.vv v8, v10, v9
; NO_FOLDING-NEXT: vor.vv v8, v8, v11
; NO_FOLDING-NEXT: ret
;
@@ -174,20 +173,19 @@ define <vscale x 2 x i32> @vwop_vscale_sext_i1i32_multiple_users(ptr %x, ptr %y,
; FOLDING: # %bb.0:
; FOLDING-NEXT: vsetvli a3, zero, e32, m1, ta, mu
; FOLDING-NEXT: vlm.v v8, (a0)
-; FOLDING-NEXT: vlm.v v9, (a1)
-; FOLDING-NEXT: vlm.v v10, (a2)
-; FOLDING-NEXT: vmv.v.i v11, 0
+; FOLDING-NEXT: vmv.v.i v10, 0
; FOLDING-NEXT: vmv.v.v v0, v8
-; FOLDING-NEXT: vmerge.vim v12, v11, -1, v0
+; FOLDING-NEXT: vmerge.vim v11, v10, -1, v0
+; FOLDING-NEXT: vlm.v v0, (a1)
+; FOLDING-NEXT: vlm.v v9, (a2)
+; FOLDING-NEXT: vmerge.vim v12, v10, -1, v0
; FOLDING-NEXT: vmv.v.v v0, v9
-; FOLDING-NEXT: vmerge.vim v9, v11, -1, v0
-; FOLDING-NEXT: vmv.v.v v0, v10
-; FOLDING-NEXT: vmerge.vim v10, v11, -1, v0
-; FOLDING-NEXT: vmul.vv v9, v12, v9
-; FOLDING-NEXT: vsub.vv v11, v12, v10
+; FOLDING-NEXT: vmerge.vim v9, v10, -1, v0
+; FOLDING-NEXT: vmul.vv v10, v11, v12
+; FOLDING-NEXT: vsub.vv v11, v11, v9
; FOLDING-NEXT: vmv.v.v v0, v8
-; FOLDING-NEXT: vadd.vi v10, v10, -1, v0.t
-; FOLDING-NEXT: vor.vv v8, v9, v10
+; FOLDING-NEXT: vadd.vi v9, v9, -1, v0.t
+; FOLDING-NEXT: vor.vv v8, v10, v9
; FOLDING-NEXT: vor.vv v8, v8, v11
; FOLDING-NEXT: ret
%a = load <vscale x 2 x i1>, ptr %x
@@ -209,20 +207,19 @@ define <vscale x 2 x i8> @vwop_vscale_sext_i1i8_multiple_users(ptr %x, ptr %y, p
; NO_FOLDING: # %bb.0:
; NO_FOLDING-NEXT: vsetvli a3, zero, e8, mf4, ta, mu
; NO_FOLDING-NEXT: vlm.v v8, (a0)
-; NO_FOLDING-NEXT: vlm.v v9, (a1)
-; NO_FOLDING-NEXT: vlm.v v10, (a2)
-; NO_FOLDING-NEXT: vmv.v.i v11, 0
+; NO_FOLDING-NEXT: vmv.v.i v10, 0
; NO_FOLDING-NEXT: vmv1r.v v0, v8
-; NO_FOLDING-NEXT: vmerge.vim v12, v11, -1, v0
+; NO_FOLDING-NEXT: vmerge.vim v11, v10, -1, v0
+; NO_FOLDING-NEXT: vlm.v v0, (a1)
+; NO_FOLDING-NEXT: vlm.v v9, (a2)
+; NO_FOLDING-NEXT: vmerge.vim v12, v10, -1, v0
; NO_FOLDING-NEXT: vmv1r.v v0, v9
-; NO_FOLDING-NEXT: vmerge.vim v9, v11, -1, v0
-; NO_FOLDING-NEXT: vmv1r.v v0, v10
-; NO_FOLDING-NEXT: vmerge.vim v10, v11, -1, v0
-; NO_FOLDING-NEXT: vmul.vv v9, v12, v9
-; NO_FOLDING-NEXT: vsub.vv v11, v12, v10
+; NO_FOLDING-NEXT: vmerge.vim v9, v10, -1, v0
+; NO_FOLDING-NEXT: vmul.vv v10, v11, v12
+; NO_FOLDING-NEXT: vsub.vv v11, v11, v9
; NO_FOLDING-NEXT: vmv1r.v v0, v8
-; NO_FOLDING-NEXT: vadd.vi v10, v10, -1, v0.t
-; NO_FOLDING-NEXT: vor.vv v8, v9, v10
+; NO_FOLDING-NEXT: vadd.vi v9, v9, -1, v0.t
+; NO_FOLDING-NEXT: vor.vv v8, v10, v9
; NO_FOLDING-NEXT: vor.vv v8, v8, v11
; NO_FOLDING-NEXT: ret
;
@@ -230,20 +227,19 @@ define <vscale x 2 x i8> @vwop_vscale_sext_i1i8_multiple_users(ptr %x, ptr %y, p
; FOLDING: # %bb.0:
; FOLDING-NEXT: vsetvli a3, zero, e8, mf4, ta, mu
; FOLDING-NEXT: vlm.v v8, (a0)
-; FOLDING-NEXT: vlm.v v9, (a1)
-; FOLDING-NEXT: vlm.v v10, (a2)
-; FOLDING-NEXT: vmv.v.i v11, 0
+; FOLDING-NEXT: vmv.v.i v10, 0
; FOLDING-NEXT: vmv1r.v v0, v8
-; FOLDING-NEXT: vmerge.vim v12, v11, -1, v0
+; FOLDING-NEXT: vmerge.vim v11, v10, -1, v0
+; FOLDING-NEXT: vlm.v v0, (a1)
+; FOLDING-NEXT: vlm.v v9, (a2)
+; FOLDING-NEXT: vmerge.vim v12, v10, -1, v0
; FOLDING-NEXT: vmv1r.v v0, v9
-; FOLDING-NEXT: vmerge.vim v9, v11, -1, v0
-; FOLDING-NEXT: vmv1r.v v0, v10
-; FOLDING-NEXT: vmerge.vim v10, v11, -1, v0
-; FOLDING-NEXT: vmul.vv v9, v12, v9
-; FOLDING-NEXT: vsub.vv v11, v12, v10
+; FOLDING-NEXT: vmerge.vim v9, v10, -1, v0
+; FOLDING-NEXT: vmul.vv v10, v11, v12
+; FOLDING-NEXT: vsub.vv v11, v11, v9
; FOLDING-NEXT: vmv1r.v v0, v8
-; FOLDING-NEXT: vadd.vi v10, v10, -1, v0.t
-; FOLDING-NEXT: vor.vv v8, v9, v10
+; FOLDING-NEXT: vadd.vi v9, v9, -1, v0.t
+; FOLDING-NEXT: vor.vv v8, v10, v9
; FOLDING-NEXT: vor.vv v8, v8, v11
; FOLDING-NEXT: ret
%a = load <vscale x 2 x i1>, ptr %x
@@ -444,16 +440,14 @@ define <vscale x 2 x i32> @vwop_vscale_zext_i1i32_multiple_users(ptr %x, ptr %y,
; NO_FOLDING: # %bb.0:
; NO_FOLDING-NEXT: vsetvli a3, zero, e32, m1, ta, mu
; NO_FOLDING-NEXT: vlm.v v0, (a0)
-; NO_FOLDING-NEXT: vlm.v v8, (a2)
-; NO_FOLDING-NEXT: vlm.v v9, (a1)
-; NO_FOLDING-NEXT: vmv.v.i v10, 0
-; NO_FOLDING-NEXT: vmerge.vim v11, v10, 1, v0
-; NO_FOLDING-NEXT: vmv.v.v v0, v8
-; NO_FOLDING-NEXT: vmerge.vim v8, v10, 1, v0
-; NO_FOLDING-NEXT: vadd.vv v10, v11, v8
-; NO_FOLDING-NEXT: vsub.vv v8, v11, v8
-; NO_FOLDING-NEXT: vmv.v.v v0, v9
-; NO_FOLDING-NEXT: vor.vv v10, v10, v11, v0.t
+; NO_FOLDING-NEXT: vmv.v.i v8, 0
+; NO_FOLDING-NEXT: vmerge.vim v9, v8, 1, v0
+; NO_FOLDING-NEXT: vlm.v v0, (a2)
+; NO_FOLDING-NEXT: vmerge.vim v8, v8, 1, v0
+; NO_FOLDING-NEXT: vlm.v v0, (a1)
+; NO_FOLDING-NEXT: vadd.vv v10, v9, v8
+; NO_FOLDING-NEXT: vsub.vv v8, v9, v8
+; NO_FOLDING-NEXT: vor.vv v10, v10, v9, v0.t
; NO_FOLDING-NEXT: vor.vv v8, v10, v8
; NO_FOLDING-NEXT: ret
;
@@ -461,16 +455,14 @@ define <vscale x 2 x i32> @vwop_vscale_zext_i1i32_multiple_users(ptr %x, ptr %y,
; FOLDING: # %bb.0:
; FOLDING-NEXT: vsetvli a3, zero, e32, m1, ta, mu
; FOLDING-NEXT: vlm.v v0, (a0)
-; FOLDING-NEXT: vlm.v v8, (a2)
-; FOLDING-NEXT: vlm.v v9, (a1)
-; FOLDING-NEXT: vmv.v.i v10, 0
-; FOLDING-NEXT: vmerge.vim v11, v10, 1, v0
-; FOLDING-NEXT: vmv.v.v v0, v8
-; FOLDING-NEXT: vmerge.vim v8, v10, 1, v0
-; FOLDING-NEXT: vadd.vv v10, v11, v8
-; FOLDING-NEXT: vsub.vv v8, v11, v8
-; FOLDING-NEXT: vmv.v.v v0, v9
-; FOLDING-NEXT: vor.vv v10, v10, v11, v0.t
+; FOLDING-NEXT: vmv.v.i v8, 0
+; FOLDING-NEXT: vmerge.vim v9, v8, 1, v0
+; FOLDING-NEXT: vlm.v v0, (a2)
+; FOLDING-NEXT: vmerge.vim v8, v8, 1, v0
+; FOLDING-NEXT: vlm.v v0, (a1)
+; FOLDING-NEXT: vadd.vv v10, v9, v8
+; FOLDING-NEXT: vsub.vv v8, v9, v8
+; FOLDING-NEXT: vor.vv v10, v10, v9, v0.t
; FOLDING-NEXT: vor.vv v8, v10, v8
; FOLDING-NEXT: ret
%a = load <vscale x 2 x i1>, ptr %x
@@ -492,16 +484,14 @@ define <vscale x 2 x i8> @vwop_vscale_zext_i1i8_multiple_users(ptr %x, ptr %y, p
; NO_FOLDING: # %bb.0:
; NO_FOLDING-NEXT: vsetvli a3, zero, e8, mf4, ta, mu
; NO_FOLDING-NEXT: vlm.v v0, (a0)
-; NO_FOLDING-NEXT: vlm.v v8, (a2)
-; NO_FOLDING-NEXT: vlm.v v9, (a1)
-; NO_FOLDING-NEXT: vmv.v.i v10, 0
-; NO_FOLDING-NEXT: vmerge.vim v11, v10, 1, v0
-; NO_FOLDING-NEXT: vmv1r.v v0, v8
-; NO_FOLDING-NEXT: vmerge.vim v8, v10, 1, v0
-; NO_FOLDING-NEXT: vadd.vv v10, v11, v8
-; NO_FOLDING-NEXT: vsub.vv v8, v11, v8
-; NO_FOLDING-NEXT: vmv1r.v v0, v9
-; NO_FOLDING-NEXT: vor.vv v10, v10, v11, v0.t
+; NO_FOLDING-NEXT: vmv.v.i v8, 0
+; NO_FOLDING-NEXT: vmerge.vim v9, v8, 1, v0
+; NO_FOLDING-NEXT: vlm.v v0, (a2)
+; NO_FOLDING-NEXT: vmerge.vim v8, v8, 1, v0
+; NO_FOLDING-NEXT: vlm.v v0, (a1)
+; NO_FOLDING-NEXT: vadd.vv v10, v9, v8
+; NO_FOLDING-NEXT: vsub.vv v8, v9, v8
+; NO_FOLDING-NEXT: vor.vv v10, v10, v9, v0.t
; NO_FOLDING-NEXT: vor.vv v8, v10, v8
; NO_FOLDING-NEXT: ret
;
@@ -509,16 +499,14 @@ define <vscale x 2 x i8> @vwop_vscale_zext_i1i8_multiple_users(ptr %x, ptr %y, p
; FOLDING: # %bb.0:
; FOLDING-NEXT: vsetvli a3, zero, e8, mf4, ta, mu
; FOLDING-NEXT: vlm.v v0, (a0)
-; FOLDING-NEXT: vlm.v v8, (a2)
-; FOLDING-NEXT: vlm.v v9, (a1)
-; FOLDING-NEXT: vmv.v.i v10, 0
-; FOLDING-NEXT: vmerge.vim v11, v10, 1, v0
-; FOLDING-NEXT: vmv1r.v v0, v8
-; FOLDING-NEXT: vmerge.vim v8, v10, 1, v0
-; FOLDING-NEXT: vadd.vv v10, v11, v8
-; FOLDING-NEXT: vsub.vv v8, v11, v8
-; FOLDING-NEXT: vmv1r.v v0, v9
-; FOLDING-NEXT: vor.vv v10, v10, v11, v0.t
+; FOLDING-NEXT: vmv.v.i v8, 0
+; FOLDING-NEXT: vmerge.vim v9, v8, 1, v0
+; FOLDING-NEXT: vlm.v v0, (a2)
+; FOLDING-NEXT: vmerge.vim v8, v8, 1, v0
+; FOLDING-NEXT: vlm.v v0, (a1)
+; FOLDING-NEXT: vadd.vv v10, v9, v8
+; FOLDING-NEXT: vsub.vv v8, v9, v8
+; FOLDING-NEXT: vor.vv v10, v10, v9, v0.t
; FOLDING-NEXT: vor.vv v8, v10, v8
; FOLDING-NEXT: ret
%a = load <vscale x 2 x i1>, ptr %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
index 9cdec6a..30044ad 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
@@ -494,17 +494,17 @@ define <vscale x 8 x double> @vfmerge_nzv_nxv8f64(<vscale x 8 x double> %va, <vs
define <vscale x 16 x double> @vselect_combine_regression(<vscale x 16 x i64> %va, <vscale x 16 x double> %vb) {
; CHECK-LABEL: vselect_combine_regression:
; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmseq.vi v24, v16, 0
+; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
+; CHECK-NEXT: vmv8r.v v24, v16
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v16, 0
-; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: vmv.v.i v8, 0
-; CHECK-NEXT: add a1, a0, a1
; CHECK-NEXT: vle64.v v8, (a0), v0.t
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vle64.v v16, (a1), v0.t
+; CHECK-NEXT: vmseq.vi v0, v24, 0
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vle64.v v16, (a0), v0.t
; CHECK-NEXT: ret
%cond = icmp eq <vscale x 16 x i64> %va, zeroinitializer
%sel = select <vscale x 16 x i1> %cond, <vscale x 16 x double> %vb, <vscale x 16 x double> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll b/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll
index 061435c..59a702a 100644
--- a/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll
+++ b/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll
@@ -798,12 +798,12 @@ define void @sextw_removal_ccor(i1 %c, i32 signext %arg, i32 signext %arg1, i32
; RV64SFBSIFIVEU74-LABEL: sextw_removal_ccor:
; RV64SFBSIFIVEU74: # %bb.0: # %bb
; RV64SFBSIFIVEU74-NEXT: addi sp, sp, -32
-; RV64SFBSIFIVEU74-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64SFBSIFIVEU74-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; RV64SFBSIFIVEU74-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64SFBSIFIVEU74-NEXT: mv s0, a3
+; RV64SFBSIFIVEU74-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64SFBSIFIVEU74-NEXT: andi a0, a0, 1
; RV64SFBSIFIVEU74-NEXT: mv s1, a2
+; RV64SFBSIFIVEU74-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64SFBSIFIVEU74-NEXT: beqz a0, .LBB15_4
; RV64SFBSIFIVEU74-NEXT: # %bb.3: # %bb
; RV64SFBSIFIVEU74-NEXT: or s0, a3, a1
@@ -824,11 +824,11 @@ define void @sextw_removal_ccor(i1 %c, i32 signext %arg, i32 signext %arg1, i32
; RV64SFBANDESAX45-LABEL: sextw_removal_ccor:
; RV64SFBANDESAX45: # %bb.0: # %bb
; RV64SFBANDESAX45-NEXT: addi sp, sp, -32
-; RV64SFBANDESAX45-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64SFBANDESAX45-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; RV64SFBANDESAX45-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64SFBANDESAX45-NEXT: mv s0, a3
+; RV64SFBANDESAX45-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64SFBANDESAX45-NEXT: mv s1, a2
+; RV64SFBANDESAX45-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64SFBANDESAX45-NEXT: nds.bbc a0, 0, .LBB15_2
; RV64SFBANDESAX45-NEXT: # %bb.1:
; RV64SFBANDESAX45-NEXT: or s0, s0, a1
@@ -848,12 +848,12 @@ define void @sextw_removal_ccor(i1 %c, i32 signext %arg, i32 signext %arg1, i32
; ZICOND-LABEL: sextw_removal_ccor:
; ZICOND: # %bb.0: # %bb
; ZICOND-NEXT: addi sp, sp, -32
-; ZICOND-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; ZICOND-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; ZICOND-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; ZICOND-NEXT: mv s0, a3
+; ZICOND-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; ZICOND-NEXT: andi a0, a0, 1
; ZICOND-NEXT: mv s1, a2
+; ZICOND-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; ZICOND-NEXT: beqz a0, .LBB15_4
; ZICOND-NEXT: # %bb.3: # %bb
; ZICOND-NEXT: or s0, a3, a1
@@ -874,12 +874,12 @@ define void @sextw_removal_ccor(i1 %c, i32 signext %arg, i32 signext %arg1, i32
; RV32SFB-LABEL: sextw_removal_ccor:
; RV32SFB: # %bb.0: # %bb
; RV32SFB-NEXT: addi sp, sp, -16
-; RV32SFB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32SFB-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
-; RV32SFB-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32SFB-NEXT: mv s0, a3
+; RV32SFB-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32SFB-NEXT: andi a0, a0, 1
; RV32SFB-NEXT: mv s1, a2
+; RV32SFB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32SFB-NEXT: beqz a0, .LBB15_4
; RV32SFB-NEXT: # %bb.3: # %bb
; RV32SFB-NEXT: or s0, a3, a1
@@ -941,11 +941,11 @@ define void @sextw_removal_ccaddw(i1 %c, i32 signext %arg, i32 signext %arg1, i3
; RV64SFBSIFIVEU74-LABEL: sextw_removal_ccaddw:
; RV64SFBSIFIVEU74: # %bb.0: # %bb
; RV64SFBSIFIVEU74-NEXT: addi sp, sp, -32
-; RV64SFBSIFIVEU74-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; RV64SFBSIFIVEU74-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64SFBSIFIVEU74-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64SFBSIFIVEU74-NEXT: mv s1, a1
; RV64SFBSIFIVEU74-NEXT: andi a0, a0, 1
+; RV64SFBSIFIVEU74-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64SFBSIFIVEU74-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64SFBSIFIVEU74-NEXT: mv s0, a2
; RV64SFBSIFIVEU74-NEXT: beqz a0, .LBB16_4
; RV64SFBSIFIVEU74-NEXT: # %bb.3: # %bb
@@ -967,11 +967,11 @@ define void @sextw_removal_ccaddw(i1 %c, i32 signext %arg, i32 signext %arg1, i3
; RV64SFBANDESAX45-LABEL: sextw_removal_ccaddw:
; RV64SFBANDESAX45: # %bb.0: # %bb
; RV64SFBANDESAX45-NEXT: addi sp, sp, -32
-; RV64SFBANDESAX45-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64SFBANDESAX45-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; RV64SFBANDESAX45-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64SFBANDESAX45-NEXT: mv s0, a2
+; RV64SFBANDESAX45-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64SFBANDESAX45-NEXT: mv s1, a1
+; RV64SFBANDESAX45-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64SFBANDESAX45-NEXT: nds.bbc a0, 0, .LBB16_2
; RV64SFBANDESAX45-NEXT: # %bb.1:
; RV64SFBANDESAX45-NEXT: addw s1, s1, a3
@@ -991,11 +991,11 @@ define void @sextw_removal_ccaddw(i1 %c, i32 signext %arg, i32 signext %arg1, i3
; ZICOND-LABEL: sextw_removal_ccaddw:
; ZICOND: # %bb.0: # %bb
; ZICOND-NEXT: addi sp, sp, -32
-; ZICOND-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; ZICOND-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; ZICOND-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; ZICOND-NEXT: mv s1, a1
; ZICOND-NEXT: andi a0, a0, 1
+; ZICOND-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; ZICOND-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; ZICOND-NEXT: mv s0, a2
; ZICOND-NEXT: beqz a0, .LBB16_4
; ZICOND-NEXT: # %bb.3: # %bb
@@ -1017,11 +1017,11 @@ define void @sextw_removal_ccaddw(i1 %c, i32 signext %arg, i32 signext %arg1, i3
; RV32SFB-LABEL: sextw_removal_ccaddw:
; RV32SFB: # %bb.0: # %bb
; RV32SFB-NEXT: addi sp, sp, -16
-; RV32SFB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32SFB-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32SFB-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32SFB-NEXT: mv s1, a1
; RV32SFB-NEXT: andi a0, a0, 1
+; RV32SFB-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32SFB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32SFB-NEXT: mv s0, a2
; RV32SFB-NEXT: beqz a0, .LBB16_4
; RV32SFB-NEXT: # %bb.3: # %bb
diff --git a/llvm/test/CodeGen/RISCV/zdinx-spill.ll b/llvm/test/CodeGen/RISCV/zdinx-spill.ll
index d7a7006..6f206fe 100644
--- a/llvm/test/CodeGen/RISCV/zdinx-spill.ll
+++ b/llvm/test/CodeGen/RISCV/zdinx-spill.ll
@@ -9,7 +9,6 @@ define double @foo(double %x) nounwind {
; CHECK-NEXT: liveins: $x10, $x11, $x8, $x9, $x18, $x19, $x20, $x21, $x22, $x23, $x24, $x25, $x26, $x27
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: $x2 = frame-setup ADDI $x2, -64
- ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 64
; CHECK-NEXT: frame-setup SW killed $x8, $x2, 60 :: (store (s32) into %stack.1)
; CHECK-NEXT: frame-setup SW killed $x9, $x2, 56 :: (store (s32) into %stack.2)
; CHECK-NEXT: frame-setup SW killed $x18, $x2, 52 :: (store (s32) into %stack.3)
@@ -22,18 +21,6 @@ define double @foo(double %x) nounwind {
; CHECK-NEXT: frame-setup SW killed $x25, $x2, 24 :: (store (s32) into %stack.10)
; CHECK-NEXT: frame-setup SW killed $x26, $x2, 20 :: (store (s32) into %stack.11)
; CHECK-NEXT: frame-setup SW killed $x27, $x2, 16 :: (store (s32) into %stack.12)
- ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $x8, -4
- ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $x9, -8
- ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $x18, -12
- ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $x19, -16
- ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $x20, -20
- ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $x21, -24
- ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $x22, -28
- ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $x23, -32
- ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $x24, -36
- ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $x25, -40
- ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $x26, -44
- ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $x27, -48
; CHECK-NEXT: renamable $x10_x11 = nofpexcept FADD_D_IN32X killed renamable $x10_x11, renamable $x10_x11, 7, implicit $frm
; CHECK-NEXT: PseudoRV32ZdinxSD killed renamable $x10_x11, $x2, 8 :: (store (s64) into %stack.0, align 4)
; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */, 12 /* clobber */, implicit-def dead early-clobber $x6, 12 /* clobber */, implicit-def dead early-clobber $x7, 12 /* clobber */, implicit-def dead early-clobber $x8, 12 /* clobber */, implicit-def dead early-clobber $x9, 12 /* clobber */, implicit-def dead early-clobber $x10, 12 /* clobber */, implicit-def dead early-clobber $x11, 12 /* clobber */, implicit-def dead early-clobber $x12, 12 /* clobber */, implicit-def dead early-clobber $x13, 12 /* clobber */, implicit-def dead early-clobber $x14, 12 /* clobber */, implicit-def dead early-clobber $x15, 12 /* clobber */, implicit-def dead early-clobber $x16, 12 /* clobber */, implicit-def dead early-clobber $x17, 12 /* clobber */, implicit-def dead early-clobber $x18, 12 /* clobber */, implicit-def dead early-clobber $x19, 12 /* clobber */, implicit-def dead early-clobber $x20, 12 /* clobber */, implicit-def dead early-clobber $x21, 12 /* clobber */, implicit-def dead early-clobber $x22, 12 /* clobber */, implicit-def dead early-clobber $x23, 12 /* clobber */, implicit-def dead early-clobber $x24, 12 /* clobber */, implicit-def dead early-clobber $x25, 12 /* clobber */, implicit-def dead early-clobber $x26, 12 /* clobber */, implicit-def dead early-clobber $x27, 12 /* clobber */, implicit-def dead early-clobber $x28, 12 /* clobber */, implicit-def dead early-clobber $x29, 12 /* clobber */, implicit-def dead early-clobber $x31
@@ -50,20 +37,7 @@ define double @foo(double %x) nounwind {
; CHECK-NEXT: $x25 = frame-destroy LW $x2, 24 :: (load (s32) from %stack.10)
; CHECK-NEXT: $x26 = frame-destroy LW $x2, 20 :: (load (s32) from %stack.11)
; CHECK-NEXT: $x27 = frame-destroy LW $x2, 16 :: (load (s32) from %stack.12)
- ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
- ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $x9
- ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $x18
- ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $x19
- ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $x20
- ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $x21
- ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $x22
- ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $x23
- ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $x24
- ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $x25
- ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $x26
- ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $x27
; CHECK-NEXT: $x2 = frame-destroy ADDI $x2, 64
- ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%a = fadd double %x, %x
call void asm sideeffect "", "~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{x29},~{xr0},~{x31}"()