Diffstat (limited to 'llvm/test/CodeGen/RISCV')
-rw-r--r--  llvm/test/CodeGen/RISCV/branch-on-zero.ll                                16
-rw-r--r--  llvm/test/CodeGen/RISCV/machine-pipeliner.ll                             46
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll          10
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/machine-combiner-subreg-verifier-error.mir   39
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/pr95865.ll                                   43
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll                              66
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vcpop-shl-zext-opt.ll                        28
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll                   24
-rw-r--r--  llvm/test/CodeGen/RISCV/sra-xor-sra.ll                                   32
9 files changed, 184 insertions, 120 deletions
diff --git a/llvm/test/CodeGen/RISCV/branch-on-zero.ll b/llvm/test/CodeGen/RISCV/branch-on-zero.ll
index 02aeebd..2aec92e 100644
--- a/llvm/test/CodeGen/RISCV/branch-on-zero.ll
+++ b/llvm/test/CodeGen/RISCV/branch-on-zero.ll
@@ -127,13 +127,11 @@ define i32 @test_lshr2(ptr nocapture %x, ptr nocapture readonly %y, i32 %n) {
; RV32-NEXT: .LBB3_2: # %while.body
; RV32-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-NEXT: lw a3, 0(a1)
-; RV32-NEXT: addi a4, a1, 4
+; RV32-NEXT: addi a1, a1, 4
; RV32-NEXT: slli a3, a3, 1
-; RV32-NEXT: addi a1, a0, 4
; RV32-NEXT: sw a3, 0(a0)
-; RV32-NEXT: mv a0, a1
-; RV32-NEXT: mv a1, a4
-; RV32-NEXT: bne a4, a2, .LBB3_2
+; RV32-NEXT: addi a0, a0, 4
+; RV32-NEXT: bne a1, a2, .LBB3_2
; RV32-NEXT: .LBB3_3: # %while.end
; RV32-NEXT: li a0, 0
; RV32-NEXT: ret
@@ -151,13 +149,11 @@ define i32 @test_lshr2(ptr nocapture %x, ptr nocapture readonly %y, i32 %n) {
; RV64-NEXT: .LBB3_2: # %while.body
; RV64-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-NEXT: lw a3, 0(a1)
-; RV64-NEXT: addi a4, a1, 4
+; RV64-NEXT: addi a1, a1, 4
; RV64-NEXT: slli a3, a3, 1
-; RV64-NEXT: addi a1, a0, 4
; RV64-NEXT: sw a3, 0(a0)
-; RV64-NEXT: mv a0, a1
-; RV64-NEXT: mv a1, a4
-; RV64-NEXT: bne a4, a2, .LBB3_2
+; RV64-NEXT: addi a0, a0, 4
+; RV64-NEXT: bne a1, a2, .LBB3_2
; RV64-NEXT: .LBB3_3: # %while.end
; RV64-NEXT: li a0, 0
; RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/machine-pipeliner.ll b/llvm/test/CodeGen/RISCV/machine-pipeliner.ll
index d250098..a2a7da7 100644
--- a/llvm/test/CodeGen/RISCV/machine-pipeliner.ll
+++ b/llvm/test/CodeGen/RISCV/machine-pipeliner.ll
@@ -54,37 +54,37 @@ define void @test_pipelined_1(ptr noalias %in, ptr noalias %out, i32 signext %cn
; CHECK-PIPELINED: # %bb.0: # %entry
; CHECK-PIPELINED-NEXT: blez a2, .LBB1_6
; CHECK-PIPELINED-NEXT: # %bb.1: # %for.body.preheader
-; CHECK-PIPELINED-NEXT: lw a4, 0(a1)
+; CHECK-PIPELINED-NEXT: lw a7, 0(a1)
; CHECK-PIPELINED-NEXT: addi a2, a2, -1
+; CHECK-PIPELINED-NEXT: addi a3, a0, 4
+; CHECK-PIPELINED-NEXT: addi a5, a1, 4
; CHECK-PIPELINED-NEXT: sh2add.uw a6, a2, a1
-; CHECK-PIPELINED-NEXT: addi a2, a0, 4
-; CHECK-PIPELINED-NEXT: addi a1, a1, 4
; CHECK-PIPELINED-NEXT: addi a6, a6, 4
-; CHECK-PIPELINED-NEXT: beq a1, a6, .LBB1_5
+; CHECK-PIPELINED-NEXT: beq a5, a6, .LBB1_5
; CHECK-PIPELINED-NEXT: # %bb.2: # %for.body
-; CHECK-PIPELINED-NEXT: lw a5, 0(a1)
-; CHECK-PIPELINED-NEXT: addi a3, a2, 4
-; CHECK-PIPELINED-NEXT: addi a4, a4, 1
-; CHECK-PIPELINED-NEXT: addi a1, a1, 4
-; CHECK-PIPELINED-NEXT: beq a1, a6, .LBB1_4
+; CHECK-PIPELINED-NEXT: lw a1, 0(a5)
+; CHECK-PIPELINED-NEXT: addi a4, a3, 4
+; CHECK-PIPELINED-NEXT: addi a5, a5, 4
+; CHECK-PIPELINED-NEXT: beq a5, a6, .LBB1_4
; CHECK-PIPELINED-NEXT: .LBB1_3: # %for.body
; CHECK-PIPELINED-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-PIPELINED-NEXT: sw a4, 0(a0)
-; CHECK-PIPELINED-NEXT: mv a4, a5
-; CHECK-PIPELINED-NEXT: lw a5, 0(a1)
-; CHECK-PIPELINED-NEXT: mv a0, a2
-; CHECK-PIPELINED-NEXT: mv a2, a3
-; CHECK-PIPELINED-NEXT: addi a3, a3, 4
-; CHECK-PIPELINED-NEXT: addi a4, a4, 1
-; CHECK-PIPELINED-NEXT: addi a1, a1, 4
-; CHECK-PIPELINED-NEXT: bne a1, a6, .LBB1_3
+; CHECK-PIPELINED-NEXT: addi a2, a7, 1
+; CHECK-PIPELINED-NEXT: mv a7, a1
+; CHECK-PIPELINED-NEXT: lw a1, 0(a5)
+; CHECK-PIPELINED-NEXT: sw a2, 0(a0)
+; CHECK-PIPELINED-NEXT: mv a0, a3
+; CHECK-PIPELINED-NEXT: mv a3, a4
+; CHECK-PIPELINED-NEXT: addi a4, a4, 4
+; CHECK-PIPELINED-NEXT: addi a5, a5, 4
+; CHECK-PIPELINED-NEXT: bne a5, a6, .LBB1_3
; CHECK-PIPELINED-NEXT: .LBB1_4:
-; CHECK-PIPELINED-NEXT: sw a4, 0(a0)
-; CHECK-PIPELINED-NEXT: mv a0, a2
-; CHECK-PIPELINED-NEXT: mv a4, a5
+; CHECK-PIPELINED-NEXT: addi a7, a7, 1
+; CHECK-PIPELINED-NEXT: sw a7, 0(a0)
+; CHECK-PIPELINED-NEXT: mv a0, a3
+; CHECK-PIPELINED-NEXT: mv a7, a1
; CHECK-PIPELINED-NEXT: .LBB1_5:
-; CHECK-PIPELINED-NEXT: addi a4, a4, 1
-; CHECK-PIPELINED-NEXT: sw a4, 0(a0)
+; CHECK-PIPELINED-NEXT: addi a7, a7, 1
+; CHECK-PIPELINED-NEXT: sw a7, 0(a0)
; CHECK-PIPELINED-NEXT: .LBB1_6: # %for.end
; CHECK-PIPELINED-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
index 9c6d77d..c3fe6b3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
@@ -44,9 +44,8 @@ define <4 x i64> @m2_splat_with_tail(<4 x i64> %v1) vscale_range(2,2) {
; CHECK-LABEL: m2_splat_with_tail:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vrgather.vi v10, v8, 0
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vrgather.vi v8, v10, 0
; CHECK-NEXT: ret
%res = shufflevector <4 x i64> %v1, <4 x i64> poison, <4 x i32> <i32 0, i32 0, i32 2, i32 3>
ret <4 x i64> %res
@@ -99,9 +98,8 @@ define <4 x i64> @m2_splat_into_identity(<4 x i64> %v1) vscale_range(2,2) {
; CHECK-LABEL: m2_splat_into_identity:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vrgather.vi v10, v8, 0
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vrgather.vi v8, v10, 0
; CHECK-NEXT: ret
%res = shufflevector <4 x i64> %v1, <4 x i64> poison, <4 x i32> <i32 0, i32 0, i32 2, i32 3>
ret <4 x i64> %res
diff --git a/llvm/test/CodeGen/RISCV/rvv/machine-combiner-subreg-verifier-error.mir b/llvm/test/CodeGen/RISCV/rvv/machine-combiner-subreg-verifier-error.mir
new file mode 100644
index 0000000..76dfd4e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/machine-combiner-subreg-verifier-error.mir
@@ -0,0 +1,39 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs -run-pass=machine-combiner -o - %s | FileCheck %s
+
+# Make sure the verifier doesn't fail due to dropping subregister
+# uses.
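+#
+# In the input below, %7 adds the sub_vrm1_0 subregisters of the vrm8 values
+# %6 and %2, and %8 then adds %7 to %3. When the machine combiner
+# reassociates this chain, the rebuilt adds must carry the .sub_vrm1_0
+# indices over (as the CHECK lines show); dropping them would feed a full
+# vrm8 register to a vr operand and trip -verify-machineinstrs.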
+
+---
+name: machine_combiner_subreg_verifier_error
+tracksRegLiveness: true
+isSSA: true
+body: |
+ bb.0:
+ liveins: $v8m4, $v12m4
+
+ ; CHECK-LABEL: name: machine_combiner_subreg_verifier_error
+ ; CHECK: liveins: $v8m4, $v12m4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:gprnox0 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF2:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF3:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF4:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF5:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; CHECK-NEXT: [[PseudoVSLIDEDOWN_VI_M8_:%[0-9]+]]:vrm8 = PseudoVSLIDEDOWN_VI_M8 $noreg, [[DEF2]], 26, 2, 5 /* e32 */, 3 /* ta, ma */
+ ; CHECK-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[DEF2]].sub_vrm1_0, killed [[DEF3]], 2, 5 /* e32 */, 1 /* ta, mu */
+ ; CHECK-NEXT: [[PseudoVADD_VV_MF2_1:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVSLIDEDOWN_VI_M8_]].sub_vrm1_0, killed [[PseudoVADD_VV_MF2_]], 2, 5 /* e32 */, 1 /* ta, mu */
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:vrm4 = IMPLICIT_DEF
+ %1:gprnox0 = IMPLICIT_DEF
+ %2:vrm8 = IMPLICIT_DEF
+ %3:vr = IMPLICIT_DEF
+ %4:vrm2 = IMPLICIT_DEF
+ %5:vr = IMPLICIT_DEF
+ %6:vrm8 = PseudoVSLIDEDOWN_VI_M8 $noreg, %2, 26, 2, 5 /* e32 */, 3 /* ta, ma */
+ %7:vr = PseudoVADD_VV_MF2 $noreg, %6.sub_vrm1_0, %2.sub_vrm1_0, 2, 5 /* e32 */, 1 /* ta, mu */
+ %8:vr = PseudoVADD_VV_MF2 $noreg, killed %7, killed %3, 2, 5 /* e32 */, 1 /* ta, mu */
+ PseudoRET implicit $v8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/rvv/pr95865.ll b/llvm/test/CodeGen/RISCV/rvv/pr95865.ll
index ab98496..a4c793b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/pr95865.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/pr95865.ll
@@ -36,7 +36,7 @@ define i32 @main(i1 %arg.1, i64 %arg.2, i1 %arg.3, i64 %arg.4, i1 %arg.5, <vscal
; CHECK-NEXT: .cfi_offset s10, -96
; CHECK-NEXT: .cfi_offset s11, -104
; CHECK-NEXT: li a6, 0
-; CHECK-NEXT: li s2, 8
+; CHECK-NEXT: li a7, 8
; CHECK-NEXT: li t0, 12
; CHECK-NEXT: li s0, 4
; CHECK-NEXT: li t1, 20
@@ -45,7 +45,7 @@ define i32 @main(i1 %arg.1, i64 %arg.2, i1 %arg.3, i64 %arg.4, i1 %arg.5, <vscal
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: andi t3, a4, 1
-; CHECK-NEXT: li t2, 4
+; CHECK-NEXT: li s2, 4
; CHECK-NEXT: .LBB0_1: # %for.cond1.preheader.i
; CHECK-NEXT: # =>This Loop Header: Depth=1
; CHECK-NEXT: # Child Loop BB0_2 Depth 2
@@ -53,9 +53,9 @@ define i32 @main(i1 %arg.1, i64 %arg.2, i1 %arg.3, i64 %arg.4, i1 %arg.5, <vscal
; CHECK-NEXT: # Child Loop BB0_4 Depth 4
; CHECK-NEXT: # Child Loop BB0_5 Depth 5
; CHECK-NEXT: mv t4, t1
-; CHECK-NEXT: mv t5, t2
+; CHECK-NEXT: mv t2, s2
; CHECK-NEXT: mv t6, t0
-; CHECK-NEXT: mv a7, s2
+; CHECK-NEXT: mv s3, a7
; CHECK-NEXT: mv s4, a6
; CHECK-NEXT: .LBB0_2: # %for.cond5.preheader.i
; CHECK-NEXT: # Parent Loop BB0_1 Depth=1
@@ -64,9 +64,9 @@ define i32 @main(i1 %arg.1, i64 %arg.2, i1 %arg.3, i64 %arg.4, i1 %arg.5, <vscal
; CHECK-NEXT: # Child Loop BB0_4 Depth 4
; CHECK-NEXT: # Child Loop BB0_5 Depth 5
; CHECK-NEXT: mv s5, t4
-; CHECK-NEXT: mv s6, t5
+; CHECK-NEXT: mv t5, t2
; CHECK-NEXT: mv s7, t6
-; CHECK-NEXT: mv s3, a7
+; CHECK-NEXT: mv s8, s3
; CHECK-NEXT: mv s9, s4
; CHECK-NEXT: .LBB0_3: # %for.cond9.preheader.i
; CHECK-NEXT: # Parent Loop BB0_1 Depth=1
@@ -75,9 +75,9 @@ define i32 @main(i1 %arg.1, i64 %arg.2, i1 %arg.3, i64 %arg.4, i1 %arg.5, <vscal
; CHECK-NEXT: # Child Loop BB0_4 Depth 4
; CHECK-NEXT: # Child Loop BB0_5 Depth 5
; CHECK-NEXT: mv s11, s5
-; CHECK-NEXT: mv a3, s6
+; CHECK-NEXT: mv s6, t5
; CHECK-NEXT: mv ra, s7
-; CHECK-NEXT: mv s8, s3
+; CHECK-NEXT: mv a5, s8
; CHECK-NEXT: mv s1, s9
; CHECK-NEXT: .LBB0_4: # %vector.ph.i
; CHECK-NEXT: # Parent Loop BB0_1 Depth=1
@@ -92,45 +92,44 @@ define i32 @main(i1 %arg.1, i64 %arg.2, i1 %arg.3, i64 %arg.4, i1 %arg.5, <vscal
; CHECK-NEXT: # Parent Loop BB0_3 Depth=3
; CHECK-NEXT: # Parent Loop BB0_4 Depth=4
; CHECK-NEXT: # => This Inner Loop Header: Depth=5
-; CHECK-NEXT: addi a5, a1, 4
-; CHECK-NEXT: add a4, s8, a1
-; CHECK-NEXT: add a1, a1, a3
+; CHECK-NEXT: add a4, a5, a1
+; CHECK-NEXT: add a3, s6, a1
+; CHECK-NEXT: addi a1, a1, 4
; CHECK-NEXT: vse32.v v8, (a4), v0.t
-; CHECK-NEXT: vse32.v v8, (a1), v0.t
-; CHECK-NEXT: mv a1, a5
-; CHECK-NEXT: bne a5, s0, .LBB0_5
+; CHECK-NEXT: vse32.v v8, (a3), v0.t
+; CHECK-NEXT: bne a1, s0, .LBB0_5
; CHECK-NEXT: # %bb.6: # %for.cond.cleanup15.i
; CHECK-NEXT: # in Loop: Header=BB0_4 Depth=4
; CHECK-NEXT: addi s1, s1, 4
-; CHECK-NEXT: addi s8, s8, 4
+; CHECK-NEXT: addi a5, a5, 4
; CHECK-NEXT: addi ra, ra, 4
-; CHECK-NEXT: addi a3, a3, 4
+; CHECK-NEXT: addi s6, s6, 4
; CHECK-NEXT: andi s10, a0, 1
; CHECK-NEXT: addi s11, s11, 4
; CHECK-NEXT: beqz s10, .LBB0_4
; CHECK-NEXT: # %bb.7: # %for.cond.cleanup11.i
; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=3
; CHECK-NEXT: addi s9, s9, 4
-; CHECK-NEXT: addi s3, s3, 4
+; CHECK-NEXT: addi s8, s8, 4
; CHECK-NEXT: addi s7, s7, 4
-; CHECK-NEXT: addi s6, s6, 4
+; CHECK-NEXT: addi t5, t5, 4
; CHECK-NEXT: andi a1, a2, 1
; CHECK-NEXT: addi s5, s5, 4
; CHECK-NEXT: beqz a1, .LBB0_3
; CHECK-NEXT: # %bb.8: # %for.cond.cleanup7.i
; CHECK-NEXT: # in Loop: Header=BB0_2 Depth=2
; CHECK-NEXT: addi s4, s4, 4
-; CHECK-NEXT: addi a7, a7, 4
+; CHECK-NEXT: addi s3, s3, 4
; CHECK-NEXT: addi t6, t6, 4
-; CHECK-NEXT: addi t5, t5, 4
+; CHECK-NEXT: addi t2, t2, 4
; CHECK-NEXT: addi t4, t4, 4
; CHECK-NEXT: beqz t3, .LBB0_2
; CHECK-NEXT: # %bb.9: # %for.cond.cleanup3.i
; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1
; CHECK-NEXT: addi a6, a6, 4
-; CHECK-NEXT: addi s2, s2, 4
+; CHECK-NEXT: addi a7, a7, 4
; CHECK-NEXT: addi t0, t0, 4
-; CHECK-NEXT: addi t2, t2, 4
+; CHECK-NEXT: addi s2, s2, 4
; CHECK-NEXT: addi t1, t1, 4
; CHECK-NEXT: beqz a1, .LBB0_1
; CHECK-NEXT: # %bb.10: # %l.exit
diff --git a/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
index f295bd8..386c736 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
@@ -2258,18 +2258,18 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
; CHECK-RV32-NEXT: vsetvli a7, zero, e32, m2, ta, ma
; CHECK-RV32-NEXT: .LBB98_3: # %vector.body
; CHECK-RV32-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-RV32-NEXT: slli a7, a6, 2
-; CHECK-RV32-NEXT: add t0, a6, a4
-; CHECK-RV32-NEXT: add a7, a0, a7
-; CHECK-RV32-NEXT: vl2re32.v v8, (a7)
-; CHECK-RV32-NEXT: sltu a6, t0, a6
-; CHECK-RV32-NEXT: add a5, a5, a6
-; CHECK-RV32-NEXT: xor a6, t0, a3
+; CHECK-RV32-NEXT: mv a7, a6
+; CHECK-RV32-NEXT: slli t0, a6, 2
+; CHECK-RV32-NEXT: add a6, a6, a4
+; CHECK-RV32-NEXT: add t0, a0, t0
+; CHECK-RV32-NEXT: vl2re32.v v8, (t0)
+; CHECK-RV32-NEXT: sltu a7, a6, a7
+; CHECK-RV32-NEXT: add a5, a5, a7
+; CHECK-RV32-NEXT: xor a7, a6, a3
; CHECK-RV32-NEXT: vand.vx v8, v8, a1
-; CHECK-RV32-NEXT: or t1, a6, a5
-; CHECK-RV32-NEXT: vs2r.v v8, (a7)
-; CHECK-RV32-NEXT: mv a6, t0
-; CHECK-RV32-NEXT: bnez t1, .LBB98_3
+; CHECK-RV32-NEXT: or a7, a7, a5
+; CHECK-RV32-NEXT: vs2r.v v8, (t0)
+; CHECK-RV32-NEXT: bnez a7, .LBB98_3
; CHECK-RV32-NEXT: # %bb.4: # %middle.block
; CHECK-RV32-NEXT: bnez a3, .LBB98_6
; CHECK-RV32-NEXT: .LBB98_5: # %for.body
@@ -2350,18 +2350,18 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
; CHECK-ZVKB-NOZBB32-NEXT: vsetvli a7, zero, e32, m2, ta, ma
; CHECK-ZVKB-NOZBB32-NEXT: .LBB98_3: # %vector.body
; CHECK-ZVKB-NOZBB32-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-ZVKB-NOZBB32-NEXT: slli a7, a6, 2
-; CHECK-ZVKB-NOZBB32-NEXT: add t0, a6, a4
-; CHECK-ZVKB-NOZBB32-NEXT: add a7, a0, a7
-; CHECK-ZVKB-NOZBB32-NEXT: vl2re32.v v8, (a7)
-; CHECK-ZVKB-NOZBB32-NEXT: sltu a6, t0, a6
-; CHECK-ZVKB-NOZBB32-NEXT: add a5, a5, a6
-; CHECK-ZVKB-NOZBB32-NEXT: xor a6, t0, a3
+; CHECK-ZVKB-NOZBB32-NEXT: mv a7, a6
+; CHECK-ZVKB-NOZBB32-NEXT: slli t0, a6, 2
+; CHECK-ZVKB-NOZBB32-NEXT: add a6, a6, a4
+; CHECK-ZVKB-NOZBB32-NEXT: add t0, a0, t0
+; CHECK-ZVKB-NOZBB32-NEXT: vl2re32.v v8, (t0)
+; CHECK-ZVKB-NOZBB32-NEXT: sltu a7, a6, a7
+; CHECK-ZVKB-NOZBB32-NEXT: add a5, a5, a7
+; CHECK-ZVKB-NOZBB32-NEXT: xor a7, a6, a3
; CHECK-ZVKB-NOZBB32-NEXT: vandn.vx v8, v8, a1
-; CHECK-ZVKB-NOZBB32-NEXT: or t1, a6, a5
-; CHECK-ZVKB-NOZBB32-NEXT: vs2r.v v8, (a7)
-; CHECK-ZVKB-NOZBB32-NEXT: mv a6, t0
-; CHECK-ZVKB-NOZBB32-NEXT: bnez t1, .LBB98_3
+; CHECK-ZVKB-NOZBB32-NEXT: or a7, a7, a5
+; CHECK-ZVKB-NOZBB32-NEXT: vs2r.v v8, (t0)
+; CHECK-ZVKB-NOZBB32-NEXT: bnez a7, .LBB98_3
; CHECK-ZVKB-NOZBB32-NEXT: # %bb.4: # %middle.block
; CHECK-ZVKB-NOZBB32-NEXT: bnez a3, .LBB98_7
; CHECK-ZVKB-NOZBB32-NEXT: .LBB98_5: # %for.body.preheader
@@ -2444,18 +2444,18 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
; CHECK-ZVKB-ZBB32-NEXT: vsetvli a7, zero, e32, m2, ta, ma
; CHECK-ZVKB-ZBB32-NEXT: .LBB98_3: # %vector.body
; CHECK-ZVKB-ZBB32-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-ZVKB-ZBB32-NEXT: slli a7, a6, 2
-; CHECK-ZVKB-ZBB32-NEXT: add t0, a6, a4
-; CHECK-ZVKB-ZBB32-NEXT: add a7, a0, a7
-; CHECK-ZVKB-ZBB32-NEXT: vl2re32.v v8, (a7)
-; CHECK-ZVKB-ZBB32-NEXT: sltu a6, t0, a6
-; CHECK-ZVKB-ZBB32-NEXT: add a5, a5, a6
-; CHECK-ZVKB-ZBB32-NEXT: xor a6, t0, a3
+; CHECK-ZVKB-ZBB32-NEXT: mv a7, a6
+; CHECK-ZVKB-ZBB32-NEXT: slli t0, a6, 2
+; CHECK-ZVKB-ZBB32-NEXT: add a6, a6, a4
+; CHECK-ZVKB-ZBB32-NEXT: add t0, a0, t0
+; CHECK-ZVKB-ZBB32-NEXT: vl2re32.v v8, (t0)
+; CHECK-ZVKB-ZBB32-NEXT: sltu a7, a6, a7
+; CHECK-ZVKB-ZBB32-NEXT: add a5, a5, a7
+; CHECK-ZVKB-ZBB32-NEXT: xor a7, a6, a3
; CHECK-ZVKB-ZBB32-NEXT: vandn.vx v8, v8, a1
-; CHECK-ZVKB-ZBB32-NEXT: or t1, a6, a5
-; CHECK-ZVKB-ZBB32-NEXT: vs2r.v v8, (a7)
-; CHECK-ZVKB-ZBB32-NEXT: mv a6, t0
-; CHECK-ZVKB-ZBB32-NEXT: bnez t1, .LBB98_3
+; CHECK-ZVKB-ZBB32-NEXT: or a7, a7, a5
+; CHECK-ZVKB-ZBB32-NEXT: vs2r.v v8, (t0)
+; CHECK-ZVKB-ZBB32-NEXT: bnez a7, .LBB98_3
; CHECK-ZVKB-ZBB32-NEXT: # %bb.4: # %middle.block
; CHECK-ZVKB-ZBB32-NEXT: bnez a3, .LBB98_6
; CHECK-ZVKB-ZBB32-NEXT: .LBB98_5: # %for.body
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcpop-shl-zext-opt.ll b/llvm/test/CodeGen/RISCV/rvv/vcpop-shl-zext-opt.ll
index ed6b7f1..1044008 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcpop-shl-zext-opt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcpop-shl-zext-opt.ll
@@ -25,24 +25,24 @@ define dso_local void @test_store1(ptr nocapture noundef writeonly %dst, ptr noc
; RV32-NEXT: li a6, 0
; RV32-NEXT: .LBB0_4: # %vector.body
; RV32-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32-NEXT: slli t0, a7, 2
-; RV32-NEXT: addi t1, a7, 8
-; RV32-NEXT: add t0, a1, t0
+; RV32-NEXT: mv t0, a7
+; RV32-NEXT: slli t1, a7, 2
+; RV32-NEXT: addi a7, a7, 8
+; RV32-NEXT: add t1, a1, t1
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT: vle32.v v8, (t0)
-; RV32-NEXT: sltu a7, t1, a7
-; RV32-NEXT: xor t0, t1, a5
-; RV32-NEXT: add a6, a6, a7
+; RV32-NEXT: vle32.v v8, (t1)
+; RV32-NEXT: sltu t0, a7, t0
+; RV32-NEXT: xor t1, a7, a5
+; RV32-NEXT: add a6, a6, t0
; RV32-NEXT: vmslt.vx v12, v8, a2
; RV32-NEXT: vcompress.vm v10, v8, v12
-; RV32-NEXT: vcpop.m a7, v12
-; RV32-NEXT: vsetvli zero, a7, e32, m2, ta, ma
+; RV32-NEXT: vcpop.m t0, v12
+; RV32-NEXT: vsetvli zero, t0, e32, m2, ta, ma
; RV32-NEXT: vse32.v v10, (a0)
-; RV32-NEXT: slli a7, a7, 2
-; RV32-NEXT: or t0, t0, a6
-; RV32-NEXT: add a0, a0, a7
-; RV32-NEXT: mv a7, t1
-; RV32-NEXT: bnez t0, .LBB0_4
+; RV32-NEXT: slli t0, t0, 2
+; RV32-NEXT: or t1, t1, a6
+; RV32-NEXT: add a0, a0, t0
+; RV32-NEXT: bnez t1, .LBB0_4
; RV32-NEXT: # %bb.5: # %middle.block
; RV32-NEXT: bne a5, a3, .LBB0_9
; RV32-NEXT: .LBB0_6: # %for.cond.cleanup
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll
index ead79fc..af3b0852a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll
@@ -102,20 +102,20 @@ define void @test1(ptr nocapture noundef writeonly %dst, i32 noundef signext %i_
; RV32-NEXT: .LBB0_13: # %vector.body
; RV32-NEXT: # Parent Loop BB0_10 Depth=1
; RV32-NEXT: # => This Inner Loop Header: Depth=2
-; RV32-NEXT: add s0, a2, t6
-; RV32-NEXT: add s1, a4, t6
-; RV32-NEXT: vl2r.v v8, (s0)
-; RV32-NEXT: add s0, a0, t6
+; RV32-NEXT: mv s0, t6
+; RV32-NEXT: add t6, a2, t6
+; RV32-NEXT: add s1, a4, s0
+; RV32-NEXT: vl2r.v v8, (t6)
+; RV32-NEXT: add s2, a0, s0
; RV32-NEXT: vl2r.v v10, (s1)
-; RV32-NEXT: add s1, t6, t2
-; RV32-NEXT: sltu t6, s1, t6
-; RV32-NEXT: add t5, t5, t6
-; RV32-NEXT: xor t6, s1, t4
+; RV32-NEXT: add t6, s0, t2
+; RV32-NEXT: sltu s0, t6, s0
+; RV32-NEXT: add t5, t5, s0
+; RV32-NEXT: xor s0, t6, t4
; RV32-NEXT: vaaddu.vv v8, v8, v10
-; RV32-NEXT: or s2, t6, t5
-; RV32-NEXT: vs2r.v v8, (s0)
-; RV32-NEXT: mv t6, s1
-; RV32-NEXT: bnez s2, .LBB0_13
+; RV32-NEXT: or s0, s0, t5
+; RV32-NEXT: vs2r.v v8, (s2)
+; RV32-NEXT: bnez s0, .LBB0_13
; RV32-NEXT: # %bb.14: # %middle.block
; RV32-NEXT: # in Loop: Header=BB0_10 Depth=1
; RV32-NEXT: beq t4, a6, .LBB0_9
diff --git a/llvm/test/CodeGen/RISCV/sra-xor-sra.ll b/llvm/test/CodeGen/RISCV/sra-xor-sra.ll
new file mode 100644
index 0000000..b04f0a2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/sra-xor-sra.ll
@@ -0,0 +1,32 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck %s
+
+; Test folding of: (sra (xor (sra x, c1), -1), c2) -> (sra (xor x, -1), c3)
+; Original motivating example: the two sra shifts should merge across the
+; intervening xor (see the note after this function).
+define i16 @not_invert_signbit_splat_mask(i8 %x, i16 %y) {
+; CHECK-LABEL: not_invert_signbit_splat_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slli a0, a0, 56
+; CHECK-NEXT: srai a0, a0, 62
+; CHECK-NEXT: not a0, a0
+; CHECK-NEXT: and a0, a0, a1
+; CHECK-NEXT: ret
+ %a = ashr i8 %x, 6
+ %n = xor i8 %a, -1
+ %s = sext i8 %n to i16
+ %r = and i16 %s, %y
+ ret i16 %r
+}
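+
+; A note on the CHECK lines above (inferred from the generated output, not
+; from the source commit): slli 56 / srai 56 is the usual i8 sign-extension
+; idiom, so the merged fold surfaces as a single srai by 56 + 6 = 62, and the
+; xor with -1 survives as one not instead of forcing a second shift pair.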
+
+; Edge case: the shift amount (10) exceeds the i8 bit width, so the ashr is
+; poison and the whole result folds to zero.
+define i16 @sra_xor_sra_overflow(i8 %x, i16 %y) {
+; CHECK-LABEL: sra_xor_sra_overflow:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 0
+; CHECK-NEXT: ret
+ %a = ashr i8 %x, 10
+ %n = xor i8 %a, -1
+ %s = sext i8 %n to i16
+ %r = and i16 %s, %y
+ ret i16 %r
+}