Diffstat (limited to 'llvm/test/CodeGen/RISCV/rv32p.ll')
 llvm/test/CodeGen/RISCV/rv32p.ll | 709 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 709 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rv32p.ll b/llvm/test/CodeGen/RISCV/rv32p.ll
new file mode 100644
index 0000000..4eee880a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32p.ll
@@ -0,0 +1,709 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-p -verify-machineinstrs < %s \
+; RUN: | FileCheck %s
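+
+; Test that common bit-manipulation, min/max, and abs IR patterns select the
+; scalar instructions provided by the experimental P extension on RV32.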
+
+declare i32 @llvm.ctlz.i32(i32, i1)
+
+define i32 @ctlz_i32(i32 %a) nounwind {
+; CHECK-LABEL: ctlz_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: clz a0, a0
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.ctlz.i32(i32 %a, i1 false)
+ ret i32 %1
+}
+
+declare i64 @llvm.ctlz.i64(i64, i1)
+
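+; i64 ctlz is expanded across the register pair: clz of the high word when it
+; is non-zero, otherwise clz of the low word plus 32.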
+define i64 @ctlz_i64(i64 %a) nounwind {
+; CHECK-LABEL: ctlz_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: bnez a1, .LBB1_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: clz a0, a0
+; CHECK-NEXT: addi a0, a0, 32
+; CHECK-NEXT: li a1, 0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB1_2:
+; CHECK-NEXT: clz a0, a1
+; CHECK-NEXT: li a1, 0
+; CHECK-NEXT: ret
+ %1 = call i64 @llvm.ctlz.i64(i64 %a, i1 false)
+ ret i64 %1
+}
+
+declare i32 @llvm.cttz.i32(i32, i1)
+
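+; There is no dedicated ctz instruction, so cttz is expanded for non-zero x
+; using the identity cttz(x) = 32 - clz(~x & (x - 1)).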
+define i32 @cttz_i32(i32 %a) nounwind {
+; CHECK-LABEL: cttz_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: beqz a0, .LBB2_2
+; CHECK-NEXT: # %bb.1: # %cond.false
+; CHECK-NEXT: addi a1, a0, -1
+; CHECK-NEXT: not a0, a0
+; CHECK-NEXT: and a0, a0, a1
+; CHECK-NEXT: clz a0, a0
+; CHECK-NEXT: li a1, 32
+; CHECK-NEXT: sub a0, a1, a0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB2_2:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: ret
+ %1 = call i32 @llvm.cttz.i32(i32 %a, i1 false)
+ ret i32 %1
+}
+
+declare i64 @llvm.cttz.i64(i64, i1)
+
+define i64 @cttz_i64(i64 %a) nounwind {
+; CHECK-LABEL: cttz_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: or a2, a0, a1
+; CHECK-NEXT: beqz a2, .LBB3_3
+; CHECK-NEXT: # %bb.1: # %cond.false
+; CHECK-NEXT: bnez a0, .LBB3_4
+; CHECK-NEXT: # %bb.2: # %cond.false
+; CHECK-NEXT: addi a0, a1, -1
+; CHECK-NEXT: not a1, a1
+; CHECK-NEXT: and a0, a1, a0
+; CHECK-NEXT: clz a0, a0
+; CHECK-NEXT: li a1, 64
+; CHECK-NEXT: j .LBB3_5
+; CHECK-NEXT: .LBB3_3:
+; CHECK-NEXT: li a1, 0
+; CHECK-NEXT: li a0, 64
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB3_4:
+; CHECK-NEXT: addi a1, a0, -1
+; CHECK-NEXT: not a0, a0
+; CHECK-NEXT: and a0, a0, a1
+; CHECK-NEXT: clz a0, a0
+; CHECK-NEXT: li a1, 32
+; CHECK-NEXT: .LBB3_5: # %cond.false
+; CHECK-NEXT: sub a0, a1, a0
+; CHECK-NEXT: li a1, 0
+; CHECK-NEXT: ret
+ %1 = call i64 @llvm.cttz.i64(i64 %a, i1 false)
+ ret i64 %1
+}
+
+define i32 @sextb_i32(i32 %a) nounwind {
+; CHECK-LABEL: sextb_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sext.b a0, a0
+; CHECK-NEXT: ret
+ %shl = shl i32 %a, 24
+ %shr = ashr exact i32 %shl, 24
+ ret i32 %shr
+}
+
+define i64 @sextb_i64(i64 %a) nounwind {
+; CHECK-LABEL: sextb_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sext.b a0, a0
+; CHECK-NEXT: srai a1, a0, 31
+; CHECK-NEXT: ret
+ %shl = shl i64 %a, 56
+ %shr = ashr exact i64 %shl, 56
+ ret i64 %shr
+}
+
+define i32 @sexth_i32(i32 %a) nounwind {
+; CHECK-LABEL: sexth_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sext.h a0, a0
+; CHECK-NEXT: ret
+ %shl = shl i32 %a, 16
+ %shr = ashr exact i32 %shl, 16
+ ret i32 %shr
+}
+
+define i64 @sexth_i64(i64 %a) nounwind {
+; CHECK-LABEL: sexth_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sext.h a0, a0
+; CHECK-NEXT: srai a1, a0, 31
+; CHECK-NEXT: ret
+ %shl = shl i64 %a, 48
+ %shr = ashr exact i64 %shl, 48
+ ret i64 %shr
+}
+
+define i32 @min_i32(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: min_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: min a0, a0, a1
+; CHECK-NEXT: ret
+ %cmp = icmp slt i32 %a, %b
+ %cond = select i1 %cmp, i32 %a, i32 %b
+ ret i32 %cond
+}
+
+; Since i64 code patterns are not matched directly on RV32, some i64 patterns
+; do not yet have any matching bit-manipulation instructions on RV32.
+; This test is included here in case future expansions of the Bitmanip
+; extensions introduce instructions suitable for this pattern.
+
+define i64 @min_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: min_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: beq a1, a3, .LBB9_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: slt a4, a1, a3
+; CHECK-NEXT: beqz a4, .LBB9_3
+; CHECK-NEXT: j .LBB9_4
+; CHECK-NEXT: .LBB9_2:
+; CHECK-NEXT: sltu a4, a0, a2
+; CHECK-NEXT: bnez a4, .LBB9_4
+; CHECK-NEXT: .LBB9_3:
+; CHECK-NEXT: mv a0, a2
+; CHECK-NEXT: mv a1, a3
+; CHECK-NEXT: .LBB9_4:
+; CHECK-NEXT: ret
+ %cmp = icmp slt i64 %a, %b
+ %cond = select i1 %cmp, i64 %a, i64 %b
+ ret i64 %cond
+}
+
+define i32 @max_i32(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: max_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: max a0, a0, a1
+; CHECK-NEXT: ret
+ %cmp = icmp sgt i32 %a, %b
+ %cond = select i1 %cmp, i32 %a, i32 %b
+ ret i32 %cond
+}
+
+; Since i64 code patterns are not matched directly on RV32, some i64 patterns
+; do not yet have any matching bit-manipulation instructions on RV32.
+; This test is included here in case future expansions of the Bitmanip
+; extensions introduce instructions suitable for this pattern.
+
+define i64 @max_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: max_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: beq a1, a3, .LBB11_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: slt a4, a3, a1
+; CHECK-NEXT: beqz a4, .LBB11_3
+; CHECK-NEXT: j .LBB11_4
+; CHECK-NEXT: .LBB11_2:
+; CHECK-NEXT: sltu a4, a2, a0
+; CHECK-NEXT: bnez a4, .LBB11_4
+; CHECK-NEXT: .LBB11_3:
+; CHECK-NEXT: mv a0, a2
+; CHECK-NEXT: mv a1, a3
+; CHECK-NEXT: .LBB11_4:
+; CHECK-NEXT: ret
+ %cmp = icmp sgt i64 %a, %b
+ %cond = select i1 %cmp, i64 %a, i64 %b
+ ret i64 %cond
+}
+
+define i32 @minu_i32(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: minu_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minu a0, a0, a1
+; CHECK-NEXT: ret
+ %cmp = icmp ult i32 %a, %b
+ %cond = select i1 %cmp, i32 %a, i32 %b
+ ret i32 %cond
+}
+
+; Since i64 code patterns are not matched directly on RV32, some i64 patterns
+; do not yet have any matching bit-manipulation instructions on RV32.
+; This test is included here in case future expansions of the Bitmanip
+; extensions introduce instructions suitable for this pattern.
+
+define i64 @minu_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: minu_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: beq a1, a3, .LBB13_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: sltu a4, a1, a3
+; CHECK-NEXT: beqz a4, .LBB13_3
+; CHECK-NEXT: j .LBB13_4
+; CHECK-NEXT: .LBB13_2:
+; CHECK-NEXT: sltu a4, a0, a2
+; CHECK-NEXT: bnez a4, .LBB13_4
+; CHECK-NEXT: .LBB13_3:
+; CHECK-NEXT: mv a0, a2
+; CHECK-NEXT: mv a1, a3
+; CHECK-NEXT: .LBB13_4:
+; CHECK-NEXT: ret
+ %cmp = icmp ult i64 %a, %b
+ %cond = select i1 %cmp, i64 %a, i64 %b
+ ret i64 %cond
+}
+
+define i32 @maxu_i32(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: maxu_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxu a0, a0, a1
+; CHECK-NEXT: ret
+ %cmp = icmp ugt i32 %a, %b
+ %cond = select i1 %cmp, i32 %a, i32 %b
+ ret i32 %cond
+}
+
+; Since i64 code patterns are not matched directly on RV32, some i64 patterns
+; do not yet have any matching bit-manipulation instructions on RV32.
+; This test is included here in case future expansions of the Bitmanip
+; extensions introduce instructions suitable for this pattern.
+
+define i64 @maxu_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: maxu_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: beq a1, a3, .LBB15_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: sltu a4, a3, a1
+; CHECK-NEXT: beqz a4, .LBB15_3
+; CHECK-NEXT: j .LBB15_4
+; CHECK-NEXT: .LBB15_2:
+; CHECK-NEXT: sltu a4, a2, a0
+; CHECK-NEXT: bnez a4, .LBB15_4
+; CHECK-NEXT: .LBB15_3:
+; CHECK-NEXT: mv a0, a2
+; CHECK-NEXT: mv a1, a3
+; CHECK-NEXT: .LBB15_4:
+; CHECK-NEXT: ret
+ %cmp = icmp ugt i64 %a, %b
+ %cond = select i1 %cmp, i64 %a, i64 %b
+ ret i64 %cond
+}
+
+declare i32 @llvm.abs.i32(i32, i1 immarg)
+
+define i32 @abs_i32(i32 %x) {
+; CHECK-LABEL: abs_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: abs a0, a0
+; CHECK-NEXT: ret
+ %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
+ ret i32 %abs
+}
+
+declare i64 @llvm.abs.i64(i64, i1 immarg)
+
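+; i64 abs on RV32 negates both words when the high word is negative,
+; subtracting the borrow (snez of the low word) from the high word.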
+define i64 @abs_i64(i64 %x) {
+; CHECK-LABEL: abs_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: bgez a1, .LBB17_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: snez a2, a0
+; CHECK-NEXT: neg a0, a0
+; CHECK-NEXT: neg a1, a1
+; CHECK-NEXT: sub a1, a1, a2
+; CHECK-NEXT: .LBB17_2:
+; CHECK-NEXT: ret
+ %abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
+ ret i64 %abs
+}
+
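+; The and with 65535 is lowered to a slli/srli pair; no zext.h is selected
+; here.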
+define i32 @zexth_i32(i32 %a) nounwind {
+; CHECK-LABEL: zexth_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slli a0, a0, 16
+; CHECK-NEXT: srli a0, a0, 16
+; CHECK-NEXT: ret
+ %and = and i32 %a, 65535
+ ret i32 %and
+}
+
+define i64 @zexth_i64(i64 %a) nounwind {
+; CHECK-LABEL: zexth_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slli a0, a0, 16
+; CHECK-NEXT: srli a0, a0, 16
+; CHECK-NEXT: li a1, 0
+; CHECK-NEXT: ret
+ %and = and i64 %a, 65535
+ ret i64 %and
+}
+
+declare i32 @llvm.bswap.i32(i32)
+
+define i32 @bswap_i32(i32 %a) nounwind {
+; CHECK-LABEL: bswap_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: rev8 a0, a0
+; CHECK-NEXT: ret
+ %1 = tail call i32 @llvm.bswap.i32(i32 %a)
+ ret i32 %1
+}
+
+declare i64 @llvm.bswap.i64(i64)
+
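+; i64 bswap on RV32: rev8 each word and swap the two halves.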
+define i64 @bswap_i64(i64 %a) {
+; CHECK-LABEL: bswap_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: rev8 a2, a1
+; CHECK-NEXT: rev8 a1, a0
+; CHECK-NEXT: mv a0, a2
+; CHECK-NEXT: ret
+ %1 = call i64 @llvm.bswap.i64(i64 %a)
+ ret i64 %1
+}
+
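+; The i16 shl/ashr pair is performed in the full 32-bit register, with both
+; shift amounts increased by 16 (9 -> 25, 15 -> 31).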
+define i32 @srai_slli(i16 signext %0) {
+; CHECK-LABEL: srai_slli:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slli a0, a0, 25
+; CHECK-NEXT: srai a0, a0, 31
+; CHECK-NEXT: ret
+ %2 = shl i16 %0, 9
+ %sext = ashr i16 %2, 15
+ %3 = sext i16 %sext to i32
+ ret i32 %3
+}
+
+define i32 @srai_slli2(i16 signext %0) {
+; CHECK-LABEL: srai_slli2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slli a0, a0, 25
+; CHECK-NEXT: srai a0, a0, 30
+; CHECK-NEXT: ret
+ %2 = shl i16 %0, 9
+ %sext = ashr i16 %2, 14
+ %3 = sext i16 %sext to i32
+ ret i32 %3
+}
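+
+; x - (x < y ? 0 : y) is selected as an unsigned minimum, minu(x, x - y):
+; when x < y the subtraction wraps past x, so the minimum picks x itself.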
+define i8 @sub_if_uge_i8(i8 %x, i8 %y) {
+; CHECK-LABEL: sub_if_uge_i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: zext.b a2, a0
+; CHECK-NEXT: sub a0, a0, a1
+; CHECK-NEXT: zext.b a0, a0
+; CHECK-NEXT: minu a0, a2, a0
+; CHECK-NEXT: ret
+ %cmp = icmp ult i8 %x, %y
+ %select = select i1 %cmp, i8 0, i8 %y
+ %sub = sub nuw i8 %x, %select
+ ret i8 %sub
+}
+
+define i16 @sub_if_uge_i16(i16 %x, i16 %y) {
+; CHECK-LABEL: sub_if_uge_i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a2, 16
+; CHECK-NEXT: sub a1, a0, a1
+; CHECK-NEXT: addi a2, a2, -1
+; CHECK-NEXT: and a0, a0, a2
+; CHECK-NEXT: and a1, a1, a2
+; CHECK-NEXT: minu a0, a0, a1
+; CHECK-NEXT: ret
+ %cmp = icmp ult i16 %x, %y
+ %select = select i1 %cmp, i16 0, i16 %y
+ %sub = sub nuw i16 %x, %select
+ ret i16 %sub
+}
+
+define i32 @sub_if_uge_i32(i32 %x, i32 %y) {
+; CHECK-LABEL: sub_if_uge_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sub a1, a0, a1
+; CHECK-NEXT: minu a0, a0, a1
+; CHECK-NEXT: ret
+ %cmp = icmp ult i32 %x, %y
+ %select = select i1 %cmp, i32 0, i32 %y
+ %sub = sub nuw i32 %x, %select
+ ret i32 %sub
+}
+
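+; For i64 the compare result masks y to zero (or leaves it unchanged) before
+; a two-word subtract with borrow.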
+define i64 @sub_if_uge_i64(i64 %x, i64 %y) {
+; CHECK-LABEL: sub_if_uge_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: beq a1, a3, .LBB27_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: sltu a4, a1, a3
+; CHECK-NEXT: j .LBB27_3
+; CHECK-NEXT: .LBB27_2:
+; CHECK-NEXT: sltu a4, a0, a2
+; CHECK-NEXT: .LBB27_3:
+; CHECK-NEXT: addi a4, a4, -1
+; CHECK-NEXT: and a3, a4, a3
+; CHECK-NEXT: and a2, a4, a2
+; CHECK-NEXT: sltu a4, a0, a2
+; CHECK-NEXT: sub a1, a1, a3
+; CHECK-NEXT: sub a1, a1, a4
+; CHECK-NEXT: sub a0, a0, a2
+; CHECK-NEXT: ret
+ %cmp = icmp ult i64 %x, %y
+ %select = select i1 %cmp, i64 0, i64 %y
+ %sub = sub nuw i64 %x, %select
+ ret i64 %sub
+}
+
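+; The i128 case applies the same masked subtract word by word; the operands
+; are loaded through the pointers in a1/a2 and the result stored through a0.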
+define i128 @sub_if_uge_i128(i128 %x, i128 %y) {
+; CHECK-LABEL: sub_if_uge_i128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lw a3, 4(a1)
+; CHECK-NEXT: lw a4, 8(a1)
+; CHECK-NEXT: lw a5, 12(a1)
+; CHECK-NEXT: lw a6, 4(a2)
+; CHECK-NEXT: lw t0, 12(a2)
+; CHECK-NEXT: lw a7, 8(a2)
+; CHECK-NEXT: beq a5, t0, .LBB28_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: sltu t1, a5, t0
+; CHECK-NEXT: j .LBB28_3
+; CHECK-NEXT: .LBB28_2:
+; CHECK-NEXT: sltu t1, a4, a7
+; CHECK-NEXT: .LBB28_3:
+; CHECK-NEXT: lw a1, 0(a1)
+; CHECK-NEXT: lw a2, 0(a2)
+; CHECK-NEXT: beq a3, a6, .LBB28_5
+; CHECK-NEXT: # %bb.4:
+; CHECK-NEXT: sltu t2, a3, a6
+; CHECK-NEXT: j .LBB28_6
+; CHECK-NEXT: .LBB28_5:
+; CHECK-NEXT: sltu t2, a1, a2
+; CHECK-NEXT: .LBB28_6:
+; CHECK-NEXT: xor t3, a5, t0
+; CHECK-NEXT: xor t4, a4, a7
+; CHECK-NEXT: or t3, t4, t3
+; CHECK-NEXT: beqz t3, .LBB28_8
+; CHECK-NEXT: # %bb.7:
+; CHECK-NEXT: mv t2, t1
+; CHECK-NEXT: .LBB28_8:
+; CHECK-NEXT: addi t3, t2, -1
+; CHECK-NEXT: and t2, t3, t0
+; CHECK-NEXT: and t0, t3, a2
+; CHECK-NEXT: and t1, t3, a6
+; CHECK-NEXT: sltu a2, a1, t0
+; CHECK-NEXT: and a7, t3, a7
+; CHECK-NEXT: mv a6, a2
+; CHECK-NEXT: beq a3, t1, .LBB28_10
+; CHECK-NEXT: # %bb.9:
+; CHECK-NEXT: sltu a6, a3, t1
+; CHECK-NEXT: .LBB28_10:
+; CHECK-NEXT: sub t3, a4, a7
+; CHECK-NEXT: sltu a4, a4, a7
+; CHECK-NEXT: sub a5, a5, t2
+; CHECK-NEXT: sub a3, a3, t1
+; CHECK-NEXT: sub a1, a1, t0
+; CHECK-NEXT: sltu a7, t3, a6
+; CHECK-NEXT: sub a5, a5, a4
+; CHECK-NEXT: sub a4, t3, a6
+; CHECK-NEXT: sub a3, a3, a2
+; CHECK-NEXT: sub a2, a5, a7
+; CHECK-NEXT: sw a1, 0(a0)
+; CHECK-NEXT: sw a3, 4(a0)
+; CHECK-NEXT: sw a4, 8(a0)
+; CHECK-NEXT: sw a2, 12(a0)
+; CHECK-NEXT: ret
+ %cmp = icmp ult i128 %x, %y
+ %select = select i1 %cmp, i128 0, i128 %y
+ %sub = sub nuw i128 %x, %select
+ ret i128 %sub
+}
+
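+; When the select result has another use, the minu transform does not fire;
+; the select is materialized with sltu/addi/and instead.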
+define i32 @sub_if_uge_multiuse_select_i32(i32 %x, i32 %y) {
+; CHECK-LABEL: sub_if_uge_multiuse_select_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sltu a2, a0, a1
+; CHECK-NEXT: addi a2, a2, -1
+; CHECK-NEXT: and a1, a2, a1
+; CHECK-NEXT: sub a0, a0, a1
+; CHECK-NEXT: sll a0, a0, a1
+; CHECK-NEXT: ret
+ %cmp = icmp ult i32 %x, %y
+ %select = select i1 %cmp, i32 0, i32 %y
+ %sub = sub nuw i32 %x, %select
+ %shl = shl i32 %sub, %select
+ ret i32 %shl
+}
+
+define i32 @sub_if_uge_multiuse_cmp_i32(i32 %x, i32 %y) {
+; CHECK-LABEL: sub_if_uge_multiuse_cmp_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: minu a2, a0, a2
+; CHECK-NEXT: bltu a0, a1, .LBB30_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 4
+; CHECK-NEXT: sll a0, a2, a0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB30_2:
+; CHECK-NEXT: li a0, 2
+; CHECK-NEXT: sll a0, a2, a0
+; CHECK-NEXT: ret
+ %cmp = icmp ult i32 %x, %y
+ %select = select i1 %cmp, i32 0, i32 %y
+ %sub = sub nuw i32 %x, %select
+ %select2 = select i1 %cmp, i32 2, i32 4
+ %shl = shl i32 %sub, %select2
+ ret i32 %shl
+}
+
+define i32 @sub_if_uge_multiuse_cmp_store_i32(i32 %x, i32 %y, ptr %z) {
+; CHECK-LABEL: sub_if_uge_multiuse_cmp_store_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sltu a3, a0, a1
+; CHECK-NEXT: sub a1, a0, a1
+; CHECK-NEXT: xori a3, a3, 1
+; CHECK-NEXT: minu a0, a0, a1
+; CHECK-NEXT: sw a3, 0(a2)
+; CHECK-NEXT: ret
+ %cmp = icmp uge i32 %x, %y
+ %conv = zext i1 %cmp to i32
+ store i32 %conv, ptr %z, align 4
+ %select = select i1 %cmp, i32 %y, i32 0
+ %sub = sub nuw i32 %x, %select
+ ret i32 %sub
+}
+
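+; Compares against a constant use the same trick:
+; x > C ? x - (C + 1) : x becomes minu(x - (C + 1), x), since the subtraction
+; wraps above x whenever x <= C.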
+define i8 @sub_if_uge_C_i8(i8 zeroext %x) {
+; CHECK-LABEL: sub_if_uge_C_i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a1, a0, -13
+; CHECK-NEXT: zext.b a1, a1
+; CHECK-NEXT: minu a0, a1, a0
+; CHECK-NEXT: ret
+ %cmp = icmp ugt i8 %x, 12
+ %sub = add i8 %x, -13
+ %conv4 = select i1 %cmp, i8 %sub, i8 %x
+ ret i8 %conv4
+}
+
+define i16 @sub_if_uge_C_i16(i16 zeroext %x) {
+; CHECK-LABEL: sub_if_uge_C_i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a1, a0, -251
+; CHECK-NEXT: slli a1, a1, 16
+; CHECK-NEXT: srli a1, a1, 16
+; CHECK-NEXT: minu a0, a1, a0
+; CHECK-NEXT: ret
+ %cmp = icmp ugt i16 %x, 250
+ %sub = add i16 %x, -251
+ %conv4 = select i1 %cmp, i16 %sub, i16 %x
+ ret i16 %conv4
+}
+
+define i32 @sub_if_uge_C_i32(i32 signext %x) {
+; CHECK-LABEL: sub_if_uge_C_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a1, 1048560
+; CHECK-NEXT: addi a1, a1, 15
+; CHECK-NEXT: add a1, a0, a1
+; CHECK-NEXT: minu a0, a1, a0
+; CHECK-NEXT: ret
+ %cmp = icmp ugt i32 %x, 65520
+ %sub = add i32 %x, -65521
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ ret i32 %cond
+}
+
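+; The 64-bit constant does not fit a single compare: the high word is tested
+; first, and the low word is compared only when the high words are equal.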
+define i64 @sub_if_uge_C_i64(i64 %x) {
+; CHECK-LABEL: sub_if_uge_C_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 1
+; CHECK-NEXT: beq a1, a2, .LBB35_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: sltiu a2, a1, 2
+; CHECK-NEXT: xori a2, a2, 1
+; CHECK-NEXT: j .LBB35_3
+; CHECK-NEXT: .LBB35_2:
+; CHECK-NEXT: lui a2, 172127
+; CHECK-NEXT: addi a2, a2, 511
+; CHECK-NEXT: sltu a2, a2, a0
+; CHECK-NEXT: .LBB35_3:
+; CHECK-NEXT: neg a2, a2
+; CHECK-NEXT: andi a3, a2, -2
+; CHECK-NEXT: add a1, a1, a3
+; CHECK-NEXT: lui a3, 876449
+; CHECK-NEXT: addi a3, a3, -512
+; CHECK-NEXT: and a2, a2, a3
+; CHECK-NEXT: add a2, a0, a2
+; CHECK-NEXT: sltu a0, a2, a0
+; CHECK-NEXT: add a1, a1, a0
+; CHECK-NEXT: mv a0, a2
+; CHECK-NEXT: ret
+ %cmp = icmp ugt i64 %x, 4999999999
+ %sub = add i64 %x, -5000000000
+ %cond = select i1 %cmp, i64 %sub, i64 %x
+ ret i64 %cond
+}
+
+define i32 @sub_if_uge_C_multiuse_cmp_i32(i32 signext %x, ptr %z) {
+; CHECK-LABEL: sub_if_uge_C_multiuse_cmp_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a2, 16
+; CHECK-NEXT: lui a3, 1048560
+; CHECK-NEXT: addi a2, a2, -16
+; CHECK-NEXT: addi a3, a3, 15
+; CHECK-NEXT: sltu a2, a2, a0
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: minu a0, a3, a0
+; CHECK-NEXT: sw a2, 0(a1)
+; CHECK-NEXT: ret
+ %cmp = icmp ugt i32 %x, 65520
+ %conv = zext i1 %cmp to i32
+ store i32 %conv, ptr %z, align 4
+ %sub = add i32 %x, -65521
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ ret i32 %cond
+}
+
+define i32 @sub_if_uge_C_multiuse_sub_i32(i32 signext %x, ptr %z) {
+; CHECK-LABEL: sub_if_uge_C_multiuse_sub_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a2, 1048560
+; CHECK-NEXT: addi a2, a2, 15
+; CHECK-NEXT: add a2, a0, a2
+; CHECK-NEXT: minu a0, a2, a0
+; CHECK-NEXT: sw a2, 0(a1)
+; CHECK-NEXT: ret
+ %sub = add i32 %x, -65521
+ store i32 %sub, ptr %z, align 4
+ %cmp = icmp ugt i32 %x, 65520
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ ret i32 %cond
+}
+
+define i32 @sub_if_uge_C_swapped_i32(i32 %x) {
+; CHECK-LABEL: sub_if_uge_C_swapped_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a1, 1048560
+; CHECK-NEXT: addi a1, a1, 15
+; CHECK-NEXT: add a1, a0, a1
+; CHECK-NEXT: minu a0, a0, a1
+; CHECK-NEXT: ret
+ %cmp = icmp ult i32 %x, 65521
+ %sub = add i32 %x, -65521
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ ret i32 %cond
+}
+
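+; i7 is not a legal type: the value is masked to 7 bits with andi before the
+; minu, and the add result is masked with 92, the only bits that can be set
+; given the 'or 51'.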
+define i7 @sub_if_uge_C_nsw_i7(i7 %a) {
+; CHECK-LABEL: sub_if_uge_C_nsw_i7:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ori a0, a0, 51
+; CHECK-NEXT: andi a1, a0, 127
+; CHECK-NEXT: addi a0, a0, 17
+; CHECK-NEXT: andi a0, a0, 92
+; CHECK-NEXT: minu a0, a0, a1
+; CHECK-NEXT: ret
+ %x = or i7 %a, 51
+ %c = icmp ugt i7 %x, -18
+ %add = add nsw i7 %x, 17
+ %s = select i1 %c, i7 %add, i7 %x
+ ret i7 %s
+}
+
+define i7 @sub_if_uge_C_swapped_nsw_i7(i7 %a) {
+; CHECK-LABEL: sub_if_uge_C_swapped_nsw_i7:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ori a0, a0, 51
+; CHECK-NEXT: andi a1, a0, 127
+; CHECK-NEXT: addi a0, a0, 17
+; CHECK-NEXT: andi a0, a0, 92
+; CHECK-NEXT: minu a0, a1, a0
+; CHECK-NEXT: ret
+ %x = or i7 %a, 51
+ %c = icmp ult i7 %x, -17
+ %add = add nsw i7 %x, 17
+ %s = select i1 %c, i7 %x, i7 %add
+ ret i7 %s
+}