author    Craig Topper <craig.topper@sifive.com>    2024-02-12 21:17:53 -0800
committer Craig Topper <craig.topper@sifive.com>    2024-02-13 09:57:48 -0800
commit    9838c8512bc29e3a1b8edeb0eb2541160e4c727f
tree      930fc74956001605fc4af5364a608bf45dbeabcf
parent    742a06f577b4c3b1c1f994e91bb6579ae89fe4b0
[RISCV] Copy typepromotion-overflow.ll from AArch64. NFC
-rw-r--r--  llvm/test/CodeGen/RISCV/typepromotion-overflow.ll | 388
1 file changed, 388 insertions(+), 0 deletions(-)
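The new test is a copy of the AArch64 typepromotion-overflow.ll with its CHECK
lines regenerated for riscv64, as the autogeneration NOTE in the file records.
If RISC-V codegen later changes, the assertions can be refreshed with the
update script; a typical invocation from the llvm-project root, assuming llc
is built under build/bin, would be:

    llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
        llvm/test/CodeGen/RISCV/typepromotion-overflow.ll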
diff --git a/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll b/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll
new file mode 100644
index 0000000..fad9e6c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll
@@ -0,0 +1,388 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+m %s -o - | FileCheck %s
+
+define zeroext i16 @overflow_add(i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: overflow_add:
+; CHECK: # %bb.0:
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: ori a0, a0, 1
+; CHECK-NEXT: slli a0, a0, 48
+; CHECK-NEXT: srli a1, a0, 48
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: li a0, 2
+; CHECK-NEXT: bltu a2, a1, .LBB0_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 5
+; CHECK-NEXT: .LBB0_2:
+; CHECK-NEXT: ret
+ %add = add i16 %b, %a
+ %or = or i16 %add, 1
+ %cmp = icmp ugt i16 %or, 1024
+ %res = select i1 %cmp, i16 2, i16 5
+ ret i16 %res
+}
+
+define zeroext i16 @overflow_sub(i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: overflow_sub:
+; CHECK: # %bb.0:
+; CHECK-NEXT: subw a0, a0, a1
+; CHECK-NEXT: ori a0, a0, 1
+; CHECK-NEXT: slli a0, a0, 48
+; CHECK-NEXT: srli a1, a0, 48
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: li a0, 2
+; CHECK-NEXT: bltu a2, a1, .LBB1_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 5
+; CHECK-NEXT: .LBB1_2:
+; CHECK-NEXT: ret
+ %add = sub i16 %a, %b
+ %or = or i16 %add, 1
+ %cmp = icmp ugt i16 %or, 1024
+ %res = select i1 %cmp, i16 2, i16 5
+ ret i16 %res
+}
+
+define zeroext i16 @overflow_mul(i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: overflow_mul:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mul a0, a1, a0
+; CHECK-NEXT: ori a0, a0, 1
+; CHECK-NEXT: slli a0, a0, 48
+; CHECK-NEXT: srli a1, a0, 48
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: li a0, 2
+; CHECK-NEXT: bltu a2, a1, .LBB2_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 5
+; CHECK-NEXT: .LBB2_2:
+; CHECK-NEXT: ret
+ %add = mul i16 %b, %a
+ %or = or i16 %add, 1
+ %cmp = icmp ugt i16 %or, 1024
+ %res = select i1 %cmp, i16 2, i16 5
+ ret i16 %res
+}
+
+define zeroext i16 @overflow_shl(i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: overflow_shl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sll a0, a0, a1
+; CHECK-NEXT: ori a0, a0, 1
+; CHECK-NEXT: slli a0, a0, 48
+; CHECK-NEXT: srli a1, a0, 48
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: li a0, 2
+; CHECK-NEXT: bltu a2, a1, .LBB3_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 5
+; CHECK-NEXT: .LBB3_2:
+; CHECK-NEXT: ret
+ %add = shl i16 %a, %b
+ %or = or i16 %add, 1
+ %cmp = icmp ugt i16 %or, 1024
+ %res = select i1 %cmp, i16 2, i16 5
+ ret i16 %res
+}
+
+define i32 @overflow_add_no_consts(i8 zeroext %a, i8 zeroext %b, i8 zeroext %limit) {
+; CHECK-LABEL: overflow_add_no_consts:
+; CHECK: # %bb.0:
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: andi a1, a0, 255
+; CHECK-NEXT: li a0, 8
+; CHECK-NEXT: bltu a2, a1, .LBB4_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB4_2:
+; CHECK-NEXT: ret
+ %add = add i8 %b, %a
+ %cmp = icmp ugt i8 %add, %limit
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+define i32 @overflow_add_const_limit(i8 zeroext %a, i8 zeroext %b) {
+; CHECK-LABEL: overflow_add_const_limit:
+; CHECK: # %bb.0:
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: andi a1, a0, 255
+; CHECK-NEXT: li a2, 128
+; CHECK-NEXT: li a0, 8
+; CHECK-NEXT: bltu a2, a1, .LBB5_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB5_2:
+; CHECK-NEXT: ret
+ %add = add i8 %b, %a
+ %cmp = icmp ugt i8 %add, -128
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+define i32 @overflow_add_positive_const_limit(i8 zeroext %a) {
+; CHECK-LABEL: overflow_add_positive_const_limit:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slli a0, a0, 56
+; CHECK-NEXT: srai a1, a0, 56
+; CHECK-NEXT: li a2, -1
+; CHECK-NEXT: li a0, 8
+; CHECK-NEXT: blt a1, a2, .LBB6_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB6_2:
+; CHECK-NEXT: ret
+ %cmp = icmp slt i8 %a, -1
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+define i32 @unsafe_add_underflow(i8 zeroext %a) {
+; CHECK-LABEL: unsafe_add_underflow:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: li a2, 1
+; CHECK-NEXT: li a0, 8
+; CHECK-NEXT: beq a1, a2, .LBB7_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB7_2:
+; CHECK-NEXT: ret
+ %cmp = icmp eq i8 %a, 1
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+define i32 @safe_add_underflow(i8 zeroext %a) {
+; CHECK-LABEL: safe_add_underflow:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: li a0, 8
+; CHECK-NEXT: beqz a1, .LBB8_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB8_2:
+; CHECK-NEXT: ret
+ %cmp = icmp eq i8 %a, 0
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+define i32 @safe_add_underflow_neg(i8 zeroext %a) {
+; CHECK-LABEL: safe_add_underflow_neg:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -2
+; CHECK-NEXT: andi a1, a0, 255
+; CHECK-NEXT: li a2, 251
+; CHECK-NEXT: li a0, 8
+; CHECK-NEXT: bltu a1, a2, .LBB9_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB9_2:
+; CHECK-NEXT: ret
+ %add = add i8 %a, -2
+ %cmp = icmp ult i8 %add, -5
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+define i32 @overflow_sub_negative_const_limit(i8 zeroext %a) {
+; CHECK-LABEL: overflow_sub_negative_const_limit:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slli a0, a0, 56
+; CHECK-NEXT: srai a1, a0, 56
+; CHECK-NEXT: li a2, -1
+; CHECK-NEXT: li a0, 8
+; CHECK-NEXT: blt a1, a2, .LBB10_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB10_2:
+; CHECK-NEXT: ret
+ %cmp = icmp slt i8 %a, -1
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+; This is valid so long as the icmp immediate is sext.
+define i32 @sext_sub_underflow(i8 zeroext %a) {
+; CHECK-LABEL: sext_sub_underflow:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -6
+; CHECK-NEXT: andi a1, a0, 255
+; CHECK-NEXT: li a2, 250
+; CHECK-NEXT: li a0, 8
+; CHECK-NEXT: bltu a2, a1, .LBB11_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB11_2:
+; CHECK-NEXT: ret
+ %sub = add i8 %a, -6
+ %cmp = icmp ugt i8 %sub, -6
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+define i32 @safe_sub_underflow(i8 zeroext %a) {
+; CHECK-LABEL: safe_sub_underflow:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: beqz a1, .LBB12_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 8
+; CHECK-NEXT: .LBB12_2:
+; CHECK-NEXT: ret
+ %cmp.not = icmp eq i8 %a, 0
+ %res = select i1 %cmp.not, i32 16, i32 8
+ ret i32 %res
+}
+
+define i32 @safe_sub_underflow_neg(i8 zeroext %a) {
+; CHECK-LABEL: safe_sub_underflow_neg:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -4
+; CHECK-NEXT: andi a1, a0, 255
+; CHECK-NEXT: li a2, 250
+; CHECK-NEXT: li a0, 8
+; CHECK-NEXT: bltu a2, a1, .LBB13_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB13_2:
+; CHECK-NEXT: ret
+ %sub = add i8 %a, -4
+ %cmp = icmp ugt i8 %sub, -6
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+; This is valid so long as the icmp immediate is sext.
+define i32 @sext_sub_underflow_neg(i8 zeroext %a) {
+; CHECK-LABEL: sext_sub_underflow_neg:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -4
+; CHECK-NEXT: andi a1, a0, 255
+; CHECK-NEXT: li a2, 253
+; CHECK-NEXT: li a0, 8
+; CHECK-NEXT: bltu a1, a2, .LBB14_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB14_2:
+; CHECK-NEXT: ret
+ %sub = add i8 %a, -4
+ %cmp = icmp ult i8 %sub, -3
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+define i32 @safe_sub_imm_var(ptr nocapture readonly %b) local_unnamed_addr #1 {
+; CHECK-LABEL: safe_sub_imm_var:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a0, 0
+; CHECK-NEXT: ret
+entry:
+ ret i32 0
+}
+
+define i32 @safe_sub_var_imm(ptr nocapture readonly %b) local_unnamed_addr #1 {
+; CHECK-LABEL: safe_sub_var_imm:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lbu a0, 0(a0)
+; CHECK-NEXT: addi a0, a0, 8
+; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: sltiu a0, a0, 253
+; CHECK-NEXT: xori a0, a0, 1
+; CHECK-NEXT: ret
+entry:
+ %0 = load i8, ptr %b, align 1
+ %sub = add nsw i8 %0, 8
+ %cmp = icmp ugt i8 %sub, -4
+ %conv4 = zext i1 %cmp to i32
+ ret i32 %conv4
+}
+
+define i32 @safe_add_imm_var(ptr nocapture readnone %b) {
+; CHECK-LABEL: safe_add_imm_var:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: ret
+entry:
+ ret i32 1
+}
+
+define i32 @safe_add_var_imm(ptr nocapture readnone %b) {
+; CHECK-LABEL: safe_add_var_imm:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: ret
+entry:
+ ret i32 1
+}
+
+define i8 @convert_add_order(i8 zeroext %arg) {
+; CHECK-LABEL: convert_add_order:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ori a1, a0, 1
+; CHECK-NEXT: sltiu a2, a1, 50
+; CHECK-NEXT: addi a1, a1, -40
+; CHECK-NEXT: andi a1, a1, 255
+; CHECK-NEXT: sltiu a1, a1, 20
+; CHECK-NEXT: li a3, 2
+; CHECK-NEXT: sub a3, a3, a1
+; CHECK-NEXT: addi a2, a2, -1
+; CHECK-NEXT: or a2, a2, a3
+; CHECK-NEXT: and a0, a2, a0
+; CHECK-NEXT: ret
+ %shl = or i8 %arg, 1
+ %cmp.0 = icmp ult i8 %shl, 50
+ %sub = add nsw i8 %shl, -40
+ %cmp.1 = icmp ult i8 %sub, 20
+ %mask.sel.v = select i1 %cmp.1, i8 1, i8 2
+ %mask.sel = select i1 %cmp.0, i8 %mask.sel.v, i8 -1
+ %res = and i8 %mask.sel, %arg
+ ret i8 %res
+}
+
+define i8 @underflow_if_sub(i32 %arg, i8 zeroext %arg1) {
+; CHECK-LABEL: underflow_if_sub:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sext.w a2, a0
+; CHECK-NEXT: sgtz a2, a2
+; CHECK-NEXT: and a0, a2, a0
+; CHECK-NEXT: addi a0, a0, -11
+; CHECK-NEXT: andi a2, a0, 247
+; CHECK-NEXT: bltu a2, a1, .LBB20_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 100
+; CHECK-NEXT: .LBB20_2:
+; CHECK-NEXT: ret
+ %cmp = icmp sgt i32 %arg, 0
+ %conv = zext i1 %cmp to i32
+ %and = and i32 %conv, %arg
+ %trunc = trunc i32 %and to i8
+ %conv1 = add nuw nsw i8 %trunc, -11
+ %cmp.1 = icmp ult i8 %conv1, %arg1
+ %res = select i1 %cmp.1, i8 %conv1, i8 100
+ ret i8 %res
+}
+
+define i8 @underflow_if_sub_signext(i32 %arg, i8 signext %arg1) {
+; CHECK-LABEL: underflow_if_sub_signext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sext.w a2, a0
+; CHECK-NEXT: sgtz a2, a2
+; CHECK-NEXT: and a0, a2, a0
+; CHECK-NEXT: addi a0, a0, -11
+; CHECK-NEXT: bltu a0, a1, .LBB21_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a0, 100
+; CHECK-NEXT: .LBB21_2:
+; CHECK-NEXT: ret
+ %cmp = icmp sgt i32 %arg, 0
+ %conv = zext i1 %cmp to i32
+ %and = and i32 %conv, %arg
+ %trunc = trunc i32 %and to i8
+ %conv1 = add nuw nsw i8 %trunc, -11
+ %cmp.1 = icmp ult i8 %conv1, %arg1
+ %res = select i1 %cmp.1, i8 %conv1, i8 100
+ ret i8 %res
+}
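
Once committed, the test can be run on its own through lit (again assuming a
configured build tree under build/):

    build/bin/llvm-lit -v llvm/test/CodeGen/RISCV/typepromotion-overflow.ll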