; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=sccp < %s | FileCheck %s

declare { i8, i1 } @llvm.uadd.with.overflow.i8(i8, i8)
declare { i8, i1 } @llvm.usub.with.overflow.i8(i8, i8)
declare { i8, i1 } @llvm.umul.with.overflow.i8(i8, i8)
declare { i8, i1 } @llvm.sadd.with.overflow.i8(i8, i8)
declare { i8, i1 } @llvm.ssub.with.overflow.i8(i8, i8)
declare { i8, i1 } @llvm.smul.with.overflow.i8(i8, i8)
declare { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8>, <2 x i8>)
declare void @use.i1(i1)

define void @unsigned_overflow(ptr %p) {
; CHECK-LABEL: @unsigned_overflow(
; CHECK-NEXT:    [[V0_100:%.*]] = load i8, ptr [[P:%.*]], align 1, !range [[RNG0:![0-9]+]]
; CHECK-NEXT:    [[V0_155:%.*]] = load i8, ptr [[P]], align 1, !range [[RNG1:![0-9]+]]
; CHECK-NEXT:    [[V0_156:%.*]] = load i8, ptr [[P]], align 1, !range [[RNG2:![0-9]+]]
; CHECK-NEXT:    [[V100_255:%.*]] = load i8, ptr [[P]], align 1, !range [[RNG3:![0-9]+]]
; CHECK-NEXT:    [[V99_255:%.*]] = load i8, ptr [[P]], align 1, !range [[RNG4:![0-9]+]]
; CHECK-NEXT:    [[V1_2:%.*]] = load i8, ptr [[P]], align 1, !range [[RNG5:![0-9]+]]
; CHECK-NEXT:    [[V1_3:%.*]] = load i8, ptr [[P]], align 1, !range [[RNG6:![0-9]+]]
; CHECK-NEXT:    [[WO1:%.*]] = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 [[V0_100]], i8 [[V0_155]])
; CHECK-NEXT:    call void @use.i1(i1 false)
; CHECK-NEXT:    [[WO2:%.*]] = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 [[V0_100]], i8 [[V0_156]])
; CHECK-NEXT:    [[OV2:%.*]] = extractvalue { i8, i1 } [[WO2]], 1
; CHECK-NEXT:    call void @use.i1(i1 [[OV2]])
; CHECK-NEXT:    [[WO3:%.*]] = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 [[V100_255]], i8 [[V0_100]])
; CHECK-NEXT:    call void @use.i1(i1 false)
; CHECK-NEXT:    [[WO4:%.*]] = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 [[V99_255]], i8 [[V0_100]])
; CHECK-NEXT:    [[OV4:%.*]] = extractvalue { i8, i1 } [[WO4]], 1
; CHECK-NEXT:    call void @use.i1(i1 [[OV4]])
; CHECK-NEXT:    [[WO5:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 [[V0_100]], i8 [[V1_2]])
; CHECK-NEXT:    call void @use.i1(i1 false)
; CHECK-NEXT:    [[WO6:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 [[V0_100]], i8 [[V1_3]])
; CHECK-NEXT:    [[OV6:%.*]] = extractvalue { i8, i1 } [[WO6]], 1
; CHECK-NEXT:    call void @use.i1(i1 [[OV6]])
; CHECK-NEXT:    ret void
;
  %v0_100 = load i8, ptr %p, !range !{i8 0, i8 101}
  %v0_155 = load i8, ptr %p, !range !{i8 0, i8 156}
  %v0_156 = load i8, ptr %p, !range !{i8 0, i8 157}
  %v100_255 = load i8, ptr %p, !range !{i8 100, i8 0}
  %v99_255 = load i8, ptr %p, !range !{i8 99, i8 0}
  %v1_2 = load i8, ptr %p, !range !{i8 1, i8 3}
  %v1_3 = load i8, ptr %p, !range !{i8 1, i8 4}
  %wo1 = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %v0_100, i8 %v0_155)
  %ov1 = extractvalue { i8, i1 } %wo1, 1
  call void @use.i1(i1 %ov1)
  %wo2 = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %v0_100, i8 %v0_156)
  %ov2 = extractvalue { i8, i1 } %wo2, 1
  call void @use.i1(i1 %ov2)
  %wo3 = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 %v100_255, i8 %v0_100)
  %ov3 = extractvalue { i8, i1 } %wo3, 1
  call void @use.i1(i1 %ov3)
  %wo4 = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 %v99_255, i8 %v0_100)
  %ov4 = extractvalue { i8, i1 } %wo4, 1
  call void @use.i1(i1 %ov4)
  %wo5 = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %v0_100, i8 %v1_2)
  %ov5 = extractvalue { i8, i1 } %wo5, 1
  call void @use.i1(i1 %ov5)
  %wo6 = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %v0_100, i8 %v1_3)
  %ov6 = extractvalue { i8, i1 } %wo6, 1
  call void @use.i1(i1 %ov6)
  ret void
}

define void @signed_overflow(ptr %p) {
; CHECK-LABEL: @signed_overflow(
; CHECK-NEXT:    [[V0_100:%.*]] = load i8, ptr [[P:%.*]], align 1, !range [[RNG0]]
; CHECK-NEXT:    [[V0_27:%.*]] = load i8, ptr [[P]], align 1, !range [[RNG7:![0-9]+]]
; CHECK-NEXT:    [[V0_28:%.*]] = load i8, ptr [[P]], align 1, !range [[RNG8:![0-9]+]]
; CHECK-NEXT:    [[VM27_0:%.*]] = load i8, ptr [[P]], align 1, !range [[RNG9:![0-9]+]]
; CHECK-NEXT:    [[VM28_0:%.*]] = load i8, ptr [[P]], align 1, !range [[RNG10:![0-9]+]]
; CHECK-NEXT:    [[V1_4:%.*]] = load i8, ptr [[P]], align 1, !range [[RNG11:![0-9]+]]
; CHECK-NEXT:    [[V1_5:%.*]] = load i8, ptr [[P]], align 1, !range [[RNG12:![0-9]+]]
; CHECK-NEXT:    [[WO1:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[V0_100]], i8 [[V0_27]])
; CHECK-NEXT:    call void @use.i1(i1 false)
; CHECK-NEXT:    [[WO2:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[V0_100]], i8 [[V0_28]])
; CHECK-NEXT:    [[OV2:%.*]] = extractvalue { i8, i1 } [[WO2]], 1
; CHECK-NEXT:    call void @use.i1(i1 [[OV2]])
; CHECK-NEXT:    [[WO3:%.*]] = call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 [[V0_100]], i8 [[VM27_0]])
; CHECK-NEXT:    call void @use.i1(i1 false)
; CHECK-NEXT:    [[WO4:%.*]] = call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 [[V0_100]], i8 [[VM28_0]])
; CHECK-NEXT:    [[OV4:%.*]] = extractvalue { i8, i1 } [[WO4]], 1
; CHECK-NEXT:    call void @use.i1(i1 [[OV4]])
; CHECK-NEXT:    [[WO5:%.*]] = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 [[V0_27]], i8 [[V1_4]])
; CHECK-NEXT:    call void @use.i1(i1 false)
; CHECK-NEXT:    [[WO6:%.*]] = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 [[V0_27]], i8 [[V1_5]])
; CHECK-NEXT:    [[OV6:%.*]] = extractvalue { i8, i1 } [[WO6]], 1
; CHECK-NEXT:    call void @use.i1(i1 [[OV6]])
; CHECK-NEXT:    ret void
;
  %v0_100 = load i8, ptr %p, !range !{i8 0, i8 101}
  %v0_27 = load i8, ptr %p, !range !{i8 0, i8 28}
  %v0_28 = load i8, ptr %p, !range !{i8 0, i8 29}
  %vm27_0 = load i8, ptr %p, !range !{i8 -27, i8 0}
  %vm28_0 = load i8, ptr %p, !range !{i8 -28, i8 0}
  %v1_4 = load i8, ptr %p, !range !{i8 1, i8 5}
  %v1_5 = load i8, ptr %p, !range !{i8 1, i8 6}
  %wo1 = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %v0_100, i8 %v0_27)
  %ov1 = extractvalue { i8, i1 } %wo1, 1
  call void @use.i1(i1 %ov1)
  %wo2 = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %v0_100, i8 %v0_28)
  %ov2 = extractvalue { i8, i1 } %wo2, 1
  call void @use.i1(i1 %ov2)
  %wo3 = call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 %v0_100, i8 %vm27_0)
  %ov3 = extractvalue { i8, i1 } %wo3, 1
  call void @use.i1(i1 %ov3)
  %wo4 = call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 %v0_100, i8 %vm28_0)
  %ov4 = extractvalue { i8, i1 } %wo4, 1
  call void @use.i1(i1 %ov4)
  %wo5 = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %v0_27, i8 %v1_4)
  %ov5 = extractvalue { i8, i1 } %wo5, 1
  call void @use.i1(i1 %ov5)
  %wo6 = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %v0_27, i8 %v1_5)
  %ov6 = extractvalue { i8, i1 } %wo6, 1
  call void @use.i1(i1 %ov6)
  ret void
}

define void @unsigned_result(ptr %p) {
; CHECK-LABEL: @unsigned_result(
; CHECK-NEXT:    [[V0_20:%.*]] = load i8, ptr [[P:%.*]], align 1, !range [[RNG13:![0-9]+]]
; CHECK-NEXT:    [[V20_40:%.*]] = load i8, ptr [[P]], align 1, !range [[RNG14:![0-9]+]]
; CHECK-NEXT:    [[V0_10:%.*]] = load i8, ptr [[P]], align 1, !range [[RNG15:![0-9]+]]
; CHECK-NEXT:    [[V2_3:%.*]] = load i8, ptr [[P]], align 1, !range [[RNG16:![0-9]+]]
; CHECK-NEXT:    [[WO1:%.*]] = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 [[V0_20]], i8 [[V20_40]])
; CHECK-NEXT:    [[RES1:%.*]] = extractvalue { i8, i1 } [[WO1]], 0
; CHECK-NEXT:    call void @use.i1(i1 true)
; CHECK-NEXT:    call void @use.i1(i1 true)
; CHECK-NEXT:    [[CMP1_3:%.*]] = icmp ugt i8 [[RES1]], 20
; CHECK-NEXT:    call void @use.i1(i1 [[CMP1_3]])
; CHECK-NEXT:    [[CMP1_4:%.*]] = icmp ult i8 [[RES1]], 60
; CHECK-NEXT:    call void @use.i1(i1 [[CMP1_4]])
; CHECK-NEXT:    [[WO2:%.*]] = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 [[V0_10]], i8 [[V20_40]])
; CHECK-NEXT:    [[RES2:%.*]] = extractvalue { i8, i1 } [[WO2]], 0
; CHECK-NEXT:    call void @use.i1(i1 true)
; CHECK-NEXT:    call void @use.i1(i1 true)
; CHECK-NEXT:    [[CMP2_3:%.*]] = icmp ugt i8 [[RES2]], -40
; CHECK-NEXT:    call void @use.i1(i1 [[CMP2_3]])
; CHECK-NEXT:    [[CMP2_4:%.*]] = icmp ult i8 [[RES2]], -10
; CHECK-NEXT:    call void @use.i1(i1 [[CMP2_4]])
; CHECK-NEXT:    [[WO3:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 [[V20_40]], i8 [[V2_3]])
; CHECK-NEXT:    [[RES3:%.*]] = extractvalue { i8, i1 } [[WO3]], 0
; CHECK-NEXT:    call void @use.i1(i1 true)
; CHECK-NEXT:    call void @use.i1(i1 true)
; CHECK-NEXT:    [[CMP3_3:%.*]] = icmp ugt i8 [[RES3]], 40
; CHECK-NEXT:    call void @use.i1(i1 [[CMP3_3]])
; CHECK-NEXT:    [[CMP3_4:%.*]] = icmp ult i8 [[RES3]], 120
; CHECK-NEXT:    call void @use.i1(i1 [[CMP3_4]])
; CHECK-NEXT:    ret void
;
  %v0_20 = load i8, ptr %p, !range !{i8 0, i8 21}
  %v20_40 = load i8, ptr %p, !range !{i8 20, i8 41}
  %v0_10 = load i8, ptr %p, !range !{i8 0, i8 11}
  %v2_3 = load i8, ptr %p, !range !{i8 2, i8 4}
  %wo1 = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %v0_20, i8 %v20_40)
  %res1 = extractvalue { i8, i1 } %wo1, 0
  %cmp1.1 = icmp uge i8 %res1, 20
  call void @use.i1(i1 %cmp1.1)
  %cmp1.2 = icmp ule i8 %res1, 60
  call void @use.i1(i1 %cmp1.2)
  %cmp1.3 = icmp ugt i8 %res1, 20
  call void @use.i1(i1 %cmp1.3)
  %cmp1.4 = icmp ult i8 %res1, 60
  call void @use.i1(i1 %cmp1.4)
  ; This case actually does overflow, but we can still determine the range.
  %wo2 = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 %v0_10, i8 %v20_40)
  %res2 = extractvalue { i8, i1 } %wo2, 0
  %cmp2.1 = icmp uge i8 %res2, -40
  call void @use.i1(i1 %cmp2.1)
  %cmp2.2 = icmp ule i8 %res2, -10
  call void @use.i1(i1 %cmp2.2)
  %cmp2.3 = icmp ugt i8 %res2, -40
  call void @use.i1(i1 %cmp2.3)
  %cmp2.4 = icmp ult i8 %res2, -10
  call void @use.i1(i1 %cmp2.4)
  %wo3 = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %v20_40, i8 %v2_3)
  %res3 = extractvalue { i8, i1 } %wo3, 0
  %cmp3.1 = icmp uge i8 %res3, 40
  call void @use.i1(i1 %cmp3.1)
  %cmp3.2 = icmp ule i8 %res3, 120
  call void @use.i1(i1 %cmp3.2)
  %cmp3.3 = icmp ugt i8 %res3, 40
  call void @use.i1(i1 %cmp3.3)
  %cmp3.4 = icmp ult i8 %res3, 120
  call void @use.i1(i1 %cmp3.4)
  ret void
}

define void @signed_result(ptr %p) {
; CHECK-LABEL: @signed_result(
; CHECK-NEXT:    [[V0_20:%.*]] = load i8, ptr [[P:%.*]], align 1, !range [[RNG13]]
; CHECK-NEXT:    [[V20_40:%.*]] = load i8, ptr [[P]], align 1, !range [[RNG14]]
; CHECK-NEXT:    [[V0_10:%.*]] = load i8, ptr [[P]], align 1, !range [[RNG15]]
; CHECK-NEXT:    [[V2_3:%.*]] = load i8, ptr [[P]], align 1, !range [[RNG16]]
; CHECK-NEXT:    [[WO1:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[V0_20]], i8 [[V20_40]])
; CHECK-NEXT:    [[RES1:%.*]] = extractvalue { i8, i1 } [[WO1]], 0
; CHECK-NEXT:    call void @use.i1(i1 true)
; CHECK-NEXT:    call void @use.i1(i1 true)
; CHECK-NEXT:    [[CMP1_3:%.*]] = icmp ugt i8 [[RES1]], 20
; CHECK-NEXT:    call void @use.i1(i1 [[CMP1_3]])
; CHECK-NEXT:    [[CMP1_4:%.*]] = icmp ult i8 [[RES1]], 60
; CHECK-NEXT:    call void @use.i1(i1 [[CMP1_4]])
; CHECK-NEXT:    [[WO2:%.*]] = call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 [[V0_10]], i8 [[V20_40]])
; CHECK-NEXT:    [[RES2:%.*]] = extractvalue { i8, i1 } [[WO2]], 0
; CHECK-NEXT:    call void @use.i1(i1 true)
; CHECK-NEXT:    call void @use.i1(i1 true)
; CHECK-NEXT:    [[CMP2_3:%.*]] = icmp ugt i8 [[RES2]], -40
; CHECK-NEXT:    call void @use.i1(i1 [[CMP2_3]])
; CHECK-NEXT:    [[CMP2_4:%.*]] = icmp ult i8 [[RES2]], -10
; CHECK-NEXT:    call void @use.i1(i1 [[CMP2_4]])
; CHECK-NEXT:    [[WO3:%.*]] = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 [[V20_40]], i8 [[V2_3]])
; CHECK-NEXT:    [[RES3:%.*]] = extractvalue { i8, i1 } [[WO3]], 0
; CHECK-NEXT:    call void @use.i1(i1 true)
; CHECK-NEXT:    call void @use.i1(i1 true)
; CHECK-NEXT:    [[CMP3_3:%.*]] = icmp ugt i8 [[RES3]], 40
; CHECK-NEXT:    call void @use.i1(i1 [[CMP3_3]])
; CHECK-NEXT:    [[CMP3_4:%.*]] = icmp ult i8 [[RES3]], 120
; CHECK-NEXT:    call void @use.i1(i1 [[CMP3_4]])
; CHECK-NEXT:    ret void
;
  %v0_20 = load i8, ptr %p, !range !{i8 0, i8 21}
  %v20_40 = load i8, ptr %p, !range !{i8 20, i8 41}
  %v0_10 = load i8, ptr %p, !range !{i8 0, i8 11}
  %v2_3 = load i8, ptr %p, !range !{i8 2, i8 4}
  %wo1 = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %v0_20, i8 %v20_40)
  %res1 = extractvalue { i8, i1 } %wo1, 0
  %cmp1.1 = icmp uge i8 %res1, 20
  call void @use.i1(i1 %cmp1.1)
  %cmp1.2 = icmp ule i8 %res1, 60
  call void @use.i1(i1 %cmp1.2)
  %cmp1.3 = icmp ugt i8 %res1, 20
  call void @use.i1(i1 %cmp1.3)
  %cmp1.4 = icmp ult i8 %res1, 60
  call void @use.i1(i1 %cmp1.4)
  %wo2 = call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 %v0_10, i8 %v20_40)
  %res2 = extractvalue { i8, i1 } %wo2, 0
  %cmp2.1 = icmp uge i8 %res2, -40
  call void @use.i1(i1 %cmp2.1)
  %cmp2.2 = icmp ule i8 %res2, -10
  call void @use.i1(i1 %cmp2.2)
  %cmp2.3 = icmp ugt i8 %res2, -40
  call void @use.i1(i1 %cmp2.3)
  %cmp2.4 = icmp ult i8 %res2, -10
  call void @use.i1(i1 %cmp2.4)
  %wo3 = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %v20_40, i8 %v2_3)
  %res3 = extractvalue { i8, i1 } %wo3, 0
  %cmp3.1 = icmp uge i8 %res3, 40
  call void @use.i1(i1 %cmp3.1)
  %cmp3.2 = icmp ule i8 %res3, 120
  call void @use.i1(i1 %cmp3.2)
  %cmp3.3 = icmp ugt i8 %res3, 40
  call void @use.i1(i1 %cmp3.3)
  %cmp3.4 = icmp ult i8 %res3, 120
  call void @use.i1(i1 %cmp3.4)
  ret void
}

; SCCP doesn't really support vector ranges yet, just make sure we don't crash.
define <2 x i1> @vec(<2 x i8> %v1, <2 x i8> %v2) {
; CHECK-LABEL: @vec(
; CHECK-NEXT:    [[WO:%.*]] = call { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8> [[V1:%.*]], <2 x i8> [[V2:%.*]])
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { <2 x i8>, <2 x i1> } [[WO]], 1
; CHECK-NEXT:    ret <2 x i1> [[OV]]
;
  %wo = call { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8> %v1, <2 x i8> %v2)
  %ov = extractvalue { <2 x i8>, <2 x i1> } %wo, 1
  ret <2 x i1> %ov
}