; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -passes=separate-const-offset-from-gep -S | FileCheck %s

; gfx1200 is particularly interesting since it allows negative immediate offsets
; in flat instructions, so the transformation is applied in more cases.

; The inbounds flags cannot be preserved here: If the pointers point to the
; beginning of an object and %i is 1, the intermediate GEPs are out of bounds.
define ptr @maybe_oob(ptr %p, i64 %i) {
; CHECK-LABEL: @maybe_oob(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[IDX1:%.*]] = sub i64 0, [[I:%.*]]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[IDX1]]
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 4
; CHECK-NEXT:    ret ptr [[ARRAYIDX2]]
;
entry:
  %idx = sub nsw i64 1, %i
  %arrayidx = getelementptr inbounds i32, ptr %p, i64 %idx
  ret ptr %arrayidx
}

; All indices must be non-negative, so inbounds can be preserved.
define ptr @must_be_inbounds(ptr %p, i32 %i) {
; CHECK-LABEL: @must_be_inbounds(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[I_PROM:%.*]] = zext i32 [[I:%.*]] to i64
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[I_PROM]]
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[TMP0]], i64 4
; CHECK-NEXT:    ret ptr [[ARRAYIDX2]]
;
entry:
  %i.prom = zext i32 %i to i64
  %idx = add nsw i64 %i.prom, 1
  %arrayidx = getelementptr inbounds i32, ptr %p, i64 %idx
  ret ptr %arrayidx
}

; idx must be non-negative -> preserve inbounds
define ptr @sign_bit_clear(ptr %p, i64 %i) {
; CHECK-LABEL: @sign_bit_clear(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[IDX:%.*]] = and i64 [[I:%.*]], 9223372036854775807
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[IDX]]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[TMP0]], i64 4
; CHECK-NEXT:    ret ptr [[ARRAYIDX]]
;
entry:
  %idx = and i64 %i, u0x7fffffffffffffff
  %idx.add = add i64 %idx, 1
  %arrayidx = getelementptr inbounds i32, ptr %p, i64 %idx.add
  ret ptr %arrayidx
}

; idx may be negative -> don't preserve inbounds
define ptr @sign_bit_not_clear(ptr %p, i64 %i) {
; CHECK-LABEL: @sign_bit_not_clear(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[IDX:%.*]] = and i64 [[I:%.*]], -256
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[IDX]]
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 4
; CHECK-NEXT:    ret ptr [[ARRAYIDX2]]
;
entry:
  %idx = and i64 %i, u0xffffffffffffff00
  %idx.add = add i64 %idx, 1
  %arrayidx = getelementptr inbounds i32, ptr %p, i64 %idx.add
  ret ptr %arrayidx
}

; idx may be 0 or very negative -> don't preserve inbounds
define ptr @only_sign_bit_not_clear(ptr %p, i64 %i) {
; CHECK-LABEL: @only_sign_bit_not_clear(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[IDX:%.*]] = and i64 [[I:%.*]], -9223372036854775808
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[IDX]]
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 4
; CHECK-NEXT:    ret ptr [[ARRAYIDX2]]
;
entry:
  %idx = and i64 %i, u0x8000000000000000
  %idx.add = add i64 %idx, 1
  %arrayidx = getelementptr inbounds i32, ptr %p, i64 %idx.add
  ret ptr %arrayidx
}

; all indices non-negative -> preserve inbounds
define ptr @multi_level_nonnegative(ptr %p, i64 %idx1, i64 %idx2) {
; CHECK-LABEL: @multi_level_nonnegative(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[MASKED_IDX1:%.*]] = and i64 [[IDX1:%.*]], 255
; CHECK-NEXT:    [[MASKED_IDX2:%.*]] = and i64 [[IDX2:%.*]], 65535
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [10 x [20 x i32]], ptr [[P:%.*]], i64 0, i64 [[MASKED_IDX1]], i64 [[MASKED_IDX2]]
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, ptr [[TMP0]], i64 180
; CHECK-NEXT:    ret ptr [[ARRAYIDX3]]
;
entry:
  %masked.idx1 = and i64 %idx1, u0xff
  %masked.idx2 = and i64 %idx2, u0xffff
  %idx1.add = add i64 %masked.idx1, 2
  %idx2.add = add i64 %masked.idx2, 5
  %arrayidx = getelementptr inbounds [10 x [20 x i32]], ptr %p, i64 0, i64 %idx1.add, i64 %idx2.add
  ret ptr %arrayidx
}

; It doesn't matter that %idx2.add might be negative, the indices in the
; resulting GEPs are all non-negative -> preserve inbounds
define ptr @multi_level_mixed_okay(ptr %p, i64 %idx1, i64 %idx2) {
; CHECK-LABEL: @multi_level_mixed_okay(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[MASKED_IDX1:%.*]] = and i64 [[IDX1:%.*]], 255
; CHECK-NEXT:    [[MASKED_IDX2:%.*]] = and i64 [[IDX2:%.*]], 65535
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [10 x [20 x i32]], ptr [[P:%.*]], i64 0, i64 [[MASKED_IDX1]], i64 [[MASKED_IDX2]]
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, ptr [[TMP0]], i64 156
; CHECK-NEXT:    ret ptr [[ARRAYIDX3]]
;
entry:
  %masked.idx1 = and i64 %idx1, u0xff
  %masked.idx2 = and i64 %idx2, u0xffff
  %idx1.add = add i64 %masked.idx1, 2
  %idx2.add = add i64 %masked.idx2, -1
  %arrayidx = getelementptr inbounds [10 x [20 x i32]], ptr %p, i64 0, i64 %idx1.add, i64 %idx2.add
  ret ptr %arrayidx
}

; One index may be negative -> don't preserve inbounds
define ptr @multi_level_mixed_not_okay(ptr %p, i64 %idx1, i64 %idx2) {
; CHECK-LABEL: @multi_level_mixed_not_okay(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[MASKED_IDX1:%.*]] = and i64 [[IDX1:%.*]], -256
; CHECK-NEXT:    [[MASKED_IDX2:%.*]] = and i64 [[IDX2:%.*]], 65535
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr [10 x [20 x i32]], ptr [[P:%.*]], i64 0, i64 [[MASKED_IDX1]], i64 [[MASKED_IDX2]]
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr i8, ptr [[TMP0]], i64 156
; CHECK-NEXT:    ret ptr [[ARRAYIDX3]]
;
entry:
  %masked.idx1 = and i64 %idx1, u0xffffffffffffff00
  %masked.idx2 = and i64 %idx2, u0xffff
  %idx1.add = add i64 %masked.idx1, 2
  %idx2.add = add i64 %masked.idx2, -1
  %arrayidx = getelementptr inbounds [10 x [20 x i32]], ptr %p, i64 0, i64 %idx1.add, i64 %idx2.add
  ret ptr %arrayidx
}

define ptr @nuw_implies_nuw(ptr %p, i64 %i) {
; CHECK-LABEL: @nuw_implies_nuw(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr nuw i32, ptr [[P:%.*]], i64 [[I:%.*]]
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr nuw i8, ptr [[TMP0]], i64 4
; CHECK-NEXT:    ret ptr [[ARRAYIDX2]]
;
entry:
  %idx = add nuw i64 %i, 1
  %arrayidx = getelementptr nuw i32, ptr %p, i64 %idx
  ret ptr %arrayidx
}

define ptr @nuw_implies_nuw_negative(ptr %p, i64 %i) {
; CHECK-LABEL: @nuw_implies_nuw_negative(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr nuw i32, ptr [[P:%.*]], i64 [[I:%.*]]
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr nuw i8, ptr [[TMP0]], i64 -64
; CHECK-NEXT:    ret ptr [[ARRAYIDX2]]
;
entry:
  %idx = add nuw i64 %i, -16
  %arrayidx = getelementptr nuw i32, ptr %p, i64 %idx
  ret ptr %arrayidx
}

define ptr @nuw_inbounds_implies_nuw_inbounds(ptr %p, i64 %i) {
; CHECK-LABEL: @nuw_inbounds_implies_nuw_inbounds(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw i32, ptr [[P:%.*]], i64 [[I:%.*]]
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 4
; CHECK-NEXT:    ret ptr [[ARRAYIDX2]]
;
entry:
  %idx = add nuw i64 %i, 1
  %arrayidx = getelementptr inbounds nuw i32, ptr %p, i64 %idx
  ret ptr %arrayidx
}

; This is poison anyway, so we can preserve the flags.
define ptr @nuw_inbounds_implies_nuw_inbounds_negative(ptr %p, i64 %i) {
; CHECK-LABEL: @nuw_inbounds_implies_nuw_inbounds_negative(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw i32, ptr [[P:%.*]], i64 [[I:%.*]]
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 -64
; CHECK-NEXT:    ret ptr [[ARRAYIDX2]]
;
entry:
  %idx = add nuw i64 %i, -16
  %arrayidx = getelementptr inbounds nuw i32, ptr %p, i64 %idx
  ret ptr %arrayidx
}

define ptr @nuw_nusw_implies_nuw_nusw(ptr %p, i64 %i) {
; CHECK-LABEL: @nuw_nusw_implies_nuw_nusw(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr nusw nuw i32, ptr [[P:%.*]], i64 [[I:%.*]]
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr nusw nuw i8, ptr [[TMP0]], i64 4
; CHECK-NEXT:    ret ptr [[ARRAYIDX2]]
;
entry:
  %idx = add nuw i64 %i, 1
  %arrayidx = getelementptr nusw nuw i32, ptr %p, i64 %idx
  ret ptr %arrayidx
}

; Also poison.
define ptr @nuw_implies_nuw_nusw_negative(ptr %p, i64 %i) {
; CHECK-LABEL: @nuw_implies_nuw_nusw_negative(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr nusw nuw i32, ptr [[P:%.*]], i64 [[I:%.*]]
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr nusw nuw i8, ptr [[TMP0]], i64 -64
; CHECK-NEXT:    ret ptr [[ARRAYIDX2]]
;
entry:
  %idx = add nuw i64 %i, -16
  %arrayidx = getelementptr nusw nuw i32, ptr %p, i64 %idx
  ret ptr %arrayidx
}

define ptr @nuw_inbounds_implies_nuw_inbounds_ordisjoint(ptr %p, i64 %i) {
; CHECK-LABEL: @nuw_inbounds_implies_nuw_inbounds_ordisjoint(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw i32, ptr [[P:%.*]], i64 [[I:%.*]]
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 4
; CHECK-NEXT:    ret ptr [[ARRAYIDX2]]
;
entry:
  %idx = or disjoint i64 %i, 1
  %arrayidx = getelementptr inbounds nuw i32, ptr %p, i64 %idx
  ret ptr %arrayidx
}

; This is poison anyway, so we can do the transformation.
define ptr @nuw_inbounds_implies_nuw_inbounds_ordisjoint_negative(ptr %p, i64 %i) {
; CHECK-LABEL: @nuw_inbounds_implies_nuw_inbounds_ordisjoint_negative(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw i32, ptr [[P:%.*]], i64 [[I:%.*]]
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 -64
; CHECK-NEXT:    ret ptr [[ARRAYIDX2]]
;
entry:
  %idx = or disjoint i64 %i, -16
  %arrayidx = getelementptr inbounds nuw i32, ptr %p, i64 %idx
  ret ptr %arrayidx
}

; Check that nothing happens for non-disjoint ors.
define ptr @or_no_disjoint(ptr %p, i64 %i) {
; CHECK-LABEL: @or_no_disjoint(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[IDX:%.*]] = or i64 [[I:%.*]], 1
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[P:%.*]], i64 [[IDX]]
; CHECK-NEXT:    ret ptr [[ARRAYIDX]]
;
entry:
  %idx = or i64 %i, 1
  %arrayidx = getelementptr inbounds nuw i32, ptr %p, i64 %idx
  ret ptr %arrayidx
}

define ptr @no_nuw_inbounds_for_sub(ptr %p, i64 %i) {
; CHECK-LABEL: @no_nuw_inbounds_for_sub(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[I:%.*]]
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 -4
; CHECK-NEXT:    ret ptr [[ARRAYIDX2]]
;
entry:
  %idx = sub nuw i64 %i, 1
  %arrayidx = getelementptr inbounds nuw i32, ptr %p, i64 %idx
  ret ptr %arrayidx
}

define ptr @no_nuw_inbounds_for_sub_negative(ptr %p, i64 %i) {
; CHECK-LABEL: @no_nuw_inbounds_for_sub_negative(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[I:%.*]]
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 64
; CHECK-NEXT:    ret ptr [[ARRAYIDX2]]
;
entry:
  %idx = sub nuw i64 %i, -16
  %arrayidx = getelementptr inbounds nuw i32, ptr %p, i64 %idx
  ret ptr %arrayidx
}

; Can't preserve nuw and other flags here as distributing the trunc towards the
; leaves can introduce new wraps.
define ptr @nuw_inbounds_trunc(ptr %p, i128 %i) {
; CHECK-LABEL: @nuw_inbounds_trunc(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = trunc i128 [[I:%.*]] to i64
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP0]]
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr i8, ptr [[TMP1]], i64 4
; CHECK-NEXT:    ret ptr [[ARRAYIDX2]]
;
entry:
  %idx = add nuw i128 %i, 1
  %idx.conv = trunc i128 %idx to i64
  %arrayidx = getelementptr inbounds nuw i32, ptr %p, i64 %idx.conv
  ret ptr %arrayidx
}

; trunc nuw is not a problem.
define ptr @nuw_inbounds_implies_nuw_inbounds_trunc_nuw(ptr %p, i128 %i) {
; CHECK-LABEL: @nuw_inbounds_implies_nuw_inbounds_trunc_nuw(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = trunc i128 [[I:%.*]] to i64
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw i32, ptr [[P:%.*]], i64 [[TMP0]]
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 4
; CHECK-NEXT:    ret ptr [[ARRAYIDX2]]
;
entry:
  %idx = add nuw i128 %i, 1
  %idx.conv = trunc nuw i128 %idx to i64
  %arrayidx = getelementptr inbounds nuw i32, ptr %p, i64 %idx.conv
  ret ptr %arrayidx
}

define ptr @nuw_inbounds_implies_nuw_inbounds_sext(ptr %p, i32 %i) {
; CHECK-LABEL: @nuw_inbounds_implies_nuw_inbounds_sext(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = sext i32 [[I:%.*]] to i64
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw i32, ptr [[P:%.*]], i64 [[TMP0]]
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 4
; CHECK-NEXT:    ret ptr [[ARRAYIDX2]]
;
entry:
  %idx = add nuw i32 %i, 1
  %idx.conv = sext i32 %idx to i64
  %arrayidx = getelementptr inbounds nuw i32, ptr %p, i64 %idx.conv
  ret ptr %arrayidx
}

define ptr @nuw_inbounds_implies_nuw_inbounds_zext(ptr %p, i32 %i) {
; CHECK-LABEL: @nuw_inbounds_implies_nuw_inbounds_zext(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = zext i32 [[I:%.*]] to i64
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw i32, ptr [[P:%.*]], i64 [[TMP0]]
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 4
; CHECK-NEXT:    ret ptr [[ARRAYIDX2]]
;
entry:
  %idx = add nuw i32 %i, 1
  %idx.conv = zext i32 %idx to i64
  %arrayidx = getelementptr inbounds nuw i32, ptr %p, i64 %idx.conv
  ret ptr %arrayidx
}

define ptr @nuw_inbounds_implies_nuw_inbounds_zext_negative(ptr %p, i8 %i) {
; CHECK-LABEL: @nuw_inbounds_implies_nuw_inbounds_zext_negative(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[IDX_CONV:%.*]] = zext i8 [[I:%.*]] to i64
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[P:%.*]], i64 [[IDX_CONV]]
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i8, ptr [[ARRAYIDX]], i64 960
; CHECK-NEXT:    ret ptr [[ARRAYIDX2]]
;
entry:
  %idx = add nuw i8 %i, -16
  %idx.conv = zext i8 %idx to i64
  %arrayidx = getelementptr inbounds nuw i32, ptr %p, i64 %idx.conv
  ret ptr %arrayidx
}

; This test and the following ones mask most bits of %v off to facilitate
; validation with alive2 while still allowing interesting values.
define ptr @nuw_inbounds_implies_nuw_inbounds_nested(ptr %p, i64 %i, i64 %v) {
; CHECK-LABEL: @nuw_inbounds_implies_nuw_inbounds_nested(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[V_MASKED:%.*]] = and i64 [[V:%.*]], -1152921488500719601
; CHECK-NEXT:    [[IDX22:%.*]] = add i64 [[I:%.*]], [[V_MASKED]]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw i32, ptr [[P:%.*]], i64 [[IDX22]]
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 4
; CHECK-NEXT:    ret ptr [[ARRAYIDX3]]
;
entry:
  %v.masked = and i64 %v, u0xf0000003c000000f
  %idx1 = add nuw i64 %i, 1
  %idx2 = add nuw i64 %idx1, %v.masked
  %arrayidx = getelementptr inbounds nuw i32, ptr %p, i64 %idx2
  ret ptr %arrayidx
}

define ptr @nuw_inbounds_implies_nuw_inbounds_nested_negative(ptr %p, i64 %i, i64 %v) {
; CHECK-LABEL: @nuw_inbounds_implies_nuw_inbounds_nested_negative(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[V:%.*]] = and i64 [[V1:%.*]], -1152921488500719601
; CHECK-NEXT:    [[IDX22:%.*]] = add i64 [[I:%.*]], [[V]]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw i32, ptr [[P:%.*]], i64 [[IDX22]]
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 -64
; CHECK-NEXT:    ret ptr [[ARRAYIDX3]]
;
entry:
  %v.masked = and i64 %v, u0xf0000003c000000f
  %idx1 = add nuw i64 %i, -16
  %idx2 = add nuw i64 %idx1, %v.masked
  %arrayidx = getelementptr inbounds nuw i32, ptr %p, i64 %idx2
  ret ptr %arrayidx
}

define ptr @nuw_implies_nuw_nested(ptr %p, i64 %i, i64 %v) {
; CHECK-LABEL: @nuw_implies_nuw_nested(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[V:%.*]] = and i64 [[V1:%.*]], -1152921488500719601
; CHECK-NEXT:    [[IDX22:%.*]] = add i64 [[I:%.*]], [[V]]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr nuw i32, ptr [[P:%.*]], i64 [[IDX22]]
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr nuw i8, ptr [[TMP0]], i64 4
; CHECK-NEXT:    ret ptr [[ARRAYIDX3]]
;
entry:
  %v.masked = and i64 %v, u0xf0000003c000000f
  %idx1 = add nuw i64 %i, 1
  %idx2 = add nuw i64 %idx1, %v.masked
  %arrayidx = getelementptr nuw i32, ptr %p, i64 %idx2
  ret ptr %arrayidx
}

define ptr @nuw_implies_nuw_nested_negative(ptr %p, i64 %i, i64 %v) {
; CHECK-LABEL: @nuw_implies_nuw_nested_negative(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[V:%.*]] = and i64 [[V1:%.*]], -1152921488500719601
; CHECK-NEXT:    [[IDX22:%.*]] = add i64 [[I:%.*]], [[V]]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr nuw i32, ptr [[P:%.*]], i64 [[IDX22]]
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr nuw i8, ptr [[TMP0]], i64 -64
; CHECK-NEXT:    ret ptr [[ARRAYIDX3]]
;
entry:
  %v.masked = and i64 %v, u0xf0000003c000000f
  %idx1 = add nuw i64 %i, -16
  %idx2 = add nuw i64 %idx1, %v.masked
  %arrayidx = getelementptr nuw i32, ptr %p, i64 %idx2
  ret ptr %arrayidx
}

define ptr @nuw_nusw_implies_nuw_nusw_nested(ptr %p, i64 %i, i64 %v) {
; CHECK-LABEL: @nuw_nusw_implies_nuw_nusw_nested(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[V:%.*]] = and i64 [[V1:%.*]], -1152921488500719601
; CHECK-NEXT:    [[IDX22:%.*]] = add i64 [[I:%.*]], [[V]]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr nusw nuw i32, ptr [[P:%.*]], i64 [[IDX22]]
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr nusw nuw i8, ptr [[TMP0]], i64 4
; CHECK-NEXT:    ret ptr [[ARRAYIDX3]]
;
entry:
  %v.masked = and i64 %v, u0xf0000003c000000f
  %idx1 = add nuw i64 %i, 1
  %idx2 = add nuw i64 %idx1, %v.masked
  %arrayidx = getelementptr nusw nuw i32, ptr %p, i64 %idx2
  ret ptr %arrayidx
}

define ptr @nuw_nusw_implies_nuw_nusw_nested_negative(ptr %p, i64 %i, i64 %v) {
; CHECK-LABEL: @nuw_nusw_implies_nuw_nusw_nested_negative(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[V:%.*]] = and i64 [[V1:%.*]], -1152921488500719601
; CHECK-NEXT:    [[IDX22:%.*]] = add i64 [[I:%.*]], [[V]]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr nusw nuw i32, ptr [[P:%.*]], i64 [[IDX22]]
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr nusw nuw i8, ptr [[TMP0]], i64 -64
; CHECK-NEXT:    ret ptr [[ARRAYIDX3]]
;
entry:
  %v.masked = and i64 %v, u0xf0000003c000000f
  %idx1 = add nuw i64 %i, -16
  %idx2 = add nuw i64 %idx1, %v.masked
  %arrayidx = getelementptr nusw nuw i32, ptr %p, i64 %idx2
  ret ptr %arrayidx
}

; Neither inbounds nor nuw can be preserved.
define ptr @nuw_inbounds_nested_not_all_nuw(ptr %p, i64 %i, i64 %v) {
; CHECK-LABEL: @nuw_inbounds_nested_not_all_nuw(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[V:%.*]] = and i64 [[V1:%.*]], -1152921488500719601
; CHECK-NEXT:    [[IDX22:%.*]] = add i64 [[I:%.*]], [[V]]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[IDX22]]
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr i8, ptr [[TMP0]], i64 4
; CHECK-NEXT:    ret ptr [[ARRAYIDX3]]
;
entry:
  %v.masked = and i64 %v, u0xf0000003c000000f
  %idx1 = add nuw i64 %i, 1
  %idx2 = add i64 %idx1, %v.masked
  %arrayidx = getelementptr inbounds nuw i32, ptr %p, i64 %idx2
  ret ptr %arrayidx
}

define ptr @nuw_inbounds_implies_nuw_inbounds_multilevel(ptr %src, i64 %i1, i64 %i2) {
; CHECK-LABEL: @nuw_inbounds_implies_nuw_inbounds_multilevel(
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw [4 x i32], ptr [[SRC:%.*]], i64 [[I1:%.*]], i64 [[I2:%.*]]
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 24
; CHECK-NEXT:    ret ptr [[ARRAYIDX3]]
;
  %idx1 = add nuw i64 %i1, 1
  %idx2 = add nuw i64 2, %i2
  %arrayidx = getelementptr inbounds nuw [4 x i32], ptr %src, i64 %idx1, i64 %idx2
  ret ptr %arrayidx
}

; Neither inbounds nor nuw can be preserved.
define ptr @nuw_inbounds_multilevel_not_all_nuw(ptr %src, i64 %i1, i64 %i2) {
; CHECK-LABEL: @nuw_inbounds_multilevel_not_all_nuw(
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr [4 x i32], ptr [[SRC:%.*]], i64 [[I1:%.*]], i64 [[I2:%.*]]
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr i8, ptr [[TMP1]], i64 24
; CHECK-NEXT:    ret ptr [[ARRAYIDX3]]
;
  %idx1 = add nuw i64 %i1, 1
  %idx2 = add i64 2, %i2
  %arrayidx = getelementptr inbounds nuw [4 x i32], ptr %src, i64 %idx1, i64 %idx2
  ret ptr %arrayidx
}

; Missing information about non-extracted indices does not matter.
define ptr @nuw_inbounds_implies_nuw_inbounds_multilevel_one_unfolded(ptr %src, i64 %i1, i64 %v) {
; CHECK-LABEL: @nuw_inbounds_implies_nuw_inbounds_multilevel_one_unfolded(
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw [4 x i32], ptr [[SRC:%.*]], i64 [[I1:%.*]], i64 [[V:%.*]]
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 16
; CHECK-NEXT:    ret ptr [[ARRAYIDX2]]
;
  %idx1 = add nuw i64 %i1, 1
  %arrayidx = getelementptr inbounds nuw [4 x i32], ptr %src, i64 %idx1, i64 %v
  ret ptr %arrayidx
}

define ptr @nuw_inbounds_implies_nuw_inbounds_multilevel_other_unfolded(ptr %src, i64 %i1, i64 %v) {
; CHECK-LABEL: @nuw_inbounds_implies_nuw_inbounds_multilevel_other_unfolded(
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw [4 x i32], ptr [[SRC:%.*]], i64 [[V:%.*]], i64 [[I1:%.*]]
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 4
; CHECK-NEXT:    ret ptr [[ARRAYIDX2]]
;
  %idx1 = add nuw i64 %i1, 1
  %arrayidx = getelementptr inbounds nuw [4 x i32], ptr %src, i64 %v, i64 %idx1
  ret ptr %arrayidx
}