diff options
author | Noah Goldstein <goldstein.w.n@gmail.com> | 2024-02-13 00:42:24 -0600 |
---|---|---|
committer | Noah Goldstein <goldstein.w.n@gmail.com> | 2024-02-13 12:53:16 -0600 |
commit | 79ce933114e46c891a5632f7ad4a004b93a5b808 (patch) | |
tree | 8019d498d81ac254ffad471230c91e1364261bf5 | |
parent | 2422e969bf0a05b9b5cb4a6233a5f8dd335c2de5 (diff) | |
download | llvm-79ce933114e46c891a5632f7ad4a004b93a5b808.zip llvm-79ce933114e46c891a5632f7ad4a004b93a5b808.tar.gz llvm-79ce933114e46c891a5632f7ad4a004b93a5b808.tar.bz2 |
[InstCombine] Extend `(lshr/shl (shl/lshr -1, x), x)` -> `(lshr/shl -1, x)` for multi-use
We previously did this only if the inner `(shl/lshr -1, x)` was
one-use. No instructions are added even if the inner `(shl/lshr -1,
x)` is multi-use, and this canonicalization both makes the resulting
instruction easier to analyze and shrinks its dependency chain.
Closes #81576
7 files changed, 61 insertions, 49 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp index 3fbe98f..eafd288 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp @@ -1206,6 +1206,12 @@ Instruction *InstCombinerImpl::visitShl(BinaryOperator &I) { return BinaryOperator::CreateAnd(Mask, X); } + // Transform (-1 >> y) << y to -1 << y + if (match(Op0, m_LShr(m_AllOnes(), m_Specific(Op1)))) { + Constant *AllOnes = ConstantInt::getAllOnesValue(Ty); + return BinaryOperator::CreateShl(AllOnes, Op1); + } + Constant *C1; if (match(Op1, m_Constant(C1))) { Constant *C2; @@ -1493,6 +1499,12 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) { return BinaryOperator::CreateAnd(Mask, X); } + // Transform (-1 << y) >> y to -1 >> y + if (match(Op0, m_Shl(m_AllOnes(), m_Specific(Op1)))) { + Constant *AllOnes = ConstantInt::getAllOnesValue(Ty); + return BinaryOperator::CreateLShr(AllOnes, Op1); + } + if (Instruction *Overflow = foldLShrOverflowBit(I)) return Overflow; diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll index 81f9fe4..d13129c 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll @@ -22,7 +22,7 @@ define i1 @p0(i8 %x, i8 %y) { ; CHECK-LABEL: @p0( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]] +; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]] ; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X:%.*]] ; CHECK-NEXT: ret i1 [[RET]] ; @@ -42,7 +42,7 @@ define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @p1_vec( ; CHECK-NEXT: [[T0:%.*]] = shl nsw <2 x i8> <i8 -1, i8 
-1>, [[Y:%.*]] ; CHECK-NEXT: call void @use2i8(<2 x i8> [[T0]]) -; CHECK-NEXT: [[T1:%.*]] = lshr exact <2 x i8> [[T0]], [[Y]] +; CHECK-NEXT: [[T1:%.*]] = lshr <2 x i8> <i8 -1, i8 -1>, [[Y]] ; CHECK-NEXT: [[RET:%.*]] = icmp uge <2 x i8> [[T1]], [[X:%.*]] ; CHECK-NEXT: ret <2 x i1> [[RET]] ; @@ -58,7 +58,7 @@ define <3 x i1> @p2_vec_undef0(<3 x i8> %x, <3 x i8> %y) { ; CHECK-LABEL: @p2_vec_undef0( ; CHECK-NEXT: [[T0:%.*]] = shl <3 x i8> <i8 -1, i8 undef, i8 -1>, [[Y:%.*]] ; CHECK-NEXT: call void @use3i8(<3 x i8> [[T0]]) -; CHECK-NEXT: [[T1:%.*]] = lshr exact <3 x i8> [[T0]], [[Y]] +; CHECK-NEXT: [[T1:%.*]] = lshr <3 x i8> <i8 -1, i8 -1, i8 -1>, [[Y]] ; CHECK-NEXT: [[RET:%.*]] = icmp uge <3 x i8> [[T1]], [[X:%.*]] ; CHECK-NEXT: ret <3 x i1> [[RET]] ; @@ -80,7 +80,7 @@ define i1 @c0(i8 %y) { ; CHECK-LABEL: @c0( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]] +; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]] ; CHECK-NEXT: [[X:%.*]] = call i8 @gen8() ; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[T1]] ; CHECK-NEXT: ret i1 [[RET]] @@ -98,7 +98,7 @@ define i1 @c1(i8 %y) { ; CHECK-LABEL: @c1( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]] +; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]] ; CHECK-NEXT: [[X:%.*]] = call i8 @gen8() ; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[T1]] ; CHECK-NEXT: ret i1 [[RET]] @@ -116,7 +116,7 @@ define i1 @c2(i8 %y) { ; CHECK-LABEL: @c2( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]] +; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]] ; CHECK-NEXT: [[X:%.*]] = call i8 @gen8() ; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[T1]] ; CHECK-NEXT: ret i1 [[RET]] @@ -138,7 +138,7 @@ define i1 @oneuse0(i8 %x, i8 %y) { ; CHECK-LABEL: @oneuse0( ; CHECK-NEXT: 
[[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]] +; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]] ; CHECK-NEXT: call void @use8(i8 [[T1]]) ; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X:%.*]] ; CHECK-NEXT: ret i1 [[RET]] @@ -156,7 +156,7 @@ define i1 @oneuse1(i8 %x, i8 %y) { ; CHECK-LABEL: @oneuse1( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]] +; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]] ; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T2]]) ; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X]] @@ -175,7 +175,7 @@ define i1 @oneuse2(i8 %x, i8 %y) { ; CHECK-LABEL: @oneuse2( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]] +; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]] ; CHECK-NEXT: call void @use8(i8 [[T1]]) ; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T2]]) @@ -200,7 +200,7 @@ define i1 @n0(i8 %x, i8 %y, i8 %notx) { ; CHECK-LABEL: @n0( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]] +; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]] ; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]] ; CHECK-NEXT: [[RET:%.*]] = icmp eq i8 [[T2]], [[NOTX:%.*]] ; CHECK-NEXT: ret i1 [[RET]] diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll index 321a115..a1517b36 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll @@ -22,7 +22,7 @@ define i1 
@p0(i8 %x, i8 %y) { ; CHECK-LABEL: @p0( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]] +; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]] ; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X:%.*]] ; CHECK-NEXT: ret i1 [[RET]] ; @@ -42,7 +42,7 @@ define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @p1_vec( ; CHECK-NEXT: [[T0:%.*]] = shl nsw <2 x i8> <i8 -1, i8 -1>, [[Y:%.*]] ; CHECK-NEXT: call void @use2i8(<2 x i8> [[T0]]) -; CHECK-NEXT: [[T1:%.*]] = lshr exact <2 x i8> [[T0]], [[Y]] +; CHECK-NEXT: [[T1:%.*]] = lshr <2 x i8> <i8 -1, i8 -1>, [[Y]] ; CHECK-NEXT: [[RET:%.*]] = icmp ult <2 x i8> [[T1]], [[X:%.*]] ; CHECK-NEXT: ret <2 x i1> [[RET]] ; @@ -58,7 +58,7 @@ define <3 x i1> @p2_vec_undef0(<3 x i8> %x, <3 x i8> %y) { ; CHECK-LABEL: @p2_vec_undef0( ; CHECK-NEXT: [[T0:%.*]] = shl <3 x i8> <i8 -1, i8 undef, i8 -1>, [[Y:%.*]] ; CHECK-NEXT: call void @use3i8(<3 x i8> [[T0]]) -; CHECK-NEXT: [[T1:%.*]] = lshr exact <3 x i8> [[T0]], [[Y]] +; CHECK-NEXT: [[T1:%.*]] = lshr <3 x i8> <i8 -1, i8 -1, i8 -1>, [[Y]] ; CHECK-NEXT: [[RET:%.*]] = icmp ult <3 x i8> [[T1]], [[X:%.*]] ; CHECK-NEXT: ret <3 x i1> [[RET]] ; @@ -80,7 +80,7 @@ define i1 @c0(i8 %y) { ; CHECK-LABEL: @c0( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]] +; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]] ; CHECK-NEXT: [[X:%.*]] = call i8 @gen8() ; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[T1]] ; CHECK-NEXT: ret i1 [[RET]] @@ -98,7 +98,7 @@ define i1 @c1(i8 %y) { ; CHECK-LABEL: @c1( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]] +; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]] ; CHECK-NEXT: [[X:%.*]] = call i8 @gen8() ; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[T1]] ; CHECK-NEXT: ret i1 [[RET]] @@ 
-116,7 +116,7 @@ define i1 @c2(i8 %y) { ; CHECK-LABEL: @c2( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]] +; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]] ; CHECK-NEXT: [[X:%.*]] = call i8 @gen8() ; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[T1]] ; CHECK-NEXT: ret i1 [[RET]] @@ -138,7 +138,7 @@ define i1 @oneuse0(i8 %x, i8 %y) { ; CHECK-LABEL: @oneuse0( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]] +; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]] ; CHECK-NEXT: call void @use8(i8 [[T1]]) ; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X:%.*]] ; CHECK-NEXT: ret i1 [[RET]] @@ -156,7 +156,7 @@ define i1 @oneuse1(i8 %x, i8 %y) { ; CHECK-LABEL: @oneuse1( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]] +; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]] ; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T2]]) ; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X]] @@ -175,7 +175,7 @@ define i1 @oneuse2(i8 %x, i8 %y) { ; CHECK-LABEL: @oneuse2( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]] +; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]] ; CHECK-NEXT: call void @use8(i8 [[T1]]) ; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T2]]) @@ -200,7 +200,7 @@ define i1 @n0(i8 %x, i8 %y, i8 %notx) { ; CHECK-LABEL: @n0( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]] +; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]] ; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]] ; CHECK-NEXT: [[RET:%.*]] = icmp ne i8 [[T2]], 
[[NOTX:%.*]] ; CHECK-NEXT: ret i1 [[RET]] diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-d.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-d.ll index 6b7061f..4887385 100644 --- a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-d.ll +++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-d.ll @@ -18,7 +18,7 @@ define i32 @t0_basic(i64 %x, i32 %nbits) { ; CHECK-LABEL: @t0_basic( ; CHECK-NEXT: [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64 ; CHECK-NEXT: [[T1:%.*]] = shl nsw i64 -1, [[T0]] -; CHECK-NEXT: [[T2:%.*]] = lshr exact i64 [[T1]], [[T0]] +; CHECK-NEXT: [[T2:%.*]] = lshr i64 -1, [[T0]] ; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -33 ; CHECK-NEXT: call void @use64(i64 [[T0]]) ; CHECK-NEXT: call void @use64(i64 [[T1]]) @@ -54,7 +54,7 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) { ; CHECK-LABEL: @t1_vec_splat( ; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64> ; CHECK-NEXT: [[T1:%.*]] = shl nsw <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]] -; CHECK-NEXT: [[T2:%.*]] = lshr exact <8 x i64> [[T1]], [[T0]] +; CHECK-NEXT: [[T2:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]] ; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33> ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]]) ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T1]]) @@ -85,7 +85,7 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) { ; CHECK-LABEL: @t2_vec_splat_undef( ; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64> ; CHECK-NEXT: [[T1:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, [[T0]] -; CHECK-NEXT: 
[[T2:%.*]] = lshr exact <8 x i64> [[T1]], [[T0]] +; CHECK-NEXT: [[T2:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]] ; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 undef, i32 -33> ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]]) ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T1]]) @@ -116,7 +116,7 @@ define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) { ; CHECK-LABEL: @t3_vec_nonsplat( ; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64> ; CHECK-NEXT: [[T1:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, [[T0]] -; CHECK-NEXT: [[T2:%.*]] = lshr exact <8 x i64> [[T1]], [[T0]] +; CHECK-NEXT: [[T2:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]] ; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -64, i32 -63, i32 -33, i32 -32, i32 63, i32 64, i32 undef, i32 65> ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]]) ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T1]]) @@ -149,7 +149,7 @@ define i32 @n4_extrause0(i64 %x, i32 %nbits) { ; CHECK-LABEL: @n4_extrause0( ; CHECK-NEXT: [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64 ; CHECK-NEXT: [[T1:%.*]] = shl nsw i64 -1, [[T0]] -; CHECK-NEXT: [[T2:%.*]] = lshr exact i64 [[T1]], [[T0]] +; CHECK-NEXT: [[T2:%.*]] = lshr i64 -1, [[T0]] ; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -33 ; CHECK-NEXT: call void @use64(i64 [[T0]]) ; CHECK-NEXT: call void @use64(i64 [[T1]]) @@ -182,7 +182,7 @@ define i32 @n5_extrause1(i64 %x, i32 %nbits) { ; CHECK-LABEL: @n5_extrause1( ; CHECK-NEXT: [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64 ; CHECK-NEXT: [[T1:%.*]] = shl nsw i64 -1, [[T0]] -; CHECK-NEXT: [[T2:%.*]] = lshr exact i64 [[T1]], [[T0]] +; CHECK-NEXT: [[T2:%.*]] = lshr i64 -1, [[T0]] ; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -33 ; CHECK-NEXT: call void @use64(i64 [[T0]]) ; CHECK-NEXT: call void @use64(i64 
[[T1]]) @@ -215,7 +215,7 @@ define i32 @n6_extrause2(i64 %x, i32 %nbits) { ; CHECK-LABEL: @n6_extrause2( ; CHECK-NEXT: [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64 ; CHECK-NEXT: [[T1:%.*]] = shl nsw i64 -1, [[T0]] -; CHECK-NEXT: [[T2:%.*]] = lshr exact i64 [[T1]], [[T0]] +; CHECK-NEXT: [[T2:%.*]] = lshr i64 -1, [[T0]] ; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -33 ; CHECK-NEXT: call void @use64(i64 [[T0]]) ; CHECK-NEXT: call void @use64(i64 [[T1]]) diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll index 92805c6..9c096d1 100644 --- a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll +++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll @@ -16,7 +16,7 @@ declare void @use32(i32) define i32 @t0_basic(i32 %x, i32 %nbits) { ; CHECK-LABEL: @t0_basic( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]] -; CHECK-NEXT: [[T1:%.*]] = lshr exact i32 [[T0]], [[NBITS]] +; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[NBITS]] ; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -1 ; CHECK-NEXT: call void @use32(i32 [[T0]]) ; CHECK-NEXT: call void @use32(i32 [[T1]]) @@ -43,7 +43,7 @@ declare void @use8xi32(<8 x i32>) define <8 x i32> @t2_vec_splat(<8 x i32> %x, <8 x i32> %nbits) { ; CHECK-LABEL: @t2_vec_splat( ; CHECK-NEXT: [[T0:%.*]] = shl nsw <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]] -; CHECK-NEXT: [[T1:%.*]] = lshr exact <8 x i32> [[T0]], [[NBITS]] +; CHECK-NEXT: [[T1:%.*]] = lshr <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[NBITS]] ; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1> ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]]) ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T1]]) @@ -66,7 +66,7 @@ define <8 x 
i32> @t2_vec_splat(<8 x i32> %x, <8 x i32> %nbits) { define <8 x i32> @t2_vec_splat_undef(<8 x i32> %x, <8 x i32> %nbits) { ; CHECK-LABEL: @t2_vec_splat_undef( ; CHECK-NEXT: [[T0:%.*]] = shl <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 undef, i32 -1>, [[NBITS:%.*]] -; CHECK-NEXT: [[T1:%.*]] = lshr exact <8 x i32> [[T0]], [[NBITS]] +; CHECK-NEXT: [[T1:%.*]] = lshr <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[NBITS]] ; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 undef, i32 -1> ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]]) ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T1]]) @@ -89,7 +89,7 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i32> %x, <8 x i32> %nbits) { define <8 x i32> @t2_vec_nonsplat(<8 x i32> %x, <8 x i32> %nbits) { ; CHECK-LABEL: @t2_vec_nonsplat( ; CHECK-NEXT: [[T0:%.*]] = shl nsw <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]] -; CHECK-NEXT: [[T1:%.*]] = lshr exact <8 x i32> [[T0]], [[NBITS]] +; CHECK-NEXT: [[T1:%.*]] = lshr <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[NBITS]] ; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -31, i32 -1, i32 0, i32 1, i32 31, i32 32, i32 33> ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]]) ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T1]]) @@ -114,7 +114,7 @@ define <8 x i32> @t2_vec_nonsplat(<8 x i32> %x, <8 x i32> %nbits) { define i32 @n3_extrause(i32 %x, i32 %nbits) { ; CHECK-LABEL: @n3_extrause( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]] -; CHECK-NEXT: [[T1:%.*]] = lshr exact i32 [[T0]], [[NBITS]] +; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[NBITS]] ; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]] ; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -1 ; CHECK-NEXT: call void @use32(i32 [[T0]]) diff --git 
a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-d.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-d.ll index bdc7beb..1a977f6 100644 --- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-d.ll +++ b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-d.ll @@ -18,7 +18,7 @@ define i32 @t0_basic(i64 %x, i32 %nbits) { ; CHECK-LABEL: @t0_basic( ; CHECK-NEXT: [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64 ; CHECK-NEXT: [[T1:%.*]] = shl nsw i64 -1, [[T0]] -; CHECK-NEXT: [[T2:%.*]] = lshr exact i64 [[T1]], [[T0]] +; CHECK-NEXT: [[T2:%.*]] = lshr i64 -1, [[T0]] ; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -32 ; CHECK-NEXT: [[T4:%.*]] = and i64 [[T2]], [[X:%.*]] ; CHECK-NEXT: call void @use64(i64 [[T0]]) @@ -56,7 +56,7 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) { ; CHECK-LABEL: @t1_vec_splat( ; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64> ; CHECK-NEXT: [[T1:%.*]] = shl nsw <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]] -; CHECK-NEXT: [[T2:%.*]] = lshr exact <8 x i64> [[T1]], [[T0]] +; CHECK-NEXT: [[T2:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]] ; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32> ; CHECK-NEXT: [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]] ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]]) @@ -89,7 +89,7 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) { ; CHECK-LABEL: @t2_vec_splat_undef( ; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64> ; CHECK-NEXT: [[T1:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, [[T0]] -; CHECK-NEXT: [[T2:%.*]] = lshr exact <8 x i64> [[T1]], [[T0]] +; CHECK-NEXT: 
[[T2:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]] ; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 undef, i32 -32> ; CHECK-NEXT: [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]] ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]]) @@ -122,7 +122,7 @@ define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) { ; CHECK-LABEL: @t3_vec_nonsplat( ; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64> ; CHECK-NEXT: [[T1:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, [[T0]] -; CHECK-NEXT: [[T2:%.*]] = lshr exact <8 x i64> [[T1]], [[T0]] +; CHECK-NEXT: [[T2:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]] ; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -1, i32 0, i32 1, i32 31, i32 32, i32 undef, i32 64> ; CHECK-NEXT: [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]] ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]]) @@ -157,7 +157,7 @@ define i32 @n4_extrause(i64 %x, i32 %nbits) { ; CHECK-LABEL: @n4_extrause( ; CHECK-NEXT: [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64 ; CHECK-NEXT: [[T1:%.*]] = shl nsw i64 -1, [[T0]] -; CHECK-NEXT: [[T2:%.*]] = lshr exact i64 [[T1]], [[T0]] +; CHECK-NEXT: [[T2:%.*]] = lshr i64 -1, [[T0]] ; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -32 ; CHECK-NEXT: [[T4:%.*]] = and i64 [[T2]], [[X:%.*]] ; CHECK-NEXT: call void @use64(i64 [[T0]]) diff --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-d.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-d.ll index c91e5a0..549729f 100644 --- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-d.ll +++ b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-d.ll @@ -18,7 +18,7 @@ declare void @use32(i32) define i32 @t0_basic(i32 %x, i32 %nbits) { ; CHECK-LABEL: 
@t0_basic( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]] -; CHECK-NEXT: [[T1:%.*]] = lshr exact i32 [[T0]], [[NBITS]] +; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[NBITS]] ; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]] ; CHECK-NEXT: call void @use32(i32 [[T0]]) ; CHECK-NEXT: call void @use32(i32 [[T1]]) @@ -39,7 +39,7 @@ define i32 @t0_basic(i32 %x, i32 %nbits) { define i32 @t1_bigger_shift(i32 %x, i32 %nbits) { ; CHECK-LABEL: @t1_bigger_shift( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]] -; CHECK-NEXT: [[T1:%.*]] = lshr exact i32 [[T0]], [[NBITS]] +; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[NBITS]] ; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]] ; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], 1 ; CHECK-NEXT: call void @use32(i32 [[T0]]) @@ -68,7 +68,7 @@ declare void @use3xi32(<3 x i32>) define <3 x i32> @t2_vec_splat(<3 x i32> %x, <3 x i32> %nbits) { ; CHECK-LABEL: @t2_vec_splat( ; CHECK-NEXT: [[T0:%.*]] = shl nsw <3 x i32> <i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]] -; CHECK-NEXT: [[T1:%.*]] = lshr exact <3 x i32> [[T0]], [[NBITS]] +; CHECK-NEXT: [[T1:%.*]] = lshr <3 x i32> <i32 -1, i32 -1, i32 -1>, [[NBITS]] ; CHECK-NEXT: [[T2:%.*]] = and <3 x i32> [[T1]], [[X:%.*]] ; CHECK-NEXT: [[T3:%.*]] = add <3 x i32> [[NBITS]], <i32 1, i32 1, i32 1> ; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T0]]) @@ -93,7 +93,7 @@ define <3 x i32> @t2_vec_splat(<3 x i32> %x, <3 x i32> %nbits) { define <3 x i32> @t3_vec_nonsplat(<3 x i32> %x, <3 x i32> %nbits) { ; CHECK-LABEL: @t3_vec_nonsplat( ; CHECK-NEXT: [[T0:%.*]] = shl nsw <3 x i32> <i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]] -; CHECK-NEXT: [[T1:%.*]] = lshr exact <3 x i32> [[T0]], [[NBITS]] +; CHECK-NEXT: [[T1:%.*]] = lshr <3 x i32> <i32 -1, i32 -1, i32 -1>, [[NBITS]] ; CHECK-NEXT: [[T2:%.*]] = and <3 x i32> [[T1]], [[X:%.*]] ; CHECK-NEXT: [[T3:%.*]] = add <3 x i32> [[NBITS]], <i32 1, i32 0, i32 2> ; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T0]]) @@ -118,7 +118,7 @@ define <3 x i32> @t3_vec_nonsplat(<3 x 
i32> %x, <3 x i32> %nbits) { define <3 x i32> @t4_vec_undef(<3 x i32> %x, <3 x i32> %nbits) { ; CHECK-LABEL: @t4_vec_undef( ; CHECK-NEXT: [[T0:%.*]] = shl <3 x i32> <i32 -1, i32 undef, i32 -1>, [[NBITS:%.*]] -; CHECK-NEXT: [[T1:%.*]] = lshr exact <3 x i32> [[T0]], [[NBITS]] +; CHECK-NEXT: [[T1:%.*]] = lshr <3 x i32> <i32 -1, i32 -1, i32 -1>, [[NBITS]] ; CHECK-NEXT: [[T2:%.*]] = and <3 x i32> [[T1]], [[X:%.*]] ; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T0]]) ; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T1]]) @@ -147,7 +147,7 @@ define i32 @t5_commutativity0(i32 %nbits) { ; CHECK-LABEL: @t5_commutativity0( ; CHECK-NEXT: [[X:%.*]] = call i32 @gen32() ; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]] -; CHECK-NEXT: [[T1:%.*]] = lshr exact i32 [[T0]], [[NBITS]] +; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[NBITS]] ; CHECK-NEXT: [[T2:%.*]] = and i32 [[X]], [[T1]] ; CHECK-NEXT: call void @use32(i32 [[T0]]) ; CHECK-NEXT: call void @use32(i32 [[T1]]) @@ -169,7 +169,7 @@ define i32 @t5_commutativity0(i32 %nbits) { define i32 @t6_commutativity1(i32 %nbits0, i32 %nbits1) { ; CHECK-LABEL: @t6_commutativity1( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS0:%.*]] -; CHECK-NEXT: [[T1:%.*]] = lshr exact i32 [[T0]], [[NBITS0]] +; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[NBITS0]] ; CHECK-NEXT: [[T2:%.*]] = shl nsw i32 -1, [[NBITS1:%.*]] ; CHECK-NEXT: [[T3:%.*]] = lshr i32 [[T0]], [[NBITS1]] ; CHECK-NEXT: [[T4:%.*]] = and i32 [[T3]], [[T1]] @@ -197,7 +197,7 @@ define i32 @t6_commutativity1(i32 %nbits0, i32 %nbits1) { define i32 @t7_commutativity2(i32 %nbits0, i32 %nbits1) { ; CHECK-LABEL: @t7_commutativity2( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS0:%.*]] -; CHECK-NEXT: [[T1:%.*]] = lshr exact i32 [[T0]], [[NBITS0]] +; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[NBITS0]] ; CHECK-NEXT: [[T2:%.*]] = shl nsw i32 -1, [[NBITS1:%.*]] ; CHECK-NEXT: [[T3:%.*]] = lshr i32 [[T0]], [[NBITS1]] ; CHECK-NEXT: [[T4:%.*]] = and i32 [[T3]], [[T1]] @@ -228,7 +228,7 @@ define i32 
@t7_commutativity2(i32 %nbits0, i32 %nbits1) { define i32 @t8_nuw(i32 %x, i32 %nbits) { ; CHECK-LABEL: @t8_nuw( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]] -; CHECK-NEXT: [[T1:%.*]] = lshr exact i32 [[T0]], [[NBITS]] +; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[NBITS]] ; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]] ; CHECK-NEXT: call void @use32(i32 [[T0]]) ; CHECK-NEXT: call void @use32(i32 [[T1]]) @@ -249,7 +249,7 @@ define i32 @t8_nuw(i32 %x, i32 %nbits) { define i32 @t9_nsw(i32 %x, i32 %nbits) { ; CHECK-LABEL: @t9_nsw( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]] -; CHECK-NEXT: [[T1:%.*]] = lshr exact i32 [[T0]], [[NBITS]] +; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[NBITS]] ; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]] ; CHECK-NEXT: call void @use32(i32 [[T0]]) ; CHECK-NEXT: call void @use32(i32 [[T1]]) @@ -270,7 +270,7 @@ define i32 @t9_nsw(i32 %x, i32 %nbits) { define i32 @t10_nuw_nsw(i32 %x, i32 %nbits) { ; CHECK-LABEL: @t10_nuw_nsw( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]] -; CHECK-NEXT: [[T1:%.*]] = lshr exact i32 [[T0]], [[NBITS]] +; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[NBITS]] ; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]] ; CHECK-NEXT: call void @use32(i32 [[T0]]) ; CHECK-NEXT: call void @use32(i32 [[T1]]) @@ -298,7 +298,7 @@ define i32 @t11_assume_uge(i32 %x, i32 %masknbits, i32 %shiftnbits) { ; CHECK-NEXT: [[CMP:%.*]] = icmp uge i32 [[SHIFTNBITS:%.*]], [[MASKNBITS:%.*]] ; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]]) ; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[MASKNBITS]] -; CHECK-NEXT: [[T1:%.*]] = lshr exact i32 [[T0]], [[MASKNBITS]] +; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[MASKNBITS]] ; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]] ; CHECK-NEXT: call void @use32(i32 [[T0]]) ; CHECK-NEXT: call void @use32(i32 [[T1]]) |