Diffstat (limited to 'llvm/unittests/IR')
-rw-r--r--  llvm/unittests/IR/ConstantFPRangeTest.cpp  | 469
-rw-r--r--  llvm/unittests/IR/ConstantsTest.cpp        |  14
-rw-r--r--  llvm/unittests/IR/InstructionsTest.cpp     |  40
-rw-r--r--  llvm/unittests/IR/RuntimeLibcallsTest.cpp  |  12
4 files changed, 500 insertions, 35 deletions
diff --git a/llvm/unittests/IR/ConstantFPRangeTest.cpp b/llvm/unittests/IR/ConstantFPRangeTest.cpp
index 5bc516d..67fee96 100644
--- a/llvm/unittests/IR/ConstantFPRangeTest.cpp
+++ b/llvm/unittests/IR/ConstantFPRangeTest.cpp
@@ -7,6 +7,8 @@
//===----------------------------------------------------------------------===//
#include "llvm/IR/ConstantFPRange.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "gtest/gtest.h"
@@ -21,6 +23,7 @@ protected:
static ConstantFPRange Full;
static ConstantFPRange Empty;
static ConstantFPRange Finite;
+ static ConstantFPRange NonNaN;
static ConstantFPRange One;
static ConstantFPRange PosZero;
static ConstantFPRange NegZero;
@@ -43,6 +46,8 @@ ConstantFPRange ConstantFPRangeTest::Empty =
ConstantFPRange::getEmpty(APFloat::IEEEdouble());
ConstantFPRange ConstantFPRangeTest::Finite =
ConstantFPRange::getFinite(APFloat::IEEEdouble());
+ConstantFPRange ConstantFPRangeTest::NonNaN =
+ ConstantFPRange::getNonNaN(APFloat::IEEEdouble());
ConstantFPRange ConstantFPRangeTest::One = ConstantFPRange(APFloat(1.0));
ConstantFPRange ConstantFPRangeTest::PosZero = ConstantFPRange(
APFloat::getZero(APFloat::IEEEdouble(), /*Negative=*/false));
@@ -78,15 +83,21 @@ static void strictNext(APFloat &V) {
V.next(/*nextDown=*/false);
}
+enum class SparseLevel {
+ Dense,
+ SpecialValuesWithAllPowerOfTwos,
+ SpecialValuesOnly,
+};
+
template <typename Fn>
-static void EnumerateConstantFPRangesImpl(Fn TestFn, bool Exhaustive,
+static void EnumerateConstantFPRangesImpl(Fn TestFn, SparseLevel Level,
bool MayBeQNaN, bool MayBeSNaN) {
const fltSemantics &Sem = APFloat::Float8E4M3();
APFloat PosInf = APFloat::getInf(Sem, /*Negative=*/false);
APFloat NegInf = APFloat::getInf(Sem, /*Negative=*/true);
TestFn(ConstantFPRange(PosInf, NegInf, MayBeQNaN, MayBeSNaN));
- if (!Exhaustive) {
+ if (Level != SparseLevel::Dense) {
SmallVector<APFloat, 36> Values;
Values.push_back(APFloat::getInf(Sem, /*Negative=*/true));
Values.push_back(APFloat::getLargest(Sem, /*Negative=*/true));
@@ -94,10 +105,13 @@ static void EnumerateConstantFPRangesImpl(Fn TestFn, bool Exhaustive,
unsigned Exponents = APFloat::semanticsMaxExponent(Sem) -
APFloat::semanticsMinExponent(Sem) + 3;
unsigned MantissaBits = APFloat::semanticsPrecision(Sem) - 1;
- // Add -2^(max exponent), -2^(max exponent-1), ..., -2^(min exponent)
- for (unsigned M = Exponents - 2; M != 0; --M)
- Values.push_back(
- APFloat(Sem, APInt(BitWidth, (M + Exponents) << MantissaBits)));
+ if (Level == SparseLevel::SpecialValuesWithAllPowerOfTwos) {
+ // Add -2^(max exponent), -2^(max exponent-1), ..., -2^(min exponent)
+ for (unsigned M = Exponents - 2; M != 0; --M)
+ Values.push_back(
+ APFloat(Sem, APInt(BitWidth, (M + Exponents) << MantissaBits)));
+ }
+ Values.push_back(APFloat::getSmallestNormalized(Sem, /*Negative=*/true));
Values.push_back(APFloat::getSmallest(Sem, /*Negative=*/true));
Values.push_back(APFloat::getZero(Sem, /*Negative=*/true));
size_t E = Values.size();
@@ -126,26 +140,30 @@ static void EnumerateConstantFPRangesImpl(Fn TestFn, bool Exhaustive,
}
template <typename Fn>
-static void EnumerateConstantFPRanges(Fn TestFn, bool Exhaustive) {
- EnumerateConstantFPRangesImpl(TestFn, Exhaustive, /*MayBeQNaN=*/false,
+static void EnumerateConstantFPRanges(Fn TestFn, SparseLevel Level,
+ bool IgnoreSNaNs = false) {
+ EnumerateConstantFPRangesImpl(TestFn, Level, /*MayBeQNaN=*/false,
/*MayBeSNaN=*/false);
- EnumerateConstantFPRangesImpl(TestFn, Exhaustive, /*MayBeQNaN=*/false,
- /*MayBeSNaN=*/true);
- EnumerateConstantFPRangesImpl(TestFn, Exhaustive, /*MayBeQNaN=*/true,
+ EnumerateConstantFPRangesImpl(TestFn, Level, /*MayBeQNaN=*/true,
/*MayBeSNaN=*/false);
- EnumerateConstantFPRangesImpl(TestFn, Exhaustive, /*MayBeQNaN=*/true,
+ if (IgnoreSNaNs)
+ return;
+ EnumerateConstantFPRangesImpl(TestFn, Level, /*MayBeQNaN=*/false,
+ /*MayBeSNaN=*/true);
+ EnumerateConstantFPRangesImpl(TestFn, Level, /*MayBeQNaN=*/true,
/*MayBeSNaN=*/true);
}
template <typename Fn>
static void EnumerateTwoInterestingConstantFPRanges(Fn TestFn,
- bool Exhaustive) {
+ SparseLevel Level) {
EnumerateConstantFPRanges(
[&](const ConstantFPRange &CR1) {
EnumerateConstantFPRanges(
- [&](const ConstantFPRange &CR2) { TestFn(CR1, CR2); }, Exhaustive);
+ [&](const ConstantFPRange &CR2) { TestFn(CR1, CR2); }, Level,
+ /*IgnoreSNaNs=*/true);
},
- Exhaustive);
+ Level, /*IgnoreSNaNs=*/true);
}
template <typename Fn>
@@ -347,16 +365,25 @@ TEST_F(ConstantFPRangeTest, ExhaustivelyEnumerate) {
constexpr unsigned Expected = 4 * ((NNaNValues + 1) * NNaNValues / 2 + 1);
unsigned Count = 0;
EnumerateConstantFPRanges([&](const ConstantFPRange &) { ++Count; },
- /*Exhaustive=*/true);
+ SparseLevel::Dense);
EXPECT_EQ(Expected, Count);
}
TEST_F(ConstantFPRangeTest, Enumerate) {
- constexpr unsigned NNaNValues = 2 * ((1 << 4) - 2 + 4);
+ constexpr unsigned NNaNValues = 2 * ((1 << 4) - 2 + 5);
constexpr unsigned Expected = 4 * ((NNaNValues + 1) * NNaNValues / 2 + 1);
unsigned Count = 0;
EnumerateConstantFPRanges([&](const ConstantFPRange &) { ++Count; },
- /*Exhaustive=*/false);
+ SparseLevel::SpecialValuesWithAllPowerOfTwos);
+ EXPECT_EQ(Expected, Count);
+}
+
+TEST_F(ConstantFPRangeTest, EnumerateWithSpecialValuesOnly) {
+ constexpr unsigned NNaNValues = 2 * 5;
+ constexpr unsigned Expected = 4 * ((NNaNValues + 1) * NNaNValues / 2 + 1);
+ unsigned Count = 0;
+ EnumerateConstantFPRanges([&](const ConstantFPRange &) { ++Count; },
+ SparseLevel::SpecialValuesOnly);
EXPECT_EQ(Expected, Count);
}
@@ -458,7 +485,7 @@ TEST_F(ConstantFPRangeTest, FPClassify) {
EXPECT_EQ(SignBit, CR.getSignBit()) << CR;
EXPECT_EQ(Mask, CR.classify()) << CR;
},
- /*Exhaustive=*/true);
+ SparseLevel::Dense);
#endif
}
@@ -559,7 +586,7 @@ TEST_F(ConstantFPRangeTest, makeAllowedFCmpRegion) {
<< "Suboptimal result for makeAllowedFCmpRegion(" << Pred << ", "
<< CR << ")";
},
- /*Exhaustive=*/false);
+ SparseLevel::SpecialValuesWithAllPowerOfTwos);
}
#endif
}
@@ -670,7 +697,7 @@ TEST_F(ConstantFPRangeTest, makeSatisfyingFCmpRegion) {
<< ", " << CR << ")";
}
},
- /*Exhaustive=*/false);
+ SparseLevel::SpecialValuesWithAllPowerOfTwos);
}
#endif
}
@@ -803,13 +830,13 @@ TEST_F(ConstantFPRangeTest, negate) {
}
TEST_F(ConstantFPRangeTest, getWithout) {
- EXPECT_EQ(Full.getWithoutNaN(), ConstantFPRange::getNonNaN(Sem));
+ EXPECT_EQ(Full.getWithoutNaN(), NonNaN);
EXPECT_EQ(NaN.getWithoutNaN(), Empty);
EXPECT_EQ(NaN.getWithoutInf(), NaN);
EXPECT_EQ(PosInf.getWithoutInf(), Empty);
EXPECT_EQ(NegInf.getWithoutInf(), Empty);
- EXPECT_EQ(ConstantFPRange::getNonNaN(Sem).getWithoutInf(), Finite);
+ EXPECT_EQ(NonNaN.getWithoutInf(), Finite);
EXPECT_EQ(Zero.getWithoutInf(), Zero);
EXPECT_EQ(ConstantFPRange::getNonNaN(APFloat::getInf(Sem, /*Negative=*/true),
APFloat(3.0))
@@ -818,4 +845,400 @@ TEST_F(ConstantFPRangeTest, getWithout) {
APFloat::getLargest(Sem, /*Negative=*/true), APFloat(3.0)));
}
+TEST_F(ConstantFPRangeTest, cast) {
+ const fltSemantics &F16Sem = APFloat::IEEEhalf();
+ const fltSemantics &BF16Sem = APFloat::BFloat();
+ const fltSemantics &F32Sem = APFloat::IEEEsingle();
+ const fltSemantics &F8NanOnlySem = APFloat::Float8E4M3FN();
+ // normal -> normal (exact)
+ EXPECT_EQ(ConstantFPRange::getNonNaN(APFloat(1.0), APFloat(2.0)).cast(F32Sem),
+ ConstantFPRange::getNonNaN(APFloat(1.0f), APFloat(2.0f)));
+ EXPECT_EQ(
+ ConstantFPRange::getNonNaN(APFloat(-2.0f), APFloat(-1.0f)).cast(Sem),
+ ConstantFPRange::getNonNaN(APFloat(-2.0), APFloat(-1.0)));
+ // normal -> normal (inexact)
+ EXPECT_EQ(
+ ConstantFPRange::getNonNaN(APFloat(3.141592653589793),
+ APFloat(6.283185307179586))
+ .cast(F32Sem),
+ ConstantFPRange::getNonNaN(APFloat(3.14159274f), APFloat(6.28318548f)));
+ // normal -> subnormal
+ EXPECT_EQ(ConstantFPRange::getNonNaN(APFloat(-5e-8), APFloat(5e-8))
+ .cast(F16Sem)
+ .classify(),
+ fcSubnormal | fcZero);
+ // normal -> zero
+ EXPECT_EQ(ConstantFPRange::getNonNaN(
+ APFloat::getSmallestNormalized(Sem, /*Negative=*/true),
+ APFloat::getSmallestNormalized(Sem, /*Negative=*/false))
+ .cast(F32Sem)
+ .classify(),
+ fcZero);
+ // normal -> inf
+ EXPECT_EQ(ConstantFPRange::getNonNaN(APFloat(-65536.0), APFloat(65536.0))
+ .cast(F16Sem),
+ ConstantFPRange::getNonNaN(F16Sem));
+ // nan -> qnan
+ EXPECT_EQ(
+ ConstantFPRange::getNaNOnly(Sem, /*MayBeQNaN=*/true, /*MayBeSNaN=*/false)
+ .cast(F32Sem),
+ ConstantFPRange::getNaNOnly(F32Sem, /*MayBeQNaN=*/true,
+ /*MayBeSNaN=*/false));
+ EXPECT_EQ(
+ ConstantFPRange::getNaNOnly(Sem, /*MayBeQNaN=*/false, /*MayBeSNaN=*/true)
+ .cast(F32Sem),
+ ConstantFPRange::getNaNOnly(F32Sem, /*MayBeQNaN=*/true,
+ /*MayBeSNaN=*/false));
+ EXPECT_EQ(
+ ConstantFPRange::getNaNOnly(Sem, /*MayBeQNaN=*/true, /*MayBeSNaN=*/true)
+ .cast(F32Sem),
+ ConstantFPRange::getNaNOnly(F32Sem, /*MayBeQNaN=*/true,
+ /*MayBeSNaN=*/false));
+ // For BF16 -> F32, signaling bit is still lost.
+ EXPECT_EQ(ConstantFPRange::getNaNOnly(BF16Sem, /*MayBeQNaN=*/true,
+ /*MayBeSNaN=*/true)
+ .cast(F32Sem),
+ ConstantFPRange::getNaNOnly(F32Sem, /*MayBeQNaN=*/true,
+ /*MayBeSNaN=*/false));
+ // inf -> nan only (return full set for now)
+ EXPECT_EQ(ConstantFPRange::getNonNaN(APFloat::getInf(Sem, /*Negative=*/true),
+ APFloat::getInf(Sem, /*Negative=*/false))
+ .cast(F8NanOnlySem),
+ ConstantFPRange::getFull(F8NanOnlySem));
+ // other rounding modes
+ EXPECT_EQ(
+ ConstantFPRange::getNonNaN(APFloat::getSmallest(Sem, /*Negative=*/true),
+ APFloat::getSmallest(Sem, /*Negative=*/false))
+ .cast(F32Sem, APFloat::rmTowardNegative),
+ ConstantFPRange::getNonNaN(
+ APFloat::getSmallest(F32Sem, /*Negative=*/true),
+ APFloat::getZero(F32Sem, /*Negative=*/false)));
+ EXPECT_EQ(
+ ConstantFPRange::getNonNaN(APFloat::getSmallest(Sem, /*Negative=*/true),
+ APFloat::getSmallest(Sem, /*Negative=*/false))
+ .cast(F32Sem, APFloat::rmTowardPositive),
+ ConstantFPRange::getNonNaN(
+ APFloat::getZero(F32Sem, /*Negative=*/true),
+ APFloat::getSmallest(F32Sem, /*Negative=*/false)));
+ EXPECT_EQ(
+ ConstantFPRange::getNonNaN(
+ APFloat::getSmallestNormalized(Sem, /*Negative=*/true),
+ APFloat::getSmallestNormalized(Sem, /*Negative=*/false))
+ .cast(F32Sem, APFloat::rmTowardZero),
+ ConstantFPRange::getNonNaN(APFloat::getZero(F32Sem, /*Negative=*/true),
+ APFloat::getZero(F32Sem, /*Negative=*/false)));
+
+ EnumerateValuesInConstantFPRange(
+ ConstantFPRange::getFull(APFloat::Float8E4M3()),
+ [&](const APFloat &V) {
+ bool LosesInfo = false;
+
+ APFloat DoubleV = V;
+ DoubleV.convert(Sem, APFloat::rmNearestTiesToEven, &LosesInfo);
+ ConstantFPRange DoubleCR = ConstantFPRange(V).cast(Sem);
+ EXPECT_TRUE(DoubleCR.contains(DoubleV))
+ << "Casting " << V << " to double failed. " << DoubleCR
+ << " doesn't contain " << DoubleV;
+
+ auto &FP4Sem = APFloat::Float4E2M1FN();
+ APFloat FP4V = V;
+ FP4V.convert(FP4Sem, APFloat::rmNearestTiesToEven, &LosesInfo);
+ ConstantFPRange FP4CR = ConstantFPRange(V).cast(FP4Sem);
+ EXPECT_TRUE(FP4CR.contains(FP4V))
+ << "Casting " << V << " to FP4E2M1FN failed. " << FP4CR
+ << " doesn't contain " << FP4V;
+ },
+ /*IgnoreNaNPayload=*/true);
+}
+
+TEST_F(ConstantFPRangeTest, add) {
+ EXPECT_EQ(Full.add(Full), NonNaN.unionWith(QNaN));
+ EXPECT_EQ(Full.add(Empty), Empty);
+ EXPECT_EQ(Empty.add(Full), Empty);
+ EXPECT_EQ(Empty.add(Empty), Empty);
+ EXPECT_EQ(One.add(One), ConstantFPRange(APFloat(2.0)));
+ EXPECT_EQ(Some.add(Some),
+ ConstantFPRange::getNonNaN(APFloat(-6.0), APFloat(6.0)));
+ EXPECT_EQ(SomePos.add(SomeNeg),
+ ConstantFPRange::getNonNaN(APFloat(-3.0), APFloat(3.0)));
+ EXPECT_EQ(PosInf.add(PosInf), PosInf);
+ EXPECT_EQ(NegInf.add(NegInf), NegInf);
+ EXPECT_EQ(PosInf.add(Finite.unionWith(PosInf)), PosInf);
+ EXPECT_EQ(NegInf.add(Finite.unionWith(NegInf)), NegInf);
+ EXPECT_EQ(PosInf.add(Finite.unionWith(NegInf)), PosInf.unionWith(QNaN));
+ EXPECT_EQ(NegInf.add(Finite.unionWith(PosInf)), NegInf.unionWith(QNaN));
+ EXPECT_EQ(PosInf.add(NegInf), QNaN);
+ EXPECT_EQ(NegInf.add(PosInf), QNaN);
+ EXPECT_EQ(PosZero.add(NegZero), PosZero);
+ EXPECT_EQ(PosZero.add(Zero), PosZero);
+ EXPECT_EQ(NegZero.add(NegZero), NegZero);
+ EXPECT_EQ(NegZero.add(Zero), Zero);
+ EXPECT_EQ(NaN.add(NaN), QNaN);
+ EXPECT_EQ(NaN.add(Finite), QNaN);
+ EXPECT_EQ(NonNaN.unionWith(NaN).add(NonNaN), NonNaN.unionWith(QNaN));
+ EXPECT_EQ(PosInf.unionWith(QNaN).add(PosInf), PosInf.unionWith(QNaN));
+ EXPECT_EQ(PosInf.unionWith(NaN).add(ConstantFPRange(APFloat(24.0))),
+ PosInf.unionWith(QNaN));
+
+#if defined(EXPENSIVE_CHECKS)
+ EnumerateTwoInterestingConstantFPRanges(
+ [](const ConstantFPRange &LHS, const ConstantFPRange &RHS) {
+ ConstantFPRange Res = LHS.add(RHS);
+ ConstantFPRange Expected =
+ ConstantFPRange::getEmpty(LHS.getSemantics());
+ EnumerateValuesInConstantFPRange(
+ LHS,
+ [&](const APFloat &LHSC) {
+ EnumerateValuesInConstantFPRange(
+ RHS,
+ [&](const APFloat &RHSC) {
+ APFloat Sum = LHSC + RHSC;
+ EXPECT_TRUE(Res.contains(Sum))
+ << "Wrong result for " << LHS << " + " << RHS
+ << ". The result " << Res << " should contain " << Sum;
+ if (!Expected.contains(Sum))
+ Expected = Expected.unionWith(ConstantFPRange(Sum));
+ },
+ /*IgnoreNaNPayload=*/true);
+ },
+ /*IgnoreNaNPayload=*/true);
+ EXPECT_EQ(Res, Expected)
+ << "Suboptimal result for " << LHS << " + " << RHS << ". Expected "
+ << Expected << ", but got " << Res;
+ },
+ SparseLevel::SpecialValuesOnly);
+#endif
+}
+
+TEST_F(ConstantFPRangeTest, sub) {
+ EXPECT_EQ(Full.sub(Full), NonNaN.unionWith(QNaN));
+ EXPECT_EQ(Full.sub(Empty), Empty);
+ EXPECT_EQ(Empty.sub(Full), Empty);
+ EXPECT_EQ(Empty.sub(Empty), Empty);
+ EXPECT_EQ(One.sub(One), ConstantFPRange(APFloat(0.0)));
+ EXPECT_EQ(Some.sub(Some),
+ ConstantFPRange::getNonNaN(APFloat(-6.0), APFloat(6.0)));
+ EXPECT_EQ(SomePos.sub(SomeNeg),
+ ConstantFPRange::getNonNaN(APFloat(0.0), APFloat(6.0)));
+ EXPECT_EQ(PosInf.sub(NegInf), PosInf);
+ EXPECT_EQ(NegInf.sub(PosInf), NegInf);
+ EXPECT_EQ(PosInf.sub(Finite.unionWith(NegInf)), PosInf);
+ EXPECT_EQ(NegInf.sub(Finite.unionWith(PosInf)), NegInf);
+ EXPECT_EQ(PosInf.sub(Finite.unionWith(PosInf)), PosInf.unionWith(QNaN));
+ EXPECT_EQ(NegInf.sub(Finite.unionWith(NegInf)), NegInf.unionWith(QNaN));
+ EXPECT_EQ(PosInf.sub(PosInf), QNaN);
+ EXPECT_EQ(NegInf.sub(NegInf), QNaN);
+ EXPECT_EQ(PosZero.sub(NegZero), PosZero);
+ EXPECT_EQ(PosZero.sub(Zero), PosZero);
+ EXPECT_EQ(NegZero.sub(NegZero), PosZero);
+ EXPECT_EQ(NegZero.sub(PosZero), NegZero);
+ EXPECT_EQ(NegZero.sub(Zero), Zero);
+ EXPECT_EQ(NaN.sub(NaN), QNaN);
+  EXPECT_EQ(NaN.sub(Finite), QNaN);
+
+#if defined(EXPENSIVE_CHECKS)
+ EnumerateTwoInterestingConstantFPRanges(
+ [](const ConstantFPRange &LHS, const ConstantFPRange &RHS) {
+ ConstantFPRange Res = LHS.sub(RHS);
+ ConstantFPRange Expected =
+ ConstantFPRange::getEmpty(LHS.getSemantics());
+ EnumerateValuesInConstantFPRange(
+ LHS,
+ [&](const APFloat &LHSC) {
+ EnumerateValuesInConstantFPRange(
+ RHS,
+ [&](const APFloat &RHSC) {
+ APFloat Diff = LHSC - RHSC;
+ EXPECT_TRUE(Res.contains(Diff))
+ << "Wrong result for " << LHS << " - " << RHS
+ << ". The result " << Res << " should contain " << Diff;
+ if (!Expected.contains(Diff))
+ Expected = Expected.unionWith(ConstantFPRange(Diff));
+ },
+ /*IgnoreNaNPayload=*/true);
+ },
+ /*IgnoreNaNPayload=*/true);
+ EXPECT_EQ(Res, Expected)
+ << "Suboptimal result for " << LHS << " - " << RHS << ". Expected "
+ << Expected << ", but got " << Res;
+ },
+ SparseLevel::SpecialValuesOnly);
+#endif
+}
+
+TEST_F(ConstantFPRangeTest, mul) {
+ EXPECT_EQ(Full.mul(Full), NonNaN.unionWith(QNaN));
+ EXPECT_EQ(Full.mul(Empty), Empty);
+ EXPECT_EQ(Empty.mul(Full), Empty);
+ EXPECT_EQ(Empty.mul(Empty), Empty);
+ EXPECT_EQ(One.mul(One), ConstantFPRange(APFloat(1.0)));
+ EXPECT_EQ(Some.mul(Some),
+ ConstantFPRange::getNonNaN(APFloat(-9.0), APFloat(9.0)));
+ EXPECT_EQ(SomePos.mul(SomeNeg),
+ ConstantFPRange::getNonNaN(APFloat(-9.0), APFloat(-0.0)));
+ EXPECT_EQ(PosInf.mul(PosInf), PosInf);
+ EXPECT_EQ(NegInf.mul(NegInf), PosInf);
+ EXPECT_EQ(PosInf.mul(Finite), NonNaN.unionWith(QNaN));
+ EXPECT_EQ(NegInf.mul(Finite), NonNaN.unionWith(QNaN));
+ EXPECT_EQ(PosInf.mul(NegInf), NegInf);
+ EXPECT_EQ(NegInf.mul(PosInf), NegInf);
+ EXPECT_EQ(PosZero.mul(NegZero), NegZero);
+ EXPECT_EQ(PosZero.mul(Zero), Zero);
+ EXPECT_EQ(NegZero.mul(NegZero), PosZero);
+ EXPECT_EQ(NegZero.mul(Zero), Zero);
+ EXPECT_EQ(NaN.mul(NaN), QNaN);
+ EXPECT_EQ(NaN.mul(Finite), QNaN);
+
+#if defined(EXPENSIVE_CHECKS)
+ EnumerateTwoInterestingConstantFPRanges(
+ [](const ConstantFPRange &LHS, const ConstantFPRange &RHS) {
+ ConstantFPRange Res = LHS.mul(RHS);
+ ConstantFPRange Expected =
+ ConstantFPRange::getEmpty(LHS.getSemantics());
+ EnumerateValuesInConstantFPRange(
+ LHS,
+ [&](const APFloat &LHSC) {
+ EnumerateValuesInConstantFPRange(
+ RHS,
+ [&](const APFloat &RHSC) {
+ APFloat Prod = LHSC * RHSC;
+ EXPECT_TRUE(Res.contains(Prod))
+ << "Wrong result for " << LHS << " * " << RHS
+ << ". The result " << Res << " should contain " << Prod;
+ if (!Expected.contains(Prod))
+ Expected = Expected.unionWith(ConstantFPRange(Prod));
+ },
+ /*IgnoreNaNPayload=*/true);
+ },
+ /*IgnoreNaNPayload=*/true);
+ EXPECT_EQ(Res, Expected)
+ << "Suboptimal result for " << LHS << " * " << RHS << ". Expected "
+ << Expected << ", but got " << Res;
+ },
+ SparseLevel::SpecialValuesOnly);
+#endif
+}
+
+TEST_F(ConstantFPRangeTest, div) {
+ EXPECT_EQ(Full.div(Full), NonNaN.unionWith(QNaN));
+ EXPECT_EQ(Full.div(Empty), Empty);
+ EXPECT_EQ(Empty.div(Full), Empty);
+ EXPECT_EQ(Empty.div(Empty), Empty);
+ EXPECT_EQ(One.div(One), ConstantFPRange(APFloat(1.0)));
+ EXPECT_EQ(Some.div(Some), NonNaN.unionWith(QNaN));
+ EXPECT_EQ(SomePos.div(SomeNeg),
+ ConstantFPRange(APFloat::getInf(Sem, /*Negative=*/true),
+ APFloat::getZero(Sem, /*Negative=*/true),
+ /*MayBeQNaN=*/true, /*MayBeSNaN=*/false));
+ EXPECT_EQ(PosInf.div(PosInf), QNaN);
+ EXPECT_EQ(NegInf.div(NegInf), QNaN);
+ EXPECT_EQ(PosInf.div(Finite), NonNaN);
+ EXPECT_EQ(NegInf.div(Finite), NonNaN);
+ EXPECT_EQ(PosInf.div(NegInf), QNaN);
+ EXPECT_EQ(NegInf.div(PosInf), QNaN);
+ EXPECT_EQ(Zero.div(Zero), QNaN);
+ EXPECT_EQ(SomePos.div(PosInf), PosZero);
+ EXPECT_EQ(SomeNeg.div(PosInf), NegZero);
+ EXPECT_EQ(PosInf.div(SomePos), PosInf);
+ EXPECT_EQ(NegInf.div(SomeNeg), PosInf);
+ EXPECT_EQ(NegInf.div(Some), NonNaN);
+ EXPECT_EQ(NaN.div(NaN), QNaN);
+ EXPECT_EQ(NaN.div(Finite), QNaN);
+
+#if defined(EXPENSIVE_CHECKS)
+ EnumerateTwoInterestingConstantFPRanges(
+ [](const ConstantFPRange &LHS, const ConstantFPRange &RHS) {
+ ConstantFPRange Res = LHS.div(RHS);
+ ConstantFPRange Expected =
+ ConstantFPRange::getEmpty(LHS.getSemantics());
+ EnumerateValuesInConstantFPRange(
+ LHS,
+ [&](const APFloat &LHSC) {
+ EnumerateValuesInConstantFPRange(
+ RHS,
+ [&](const APFloat &RHSC) {
+ APFloat Val = LHSC / RHSC;
+ EXPECT_TRUE(Res.contains(Val))
+ << "Wrong result for " << LHS << " / " << RHS
+ << ". The result " << Res << " should contain " << Val;
+ if (!Expected.contains(Val))
+ Expected = Expected.unionWith(ConstantFPRange(Val));
+ },
+ /*IgnoreNaNPayload=*/true);
+ },
+ /*IgnoreNaNPayload=*/true);
+ EXPECT_EQ(Res, Expected)
+ << "Suboptimal result for " << LHS << " / " << RHS << ". Expected "
+ << Expected << ", but got " << Res;
+ },
+ SparseLevel::SpecialValuesOnly);
+#endif
+}
+
+TEST_F(ConstantFPRangeTest, flushDenormals) {
+ const fltSemantics &FP8Sem = APFloat::Float8E4M3();
+ APFloat NormalVal = APFloat::getSmallestNormalized(FP8Sem);
+ APFloat Subnormal1 = NormalVal;
+ Subnormal1.next(/*nextDown=*/true);
+ APFloat Subnormal2 = APFloat::getSmallest(FP8Sem);
+ APFloat ZeroVal = APFloat::getZero(FP8Sem);
+ APFloat EdgeValues[8] = {-NormalVal, -Subnormal1, -Subnormal2, -ZeroVal,
+ ZeroVal, Subnormal2, Subnormal1, NormalVal};
+ constexpr DenormalMode::DenormalModeKind Modes[4] = {
+ DenormalMode::IEEE, DenormalMode::PreserveSign,
+ DenormalMode::PositiveZero, DenormalMode::Dynamic};
+ for (uint32_t I = 0; I != 8; ++I) {
+ for (uint32_t J = I; J != 8; ++J) {
+ ConstantFPRange OriginCR =
+ ConstantFPRange::getNonNaN(EdgeValues[I], EdgeValues[J]);
+ for (auto Mode : Modes) {
+ StringRef ModeName = denormalModeKindName(Mode);
+ ConstantFPRange FlushedCR = OriginCR;
+ FlushedCR.flushDenormals(Mode);
+
+ ConstantFPRange Expected = ConstantFPRange::getEmpty(FP8Sem);
+ auto CheckFlushedV = [&](const APFloat &V, const APFloat &FlushedV) {
+ EXPECT_TRUE(FlushedCR.contains(FlushedV))
+ << "Wrong result for flushDenormal(" << V << ", " << ModeName
+ << "). The result " << FlushedCR << " should contain "
+ << FlushedV;
+ if (!Expected.contains(FlushedV))
+ Expected = Expected.unionWith(ConstantFPRange(FlushedV));
+ };
+ EnumerateValuesInConstantFPRange(
+ OriginCR,
+ [&](const APFloat &V) {
+ if (V.isDenormal()) {
+ switch (Mode) {
+ case DenormalMode::IEEE:
+ break;
+ case DenormalMode::PreserveSign:
+ CheckFlushedV(V, APFloat::getZero(FP8Sem, V.isNegative()));
+ break;
+ case DenormalMode::PositiveZero:
+ CheckFlushedV(V, APFloat::getZero(FP8Sem));
+ break;
+ case DenormalMode::Dynamic:
+ // PreserveSign
+ CheckFlushedV(V, APFloat::getZero(FP8Sem, V.isNegative()));
+ // PositiveZero
+ CheckFlushedV(V, APFloat::getZero(FP8Sem));
+ break;
+ default:
+ llvm_unreachable("unknown denormal mode");
+ }
+ }
+ // It is not mandated that flushing to zero occurs.
+ CheckFlushedV(V, V);
+ },
+ /*IgnoreNaNPayload=*/true);
+ EXPECT_EQ(FlushedCR, Expected)
+ << "Suboptimal result for flushDenormal(" << OriginCR << ", "
+ << ModeName << "). Expected " << Expected << ", but got "
+ << FlushedCR;
+ }
+ }
+ }
+}
+
} // anonymous namespace
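
A minimal usage sketch of the ConstantFPRange arithmetic and cast interfaces exercised by the tests above; the helper function and the concrete values are illustrative assumptions, not part of the patch:

#include "llvm/ADT/APFloat.h"
#include "llvm/IR/ConstantFPRange.h"
#include <cassert>
using namespace llvm;

static void constantFPRangeSketch() {
  // Two NaN-free ranges: [1.0, 2.0] and [3.0, 4.0].
  ConstantFPRange A = ConstantFPRange::getNonNaN(APFloat(1.0), APFloat(2.0));
  ConstantFPRange B = ConstantFPRange::getNonNaN(APFloat(3.0), APFloat(4.0));
  // add() returns a superset of all pairwise sums, so 1.5 + 3.5 = 5.0 must be
  // contained in the result.
  ConstantFPRange Sum = A.add(B);
  assert(Sum.contains(APFloat(5.0)));
  // cast() converts the range to another semantics; the result must contain
  // the conversion of every value in the source range.
  ConstantFPRange Narrow = Sum.cast(APFloat::IEEEsingle());
  assert(Narrow.contains(APFloat(5.0f)));
}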
diff --git a/llvm/unittests/IR/ConstantsTest.cpp b/llvm/unittests/IR/ConstantsTest.cpp
index 54c7ddd..6376165 100644
--- a/llvm/unittests/IR/ConstantsTest.cpp
+++ b/llvm/unittests/IR/ConstantsTest.cpp
@@ -564,13 +564,17 @@ TEST(ConstantsTest, FoldGlobalVariablePtr) {
Global->setAlignment(Align(4));
- ConstantInt *TheConstant(ConstantInt::get(IntType, 2));
+ ConstantInt *TheConstant = ConstantInt::get(IntType, 2);
- Constant *TheConstantExpr(ConstantExpr::getPtrToInt(Global.get(), IntType));
+ Constant *PtrToInt = ConstantExpr::getPtrToInt(Global.get(), IntType);
+ ASSERT_TRUE(
+ ConstantFoldBinaryInstruction(Instruction::And, PtrToInt, TheConstant)
+ ->isNullValue());
- ASSERT_TRUE(ConstantFoldBinaryInstruction(Instruction::And, TheConstantExpr,
- TheConstant)
- ->isNullValue());
+ Constant *PtrToAddr = ConstantExpr::getPtrToAddr(Global.get(), IntType);
+ ASSERT_TRUE(
+ ConstantFoldBinaryInstruction(Instruction::And, PtrToAddr, TheConstant)
+ ->isNullValue());
}
// Check that containsUndefOrPoisonElement and containsPoisonElement is working
diff --git a/llvm/unittests/IR/InstructionsTest.cpp b/llvm/unittests/IR/InstructionsTest.cpp
index fe9e7e8..f4693bf 100644
--- a/llvm/unittests/IR/InstructionsTest.cpp
+++ b/llvm/unittests/IR/InstructionsTest.cpp
@@ -606,12 +606,14 @@ TEST(InstructionTest, ConstrainedTrans) {
TEST(InstructionsTest, isEliminableCastPair) {
LLVMContext C;
- DataLayout DL1("p1:32:32");
+ DataLayout DL1("p1:32:32-p2:64:64:64:32");
Type *Int16Ty = Type::getInt16Ty(C);
+ Type *Int32Ty = Type::getInt32Ty(C);
Type *Int64Ty = Type::getInt64Ty(C);
Type *PtrTy64 = PointerType::get(C, 0);
Type *PtrTy32 = PointerType::get(C, 1);
+ Type *PtrTy64_32 = PointerType::get(C, 2);
// Source and destination pointers have same size -> bitcast.
EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::PtrToInt,
@@ -637,6 +639,42 @@ TEST(InstructionsTest, isEliminableCastPair) {
Int64Ty, &DL1),
0U);
+ // Destination larger than source. Pointer type same as destination.
+ EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::IntToPtr,
+ CastInst::PtrToInt, Int16Ty, PtrTy64,
+ Int64Ty, &DL1),
+ CastInst::ZExt);
+
+ // Destination larger than source. Pointer type different from destination.
+ EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::IntToPtr,
+ CastInst::PtrToInt, Int16Ty, PtrTy32,
+ Int64Ty, &DL1),
+ CastInst::ZExt);
+
+ // Destination smaller than source. Pointer type same as source.
+ EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::IntToPtr,
+ CastInst::PtrToInt, Int64Ty, PtrTy64,
+ Int16Ty, &DL1),
+ CastInst::Trunc);
+
+ // Destination smaller than source. Pointer type different from source.
+ EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::IntToPtr,
+ CastInst::PtrToInt, Int64Ty, PtrTy32,
+ Int16Ty, &DL1),
+ CastInst::Trunc);
+
+ // ptrtoaddr with address size != pointer size. Truncating case.
+ EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::IntToPtr,
+ CastInst::PtrToAddr, Int64Ty,
+ PtrTy64_32, Int32Ty, &DL1),
+ CastInst::Trunc);
+
+ // ptrtoaddr with address size != pointer size. Non-truncating case.
+ EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::IntToPtr,
+ CastInst::PtrToAddr, Int32Ty,
+ PtrTy64_32, Int32Ty, &DL1),
+ CastInst::BitCast);
+
// Test that we don't eliminate bitcasts between different address spaces,
// or if we don't have available pointer size information.
DataLayout DL2("e-p:32:32:32-p1:16:16:16-p2:64:64:64-i1:8:8-i8:8:8-i16:16:16"
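
A minimal sketch of how the new ptrtoaddr cases tested above can be queried; the wrapper function and the single-entry data layout string are illustrative assumptions, not part of the patch:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/LLVMContext.h"
#include <cassert>
using namespace llvm;

static void ptrToAddrFoldSketch() {
  LLVMContext Ctx;
  // Address space 2: 64-bit pointer representation, 32-bit address width.
  DataLayout DL("p2:64:64:64:32");
  Type *Int32Ty = Type::getInt32Ty(Ctx);
  Type *Int64Ty = Type::getInt64Ty(Ctx);
  Type *PtrTy64_32 = PointerType::get(Ctx, 2);
  // inttoptr(i64) followed by ptrtoaddr to i32 folds to a single trunc,
  // since only the low 32 address bits survive the round trip.
  unsigned Op = CastInst::isEliminableCastPair(CastInst::IntToPtr,
                                               CastInst::PtrToAddr, Int64Ty,
                                               PtrTy64_32, Int32Ty, &DL);
  assert(Op == CastInst::Trunc);
}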
diff --git a/llvm/unittests/IR/RuntimeLibcallsTest.cpp b/llvm/unittests/IR/RuntimeLibcallsTest.cpp
index 26cb7e3..8925d2b 100644
--- a/llvm/unittests/IR/RuntimeLibcallsTest.cpp
+++ b/llvm/unittests/IR/RuntimeLibcallsTest.cpp
@@ -44,9 +44,9 @@ TEST(RuntimeLibcallsTest, LibcallImplByName) {
RTLIB::RuntimeLibcallsInfo::lookupLibcallImplName("sqrtl");
ASSERT_EQ(size(SquirtleSquad), 3);
auto I = SquirtleSquad.begin();
- EXPECT_EQ(*I++, RTLIB::impl_sqrt_f128);
- EXPECT_EQ(*I++, RTLIB::impl_sqrt_f80);
- EXPECT_EQ(*I++, RTLIB::impl_sqrt_ppcf128);
+ EXPECT_EQ(*I++, RTLIB::impl_sqrtl_f128);
+ EXPECT_EQ(*I++, RTLIB::impl_sqrtl_f80);
+ EXPECT_EQ(*I++, RTLIB::impl_sqrtl_ppcf128);
}
// Last libcall
@@ -54,9 +54,9 @@ TEST(RuntimeLibcallsTest, LibcallImplByName) {
auto Truncs = RTLIB::RuntimeLibcallsInfo::lookupLibcallImplName("truncl");
ASSERT_EQ(size(Truncs), 3);
auto I = Truncs.begin();
- EXPECT_EQ(*I++, RTLIB::impl_trunc_f128);
- EXPECT_EQ(*I++, RTLIB::impl_trunc_f80);
- EXPECT_EQ(*I++, RTLIB::impl_trunc_ppcf128);
+ EXPECT_EQ(*I++, RTLIB::impl_truncl_f128);
+ EXPECT_EQ(*I++, RTLIB::impl_truncl_f80);
+ EXPECT_EQ(*I++, RTLIB::impl_truncl_ppcf128);
}
}