author     Aldy Hernandez <aldyh@redhat.com>    2022-07-12 10:16:03 +0200
committer  Aldy Hernandez <aldyh@redhat.com>    2022-07-12 16:40:51 +0200
commit     cab411a2b4b4f6a6b619d0650fade85288a31f9e (patch)
tree       20ef2d8a96f990a4565e1153b78bb7f4179eb8e0 /gcc
parent     32a753506be1d5265b657b4b80aeeae57871bb4c (diff)
Set nonzero bits from bitwise and operator in range-ops.
Now that nonzero bits are first class citizens in the range, we can keep
better track of them in range-ops, especially the bitwise and operator.
This patch sets the nonzero mask for the trivial case.  In doing so, I've
removed some old dead code that was an attempt to keep better track of
masks.

I'm sure there are tons of optimizations throughout range-ops that could
be implemented, especially the op1_range methods, but those always make
my head hurt.  I'll leave them to the smarter hackers out there.

I've removed the restriction that nonzero bits can't be queried from
legacy.  This was causing special casing all over the place, and it's not
like we can generate incorrect code.  We just silently drop nonzero bits
to -1 in some of the legacy code.  The end result is that VRP1, and other
users of legacy, may not benefit from these improvements.

Tested and benchmarked on x86-64 Linux.

gcc/ChangeLog:

        * range-op.cc (unsigned_singleton_p): Remove.
        (operator_bitwise_and::remove_impossible_ranges): Remove.
        (operator_bitwise_and::fold_range): Set nonzero bits.
        * value-range.cc (irange::get_nonzero_bits): Remove legacy_mode_p
        assert.
        (irange::dump_bitmasks): Remove legacy_mode_p check.
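For illustration only, a minimal standalone C++ sketch of the identity the
new fold_range code relies on: a bit that is zero in either operand's
nonzero mask cannot be set in the result of a bitwise AND, so the result's
nonzero mask is the AND of the operands' masks.  The helper
and_nonzero_mask and the sample masks below are made up for this sketch;
this is not GCC-internal code.

// Sketch (not GCC code): the nonzero mask of (x & y) is the AND of the
// operands' nonzero masks.
#include <cassert>
#include <cstdint>

static uint32_t
and_nonzero_mask (uint32_t lh_mask, uint32_t rh_mask)
{
  // Any bit cleared in either mask cannot appear in x & y.
  return lh_mask & rh_mask;
}

int
main ()
{
  uint32_t lh_mask = 0x000000ff;  // x may only have low-byte bits set.
  uint32_t rh_mask = 0x0000000f;  // y may only have low-nibble bits set.
  uint32_t result_mask = and_nonzero_mask (lh_mask, rh_mask);
  assert (result_mask == 0x0000000f);

  // Spot-check: every value pair consistent with the masks stays
  // within the combined mask.
  for (uint32_t x = 0; x <= 0xff; ++x)
    for (uint32_t y = 0; y <= 0x0f; ++y)
      assert (((x & y) & ~result_mask) == 0);
  return 0;
}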
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/range-op.cc     70
-rw-r--r--  gcc/value-range.cc   6
2 files changed, 4 insertions, 72 deletions
diff --git a/gcc/range-op.cc b/gcc/range-op.cc
index 5150c60..0e16408 100644
--- a/gcc/range-op.cc
+++ b/gcc/range-op.cc
@@ -2604,72 +2604,8 @@ private:
void simple_op1_range_solver (irange &r, tree type,
const irange &lhs,
const irange &op2) const;
- void remove_impossible_ranges (irange &r, const irange &rh) const;
} op_bitwise_and;
-static bool
-unsigned_singleton_p (const irange &op)
-{
- tree mask;
- if (op.singleton_p (&mask))
- {
- wide_int x = wi::to_wide (mask);
- return wi::ge_p (x, 0, TYPE_SIGN (op.type ()));
- }
- return false;
-}
-
-// Remove any ranges from R that are known to be impossible when an
-// range is ANDed with MASK.
-
-void
-operator_bitwise_and::remove_impossible_ranges (irange &r,
- const irange &rmask) const
-{
- if (r.undefined_p () || !unsigned_singleton_p (rmask))
- return;
-
- wide_int mask = rmask.lower_bound ();
- tree type = r.type ();
- int prec = TYPE_PRECISION (type);
- int leading_zeros = wi::clz (mask);
- int_range_max impossible_ranges;
-
- /* We know that starting at the most significant bit, any 0 in the
- mask means the resulting range cannot contain a 1 in that same
- position. This means the following ranges are impossible:
-
- x & 0b1001 1010
- IMPOSSIBLE RANGES
- 01xx xxxx [0100 0000, 0111 1111]
- 001x xxxx [0010 0000, 0011 1111]
- 0000 01xx [0000 0100, 0000 0111]
- 0000 0001 [0000 0001, 0000 0001]
- */
- wide_int one = wi::one (prec);
- for (int i = 0; i < prec - leading_zeros - 1; ++i)
- if (wi::bit_and (mask, wi::lshift (one, wi::uhwi (i, prec))) == 0)
- {
- tree lb = fold_build2 (LSHIFT_EXPR, type,
- build_one_cst (type),
- build_int_cst (type, i));
- tree ub_left = fold_build1 (BIT_NOT_EXPR, type,
- fold_build2 (LSHIFT_EXPR, type,
- build_minus_one_cst (type),
- build_int_cst (type, i)));
- tree ub_right = fold_build2 (LSHIFT_EXPR, type,
- build_one_cst (type),
- build_int_cst (type, i));
- tree ub = fold_build2 (BIT_IOR_EXPR, type, ub_left, ub_right);
- impossible_ranges.union_ (int_range<1> (lb, ub));
- }
- if (!impossible_ranges.undefined_p ())
- {
- impossible_ranges.invert ();
- r.intersect (impossible_ranges);
- }
-}
-
bool
operator_bitwise_and::fold_range (irange &r, tree type,
const irange &lh,
@@ -2678,9 +2614,9 @@ operator_bitwise_and::fold_range (irange &r, tree type,
{
if (range_operator::fold_range (r, type, lh, rh))
{
- // FIXME: This is temporarily disabled because, though it
- // generates better ranges, it's noticeably slower for evrp.
- // remove_impossible_ranges (r, rh);
+ if (!lh.undefined_p () && !rh.undefined_p ())
+ r.set_nonzero_bits (wi::bit_and (lh.get_nonzero_bits (),
+ rh.get_nonzero_bits ()));
return true;
}
return false;
diff --git a/gcc/value-range.cc b/gcc/value-range.cc
index a02fab4..2aa973b 100644
--- a/gcc/value-range.cc
+++ b/gcc/value-range.cc
@@ -2388,10 +2388,6 @@ wide_int
irange::get_nonzero_bits () const
{
gcc_checking_assert (!undefined_p ());
- // Nonzero bits are unsupported in legacy mode. The mask may be set
- // as a consequence of propagation or reading global ranges, but no
- // one from legacy land should be querying this.
- gcc_checking_assert (!legacy_mode_p ());
// Calculate the nonzero bits inherent in the range.
wide_int min = lower_bound ();
@@ -2509,7 +2505,7 @@ irange::dump (FILE *file) const
void
irange::dump_bitmasks (FILE *file) const
{
- if (m_nonzero_mask && !legacy_mode_p ())
+ if (m_nonzero_mask)
{
wide_int nz = get_nonzero_bits ();
if (nz != -1)