Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ipa-modref-tree.c  535
-rw-r--r--  gcc/ipa-modref-tree.h  542
2 files changed, 563 insertions, 514 deletions
diff --git a/gcc/ipa-modref-tree.c b/gcc/ipa-modref-tree.c
index d0ee487..e363c50 100644
--- a/gcc/ipa-modref-tree.c
+++ b/gcc/ipa-modref-tree.c
@@ -28,6 +28,541 @@ along with GCC; see the file COPYING3. If not see
#if CHECKING_P
+/* Return true if both accesses are the same. */
+bool
+modref_access_node::operator == (modref_access_node &a) const
+{
+ if (parm_index != a.parm_index)
+ return false;
+ if (parm_index != MODREF_UNKNOWN_PARM)
+ {
+ if (parm_offset_known != a.parm_offset_known)
+ return false;
+ if (parm_offset_known
+ && !known_eq (parm_offset, a.parm_offset))
+ return false;
+ }
+ if (range_info_useful_p () != a.range_info_useful_p ())
+ return false;
+ if (range_info_useful_p ()
+ && (!known_eq (a.offset, offset)
+ || !known_eq (a.size, size)
+ || !known_eq (a.max_size, max_size)))
+ return false;
+ return true;
+}
+
+/* Return true if A is a subaccess. */
+bool
+modref_access_node::contains (const modref_access_node &a) const
+{
+ poly_int64 aoffset_adj = 0;
+ if (parm_index != MODREF_UNKNOWN_PARM)
+ {
+ if (parm_index != a.parm_index)
+ return false;
+ if (parm_offset_known)
+ {
+ if (!a.parm_offset_known)
+ return false;
+ /* Accesses are never below parm_offset, so look
+ for a smaller offset.
+ If access ranges are known, still allow merging
+ when the bit offset comparison passes. */
+ if (!known_le (parm_offset, a.parm_offset)
+ && !range_info_useful_p ())
+ return false;
+ /* We allow negative aoffset_adj here in case
+ there is a useful range. This is because adding
+ a.offset may result in a non-negative offset again.
+ Ubsan fails on val << LOG2_BITS_PER_UNIT where val
+ is negative. */
+ aoffset_adj = (a.parm_offset - parm_offset)
+ * BITS_PER_UNIT;
+ }
+ }
+ if (range_info_useful_p ())
+ {
+ if (!a.range_info_useful_p ())
+ return false;
+ /* Sizes of stores are used to check that the object is big enough
+ to fit the store, so a smaller or unknown store is more general
+ than a large store. */
+ if (known_size_p (size)
+ && (!known_size_p (a.size)
+ || !known_le (size, a.size)))
+ return false;
+ if (known_size_p (max_size))
+ return known_subrange_p (a.offset + aoffset_adj,
+ a.max_size, offset, max_size);
+ else
+ return known_le (offset, a.offset + aoffset_adj);
+ }
+ return true;
+}
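+
+/* Worked example (hypothetical values, for illustration only):
+ let THIS describe parm 0 at parm_offset 0 with offset 0, size 32
+ and max_size 64, and let A describe parm 0 at parm_offset 4 with
+ offset 0, size 32 and max_size 32. Then aoffset_adj is
+ 4 * BITS_PER_UNIT = 32, A's bits [32, 64) lie within THIS's
+ range [0, 64) and size 32 fits a.size 32, so contains returns
+ true. */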
+
+/* Update access range to new parameters.
+ If RECORD_ADJUSTMENTS is true, record the number of changes in the
+ access and once the threshold is exceeded start dropping precision,
+ so only constantly many updates are possible. This makes the
+ dataflow converge. */
+void
+modref_access_node::update (poly_int64 parm_offset1,
+ poly_int64 offset1, poly_int64 size1,
+ poly_int64 max_size1, bool record_adjustments)
+{
+ if (known_eq (parm_offset, parm_offset1)
+ && known_eq (offset, offset1)
+ && known_eq (size, size1)
+ && known_eq (max_size, max_size1))
+ return;
+ if (!record_adjustments
+ || (++adjustments) < param_modref_max_adjustments)
+ {
+ parm_offset = parm_offset1;
+ offset = offset1;
+ size = size1;
+ max_size = max_size1;
+ }
+ else
+ {
+ if (dump_file)
+ fprintf (dump_file,
+ "--param param=modref-max-adjustments limit reached:");
+ if (!known_eq (parm_offset, parm_offset1))
+ {
+ if (dump_file)
+ fprintf (dump_file, " parm_offset cleared");
+ parm_offset_known = false;
+ }
+ if (!known_eq (size, size1))
+ {
+ size = -1;
+ if (dump_file)
+ fprintf (dump_file, " size cleared");
+ }
+ if (!known_eq (max_size, max_size1))
+ {
+ max_size = -1;
+ if (dump_file)
+ fprintf (dump_file, " max_size cleared");
+ }
+ if (!known_eq (offset, offset1))
+ {
+ offset = 0;
+ if (dump_file)
+ fprintf (dump_file, " offset cleared");
+ }
+ if (dump_file)
+ fprintf (dump_file, "\n");
+ }
+}
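+
+/* Worked example (hypothetical values, for illustration only): once
+ an access has been adjusted param_modref_max_adjustments times, a
+ further update that changes only max_size no longer grows it but
+ sets max_size to -1 (unknown), so every access changes at most a
+ bounded number of times and the dataflow terminates. */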
+
+/* Merge in access A if it is possible to do so without losing
+ precision. Return true if successful.
+ If RECORD_ADJUSTMENTS is true, remember how many intervals
+ were prolonged and punt when there are too many. */
+bool
+modref_access_node::merge (const modref_access_node &a,
+ bool record_adjustments)
+{
+ poly_int64 offset1 = 0;
+ poly_int64 aoffset1 = 0;
+ poly_int64 new_parm_offset = 0;
+
+ /* We assume that containment was tested earlier. */
+ gcc_checking_assert (!contains (a) && !a.contains (*this));
+ if (parm_index != MODREF_UNKNOWN_PARM)
+ {
+ if (parm_index != a.parm_index)
+ return false;
+ if (parm_offset_known)
+ {
+ if (!a.parm_offset_known)
+ return false;
+ if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
+ return false;
+ }
+ }
+ /* See if we can merge ranges. */
+ if (range_info_useful_p ())
+ {
+ /* Otherwise we would have containment, which should have
+ been handled earlier. */
+ gcc_checking_assert (a.range_info_useful_p ());
+
+ /* If a.size is less specific than size, merge only
+ if the intervals are otherwise equivalent. */
+ if (known_size_p (size)
+ && (!known_size_p (a.size) || known_lt (a.size, size)))
+ {
+ if (((known_size_p (max_size) || known_size_p (a.max_size))
+ && !known_eq (max_size, a.max_size))
+ || !known_eq (offset1, aoffset1))
+ return false;
+ update (new_parm_offset, offset1, a.size, max_size,
+ record_adjustments);
+ return true;
+ }
+ /* If sizes are the same, we can extend the interval. */
+ if ((known_size_p (size) || known_size_p (a.size))
+ && !known_eq (size, a.size))
+ return false;
+ if (known_le (offset1, aoffset1))
+ {
+ if (!known_size_p (max_size)
+ || known_ge (offset1 + max_size, aoffset1))
+ {
+ update2 (new_parm_offset, offset1, size, max_size,
+ aoffset1, a.size, a.max_size,
+ record_adjustments);
+ return true;
+ }
+ }
+ else if (known_le (aoffset1, offset1))
+ {
+ if (!known_size_p (a.max_size)
+ || known_ge (aoffset1 + a.max_size, offset1))
+ {
+ update2 (new_parm_offset, offset1, size, max_size,
+ aoffset1, a.size, a.max_size,
+ record_adjustments);
+ return true;
+ }
+ }
+ return false;
+ }
+ update (new_parm_offset, offset1,
+ size, max_size, record_adjustments);
+ return true;
+}
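+
+/* Worked example (hypothetical values, for illustration only): two
+ accesses to the same parm with a common parm_offset, both of size 32
+ and max_size 32, at bit offsets 0 and 32 touch each other, so merge
+ extends THIS to the range [0, 64) and returns true; at offsets 0 and
+ 64 the ranges are disjoint and merge returns false. */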
+
+/* Return true if merging A1 and B1 loses less information
+ than merging A2 and B2 would.
+ Assume that no containment or lossless merging is possible. */
+bool
+modref_access_node::closer_pair_p (const modref_access_node &a1,
+ const modref_access_node &b1,
+ const modref_access_node &a2,
+ const modref_access_node &b2)
+{
+ /* Merging different parm indexes amounts to a complete loss
+ of range info. */
+ if (a1.parm_index != b1.parm_index)
+ return false;
+ if (a2.parm_index != b2.parm_index)
+ return true;
+ /* If parm offsets are known and parm indexes are the same,
+ we should already have containment. */
+ gcc_checking_assert (a1.parm_offset_known && b1.parm_offset_known);
+ gcc_checking_assert (a2.parm_offset_known && b2.parm_offset_known);
+
+ /* First normalize offsets for parm offsets. */
+ poly_int64 new_parm_offset, offseta1, offsetb1, offseta2, offsetb2;
+ if (!a1.combined_offsets (b1, &new_parm_offset, &offseta1, &offsetb1)
+ || !a2.combined_offsets (b2, &new_parm_offset, &offseta2, &offsetb2))
+ gcc_unreachable ();
+
+
+ /* Now compute the distance between the intervals. */
+ poly_int64 dist1, dist2;
+ if (known_le (offseta1, offsetb1))
+ {
+ if (!known_size_p (a1.max_size))
+ dist1 = 0;
+ else
+ dist1 = offsetb1 - offseta1 - a1.max_size;
+ }
+ else
+ {
+ if (!known_size_p (b1.max_size))
+ dist1 = 0;
+ else
+ dist1 = offseta1 - offsetb1 - b1.max_size;
+ }
+ if (known_le (offseta2, offsetb2))
+ {
+ if (!known_size_p (a2.max_size))
+ dist2 = 0;
+ else
+ dist2 = offsetb2 - offseta2 - a2.max_size;
+ }
+ else
+ {
+ if (!known_size_p (b2.max_size))
+ dist2 = 0;
+ else
+ dist2 = offseta2 - offsetb2 - b2.max_size;
+ }
+ /* It may happen that intervals overlap when the sizes
+ differ. Prefer the overlap to non-overlap. */
+ if (known_lt (dist1, 0) && known_ge (dist2, 0))
+ return true;
+ if (known_lt (dist2, 0) && known_ge (dist1, 0))
+ return false;
+ if (known_lt (dist1, 0))
+ /* If both overlap, minimize the overlap. */
+ return known_le (dist2, dist1);
+ else
+ /* If both are disjoint, look for the smaller distance. */
+ return known_le (dist1, dist2);
+}
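+
+/* Worked example (hypothetical values, for illustration only): with a
+ common parm_offset, the ranges [0, 32) and [48, 80) are 16 bits
+ apart while [0, 32) and [96, 128) are 64 bits apart, so the first
+ pair is the closer one and merging it loses less information. */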
+
+/* Merge in access A while losing precision. */
+void
+modref_access_node::forced_merge (const modref_access_node &a,
+ bool record_adjustments)
+{
+ if (parm_index != a.parm_index)
+ {
+ gcc_checking_assert (parm_index != MODREF_UNKNOWN_PARM);
+ parm_index = MODREF_UNKNOWN_PARM;
+ return;
+ }
+
+ /* We assume that containment and lossless merging
+ were tested earlier. */
+ gcc_checking_assert (!contains (a) && !a.contains (*this)
+ && !merge (a, record_adjustments));
+ gcc_checking_assert (parm_offset_known && a.parm_offset_known);
+
+ poly_int64 new_parm_offset, offset1, aoffset1;
+ if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
+ {
+ parm_offset_known = false;
+ return;
+ }
+ gcc_checking_assert (range_info_useful_p ()
+ && a.range_info_useful_p ());
+ if (record_adjustments)
+ adjustments += a.adjustments;
+ update2 (new_parm_offset,
+ offset1, size, max_size,
+ aoffset1, a.size, a.max_size,
+ record_adjustments);
+}
+
+/* Merge two ranges, both starting at PARM_OFFSET1, and update THIS
+ with the result. */
+void
+modref_access_node::update2 (poly_int64 parm_offset1,
+ poly_int64 offset1, poly_int64 size1,
+ poly_int64 max_size1,
+ poly_int64 offset2, poly_int64 size2,
+ poly_int64 max_size2,
+ bool record_adjustments)
+{
+ poly_int64 new_size = size1;
+
+ if (!known_size_p (size2)
+ || known_le (size2, size1))
+ new_size = size2;
+ else
+ gcc_checking_assert (known_le (size1, size2));
+
+ if (known_le (offset1, offset2))
+ ;
+ else if (known_le (offset2, offset1))
+ {
+ std::swap (offset1, offset2);
+ std::swap (max_size1, max_size2);
+ }
+ else
+ gcc_unreachable ();
+
+ poly_int64 new_max_size;
+
+ if (!known_size_p (max_size1))
+ new_max_size = max_size1;
+ else if (!known_size_p (max_size2))
+ new_max_size = max_size2;
+ else
+ {
+ new_max_size = max_size2 + offset2 - offset1;
+ if (known_le (new_max_size, max_size1))
+ new_max_size = max_size1;
+ }
+
+ update (parm_offset1, offset1,
+ new_size, new_max_size, record_adjustments);
+}
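+
+/* Worked example (hypothetical values, for illustration only): for
+ ranges at offsets 0 and 32, both with max_size 64, the result covers
+ [0, 32 + 64) = [0, 96); the new size is the smaller (or unknown) of
+ the two sizes because a smaller store is the more general one. */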
+
+/* Given access nodes THIS and A, return true if they can be
+ expressed with a common parm_offset. In that case store the
+ parm offset in NEW_PARM_OFFSET, the start of THIS's range in
+ NEW_OFFSET and the start of A's range in NEW_AOFFSET. */
+bool
+modref_access_node::combined_offsets (const modref_access_node &a,
+ poly_int64 *new_parm_offset,
+ poly_int64 *new_offset,
+ poly_int64 *new_aoffset) const
+{
+ gcc_checking_assert (parm_offset_known && a.parm_offset_known);
+ if (known_le (a.parm_offset, parm_offset))
+ {
+ *new_offset = offset
+ + ((parm_offset - a.parm_offset)
+ << LOG2_BITS_PER_UNIT);
+ *new_aoffset = a.offset;
+ *new_parm_offset = a.parm_offset;
+ return true;
+ }
+ else if (known_le (parm_offset, a.parm_offset))
+ {
+ *new_aoffset = a.offset
+ + ((a.parm_offset - parm_offset)
+ << LOG2_BITS_PER_UNIT);
+ *new_offset = offset;
+ *new_parm_offset = parm_offset;
+ return true;
+ }
+ else
+ return false;
+}
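+
+/* Worked example (hypothetical values, for illustration only): for
+ THIS at parm_offset 8 with offset 0 and A at parm_offset 4 with
+ offset 16, the common base is parm_offset 4: THIS's start becomes
+ 0 + ((8 - 4) << LOG2_BITS_PER_UNIT) = 32 while A keeps offset 16. */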
+
+/* Try to optimize the access list ACCESSES after entry INDEX was modified. */
+void
+modref_access_node::try_merge_with (vec <modref_access_node, va_gc> *&accesses,
+ size_t index)
+{
+ size_t i;
+
+ for (i = 0; i < accesses->length ();)
+ if (i != index)
+ {
+ bool found = false, restart = false;
+ modref_access_node *a = &(*accesses)[i];
+ modref_access_node *n = &(*accesses)[index];
+
+ if (n->contains (*a))
+ found = true;
+ if (!found && n->merge (*a, false))
+ found = restart = true;
+ gcc_checking_assert (found || !a->merge (*n, false));
+ if (found)
+ {
+ accesses->unordered_remove (i);
+ if (index == accesses->length ())
+ {
+ index = i;
+ i++;
+ }
+ if (restart)
+ i = 0;
+ }
+ else
+ i++;
+ }
+ else
+ i++;
+}
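+
+/* For example (hypothetical), if entry INDEX grew to cover [0, 96),
+ an entry covering [32, 64) is now contained and gets removed, while
+ a successful merge with [96, 128) restarts the scan because the
+ enlarged range may subsume entries visited earlier. */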
+
+/* Insert access A into ACCESSES.
+ Limit the number of entries to MAX_ACCESSES and if RECORD_ADJUSTMENTS
+ is true, avoid too many interval extensions.
+
+ Return 0 if nothing changed, 1 if the insert was successful and -1
+ if entries should be collapsed. */
+int
+modref_access_node::insert (vec <modref_access_node, va_gc> *&accesses,
+ modref_access_node a, size_t max_accesses,
+ bool record_adjustments)
+{
+ size_t i, j;
+ modref_access_node *a2;
+
+ /* Verify that list does not contain redundant accesses. */
+ if (flag_checking)
+ {
+ size_t i, i2;
+ modref_access_node *a, *a2;
+
+ FOR_EACH_VEC_SAFE_ELT (accesses, i, a)
+ {
+ FOR_EACH_VEC_SAFE_ELT (accesses, i2, a2)
+ if (i != i2)
+ gcc_assert (!a->contains (*a2));
+ }
+ }
+
+ FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
+ {
+ if (a2->contains (a))
+ return 0;
+ if (a.contains (*a2))
+ {
+ a.adjustments = 0;
+ a2->parm_index = a.parm_index;
+ a2->parm_offset_known = a.parm_offset_known;
+ a2->update (a.parm_offset, a.offset, a.size, a.max_size,
+ record_adjustments);
+ modref_access_node::try_merge_with (accesses, i);
+ return 1;
+ }
+ if (a2->merge (a, record_adjustments))
+ {
+ modref_access_node::try_merge_with (accesses, i);
+ return 1;
+ }
+ gcc_checking_assert (!(a == *a2));
+ }
+
+ /* If this base->ref pair has too many accesses stored, we either
+ find the least harmful merge or ask the caller to collapse. */
+ if (accesses && accesses->length () >= max_accesses)
+ {
+ if (max_accesses < 2)
+ return -1;
+ /* Find least harmful merge and perform it. */
+ int best1 = -1, best2 = -1;
+ FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
+ {
+ for (j = i + 1; j < accesses->length (); j++)
+ if (best1 < 0
+ || modref_access_node::closer_pair_p
+ (*a2, (*accesses)[j],
+ (*accesses)[best1],
+ best2 < 0 ? a : (*accesses)[best2]))
+ {
+ best1 = i;
+ best2 = j;
+ }
+ if (modref_access_node::closer_pair_p
+ (*a2, a,
+ (*accesses)[best1],
+ best2 < 0 ? a : (*accesses)[best2]))
+ {
+ best1 = i;
+ best2 = -1;
+ }
+ }
+ (*accesses)[best1].forced_merge (best2 < 0 ? a : (*accesses)[best2],
+ record_adjustments);
+ /* Check that merging indeed merged ranges. */
+ gcc_checking_assert ((*accesses)[best1].contains
+ (best2 < 0 ? a : (*accesses)[best2]));
+ if (!(*accesses)[best1].useful_p ())
+ return -1;
+ if (dump_file && best2 >= 0)
+ fprintf (dump_file,
+ "--param param=modref-max-accesses limit reached;"
+ " merging %i and %i\n", best1, best2);
+ else if (dump_file)
+ fprintf (dump_file,
+ "--param param=modref-max-accesses limit reached;"
+ " merging with %i\n", best1);
+ modref_access_node::try_merge_with (accesses, best1);
+ if (best2 >= 0)
+ insert (accesses, a, max_accesses, record_adjustments);
+ return 1;
+ }
+ a.adjustments = 0;
+ vec_safe_push (accesses, a);
+ return 1;
+}
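+
+/* Usage sketch: a caller such as modref_ref_node::insert_access is
+ expected to treat 0 as "nothing changed", 1 as "inserted or merged"
+ and -1 as a request to collapse the whole access list, which happens
+ when MAX_ACCESSES is below 2 or a forced merge yields an access with
+ no useful info. */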
+
namespace selftest {
static void
diff --git a/gcc/ipa-modref-tree.h b/gcc/ipa-modref-tree.h
index 3e213b2..b35cf3a 100644
--- a/gcc/ipa-modref-tree.h
+++ b/gcc/ipa-modref-tree.h
@@ -57,7 +57,6 @@ enum modref_special_parms {
/* Memory access. */
struct GTY(()) modref_access_node
{
-
/* Access range information (in bits). */
poly_int64 offset;
poly_int64 size;
@@ -88,380 +87,28 @@ struct GTY(()) modref_access_node
|| known_ge (offset, 0));
}
/* Return true if both accesses are the same. */
- bool operator == (modref_access_node &a) const
- {
- if (parm_index != a.parm_index)
- return false;
- if (parm_index != MODREF_UNKNOWN_PARM)
- {
- if (parm_offset_known != a.parm_offset_known)
- return false;
- if (parm_offset_known
- && !known_eq (parm_offset, a.parm_offset))
- return false;
- }
- if (range_info_useful_p () != a.range_info_useful_p ())
- return false;
- if (range_info_useful_p ()
- && (!known_eq (a.offset, offset)
- || !known_eq (a.size, size)
- || !known_eq (a.max_size, max_size)))
- return false;
- return true;
- }
- /* Return true A is a subaccess. */
- bool contains (const modref_access_node &a) const
- {
- poly_int64 aoffset_adj = 0;
- if (parm_index != MODREF_UNKNOWN_PARM)
- {
- if (parm_index != a.parm_index)
- return false;
- if (parm_offset_known)
- {
- if (!a.parm_offset_known)
- return false;
- /* Accesses are never below parm_offset, so look
- for smaller offset.
- If access ranges are known still allow merging
- when bit offsets comparsion passes. */
- if (!known_le (parm_offset, a.parm_offset)
- && !range_info_useful_p ())
- return false;
- /* We allow negative aoffset_adj here in case
- there is an useful range. This is because adding
- a.offset may result in non-ngative offset again.
- Ubsan fails on val << LOG_BITS_PER_UNIT where val
- is negative. */
- aoffset_adj = (a.parm_offset - parm_offset)
- * BITS_PER_UNIT;
- }
- }
- if (range_info_useful_p ())
- {
- if (!a.range_info_useful_p ())
- return false;
- /* Sizes of stores are used to check that object is big enough
- to fit the store, so smaller or unknown sotre is more general
- than large store. */
- if (known_size_p (size)
- && (!known_size_p (a.size)
- || !known_le (size, a.size)))
- return false;
- if (known_size_p (max_size))
- return known_subrange_p (a.offset + aoffset_adj,
- a.max_size, offset, max_size);
- else
- return known_le (offset, a.offset + aoffset_adj);
- }
- return true;
- }
- /* Update access range to new parameters.
- If RECORD_ADJUSTMENTS is true, record number of changes in the access
- and if threshold is exceeded start dropping precision
- so only constantly many updates are possible. This makes dataflow
- to converge. */
- void update (poly_int64 parm_offset1,
- poly_int64 offset1, poly_int64 size1, poly_int64 max_size1,
- bool record_adjustments)
- {
- if (known_eq (parm_offset, parm_offset1)
- && known_eq (offset, offset1)
- && known_eq (size, size1)
- && known_eq (max_size, max_size1))
- return;
- if (!record_adjustments
- || (++adjustments) < param_modref_max_adjustments)
- {
- parm_offset = parm_offset1;
- offset = offset1;
- size = size1;
- max_size = max_size1;
- }
- else
- {
- if (dump_file)
- fprintf (dump_file,
- "--param param=modref-max-adjustments limit reached:");
- if (!known_eq (parm_offset, parm_offset1))
- {
- if (dump_file)
- fprintf (dump_file, " parm_offset cleared");
- parm_offset_known = false;
- }
- if (!known_eq (size, size1))
- {
- size = -1;
- if (dump_file)
- fprintf (dump_file, " size cleared");
- }
- if (!known_eq (max_size, max_size1))
- {
- max_size = -1;
- if (dump_file)
- fprintf (dump_file, " max_size cleared");
- }
- if (!known_eq (offset, offset1))
- {
- offset = 0;
- if (dump_file)
- fprintf (dump_file, " offset cleared");
- }
- if (dump_file)
- fprintf (dump_file, "\n");
- }
- }
- /* Merge in access A if it is possible to do without losing
- precision. Return true if successful.
- If RECORD_ADJUSTMENTs is true, remember how many interval
- was prolonged and punt when there are too many. */
- bool merge (const modref_access_node &a, bool record_adjustments)
- {
- poly_int64 offset1 = 0;
- poly_int64 aoffset1 = 0;
- poly_int64 new_parm_offset = 0;
-
- /* We assume that containment was tested earlier. */
- gcc_checking_assert (!contains (a) && !a.contains (*this));
- if (parm_index != MODREF_UNKNOWN_PARM)
- {
- if (parm_index != a.parm_index)
- return false;
- if (parm_offset_known)
- {
- if (!a.parm_offset_known)
- return false;
- if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
- return false;
- }
- }
- /* See if we can merge ranges. */
- if (range_info_useful_p ())
- {
- /* In this case we have containment that should be
- handled earlier. */
- gcc_checking_assert (a.range_info_useful_p ());
-
- /* If a.size is less specified than size, merge only
- if intervals are otherwise equivalent. */
- if (known_size_p (size)
- && (!known_size_p (a.size) || known_lt (a.size, size)))
- {
- if (((known_size_p (max_size) || known_size_p (a.max_size))
- && !known_eq (max_size, a.max_size))
- || !known_eq (offset1, aoffset1))
- return false;
- update (new_parm_offset, offset1, a.size, max_size,
- record_adjustments);
- return true;
- }
- /* If sizes are same, we can extend the interval. */
- if ((known_size_p (size) || known_size_p (a.size))
- && !known_eq (size, a.size))
- return false;
- if (known_le (offset1, aoffset1))
- {
- if (!known_size_p (max_size)
- || known_ge (offset1 + max_size, aoffset1))
- {
- update2 (new_parm_offset, offset1, size, max_size,
- aoffset1, a.size, a.max_size,
- record_adjustments);
- return true;
- }
- }
- else if (known_le (aoffset1, offset1))
- {
- if (!known_size_p (a.max_size)
- || known_ge (aoffset1 + a.max_size, offset1))
- {
- update2 (new_parm_offset, offset1, size, max_size,
- aoffset1, a.size, a.max_size,
- record_adjustments);
- return true;
- }
- }
- return false;
- }
- update (new_parm_offset, offset1,
- size, max_size, record_adjustments);
- return true;
- }
- /* Return true if A1 and B1 can be merged with lower informatoin
- less than A2 and B2.
- Assume that no containment or lossless merging is possible. */
- static bool closer_pair_p (const modref_access_node &a1,
- const modref_access_node &b1,
- const modref_access_node &a2,
- const modref_access_node &b2)
- {
- /* Merging different parm indexes comes to complete loss
- of range info. */
- if (a1.parm_index != b1.parm_index)
- return false;
- if (a2.parm_index != b2.parm_index)
- return true;
- /* If parm is known and parm indexes are the same we should
- already have containment. */
- gcc_checking_assert (a1.parm_offset_known && b1.parm_offset_known);
- gcc_checking_assert (a2.parm_offset_known && b2.parm_offset_known);
-
- /* First normalize offsets for parm offsets. */
- poly_int64 new_parm_offset, offseta1, offsetb1, offseta2, offsetb2;
- if (!a1.combined_offsets (b1, &new_parm_offset, &offseta1, &offsetb1)
- || !a2.combined_offsets (b2, &new_parm_offset, &offseta2, &offsetb2))
- gcc_unreachable ();
-
-
- /* Now compute distnace of the intervals. */
- poly_int64 dist1, dist2;
- if (known_le (offseta1, offsetb1))
- {
- if (!known_size_p (a1.max_size))
- dist1 = 0;
- else
- dist1 = offsetb1 - offseta1 - a1.max_size;
- }
- else
- {
- if (!known_size_p (b1.max_size))
- dist1 = 0;
- else
- dist1 = offseta1 - offsetb1 - b1.max_size;
- }
- if (known_le (offseta2, offsetb2))
- {
- if (!known_size_p (a2.max_size))
- dist2 = 0;
- else
- dist2 = offsetb2 - offseta2 - a2.max_size;
- }
- else
- {
- if (!known_size_p (b2.max_size))
- dist2 = 0;
- else
- dist2 = offseta2 - offsetb2 - b2.max_size;
- }
- /* It may happen that intervals overlap in case size
- is different. Preffer the overlap to non-overlap. */
- if (known_lt (dist1, 0) && known_ge (dist2, 0))
- return true;
- if (known_lt (dist2, 0) && known_ge (dist1, 0))
- return false;
- if (known_lt (dist1, 0))
- /* If both overlaps minimize overlap. */
- return known_le (dist2, dist1);
- else
- /* If both are disjoint look for smaller distance. */
- return known_le (dist1, dist2);
- }
-
- /* Merge in access A while losing precision. */
- void forced_merge (const modref_access_node &a, bool record_adjustments)
- {
- if (parm_index != a.parm_index)
- {
- gcc_checking_assert (parm_index != MODREF_UNKNOWN_PARM);
- parm_index = MODREF_UNKNOWN_PARM;
- return;
- }
-
- /* We assume that containment and lossless merging
- was tested earlier. */
- gcc_checking_assert (!contains (a) && !a.contains (*this)
- && !merge (a, record_adjustments));
- gcc_checking_assert (parm_offset_known && a.parm_offset_known);
-
- poly_int64 new_parm_offset, offset1, aoffset1;
- if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
- {
- parm_offset_known = false;
- return;
- }
- gcc_checking_assert (range_info_useful_p ()
- && a.range_info_useful_p ());
- if (record_adjustments)
- adjustments += a.adjustments;
- update2 (new_parm_offset,
- offset1, size, max_size,
- aoffset1, a.size, a.max_size,
- record_adjustments);
- }
+ bool operator == (modref_access_node &a) const;
+ /* Insert A into ACCESSES. Limit the size of the vector to MAX_ACCESSES
+ and if RECORD_ADJUSTMENTS is true, keep track of adjustment counts.
+ Return 0 if nothing changed, 1 if insertion succeeded and -1 if
+ entries should be collapsed. */
+ static int insert (vec <modref_access_node, va_gc> *&accesses,
+ modref_access_node a, size_t max_accesses,
+ bool record_adjustments);
private:
- /* Merge two ranges both starting at parm_offset1 and update THIS
- with result. */
- void update2 (poly_int64 parm_offset1,
- poly_int64 offset1, poly_int64 size1, poly_int64 max_size1,
- poly_int64 offset2, poly_int64 size2, poly_int64 max_size2,
- bool record_adjustments)
- {
- poly_int64 new_size = size1;
-
- if (!known_size_p (size2)
- || known_le (size2, size1))
- new_size = size2;
- else
- gcc_checking_assert (known_le (size1, size2));
-
- if (known_le (offset1, offset2))
- ;
- else if (known_le (offset2, offset1))
- {
- std::swap (offset1, offset2);
- std::swap (max_size1, max_size2);
- }
- else
- gcc_unreachable ();
-
- poly_int64 new_max_size;
-
- if (!known_size_p (max_size1))
- new_max_size = max_size1;
- else if (!known_size_p (max_size2))
- new_max_size = max_size2;
- else
- {
- new_max_size = max_size2 + offset2 - offset1;
- if (known_le (new_max_size, max_size1))
- new_max_size = max_size1;
- }
-
- update (parm_offset1, offset1,
- new_size, new_max_size, record_adjustments);
- }
- /* Given access nodes THIS and A, return true if they
- can be done with common parm_offsets. In this case
- return parm offset in new_parm_offset, new_offset
- which is start of range in THIS and new_aoffset that
- is start of range in A. */
- bool combined_offsets (const modref_access_node &a,
- poly_int64 *new_parm_offset,
- poly_int64 *new_offset,
- poly_int64 *new_aoffset) const
- {
- gcc_checking_assert (parm_offset_known && a.parm_offset_known);
- if (known_le (a.parm_offset, parm_offset))
- {
- *new_offset = offset
- + ((parm_offset - a.parm_offset)
- << LOG2_BITS_PER_UNIT);
- *new_aoffset = a.offset;
- *new_parm_offset = a.parm_offset;
- return true;
- }
- else if (known_le (parm_offset, a.parm_offset))
- {
- *new_aoffset = a.offset
- + ((a.parm_offset - parm_offset)
- << LOG2_BITS_PER_UNIT);
- *new_offset = offset;
- *new_parm_offset = parm_offset;
- return true;
- }
- else
- return false;
- }
+ bool contains (const modref_access_node &) const;
+ void update (poly_int64, poly_int64, poly_int64, poly_int64, bool);
+ bool merge (const modref_access_node &, bool);
+ static bool closer_pair_p (const modref_access_node &,
+ const modref_access_node &,
+ const modref_access_node &,
+ const modref_access_node &);
+ void forced_merge (const modref_access_node &, bool);
+ void update2 (poly_int64, poly_int64, poly_int64, poly_int64,
+ poly_int64, poly_int64, poly_int64, bool);
+ bool combined_offsets (const modref_access_node &,
+ poly_int64 *, poly_int64 *, poly_int64 *) const;
+ static void try_merge_with (vec <modref_access_node, va_gc> *&, size_t);
};
/* Access node specifying no useful info. */
@@ -489,20 +136,6 @@ struct GTY((user)) modref_ref_node
every_access = true;
}
- /* Verify that list does not contain redundant accesses. */
- void verify ()
- {
- size_t i, i2;
- modref_access_node *a, *a2;
-
- FOR_EACH_VEC_SAFE_ELT (accesses, i, a)
- {
- FOR_EACH_VEC_SAFE_ELT (accesses, i2, a2)
- if (i != i2)
- gcc_assert (!a->contains (*a2));
- }
- }
-
/* Insert access with OFFSET and SIZE.
Collapse tree if it has more than MAX_ACCESSES entries.
If RECORD_ADJUSTMENTs is true avoid too many interval extensions.
@@ -514,18 +147,12 @@ struct GTY((user)) modref_ref_node
if (every_access)
return false;
- /* Otherwise, insert a node for the ref of the access under the base. */
- size_t i, j;
- modref_access_node *a2;
-
/* Only the following kind of parameters needs to be tracked.
We do not track return slots because they are seen as a direct store
in the caller. */
gcc_checking_assert (a.parm_index >= 0
|| a.parm_index == MODREF_STATIC_CHAIN_PARM
|| a.parm_index == MODREF_UNKNOWN_PARM);
- if (flag_checking)
- verify ();
if (!a.useful_p ())
{
@@ -537,130 +164,17 @@ struct GTY((user)) modref_ref_node
return false;
}
- FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
- {
- if (a2->contains (a))
- return false;
- if (a.contains (*a2))
- {
- a.adjustments = 0;
- a2->parm_index = a.parm_index;
- a2->parm_offset_known = a.parm_offset_known;
- a2->update (a.parm_offset, a.offset, a.size, a.max_size,
- record_adjustments);
- try_merge_with (i);
- return true;
- }
- if (a2->merge (a, record_adjustments))
- {
- try_merge_with (i);
- return true;
- }
- gcc_checking_assert (!(a == *a2));
- }
-
- /* If this base->ref pair has too many accesses stored, we will clear
- all accesses and bail out. */
- if (accesses && accesses->length () >= max_accesses)
+ int ret = modref_access_node::insert (accesses, a, max_accesses,
+ record_adjustments);
+ if (ret == -1)
{
- if (max_accesses < 2)
- {
- collapse ();
- if (dump_file)
- fprintf (dump_file,
- "--param param=modref-max-accesses limit reached;"
- " collapsing\n");
- return true;
- }
- /* Find least harmful merge and perform it. */
- int best1 = -1, best2 = -1;
- FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
- {
- for (j = i + 1; j < accesses->length (); j++)
- if (best1 < 0
- || modref_access_node::closer_pair_p
- (*a2, (*accesses)[j],
- (*accesses)[best1],
- best2 < 0 ? a : (*accesses)[best2]))
- {
- best1 = i;
- best2 = j;
- }
- if (modref_access_node::closer_pair_p
- (*a2, a,
- (*accesses)[best1],
- best2 < 0 ? a : (*accesses)[best2]))
- {
- best1 = i;
- best2 = -1;
- }
- }
- (*accesses)[best1].forced_merge (best2 < 0 ? a : (*accesses)[best2],
- record_adjustments);
- /* Check that merging indeed merged ranges. */
- gcc_checking_assert ((*accesses)[best1].contains
- (best2 < 0 ? a : (*accesses)[best2]));
- if (!(*accesses)[best1].useful_p ())
- {
- collapse ();
- if (dump_file)
- fprintf (dump_file,
- "--param param=modref-max-accesses limit reached;"
- " collapsing\n");
- return true;
- }
- if (dump_file && best2 >= 0)
- fprintf (dump_file,
- "--param param=modref-max-accesses limit reached;"
- " merging %i and %i\n", best1, best2);
- else if (dump_file)
+ if (dump_file)
fprintf (dump_file,
"--param param=modref-max-accesses limit reached;"
- " merging with %i\n", best1);
- try_merge_with (best1);
- if (best2 >= 0)
- insert_access (a, max_accesses, record_adjustments);
- return 1;
+ " collapsing\n");
+ collapse ();
}
- a.adjustments = 0;
- vec_safe_push (accesses, a);
- return true;
- }
-private:
- /* Try to optimize the access list after entry INDEX was modified. */
- void
- try_merge_with (size_t index)
- {
- size_t i;
-
- for (i = 0; i < accesses->length ();)
- if (i != index)
- {
- bool found = false, restart = false;
- modref_access_node *a = &(*accesses)[i];
- modref_access_node *n = &(*accesses)[index];
-
- if (n->contains (*a))
- found = true;
- if (!found && n->merge (*a, false))
- found = restart = true;
- gcc_checking_assert (found || !a->merge (*n, false));
- if (found)
- {
- accesses->unordered_remove (i);
- if (index == accesses->length ())
- {
- index = i;
- i++;
- }
- if (restart)
- i = 0;
- }
- else
- i++;
- }
- else
- i++;
+ return ret != 0;
}
};