Diffstat (limited to 'gcc/tree-data-ref.c')
-rw-r--r--  gcc/tree-data-ref.c  414
1 file changed, 198 insertions(+), 216 deletions(-)
diff --git a/gcc/tree-data-ref.c b/gcc/tree-data-ref.c
index 124a7be..e061baa 100644
--- a/gcc/tree-data-ref.c
+++ b/gcc/tree-data-ref.c
@@ -97,8 +97,8 @@ along with GCC; see the file COPYING3. If not see
#include "tree-eh.h"
#include "ssa.h"
#include "internal-fn.h"
-#include "range-op.h"
#include "vr-values.h"
+#include "range-op.h"
static struct datadep_stats
{
@@ -170,10 +170,7 @@ ref_contains_union_access_p (tree ref)
static void
dump_data_references (FILE *file, vec<data_reference_p> datarefs)
{
- unsigned int i;
- struct data_reference *dr;
-
- FOR_EACH_VEC_ELT (datarefs, i, dr)
+ for (data_reference *dr : datarefs)
dump_data_reference (file, dr);
}
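The FOR_EACH_VEC_ELT conversions here and throughout the patch rely on GCC's vec<> working with C++11 range-based for.  A minimal sketch of the two forms used in this file, assuming gcc/vec.h provides begin/end both as vec<T> members and as overloads taking a vec<T> * (so that iterating through a pointer, as create_runtime_alias_checks does further down, also compiles):

    /* Illustration only, not part of the patch.  */
    vec<data_reference_p> datarefs;		/* iterate the vec directly */
    for (data_reference *dr : datarefs)
      dump_data_reference (file, dr);

    const vec<dr_with_seg_len_pair_t> *alias_pairs;	/* or through a pointer */
    for (const dr_with_seg_len_pair_t &alias_pair : alias_pairs)
      gcc_assert (alias_pair.flags);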
@@ -378,10 +375,7 @@ DEBUG_FUNCTION void
print_dir_vectors (FILE *outf, vec<lambda_vector> dir_vects,
int length)
{
- unsigned j;
- lambda_vector v;
-
- FOR_EACH_VEC_ELT (dir_vects, j, v)
+ for (lambda_vector v : dir_vects)
print_direction_vector (outf, v, length);
}
@@ -403,18 +397,14 @@ DEBUG_FUNCTION void
print_dist_vectors (FILE *outf, vec<lambda_vector> dist_vects,
int length)
{
- unsigned j;
- lambda_vector v;
-
- FOR_EACH_VEC_ELT (dist_vects, j, v)
+ for (lambda_vector v : dist_vects)
print_lambda_vector (outf, v, length);
}
/* Dump function for a DATA_DEPENDENCE_RELATION structure. */
DEBUG_FUNCTION void
-dump_data_dependence_relation (FILE *outf,
- struct data_dependence_relation *ddr)
+dump_data_dependence_relation (FILE *outf, const data_dependence_relation *ddr)
{
struct data_reference *dra, *drb;
@@ -488,7 +478,7 @@ dump_data_dependence_relation (FILE *outf,
/* Debug version. */
DEBUG_FUNCTION void
-debug_data_dependence_relation (struct data_dependence_relation *ddr)
+debug_data_dependence_relation (const struct data_dependence_relation *ddr)
{
dump_data_dependence_relation (stderr, ddr);
}
@@ -496,13 +486,9 @@ debug_data_dependence_relation (struct data_dependence_relation *ddr)
/* Dump into FILE all the dependence relations from DDRS. */
DEBUG_FUNCTION void
-dump_data_dependence_relations (FILE *file,
- vec<ddr_p> ddrs)
+dump_data_dependence_relations (FILE *file, const vec<ddr_p> &ddrs)
{
- unsigned int i;
- struct data_dependence_relation *ddr;
-
- FOR_EACH_VEC_ELT (ddrs, i, ddr)
+ for (auto ddr : ddrs)
dump_data_dependence_relation (file, ddr);
}
@@ -538,21 +524,17 @@ debug_data_dependence_relations (vec<ddr_p> ddrs)
DEBUG_FUNCTION void
dump_dist_dir_vectors (FILE *file, vec<ddr_p> ddrs)
{
- unsigned int i, j;
- struct data_dependence_relation *ddr;
- lambda_vector v;
-
- FOR_EACH_VEC_ELT (ddrs, i, ddr)
+ for (data_dependence_relation *ddr : ddrs)
if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE && DDR_AFFINE_P (ddr))
{
- FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), j, v)
+ for (lambda_vector v : DDR_DIST_VECTS (ddr))
{
fprintf (file, "DISTANCE_V (");
print_lambda_vector (file, v, DDR_NB_LOOPS (ddr));
fprintf (file, ")\n");
}
- FOR_EACH_VEC_ELT (DDR_DIR_VECTS (ddr), j, v)
+ for (lambda_vector v : DDR_DIR_VECTS (ddr))
{
fprintf (file, "DIRECTION_V (");
print_direction_vector (file, v, DDR_NB_LOOPS (ddr));
@@ -568,10 +550,7 @@ dump_dist_dir_vectors (FILE *file, vec<ddr_p> ddrs)
DEBUG_FUNCTION void
dump_ddrs (FILE *file, vec<ddr_p> ddrs)
{
- unsigned int i;
- struct data_dependence_relation *ddr;
-
- FOR_EACH_VEC_ELT (ddrs, i, ddr)
+ for (data_dependence_relation *ddr : ddrs)
dump_data_dependence_relation (file, ddr);
fprintf (file, "\n\n");
@@ -1035,14 +1014,23 @@ split_constant_offset (tree exp, tree *var, tree *off, value_range *exp_range,
*exp_range = type;
if (code == SSA_NAME)
{
- wide_int var_min, var_max;
- value_range_kind vr_kind = get_range_info (exp, &var_min, &var_max);
+ value_range vr;
+ get_range_query (cfun)->range_of_expr (vr, exp);
+ if (vr.undefined_p ())
+ vr.set_varying (TREE_TYPE (exp));
+ wide_int var_min = wi::to_wide (vr.min ());
+ wide_int var_max = wi::to_wide (vr.max ());
+ value_range_kind vr_kind = vr.kind ();
wide_int var_nonzero = get_nonzero_bits (exp);
vr_kind = intersect_range_with_nonzero_bits (vr_kind,
&var_min, &var_max,
var_nonzero,
TYPE_SIGN (type));
- if (vr_kind == VR_RANGE)
+ /* This check for VR_VARYING is here because the old code
+ using get_range_info would return VR_RANGE for the entire
+ domain, instead of VR_VARYING. The new code normalizes
+ full-domain ranges to VR_VARYING. */
+ if (vr_kind == VR_RANGE || vr_kind == VR_VARYING)
*exp_range = value_range (type, var_min, var_max);
}
}
@@ -1060,12 +1048,12 @@ split_constant_offset (tree exp, tree *var, tree *off, value_range *exp_range,
if (INTEGRAL_TYPE_P (type))
*var = fold_convert (sizetype, *var);
*off = ssize_int (0);
- if (exp_range && code != SSA_NAME)
- {
- wide_int var_min, var_max;
- if (determine_value_range (exp, &var_min, &var_max) == VR_RANGE)
- *exp_range = value_range (type, var_min, var_max);
- }
+
+ value_range r;
+ if (exp_range && code != SSA_NAME
+ && get_range_query (cfun)->range_of_expr (r, exp)
+ && !r.undefined_p ())
+ *exp_range = r;
}
/* Expresses EXP as VAR + OFF, where OFF is a constant. VAR has the same
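The two hunks above replace the legacy get_range_info / determine_value_range calls with the shared range-query interface.  A minimal sketch of the new pattern, assuming the value_range members used in the hunks (range_of_expr reporting whether a range was computed, undefined_p, kind, min/max) and the same exp/cfun as in split_constant_offset:

    /* Illustration only, not part of the patch: query the range of EXP
       on demand instead of reading the cached SSA_NAME range info.  */
    value_range vr;
    bool have_range = (get_range_query (cfun)->range_of_expr (vr, exp)
		       && !vr.undefined_p ());
    if (have_range && (vr.kind () == VR_RANGE || vr.kind () == VR_VARYING))
      {
	/* Endpoints of the range; for VR_VARYING this is the whole domain
	   of the type, which the old get_range_info interface reported as
	   a VR_RANGE.  */
	wide_int lo = wi::to_wide (vr.min ());
	wide_int hi = wi::to_wide (vr.max ());
      }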
@@ -2147,8 +2135,8 @@ create_intersect_range_checks_index (class loop *loop, tree *cond_expr,
bool waw_or_war_p = (alias_pair.flags & ~(DR_ALIAS_WAR | DR_ALIAS_WAW)) == 0;
- unsigned int i;
- for (i = 0; i < DR_NUM_DIMENSIONS (dr_a.dr); i++)
+ int found = -1;
+ for (unsigned int i = 0; i < DR_NUM_DIMENSIONS (dr_a.dr); i++)
{
tree access1 = DR_ACCESS_FN (dr_a.dr, i);
tree access2 = DR_ACCESS_FN (dr_b.dr, i);
@@ -2164,155 +2152,166 @@ create_intersect_range_checks_index (class loop *loop, tree *cond_expr,
return false;
}
- /* The two indices must have the same step. */
- if (!operand_equal_p (CHREC_RIGHT (access1), CHREC_RIGHT (access2), 0))
+ if (found >= 0)
return false;
+ found = i;
+ }
+
+ /* Ought not to happen in practice, since if all accesses are equal then the
+ alias should be decidable at compile time. */
+ if (found < 0)
+ return false;
- tree idx_step = CHREC_RIGHT (access1);
- /* Index must have const step, otherwise DR_STEP won't be constant. */
- gcc_assert (TREE_CODE (idx_step) == INTEGER_CST);
- /* Index must evaluate in the same direction as DR. */
- gcc_assert (!neg_step || tree_int_cst_sign_bit (idx_step) == 1);
+ /* The two indices must have the same step. */
+ tree access1 = DR_ACCESS_FN (dr_a.dr, found);
+ tree access2 = DR_ACCESS_FN (dr_b.dr, found);
+ if (!operand_equal_p (CHREC_RIGHT (access1), CHREC_RIGHT (access2), 0))
+ return false;
- tree min1 = CHREC_LEFT (access1);
- tree min2 = CHREC_LEFT (access2);
- if (!types_compatible_p (TREE_TYPE (min1), TREE_TYPE (min2)))
- return false;
+ tree idx_step = CHREC_RIGHT (access1);
+ /* Index must have const step, otherwise DR_STEP won't be constant. */
+ gcc_assert (TREE_CODE (idx_step) == INTEGER_CST);
+ /* Index must evaluate in the same direction as DR. */
+ gcc_assert (!neg_step || tree_int_cst_sign_bit (idx_step) == 1);
- /* Ideally, alias can be checked against loop's control IV, but we
- need to prove linear mapping between control IV and reference
- index. Although that should be true, we check against (array)
- index of data reference. Like segment length, index length is
- linear function of the number of iterations with index_step as
- the coefficient, i.e, niter_len * idx_step. */
- offset_int abs_idx_step = offset_int::from (wi::to_wide (idx_step),
- SIGNED);
- if (neg_step)
- abs_idx_step = -abs_idx_step;
- poly_offset_int idx_len1 = abs_idx_step * niter_len1;
- poly_offset_int idx_len2 = abs_idx_step * niter_len2;
- poly_offset_int idx_access1 = abs_idx_step * niter_access1;
- poly_offset_int idx_access2 = abs_idx_step * niter_access2;
+ tree min1 = CHREC_LEFT (access1);
+ tree min2 = CHREC_LEFT (access2);
+ if (!types_compatible_p (TREE_TYPE (min1), TREE_TYPE (min2)))
+ return false;
+
+ /* Ideally, alias can be checked against loop's control IV, but we
+ need to prove linear mapping between control IV and reference
+ index. Although that should be true, we check against (array)
+ index of data reference. Like segment length, index length is
+ linear function of the number of iterations with index_step as
+ the coefficient, i.e, niter_len * idx_step. */
+ offset_int abs_idx_step = offset_int::from (wi::to_wide (idx_step),
+ SIGNED);
+ if (neg_step)
+ abs_idx_step = -abs_idx_step;
+ poly_offset_int idx_len1 = abs_idx_step * niter_len1;
+ poly_offset_int idx_len2 = abs_idx_step * niter_len2;
+ poly_offset_int idx_access1 = abs_idx_step * niter_access1;
+ poly_offset_int idx_access2 = abs_idx_step * niter_access2;
- gcc_assert (known_ge (idx_len1, 0)
- && known_ge (idx_len2, 0)
- && known_ge (idx_access1, 0)
- && known_ge (idx_access2, 0));
+ gcc_assert (known_ge (idx_len1, 0)
+ && known_ge (idx_len2, 0)
+ && known_ge (idx_access1, 0)
+ && known_ge (idx_access2, 0));
- /* Each access has the following pattern, with lengths measured
- in units of INDEX:
+ /* Each access has the following pattern, with lengths measured
+ in units of INDEX:
- <-- idx_len -->
- <--- A: -ve step --->
- +-----+-------+-----+-------+-----+
- | n-1 | ..... | 0 | ..... | n-1 |
- +-----+-------+-----+-------+-----+
- <--- B: +ve step --->
- <-- idx_len -->
- |
- min
+ <-- idx_len -->
+ <--- A: -ve step --->
+ +-----+-------+-----+-------+-----+
+ | n-1 | ..... | 0 | ..... | n-1 |
+ +-----+-------+-----+-------+-----+
+ <--- B: +ve step --->
+ <-- idx_len -->
+ |
+ min
- where "n" is the number of scalar iterations covered by the segment
- and where each access spans idx_access units.
+ where "n" is the number of scalar iterations covered by the segment
+ and where each access spans idx_access units.
- A is the range of bytes accessed when the step is negative,
- B is the range when the step is positive.
+ A is the range of bytes accessed when the step is negative,
+ B is the range when the step is positive.
- When checking for general overlap, we need to test whether
- the range:
+ When checking for general overlap, we need to test whether
+ the range:
- [min1 + low_offset1, min2 + high_offset1 + idx_access1 - 1]
+ [min1 + low_offset1, min1 + high_offset1 + idx_access1 - 1]
- overlaps:
+ overlaps:
- [min2 + low_offset2, min2 + high_offset2 + idx_access2 - 1]
+ [min2 + low_offset2, min2 + high_offset2 + idx_access2 - 1]
- where:
+ where:
- low_offsetN = +ve step ? 0 : -idx_lenN;
- high_offsetN = +ve step ? idx_lenN : 0;
+ low_offsetN = +ve step ? 0 : -idx_lenN;
+ high_offsetN = +ve step ? idx_lenN : 0;
- This is equivalent to testing whether:
+ This is equivalent to testing whether:
- min1 + low_offset1 <= min2 + high_offset2 + idx_access2 - 1
- && min2 + low_offset2 <= min1 + high_offset1 + idx_access1 - 1
+ min1 + low_offset1 <= min2 + high_offset2 + idx_access2 - 1
+ && min2 + low_offset2 <= min1 + high_offset1 + idx_access1 - 1
- Converting this into a single test, there is an overlap if:
+ Converting this into a single test, there is an overlap if:
- 0 <= min2 - min1 + bias <= limit
+ 0 <= min2 - min1 + bias <= limit
- where bias = high_offset2 + idx_access2 - 1 - low_offset1
- limit = (high_offset1 - low_offset1 + idx_access1 - 1)
- + (high_offset2 - low_offset2 + idx_access2 - 1)
- i.e. limit = idx_len1 + idx_access1 - 1 + idx_len2 + idx_access2 - 1
+ where bias = high_offset2 + idx_access2 - 1 - low_offset1
+ limit = (high_offset1 - low_offset1 + idx_access1 - 1)
+ + (high_offset2 - low_offset2 + idx_access2 - 1)
+ i.e. limit = idx_len1 + idx_access1 - 1 + idx_len2 + idx_access2 - 1
- Combining the tests requires limit to be computable in an unsigned
- form of the index type; if it isn't, we fall back to the usual
- pointer-based checks.
+ Combining the tests requires limit to be computable in an unsigned
+ form of the index type; if it isn't, we fall back to the usual
+ pointer-based checks.
- We can do better if DR_B is a write and if DR_A and DR_B are
- well-ordered in both the original and the new code (see the
- comment above the DR_ALIAS_* flags for details). In this case
- we know that for each i in [0, n-1], the write performed by
- access i of DR_B occurs after access numbers j<=i of DR_A in
- both the original and the new code. Any write or anti
- dependencies wrt those DR_A accesses are therefore maintained.
+ We can do better if DR_B is a write and if DR_A and DR_B are
+ well-ordered in both the original and the new code (see the
+ comment above the DR_ALIAS_* flags for details). In this case
+ we know that for each i in [0, n-1], the write performed by
+ access i of DR_B occurs after access numbers j<=i of DR_A in
+ both the original and the new code. Any write or anti
+ dependencies wrt those DR_A accesses are therefore maintained.
- We just need to make sure that each individual write in DR_B does not
- overlap any higher-indexed access in DR_A; such DR_A accesses happen
- after the DR_B access in the original code but happen before it in
- the new code.
+ We just need to make sure that each individual write in DR_B does not
+ overlap any higher-indexed access in DR_A; such DR_A accesses happen
+ after the DR_B access in the original code but happen before it in
+ the new code.
- We know the steps for both accesses are equal, so by induction, we
- just need to test whether the first write of DR_B overlaps a later
- access of DR_A. In other words, we need to move min1 along by
- one iteration:
+ We know the steps for both accesses are equal, so by induction, we
+ just need to test whether the first write of DR_B overlaps a later
+ access of DR_A. In other words, we need to move min1 along by
+ one iteration:
- min1' = min1 + idx_step
+ min1' = min1 + idx_step
- and use the ranges:
+ and use the ranges:
- [min1' + low_offset1', min1' + high_offset1' + idx_access1 - 1]
+ [min1' + low_offset1', min1' + high_offset1' + idx_access1 - 1]
- and:
+ and:
- [min2, min2 + idx_access2 - 1]
+ [min2, min2 + idx_access2 - 1]
- where:
+ where:
- low_offset1' = +ve step ? 0 : -(idx_len1 - |idx_step|)
- high_offset1' = +ve_step ? idx_len1 - |idx_step| : 0. */
- if (waw_or_war_p)
- idx_len1 -= abs_idx_step;
+ low_offset1' = +ve step ? 0 : -(idx_len1 - |idx_step|)
+ high_offset1' = +ve_step ? idx_len1 - |idx_step| : 0. */
+ if (waw_or_war_p)
+ idx_len1 -= abs_idx_step;
- poly_offset_int limit = idx_len1 + idx_access1 - 1 + idx_access2 - 1;
- if (!waw_or_war_p)
- limit += idx_len2;
+ poly_offset_int limit = idx_len1 + idx_access1 - 1 + idx_access2 - 1;
+ if (!waw_or_war_p)
+ limit += idx_len2;
- tree utype = unsigned_type_for (TREE_TYPE (min1));
- if (!wi::fits_to_tree_p (limit, utype))
- return false;
+ tree utype = unsigned_type_for (TREE_TYPE (min1));
+ if (!wi::fits_to_tree_p (limit, utype))
+ return false;
- poly_offset_int low_offset1 = neg_step ? -idx_len1 : 0;
- poly_offset_int high_offset2 = neg_step || waw_or_war_p ? 0 : idx_len2;
- poly_offset_int bias = high_offset2 + idx_access2 - 1 - low_offset1;
- /* Equivalent to adding IDX_STEP to MIN1. */
- if (waw_or_war_p)
- bias -= wi::to_offset (idx_step);
-
- tree subject = fold_build2 (MINUS_EXPR, utype,
- fold_convert (utype, min2),
- fold_convert (utype, min1));
- subject = fold_build2 (PLUS_EXPR, utype, subject,
- wide_int_to_tree (utype, bias));
- tree part_cond_expr = fold_build2 (GT_EXPR, boolean_type_node, subject,
- wide_int_to_tree (utype, limit));
- if (*cond_expr)
- *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
- *cond_expr, part_cond_expr);
- else
- *cond_expr = part_cond_expr;
- }
+ poly_offset_int low_offset1 = neg_step ? -idx_len1 : 0;
+ poly_offset_int high_offset2 = neg_step || waw_or_war_p ? 0 : idx_len2;
+ poly_offset_int bias = high_offset2 + idx_access2 - 1 - low_offset1;
+ /* Equivalent to adding IDX_STEP to MIN1. */
+ if (waw_or_war_p)
+ bias -= wi::to_offset (idx_step);
+
+ tree subject = fold_build2 (MINUS_EXPR, utype,
+ fold_convert (utype, min2),
+ fold_convert (utype, min1));
+ subject = fold_build2 (PLUS_EXPR, utype, subject,
+ wide_int_to_tree (utype, bias));
+ tree part_cond_expr = fold_build2 (GT_EXPR, boolean_type_node, subject,
+ wide_int_to_tree (utype, limit));
+ if (*cond_expr)
+ *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
+ *cond_expr, part_cond_expr);
+ else
+ *cond_expr = part_cond_expr;
if (dump_enabled_p ())
{
if (waw_or_war_p)
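A worked instance of the bias/limit test described in the comment above (illustration only; the numbers are made up): take a positive step with idx_step = 1, segments covering 16 iterations, scalar accesses and waw_or_war_p == false, so that

    idx_len1 = idx_len2 = 16,  idx_access1 = idx_access2 = 1
    bias  = high_offset2 + idx_access2 - 1 - low_offset1 = 16 + 1 - 1 - 0 = 16
    limit = idx_len1 + idx_access1 - 1 + idx_len2 + idx_access2 - 1      = 32

With starting indices min1 = 0 and min2 = 20, subject = (unsigned) (20 - 0) + 16 = 36 > 32, so the generated condition is true and the references are known not to overlap; with min2 = 8 the subject is 24 <= 32 and a possible overlap is reported.  Because subject is computed in the unsigned index type, a difference min2 - min1 below -bias wraps to a large value and also compares greater than limit, which is what folds the two signed comparisons into the single test.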
@@ -2642,25 +2641,23 @@ create_intersect_range_checks (class loop *loop, tree *cond_expr,
void
create_runtime_alias_checks (class loop *loop,
- vec<dr_with_seg_len_pair_t> *alias_pairs,
+ const vec<dr_with_seg_len_pair_t> *alias_pairs,
tree * cond_expr)
{
tree part_cond_expr;
fold_defer_overflow_warnings ();
- dr_with_seg_len_pair_t *alias_pair;
- unsigned int i;
- FOR_EACH_VEC_ELT (*alias_pairs, i, alias_pair)
+ for (const dr_with_seg_len_pair_t &alias_pair : alias_pairs)
{
- gcc_assert (alias_pair->flags);
+ gcc_assert (alias_pair.flags);
if (dump_enabled_p ())
dump_printf (MSG_NOTE,
"create runtime check for data references %T and %T\n",
- DR_REF (alias_pair->first.dr),
- DR_REF (alias_pair->second.dr));
+ DR_REF (alias_pair.first.dr),
+ DR_REF (alias_pair.second.dr));
/* Create condition expression for each pair data references. */
- create_intersect_range_checks (loop, &part_cond_expr, *alias_pair);
+ create_intersect_range_checks (loop, &part_cond_expr, alias_pair);
if (*cond_expr)
*cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
*cond_expr, part_cond_expr);
@@ -3416,10 +3413,7 @@ free_conflict_function (conflict_function *f)
static void
free_subscripts (vec<subscript_p> subscripts)
{
- unsigned i;
- subscript_p s;
-
- FOR_EACH_VEC_ELT (subscripts, i, s)
+ for (subscript_p s : subscripts)
{
free_conflict_function (s->conflicting_iterations_in_a);
free_conflict_function (s->conflicting_iterations_in_b);
@@ -4960,10 +4954,7 @@ analyze_overlapping_iterations (tree chrec_a,
static void
save_dist_v (struct data_dependence_relation *ddr, lambda_vector dist_v)
{
- unsigned i;
- lambda_vector v;
-
- FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, v)
+ for (lambda_vector v : DDR_DIST_VECTS (ddr))
if (lambda_vector_equal (v, dist_v, DDR_NB_LOOPS (ddr)))
return;
@@ -4975,10 +4966,7 @@ save_dist_v (struct data_dependence_relation *ddr, lambda_vector dist_v)
static void
save_dir_v (struct data_dependence_relation *ddr, lambda_vector dir_v)
{
- unsigned i;
- lambda_vector v;
-
- FOR_EACH_VEC_ELT (DDR_DIR_VECTS (ddr), i, v)
+ for (lambda_vector v : DDR_DIR_VECTS (ddr))
if (lambda_vector_equal (v, dir_v, DDR_NB_LOOPS (ddr)))
return;
@@ -5101,6 +5089,8 @@ build_classic_dist_vector_1 (struct data_dependence_relation *ddr,
non_affine_dependence_relation (ddr);
return false;
}
+ else
+ *init_b = true;
}
return true;
@@ -5113,10 +5103,7 @@ static bool
invariant_access_functions (const struct data_dependence_relation *ddr,
int lnum)
{
- unsigned i;
- subscript *sub;
-
- FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub)
+ for (subscript *sub : DDR_SUBSCRIPTS (ddr))
if (!evolution_function_is_invariant_p (SUB_ACCESS_FN (sub, 0), lnum)
|| !evolution_function_is_invariant_p (SUB_ACCESS_FN (sub, 1), lnum))
return false;
@@ -5285,10 +5272,7 @@ add_distance_for_zero_overlaps (struct data_dependence_relation *ddr)
static inline bool
same_access_functions (const struct data_dependence_relation *ddr)
{
- unsigned i;
- subscript *sub;
-
- FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub)
+ for (subscript *sub : DDR_SUBSCRIPTS (ddr))
if (!eq_evolutions_p (SUB_ACCESS_FN (sub, 0),
SUB_ACCESS_FN (sub, 1)))
return false;
@@ -5565,11 +5549,8 @@ static bool
access_functions_are_affine_or_constant_p (const struct data_reference *a,
const class loop *loop_nest)
{
- unsigned int i;
vec<tree> fns = DR_ACCESS_FNS (a);
- tree t;
-
- FOR_EACH_VEC_ELT (fns, i, t)
+ for (tree t : fns)
if (!evolution_function_is_invariant_p (t, loop_nest->num)
&& !evolution_function_is_affine_multivariate_p (t, loop_nest->num))
return false;
@@ -5596,9 +5577,13 @@ compute_affine_dependence (struct data_dependence_relation *ddr,
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "(compute_affine_dependence\n");
- fprintf (dump_file, " stmt_a: ");
+ fprintf (dump_file, " ref_a: ");
+ print_generic_expr (dump_file, DR_REF (dra));
+ fprintf (dump_file, ", stmt_a: ");
print_gimple_stmt (dump_file, DR_STMT (dra), 0, TDF_SLIM);
- fprintf (dump_file, " stmt_b: ");
+ fprintf (dump_file, " ref_b: ");
+ print_generic_expr (dump_file, DR_REF (drb));
+ fprintf (dump_file, ", stmt_b: ");
print_gimple_stmt (dump_file, DR_STMT (drb), 0, TDF_SLIM);
}
@@ -5648,9 +5633,9 @@ compute_affine_dependence (struct data_dependence_relation *ddr,
is small enough to be handled. */
bool
-compute_all_dependences (vec<data_reference_p> datarefs,
+compute_all_dependences (const vec<data_reference_p> &datarefs,
vec<ddr_p> *dependence_relations,
- vec<loop_p> loop_nest,
+ const vec<loop_p> &loop_nest,
bool compute_self_and_rr)
{
struct data_dependence_relation *ddr;
@@ -5876,20 +5861,18 @@ opt_result
find_data_references_in_stmt (class loop *nest, gimple *stmt,
vec<data_reference_p> *datarefs)
{
- unsigned i;
auto_vec<data_ref_loc, 2> references;
- data_ref_loc *ref;
data_reference_p dr;
if (get_references_in_stmt (stmt, &references))
return opt_result::failure_at (stmt, "statement clobbers memory: %G",
stmt);
- FOR_EACH_VEC_ELT (references, i, ref)
+ for (const data_ref_loc &ref : references)
{
dr = create_data_ref (nest ? loop_preheader_edge (nest) : NULL,
- loop_containing_stmt (stmt), ref->ref,
- stmt, ref->is_read, ref->is_conditional_in_stmt);
+ loop_containing_stmt (stmt), ref.ref,
+ stmt, ref.is_read, ref.is_conditional_in_stmt);
gcc_assert (dr != NULL);
datarefs->safe_push (dr);
}
@@ -5907,19 +5890,17 @@ bool
graphite_find_data_references_in_stmt (edge nest, loop_p loop, gimple *stmt,
vec<data_reference_p> *datarefs)
{
- unsigned i;
auto_vec<data_ref_loc, 2> references;
- data_ref_loc *ref;
bool ret = true;
data_reference_p dr;
if (get_references_in_stmt (stmt, &references))
return false;
- FOR_EACH_VEC_ELT (references, i, ref)
+ for (const data_ref_loc &ref : references)
{
- dr = create_data_ref (nest, loop, ref->ref, stmt, ref->is_read,
- ref->is_conditional_in_stmt);
+ dr = create_data_ref (nest, loop, ref.ref, stmt, ref.is_read,
+ ref.is_conditional_in_stmt);
gcc_assert (dr != NULL);
datarefs->safe_push (dr);
}
@@ -6225,12 +6206,9 @@ free_dependence_relation (struct data_dependence_relation *ddr)
DEPENDENCE_RELATIONS. */
void
-free_dependence_relations (vec<ddr_p> dependence_relations)
+free_dependence_relations (vec<ddr_p>& dependence_relations)
{
- unsigned int i;
- struct data_dependence_relation *ddr;
-
- FOR_EACH_VEC_ELT (dependence_relations, i, ddr)
+ for (data_dependence_relation *ddr : dependence_relations)
if (ddr)
free_dependence_relation (ddr);
@@ -6240,12 +6218,9 @@ free_dependence_relations (vec<ddr_p> dependence_relations)
/* Free the memory used by the data references from DATAREFS. */
void
-free_data_refs (vec<data_reference_p> datarefs)
+free_data_refs (vec<data_reference_p>& datarefs)
{
- unsigned int i;
- struct data_reference *dr;
-
- FOR_EACH_VEC_ELT (datarefs, i, dr)
+ for (data_reference *dr : datarefs)
free_data_ref (dr);
datarefs.release ();
}
@@ -6287,12 +6262,19 @@ dr_step_indicator (struct data_reference *dr, int useful_min)
/* Get the range of values that the unconverted step actually has. */
wide_int step_min, step_max;
+ value_range vr;
if (TREE_CODE (step) != SSA_NAME
- || get_range_info (step, &step_min, &step_max) != VR_RANGE)
+ || !get_range_query (cfun)->range_of_expr (vr, step)
+ || vr.kind () != VR_RANGE)
{
step_min = wi::to_wide (TYPE_MIN_VALUE (type));
step_max = wi::to_wide (TYPE_MAX_VALUE (type));
}
+ else
+ {
+ step_min = vr.lower_bound ();
+ step_max = vr.upper_bound ();
+ }
/* Check whether the unconverted step has an acceptable range. */
signop sgn = TYPE_SIGN (type);