author | Ian Lance Taylor <ian@gcc.gnu.org> | 2019-12-13 22:10:44 +0000
committer | Ian Lance Taylor <ian@gcc.gnu.org> | 2019-12-13 22:10:44 +0000
commit | 51426017f8fe0f18295ca467feba3fbb5aad3fa8 (patch)
tree | 2f686f2d4657aa570473986e7d0924794093c67b /gcc/tree-data-ref.c
parent | 0cec14923830569b8727d461bcf64adaf965de83 (diff)
parent | c926fd82bbd336b317266d43b9fa67a83397b06b (diff)
Merge from trunk revision 279830.
From-SVN: r279387
Diffstat (limited to 'gcc/tree-data-ref.c')
-rw-r--r-- | gcc/tree-data-ref.c | 640 |
1 file changed, 547 insertions, 93 deletions
diff --git a/gcc/tree-data-ref.c b/gcc/tree-data-ref.c index 7f75b7e..7ef891d 100644 --- a/gcc/tree-data-ref.c +++ b/gcc/tree-data-ref.c @@ -93,10 +93,10 @@ along with GCC; see the file COPYING3. If not see #include "tree-scalar-evolution.h" #include "dumpfile.h" #include "tree-affine.h" -#include "params.h" #include "builtins.h" #include "tree-eh.h" #include "ssa.h" +#include "internal-fn.h" static struct datadep_stats { @@ -836,7 +836,7 @@ split_constant_offset (tree exp, tree *var, tree *off, void split_constant_offset (tree exp, tree *var, tree *off) { - unsigned limit = PARAM_VALUE (PARAM_SSA_NAME_DEF_CHAIN_LIMIT); + unsigned limit = param_ssa_name_def_chain_limit; static hash_map<tree, std::pair<tree, tree> > *cache; if (!cache) cache = new hash_map<tree, std::pair<tree, tree> > (37); @@ -1453,6 +1453,54 @@ comp_dr_with_seg_len_pair (const void *pa_, const void *pb_) return 0; } +/* Dump information about ALIAS_PAIR, indenting each line by INDENT. */ + +static void +dump_alias_pair (dr_with_seg_len_pair_t *alias_pair, const char *indent) +{ + dump_printf (MSG_NOTE, "%sreference: %T vs. %T\n", indent, + DR_REF (alias_pair->first.dr), + DR_REF (alias_pair->second.dr)); + + dump_printf (MSG_NOTE, "%ssegment length: %T", indent, + alias_pair->first.seg_len); + if (!operand_equal_p (alias_pair->first.seg_len, + alias_pair->second.seg_len, 0)) + dump_printf (MSG_NOTE, " vs. %T", alias_pair->second.seg_len); + + dump_printf (MSG_NOTE, "\n%saccess size: ", indent); + dump_dec (MSG_NOTE, alias_pair->first.access_size); + if (maybe_ne (alias_pair->first.access_size, alias_pair->second.access_size)) + { + dump_printf (MSG_NOTE, " vs. "); + dump_dec (MSG_NOTE, alias_pair->second.access_size); + } + + dump_printf (MSG_NOTE, "\n%salignment: %d", indent, + alias_pair->first.align); + if (alias_pair->first.align != alias_pair->second.align) + dump_printf (MSG_NOTE, " vs. %d", alias_pair->second.align); + + dump_printf (MSG_NOTE, "\n%sflags: ", indent); + if (alias_pair->flags & DR_ALIAS_RAW) + dump_printf (MSG_NOTE, " RAW"); + if (alias_pair->flags & DR_ALIAS_WAR) + dump_printf (MSG_NOTE, " WAR"); + if (alias_pair->flags & DR_ALIAS_WAW) + dump_printf (MSG_NOTE, " WAW"); + if (alias_pair->flags & DR_ALIAS_ARBITRARY) + dump_printf (MSG_NOTE, " ARBITRARY"); + if (alias_pair->flags & DR_ALIAS_SWAPPED) + dump_printf (MSG_NOTE, " SWAPPED"); + if (alias_pair->flags & DR_ALIAS_UNSWAPPED) + dump_printf (MSG_NOTE, " UNSWAPPED"); + if (alias_pair->flags & DR_ALIAS_MIXED_STEPS) + dump_printf (MSG_NOTE, " MIXED_STEPS"); + if (alias_pair->flags == 0) + dump_printf (MSG_NOTE, " <none>"); + dump_printf (MSG_NOTE, "\n"); +} + /* Merge alias checks recorded in ALIAS_PAIRS and remove redundant ones. FACTOR is number of iterations that each data reference is accessed. @@ -1487,19 +1535,50 @@ void prune_runtime_alias_test_list (vec<dr_with_seg_len_pair_t> *alias_pairs, poly_uint64) { + if (alias_pairs->is_empty ()) + return; + + /* Canonicalize each pair so that the base components are ordered wrt + data_ref_compare_tree. This allows the loop below to merge more + cases. 
*/ + unsigned int i; + dr_with_seg_len_pair_t *alias_pair; + FOR_EACH_VEC_ELT (*alias_pairs, i, alias_pair) + { + data_reference_p dr_a = alias_pair->first.dr; + data_reference_p dr_b = alias_pair->second.dr; + int comp_res = data_ref_compare_tree (DR_BASE_ADDRESS (dr_a), + DR_BASE_ADDRESS (dr_b)); + if (comp_res == 0) + comp_res = data_ref_compare_tree (DR_OFFSET (dr_a), DR_OFFSET (dr_b)); + if (comp_res == 0) + comp_res = data_ref_compare_tree (DR_INIT (dr_a), DR_INIT (dr_b)); + if (comp_res > 0) + { + std::swap (alias_pair->first, alias_pair->second); + alias_pair->flags |= DR_ALIAS_SWAPPED; + } + else + alias_pair->flags |= DR_ALIAS_UNSWAPPED; + } + /* Sort the collected data ref pairs so that we can scan them once to combine all possible aliasing checks. */ alias_pairs->qsort (comp_dr_with_seg_len_pair); /* Scan the sorted dr pairs and check if we can combine alias checks of two neighboring dr pairs. */ - for (size_t i = 1; i < alias_pairs->length (); ++i) + unsigned int last = 0; + for (i = 1; i < alias_pairs->length (); ++i) { /* Deal with two ddrs (dr_a1, dr_b1) and (dr_a2, dr_b2). */ - dr_with_seg_len *dr_a1 = &(*alias_pairs)[i-1].first, - *dr_b1 = &(*alias_pairs)[i-1].second, - *dr_a2 = &(*alias_pairs)[i].first, - *dr_b2 = &(*alias_pairs)[i].second; + dr_with_seg_len_pair_t *alias_pair1 = &(*alias_pairs)[last]; + dr_with_seg_len_pair_t *alias_pair2 = &(*alias_pairs)[i]; + + dr_with_seg_len *dr_a1 = &alias_pair1->first; + dr_with_seg_len *dr_b1 = &alias_pair1->second; + dr_with_seg_len *dr_a2 = &alias_pair2->first; + dr_with_seg_len *dr_b2 = &alias_pair2->second; /* Remove duplicate data ref pairs. */ if (*dr_a1 == *dr_a2 && *dr_b1 == *dr_b2) @@ -1508,10 +1587,16 @@ prune_runtime_alias_test_list (vec<dr_with_seg_len_pair_t> *alias_pairs, dump_printf (MSG_NOTE, "found equal ranges %T, %T and %T, %T\n", DR_REF (dr_a1->dr), DR_REF (dr_b1->dr), DR_REF (dr_a2->dr), DR_REF (dr_b2->dr)); - alias_pairs->ordered_remove (i--); + alias_pair1->flags |= alias_pair2->flags; continue; } + /* Assume that we won't be able to merge the pairs, then correct + if we do. */ + last += 1; + if (last != i) + (*alias_pairs)[last] = (*alias_pairs)[i]; + if (*dr_a1 == *dr_a2 || *dr_b1 == *dr_b2) { /* We consider the case that DR_B1 and DR_B2 are same memrefs, @@ -1537,13 +1622,6 @@ prune_runtime_alias_test_list (vec<dr_with_seg_len_pair_t> *alias_pairs, if (!ordered_p (init_a1, init_a2)) continue; - /* Make sure dr_a1 starts left of dr_a2. */ - if (maybe_gt (init_a1, init_a2)) - { - std::swap (*dr_a1, *dr_a2); - std::swap (init_a1, init_a2); - } - /* Work out what the segment length would be if we did combine DR_A1 and DR_A2: @@ -1560,7 +1638,10 @@ prune_runtime_alias_test_list (vec<dr_with_seg_len_pair_t> *alias_pairs, The lengths both have sizetype, so the sign is taken from the step instead. 
*/ - if (!operand_equal_p (dr_a1->seg_len, dr_a2->seg_len, 0)) + poly_uint64 new_seg_len = 0; + bool new_seg_len_p = !operand_equal_p (dr_a1->seg_len, + dr_a2->seg_len, 0); + if (new_seg_len_p) { poly_uint64 seg_len_a1, seg_len_a2; if (!poly_int_tree_p (dr_a1->seg_len, &seg_len_a1) @@ -1578,14 +1659,29 @@ prune_runtime_alias_test_list (vec<dr_with_seg_len_pair_t> *alias_pairs, int sign_a = tree_int_cst_sgn (indicator_a); int sign_b = tree_int_cst_sgn (indicator_b); - poly_uint64 new_seg_len; if (sign_a <= 0 && sign_b <= 0) new_seg_len = lower_bound (seg_len_a1, seg_len_a2); else if (sign_a >= 0 && sign_b >= 0) new_seg_len = upper_bound (seg_len_a1, seg_len_a2); else continue; + } + /* At this point we're committed to merging the refs. */ + + /* Make sure dr_a1 starts left of dr_a2. */ + if (maybe_gt (init_a1, init_a2)) + { + std::swap (*dr_a1, *dr_a2); + std::swap (init_a1, init_a2); + } + + /* The DR_Bs are equal, so only the DR_As can introduce + mixed steps. */ + if (!operand_equal_p (DR_STEP (dr_a1->dr), DR_STEP (dr_a2->dr), 0)) + alias_pair1->flags |= DR_ALIAS_MIXED_STEPS; + if (new_seg_len_p) + { dr_a1->seg_len = build_int_cst (TREE_TYPE (dr_a1->seg_len), new_seg_len); dr_a1->align = MIN (dr_a1->align, known_alignment (new_seg_len)); @@ -1607,17 +1703,114 @@ prune_runtime_alias_test_list (vec<dr_with_seg_len_pair_t> *alias_pairs, dump_printf (MSG_NOTE, "merging ranges for %T, %T and %T, %T\n", DR_REF (dr_a1->dr), DR_REF (dr_b1->dr), DR_REF (dr_a2->dr), DR_REF (dr_b2->dr)); - alias_pairs->ordered_remove (i); - i--; + alias_pair1->flags |= alias_pair2->flags; + last -= 1; } } + alias_pairs->truncate (last + 1); + + /* Try to restore the original dr_with_seg_len order within each + dr_with_seg_len_pair_t. If we ended up combining swapped and + unswapped pairs into the same check, we have to invalidate any + RAW, WAR and WAW information for it. */ + if (dump_enabled_p ()) + dump_printf (MSG_NOTE, "merged alias checks:\n"); + FOR_EACH_VEC_ELT (*alias_pairs, i, alias_pair) + { + unsigned int swap_mask = (DR_ALIAS_SWAPPED | DR_ALIAS_UNSWAPPED); + unsigned int swapped = (alias_pair->flags & swap_mask); + if (swapped == DR_ALIAS_SWAPPED) + std::swap (alias_pair->first, alias_pair->second); + else if (swapped != DR_ALIAS_UNSWAPPED) + alias_pair->flags |= DR_ALIAS_ARBITRARY; + alias_pair->flags &= ~swap_mask; + if (dump_enabled_p ()) + dump_alias_pair (alias_pair, " "); + } +} + +/* A subroutine of create_intersect_range_checks, with a subset of the + same arguments. Try to use IFN_CHECK_RAW_PTRS and IFN_CHECK_WAR_PTRS + to optimize cases in which the references form a simple RAW, WAR or + WAR dependence. */ + +static bool +create_ifn_alias_checks (tree *cond_expr, + const dr_with_seg_len_pair_t &alias_pair) +{ + const dr_with_seg_len& dr_a = alias_pair.first; + const dr_with_seg_len& dr_b = alias_pair.second; + + /* Check for cases in which: + + (a) we have a known RAW, WAR or WAR dependence + (b) the accesses are well-ordered in both the original and new code + (see the comment above the DR_ALIAS_* flags for details); and + (c) the DR_STEPs describe all access pairs covered by ALIAS_PAIR. */ + if (alias_pair.flags & ~(DR_ALIAS_RAW | DR_ALIAS_WAR | DR_ALIAS_WAW)) + return false; + + /* Make sure that both DRs access the same pattern of bytes, + with a constant length and and step. 
*/ + poly_uint64 seg_len; + if (!operand_equal_p (dr_a.seg_len, dr_b.seg_len, 0) + || !poly_int_tree_p (dr_a.seg_len, &seg_len) + || maybe_ne (dr_a.access_size, dr_b.access_size) + || !operand_equal_p (DR_STEP (dr_a.dr), DR_STEP (dr_b.dr), 0) + || !tree_fits_uhwi_p (DR_STEP (dr_a.dr))) + return false; + + unsigned HOST_WIDE_INT bytes = tree_to_uhwi (DR_STEP (dr_a.dr)); + tree addr_a = DR_BASE_ADDRESS (dr_a.dr); + tree addr_b = DR_BASE_ADDRESS (dr_b.dr); + + /* See whether the target suports what we want to do. WAW checks are + equivalent to WAR checks here. */ + internal_fn ifn = (alias_pair.flags & DR_ALIAS_RAW + ? IFN_CHECK_RAW_PTRS + : IFN_CHECK_WAR_PTRS); + unsigned int align = MIN (dr_a.align, dr_b.align); + poly_uint64 full_length = seg_len + bytes; + if (!internal_check_ptrs_fn_supported_p (ifn, TREE_TYPE (addr_a), + full_length, align)) + { + full_length = seg_len + dr_a.access_size; + if (!internal_check_ptrs_fn_supported_p (ifn, TREE_TYPE (addr_a), + full_length, align)) + return false; + } + + /* Commit to using this form of test. */ + addr_a = fold_build_pointer_plus (addr_a, DR_OFFSET (dr_a.dr)); + addr_a = fold_build_pointer_plus (addr_a, DR_INIT (dr_a.dr)); + + addr_b = fold_build_pointer_plus (addr_b, DR_OFFSET (dr_b.dr)); + addr_b = fold_build_pointer_plus (addr_b, DR_INIT (dr_b.dr)); + + *cond_expr = build_call_expr_internal_loc (UNKNOWN_LOCATION, + ifn, boolean_type_node, + 4, addr_a, addr_b, + size_int (full_length), + size_int (align)); + + if (dump_enabled_p ()) + { + if (ifn == IFN_CHECK_RAW_PTRS) + dump_printf (MSG_NOTE, "using an IFN_CHECK_RAW_PTRS test\n"); + else + dump_printf (MSG_NOTE, "using an IFN_CHECK_WAR_PTRS test\n"); + } + return true; } -/* Given LOOP's two data references and segment lengths described by DR_A - and DR_B, create expression checking if the two addresses ranges intersect - with each other based on index of the two addresses. This can only be - done if DR_A and DR_B referring to the same (array) object and the index - is the only difference. For example: +/* Try to generate a runtime condition that is true if ALIAS_PAIR is + free of aliases, using a condition based on index values instead + of a condition based on addresses. Return true on success, + storing the condition in *COND_EXPR. + + This can only be done if the two data references in ALIAS_PAIR access + the same array object and the index is the only difference. For example, + if the two data references are DR_A and DR_B: DR_A DR_B data-ref arr[i] arr[j] @@ -1634,16 +1827,20 @@ prune_runtime_alias_test_list (vec<dr_with_seg_len_pair_t> *alias_pairs, We can create expression based on index rather than address: - (i_0 + 4 < j_0 || j_0 + 4 < i_0) + (unsigned) (i_0 - j_0 + 3) <= 6 + + i.e. the indices are less than 4 apart. Note evolution step of index needs to be considered in comparison. 
*/ static bool create_intersect_range_checks_index (class loop *loop, tree *cond_expr, - const dr_with_seg_len& dr_a, - const dr_with_seg_len& dr_b) + const dr_with_seg_len_pair_t &alias_pair) { - if (integer_zerop (DR_STEP (dr_a.dr)) + const dr_with_seg_len &dr_a = alias_pair.first; + const dr_with_seg_len &dr_b = alias_pair.second; + if ((alias_pair.flags & DR_ALIAS_MIXED_STEPS) + || integer_zerop (DR_STEP (dr_a.dr)) || integer_zerop (DR_STEP (dr_b.dr)) || DR_NUM_DIMENSIONS (dr_a.dr) != DR_NUM_DIMENSIONS (dr_b.dr)) return false; @@ -1669,15 +1866,8 @@ create_intersect_range_checks_index (class loop *loop, tree *cond_expr, if (neg_step) { abs_step = -abs_step; - seg_len1 = -seg_len1; - seg_len2 = -seg_len2; - } - else - { - /* Include the access size in the length, so that we only have one - tree addition below. */ - seg_len1 += dr_a.access_size; - seg_len2 += dr_b.access_size; + seg_len1 = (-wi::to_poly_wide (dr_a.seg_len)).force_uhwi (); + seg_len2 = (-wi::to_poly_wide (dr_b.seg_len)).force_uhwi (); } /* Infer the number of iterations with which the memory segment is accessed @@ -1691,16 +1881,15 @@ create_intersect_range_checks_index (class loop *loop, tree *cond_expr, || !can_div_trunc_p (seg_len2 + abs_step - 1, abs_step, &niter_len2)) return false; - poly_uint64 niter_access1 = 0, niter_access2 = 0; - if (neg_step) - { - /* Divide each access size by the byte step, rounding up. */ - if (!can_div_trunc_p (dr_a.access_size - abs_step - 1, - abs_step, &niter_access1) - || !can_div_trunc_p (dr_b.access_size + abs_step - 1, - abs_step, &niter_access2)) - return false; - } + /* Divide each access size by the byte step, rounding up. */ + poly_uint64 niter_access1, niter_access2; + if (!can_div_trunc_p (dr_a.access_size + abs_step - 1, + abs_step, &niter_access1) + || !can_div_trunc_p (dr_b.access_size + abs_step - 1, + abs_step, &niter_access2)) + return false; + + bool waw_or_war_p = (alias_pair.flags & ~(DR_ALIAS_WAR | DR_ALIAS_WAW)) == 0; unsigned int i; for (i = 0; i < DR_NUM_DIMENSIONS (dr_a.dr); i++) @@ -1740,44 +1929,298 @@ create_intersect_range_checks_index (class loop *loop, tree *cond_expr, index of data reference. Like segment length, index length is linear function of the number of iterations with index_step as the coefficient, i.e, niter_len * idx_step. */ - tree idx_len1 = fold_build2 (MULT_EXPR, TREE_TYPE (min1), idx_step, - build_int_cst (TREE_TYPE (min1), - niter_len1)); - tree idx_len2 = fold_build2 (MULT_EXPR, TREE_TYPE (min2), idx_step, - build_int_cst (TREE_TYPE (min2), - niter_len2)); - tree max1 = fold_build2 (PLUS_EXPR, TREE_TYPE (min1), min1, idx_len1); - tree max2 = fold_build2 (PLUS_EXPR, TREE_TYPE (min2), min2, idx_len2); - /* Adjust ranges for negative step. */ + offset_int abs_idx_step = offset_int::from (wi::to_wide (idx_step), + SIGNED); if (neg_step) - { - /* IDX_LEN1 and IDX_LEN2 are negative in this case. */ - std::swap (min1, max1); - std::swap (min2, max2); - - /* As with the lengths just calculated, we've measured the access - sizes in iterations, so multiply them by the index step. */ - tree idx_access1 - = fold_build2 (MULT_EXPR, TREE_TYPE (min1), idx_step, - build_int_cst (TREE_TYPE (min1), niter_access1)); - tree idx_access2 - = fold_build2 (MULT_EXPR, TREE_TYPE (min2), idx_step, - build_int_cst (TREE_TYPE (min2), niter_access2)); - - /* MINUS_EXPR because the above values are negative. 
*/ - max1 = fold_build2 (MINUS_EXPR, TREE_TYPE (max1), max1, idx_access1); - max2 = fold_build2 (MINUS_EXPR, TREE_TYPE (max2), max2, idx_access2); - } - tree part_cond_expr - = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, - fold_build2 (LE_EXPR, boolean_type_node, max1, min2), - fold_build2 (LE_EXPR, boolean_type_node, max2, min1)); + abs_idx_step = -abs_idx_step; + poly_offset_int idx_len1 = abs_idx_step * niter_len1; + poly_offset_int idx_len2 = abs_idx_step * niter_len2; + poly_offset_int idx_access1 = abs_idx_step * niter_access1; + poly_offset_int idx_access2 = abs_idx_step * niter_access2; + + gcc_assert (known_ge (idx_len1, 0) + && known_ge (idx_len2, 0) + && known_ge (idx_access1, 0) + && known_ge (idx_access2, 0)); + + /* Each access has the following pattern, with lengths measured + in units of INDEX: + + <-- idx_len --> + <--- A: -ve step ---> + +-----+-------+-----+-------+-----+ + | n-1 | ..... | 0 | ..... | n-1 | + +-----+-------+-----+-------+-----+ + <--- B: +ve step ---> + <-- idx_len --> + | + min + + where "n" is the number of scalar iterations covered by the segment + and where each access spans idx_access units. + + A is the range of bytes accessed when the step is negative, + B is the range when the step is positive. + + When checking for general overlap, we need to test whether + the range: + + [min1 + low_offset1, min2 + high_offset1 + idx_access1 - 1] + + overlaps: + + [min2 + low_offset2, min2 + high_offset2 + idx_access2 - 1] + + where: + + low_offsetN = +ve step ? 0 : -idx_lenN; + high_offsetN = +ve step ? idx_lenN : 0; + + This is equivalent to testing whether: + + min1 + low_offset1 <= min2 + high_offset2 + idx_access2 - 1 + && min2 + low_offset2 <= min1 + high_offset1 + idx_access1 - 1 + + Converting this into a single test, there is an overlap if: + + 0 <= min2 - min1 + bias <= limit + + where bias = high_offset2 + idx_access2 - 1 - low_offset1 + limit = (high_offset1 - low_offset1 + idx_access1 - 1) + + (high_offset2 - low_offset2 + idx_access2 - 1) + i.e. limit = idx_len1 + idx_access1 - 1 + idx_len2 + idx_access2 - 1 + + Combining the tests requires limit to be computable in an unsigned + form of the index type; if it isn't, we fall back to the usual + pointer-based checks. + + We can do better if DR_B is a write and if DR_A and DR_B are + well-ordered in both the original and the new code (see the + comment above the DR_ALIAS_* flags for details). In this case + we know that for each i in [0, n-1], the write performed by + access i of DR_B occurs after access numbers j<=i of DR_A in + both the original and the new code. Any write or anti + dependencies wrt those DR_A accesses are therefore maintained. + + We just need to make sure that each individual write in DR_B does not + overlap any higher-indexed access in DR_A; such DR_A accesses happen + after the DR_B access in the original code but happen before it in + the new code. + + We know the steps for both accesses are equal, so by induction, we + just need to test whether the first write of DR_B overlaps a later + access of DR_A. In other words, we need to move min1 along by + one iteration: + + min1' = min1 + idx_step + + and use the ranges: + + [min1' + low_offset1', min1' + high_offset1' + idx_access1 - 1] + + and: + + [min2, min2 + idx_access2 - 1] + + where: + + low_offset1' = +ve step ? 0 : -(idx_len1 - |idx_step|) + high_offset1' = +ve_step ? idx_len1 - |idx_step| : 0. 
*/ + if (waw_or_war_p) + idx_len1 -= abs_idx_step; + + poly_offset_int limit = idx_len1 + idx_access1 - 1 + idx_access2 - 1; + if (!waw_or_war_p) + limit += idx_len2; + + tree utype = unsigned_type_for (TREE_TYPE (min1)); + if (!wi::fits_to_tree_p (limit, utype)) + return false; + + poly_offset_int low_offset1 = neg_step ? -idx_len1 : 0; + poly_offset_int high_offset2 = neg_step || waw_or_war_p ? 0 : idx_len2; + poly_offset_int bias = high_offset2 + idx_access2 - 1 - low_offset1; + /* Equivalent to adding IDX_STEP to MIN1. */ + if (waw_or_war_p) + bias -= wi::to_offset (idx_step); + + tree subject = fold_build2 (MINUS_EXPR, utype, + fold_convert (utype, min2), + fold_convert (utype, min1)); + subject = fold_build2 (PLUS_EXPR, utype, subject, + wide_int_to_tree (utype, bias)); + tree part_cond_expr = fold_build2 (GT_EXPR, boolean_type_node, subject, + wide_int_to_tree (utype, limit)); if (*cond_expr) *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, *cond_expr, part_cond_expr); else *cond_expr = part_cond_expr; } + if (dump_enabled_p ()) + { + if (waw_or_war_p) + dump_printf (MSG_NOTE, "using an index-based WAR/WAW test\n"); + else + dump_printf (MSG_NOTE, "using an index-based overlap test\n"); + } + return true; +} + +/* A subroutine of create_intersect_range_checks, with a subset of the + same arguments. Try to optimize cases in which the second access + is a write and in which some overlap is valid. */ + +static bool +create_waw_or_war_checks (tree *cond_expr, + const dr_with_seg_len_pair_t &alias_pair) +{ + const dr_with_seg_len& dr_a = alias_pair.first; + const dr_with_seg_len& dr_b = alias_pair.second; + + /* Check for cases in which: + + (a) DR_B is always a write; + (b) the accesses are well-ordered in both the original and new code + (see the comment above the DR_ALIAS_* flags for details); and + (c) the DR_STEPs describe all access pairs covered by ALIAS_PAIR. */ + if (alias_pair.flags & ~(DR_ALIAS_WAR | DR_ALIAS_WAW)) + return false; + + /* Check for equal (but possibly variable) steps. */ + tree step = DR_STEP (dr_a.dr); + if (!operand_equal_p (step, DR_STEP (dr_b.dr))) + return false; + + /* Make sure that we can operate on sizetype without loss of precision. */ + tree addr_type = TREE_TYPE (DR_BASE_ADDRESS (dr_a.dr)); + if (TYPE_PRECISION (addr_type) != TYPE_PRECISION (sizetype)) + return false; + + /* All addresses involved are known to have a common alignment ALIGN. + We can therefore subtract ALIGN from an exclusive endpoint to get + an inclusive endpoint. In the best (and common) case, ALIGN is the + same as the access sizes of both DRs, and so subtracting ALIGN + cancels out the addition of an access size. */ + unsigned int align = MIN (dr_a.align, dr_b.align); + poly_uint64 last_chunk_a = dr_a.access_size - align; + poly_uint64 last_chunk_b = dr_b.access_size - align; + + /* Get a boolean expression that is true when the step is negative. */ + tree indicator = dr_direction_indicator (dr_a.dr); + tree neg_step = fold_build2 (LT_EXPR, boolean_type_node, + fold_convert (ssizetype, indicator), + ssize_int (0)); + + /* Get lengths in sizetype. */ + tree seg_len_a + = fold_convert (sizetype, rewrite_to_non_trapping_overflow (dr_a.seg_len)); + step = fold_convert (sizetype, rewrite_to_non_trapping_overflow (step)); + + /* Each access has the following pattern: + + <- |seg_len| -> + <--- A: -ve step ---> + +-----+-------+-----+-------+-----+ + | n-1 | ..... | 0 | ..... 
| n-1 | + +-----+-------+-----+-------+-----+ + <--- B: +ve step ---> + <- |seg_len| -> + | + base address + + where "n" is the number of scalar iterations covered by the segment. + + A is the range of bytes accessed when the step is negative, + B is the range when the step is positive. + + We know that DR_B is a write. We also know (from checking that + DR_A and DR_B are well-ordered) that for each i in [0, n-1], + the write performed by access i of DR_B occurs after access numbers + j<=i of DR_A in both the original and the new code. Any write or + anti dependencies wrt those DR_A accesses are therefore maintained. + + We just need to make sure that each individual write in DR_B does not + overlap any higher-indexed access in DR_A; such DR_A accesses happen + after the DR_B access in the original code but happen before it in + the new code. + + We know the steps for both accesses are equal, so by induction, we + just need to test whether the first write of DR_B overlaps a later + access of DR_A. In other words, we need to move addr_a along by + one iteration: + + addr_a' = addr_a + step + + and check whether: + + [addr_b, addr_b + last_chunk_b] + + overlaps: + + [addr_a' + low_offset_a, addr_a' + high_offset_a + last_chunk_a] + + where [low_offset_a, high_offset_a] spans accesses [1, n-1]. I.e.: + + low_offset_a = +ve step ? 0 : seg_len_a - step + high_offset_a = +ve step ? seg_len_a - step : 0 + + This is equivalent to testing whether: + + addr_a' + low_offset_a <= addr_b + last_chunk_b + && addr_b <= addr_a' + high_offset_a + last_chunk_a + + Converting this into a single test, there is an overlap if: + + 0 <= addr_b + last_chunk_b - addr_a' - low_offset_a <= limit + + where limit = high_offset_a - low_offset_a + last_chunk_a + last_chunk_b + + If DR_A is performed, limit + |step| - last_chunk_b is known to be + less than the size of the object underlying DR_A. We also know + that last_chunk_b <= |step|; this is checked elsewhere if it isn't + guaranteed at compile time. There can therefore be no overflow if + "limit" is calculated in an unsigned type with pointer precision. */ + tree addr_a = fold_build_pointer_plus (DR_BASE_ADDRESS (dr_a.dr), + DR_OFFSET (dr_a.dr)); + addr_a = fold_build_pointer_plus (addr_a, DR_INIT (dr_a.dr)); + + tree addr_b = fold_build_pointer_plus (DR_BASE_ADDRESS (dr_b.dr), + DR_OFFSET (dr_b.dr)); + addr_b = fold_build_pointer_plus (addr_b, DR_INIT (dr_b.dr)); + + /* Advance ADDR_A by one iteration and adjust the length to compensate. */ + addr_a = fold_build_pointer_plus (addr_a, step); + tree seg_len_a_minus_step = fold_build2 (MINUS_EXPR, sizetype, + seg_len_a, step); + if (!CONSTANT_CLASS_P (seg_len_a_minus_step)) + seg_len_a_minus_step = build1 (SAVE_EXPR, sizetype, seg_len_a_minus_step); + + tree low_offset_a = fold_build3 (COND_EXPR, sizetype, neg_step, + seg_len_a_minus_step, size_zero_node); + if (!CONSTANT_CLASS_P (low_offset_a)) + low_offset_a = build1 (SAVE_EXPR, sizetype, low_offset_a); + + /* We could use COND_EXPR <neg_step, size_zero_node, seg_len_a_minus_step>, + but it's usually more efficient to reuse the LOW_OFFSET_A result. */ + tree high_offset_a = fold_build2 (MINUS_EXPR, sizetype, seg_len_a_minus_step, + low_offset_a); + + /* The amount added to addr_b - addr_a'. 
*/ + tree bias = fold_build2 (MINUS_EXPR, sizetype, + size_int (last_chunk_b), low_offset_a); + + tree limit = fold_build2 (MINUS_EXPR, sizetype, high_offset_a, low_offset_a); + limit = fold_build2 (PLUS_EXPR, sizetype, limit, + size_int (last_chunk_a + last_chunk_b)); + + tree subject = fold_build2 (POINTER_DIFF_EXPR, ssizetype, addr_b, addr_a); + subject = fold_build2 (PLUS_EXPR, sizetype, + fold_convert (sizetype, subject), bias); + + *cond_expr = fold_build2 (GT_EXPR, boolean_type_node, subject, limit); + if (dump_enabled_p ()) + dump_printf (MSG_NOTE, "using an address-based WAR/WAW test\n"); return true; } @@ -1865,24 +2308,32 @@ get_segment_min_max (const dr_with_seg_len &d, tree *seg_min_out, *seg_max_out = fold_build_pointer_plus (addr_base, max_reach); } -/* Given two data references and segment lengths described by DR_A and DR_B, - create expression checking if the two addresses ranges intersect with - each other: +/* Generate a runtime condition that is true if ALIAS_PAIR is free of aliases, + storing the condition in *COND_EXPR. The fallback is to generate a + a test that the two accesses do not overlap: - ((DR_A_addr_0 + DR_A_segment_length_0) <= DR_B_addr_0) - || (DR_B_addr_0 + DER_B_segment_length_0) <= DR_A_addr_0)) */ + end_a <= start_b || end_b <= start_a. */ static void create_intersect_range_checks (class loop *loop, tree *cond_expr, - const dr_with_seg_len& dr_a, - const dr_with_seg_len& dr_b) + const dr_with_seg_len_pair_t &alias_pair) { + const dr_with_seg_len& dr_a = alias_pair.first; + const dr_with_seg_len& dr_b = alias_pair.second; *cond_expr = NULL_TREE; - if (create_intersect_range_checks_index (loop, cond_expr, dr_a, dr_b)) + if (create_intersect_range_checks_index (loop, cond_expr, alias_pair)) + return; + + if (create_ifn_alias_checks (cond_expr, alias_pair)) + return; + + if (create_waw_or_war_checks (cond_expr, alias_pair)) return; unsigned HOST_WIDE_INT min_align; tree_code cmp_code; + /* We don't have to check DR_ALIAS_MIXED_STEPS here, since both versions + are equivalent. This is just an optimization heuristic. */ if (TREE_CODE (DR_STEP (dr_a.dr)) == INTEGER_CST && TREE_CODE (DR_STEP (dr_b.dr)) == INTEGER_CST) { @@ -1923,6 +2374,8 @@ create_intersect_range_checks (class loop *loop, tree *cond_expr, = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, fold_build2 (cmp_code, boolean_type_node, seg_a_max, seg_b_min), fold_build2 (cmp_code, boolean_type_node, seg_b_max, seg_a_min)); + if (dump_enabled_p ()) + dump_printf (MSG_NOTE, "using an address-based overlap test\n"); } /* Create a conditional expression that represents the run-time checks for @@ -1939,18 +2392,19 @@ create_runtime_alias_checks (class loop *loop, tree part_cond_expr; fold_defer_overflow_warnings (); - for (size_t i = 0, s = alias_pairs->length (); i < s; ++i) + dr_with_seg_len_pair_t *alias_pair; + unsigned int i; + FOR_EACH_VEC_ELT (*alias_pairs, i, alias_pair) { - const dr_with_seg_len& dr_a = (*alias_pairs)[i].first; - const dr_with_seg_len& dr_b = (*alias_pairs)[i].second; - + gcc_assert (alias_pair->flags); if (dump_enabled_p ()) dump_printf (MSG_NOTE, "create runtime check for data references %T and %T\n", - DR_REF (dr_a.dr), DR_REF (dr_b.dr)); + DR_REF (alias_pair->first.dr), + DR_REF (alias_pair->second.dr)); /* Create condition expression for each pair data references. 
*/ - create_intersect_range_checks (loop, &part_cond_expr, dr_a, dr_b); + create_intersect_range_checks (loop, &part_cond_expr, *alias_pair); if (*cond_expr) *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, *cond_expr, part_cond_expr); @@ -4917,7 +5371,7 @@ compute_all_dependences (vec<data_reference_p> datarefs, unsigned int i, j; if ((int) datarefs.length () - > PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS)) + > param_loop_max_datarefs_for_datadeps) { struct data_dependence_relation *ddr; |
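
The index-based path in this patch replaces the old pair of comparisons with a single unsigned comparison: the two references overlap iff (unsigned) (min2 - min1 + bias) <= limit. As a sanity check, here is a standalone sketch (not part of the patch) that brute-forces the arr[i]/arr[j] example from the create_intersect_range_checks_index comment, where each reference covers the four consecutive indices [x_0, x_0 + 3]; the folded condition (unsigned) (i_0 - j_0 + 3) <= 6 should hold exactly when the two index ranges overlap. The helper names and the test driver are illustrative only.

/* Standalone sketch (not part of the patch): brute-force check that the
   folded unsigned comparison agrees with a direct range-overlap test for
   the arr[i]/arr[j] example, with each reference covering the four
   indices [x_0, x_0 + 3].  */
#include <stdio.h>

/* Direct test: do [i0, i0 + 3] and [j0, j0 + 3] share an index?  */
static int
overlaps (int i0, int j0)
{
  return i0 <= j0 + 3 && j0 <= i0 + 3;
}

/* Folded test from the comment: bias = 3, limit = 6.  */
static int
folded_overlaps (int i0, int j0)
{
  return (unsigned int) (i0 - j0 + 3) <= 6;
}

int
main (void)
{
  for (int i0 = -32; i0 <= 32; i0++)
    for (int j0 = -32; j0 <= 32; j0++)
      if (overlaps (i0, j0) != folded_overlaps (i0, j0))
        {
          printf ("mismatch at i_0 = %d, j_0 = %d\n", i0, j0);
          return 1;
        }
  printf ("folded and direct index tests agree\n");
  return 0;
}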
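
create_waw_or_war_checks relaxes the overlap test when the second reference is a write: it only has to prove that the first write of DR_B does not overlap any later access of DR_A. The sketch below (again not part of the patch) specialises the derivation in that function's comment to a positive constant step and compares the folded subject/limit test against a direct pairwise check; N, STEP, ACCESS and ALIGN are illustrative constants, not values taken from the patch.

/* Standalone sketch (not part of the patch): brute-force check of the
   address-based WAR/WAW test, specialised to a positive constant step.
   Scenario: each of N scalar iterations reads ACCESS bytes at a + i*STEP
   (DR_A) and then writes ACCESS bytes at b + i*STEP (DR_B).  Vectorisation
   hoists the reads of a group of iterations above its writes, so the
   condition checked here is that no write of iteration i overlaps a read
   of a later iteration j > i.  */
#include <stdio.h>

#define N 8       /* scalar iterations covered by the segment (illustrative) */
#define STEP 4    /* byte step of both references */
#define ACCESS 4  /* access size of both references */
#define ALIGN 1   /* assume no alignment knowledge */

/* Direct test: no write at b + i*STEP overlaps a later read at a + j*STEP.  */
static int
safe_direct (long a, long b)
{
  for (int i = 0; i < N; i++)
    for (int j = i + 1; j < N; j++)
      {
        long wr = b + (long) i * STEP;  /* write of iteration i */
        long rd = a + (long) j * STEP;  /* read of iteration j  */
        if (wr < rd + ACCESS && rd < wr + ACCESS)
          return 0;
      }
  return 1;
}

/* Folded test, following the derivation for a positive step:
   addr_a' = a + STEP, low_offset_a = 0, high_offset_a = seg_len_a - STEP,
   last_chunk_a = last_chunk_b = ACCESS - ALIGN.  */
static int
safe_folded (long a, long b)
{
  unsigned long seg_len_a = (unsigned long) STEP * (N - 1);
  unsigned long last_chunk = ACCESS - ALIGN;
  unsigned long limit = (seg_len_a - STEP) + last_chunk + last_chunk;
  unsigned long subject = (unsigned long) (b - (a + STEP)) + last_chunk;
  return subject > limit;
}

int
main (void)
{
  long a = 1000;
  for (long b = a - 256; b <= a + 256; b++)
    if (safe_direct (a, b) != safe_folded (a, b))
      {
        printf ("mismatch at b - a = %ld\n", b - a);
        return 1;
      }
  printf ("folded WAR/WAW test agrees with the direct check\n");
  return 0;
}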
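
For context, a loop like the following is the sort of case the WAR/WAW and IFN_CHECK_WAR_PTRS paths are aimed at. The read of b[i] precedes the write of a[i] in every scalar iteration, so the runtime check only has to rule out harmful write-after-read/write-after-write overlaps; in particular a == b (an in-place update) can pass the relaxed test, whereas a plain address-range overlap check would reject it. The function is purely illustrative.

/* Illustrative only: with unrelated pointer parameters the vectorizer must
   emit a runtime alias check for this loop.  Because the read of b[i]
   happens before the write of a[i] in each scalar iteration, the pair is a
   candidate for the relaxed WAR/WAW tests added above (or for
   IFN_CHECK_WAR_PTRS on targets that provide it), so even a == b should be
   able to take the vector loop.  */
void
add_one (int *a, int *b, int n)
{
  for (int i = 0; i < n; i++)
    a[i] = b[i] + 1;
}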