Diffstat (limited to 'gcc/tree-vect-stmts.cc')
-rw-r--r-- | gcc/tree-vect-stmts.cc | 4209 |
1 file changed, 1806 insertions, 2403 deletions
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc index 7f87435..69f5f67 100644 --- a/gcc/tree-vect-stmts.cc +++ b/gcc/tree-vect-stmts.cc @@ -130,7 +130,8 @@ record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count, tree vectype, int misalign, enum vect_cost_model_location where) { - return record_stmt_cost (body_cost_vec, count, kind, NULL, node, + return record_stmt_cost (body_cost_vec, count, kind, + SLP_TREE_REPRESENTATIVE (node), node, vectype, misalign, where); } @@ -419,18 +420,22 @@ vect_stmt_relevant_p (stmt_vec_info stmt_info, loop_vec_info loop_vinfo, } } - /* Check if it's an induction and multiple exits. In this case there will be - a usage later on after peeling which is needed for the alternate exit. */ + /* Check if it's a not live PHI and multiple exits. In this case + there will be a usage later on after peeling which is needed for the + alternate exit. */ if (LOOP_VINFO_EARLY_BREAKS (loop_vinfo) - && STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def) + && is_a <gphi *> (stmt) + && gimple_bb (stmt) == LOOP_VINFO_LOOP (loop_vinfo)->header + && ((! VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)) + && ! *live_p) + || STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)) { if (dump_enabled_p ()) - dump_printf_loc (MSG_NOTE, vect_location, - "vec_stmt_relevant_p: induction forced for " - "early break.\n"); + dump_printf_loc (MSG_NOTE, vect_location, + "vec_stmt_relevant_p: PHI forced live for " + "early break.\n"); LOOP_VINFO_EARLY_BREAKS_LIVE_IVS (loop_vinfo).safe_push (stmt_info); *live_p = true; - } if (*live_p && *relevant == vect_unused_in_scope @@ -714,6 +719,8 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo, bool *fatal) bb = bbs[i]; for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) { + if (virtual_operand_p (gimple_phi_result (gsi_stmt (si)))) + continue; stmt_vec_info phi_info = loop_vinfo->lookup_stmt (gsi_stmt (si)); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? %G", @@ -899,11 +906,8 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo, bool *fatal) be generated for the single vector op. We will handle that shortly. */ static void -vect_model_simple_cost (vec_info *, - stmt_vec_info stmt_info, int ncopies, - enum vect_def_type *dt, - int ndts, - slp_tree node, +vect_model_simple_cost (vec_info *, int ncopies, enum vect_def_type *dt, + int ndts, slp_tree node, stmt_vector_for_cost *cost_vec, vect_cost_for_stmt kind = vector_stmt) { @@ -922,11 +926,11 @@ vect_model_simple_cost (vec_info *, for (int i = 0; i < ndts; i++) if (dt[i] == vect_constant_def || dt[i] == vect_external_def) prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec, - stmt_info, 0, vect_prologue); + node, 0, vect_prologue); /* Pass the inside-of-loop statements to the target-specific cost model. */ inside_cost += record_stmt_cost (cost_vec, ncopies, kind, - stmt_info, 0, vect_body); + node, 0, vect_body); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, @@ -1013,13 +1017,15 @@ vect_get_store_cost (vec_info *, stmt_vec_info stmt_info, slp_tree slp_node, unsigned int *inside_cost, stmt_vector_for_cost *body_cost_vec) { + tree vectype + = slp_node ? 
SLP_TREE_VECTYPE (slp_node) : STMT_VINFO_VECTYPE (stmt_info); switch (alignment_support_scheme) { case dr_aligned: { *inside_cost += record_stmt_cost (body_cost_vec, ncopies, - vector_store, stmt_info, slp_node, 0, - vect_body); + vector_store, stmt_info, slp_node, + vectype, 0, vect_body); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, @@ -1032,7 +1038,7 @@ vect_get_store_cost (vec_info *, stmt_vec_info stmt_info, slp_tree slp_node, /* Here, we assign an additional cost for the unaligned store. */ *inside_cost += record_stmt_cost (body_cost_vec, ncopies, unaligned_store, stmt_info, slp_node, - misalignment, vect_body); + vectype, misalignment, vect_body); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "vect_model_store_cost: unaligned supported by " @@ -1066,12 +1072,15 @@ vect_get_load_cost (vec_info *, stmt_vec_info stmt_info, slp_tree slp_node, stmt_vector_for_cost *body_cost_vec, bool record_prologue_costs) { + tree vectype + = slp_node ? SLP_TREE_VECTYPE (slp_node) : STMT_VINFO_VECTYPE (stmt_info); switch (alignment_support_scheme) { case dr_aligned: { *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load, - stmt_info, slp_node, 0, vect_body); + stmt_info, slp_node, vectype, + 0, vect_body); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, @@ -1084,7 +1093,7 @@ vect_get_load_cost (vec_info *, stmt_vec_info stmt_info, slp_tree slp_node, /* Here, we assign an additional cost for the unaligned load. */ *inside_cost += record_stmt_cost (body_cost_vec, ncopies, unaligned_load, stmt_info, slp_node, - misalignment, vect_body); + vectype, misalignment, vect_body); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, @@ -1096,18 +1105,19 @@ vect_get_load_cost (vec_info *, stmt_vec_info stmt_info, slp_tree slp_node, case dr_explicit_realign: { *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2, - vector_load, stmt_info, slp_node, 0, - vect_body); + vector_load, stmt_info, slp_node, + vectype, 0, vect_body); *inside_cost += record_stmt_cost (body_cost_vec, ncopies, - vec_perm, stmt_info, slp_node, 0, - vect_body); + vec_perm, stmt_info, slp_node, + vectype, 0, vect_body); /* FIXME: If the misalignment remains fixed across the iterations of the containing loop, the following cost should be added to the prologue costs. 
*/ if (targetm.vectorize.builtin_mask_for_load) *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt, - stmt_info, slp_node, 0, vect_body); + stmt_info, slp_node, vectype, + 0, vect_body); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, @@ -1133,17 +1143,21 @@ vect_get_load_cost (vec_info *, stmt_vec_info stmt_info, slp_tree slp_node, { *prologue_cost += record_stmt_cost (prologue_cost_vec, 2, vector_stmt, stmt_info, - slp_node, 0, vect_prologue); + slp_node, vectype, + 0, vect_prologue); if (targetm.vectorize.builtin_mask_for_load) *prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt, stmt_info, - slp_node, 0, vect_prologue); + slp_node, vectype, + 0, vect_prologue); } *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load, - stmt_info, slp_node, 0, vect_body); + stmt_info, slp_node, vectype, + 0, vect_body); *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm, - stmt_info, slp_node, 0, vect_body); + stmt_info, slp_node, vectype, + 0, vect_body); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, @@ -2083,302 +2097,246 @@ get_group_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info, known at compile time. */ gcc_assert (!STMT_VINFO_STRIDED_P (first_stmt_info) || gap == 0); - /* Stores can't yet have gaps. */ - gcc_assert (slp_node || vls_type == VLS_LOAD || gap == 0); - - if (slp_node) + /* For SLP vectorization we directly vectorize a subchain + without permutation. */ + if (! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()) + first_dr_info = STMT_VINFO_DR_INFO (SLP_TREE_SCALAR_STMTS (slp_node)[0]); + if (STMT_VINFO_STRIDED_P (first_stmt_info)) + /* Try to use consecutive accesses of as many elements as possible, + separated by the stride, until we have a complete vector. + Fall back to scalar accesses if that isn't possible. */ + *memory_access_type = VMAT_STRIDED_SLP; + else { - /* For SLP vectorization we directly vectorize a subchain - without permutation. */ - if (! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()) - first_dr_info - = STMT_VINFO_DR_INFO (SLP_TREE_SCALAR_STMTS (slp_node)[0]); - if (STMT_VINFO_STRIDED_P (first_stmt_info)) - /* Try to use consecutive accesses of as many elements as possible, - separated by the stride, until we have a complete vector. - Fall back to scalar accesses if that isn't possible. */ - *memory_access_type = VMAT_STRIDED_SLP; - else + int cmp = compare_step_with_zero (vinfo, stmt_info); + if (cmp < 0) { - int cmp = compare_step_with_zero (vinfo, stmt_info); - if (cmp < 0) - { - if (single_element_p) - /* ??? The VMAT_CONTIGUOUS_REVERSE code generation is - only correct for single element "interleaving" SLP. */ - *memory_access_type = get_negative_load_store_type - (vinfo, stmt_info, vectype, vls_type, 1, - &neg_ldst_offset); - else - { - /* Try to use consecutive accesses of DR_GROUP_SIZE elements, - separated by the stride, until we have a complete vector. - Fall back to scalar accesses if that isn't possible. */ - if (multiple_p (nunits, group_size)) - *memory_access_type = VMAT_STRIDED_SLP; - else - *memory_access_type = VMAT_ELEMENTWISE; - } - } - else if (cmp == 0 && loop_vinfo) - { - gcc_assert (vls_type == VLS_LOAD); - *memory_access_type = VMAT_INVARIANT; - } - /* Try using LOAD/STORE_LANES. */ - else if (slp_node->ldst_lanes - && (*lanes_ifn - = (vls_type == VLS_LOAD - ? 
vect_load_lanes_supported (vectype, group_size, - masked_p, elsvals) - : vect_store_lanes_supported (vectype, group_size, - masked_p))) != IFN_LAST) - *memory_access_type = VMAT_LOAD_STORE_LANES; + if (single_element_p) + /* ??? The VMAT_CONTIGUOUS_REVERSE code generation is + only correct for single element "interleaving" SLP. */ + *memory_access_type = get_negative_load_store_type + (vinfo, stmt_info, vectype, vls_type, 1, + &neg_ldst_offset); else - *memory_access_type = VMAT_CONTIGUOUS; - - /* If this is single-element interleaving with an element - distance that leaves unused vector loads around fall back - to elementwise access if possible - we otherwise least - create very sub-optimal code in that case (and - blow up memory, see PR65518). */ - if (loop_vinfo - && single_element_p - && (*memory_access_type == VMAT_CONTIGUOUS - || *memory_access_type == VMAT_CONTIGUOUS_REVERSE) - && maybe_gt (group_size, TYPE_VECTOR_SUBPARTS (vectype))) - { - if (SLP_TREE_LANES (slp_node) == 1) - { - *memory_access_type = VMAT_ELEMENTWISE; - if (dump_enabled_p ()) - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "single-element interleaving not supported " - "for not adjacent vector loads, using " - "elementwise access\n"); - } + { + /* Try to use consecutive accesses of DR_GROUP_SIZE elements, + separated by the stride, until we have a complete vector. + Fall back to scalar accesses if that isn't possible. */ + if (multiple_p (nunits, group_size)) + *memory_access_type = VMAT_STRIDED_SLP; else - { - if (dump_enabled_p ()) - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "single-element interleaving not supported " - "for not adjacent vector loads\n"); - return false; - } + *memory_access_type = VMAT_ELEMENTWISE; } + } + else if (cmp == 0 && loop_vinfo) + { + gcc_assert (vls_type == VLS_LOAD); + *memory_access_type = VMAT_INVARIANT; + } + /* Try using LOAD/STORE_LANES. */ + else if (slp_node->ldst_lanes + && (*lanes_ifn + = (vls_type == VLS_LOAD + ? vect_load_lanes_supported (vectype, group_size, + masked_p, elsvals) + : vect_store_lanes_supported (vectype, group_size, + masked_p))) != IFN_LAST) + *memory_access_type = VMAT_LOAD_STORE_LANES; + else if (!loop_vinfo && slp_node->avoid_stlf_fail) + { + *memory_access_type = VMAT_ELEMENTWISE; + if (dump_enabled_p ()) + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "using element-wise load to avoid disrupting " + "cross iteration store-to-load forwarding\n"); + } + else + *memory_access_type = VMAT_CONTIGUOUS; - /* For single-element interleaving also fall back to elementwise - access in case we did not lower a permutation and cannot - code generate it. */ - auto_vec<tree> temv; - unsigned n_perms; - if (loop_vinfo - && single_element_p - && SLP_TREE_LANES (slp_node) == 1 - && (*memory_access_type == VMAT_CONTIGUOUS - || *memory_access_type == VMAT_CONTIGUOUS_REVERSE) - && SLP_TREE_LOAD_PERMUTATION (slp_node).exists () - && !vect_transform_slp_perm_load - (loop_vinfo, slp_node, temv, NULL, - LOOP_VINFO_VECT_FACTOR (loop_vinfo), true, &n_perms)) + /* If this is single-element interleaving with an element + distance that leaves unused vector loads around fall back + to elementwise access if possible - we otherwise least + create very sub-optimal code in that case (and + blow up memory, see PR65518). 
*/ + if (loop_vinfo + && single_element_p + && (*memory_access_type == VMAT_CONTIGUOUS + || *memory_access_type == VMAT_CONTIGUOUS_REVERSE) + && maybe_gt (group_size, TYPE_VECTOR_SUBPARTS (vectype))) + { + if (SLP_TREE_LANES (slp_node) == 1) { *memory_access_type = VMAT_ELEMENTWISE; if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "single-element interleaving permutation not " - "supported, using elementwise access\n"); - } - - overrun_p = (loop_vinfo && gap != 0 - && *memory_access_type != VMAT_ELEMENTWISE); - if (overrun_p && vls_type != VLS_LOAD) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "Grouped store with gaps requires" - " non-consecutive accesses\n"); - return false; + "single-element interleaving not supported " + "for not adjacent vector loads, using " + "elementwise access\n"); } - - unsigned HOST_WIDE_INT dr_size - = vect_get_scalar_dr_size (first_dr_info); - poly_int64 off = 0; - if (*memory_access_type == VMAT_CONTIGUOUS_REVERSE) - off = (TYPE_VECTOR_SUBPARTS (vectype) - 1) * -dr_size; - - /* An overrun is fine if the trailing elements are smaller - than the alignment boundary B. Every vector access will - be a multiple of B and so we are guaranteed to access a - non-gap element in the same B-sized block. */ - if (overrun_p - && gap < (vect_known_alignment_in_bytes (first_dr_info, - vectype, off) / dr_size)) - overrun_p = false; - - /* When we have a contiguous access across loop iterations - but the access in the loop doesn't cover the full vector - we can end up with no gap recorded but still excess - elements accessed, see PR103116. Make sure we peel for - gaps if necessary and sufficient and give up if not. - - If there is a combination of the access not covering the full - vector and a gap recorded then we may need to peel twice. */ - bool large_vector_overrun_p = false; - if (loop_vinfo - && (*memory_access_type == VMAT_CONTIGUOUS - || *memory_access_type == VMAT_CONTIGUOUS_REVERSE) - && SLP_TREE_LOAD_PERMUTATION (slp_node).exists () - && !multiple_p (group_size * LOOP_VINFO_VECT_FACTOR (loop_vinfo), - nunits)) - large_vector_overrun_p = overrun_p = true; - - /* If the gap splits the vector in half and the target - can do half-vector operations avoid the epilogue peeling - by simply loading half of the vector only. Usually - the construction with an upper zero half will be elided. */ - dr_alignment_support alss; - int misalign = dr_misalignment (first_dr_info, vectype, off); - tree half_vtype; - poly_uint64 remain; - unsigned HOST_WIDE_INT tem, num; - if (overrun_p - && !masked_p - && *memory_access_type != VMAT_LOAD_STORE_LANES - && (((alss = vect_supportable_dr_alignment (vinfo, first_dr_info, - vectype, misalign))) - == dr_aligned - || alss == dr_unaligned_supported) - && can_div_trunc_p (group_size - * LOOP_VINFO_VECT_FACTOR (loop_vinfo) - gap, - nunits, &tem, &remain) - && (known_eq (remain, 0u) - || (known_ne (remain, 0u) - && constant_multiple_p (nunits, remain, &num) - && (vector_vector_composition_type (vectype, num, - &half_vtype) - != NULL_TREE)))) - overrun_p = false; - - if (overrun_p && !can_overrun_p) + else { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "Peeling for outer loop is not supported\n"); + "single-element interleaving not supported " + "for not adjacent vector loads\n"); return false; } + } - /* Peeling for gaps assumes that a single scalar iteration - is enough to make sure the last vector iteration doesn't - access excess elements. 
*/ - if (overrun_p - && (!can_div_trunc_p (group_size - * LOOP_VINFO_VECT_FACTOR (loop_vinfo) - gap, - nunits, &tem, &remain) - || maybe_lt (remain + group_size, nunits))) - { - /* But peeling a single scalar iteration is enough if - we can use the next power-of-two sized partial - access and that is sufficiently small to be covered - by the single scalar iteration. */ - unsigned HOST_WIDE_INT cnunits, cvf, cremain, cpart_size; - if (masked_p - || !nunits.is_constant (&cnunits) - || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&cvf) - || (((cremain = (group_size * cvf - gap) % cnunits), true) - && ((cpart_size = (1 << ceil_log2 (cremain))), true) - && (cremain + group_size < cpart_size - || vector_vector_composition_type - (vectype, cnunits / cpart_size, - &half_vtype) == NULL_TREE))) - { - /* If all fails we can still resort to niter masking unless - the vectors used are too big, so enforce the use of - partial vectors. */ - if (LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) - && !large_vector_overrun_p) - { - if (dump_enabled_p ()) - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "peeling for gaps insufficient for " - "access unless using partial " - "vectors\n"); - LOOP_VINFO_MUST_USE_PARTIAL_VECTORS_P (loop_vinfo) = true; - } - else - { - if (dump_enabled_p ()) - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "peeling for gaps insufficient for " - "access\n"); - return false; - } - } - else if (large_vector_overrun_p) - { - if (dump_enabled_p ()) - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "can't operate on partial vectors because " - "only unmasked loads handle access " - "shortening required because of gaps at " - "the end of the access\n"); - LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false; - } - } + /* For single-element interleaving also fall back to elementwise + access in case we did not lower a permutation and cannot + code generate it. */ + auto_vec<tree> temv; + unsigned n_perms; + if (loop_vinfo + && single_element_p + && SLP_TREE_LANES (slp_node) == 1 + && (*memory_access_type == VMAT_CONTIGUOUS + || *memory_access_type == VMAT_CONTIGUOUS_REVERSE) + && SLP_TREE_LOAD_PERMUTATION (slp_node).exists () + && !vect_transform_slp_perm_load + (loop_vinfo, slp_node, temv, NULL, + LOOP_VINFO_VECT_FACTOR (loop_vinfo), true, &n_perms)) + { + *memory_access_type = VMAT_ELEMENTWISE; + if (dump_enabled_p ()) + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "single-element interleaving permutation not " + "supported, using elementwise access\n"); } - } - else - { - /* We can always handle this case using elementwise accesses, - but see if something more efficient is available. */ - *memory_access_type = VMAT_ELEMENTWISE; - - /* If there is a gap at the end of the group then these optimizations - would access excess elements in the last iteration. */ - bool would_overrun_p = (gap != 0); - /* An overrun is fine if the trailing elements are smaller than the - alignment boundary B. Every vector access will be a multiple of B - and so we are guaranteed to access a non-gap element in the - same B-sized block. 
*/ - if (would_overrun_p - && !masked_p - && gap < (vect_known_alignment_in_bytes (first_dr_info, vectype) - / vect_get_scalar_dr_size (first_dr_info))) - would_overrun_p = false; - if (!STMT_VINFO_STRIDED_P (first_stmt_info) - && (can_overrun_p || !would_overrun_p) - && compare_step_with_zero (vinfo, stmt_info) > 0) + overrun_p = (loop_vinfo && gap != 0 + && *memory_access_type != VMAT_ELEMENTWISE); + if (overrun_p && vls_type != VLS_LOAD) { - /* First cope with the degenerate case of a single-element - vector. */ - if (known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U)) - ; + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "Grouped store with gaps requires" + " non-consecutive accesses\n"); + return false; + } - else - { - /* Otherwise try using LOAD/STORE_LANES. */ - *lanes_ifn - = vls_type == VLS_LOAD - ? vect_load_lanes_supported (vectype, group_size, masked_p, - elsvals) - : vect_store_lanes_supported (vectype, group_size, - masked_p); - if (*lanes_ifn != IFN_LAST) + unsigned HOST_WIDE_INT dr_size = vect_get_scalar_dr_size (first_dr_info); + poly_int64 off = 0; + if (*memory_access_type == VMAT_CONTIGUOUS_REVERSE) + off = (TYPE_VECTOR_SUBPARTS (vectype) - 1) * -dr_size; + + /* An overrun is fine if the trailing elements are smaller + than the alignment boundary B. Every vector access will + be a multiple of B and so we are guaranteed to access a + non-gap element in the same B-sized block. */ + if (overrun_p + && gap < (vect_known_alignment_in_bytes (first_dr_info, + vectype, off) / dr_size)) + overrun_p = false; + + /* When we have a contiguous access across loop iterations + but the access in the loop doesn't cover the full vector + we can end up with no gap recorded but still excess + elements accessed, see PR103116. Make sure we peel for + gaps if necessary and sufficient and give up if not. + + If there is a combination of the access not covering the full + vector and a gap recorded then we may need to peel twice. */ + bool large_vector_overrun_p = false; + if (loop_vinfo + && (*memory_access_type == VMAT_CONTIGUOUS + || *memory_access_type == VMAT_CONTIGUOUS_REVERSE) + && SLP_TREE_LOAD_PERMUTATION (slp_node).exists () + && !multiple_p (group_size * LOOP_VINFO_VECT_FACTOR (loop_vinfo), + nunits)) + large_vector_overrun_p = overrun_p = true; + + /* If the gap splits the vector in half and the target + can do half-vector operations avoid the epilogue peeling + by simply loading half of the vector only. Usually + the construction with an upper zero half will be elided. 
*/ + dr_alignment_support alss; + int misalign = dr_misalignment (first_dr_info, vectype, off); + tree half_vtype; + poly_uint64 remain; + unsigned HOST_WIDE_INT tem, num; + if (overrun_p + && !masked_p + && *memory_access_type != VMAT_LOAD_STORE_LANES + && (((alss = vect_supportable_dr_alignment (vinfo, first_dr_info, + vectype, misalign))) + == dr_aligned + || alss == dr_unaligned_supported) + && can_div_trunc_p (group_size + * LOOP_VINFO_VECT_FACTOR (loop_vinfo) - gap, + nunits, &tem, &remain) + && (known_eq (remain, 0u) + || (known_ne (remain, 0u) + && constant_multiple_p (nunits, remain, &num) + && (vector_vector_composition_type (vectype, num, &half_vtype) + != NULL_TREE)))) + overrun_p = false; + + if (overrun_p && !can_overrun_p) + { + if (dump_enabled_p ()) + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "Peeling for outer loop is not supported\n"); + return false; + } + + /* Peeling for gaps assumes that a single scalar iteration + is enough to make sure the last vector iteration doesn't + access excess elements. */ + if (overrun_p + && (!can_div_trunc_p (group_size + * LOOP_VINFO_VECT_FACTOR (loop_vinfo) - gap, + nunits, &tem, &remain) + || maybe_lt (remain + group_size, nunits))) + { + /* But peeling a single scalar iteration is enough if + we can use the next power-of-two sized partial + access and that is sufficiently small to be covered + by the single scalar iteration. */ + unsigned HOST_WIDE_INT cnunits, cvf, cremain, cpart_size; + if (masked_p + || !nunits.is_constant (&cnunits) + || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&cvf) + || (((cremain = (group_size * cvf - gap) % cnunits), true) + && ((cpart_size = (1 << ceil_log2 (cremain))), true) + && (cremain + group_size < cpart_size + || (vector_vector_composition_type (vectype, + cnunits / cpart_size, + &half_vtype) + == NULL_TREE)))) + { + /* If all fails we can still resort to niter masking unless + the vectors used are too big, so enforce the use of + partial vectors. */ + if (LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) + && !large_vector_overrun_p) { - *memory_access_type = VMAT_LOAD_STORE_LANES; - overrun_p = would_overrun_p; + if (dump_enabled_p ()) + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "peeling for gaps insufficient for " + "access unless using partial " + "vectors\n"); + LOOP_VINFO_MUST_USE_PARTIAL_VECTORS_P (loop_vinfo) = true; } - - /* If that fails, try using permuting loads. */ - else if (vls_type == VLS_LOAD - ? 
vect_grouped_load_supported (vectype, - single_element_p, - group_size) - : vect_grouped_store_supported (vectype, group_size)) + else { - *memory_access_type = VMAT_CONTIGUOUS_PERMUTE; - overrun_p = would_overrun_p; + if (dump_enabled_p ()) + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "peeling for gaps insufficient for " + "access\n"); + return false; } } + else if (large_vector_overrun_p) + { + if (dump_enabled_p ()) + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "can't operate on partial vectors because " + "only unmasked loads handle access " + "shortening required because of gaps at " + "the end of the access\n"); + LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false; + } } } @@ -2392,7 +2350,7 @@ get_group_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info, if ((*memory_access_type == VMAT_ELEMENTWISE || *memory_access_type == VMAT_STRIDED_SLP) && single_element_p - && (!slp_node || SLP_TREE_LANES (slp_node) == 1) + && SLP_TREE_LANES (slp_node) == 1 && loop_vinfo && vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo, masked_p, gs_info, elsvals)) @@ -2472,7 +2430,7 @@ static bool get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info, tree vectype, slp_tree slp_node, bool masked_p, vec_load_store_type vls_type, - unsigned int ncopies, + unsigned int, vect_memory_access_type *memory_access_type, poly_int64 *poffset, dr_alignment_support *alignment_support_scheme, @@ -2538,54 +2496,13 @@ get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info, is irrelevant for them. */ *alignment_support_scheme = dr_unaligned_supported; } - else if (STMT_VINFO_GROUPED_ACCESS (stmt_info) || slp_node) - { - if (!get_group_load_store_type (vinfo, stmt_info, vectype, slp_node, - masked_p, - vls_type, memory_access_type, poffset, - alignment_support_scheme, - misalignment, gs_info, lanes_ifn, - elsvals)) - return false; - } - else if (STMT_VINFO_STRIDED_P (stmt_info)) - { - gcc_assert (!slp_node); - if (loop_vinfo - && vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo, - masked_p, gs_info, elsvals)) - *memory_access_type = VMAT_GATHER_SCATTER; - else - *memory_access_type = VMAT_ELEMENTWISE; - /* Alignment is irrelevant here. */ - *alignment_support_scheme = dr_unaligned_supported; - } - else - { - int cmp = compare_step_with_zero (vinfo, stmt_info); - if (cmp == 0) - { - gcc_assert (vls_type == VLS_LOAD); - *memory_access_type = VMAT_INVARIANT; - /* Invariant accesses perform only component accesses, alignment - is irrelevant for them. */ - *alignment_support_scheme = dr_unaligned_supported; - } - else - { - if (cmp < 0) - *memory_access_type = get_negative_load_store_type - (vinfo, stmt_info, vectype, vls_type, ncopies, poffset); - else - *memory_access_type = VMAT_CONTIGUOUS; - *misalignment = dr_misalignment (STMT_VINFO_DR_INFO (stmt_info), - vectype, *poffset); - *alignment_support_scheme - = vect_supportable_dr_alignment (vinfo, - STMT_VINFO_DR_INFO (stmt_info), - vectype, *misalignment); - } - } + else if (!get_group_load_store_type (vinfo, stmt_info, vectype, slp_node, + masked_p, + vls_type, memory_access_type, poffset, + alignment_support_scheme, + misalignment, gs_info, lanes_ifn, + elsvals)) + return false; if ((*memory_access_type == VMAT_ELEMENTWISE || *memory_access_type == VMAT_STRIDED_SLP) @@ -2709,7 +2626,7 @@ get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info, such only the first load in the group is aligned, the rest are not. 
Because of this the permutes may break the alignment requirements that have been set, and as such we should for now, reject them. */ - if (slp_node && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()) + if (SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, @@ -3742,8 +3659,7 @@ vectorizable_call (vec_info *vinfo, } STMT_VINFO_TYPE (stmt_info) = call_vec_info_type; DUMP_VECT_SCOPE ("vectorizable_call"); - vect_model_simple_cost (vinfo, stmt_info, - ncopies, dt, ndts, slp_node, cost_vec); + vect_model_simple_cost (vinfo, ncopies, dt, ndts, slp_node, cost_vec); if (ifn != IFN_LAST && modifier == NARROW && !slp_node) record_stmt_cost (cost_vec, ncopies / 2, vec_promote_demote, stmt_info, 0, vect_body); @@ -4710,8 +4626,7 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info, STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type; DUMP_VECT_SCOPE ("vectorizable_simd_clone_call"); -/* vect_model_simple_cost (vinfo, stmt_info, ncopies, - dt, slp_node, cost_vec); */ +/* vect_model_simple_cost (vinfo, ncopies, dt, slp_node, cost_vec); */ return true; } @@ -5528,7 +5443,6 @@ vectorizable_conversion (vec_info *vinfo, tree vec_dest, cvt_op = NULL_TREE; tree scalar_dest; tree op0, op1 = NULL_TREE; - loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo); tree_code tc1; code_helper code, code1, code2; code_helper codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK; @@ -5538,7 +5452,7 @@ vectorizable_conversion (vec_info *vinfo, poly_uint64 nunits_in; poly_uint64 nunits_out; tree vectype_out, vectype_in; - int ncopies, i; + int i; tree lhs_type, rhs_type; /* For conversions between floating point and integer, there're 2 NARROW cases. NARROW_SRC is for FLOAT_EXPR, means @@ -5605,7 +5519,7 @@ vectorizable_conversion (vec_info *vinfo, /* Check types of lhs and rhs. */ scalar_dest = gimple_get_lhs (stmt); lhs_type = TREE_TYPE (scalar_dest); - vectype_out = STMT_VINFO_VECTYPE (stmt_info); + vectype_out = SLP_TREE_VECTYPE (slp_node); /* Check the operands of the operation. */ slp_tree slp_op0, slp_op1 = NULL; @@ -5703,25 +5617,11 @@ vectorizable_conversion (vec_info *vinfo, modifier = WIDEN; } - /* Multiple types in SLP are handled by creating the appropriate number of - vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in - case of SLP. */ - if (slp_node) - ncopies = 1; - else if (modifier == NARROW_DST) - ncopies = vect_get_num_copies (loop_vinfo, vectype_out); - else - ncopies = vect_get_num_copies (loop_vinfo, vectype_in); - - /* Sanity check: make sure that at least one copy of the vectorized stmt - needs to be generated. */ - gcc_assert (ncopies >= 1); - bool found_mode = false; scalar_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type); scalar_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type); opt_scalar_mode rhs_mode_iter; - auto_vec<std::pair<tree, tree_code> > converts; + auto_vec<std::pair<tree, tree_code>, 2> converts; /* Supportable by target? */ switch (modifier) @@ -5871,16 +5771,11 @@ vectorizable_conversion (vec_info *vinfo, else if (code == FLOAT_EXPR) { wide_int op_min_value, op_max_value; - if (slp_node) - { - tree def; - /* ??? Merge ranges in case of more than one lane. */ - if (SLP_TREE_LANES (slp_op0) != 1 - || !(def = vect_get_slp_scalar_def (slp_op0, 0)) - || !vect_get_range_info (def, &op_min_value, &op_max_value)) - goto unsupported; - } - else if (!vect_get_range_info (op0, &op_min_value, &op_max_value)) + tree def; + /* ??? Merge ranges in case of more than one lane. 
*/ + if (SLP_TREE_LANES (slp_op0) != 1 + || !(def = vect_get_slp_scalar_def (slp_op0, 0)) + || !vect_get_range_info (def, &op_min_value, &op_max_value)) goto unsupported; cvt_type @@ -5916,9 +5811,8 @@ vectorizable_conversion (vec_info *vinfo, if (!vec_stmt) /* transformation not required. */ { - if (slp_node - && (!vect_maybe_update_slp_op_vectype (slp_op0, vectype_in) - || !vect_maybe_update_slp_op_vectype (slp_op1, vectype_in))) + if (!vect_maybe_update_slp_op_vectype (slp_op0, vectype_in) + || !vect_maybe_update_slp_op_vectype (slp_op1, vectype_in)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, @@ -5929,16 +5823,14 @@ vectorizable_conversion (vec_info *vinfo, if (modifier == NONE) { STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type; - vect_model_simple_cost (vinfo, stmt_info, - ncopies * (1 + multi_step_cvt), + vect_model_simple_cost (vinfo, (1 + multi_step_cvt), dt, ndts, slp_node, cost_vec); } else if (modifier == NARROW_SRC || modifier == NARROW_DST) { STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type; /* The final packing step produces one vector result per copy. */ - unsigned int nvectors - = (slp_node ? SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) : ncopies); + unsigned int nvectors = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); vect_model_promotion_demotion_cost (stmt_info, dt, nvectors, multi_step_cvt, cost_vec, widen_arith); @@ -5950,9 +5842,7 @@ vectorizable_conversion (vec_info *vinfo, per copy. MULTI_STEP_CVT is 0 for a single conversion, so >> MULTI_STEP_CVT divides by 2^(number of steps - 1). */ unsigned int nvectors - = (slp_node - ? SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) >> multi_step_cvt - : ncopies * 2); + = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) >> multi_step_cvt; vect_model_promotion_demotion_cost (stmt_info, dt, nvectors, multi_step_cvt, cost_vec, widen_arith); @@ -5963,8 +5853,7 @@ vectorizable_conversion (vec_info *vinfo, /* Transform. */ if (dump_enabled_p ()) - dump_printf_loc (MSG_NOTE, vect_location, - "transform conversion. ncopies = %d.\n", ncopies); + dump_printf_loc (MSG_NOTE, vect_location, "transform conversion.\n"); if (op_type == binary_op) { @@ -6003,23 +5892,10 @@ vectorizable_conversion (vec_info *vinfo, widen_or_narrow_float_p ? vectype_out : cvt_type); - int ninputs = 1; - if (!slp_node) - { - if (modifier == WIDEN) - ; - else if (modifier == NARROW_SRC || modifier == NARROW_DST) - { - if (multi_step_cvt) - ninputs = vect_pow2 (multi_step_cvt); - ninputs *= 2; - } - } - switch (modifier) { case NONE: - vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies, + vect_get_vec_defs (vinfo, stmt_info, slp_node, 1, op0, vectype_in, &vec_oprnds0); /* vec_dest is intermediate type operand when multi_step_cvt. */ if (multi_step_cvt) @@ -6046,10 +5922,7 @@ vectorizable_conversion (vec_info *vinfo, gimple_set_lhs (new_stmt, new_temp); vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); - if (slp_node) - slp_node->push_vec_def (new_stmt); - else - STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt); + slp_node->push_vec_def (new_stmt); } break; @@ -6058,7 +5931,7 @@ vectorizable_conversion (vec_info *vinfo, of elements that we can fit in a vectype (nunits), we have to generate more than one vector stmt - i.e - we need to "unroll" the vector stmt by a factor VF/nunits. */ - vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies * ninputs, + vect_get_vec_defs (vinfo, stmt_info, slp_node, 1, op0, vectype_in, &vec_oprnds0, code == WIDEN_LSHIFT_EXPR ? 
NULL_TREE : op1, vectype_in, &vec_oprnds1); @@ -6102,10 +5975,7 @@ vectorizable_conversion (vec_info *vinfo, else new_stmt = SSA_NAME_DEF_STMT (vop0); - if (slp_node) - slp_node->push_vec_def (new_stmt); - else - STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt); + slp_node->push_vec_def (new_stmt); } break; @@ -6115,7 +5985,7 @@ vectorizable_conversion (vec_info *vinfo, of elements that we can fit in a vectype (nunits), we have to generate more than one vector stmt - i.e - we need to "unroll" the vector stmt by a factor VF/nunits. */ - vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies * ninputs, + vect_get_vec_defs (vinfo, stmt_info, slp_node, 1, op0, vectype_in, &vec_oprnds0); /* Arguments are ready. Create the new vector stmts. */ if (cvt_type && modifier == NARROW_DST) @@ -6148,16 +6018,11 @@ vectorizable_conversion (vec_info *vinfo, /* This is the last step of the conversion sequence. Store the vectors in SLP_NODE or in vector info of the scalar statement (or in STMT_VINFO_RELATED_STMT chain). */ - if (slp_node) - slp_node->push_vec_def (new_stmt); - else - STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt); + slp_node->push_vec_def (new_stmt); } } break; } - if (!slp_node) - *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0]; vec_oprnds0.release (); vec_oprnds1.release (); @@ -6327,8 +6192,7 @@ vectorizable_assignment (vec_info *vinfo, STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type; DUMP_VECT_SCOPE ("vectorizable_assignment"); if (!vect_nop_conversion_p (stmt_info)) - vect_model_simple_cost (vinfo, stmt_info, ncopies, dt, ndts, slp_node, - cost_vec); + vect_model_simple_cost (vinfo, ncopies, dt, ndts, slp_node, cost_vec); return true; } @@ -6698,7 +6562,7 @@ vectorizable_shift (vec_info *vinfo, } STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type; DUMP_VECT_SCOPE ("vectorizable_shift"); - vect_model_simple_cost (vinfo, stmt_info, ncopies, dt, + vect_model_simple_cost (vinfo, ncopies, dt, scalar_shift_arg ? 1 : ndts, slp_node, cost_vec); return true; } @@ -6848,7 +6712,6 @@ vectorizable_operation (vec_info *vinfo, poly_uint64 nunits_in; poly_uint64 nunits_out; tree vectype_out; - unsigned int ncopies; int vec_num; int i; vec<tree> vec_oprnds0 = vNULL; @@ -6909,7 +6772,7 @@ vectorizable_operation (vec_info *vinfo, } scalar_dest = gimple_assign_lhs (stmt); - vectype_out = STMT_VINFO_VECTYPE (stmt_info); + vectype_out = SLP_TREE_VECTYPE (slp_node); /* Most operations cannot handle bit-precision types without extra truncations. */ @@ -7020,20 +6883,8 @@ vectorizable_operation (vec_info *vinfo, } /* Multiple types in SLP are handled by creating the appropriate number of - vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in - case of SLP. */ - if (slp_node) - { - ncopies = 1; - vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); - } - else - { - ncopies = vect_get_num_copies (loop_vinfo, vectype); - vec_num = 1; - } - - gcc_assert (ncopies >= 1); + vectorized stmts for each SLP node. */ + vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); /* Reject attempts to combine mask types with nonmask types, e.g. if we have an AND between a (nonmask) boolean loaded from memory and @@ -7075,9 +6926,10 @@ vectorizable_operation (vec_info *vinfo, ops we have to lower the lowering code assumes we are dealing with word_mode. 
*/ if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype)) + || !GET_MODE_SIZE (vec_mode).is_constant () || (((code == PLUS_EXPR || code == MINUS_EXPR || code == NEGATE_EXPR) - || !target_support_p) - && maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)) + || !target_support_p) + && maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)) /* Check only during analysis. */ || (!vec_stmt && !vect_can_vectorize_without_simd_p (code))) { @@ -7117,12 +6969,12 @@ vectorizable_operation (vec_info *vinfo, if (cond_len_fn != IFN_LAST && direct_internal_fn_supported_p (cond_len_fn, vectype, OPTIMIZE_FOR_SPEED)) - vect_record_loop_len (loop_vinfo, lens, ncopies * vec_num, vectype, + vect_record_loop_len (loop_vinfo, lens, vec_num, vectype, 1); else if (cond_fn != IFN_LAST && direct_internal_fn_supported_p (cond_fn, vectype, OPTIMIZE_FOR_SPEED)) - vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num, + vect_record_loop_mask (loop_vinfo, masks, vec_num, vectype, NULL); else { @@ -7135,10 +6987,9 @@ vectorizable_operation (vec_info *vinfo, } /* Put types on constant and invariant SLP children. */ - if (slp_node - && (!vect_maybe_update_slp_op_vectype (slp_op0, vectype) - || !vect_maybe_update_slp_op_vectype (slp_op1, vectype) - || !vect_maybe_update_slp_op_vectype (slp_op2, vectype))) + if (!vect_maybe_update_slp_op_vectype (slp_op0, vectype) + || !vect_maybe_update_slp_op_vectype (slp_op1, vectype) + || !vect_maybe_update_slp_op_vectype (slp_op2, vectype)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, @@ -7148,16 +6999,14 @@ vectorizable_operation (vec_info *vinfo, STMT_VINFO_TYPE (stmt_info) = op_vec_info_type; DUMP_VECT_SCOPE ("vectorizable_operation"); - vect_model_simple_cost (vinfo, stmt_info, - ncopies, dt, ndts, slp_node, cost_vec); + vect_model_simple_cost (vinfo, 1, dt, ndts, slp_node, cost_vec); if (using_emulated_vectors_p) { /* The above vect_model_simple_cost call handles constants in the prologue and (mis-)costs one of the stmts as vector stmt. See below for the actual lowering that will be applied. */ - unsigned n - = slp_node ? SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) : ncopies; + unsigned n = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); switch (code) { case PLUS_EXPR: @@ -7210,60 +7059,7 @@ vectorizable_operation (vec_info *vinfo, else vec_dest = vect_create_destination_var (scalar_dest, vectype_out); - /* In case the vectorization factor (VF) is bigger than the number - of elements that we can fit in a vectype (nunits), we have to generate - more than one vector stmt - i.e - we need to "unroll" the - vector stmt by a factor VF/nunits. In doing so, we record a pointer - from one copy of the vector stmt to the next, in the field - STMT_VINFO_RELATED_STMT. This is necessary in order to allow following - stages to find the correct vector defs to be used when vectorizing - stmts that use the defs of the current stmt. The example below - illustrates the vectorization process when VF=16 and nunits=4 (i.e., - we need to create 4 vectorized stmts): - - before vectorization: - RELATED_STMT VEC_STMT - S1: x = memref - - - S2: z = x + 1 - - - - step 1: vectorize stmt S1 (done in vectorizable_load. See more details - there): - RELATED_STMT VEC_STMT - VS1_0: vx0 = memref0 VS1_1 - - VS1_1: vx1 = memref1 VS1_2 - - VS1_2: vx2 = memref2 VS1_3 - - VS1_3: vx3 = memref3 - - - S1: x = load - VS1_0 - S2: z = x + 1 - - - - step2: vectorize stmt S2 (done here): - To vectorize stmt S2 we first need to find the relevant vector - def for the first operand 'x'. 
This is, as usual, obtained from - the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt - that defines 'x' (S1). This way we find the stmt VS1_0, and the - relevant vector def 'vx0'. Having found 'vx0' we can generate - the vector stmt VS2_0, and as usual, record it in the - STMT_VINFO_VEC_STMT of stmt S2. - When creating the second copy (VS2_1), we obtain the relevant vector - def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of - stmt VS1_0. This way we find the stmt VS1_1 and the relevant - vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a - pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0. - Similarly when creating stmts VS2_2 and VS2_3. This is the resulting - chain of stmts and pointers: - RELATED_STMT VEC_STMT - VS1_0: vx0 = memref0 VS1_1 - - VS1_1: vx1 = memref1 VS1_2 - - VS1_2: vx2 = memref2 VS1_3 - - VS1_3: vx3 = memref3 - - - S1: x = load - VS1_0 - VS2_0: vz0 = vx0 + v1 VS2_1 - - VS2_1: vz1 = vx1 + v1 VS2_2 - - VS2_2: vz2 = vx2 + v1 VS2_3 - - VS2_3: vz3 = vx3 + v1 - - - S2: z = x + 1 - VS2_0 */ - - vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies, + vect_get_vec_defs (vinfo, stmt_info, slp_node, 1, op0, &vec_oprnds0, op1, &vec_oprnds1, op2, &vec_oprnds2); /* Arguments are ready. Create the new vector stmt. */ FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0) @@ -7272,88 +7068,108 @@ vectorizable_operation (vec_info *vinfo, vop1 = ((op_type == binary_op || op_type == ternary_op) ? vec_oprnds1[i] : NULL_TREE); vop2 = ((op_type == ternary_op) ? vec_oprnds2[i] : NULL_TREE); - if (using_emulated_vectors_p - && (code == PLUS_EXPR || code == MINUS_EXPR || code == NEGATE_EXPR)) + if (using_emulated_vectors_p) { /* Lower the operation. This follows vector lowering. */ - unsigned int width = vector_element_bits (vectype); - tree inner_type = TREE_TYPE (vectype); - tree word_type - = build_nonstandard_integer_type (GET_MODE_BITSIZE (word_mode), 1); - HOST_WIDE_INT max = GET_MODE_MASK (TYPE_MODE (inner_type)); - tree low_bits = build_replicated_int_cst (word_type, width, max >> 1); - tree high_bits - = build_replicated_int_cst (word_type, width, max & ~(max >> 1)); + tree word_type = build_nonstandard_integer_type + (GET_MODE_BITSIZE (vec_mode).to_constant (), 1); tree wvop0 = make_ssa_name (word_type); new_stmt = gimple_build_assign (wvop0, VIEW_CONVERT_EXPR, build1 (VIEW_CONVERT_EXPR, word_type, vop0)); vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); - tree result_low, signs; - if (code == PLUS_EXPR || code == MINUS_EXPR) + tree wvop1 = NULL_TREE; + if (vop1) { - tree wvop1 = make_ssa_name (word_type); + wvop1 = make_ssa_name (word_type); new_stmt = gimple_build_assign (wvop1, VIEW_CONVERT_EXPR, build1 (VIEW_CONVERT_EXPR, word_type, vop1)); vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); - signs = make_ssa_name (word_type); - new_stmt = gimple_build_assign (signs, - BIT_XOR_EXPR, wvop0, wvop1); - vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); - tree b_low = make_ssa_name (word_type); - new_stmt = gimple_build_assign (b_low, - BIT_AND_EXPR, wvop1, low_bits); - vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); - tree a_low = make_ssa_name (word_type); - if (code == PLUS_EXPR) - new_stmt = gimple_build_assign (a_low, - BIT_AND_EXPR, wvop0, low_bits); - else - new_stmt = gimple_build_assign (a_low, - BIT_IOR_EXPR, wvop0, high_bits); - vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); - if (code == MINUS_EXPR) + } + + tree result_low; + if (code == PLUS_EXPR || code 
== MINUS_EXPR || code == NEGATE_EXPR) + { + unsigned int width = vector_element_bits (vectype); + tree inner_type = TREE_TYPE (vectype); + HOST_WIDE_INT max = GET_MODE_MASK (TYPE_MODE (inner_type)); + tree low_bits + = build_replicated_int_cst (word_type, width, max >> 1); + tree high_bits + = build_replicated_int_cst (word_type, + width, max & ~(max >> 1)); + tree signs; + if (code == PLUS_EXPR || code == MINUS_EXPR) { - new_stmt = gimple_build_assign (NULL_TREE, - BIT_NOT_EXPR, signs); + signs = make_ssa_name (word_type); + new_stmt = gimple_build_assign (signs, + BIT_XOR_EXPR, wvop0, wvop1); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + tree b_low = make_ssa_name (word_type); + new_stmt = gimple_build_assign (b_low, BIT_AND_EXPR, + wvop1, low_bits); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + tree a_low = make_ssa_name (word_type); + if (code == PLUS_EXPR) + new_stmt = gimple_build_assign (a_low, BIT_AND_EXPR, + wvop0, low_bits); + else + new_stmt = gimple_build_assign (a_low, BIT_IOR_EXPR, + wvop0, high_bits); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + if (code == MINUS_EXPR) + { + new_stmt = gimple_build_assign (NULL_TREE, + BIT_NOT_EXPR, signs); + signs = make_ssa_name (word_type); + gimple_assign_set_lhs (new_stmt, signs); + vect_finish_stmt_generation (vinfo, stmt_info, + new_stmt, gsi); + } + new_stmt = gimple_build_assign (NULL_TREE, BIT_AND_EXPR, + signs, high_bits); signs = make_ssa_name (word_type); gimple_assign_set_lhs (new_stmt, signs); vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + result_low = make_ssa_name (word_type); + new_stmt = gimple_build_assign (result_low, code, + a_low, b_low); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); } - new_stmt = gimple_build_assign (NULL_TREE, - BIT_AND_EXPR, signs, high_bits); - signs = make_ssa_name (word_type); - gimple_assign_set_lhs (new_stmt, signs); - vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + else /* if (code == NEGATE_EXPR) */ + { + tree a_low = make_ssa_name (word_type); + new_stmt = gimple_build_assign (a_low, BIT_AND_EXPR, + wvop0, low_bits); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + signs = make_ssa_name (word_type); + new_stmt = gimple_build_assign (signs, BIT_NOT_EXPR, wvop0); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + new_stmt = gimple_build_assign (NULL_TREE, BIT_AND_EXPR, + signs, high_bits); + signs = make_ssa_name (word_type); + gimple_assign_set_lhs (new_stmt, signs); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + result_low = make_ssa_name (word_type); + new_stmt = gimple_build_assign (result_low, + MINUS_EXPR, high_bits, a_low); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + } + new_stmt = gimple_build_assign (NULL_TREE, BIT_XOR_EXPR, + result_low, signs); result_low = make_ssa_name (word_type); - new_stmt = gimple_build_assign (result_low, code, a_low, b_low); + gimple_assign_set_lhs (new_stmt, result_low); vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); } else { - tree a_low = make_ssa_name (word_type); - new_stmt = gimple_build_assign (a_low, - BIT_AND_EXPR, wvop0, low_bits); - vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); - signs = make_ssa_name (word_type); - new_stmt = gimple_build_assign (signs, BIT_NOT_EXPR, wvop0); - vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); - new_stmt = gimple_build_assign (NULL_TREE, - BIT_AND_EXPR, signs, high_bits); - 
signs = make_ssa_name (word_type); - gimple_assign_set_lhs (new_stmt, signs); - vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + new_stmt = gimple_build_assign (NULL_TREE, code, wvop0, wvop1); result_low = make_ssa_name (word_type); - new_stmt = gimple_build_assign (result_low, - MINUS_EXPR, high_bits, a_low); + gimple_assign_set_lhs (new_stmt, result_low); vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + } - new_stmt = gimple_build_assign (NULL_TREE, BIT_XOR_EXPR, result_low, - signs); - result_low = make_ssa_name (word_type); - gimple_assign_set_lhs (new_stmt, result_low); - vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); new_stmt = gimple_build_assign (NULL_TREE, VIEW_CONVERT_EXPR, build1 (VIEW_CONVERT_EXPR, vectype, result_low)); @@ -7366,7 +7182,7 @@ vectorizable_operation (vec_info *vinfo, tree mask; if (masked_loop_p) mask = vect_get_loop_mask (loop_vinfo, gsi, masks, - vec_num * ncopies, vectype, i); + vec_num, vectype, i); else /* Dummy mask. */ mask = build_minus_one_cst (truth_type_for (vectype)); @@ -7393,7 +7209,7 @@ vectorizable_operation (vec_info *vinfo, if (len_loop_p) { tree len = vect_get_loop_len (loop_vinfo, gsi, lens, - vec_num * ncopies, vectype, i, 1); + vec_num, vectype, i, 1); signed char biasval = LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo); tree bias = build_int_cst (intQI_type_node, biasval); @@ -7420,21 +7236,19 @@ vectorizable_operation (vec_info *vinfo, && code == BIT_AND_EXPR && VECTOR_BOOLEAN_TYPE_P (vectype)) { - if (loop_vinfo->scalar_cond_masked_set.contains ({ op0, - ncopies})) + if (loop_vinfo->scalar_cond_masked_set.contains ({ op0, 1 })) { mask = vect_get_loop_mask (loop_vinfo, gsi, masks, - vec_num * ncopies, vectype, i); + vec_num, vectype, i); vop0 = prepare_vec_mask (loop_vinfo, TREE_TYPE (mask), mask, vop0, gsi); } - if (loop_vinfo->scalar_cond_masked_set.contains ({ op1, - ncopies })) + if (loop_vinfo->scalar_cond_masked_set.contains ({ op1, 1 })) { mask = vect_get_loop_mask (loop_vinfo, gsi, masks, - vec_num * ncopies, vectype, i); + vec_num, vectype, i); vop1 = prepare_vec_mask (loop_vinfo, TREE_TYPE (mask), mask, vop1, gsi); @@ -7465,15 +7279,9 @@ vectorizable_operation (vec_info *vinfo, new_stmt, gsi); } - if (slp_node) - slp_node->push_vec_def (new_stmt); - else - STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt); + slp_node->push_vec_def (new_stmt); } - if (!slp_node) - *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0]; - vec_oprnds0.release (); vec_oprnds1.release (); vec_oprnds2.release (); @@ -8439,12 +8247,10 @@ vectorizable_store (vec_info *vinfo, tree dataref_ptr = NULL_TREE; tree dataref_offset = NULL_TREE; gimple *ptr_incr = NULL; - int ncopies; int j; stmt_vec_info first_stmt_info; bool grouped_store; unsigned int group_size, i; - bool slp = (slp_node != NULL); unsigned int vec_num; bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo); tree aggr_type; @@ -8490,7 +8296,7 @@ vectorizable_store (vec_info *vinfo, return false; int mask_index = internal_fn_mask_index (ifn); - if (mask_index >= 0 && slp_node) + if (mask_index >= 0) mask_index = vect_slp_child_index_for_operand (call, mask_index, STMT_VINFO_GATHER_SCATTER_P (stmt_info)); if (mask_index >= 0 @@ -8502,9 +8308,9 @@ vectorizable_store (vec_info *vinfo, /* Cannot have hybrid store SLP -- that would mean storing to the same location twice. 
*/ - gcc_assert (slp == PURE_SLP_STMT (stmt_info)); + gcc_assert (PURE_SLP_STMT (stmt_info)); - tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE; + tree vectype = SLP_TREE_VECTYPE (stmt_info), rhs_vectype = NULL_TREE; poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); if (loop_vinfo) @@ -8515,20 +8321,10 @@ vectorizable_store (vec_info *vinfo, else vf = 1; - /* Multiple types in SLP are handled by creating the appropriate number of - vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in - case of SLP. */ - if (slp) - ncopies = 1; - else - ncopies = vect_get_num_copies (loop_vinfo, vectype); - - gcc_assert (ncopies >= 1); - /* FORNOW. This restriction should be relaxed. */ if (loop && nested_in_vect_loop_p (loop, stmt_info) - && (ncopies > 1 || (slp && SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) > 1))) + && SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) > 1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, @@ -8554,13 +8350,12 @@ vectorizable_store (vec_info *vinfo, poly_int64 poffset; internal_fn lanes_ifn; if (!get_load_store_type (vinfo, stmt_info, vectype, slp_node, mask, vls_type, - ncopies, &memory_access_type, &poffset, + 1, &memory_access_type, &poffset, &alignment_support_scheme, &misalignment, &gs_info, &lanes_ifn)) return false; - if (slp_node - && slp_node->ldst_lanes + if (slp_node->ldst_lanes && memory_access_type != VMAT_LOAD_STORE_LANES) { if (dump_enabled_p ()) @@ -8607,8 +8402,7 @@ vectorizable_store (vec_info *vinfo, dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info), *first_dr_info = NULL; grouped_store = (STMT_VINFO_GROUPED_ACCESS (stmt_info) - && memory_access_type != VMAT_GATHER_SCATTER - && (slp || memory_access_type != VMAT_CONTIGUOUS)); + && memory_access_type != VMAT_GATHER_SCATTER); if (grouped_store) { first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info); @@ -8633,8 +8427,7 @@ vectorizable_store (vec_info *vinfo, if (costing_p) /* transformation not required. */ { STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type; - if (slp_node) - SLP_TREE_MEMORY_ACCESS_TYPE (slp_node) = memory_access_type; + SLP_TREE_MEMORY_ACCESS_TYPE (slp_node) = memory_access_type; if (loop_vinfo && LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)) @@ -8643,11 +8436,10 @@ vectorizable_store (vec_info *vinfo, memory_access_type, &gs_info, mask); - if (slp_node - && (!vect_maybe_update_slp_op_vectype (op_node, vectype) - || (mask - && !vect_maybe_update_slp_op_vectype (mask_node, - mask_vectype)))) + if (!vect_maybe_update_slp_op_vectype (op_node, vectype) + || (mask + && !vect_maybe_update_slp_op_vectype (mask_node, + mask_vectype))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, @@ -8665,22 +8457,8 @@ vectorizable_store (vec_info *vinfo, "Vectorizing an unaligned access.\n"); STMT_VINFO_TYPE (stmt_info) = store_vec_info_type; - - /* As function vect_transform_stmt shows, for interleaving stores - the whole chain is vectorized when the last store in the chain - is reached, the other stores in the group are skipped. So we - want to only cost the last one here, but it's not trivial to - get the last, as it's equivalent to use the first one for - costing, use the first one instead. 
*/ - if (grouped_store - && !slp - && first_stmt_info != stmt_info) - return true; } - if (slp_node) - gcc_assert (memory_access_type == SLP_TREE_MEMORY_ACCESS_TYPE (stmt_info)); - else - gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info)); + gcc_assert (memory_access_type == SLP_TREE_MEMORY_ACCESS_TYPE (stmt_info)); /* Transform. */ @@ -8689,14 +8467,14 @@ vectorizable_store (vec_info *vinfo, if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) >= 3) { gcc_assert (memory_access_type == VMAT_CONTIGUOUS); - gcc_assert (!slp || SLP_TREE_LANES (slp_node) == 1); + gcc_assert (SLP_TREE_LANES (slp_node) == 1); if (costing_p) { unsigned int inside_cost = 0, prologue_cost = 0; if (vls_type == VLS_STORE_INVARIANT) prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec, stmt_info, 0, vect_prologue); - vect_get_store_cost (vinfo, stmt_info, slp_node, ncopies, + vect_get_store_cost (vinfo, stmt_info, slp_node, 1, alignment_support_scheme, misalignment, &inside_cost, cost_vec); @@ -8709,67 +8487,28 @@ vectorizable_store (vec_info *vinfo, return true; } return vectorizable_scan_store (vinfo, stmt_info, slp_node, - gsi, vec_stmt, ncopies); + gsi, vec_stmt, 1); } - if (grouped_store || slp) - { - /* FORNOW */ - gcc_assert (!grouped_store - || !loop - || !nested_in_vect_loop_p (loop, stmt_info)); - - if (slp) - { - grouped_store = false; - /* VEC_NUM is the number of vect stmts to be created for this - group. */ - vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); - first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0]; - gcc_assert (!STMT_VINFO_GROUPED_ACCESS (first_stmt_info) - || (DR_GROUP_FIRST_ELEMENT (first_stmt_info) - == first_stmt_info)); - first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info); - op = vect_get_store_rhs (first_stmt_info); - } - else - /* VEC_NUM is the number of vect stmts to be created for this - group. */ - vec_num = group_size; - - ref_type = get_group_alias_ptr_type (first_stmt_info); - } - else - ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr)); + /* FORNOW */ + gcc_assert (!grouped_store + || !loop + || !nested_in_vect_loop_p (loop, stmt_info)); + + grouped_store = false; + /* VEC_NUM is the number of vect stmts to be created for this + group. */ + vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); + first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0]; + gcc_assert (!STMT_VINFO_GROUPED_ACCESS (first_stmt_info) + || (DR_GROUP_FIRST_ELEMENT (first_stmt_info) == first_stmt_info)); + first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info); + op = vect_get_store_rhs (first_stmt_info); + + ref_type = get_group_alias_ptr_type (first_stmt_info); if (!costing_p && dump_enabled_p ()) - dump_printf_loc (MSG_NOTE, vect_location, "transform store. ncopies = %d\n", - ncopies); - - /* Check if we need to update prologue cost for invariant, - and update it accordingly if so. If it's not for - interleaving store, we can just check vls_type; but if - it's for interleaving store, need to check the def_type - of the stored value since the current vls_type is just - for first_stmt_info. 
*/ - auto update_prologue_cost = [&](unsigned *prologue_cost, tree store_rhs) - { - gcc_assert (costing_p); - if (slp) - return; - if (grouped_store) - { - gcc_assert (store_rhs); - enum vect_def_type cdt; - gcc_assert (vect_is_simple_use (store_rhs, vinfo, &cdt)); - if (cdt != vect_constant_def && cdt != vect_external_def) - return; - } - else if (vls_type != VLS_STORE_INVARIANT) - return; - *prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec, stmt_info, - slp_node, 0, vect_prologue); - }; + dump_printf_loc (MSG_NOTE, vect_location, "transform store.\n"); if (memory_access_type == VMAT_ELEMENTWISE || memory_access_type == VMAT_STRIDED_SLP) @@ -8777,14 +8516,12 @@ vectorizable_store (vec_info *vinfo, unsigned inside_cost = 0, prologue_cost = 0; gimple_stmt_iterator incr_gsi; bool insert_after; - gimple *incr; tree offvar = NULL_TREE; tree ivstep; tree running_off; tree stride_base, stride_step, alias_off; tree vec_oprnd = NULL_TREE; tree dr_offset; - unsigned int g; /* Checked by get_load_store_type. */ unsigned int const_nunits = nunits.to_constant (); @@ -8822,110 +8559,112 @@ vectorizable_store (vec_info *vinfo, unsigned lnel = 1; tree ltype = elem_type; tree lvectype = vectype; - if (slp) - { - HOST_WIDE_INT n = gcd (group_size, const_nunits); - if (n == const_nunits) - { - int mis_align = dr_misalignment (first_dr_info, vectype); - dr_alignment_support dr_align - = vect_supportable_dr_alignment (vinfo, dr_info, vectype, - mis_align); - if (dr_align == dr_aligned - || dr_align == dr_unaligned_supported) - { - nstores = 1; - lnel = const_nunits; - ltype = vectype; - lvectype = vectype; - alignment_support_scheme = dr_align; - misalignment = mis_align; - } - } - else if (n > 1) - { - nstores = const_nunits / n; - lnel = n; - ltype = build_vector_type (elem_type, n); + HOST_WIDE_INT n = gcd (group_size, const_nunits); + if (n == const_nunits) + { + int mis_align = dr_misalignment (first_dr_info, vectype); + /* With VF > 1 we advance the DR by step, if that is constant + and only aligned when performed VF times, DR alignment + analysis can analyze this as aligned since it assumes + contiguous accesses. But that is not how we code generate + here, so adjust for this. */ + if (maybe_gt (vf, 1u) + && !multiple_p (DR_STEP_ALIGNMENT (first_dr_info->dr), + DR_TARGET_ALIGNMENT (first_dr_info))) + mis_align = -1; + dr_alignment_support dr_align + = vect_supportable_dr_alignment (vinfo, dr_info, vectype, + mis_align); + if (dr_align == dr_aligned + || dr_align == dr_unaligned_supported) + { + nstores = 1; + lnel = const_nunits; + ltype = vectype; lvectype = vectype; - int mis_align = dr_misalignment (first_dr_info, ltype); - dr_alignment_support dr_align - = vect_supportable_dr_alignment (vinfo, dr_info, ltype, - mis_align); alignment_support_scheme = dr_align; misalignment = mis_align; - - /* First check if vec_extract optab doesn't support extraction - of vector elts directly. */ - scalar_mode elmode = SCALAR_TYPE_MODE (elem_type); - machine_mode vmode; - if (!VECTOR_MODE_P (TYPE_MODE (vectype)) - || !related_vector_mode (TYPE_MODE (vectype), elmode, - n).exists (&vmode) - || (convert_optab_handler (vec_extract_optab, - TYPE_MODE (vectype), vmode) - == CODE_FOR_nothing) - || !(dr_align == dr_aligned - || dr_align == dr_unaligned_supported)) - { - /* Try to avoid emitting an extract of vector elements - by performing the extracts using an integer type of the - same size, extracting from a vector of those and then - re-interpreting it as the original vector type if - supported. 
*/ - unsigned lsize - = n * GET_MODE_BITSIZE (elmode); - unsigned int lnunits = const_nunits / n; - /* If we can't construct such a vector fall back to - element extracts from the original vector type and - element size stores. */ - if (int_mode_for_size (lsize, 0).exists (&elmode) - && VECTOR_MODE_P (TYPE_MODE (vectype)) - && related_vector_mode (TYPE_MODE (vectype), elmode, - lnunits).exists (&vmode) - && (convert_optab_handler (vec_extract_optab, - vmode, elmode) - != CODE_FOR_nothing)) - { - nstores = lnunits; - lnel = n; - ltype = build_nonstandard_integer_type (lsize, 1); - lvectype = build_vector_type (ltype, nstores); - } - /* Else fall back to vector extraction anyway. - Fewer stores are more important than avoiding spilling - of the vector we extract from. Compared to the - construction case in vectorizable_load no store-forwarding - issue exists here for reasonable archs. But only - if the store is supported. */ - else if (!(dr_align == dr_aligned - || dr_align == dr_unaligned_supported)) - { - nstores = const_nunits; - lnel = 1; - ltype = elem_type; - lvectype = vectype; - } - } } - unsigned align; - /* ??? We'd want to use - if (alignment_support_scheme == dr_aligned) - align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info)); - since doing that is what we assume we can in the above checks. - But this interferes with groups with gaps where for example - VF == 2 makes the group in the unrolled loop aligned but the - fact that we advance with step between the two subgroups - makes the access to the second unaligned. See PR119586. - We have to anticipate that here or adjust code generation to - avoid the misaligned loads by means of permutations. */ - align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info)); - /* Alignment is at most the access size if we do multiple stores. */ - if (nstores > 1) - align = MIN (tree_to_uhwi (TYPE_SIZE_UNIT (ltype)), align); - ltype = build_aligned_type (ltype, align * BITS_PER_UNIT); - ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); } + else if (n > 1) + { + nstores = const_nunits / n; + lnel = n; + ltype = build_vector_type (elem_type, n); + lvectype = vectype; + int mis_align = dr_misalignment (first_dr_info, ltype); + if (maybe_gt (vf, 1u) + && !multiple_p (DR_STEP_ALIGNMENT (first_dr_info->dr), + DR_TARGET_ALIGNMENT (first_dr_info))) + mis_align = -1; + dr_alignment_support dr_align + = vect_supportable_dr_alignment (vinfo, dr_info, ltype, + mis_align); + alignment_support_scheme = dr_align; + misalignment = mis_align; + + /* First check if vec_extract optab doesn't support extraction + of vector elts directly. */ + scalar_mode elmode = SCALAR_TYPE_MODE (elem_type); + machine_mode vmode; + if (!VECTOR_MODE_P (TYPE_MODE (vectype)) + || !related_vector_mode (TYPE_MODE (vectype), elmode, + n).exists (&vmode) + || (convert_optab_handler (vec_extract_optab, + TYPE_MODE (vectype), vmode) + == CODE_FOR_nothing) + || !(dr_align == dr_aligned + || dr_align == dr_unaligned_supported)) + { + /* Try to avoid emitting an extract of vector elements + by performing the extracts using an integer type of the + same size, extracting from a vector of those and then + re-interpreting it as the original vector type if + supported. */ + unsigned lsize = n * GET_MODE_BITSIZE (elmode); + unsigned int lnunits = const_nunits / n; + /* If we can't construct such a vector fall back to + element extracts from the original vector type and + element size stores. 
*/ + if (int_mode_for_size (lsize, 0).exists (&elmode) + && VECTOR_MODE_P (TYPE_MODE (vectype)) + && related_vector_mode (TYPE_MODE (vectype), elmode, + lnunits).exists (&vmode) + && (convert_optab_handler (vec_extract_optab, + vmode, elmode) + != CODE_FOR_nothing)) + { + nstores = lnunits; + lnel = n; + ltype = build_nonstandard_integer_type (lsize, 1); + lvectype = build_vector_type (ltype, nstores); + } + /* Else fall back to vector extraction anyway. + Fewer stores are more important than avoiding spilling + of the vector we extract from. Compared to the + construction case in vectorizable_load no store-forwarding + issue exists here for reasonable archs. But only + if the store is supported. */ + else if (!(dr_align == dr_aligned + || dr_align == dr_unaligned_supported)) + { + nstores = const_nunits; + lnel = 1; + ltype = elem_type; + lvectype = vectype; + } + } + } + unsigned align; + if (alignment_support_scheme == dr_aligned) + align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info)); + else + align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info)); + /* Alignment is at most the access size if we do multiple stores. */ + if (nstores > 1) + align = MIN (tree_to_uhwi (TYPE_SIZE_UNIT (ltype)), align); + ltype = build_aligned_type (ltype, align * BITS_PER_UNIT); + int ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); if (!costing_p) { @@ -8939,7 +8678,6 @@ vectorizable_store (vec_info *vinfo, ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep); create_iv (stride_base, PLUS_EXPR, ivstep, NULL, loop, &incr_gsi, insert_after, &offvar, NULL); - incr = gsi_stmt (incr_gsi); stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step); } @@ -8950,104 +8688,68 @@ vectorizable_store (vec_info *vinfo, /* For costing some adjacent vector stores, we'd like to cost with the total number of them once instead of cost each one by one. */ unsigned int n_adjacent_stores = 0; - for (g = 0; g < group_size; g++) + running_off = offvar; + if (!costing_p) + vect_get_vec_defs (vinfo, next_stmt_info, slp_node, ncopies, op, + &vec_oprnds); + unsigned int group_el = 0; + unsigned HOST_WIDE_INT elsz + = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype))); + for (j = 0; j < ncopies; j++) { - running_off = offvar; if (!costing_p) { - if (g) + vec_oprnd = vec_oprnds[j]; + /* Pun the vector to extract from if necessary. */ + if (lvectype != vectype) { - tree size = TYPE_SIZE_UNIT (ltype); - tree pos - = fold_build2 (MULT_EXPR, sizetype, size_int (g), size); - tree newoff = copy_ssa_name (running_off, NULL); - incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR, - running_off, pos); - vect_finish_stmt_generation (vinfo, stmt_info, incr, gsi); - running_off = newoff; + tree tem = make_ssa_name (lvectype); + tree cvt = build1 (VIEW_CONVERT_EXPR, lvectype, vec_oprnd); + gimple *pun = gimple_build_assign (tem, cvt); + vect_finish_stmt_generation (vinfo, stmt_info, pun, gsi); + vec_oprnd = tem; } } - if (!slp) - op = vect_get_store_rhs (next_stmt_info); - if (!costing_p) - vect_get_vec_defs (vinfo, next_stmt_info, slp_node, ncopies, op, - &vec_oprnds); - else - update_prologue_cost (&prologue_cost, op); - unsigned int group_el = 0; - unsigned HOST_WIDE_INT - elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype))); - for (j = 0; j < ncopies; j++) + for (i = 0; i < nstores; i++) { - if (!costing_p) + if (costing_p) { - vec_oprnd = vec_oprnds[j]; - /* Pun the vector to extract from if necessary. 
*/ - if (lvectype != vectype) - { - tree tem = make_ssa_name (lvectype); - tree cvt - = build1 (VIEW_CONVERT_EXPR, lvectype, vec_oprnd); - gimple *pun = gimple_build_assign (tem, cvt); - vect_finish_stmt_generation (vinfo, stmt_info, pun, gsi); - vec_oprnd = tem; - } + n_adjacent_stores++; + continue; } - for (i = 0; i < nstores; i++) + tree newref, newoff; + gimple *incr, *assign; + tree size = TYPE_SIZE (ltype); + /* Extract the i'th component. */ + tree pos = fold_build2 (MULT_EXPR, bitsizetype, + bitsize_int (i), size); + tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd, + size, pos); + + elem = force_gimple_operand_gsi (gsi, elem, true, NULL_TREE, true, + GSI_SAME_STMT); + + tree this_off = build_int_cst (TREE_TYPE (alias_off), + group_el * elsz); + newref = build2 (MEM_REF, ltype, running_off, this_off); + vect_copy_ref_info (newref, DR_REF (first_dr_info->dr)); + + /* And store it to *running_off. */ + assign = gimple_build_assign (newref, elem); + vect_finish_stmt_generation (vinfo, stmt_info, assign, gsi); + + group_el += lnel; + if (group_el == group_size) { - if (costing_p) - { - n_adjacent_stores++; - continue; - } - tree newref, newoff; - gimple *incr, *assign; - tree size = TYPE_SIZE (ltype); - /* Extract the i'th component. */ - tree pos = fold_build2 (MULT_EXPR, bitsizetype, - bitsize_int (i), size); - tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd, - size, pos); - - elem = force_gimple_operand_gsi (gsi, elem, true, - NULL_TREE, true, - GSI_SAME_STMT); - - tree this_off = build_int_cst (TREE_TYPE (alias_off), - group_el * elsz); - newref = build2 (MEM_REF, ltype, - running_off, this_off); - vect_copy_ref_info (newref, DR_REF (first_dr_info->dr)); - - /* And store it to *running_off. */ - assign = gimple_build_assign (newref, elem); - vect_finish_stmt_generation (vinfo, stmt_info, assign, gsi); - - group_el += lnel; - if (! slp - || group_el == group_size) - { - newoff = copy_ssa_name (running_off, NULL); - incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR, - running_off, stride_step); - vect_finish_stmt_generation (vinfo, stmt_info, incr, gsi); + newoff = copy_ssa_name (running_off, NULL); + incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR, + running_off, stride_step); + vect_finish_stmt_generation (vinfo, stmt_info, incr, gsi); - running_off = newoff; - group_el = 0; - } - if (g == group_size - 1 - && !slp) - { - if (j == 0 && i == 0) - *vec_stmt = assign; - STMT_VINFO_VEC_STMTS (stmt_info).safe_push (assign); - } + running_off = newoff; + group_el = 0; } } - next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info); - vec_oprnds.truncate(0); - if (slp) - break; } if (costing_p) @@ -9070,8 +8772,7 @@ vectorizable_store (vec_info *vinfo, if (nstores > 1) inside_cost += record_stmt_cost (cost_vec, n_adjacent_stores, - vec_to_scalar, stmt_info, slp_node, - 0, vect_body); + vec_to_scalar, slp_node, 0, vect_body); } if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, @@ -9189,7 +8890,7 @@ vectorizable_store (vec_info *vinfo, if (memory_access_type == VMAT_LOAD_STORE_LANES) { - if (costing_p && slp_node) + if (costing_p) /* Update all incoming store operand nodes, the general handling above only handles the mask and the first store operand node. */ for (slp_tree child : SLP_TREE_CHILDREN (slp_node)) @@ -9205,49 +8906,18 @@ vectorizable_store (vec_info *vinfo, /* For costing some adjacent vector stores, we'd like to cost with the total number of them once instead of cost each one by one. 
*/ unsigned int n_adjacent_stores = 0; - if (slp) - ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) / group_size; + int ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) / group_size; for (j = 0; j < ncopies; j++) { - gimple *new_stmt; if (j == 0) { - /* For interleaved stores we collect vectorized defs for all - the stores in the group in DR_CHAIN. DR_CHAIN is then used - as an input to vect_permute_store_chain(). */ - stmt_vec_info next_stmt_info = first_stmt_info; - for (i = 0; i < group_size; i++) - { - /* Since gaps are not supported for interleaved stores, - DR_GROUP_SIZE is the exact number of stmts in the - chain. Therefore, NEXT_STMT_INFO can't be NULL_TREE. */ - op = vect_get_store_rhs (next_stmt_info); - if (costing_p) - update_prologue_cost (&prologue_cost, op); - else if (!slp) - { - vect_get_vec_defs_for_operand (vinfo, next_stmt_info, - ncopies, op, - gvec_oprnds[i]); - vec_oprnd = (*gvec_oprnds[i])[0]; - dr_chain.quick_push (vec_oprnd); - } - next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info); - } - if (!costing_p) { if (mask) { - if (slp_node) - vect_get_slp_defs (mask_node, &vec_masks); - else - vect_get_vec_defs_for_operand (vinfo, stmt_info, ncopies, - mask, &vec_masks, - mask_vectype); + vect_get_slp_defs (mask_node, &vec_masks); vec_mask = vec_masks[0]; } - dataref_ptr = vect_create_data_ref_ptr (vinfo, first_stmt_info, aggr_type, NULL, offset, &dummy, @@ -9257,19 +8927,6 @@ vectorizable_store (vec_info *vinfo, else if (!costing_p) { gcc_assert (!LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo)); - /* DR_CHAIN is then used as an input to - vect_permute_store_chain(). */ - if (!slp) - { - /* We should have caught mismatched types earlier. */ - gcc_assert ( - useless_type_conversion_p (vectype, TREE_TYPE (vec_oprnd))); - for (i = 0; i < group_size; i++) - { - vec_oprnd = (*gvec_oprnds[i])[j]; - dr_chain[i] = vec_oprnd; - } - } if (mask) vec_mask = vec_masks[j]; dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi, @@ -9293,17 +8950,12 @@ vectorizable_store (vec_info *vinfo, /* Store the individual vectors into the array. */ for (i = 0; i < group_size; i++) { - if (slp) - { - slp_tree child; - if (i == 0 || !mask_node) - child = SLP_TREE_CHILDREN (slp_node)[i]; - else - child = SLP_TREE_CHILDREN (slp_node)[i + 1]; - vec_oprnd = SLP_TREE_VEC_DEFS (child)[j]; - } + slp_tree child; + if (i == 0 || !mask_node) + child = SLP_TREE_CHILDREN (slp_node)[i]; else - vec_oprnd = dr_chain[i]; + child = SLP_TREE_CHILDREN (slp_node)[i + 1]; + vec_oprnd = SLP_TREE_VEC_DEFS (child)[j]; write_vector_array (vinfo, stmt_info, gsi, vec_oprnd, vec_array, i); } @@ -9369,14 +9021,9 @@ vectorizable_store (vec_info *vinfo, } gimple_call_set_nothrow (call, true); vect_finish_stmt_generation (vinfo, stmt_info, call, gsi); - new_stmt = call; /* Record that VEC_ARRAY is now dead. 
*/ vect_clobber_variable (vinfo, stmt_info, gsi, vec_array); - if (j == 0 && !slp) - *vec_stmt = new_stmt; - if (!slp) - STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt); } if (costing_p) @@ -9400,7 +9047,7 @@ vectorizable_store (vec_info *vinfo, gcc_assert (!grouped_store); auto_vec<tree> vec_offsets; unsigned int inside_cost = 0, prologue_cost = 0; - int num_stmts = ncopies * vec_num; + int num_stmts = vec_num; for (j = 0; j < num_stmts; j++) { gimple *new_stmt; @@ -9408,28 +9055,15 @@ vectorizable_store (vec_info *vinfo, { if (costing_p && vls_type == VLS_STORE_INVARIANT) prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec, - stmt_info, slp_node, 0, - vect_prologue); + slp_node, 0, vect_prologue); else if (!costing_p) { /* Since the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN is of size 1. */ gcc_assert (group_size == 1); - if (slp_node) - vect_get_slp_defs (op_node, gvec_oprnds[0]); - else - vect_get_vec_defs_for_operand (vinfo, first_stmt_info, - num_stmts, op, gvec_oprnds[0]); + vect_get_slp_defs (op_node, gvec_oprnds[0]); if (mask) - { - if (slp_node) - vect_get_slp_defs (mask_node, &vec_masks); - else - vect_get_vec_defs_for_operand (vinfo, stmt_info, - num_stmts, - mask, &vec_masks, - mask_vectype); - } + vect_get_slp_defs (mask_node, &vec_masks); if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) vect_get_gather_scatter_ops (loop_vinfo, loop, stmt_info, @@ -9483,8 +9117,7 @@ vectorizable_store (vec_info *vinfo, unsigned int cnunits = vect_nunits_for_cost (vectype); inside_cost += record_stmt_cost (cost_vec, cnunits, scalar_store, - stmt_info, slp_node, 0, - vect_body); + slp_node, 0, vect_body); continue; } @@ -9552,7 +9185,7 @@ vectorizable_store (vec_info *vinfo, unsigned int cnunits = vect_nunits_for_cost (vectype); inside_cost += record_stmt_cost (cost_vec, cnunits, scalar_store, - stmt_info, slp_node, 0, vect_body); + slp_node, 0, vect_body); continue; } @@ -9660,14 +9293,14 @@ vectorizable_store (vec_info *vinfo, consumed by the load). */ inside_cost += record_stmt_cost (cost_vec, cnunits, vec_to_scalar, - stmt_info, slp_node, 0, vect_body); + slp_node, 0, vect_body); /* N scalar stores plus extracting the elements. */ inside_cost += record_stmt_cost (cost_vec, cnunits, vec_to_scalar, - stmt_info, slp_node, 0, vect_body); + slp_node, 0, vect_body); inside_cost += record_stmt_cost (cost_vec, cnunits, scalar_store, - stmt_info, slp_node, 0, vect_body); + slp_node, 0, vect_body); continue; } @@ -9722,17 +9355,10 @@ vectorizable_store (vec_info *vinfo, vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); } - if (slp) - slp_node->push_vec_def (new_stmt); + slp_node->push_vec_def (new_stmt); } - - if (!slp && !costing_p) - STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt); } - if (!slp && !costing_p) - *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0]; - if (costing_p && dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "vect_model_store_cost: inside_cost = %d, " @@ -9753,332 +9379,262 @@ vectorizable_store (vec_info *vinfo, unsigned int n_adjacent_stores = 0; auto_vec<tree> result_chain (group_size); auto_vec<tree, 1> vec_oprnds; - for (j = 0; j < ncopies; j++) + gimple *new_stmt; + if (!costing_p) { - gimple *new_stmt; - if (j == 0) - { - if (slp && !costing_p) - { - /* Get vectorized arguments for SLP_NODE. 
*/ - vect_get_vec_defs (vinfo, stmt_info, slp_node, 1, op, - &vec_oprnds, mask, &vec_masks); - vec_oprnd = vec_oprnds[0]; - if (mask) - vec_mask = vec_masks[0]; - } - else - { - /* For interleaved stores we collect vectorized defs for all the - stores in the group in DR_CHAIN. DR_CHAIN is then used as an - input to vect_permute_store_chain(). - - If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN - is of size 1. */ - stmt_vec_info next_stmt_info = first_stmt_info; - for (i = 0; i < group_size; i++) - { - /* Since gaps are not supported for interleaved stores, - DR_GROUP_SIZE is the exact number of stmts in the chain. - Therefore, NEXT_STMT_INFO can't be NULL_TREE. In case - that there is no interleaving, DR_GROUP_SIZE is 1, - and only one iteration of the loop will be executed. */ - op = vect_get_store_rhs (next_stmt_info); - if (costing_p) - update_prologue_cost (&prologue_cost, op); - else - { - vect_get_vec_defs_for_operand (vinfo, next_stmt_info, - ncopies, op, - gvec_oprnds[i]); - vec_oprnd = (*gvec_oprnds[i])[0]; - dr_chain.quick_push (vec_oprnd); - } - next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info); - } - if (mask && !costing_p) - { - vect_get_vec_defs_for_operand (vinfo, stmt_info, ncopies, - mask, &vec_masks, - mask_vectype); - vec_mask = vec_masks[0]; - } - } + /* Get vectorized arguments for SLP_NODE. */ + vect_get_vec_defs (vinfo, stmt_info, slp_node, 1, op, + &vec_oprnds, mask, &vec_masks); + vec_oprnd = vec_oprnds[0]; + if (mask) + vec_mask = vec_masks[0]; + } + else + { + /* For interleaved stores we collect vectorized defs for all the + stores in the group in DR_CHAIN. DR_CHAIN is then used as an + input to vect_permute_store_chain(). - /* We should have catched mismatched types earlier. */ - gcc_assert (costing_p - || useless_type_conversion_p (vectype, - TREE_TYPE (vec_oprnd))); - bool simd_lane_access_p - = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) != 0; - if (!costing_p - && simd_lane_access_p - && !loop_masks - && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR - && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0)) - && integer_zerop (get_dr_vinfo_offset (vinfo, first_dr_info)) - && integer_zerop (DR_INIT (first_dr_info->dr)) - && alias_sets_conflict_p (get_alias_set (aggr_type), - get_alias_set (TREE_TYPE (ref_type)))) - { - dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr)); - dataref_offset = build_int_cst (ref_type, 0); - } - else if (!costing_p) - dataref_ptr - = vect_create_data_ref_ptr (vinfo, first_stmt_info, aggr_type, - simd_lane_access_p ? loop : NULL, - offset, &dummy, gsi, &ptr_incr, - simd_lane_access_p, bump); - } - else if (!costing_p) - { - gcc_assert (!LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo)); - /* DR_CHAIN is then used as an input to vect_permute_store_chain(). - If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN is - of size 1. */ - for (i = 0; i < group_size; i++) + If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN + is of size 1. */ + stmt_vec_info next_stmt_info = first_stmt_info; + for (i = 0; i < group_size; i++) + { + /* Since gaps are not supported for interleaved stores, + DR_GROUP_SIZE is the exact number of stmts in the chain. + Therefore, NEXT_STMT_INFO can't be NULL_TREE. In case + that there is no interleaving, DR_GROUP_SIZE is 1, + and only one iteration of the loop will be executed. 
*/ + op = vect_get_store_rhs (next_stmt_info); + if (!costing_p) { - vec_oprnd = (*gvec_oprnds[i])[j]; - dr_chain[i] = vec_oprnd; + vect_get_vec_defs_for_operand (vinfo, next_stmt_info, + 1, op, gvec_oprnds[i]); + vec_oprnd = (*gvec_oprnds[i])[0]; + dr_chain.quick_push (vec_oprnd); } - if (mask) - vec_mask = vec_masks[j]; - if (dataref_offset) - dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset, bump); - else - dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi, - stmt_info, bump); + next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info); } - - new_stmt = NULL; - if (grouped_store) + if (mask && !costing_p) { - /* Permute. */ - gcc_assert (memory_access_type == VMAT_CONTIGUOUS_PERMUTE); - if (costing_p) - { - int group_size = DR_GROUP_SIZE (first_stmt_info); - int nstmts = ceil_log2 (group_size) * group_size; - inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm, - stmt_info, slp_node, 0, - vect_body); - if (dump_enabled_p ()) - dump_printf_loc (MSG_NOTE, vect_location, - "vect_model_store_cost: " - "strided group_size = %d .\n", - group_size); - } - else - vect_permute_store_chain (vinfo, dr_chain, group_size, stmt_info, - gsi, &result_chain); + vect_get_vec_defs_for_operand (vinfo, stmt_info, 1, + mask, &vec_masks, mask_vectype); + vec_mask = vec_masks[0]; } + } - stmt_vec_info next_stmt_info = first_stmt_info; - for (i = 0; i < vec_num; i++) + /* We should have catched mismatched types earlier. */ + gcc_assert (costing_p + || useless_type_conversion_p (vectype, TREE_TYPE (vec_oprnd))); + bool simd_lane_access_p + = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) != 0; + if (!costing_p + && simd_lane_access_p + && !loop_masks + && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR + && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0)) + && integer_zerop (get_dr_vinfo_offset (vinfo, first_dr_info)) + && integer_zerop (DR_INIT (first_dr_info->dr)) + && alias_sets_conflict_p (get_alias_set (aggr_type), + get_alias_set (TREE_TYPE (ref_type)))) + { + dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr)); + dataref_offset = build_int_cst (ref_type, 0); + } + else if (!costing_p) + dataref_ptr = vect_create_data_ref_ptr (vinfo, first_stmt_info, aggr_type, + simd_lane_access_p ? loop : NULL, + offset, &dummy, gsi, &ptr_incr, + simd_lane_access_p, bump); + + new_stmt = NULL; + if (grouped_store) + { + /* Permute. */ + gcc_assert (memory_access_type == VMAT_CONTIGUOUS_PERMUTE); + if (costing_p) { - if (!costing_p) - { - if (slp) - vec_oprnd = vec_oprnds[i]; - else if (grouped_store) - /* For grouped stores vectorized defs are interleaved in - vect_permute_store_chain(). 
*/ - vec_oprnd = result_chain[i]; - } + int group_size = DR_GROUP_SIZE (first_stmt_info); + int nstmts = ceil_log2 (group_size) * group_size; + inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm, + slp_node, 0, vect_body); + if (dump_enabled_p ()) + dump_printf_loc (MSG_NOTE, vect_location, "vect_model_store_cost: " + "strided group_size = %d .\n", group_size); + } + else + vect_permute_store_chain (vinfo, dr_chain, group_size, stmt_info, + gsi, &result_chain); + } - if (memory_access_type == VMAT_CONTIGUOUS_REVERSE) - { - if (costing_p) - inside_cost += record_stmt_cost (cost_vec, 1, vec_perm, - stmt_info, slp_node, 0, - vect_body); - else - { - tree perm_mask = perm_mask_for_reverse (vectype); - tree perm_dest = vect_create_destination_var ( - vect_get_store_rhs (stmt_info), vectype); - tree new_temp = make_ssa_name (perm_dest); - - /* Generate the permute statement. */ - gimple *perm_stmt - = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd, - vec_oprnd, perm_mask); - vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, - gsi); - - perm_stmt = SSA_NAME_DEF_STMT (new_temp); - vec_oprnd = new_temp; - } - } + for (i = 0; i < vec_num; i++) + { + if (!costing_p) + vec_oprnd = vec_oprnds[i]; + if (memory_access_type == VMAT_CONTIGUOUS_REVERSE) + { if (costing_p) + inside_cost += record_stmt_cost (cost_vec, 1, vec_perm, + slp_node, 0, vect_body); + else { - n_adjacent_stores++; + tree perm_mask = perm_mask_for_reverse (vectype); + tree perm_dest + = vect_create_destination_var (vect_get_store_rhs (stmt_info), + vectype); + tree new_temp = make_ssa_name (perm_dest); - if (!slp) - { - next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info); - if (!next_stmt_info) - break; - } + /* Generate the permute statement. */ + gimple *perm_stmt + = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd, + vec_oprnd, perm_mask); + vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi); - continue; + perm_stmt = SSA_NAME_DEF_STMT (new_temp); + vec_oprnd = new_temp; } + } - tree final_mask = NULL_TREE; - tree final_len = NULL_TREE; - tree bias = NULL_TREE; - if (loop_masks) - final_mask = vect_get_loop_mask (loop_vinfo, gsi, loop_masks, - vec_num * ncopies, vectype, - vec_num * j + i); - if (slp && vec_mask) - vec_mask = vec_masks[i]; - if (vec_mask) - final_mask = prepare_vec_mask (loop_vinfo, mask_vectype, final_mask, - vec_mask, gsi); - - if (i > 0) - /* Bump the vector pointer. */ - dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi, - stmt_info, bump); + if (costing_p) + { + n_adjacent_stores++; + continue; + } - unsigned misalign; - unsigned HOST_WIDE_INT align; - align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info)); - if (alignment_support_scheme == dr_aligned) - misalign = 0; - else if (misalignment == DR_MISALIGNMENT_UNKNOWN) - { - align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info)); - misalign = 0; - } - else - misalign = misalignment; - if (dataref_offset == NULL_TREE - && TREE_CODE (dataref_ptr) == SSA_NAME) - set_ptr_info_alignment (get_ptr_info (dataref_ptr), align, - misalign); - align = least_bit_hwi (misalign | align); - - /* Compute IFN when LOOP_LENS or final_mask valid. */ - machine_mode vmode = TYPE_MODE (vectype); - machine_mode new_vmode = vmode; - internal_fn partial_ifn = IFN_LAST; - if (loop_lens) - { - opt_machine_mode new_ovmode - = get_len_load_store_mode (vmode, false, &partial_ifn); - new_vmode = new_ovmode.require (); - unsigned factor - = (new_ovmode == vmode) ? 
1 : GET_MODE_UNIT_SIZE (vmode); - final_len = vect_get_loop_len (loop_vinfo, gsi, loop_lens, - vec_num * ncopies, vectype, - vec_num * j + i, factor); - } - else if (final_mask) - { - if (!can_vec_mask_load_store_p ( - vmode, TYPE_MODE (TREE_TYPE (final_mask)), false, - &partial_ifn)) - gcc_unreachable (); - } + tree final_mask = NULL_TREE; + tree final_len = NULL_TREE; + tree bias = NULL_TREE; + if (loop_masks) + final_mask = vect_get_loop_mask (loop_vinfo, gsi, loop_masks, + vec_num, vectype, i); + if (vec_mask) + vec_mask = vec_masks[i]; + if (vec_mask) + final_mask = prepare_vec_mask (loop_vinfo, mask_vectype, final_mask, + vec_mask, gsi); + + if (i > 0) + /* Bump the vector pointer. */ + dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi, + stmt_info, bump); + + unsigned misalign; + unsigned HOST_WIDE_INT align; + align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info)); + if (alignment_support_scheme == dr_aligned) + misalign = 0; + else if (misalignment == DR_MISALIGNMENT_UNKNOWN) + { + align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info)); + misalign = 0; + } + else + misalign = misalignment; + if (dataref_offset == NULL_TREE + && TREE_CODE (dataref_ptr) == SSA_NAME) + set_ptr_info_alignment (get_ptr_info (dataref_ptr), align, misalign); + align = least_bit_hwi (misalign | align); + + /* Compute IFN when LOOP_LENS or final_mask valid. */ + machine_mode vmode = TYPE_MODE (vectype); + machine_mode new_vmode = vmode; + internal_fn partial_ifn = IFN_LAST; + if (loop_lens) + { + opt_machine_mode new_ovmode + = get_len_load_store_mode (vmode, false, &partial_ifn); + new_vmode = new_ovmode.require (); + unsigned factor + = (new_ovmode == vmode) ? 1 : GET_MODE_UNIT_SIZE (vmode); + final_len = vect_get_loop_len (loop_vinfo, gsi, loop_lens, + vec_num, vectype, i, factor); + } + else if (final_mask) + { + if (!can_vec_mask_load_store_p (vmode, + TYPE_MODE (TREE_TYPE (final_mask)), + false, &partial_ifn)) + gcc_unreachable (); + } - if (partial_ifn == IFN_MASK_LEN_STORE) + if (partial_ifn == IFN_MASK_LEN_STORE) + { + if (!final_len) { - if (!final_len) - { - /* Pass VF value to 'len' argument of - MASK_LEN_STORE if LOOP_LENS is invalid. */ - final_len = size_int (TYPE_VECTOR_SUBPARTS (vectype)); - } - if (!final_mask) - { - /* Pass all ones value to 'mask' argument of - MASK_LEN_STORE if final_mask is invalid. */ - mask_vectype = truth_type_for (vectype); - final_mask = build_minus_one_cst (mask_vectype); - } + /* Pass VF value to 'len' argument of + MASK_LEN_STORE if LOOP_LENS is invalid. */ + final_len = size_int (TYPE_VECTOR_SUBPARTS (vectype)); } - if (final_len) + if (!final_mask) { - signed char biasval - = LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo); - - bias = build_int_cst (intQI_type_node, biasval); + /* Pass all ones value to 'mask' argument of + MASK_LEN_STORE if final_mask is invalid. */ + mask_vectype = truth_type_for (vectype); + final_mask = build_minus_one_cst (mask_vectype); } + } + if (final_len) + { + signed char biasval = LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo); + bias = build_int_cst (intQI_type_node, biasval); + } - /* Arguments are ready. Create the new vector stmt. */ - if (final_len) - { - gcall *call; - tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT); - /* Need conversion if it's wrapped with VnQI. 
*/ - if (vmode != new_vmode) - { - tree new_vtype - = build_vector_type_for_mode (unsigned_intQI_type_node, - new_vmode); - tree var = vect_get_new_ssa_name (new_vtype, vect_simple_var); - vec_oprnd = build1 (VIEW_CONVERT_EXPR, new_vtype, vec_oprnd); - gassign *new_stmt - = gimple_build_assign (var, VIEW_CONVERT_EXPR, vec_oprnd); - vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); - vec_oprnd = var; - } - - if (partial_ifn == IFN_MASK_LEN_STORE) - call = gimple_build_call_internal (IFN_MASK_LEN_STORE, 6, - dataref_ptr, ptr, final_mask, - final_len, bias, vec_oprnd); - else - call = gimple_build_call_internal (IFN_LEN_STORE, 5, - dataref_ptr, ptr, final_len, - bias, vec_oprnd); - gimple_call_set_nothrow (call, true); - vect_finish_stmt_generation (vinfo, stmt_info, call, gsi); - new_stmt = call; - } - else if (final_mask) - { - tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT); - gcall *call - = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr, - ptr, final_mask, vec_oprnd); - gimple_call_set_nothrow (call, true); - vect_finish_stmt_generation (vinfo, stmt_info, call, gsi); - new_stmt = call; - } - else - { - data_ref - = fold_build2 (MEM_REF, vectype, dataref_ptr, - dataref_offset ? dataref_offset - : build_int_cst (ref_type, 0)); - if (alignment_support_scheme == dr_aligned) - ; - else - TREE_TYPE (data_ref) - = build_aligned_type (TREE_TYPE (data_ref), - align * BITS_PER_UNIT); - vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr)); - new_stmt = gimple_build_assign (data_ref, vec_oprnd); + /* Arguments are ready. Create the new vector stmt. */ + if (final_len) + { + gcall *call; + tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT); + /* Need conversion if it's wrapped with VnQI. */ + if (vmode != new_vmode) + { + tree new_vtype + = build_vector_type_for_mode (unsigned_intQI_type_node, + new_vmode); + tree var = vect_get_new_ssa_name (new_vtype, vect_simple_var); + vec_oprnd = build1 (VIEW_CONVERT_EXPR, new_vtype, vec_oprnd); + gassign *new_stmt + = gimple_build_assign (var, VIEW_CONVERT_EXPR, vec_oprnd); vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + vec_oprnd = var; } - if (slp) - continue; - - next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info); - if (!next_stmt_info) - break; + if (partial_ifn == IFN_MASK_LEN_STORE) + call = gimple_build_call_internal (IFN_MASK_LEN_STORE, 6, + dataref_ptr, ptr, final_mask, + final_len, bias, vec_oprnd); + else + call = gimple_build_call_internal (IFN_LEN_STORE, 5, + dataref_ptr, ptr, final_len, + bias, vec_oprnd); + gimple_call_set_nothrow (call, true); + vect_finish_stmt_generation (vinfo, stmt_info, call, gsi); + new_stmt = call; } - if (!slp && !costing_p) + else if (final_mask) { - if (j == 0) - *vec_stmt = new_stmt; - STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt); + tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT); + gcall *call + = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr, + ptr, final_mask, vec_oprnd); + gimple_call_set_nothrow (call, true); + vect_finish_stmt_generation (vinfo, stmt_info, call, gsi); + new_stmt = call; + } + else + { + data_ref = fold_build2 (MEM_REF, vectype, dataref_ptr, + dataref_offset ? 
dataref_offset + : build_int_cst (ref_type, 0)); + if (alignment_support_scheme == dr_aligned) + ; + else + TREE_TYPE (data_ref) + = build_aligned_type (TREE_TYPE (data_ref), + align * BITS_PER_UNIT); + vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr)); + new_stmt = gimple_build_assign (data_ref, vec_oprnd); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); } } @@ -10110,12 +9666,12 @@ vectorizable_store (vec_info *vinfo, { /* Spill. */ prologue_cost - += record_stmt_cost (cost_vec, ncopies, vector_store, - stmt_info, slp_node, 0, vect_epilogue); + += record_stmt_cost (cost_vec, 1, vector_store, + slp_node, 0, vect_epilogue); /* Loads. */ prologue_cost - += record_stmt_cost (cost_vec, ncopies * nregs, scalar_load, - stmt_info, slp_node, 0, vect_epilogue); + += record_stmt_cost (cost_vec, nregs, scalar_load, + slp_node, 0, vect_epilogue); } } } @@ -10280,7 +9836,6 @@ vectorizable_load (vec_info *vinfo, tree dataref_ptr = NULL_TREE; tree dataref_offset = NULL_TREE; gimple *ptr_incr = NULL; - int ncopies; int i, j; unsigned int group_size; poly_uint64 group_gap_adj; @@ -10294,7 +9849,6 @@ vectorizable_load (vec_info *vinfo, bool compute_in_loop = false; class loop *at_loop; int vec_num; - bool slp = (slp_node != NULL); bool slp_perm = false; bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo); poly_uint64 vf; @@ -10353,7 +9907,7 @@ vectorizable_load (vec_info *vinfo, return false; mask_index = internal_fn_mask_index (ifn); - if (mask_index >= 0 && slp_node) + if (mask_index >= 0) mask_index = vect_slp_child_index_for_operand (call, mask_index, STMT_VINFO_GATHER_SCATTER_P (stmt_info)); if (mask_index >= 0 @@ -10362,7 +9916,7 @@ vectorizable_load (vec_info *vinfo, return false; els_index = internal_fn_else_index (ifn); - if (els_index >= 0 && slp_node) + if (els_index >= 0) els_index = vect_slp_child_index_for_operand (call, els_index, STMT_VINFO_GATHER_SCATTER_P (stmt_info)); if (els_index >= 0 @@ -10383,19 +9937,9 @@ vectorizable_load (vec_info *vinfo, else vf = 1; - /* Multiple types in SLP are handled by creating the appropriate number of - vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in - case of SLP. */ - if (slp) - ncopies = 1; - else - ncopies = vect_get_num_copies (loop_vinfo, vectype); - - gcc_assert (ncopies >= 1); - /* FORNOW. This restriction should be relaxed. */ if (nested_in_vect_loop - && (ncopies > 1 || (slp && SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) > 1))) + && SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) > 1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, @@ -10403,20 +9947,6 @@ vectorizable_load (vec_info *vinfo, return false; } - /* Invalidate assumptions made by dependence analysis when vectorization - on the unrolled body effectively re-orders stmts. */ - if (ncopies > 1 - && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0 - && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo), - STMT_VINFO_MIN_NEG_DIST (stmt_info))) - { - if (dump_enabled_p ()) - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "cannot perform implicit CSE when unrolling " - "with negative dependence distance\n"); - return false; - } - elem_type = TREE_TYPE (vectype); mode = TYPE_MODE (vectype); @@ -10441,15 +9971,6 @@ vectorizable_load (vec_info *vinfo, first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info); group_size = DR_GROUP_SIZE (first_stmt_info); - /* Refuse non-SLP vectorization of SLP-only groups. 
*/ - if (!slp && STMT_VINFO_SLP_VECT_ONLY (first_stmt_info)) - { - if (dump_enabled_p ()) - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "cannot vectorize load in non-SLP mode.\n"); - return false; - } - /* Invalidate assumptions made by dependence analysis when vectorization on the unrolled body effectively re-orders stmts. */ if (STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0 @@ -10475,7 +9996,7 @@ vectorizable_load (vec_info *vinfo, int maskload_elsval = 0; bool need_zeroing = false; if (!get_load_store_type (vinfo, stmt_info, vectype, slp_node, mask, VLS_LOAD, - ncopies, &memory_access_type, &poffset, + 1, &memory_access_type, &poffset, &alignment_support_scheme, &misalignment, &gs_info, &lanes_ifn, &elsvals)) return false; @@ -10490,8 +10011,7 @@ vectorizable_load (vec_info *vinfo, /* ??? The following checks should really be part of get_group_load_store_type. */ - if (slp - && SLP_TREE_LOAD_PERMUTATION (slp_node).exists () + if (SLP_TREE_LOAD_PERMUTATION (slp_node).exists () && !((memory_access_type == VMAT_ELEMENTWISE || memory_access_type == VMAT_GATHER_SCATTER) && SLP_TREE_LANES (slp_node) == 1)) @@ -10534,8 +10054,7 @@ vectorizable_load (vec_info *vinfo, } } - if (slp_node - && slp_node->ldst_lanes + if (slp_node->ldst_lanes && memory_access_type != VMAT_LOAD_STORE_LANES) { if (dump_enabled_p ()) @@ -10586,8 +10105,7 @@ vectorizable_load (vec_info *vinfo, if (costing_p) /* transformation not required. */ { - if (slp_node - && mask + if (mask && !vect_maybe_update_slp_op_vectype (slp_op, mask_vectype)) { @@ -10597,10 +10115,7 @@ vectorizable_load (vec_info *vinfo, return false; } - if (!slp) - STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type; - else - SLP_TREE_MEMORY_ACCESS_TYPE (slp_node) = memory_access_type; + SLP_TREE_MEMORY_ACCESS_TYPE (slp_node) = memory_access_type; if (loop_vinfo && LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)) @@ -10654,16 +10169,10 @@ vectorizable_load (vec_info *vinfo, if (elsvals.length ()) maskload_elsval = *elsvals.begin (); - if (!slp) - gcc_assert (memory_access_type - == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info)); - else - gcc_assert (memory_access_type - == SLP_TREE_MEMORY_ACCESS_TYPE (slp_node)); + gcc_assert (memory_access_type == SLP_TREE_MEMORY_ACCESS_TYPE (slp_node)); if (dump_enabled_p () && !costing_p) - dump_printf_loc (MSG_NOTE, vect_location, - "transform load. ncopies = %d\n", ncopies); + dump_printf_loc (MSG_NOTE, vect_location, "transform load.\n"); /* Transform. */ @@ -10688,9 +10197,8 @@ vectorizable_load (vec_info *vinfo, enum vect_cost_model_location cost_loc = hoist_p ? vect_prologue : vect_body; unsigned int cost = record_stmt_cost (cost_vec, 1, scalar_load, - stmt_info, slp_node, 0, - cost_loc); - cost += record_stmt_cost (cost_vec, 1, scalar_to_vec, stmt_info, + slp_node, 0, cost_loc); + cost += record_stmt_cost (cost_vec, 1, scalar_to_vec, slp_node, 0, cost_loc); unsigned int prologue_cost = hoist_p ? cost : 0; unsigned int inside_cost = hoist_p ? 
0 : cost; @@ -10734,15 +10242,8 @@ vectorizable_load (vec_info *vinfo, vectype, &gsi2); } gimple *new_stmt = SSA_NAME_DEF_STMT (new_temp); - if (slp) - for (j = 0; j < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); ++j) - slp_node->push_vec_def (new_stmt); - else - { - for (j = 0; j < ncopies; ++j) - STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt); - *vec_stmt = new_stmt; - } + for (j = 0; j < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); ++j) + slp_node->push_vec_def (new_stmt); return true; } @@ -10776,8 +10277,7 @@ vectorizable_load (vec_info *vinfo, first_dr_info = dr_info; } - if (slp && grouped_load - && memory_access_type == VMAT_STRIDED_SLP) + if (grouped_load && memory_access_type == VMAT_STRIDED_SLP) { group_size = DR_GROUP_SIZE (first_stmt_info); ref_type = get_group_alias_ptr_type (first_stmt_info); @@ -10847,6 +10347,15 @@ vectorizable_load (vec_info *vinfo, if (n == const_nunits) { int mis_align = dr_misalignment (first_dr_info, vectype); + /* With VF > 1 we advance the DR by step, if that is constant + and only aligned when performed VF times, DR alignment + analysis can analyze this as aligned since it assumes + contiguous accesses. But that is not how we code generate + here, so adjust for this. */ + if (maybe_gt (vf, 1u) + && !multiple_p (DR_STEP_ALIGNMENT (first_dr_info->dr), + DR_TARGET_ALIGNMENT (first_dr_info))) + mis_align = -1; dr_alignment_support dr_align = vect_supportable_dr_alignment (vinfo, dr_info, vectype, mis_align); @@ -10875,6 +10384,10 @@ vectorizable_load (vec_info *vinfo, if (VECTOR_TYPE_P (ptype)) { mis_align = dr_misalignment (first_dr_info, ptype); + if (maybe_gt (vf, 1u) + && !multiple_p (DR_STEP_ALIGNMENT (first_dr_info->dr), + DR_TARGET_ALIGNMENT (first_dr_info))) + mis_align = -1; dr_align = vect_supportable_dr_alignment (vinfo, dr_info, ptype, mis_align); @@ -10894,30 +10407,31 @@ vectorizable_load (vec_info *vinfo, } } unsigned align; - /* ??? The above is still wrong, see vectorizable_store. */ - align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info)); + if (alignment_support_scheme == dr_aligned) + align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info)); + else + align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info)); /* Alignment is at most the access size if we do multiple loads. */ if (nloads > 1) align = MIN (tree_to_uhwi (TYPE_SIZE_UNIT (ltype)), align); ltype = build_aligned_type (ltype, align * BITS_PER_UNIT); } - if (slp) + /* For SLP permutation support we need to load the whole group, + not only the number of vector stmts the permutation result + fits in. */ + int ncopies; + if (slp_perm) { - /* For SLP permutation support we need to load the whole group, - not only the number of vector stmts the permutation result - fits in. */ - if (slp_perm) - { - /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for - variable VF. */ - unsigned int const_vf = vf.to_constant (); - ncopies = CEIL (group_size * const_vf, const_nunits); - dr_chain.create (ncopies); - } - else - ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); + /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for + variable VF. 
*/ + unsigned int const_vf = vf.to_constant (); + ncopies = CEIL (group_size * const_vf, const_nunits); + dr_chain.create (ncopies); } + else + ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); + unsigned int group_el = 0; unsigned HOST_WIDE_INT elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype))); @@ -10941,8 +10455,7 @@ vectorizable_load (vec_info *vinfo, n_adjacent_loads++; else inside_cost += record_stmt_cost (cost_vec, 1, scalar_load, - stmt_info, slp_node, 0, - vect_body); + slp_node, 0, vect_body); continue; } tree this_off = build_int_cst (TREE_TYPE (alias_off), @@ -10956,14 +10469,13 @@ vectorizable_load (vec_info *vinfo, CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_temp); group_el += lnel; - if (! slp - || group_el == group_size) + if (group_el == group_size) { n_groups++; /* When doing SLP make sure to not load elements from the next vector iteration, those will not be accessed so just use the last element again. See PR107451. */ - if (!slp || known_lt (n_groups, vf)) + if (known_lt (n_groups, vf)) { tree newoff = copy_ssa_name (running_off); gimple *incr @@ -10980,8 +10492,7 @@ vectorizable_load (vec_info *vinfo, { if (costing_p) inside_cost += record_stmt_cost (cost_vec, 1, vec_construct, - stmt_info, slp_node, 0, - vect_body); + slp_node, 0, vect_body); else { tree vec_inv = build_constructor (lvectype, v); @@ -11012,19 +10523,10 @@ vectorizable_load (vec_info *vinfo, if (!costing_p) { - if (slp) - { - if (slp_perm) - dr_chain.quick_push (gimple_assign_lhs (new_stmt)); - else - slp_node->push_vec_def (new_stmt); - } + if (slp_perm) + dr_chain.quick_push (gimple_assign_lhs (new_stmt)); else - { - if (j == 0) - *vec_stmt = new_stmt; - STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt); - } + slp_node->push_vec_def (new_stmt); } } if (slp_perm) @@ -11036,8 +10538,7 @@ vectorizable_load (vec_info *vinfo, vect_transform_slp_perm_load (vinfo, slp_node, vNULL, NULL, vf, true, &n_perms, &n_loads); inside_cost += record_stmt_cost (cost_vec, n_perms, vec_perm, - first_stmt_info, slp_node, 0, - vect_body); + slp_node, 0, vect_body); } else vect_transform_slp_perm_load (vinfo, slp_node, dr_chain, gsi, vf, @@ -11061,12 +10562,11 @@ vectorizable_load (vec_info *vinfo, return true; } - if (memory_access_type == VMAT_GATHER_SCATTER - || (!slp && memory_access_type == VMAT_CONTIGUOUS)) + if (memory_access_type == VMAT_GATHER_SCATTER) grouped_load = false; if (grouped_load - || (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())) + || SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()) { if (grouped_load) { @@ -11080,7 +10580,7 @@ vectorizable_load (vec_info *vinfo, } /* For SLP vectorization we directly vectorize a subchain without permutation. */ - if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()) + if (! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()) first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0]; /* For BB vectorization always use the first stmt to base the data ref pointer on. */ @@ -11088,60 +10588,39 @@ vectorizable_load (vec_info *vinfo, first_stmt_info_for_drptr = vect_find_first_scalar_stmt_in_slp (slp_node); - /* Check if the chain of loads is already vectorized. */ - if (STMT_VINFO_VEC_STMTS (first_stmt_info).exists () - /* For SLP we would need to copy over SLP_TREE_VEC_DEFS. - ??? But we can only do so if there is exactly one - as we have no way to get at the rest. Leave the CSE - opportunity alone. - ??? 
With the group load eventually participating - in multiple different permutations (having multiple - slp nodes which refer to the same group) the CSE - is even wrong code. See PR56270. */ - && !slp) - { - *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0]; - return true; - } first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info); group_gap_adj = 0; /* VEC_NUM is the number of vect stmts to be created for this group. */ - if (slp) - { - grouped_load = false; - /* If an SLP permutation is from N elements to N elements, - and if one vector holds a whole number of N, we can load - the inputs to the permutation in the same way as an - unpermuted sequence. In other cases we need to load the - whole group, not only the number of vector stmts the - permutation result fits in. */ - unsigned scalar_lanes = SLP_TREE_LANES (slp_node); - if (nested_in_vect_loop) - /* We do not support grouped accesses in a nested loop, - instead the access is contiguous but it might be - permuted. No gap adjustment is needed though. */ - vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); - else if (slp_perm - && (group_size != scalar_lanes - || !multiple_p (nunits, group_size))) - { - /* We don't yet generate such SLP_TREE_LOAD_PERMUTATIONs for - variable VF; see vect_transform_slp_perm_load. */ - unsigned int const_vf = vf.to_constant (); - unsigned int const_nunits = nunits.to_constant (); - vec_num = CEIL (group_size * const_vf, const_nunits); - group_gap_adj = vf * group_size - nunits * vec_num; - } - else - { - vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); - group_gap_adj - = group_size - scalar_lanes; - } - } + grouped_load = false; + /* If an SLP permutation is from N elements to N elements, + and if one vector holds a whole number of N, we can load + the inputs to the permutation in the same way as an + unpermuted sequence. In other cases we need to load the + whole group, not only the number of vector stmts the + permutation result fits in. */ + unsigned scalar_lanes = SLP_TREE_LANES (slp_node); + if (nested_in_vect_loop) + /* We do not support grouped accesses in a nested loop, + instead the access is contiguous but it might be + permuted. No gap adjustment is needed though. */ + vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); + else if (slp_perm + && (group_size != scalar_lanes + || !multiple_p (nunits, group_size))) + { + /* We don't yet generate such SLP_TREE_LOAD_PERMUTATIONs for + variable VF; see vect_transform_slp_perm_load. 
*/ + unsigned int const_vf = vf.to_constant (); + unsigned int const_nunits = nunits.to_constant (); + vec_num = CEIL (group_size * const_vf, const_nunits); + group_gap_adj = vf * group_size - nunits * vec_num; + } else - vec_num = group_size; + { + vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); + group_gap_adj = group_size - scalar_lanes; + } ref_type = get_group_alias_ptr_type (first_stmt_info); } @@ -11152,8 +10631,7 @@ vectorizable_load (vec_info *vinfo, group_size = vec_num = 1; group_gap_adj = 0; ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr)); - if (slp) - vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); + vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); } gcc_assert (alignment_support_scheme); @@ -11355,14 +10833,8 @@ vectorizable_load (vec_info *vinfo, auto_vec<tree> vec_offsets; auto_vec<tree> vec_masks; if (mask && !costing_p) - { - if (slp_node) - vect_get_slp_defs (SLP_TREE_CHILDREN (slp_node)[mask_index], - &vec_masks); - else - vect_get_vec_defs_for_operand (vinfo, stmt_info, ncopies, mask, - &vec_masks, mask_vectype); - } + vect_get_slp_defs (SLP_TREE_CHILDREN (slp_node)[mask_index], + &vec_masks); tree vec_mask = NULL_TREE; tree vec_els = NULL_TREE; @@ -11375,8 +10847,7 @@ vectorizable_load (vec_info *vinfo, /* For costing some adjacent vector loads, we'd like to cost with the total number of them once instead of cost each one by one. */ unsigned int n_adjacent_loads = 0; - if (slp_node) - ncopies = slp_node->vec_stmts_size / group_size; + int ncopies = slp_node->vec_stmts_size / group_size; for (j = 0; j < ncopies; j++) { if (costing_p) @@ -11499,32 +10970,17 @@ vectorizable_load (vec_info *vinfo, gimple_call_set_nothrow (call, true); vect_finish_stmt_generation (vinfo, stmt_info, call, gsi); - if (!slp) - dr_chain.create (group_size); /* Extract each vector into an SSA_NAME. */ for (unsigned i = 0; i < group_size; i++) { new_temp = read_vector_array (vinfo, stmt_info, gsi, scalar_dest, vec_array, i, need_zeroing, final_mask); - if (slp) - slp_node->push_vec_def (new_temp); - else - dr_chain.quick_push (new_temp); + slp_node->push_vec_def (new_temp); } - if (!slp) - /* Record the mapping between SSA_NAMEs and statements. */ - vect_record_grouped_load_vectors (vinfo, stmt_info, dr_chain); - /* Record that VEC_ARRAY is now dead. */ vect_clobber_variable (vinfo, stmt_info, gsi, vec_array); - - if (!slp) - dr_chain.release (); - - if (!slp_node) - *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0]; } if (costing_p) @@ -11551,365 +11007,329 @@ vectorizable_load (vec_info *vinfo, gcc_assert (!grouped_load && !slp_perm); unsigned int inside_cost = 0, prologue_cost = 0; - for (j = 0; j < ncopies; j++) + + /* 1. Create the vector or array pointer update chain. */ + if (!costing_p) { - /* 1. Create the vector or array pointer update chain. 
*/ - if (j == 0 && !costing_p) - { - if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) - vect_get_gather_scatter_ops (loop_vinfo, loop, stmt_info, - slp_node, &gs_info, &dataref_ptr, - &vec_offsets); - else - dataref_ptr - = vect_create_data_ref_ptr (vinfo, first_stmt_info, aggr_type, - at_loop, offset, &dummy, gsi, - &ptr_incr, false, bump); - } - else if (!costing_p) + if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) + vect_get_gather_scatter_ops (loop_vinfo, loop, stmt_info, + slp_node, &gs_info, &dataref_ptr, + &vec_offsets); + else + dataref_ptr + = vect_create_data_ref_ptr (vinfo, first_stmt_info, aggr_type, + at_loop, offset, &dummy, gsi, + &ptr_incr, false, bump); + } + + gimple *new_stmt = NULL; + for (i = 0; i < vec_num; i++) + { + tree final_mask = NULL_TREE; + tree final_len = NULL_TREE; + tree bias = NULL_TREE; + if (!costing_p) { - gcc_assert (!LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo)); - if (!STMT_VINFO_GATHER_SCATTER_P (stmt_info)) + if (mask) + vec_mask = vec_masks[i]; + if (loop_masks) + final_mask = vect_get_loop_mask (loop_vinfo, gsi, loop_masks, + vec_num, vectype, i); + if (vec_mask) + final_mask = prepare_vec_mask (loop_vinfo, mask_vectype, + final_mask, vec_mask, gsi); + + if (i > 0 && !STMT_VINFO_GATHER_SCATTER_P (stmt_info)) dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi, stmt_info, bump); } - gimple *new_stmt = NULL; - for (i = 0; i < vec_num; i++) + /* 2. Create the vector-load in the loop. */ + unsigned HOST_WIDE_INT align; + if (gs_info.ifn != IFN_LAST) { - tree final_mask = NULL_TREE; - tree final_len = NULL_TREE; - tree bias = NULL_TREE; - if (!costing_p) + if (costing_p) { - if (mask) - vec_mask = vec_masks[vec_num * j + i]; - if (loop_masks) - final_mask - = vect_get_loop_mask (loop_vinfo, gsi, loop_masks, - vec_num * ncopies, vectype, - vec_num * j + i); - if (vec_mask) - final_mask = prepare_vec_mask (loop_vinfo, mask_vectype, - final_mask, vec_mask, gsi); - - if (i > 0 && !STMT_VINFO_GATHER_SCATTER_P (stmt_info)) - dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, - gsi, stmt_info, bump); + unsigned int cnunits = vect_nunits_for_cost (vectype); + inside_cost + = record_stmt_cost (cost_vec, cnunits, scalar_load, + slp_node, 0, vect_body); + continue; } + if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) + vec_offset = vec_offsets[i]; + tree zero = build_zero_cst (vectype); + tree scale = size_int (gs_info.scale); - /* 2. Create the vector-load in the loop. 
*/ - unsigned HOST_WIDE_INT align; - if (gs_info.ifn != IFN_LAST) + if (gs_info.ifn == IFN_MASK_LEN_GATHER_LOAD) { - if (costing_p) - { - unsigned int cnunits = vect_nunits_for_cost (vectype); - inside_cost - = record_stmt_cost (cost_vec, cnunits, scalar_load, - stmt_info, slp_node, 0, vect_body); - continue; - } - if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) - vec_offset = vec_offsets[vec_num * j + i]; - tree zero = build_zero_cst (vectype); - tree scale = size_int (gs_info.scale); - - if (gs_info.ifn == IFN_MASK_LEN_GATHER_LOAD) + if (loop_lens) + final_len = vect_get_loop_len (loop_vinfo, gsi, loop_lens, + vec_num, vectype, i, 1); + else + final_len = build_int_cst (sizetype, + TYPE_VECTOR_SUBPARTS (vectype)); + signed char biasval + = LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo); + bias = build_int_cst (intQI_type_node, biasval); + if (!final_mask) { - if (loop_lens) - final_len - = vect_get_loop_len (loop_vinfo, gsi, loop_lens, - vec_num * ncopies, vectype, - vec_num * j + i, 1); - else - final_len - = build_int_cst (sizetype, - TYPE_VECTOR_SUBPARTS (vectype)); - signed char biasval - = LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo); - bias = build_int_cst (intQI_type_node, biasval); - if (!final_mask) - { - mask_vectype = truth_type_for (vectype); - final_mask = build_minus_one_cst (mask_vectype); - } + mask_vectype = truth_type_for (vectype); + final_mask = build_minus_one_cst (mask_vectype); } + } - if (final_mask) - { - vec_els = vect_get_mask_load_else - (maskload_elsval, vectype); - if (type_mode_padding_p - && maskload_elsval != MASK_LOAD_ELSE_ZERO) - need_zeroing = true; - } + if (final_mask) + { + vec_els = vect_get_mask_load_else (maskload_elsval, vectype); + if (type_mode_padding_p + && maskload_elsval != MASK_LOAD_ELSE_ZERO) + need_zeroing = true; + } - gcall *call; - if (final_len && final_mask) - { - if (VECTOR_TYPE_P (TREE_TYPE (vec_offset))) - call = gimple_build_call_internal ( - IFN_MASK_LEN_GATHER_LOAD, 8, dataref_ptr, vec_offset, - scale, zero, final_mask, vec_els, final_len, bias); - else - /* Non-vector offset indicates that prefer to take - MASK_LEN_STRIDED_LOAD instead of the - MASK_LEN_GATHER_LOAD with direct stride arg. */ - call = gimple_build_call_internal ( - IFN_MASK_LEN_STRIDED_LOAD, 7, dataref_ptr, vec_offset, - zero, final_mask, vec_els, final_len, bias); - } - else if (final_mask) - call = gimple_build_call_internal (IFN_MASK_GATHER_LOAD, - 6, dataref_ptr, - vec_offset, scale, - zero, final_mask, - vec_els); + gcall *call; + if (final_len && final_mask) + { + if (VECTOR_TYPE_P (TREE_TYPE (vec_offset))) + call = gimple_build_call_internal (IFN_MASK_LEN_GATHER_LOAD, + 8, dataref_ptr, + vec_offset, scale, zero, + final_mask, vec_els, + final_len, bias); else - call = gimple_build_call_internal (IFN_GATHER_LOAD, 4, - dataref_ptr, vec_offset, - scale, zero); - gimple_call_set_nothrow (call, true); - new_stmt = call; + /* Non-vector offset indicates that prefer to take + MASK_LEN_STRIDED_LOAD instead of the + MASK_LEN_GATHER_LOAD with direct stride arg. 
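For reference, the per-lane semantics of the gather call emitted here can be sketched in scalar code as below. The sketch assumes the scale has already been folded into element indices and that inactive lanes take a zero else-value (as with MASK_LOAD_ELSE_ZERO); the helper name is invented for illustration and does not mirror the internal function's exact argument handling.

#include <cstddef>
#include <vector>

// Minimal scalar model of a masked, length-limited gather.  OFF[] is
// assumed to already hold element indices (scale folded in); lanes that
// are beyond LEN or masked off keep the zero else-value.
std::vector<int>
mask_len_gather (const int *base, const std::vector<std::size_t> &off,
                 const std::vector<bool> &mask, std::size_t len)
{
  std::vector<int> out (off.size (), 0);   // zero else-value
  for (std::size_t i = 0; i < off.size (); ++i)
    if (i < len && mask[i])                // lane below LEN and enabled in MASK
      out[i] = base[off[i]];               // the gathered scalar load
  return out;
}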
*/ + call = gimple_build_call_internal + (IFN_MASK_LEN_STRIDED_LOAD, 7, dataref_ptr, + vec_offset, zero, final_mask, vec_els, final_len, + bias); + } + else if (final_mask) + call = gimple_build_call_internal (IFN_MASK_GATHER_LOAD, + 6, dataref_ptr, + vec_offset, scale, + zero, final_mask, vec_els); + else + call = gimple_build_call_internal (IFN_GATHER_LOAD, 4, + dataref_ptr, vec_offset, + scale, zero); + gimple_call_set_nothrow (call, true); + new_stmt = call; + data_ref = NULL_TREE; + } + else if (gs_info.decl) + { + /* The builtin decls path for gather is legacy, x86 only. */ + gcc_assert (!final_len && nunits.is_constant ()); + if (costing_p) + { + unsigned int cnunits = vect_nunits_for_cost (vectype); + inside_cost + = record_stmt_cost (cost_vec, cnunits, scalar_load, + slp_node, 0, vect_body); + continue; + } + poly_uint64 offset_nunits + = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype); + if (known_eq (nunits, offset_nunits)) + { + new_stmt = vect_build_one_gather_load_call + (vinfo, stmt_info, gsi, &gs_info, + dataref_ptr, vec_offsets[i], final_mask); data_ref = NULL_TREE; } - else if (gs_info.decl) + else if (known_eq (nunits, offset_nunits * 2)) { - /* The builtin decls path for gather is legacy, x86 only. */ - gcc_assert (!final_len && nunits.is_constant ()); - if (costing_p) - { - unsigned int cnunits = vect_nunits_for_cost (vectype); - inside_cost - = record_stmt_cost (cost_vec, cnunits, scalar_load, - stmt_info, slp_node, 0, vect_body); - continue; - } - poly_uint64 offset_nunits - = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype); - if (known_eq (nunits, offset_nunits)) + /* We have a offset vector with half the number of + lanes but the builtins will produce full vectype + data with just the lower lanes filled. */ + new_stmt = vect_build_one_gather_load_call + (vinfo, stmt_info, gsi, &gs_info, + dataref_ptr, vec_offsets[2 * i], final_mask); + tree low = make_ssa_name (vectype); + gimple_set_lhs (new_stmt, low); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + + /* now put upper half of final_mask in final_mask low. */ + if (final_mask + && !SCALAR_INT_MODE_P (TYPE_MODE (TREE_TYPE (final_mask)))) { - new_stmt = vect_build_one_gather_load_call - (vinfo, stmt_info, gsi, &gs_info, - dataref_ptr, vec_offsets[vec_num * j + i], - final_mask); - data_ref = NULL_TREE; + int count = nunits.to_constant (); + vec_perm_builder sel (count, count, 1); + sel.quick_grow (count); + for (int i = 0; i < count; ++i) + sel[i] = i | (count / 2); + vec_perm_indices indices (sel, 2, count); + tree perm_mask = vect_gen_perm_mask_checked + (TREE_TYPE (final_mask), indices); + new_stmt = gimple_build_assign (NULL_TREE, VEC_PERM_EXPR, + final_mask, final_mask, + perm_mask); + final_mask = make_ssa_name (TREE_TYPE (final_mask)); + gimple_set_lhs (new_stmt, final_mask); + vect_finish_stmt_generation (vinfo, stmt_info, + new_stmt, gsi); } - else if (known_eq (nunits, offset_nunits * 2)) + else if (final_mask) { - /* We have a offset vector with half the number of - lanes but the builtins will produce full vectype - data with just the lower lanes filled. 
*/ - new_stmt = vect_build_one_gather_load_call - (vinfo, stmt_info, gsi, &gs_info, - dataref_ptr, vec_offsets[2 * vec_num * j + 2 * i], - final_mask); - tree low = make_ssa_name (vectype); - gimple_set_lhs (new_stmt, low); + new_stmt = gimple_build_assign (NULL_TREE, + VEC_UNPACK_HI_EXPR, + final_mask); + final_mask = make_ssa_name + (truth_type_for (gs_info.offset_vectype)); + gimple_set_lhs (new_stmt, final_mask); vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + } - /* now put upper half of final_mask in final_mask low. */ - if (final_mask - && !SCALAR_INT_MODE_P - (TYPE_MODE (TREE_TYPE (final_mask)))) - { - int count = nunits.to_constant (); - vec_perm_builder sel (count, count, 1); - sel.quick_grow (count); - for (int i = 0; i < count; ++i) - sel[i] = i | (count / 2); - vec_perm_indices indices (sel, 2, count); - tree perm_mask = vect_gen_perm_mask_checked - (TREE_TYPE (final_mask), indices); - new_stmt = gimple_build_assign (NULL_TREE, - VEC_PERM_EXPR, - final_mask, - final_mask, - perm_mask); - final_mask = make_ssa_name (TREE_TYPE (final_mask)); - gimple_set_lhs (new_stmt, final_mask); - vect_finish_stmt_generation (vinfo, stmt_info, - new_stmt, gsi); - } - else if (final_mask) - { - new_stmt = gimple_build_assign (NULL_TREE, - VEC_UNPACK_HI_EXPR, - final_mask); - final_mask = make_ssa_name - (truth_type_for (gs_info.offset_vectype)); - gimple_set_lhs (new_stmt, final_mask); - vect_finish_stmt_generation (vinfo, stmt_info, - new_stmt, gsi); - } - - new_stmt = vect_build_one_gather_load_call - (vinfo, stmt_info, gsi, &gs_info, - dataref_ptr, - vec_offsets[2 * vec_num * j + 2 * i + 1], - final_mask); - tree high = make_ssa_name (vectype); - gimple_set_lhs (new_stmt, high); - vect_finish_stmt_generation (vinfo, stmt_info, - new_stmt, gsi); + new_stmt = vect_build_one_gather_load_call + (vinfo, stmt_info, gsi, &gs_info, dataref_ptr, + vec_offsets[2 * i + 1], final_mask); + tree high = make_ssa_name (vectype); + gimple_set_lhs (new_stmt, high); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); - /* compose low + high. */ - int count = nunits.to_constant (); + /* compose low + high. */ + int count = nunits.to_constant (); + vec_perm_builder sel (count, count, 1); + sel.quick_grow (count); + for (int i = 0; i < count; ++i) + sel[i] = i < count / 2 ? i : i + count / 2; + vec_perm_indices indices (sel, 2, count); + tree perm_mask + = vect_gen_perm_mask_checked (vectype, indices); + new_stmt = gimple_build_assign (NULL_TREE, VEC_PERM_EXPR, + low, high, perm_mask); + data_ref = NULL_TREE; + } + else if (known_eq (nunits * 2, offset_nunits)) + { + /* We have a offset vector with double the number of + lanes. Select the low/high part accordingly. */ + vec_offset = vec_offsets[i / 2]; + if (i & 1) + { + int count = offset_nunits.to_constant (); vec_perm_builder sel (count, count, 1); sel.quick_grow (count); for (int i = 0; i < count; ++i) - sel[i] = i < count / 2 ? i : i + count / 2; + sel[i] = i | (count / 2); vec_perm_indices indices (sel, 2, count); - tree perm_mask - = vect_gen_perm_mask_checked (vectype, indices); - new_stmt = gimple_build_assign (NULL_TREE, - VEC_PERM_EXPR, - low, high, perm_mask); - data_ref = NULL_TREE; - } - else if (known_eq (nunits * 2, offset_nunits)) - { - /* We have a offset vector with double the number of - lanes. Select the low/high part accordingly. 
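The two permute selectors used in this region deserve a worked example: sel[i] = i | (count / 2) moves the upper half of a single vector into its low lanes (used on final_mask and on the wide offset vector), while sel[i] = i < count / 2 ? i : i + count / 2 concatenates the low halves of two vectors (used to compose the low and high gather results, whose upper lanes carry no meaningful data). A small standalone check, with plain arrays standing in for vectors and count fixed at 8 for illustration:

#include <cassert>
#include <vector>

// VEC_PERM_EXPR picks element sel[i] from the 2*count lanes formed by
// concatenating the two input vectors.
static std::vector<int>
vec_perm (const std::vector<int> &a, const std::vector<int> &b,
          const std::vector<unsigned> &sel)
{
  std::vector<int> r (sel.size ());
  for (unsigned i = 0; i < sel.size (); ++i)
    r[i] = sel[i] < a.size () ? a[sel[i]] : b[sel[i] - a.size ()];
  return r;
}

int
main ()
{
  const unsigned count = 8;

  // sel[i] = i | (count / 2): bring the upper half of one vector down.
  std::vector<unsigned> up_to_low (count);
  for (unsigned i = 0; i < count; ++i)
    up_to_low[i] = i | (count / 2);
  std::vector<int> x = { 10, 11, 12, 13, 14, 15, 16, 17 };
  std::vector<int> x_hi = vec_perm (x, x, up_to_low);
  for (unsigned i = 0; i < count / 2; ++i)
    assert (x_hi[i] == x[i + count / 2]);

  // sel[i] = i < count/2 ? i : i + count/2: concatenate the low halves.
  std::vector<int> low  = { 0, 1, 2, 3, -1, -1, -1, -1 };  // only low lanes valid
  std::vector<int> high = { 4, 5, 6, 7, -1, -1, -1, -1 };
  std::vector<unsigned> compose (count);
  for (unsigned i = 0; i < count; ++i)
    compose[i] = i < count / 2 ? i : i + count / 2;
  std::vector<int> full = vec_perm (low, high, compose);
  assert (full == std::vector<int> ({ 0, 1, 2, 3, 4, 5, 6, 7 }));
  return 0;
}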
*/ - vec_offset = vec_offsets[(vec_num * j + i) / 2]; - if ((vec_num * j + i) & 1) - { - int count = offset_nunits.to_constant (); - vec_perm_builder sel (count, count, 1); - sel.quick_grow (count); - for (int i = 0; i < count; ++i) - sel[i] = i | (count / 2); - vec_perm_indices indices (sel, 2, count); - tree perm_mask = vect_gen_perm_mask_checked - (TREE_TYPE (vec_offset), indices); - new_stmt = gimple_build_assign (NULL_TREE, - VEC_PERM_EXPR, - vec_offset, - vec_offset, - perm_mask); - vec_offset = make_ssa_name (TREE_TYPE (vec_offset)); - gimple_set_lhs (new_stmt, vec_offset); - vect_finish_stmt_generation (vinfo, stmt_info, - new_stmt, gsi); - } - new_stmt = vect_build_one_gather_load_call - (vinfo, stmt_info, gsi, &gs_info, - dataref_ptr, vec_offset, final_mask); - data_ref = NULL_TREE; + tree perm_mask = vect_gen_perm_mask_checked + (TREE_TYPE (vec_offset), indices); + new_stmt = gimple_build_assign (NULL_TREE, VEC_PERM_EXPR, + vec_offset, vec_offset, + perm_mask); + vec_offset = make_ssa_name (TREE_TYPE (vec_offset)); + gimple_set_lhs (new_stmt, vec_offset); + vect_finish_stmt_generation (vinfo, stmt_info, + new_stmt, gsi); } - else - gcc_unreachable (); + new_stmt = vect_build_one_gather_load_call + (vinfo, stmt_info, gsi, &gs_info, + dataref_ptr, vec_offset, final_mask); + data_ref = NULL_TREE; } else + gcc_unreachable (); + } + else + { + /* Emulated gather-scatter. */ + gcc_assert (!final_mask); + unsigned HOST_WIDE_INT const_nunits = nunits.to_constant (); + if (costing_p) { - /* Emulated gather-scatter. */ - gcc_assert (!final_mask); - unsigned HOST_WIDE_INT const_nunits = nunits.to_constant (); - if (costing_p) - { - /* For emulated gathers N offset vector element - offset add is consumed by the load). */ - inside_cost = record_stmt_cost (cost_vec, const_nunits, - vec_to_scalar, stmt_info, - slp_node, 0, vect_body); - /* N scalar loads plus gathering them into a - vector. */ - inside_cost - = record_stmt_cost (cost_vec, const_nunits, scalar_load, - stmt_info, slp_node, 0, vect_body); - inside_cost - = record_stmt_cost (cost_vec, 1, vec_construct, - stmt_info, slp_node, 0, vect_body); - continue; - } - unsigned HOST_WIDE_INT const_offset_nunits - = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype) - .to_constant (); - vec<constructor_elt, va_gc> *ctor_elts; - vec_alloc (ctor_elts, const_nunits); - gimple_seq stmts = NULL; - /* We support offset vectors with more elements - than the data vector for now. 
*/ - unsigned HOST_WIDE_INT factor - = const_offset_nunits / const_nunits; - vec_offset = vec_offsets[(vec_num * j + i) / factor]; - unsigned elt_offset - = ((vec_num * j + i) % factor) * const_nunits; - tree idx_type = TREE_TYPE (TREE_TYPE (vec_offset)); - tree scale = size_int (gs_info.scale); - align = get_object_alignment (DR_REF (first_dr_info->dr)); - tree ltype = build_aligned_type (TREE_TYPE (vectype), align); - for (unsigned k = 0; k < const_nunits; ++k) - { - tree boff = size_binop (MULT_EXPR, TYPE_SIZE (idx_type), - bitsize_int (k + elt_offset)); - tree idx - = gimple_build (&stmts, BIT_FIELD_REF, idx_type, - vec_offset, TYPE_SIZE (idx_type), boff); - idx = gimple_convert (&stmts, sizetype, idx); - idx = gimple_build (&stmts, MULT_EXPR, sizetype, idx, - scale); - tree ptr = gimple_build (&stmts, PLUS_EXPR, - TREE_TYPE (dataref_ptr), - dataref_ptr, idx); - ptr = gimple_convert (&stmts, ptr_type_node, ptr); - tree elt = make_ssa_name (TREE_TYPE (vectype)); - tree ref = build2 (MEM_REF, ltype, ptr, - build_int_cst (ref_type, 0)); - new_stmt = gimple_build_assign (elt, ref); - gimple_set_vuse (new_stmt, gimple_vuse (gsi_stmt (*gsi))); - gimple_seq_add_stmt (&stmts, new_stmt); - CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE, elt); - } - gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); - new_stmt = gimple_build_assign ( - NULL_TREE, build_constructor (vectype, ctor_elts)); - data_ref = NULL_TREE; + /* For emulated gathers N offset vector element + offset add is consumed by the load). */ + inside_cost = record_stmt_cost (cost_vec, const_nunits, + vec_to_scalar, + slp_node, 0, vect_body); + /* N scalar loads plus gathering them into a + vector. */ + inside_cost + = record_stmt_cost (cost_vec, const_nunits, scalar_load, + slp_node, 0, vect_body); + inside_cost + = record_stmt_cost (cost_vec, 1, vec_construct, + slp_node, 0, vect_body); + continue; } - - vec_dest = vect_create_destination_var (scalar_dest, vectype); - /* DATA_REF is null if we've already built the statement. */ - if (data_ref) + unsigned HOST_WIDE_INT const_offset_nunits + = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype) .to_constant (); + vec<constructor_elt, va_gc> *ctor_elts; + vec_alloc (ctor_elts, const_nunits); + gimple_seq stmts = NULL; + /* We support offset vectors with more elements + than the data vector for now. 
*/ + unsigned HOST_WIDE_INT factor + = const_offset_nunits / const_nunits; + vec_offset = vec_offsets[i / factor]; + unsigned elt_offset = (i % factor) * const_nunits; + tree idx_type = TREE_TYPE (TREE_TYPE (vec_offset)); + tree scale = size_int (gs_info.scale); + align = get_object_alignment (DR_REF (first_dr_info->dr)); + tree ltype = build_aligned_type (TREE_TYPE (vectype), align); + for (unsigned k = 0; k < const_nunits; ++k) { - vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr)); - new_stmt = gimple_build_assign (vec_dest, data_ref); + tree boff = size_binop (MULT_EXPR, TYPE_SIZE (idx_type), + bitsize_int (k + elt_offset)); + tree idx = gimple_build (&stmts, BIT_FIELD_REF, idx_type, + vec_offset, TYPE_SIZE (idx_type), + boff); + idx = gimple_convert (&stmts, sizetype, idx); + idx = gimple_build (&stmts, MULT_EXPR, sizetype, idx, scale); + tree ptr = gimple_build (&stmts, PLUS_EXPR, + TREE_TYPE (dataref_ptr), + dataref_ptr, idx); + ptr = gimple_convert (&stmts, ptr_type_node, ptr); + tree elt = make_ssa_name (TREE_TYPE (vectype)); + tree ref = build2 (MEM_REF, ltype, ptr, + build_int_cst (ref_type, 0)); + new_stmt = gimple_build_assign (elt, ref); + gimple_set_vuse (new_stmt, gimple_vuse (gsi_stmt (*gsi))); + gimple_seq_add_stmt (&stmts, new_stmt); + CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE, elt); } - new_temp = need_zeroing - ? make_ssa_name (vectype) - : make_ssa_name (vec_dest, new_stmt); - gimple_set_lhs (new_stmt, new_temp); - vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); + new_stmt = gimple_build_assign (NULL_TREE, + build_constructor (vectype, + ctor_elts)); + data_ref = NULL_TREE; + } - /* If we need to explicitly zero inactive elements emit a - VEC_COND_EXPR that does so. */ - if (need_zeroing) - { - vec_els = vect_get_mask_load_else (MASK_LOAD_ELSE_ZERO, - vectype); + vec_dest = vect_create_destination_var (scalar_dest, vectype); + /* DATA_REF is null if we've already built the statement. */ + if (data_ref) + { + vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr)); + new_stmt = gimple_build_assign (vec_dest, data_ref); + } + new_temp = (need_zeroing + ? make_ssa_name (vectype) + : make_ssa_name (vec_dest, new_stmt)); + gimple_set_lhs (new_stmt, new_temp); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); - tree new_temp2 = make_ssa_name (vec_dest, new_stmt); - new_stmt - = gimple_build_assign (new_temp2, VEC_COND_EXPR, - final_mask, new_temp, vec_els); - vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, - gsi); - new_temp = new_temp2; - } + /* If we need to explicitly zero inactive elements emit a + VEC_COND_EXPR that does so. */ + if (need_zeroing) + { + vec_els = vect_get_mask_load_else (MASK_LOAD_ELSE_ZERO, + vectype); - /* Store vector loads in the corresponding SLP_NODE. */ - if (slp) - slp_node->push_vec_def (new_stmt); + tree new_temp2 = make_ssa_name (vec_dest, new_stmt); + new_stmt = gimple_build_assign (new_temp2, VEC_COND_EXPR, + final_mask, new_temp, vec_els); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + new_temp = new_temp2; } - if (!slp && !costing_p) - STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt); + /* Store vector loads in the corresponding SLP_NODE. 
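The emulated path above is just N scalar loads glued together into a CONSTRUCTOR. A rough standalone equivalent of the address arithmetic (factor, elt_offset, scale) is sketched below; it assumes a plain char pointer base and int elements, and ignores the alignment and aliasing details the real code tracks.

#include <cstddef>
#include <vector>

// Standalone model of the emulated gather: the offset vector may have
// FACTOR times more lanes than the data vector, so copy number I uses
// lanes starting at (I % FACTOR) * N of offset chunk I / FACTOR.
std::vector<int>
emulated_gather (const char *base,
                 const std::vector<std::vector<long>> &vec_offsets,
                 unsigned i,          // index of the vector copy being built
                 unsigned factor,     // const_offset_nunits / const_nunits
                 unsigned n,          // const_nunits
                 long scale)          // gs_info.scale
{
  const std::vector<long> &off = vec_offsets[i / factor];
  unsigned elt_offset = (i % factor) * n;
  std::vector<int> elts (n);
  for (unsigned k = 0; k < n; ++k)
    {
      // BIT_FIELD_REF of the offset vector, MULT by scale, PLUS onto base.
      const char *ptr = base + off[elt_offset + k] * scale;
      // The scalar MEM_REF load; alignment and aliasing are ignored here.
      elts[k] = *reinterpret_cast<const int *> (ptr);
    }
  return elts;                        // stands in for the CONSTRUCTOR
}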
*/ + slp_node->push_vec_def (new_stmt); } - if (!slp && !costing_p) - *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0]; - if (costing_p && dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "vect_model_load_cost: inside_cost = %u, " @@ -11923,273 +11343,293 @@ vectorizable_load (vec_info *vinfo, /* For costing some adjacent vector loads, we'd like to cost with the total number of them once instead of cost each one by one. */ unsigned int n_adjacent_loads = 0; - for (j = 0; j < ncopies; j++) - { - /* 1. Create the vector or array pointer update chain. */ - if (j == 0 && !costing_p) - { - bool simd_lane_access_p - = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) != 0; - if (simd_lane_access_p - && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR - && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0)) - && integer_zerop (get_dr_vinfo_offset (vinfo, first_dr_info)) - && integer_zerop (DR_INIT (first_dr_info->dr)) - && alias_sets_conflict_p (get_alias_set (aggr_type), - get_alias_set (TREE_TYPE (ref_type))) - && (alignment_support_scheme == dr_aligned - || alignment_support_scheme == dr_unaligned_supported)) - { - dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr)); - dataref_offset = build_int_cst (ref_type, 0); - } - else if (diff_first_stmt_info) - { - dataref_ptr - = vect_create_data_ref_ptr (vinfo, first_stmt_info_for_drptr, - aggr_type, at_loop, offset, &dummy, - gsi, &ptr_incr, simd_lane_access_p, - bump); - /* Adjust the pointer by the difference to first_stmt. */ - data_reference_p ptrdr - = STMT_VINFO_DATA_REF (first_stmt_info_for_drptr); - tree diff - = fold_convert (sizetype, - size_binop (MINUS_EXPR, - DR_INIT (first_dr_info->dr), - DR_INIT (ptrdr))); - dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi, - stmt_info, diff); - if (alignment_support_scheme == dr_explicit_realign) - { - msq = vect_setup_realignment (vinfo, - first_stmt_info_for_drptr, gsi, - &realignment_token, - alignment_support_scheme, - dataref_ptr, &at_loop); - gcc_assert (!compute_in_loop); - } + + /* 1. Create the vector or array pointer update chain. */ + if (!costing_p) + { + bool simd_lane_access_p + = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) != 0; + if (simd_lane_access_p + && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR + && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0)) + && integer_zerop (get_dr_vinfo_offset (vinfo, first_dr_info)) + && integer_zerop (DR_INIT (first_dr_info->dr)) + && alias_sets_conflict_p (get_alias_set (aggr_type), + get_alias_set (TREE_TYPE (ref_type))) + && (alignment_support_scheme == dr_aligned + || alignment_support_scheme == dr_unaligned_supported)) + { + dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr)); + dataref_offset = build_int_cst (ref_type, 0); + } + else if (diff_first_stmt_info) + { + dataref_ptr + = vect_create_data_ref_ptr (vinfo, first_stmt_info_for_drptr, + aggr_type, at_loop, offset, &dummy, + gsi, &ptr_incr, simd_lane_access_p, + bump); + /* Adjust the pointer by the difference to first_stmt. 
*/ + data_reference_p ptrdr + = STMT_VINFO_DATA_REF (first_stmt_info_for_drptr); + tree diff = fold_convert (sizetype, + size_binop (MINUS_EXPR, + DR_INIT (first_dr_info->dr), + DR_INIT (ptrdr))); + dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi, + stmt_info, diff); + if (alignment_support_scheme == dr_explicit_realign) + { + msq = vect_setup_realignment (vinfo, + first_stmt_info_for_drptr, gsi, + &realignment_token, + alignment_support_scheme, + dataref_ptr, &at_loop); + gcc_assert (!compute_in_loop); } - else - dataref_ptr - = vect_create_data_ref_ptr (vinfo, first_stmt_info, aggr_type, - at_loop, - offset, &dummy, gsi, &ptr_incr, - simd_lane_access_p, bump); - } - else if (!costing_p) - { - gcc_assert (!LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo)); - if (dataref_offset) - dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset, - bump); - else - dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi, - stmt_info, bump); } + else + dataref_ptr + = vect_create_data_ref_ptr (vinfo, first_stmt_info, aggr_type, + at_loop, + offset, &dummy, gsi, &ptr_incr, + simd_lane_access_p, bump); + } + else if (!costing_p) + { + gcc_assert (!LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo)); + if (dataref_offset) + dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset, bump); + else + dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi, + stmt_info, bump); + } - if (grouped_load || slp_perm) - dr_chain.create (vec_num); + if (grouped_load || slp_perm) + dr_chain.create (vec_num); - gimple *new_stmt = NULL; - for (i = 0; i < vec_num; i++) + gimple *new_stmt = NULL; + for (i = 0; i < vec_num; i++) + { + tree final_mask = NULL_TREE; + tree final_len = NULL_TREE; + tree bias = NULL_TREE; + + if (!costing_p) { - tree final_mask = NULL_TREE; - tree final_len = NULL_TREE; - tree bias = NULL_TREE; + if (mask) + vec_mask = vec_masks[i]; + if (loop_masks) + final_mask = vect_get_loop_mask (loop_vinfo, gsi, loop_masks, + vec_num, vectype, i); + if (vec_mask) + final_mask = prepare_vec_mask (loop_vinfo, mask_vectype, + final_mask, vec_mask, gsi); - if (!costing_p) - { - if (mask) - vec_mask = vec_masks[vec_num * j + i]; - if (loop_masks) - final_mask = vect_get_loop_mask (loop_vinfo, gsi, loop_masks, - vec_num * ncopies, vectype, - vec_num * j + i); - if (vec_mask) - final_mask = prepare_vec_mask (loop_vinfo, mask_vectype, - final_mask, vec_mask, gsi); + if (i > 0) + dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, + gsi, stmt_info, bump); + } - if (i > 0) - dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, - gsi, stmt_info, bump); - } + /* 2. Create the vector-load in the loop. */ + switch (alignment_support_scheme) + { + case dr_aligned: + case dr_unaligned_supported: + { + if (costing_p) + break; - /* 2. Create the vector-load in the loop. 
*/ - switch (alignment_support_scheme) - { - case dr_aligned: - case dr_unaligned_supported: + unsigned int misalign; + unsigned HOST_WIDE_INT align; + align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info)); + if (alignment_support_scheme == dr_aligned) + misalign = 0; + else if (misalignment == DR_MISALIGNMENT_UNKNOWN) { - if (costing_p) - break; - - unsigned int misalign; - unsigned HOST_WIDE_INT align; - align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info)); - if (alignment_support_scheme == dr_aligned) - misalign = 0; - else if (misalignment == DR_MISALIGNMENT_UNKNOWN) - { - align - = dr_alignment (vect_dr_behavior (vinfo, first_dr_info)); - misalign = 0; - } - else - misalign = misalignment; - if (dataref_offset == NULL_TREE - && TREE_CODE (dataref_ptr) == SSA_NAME) - set_ptr_info_alignment (get_ptr_info (dataref_ptr), align, - misalign); - align = least_bit_hwi (misalign | align); - - /* Compute IFN when LOOP_LENS or final_mask valid. */ - machine_mode vmode = TYPE_MODE (vectype); - machine_mode new_vmode = vmode; - internal_fn partial_ifn = IFN_LAST; - if (loop_lens) - { - opt_machine_mode new_ovmode - = get_len_load_store_mode (vmode, true, &partial_ifn); - new_vmode = new_ovmode.require (); - unsigned factor - = (new_ovmode == vmode) ? 1 : GET_MODE_UNIT_SIZE (vmode); - final_len = vect_get_loop_len (loop_vinfo, gsi, loop_lens, - vec_num * ncopies, vectype, - vec_num * j + i, factor); - } - else if (final_mask) - { - if (!can_vec_mask_load_store_p ( - vmode, TYPE_MODE (TREE_TYPE (final_mask)), true, - &partial_ifn)) - gcc_unreachable (); - } + align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info)); + misalign = 0; + } + else + misalign = misalignment; + if (dataref_offset == NULL_TREE + && TREE_CODE (dataref_ptr) == SSA_NAME) + set_ptr_info_alignment (get_ptr_info (dataref_ptr), align, + misalign); + align = least_bit_hwi (misalign | align); + + /* Compute IFN when LOOP_LENS or final_mask valid. */ + machine_mode vmode = TYPE_MODE (vectype); + machine_mode new_vmode = vmode; + internal_fn partial_ifn = IFN_LAST; + if (loop_lens) + { + opt_machine_mode new_ovmode + = get_len_load_store_mode (vmode, true, &partial_ifn); + new_vmode = new_ovmode.require (); + unsigned factor + = (new_ovmode == vmode) ? 1 : GET_MODE_UNIT_SIZE (vmode); + final_len = vect_get_loop_len (loop_vinfo, gsi, loop_lens, + vec_num, vectype, i, factor); + } + else if (final_mask) + { + if (!can_vec_mask_load_store_p (vmode, + TYPE_MODE + (TREE_TYPE (final_mask)), + true, &partial_ifn)) + gcc_unreachable (); + } - if (partial_ifn == IFN_MASK_LEN_LOAD) + if (partial_ifn == IFN_MASK_LEN_LOAD) + { + if (!final_len) { - if (!final_len) - { - /* Pass VF value to 'len' argument of - MASK_LEN_LOAD if LOOP_LENS is invalid. */ - final_len = size_int (TYPE_VECTOR_SUBPARTS (vectype)); - } - if (!final_mask) - { - /* Pass all ones value to 'mask' argument of - MASK_LEN_LOAD if final_mask is invalid. */ - mask_vectype = truth_type_for (vectype); - final_mask = build_minus_one_cst (mask_vectype); - } + /* Pass VF value to 'len' argument of + MASK_LEN_LOAD if LOOP_LENS is invalid. */ + final_len = size_int (TYPE_VECTOR_SUBPARTS (vectype)); } - if (final_len) + if (!final_mask) { - signed char biasval - = LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo); - - bias = build_int_cst (intQI_type_node, biasval); + /* Pass all ones value to 'mask' argument of + MASK_LEN_LOAD if final_mask is invalid. 
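A scalar model of the len-and-mask load emitted in this arm may help; it assumes the else-value is zero and treats the bias simply as an addend to len (the partial load/store bias is typically 0 or -1 depending on the target). The helper name is made up for the sketch.

#include <cstddef>
#include <vector>

// A lane is loaded only if it is both below LEN + BIAS and enabled in
// MASK; every other lane keeps the else-value.
std::vector<int>
mask_len_load (const int *ptr, const std::vector<bool> &mask,
               std::size_t len, long bias, int else_val = 0)
{
  std::vector<int> out (mask.size (), else_val);
  std::size_t limit = len + bias;     // bias is typically 0 or -1
  for (std::size_t i = 0; i < mask.size () && i < limit; ++i)
    if (mask[i])
      out[i] = ptr[i];
  return out;
}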
*/ + mask_vectype = truth_type_for (vectype); + final_mask = build_minus_one_cst (mask_vectype); } + } + if (final_len) + { + signed char biasval + = LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo); + bias = build_int_cst (intQI_type_node, biasval); + } - tree vec_els; + tree vec_els; - if (final_len) - { - tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT); - gcall *call; - if (partial_ifn == IFN_MASK_LEN_LOAD) - { - vec_els = vect_get_mask_load_else - (maskload_elsval, vectype); - if (type_mode_padding_p - && maskload_elsval != MASK_LOAD_ELSE_ZERO) - need_zeroing = true; - call = gimple_build_call_internal (IFN_MASK_LEN_LOAD, - 6, dataref_ptr, ptr, - final_mask, vec_els, - final_len, bias); - } - else - call = gimple_build_call_internal (IFN_LEN_LOAD, 4, - dataref_ptr, ptr, - final_len, bias); - gimple_call_set_nothrow (call, true); - new_stmt = call; - data_ref = NULL_TREE; - - /* Need conversion if it's wrapped with VnQI. */ - if (vmode != new_vmode) - { - tree new_vtype = build_vector_type_for_mode ( - unsigned_intQI_type_node, new_vmode); - tree var - = vect_get_new_ssa_name (new_vtype, vect_simple_var); - gimple_set_lhs (call, var); - vect_finish_stmt_generation (vinfo, stmt_info, call, - gsi); - tree op = build1 (VIEW_CONVERT_EXPR, vectype, var); - new_stmt = gimple_build_assign (vec_dest, - VIEW_CONVERT_EXPR, op); - } - } - else if (final_mask) + if (final_len) + { + tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT); + gcall *call; + if (partial_ifn == IFN_MASK_LEN_LOAD) { - tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT); - vec_els = vect_get_mask_load_else - (maskload_elsval, vectype); + vec_els = vect_get_mask_load_else (maskload_elsval, + vectype); if (type_mode_padding_p && maskload_elsval != MASK_LOAD_ELSE_ZERO) need_zeroing = true; - gcall *call = gimple_build_call_internal (IFN_MASK_LOAD, 4, - dataref_ptr, ptr, - final_mask, - vec_els); - gimple_call_set_nothrow (call, true); - new_stmt = call; - data_ref = NULL_TREE; + call = gimple_build_call_internal (IFN_MASK_LEN_LOAD, + 6, dataref_ptr, ptr, + final_mask, vec_els, + final_len, bias); } else + call = gimple_build_call_internal (IFN_LEN_LOAD, 4, + dataref_ptr, ptr, + final_len, bias); + gimple_call_set_nothrow (call, true); + new_stmt = call; + data_ref = NULL_TREE; + + /* Need conversion if it's wrapped with VnQI. */ + if (vmode != new_vmode) { - tree ltype = vectype; - tree new_vtype = NULL_TREE; - unsigned HOST_WIDE_INT gap = DR_GROUP_GAP (first_stmt_info); - unsigned HOST_WIDE_INT dr_size - = vect_get_scalar_dr_size (first_dr_info); - poly_int64 off = 0; - if (memory_access_type == VMAT_CONTIGUOUS_REVERSE) - off = (TYPE_VECTOR_SUBPARTS (vectype) - 1) * -dr_size; - unsigned int vect_align - = vect_known_alignment_in_bytes (first_dr_info, vectype, - off); - /* Try to use a single smaller load when we are about - to load excess elements compared to the unrolled - scalar loop. 
*/ - if (known_gt ((vec_num * j + i + 1) * nunits, - (group_size * vf - gap))) + tree new_vtype + = build_vector_type_for_mode (unsigned_intQI_type_node, + new_vmode); + tree var = vect_get_new_ssa_name (new_vtype, + vect_simple_var); + gimple_set_lhs (call, var); + vect_finish_stmt_generation (vinfo, stmt_info, call, + gsi); + tree op = build1 (VIEW_CONVERT_EXPR, vectype, var); + new_stmt = gimple_build_assign (vec_dest, + VIEW_CONVERT_EXPR, op); + } + } + else if (final_mask) + { + tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT); + vec_els = vect_get_mask_load_else (maskload_elsval, vectype); + if (type_mode_padding_p + && maskload_elsval != MASK_LOAD_ELSE_ZERO) + need_zeroing = true; + gcall *call = gimple_build_call_internal (IFN_MASK_LOAD, 4, + dataref_ptr, ptr, + final_mask, + vec_els); + gimple_call_set_nothrow (call, true); + new_stmt = call; + data_ref = NULL_TREE; + } + else + { + tree ltype = vectype; + tree new_vtype = NULL_TREE; + unsigned HOST_WIDE_INT gap = DR_GROUP_GAP (first_stmt_info); + unsigned HOST_WIDE_INT dr_size + = vect_get_scalar_dr_size (first_dr_info); + poly_int64 off = 0; + if (memory_access_type == VMAT_CONTIGUOUS_REVERSE) + off = (TYPE_VECTOR_SUBPARTS (vectype) - 1) * -dr_size; + unsigned int vect_align + = vect_known_alignment_in_bytes (first_dr_info, vectype, + off); + /* Try to use a single smaller load when we are about + to load excess elements compared to the unrolled + scalar loop. */ + if (known_gt ((i + 1) * nunits, + (group_size * vf - gap))) + { + poly_uint64 remain = ((group_size * vf - gap) - i * nunits); + if (known_ge ((i + 1) * nunits - (group_size * vf - gap), + nunits)) + /* DR will be unused. */ + ltype = NULL_TREE; + else if (known_ge (vect_align, + tree_to_poly_uint64 + (TYPE_SIZE_UNIT (vectype)))) + /* Aligned access to excess elements is OK if + at least one element is accessed in the + scalar loop. */ + ; + else if (known_gt (vect_align, + ((nunits - remain) * dr_size))) + /* Aligned access to the gap area when there's + at least one element in it is OK. */ + ; + else { - poly_uint64 remain = ((group_size * vf - gap) - - (vec_num * j + i) * nunits); - if (known_ge ((vec_num * j + i + 1) * nunits - - (group_size * vf - gap), nunits)) - /* DR will be unused. */ - ltype = NULL_TREE; - else if (known_ge (vect_align, - tree_to_poly_uint64 - (TYPE_SIZE_UNIT (vectype)))) - /* Aligned access to excess elements is OK if - at least one element is accessed in the - scalar loop. */ - ; - else if (known_gt (vect_align, - ((nunits - remain) * dr_size))) - /* Aligned access to the gap area when there's - at least one element in it is OK. */ - ; - else + /* remain should now be > 0 and < nunits. */ + unsigned num; + if (known_ne (remain, 0u) + && constant_multiple_p (nunits, remain, &num)) + { + tree ptype; + new_vtype + = vector_vector_composition_type (vectype, num, + &ptype); + if (new_vtype) + ltype = ptype; + } + /* Else use multiple loads or a masked load? */ + /* For loop vectorization we now should have + an alternate type or LOOP_VINFO_PEELING_FOR_GAPS + set. */ + if (loop_vinfo) + gcc_assert (new_vtype + || LOOP_VINFO_PEELING_FOR_GAPS + (loop_vinfo)); + /* But still reduce the access size to the next + required power-of-two so peeling a single + scalar iteration is sufficient. */ + unsigned HOST_WIDE_INT cremain; + if (remain.is_constant (&cremain)) { - /* remain should now be > 0 and < nunits. 
*/ - unsigned num; - if (known_ne (remain, 0u) - && constant_multiple_p (nunits, remain, &num)) + unsigned HOST_WIDE_INT cpart_size + = 1 << ceil_log2 (cremain); + if (known_gt (nunits, cpart_size) + && constant_multiple_p (nunits, cpart_size, + &num)) { tree ptype; new_vtype @@ -12199,334 +11639,289 @@ vectorizable_load (vec_info *vinfo, if (new_vtype) ltype = ptype; } - /* Else use multiple loads or a masked load? */ - /* For loop vectorization we now should have - an alternate type or LOOP_VINFO_PEELING_FOR_GAPS - set. */ - if (loop_vinfo) - gcc_assert (new_vtype - || LOOP_VINFO_PEELING_FOR_GAPS - (loop_vinfo)); - /* But still reduce the access size to the next - required power-of-two so peeling a single - scalar iteration is sufficient. */ - unsigned HOST_WIDE_INT cremain; - if (remain.is_constant (&cremain)) - { - unsigned HOST_WIDE_INT cpart_size - = 1 << ceil_log2 (cremain); - if (known_gt (nunits, cpart_size) - && constant_multiple_p (nunits, cpart_size, - &num)) - { - tree ptype; - new_vtype - = vector_vector_composition_type (vectype, - num, - &ptype); - if (new_vtype) - ltype = ptype; - } - } } } - tree offset - = (dataref_offset ? dataref_offset - : build_int_cst (ref_type, 0)); - if (!ltype) + } + tree offset = (dataref_offset ? dataref_offset + : build_int_cst (ref_type, 0)); + if (!ltype) + ; + else if (ltype != vectype + && memory_access_type == VMAT_CONTIGUOUS_REVERSE) + { + poly_uint64 gap_offset + = (tree_to_poly_uint64 (TYPE_SIZE_UNIT (vectype)) + - tree_to_poly_uint64 (TYPE_SIZE_UNIT (ltype))); + tree gapcst = build_int_cstu (ref_type, gap_offset); + offset = size_binop (PLUS_EXPR, offset, gapcst); + } + if (ltype) + { + data_ref = fold_build2 (MEM_REF, ltype, + dataref_ptr, offset); + if (alignment_support_scheme == dr_aligned) ; - else if (ltype != vectype - && memory_access_type == VMAT_CONTIGUOUS_REVERSE) + else + TREE_TYPE (data_ref) + = build_aligned_type (TREE_TYPE (data_ref), + align * BITS_PER_UNIT); + } + if (!ltype) + data_ref = build_constructor (vectype, NULL); + else if (ltype != vectype) + { + vect_copy_ref_info (data_ref, + DR_REF (first_dr_info->dr)); + tree tem = make_ssa_name (ltype); + new_stmt = gimple_build_assign (tem, data_ref); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, + gsi); + data_ref = NULL; + vec<constructor_elt, va_gc> *v; + /* We've computed 'num' above to statically two + or via constant_multiple_p. 
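A worked instance of this access-size reduction, with a local stand-in for ceil_log2 and purely illustrative numbers: nunits = 8 with remain = 3 useful elements gives cpart_size = 4 and a two-part composition type, of which one part is loaded and the other zero-filled.

#include <cassert>

// Local helper: smallest p with (1u << p) >= x.
static unsigned
ceil_log2 (unsigned x)
{
  unsigned p = 0;
  while ((1u << p) < x)
    ++p;
  return p;
}

int
main ()
{
  unsigned nunits = 8, remain = 3;
  unsigned cpart_size = 1u << ceil_log2 (remain);   // next power of two: 4
  assert (cpart_size == 4);
  assert (nunits > cpart_size && nunits % cpart_size == 0);
  unsigned num = nunits / cpart_size;               // parts in the composition
  assert (num == 2);
  return 0;
}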
*/ + unsigned num + = (exact_div (tree_to_poly_uint64 + (TYPE_SIZE_UNIT (vectype)), + tree_to_poly_uint64 + (TYPE_SIZE_UNIT (ltype))) + .to_constant ()); + vec_alloc (v, num); + if (memory_access_type == VMAT_CONTIGUOUS_REVERSE) { - poly_uint64 gap_offset - = (tree_to_poly_uint64 (TYPE_SIZE_UNIT (vectype)) - - tree_to_poly_uint64 (TYPE_SIZE_UNIT (ltype))); - tree gapcst = build_int_cstu (ref_type, gap_offset); - offset = size_binop (PLUS_EXPR, offset, gapcst); + while (--num) + CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, + build_zero_cst (ltype)); + CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, tem); } - if (ltype) + else { - data_ref - = fold_build2 (MEM_REF, ltype, dataref_ptr, offset); - if (alignment_support_scheme == dr_aligned) - ; - else - TREE_TYPE (data_ref) - = build_aligned_type (TREE_TYPE (data_ref), - align * BITS_PER_UNIT); + CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, tem); + while (--num) + CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, + build_zero_cst (ltype)); } - if (!ltype) - data_ref = build_constructor (vectype, NULL); - else if (ltype != vectype) + gcc_assert (new_vtype != NULL_TREE); + if (new_vtype == vectype) + new_stmt + = gimple_build_assign (vec_dest, + build_constructor (vectype, v)); + else { - vect_copy_ref_info (data_ref, - DR_REF (first_dr_info->dr)); - tree tem = make_ssa_name (ltype); - new_stmt = gimple_build_assign (tem, data_ref); - vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, - gsi); - data_ref = NULL; - vec<constructor_elt, va_gc> *v; - /* We've computed 'num' above to statically two - or via constant_multiple_p. */ - unsigned num - = (exact_div (tree_to_poly_uint64 - (TYPE_SIZE_UNIT (vectype)), - tree_to_poly_uint64 - (TYPE_SIZE_UNIT (ltype))) - .to_constant ()); - vec_alloc (v, num); - if (memory_access_type == VMAT_CONTIGUOUS_REVERSE) - { - while (--num) - CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, - build_zero_cst (ltype)); - CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, tem); - } - else - { - CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, tem); - while (--num) - CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, - build_zero_cst (ltype)); - } - gcc_assert (new_vtype != NULL_TREE); - if (new_vtype == vectype) - new_stmt = gimple_build_assign ( - vec_dest, build_constructor (vectype, v)); - else - { - tree new_vname = make_ssa_name (new_vtype); - new_stmt = gimple_build_assign ( - new_vname, build_constructor (new_vtype, v)); - vect_finish_stmt_generation (vinfo, stmt_info, - new_stmt, gsi); - new_stmt = gimple_build_assign ( - vec_dest, - build1 (VIEW_CONVERT_EXPR, vectype, new_vname)); - } + tree new_vname = make_ssa_name (new_vtype); + new_stmt + = gimple_build_assign (new_vname, + build_constructor (new_vtype, + v)); + vect_finish_stmt_generation (vinfo, stmt_info, + new_stmt, gsi); + new_stmt + = gimple_build_assign (vec_dest, + build1 (VIEW_CONVERT_EXPR, + vectype, new_vname)); } } - break; } - case dr_explicit_realign: - { - if (costing_p) - break; - tree ptr, bump; - - tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype)); - - if (compute_in_loop) - msq = vect_setup_realignment (vinfo, first_stmt_info, gsi, - &realignment_token, - dr_explicit_realign, - dataref_ptr, NULL); + break; + } + case dr_explicit_realign: + { + if (costing_p) + break; + tree ptr, bump; + + tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype)); + + if (compute_in_loop) + msq = vect_setup_realignment (vinfo, first_stmt_info, gsi, + &realignment_token, + dr_explicit_realign, + dataref_ptr, NULL); + + if (TREE_CODE (dataref_ptr) == SSA_NAME) + ptr = copy_ssa_name (dataref_ptr); + else + ptr = make_ssa_name (TREE_TYPE 
(dataref_ptr)); + // For explicit realign the target alignment should be + // known at compile time. + unsigned HOST_WIDE_INT align + = DR_TARGET_ALIGNMENT (first_dr_info).to_constant (); + new_stmt = gimple_build_assign (ptr, BIT_AND_EXPR, dataref_ptr, + build_int_cst + (TREE_TYPE (dataref_ptr), + -(HOST_WIDE_INT) align)); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + data_ref = build2 (MEM_REF, vectype, + ptr, build_int_cst (ref_type, 0)); + vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr)); + vec_dest = vect_create_destination_var (scalar_dest, vectype); + new_stmt = gimple_build_assign (vec_dest, data_ref); + new_temp = make_ssa_name (vec_dest, new_stmt); + gimple_assign_set_lhs (new_stmt, new_temp); + gimple_move_vops (new_stmt, stmt_info->stmt); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + msq = new_temp; + + bump = size_binop (MULT_EXPR, vs, TYPE_SIZE_UNIT (elem_type)); + bump = size_binop (MINUS_EXPR, bump, size_one_node); + ptr = bump_vector_ptr (vinfo, dataref_ptr, NULL, gsi, stmt_info, + bump); + new_stmt = gimple_build_assign (NULL_TREE, BIT_AND_EXPR, ptr, + build_int_cst (TREE_TYPE (ptr), + -(HOST_WIDE_INT) align)); + if (TREE_CODE (ptr) == SSA_NAME) + ptr = copy_ssa_name (ptr, new_stmt); + else + ptr = make_ssa_name (TREE_TYPE (ptr), new_stmt); + gimple_assign_set_lhs (new_stmt, ptr); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + data_ref = build2 (MEM_REF, vectype, + ptr, build_int_cst (ref_type, 0)); + break; + } + case dr_explicit_realign_optimized: + { + if (costing_p) + break; + if (TREE_CODE (dataref_ptr) == SSA_NAME) + new_temp = copy_ssa_name (dataref_ptr); + else + new_temp = make_ssa_name (TREE_TYPE (dataref_ptr)); + // We should only be doing this if we know the target + // alignment at compile time. + unsigned HOST_WIDE_INT align + = DR_TARGET_ALIGNMENT (first_dr_info).to_constant (); + new_stmt = gimple_build_assign (new_temp, BIT_AND_EXPR, dataref_ptr, + build_int_cst (TREE_TYPE (dataref_ptr), + -(HOST_WIDE_INT) align)); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + data_ref = build2 (MEM_REF, vectype, new_temp, + build_int_cst (ref_type, 0)); + break; + } + default: + gcc_unreachable (); + } - if (TREE_CODE (dataref_ptr) == SSA_NAME) - ptr = copy_ssa_name (dataref_ptr); - else - ptr = make_ssa_name (TREE_TYPE (dataref_ptr)); - // For explicit realign the target alignment should be - // known at compile time. 
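The BIT_AND_EXPR with -align is plain address rounding. The sketch below checks the arithmetic the msq/lsq pair relies on, assuming the target alignment and the vector size are both 16 bytes: the two aligned chunks always cover the bytes of the unaligned access.

#include <cassert>
#include <cstdint>

int
main ()
{
  const std::uintptr_t align = 16, vecsize = 16;
  for (std::uintptr_t addr = 0x1000; addr < 0x1010; ++addr)
    {
      // First load at addr & -align (msq), second at
      // (addr + vecsize - 1) & -align (lsq), matching the bump above.
      std::uintptr_t msq_addr = addr & -align;
      std::uintptr_t lsq_addr = (addr + vecsize - 1) & -align;
      // Every byte of [addr, addr + vecsize) lies within the two
      // aligned vecsize-byte chunks.
      assert (msq_addr <= addr);
      assert (addr + vecsize <= lsq_addr + vecsize);
      assert (lsq_addr - msq_addr <= vecsize);
    }
  return 0;
}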
- unsigned HOST_WIDE_INT align - = DR_TARGET_ALIGNMENT (first_dr_info).to_constant (); - new_stmt = gimple_build_assign ( - ptr, BIT_AND_EXPR, dataref_ptr, - build_int_cst (TREE_TYPE (dataref_ptr), - -(HOST_WIDE_INT) align)); - vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); - data_ref - = build2 (MEM_REF, vectype, ptr, build_int_cst (ref_type, 0)); - vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr)); - vec_dest = vect_create_destination_var (scalar_dest, vectype); - new_stmt = gimple_build_assign (vec_dest, data_ref); - new_temp = make_ssa_name (vec_dest, new_stmt); - gimple_assign_set_lhs (new_stmt, new_temp); - gimple_move_vops (new_stmt, stmt_info->stmt); - vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); - msq = new_temp; - - bump = size_binop (MULT_EXPR, vs, TYPE_SIZE_UNIT (elem_type)); - bump = size_binop (MINUS_EXPR, bump, size_one_node); - ptr = bump_vector_ptr (vinfo, dataref_ptr, NULL, gsi, stmt_info, - bump); - new_stmt = gimple_build_assign ( - NULL_TREE, BIT_AND_EXPR, ptr, - build_int_cst (TREE_TYPE (ptr), -(HOST_WIDE_INT) align)); - if (TREE_CODE (ptr) == SSA_NAME) - ptr = copy_ssa_name (ptr, new_stmt); - else - ptr = make_ssa_name (TREE_TYPE (ptr), new_stmt); - gimple_assign_set_lhs (new_stmt, ptr); - vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); - data_ref - = build2 (MEM_REF, vectype, ptr, build_int_cst (ref_type, 0)); - break; - } - case dr_explicit_realign_optimized: - { - if (costing_p) - break; - if (TREE_CODE (dataref_ptr) == SSA_NAME) - new_temp = copy_ssa_name (dataref_ptr); - else - new_temp = make_ssa_name (TREE_TYPE (dataref_ptr)); - // We should only be doing this if we know the target - // alignment at compile time. - unsigned HOST_WIDE_INT align - = DR_TARGET_ALIGNMENT (first_dr_info).to_constant (); - new_stmt = gimple_build_assign ( - new_temp, BIT_AND_EXPR, dataref_ptr, - build_int_cst (TREE_TYPE (dataref_ptr), - -(HOST_WIDE_INT) align)); - vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); - data_ref = build2 (MEM_REF, vectype, new_temp, - build_int_cst (ref_type, 0)); - break; - } - default: - gcc_unreachable (); + /* One common place to cost the above vect load for different + alignment support schemes. */ + if (costing_p) + { + /* For VMAT_CONTIGUOUS_PERMUTE if it's grouped load, we + only need to take care of the first stmt, whose + stmt_info is first_stmt_info, vec_num iterating on it + will cover the cost for the remaining, it's consistent + with transforming. For the prologue cost for realign, + we only need to count it once for the whole group. */ + bool first_stmt_info_p = first_stmt_info == stmt_info; + bool add_realign_cost = first_stmt_info_p && i == 0; + if (memory_access_type == VMAT_CONTIGUOUS + || memory_access_type == VMAT_CONTIGUOUS_REVERSE + || (memory_access_type == VMAT_CONTIGUOUS_PERMUTE + && (!grouped_load || first_stmt_info_p))) + { + /* Leave realign cases alone to keep them simple. */ + if (alignment_support_scheme == dr_explicit_realign_optimized + || alignment_support_scheme == dr_explicit_realign) + vect_get_load_cost (vinfo, stmt_info, slp_node, 1, + alignment_support_scheme, misalignment, + add_realign_cost, &inside_cost, + &prologue_cost, cost_vec, cost_vec, + true); + else + n_adjacent_loads++; } - - /* One common place to cost the above vect load for different - alignment support schemes. */ - if (costing_p) + } + else + { + vec_dest = vect_create_destination_var (scalar_dest, vectype); + /* DATA_REF is null if we've already built the statement. 
*/ + if (data_ref) { - /* For VMAT_CONTIGUOUS_PERMUTE if it's grouped load, we - only need to take care of the first stmt, whose - stmt_info is first_stmt_info, vec_num iterating on it - will cover the cost for the remaining, it's consistent - with transforming. For the prologue cost for realign, - we only need to count it once for the whole group. */ - bool first_stmt_info_p = first_stmt_info == stmt_info; - bool add_realign_cost = first_stmt_info_p && i == 0; - if (memory_access_type == VMAT_CONTIGUOUS - || memory_access_type == VMAT_CONTIGUOUS_REVERSE - || (memory_access_type == VMAT_CONTIGUOUS_PERMUTE - && (!grouped_load || first_stmt_info_p))) - { - /* Leave realign cases alone to keep them simple. */ - if (alignment_support_scheme == dr_explicit_realign_optimized - || alignment_support_scheme == dr_explicit_realign) - vect_get_load_cost (vinfo, stmt_info, slp_node, 1, - alignment_support_scheme, misalignment, - add_realign_cost, &inside_cost, - &prologue_cost, cost_vec, cost_vec, - true); - else - n_adjacent_loads++; - } + vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr)); + new_stmt = gimple_build_assign (vec_dest, data_ref); } - else - { - vec_dest = vect_create_destination_var (scalar_dest, vectype); - /* DATA_REF is null if we've already built the statement. */ - if (data_ref) - { - vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr)); - new_stmt = gimple_build_assign (vec_dest, data_ref); - } - new_temp = need_zeroing - ? make_ssa_name (vectype) - : make_ssa_name (vec_dest, new_stmt); - gimple_set_lhs (new_stmt, new_temp); - vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + new_temp = (need_zeroing + ? make_ssa_name (vectype) + : make_ssa_name (vec_dest, new_stmt)); + gimple_set_lhs (new_stmt, new_temp); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); - /* If we need to explicitly zero inactive elements emit a - VEC_COND_EXPR that does so. */ - if (need_zeroing) - { - vec_els = vect_get_mask_load_else (MASK_LOAD_ELSE_ZERO, - vectype); + /* If we need to explicitly zero inactive elements emit a + VEC_COND_EXPR that does so. */ + if (need_zeroing) + { + vec_els = vect_get_mask_load_else (MASK_LOAD_ELSE_ZERO, + vectype); - tree new_temp2 = make_ssa_name (vec_dest, new_stmt); - new_stmt - = gimple_build_assign (new_temp2, VEC_COND_EXPR, - final_mask, new_temp, vec_els); - vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, - gsi); - new_temp = new_temp2; - } + tree new_temp2 = make_ssa_name (vec_dest, new_stmt); + new_stmt = gimple_build_assign (new_temp2, VEC_COND_EXPR, + final_mask, new_temp, vec_els); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, + gsi); + new_temp = new_temp2; } + } - /* 3. Handle explicit realignment if necessary/supported. - Create in loop: - vec_dest = realign_load (msq, lsq, realignment_token) */ - if (!costing_p - && (alignment_support_scheme == dr_explicit_realign_optimized - || alignment_support_scheme == dr_explicit_realign)) - { - lsq = gimple_assign_lhs (new_stmt); - if (!realignment_token) - realignment_token = dataref_ptr; - vec_dest = vect_create_destination_var (scalar_dest, vectype); - new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR, msq, - lsq, realignment_token); - new_temp = make_ssa_name (vec_dest, new_stmt); - gimple_assign_set_lhs (new_stmt, new_temp); - vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + /* 3. Handle explicit realignment if necessary/supported. 
+ Create in loop: + vec_dest = realign_load (msq, lsq, realignment_token) */ + if (!costing_p + && (alignment_support_scheme == dr_explicit_realign_optimized + || alignment_support_scheme == dr_explicit_realign)) + { + lsq = gimple_assign_lhs (new_stmt); + if (!realignment_token) + realignment_token = dataref_ptr; + vec_dest = vect_create_destination_var (scalar_dest, vectype); + new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR, msq, + lsq, realignment_token); + new_temp = make_ssa_name (vec_dest, new_stmt); + gimple_assign_set_lhs (new_stmt, new_temp); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); - if (alignment_support_scheme == dr_explicit_realign_optimized) - { - gcc_assert (phi); - if (i == vec_num - 1 && j == ncopies - 1) - add_phi_arg (phi, lsq, loop_latch_edge (containing_loop), - UNKNOWN_LOCATION); - msq = lsq; - } + if (alignment_support_scheme == dr_explicit_realign_optimized) + { + gcc_assert (phi); + if (i == vec_num - 1) + add_phi_arg (phi, lsq, loop_latch_edge (containing_loop), + UNKNOWN_LOCATION); + msq = lsq; } + } - if (memory_access_type == VMAT_CONTIGUOUS_REVERSE) + if (memory_access_type == VMAT_CONTIGUOUS_REVERSE) + { + if (costing_p) + inside_cost = record_stmt_cost (cost_vec, 1, vec_perm, + slp_node, 0, vect_body); + else { - if (costing_p) - inside_cost = record_stmt_cost (cost_vec, 1, vec_perm, - stmt_info, slp_node, 0, - vect_body); - else - { - tree perm_mask = perm_mask_for_reverse (vectype); - new_temp = permute_vec_elements (vinfo, new_temp, new_temp, - perm_mask, stmt_info, gsi); - new_stmt = SSA_NAME_DEF_STMT (new_temp); - } + tree perm_mask = perm_mask_for_reverse (vectype); + new_temp = permute_vec_elements (vinfo, new_temp, new_temp, + perm_mask, stmt_info, gsi); + new_stmt = SSA_NAME_DEF_STMT (new_temp); } + } - /* Collect vector loads and later create their permutation in - vect_transform_grouped_load (). */ - if (!costing_p && (grouped_load || slp_perm)) - dr_chain.quick_push (new_temp); + /* Collect vector loads and later create their permutation in + vect_transform_grouped_load (). */ + if (!costing_p && (grouped_load || slp_perm)) + dr_chain.quick_push (new_temp); - /* Store vector loads in the corresponding SLP_NODE. */ - if (!costing_p && slp && !slp_perm) - slp_node->push_vec_def (new_stmt); + /* Store vector loads in the corresponding SLP_NODE. */ + if (!costing_p && !slp_perm) + slp_node->push_vec_def (new_stmt); - /* With SLP permutation we load the gaps as well, without - we need to skip the gaps after we manage to fully load - all elements. group_gap_adj is DR_GROUP_SIZE here. */ - group_elt += nunits; - if (!costing_p - && maybe_ne (group_gap_adj, 0U) - && !slp_perm - && known_eq (group_elt, group_size - group_gap_adj)) - { - poly_wide_int bump_val - = (wi::to_wide (TYPE_SIZE_UNIT (elem_type)) * group_gap_adj); - if (tree_int_cst_sgn (vect_dr_behavior (vinfo, dr_info)->step) - == -1) - bump_val = -bump_val; - tree bump = wide_int_to_tree (sizetype, bump_val); - dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi, - stmt_info, bump); - group_elt = 0; - } - } - /* Bump the vector pointer to account for a gap or for excess - elements loaded for a permuted SLP load. */ + /* With SLP permutation we load the gaps as well, without + we need to skip the gaps after we manage to fully load + all elements. group_gap_adj is DR_GROUP_SIZE here. 
*/ + group_elt += nunits; if (!costing_p && maybe_ne (group_gap_adj, 0U) - && slp_perm) + && !slp_perm + && known_eq (group_elt, group_size - group_gap_adj)) { poly_wide_int bump_val = (wi::to_wide (TYPE_SIZE_UNIT (elem_type)) * group_gap_adj); @@ -12535,74 +11930,47 @@ vectorizable_load (vec_info *vinfo, tree bump = wide_int_to_tree (sizetype, bump_val); dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi, stmt_info, bump); + group_elt = 0; } + } + /* Bump the vector pointer to account for a gap or for excess + elements loaded for a permuted SLP load. */ + if (!costing_p + && maybe_ne (group_gap_adj, 0U) + && slp_perm) + { + poly_wide_int bump_val + = (wi::to_wide (TYPE_SIZE_UNIT (elem_type)) * group_gap_adj); + if (tree_int_cst_sgn (vect_dr_behavior (vinfo, dr_info)->step) == -1) + bump_val = -bump_val; + tree bump = wide_int_to_tree (sizetype, bump_val); + dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi, + stmt_info, bump); + } - if (slp && !slp_perm) - continue; - - if (slp_perm) + if (slp_perm) + { + unsigned n_perms; + /* For SLP we know we've seen all possible uses of dr_chain so + direct vect_transform_slp_perm_load to DCE the unused parts. + ??? This is a hack to prevent compile-time issues as seen + in PR101120 and friends. */ + if (costing_p) { - unsigned n_perms; - /* For SLP we know we've seen all possible uses of dr_chain so - direct vect_transform_slp_perm_load to DCE the unused parts. - ??? This is a hack to prevent compile-time issues as seen - in PR101120 and friends. */ - if (costing_p) - { - vect_transform_slp_perm_load (vinfo, slp_node, vNULL, nullptr, vf, - true, &n_perms, nullptr); - inside_cost = record_stmt_cost (cost_vec, n_perms, vec_perm, - stmt_info, slp_node, 0, - vect_body); - } - else - { - bool ok = vect_transform_slp_perm_load (vinfo, slp_node, dr_chain, - gsi, vf, false, &n_perms, - nullptr, true); - gcc_assert (ok); - } + vect_transform_slp_perm_load (vinfo, slp_node, vNULL, nullptr, vf, + true, &n_perms, nullptr); + inside_cost = record_stmt_cost (cost_vec, n_perms, vec_perm, + slp_node, 0, vect_body); } else { - if (grouped_load) - { - gcc_assert (memory_access_type == VMAT_CONTIGUOUS_PERMUTE); - /* We assume that the cost of a single load-lanes instruction - is equivalent to the cost of DR_GROUP_SIZE separate loads. - If a grouped access is instead being provided by a - load-and-permute operation, include the cost of the - permutes. */ - if (costing_p && first_stmt_info == stmt_info) - { - /* Uses an even and odd extract operations or shuffle - operations for each needed permute. 
*/ - int group_size = DR_GROUP_SIZE (first_stmt_info); - int nstmts = ceil_log2 (group_size) * group_size; - inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm, - stmt_info, slp_node, 0, - vect_body); - - if (dump_enabled_p ()) - dump_printf_loc (MSG_NOTE, vect_location, - "vect_model_load_cost: " - "strided group_size = %d .\n", - group_size); - } - else if (!costing_p) - { - vect_transform_grouped_load (vinfo, stmt_info, dr_chain, - group_size, gsi); - *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0]; - } - } - else if (!costing_p) - STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt); + bool ok = vect_transform_slp_perm_load (vinfo, slp_node, dr_chain, + gsi, vf, false, &n_perms, + nullptr, true); + gcc_assert (ok); } dr_chain.release (); } - if (!slp && !costing_p) - *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0]; if (costing_p) { @@ -13001,7 +12369,7 @@ vectorizable_condition (vec_info *vinfo, } STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type; - vect_model_simple_cost (vinfo, stmt_info, ncopies, dts, ndts, slp_node, + vect_model_simple_cost (vinfo, ncopies, dts, ndts, slp_node, cost_vec, kind); return true; } @@ -13433,8 +12801,7 @@ vectorizable_comparison_1 (vec_info *vinfo, tree vectype, return false; } - vect_model_simple_cost (vinfo, stmt_info, - ncopies * (1 + (bitop2 != NOP_EXPR)), + vect_model_simple_cost (vinfo, ncopies * (1 + (bitop2 != NOP_EXPR)), dts, ndts, slp_node, cost_vec); return true; } @@ -13615,29 +12982,23 @@ vectorizable_early_exit (vec_info *vinfo, stmt_vec_info stmt_info, codegen so we must replace the original insn. */ gimple *orig_stmt = STMT_VINFO_STMT (vect_orig_stmt (stmt_info)); gcond *cond_stmt = as_a <gcond *>(orig_stmt); + + tree cst = build_zero_cst (vectype); + auto bb = gimple_bb (cond_stmt); + edge exit_true_edge = EDGE_SUCC (bb, 0); + if (exit_true_edge->flags & EDGE_FALSE_VALUE) + exit_true_edge = EDGE_SUCC (bb, 1); + gcc_assert (exit_true_edge->flags & EDGE_TRUE_VALUE); + /* When vectorizing we assume that if the branch edge is taken that we're exiting the loop. This is not however always the case as the compiler will rewrite conditions to always be a comparison against 0. To do this it sometimes flips the edges. This is fine for scalar, but for vector we - then have to flip the test, as we're still assuming that if you take the - branch edge that we found the exit condition. i.e. we need to know whether - we are generating a `forall` or an `exist` condition. */ - auto new_code = NE_EXPR; - auto reduc_optab = ior_optab; - auto reduc_op = BIT_IOR_EXPR; - tree cst = build_zero_cst (vectype); - edge exit_true_edge = EDGE_SUCC (gimple_bb (cond_stmt), 0); - if (exit_true_edge->flags & EDGE_FALSE_VALUE) - exit_true_edge = EDGE_SUCC (gimple_bb (cond_stmt), 1); - gcc_assert (exit_true_edge->flags & EDGE_TRUE_VALUE); - if (flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo), - exit_true_edge->dest)) - { - new_code = EQ_EXPR; - reduc_optab = and_optab; - reduc_op = BIT_AND_EXPR; - cst = build_minus_one_cst (vectype); - } + then have to negate the result of the test, as we're still assuming that if + you take the branch edge that we found the exit condition. i.e. we need to + know whether we are generating a `forall` or an `exist` condition. */ + bool flipped = flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo), + exit_true_edge->dest); /* Analyze only. 
   if (!vec_stmt)
@@ -13653,14 +13014,13 @@ vectorizable_early_exit (vec_info *vinfo, stmt_vec_info stmt_info,
     }

   if (ncopies > 1
-      && direct_optab_handler (reduc_optab, mode) == CODE_FOR_nothing)
+      && direct_optab_handler (ior_optab, mode) == CODE_FOR_nothing)
     {
       if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "can't vectorize early exit because the "
-			 "target does not support boolean vector %s "
+			 "target does not support boolean vector IOR "
			 "for type %T.\n",
-			 reduc_optab == ior_optab ? "OR" : "AND",
			 vectype);
       return false;
     }
@@ -13720,6 +13080,29 @@ vectorizable_early_exit (vec_info *vinfo, stmt_vec_info stmt_info,
       stmts.quick_push (gimple_assign_lhs (stmt));
     }

+  /* If we're comparing against a previous forall we need to negate the results
+     before we do the final comparison or reduction.  */
+  if (flipped)
+    {
+      /* Rewrite the if(all(mask)) into if (!all(mask)) which is the same as
+	 if (any(~mask)) by negating the masks and flipping the branches.
+
+	 1. For unmasked loops we simply reduce the ~mask.
+	 2. For masked loops we reduce (~mask & loop_mask) which is the same as
+	    doing (mask & loop_mask) ^ loop_mask.  */
+      for (unsigned i = 0; i < stmts.length (); i++)
+	{
+	  tree inv_lhs = make_temp_ssa_name (vectype, NULL, "vexit_inv");
+	  auto inv_stmt = gimple_build_assign (inv_lhs, BIT_NOT_EXPR, stmts[i]);
+	  vect_finish_stmt_generation (loop_vinfo, stmt_info, inv_stmt,
				       &cond_gsi);
+	  stmts[i] = inv_lhs;
+	}
+
+      EDGE_SUCC (bb, 0)->flags ^= (EDGE_TRUE_VALUE|EDGE_FALSE_VALUE);
+      EDGE_SUCC (bb, 1)->flags ^= (EDGE_TRUE_VALUE|EDGE_FALSE_VALUE);
+    }
+
   /* Determine if we need to reduce the final value.  */
   if (stmts.length () > 1)
     {
@@ -13758,7 +13141,7 @@ vectorizable_early_exit (vec_info *vinfo, stmt_vec_info stmt_info,
       new_temp = make_temp_ssa_name (vectype, NULL, "vexit_reduc");
       tree arg0 = workset.pop ();
       tree arg1 = workset.pop ();
-      new_stmt = gimple_build_assign (new_temp, reduc_op, arg0, arg1);
+      new_stmt = gimple_build_assign (new_temp, BIT_IOR_EXPR, arg0, arg1);
       vect_finish_stmt_generation (loop_vinfo, stmt_info, new_stmt,
				   &cond_gsi);
       workset.quick_insert (0, new_temp);
@@ -13781,7 +13164,7 @@ vectorizable_early_exit (vec_info *vinfo, stmt_vec_info stmt_info,

   gcc_assert (new_temp);

-  gimple_cond_set_condition (cond_stmt, new_code, new_temp, cst);
+  gimple_cond_set_condition (cond_stmt, NE_EXPR, new_temp, cst);
   update_stmt (orig_stmt);

   if (slp_node)
@@ -14006,7 +13389,7 @@ vect_analyze_stmt (vec_info *vinfo,

   /* When we arrive here with a non-SLP statement and we are supposed to use
      SLP for everything fail vectorization.  */
-  if (!node && param_vect_force_slp)
+  if (!node)
     return opt_result::failure_at (stmt_info->stmt,
				    "needs non-SLP handling\n");

@@ -14040,7 +13423,7 @@ vect_analyze_stmt (vec_info *vinfo,
	  || vectorizable_comparison (vinfo, stmt_info, NULL, NULL, node,
				      cost_vec)
	  || vectorizable_lc_phi (as_a <loop_vec_info> (vinfo),
-				  stmt_info, NULL, node)
+				  stmt_info, node)
	  || vectorizable_recurr (as_a <loop_vec_info> (vinfo),
				  stmt_info, NULL, node, cost_vec)
	  || vectorizable_early_exit (vinfo, stmt_info, NULL, NULL, node,
@@ -14115,6 +13498,10 @@ vect_transform_stmt (vec_info *vinfo,

   gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));

+  if (dump_enabled_p ())
+    dump_printf_loc (MSG_NOTE, vect_location,
+		     "------>vectorizing statement: %G", stmt_info->stmt);
+
   tree saved_vectype = STMT_VINFO_VECTYPE (stmt_info);
   if (slp_node)
     STMT_VINFO_VECTYPE (stmt_info) = SLP_TREE_VECTYPE (slp_node);
@@ -14203,19 +13590,19 @@ vect_transform_stmt (vec_info *vinfo,

     case reduc_vec_info_type:
       done = vect_transform_reduction (as_a <loop_vec_info> (vinfo), stmt_info,
-				       gsi, &vec_stmt, slp_node);
+				       gsi, slp_node);
       gcc_assert (done);
       break;

     case cycle_phi_info_type:
       done = vect_transform_cycle_phi (as_a <loop_vec_info> (vinfo), stmt_info,
-				       &vec_stmt, slp_node, slp_node_instance);
+				       slp_node, slp_node_instance);
       gcc_assert (done);
       break;

     case lc_phi_info_type:
-      done = vectorizable_lc_phi (as_a <loop_vec_info> (vinfo),
-				  stmt_info, &vec_stmt, slp_node);
+      done = vect_transform_lc_phi (as_a <loop_vec_info> (vinfo),
				    stmt_info, slp_node);
       gcc_assert (done);
       break;

@@ -14560,6 +13947,23 @@ vect_chooses_same_modes_p (vec_info *vinfo, machine_mode vector_mode)
   return true;
 }

+/* Return true if replacing VECTOR_MODE with ALT_VECTOR_MODE would not
+   change the chosen vector modes for analysis of a loop.  */
+
+bool
+vect_chooses_same_modes_p (machine_mode vector_mode,
+			   machine_mode alt_vector_mode)
+{
+  return (VECTOR_MODE_P (vector_mode)
+	  && VECTOR_MODE_P (alt_vector_mode)
+	  && (related_vector_mode (vector_mode,
+				   GET_MODE_INNER (alt_vector_mode))
+	      == alt_vector_mode)
+	  && (related_vector_mode (alt_vector_mode,
				   GET_MODE_INNER (vector_mode))
+	      == vector_mode));
+}
+
 /* Function vect_is_simple_use.

    Input:
@@ -15357,7 +14761,6 @@ supportable_indirect_convert_operation (code_helper code,
   bool found_mode = false;
   scalar_mode lhs_mode = GET_MODE_INNER (TYPE_MODE (vectype_out));
   scalar_mode rhs_mode = GET_MODE_INNER (TYPE_MODE (vectype_in));
-  opt_scalar_mode mode_iter;
   tree_code tc1, tc2, code1, code2;
   tree cvt_type = NULL_TREE;
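
The vectorizable_load hunks above rework how the data pointer skips a group gap: once the useful elements of a group have been loaded (or, for a permuted SLP load, after the whole chain), the pointer is bumped by group_gap_adj elements, and the bump is negated when the data reference runs with a negative step. The following is a standalone C++ sketch of that arithmetic only, with an invented element size and gap count instead of GCC's poly_wide_int machinery; it is not GCC code.

// Sketch: skip group_gap_adj elements after a group is loaded; with a
// negative step the bump is applied in the opposite direction.
// All values here are invented for illustration.
#include <cstddef>
#include <cstdio>

int main ()
{
  const std::ptrdiff_t elem_size = 4;      // e.g. sizeof(float)
  const std::ptrdiff_t group_gap_adj = 2;  // elements to skip per group
  const int step_sign = -1;                // models a negative DR step

  std::ptrdiff_t bump_val = elem_size * group_gap_adj;
  if (step_sign == -1)
    bump_val = -bump_val;                  // mirrors "bump_val = -bump_val"

  unsigned char buffer[64];
  unsigned char *dataref_ptr = buffer + 32;
  dataref_ptr += bump_val;                 // stands in for bump_vector_ptr

  std::printf ("bump = %td bytes, new offset = %td\n",
	       bump_val, dataref_ptr - buffer);
  return 0;
}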
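The vectorizable_early_exit hunks drop the separate EQ/AND "forall" path: when the branch edges were flipped by earlier passes, each lane mask is negated and the edge flags are swapped, because all(mask) is the same as !any(~mask), so a single IOR reduction tested with NE against zero now covers both the "exist" and the "forall" form. The sketch below is a standalone illustration of that identity on plain bool lanes; the helper names are invented and it does not use GCC internals.

// Sketch: an "exist" early-exit test OR-reduces the lane masks and exits when
// the result is nonzero.  A "forall" test reuses the same OR reduction by
// negating each lane first (all(m) == !any(~m)); in the patch the final '!'
// is realized by flipping the branch edges instead.
#include <array>
#include <cstdio>

using Mask = std::array<bool, 4>;   // one bool per vector lane

static bool any_lane (const Mask &m)
{
  bool r = false;
  for (bool b : m)
    r |= b;                         // mirrors the BIT_IOR_EXPR reduction
  return r;
}

static bool all_lanes (const Mask &m)
{
  Mask inv;
  for (std::size_t i = 0; i < m.size (); ++i)
    inv[i] = !m[i];                 // mirrors the BIT_NOT_EXPR inversion
  return !any_lane (inv);           // the '!' becomes the flipped branch
}

int main ()
{
  Mask some {false, true, false, false};
  Mask every {true, true, true, true};
  std::printf ("any(some)=%d all(some)=%d all(every)=%d\n",
	       any_lane (some), all_lanes (some), all_lanes (every));
  return 0;
}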
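The new two-argument vect_chooses_same_modes_p overload accepts a pair of vector modes only when related_vector_mode maps each one onto the other, i.e. the derivation round-trips in both directions. The toy model below reduces a mode to an (element bits, lane count) pair and uses a size-preserving related_mode, which is only meant to show what such a round-trip test accepts and rejects; it is not the real GCC API.

// Toy model of the round-trip check: two modes are interchangeable when
// re-deriving each from the other's element width yields the original mode.
#include <cstdio>
#include <optional>

struct Mode { int elem_bits; int lanes; };

static bool operator== (Mode a, Mode b)
{ return a.elem_bits == b.elem_bits && a.lanes == b.lanes; }

// Re-target MODE to a new element width, preserving the total bit size
// (what related_vector_mode does on most fixed-width targets).
static std::optional<Mode> related_mode (Mode mode, int new_elem_bits)
{
  int total = mode.elem_bits * mode.lanes;
  if (total % new_elem_bits != 0)
    return std::nullopt;
  return Mode {new_elem_bits, total / new_elem_bits};
}

static bool chooses_same_modes (Mode a, Mode b)
{
  auto ab = related_mode (a, b.elem_bits);  // a's size, b's element width
  auto ba = related_mode (b, a.elem_bits);  // b's size, a's element width
  return ab && ba && *ab == b && *ba == a;  // both round-trips must hold
}

int main ()
{
  Mode v4si {32, 4}, v8hi {16, 8}, v4hi {16, 4};
  std::printf ("V4SI~V8HI: %d\n", chooses_same_modes (v4si, v8hi)); // 1
  std::printf ("V4SI~V4HI: %d\n", chooses_same_modes (v4si, v4hi)); // 0
  return 0;
}

In this toy model the check reduces to "same total vector size"; the real overload additionally relies on the target's related_vector_mode hook, so the answer can differ per target.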