Diffstat (limited to 'gcc/tree-vect-data-refs.cc')
-rw-r--r-- | gcc/tree-vect-data-refs.cc | 72
1 file changed, 23 insertions, 49 deletions
diff --git a/gcc/tree-vect-data-refs.cc b/gcc/tree-vect-data-refs.cc
index da700cd..a9d4aae 100644
--- a/gcc/tree-vect-data-refs.cc
+++ b/gcc/tree-vect-data-refs.cc
@@ -1856,21 +1856,14 @@ vect_get_data_access_cost (vec_info *vinfo, dr_vec_info *dr_info,
 			   stmt_vector_for_cost *prologue_cost_vec)
 {
   stmt_vec_info stmt_info = dr_info->stmt;
-  loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
-  int ncopies;
-
-  if (PURE_SLP_STMT (stmt_info))
-    ncopies = 1;
-  else
-    ncopies = vect_get_num_copies (loop_vinfo, STMT_VINFO_VECTYPE (stmt_info));
 
   if (DR_IS_READ (dr_info->dr))
-    vect_get_load_cost (vinfo, stmt_info, NULL, ncopies,
+    vect_get_load_cost (vinfo, stmt_info, NULL, 1,
 			alignment_support_scheme, misalignment, true,
 			inside_cost, outside_cost, prologue_cost_vec,
 			body_cost_vec, false);
   else
-    vect_get_store_cost (vinfo,stmt_info, NULL, ncopies,
+    vect_get_store_cost (vinfo,stmt_info, NULL, 1,
 			 alignment_support_scheme, misalignment, inside_cost,
 			 body_cost_vec);
 
@@ -4430,8 +4423,9 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
    MASKED_P is true if the load or store is conditional.  MEMORY_TYPE is
    the type of the memory elements being loaded or stored.  OFFSET_TYPE
    is the type of the offset that is being applied to the invariant
-   base address.  SCALE is the amount by which the offset should
-   be multiplied *after* it has been converted to address width.
+   base address.  If OFFSET_TYPE is scalar the function chooses an
+   appropriate vector type for it.  SCALE is the amount by which the
+   offset should be multiplied *after* it has been converted to address width.
 
    Return true if the function is supported, storing the function id in
    *IFN_OUT and the vector type for the offset in *OFFSET_VECTYPE_OUT.
@@ -4474,9 +4468,15 @@ vect_gather_scatter_fn_p (vec_info *vinfo, bool read_p, bool masked_p,
 
   for (;;)
     {
-      tree offset_vectype = get_vectype_for_scalar_type (vinfo, offset_type);
-      if (!offset_vectype)
-	return false;
+      tree offset_vectype;
+      if (VECTOR_TYPE_P (offset_type))
+	offset_vectype = offset_type;
+      else
+	{
+	  offset_vectype = get_vectype_for_scalar_type (vinfo, offset_type);
+	  if (!offset_vectype)
+	    return false;
+	}
 
       /* Test whether the target supports this combination.  */
       if (internal_gather_scatter_fn_supported_p (ifn, vectype, memory_type,
@@ -4507,10 +4507,15 @@ vect_gather_scatter_fn_p (vec_info *vinfo, bool read_p, bool masked_p,
 	  return true;
 	}
 
+      /* For fixed offset vector type we're done.  */
+      if (VECTOR_TYPE_P (offset_type))
+	return false;
+
       if (TYPE_PRECISION (offset_type) >= POINTER_SIZE
 	  && TYPE_PRECISION (offset_type) >= element_bits)
 	return false;
 
+      /* Try a larger offset vector type.  */
       offset_type = build_nonstandard_integer_type
 	(TYPE_PRECISION (offset_type) * 2, TYPE_UNSIGNED (offset_type));
     }
@@ -4519,7 +4524,7 @@
 /* STMT_INFO is a call to an internal gather load or scatter store
    function.  Describe the operation in INFO.  */
 
-static void
+void
 vect_describe_gather_scatter_call (stmt_vec_info stmt_info,
 				   gather_scatter_info *info)
 {
@@ -6531,7 +6536,7 @@ vect_supportable_dr_alignment (vec_info *vinfo, dr_vec_info *dr_info,
 			       tree vectype, int misalignment,
 			       gather_scatter_info *gs_info)
 {
-  data_reference *dr = dr_info ? dr_info->dr : nullptr;
+  data_reference *dr = dr_info->dr;
   stmt_vec_info stmt_info = dr_info->stmt;
   machine_mode mode = TYPE_MODE (vectype);
   loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
@@ -6612,7 +6617,7 @@ vect_supportable_dr_alignment (vec_info *vinfo, dr_vec_info *dr_info,
 	}
     }
   */
-  if (dr && DR_IS_READ (dr))
+  if (DR_IS_READ (dr))
     {
       if (can_implement_p (vec_realign_load_optab, mode)
 	  && (!targetm.vectorize.builtin_mask_for_load
@@ -6642,38 +6647,7 @@ vect_supportable_dr_alignment (vec_info *vinfo, dr_vec_info *dr_info,
   tree type = TREE_TYPE (DR_REF (dr));
   bool is_gather_scatter = gs_info != nullptr;
   if (misalignment == DR_MISALIGNMENT_UNKNOWN)
-    {
-      if (!is_gather_scatter || dr != nullptr)
-	is_packed = not_size_aligned (DR_REF (dr));
-      else
-	{
-	  /* Gather-scatter accesses normally perform only component accesses
-	     so alignment is irrelevant for them.  Targets like riscv do care
-	     about scalar alignment in vector accesses, though, so check scalar
-	     alignment here.  We determined the alias pointer as well as the
-	     base alignment during pattern recognition and can re-use it here.
-
-	     As we do not have an analyzed dataref we only know the alignment
-	     of the reference itself and nothing about init, steps, etc.
-	     For now don't try harder to determine misalignment and
-	     just assume it is unknown.  We consider the type packed if its
-	     scalar alignment is lower than the natural alignment of a vector
-	     element's type.  */
-
-	  gcc_assert (!GATHER_SCATTER_LEGACY_P (*gs_info));
-	  gcc_assert (dr == nullptr);
-
-	  tree inner_vectype = TREE_TYPE (vectype);
-
-	  unsigned HOST_WIDE_INT scalar_align
-	    = tree_to_uhwi (gs_info->alias_ptr);
-	  unsigned HOST_WIDE_INT inner_vectype_sz
-	    = tree_to_uhwi (TYPE_SIZE (inner_vectype));
-
-	  bool is_misaligned = scalar_align < inner_vectype_sz;
-	  is_packed = scalar_align > 1 && is_misaligned;
-	}
-    }
+    is_packed = not_size_aligned (DR_REF (dr));
 
   if (targetm.vectorize.support_vector_misalignment (mode, type,
						      misalignment, is_packed,
						      is_gather_scatter))
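
The updated vect_gather_scatter_fn_p comment and hunks above describe the offset-type handling: an offset type that is already a vector is used as-is and simply rejected if the target cannot handle it, while a scalar offset type is mapped to a vector type and progressively widened until the target accepts the gather/scatter or the offset reaches pointer width. The following is a minimal standalone sketch of that control flow only; it is not GCC internals, and OffsetType, target_supports_p and choose_offset_type are hypothetical stand-ins introduced purely for illustration.

// Standalone C++17 sketch of the offset-type selection loop described by
// the patch (hypothetical names, not GCC's API).
#include <optional>

struct OffsetType
{
  unsigned precision;	// bits of the scalar offset element
  bool is_vector;	// caller already supplied a vector offset type?
};

constexpr unsigned POINTER_SIZE_BITS = 64;

// Hypothetical target hook: does the target support this offset precision?
static bool
target_supports_p (unsigned offset_precision)
{
  return offset_precision >= 32;	// pretend only >= 32-bit offsets work
}

// Return the offset precision actually used, or nullopt if unsupported.
static std::optional<unsigned>
choose_offset_type (OffsetType offset, unsigned element_bits)
{
  for (;;)
    {
      // Test whether the target supports this combination.
      if (target_supports_p (offset.precision))
	return offset.precision;

      // A fixed vector offset type is never widened; we are done.
      if (offset.is_vector)
	return std::nullopt;

      // A scalar offset that already has pointer width (and is at least as
      // wide as the data elements) cannot be widened any further.
      if (offset.precision >= POINTER_SIZE_BITS
	  && offset.precision >= element_bits)
	return std::nullopt;

      // Try a twice-as-wide scalar offset type.
      offset.precision *= 2;
    }
}

int
main ()
{
  // A 16-bit scalar offset is widened to 32 bits before being accepted;
  // an unsupported vector offset type is rejected immediately.
  bool widened_ok = choose_offset_type ({16, false}, 32).value_or (0) == 32;
  bool vector_rejected = !choose_offset_type ({16, true}, 32).has_value ();
  return (widened_ok && vector_rejected) ? 0 : 1;
}

Returning nullopt as soon as a vector offset type fails mirrors the new early "return false" in the patch: a caller-supplied offset vector type is taken literally, and only scalar offset types participate in the widening retry loop.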