author    Michael Matz <matz@suse.de>    2012-04-10 16:09:03 +0000
committer Michael Matz <matz@gcc.gnu.org>    2012-04-10 16:09:03 +0000
commit    0d0293ace6acfd32edc68f6b9b371661c9b494e4 (patch)
tree      c1ba5f3475c5b1edde02b5641162698422325689 /gcc/tree-vect-stmts.c
parent    1f00098bf3a107ecf463cd31fc1ff50f960e1013 (diff)
tree-vectorizer.h (_loop_vec_info.strided_stores): Rename to grouped_stores.
        * tree-vectorizer.h (_loop_vec_info.strided_stores): Rename to
        grouped_stores.
        (LOOP_VINFO_STRIDED_STORES): Rename to LOOP_VINFO_GROUPED_STORES.
        (struct _bb_vec_info.strided_stores): Rename to grouped_stores.
        (BB_VINFO_STRIDED_STORES): Rename to BB_VINFO_GROUPED_STORES.
        (STMT_VINFO_STRIDED_ACCESS): Rename to STMT_VINFO_GROUPED_ACCESS.
        (vect_strided_store_supported): Rename to vect_grouped_store_supported.
        (vect_strided_load_supported): Rename to vect_grouped_load_supported.
        (vect_transform_strided_load): Rename to vect_transform_grouped_load.
        (vect_record_strided_load_vectors): Rename to
        vect_record_grouped_load_vectors.
        * tree-vect-data-refs.c (vect_update_misalignment_for_peel): Rename
        use of above macros.
        (vect_verify_datarefs_alignment): Ditto.
        (vector_alignment_reachable_p): Ditto.
        (vect_peeling_hash_get_lowest_cost): Ditto.
        (vect_enhance_data_refs_alignment): Ditto.
        (vect_analyze_group_access): Ditto and rename stride to groupsize.
        (vect_analyze_data_ref_access): Rename "strided" to "grouped".
        (vect_strided_store_supported): Rename to vect_grouped_store_supported.
        (vect_strided_load_supported): Rename to vect_grouped_load_supported.
        (vect_transform_strided_load): Rename to vect_transform_grouped_load.
        (vect_record_strided_load_vectors): Rename to
        vect_record_grouped_load_vectors.
        * tree-vect-loop.c (new_loop_vec_info): Rename use of above macros.
        (destroy_loop_vec_info): Ditto.
        (vect_transform_loop): Ditto and rename strided_store to
        grouped_store.
        * tree-vect-slp.c (vect_build_slp_tree): Rename use of above macros.
        (vect_analyze_slp): Ditto.
        (new_bb_vec_info): Ditto.
        (destroy_bb_vec_info): Ditto.
        (vect_schedule_slp_instance): Ditto and rename strided_store to
        grouped_store.
        * tree-vect-stmts.c (vect_cost_strided_group_size): Rename to
        vect_cost_group_size.
        (vect_model_store_cost): Rename use of above macros and call to
        vect_cost_strided_group_size.
        (vect_model_load_cost): Ditto.
        (vectorizable_store): Ditto, rename strided_store to grouped_store
        and calls to renamed tree-vectorizer.h functions.
        (vectorizable_load): Ditto.
        (vect_transform_stmt): Rename use of above macros and strided_store
        to grouped_store.

testsuite/
        * gcc.dg/vect/vect-outer-1-big-array.c: Adjust.
        * gcc.dg/vect/vect-outer-1.c: Adjust.
        * gcc.dg/vect/vect-outer-1a-big-array.c: Adjust.
        * gcc.dg/vect/vect-outer-1a.c: Adjust.
        * gcc.dg/vect/vect-outer-1b-big-array.c: Adjust.
        * gcc.dg/vect/vect-outer-1b.c: Adjust.
        * gcc.dg/vect/vect-outer-2b.c: Adjust.
        * gcc.dg/vect/vect-outer-3b.c: Adjust.

From-SVN: r186285
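For context, the accesses this rename covers are interleaved groups such as the
store group of size 2 in the loop below (an illustrative example of mine, not
part of the patch); the patch renames the predicates and helpers that handle
such groups from "strided" to "grouped":

    void
    f (int *a, int *b, int n)
    {
      int i;
      for (i = 0; i < n; i++)
        {
          a[2*i]   = b[i];      /* first store of a group of size 2 */
          a[2*i+1] = b[i] + 1;  /* last store of the same group */
        }
    }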
Diffstat (limited to 'gcc/tree-vect-stmts.c')
-rw-r--r--  gcc/tree-vect-stmts.c  100
1 file changed, 50 insertions(+), 50 deletions(-)
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index dabb63d..968e4ed 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -851,14 +851,14 @@ vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
stmt_vinfo_set_outside_of_loop_cost (stmt_info, NULL, outside_cost);
}
-/* Function vect_cost_strided_group_size
+/* Function vect_cost_group_size
- For strided load or store, return the group_size only if it is the first
+ For grouped load or store, return the group_size only if it is the first
load or store of a group, else return 1. This ensures that group size is
only returned once per group. */
static int
-vect_cost_strided_group_size (stmt_vec_info stmt_info)
+vect_cost_group_size (stmt_vec_info stmt_info)
{
gimple first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
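(The hunk is truncated here.  Going by the comment above, the elided remainder
of vect_cost_group_size amounts to a first-element check, roughly:

      if (first_stmt == STMT_VINFO_STMT (stmt_info))
        return GROUP_SIZE (vinfo_for_stmt (first_stmt));

      return 1;

so only the first load or store of a group reports the full group size.)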
@@ -871,8 +871,8 @@ vect_cost_strided_group_size (stmt_vec_info stmt_info)
/* Function vect_model_store_cost
- Models cost for stores. In the case of strided accesses, one access
- has the overhead of the strided access attributed to it. */
+ Models cost for stores. In the case of grouped accesses, one access
+ has the overhead of the grouped access attributed to it. */
void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
@@ -891,8 +891,8 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
if (dt == vect_constant_def || dt == vect_external_def)
outside_cost = vect_get_stmt_cost (scalar_to_vec);
- /* Strided access? */
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
+ /* Grouped access? */
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
if (slp_node)
{
@@ -902,12 +902,12 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
else
{
first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
- group_size = vect_cost_strided_group_size (stmt_info);
+ group_size = vect_cost_group_size (stmt_info);
}
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
}
- /* Not a strided access. */
+ /* Not a grouped access. */
else
{
group_size = 1;
@@ -915,7 +915,7 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
}
/* We assume that the cost of a single store-lanes instruction is
- equivalent to the cost of GROUP_SIZE separate stores. If a strided
+ equivalent to the cost of GROUP_SIZE separate stores. If a grouped
access is instead being provided by a permute-and-store operation,
include the cost of the permutes. */
if (!store_lanes_p && group_size > 1)
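To illustrate the permute-and-store case the comment describes (the concrete
scheme below is assumed for illustration only; interleave_low/high and vec are
placeholder names, not vectorizer API), a store group of size 2 can be
provided by interleaving permutes followed by contiguous wide stores:

      /* Scalar:  a[2*i] = x;  a[2*i+1] = y;  with 4-element vectors:  */
      v0 = interleave_low  (vx, vy);      /* { x0, y0, x1, y1 } */
      v1 = interleave_high (vx, vy);      /* { x2, y2, x3, y3 } */
      *(vec *) &a[4*i]     = v0;
      *(vec *) &a[4*i + 4] = v1;

Two permutes are paid on top of the GROUP_SIZE contiguous stores, which is the
extra cost the store-lanes alternative avoids.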
@@ -987,8 +987,8 @@ vect_get_store_cost (struct data_reference *dr, int ncopies,
/* Function vect_model_load_cost
- Models cost for loads. In the case of strided accesses, the last access
- has the overhead of the strided access attributed to it. Since unaligned
+ Models cost for loads. In the case of grouped accesses, the last access
+ has the overhead of the grouped access attributed to it. Since unaligned
accesses are supported for loads, we also account for the costs of the
access scheme chosen. */
@@ -1005,14 +1005,14 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, bool load_lanes_p,
if (PURE_SLP_STMT (stmt_info))
return;
- /* Strided accesses? */
+ /* Grouped accesses? */
first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && first_stmt && !slp_node)
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
{
- group_size = vect_cost_strided_group_size (stmt_info);
+ group_size = vect_cost_group_size (stmt_info);
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
}
- /* Not a strided access. */
+ /* Not a grouped access. */
else
{
group_size = 1;
@@ -1020,7 +1020,7 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, bool load_lanes_p,
}
/* We assume that the cost of a single load-lanes instruction is
- equivalent to the cost of GROUP_SIZE separate loads. If a strided
+ equivalent to the cost of GROUP_SIZE separate loads. If a grouped
access is instead being provided by a load-and-permute operation,
include the cost of the permutes. */
if (!load_lanes_p && group_size > 1)
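The load case is symmetric (again an assumed scheme, for illustration only;
extract_even/odd and vec are placeholders): a load group of size 2 is provided
by contiguous wide loads plus even/odd extraction permutes, whose cost this
branch adds:

      /* Scalar:  x = a[2*i];  y = a[2*i+1];  with 4-element vectors:  */
      v0 = *(vec *) &a[4*i];              /* { x0, y0, x1, y1 } */
      v1 = *(vec *) &a[4*i + 4];          /* { x2, y2, x3, y3 } */
      vx = extract_even (v0, v1);         /* { x0, x1, x2, x3 } */
      vy = extract_odd  (v0, v1);         /* { y0, y1, y2, y3 } */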
@@ -1036,7 +1036,7 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, bool load_lanes_p,
/* The loads themselves. */
vect_get_load_cost (first_dr, ncopies,
- ((!STMT_VINFO_STRIDED_ACCESS (stmt_info)) || group_size > 1
+ ((!STMT_VINFO_GROUPED_ACCESS (stmt_info)) || group_size > 1
|| slp_node),
&inside_cost, &outside_cost);
@@ -1109,7 +1109,7 @@ vect_get_load_cost (struct data_reference *dr, int ncopies,
/* Unaligned software pipeline has a load of an address, an initial
load, and possibly a mask operation to "prime" the loop. However,
- if this is an access in a group of loads, which provide strided
+ if this is an access in a group of loads, which provide grouped
access, then the above cost should only be considered for one
access in the group. Inside the loop, there is a load op
and a realignment op. */
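A conceptual sketch of that unaligned software pipeline (helper names are
placeholders, not vectorizer API): the address, initial load, and mask are set
up once before the loop, which is why only one access of the group should
carry that cost:

      msq  = load_aligned (align_down (p));     /* initial load, before loop */
      mask = realign_mask (p);                  /* possible "prime" mask op  */
      for (...)
        {
          lsq = load_aligned (align_down (p) + VS);  /* load op in the loop */
          v   = realign (msq, lsq, mask);            /* realignment op      */
          msq = lsq;
          p  += VS;
        }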
@@ -3692,7 +3692,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
int ncopies;
int j;
gimple next_stmt, first_stmt = NULL;
- bool strided_store = false;
+ bool grouped_store = false;
bool store_lanes_p = false;
unsigned int group_size, i;
VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
@@ -3777,16 +3777,16 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
return false;
}
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
- strided_store = true;
+ grouped_store = true;
first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
if (!slp && !PURE_SLP_STMT (stmt_info))
{
group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
if (vect_store_lanes_supported (vectype, group_size))
store_lanes_p = true;
- else if (!vect_strided_store_supported (vectype, group_size))
+ else if (!vect_grouped_store_supported (vectype, group_size))
return false;
}
@@ -3820,7 +3820,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
/** Transform. **/
- if (strided_store)
+ if (grouped_store)
{
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
@@ -3842,7 +3842,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
if (slp)
{
- strided_store = false;
+ grouped_store = false;
/* VEC_NUM is the number of vect stmts to be created for this
group. */
vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
@@ -3887,7 +3887,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
vector stmt by a factor VF/nunits. For more details see documentation in
vect_get_vec_def_for_copy_stmt. */
- /* In case of interleaving (non-unit strided access):
+ /* In case of interleaving (non-unit grouped access):
S1: &base + 2 = x2
S2: &base = x0
@@ -3943,7 +3943,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
used as an input to vect_permute_store_chain(), and OPRNDS as
an input to vect_get_vec_def_for_stmt_copy() for the next copy.
- If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
+ If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
OPRNDS are of size 1. */
next_stmt = first_stmt;
for (i = 0; i < group_size; i++)
@@ -3980,7 +3980,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
DR_CHAIN is then used as an input to vect_permute_store_chain(),
and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
next copy.
- If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
+ If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
OPRNDS are of size 1. */
for (i = 0; i < group_size; i++)
{
@@ -4018,7 +4018,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
else
{
new_stmt = NULL;
- if (strided_store)
+ if (grouped_store)
{
result_chain = VEC_alloc (tree, heap, group_size);
/* Permute. */
@@ -4038,8 +4038,8 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
if (slp)
vec_oprnd = VEC_index (tree, vec_oprnds, i);
- else if (strided_store)
- /* For strided stores vectorized defs are interleaved in
+ else if (grouped_store)
+ /* For grouped stores vectorized defs are interleaved in
vect_permute_store_chain(). */
vec_oprnd = VEC_index (tree, result_chain, i);
@@ -4208,7 +4208,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
tree realignment_token = NULL_TREE;
gimple phi = NULL;
VEC(tree,heap) *dr_chain = NULL;
- bool strided_load = false;
+ bool grouped_load = false;
bool load_lanes_p = false;
gimple first_stmt;
bool inv_p;
@@ -4305,9 +4305,9 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
}
/* Check if the load is a part of an interleaving chain. */
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
- strided_load = true;
+ grouped_load = true;
/* FORNOW */
gcc_assert (! nested_in_vect_loop && !STMT_VINFO_GATHER_P (stmt_info));
@@ -4317,14 +4317,14 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
if (vect_load_lanes_supported (vectype, group_size))
load_lanes_p = true;
- else if (!vect_strided_load_supported (vectype, group_size))
+ else if (!vect_grouped_load_supported (vectype, group_size))
return false;
}
}
if (negative)
{
- gcc_assert (!strided_load && !STMT_VINFO_GATHER_P (stmt_info));
+ gcc_assert (!grouped_load && !STMT_VINFO_GATHER_P (stmt_info));
alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
if (alignment_support_scheme != dr_aligned
&& alignment_support_scheme != dr_unaligned_supported)
@@ -4525,7 +4525,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
return true;
}
- if (strided_load)
+ if (grouped_load)
{
first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
if (slp
@@ -4545,7 +4545,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
/* VEC_NUM is the number of vect stmts to be created for this group. */
if (slp)
{
- strided_load = false;
+ grouped_load = false;
vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
slp_perm = true;
@@ -4603,7 +4603,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
information we recorded in RELATED_STMT field is used to vectorize
stmt S2. */
- /* In case of interleaving (non-unit strided access):
+ /* In case of interleaving (non-unit grouped access):
S1: x2 = &base + 2
S2: x0 = &base
@@ -4629,7 +4629,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
corresponds to the order of scalar stmts in the interleaving chain - see
the documentation of vect_permute_load_chain()).
The generation of permutation stmts and recording them in
- STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
+ STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
In case of both multiple types and interleaving, the vector loads and
permutation stmts above are created for every copy. The result vector
@@ -4715,7 +4715,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
TYPE_SIZE_UNIT (aggr_type));
- if (strided_load || slp_perm)
+ if (grouped_load || slp_perm)
dr_chain = VEC_alloc (tree, heap, vec_num);
if (load_lanes_p)
@@ -4741,7 +4741,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
}
/* Record the mapping between SSA_NAMEs and statements. */
- vect_record_strided_load_vectors (stmt, dr_chain);
+ vect_record_grouped_load_vectors (stmt, dr_chain);
}
else
{
@@ -4896,7 +4896,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
if (inv_p && !bb_vinfo)
{
gimple_stmt_iterator gsi2 = *gsi;
- gcc_assert (!strided_load);
+ gcc_assert (!grouped_load);
gsi_next (&gsi2);
new_temp = vect_init_vector (stmt, scalar_dest,
vectype, &gsi2);
@@ -4912,8 +4912,8 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
}
/* Collect vector loads and later create their permutation in
- vect_transform_strided_load (). */
- if (strided_load || slp_perm)
+ vect_transform_grouped_load (). */
+ if (grouped_load || slp_perm)
VEC_quick_push (tree, dr_chain, new_temp);
/* Store vector loads in the corresponding SLP_NODE. */
@@ -4937,10 +4937,10 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
}
else
{
- if (strided_load)
+ if (grouped_load)
{
if (!load_lanes_p)
- vect_transform_strided_load (stmt, dr_chain, group_size, gsi);
+ vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
*vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
}
else
@@ -5494,7 +5494,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
bool
vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
- bool *strided_store, slp_tree slp_node,
+ bool *grouped_store, slp_tree slp_node,
slp_instance slp_node_instance)
{
bool is_store = false;
@@ -5541,13 +5541,13 @@ vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
case store_vec_info_type:
done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
gcc_assert (done);
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
{
/* In case of interleaving, the whole chain is vectorized when the
last store in the chain is reached. Store stmts before the last
one are skipped, and their vec_stmt_info shouldn't be freed
meanwhile. */
- *strided_store = true;
+ *grouped_store = true;
if (STMT_VINFO_VEC_STMT (stmt_info))
is_store = true;
}
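Applied to the group-of-two store example shown after the ChangeLog above, the
path just patched behaves as follows (sketch):

      a[2*i]   = x;   /* vect_transform_stmt: chain not complete yet, no    */
                      /* vector stmt emitted; is_store stays false.         */
      a[2*i+1] = y;   /* last element of the chain: vectorizable_store      */
                      /* emits the whole chain (permutes + wide stores).    */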