Diffstat (limited to 'gcc/tree-vect-stmts.c')
-rw-r--r--  gcc/tree-vect-stmts.c | 173
1 file changed, 86 insertions(+), 87 deletions(-)
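The hunks below systematically replace bare "gimple *" statement handles with
"stmt_vec_info" objects: group accessors such as DR_GROUP_FIRST_ELEMENT and
DR_GROUP_NEXT_ELEMENT now yield stmt_vec_info values, so the intermediate
vinfo_for_stmt lookups disappear. A minimal sketch of the recurring
before/after pattern (distilled from the hunks for illustration, not a
verbatim excerpt, and it relies on GCC-internal macros rather than being
standalone code):

    /* Before: group accessors handed back gimple *, so each use of the
       group leader needed an extra vinfo_for_stmt lookup.  */
    gimple *first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
    unsigned int group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));

    /* After: the accessors return stmt_vec_info directly, so the group
       properties can be read off the leader without the extra lookup.  */
    stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
    unsigned int group_size = DR_GROUP_SIZE (first_stmt_info);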
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index dec6ea2..257ab076 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -978,7 +978,7 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
stmt_vector_for_cost *cost_vec)
{
unsigned int inside_cost = 0, prologue_cost = 0;
- gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
+ stmt_vec_info first_stmt_info = stmt_info;
bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
/* ??? Somehow we need to fix this at the callers. */
@@ -998,12 +998,12 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
/* Grouped stores update all elements in the group at once,
so we want the DR for the first statement. */
if (!slp_node && grouped_access_p)
- first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
+ first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
/* True if we should include any once-per-group costs as well as
the cost of the statement itself. For SLP we only get called
once per group anyhow. */
- bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));
+ bool first_stmt_p = (first_stmt_info == stmt_info);
/* We assume that the cost of a single store-lanes instruction is
equivalent to the cost of DR_GROUP_SIZE separate stores. If a grouped
@@ -1014,7 +1014,7 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
{
/* Uses a high and low interleave or shuffle operations for each
needed permute. */
- int group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
+ int group_size = DR_GROUP_SIZE (first_stmt_info);
int nstmts = ncopies * ceil_log2 (group_size) * group_size;
inside_cost = record_stmt_cost (cost_vec, nstmts, vec_perm,
stmt_info, 0, vect_body);
@@ -1122,7 +1122,6 @@ vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies,
slp_tree slp_node,
stmt_vector_for_cost *cost_vec)
{
- gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
unsigned int inside_cost = 0, prologue_cost = 0;
bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
@@ -1136,28 +1135,27 @@ vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies,
{
/* If the load is permuted then the alignment is determined by
the first group element not by the first scalar stmt DR. */
- gimple *stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
/* Record the cost for the permutation. */
unsigned n_perms;
unsigned assumed_nunits
- = vect_nunits_for_cost (STMT_VINFO_VECTYPE (stmt_info));
+ = vect_nunits_for_cost (STMT_VINFO_VECTYPE (first_stmt_info));
unsigned slp_vf = (ncopies * assumed_nunits) / instance->group_size;
vect_transform_slp_perm_load (slp_node, vNULL, NULL,
slp_vf, instance, true,
&n_perms);
inside_cost += record_stmt_cost (cost_vec, n_perms, vec_perm,
- stmt_info, 0, vect_body);
+ first_stmt_info, 0, vect_body);
/* And adjust the number of loads performed. This handles
redundancies as well as loads that are later dead. */
- auto_sbitmap perm (DR_GROUP_SIZE (stmt_info));
+ auto_sbitmap perm (DR_GROUP_SIZE (first_stmt_info));
bitmap_clear (perm);
for (unsigned i = 0;
i < SLP_TREE_LOAD_PERMUTATION (slp_node).length (); ++i)
bitmap_set_bit (perm, SLP_TREE_LOAD_PERMUTATION (slp_node)[i]);
ncopies = 0;
bool load_seen = false;
- for (unsigned i = 0; i < DR_GROUP_SIZE (stmt_info); ++i)
+ for (unsigned i = 0; i < DR_GROUP_SIZE (first_stmt_info); ++i)
{
if (i % assumed_nunits == 0)
{
@@ -1171,19 +1169,21 @@ vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies,
if (load_seen)
ncopies++;
gcc_assert (ncopies
- <= (DR_GROUP_SIZE (stmt_info) - DR_GROUP_GAP (stmt_info)
+ <= (DR_GROUP_SIZE (first_stmt_info)
+ - DR_GROUP_GAP (first_stmt_info)
+ assumed_nunits - 1) / assumed_nunits);
}
/* Grouped loads read all elements in the group at once,
so we want the DR for the first statement. */
+ stmt_vec_info first_stmt_info = stmt_info;
if (!slp_node && grouped_access_p)
- first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
+ first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
/* True if we should include any once-per-group costs as well as
the cost of the statement itself. For SLP we only get called
once per group anyhow. */
- bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));
+ bool first_stmt_p = (first_stmt_info == stmt_info);
/* We assume that the cost of a single load-lanes instruction is
equivalent to the cost of DR_GROUP_SIZE separate loads. If a grouped
@@ -1194,7 +1194,7 @@ vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies,
{
/* Uses an even and odd extract operations or shuffle operations
for each needed permute. */
- int group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
+ int group_size = DR_GROUP_SIZE (first_stmt_info);
int nstmts = ncopies * ceil_log2 (group_size) * group_size;
inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
stmt_info, 0, vect_body);
@@ -2183,12 +2183,12 @@ get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
vec_info *vinfo = stmt_info->vinfo;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
- gimple *first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
- data_reference *first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
- unsigned int group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
- bool single_element_p = (stmt == first_stmt
+ stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
+ data_reference *first_dr = STMT_VINFO_DATA_REF (first_stmt_info);
+ unsigned int group_size = DR_GROUP_SIZE (first_stmt_info);
+ bool single_element_p = (stmt_info == first_stmt_info
&& !DR_GROUP_NEXT_ELEMENT (stmt_info));
- unsigned HOST_WIDE_INT gap = DR_GROUP_GAP (vinfo_for_stmt (first_stmt));
+ unsigned HOST_WIDE_INT gap = DR_GROUP_GAP (first_stmt_info);
poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
/* True if the vectorized statements would access beyond the last
@@ -2315,14 +2315,14 @@ get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
*memory_access_type = VMAT_GATHER_SCATTER;
}
- if (vls_type != VLS_LOAD && first_stmt == stmt)
+ if (vls_type != VLS_LOAD && first_stmt_info == stmt_info)
{
/* STMT is the leader of the group. Check the operands of all the
stmts of the group. */
- gimple *next_stmt = DR_GROUP_NEXT_ELEMENT (stmt_info);
- while (next_stmt)
+ stmt_vec_info next_stmt_info = DR_GROUP_NEXT_ELEMENT (stmt_info);
+ while (next_stmt_info)
{
- tree op = vect_get_store_rhs (next_stmt);
+ tree op = vect_get_store_rhs (next_stmt_info);
enum vect_def_type dt;
if (!vect_is_simple_use (op, vinfo, &dt))
{
@@ -2331,7 +2331,7 @@ get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
"use not simple.\n");
return false;
}
- next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+ next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
}
}
@@ -2482,7 +2482,7 @@ get_load_store_type (gimple *stmt, tree vectype, bool slp, bool masked_p,
traditional behavior until that can be fixed. */
if (*memory_access_type == VMAT_ELEMENTWISE
&& !STMT_VINFO_STRIDED_P (stmt_info)
- && !(stmt == DR_GROUP_FIRST_ELEMENT (stmt_info)
+ && !(stmt_info == DR_GROUP_FIRST_ELEMENT (stmt_info)
&& !DR_GROUP_NEXT_ELEMENT (stmt_info)
&& !pow2p_hwi (DR_GROUP_SIZE (stmt_info))))
{
@@ -6195,13 +6195,13 @@ static tree
get_group_alias_ptr_type (gimple *first_stmt)
{
struct data_reference *first_dr, *next_dr;
- gimple *next_stmt;
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
- next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
- while (next_stmt)
+ stmt_vec_info next_stmt_info
+ = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
+ while (next_stmt_info)
{
- next_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (next_stmt));
+ next_dr = STMT_VINFO_DATA_REF (next_stmt_info);
if (get_alias_set (DR_REF (first_dr))
!= get_alias_set (DR_REF (next_dr)))
{
@@ -6210,7 +6210,7 @@ get_group_alias_ptr_type (gimple *first_stmt)
"conflicting alias set types.\n");
return ptr_type_node;
}
- next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+ next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
}
return reference_alias_ptr_type (DR_REF (first_dr));
}
@@ -6248,7 +6248,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
gimple *ptr_incr = NULL;
int ncopies;
int j;
- gimple *next_stmt, *first_stmt;
+ stmt_vec_info first_stmt_info;
bool grouped_store;
unsigned int group_size, i;
vec<tree> oprnds = vNULL;
@@ -6400,13 +6400,13 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
&& (slp || memory_access_type != VMAT_CONTIGUOUS));
if (grouped_store)
{
- first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
- first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
- group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
+ first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
+ first_dr = STMT_VINFO_DATA_REF (first_stmt_info);
+ group_size = DR_GROUP_SIZE (first_stmt_info);
}
else
{
- first_stmt = stmt;
+ first_stmt_info = stmt_info;
first_dr = dr;
group_size = vec_num = 1;
}
@@ -6584,10 +6584,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
}
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
- {
- gimple *group_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
- DR_GROUP_STORE_COUNT (vinfo_for_stmt (group_stmt))++;
- }
+ DR_GROUP_STORE_COUNT (DR_GROUP_FIRST_ELEMENT (stmt_info))++;
if (grouped_store)
{
@@ -6596,8 +6593,8 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
/* We vectorize all the stmts of the interleaving group when we
reach the last stmt in the group. */
- if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
- < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
+ if (DR_GROUP_STORE_COUNT (first_stmt_info)
+ < DR_GROUP_SIZE (first_stmt_info)
&& !slp)
{
*vec_stmt = NULL;
@@ -6610,17 +6607,18 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
/* VEC_NUM is the number of vect stmts to be created for this
group. */
vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
- first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
- gcc_assert (DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
- first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
- op = vect_get_store_rhs (first_stmt);
+ first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
+ gcc_assert (DR_GROUP_FIRST_ELEMENT (first_stmt_info)
+ == first_stmt_info);
+ first_dr = STMT_VINFO_DATA_REF (first_stmt_info);
+ op = vect_get_store_rhs (first_stmt_info);
}
else
/* VEC_NUM is the number of vect stmts to be created for this
group. */
vec_num = group_size;
- ref_type = get_group_alias_ptr_type (first_stmt);
+ ref_type = get_group_alias_ptr_type (first_stmt_info);
}
else
ref_type = reference_alias_ptr_type (DR_REF (first_dr));
@@ -6759,7 +6757,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
prev_stmt_info = NULL;
alias_off = build_int_cst (ref_type, 0);
- next_stmt = first_stmt;
+ stmt_vec_info next_stmt_info = first_stmt_info;
for (g = 0; g < group_size; g++)
{
running_off = offvar;
@@ -6780,7 +6778,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
for (j = 0; j < ncopies; j++)
{
/* We've set op and dt above, from vect_get_store_rhs,
- and first_stmt == stmt. */
+ and first_stmt_info == stmt_info. */
if (j == 0)
{
if (slp)
@@ -6791,8 +6789,9 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
}
else
{
- op = vect_get_store_rhs (next_stmt);
- vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
+ op = vect_get_store_rhs (next_stmt_info);
+ vec_oprnd = vect_get_vec_def_for_operand
+ (op, next_stmt_info);
}
}
else
@@ -6866,7 +6865,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
}
}
}
- next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+ next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
if (slp)
break;
}
@@ -6985,19 +6984,20 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN and
OPRNDS are of size 1. */
- next_stmt = first_stmt;
+ stmt_vec_info next_stmt_info = first_stmt_info;
for (i = 0; i < group_size; i++)
{
/* Since gaps are not supported for interleaved stores,
DR_GROUP_SIZE is the exact number of stmts in the chain.
- Therefore, NEXT_STMT can't be NULL_TREE. In case that
- there is no interleaving, DR_GROUP_SIZE is 1, and only one
- iteration of the loop will be executed. */
- op = vect_get_store_rhs (next_stmt);
- vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
+ Therefore, NEXT_STMT_INFO can't be NULL_TREE. In case
+ that there is no interleaving, DR_GROUP_SIZE is 1,
+ and only one iteration of the loop will be executed. */
+ op = vect_get_store_rhs (next_stmt_info);
+ vec_oprnd = vect_get_vec_def_for_operand
+ (op, next_stmt_info);
dr_chain.quick_push (vec_oprnd);
oprnds.quick_push (vec_oprnd);
- next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+ next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
}
if (mask)
vec_mask = vect_get_vec_def_for_operand (mask, stmt,
@@ -7029,7 +7029,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
}
else
dataref_ptr
- = vect_create_data_ref_ptr (first_stmt, aggr_type,
+ = vect_create_data_ref_ptr (first_stmt_info, aggr_type,
simd_lane_access_p ? loop : NULL,
offset, &dummy, gsi, &ptr_incr,
simd_lane_access_p, &inv_p,
@@ -7132,7 +7132,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
&result_chain);
}
- next_stmt = first_stmt;
+ stmt_vec_info next_stmt_info = first_stmt_info;
for (i = 0; i < vec_num; i++)
{
unsigned align, misalign;
@@ -7249,8 +7249,8 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
if (slp)
continue;
- next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
- if (!next_stmt)
+ next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
+ if (!next_stmt_info)
break;
}
}
@@ -7423,7 +7423,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
gphi *phi = NULL;
vec<tree> dr_chain = vNULL;
bool grouped_load = false;
- gimple *first_stmt;
+ stmt_vec_info first_stmt_info;
stmt_vec_info first_stmt_info_for_drptr = NULL;
bool inv_p;
bool compute_in_loop = false;
@@ -7565,8 +7565,8 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
gcc_assert (!nested_in_vect_loop);
gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info));
- first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
- group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
+ first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
+ group_size = DR_GROUP_SIZE (first_stmt_info);
if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
slp_perm = true;
@@ -7696,25 +7696,26 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
if (grouped_load)
{
- first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
- first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
+ first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
+ first_dr = STMT_VINFO_DATA_REF (first_stmt_info);
}
else
{
- first_stmt = stmt;
+ first_stmt_info = stmt_info;
first_dr = dr;
}
if (slp && grouped_load)
{
- group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
- ref_type = get_group_alias_ptr_type (first_stmt);
+ group_size = DR_GROUP_SIZE (first_stmt_info);
+ ref_type = get_group_alias_ptr_type (first_stmt_info);
}
else
{
if (grouped_load)
cst_offset
= (tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)))
- * vect_get_place_in_interleaving_chain (stmt, first_stmt));
+ * vect_get_place_in_interleaving_chain (stmt,
+ first_stmt_info));
group_size = 1;
ref_type = reference_alias_ptr_type (DR_REF (dr));
}
@@ -7924,19 +7925,19 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
if (grouped_load)
{
- first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
- group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
+ first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
+ group_size = DR_GROUP_SIZE (first_stmt_info);
/* For SLP vectorization we directly vectorize a subchain
without permutation. */
if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
- first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
+ first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
/* For BB vectorization always use the first stmt to base
the data ref pointer on. */
if (bb_vinfo)
first_stmt_info_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];
/* Check if the chain of loads is already vectorized. */
- if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
+ if (STMT_VINFO_VEC_STMT (first_stmt_info)
/* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
??? But we can only do so if there is exactly one
as we have no way to get at the rest. Leave the CSE
@@ -7950,7 +7951,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
*vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
return true;
}
- first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
+ first_dr = STMT_VINFO_DATA_REF (first_stmt_info);
group_gap_adj = 0;
/* VEC_NUM is the number of vect stmts to be created for this group. */
@@ -7979,11 +7980,11 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
else
vec_num = group_size;
- ref_type = get_group_alias_ptr_type (first_stmt);
+ ref_type = get_group_alias_ptr_type (first_stmt_info);
}
else
{
- first_stmt = stmt;
+ first_stmt_info = stmt_info;
first_dr = dr;
group_size = vec_num = 1;
group_gap_adj = 0;
@@ -8120,7 +8121,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
|| alignment_support_scheme == dr_explicit_realign)
&& !compute_in_loop)
{
- msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
+ msq = vect_setup_realignment (first_stmt_info, gsi, &realignment_token,
alignment_support_scheme, NULL_TREE,
&at_loop);
if (alignment_support_scheme == dr_explicit_realign_optimized)
@@ -8184,7 +8185,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
inv_p = false;
}
else if (first_stmt_info_for_drptr
- && first_stmt != first_stmt_info_for_drptr)
+ && first_stmt_info != first_stmt_info_for_drptr)
{
dataref_ptr
= vect_create_data_ref_ptr (first_stmt_info_for_drptr,
@@ -8209,7 +8210,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
}
else
dataref_ptr
- = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
+ = vect_create_data_ref_ptr (first_stmt_info, aggr_type, at_loop,
offset, &dummy, gsi, &ptr_incr,
simd_lane_access_p, &inv_p,
byte_offset, bump);
@@ -8388,7 +8389,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
if (compute_in_loop)
- msq = vect_setup_realignment (first_stmt, gsi,
+ msq = vect_setup_realignment (first_stmt_info, gsi,
&realignment_token,
dr_explicit_realign,
dataref_ptr, NULL);
@@ -9708,8 +9709,7 @@ vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
one are skipped, and there vec_stmt_info shouldn't be freed
meanwhile. */
*grouped_store = true;
- stmt_vec_info group_info
- = vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (stmt_info));
+ stmt_vec_info group_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
if (DR_GROUP_STORE_COUNT (group_info) == DR_GROUP_SIZE (group_info))
is_store = true;
}
@@ -9817,14 +9817,13 @@ void
vect_remove_stores (gimple *first_stmt)
{
gimple *next = first_stmt;
- gimple *tmp;
gimple_stmt_iterator next_si;
while (next)
{
stmt_vec_info stmt_info = vinfo_for_stmt (next);
- tmp = DR_GROUP_NEXT_ELEMENT (stmt_info);
+ stmt_vec_info tmp = DR_GROUP_NEXT_ELEMENT (stmt_info);
if (is_pattern_stmt_p (stmt_info))
next = STMT_VINFO_RELATED_STMT (stmt_info);
/* Free the attached stmt_vec_info and remove the stmt. */