author     Richard Sandiford <richard.sandiford@linaro.org>   2018-01-02 18:26:47 +0000
committer  Richard Sandiford <rsandifo@gcc.gnu.org>           2018-01-02 18:26:47 +0000
commit     e3342de49cbee48957acc749b9566eee230860be (patch)
tree       32a86a752b83bafed11e1621d738a7fd284a93f7 /gcc/tree-vect-loop.c
parent     6da64f1b329f57c07f22ec034bc7bc4b0dc9e87b (diff)
Make vec_perm_indices use new vector encoding
This patch changes vec_perm_indices from a plain vec<> to a class
that stores a canonicalized permutation, using the same encoding
as for VECTOR_CSTs.  This means that vec_perm_indices now carries
information about the number of vectors being permuted (currently
always 1 or 2) and the number of elements in each input vector.

A new vec_perm_builder class is used to actually build up the vector,
like tree_vector_builder does for trees.  vec_perm_indices is the
completed representation, a bit like VECTOR_CST is for trees.

The patch just does a mechanical conversion of the code to
vec_perm_builder: a later patch uses explicit encodings where possible.

The point of all this is that it makes the representation suitable
for variable-length vectors.  It's no longer necessary for the
underlying vec<>s to store every element explicitly.

In int-vector-builder.h, "using the same encoding as tree and rtx
constants" describes the endpoint -- adding the rtx encoding comes
later.

2018-01-02  Richard Sandiford  <richard.sandiford@linaro.org>

gcc/
	* int-vector-builder.h: New file.
	* vec-perm-indices.h: Include int-vector-builder.h.
	(vec_perm_builder): Redefine as an int_vector_builder.
	(auto_vec_perm_indices): Delete.
	(vec_perm_indices): Redefine as a stand-alone class.
	(vec_perm_indices::vec_perm_indices): New function.
	(vec_perm_indices::clamp): Likewise.
	* vec-perm-indices.c: Include fold-const.h and tree-vector-builder.h.
	(vec_perm_indices::new_vector): New function.
	(vec_perm_indices::new_expanded_vector): Update for new
	vec_perm_indices class.
	(vec_perm_indices::rotate_inputs): New function.
	(vec_perm_indices::all_in_range_p): Operate directly on the
	encoded form, without computing elided elements.
	(tree_to_vec_perm_builder): Operate directly on the VECTOR_CST
	encoding.  Update for new vec_perm_indices class.
	* optabs.c (expand_vec_perm_const): Create a vec_perm_indices for
	the given vec_perm_builder.
	(expand_vec_perm_var): Update vec_perm_builder constructor.
	(expand_mult_highpart): Use vec_perm_builder instead of
	auto_vec_perm_indices.
	* optabs-query.c (can_mult_highpart_p): Use vec_perm_builder and
	vec_perm_indices instead of auto_vec_perm_indices.  Use a single
	or double series encoding as appropriate.
	* fold-const.c (fold_ternary_loc): Use vec_perm_builder and
	vec_perm_indices instead of auto_vec_perm_indices.
	* tree-ssa-forwprop.c (simplify_vector_constructor): Likewise.
	* tree-vect-data-refs.c (vect_grouped_store_supported): Likewise.
	(vect_permute_store_chain): Likewise.
	(vect_grouped_load_supported): Likewise.
	(vect_permute_load_chain): Likewise.
	(vect_shift_permute_load_chain): Likewise.
	* tree-vect-slp.c (vect_build_slp_tree_1): Likewise.
	(vect_transform_slp_perm_load): Likewise.
	(vect_schedule_slp_instance): Likewise.
	* tree-vect-stmts.c (perm_mask_for_reverse): Likewise.
	(vectorizable_mask_load_store): Likewise.
	(vectorizable_bswap): Likewise.
	(vectorizable_store): Likewise.
	(vectorizable_load): Likewise.
	* tree-vect-generic.c (lower_vec_perm): Use vec_perm_builder and
	vec_perm_indices instead of auto_vec_perm_indices.  Use
	tree_to_vec_perm_builder to read the vector from a tree.
	* tree-vect-loop.c (calc_vec_perm_mask_for_shift): Take a
	vec_perm_builder instead of a vec_perm_indices.
	(have_whole_vector_shift): Use vec_perm_builder and
	vec_perm_indices instead of auto_vec_perm_indices.  Leave the
	truncation to calc_vec_perm_mask_for_shift.
	(vect_create_epilog_for_reduction): Likewise.
	* config/aarch64/aarch64.c (expand_vec_perm_d::perm): Change
	from auto_vec_perm_indices to vec_perm_indices.
	(aarch64_expand_vec_perm_const_1): Use rotate_inputs on d.perm
	instead of changing individual elements.
	(aarch64_vectorize_vec_perm_const): Use new_vector to install
	the vector in d.perm.
	* config/arm/arm.c (expand_vec_perm_d::perm): Change from
	auto_vec_perm_indices to vec_perm_indices.
	(arm_expand_vec_perm_const_1): Use rotate_inputs on d.perm
	instead of changing individual elements.
	(arm_vectorize_vec_perm_const): Use new_vector to install
	the vector in d.perm.
	* config/powerpcspe/powerpcspe.c (rs6000_expand_extract_even):
	Update vec_perm_builder constructor.
	(rs6000_expand_interleave): Likewise.
	* config/rs6000/rs6000.c (rs6000_expand_extract_even): Likewise.
	(rs6000_expand_interleave): Likewise.

From-SVN: r256095
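For reference, here is a minimal sketch of the builder/indices pattern the patch introduces, modelled on the tree-vect-loop.c hunks below.  The function name example_shift_perm_supported_p is hypothetical (an editor's illustration, not part of the patch); the sketch assumes the GCC-internal headers (vec-perm-indices.h, optabs-query.h) and a caller-supplied mode, shift offset and element count.

/* Illustration only: test whether MODE supports a constant permutation
   that shifts its contents right by OFFSET elements, using the new
   builder/indices split.  */
static bool
example_shift_perm_supported_p (machine_mode mode, unsigned int offset,
                                unsigned int nelt)
{
  /* Build the permutation.  The mechanical conversion uses the fully
     explicit encoding: NELT patterns of one element each.  */
  vec_perm_builder sel;
  sel.new_vector (nelt, nelt, 1);
  for (unsigned int i = 0; i < nelt; i++)
    sel.quick_push (i + offset);

  /* Wrap it in the completed representation: a permutation selecting
     from two input vectors of NELT elements each.  */
  vec_perm_indices indices;
  indices.new_vector (sel, 2, nelt);
  return can_vec_perm_const_p (mode, indices, false);
}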
Diffstat (limited to 'gcc/tree-vect-loop.c')
-rw-r--r--  gcc/tree-vect-loop.c  24
1 file changed, 12 insertions, 12 deletions
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c
index 81060e0..fa33c7d 100644
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -3713,12 +3713,11 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
    vector elements (not bits) for a vector with NELT elements.  */
 static void
 calc_vec_perm_mask_for_shift (unsigned int offset, unsigned int nelt,
-                              vec_perm_indices *sel)
+                              vec_perm_builder *sel)
 {
-  unsigned int i;
-
-  for (i = 0; i < nelt; i++)
-    sel->quick_push ((i + offset) & (2 * nelt - 1));
+  sel->new_vector (nelt, nelt, 1);
+  for (unsigned int i = 0; i < nelt; i++)
+    sel->quick_push (i + offset);
 }
 
 /* Checks whether the target supports whole-vector shifts for vectors of mode
@@ -3731,13 +3730,13 @@ have_whole_vector_shift (machine_mode mode)
     return true;
 
   unsigned int i, nelt = GET_MODE_NUNITS (mode);
-  auto_vec_perm_indices sel (nelt);
-
+  vec_perm_builder sel;
+  vec_perm_indices indices;
   for (i = nelt/2; i >= 1; i/=2)
     {
-      sel.truncate (0);
       calc_vec_perm_mask_for_shift (i, nelt, &sel);
-      if (!can_vec_perm_const_p (mode, sel, false))
+      indices.new_vector (sel, 2, nelt);
+      if (!can_vec_perm_const_p (mode, indices, false))
         return false;
     }
   return true;
@@ -5055,7 +5054,8 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
   if (reduce_with_shift && !slp_reduc)
     {
       int nelements = vec_size_in_bits / element_bitsize;
-      auto_vec_perm_indices sel (nelements);
+      vec_perm_builder sel;
+      vec_perm_indices indices;
       int elt_offset;
@@ -5079,9 +5079,9 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
            elt_offset >= 1;
            elt_offset /= 2)
         {
-          sel.truncate (0);
           calc_vec_perm_mask_for_shift (elt_offset, nelements, &sel);
-          tree mask = vect_gen_perm_mask_any (vectype, sel);
+          indices.new_vector (sel, 2, nelements);
+          tree mask = vect_gen_perm_mask_any (vectype, indices);
           epilog_stmt = gimple_build_assign (vec_dest, VEC_PERM_EXPR,
                                              new_temp, zero_vec, mask);
           new_name = make_ssa_name (vec_dest, epilog_stmt);
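As a concrete illustration of the new calc_vec_perm_mask_for_shift contract (an editor's worked example, not part of the patch): with nelt == 8 and offset == 2 the builder holds the series 2, 3, ..., 9.  Interpreted via indices.new_vector (sel, 2, 8) as a permutation of two 8-element inputs {v, zero_vec}, indices 8 and 9 select elements of zero_vec, giving the whole-vector shift by two elements.  Since offset never exceeds nelt in these callers, every index is already below 2 * nelt, which is why the old "& (2 * nelt - 1)" masking could be dropped.  Here vectype stands for whatever vector type the caller is reducing.

vec_perm_builder sel;
calc_vec_perm_mask_for_shift (2, 8, &sel);   /* sel encodes 2, 3, ..., 9.  */
vec_perm_indices indices;
indices.new_vector (sel, 2, 8);              /* two inputs of 8 elements each.  */
tree mask = vect_gen_perm_mask_any (vectype, indices);  /* constant mask tree.  */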