author     Richard Sandiford <richard.sandiford@linaro.org>    2018-01-03 21:42:12 +0000
committer  Richard Sandiford <rsandifo@gcc.gnu.org>            2018-01-03 21:42:12 +0000
commit     928686b1c6d0a8b791ec08b2588a5fb8118d191c (patch)
tree       88cbf9a40cb8344238ea471bb4c046691b5a1e40 /gcc
parent     edab8e10e3cc127335ae4fa7a5935a28acfae0a9 (diff)
poly_int: TYPE_VECTOR_SUBPARTS
This patch changes TYPE_VECTOR_SUBPARTS to a poly_uint64.  The value is
encoded in the 10-bit precision field and was previously always stored as a
simple log2 value.  The challenge was to use these 10 bits to encode the
number of elements in variable-length vectors, so that we didn't need to
increase the size of the tree.

In practice the number of vector elements should always have the form
N + N * X (where X is the runtime value), and as for constant-length
vectors, N must be a power of 2 (even though X itself might not be).
The patch therefore uses the low 8 bits to encode log2(N) and bit 8 to
select between constant-length and variable-length vectors.  Targets
without variable-length vectors continue to use the old scheme.

A new valid_vector_subparts_p function tests whether a given number of
elements can be encoded.  This is false for the vector modes that represent
an LD3 or ST3 vector triple (which we want to treat as arrays of vectors
rather than single vectors).  A rough sketch of the encoding appears after
the ChangeLog below.

Most of the patch is mechanical; previous patches handled the changes that
weren't entirely straightforward.

2018-01-03  Richard Sandiford  <richard.sandiford@linaro.org>
	    Alan Hayward  <alan.hayward@arm.com>
	    David Sherwood  <david.sherwood@arm.com>

gcc/
	* tree.h (TYPE_VECTOR_SUBPARTS): Turn into a function and handle
	polynomial numbers of units.
	(SET_TYPE_VECTOR_SUBPARTS): Likewise.
	(valid_vector_subparts_p): New function.
	(build_vector_type): Remove temporary shim and take the number
	of units as a poly_uint64 rather than an int.
	(build_opaque_vector_type): Take the number of units as a
	poly_uint64 rather than an int.
	* tree.c (build_vector_from_ctor): Handle polynomial
	TYPE_VECTOR_SUBPARTS.
	(type_hash_canon_hash, type_cache_hasher::equal): Likewise.
	(uniform_vector_p, vector_type_mode, build_vector): Likewise.
	(build_vector_from_val): If the number of units is variable,
	use build_vec_duplicate_cst for constant operands and
	VEC_DUPLICATE_EXPR otherwise.
	(make_vector_type): Remove temporary is_constant ().
	(build_vector_type, build_opaque_vector_type): Take the number of
	units as a poly_uint64 rather than an int.
	(check_vector_cst): Handle polynomial TYPE_VECTOR_SUBPARTS and
	VECTOR_CST_NELTS.
	* cfgexpand.c (expand_debug_expr): Likewise.
	* expr.c (count_type_elements, categorize_ctor_elements_1): Likewise.
	(store_constructor, expand_expr_real_1): Likewise.
	(const_scalar_mask_from_tree): Likewise.
	* fold-const-call.c (fold_const_reduction): Likewise.
	* fold-const.c (const_binop, const_unop, fold_convert_const): Likewise.
	(operand_equal_p, fold_vec_perm, fold_ternary_loc): Likewise.
	(native_encode_vector, vec_cst_ctor_to_array): Likewise.
	(fold_relational_const): Likewise.
	(native_interpret_vector): Likewise.  Change the size from an
	int to an unsigned int.
	* gimple-fold.c (gimple_fold_stmt_to_constant_1): Handle polynomial
	TYPE_VECTOR_SUBPARTS.
	(gimple_fold_indirect_ref, gimple_build_vector): Likewise.
	(gimple_build_vector_from_val): Use VEC_DUPLICATE_EXPR when
	duplicating a non-constant operand into a variable-length vector.
	* hsa-brig.c (hsa_op_immed::emit_to_buffer): Handle polynomial
	TYPE_VECTOR_SUBPARTS and VECTOR_CST_NELTS.
	* ipa-icf.c (sem_variable::equals): Likewise.
	* match.pd: Likewise.
	* omp-simd-clone.c (simd_clone_subparts): Likewise.
	* print-tree.c (print_node): Likewise.
	* stor-layout.c (layout_type): Likewise.
	* targhooks.c (default_builtin_vectorization_cost): Likewise.
	* tree-cfg.c (verify_gimple_comparison): Likewise.
	(verify_gimple_assign_binary): Likewise.
	(verify_gimple_assign_ternary): Likewise.
	(verify_gimple_assign_single): Likewise.
	* tree-pretty-print.c (dump_generic_node): Likewise.
	* tree-ssa-forwprop.c (simplify_vector_constructor): Likewise.
	(simplify_bitfield_ref, is_combined_permutation_identity): Likewise.
	* tree-vect-data-refs.c (vect_permute_store_chain): Likewise.
	(vect_grouped_load_supported, vect_permute_load_chain): Likewise.
	(vect_shift_permute_load_chain): Likewise.
	* tree-vect-generic.c (nunits_for_known_piecewise_op): Likewise.
	(expand_vector_condition, optimize_vector_constructor): Likewise.
	(lower_vec_perm, get_compute_type): Likewise.
	* tree-vect-loop.c (vect_determine_vectorization_factor): Likewise.
	(get_initial_defs_for_reduction, vect_transform_loop): Likewise.
	* tree-vect-patterns.c (vect_recog_bool_pattern): Likewise.
	(vect_recog_mask_conversion_pattern): Likewise.
	* tree-vect-slp.c (vect_supported_load_permutation_p): Likewise.
	(vect_get_constant_vectors, vect_transform_slp_perm_load): Likewise.
	* tree-vect-stmts.c (perm_mask_for_reverse): Likewise.
	(get_group_load_store_type, vectorizable_mask_load_store): Likewise.
	(vectorizable_bswap, simd_clone_subparts, vectorizable_assignment)
	(vectorizable_shift, vectorizable_operation, vectorizable_store)
	(vectorizable_load, vect_is_simple_cond, vectorizable_comparison)
	(supportable_widening_operation): Likewise.
	(supportable_narrowing_operation): Likewise.
	* tree-vector-builder.c (tree_vector_builder::binary_encoded_nelts):
	Likewise.
	* varasm.c (output_constant): Likewise.

gcc/ada/
	* gcc-interface/utils.c (gnat_types_compatible_p): Handle
	polynomial TYPE_VECTOR_SUBPARTS.

gcc/brig/
	* brigfrontend/brig-to-generic.cc (get_unsigned_int_type): Handle
	polynomial TYPE_VECTOR_SUBPARTS.
	* brigfrontend/brig-util.h (gccbrig_type_vector_subparts): Likewise.

gcc/c-family/
	* c-common.c (vector_types_convertible_p, c_build_vec_perm_expr)
	(convert_vector_to_array_for_subscript): Handle polynomial
	TYPE_VECTOR_SUBPARTS.
	(c_common_type_for_mode): Check valid_vector_subparts_p.
	* c-pretty-print.c (pp_c_initializer_list): Handle polynomial
	VECTOR_CST_NELTS.

gcc/c/
	* c-typeck.c (comptypes_internal, build_binary_op): Handle polynomial
	TYPE_VECTOR_SUBPARTS.

gcc/cp/
	* constexpr.c (cxx_eval_array_reference): Handle polynomial
	VECTOR_CST_NELTS.
	(cxx_fold_indirect_ref): Handle polynomial TYPE_VECTOR_SUBPARTS.
	* call.c (build_conditional_expr_1): Likewise.
	* decl.c (cp_finish_decomp): Likewise.
	* mangle.c (write_type): Likewise.
	* typeck.c (structural_comptypes): Likewise.
	(cp_build_binary_op): Likewise.
	* typeck2.c (process_init_constructor_array): Likewise.

gcc/fortran/
	* trans-types.c (gfc_type_for_mode): Check valid_vector_subparts_p.

gcc/lto/
	* lto-lang.c (lto_type_for_mode): Check valid_vector_subparts_p.
	* lto.c (hash_canonical_type): Handle polynomial TYPE_VECTOR_SUBPARTS.

gcc/go/
	* go-lang.c (go_langhook_type_for_mode): Check valid_vector_subparts_p.

Co-Authored-By: Alan Hayward <alan.hayward@arm.com>
Co-Authored-By: David Sherwood <david.sherwood@arm.com>

From-SVN: r256197
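For illustration only, here is a minimal, self-contained C++ sketch of the encoding scheme summarised above.  It is not the code added by this patch; the type and function names (vector_nunits, valid_subparts_p, encode_subparts, decode_subparts) are hypothetical stand-ins, and the real representation lives in GCC's poly_uint64 and the tree precision field.

// Illustrative sketch only: the element count is N + N*X, the 10-bit field
// stores log2(N) in its low 8 bits, and bit 8 marks variable-length vectors.
#include <cstdint>
#include <cassert>
#include <optional>

struct vector_nunits {
  uint64_t coeff0;   // constant part: N
  uint64_t coeff1;   // runtime multiplier: N per unknown X (0 if fixed-length)
};

// Can this number of units be represented?  N must be a power of 2 and a
// variable-length count must have the form N + N*X.  An LD3/ST3 triple has
// 3*N units and therefore fails this test.
static bool valid_subparts_p (vector_nunits n)
{
  if (n.coeff0 == 0 || (n.coeff0 & (n.coeff0 - 1)) != 0)
    return false;
  return n.coeff1 == 0 || n.coeff1 == n.coeff0;
}

// Pack into the 10-bit field: bits 0..7 hold log2(N), bit 8 selects
// variable length.
static std::optional<uint16_t> encode_subparts (vector_nunits n)
{
  if (!valid_subparts_p (n))
    return std::nullopt;
  uint16_t log2n = 0;
  while ((uint64_t (1) << log2n) < n.coeff0)
    ++log2n;
  uint16_t field = log2n;
  if (n.coeff1 != 0)
    field |= 1 << 8;
  return field;
}

static vector_nunits decode_subparts (uint16_t field)
{
  uint64_t n = uint64_t (1) << (field & 0xff);
  bool variable = (field & (1 << 8)) != 0;
  return { n, variable ? n : 0 };
}

int main ()
{
  // A fixed-length 4-element vector encodes as log2(4) = 2.
  assert (encode_subparts ({4, 0}).value () == 2);
  // An SVE-style vector with 2 + 2*X units sets the variable-length bit.
  assert (encode_subparts ({2, 2}).value () == ((1 << 8) | 1));
  // An LD3-style triple (12 + 12*X units) cannot be encoded.
  assert (!encode_subparts ({12, 12}).has_value ());
  vector_nunits d = decode_subparts ((1 << 8) | 1);
  assert (d.coeff0 == 2 && d.coeff1 == 2);
  return 0;
}

With this packing, any power-of-2 constant count and any variable count of the form N + N*X fits in the existing 10 bits, while a 12 + 12*X triple is rejected, matching the behaviour described for valid_vector_subparts_p.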
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog  78
-rw-r--r--  gcc/ada/ChangeLog  7
-rw-r--r--  gcc/ada/gcc-interface/utils.c  2
-rw-r--r--  gcc/brig/ChangeLog  8
-rw-r--r--  gcc/brig/brigfrontend/brig-to-generic.cc  2
-rw-r--r--  gcc/brig/brigfrontend/brig-util.h  2
-rw-r--r--  gcc/c-family/ChangeLog  11
-rw-r--r--  gcc/c-family/c-common.c  20
-rw-r--r--  gcc/c-family/c-pretty-print.c  5
-rw-r--r--  gcc/c/ChangeLog  7
-rw-r--r--  gcc/c/c-typeck.c  14
-rw-r--r--  gcc/cfgexpand.c  17
-rw-r--r--  gcc/cp/ChangeLog  14
-rw-r--r--  gcc/cp/call.c  4
-rw-r--r--  gcc/cp/constexpr.c  6
-rw-r--r--  gcc/cp/decl.c  6
-rw-r--r--  gcc/cp/mangle.c  3
-rw-r--r--  gcc/cp/typeck.c  19
-rw-r--r--  gcc/cp/typeck2.c  2
-rw-r--r--  gcc/expr.c  43
-rw-r--r--  gcc/fold-const-call.c  7
-rw-r--r--  gcc/fold-const.c  90
-rw-r--r--  gcc/fortran/ChangeLog  6
-rw-r--r--  gcc/fortran/trans-types.c  3
-rw-r--r--  gcc/gimple-fold.c  16
-rw-r--r--  gcc/go/ChangeLog  6
-rw-r--r--  gcc/go/go-lang.c  3
-rw-r--r--  gcc/hsa-brig.c  3
-rw-r--r--  gcc/ipa-icf.c  4
-rw-r--r--  gcc/lto/ChangeLog  7
-rw-r--r--  gcc/lto/lto-lang.c  3
-rw-r--r--  gcc/lto/lto.c  2
-rw-r--r--  gcc/match.pd  12
-rw-r--r--  gcc/omp-simd-clone.c  2
-rw-r--r--  gcc/print-tree.c  5
-rw-r--r--  gcc/stor-layout.c  4
-rw-r--r--  gcc/targhooks.c  2
-rw-r--r--  gcc/tree-cfg.c  32
-rw-r--r--  gcc/tree-pretty-print.c  7
-rw-r--r--  gcc/tree-ssa-forwprop.c  21
-rw-r--r--  gcc/tree-vect-data-refs.c  18
-rw-r--r--  gcc/tree-vect-generic.c  31
-rw-r--r--  gcc/tree-vect-loop.c  27
-rw-r--r--  gcc/tree-vect-patterns.c  14
-rw-r--r--  gcc/tree-vect-slp.c  31
-rw-r--r--  gcc/tree-vect-stmts.c  103
-rw-r--r--  gcc/tree-vector-builder.c  9
-rw-r--r--  gcc/tree.c  51
-rw-r--r--  gcc/tree.h  77
-rw-r--r--  gcc/varasm.c  4
50 files changed, 582 insertions, 288 deletions
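Many hunks below replace exact comparisons on element counts with known_eq, maybe_ne, known_lt and friends, or extract a compile-time value with is_constant/to_constant.  As a reading aid, here is a simplified two-coefficient model of those semantics; the helper names match the ones used in the patch, but the definitions are illustrative stand-ins, not GCC's poly_int implementation.

// Simplified model: value = coeff0 + coeff1 * X for an unknown runtime X >= 0.
#include <cstdint>
#include <cassert>

struct poly2 {
  uint64_t coeff0, coeff1;
};

// Equal for every possible X.
static bool known_eq (poly2 a, poly2 b)
{
  return a.coeff0 == b.coeff0 && a.coeff1 == b.coeff1;
}

// Possibly unequal, i.e. unequal for some X.
static bool maybe_ne (poly2 a, poly2 b)
{
  return !known_eq (a, b);
}

// Does the value not depend on X?  If so, extract it.
static bool is_constant (poly2 a, uint64_t *out)
{
  if (a.coeff1 != 0)
    return false;
  *out = a.coeff0;
  return true;
}

int main ()
{
  poly2 fixed4 = {4, 0};      // a 4-element vector
  poly2 scalable4 = {4, 4};   // a 4 + 4*X element vector

  assert (known_eq (fixed4, fixed4));
  assert (maybe_ne (fixed4, scalable4));   // they differ whenever X > 0

  uint64_t n;
  assert (is_constant (fixed4, &n) && n == 4);
  assert (!is_constant (scalable4, &n));
  return 0;
}

The pattern "!X.is_constant (&n)" followed by an early return, which recurs throughout the hunks below, is the variable-length-safe replacement for reading an element count directly into a plain integer.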
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index f17884d..75099e3 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -2,6 +2,84 @@
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
+ * tree.h (TYPE_VECTOR_SUBPARTS): Turn into a function and handle
+ polynomial numbers of units.
+ (SET_TYPE_VECTOR_SUBPARTS): Likewise.
+ (valid_vector_subparts_p): New function.
+ (build_vector_type): Remove temporary shim and take the number
+ of units as a poly_uint64 rather than an int.
+ (build_opaque_vector_type): Take the number of units as a
+ poly_uint64 rather than an int.
+ * tree.c (build_vector_from_ctor): Handle polynomial
+ TYPE_VECTOR_SUBPARTS.
+ (type_hash_canon_hash, type_cache_hasher::equal): Likewise.
+ (uniform_vector_p, vector_type_mode, build_vector): Likewise.
+ (build_vector_from_val): If the number of units is variable,
+ use build_vec_duplicate_cst for constant operands and
+ VEC_DUPLICATE_EXPR otherwise.
+ (make_vector_type): Remove temporary is_constant ().
+ (build_vector_type, build_opaque_vector_type): Take the number of
+ units as a poly_uint64 rather than an int.
+ (check_vector_cst): Handle polynomial TYPE_VECTOR_SUBPARTS and
+ VECTOR_CST_NELTS.
+ * cfgexpand.c (expand_debug_expr): Likewise.
+ * expr.c (count_type_elements, categorize_ctor_elements_1): Likewise.
+ (store_constructor, expand_expr_real_1): Likewise.
+ (const_scalar_mask_from_tree): Likewise.
+ * fold-const-call.c (fold_const_reduction): Likewise.
+ * fold-const.c (const_binop, const_unop, fold_convert_const): Likewise.
+ (operand_equal_p, fold_vec_perm, fold_ternary_loc): Likewise.
+ (native_encode_vector, vec_cst_ctor_to_array): Likewise.
+ (fold_relational_const): Likewise.
+ (native_interpret_vector): Likewise. Change the size from an
+ int to an unsigned int.
+ * gimple-fold.c (gimple_fold_stmt_to_constant_1): Handle polynomial
+ TYPE_VECTOR_SUBPARTS.
+ (gimple_fold_indirect_ref, gimple_build_vector): Likewise.
+ (gimple_build_vector_from_val): Use VEC_DUPLICATE_EXPR when
+ duplicating a non-constant operand into a variable-length vector.
+ * hsa-brig.c (hsa_op_immed::emit_to_buffer): Handle polynomial
+ TYPE_VECTOR_SUBPARTS and VECTOR_CST_NELTS.
+ * ipa-icf.c (sem_variable::equals): Likewise.
+ * match.pd: Likewise.
+ * omp-simd-clone.c (simd_clone_subparts): Likewise.
+ * print-tree.c (print_node): Likewise.
+ * stor-layout.c (layout_type): Likewise.
+ * targhooks.c (default_builtin_vectorization_cost): Likewise.
+ * tree-cfg.c (verify_gimple_comparison): Likewise.
+ (verify_gimple_assign_binary): Likewise.
+ (verify_gimple_assign_ternary): Likewise.
+ (verify_gimple_assign_single): Likewise.
+ * tree-pretty-print.c (dump_generic_node): Likewise.
+ * tree-ssa-forwprop.c (simplify_vector_constructor): Likewise.
+ (simplify_bitfield_ref, is_combined_permutation_identity): Likewise.
+ * tree-vect-data-refs.c (vect_permute_store_chain): Likewise.
+ (vect_grouped_load_supported, vect_permute_load_chain): Likewise.
+ (vect_shift_permute_load_chain): Likewise.
+ * tree-vect-generic.c (nunits_for_known_piecewise_op): Likewise.
+ (expand_vector_condition, optimize_vector_constructor): Likewise.
+ (lower_vec_perm, get_compute_type): Likewise.
+ * tree-vect-loop.c (vect_determine_vectorization_factor): Likewise.
+ (get_initial_defs_for_reduction, vect_transform_loop): Likewise.
+ * tree-vect-patterns.c (vect_recog_bool_pattern): Likewise.
+ (vect_recog_mask_conversion_pattern): Likewise.
+ * tree-vect-slp.c (vect_supported_load_permutation_p): Likewise.
+ (vect_get_constant_vectors, vect_transform_slp_perm_load): Likewise.
+ * tree-vect-stmts.c (perm_mask_for_reverse): Likewise.
+ (get_group_load_store_type, vectorizable_mask_load_store): Likewise.
+ (vectorizable_bswap, simd_clone_subparts, vectorizable_assignment)
+ (vectorizable_shift, vectorizable_operation, vectorizable_store)
+ (vectorizable_load, vect_is_simple_cond, vectorizable_comparison)
+ (supportable_widening_operation): Likewise.
+ (supportable_narrowing_operation): Likewise.
+ * tree-vector-builder.c (tree_vector_builder::binary_encoded_nelts):
+ Likewise.
+ * varasm.c (output_constant): Likewise.
+
+2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
* tree-vect-data-refs.c (vect_permute_store_chain): Reorganize
so that both the length == 3 and length != 3 cases set up their
own permute vectors. Add comments explaining why we know the
diff --git a/gcc/ada/ChangeLog b/gcc/ada/ChangeLog
index a4f1af0..aeb2115 100644
--- a/gcc/ada/ChangeLog
+++ b/gcc/ada/ChangeLog
@@ -2,6 +2,13 @@
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
+ * gcc-interface/utils.c (gnat_types_compatible_p): Handle
+ polynomial TYPE_VECTOR_SUBPARTS.
+
+2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
* gcc-interface/misc.c (enumerate_modes): Handle polynomial
GET_MODE_NUNITS.
diff --git a/gcc/ada/gcc-interface/utils.c b/gcc/ada/gcc-interface/utils.c
index a094ac7..5d03585 100644
--- a/gcc/ada/gcc-interface/utils.c
+++ b/gcc/ada/gcc-interface/utils.c
@@ -3561,7 +3561,7 @@ gnat_types_compatible_p (tree t1, tree t2)
/* Vector types are also compatible if they have the same number of subparts
and the same form of (scalar) element type. */
if (code == VECTOR_TYPE
- && TYPE_VECTOR_SUBPARTS (t1) == TYPE_VECTOR_SUBPARTS (t2)
+ && known_eq (TYPE_VECTOR_SUBPARTS (t1), TYPE_VECTOR_SUBPARTS (t2))
&& TREE_CODE (TREE_TYPE (t1)) == TREE_CODE (TREE_TYPE (t2))
&& TYPE_PRECISION (TREE_TYPE (t1)) == TYPE_PRECISION (TREE_TYPE (t2)))
return 1;
diff --git a/gcc/brig/ChangeLog b/gcc/brig/ChangeLog
index a0370d8..7805b99 100644
--- a/gcc/brig/ChangeLog
+++ b/gcc/brig/ChangeLog
@@ -1,3 +1,11 @@
+2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * brigfrontend/brig-to-generic.cc (get_unsigned_int_type): Handle
+ polynomial TYPE_VECTOR_SUBPARTS.
+ * brigfrontend/brig-util.h (gccbrig_type_vector_subparts): Likewise.
+
2018-01-03 Jakub Jelinek <jakub@redhat.com>
Update copyright years.
diff --git a/gcc/brig/brigfrontend/brig-to-generic.cc b/gcc/brig/brigfrontend/brig-to-generic.cc
index 805e4d6..f644db8 100644
--- a/gcc/brig/brigfrontend/brig-to-generic.cc
+++ b/gcc/brig/brigfrontend/brig-to-generic.cc
@@ -913,7 +913,7 @@ get_unsigned_int_type (tree original_type)
{
size_t esize
= int_size_in_bytes (TREE_TYPE (original_type)) * BITS_PER_UNIT;
- size_t ecount = TYPE_VECTOR_SUBPARTS (original_type);
+ poly_uint64 ecount = TYPE_VECTOR_SUBPARTS (original_type);
return build_vector_type (build_nonstandard_integer_type (esize, true),
ecount);
}
diff --git a/gcc/brig/brigfrontend/brig-util.h b/gcc/brig/brigfrontend/brig-util.h
index 4470181..6de0fa5 100644
--- a/gcc/brig/brigfrontend/brig-util.h
+++ b/gcc/brig/brigfrontend/brig-util.h
@@ -112,7 +112,7 @@ void gccbrig_print_reg_use_info (FILE *dump, const regs_use_index &info);
inline unsigned HOST_WIDE_INT
gccbrig_type_vector_subparts (const_tree type)
{
- return TYPE_VECTOR_SUBPARTS (type);
+ return TYPE_VECTOR_SUBPARTS (type).to_constant ();
}
#endif
diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog
index 1730b44..ab8f517 100644
--- a/gcc/c-family/ChangeLog
+++ b/gcc/c-family/ChangeLog
@@ -1,3 +1,14 @@
+2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * c-common.c (vector_types_convertible_p, c_build_vec_perm_expr)
+ (convert_vector_to_array_for_subscript): Handle polynomial
+ TYPE_VECTOR_SUBPARTS.
+ (c_common_type_for_mode): Check valid_vector_subparts_p.
+ * c-pretty-print.c (pp_c_initializer_list): Handle polynomial
+ VECTOR_CST_NELTS.
+
2018-01-03 Jakub Jelinek <jakub@redhat.com>
Update copyright years.
diff --git a/gcc/c-family/c-common.c b/gcc/c-family/c-common.c
index 6a1bb13..cebd1b8 100644
--- a/gcc/c-family/c-common.c
+++ b/gcc/c-family/c-common.c
@@ -940,15 +940,16 @@ vector_types_convertible_p (const_tree t1, const_tree t2, bool emit_lax_note)
convertible_lax =
(tree_int_cst_equal (TYPE_SIZE (t1), TYPE_SIZE (t2))
- && (TREE_CODE (TREE_TYPE (t1)) != REAL_TYPE ||
- TYPE_VECTOR_SUBPARTS (t1) == TYPE_VECTOR_SUBPARTS (t2))
+ && (TREE_CODE (TREE_TYPE (t1)) != REAL_TYPE
+ || known_eq (TYPE_VECTOR_SUBPARTS (t1),
+ TYPE_VECTOR_SUBPARTS (t2)))
&& (INTEGRAL_TYPE_P (TREE_TYPE (t1))
== INTEGRAL_TYPE_P (TREE_TYPE (t2))));
if (!convertible_lax || flag_lax_vector_conversions)
return convertible_lax;
- if (TYPE_VECTOR_SUBPARTS (t1) == TYPE_VECTOR_SUBPARTS (t2)
+ if (known_eq (TYPE_VECTOR_SUBPARTS (t1), TYPE_VECTOR_SUBPARTS (t2))
&& lang_hooks.types_compatible_p (TREE_TYPE (t1), TREE_TYPE (t2)))
return true;
@@ -1016,10 +1017,10 @@ c_build_vec_perm_expr (location_t loc, tree v0, tree v1, tree mask,
return error_mark_node;
}
- if (TYPE_VECTOR_SUBPARTS (TREE_TYPE (v0))
- != TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask))
- && TYPE_VECTOR_SUBPARTS (TREE_TYPE (v1))
- != TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask)))
+ if (maybe_ne (TYPE_VECTOR_SUBPARTS (TREE_TYPE (v0)),
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask)))
+ && maybe_ne (TYPE_VECTOR_SUBPARTS (TREE_TYPE (v1)),
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask))))
{
if (complain)
error_at (loc, "__builtin_shuffle number of elements of the "
@@ -2278,7 +2279,8 @@ c_common_type_for_mode (machine_mode mode, int unsignedp)
if (inner_type != NULL_TREE)
return build_complex_type (inner_type);
}
- else if (VECTOR_MODE_P (mode))
+ else if (VECTOR_MODE_P (mode)
+ && valid_vector_subparts_p (GET_MODE_NUNITS (mode)))
{
machine_mode inner_mode = GET_MODE_INNER (mode);
tree inner_type = c_common_type_for_mode (inner_mode, unsignedp);
@@ -7660,7 +7662,7 @@ convert_vector_to_array_for_subscript (location_t loc,
if (TREE_CODE (index) == INTEGER_CST)
if (!tree_fits_uhwi_p (index)
- || tree_to_uhwi (index) >= TYPE_VECTOR_SUBPARTS (type))
+ || maybe_ge (tree_to_uhwi (index), TYPE_VECTOR_SUBPARTS (type)))
warning_at (loc, OPT_Warray_bounds, "index value is out of bound");
/* We are building an ARRAY_REF so mark the vector as addressable
diff --git a/gcc/c-family/c-pretty-print.c b/gcc/c-family/c-pretty-print.c
index 1c95943..6e4f85c 100644
--- a/gcc/c-family/c-pretty-print.c
+++ b/gcc/c-family/c-pretty-print.c
@@ -1379,8 +1379,9 @@ pp_c_initializer_list (c_pretty_printer *pp, tree e)
case VECTOR_TYPE:
if (TREE_CODE (e) == VECTOR_CST)
{
- unsigned i;
- for (i = 0; i < VECTOR_CST_NELTS (e); ++i)
+ /* We don't create variable-length VECTOR_CSTs. */
+ unsigned int nunits = VECTOR_CST_NELTS (e).to_constant ();
+ for (unsigned int i = 0; i < nunits; ++i)
{
if (i > 0)
pp_separate_with (pp, ',');
diff --git a/gcc/c/ChangeLog b/gcc/c/ChangeLog
index 17f6502..d15073e 100644
--- a/gcc/c/ChangeLog
+++ b/gcc/c/ChangeLog
@@ -1,3 +1,10 @@
+2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * c-typeck.c (comptypes_internal, build_binary_op): Handle polynomial
+ TYPE_VECTOR_SUBPARTS.
+
2018-01-03 Jakub Jelinek <jakub@redhat.com>
Update copyright years.
diff --git a/gcc/c/c-typeck.c b/gcc/c/c-typeck.c
index d281cc6..33b4364 100644
--- a/gcc/c/c-typeck.c
+++ b/gcc/c/c-typeck.c
@@ -1237,7 +1237,7 @@ comptypes_internal (const_tree type1, const_tree type2, bool *enum_and_int_p,
break;
case VECTOR_TYPE:
- val = (TYPE_VECTOR_SUBPARTS (t1) == TYPE_VECTOR_SUBPARTS (t2)
+ val = (known_eq (TYPE_VECTOR_SUBPARTS (t1), TYPE_VECTOR_SUBPARTS (t2))
&& comptypes_internal (TREE_TYPE (t1), TREE_TYPE (t2),
enum_and_int_p, different_types_p));
break;
@@ -11346,7 +11346,8 @@ build_binary_op (location_t location, enum tree_code code,
if (code0 == VECTOR_TYPE && code1 == VECTOR_TYPE
&& TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE
&& TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE
- && TYPE_VECTOR_SUBPARTS (type0) == TYPE_VECTOR_SUBPARTS (type1))
+ && known_eq (TYPE_VECTOR_SUBPARTS (type0),
+ TYPE_VECTOR_SUBPARTS (type1)))
{
result_type = type0;
converted = 1;
@@ -11403,7 +11404,8 @@ build_binary_op (location_t location, enum tree_code code,
if (code0 == VECTOR_TYPE && code1 == VECTOR_TYPE
&& TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE
&& TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE
- && TYPE_VECTOR_SUBPARTS (type0) == TYPE_VECTOR_SUBPARTS (type1))
+ && known_eq (TYPE_VECTOR_SUBPARTS (type0),
+ TYPE_VECTOR_SUBPARTS (type1)))
{
result_type = type0;
converted = 1;
@@ -11477,7 +11479,8 @@ build_binary_op (location_t location, enum tree_code code,
return error_mark_node;
}
- if (TYPE_VECTOR_SUBPARTS (type0) != TYPE_VECTOR_SUBPARTS (type1))
+ if (maybe_ne (TYPE_VECTOR_SUBPARTS (type0),
+ TYPE_VECTOR_SUBPARTS (type1)))
{
error_at (location, "comparing vectors with different "
"number of elements");
@@ -11637,7 +11640,8 @@ build_binary_op (location_t location, enum tree_code code,
return error_mark_node;
}
- if (TYPE_VECTOR_SUBPARTS (type0) != TYPE_VECTOR_SUBPARTS (type1))
+ if (maybe_ne (TYPE_VECTOR_SUBPARTS (type0),
+ TYPE_VECTOR_SUBPARTS (type1)))
{
error_at (location, "comparing vectors with different "
"number of elements");
diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c
index 257ddfc..f278eb2 100644
--- a/gcc/cfgexpand.c
+++ b/gcc/cfgexpand.c
@@ -4961,9 +4961,11 @@ expand_debug_expr (tree exp)
case VECTOR_CST:
{
- unsigned i, nelts;
+ unsigned HOST_WIDE_INT i, nelts;
+
+ if (!VECTOR_CST_NELTS (exp).is_constant (&nelts))
+ return NULL;
- nelts = VECTOR_CST_NELTS (exp);
op0 = gen_rtx_CONCATN (mode, rtvec_alloc (nelts));
for (i = 0; i < nelts; ++i)
@@ -4983,10 +4985,13 @@ expand_debug_expr (tree exp)
else if (TREE_CODE (TREE_TYPE (exp)) == VECTOR_TYPE)
{
unsigned i;
+ unsigned HOST_WIDE_INT nelts;
tree val;
- op0 = gen_rtx_CONCATN
- (mode, rtvec_alloc (TYPE_VECTOR_SUBPARTS (TREE_TYPE (exp))));
+ if (!TYPE_VECTOR_SUBPARTS (TREE_TYPE (exp)).is_constant (&nelts))
+ goto flag_unsupported;
+
+ op0 = gen_rtx_CONCATN (mode, rtvec_alloc (nelts));
FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), i, val)
{
@@ -4996,7 +5001,7 @@ expand_debug_expr (tree exp)
XVECEXP (op0, 0, i) = op1;
}
- if (i < TYPE_VECTOR_SUBPARTS (TREE_TYPE (exp)))
+ if (i < nelts)
{
op1 = expand_debug_expr
(build_zero_cst (TREE_TYPE (TREE_TYPE (exp))));
@@ -5004,7 +5009,7 @@ expand_debug_expr (tree exp)
if (!op1)
return NULL;
- for (; i < TYPE_VECTOR_SUBPARTS (TREE_TYPE (exp)); i++)
+ for (; i < nelts; i++)
XVECEXP (op0, 0, i) = op1;
}
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index 959f412..73a31df 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,17 @@
+2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * constexpr.c (cxx_eval_array_reference): Handle polynomial
+ VECTOR_CST_NELTS.
+ (cxx_fold_indirect_ref): Handle polynomial TYPE_VECTOR_SUBPARTS.
+ * call.c (build_conditional_expr_1): Likewise.
+ * decl.c (cp_finish_decomp): Likewise.
+ * mangle.c (write_type): Likewise.
+ * typeck.c (structural_comptypes): Likewise.
+ (cp_build_binary_op): Likewise.
+ * typeck2.c (process_init_constructor_array): Likewise.
+
2018-01-03 Jakub Jelinek <jakub@redhat.com>
PR c++/83555
diff --git a/gcc/cp/call.c b/gcc/cp/call.c
index 0d2fa85..1c54bab 100644
--- a/gcc/cp/call.c
+++ b/gcc/cp/call.c
@@ -4919,8 +4919,8 @@ build_conditional_expr_1 (location_t loc, tree arg1, tree arg2, tree arg3,
}
if (!same_type_p (arg2_type, arg3_type)
- || TYPE_VECTOR_SUBPARTS (arg1_type)
- != TYPE_VECTOR_SUBPARTS (arg2_type)
+ || maybe_ne (TYPE_VECTOR_SUBPARTS (arg1_type),
+ TYPE_VECTOR_SUBPARTS (arg2_type))
|| TYPE_SIZE (arg1_type) != TYPE_SIZE (arg2_type))
{
if (complain & tf_error)
diff --git a/gcc/cp/constexpr.c b/gcc/cp/constexpr.c
index 1aeacd5..c91ca96 100644
--- a/gcc/cp/constexpr.c
+++ b/gcc/cp/constexpr.c
@@ -2338,7 +2338,8 @@ cxx_eval_array_reference (const constexpr_ctx *ctx, tree t,
len = (unsigned) TREE_STRING_LENGTH (ary) / elem_nchars;
}
else if (TREE_CODE (ary) == VECTOR_CST)
- len = VECTOR_CST_NELTS (ary);
+ /* We don't create variable-length VECTOR_CSTs. */
+ len = VECTOR_CST_NELTS (ary).to_constant ();
else
{
/* We can't do anything with other tree codes, so use
@@ -3115,7 +3116,8 @@ cxx_fold_indirect_ref (location_t loc, tree type, tree op0, bool *empty_base)
unsigned HOST_WIDE_INT indexi = offset * BITS_PER_UNIT;
tree index = bitsize_int (indexi);
- if (offset / part_widthi < TYPE_VECTOR_SUBPARTS (op00type))
+ if (known_lt (offset / part_widthi,
+ TYPE_VECTOR_SUBPARTS (op00type)))
return fold_build3_loc (loc,
BIT_FIELD_REF, type, op00,
part_width, index);
diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c
index b1c5096..6ba6578 100644
--- a/gcc/cp/decl.c
+++ b/gcc/cp/decl.c
@@ -7500,7 +7500,11 @@ cp_finish_decomp (tree decl, tree first, unsigned int count)
}
else if (TREE_CODE (type) == VECTOR_TYPE)
{
- eltscnt = TYPE_VECTOR_SUBPARTS (type);
+ if (!TYPE_VECTOR_SUBPARTS (type).is_constant (&eltscnt))
+ {
+ error_at (loc, "cannot decompose variable length vector %qT", type);
+ goto error_out;
+ }
if (count != eltscnt)
goto cnt_mismatch;
eltype = cp_build_qualified_type (TREE_TYPE (type), TYPE_QUALS (type));
diff --git a/gcc/cp/mangle.c b/gcc/cp/mangle.c
index 9c7b659..bd74543 100644
--- a/gcc/cp/mangle.c
+++ b/gcc/cp/mangle.c
@@ -2287,7 +2287,8 @@ write_type (tree type)
write_string ("Dv");
/* Non-constant vector size would be encoded with
_ expression, but we don't support that yet. */
- write_unsigned_number (TYPE_VECTOR_SUBPARTS (type));
+ write_unsigned_number (TYPE_VECTOR_SUBPARTS (type)
+ .to_constant ());
write_char ('_');
}
else
diff --git a/gcc/cp/typeck.c b/gcc/cp/typeck.c
index 76fd930..dc04b4b 100644
--- a/gcc/cp/typeck.c
+++ b/gcc/cp/typeck.c
@@ -1412,7 +1412,7 @@ structural_comptypes (tree t1, tree t2, int strict)
break;
case VECTOR_TYPE:
- if (TYPE_VECTOR_SUBPARTS (t1) != TYPE_VECTOR_SUBPARTS (t2)
+ if (maybe_ne (TYPE_VECTOR_SUBPARTS (t1), TYPE_VECTOR_SUBPARTS (t2))
|| !same_type_p (TREE_TYPE (t1), TREE_TYPE (t2)))
return false;
break;
@@ -4585,9 +4585,10 @@ cp_build_binary_op (location_t location,
converted = 1;
}
else if (code0 == VECTOR_TYPE && code1 == VECTOR_TYPE
- && TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE
- && TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE
- && TYPE_VECTOR_SUBPARTS (type0) == TYPE_VECTOR_SUBPARTS (type1))
+ && TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE
+ && TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE
+ && known_eq (TYPE_VECTOR_SUBPARTS (type0),
+ TYPE_VECTOR_SUBPARTS (type1)))
{
result_type = type0;
converted = 1;
@@ -4630,9 +4631,10 @@ cp_build_binary_op (location_t location,
converted = 1;
}
else if (code0 == VECTOR_TYPE && code1 == VECTOR_TYPE
- && TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE
- && TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE
- && TYPE_VECTOR_SUBPARTS (type0) == TYPE_VECTOR_SUBPARTS (type1))
+ && TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE
+ && TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE
+ && known_eq (TYPE_VECTOR_SUBPARTS (type0),
+ TYPE_VECTOR_SUBPARTS (type1)))
{
result_type = type0;
converted = 1;
@@ -4997,7 +4999,8 @@ cp_build_binary_op (location_t location,
return error_mark_node;
}
- if (TYPE_VECTOR_SUBPARTS (type0) != TYPE_VECTOR_SUBPARTS (type1))
+ if (maybe_ne (TYPE_VECTOR_SUBPARTS (type0),
+ TYPE_VECTOR_SUBPARTS (type1)))
{
if (complain & tf_error)
{
diff --git a/gcc/cp/typeck2.c b/gcc/cp/typeck2.c
index cc2cab8..8d93325 100644
--- a/gcc/cp/typeck2.c
+++ b/gcc/cp/typeck2.c
@@ -1292,7 +1292,7 @@ process_init_constructor_array (tree type, tree init, int nested,
}
else
/* Vectors are like simple fixed-size arrays. */
- len = TYPE_VECTOR_SUBPARTS (type);
+ unbounded = !TYPE_VECTOR_SUBPARTS (type).is_constant (&len);
/* There must not be more initializers than needed. */
if (!unbounded && vec_safe_length (v) > len)
diff --git a/gcc/expr.c b/gcc/expr.c
index 280fd70..c6a0ff0 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -5922,7 +5922,13 @@ count_type_elements (const_tree type, bool for_ctor_p)
return 2;
case VECTOR_TYPE:
- return TYPE_VECTOR_SUBPARTS (type);
+ {
+ unsigned HOST_WIDE_INT nelts;
+ if (TYPE_VECTOR_SUBPARTS (type).is_constant (&nelts))
+ return nelts;
+ else
+ return -1;
+ }
case INTEGER_TYPE:
case REAL_TYPE:
@@ -6024,8 +6030,10 @@ categorize_ctor_elements_1 (const_tree ctor, HOST_WIDE_INT *p_nz_elts,
case VECTOR_CST:
{
- unsigned i;
- for (i = 0; i < VECTOR_CST_NELTS (value); ++i)
+ /* We can only construct constant-length vectors using
+ CONSTRUCTOR. */
+ unsigned int nunits = VECTOR_CST_NELTS (value).to_constant ();
+ for (unsigned int i = 0; i < nunits; ++i)
{
tree v = VECTOR_CST_ELT (value, i);
if (!initializer_zerop (v))
@@ -6669,7 +6677,8 @@ store_constructor (tree exp, rtx target, int cleared, poly_int64 size,
HOST_WIDE_INT bitsize;
HOST_WIDE_INT bitpos;
rtvec vector = NULL;
- unsigned n_elts;
+ poly_uint64 n_elts;
+ unsigned HOST_WIDE_INT const_n_elts;
alias_set_type alias;
bool vec_vec_init_p = false;
machine_mode mode = GET_MODE (target);
@@ -6694,7 +6703,9 @@ store_constructor (tree exp, rtx target, int cleared, poly_int64 size,
}
n_elts = TYPE_VECTOR_SUBPARTS (type);
- if (REG_P (target) && VECTOR_MODE_P (mode))
+ if (REG_P (target)
+ && VECTOR_MODE_P (mode)
+ && n_elts.is_constant (&const_n_elts))
{
machine_mode emode = eltmode;
@@ -6703,14 +6714,15 @@ store_constructor (tree exp, rtx target, int cleared, poly_int64 size,
== VECTOR_TYPE))
{
tree etype = TREE_TYPE (CONSTRUCTOR_ELT (exp, 0)->value);
- gcc_assert (CONSTRUCTOR_NELTS (exp) * TYPE_VECTOR_SUBPARTS (etype)
- == n_elts);
+ gcc_assert (known_eq (CONSTRUCTOR_NELTS (exp)
+ * TYPE_VECTOR_SUBPARTS (etype),
+ n_elts));
emode = TYPE_MODE (etype);
}
icode = convert_optab_handler (vec_init_optab, mode, emode);
if (icode != CODE_FOR_nothing)
{
- unsigned int i, n = n_elts;
+ unsigned int i, n = const_n_elts;
if (emode != eltmode)
{
@@ -6749,7 +6761,8 @@ store_constructor (tree exp, rtx target, int cleared, poly_int64 size,
/* Clear the entire vector first if there are any missing elements,
or if the incidence of zero elements is >= 75%. */
- need_to_clear = (count < n_elts || 4 * zero_count >= 3 * count);
+ need_to_clear = (maybe_lt (count, n_elts)
+ || 4 * zero_count >= 3 * count);
}
if (need_to_clear && maybe_gt (size, 0) && !vector)
@@ -10082,9 +10095,10 @@ expand_expr_real_1 (tree exp, rtx target, machine_mode tmode,
if (!tmp)
{
vec<constructor_elt, va_gc> *v;
- unsigned i;
- vec_alloc (v, VECTOR_CST_NELTS (exp));
- for (i = 0; i < VECTOR_CST_NELTS (exp); ++i)
+ /* Constructors need to be fixed-length. FIXME. */
+ unsigned int nunits = VECTOR_CST_NELTS (exp).to_constant ();
+ vec_alloc (v, nunits);
+ for (unsigned int i = 0; i < nunits; ++i)
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, VECTOR_CST_ELT (exp, i));
tmp = build_constructor (type, v);
}
@@ -11837,9 +11851,10 @@ const_scalar_mask_from_tree (scalar_int_mode mode, tree exp)
{
wide_int res = wi::zero (GET_MODE_PRECISION (mode));
tree elt;
- unsigned i;
- for (i = 0; i < VECTOR_CST_NELTS (exp); ++i)
+ /* The result has a fixed number of bits so the input must too. */
+ unsigned int nunits = VECTOR_CST_NELTS (exp).to_constant ();
+ for (unsigned int i = 0; i < nunits; ++i)
{
elt = VECTOR_CST_ELT (exp, i);
gcc_assert (TREE_CODE (elt) == INTEGER_CST);
diff --git a/gcc/fold-const-call.c b/gcc/fold-const-call.c
index f4d15b6..7e3cd1e 100644
--- a/gcc/fold-const-call.c
+++ b/gcc/fold-const-call.c
@@ -588,12 +588,13 @@ fold_const_builtin_nan (tree type, tree arg, bool quiet)
static tree
fold_const_reduction (tree type, tree arg, tree_code code)
{
- if (TREE_CODE (arg) != VECTOR_CST)
+ unsigned HOST_WIDE_INT nelts;
+ if (TREE_CODE (arg) != VECTOR_CST
+ || !VECTOR_CST_NELTS (arg).is_constant (&nelts))
return NULL_TREE;
tree res = VECTOR_CST_ELT (arg, 0);
- unsigned int nelts = VECTOR_CST_NELTS (arg);
- for (unsigned int i = 1; i < nelts; i++)
+ for (unsigned HOST_WIDE_INT i = 1; i < nelts; i++)
{
res = const_binop (code, type, res, VECTOR_CST_ELT (arg, i));
if (res == NULL_TREE || !CONSTANT_CLASS_P (res))
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index 9f558e2..eabaa4a 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -1500,8 +1500,8 @@ const_binop (enum tree_code code, tree arg1, tree arg2)
if (TREE_CODE (arg1) == VECTOR_CST
&& TREE_CODE (arg2) == VECTOR_CST
- && (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg1))
- == TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg2))))
+ && known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg1)),
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg2))))
{
tree type = TREE_TYPE (arg1);
bool step_ok_p;
@@ -1617,16 +1617,18 @@ const_binop (enum tree_code code, tree type, tree arg1, tree arg2)
case VEC_PACK_TRUNC_EXPR:
case VEC_PACK_FIX_TRUNC_EXPR:
{
- unsigned int out_nelts, in_nelts, i;
+ unsigned HOST_WIDE_INT out_nelts, in_nelts, i;
if (TREE_CODE (arg1) != VECTOR_CST
|| TREE_CODE (arg2) != VECTOR_CST)
return NULL_TREE;
- in_nelts = VECTOR_CST_NELTS (arg1);
+ if (!VECTOR_CST_NELTS (arg1).is_constant (&in_nelts))
+ return NULL_TREE;
+
out_nelts = in_nelts * 2;
- gcc_assert (in_nelts == VECTOR_CST_NELTS (arg2)
- && out_nelts == TYPE_VECTOR_SUBPARTS (type));
+ gcc_assert (known_eq (in_nelts, VECTOR_CST_NELTS (arg2))
+ && known_eq (out_nelts, TYPE_VECTOR_SUBPARTS (type)));
tree_vector_builder elts (type, out_nelts, 1);
for (i = 0; i < out_nelts; i++)
@@ -1650,15 +1652,16 @@ const_binop (enum tree_code code, tree type, tree arg1, tree arg2)
case VEC_WIDEN_MULT_EVEN_EXPR:
case VEC_WIDEN_MULT_ODD_EXPR:
{
- unsigned int out_nelts, in_nelts, out, ofs, scale;
+ unsigned HOST_WIDE_INT out_nelts, in_nelts, out, ofs, scale;
if (TREE_CODE (arg1) != VECTOR_CST || TREE_CODE (arg2) != VECTOR_CST)
return NULL_TREE;
- in_nelts = VECTOR_CST_NELTS (arg1);
+ if (!VECTOR_CST_NELTS (arg1).is_constant (&in_nelts))
+ return NULL_TREE;
out_nelts = in_nelts / 2;
- gcc_assert (in_nelts == VECTOR_CST_NELTS (arg2)
- && out_nelts == TYPE_VECTOR_SUBPARTS (type));
+ gcc_assert (known_eq (in_nelts, VECTOR_CST_NELTS (arg2))
+ && known_eq (out_nelts, TYPE_VECTOR_SUBPARTS (type)));
if (code == VEC_WIDEN_MULT_LO_EXPR)
scale = 0, ofs = BYTES_BIG_ENDIAN ? out_nelts : 0;
@@ -1809,15 +1812,16 @@ const_unop (enum tree_code code, tree type, tree arg0)
case VEC_UNPACK_FLOAT_LO_EXPR:
case VEC_UNPACK_FLOAT_HI_EXPR:
{
- unsigned int out_nelts, in_nelts, i;
+ unsigned HOST_WIDE_INT out_nelts, in_nelts, i;
enum tree_code subcode;
if (TREE_CODE (arg0) != VECTOR_CST)
return NULL_TREE;
- in_nelts = VECTOR_CST_NELTS (arg0);
+ if (!VECTOR_CST_NELTS (arg0).is_constant (&in_nelts))
+ return NULL_TREE;
out_nelts = in_nelts / 2;
- gcc_assert (out_nelts == TYPE_VECTOR_SUBPARTS (type));
+ gcc_assert (known_eq (out_nelts, TYPE_VECTOR_SUBPARTS (type)));
unsigned int offset = 0;
if ((!BYTES_BIG_ENDIAN) ^ (code == VEC_UNPACK_LO_EXPR
@@ -2275,7 +2279,7 @@ fold_convert_const (enum tree_code code, tree type, tree arg1)
else if (TREE_CODE (type) == VECTOR_TYPE)
{
if (TREE_CODE (arg1) == VECTOR_CST
- && TYPE_VECTOR_SUBPARTS (type) == VECTOR_CST_NELTS (arg1))
+ && known_eq (TYPE_VECTOR_SUBPARTS (type), VECTOR_CST_NELTS (arg1)))
{
tree elttype = TREE_TYPE (type);
tree arg1_elttype = TREE_TYPE (TREE_TYPE (arg1));
@@ -3429,8 +3433,8 @@ operand_equal_p (const_tree arg0, const_tree arg1, unsigned int flags)
We only tested element precision and modes to match.
Vectors may be BLKmode and thus also check that the number of
parts match. */
- if (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0))
- != TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg1)))
+ if (maybe_ne (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0)),
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg1))))
return 0;
vec<constructor_elt, va_gc> *v0 = CONSTRUCTOR_ELTS (arg0);
@@ -7290,12 +7294,13 @@ native_encode_complex (const_tree expr, unsigned char *ptr, int len, int off)
static int
native_encode_vector (const_tree expr, unsigned char *ptr, int len, int off)
{
- unsigned i, count;
+ unsigned HOST_WIDE_INT i, count;
int size, offset;
tree itype, elem;
offset = 0;
- count = VECTOR_CST_NELTS (expr);
+ if (!VECTOR_CST_NELTS (expr).is_constant (&count))
+ return 0;
itype = TREE_TYPE (TREE_TYPE (expr));
size = GET_MODE_SIZE (SCALAR_TYPE_MODE (itype));
for (i = 0; i < count; i++)
@@ -7532,15 +7537,16 @@ native_interpret_complex (tree type, const unsigned char *ptr, int len)
If the buffer cannot be interpreted, return NULL_TREE. */
static tree
-native_interpret_vector (tree type, const unsigned char *ptr, int len)
+native_interpret_vector (tree type, const unsigned char *ptr, unsigned int len)
{
tree etype, elem;
- int i, size, count;
+ unsigned int i, size;
+ unsigned HOST_WIDE_INT count;
etype = TREE_TYPE (type);
size = GET_MODE_SIZE (SCALAR_TYPE_MODE (etype));
- count = TYPE_VECTOR_SUBPARTS (type);
- if (size * count > len)
+ if (!TYPE_VECTOR_SUBPARTS (type).is_constant (&count)
+ || size * count > len)
return NULL_TREE;
tree_vector_builder elements (type, count, 1);
@@ -8894,11 +8900,12 @@ fold_mult_zconjz (location_t loc, tree type, tree expr)
static bool
vec_cst_ctor_to_array (tree arg, unsigned int nelts, tree *elts)
{
- unsigned int i;
+ unsigned HOST_WIDE_INT i, nunits;
- if (TREE_CODE (arg) == VECTOR_CST)
+ if (TREE_CODE (arg) == VECTOR_CST
+ && VECTOR_CST_NELTS (arg).is_constant (&nunits))
{
- for (i = 0; i < VECTOR_CST_NELTS (arg); ++i)
+ for (i = 0; i < nunits; ++i)
elts[i] = VECTOR_CST_ELT (arg, i);
}
else if (TREE_CODE (arg) == CONSTRUCTOR)
@@ -8932,9 +8939,9 @@ fold_vec_perm (tree type, tree arg0, tree arg1, const vec_perm_indices &sel)
if (!sel.length ().is_constant (&nelts))
return NULL_TREE;
- gcc_assert (TYPE_VECTOR_SUBPARTS (type) == nelts
- && TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0)) == nelts
- && TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg1)) == nelts);
+ gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (type), nelts)
+ && known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0)), nelts)
+ && known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg1)), nelts));
if (TREE_TYPE (TREE_TYPE (arg0)) != TREE_TYPE (type)
|| TREE_TYPE (TREE_TYPE (arg1)) != TREE_TYPE (type))
return NULL_TREE;
@@ -11371,15 +11378,15 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type,
}
else if (TREE_CODE (arg0) == VECTOR_CST)
{
+ unsigned HOST_WIDE_INT nelts;
if ((TREE_CODE (arg1) == VECTOR_CST
|| TREE_CODE (arg1) == CONSTRUCTOR)
&& (TREE_CODE (arg2) == VECTOR_CST
- || TREE_CODE (arg2) == CONSTRUCTOR))
+ || TREE_CODE (arg2) == CONSTRUCTOR)
+ && TYPE_VECTOR_SUBPARTS (type).is_constant (&nelts))
{
- unsigned int nelts = VECTOR_CST_NELTS (arg0), i;
- gcc_assert (nelts == TYPE_VECTOR_SUBPARTS (type));
vec_perm_builder sel (nelts, nelts, 1);
- for (i = 0; i < nelts; i++)
+ for (unsigned int i = 0; i < nelts; i++)
{
tree val = VECTOR_CST_ELT (arg0, i);
if (integer_all_onesp (val))
@@ -11644,7 +11651,8 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type,
if (n != 0
&& (idx % width) == 0
&& (n % width) == 0
- && ((idx + n) / width) <= TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0)))
+ && known_le ((idx + n) / width,
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0))))
{
idx = idx / width;
n = n / width;
@@ -11716,7 +11724,7 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type,
return NULL_TREE;
/* Create a vec_perm_indices for the integer vector. */
- unsigned int nelts = TYPE_VECTOR_SUBPARTS (type);
+ poly_uint64 nelts = TYPE_VECTOR_SUBPARTS (type);
bool single_arg = (op0 == op1);
vec_perm_indices sel (builder, single_arg ? 1 : 2, nelts);
@@ -11803,14 +11811,14 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type,
if (bitpos % elsize == 0)
{
unsigned k = bitpos / elsize;
+ unsigned HOST_WIDE_INT nelts;
if (operand_equal_p (VECTOR_CST_ELT (arg0, k), arg1, 0))
return arg0;
- else
+ else if (VECTOR_CST_NELTS (arg0).is_constant (&nelts))
{
- unsigned int nelts = VECTOR_CST_NELTS (arg0);
tree_vector_builder elts (type, nelts, 1);
elts.quick_grow (nelts);
- for (unsigned int i = 0; i < nelts; ++i)
+ for (unsigned HOST_WIDE_INT i = 0; i < nelts; ++i)
elts[i] = (i == k ? arg1 : VECTOR_CST_ELT (arg0, i));
return elts.build ();
}
@@ -13937,8 +13945,12 @@ fold_relational_const (enum tree_code code, tree type, tree op0, tree op1)
{
/* Have vector comparison with scalar boolean result. */
gcc_assert ((code == EQ_EXPR || code == NE_EXPR)
- && VECTOR_CST_NELTS (op0) == VECTOR_CST_NELTS (op1));
- for (unsigned i = 0; i < VECTOR_CST_NELTS (op0); i++)
+ && known_eq (VECTOR_CST_NELTS (op0),
+ VECTOR_CST_NELTS (op1)));
+ unsigned HOST_WIDE_INT nunits;
+ if (!VECTOR_CST_NELTS (op0).is_constant (&nunits))
+ return NULL_TREE;
+ for (unsigned i = 0; i < nunits; i++)
{
tree elem0 = VECTOR_CST_ELT (op0, i);
tree elem1 = VECTOR_CST_ELT (op1, i);
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index e4661a8..6188c25 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -1,3 +1,9 @@
+2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * trans-types.c (gfc_type_for_mode): Check valid_vector_subparts_p.
+
2018-01-03 Thomas Koenig <tkoenig@gcc.gnu.org>
PR fortran/83664
diff --git a/gcc/fortran/trans-types.c b/gcc/fortran/trans-types.c
index bc32d62..82415be 100644
--- a/gcc/fortran/trans-types.c
+++ b/gcc/fortran/trans-types.c
@@ -3185,7 +3185,8 @@ gfc_type_for_mode (machine_mode mode, int unsignedp)
tree type = gfc_type_for_size (GET_MODE_PRECISION (int_mode), unsignedp);
return type != NULL_TREE && mode == TYPE_MODE (type) ? type : NULL_TREE;
}
- else if (VECTOR_MODE_P (mode))
+ else if (VECTOR_MODE_P (mode)
+ && valid_vector_subparts_p (GET_MODE_NUNITS (mode)))
{
machine_mode inner_mode = GET_MODE_INNER (mode);
tree inner_type = gfc_type_for_mode (inner_mode, unsignedp);
diff --git a/gcc/gimple-fold.c b/gcc/gimple-fold.c
index 66aced6..99f265e 100644
--- a/gcc/gimple-fold.c
+++ b/gcc/gimple-fold.c
@@ -6079,13 +6079,13 @@ gimple_fold_stmt_to_constant_1 (gimple *stmt, tree (*valueize) (tree),
}
else if (TREE_CODE (rhs) == CONSTRUCTOR
&& TREE_CODE (TREE_TYPE (rhs)) == VECTOR_TYPE
- && (CONSTRUCTOR_NELTS (rhs)
- == TYPE_VECTOR_SUBPARTS (TREE_TYPE (rhs))))
+ && known_eq (CONSTRUCTOR_NELTS (rhs),
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (rhs))))
{
unsigned i, nelts;
tree val;
- nelts = TYPE_VECTOR_SUBPARTS (TREE_TYPE (rhs));
+ nelts = CONSTRUCTOR_NELTS (rhs);
tree_vector_builder vec (TREE_TYPE (rhs), nelts, 1);
FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (rhs), i, val)
{
@@ -6930,8 +6930,8 @@ gimple_fold_indirect_ref (tree t)
= tree_to_shwi (part_width) / BITS_PER_UNIT;
unsigned HOST_WIDE_INT indexi = offset * BITS_PER_UNIT;
tree index = bitsize_int (indexi);
- if (offset / part_widthi
- < TYPE_VECTOR_SUBPARTS (TREE_TYPE (addrtype)))
+ if (known_lt (offset / part_widthi,
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (addrtype))))
return fold_build3 (BIT_FIELD_REF, type, TREE_OPERAND (addr, 0),
part_width, index);
}
@@ -7233,6 +7233,10 @@ tree
gimple_build_vector_from_val (gimple_seq *seq, location_t loc, tree type,
tree op)
{
+ if (!TYPE_VECTOR_SUBPARTS (type).is_constant ()
+ && !CONSTANT_CLASS_P (op))
+ return gimple_build (seq, loc, VEC_DUPLICATE_EXPR, type, op);
+
tree res, vec = build_vector_from_val (type, op);
if (is_gimple_val (vec))
return vec;
@@ -7265,7 +7269,7 @@ gimple_build_vector (gimple_seq *seq, location_t loc,
if (!TREE_CONSTANT ((*builder)[i]))
{
tree type = builder->type ();
- unsigned int nelts = TYPE_VECTOR_SUBPARTS (type);
+ unsigned int nelts = TYPE_VECTOR_SUBPARTS (type).to_constant ();
vec<constructor_elt, va_gc> *v;
vec_alloc (v, nelts);
for (i = 0; i < nelts; ++i)
diff --git a/gcc/go/ChangeLog b/gcc/go/ChangeLog
index d7fbce1..727254b 100644
--- a/gcc/go/ChangeLog
+++ b/gcc/go/ChangeLog
@@ -1,3 +1,9 @@
+2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * go-lang.c (go_langhook_type_for_mode): Check valid_vector_subparts_p.
+
2018-01-03 Jakub Jelinek <jakub@redhat.com>
Update copyright years.
diff --git a/gcc/go/go-lang.c b/gcc/go/go-lang.c
index 44fdaab..4a05799 100644
--- a/gcc/go/go-lang.c
+++ b/gcc/go/go-lang.c
@@ -377,7 +377,8 @@ go_langhook_type_for_mode (machine_mode mode, int unsignedp)
make sense for the middle-end to ask the frontend for a type
which the frontend does not support. However, at least for now
it is required. See PR 46805. */
- if (VECTOR_MODE_P (mode))
+ if (VECTOR_MODE_P (mode)
+ && valid_vector_subparts_p (GET_MODE_NUNITS (mode)))
{
tree inner;
diff --git a/gcc/hsa-brig.c b/gcc/hsa-brig.c
index 83d3464..d3efff4 100644
--- a/gcc/hsa-brig.c
+++ b/gcc/hsa-brig.c
@@ -963,7 +963,8 @@ hsa_op_immed::emit_to_buffer (unsigned *brig_repr_size)
if (TREE_CODE (m_tree_value) == VECTOR_CST)
{
- int i, num = VECTOR_CST_NELTS (m_tree_value);
+ /* Variable-length vectors aren't supported. */
+ int i, num = VECTOR_CST_NELTS (m_tree_value).to_constant ();
for (i = 0; i < num; i++)
{
tree v = VECTOR_CST_ELT (m_tree_value, i);
diff --git a/gcc/ipa-icf.c b/gcc/ipa-icf.c
index eff2f42..edb0b789 100644
--- a/gcc/ipa-icf.c
+++ b/gcc/ipa-icf.c
@@ -2025,8 +2025,8 @@ sem_variable::equals (tree t1, tree t2)
&TREE_REAL_CST (t2)));
case VECTOR_CST:
{
- if (VECTOR_CST_NELTS (t1) != VECTOR_CST_NELTS (t2))
- return return_false_with_msg ("VECTOR_CST nelts mismatch");
+ if (maybe_ne (VECTOR_CST_NELTS (t1), VECTOR_CST_NELTS (t2)))
+ return return_false_with_msg ("VECTOR_CST nelts mismatch");
unsigned int count
= tree_vector_builder::binary_encoded_nelts (t1, t2);
diff --git a/gcc/lto/ChangeLog b/gcc/lto/ChangeLog
index b9e290a..8240d9f3 100644
--- a/gcc/lto/ChangeLog
+++ b/gcc/lto/ChangeLog
@@ -1,3 +1,10 @@
+2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * lto-lang.c (lto_type_for_mode): Check valid_vector_subparts_p.
+ * lto.c (hash_canonical_type): Handle polynomial TYPE_VECTOR_SUBPARTS.
+
2018-01-03 Jakub Jelinek <jakub@redhat.com>
Update copyright years.
diff --git a/gcc/lto/lto-lang.c b/gcc/lto/lto-lang.c
index c52f943..81a2035 100644
--- a/gcc/lto/lto-lang.c
+++ b/gcc/lto/lto-lang.c
@@ -1012,7 +1012,8 @@ lto_type_for_mode (machine_mode mode, int unsigned_p)
if (inner_type != NULL_TREE)
return build_complex_type (inner_type);
}
- else if (VECTOR_MODE_P (mode))
+ else if (VECTOR_MODE_P (mode)
+ && valid_vector_subparts_p (GET_MODE_NUNITS (mode)))
{
machine_mode inner_mode = GET_MODE_INNER (mode);
tree inner_type = lto_type_for_mode (inner_mode, unsigned_p);
diff --git a/gcc/lto/lto.c b/gcc/lto/lto.c
index 01f2814..a8707d4 100644
--- a/gcc/lto/lto.c
+++ b/gcc/lto/lto.c
@@ -316,7 +316,7 @@ hash_canonical_type (tree type)
if (VECTOR_TYPE_P (type))
{
- hstate.add_int (TYPE_VECTOR_SUBPARTS (type));
+ hstate.add_poly_int (TYPE_VECTOR_SUBPARTS (type));
hstate.add_int (TYPE_UNSIGNED (type));
}
diff --git a/gcc/match.pd b/gcc/match.pd
index f4cd5e0..f189cb1 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -83,7 +83,8 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(match (nop_convert @0)
(view_convert @0)
(if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
- && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0))
+ && known_eq (TYPE_VECTOR_SUBPARTS (type),
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0)))
&& tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
/* This one has to be last, or it shadows the others. */
(match (nop_convert @0)
@@ -2849,7 +2850,8 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(simplify
(plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
(if (VECTOR_TYPE_P (type)
- && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1))
+ && known_eq (TYPE_VECTOR_SUBPARTS (type),
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
&& (TYPE_MODE (TREE_TYPE (type))
== TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
(minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
@@ -2858,7 +2860,8 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(simplify
(minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
(if (VECTOR_TYPE_P (type)
- && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1))
+ && known_eq (TYPE_VECTOR_SUBPARTS (type),
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
&& (TYPE_MODE (TREE_TYPE (type))
== TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
(plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
@@ -4524,7 +4527,8 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(if (n != 0
&& (idx % width) == 0
&& (n % width) == 0
- && ((idx + n) / width) <= TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor)))
+ && known_le ((idx + n) / width,
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor))))
(with
{
idx = idx / width;
diff --git a/gcc/omp-simd-clone.c b/gcc/omp-simd-clone.c
index f654dd9..b7737a2 100644
--- a/gcc/omp-simd-clone.c
+++ b/gcc/omp-simd-clone.c
@@ -57,7 +57,7 @@ along with GCC; see the file COPYING3. If not see
static unsigned HOST_WIDE_INT
simd_clone_subparts (tree vectype)
{
- return TYPE_VECTOR_SUBPARTS (vectype);
+ return TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
}
/* Allocate a fresh `simd_clone' and return it. NARGS is the number
diff --git a/gcc/print-tree.c b/gcc/print-tree.c
index 343d057..cba8bac 100644
--- a/gcc/print-tree.c
+++ b/gcc/print-tree.c
@@ -630,7 +630,10 @@ print_node (FILE *file, const char *prefix, tree node, int indent,
else if (code == ARRAY_TYPE)
print_node (file, "domain", TYPE_DOMAIN (node), indent + 4);
else if (code == VECTOR_TYPE)
- fprintf (file, " nunits:%d", (int) TYPE_VECTOR_SUBPARTS (node));
+ {
+ fprintf (file, " nunits:");
+ print_dec (TYPE_VECTOR_SUBPARTS (node), file);
+ }
else if (code == RECORD_TYPE
|| code == UNION_TYPE
|| code == QUAL_UNION_TYPE)
diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c
index f57fe7a..af3303b 100644
--- a/gcc/stor-layout.c
+++ b/gcc/stor-layout.c
@@ -2271,11 +2271,9 @@ layout_type (tree type)
case VECTOR_TYPE:
{
- int nunits = TYPE_VECTOR_SUBPARTS (type);
+ poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type);
tree innertype = TREE_TYPE (type);
- gcc_assert (!(nunits & (nunits - 1)));
-
/* Find an appropriate mode for the vector type. */
if (TYPE_MODE (type) == VOIDmode)
SET_TYPE_MODE (type,
diff --git a/gcc/targhooks.c b/gcc/targhooks.c
index c9520ea..f98cdc9 100644
--- a/gcc/targhooks.c
+++ b/gcc/targhooks.c
@@ -713,7 +713,7 @@ default_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
return 3;
case vec_construct:
- return TYPE_VECTOR_SUBPARTS (vectype) - 1;
+ return estimated_poly_value (TYPE_VECTOR_SUBPARTS (vectype)) - 1;
default:
gcc_unreachable ();
diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c
index b1c3cad..a0cc1ed 100644
--- a/gcc/tree-cfg.c
+++ b/gcc/tree-cfg.c
@@ -3775,7 +3775,8 @@ verify_gimple_comparison (tree type, tree op0, tree op1, enum tree_code code)
return true;
}
- if (TYPE_VECTOR_SUBPARTS (type) != TYPE_VECTOR_SUBPARTS (op0_type))
+ if (maybe_ne (TYPE_VECTOR_SUBPARTS (type),
+ TYPE_VECTOR_SUBPARTS (op0_type)))
{
error ("invalid vector comparison resulting type");
debug_generic_expr (type);
@@ -4214,8 +4215,8 @@ verify_gimple_assign_binary (gassign *stmt)
if (VECTOR_BOOLEAN_TYPE_P (lhs_type)
&& VECTOR_BOOLEAN_TYPE_P (rhs1_type)
&& types_compatible_p (rhs1_type, rhs2_type)
- && (TYPE_VECTOR_SUBPARTS (lhs_type)
- == 2 * TYPE_VECTOR_SUBPARTS (rhs1_type)))
+ && known_eq (TYPE_VECTOR_SUBPARTS (lhs_type),
+ 2 * TYPE_VECTOR_SUBPARTS (rhs1_type)))
return false;
/* Fallthru. */
@@ -4365,8 +4366,8 @@ verify_gimple_assign_ternary (gassign *stmt)
case VEC_COND_EXPR:
if (!VECTOR_BOOLEAN_TYPE_P (rhs1_type)
- || TYPE_VECTOR_SUBPARTS (rhs1_type)
- != TYPE_VECTOR_SUBPARTS (lhs_type))
+ || maybe_ne (TYPE_VECTOR_SUBPARTS (rhs1_type),
+ TYPE_VECTOR_SUBPARTS (lhs_type)))
{
error ("the first argument of a VEC_COND_EXPR must be of a "
"boolean vector type of the same number of elements "
@@ -4412,11 +4413,12 @@ verify_gimple_assign_ternary (gassign *stmt)
return true;
}
- if (TYPE_VECTOR_SUBPARTS (rhs1_type) != TYPE_VECTOR_SUBPARTS (rhs2_type)
- || TYPE_VECTOR_SUBPARTS (rhs2_type)
- != TYPE_VECTOR_SUBPARTS (rhs3_type)
- || TYPE_VECTOR_SUBPARTS (rhs3_type)
- != TYPE_VECTOR_SUBPARTS (lhs_type))
+ if (maybe_ne (TYPE_VECTOR_SUBPARTS (rhs1_type),
+ TYPE_VECTOR_SUBPARTS (rhs2_type))
+ || maybe_ne (TYPE_VECTOR_SUBPARTS (rhs2_type),
+ TYPE_VECTOR_SUBPARTS (rhs3_type))
+ || maybe_ne (TYPE_VECTOR_SUBPARTS (rhs3_type),
+ TYPE_VECTOR_SUBPARTS (lhs_type)))
{
error ("vectors with different element number found "
"in vector permute expression");
@@ -4699,9 +4701,9 @@ verify_gimple_assign_single (gassign *stmt)
debug_generic_stmt (rhs1);
return true;
}
- else if (CONSTRUCTOR_NELTS (rhs1)
- * TYPE_VECTOR_SUBPARTS (elt_t)
- != TYPE_VECTOR_SUBPARTS (rhs1_type))
+ else if (maybe_ne (CONSTRUCTOR_NELTS (rhs1)
+ * TYPE_VECTOR_SUBPARTS (elt_t),
+ TYPE_VECTOR_SUBPARTS (rhs1_type)))
{
error ("incorrect number of vector CONSTRUCTOR"
" elements");
@@ -4716,8 +4718,8 @@ verify_gimple_assign_single (gassign *stmt)
debug_generic_stmt (rhs1);
return true;
}
- else if (CONSTRUCTOR_NELTS (rhs1)
- > TYPE_VECTOR_SUBPARTS (rhs1_type))
+ else if (maybe_gt (CONSTRUCTOR_NELTS (rhs1),
+ TYPE_VECTOR_SUBPARTS (rhs1_type)))
{
error ("incorrect number of vector CONSTRUCTOR elements");
debug_generic_stmt (rhs1);
diff --git a/gcc/tree-pretty-print.c b/gcc/tree-pretty-print.c
index e4a1f08..c6cd65f 100644
--- a/gcc/tree-pretty-print.c
+++ b/gcc/tree-pretty-print.c
@@ -1793,13 +1793,18 @@ dump_generic_node (pretty_printer *pp, tree node, int spc, dump_flags_t flags,
{
unsigned i;
pp_string (pp, "{ ");
- for (i = 0; i < VECTOR_CST_NELTS (node); ++i)
+ unsigned HOST_WIDE_INT nunits;
+ if (!VECTOR_CST_NELTS (node).is_constant (&nunits))
+ nunits = vector_cst_encoded_nelts (node);
+ for (i = 0; i < nunits; ++i)
{
if (i != 0)
pp_string (pp, ", ");
dump_generic_node (pp, VECTOR_CST_ELT (node, i),
spc, flags, false);
}
+ if (!VECTOR_CST_NELTS (node).is_constant ())
+ pp_string (pp, ", ...");
pp_string (pp, " }");
}
break;
diff --git a/gcc/tree-ssa-forwprop.c b/gcc/tree-ssa-forwprop.c
index a51b86c..8ddef99 100644
--- a/gcc/tree-ssa-forwprop.c
+++ b/gcc/tree-ssa-forwprop.c
@@ -1824,11 +1824,11 @@ simplify_bitfield_ref (gimple_stmt_iterator *gsi)
&& constant_multiple_p (bit_field_offset (op), size, &idx))
{
tree p, m, tem;
- unsigned nelts;
+ unsigned HOST_WIDE_INT nelts;
m = gimple_assign_rhs3 (def_stmt);
- if (TREE_CODE (m) != VECTOR_CST)
+ if (TREE_CODE (m) != VECTOR_CST
+ || !VECTOR_CST_NELTS (m).is_constant (&nelts))
return false;
- nelts = VECTOR_CST_NELTS (m);
idx = TREE_INT_CST_LOW (VECTOR_CST_ELT (m, idx));
idx %= 2 * nelts;
if (idx < nelts)
@@ -1858,7 +1858,7 @@ static int
is_combined_permutation_identity (tree mask1, tree mask2)
{
tree mask;
- unsigned int nelts, i, j;
+ unsigned HOST_WIDE_INT nelts, i, j;
bool maybe_identity1 = true;
bool maybe_identity2 = true;
@@ -1867,7 +1867,8 @@ is_combined_permutation_identity (tree mask1, tree mask2)
mask = fold_ternary (VEC_PERM_EXPR, TREE_TYPE (mask1), mask1, mask1, mask2);
gcc_assert (TREE_CODE (mask) == VECTOR_CST);
- nelts = VECTOR_CST_NELTS (mask);
+ if (!VECTOR_CST_NELTS (mask).is_constant (&nelts))
+ return 0;
for (i = 0; i < nelts; i++)
{
tree val = VECTOR_CST_ELT (mask, i);
@@ -2003,7 +2004,8 @@ simplify_vector_constructor (gimple_stmt_iterator *gsi)
gimple *stmt = gsi_stmt (*gsi);
gimple *def_stmt;
tree op, op2, orig, type, elem_type;
- unsigned elem_size, nelts, i;
+ unsigned elem_size, i;
+ unsigned HOST_WIDE_INT nelts;
enum tree_code code, conv_code;
constructor_elt *elt;
bool maybe_ident;
@@ -2014,7 +2016,8 @@ simplify_vector_constructor (gimple_stmt_iterator *gsi)
type = TREE_TYPE (op);
gcc_checking_assert (TREE_CODE (type) == VECTOR_TYPE);
- nelts = TYPE_VECTOR_SUBPARTS (type);
+ if (!TYPE_VECTOR_SUBPARTS (type).is_constant (&nelts))
+ return false;
elem_type = TREE_TYPE (type);
elem_size = TREE_INT_CST_LOW (TYPE_SIZE (elem_type));
@@ -2086,8 +2089,8 @@ simplify_vector_constructor (gimple_stmt_iterator *gsi)
return false;
if (! VECTOR_TYPE_P (TREE_TYPE (orig))
- || (TYPE_VECTOR_SUBPARTS (type)
- != TYPE_VECTOR_SUBPARTS (TREE_TYPE (orig))))
+ || maybe_ne (TYPE_VECTOR_SUBPARTS (type),
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (orig))))
return false;
tree tem;
diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c
index 2fad890..c1005ee 100644
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -4772,7 +4772,7 @@ vect_permute_store_chain (vec<tree> dr_chain,
if (length == 3)
{
/* vect_grouped_store_supported ensures that this is constant. */
- unsigned int nelt = TYPE_VECTOR_SUBPARTS (vectype);
+ unsigned int nelt = TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
unsigned int j0 = 0, j1 = 0, j2 = 0;
vec_perm_builder sel (nelt, nelt, 1);
@@ -4839,7 +4839,7 @@ vect_permute_store_chain (vec<tree> dr_chain,
gcc_assert (pow2p_hwi (length));
/* The encoding has 2 interleaved stepped patterns. */
- unsigned int nelt = TYPE_VECTOR_SUBPARTS (vectype);
+ poly_uint64 nelt = TYPE_VECTOR_SUBPARTS (vectype);
vec_perm_builder sel (nelt, 2, 3);
sel.quick_grow (6);
for (i = 0; i < 3; i++)
@@ -4851,7 +4851,7 @@ vect_permute_store_chain (vec<tree> dr_chain,
perm_mask_high = vect_gen_perm_mask_checked (vectype, indices);
for (i = 0; i < 6; i++)
- sel[i] += nelt / 2;
+ sel[i] += exact_div (nelt, 2);
indices.new_vector (sel, 2, nelt);
perm_mask_low = vect_gen_perm_mask_checked (vectype, indices);
@@ -5174,7 +5174,7 @@ vect_grouped_load_supported (tree vectype, bool single_element_p,
that leaves unused vector loads around punt - we at least create
very sub-optimal code in that case (and blow up memory,
see PR65518). */
- if (single_element_p && count > TYPE_VECTOR_SUBPARTS (vectype))
+ if (single_element_p && maybe_gt (count, TYPE_VECTOR_SUBPARTS (vectype)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -5385,7 +5385,7 @@ vect_permute_load_chain (vec<tree> dr_chain,
if (length == 3)
{
/* vect_grouped_load_supported ensures that this is constant. */
- unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
+ unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
unsigned int k;
vec_perm_builder sel (nelt, nelt, 1);
@@ -5438,7 +5438,7 @@ vect_permute_load_chain (vec<tree> dr_chain,
gcc_assert (pow2p_hwi (length));
/* The encoding has a single stepped pattern. */
- unsigned int nelt = TYPE_VECTOR_SUBPARTS (vectype);
+ poly_uint64 nelt = TYPE_VECTOR_SUBPARTS (vectype);
vec_perm_builder sel (nelt, 1, 3);
sel.quick_grow (3);
for (i = 0; i < 3; ++i)
@@ -5581,12 +5581,12 @@ vect_shift_permute_load_chain (vec<tree> dr_chain,
tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
unsigned int i;
- unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- unsigned HOST_WIDE_INT vf;
- if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
+ unsigned HOST_WIDE_INT nelt, vf;
+ if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nelt)
+ || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
/* Not supported for variable-length vectors. */
return false;
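
The half-vector offset above shows why exact_div is used instead of a plain "/": the division has to be exact for every runtime length, and exact_div both documents and checks that. A worked example with an SVE-style count:

  (8 + 8x) / 2  =  4 + 4x    both coefficients divide evenly, so exact_div succeeds
  (8 + 8x) / 3               8 is not a multiple of 3, so exact_div would trip its
                             internal consistency check

The shift-permute variant at the end of this section instead punts outright, since its index arithmetic is written in terms of constant values.
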
diff --git a/gcc/tree-vect-generic.c b/gcc/tree-vect-generic.c
index c0cc656..d7e77b6 100644
--- a/gcc/tree-vect-generic.c
+++ b/gcc/tree-vect-generic.c
@@ -50,7 +50,7 @@ static void expand_vector_operations_1 (gimple_stmt_iterator *);
static unsigned int
nunits_for_known_piecewise_op (const_tree type)
{
- return TYPE_VECTOR_SUBPARTS (type);
+ return TYPE_VECTOR_SUBPARTS (type).to_constant ();
}
/* Return true if TYPE1 has more elements than TYPE2, where either
@@ -917,9 +917,9 @@ expand_vector_condition (gimple_stmt_iterator *gsi)
Similarly for vbfld_10 instead of x_2 < y_3. */
if (VECTOR_BOOLEAN_TYPE_P (type)
&& SCALAR_INT_MODE_P (TYPE_MODE (type))
- && (GET_MODE_BITSIZE (TYPE_MODE (type))
- < (TYPE_VECTOR_SUBPARTS (type)
- * GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (type)))))
+ && known_lt (GET_MODE_BITSIZE (TYPE_MODE (type)),
+ TYPE_VECTOR_SUBPARTS (type)
+ * GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (type))))
&& (a_is_comparison
? useless_type_conversion_p (type, TREE_TYPE (a))
: expand_vec_cmp_expr_p (TREE_TYPE (a1), type, TREE_CODE (a))))
@@ -1084,14 +1084,17 @@ optimize_vector_constructor (gimple_stmt_iterator *gsi)
tree lhs = gimple_assign_lhs (stmt);
tree rhs = gimple_assign_rhs1 (stmt);
tree type = TREE_TYPE (rhs);
- unsigned int i, j, nelts = TYPE_VECTOR_SUBPARTS (type);
+ unsigned int i, j;
+ unsigned HOST_WIDE_INT nelts;
bool all_same = true;
constructor_elt *elt;
gimple *g;
tree base = NULL_TREE;
optab op;
- if (nelts <= 2 || CONSTRUCTOR_NELTS (rhs) != nelts)
+ if (!TYPE_VECTOR_SUBPARTS (type).is_constant (&nelts)
+ || nelts <= 2
+ || CONSTRUCTOR_NELTS (rhs) != nelts)
return;
op = optab_for_tree_code (PLUS_EXPR, type, optab_default);
if (op == unknown_optab
@@ -1303,7 +1306,7 @@ lower_vec_perm (gimple_stmt_iterator *gsi)
tree mask_type = TREE_TYPE (mask);
tree vect_elt_type = TREE_TYPE (vect_type);
tree mask_elt_type = TREE_TYPE (mask_type);
- unsigned int elements = TYPE_VECTOR_SUBPARTS (vect_type);
+ unsigned HOST_WIDE_INT elements;
vec<constructor_elt, va_gc> *v;
tree constr, t, si, i_val;
tree vec0tmp = NULL_TREE, vec1tmp = NULL_TREE, masktmp = NULL_TREE;
@@ -1311,6 +1314,9 @@ lower_vec_perm (gimple_stmt_iterator *gsi)
location_t loc = gimple_location (gsi_stmt (*gsi));
unsigned i;
+ if (!TYPE_VECTOR_SUBPARTS (vect_type).is_constant (&elements))
+ return;
+
if (TREE_CODE (mask) == SSA_NAME)
{
gimple *def_stmt = SSA_NAME_DEF_STMT (mask);
@@ -1338,17 +1344,18 @@ lower_vec_perm (gimple_stmt_iterator *gsi)
&& TREE_CODE (vec1) == VECTOR_CST
&& initializer_zerop (vec1)
&& maybe_ne (indices[0], 0)
- && known_lt (indices[0], elements))
+ && known_lt (poly_uint64 (indices[0]), elements))
{
bool ok_p = indices.series_p (0, 1, indices[0], 1);
if (!ok_p)
{
for (i = 1; i < elements; ++i)
{
- poly_int64 expected = i + indices[0];
+ poly_uint64 actual = indices[i];
+ poly_uint64 expected = i + indices[0];
/* Indices into the second vector are all equivalent. */
- if (maybe_lt (indices[i], elements)
- ? maybe_ne (indices[i], expected)
+ if (maybe_lt (actual, elements)
+ ? maybe_ne (actual, expected)
: maybe_lt (expected, elements))
break;
}
@@ -1472,7 +1479,7 @@ get_compute_type (enum tree_code code, optab op, tree type)
= type_for_widest_vector_mode (TREE_TYPE (type), op);
if (vector_compute_type != NULL_TREE
&& subparts_gt (compute_type, vector_compute_type)
- && TYPE_VECTOR_SUBPARTS (vector_compute_type) > 1
+ && maybe_ne (TYPE_VECTOR_SUBPARTS (vector_compute_type), 1U)
&& (optab_handler (op, TYPE_MODE (vector_compute_type))
!= CODE_FOR_nothing))
compute_type = vector_compute_type;
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c
index 1f15a64..a74992bb 100644
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -258,9 +258,11 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
}
if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "nunits = " HOST_WIDE_INT_PRINT_DEC "\n",
- TYPE_VECTOR_SUBPARTS (vectype));
+ {
+ dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
+ dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vectype));
+ dump_printf (MSG_NOTE, "\n");
+ }
vect_update_max_nunits (&vectorization_factor, vectype);
}
@@ -551,9 +553,11 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
}
if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "nunits = " HOST_WIDE_INT_PRINT_DEC "\n",
- TYPE_VECTOR_SUBPARTS (vf_vectype));
+ {
+ dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
+ dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vf_vectype));
+ dump_printf (MSG_NOTE, "\n");
+ }
vect_update_max_nunits (&vectorization_factor, vf_vectype);
@@ -635,8 +639,8 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
if (!mask_type)
mask_type = vectype;
- else if (TYPE_VECTOR_SUBPARTS (mask_type)
- != TYPE_VECTOR_SUBPARTS (vectype))
+ else if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
+ TYPE_VECTOR_SUBPARTS (vectype)))
{
if (dump_enabled_p ())
{
@@ -4156,7 +4160,7 @@ get_initial_defs_for_reduction (slp_tree slp_node,
scalar_type = TREE_TYPE (vector_type);
/* vectorizable_reduction has already rejected SLP reductions on
variable-length vectors. */
- nunits = TYPE_VECTOR_SUBPARTS (vector_type);
+ nunits = TYPE_VECTOR_SUBPARTS (vector_type).to_constant ();
gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def);
@@ -7733,9 +7737,8 @@ vect_transform_loop (loop_vec_info loop_vinfo)
if (STMT_VINFO_VECTYPE (stmt_info))
{
- unsigned int nunits
- = (unsigned int)
- TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
+ poly_uint64 nunits
+ = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
if (!STMT_SLP_TYPE (stmt_info)
&& maybe_ne (nunits, vf)
&& dump_enabled_p ())
diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c
index 704e5e8..01806e7 100644
--- a/gcc/tree-vect-patterns.c
+++ b/gcc/tree-vect-patterns.c
@@ -3714,8 +3714,9 @@ vect_recog_bool_pattern (vec<gimple *> *stmts, tree *type_in,
vectorized matches the vector type of the result in
size and number of elements. */
unsigned prec
- = wi::udiv_trunc (wi::to_wide (TYPE_SIZE (vectype)),
- TYPE_VECTOR_SUBPARTS (vectype)).to_uhwi ();
+ = vector_element_size (tree_to_poly_uint64 (TYPE_SIZE (vectype)),
+ TYPE_VECTOR_SUBPARTS (vectype));
+
tree type
= build_nonstandard_integer_type (prec,
TYPE_UNSIGNED (TREE_TYPE (var)));
@@ -3898,7 +3899,8 @@ vect_recog_mask_conversion_pattern (vec<gimple *> *stmts, tree *type_in,
vectype2 = get_mask_type_for_scalar_type (rhs1_type);
if (!vectype1 || !vectype2
- || TYPE_VECTOR_SUBPARTS (vectype1) == TYPE_VECTOR_SUBPARTS (vectype2))
+ || known_eq (TYPE_VECTOR_SUBPARTS (vectype1),
+ TYPE_VECTOR_SUBPARTS (vectype2)))
return NULL;
tmp = build_mask_conversion (rhs1, vectype1, stmt_vinfo, vinfo);
@@ -3973,7 +3975,8 @@ vect_recog_mask_conversion_pattern (vec<gimple *> *stmts, tree *type_in,
vectype2 = get_mask_type_for_scalar_type (rhs1_type);
if (!vectype1 || !vectype2
- || TYPE_VECTOR_SUBPARTS (vectype1) == TYPE_VECTOR_SUBPARTS (vectype2))
+ || known_eq (TYPE_VECTOR_SUBPARTS (vectype1),
+ TYPE_VECTOR_SUBPARTS (vectype2)))
return NULL;
/* If rhs1 is invariant and we can promote it leave the COND_EXPR
@@ -3981,7 +3984,8 @@ vect_recog_mask_conversion_pattern (vec<gimple *> *stmts, tree *type_in,
unnecessary promotion stmts and increased vectorization factor. */
if (COMPARISON_CLASS_P (rhs1)
&& INTEGRAL_TYPE_P (rhs1_type)
- && TYPE_VECTOR_SUBPARTS (vectype1) < TYPE_VECTOR_SUBPARTS (vectype2))
+ && known_le (TYPE_VECTOR_SUBPARTS (vectype1),
+ TYPE_VECTOR_SUBPARTS (vectype2)))
{
gimple *dummy;
enum vect_def_type dt;
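
vector_element_size divides a possibly-polynomial vector size in bits by a possibly-polynomial element count; the runtime factor cancels, leaving the constant element width that build_nonstandard_integer_type needs. A worked example for an SVE-style type:

  TYPE_SIZE = 128 + 128x bits,  TYPE_VECTOR_SUBPARTS = 16 + 16x
  element width = (128 + 128x) / (16 + 16x) = 8 bits

so prec ends up constant even though neither input is.
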
diff --git a/gcc/tree-vect-slp.c b/gcc/tree-vect-slp.c
index 8a9782b..2563fc0 100644
--- a/gcc/tree-vect-slp.c
+++ b/gcc/tree-vect-slp.c
@@ -1625,15 +1625,16 @@ vect_supported_load_permutation_p (slp_instance slp_instn)
stmt_vec_info group_info
= vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]);
group_info = vinfo_for_stmt (GROUP_FIRST_ELEMENT (group_info));
- unsigned nunits
- = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (group_info));
+ unsigned HOST_WIDE_INT nunits;
unsigned k, maxk = 0;
FOR_EACH_VEC_ELT (SLP_TREE_LOAD_PERMUTATION (node), j, k)
if (k > maxk)
maxk = k;
/* In BB vectorization we may not actually use a loaded vector
accessing elements in excess of GROUP_SIZE. */
- if (maxk >= (GROUP_SIZE (group_info) & ~(nunits - 1)))
+ tree vectype = STMT_VINFO_VECTYPE (group_info);
+ if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits)
+ || maxk >= (GROUP_SIZE (group_info) & ~(nunits - 1)))
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"BB vectorization with gaps at the end of "
@@ -3257,7 +3258,7 @@ vect_get_constant_vectors (tree op, slp_tree slp_node,
else
vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
/* Enforced by vect_get_and_check_slp_defs. */
- nunits = TYPE_VECTOR_SUBPARTS (vector_type);
+ nunits = TYPE_VECTOR_SUBPARTS (vector_type).to_constant ();
if (STMT_VINFO_DATA_REF (stmt_vinfo))
{
@@ -3616,12 +3617,12 @@ vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0];
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree mask_element_type = NULL_TREE, mask_type;
- int nunits, vec_index = 0;
+ int vec_index = 0;
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
- int mask_element;
+ unsigned int mask_element;
machine_mode mode;
- unsigned HOST_WIDE_INT const_vf;
+ unsigned HOST_WIDE_INT nunits, const_vf;
if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
return false;
@@ -3631,8 +3632,10 @@ vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
mode = TYPE_MODE (vectype);
/* At the moment, all permutations are represented using per-element
- indices, so we can't cope with variable vectorization factors. */
- if (!vf.is_constant (&const_vf))
+ indices, so we can't cope with variable vector lengths or
+ vectorization factors. */
+ if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits)
+ || !vf.is_constant (&const_vf))
return false;
/* The generic VEC_PERM_EXPR code always uses an integral type of the
@@ -3640,7 +3643,6 @@ vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
mask_element_type = lang_hooks.types.type_for_mode
(int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))).require (), 1);
mask_type = get_vectype_for_scalar_type (mask_element_type);
- nunits = TYPE_VECTOR_SUBPARTS (vectype);
vec_perm_builder mask (nunits, nunits, 1);
mask.quick_grow (nunits);
vec_perm_indices indices;
@@ -3671,7 +3673,7 @@ vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
{c2,a3,b3,c3}. */
int vect_stmts_counter = 0;
- int index = 0;
+ unsigned int index = 0;
int first_vec_index = -1;
int second_vec_index = -1;
bool noop_p = true;
@@ -3681,8 +3683,8 @@ vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
{
for (int k = 0; k < group_size; k++)
{
- int i = (SLP_TREE_LOAD_PERMUTATION (node)[k]
- + j * STMT_VINFO_GROUP_SIZE (stmt_info));
+ unsigned int i = (SLP_TREE_LOAD_PERMUTATION (node)[k]
+ + j * STMT_VINFO_GROUP_SIZE (stmt_info));
vec_index = i / nunits;
mask_element = i % nunits;
if (vec_index == first_vec_index
@@ -3710,8 +3712,7 @@ vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
return false;
}
- gcc_assert (mask_element >= 0
- && mask_element < 2 * nunits);
+ gcc_assert (mask_element < 2 * nunits);
if (mask_element != index)
noop_p = false;
mask[index++] = mask_element;
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index 71bc3c1..14ffdd4 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -1721,13 +1721,11 @@ compare_step_with_zero (gimple *stmt)
static tree
perm_mask_for_reverse (tree vectype)
{
- int i, nunits;
-
- nunits = TYPE_VECTOR_SUBPARTS (vectype);
+ poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
/* The encoding has a single stepped pattern. */
vec_perm_builder sel (nunits, 1, 3);
- for (i = 0; i < 3; ++i)
+ for (int i = 0; i < 3; ++i)
sel.quick_push (nunits - 1 - i);
vec_perm_indices indices (sel, 1, nunits);
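
perm_mask_for_reverse no longer needs a constant element count because the mask is a single stepped pattern: the builder records only nunits-1, nunits-2 and nunits-3, and the series is extended downwards to cover the whole vector. For example:

  nunits = 8:        7, 6, 5, 4, 3, 2, 1, 0
  nunits = 4 + 4x:   (3 + 4x), (2 + 4x), (1 + 4x), ... , 1, 0

so the same three encoded elements describe the reversal for every runtime length.
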
@@ -1760,7 +1758,7 @@ get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
bool single_element_p = (stmt == first_stmt
&& !GROUP_NEXT_ELEMENT (stmt_info));
unsigned HOST_WIDE_INT gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
- unsigned nunits = TYPE_VECTOR_SUBPARTS (vectype);
+ poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
/* True if the vectorized statements would access beyond the last
statement in the group. */
@@ -1784,7 +1782,7 @@ get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
/* Try to use consecutive accesses of GROUP_SIZE elements,
separated by the stride, until we have a complete vector.
Fall back to scalar accesses if that isn't possible. */
- if (nunits % group_size == 0)
+ if (multiple_p (nunits, group_size))
*memory_access_type = VMAT_STRIDED_SLP;
else
*memory_access_type = VMAT_ELEMENTWISE;
@@ -2112,7 +2110,8 @@ vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));
if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype)
- || TYPE_VECTOR_SUBPARTS (mask_vectype) != TYPE_VECTOR_SUBPARTS (vectype))
+ || maybe_ne (TYPE_VECTOR_SUBPARTS (mask_vectype),
+ TYPE_VECTOR_SUBPARTS (vectype)))
return false;
if (gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
@@ -2269,8 +2268,8 @@ vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
{
- gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
- == TYPE_VECTOR_SUBPARTS (idxtype));
+ gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
+ TYPE_VECTOR_SUBPARTS (idxtype)));
var = vect_get_new_ssa_name (idxtype, vect_simple_var);
op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
new_stmt
@@ -2295,8 +2294,9 @@ vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
mask_op = vec_mask;
if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
{
- gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
- == TYPE_VECTOR_SUBPARTS (masktype));
+ gcc_assert
+ (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op)),
+ TYPE_VECTOR_SUBPARTS (masktype)));
var = vect_get_new_ssa_name (masktype, vect_simple_var);
mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
new_stmt
@@ -2312,8 +2312,8 @@ vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
if (!useless_type_conversion_p (vectype, rettype))
{
- gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
- == TYPE_VECTOR_SUBPARTS (rettype));
+ gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype),
+ TYPE_VECTOR_SUBPARTS (rettype)));
op = vect_get_new_ssa_name (rettype, vect_simple_var);
gimple_call_set_lhs (new_stmt, op);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
@@ -2507,11 +2507,14 @@ vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi,
tree op, vectype;
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- unsigned ncopies, nunits;
+ unsigned ncopies;
+ unsigned HOST_WIDE_INT nunits, num_bytes;
op = gimple_call_arg (stmt, 0);
vectype = STMT_VINFO_VECTYPE (stmt_info);
- nunits = TYPE_VECTOR_SUBPARTS (vectype);
+
+ if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits))
+ return false;
/* Multiple types in SLP are handled by creating the appropriate number of
vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
@@ -2527,7 +2530,9 @@ vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi,
if (! char_vectype)
return false;
- unsigned int num_bytes = TYPE_VECTOR_SUBPARTS (char_vectype);
+ if (!TYPE_VECTOR_SUBPARTS (char_vectype).is_constant (&num_bytes))
+ return false;
+
unsigned word_bytes = num_bytes / nunits;
/* The encoding uses one stepped pattern for each byte in the word. */
@@ -3226,7 +3231,7 @@ vect_simd_lane_linear (tree op, struct loop *loop,
static unsigned HOST_WIDE_INT
simd_clone_subparts (tree vectype)
{
- return TYPE_VECTOR_SUBPARTS (vectype);
+ return TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
}
/* Function vectorizable_simd_clone_call.
@@ -4745,7 +4750,7 @@ vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
op = TREE_OPERAND (op, 0);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
- unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
+ poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
/* Multiple types in SLP are handled by creating the appropriate number of
vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
@@ -4770,7 +4775,7 @@ vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
if ((CONVERT_EXPR_CODE_P (code)
|| code == VIEW_CONVERT_EXPR)
&& (!vectype_in
- || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
+ || maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in), nunits)
|| (GET_MODE_SIZE (TYPE_MODE (vectype))
!= GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
return false;
@@ -4919,8 +4924,8 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
int ndts = 2;
gimple *new_stmt = NULL;
stmt_vec_info prev_stmt_info;
- int nunits_in;
- int nunits_out;
+ poly_uint64 nunits_in;
+ poly_uint64 nunits_out;
tree vectype_out;
tree op1_vectype;
int ncopies;
@@ -4987,7 +4992,7 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
- if (nunits_out != nunits_in)
+ if (maybe_ne (nunits_out, nunits_in))
return false;
op1 = gimple_assign_rhs2 (stmt);
@@ -5287,8 +5292,8 @@ vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
int ndts = 3;
gimple *new_stmt = NULL;
stmt_vec_info prev_stmt_info;
- int nunits_in;
- int nunits_out;
+ poly_uint64 nunits_in;
+ poly_uint64 nunits_out;
tree vectype_out;
int ncopies;
int j, i;
@@ -5400,7 +5405,7 @@ vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
- if (nunits_out != nunits_in)
+ if (maybe_ne (nunits_out, nunits_in))
return false;
if (op_type == binary_op || op_type == ternary_op)
@@ -5972,8 +5977,8 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
{
- gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src))
- == TYPE_VECTOR_SUBPARTS (srctype));
+ gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src)),
+ TYPE_VECTOR_SUBPARTS (srctype)));
var = vect_get_new_ssa_name (srctype, vect_simple_var);
src = build1 (VIEW_CONVERT_EXPR, srctype, src);
new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
@@ -5983,8 +5988,8 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
{
- gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
- == TYPE_VECTOR_SUBPARTS (idxtype));
+ gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
+ TYPE_VECTOR_SUBPARTS (idxtype)));
var = vect_get_new_ssa_name (idxtype, vect_simple_var);
op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
@@ -7023,8 +7028,8 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
{
- gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
- == TYPE_VECTOR_SUBPARTS (idxtype));
+ gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
+ TYPE_VECTOR_SUBPARTS (idxtype)));
var = vect_get_new_ssa_name (idxtype, vect_simple_var);
op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
new_stmt
@@ -7038,8 +7043,8 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
if (!useless_type_conversion_p (vectype, rettype))
{
- gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
- == TYPE_VECTOR_SUBPARTS (rettype));
+ gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype),
+ TYPE_VECTOR_SUBPARTS (rettype)));
op = vect_get_new_ssa_name (rettype, vect_simple_var);
gimple_call_set_lhs (new_stmt, op);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
@@ -7936,7 +7941,8 @@ vect_is_simple_cond (tree cond, vec_info *vinfo,
return false;
if (vectype1 && vectype2
- && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
+ && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
+ TYPE_VECTOR_SUBPARTS (vectype2)))
return false;
*comp_vectype = vectype1 ? vectype1 : vectype2;
@@ -8353,7 +8359,7 @@ vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
int ndts = 2;
- unsigned nunits;
+ poly_uint64 nunits;
int ncopies;
enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
stmt_vec_info prev_stmt_info = NULL;
@@ -8413,7 +8419,8 @@ vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
return false;
if (vectype1 && vectype2
- && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
+ && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
+ TYPE_VECTOR_SUBPARTS (vectype2)))
return false;
vectype = vectype1 ? vectype1 : vectype2;
@@ -8422,10 +8429,10 @@ vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
if (!vectype)
{
vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
- if (TYPE_VECTOR_SUBPARTS (vectype) != nunits)
+ if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype), nunits))
return false;
}
- else if (nunits != TYPE_VECTOR_SUBPARTS (vectype))
+ else if (maybe_ne (nunits, TYPE_VECTOR_SUBPARTS (vectype)))
return false;
/* Can't compare mask and non-mask types. */
@@ -9656,8 +9663,8 @@ supportable_widening_operation (enum tree_code code, gimple *stmt,
vector types having the same QImode. Thus we
add additional check for elements number. */
return (!VECTOR_BOOLEAN_TYPE_P (vectype)
- || (TYPE_VECTOR_SUBPARTS (vectype) / 2
- == TYPE_VECTOR_SUBPARTS (wide_vectype)));
+ || known_eq (TYPE_VECTOR_SUBPARTS (vectype),
+ TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));
/* Check if it's a multi-step conversion that can be done using intermediate
types. */
@@ -9678,8 +9685,10 @@ supportable_widening_operation (enum tree_code code, gimple *stmt,
intermediate_mode = insn_data[icode1].operand[0].mode;
if (VECTOR_BOOLEAN_TYPE_P (prev_type))
{
+ poly_uint64 intermediate_nelts
+ = exact_div (TYPE_VECTOR_SUBPARTS (prev_type), 2);
intermediate_type
- = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) / 2,
+ = build_truth_vector_type (intermediate_nelts,
current_vector_size);
if (intermediate_mode != TYPE_MODE (intermediate_type))
return false;
@@ -9709,8 +9718,8 @@ supportable_widening_operation (enum tree_code code, gimple *stmt,
if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
&& insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
return (!VECTOR_BOOLEAN_TYPE_P (vectype)
- || (TYPE_VECTOR_SUBPARTS (intermediate_type) / 2
- == TYPE_VECTOR_SUBPARTS (wide_vectype)));
+ || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type),
+ TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));
prev_type = intermediate_type;
prev_mode = intermediate_mode;
@@ -9798,8 +9807,8 @@ supportable_narrowing_operation (enum tree_code code,
vector types having the same QImode. Thus we
add additional check for elements number. */
return (!VECTOR_BOOLEAN_TYPE_P (vectype)
- || (TYPE_VECTOR_SUBPARTS (vectype) * 2
- == TYPE_VECTOR_SUBPARTS (narrow_vectype)));
+ || known_eq (TYPE_VECTOR_SUBPARTS (vectype) * 2,
+ TYPE_VECTOR_SUBPARTS (narrow_vectype)));
/* Check if it's a multi-step conversion that can be done using intermediate
types. */
@@ -9865,8 +9874,8 @@ supportable_narrowing_operation (enum tree_code code,
if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
return (!VECTOR_BOOLEAN_TYPE_P (vectype)
- || (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2
- == TYPE_VECTOR_SUBPARTS (narrow_vectype)));
+ || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2,
+ TYPE_VECTOR_SUBPARTS (narrow_vectype)));
prev_mode = intermediate_mode;
prev_type = intermediate_type;
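
The widening and narrowing checks above are rewritten so that the polynomial count is only ever multiplied, never divided: instead of asking whether SUBPARTS (vectype) / 2 equals SUBPARTS (wide_vectype), the code asks whether SUBPARTS (vectype) equals SUBPARTS (wide_vectype) * 2, which known_eq can answer directly. A worked example:

  known_eq (16 + 16x, (8 + 8x) * 2)   holds for every x, so the check passes
  known_eq (16 + 16x, (4 + 4x) * 2)   cannot hold for any x >= 0, so it fails

Multiplication by a constant keeps the value in N + N*x form, whereas a division would need an explicit exact_div.
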
diff --git a/gcc/tree-vector-builder.c b/gcc/tree-vector-builder.c
index 7912f39..08652a2 100644
--- a/gcc/tree-vector-builder.c
+++ b/gcc/tree-vector-builder.c
@@ -108,14 +108,17 @@ tree_vector_builder::new_binary_operation (tree type, tree t1, tree t2,
unsigned int
tree_vector_builder::binary_encoded_nelts (tree t1, tree t2)
{
- unsigned int nelts = TYPE_VECTOR_SUBPARTS (TREE_TYPE (t1));
- gcc_assert (nelts == TYPE_VECTOR_SUBPARTS (TREE_TYPE (t2)));
+ poly_uint64 nelts = TYPE_VECTOR_SUBPARTS (TREE_TYPE (t1));
+ gcc_assert (known_eq (nelts, TYPE_VECTOR_SUBPARTS (TREE_TYPE (t2))));
/* See new_binary_operation for details. */
unsigned int npatterns = least_common_multiple (VECTOR_CST_NPATTERNS (t1),
VECTOR_CST_NPATTERNS (t2));
unsigned int nelts_per_pattern = MAX (VECTOR_CST_NELTS_PER_PATTERN (t1),
VECTOR_CST_NELTS_PER_PATTERN (t2));
- return MIN (npatterns * nelts_per_pattern, nelts);
+ unsigned HOST_WIDE_INT const_nelts;
+ if (nelts.is_constant (&const_nelts))
+ return MIN (npatterns * nelts_per_pattern, const_nelts);
+ return npatterns * nelts_per_pattern;
}
/* Return a vector element with the value BASE + FACTOR * STEP. */
diff --git a/gcc/tree.c b/gcc/tree.c
index c9ca760..d263129 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -1851,16 +1851,21 @@ make_vector (unsigned log2_npatterns,
tree
build_vector_from_ctor (tree type, vec<constructor_elt, va_gc> *v)
{
- unsigned int nelts = TYPE_VECTOR_SUBPARTS (type);
- unsigned HOST_WIDE_INT idx;
+ unsigned HOST_WIDE_INT idx, nelts;
tree value;
+ /* We can't construct a VECTOR_CST for a variable number of elements. */
+ nelts = TYPE_VECTOR_SUBPARTS (type).to_constant ();
tree_vector_builder vec (type, nelts, 1);
FOR_EACH_CONSTRUCTOR_VALUE (v, idx, value)
{
if (TREE_CODE (value) == VECTOR_CST)
- for (unsigned i = 0; i < VECTOR_CST_NELTS (value); ++i)
- vec.quick_push (VECTOR_CST_ELT (value, i));
+ {
+ /* If NELTS is constant then this must be too. */
+ unsigned int sub_nelts = VECTOR_CST_NELTS (value).to_constant ();
+ for (unsigned i = 0; i < sub_nelts; ++i)
+ vec.quick_push (VECTOR_CST_ELT (value, i));
+ }
else
vec.quick_push (value);
}
@@ -1872,9 +1877,9 @@ build_vector_from_ctor (tree type, vec<constructor_elt, va_gc> *v)
/* Build a vector of type VECTYPE where all the elements are SCs. */
tree
build_vector_from_val (tree vectype, tree sc)
{
- int i, nunits = TYPE_VECTOR_SUBPARTS (vectype);
+ unsigned HOST_WIDE_INT i, nunits;
if (sc == error_mark_node)
return sc;
@@ -1894,7 +1899,7 @@ build_vector_from_val (tree vectype, tree sc)
v.quick_push (sc);
return v.build ();
}
- else if (0)
+ else if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits))
return fold_build1 (VEC_DUPLICATE_EXPR, vectype, sc);
else
{
@@ -6497,11 +6502,8 @@ type_hash_canon_hash (tree type)
}
case VECTOR_TYPE:
- {
- unsigned nunits = TYPE_VECTOR_SUBPARTS (type);
- hstate.add_object (nunits);
- break;
- }
+ hstate.add_poly_int (TYPE_VECTOR_SUBPARTS (type));
+ break;
default:
break;
@@ -6545,7 +6547,8 @@ type_cache_hasher::equal (type_hash *a, type_hash *b)
return 1;
case VECTOR_TYPE:
- return TYPE_VECTOR_SUBPARTS (a->type) == TYPE_VECTOR_SUBPARTS (b->type);
+ return known_eq (TYPE_VECTOR_SUBPARTS (a->type),
+ TYPE_VECTOR_SUBPARTS (b->type));
case ENUMERAL_TYPE:
if (TYPE_VALUES (a->type) != TYPE_VALUES (b->type)
@@ -9610,7 +9613,7 @@ make_vector_type (tree innertype, poly_int64 nunits, machine_mode mode)
t = make_node (VECTOR_TYPE);
TREE_TYPE (t) = mv_innertype;
- SET_TYPE_VECTOR_SUBPARTS (t, nunits.to_constant ()); /* Temporary */
+ SET_TYPE_VECTOR_SUBPARTS (t, nunits);
SET_TYPE_MODE (t, mode);
if (TYPE_STRUCTURAL_EQUALITY_P (mv_innertype) || in_lto_p)
@@ -10533,7 +10536,7 @@ build_vector_type_for_mode (tree innertype, machine_mode mode)
a power of two. */
tree
-build_vector_type (tree innertype, int nunits)
+build_vector_type (tree innertype, poly_int64 nunits)
{
return make_vector_type (innertype, nunits, VOIDmode);
}
@@ -10578,7 +10581,7 @@ build_same_sized_truth_vector_type (tree vectype)
/* Similarly, but builds a variant type with TYPE_VECTOR_OPAQUE set. */
tree
-build_opaque_vector_type (tree innertype, int nunits)
+build_opaque_vector_type (tree innertype, poly_int64 nunits)
{
tree t = make_vector_type (innertype, nunits, VOIDmode);
tree cand;
@@ -10727,7 +10730,7 @@ tree
uniform_vector_p (const_tree vec)
{
tree first, t;
- unsigned i;
+ unsigned HOST_WIDE_INT i, nelts;
if (vec == NULL_TREE)
return NULL_TREE;
@@ -10744,7 +10747,8 @@ uniform_vector_p (const_tree vec)
return NULL_TREE;
}
- else if (TREE_CODE (vec) == CONSTRUCTOR)
+ else if (TREE_CODE (vec) == CONSTRUCTOR
+ && TYPE_VECTOR_SUBPARTS (TREE_TYPE (vec)).is_constant (&nelts))
{
first = error_mark_node;
@@ -10758,7 +10762,7 @@ uniform_vector_p (const_tree vec)
if (!operand_equal_p (first, t, 0))
return NULL_TREE;
}
- if (i != TYPE_VECTOR_SUBPARTS (TREE_TYPE (vec)))
+ if (i != nelts)
return NULL_TREE;
return first;
@@ -13034,8 +13038,8 @@ vector_type_mode (const_tree t)
/* For integers, try mapping it to a same-sized scalar mode. */
if (is_int_mode (TREE_TYPE (t)->type_common.mode, &innermode))
{
- unsigned int size = (TYPE_VECTOR_SUBPARTS (t)
- * GET_MODE_BITSIZE (innermode));
+ poly_int64 size = (TYPE_VECTOR_SUBPARTS (t)
+ * GET_MODE_BITSIZE (innermode));
scalar_int_mode mode;
if (int_mode_for_size (size, 0).exists (&mode)
&& have_regs_of_mode[mode])
@@ -14319,7 +14323,7 @@ test_labels ()
static tree
build_vector (tree type, vec<tree> vals MEM_STAT_DECL)
{
- gcc_assert (vals.length () == TYPE_VECTOR_SUBPARTS (type));
+ gcc_assert (known_eq (vals.length (), TYPE_VECTOR_SUBPARTS (type)));
tree_vector_builder builder (type, vals.length (), 1);
builder.splice (vals);
return builder.build ();
@@ -14330,7 +14334,8 @@ build_vector (tree type, vec<tree> vals MEM_STAT_DECL)
static void
check_vector_cst (vec<tree> expected, tree actual)
{
- ASSERT_EQ (expected.length (), TYPE_VECTOR_SUBPARTS (TREE_TYPE (actual)));
+ ASSERT_KNOWN_EQ (expected.length (),
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (actual)));
for (unsigned int i = 0; i < expected.length (); ++i)
ASSERT_EQ (wi::to_wide (expected[i]),
wi::to_wide (vector_cst_elt (actual, i)));
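
build_vector_from_val is where variable-length vectors become visible in the tree IR: when TYPE_VECTOR_SUBPARTS is not a compile-time constant there is no way to list every element, so once the earlier special cases have been handled the uniform vector is returned as fold_build1 (VEC_DUPLICATE_EXPR, vectype, sc), a single-operand node meaning "a vector of VECTYPE whose every element equals SC". A hypothetical use inside GCC, assuming the surrounding headers; the variable names are invented:

  tree splat = build_vector_from_val (vectype, scalar_val);
  /* For a fixed-length VECTYPE this is still a VECTOR_CST; for a
     variable-length one it may be a VEC_DUPLICATE_EXPR, so callers can
     no longer assume a VECTOR_CST result.  */
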
diff --git a/gcc/tree.h b/gcc/tree.h
index 418075d..60c4411 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -2035,15 +2035,6 @@ extern machine_mode vector_type_mode (const_tree);
If set in a INTEGER_TYPE, indicates a character type. */
#define TYPE_STRING_FLAG(NODE) (TYPE_CHECK (NODE)->type_common.string_flag)
-/* For a VECTOR_TYPE, this is the number of sub-parts of the vector. */
-#define TYPE_VECTOR_SUBPARTS(VECTOR_TYPE) \
- (HOST_WIDE_INT_1U \
- << VECTOR_TYPE_CHECK (VECTOR_TYPE)->type_common.precision)
-
-/* Set precision to n when we have 2^n sub-parts of the vector. */
-#define SET_TYPE_VECTOR_SUBPARTS(VECTOR_TYPE, X) \
- (VECTOR_TYPE_CHECK (VECTOR_TYPE)->type_common.precision = exact_log2 (X))
-
/* Nonzero in a VECTOR_TYPE if the frontends should not emit warnings
about missing conversions to other vector types of the same size. */
#define TYPE_VECTOR_OPAQUE(NODE) \
@@ -3677,6 +3668,64 @@ id_equal (const char *str, const_tree id)
return !strcmp (str, IDENTIFIER_POINTER (id));
}
+/* Return the number of elements in the VECTOR_TYPE given by NODE. */
+
+inline poly_uint64
+TYPE_VECTOR_SUBPARTS (const_tree node)
+{
+ STATIC_ASSERT (NUM_POLY_INT_COEFFS <= 2);
+ unsigned int precision = VECTOR_TYPE_CHECK (node)->type_common.precision;
+ if (NUM_POLY_INT_COEFFS == 2)
+ {
+ poly_uint64 res = 0;
+ res.coeffs[0] = 1 << (precision & 0xff);
+ if (precision & 0x100)
+ res.coeffs[1] = 1 << (precision & 0xff);
+ return res;
+ }
+ else
+ return 1 << precision;
+}
+
+/* Set the number of elements in VECTOR_TYPE NODE to SUBPARTS, which must
+ satisfy valid_vector_subparts_p. */
+
+inline void
+SET_TYPE_VECTOR_SUBPARTS (tree node, poly_uint64 subparts)
+{
+ STATIC_ASSERT (NUM_POLY_INT_COEFFS <= 2);
+ unsigned HOST_WIDE_INT coeff0 = subparts.coeffs[0];
+ int index = exact_log2 (coeff0);
+ gcc_assert (index >= 0);
+ if (NUM_POLY_INT_COEFFS == 2)
+ {
+ unsigned HOST_WIDE_INT coeff1 = subparts.coeffs[1];
+ gcc_assert (coeff1 == 0 || coeff1 == coeff0);
+ VECTOR_TYPE_CHECK (node)->type_common.precision
+ = index + (coeff1 != 0 ? 0x100 : 0);
+ }
+ else
+ VECTOR_TYPE_CHECK (node)->type_common.precision = index;
+}
+
+/* Return true if we can construct vector types with the given number
+ of subparts. */
+
+static inline bool
+valid_vector_subparts_p (poly_uint64 subparts)
+{
+ unsigned HOST_WIDE_INT coeff0 = subparts.coeffs[0];
+ if (!pow2p_hwi (coeff0))
+ return false;
+ if (NUM_POLY_INT_COEFFS == 2)
+ {
+ unsigned HOST_WIDE_INT coeff1 = subparts.coeffs[1];
+ if (coeff1 != 0 && coeff1 != coeff0)
+ return false;
+ }
+ return true;
+}
+
#define error_mark_node global_trees[TI_ERROR_MARK]
#define intQI_type_node global_trees[TI_INTQI_TYPE]
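
To make the new encoding concrete: the low 8 bits of the precision field hold log2 of the constant coefficient, and bit 8 records whether the runtime coefficient is present, in which case it equals the constant one (the N + N*x shape that valid_vector_subparts_p enforces). A fixed-length 4-element vector therefore stores 0x002; an SVE-style vector with 4 + 4x elements stores 0x102. The standalone model below is an illustration only, not GCC source; it assumes GCC or Clang for __builtin_ctzll and compiles on its own:

  #include <cassert>
  #include <cstdint>

  static unsigned int
  encode_subparts (uint64_t coeff0, uint64_t coeff1)
  {
    assert (coeff0 != 0 && (coeff0 & (coeff0 - 1)) == 0);  /* power of 2 */
    assert (coeff1 == 0 || coeff1 == coeff0);              /* N + N*x only */
    unsigned int index = __builtin_ctzll (coeff0);         /* log2 (coeff0) */
    return index | (coeff1 != 0 ? 0x100 : 0);
  }

  static void
  decode_subparts (unsigned int precision, uint64_t *coeff0, uint64_t *coeff1)
  {
    *coeff0 = (uint64_t) 1 << (precision & 0xff);
    *coeff1 = (precision & 0x100) ? *coeff0 : 0;
  }

  int
  main ()
  {
    assert (encode_subparts (4, 0) == 0x002);   /* V4SI-style: 4 elements */
    assert (encode_subparts (4, 4) == 0x102);   /* SVE-style: 4 + 4x elements */
    uint64_t c0, c1;
    decode_subparts (0x102, &c0, &c1);
    assert (c0 == 4 && c1 == 4);
    return 0;
  }

When NUM_POLY_INT_COEFFS is 1 the accessors reduce to the old single log2 field, as the else branches above show.
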
@@ -4121,16 +4170,10 @@ extern tree build_pointer_type (tree);
extern tree build_reference_type_for_mode (tree, machine_mode, bool);
extern tree build_reference_type (tree);
extern tree build_vector_type_for_mode (tree, machine_mode);
-extern tree build_vector_type (tree innertype, int nunits);
-/* Temporary. */
-inline tree
-build_vector_type (tree innertype, poly_uint64 nunits)
-{
- return build_vector_type (innertype, (int) nunits.to_constant ());
-}
+extern tree build_vector_type (tree, poly_int64);
extern tree build_truth_vector_type (poly_uint64, poly_uint64);
extern tree build_same_sized_truth_vector_type (tree vectype);
-extern tree build_opaque_vector_type (tree innertype, int nunits);
+extern tree build_opaque_vector_type (tree, poly_int64);
extern tree build_index_type (tree);
extern tree build_array_type (tree, tree, bool = false);
extern tree build_nonshared_array_type (tree, tree);
diff --git a/gcc/varasm.c b/gcc/varasm.c
index 0f5790f..915b99b 100644
--- a/gcc/varasm.c
+++ b/gcc/varasm.c
@@ -4920,7 +4920,9 @@ output_constant (tree exp, unsigned HOST_WIDE_INT size, unsigned int align,
output_constant (VECTOR_CST_ELT (exp, 0), elt_size, align,
reverse);
thissize = elt_size;
- for (unsigned int i = 1; i < VECTOR_CST_NELTS (exp); i++)
+ /* Static constants must have a fixed size. */
+ unsigned int nunits = VECTOR_CST_NELTS (exp).to_constant ();
+ for (unsigned int i = 1; i < nunits; i++)
{
output_constant (VECTOR_CST_ELT (exp, i), elt_size, nalign,
reverse);