author     Diego Novillo <dnovillo@google.com>      2012-11-17 21:54:30 -0500
committer  Diego Novillo <dnovillo@gcc.gnu.org>     2012-11-17 21:54:30 -0500
commit     9771b26396c39dfaecd5a76dd359fb65d3be4cb6 (patch)
tree       1b9f930d315fa3e0a5ed7fa6e27ec5bd0a3436a4 /gcc/tree-vect-stmts.c
parent     0f4119158064e271e48a14ce3f88a67e7baf14e0 (diff)
This patch rewrites the old VEC macro-based interface into a new one
based on the template class 'vec'. The user-visible changes are
described in http://gcc.gnu.org/wiki/cxx-conversion/cxx-vec.
I have tested the patch pretty extensively:
- Regular bootstraps on x86_64, ppc, ia64, sparc and hppa.
- Bootstraps with --enable-checking=release
- Bootstraps with --enable-checking=gc,gcac
- Basic builds on all targets (using contrib/config-list.mk).
We no longer access the vectors via VEC_* macros. The pattern
"VEC_operation (T, A, V, args)" becomes "V.operation (args)".
The only thing I could not do is create proper ctors and dtors for the
vec class. Since these vectors are stored in unions, we
have to keep them as PODs (C++03 does not allow non-PODs in unions).
This means that creation and destruction must be explicit. There is a
new method vec<type, allocation, layout>::create() to allocate the
internal vector and a matching vec<type, allocation, layout>::destroy()
to release it.
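As a hedged sketch of what this means in practice (the union and
function below are hypothetical, and the hunks in this patch use
create ()/release () for the explicit allocation and deallocation):

    /* Because vec is a POD it may live inside a union (a C++03
       requirement), but no constructor or destructor ever runs, so
       the lifetime is managed by hand.  */
    union payload                 /* hypothetical union, not from the patch */
    {
      vec<tree> trees;            /* plain POD member */
      tree single;
    };

    static void
    use_payload (union payload *p, tree t)
    {
      p->trees.create (4);        /* plays the role of the missing ctor */
      p->trees.safe_push (t);
      /* ... use p->trees ...  */
      p->trees.release ();        /* plays the role of the missing dtor */
    }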
For vectors that must be pointers, there is a family of free functions
that implement the operations that need to tolerate NULL vectors.
These functions all start with the prefix 'vec_safe_'. See the wiki
page for details.
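A short sketch of that style, assuming a hypothetical GC root; the
vec_safe_ calls tolerate the pointer being NULL and allocate the
vector on first use:

    /* GC vectors use the embedded layout behind a pointer, so the
       pointer stays NULL until something is pushed.  */
    static GTY(()) vec<tree, va_gc> *my_gc_roots;   /* hypothetical root */

    static void
    record_root (tree t)
    {
      vec_safe_push (my_gc_roots, t);         /* allocates on first use */
      gcc_assert (!vec_safe_is_empty (my_gc_roots));

      unsigned ix;
      tree r;
      FOR_EACH_VEC_SAFE_ELT (my_gc_roots, ix, r)
        gcc_assert (r != NULL_TREE);
    }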
The gengtype change removes the special handling for VEC() that used
to exist in gengtype. Additionally, it allows gengtype to recognize
templates of more than one argument and introduces the concept of an
undefined type (useful for template arguments that may or may not be
types).
When a TYPE_UNDEFINED is reached, gengtype will ignore it if it
happens inside a type marked with GTY((user)). Otherwise, it will
emit an error.
Finally, gengtype rejects root types marked GTY((user)) that are not
first class pointers.
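A hedged illustration of the gengtype side, with made-up type names; it
assumes the GTY((user)) contract in which the author hand-writes the
marking hooks that gengtype would otherwise generate (the PCH hooks are
omitted from this sketch):

    /* gengtype does not look inside user-marked types, so template
       members it cannot fully resolve (TYPE_UNDEFINED) are tolerated.  */
    struct GTY((user)) widget_map             /* hypothetical type */
    {
      vec<tree, va_gc> *entries;
    };

    /* A root whose type is GTY((user)) must be a first class pointer.  */
    static GTY(()) widget_map *the_map;

    /* Hand-written marking hook in place of the generated one.  */
    void
    gt_ggc_mx (widget_map *m)
    {
      gt_ggc_mx (m->entries);                 /* mark the GC vector ourselves */
    }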
2012-11-16 Diego Novillo <dnovillo@google.com>
VEC API overhaul (http://gcc.gnu.org/wiki/cxx-conversion/cxx-vec)
* vec.c (register_overhead): Convert it into
member function of vec_prefix.
(release_overhead): Likewise.
(calculate_allocation): Likewise.
(vec_heap_free): Remove.
(vec_gc_o_reserve_1): Remove.
(vec_heap_o_reserve_1): Remove.
(vec_stack_o_reserve_1): Remove.
(vec_stack_o_reserve_exact): Remove.
(register_stack_vec): New.
(stack_vec_register_index): New.
(unregister_stack_vec): New.
(vec_assert_fail): Remove.
* vec.h: Conditionally include ggc.h. Document conditional
hackery.
Update top-level documentation.
(ALONE_VEC_CHECK_INFO): Remove.
(VEC_CHECK_INFO): Remove.
(ALONE_VEC_CHECK_DECL): Remove.
(VEC_CHECK_DECL): Remove.
(ALONE_VEC_CHECK_PASS): Remove.
(VEC_CHECK_PASS): Remove.
(VEC_ASSERT): Remove.
(vec_prefix): Add friends va_gc, va_gc_atomic, va_heap and
va_stack.
Mark fields alloc_ and num_ as protected.
(struct vec_t): Remove. Remove all function members.
(struct vl_embed): Declare.
(struct vl_ptr): Declare.
(free): Remove.
(reserve_exact): Remove.
(reserve): Remove.
(safe_splice): Remove.
(safe_push): Remove.
(safe_grow): Remove.
(safe_grow_cleared): Remove.
(safe_insert): Remove.
(DEF_VEC_I): Remove.
(DEF_VEC_ALLOC_I): Remove.
(DEF_VEC_P): Remove.
(DEF_VEC_ALLOC_P): Remove.
(DEF_VEC_O): Remove.
(DEF_VEC_ALLOC_O): Remove.
(DEF_VEC_ALLOC_P_STACK): Remove.
(DEF_VEC_ALLOC_O_STACK): Remove.
(DEF_VEC_ALLOC_I_STACK): Remove.
(DEF_VEC_A): Remove.
(DEF_VEC_ALLOC_A): Remove.
(vec_stack_p_reserve_exact_1): Remove.
(vec_stack_o_reserve): Remove.
(vec_stack_o_reserve_exact): Remove.
(VEC_length): Remove.
(VEC_empty): Remove.
(VEC_address): Remove.
(vec_address): Remove.
(VEC_last): Remove.
(VEC_index): Remove.
(VEC_iterate): Remove.
(VEC_embedded_size): Remove.
(VEC_embedded_init): Remove.
(VEC_free): Remove.
(VEC_copy): Remove.
(VEC_space): Remove.
(VEC_reserve): Remove.
(VEC_reserve_exact): Remove.
(VEC_splice): Remove.
(VEC_safe_splice): Remove.
(VEC_quick_push): Remove.
(VEC_safe_push): Remove.
(VEC_pop): Remove.
(VEC_truncate): Remove.
(VEC_safe_grow): Remove.
(VEC_replace): Remove.
(VEC_quick_insert): Remove.
(VEC_safe_insert): Remove.
(VEC_ordered_remove): Remove.
(VEC_unordered_remove): Remove.
(VEC_block_remove): Remove.
(VEC_lower_bound): Remove.
(VEC_alloc): Remove.
(VEC_qsort): Remove.
(va_heap): Declare.
(va_heap::default_layout): New typedef to vl_ptr.
(va_heap::reserve): New.
(va_heap::release): New.
(va_gc): Declare.
(va_gc::default_layout): New typedef to vl_embed.
(va_gc::reserve): New.
(va_gc::release): New.
(va_gc_atomic): Declare. Inherit from va_gc.
(va_stack): Declare.
(va_stack::default_layout): New typedef to vl_ptr.
(va_stack::alloc): New.
(va_stack::reserve): New.
(va_stack::release): New.
(register_stack_vec): Declare.
(stack_vec_register_index): Declare.
(unregister_stack_vec): Declare.
(vec<T, A = va_heap, L = typename A::default_layout>): Declare
empty vec template.
(vec<T, A, vl_embed>): Partial specialization for embedded
layout.
(vec<T, A, vl_embed>::allocated): New.
(vec<T, A, vl_embed>::length): New.
(vec<T, A, vl_embed>::is_empty): New.
(vec<T, A, vl_embed>::address): New.
(vec<T, A, vl_embed>::operator[]): New.
(vec<T, A, vl_embed>::last): New.
(vec<T, A, vl_embed>::space): New.
(vec<T, A, vl_embed>::iterate): New.
(vec<T, A, vl_embed>::iterate): New.
(vec<T, A, vl_embed>::copy): New.
(vec<T, A, vl_embed>::splice): New.
(vec<T, A, vl_embed>::quick_push): New.
(vec<T, A, vl_embed>::pop): New.
(vec<T, A, vl_embed>::truncate): New.
(vec<T, A, vl_embed>::quick_insert): New.
(vec<T, A, vl_embed>::ordered_remove): New.
(vec<T, A, vl_embed>::unordered_remove): New.
(vec<T, A, vl_embed>::block_remove): New.
(vec<T, A, vl_embed>::qsort): New.
(vec<T, A, vl_embed>::lower_bound): New.
(vec<T, A, vl_embed>::embedded_size): New.
(vec<T, A, vl_embed>::embedded_init): New.
(vec<T, A, vl_embed>::quick_grow): New.
(vec<T, A, vl_embed>::quick_grow_cleared): New.
(vec_safe_space): New.
(vec_safe_length): New.
(vec_safe_address): New.
(vec_safe_is_empty): New.
(vec_safe_reserve): New.
(vec_safe_reserve_exact): New.
(vec_alloc): New.
(vec_free): New.
(vec_safe_grow): New.
(vec_safe_grow_cleared): New.
(vec_safe_iterate): New.
(vec_safe_push): New.
(vec_safe_insert): New.
(vec_safe_truncate): New.
(vec_safe_copy): New.
(vec_safe_splice): New.
(vec<T, A, vl_ptr>): New partial specialization for the space
efficient layout.
(vec<T, A, vl_ptr>::exists): New.
(vec<T, A, vl_ptr>::is_empty): New.
(vec<T, A, vl_ptr>::length): New.
(vec<T, A, vl_ptr>::address): New.
(vec<T, A, vl_ptr>::operator[]): New.
(vec<T, A, vl_ptr>::operator!=): New.
(vec<T, A, vl_ptr>::operator==): New.
(vec<T, A, vl_ptr>::last): New.
(vec<T, A, vl_ptr>::space): New.
(vec<T, A, vl_ptr>::iterate): New.
(vec<T, A, vl_ptr>::copy): New.
(vec<T, A, vl_ptr>::reserve): New.
(vec<T, A, vl_ptr>::reserve_exact): New.
(vec<T, A, vl_ptr>::splice): New.
(vec<T, A, vl_ptr>::safe_splice): New.
(vec<T, A, vl_ptr>::quick_push): New.
(vec<T, A, vl_ptr>::safe_push): New.
(vec<T, A, vl_ptr>::pop): New.
(vec<T, A, vl_ptr>::truncate): New.
(vec<T, A, vl_ptr>::safe_grow): New.
(vec<T, A, vl_ptr>::safe_grow_cleared): New.
(vec<T, A, vl_ptr>::quick_grow): New.
(vec<T, A, vl_ptr>::quick_grow_cleared): New.
(vec<T, A, vl_ptr>::quick_insert): New.
(vec<T, A, vl_ptr>::safe_insert): New.
(vec<T, A, vl_ptr>::ordered_remove): New.
(vec<T, A, vl_ptr>::unordered_remove): New.
(vec<T, A, vl_ptr>::block_remove): New.
(vec<T, A, vl_ptr>::qsort): New.
(vec<T, A, vl_ptr>::lower_bound): New.
(vec_stack_alloc): Define.
(FOR_EACH_VEC_SAFE_ELT): Define.
* vecir.h: Remove. Update all users.
* vecprim.h: Remove. Update all users.
Move uchar to coretypes.h.
* Makefile.in (VEC_H): Add $(GGC_H).
Remove vecir.h and vecprim.h dependencies everywhere.
2012-11-16 Diego Novillo <dnovillo@google.com>
* gengtype-lex.l (VEC): Remove.
Add characters in the set [\!\>\.-].
* gengtype-parse.c (token_names): Remove "VEC".
(require_template_declaration): Remove handling of VEC_TOKEN.
(type): Likewise.
Call create_user_defined_type when parsing GTY((user)).
* gengtype-state.c (type_lineloc): Handle TYPE_UNDEFINED.
(write_state_undefined_type): New.
(write_state_type): Call write_state_undefined_type for
TYPE_UNDEFINED.
(read_state_type): Call read_state_undefined_type for
TYPE_UNDEFINED.
* gengtype.c (dbgprint_count_type_at): Handle TYPE_UNDEFINED.
(create_user_defined_type): Make extern.
(type_for_name): Factor out of resolve_typedef.
(create_undefined_type): New.
(resolve_typedef): Call it when we cannot find a previous
typedef and the type is not a template.
(find_structure): Accept TYPE_UNDEFINED.
(set_gc_used_type): Add argument ALLOWED_UNDEFINED_TYPES,
default to false.
Emit an error for TYPE_UNDEFINED unless LEVEL is GC_UNUSED or
ALLOWED_UNDEFINED_TYPES is set.
Set ALLOWED_UNDEFINED_TYPES to true for TYPE_USER_STRUCT.
(filter_type_name): Accept templates with more than one
argument.
(output_mangled_typename): Handle TYPE_UNDEFINED.
(walk_type): Likewise.
(write_types_process_field): Likewise.
(write_func_for_structure): If CHAIN_NEXT is set, ORIG_S
should not be a user-defined type.
(write_types_local_user_process_field): Handle TYPE_ARRAY,
TYPE_NONE and TYPE_UNDEFINED.
(write_types_local_process_field): Likewise.
(contains_scalar_p): Return 0 for TYPE_USER_STRUCT.
(write_root): Reject user-defined types that are not pointers.
Handle TYPE_NONE, TYPE_UNDEFINED, TYPE_UNION, TYPE_LANG_STRUCT
and TYPE_PARAM_STRUCT.
(output_typename): Handle TYPE_NONE, TYPE_UNDEFINED, and
TYPE_ARRAY.
(dump_typekind): Handle TYPE_UNDEFINED.
* gengtype.h (enum typekind): Add TYPE_UNDEFINED.
(create_user_defined_type): Declare.
(enum gty_token): Remove VEC_TOKEN.
2012-11-16 Diego Novillo <dnovillo@google.com>
Adjust for new vec API (http://gcc.gnu.org/wiki/cxx-conversion/cxx-vec)
* coretypes.h (uchar): Define.
* alias.c: Use new vec API in vec.h.
* asan.c: Likewise.
* attribs.c: Likewise.
* basic-block.h: Likewise.
* bb-reorder.c: Likewise.
* builtins.c: Likewise.
* calls.c: Likewise.
* cfg.c: Likewise.
* cfganal.c: Likewise.
* cfgcleanup.c: Likewise.
* cfgexpand.c: Likewise.
* cfghooks.c: Likewise.
* cfghooks.h: Likewise.
* cfgloop.c: Likewise.
* cfgloop.h: Likewise.
* cfgloopanal.c: Likewise.
* cfgloopmanip.c: Likewise.
* cfgrtl.c: Likewise.
* cgraph.c: Likewise.
* cgraph.h: Likewise.
* cgraphclones.c: Likewise.
* cgraphunit.c: Likewise.
* combine.c: Likewise.
* compare-elim.c: Likewise.
* coverage.c: Likewise.
* cprop.c: Likewise.
* data-streamer.h: Likewise.
* dbxout.c: Likewise.
* dce.c: Likewise.
* df-core.c: Likewise.
* df-problems.c: Likewise.
* df-scan.c: Likewise.
* dominance.c: Likewise.
* domwalk.c: Likewise.
* domwalk.h: Likewise.
* dse.c: Likewise.
* dwarf2cfi.c: Likewise.
* dwarf2out.c: Likewise.
* dwarf2out.h: Likewise.
* emit-rtl.c: Likewise.
* except.c: Likewise.
* except.h: Likewise.
* expr.c: Likewise.
* expr.h: Likewise.
* final.c: Likewise.
* fold-const.c: Likewise.
* function.c: Likewise.
* function.h: Likewise.
* fwprop.c: Likewise.
* gcc.c: Likewise.
* gcse.c: Likewise.
* genattr.c: Likewise.
* genattrtab.c: Likewise.
* genautomata.c: Likewise.
* genextract.c: Likewise.
* genopinit.c: Likewise.
* ggc-common.c: Likewise.
* ggc.h: Likewise.
* gimple-low.c: Likewise.
* gimple-ssa-strength-reduction.c: Likewise.
* gimple-streamer-in.c: Likewise.
* gimple.c: Likewise.
* gimple.h: Likewise.
* gimplify.c: Likewise.
* graph.c: Likewise.
* graphds.c: Likewise.
* graphds.h: Likewise.
* graphite-blocking.c: Likewise.
* graphite-clast-to-gimple.c: Likewise.
* graphite-dependences.c: Likewise.
* graphite-interchange.c: Likewise.
* graphite-optimize-isl.c: Likewise.
* graphite-poly.c: Likewise.
* graphite-poly.h: Likewise.
* graphite-scop-detection.c: Likewise.
* graphite-scop-detection.h: Likewise.
* graphite-sese-to-poly.c: Likewise.
* graphite.c: Likewise.
* godump.c: Likewise.
* haifa-sched.c: Likewise.
* hw-doloop.c: Likewise.
* hw-doloop.h: Likewise.
* ifcvt.c: Likewise.
* insn-addr.h: Likewise.
* ipa-cp.c: Likewise.
* ipa-inline-analysis.c: Likewise.
* ipa-inline-transform.c: Likewise.
* ipa-inline.c: Likewise.
* ipa-inline.h: Likewise.
* ipa-prop.c: Likewise.
* ipa-prop.h: Likewise.
* ipa-pure-const.c: Likewise.
* ipa-ref-inline.h: Likewise.
* ipa-ref.c: Likewise.
* ipa-ref.h: Likewise.
* ipa-reference.c: Likewise.
* ipa-split.c: Likewise.
* ipa-utils.c: Likewise.
* ipa-utils.h: Likewise.
* ipa.c: Likewise.
* ira-build.c: Likewise.
* ira-color.c: Likewise.
* ira-emit.c: Likewise.
* ira-int.h: Likewise.
* ira.c: Likewise.
* loop-invariant.c: Likewise.
* loop-unroll.c: Likewise.
* lower-subreg.c: Likewise.
* lra-lives.c: Likewise.
* lra.c: Likewise.
* lto-cgraph.c: Likewise.
* lto-section-out.c: Likewise.
* lto-streamer-in.c: Likewise.
* lto-streamer-out.c: Likewise.
* lto-streamer.h: Likewise.
* lto-symtab.c: Likewise.
* mcf.c: Likewise.
* modulo-sched.c: Likewise.
* omp-low.c: Likewise.
* opts-common.c: Likewise.
* opts-global.c: Likewise.
* opts.c: Likewise.
* opts.h: Likewise.
* passes.c: Likewise.
* predict.c: Likewise.
* print-tree.c: Likewise.
* profile.c: Likewise.
* profile.h: Likewise.
* read-rtl.c: Likewise.
* ree.c: Likewise.
* reg-stack.c: Likewise.
* regrename.c: Likewise.
* regrename.h: Likewise.
* reload.c: Likewise.
* reload.h: Likewise.
* reload1.c: Likewise.
* rtl.h: Likewise.
* sched-deps.c: Likewise.
* sched-int.h: Likewise.
* sdbout.c: Likewise.
* sel-sched-dump.c: Likewise.
* sel-sched-ir.c: Likewise.
* sel-sched-ir.h: Likewise.
* sel-sched.c: Likewise.
* sese.c: Likewise.
* sese.h: Likewise.
* statistics.h: Likewise.
* stmt.c: Likewise.
* stor-layout.c: Likewise.
* store-motion.c: Likewise.
* tlink.c: Likewise.
* toplev.c: Likewise.
* trans-mem.c: Likewise.
* tree-browser.c: Likewise.
* tree-call-cdce.c: Likewise.
* tree-cfg.c: Likewise.
* tree-cfgcleanup.c: Likewise.
* tree-chrec.c: Likewise.
* tree-chrec.h: Likewise.
* tree-complex.c: Likewise.
* tree-data-ref.c: Likewise.
* tree-data-ref.h: Likewise.
* tree-dfa.c: Likewise.
* tree-diagnostic.c: Likewise.
* tree-dump.c: Likewise.
* tree-eh.c: Likewise.
* tree-emutls.c: Likewise.
* tree-flow.h: Likewise.
* tree-if-conv.c: Likewise.
* tree-inline.c: Likewise.
* tree-inline.h: Likewise.
* tree-into-ssa.c: Likewise.
* tree-iterator.c: Likewise.
* tree-loop-distribution.c: Likewise.
* tree-mudflap.c: Likewise.
* tree-optimize.c: Likewise.
* tree-outof-ssa.c: Likewise.
* tree-parloops.c: Likewise.
* tree-phinodes.c: Likewise.
* tree-predcom.c: Likewise.
* tree-pretty-print.c: Likewise.
* tree-scalar-evolution.c: Likewise.
* tree-sra.c: Likewise.
* tree-ssa-address.c: Likewise.
* tree-ssa-alias.c: Likewise.
* tree-ssa-ccp.c: Likewise.
* tree-ssa-coalesce.c: Likewise.
* tree-ssa-dce.c: Likewise.
* tree-ssa-dom.c: Likewise.
* tree-ssa-forwprop.c: Likewise.
* tree-ssa-live.c: Likewise.
* tree-ssa-live.h: Likewise.
* tree-ssa-loop-im.c: Likewise.
* tree-ssa-loop-ivcanon.c: Likewise.
* tree-ssa-loop-ivopts.c: Likewise.
* tree-ssa-loop-manip.c: Likewise.
* tree-ssa-loop-niter.c: Likewise.
* tree-ssa-loop-prefetch.c: Likewise.
* tree-ssa-math-opts.c: Likewise.
* tree-ssa-operands.c: Likewise.
* tree-ssa-phiopt.c: Likewise.
* tree-ssa-phiprop.c: Likewise.
* tree-ssa-pre.c: Likewise.
* tree-ssa-propagate.c: Likewise.
* tree-ssa-reassoc.c: Likewise.
* tree-ssa-sccvn.c: Likewise.
* tree-ssa-sccvn.h: Likewise.
* tree-ssa-strlen.c: Likewise.
* tree-ssa-structalias.c: Likewise.
* tree-ssa-tail-merge.c: Likewise.
* tree-ssa-threadedge.c: Likewise.
* tree-ssa-threadupdate.c: Likewise.
* tree-ssa-uncprop.c: Likewise.
* tree-ssa-uninit.c: Likewise.
* tree-ssa.c: Likewise.
* tree-ssanames.c: Likewise.
* tree-stdarg.c: Likewise.
* tree-streamer-in.c: Likewise.
* tree-streamer-out.c: Likewise.
* tree-streamer.c: Likewise.
* tree-streamer.h: Likewise.
* tree-switch-conversion.c: Likewise.
* tree-vect-data-refs.c: Likewise.
* tree-vect-generic.c: Likewise.
* tree-vect-loop-manip.c: Likewise.
* tree-vect-loop.c: Likewise.
* tree-vect-patterns.c: Likewise.
* tree-vect-slp.c: Likewise.
* tree-vect-stmts.c: Likewise.
* tree-vectorizer.c: Likewise.
* tree-vectorizer.h: Likewise.
* tree-vrp.c: Likewise.
* tree.c: Likewise.
* tree.h: Likewise.
* value-prof.c: Likewise.
* value-prof.h: Likewise.
* var-tracking.c: Likewise.
* varasm.c: Likewise.
* varpool.c: Likewise.
* vmsdbgout.c: Likewise.
* config/bfin/bfin.c: Likewise.
* config/c6x/c6x.c: Likewise.
* config/darwin.c: Likewise.
* config/i386/i386.c: Likewise.
* config/ia64/ia64.c: Likewise.
* config/mep/mep.c: Likewise.
* config/mips/mips.c: Likewise.
* config/pa/pa.c: Likewise.
* config/rs6000/rs6000-c.c: Likewise.
* config/rs6000/rs6000.c: Likewise.
* config/rx/rx.c: Likewise.
* config/spu/spu-c.c: Likewise.
* config/vms/vms.c: Likewise.
* config/vxworks.c: Likewise.
* config/epiphany/resolve-sw-modes.c: Likewise.
From-SVN: r193595
Diffstat (limited to 'gcc/tree-vect-stmts.c')
-rw-r--r--  gcc/tree-vect-stmts.c  527
1 file changed, 256 insertions, 271 deletions
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c index 2f4be11..cfe1275 100644 --- a/gcc/tree-vect-stmts.c +++ b/gcc/tree-vect-stmts.c @@ -181,7 +181,7 @@ create_array_ref (tree type, tree ptr, struct data_reference *first_dr) Mark STMT as "relevant for vectorization" and add it to WORKLIST. */ static void -vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt, +vect_mark_relevant (vec<gimple> *worklist, gimple stmt, enum vect_relevant relevant, bool live_p, bool used_in_pattern) { @@ -271,7 +271,7 @@ vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt, return; } - VEC_safe_push (gimple, heap, *worklist, stmt); + worklist->safe_push (stmt); } @@ -419,7 +419,7 @@ exist_non_indexing_operands_for_use_p (tree use, gimple stmt) static bool process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p, - enum vect_relevant relevant, VEC(gimple,heap) **worklist, + enum vect_relevant relevant, vec<gimple> *worklist, bool force) { struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); @@ -575,7 +575,7 @@ process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p, bool vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo) { - VEC(gimple,heap) *worklist; + vec<gimple> worklist; struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); unsigned int nbbs = loop->num_nodes; @@ -593,7 +593,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo) dump_printf_loc (MSG_NOTE, vect_location, "=== vect_mark_stmts_to_be_vectorized ==="); - worklist = VEC_alloc (gimple, heap, 64); + worklist.create (64); /* 1. Init worklist. */ for (i = 0; i < nbbs; i++) @@ -626,12 +626,12 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo) } /* 2. Process_worklist */ - while (VEC_length (gimple, worklist) > 0) + while (worklist.length () > 0) { use_operand_p use_p; ssa_op_iter iter; - stmt = VEC_pop (gimple, worklist); + stmt = worklist.pop (); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: "); @@ -680,7 +680,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo) if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "unsupported use of reduction."); - VEC_free (gimple, heap, worklist); + worklist.release (); return false; } @@ -696,7 +696,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "unsupported use of nested cycle."); - VEC_free (gimple, heap, worklist); + worklist.release (); return false; } @@ -711,7 +711,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "unsupported use of double reduction."); - VEC_free (gimple, heap, worklist); + worklist.release (); return false; } @@ -740,7 +740,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo) || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo, live_p, relevant, &worklist, false)) { - VEC_free (gimple, heap, worklist); + worklist.release (); return false; } i = 2; @@ -751,7 +751,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo) if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist, false)) { - VEC_free (gimple, heap, worklist); + worklist.release (); return false; } } @@ -764,7 +764,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo) if (!process_use (stmt, arg, loop_vinfo, live_p, relevant, &worklist, false)) { - VEC_free (gimple, heap, worklist); + worklist.release (); return false; } } 
@@ -777,7 +777,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo) if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist, false)) { - VEC_free (gimple, heap, worklist); + worklist.release (); return false; } } @@ -790,13 +790,13 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo) if (!process_use (stmt, off, loop_vinfo, live_p, relevant, &worklist, true)) { - VEC_free (gimple, heap, worklist); + worklist.release (); return false; } } } /* while worklist */ - VEC_free (gimple, heap, worklist); + worklist.release (); return true; } @@ -930,7 +930,7 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies, { if (slp_node) { - first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0); + first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0]; group_size = 1; } else @@ -1552,19 +1552,19 @@ vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd) static void vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt, - VEC(tree,heap) **vec_oprnds0, - VEC(tree,heap) **vec_oprnds1) + vec<tree> *vec_oprnds0, + vec<tree> *vec_oprnds1) { - tree vec_oprnd = VEC_pop (tree, *vec_oprnds0); + tree vec_oprnd = vec_oprnds0->pop (); vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd); - VEC_quick_push (tree, *vec_oprnds0, vec_oprnd); + vec_oprnds0->quick_push (vec_oprnd); - if (vec_oprnds1 && *vec_oprnds1) + if (vec_oprnds1 && vec_oprnds1->length ()) { - vec_oprnd = VEC_pop (tree, *vec_oprnds1); + vec_oprnd = vec_oprnds1->pop (); vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd); - VEC_quick_push (tree, *vec_oprnds1, vec_oprnd); + vec_oprnds1->quick_push (vec_oprnd); } } @@ -1575,42 +1575,44 @@ vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt, void vect_get_vec_defs (tree op0, tree op1, gimple stmt, - VEC (tree, heap) **vec_oprnds0, - VEC (tree, heap) **vec_oprnds1, + vec<tree> *vec_oprnds0, + vec<tree> *vec_oprnds1, slp_tree slp_node, int reduc_index) { if (slp_node) { int nops = (op1 == NULL_TREE) ? 
1 : 2; - VEC (tree, heap) *ops = VEC_alloc (tree, heap, nops); - VEC (slp_void_p, heap) *vec_defs = VEC_alloc (slp_void_p, heap, nops); + vec<tree> ops; + ops.create (nops); + vec<slp_void_p> vec_defs; + vec_defs.create (nops); - VEC_quick_push (tree, ops, op0); + ops.quick_push (op0); if (op1) - VEC_quick_push (tree, ops, op1); + ops.quick_push (op1); vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index); - *vec_oprnds0 = (VEC (tree, heap) *) VEC_index (slp_void_p, vec_defs, 0); + *vec_oprnds0 = *((vec<tree> *) vec_defs[0]); if (op1) - *vec_oprnds1 = (VEC (tree, heap) *) VEC_index (slp_void_p, vec_defs, 1); + *vec_oprnds1 = *((vec<tree> *) vec_defs[1]); - VEC_free (tree, heap, ops); - VEC_free (slp_void_p, heap, vec_defs); + ops.release (); + vec_defs.release (); } else { tree vec_oprnd; - *vec_oprnds0 = VEC_alloc (tree, heap, 1); + vec_oprnds0->create (1); vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL); - VEC_quick_push (tree, *vec_oprnds0, vec_oprnd); + vec_oprnds0->quick_push (vec_oprnd); if (op1) { - *vec_oprnds1 = VEC_alloc (tree, heap, 1); + vec_oprnds1->create (1); vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL); - VEC_quick_push (tree, *vec_oprnds1, vec_oprnd); + vec_oprnds1->quick_push (vec_oprnd); } } } @@ -1720,7 +1722,7 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type}; gimple new_stmt = NULL; int ncopies, j; - VEC(tree, heap) *vargs = NULL; + vec<tree> vargs = vec<tree>(); enum { NARROW, NONE, WIDEN } modifier; size_t i, nargs; tree lhs; @@ -1875,50 +1877,43 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, { /* Build argument list for the vectorized call. */ if (j == 0) - vargs = VEC_alloc (tree, heap, nargs); + vargs.create (nargs); else - VEC_truncate (tree, vargs, 0); + vargs.truncate (0); if (slp_node) { - VEC (slp_void_p, heap) *vec_defs - = VEC_alloc (slp_void_p, heap, nargs); - VEC (tree, heap) *vec_oprnds0; + vec<slp_void_p> vec_defs; + vec_defs.create (nargs); + vec<tree> vec_oprnds0; for (i = 0; i < nargs; i++) - VEC_quick_push (tree, vargs, gimple_call_arg (stmt, i)); + vargs.quick_push (gimple_call_arg (stmt, i)); vect_get_slp_defs (vargs, slp_node, &vec_defs, -1); - vec_oprnds0 - = (VEC (tree, heap) *) VEC_index (slp_void_p, vec_defs, 0); + vec_oprnds0 = *((vec<tree> *) vec_defs[0]); /* Arguments are ready. Create the new vector stmt. 
*/ - FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vec_oprnd0) + FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0) { size_t k; for (k = 0; k < nargs; k++) { - VEC (tree, heap) *vec_oprndsk - = (VEC (tree, heap) *) - VEC_index (slp_void_p, vec_defs, k); - VEC_replace (tree, vargs, k, - VEC_index (tree, vec_oprndsk, i)); + vec<tree> vec_oprndsk = *((vec<tree> *) vec_defs[k]); + vargs[k] = vec_oprndsk[i]; } new_stmt = gimple_build_call_vec (fndecl, vargs); new_temp = make_ssa_name (vec_dest, new_stmt); gimple_call_set_lhs (new_stmt, new_temp); vect_finish_stmt_generation (stmt, new_stmt, gsi); - VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), - new_stmt); + SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); } for (i = 0; i < nargs; i++) { - VEC (tree, heap) *vec_oprndsi - = (VEC (tree, heap) *) - VEC_index (slp_void_p, vec_defs, i); - VEC_free (tree, heap, vec_oprndsi); + vec<tree> vec_oprndsi = *((vec<tree> *) vec_defs[i]); + vec_oprndsi.release (); } - VEC_free (slp_void_p, heap, vec_defs); + vec_defs.release (); continue; } @@ -1935,7 +1930,7 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0); } - VEC_quick_push (tree, vargs, vec_oprnd0); + vargs.quick_push (vec_oprnd0); } new_stmt = gimple_build_call_vec (fndecl, vargs); @@ -1958,54 +1953,45 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, { /* Build argument list for the vectorized call. */ if (j == 0) - vargs = VEC_alloc (tree, heap, nargs * 2); + vargs.create (nargs * 2); else - VEC_truncate (tree, vargs, 0); + vargs.truncate (0); if (slp_node) { - VEC (slp_void_p, heap) *vec_defs - = VEC_alloc (slp_void_p, heap, nargs); - VEC (tree, heap) *vec_oprnds0; + vec<slp_void_p> vec_defs; + vec_defs.create (nargs); + vec<tree> vec_oprnds0; for (i = 0; i < nargs; i++) - VEC_quick_push (tree, vargs, gimple_call_arg (stmt, i)); + vargs.quick_push (gimple_call_arg (stmt, i)); vect_get_slp_defs (vargs, slp_node, &vec_defs, -1); - vec_oprnds0 - = (VEC (tree, heap) *) VEC_index (slp_void_p, vec_defs, 0); + vec_oprnds0 = *((vec<tree> *) vec_defs[0]); /* Arguments are ready. Create the new vector stmt. 
*/ - for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vec_oprnd0); - i += 2) + for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2) { size_t k; - VEC_truncate (tree, vargs, 0); + vargs.truncate (0); for (k = 0; k < nargs; k++) { - VEC (tree, heap) *vec_oprndsk - = (VEC (tree, heap) *) - VEC_index (slp_void_p, vec_defs, k); - VEC_quick_push (tree, vargs, - VEC_index (tree, vec_oprndsk, i)); - VEC_quick_push (tree, vargs, - VEC_index (tree, vec_oprndsk, i + 1)); + vec<tree> vec_oprndsk = *((vec<tree> *) vec_defs[k]); + vargs.quick_push (vec_oprndsk[i]); + vargs.quick_push (vec_oprndsk[i + 1]); } new_stmt = gimple_build_call_vec (fndecl, vargs); new_temp = make_ssa_name (vec_dest, new_stmt); gimple_call_set_lhs (new_stmt, new_temp); vect_finish_stmt_generation (stmt, new_stmt, gsi); - VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), - new_stmt); + SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); } for (i = 0; i < nargs; i++) { - VEC (tree, heap) *vec_oprndsi - = (VEC (tree, heap) *) - VEC_index (slp_void_p, vec_defs, i); - VEC_free (tree, heap, vec_oprndsi); + vec<tree> vec_oprndsi = *((vec<tree> *) vec_defs[i]); + vec_oprndsi.release (); } - VEC_free (slp_void_p, heap, vec_defs); + vec_defs.release (); continue; } @@ -2028,8 +2014,8 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0); } - VEC_quick_push (tree, vargs, vec_oprnd0); - VEC_quick_push (tree, vargs, vec_oprnd1); + vargs.quick_push (vec_oprnd0); + vargs.quick_push (vec_oprnd1); } new_stmt = gimple_build_call_vec (fndecl, vargs); @@ -2054,7 +2040,7 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, return false; } - VEC_free (tree, heap, vargs); + vargs.release (); /* Update the exception handling table with the vector stmt if necessary. */ if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt)) @@ -2140,7 +2126,7 @@ vect_gen_widened_results_half (enum tree_code code, static void vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt, - VEC (tree, heap) **vec_oprnds, int multi_step_cvt) + vec<tree> *vec_oprnds, int multi_step_cvt) { tree vec_oprnd; @@ -2152,11 +2138,11 @@ vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt, else vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd); - VEC_quick_push (tree, *vec_oprnds, vec_oprnd); + vec_oprnds->quick_push (vec_oprnd); /* Get second vector operand. */ vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd); - VEC_quick_push (tree, *vec_oprnds, vec_oprnd); + vec_oprnds->quick_push (vec_oprnd); *oprnd = vec_oprnd; @@ -2172,9 +2158,9 @@ vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt, recursively. */ static void -vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds, +vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds, int multi_step_cvt, gimple stmt, - VEC (tree, heap) *vec_dsts, + vec<tree> vec_dsts, gimple_stmt_iterator *gsi, slp_tree slp_node, enum tree_code code, stmt_vec_info *prev_stmt_info) @@ -2184,13 +2170,13 @@ vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds, gimple new_stmt; stmt_vec_info stmt_info = vinfo_for_stmt (stmt); - vec_dest = VEC_pop (tree, vec_dsts); + vec_dest = vec_dsts.pop (); - for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2) + for (i = 0; i < vec_oprnds->length (); i += 2) { /* Create demotion operation. 
*/ - vop0 = VEC_index (tree, *vec_oprnds, i); - vop1 = VEC_index (tree, *vec_oprnds, i + 1); + vop0 = (*vec_oprnds)[i]; + vop1 = (*vec_oprnds)[i + 1]; new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1); new_tmp = make_ssa_name (vec_dest, new_stmt); gimple_assign_set_lhs (new_stmt, new_tmp); @@ -2198,14 +2184,14 @@ vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds, if (multi_step_cvt) /* Store the resulting vector for next recursive call. */ - VEC_replace (tree, *vec_oprnds, i/2, new_tmp); + (*vec_oprnds)[i/2] = new_tmp; else { /* This is the last step of the conversion sequence. Store the vectors in SLP_NODE or in vector info of the scalar statement (or in STMT_VINFO_RELATED_STMT chain). */ if (slp_node) - VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt); + SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); else { if (!*prev_stmt_info) @@ -2226,14 +2212,14 @@ vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds, { /* At each level of recursion we have half of the operands we had at the previous level. */ - VEC_truncate (tree, *vec_oprnds, (i+1)/2); + vec_oprnds->truncate ((i+1)/2); vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1, stmt, vec_dsts, gsi, slp_node, VEC_PACK_TRUNC_EXPR, prev_stmt_info); } - VEC_quick_push (tree, vec_dsts, vec_dest); + vec_dsts.quick_push (vec_dest); } @@ -2242,8 +2228,8 @@ vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds, the resulting vectors and call the function recursively. */ static void -vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0, - VEC (tree, heap) **vec_oprnds1, +vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0, + vec<tree> *vec_oprnds1, gimple stmt, tree vec_dest, gimple_stmt_iterator *gsi, enum tree_code code1, @@ -2253,13 +2239,13 @@ vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0, int i; tree vop0, vop1, new_tmp1, new_tmp2; gimple new_stmt1, new_stmt2; - VEC (tree, heap) *vec_tmp = NULL; + vec<tree> vec_tmp = vec<tree>(); - vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2); - FOR_EACH_VEC_ELT (tree, *vec_oprnds0, i, vop0) + vec_tmp.create (vec_oprnds0->length () * 2); + FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0) { if (op_type == binary_op) - vop1 = VEC_index (tree, *vec_oprnds1, i); + vop1 = (*vec_oprnds1)[i]; else vop1 = NULL_TREE; @@ -2280,11 +2266,11 @@ vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0, } /* Store the results for the next step. 
*/ - VEC_quick_push (tree, vec_tmp, new_tmp1); - VEC_quick_push (tree, vec_tmp, new_tmp2); + vec_tmp.quick_push (new_tmp1); + vec_tmp.quick_push (new_tmp2); } - VEC_free (tree, heap, *vec_oprnds0); + vec_oprnds0->truncate (0); *vec_oprnds0 = vec_tmp; } @@ -2319,11 +2305,13 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi, int ncopies, i, j; tree lhs_type, rhs_type; enum { NARROW, NONE, WIDEN } modifier; - VEC (tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL; + vec<tree> vec_oprnds0 = vec<tree>(); + vec<tree> vec_oprnds1 = vec<tree>(); tree vop0; bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); int multi_step_cvt = 0; - VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL; + vec<tree> vec_dsts = vec<tree>(); + vec<tree> interm_types = vec<tree>(); tree last_oprnd, intermediate_type, cvt_type = NULL_TREE; int op_type; enum machine_mode rhs_mode; @@ -2527,7 +2515,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi, else { multi_step_cvt++; - VEC_safe_push (tree, heap, interm_types, cvt_type); + interm_types.safe_push (cvt_type); cvt_type = NULL_TREE; } break; @@ -2583,7 +2571,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi, STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type; vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt); } - VEC_free (tree, heap, interm_types); + interm_types.release (); return true; } @@ -2605,20 +2593,20 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi, We create vector destinations for the intermediate type (TYPES) received from supportable_*_operation, and store them in the correct order for future use in vect_create_vectorized_*_stmts (). */ - vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1); + vec_dsts.create (multi_step_cvt + 1); vec_dest = vect_create_destination_var (scalar_dest, (cvt_type && modifier == WIDEN) ? cvt_type : vectype_out); - VEC_quick_push (tree, vec_dsts, vec_dest); + vec_dsts.quick_push (vec_dest); if (multi_step_cvt) { - for (i = VEC_length (tree, interm_types) - 1; - VEC_iterate (tree, interm_types, i, intermediate_type); i--) + for (i = interm_types.length () - 1; + interm_types.iterate (i, &intermediate_type); i--) { vec_dest = vect_create_destination_var (scalar_dest, intermediate_type); - VEC_quick_push (tree, vec_dsts, vec_dest); + vec_dsts.quick_push (vec_dest); } } @@ -2630,22 +2618,19 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi, if (!slp_node) { if (modifier == NONE) - vec_oprnds0 = VEC_alloc (tree, heap, 1); + vec_oprnds0.create (1); else if (modifier == WIDEN) { - vec_oprnds0 = VEC_alloc (tree, heap, - (multi_step_cvt - ? vect_pow2 (multi_step_cvt) : 1)); + vec_oprnds0.create (multi_step_cvt ? vect_pow2(multi_step_cvt) : 1); if (op_type == binary_op) - vec_oprnds1 = VEC_alloc (tree, heap, 1); + vec_oprnds1.create (1); } else - vec_oprnds0 = VEC_alloc (tree, heap, - 2 * (multi_step_cvt - ? vect_pow2 (multi_step_cvt) : 1)); + vec_oprnds0.create ( + 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1)); } else if (code == WIDEN_LSHIFT_EXPR) - vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size); + vec_oprnds1.create (slp_node->vec_stmts_size); last_oprnd = op0; prev_stmt_info = NULL; @@ -2660,7 +2645,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi, else vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL); - FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0) + FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0) { /* Arguments are ready, create the new vector stmt. 
*/ if (code1 == CALL_EXPR) @@ -2680,8 +2665,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi, vect_finish_stmt_generation (stmt, new_stmt, gsi); if (slp_node) - VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), - new_stmt); + SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); } if (j == 0) @@ -2713,7 +2697,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi, for SLP_NODE. We check during the analysis that all the shift arguments are the same. */ for (k = 0; k < slp_node->vec_stmts_size - 1; k++) - VEC_quick_push (tree, vec_oprnds1, vec_oprnd1); + vec_oprnds1.quick_push (vec_oprnd1); vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL, slp_node, -1); @@ -2725,7 +2709,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi, else { vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL); - VEC_quick_push (tree, vec_oprnds0, vec_oprnd0); + vec_oprnds0.quick_push (vec_oprnd0); if (op_type == binary_op) { if (code == WIDEN_LSHIFT_EXPR) @@ -2733,15 +2717,15 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi, else vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL); - VEC_quick_push (tree, vec_oprnds1, vec_oprnd1); + vec_oprnds1.quick_push (vec_oprnd1); } } } else { vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0); - VEC_truncate (tree, vec_oprnds0, 0); - VEC_quick_push (tree, vec_oprnds0, vec_oprnd0); + vec_oprnds0.truncate (0); + vec_oprnds0.quick_push (vec_oprnd0); if (op_type == binary_op) { if (code == WIDEN_LSHIFT_EXPR) @@ -2749,15 +2733,15 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi, else vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1); - VEC_truncate (tree, vec_oprnds1, 0); - VEC_quick_push (tree, vec_oprnds1, vec_oprnd1); + vec_oprnds1.truncate (0); + vec_oprnds1.quick_push (vec_oprnd1); } } /* Arguments are ready. Create the new vector stmts. */ for (i = multi_step_cvt; i >= 0; i--) { - tree this_dest = VEC_index (tree, vec_dsts, i); + tree this_dest = vec_dsts[i]; enum tree_code c1 = code1, c2 = code2; if (i == 0 && codecvt2 != ERROR_MARK) { @@ -2771,7 +2755,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi, op_type); } - FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0) + FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0) { if (cvt_type) { @@ -2796,8 +2780,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi, new_stmt = SSA_NAME_DEF_STMT (vop0); if (slp_node) - VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), - new_stmt); + SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); else { if (!prev_stmt_info) @@ -2825,14 +2808,14 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi, slp_node, -1); else { - VEC_truncate (tree, vec_oprnds0, 0); + vec_oprnds0.truncate (0); vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0, vect_pow2 (multi_step_cvt) - 1); } /* Arguments are ready. Create the new vector stmts. 
*/ if (cvt_type) - FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0) + FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0) { if (codecvt1 == CALL_EXPR) { @@ -2849,7 +2832,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi, } vect_finish_stmt_generation (stmt, new_stmt, gsi); - VEC_replace (tree, vec_oprnds0, i, new_temp); + vec_oprnds0[i] = new_temp; } vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt, @@ -2862,10 +2845,10 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi, break; } - VEC_free (tree, heap, vec_oprnds0); - VEC_free (tree, heap, vec_oprnds1); - VEC_free (tree, heap, vec_dsts); - VEC_free (tree, heap, interm_types); + vec_oprnds0.release (); + vec_oprnds1.release (); + vec_dsts.release (); + interm_types.release (); return true; } @@ -2895,7 +2878,7 @@ vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi, unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype); int ncopies; int i, j; - VEC(tree,heap) *vec_oprnds = NULL; + vec<tree> vec_oprnds = vec<tree>(); tree vop; bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); gimple new_stmt = NULL; @@ -3004,7 +2987,7 @@ vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi, vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL); /* Arguments are ready. create the new vector stmt. */ - FOR_EACH_VEC_ELT (tree, vec_oprnds, i, vop) + FOR_EACH_VEC_ELT (vec_oprnds, i, vop) { if (CONVERT_EXPR_CODE_P (code) || code == VIEW_CONVERT_EXPR) @@ -3014,7 +2997,7 @@ vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi, gimple_assign_set_lhs (new_stmt, new_temp); vect_finish_stmt_generation (stmt, new_stmt, gsi); if (slp_node) - VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt); + SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); } if (slp_node) @@ -3028,7 +3011,7 @@ vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi, prev_stmt_info = vinfo_for_stmt (new_stmt); } - VEC_free (tree, heap, vec_oprnds); + vec_oprnds.release (); return true; } @@ -3104,7 +3087,8 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi, tree op1_vectype; int ncopies; int j, i; - VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL; + vec<tree> vec_oprnds0 = vec<tree>(); + vec<tree> vec_oprnds1 = vec<tree>(); tree vop0, vop1; unsigned int k; bool scalar_shift_arg = true; @@ -3208,10 +3192,10 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi, a scalar shift. */ if (slp_node) { - VEC (gimple, heap) *stmts = SLP_TREE_SCALAR_STMTS (slp_node); + vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node); gimple slpstmt; - FOR_EACH_VEC_ELT (gimple, stmts, k, slpstmt) + FOR_EACH_VEC_ELT (stmts, k, slpstmt) if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0)) scalar_shift_arg = false; } @@ -3361,11 +3345,11 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi, allocate VEC_OPRNDS1 only in case of binary operation. 
*/ if (!slp_node) { - vec_oprnds0 = VEC_alloc (tree, heap, 1); - vec_oprnds1 = VEC_alloc (tree, heap, 1); + vec_oprnds0.create (1); + vec_oprnds1.create (1); } else if (scalar_shift_arg) - vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size); + vec_oprnds1.create (slp_node->vec_stmts_size); prev_stmt_info = NULL; for (j = 0; j < ncopies; j++) @@ -3386,7 +3370,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi, dump_printf_loc (MSG_NOTE, vect_location, "operand 1 using scalar mode."); vec_oprnd1 = op1; - VEC_quick_push (tree, vec_oprnds1, vec_oprnd1); + vec_oprnds1.quick_push (vec_oprnd1); if (slp_node) { /* Store vec_oprnd1 for every vector stmt to be created @@ -3395,7 +3379,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi, TODO: Allow different constants for different vector stmts generated for an SLP instance. */ for (k = 0; k < slp_node->vec_stmts_size - 1; k++) - VEC_quick_push (tree, vec_oprnds1, vec_oprnd1); + vec_oprnds1.quick_push (vec_oprnd1); } } } @@ -3414,15 +3398,15 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi, vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1); /* Arguments are ready. Create the new vector stmt. */ - FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0) + FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0) { - vop1 = VEC_index (tree, vec_oprnds1, i); + vop1 = vec_oprnds1[i]; new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1); new_temp = make_ssa_name (vec_dest, new_stmt); gimple_assign_set_lhs (new_stmt, new_temp); vect_finish_stmt_generation (stmt, new_stmt, gsi); if (slp_node) - VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt); + SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); } if (slp_node) @@ -3435,8 +3419,8 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi, prev_stmt_info = vinfo_for_stmt (new_stmt); } - VEC_free (tree, heap, vec_oprnds0); - VEC_free (tree, heap, vec_oprnds1); + vec_oprnds0.release (); + vec_oprnds1.release (); return true; } @@ -3481,7 +3465,9 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi, tree vectype_out; int ncopies; int j, i; - VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL, *vec_oprnds2 = NULL; + vec<tree> vec_oprnds0 = vec<tree>(); + vec<tree> vec_oprnds1 = vec<tree>(); + vec<tree> vec_oprnds2 = vec<tree>(); tree vop0, vop1, vop2; bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); int vf; @@ -3746,9 +3732,10 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi, slp_node, -1); if (op_type == ternary_op) { - vec_oprnds2 = VEC_alloc (tree, heap, 1); - VEC_quick_push (tree, vec_oprnds2, - vect_get_vec_def_for_operand (op2, stmt, NULL)); + vec_oprnds2.create (1); + vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2, + stmt, + NULL)); } } else @@ -3756,27 +3743,26 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi, vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1); if (op_type == ternary_op) { - tree vec_oprnd = VEC_pop (tree, vec_oprnds2); - VEC_quick_push (tree, vec_oprnds2, - vect_get_vec_def_for_stmt_copy (dt[2], - vec_oprnd)); + tree vec_oprnd = vec_oprnds2.pop (); + vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2], + vec_oprnd)); } } /* Arguments are ready. Create the new vector stmt. */ - FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0) + FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0) { vop1 = ((op_type == binary_op || op_type == ternary_op) - ? VEC_index (tree, vec_oprnds1, i) : NULL_TREE); + ? 
vec_oprnds1[i] : NULL_TREE); vop2 = ((op_type == ternary_op) - ? VEC_index (tree, vec_oprnds2, i) : NULL_TREE); + ? vec_oprnds2[i] : NULL_TREE); new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1, vop2); new_temp = make_ssa_name (vec_dest, new_stmt); gimple_assign_set_lhs (new_stmt, new_temp); vect_finish_stmt_generation (stmt, new_stmt, gsi); if (slp_node) - VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt); + SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); } if (slp_node) @@ -3789,11 +3775,9 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi, prev_stmt_info = vinfo_for_stmt (new_stmt); } - VEC_free (tree, heap, vec_oprnds0); - if (vec_oprnds1) - VEC_free (tree, heap, vec_oprnds1); - if (vec_oprnds2) - VEC_free (tree, heap, vec_oprnds2); + vec_oprnds0.release (); + vec_oprnds1.release (); + vec_oprnds2.release (); return true; } @@ -3836,9 +3820,11 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, bool grouped_store = false; bool store_lanes_p = false; unsigned int group_size, i; - VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL; + vec<tree> dr_chain = vec<tree>(); + vec<tree> oprnds = vec<tree>(); + vec<tree> result_chain = vec<tree>(); bool inv_p; - VEC(tree,heap) *vec_oprnds = NULL; + vec<tree> vec_oprnds = vec<tree>(); bool slp = (slp_node != NULL); unsigned int vec_num; bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); @@ -3992,7 +3978,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, /* VEC_NUM is the number of vect stmts to be created for this group. */ vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); - first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0); + first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0]; first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)); op = gimple_assign_rhs1 (first_stmt); } @@ -4012,8 +3998,8 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, dump_printf_loc (MSG_NOTE, vect_location, "transform store. ncopies = %d", ncopies); - dr_chain = VEC_alloc (tree, heap, group_size); - oprnds = VEC_alloc (tree, heap, group_size); + dr_chain.create (group_size); + oprnds.create (group_size); alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false); gcc_assert (alignment_support_scheme); @@ -4081,7 +4067,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL, slp_node, -1); - vec_oprnd = VEC_index (tree, vec_oprnds, 0); + vec_oprnd = vec_oprnds[0]; } else { @@ -4106,8 +4092,8 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt, NULL); - VEC_quick_push(tree, dr_chain, vec_oprnd); - VEC_quick_push(tree, oprnds, vec_oprnd); + dr_chain.quick_push (vec_oprnd); + oprnds.quick_push (vec_oprnd); next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt)); } } @@ -4131,12 +4117,12 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, OPRNDS are of size 1. 
*/ for (i = 0; i < group_size; i++) { - op = VEC_index (tree, oprnds, i); + op = oprnds[i]; vect_is_simple_use (op, NULL, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt); vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op); - VEC_replace(tree, dr_chain, i, vec_oprnd); - VEC_replace(tree, oprnds, i, vec_oprnd); + dr_chain[i] = vec_oprnd; + oprnds[i] = vec_oprnd; } dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, TYPE_SIZE_UNIT (aggr_type)); @@ -4150,7 +4136,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, vec_array = create_vector_array (vectype, vec_num); for (i = 0; i < vec_num; i++) { - vec_oprnd = VEC_index (tree, dr_chain, i); + vec_oprnd = dr_chain[i]; write_vector_array (stmt, gsi, vec_oprnd, vec_array, i); } @@ -4166,7 +4152,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, new_stmt = NULL; if (grouped_store) { - result_chain = VEC_alloc (tree, heap, group_size); + result_chain.create (group_size); /* Permute. */ vect_permute_store_chain (dr_chain, group_size, stmt, gsi, &result_chain); @@ -4183,11 +4169,11 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, stmt, NULL_TREE); if (slp) - vec_oprnd = VEC_index (tree, vec_oprnds, i); + vec_oprnd = vec_oprnds[i]; else if (grouped_store) /* For grouped stores vectorized defs are interleaved in vect_permute_store_chain(). */ - vec_oprnd = VEC_index (tree, result_chain, i); + vec_oprnd = result_chain[i]; data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr, build_int_cst (reference_alias_ptr_type @@ -4235,12 +4221,10 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, } } - VEC_free (tree, heap, dr_chain); - VEC_free (tree, heap, oprnds); - if (result_chain) - VEC_free (tree, heap, result_chain); - if (vec_oprnds) - VEC_free (tree, heap, vec_oprnds); + dr_chain.release (); + oprnds.release (); + result_chain.release (); + vec_oprnds.release (); return true; } @@ -4353,7 +4337,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, tree offset = NULL_TREE; tree realignment_token = NULL_TREE; gimple phi = NULL; - VEC(tree,heap) *dr_chain = NULL; + vec<tree> dr_chain = vec<tree>(); bool grouped_load = false; bool load_lanes_p = false; gimple first_stmt; @@ -4694,7 +4678,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, tree ref = DR_REF (dr); tree ivstep; tree running_off; - VEC(constructor_elt, gc) *v = NULL; + vec<constructor_elt, va_gc> *v = NULL; gimple_seq stmts = NULL; gcc_assert (stride_base && stride_step); @@ -4737,7 +4721,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, { tree vec_inv; - v = VEC_alloc (constructor_elt, gc, nunits); + vec_alloc (v, nunits); for (i = 0; i < nunits; i++) { tree newref, newoff; @@ -4791,9 +4775,9 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, { first_stmt = GROUP_FIRST_ELEMENT (stmt_info); if (slp - && !SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance) - && first_stmt != VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0)) - first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0); + && !SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance).exists () + && first_stmt != SLP_TREE_SCALAR_STMTS (slp_node)[0]) + first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0]; /* Check if the chain of loads is already vectorized. 
*/ if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))) @@ -4809,7 +4793,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, { grouped_load = false; vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); - if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance)) + if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance).exists ()) slp_perm = true; } else @@ -4978,7 +4962,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, TYPE_SIZE_UNIT (aggr_type)); if (grouped_load || slp_perm) - dr_chain = VEC_alloc (tree, heap, vec_num); + dr_chain.create (vec_num); if (load_lanes_p) { @@ -4998,7 +4982,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, { new_temp = read_vector_array (stmt, gsi, scalar_dest, vec_array, i); - VEC_quick_push (tree, dr_chain, new_temp); + dr_chain.quick_push (new_temp); } /* Record the mapping between SSA_NAMEs and statements. */ @@ -5173,12 +5157,11 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, /* Collect vector loads and later create their permutation in vect_transform_grouped_load (). */ if (grouped_load || slp_perm) - VEC_quick_push (tree, dr_chain, new_temp); + dr_chain.quick_push (new_temp); /* Store vector loads in the corresponding SLP_NODE. */ if (slp && !slp_perm) - VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), - new_stmt); + SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); } } @@ -5190,7 +5173,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf, slp_node_instance, false)) { - VEC_free (tree, heap, dr_chain); + dr_chain.release (); return false; } } @@ -5211,8 +5194,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, prev_stmt_info = vinfo_for_stmt (new_stmt); } } - if (dr_chain) - VEC_free (tree, heap, dr_chain); + dr_chain.release (); } return true; @@ -5308,8 +5290,10 @@ vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi, stmt_vec_info prev_stmt_info = NULL; int i, j; bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); - VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL; - VEC (tree, heap) *vec_oprnds2 = NULL, *vec_oprnds3 = NULL; + vec<tree> vec_oprnds0 = vec<tree>(); + vec<tree> vec_oprnds1 = vec<tree>(); + vec<tree> vec_oprnds2 = vec<tree>(); + vec<tree> vec_oprnds3 = vec<tree>(); tree vec_cmp_type = vectype; if (slp_node || PURE_SLP_STMT (stmt_info)) @@ -5402,10 +5386,10 @@ vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi, if (!slp_node) { - vec_oprnds0 = VEC_alloc (tree, heap, 1); - vec_oprnds1 = VEC_alloc (tree, heap, 1); - vec_oprnds2 = VEC_alloc (tree, heap, 1); - vec_oprnds3 = VEC_alloc (tree, heap, 1); + vec_oprnds0.create (1); + vec_oprnds1.create (1); + vec_oprnds2.create (1); + vec_oprnds3.create (1); } /* Handle def. 
*/ @@ -5420,22 +5404,23 @@ vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi, { if (slp_node) { - VEC (tree, heap) *ops = VEC_alloc (tree, heap, 4); - VEC (slp_void_p, heap) *vec_defs; - - vec_defs = VEC_alloc (slp_void_p, heap, 4); - VEC_safe_push (tree, heap, ops, TREE_OPERAND (cond_expr, 0)); - VEC_safe_push (tree, heap, ops, TREE_OPERAND (cond_expr, 1)); - VEC_safe_push (tree, heap, ops, then_clause); - VEC_safe_push (tree, heap, ops, else_clause); + vec<tree> ops; + ops.create (4); + vec<slp_void_p> vec_defs; + + vec_defs.create (4); + ops.safe_push (TREE_OPERAND (cond_expr, 0)); + ops.safe_push (TREE_OPERAND (cond_expr, 1)); + ops.safe_push (then_clause); + ops.safe_push (else_clause); vect_get_slp_defs (ops, slp_node, &vec_defs, -1); - vec_oprnds3 = (VEC (tree, heap) *) VEC_pop (slp_void_p, vec_defs); - vec_oprnds2 = (VEC (tree, heap) *) VEC_pop (slp_void_p, vec_defs); - vec_oprnds1 = (VEC (tree, heap) *) VEC_pop (slp_void_p, vec_defs); - vec_oprnds0 = (VEC (tree, heap) *) VEC_pop (slp_void_p, vec_defs); + vec_oprnds3 = *((vec<tree> *) vec_defs.pop ()); + vec_oprnds2 = *((vec<tree> *) vec_defs.pop ()); + vec_oprnds1 = *((vec<tree> *) vec_defs.pop ()); + vec_oprnds0 = *((vec<tree> *) vec_defs.pop ()); - VEC_free (tree, heap, ops); - VEC_free (slp_void_p, heap, vec_defs); + ops.release (); + vec_defs.release (); } else { @@ -5474,29 +5459,29 @@ vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi, else { vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0], - VEC_pop (tree, vec_oprnds0)); + vec_oprnds0.pop ()); vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1], - VEC_pop (tree, vec_oprnds1)); + vec_oprnds1.pop ()); vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2], - VEC_pop (tree, vec_oprnds2)); + vec_oprnds2.pop ()); vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3], - VEC_pop (tree, vec_oprnds3)); + vec_oprnds3.pop ()); } if (!slp_node) { - VEC_quick_push (tree, vec_oprnds0, vec_cond_lhs); - VEC_quick_push (tree, vec_oprnds1, vec_cond_rhs); - VEC_quick_push (tree, vec_oprnds2, vec_then_clause); - VEC_quick_push (tree, vec_oprnds3, vec_else_clause); + vec_oprnds0.quick_push (vec_cond_lhs); + vec_oprnds1.quick_push (vec_cond_rhs); + vec_oprnds2.quick_push (vec_then_clause); + vec_oprnds3.quick_push (vec_else_clause); } /* Arguments are ready. Create the new vector stmt. 
*/ - FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vec_cond_lhs) + FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs) { - vec_cond_rhs = VEC_index (tree, vec_oprnds1, i); - vec_then_clause = VEC_index (tree, vec_oprnds2, i); - vec_else_clause = VEC_index (tree, vec_oprnds3, i); + vec_cond_rhs = vec_oprnds1[i]; + vec_then_clause = vec_oprnds2[i]; + vec_else_clause = vec_oprnds3[i]; vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type, vec_cond_lhs, vec_cond_rhs); @@ -5508,7 +5493,7 @@ vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi, gimple_assign_set_lhs (new_stmt, new_temp); vect_finish_stmt_generation (stmt, new_stmt, gsi); if (slp_node) - VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt); + SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); } if (slp_node) @@ -5522,10 +5507,10 @@ vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi, prev_stmt_info = vinfo_for_stmt (new_stmt); } - VEC_free (tree, heap, vec_oprnds0); - VEC_free (tree, heap, vec_oprnds1); - VEC_free (tree, heap, vec_oprnds2); - VEC_free (tree, heap, vec_oprnds3); + vec_oprnds0.release (); + vec_oprnds1.release (); + vec_oprnds2.release (); + vec_oprnds3.release (); return true; } @@ -5984,7 +5969,7 @@ new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo, else STMT_VINFO_DEF_TYPE (res) = vect_internal_def; - STMT_VINFO_SAME_ALIGN_REFS (res) = NULL; + STMT_VINFO_SAME_ALIGN_REFS (res).create (0); STMT_SLP_TYPE (res) = loop_vect; GROUP_FIRST_ELEMENT (res) = NULL; GROUP_NEXT_ELEMENT (res) = NULL; @@ -6003,8 +5988,8 @@ new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo, void init_stmt_vec_info_vec (void) { - gcc_assert (!stmt_vec_info_vec); - stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50); + gcc_assert (!stmt_vec_info_vec.exists ()); + stmt_vec_info_vec.create (50); } @@ -6013,8 +5998,8 @@ init_stmt_vec_info_vec (void) void free_stmt_vec_info_vec (void) { - gcc_assert (stmt_vec_info_vec); - VEC_free (vec_void_p, heap, stmt_vec_info_vec); + gcc_assert (stmt_vec_info_vec.exists ()); + stmt_vec_info_vec.release (); } @@ -6049,7 +6034,7 @@ free_stmt_vec_info (gimple stmt) } } - VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info)); + STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release (); set_vinfo_for_stmt (stmt, NULL); free (stmt_info); } @@ -6392,7 +6377,7 @@ supportable_widening_operation (enum tree_code code, gimple stmt, tree vectype_out, tree vectype_in, enum tree_code *code1, enum tree_code *code2, int *multi_step_cvt, - VEC (tree, heap) **interm_types) + vec<tree> *interm_types) { stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info); @@ -6538,7 +6523,7 @@ supportable_widening_operation (enum tree_code code, gimple stmt, intermediate steps in promotion sequence. We try MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. 
*/ - *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS); + interm_types->create (MAX_INTERM_CVT_STEPS); for (i = 0; i < MAX_INTERM_CVT_STEPS; i++) { intermediate_mode = insn_data[icode1].operand[0].mode; @@ -6559,7 +6544,7 @@ supportable_widening_operation (enum tree_code code, gimple stmt, == CODE_FOR_nothing)) break; - VEC_quick_push (tree, *interm_types, intermediate_type); + interm_types->quick_push (intermediate_type); (*multi_step_cvt)++; if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype) @@ -6570,7 +6555,7 @@ supportable_widening_operation (enum tree_code code, gimple stmt, prev_mode = intermediate_mode; } - VEC_free (tree, heap, *interm_types); + interm_types->release (); return false; } @@ -6599,7 +6584,7 @@ bool supportable_narrowing_operation (enum tree_code code, tree vectype_out, tree vectype_in, enum tree_code *code1, int *multi_step_cvt, - VEC (tree, heap) **interm_types) + vec<tree> *interm_types) { enum machine_mode vec_mode; enum insn_code icode1; @@ -6683,7 +6668,7 @@ supportable_narrowing_operation (enum tree_code code, /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS intermediate steps in promotion sequence. We try MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */ - *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS); + interm_types->create (MAX_INTERM_CVT_STEPS); for (i = 0; i < MAX_INTERM_CVT_STEPS; i++) { intermediate_mode = insn_data[icode1].operand[0].mode; @@ -6699,7 +6684,7 @@ supportable_narrowing_operation (enum tree_code code, == CODE_FOR_nothing)) break; - VEC_quick_push (tree, *interm_types, intermediate_type); + interm_types->quick_push (intermediate_type); (*multi_step_cvt)++; if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype)) @@ -6709,6 +6694,6 @@ supportable_narrowing_operation (enum tree_code code, optab1 = interm_optab; } - VEC_free (tree, heap, *interm_types); + interm_types->release (); return false; } |