author | Richard Biener <rguenther@suse.de> | 2013-05-10 07:52:25 +0000 |
---|---|---|
committer | Richard Biener <rguenth@gcc.gnu.org> | 2013-05-10 07:52:25 +0000 |
commit | afb119beca72d64c166127445ca6883358e24174 (patch) | |
tree | b74da88625d1181dbe2a76c0891227920028cbd0 /gcc/tree-vect-data-refs.c | |
parent | 01ae486155f3c4fb45a52bb4e95527d072b84966 (diff) | |
tree-vect-data-refs.c (vect_enhance_data_refs_alignment): Do not disable peeling when we version for aliasing.
2013-05-10 Richard Biener <rguenther@suse.de>
* tree-vect-data-refs.c (vect_enhance_data_refs_alignment): Do not
disable peeling when we version for aliasing.
(vector_alignment_reachable_p): Honor explicit user alignment.
(vect_supportable_dr_alignment): Likewise.
* tree-vect-loop-manip.c (vect_can_advance_ivs_p): Use
STMT_VINFO_LOOP_PHI_EVOLUTION_PART instead of recomputing it.
* tree-vect-loop.c (vect_transform_loop): First apply versioning,
then peeling to arrange for the cost-model check to come first.
* gcc.target/i386/avx256-unaligned-load-2.c: Make well-defined.
* gcc.target/i386/l_fma_double_1.c: Adjust.
* gcc.target/i386/l_fma_double_2.c: Likewise.
* gcc.target/i386/l_fma_double_3.c: Likewise.
* gcc.target/i386/l_fma_double_4.c: Likewise.
* gcc.target/i386/l_fma_double_5.c: Likewise.
* gcc.target/i386/l_fma_double_6.c: Likewise.
* gcc.target/i386/l_fma_float_1.c: Likewise.
* gcc.target/i386/l_fma_float_2.c: Likewise.
* gcc.target/i386/l_fma_float_3.c: Likewise.
* gcc.target/i386/l_fma_float_4.c: Likewise.
* gcc.target/i386/l_fma_float_5.c: Likewise.
* gcc.target/i386/l_fma_float_6.c: Likewise.
From-SVN: r198767
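
For illustration only (not taken from the commit or its testcases), a loop of the kind this change targets: without `restrict`, vectorizing requires versioning with a run-time alias check, and since the pointer alignment is unknown, peeling a prologue for alignment is also a candidate. Before this change, the need for alias versioning disabled peeling outright. The function name and types below are invented for the sketch.

```c
/* Hypothetical example: `dst' and `src' may overlap, so the vectorizer
   must version the loop with a run-time alias check, and their alignment
   is unknown, so peeling for alignment is also attractive.  Previously
   the two transforms could not be combined.  */
void
axpy (double *dst, const double *src, double x, int n)
{
  for (int i = 0; i < n; i++)
    dst[i] += x * src[i];
}
```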
Diffstat (limited to 'gcc/tree-vect-data-refs.c')
-rw-r--r-- | gcc/tree-vect-data-refs.c | 29 |
1 file changed, 12 insertions, 17 deletions
diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c
index c1b5826..bf0b510 100644
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -1024,7 +1024,8 @@ vector_alignment_reachable_p (struct data_reference *dr)
       if (dump_enabled_p ())
         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                          "Unknown misalignment, is_packed = %d",is_packed);
-      if (targetm.vectorize.vector_alignment_reachable (type, is_packed))
+      if ((TYPE_USER_ALIGN (type) && !is_packed)
+          || targetm.vectorize.vector_alignment_reachable (type, is_packed))
         return true;
       else
         return false;
@@ -1323,7 +1324,6 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
   bool stat;
   gimple stmt;
   stmt_vec_info stmt_info;
-  int vect_versioning_for_alias_required;
   unsigned int npeel = 0;
   bool all_misalignments_unknown = true;
   unsigned int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
@@ -1510,15 +1510,8 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
         }
     }
 
-  vect_versioning_for_alias_required
-    = LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo);
-
-  /* Temporarily, if versioning for alias is required, we disable peeling
-     until we support peeling and versioning.  Often peeling for alignment
-     will require peeling for loop-bound, which in turn requires that we
-     know how to adjust the loop ivs after the loop.  */
-  if (vect_versioning_for_alias_required
-      || !vect_can_advance_ivs_p (loop_vinfo)
+  /* Check if we can possibly peel the loop.  */
+  if (!vect_can_advance_ivs_p (loop_vinfo)
       || !slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
     do_peeling = false;
 
@@ -4722,9 +4715,10 @@ vect_supportable_dr_alignment (struct data_reference *dr,
       if (!known_alignment_for_access_p (dr))
         is_packed = not_size_aligned (DR_REF (dr));
 
-      if (targetm.vectorize.
-          support_vector_misalignment (mode, type,
-                                       DR_MISALIGNMENT (dr), is_packed))
+      if ((TYPE_USER_ALIGN (type) && !is_packed)
+          || targetm.vectorize.
+               support_vector_misalignment (mode, type,
+                                            DR_MISALIGNMENT (dr), is_packed))
         /* Can't software pipeline the loads, but can at least do them.  */
         return dr_unaligned_supported;
     }
@@ -4736,9 +4730,10 @@ vect_supportable_dr_alignment (struct data_reference *dr,
       if (!known_alignment_for_access_p (dr))
         is_packed = not_size_aligned (DR_REF (dr));
 
-      if (targetm.vectorize.
-          support_vector_misalignment (mode, type,
-                                       DR_MISALIGNMENT (dr), is_packed))
+      if ((TYPE_USER_ALIGN (type) && !is_packed)
+          || targetm.vectorize.
+               support_vector_misalignment (mode, type,
+                                            DR_MISALIGNMENT (dr), is_packed))
         return dr_unaligned_supported;
     }
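
As a hedged sketch of the control flow the ChangeLog describes (versioning applied first so the run-time check comes before the peeled prologue), written as ordinary C rather than the vectorizer's actual output; the alias check stands in for the combined cost-model/alias test, and `VF` and the 32-byte alignment target are assumptions for the example, not values from the commit.

```c
#include <stddef.h>

#define VF 4  /* assumed vectorization factor, for illustration only */

/* Conceptual shape of the transformed axpy loop: the run-time check
   guards the whole vectorized version, and prologue peeling for
   alignment happens inside the guarded branch.  This is a sketch,
   not generated code.  */
void
axpy_sketch (double *dst, const double *src, double x, size_t n)
{
  size_t i = 0;
  /* Stand-in for the versioning condition (alias + cost-model check).  */
  if (dst + n <= src || src + n <= dst)
    {
      /* Prologue peel: scalar iterations until dst is 32-byte aligned.  */
      while (i < n && ((size_t) (dst + i) % 32) != 0)
        {
          dst[i] += x * src[i];
          i++;
        }
      /* Main loop: VF elements per iteration (would use vector code).  */
      for (; i + VF <= n; i += VF)
        for (size_t j = 0; j < VF; j++)
          dst[i + j] += x * src[i + j];
      /* Epilogue for the remaining iterations.  */
      for (; i < n; i++)
        dst[i] += x * src[i];
    }
  else
    {
      /* Fallback scalar loop when the pointers may overlap.  */
      for (; i < n; i++)
        dst[i] += x * src[i];
    }
}
```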