Diffstat (limited to 'gcc/tree-vect-data-refs.c')
-rw-r--r--  gcc/tree-vect-data-refs.c | 17
1 file changed, 14 insertions, 3 deletions
diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c
index c1005ee..26431203 100644
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -2139,11 +2139,22 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
       vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
       gcc_assert (vectype);

+      /* At present we don't support versioning for alignment
+         with variable VF, since there's no guarantee that the
+         VF is a power of two.  We could relax this if we added
+         a way of enforcing a power-of-two size.  */
+      unsigned HOST_WIDE_INT size;
+      if (!GET_MODE_SIZE (TYPE_MODE (vectype)).is_constant (&size))
+        {
+          do_versioning = false;
+          break;
+        }
+
       /* The rightmost bits of an aligned address must be zeros.
          Construct the mask needed for this test.  For example,
          GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
          mask must be 15 = 0xf. */
-      mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1;
+      mask = size - 1;

       /* FORNOW: use the same mask to test all potentially unaligned
          references in the loop.  The vectorizer currently supports
@@ -6078,8 +6089,8 @@ vect_supportable_dr_alignment (struct data_reference *dr,
        ;
       else if (!loop_vinfo
                || (nested_in_vect_loop
-                   && (TREE_INT_CST_LOW (DR_STEP (dr))
-                       != GET_MODE_SIZE (TYPE_MODE (vectype)))))
+                   && maybe_ne (TREE_INT_CST_LOW (DR_STEP (dr)),
+                                GET_MODE_SIZE (TYPE_MODE (vectype)))))
        return dr_explicit_realign;
       else
        return dr_explicit_realign_optimized;
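
For illustration only (this sketch is not part of the patch; the helper name is_aligned_p and the sample addresses are invented for this example): the patch disables versioning for alignment when GET_MODE_SIZE is not a compile-time constant because the alignment test relies on the mask trick "addr & (size - 1) == 0", which is only a valid alignment check when size is a power of two. A minimal standalone C program showing the point:

/* Standalone C sketch, not GCC code: demonstrates that the mask test
   "addr & (size - 1) == 0" checks alignment only for power-of-two sizes,
   which is why versioning for alignment is disabled when the vector size
   is a runtime value not known to be a power of two.  */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the mask construction in
   vect_enhance_data_refs_alignment: size 16 gives mask 15 = 0xf.  */
static int
is_aligned_p (uintptr_t addr, uintptr_t size)
{
  uintptr_t mask = size - 1;
  return (addr & mask) == 0;
}

int
main (void)
{
  /* 16-byte vectors (e.g. V4SI): mask 0xf behaves as expected.  */
  printf ("0x1000 aligned to 16: %d\n", is_aligned_p (0x1000, 16)); /* 1 */
  printf ("0x1004 aligned to 16: %d\n", is_aligned_p (0x1004, 16)); /* 0 */

  /* A non-power-of-two size such as 12 gives mask 0xb, which is not an
     alignment test at all: 0x18 (24) is a multiple of 12, yet the mask
     test reports it as misaligned.  */
  printf ("0x18 aligned to 12:   %d\n", is_aligned_p (0x18, 12));   /* 0 */
  return 0;
}

The patch itself handles this by requiring GET_MODE_SIZE (TYPE_MODE (vectype)) to be a compile-time constant (via is_constant) before building the mask, and by comparing the possibly variable size with maybe_ne rather than != in vect_supportable_dr_alignment.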