Diffstat (limited to 'gcc/tree-vect-data-refs.c')
-rw-r--r--   gcc/tree-vect-data-refs.c   33
1 file changed, 31 insertions(+), 2 deletions(-)
diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c
index 7415860..51c08b9 100644
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -2919,6 +2919,24 @@ vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep,
enum machine_mode pmode;
int punsignedp, pvolatilep;
+ base = DR_REF (dr);
+ /* For masked loads/stores, DR_REF (dr) is an artificial MEM_REF,
+ see if we can use the def stmt of the address. */
+ if (is_gimple_call (stmt)
+ && gimple_call_internal_p (stmt)
+ && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
+ || gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
+ && TREE_CODE (base) == MEM_REF
+ && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME
+ && integer_zerop (TREE_OPERAND (base, 1))
+ && !expr_invariant_in_loop_p (loop, TREE_OPERAND (base, 0)))
+ {
+ gimple def_stmt = SSA_NAME_DEF_STMT (TREE_OPERAND (base, 0));
+ if (is_gimple_assign (def_stmt)
+ && gimple_assign_rhs_code (def_stmt) == ADDR_EXPR)
+ base = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
+ }
+
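For context, a minimal sketch (not part of the patch, function name invented)
of the kind of loop this hunk targets. After if-conversion the guarded load
becomes an IFN_MASK_LOAD call whose DR_REF is an artificial MEM_REF of the
form MEM[(T *)addr_N, 0]; the hunk above walks addr_N back to its defining
ADDR_EXPR to recover the real reference for gather analysis:

/* Guarded gather: loop-invariant base plus a per-iteration index.
   With -O3 -mavx2 the load can be vectorized as a masked gather.  */
void
foo (double *restrict out, const double *restrict base,
     const int *restrict idx, const int *restrict cond, int n)
{
  int i;
  for (i = 0; i < n; i++)
    if (cond[i])
      out[i] = base[idx[i]];
}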
/* The gather builtins need address of the form
loop_invariant + vector * {1, 2, 4, 8}
or
@@ -2931,7 +2949,7 @@ vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep,
vectorized. The following code attempts to find such a preexisting
SSA_NAME OFF and put the loop invariants into a tree BASE
that can be gimplified before the loop. */
- base = get_inner_reference (DR_REF (dr), &pbitsize, &pbitpos, &off,
+ base = get_inner_reference (base, &pbitsize, &pbitpos, &off,
&pmode, &punsignedp, &pvolatilep, false);
gcc_assert (base != NULL_TREE && (pbitpos % BITS_PER_UNIT) == 0);
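To illustrate the address shape the comment above describes, here is a hedged
example using the AVX2 gather intrinsic (illustration only; the patch itself
does not touch target code). The scale operand must be 1, 2, 4 or 8, matching
the loop_invariant + sign_extend (vector) * {1, 2, 4, 8} form that
vect_check_gather looks for:

#include <immintrin.h>

/* base is loop invariant; vindex holds 32-bit indices that are
   sign-extended and scaled by sizeof (double) == 8.  Requires -mavx2.  */
__m256d
gather_pd (const double *base, __m128i vindex)
{
  return _mm256_i32gather_pd (base, vindex, 8);
}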
@@ -3428,7 +3446,10 @@ again:
offset = unshare_expr (DR_OFFSET (dr));
init = unshare_expr (DR_INIT (dr));
- if (is_gimple_call (stmt))
+ if (is_gimple_call (stmt)
+ && (!gimple_call_internal_p (stmt)
+ || (gimple_call_internal_fn (stmt) != IFN_MASK_LOAD
+ && gimple_call_internal_fn (stmt) != IFN_MASK_STORE)))
{
if (dump_enabled_p ())
{
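A hedged contrast (names invented for illustration): an ordinary call in the
loop body still makes the data reference unanalyzable at this point, whereas
a conditional access that if-conversion has turned into one of the two masked
internal functions is now allowed through:

extern double get_elem (int);

void
bar (double *restrict out, const int *restrict cond, int n)
{
  int i;
  for (i = 0; i < n; i++)
    if (cond[i])
      out[i] += 1.0;   /* becomes IFN_MASK_LOAD + IFN_MASK_STORE: OK  */
  /* Replacing the body with out[i] = get_elem (i); would reintroduce a
     real call, and the reference would still be rejected here.  */
}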
@@ -5079,6 +5100,14 @@ vect_supportable_dr_alignment (struct data_reference *dr,
if (aligned_access_p (dr) && !check_aligned_accesses)
return dr_aligned;
+ /* For now assume all conditional loads/stores support unaligned
+ access without any special code. */
+ if (is_gimple_call (stmt)
+ && gimple_call_internal_p (stmt)
+ && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
+ || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
+ return dr_unaligned_supported;
+
if (loop_vinfo)
{
vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
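A short sketch of why dr_unaligned_supported is the safe answer here (example
only, not from the patch): the masked store below may land at any alignment,
and returning dr_unaligned_supported tells the vectorizer it need not peel or
version the loop for alignment on its behalf:

/* out can point anywhere into a larger buffer, so its vector
   alignment is unknown at compile time.  */
void
clear_masked (float *out, const int *restrict cond, int n)
{
  int i;
  for (i = 0; i < n; i++)
    if (cond[i])
      out[i] = 0.0f;
}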