author     Richard Sandiford <richard.sandiford@arm.com>    2019-08-05 16:46:58 +0000
committer  Richard Sandiford <rsandifo@gcc.gnu.org>         2019-08-05 16:46:58 +0000
commit     868363d4f52df19d8d5e9529b8906fa25c8d0c95 (patch)
tree       53d8dc707b6539ee636b896399f561e66e1c88d6 /gcc
parent     779724a5913b4e6a7ccccc0b8b415a772144a067 (diff)
Fold MASK_LOAD/STORE with an all-true mask
This patch folds IFN_MASK_LOAD and IFN_MASK_STORE calls into normal
accesses when the mask is all-true.  This can happen for fully-masked
loops that didn't actually need to be fully-masked (something the
vectoriser itself could handle instead), or for unrolled fully-masked
loops whose first iteration is guaranteed to operate on a full vector.
It's also useful when the accesses are generated directly by intrinsics
(as they will be for SVE in follow-on patches).
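To make the effect concrete, here is a minimal sketch in the spirit of the
new mask_load_1.c test below; the function name and the GIMPLE shown in the
comments are illustrative rather than copied from an actual dump:

/* Build with the options used by the new test:
   -O2 -ftree-vectorize -msve-vector-bits=256 -fdump-tree-optimized  */

void
add_one (int *x)
{
  /* With 256-bit SVE vectors this loop covers exactly one vector of
     eight ints, so the vectoriser emits fully-masked accesses whose
     mask is a known all-true constant, roughly:

       vect_1 = .MASK_LOAD (x_2, 4B, { -1, -1, -1, -1, -1, -1, -1, -1 });
       .MASK_STORE (x_2, 4B, { -1, -1, -1, -1, -1, -1, -1, -1 }, vect_3);

     The new fold rewrites such calls as plain vector accesses:

       vect_1 = MEM <vector(8) int> [(int *)x_2];
       MEM <vector(8) int> [(int *)x_2] = vect_3;  */
  for (int i = 0; i < 8; ++i)
    x[i] += 1;
}

The scan-tree-dump patterns in the new test check for exactly this
MEM <vector(8) int> form in the optimized dump.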
2019-08-05  Richard Sandiford  <richard.sandiford@arm.com>

gcc/
	* gimple-fold.c (gimple_fold_mask_load_store_mem_ref)
	(gimple_fold_mask_load, gimple_fold_mask_store): New functions.
	(gimple_fold_call): Use them to fold IFN_MASK_LOAD and
	IFN_MASK_STORE.

gcc/testsuite/
	* gcc.target/aarch64/sve/mask_load_1.c: New test.
From-SVN: r274118
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog                                        7
-rw-r--r--  gcc/gimple-fold.c                                   63
-rw-r--r--  gcc/testsuite/ChangeLog                              4
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sve/mask_load_1.c  12

4 files changed, 86 insertions, 0 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 3d3a2fa..26ada9c 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,12 @@
 2019-08-05  Richard Sandiford  <richard.sandiford@arm.com>
 
+	* gimple-fold.c (gimple_fold_mask_load_store_mem_ref)
+	(gimple_fold_mask_load, gimple_fold_mask_store): New functions.
+	(gimple_fold_call): Use them to fold IFN_MASK_LOAD and
+	IFN_MASK_STORE.
+
+2019-08-05  Richard Sandiford  <richard.sandiford@arm.com>
+
 	* gimple.h (gimple_move_vops): Declare.
 	* gimple.c (gimple_move_vops): New function
 	* gimple-fold.c (replace_call_with_call_and_fold)
diff --git a/gcc/gimple-fold.c b/gcc/gimple-fold.c
index c3128e7..fc57fb4 100644
--- a/gcc/gimple-fold.c
+++ b/gcc/gimple-fold.c
@@ -4180,6 +4180,63 @@ arith_overflowed_p (enum tree_code code, const_tree type,
   return wi::min_precision (wres, sign) > TYPE_PRECISION (type);
 }
 
+/* If IFN_MASK_LOAD/STORE call CALL is unconditional, return a MEM_REF
+   for the memory it references, otherwise return null.  VECTYPE is the
+   type of the memory vector.  */
+
+static tree
+gimple_fold_mask_load_store_mem_ref (gcall *call, tree vectype)
+{
+  tree ptr = gimple_call_arg (call, 0);
+  tree alias_align = gimple_call_arg (call, 1);
+  tree mask = gimple_call_arg (call, 2);
+  if (!tree_fits_uhwi_p (alias_align) || !integer_all_onesp (mask))
+    return NULL_TREE;
+
+  unsigned HOST_WIDE_INT align = tree_to_uhwi (alias_align) * BITS_PER_UNIT;
+  if (TYPE_ALIGN (vectype) != align)
+    vectype = build_aligned_type (vectype, align);
+  tree offset = build_zero_cst (TREE_TYPE (alias_align));
+  return fold_build2 (MEM_REF, vectype, ptr, offset);
+}
+
+/* Try to fold IFN_MASK_LOAD call CALL.  Return true on success.  */
+
+static bool
+gimple_fold_mask_load (gimple_stmt_iterator *gsi, gcall *call)
+{
+  tree lhs = gimple_call_lhs (call);
+  if (!lhs)
+    return false;
+
+  if (tree rhs = gimple_fold_mask_load_store_mem_ref (call, TREE_TYPE (lhs)))
+    {
+      gassign *new_stmt = gimple_build_assign (lhs, rhs);
+      gimple_set_location (new_stmt, gimple_location (call));
+      gimple_move_vops (new_stmt, call);
+      gsi_replace (gsi, new_stmt, false);
+      return true;
+    }
+  return false;
+}
+
+/* Try to fold IFN_MASK_STORE call CALL.  Return true on success.  */
+
+static bool
+gimple_fold_mask_store (gimple_stmt_iterator *gsi, gcall *call)
+{
+  tree rhs = gimple_call_arg (call, 3);
+  if (tree lhs = gimple_fold_mask_load_store_mem_ref (call, TREE_TYPE (rhs)))
+    {
+      gassign *new_stmt = gimple_build_assign (lhs, rhs);
+      gimple_set_location (new_stmt, gimple_location (call));
+      gimple_move_vops (new_stmt, call);
+      gsi_replace (gsi, new_stmt, false);
+      return true;
+    }
+  return false;
+}
+
 /* Attempt to fold a call statement referenced by the statement iterator GSI.
    The statement may be replaced by another statement, e.g., if the call
    simplifies to a constant value. Return true if any changes were made.
@@ -4409,6 +4466,12 @@ gimple_fold_call (gimple_stmt_iterator *gsi, bool inplace)
 	  subcode = MULT_EXPR;
 	  cplx_result = true;
 	  break;
+	case IFN_MASK_LOAD:
+	  changed |= gimple_fold_mask_load (gsi, stmt);
+	  break;
+	case IFN_MASK_STORE:
+	  changed |= gimple_fold_mask_store (gsi, stmt);
+	  break;
 	default:
 	  break;
 	}
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index ca99dc6..2db4529 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,7 @@
+2019-08-05  Richard Sandiford  <richard.sandiford@arm.com>
+
+	* gcc.target/aarch64/sve/mask_load_1.c: New test.
+
 2019-08-05  Jozef Lawrynowicz  <jozef.l@mittosystems.com>
 
 	* gcc.target/msp430/pr80993.c: Add cleanup-saved-temps to final
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/mask_load_1.c b/gcc/testsuite/gcc.target/aarch64/sve/mask_load_1.c
new file mode 100644
index 0000000..e76d365
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/mask_load_1.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize -msve-vector-bits=256 -fdump-tree-optimized" } */
+
+void
+f (int *x)
+{
+  for (int i = 0; i < 8; ++i)
+    x[i] += 1;
+}
+
+/* { dg-final { scan-tree-dump { = MEM <vector\(8\) int>} "optimized" } } */
+/* { dg-final { scan-tree-dump { MEM <vector\(8\) int> \[[^]]*\] = } "optimized" } } */