author     Pan Li <pan2.li@intel.com>    2024-10-23 16:36:28 +0800
committer  Pan Li <pan2.li@intel.com>    2024-10-29 22:19:26 +0800
commit     a0292ddb21475e8fd238e201d3b64f0ab02ace04
tree       cd2388260d36419733bc15214711f18f50213612 /gcc
parent     1fdee26ee985385c8043ea0ca6ff05ffdbc34f9c
Vect: Introduce MASK_LEN_STRIDED_LOAD{STORE} to loop vectorizer
This patch allows the loop vectorizer to generate MASK_LEN_STRIDED_LOAD{STORE}
IR for memory accesses with a loop-invariant stride. For example, given the
loop below:
void foo (int * __restrict a, int * __restrict b, int stride, int n)
{
  for (int i = 0; i < n; i++)
    a[i*stride] = b[i*stride] + 100;
}
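Compared side by side, a gather/scatter formulation of this access must
materialize a per-lane vector of byte offsets in a register, while the strided
form only needs the scalar stride. A minimal scalar sketch of the two
addressing schemes (the helpers below are illustrative only, not GCC IR):

/* Gather-style: per-lane byte offsets offsets[i] = i * s must be kept
   in a vector register.  */
static void gather_style (int *dest, const char *base,
                          const long *offsets, int vl)
{
  for (int i = 0; i < vl; i++)
    dest[i] = *(const int *) (base + offsets[i]);
}

/* Strided-style: the single scalar byte stride s suffices.  */
static void strided_style (int *dest, const char *base, long s, int vl)
{
  for (int i = 0; i < vl; i++)
    dest[i] = *(const int *) (base + (long) i * s);
}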
Before this patch:
66 │ _73 = .SELECT_VL (ivtmp_71, POLY_INT_CST [4, 4]);
67 │ _52 = _54 * _73;
68 │ vect__5.16_61 = .MASK_LEN_GATHER_LOAD (vectp_b.14_59, _58, 4, { 0, ... }, { -1, ... }, _73, 0);
69 │ vect__7.17_63 = vect__5.16_61 + { 100, ... };
70 │ .MASK_LEN_SCATTER_STORE (vectp_a.18_67, _58, 4, vect__7.17_63, { -1, ... }, _73, 0);
71 │ vectp_b.14_60 = vectp_b.14_59 + _52;
72 │ vectp_a.18_68 = vectp_a.18_67 + _52;
73 │ ivtmp_72 = ivtmp_71 - _73;
After this patch:
60 │ _70 = .SELECT_VL (ivtmp_68, POLY_INT_CST [4, 4]);
61 │ _52 = _54 * _70;
62 │ vect__5.16_58 = .MASK_LEN_STRIDED_LOAD (vectp_b.14_56, _55, { 0, ... }, { -1, ... }, _70, 0);
63 │ vect__7.17_60 = vect__5.16_58 + { 100, ... };
64 │ .MASK_LEN_STRIDED_STORE (vectp_a.18_64, _55, vect__7.17_60, { -1, ... }, _70, 0);
65 │ vectp_b.14_57 = vectp_b.14_56 + _52;
66 │ vectp_a.18_65 = vectp_a.18_64 + _52;
67 │ ivtmp_69 = ivtmp_68 - _70;
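Relative to the gather/scatter form, the strided IFNs replace the vector
offset and scale operands with the scalar byte stride (_55 above, taken from
DR_STEP). A minimal scalar reference model of the intended per-element
semantics, assuming a zero bias and illustrative parameter names (a sketch,
not GCC's definition of the IFNs):

#include <stdbool.h>

/* Sketch of .MASK_LEN_STRIDED_LOAD (base, stride, else_v, mask, len, 0):
   active lanes read base + i * stride, inactive lanes take else_v[i].  */
void mask_len_strided_load (int *dest, const char *base, long stride,
                            const int *else_v, const bool *mask, long len)
{
  for (long i = 0; i < len; i++)
    dest[i] = mask[i] ? *(const int *) (base + i * stride) : else_v[i];
}

/* Sketch of .MASK_LEN_STRIDED_STORE (base, stride, value, mask, len, 0):
   active lanes write value[i] to base + i * stride.  */
void mask_len_strided_store (char *base, long stride, const int *value,
                             const bool *mask, long len)
{
  for (long i = 0; i < len; i++)
    if (mask[i])
      *(int *) (base + i * stride) = value[i];
}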
The following test suites passed for this patch:
* The x86 bootstrap test.
* The x86 full regression test.
* The riscv full regression test.
gcc/ChangeLog:
* tree-vect-stmts.cc (vect_get_strided_load_store_ops): Handle
MASK_LEN_STRIDED_LOAD{STORE} after the supported check.
(vectorizable_store): Generate MASK_LEN_STRIDED_STORE when the offset
of the scatter is not a vector type.
(vectorizable_load): Ditto, but for MASK_LEN_STRIDED_LOAD.
Signed-off-by: Pan Li <pan2.li@intel.com>
Co-Authored-By: Juzhe-Zhong <juzhe.zhong@rivai.ai>
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/tree-vect-stmts.cc  45
1 file changed, 36 insertions(+), 9 deletions(-)
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index 4a824d1..9a2c2ea 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -2951,6 +2951,15 @@ vect_get_strided_load_store_ops (stmt_vec_info stmt_info,
       *dataref_bump = cse_and_gimplify_to_preheader (loop_vinfo, bump);
     }
 
+  internal_fn ifn
+    = DR_IS_READ (dr) ? IFN_MASK_LEN_STRIDED_LOAD : IFN_MASK_LEN_STRIDED_STORE;
+  if (direct_internal_fn_supported_p (ifn, vectype, OPTIMIZE_FOR_SPEED))
+    {
+      *vec_offset = cse_and_gimplify_to_preheader (loop_vinfo,
+                                                   unshare_expr (DR_STEP (dr)));
+      return;
+    }
+
   /* The offset given in GS_INFO can have pointer type, so use the element
      type of the vector instead.  */
   tree offset_type = TREE_TYPE (gs_info->offset_vectype);
@@ -9195,10 +9204,20 @@ vectorizable_store (vec_info *vinfo,
               gcall *call;
               if (final_len && final_mask)
-                call = gimple_build_call_internal
-                         (IFN_MASK_LEN_SCATTER_STORE, 7, dataref_ptr,
-                          vec_offset, scale, vec_oprnd, final_mask,
-                          final_len, bias);
+                {
+                  if (VECTOR_TYPE_P (TREE_TYPE (vec_offset)))
+                    call = gimple_build_call_internal (
+                      IFN_MASK_LEN_SCATTER_STORE, 7, dataref_ptr,
+                      vec_offset, scale, vec_oprnd, final_mask, final_len,
+                      bias);
+                  else
+                    /* Non-vector offset indicates that prefer to take
+                       MASK_LEN_STRIDED_STORE instead of the
+                       IFN_MASK_SCATTER_STORE with direct stride arg.  */
+                    call = gimple_build_call_internal (
+                      IFN_MASK_LEN_STRIDED_STORE, 6, dataref_ptr,
+                      vec_offset, vec_oprnd, final_mask, final_len, bias);
+                }
               else if (final_mask)
                 call = gimple_build_call_internal
                          (IFN_MASK_SCATTER_STORE, 5, dataref_ptr,
@@ -11195,11 +11214,19 @@ vectorizable_load (vec_info *vinfo,
                     gcall *call;
                     if (final_len && final_mask)
-                      call
-                        = gimple_build_call_internal (IFN_MASK_LEN_GATHER_LOAD, 7,
-                                                      dataref_ptr, vec_offset,
-                                                      scale, zero, final_mask,
-                                                      final_len, bias);
+                      {
+                        if (VECTOR_TYPE_P (TREE_TYPE (vec_offset)))
+                          call = gimple_build_call_internal (
+                            IFN_MASK_LEN_GATHER_LOAD, 7, dataref_ptr, vec_offset,
+                            scale, zero, final_mask, final_len, bias);
+                        else
+                          /* Non-vector offset indicates that prefer to take
+                             MASK_LEN_STRIDED_LOAD instead of the
+                             MASK_LEN_GATHER_LOAD with direct stride arg.  */
+                          call = gimple_build_call_internal (
+                            IFN_MASK_LEN_STRIDED_LOAD, 6, dataref_ptr, vec_offset,
+                            zero, final_mask, final_len, bias);
+                      }
                     else if (final_mask)
                       call = gimple_build_call_internal (IFN_MASK_GATHER_LOAD, 5,
                                                          dataref_ptr, vec_offset,
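On a target with strided vector memory instructions, the post-patch GIMPLE is
meant to lower to one strided load and one strided store per vectorized
iteration, with no vector of offsets materialized. A hedged sketch of that
pattern using the RISC-V vector intrinsics (the intrinsics themselves exist;
that GCC emits exactly this sequence for the example above is an assumption,
and a positive stride is assumed):

#include <riscv_vector.h>
#include <stddef.h>

void foo_rvv (int *__restrict a, int *__restrict b, int stride, int n)
{
  ptrdiff_t bstride = (ptrdiff_t) stride * sizeof (int); /* byte stride */
  for (size_t vl; n > 0; n -= vl, a += vl * stride, b += vl * stride)
    {
      vl = __riscv_vsetvl_e32m1 ((size_t) n);
      /* Strided load: lane i reads b[i * stride].  */
      vint32m1_t v = __riscv_vlse32_v_i32m1 (b, bstride, vl);
      v = __riscv_vadd_vx_i32m1 (v, 100, vl);
      /* Strided store: lane i writes a[i * stride].  */
      __riscv_vsse32_v_i32m1 (a, bstride, v, vl);
    }
}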