aboutsummaryrefslogtreecommitdiff
path: root/gcc
diff options
context:
space:
mode:
authorJuzhe-Zhong <juzhe.zhong@rivai.ai>2023-07-21 10:23:43 +0800
committerLehua Ding <lehua.ding@rivai.ai>2023-07-21 16:34:50 +0800
commit3e76bdd5fd289ac38326060bba91b0e264c50ccf (patch)
treeda4d19c8d3b37d8a1204966b149101f2b0f7e8c8 /gcc
parent363bb3dca429e885ce3d134fc50f8ab05aacda0f (diff)
downloadgcc-3e76bdd5fd289ac38326060bba91b0e264c50ccf.zip
gcc-3e76bdd5fd289ac38326060bba91b0e264c50ccf.tar.gz
gcc-3e76bdd5fd289ac38326060bba91b0e264c50ccf.tar.bz2
cleanup: Change condition order
Hi, Richard and Richi. I have double-checked the recent code for len && mask support again. Some places use the code structure: if (len_mask_fn) ... else if (mask_fn) ... while other places use: if (mask_len_fn) ... else if (mask) ... Based on a previous review comment from Richi: https://gcc.gnu.org/pipermail/gcc-patches/2023-July/625067.html the len-mask variants should be checked before the mask-only variants. So I reordered all the conditions to check the LEN MASK cases before the MASK cases. This is the last cleanup patch. Bootstrap and regression testing are on the way. gcc/ChangeLog: * tree-vect-stmts.cc (check_load_store_for_partial_vectors): Change condition order. (vectorizable_operation): Ditto.
Diffstat (limited to 'gcc')
-rw-r--r--gcc/tree-vect-stmts.cc24
1 files changed, 12 insertions, 12 deletions
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index 2555958..ed28fbd 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -1635,17 +1635,17 @@ check_load_store_for_partial_vectors (loop_vec_info loop_vinfo, tree vectype,
internal_fn len_ifn = (is_load
? IFN_MASK_LEN_GATHER_LOAD
: IFN_MASK_LEN_SCATTER_STORE);
- if (internal_gather_scatter_fn_supported_p (ifn, vectype,
+ if (internal_gather_scatter_fn_supported_p (len_ifn, vectype,
gs_info->memory_type,
gs_info->offset_vectype,
gs_info->scale))
- vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype,
- scalar_mask);
- else if (internal_gather_scatter_fn_supported_p (len_ifn, vectype,
+ vect_record_loop_len (loop_vinfo, lens, nvectors, vectype, 1);
+ else if (internal_gather_scatter_fn_supported_p (ifn, vectype,
gs_info->memory_type,
gs_info->offset_vectype,
gs_info->scale))
- vect_record_loop_len (loop_vinfo, lens, nvectors, vectype, 1);
+ vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype,
+ scalar_mask);
else
{
if (dump_enabled_p ())
@@ -6598,16 +6598,16 @@ vectorizable_operation (vec_info *vinfo,
&& LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)
&& mask_out_inactive)
{
- if (cond_fn != IFN_LAST
- && direct_internal_fn_supported_p (cond_fn, vectype,
+ if (cond_len_fn != IFN_LAST
+ && direct_internal_fn_supported_p (cond_len_fn, vectype,
OPTIMIZE_FOR_SPEED))
- vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num,
- vectype, NULL);
- else if (cond_len_fn != IFN_LAST
- && direct_internal_fn_supported_p (cond_len_fn, vectype,
- OPTIMIZE_FOR_SPEED))
vect_record_loop_len (loop_vinfo, lens, ncopies * vec_num, vectype,
1);
+ else if (cond_fn != IFN_LAST
+ && direct_internal_fn_supported_p (cond_fn, vectype,
+ OPTIMIZE_FOR_SPEED))
+ vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num,
+ vectype, NULL);
else
{
if (dump_enabled_p ())