author    Ju-Zhe Zhong <juzhe.zhong@rivai.ai>    2023-07-20 16:06:29 +0800
committer Pan Li <pan2.li@intel.com>             2023-07-20 21:53:49 +0800
commit    b6b72562d116bd0a589dce39437f9d2b3c34491f
tree      301629a76ac6601b11e3c6f10c7ee1b0c04d88f0 /gcc
parent    ef28aadad6e5cff3d7494f3c97d435a6579a2e2d
CODE STRUCTURE: Refine code in the vectorizer
Hi, Richard and Richi.
I plan to refine the code I recently added for RVV auto-vectorization.
This patch is inspired by Richard's latest review comments:
https://patchwork.sourceware.org/project/gcc/patch/20230712042124.111818-1-juzhe.zhong@rivai.ai/
Richard said he prefers the code structured as follows:
Please instead switch the if condition so that the structure is:
if (...)
vect_record_loop_mask (...)
else if (...)
vect_record_loop_len (...)
else
can't use partial vectors
Those were his latest comments, so I have come back to refine this piece of code.
Does it look reasonable? A sketch of the control-flow change follows below.
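For illustration only, here is a minimal standalone sketch of the restructuring as I read the request. The predicates target_supports_masking and target_supports_length are made-up stand-ins for the real internal_gather_scatter_fn_supported_p checks, and the printf calls stand in for vect_record_loop_mask, vect_record_loop_len, and the missed-optimization dump:

  #include <cstdio>

  /* Hypothetical stand-ins for the real target queries; the actual code
     calls internal_gather_scatter_fn_supported_p with the mask and len
     variants of the gather/scatter internal functions.  */
  static bool target_supports_masking () { return false; }
  static bool target_supports_length () { return true; }

  /* Old shape: the length path is nested inside the failure case of the
     mask check, and the mask recording happens after the early returns.  */
  static void
  record_partial_vectors_old ()
  {
    if (!target_supports_masking ())
      {
        if (target_supports_length ())
          {
            std::printf ("record loop len\n");
            return;
          }
        std::printf ("can't use partial vectors\n");
        return;
      }
    std::printf ("record loop mask\n");
  }

  /* New shape requested in review: one flat three-way decision.  */
  static void
  record_partial_vectors_new ()
  {
    if (target_supports_masking ())
      std::printf ("record loop mask\n");
    else if (target_supports_length ())
      std::printf ("record loop len\n");
    else
      std::printf ("can't use partial vectors\n");
  }

  int
  main ()
  {
    record_partial_vectors_old ();  /* prints "record loop len" */
    record_partial_vectors_new ();  /* prints "record loop len" */
    return 0;
  }

Both shapes are behaviorally equivalent; the flat version simply makes the three outcomes (mask, length, neither) read in one place.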
A follow-up patch will rename all instances of "LEN_MASK" to "MASK_LEN" (e.g. IFN_LEN_MASK_GATHER_LOAD would become IFN_MASK_LEN_GATHER_LOAD), but it should come after this patch.
gcc/ChangeLog:
* tree-vect-stmts.cc (check_load_store_for_partial_vectors):
Refine code structure.
Diffstat (limited to 'gcc')
 gcc/tree-vect-stmts.cc | 38 +++++++++++++++++---------------------
 1 file changed, 17 insertions(+), 21 deletions(-)
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index 60de650..cc9a200 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -1605,6 +1605,7 @@ check_load_store_for_partial_vectors (loop_vec_info loop_vinfo, tree vectype,
     nvectors = vect_get_num_copies (loop_vinfo, vectype);
 
   vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
+  vec_loop_lens *lens = &LOOP_VINFO_LENS (loop_vinfo);
   machine_mode vecmode = TYPE_MODE (vectype);
   bool is_load = (vls_type == VLS_LOAD);
   if (memory_access_type == VMAT_LOAD_STORE_LANES)
@@ -1631,33 +1632,29 @@ check_load_store_for_partial_vectors (loop_vec_info loop_vinfo, tree vectype,
       internal_fn ifn = (is_load
                          ? IFN_MASK_GATHER_LOAD
                          : IFN_MASK_SCATTER_STORE);
-      if (!internal_gather_scatter_fn_supported_p (ifn, vectype,
-                                                   gs_info->memory_type,
-                                                   gs_info->offset_vectype,
-                                                   gs_info->scale))
-        {
-          ifn = (is_load
-                 ? IFN_LEN_MASK_GATHER_LOAD
-                 : IFN_LEN_MASK_SCATTER_STORE);
-          if (internal_gather_scatter_fn_supported_p (ifn, vectype,
-                                                      gs_info->memory_type,
-                                                      gs_info->offset_vectype,
-                                                      gs_info->scale))
-            {
-              vec_loop_lens *lens = &LOOP_VINFO_LENS (loop_vinfo);
-              vect_record_loop_len (loop_vinfo, lens, nvectors, vectype, 1);
-              return;
-            }
+      internal_fn len_ifn = (is_load
+                             ? IFN_LEN_MASK_GATHER_LOAD
+                             : IFN_LEN_MASK_SCATTER_STORE);
+      if (internal_gather_scatter_fn_supported_p (ifn, vectype,
+                                                  gs_info->memory_type,
+                                                  gs_info->offset_vectype,
+                                                  gs_info->scale))
+        vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype,
+                               scalar_mask);
+      else if (internal_gather_scatter_fn_supported_p (len_ifn, vectype,
+                                                       gs_info->memory_type,
+                                                       gs_info->offset_vectype,
+                                                       gs_info->scale))
+        vect_record_loop_len (loop_vinfo, lens, nvectors, vectype, 1);
+      else
+        {
           if (dump_enabled_p ())
             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                              "can't operate on partial vectors because"
                              " the target doesn't have an appropriate"
                              " gather load or scatter store instruction.\n");
           LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
-          return;
         }
-      vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype,
-                             scalar_mask);
       return;
     }
@@ -1703,7 +1700,6 @@ check_load_store_for_partial_vectors (loop_vec_info loop_vinfo, tree vectype,
       if (get_len_load_store_mode (vecmode, is_load).exists (&vmode))
         {
           nvectors = group_memory_nvectors (group_size * vf, nunits);
-          vec_loop_lens *lens = &LOOP_VINFO_LENS (loop_vinfo);
           unsigned factor = (vecmode == vmode) ? 1 : GET_MODE_UNIT_SIZE (vecmode);
           vect_record_loop_len (loop_vinfo, lens, nvectors, vectype, factor);
           using_partial_vectors_p = true;
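One side effect of the restructuring, visible in the first and third hunks, is that the vec_loop_lens *lens declaration is hoisted out of the two branches that used it up to function scope, beside masks, so both vect_record_loop_len calls share a single declaration. A minimal sketch of that hoisting pattern, assuming made-up types and names (loop_state, record_before, record_after) rather than the real GCC internals:

  #include <vector>

  /* Hypothetical loop-wide vectorizer state; not the real GCC types.  */
  struct loop_state
  {
    std::vector<int> masks;
    std::vector<int> lens;
  };

  /* Before: each branch fetched its own handle to the shared state at
     its point of use.  */
  static void
  record_before (loop_state &s, bool gather_path)
  {
    if (gather_path)
      {
        std::vector<int> *lens = &s.lens;  /* first use site */
        lens->push_back (1);
      }
    else
      {
        std::vector<int> *lens = &s.lens;  /* duplicated at second site */
        lens->push_back (2);
      }
  }

  /* After: one declaration beside the other loop-wide handles, shared
     by every branch that records a length.  */
  static void
  record_after (loop_state &s, bool gather_path)
  {
    std::vector<int> *lens = &s.lens;
    if (gather_path)
      lens->push_back (1);
    else
      lens->push_back (2);
  }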