author     Richard Biener <rguenther@suse.de>    2025-04-02 13:12:58 +0200
committer  Richard Biener <rguenth@gcc.gnu.org>  2025-04-02 14:19:18 +0200
commit     574c59cfe6e29c9e5758988b75c2e7ab6edc37da (patch)
tree       440681455881cd750e5bd08caa1ee7abc98b4b0c /gcc
parent     e7912d4a81cf34e05c7ded70910069b691a8bb15 (diff)
tree-optimization/119586 - aligned access to unaligned data
The following reverts parts of r15-8047, which assumed the alignment
analysis for VMAT_STRIDED_SLP is correct and used aligned accesses
where that analysis allowed them.  As the PR shows, the analysis is
still incorrect, so revert back to assuming we got it wrong.
PR tree-optimization/119586
* tree-vect-stmts.cc (vectorizable_load): Assume we got
alignment analysis for VMAT_STRIDED_SLP wrong.
(vectorizable_store): Likewise.
* gcc.dg/vect/pr119586.c: New testcase.
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/testsuite/gcc.dg/vect/pr119586.c  21
-rw-r--r--  gcc/tree-vect-stmts.cc                21
2 files changed, 34 insertions, 8 deletions
diff --git a/gcc/testsuite/gcc.dg/vect/pr119586.c b/gcc/testsuite/gcc.dg/vect/pr119586.c
new file mode 100644
index 0000000..04a00ef
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/pr119586.c
@@ -0,0 +1,21 @@
+#include "tree-vect.h"
+
+void __attribute__((noipa)) foo (long *) {}
+void __attribute__((noipa))
+d()
+{
+  long e[6][8][5];
+  for (int b = 0; b < 6; b++)
+    for (int c = 0; c < 8; c++)
+      {
+        e[b][c][0] = 1;
+        e[b][c][1] = 1;
+        e[b][c][4] = 1;
+      }
+  foo (&e[0][0][0]);
+}
+int main()
+{
+  check_vect ();
+  d();
+}
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index 8bd5ea9..3005ae6 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -8906,10 +8906,17 @@ vectorizable_store (vec_info *vinfo,
             }
         }
       unsigned align;
-      if (alignment_support_scheme == dr_aligned)
-        align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
-      else
-        align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info));
+      /* ???  We'd want to use
+           if (alignment_support_scheme == dr_aligned)
+             align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
+         since doing that is what we assume we can in the above checks.
+         But this interferes with groups with gaps where for example
+         VF == 2 makes the group in the unrolled loop aligned but the
+         fact that we advance with step between the two subgroups
+         makes the access to the second unaligned.  See PR119586.
+         We have to anticipate that here or adjust code generation to
+         avoid the misaligned loads by means of permutations.  */
+      align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info));
       /* Alignment is at most the access size if we do multiple stores.  */
       if (nstores > 1)
         align = MIN (tree_to_uhwi (TYPE_SIZE_UNIT (ltype)), align);
@@ -10884,10 +10891,8 @@ vectorizable_load (vec_info *vinfo,
             }
         }
       unsigned align;
-      if (alignment_support_scheme == dr_aligned)
-        align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
-      else
-        align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info));
+      /* ???  The above is still wrong, see vectorizable_store.  */
+      align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info));
       /* Alignment is at most the access size if we do multiple loads.  */
      if (nloads > 1)
         align = MIN (tree_to_uhwi (TYPE_SIZE_UNIT (ltype)), align);
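For illustration only (not part of the commit): the alignment arithmetic behind the ??? comment can be checked with a small standalone program. Assuming an LP64 target where sizeof (long) == 8, consecutive inner rows of the testcase's long e[6][8][5] are 5 * 8 == 40 bytes apart, so even when one row happens to start 16-byte aligned, the row reached by advancing with that step is misaligned by 40 % 16 == 8 bytes; this is the situation the comment describes for the second subgroup of a group with gaps at VF == 2.

/* Hypothetical demonstration, not part of the patch: print the row step
   and the row start addresses modulo a 16-byte vector alignment for the
   array layout used in gcc.dg/vect/pr119586.c.  Assumes 8-byte long.  */
#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  static long e[6][8][5];
  uintptr_t row0 = (uintptr_t) &e[0][0][0];
  uintptr_t row1 = (uintptr_t) &e[0][1][0];

  /* Step between consecutive inner rows: 5 * sizeof (long), i.e. 40
     bytes on LP64, which is not a multiple of 16.  */
  printf ("row step: %lu bytes\n", (unsigned long) (row1 - row0));

  /* If row0 % 16 == 0 the first subgroup is aligned, but row1 % 16 is
     then 8, so an aligned 16-byte access to the second row is wrong.  */
  printf ("row0 %% 16 = %lu, row1 %% 16 = %lu\n",
          (unsigned long) (row0 % 16), (unsigned long) (row1 % 16));
  return 0;
}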