diff options
author | Richard Biener <rguenther@suse.de> | 2013-04-19 13:37:12 +0000 |
---|---|---|
committer | Richard Biener <rguenth@gcc.gnu.org> | 2013-04-19 13:37:12 +0000 |
commit | 01d8bf070a2ca6a10c66a81f1b5e1c00856b5bc4 (patch) | |
tree | 296dc78a662c0699454b4483380a99d413a514d7 /gcc/tree-vect-stmts.c | |
parent | ede22fc330f055810555d867de5a8e3d3f94ad63 (diff) | |
download | gcc-01d8bf070a2ca6a10c66a81f1b5e1c00856b5bc4.zip gcc-01d8bf070a2ca6a10c66a81f1b5e1c00856b5bc4.tar.gz gcc-01d8bf070a2ca6a10c66a81f1b5e1c00856b5bc4.tar.bz2 |
re PR tree-optimization/56270 (loop over array of struct float causes compiler error: segmentation fault)
2013-04-19 Richard Biener <rguenther@suse.de>
* tree-vectorizer.h (struct _slp_instance): Move load_permutation
member ...
(struct _slp_tree): ... here. Make it a vector of unsigned ints.
(SLP_INSTANCE_LOAD_PERMUTATION): Remove.
(SLP_TREE_LOAD_PERMUTATION): Add.
(vect_transform_slp_perm_load): Adjust prototype.
* tree-vect-slp.c (vect_free_slp_tree): Adjust.
(vect_free_slp_instance): Likewise.
(vect_create_new_slp_node): Likewise.
(vect_supported_slp_permutation_p): Remove.
(vect_slp_rearrange_stmts): Adjust.
(vect_supported_load_permutation_p): Likewise. Inline
vect_supported_slp_permutation_p here.
(vect_analyze_slp_instance): Compute load permutations per
slp node instead of per instance.
(vect_get_slp_defs): Adjust.
(vect_transform_slp_perm_load): Likewise.
(vect_schedule_slp_instance): Remove redundant code.
(vect_schedule_slp): Remove hack for PR56270, add it ...
* tree-vect-stmts.c (vectorizable_load): ... here, do not
CSE loads for SLP. Adjust.
From-SVN: r198095
Diffstat (limited to 'gcc/tree-vect-stmts.c')
-rw-r--r-- | gcc/tree-vect-stmts.c | 17 |
1 file changed, 13 insertions, 4 deletions
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c index ca474c1..28b80bb 100644 --- a/gcc/tree-vect-stmts.c +++ b/gcc/tree-vect-stmts.c @@ -4754,12 +4754,21 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, { first_stmt = GROUP_FIRST_ELEMENT (stmt_info); if (slp - && !SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance).exists () + && !SLP_TREE_LOAD_PERMUTATION (slp_node).exists () && first_stmt != SLP_TREE_SCALAR_STMTS (slp_node)[0]) first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0]; /* Check if the chain of loads is already vectorized. */ - if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))) + if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)) + /* For SLP we would need to copy over SLP_TREE_VEC_STMTS. + ??? But we can only do so if there is exactly one + as we have no way to get at the rest. Leave the CSE + opportunity alone. + ??? With the group load eventually participating + in multiple different permutations (having multiple + slp nodes which refer to the same group) the CSE + is even wrong code. See PR56270. */ + && !slp) { *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); return true; @@ -4772,7 +4781,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, { grouped_load = false; vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); - if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance).exists ()) + if (SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()) slp_perm = true; group_gap = GROUP_GAP (vinfo_for_stmt (first_stmt)); } @@ -5163,7 +5172,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, if (slp_perm) { - if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf, + if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf, slp_node_instance, false)) { dr_chain.release (); |