aboutsummaryrefslogtreecommitdiff
path: root/gcc
diff options
context:
space:
mode:
authorRichard Biener <rguenther@suse.de>2016-01-12 08:30:44 +0000
committerRichard Biener <rguenth@gcc.gnu.org>2016-01-12 08:30:44 +0000
commit66c16fd94fe26bcea334ffbe9a4f3b8aa2e1cf00 (patch)
tree776e3bf5ae07bd31016ba46ab84bc75855a12121 /gcc
parent723033a6b2ce651e9fded5af3a820ad605cbfed0 (diff)
downloadgcc-66c16fd94fe26bcea334ffbe9a4f3b8aa2e1cf00.zip
gcc-66c16fd94fe26bcea334ffbe9a4f3b8aa2e1cf00.tar.gz
gcc-66c16fd94fe26bcea334ffbe9a4f3b8aa2e1cf00.tar.bz2
re PR tree-optimization/69157 (ICE in vect_transform_stmt, at tree-vect-stmts.c:8176)
2016-01-12  Richard Biener  <rguenther@suse.de>

	PR tree-optimization/69157
	* tree-vect-stmts.c (vectorizable_mask_load_store): Check
	stmts def type only during analyze phase.
	(vectorizable_call): Likewise.
	(vectorizable_simd_clone_call): Likewise.
	(vectorizable_conversion): Likewise.
	(vectorizable_assignment): Likewise.
	(vectorizable_shift): Likewise.
	(vectorizable_operation): Likewise.
	(vectorizable_store): Likewise.
	(vectorizable_load): Likewise.

	* gcc.dg/torture/pr69157.c: New testcase.

2016-01-12  Richard Biener  <rguenther@suse.de>

	PR tree-optimization/69174
	* tree-vect-stmts.c (vect_mark_relevant): Remove excessive vertical
	space.
	(vectorizable_load): Properly compute the number of loads needed
	for permuted strided SLP loads and do not spuriously assign
	to SLP_TREE_VEC_STMTS.

	* gcc.dg/torture/pr69174.c: New testcase.

From-SVN: r232260
Diffstat (limited to 'gcc')
-rw-r--r--gcc/ChangeLog23
-rw-r--r--gcc/testsuite/ChangeLog10
-rw-r--r--gcc/testsuite/gcc.dg/torture/pr69157.c17
-rw-r--r--gcc/testsuite/gcc.dg/torture/pr69174.c19
-rw-r--r--gcc/tree-vect-stmts.c48
5 files changed, 103 insertions, 14 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index f8784f0..6ccdbf8 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,26 @@
+2016-01-12 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/69157
+ * tree-vect-stmts.c (vectorizable_mask_load_store): Check
+ stmts def type only during analyze phase.
+ (vectorizable_call): Likewise.
+ (vectorizable_simd_clone_call): Likewise.
+ (vectorizable_conversion): Likewise.
+ (vectorizable_assignment): Likewise.
+ (vectorizable_shift): Likewise.
+ (vectorizable_operation): Likewise.
+ (vectorizable_store): Likewise.
+ (vectorizable_load): Likewise.
+
+2016-01-12 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/69174
+ * tree-vect-stmts.c (vect_mark_relevant): Remove excessive vertical
+ space.
+ (vectorizable_load): Properly compute the number of loads needed
+ for permuted strided SLP loads and do not spuriously assign
+ to SLP_TREE_VEC_STMTS.
+
2016-01-12 Andris Pavenis <andris.pavenis@iki.fi>
* config/i386/djgpp.h (PREFERRED_DEBUGGING_TYPE): Define to DWARF2
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 1866304..e19ea31 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,13 @@
+2016-01-12 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/69157
+ * gcc.dg/torture/pr69157.c: New testcase.
+
+2016-01-12 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/69174
+ * gcc.dg/torture/pr69174.c: New testcase.
+
2016-01-12 Jakub Jelinek <jakub@redhat.com>
PR c++/66808
diff --git a/gcc/testsuite/gcc.dg/torture/pr69157.c b/gcc/testsuite/gcc.dg/torture/pr69157.c
new file mode 100644
index 0000000..e85082e
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr69157.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+
+typedef struct {
+ float *data_normal3f;
+ float *data_texcoordtexture2f;
+ float *data_texcoordlightmap2f;
+ float *data_color4f;
+} dp_model_t;
+dp_model_t a;
+float *b;
+void fn1() {
+ int c;
+ a.data_normal3f = b + c * 3;
+ a.data_texcoordtexture2f = a.data_normal3f + c * 3;
+ a.data_texcoordlightmap2f = a.data_color4f =
+ a.data_texcoordlightmap2f + c * 2;
+}
diff --git a/gcc/testsuite/gcc.dg/torture/pr69174.c b/gcc/testsuite/gcc.dg/torture/pr69174.c
new file mode 100644
index 0000000..0866331
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr69174.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+
+typedef int pixval;
+typedef struct { pixval r, g, b; } xel;
+int convertRow_sample, convertRaster_col;
+short *convertRow_samplebuf;
+xel *convertRow_xelrow;
+short convertRow_spp;
+void fn1() {
+ int *alpharow;
+ for (; convertRaster_col;
+ ++convertRaster_col, convertRow_sample += convertRow_spp) {
+ convertRow_xelrow[convertRaster_col].r =
+ convertRow_xelrow[convertRaster_col].g =
+ convertRow_xelrow[convertRaster_col].b =
+ convertRow_samplebuf[convertRow_sample];
+ alpharow[convertRaster_col] = convertRow_samplebuf[convertRow_sample + 3];
+ }
+}
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index 465826e..872fa07 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -190,8 +190,11 @@ vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
gimple *pattern_stmt;
if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "mark relevant %d, live %d.\n", relevant, live_p);
+ {
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "mark relevant %d, live %d: ", relevant, live_p);
+ dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
+ }
/* If this stmt is an original stmt in a pattern, we might need to mark its
related pattern stmt instead of the original stmt. However, such stmts
@@ -1757,7 +1760,8 @@ vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
if (!STMT_VINFO_RELEVANT_P (stmt_info))
return false;
- if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
+ if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
+ && ! vec_stmt)
return false;
if (!STMT_VINFO_DATA_REF (stmt_info))
@@ -2206,7 +2210,8 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
return false;
- if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
+ if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
+ && ! vec_stmt)
return false;
/* Is GS a vectorizable call? */
@@ -2811,7 +2816,8 @@ vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
return false;
- if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
+ if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
+ && ! vec_stmt)
return false;
if (gimple_call_lhs (stmt)
@@ -3669,7 +3675,8 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
return false;
- if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
+ if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
+ && ! vec_stmt)
return false;
if (!is_gimple_assign (stmt))
@@ -4246,7 +4253,8 @@ vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
return false;
- if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
+ if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
+ && ! vec_stmt)
return false;
/* Is vectorizable assignment? */
@@ -4462,7 +4470,8 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
return false;
- if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
+ if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
+ && ! vec_stmt)
return false;
/* Is STMT a vectorizable binary/unary operation? */
@@ -4823,7 +4832,8 @@ vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
return false;
- if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
+ if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
+ && ! vec_stmt)
return false;
/* Is STMT a vectorizable binary/unary operation? */
@@ -5248,7 +5258,8 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
return false;
- if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
+ if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
+ && ! vec_stmt)
return false;
/* Is vectorizable store? */
@@ -6237,7 +6248,8 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
return false;
- if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
+ if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
+ && ! vec_stmt)
return false;
/* Is vectorizable load? */
@@ -6748,9 +6760,16 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
else
ltype = vectype;
ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
- ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+ /* For SLP permutation support we need to load the whole group,
+ not only the number of vector stmts the permutation result
+ fits in. */
if (slp_perm)
- dr_chain.create (ncopies);
+ {
+ ncopies = (group_size * vf + nunits - 1) / nunits;
+ dr_chain.create (ncopies);
+ }
+ else
+ ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
}
for (j = 0; j < ncopies; j++)
{
@@ -6798,9 +6817,10 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
if (slp)
{
- SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
if (slp_perm)
dr_chain.quick_push (gimple_assign_lhs (new_stmt));
+ else
+ SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
}
else
{