aboutsummaryrefslogtreecommitdiff
path: root/gcc
diff options
context:
space:
mode:
authorRichard Henderson <rth@redhat.com>2010-01-22 10:52:01 -0800
committerRichard Henderson <rth@gcc.gnu.org>2010-01-22 10:52:01 -0800
commit002cda0a8b3a5cff63b60f2f7aaf0684729c7c02 (patch)
tree3305726401c185237a4df96747f19c0599fc7716 /gcc
parent728d406cd758c24a4e63f1e3089827fd2c470358 (diff)
downloadgcc-002cda0a8b3a5cff63b60f2f7aaf0684729c7c02.zip
gcc-002cda0a8b3a5cff63b60f2f7aaf0684729c7c02.tar.gz
gcc-002cda0a8b3a5cff63b60f2f7aaf0684729c7c02.tar.bz2
re PR tree-optimization/42833 (sra miscompiles qemu)
PR tree-opt/42833 * tree-sra.c (sra_modify_assign): Delay re-gimplification of the RHS until after generate_subtree_copies has inserted its code before the current statement. From-SVN: r156176
Diffstat (limited to 'gcc')
-rw-r--r--gcc/ChangeLog7
-rw-r--r--gcc/testsuite/gcc.c-torture/execute/pr42833.c171
-rw-r--r--gcc/tree-sra.c22
3 files changed, 191 insertions, 9 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index e799c94..10a3626 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,10 @@
+2010-01-22 Richard Henderson <rth@redhat.com>
+
+ PR tree-opt/42833
+ * tree-sra.c (sra_modify_assign): Delay re-gimplification of
+ the RHS until after generate_subtree_copies has inserted its
+ code before the current statement.
+
2010-01-22 Joern Rennecke <amylaar@spamcop.net>
* doc/tm.texi (TARGET_MIN_DIVISIONS_FOR_RECIP_MUL): Fix return type.
diff --git a/gcc/testsuite/gcc.c-torture/execute/pr42833.c b/gcc/testsuite/gcc.c-torture/execute/pr42833.c
new file mode 100644
index 0000000..5494f98
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/execute/pr42833.c
@@ -0,0 +1,171 @@
+typedef __INT_LEAST8_TYPE__ int8_t;
+typedef __UINT_LEAST32_TYPE__ uint32_t;
+typedef int ssize_t;
+typedef struct { int8_t v1; int8_t v2; int8_t v3; int8_t v4; } neon_s8;
+
+uint32_t helper_neon_rshl_s8 (uint32_t arg1, uint32_t arg2);
+
+uint32_t
+helper_neon_rshl_s8 (uint32_t arg1, uint32_t arg2)
+{
+ uint32_t res;
+ neon_s8 vsrc1;
+ neon_s8 vsrc2;
+ neon_s8 vdest;
+ do
+ {
+ union
+ {
+ neon_s8 v;
+ uint32_t i;
+ } conv_u;
+ conv_u.i = (arg1);
+ vsrc1 = conv_u.v;
+ }
+ while (0);
+ do
+ {
+ union
+ {
+ neon_s8 v;
+ uint32_t i;
+ } conv_u;
+ conv_u.i = (arg2);
+ vsrc2 = conv_u.v;
+ }
+ while (0);
+ do
+ {
+ int8_t tmp;
+ tmp = (int8_t) vsrc2.v1;
+ if (tmp >= (ssize_t) sizeof (vsrc1.v1) * 8)
+ {
+ vdest.v1 = 0;
+ }
+ else if (tmp < -(ssize_t) sizeof (vsrc1.v1) * 8)
+ {
+ vdest.v1 = vsrc1.v1 >> (sizeof (vsrc1.v1) * 8 - 1);
+ }
+ else if (tmp == -(ssize_t) sizeof (vsrc1.v1) * 8)
+ {
+ vdest.v1 = vsrc1.v1 >> (tmp - 1);
+ vdest.v1++;
+ vdest.v1 >>= 1;
+ }
+ else if (tmp < 0)
+ {
+ vdest.v1 = (vsrc1.v1 + (1 << (-1 - tmp))) >> -tmp;
+ }
+ else
+ {
+ vdest.v1 = vsrc1.v1 << tmp;
+ }
+ }
+ while (0);
+ do
+ {
+ int8_t tmp;
+ tmp = (int8_t) vsrc2.v2;
+ if (tmp >= (ssize_t) sizeof (vsrc1.v2) * 8)
+ {
+ vdest.v2 = 0;
+ }
+ else if (tmp < -(ssize_t) sizeof (vsrc1.v2) * 8)
+ {
+ vdest.v2 = vsrc1.v2 >> (sizeof (vsrc1.v2) * 8 - 1);
+ }
+ else if (tmp == -(ssize_t) sizeof (vsrc1.v2) * 8)
+ {
+ vdest.v2 = vsrc1.v2 >> (tmp - 1);
+ vdest.v2++;
+ vdest.v2 >>= 1;
+ }
+ else if (tmp < 0)
+ {
+ vdest.v2 = (vsrc1.v2 + (1 << (-1 - tmp))) >> -tmp;
+ }
+ else
+ {
+ vdest.v2 = vsrc1.v2 << tmp;
+ }
+ }
+ while (0);
+ do
+ {
+ int8_t tmp;
+ tmp = (int8_t) vsrc2.v3;
+ if (tmp >= (ssize_t) sizeof (vsrc1.v3) * 8)
+ {
+ vdest.v3 = 0;
+ }
+ else if (tmp < -(ssize_t) sizeof (vsrc1.v3) * 8)
+ {
+ vdest.v3 = vsrc1.v3 >> (sizeof (vsrc1.v3) * 8 - 1);
+ }
+ else if (tmp == -(ssize_t) sizeof (vsrc1.v3) * 8)
+ {
+ vdest.v3 = vsrc1.v3 >> (tmp - 1);
+ vdest.v3++;
+ vdest.v3 >>= 1;
+ }
+ else if (tmp < 0)
+ {
+ vdest.v3 = (vsrc1.v3 + (1 << (-1 - tmp))) >> -tmp;
+ }
+ else
+ {
+ vdest.v3 = vsrc1.v3 << tmp;
+ }
+ }
+ while (0);
+ do
+ {
+ int8_t tmp;
+ tmp = (int8_t) vsrc2.v4;
+ if (tmp >= (ssize_t) sizeof (vsrc1.v4) * 8)
+ {
+ vdest.v4 = 0;
+ }
+ else if (tmp < -(ssize_t) sizeof (vsrc1.v4) * 8)
+ {
+ vdest.v4 = vsrc1.v4 >> (sizeof (vsrc1.v4) * 8 - 1);
+ }
+ else if (tmp == -(ssize_t) sizeof (vsrc1.v4) * 8)
+ {
+ vdest.v4 = vsrc1.v4 >> (tmp - 1);
+ vdest.v4++;
+ vdest.v4 >>= 1;
+ }
+ else if (tmp < 0)
+ {
+ vdest.v4 = (vsrc1.v4 + (1 << (-1 - tmp))) >> -tmp;
+ }
+ else
+ {
+ vdest.v4 = vsrc1.v4 << tmp;
+ }
+ }
+ while (0);;
+ do
+ {
+ union
+ {
+ neon_s8 v;
+ uint32_t i;
+ } conv_u;
+ conv_u.v = (vdest);
+ res = conv_u.i;
+ }
+ while (0);
+ return res;
+}
+
+extern void abort(void);
+
+int main()
+{
+ uint32_t r = helper_neon_rshl_s8 (0x05050505, 0x01010101);
+ if (r != 0x0a0a0a0a)
+ abort ();
+ return 0;
+}
diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c
index 07658bd..79c5c76 100644
--- a/gcc/tree-sra.c
+++ b/gcc/tree-sra.c
@@ -2533,6 +2533,7 @@ sra_modify_assign (gimple *stmt, gimple_stmt_iterator *gsi,
bool modify_this_stmt = false;
bool force_gimple_rhs = false;
location_t loc = gimple_location (*stmt);
+ gimple_stmt_iterator orig_gsi = *gsi;
if (!gimple_assign_single_p (*stmt))
return SRA_SA_NONE;
@@ -2611,15 +2612,6 @@ sra_modify_assign (gimple *stmt, gimple_stmt_iterator *gsi,
force_gimple_rhs = true;
}
}
-
- if (force_gimple_rhs)
- rhs = force_gimple_operand_gsi (gsi, rhs, true, NULL_TREE,
- true, GSI_SAME_STMT);
- if (gimple_assign_rhs1 (*stmt) != rhs)
- {
- gimple_assign_set_rhs_from_tree (gsi, rhs);
- gcc_assert (*stmt == gsi_stmt (*gsi));
- }
}
/* From this point on, the function deals with assignments in between
@@ -2721,6 +2713,18 @@ sra_modify_assign (gimple *stmt, gimple_stmt_iterator *gsi,
0, 0, gsi, true, true);
}
}
+
+ /* This gimplification must be done after generate_subtree_copies, lest we
+ insert the subtree copies in the middle of the gimplified sequence. */
+ if (force_gimple_rhs)
+ rhs = force_gimple_operand_gsi (&orig_gsi, rhs, true, NULL_TREE,
+ true, GSI_SAME_STMT);
+ if (gimple_assign_rhs1 (*stmt) != rhs)
+ {
+ gimple_assign_set_rhs_from_tree (&orig_gsi, rhs);
+ gcc_assert (*stmt == gsi_stmt (orig_gsi));
+ }
+
return modify_this_stmt ? SRA_SA_PROCESSED : SRA_SA_NONE;
}