Diffstat (limited to 'gcc')
-rw-r--r--   gcc/ChangeLog                                 8
-rw-r--r--   gcc/testsuite/ChangeLog                       6
-rw-r--r--   gcc/testsuite/gcc.dg/strlenopt-8.c            9
-rw-r--r--   gcc/testsuite/gcc.dg/tree-ssa/forwprop-6.c    6
-rw-r--r--   gcc/tree-ssa-forwprop.c                      92
5 files changed, 116 insertions, 5 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index cfcba31..d11aae9 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,11 @@
+2014-10-27 Richard Biener <rguenther@suse.de>
+
+	* tree-ssa-forwprop.c: Include tree-cfgcleanup.h and tree-into-ssa.h.
+	(lattice): New global.
+	(fwprop_ssa_val): New function.
+	(fold_all_stmts): Likewise.
+	(pass_forwprop::execute): Finally fold all stmts.
+
2014-10-26 Manuel López-Ibáñez <manu@gcc.gnu.org>
PR c++/53061
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index a6e6d03..2f8efef 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,9 @@
+2014-10-27 Richard Biener <rguenther@suse.de>
+
+	* gcc.dg/tree-ssa/forwprop-6.c: Scan ccp1 dump instead.
+	* gcc.dg/strlenopt-8.c: Adjust and XFAIL for non_strict_align
+	target due to memcpy inline-expansion.
+
2014-10-27 Eric Botcazou <ebotcazou@adacore.com>
* gnat.dg/specs/pack10.ads: New test.
diff --git a/gcc/testsuite/gcc.dg/strlenopt-8.c b/gcc/testsuite/gcc.dg/strlenopt-8.c
index 3aaf660..d82b31c 100644
--- a/gcc/testsuite/gcc.dg/strlenopt-8.c
+++ b/gcc/testsuite/gcc.dg/strlenopt-8.c
@@ -43,8 +43,13 @@ main ()
return 0;
}
-/* { dg-final { scan-tree-dump-times "strlen \\(" 0 "strlen" } } */
-/* { dg-final { scan-tree-dump-times "memcpy \\(" 4 "strlen" } } */
+/* On non-strict-align targets we inline the memcpy that strcat is turned
+   into and end up with a short typed load / store which strlenopt is not
+   able to analyze. */
+
+/* { dg-final { scan-tree-dump-times "strlen \\(" 0 "strlen" { xfail non_strict_align } } } */
+/* { dg-final { scan-tree-dump-times "memcpy \\(" 2 "strlen" { target { non_strict_align } } } } */
+/* { dg-final { scan-tree-dump-times "memcpy \\(" 4 "strlen" { target { ! non_strict_align } } } } */
/* { dg-final { scan-tree-dump-times "strcpy \\(" 0 "strlen" } } */
/* { dg-final { scan-tree-dump-times "strcat \\(" 0 "strlen" } } */
/* { dg-final { scan-tree-dump-times "strchr \\(" 0 "strlen" } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/forwprop-6.c b/gcc/testsuite/gcc.dg/tree-ssa/forwprop-6.c
index 4106b33..79f0a8d 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/forwprop-6.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/forwprop-6.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -fdump-tree-forwprop1 -W -Wall" } */
+/* { dg-options "-O2 -fdump-tree-ccp1 -W -Wall" } */
#if (__SIZEOF_INT__ == __SIZEOF_FLOAT__)
typedef int intflt;
#elif (__SIZEOF_LONG__ == __SIZEOF_FLOAT__)
@@ -24,5 +24,5 @@ void f(void)
it to be valid. Then we might as well handle the situation by
value-numbering, removing the load altogether.
??? We now do this after CCP re-writes a into SSA form. */
-/* { dg-final { scan-tree-dump-times "VIEW_CONVERT_EXPR" 1 "forwprop1" } } */
-/* { dg-final { cleanup-tree-dump "forwprop1" } } */
+/* { dg-final { scan-tree-dump-times "VIEW_CONVERT_EXPR" 1 "ccp1" } } */
+/* { dg-final { cleanup-tree-dump "ccp1" } } */
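The test body itself is outside this hunk; as a minimal sketch of the
pattern it exercises (the identifiers below are illustrative, not copied
from the test), a store through one union member followed by a load
through the other is what folding rewrites into a VIEW_CONVERT_EXPR, and
that rewrite is now looked for in the ccp1 dump instead of forwprop1:

typedef int intflt;   /* assumes sizeof (int) == sizeof (float) */

intflt result;

void
punned_store (float value)
{
  union { intflt i; float f; } u;
  u.f = value;
  result = u.i;   /* expected to become a VIEW_CONVERT_EXPR of value */
}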
diff --git a/gcc/tree-ssa-forwprop.c b/gcc/tree-ssa-forwprop.c
index 0284301..6b584e5 100644
--- a/gcc/tree-ssa-forwprop.c
+++ b/gcc/tree-ssa-forwprop.c
@@ -54,6 +54,8 @@ along with GCC; see the file COPYING3. If not see
#include "tree-ssa-propagate.h"
#include "tree-ssa-dom.h"
#include "builtins.h"
+#include "tree-cfgcleanup.h"
+#include "tree-into-ssa.h"
/* This pass propagates the RHS of assignment statements into use
sites of the LHS of the assignment. It's basically a specialized
@@ -3586,6 +3588,93 @@ simplify_mult (gimple_stmt_iterator *gsi)
return false;
}
+
+
+/* Const-and-copy lattice for fold_all_stmts. */
+static vec<tree> lattice;
+
+/* Primitive "lattice" function for gimple_simplify. */
+
+static tree
+fwprop_ssa_val (tree name)
+{
+  /* First valueize NAME. */
+  if (TREE_CODE (name) == SSA_NAME
+      && SSA_NAME_VERSION (name) < lattice.length ())
+    {
+      tree val = lattice[SSA_NAME_VERSION (name)];
+      if (val)
+        name = val;
+    }
+  /* If NAME does not have a single use, signal that we don't want to
+     continue matching into its definition. */
+  if (TREE_CODE (name) == SSA_NAME
+      && !has_single_use (name))
+    return NULL_TREE;
+  return name;
+}
+
+/* Fold all stmts using fold_stmt following only single-use chains
+   and using a simple const-and-copy lattice. */
+
+static bool
+fold_all_stmts (struct function *fun)
+{
+  bool cfg_changed = false;
+
+  /* Combine stmts with the stmts defining their operands. Do that
+     in an order that guarantees visiting SSA defs before SSA uses. */
+  lattice.create (num_ssa_names);
+  lattice.quick_grow_cleared (num_ssa_names);
+  int *postorder = XNEWVEC (int, n_basic_blocks_for_fn (fun));
+  int postorder_num = inverted_post_order_compute (postorder);
+  for (int i = 0; i < postorder_num; ++i)
+    {
+      basic_block bb = BASIC_BLOCK_FOR_FN (fun, postorder[i]);
+      for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
+           !gsi_end_p (gsi); gsi_next (&gsi))
+        {
+          gimple stmt = gsi_stmt (gsi);
+          gimple orig_stmt = stmt;
+
+          if (fold_stmt (&gsi, fwprop_ssa_val))
+            {
+              stmt = gsi_stmt (gsi);
+              if (maybe_clean_or_replace_eh_stmt (orig_stmt, stmt)
+                  && gimple_purge_dead_eh_edges (bb))
+                cfg_changed = true;
+              /* Cleanup the CFG if we simplified a condition to
+                 true or false. */
+              if (gimple_code (stmt) == GIMPLE_COND
+                  && (gimple_cond_true_p (stmt)
+                      || gimple_cond_false_p (stmt)))
+                cfg_changed = true;
+              update_stmt (stmt);
+            }
+
+          /* Fill up the lattice. */
+          if (gimple_assign_single_p (stmt))
+            {
+              tree lhs = gimple_assign_lhs (stmt);
+              tree rhs = gimple_assign_rhs1 (stmt);
+              if (TREE_CODE (lhs) == SSA_NAME)
+                {
+                  if (TREE_CODE (rhs) == SSA_NAME)
+                    lattice[SSA_NAME_VERSION (lhs)] = fwprop_ssa_val (rhs);
+                  else if (is_gimple_min_invariant (rhs))
+                    lattice[SSA_NAME_VERSION (lhs)] = rhs;
+                  else
+                    lattice[SSA_NAME_VERSION (lhs)] = lhs;
+                }
+            }
+        }
+    }
+  free (postorder);
+  lattice.release ();
+
+  return cfg_changed;
+}
+
/* Main entry point for the forward propagation and statement combine
optimizer. */
@@ -3876,6 +3965,9 @@ pass_forwprop::execute (function *fun)
}
}
+  /* At the end fold all statements. */
+  cfg_changed |= fold_all_stmts (fun);
+
if (cfg_changed)
todoflags |= TODO_cleanup_cfg;
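
To illustrate what the new final folding buys, here is an illustrative
input (not taken from the patch or its testsuite changes): because
fwprop_ssa_val only valueizes names recorded in the lattice and refuses to
look through multi-use SSA names, gimple_simplify can combine single-use
chains like the one below in a single walk over the IL.

int
chain (int x)
{
  int a = x + 1;   /* a has exactly one use, so its definition may be
                      looked through when folding the next statement */
  int b = a - 1;   /* may simplify to plain x during the final folding */
  return b;
}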