path: root/gcc/tree-ssa-threadedge.c
author	Richard Biener <rguenther@suse.de>	2015-10-26 14:22:20 +0000
committer	Richard Biener <rguenth@gcc.gnu.org>	2015-10-26 14:22:20 +0000
commit	1e080ab4ed3a4390f6e57adb608b91b2c440af36 (patch)
tree	7612eb7cb09900d59522c9f603ee66b5da5b0595 /gcc/tree-ssa-threadedge.c
parent	3373589b25382e5389a189acc832fb657016f375 (diff)
tree-object-size.c: Remove builtins.h include, include tree-cfg.h.
2015-10-26  Richard Biener  <rguenther@suse.de>

	* tree-object-size.c: Remove builtins.h include, include
	tree-cfg.h.
	(do_valueize): New function.
	(pass_object_sizes::execute): Use gimple_fold_stmt_to_constant
	and replace_uses_by.
	* tree-ssa-threadedge.c: Remove builtins.h include, include
	gimple-fold.h.
	(fold_assignment_stmt): Remove.
	(threadedge_valueize): New function.
	(record_temporary_equivalences_from_stmts): Use
	gimple_fold_stmt_to_constant_1, note additional cleanup
	opportunities.

From-SVN: r229364
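The heart of the change is the new threadedge_valueize hook: instead of the hand-rolled per-rhs-class folding in fold_assignment_stmt, the generic folder gimple_fold_stmt_to_constant_1 is handed a callback that maps each SSA name to its recorded SSA_NAME_VALUE, or returns the name unchanged. The stand-alone C sketch below illustrates that valueize-hook pattern on a toy expression type; everything in it (expr, fold_to_constant, toy_valueize, the known[] lattice) is invented for illustration and is not GCC code.

/* Minimal stand-alone sketch of the valueize-hook pattern: a generic
   folder asks a callback for the currently known value of each leaf
   before folding.  Not GCC code; all names here are invented.  */
#include <stdio.h>
#include <stdlib.h>

typedef struct expr
{
  char op;		/* '+' or '*' for interior nodes, 0 for a leaf.  */
  int leaf_id;		/* Which temporary a leaf names.  */
  int value;		/* Constant value, valid when constant_p.  */
  int constant_p;
  struct expr *lhs, *rhs;
} expr;

/* The hook: map a leaf to its known value or hand it back unchanged,
   the way threadedge_valueize consults SSA_NAME_VALUE.  */
typedef expr *(*valueize_fn) (expr *);

/* Fold E to a constant using VALUEIZE for the leaves; return NULL when
   no simplification is possible (cf. gimple_fold_stmt_to_constant_1
   returning NULL_TREE).  */
static expr *
fold_to_constant (expr *e, valueize_fn valueize)
{
  if (e->op == 0)
    {
      expr *v = valueize (e);
      return (v && v->constant_p) ? v : NULL;
    }
  expr *a = fold_to_constant (e->lhs, valueize);
  expr *b = fold_to_constant (e->rhs, valueize);
  if (!a || !b)
    return NULL;
  expr *r = (expr *) calloc (1, sizeof (expr));
  r->constant_p = 1;
  r->value = e->op == '+' ? a->value + b->value : a->value * b->value;
  return r;
}

/* A toy lattice playing the role of SSA_NAME_VALUE: temporaries
   0 and 1 are known to be 3 and 4.  */
static expr known[2] = { { 0, 0, 3, 1, NULL, NULL },
			 { 0, 1, 4, 1, NULL, NULL } };

static expr *
toy_valueize (expr *t)
{
  if (t->leaf_id >= 0 && t->leaf_id < 2)
    return &known[t->leaf_id];
  return t;	/* No recorded value: hand the leaf back unchanged.  */
}

int
main (void)
{
  expr t0 = { 0, 0, 0, 0, NULL, NULL };
  expr t1 = { 0, 1, 0, 0, NULL, NULL };
  expr sum = { '+', -1, 0, 0, &t0, &t1 };
  expr *c = fold_to_constant (&sum, toy_valueize);
  if (c)
    printf ("folded to %d\n", c->value);	/* Prints "folded to 7".  */
  else
    printf ("not foldable\n");
  return 0;
}

The design point mirrored here is that the folder itself stays generic: each client (jump threading, object-size, later VRP per the ??? comment in the patch) supplies only the lattice lookup.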
Diffstat (limited to 'gcc/tree-ssa-threadedge.c')
-rw-r--r--	gcc/tree-ssa-threadedge.c	126
1 file changed, 46 insertions(+), 80 deletions(-)
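The fallback path kept in the diff below still uses the older idiom, now run only when generic folding fails: save each operand, substitute its lattice value, invoke the pass's simplify callback, and restore the originals so the IL is left untouched. Here is a stand-alone sketch of that save/substitute/restore discipline; try_simplify, sum_all, and the lattice array (with -1 meaning "no known value") are invented for illustration and are not the GCC implementation.

#include <stddef.h>
#include <stdio.h>

#define NOPS 3	/* Assumes at most NOPS operands per statement.  */

/* Save each operand, copy-propagate the known lattice values in, run
   SIMPLIFY, then restore the originals -- mirroring the copy[] /
   SET_USE / restore loops in the patch.  */
static int
try_simplify (int *ops, size_t n, int (*simplify) (const int *, size_t),
	      const int *lattice)
{
  int copy[NOPS];
  size_t i;

  for (i = 0; i < n; i++)
    {
      copy[i] = ops[i];
      if (lattice[i] != -1)	/* -1 plays the role of "no known value".  */
	ops[i] = lattice[i];
    }

  int result = simplify (ops, n);

  /* Restore the statement's original operands.  */
  for (i = 0; i < n; i++)
    ops[i] = copy[i];

  return result;
}

/* A stand-in for the pass-specific simplify callback.  */
static int
sum_all (const int *ops, size_t n)
{
  int s = 0;
  for (size_t i = 0; i < n; i++)
    s += ops[i];
  return s;
}

int
main (void)
{
  int ops[NOPS] = { 0, 0, 0 };		/* Operand slots, values unknown.  */
  int lattice[NOPS] = { 1, 2, 4 };	/* Recorded constant values.  */

  printf ("simplified to %d\n", try_simplify (ops, NOPS, sum_all, lattice));
  printf ("operands restored: %d %d %d\n", ops[0], ops[1], ops[2]);
  return 0;
}

Note also the two small behavioral tweaks visible in the diff: the fallback now iterates over SSA_OP_ALL_USES rather than SSA_OP_USE | SSA_OP_VUSE, and the scratch array moves from XCNEWVEC/free to stack allocation with XALLOCAVEC.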
diff --git a/gcc/tree-ssa-threadedge.c b/gcc/tree-ssa-threadedge.c
index da2fb1f..38f80ba 100644
--- a/gcc/tree-ssa-threadedge.c
+++ b/gcc/tree-ssa-threadedge.c
@@ -36,7 +36,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-ssa-threadedge.h"
#include "tree-ssa-threadbackward.h"
#include "tree-ssa-dom.h"
-#include "builtins.h"
+#include "gimple-fold.h"
/* To avoid code explosion due to jump threading, we limit the
number of statements we are going to copy. This variable
@@ -180,54 +180,18 @@ record_temporary_equivalences_from_phis (edge e, const_and_copies *const_and_cop
return true;
}
-/* Fold the RHS of an assignment statement and return it as a tree.
- May return NULL_TREE if no simplification is possible. */
+/* Valueize hook for gimple_fold_stmt_to_constant_1. */
static tree
-fold_assignment_stmt (gimple *stmt)
+threadedge_valueize (tree t)
{
- enum tree_code subcode = gimple_assign_rhs_code (stmt);
-
- switch (get_gimple_rhs_class (subcode))
+ if (TREE_CODE (t) == SSA_NAME)
{
- case GIMPLE_SINGLE_RHS:
- return fold (gimple_assign_rhs1 (stmt));
-
- case GIMPLE_UNARY_RHS:
- {
- tree lhs = gimple_assign_lhs (stmt);
- tree op0 = gimple_assign_rhs1 (stmt);
- return fold_unary (subcode, TREE_TYPE (lhs), op0);
- }
-
- case GIMPLE_BINARY_RHS:
- {
- tree lhs = gimple_assign_lhs (stmt);
- tree op0 = gimple_assign_rhs1 (stmt);
- tree op1 = gimple_assign_rhs2 (stmt);
- return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
- }
-
- case GIMPLE_TERNARY_RHS:
- {
- tree lhs = gimple_assign_lhs (stmt);
- tree op0 = gimple_assign_rhs1 (stmt);
- tree op1 = gimple_assign_rhs2 (stmt);
- tree op2 = gimple_assign_rhs3 (stmt);
-
- /* Sadly, we have to handle conditional assignments specially
- here, because fold expects all the operands of an expression
- to be folded before the expression itself is folded, but we
- can't just substitute the folded condition here. */
- if (gimple_assign_rhs_code (stmt) == COND_EXPR)
- op0 = fold (op0);
-
- return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
- }
-
- default:
- gcc_unreachable ();
+ tree tem = SSA_NAME_VALUE (t);
+ if (tem)
+ return tem;
}
+ return t;
}
/* Try to simplify each statement in E->dest, ultimately leading to
@@ -371,48 +335,50 @@ record_temporary_equivalences_from_stmts_at_dest (edge e,
else
{
/* A statement that is not a trivial copy or ASSERT_EXPR.
- We're going to temporarily copy propagate the operands
- and see if that allows us to simplify this statement. */
- tree *copy;
- ssa_op_iter iter;
- use_operand_p use_p;
- unsigned int num, i = 0;
-
- num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
- copy = XCNEWVEC (tree, num);
-
- /* Make a copy of the uses & vuses into USES_COPY, then cprop into
- the operands. */
- FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
- {
- tree tmp = NULL;
- tree use = USE_FROM_PTR (use_p);
-
- copy[i++] = use;
- if (TREE_CODE (use) == SSA_NAME)
- tmp = SSA_NAME_VALUE (use);
- if (tmp)
- SET_USE (use_p, tmp);
- }
-
- /* Try to fold/lookup the new expression. Inserting the
+ Try to fold the new expression. Inserting the
expression into the hash table is unlikely to help. */
- if (is_gimple_call (stmt))
- cached_lhs = fold_call_stmt (as_a <gcall *> (stmt), false);
- else
- cached_lhs = fold_assignment_stmt (stmt);
-
+ /* ??? The DOM callback below can be changed to setting
+ the mprts_hook around the call to thread_across_edge,
+ avoiding the use substitution. The VRP hook should be
+ changed to properly valueize operands itself using
+ SSA_NAME_VALUE in addition to its own lattice. */
+ cached_lhs = gimple_fold_stmt_to_constant_1 (stmt,
+ threadedge_valueize);
if (!cached_lhs
|| (TREE_CODE (cached_lhs) != SSA_NAME
&& !is_gimple_min_invariant (cached_lhs)))
- cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack);
+ {
+ /* We're going to temporarily copy propagate the operands
+ and see if that allows us to simplify this statement. */
+ tree *copy;
+ ssa_op_iter iter;
+ use_operand_p use_p;
+ unsigned int num, i = 0;
+
+ num = NUM_SSA_OPERANDS (stmt, SSA_OP_ALL_USES);
+ copy = XALLOCAVEC (tree, num);
+
+ /* Make a copy of the uses & vuses into USES_COPY, then cprop into
+ the operands. */
+ FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
+ {
+ tree tmp = NULL;
+ tree use = USE_FROM_PTR (use_p);
+
+ copy[i++] = use;
+ if (TREE_CODE (use) == SSA_NAME)
+ tmp = SSA_NAME_VALUE (use);
+ if (tmp)
+ SET_USE (use_p, tmp);
+ }
- /* Restore the statement's original uses/defs. */
- i = 0;
- FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
- SET_USE (use_p, copy[i++]);
+ cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack);
- free (copy);
+ /* Restore the statement's original uses/defs. */
+ i = 0;
+ FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
+ SET_USE (use_p, copy[i++]);
+ }
}
/* Record the context sensitive equivalence if we were able