Diffstat (limited to 'gcc/tree-inline.c')
-rw-r--r-- | gcc/tree-inline.c | 244 |
1 file changed, 139 insertions, 105 deletions
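This is the tree-inline.c leg of the move to typed GIMPLE statement classes (gbind, gcall, gcatch, gdebug, gphi, greturn, gswitch, ...): plain gimple pointers are narrowed with as_a <T *> where the statement code is already known, and tested-and-narrowed with dyn_cast <T *> where it is not, while the copy_body_data field formerly used as id->gimple_call is now the gcall-typed id->call_stmt (the field itself presumably changes in tree-inline.h, which is outside this diffstat). A minimal sketch of the two idioms, assuming the usual GCC internal headers are in scope as at the top of tree-inline.c; the helper name is hypothetical and not part of this patch:

  /* Sketch only: hypothetical helper, not part of this patch.  Assumes the
     standard GCC internal headers (coretypes.h, gimple.h, ...) are already
     included, as they are in tree-inline.c.  */
  static void
  sketch_narrow_stmt (gimple stmt)
  {
    /* dyn_cast: the statement may or may not be a call, so test and narrow
       in one step, as the copy_bb and remap_gimple_stmt hunks below do.  */
    if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
      gimple_call_set_tail (call_stmt, false);

    /* as_a: gimple_code has already established the kind, so a checked
       downcast is enough, as in the GIMPLE_RETURN hunk below.  */
    if (gimple_code (stmt) == GIMPLE_RETURN)
      {
        tree retval = gimple_return_retval (as_a <greturn *> (stmt));
        (void) retval;
      }
  }

dyn_cast returns a null pointer when the statement is of another kind, so the kind test and the narrowing collapse into one construct; as_a only asserts the kind in checking-enabled builds and is otherwise a plain cast.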
diff --git a/gcc/tree-inline.c b/gcc/tree-inline.c
index 4b937ca..835edd1 100644
--- a/gcc/tree-inline.c
+++ b/gcc/tree-inline.c
@@ -808,7 +808,7 @@ remap_gimple_seq (gimple_seq body, copy_body_data *id)
    block using the mapping information in ID. */
 
 static gimple
-copy_gimple_bind (gimple stmt, copy_body_data *id)
+copy_gimple_bind (gbind *stmt, copy_body_data *id)
 {
   gimple new_bind;
   tree new_block, new_vars;
@@ -1319,7 +1319,7 @@ remap_gimple_stmt (gimple stmt, copy_body_data *id)
      statement. */
   if (gimple_code (stmt) == GIMPLE_RETURN && id->transform_return_to_modify)
     {
-      tree retval = gimple_return_retval (stmt);
+      tree retval = gimple_return_retval (as_a <greturn *> (stmt));
       tree retbnd = gimple_return_retbnd (stmt);
       tree bndslot = id->retbnd;
 
@@ -1371,12 +1371,15 @@ remap_gimple_stmt (gimple stmt, copy_body_data *id)
       switch (gimple_code (stmt))
         {
         case GIMPLE_BIND:
-          copy = copy_gimple_bind (stmt, id);
+          copy = copy_gimple_bind (as_a <gbind *> (stmt), id);
           break;
 
         case GIMPLE_CATCH:
-          s1 = remap_gimple_seq (gimple_catch_handler (stmt), id);
-          copy = gimple_build_catch (gimple_catch_types (stmt), s1);
+          {
+            gcatch *catch_stmt = as_a <gcatch *> (stmt);
+            s1 = remap_gimple_seq (gimple_catch_handler (catch_stmt), id);
+            copy = gimple_build_catch (gimple_catch_types (catch_stmt), s1);
+          }
           break;
 
         case GIMPLE_EH_FILTER:
@@ -1396,12 +1399,15 @@ remap_gimple_stmt (gimple stmt, copy_body_data *id)
           break;
 
         case GIMPLE_OMP_PARALLEL:
-          s1 = remap_gimple_seq (gimple_omp_body (stmt), id);
-          copy = gimple_build_omp_parallel
-                   (s1,
-                    gimple_omp_parallel_clauses (stmt),
-                    gimple_omp_parallel_child_fn (stmt),
-                    gimple_omp_parallel_data_arg (stmt));
+          {
+            gomp_parallel *omp_par_stmt = as_a <gomp_parallel *> (stmt);
+            s1 = remap_gimple_seq (gimple_omp_body (omp_par_stmt), id);
+            copy = gimple_build_omp_parallel
+                     (s1,
+                      gimple_omp_parallel_clauses (omp_par_stmt),
+                      gimple_omp_parallel_child_fn (omp_par_stmt),
+                      gimple_omp_parallel_data_arg (omp_par_stmt));
+          }
           break;
 
         case GIMPLE_OMP_TASK:
@@ -1487,14 +1493,25 @@ remap_gimple_stmt (gimple stmt, copy_body_data *id)
 
         case GIMPLE_OMP_CRITICAL:
           s1 = remap_gimple_seq (gimple_omp_body (stmt), id);
-          copy
-            = gimple_build_omp_critical (s1, gimple_omp_critical_name (stmt));
+          copy = gimple_build_omp_critical (s1,
+                                            gimple_omp_critical_name (
+                                              as_a <gomp_critical *> (stmt)));
           break;
 
         case GIMPLE_TRANSACTION:
-          s1 = remap_gimple_seq (gimple_transaction_body (stmt), id);
-          copy = gimple_build_transaction (s1, gimple_transaction_label (stmt));
-          gimple_transaction_set_subcode (copy, gimple_transaction_subcode (stmt));
+          {
+            gtransaction *old_trans_stmt = as_a <gtransaction *> (stmt);
+            gtransaction *new_trans_stmt;
+            s1 = remap_gimple_seq (gimple_transaction_body (old_trans_stmt),
+                                   id);
+            copy = new_trans_stmt
+              = gimple_build_transaction (
+                  s1,
+                  gimple_transaction_label (old_trans_stmt));
+            gimple_transaction_set_subcode (
+              new_trans_stmt,
+              gimple_transaction_subcode (old_trans_stmt));
+          }
           break;
 
         default:
@@ -1546,18 +1563,20 @@ remap_gimple_stmt (gimple stmt, copy_body_data *id)
       if (gimple_debug_bind_p (stmt))
         {
-          copy = gimple_build_debug_bind (gimple_debug_bind_get_var (stmt),
-                                          gimple_debug_bind_get_value (stmt),
-                                          stmt);
+          gdebug *copy
+            = gimple_build_debug_bind (gimple_debug_bind_get_var (stmt),
+                                       gimple_debug_bind_get_value (stmt),
+                                       stmt);
           id->debug_stmts.safe_push (copy);
           gimple_seq_add_stmt (&stmts, copy);
           return stmts;
         }
 
       if (gimple_debug_source_bind_p (stmt))
         {
-          copy = gimple_build_debug_source_bind
-                   (gimple_debug_source_bind_get_var (stmt),
-                    gimple_debug_source_bind_get_value (stmt), stmt);
+          gdebug *copy = gimple_build_debug_source_bind
+                           (gimple_debug_source_bind_get_var (stmt),
+                            gimple_debug_source_bind_get_value (stmt),
+                            stmt);
           id->debug_stmts.safe_push (copy);
           gimple_seq_add_stmt (&stmts, copy);
           return stmts;
@@ -1567,9 +1586,9 @@ remap_gimple_stmt (gimple stmt, copy_body_data *id)
       copy = gimple_copy (stmt);
 
       /* Clear flags that need revisiting. */
-      if (is_gimple_call (copy)
-          && gimple_call_tail_p (copy))
-        gimple_call_set_tail (copy, false);
+      if (gcall *call_stmt = dyn_cast <gcall *> (copy))
+        if (gimple_call_tail_p (call_stmt))
+          gimple_call_set_tail (call_stmt, false);
 
       /* Remap the region numbers for __builtin_eh_{pointer,filter},
          RESX and EH_DISPATCH. */
@@ -1603,23 +1622,25 @@ remap_gimple_stmt (gimple stmt, copy_body_data *id)
               keep it valid over inlining by setting DECL_PT_UID. */
            if (!id->src_cfun->gimple_df
                || !id->src_cfun->gimple_df->ipa_pta)
-              gimple_call_reset_alias_info (copy);
+              gimple_call_reset_alias_info (as_a <gcall *> (copy));
          }
          break;
 
        case GIMPLE_RESX:
          {
-           int r = gimple_resx_region (copy);
+           gresx *resx_stmt = as_a <gresx *> (copy);
+           int r = gimple_resx_region (resx_stmt);
            r = remap_eh_region_nr (r, id);
-           gimple_resx_set_region (copy, r);
+           gimple_resx_set_region (resx_stmt, r);
          }
          break;
 
        case GIMPLE_EH_DISPATCH:
          {
-           int r = gimple_eh_dispatch_region (copy);
+           geh_dispatch *eh_dispatch = as_a <geh_dispatch *> (copy);
+           int r = gimple_eh_dispatch_region (eh_dispatch);
            r = remap_eh_region_nr (r, id);
-           gimple_eh_dispatch_set_region (copy, r);
+           gimple_eh_dispatch_set_region (eh_dispatch, r);
          }
          break;
@@ -1769,18 +1790,20 @@ copy_bb (copy_body_data *id, basic_block bb, int frequency_scale,
       do
         {
           tree fn;
+          gcall *call_stmt;
 
           stmt = gsi_stmt (copy_gsi);
-          if (is_gimple_call (stmt)
-              && gimple_call_va_arg_pack_p (stmt)
-              && id->gimple_call)
+          call_stmt = dyn_cast <gcall *> (stmt);
+          if (call_stmt
+              && gimple_call_va_arg_pack_p (call_stmt)
+              && id->call_stmt)
             {
               /* __builtin_va_arg_pack () should be replaced by
                  all arguments corresponding to ... in the caller. */
               tree p;
-              gimple new_call;
+              gcall *new_call;
               vec<tree> argarray;
-              size_t nargs = gimple_call_num_args (id->gimple_call);
+              size_t nargs = gimple_call_num_args (id->call_stmt);
               size_t n, i, nargs_to_copy;
               bool remove_bounds = false;
@@ -1791,73 +1814,73 @@ copy_bb (copy_body_data *id, basic_block bb, int frequency_scale,
                  we handle not instrumented call in instrumented function. */
               nargs_to_copy = nargs;
-              if (gimple_call_with_bounds_p (id->gimple_call)
+              if (gimple_call_with_bounds_p (id->call_stmt)
                   && !gimple_call_with_bounds_p (stmt))
                 {
-                  for (i = gimple_call_num_args (id->gimple_call) - nargs;
-                       i < gimple_call_num_args (id->gimple_call);
+                  for (i = gimple_call_num_args (id->call_stmt) - nargs;
+                       i < gimple_call_num_args (id->call_stmt);
                        i++)
-                    if (POINTER_BOUNDS_P (gimple_call_arg (id->gimple_call, i)))
+                    if (POINTER_BOUNDS_P (gimple_call_arg (id->call_stmt, i)))
                       nargs_to_copy--;
                   remove_bounds = true;
                 }
 
               /* Create the new array of arguments. */
-              n = nargs_to_copy + gimple_call_num_args (stmt);
+              n = nargs_to_copy + gimple_call_num_args (call_stmt);
               argarray.create (n);
               argarray.safe_grow_cleared (n);
 
               /* Copy all the arguments before '...' */
               memcpy (argarray.address (),
-                      gimple_call_arg_ptr (stmt, 0),
-                      gimple_call_num_args (stmt) * sizeof (tree));
+                      gimple_call_arg_ptr (call_stmt, 0),
+                      gimple_call_num_args (call_stmt) * sizeof (tree));
 
               if (remove_bounds)
                 {
                   /* Append the rest of arguments removing bounds. */
-                  unsigned cur = gimple_call_num_args (stmt);
-                  i = gimple_call_num_args (id->gimple_call) - nargs;
-                  for (i = gimple_call_num_args (id->gimple_call) - nargs;
-                       i < gimple_call_num_args (id->gimple_call);
+                  unsigned cur = gimple_call_num_args (call_stmt);
+                  i = gimple_call_num_args (id->call_stmt) - nargs;
+                  for (i = gimple_call_num_args (id->call_stmt) - nargs;
+                       i < gimple_call_num_args (id->call_stmt);
                        i++)
-                    if (!POINTER_BOUNDS_P (gimple_call_arg (id->gimple_call, i)))
-                      argarray[cur++] = gimple_call_arg (id->gimple_call, i);
+                    if (!POINTER_BOUNDS_P (gimple_call_arg (id->call_stmt, i)))
+                      argarray[cur++] = gimple_call_arg (id->call_stmt, i);
                   gcc_assert (cur == n);
                 }
               else
                 {
                   /* Append the arguments passed in '...' */
-                  memcpy (argarray.address () + gimple_call_num_args (stmt),
-                          gimple_call_arg_ptr (id->gimple_call, 0)
-                          + (gimple_call_num_args (id->gimple_call) - nargs),
+                  memcpy (argarray.address () + gimple_call_num_args (call_stmt),
+                          gimple_call_arg_ptr (id->call_stmt, 0)
+                          + (gimple_call_num_args (id->call_stmt) - nargs),
                           nargs * sizeof (tree));
                 }
 
-              new_call = gimple_build_call_vec (gimple_call_fn (stmt),
+              new_call = gimple_build_call_vec (gimple_call_fn (call_stmt),
                                                 argarray);
 
               argarray.release ();
 
               /* Copy all GIMPLE_CALL flags, location and block, except
                  GF_CALL_VA_ARG_PACK. */
-              gimple_call_copy_flags (new_call, stmt);
+              gimple_call_copy_flags (new_call, call_stmt);
               gimple_call_set_va_arg_pack (new_call, false);
               gimple_set_location (new_call, gimple_location (stmt));
               gimple_set_block (new_call, gimple_block (stmt));
-              gimple_call_set_lhs (new_call, gimple_call_lhs (stmt));
+              gimple_call_set_lhs (new_call, gimple_call_lhs (call_stmt));
 
               gsi_replace (&copy_gsi, new_call, false);
               stmt = new_call;
             }
           else if (is_gimple_call (stmt)
-                   && id->gimple_call
+                   && id->call_stmt
                    && (decl = gimple_call_fndecl (stmt))
                    && DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL
                    && DECL_FUNCTION_CODE (decl) == BUILT_IN_VA_ARG_PACK_LEN)
             {
               /* __builtin_va_arg_pack_len () should be replaced by
                  the number of anonymous arguments. */
-              size_t nargs = gimple_call_num_args (id->gimple_call), i;
+              size_t nargs = gimple_call_num_args (id->call_stmt), i;
               tree count, p;
               gimple new_stmt;
@@ -1865,10 +1888,10 @@ copy_bb (copy_body_data *id, basic_block bb, int frequency_scale,
                 nargs--;
 
               /* For instrumented calls we should ignore bounds. */
-              for (i = gimple_call_num_args (id->gimple_call) - nargs;
-                   i < gimple_call_num_args (id->gimple_call);
+              for (i = gimple_call_num_args (id->call_stmt) - nargs;
+                   i < gimple_call_num_args (id->call_stmt);
                    i++)
-                if (POINTER_BOUNDS_P (gimple_call_arg (id->gimple_call, i)))
+                if (POINTER_BOUNDS_P (gimple_call_arg (id->call_stmt, i)))
                   nargs--;
 
               count = build_int_cst (integer_type_node, nargs);
@@ -1893,7 +1916,7 @@ copy_bb (copy_body_data *id, basic_block bb, int frequency_scale,
 
           /* We're duplicating a CALL_EXPR. Find any corresponding
              callgraph edges and update or duplicate them. */
-          if (is_gimple_call (stmt))
+          if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
             {
               struct cgraph_edge *edge;
 
@@ -1906,7 +1929,7 @@ copy_bb (copy_body_data *id, basic_block bb, int frequency_scale,
                   int edge_freq = edge->frequency;
                   int new_freq;
                   struct cgraph_edge *old_edge = edge;
-                  edge = edge->clone (id->dst_node, stmt,
+                  edge = edge->clone (id->dst_node, call_stmt,
                                       gimple_uid (stmt),
                                       REG_BR_PROB_BASE, CGRAPH_FREQ_BASE,
                                       true);
@@ -1925,7 +1948,7 @@ copy_bb (copy_body_data *id, basic_block bb, int frequency_scale,
                       gcc_assert (!edge->indirect_unknown_callee);
 
                       old_edge->speculative_call_info (direct, indirect, ref);
-                      indirect = indirect->clone (id->dst_node, stmt,
+                      indirect = indirect->clone (id->dst_node, call_stmt,
                                                   gimple_uid (stmt),
                                                   REG_BR_PROB_BASE, CGRAPH_FREQ_BASE,
                                                   true);
@@ -1964,14 +1987,14 @@ copy_bb (copy_body_data *id, basic_block bb, int frequency_scale,
 
                 case CB_CGE_MOVE_CLONES:
                   id->dst_node->set_call_stmt_including_clones (orig_stmt,
-                                                                stmt);
+                                                                call_stmt);
                   edge = id->dst_node->get_edge (stmt);
                   break;
 
                 case CB_CGE_MOVE:
                   edge = id->dst_node->get_edge (orig_stmt);
                   if (edge)
-                    edge->set_call_stmt (stmt);
+                    edge->set_call_stmt (call_stmt);
                   break;
 
                 default:
@@ -2000,12 +2023,12 @@ copy_bb (copy_body_data *id, basic_block bb, int frequency_scale,
                               || !id->dst_node->definition);
                   if (id->transform_call_graph_edges == CB_CGE_MOVE_CLONES)
                     id->dst_node->create_edge_including_clones
-                      (dest, orig_stmt, stmt, bb->count,
+                      (dest, orig_stmt, call_stmt, bb->count,
                        compute_call_stmt_bb_frequency (id->dst_node->decl,
                                                        copy_basic_block),
                        CIF_ORIGINALLY_INDIRECT_CALL);
                   else
-                    id->dst_node->create_edge (dest, stmt,
+                    id->dst_node->create_edge (dest, call_stmt,
                                                bb->count,
                                                compute_call_stmt_bb_frequency (id->dst_node->decl,
@@ -2018,7 +2041,7 @@ copy_bb (copy_body_data *id, basic_block bb, int frequency_scale,
                 }
             }
 
-          notice_special_calls (stmt);
+          notice_special_calls (as_a <gcall *> (stmt));
         }
 
       maybe_duplicate_eh_stmt_fn (cfun, stmt, id->src_cfun, orig_stmt,
@@ -2073,8 +2096,8 @@ update_ssa_across_abnormal_edges (basic_block bb, basic_block ret_bb,
     if (!e->dest->aux
         || ((basic_block)e->dest->aux)->index == ENTRY_BLOCK)
       {
-        gimple phi;
-        gimple_stmt_iterator si;
+        gphi *phi;
+        gphi_iterator si;
 
         if (!nonlocal_goto)
           gcc_assert (e->flags & EDGE_EH);
@@ -2086,7 +2109,7 @@ update_ssa_across_abnormal_edges (basic_block bb, basic_block ret_bb,
           {
             edge re;
 
-            phi = gsi_stmt (si);
+            phi = si.phi ();
 
             /* For abnormal goto/call edges the receiver can be the
                ENTRY_BLOCK. Do not assert this cannot happen. */
@@ -2194,7 +2217,7 @@ copy_edges_for_bb (basic_block bb, gcov_type count_scale, basic_block ret_bb,
         }
 
       if (gimple_code (copy_stmt) == GIMPLE_EH_DISPATCH)
-        make_eh_dispatch_edges (copy_stmt);
+        make_eh_dispatch_edges (as_a <geh_dispatch *> (copy_stmt));
       else if (can_throw)
         make_eh_edges (copy_stmt);
 
@@ -2240,17 +2263,17 @@ copy_phis_for_bb (basic_block bb, copy_body_data *id)
 {
   basic_block const new_bb = (basic_block) bb->aux;
   edge_iterator ei;
-  gimple phi;
-  gimple_stmt_iterator si;
+  gphi *phi;
+  gphi_iterator si;
   edge new_edge;
   bool inserted = false;
 
   for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
     {
       tree res, new_res;
-      gimple new_phi;
+      gphi *new_phi;
 
-      phi = gsi_stmt (si);
+      phi = si.phi ();
       res = PHI_RESULT (phi);
       new_res = res;
       if (!virtual_operand_p (res))
@@ -2421,7 +2444,8 @@ maybe_move_debug_stmts_to_successors (copy_body_data *id, basic_block new_bb)
       gimple_stmt_iterator dsi = gsi_after_labels (e->dest);
       while (is_gimple_debug (gsi_stmt (ssi)))
         {
-          gimple stmt = gsi_stmt (ssi), new_stmt;
+          gimple stmt = gsi_stmt (ssi);
+          gdebug *new_stmt;
           tree var;
           tree value;
 
@@ -2658,12 +2682,12 @@ copy_cfg_body (copy_body_data * id, gcov_type count, int frequency_scale,
   /* Now that we've duplicated the blocks, duplicate their edges. */
   basic_block abnormal_goto_dest = NULL;
-  if (id->gimple_call
-      && stmt_can_make_abnormal_goto (id->gimple_call))
+  if (id->call_stmt
+      && stmt_can_make_abnormal_goto (id->call_stmt))
     {
-      gimple_stmt_iterator gsi = gsi_for_stmt (id->gimple_call);
+      gimple_stmt_iterator gsi = gsi_for_stmt (id->call_stmt);
 
-      bb = gimple_bb (id->gimple_call);
+      bb = gimple_bb (id->call_stmt);
       gsi_next (&gsi);
       if (gsi_end_p (gsi))
         abnormal_goto_dest = get_abnormal_succ_dispatcher (bb);
@@ -2749,7 +2773,7 @@ copy_cfg_body (copy_body_data * id, gcov_type count, int frequency_scale,
    this arises, we drop the VALUE expression altogether. */
 
 static void
-copy_debug_stmt (gimple stmt, copy_body_data *id)
+copy_debug_stmt (gdebug *stmt, copy_body_data *id)
 {
   tree t, *n;
   struct walk_stmt_info wi;
@@ -2808,7 +2832,7 @@ copy_debug_stmt (gimple stmt, copy_body_data *id)
       t = gimple_debug_source_bind_get_value (stmt);
       if (t != NULL_TREE
           && TREE_CODE (t) == PARM_DECL
-          && id->gimple_call)
+          && id->call_stmt)
         {
           vec<tree, va_gc> **debug_args = decl_debug_args_lookup (id->src_fn);
           unsigned int i;
@@ -2841,7 +2865,7 @@ static void
 copy_debug_stmts (copy_body_data *id)
 {
   size_t i;
-  gimple stmt;
+  gdebug *stmt;
 
   if (!id->debug_stmts.exists ())
     return;
@@ -3512,7 +3536,7 @@ inline_forbidden_p_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
          VLA objects as those can't cause unbounded growth (they're always
          wrapped inside stack_save/stack_restore regions. */
       if (gimple_alloca_call_p (stmt)
-          && !gimple_call_alloca_for_var_p (stmt)
+          && !gimple_call_alloca_for_var_p (as_a <gcall *> (stmt))
          && !lookup_attribute ("always_inline", DECL_ATTRIBUTES (fn)))
        {
          inline_forbidden_reason
@@ -3974,15 +3998,18 @@ estimate_num_insns (gimple stmt, eni_weights *weights)
       break;
 
     case GIMPLE_SWITCH:
-      /* Take into account cost of the switch + guess 2 conditional jumps for
-         each case label.
-
-         TODO: once the switch expansion logic is sufficiently separated, we can
-         do better job on estimating cost of the switch. */
-      if (weights->time_based)
-        cost = floor_log2 (gimple_switch_num_labels (stmt)) * 2;
-      else
-        cost = gimple_switch_num_labels (stmt) * 2;
+      {
+        gswitch *switch_stmt = as_a <gswitch *> (stmt);
+        /* Take into account cost of the switch + guess 2 conditional jumps for
+           each case label.
+
+           TODO: once the switch expansion logic is sufficiently separated, we can
+           do better job on estimating cost of the switch. */
+        if (weights->time_based)
+          cost = floor_log2 (gimple_switch_num_labels (switch_stmt)) * 2;
+        else
+          cost = gimple_switch_num_labels (switch_stmt) * 2;
+      }
       break;
 
     case GIMPLE_CALL:
@@ -4056,7 +4083,7 @@ estimate_num_insns (gimple stmt, eni_weights *weights)
     case GIMPLE_ASM:
       {
-        int count = asm_str_count (gimple_asm_string (stmt));
+        int count = asm_str_count (gimple_asm_string (as_a <gasm *> (stmt)));
        /* 1000 means infinity. This avoids overflows later
           with very long asm statements. */
        if (count > 1000)
@@ -4076,13 +4103,17 @@ estimate_num_insns (gimple stmt, eni_weights *weights)
       return 10;
 
     case GIMPLE_BIND:
-      return estimate_num_insns_seq (gimple_bind_body (stmt), weights);
+      return estimate_num_insns_seq (
+               gimple_bind_body (as_a <gbind *> (stmt)),
+               weights);
 
     case GIMPLE_EH_FILTER:
       return estimate_num_insns_seq (gimple_eh_filter_failure (stmt), weights);
 
     case GIMPLE_CATCH:
-      return estimate_num_insns_seq (gimple_catch_handler (stmt), weights);
+      return estimate_num_insns_seq (gimple_catch_handler (
+                                       as_a <gcatch *> (stmt)),
+                                     weights);
 
     case GIMPLE_TRY:
       return (estimate_num_insns_seq (gimple_try_eval (stmt), weights)
@@ -4121,7 +4152,8 @@ estimate_num_insns (gimple stmt, eni_weights *weights)
 
     case GIMPLE_TRANSACTION:
       return (weights->tm_cost
-              + estimate_num_insns_seq (gimple_transaction_body (stmt),
+              + estimate_num_insns_seq (gimple_transaction_body (
+                                          as_a <gtransaction *> (stmt)),
                                         weights));
 
     default:
@@ -4258,6 +4290,7 @@ expand_call_inline (basic_block bb, gimple stmt, copy_body_data *id)
   gimple_stmt_iterator gsi, stmt_gsi;
   bool successfully_inlined = FALSE;
   bool purge_dead_abnormal_edges;
+  gcall *call_stmt;
   unsigned int i;
 
   /* Set input_location here so we get the right instantiation context
@@ -4267,7 +4300,8 @@ expand_call_inline (basic_block bb, gimple stmt, copy_body_data *id)
   input_location = gimple_location (stmt);
 
   /* From here on, we're only interested in CALL_EXPRs. */
-  if (gimple_code (stmt) != GIMPLE_CALL)
+  call_stmt = dyn_cast <gcall *> (stmt);
+  if (!call_stmt)
     goto egress;
 
   cg_edge = id->dst_node->get_edge (stmt);
@@ -4413,7 +4447,7 @@ expand_call_inline (basic_block bb, gimple stmt, copy_body_data *id)
   id->src_fn = fn;
   id->src_node = cg_edge->callee;
   id->src_cfun = DECL_STRUCT_FUNCTION (fn);
-  id->gimple_call = stmt;
+  id->call_stmt = stmt;
 
   gcc_assert (!id->src_cfun->after_inlining);
 
@@ -4473,7 +4507,7 @@ expand_call_inline (basic_block bb, gimple stmt, copy_body_data *id)
       if (gimple_call_with_bounds_p (stmt)
          && TREE_CODE (modify_dest) == SSA_NAME)
        {
-         gimple retbnd = chkp_retbnd_call_by_val (modify_dest);
+         gcall *retbnd = chkp_retbnd_call_by_val (modify_dest);
          if (retbnd)
            {
              return_bounds = gimple_call_lhs (retbnd);
@@ -4496,7 +4530,7 @@ expand_call_inline (basic_block bb, gimple stmt, copy_body_data *id)
       if (DECL_P (modify_dest))
         TREE_NO_WARNING (modify_dest) = 1;
 
-      if (gimple_call_return_slot_opt_p (stmt))
+      if (gimple_call_return_slot_opt_p (call_stmt))
        {
          return_slot = modify_dest;
          modify_dest = NULL;
@@ -4969,9 +5003,9 @@ mark_local_labels_stmt (gimple_stmt_iterator *gsip,
                         struct walk_stmt_info *wi)
 {
   copy_body_data *id = (copy_body_data *) wi->info;
-  gimple stmt = gsi_stmt (*gsip);
+  glabel *stmt = dyn_cast <glabel *> (gsi_stmt (*gsip));
 
-  if (gimple_code (stmt) == GIMPLE_LABEL)
+  if (stmt)
     {
       tree decl = gimple_label_label (stmt);
@@ -5043,9 +5077,9 @@ replace_locals_stmt (gimple_stmt_iterator *gsip,
                      struct walk_stmt_info *wi)
 {
   copy_body_data *id = (copy_body_data *) wi->info;
-  gimple stmt = gsi_stmt (*gsip);
+  gimple gs = gsi_stmt (*gsip);
 
-  if (gimple_code (stmt) == GIMPLE_BIND)
+  if (gbind *stmt = dyn_cast <gbind *> (gs))
     {
       tree block = gimple_bind_block (stmt);
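The PHI hunks above (update_ssa_across_abnormal_edges and copy_phis_for_bb) also swap gimple_stmt_iterator plus gsi_stmt for the typed gphi_iterator, whose phi () accessor yields a gphi * directly. A minimal sketch of that loop shape, again assuming the usual GCC internal headers; the helper is hypothetical and not part of tree-inline.c:

  /* Sketch only: count the non-virtual PHI nodes of BB with the typed
     iterator this patch switches to.  Hypothetical helper, not part of
     the patch.  */
  static unsigned
  sketch_count_phis (basic_block bb)
  {
    unsigned n = 0;
    for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
         gsi_next (&si))
      {
        /* The typed accessor replaces gsi_stmt plus an implicit cast.  */
        gphi *phi = si.phi ();
        if (!virtual_operand_p (gimple_phi_result (phi)))
          n++;
      }
    return n;
  }

gsi_end_p and gsi_next keep working unchanged because gphi_iterator derives from gimple_stmt_iterator; only the dereference changes type.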