author     Nathan Sidwell <nathan@gcc.gnu.org>    2005-04-21 15:47:33 +0000
committer  Nathan Sidwell <nathan@gcc.gnu.org>    2005-04-21 15:47:33 +0000
commit     41806d92af91cf26e52aa00a536cf3094ef2648b (patch)
tree       d321c52c21dedb7b851a7e57b5ea7c445d44db56 /gcc
parent     ac7e839c63bfb19d83e6c81aee68c4435c1915c2 (diff)
download   gcc-41806d92af91cf26e52aa00a536cf3094ef2648b.zip
gcc-41806d92af91cf26e52aa00a536cf3094ef2648b.tar.gz
gcc-41806d92af91cf26e52aa00a536cf3094ef2648b.tar.bz2
alias.c (true_dependence): Remove 'abort' from comments.
	* alias.c (true_dependence): Remove 'abort' from comments.  Use
	gcc_assert and gcc_unreachable as appropriate.
	(canon_true_dependence): Likewise.
	* bb-reorder.c (connect_traces): Likewise.
	* c-common.c (c_add_case_label): Likewise.
	* c-decl.c (finish_function): Likewise.
	* caller-save.c (insert_restore, insert_save): Likewise.
	* cfg.c (update_bb_profile_for_threading): Likewise.
	* cfganal.c (flow_active_insn_p): Likewise.
	* cfgexpand.c (add_reg_br_prob_note): Likewise.
	* cfgrtl.c (rtl_redirect_edge_and_branch_force, rtl_split_edge,
	cfg_layout_merge_blocks): Likewise.
	* ifcvt.c (cond_exec_process_insns, merge_if_block,
	find_if_block): Likewise.
	* integrate.c (allocate_initial_values): Likewise.
	* jump.c (reverse_condition, reverse_condition_maybe_unordered,
	swap_condition, unsigned_condition, signed_condition,
	mark_jump_label, invert_jump_1, rtx_renumbered_equal_p,
	reg_or_subregno): Likewise.
	* lambda-code.c (lambda_compute_auxillary_space,
	lambda_transform_legal_p): Likewise.
	* lambda-mat.c (lambda_matrix_inverse_hard): Likewise.
	* langhooks.c (lhd_set_decl_assembler_name, lhd_type_promotes_to,
	lhd_incomplete_type_error, lhd_expand_expr,
	lhd_types_compatible_p, lhd_tree_size): Likewise.
	* lcm.c (create_pre_exit, optimize_mode_switching): Likewise.
	* local-alloc.c (update_equiv_regs): Likewise.
	* loop-unroll.c (peel_loop_completely
	unroll_loop_constant_iterations, unroll_loop_runtime_iterations,
	peel_loop_simple, unroll_loop_stupid,
	analyze_iv_to_split_insn): Likewise.
	* loop.c (gen_prefetch, find_and_verify_loops,
	basic_induction_var): Likewise.
	* modulo-sched.c (normalize_sched_times, check_nodes_order): Likewise.
	* value-prof.c (tree_find_values_to_profile): Likewise.
	* varasm.c (named_section, default_assemble_integer,
	decode_addr_const): Likewise.

From-SVN: r98508
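The whole patch applies one mechanical pattern: an explicit "if (!cond) abort ();" check becomes "gcc_assert (cond);", and an "abort ();" on a path that should never execute (typically a switch default) becomes "gcc_unreachable ();".  The stand-alone sketch below only illustrates that pattern; it is not code from the patch.  The toy_sign function and the fallback macro definitions are hypothetical stand-ins so the example compiles outside the GCC tree, where the real gcc_assert and gcc_unreachable come from GCC's internal headers.

    /* Illustrative sketch of the abort -> gcc_assert / gcc_unreachable
       rewrite.  Outside the GCC tree we supply minimal stand-in macros;
       they only mimic the behavior (print and abort) for this example.  */
    #include <stdio.h>
    #include <stdlib.h>

    #ifndef gcc_assert
    #define gcc_assert(EXPR) \
      ((EXPR) ? (void) 0 : (fprintf (stderr, "assertion failed\n"), abort ()))
    #endif
    #ifndef gcc_unreachable
    #define gcc_unreachable() (fprintf (stderr, "unreachable\n"), abort ())
    #endif

    static int
    toy_sign (int n)
    {
      gcc_assert (n != 0);              /* was: if (n == 0) abort ();  */

      switch (n > 0)
        {
        case 1:
          return 1;
        case 0:
          return -1;
        default:
          gcc_unreachable ();           /* was: abort ();  */
        }
    }

    int
    main (void)
    {
      printf ("toy_sign (42) = %d\n", toy_sign (42));
      return 0;
    }

The main gain is diagnosability: inside GCC these macros report the failing source location rather than dying silently like a bare abort (), and expressing the check as an assertion also documents the invariant at the point where it is assumed.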
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog        50
-rw-r--r--  gcc/alias.c           4
-rw-r--r--  gcc/bb-reorder.c     18
-rw-r--r--  gcc/c-common.c        2
-rw-r--r--  gcc/c-decl.c          2
-rw-r--r--  gcc/caller-save.c    22
-rw-r--r--  gcc/cfg.c             3
-rw-r--r--  gcc/cfganal.c         2
-rw-r--r--  gcc/cfgexpand.c       8
-rw-r--r--  gcc/cfgrtl.c          7
-rw-r--r--  gcc/ifcvt.c          40
-rw-r--r--  gcc/integrate.c      21
-rw-r--r--  gcc/jump.c           29
-rw-r--r--  gcc/lambda-code.c     9
-rw-r--r--  gcc/lambda-mat.c      5
-rw-r--r--  gcc/langhooks.c      73
-rw-r--r--  gcc/lcm.c            60
-rw-r--r--  gcc/local-alloc.c     5
-rw-r--r--  gcc/loop-unroll.c   113
-rw-r--r--  gcc/loop.c           13
-rw-r--r--  gcc/modulo-sched.c    6
-rw-r--r--  gcc/value-prof.c      2
-rw-r--r--  gcc/varasm.c          8
23 files changed, 272 insertions, 230 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 7ab26e5..d148538 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,10 +1,50 @@
+2005-04-21 Nathan Sidwell <nathan@codesourcery.com>
+
+ * alias.c (true_dependence): Remove 'abort' from comments. Use
+ gcc_assert and gcc_unreachable as appropriate.
+ (canon_true_dependence): Likewise.
+ * bb-reorder.c (connect_traces): Likewise.
+ * c-common.c (c_add_case_label): Likewise.
+ * c-decl.c (finish_function): Likewise.
+ * caller-save.c (insert_restore, insert_save): Likewise.
+ * cfg.c (update_bb_profile_for_threading): Likewise.
+ * cfganal.c (flow_active_insn_p): Likewise.
+ * cfgexpand.c (add_reg_br_prob_note): Likewise.
+ * cfgrtl.c (rtl_redirect_edge_and_branch_force, rtl_split_edge,
+ cfg_layout_merge_blocks): Likewise.
+ * ifcvt.c (cond_exec_process_insns, merge_if_block,
+ find_if_block): Likewise.
+ * integrate.c (allocate_initial_values): Likewise.
+ * jump.c (reverse_condition, reverse_condition_maybe_unordered,
+ swap_condition, unsigned_condition, signed_condition,
+ mark_jump_label, invert_jump_1, rtx_renumbered_equal_p,
+ reg_or_subregno): Likewise.
+ * lambda-code.c (lambda_compute_auxillary_space,
+ lambda_transform_legal_p): Likewise.
+ * lambda-mat.c (lambda_matrix_inverse_hard): Likewise.
+ * langhooks.c (lhd_set_decl_assembler_name, lhd_type_promotes_to,
+ lhd_incomplete_type_error, lhd_expand_expr,
+ lhd_types_compatible_p, lhd_tree_size): Likewise.
+ * lcm.c (create_pre_exit, optimize_mode_switching): Likewise.
+ * local-alloc.c (update_equiv_regs): Likewise.
+ * loop-unroll.c (peel_loop_completely
+ unroll_loop_constant_iterations, unroll_loop_runtime_iterations,
+ peel_loop_simple, unroll_loop_stupid,
+ analyze_iv_to_split_insn): Likewise.
+ * loop.c (gen_prefetch, find_and_verify_loops,
+ basic_induction_var): Likewise.
+ * modulo-sched.c (normalize_sched_times, check_nodes_order): Likewise.
+ * value-prof.c (tree_find_values_to_profile): Likewise.
+ * varasm.c (named_section, default_assemble_integer,
+ decode_addr_const): Likewise.
+
2005-04-21 Alan Modra <amodra@bigpond.net.au>
Fariborz Jahanian <fjahanian@apple.com>
- * config/rs6000/rs6000.c (rs6000_arg_partial_bytes): Fix size of
- portion of argument passed in fpr.
- *expr.c (emit_push_insn): Fix computation of 'offset' used to
- decide on partial argument save on stack.
+ * config/rs6000/rs6000.c (rs6000_arg_partial_bytes): Fix size of
+ portion of argument passed in fpr.
+ * expr.c (emit_push_insn): Fix computation of 'offset' used to
+ decide on partial argument save on stack.
2005-04-21 Kazu Hirata <kazu@cs.umass.edu>
@@ -31,6 +71,8 @@
2005-04-21 Nathan Sidwell <nathan@codesourcery.com>
+2005-04-21 Nathan Sidwell <nathan@codesourcery.com>
+
* config/i386/i386.c (type_natural_mode): Use gcc_unreachable and
gcc_assert instead of abort.
(classify_argument, examine_argument, construct_container,
diff --git a/gcc/alias.c b/gcc/alias.c
index a0f77d5..a36e1d5 100644
--- a/gcc/alias.c
+++ b/gcc/alias.c
@@ -2141,7 +2141,7 @@ true_dependence (rtx mem, enum machine_mode mem_mode, rtx x,
/* Read-only memory is by definition never modified, and therefore can't
conflict with anything. We don't expect to find read-only set on MEM,
- but stupid user tricks can produce them, so don't abort. */
+ but stupid user tricks can produce them, so don't die. */
if (MEM_READONLY_P (x))
return 0;
@@ -2214,7 +2214,7 @@ canon_true_dependence (rtx mem, enum machine_mode mem_mode, rtx mem_addr,
/* Read-only memory is by definition never modified, and therefore can't
conflict with anything. We don't expect to find read-only set on MEM,
- but stupid user tricks can produce them, so don't abort. */
+ but stupid user tricks can produce them, so don't die. */
if (MEM_READONLY_P (x))
return 0;
diff --git a/gcc/bb-reorder.c b/gcc/bb-reorder.c
index 6925114..cc0844e 100644
--- a/gcc/bb-reorder.c
+++ b/gcc/bb-reorder.c
@@ -926,18 +926,14 @@ connect_traces (int n_traces, struct trace *traces)
if (i >= n_traces)
{
- if (two_passes && current_pass == 1)
- {
- i = 0;
- t = i;
- current_pass = 2;
- if (current_partition == BB_HOT_PARTITION)
- current_partition = BB_COLD_PARTITION;
- else
- current_partition = BB_HOT_PARTITION;
- }
+ gcc_assert (two_passes && current_pass == 1);
+ i = 0;
+ t = i;
+ current_pass = 2;
+ if (current_partition == BB_HOT_PARTITION)
+ current_partition = BB_COLD_PARTITION;
else
- abort ();
+ current_partition = BB_HOT_PARTITION;
}
if (connected[t])
diff --git a/gcc/c-common.c b/gcc/c-common.c
index 7f1ac51..ab23903 100644
--- a/gcc/c-common.c
+++ b/gcc/c-common.c
@@ -3635,7 +3635,7 @@ c_add_case_label (splay_tree cases, tree cond, tree orig_type,
error_out:
/* Add a label so that the back-end doesn't think that the beginning of
the switch is unreachable. Note that we do not add a case label, as
- that just leads to duplicates and thence to aborts later on. */
+ that just leads to duplicates and thence to failure later on. */
if (!cases->root)
{
tree t = create_artificial_label ();
diff --git a/gcc/c-decl.c b/gcc/c-decl.c
index 5140a0f..70b2d83 100644
--- a/gcc/c-decl.c
+++ b/gcc/c-decl.c
@@ -6323,7 +6323,7 @@ finish_function (void)
if (warn_return_type
&& TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE
&& !current_function_returns_value && !current_function_returns_null
- /* Don't complain if we abort. */
+ /* Don't complain if we are no-return. */
&& !current_function_returns_abnormally
/* Don't warn for main(). */
&& !MAIN_NAME_P (DECL_NAME (fndecl))
diff --git a/gcc/caller-save.c b/gcc/caller-save.c
index 26be617..5d8e649 100644
--- a/gcc/caller-save.c
+++ b/gcc/caller-save.c
@@ -637,12 +637,12 @@ insert_restore (struct insn_chain *chain, int before_p, int regno,
struct insn_chain *new;
rtx mem;
- /* A common failure mode if register status is not correct in the RTL
- is for this routine to be called with a REGNO we didn't expect to
- save. That will cause us to write an insn with a (nil) SET_DEST
- or SET_SRC. Instead of doing so and causing a crash later, check
- for this common case and abort here instead. This will remove one
- step in debugging such problems. */
+ /* A common failure mode if register status is not correct in the
+ RTL is for this routine to be called with a REGNO we didn't
+ expect to save. That will cause us to write an insn with a (nil)
+ SET_DEST or SET_SRC. Instead of doing so and causing a crash
+ later, check for this common case here instead. This will remove
+ one step in debugging such problems. */
gcc_assert (regno_save_mem[regno][1]);
/* Get the pattern to emit and update our status.
@@ -710,11 +710,11 @@ insert_save (struct insn_chain *chain, int before_p, int regno,
struct insn_chain *new;
rtx mem;
- /* A common failure mode if register status is not correct in the RTL
- is for this routine to be called with a REGNO we didn't expect to
- save. That will cause us to write an insn with a (nil) SET_DEST
- or SET_SRC. Instead of doing so and causing a crash later, check
- for this common case and abort here instead. This will remove one
+ /* A common failure mode if register status is not correct in the
+ RTL is for this routine to be called with a REGNO we didn't
+ expect to save. That will cause us to write an insn with a (nil)
+ SET_DEST or SET_SRC. Instead of doing so and causing a crash
+ later, check for this common case here. This will remove one
step in debugging such problems. */
gcc_assert (regno_save_mem[regno][1]);
diff --git a/gcc/cfg.c b/gcc/cfg.c
index 85d922e..432cea5 100644
--- a/gcc/cfg.c
+++ b/gcc/cfg.c
@@ -895,8 +895,7 @@ update_bb_profile_for_threading (basic_block bb, int edge_frequency,
c->probability *= scale / 65536;
}
- if (bb != taken_edge->src)
- abort ();
+ gcc_assert (bb == taken_edge->src);
taken_edge->count -= count;
if (taken_edge->count < 0)
taken_edge->count = 0;
diff --git a/gcc/cfganal.c b/gcc/cfganal.c
index 5afbabc..6c70772 100644
--- a/gcc/cfganal.c
+++ b/gcc/cfganal.c
@@ -69,7 +69,7 @@ flow_active_insn_p (rtx insn)
programs that fail to return a value. Its effect is to
keep the return value from being live across the entire
function. If we allow it to be skipped, we introduce the
- possibility for register livetime aborts. */
+ possibility for register lifetime confusion. */
if (GET_CODE (PATTERN (insn)) == CLOBBER
&& REG_P (XEXP (PATTERN (insn), 0))
&& REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0)))
diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c
index f7a2f1e1..e894b77 100644
--- a/gcc/cfgexpand.c
+++ b/gcc/cfgexpand.c
@@ -60,8 +60,7 @@ add_reg_br_prob_note (FILE *dump_file, rtx last, int probability)
|| GET_CODE (NEXT_INSN (NEXT_INSN (NEXT_INSN (last)))) != CODE_LABEL
|| NEXT_INSN (NEXT_INSN (NEXT_INSN (NEXT_INSN (last)))))
goto failed;
- if (find_reg_note (last, REG_BR_PROB, 0))
- abort ();
+ gcc_assert (!find_reg_note (last, REG_BR_PROB, 0));
REG_NOTES (last)
= gen_rtx_EXPR_LIST (REG_BR_PROB,
GEN_INT (REG_BR_PROB_BASE - probability),
@@ -69,9 +68,8 @@ add_reg_br_prob_note (FILE *dump_file, rtx last, int probability)
return;
}
if (!last || GET_CODE (last) != JUMP_INSN || !any_condjump_p (last))
- goto failed;
- if (find_reg_note (last, REG_BR_PROB, 0))
- abort ();
+ goto failed;
+ gcc_assert (!find_reg_note (last, REG_BR_PROB, 0));
REG_NOTES (last)
= gen_rtx_EXPR_LIST (REG_BR_PROB,
GEN_INT (probability), REG_NOTES (last));
diff --git a/gcc/cfgrtl.c b/gcc/cfgrtl.c
index f14e44e..c1bbb1a 100644
--- a/gcc/cfgrtl.c
+++ b/gcc/cfgrtl.c
@@ -1165,7 +1165,7 @@ force_nonfallthru (edge e)
/* Redirect edge even at the expense of creating new jump insn or
basic block. Return new basic block if created, NULL otherwise.
- Abort if conversion is impossible. */
+ Conversion must be possible. */
static basic_block
rtl_redirect_edge_and_branch_force (edge e, basic_block target)
@@ -1277,7 +1277,7 @@ rtl_move_block_after (basic_block bb ATTRIBUTE_UNUSED,
}
/* Split a (typically critical) edge. Return the new block.
- Abort on abnormal edges.
+ The edge must not be abnormal.
??? The code generally expects to be called on critical edges.
The case of a block ending in an unconditional jump to a
@@ -2695,7 +2695,8 @@ cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
}
-/* Merge block A and B, abort when it is not possible. */
+/* Merge block A and B. The blocks must be mergeable. */
+
static void
cfg_layout_merge_blocks (basic_block a, basic_block b)
{
diff --git a/gcc/ifcvt.c b/gcc/ifcvt.c
index 8801b8f..8367316 100644
--- a/gcc/ifcvt.c
+++ b/gcc/ifcvt.c
@@ -269,8 +269,7 @@ cond_exec_process_insns (ce_if_block_t *ce_info ATTRIBUTE_UNUSED,
if (NOTE_P (insn))
goto insn_done;
- if (!NONJUMP_INSN_P (insn) && !CALL_P (insn))
- abort ();
+ gcc_assert(NONJUMP_INSN_P (insn) || CALL_P (insn));
/* Remove USE insns that get in the way. */
if (reload_completed && GET_CODE (PATTERN (insn)) == USE)
@@ -2228,30 +2227,21 @@ merge_if_block (struct ce_if_block * ce_info)
/* The outgoing edge for the current COMBO block should already
be correct. Verify this. */
if (EDGE_COUNT (combo_bb->succs) == 0)
- {
- if (find_reg_note (last, REG_NORETURN, NULL))
- ;
- else if (NONJUMP_INSN_P (last)
- && GET_CODE (PATTERN (last)) == TRAP_IF
- && TRAP_CONDITION (PATTERN (last)) == const_true_rtx)
- ;
- else
- abort ();
- }
+ gcc_assert (find_reg_note (last, REG_NORETURN, NULL)
+ || (NONJUMP_INSN_P (last)
+ && GET_CODE (PATTERN (last)) == TRAP_IF
+ && (TRAP_CONDITION (PATTERN (last))
+ == const_true_rtx)));
+ else
/* There should still be something at the end of the THEN or ELSE
blocks taking us to our final destination. */
- else if (JUMP_P (last))
- ;
- else if (EDGE_SUCC (combo_bb, 0)->dest == EXIT_BLOCK_PTR
- && CALL_P (last)
- && SIBLING_CALL_P (last))
- ;
- else if ((EDGE_SUCC (combo_bb, 0)->flags & EDGE_EH)
- && can_throw_internal (last))
- ;
- else
- abort ();
+ gcc_assert (JUMP_P (last)
+ || (EDGE_SUCC (combo_bb, 0)->dest == EXIT_BLOCK_PTR
+ && CALL_P (last)
+ && SIBLING_CALL_P (last))
+ || ((EDGE_SUCC (combo_bb, 0)->flags & EDGE_EH)
+ && can_throw_internal (last)));
}
/* The JOIN block may have had quite a number of other predecessors too.
@@ -2259,7 +2249,7 @@ merge_if_block (struct ce_if_block * ce_info)
have only one remaining edge from our if-then-else diamond. If there
is more than one remaining edge, it must come from elsewhere. There
may be zero incoming edges if the THEN block didn't actually join
- back up (as with a call to abort). */
+ back up (as with a call to a non-return function). */
else if (EDGE_COUNT (join_bb->preds) < 2
&& join_bb != EXIT_BLOCK_PTR)
{
@@ -2626,7 +2616,7 @@ find_if_block (struct ce_if_block * ce_info)
we checked the FALLTHRU flag, those are already adjacent to the last IF
block. */
/* ??? As an enhancement, move the ELSE block. Have to deal with
- BLOCK notes, if by no other means than aborting the merge if they
+ BLOCK notes, if by no other means than backing out the merge if they
exist. Sticky enough I don't want to think about it now. */
next = then_bb;
if (else_bb && (next = next->next_bb) != else_bb)
diff --git a/gcc/integrate.c b/gcc/integrate.c
index 135bd1d..22a7c40 100644
--- a/gcc/integrate.c
+++ b/gcc/integrate.c
@@ -389,18 +389,19 @@ allocate_initial_values (rtx *reg_equiv_memory_loc ATTRIBUTE_UNUSED)
int regno = REGNO (ivs->entries[i].pseudo);
rtx x = ALLOCATE_INITIAL_VALUE (ivs->entries[i].hard_reg);
- if (x == NULL_RTX || REG_N_SETS (REGNO (ivs->entries[i].pseudo)) > 1)
- ; /* Do nothing. */
- else if (MEM_P (x))
- reg_equiv_memory_loc[regno] = x;
- else if (REG_P (x))
+ if (x && REG_N_SETS (REGNO (ivs->entries[i].pseudo)) <= 1)
{
- reg_renumber[regno] = REGNO (x);
- /* Poke the regno right into regno_reg_rtx
- so that even fixed regs are accepted. */
- REGNO (ivs->entries[i].pseudo) = REGNO (x);
+ if (MEM_P (x))
+ reg_equiv_memory_loc[regno] = x;
+ else
+ {
+ gcc_assert (REG_P (x));
+ reg_renumber[regno] = REGNO (x);
+ /* Poke the regno right into regno_reg_rtx so that even
+ fixed regs are accepted. */
+ REGNO (ivs->entries[i].pseudo) = REGNO (x);
+ }
}
- else abort ();
}
#endif
}
diff --git a/gcc/jump.c b/gcc/jump.c
index dc81c52..74a6c69 100644
--- a/gcc/jump.c
+++ b/gcc/jump.c
@@ -511,7 +511,7 @@ reverse_condition (enum rtx_code code)
return UNKNOWN;
default:
- abort ();
+ gcc_unreachable ();
}
}
@@ -554,7 +554,7 @@ reverse_condition_maybe_unordered (enum rtx_code code)
return LTGT;
default:
- abort ();
+ gcc_unreachable ();
}
}
@@ -600,7 +600,7 @@ swap_condition (enum rtx_code code)
return UNLE;
default:
- abort ();
+ gcc_unreachable ();
}
}
@@ -631,7 +631,7 @@ unsigned_condition (enum rtx_code code)
return LEU;
default:
- abort ();
+ gcc_unreachable ();
}
}
@@ -660,7 +660,7 @@ signed_condition (enum rtx_code code)
return LE;
default:
- abort ();
+ gcc_unreachable ();
}
}
@@ -1109,8 +1109,7 @@ mark_jump_label (rtx x, rtx insn, int in_mem)
&& NOTE_LINE_NUMBER (label) == NOTE_INSN_DELETED_LABEL)
break;
- if (!LABEL_P (label))
- abort ();
+ gcc_assert (LABEL_P (label));
/* Ignore references to labels of containing functions. */
if (LABEL_REF_NONLOCAL_P (x))
@@ -1723,10 +1722,13 @@ invert_jump_1 (rtx jump, rtx nlabel)
{
rtx x = pc_set (jump);
int ochanges;
+ int ok;
ochanges = num_validated_changes ();
- if (!x || !invert_exp_1 (SET_SRC (x), jump))
- abort ();
+ gcc_assert (x);
+ ok = invert_exp_1 (SET_SRC (x), jump);
+ gcc_assert (ok);
+
if (num_validated_changes () == ochanges)
return 0;
@@ -1941,7 +1943,7 @@ rtx_renumbered_equal_p (rtx x, rtx y)
break;
default:
- abort ();
+ gcc_unreachable ();
}
}
return 1;
@@ -1976,9 +1978,8 @@ true_regnum (rtx x)
unsigned int
reg_or_subregno (rtx reg)
{
- if (REG_P (reg))
- return REGNO (reg);
if (GET_CODE (reg) == SUBREG)
- return REGNO (SUBREG_REG (reg));
- abort ();
+ reg = SUBREG_REG (reg);
+ gcc_assert (REG_P (reg));
+ return REGNO (reg);
}
diff --git a/gcc/lambda-code.c b/gcc/lambda-code.c
index da24e95..fbfcdf9 100644
--- a/gcc/lambda-code.c
+++ b/gcc/lambda-code.c
@@ -687,7 +687,7 @@ lambda_compute_auxillary_space (lambda_loopnest nest,
/* Unfortunately, we can't know the number of constraints we'll have
ahead of time, but this should be enough even in ridiculous loop nest
- cases. We abort if we go over this limit. */
+ cases. We must not go over this limit. */
A = lambda_matrix_new (128, depth);
B = lambda_matrix_new (128, invariants);
a = lambda_vector_new (128);
@@ -2483,11 +2483,8 @@ lambda_transform_legal_p (lambda_trans_matrix trans,
lambda_vector distres;
struct data_dependence_relation *ddr;
-#if defined ENABLE_CHECKING
- if (LTM_COLSIZE (trans) != nb_loops
- || LTM_ROWSIZE (trans) != nb_loops)
- abort ();
-#endif
+ gcc_assert (LTM_COLSIZE (trans) == nb_loops
+ && LTM_ROWSIZE (trans) == nb_loops);
/* When there is an unknown relation in the dependence_relations, we
know that it is no worth looking at this loop nest: give up. */
diff --git a/gcc/lambda-mat.c b/gcc/lambda-mat.c
index 8aa3c12..8de0e98 100644
--- a/gcc/lambda-mat.c
+++ b/gcc/lambda-mat.c
@@ -401,9 +401,8 @@ lambda_matrix_inverse_hard (lambda_matrix mat, lambda_matrix inv, int n)
row = temp[j];
diagonal = row[j];
- /* If the matrix is singular, abort. */
- if (diagonal == 0)
- abort ();
+ /* The matrix must not be singular. */
+ gcc_assert (diagonal);
determinant = determinant * diagonal;
diff --git a/gcc/langhooks.c b/gcc/langhooks.c
index 12119a8..94e08e6 100644
--- a/gcc/langhooks.c
+++ b/gcc/langhooks.c
@@ -158,37 +158,31 @@ lhd_set_decl_assembler_name (tree decl)
DECL_ASSEMBLER_NAME for lots of DECLs. Only FUNCTION_DECLs and
VAR_DECLs for variables with static storage duration need a real
DECL_ASSEMBLER_NAME. */
- if (TREE_CODE (decl) == FUNCTION_DECL
- || (TREE_CODE (decl) == VAR_DECL
- && (TREE_STATIC (decl)
- || DECL_EXTERNAL (decl)
- || TREE_PUBLIC (decl))))
+ gcc_assert (TREE_CODE (decl) == FUNCTION_DECL
+ || (TREE_CODE (decl) == VAR_DECL
+ && (TREE_STATIC (decl)
+ || DECL_EXTERNAL (decl)
+ || TREE_PUBLIC (decl))));
+
+ /* By default, assume the name to use in assembly code is the same
+ as that used in the source language. (That's correct for C, and
+ GCC used to set DECL_ASSEMBLER_NAME to the same value as
+ DECL_NAME in build_decl, so this choice provides backwards
+ compatibility with existing front-ends.
+
+ Can't use just the variable's own name for a variable whose scope
+ is less than the whole compilation. Concatenate a distinguishing
+ number - we use the DECL_UID. */
+ if (TREE_PUBLIC (decl) || DECL_CONTEXT (decl) == NULL_TREE)
+ SET_DECL_ASSEMBLER_NAME (decl, DECL_NAME (decl));
+ else
{
- /* By default, assume the name to use in assembly code is the
- same as that used in the source language. (That's correct
- for C, and GCC used to set DECL_ASSEMBLER_NAME to the same
- value as DECL_NAME in build_decl, so this choice provides
- backwards compatibility with existing front-ends.
-
- Can't use just the variable's own name for a variable whose
- scope is less than the whole compilation. Concatenate a
- distinguishing number - we use the DECL_UID. */
- if (TREE_PUBLIC (decl) || DECL_CONTEXT (decl) == NULL_TREE)
- SET_DECL_ASSEMBLER_NAME (decl, DECL_NAME (decl));
- else
- {
- const char *name = IDENTIFIER_POINTER (DECL_NAME (decl));
- char *label;
-
- ASM_FORMAT_PRIVATE_NAME (label, name, DECL_UID (decl));
- SET_DECL_ASSEMBLER_NAME (decl, get_identifier (label));
- }
+ const char *name = IDENTIFIER_POINTER (DECL_NAME (decl));
+ char *label;
+
+ ASM_FORMAT_PRIVATE_NAME (label, name, DECL_UID (decl));
+ SET_DECL_ASSEMBLER_NAME (decl, get_identifier (label));
}
- else
- /* Nobody should ever be asking for the DECL_ASSEMBLER_NAME of
- these DECLs -- unless they're in language-dependent code, in
- which case set_decl_assembler_name hook should handle things. */
- abort ();
}
/* By default we always allow bit-field based optimizations. */
@@ -202,7 +196,7 @@ lhd_can_use_bit_fields_p (void)
tree
lhd_type_promotes_to (tree ARG_UNUSED (type))
{
- abort ();
+ gcc_unreachable ();
}
/* Registration of machine- or os-specific builtin types. */
@@ -216,10 +210,8 @@ lhd_register_builtin_type (tree ARG_UNUSED (type),
void
lhd_incomplete_type_error (tree ARG_UNUSED (value), tree type)
{
- if (TREE_CODE (type) == ERROR_MARK)
- return;
-
- abort ();
+ gcc_assert (TREE_CODE (type) == ERROR_MARK);
+ return;
}
/* Provide a default routine for alias sets that always returns -1. This
@@ -248,7 +240,7 @@ lhd_expand_expr (tree ARG_UNUSED (t), rtx ARG_UNUSED (r),
int ARG_UNUSED (em),
rtx * ARG_UNUSED (a))
{
- abort ();
+ gcc_unreachable ();
}
/* The default language-specific function for expanding a decl. After
@@ -288,10 +280,10 @@ lhd_types_compatible_p (tree x, tree y)
handle language-specific tree codes, as well as language-specific
information associated to common tree codes. If a tree node is
completely handled within this function, it should set *SUBTREES to
- 0, so that generic handling isn't attempted. For language-specific
- tree codes, generic handling would abort(), so make sure it is set
- properly. Both SUBTREES and *SUBTREES is guaranteed to be nonzero
- when the function is called. */
+ 0, so that generic handling isn't attempted. The generic handling
+ cannot deal with language-specific tree codes, so make sure it is
+ set properly. Both SUBTREES and *SUBTREES is guaranteed to be
+ nonzero when the function is called. */
tree
lhd_tree_inlining_walk_subtrees (tree *tp ATTRIBUTE_UNUSED,
@@ -444,8 +436,7 @@ lhd_gimplify_expr (tree *expr_p ATTRIBUTE_UNUSED, tree *pre_p ATTRIBUTE_UNUSED,
size_t
lhd_tree_size (enum tree_code c ATTRIBUTE_UNUSED)
{
- abort ();
- return 0;
+ gcc_unreachable ();
}
/* Return true if decl, which is a function decl, may be called by a
diff --git a/gcc/lcm.c b/gcc/lcm.c
index 4f2f406..d62bbb0 100644
--- a/gcc/lcm.c
+++ b/gcc/lcm.c
@@ -1101,23 +1101,27 @@ create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
last_insn = return_copy;
}
while (nregs);
+
/* If we didn't see a full return value copy, verify that there
is a plausible reason for this. If some, but not all of the
return register is likely spilled, we can expect that there
is a copy for the likely spilled part. */
- if (nregs
- && ! forced_late_switch
- && ! short_block
- && CLASS_LIKELY_SPILLED_P (REGNO_REG_CLASS (ret_start))
- && nregs == hard_regno_nregs[ret_start][GET_MODE (ret_reg)]
- /* For multi-hard-register floating point values,
- sometimes the likely-spilled part is ordinarily copied
- first, then the other part is set with an arithmetic
- operation. This doesn't actually cause reload failures,
- so let it pass. */
- && (GET_MODE_CLASS (GET_MODE (ret_reg)) == MODE_INT
- || nregs == 1))
- abort ();
+ gcc_assert (!nregs
+ || forced_late_switch
+ || short_block
+ || !(CLASS_LIKELY_SPILLED_P
+ (REGNO_REG_CLASS (ret_start)))
+ || (nregs
+ != hard_regno_nregs[ret_start][GET_MODE (ret_reg)])
+ /* For multi-hard-register floating point
+ values, sometimes the likely-spilled part
+ is ordinarily copied first, then the other
+ part is set with an arithmetic operation.
+ This doesn't actually cause reload
+ failures, so let it pass. */
+ || (GET_MODE_CLASS (GET_MODE (ret_reg)) != MODE_INT
+ && nregs != 1));
+
if (INSN_P (last_insn))
{
before_return_copy
@@ -1370,21 +1374,23 @@ optimize_mode_switching (FILE *file)
emited = true;
if (JUMP_P (BB_END (src_bb)))
emit_insn_before (mode_set, BB_END (src_bb));
- /* It doesn't make sense to switch to normal mode
- after a CALL_INSN, so we're going to abort if we
- find one. The cases in which a CALL_INSN may
- have an abnormal edge are sibcalls and EH edges.
- In the case of sibcalls, the dest basic-block is
- the EXIT_BLOCK, that runs in normal mode; it is
- assumed that a sibcall insn requires normal mode
- itself, so no mode switch would be required after
- the call (it wouldn't make sense, anyway). In
- the case of EH edges, EH entry points also start
- in normal mode, so a similar reasoning applies. */
- else if (NONJUMP_INSN_P (BB_END (src_bb)))
- emit_insn_after (mode_set, BB_END (src_bb));
else
- abort ();
+ {
+ /* It doesn't make sense to switch to normal
+ mode after a CALL_INSN. The cases in which a
+ CALL_INSN may have an abnormal edge are
+ sibcalls and EH edges. In the case of
+ sibcalls, the dest basic-block is the
+ EXIT_BLOCK, that runs in normal mode; it is
+ assumed that a sibcall insn requires normal
+ mode itself, so no mode switch would be
+ required after the call (it wouldn't make
+ sense, anyway). In the case of EH edges, EH
+ entry points also start in normal mode, so a
+ similar reasoning applies. */
+ gcc_assert (NONJUMP_INSN_P (BB_END (src_bb)));
+ emit_insn_after (mode_set, BB_END (src_bb));
+ }
bb_info[j][src_bb->index].computing = mode;
RESET_BIT (transp[src_bb->index], j);
}
diff --git a/gcc/local-alloc.c b/gcc/local-alloc.c
index 3969a86..2caeafc 100644
--- a/gcc/local-alloc.c
+++ b/gcc/local-alloc.c
@@ -1088,8 +1088,9 @@ update_equiv_regs (void)
REG_NOTES (new_insn) = REG_NOTES (equiv_insn);
REG_NOTES (equiv_insn) = 0;
- /* Make sure this insn is recognized before reload begins,
- otherwise eliminate_regs_in_insn will abort. */
+ /* Make sure this insn is recognized before
+ reload begins, otherwise
+ eliminate_regs_in_insn will die. */
INSN_CODE (new_insn) = INSN_CODE (equiv_insn);
delete_insn (equiv_insn);
diff --git a/gcc/loop-unroll.c b/gcc/loop-unroll.c
index ab3828d..c93170c 100644
--- a/gcc/loop-unroll.c
+++ b/gcc/loop-unroll.c
@@ -495,6 +495,8 @@ peel_loop_completely (struct loops *loops, struct loop *loop)
if (npeel)
{
+ bool ok;
+
wont_exit = sbitmap_alloc (npeel + 1);
sbitmap_ones (wont_exit);
RESET_BIT (wont_exit, 0);
@@ -508,11 +510,12 @@ peel_loop_completely (struct loops *loops, struct loop *loop)
opt_info = analyze_insns_in_loop (loop);
opt_info_start_duplication (opt_info);
- if (!duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
- loops, npeel,
- wont_exit, desc->out_edge, remove_edges, &n_remove_edges,
- DLTHE_FLAG_UPDATE_FREQ))
- abort ();
+ ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
+ loops, npeel,
+ wont_exit, desc->out_edge,
+ remove_edges, &n_remove_edges,
+ DLTHE_FLAG_UPDATE_FREQ);
+ gcc_assert (ok);
free (wont_exit);
@@ -670,6 +673,7 @@ unroll_loop_constant_iterations (struct loops *loops, struct loop *loop)
struct niter_desc *desc = get_simple_loop_desc (loop);
bool exit_at_end = loop_exit_at_end_p (loop);
struct opt_info *opt_info = NULL;
+ bool ok;
niter = desc->niter;
@@ -704,12 +708,12 @@ unroll_loop_constant_iterations (struct loops *loops, struct loop *loop)
if (exit_mod)
{
opt_info_start_duplication (opt_info);
- if (!duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
+ ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
loops, exit_mod,
wont_exit, desc->out_edge,
remove_edges, &n_remove_edges,
- DLTHE_FLAG_UPDATE_FREQ))
- abort ();
+ DLTHE_FLAG_UPDATE_FREQ);
+ gcc_assert (ok);
if (opt_info && exit_mod > 1)
apply_opt_in_copies (opt_info, exit_mod, false, false);
@@ -740,11 +744,12 @@ unroll_loop_constant_iterations (struct loops *loops, struct loop *loop)
RESET_BIT (wont_exit, 1);
opt_info_start_duplication (opt_info);
- if (!duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
- loops, exit_mod + 1,
- wont_exit, desc->out_edge, remove_edges, &n_remove_edges,
- DLTHE_FLAG_UPDATE_FREQ))
- abort ();
+ ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
+ loops, exit_mod + 1,
+ wont_exit, desc->out_edge,
+ remove_edges, &n_remove_edges,
+ DLTHE_FLAG_UPDATE_FREQ);
+ gcc_assert (ok);
if (opt_info && exit_mod > 0)
apply_opt_in_copies (opt_info, exit_mod + 1, false, false);
@@ -763,11 +768,12 @@ unroll_loop_constant_iterations (struct loops *loops, struct loop *loop)
/* Now unroll the loop. */
opt_info_start_duplication (opt_info);
- if (!duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
- loops, max_unroll,
- wont_exit, desc->out_edge, remove_edges, &n_remove_edges,
- DLTHE_FLAG_UPDATE_FREQ))
- abort ();
+ ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
+ loops, max_unroll,
+ wont_exit, desc->out_edge,
+ remove_edges, &n_remove_edges,
+ DLTHE_FLAG_UPDATE_FREQ);
+ gcc_assert (ok);
if (opt_info)
{
@@ -935,6 +941,7 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop)
struct niter_desc *desc = get_simple_loop_desc (loop);
bool exit_at_end = loop_exit_at_end_p (loop);
struct opt_info *opt_info = NULL;
+ bool ok;
if (flag_split_ivs_in_unroller
|| flag_variable_expansion_in_unroller)
@@ -1013,11 +1020,12 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop)
&& !desc->noloop_assumptions)
SET_BIT (wont_exit, 1);
ezc_swtch = loop_preheader_edge (loop)->src;
- if (!duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
- loops, 1,
- wont_exit, desc->out_edge, remove_edges, &n_remove_edges,
- DLTHE_FLAG_UPDATE_FREQ))
- abort ();
+ ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
+ loops, 1,
+ wont_exit, desc->out_edge,
+ remove_edges, &n_remove_edges,
+ DLTHE_FLAG_UPDATE_FREQ);
+ gcc_assert (ok);
/* Record the place where switch will be built for preconditioning. */
swtch = loop_split_edge_with (loop_preheader_edge (loop),
@@ -1029,11 +1037,12 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop)
sbitmap_zero (wont_exit);
if (i != n_peel - 1 || !last_may_exit)
SET_BIT (wont_exit, 1);
- if (!duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
- loops, 1,
- wont_exit, desc->out_edge, remove_edges, &n_remove_edges,
- DLTHE_FLAG_UPDATE_FREQ))
- abort ();
+ ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
+ loops, 1,
+ wont_exit, desc->out_edge,
+ remove_edges, &n_remove_edges,
+ DLTHE_FLAG_UPDATE_FREQ);
+ gcc_assert (ok);
/* Create item for switch. */
j = n_peel - i - (extra_zero_check ? 0 : 1);
@@ -1041,7 +1050,8 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop)
preheader = loop_split_edge_with (loop_preheader_edge (loop), NULL_RTX);
branch_code = compare_and_jump_seq (copy_rtx (niter), GEN_INT (j), EQ,
- block_label (preheader), p, NULL_RTX);
+ block_label (preheader), p,
+ NULL_RTX);
swtch = loop_split_edge_with (single_pred_edge (swtch), branch_code);
set_immediate_dominator (CDI_DOMINATORS, preheader, swtch);
@@ -1058,7 +1068,8 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop)
swtch = ezc_swtch;
preheader = loop_split_edge_with (loop_preheader_edge (loop), NULL_RTX);
branch_code = compare_and_jump_seq (copy_rtx (niter), const0_rtx, EQ,
- block_label (preheader), p, NULL_RTX);
+ block_label (preheader), p,
+ NULL_RTX);
swtch = loop_split_edge_with (single_succ_edge (swtch), branch_code);
set_immediate_dominator (CDI_DOMINATORS, preheader, swtch);
@@ -1077,11 +1088,12 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop)
RESET_BIT (wont_exit, may_exit_copy);
opt_info_start_duplication (opt_info);
- if (!duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
- loops, max_unroll,
- wont_exit, desc->out_edge, remove_edges, &n_remove_edges,
- DLTHE_FLAG_UPDATE_FREQ))
- abort ();
+ ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
+ loops, max_unroll,
+ wont_exit, desc->out_edge,
+ remove_edges, &n_remove_edges,
+ DLTHE_FLAG_UPDATE_FREQ);
+ gcc_assert (ok);
if (opt_info)
{
@@ -1094,7 +1106,8 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop)
if (exit_at_end)
{
basic_block exit_block = desc->in_edge->src->rbi->copy;
- /* Find a new in and out edge; they are in the last copy we have made. */
+ /* Find a new in and out edge; they are in the last copy we have
+ made. */
if (EDGE_SUCC (exit_block, 0)->dest == desc->out_edge->dest)
{
@@ -1119,7 +1132,8 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop)
the correct new number of iterations is this: */
gcc_assert (!desc->const_iter);
desc->niter_expr =
- simplify_gen_binary (UDIV, desc->mode, old_niter, GEN_INT (max_unroll + 1));
+ simplify_gen_binary (UDIV, desc->mode, old_niter,
+ GEN_INT (max_unroll + 1));
desc->niter_max /= max_unroll + 1;
if (exit_at_end)
{
@@ -1242,6 +1256,7 @@ peel_loop_simple (struct loops *loops, struct loop *loop)
unsigned npeel = loop->lpt_decision.times;
struct niter_desc *desc = get_simple_loop_desc (loop);
struct opt_info *opt_info = NULL;
+ bool ok;
if (flag_split_ivs_in_unroller && npeel > 1)
opt_info = analyze_insns_in_loop (loop);
@@ -1251,10 +1266,11 @@ peel_loop_simple (struct loops *loops, struct loop *loop)
opt_info_start_duplication (opt_info);
- if (!duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
- loops, npeel, wont_exit, NULL, NULL, NULL,
- DLTHE_FLAG_UPDATE_FREQ))
- abort ();
+ ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
+ loops, npeel, wont_exit,
+ NULL, NULL,
+ NULL, DLTHE_FLAG_UPDATE_FREQ);
+ gcc_assert (ok);
free (wont_exit);
@@ -1387,6 +1403,7 @@ unroll_loop_stupid (struct loops *loops, struct loop *loop)
unsigned nunroll = loop->lpt_decision.times;
struct niter_desc *desc = get_simple_loop_desc (loop);
struct opt_info *opt_info = NULL;
+ bool ok;
if (flag_split_ivs_in_unroller
|| flag_variable_expansion_in_unroller)
@@ -1397,10 +1414,11 @@ unroll_loop_stupid (struct loops *loops, struct loop *loop)
sbitmap_zero (wont_exit);
opt_info_start_duplication (opt_info);
- if (!duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
- loops, nunroll, wont_exit, NULL, NULL, NULL,
- DLTHE_FLAG_UPDATE_FREQ))
- abort ();
+ ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
+ loops, nunroll, wont_exit,
+ NULL, NULL, NULL,
+ DLTHE_FLAG_UPDATE_FREQ);
+ gcc_assert (ok);
if (opt_info)
{
@@ -1599,6 +1617,7 @@ analyze_iv_to_split_insn (rtx insn)
rtx set, dest;
struct rtx_iv iv;
struct iv_to_split *ivts;
+ bool ok;
/* For now we just split the basic induction variables. Later this may be
extended for example by selecting also addresses of memory references. */
@@ -1613,8 +1632,8 @@ analyze_iv_to_split_insn (rtx insn)
if (!biv_p (insn, dest))
return NULL;
- if (!iv_analyze (insn, dest, &iv))
- abort ();
+ ok = iv_analyze (insn, dest, &iv);
+ gcc_assert (ok);
if (iv.step == const0_rtx
|| iv.mode != iv.extend_mode)
diff --git a/gcc/loop.c b/gcc/loop.c
index 005560a..a61c8df 100644
--- a/gcc/loop.c
+++ b/gcc/loop.c
@@ -424,7 +424,7 @@ struct loop_info
#ifndef HAVE_prefetch
#define HAVE_prefetch 0
#define CODE_FOR_prefetch 0
-#define gen_prefetch(a,b,c) (abort(), NULL_RTX)
+#define gen_prefetch(a,b,c) (gcc_unreachable (), NULL_RTX)
#endif
/* Give up the prefetch optimizations once we exceed a given threshold.
@@ -3280,6 +3280,7 @@ find_and_verify_loops (rtx f, struct loops *loops)
if (invert_jump (p, new_label, 1))
{
rtx q, r;
+ bool only_notes;
/* If no suitable BARRIER was found, create a suitable
one before TARGET. Since TARGET is a fall through
@@ -3304,8 +3305,10 @@ find_and_verify_loops (rtx f, struct loops *loops)
/* Include the BARRIER after INSN and copy the
block after LOC. */
- if (squeeze_notes (&new_label, &last_insn_to_move))
- abort ();
+ only_notes = squeeze_notes (&new_label,
+ &last_insn_to_move);
+ gcc_assert (!only_notes);
+
reorder_insns (new_label, last_insn_to_move, loc);
/* All those insns are now in TARGET_LOOP. */
@@ -7649,9 +7652,9 @@ basic_induction_var (const struct loop *loop, rtx x, enum machine_mode mode,
case CONST_INT:
case SYMBOL_REF:
case CONST:
- /* convert_modes aborts if we try to convert to or from CCmode, so just
+ /* convert_modes dies if we try to convert to or from CCmode, so just
exclude that case. It is very unlikely that a condition code value
- would be a useful iterator anyways. convert_modes aborts if we try to
+ would be a useful iterator anyways. convert_modes dies if we try to
convert a float mode to non-float or vice versa too. */
if (loop->level == 1
&& GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
diff --git a/gcc/modulo-sched.c b/gcc/modulo-sched.c
index 4ea4ed8..11cfdcb 100644
--- a/gcc/modulo-sched.c
+++ b/gcc/modulo-sched.c
@@ -638,8 +638,7 @@ normalize_sched_times (partial_schedule_ptr ps)
ddg_node_ptr u = &g->nodes[i];
int normalized_time = SCHED_TIME (u) - amount;
- if (normalized_time < 0)
- abort ();
+ gcc_assert (normalized_time >= 0);
SCHED_TIME (u) = normalized_time;
SCHED_ROW (u) = normalized_time % ii;
@@ -1733,8 +1732,7 @@ check_nodes_order (int *node_order, int num_nodes)
{
int u = node_order[i];
- if (u >= num_nodes || u < 0 || TEST_BIT (tmp, u))
- abort ();
+ gcc_assert (u < num_nodes && u >= 0 && !TEST_BIT (tmp, u));
SET_BIT (tmp, u);
}
diff --git a/gcc/value-prof.c b/gcc/value-prof.c
index 9652c23..398fdfc 100644
--- a/gcc/value-prof.c
+++ b/gcc/value-prof.c
@@ -1789,7 +1789,7 @@ tree_find_values_to_profile (histogram_values *values)
break;
default:
- abort ();
+ gcc_unreachable ();
}
}
}
diff --git a/gcc/varasm.c b/gcc/varasm.c
index 6d3f443..832b55b 100644
--- a/gcc/varasm.c
+++ b/gcc/varasm.c
@@ -470,7 +470,7 @@ named_section (tree decl, const char *name, int reloc)
flags = targetm.section_type_flags (decl, name, reloc);
/* Sanity check user variables for flag changes. Non-user
- section flag changes will abort in named_section_flags.
+ section flag changes will die in named_section_flags.
However, don't complain if SECTION_OVERRIDE is set.
We trust that the setter knows that it is safe to ignore
the default flags for this decl. */
@@ -2170,8 +2170,8 @@ default_assemble_integer (rtx x ATTRIBUTE_UNUSED,
/* Assemble the integer constant X into an object of SIZE bytes. ALIGN is
the alignment of the integer in bits. Return 1 if we were able to output
- the constant, otherwise 0. If FORCE is nonzero, abort if we can't output
- the constant. */
+ the constant, otherwise 0. We must be able to output the constant,
+ if FORCE is nonzero. */
bool
assemble_integer (rtx x, unsigned int size, unsigned int align, int force)
@@ -2259,7 +2259,7 @@ assemble_real (REAL_VALUE_TYPE d, enum machine_mode mode, unsigned int align)
/* Given an expression EXP with a constant value,
reduce it to the sum of an assembler symbol and an integer.
Store them both in the structure *VALUE.
- Abort if EXP does not reduce. */
+ EXP must be reduceable. */
struct addr_const GTY(())
{