Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog       27
-rw-r--r--  gcc/regmove.c       14
-rw-r--r--  gcc/reload.c         2
-rw-r--r--  gcc/reload1.c       12
-rw-r--r--  gcc/resource.c       2
-rw-r--r--  gcc/rtl.def          2
-rw-r--r--  gcc/rtl.h            2
-rw-r--r--  gcc/rtlanal.c        4
-rw-r--r--  gcc/sched-deps.c     2
-rw-r--r--  gcc/sched-rgn.c     10
-rw-r--r--  gcc/sibcall.c        2
-rw-r--r--  gcc/simplify-rtx.c   6
-rw-r--r--  gcc/ssa-ccp.c        2
-rw-r--r--  gcc/ssa.c            2
-rw-r--r--  gcc/stmt.c           4
-rw-r--r--  gcc/stor-layout.c    8
-rw-r--r--  gcc/system.h         2
-rw-r--r--  gcc/tlink.c          2
-rw-r--r--  gcc/toplev.c         8
-rw-r--r--  gcc/tracer.c         2
-rw-r--r--  gcc/tree-inline.c    2
-rw-r--r--  gcc/tree.c           2
-rw-r--r--  gcc/tree.h           6
-rw-r--r--  gcc/unroll.c         4
-rw-r--r--  gcc/varasm.c         4
25 files changed, 80 insertions, 53 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index c046383..388076df 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,30 @@
+2002-12-24 Kazu Hirata <kazu@cs.umass.edu>
+
+ * regmove.c: Fix comment typos.
+ * reload.c: Likewise.
+ * reload1.c: Likewise.
+ * resource.c: Likewise.
+ * rtl.def: Likewise.
+ * rtl.h: Likewise.
+ * rtlanal.c: Likewise.
+ * sched-deps.c: Likewise.
+ * sched-rgn.c: Likewise.
+ * sibcall.c: Likewise.
+ * simplify-rtx.c: Likewise.
+ * ssa-ccp.c: Likewise.
+ * ssa.c: Likewise.
+ * stmt.c: Likewise.
+ * stor-layout.c: Likewise.
+ * system.h: Likewise.
+ * tlink.c: Likewise.
+ * toplev.c: Likewise.
+ * tracer.c: Likewise.
+ * tree-inline.c: Likewise.
+ * tree.c: Likewise.
+ * tree.h: Likewise.
+ * unroll.c: Likewise.
+ * varasm.c: Likewise.
+
2002-12-23 Larin Hennessy <larin@science.oregonstate.edu>
* doc/install.texi: Remove i386-*-isc, i860-*-bsd,
diff --git a/gcc/regmove.c b/gcc/regmove.c
index ad4f2ad..653d158 100644
--- a/gcc/regmove.c
+++ b/gcc/regmove.c
@@ -686,7 +686,7 @@ optimize_reg_copy_3 (insn, dest, src)
|| SET_DEST (set) != src_reg)
return;
- /* Be conserative: although this optimization is also valid for
+ /* Be conservative: although this optimization is also valid for
volatile memory references, that could cause trouble in later passes. */
if (MEM_VOLATILE_P (SET_SRC (set)))
return;
@@ -925,7 +925,7 @@ reg_is_remote_constant_p (reg, insn, first)
(set (reg100) (plus reg100 offset2-offset1)) */
/* ??? What does this comment mean? */
-/* cse disrupts preincrement / postdecrement squences when it finds a
+/* cse disrupts preincrement / postdecrement sequences when it finds a
hard register as ultimate source, like the frame pointer. */
static int
@@ -1072,7 +1072,7 @@ regmove_optimize (f, nregs, regmove_dump_file)
return;
/* Find out where a potential flags register is live, and so that we
- can supress some optimizations in those zones. */
+ can suppress some optimizations in those zones. */
mark_flags_life_zones (discover_flags_reg ());
regno_src_regno = (int *) xmalloc (sizeof *regno_src_regno * nregs);
@@ -1339,7 +1339,7 @@ regmove_optimize (f, nregs, regmove_dump_file)
it produces worse code, as it eliminates no copy
instructions and the copy emitted will be produced by
reload anyway. On patterns with multiple alternatives,
- there may be better sollution availble.
+ there may be better solution available.
In particular this change produced slower code for numeric
i387 programs. */
@@ -1499,7 +1499,7 @@ regmove_optimize (f, nregs, regmove_dump_file)
}
/* If we weren't able to replace any of the alternatives, try an
- alternative appoach of copying the source to the destination. */
+ alternative approach of copying the source to the destination. */
if (!success && copy_src != NULL_RTX)
copy_src_to_dest (insn, copy_src, copy_dst, old_max_uid);
@@ -2315,7 +2315,7 @@ record_stack_memrefs (xp, data)
We can't just compare with STACK_POINTER_RTX because the
reference to the stack pointer might be in some other mode.
- In particular, an explict clobber in an asm statement will
+ In particular, an explicit clobber in an asm statement will
result in a QImode clober. */
if (REGNO (x) == STACK_POINTER_REGNUM)
return 1;
@@ -2374,7 +2374,7 @@ combine_stack_adjustments_for_block (bb)
adjustment is now too large for a constant addition,
we cannot merge the two stack adjustments.
- Also we need to be carefull to not move stack pointer
+ Also we need to be careful to not move stack pointer
such that we create stack accesses outside the allocated
area. We can combine an allocation into the first insn,
or a deallocation into the second insn. We can not
diff --git a/gcc/reload.c b/gcc/reload.c
index ad3f375..e61c3b1 100644
--- a/gcc/reload.c
+++ b/gcc/reload.c
@@ -2898,7 +2898,7 @@ find_reloads (insn, replace, ind_levels, live_known, reload_reg_p)
by forcing the reload.
??? When is it right at this stage to have a subreg
- of a mem that is _not_ to be handled specialy? IMO
+ of a mem that is _not_ to be handled specially? IMO
those should have been reduced to just a mem. */
|| ((GET_CODE (operand) == MEM
|| (GET_CODE (operand)== REG
diff --git a/gcc/reload1.c b/gcc/reload1.c
index 8e09c10..aeb740e 100644
--- a/gcc/reload1.c
+++ b/gcc/reload1.c
@@ -7689,7 +7689,7 @@ delete_output_reload (insn, j, last_reload_reg)
/* The caller has already checked that REG dies or is set in INSN.
It has also checked that we are optimizing, and thus some
- inaccurancies in the debugging information are acceptable.
+ inaccuracies in the debugging information are acceptable.
So we could just delete output_reload_insn. But in some cases
we can improve the debugging information without sacrificing
optimization - maybe even improving the code: See if the pseudo
@@ -7891,7 +7891,7 @@ delete_address_reloads_1 (dead_insn, x, current_insn)
return;
/* ??? We can't finish the loop here, because dst might be
allocated to a pseudo in this block if no reload in this
- block needs any of the clsses containing DST - see
+ block needs any of the classes containing DST - see
spill_hard_reg. There is no easy way to tell this, so we
have to scan till the end of the basic block. */
}
@@ -8530,7 +8530,7 @@ reload_cse_simplify_operands (insn, testreg)
/* If reload couldn't use reg+reg+offset addressing, try to use reg+reg
addressing now.
- This code might also be useful when reload gave up on reg+reg addresssing
+ This code might also be useful when reload gave up on reg+reg addressing
because of clashes between the return register and INDEX_REG_CLASS. */
/* The maximum number of uses of a register we can keep track of to
@@ -8551,7 +8551,7 @@ struct reg_use { rtx insn, *usep; };
last, of these uses.
STORE_RUID is always meaningful if we only want to use a value in a
register in a different place: it denotes the next insn in the insn
- stream (i.e. the last ecountered) that sets or clobbers the register. */
+ stream (i.e. the last encountered) that sets or clobbers the register. */
static struct
{
struct reg_use reg_use[RELOAD_COMBINE_MAX_USES];
@@ -9054,7 +9054,7 @@ static HOST_WIDE_INT reg_offset[FIRST_PSEUDO_REGISTER];
static int reg_base_reg[FIRST_PSEUDO_REGISTER];
static enum machine_mode reg_mode[FIRST_PSEUDO_REGISTER];
-/* move2add_luid is linearily increased while scanning the instructions
+/* move2add_luid is linearly increased while scanning the instructions
from first to last. It is used to set reg_set_luid in
reload_cse_move2add and move2add_note_store. */
static int move2add_luid;
@@ -9499,7 +9499,7 @@ fixup_abnormal_edges ()
If it's placed after a trapping call (i.e. that
call is the last insn anyway), we have no fallthru
edge. Simply delete this use and don't try to insert
- on the non-existant edge. */
+ on the non-existent edge. */
if (GET_CODE (PATTERN (insn)) != USE)
{
/* We're not deleting it, we're moving it. */
diff --git a/gcc/resource.c b/gcc/resource.c
index 7388aa3..f542460 100644
--- a/gcc/resource.c
+++ b/gcc/resource.c
@@ -1245,7 +1245,7 @@ init_resource_info (epilogue_insn)
bb_ticks = (int *) xcalloc (last_basic_block, sizeof (int));
}
-/* Free up the resources allcated to mark_target_live_regs (). This
+/* Free up the resources allocated to mark_target_live_regs (). This
should be invoked after the last call to mark_target_live_regs (). */
void
diff --git a/gcc/rtl.def b/gcc/rtl.def
index 353a225..6341386 100644
--- a/gcc/rtl.def
+++ b/gcc/rtl.def
@@ -960,7 +960,7 @@ DEF_RTL_EXPR(GTU, "gtu", "ee", '<')
DEF_RTL_EXPR(LEU, "leu", "ee", '<')
DEF_RTL_EXPR(LTU, "ltu", "ee", '<')
-/* Additional floating point unordered comparision flavors. */
+/* Additional floating point unordered comparison flavors. */
DEF_RTL_EXPR(UNORDERED, "unordered", "ee", '<')
DEF_RTL_EXPR(ORDERED, "ordered", "ee", '<')
diff --git a/gcc/rtl.h b/gcc/rtl.h
index 72d07f5..c873d9f 100644
--- a/gcc/rtl.h
+++ b/gcc/rtl.h
@@ -1838,7 +1838,7 @@ extern rtx gen_lowpart_SUBREG PARAMS ((enum machine_mode, rtx));
#define VIRTUAL_OUTGOING_ARGS_REGNUM ((FIRST_VIRTUAL_REGISTER) + 3)
/* This points to the Canonical Frame Address of the function. This
- should corrospond to the CFA produced by INCOMING_FRAME_SP_OFFSET,
+ should correspond to the CFA produced by INCOMING_FRAME_SP_OFFSET,
but is calculated relative to the arg pointer for simplicity; the
frame pointer nor stack pointer are necessarily fixed relative to
the CFA until after reload. */
diff --git a/gcc/rtlanal.c b/gcc/rtlanal.c
index 924ef52..2aad022 100644
--- a/gcc/rtlanal.c
+++ b/gcc/rtlanal.c
@@ -3336,7 +3336,7 @@ find_first_parameter_load (call_insn, boundary)
return before;
}
-/* Return true if we should avoid inserting code between INSN and preceeding
+/* Return true if we should avoid inserting code between INSN and preceding
call instruction. */
bool
@@ -3479,7 +3479,7 @@ can_hoist_insn_p (insn, val, live)
case USE:
/* We need to fix callers to really ensure availability
of all values inisn uses, but for now it is safe to prohibit
- hoisting of any insn having such a hiden uses. */
+ hoisting of any insn having such a hidden uses. */
return false;
break;
case CLOBBER:
diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c
index ec695dd..ec3df2c1 100644
--- a/gcc/sched-deps.c
+++ b/gcc/sched-deps.c
@@ -1533,7 +1533,7 @@ free_deps (deps)
}
/* If it is profitable to use them, initialize caches for tracking
- dependency informatino. LUID is the number of insns to be scheduled,
+ dependency information. LUID is the number of insns to be scheduled,
it is used in the estimate of profitability. */
void
diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c
index 570abac..43fdef7 100644
--- a/gcc/sched-rgn.c
+++ b/gcc/sched-rgn.c
@@ -276,7 +276,7 @@ static void compute_dom_prob_ps PARAMS ((int));
#define INSN_BB(INSN) (BLOCK_TO_BB (BLOCK_NUM (INSN)))
/* Parameters affecting the decision of rank_for_schedule().
- ??? Nope. But MIN_PROBABILITY is used in copmute_trg_info. */
+ ??? Nope. But MIN_PROBABILITY is used in compute_trg_info. */
#define MIN_PROBABILITY 40
/* Speculative scheduling functions. */
@@ -802,7 +802,7 @@ find_rgns (edge_list, dom)
if (no_loops)
SET_BIT (header, 0);
- /* Second travsersal:find reducible inner loops and topologically sort
+ /* Second traversal:find reducible inner loops and topologically sort
block of each region. */
queue = (int *) xmalloc (n_basic_blocks * sizeof (int));
@@ -1291,7 +1291,7 @@ debug_candidates (trg)
debug_candidate (i);
}
-/* Functions for speculative scheduing. */
+/* Functions for speculative scheduling. */
/* Return 0 if x is a set of a register alive in the beginning of one
of the split-blocks of src, otherwise return 1. */
@@ -2531,7 +2531,7 @@ propagate_deps (bb, pred_deps)
/* Compute backward dependences inside bb. In a multiple blocks region:
(1) a bb is analyzed after its predecessors, and (2) the lists in
effect at the end of bb (after analyzing for bb) are inherited by
- bb's successrs.
+ bb's successors.
Specifically for reg-reg data dependences, the block insns are
scanned by sched_analyze () top-to-bottom. Two lists are
@@ -2712,7 +2712,7 @@ schedule_region (rgn)
init_deps_global ();
- /* Initializations for region data dependence analyisis. */
+ /* Initializations for region data dependence analysis. */
bb_deps = (struct deps *) xmalloc (sizeof (struct deps) * current_nr_blocks);
for (bb = 0; bb < current_nr_blocks; bb++)
init_deps (bb_deps + bb);
diff --git a/gcc/sibcall.c b/gcc/sibcall.c
index 91cd781..bfad98a 100644
--- a/gcc/sibcall.c
+++ b/gcc/sibcall.c
@@ -693,7 +693,7 @@ optimize_sibling_and_tail_recursive_calls ()
|| current_function_calls_setjmp
/* Can't if more than one successor or single successor is not
exit block. These two tests prevent tail call optimization
- in the presense of active exception handlers. */
+ in the presence of active exception handlers. */
|| call_block->succ == NULL
|| call_block->succ->succ_next != NULL
|| (call_block->succ->dest != EXIT_BLOCK_PTR
diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c
index 04c7b9f..1949f24 100644
--- a/gcc/simplify-rtx.c
+++ b/gcc/simplify-rtx.c
@@ -2434,7 +2434,7 @@ simplify_subreg (outermode, op, innermode, byte)
if (GET_CODE (op) == CONST_INT)
val = INTVAL (op);
- /* We don't handle synthetizing of non-integral constants yet. */
+ /* We don't handle synthesizing of non-integral constants yet. */
if (GET_MODE_CLASS (outermode) != MODE_INT)
return NULL_RTX;
@@ -2571,7 +2571,7 @@ simplify_subreg (outermode, op, innermode, byte)
rtx x = gen_rtx_REG (outermode, final_regno);
/* Propagate original regno. We don't have any way to specify
- the offset inside orignal regno, so do so only for lowpart.
+ the offset inside original regno, so do so only for lowpart.
The information is used only by alias analysis that can not
grog partial register anyway. */
@@ -2656,7 +2656,7 @@ simplify_gen_subreg (outermode, op, innermode, byte)
This is the preferred entry point into the simplification routines;
however, we still allow passes to call the more specific routines.
- Right now GCC has three (yes, three) major bodies of RTL simplficiation
+ Right now GCC has three (yes, three) major bodies of RTL simplification
code that need to be unified.
1. fold_rtx in cse.c. This code uses various CSE specific
diff --git a/gcc/ssa-ccp.c b/gcc/ssa-ccp.c
index 64e5f24..085f18f 100644
--- a/gcc/ssa-ccp.c
+++ b/gcc/ssa-ccp.c
@@ -815,7 +815,7 @@ optimize_unexecutable_edges (edges, executable_edges)
&& bb->succ && bb->succ->succ_next == NULL)
{
/* If the fallthru edge is the executable edge, then turn
- this jump into a nop jump, otherwise make it an unconditinoal
+ this jump into a nop jump, otherwise make it an unconditional
jump to its target. */
if (edge->flags & EDGE_FALLTHRU)
{
diff --git a/gcc/ssa.c b/gcc/ssa.c
index b2c05bc..0a640ef 100644
--- a/gcc/ssa.c
+++ b/gcc/ssa.c
@@ -1828,7 +1828,7 @@ struct phi_coalesce_context
/* Callback function for for_each_successor_phi. If the set
destination and the phi alternative regs do not conflict, place
- them in the same paritition class. DATA is a pointer to a
+ them in the same partition class. DATA is a pointer to a
phi_coalesce_context struct. */
static int
diff --git a/gcc/stmt.c b/gcc/stmt.c
index 2fc608c..fbdf463 100644
--- a/gcc/stmt.c
+++ b/gcc/stmt.c
@@ -1122,7 +1122,7 @@ expand_asm (body)
will be true if the operand is read-write, i.e., if it is used as
an input as well as an output. If *CONSTRAINT_P is not in
canonical form, it will be made canonical. (Note that `+' will be
- rpelaced with `=' as part of this process.)
+ replaced with `=' as part of this process.)
Returns TRUE if all went well; FALSE if an error occurred. */
@@ -2597,7 +2597,7 @@ expand_end_loop ()
end_label:
We rely on the presence of NOTE_INSN_LOOP_END_TOP_COND to mark
- the end of the entry condtional. Without this, our lexical scan
+ the end of the entry conditional. Without this, our lexical scan
can't tell the difference between an entry conditional and a
body conditional that exits the loop. Mistaking the two means
that we can misplace the NOTE_INSN_LOOP_CONT note, which can
diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c
index 6c81924..70eecc0 100644
--- a/gcc/stor-layout.c
+++ b/gcc/stor-layout.c
@@ -1001,10 +1001,10 @@ place_field (rli, field)
used in the record, and any additional adjacent long bitfields are
packed into the same chunk of 32 bits. However, if the size
changes, a new field of that size is allocated.) In an unpacked
- record, this is the same as using alignment, but not eqivalent
+ record, this is the same as using alignment, but not equivalent
when packing.
- Note: for compatability, we use the type size, not the type alignment
+ Note: for compatibility, we use the type size, not the type alignment
to determine alignment, since that matches the documentation */
if ((* targetm.ms_bitfield_layout_p) (rli->t)
@@ -1103,7 +1103,7 @@ place_field (rli, field)
TYPE_SIZE (TREE_TYPE (prev_saved)))
: !integer_zerop (DECL_SIZE (field)) ))
{
- unsigned int type_align = 8; /* Never below 8 for compatability */
+ unsigned int type_align = 8; /* Never below 8 for compatibility */
/* (When not a bitfield), we could be seeing a flex array (with
no DECL_SIZE). Since we won't be using remaining_in_alignment
@@ -1197,7 +1197,7 @@ place_field (rli, field)
/* Assuming that all the fields have been laid out, this function uses
RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type
- inidicated by RLI. */
+ indicated by RLI. */
static void
finalize_record_size (rli)
diff --git a/gcc/system.h b/gcc/system.h
index 8d1c521..0d8106d 100644
--- a/gcc/system.h
+++ b/gcc/system.h
@@ -470,7 +470,7 @@ extern void abort PARAMS ((void));
/* Say how to test for an absolute pathname. On Unix systems, this is if
it starts with a leading slash or a '$', the latter meaning the value of
- an environment variable is to be used. On machien with DOS-based
+ an environment variable is to be used. On machine with DOS-based
file systems, it is also absolute if it starts with a drive identifier. */
#ifdef HAVE_DOS_BASED_FILE_SYSTEM
#define IS_ABSOLUTE_PATHNAME(STR) \
diff --git a/gcc/tlink.c b/gcc/tlink.c
index ea1ce3a..706bd6e 100644
--- a/gcc/tlink.c
+++ b/gcc/tlink.c
@@ -343,7 +343,7 @@ pfgets (stream)
/* Subroutine of read_repo_file. We are reading the repo file for file F,
which is coming in on STREAM, and the symbol that comes next in STREAM
- is offerred, chosen or provided if CHOSEN is 0, 1 or 2, respectively.
+ is offered, chosen or provided if CHOSEN is 0, 1 or 2, respectively.
XXX "provided" is unimplemented, both here and in the compiler. */
diff --git a/gcc/toplev.c b/gcc/toplev.c
index 6d1a30d..ccc061f 100644
--- a/gcc/toplev.c
+++ b/gcc/toplev.c
@@ -750,7 +750,7 @@ int flag_schedule_insns_after_reload = 0;
/* The following flags have effect only for scheduling before register
allocation:
- flag_schedule_interblock means schedule insns accross basic blocks.
+ flag_schedule_interblock means schedule insns across basic blocks.
flag_schedule_speculative means allow speculative motion of non-load insns.
flag_schedule_speculative_load means allow speculative motion of some
load insns.
@@ -2973,11 +2973,11 @@ rest_of_compilation (decl)
| (flag_thread_jumps ? CLEANUP_THREADING : 0));
/* It may make more sense to mark constant functions after dead code is
- eliminated by life_analyzis, but we need to do it early, as -fprofile-arcs
+ eliminated by life_analysis, but we need to do it early, as -fprofile-arcs
may insert code making function non-constant, but we still must consider
it as constant, otherwise -fbranch-probabilities will not read data back.
- life_analyzis rarely eliminates modification of external memory.
+ life_analysis rarely eliminates modification of external memory.
*/
if (optimize)
mark_constant_function ();
@@ -3478,7 +3478,7 @@ rest_of_compilation (decl)
open_dump_file (DFI_bbro, decl);
/* Last attempt to optimize CFG, as scheduling, peepholing and insn
- splitting possibly introduced more crossjumping oppurtuntities.
+ splitting possibly introduced more crossjumping opportunities.
Except that we can't actually run crossjumping without running
another DCE pass, which we can't do after reg-stack. */
cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_POST_REGSTACK
diff --git a/gcc/tracer.c b/gcc/tracer.c
index de231e8..4c5aa76 100644
--- a/gcc/tracer.c
+++ b/gcc/tracer.c
@@ -287,7 +287,7 @@ tail_duplicate ()
bb2 = cfg_layout_duplicate_bb (bb2, e);
/* Reconsider the original copy of block we've duplicated.
- Removing the most common predecesor may make it to be
+ Removing the most common predecessor may make it to be
head. */
blocks[old->index] =
fibheap_insert (heap, -old->frequency, old);
diff --git a/gcc/tree-inline.c b/gcc/tree-inline.c
index dfb1935..110f938 100644
--- a/gcc/tree-inline.c
+++ b/gcc/tree-inline.c
@@ -1257,7 +1257,7 @@ expand_call_inline (tp, walk_subtrees, data)
#endif /* INLINER_FOR_JAVA */
/* After the body of the function comes the RET_LABEL. This must come
- before we evaluate the returned value below, because that evalulation
+ before we evaluate the returned value below, because that evaluation
may cause RTL to be generated. */
#ifndef INLINER_FOR_JAVA
COMPOUND_BODY (stmt)
diff --git a/gcc/tree.c b/gcc/tree.c
index f2fc48f..0247bb9 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -1362,7 +1362,7 @@ save_expr (expr)
/* If we have simple operations applied to a SAVE_EXPR or to a SAVE_EXPR and
a constant, it will be more efficient to not make another SAVE_EXPR since
it will allow better simplification and GCSE will be able to merge the
- computations if they actualy occur. */
+ computations if they actually occur. */
inner = t;
while (1)
{
diff --git a/gcc/tree.h b/gcc/tree.h
index b5803e5..f06a666 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -1608,7 +1608,7 @@ struct tree_type GTY(())
#define DECL_EXTERNAL(NODE) (DECL_CHECK (NODE)->decl.external_flag)
/* In a VAR_DECL for a RECORD_TYPE, sets number for non-init_priority
- initializatons. */
+ initializations. */
#define DEFAULT_INIT_PRIORITY 65535
#define MAX_INIT_PRIORITY 65535
#define MAX_RESERVED_INIT_PRIORITY 100
@@ -2421,7 +2421,7 @@ extern tree build_qualified_type PARAMS ((tree, int));
/* Like build_qualified_type, but only deals with the `const' and
`volatile' qualifiers. This interface is retained for backwards
- compatiblity with the various front-ends; new code should use
+ compatibility with the various front-ends; new code should use
build_qualified_type instead. */
#define build_type_variant(TYPE, CONST_P, VOLATILE_P) \
@@ -2675,7 +2675,7 @@ enum tree_node_structure_enum tree_node_structure PARAMS ((tree));
extern tree unsave_expr PARAMS ((tree));
-/* Reset EXP in place so that it can be expaned again. Does not
+/* Reset EXP in place so that it can be expanded again. Does not
recurse into subtrees. */
extern void unsave_expr_1 PARAMS ((tree));
diff --git a/gcc/unroll.c b/gcc/unroll.c
index da142a6..a371e48 100644
--- a/gcc/unroll.c
+++ b/gcc/unroll.c
@@ -69,7 +69,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
/* ??? Improve control of which loops get unrolled. Could use profiling
info to only unroll the most commonly executed loops. Perhaps have
- a user specifyable option to control the amount of code expansion,
+ a user specifiable option to control the amount of code expansion,
or the percent of loops to consider for unrolling. Etc. */
/* ??? Look at the register copies inside the loop to see if they form a
@@ -3792,7 +3792,7 @@ loop_iterations (loop)
if (inc_once == final_value)
{
/* The iterator value once through the loop is equal to the
- comparision value. Either we have an infinite loop, or
+ comparison value. Either we have an infinite loop, or
we'll loop twice. */
if (increment == const0_rtx)
return 0;
diff --git a/gcc/varasm.c b/gcc/varasm.c
index ffa653b..948b1a9 100644
--- a/gcc/varasm.c
+++ b/gcc/varasm.c
@@ -3864,7 +3864,7 @@ initializer_constant_valid_p (value, endtype)
op1 = TREE_OPERAND (value, 1);
/* Like STRIP_NOPS except allow the operand mode to widen.
- This works around a feature of fold that simplfies
+ This works around a feature of fold that simplifies
(int)(p1 - p2) to ((int)p1 - (int)p2) under the theory
that the narrower operation is cheaper. */
@@ -3938,7 +3938,7 @@ output_constant (exp, size, align)
enum tree_code code;
HOST_WIDE_INT thissize;
- /* Some front-ends use constants other than the standard language-indepdent
+ /* Some front-ends use constants other than the standard language-independent
varieties, but which may still be output directly. Give the front-end a
chance to convert EXP to a language-independent representation. */
exp = (*lang_hooks.expand_constant) (exp);