-rw-r--r--  gcc/ChangeLog      17
-rw-r--r--  gcc/output.h        2
-rw-r--r--  gcc/predict.c      12
-rw-r--r--  gcc/print-tree.c    2
-rw-r--r--  gcc/profile.c       2
-rw-r--r--  gcc/ra-build.c     32
-rw-r--r--  gcc/ra-colorize.c   8
-rw-r--r--  gcc/ra-debug.c      2
-rw-r--r--  gcc/ra-rewrite.c    2
-rw-r--r--  gcc/ra.c            6
-rw-r--r--  gcc/ra.h            8
-rw-r--r--  gcc/real.c          2
-rw-r--r--  gcc/recog.c         4
-rw-r--r--  gcc/reg-stack.c     6
-rw-r--r--  gcc/regclass.c      6
15 files changed, 64 insertions, 47 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 02e5ef5..3182687 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,22 @@
2002-12-25 Kazu Hirata <kazu@cs.umass.edu>
+ * output.h: Fix comment typos.
+ * predict.c: Likewise.
+ * print-tree.c: Likewise.
+ * profile.c: Likewise.
+ * ra-build.c: Likewise.
+ * ra-colorize.c: Likewise.
+ * ra-debug.c: Likewise.
+ * ra-rewrite.c: Likewise.
+ * ra.c: Likewise.
+ * ra.h: Likewise.
+ * real.c: Likewise.
+ * recog.c: Likewise.
+ * reg-stack.c: Likewise.
+ * regclass.c: Likewise.
+
+2002-12-25 Kazu Hirata <kazu@cs.umass.edu>
+
* config/h8300/h8300.c (print_operand_address): Do not negate
a negative number when printing one.
diff --git a/gcc/output.h b/gcc/output.h
index d6c0a05..3f6c0cd 100644
--- a/gcc/output.h
+++ b/gcc/output.h
@@ -461,7 +461,7 @@ extern void default_eh_frame_section PARAMS ((void));
/* Default target hook that outputs nothing to a stream. */
extern void no_asm_to_stream PARAMS ((FILE *));
-/* Flags controling properties of a section. */
+/* Flags controlling properties of a section. */
#define SECTION_ENTSIZE 0x000ff /* entity size in section */
#define SECTION_CODE 0x00100 /* contains code */
#define SECTION_WRITE 0x00200 /* data is writable */
diff --git a/gcc/predict.c b/gcc/predict.c
index feeaeac..3f3e2fc 100644
--- a/gcc/predict.c
+++ b/gcc/predict.c
@@ -113,7 +113,7 @@ static const struct predictor_info predictor_info[]= {
#undef DEF_PREDICTOR
/* Return true in case BB can be CPU intensive and should be optimized
- for maximal perofmrance. */
+ for maximal performance. */
bool
maybe_hot_bb_p (bb)
@@ -470,7 +470,7 @@ estimate_probability (loops_info)
}
/* Loop exit heuristics - predict an edge exiting the loop if the
- conditinal has no loop header successors as not taken. */
+ conditional has no loop header successors as not taken. */
if (!header_found)
for (e = bb->succ; e; e = e->succ_next)
if (e->dest->index < 0
@@ -565,7 +565,7 @@ estimate_probability (loops_info)
case EQ:
case UNEQ:
/* Floating point comparisons appears to behave in a very
- inpredictable way because of special role of = tests in
+ unpredictable way because of special role of = tests in
FP code. */
if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0))))
;
@@ -581,7 +581,7 @@ estimate_probability (loops_info)
case NE:
case LTGT:
/* Floating point comparisons appears to behave in a very
- inpredictable way because of special role of = tests in
+ unpredictable way because of special role of = tests in
FP code. */
if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0))))
;
@@ -801,7 +801,7 @@ process_note_predictions (bb, heads, dominators, post_dominators)
rtx insn;
edge e;
- /* Additionaly, we check here for blocks with no successors. */
+ /* Additionally, we check here for blocks with no successors. */
int contained_noreturn_call = 0;
int was_bb_head = 0;
int noreturn_block = 1;
@@ -1091,7 +1091,7 @@ counts_to_freqs ()
/* Return true if function is likely to be expensive, so there is no point to
optimize performance of prologue, epilogue or do inlining at the expense
- of code size growth. THRESHOLD is the limit of number of isntructions
+ of code size growth. THRESHOLD is the limit of number of instructions
function can execute at average to be still considered not expensive. */
bool
diff --git a/gcc/print-tree.c b/gcc/print-tree.c
index d063831..914ffcb 100644
--- a/gcc/print-tree.c
+++ b/gcc/print-tree.c
@@ -195,7 +195,7 @@ print_node (file, prefix, node, indent)
return;
}
- /* It is unsafe to look at any other filds of an ERROR_MARK node. */
+ /* It is unsafe to look at any other fields of an ERROR_MARK node. */
if (TREE_CODE (node) == ERROR_MARK)
{
print_node_brief (file, prefix, node, indent);
diff --git a/gcc/profile.c b/gcc/profile.c
index 2e70598..d14b8624 100644
--- a/gcc/profile.c
+++ b/gcc/profile.c
@@ -948,7 +948,7 @@ branch_prob ()
/* Add fake edges from entry block to the call insns that may return
twice. The CFG is not quite correct then, as call insn plays more
role of CODE_LABEL, but for our purposes, everything should be OK,
- as we never insert code to the beggining of basic block. */
+ as we never insert code to the beginning of basic block. */
for (insn = bb->head; insn != NEXT_INSN (bb->end);
insn = NEXT_INSN (insn))
{
diff --git a/gcc/ra-build.c b/gcc/ra-build.c
index dd1b884..5f0b3e1 100644
--- a/gcc/ra-build.c
+++ b/gcc/ra-build.c
@@ -36,7 +36,7 @@
#include "ggc.h"
#include "ra.h"
-/* This file is part of the graph coloring register alloctor.
+/* This file is part of the graph coloring register allocator.
It deals with building the interference graph. When rebuilding
the graph for a function after spilling, we rebuild only those
parts needed, i.e. it works incrementally.
@@ -48,7 +48,7 @@
conflicts. By connecting the uses and defs, which reach each other, webs
(or live ranges) are built conceptually.
- The second part (make_webs() and childs) deals with converting that
+ The second part (make_webs() and children) deals with converting that
structure to the nodes and edges, on which our interference graph is
built. For each root web part constructed above, an instance of struct
web is created. For all subregs of pseudos, which matter for allocation,
@@ -369,7 +369,7 @@ static struct undef_table_s {
/* Interpret *UNDEFINED as bitmask where each bit corresponds to a byte.
A set bit means an undefined byte. Factor all undefined bytes into
groups, and return a size/ofs pair of consecutive undefined bytes,
- but according to certain borders. Clear out those bits corrsponding
+ but according to certain borders. Clear out those bits corresponding
to bytes overlaid by that size/ofs pair. REG is only used for
the mode, to detect if it's a floating mode or not.
@@ -491,7 +491,7 @@ union_web_part_roots (r1, r2)
{
/* The new root is the smaller (pointerwise) of both. This is crucial
to make the construction of webs from web parts work (so, when
- scanning all parts, we see the roots before all it's childs).
+ scanning all parts, we see the roots before all its children).
Additionally this ensures, that if the web has a def at all, than
the root is a def (because all def parts are before use parts in the
web_parts[] array), or put another way, as soon, as the root of a
@@ -548,7 +548,7 @@ union_web_part_roots (r1, r2)
return r1;
}
-/* Convenience macro, that is cabable of unioning also non-roots. */
+/* Convenience macro, that is capable of unioning also non-roots. */
#define union_web_parts(p1, p2) \
((p1 == p2) ? find_web_part (p1) \
: union_web_part_roots (find_web_part (p1), find_web_part (p2)))
@@ -583,7 +583,7 @@ remember_move (insn)
/* XXX for now we don't remember move insns involving any subregs.
Those would be difficult to coalesce (we would need to implement
handling of all the subwebs in the allocator, including that such
- subwebs could be source and target of coalesing). */
+ subwebs could be source and target of coalescing). */
if (GET_CODE (s) == REG && GET_CODE (d) == REG)
{
struct move *m = (struct move *) ra_calloc (sizeof (struct move));
@@ -673,7 +673,7 @@ defuse_overlap_p_1 (def, use)
/* Now the more difficult part: the same regno is refered, but the
sizes of the references or the words differ. E.g.
(subreg:SI (reg:CDI a) 0) and (subreg:DI (reg:CDI a) 2) do not
- overlap, wereas the latter overlaps with (subreg:SI (reg:CDI a) 3).
+ overlap, whereas the latter overlaps with (subreg:SI (reg:CDI a) 3).
*/
{
unsigned HOST_WIDE_INT old_u;
@@ -964,7 +964,7 @@ live_in (df, use, insn)
basic_block bb = BLOCK_FOR_INSN (insn);
number_seen[uid]++;
- /* We want to be as fast as possible, so explicitely write
+ /* We want to be as fast as possible, so explicitly write
this loop. */
for (insn = PREV_INSN (insn); insn && !INSN_P (insn);
insn = PREV_INSN (insn))
@@ -1046,7 +1046,7 @@ update_regnos_mentioned ()
}
}
-/* Handle the uses which reach a block end, but were defered due
+/* Handle the uses which reach a block end, but were deferred due
to it's regno not being mentioned in that block. This adds the
remaining conflicts and updates also the crosses_call and
spanned_deaths members. */
@@ -1062,7 +1062,7 @@ livethrough_conflicts_bb (bb)
unsigned int deaths = 0;
unsigned int contains_call = 0;
- /* If there are no defered uses, just return. */
+ /* If there are no deferred uses, just return. */
if ((first = bitmap_first_set_bit (info->live_throughout)) < 0)
return;
@@ -1141,7 +1141,7 @@ free_bb_info ()
}
/* Toplevel function for the first part of this file.
- Connect web parts, thereby implicitely building webs, and remember
+ Connect web parts, thereby implicitly building webs, and remember
their conflicts. */
static void
@@ -1545,7 +1545,7 @@ copy_conflict_list (web)
/* Possibly add an edge from web FROM to TO marking a conflict between
those two. This is one half of marking a complete conflict, which notes
in FROM, that TO is a conflict. Adding TO to FROM's conflicts might
- make other conflicts superflous, because the current TO overlaps some web
+ make other conflicts superfluous, because the current TO overlaps some web
already being in conflict with FROM. In this case the smaller webs are
deleted from the conflict list. Likewise if TO is overlapped by a web
already in the list, it isn't added at all. Note, that this can only
@@ -2100,7 +2100,7 @@ parts_to_webs (df)
sbitmap_zero (igraph);
sbitmap_zero (sup_igraph);
- /* Distibute the references to their webs. */
+ /* Distribute the references to their webs. */
init_webs_defs_uses ();
/* And do some sanity checks if old webs, and those recreated from the
really are the same. */
@@ -2149,7 +2149,7 @@ reset_conflicts ()
{
*pcl = NULL;
/* Useless conflicts will be rebuilt completely. But check
- for cleanlyness, as the web might have come from the
+ for cleanliness, as the web might have come from the
free list. */
if (bitmap_first_set_bit (web->useless_conflicts) >= 0)
abort ();
@@ -2985,7 +2985,7 @@ handle_asm_insn (df, insn)
are not allowed by the constraints. */
if (nothing_allowed)
{
- /* If we had no real constraints nothing was explicitely
+ /* If we had no real constraints nothing was explicitly
allowed, so we allow the whole class (i.e. we make no
additional conflicts). */
CLEAR_HARD_REG_SET (conflict);
@@ -3055,7 +3055,7 @@ build_i_graph (df)
}
/* Allocates or reallocates most memory for the interference graph and
- assiciated structures. If it reallocates memory (meaning, this is not
+ associated structures. If it reallocates memory (meaning, this is not
the first pass), this also changes some structures to reflect the
additional entries in various array, and the higher number of
defs and uses. */
diff --git a/gcc/ra-colorize.c b/gcc/ra-colorize.c
index e7af300..b867255 100644
--- a/gcc/ra-colorize.c
+++ b/gcc/ra-colorize.c
@@ -338,7 +338,7 @@ build_worklists (df)
backed by a new pseudo, but conceptually can stand for a stackslot,
i.e. it doesn't really matter if they get a color or not), on
the SELECT stack first, those with lowest cost first. This way
- they will be colored last, so do not contrain the coloring of the
+ they will be colored last, so do not constrain the coloring of the
normal webs. But still those with the highest count are colored
before, i.e. get a color more probable. The use of stackregs is
a pure optimization, and all would work, if we used real stackslots
@@ -1347,7 +1347,7 @@ colorize_one_web (web, hard)
HARD_REG_SET call_clobbered;
/* Here we choose a hard-reg for the current web. For non spill
- temporaries we first search in the hardregs for it's prefered
+ temporaries we first search in the hardregs for it's preferred
class, then, if we found nothing appropriate, in those of the
alternate class. For spill temporaries we only search in
usable_regs of this web (which is probably larger than that of
@@ -1558,7 +1558,7 @@ colorize_one_web (web, hard)
set_cand (6, aw);
/* For boehm-gc/misc.c. If we are a difficult spilltemp,
also coalesced neighbors are a chance, _even_ if they
- too are spilltemps. At least their coalscing can be
+ too are spilltemps. At least their coalescing can be
broken up, which may be reset usable_regs, and makes
it easier colorable. */
if (web->spill_temp != 2 && aw->is_coalesced
@@ -2025,7 +2025,7 @@ check_colors ()
if (!TEST_HARD_REG_BIT (web->usable_regs, aweb->color + c))
abort ();
/* Search the original (pre-coalesce) conflict list. In the current
- one some inprecise conflicts may be noted (due to combine() or
+ one some imprecise conflicts may be noted (due to combine() or
insert_coalesced_conflicts() relocating partial conflicts) making
it look like some wide webs are in conflict and having the same
color. */
diff --git a/gcc/ra-debug.c b/gcc/ra-debug.c
index ed5b4ee..e415df4 100644
--- a/gcc/ra-debug.c
+++ b/gcc/ra-debug.c
@@ -817,7 +817,7 @@ dump_constraints ()
int uid = INSN_UID (insn);
int o;
/* Don't simply force rerecognition, as combine might left us
- with some unrecongnizable ones, which later leads to aborts
+ with some unrecognizable ones, which later leads to aborts
in regclass, if we now destroy the remembered INSN_CODE(). */
/*INSN_CODE (insn) = -1;*/
code = recog_memoized (insn);
diff --git a/gcc/ra-rewrite.c b/gcc/ra-rewrite.c
index 9071c8f..e10ddd3 100644
--- a/gcc/ra-rewrite.c
+++ b/gcc/ra-rewrite.c
@@ -1886,7 +1886,7 @@ delete_moves ()
}
}
-/* Due to resons documented elsewhere we create different pseudos
+/* Due to reasons documented elsewhere we create different pseudos
for all webs coalesced to hardregs. For these parts life_analysis()
might have added REG_DEAD notes without considering, that only this part
but not the whole coalesced web dies. The RTL is correct, there is no
diff --git a/gcc/ra.c b/gcc/ra.c
index 9dd5d4a..785ef94 100644
--- a/gcc/ra.c
+++ b/gcc/ra.c
@@ -75,7 +75,7 @@
* Lattice based rematerialization
* create definitions of ever-life regs at the beginning of
the insn chain
- * insert loads as soon, stores as late as possile
+ * insert loads as soon, stores as late as possible
* insert spill insns as outward as possible (either looptree, or LCM)
* reuse stack-slots
* delete coalesced insns. Partly done. The rest can only go, when we get
@@ -657,7 +657,7 @@ reg_alloc ()
/* If this is an empty function we shouldn't do all the following,
but instead just setup what's necessary, and return. */
- /* We currently rely on the existance of the return value USE as
+ /* We currently rely on the existence of the return value USE as
one of the last insns. Add it if it's not there anymore. */
if (last)
{
@@ -698,7 +698,7 @@ reg_alloc ()
/* Run regclass first, so we know the preferred and alternate classes
for each pseudo. Deactivate emitting of debug info, if it's not
- explicitely requested. */
+ explicitly requested. */
if ((debug_new_regalloc & DUMP_REGCLASS) == 0)
rtl_dump_file = NULL;
regclass (get_insns (), max_reg_num (), rtl_dump_file);
diff --git a/gcc/ra.h b/gcc/ra.h
index d3c1f1a..04962df9 100644
--- a/gcc/ra.h
+++ b/gcc/ra.h
@@ -62,7 +62,7 @@ struct tagged_conflict
In the process of building the interference graph web parts are
connected together, if they have common instructions and reference the
same register. That way live ranges are build (by connecting defs and
- uses) and implicitely complete webs (by connecting web parts in common
+ uses) and implicitly complete webs (by connecting web parts in common
uses). */
struct web_part
{
@@ -378,7 +378,7 @@ extern sbitmap igraph;
/* This is the bitmap of all (even partly) conflicting super webs.
If bit I*num_webs+J or J*num_webs+I is set, then I and J (both being
super web indices) conflict, maybe only partially. Note the
- assymetry. */
+ asymmetry. */
extern sbitmap sup_igraph;
/* After the first pass, and when interference region spilling is
@@ -430,7 +430,7 @@ extern struct df *df;
which backward reach the end of B. */
extern bitmap *live_at_end;
-/* One pass is: collecting registers refs, buiding I-graph, spilling.
+/* One pass is: collecting registers refs, building I-graph, spilling.
And this is how often we already ran that for the current function. */
extern int ra_pass;
@@ -491,7 +491,7 @@ extern HARD_REG_SET usable_regs[N_REG_CLASSES];
/* For each class C the count of hardregs in usable_regs[C]. */
extern unsigned int num_free_regs[N_REG_CLASSES];
/* For each mode M the hardregs, which are MODE_OK for M, and have
- enough space behind them to hold an M value. Additinally
+ enough space behind them to hold an M value. Additionally
if reg R is OK for mode M, but it needs two hardregs, then R+1 will
also be set here, even if R+1 itself is not OK for M. I.e. this
represent the possible resources which could be taken away be a value
diff --git a/gcc/real.c b/gcc/real.c
index 1598254..7d7b4bb 100644
--- a/gcc/real.c
+++ b/gcc/real.c
@@ -3016,7 +3016,7 @@ encode_ieee_extended (fmt, buf, r)
Except for Motorola, which consider exp=0 and explicit
integer bit set to continue to be normalized. In theory
- this descrepency has been taken care of by the difference
+ this discrepancy has been taken care of by the difference
in fmt->emin in round_for_format. */
if (denormal)
diff --git a/gcc/recog.c b/gcc/recog.c
index 48d2915..340b2ee 100644
--- a/gcc/recog.c
+++ b/gcc/recog.c
@@ -694,7 +694,7 @@ validate_replace_src_group (from, to, insn)
note_uses (&PATTERN (insn), validate_replace_src_1, &d);
}
-/* Same as validate_repalace_src_group, but validate by seeing if
+/* Same as validate_replace_src_group, but validate by seeing if
INSN is still valid. */
int
validate_replace_src (from, to, insn)
@@ -2861,7 +2861,7 @@ split_all_insns (upd_life)
}
/* Same as split_all_insns, but do not expect CFG to be available.
- Used by machine depedent reorg passes. */
+ Used by machine dependent reorg passes. */
void
split_all_insns_noflow ()
diff --git a/gcc/reg-stack.c b/gcc/reg-stack.c
index 18bc062..52eebe8 100644
--- a/gcc/reg-stack.c
+++ b/gcc/reg-stack.c
@@ -478,7 +478,7 @@ reg_to_stack (first, file)
/* A QNaN for initializing uninitialized variables.
??? We can't load from constant memory in PIC mode, because
- we're insertting these instructions before the prologue and
+ we're inserting these instructions before the prologue and
the PIC register hasn't been set up. In that case, fall back
on zero, which we can get from `ldz'. */
@@ -1689,7 +1689,7 @@ subst_stack_regs_pat (insn, regstack, pat)
replace_reg (dest, get_hard_regnum (regstack, *dest));
}
- /* Keep operand 1 maching with destination. */
+ /* Keep operand 1 matching with destination. */
if (GET_RTX_CLASS (GET_CODE (pat_src)) == 'c'
&& REG_P (*src1) && REG_P (*src2)
&& REGNO (*src1) != REGNO (*dest))
@@ -2406,7 +2406,7 @@ convert_regs_entry ()
the push/pop code happy, and to not scrog the register stack, we
must put something in these registers. Use a QNaN.
- Note that we are insertting converted code here. This code is
+ Note that we are inserting converted code here. This code is
never seen by the convert_regs pass. */
for (e = ENTRY_BLOCK_PTR->succ; e ; e = e->succ_next)
diff --git a/gcc/regclass.c b/gcc/regclass.c
index 0bb2108..6b6a6dd 100644
--- a/gcc/regclass.c
+++ b/gcc/regclass.c
@@ -817,7 +817,7 @@ struct costs
int mem_cost;
};
-/* Structure used to record preferrences of given pseudo. */
+/* Structure used to record preferences of given pseudo. */
struct reg_pref
{
/* (enum reg_class) prefclass is the preferred class. */
@@ -841,7 +841,7 @@ static struct costs *costs;
static struct costs init_cost;
-/* Record preferrences of each pseudo.
+/* Record preferences of each pseudo.
This is available after `regclass' is run. */
static struct reg_pref *reg_pref;
@@ -2481,7 +2481,7 @@ reg_scan_mark_refs (x, insn, note_flag, min_regno)
&& REGNO (SET_DEST (x)) >= min_regno
/* If the destination pseudo is set more than once, then other
sets might not be to a pointer value (consider access to a
- union in two threads of control in the presense of global
+ union in two threads of control in the presence of global
optimizations). So only set REG_POINTER on the destination
pseudo if this is the only set of that pseudo. */
&& REG_N_SETS (REGNO (SET_DEST (x))) == 1