Diffstat (limited to 'gold/arm.cc')
-rw-r--r--  gold/arm.cc  66
1 file changed, 33 insertions, 33 deletions
diff --git a/gold/arm.cc b/gold/arm.cc
index d67c4a0..6c81d30 100644
--- a/gold/arm.cc
+++ b/gold/arm.cc
@@ -116,7 +116,7 @@ const size_t ARM_TCB_SIZE = 8;
// Ideally we would like to avoid using global variables but this is used
// very in many places and sometimes in loops. If we use a function
-// returning a static instance of Arm_reloc_property_table, it will very
+// returning a static instance of Arm_reloc_property_table, it will be very
// slow in an threaded environment since the static instance needs to be
// locked. The pointer is below initialized in the
// Target::do_select_as_default_target() hook so that we do not spend time
@@ -598,10 +598,10 @@ class Reloc_stub : public Stub
// If this is a local symbol, this is the index in the defining object.
// Otherwise, it is invalid_index for a global symbol.
unsigned int r_sym_;
- // If r_sym_ is invalid index. This points to a global symbol.
- // Otherwise, this points a relobj. We used the unsized and target
+ // If r_sym_ is an invalid index, this points to a global symbol.
+ // Otherwise, it points to a relobj. We used the unsized and target
// independent Symbol and Relobj classes instead of Sized_symbol<32> and
- // Arm_relobj. This is done to avoid making the stub class a template
+ // Arm_relobj, in order to avoid making the stub class a template
// as most of the stub machinery is endianness-neutral. However, it
// may require a bit of casting done by users of this class.
union
@@ -895,8 +895,8 @@ class Stub_table : public Output_data
current_data_size() const
{ return this->current_data_size_for_child(); }
- // Add a STUB with using KEY. Caller is reponsible for avoid adding
- // if already a STUB with the same key has been added.
+ // Add a STUB using KEY. The caller is responsible for avoiding addition
+ // if a STUB with the same key has already been added.
void
add_reloc_stub(Reloc_stub* stub, const Reloc_stub::Key& key)
{
@@ -915,8 +915,8 @@ class Stub_table : public Output_data
}
// Add a Cortex-A8 STUB that fixes up a THUMB branch at ADDRESS.
- // Caller is reponsible for avoid adding if already a STUB with the same
- // address has been added.
+ // The caller is responsible for avoiding addition if a STUB with the same
+ // address has already been added.
void
add_cortex_a8_stub(Arm_address address, Cortex_a8_stub* stub)
{
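
The two comments above describe the same contract: add_reloc_stub and add_cortex_a8_stub insert blindly and rely on the caller to check for an existing stub first. A minimal, self-contained sketch of that caller-side pattern (mock types, not gold's Stub_table or Reloc_stub::Key):

#include <cstddef>
#include <map>
#include <string>

struct Stub { };   // placeholder for a real stub object

class Stub_table_sketch
{
 public:
  // Mirrors the add_* methods above: inserts unconditionally, so the
  // caller must make sure no stub with the same key is already present.
  void
  add_stub(const std::string& key, Stub* stub)
  { this->stubs_[key] = stub; }

  // Caller-side lookup used to honour that contract.
  Stub*
  find_stub(const std::string& key) const
  {
    std::map<std::string, Stub*>::const_iterator p = this->stubs_.find(key);
    return p != this->stubs_.end() ? p->second : NULL;
  }

 private:
  std::map<std::string, Stub*> stubs_;
};

// Intended usage: look up first, add only if absent.
void
maybe_add_stub(Stub_table_sketch* table, const std::string& key)
{
  if (table->find_stub(key) == NULL)
    table->add_stub(key, new Stub());
}
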
@@ -1265,7 +1265,7 @@ class Arm_exidx_fixup
// number of bytes to be deleted in output. If parts of the input EXIDX
// section are merged a heap allocated Arm_exidx_section_offset_map is store
// in the located PSECTION_OFFSET_MAP. The caller owns the map and is
- // reponsible for releasing it.
+ // responsible for releasing it.
template<bool big_endian>
uint32_t
process_exidx_section(const Arm_exidx_input_section* exidx_input_section,
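
For the ownership rule spelled out above, here is a minimal self-contained sketch (mock names, not gold's actual process_exidx_section declaration) of how the out-parameter and the caller's responsibility fit together:

#include <cstddef>
#include <map>
#include <stdint.h>

typedef std::map<uint32_t, int32_t> Offset_map_sketch;

// Returns the number of bytes to delete from the output.  Only when input
// entries are actually merged does it store a heap-allocated map in *PMAP;
// otherwise *PMAP is left untouched.
uint32_t
process_section_sketch(Offset_map_sketch** pmap)
{
  uint32_t deleted_bytes = 0;
  bool merged = false;
  // ... merging logic elided in this sketch ...
  if (merged)
    *pmap = new Offset_map_sketch();
  return deleted_bytes;
}

void
caller_sketch()
{
  Offset_map_sketch* map = NULL;
  uint32_t deleted_bytes = process_section_sketch(&map);
  // The caller owns the map and must release it.
  delete map;               // harmless if no map was allocated
  (void) deleted_bytes;
}
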
@@ -1704,7 +1704,7 @@ class Arm_relobj : public Sized_relobj<32, big_endian>
Target_arm<big_endian>*);
// Find the linked text section of an EXIDX section by looking at the
- // first reloction of the EXIDX section. PSHDR points to the section
+ // first relocation of the EXIDX section. PSHDR points to the section
// headers of a relocation section and PSYMS points to the local symbols.
// PSHNDX points to a location storing the text section index if found.
// Return whether we can find the linked section.
@@ -2023,7 +2023,7 @@ class Arm_output_data_got : public Output_data_got<32, big_endian>
std::vector<Static_reloc> static_relocs_;
};
-// The ARM target has many relocation types with odd-sizes or incontigious
+// The ARM target has many relocation types with odd sizes or noncontiguous
// bits. The default handling of relocatable relocation cannot process these
// relocations. So we have to extend the default code.
@@ -2963,7 +2963,7 @@ class Arm_relocate_functions : public Relocate_functions<32, big_endian>
typedef enum
{
STATUS_OKAY, // No error during relocation.
- STATUS_OVERFLOW, // Relocation oveflow.
+ STATUS_OVERFLOW, // Relocation overflow.
STATUS_BAD_RELOC // Relocation cannot be applied.
} Status;
@@ -3189,7 +3189,7 @@ class Arm_relocate_functions : public Relocate_functions<32, big_endian>
// Insert OFFSET to a 32-bit THUMB conditional branch and return the lower
// instruction. LOWER_INSN is the original lower instruction of the branch.
- // Caller is reponsible for overflow checking.
+ // The caller is responsible for overflow checking.
static inline uint16_t
thumb32_cond_branch_lower(uint16_t lower_insn, int32_t offset)
{
@@ -4435,7 +4435,7 @@ Reloc_stub::Key::name() const
// Determine the type of stub needed, if any, for a relocation of R_TYPE at
// LOCATION to DESTINATION.
// This code is based on the arm_type_of_stub function in
-// bfd/elf32-arm.c. We have changed the interface a liitle to keep the Stub
+// bfd/elf32-arm.c. We have changed the interface a little to keep the Stub
// class simple.
Stub_type
@@ -4839,7 +4839,7 @@ Stub_factory::Stub_factory()
// Stub_table methods.
-// Removel all Cortex-A8 stub.
+// Remove all Cortex-A8 stubs.
template<bool big_endian>
void
@@ -5525,7 +5525,7 @@ Arm_exidx_fixup::process_exidx_section(
// dropping. If there is no entry (x0, y0) for an input offset x0,
// the output offset y0 of it is determined by the output offset y1 of
// the smallest input offset x1 > x0 that there is an (x1, y1) entry
- // in the map. If y1 is not -1, then y0 = y1 + x0 - x1. Othewise, y1
+ // in the map. If y1 is not -1, then y0 = y1 + x0 - x1. Otherwise,
// y0 is also -1.
if (delete_entry != prev_delete_entry && i != 0)
this->update_offset_map(i - 1, deleted_bytes, prev_delete_entry);
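
The mapping rule in the comment above can be made concrete with a small sketch (assumed container type, not gold's Arm_exidx_section_offset_map): the keys are input offsets x1, the values are output offsets y1, and -1 marks deleted regions.

#include <map>
#include <stdint.h>

typedef std::map<uint32_t, int32_t> Offset_map;  // input offset -> output offset

// Map an arbitrary input offset X0.  If the map has no entry for X0 itself,
// use the entry (x1, y1) with the smallest x1 > x0.  A y1 of -1 means the
// region is deleted, so x0 maps to -1 too; otherwise y0 = y1 + x0 - x1.
int32_t
map_input_offset(const Offset_map& offset_map, uint32_t x0)
{
  Offset_map::const_iterator p = offset_map.lower_bound(x0);
  if (p == offset_map.end())
    return -1;            // assume offsets past the last entry are dropped
  uint32_t x1 = p->first;
  int32_t y1 = p->second;
  if (y1 == -1)
    return -1;
  // y0 = y1 + x0 - x1, written so the unsigned subtraction stays nonnegative.
  return y1 - static_cast<int32_t>(x1 - x0);
}
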
@@ -5687,7 +5687,7 @@ Arm_output_section<big_endian>::group_sections(
section_size_type section_end_offset =
section_begin_offset + p->data_size();
- // Check to see if we should group the previously seens sections.
+ // Check to see if we should group the previously seen sections.
switch (state)
{
case NO_GROUP:
@@ -6335,7 +6335,7 @@ Arm_relobj<big_endian>::scan_sections_for_stubs(
// harder because we cannot access this information. So we override the
// do_count_local_symbol in parent and scan local symbols to mark
// THUMB functions. This is not the most efficient way but I do not want to
-// slow down other ports by calling a per symbol targer hook inside
+// slow down other ports by calling a per symbol target hook inside
// Sized_relobj<size, big_endian>::do_count_local_symbols.
template<bool big_endian>
@@ -6353,7 +6353,7 @@ Arm_relobj<big_endian>::do_count_local_symbols(
if (loccount == 0)
return;
- // Intialize the thumb function bit-vector.
+ // Initialize the thumb function bit-vector.
std::vector<bool> empty_vector(loccount, false);
this->local_symbol_is_thumb_function_.swap(empty_vector);
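
The convention the scan relies on is worth spelling out.  A minimal sketch of the check assumed here (the real test lives inside do_count_local_symbols): ARM ELF marks a Thumb function either with the ARM-specific symbol type STT_ARM_TFUNC or with an STT_FUNC symbol whose value has bit 0 set.

#include <stdint.h>

const unsigned int elf_stt_func = 2;        // STT_FUNC
const unsigned int elf_stt_arm_tfunc = 13;  // STT_ARM_TFUNC (== STT_LOPROC)

// Return whether a local symbol should have its bit set in the
// thumb-function bit-vector initialized above.
inline bool
is_thumb_function(unsigned int st_type, uint32_t st_value)
{
  return (st_type == elf_stt_arm_tfunc
          || (st_type == elf_stt_func && (st_value & 1) != 0));
}
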
@@ -6528,9 +6528,9 @@ Arm_relobj<big_endian>::do_relocate_sections(
}
}
-// Find the linked text section of an EXIDX section by looking the the first
+// Find the linked text section of an EXIDX section by looking at the first
// relocation. 4.4.1 of the EHABI specifications says that an EXIDX section
-// must be linked to to its associated code section via the sh_link field of
+// must be linked to its associated code section via the sh_link field of
// its section header. However, some tools are broken and the link is not
// always set. LD just drops such an EXIDX section silently, causing the
// associated code not unwindabled. Here we try a little bit harder to
@@ -6663,7 +6663,7 @@ Arm_relobj<big_endian>::make_exidx_input_section(
exidx_input_section->set_has_errors();
}
else if ((text_shdr.get_sh_flags() & elfcpp::SHF_EXECINSTR) == 0)
- // I would like to make this an error but currenlty ld just ignores
+ // I would like to make this an error but currently ld just ignores
// this.
gold_warning(_("EXIDX section %s(%u) links to non-executable section "
"%s(%u) in %s"),
@@ -6827,7 +6827,7 @@ Arm_relobj<big_endian>::do_read_symbols(Read_symbols_data* sd)
// Process relocations for garbage collection. The ARM target uses .ARM.exidx
// sections for unwinding. These sections are referenced implicitly by
-// text sections linked in the section headers. If we ignore these implict
+// text sections linked in the section headers. If we ignore these implicit
// references, the .ARM.exidx sections and any .ARM.extab sections they use
// will be garbage-collected incorrectly. Hence we override the same function
// in the base class to handle these implicit references.
@@ -8312,7 +8312,7 @@ Target_arm<big_endian>::Scan::global(Symbol_table* symtab,
// All the relocation above are branches except for the PREL31 ones.
// A PREL31 relocation can point to a personality function in a shared
// library. In that case we want to use a PLT because we want to
- // call the personality routine and the dyanmic linkers we care about
+ // call the personality routine and the dynamic linkers we care about
// do not support dynamic PREL31 relocations. An REL31 relocation may
// point to a function whose unwinding behaviour is being described but
// we will not mistakenly generate a PLT for that because we should use
@@ -8646,7 +8646,7 @@ Target_arm<big_endian>::do_finalize_sections(
if (exidx_section != NULL
&& exidx_section->type() == elfcpp::SHT_ARM_EXIDX)
{
- // Create __exidx_start and __exdix_end symbols.
+ // Create __exidx_start and __exidx_end symbols.
symtab->define_in_output_data("__exidx_start", NULL,
Symbol_table::PREDEFINED,
exidx_section, 0, 0, elfcpp::STT_OBJECT,
@@ -9873,7 +9873,7 @@ Target_arm<big_endian>::get_real_reloc_type(unsigned int r_type)
return elfcpp::R_ARM_ABS32;
case elfcpp::R_ARM_TARGET2:
- // This can be any reloc type but ususally is R_ARM_GOT_PREL
+ // This can be any reloc type but usually is R_ARM_GOT_PREL
return elfcpp::R_ARM_GOT_PREL;
default:
@@ -10457,7 +10457,7 @@ Target_arm<big_endian>::merge_object_attributes(
|| (out_attr[elfcpp::Tag_ABI_align8_preserved].int_value()
== 0)))
{
- // This error message should be enabled once all non-conformant
+ // This error message should be enabled once all non-conforming
// binaries in the toolchain have had the attributes set
// properly.
// gold_error(_("output 8-byte data alignment conflicts with %s"),
@@ -10634,7 +10634,7 @@ Target_arm<big_endian>::merge_object_attributes(
}
break;
case elfcpp::Tag_ABI_VFP_args:
- // Aready done.
+ // Already done.
break;
case elfcpp::Tag_ABI_WMMX_args:
if (in_attr[i].int_value() != out_attr[i].int_value()
@@ -11339,12 +11339,12 @@ Target_arm<big_endian>::scan_section_for_stubs(
// Group input sections for stub generation.
//
-// We goup input sections in an output sections so that the total size,
+// We group input sections in an output section so that the total size,
// including any padding space due to alignment is smaller than GROUP_SIZE
// unless the only input section in group is bigger than GROUP_SIZE already.
// Then an ARM stub table is created to follow the last input section
// in group. For each group an ARM stub table is created an is placed
-// after the last group. If STUB_ALWATS_AFTER_BRANCH is false, we further
+// after the last group. If STUB_ALWAYS_AFTER_BRANCH is false, we further
// extend the group after the stub table.
template<bool big_endian>
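
A minimal sketch of the size rule described above (an assumed layout description, not gold's group_sections state machine): measure a group from its first section's output offset, so alignment padding between members is automatically counted, and always accept at least one member even if it alone exceeds GROUP_SIZE.

#include <cstddef>
#include <stdint.h>
#include <vector>

struct Input_extent
{
  uint64_t begin_offset;   // offset of the input section in the output section
  uint64_t size;           // data size of the input section
};

// Return the index one past the last member of the group starting at FIRST.
// A stub table for the group would then be placed after member END - 1.
size_t
group_end(const std::vector<Input_extent>& sections, size_t first,
          uint64_t group_size)
{
  uint64_t group_begin = sections[first].begin_offset;
  size_t end = first + 1;
  while (end < sections.size()
         && (sections[end].begin_offset + sections[end].size - group_begin
             <= group_size))
    ++end;
  return end;
}
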
@@ -11391,7 +11391,7 @@ Target_arm<big_endian>::do_relax(
{
// Determine the stub group size. The group size is the absolute
// value of the parameter --stub-group-size. If --stub-group-size
- // is passed a negative value, we restict stubs to be always after
+ // is passed a negative value, we restrict stubs to be always after
// the stubbed branches.
int32_t stub_group_size_param =
parameters->options().stub_group_size();
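
A small sketch of the sign convention just described (illustrative names, not gold's internals): the magnitude of --stub-group-size selects the group size, and a negative value additionally forces stubs to be placed after the branches they serve.

#include <stdint.h>

struct Stub_group_policy
{
  uint64_t group_size;
  bool stubs_always_after_branch;
};

Stub_group_policy
interpret_stub_group_size(int32_t stub_group_size_param)
{
  Stub_group_policy policy;
  int64_t param = stub_group_size_param;
  policy.stubs_always_after_branch = param < 0;
  policy.group_size = static_cast<uint64_t>(param < 0 ? -param : param);
  return policy;
}
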
@@ -11657,7 +11657,7 @@ Target_arm<big_endian>::do_attributes_order(int num) const
{
// Reorder the known object attributes in output. We want to move
// Tag_conformance to position 4 and Tag_conformance to position 5
- // and shift eveything between 4 .. Tag_conformance - 1 to make room.
+ // and shift everything between 4 .. Tag_conformance - 1 to make room.
if (num == 4)
return elfcpp::Tag_conformance;
if (num == 5)
@@ -11859,7 +11859,7 @@ Target_arm<big_endian>::apply_cortex_a8_workaround(
switch (stub->stub_template()->type())
{
case arm_stub_a8_veneer_b_cond:
- // For a conditional branch, we re-write it to be a uncondition
+ // For a conditional branch, we re-write it to be an unconditional
// branch to the stub. We use the THUMB-2 encoding here.
upper_insn = 0xf000U;
lower_insn = 0xb800U;