aboutsummaryrefslogtreecommitdiff
path: root/bfd
diff options
context:
space:
mode:
authorSiddhesh Poyarekar <siddesh.poyarekar@arm.com>2020-09-11 09:18:11 +0530
committerLuis Machado <luis.machado@linaro.org>2020-10-20 15:04:26 -0300
commit7ff36d1a856fce85c7ad854270289815758a8816 (patch)
tree89452b35cd6d65c63613b6a8916445221fd55639 /bfd
parentfa6ca5e254bb95725389f8eb87cd426def4e23f1 (diff)
downloadgdb-7ff36d1a856fce85c7ad854270289815758a8816.zip
gdb-7ff36d1a856fce85c7ad854270289815758a8816.tar.gz
gdb-7ff36d1a856fce85c7ad854270289815758a8816.tar.bz2
[Morello] Pad section alignment to account for capability range format
The capability format has limitations on the alignment and length of capability bounds, which are subject to rounding. Add alignment and padding at the boundaries of such long (typically >16M) sections so that any capabilities referencing these sections do not end up overlapping into neighbouring sections. There are two cases where this is in use. The first and most important due to the current implementation is the range for PCC, which needs to span all executable sections and all PLT and GOT sections. The other case is for linker and ldscript defined symbols that may be used in dynamic relocations. bfd/ChangeLog: 2020-10-20 Siddhesh Poyarekar <siddesh.poyarekar@arm.com> * elfnn-aarch64.c (elf_aarch64_link_hash_table): New member. (section_start_symbol, c64_valid_cap_range, exponent): Move up. (sec_change_queue): New structure. (queue_section_padding, record_section_change, elfNN_c64_resize_sections): New functions. (bfd_elfNN_aarch64_init_maps): Add info argument. Adjust callers. * elfxx-aarch64.h (bfd_elf64_aarch64_init_maps, bfd_elf32_aarch64_init_maps): Add info argument. (elf64_c64_resize_sections, elf32_c64_resize_sections): New function declarations. ld/ChangeLog: 2020-10-20 Siddhesh Poyarekar <siddesh.poyarekar@arm.com> * emultempl/aarch64elf.em (elf64_c64_pad_section): New function. (gld${EMULATION_NAME}_after_allocation): Resize C64 sections. * ldlang.c (lang_add_newdot): New function. * ldlang.h (lang_add_newdot): New function declaration. * testsuite/ld-aarch64/aarch64-elf.exp: Add new test. * testsuite/ld-aarch64/morello-sec-round.d: New file. * testsuite/ld-aarch64/morello-sec-round.ld: New file. * testsuite/ld-aarch64/morello-sec-round.s: New file.
Diffstat (limited to 'bfd')
-rw-r--r--bfd/ChangeLog15
-rw-r--r--bfd/elfnn-aarch64.c370
-rw-r--r--bfd/elfxx-aarch64.h13
3 files changed, 338 insertions, 60 deletions
diff --git a/bfd/ChangeLog b/bfd/ChangeLog
index e714ede..66e167c 100644
--- a/bfd/ChangeLog
+++ b/bfd/ChangeLog
@@ -1,5 +1,20 @@
2020-10-20 Siddhesh Poyarekar <siddesh.poyarekar@arm.com>
+ * elfnn-aarch64.c (elf_aarch64_link_hash_table): New member.
+ (section_start_symbol, c64_valid_cap_range, exponent): Move
+ up.
+ (sec_change_queue): New structure.
+ (queue_section_padding, record_section_change,
+ elfNN_c64_resize_sections): New functions.
+ (bfd_elfNN_aarch64_init_maps): Add info argument. Adjust
+ callers.
+ * elfxx-aarch64.h (bfd_elf64_aarch64_init_maps,
+ bfd_elf32_aarch64_init_maps): Add info argument.
+ (elf64_c64_resize_sections, elf32_c64_resize_sections): New
+ function declarations.
+
+2020-10-20 Siddhesh Poyarekar <siddesh.poyarekar@arm.com>
+
* elf-bfd.h (elf_backend_data): New callback
elf_backend_eh_frame_augmentation_char.
* elf-eh-frame.c (_bfd_elf_parse_eh_frame): Use it.
diff --git a/bfd/elfnn-aarch64.c b/bfd/elfnn-aarch64.c
index f1a43e1..83158d6 100644
--- a/bfd/elfnn-aarch64.c
+++ b/bfd/elfnn-aarch64.c
@@ -3049,6 +3049,7 @@ struct elf_aarch64_link_hash_table
/* Used for capability relocations. */
asection *srelcaps;
int c64_rel;
+ bfd_boolean c64_output;
};
/* Create an entry in an AArch64 ELF linker hash table. */
@@ -4694,6 +4695,306 @@ _bfd_aarch64_erratum_843419_scan (bfd *input_bfd, asection *section,
return TRUE;
}
+static bfd_boolean
+section_start_symbol (bfd *abfd ATTRIBUTE_UNUSED, asection *section,
+ void *valp)
+{
+ return section->vma == *(bfd_vma *)valp;
+}
+
+/* Capability format functions. */
+
+static unsigned
+exponent (uint64_t len)
+{
+#define CAP_MAX_EXPONENT 50
+ /* Size is a 65 bit value, so there's an implicit 0 MSB. */
+ unsigned zeroes = __builtin_clzl (len) + 1;
+
+ /* All bits up to and including CAP_MW - 2 are zero. */
+ if (CAP_MAX_EXPONENT < zeroes)
+ return (unsigned) -1;
+ else
+ return CAP_MAX_EXPONENT - zeroes;
+#undef CAP_MAX_EXPONENT
+}
+
+#define ONES(x) ((1ULL << ((x) + 1)) - 1)
+#define ALIGN_UP(x, a) (((x) + ONES (a)) & (~ONES (a)))
+
+static bfd_boolean
+c64_valid_cap_range (bfd_vma *basep, bfd_vma *limitp)
+{
+ bfd_vma base = *basep, size = *limitp - *basep;
+
+ unsigned e, old_e;
+
+ if ((e = exponent (size)) == (unsigned) -1)
+ return TRUE;
+
+ size = ALIGN_UP (size, e + 3);
+ old_e = e;
+ e = exponent (size);
+ if (old_e != e)
+ size = ALIGN_UP (size, e + 3);
+
+ base = ALIGN_UP (base, e + 3);
+
+ if (base == *basep && *limitp == base + size)
+ return TRUE;
+
+ *basep = base;
+ *limitp = base + size;
+ return FALSE;
+}
+
+struct sec_change_queue
+{
+ asection *sec;
+ struct sec_change_queue *next;
+};
+
+/* Queue up the change, sorted in order of the output section vma. */
+
+static void
+queue_section_padding (struct sec_change_queue **queue, asection *sec)
+{
+ struct sec_change_queue *q = *queue, *last_q = NULL, *n;
+
+ while (q != NULL)
+ {
+ if (q->sec->vma > sec->vma)
+ break;
+ last_q = q;
+ q = q->next;
+ }
+
+ n = bfd_zmalloc (sizeof (struct sec_change_queue));
+
+ if (last_q == NULL)
+ *queue = n;
+ else
+ {
+ n->next = q;
+ last_q->next = n;
+ }
+
+ n->sec = sec;
+}
+
+/* Check if the bounds covering all sections between LOW_SEC and HIGH_SEC will
+ get rounded off in the Morello capability format and if it does, queue up a
+ change to fix up the section layout. */
+static inline void
+record_section_change (asection *sec, struct sec_change_queue **queue)
+{
+ bfd_vma low = sec->vma;
+ bfd_vma high = sec->vma + sec->size;
+
+ if (!c64_valid_cap_range (&low, &high))
+ queue_section_padding (queue, sec);
+}
+
+/* Make sure that all capabilities that refer to sections have bounds that
+ won't overlap with neighbouring sections. This is needed in two specific
+ cases. The first case is that of PCC, which needs to span across all
+ executable sections as well as the GOT and PLT sections in the output
+ binary. The second case is that of linker and ldscript defined symbols that
+ indicate start and/or end of sections.
+
+ In both cases, overlap of capability bounds are avoided by aligning the base
+ of the section and if necessary, adding a pad at the end of the section so
+ that the section following it starts only after the pad. */
+
+void
+elfNN_c64_resize_sections (bfd *output_bfd, struct bfd_link_info *info,
+ void (*c64_pad_section) (asection *, bfd_vma),
+ void (*layout_sections_again) (void))
+{
+ asection *sec, *pcc_low_sec = NULL, *pcc_high_sec = NULL;
+ struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
+ bfd_vma low = (bfd_vma) -1, high = 0;
+
+ htab->layout_sections_again = layout_sections_again;
+
+ if (!htab->c64_output)
+ return;
+
+ struct sec_change_queue *queue = NULL;
+
+ /* First, walk through all the relocations to find those referring to linker
+ defined and ldscript defined symbols since we set their range to their
+ output sections. */
+ for (bfd *input_bfd = info->input_bfds;
+ htab->c64_rel && input_bfd != NULL; input_bfd = input_bfd->link.next)
+ {
+ Elf_Internal_Shdr *symtab_hdr;
+
+ symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
+ if (symtab_hdr->sh_info == 0)
+ continue;
+
+ for (sec = input_bfd->sections; sec != NULL; sec = sec->next)
+ {
+ Elf_Internal_Rela *irelaend, *irela;
+
+ /* If there aren't any relocs, then there's nothing more to do. */
+ if ((sec->flags & SEC_RELOC) == 0 || sec->reloc_count == 0)
+ continue;
+
+ irela = _bfd_elf_link_read_relocs (input_bfd, sec, NULL, NULL,
+ info->keep_memory);
+ if (irela == NULL)
+ continue;
+
+ /* Now examine each relocation. */
+ irelaend = irela + sec->reloc_count;
+ for (; irela < irelaend; irela++)
+ {
+ unsigned int r_indx;
+ struct elf_link_hash_entry *h;
+ int e_indx;
+ asection *os;
+
+ r_indx = ELFNN_R_SYM (irela->r_info);
+
+ /* Linker defined or linker script defined symbols are always in
+ the symbol hash. */
+ if (r_indx < symtab_hdr->sh_info)
+ continue;
+
+ e_indx = r_indx - symtab_hdr->sh_info;
+ h = elf_sym_hashes (input_bfd)[e_indx];
+
+ /* XXX Does this ever happen? */
+ if (h == NULL)
+ continue;
+
+ os = h->root.u.def.section->output_section;
+
+ if (h->root.linker_def)
+ record_section_change (os, &queue);
+ else if (h->root.ldscript_def)
+ {
+ const char *name = h->root.root.string;
+ size_t len = strlen (name);
+
+ if (len > 8 && name[0] == '_' && name[1] == '_'
+ && (!strncmp (name + 2, "start_", 6)
+ || !strcmp (name + len - 6, "_start")))
+
+ {
+ bfd_vma value = os->vma + os->size;
+
+ os = bfd_sections_find_if (info->output_bfd,
+ section_start_symbol, &value);
+
+ if (os != NULL)
+ record_section_change (os, &queue);
+ }
+ /* XXX We're overfitting here because the offset of H within
+ the output section is not yet resolved and ldscript
+ defined symbols do not have input section information. */
+ else
+ record_section_change (os, &queue);
+ }
+ }
+ }
+ }
+
+ /* Next, walk through output sections to find the PCC span and add a padding
+ at the end to ensure that PCC bounds don't bleed into neighbouring
+ sections. For now PCC needs to encompass all code sections, .got, .plt
+ and .got.plt. */
+ for (sec = output_bfd->sections; sec != NULL; sec = sec->next)
+ {
+ /* XXX This is a good place to figure out if there are any readable or
+ writable sections in the PCC range that are not in the list of
+ sections we want the PCC to span and then warn the user of it. */
+
+#define NOT_OP_SECTION(s) ((s) == NULL || (s)->output_section != sec)
+
+ if ((sec->flags & SEC_CODE) == 0
+ && NOT_OP_SECTION (htab->root.sgotplt)
+ && NOT_OP_SECTION (htab->root.igotplt)
+ && NOT_OP_SECTION (htab->root.sgot)
+ && NOT_OP_SECTION (htab->root.splt)
+ && NOT_OP_SECTION (htab->root.iplt))
+ continue;
+
+ if (sec->vma < low)
+ {
+ low = sec->vma;
+ pcc_low_sec = sec;
+ }
+ if (sec->vma + sec->size > high)
+ {
+ high = sec->vma + sec->size;
+ pcc_high_sec = sec;
+ }
+
+#undef NOT_OP_SECTION
+ }
+
+ /* Sequentially add alignment and padding as required. We also need to
+ account for the PCC-related alignment and padding here since its
+ requirements could change based on the range of sections it encompasses
+ and whether they need to be padded or aligned. */
+ while (queue)
+ {
+ unsigned align = 0;
+ bfd_vma padding = 0;
+
+ low = queue->sec->vma;
+ high = queue->sec->vma + queue->sec->size;
+
+ if (!c64_valid_cap_range (&low, &high))
+ {
+ align = __builtin_ctzl (low);
+
+ if (queue->sec->alignment_power < align)
+ queue->sec->alignment_power = align;
+
+ padding = high - queue->sec->vma - queue->sec->size;
+
+ if (queue->sec != pcc_high_sec)
+ {
+ c64_pad_section (queue->sec, padding);
+ padding = 0;
+ }
+ }
+
+ /* If we have crossed all sections within the PCC range, set up alignment
+ and padding for the PCC range. */
+ if (pcc_high_sec != NULL && pcc_low_sec != NULL
+ && (queue->next == NULL
+ || queue->next->sec->vma > pcc_high_sec->vma))
+ {
+ /* Layout sections since it affects the final range of PCC. */
+ (*htab->layout_sections_again) ();
+
+ bfd_vma pcc_low = pcc_low_sec->vma;
+ bfd_vma pcc_high = pcc_high_sec->vma + pcc_high_sec->size + padding;
+
+ if (!c64_valid_cap_range (&pcc_low, &pcc_high))
+ {
+ align = __builtin_ctzl (pcc_low);
+ if (pcc_low_sec->alignment_power < align)
+ pcc_low_sec->alignment_power = align;
+
+ padding = pcc_high - pcc_high_sec->vma - pcc_high_sec->size;
+ c64_pad_section (pcc_high_sec, padding);
+ }
+ }
+
+ (*htab->layout_sections_again) ();
+
+ struct sec_change_queue *queue_free = queue;
+
+ queue = queue->next;
+ free (queue_free);
+ }
+}
/* Determine and set the size of the stub section for a final link.
@@ -5194,7 +5495,7 @@ elfNN_aarch64_section_map_add (bfd *abfd, asection *sec, char type,
/* Initialise maps of insn/data for input BFDs. */
void
-bfd_elfNN_aarch64_init_maps (bfd *abfd)
+bfd_elfNN_aarch64_init_maps (bfd *abfd, struct bfd_link_info *info)
{
Elf_Internal_Sym *isymbuf;
Elf_Internal_Shdr *hdr;
@@ -5222,6 +5523,8 @@ bfd_elfNN_aarch64_init_maps (bfd *abfd)
if (isymbuf == NULL)
return;
+ struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table ((info));
+
for (i = 0; i < localsyms; i++)
{
Elf_Internal_Sym *isym = &isymbuf[i];
@@ -5236,7 +5539,12 @@ bfd_elfNN_aarch64_init_maps (bfd *abfd)
if (bfd_is_aarch64_special_symbol_name
(name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
- elfNN_aarch64_section_map_add (abfd, sec, name[1], isym->st_value);
+ {
+ elfNN_aarch64_section_map_add (abfd, sec, name[1],
+ isym->st_value);
+ if (!htab->c64_output && name[1] == 'c')
+ htab->c64_output = TRUE;
+ }
}
}
elf_aarch64_tdata (abfd)->secmaps_initialised = TRUE;
@@ -5923,53 +6231,6 @@ aarch64_relocation_aginst_gp_p (bfd_reloc_code_real_type reloc)
|| reloc == BFD_RELOC_AARCH64_MOVW_GOTOFF_G1);
}
-/* Capability format functions. */
-
-static unsigned
-exponent (uint64_t len)
-{
-#define CAP_MAX_EXPONENT 50
- /* Size is a 65 bit value, so there's an implicit 0 MSB. */
- unsigned zeroes = __builtin_clzl (len) + 1;
-
- /* All bits up to and including CAP_MW - 2 are zero. */
- if (CAP_MAX_EXPONENT < zeroes)
- return (unsigned) -1;
- else
- return CAP_MAX_EXPONENT - zeroes;
-#undef CAP_MAX_EXPONENT
-}
-
-#define ONES(x) ((1ULL << ((x) + 1)) - 1)
-#define ALIGN_UP(x, a) (((x) + ONES (a)) & (~ONES (a)))
-
-static bfd_boolean
-c64_valid_cap_range (bfd_vma *basep, bfd_vma *limitp)
-{
- bfd_vma base = *basep, size = *limitp - *basep;
-
- unsigned e, old_e;
-
- if ((e = exponent (size)) == (unsigned) -1)
- return TRUE;
-
- size = ALIGN_UP (size, e + 3);
- old_e = e;
- e = exponent (size);
- if (old_e != e)
- size = ALIGN_UP (size, e + 3);
-
- base = ALIGN_UP (base, e + 3);
-
- if (base == *basep && *limitp == base + size)
- return TRUE;
-
- *basep = base;
- *limitp = base + size;
- return FALSE;
-}
-
-
/* Build capability meta data, i.e. size and permissions for a capability. */
static bfd_vma
@@ -5993,13 +6254,6 @@ cap_meta (size_t size, const asection *sec)
abort ();
}
-static bfd_boolean
-section_start_symbol (bfd *abfd ATTRIBUTE_UNUSED, asection *section,
- void *valp)
-{
- return section->vma == *(bfd_vma *)valp;
-}
-
static bfd_reloc_status_type
c64_fixup_frag (bfd *input_bfd, struct bfd_link_info *info,
bfd_reloc_code_real_type bfd_r_type, Elf_Internal_Sym *sym,
@@ -8477,7 +8731,7 @@ elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
symtab_hdr = &elf_symtab_hdr (abfd);
sym_hashes = elf_sym_hashes (abfd);
- bfd_elfNN_aarch64_init_maps (abfd);
+ bfd_elfNN_aarch64_init_maps (abfd, info);
rel_end = relocs + sec->reloc_count;
for (rel = relocs; rel < rel_end; rel++)
@@ -9904,7 +10158,7 @@ elfNN_aarch64_size_dynamic_sections (bfd *output_bfd,
{
if (!is_aarch64_elf (ibfd))
continue;
- bfd_elfNN_aarch64_init_maps (ibfd);
+ bfd_elfNN_aarch64_init_maps (ibfd, info);
}
/* We now have determined the sizes of the various dynamic sections.
diff --git a/bfd/elfxx-aarch64.h b/bfd/elfxx-aarch64.h
index 0bb78aa..c9fc01b 100644
--- a/bfd/elfxx-aarch64.h
+++ b/bfd/elfxx-aarch64.h
@@ -19,10 +19,10 @@
see <http://www.gnu.org/licenses/>. */
extern void bfd_elf64_aarch64_init_maps
- (bfd *);
+ (bfd *, struct bfd_link_info *);
extern void bfd_elf32_aarch64_init_maps
- (bfd *);
+ (bfd *, struct bfd_link_info *);
/* Types of PLTs based on the level of security. This would be a
bit-mask to denote which of the combinations of security features
@@ -82,6 +82,11 @@ extern bfd_boolean elf64_aarch64_size_stubs
(bfd *, bfd *, struct bfd_link_info *, bfd_signed_vma,
struct bfd_section * (*) (const char *, struct bfd_section *),
void (*) (void));
+
+extern void elf64_c64_resize_sections (bfd *, struct bfd_link_info *,
+ void (*) (asection *, bfd_vma),
+ void (*) (void));
+
extern bfd_boolean elf64_aarch64_build_stubs
(struct bfd_link_info *);
/* AArch64 stub generation support for ELF32. Called from the linker. */
@@ -96,6 +101,10 @@ extern bfd_boolean elf32_aarch64_size_stubs
extern bfd_boolean elf32_aarch64_build_stubs
(struct bfd_link_info *);
+extern void elf32_c64_resize_sections (bfd *, struct bfd_link_info *,
+ void (*) (asection *, bfd_vma),
+ void (*) (void));
+
/* Take the PAGE component of an address or offset. */
#define PG(x) ((x) & ~ (bfd_vma) 0xfff)
#define PG_OFFSET(x) ((x) & (bfd_vma) 0xfff)