author    Alan Modra <amodra@gmail.com>  2016-08-30 21:02:58 +0930
committer Alan Modra <amodra@gmail.com>  2016-08-31 19:22:31 +0930
commit    09f92717713cfc7595b29caa3f017f88e7f7e279 (patch)
tree      f6de0878a217d7f92872f190b467a0810d7a02ae
parent    3e8c34ea9d6ede278cc1a49fab7ccac7971aa32f (diff)
PowerPC64, correct grouping of stubs for ld.bfd
Like 57f6d32d, this patch ensures that sections containing external
conditional branches limit the group size.

	* elf64-ppc.c (group_sections): Delete stub14_group_size.  Instead,
	track max group size with a new "group_size" var that is reduced
	by a factor of 1024 from the 24-bit branch size whenever a 14-bit
	branch is seen.
-rw-r--r--  bfd/ChangeLog    |  7
-rw-r--r--  bfd/elf64-ppc.c  | 43
2 files changed, 26 insertions(+), 24 deletions(-)
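
The factor of 1024 follows from the branch encodings the commit message refers
to: the unconditional branch carries a 24-bit displacement field, while the
conditional form carries only a 14-bit field, so its reach is 2^10 times
smaller.  A minimal standalone sketch (illustrative only, not part of the
patch) showing how the reduced limits relate to the default group sizes used
in the diff below:

/* Illustrative sketch only -- not code from the patch.  Shows why a
   section containing a 14-bit conditional branch forces the stub
   group size down by a factor of 1024 (2^10).  */
#include <stdio.h>

int
main (void)
{
  /* Default 24-bit-branch group sizes from the patch.  */
  unsigned long before_branch = 0x1e00000;  /* stubs before branches */
  unsigned long after_branch  = 0x1c00000;  /* stubs after branches  */

  /* A conditional branch has a 14-bit displacement field, 10 bits
     fewer than the 24-bit unconditional branch, so the usable group
     size shrinks by 2^10.  */
  printf ("14-bit group size (before): 0x%lx\n", before_branch >> 10);
  printf ("14-bit group size (after):  0x%lx\n", after_branch >> 10);
  return 0;
}

The two printed values, 0x7800 and 0x7000, match the old stub14_group_size
defaults that this patch deletes.
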
diff --git a/bfd/ChangeLog b/bfd/ChangeLog
index e1f5c87..59dfb2c 100644
--- a/bfd/ChangeLog
+++ b/bfd/ChangeLog
@@ -1,5 +1,12 @@
2016-08-31 Alan Modra <amodra@gmail.com>
+ * elf64-ppc.c (group_sections): Delete stub14_group_size. Instead,
+ track max group size with a new "group_size" var that is reduced
+ by a factor of 1024 from the 24-bit branch size whenever a 14-bit
+ branch is seen.
+
+2016-08-31 Alan Modra <amodra@gmail.com>
+
* elf32-ppc.c (ppc_elf_section_processing): Delete.
(elf_backend_section_processing): Don't define.
(ppc_elf_modify_segment_map): Set p_flags and mark valid. Don't
diff --git a/bfd/elf64-ppc.c b/bfd/elf64-ppc.c
index a9cedb5..3a9a1cb 100644
--- a/bfd/elf64-ppc.c
+++ b/bfd/elf64-ppc.c
@@ -12049,7 +12049,6 @@ group_sections (struct bfd_link_info *info,
{
struct ppc_link_hash_table *htab;
asection *osec;
- bfd_size_type stub14_group_size;
bfd_boolean suppress_size_errors;
htab = ppc_hash_table (info);
@@ -12057,20 +12056,13 @@ group_sections (struct bfd_link_info *info,
return FALSE;
suppress_size_errors = FALSE;
- stub14_group_size = stub_group_size >> 10;
if (stub_group_size == 1)
{
/* Default values. */
if (stubs_always_before_branch)
- {
- stub_group_size = 0x1e00000;
- stub14_group_size = 0x7800;
- }
+ stub_group_size = 0x1e00000;
else
- {
- stub_group_size = 0x1c00000;
- stub14_group_size = 0x7000;
- }
+ stub_group_size = 0x1c00000;
suppress_size_errors = TRUE;
}
@@ -12090,12 +12082,15 @@ group_sections (struct bfd_link_info *info,
bfd_boolean big_sec;
bfd_vma curr_toc;
struct map_stub *group;
+ bfd_size_type group_size;
curr = tail;
total = tail->size;
- big_sec = total > (ppc64_elf_section_data (tail) != NULL
- && ppc64_elf_section_data (tail)->has_14bit_branch
- ? stub14_group_size : stub_group_size);
+ group_size = (ppc64_elf_section_data (tail) != NULL
+ && ppc64_elf_section_data (tail)->has_14bit_branch
+ ? stub_group_size >> 10 : stub_group_size);
+
+ big_sec = total > group_size;
if (big_sec && !suppress_size_errors)
(*_bfd_error_handler) (_("%B section %A exceeds stub group size"),
tail->owner, tail);
@@ -12105,20 +12100,20 @@ group_sections (struct bfd_link_info *info,
&& ((total += curr->output_offset - prev->output_offset)
< (ppc64_elf_section_data (prev) != NULL
&& ppc64_elf_section_data (prev)->has_14bit_branch
- ? stub14_group_size : stub_group_size))
+ ? (group_size = stub_group_size >> 10) : group_size))
&& htab->sec_info[prev->id].toc_off == curr_toc)
curr = prev;
/* OK, the size from the start of CURR to the end is less
- than stub_group_size and thus can be handled by one stub
+ than group_size and thus can be handled by one stub
section. (or the tail section is itself larger than
- stub_group_size, in which case we may be toast.) We
- should really be keeping track of the total size of stubs
- added here, as stubs contribute to the final output
- section size. That's a little tricky, and this way will
- only break if stubs added make the total size more than
- 2^25, ie. for the default stub_group_size, if stubs total
- more than 2097152 bytes, or nearly 75000 plt call stubs. */
+ group_size, in which case we may be toast.) We should
+ really be keeping track of the total size of stubs added
+ here, as stubs contribute to the final output section
+ size. That's a little tricky, and this way will only
+ break if stubs added make the total size more than 2^25,
+ ie. for the default stub_group_size, if stubs total more
+ than 2097152 bytes, or nearly 75000 plt call stubs. */
group = bfd_alloc (curr->owner, sizeof (*group));
if (group == NULL)
return FALSE;
@@ -12135,7 +12130,7 @@ group_sections (struct bfd_link_info *info,
}
while (tail != curr && (tail = prev) != NULL);
- /* But wait, there's more! Input sections up to stub_group_size
+ /* But wait, there's more! Input sections up to group_size
bytes before the stub section can be handled by it too.
Don't do this if we have a really large section after the
stubs, as adding more stubs increases the chance that
@@ -12147,7 +12142,7 @@ group_sections (struct bfd_link_info *info,
&& ((total += tail->output_offset - prev->output_offset)
< (ppc64_elf_section_data (prev) != NULL
&& ppc64_elf_section_data (prev)->has_14bit_branch
- ? stub14_group_size : stub_group_size))
+ ? (group_size = stub_group_size >> 10) : group_size))
&& htab->sec_info[prev->id].toc_off == curr_toc)
{
tail = prev;
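
For reference, a hedged sketch (simplified, not the linker's code) of the
size-selection logic the patch introduces: once any section considered for a
group is marked has_14bit_branch, the single group_size variable is cut to
stub_group_size >> 10 and that tighter limit governs the rest of the group.

/* Illustrative sketch only; condensed from the patched
   group_sections loop in bfd/elf64-ppc.c.  */
#include <stdbool.h>

typedef unsigned long bfd_size_type;   /* stand-in for BFD's typedef */

bfd_size_type
effective_group_size (bfd_size_type stub_group_size,
                      bool has_14bit_branch)
{
  /* One variable replaces the old precomputed stub14_group_size:
     shrink by 2^10 as soon as a 14-bit conditional branch is seen.  */
  return has_14bit_branch ? stub_group_size >> 10 : stub_group_size;
}

Keeping a single variable means the reduction sticks for the remainder of the
group, which is the point of the fix: a section containing external
conditional branches limits the size of the whole group, not just its own
comparison.
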