author    Alan Modra <amodra@gmail.com>  2010-08-25 06:49:55 +0000
committer Alan Modra <amodra@gmail.com>  2010-08-25 06:49:55 +0000
commit    8d06853ec69b274b573df0ac2f5c00e2aa8d0daa (patch)
tree      3cd06577a76e9d825f28324fd7179934739ba2ef
parent    dc5ec521f1916d5951e3287da05a1c007c69bff9 (diff)
* elf.c (_bfd_elf_map_sections_to_segments): Don't load program
headers if any loaded section wraps the address space.  Simplify
~(m-1) to -m.  Use lma rather than vma when determining whether
note sections are adjacent.
 bfd/ChangeLog |  7
 bfd/elf.c     | 25
 2 files changed, 25 insertions(+), 7 deletions(-)
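
For context before the diff, here is a minimal standalone sketch of the two bit manipulations the patch relies on: building an all-ones address mask without shifting by the full width of the type (which is undefined behaviour in C), and detecting a loaded section that wraps around the top of the address space. The values below (a 32-bit target, the sample lma and size) are illustrative and are not taken from BFD.

    /* Hedged sketch, not BFD code: bits, lma and size are sample values.  */
    #include <stdio.h>
    #include <stdint.h>

    int main (void)
    {
      unsigned int bits = 32;            /* stands in for bfd_arch_bits_per_address */
      uint64_t addr_mask;

      /* Shifting a 64-bit 1 left by 64 would be undefined behaviour, so
         build the all-ones mask in two steps, as the patch does.  */
      addr_mask = ((uint64_t) 1 << (bits - 1)) - 1;
      addr_mask = (addr_mask << 1) + 1;  /* 0xffffffff for a 32-bit target */

      /* A section wraps when its end, reduced modulo the address space,
         falls below its start.  */
      uint64_t lma = 0xfffff000, size = 0x2000;
      if (((lma + size) & addr_mask) < (lma & addr_mask))
        printf ("section wraps; wrap_to = %#llx\n",
                (unsigned long long) ((lma + size) & addr_mask));
      return 0;
    }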
diff --git a/bfd/ChangeLog b/bfd/ChangeLog
index 7ae8300..b2608ad 100644
--- a/bfd/ChangeLog
+++ b/bfd/ChangeLog
@@ -1,3 +1,10 @@
+2010-08-25 Alan Modra <amodra@gmail.com>
+
+ * elf.c (_bfd_elf_map_sections_to_segments): Don't load program
+ headers if any loaded section wraps the address space. Simplify
+ ~(m-1) to -m. Use lma rather than vma when determining whether
+ note sections are adjacent.
+
2010-08-22 H.J. Lu <hongjiu.lu@intel.com>
PR ld/11933
diff --git a/bfd/elf.c b/bfd/elf.c
index f9f2dad..9c56e2e 100644
--- a/bfd/elf.c
+++ b/bfd/elf.c
@@ -3624,6 +3624,7 @@ _bfd_elf_map_sections_to_segments (bfd *abfd, struct bfd_link_info *info)
asection *first_tls = NULL;
asection *dynsec, *eh_frame_hdr;
bfd_size_type amt;
+ bfd_vma addr_mask, wrap_to = 0;
/* Select the allocated sections, and sort them. */
@@ -3632,6 +3633,12 @@ _bfd_elf_map_sections_to_segments (bfd *abfd, struct bfd_link_info *info)
if (sections == NULL)
goto error_return;
+ /* Calculate top address, avoiding undefined behaviour of shift
+ left operator when shift count is equal to size of type
+ being shifted. */
+ addr_mask = ((bfd_vma) 1 << (bfd_arch_bits_per_address (abfd) - 1)) - 1;
+ addr_mask = (addr_mask << 1) + 1;
+
i = 0;
for (s = abfd->sections; s != NULL; s = s->next)
{
@@ -3639,6 +3646,9 @@ _bfd_elf_map_sections_to_segments (bfd *abfd, struct bfd_link_info *info)
{
sections[i] = s;
++i;
+ /* A wrapping section potentially clashes with header. */
+ if (((s->lma + s->size) & addr_mask) < (s->lma & addr_mask))
+ wrap_to = (s->lma + s->size) & addr_mask;
}
}
BFD_ASSERT (i <= bfd_count_sections (abfd));
@@ -3708,8 +3718,10 @@ _bfd_elf_map_sections_to_segments (bfd *abfd, struct bfd_link_info *info)
if (phdr_size == (bfd_size_type) -1)
phdr_size = get_program_header_size (abfd, info);
if ((abfd->flags & D_PAGED) == 0
- || sections[0]->lma < phdr_size
- || sections[0]->lma % maxpagesize < phdr_size % maxpagesize)
+ || (sections[0]->lma & addr_mask) < phdr_size
+ || ((sections[0]->lma & addr_mask) % maxpagesize
+ < phdr_size % maxpagesize)
+ || (sections[0]->lma & addr_mask & -maxpagesize) < wrap_to)
phdr_in_segment = FALSE;
}
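
The strengthened phdr_in_segment test above also refuses to place the ELF and program headers in the first segment when a wrapping section spills back down over low addresses. A hedged illustration of that extra guard, reusing made-up sample numbers in the spirit of the sketch above:

    /* Illustrative only: addr_mask, maxpagesize, wrap_to and first_lma are
       invented sample values, not taken from a real link.  */
    #include <stdio.h>
    #include <stdint.h>

    int main (void)
    {
      uint64_t addr_mask = 0xffffffff;   /* 32-bit target */
      uint64_t maxpagesize = 0x1000;
      uint64_t wrap_to = 0x1000;         /* where the wrapping section ends */
      uint64_t first_lma = 0x800;        /* lowest loaded section */

      if ((first_lma & addr_mask & -maxpagesize) < wrap_to)
        printf ("program headers cannot share this segment\n");
      return 0;
    }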
@@ -3774,9 +3786,8 @@ _bfd_elf_map_sections_to_segments (bfd *abfd, struct bfd_link_info *info)
}
else if (! writable
&& (hdr->flags & SEC_READONLY) == 0
- && (((last_hdr->lma + last_size - 1)
- & ~(maxpagesize - 1))
- != (hdr->lma & ~(maxpagesize - 1))))
+ && (((last_hdr->lma + last_size - 1) & -maxpagesize)
+ != (hdr->lma & -maxpagesize)))
{
/* We don't want to put a writable section in a read only
segment, unless they are on the same page in memory
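
The ~(m-1) to -m change in this hunk is a pure simplification: for a power-of-two page size m, the two expressions produce the same mask in unsigned (two's-complement) arithmetic. A quick sanity check of the identity (my own test, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    int main (void)
    {
      /* Walk every power-of-two value a bfd_vma-sized page size could take.  */
      for (uint64_t m = 1; m != 0; m <<= 1)
        assert (-m == ~(m - 1));
      return 0;
    }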
@@ -3883,8 +3894,8 @@ _bfd_elf_map_sections_to_segments (bfd *abfd, struct bfd_link_info *info)
if (s2->next->alignment_power == 2
&& (s2->next->flags & SEC_LOAD) != 0
&& CONST_STRNEQ (s2->next->name, ".note")
- && align_power (s2->vma + s2->size, 2)
- == s2->next->vma)
+ && align_power (s2->lma + s2->size, 2)
+ == s2->next->lma)
count++;
else
break;
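
The final hunk switches the .note adjacency test from vma to lma, so consecutive note sections are only grouped into one segment when they are contiguous in the load image. A hedged sketch of the check, with a local stand-in for BFD's align_power (which rounds an address up to a 1 << power boundary); the section addresses are invented:

    /* Sketch only: align_up mimics BFD's align_power.  */
    #include <stdio.h>
    #include <stdint.h>

    static uint64_t
    align_up (uint64_t addr, unsigned int power)
    {
      uint64_t mask = ((uint64_t) 1 << power) - 1;
      return (addr + mask) & ~mask;
    }

    int main (void)
    {
      uint64_t note1_lma = 0x1000, note1_size = 0x25;   /* first .note */
      uint64_t note2_lma = 0x1028;                      /* next .note, 4-byte aligned */

      if (align_up (note1_lma + note1_size, 2) == note2_lma)
        printf ("notes are adjacent and can share a PT_NOTE segment\n");
      return 0;
    }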