author     Szabolcs Nagy <szabolcs.nagy@arm.com>   2022-04-07 08:43:00 +0100
committer  Szabolcs Nagy <szabolcs.nagy@arm.com>   2022-10-27 14:46:53 +0100
commit     5eedf66625842a56c0ed7e16a1f79fda4b52b425 (patch)
tree       360f525c980d5410819e8580cff19cf7c6f8608e
parent     50bbdb94d32a75a577f393bf58cbc14f67d2fbea (diff)
cheri: elf: Set up per-module RX and RW capabilities
_dl_map_segments must use capabilities, which requires changes beyond the
obvious elfptr_t ones:

- Ensure map_end is derived from map_start.

- Use strict mmap bounds with MAP_FIXED: c->mapend is aligned up to the
  page size and may therefore fall outside the bounds of l_map_start
  (which covers the load segments, but whose bounds are not aligned up),
  so use c->dataend instead.

Propagate the l_map_start and l_rw_start capabilities of ld.so and the
executable that come from the auxv, and ensure ld.so does not recompute
them incorrectly.

The l_rw_range should exclude the relro region, but for libc.so and ld.so
this does not work: symbols are accessed before relro is applied, so at
that point the permission still has to be writable.
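To make the capability handling concrete, the following is a minimal,
illustrative sketch (not part of the patch) of the derivation that
_dl_map_segments performs once all load segments are mapped: the writable
range gets its own capability rebased and bounded within the mapping, then
stripped to RW permissions, while the mapping capability keeps only RX
permissions.  CAP_PERM_MASK_RW and CAP_PERM_MASK_RX are assumed to come
from the cheri_perms.h header included in the diff; derive_rw_cap and its
parameters are hypothetical names used only for illustration.

#ifdef __CHERI_PURE_CAPABILITY__
# include <stdint.h>
# include <cheri_perms.h>

/* Sketch: derive a bounded RW capability for the writable PT_LOAD range
   from the module's RX mapping capability.  map_start is the capability
   covering all load segments; rw_addr and rw_size describe the writable
   range within the loaded image.  */
static uintptr_t
derive_rw_cap (uintptr_t map_start, unsigned long rw_addr, size_t rw_size)
{
  /* Same capability, address moved to the start of the writable range.  */
  uintptr_t rw = __builtin_cheri_address_set (map_start, rw_addr);
  /* Narrow the bounds to the writable range only.  */
  rw = __builtin_cheri_bounds_set (rw, rw_size);
  /* Drop execute and other non-RW permissions.  */
  return __builtin_cheri_perms_and (rw, CAP_PERM_MASK_RW);
}
#endif

Deriving l_rw_start from l_map_start this way keeps both capabilities
rooted in the same mapping: the RW view loses execute permission and the
RX view (l_map_start, after the final perms_and in the diff) loses write
permission, which is what gives each module separate RX and RW
capabilities.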
-rw-r--r--  elf/dl-map-segments.h  72
-rw-r--r--  elf/rtld.c             51
2 files changed, 111 insertions, 12 deletions
diff --git a/elf/dl-map-segments.h b/elf/dl-map-segments.h
index 024175b..ce51585 100644
--- a/elf/dl-map-segments.h
+++ b/elf/dl-map-segments.h
@@ -18,15 +18,18 @@
<https://www.gnu.org/licenses/>. */
#include <dl-load.h>
+#ifdef __CHERI_PURE_CAPABILITY__
+# include <cheri_perms.h>
+#endif
/* Map a segment and align it properly. */
-static __always_inline ElfW(Addr)
+static __always_inline elfptr_t
_dl_map_segment (const struct loadcmd *c, ElfW(Addr) mappref,
const size_t maplength, int fd)
{
if (__glibc_likely (c->mapalign <= GLRO(dl_pagesize)))
- return (ElfW(Addr)) __mmap ((void *) mappref, maplength, c->prot,
+ return (elfptr_t) __mmap ((void *) mappref, maplength, c->prot,
MAP_COPY|MAP_FILE, fd, c->mapoff);
/* If the segment alignment > the page size, allocate enough space to
@@ -34,15 +37,15 @@ _dl_map_segment (const struct loadcmd *c, ElfW(Addr) mappref,
ElfW(Addr) maplen = (maplength >= c->mapalign
? (maplength + c->mapalign)
: (2 * c->mapalign));
- ElfW(Addr) map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplen,
+ elfptr_t map_start = (elfptr_t) __mmap ((void *) mappref, maplen,
PROT_NONE,
MAP_ANONYMOUS|MAP_PRIVATE,
-1, 0);
if (__glibc_unlikely ((void *) map_start == MAP_FAILED))
return map_start;
- ElfW(Addr) map_start_aligned = ALIGN_UP (map_start, c->mapalign);
- map_start_aligned = (ElfW(Addr)) __mmap ((void *) map_start_aligned,
+ elfptr_t map_start_aligned = ALIGN_UP (map_start, c->mapalign);
+ map_start_aligned = (elfptr_t) __mmap ((void *) map_start_aligned,
maplength, c->prot,
MAP_COPY|MAP_FILE|MAP_FIXED,
fd, c->mapoff);
@@ -54,7 +57,7 @@ _dl_map_segment (const struct loadcmd *c, ElfW(Addr) mappref,
ElfW(Addr) delta = map_start_aligned - map_start;
if (delta)
__munmap ((void *) map_start, delta);
- ElfW(Addr) map_end = map_start_aligned + maplength;
+ elfptr_t map_end = map_start + (map_start_aligned - map_start) + maplength;
map_end = ALIGN_UP (map_end, GLRO(dl_pagesize));
delta = map_start + maplen - map_end;
if (delta)
@@ -79,6 +82,10 @@ _dl_map_segments (struct link_map *l, int fd,
struct link_map *loader)
{
const struct loadcmd *c = loadcmds;
+#ifdef __CHERI_PURE_CAPABILITY__
+ ElfW(Addr) rw_start = -1;
+ ElfW(Addr) rw_end = 0;
+#endif
if (__glibc_likely (type == ET_DYN))
{
@@ -116,7 +123,7 @@ _dl_map_segments (struct link_map *l, int fd,
c->mapend))
return N_("ELF load command address/offset not page-aligned");
if (__glibc_unlikely
- (__mprotect ((caddr_t) (l->l_addr + c->mapend),
+ (__mprotect ((caddr_t) dl_rx_ptr (l, c->mapend),
loadcmds[nloadcmds - 1].mapstart - c->mapend,
PROT_NONE) < 0))
return DL_MAP_SEGMENTS_ERROR_MPROTECT;
@@ -126,6 +133,22 @@ _dl_map_segments (struct link_map *l, int fd,
goto postmap;
}
+#ifdef __CHERI_PURE_CAPABILITY__
+ else
+ {
+ /* Need a single capability to cover all load segments. */
+ void *p = __mmap ((void *) c->mapstart, maplength, c->prot,
+ MAP_FIXED|MAP_COPY|MAP_FILE,
+ fd, c->mapoff);
+ if (p == MAP_FAILED)
+ return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
+ l->l_map_start = (elfptr_t) p;
+ l->l_map_end = l->l_map_start + maplength;
+ l->l_contiguous = !has_holes;
+
+ goto postmap;
+ }
+#endif
/* Remember which part of the address space this object uses. */
l->l_map_start = c->mapstart + l->l_addr;
@@ -134,10 +157,10 @@ _dl_map_segments (struct link_map *l, int fd,
while (c < &loadcmds[nloadcmds])
{
- if (c->mapend > c->mapstart
+ if (c->dataend > c->mapstart
/* Map the segment contents from the file. */
- && (__mmap ((void *) (l->l_addr + c->mapstart),
- c->mapend - c->mapstart, c->prot,
+ && (__mmap ((void *) dl_rx_ptr (l, c->mapstart),
+ c->dataend - c->mapstart, c->prot,
MAP_FIXED|MAP_COPY|MAP_FILE,
fd, c->mapoff)
== MAP_FAILED))
@@ -146,13 +169,28 @@ _dl_map_segments (struct link_map *l, int fd,
postmap:
_dl_postprocess_loadcmd (l, header, c);
+#ifdef __CHERI_PURE_CAPABILITY__
+ if (c->prot & PROT_WRITE)
+ {
+ if (l->l_rw_count >= DL_MAX_RW_COUNT)
+ return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT; // TODO: right error code
+ if (c->mapstart < rw_start)
+ rw_start = c->mapstart;
+ if (c->allocend > rw_end)
+ rw_end = c->allocend;
+ l->l_rw_range[l->l_rw_count].start = l->l_addr + c->mapstart;
+ l->l_rw_range[l->l_rw_count].end = l->l_addr + c->allocend;
+ l->l_rw_count++;
+ }
+#endif
+
if (c->allocend > c->dataend)
{
/* Extra zero pages should appear at the end of this segment,
after the data mapped from the file. */
- ElfW(Addr) zero, zeroend, zeropage;
+ elfptr_t zero, zeroend, zeropage;
- zero = l->l_addr + c->dataend;
+ zero = dl_rx_ptr (l, c->dataend);
zeroend = l->l_addr + c->allocend;
zeropage = ((zero + GLRO(dl_pagesize) - 1)
& ~(GLRO(dl_pagesize) - 1));
@@ -194,6 +232,16 @@ _dl_map_segments (struct link_map *l, int fd,
++c;
}
+#ifdef __CHERI_PURE_CAPABILITY__
+ if (l->l_rw_count > 0)
+ {
+ l->l_rw_start = __builtin_cheri_address_set (l->l_map_start, l->l_addr + rw_start);
+ l->l_rw_start = __builtin_cheri_bounds_set (l->l_rw_start, rw_end - rw_start);
+ l->l_rw_start = __builtin_cheri_perms_and (l->l_rw_start, CAP_PERM_MASK_RW);
+ }
+ l->l_map_start = __builtin_cheri_perms_and (l->l_map_start, CAP_PERM_MASK_RX);
+#endif
+
/* Notify ELF_PREFERRED_ADDRESS that we have to load this one
fixed. */
ELF_FIXED_ADDRESS (loader, c->mapstart);
diff --git a/elf/rtld.c b/elf/rtld.c
index 205df43..26af993 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -474,10 +474,19 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
sizeof GL(dl_rtld_map).l_info);
GL(dl_rtld_map).l_mach = info->l.l_mach;
GL(dl_rtld_map).l_relocated = 1;
+# ifdef __CHERI_PURE_CAPABILITY__
+ GL(dl_rtld_map).l_map_start = info->l.l_map_start;
+ GL(dl_rtld_map).l_rw_start = info->l.l_rw_start;
+ GL(dl_rtld_map).l_rw_count = info->l.l_rw_count;
+ for (int i = 0; i < info->l.l_rw_count; i++)
+ GL(dl_rtld_map).l_rw_range[i] = info->l.l_rw_range[i];
+# endif
#endif
_dl_setup_hash (&GL(dl_rtld_map));
GL(dl_rtld_map).l_real = &GL(dl_rtld_map);
+#ifndef __CHERI_PURE_CAPABILITY__
GL(dl_rtld_map).l_map_start = (ElfW(Addr)) &__ehdr_start;
+#endif
GL(dl_rtld_map).l_map_end = (ElfW(Addr)) _end;
GL(dl_rtld_map).l_text_end = (ElfW(Addr)) _etext;
/* Copy the TLS related data if necessary. */
@@ -542,11 +551,16 @@ _dl_start (void *arg)
# endif
#endif
+#ifdef __CHERI_PURE_CAPABILITY__
+ elf_machine_rtld_base_setup (&bootstrap_map, arg);
+ bootstrap_map.l_ld = elf_machine_runtime_dynamic ();
+#else
/* Figure out the run-time load address of the dynamic linker itself. */
bootstrap_map.l_addr = elf_machine_load_address ();
/* Read our own dynamic section and fill in the info array. */
bootstrap_map.l_ld = (void *) bootstrap_map.l_addr + elf_machine_dynamic ();
+#endif
bootstrap_map.l_ld_readonly = DL_RO_DYN_SECTION;
elf_get_dynamic_info (&bootstrap_map, true, false);
@@ -1125,8 +1139,13 @@ rtld_setup_main_map (struct link_map *main_map)
main_map->l_map_end = 0;
main_map->l_text_end = 0;
+#ifndef __CHERI_PURE_CAPABILITY__
/* Perhaps the executable has no PT_LOAD header entries at all. */
main_map->l_map_start = ~0;
+#else
+ /* May be computed already when exe is loaded by ld.so. */
+ main_map->l_rw_count = 0;
+#endif
/* And it was opened directly. */
++main_map->l_direct_opencount;
main_map->l_contiguous = 1;
@@ -1205,8 +1224,10 @@ rtld_setup_main_map (struct link_map *main_map)
/* Remember where the main program starts in memory. */
mapstart = (main_map->l_addr
+ (ph->p_vaddr & ~(GLRO(dl_pagesize) - 1)));
+#ifndef __CHERI_PURE_CAPABILITY__
if (main_map->l_map_start > mapstart)
main_map->l_map_start = mapstart;
+#endif
if (main_map->l_contiguous && expected_load_address != 0
&& expected_load_address != mapstart)
@@ -1223,6 +1244,15 @@ rtld_setup_main_map (struct link_map *main_map)
segment. */
expected_load_address = ((allocend + GLRO(dl_pagesize) - 1)
& ~(GLRO(dl_pagesize) - 1));
+#ifdef __CHERI_PURE_CAPABILITY__
+ if (ph->p_flags & PF_W)
+ {
+ assert (main_map->l_rw_count < DL_MAX_RW_COUNT);
+ main_map->l_rw_range[main_map->l_rw_count].start = mapstart;
+ main_map->l_rw_range[main_map->l_rw_count].end = allocend;
+ main_map->l_rw_count++;
+ }
+#endif
}
break;
@@ -1635,6 +1665,14 @@ dl_main (const ElfW(Phdr) *phdr,
case AT_EXECFN:
av->a_un.a_val = (uintptr_t) _dl_argv[0];
break;
+# ifdef __CHERI_PURE_CAPABILITY__
+ case AT_CHERI_EXEC_RX_CAP:
+ av->a_un.a_val = main_map->l_map_start;
+ break;
+ case AT_CHERI_EXEC_RW_CAP:
+ av->a_un.a_val = main_map->l_rw_start;
+ break;
+# endif
}
#endif
@@ -1678,6 +1716,19 @@ dl_main (const ElfW(Phdr) *phdr,
/* We delay initializing the path structure until we got the dynamic
information for the program. */
+
+#ifdef __CHERI_PURE_CAPABILITY__
+ for (ElfW(auxv_t) *av = auxv; av->a_type != AT_NULL; av++)
+ switch (av->a_type)
+ {
+ case AT_CHERI_EXEC_RX_CAP:
+ main_map->l_map_start = av->a_un.a_val;
+ break;
+ case AT_CHERI_EXEC_RW_CAP:
+ main_map->l_rw_start = av->a_un.a_val;
+ break;
+ }
+#endif
}
bool has_interp = rtld_setup_main_map (main_map);