diff options
author | Szabolcs Nagy <szabolcs.nagy@arm.com> | 2022-09-01 09:45:30 +0100 |
---|---|---|
committer | Szabolcs Nagy <szabolcs.nagy@arm.com> | 2022-10-12 14:22:03 +0100 |
commit | a66d563c9e33cffbf646e1327bdd73423a10ef76 (patch) | |
tree | ed8a20e6a29486ad507e5501759ca77468672214 | |
parent | 74085ebc1f762c9fd70af4b99b5615dc5358dee0 (diff) | |
download | glibc-a66d563c9e33cffbf646e1327bdd73423a10ef76.zip glibc-a66d563c9e33cffbf646e1327bdd73423a10ef76.tar.gz glibc-a66d563c9e33cffbf646e1327bdd73423a10ef76.tar.bz2 |
cheri: elf: Setup per module RX and RW capabilities
The l_map_start and l_rw_start of the ld.so and exe come from the auxv
since they are normally mapped by the kernel. Some generic code had to
be modified so l_map_start is propagated and not overwritten when it is
recomputed.
The l_rw_range should exclude the relro region, but in libc.so and
ld.so this does not work: symbols are accessed before relro is applied,
and until that point the permission must still be writable.
-rw-r--r-- | elf/dl-map-segments.h | 44 | ||||
-rw-r--r-- | elf/rtld.c | 51 |
2 files changed, 95 insertions, 0 deletions
diff --git a/elf/dl-map-segments.h b/elf/dl-map-segments.h index 5439c20..4ba1c71 100644 --- a/elf/dl-map-segments.h +++ b/elf/dl-map-segments.h @@ -18,6 +18,9 @@ <https://www.gnu.org/licenses/>. */ #include <dl-load.h> +#ifdef __CHERI_PURE_CAPABILITY__ +# include <cheri_perms.h> +#endif /* Map a segment and align it properly. */ @@ -79,6 +82,10 @@ _dl_map_segments (struct link_map *l, int fd, struct link_map *loader) { const struct loadcmd *c = loadcmds; +#ifdef __CHERI_PURE_CAPABILITY__ + ElfW(Addr) rw_start = -1; + ElfW(Addr) rw_end = 0; +#endif if (__glibc_likely (type == ET_DYN)) { @@ -129,6 +136,16 @@ _dl_map_segments (struct link_map *l, int fd, #ifdef __CHERI_PURE_CAPABILITY__ else { + /* Need a single capability to cover all load segments. */ + void *p = __mmap ((void *) c->mapstart, maplength, c->prot, + MAP_FIXED|MAP_COPY|MAP_FILE, + fd, c->mapoff); + if (p == MAP_FAILED) + return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT; + l->l_map_start = (elfptr_t) p; + l->l_map_end = l->l_map_start + maplength; + l->l_contiguous = !has_holes; + /* TODO: l_addr is 0 in an exe, but it should cover the load segments. 
*/ uintptr_t l_addr = 0; unsigned long allocend = ALIGN_UP (loadcmds[nloadcmds - 1].allocend, @@ -136,6 +153,8 @@ _dl_map_segments (struct link_map *l, int fd, asm volatile ("cvtd %0, %x0" : "+r"(l_addr)); asm volatile ("scbnds %0, %0, %x1" : "+r"(l_addr) : "r"(allocend)); l->l_addr = l_addr; + + goto postmap; } #endif @@ -158,6 +177,21 @@ _dl_map_segments (struct link_map *l, int fd, postmap: _dl_postprocess_loadcmd (l, header, c); +#ifdef __CHERI_PURE_CAPABILITY__ + if (c->prot & PROT_WRITE) + { + if (l->l_rw_count >= DL_MAX_RW_COUNT) + return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT; // TODO: right error code + if (c->mapstart < rw_start) + rw_start = c->mapstart; + if (c->allocend > rw_end) + rw_end = c->allocend; + l->l_rw_range[l->l_rw_count].start = l->l_addr + c->mapstart; + l->l_rw_range[l->l_rw_count].end = l->l_addr + c->allocend; + l->l_rw_count++; + } +#endif + if (c->allocend > c->dataend) { /* Extra zero pages should appear at the end of this segment, @@ -206,6 +240,16 @@ _dl_map_segments (struct link_map *l, int fd, ++c; } +#ifdef __CHERI_PURE_CAPABILITY__ + if (l->l_rw_count > 0) + { + l->l_rw_start = __builtin_cheri_address_set (l->l_map_start, l->l_addr + rw_start); + l->l_rw_start = __builtin_cheri_bounds_set (l->l_rw_start, rw_end - rw_start); + l->l_rw_start = __builtin_cheri_perms_and (l->l_rw_start, CAP_PERM_MASK_RW); + } + l->l_map_start = __builtin_cheri_perms_and (l->l_map_start, CAP_PERM_MASK_RX); +#endif + /* Notify ELF_PREFERRED_ADDRESS that we have to load this one fixed. 
*/ ELF_FIXED_ADDRESS (loader, c->mapstart); @@ -474,10 +474,19 @@ _dl_start_final (void *arg, struct dl_start_final_info *info) sizeof GL(dl_rtld_map).l_info); GL(dl_rtld_map).l_mach = info->l.l_mach; GL(dl_rtld_map).l_relocated = 1; +# ifdef __CHERI_PURE_CAPABILITY__ + GL(dl_rtld_map).l_map_start = info->l.l_map_start; + GL(dl_rtld_map).l_rw_start = info->l.l_rw_start; + GL(dl_rtld_map).l_rw_count = info->l.l_rw_count; + for (int i = 0; i < info->l.l_rw_count; i++) + GL(dl_rtld_map).l_rw_range[i] = info->l.l_rw_range[i]; +# endif #endif _dl_setup_hash (&GL(dl_rtld_map)); GL(dl_rtld_map).l_real = &GL(dl_rtld_map); +#ifndef __CHERI_PURE_CAPABILITY__ GL(dl_rtld_map).l_map_start = (ElfW(Addr)) &__ehdr_start; +#endif GL(dl_rtld_map).l_map_end = (ElfW(Addr)) _end; GL(dl_rtld_map).l_text_end = (ElfW(Addr)) _etext; /* Copy the TLS related data if necessary. */ @@ -543,6 +552,7 @@ _dl_start (void *arg) #endif #ifdef __CHERI_PURE_CAPABILITY__ + elf_machine_rtld_base_setup (&bootstrap_map, arg); bootstrap_map.l_addr = elf_machine_load_address_from_args (arg); bootstrap_map.l_ld = elf_machine_runtime_dynamic (); #else @@ -1130,8 +1140,13 @@ rtld_setup_main_map (struct link_map *main_map) main_map->l_map_end = 0; main_map->l_text_end = 0; +#ifndef __CHERI_PURE_CAPABILITY__ /* Perhaps the executable has no PT_LOAD header entries at all. */ main_map->l_map_start = ~0; +#else + /* May be computed already when exe is loaded by ld.so. */ + main_map->l_rw_count = 0; +#endif /* And it was opened directly. */ ++main_map->l_direct_opencount; main_map->l_contiguous = 1; @@ -1158,6 +1173,10 @@ rtld_setup_main_map (struct link_map *main_map) case PT_PHDR: /* Find out the load address. 
*/ main_map->l_addr = (elfptr_t) phdr - ph->p_vaddr; +#ifdef __CHERI_PURE_CAPABILITY__ + // TODO: we still need laddr + asm volatile ("cvtd %0, %x0" : "+r"(main_map->l_addr)); +#endif break; case PT_DYNAMIC: /* This tells us where to find the dynamic section, @@ -1210,8 +1229,10 @@ rtld_setup_main_map (struct link_map *main_map) /* Remember where the main program starts in memory. */ mapstart = (main_map->l_addr + (ph->p_vaddr & ~(GLRO(dl_pagesize) - 1))); +#ifndef __CHERI_PURE_CAPABILITY__ if (main_map->l_map_start > mapstart) main_map->l_map_start = mapstart; +#endif if (main_map->l_contiguous && expected_load_address != 0 && expected_load_address != mapstart) @@ -1228,6 +1249,15 @@ rtld_setup_main_map (struct link_map *main_map) segment. */ expected_load_address = ((allocend + GLRO(dl_pagesize) - 1) & ~(GLRO(dl_pagesize) - 1)); +#ifdef __CHERI_PURE_CAPABILITY__ + if (ph->p_flags & PF_W) + { + assert (main_map->l_rw_count < DL_MAX_RW_COUNT); + main_map->l_rw_range[main_map->l_rw_count].start = mapstart; + main_map->l_rw_range[main_map->l_rw_count].end = allocend; + main_map->l_rw_count++; + } +#endif } break; @@ -1640,6 +1670,14 @@ dl_main (const ElfW(Phdr) *phdr, case AT_EXECFN: av->a_un.a_val = (uintptr_t) _dl_argv[0]; break; +# ifdef __CHERI_PURE_CAPABILITY__ + case AT_CHERI_EXEC_RX_CAP: + av->a_un.a_val = main_map->l_map_start; + break; + case AT_CHERI_EXEC_RW_CAP: + av->a_un.a_val = main_map->l_rw_start; + break; +# endif } #endif @@ -1683,6 +1721,19 @@ dl_main (const ElfW(Phdr) *phdr, /* We delay initializing the path structure until we got the dynamic information for the program. */ + +#ifdef __CHERI_PURE_CAPABILITY__ + for (ElfW(auxv_t) *av = auxv; av->a_type != AT_NULL; av++) + switch (av->a_type) + { + case AT_CHERI_EXEC_RX_CAP: + main_map->l_map_start = av->a_un.a_val; + break; + case AT_CHERI_EXEC_RW_CAP: + main_map->l_rw_start = av->a_un.a_val; + break; + } +#endif } bool has_interp = rtld_setup_main_map (main_map); |