From 9d1ba393ae200bb7a4286997d4f6bddc014616b0 Mon Sep 17 00:00:00 2001 From: Campbell Suter Date: Sun, 5 Dec 2021 12:25:32 +1300 Subject: unix: Fix unprotecting memory across a page boundary Previously, we'd round the address to unprotect down to the nearest page boundary but keep the length the same. This meant that if we were hooking a function that started just before the end of a page, it would need both pages to be unprotected but we'd only unprotect the first one. --- subhook_unix.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/subhook_unix.c b/subhook_unix.c index 4d9c03a..697974d 100644 --- a/subhook_unix.c +++ b/subhook_unix.c @@ -39,14 +39,19 @@ int subhook_unprotect(void *address, size_t size) { long pagesize; pagesize = sysconf(_SC_PAGESIZE); - address = (void *)((long)address & ~(pagesize - 1)); + void *aligned_address = (void *)((long)address & ~(pagesize - 1)); - int error = mprotect(address, size, SUBHOOK_CODE_PROTECT_FLAGS); + // Fix up the length - since we rounded the start address off, if a jump is right at the + // end of a page we could need to unprotect both. + void *end = address + size; + size_t new_size = end - aligned_address; + + int error = mprotect(aligned_address, new_size, SUBHOOK_CODE_PROTECT_FLAGS); #ifdef SUBHOOK_APPLE if (-1 == error) { /* If mprotect fails, try to use VM_PROT_COPY with vm_protect. */ - kern_return_t kret = vm_protect(mach_task_self(), (unsigned long)address, size, 0, SUBHOOK_CODE_PROTECT_FLAGS | VM_PROT_COPY); + kern_return_t kret = vm_protect(mach_task_self(), (unsigned long)aligned_address, new_size, 0, SUBHOOK_CODE_PROTECT_FLAGS | VM_PROT_COPY); if (kret != KERN_SUCCESS) { error = -1; -- cgit v1.1