Diffstat (limited to 'support/next_to_fault.c')
-rw-r--r--  support/next_to_fault.c  46
1 file changed, 35 insertions(+), 11 deletions(-)
diff --git a/support/next_to_fault.c b/support/next_to_fault.c
index 44d5477..b60d5a5 100644
--- a/support/next_to_fault.c
+++ b/support/next_to_fault.c
@@ -1,5 +1,5 @@
-/* Memory allocation next to an unmapped page.
- Copyright (C) 2017-2024 Free Software Foundation, Inc.
+/* Memory allocation either before or after an unmapped page.
+ Copyright (C) 2017-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,34 +16,58 @@
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
+#include <stdbool.h>
#include <support/check.h>
#include <support/next_to_fault.h>
#include <support/xunistd.h>
#include <sys/mman.h>
#include <sys/param.h>
-struct support_next_to_fault
-support_next_to_fault_allocate (size_t size)
+static struct support_next_to_fault
+support_next_to_fault_allocate_any (size_t size, bool fault_after_alloc)
{
long page_size = sysconf (_SC_PAGE_SIZE);
+ long protect_offset = 0;
+ long buffer_offset = page_size;
+
TEST_VERIFY_EXIT (page_size > 0);
struct support_next_to_fault result;
result.region_size = roundup (size, page_size) + page_size;
if (size + page_size <= size || result.region_size <= size)
- FAIL_EXIT1 ("support_next_to_fault_allocate (%zu): overflow", size);
+ FAIL_EXIT1 ("%s (%zu): overflow", __func__, size);
result.region_start
= xmmap (NULL, result.region_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1);
- /* Unmap the page after the allocation. */
- xmprotect (result.region_start + (result.region_size - page_size),
- page_size, PROT_NONE);
- /* Align the allocation within the region so that it ends just
- before the PROT_NONE page. */
- result.buffer = result.region_start + result.region_size - page_size - size;
+
+ if (fault_after_alloc)
+ {
+ protect_offset = result.region_size - page_size;
+ buffer_offset = protect_offset - size;
+ }
+
+ /* Unmap the page before or after the allocation. */
+ xmprotect (result.region_start + protect_offset, page_size, PROT_NONE);
+ /* Align the allocation within the region so that it starts after or ends
+ just before the PROT_NONE page. */
+ result.buffer = result.region_start + buffer_offset;
result.length = size;
return result;
}
+/* Unmap a page after the buffer.  */
+struct support_next_to_fault
+support_next_to_fault_allocate (size_t size)
+{
+ return support_next_to_fault_allocate_any (size, true);
+}
+
+/* Unmap a page before the buffer.  */
+struct support_next_to_fault
+support_next_to_fault_allocate_before (size_t size)
+{
+ return support_next_to_fault_allocate_any (size, false);
+}
+
void
support_next_to_fault_free (struct support_next_to_fault *ntf)
{
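
What follows is a usage sketch, not part of the commit: it shows how a test
might exercise both allocators, assuming glibc's test conventions
(TEST_COMPARE and TEST_VERIFY from <support/check.h>, memrchr via
_GNU_SOURCE) and hypothetical test function names.
support_next_to_fault_allocate places the buffer so it ends just before the
PROT_NONE page; the new support_next_to_fault_allocate_before places it so
it starts just after, which lets tests catch buffer underreads as well as
overreads.

#define _GNU_SOURCE 1   /* For memrchr.  */
#include <string.h>
#include <support/check.h>
#include <support/next_to_fault.h>

/* Hypothetical test.  The buffer ends flush against the PROT_NONE
   page, so strnlen reading past ntf.buffer[ntf.length - 1] would
   fault rather than silently overread.  */
static void
check_no_overread (void)
{
  struct support_next_to_fault ntf = support_next_to_fault_allocate (16);
  memset (ntf.buffer, 'x', ntf.length);   /* Deliberately no NUL.  */
  TEST_COMPARE (strnlen (ntf.buffer, ntf.length), ntf.length);
  support_next_to_fault_free (&ntf);
}

/* Hypothetical test.  With the new _allocate_before variant the
   guard page precedes the buffer, so a backwards scan such as
   memrchr stepping below ntf.buffer[0] would fault.  */
static void
check_no_underread (void)
{
  struct support_next_to_fault ntf
    = support_next_to_fault_allocate_before (16);
  memset (ntf.buffer, 'x', ntf.length);
  TEST_VERIFY (memrchr (ntf.buffer, 'y', ntf.length) == NULL);
  support_next_to_fault_free (&ntf);
}

The point of the pattern is that an out-of-bounds access crashes at the
faulting load or store itself, instead of surfacing later as silent
corruption or a flaky test.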