author     Wilco Dijkstra <wilco.dijkstra@arm.com>    2025-08-27 13:04:18 +0000
committer  Wilco Dijkstra <wilco.dijkstra@arm.com>    2025-08-27 13:07:09 +0000
commit     921e251e8f364d00fc753274095007275381ae65 (patch)
tree       8dc46c3b8dccb669cdfceb7284991d131d180c3e
parent     614cfd0f8a2820aed54f9745077c7da0e6643bac (diff)
malloc: Support hugepages in mremap_chunk
Add mremap_chunk support for mmap()ed chunks that use hugepages by accounting
for their alignment, which prevents the mremap call from failing in most cases
where the passed size is not a multiple of the hugepage size. This also makes
reallocating hugepage-backed chunks more robust: since mremap is much less
likely to fail, the fallback path that allocates a larger block and copies the
old contents after mremap fails, and which can itself run out of memory, is
needed far less often.
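For illustration only, here is a small standalone C sketch (not glibc code; the 4 KiB base page and 2 MiB huge page sizes are assumed values) of why the size must be rounded with the huge page size rather than the base page size before calling mremap on a hugetlb mapping:

/* Standalone sketch: the kernel requires hugetlb mapping lengths to be
   multiples of the huge page size, so a request rounded only to the base
   page size would make mremap fail.  Page sizes below are assumptions.  */
#include <stddef.h>
#include <stdio.h>

/* Round X up to the next multiple of ALIGN (a power of two).  */
#define ALIGN_UP(x, align) (((x) + (align) - 1) & ~((size_t) (align) - 1))

int
main (void)
{
  size_t base_pagesize = 4096;           /* typical base page size */
  size_t hp_pagesize = 2 * 1024 * 1024;  /* assumed 2 MiB huge page */
  size_t request = 3 * 1024 * 1024 + 123;

  /* Rounded with the base page size the length is not a huge page
     multiple; rounded with the huge page size it is.  */
  printf ("base rounding: %zu\n", ALIGN_UP (request, base_pagesize));
  printf ("huge rounding: %zu\n", ALIGN_UP (request, hp_pagesize));
  return 0;
}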
To track whether an mmap()ed chunk uses hugepages, store a flag in the lowest
bit of the mchunk_prev_size field; it is set after a call to sysmalloc_mmap
and read later in mremap_chunk. Add helpers for getting and setting this bit,
and for masking it off when reading the field of an mmap()ed chunk. Since the
alignment cannot be lower than 8 bytes, the lowest bit is otherwise always
zero, so the flag cannot clash with the stored offset or alignment data.
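For illustration, a minimal standalone sketch of this low-bit flag scheme (the helper names mirror the patch, but the chunk structure is reduced to a plain struct with the relevant field, so this is not glibc's real layout):

/* Sketch of the low-bit flag: the offset stored in the prev_size field of
   an mmap()ed chunk is always at least 8-byte aligned, so bit 0 is free to
   record "this mapping uses huge pages".  */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define MMAP_HP 0x1

struct chunk { size_t prev_size; size_t size; };

static inline void set_mmap_is_hp (struct chunk *p) { p->prev_size |= MMAP_HP; }
static inline bool mmap_is_hp (const struct chunk *p) { return p->prev_size & MMAP_HP; }
/* Offset to the mmap base with the flag bit masked off.  */
static inline size_t prev_size_mmap (const struct chunk *p)
{ return p->prev_size & ~(size_t) MMAP_HP; }

int
main (void)
{
  struct chunk c = { .prev_size = 4096, .size = 0 };  /* aligned offset */
  set_mmap_is_hp (&c);
  assert (mmap_is_hp (&c));
  assert (prev_size_mmap (&c) == 4096);  /* offset unchanged by the flag */
  return 0;
}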
Exclude the malloc-check variant of malloc/tst-tcfree4 by adding the test to
the tests-exclude-malloc-check list, since malloc-check prevents the tcache
from being used to store chunks. That variant failed due to a bug in
mem2chunk_check which will be fixed in a later patch.
Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
-rw-r--r--  malloc/Makefile |  1
-rw-r--r--  malloc/malloc.c | 39
2 files changed, 35 insertions, 5 deletions
diff --git a/malloc/Makefile b/malloc/Makefile
index 83f6c87..a9a0e87 100644
--- a/malloc/Makefile
+++ b/malloc/Makefile
@@ -114,6 +114,7 @@ tests-exclude-malloc-check = \
   tst-memalign-3 \
   tst-mxfast \
   tst-safe-linking \
+  tst-tcfree4 \
   # tests-exclude-malloc-check
 
 # Run all tests with MALLOC_CHECK_=3
diff --git a/malloc/malloc.c b/malloc/malloc.c
index f1cf392..6c02af5 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1477,18 +1477,42 @@ tag_new_usable (void *ptr)
   return ptr;
 }
 
+/* HP page used for an mmap()'ed chunk.  */
+#define MMAP_HP 0x1
+
+/* Check for HP usage from an mmap()'ed chunk.  */
+static __always_inline bool
+mmap_is_hp (mchunkptr p)
+{
+  return prev_size (p) & MMAP_HP;
+}
+
+/* Set HP advised field for an mmap()'ed chunk.  */
+static __always_inline void
+set_mmap_is_hp (mchunkptr p)
+{
+  prev_size (p) |= MMAP_HP;
+}
+
+/* Get an mmap()ed chunk's offset, ignoring huge page bits.  */
+static __always_inline size_t
+prev_size_mmap (mchunkptr p)
+{
+  return prev_size (p) & ~MMAP_HP;
+}
+
 /* Return pointer to mmap base from a chunk with IS_MMAPPED set.  */
 static __always_inline void *
 mmap_base (mchunkptr p)
 {
-  return (char *) p - prev_size (p);
+  return (char *) p - prev_size_mmap (p);
 }
 
 /* Return total mmap size of a chunk with IS_MMAPPED set.  */
 static __always_inline size_t
 mmap_size (mchunkptr p)
 {
-  return prev_size (p) + chunksize (p) + CHUNK_HDR_SZ;
+  return prev_size_mmap (p) + chunksize (p) + CHUNK_HDR_SZ;
 }
 
 /*
@@ -2443,6 +2467,11 @@ sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags)
   set_prev_size (p, padding);
   set_head (p, (size - padding - CHUNK_HDR_SZ) | IS_MMAPPED);
 
+  /* Must also check whether huge pages were used in the mmap call
+     and this is not the fallback call after using huge pages failed */
+  if (__glibc_unlikely (extra_flags & mp_.hp_flags))
+    set_mmap_is_hp (p);
+
   /* update statistics */
   int new = atomic_fetch_add_relaxed (&mp_.n_mmaps, 1) + 1;
   atomic_max (&mp_.max_n_mmaps, new);
@@ -3020,8 +3049,8 @@ munmap_chunk (mchunkptr p)
 static mchunkptr
 mremap_chunk (mchunkptr p, size_t new_size)
 {
-  size_t pagesize = GLRO (dl_pagesize);
-  INTERNAL_SIZE_T offset = prev_size (p);
+  size_t pagesize = mmap_is_hp (p) ? mp_.hp_pagesize : GLRO (dl_pagesize);
+  INTERNAL_SIZE_T offset = prev_size_mmap (p);
   INTERNAL_SIZE_T size = chunksize (p);
   char *cp;
 
@@ -3053,7 +3082,7 @@ mremap_chunk (mchunkptr p, size_t new_size)
   p = (mchunkptr) (cp + offset);
 
   assert (!misaligned_chunk (p));
-  assert (prev_size (p) == offset);
+  assert (prev_size_mmap (p) == offset);
   set_head (p, (new_size - offset - CHUNK_HDR_SZ) | IS_MMAPPED);
 
   INTERNAL_SIZE_T new;
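One rough way to exercise the new path, assuming a glibc built with this patch on Linux, hugetlb pages reserved on the system, and the existing glibc.malloc.hugetlb tunable set to 2 so large allocations are backed by MAP_HUGETLB mappings:

/* Grow a large, mmap()ed allocation so realloc goes through mremap_chunk.
   Whether huge pages are actually used depends on the tunable setting and
   on huge pages being available; this is only a sketch of a manual test.  */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main (void)
{
  size_t size = 8 * 1024 * 1024;   /* large enough to be mmap()ed */
  char *p = malloc (size);
  if (p == NULL)
    return 1;
  memset (p, 0x5a, size);

  char *q = realloc (p, 2 * size); /* should hit mremap_chunk */
  if (q == NULL)
    {
      free (p);
      return 1;
    }
  printf ("data preserved: %d\n", q[size - 1] == 0x5a);
  free (q);
  return 0;
}

Run it as GLIBC_TUNABLES=glibc.malloc.hugetlb=2 ./a.out; before this patch the realloc of a hugetlb-backed chunk would typically fall back to allocate-and-copy because the mremap length was not a huge page multiple.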