author     Wilco Dijkstra <wilco.dijkstra@arm.com>    2025-03-18 12:30:10 +0000
committer  Wilco Dijkstra <wilco.dijkstra@arm.com>    2025-03-18 17:14:34 +0000
commit     9f551e28a74941e17ed4572511a2308c03f5a718 (patch)
tree       0a914f21d4b1316f3559a9ca854d0954c521f859
parent     dd003db8d127b0a8ebabc6a972501a7d881073ea (diff)
malloc: Improve arena_for_chunk()
Change heap_max_size() to improve the performance of arena_for_chunk().
Instead of a complex calculation, use a simple mask operation to get the
arena base pointer.  HEAP_MAX_SIZE should be larger than the huge page
size, otherwise heaps will not use huge pages.

On AArch64 this removes 6 instructions from arena_for_chunk(), and
bench-malloc-thread improves by 1.1% - 1.8%.

Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
-rw-r--r--   malloc/arena.c   13
1 file changed, 7 insertions(+), 6 deletions(-)
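The core of the change is that heap_max_size() now returns the compile-time constant HEAP_MAX_SIZE, so arena_for_chunk() can find a chunk's heap header (and hence its arena) by masking the chunk pointer down to a HEAP_MAX_SIZE boundary. The following is a minimal standalone sketch of that mask operation, not glibc source: EXAMPLE_HEAP_MAX_SIZE, example_heap_info and heap_base_for_ptr are illustrative stand-ins for glibc's HEAP_MAX_SIZE, heap_info and the heap_for_ptr()/arena_for_chunk() logic in malloc/arena.c.

#include <stdint.h>
#include <stdio.h>

/* Each non-main heap is assumed to live inside an
   EXAMPLE_HEAP_MAX_SIZE-aligned mapping, with its header at the base.  */
#define EXAMPLE_HEAP_MAX_SIZE (64u * 1024 * 1024)

typedef struct
{
  void *ar_ptr;   /* arena that owns this heap (trimmed-down header) */
} example_heap_info;

static inline example_heap_info *
heap_base_for_ptr (void *ptr)
{
  /* With a compile-time constant size this reduces to a single AND;
     with a runtime-sized heap the size must be loaded and the mask
     rebuilt on every call.  */
  return (example_heap_info *)
    ((uintptr_t) ptr & ~(uintptr_t) (EXAMPLE_HEAP_MAX_SIZE - 1));
}

int
main (void)
{
  /* Pretend a chunk sits 1 MiB into a heap mapped at 0x0c000000,
     a 64 MiB-aligned address.  */
  void *chunk = (void *) ((uintptr_t) 0x0c000000u + (1u << 20));
  printf ("heap base: %p\n", (void *) heap_base_for_ptr (chunk));
  return 0;
}

With the old runtime-sized heap_max_size(), the same computation has to fetch the size and construct the mask each time, which is presumably where the six AArch64 instructions mentioned in the commit message went.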
diff --git a/malloc/arena.c b/malloc/arena.c
index 353b634..405ae82 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -40,19 +40,20 @@
    mmap threshold, so that requests with a size just below that
    threshold can be fulfilled without creating too many heaps.  */
 
-/* When huge pages are used to create new arenas, the maximum and minimum
-   size are based on the runtime defined huge page size.  */
+/* HEAP_MAX_SIZE should be larger than the huge page size, otherwise heaps will
+   use not huge pages.  It is a constant so arena_for_chunk() is efficient.  */
 
 static inline size_t
 heap_min_size (void)
 {
-  return mp_.hp_pagesize == 0 ? HEAP_MIN_SIZE : mp_.hp_pagesize;
+  return mp_.hp_pagesize == 0 || mp_.hp_pagesize > HEAP_MAX_SIZE
+	 ? HEAP_MIN_SIZE : mp_.hp_pagesize;
 }
 
 static inline size_t
 heap_max_size (void)
 {
-  return mp_.hp_pagesize == 0 ? HEAP_MAX_SIZE : mp_.hp_pagesize * 4;
+  return HEAP_MAX_SIZE;
 }
 
 /***************************************************************************/
@@ -313,7 +314,7 @@ ptmalloc_init (void)
   TUNABLE_GET (mxfast, size_t, TUNABLE_CALLBACK (set_mxfast));
   TUNABLE_GET (hugetlb, size_t, TUNABLE_CALLBACK (set_hugetlb));
 
-  if (mp_.hp_pagesize > 0)
+  if (mp_.hp_pagesize > 0 && mp_.hp_pagesize <= heap_max_size ())
     {
       /* Force mmap for main arena instead of sbrk, so MAP_HUGETLB is always
	 tried.  Also tune the mmap threshold, so allocation smaller than the
@@ -460,7 +461,7 @@ alloc_new_heap (size_t size, size_t top_pad, size_t pagesize,
 static heap_info *
 new_heap (size_t size, size_t top_pad)
 {
-  if (__glibc_unlikely (mp_.hp_pagesize != 0))
+  if (mp_.hp_pagesize != 0 && mp_.hp_pagesize <= heap_max_size ())
     {
       heap_info *h = alloc_new_heap (size, top_pad, mp_.hp_pagesize,
				     mp_.hp_flags);
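Both hunks above add the same guard: the huge-page path is only taken when the configured huge page size fits inside the now-constant heap_max_size(); otherwise the allocator falls back to normal pages. A small sketch of that fallback decision, again not glibc source: heap_can_use_hugepages() and EXAMPLE_HEAP_MAX_SIZE are hypothetical stand-ins for the inline checks against mp_.hp_pagesize and HEAP_MAX_SIZE.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define EXAMPLE_HEAP_MAX_SIZE ((size_t) 64 * 1024 * 1024)

static bool
heap_can_use_hugepages (size_t hp_pagesize)
{
  /* Mirrors the guard above: a heap can only be backed by huge pages
     when one whole huge page fits inside the fixed maximum heap size.  */
  return hp_pagesize != 0 && hp_pagesize <= EXAMPLE_HEAP_MAX_SIZE;
}

int
main (void)
{
  printf ("2 MiB huge pages usable:   %d\n",
          heap_can_use_hugepages ((size_t) 2 * 1024 * 1024));    /* prints 1 */
  printf ("512 MiB huge pages usable: %d\n",
          heap_can_use_hugepages ((size_t) 512 * 1024 * 1024));  /* prints 0 */
  return 0;
}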