Diffstat (limited to 'malloc/arena.c')
-rw-r--r--  malloc/arena.c  62
1 file changed, 23 insertions, 39 deletions
diff --git a/malloc/arena.c b/malloc/arena.c
index cfb1ff8..90c526f 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -1,5 +1,5 @@
/* Malloc implementation for multiple threads without lock contention.
- Copyright (C) 2001-2024 Free Software Foundation, Inc.
+ Copyright (C) 2001-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -40,19 +40,20 @@
mmap threshold, so that requests with a size just below that
threshold can be fulfilled without creating too many heaps. */
-/* When huge pages are used to create new arenas, the maximum and minimum
- size are based on the runtime defined huge page size. */
+/* HEAP_MAX_SIZE should be larger than the huge page size, otherwise heaps
+   will not use huge pages. It is a constant so arena_for_chunk() is efficient. */
-static inline size_t
+static __always_inline size_t
heap_min_size (void)
{
- return mp_.hp_pagesize == 0 ? HEAP_MIN_SIZE : mp_.hp_pagesize;
+ return mp_.hp_pagesize == 0 || mp_.hp_pagesize > HEAP_MAX_SIZE
+ ? HEAP_MIN_SIZE : mp_.hp_pagesize;
}
-static inline size_t
+static __always_inline size_t
heap_max_size (void)
{
- return mp_.hp_pagesize == 0 ? HEAP_MAX_SIZE : mp_.hp_pagesize * 4;
+ return HEAP_MAX_SIZE;
}
/***************************************************************************/
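A minimal standalone sketch (not part of the patch; the constants and the hp_pagesize variable are hypothetical stand-ins for HEAP_MIN_SIZE, HEAP_MAX_SIZE and mp_.hp_pagesize) of the selection logic introduced above: the huge page size is only honoured as the minimum heap size when it fits inside a heap, otherwise the code falls back to the default minimum.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical values, for illustration only.  */
#define SKETCH_HEAP_MIN_SIZE (32 * 1024)
#define SKETCH_HEAP_MAX_SIZE (64 * 1024 * 1024)

/* Stands in for mp_.hp_pagesize (0 means huge pages were not requested).  */
static size_t hp_pagesize;

static size_t
heap_min_size_sketch (void)
{
  /* Honour the huge page size only when it fits inside a heap; otherwise
     fall back to the default minimum, mirroring the logic in the patch.  */
  return hp_pagesize == 0 || hp_pagesize > SKETCH_HEAP_MAX_SIZE
         ? SKETCH_HEAP_MIN_SIZE : hp_pagesize;
}

int
main (void)
{
  size_t cases[] = { 0, 2UL * 1024 * 1024, 512UL * 1024 * 1024 };
  for (int i = 0; i < 3; i++)
    {
      hp_pagesize = cases[i];
      printf ("hp_pagesize=%zu -> heap_min_size=%zu\n",
              hp_pagesize, heap_min_size_sketch ());
    }
  return 0;
}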
@@ -112,9 +113,6 @@ static mstate free_list;
acquired. */
__libc_lock_define_initialized (static, list_lock);
-/* Already initialized? */
-static bool __malloc_initialized = false;
-
/**************************************************************************/
@@ -140,14 +138,14 @@ static bool __malloc_initialized = false;
/* find the heap and corresponding arena for a given ptr */
-static inline heap_info *
+static __always_inline heap_info *
heap_for_ptr (void *ptr)
{
size_t max_size = heap_max_size ();
return PTR_ALIGN_DOWN (ptr, max_size);
}
-static inline struct malloc_state *
+static __always_inline struct malloc_state *
arena_for_chunk (mchunkptr ptr)
{
return chunk_main_arena (ptr) ? &main_arena : heap_for_ptr (ptr)->ar_ptr;
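For context, a small sketch of why heap_for_ptr can simply align the pointer down: every non-main heap is mapped at an address aligned to heap_max_size (), so clearing the low bits of any pointer inside the heap recovers the heap_info header at its start. The constant and addresses below are illustrative, not glibc values.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical heap size; the real value is the HEAP_MAX_SIZE constant.  */
#define SKETCH_HEAP_MAX_SIZE (64UL * 1024 * 1024)

static void *
align_down (void *ptr, uintptr_t alignment)
{
  /* Same idea as PTR_ALIGN_DOWN: mask off the low bits.  */
  return (void *) ((uintptr_t) ptr & ~(alignment - 1));
}

int
main (void)
{
  /* Pretend a heap was mapped at an aligned base and a chunk lives inside.  */
  uintptr_t heap_base = 0x7f0004000000UL;   /* already 64 MiB aligned */
  void *chunk = (void *) (heap_base + 0x1234);

  void *recovered = align_down (chunk, SKETCH_HEAP_MAX_SIZE);
  printf ("recovered heap base matches: %s\n",
          recovered == (void *) heap_base ? "yes" : "no");
  return 0;
}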
@@ -167,9 +165,6 @@ arena_for_chunk (mchunkptr ptr)
void
__malloc_fork_lock_parent (void)
{
- if (!__malloc_initialized)
- return;
-
/* We do not acquire free_list_lock here because we completely
reconstruct free_list in __malloc_fork_unlock_child. */
@@ -187,9 +182,6 @@ __malloc_fork_lock_parent (void)
void
__malloc_fork_unlock_parent (void)
{
- if (!__malloc_initialized)
- return;
-
for (mstate ar_ptr = &main_arena;; )
{
__libc_lock_unlock (ar_ptr->mutex);
@@ -203,9 +195,6 @@ __malloc_fork_unlock_parent (void)
void
__malloc_fork_unlock_child (void)
{
- if (!__malloc_initialized)
- return;
-
/* Push all arenas to the free list, except thread_arena, which is
attached to the current thread. */
__libc_lock_init (free_list_lock);
@@ -231,8 +220,8 @@ __malloc_fork_unlock_child (void)
}
#define TUNABLE_CALLBACK_FNDECL(__name, __type) \
-static inline int do_ ## __name (__type value); \
-static void \
+static __always_inline int do_ ## __name (__type value); \
+static void \
TUNABLE_CALLBACK (__name) (tunable_val_t *valp) \
{ \
__type value = (__type) (valp)->numval; \
@@ -258,14 +247,9 @@ TUNABLE_CALLBACK_FNDECL (set_hugetlb, size_t)
static void tcache_key_initialize (void);
#endif
-static void
-ptmalloc_init (void)
+void
+__ptmalloc_init (void)
{
- if (__malloc_initialized)
- return;
-
- __malloc_initialized = true;
-
#if USE_TCACHE
tcache_key_initialize ();
#endif
@@ -313,7 +297,7 @@ ptmalloc_init (void)
TUNABLE_GET (mxfast, size_t, TUNABLE_CALLBACK (set_mxfast));
TUNABLE_GET (hugetlb, size_t, TUNABLE_CALLBACK (set_hugetlb));
- if (mp_.hp_pagesize > 0)
+ if (mp_.hp_pagesize > 0 && mp_.hp_pagesize <= heap_max_size ())
{
/* Force mmap for main arena instead of sbrk, so MAP_HUGETLB is always
tried. Also tune the mmap threshold, so allocation smaller than the
@@ -389,7 +373,7 @@ alloc_new_heap (size_t size, size_t top_pad, size_t pagesize,
else if (size + top_pad <= max_size)
size += top_pad;
else if (size > max_size)
- return 0;
+ return NULL;
else
size = max_size;
size = ALIGN_UP (size, pagesize);
@@ -411,7 +395,7 @@ alloc_new_heap (size_t size, size_t top_pad, size_t pagesize,
}
if (p2 == MAP_FAILED)
{
- p1 = (char *) MMAP (0, max_size << 1, PROT_NONE, mmap_flags);
+ p1 = (char *) MMAP (NULL, max_size << 1, PROT_NONE, mmap_flags);
if (p1 != MAP_FAILED)
{
p2 = (char *) (((uintptr_t) p1 + (max_size - 1))
@@ -427,21 +411,21 @@ alloc_new_heap (size_t size, size_t top_pad, size_t pagesize,
{
/* Try to take the chance that an allocation of only max_size
is already aligned. */
- p2 = (char *) MMAP (0, max_size, PROT_NONE, mmap_flags);
+ p2 = (char *) MMAP (NULL, max_size, PROT_NONE, mmap_flags);
if (p2 == MAP_FAILED)
- return 0;
+ return NULL;
if ((unsigned long) p2 & (max_size - 1))
{
__munmap (p2, max_size);
- return 0;
+ return NULL;
}
}
}
if (__mprotect (p2, size, mtag_mmap_flags | PROT_READ | PROT_WRITE) != 0)
{
__munmap (p2, max_size);
- return 0;
+ return NULL;
}
/* Only consider the actual usable range.
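The hunk above relies on an over-map-and-trim trick: reserve twice the desired size with PROT_NONE so an aligned block must exist inside the reservation, take the first aligned address, give back the slack on both sides, and only then make the block accessible. A standalone sketch of that technique follows; it is not the patch itself, and the map_aligned_sketch name and the 64 MiB size are illustrative.

#include <stdio.h>
#include <stdint.h>
#include <sys/mman.h>

static void *
map_aligned_sketch (size_t size)   /* size must be a power of two */
{
  /* Reserve twice the size so an aligned block of `size' bytes must exist
     somewhere inside the reservation.  */
  char *p1 = mmap (NULL, size << 1, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (p1 == MAP_FAILED)
    return NULL;

  /* First size-aligned address within the reservation.  */
  char *p2 = (char *) (((uintptr_t) p1 + (size - 1)) & ~(uintptr_t) (size - 1));
  size_t lead = (size_t) (p2 - p1);

  /* Give back the unaligned prefix and the tail.  */
  if (lead != 0)
    munmap (p1, lead);
  munmap (p2 + size, size - lead);

  /* Make the aligned block usable, as the code above does with __mprotect.  */
  if (mprotect (p2, size, PROT_READ | PROT_WRITE) != 0)
    {
      munmap (p2, size);
      return NULL;
    }
  return p2;
}

int
main (void)
{
  size_t size = 1UL << 26;   /* 64 MiB, hypothetical heap size */
  void *p = map_aligned_sketch (size);
  printf ("aligned mapping: %s\n",
          p != NULL && ((uintptr_t) p & (size - 1)) == 0 ? "yes" : "no");
  return 0;
}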
@@ -460,7 +444,7 @@ alloc_new_heap (size_t size, size_t top_pad, size_t pagesize,
static heap_info *
new_heap (size_t size, size_t top_pad)
{
- if (__glibc_unlikely (mp_.hp_pagesize != 0))
+ if (mp_.hp_pagesize != 0 && mp_.hp_pagesize <= heap_max_size ())
{
heap_info *h = alloc_new_heap (size, top_pad, mp_.hp_pagesize,
mp_.hp_flags);
@@ -644,7 +628,7 @@ _int_new_arena (size_t size)
to deal with the large request via mmap_chunk(). */
h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
if (!h)
- return 0;
+ return NULL;
}
a = h->ar_ptr = (mstate) (h + 1);
malloc_init_state (a);