path: root/malloc/malloc.c
Diffstat (limited to 'malloc/malloc.c')
-rw-r--r--  malloc/malloc.c  184
1 file changed, 54 insertions(+), 130 deletions(-)
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 6da40ad..9d646ab 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -294,9 +294,9 @@
# define TCACHE_SMALL_BINS 64
# define TCACHE_LARGE_BINS 12 /* Up to 4M chunks */
# define TCACHE_MAX_BINS (TCACHE_SMALL_BINS + TCACHE_LARGE_BINS)
-# define MAX_TCACHE_SMALL_SIZE tidx2usize (TCACHE_MAX_BINS-1)
+# define MAX_TCACHE_SMALL_SIZE tidx2csize (TCACHE_SMALL_BINS-1)
-/* Only used to pre-fill the tunables. */
+# define tidx2csize(idx) (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE)
# define tidx2usize(idx) (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)
/* When "x" is from chunksize(). */
@@ -1323,8 +1323,8 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/* Check if REQ overflows when padded and aligned and if the resulting
value is less than PTRDIFF_T. Returns the requested size or
- MINSIZE in case the value is less than MINSIZE, or 0 if any of the
- previous checks fail. */
+ MINSIZE in case the value is less than MINSIZE, or SIZE_MAX if any
+ of the previous checks fail. */
static __always_inline size_t
checked_request2size (size_t req) __nonnull (1)
{
@@ -1332,7 +1332,7 @@ checked_request2size (size_t req) __nonnull (1)
"PTRDIFF_MAX is not more than half of SIZE_MAX");
if (__glibc_unlikely (req > PTRDIFF_MAX))
- return 0;
+ return SIZE_MAX;
/* When using tagged memory, we cannot share the end of the user
block with the header for the next chunk, so ensure that we
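(Why SIZE_MAX makes a better failure sentinel than 0: an oversized request now fails the same upper-bound comparisons callers already perform, so the explicit error branches removed in the later hunks become unnecessary. A rough standalone sketch, not glibc code; request_to_chunk_size and its constants are illustrative stand-ins for a 64-bit layout.)

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for checked_request2size: pad and align the
       request, or return SIZE_MAX if it is too large.  */
    static size_t
    request_to_chunk_size (size_t req)
    {
      if (req > PTRDIFF_MAX)
        return SIZE_MAX;
      size_t nb = (req + 8 + 15) & ~(size_t) 15;
      return nb < 32 ? 32 : nb;
    }

    int
    main (void)
    {
      size_t tcache_max_bytes = 1041;   /* hypothetical exclusive limit */
      size_t nb = request_to_chunk_size ((size_t) -1);

      /* SIZE_MAX already fails the "fits in tcache?" comparison, so no
         separate nb == 0 error branch is needed before it.  */
      printf ("served from tcache: %s\n", nb < tcache_max_bytes ? "yes" : "no");
      return 0;
    }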
@@ -1871,6 +1871,7 @@ struct malloc_par
INTERNAL_SIZE_T arena_max;
/* Transparent Large Page support. */
+ enum malloc_thp_mode_t thp_mode;
INTERNAL_SIZE_T thp_pagesize;
/* A value different than 0 means to align mmap allocation to hp_pagesize
add hp_flags on flags. */
@@ -1927,12 +1928,13 @@ static struct malloc_par mp_ =
.mmap_threshold = DEFAULT_MMAP_THRESHOLD,
.trim_threshold = DEFAULT_TRIM_THRESHOLD,
#define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
- .arena_test = NARENAS_FROM_NCORES (1)
+ .arena_test = NARENAS_FROM_NCORES (1),
+ .thp_mode = malloc_thp_mode_not_supported
#if USE_TCACHE
,
.tcache_count = TCACHE_FILL_COUNT,
.tcache_small_bins = TCACHE_SMALL_BINS,
- .tcache_max_bytes = MAX_TCACHE_SMALL_SIZE,
+ .tcache_max_bytes = MAX_TCACHE_SMALL_SIZE + 1,
.tcache_unsorted_limit = 0 /* No limit. */
#endif
};
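(The +1 turns tcache_max_bytes into an exclusive bound, pairing with the single nb < mp_.tcache_max_bytes test used on the fast path: the largest small-bin chunk is still accepted, and the SIZE_MAX error value never is. A tiny check of that invariant, using the assumed 64-bit value MAX_TCACHE_SMALL_SIZE = 1040.)

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    int
    main (void)
    {
      size_t max_tcache_small_size = 1040;                    /* assumed tidx2csize (63) */
      size_t tcache_max_bytes = max_tcache_small_size + 1;    /* exclusive bound */

      /* The largest cacheable chunk size passes the single '<' test...  */
      assert (max_tcache_small_size < tcache_max_bytes);
      /* ...and the SIZE_MAX error value from checked_request2size never does.  */
      assert (!(SIZE_MAX < tcache_max_bytes));
      return 0;
    }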
@@ -2011,6 +2013,11 @@ static inline void
madvise_thp (void *p, INTERNAL_SIZE_T size)
{
#ifdef MADV_HUGEPAGE
+ /* Only use __madvise if the system is using 'madvise' mode.
+ Otherwise the call is wasteful. */
+ if (mp_.thp_mode != malloc_thp_mode_madvise)
+ return;
+
/* Do not consider areas smaller than a huge page or if the tunable is
not active. */
if (mp_.thp_pagesize == 0 || size < mp_.thp_pagesize)
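(For reference, the hint that madvise_thp issues can be reproduced outside glibc; a standalone sketch that assumes Linux with a 2 MiB transparent huge page size.)

    #include <stddef.h>
    #include <stdio.h>
    #include <sys/mman.h>

    int
    main (void)
    {
      size_t thp_size = 2 * 1024 * 1024;   /* assumed THP size */
      size_t len = 4 * thp_size;

      void *p = mmap (NULL, len, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED)
        return 1;

    #ifdef MADV_HUGEPAGE
      /* Worth issuing only if the region spans at least one huge page and
         THP is in 'madvise' mode (the check the patch adds to madvise_thp).  */
      if (len >= thp_size && madvise (p, len, MADV_HUGEPAGE) != 0)
        perror ("madvise");
    #endif

      munmap (p, len);
      return 0;
    }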
@@ -2068,12 +2075,13 @@ static void
do_check_chunk (mstate av, mchunkptr p)
{
unsigned long sz = chunksize (p);
- /* min and max possible addresses assuming contiguous allocation */
- char *max_address = (char *) (av->top) + chunksize (av->top);
- char *min_address = max_address - av->system_mem;
if (!chunk_is_mmapped (p))
{
+ /* min and max possible addresses assuming contiguous allocation */
+ char *max_address = (char *) (av->top) + chunksize (av->top);
+ char *min_address = max_address - av->system_mem;
+
/* Has legal address ... */
if (p != av->top)
{
@@ -2093,11 +2101,6 @@ do_check_chunk (mstate av, mchunkptr p)
}
else
{
- /* address is outside main heap */
- if (contiguous (av) && av->top != initial_top (av))
- {
- assert (((char *) p) < min_address || ((char *) p) >= max_address);
- }
/* chunk is page-aligned */
assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0);
/* mem is aligned */
@@ -2397,83 +2400,31 @@ do_check_malloc_state (mstate av)
/* ----------- Routines dealing with system allocation -------------- */
-/*
- sysmalloc handles malloc cases requiring more memory from the system.
- On entry, it is assumed that av->top does not have enough
- space to service request for nb bytes, thus requiring that av->top
- be extended or replaced.
- */
+/* Allocate a mmap chunk - used for large block sizes or as a fallback.
+ Round up size to nearest page. Add padding if MALLOC_ALIGNMENT is
+ larger than CHUNK_HDR_SZ. Add SIZE_SZ at the end since there is no
+ following chunk whose prev_size field could be used. */
static void *
-sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags, mstate av)
+sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags)
{
- long int size;
-
- /*
- Round up size to nearest page. For mmapped chunks, the overhead is one
- SIZE_SZ unit larger than for normal chunks, because there is no
- following chunk whose prev_size field could be used.
-
- See the front_misalign handling below, for glibc there is no need for
- further alignments unless we have have high alignment.
- */
- if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
- size = ALIGN_UP (nb + SIZE_SZ, pagesize);
- else
- size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
-
- /* Don't try if size wraps around 0. */
- if ((unsigned long) (size) <= (unsigned long) (nb))
- return MAP_FAILED;
+ size_t padding = MALLOC_ALIGNMENT - CHUNK_HDR_SZ;
+ size_t size = ALIGN_UP (nb + padding + SIZE_SZ, pagesize);
char *mm = (char *) MMAP (NULL, size,
mtag_mmap_flags | PROT_READ | PROT_WRITE,
extra_flags);
if (mm == MAP_FAILED)
return mm;
-
-#ifdef MAP_HUGETLB
- if (!(extra_flags & MAP_HUGETLB))
+ if (extra_flags == 0)
madvise_thp (mm, size);
-#endif
__set_vma_name (mm, size, " glibc: malloc");
- /*
- The offset to the start of the mmapped region is stored in the prev_size
- field of the chunk. This allows us to adjust returned start address to
- meet alignment requirements here and in memalign(), and still be able to
- compute proper address argument for later munmap in free() and realloc().
- */
-
- INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
-
- if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
- {
- /* For glibc, chunk2mem increases the address by CHUNK_HDR_SZ and
- MALLOC_ALIGN_MASK is CHUNK_HDR_SZ-1. Each mmap'ed area is page
- aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
- assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
- front_misalign = 0;
- }
- else
- front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
-
- mchunkptr p; /* the allocated/returned chunk */
-
- if (front_misalign > 0)
- {
- ptrdiff_t correction = MALLOC_ALIGNMENT - front_misalign;
- p = (mchunkptr) (mm + correction);
- set_prev_size (p, correction);
- set_head (p, (size - correction) | IS_MMAPPED);
- }
- else
- {
- p = (mchunkptr) mm;
- set_prev_size (p, 0);
- set_head (p, size | IS_MMAPPED);
- }
+ /* Store offset to start of mmap in prev_size. */
+ mchunkptr p = (mchunkptr) (mm + padding);
+ set_prev_size (p, padding);
+ set_head (p, (size - padding) | IS_MMAPPED);
/* update statistics */
int new = atomic_fetch_add_relaxed (&mp_.n_mmaps, 1) + 1;
@@ -2483,7 +2434,7 @@ sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags, mstate av)
sum = atomic_fetch_add_relaxed (&mp_.mmapped_mem, size) + size;
atomic_max (&mp_.max_mmapped_mem, sum);
- check_chunk (av, p);
+ check_chunk (NULL, p);
return chunk2mem (p);
}
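(A worked example of the simplified size computation, under assumed 64-bit values CHUNK_HDR_SZ = 16, SIZE_SZ = 8 and 4 KiB pages; with the usual MALLOC_ALIGNMENT == CHUNK_HDR_SZ the padding is zero and the formula reduces to the first branch of the old code.)

    #include <stddef.h>
    #include <stdio.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t) (a) - 1))

    int
    main (void)
    {
      /* Assumed 64-bit values; the real ones are malloc-internal.  */
      size_t chunk_hdr_sz = 16, size_sz = 8, pagesize = 4096;
      size_t malloc_alignment = 16;            /* typically == CHUNK_HDR_SZ */

      size_t nb = 200000;                      /* request already in chunk form */
      size_t padding = malloc_alignment - chunk_hdr_sz;         /* 0 here */
      size_t size = ALIGN_UP (nb + padding + size_sz, pagesize);

      printf ("mmap size %zu, padding %zu\n", size, padding);   /* 200704, 0 */
      return 0;
    }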
@@ -2519,10 +2470,8 @@ sysmalloc_mmap_fallback (long int *s, INTERNAL_SIZE_T nb,
if (mbrk == MAP_FAILED)
return MAP_FAILED;
-#ifdef MAP_HUGETLB
- if (!(extra_flags & MAP_HUGETLB))
+ if (extra_flags == 0)
madvise_thp (mbrk, size);
-#endif
__set_vma_name (mbrk, size, " glibc: malloc");
@@ -2577,11 +2526,11 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
{
/* There is no need to issue the THP madvise call if Huge Pages are
used directly. */
- mm = sysmalloc_mmap (nb, mp_.hp_pagesize, mp_.hp_flags, av);
+ mm = sysmalloc_mmap (nb, mp_.hp_pagesize, mp_.hp_flags);
if (mm != MAP_FAILED)
return mm;
}
- mm = sysmalloc_mmap (nb, pagesize, 0, av);
+ mm = sysmalloc_mmap (nb, pagesize, 0);
if (mm != MAP_FAILED)
return mm;
tried_mmap = true;
@@ -2665,7 +2614,7 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
/* We can at least try to use to mmap memory. If new_heap fails
it is unlikely that trying to allocate huge pages will
succeed. */
- char *mm = sysmalloc_mmap (nb, pagesize, 0, av);
+ char *mm = sysmalloc_mmap (nb, pagesize, 0);
if (mm != MAP_FAILED)
return mm;
}
@@ -2693,7 +2642,6 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
previous calls. Otherwise, we correct to page-align below.
*/
-#ifdef MADV_HUGEPAGE
/* Defined in brk.c. */
extern void *__curbrk;
if (__glibc_unlikely (mp_.thp_pagesize != 0))
@@ -2703,7 +2651,6 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
size = top - (uintptr_t) __curbrk;
}
else
-#endif
size = ALIGN_UP (size, GLRO(dl_pagesize));
/*
@@ -2978,11 +2925,9 @@ systrim (size_t pad, mstate av)
return 0;
/* Release in pagesize units and round down to the nearest page. */
-#ifdef MADV_HUGEPAGE
if (__glibc_unlikely (mp_.thp_pagesize != 0))
extra = ALIGN_DOWN (top_area - pad, mp_.thp_pagesize);
else
-#endif
extra = ALIGN_DOWN (top_area - pad, GLRO(dl_pagesize));
if (extra == 0)
@@ -3208,11 +3153,10 @@ tcache_get_n (size_t tc_idx, tcache_entry **ep, bool mangled)
if (__glibc_unlikely (misaligned_mem (e)))
malloc_printerr ("malloc(): unaligned tcache chunk detected");
- void *ne = e == NULL ? NULL : REVEAL_PTR (e->next);
if (!mangled)
- *ep = ne;
+ *ep = REVEAL_PTR (e->next);
else
- *ep = PROTECT_PTR (ep, ne);
+ *ep = PROTECT_PTR (ep, REVEAL_PTR (e->next));
++(tcache->num_slots[tc_idx]);
e->key = 0;
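(Background on the PROTECT_PTR/REVEAL_PTR calls being reshuffled here: tcache next pointers are safe-linked by XORing them with the address of the slot that stores them, shifted by the page-offset bits. A rough standalone sketch of that scheme, not the exact glibc macros.)

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified safe-linking: XOR a pointer with the address of the slot
       holding it, shifted right by the page-offset bits.  Applying the same
       operation twice recovers the original pointer.  */
    static void *
    protect_ptr (void *pos, void *ptr)
    {
      return (void *) (((uintptr_t) pos >> 12) ^ (uintptr_t) ptr);
    }

    int
    main (void)
    {
      void *slot;                       /* stands in for &e->next */
      void *next = &slot;               /* stands in for the next tcache entry */

      void *mangled = protect_ptr (&slot, next);       /* like PROTECT_PTR */
      void *revealed = protect_ptr (&slot, mangled);   /* like REVEAL_PTR */

      assert (revealed == next);
      printf ("mangled %p, revealed %p\n", mangled, revealed);
      return 0;
    }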
@@ -3229,7 +3173,7 @@ tcache_put (mchunkptr chunk, size_t tc_idx)
static __always_inline void *
tcache_get (size_t tc_idx)
{
- return tcache_get_n (tc_idx, & tcache->entries[tc_idx], false);
+ return tcache_get_n (tc_idx, &tcache->entries[tc_idx], false);
}
static __always_inline tcache_entry **
@@ -3465,11 +3409,6 @@ __libc_malloc (size_t bytes)
{
#if USE_TCACHE
size_t nb = checked_request2size (bytes);
- if (nb == 0)
- {
- __set_errno (ENOMEM);
- return NULL;
- }
if (nb < mp_.tcache_max_bytes)
{
@@ -3614,12 +3553,12 @@ __libc_realloc (void *oldmem, size_t bytes)
|| misaligned_chunk (oldp)))
malloc_printerr ("realloc(): invalid pointer");
- nb = checked_request2size (bytes);
- if (nb == 0)
+ if (bytes > PTRDIFF_MAX)
{
__set_errno (ENOMEM);
return NULL;
}
+ nb = checked_request2size (bytes);
if (chunk_is_mmapped (oldp))
{
@@ -3745,13 +3684,7 @@ _mid_memalign (size_t alignment, size_t bytes)
}
#if USE_TCACHE
- size_t nb = checked_request2size (bytes);
- if (nb == 0)
- {
- __set_errno (ENOMEM);
- return NULL;
- }
- void *victim = tcache_get_align (nb, alignment);
+ void *victim = tcache_get_align (checked_request2size (bytes), alignment);
if (victim != NULL)
return tag_new_usable (victim);
#endif
@@ -3912,11 +3845,7 @@ __libc_calloc (size_t n, size_t elem_size)
#if USE_TCACHE
size_t nb = checked_request2size (bytes);
- if (nb == 0)
- {
- __set_errno (ENOMEM);
- return NULL;
- }
+
if (nb < mp_.tcache_max_bytes)
{
if (__glibc_unlikely (tcache == NULL))
@@ -3991,12 +3920,12 @@ _int_malloc (mstate av, size_t bytes)
aligned.
*/
- nb = checked_request2size (bytes);
- if (nb == 0)
+ if (bytes > PTRDIFF_MAX)
{
__set_errno (ENOMEM);
return NULL;
}
+ nb = checked_request2size (bytes);
/* There are no usable arenas. Fall back to sysmalloc to get a chunk from
mmap. */
@@ -5151,12 +5080,12 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
unsigned long remainder_size; /* its size */
INTERNAL_SIZE_T size;
- nb = checked_request2size (bytes);
- if (nb == 0)
+ if (bytes > PTRDIFF_MAX)
{
__set_errno (ENOMEM);
return NULL;
}
+ nb = checked_request2size (bytes);
/* We can't check tcache here because we hold the arena lock, which
tcache doesn't expect. We expect it has been checked
@@ -5587,15 +5516,13 @@ do_set_arena_max (size_t value)
static __always_inline int
do_set_tcache_max (size_t value)
{
+ if (value > PTRDIFF_MAX)
+ return 0;
+
size_t nb = request2size (value);
size_t tc_idx = csize2tidx (nb);
- /* To check that value is not too big and request2size does not return an
- overflown value. */
- if (value > nb)
- return 0;
-
- if (nb > MAX_TCACHE_SMALL_SIZE)
+ if (tc_idx >= TCACHE_SMALL_BINS)
tc_idx = large_csize2tidx (nb);
LIBC_PROBE (memory_tunable_tcache_max_bytes, 2, value, mp_.tcache_max_bytes);
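(A worked example of the new bound check: simplified mirrors of request2size and csize2tidx, with constants assumed for a 64-bit build, show where a tunable value stops mapping into the small bins and falls through to large_csize2tidx.)

    #include <stddef.h>
    #include <stdio.h>

    /* Assumed 64-bit values and simplified mirrors of glibc's macros.  */
    #define SIZE_SZ 8
    #define MALLOC_ALIGNMENT 16
    #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
    #define MINSIZE 32
    #define TCACHE_SMALL_BINS 64

    #define request2size(req) \
      (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? MINSIZE : \
       ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
    #define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)

    int
    main (void)
    {
      /* 1032 still maps into the last small bin (index 63)...  */
      size_t nb = request2size ((size_t) 1032);
      printf ("nb %zu, tidx %zu, small bin: %d\n", nb,
              (size_t) csize2tidx (nb), csize2tidx (nb) < TCACHE_SMALL_BINS);

      /* ...while 1033 overflows into the large-bin range.  */
      nb = request2size ((size_t) 1033);
      printf ("nb %zu, tidx %zu, small bin: %d\n", nb,
              (size_t) csize2tidx (nb), csize2tidx (nb) < TCACHE_SMALL_BINS);
      return 0;
    }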
@@ -5604,7 +5531,7 @@ do_set_tcache_max (size_t value)
{
if (tc_idx < TCACHE_SMALL_BINS)
mp_.tcache_small_bins = tc_idx + 1;
- mp_.tcache_max_bytes = nb;
+ mp_.tcache_max_bytes = nb + 1;
return 1;
}
@@ -5649,12 +5576,9 @@ do_set_hugetlb (size_t value)
{
if (value == 1)
{
- enum malloc_thp_mode_t thp_mode = __malloc_thp_mode ();
- /*
- Only enable THP madvise usage if system does support it and
- has 'madvise' mode. Otherwise the madvise() call is wasteful.
- */
- if (thp_mode == malloc_thp_mode_madvise)
+ mp_.thp_mode = __malloc_thp_mode ();
+ if (mp_.thp_mode == malloc_thp_mode_madvise
+ || mp_.thp_mode == malloc_thp_mode_always)
mp_.thp_pagesize = __malloc_default_thp_pagesize ();
}
else if (value >= 2)
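(The cached mp_.thp_mode value comes from __malloc_thp_mode, which on Linux inspects the transparent-hugepage 'enabled' sysfs setting; a rough userspace equivalent, assuming the usual "always [madvise] never" format of that file.)

    #include <stdio.h>
    #include <string.h>

    int
    main (void)
    {
      char buf[128] = "";
      FILE *f = fopen ("/sys/kernel/mm/transparent_hugepage/enabled", "r");
      if (f != NULL)
        {
          if (fgets (buf, sizeof buf, f) == NULL)
            buf[0] = '\0';
          fclose (f);
        }

      /* The kernel marks the active mode with brackets.  */
      const char *mode = "not supported";
      if (strstr (buf, "[always]") != NULL)
        mode = "always";
      else if (strstr (buf, "[madvise]") != NULL)
        mode = "madvise";
      else if (strstr (buf, "[never]") != NULL)
        mode = "never";

      printf ("THP mode: %s\n", mode);
      return 0;
    }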