Diffstat (limited to 'malloc/malloc.c')
-rw-r--r-- | malloc/malloc.c | 664
1 file changed, 343 insertions, 321 deletions
diff --git a/malloc/malloc.c b/malloc/malloc.c index 9f44f5a..9d646ab 100644 --- a/malloc/malloc.c +++ b/malloc/malloc.c @@ -291,10 +291,12 @@ #if USE_TCACHE /* We want 64 entries. This is an arbitrary limit, which tunables can reduce. */ -# define TCACHE_MAX_BINS 64 -# define MAX_TCACHE_SIZE tidx2usize (TCACHE_MAX_BINS-1) +# define TCACHE_SMALL_BINS 64 +# define TCACHE_LARGE_BINS 12 /* Up to 4M chunks */ +# define TCACHE_MAX_BINS (TCACHE_SMALL_BINS + TCACHE_LARGE_BINS) +# define MAX_TCACHE_SMALL_SIZE tidx2csize (TCACHE_SMALL_BINS-1) -/* Only used to pre-fill the tunables. */ +# define tidx2csize(idx) (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE) # define tidx2usize(idx) (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ) /* When "x" is from chunksize(). */ @@ -313,7 +315,7 @@ # define TCACHE_FILL_COUNT 7 /* Maximum chunks in tcache bins for tunables. This value must fit the range - of tcache->counts[] entries, else they may overflow. */ + of tcache->num_slots[] entries, else they may overflow. */ # define MAX_TCACHE_COUNT UINT16_MAX #endif @@ -588,9 +590,12 @@ tag_at (void *ptr) differs across systems, but is in all cases less than the maximum representable value of a size_t. */ -void* __libc_malloc(size_t); +void *__libc_malloc (size_t); libc_hidden_proto (__libc_malloc) +static void *__libc_calloc2 (size_t); +static void *__libc_malloc2 (size_t); + /* free(void* p) Releases the chunk of memory pointed to by p, that had been previously @@ -1096,7 +1101,7 @@ static void* _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T, INTERNAL_SIZE_T); static void* _int_memalign(mstate, size_t, size_t); #if IS_IN (libc) -static void* _mid_memalign(size_t, size_t, void *); +static void* _mid_memalign(size_t, size_t); #endif #if USE_TCACHE @@ -1304,11 +1309,9 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ /* Check if m has acceptable alignment */ -#define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0) +#define misaligned_mem(m) ((uintptr_t)(m) & MALLOC_ALIGN_MASK) -#define misaligned_chunk(p) \ - ((uintptr_t)(MALLOC_ALIGNMENT == CHUNK_HDR_SZ ? (p) : chunk2mem (p)) \ - & MALLOC_ALIGN_MASK) +#define misaligned_chunk(p) (misaligned_mem( chunk2mem (p))) /* pad request bytes into a usable size -- internal version */ /* Note: This must be a macro that evaluates to a compile time constant @@ -1320,8 +1323,8 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ /* Check if REQ overflows when padded and aligned and if the resulting value is less than PTRDIFF_T. Returns the requested size or - MINSIZE in case the value is less than MINSIZE, or 0 if any of the - previous checks fail. */ + MINSIZE in case the value is less than MINSIZE, or SIZE_MAX if any + of the previous checks fail. 
*/ static __always_inline size_t checked_request2size (size_t req) __nonnull (1) { @@ -1329,7 +1332,7 @@ checked_request2size (size_t req) __nonnull (1) "PTRDIFF_MAX is not more than half of SIZE_MAX"); if (__glibc_unlikely (req > PTRDIFF_MAX)) - return 0; + return SIZE_MAX; /* When using tagged memory, we cannot share the end of the user block with the header for the next chunk, so ensure that we @@ -1618,7 +1621,7 @@ unlink_chunk (mstate av, mchunkptr p) mchunkptr fd = p->fd; mchunkptr bk = p->bk; - if (__builtin_expect (fd->bk != p || bk->fd != p, 0)) + if (__glibc_unlikely (fd->bk != p || bk->fd != p)) malloc_printerr ("corrupted double-linked list"); fd->bk = bk; @@ -1868,6 +1871,7 @@ struct malloc_par INTERNAL_SIZE_T arena_max; /* Transparent Large Page support. */ + enum malloc_thp_mode_t thp_mode; INTERNAL_SIZE_T thp_pagesize; /* A value different than 0 means to align mmap allocation to hp_pagesize add hp_flags on flags. */ @@ -1891,8 +1895,8 @@ struct malloc_par char *sbrk_base; #if USE_TCACHE - /* Maximum number of buckets to use. */ - size_t tcache_bins; + /* Maximum number of small buckets to use. */ + size_t tcache_small_bins; size_t tcache_max_bytes; /* Maximum number of chunks in each bucket. */ size_t tcache_count; @@ -1924,12 +1928,13 @@ static struct malloc_par mp_ = .mmap_threshold = DEFAULT_MMAP_THRESHOLD, .trim_threshold = DEFAULT_TRIM_THRESHOLD, #define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8)) - .arena_test = NARENAS_FROM_NCORES (1) + .arena_test = NARENAS_FROM_NCORES (1), + .thp_mode = malloc_thp_mode_not_supported #if USE_TCACHE , .tcache_count = TCACHE_FILL_COUNT, - .tcache_bins = TCACHE_MAX_BINS, - .tcache_max_bytes = tidx2usize (TCACHE_MAX_BINS-1), + .tcache_small_bins = TCACHE_SMALL_BINS, + .tcache_max_bytes = MAX_TCACHE_SMALL_SIZE + 1, .tcache_unsorted_limit = 0 /* No limit. */ #endif }; @@ -1937,7 +1942,7 @@ static struct malloc_par mp_ = /* Initialize a malloc_state struct. - This is called from ptmalloc_init () or from _int_new_arena () + This is called from __ptmalloc_init () or from _int_new_arena () when creating a new arena. */ @@ -2008,6 +2013,11 @@ static inline void madvise_thp (void *p, INTERNAL_SIZE_T size) { #ifdef MADV_HUGEPAGE + /* Only use __madvise if the system is using 'madvise' mode. + Otherwise the call is wasteful. */ + if (mp_.thp_mode != malloc_thp_mode_madvise) + return; + /* Do not consider areas smaller than a huge page or if the tunable is not active. */ if (mp_.thp_pagesize == 0 || size < mp_.thp_pagesize) @@ -2065,12 +2075,13 @@ static void do_check_chunk (mstate av, mchunkptr p) { unsigned long sz = chunksize (p); - /* min and max possible addresses assuming contiguous allocation */ - char *max_address = (char *) (av->top) + chunksize (av->top); - char *min_address = max_address - av->system_mem; if (!chunk_is_mmapped (p)) { + /* min and max possible addresses assuming contiguous allocation */ + char *max_address = (char *) (av->top) + chunksize (av->top); + char *min_address = max_address - av->system_mem; + /* Has legal address ... 
*/ if (p != av->top) { @@ -2090,15 +2101,10 @@ do_check_chunk (mstate av, mchunkptr p) } else { - /* address is outside main heap */ - if (contiguous (av) && av->top != initial_top (av)) - { - assert (((char *) p) < min_address || ((char *) p) >= max_address); - } /* chunk is page-aligned */ assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0); /* mem is aligned */ - assert (aligned_OK (chunk2mem (p))); + assert (!misaligned_chunk (p)); } } @@ -2122,7 +2128,7 @@ do_check_free_chunk (mstate av, mchunkptr p) if ((unsigned long) (sz) >= MINSIZE) { assert ((sz & MALLOC_ALIGN_MASK) == 0); - assert (aligned_OK (chunk2mem (p))); + assert (!misaligned_chunk (p)); /* ... matching footer field */ assert (prev_size (next_chunk (p)) == sz); /* ... and is fully consolidated */ @@ -2201,7 +2207,7 @@ do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s) assert ((sz & MALLOC_ALIGN_MASK) == 0); assert ((unsigned long) (sz) >= MINSIZE); /* ... and alignment */ - assert (aligned_OK (chunk2mem (p))); + assert (!misaligned_chunk (p)); /* chunk is less than MINSIZE more than request */ assert ((long) (sz) - (long) (s) >= 0); assert ((long) (sz) - (long) (s + MINSIZE) < 0); @@ -2394,83 +2400,31 @@ do_check_malloc_state (mstate av) /* ----------- Routines dealing with system allocation -------------- */ -/* - sysmalloc handles malloc cases requiring more memory from the system. - On entry, it is assumed that av->top does not have enough - space to service request for nb bytes, thus requiring that av->top - be extended or replaced. - */ +/* Allocate a mmap chunk - used for large block sizes or as a fallback. + Round up size to nearest page. Add padding if MALLOC_ALIGNMENT is + larger than CHUNK_HDR_SZ. Add SIZE_SZ at the end since there is no + following chunk whose prev_size field could be used. */ static void * -sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags, mstate av) +sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags) { - long int size; - - /* - Round up size to nearest page. For mmapped chunks, the overhead is one - SIZE_SZ unit larger than for normal chunks, because there is no - following chunk whose prev_size field could be used. - - See the front_misalign handling below, for glibc there is no need for - further alignments unless we have have high alignment. - */ - if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ) - size = ALIGN_UP (nb + SIZE_SZ, pagesize); - else - size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize); - - /* Don't try if size wraps around 0. */ - if ((unsigned long) (size) <= (unsigned long) (nb)) - return MAP_FAILED; + size_t padding = MALLOC_ALIGNMENT - CHUNK_HDR_SZ; + size_t size = ALIGN_UP (nb + padding + SIZE_SZ, pagesize); char *mm = (char *) MMAP (NULL, size, mtag_mmap_flags | PROT_READ | PROT_WRITE, extra_flags); if (mm == MAP_FAILED) return mm; - -#ifdef MAP_HUGETLB - if (!(extra_flags & MAP_HUGETLB)) + if (extra_flags == 0) madvise_thp (mm, size); -#endif __set_vma_name (mm, size, " glibc: malloc"); - /* - The offset to the start of the mmapped region is stored in the prev_size - field of the chunk. This allows us to adjust returned start address to - meet alignment requirements here and in memalign(), and still be able to - compute proper address argument for later munmap in free() and realloc(). 
- */ - - INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */ - - if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ) - { - /* For glibc, chunk2mem increases the address by CHUNK_HDR_SZ and - MALLOC_ALIGN_MASK is CHUNK_HDR_SZ-1. Each mmap'ed area is page - aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */ - assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0); - front_misalign = 0; - } - else - front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK; - - mchunkptr p; /* the allocated/returned chunk */ - - if (front_misalign > 0) - { - ptrdiff_t correction = MALLOC_ALIGNMENT - front_misalign; - p = (mchunkptr) (mm + correction); - set_prev_size (p, correction); - set_head (p, (size - correction) | IS_MMAPPED); - } - else - { - p = (mchunkptr) mm; - set_prev_size (p, 0); - set_head (p, size | IS_MMAPPED); - } + /* Store offset to start of mmap in prev_size. */ + mchunkptr p = (mchunkptr) (mm + padding); + set_prev_size (p, padding); + set_head (p, (size - padding) | IS_MMAPPED); /* update statistics */ int new = atomic_fetch_add_relaxed (&mp_.n_mmaps, 1) + 1; @@ -2480,7 +2434,7 @@ sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags, mstate av) sum = atomic_fetch_add_relaxed (&mp_.mmapped_mem, size) + size; atomic_max (&mp_.max_mmapped_mem, sum); - check_chunk (av, p); + check_chunk (NULL, p); return chunk2mem (p); } @@ -2516,10 +2470,8 @@ sysmalloc_mmap_fallback (long int *s, INTERNAL_SIZE_T nb, if (mbrk == MAP_FAILED) return MAP_FAILED; -#ifdef MAP_HUGETLB - if (!(extra_flags & MAP_HUGETLB)) + if (extra_flags == 0) madvise_thp (mbrk, size); -#endif __set_vma_name (mbrk, size, " glibc: malloc"); @@ -2574,11 +2526,11 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av) { /* There is no need to issue the THP madvise call if Huge Pages are used directly. */ - mm = sysmalloc_mmap (nb, mp_.hp_pagesize, mp_.hp_flags, av); + mm = sysmalloc_mmap (nb, mp_.hp_pagesize, mp_.hp_flags); if (mm != MAP_FAILED) return mm; } - mm = sysmalloc_mmap (nb, pagesize, 0, av); + mm = sysmalloc_mmap (nb, pagesize, 0); if (mm != MAP_FAILED) return mm; tried_mmap = true; @@ -2662,7 +2614,7 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av) /* We can at least try to use to mmap memory. If new_heap fails it is unlikely that trying to allocate huge pages will succeed. */ - char *mm = sysmalloc_mmap (nb, pagesize, 0, av); + char *mm = sysmalloc_mmap (nb, pagesize, 0); if (mm != MAP_FAILED) return mm; } @@ -2690,7 +2642,6 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av) previous calls. Otherwise, we correct to page-align below. */ -#ifdef MADV_HUGEPAGE /* Defined in brk.c. */ extern void *__curbrk; if (__glibc_unlikely (mp_.thp_pagesize != 0)) @@ -2700,7 +2651,6 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av) size = top - (uintptr_t) __curbrk; } else -#endif size = ALIGN_UP (size, GLRO(dl_pagesize)); /* @@ -2975,11 +2925,9 @@ systrim (size_t pad, mstate av) return 0; /* Release in pagesize units and round down to the nearest page. 
*/ -#ifdef MADV_HUGEPAGE if (__glibc_unlikely (mp_.thp_pagesize != 0)) extra = ALIGN_DOWN (top_area - pad, mp_.thp_pagesize); else -#endif extra = ALIGN_DOWN (top_area - pad, GLRO(dl_pagesize)); if (extra == 0) @@ -3089,7 +3037,7 @@ mremap_chunk (mchunkptr p, size_t new_size) p = (mchunkptr) (cp + offset); - assert (aligned_OK (chunk2mem (p))); + assert (!misaligned_chunk (p)); assert (prev_size (p) == offset); set_head (p, (new_size - offset) | IS_MMAPPED); @@ -3117,12 +3065,13 @@ typedef struct tcache_entry /* There is one of these for each thread, which contains the per-thread cache (hence "tcache_perthread_struct"). Keeping - overall size low is mildly important. Note that COUNTS and ENTRIES - are redundant (we could have just counted the linked list each - time), this is for performance reasons. */ + overall size low is mildly important. The 'entries' field is linked list of + free blocks, while 'num_slots' contains the number of free blocks that can + be added. Each bin may allow a different maximum number of free blocks, + and can be disabled by initializing 'num_slots' to zero. */ typedef struct tcache_perthread_struct { - uint16_t counts[TCACHE_MAX_BINS]; + uint16_t num_slots[TCACHE_MAX_BINS]; tcache_entry *entries[TCACHE_MAX_BINS]; } tcache_perthread_struct; @@ -3156,10 +3105,19 @@ tcache_key_initialize (void) } } +static __always_inline size_t +large_csize2tidx(size_t nb) +{ + size_t idx = TCACHE_SMALL_BINS + + __builtin_clz (MAX_TCACHE_SMALL_SIZE) + - __builtin_clz (nb); + return idx; +} + /* Caller must ensure that we know tc_idx is valid and there's room for more chunks. */ static __always_inline void -tcache_put (mchunkptr chunk, size_t tc_idx) +tcache_put_n (mchunkptr chunk, size_t tc_idx, tcache_entry **ep, bool mangled) { tcache_entry *e = (tcache_entry *) chunk2mem (chunk); @@ -3167,60 +3125,136 @@ tcache_put (mchunkptr chunk, size_t tc_idx) detect a double free. */ e->key = tcache_key; - e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]); - tcache->entries[tc_idx] = e; - ++(tcache->counts[tc_idx]); + if (!mangled) + { + e->next = PROTECT_PTR (&e->next, *ep); + *ep = e; + } + else + { + e->next = PROTECT_PTR (&e->next, REVEAL_PTR (*ep)); + *ep = PROTECT_PTR (ep, e); + } + --(tcache->num_slots[tc_idx]); } /* Caller must ensure that we know tc_idx is valid and there's available chunks to remove. Removes chunk from the middle of the list. */ static __always_inline void * -tcache_get_n (size_t tc_idx, tcache_entry **ep) +tcache_get_n (size_t tc_idx, tcache_entry **ep, bool mangled) { tcache_entry *e; - if (ep == &(tcache->entries[tc_idx])) + if (!mangled) e = *ep; else e = REVEAL_PTR (*ep); - if (__glibc_unlikely (!aligned_OK (e))) + if (__glibc_unlikely (misaligned_mem (e))) malloc_printerr ("malloc(): unaligned tcache chunk detected"); - if (ep == &(tcache->entries[tc_idx])) - *ep = REVEAL_PTR (e->next); + if (!mangled) + *ep = REVEAL_PTR (e->next); else *ep = PROTECT_PTR (ep, REVEAL_PTR (e->next)); - --(tcache->counts[tc_idx]); + ++(tcache->num_slots[tc_idx]); e->key = 0; return (void *) e; } +static __always_inline void +tcache_put (mchunkptr chunk, size_t tc_idx) +{ + tcache_put_n (chunk, tc_idx, &tcache->entries[tc_idx], false); +} + /* Like the above, but removes from the head of the list. */ static __always_inline void * tcache_get (size_t tc_idx) { - return tcache_get_n (tc_idx, & tcache->entries[tc_idx]); + return tcache_get_n (tc_idx, &tcache->entries[tc_idx], false); } -/* Iterates through the tcache linked list. 
*/ -static __always_inline tcache_entry * -tcache_next (tcache_entry *e) +static __always_inline tcache_entry ** +tcache_location_large (size_t nb, size_t tc_idx, bool *mangled) { - return (tcache_entry *) REVEAL_PTR (e->next); + tcache_entry **tep = &(tcache->entries[tc_idx]); + tcache_entry *te = *tep; + while (te != NULL + && __glibc_unlikely (chunksize (mem2chunk (te)) < nb)) + { + tep = & (te->next); + te = REVEAL_PTR (te->next); + *mangled = true; + } + + return tep; } -/* Check if tcache is available for alloc by corresponding tc_idx. */ -static __always_inline bool -tcache_available (size_t tc_idx) +static __always_inline void +tcache_put_large (mchunkptr chunk, size_t tc_idx) { - if (tc_idx < mp_.tcache_bins - && tcache != NULL - && tcache->counts[tc_idx] > 0) - return true; - else - return false; + tcache_entry **entry; + bool mangled = false; + entry = tcache_location_large (chunksize (chunk), tc_idx, &mangled); + + return tcache_put_n (chunk, tc_idx, entry, mangled); +} + +static __always_inline void * +tcache_get_large (size_t tc_idx, size_t nb) +{ + tcache_entry **entry; + bool mangled = false; + entry = tcache_location_large (nb, tc_idx, &mangled); + + if ((mangled && REVEAL_PTR (*entry) == NULL) + || (!mangled && *entry == NULL)) + return NULL; + + return tcache_get_n (tc_idx, entry, mangled); +} + +static void tcache_init (void); + +static __always_inline void * +tcache_get_align (size_t nb, size_t alignment) +{ + if (nb < mp_.tcache_max_bytes) + { + if (__glibc_unlikely (tcache == NULL)) + { + tcache_init (); + return NULL; + } + + size_t tc_idx = csize2tidx (nb); + if (__glibc_unlikely (tc_idx >= TCACHE_SMALL_BINS)) + tc_idx = large_csize2tidx (nb); + + /* The tcache itself isn't encoded, but the chain is. */ + tcache_entry **tep = & tcache->entries[tc_idx]; + tcache_entry *te = *tep; + bool mangled = false; + size_t csize; + + while (te != NULL + && ((csize = chunksize (mem2chunk (te))) < nb + || (csize == nb + && !PTR_IS_ALIGNED (te, alignment)))) + { + tep = & (te->next); + te = REVEAL_PTR (te->next); + mangled = true; + } + + if (te != NULL + && csize == nb + && PTR_IS_ALIGNED (te, alignment)) + return tag_new_usable (tcache_get_n (tc_idx, tep, mangled)); + } + return NULL; } /* Verify if the suspicious tcache_entry is double free. @@ -3239,7 +3273,7 @@ tcache_double_free_verify (tcache_entry *e) { if (cnt >= mp_.tcache_count) malloc_printerr ("free(): too many chunks detected in tcache"); - if (__glibc_unlikely (!aligned_OK (tmp))) + if (__glibc_unlikely (misaligned_mem (tmp))) malloc_printerr ("free(): unaligned chunk detected in tcache 2"); if (tmp == e) malloc_printerr ("free(): double free detected in tcache 2"); @@ -3273,7 +3307,7 @@ tcache_thread_shutdown (void) while (tcache_tmp->entries[i]) { tcache_entry *e = tcache_tmp->entries[i]; - if (__glibc_unlikely (!aligned_OK (e))) + if (__glibc_unlikely (misaligned_mem (e))) malloc_printerr ("tcache_thread_shutdown(): " "unaligned tcache chunk detected"); tcache_tmp->entries[i] = REVEAL_PTR (e->next); @@ -3284,52 +3318,45 @@ tcache_thread_shutdown (void) __libc_free (tcache_tmp); } +/* Initialize tcache. In the rare case there isn't any memory available, + later calls will retry initialization. */ static void -tcache_init(void) +tcache_init (void) { - mstate ar_ptr; - void *victim = NULL; - const size_t bytes = sizeof (tcache_perthread_struct); - if (tcache_shutting_down) return; /* Check minimum mmap chunk is larger than max tcache size. 
This means mmap chunks with their different layout are never added to tcache. */ - if (MAX_TCACHE_SIZE >= GLRO (dl_pagesize) / 2) + if (MAX_TCACHE_SMALL_SIZE >= GLRO (dl_pagesize) / 2) malloc_printerr ("max tcache size too large"); - arena_get (ar_ptr, bytes); - victim = _int_malloc (ar_ptr, bytes); - if (!victim && ar_ptr != NULL) - { - ar_ptr = arena_get_retry (ar_ptr, bytes); - victim = _int_malloc (ar_ptr, bytes); - } - - - if (ar_ptr != NULL) - __libc_lock_unlock (ar_ptr->mutex); + size_t bytes = sizeof (tcache_perthread_struct); + tcache = (tcache_perthread_struct *) __libc_malloc2 (bytes); - /* In a low memory situation, we may not be able to allocate memory - - in which case, we just keep trying later. However, we - typically do this very early, so either there is sufficient - memory, or there isn't enough memory to do non-trivial - allocations anyway. */ - if (victim) + if (tcache != NULL) { - tcache = (tcache_perthread_struct *) victim; - memset (tcache, 0, sizeof (tcache_perthread_struct)); + memset (tcache, 0, bytes); + for (int i = 0; i < TCACHE_MAX_BINS; i++) + tcache->num_slots[i] = mp_.tcache_count; } +} +static void * __attribute_noinline__ +tcache_calloc_init (size_t bytes) +{ + tcache_init (); + return __libc_calloc2 (bytes); } -# define MAYBE_INIT_TCACHE() \ - if (__glibc_unlikely (tcache == NULL)) \ - tcache_init(); +static void * __attribute_noinline__ +tcache_malloc_init (size_t bytes) +{ + tcache_init (); + return __libc_malloc2 (bytes); +} #else /* !USE_TCACHE */ -# define MAYBE_INIT_TCACHE() static void tcache_thread_shutdown (void) @@ -3347,11 +3374,6 @@ __libc_malloc2 (size_t bytes) mstate ar_ptr; void *victim; - if (!__malloc_initialized) - ptmalloc_init (); - - MAYBE_INIT_TCACHE (); - if (SINGLE_THREAD_P) { victim = tag_new_usable (_int_malloc (&main_arena, bytes)); @@ -3386,10 +3408,27 @@ void * __libc_malloc (size_t bytes) { #if USE_TCACHE - size_t tc_idx = usize2tidx (bytes); + size_t nb = checked_request2size (bytes); - if (tcache_available (tc_idx)) - return tag_new_usable (tcache_get (tc_idx)); + if (nb < mp_.tcache_max_bytes) + { + size_t tc_idx = csize2tidx (nb); + if(__glibc_unlikely (tcache == NULL)) + return tcache_malloc_init (bytes); + + if (__glibc_likely (tc_idx < TCACHE_SMALL_BINS)) + { + if (tcache->entries[tc_idx] != NULL) + return tag_new_usable (tcache_get (tc_idx)); + } + else + { + tc_idx = large_csize2tidx (nb); + void *victim = tcache_get_large (tc_idx, nb); + if (victim != NULL) + return tag_new_usable (victim); + } + } #endif return __libc_malloc2 (bytes); @@ -3422,9 +3461,7 @@ __libc_free (void *mem) check_inuse_chunk (arena_for_chunk (p), p); #if USE_TCACHE - size_t tc_idx = csize2tidx (size); - - if (__glibc_likely (tcache != NULL && tc_idx < mp_.tcache_bins)) + if (__glibc_likely (size < mp_.tcache_max_bytes && tcache != NULL)) { /* Check to see if it's already in the tcache. 
*/ tcache_entry *e = (tcache_entry *) chunk2mem (p); @@ -3433,8 +3470,20 @@ __libc_free (void *mem) if (__glibc_unlikely (e->key == tcache_key)) return tcache_double_free_verify (e); - if (__glibc_likely (tcache->counts[tc_idx] < mp_.tcache_count)) - return tcache_put (p, tc_idx); + size_t tc_idx = csize2tidx (size); + if (__glibc_likely (tc_idx < TCACHE_SMALL_BINS)) + { + if (__glibc_likely (tcache->num_slots[tc_idx] != 0)) + return tcache_put (p, tc_idx); + } + else + { + tc_idx = large_csize2tidx (size); + if (size >= MINSIZE + && !chunk_is_mmapped (p) + && __glibc_likely (tcache->num_slots[tc_idx] != 0)) + return tcache_put_large (p, tc_idx); + } } #endif @@ -3455,9 +3504,6 @@ __libc_realloc (void *oldmem, size_t bytes) void *newp; /* chunk to return */ - if (!__malloc_initialized) - ptmalloc_init (); - #if REALLOC_ZERO_BYTES_FREES if (bytes == 0 && oldmem != NULL) { @@ -3503,16 +3549,16 @@ __libc_realloc (void *oldmem, size_t bytes) never wraps around at the end of the address space. Therefore we can exclude some size values which might appear here by accident or by "design" from some intruder. */ - if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0) - || __builtin_expect (misaligned_chunk (oldp), 0))) + if (__glibc_unlikely ((uintptr_t) oldp > (uintptr_t) -oldsize + || misaligned_chunk (oldp))) malloc_printerr ("realloc(): invalid pointer"); - nb = checked_request2size (bytes); - if (nb == 0) + if (bytes > PTRDIFF_MAX) { __set_errno (ENOMEM); return NULL; } + nb = checked_request2size (bytes); if (chunk_is_mmapped (oldp)) { @@ -3583,11 +3629,7 @@ libc_hidden_def (__libc_realloc) void * __libc_memalign (size_t alignment, size_t bytes) { - if (!__malloc_initialized) - ptmalloc_init (); - - void *address = RETURN_ADDRESS (0); - return _mid_memalign (alignment, bytes, address); + return _mid_memalign (alignment, bytes); } libc_hidden_def (__libc_memalign) @@ -3596,9 +3638,6 @@ void * weak_function aligned_alloc (size_t alignment, size_t bytes) { - if (!__malloc_initialized) - ptmalloc_init (); - /* Similar to memalign, but starting with ISO C17 the standard requires an error for alignments that are not supported by the implementation. Valid alignments for the current implementation @@ -3609,12 +3648,11 @@ aligned_alloc (size_t alignment, size_t bytes) return NULL; } - void *address = RETURN_ADDRESS (0); - return _mid_memalign (alignment, bytes, address); + return _mid_memalign (alignment, bytes); } static void * -_mid_memalign (size_t alignment, size_t bytes, void *address) +_mid_memalign (size_t alignment, size_t bytes) { mstate ar_ptr; void *p; @@ -3646,27 +3684,9 @@ _mid_memalign (size_t alignment, size_t bytes, void *address) } #if USE_TCACHE - { - size_t tc_idx = usize2tidx (bytes); - - if (tcache_available (tc_idx)) - { - /* The tcache itself isn't encoded, but the chain is. 
*/ - tcache_entry **tep = & tcache->entries[tc_idx]; - tcache_entry *te = *tep; - while (te != NULL && !PTR_IS_ALIGNED (te, alignment)) - { - tep = & (te->next); - te = tcache_next (te); - } - if (te != NULL) - { - void *victim = tcache_get_n (tc_idx, tep); - return tag_new_usable (victim); - } - } - MAYBE_INIT_TCACHE (); - } + void *victim = tcache_get_align (checked_request2size (bytes), alignment); + if (victim != NULL) + return tag_new_usable (victim); #endif if (SINGLE_THREAD_P) @@ -3698,21 +3718,12 @@ _mid_memalign (size_t alignment, size_t bytes, void *address) void * __libc_valloc (size_t bytes) { - if (!__malloc_initialized) - ptmalloc_init (); - - void *address = RETURN_ADDRESS (0); - size_t pagesize = GLRO (dl_pagesize); - return _mid_memalign (pagesize, bytes, address); + return _mid_memalign (GLRO (dl_pagesize), bytes); } void * __libc_pvalloc (size_t bytes) { - if (!__malloc_initialized) - ptmalloc_init (); - - void *address = RETURN_ADDRESS (0); size_t pagesize = GLRO (dl_pagesize); size_t rounded_bytes; /* ALIGN_UP with overflow check. */ @@ -3723,47 +3734,18 @@ __libc_pvalloc (size_t bytes) __set_errno (ENOMEM); return NULL; } - rounded_bytes = rounded_bytes & -(pagesize - 1); - return _mid_memalign (pagesize, rounded_bytes, address); + return _mid_memalign (pagesize, rounded_bytes & -pagesize); } -void * -__libc_calloc (size_t n, size_t elem_size) +static void * __attribute_noinline__ +__libc_calloc2 (size_t sz) { mstate av; mchunkptr oldtop, p; - INTERNAL_SIZE_T sz, oldtopsize, csz; + INTERNAL_SIZE_T oldtopsize, csz; void *mem; unsigned long clearsize; - ptrdiff_t bytes; - - if (__glibc_unlikely (__builtin_mul_overflow (n, elem_size, &bytes))) - { - __set_errno (ENOMEM); - return NULL; - } - - sz = bytes; - - if (!__malloc_initialized) - ptmalloc_init (); - -#if USE_TCACHE - size_t tc_idx = usize2tidx (bytes); - if (tcache_available (tc_idx)) - { - mem = tcache_get (tc_idx); - p = mem2chunk (mem); - if (__glibc_unlikely (mtag_enabled)) - return tag_new_zero_region (mem, memsize (p)); - - csz = chunksize (p); - clearsize = csz - SIZE_SZ; - return clear_memory ((INTERNAL_SIZE_T *) mem, clearsize); - } - MAYBE_INIT_TCACHE (); -#endif if (SINGLE_THREAD_P) av = &main_arena; @@ -3832,7 +3814,7 @@ __libc_calloc (size_t n, size_t elem_size) /* Two optional cases in which clearing not necessary */ if (chunk_is_mmapped (p)) { - if (__builtin_expect (perturb_byte, 0)) + if (__glibc_unlikely (perturb_byte)) return memset (mem, 0, sz); return mem; @@ -3849,6 +3831,55 @@ __libc_calloc (size_t n, size_t elem_size) clearsize = csz - SIZE_SZ; return clear_memory ((INTERNAL_SIZE_T *) mem, clearsize); } + +void * +__libc_calloc (size_t n, size_t elem_size) +{ + size_t bytes; + + if (__glibc_unlikely (__builtin_mul_overflow (n, elem_size, &bytes))) + { + __set_errno (ENOMEM); + return NULL; + } + +#if USE_TCACHE + size_t nb = checked_request2size (bytes); + + if (nb < mp_.tcache_max_bytes) + { + if (__glibc_unlikely (tcache == NULL)) + return tcache_calloc_init (bytes); + + size_t tc_idx = csize2tidx (nb); + + if (__glibc_unlikely (tc_idx < TCACHE_SMALL_BINS)) + { + if (tcache->entries[tc_idx] != NULL) + { + void *mem = tcache_get (tc_idx); + if (__glibc_unlikely (mtag_enabled)) + return tag_new_zero_region (mem, memsize (mem2chunk (mem))); + + return clear_memory ((INTERNAL_SIZE_T *) mem, tidx2usize (tc_idx)); + } + } + else + { + tc_idx = large_csize2tidx (nb); + void *mem = tcache_get_large (tc_idx, nb); + if (mem != NULL) + { + if (__glibc_unlikely (mtag_enabled)) + return 
tag_new_zero_region (mem, memsize (mem2chunk (mem))); + + return memset (mem, 0, memsize (mem2chunk (mem))); + } + } + } +#endif + return __libc_calloc2 (bytes); +} #endif /* IS_IN (libc) */ /* @@ -3889,12 +3920,12 @@ _int_malloc (mstate av, size_t bytes) aligned. */ - nb = checked_request2size (bytes); - if (nb == 0) + if (bytes > PTRDIFF_MAX) { __set_errno (ENOMEM); return NULL; } + nb = checked_request2size (bytes); /* There are no usable arenas. Fall back to sysmalloc to get a chunk from mmap. */ @@ -3944,20 +3975,19 @@ _int_malloc (mstate av, size_t bytes) if (__glibc_likely (victim != NULL)) { size_t victim_idx = fastbin_index (chunksize (victim)); - if (__builtin_expect (victim_idx != idx, 0)) + if (__glibc_unlikely (victim_idx != idx)) malloc_printerr ("malloc(): memory corruption (fast)"); check_remalloced_chunk (av, victim, nb); #if USE_TCACHE /* While we're here, if we see other chunks of the same size, stash them in the tcache. */ size_t tc_idx = csize2tidx (nb); - if (tcache != NULL && tc_idx < mp_.tcache_bins) + if (tcache != NULL && tc_idx < mp_.tcache_small_bins) { mchunkptr tc_victim; /* While bin not empty and tcache not full, copy chunks. */ - while (tcache->counts[tc_idx] < mp_.tcache_count - && (tc_victim = *fb) != NULL) + while (tcache->num_slots[tc_idx] != 0 && (tc_victim = *fb) != NULL) { if (__glibc_unlikely (misaligned_chunk (tc_victim))) malloc_printerr ("malloc(): unaligned fastbin chunk detected 3"); @@ -4012,12 +4042,12 @@ _int_malloc (mstate av, size_t bytes) /* While we're here, if we see other chunks of the same size, stash them in the tcache. */ size_t tc_idx = csize2tidx (nb); - if (tcache != NULL && tc_idx < mp_.tcache_bins) + if (tcache != NULL && tc_idx < mp_.tcache_small_bins) { mchunkptr tc_victim; /* While bin not empty and tcache not full, copy chunks over. */ - while (tcache->counts[tc_idx] < mp_.tcache_count + while (tcache->num_slots[tc_idx] != 0 && (tc_victim = last (bin)) != bin) { if (tc_victim != NULL) @@ -4074,7 +4104,7 @@ _int_malloc (mstate av, size_t bytes) #if USE_TCACHE INTERNAL_SIZE_T tcache_nb = 0; size_t tc_idx = csize2tidx (nb); - if (tcache != NULL && tc_idx < mp_.tcache_bins) + if (tcache != NULL && tc_idx < mp_.tcache_small_bins) tcache_nb = nb; int return_cached = 0; @@ -4155,7 +4185,7 @@ _int_malloc (mstate av, size_t bytes) /* Fill cache first, return to user only if cache fills. We may return one of these chunks later. */ if (tcache_nb > 0 - && tcache->counts[tc_idx] < mp_.tcache_count) + && tcache->num_slots[tc_idx] != 0) { tcache_put (victim, tc_idx); return_cached = 1; @@ -4534,10 +4564,9 @@ _int_free_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size, int have_lock) #endif ) { - if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size)) - <= CHUNK_HDR_SZ, 0) - || __builtin_expect (chunksize (chunk_at_offset (p, size)) - >= av->system_mem, 0)) + if (__glibc_unlikely ( + chunksize_nomask (chunk_at_offset(p, size)) <= CHUNK_HDR_SZ + || chunksize (chunk_at_offset(p, size)) >= av->system_mem)) { bool fail = true; /* We might not have a lock at this point and concurrent modifications @@ -4568,7 +4597,7 @@ _int_free_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size, int have_lock) { /* Check that the top of the bin is not the record we are going to add (i.e., double free). 
*/ - if (__builtin_expect (old == p, 0)) + if (__glibc_unlikely (old == p)) malloc_printerr ("double free or corruption (fasttop)"); p->fd = PROTECT_PTR (&p->fd, old); *fb = p; @@ -4578,7 +4607,7 @@ _int_free_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size, int have_lock) { /* Check that the top of the bin is not the record we are going to add (i.e., double free). */ - if (__builtin_expect (old == p, 0)) + if (__glibc_unlikely (old == p)) malloc_printerr ("double free or corruption (fasttop)"); old2 = old; p->fd = PROTECT_PTR (&p->fd, old); @@ -4591,7 +4620,7 @@ _int_free_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size, int have_lock) only if we have the lock, otherwise it might have already been allocated again. */ if (have_lock && old != NULL - && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0)) + && __glibc_unlikely (fastbin_index (chunksize (old)) != idx)) malloc_printerr ("invalid fastbin entry (free)"); } @@ -4658,17 +4687,17 @@ _int_free_merge_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size) if (__glibc_unlikely (p == av->top)) malloc_printerr ("double free or corruption (top)"); /* Or whether the next chunk is beyond the boundaries of the arena. */ - if (__builtin_expect (contiguous (av) + if (__glibc_unlikely (contiguous (av) && (char *) nextchunk - >= ((char *) av->top + chunksize(av->top)), 0)) + >= ((char *) av->top + chunksize(av->top)))) malloc_printerr ("double free or corruption (out)"); /* Or whether the block is actually not marked used. */ if (__glibc_unlikely (!prev_inuse(nextchunk))) malloc_printerr ("double free or corruption (!prev)"); INTERNAL_SIZE_T nextsize = chunksize(nextchunk); - if (__builtin_expect (chunksize_nomask (nextchunk) <= CHUNK_HDR_SZ, 0) - || __builtin_expect (nextsize >= av->system_mem, 0)) + if (__glibc_unlikely (chunksize_nomask (nextchunk) <= CHUNK_HDR_SZ + || nextsize >= av->system_mem)) malloc_printerr ("free(): invalid next size (normal)"); free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ); @@ -4925,9 +4954,9 @@ _int_realloc (mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize, unsigned long remainder_size; /* its size */ /* oldmem size */ - if (__builtin_expect (chunksize_nomask (oldp) <= CHUNK_HDR_SZ, 0) - || __builtin_expect (oldsize >= av->system_mem, 0) - || __builtin_expect (oldsize != chunksize (oldp), 0)) + if (__glibc_unlikely (chunksize_nomask (oldp) <= CHUNK_HDR_SZ + || oldsize >= av->system_mem + || oldsize != chunksize (oldp))) malloc_printerr ("realloc(): invalid old size"); check_inuse_chunk (av, oldp); @@ -4937,8 +4966,8 @@ _int_realloc (mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize, next = chunk_at_offset (oldp, oldsize); INTERNAL_SIZE_T nextsize = chunksize (next); - if (__builtin_expect (chunksize_nomask (next) <= CHUNK_HDR_SZ, 0) - || __builtin_expect (nextsize >= av->system_mem, 0)) + if (__glibc_unlikely (chunksize_nomask (next) <= CHUNK_HDR_SZ + || nextsize >= av->system_mem)) malloc_printerr ("realloc(): invalid next size"); if ((unsigned long) (oldsize) >= (unsigned long) (nb)) @@ -5051,12 +5080,12 @@ _int_memalign (mstate av, size_t alignment, size_t bytes) unsigned long remainder_size; /* its size */ INTERNAL_SIZE_T size; - nb = checked_request2size (bytes); - if (nb == 0) + if (bytes > PTRDIFF_MAX) { __set_errno (ENOMEM); return NULL; } + nb = checked_request2size (bytes); /* We can't check tcache here because we hold the arena lock, which tcache doesn't expect. 
We expect it has been checked @@ -5211,9 +5240,6 @@ __malloc_trim (size_t s) { int result = 0; - if (!__malloc_initialized) - ptmalloc_init (); - mstate ar_ptr = &main_arena; do { @@ -5330,9 +5356,6 @@ __libc_mallinfo2 (void) struct mallinfo2 m; mstate ar_ptr; - if (!__malloc_initialized) - ptmalloc_init (); - memset (&m, 0, sizeof (m)); ar_ptr = &main_arena; do @@ -5381,8 +5404,6 @@ __malloc_stats (void) mstate ar_ptr; unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b; - if (!__malloc_initialized) - ptmalloc_init (); _IO_flockfile (stderr); int old_flags2 = stderr->_flags2; stderr->_flags2 |= _IO_FLAGS2_NOTCANCEL; @@ -5495,13 +5516,25 @@ do_set_arena_max (size_t value) static __always_inline int do_set_tcache_max (size_t value) { - if (value <= MAX_TCACHE_SIZE) + if (value > PTRDIFF_MAX) + return 0; + + size_t nb = request2size (value); + size_t tc_idx = csize2tidx (nb); + + if (tc_idx >= TCACHE_SMALL_BINS) + tc_idx = large_csize2tidx (nb); + + LIBC_PROBE (memory_tunable_tcache_max_bytes, 2, value, mp_.tcache_max_bytes); + + if (tc_idx < TCACHE_MAX_BINS) { - LIBC_PROBE (memory_tunable_tcache_max_bytes, 2, value, mp_.tcache_max_bytes); - mp_.tcache_max_bytes = value; - mp_.tcache_bins = csize2tidx (request2size(value)) + 1; + if (tc_idx < TCACHE_SMALL_BINS) + mp_.tcache_small_bins = tc_idx + 1; + mp_.tcache_max_bytes = nb + 1; return 1; } + return 0; } @@ -5543,12 +5576,9 @@ do_set_hugetlb (size_t value) { if (value == 1) { - enum malloc_thp_mode_t thp_mode = __malloc_thp_mode (); - /* - Only enable THP madvise usage if system does support it and - has 'madvise' mode. Otherwise the madvise() call is wasteful. - */ - if (thp_mode == malloc_thp_mode_madvise) + mp_.thp_mode = __malloc_thp_mode (); + if (mp_.thp_mode == malloc_thp_mode_madvise + || mp_.thp_mode == malloc_thp_mode_always) mp_.thp_pagesize = __malloc_default_thp_pagesize (); } else if (value >= 2) @@ -5563,8 +5593,6 @@ __libc_mallopt (int param_number, int value) mstate av = &main_arena; int res = 1; - if (!__malloc_initialized) - ptmalloc_init (); __libc_lock_lock (av->mutex); LIBC_PROBE (memory_mallopt, 2, param_number, value); @@ -5780,11 +5808,14 @@ malloc_printerr (const char *str) } #if USE_TCACHE + +static volatile int dummy_var; + static __attribute_noinline__ void malloc_printerr_tail (const char *str) { /* Ensure this cannot be a no-return function. */ - if (!__malloc_initialized) + if (dummy_var) return; malloc_printerr (str); } @@ -5797,9 +5828,6 @@ __posix_memalign (void **memptr, size_t alignment, size_t size) { void *mem; - if (!__malloc_initialized) - ptmalloc_init (); - /* Test whether the SIZE argument is valid. It must be a power of two multiple of sizeof (void *). */ if (alignment % sizeof (void *) != 0 @@ -5808,8 +5836,7 @@ __posix_memalign (void **memptr, size_t alignment, size_t size) return EINVAL; - void *address = RETURN_ADDRESS (0); - mem = _mid_memalign (alignment, size, address); + mem = _mid_memalign (alignment, size); if (mem != NULL) { @@ -5840,11 +5867,6 @@ __malloc_info (int options, FILE *fp) size_t total_aspace = 0; size_t total_aspace_mprotect = 0; - - - if (!__malloc_initialized) - ptmalloc_init (); - fputs ("<malloc version=\"1\">\n", fp); /* Iterate over all arenas currently in use. */ |
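The new bin indexing is easier to follow with concrete numbers. The sketch below mirrors the patch's csize2tidx/large_csize2tidx arithmetic as a standalone program; the constants (MALLOC_ALIGNMENT 16, MINSIZE 32) are assumed x86-64 values, not something taken from the diff, and the helper copies the formula rather than calling the glibc function itself. Each large bin covers a power-of-two band of chunk sizes above MAX_TCACHE_SMALL_SIZE, selected by the position of the size's top bit, so the 12 large bins (indices 64..75) reach chunks just under 4 MiB.

/* Standalone sketch of the patch's tcache bin indexing, assuming the
   usual x86-64 constants (MALLOC_ALIGNMENT 16, MINSIZE 32).  Not the
   glibc implementation -- just the same arithmetic, runnable in
   isolation.  __builtin_clz is a GCC/Clang builtin; all sizes used
   here fit in unsigned int.  */
#include <stdio.h>
#include <stddef.h>

#define MALLOC_ALIGNMENT   16
#define MINSIZE            32
#define TCACHE_SMALL_BINS  64
#define TCACHE_LARGE_BINS  12
#define TCACHE_MAX_BINS    (TCACHE_SMALL_BINS + TCACHE_LARGE_BINS)

/* Map a small-bin index to the largest chunk size it holds, and a
   chunk size back to its small-bin index, as in malloc.c.  */
#define tidx2csize(idx)  ((size_t) (idx) * MALLOC_ALIGNMENT + MINSIZE)
#define csize2tidx(x)    (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
#define MAX_TCACHE_SMALL_SIZE  tidx2csize (TCACHE_SMALL_BINS - 1)   /* 1040 */

/* Same formula as large_csize2tidx in the patch: the difference of the
   leading-zero counts selects one bin per power-of-two size band.  */
static size_t
large_csize2tidx (size_t nb)
{
  return TCACHE_SMALL_BINS
	 + __builtin_clz (MAX_TCACHE_SMALL_SIZE)
	 - __builtin_clz (nb);
}

int
main (void)
{
  size_t sizes[] = { 32, 1040, 1056, 2048, 4096, 1 << 20, 4 << 20 };
  for (size_t i = 0; i < sizeof sizes / sizeof sizes[0]; i++)
    {
      size_t nb = sizes[i];
      size_t idx = csize2tidx (nb);
      if (idx >= TCACHE_SMALL_BINS)
	idx = large_csize2tidx (nb);
      printf ("chunk size %8zu -> bin %2zu (%s)\n", nb, idx,
	      idx < TCACHE_SMALL_BINS ? "small"
	      : idx < TCACHE_MAX_BINS ? "large" : "beyond the last large bin");
    }
  return 0;
}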
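tcache_put_large and tcache_get_large rely on each large bin staying sorted by chunk size, so a hit is the smallest cached chunk that still satisfies the request. The following is a minimal sketch of that list discipline with the pointer mangling (PROTECT_PTR/REVEAL_PTR) and the chunk-header walk (mem2chunk/chunksize) stripped out; the bin_location/bin_put/bin_get names and the plain size field are illustrative, not glibc functions.

/* Simplified model of a sorted large tcache bin.  */
#include <stdio.h>
#include <stddef.h>

struct entry
{
  struct entry *next;
  size_t size;              /* stands in for chunksize (mem2chunk (e)) */
};

/* Find the slot of the first entry whose size is >= nb, as
   tcache_location_large does.  */
static struct entry **
bin_location (struct entry **bin, size_t nb)
{
  while (*bin != NULL && (*bin)->size < nb)
    bin = &(*bin)->next;
  return bin;
}

/* Insert keeps the list sorted ascending (cf. tcache_put_large).  */
static void
bin_put (struct entry **bin, struct entry *e)
{
  struct entry **loc = bin_location (bin, e->size);
  e->next = *loc;
  *loc = e;
}

/* Return the smallest entry that is large enough, unlinking it, or
   NULL if none fits (cf. tcache_get_large).  */
static struct entry *
bin_get (struct entry **bin, size_t nb)
{
  struct entry **loc = bin_location (bin, nb);
  struct entry *e = *loc;
  if (e != NULL)
    *loc = e->next;
  return e;
}

int
main (void)
{
  struct entry a = { NULL, 3072 }, b = { NULL, 1536 }, c = { NULL, 2048 };
  struct entry *bin = NULL;

  bin_put (&bin, &a);
  bin_put (&bin, &b);
  bin_put (&bin, &c);          /* list is now 1536 -> 2048 -> 3072 */

  struct entry *hit = bin_get (&bin, 1600);   /* picks the 2048 chunk */
  printf ("request 1600 -> got %zu\n", hit ? hit->size : 0);
  return 0;
}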
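The rewritten sysmalloc_mmap also drops the front_misalign special cases: the mapping length is always the request plus the alignment padding plus one trailing SIZE_SZ, rounded up to whole pages, and the chunk is placed 'padding' bytes into the mapping with that offset stored in prev_size for later munmap. A tiny sketch of the arithmetic, under assumed constants (SIZE_SZ 8, 4 KiB pages; the second call shows a hypothetical larger MALLOC_ALIGNMENT, not an actual glibc configuration):

#include <stdio.h>
#include <stddef.h>

#define SIZE_SZ   8
#define PAGESIZE  4096
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((size_t) (a) - 1))

static void
show (size_t nb, size_t malloc_alignment, size_t chunk_hdr_sz)
{
  size_t padding = malloc_alignment - chunk_hdr_sz;
  size_t size = ALIGN_UP (nb + padding + SIZE_SZ, PAGESIZE);
  printf ("nb=%zu alignment=%zu -> map %zu bytes, chunk offset (prev_size) %zu\n",
	  nb, malloc_alignment, size, padding);
}

int
main (void)
{
  show (200008, 16, 16);   /* usual x86-64 case: no padding needed */
  show (200008, 32, 16);   /* hypothetical larger MALLOC_ALIGNMENT */
  return 0;
}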