Diffstat (limited to 'malloc/malloc.c')
-rw-r--r--  malloc/malloc.c  191
1 file changed, 114 insertions(+), 77 deletions(-)
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 9e577ab..81ddd2c 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -369,7 +369,7 @@
 #include "morecore.c"
 
 #define MORECORE (*__glibc_morecore)
-#define MORECORE_FAILURE 0
+#define MORECORE_FAILURE NULL
 
 /* Memory tagging.  */
 
@@ -1086,7 +1086,9 @@ typedef struct malloc_chunk* mchunkptr;
 /* Internal routines.  */
 
 static void* _int_malloc(mstate, size_t);
-static void _int_free(mstate, mchunkptr, int);
+static void _int_free (mstate, mchunkptr, int);
+static void _int_free_check (mstate, mchunkptr, INTERNAL_SIZE_T);
+static void _int_free_chunk (mstate, mchunkptr, INTERNAL_SIZE_T, int);
 static void _int_free_merge_chunk (mstate, mchunkptr, INTERNAL_SIZE_T);
 static INTERNAL_SIZE_T _int_free_create_chunk (mstate,
                                                mchunkptr, INTERNAL_SIZE_T,
@@ -2418,7 +2420,7 @@ sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags, mstate av)
   if ((unsigned long) (size) <= (unsigned long) (nb))
     return MAP_FAILED;
 
-  char *mm = (char *) MMAP (0, size,
+  char *mm = (char *) MMAP (NULL, size,
                             mtag_mmap_flags | PROT_READ | PROT_WRITE,
                             extra_flags);
   if (mm == MAP_FAILED)
@@ -2505,7 +2507,7 @@ sysmalloc_mmap_fallback (long int *s, INTERNAL_SIZE_T nb,
   if ((unsigned long) (size) <= (unsigned long) (nb))
     return MORECORE_FAILURE;
 
-  char *mbrk = (char *) (MMAP (0, size,
+  char *mbrk = (char *) (MMAP (NULL, size,
                                mtag_mmap_flags | PROT_READ | PROT_WRITE,
                                extra_flags));
   if (mbrk == MAP_FAILED)
@@ -2581,7 +2583,7 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
 
   /* There are no usable arenas and mmap also failed.  */
   if (av == NULL)
-    return 0;
+    return NULL;
 
   /* Record incoming configuration of top */
 
@@ -2741,7 +2743,7 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
 
   if (brk != (char *) (MORECORE_FAILURE))
     {
-      if (mp_.sbrk_base == 0)
+      if (mp_.sbrk_base == NULL)
        mp_.sbrk_base = brk;
 
       av->system_mem += size;
@@ -2940,7 +2942,7 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
 
   /* catch all failure paths */
   __set_errno (ENOMEM);
-  return 0;
+  return NULL;
 }
 
@@ -3078,7 +3080,7 @@ mremap_chunk (mchunkptr p, size_t new_size)
                              MREMAP_MAYMOVE);
 
   if (cp == MAP_FAILED)
-    return 0;
+    return NULL;
 
   madvise_thp (cp, new_size);
 
@@ -3206,6 +3208,57 @@ tcache_next (tcache_entry *e)
   return (tcache_entry *) REVEAL_PTR (e->next);
 }
 
+/* Verify whether the suspicious tcache_entry is a double free.
+   It is not expected to execute very often, so mark it noinline.  */
+static __attribute__ ((noinline)) void
+tcache_double_free_verify (tcache_entry *e, size_t tc_idx)
+{
+  tcache_entry *tmp;
+  size_t cnt = 0;
+  LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
+  for (tmp = tcache->entries[tc_idx];
+       tmp;
+       tmp = REVEAL_PTR (tmp->next), ++cnt)
+    {
+      if (cnt >= mp_.tcache_count)
+        malloc_printerr ("free(): too many chunks detected in tcache");
+      if (__glibc_unlikely (!aligned_OK (tmp)))
+        malloc_printerr ("free(): unaligned chunk detected in tcache 2");
+      if (tmp == e)
+        malloc_printerr ("free(): double free detected in tcache 2");
+      /* If we get here, it was a coincidence.  We've wasted a
+         few cycles, but don't abort.  */
+    }
+}
+
+/* Try to free the chunk to the tcache; if that succeeds, return true.
+   The caller must ensure that chunk and size are valid.  */
+static inline bool
+tcache_free (mchunkptr p, INTERNAL_SIZE_T size)
+{
+  bool done = false;
+  size_t tc_idx = csize2tidx (size);
+  if (tcache != NULL && tc_idx < mp_.tcache_bins)
+    {
+      /* Check to see if it's already in the tcache.  */
+      tcache_entry *e = (tcache_entry *) chunk2mem (p);
+
+      /* This test succeeds on double free.  However, we don't 100%
+         trust it (it also matches random payload data at a 1 in
+         2^<size_t> chance), so verify it's not an unlikely
+         coincidence before aborting.  */
+      if (__glibc_unlikely (e->key == tcache_key))
+        tcache_double_free_verify (e, tc_idx);
+
+      if (tcache->counts[tc_idx] < mp_.tcache_count)
+        {
+          tcache_put (p, tc_idx);
+          done = true;
+        }
+    }
+  return done;
+}
+
 static void
 tcache_thread_shutdown (void)
 {
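The two helpers above carry the double-free defense that previously lived inline in _int_free: every entry placed in the tcache is stamped with a per-thread key, so freeing the same pointer twice trips a single comparison, and the noinline verifier walks the bin only in the rare suspicious case. Below is a standalone toy model of that technique, not glibc code: all toy_* names are invented, the key is a fixed constant instead of per-thread random bits, and the pointer obfuscation done by PROTECT_PTR/REVEAL_PTR is omitted.

/* Toy model of the tcache key check; compile and run as-is.  */
#include <stdio.h>
#include <stdlib.h>

#define TOY_TCACHE_COUNT 7      /* stand-in for mp_.tcache_count */

struct toy_entry
{
  struct toy_entry *next;
  unsigned long key;            /* stamp meaning "already cached" */
};

static struct toy_entry *toy_bin;
static unsigned long toy_key = 0x5eed5eedUL;

/* Like tcache_double_free_verify: walk the bin to confirm that a
   matching key really means a double free, not payload bytes that
   happened to equal the key.  */
static int
toy_double_free_verify (struct toy_entry *e)
{
  size_t cnt = 0;
  for (struct toy_entry *tmp = toy_bin; tmp != NULL;
       tmp = tmp->next, ++cnt)
    {
      if (cnt >= TOY_TCACHE_COUNT)
        return 1;               /* list impossibly long: corruption */
      if (tmp == e)
        return 1;               /* found E in the bin: double free */
    }
  return 0;                     /* key matched by coincidence */
}

static void
toy_free (void *mem)
{
  struct toy_entry *e = mem;
  /* Fast test: hits on every double free, or with tiny probability
     on random payload, hence the confirmation walk.  */
  if (e->key == toy_key && toy_double_free_verify (e))
    {
      fprintf (stderr, "toy_free(): double free detected\n");
      abort ();
    }
  e->key = toy_key;             /* stamp, as tcache_put does */
  e->next = toy_bin;
  toy_bin = e;
}

int
main (void)
{
  void *p = malloc (32);
  toy_free (p);
  toy_free (p);                 /* aborts here */
}

The confirmation walk is what makes the stamp safe to trust: user data that merely happens to equal the key costs a short list scan but does not abort the process.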
@@ -3242,7 +3295,7 @@ static void
 tcache_init(void)
 {
   mstate ar_ptr;
-  void *victim = 0;
+  void *victim = NULL;
   const size_t bytes = sizeof (tcache_perthread_struct);
 
   if (tcache_shutting_down)
@@ -3360,7 +3413,7 @@ __libc_free (void *mem)
   mstate ar_ptr;
   mchunkptr p;                          /* chunk corresponding to mem */
 
-  if (mem == 0)                              /* free(0) has no effect */
+  if (mem == NULL)                           /* free(0) has no effect */
     return;
 
   /* Quickly check that the freed pointer matches the tag for the memory.
@@ -3416,12 +3469,12 @@ __libc_realloc (void *oldmem, size_t bytes)
 #if REALLOC_ZERO_BYTES_FREES
   if (bytes == 0 && oldmem != NULL)
     {
-      __libc_free (oldmem); return 0;
+      __libc_free (oldmem); return NULL;
     }
 #endif
 
   /* realloc of null is supposed to be same as malloc */
-  if (oldmem == 0)
+  if (oldmem == NULL)
     return __libc_malloc (bytes);
 
   /* Perform a quick check to ensure that the pointer's tag matches the
@@ -3495,8 +3548,8 @@ __libc_realloc (void *oldmem, size_t bytes)
 
           /* Must alloc, copy, free. */
           newmem = __libc_malloc (bytes);
-          if (newmem == 0)
-            return 0;           /* propagate failure */
+          if (newmem == NULL)
+            return NULL;        /* propagate failure */
 
           memcpy (newmem, oldmem, oldsize - CHUNK_HDR_SZ);
           munmap_chunk (oldp);
@@ -3564,7 +3617,7 @@ aligned_alloc (size_t alignment, size_t bytes)
   if (!powerof2 (alignment) || alignment == 0)
     {
       __set_errno (EINVAL);
-      return 0;
+      return NULL;
     }
 
   void *address = RETURN_ADDRESS (0);
@@ -3590,7 +3643,7 @@ _mid_memalign (size_t alignment, size_t bytes, void *address)
   if (alignment > SIZE_MAX / 2 + 1)
     {
       __set_errno (EINVAL);
-      return 0;
+      return NULL;
     }
 
@@ -3687,7 +3740,7 @@ __libc_pvalloc (size_t bytes)
                                    &rounded_bytes)))
     {
      __set_errno (ENOMEM);
-      return 0;
+      return NULL;
     }
   rounded_bytes = rounded_bytes & -(pagesize - 1);
 
@@ -3748,7 +3801,7 @@ __libc_calloc (size_t n, size_t elem_size)
   else
     {
       /* No usable arenas.  */
-      oldtop = 0;
+      oldtop = NULL;
       oldtopsize = 0;
     }
   mem = _int_malloc (av, sz);
@@ -3758,7 +3811,7 @@ __libc_calloc (size_t n, size_t elem_size)
 
   if (!SINGLE_THREAD_P)
     {
-      if (mem == 0 && av != NULL)
+      if (mem == NULL && av != NULL)
        {
          LIBC_PROBE (memory_calloc_retry, 1, sz);
          av = arena_get_retry (av, sz);
@@ -3770,8 +3823,8 @@ __libc_calloc (size_t n, size_t elem_size)
     }
 
   /* Allocation failed even after a retry.  */
-  if (mem == 0)
-    return 0;
+  if (mem == NULL)
+    return NULL;
 
   mchunkptr p = mem2chunk (mem);
 
@@ -4003,7 +4056,7 @@ _int_malloc (mstate av, size_t bytes)
          while (tcache->counts[tc_idx] < mp_.tcache_count
                 && (tc_victim = last (bin)) != bin)
            {
-             if (tc_victim != 0)
+             if (tc_victim != NULL)
                {
                  bck = tc_victim->bk;
                  set_inuse_bit_at_offset (tc_victim, nb);
@@ -4490,14 +4543,9 @@ _int_malloc (mstate av, size_t bytes)
    ------------------------------ free ------------------------------
  */
 
-static void
-_int_free (mstate av, mchunkptr p, int have_lock)
+static inline void
+_int_free_check (mstate av, mchunkptr p, INTERNAL_SIZE_T size)
 {
-  INTERNAL_SIZE_T size;        /* its size */
-  mfastbinptr *fb;             /* associated fastbin */
-
-  size = chunksize (p);
-
   /* Little security check which won't hurt performance: the
      allocator never wraps around at the end of the address
      space.  Therefore we can exclude some size values which
      might appear
@@ -4510,48 +4558,16 @@ _int_free (mstate av, mchunkptr p, int have_lock)
   if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
     malloc_printerr ("free(): invalid size");
 
-  check_inuse_chunk(av, p);
-
-#if USE_TCACHE
-  {
-    size_t tc_idx = csize2tidx (size);
-    if (tcache != NULL && tc_idx < mp_.tcache_bins)
-      {
-        /* Check to see if it's already in the tcache.  */
-        tcache_entry *e = (tcache_entry *) chunk2mem (p);
-
-        /* This test succeeds on double free.  However, we don't 100%
-           trust it (it also matches random payload data at a 1 in
-           2^<size_t> chance), so verify it's not an unlikely
-           coincidence before aborting.  */
-        if (__glibc_unlikely (e->key == tcache_key))
-          {
-            tcache_entry *tmp;
-            size_t cnt = 0;
-            LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
-            for (tmp = tcache->entries[tc_idx];
-                 tmp;
-                 tmp = REVEAL_PTR (tmp->next), ++cnt)
-              {
-                if (cnt >= mp_.tcache_count)
-                  malloc_printerr ("free(): too many chunks detected in tcache");
-                if (__glibc_unlikely (!aligned_OK (tmp)))
-                  malloc_printerr ("free(): unaligned chunk detected in tcache 2");
-                if (tmp == e)
-                  malloc_printerr ("free(): double free detected in tcache 2");
-                /* If we get here, it was a coincidence.  We've wasted a
-                   few cycles, but don't abort.  */
-              }
-          }
-
-        if (tcache->counts[tc_idx] < mp_.tcache_count)
-          {
-            tcache_put (p, tc_idx);
-            return;
-          }
-      }
-  }
-#endif
+  check_inuse_chunk (av, p);
+}
 
+/* Free chunk P of SIZE bytes to the arena.  HAVE_LOCK indicates whether
+   the arena for P has already been locked.  The caller must ensure that
+   chunk and size are valid.  */
+static void
+_int_free_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size, int have_lock)
+{
+  mfastbinptr *fb;             /* associated fastbin */
 
   /* If eligible, place chunk on a fastbin so it can be found
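What _int_free_check keeps is deliberately cheap: because the heap never wraps around the top of the address space, a size whose sum with the chunk address would overflow cannot be legitimate, and neither can a size below the minimum chunk size or one off the allocation alignment. A hedged sketch of tests of that shape, using invented constants in place of glibc's MINSIZE and MALLOC_ALIGNMENT:

/* Toy version of the free-time size sanity test; the constants
   (16-byte alignment, 32-byte minimum chunk) are illustrative.  */
#include <stdint.h>
#include <stdio.h>

#define TOY_ALIGNMENT 16
#define TOY_MINSIZE   32

static int
toy_size_ok (uintptr_t p, size_t size)
{
  /* P + SIZE must not wrap the address space: this one comparison
     rejects every size "close to" SIZE_MAX.  */
  if (p > (uintptr_t) -size)
    return 0;
  /* Real chunks are at least TOY_MINSIZE bytes and a multiple of
     the alignment; anything else is accident or attack.  */
  if (size < TOY_MINSIZE || (size & (TOY_ALIGNMENT - 1)) != 0)
    return 0;
  return 1;
}

int
main (void)
{
  printf ("%d\n", toy_size_ok (0x1000, 48));            /* 1: plausible */
  printf ("%d\n", toy_size_ok (0x1000, 20));            /* 0: too small */
  printf ("%d\n", toy_size_ok (0x1000, (size_t) -32));  /* 0: would wrap */
}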
@@ -4657,6 +4673,27 @@
     }
 }
 
+/* Free chunk P to its arena AV.  HAVE_LOCK indicates whether the arena
+   for P has already been locked.  Perform the sanity checks first, then
+   try the fast path of freeing into the tcache; if that attempt does
+   not succeed, free the chunk to the arena.  */
+static inline void
+_int_free (mstate av, mchunkptr p, int have_lock)
+{
+  INTERNAL_SIZE_T size;        /* its size */
+
+  size = chunksize (p);
+
+  _int_free_check (av, p, size);
+
+#if USE_TCACHE
+  if (tcache_free (p, size))
+    return;
+#endif
+
+  _int_free_chunk (av, p, size, have_lock);
+}
+
 /* Try to merge chunk P of SIZE bytes with its neighbors.  Put the
    resulting chunk on the appropriate bin list.  P must not be on a bin
    list yet, and it can be in use.  */
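After the split, _int_free itself is just an inline dispatcher, which is the point of the refactor: callers can inline the validate-and-cache fast path while the arena path stays out of line. A structural sketch of that shape, with invented toy_* helpers in place of the real allocator logic:

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

typedef struct toy_chunk { size_t size; } toy_chunk;

/* Stand-in for _int_free_check: cheap validation only.  */
static inline void
toy_check (toy_chunk *p, size_t size)
{
  (void) p;
  if (size < 32 || (size & 15) != 0)
    abort ();                   /* stand-in for malloc_printerr */
}

/* Stand-in for tcache_free: returns true when it consumed P.  */
static inline bool
toy_cache_free (toy_chunk *p, size_t size)
{
  (void) p;
  (void) size;
  return false;                 /* this toy cache is always full */
}

/* Stand-in for _int_free_chunk: the out-of-line arena path.  */
static void
toy_arena_free (toy_chunk *p, size_t size, int have_lock)
{
  (void) p;
  (void) size;
  (void) have_lock;
}

/* Mirrors the shape of the new _int_free: check, try the cache,
   fall back to the arena.  */
static inline void
toy_int_free (toy_chunk *p, int have_lock)
{
  size_t size = p->size;

  toy_check (p, size);
  if (toy_cache_free (p, size))
    return;
  toy_arena_free (p, size, have_lock);
}

int
main (void)
{
  toy_chunk c = { .size = 64 };
  toy_int_free (&c, 0);
  return 0;
}

Keeping the dispatcher inline while _int_free_chunk remains a plain static function complements the noinline choice for the verifier: hot code stays small, cold code stays out of the way.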
@@ -4839,7 +4876,7 @@ static void malloc_consolidate(mstate av)
   fb = &fastbin (av, 0);
   do {
     p = atomic_exchange_acquire (fb, NULL);
-    if (p != 0) {
+    if (p != NULL) {
       do {
        {
          if (__glibc_unlikely (misaligned_chunk (p)))
@@ -4898,7 +4935,7 @@ static void malloc_consolidate(mstate av)
          av->top = p;
        }
 
-      } while ( (p = nextp) != 0);
+      } while ( (p = nextp) != NULL);
 
     }
   } while (fb++ != maxfb);
@@ -4973,8 +5010,8 @@ _int_realloc (mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
       else
        {
          newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK);
-         if (newmem == 0)
-           return 0;            /* propagate failure */
+         if (newmem == NULL)
+           return NULL;         /* propagate failure */
 
          newp = mem2chunk (newmem);
          newsize = chunksize (newp);
@@ -5068,8 +5105,8 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
   /* Call malloc with worst case padding to hit alignment.  */
   m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
 
-  if (m == 0)
-    return 0;                   /* propagate failure */
+  if (m == NULL)
+    return NULL;                /* propagate failure */
 
   p = mem2chunk (m);
@@ -5281,7 +5318,7 @@ int_mallinfo (mstate av, struct mallinfo2 *m)
   for (i = 0; i < NFASTBINS; ++i)
     {
       for (p = fastbin (av, i);
-          p != 0;
+          p != NULL;
           p = REVEAL_PTR (p->fd))
        {
          if (__glibc_unlikely (misaligned_chunk (p)))
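The "worst case padding" comment in the _int_memalign hunk names the classic over-allocation trick: request enough slack that a suitably aligned address must exist inside the block, then return that address. glibc turns the skipped prefix into a chunk it can free later; the sketch below instead stashes the raw pointer, the simplest portable variant. Toy code, with the usual assumption that alignment is a power of two.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Over-allocate so an ALIGNMENT-aligned address is guaranteed to
   exist in the block, keeping room to remember the raw pointer.  */
static void *
toy_memalign (size_t alignment, size_t bytes)
{
  void *raw = malloc (bytes + alignment - 1 + sizeof (void *));
  if (raw == NULL)
    return NULL;
  uintptr_t user = ((uintptr_t) raw + sizeof (void *) + alignment - 1)
                   & ~((uintptr_t) alignment - 1);
  /* Stash RAW just below the address we return, for the free side.  */
  memcpy ((void *) (user - sizeof (void *)), &raw, sizeof (void *));
  return (void *) user;
}

static void
toy_aligned_free (void *mem)
{
  void *raw;
  memcpy (&raw, (char *) mem - sizeof (void *), sizeof (void *));
  free (raw);
}

int
main (void)
{
  void *p = toy_memalign (64, 100);
  printf ("%p is 64-byte aligned: %d\n", p,
          (int) (((uintptr_t) p & 63) == 0));
  toy_aligned_free (p);
  return 0;
}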