Diffstat (limited to 'malloc')
-rw-r--r--  malloc/malloc-check.c |   2 +-
-rw-r--r--  malloc/malloc.c       | 222 +++++++++++-----------------
2 files changed, 83 insertions, 141 deletions
diff --git a/malloc/malloc-check.c b/malloc/malloc-check.c
index 814a916..c5265ec 100644
--- a/malloc/malloc-check.c
+++ b/malloc/malloc-check.c
@@ -235,7 +235,7 @@ free_check (void *mem)
     {
       /* Mark the chunk as belonging to the library again.  */
       (void)tag_region (chunk2mem (p), memsize (p));
-      _int_free (&main_arena, p, 1);
+      _int_free_chunk (&main_arena, p, chunksize (p), 1);
       __libc_lock_unlock (main_arena.mutex);
     }
   __set_errno (err);
diff --git a/malloc/malloc.c b/malloc/malloc.c
index a0bc733..9d860ea 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -300,7 +300,7 @@
 /* When "x" is from chunksize().  */
 # define csize2tidx(x) (((x) - MINSIZE) / MALLOC_ALIGNMENT)
 /* When "x" is a user-provided size.  */
-# define usize2tidx(x) csize2tidx (request2size (x))
+# define usize2tidx(x) csize2tidx (checked_request2size (x))
 
 /* With rounding and alignment, the bins are...
    idx 0   bytes 0..24 (64-bit) or 0..12 (32-bit)
@@ -1086,8 +1086,6 @@ typedef struct malloc_chunk* mchunkptr;
 /* Internal routines.  */
 
 static void*  _int_malloc(mstate, size_t);
-static void   _int_free (mstate, mchunkptr, int);
-static void   _int_free_check (mstate, mchunkptr, INTERNAL_SIZE_T);
 static void   _int_free_chunk (mstate, mchunkptr, INTERNAL_SIZE_T, int);
 static void   _int_free_merge_chunk (mstate, mchunkptr, INTERNAL_SIZE_T);
 static INTERNAL_SIZE_T _int_free_create_chunk (mstate,
@@ -1101,6 +1099,9 @@ static void*  _int_memalign(mstate, size_t, size_t);
 static void* _mid_memalign(size_t, size_t, void *);
 #endif
 
+#if USE_TCACHE
+static void malloc_printerr_tail(const char *str);
+#endif
 static void malloc_printerr(const char *str) __attribute__ ((noreturn));
 
 static void munmap_chunk(mchunkptr p);
@@ -1273,7 +1274,6 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 
    sysmalloc: Returns untagged memory.
    _int_malloc: Returns untagged memory.
-   _int_free: Takes untagged memory.
    _int_memalign: Returns untagged memory.
    _int_memalign: Returns untagged memory.
    _mid_memalign: Returns tagged memory.
@@ -3163,7 +3163,7 @@ tcache_put (mchunkptr chunk, size_t tc_idx)
 {
   tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
 
-  /* Mark this chunk as "in the tcache" so the test in _int_free will
+  /* Mark this chunk as "in the tcache" so the test in __libc_free will
      detect a double free.  */
   e->key = tcache_key;
 
@@ -3241,37 +3241,12 @@ tcache_double_free_verify (tcache_entry *e, size_t tc_idx)
 	malloc_printerr ("free(): unaligned chunk detected in tcache 2");
       if (tmp == e)
 	malloc_printerr ("free(): double free detected in tcache 2");
-      /* If we get here, it was a coincidence.  We've wasted a
-	 few cycles, but don't abort.  */
     }
-}
-
-/* Try to free chunk to the tcache, if success return true.
-   Caller must ensure that chunk and size are valid.  */
-static __always_inline bool
-tcache_free (mchunkptr p, INTERNAL_SIZE_T size)
-{
-  bool done = false;
-  size_t tc_idx = csize2tidx (size);
-  if (tcache != NULL && tc_idx < mp_.tcache_bins)
-    {
-      /* Check to see if it's already in the tcache.  */
-      tcache_entry *e = (tcache_entry *) chunk2mem (p);
-
-      /* This test succeeds on double free.  However, we don't 100%
-	 trust it (it also matches random payload data at a 1 in
-	 2^<size_t> chance), so verify it's not an unlikely
-	 coincidence before aborting.  */
-      if (__glibc_unlikely (e->key == tcache_key))
-	tcache_double_free_verify (e, tc_idx);
-
-      if (tcache->counts[tc_idx] < mp_.tcache_count)
-	{
-	  tcache_put (p, tc_idx);
-	  done = true;
-	}
-    }
-  return done;
+  /* No double free detected - it might be in a tcache of another thread,
+     or user data that happens to match the key.  Since we are not sure,
+     clear the key and retry freeing it.  */
+  e->key = 0;
+  __libc_free (e);
 }
 
 static void
@@ -3316,6 +3291,11 @@ tcache_init(void)
   if (tcache_shutting_down)
     return;
 
+  /* Check minimum mmap chunk is larger than max tcache size.  This means
+     mmap chunks with their different layout are never added to tcache.  */
+  if (MAX_TCACHE_SIZE >= GLRO (dl_pagesize) / 2)
+    malloc_printerr ("max tcache size too large");
+
   arena_get (ar_ptr, bytes);
   victim = _int_malloc (ar_ptr, bytes);
   if (!victim && ar_ptr != NULL)
@@ -3345,32 +3325,6 @@ tcache_init(void)
   if (__glibc_unlikely (tcache == NULL)) \
     tcache_init();
 
-/* Trying to alloc BYTES from tcache.  If tcache is available, chunk
-   is allocated and stored to MEMPTR, otherwise, MEMPTR is NULL.
-   It returns true if error occurs, else false.  */
-static __always_inline bool
-tcache_try_malloc (size_t bytes, void **memptr)
-{
-  /* int_free also calls request2size, be careful to not pad twice.  */
-  size_t tbytes = checked_request2size (bytes);
-  if (tbytes == 0)
-    {
-      __set_errno (ENOMEM);
-      return true;
-    }
-
-  size_t tc_idx = csize2tidx (tbytes);
-
-  MAYBE_INIT_TCACHE ();
-
-  if (tcache_available (tc_idx))
-    *memptr = tcache_get (tc_idx);
-  else
-    *memptr = NULL;
-
-  return false;
-}
-
 #else  /* !USE_TCACHE */
 # define MAYBE_INIT_TCACHE()
 
@@ -3429,7 +3383,7 @@ void *
 __libc_malloc (size_t bytes)
 {
 #if USE_TCACHE
-  size_t tc_idx = csize2tidx (checked_request2size (bytes));
+  size_t tc_idx = usize2tidx (bytes);
 
   if (tcache_available (tc_idx))
     return tag_new_usable (tcache_get (tc_idx));
@@ -3442,7 +3396,6 @@ libc_hidden_def (__libc_malloc)
 void
 __libc_free (void *mem)
 {
-  mstate ar_ptr;
   mchunkptr p;                          /* chunk corresponding to mem */
 
   if (mem == NULL)                              /* free(0) has no effect */
@@ -3453,37 +3406,41 @@ __libc_free (void *mem)
   if (__glibc_unlikely (mtag_enabled))
     *(volatile char *)mem;
 
-  int err = errno;
-
   p = mem2chunk (mem);
 
-  if (chunk_is_mmapped (p))                       /* release mmapped memory. */
-    {
-      /* See if the dynamic brk/mmap threshold needs adjusting.
-	 Dumped fake mmapped chunks do not affect the threshold.  */
-      if (!mp_.no_dyn_threshold
-          && chunksize_nomask (p) > mp_.mmap_threshold
-          && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX)
-        {
-          mp_.mmap_threshold = chunksize (p);
-          mp_.trim_threshold = 2 * mp_.mmap_threshold;
-          LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
-                      mp_.mmap_threshold, mp_.trim_threshold);
-        }
-      munmap_chunk (p);
-    }
-  else
+  /* Mark the chunk as belonging to the library again.  */
+  tag_region (chunk2mem (p), memsize (p));
+
+  INTERNAL_SIZE_T size = chunksize (p);
+
+  if (__glibc_unlikely (misaligned_chunk (p)))
+    return malloc_printerr_tail ("free(): invalid pointer");
+
+  check_inuse_chunk (arena_for_chunk (p), p);
+
+#if USE_TCACHE
+  size_t tc_idx = csize2tidx (size);
+
+  if (__glibc_likely (tcache != NULL && tc_idx < mp_.tcache_bins))
     {
-      MAYBE_INIT_TCACHE ();
+      /* Check to see if it's already in the tcache.  */
+      tcache_entry *e = (tcache_entry *) chunk2mem (p);
 
-      /* Mark the chunk as belonging to the library again.  */
-      (void)tag_region (chunk2mem (p), memsize (p));
+      /* Check for double free - verify if the key matches.  */
+      if (__glibc_unlikely (e->key == tcache_key))
+	return tcache_double_free_verify (e, tc_idx);
 
-      ar_ptr = arena_for_chunk (p);
-      _int_free (ar_ptr, p, 0);
+      if (__glibc_likely (tcache->counts[tc_idx] < mp_.tcache_count))
+	return tcache_put (p, tc_idx);
     }
+#endif
 
-  __set_errno (err);
+  /* Check size >= MINSIZE and p + size does not overflow.  */
+  if (__glibc_unlikely (__builtin_add_overflow_p ((uintptr_t) p, size - MINSIZE,
+						  (uintptr_t) 0)))
+    return malloc_printerr_tail ("free(): invalid size");
+
+  _int_free_chunk (arena_for_chunk (p), p, size, 0);
 }
 libc_hidden_def (__libc_free)
@@ -3687,16 +3644,7 @@ _mid_memalign (size_t alignment, size_t bytes, void *address)
 
 #if USE_TCACHE
   {
-    size_t tbytes;
-    tbytes = checked_request2size (bytes);
-    if (tbytes == 0)
-      {
-	__set_errno (ENOMEM);
-	return NULL;
-      }
-    size_t tc_idx = csize2tidx (tbytes);
-
-    MAYBE_INIT_TCACHE ();
+    size_t tc_idx = usize2tidx (bytes);
 
     if (tcache_available (tc_idx))
       {
@@ -3714,6 +3662,7 @@ _mid_memalign (size_t alignment, size_t bytes, void *address)
 	    return tag_new_usable (victim);
 	  }
       }
+    MAYBE_INIT_TCACHE ();
   }
 #endif
 
@@ -3798,13 +3747,10 @@ __libc_calloc (size_t n, size_t elem_size)
     ptmalloc_init ();
 
 #if USE_TCACHE
-  bool err = tcache_try_malloc (bytes, &mem);
-
-  if (err)
-    return NULL;
-
-  if (mem)
+  size_t tc_idx = usize2tidx (bytes);
+  if (tcache_available (tc_idx))
     {
+      mem = tcache_get (tc_idx);
       p = mem2chunk (mem);
       if (__glibc_unlikely (mtag_enabled))
 	return tag_new_zero_region (mem, memsize (p));
@@ -3813,6 +3759,7 @@ __libc_calloc (size_t n, size_t elem_size)
       clearsize = csz - SIZE_SZ;
       return clear_memory ((INTERNAL_SIZE_T *) mem, clearsize);
     }
+  MAYBE_INIT_TCACHE ();
 #endif
 
   if (SINGLE_THREAD_P)
@@ -4560,24 +4507,6 @@ _int_malloc (mstate av, size_t bytes)
    ------------------------------ free ------------------------------
  */
 
-static __always_inline void
-_int_free_check (mstate av, mchunkptr p, INTERNAL_SIZE_T size)
-{
-  /* Little security check which won't hurt performance: the
-     allocator never wraps around at the end of the address space.
-     Therefore we can exclude some size values which might appear
-     here by accident or by "design" from some intruder.  */
-  if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
-      || __builtin_expect (misaligned_chunk (p), 0))
-    malloc_printerr ("free(): invalid pointer");
-  /* We know that each chunk is at least MINSIZE bytes in size or a
-     multiple of MALLOC_ALIGNMENT.  */
-  if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
-    malloc_printerr ("free(): invalid size");
-
-  check_inuse_chunk (av, p);
-}
-
 /* Free chunk P of SIZE bytes to the arena.  HAVE_LOCK indicates where
    the arena for P has already been locked.  Caller must ensure chunk
    and size are valid.  */
@@ -4669,6 +4598,9 @@ _int_free_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size, int have_lock)
 
   else if (!chunk_is_mmapped(p)) {
 
+    /* Preserve errno in case block merging results in munmap.  */
+    int err = errno;
+
     /* If we're single-threaded, don't lock the arena.  */
     if (SINGLE_THREAD_P)
       have_lock = true;
@@ -4680,35 +4612,34 @@ _int_free_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size, int have_lock)
 
     if (!have_lock)
       __libc_lock_unlock (av->mutex);
+
+    __set_errno (err);
   }
   /*
     If the chunk was allocated via mmap, release via munmap().
   */
 
   else {
-    munmap_chunk (p);
-  }
-}
-
-/* Free chunk P to its arena AV.  HAVE_LOCK indicates where the arena for
-   P has already been locked.  It will perform sanity check, then try the
-   fast path to free into tcache.  If the attempt not success, free the
-   chunk to arena.  */
-static __always_inline void
-_int_free (mstate av, mchunkptr p, int have_lock)
-{
-  INTERNAL_SIZE_T size;        /* its size */
-  size = chunksize (p);
+    /* Preserve errno in case munmap sets it.  */
+    int err = errno;
 
-  _int_free_check (av, p, size);
+    /* See if the dynamic brk/mmap threshold needs adjusting.
+       Dumped fake mmapped chunks do not affect the threshold.  */
+    if (!mp_.no_dyn_threshold
+	&& chunksize_nomask (p) > mp_.mmap_threshold
+	&& chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX)
+      {
+	mp_.mmap_threshold = chunksize (p);
+	mp_.trim_threshold = 2 * mp_.mmap_threshold;
+	LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
+		    mp_.mmap_threshold, mp_.trim_threshold);
+      }
 
-#if USE_TCACHE
-  if (tcache_free (p, size))
-    return;
-#endif
+    munmap_chunk (p);
 
-  _int_free_chunk (av, p, size, have_lock);
+    __set_errno (err);
+  }
 }
 
 /* Try to merge chunk P of SIZE bytes with its neighbors.  Put the
@@ -5845,6 +5776,17 @@ malloc_printerr (const char *str)
   __builtin_unreachable ();
 }
 
+#if USE_TCACHE
+static __attribute_noinline__ void
+malloc_printerr_tail (const char *str)
+{
+  /* Ensure this cannot be a no-return function.  */
+  if (!__malloc_initialized)
+    return;
+  malloc_printerr (str);
+}
+#endif
+
 #if IS_IN (libc)
 /* We need a wrapper function for one of the additions of POSIX.  */
 int
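A note on the new size check in __libc_free: the single __builtin_add_overflow_p test subsumes both checks of the deleted _int_free_check. When size < MINSIZE, the unsigned subtraction size - MINSIZE wraps to a value near UINTPTR_MAX, so adding any realistic chunk address overflows; for sane sizes the test fires only when p + size would wrap past the end of the address space. A minimal standalone sketch of that behavior (not glibc code; the MINSIZE value here is illustrative and the builtin is GCC-specific):

#include <stdint.h>
#include <stdio.h>

#define MINSIZE 32  /* illustrative only; the real value is target-dependent */

/* Nonzero if the chunk must be rejected: a size below MINSIZE wraps
   size - MINSIZE to a huge value, so the addition overflows for any
   realistic chunk address; otherwise the test fires only when
   p + size would wrap around the address space.  */
static int
invalid_free_size (uintptr_t p, uintptr_t size)
{
  return __builtin_add_overflow_p (p, size - MINSIZE, (uintptr_t) 0);
}

int
main (void)
{
  printf ("%d\n", invalid_free_size (0x1000, 16));           /* 1: size < MINSIZE */
  printf ("%d\n", invalid_free_size (UINTPTR_MAX - 8, 64));  /* 1: p + size wraps */
  printf ("%d\n", invalid_free_size (0x1000, 64));           /* 0: plausible free */
  return 0;
}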
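A note on the reworked double-free detection: e->key == tcache_key can match by coincidence when user data happens to equal the per-process key, so tcache_double_free_verify aborts only after actually finding the chunk in its bin; on a false positive it now clears the key and retries the free through __libc_free. A rough standalone illustration of the scheme (hypothetical simplified types, no chunk headers or pointer mangling; not the glibc implementation):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for glibc's tcache_entry and a single bin.  */
typedef struct entry { struct entry *next; size_t key; } entry;

static const size_t tcache_key = 0x5eed5eed;  /* glibc uses a random key */
static entry *bin;

static void
put (entry *e)
{
  e->key = tcache_key;          /* mark "already in the tcache" */
  e->next = bin;
  bin = e;
}

static void
free_entry (entry *e)
{
  /* The key test succeeds on a double free but can also match user
     data by accident, so confirm by scanning the bin before aborting.  */
  if (e->key == tcache_key)
    for (entry *t = bin; t != NULL; t = t->next)
      if (t == e)
        {
          fprintf (stderr, "double free detected\n");
          abort ();
        }
  put (e);
}

int
main (void)
{
  entry *e = calloc (1, sizeof (entry));
  free_entry (e);
  free_entry (e);               /* aborts: e is already in the bin */
}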
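A note on malloc_printerr_tail: malloc_printerr is declared noreturn, and calls to provably noreturn functions are typically not emitted as tail calls, so the wrapper's early return deliberately keeps the compiler from proving it noreturn; the return malloc_printerr_tail (...) statements in __libc_free can then compile down to plain jumps, keeping the error paths cheap on the hot path. A sketch of the pattern with hypothetical names (whether the sibling-call optimization actually happens is compiler- and flag-dependent):

extern int initialized;
extern void fatal (const char *msg) __attribute__ ((noreturn));

/* The early return leaves a path that comes back, so the compiler
   cannot infer noreturn and callers remain free to tail-call this.  */
static __attribute__ ((noinline)) void
report_error (const char *msg)
{
  if (!initialized)
    return;
  fatal (msg);
}

void
fast_path (int bad)
{
  if (bad)
    return report_error ("invalid");  /* 'return' of a void call, as the patch does (GNU C) */
  /* ... hot path continues ... */
}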