Diffstat (limited to 'malloc')
-rw-r--r--  malloc/malloc-check.c    2
-rw-r--r--  malloc/malloc.c        181
2 files changed, 81 insertions(+), 102 deletions(-)
diff --git a/malloc/malloc-check.c b/malloc/malloc-check.c
index 814a916..c5265ec 100644
--- a/malloc/malloc-check.c
+++ b/malloc/malloc-check.c
@@ -235,7 +235,7 @@ free_check (void *mem)
{
/* Mark the chunk as belonging to the library again. */
(void)tag_region (chunk2mem (p), memsize (p));
- _int_free (&main_arena, p, 1);
+ _int_free_chunk (&main_arena, p, chunksize (p), 1);
__libc_lock_unlock (main_arena.mutex);
}
__set_errno (err);
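
[Annotation] The hunk above adapts free_check to the new internal interface: _int_free is removed, and callers now pass the chunk size explicitly, so a caller that has already computed the size does not force the callee to re-read the chunk header. A minimal standalone sketch of the interface change; struct chunk, free_chunk_old and free_chunk_new below are illustrative stand-ins, not glibc internals:

    #include <stddef.h>
    #include <stdio.h>

    struct chunk { size_t size_field; };

    /* Low bits of the size field hold flags, as in malloc chunks.  */
    static size_t chunksize (struct chunk *p)
    { return p->size_field & ~(size_t) 7; }

    /* Old style: the callee re-derives the size from the header.  */
    static void free_chunk_old (struct chunk *p, int have_lock)
    {
      printf ("old: %zu bytes (lock=%d)\n", chunksize (p), have_lock);
    }

    /* New style: the caller passes the size it already has.  */
    static void free_chunk_new (struct chunk *p, size_t size, int have_lock)
    {
      printf ("new: %zu bytes at %p (lock=%d)\n", size, (void *) p, have_lock);
    }

    int main (void)
    {
      struct chunk c = { 48 | 1 };
      free_chunk_old (&c, 1);
      free_chunk_new (&c, chunksize (&c), 1);
      return 0;
    }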
diff --git a/malloc/malloc.c b/malloc/malloc.c
index a0bc733..23b9306 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1086,8 +1086,6 @@ typedef struct malloc_chunk* mchunkptr;
/* Internal routines. */
static void* _int_malloc(mstate, size_t);
-static void _int_free (mstate, mchunkptr, int);
-static void _int_free_check (mstate, mchunkptr, INTERNAL_SIZE_T);
static void _int_free_chunk (mstate, mchunkptr, INTERNAL_SIZE_T, int);
static void _int_free_merge_chunk (mstate, mchunkptr, INTERNAL_SIZE_T);
static INTERNAL_SIZE_T _int_free_create_chunk (mstate,
@@ -1101,6 +1099,9 @@ static void* _int_memalign(mstate, size_t, size_t);
static void* _mid_memalign(size_t, size_t, void *);
#endif
+#if USE_TCACHE
+static void malloc_printerr_tail(const char *str);
+#endif
static void malloc_printerr(const char *str) __attribute__ ((noreturn));
static void munmap_chunk(mchunkptr p);
@@ -1273,7 +1274,6 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
sysmalloc: Returns untagged memory.
_int_malloc: Returns untagged memory.
- _int_free: Takes untagged memory.
_int_memalign: Returns untagged memory.
_mid_memalign: Returns tagged memory.
@@ -3163,7 +3163,7 @@ tcache_put (mchunkptr chunk, size_t tc_idx)
{
tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
- /* Mark this chunk as "in the tcache" so the test in _int_free will
+ /* Mark this chunk as "in the tcache" so the test in __libc_free will
detect a double free. */
e->key = tcache_key;
@@ -3241,37 +3241,12 @@ tcache_double_free_verify (tcache_entry *e, size_t tc_idx)
malloc_printerr ("free(): unaligned chunk detected in tcache 2");
if (tmp == e)
malloc_printerr ("free(): double free detected in tcache 2");
- /* If we get here, it was a coincidence. We've wasted a
- few cycles, but don't abort. */
}
-}
-
-/* Try to free chunk to the tcache, if success return true.
- Caller must ensure that chunk and size are valid. */
-static __always_inline bool
-tcache_free (mchunkptr p, INTERNAL_SIZE_T size)
-{
- bool done = false;
- size_t tc_idx = csize2tidx (size);
- if (tcache != NULL && tc_idx < mp_.tcache_bins)
- {
- /* Check to see if it's already in the tcache. */
- tcache_entry *e = (tcache_entry *) chunk2mem (p);
-
- /* This test succeeds on double free. However, we don't 100%
- trust it (it also matches random payload data at a 1 in
- 2^<size_t> chance), so verify it's not an unlikely
- coincidence before aborting. */
- if (__glibc_unlikely (e->key == tcache_key))
- tcache_double_free_verify (e, tc_idx);
-
- if (tcache->counts[tc_idx] < mp_.tcache_count)
- {
- tcache_put (p, tc_idx);
- done = true;
- }
- }
- return done;
+ /* No double free detected - it might be in a tcache of another thread,
+ or user data that happens to match the key. Since we are not sure,
+ clear the key and retry freeing it. */
+ e->key = 0;
+ __libc_free (e);
}
static void
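
[Annotation] The hunk above folds the old tcache_free helper into __libc_free and changes what happens when the key check fires but the chunk is not actually in the bin: rather than silently continuing (the removed "it was a coincidence" path), tcache_double_free_verify now clears the key and retries the free through __libc_free. A standalone model of the key-based double-free check, assuming a stand-in random key and a single linked-list bin; none of the names below are glibc's:

    #include <stdint.h>
    #include <stdio.h>

    static uintptr_t tcache_key = 0x5eed;   /* stand-in: random at startup */

    struct entry { struct entry *next; uintptr_t key; };

    static struct entry *bin;               /* one cache bin */

    static void model_free (struct entry *e)
    {
      if (e->key == tcache_key)             /* probably already cached */
        {
          for (struct entry *t = bin; t != NULL; t = t->next)
            if (t == e)
              { puts ("double free detected"); return; }
          /* Key matched by coincidence (user data): clear it and retry,
             mirroring the new e->key = 0; __libc_free (e) path.  */
          e->key = 0;
          model_free (e);
          return;
        }
      e->key = tcache_key;                  /* mark as "in the cache" */
      e->next = bin;
      bin = e;
    }

    int main (void)
    {
      struct entry a = { 0, 0 };
      model_free (&a);                      /* goes into the bin */
      model_free (&a);                      /* caught by the key check */
      return 0;
    }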
@@ -3316,6 +3291,11 @@ tcache_init(void)
if (tcache_shutting_down)
return;
+ /* Check minimum mmap chunk is larger than max tcache size. This means
+ mmap chunks with their different layout are never added to tcache. */
+ if (MAX_TCACHE_SIZE >= GLRO (dl_pagesize) / 2)
+ malloc_printerr ("max tcache size too large");
+
arena_get (ar_ptr, bytes);
victim = _int_malloc (ar_ptr, bytes);
if (!victim && ar_ptr != NULL)
@@ -3361,13 +3341,15 @@ tcache_try_malloc (size_t bytes, void **memptr)
size_t tc_idx = csize2tidx (tbytes);
- MAYBE_INIT_TCACHE ();
-
if (tcache_available (tc_idx))
- *memptr = tcache_get (tc_idx);
+ {
+ *memptr = tcache_get (tc_idx);
+ return false;
+ }
else
*memptr = NULL;
+ MAYBE_INIT_TCACHE ();
return false;
}
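
[Annotation] Here (and in the matching _mid_memalign hunk further down) the allocation fast path is reordered: the cache is probed first, and MAYBE_INIT_TCACHE () is only reached on a miss, so a hit never pays for the initialization check. A rough standalone sketch of that ordering; cache_available, cache_init and the static array are stand-ins for the tcache machinery:

    #include <stdio.h>

    static void *slots[8];
    static int count = -1;          /* -1 means "cache not initialized" */

    static int cache_available (void) { return count > 0; }
    static void cache_init (void) { if (count < 0) count = 0; }

    static void *try_cached_malloc (void)
    {
      if (cache_available ())       /* hit path: no init check at all */
        return slots[--count];
      cache_init ();                /* miss path pays for lazy init */
      return NULL;                  /* caller falls back to the slow path */
    }

    int main (void)
    {
      slots[0] = &count; count = 1; /* pretend one cached block exists */
      printf ("hit:  %p\n", try_cached_malloc ());
      printf ("miss: %p\n", try_cached_malloc ());
      return 0;
    }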
@@ -3442,7 +3424,6 @@ libc_hidden_def (__libc_malloc)
void
__libc_free (void *mem)
{
- mstate ar_ptr;
mchunkptr p; /* chunk corresponding to mem */
if (mem == NULL) /* free(0) has no effect */
@@ -3453,37 +3434,41 @@ __libc_free (void *mem)
if (__glibc_unlikely (mtag_enabled))
*(volatile char *)mem;
- int err = errno;
-
p = mem2chunk (mem);
- if (chunk_is_mmapped (p)) /* release mmapped memory. */
- {
- /* See if the dynamic brk/mmap threshold needs adjusting.
- Dumped fake mmapped chunks do not affect the threshold. */
- if (!mp_.no_dyn_threshold
- && chunksize_nomask (p) > mp_.mmap_threshold
- && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX)
- {
- mp_.mmap_threshold = chunksize (p);
- mp_.trim_threshold = 2 * mp_.mmap_threshold;
- LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
- mp_.mmap_threshold, mp_.trim_threshold);
- }
- munmap_chunk (p);
- }
- else
+ /* Mark the chunk as belonging to the library again. */
+ tag_region (chunk2mem (p), memsize (p));
+
+ INTERNAL_SIZE_T size = chunksize (p);
+
+ if (__glibc_unlikely (misaligned_chunk (p)))
+ return malloc_printerr_tail ("free(): invalid pointer");
+
+ check_inuse_chunk (arena_for_chunk (p), p);
+
+#if USE_TCACHE
+ size_t tc_idx = csize2tidx (size);
+
+ if (__glibc_likely (tcache != NULL && tc_idx < mp_.tcache_bins))
{
- MAYBE_INIT_TCACHE ();
+ /* Check to see if it's already in the tcache. */
+ tcache_entry *e = (tcache_entry *) chunk2mem (p);
- /* Mark the chunk as belonging to the library again. */
- (void)tag_region (chunk2mem (p), memsize (p));
+ /* Check for double free - verify if the key matches. */
+ if (__glibc_unlikely (e->key == tcache_key))
+ return tcache_double_free_verify (e, tc_idx);
- ar_ptr = arena_for_chunk (p);
- _int_free (ar_ptr, p, 0);
+ if (__glibc_likely (tcache->counts[tc_idx] < mp_.tcache_count))
+ return tcache_put (p, tc_idx);
}
+#endif
- __set_errno (err);
+ /* Check size >= MINSIZE and p + size does not overflow. */
+ if (__glibc_unlikely (__builtin_add_overflow_p ((uintptr_t) p, size - MINSIZE,
+ (uintptr_t) 0)))
+ return malloc_printerr_tail ("free(): invalid size");
+
+ _int_free_chunk (arena_for_chunk (p), p, size, 0);
}
libc_hidden_def (__libc_free)
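
[Annotation] The size check added to __libc_free above folds two tests into one branch. __builtin_add_overflow_p (a GCC built-in since GCC 7) reports whether the addition would overflow the type of its third operand, without storing a result. Because size - MINSIZE wraps to a huge value when size < MINSIZE, a single test rejects both undersized chunks and chunks that would wrap past the end of the address space. A standalone illustration; the MINSIZE value here is only a stand-in:

    #include <stdint.h>
    #include <stdio.h>

    #define MINSIZE 32   /* stand-in for the real minimum chunk size */

    static int free_size_ok (uintptr_t p, uintptr_t size)
    {
      /* size < MINSIZE makes size - MINSIZE wrap to a huge value, so
         the addition overflows; p + size wrapping overflows directly.  */
      return !__builtin_add_overflow_p (p, size - MINSIZE, (uintptr_t) 0);
    }

    int main (void)
    {
      printf ("%d\n", free_size_ok (0x1000, 64));           /* 1: valid */
      printf ("%d\n", free_size_ok (0x1000, 16));           /* 0: size < MINSIZE */
      printf ("%d\n", free_size_ok (UINTPTR_MAX - 8, 64));  /* 0: wraps */
      return 0;
    }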
@@ -3696,8 +3681,6 @@ _mid_memalign (size_t alignment, size_t bytes, void *address)
}
size_t tc_idx = csize2tidx (tbytes);
- MAYBE_INIT_TCACHE ();
-
if (tcache_available (tc_idx))
{
/* The tcache itself isn't encoded, but the chain is. */
@@ -3714,6 +3697,7 @@ _mid_memalign (size_t alignment, size_t bytes, void *address)
return tag_new_usable (victim);
}
}
+ MAYBE_INIT_TCACHE ();
}
#endif
@@ -4560,24 +4544,6 @@ _int_malloc (mstate av, size_t bytes)
------------------------------ free ------------------------------
*/
-static __always_inline void
-_int_free_check (mstate av, mchunkptr p, INTERNAL_SIZE_T size)
-{
- /* Little security check which won't hurt performance: the
- allocator never wraps around at the end of the address space.
- Therefore we can exclude some size values which might appear
- here by accident or by "design" from some intruder. */
- if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
- || __builtin_expect (misaligned_chunk (p), 0))
- malloc_printerr ("free(): invalid pointer");
- /* We know that each chunk is at least MINSIZE bytes in size or a
- multiple of MALLOC_ALIGNMENT. */
- if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
- malloc_printerr ("free(): invalid size");
-
- check_inuse_chunk (av, p);
-}
-
/* Free chunk P of SIZE bytes to the arena.  HAVE_LOCK indicates whether
the arena for P has already been locked.  Caller must ensure the chunk
and size are valid. */
@@ -4669,6 +4635,9 @@ _int_free_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size, int have_lock)
else if (!chunk_is_mmapped(p)) {
+ /* Preserve errno in case block merging results in munmap. */
+ int err = errno;
+
/* If we're single-threaded, don't lock the arena. */
if (SINGLE_THREAD_P)
have_lock = true;
@@ -4680,35 +4649,34 @@ _int_free_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size, int have_lock)
if (!have_lock)
__libc_lock_unlock (av->mutex);
+
+ __set_errno (err);
}
/*
If the chunk was allocated via mmap, release via munmap().
*/
else {
- munmap_chunk (p);
- }
-}
-/* Free chunk P to its arena AV. HAVE_LOCK indicates where the arena for
- P has already been locked. It will perform sanity check, then try the
- fast path to free into tcache. If the attempt not success, free the
- chunk to arena. */
-static __always_inline void
-_int_free (mstate av, mchunkptr p, int have_lock)
-{
- INTERNAL_SIZE_T size; /* its size */
-
- size = chunksize (p);
+ /* Preserve errno in case munmap sets it. */
+ int err = errno;
- _int_free_check (av, p, size);
+ /* See if the dynamic brk/mmap threshold needs adjusting.
+ Dumped fake mmapped chunks do not affect the threshold. */
+ if (!mp_.no_dyn_threshold
+ && chunksize_nomask (p) > mp_.mmap_threshold
+ && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX)
+ {
+ mp_.mmap_threshold = chunksize (p);
+ mp_.trim_threshold = 2 * mp_.mmap_threshold;
+ LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
+ mp_.mmap_threshold, mp_.trim_threshold);
+ }
-#if USE_TCACHE
- if (tcache_free (p, size))
- return;
-#endif
+ munmap_chunk (p);
- _int_free_chunk (av, p, size, have_lock);
+ __set_errno (err);
+ }
}
/* Try to merge chunk P of SIZE bytes with its neighbors. Put the
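
[Annotation] With _int_free gone, errno preservation also moves out of __libc_free and into the two _int_free_chunk paths that can reach munmap (consolidation into the arena, and the mmap branch above), so a successful free () still leaves the caller's errno untouched. The save/restore pattern itself, as a trivial standalone sketch:

    #include <errno.h>
    #include <stdio.h>

    static void may_clobber_errno (void)
    {
      errno = ENOMEM;         /* stand-in for munmap failing internally */
    }

    static void errno_preserving_release (void)
    {
      int err = errno;        /* save the caller's errno */
      may_clobber_errno ();   /* work that may set errno */
      errno = err;            /* restore before returning */
    }

    int main (void)
    {
      errno = 0;
      errno_preserving_release ();
      printf ("errno after free-like call: %d\n", errno);   /* prints 0 */
      return 0;
    }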
@@ -5845,6 +5813,17 @@ malloc_printerr (const char *str)
__builtin_unreachable ();
}
+#if USE_TCACHE
+static __attribute_noinline__ void
+malloc_printerr_tail (const char *str)
+{
+ /* Ensure this cannot be a no-return function. */
+ if (!__malloc_initialized)
+ return;
+ malloc_printerr (str);
+}
+#endif
+
#if IS_IN (libc)
/* We need a wrapper function for one of the additions of POSIX. */
int
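
[Annotation] malloc_printerr_tail exists so the cold error paths in __libc_free can be written as plain tail calls (return malloc_printerr_tail (...)). As its comment notes, the reachable early return keeps the compiler from inferring noreturn for the function; that way the call site can be emitted as a bare jump and the hot path stays short. A standalone sketch of the pattern, with stand-in names:

    #include <stdio.h>
    #include <stdlib.h>

    static int initialized = 1;

    __attribute__ ((noinline)) static void
    report_error_tail (const char *str)
    {
      /* The reachable return means this is formally not noreturn.  */
      if (!initialized)
        return;
      fprintf (stderr, "%s\n", str);
      abort ();
    }

    static void fast_path_free (void *p)
    {
      if (p == NULL)
        return report_error_tail ("free(): invalid pointer"); /* tail call */
      /* ... hot path continues ... */
    }

    int main (void)
    {
      int x;
      fast_path_free (&x);
      return 0;
    }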