Diffstat (limited to 'malloc')
-rw-r--r--  malloc/malloc-check.c   2
-rw-r--r--  malloc/malloc-debug.c  10
-rw-r--r--  malloc/malloc.c        95
3 files changed, 43 insertions(+), 64 deletions(-)
diff --git a/malloc/malloc-check.c b/malloc/malloc-check.c
index fbb0301..f5ca5fb 100644
--- a/malloc/malloc-check.c
+++ b/malloc/malloc-check.c
@@ -111,7 +111,7 @@ mem2chunk_check (void *mem, unsigned char **magic_p)
INTERNAL_SIZE_T sz, c;
unsigned char magic;
- if (!aligned_OK (mem))
+ if (misaligned_mem (mem))
return NULL;
p = mem2chunk (mem);
diff --git a/malloc/malloc-debug.c b/malloc/malloc-debug.c
index d208aa3..8bcb565 100644
--- a/malloc/malloc-debug.c
+++ b/malloc/malloc-debug.c
@@ -169,7 +169,7 @@ static void *
__debug_malloc (size_t bytes)
{
void *(*hook) (size_t, const void *) = atomic_forced_read (__malloc_hook);
- if (__builtin_expect (hook != NULL, 0))
+ if (__glibc_unlikely (hook != NULL))
return (*hook)(bytes, RETURN_ADDRESS (0));
void *victim = NULL;
@@ -193,7 +193,7 @@ static void
__debug_free (void *mem)
{
void (*hook) (void *, const void *) = atomic_forced_read (__free_hook);
- if (__builtin_expect (hook != NULL, 0))
+ if (__glibc_unlikely (hook != NULL))
{
(*hook)(mem, RETURN_ADDRESS (0));
return;
@@ -218,7 +218,7 @@ __debug_realloc (void *oldmem, size_t bytes)
{
void *(*hook) (void *, size_t, const void *) =
atomic_forced_read (__realloc_hook);
- if (__builtin_expect (hook != NULL, 0))
+ if (__glibc_unlikely (hook != NULL))
return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
size_t orig_bytes = bytes, oldsize = 0;
@@ -272,7 +272,7 @@ _debug_mid_memalign (size_t alignment, size_t bytes, const void *address)
{
void *(*hook) (size_t, size_t, const void *) =
atomic_forced_read (__memalign_hook);
- if (__builtin_expect (hook != NULL, 0))
+ if (__glibc_unlikely (hook != NULL))
return (*hook)(alignment, bytes, address);
void *victim = NULL;
@@ -371,7 +371,7 @@ __debug_calloc (size_t nmemb, size_t size)
}
void *(*hook) (size_t, const void *) = atomic_forced_read (__malloc_hook);
- if (__builtin_expect (hook != NULL, 0))
+ if (__glibc_unlikely (hook != NULL))
{
void *mem = (*hook)(bytes, RETURN_ADDRESS (0));
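
Note: the hook-path rewrites above are mechanical. glibc's <sys/cdefs.h> defines __glibc_unlikely (cond) as __builtin_expect ((cond), 0), so the new spelling is equivalent but shorter. A minimal standalone sketch of the pattern (my_unlikely, my_alloc, and hook are names invented here for illustration):

    #include <stdlib.h>

    /* Same expansion as glibc's __glibc_unlikely in <sys/cdefs.h>.  */
    #define my_unlikely(cond) __builtin_expect ((cond), 0)

    static void *(*hook) (size_t);   /* hypothetical hook, normally NULL */

    void *
    my_alloc (size_t bytes)
    {
      /* The compiler lays out the hook path as the cold branch.  */
      if (my_unlikely (hook != NULL))
        return hook (bytes);
      return malloc (bytes);
    }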
diff --git a/malloc/malloc.c b/malloc/malloc.c
index d28cd66..6da40ad 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -590,7 +590,7 @@ tag_at (void *ptr)
differs across systems, but is in all cases less than the maximum
representable value of a size_t.
*/
-void* __libc_malloc(size_t);
+void *__libc_malloc (size_t);
libc_hidden_proto (__libc_malloc)
static void *__libc_calloc2 (size_t);
@@ -1309,11 +1309,9 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/* Check if m has acceptable alignment */
-#define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
+#define misaligned_mem(m) ((uintptr_t)(m) & MALLOC_ALIGN_MASK)
-#define misaligned_chunk(p) \
- ((uintptr_t)(MALLOC_ALIGNMENT == CHUNK_HDR_SZ ? (p) : chunk2mem (p)) \
- & MALLOC_ALIGN_MASK)
+#define misaligned_chunk(p) (misaligned_mem (chunk2mem (p)))
/* pad request bytes into a usable size -- internal version */
/* Note: This must be a macro that evaluates to a compile time constant
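
The replacement inverts the predicate's sense: misaligned_mem returns the offending low bits rather than a boolean, so call sites test it directly instead of negating aligned_OK. A minimal check of the equivalence, assuming a 16-byte alignment mask purely for illustration:

    #include <assert.h>
    #include <stdint.h>

    #define MALLOC_ALIGN_MASK 0xf   /* illustrative; really MALLOC_ALIGNMENT - 1 */
    #define aligned_OK(m) (((unsigned long) (m) & MALLOC_ALIGN_MASK) == 0)
    #define misaligned_mem(m) ((uintptr_t) (m) & MALLOC_ALIGN_MASK)

    int
    main (void)
    {
      char buf[32];
      for (int i = 0; i < 32; i++)
        /* A nonzero result from misaligned_mem means "not aligned".  */
        assert (!aligned_OK (buf + i) == (misaligned_mem (buf + i) != 0));
      return 0;
    }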
@@ -1623,7 +1621,7 @@ unlink_chunk (mstate av, mchunkptr p)
mchunkptr fd = p->fd;
mchunkptr bk = p->bk;
- if (__builtin_expect (fd->bk != p || bk->fd != p, 0))
+ if (__glibc_unlikely (fd->bk != p || bk->fd != p))
malloc_printerr ("corrupted double-linked list");
fd->bk = bk;
@@ -2103,7 +2101,7 @@ do_check_chunk (mstate av, mchunkptr p)
/* chunk is page-aligned */
assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0);
/* mem is aligned */
- assert (aligned_OK (chunk2mem (p)));
+ assert (!misaligned_chunk (p));
}
}
@@ -2127,7 +2125,7 @@ do_check_free_chunk (mstate av, mchunkptr p)
if ((unsigned long) (sz) >= MINSIZE)
{
assert ((sz & MALLOC_ALIGN_MASK) == 0);
- assert (aligned_OK (chunk2mem (p)));
+ assert (!misaligned_chunk (p));
/* ... matching footer field */
assert (prev_size (next_chunk (p)) == sz);
/* ... and is fully consolidated */
@@ -2206,7 +2204,7 @@ do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
assert ((sz & MALLOC_ALIGN_MASK) == 0);
assert ((unsigned long) (sz) >= MINSIZE);
/* ... and alignment */
- assert (aligned_OK (chunk2mem (p)));
+ assert (!misaligned_chunk (p));
/* chunk is less than MINSIZE more than request */
assert ((long) (sz) - (long) (s) >= 0);
assert ((long) (sz) - (long) (s + MINSIZE) < 0);
@@ -3094,7 +3092,7 @@ mremap_chunk (mchunkptr p, size_t new_size)
p = (mchunkptr) (cp + offset);
- assert (aligned_OK (chunk2mem (p)));
+ assert (!misaligned_chunk (p));
assert (prev_size (p) == offset);
set_head (p, (new_size - offset) | IS_MMAPPED);
@@ -3207,7 +3205,7 @@ tcache_get_n (size_t tc_idx, tcache_entry **ep, bool mangled)
else
e = REVEAL_PTR (*ep);
- if (__glibc_unlikely (!aligned_OK (e)))
+ if (__glibc_unlikely (misaligned_mem (e)))
malloc_printerr ("malloc(): unaligned tcache chunk detected");
void *ne = e == NULL ? NULL : REVEAL_PTR (e->next);
@@ -3331,7 +3329,7 @@ tcache_double_free_verify (tcache_entry *e)
{
if (cnt >= mp_.tcache_count)
malloc_printerr ("free(): too many chunks detected in tcache");
- if (__glibc_unlikely (!aligned_OK (tmp)))
+ if (__glibc_unlikely (misaligned_mem (tmp)))
malloc_printerr ("free(): unaligned chunk detected in tcache 2");
if (tmp == e)
malloc_printerr ("free(): double free detected in tcache 2");
@@ -3365,7 +3363,7 @@ tcache_thread_shutdown (void)
while (tcache_tmp->entries[i])
{
tcache_entry *e = tcache_tmp->entries[i];
- if (__glibc_unlikely (!aligned_OK (e)))
+ if (__glibc_unlikely (misaligned_mem (e)))
malloc_printerr ("tcache_thread_shutdown(): "
"unaligned tcache chunk detected");
tcache_tmp->entries[i] = REVEAL_PTR (e->next);
@@ -3376,13 +3374,11 @@ tcache_thread_shutdown (void)
__libc_free (tcache_tmp);
}
+/* Initialize tcache. In the rare case there isn't any memory available,
+ later calls will retry initialization. */
static void
-tcache_init(void)
+tcache_init (void)
{
- mstate ar_ptr;
- void *victim = NULL;
- const size_t bytes = sizeof (tcache_perthread_struct);
-
if (tcache_shutting_down)
return;
@@ -3391,31 +3387,15 @@ tcache_init(void)
if (MAX_TCACHE_SMALL_SIZE >= GLRO (dl_pagesize) / 2)
malloc_printerr ("max tcache size too large");
- arena_get (ar_ptr, bytes);
- victim = _int_malloc (ar_ptr, bytes);
- if (!victim && ar_ptr != NULL)
- {
- ar_ptr = arena_get_retry (ar_ptr, bytes);
- victim = _int_malloc (ar_ptr, bytes);
- }
-
+ size_t bytes = sizeof (tcache_perthread_struct);
+ tcache = (tcache_perthread_struct *) __libc_malloc2 (bytes);
- if (ar_ptr != NULL)
- __libc_lock_unlock (ar_ptr->mutex);
-
- /* In a low memory situation, we may not be able to allocate memory
- - in which case, we just keep trying later. However, we
- typically do this very early, so either there is sufficient
- memory, or there isn't enough memory to do non-trivial
- allocations anyway. */
- if (victim)
+ if (tcache != NULL)
{
- tcache = (tcache_perthread_struct *) victim;
- memset (tcache, 0, sizeof (tcache_perthread_struct));
+ memset (tcache, 0, bytes);
for (int i = 0; i < TCACHE_MAX_BINS; i++)
tcache->num_slots[i] = mp_.tcache_count;
}
-
}
static void * __attribute_noinline__
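
The rewritten tcache_init delegates arena selection and retry to the internal __libc_malloc2 path instead of open-coding them, and tolerates allocation failure: tcache stays NULL and a later call simply retries. A sketch of the resulting lazy-init pattern, with hypothetical names (cache, cache_init):

    #include <stdlib.h>
    #include <string.h>

    struct cache { int num_slots[64]; };
    static __thread struct cache *cache;   /* hypothetical per-thread cache */

    static void
    cache_init (void)
    {
      cache = malloc (sizeof *cache);
      /* On failure, leave the pointer NULL; the next caller that finds
         it NULL retries initialization.  */
      if (cache != NULL)
        memset (cache, 0, sizeof *cache);
    }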
@@ -3630,8 +3610,8 @@ __libc_realloc (void *oldmem, size_t bytes)
never wraps around at the end of the address space. Therefore
we can exclude some size values which might appear here by
accident or by "design" from some intruder. */
- if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
- || __builtin_expect (misaligned_chunk (oldp), 0)))
+ if (__glibc_unlikely ((uintptr_t) oldp > (uintptr_t) -oldsize
+ || misaligned_chunk (oldp)))
malloc_printerr ("realloc(): invalid pointer");
nb = checked_request2size (bytes);
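
The rewritten check keeps the same two tests: the pointer must leave room for oldsize bytes below the top of the address space, and must be properly aligned. A standalone sketch of the wrap test (wraps_address_space is a name invented here):

    #include <stdbool.h>
    #include <stdint.h>

    /* (uintptr_t) -size equals 2^N - size, the highest base address at
       which 'size' bytes still fit without wrapping past the top of
       the address space; anything above it is a bogus pointer.  */
    static bool
    wraps_address_space (uintptr_t p, uintptr_t size)
    {
      return p > (uintptr_t) -size;
    }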
@@ -3901,7 +3881,7 @@ __libc_calloc2 (size_t sz)
/* Two optional cases in which clearing not necessary */
if (chunk_is_mmapped (p))
{
- if (__builtin_expect (perturb_byte, 0))
+ if (__glibc_unlikely (perturb_byte))
return memset (mem, 0, sz);
return mem;
@@ -4066,7 +4046,7 @@ _int_malloc (mstate av, size_t bytes)
if (__glibc_likely (victim != NULL))
{
size_t victim_idx = fastbin_index (chunksize (victim));
- if (__builtin_expect (victim_idx != idx, 0))
+ if (__glibc_unlikely (victim_idx != idx))
malloc_printerr ("malloc(): memory corruption (fast)");
check_remalloced_chunk (av, victim, nb);
#if USE_TCACHE
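
The corruption check above verifies that the size stored in the chunk maps back to the fastbin it was taken from. For reference, a sketch using fastbin_index as defined in malloc.c, assuming an 8-byte size_t:

    #include <assert.h>
    #include <stddef.h>

    /* As in malloc.c: the fast-bin index for a chunk size.  */
    #define SIZE_SZ (sizeof (size_t))
    #define fastbin_index(sz) \
      ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)

    int
    main (void)
    {
      if (SIZE_SZ == 8)
        /* 32-byte chunks land in bin 0, 48-byte in bin 1, and so on; a
           size field that maps to a different bin than the one the
           chunk was taken from indicates heap corruption.  */
        assert (fastbin_index (32) == 0 && fastbin_index (48) == 1);
      return 0;
    }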
@@ -4655,10 +4635,9 @@ _int_free_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size, int have_lock)
#endif
) {
- if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
- <= CHUNK_HDR_SZ, 0)
- || __builtin_expect (chunksize (chunk_at_offset (p, size))
- >= av->system_mem, 0))
+ if (__glibc_unlikely (
+ chunksize_nomask (chunk_at_offset (p, size)) <= CHUNK_HDR_SZ
+ || chunksize (chunk_at_offset (p, size)) >= av->system_mem))
{
bool fail = true;
/* We might not have a lock at this point and concurrent modifications
@@ -4689,7 +4668,7 @@ _int_free_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size, int have_lock)
{
/* Check that the top of the bin is not the record we are going to
add (i.e., double free). */
- if (__builtin_expect (old == p, 0))
+ if (__glibc_unlikely (old == p))
malloc_printerr ("double free or corruption (fasttop)");
p->fd = PROTECT_PTR (&p->fd, old);
*fb = p;
@@ -4699,7 +4678,7 @@ _int_free_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size, int have_lock)
{
/* Check that the top of the bin is not the record we are going to
add (i.e., double free). */
- if (__builtin_expect (old == p, 0))
+ if (__glibc_unlikely (old == p))
malloc_printerr ("double free or corruption (fasttop)");
old2 = old;
p->fd = PROTECT_PTR (&p->fd, old);
@@ -4712,7 +4691,7 @@ _int_free_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size, int have_lock)
only if we have the lock, otherwise it might have already been
allocated again. */
if (have_lock && old != NULL
- && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
+ && __glibc_unlikely (fastbin_index (chunksize (old)) != idx))
malloc_printerr ("invalid fastbin entry (free)");
}
@@ -4779,17 +4758,17 @@ _int_free_merge_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size)
if (__glibc_unlikely (p == av->top))
malloc_printerr ("double free or corruption (top)");
/* Or whether the next chunk is beyond the boundaries of the arena. */
- if (__builtin_expect (contiguous (av)
+ if (__glibc_unlikely (contiguous (av)
&& (char *) nextchunk
- >= ((char *) av->top + chunksize(av->top)), 0))
+ >= ((char *) av->top + chunksize(av->top))))
malloc_printerr ("double free or corruption (out)");
/* Or whether the block is actually not marked used. */
if (__glibc_unlikely (!prev_inuse(nextchunk)))
malloc_printerr ("double free or corruption (!prev)");
INTERNAL_SIZE_T nextsize = chunksize(nextchunk);
- if (__builtin_expect (chunksize_nomask (nextchunk) <= CHUNK_HDR_SZ, 0)
- || __builtin_expect (nextsize >= av->system_mem, 0))
+ if (__glibc_unlikely (chunksize_nomask (nextchunk) <= CHUNK_HDR_SZ
+ || nextsize >= av->system_mem))
malloc_printerr ("free(): invalid next size (normal)");
free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
@@ -5046,9 +5025,9 @@ _int_realloc (mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
unsigned long remainder_size; /* its size */
/* oldmem size */
- if (__builtin_expect (chunksize_nomask (oldp) <= CHUNK_HDR_SZ, 0)
- || __builtin_expect (oldsize >= av->system_mem, 0)
- || __builtin_expect (oldsize != chunksize (oldp), 0))
+ if (__glibc_unlikely (chunksize_nomask (oldp) <= CHUNK_HDR_SZ
+ || oldsize >= av->system_mem
+ || oldsize != chunksize (oldp)))
malloc_printerr ("realloc(): invalid old size");
check_inuse_chunk (av, oldp);
@@ -5058,8 +5037,8 @@ _int_realloc (mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
next = chunk_at_offset (oldp, oldsize);
INTERNAL_SIZE_T nextsize = chunksize (next);
- if (__builtin_expect (chunksize_nomask (next) <= CHUNK_HDR_SZ, 0)
- || __builtin_expect (nextsize >= av->system_mem, 0))
+ if (__glibc_unlikely (chunksize_nomask (next) <= CHUNK_HDR_SZ
+ || nextsize >= av->system_mem))
malloc_printerr ("realloc(): invalid next size");
if ((unsigned long) (oldsize) >= (unsigned long) (nb))