author    Florian Weimer <fweimer@redhat.com>  2016-05-13 14:16:39 +0200
committer Florian Weimer <fweimer@redhat.com>  2016-05-13 14:16:39 +0200
commit    4cf6c72fd2a482e7499c29162349810029632c3f (patch)
tree      4c6a45d1e76bb4eb1afdfb39cf96a2b2c5751753 /malloc/hooks.c
parent    567c710bcad634bc94a21f00b9e1c24ca9f9f57c (diff)
malloc: Rewrite dumped heap for compatibility in __malloc_set_state
This will allow us to change many aspects of the malloc implementation while preserving compatibility with existing Emacs binaries.

As a result, existing Emacs binaries will have a larger RSS, and Emacs needs a few more milliseconds to start. This overhead is specific to Emacs (and will go away once Emacs switches to its internal malloc).

The new checks to make free and realloc compatible with the dumped heap are confined to the mmap paths, which are already quite slow due to the munmap overhead.

This commit weakens some security checks, but only for heap pointers in the dumped main arena. By default, this area is empty, so those checks are as effective as before.
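The compatibility check itself lands in the mmap paths of malloc.c rather than in the hooks.c hunk below. A minimal sketch of the idea, in glibc's internal style: dumped_main_arena_start and dumped_main_arena_end are the variables this patch introduces, while chunk_is_dumped and free_mmapped_chunk are hypothetical names used only for this illustration.

  /* Sketch only, not the literal malloc.c change: free must never hand a
     dumped fake mmapped chunk back to the kernel, because that memory was
     never obtained from mmap in this process.  */

  static bool
  chunk_is_dumped (mchunkptr p)           /* hypothetical helper */
  {
    return p >= dumped_main_arena_start && p < dumped_main_arena_end;
  }

  static void
  free_mmapped_chunk (mchunkptr p)        /* hypothetical wrapper */
  {
    if (chunk_is_dumped (p))
      return;  /* deliberately leaked; stays mapped for the process lifetime */

    munmap_chunk (p);                     /* normal mmap free path */
  }

Because only the already slow mmap path pays for the extra comparison, ordinary allocations and frees are unaffected.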
Diffstat (limited to 'malloc/hooks.c')
-rw-r--r--  malloc/hooks.c  150
1 file changed, 50 insertions(+), 100 deletions(-)
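The patch leans on malloc's chunk-header encoding: each chunk stores its size in the word just before the user data, with the low bits of that word used as flags, IS_MMAPPED among them. A simplified model of the macros that appear in the diff (the real definitions in malloc/malloc.c carry additional flag bits and alignment handling):

  /* Simplified model for reading the diff; not the real definitions.  */
  #include <stddef.h>

  #define PREV_INUSE  0x1     /* low size bit: previous chunk is in use */
  #define IS_MMAPPED  0x2     /* low size bit: chunk was mmapped */

  typedef struct malloc_chunk
  {
    size_t prev_size;         /* unused while the previous chunk is in use */
    size_t size;              /* chunk size | flag bits */
  } *mchunkptr;

  /* User pointer <-> chunk header, size extraction, traversal.  */
  #define mem2chunk(mem)  ((mchunkptr) ((char *) (mem) - 2 * sizeof (size_t)))
  #define chunksize(p)    ((p)->size & ~(size_t) 0x7)
  #define set_head(p, s)  ((p)->size = (s))
  #define next_chunk(p)   ((mchunkptr) ((char *) (p) + chunksize (p)))
  #define inuse(p)        (next_chunk (p)->size & PREV_INUSE)

Under this model, set_head (chunk, size | IS_MMAPPED) in the new code reroutes any later free or realloc of that chunk onto the mmap path, where a range check like the one sketched above can intercept it.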
diff --git a/malloc/hooks.c b/malloc/hooks.c
index 0d2bb96..45241f2 100644
--- a/malloc/hooks.c
+++ b/malloc/hooks.c
@@ -548,11 +548,7 @@ int
__malloc_set_state (void *msptr)
{
struct malloc_save_state *ms = (struct malloc_save_state *) msptr;
- size_t i;
- mbinptr b;
- disallow_malloc_check = 1;
- ptmalloc_init ();
if (ms->magic != MALLOC_STATE_MAGIC)
return -1;
@@ -560,106 +556,60 @@ __malloc_set_state (void *msptr)
if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
return -2;
- (void) mutex_lock (&main_arena.mutex);
- /* There are no fastchunks. */
- clear_fastchunks (&main_arena);
- if (ms->version >= 4)
- set_max_fast (ms->max_fast);
- else
- set_max_fast (64); /* 64 used to be the value we always used. */
- for (i = 0; i < NFASTBINS; ++i)
- fastbin (&main_arena, i) = 0;
- for (i = 0; i < BINMAPSIZE; ++i)
- main_arena.binmap[i] = 0;
- top (&main_arena) = ms->av[2];
- main_arena.last_remainder = 0;
- for (i = 1; i < NBINS; i++)
- {
- b = bin_at (&main_arena, i);
- if (ms->av[2 * i + 2] == 0)
- {
- assert (ms->av[2 * i + 3] == 0);
- first (b) = last (b) = b;
- }
+ /* We do not need to perform locking here because __malloc_set_state
+ must be called before the first call into the malloc subsystem
+ (usually via __malloc_initialize_hook). pthread_create always
+ calls calloc and thus must be called only afterwards, so there
+ cannot be more than one thread when we reach this point. */
+
+ /* Disable the malloc hooks (and malloc checking). */
+ __malloc_hook = NULL;
+ __realloc_hook = NULL;
+ __free_hook = NULL;
+ __memalign_hook = NULL;
+ using_malloc_checking = 0;
+
+ /* Patch the dumped heap. We no longer try to integrate into the
+ existing heap. Instead, we mark the existing chunks as mmapped.
+ Together with the update to dumped_main_arena_start and
+ dumped_main_arena_end, realloc and free will recognize these
+ chunks as dumped fake mmapped chunks and never free them. */
+
+ /* Find the chunk with the lowest address within the heap. */
+ mchunkptr chunk = NULL;
+ {
+ size_t *candidate = (size_t *) ms->sbrk_base;
+ size_t *end = (size_t *) (ms->sbrk_base + ms->sbrked_mem_bytes);
+ while (candidate < end)
+ if (*candidate != 0)
+ {
+ chunk = mem2chunk ((void *) (candidate + 1));
+ break;
+ }
else
- {
- if (ms->version >= 3 &&
- (i < NSMALLBINS || (largebin_index (chunksize (ms->av[2 * i + 2])) == i &&
- largebin_index (chunksize (ms->av[2 * i + 3])) == i)))
- {
- first (b) = ms->av[2 * i + 2];
- last (b) = ms->av[2 * i + 3];
- /* Make sure the links to the bins within the heap are correct. */
- first (b)->bk = b;
- last (b)->fd = b;
- /* Set bit in binblocks. */
- mark_bin (&main_arena, i);
- }
- else
- {
- /* Oops, index computation from chunksize must have changed.
- Link the whole list into unsorted_chunks. */
- first (b) = last (b) = b;
- b = unsorted_chunks (&main_arena);
- ms->av[2 * i + 2]->bk = b;
- ms->av[2 * i + 3]->fd = b->fd;
- b->fd->bk = ms->av[2 * i + 3];
- b->fd = ms->av[2 * i + 2];
- }
- }
- }
- if (ms->version < 3)
- {
- /* Clear fd_nextsize and bk_nextsize fields. */
- b = unsorted_chunks (&main_arena)->fd;
- while (b != unsorted_chunks (&main_arena))
- {
- if (!in_smallbin_range (chunksize (b)))
- {
- b->fd_nextsize = NULL;
- b->bk_nextsize = NULL;
- }
- b = b->fd;
- }
- }
- mp_.sbrk_base = ms->sbrk_base;
- main_arena.system_mem = ms->sbrked_mem_bytes;
- mp_.trim_threshold = ms->trim_threshold;
- mp_.top_pad = ms->top_pad;
- mp_.n_mmaps_max = ms->n_mmaps_max;
- mp_.mmap_threshold = ms->mmap_threshold;
- check_action = ms->check_action;
- main_arena.max_system_mem = ms->max_sbrked_mem;
- mp_.n_mmaps = ms->n_mmaps;
- mp_.max_n_mmaps = ms->max_n_mmaps;
- mp_.mmapped_mem = ms->mmapped_mem;
- mp_.max_mmapped_mem = ms->max_mmapped_mem;
- /* add version-dependent code here */
- if (ms->version >= 1)
- {
- /* Check whether it is safe to enable malloc checking, or whether
- it is necessary to disable it. */
- if (ms->using_malloc_checking && !using_malloc_checking &&
- !disallow_malloc_check)
- __malloc_check_init ();
- else if (!ms->using_malloc_checking && using_malloc_checking)
- {
- __malloc_hook = NULL;
- __free_hook = NULL;
- __realloc_hook = NULL;
- __memalign_hook = NULL;
- using_malloc_checking = 0;
- }
- }
- if (ms->version >= 4)
+ ++candidate;
+ }
+ if (chunk == NULL)
+ return 0;
+
+ /* Iterate over the dumped heap and patch the chunks so that they
+ are treated as fake mmapped chunks. */
+ mchunkptr top = ms->av[2];
+ while (chunk < top)
{
- mp_.arena_test = ms->arena_test;
- mp_.arena_max = ms->arena_max;
- narenas = ms->narenas;
+ if (inuse (chunk))
+ {
+ /* Mark chunk as mmapped, to trigger the fallback path. */
+ size_t size = chunksize (chunk);
+ set_head (chunk, size | IS_MMAPPED);
+ }
+ chunk = next_chunk (chunk);
}
- check_malloc_state (&main_arena);
- (void) mutex_unlock (&main_arena.mutex);
+ /* The dumped fake mmapped chunks all lie in this address range. */
+ dumped_main_arena_start = (mchunkptr) ms->sbrk_base;
+ dumped_main_arena_end = top;
+
return 0;
}
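For readers who have not met this interface: Emacs' unexec dumping captured the allocator state with malloc_get_state before writing the dumped image, and handed it back via malloc_set_state when the dumped binary started. A rough sketch of that round trip, with image writing and the unexec mechanics elided; both functions were declared in <malloc.h> at the time of this commit and have since been deprecated and removed from the public API.

  #include <malloc.h>
  #include <stdlib.h>

  /* Assumed to survive into the dumped image; unexec arranged for
     initialized data to be preserved in the new binary.  */
  static void *dumped_malloc_state;

  /* Dump side: snapshot the heap metadata before writing the image.  */
  static void
  prepare_dump (void)
  {
    dumped_malloc_state = malloc_get_state ();
    if (dumped_malloc_state == NULL)
      abort ();
  }

  /* Restore side: must run in the dumped binary before any other call
     into malloc (see the locking comment in the patch above).  After
     this commit the old heap is re-marked as fake mmapped chunks rather
     than re-linked into the live arena, so pointers into it remain
     valid but its memory is never coalesced or reused.  */
  static void
  restore_dump (void)
  {
    if (malloc_set_state (dumped_malloc_state) != 0)
      abort ();
  }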