author     Ulrich Drepper <drepper@redhat.com>    2007-12-16 22:53:08 +0000
committer  Ulrich Drepper <drepper@redhat.com>    2007-12-16 22:53:08 +0000
commit     68631c8eb92ff38d9da1ae34f6aa048539b199cc (patch)
tree       092db43d0f8df56b3df2a759012d029395376fd4 /malloc
parent     52386be756e113f20502f181d780aecc38cbb66a (diff)
* malloc/malloc.c (public_mTRIm): Iterate over all arenas and call
  mTRIm for all of them.
  (mTRIm): Additionally iterate over all free blocks and use madvise
  to free memory for all those blocks which contain at least one
  memory page.
* malloc/malloc.c (do_check_malloc_state): Minimal cleanups.
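As a rough illustration of the per-chunk arithmetic described above, here is a minimal standalone sketch; trim_free_block, chunk_start, chunk_size, header_size and page_size are hypothetical names for this example only, not allocator internals. The real code walks the bin lists and keeps the chunk header resident, discarding only the whole pages that fall inside a free block.

/* Hedged sketch of the trimming arithmetic, under the assumptions named
   above.  Round the first byte after the chunk header up to a page
   boundary and hand every whole page inside the free block back to the
   kernel with MADV_DONTNEED.  Returns 1 if at least one page was
   released.  */
#define _DEFAULT_SOURCE         /* for madvise() on glibc */
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

int
trim_free_block (char *chunk_start, size_t chunk_size,
                 size_t header_size, size_t page_size)
{
  const size_t psm1 = page_size - 1;

  /* First page-aligned address past the header that must stay intact.  */
  char *aligned = (char *) (((uintptr_t) chunk_start + header_size + psm1)
                            & ~(uintptr_t) psm1);

  /* Chunk too small to contain even part of a page past the header.  */
  if (aligned >= chunk_start + chunk_size)
    return 0;

  /* Bytes from the aligned address to the end of the chunk; only whole
     pages can be discarded.  */
  size_t avail = (size_t) (chunk_start + chunk_size - aligned);
  if (avail < page_size)
    return 0;

  madvise (aligned, avail & ~psm1, MADV_DONTNEED);
  return 1;
}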
Diffstat (limited to 'malloc')
-rw-r--r--   malloc/malloc.c   79
1 file changed, 63 insertions(+), 16 deletions(-)
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 1e71608..c54c203 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1592,7 +1592,7 @@ static Void_t* _int_pvalloc(mstate, size_t);
 static Void_t** _int_icalloc(mstate, size_t, size_t, Void_t**);
 static Void_t** _int_icomalloc(mstate, size_t, size_t*, Void_t**);
 #endif
-static int mTRIm(size_t);
+static int mTRIm(mstate, size_t);
 static size_t mUSABLe(Void_t*);
 static void mSTATs(void);
 static int mALLOPt(int, int);
@@ -2739,8 +2739,6 @@ static void do_check_malloc_state(mstate av)
   mchunkptr p;
   mchunkptr q;
   mbinptr b;
-  unsigned int binbit;
-  int empty;
   unsigned int idx;
   INTERNAL_SIZE_T size;
   unsigned long total = 0;
@@ -2810,8 +2808,8 @@ static void do_check_malloc_state(mstate av)
     /* binmap is accurate (except for bin 1 == unsorted_chunks) */
     if (i >= 2) {
-      binbit = get_binmap(av,i);
-      empty = last(b) == b;
+      unsigned int binbit = get_binmap(av,i);
+      int empty = last(b) == b;
       if (!binbit)
         assert(empty);
       else if (!empty)
@@ -4013,13 +4011,22 @@ public_cFREe(Void_t* m)
 int
 public_mTRIm(size_t s)
 {
-  int result;
+  int result = 0;
   if(__malloc_initialized < 0)
     ptmalloc_init ();
-  (void)mutex_lock(&main_arena.mutex);
-  result = mTRIm(s);
-  (void)mutex_unlock(&main_arena.mutex);
+
+  mstate ar_ptr = &main_arena;
+  do
+    {
+      (void) mutex_lock (&ar_ptr->mutex);
+      result |= mTRIm (ar_ptr, s);
+      (void) mutex_unlock (&ar_ptr->mutex);
+
+      ar_ptr = ar_ptr->next;
+    }
+  while (ar_ptr != &main_arena);
+
   return result;
 }
@@ -5489,20 +5496,60 @@ _int_pvalloc(av, bytes) mstate av, size_t bytes;
 */
 #if __STD_C
-int mTRIm(size_t pad)
+static int mTRIm(mstate av, size_t pad)
 #else
-int mTRIm(pad) size_t pad;
+static int mTRIm(av, pad) mstate av; size_t pad;
 #endif
 {
-  mstate av = &main_arena; /* already locked */
-
   /* Ensure initialization/consolidation */
-  malloc_consolidate(av);
+  malloc_consolidate (av);
+
+  const size_t ps = mp_.pagesize;
+  int psindex = bin_index (ps);
+  const size_t psm1 = ps - 1;
+
+  int result = 0;
+  for (int i = 1; i < NBINS; ++i)
+    if (i == 1 || i >= psindex)
+      {
+        mbinptr bin = bin_at (av, i);
+
+        for (mchunkptr p = last (bin); p != bin; p = p->bk)
+          {
+            INTERNAL_SIZE_T size = chunksize (p);
+
+            if (size > psm1 + sizeof (struct malloc_chunk))
+              {
+                /* See whether the chunk contains at least one unused page.  */
+                char *paligned_mem = (char *) (((uintptr_t) p
+                                                + sizeof (struct malloc_chunk)
+                                                + psm1) & ~psm1);
+
+                assert ((char *) chunk2mem (p) + 4 * SIZE_SZ <= paligned_mem);
+                assert ((char *) p + size > paligned_mem);
+
+                /* This is the size we could potentially free.  */
+                size -= paligned_mem - (char *) p;
+
+                if (size > psm1)
+                  {
+#ifdef MALLOC_DEBUG
+                    /* When debugging we simulate destroying the memory
+                       content.  */
+                    memset (paligned_mem, 0x89, size & ~psm1);
+#endif
+                    madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
+
+                    result = 1;
+                  }
+              }
+          }
+      }
 #ifndef MORECORE_CANNOT_TRIM
-  return sYSTRIm(pad, av);
+  return result | (av == &main_arena ? sYSTRIm (pad, av) : 0);
 #else
-  return 0;
+  return result;
 #endif
 }
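For completeness, a small usage sketch exercising the changed path through the public interface. It assumes public_mTRIm backs the exported malloc_trim() from <malloc.h>, and the allocation pattern below is only illustrative; after this change a nonzero return means some arena could give memory back, either by shrinking the main arena's top with sYSTRIm or via the new madvise pass over free chunks.

/* Hedged usage sketch: trigger trimming via malloc_trim() and report
   whether any memory was handed back to the system.  */
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  /* Create and release some largish blocks so the free lists are likely
     to contain chunks spanning whole pages.  */
  void *blocks[64];
  for (int i = 0; i < 64; ++i)
    blocks[i] = malloc (64 * 1024);
  for (int i = 0; i < 64; ++i)
    free (blocks[i]);

  /* With this change, trimming visits every arena, and the return value
     is 1 when any memory could be released.  */
  int released = malloc_trim (0);
  printf ("malloc_trim released memory: %s\n", released ? "yes" : "no");

  return EXIT_SUCCESS;
}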