author     Andi Kleen <ak@linux.intel.com>    2011-10-29 01:01:54 +0000
committer  Andi Kleen <ak@gcc.gnu.org>        2011-10-29 01:01:54 +0000
commit     d33ef9a52bb4a561ec8fb14ad4f45c68dc57bc72 (patch)
tree       2071cd9a9389dac958320f1bd9ae2bb9c110cc2b /gcc
parent     bf72b0094aa097ec23fdac68b33d2f86274bfd1d (diff)
Free large chunks in ggc v2
This implements the freeing back of large chunks in the ggc madvise
path that Richard Guenther asked for.  This way, on systems with
limited address space, malloc() and other allocators still have a
chance to get back some of the memory ggc freed.  The fragmented
pages are still just given back, but the address space stays
allocated.

I tried freeing only aligned 2MB areas to optimize for 2MB huge
pages, but the hit rate was quite low, so I switched to 1MB+
unaligned areas.

v2: Hardcode free unit size instead of a param.

gcc/:
2011-10-18  Andi Kleen  <ak@linux.intel.com>

	* ggc-page.c (release_pages): First free large contiguous
	chunks in the madvise path.

From-SVN: r180648
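In outline, release_pages now walks the approximately address-sorted
free list, merges runs of adjacent pages, and gives any run of at
least free_unit bytes back to the OS with munmap(); only smaller,
fragmented runs fall through to the madvise path.  Below is a minimal
standalone sketch of that coalescing step, assuming a 4K page size and
an illustrative ~1MB threshold; the list type and names here are
hypothetical, not GCC's.

    /* Sketch of the coalescing idea, not the GCC code itself: walk a
       list of free pages that is approximately sorted by address,
       merge runs of adjacent entries, and munmap() any run that
       reaches FREE_UNIT bytes.  */
    #include <stdlib.h>
    #include <sys/mman.h>

    #define PAGE_SIZE 4096
    #define FREE_UNIT (256 * PAGE_SIZE)   /* assumed ~1MB threshold */

    struct free_page
    {
      char *addr;                  /* start of this entry's mapping */
      size_t bytes;                /* size of this entry */
      struct free_page *next;
    };

    /* Unmap every contiguous run of at least FREE_UNIT bytes and
       return the new list head.  Smaller runs are kept for reuse.  */
    static struct free_page *
    release_large_runs (struct free_page *list)
    {
      struct free_page *p = list, *prev = NULL;

      while (p)
        {
          struct free_page *start_p = p, *newprev = prev;
          char *start = p->addr;
          size_t len = 0;

          /* Extend the run while entries are address-adjacent.  */
          while (p && p->addr == start + len)
            {
              len += p->bytes;
              newprev = p;
              p = p->next;
            }

          if (len >= FREE_UNIT)
            {
              /* Give the whole run back to the OS and drop its
                 list entries.  */
              while (start_p != p)
                {
                  struct free_page *next = start_p->next;
                  free (start_p);
                  start_p = next;
                }
              munmap (start, len);
              if (prev)
                prev->next = p;
              else
                list = p;
              continue;   /* prev still precedes the spliced point */
            }
          prev = newprev;
        }
      return list;
    }

Because munmap() releases the address range itself, reusing it later
costs another mmap(); that is the price the patch accepts for large
runs, while the madvise path keeps the mapping for the common small
ones.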
Diffstat (limited to 'gcc')
-rw-r--r--   gcc/ChangeLog    5
-rw-r--r--   gcc/ggc-page.c   48
2 files changed, 53 insertions, 0 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 65df15b..3b24734 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,10 @@
2011-10-18 Andi Kleen <ak@linux.intel.com>
+	* ggc-page.c (release_pages): First free large contiguous
+	chunks in the madvise path.
+
+2011-10-18 Andi Kleen <ak@linux.intel.com>
+
* ggc-page.c (alloc_pages): Always round up entry_size.
2011-10-19 Andi Kleen <ak@linux.intel.com>
diff --git a/gcc/ggc-page.c b/gcc/ggc-page.c
index 077bc8e..7bef4c0 100644
--- a/gcc/ggc-page.c
+++ b/gcc/ggc-page.c
@@ -973,6 +973,54 @@ release_pages (void)
page_entry *p, *start_p;
char *start;
size_t len;
+ size_t mapped_len;
+ page_entry *next, *prev, *newprev;
+ size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;
+
+  /* First free larger contiguous areas to the OS.
+ This allows other allocators to grab these areas if needed.
+ This is only done on larger chunks to avoid fragmentation.
+ This does not always work because the free_pages list is only
+ approximately sorted. */
+
+ p = G.free_pages;
+ prev = NULL;
+ while (p)
+ {
+ start = p->page;
+ start_p = p;
+ len = 0;
+ mapped_len = 0;
+ newprev = prev;
+ while (p && p->page == start + len)
+ {
+ len += p->bytes;
+ if (!p->discarded)
+ mapped_len += p->bytes;
+ newprev = p;
+ p = p->next;
+ }
+ if (len >= free_unit)
+ {
+ while (start_p != p)
+ {
+ next = start_p->next;
+ free (start_p);
+ start_p = next;
+ }
+ munmap (start, len);
+ if (prev)
+ prev->next = p;
+ else
+ G.free_pages = p;
+ G.bytes_mapped -= mapped_len;
+ continue;
+ }
+ prev = newprev;
+ }
+
+ /* Now give back the fragmented pages to the OS, but keep the address
+ space to reuse it next time. */
for (p = G.free_pages; p; )
{
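For contrast with the munmap path above, here is a minimal sketch of
the madvise fallback that the final comment describes, assuming a
Linux host: MADV_DONTNEED releases the physical pages behind a range
while the virtual mapping stays valid, so it can be reused later
without another mmap().  discard_page is an illustrative name, not
GCC's.

    #include <stddef.h>
    #include <sys/mman.h>

    static void
    discard_page (char *addr, size_t len)
    {
      /* The kernel reclaims the memory; touching the range again
         later yields fresh zero-filled pages.  */
      madvise (addr, len, MADV_DONTNEED);
    }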