Diffstat (limited to 'gcc/ggc-page.cc')
-rw-r--r--  gcc/ggc-page.cc  136
1 file changed, 107 insertions, 29 deletions
diff --git a/gcc/ggc-page.cc b/gcc/ggc-page.cc
index 971b433..ebd1197 100644
--- a/gcc/ggc-page.cc
+++ b/gcc/ggc-page.cc
@@ -234,6 +234,8 @@ static struct
}
inverse_table[NUM_ORDERS];
+struct free_list;
+
/* A page_entry records the status of an allocation page. This
structure is dynamically sized to fit the bitmap in_use_p. */
struct page_entry
@@ -251,6 +253,9 @@ struct page_entry
of the host system page size.) */
size_t bytes;
+ /* Free list of this page size. */
+ struct free_list *free_list;
+
/* The address at which the memory is allocated. */
char *page;
@@ -368,6 +373,15 @@ struct free_object
};
#endif
+constexpr int num_free_list = 8;
+
+/* A free_list for pages with BYTES size. */
+struct free_list
+{
+ size_t bytes;
+ page_entry *free_pages;
+};
+
/* The rest of the global variables. */
static struct ggc_globals
{
@@ -412,8 +426,8 @@ static struct ggc_globals
int dev_zero_fd;
#endif
- /* A cache of free system pages. */
- page_entry *free_pages;
+ /* A cache of free system pages. Entry 0 is fallback. */
+ struct free_list free_lists[num_free_list];
#ifdef USING_MALLOC_PAGE_GROUPS
page_group *page_groups;
@@ -488,6 +502,9 @@ static struct ggc_globals
/* The overhead for each of the allocation orders. */
unsigned long long total_overhead_per_order[NUM_ORDERS];
+
+ /* Number of fallbacks. */
+ unsigned long long fallback;
} stats;
} G;
@@ -754,6 +771,42 @@ clear_page_group_in_use (page_group *group, char *page)
}
#endif
+/* Find a free list for ENTRY_SIZE. */
+
+static inline struct free_list *
+find_free_list (size_t entry_size)
+{
+ int i;
+ for (i = 1; i < num_free_list; i++)
+ {
+ if (G.free_lists[i].bytes == entry_size)
+ return &G.free_lists[i];
+ if (G.free_lists[i].bytes == 0)
+ {
+ G.free_lists[i].bytes = entry_size;
+ return &G.free_lists[i];
+ }
+ }
+ /* Fallback. Does this happen? */
+ if (GATHER_STATISTICS)
+ G.stats.fallback++;
+ return &G.free_lists[0];
+}
+
+/* Fast lookup of free_list by order. */
+
+static struct free_list *cache_free_list[NUM_ORDERS];
+
+/* Faster way to find a free list by ORDER for BYTES. */
+
+static inline struct free_list *
+find_free_list_order (unsigned order, size_t bytes)
+{
+ if (cache_free_list[order] == NULL)
+ cache_free_list[order] = find_free_list (bytes);
+ return cache_free_list[order];
+}
+
/* Allocate a new page for allocating objects of size 2^ORDER,
and return an entry for it. The entry is not added to the
appropriate page_table list. */
@@ -770,6 +823,7 @@ alloc_page (unsigned order)
#ifdef USING_MALLOC_PAGE_GROUPS
page_group *group;
#endif
+ struct free_list *free_list;
num_objects = OBJECTS_PER_PAGE (order);
bitmap_size = BITMAP_SIZE (num_objects + 1);
@@ -782,8 +836,10 @@ alloc_page (unsigned order)
entry = NULL;
page = NULL;
+ free_list = find_free_list_order (order, entry_size);
+
/* Check the list of free pages for one we can use. */
- for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
+ for (pp = &free_list->free_pages, p = *pp; p; pp = &p->next, p = *pp)
if (p->bytes == entry_size)
break;
@@ -816,7 +872,7 @@ alloc_page (unsigned order)
/* We want just one page. Allocate a bunch of them and put the
extras on the freelist. (Can only do this optimization with
mmap for backing store.) */
- struct page_entry *e, *f = G.free_pages;
+ struct page_entry *e, *f = free_list->free_pages;
int i, entries = GGC_QUIRE_SIZE;
page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
@@ -833,12 +889,13 @@ alloc_page (unsigned order)
e = XCNEWVAR (struct page_entry, page_entry_size);
e->order = order;
e->bytes = G.pagesize;
+ e->free_list = free_list;
e->page = page + (i << G.lg_pagesize);
e->next = f;
f = e;
}
- G.free_pages = f;
+ free_list->free_pages = f;
}
else
page = alloc_anon (NULL, entry_size, true);
@@ -898,18 +955,19 @@ alloc_page (unsigned order)
/* If we allocated multiple pages, put the rest on the free list. */
if (multiple_pages)
{
- struct page_entry *e, *f = G.free_pages;
+ struct page_entry *e, *f = free_list->free_pages;
for (a = enda - G.pagesize; a != page; a -= G.pagesize)
{
e = XCNEWVAR (struct page_entry, page_entry_size);
e->order = order;
e->bytes = G.pagesize;
+ e->free_list = free_list;
e->page = a;
e->group = group;
e->next = f;
f = e;
}
- G.free_pages = f;
+ free_list->free_pages = f;
}
}
#endif
@@ -918,6 +976,7 @@ alloc_page (unsigned order)
entry = XCNEWVAR (struct page_entry, page_entry_size);
entry->bytes = entry_size;
+ entry->free_list = free_list;
entry->page = page;
entry->context_depth = G.context_depth;
entry->order = order;
@@ -1006,17 +1065,18 @@ free_page (page_entry *entry)
adjust_depth ();
- entry->next = G.free_pages;
- G.free_pages = entry;
+ struct free_list *free_list = entry->free_list;
+ entry->next = free_list->free_pages;
+ free_list->free_pages = entry;
}
-/* Release the free page cache to the system. */
+/* Release the free page cache for FREE_LIST to the system. */
static void
-release_pages (void)
+do_release_pages (struct free_list *free_list, size_t &n1, size_t &n2)
{
- size_t n1 = 0;
- size_t n2 = 0;
+ (void) n1;
+ (void) n2;
#ifdef USING_MADVISE
page_entry *p, *start_p;
char *start;
@@ -1029,9 +1089,9 @@ release_pages (void)
This allows other allocators to grab these areas if needed.
This is only done on larger chunks to avoid fragmentation.
This does not always work because the free_pages list is only
- approximately sorted. */
+ approximately sorted.  */
- p = G.free_pages;
+ p = free_list->free_pages;
prev = NULL;
while (p)
{
@@ -1044,7 +1104,7 @@ release_pages (void)
{
len += p->bytes;
if (!p->discarded)
- mapped_len += p->bytes;
+ mapped_len += p->bytes;
newprev = p;
p = p->next;
}
@@ -1060,7 +1120,7 @@ release_pages (void)
if (prev)
prev->next = p;
else
- G.free_pages = p;
+ free_list->free_pages = p;
G.bytes_mapped -= mapped_len;
n1 += len;
continue;
@@ -1069,9 +1129,9 @@ release_pages (void)
}
/* Now give back the fragmented pages to the OS, but keep the address
- space to reuse it next time. */
+ space to reuse it next time.  */
- for (p = G.free_pages; p; )
+ for (p = free_list->free_pages; p; )
{
if (p->discarded)
{
@@ -1089,10 +1149,10 @@ release_pages (void)
}
/* Give the page back to the kernel, but don't free the mapping.
This avoids fragmentation in the virtual memory map of the
- process. Next time we can reuse it by just touching it. */
+ process. Next time we can reuse it by just touching it.  */
madvise (start, len, MADV_DONTNEED);
/* Don't count those pages as mapped to not touch the garbage collector
- unnecessarily. */
+ unnecessarily.  */
G.bytes_mapped -= len;
n2 += len;
while (start_p != p)
@@ -1108,7 +1168,7 @@ release_pages (void)
size_t len;
/* Gather up adjacent pages so they are unmapped together. */
- p = G.free_pages;
+ p = free_list->free_pages;
while (p)
{
@@ -1131,14 +1191,13 @@ release_pages (void)
G.bytes_mapped -= len;
}
- G.free_pages = NULL;
+ free_list->free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
page_entry **pp, *p;
- page_group **gp, *g;
/* Remove all pages from free page groups from the list. */
- pp = &G.free_pages;
+ pp = &free_list->free_pages;
while ((p = *pp) != NULL)
if (p->group->in_use == 0)
{
@@ -1147,6 +1206,20 @@ release_pages (void)
}
else
pp = &p->next;
+#endif
+}
+
+/* Release the free page cache to the system. */
+
+static void
+release_pages ()
+{
+ size_t n1 = 0;
+ size_t n2 = 0;
+ for (int i = 0; i < num_free_list; i++)
+ do_release_pages (&G.free_lists[i], n1, n2);
+#ifdef USING_MALLOC_PAGE_GROUPS
+ page_group **gp, *g;
/* Remove all free page groups, and release the storage. */
gp = &G.page_groups;
@@ -1801,9 +1874,10 @@ init_ggc (void)
/* We have a good page, might as well hold onto it... */
e = XCNEW (struct page_entry);
e->bytes = G.pagesize;
+ e->free_list = find_free_list (G.pagesize);
e->page = p;
- e->next = G.free_pages;
- G.free_pages = e;
+ e->next = e->free_list->free_pages;
+ e->free_list->free_pages = e;
}
#endif
@@ -1938,9 +2012,9 @@ clear_marks (void)
}
}
-/* Check if any blocks with a registered finalizer have become unmarked. If so
+/* Check if any blocks with a registered finalizer have become unmarked.  If so
run the finalizer and unregister it because the block is about to be freed.
- Note that no garantee is made about what order finalizers will run in so
+ Note that no guarantee is made about what order finalizers will run in so
touching other objects in gc memory is extremely unwise. */
static void
@@ -2404,6 +2478,9 @@ ggc_print_statistics (void)
fprintf (stderr, "Total Allocated under 128B: "
PRsa (9) "\n",
SIZE_AMOUNT (G.stats.total_allocated_under128));
+ fprintf (stderr, "Number of free list fallbacks: "
+ PRsa (9) "\n",
+ SIZE_AMOUNT (G.stats.fallback));
for (i = 0; i < NUM_ORDERS; i++)
if (G.stats.total_allocated_per_order[i])
@@ -2677,6 +2754,7 @@ ggc_pch_read (FILE *f, void *addr)
- sizeof (long)
+ BITMAP_SIZE (num_objs + 1)));
entry->bytes = bytes;
+ entry->free_list = find_free_list (bytes);
entry->page = offs;
entry->context_depth = 0;
offs += bytes;
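
The core of the change above is the replacement of the single G.free_pages chain with an array of size-keyed free lists plus a per-order cache. The sketch below is a minimal standalone illustration of that lookup scheme, not the patch itself: page_entry_sketch, the num_orders value, and main() are simplified stand-ins invented for this example, while free_list, find_free_list, and find_free_list_order mirror the names used in the patch with the surrounding GGC machinery omitted.

// Minimal sketch of the size-bucketed free-page cache, assuming a
// simplified page_entry.  The real ggc-page.cc keeps the buckets inside
// the GC globals `G`; here they are file-scope so the logic stands alone.

#include <cstddef>
#include <cstdio>

constexpr int num_free_list = 8;   // bucket 0 is the fallback list
constexpr unsigned num_orders = 16;  // placeholder for NUM_ORDERS in ggc-page.cc

struct page_entry_sketch           // simplified stand-in for page_entry
{
  page_entry_sketch *next;
  size_t bytes;                    // size of the mapping backing this page
};

struct free_list
{
  size_t bytes;                    // page size served by this bucket (0 = unused)
  page_entry_sketch *free_pages;   // singly linked cache of free pages
};

static free_list free_lists[num_free_list];

/* Return the bucket for ENTRY_SIZE, assigning an empty bucket on first
   use.  Once more than num_free_list - 1 distinct sizes have been seen,
   everything else lands in bucket 0, which may then hold mixed sizes
   (callers still match on the bytes field of each entry).  */

static free_list *
find_free_list (size_t entry_size)
{
  for (int i = 1; i < num_free_list; i++)
    {
      if (free_lists[i].bytes == entry_size)
        return &free_lists[i];
      if (free_lists[i].bytes == 0)
        {
          free_lists[i].bytes = entry_size;
          return &free_lists[i];
        }
    }
  return &free_lists[0];           // fallback bucket
}

/* Per-order memoization, mirroring cache_free_list[] in the patch: a
   given allocation order always maps to the same page size, so the
   linear search above runs at most once per order.  */

static free_list *cache_free_list[num_orders];

static free_list *
find_free_list_order (unsigned order, size_t bytes)
{
  if (cache_free_list[order] == nullptr)
    cache_free_list[order] = find_free_list (bytes);
  return cache_free_list[order];
}

int
main ()
{
  // Orders that share a page size share a bucket ...
  free_list *a = find_free_list_order (0, 4096);
  free_list *b = find_free_list_order (1, 4096);
  // ... while a larger page size gets a bucket of its own.
  free_list *c = find_free_list_order (5, 65536);

  printf ("same bucket: %d, distinct bucket: %d\n", a == b, a != c);
  return 0;
}

Reserving bucket 0 as a catch-all means that an unexpected number of distinct page sizes degrades to the old single-list behaviour instead of failing; the patch counts how often that happens in G.stats.fallback and reports it from ggc_print_statistics.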