| author | Ian Lance Taylor <ian@gcc.gnu.org> | 2012-04-20 04:58:26 +0000 |
|---|---|---|
| committer | Ian Lance Taylor <ian@gcc.gnu.org> | 2012-04-20 04:58:26 +0000 |
| commit | 8a72417502730163b7a149310f90140610ca8be0 (patch) | |
| tree | be7f67d3d4b1e832ed43c6bf3fe2047579eb1d9f /libgo | |
| parent | 8198dc134f0026841ebd5e1ee55190e5ff540348 (diff) | |
runtime: Ignore stack sizes when deciding when to GC.
Also allocate the heap bitmap in page-size units and clear the
context when putting G structures on the free list.
From-SVN: r186607
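
The first change adjusts GC pacing: the next collection threshold is now computed from heap_alloc minus the stack bytes recorded in runtime_stacks_sys, so stack allocations no longer inflate the trigger (see the runtime_gc() hunk in the diff below). The sketch that follows illustrates just that formula in isolation; struct stats, next_gc(), and the sample sizes are invented for the example and are not runtime identifiers.

```c
/*
 * Standalone sketch of the new pacing rule (not the runtime code itself).
 * The GC trigger grows from heap_alloc minus the stack bytes handed out by
 * the Go allocator, mirroring:
 *   mstats.next_gc = mstats.heap_alloc +
 *                    (mstats.heap_alloc - runtime_stacks_sys)*gcpercent/100;
 */
#include <stdint.h>
#include <stdio.h>

struct stats {
	uint64_t heap_alloc;  /* bytes currently allocated from the heap */
	uint64_t stacks_sys;  /* portion of that used for goroutine stacks */
};

/* Compute the heap size at which the next collection should run. */
static uint64_t
next_gc(const struct stats *st, int32_t gcpercent)
{
	return st->heap_alloc + (st->heap_alloc - st->stacks_sys) * gcpercent / 100;
}

int
main(void)
{
	/* 64 MiB live heap, 16 MiB of which is goroutine stacks. */
	struct stats st = { 64 << 20, 16 << 20 };
	printf("next GC at %llu bytes\n", (unsigned long long)next_gc(&st, 100));
	return 0;
}
```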
Diffstat (limited to 'libgo')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | libgo/runtime/malloc.goc | 2 |
| -rw-r--r-- | libgo/runtime/mgc0.c | 11 |
| -rw-r--r-- | libgo/runtime/proc.c | 4 |
| -rw-r--r-- | libgo/runtime/runtime.h | 5 |
4 files changed, 19 insertions, 3 deletions
diff --git a/libgo/runtime/malloc.goc b/libgo/runtime/malloc.goc
index 97cfabe..253fdbe 100644
--- a/libgo/runtime/malloc.goc
+++ b/libgo/runtime/malloc.goc
@@ -72,7 +72,7 @@ runtime_mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
 		npages = size >> PageShift;
 		if((size & PageMask) != 0)
 			npages++;
-		s = runtime_MHeap_Alloc(&runtime_mheap, npages, 0, !(flag & FlagNoGC));
+		s = runtime_MHeap_Alloc(&runtime_mheap, npages, 0, 1);
 		if(s == nil)
 			runtime_throw("out of memory");
 		size = npages<<PageShift;
diff --git a/libgo/runtime/mgc0.c b/libgo/runtime/mgc0.c
index 4aa7c45..7e68535 100644
--- a/libgo/runtime/mgc0.c
+++ b/libgo/runtime/mgc0.c
@@ -4,6 +4,8 @@
 
 // Garbage collector.
 
+#include <unistd.h>
+
 #include "runtime.h"
 #include "arch.h"
 #include "malloc.h"
@@ -918,7 +920,7 @@ cachestats(void)
 	uint64 stacks_sys;
 
 	stacks_inuse = 0;
-	stacks_sys = 0;
+	stacks_sys = runtime_stacks_sys;
 	for(m=runtime_allm; m; m=m->alllink) {
 		runtime_purgecachedstats(m);
 		// stacks_inuse += m->stackalloc->inuse;
@@ -1020,7 +1022,7 @@ runtime_gc(int32 force)
 	stealcache();
 	cachestats();
 
-	mstats.next_gc = mstats.heap_alloc+mstats.heap_alloc*gcpercent/100;
+	mstats.next_gc = mstats.heap_alloc+(mstats.heap_alloc-runtime_stacks_sys)*gcpercent/100;
 	m->gcing = 0;
 
 	m->locks++;	// disable gc during the mallocs in newproc
@@ -1329,6 +1331,8 @@ runtime_setblockspecial(void *v, bool s)
 void
 runtime_MHeap_MapBits(MHeap *h)
 {
+	size_t page_size;
+
 	// Caller has added extra mappings to the arena.
 	// Add extra mappings of bitmap words as needed.
 	// We allocate extra bitmap pieces in chunks of bitmapChunk.
@@ -1342,6 +1346,9 @@ runtime_MHeap_MapBits(MHeap *h)
 	if(h->bitmap_mapped >= n)
 		return;
 
+	page_size = getpagesize();
+	n = (n+page_size-1) & ~(page_size-1);
+
 	runtime_SysMap(h->arena_start - n, n - h->bitmap_mapped);
 	h->bitmap_mapped = n;
 }
diff --git a/libgo/runtime/proc.c b/libgo/runtime/proc.c
index 9ad9f96..660c69f 100644
--- a/libgo/runtime/proc.c
+++ b/libgo/runtime/proc.c
@@ -46,6 +46,8 @@ extern void __splitstack_block_signals_context (void *context[10], int *,
 # define StackMin 2 * 1024 * 1024
 #endif
 
+uintptr runtime_stacks_sys;
+
 static void schedule(G*);
 
 typedef struct Sched Sched;
@@ -1091,6 +1093,7 @@ schedule(G *gp)
 				m->lockedg = nil;
 			}
 			gp->idlem = nil;
+			runtime_memclr(&gp->context, sizeof gp->context);
 			gfput(gp);
 			if(--runtime_sched.gcount == 0)
 				runtime_exit(0);
@@ -1288,6 +1291,7 @@ runtime_malg(int32 stacksize, byte** ret_stack, size_t* ret_stacksize)
 		*ret_stacksize = stacksize;
 		newg->gcinitial_sp = *ret_stack;
 		newg->gcstack_size = stacksize;
+		runtime_xadd(&runtime_stacks_sys, stacksize);
 #endif
 	}
 	return newg;
diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h
index a81c210..d379f99 100644
--- a/libgo/runtime/runtime.h
+++ b/libgo/runtime/runtime.h
@@ -463,3 +463,8 @@ struct root_list {
 };
 
 void __go_register_gc_roots(struct root_list*);
+
+// Size of stack space allocated using Go's allocator.
+// This will be 0 when using split stacks, as in that case
+// the stacks are allocated by the splitstack library.
+extern uintptr runtime_stacks_sys;
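
The runtime_MHeap_MapBits change rounds the requested bitmap length up to a whole number of pages before calling the mmap-based runtime_SysMap, since mappings must be page-granular. The snippet below is a minimal, self-contained sketch of that rounding idiom; round_up_to_page() is a name invented for the example, and the mask trick assumes a power-of-two page size, as getpagesize() returns on the systems libgo targets.

```c
/*
 * Minimal sketch of the page-size rounding used in runtime_MHeap_MapBits
 * above; round_up_to_page() is a name invented for this example.
 */
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>

/* Round n up to the next multiple of the system page size.  The mask
 * trick assumes the page size is a power of two. */
static size_t
round_up_to_page(size_t n)
{
	size_t page_size = (size_t)getpagesize();
	return (n + page_size - 1) & ~(page_size - 1);
}

int
main(void)
{
	/* With a 4096-byte page, 5000 rounds up to 8192. */
	printf("%zu -> %zu\n", (size_t)5000, round_up_to_page(5000));
	return 0;
}
```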