Diffstat (limited to 'libgo/runtime/mheap.c')
-rw-r--r-- | libgo/runtime/mheap.c | 103
1 file changed, 67 insertions, 36 deletions
diff --git a/libgo/runtime/mheap.c b/libgo/runtime/mheap.c
index 52c6d8c..b36df25 100644
--- a/libgo/runtime/mheap.c
+++ b/libgo/runtime/mheap.c
@@ -42,7 +42,6 @@ runtime_MHeap_Init(MHeap *h, void *(*alloc)(uintptr))
 	runtime_initlock(h);
 	runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), alloc, RecordSpan, h);
 	runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), alloc, nil, nil);
-	runtime_MHeapMap_Init(&h->map, alloc);
 	// h->mapcache needs no init
 	for(i=0; i<nelem(h->free); i++)
 		runtime_MSpanList_Init(&h->free[i]);
@@ -80,6 +79,7 @@ MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
 {
 	uintptr n;
 	MSpan *s, *t;
+	PageID p;
 
 	// Try in fixed-size lists up to max.
 	for(n=npage; n < nelem(h->free); n++) {
@@ -113,18 +113,29 @@ HaveSpan:
 		mstats.mspan_sys = h->spanalloc.sys;
 		runtime_MSpan_Init(t, s->start + npage, s->npages - npage);
 		s->npages = npage;
-		runtime_MHeapMap_Set(&h->map, t->start - 1, s);
-		runtime_MHeapMap_Set(&h->map, t->start, t);
-		runtime_MHeapMap_Set(&h->map, t->start + t->npages - 1, t);
+		p = t->start;
+		if(sizeof(void*) == 8)
+			p -= ((uintptr)h->arena_start>>PageShift);
+		if(p > 0)
+			h->map[p-1] = s;
+		h->map[p] = t;
+		h->map[p+t->npages-1] = t;
+		*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift);  // copy "needs zeroing" mark
 		t->state = MSpanInUse;
 		MHeap_FreeLocked(h, t);
 	}
 
+	if(*(uintptr*)(s->start<<PageShift) != 0)
+		runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
+
 	// Record span info, because gc needs to be
 	// able to map interior pointer to containing span.
 	s->sizeclass = sizeclass;
+	p = s->start;
+	if(sizeof(void*) == 8)
+		p -= ((uintptr)h->arena_start>>PageShift);
 	for(n=0; n<npage; n++)
-		runtime_MHeapMap_Set(&h->map, s->start+n, s);
+		h->map[p+n] = s;
 	return s;
 }
 
@@ -162,6 +173,7 @@ MHeap_Grow(MHeap *h, uintptr npage)
 	uintptr ask;
 	void *v;
 	MSpan *s;
+	PageID p;
 
 	// Ask for a big chunk, to reduce the number of mappings
 	// the operating system needs to track; also amortizes
@@ -169,68 +181,72 @@ MHeap_Grow(MHeap *h, uintptr npage)
 	// Allocate a multiple of 64kB (16 pages).
 	npage = (npage+15)&~15;
 	ask = npage<<PageShift;
-	if(ask < HeapAllocChunk)
+	if(ask > (uintptr)(h->arena_end - h->arena_used))
+		return false;
+	if(ask < HeapAllocChunk && HeapAllocChunk <= h->arena_end - h->arena_used)
 		ask = HeapAllocChunk;
 
-	v = runtime_SysAlloc(ask);
+	v = runtime_MHeap_SysAlloc(h, ask);
 	if(v == nil) {
 		if(ask > (npage<<PageShift)) {
 			ask = npage<<PageShift;
-			v = runtime_SysAlloc(ask);
+			v = runtime_MHeap_SysAlloc(h, ask);
 		}
 		if(v == nil)
 			return false;
 	}
 	mstats.heap_sys += ask;
 
-	if((byte*)v < h->min || h->min == nil)
-		h->min = v;
-	if((byte*)v+ask > h->max)
-		h->max = (byte*)v+ask;
-
-	// NOTE(rsc): In tcmalloc, if we've accumulated enough
-	// system allocations, the heap map gets entirely allocated
-	// in 32-bit mode.  (In 64-bit mode that's not practical.)
-	if(!runtime_MHeapMap_Preallocate(&h->map, ((uintptr)v>>PageShift) - 1, (ask>>PageShift) + 2)) {
-		runtime_SysFree(v, ask);
-		return false;
-	}
-
 	// Create a fake "in use" span and free it, so that the
 	// right coalescing happens.
 	s = runtime_FixAlloc_Alloc(&h->spanalloc);
 	mstats.mspan_inuse = h->spanalloc.inuse;
 	mstats.mspan_sys = h->spanalloc.sys;
 	runtime_MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
-	runtime_MHeapMap_Set(&h->map, s->start, s);
-	runtime_MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
+	p = s->start;
+	if(sizeof(void*) == 8)
+		p -= ((uintptr)h->arena_start>>PageShift);
+	h->map[p] = s;
+	h->map[p + s->npages - 1] = s;
 	s->state = MSpanInUse;
 	MHeap_FreeLocked(h, s);
 	return true;
 }
 
-// Look up the span at the given page number.
-// Page number is guaranteed to be in map
+// Look up the span at the given address.
+// Address is guaranteed to be in map
 // and is guaranteed to be start or end of span.
 MSpan*
-runtime_MHeap_Lookup(MHeap *h, PageID p)
+runtime_MHeap_Lookup(MHeap *h, void *v)
 {
-	return runtime_MHeapMap_Get(&h->map, p);
+	uintptr p;
+
+	p = (uintptr)v;
+	if(sizeof(void*) == 8)
+		p -= (uintptr)h->arena_start;
+	return h->map[p >> PageShift];
 }
 
-// Look up the span at the given page number.
-// Page number is *not* guaranteed to be in map
+// Look up the span at the given address.
+// Address is *not* guaranteed to be in map
 // and may be anywhere in the span.
 // Map entries for the middle of a span are only
 // valid for allocated spans.  Free spans may have
 // other garbage in their middles, so we have to
 // check for that.
 MSpan*
-runtime_MHeap_LookupMaybe(MHeap *h, PageID p)
+runtime_MHeap_LookupMaybe(MHeap *h, void *v)
 {
 	MSpan *s;
+	PageID p, q;
 
-	s = runtime_MHeapMap_GetMaybe(&h->map, p);
+	if((byte*)v < h->arena_start || (byte*)v >= h->arena_used)
+		return nil;
+	p = (uintptr)v>>PageShift;
+	q = p;
+	if(sizeof(void*) == 8)
+		q -= (uintptr)h->arena_start >> PageShift;
+	s = h->map[q];
 	if(s == nil || p < s->start || p - s->start >= s->npages)
 		return nil;
 	if(s->state != MSpanInUse)
@@ -259,7 +275,9 @@ runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct)
 static void
 MHeap_FreeLocked(MHeap *h, MSpan *s)
 {
+	uintptr *sp, *tp;
 	MSpan *t;
+	PageID p;
 
 	if(s->state != MSpanInUse || s->ref != 0) {
 		// runtime_printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref);
@@ -267,21 +285,30 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
 	}
 	s->state = MSpanFree;
 	runtime_MSpanList_Remove(s);
+	sp = (uintptr*)(s->start<<PageShift);
 
 	// Coalesce with earlier, later spans.
-	if((t = runtime_MHeapMap_Get(&h->map, s->start - 1)) != nil && t->state != MSpanInUse) {
+	p = s->start;
+	if(sizeof(void*) == 8)
+		p -= (uintptr)h->arena_start >> PageShift;
+	if(p > 0 && (t = h->map[p-1]) != nil && t->state != MSpanInUse) {
+		tp = (uintptr*)(t->start<<PageShift);
+		*tp |= *sp;	// propagate "needs zeroing" mark
 		s->start = t->start;
 		s->npages += t->npages;
-		runtime_MHeapMap_Set(&h->map, s->start, s);
+		p -= t->npages;
+		h->map[p] = s;
 		runtime_MSpanList_Remove(t);
 		t->state = MSpanDead;
 		runtime_FixAlloc_Free(&h->spanalloc, t);
 		mstats.mspan_inuse = h->spanalloc.inuse;
 		mstats.mspan_sys = h->spanalloc.sys;
 	}
-	if((t = runtime_MHeapMap_Get(&h->map, s->start + s->npages)) != nil && t->state != MSpanInUse) {
+	if(p+s->npages < nelem(h->map) && (t = h->map[p+s->npages]) != nil && t->state != MSpanInUse) {
+		tp = (uintptr*)(t->start<<PageShift);
+		*sp |= *tp;	// propagate "needs zeroing" mark
 		s->npages += t->npages;
-		runtime_MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
+		h->map[p + s->npages - 1] = s;
 		runtime_MSpanList_Remove(t);
 		t->state = MSpanDead;
 		runtime_FixAlloc_Free(&h->spanalloc, t);
@@ -341,10 +368,14 @@ runtime_MSpanList_IsEmpty(MSpan *list)
 void
 runtime_MSpanList_Insert(MSpan *list, MSpan *span)
 {
-	if(span->next != nil || span->prev != nil)
+	if(span->next != nil || span->prev != nil) {
+		// runtime_printf("failed MSpanList_Insert %p %p %p\n", span, span->next, span->prev);
 		runtime_throw("MSpanList_Insert");
+	}
 	span->next = list->next;
 	span->prev = list;
 	span->next->prev = span;
 	span->prev->next = span;
 }
+
+
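The patch above replaces the old MHeapMap radix-style lookups with a flat h->map array: one MSpan* slot per heap page, indexed by the page's offset into the arena (on 64-bit, page numbers are made relative to h->arena_start before indexing; on 32-bit the map covers the whole address space, so no adjustment is needed). The standalone C below is a minimal sketch of that indexing scheme, not the runtime's actual code; PageShift, the arena size, and all names here are assumptions made for the example.

#include <stdint.h>

enum { PageShift = 12 };                     /* sketch assumes 4 KB pages */
#define ARENA_BYTES (1UL << 28)              /* sketch assumes a 256 MB arena */

typedef struct MSpan MSpan;
struct MSpan {
	uintptr_t start;   /* first page number of the span */
	uintptr_t npages;  /* number of pages in the span */
};

static uint8_t *arena_start;                 /* set when the arena is reserved */
static MSpan *map[ARENA_BYTES >> PageShift]; /* one MSpan* slot per heap page */

/* Analogue of the new runtime_MHeap_Lookup: map an address known to lie
 * inside the arena to the span owning its page, with one shift and one
 * array index. */
static MSpan*
span_lookup(void *v)
{
	uintptr_t off = (uintptr_t)v - (uintptr_t)arena_start;
	return map[off >> PageShift];
}

/* Analogue of the h->map[p+n] = s loop in MHeap_AllocLocked: point every
 * page of a span at its MSpan, so the GC can take an interior pointer
 * back to the containing span in constant time. */
static void
span_record(MSpan *s)
{
	uintptr_t p = s->start - ((uintptr_t)arena_start >> PageShift);
	uintptr_t n;

	for(n = 0; n < s->npages; n++)
		map[p + n] = s;
}

Compared with the multi-level MHeapMap it replaces, the flat array turns both recording and lookup into a single shift-and-index, at the cost of bounding the heap to a reserved arena up front; this is why MHeap_Grow now fails when the request exceeds h->arena_end - h->arena_used rather than preallocating map nodes.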