Diffstat (limited to 'libgo/go/runtime/mcentral.go')
-rw-r--r-- | libgo/go/runtime/mcentral.go | 259
1 file changed, 255 insertions(+), 4 deletions(-)
diff --git a/libgo/go/runtime/mcentral.go b/libgo/go/runtime/mcentral.go
index 78a3ae6..ed49d86 100644
--- a/libgo/go/runtime/mcentral.go
+++ b/libgo/go/runtime/mcentral.go
@@ -20,8 +20,31 @@ import "runtime/internal/atomic"
 type mcentral struct {
 	lock      mutex
 	spanclass spanClass
-	nonempty  mSpanList // list of spans with a free object, ie a nonempty free list
-	empty     mSpanList // list of spans with no free objects (or cached in an mcache)
+
+	// For !go115NewMCentralImpl.
+	nonempty mSpanList // list of spans with a free object, ie a nonempty free list
+	empty    mSpanList // list of spans with no free objects (or cached in an mcache)
+
+	// partial and full contain two mspan sets: one of swept in-use
+	// spans, and one of unswept in-use spans. These two trade
+	// roles on each GC cycle. The unswept set is drained either by
+	// allocation or by the background sweeper in every GC cycle,
+	// so only two roles are necessary.
+	//
+	// sweepgen is increased by 2 on each GC cycle, so the swept
+	// spans are in partial[sweepgen/2%2] and the unswept spans are in
+	// partial[1-sweepgen/2%2]. Sweeping pops spans from the
+	// unswept set and pushes spans that are still in-use on the
+	// swept set. Likewise, allocating an in-use span pushes it
+	// on the swept set.
+	//
+	// Some parts of the sweeper can sweep arbitrary spans, and hence
+	// can't remove them from the unswept set, but will add the span
+	// to the appropriate swept list. As a result, the parts of the
+	// sweeper and mcentral that do consume from the unswept list may
+	// encounter swept spans, and these should be ignored.
+	partial [2]spanSet // list of spans with a free object
+	full    [2]spanSet // list of spans with no free objects
 
 	// nmalloc is the cumulative count of objects allocated from
 	// this mcentral, assuming all spans in mcaches are
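A note on the role swap described in the comment above: it is pure index arithmetic. The following standalone sketch (ordinary user Go, not runtime code; sweptIdx and unsweptIdx are made-up names mirroring the accessors added in the next hunk) shows the two slots of partial and full trading places each time sweepgen advances by 2:

package main

import "fmt"

// sweptIdx and unsweptIdx mirror the index math used by partialSwept
// and partialUnswept in the next hunk; the names here are ours, not
// the runtime's.
func sweptIdx(sweepgen uint32) uint32   { return sweepgen / 2 % 2 }
func unsweptIdx(sweepgen uint32) uint32 { return 1 - sweepgen/2%2 }

func main() {
	// sweepgen advances by 2 per GC cycle, so the two slots swap
	// roles each cycle: last cycle's swept set becomes this cycle's
	// unswept set, and is drained again before the cycle ends.
	for sg := uint32(2); sg <= 8; sg += 2 {
		fmt.Printf("sweepgen=%d: swept=partial[%d] unswept=partial[%d]\n",
			sg, sweptIdx(sg), unsweptIdx(sg))
	}
}

Running it prints alternating indices, which is why two sets per role suffice: the set drained in one cycle is reused, empty, as the swept set of the next.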
@@ -32,12 +55,168 @@ type mcentral struct {
 // Initialize a single central free list.
 func (c *mcentral) init(spc spanClass) {
 	c.spanclass = spc
-	c.nonempty.init()
-	c.empty.init()
+	if go115NewMCentralImpl {
+		lockInit(&c.partial[0].spineLock, lockRankSpanSetSpine)
+		lockInit(&c.partial[1].spineLock, lockRankSpanSetSpine)
+		lockInit(&c.full[0].spineLock, lockRankSpanSetSpine)
+		lockInit(&c.full[1].spineLock, lockRankSpanSetSpine)
+	} else {
+		c.nonempty.init()
+		c.empty.init()
+		lockInit(&c.lock, lockRankMcentral)
+	}
+}
+
+// partialUnswept returns the spanSet which holds partially-filled
+// unswept spans for this sweepgen.
+func (c *mcentral) partialUnswept(sweepgen uint32) *spanSet {
+	return &c.partial[1-sweepgen/2%2]
+}
+
+// partialSwept returns the spanSet which holds partially-filled
+// swept spans for this sweepgen.
+func (c *mcentral) partialSwept(sweepgen uint32) *spanSet {
+	return &c.partial[sweepgen/2%2]
+}
+
+// fullUnswept returns the spanSet which holds unswept spans without any
+// free slots for this sweepgen.
+func (c *mcentral) fullUnswept(sweepgen uint32) *spanSet {
+	return &c.full[1-sweepgen/2%2]
+}
+
+// fullSwept returns the spanSet which holds swept spans without any
+// free slots for this sweepgen.
+func (c *mcentral) fullSwept(sweepgen uint32) *spanSet {
+	return &c.full[sweepgen/2%2]
 }
 
 // Allocate a span to use in an mcache.
 func (c *mcentral) cacheSpan() *mspan {
+	if !go115NewMCentralImpl {
+		return c.oldCacheSpan()
+	}
+	// Deduct credit for this span allocation and sweep if necessary.
+	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
+	deductSweepCredit(spanBytes, 0)
+
+	sg := mheap_.sweepgen
+
+	traceDone := false
+	if trace.enabled {
+		traceGCSweepStart()
+	}
+
+	// If we sweep spanBudget spans without finding any free
+	// space, just allocate a fresh span. This limits the amount
+	// of time we can spend trying to find free space and
+	// amortizes the cost of small object sweeping over the
+	// benefit of having a full free span to allocate from. By
+	// setting this to 100, we limit the space overhead to 1%.
+	//
+	// TODO(austin,mknyszek): This still has bad worst-case
+	// throughput. For example, this could find just one free slot
+	// on the 100th swept span. That limits allocation latency, but
+	// still has very poor throughput. We could instead keep a
+	// running free-to-used budget and switch to fresh span
+	// allocation if the budget runs low.
+	spanBudget := 100
+
+	var s *mspan
+
+	// Try partial swept spans first.
+	if s = c.partialSwept(sg).pop(); s != nil {
+		goto havespan
+	}
+
+	// Now try partial unswept spans.
+	for ; spanBudget >= 0; spanBudget-- {
+		s = c.partialUnswept(sg).pop()
+		if s == nil {
+			break
+		}
+		if atomic.Load(&s.sweepgen) == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
+			// We got ownership of the span, so let's sweep it and use it.
+			s.sweep(true)
+			goto havespan
+		}
+		// We failed to get ownership of the span, which means it's being or
+		// has been swept by an asynchronous sweeper that just couldn't remove it
+		// from the unswept list. That sweeper took ownership of the span and
+		// responsibility for either freeing it to the heap or putting it on the
+		// right swept list. Either way, we should just ignore it (and it's unsafe
+		// for us to do anything else).
+	}
+	// Now try full unswept spans, sweeping them and putting them into the
+	// right list if we fail to get a span.
+	for ; spanBudget >= 0; spanBudget-- {
+		s = c.fullUnswept(sg).pop()
+		if s == nil {
+			break
+		}
+		if atomic.Load(&s.sweepgen) == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
+			// We got ownership of the span, so let's sweep it.
+			s.sweep(true)
+			// Check if there's any free space.
+			freeIndex := s.nextFreeIndex()
+			if freeIndex != s.nelems {
+				s.freeindex = freeIndex
+				goto havespan
+			}
+			// Add it to the swept list, because sweeping didn't give us any free space.
+			c.fullSwept(sg).push(s)
+		}
+		// See comment for partial unswept spans.
+	}
+	if trace.enabled {
+		traceGCSweepDone()
+		traceDone = true
+	}
+
+	// We failed to get a span from the mcentral so get one from mheap.
+	s = c.grow()
+	if s == nil {
+		return nil
+	}
+
+	// At this point s is a span that should have free slots.
+havespan:
+	if trace.enabled && !traceDone {
+		traceGCSweepDone()
+	}
+	n := int(s.nelems) - int(s.allocCount)
+	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
+		throw("span has no free objects")
+	}
+	// Assume all objects from this span will be allocated in the
+	// mcache. If it gets uncached, we'll adjust this.
+	atomic.Xadd64(&c.nmalloc, int64(n))
+	usedBytes := uintptr(s.allocCount) * s.elemsize
+	atomic.Xadd64(&memstats.heap_live, int64(spanBytes)-int64(usedBytes))
+	if trace.enabled {
+		// heap_live changed.
+		traceHeapAlloc()
+	}
+	if gcBlackenEnabled != 0 {
+		// heap_live changed.
+		gcController.revise()
+	}
+	freeByteBase := s.freeindex &^ (64 - 1)
+	whichByte := freeByteBase / 8
+	// Init alloc bits cache.
+	s.refillAllocCache(whichByte)
+
+	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
+	// s.allocCache.
+	s.allocCache >>= s.freeindex % 64
+
+	return s
+}
+
+// Allocate a span to use in an mcache.
+//
+// For !go115NewMCentralImpl.
+func (c *mcentral) oldCacheSpan() *mspan {
 	// Deduct credit for this span allocation and sweep if necessary.
 	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
 	deductSweepCredit(spanBytes, 0)
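A note on the atomic.Cas in the two sweeping loops above: they rely on the span sweep protocol documented on mspan in mheap.go, where s.sweepgen == h.sweepgen-2 means the span needs sweeping, h.sweepgen-1 means it is being swept, and h.sweepgen means it is swept and usable. The following toy model (standalone Go using sync/atomic in place of runtime/internal/atomic; all names and values are illustrative, not runtime APIs) shows why the loser of the claim race must simply ignore the span:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	const sg = 10          // stands in for mheap_.sweepgen
	span := uint32(sg - 2) // sweepgen of an unswept in-use span

	// Both the allocator and the background sweeper may pop the
	// same span from an unswept set; the CAS guarantees exactly
	// one of them wins ownership and sweeps it.
	claim := func(who string) {
		if atomic.CompareAndSwapUint32(&span, sg-2, sg-1) {
			fmt.Println(who, "owns the span and must sweep it")
			atomic.StoreUint32(&span, sg) // sweeping finished
		} else {
			fmt.Println(who, "lost the race and ignores the span")
		}
	}
	claim("allocator")
	claim("background sweeper")
}

The winner also inherits the responsibility described in the comment above: freeing the span to the heap or pushing it onto the correct swept set.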
@@ -147,7 +326,77 @@ havespan:
 }
 
 // Return span from an mcache.
+//
+// s must have a span class corresponding to this
+// mcentral and it must not be empty.
 func (c *mcentral) uncacheSpan(s *mspan) {
+	if !go115NewMCentralImpl {
+		c.oldUncacheSpan(s)
+		return
+	}
+	if s.allocCount == 0 {
+		throw("uncaching span but s.allocCount == 0")
+	}
+
+	sg := mheap_.sweepgen
+	stale := s.sweepgen == sg+1
+
+	// Fix up sweepgen.
+	if stale {
+		// Span was cached before sweep began. It's our
+		// responsibility to sweep it.
+		//
+		// Set sweepgen to indicate it's not cached but needs
+		// sweeping and can't be allocated from. sweep will
+		// set s.sweepgen to indicate s is swept.
+		atomic.Store(&s.sweepgen, sg-1)
+	} else {
+		// Indicate that s is no longer cached.
+		atomic.Store(&s.sweepgen, sg)
+	}
+	n := int(s.nelems) - int(s.allocCount)
+
+	// Fix up statistics.
+	if n > 0 {
+		// cacheSpan updated alloc assuming all objects on s
+		// were going to be allocated. Adjust for any that
+		// weren't. We must do this before potentially
+		// sweeping the span.
+		atomic.Xadd64(&c.nmalloc, -int64(n))
+
+		if !stale {
+			// (*mcentral).cacheSpan conservatively counted
+			// unallocated slots in heap_live. Undo this.
+			//
+			// If this span was cached before sweep, then
+			// heap_live was totally recomputed since
+			// caching this span, so we don't do this for
+			// stale spans.
+			atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
+		}
+	}
+
+	// Put the span in the appropriate place.
+	if stale {
+		// It's stale, so just sweep it. Sweeping will put it on
+		// the right list.
+		s.sweep(false)
+	} else {
+		if n > 0 {
+			// Put it back on the partial swept list.
+			c.partialSwept(sg).push(s)
+		} else {
+			// There's no free space and it's not stale, so put it on the
+			// full swept list.
+			c.fullSwept(sg).push(s)
+		}
+	}
+}
+
+// Return span from an mcache.
+//
+// For !go115NewMCentralImpl.
+func (c *mcentral) oldUncacheSpan(s *mspan) {
 	if s.allocCount == 0 {
 		throw("uncaching span but s.allocCount == 0")
 	}
@@ -206,6 +455,8 @@ func (c *mcentral) uncacheSpan(s *mspan) {
 // freeSpan reports whether s was returned to the heap.
 // If preserve=true, it does not move s (the caller
 // must take care of it).
+//
+// For !go115NewMCentralImpl.
 func (c *mcentral) freeSpan(s *mspan, preserve bool, wasempty bool) bool {
 	if sg := mheap_.sweepgen; s.sweepgen == sg+1 || s.sweepgen == sg+3 {
 		throw("freeSpan given cached span")
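Finally, the statistics fix-up in the new uncacheSpan above is the inverse of the optimistic accounting in cacheSpan: cacheSpan counts every free slot as if it will be allocated, and uncacheSpan subtracts the slots that in fact never were. A back-of-the-envelope check (standalone Go; the constants are hypothetical, and spanBytes is approximated as nelems*elemsize, ignoring span tail waste):

package main

import "fmt"

func main() {
	const (
		nelems   = 64  // object slots in the span (hypothetical)
		elemsize = 128 // bytes per slot (hypothetical)
	)
	allocAtCache := 10   // s.allocCount when cacheSpan handed the span out
	allocAtUncache := 30 // s.allocCount when the span comes back

	// cacheSpan: assume every remaining slot will be allocated.
	nmalloc := int64(nelems - allocAtCache)               // +54 objects
	heapLive := int64((nelems - allocAtCache) * elemsize) // +54*128 bytes

	// uncacheSpan: undo the assumption for the slots never used.
	n := int64(nelems - allocAtUncache) // 34 slots stayed free
	nmalloc -= n
	heapLive -= n * elemsize

	// The net effect matches what actually happened while cached:
	// 30-10 = 20 objects allocated, 20*128 bytes of heap growth.
	fmt.Println(nmalloc == int64(allocAtUncache-allocAtCache)) // true
	fmt.Println(heapLive == 20*elemsize)                       // true
}

As the comments in the hunk note, this undo is skipped for stale spans (cached before sweep began), because heap_live has been recomputed since; such spans are instead swept directly, which files them on the correct list.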