Diffstat (limited to 'libgo/go/runtime/mcentral.go')
-rw-r--r-- | libgo/go/runtime/mcentral.go | 284 |
1 file changed, 5 insertions(+), 279 deletions(-)
diff --git a/libgo/go/runtime/mcentral.go b/libgo/go/runtime/mcentral.go
index ed49d86..cd20dec 100644
--- a/libgo/go/runtime/mcentral.go
+++ b/libgo/go/runtime/mcentral.go
@@ -18,13 +18,8 @@ import "runtime/internal/atomic"
 //
 //go:notinheap
 type mcentral struct {
-	lock      mutex
 	spanclass spanClass
 
-	// For !go115NewMCentralImpl.
-	nonempty mSpanList // list of spans with a free object, ie a nonempty free list
-	empty    mSpanList // list of spans with no free objects (or cached in an mcache)
-
 	// partial and full contain two mspan sets: one of swept in-use
 	// spans, and one of unswept in-use spans. These two trade
 	// roles on each GC cycle. The unswept set is drained either by
@@ -45,26 +40,15 @@ type mcentral struct {
 	// encounter swept spans, and these should be ignored.
 	partial [2]spanSet // list of spans with a free object
 	full    [2]spanSet // list of spans with no free objects
-
-	// nmalloc is the cumulative count of objects allocated from
-	// this mcentral, assuming all spans in mcaches are
-	// fully-allocated. Written atomically, read under STW.
-	nmalloc uint64
 }
 
 // Initialize a single central free list.
 func (c *mcentral) init(spc spanClass) {
 	c.spanclass = spc
-	if go115NewMCentralImpl {
-		lockInit(&c.partial[0].spineLock, lockRankSpanSetSpine)
-		lockInit(&c.partial[1].spineLock, lockRankSpanSetSpine)
-		lockInit(&c.full[0].spineLock, lockRankSpanSetSpine)
-		lockInit(&c.full[1].spineLock, lockRankSpanSetSpine)
-	} else {
-		c.nonempty.init()
-		c.empty.init()
-		lockInit(&c.lock, lockRankMcentral)
-	}
+	lockInit(&c.partial[0].spineLock, lockRankSpanSetSpine)
+	lockInit(&c.partial[1].spineLock, lockRankSpanSetSpine)
+	lockInit(&c.full[0].spineLock, lockRankSpanSetSpine)
+	lockInit(&c.full[1].spineLock, lockRankSpanSetSpine)
 }
 
 // partialUnswept returns the spanSet which holds partially-filled
@@ -93,9 +77,6 @@ func (c *mcentral) fullSwept(sweepgen uint32) *spanSet {
 
 // Allocate a span to use in an mcache.
 func (c *mcentral) cacheSpan() *mspan {
-	if !go115NewMCentralImpl {
-		return c.oldCacheSpan()
-	}
 	// Deduct credit for this span allocation and sweep if necessary.
 	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
 	deductSweepCredit(spanBytes, 0)
@@ -188,131 +169,6 @@ havespan:
 	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
 		throw("span has no free objects")
 	}
-	// Assume all objects from this span will be allocated in the
-	// mcache. If it gets uncached, we'll adjust this.
-	atomic.Xadd64(&c.nmalloc, int64(n))
-	usedBytes := uintptr(s.allocCount) * s.elemsize
-	atomic.Xadd64(&memstats.heap_live, int64(spanBytes)-int64(usedBytes))
-	if trace.enabled {
-		// heap_live changed.
-		traceHeapAlloc()
-	}
-	if gcBlackenEnabled != 0 {
-		// heap_live changed.
-		gcController.revise()
-	}
-	freeByteBase := s.freeindex &^ (64 - 1)
-	whichByte := freeByteBase / 8
-	// Init alloc bits cache.
-	s.refillAllocCache(whichByte)
-
-	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
-	// s.allocCache.
-	s.allocCache >>= s.freeindex % 64
-
-	return s
-}
-
-// Allocate a span to use in an mcache.
-//
-// For !go115NewMCentralImpl.
-func (c *mcentral) oldCacheSpan() *mspan {
-	// Deduct credit for this span allocation and sweep if necessary.
-	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
-	deductSweepCredit(spanBytes, 0)
-
-	lock(&c.lock)
-	traceDone := false
-	if trace.enabled {
-		traceGCSweepStart()
-	}
-	sg := mheap_.sweepgen
-retry:
-	var s *mspan
-	for s = c.nonempty.first; s != nil; s = s.next {
-		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
-			c.nonempty.remove(s)
-			c.empty.insertBack(s)
-			unlock(&c.lock)
-			s.sweep(true)
-			goto havespan
-		}
-		if s.sweepgen == sg-1 {
-			// the span is being swept by background sweeper, skip
-			continue
-		}
-		// we have a nonempty span that does not require sweeping, allocate from it
-		c.nonempty.remove(s)
-		c.empty.insertBack(s)
-		unlock(&c.lock)
-		goto havespan
-	}
-
-	for s = c.empty.first; s != nil; s = s.next {
-		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
-			// we have an empty span that requires sweeping,
-			// sweep it and see if we can free some space in it
-			c.empty.remove(s)
-			// swept spans are at the end of the list
-			c.empty.insertBack(s)
-			unlock(&c.lock)
-			s.sweep(true)
-			freeIndex := s.nextFreeIndex()
-			if freeIndex != s.nelems {
-				s.freeindex = freeIndex
-				goto havespan
-			}
-			lock(&c.lock)
-			// the span is still empty after sweep
-			// it is already in the empty list, so just retry
-			goto retry
-		}
-		if s.sweepgen == sg-1 {
-			// the span is being swept by background sweeper, skip
-			continue
-		}
-		// already swept empty span,
-		// all subsequent ones must also be either swept or in process of sweeping
-		break
-	}
-	if trace.enabled {
-		traceGCSweepDone()
-		traceDone = true
-	}
-	unlock(&c.lock)
-
-	// Replenish central list if empty.
-	s = c.grow()
-	if s == nil {
-		return nil
-	}
-	lock(&c.lock)
-	c.empty.insertBack(s)
-	unlock(&c.lock)
-
-	// At this point s is a non-empty span, queued at the end of the empty list,
-	// c is unlocked.
-havespan:
-	if trace.enabled && !traceDone {
-		traceGCSweepDone()
-	}
-	n := int(s.nelems) - int(s.allocCount)
-	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
-		throw("span has no free objects")
-	}
-	// Assume all objects from this span will be allocated in the
-	// mcache. If it gets uncached, we'll adjust this.
-	atomic.Xadd64(&c.nmalloc, int64(n))
-	usedBytes := uintptr(s.allocCount) * s.elemsize
-	atomic.Xadd64(&memstats.heap_live, int64(spanBytes)-int64(usedBytes))
-	if trace.enabled {
-		// heap_live changed.
-		traceHeapAlloc()
-	}
-	if gcBlackenEnabled != 0 {
-		// heap_live changed.
-		gcController.revise()
-	}
 	freeByteBase := s.freeindex &^ (64 - 1)
 	whichByte := freeByteBase / 8
 	// Init alloc bits cache.
@@ -330,10 +186,6 @@ havespan:
 // s must have a span class corresponding to this
 // mcentral and it must not be empty.
 func (c *mcentral) uncacheSpan(s *mspan) {
-	if !go115NewMCentralImpl {
-		c.oldUncacheSpan(s)
-		return
-	}
 	if s.allocCount == 0 {
 		throw("uncaching span but s.allocCount == 0")
 	}
@@ -354,27 +206,6 @@ func (c *mcentral) uncacheSpan(s *mspan) {
 		// Indicate that s is no longer cached.
 		atomic.Store(&s.sweepgen, sg)
 	}
-	n := int(s.nelems) - int(s.allocCount)
-
-	// Fix up statistics.
-	if n > 0 {
-		// cacheSpan updated alloc assuming all objects on s
-		// were going to be allocated. Adjust for any that
-		// weren't. We must do this before potentially
-		// sweeping the span.
-		atomic.Xadd64(&c.nmalloc, -int64(n))
-
-		if !stale {
-			// (*mcentral).cacheSpan conservatively counted
-			// unallocated slots in heap_live. Undo this.
-			//
-			// If this span was cached before sweep, then
-			// heap_live was totally recomputed since
-			// caching this span, so we don't do this for
-			// stale spans.
-			atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
-		}
-	}
 
 	// Put the span in the appropriate place.
 	if stale {
@@ -382,7 +213,7 @@ func (c *mcentral) uncacheSpan(s *mspan) {
 		// the right list.
 		s.sweep(false)
 	} else {
-		if n > 0 {
+		if int(s.nelems)-int(s.allocCount) > 0 {
 			// Put it back on the partial swept list.
 			c.partialSwept(sg).push(s)
 		} else {
@@ -393,111 +224,6 @@ func (c *mcentral) uncacheSpan(s *mspan) {
 	}
 }
 
-// Return span from an mcache.
-//
-// For !go115NewMCentralImpl.
-func (c *mcentral) oldUncacheSpan(s *mspan) {
-	if s.allocCount == 0 {
-		throw("uncaching span but s.allocCount == 0")
-	}
-
-	sg := mheap_.sweepgen
-	stale := s.sweepgen == sg+1
-	if stale {
-		// Span was cached before sweep began. It's our
-		// responsibility to sweep it.
-		//
-		// Set sweepgen to indicate it's not cached but needs
-		// sweeping and can't be allocated from. sweep will
-		// set s.sweepgen to indicate s is swept.
-		atomic.Store(&s.sweepgen, sg-1)
-	} else {
-		// Indicate that s is no longer cached.
-		atomic.Store(&s.sweepgen, sg)
-	}
-
-	n := int(s.nelems) - int(s.allocCount)
-	if n > 0 {
-		// cacheSpan updated alloc assuming all objects on s
-		// were going to be allocated. Adjust for any that
-		// weren't. We must do this before potentially
-		// sweeping the span.
-		atomic.Xadd64(&c.nmalloc, -int64(n))
-
-		lock(&c.lock)
-		c.empty.remove(s)
-		c.nonempty.insert(s)
-		if !stale {
-			// mCentral_CacheSpan conservatively counted
-			// unallocated slots in heap_live. Undo this.
-			//
-			// If this span was cached before sweep, then
-			// heap_live was totally recomputed since
-			// caching this span, so we don't do this for
-			// stale spans.
-			atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
-		}
-		unlock(&c.lock)
-	}
-
-	if stale {
-		// Now that s is in the right mcentral list, we can
-		// sweep it.
-		s.sweep(false)
-	}
-}
-
-// freeSpan updates c and s after sweeping s.
-// It sets s's sweepgen to the latest generation,
-// and, based on the number of free objects in s,
-// moves s to the appropriate list of c or returns it
-// to the heap.
-// freeSpan reports whether s was returned to the heap.
-// If preserve=true, it does not move s (the caller
-// must take care of it).
-//
-// For !go115NewMCentralImpl.
-func (c *mcentral) freeSpan(s *mspan, preserve bool, wasempty bool) bool {
-	if sg := mheap_.sweepgen; s.sweepgen == sg+1 || s.sweepgen == sg+3 {
-		throw("freeSpan given cached span")
-	}
-	s.needzero = 1
-
-	if preserve {
-		// preserve is set only when called from (un)cacheSpan above,
-		// the span must be in the empty list.
-		if !s.inList() {
-			throw("can't preserve unlinked span")
-		}
-		atomic.Store(&s.sweepgen, mheap_.sweepgen)
-		return false
-	}
-
-	lock(&c.lock)
-
-	// Move to nonempty if necessary.
-	if wasempty {
-		c.empty.remove(s)
-		c.nonempty.insert(s)
-	}
-
-	// delay updating sweepgen until here. This is the signal that
-	// the span may be used in an mcache, so it must come after the
-	// linked list operations above (actually, just after the
-	// lock of c above.)
-	atomic.Store(&s.sweepgen, mheap_.sweepgen)
-
-	if s.allocCount != 0 {
-		unlock(&c.lock)
-		return false
-	}
-
-	c.nonempty.remove(s)
-	unlock(&c.lock)
-	mheap_.freeSpan(s)
-	return true
-}
-
 // grow allocates a new empty span from the heap and initializes it for c's size class.
 func (c *mcentral) grow() *mspan {
 	npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
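
The comments kept in the struct above say the two partial/full spanSets "trade roles on each GC cycle." A minimal standalone Go sketch of that role swap, assuming the runtime convention that mheap_.sweepgen advances by 2 per cycle and the index expressions sweepgen/2%2 (swept) and 1-sweepgen/2%2 (unswept) used by the partialSwept/partialUnswept helpers; this is an illustration, not code from the commit:

// Sketch only: models the spanSet role swap described by the comments
// retained in the mcentral struct above.
package main

import "fmt"

// sweptIndex picks the set holding already-swept spans for this cycle.
func sweptIndex(sweepgen uint32) int { return int(sweepgen / 2 % 2) }

// unsweptIndex picks the other set, holding spans still to be swept.
func unsweptIndex(sweepgen uint32) int { return 1 - sweptIndex(sweepgen) }

func main() {
	// Each GC cycle bumps sweepgen by 2, so the two sets trade roles:
	// what was left unswept last cycle becomes this cycle's sweep work
	// without moving any spans between structures.
	for sg := uint32(0); sg <= 8; sg += 2 {
		fmt.Printf("sweepgen=%d swept=partial[%d] unswept=partial[%d]\n",
			sg, sweptIndex(sg), unsweptIndex(sg))
	}
}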
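
The surviving uncacheSpan keeps only the sweepgen bookkeeping: a span whose sweepgen equals sg+1 was cached before the current sweep began and must be swept by the uncaching goroutine (sweeping requeues it), otherwise it goes onto the partial or full swept set depending on remaining free slots. A simplified sketch of that branch logic, with a hypothetical helper and no real spans, locks, or atomics:

// Sketch only: mirrors the branch structure of (*mcentral).uncacheSpan
// in the hunk above. spanGen stands in for s.sweepgen, sg for
// mheap_.sweepgen, freeObjects for int(s.nelems)-int(s.allocCount).
package main

import "fmt"

func uncache(sg, spanGen uint32, freeObjects int) string {
	// A span cached before the current sweep began carries sweepgen sg+1;
	// the uncaching path must sweep it itself, and sweep places it on
	// the right list.
	if stale := spanGen == sg+1; stale {
		return "stale: sweep now; sweep requeues the span"
	}
	// Already swept: requeue by remaining capacity.
	if freeObjects > 0 {
		return "push onto partialSwept(sg)"
	}
	return "push onto fullSwept(sg)"
}

func main() {
	const sg = 6                      // assumed current sweep generation
	fmt.Println(uncache(sg, sg+1, 3)) // cached before sweep began
	fmt.Println(uncache(sg, sg+3, 3)) // swept-then-cached, has free slots
	fmt.Println(uncache(sg, sg+3, 0)) // swept-then-cached, fully allocated
}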