author	Ian Lance Taylor <ian@gcc.gnu.org>	2018-09-13 22:06:16 +0000
committer	Ian Lance Taylor <ian@gcc.gnu.org>	2018-09-13 22:06:16 +0000
commit	38fab7369d19fd545eb8510ec198e73949a2c75d (patch)
tree	39d19743a7f211c9c8b196184a333a57a9b2d6ba /libgo
parent	84b5706abb8f5dff634e588071ab4fef080cd05e (diff)
runtime: correct counters in sweep
In the sweep code we can sometimes see incorrect counts when
conservative stack scanning causes us to grey an object that we
earlier decided could be freed. We already ignore the check that
would catch this; adjust that case so that the span counts also stay
correct when it happens. This gives us slightly more accurate numbers
in MemStats, and helps avoid a rare failure in TestReadMemStats.

Also fix the free index, and cope with finding a full span when
allocating a new one.

Reviewed-on: https://go-review.googlesource.com/134216

From-SVN: r264294
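As a reader's aid, here is a minimal, standalone Go sketch of the
counting problem the change addresses. The span struct and sweepCounts
helper below are illustrative stand-ins, not the runtime's own code:
when the recount from the mark bits (nalloc) exceeds the previous
allocCount, conservative scanning has resurrected objects, so the
freed count must be treated as a signed quantity, and a span that came
back completely full needs its freeindex pushed to the end.

package main

import "fmt"

// span models only the counters sweep cares about; the runtime's
// mspan has many more fields. Hypothetical, for illustration only.
type span struct {
	allocCount uint16  // objects believed allocated at the last sweep
	nelems     uintptr // total object slots in the span
	freeindex  uintptr // next slot to scan when allocating
}

// sweepCounts mirrors the commit's adjustment: nalloc is recounted
// from gcmarkBits, and with conservative stack scanning it can
// legitimately exceed the previous allocCount. Returning a signed
// count lets the bookkeeping go down as well as up.
func sweepCounts(s *span, nalloc uint16) int {
	nfreed := int(s.allocCount) - int(nalloc) // may be negative
	if nalloc > s.allocCount && uintptr(nalloc) == s.nelems {
		// Conservative scanning filled the span back up: mark it
		// full so the allocator will not look for free slots in it.
		s.freeindex = s.nelems
	}
	s.allocCount = nalloc
	return nfreed
}

func main() {
	s := &span{allocCount: 5, nelems: 8}
	// Two previously-"free" objects were greyed by a conservative
	// stack scan, so the recount finds 7 live objects, not 5.
	fmt.Println(sweepCounts(s, 7)) // -2
}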
Diffstat (limited to 'libgo')
-rw-r--r--	libgo/go/runtime/mcentral.go	 9
-rw-r--r--	libgo/go/runtime/mgcsweep.go	26
2 files changed, 27 insertions(+), 8 deletions(-)
diff --git a/libgo/go/runtime/mcentral.go b/libgo/go/runtime/mcentral.go
index eaabcb9..150f4fd 100644
--- a/libgo/go/runtime/mcentral.go
+++ b/libgo/go/runtime/mcentral.go
@@ -56,6 +56,15 @@ retry:
 			c.empty.insertBack(s)
 			unlock(&c.lock)
 			s.sweep(true)
+
+			// With gccgo's conservative GC, the returned span may
+			// now be full. See the comments in mspan.sweep.
+			if uintptr(s.allocCount) == s.nelems {
+				s.freeindex = s.nelems
+				lock(&c.lock)
+				goto retry
+			}
+
 			goto havespan
 		}
 		if s.sweepgen == sg-1 {
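The hunk above guards the allocator: after s.sweep(true), a span that
was chosen because it appeared to have free objects may have been
refilled by resurrected objects, so cacheSpan must go back and pick
another. A standalone sketch of that retry pattern, with hypothetical
pickSpan/sweep helpers standing in for mcentral's real machinery:

package main

import "fmt"

type span struct {
	allocCount uint16
	nelems     uintptr
	freeindex  uintptr
}

// sweep stands in for mspan.sweep; under conservative GC it may
// leave the span with every slot still in use.
func sweep(s *span) {}

// pickSpan models mcentral.cacheSpan's new retry loop: if a span
// comes back from sweeping completely full, record that fact in
// freeindex and try the next candidate instead of returning it.
func pickSpan(candidates []*span) *span {
	for _, s := range candidates {
		sweep(s)
		if uintptr(s.allocCount) == s.nelems {
			s.freeindex = s.nelems // span is full; nothing to hand out
			continue               // the real code does "goto retry"
		}
		return s
	}
	return nil // caller falls back to growing the heap
}

func main() {
	full := &span{allocCount: 8, nelems: 8}
	free := &span{allocCount: 3, nelems: 8}
	fmt.Println(pickSpan([]*span{full, free}) == free) // true
}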
diff --git a/libgo/go/runtime/mgcsweep.go b/libgo/go/runtime/mgcsweep.go
index c60214c..d6be349 100644
--- a/libgo/go/runtime/mgcsweep.go
+++ b/libgo/go/runtime/mgcsweep.go
@@ -296,7 +296,7 @@ func (s *mspan) sweep(preserve bool) bool {
 	}
 	nfreed := s.allocCount - nalloc
 
-	// This test is not reliable with gccgo, because of
+	// This check is not reliable with gccgo, because of
 	// conservative stack scanning. The test boils down to
 	// checking that no new bits have been set in gcmarkBits since
 	// the span was added to the sweep count. New bits are set by
@@ -309,16 +309,23 @@ func (s *mspan) sweep(preserve bool) bool {
 	// check to be inaccurate, and it will keep an object live
 	// unnecessarily, but provided the pointer is not really live
 	// it is not otherwise a problem. So we disable the test for gccgo.
-	if false && nalloc > s.allocCount {
-		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
-		throw("sweep increased allocation count")
+	nfreedSigned := int(nfreed)
+	if nalloc > s.allocCount {
+		// print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
+		// throw("sweep increased allocation count")
+
+		// For gccgo, adjust the freed count as a signed number.
+		nfreedSigned = int(s.allocCount) - int(nalloc)
+		if uintptr(nalloc) == s.nelems {
+			s.freeindex = s.nelems
+		}
 	}
 
 	s.allocCount = nalloc
 	wasempty := s.nextFreeIndex() == s.nelems
 	s.freeindex = 0 // reset allocation index to start of span.
 	if trace.enabled {
-		getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
+		getg().m.p.ptr().traceReclaimed += uintptr(nfreedSigned) * s.elemsize
 	}
 
 	// gcmarkBits becomes the allocBits.
@@ -334,7 +341,7 @@ func (s *mspan) sweep(preserve bool) bool {
 	// But we need to set it before we make the span available for allocation
 	// (return it to heap or mcentral), because allocation code assumes that a
 	// span is already swept if available for allocation.
-	if freeToHeap || nfreed == 0 {
+	if freeToHeap || nfreedSigned <= 0 {
 		// The span must be in our exclusive ownership until we update sweepgen,
 		// check for potential races.
 		if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
@@ -347,8 +354,11 @@ func (s *mspan) sweep(preserve bool) bool {
 		atomic.Store(&s.sweepgen, sweepgen)
 	}
 
-	if nfreed > 0 && spc.sizeclass() != 0 {
-		c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreed)
+	if spc.sizeclass() != 0 {
+		c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreedSigned)
+	}
+
+	if nfreedSigned > 0 && spc.sizeclass() != 0 {
 		res = mheap_.central[spc].mcentral.freeSpan(s, preserve, wasempty)
 		// MCentral_FreeSpan updates sweepgen
 	} else if freeToHeap {
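A subtlety in the last hunk: local_nsmallfree is an unsigned counter,
yet the new code adds uintptr(nfreedSigned) even when that value is
negative. In Go, converting a negative int variable to uintptr uses
two's complement, so the unsigned addition wraps around and
effectively subtracts. A tiny standalone demonstration (variable
names chosen to mirror the diff):

package main

import "fmt"

func main() {
	var nsmallfree uintptr = 100 // stands in for local_nsmallfree[sizeclass]
	nfreedSigned := -2           // two objects came back to life this sweep

	// uintptr(-2) is 1<<64 - 2 on 64-bit targets; adding it wraps
	// around, so the counter decreases by 2 as intended.
	nsmallfree += uintptr(nfreedSigned)
	fmt.Println(nsmallfree) // 98
}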