 gcc/go/gofrontend/MERGE      |  2 +-
 libgo/go/runtime/mcentral.go |  9 +++++++++
 libgo/go/runtime/mgcsweep.go | 26 ++++++++++++++++++--------
 3 files changed, 28 insertions(+), 9 deletions(-)
diff --git a/gcc/go/gofrontend/MERGE b/gcc/go/gofrontend/MERGE
index 6650b03..9a789b9 100644
--- a/gcc/go/gofrontend/MERGE
+++ b/gcc/go/gofrontend/MERGE
@@ -1,4 +1,4 @@
-f2cd046a4e0d681c3d21ee547b437d3eab8af268
+82d7205ba9e5c1fe38fd24f89a45caf2e974975b
The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.
diff --git a/libgo/go/runtime/mcentral.go b/libgo/go/runtime/mcentral.go
index eaabcb9..150f4fd 100644
--- a/libgo/go/runtime/mcentral.go
+++ b/libgo/go/runtime/mcentral.go
@@ -56,6 +56,15 @@ retry:
c.empty.insertBack(s)
unlock(&c.lock)
s.sweep(true)
+
+ // With gccgo's conservative GC, the returned span may
+ // now be full. See the comments in mspan.sweep.
+ if uintptr(s.allocCount) == s.nelems {
+ s.freeindex = s.nelems
+ lock(&c.lock)
+ goto retry
+ }
+
goto havespan
}
if s.sweepgen == sg-1 {
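
[Editor's note: a minimal, self-contained sketch of the retry pattern in the
mcentral.go hunk above, using hypothetical simplified types rather than the
runtime's real mcentral/mspan. Each candidate span is moved to the empty list
before it is swept, so when conservative scanning leaves a span full, the goto
resumes the search over the remaining nonempty spans instead of spinning on
the same one.]

package main

import "fmt"

// span stands in for mspan: just the fields the retry check uses.
type span struct {
	allocCount uintptr // objects currently marked allocated
	nelems     uintptr // total object slots in the span
	freeindex  uintptr // next slot to scan for a free object
}

// central stands in for mcentral's two span lists.
type central struct {
	nonempty []*span // spans believed to have free objects
	empty    []*span // spans with no free objects (or being swept)
}

// sweep stands in for mspan.sweep: under a conservative collector,
// stack words that merely look like pointers can keep every object
// in s marked live, so the span may still be full after sweeping.
func sweep(s *span) {}

func (c *central) cacheSpan() *span {
retry:
	for len(c.nonempty) > 0 {
		s := c.nonempty[0]
		c.nonempty = c.nonempty[1:]
		c.empty = append(c.empty, s) // moved before sweeping, as in the patch
		sweep(s)
		if s.allocCount == s.nelems {
			// The swept span came back full: mark it exhausted
			// and keep searching, mirroring the goto in the hunk.
			s.freeindex = s.nelems
			goto retry
		}
		return s
	}
	return nil // the real runtime would grow the heap here
}

func main() {
	c := &central{nonempty: []*span{
		{allocCount: 8, nelems: 8}, // conservatively full: skipped
		{allocCount: 3, nelems: 8}, // has room: returned
	}}
	fmt.Println(c.cacheSpan().allocCount == 3) // true
}
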
diff --git a/libgo/go/runtime/mgcsweep.go b/libgo/go/runtime/mgcsweep.go
index c60214c..d6be349 100644
--- a/libgo/go/runtime/mgcsweep.go
+++ b/libgo/go/runtime/mgcsweep.go
@@ -296,7 +296,7 @@ func (s *mspan) sweep(preserve bool) bool {
}
nfreed := s.allocCount - nalloc
- // This test is not reliable with gccgo, because of
+ // This check is not reliable with gccgo, because of
// conservative stack scanning. The test boils down to
// checking that no new bits have been set in gcmarkBits since
// the span was added to the sweep count. New bits are set by
@@ -309,16 +309,23 @@ func (s *mspan) sweep(preserve bool) bool {
// check to be inaccurate, and it will keep an object live
// unnecessarily, but provided the pointer is not really live
// it is not otherwise a problem. So we disable the test for gccgo.
- if false && nalloc > s.allocCount {
- print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
- throw("sweep increased allocation count")
+ nfreedSigned := int(nfreed)
+ if nalloc > s.allocCount {
+ // print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
+ // throw("sweep increased allocation count")
+
+ // For gccgo, adjust the freed count as a signed number.
+ nfreedSigned = int(s.allocCount) - int(nalloc)
+ if uintptr(nalloc) == s.nelems {
+ s.freeindex = s.nelems
+ }
}
s.allocCount = nalloc
wasempty := s.nextFreeIndex() == s.nelems
s.freeindex = 0 // reset allocation index to start of span.
if trace.enabled {
- getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
+ getg().m.p.ptr().traceReclaimed += uintptr(nfreedSigned) * s.elemsize
}
// gcmarkBits becomes the allocBits.
@@ -334,7 +341,7 @@ func (s *mspan) sweep(preserve bool) bool {
// But we need to set it before we make the span available for allocation
// (return it to heap or mcentral), because allocation code assumes that a
// span is already swept if available for allocation.
- if freeToHeap || nfreed == 0 {
+ if freeToHeap || nfreedSigned <= 0 {
// The span must be in our exclusive ownership until we update sweepgen,
// check for potential races.
if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
@@ -347,8 +354,11 @@ func (s *mspan) sweep(preserve bool) bool {
atomic.Store(&s.sweepgen, sweepgen)
}
- if nfreed > 0 && spc.sizeclass() != 0 {
- c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreed)
+ if spc.sizeclass() != 0 {
+ c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreedSigned)
+ }
+
+ if nfreedSigned > 0 && spc.sizeclass() != 0 {
res = mheap_.central[spc].mcentral.freeSpan(s, preserve, wasempty)
// MCentral_FreeSpan updates sweepgen
} else if freeToHeap {
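
[Editor's note: a minimal sketch of the counting bug the mgcsweep.go change
works around, with hypothetical values; in the runtime, allocCount and the
post-mark count are uint16. When conservative stack scanning marks more
objects than were previously recorded as allocated, the unsigned subtraction
wraps instead of going negative, which would corrupt the reclaimed-bytes and
small-object-free statistics.]

package main

import "fmt"

func main() {
	// Before sweeping, 5 objects were recorded allocated; conservative
	// scanning then marked 7 live, so nalloc exceeds allocCount.
	var allocCount, nalloc uint16 = 5, 7

	nfreed := allocCount - nalloc     // uint16 wraps: 65534
	fmt.Println(nfreed)               // wildly wrong as a "freed" count
	fmt.Println(uintptr(nfreed) * 32) // would inflate traceReclaimed

	// The patch's approach: compute the difference as a signed int, so
	// it can go negative, and turn the nfreed == 0 tests into <= 0 tests.
	nfreedSigned := int(allocCount) - int(nalloc)
	fmt.Println(nfreedSigned) // -2
	if nfreedSigned <= 0 {
		fmt.Println("nothing freed; treat the span as fully swept")
	}
}

[Note that in the last hunk, adding uintptr(nfreedSigned) to local_nsmallfree
when the value is negative relies on Go's two's-complement conversion
wraparound, so the addition effectively subtracts the over-count.]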