author	Ian Lance Taylor <ian@gcc.gnu.org>	2019-08-19 19:09:50 +0000
committer	Ian Lance Taylor <ian@gcc.gnu.org>	2019-08-19 19:09:50 +0000
commit	4f6bdb08bab64b973e465fb45deb751561e3b969 (patch)
tree	1945ba0d73dd9ecc9361beaaa2fa056653e3bce8
parent	188d00796f5bd338b9b8ab1cc8ba4b43af8ab8fd (diff)
runtime: be more strict in GC
With CL 190599, along with what we do in greyobject, we ensure that
we only mark allocated heap objects. As a result we can be more
strict in GC:

- Enable the "sweep increased allocation count" check, which verifies
  that the number of mark bits set is no more than the number of
  allocation bits.

- Enable the invalid pointer check on heap scans. We only trace
  allocated heap objects, which should not contain invalid pointers.

This also makes the libgo runtime more convergent with the gc
runtime.

Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/190797

From-SVN: r274678
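The invariant behind the re-enabled check is that greyobject only ever sets mark bits for allocated objects, so after marking, the popcount of a span's gcmarkBits can never exceed its allocCount. A minimal self-contained sketch of that consistency check (illustrative bitmaps, not the runtime's real data structures):

package main

import (
	"fmt"
	"math/bits"
)

// countMarks returns the number of set bits in a span's mark bitmap.
func countMarks(gcmarkBits []uint64) int {
	n := 0
	for _, w := range gcmarkBits {
		n += bits.OnesCount64(w)
	}
	return n
}

func main() {
	gcmarkBits := []uint64{0b1011} // three objects marked live
	allocCount := 4                // four objects allocated in the span

	// The invariant the sweeper now enforces: marks never exceed
	// allocations, because greyobject only marks allocated objects.
	if nalloc := countMarks(gcmarkBits); nalloc > allocCount {
		panic("sweep increased allocation count")
	}
	fmt.Println("invariant holds: nalloc <= allocCount")
}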
-rw-r--r--	gcc/go/gofrontend/MERGE	2
-rw-r--r--	libgo/go/runtime/mcentral.go	9
-rw-r--r--	libgo/go/runtime/mgcsweep.go	38
-rw-r--r--	libgo/go/runtime/runtime1.go	19
4 files changed, 13 insertions(+), 55 deletions(-)
diff --git a/gcc/go/gofrontend/MERGE b/gcc/go/gofrontend/MERGE
index 74577f5..b1a6579 100644
--- a/gcc/go/gofrontend/MERGE
+++ b/gcc/go/gofrontend/MERGE
@@ -1,4 +1,4 @@
-85857977230437f2b3dcbeea009efbb8b2789039
+b0ba5daa8216a0424b24f74466cedab0b986f3b4
 
 The first line of this file holds the git revision number of the last
 merge done from the gofrontend repository.
diff --git a/libgo/go/runtime/mcentral.go b/libgo/go/runtime/mcentral.go
index 0196ba4..a60eb9f 100644
--- a/libgo/go/runtime/mcentral.go
+++ b/libgo/go/runtime/mcentral.go
@@ -56,15 +56,6 @@ retry:
 			c.empty.insertBack(s)
 			unlock(&c.lock)
 			s.sweep(true)
-
-			// With gccgo's conservative GC, the returned span may
-			// now be full. See the comments in mspan.sweep.
-			if uintptr(s.allocCount) == s.nelems {
-				s.freeindex = s.nelems
-				lock(&c.lock)
-				goto retry
-			}
-
 			goto havespan
 		}
 		if s.sweepgen == sg-1 {
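For context, the block deleted above worked around conservative marking: a span returned by sweep could come back completely full, so the allocator marked it exhausted and went back to scanning the list. A simplified sketch of that now-unneeded pattern (hypothetical span type, not the runtime's mcentral):

package main

import "fmt"

// span is a hypothetical stand-in for the runtime's mspan.
type span struct {
	allocCount uintptr
	nelems     uintptr
	freeindex  uintptr
}

// cacheSpan mimics the old gccgo behavior: if a swept span came back
// full, mark it exhausted and keep looking, as the deleted goto retry
// did. With precise marking this case can no longer occur.
func cacheSpan(spans []*span) *span {
	for _, s := range spans {
		if s.allocCount == s.nelems { // span filled during sweep
			s.freeindex = s.nelems // record that it is exhausted
			continue               // was: take the lock and goto retry
		}
		return s
	}
	return nil
}

func main() {
	spans := []*span{{allocCount: 8, nelems: 8}, {allocCount: 3, nelems: 8}}
	fmt.Printf("usable span: %+v\n", cacheSpan(spans))
}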
diff --git a/libgo/go/runtime/mgcsweep.go b/libgo/go/runtime/mgcsweep.go
index bc53de4..539a982 100644
--- a/libgo/go/runtime/mgcsweep.go
+++ b/libgo/go/runtime/mgcsweep.go
@@ -326,39 +326,16 @@ func (s *mspan) sweep(preserve bool) bool {
 		freeToHeap = true
 	}
 	nfreed := s.allocCount - nalloc
-
-	// This check is not reliable with gccgo, because of
-	// conservative stack scanning. The test boils down to
-	// checking that no new bits have been set in gcmarkBits since
-	// the span was added to the sweep count. New bits are set by
-	// greyobject. Seeing a new bit means that a live pointer has
-	// appeared that was not found during the mark phase. That can
-	// not happen when pointers are followed strictly. However,
-	// with conservative checking, it is possible for a pointer
-	// that will never be used to appear live and to cause a mark
-	// to be added. That is unfortunate in that it causes this
-	// check to be inaccurate, and it will keep an object live
-	// unnecessarily, but provided the pointer is not really live
-	// it is not otherwise a problem. So we disable the test for gccgo.
-	nfreedSigned := int(nfreed)
 	if nalloc > s.allocCount {
-		if usestackmaps {
-			print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
-			throw("sweep increased allocation count")
-		}
-
-		// For gccgo, adjust the freed count as a signed number.
-		nfreedSigned = int(s.allocCount) - int(nalloc)
-		if uintptr(nalloc) == s.nelems {
-			s.freeindex = s.nelems
-		}
+		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
+		throw("sweep increased allocation count")
 	}
 	s.allocCount = nalloc
 	wasempty := s.nextFreeIndex() == s.nelems
 	s.freeindex = 0 // reset allocation index to start of span.
 	if trace.enabled {
-		getg().m.p.ptr().traceReclaimed += uintptr(nfreedSigned) * s.elemsize
+		getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
 	}
 
 	// gcmarkBits becomes the allocBits.
@@ -374,7 +351,7 @@ func (s *mspan) sweep(preserve bool) bool {
 	// But we need to set it before we make the span available for allocation
 	// (return it to heap or mcentral), because allocation code assumes that a
 	// span is already swept if available for allocation.
-	if freeToHeap || nfreedSigned <= 0 {
+	if freeToHeap || nfreed == 0 {
 		// The span must be in our exclusive ownership until we update sweepgen,
 		// check for potential races.
 		if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
@@ -387,11 +364,8 @@ func (s *mspan) sweep(preserve bool) bool {
 		atomic.Store(&s.sweepgen, sweepgen)
 	}
 
-	if spc.sizeclass() != 0 {
-		c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreedSigned)
-	}
-
-	if nfreedSigned > 0 && spc.sizeclass() != 0 {
+	if nfreed > 0 && spc.sizeclass() != 0 {
+		c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreed)
 		res = mheap_.central[spc].mcentral.freeSpan(s, preserve, wasempty)
 		// mcentral.freeSpan updates sweepgen
 	} else if freeToHeap {
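The nfreedSigned dance removed above existed because conservative scanning could legitimately observe nalloc > allocCount, making the freed count negative. A small sketch contrasting the two accounting modes (simplified, with plain integers rather than mspan fields):

package main

import "fmt"

// freedSigned is the old gccgo-style accounting: conservative stack
// scanning could mark an object in a span after it was counted, so
// nalloc could exceed allocCount and the result could go negative.
func freedSigned(allocCount, nalloc uint16) int {
	return int(allocCount) - int(nalloc)
}

// freedStrict is the accounting after this change: only allocated
// objects are ever marked, so nalloc <= allocCount always holds and
// a violation is a runtime bug worth crashing on.
func freedStrict(allocCount, nalloc uint16) uint16 {
	if nalloc > allocCount {
		panic("sweep increased allocation count")
	}
	return allocCount - nalloc
}

func main() {
	fmt.Println(freedSigned(3, 5)) // -2: tolerated under conservative GC
	fmt.Println(freedStrict(5, 3)) // 2: strict accounting
}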
diff --git a/libgo/go/runtime/runtime1.go b/libgo/go/runtime/runtime1.go
index e2567b3..2424f20 100644
--- a/libgo/go/runtime/runtime1.go
+++ b/libgo/go/runtime/runtime1.go
@@ -352,19 +352,12 @@ func parsedebugvars() {
 	// defaults
 	debug.cgocheck = 1
 
-	// Unfortunately, because gccgo uses conservative stack scanning,
-	// we can not enable invalid pointer checking. It is possible for
-	// memory block M1 to point to M2, and for both to be dead.
-	// We release M2, causing the entire span to be released.
-	// Before we release M1, a stack pointer appears that point into it.
-	// This stack pointer is presumably dead, but causes M1 to be marked.
-	// We scan M1 and see the pointer to M2 on a released span.
-	// At that point, if debug.invalidptr is set, we crash.
-	// This is not a problem, assuming that M1 really is dead and
-	// the pointer we discovered to it will not be used.
-	if usestackmaps {
-		debug.invalidptr = 1
-	}
+	// Gccgo uses conservative stack scanning, so we cannot check
+	// invalid pointers on stack. But we can still enable invalid
+	// pointer check on heap scanning. When scanning the heap, we
+	// ensure that we only trace allocated heap objects, which should
+	// not contain invalid pointers.
+	debug.invalidptr = 1
 
 	for p := gogetenv("GODEBUG"); p != ""; {
 		field := ""
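With debug.invalidptr now defaulting to 1 under gccgo as well, the check can still be switched off through GODEBUG, which the loop above parses as comma-separated name=value pairs. A rough illustration of that parsing (using the strings package for brevity; the real runtime avoids it):

package main

import (
	"fmt"
	"strings"
)

// parseDebugVars sketches how the runtime's GODEBUG loop consumes
// comma-separated name=value pairs; illustrative only.
func parseDebugVars(godebug string) map[string]string {
	vars := make(map[string]string)
	for _, field := range strings.Split(godebug, ",") {
		if name, value, ok := strings.Cut(field, "="); ok {
			vars[name] = value
		}
	}
	return vars
}

func main() {
	// Running a program with GODEBUG=invalidptr=0 disables the
	// invalid pointer check that this change enables by default.
	fmt.Println(parseDebugVars("invalidptr=0,cgocheck=1"))
}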