Diffstat (limited to 'libgo/go/runtime/mgcmark.go')
-rw-r--r--  libgo/go/runtime/mgcmark.go  84
1 file changed, 32 insertions(+), 52 deletions(-)
diff --git a/libgo/go/runtime/mgcmark.go b/libgo/go/runtime/mgcmark.go
index 7297fcb..88cae41 100644
--- a/libgo/go/runtime/mgcmark.go
+++ b/libgo/go/runtime/mgcmark.go
@@ -232,7 +232,7 @@ func markroot(gcw *gcWork, i uint32) {
selfScan := gp == userG && readgstatus(userG) == _Grunning
if selfScan {
casgstatus(userG, _Grunning, _Gwaiting)
- userG.waitreason = "garbage collection scan"
+ userG.waitreason = waitReasonGarbageCollectionScan
}
// TODO: scang blocks until gp's stack has
@@ -467,7 +467,7 @@ func gcAssistAlloc1(gp *g, scanWork int64) {
// store that clears it but an atomic check in every malloc
// would be a performance hit.
// Instead we recheck it here on the non-preemptable system
- // stack to determine if we should preform an assist.
+ // stack to determine if we should perform an assist.
// GC is done, so ignore any remaining debt.
gp.gcAssistBytes = 0
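
The hunk elides the check this comment belongs to. Reconstructed from the comment and upstream Go as a sketch, not quoted from this tree:

	if atomic.Load(&gcBlackenEnabled) == 0 {
		// Marking finished between malloc's non-atomic check and
		// reaching the system stack, so skip the assist entirely.
		gp.gcAssistBytes = 0 // GC is done; ignore any remaining debt.
		return
	}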
@@ -486,7 +486,7 @@ func gcAssistAlloc1(gp *g, scanWork int64) {
// gcDrainN requires the caller to be preemptible.
casgstatus(gp, _Grunning, _Gwaiting)
- gp.waitreason = "GC assist marking"
+ gp.waitreason = waitReasonGCAssistMarking
// drain own cached work first in the hopes that it
// will be more cache friendly.
@@ -585,7 +585,7 @@ func gcParkAssist() bool {
return false
}
// Park.
- goparkunlock(&work.assistQueue.lock, "GC assist wait", traceEvGoBlockGC, 2)
+ goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceEvGoBlockGC, 2)
return true
}
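
Three of the hunks above swap free-form wait-reason strings for typed constants. Upstream Go defines the type in runtime2.go; the constant names are real, but the full set and their iota ordering are assumed here:

	// A waitReason explains why a goroutine is in _Gwaiting.
	type waitReason uint8

	const (
		waitReasonZero                  waitReason = iota // ""
		waitReasonGarbageCollectionScan                   // "garbage collection scan"
		waitReasonGCAssistMarking                         // "GC assist marking"
		waitReasonGCAssistWait                            // "GC assist wait"
	)

A one-byte constant is cheaper to store on the g than a string header, and it keeps every valid reason in a single table for tracing and debugging to map back to text.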
@@ -934,9 +934,6 @@ func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) {
b := b0
n := n0
- arena_start := mheap_.arena_start
- arena_used := mheap_.arena_used
-
for i := uintptr(0); i < n; {
// Find bits for the next word.
bits := uint32(*addb(ptrmask, i/(sys.PtrSize*8)))
@@ -948,9 +945,9 @@ func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) {
if bits&1 != 0 {
// Same work as in scanobject; see comments there.
obj := *(*uintptr)(unsafe.Pointer(b + i))
- if obj != 0 && arena_start <= obj && obj < arena_used {
- if obj, hbits, span, objIndex := heapBitsForObject(obj, b, i, false); obj != 0 {
- greyobject(obj, b, i, hbits, span, gcw, objIndex, false)
+ if obj != 0 {
+ if obj, span, objIndex := findObject(obj, b, i, false); obj != 0 {
+ greyobject(obj, b, i, span, gcw, objIndex, false)
}
}
}
@@ -967,18 +964,6 @@ func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) {
//
//go:nowritebarrier
func scanobject(b uintptr, gcw *gcWork) {
- // Note that arena_used may change concurrently during
- // scanobject and hence scanobject may encounter a pointer to
- // a newly allocated heap object that is *not* in
- // [start,used). It will not mark this object; however, we
- // know that it was just installed by a mutator, which means
- // that mutator will execute a write barrier and take care of
- // marking it. This is even more pronounced on relaxed memory
- // architectures since we access arena_used without barriers
- // or synchronization, but the same logic applies.
- arena_start := mheap_.arena_start
- arena_used := mheap_.arena_used
-
// Find the bits for b and the size of the object at b.
//
// b is either the beginning of an object, in which case this
@@ -1052,11 +1037,19 @@ func scanobject(b uintptr, gcw *gcWork) {
obj := *(*uintptr)(unsafe.Pointer(b + i))
// At this point we have extracted the next potential pointer.
- // Check if it points into heap and not back at the current object.
- if obj != 0 && arena_start <= obj && obj < arena_used && obj-b >= n {
- // Mark the object.
- if obj, hbits, span, objIndex := heapBitsForObject(obj, b, i, false); obj != 0 {
- greyobject(obj, b, i, hbits, span, gcw, objIndex, false)
+ // Quickly filter out nil and pointers back to the current object.
+ if obj != 0 && obj-b >= n {
+ // Test if obj points into the Go heap and, if so,
+ // mark the object.
+ //
+ // Note that it's possible for findObject to
+ // fail if obj points to a just-allocated heap
+ // object because of a race with growing the
+ // heap. In this case, we know the object was
+ // just allocated and hence will be marked by
+ // allocation itself.
+ if obj, span, objIndex := findObject(obj, b, i, false); obj != 0 {
+ greyobject(obj, b, i, span, gcw, objIndex, false)
}
}
}
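
Both scanblock and scanobject lose the inline arena_start/arena_used bounds test because, with a sparse heap, there is no single contiguous range to test against. The call sites imply the signature below for findObject; upstream Go 1.11 has no forStack parameter, so its presence here is assumed for libgo's conservative stack scanning. A minimal sketch of the contract, not the real implementation:

	func findObjectSketch(p, refBase, refOff uintptr, forStack bool) (base uintptr, s *mspan, objIndex uintptr) {
		// refBase/refOff only matter for diagnostics when p is bad.
		s = spanOf(p) // nil when p lies outside every heap arena
		if s == nil || s.state != _MSpanInUse || p < s.base() || p >= s.limit {
			return 0, nil, 0 // callers skip the pointer, as the old bounds check did
		}
		objIndex = s.objIndex(p)              // which object in the span contains p
		base = s.base() + objIndex*s.elemsize // start address of that object
		return
	}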
@@ -1071,16 +1064,11 @@ func scanobject(b uintptr, gcw *gcWork) {
// scanblock, but we scan the stack conservatively, so there is no
// bitmask of pointers.
func scanstackblock(b, n uintptr, gcw *gcWork) {
- arena_start := mheap_.arena_start
- arena_used := mheap_.arena_used
-
for i := uintptr(0); i < n; i += sys.PtrSize {
// Same work as in scanobject; see comments there.
obj := *(*uintptr)(unsafe.Pointer(b + i))
- if obj != 0 && arena_start <= obj && obj < arena_used {
- if obj, hbits, span, objIndex := heapBitsForObject(obj, b, i, true); obj != 0 {
- greyobject(obj, b, i, hbits, span, gcw, objIndex, true)
- }
+ if obj, span, objIndex := findObject(obj, b, i, true); obj != 0 {
+ greyobject(obj, b, i, span, gcw, objIndex, true)
}
}
}
@@ -1090,11 +1078,9 @@ func scanstackblock(b, n uintptr, gcw *gcWork) {
// Preemption must be disabled.
//go:nowritebarrier
func shade(b uintptr) {
- // shade can be called to shade a pointer found on the stack,
- // so pass forStack as true to heapBitsForObject and greyobject.
- if obj, hbits, span, objIndex := heapBitsForObject(b, 0, 0, true); obj != 0 {
+ if obj, span, objIndex := findObject(b, 0, 0, true); obj != 0 {
gcw := &getg().m.p.ptr().gcw
- greyobject(obj, 0, 0, hbits, span, gcw, objIndex, true)
+ greyobject(obj, 0, 0, span, gcw, objIndex, true)
if gcphase == _GCmarktermination || gcBlackenPromptly {
// Ps aren't allowed to cache work during mark
// termination.
@@ -1110,7 +1096,7 @@ func shade(b uintptr) {
// See also wbBufFlush1, which partially duplicates this logic.
//
//go:nowritebarrierrec
-func greyobject(obj, base, off uintptr, hbits heapBits, span *mspan, gcw *gcWork, objIndex uintptr, forStack bool) {
+func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr, forStack bool) {
// obj should be start of allocation, and so must be at least pointer-aligned.
if obj&(sys.PtrSize-1) != 0 {
throw("greyobject: obj not pointer-aligned")
@@ -1139,6 +1125,7 @@ func greyobject(obj, base, off uintptr, hbits heapBits, span *mspan, gcw *gcWork
getg().m.traceback = 2
throw("checkmark found unmarked object")
}
+ hbits := heapBitsForAddr(obj)
if hbits.isCheckmarked(span.elemsize) {
return
}
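
Dropping hbits from greyobject's parameters works because only the checkmark debugging path needs the heap bitmap; the normal mark path works off the span's mark bits. The flow after this change (the first two lines appear in the hunk; the setCheckmarked call is assumed from upstream Go):

	hbits := heapBitsForAddr(obj) // per-word heap bitmap entry for obj
	if hbits.isCheckmarked(span.elemsize) {
		return // already checkmarked by an earlier scan
	}
	hbits.setCheckmarked(span.elemsize)

So the bitmap lookup is paid only when useCheckmark is on, never during ordinary marking.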
@@ -1190,15 +1177,8 @@ func greyobject(obj, base, off uintptr, hbits heapBits, span *mspan, gcw *gcWork
// gcDumpObject dumps the contents of obj for debugging and marks the
// field at byte offset off in obj.
func gcDumpObject(label string, obj, off uintptr) {
- if obj < mheap_.arena_start || obj >= mheap_.arena_used {
- print(label, "=", hex(obj), " is not in the Go heap\n")
- return
- }
- k := obj >> _PageShift
- x := k
- x -= mheap_.arena_start >> _PageShift
- s := mheap_.spans[x]
- print(label, "=", hex(obj), " k=", hex(k))
+ s := spanOf(obj)
+ print(label, "=", hex(obj))
if s == nil {
print(" s=nil\n")
return
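
spanOf replaces both the bounds check and the manual page-shift arithmetic. Under the sparse-arena layout this change belongs to, it behaves roughly like the sketch below; arenaOf is a hypothetical stand-in for the real two-level arena index lookup:

	func spanOfSketch(p uintptr) *mspan {
		ha := arenaOf(p) // hypothetical: nil when p maps to no heap arena
		if ha == nil {
			return nil // gcDumpObject then prints "s=nil" rather than "not in the Go heap"
		}
		// Each arena keeps its own page-indexed span table, replacing the
		// single mheap_.spans array that was indexed from arena_start.
		return ha.spans[(p/pageSize)%pagesPerArena]
	}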
@@ -1272,9 +1252,9 @@ func gcMarkTinyAllocs() {
if c == nil || c.tiny == 0 {
continue
}
- _, hbits, span, objIndex := heapBitsForObject(c.tiny, 0, 0, false)
+ _, span, objIndex := findObject(c.tiny, 0, 0, false)
gcw := &p.gcw
- greyobject(c.tiny, 0, 0, hbits, span, gcw, objIndex, false)
+ greyobject(c.tiny, 0, 0, span, gcw, objIndex, false)
if gcBlackenPromptly {
gcw.dispose()
}
@@ -1309,7 +1289,7 @@ func initCheckmarks() {
useCheckmark = true
for _, s := range mheap_.allspans {
if s.state == _MSpanInUse {
- heapBitsForSpan(s.base()).initCheckmarkSpan(s.layout())
+ heapBitsForAddr(s.base()).initCheckmarkSpan(s.layout())
}
}
}
@@ -1318,7 +1298,7 @@ func clearCheckmarks() {
useCheckmark = false
for _, s := range mheap_.allspans {
if s.state == _MSpanInUse {
- heapBitsForSpan(s.base()).clearCheckmarkSpan(s.layout())
+ heapBitsForAddr(s.base()).clearCheckmarkSpan(s.layout())
}
}
}
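
heapBitsForSpan disappears for the same reason: judging from its old call sites and upstream Go 1.10, it was just an arena-range assertion wrapped around heapBitsForAddr, roughly:

	func heapBitsForSpan(base uintptr) heapBits {
		if base < mheap_.arena_start || base >= mheap_.arena_used {
			throw("heapBitsForSpan: base out of range")
		}
		return heapBitsForAddr(base)
	}

With no contiguous [arena_start, arena_used) range left to assert against, the wrapper has nothing to add and callers go straight to heapBitsForAddr.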