path: root/libgo/go/runtime/proc.go
author     Ian Lance Taylor <iant@golang.org>    2020-12-23 09:57:37 -0800
committer  Ian Lance Taylor <iant@golang.org>    2020-12-30 15:13:24 -0800
commit     cfcbb4227fb20191e04eb8d7766ae6202f526afd (patch)
tree       e2effea96f6f204451779f044415c2385e45042b /libgo/go/runtime/proc.go
parent     0696141107d61483f38482b941549959a0d7f613 (diff)
libgo: update to Go1.16beta1 release
This does not yet include support for the //go:embed directive added in
this release.

	* Makefile.am (check-runtime): Don't create check-runtime-dir.
	(mostlyclean-local): Don't remove check-runtime-dir.
	(check-go-tool, check-vet): Copy in go.mod and modules.txt.
	(check-cgo-test, check-carchive-test): Add go.mod file.
	* Makefile.in: Regenerate.

Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/280172
Diffstat (limited to 'libgo/go/runtime/proc.go')
-rw-r--r--  libgo/go/runtime/proc.go  777
1 file changed, 607 insertions(+), 170 deletions(-)
diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go
index e0b4b50..1696a1b 100644
--- a/libgo/go/runtime/proc.go
+++ b/libgo/go/runtime/proc.go
@@ -171,10 +171,20 @@ func main(unsafe.Pointer) {
maxstacksize = 250000000
}
+ // An upper limit for max stack size. Used to avoid random crashes
+ // after calling SetMaxStack and trying to allocate a stack that is too big,
+ // since stackalloc works with 32-bit sizes.
+ // Not used by gofrontend.
+ // maxstackceiling = 2 * maxstacksize
+
// Allow newproc to start new Ms.
mainStarted = true
if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon
+ // For runtime_syscall_doAllThreadsSyscall, we
+ // register sysmon is not ready for the world to be
+ // stopped.
+ atomic.Store(&sched.sysmonStarting, 1)
systemstack(func() {
newm(sysmon, nil, -1)
})
@@ -191,11 +201,22 @@ func main(unsafe.Pointer) {
if g.m != &m0 {
throw("runtime.main not on m0")
}
+ m0.doesPark = true
- if nanotime() == 0 {
+ // Record when the world started.
+ // Must be before doInit for tracing init.
+ runtimeInitTime = nanotime()
+ if runtimeInitTime == 0 {
throw("nanotime returning zero")
}
+ if debug.inittrace != 0 {
+ inittrace.id = getg().goid
+ inittrace.active = true
+ }
+
+ // doInit(&runtime_inittask) // Must be before defer.
+
// Defer unlock so that runtime.Goexit during init does the unlock too.
needUnlock := true
defer func() {
@@ -204,9 +225,6 @@ func main(unsafe.Pointer) {
}
}()
- // Record when the world started.
- runtimeInitTime = nanotime()
-
main_init_done = make(chan bool)
if iscgo {
// Start the template thread in case we enter Go from
@@ -218,12 +236,17 @@ func main(unsafe.Pointer) {
fn := main_init // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
fn()
createGcRootsIndex()
- close(main_init_done)
// For gccgo we have to wait until after main is initialized
// to enable GC, because initializing main registers the GC roots.
gcenable()
+ // Disable init tracing after main init done to avoid overhead
+ // of collecting statistics in malloc and newproc
+ inittrace.active = false
+
+ close(main_init_done)
+
needUnlock = false
unlockOSThread()
@@ -313,14 +336,23 @@ func goschedguarded() {
mcall(goschedguarded_m)
}
-// Puts the current goroutine into a waiting state and calls unlockf.
+// Puts the current goroutine into a waiting state and calls unlockf on the
+// system stack.
+//
// If unlockf returns false, the goroutine is resumed.
+//
// unlockf must not access this G's stack, as it may be moved between
// the call to gopark and the call to unlockf.
-// Reason explains why the goroutine has been parked.
-// It is displayed in stack traces and heap dumps.
-// Reasons should be unique and descriptive.
-// Do not re-use reasons, add new ones.
+//
+// Note that because unlockf is called after putting the G into a waiting
+// state, the G may have already been readied by the time unlockf is called
+// unless there is external synchronization preventing the G from being
+// readied. If unlockf returns false, it must guarantee that the G cannot be
+// externally readied.
+//
+// Reason explains why the goroutine has been parked. It is displayed in stack
+// traces and heap dumps. Reasons should be unique and descriptive. Do not
+// re-use reasons, add new ones.
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) {
if reason != waitReasonSleep {
checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy
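
The expanded comment above is the key contract: unlockf runs only after the G has been put into a waiting state, so another thread may ready the G before unlockf even returns unless outside synchronization prevents it. The following standalone sketch (plain channels and atomics, nothing from the runtime; parker, waker, and readied are made-up names) shows that ordering:

package main

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
)

func main() {
	var (
		waiting int32                    // set once the parker has published "I am waiting"
		readied = make(chan struct{}, 1) // the waker "readies" the parker through this
		mu      sync.Mutex
		wg      sync.WaitGroup
	)

	wg.Add(2)

	// Parker: plays the role of a goroutine calling gopark(unlockf, lock, ...).
	go func() {
		defer wg.Done()
		mu.Lock()
		atomic.StoreInt32(&waiting, 1) // waiting state is published first...
		mu.Unlock()                    // ...then the "unlockf" step runs; the waker may already have fired
		<-readied                      // block until readied, like the parked G
		fmt.Println("parker: resumed")
	}()

	// Waker: plays the role of goready; it may run any time after the waiting
	// state is visible, possibly before the parker's unlock step returns.
	go func() {
		defer wg.Done()
		for atomic.LoadInt32(&waiting) == 0 {
			runtime.Gosched()
		}
		readied <- struct{}{}
		fmt.Println("waker: readied the parker")
	}()

	wg.Wait()
}

The point of the sketch is only the ordering: once the waiting flag is published, the waker is free to act, which is exactly why unlockf must not assume the G is still parked.
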
@@ -501,7 +533,7 @@ func cpuinit() {
var env string
switch GOOS {
- case "aix", "darwin", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
+ case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
cpu.DebugOptions = true
// Similar to goenv_unix but extracts the environment value for
@@ -559,12 +591,19 @@ func schedinit() {
lockInit(&trace.lock, lockRankTrace)
lockInit(&cpuprof.lock, lockRankCpuprof)
lockInit(&trace.stackTab.lock, lockRankTraceStackTab)
+ // Enforce that this lock is always a leaf lock.
+ // All of this lock's critical sections should be
+ // extremely short.
+ lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
_g_ := getg()
sched.maxmcount = 10000
usestackmaps = probestackmaps()
+ // The world starts stopped.
+ worldStopped()
+
mallocinit()
fastrandinit() // must run before mcommoninit
mcommoninit(_g_.m, -1)
@@ -579,6 +618,7 @@ func schedinit() {
parsedebugvars()
gcinit()
+ lock(&sched.lock)
sched.lastpoll = uint64(nanotime())
procs := ncpu
@@ -595,6 +635,10 @@ func schedinit() {
if procresize(procs) != nil {
throw("unknown runnable goroutine during bootstrap")
}
+ unlock(&sched.lock)
+
+ // World is effectively started now, as P's can run.
+ worldStarted()
// For cgocheck > 1, we turn on the write barrier at all times
// and check all pointer writes. We can't do this until after
@@ -625,8 +669,10 @@ func dumpgstatus(gp *g) {
print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
}
+// sched.lock must be held.
func checkmcount() {
- // sched lock is held
+ assertLockHeld(&sched.lock)
+
if mcount() > sched.maxmcount {
print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
throw("thread exhaustion")
@@ -638,6 +684,8 @@ func checkmcount() {
//
// sched.lock must be held.
func mReserveID() int64 {
+ assertLockHeld(&sched.lock)
+
if sched.mnext+1 < sched.mnext {
throw("runtime: thread ID overflow")
}
@@ -904,10 +952,26 @@ func stopTheWorld(reason string) {
// startTheWorld undoes the effects of stopTheWorld.
func startTheWorld() {
systemstack(func() { startTheWorldWithSema(false) })
+
// worldsema must be held over startTheWorldWithSema to ensure
// gomaxprocs cannot change while worldsema is held.
- semrelease(&worldsema)
- getg().m.preemptoff = ""
+ //
+ // Release worldsema with direct handoff to the next waiter, but
+ // acquirem so that semrelease1 doesn't try to yield our time.
+ //
+ // Otherwise if e.g. ReadMemStats is being called in a loop,
+ // it might stomp on other attempts to stop the world, such as
+ // for starting or ending GC. The operation this blocks is
+ // so heavy-weight that we should just try to be as fair as
+ // possible here.
+ //
+ // We don't want to just allow us to get preempted between now
+ // and releasing the semaphore because then we keep everyone
+ // (including, for example, GCs) waiting longer.
+ mp := acquirem()
+ mp.preemptoff = ""
+ semrelease1(&worldsema, true, 0)
+ releasem(mp)
}
// stopTheWorldGC has the same effect as stopTheWorld, but blocks
@@ -1031,9 +1095,13 @@ func stopTheWorldWithSema() {
if bad != "" {
throw(bad)
}
+
+ worldStopped()
}
func startTheWorldWithSema(emitTraceEvent bool) int64 {
+ assertWorldStopped()
+
mp := acquirem() // disable preemption because it can be holding p in a local var
if netpollinited() {
list := netpoll(0) // non-blocking
@@ -1054,6 +1122,8 @@ func startTheWorldWithSema(emitTraceEvent bool) int64 {
}
unlock(&sched.lock)
+ worldStarted()
+
for p1 != nil {
p := p1
p1 = p1.link.ptr()
@@ -1173,6 +1243,21 @@ func mstartm0() {
initsig(false)
}
+// mPark causes a thread to park itself - temporarily waking for
+// fixups but otherwise waiting to be fully woken. This is the
+// only way that m's should park themselves.
+//go:nosplit
+func mPark() {
+ g := getg()
+ for {
+ notesleep(&g.m.park)
+ noteclear(&g.m.park)
+ if !mDoFixup() {
+ return
+ }
+ }
+}
+
// mexit tears down and exits the current thread.
//
// Don't call this directly to exit the thread, since it must run at
@@ -1204,7 +1289,7 @@ func mexit(osStack bool) {
sched.nmfreed++
checkdead()
unlock(&sched.lock)
- notesleep(&m.park)
+ mPark()
throw("locked m0 woke up")
}
@@ -1258,7 +1343,7 @@ found:
checkdead()
unlock(&sched.lock)
- if GOOS == "darwin" {
+ if GOOS == "darwin" || GOOS == "ios" {
// Make sure pendingPreemptSignals is correct when an M exits.
// For #41702.
if atomic.Load(&m.signalPending) != 0 {
@@ -1428,7 +1513,12 @@ func allocm(_p_ *p, fn func(), id int64, allocatestack bool) (mp *m, g0Stack uns
freem = next
continue
}
- stackfree(freem.g0)
+ // stackfree must be on the system stack, but allocm is
+ // reachable off the system stack transitively from
+ // startm.
+ systemstack(func() {
+ stackfree(freem.g0)
+ })
freem = freem.freelink
}
sched.freem = newList
@@ -1484,7 +1574,7 @@ func allocm(_p_ *p, fn func(), id int64, allocatestack bool) (mp *m, g0Stack uns
// When the callback is done with the m, it calls dropm to
// put the m back on the list.
//go:nosplit
-func needm(x byte) {
+func needm() {
if (iscgo || GOOS == "windows") && !cgoHasExtraM {
// Can happen if C/C++ code calls Go from a global ctor.
// Can also happen on Windows if a global ctor uses a
@@ -1750,6 +1840,7 @@ var newmHandoff struct {
//go:nowritebarrierrec
func newm(fn func(), _p_ *p, id int64) {
mp, _, _ := allocm(_p_, fn, id, false)
+ mp.doesPark = (_p_ != nil)
mp.nextp.set(_p_)
mp.sigmask = initSigmask
if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
@@ -1806,6 +1897,44 @@ func startTemplateThread() {
releasem(mp)
}
+// mFixupRace is used to temporarily borrow the race context from the
+// coordinating m during a syscall_runtime_doAllThreadsSyscall and
+// loan it out to each of the m's of the runtime so they can execute a
+// mFixup.fn in that context.
+var mFixupRace struct {
+ lock mutex
+ ctx uintptr
+}
+
+// mDoFixup runs any outstanding fixup function for the running m.
+// Returns true if a fixup was outstanding and actually executed.
+//
+//go:nosplit
+func mDoFixup() bool {
+ _g_ := getg()
+ lock(&_g_.m.mFixup.lock)
+ fn := _g_.m.mFixup.fn
+ if fn != nil {
+ if gcphase != _GCoff {
+ // We can't have a write barrier in this
+ // context since we may not have a P, but we
+ // clear fn to signal that we've executed the
+ // fixup. As long as fn is kept alive
+ // elsewhere, technically we should have no
+ // issues with the GC, but fn is likely
+ // generated in a different package altogether
+ // that may change independently. Just assert
+ // the GC is off so this lack of write barrier
+ // is more obviously safe.
+ throw("GC must be disabled to protect validity of fn value")
+ }
+ *(*uintptr)(unsafe.Pointer(&_g_.m.mFixup.fn)) = 0
+ fn(false)
+ }
+ unlock(&_g_.m.mFixup.lock)
+ return fn != nil
+}
+
// templateThread is a thread in a known-good state that exists solely
// to start new threads in known-good states when the calling thread
// may not be in a good state.
@@ -1842,6 +1971,7 @@ func templateThread() {
noteclear(&newmHandoff.wake)
unlock(&newmHandoff.lock)
notesleep(&newmHandoff.wake)
+ mDoFixup()
}
}
@@ -1863,8 +1993,7 @@ func stopm() {
lock(&sched.lock)
mput(_g_.m)
unlock(&sched.lock)
- notesleep(&_g_.m.park)
- noteclear(&_g_.m.park)
+ mPark()
acquirep(_g_.m.nextp.ptr())
_g_.m.nextp = 0
}
@@ -1879,8 +2008,30 @@ func mspinning() {
// May run with m.p==nil, so write barriers are not allowed.
// If spinning is set, the caller has incremented nmspinning and startm will
// either decrement nmspinning or set m.spinning in the newly started M.
+//
+// Callers passing a non-nil P must call from a non-preemptible context. See
+// comment on acquirem below.
+//
+// Must not have write barriers because this may be called without a P.
//go:nowritebarrierrec
func startm(_p_ *p, spinning bool) {
+ // Disable preemption.
+ //
+ // Every owned P must have an owner that will eventually stop it in the
+ // event of a GC stop request. startm takes transient ownership of a P
+ // (either from argument or pidleget below) and transfers ownership to
+ // a started M, which will be responsible for performing the stop.
+ //
+ // Preemption must be disabled during this transient ownership,
+ // otherwise the P this is running on may enter GC stop while still
+ // holding the transient P, leaving that P in limbo and deadlocking the
+ // STW.
+ //
+ // Callers passing a non-nil P must already be in non-preemptible
+ // context, otherwise such preemption could occur on function entry to
+ // startm. Callers passing a nil P may be preemptible, so we must
+ // disable preemption before acquiring a P from pidleget below.
+ mp := acquirem()
lock(&sched.lock)
if _p_ == nil {
_p_ = pidleget()
@@ -1893,11 +2044,12 @@ func startm(_p_ *p, spinning bool) {
throw("startm: negative nmspinning")
}
}
+ releasem(mp)
return
}
}
- mp := mget()
- if mp == nil {
+ nmp := mget()
+ if nmp == nil {
// No M is available, we must drop sched.lock and call newm.
// However, we already own a P to assign to the M.
//
@@ -1919,22 +2071,28 @@ func startm(_p_ *p, spinning bool) {
fn = mspinning
}
newm(fn, _p_, id)
+ // Ownership transfer of _p_ committed by start in newm.
+ // Preemption is now safe.
+ releasem(mp)
return
}
unlock(&sched.lock)
- if mp.spinning {
+ if nmp.spinning {
throw("startm: m is spinning")
}
- if mp.nextp != 0 {
+ if nmp.nextp != 0 {
throw("startm: m has p")
}
if spinning && !runqempty(_p_) {
throw("startm: p has runnable gs")
}
// The caller incremented nmspinning, so set m.spinning in the new M.
- mp.spinning = spinning
- mp.nextp.set(_p_)
- notewakeup(&mp.park)
+ nmp.spinning = spinning
+ nmp.nextp.set(_p_)
+ notewakeup(&nmp.park)
+ // Ownership transfer of _p_ committed by wakeup. Preemption is now
+ // safe.
+ releasem(mp)
}
// Hands off P from syscall or locked M.
@@ -1989,11 +2147,16 @@ func handoffp(_p_ *p) {
startm(_p_, false)
return
}
- if when := nobarrierWakeTime(_p_); when != 0 {
- wakeNetPoller(when)
- }
+
+ // The scheduler lock cannot be held when calling wakeNetPoller below
+ // because wakeNetPoller may call wakep which may call startm.
+ when := nobarrierWakeTime(_p_)
pidleput(_p_)
unlock(&sched.lock)
+
+ if when != 0 {
+ wakeNetPoller(when)
+ }
}
// Tries to add one more P to execute G's.
@@ -2024,12 +2187,11 @@ func stoplockedm() {
}
incidlelocked(1)
// Wait until another thread schedules lockedg again.
- notesleep(&_g_.m.park)
- noteclear(&_g_.m.park)
+ mPark()
status := readgstatus(_g_.m.lockedg.ptr())
if status&^_Gscan != _Grunnable {
- print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n")
- dumpgstatus(_g_)
+ print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
+ dumpgstatus(_g_.m.lockedg.ptr())
throw("stoplockedm: not runnable")
}
acquirep(_g_.m.nextp.ptr())
@@ -2202,31 +2364,33 @@ top:
_g_.m.spinning = true
atomic.Xadd(&sched.nmspinning, 1)
}
- for i := 0; i < 4; i++ {
+ const stealTries = 4
+ for i := 0; i < stealTries; i++ {
+ stealTimersOrRunNextG := i == stealTries-1
+
for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
if sched.gcwaiting != 0 {
goto top
}
- stealRunNextG := i > 2 // first look for ready queues with more than 1 g
p2 := allp[enum.position()]
if _p_ == p2 {
continue
}
- if gp := runqsteal(_p_, p2, stealRunNextG); gp != nil {
- return gp, false
- }
- // Consider stealing timers from p2.
- // This call to checkTimers is the only place where
- // we hold a lock on a different P's timers.
- // Lock contention can be a problem here, so
- // initially avoid grabbing the lock if p2 is running
- // and is not marked for preemption. If p2 is running
- // and not being preempted we assume it will handle its
- // own timers.
- // If we're still looking for work after checking all
- // the P's, then go ahead and steal from an active P.
- if i > 2 || (i > 1 && shouldStealTimers(p2)) {
+ // Steal timers from p2. This call to checkTimers is the only place
+ // where we might hold a lock on a different P's timers. We do this
+ // once on the last pass before checking runnext because stealing
+ // from the other P's runnext should be the last resort, so if there
+ // are timers to steal do that first.
+ //
+ // We only check timers on one of the stealing iterations because
+ // the time stored in now doesn't change in this loop and checking
+ // the timers for each P more than once with the same value of now
+ // is probably a waste of time.
+ //
+ // timerpMask tells us whether the P may have timers at all. If it
+ // can't, no need to check at all.
+ if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
tnow, w, ran := checkTimers(p2, now)
now = tnow
if w != 0 && (pollUntil == 0 || w < pollUntil) {
@@ -2247,6 +2411,13 @@ top:
ranTimer = true
}
}
+
+ // Don't bother to attempt to steal if p2 is idle.
+ if !idlepMask.read(enum.position()) {
+ if gp := runqsteal(_p_, p2, stealTimersOrRunNextG); gp != nil {
+ return gp, false
+ }
+ }
}
}
if ranTimer {
@@ -2259,14 +2430,17 @@ stop:
// We have nothing to do. If we're in the GC mark phase, can
// safely scan and blacken objects, and have work to do, run
// idle-time marking rather than give up the P.
- if gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != 0 && gcMarkWorkAvailable(_p_) {
- _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
- gp := _p_.gcBgMarkWorker.ptr()
- casgstatus(gp, _Gwaiting, _Grunnable)
- if trace.enabled {
- traceGoUnpark(gp, 0)
+ if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
+ node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
+ if node != nil {
+ _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
+ gp := node.gp.ptr()
+ casgstatus(gp, _Gwaiting, _Grunnable)
+ if trace.enabled {
+ traceGoUnpark(gp, 0)
+ }
+ return gp, false
}
- return gp, false
}
delta := int64(-1)
@@ -2296,6 +2470,10 @@ stop:
// safe-points. We don't need to snapshot the contents because
// everything up to cap(allp) is immutable.
allpSnapshot := allp
+ // Also snapshot masks. Value changes are OK, but we can't allow
+ // len to change out from under us.
+ idlepMaskSnapshot := idlepMask
+ timerpMaskSnapshot := timerpMask
// return P and block
lock(&sched.lock)
@@ -2319,7 +2497,7 @@ stop:
// drop nmspinning first and then check all per-P queues again (with
// #StoreLoad memory barrier in between). If we do it the other way around,
// another thread can submit a goroutine after we've checked all run queues
- // but before we drop nmspinning; as the result nobody will unpark a thread
+ // but before we drop nmspinning; as a result nobody will unpark a thread
// to run the goroutine.
// If we discover new work below, we need to restore m.spinning as a signal
// for resetspinning to unpark a new worker thread (because there can be more
@@ -2336,8 +2514,8 @@ stop:
}
// check all runqueues once again
- for _, _p_ := range allpSnapshot {
- if !runqempty(_p_) {
+ for id, _p_ := range allpSnapshot {
+ if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(_p_) {
lock(&sched.lock)
_p_ = pidleget()
unlock(&sched.lock)
@@ -2353,13 +2531,56 @@ stop:
}
}
+ // Similar to above, check for timer creation or expiry concurrently with
+ // transitioning from spinning to non-spinning. Note that we cannot use
+ // checkTimers here because it calls adjusttimers which may need to allocate
+ // memory, and that isn't allowed when we don't have an active P.
+ for id, _p_ := range allpSnapshot {
+ if timerpMaskSnapshot.read(uint32(id)) {
+ w := nobarrierWakeTime(_p_)
+ if w != 0 && (pollUntil == 0 || w < pollUntil) {
+ pollUntil = w
+ }
+ }
+ }
+ if pollUntil != 0 {
+ if now == 0 {
+ now = nanotime()
+ }
+ delta = pollUntil - now
+ if delta < 0 {
+ delta = 0
+ }
+ }
+
// Check for idle-priority GC work again.
- if gcBlackenEnabled != 0 && gcMarkWorkAvailable(nil) {
+ //
+ // N.B. Since we have no P, gcBlackenEnabled may change at any time; we
+ // must check again after acquiring a P.
+ if atomic.Load(&gcBlackenEnabled) != 0 && gcMarkWorkAvailable(nil) {
+ // Work is available; we can start an idle GC worker only if
+ // there is an available P and available worker G.
+ //
+ // We can attempt to acquire these in either order. Workers are
+ // almost always available (see comment in findRunnableGCWorker
+ // for the one case there may be none). Since we're slightly
+ // less likely to find a P, check for that first.
lock(&sched.lock)
+ var node *gcBgMarkWorkerNode
_p_ = pidleget()
- if _p_ != nil && _p_.gcBgMarkWorker == 0 {
- pidleput(_p_)
- _p_ = nil
+ if _p_ != nil {
+ // Now that we own a P, gcBlackenEnabled can't change
+ // (as it requires STW).
+ if gcBlackenEnabled != 0 {
+ node = (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
+ if node == nil {
+ pidleput(_p_)
+ _p_ = nil
+ }
+ } else {
+ pidleput(_p_)
+ _p_ = nil
+ }
}
unlock(&sched.lock)
if _p_ != nil {
@@ -2368,8 +2589,15 @@ stop:
_g_.m.spinning = true
atomic.Xadd(&sched.nmspinning, 1)
}
- // Go back to idle GC check.
- goto stop
+
+ // Run the idle worker.
+ _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
+ gp := node.gp.ptr()
+ casgstatus(gp, _Gwaiting, _Grunnable)
+ if trace.enabled {
+ traceGoUnpark(gp, 0)
+ }
+ return gp, false
}
}
@@ -2448,9 +2676,9 @@ func pollWork() bool {
return false
}
-// wakeNetPoller wakes up the thread sleeping in the network poller,
-// if there is one, and if it isn't going to wake up anyhow before
-// the when argument.
+// wakeNetPoller wakes up the thread sleeping in the network poller if it isn't
+// going to wake up before the when argument; or it wakes an idle P to service
+// timers and the network poller if there isn't one already.
func wakeNetPoller(when int64) {
if atomic.Load64(&sched.lastpoll) == 0 {
// In findrunnable we ensure that when polling the pollUntil
@@ -2461,6 +2689,10 @@ func wakeNetPoller(when int64) {
if pollerPollUntil == 0 || pollerPollUntil > when {
netpollBreak()
}
+ } else {
+ // There are no threads in the network poller, try to get
+ // one there so it can handle new timers.
+ wakep()
}
}
@@ -2486,7 +2718,7 @@ func resetspinning() {
// Otherwise, for each idle P, this adds a G to the global queue
// and starts an M. Any remaining G's are added to the current P's
// local run queue.
-// This may temporarily acquire the scheduler lock.
+// This may temporarily acquire sched.lock.
// Can run concurrently with GC.
func injectglist(glist *gList) {
if glist.empty() {
@@ -2530,15 +2762,20 @@ func injectglist(glist *gList) {
return
}
- lock(&sched.lock)
- npidle := int(sched.npidle)
+ npidle := int(atomic.Load(&sched.npidle))
+ var globq gQueue
var n int
for n = 0; n < npidle && !q.empty(); n++ {
- globrunqput(q.pop())
+ g := q.pop()
+ globq.pushBack(g)
+ }
+ if n > 0 {
+ lock(&sched.lock)
+ globrunqputbatch(&globq, int32(n))
+ unlock(&sched.lock)
+ startIdle(n)
+ qsize -= n
}
- unlock(&sched.lock)
- startIdle(n)
- qsize -= n
if !q.empty() {
runqputbatch(pp, &q, qsize)
@@ -2702,40 +2939,40 @@ func dropg() {
// We pass now in and out to avoid extra calls of nanotime.
//go:yeswritebarrierrec
func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) {
- // If there are no timers to adjust, and the first timer on
- // the heap is not yet ready to run, then there is nothing to do.
- if atomic.Load(&pp.adjustTimers) == 0 {
- next := int64(atomic.Load64(&pp.timer0When))
- if next == 0 {
- return now, 0, false
- }
- if now == 0 {
- now = nanotime()
- }
- if now < next {
- // Next timer is not ready to run.
- // But keep going if we would clear deleted timers.
- // This corresponds to the condition below where
- // we decide whether to call clearDeletedTimers.
- if pp != getg().m.p.ptr() || int(atomic.Load(&pp.deletedTimers)) <= int(atomic.Load(&pp.numTimers)/4) {
- return now, next, false
- }
+ // If it's not yet time for the first timer, or the first adjusted
+ // timer, then there is nothing to do.
+ next := int64(atomic.Load64(&pp.timer0When))
+ nextAdj := int64(atomic.Load64(&pp.timerModifiedEarliest))
+ if next == 0 || (nextAdj != 0 && nextAdj < next) {
+ next = nextAdj
+ }
+
+ if next == 0 {
+ // No timers to run or adjust.
+ return now, 0, false
+ }
+
+ if now == 0 {
+ now = nanotime()
+ }
+ if now < next {
+ // Next timer is not ready to run, but keep going
+ // if we would clear deleted timers.
+ // This corresponds to the condition below where
+ // we decide whether to call clearDeletedTimers.
+ if pp != getg().m.p.ptr() || int(atomic.Load(&pp.deletedTimers)) <= int(atomic.Load(&pp.numTimers)/4) {
+ return now, next, false
}
}
lock(&pp.timersLock)
- adjusttimers(pp)
-
- rnow = now
if len(pp.timers) > 0 {
- if rnow == 0 {
- rnow = nanotime()
- }
+ adjusttimers(pp, now)
for len(pp.timers) > 0 {
// Note that runtimer may temporarily unlock
// pp.timersLock.
- if tw := runtimer(pp, rnow); tw != 0 {
+ if tw := runtimer(pp, now); tw != 0 {
if tw > 0 {
pollUntil = tw
}
@@ -2754,26 +2991,7 @@ func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) {
unlock(&pp.timersLock)
- return rnow, pollUntil, ran
-}
-
-// shouldStealTimers reports whether we should try stealing the timers from p2.
-// We don't steal timers from a running P that is not marked for preemption,
-// on the assumption that it will run its own timers. This reduces
-// contention on the timers lock.
-func shouldStealTimers(p2 *p) bool {
- if p2.status != _Prunning {
- return true
- }
- mp := p2.m.ptr()
- if mp == nil || mp.locks > 0 {
- return false
- }
- gp := mp.curg
- if gp == nil || gp.atomicstatus != _Grunning || !gp.preempt {
- return false
- }
- return true
+ return now, pollUntil, ran
}
func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
@@ -2930,7 +3148,8 @@ func goexit0(gp *g) {
// Flush assist credit to the global pool. This gives
// better information to pacing if the application is
// rapidly creating and exiting goroutines.
- scanCredit := int64(gcController.assistWorkPerByte * float64(gp.gcAssistBytes))
+ assistWorkPerByte := float64frombits(atomic.Load64(&gcController.assistWorkPerByte))
+ scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
atomic.Xaddint64(&gcController.bgScanCredit, scanCredit)
gp.gcAssistBytes = 0
}
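
The replacement above loads assistWorkPerByte as a raw uint64 and converts it with float64frombits, since a float64 that is read without holding a lock has to be published as its bit pattern. A standalone sketch of the same pattern using the public math and sync/atomic packages (the atomicFloat64 type is an illustrative stand-in, not the runtime's):

package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

// atomicFloat64 stores a float64 as its IEEE-754 bit pattern so it can be
// read and written atomically without a lock.
type atomicFloat64 struct{ bits uint64 }

func (f *atomicFloat64) Store(v float64) { atomic.StoreUint64(&f.bits, math.Float64bits(v)) }
func (f *atomicFloat64) Load() float64   { return math.Float64frombits(atomic.LoadUint64(&f.bits)) }

func main() {
	var assistWorkPerByte atomicFloat64 // name borrowed from the diff for illustration
	assistWorkPerByte.Store(0.25)
	fmt.Println(assistWorkPerByte.Load()) // 0.25
}
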
@@ -3376,7 +3595,7 @@ func syscall_runtime_BeforeExec() {
// On Darwin, wait for all pending preemption signals to
// be received. See issue #41702.
- if GOOS == "darwin" {
+ if GOOS == "darwin" || GOOS == "ios" {
for int32(atomic.Load(&pendingPreemptSignals)) > 0 {
osyield()
}
@@ -3760,6 +3979,12 @@ func sigprof(pc uintptr, gp *g, mp *m) {
return
}
+ // If mp.profilehz is 0, then profiling is not enabled for this thread.
+ // We must check this to avoid a deadlock between setcpuprofilerate
+ // and the call to cpuprof.add, below.
+ if mp != nil && mp.profilehz == 0 {
+ return
+ }
// Profiling runs concurrently with GC, so it must not allocate.
// Set a trap in case the code does allocate.
// Note that on windows, one thread takes profiles of all the
@@ -3768,6 +3993,71 @@ func sigprof(pc uintptr, gp *g, mp *m) {
// See golang.org/issue/17165.
getg().m.mallocing++
+ // Define that a "user g" is a user-created goroutine, and a "system g"
+ // is one that is m->g0 or m->gsignal.
+ //
+ // We might be interrupted for profiling halfway through a
+ // goroutine switch. The switch involves updating three (or four) values:
+ // g, PC, SP, and (on arm) LR. The PC must be the last to be updated,
+ // because once it gets updated the new g is running.
+ //
+ // When switching from a user g to a system g, LR is not considered live,
+ // so the update only affects g, SP, and PC. Since PC must be last, there
+ // the possible partial transitions in ordinary execution are (1) g alone is updated,
+ // (2) both g and SP are updated, and (3) SP alone is updated.
+ // If SP or g alone is updated, we can detect the partial transition by checking
+ // whether the SP is within g's stack bounds. (We could also require that SP
+ // be changed only after g, but the stack bounds check is needed by other
+ // cases, so there is no need to impose an additional requirement.)
+ //
+ // There is one exceptional transition to a system g, not in ordinary execution.
+ // When a signal arrives, the operating system starts the signal handler running
+ // with an updated PC and SP. The g is updated last, at the beginning of the
+ // handler. There are two reasons this is okay. First, until g is updated the
+ // g and SP do not match, so the stack bounds check detects the partial transition.
+ // Second, signal handlers currently run with signals disabled, so a profiling
+ // signal cannot arrive during the handler.
+ //
+ // When switching from a system g to a user g, there are three possibilities.
+ //
+ // First, it may be that the g switch has no PC update, because the SP
+ // either corresponds to a user g throughout (as in asmcgocall)
+ // or because it has been arranged to look like a user g frame
+ // (as in cgocallback). In this case, since the entire
+ // transition is a g+SP update, a partial transition updating just one of
+ // those will be detected by the stack bounds check.
+ //
+ // Second, when returning from a signal handler, the PC and SP updates
+ // are performed by the operating system in an atomic update, so the g
+ // update must be done before them. The stack bounds check detects
+ // the partial transition here, and (again) signal handlers run with signals
+ // disabled, so a profiling signal cannot arrive then anyway.
+ //
+ // Third, the common case: it may be that the switch updates g, SP, and PC
+ // separately. If the PC is within any of the functions that does this,
+ // we don't ask for a traceback. C.F. the function setsSP for more about this.
+ //
+ // There is another apparently viable approach, recorded here in case
+ // the "PC within setsSP function" check turns out not to be usable.
+ // It would be possible to delay the update of either g or SP until immediately
+ // before the PC update instruction. Then, because of the stack bounds check,
+ // the only problematic interrupt point is just before that PC update instruction,
+ // and the sigprof handler can detect that instruction and simulate stepping past
+ // it in order to reach a consistent state. On ARM, the update of g must be made
+ // in two places (in R10 and also in a TLS slot), so the delayed update would
+ // need to be the SP update. The sigprof handler must read the instruction at
+ // the current PC and if it was the known instruction (for example, JMP BX or
+ // MOV R2, PC), use that other register in place of the PC value.
+ // The biggest drawback to this solution is that it requires that we can tell
+ // whether it's safe to read from the memory pointed at by PC.
+ // In a correct program, we can test PC == nil and otherwise read,
+ // but if a profiling signal happens at the instant that a program executes
+ // a bad jump (before the program manages to handle the resulting fault)
+ // the profiling handler could fault trying to read nonexistent memory.
+ //
+ // To recap, there are no constraints on the assembly being used for the
+ // transition. We simply require that g and SP match and that the PC is not
+ // in gogo.
traceback := true
// If SIGPROF arrived while already fetching runtime callers
@@ -3953,6 +4243,13 @@ func (pp *p) init(id int32) {
}
}
lockInit(&pp.timersLock, lockRankTimers)
+
+ // This P may get timers when it starts running. Set the mask here
+ // since the P may not go through pidleget (notably P 0 on startup).
+ timerpMask.set(id)
+ // Similarly, we may not go through pidleget before this P starts
+ // running if it is P 0 on startup.
+ idlepMask.clear(id)
}
// destroy releases all of the resources associated with pp and
@@ -3960,6 +4257,9 @@ func (pp *p) init(id int32) {
//
// sched.lock must be held and the world must be stopped.
func (pp *p) destroy() {
+ assertLockHeld(&sched.lock)
+ assertWorldStopped()
+
// Move all runnable goroutines to the global queue
for pp.runqhead != pp.runqtail {
// Pop from tail of local queue
@@ -3989,18 +4289,6 @@ func (pp *p) destroy() {
unlock(&pp.timersLock)
unlock(&plocal.timersLock)
}
- // If there's a background worker, make it runnable and put
- // it on the global queue so it can clean itself up.
- if gp := pp.gcBgMarkWorker.ptr(); gp != nil {
- casgstatus(gp, _Gwaiting, _Grunnable)
- if trace.enabled {
- traceGoUnpark(gp, 0)
- }
- globrunqput(gp)
- // This assignment doesn't race because the
- // world is stopped.
- pp.gcBgMarkWorker.set(nil)
- }
// Flush p's write barrier buffer.
if gcphase != _GCoff {
wbBufFlush1(pp)
@@ -4020,7 +4308,9 @@ func (pp *p) destroy() {
mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
}
pp.mspancache.len = 0
+ lock(&mheap_.lock)
pp.pcache.flush(&mheap_.pages)
+ unlock(&mheap_.lock)
})
freemcache(pp.mcache)
pp.mcache = nil
@@ -4030,11 +4320,18 @@ func (pp *p) destroy() {
pp.status = _Pdead
}
-// Change number of processors. The world is stopped, sched is locked.
-// gcworkbufs are not being modified by either the GC or
-// the write barrier code.
+// Change number of processors.
+//
+// sched.lock must be held, and the world must be stopped.
+//
+// gcworkbufs must not be being modified by either the GC or the write barrier
+// code, so the GC must not be running if the number of Ps actually changes.
+//
// Returns list of Ps with local work, they need to be scheduled by the caller.
func procresize(nprocs int32) *p {
+ assertLockHeld(&sched.lock)
+ assertWorldStopped()
+
old := gomaxprocs
if old < 0 || nprocs <= 0 {
throw("procresize: invalid arg")
@@ -4050,6 +4347,8 @@ func procresize(nprocs int32) *p {
}
sched.procresizetime = now
+ maskWords := (nprocs + 31) / 32
+
// Grow allp if necessary.
if nprocs > int32(len(allp)) {
// Synchronize with retake, which could be running
@@ -4064,6 +4363,20 @@ func procresize(nprocs int32) *p {
copy(nallp, allp[:cap(allp)])
allp = nallp
}
+
+ if maskWords <= int32(cap(idlepMask)) {
+ idlepMask = idlepMask[:maskWords]
+ timerpMask = timerpMask[:maskWords]
+ } else {
+ nidlepMask := make([]uint32, maskWords)
+ // No need to copy beyond len, old Ps are irrelevant.
+ copy(nidlepMask, idlepMask)
+ idlepMask = nidlepMask
+
+ ntimerpMask := make([]uint32, maskWords)
+ copy(ntimerpMask, timerpMask)
+ timerpMask = ntimerpMask
+ }
unlock(&allpLock)
}
@@ -4122,6 +4435,8 @@ func procresize(nprocs int32) *p {
if int32(len(allp)) != nprocs {
lock(&allpLock)
allp = allp[:nprocs]
+ idlepMask = idlepMask[:maskWords]
+ timerpMask = timerpMask[:maskWords]
unlock(&allpLock)
}
@@ -4226,6 +4541,8 @@ func incidlelocked(v int32) {
// The check is based on number of running M's, if 0 -> deadlock.
// sched.lock must be held.
func checkdead() {
+ assertLockHeld(&sched.lock)
+
// For -buildmode=c-shared or -buildmode=c-archive it's OK if
// there are no running goroutines. The calling program is
// assumed to be running.
@@ -4341,9 +4658,14 @@ func sysmon() {
checkdead()
unlock(&sched.lock)
+ // For syscall_runtime_doAllThreadsSyscall, sysmon is
+ // sufficiently up to participate in fixups.
+ atomic.Store(&sched.sysmonStarting, 0)
+
lasttrace := int64(0)
idle := 0 // how many cycles in succession we had not woken up somebody
delay := uint32(0)
+
for {
if idle == 0 { // start with 20us sleep...
delay = 20
@@ -4354,11 +4676,29 @@ func sysmon() {
delay = 10 * 1000
}
usleep(delay)
+ mDoFixup()
+
+ // sysmon should not enter deep sleep if schedtrace is enabled so that
+ // it can print that information at the right time.
+ //
+ // It should also not enter deep sleep if there are any active P's so
+ // that it can retake P's from syscalls, preempt long running G's, and
+ // poll the network if all P's are busy for long stretches.
+ //
+ // It should wakeup from deep sleep if any P's become active either due
+ // to exiting a syscall or waking up due to a timer expiring so that it
+ // can resume performing those duties. If it wakes from a syscall it
+ // resets idle and delay as a bet that since it had retaken a P from a
+ // syscall before, it may need to do it again shortly after the
+ // application starts work again. It does not reset idle when waking
+ // from a timer to avoid adding system load to applications that spend
+ // most of their time sleeping.
now := nanotime()
- next, _ := timeSleepUntil()
if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
lock(&sched.lock)
if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
+ syscallWake := false
+ next, _ := timeSleepUntil()
if next > now {
atomic.Store(&sched.sysmonwait, 1)
unlock(&sched.lock)
@@ -4372,32 +4712,27 @@ func sysmon() {
if shouldRelax {
osRelax(true)
}
- notetsleep(&sched.sysmonnote, sleep)
+ syscallWake = notetsleep(&sched.sysmonnote, sleep)
+ mDoFixup()
if shouldRelax {
osRelax(false)
}
- now = nanotime()
- next, _ = timeSleepUntil()
lock(&sched.lock)
atomic.Store(&sched.sysmonwait, 0)
noteclear(&sched.sysmonnote)
}
- idle = 0
- delay = 20
+ if syscallWake {
+ idle = 0
+ delay = 20
+ }
}
unlock(&sched.lock)
}
+
lock(&sched.sysmonlock)
- {
- // If we spent a long time blocked on sysmonlock
- // then we want to update now and next since it's
- // likely stale.
- now1 := nanotime()
- if now1-now > 50*1000 /* 50µs */ {
- next, _ = timeSleepUntil()
- }
- now = now1
- }
+ // Update now in case we blocked on sysmonnote or spent a long time
+ // blocked on schedlock or sysmonlock above.
+ now = nanotime()
// trigger libc interceptors if needed
if *cgo_yield != nil {
@@ -4421,12 +4756,7 @@ func sysmon() {
incidlelocked(1)
}
}
- if next < now {
- // There are timers that should have already run,
- // perhaps because there is an unpreemptible P.
- // Try to start an M to run them.
- startm(nil, false)
- }
+ mDoFixup()
if atomic.Load(&scavenge.sysmonWake) != 0 {
// Kick the scavenger awake if someone requested it.
wakeScavenger()
@@ -4709,7 +5039,11 @@ func schedEnableUser(enable bool) {
// schedEnabled reports whether gp should be scheduled. It returns
// false if scheduling of gp is disabled.
+//
+// sched.lock must be held.
func schedEnabled(gp *g) bool {
+ assertLockHeld(&sched.lock)
+
if sched.disable.user {
return isSystemGoroutine(gp, true)
}
@@ -4717,10 +5051,12 @@ func schedEnabled(gp *g) bool {
}
// Put mp on midle list.
-// Sched must be locked.
+// sched.lock must be held.
// May run during STW, so write barriers are not allowed.
//go:nowritebarrierrec
func mput(mp *m) {
+ assertLockHeld(&sched.lock)
+
mp.schedlink = sched.midle
sched.midle.set(mp)
sched.nmidle++
@@ -4728,10 +5064,12 @@ func mput(mp *m) {
}
// Try to get an m from midle list.
-// Sched must be locked.
+// sched.lock must be held.
// May run during STW, so write barriers are not allowed.
//go:nowritebarrierrec
func mget() *m {
+ assertLockHeld(&sched.lock)
+
mp := sched.midle.ptr()
if mp != nil {
sched.midle = mp.schedlink
@@ -4741,35 +5079,43 @@ func mget() *m {
}
// Put gp on the global runnable queue.
-// Sched must be locked.
+// sched.lock must be held.
// May run during STW, so write barriers are not allowed.
//go:nowritebarrierrec
func globrunqput(gp *g) {
+ assertLockHeld(&sched.lock)
+
sched.runq.pushBack(gp)
sched.runqsize++
}
// Put gp at the head of the global runnable queue.
-// Sched must be locked.
+// sched.lock must be held.
// May run during STW, so write barriers are not allowed.
//go:nowritebarrierrec
func globrunqputhead(gp *g) {
+ assertLockHeld(&sched.lock)
+
sched.runq.push(gp)
sched.runqsize++
}
// Put a batch of runnable goroutines on the global runnable queue.
// This clears *batch.
-// Sched must be locked.
+// sched.lock must be held.
func globrunqputbatch(batch *gQueue, n int32) {
+ assertLockHeld(&sched.lock)
+
sched.runq.pushBackAll(*batch)
sched.runqsize += n
*batch = gQueue{}
}
// Try get a batch of G's from the global runnable queue.
-// Sched must be locked.
+// sched.lock must be held.
func globrunqget(_p_ *p, max int32) *g {
+ assertLockHeld(&sched.lock)
+
if sched.runqsize == 0 {
return nil
}
@@ -4796,26 +5142,106 @@ func globrunqget(_p_ *p, max int32) *g {
return gp
}
-// Put p to on _Pidle list.
-// Sched must be locked.
+// pMask is an atomic bitstring with one bit per P.
+type pMask []uint32
+
+// read returns true if P id's bit is set.
+func (p pMask) read(id uint32) bool {
+ word := id / 32
+ mask := uint32(1) << (id % 32)
+ return (atomic.Load(&p[word]) & mask) != 0
+}
+
+// set sets P id's bit.
+func (p pMask) set(id int32) {
+ word := id / 32
+ mask := uint32(1) << (id % 32)
+ atomic.Or(&p[word], mask)
+}
+
+// clear clears P id's bit.
+func (p pMask) clear(id int32) {
+ word := id / 32
+ mask := uint32(1) << (id % 32)
+ atomic.And(&p[word], ^mask)
+}
+
+// updateTimerPMask clears pp's timer mask if it has no timers on its heap.
+//
+// Ideally, the timer mask would be kept immediately consistent on any timer
+// operations. Unfortunately, updating a shared global data structure in the
+// timer hot path adds too much overhead in applications frequently switching
+// between no timers and some timers.
+//
+// As a compromise, the timer mask is updated only on pidleget / pidleput. A
+// running P (returned by pidleget) may add a timer at any time, so its mask
+// must be set. An idle P (passed to pidleput) cannot add new timers while
+// idle, so if it has no timers at that time, its mask may be cleared.
+//
+// Thus, we get the following effects on timer-stealing in findrunnable:
+//
+// * Idle Ps with no timers when they go idle are never checked in findrunnable
+// (for work- or timer-stealing; this is the ideal case).
+// * Running Ps must always be checked.
+// * Idle Ps whose timers are stolen must continue to be checked until they run
+// again, even after timer expiration.
+//
+// When the P starts running again, the mask should be set, as a timer may be
+// added at any time.
+//
+// TODO(prattmic): Additional targeted updates may improve the above cases.
+// e.g., updating the mask when stealing a timer.
+func updateTimerPMask(pp *p) {
+ if atomic.Load(&pp.numTimers) > 0 {
+ return
+ }
+
+ // Looks like there are no timers, however another P may transiently
+ // decrement numTimers when handling a timerModified timer in
+ // checkTimers. We must take timersLock to serialize with these changes.
+ lock(&pp.timersLock)
+ if atomic.Load(&pp.numTimers) == 0 {
+ timerpMask.clear(pp.id)
+ }
+ unlock(&pp.timersLock)
+}
+
+// pidleput puts p on the _Pidle list.
+//
+// This releases ownership of p. Once sched.lock is released it is no longer
+// safe to use p.
+//
+// sched.lock must be held.
+//
// May run during STW, so write barriers are not allowed.
//go:nowritebarrierrec
func pidleput(_p_ *p) {
+ assertLockHeld(&sched.lock)
+
if !runqempty(_p_) {
throw("pidleput: P has non-empty run queue")
}
+ updateTimerPMask(_p_) // clear if there are no timers.
+ idlepMask.set(_p_.id)
_p_.link = sched.pidle
sched.pidle.set(_p_)
atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic
}
-// Try get a p from _Pidle list.
-// Sched must be locked.
+// pidleget tries to get a p from the _Pidle list, acquiring ownership.
+//
+// sched.lock must be held.
+//
// May run during STW, so write barriers are not allowed.
//go:nowritebarrierrec
func pidleget() *p {
+ assertLockHeld(&sched.lock)
+
_p_ := sched.pidle.ptr()
if _p_ != nil {
+ // Timer may get added at any time now.
+ timerpMask.set(_p_.id)
+ idlepMask.clear(_p_.id)
sched.pidle = _p_.link
atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic
}
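
pMask above is an atomic bitstring with one bit per P, and pidleput/pidleget keep the idle and timer masks in sync as Ps go idle or start running. The runtime's set and clear use internal atomic Or/And helpers; the standalone approximation below uses CAS loops from sync/atomic instead, so it only illustrates the word/mask arithmetic (id/32 and 1<<(id%32)), not the exact runtime code:

package main

import (
	"fmt"
	"sync/atomic"
)

// pMask is a bitstring with one bit per P, stored in 32-bit words.
type pMask []uint32

// read reports whether P id's bit is set.
func (p pMask) read(id uint32) bool {
	word := id / 32
	mask := uint32(1) << (id % 32)
	return atomic.LoadUint32(&p[word])&mask != 0
}

// set sets P id's bit, using a CAS loop in place of an atomic OR.
func (p pMask) set(id uint32) {
	word := id / 32
	mask := uint32(1) << (id % 32)
	for {
		old := atomic.LoadUint32(&p[word])
		if atomic.CompareAndSwapUint32(&p[word], old, old|mask) {
			return
		}
	}
}

// clear clears P id's bit, using a CAS loop in place of an atomic AND.
func (p pMask) clear(id uint32) {
	word := id / 32
	mask := uint32(1) << (id % 32)
	for {
		old := atomic.LoadUint32(&p[word])
		if atomic.CompareAndSwapUint32(&p[word], old, old&^mask) {
			return
		}
	}
}

func main() {
	m := make(pMask, (64+31)/32) // room for 64 Ps
	m.set(5)
	m.set(40)
	fmt.Println(m.read(5), m.read(6), m.read(40)) // true false true
	m.clear(5)
	fmt.Println(m.read(5)) // false
}
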
@@ -5308,3 +5734,14 @@ func gcd(a, b uint32) uint32 {
}
return a
}
+
+// inittrace stores statistics for init functions which are
+// updated by malloc and newproc when active is true.
+var inittrace tracestat
+
+type tracestat struct {
+ active bool // init tracing activation status
+ id int64 // init go routine id
+ allocs uint64 // heap allocations
+ bytes uint64 // heap allocated bytes
+}
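
For context, tracestat is the backing store for the init tracing enabled near the top of this patch (debug.inittrace in main). Assuming the stock Go 1.16 behaviour, setting GODEBUG=inittrace=1 makes the runtime print one line per package init with clock time and allocation statistics; the small program below merely gives the tracer a few package inits to report on.

// Run as: GODEBUG=inittrace=1 go run main.go
package main

import (
	"fmt"

	_ "net/http" // pulls in several package init functions worth tracing
)

func main() {
	fmt.Println("init functions have already run by the time main starts")
}
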