diff options
author | Ian Lance Taylor <iant@golang.org> | 2020-04-03 15:01:34 -0700 |
---|---|---|
committer | Ian Lance Taylor <iant@golang.org> | 2020-04-03 15:01:34 -0700 |
commit | 00eb71c43c74cc5143b60d470450c3981037ed3c (patch) | |
tree | 66833e46153e7869903229adb39ebb1a86c86169 /libgo/go/runtime/proc.go | |
parent | 213caedb0104ed919b67b3446a53f06054d62fec (diff) | |
parent | ff229375721d1763a18ec76403aa1215b2932fb3 (diff) | |
download | gcc-00eb71c43c74cc5143b60d470450c3981037ed3c.zip gcc-00eb71c43c74cc5143b60d470450c3981037ed3c.tar.gz gcc-00eb71c43c74cc5143b60d470450c3981037ed3c.tar.bz2 |
Merge from trunk revision ff229375721d1763a18ec76403aa1215b2932fb3
Diffstat (limited to 'libgo/go/runtime/proc.go')
-rw-r--r-- | libgo/go/runtime/proc.go | 125 |
1 file changed, 67 insertions(+), 58 deletions(-)
diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go index c0e8577..f75cacf 100644 --- a/libgo/go/runtime/proc.go +++ b/libgo/go/runtime/proc.go @@ -563,6 +563,14 @@ func schedinit() { sched.lastpoll = uint64(nanotime()) procs := ncpu + + // In 32-bit mode, we can burn a lot of memory on thread stacks. + // Try to avoid this by limiting the number of threads we run + // by default. + if sys.PtrSize == 4 && procs > 32 { + procs = 32 + } + if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 { procs = n } @@ -833,23 +841,8 @@ func casGFromPreempted(gp *g, old, new uint32) bool { // goroutines. func stopTheWorld(reason string) { semacquire(&worldsema) - gp := getg() - gp.m.preemptoff = reason - systemstack(func() { - // Mark the goroutine which called stopTheWorld preemptible so its - // stack may be scanned. - // This lets a mark worker scan us while we try to stop the world - // since otherwise we could get in a mutual preemption deadlock. - // We must not modify anything on the G stack because a stack shrink - // may occur. A stack shrink is otherwise OK though because in order - // to return from this function (and to leave the system stack) we - // must have preempted all goroutines, including any attempting - // to scan our stack, in which case, any stack shrinking will - // have already completed by the time we exit. - casgstatus(gp, _Grunning, _Gwaiting) - stopTheWorldWithSema() - casgstatus(gp, _Gwaiting, _Grunning) - }) + getg().m.preemptoff = reason + systemstack(stopTheWorldWithSema) } // startTheWorld undoes the effects of stopTheWorld. @@ -861,31 +854,10 @@ func startTheWorld() { getg().m.preemptoff = "" } -// stopTheWorldGC has the same effect as stopTheWorld, but blocks -// until the GC is not running. It also blocks a GC from starting -// until startTheWorldGC is called. -func stopTheWorldGC(reason string) { - semacquire(&gcsema) - stopTheWorld(reason) -} - -// startTheWorldGC undoes the effects of stopTheWorldGC. 
-func startTheWorldGC() { - startTheWorld() - semrelease(&gcsema) -} - -// Holding worldsema grants an M the right to try to stop the world. +// Holding worldsema grants an M the right to try to stop the world +// and prevents gomaxprocs from changing concurrently. var worldsema uint32 = 1 -// Holding gcsema grants the M the right to block a GC, and blocks -// until the current GC is done. In particular, it prevents gomaxprocs -// from changing concurrently. -// -// TODO(mknyszek): Once gomaxprocs and the execution tracer can handle -// being changed/enabled during a GC, remove this. -var gcsema uint32 = 1 - // stopTheWorldWithSema is the core implementation of stopTheWorld. // The caller is responsible for acquiring worldsema and disabling // preemption first and then should stopTheWorldWithSema on the system @@ -2569,6 +2541,27 @@ func dropg() { // We pass now in and out to avoid extra calls of nanotime. //go:yeswritebarrierrec func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) { + // If there are no timers to adjust, and the first timer on + // the heap is not yet ready to run, then there is nothing to do. + if atomic.Load(&pp.adjustTimers) == 0 { + next := int64(atomic.Load64(&pp.timer0When)) + if next == 0 { + return now, 0, false + } + if now == 0 { + now = nanotime() + } + if now < next { + // Next timer is not ready to run. + // But keep going if we would clear deleted timers. + // This corresponds to the condition below where + // we decide whether to call clearDeletedTimers. + if pp != getg().m.p.ptr() || int(atomic.Load(&pp.deletedTimers)) <= int(atomic.Load(&pp.numTimers)/4) { + return now, next, false + } + } + } + lock(&pp.timersLock) adjusttimers(pp) @@ -2591,6 +2584,13 @@ func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) { } } + // If this is the local P, and there are a lot of deleted timers, + // clear them out. We only do this for the local P to reduce + // lock contention on timersLock. 
+ if pp == getg().m.p.ptr() && int(atomic.Load(&pp.deletedTimers)) > len(pp.timers)/4 { + clearDeletedTimers(pp) + } + unlock(&pp.timersLock) return rnow, pollUntil, ran @@ -2715,7 +2715,7 @@ func preemptPark(gp *g) { } // goyield is like Gosched, but it: -// - does not emit a GoSched trace event +// - emits a GoPreempt trace event instead of a GoSched trace event // - puts the current G on the runq of the current P instead of the globrunq func goyield() { checkTimeouts() @@ -2723,6 +2723,9 @@ func goyield() { } func goyield_m(gp *g) { + if trace.enabled { + traceGoPreempt() + } pp := gp.m.p.ptr() casgstatus(gp, _Grunning, _Grunnable) dropg() @@ -3808,7 +3811,10 @@ func (pp *p) destroy() { lock(&pp.timersLock) moveTimers(plocal, pp.timers) pp.timers = nil + pp.numTimers = 0 pp.adjustTimers = 0 + pp.deletedTimers = 0 + atomic.Store64(&pp.timer0When, 0) unlock(&pp.timersLock) unlock(&plocal.timersLock) } @@ -4114,23 +4120,26 @@ func checkdead() { } // Maybe jump time forward for playground. - _p_ := timejump() - if _p_ != nil { - for pp := &sched.pidle; *pp != 0; pp = &(*pp).ptr().link { - if (*pp).ptr() == _p_ { - *pp = _p_.link - break + if faketime != 0 { + when, _p_ := timeSleepUntil() + if _p_ != nil { + faketime = when + for pp := &sched.pidle; *pp != 0; pp = &(*pp).ptr().link { + if (*pp).ptr() == _p_ { + *pp = _p_.link + break + } } + mp := mget() + if mp == nil { + // There should always be a free M since + // nothing is running. + throw("checkdead: no m for timer") + } + mp.nextp.set(_p_) + notewakeup(&mp.park) + return } - mp := mget() - if mp == nil { - // There should always be a free M since - // nothing is running. - throw("checkdead: no m for timer") - } - mp.nextp.set(_p_) - notewakeup(&mp.park) - return } // There are no goroutines running, so we can look at the P's. 
@@ -4175,7 +4184,7 @@ func sysmon() { } usleep(delay) now := nanotime() - next := timeSleepUntil() + next, _ := timeSleepUntil() if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) { lock(&sched.lock) if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) { @@ -4197,7 +4206,7 @@ func sysmon() { osRelax(false) } now = nanotime() - next = timeSleepUntil() + next, _ = timeSleepUntil() lock(&sched.lock) atomic.Store(&sched.sysmonwait, 0) noteclear(&sched.sysmonnote) |