Diffstat (limited to 'libgo/go/runtime/proc.go')
-rw-r--r--  libgo/go/runtime/proc.go  176
1 file changed, 147 insertions, 29 deletions
diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go
index 4fc45dd..77d379b 100644
--- a/libgo/go/runtime/proc.go
+++ b/libgo/go/runtime/proc.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/cpu"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -169,9 +170,11 @@ func main() {
// Allow newproc to start new Ms.
mainStarted = true
- systemstack(func() {
- newm(sysmon, nil)
- })
+ if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon
+ systemstack(func() {
+ newm(sysmon, nil)
+ })
+ }
// Lock the main goroutine onto this, the main OS thread,
// during initialization. Most programs won't care, but a few
@@ -242,7 +245,7 @@ func main() {
}
}
if atomic.Load(&panicking) != 0 {
- gopark(nil, nil, "panicwait", traceEvGoStop, 1)
+ gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1)
}
exit(0)
@@ -276,7 +279,7 @@ func forcegchelper() {
throw("forcegc: phase error")
}
atomic.Store(&forcegc.idle, 1)
- goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock, 1)
+ goparkunlock(&forcegc.lock, waitReasonForceGGIdle, traceEvGoBlock, 1)
// this goroutine is explicitly resumed by sysmon
if debug.gctrace > 0 {
println("GC forced")
@@ -291,6 +294,7 @@ func forcegchelper() {
// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
func Gosched() {
+ checkTimeouts()
mcall(gosched_m)
}
@@ -305,7 +309,14 @@ func goschedguarded() {
// If unlockf returns false, the goroutine is resumed.
// unlockf must not access this G's stack, as it may be moved between
// the call to gopark and the call to unlockf.
-func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte, traceskip int) {
+// Reason explains why the goroutine has been parked.
+// It is displayed in stack traces and heap dumps.
+// Reasons should be unique and descriptive.
+// Do not re-use reasons; add new ones.
+func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) {
+ if reason != waitReasonSleep {
+ checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy
+ }
mp := acquirem()
gp := mp.curg
status := readgstatus(gp)
@@ -324,7 +335,7 @@ func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason s
// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling goready(gp).
-func goparkunlock(lock *mutex, reason string, traceEv byte, traceskip int) {
+func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int) {
gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
}
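The hunks above replace the plain string reason with a typed waitReason; the type, its constants, and the String method live in runtime2.go and are not part of this diff. A rough, self-contained sketch of the pattern (only the constant names that appear in this diff are taken from it; the rest is illustrative):

package main

import "fmt"

// waitReason mirrors the runtime pattern: a small integer type whose
// String method yields the text shown in stack traces and schedtrace.
type waitReason uint8

const (
    waitReasonZero      waitReason = iota // ""
    waitReasonPanicWait                   // "panicwait"
    waitReasonSleep                       // "sleep"
)

var waitReasonStrings = [...]string{
    waitReasonZero:      "",
    waitReasonPanicWait: "panicwait",
    waitReasonSleep:     "sleep",
}

func (w waitReason) String() string {
    if int(w) >= len(waitReasonStrings) {
        return "unknown wait reason"
    }
    return waitReasonStrings[w]
}

func main() {
    fmt.Println(waitReasonPanicWait) // fmt calls String: prints "panicwait"
}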
@@ -468,6 +479,37 @@ const (
_GoidCacheBatch = 16
)
+// cpuinit extracts the environment variable GODEBUGCPU from the environment on
+// Linux and Darwin if the debugcpu GOEXPERIMENT is enabled, and calls internal/cpu.Initialize.
+func cpuinit() {
+ const prefix = "GODEBUGCPU="
+ var env string
+
+ if haveexperiment("debugcpu") && (GOOS == "linux" || GOOS == "darwin") {
+ cpu.DebugOptions = true
+
+ // Similar to goenv_unix but extracts the environment value for
+ // GODEBUGCPU directly.
+ // TODO(moehrmann): remove when general goenvs() can be called before cpuinit()
+ n := int32(0)
+ for argv_index(argv, argc+1+n) != nil {
+ n++
+ }
+
+ for i := int32(0); i < n; i++ {
+ p := argv_index(argv, argc+1+i)
+ s := *(*string)(unsafe.Pointer(&stringStruct{unsafe.Pointer(p), findnull(p)}))
+
+ if hasprefix(s, prefix) {
+ env = gostring(p)[len(prefix):]
+ break
+ }
+ }
+ }
+
+ cpu.Initialize(env)
+}
+
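cpuinit has to scan the raw argv block past argc because goenvs() has not run yet at this point in the bootstrap. The same GODEBUGCPU= prefix scan, rewritten as an ordinary program with the standard library instead of the runtime's hasprefix/gostring helpers (a sketch, not the runtime code):

package main

import (
    "fmt"
    "os"
    "strings"
)

// lookupGodebugCPU scans a list of "KEY=value" strings and returns the
// value of GODEBUGCPU, mirroring the prefix scan in cpuinit.
func lookupGodebugCPU(environ []string) string {
    const prefix = "GODEBUGCPU="
    for _, s := range environ {
        if strings.HasPrefix(s, prefix) {
            return s[len(prefix):]
        }
    }
    return ""
}

func main() {
    // In the runtime the strings come from the raw argv block; here
    // os.Environ() stands in so the example is runnable.
    fmt.Println(lookupGodebugCPU(os.Environ()))
}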
// The bootstrap sequence is:
//
// call osinit
@@ -488,6 +530,7 @@ func schedinit() {
mallocinit()
mcommoninit(_g_.m)
+ cpuinit() // must run before alginit
alginit() // maps must not be used before this call
msigsave(_g_.m)
@@ -778,7 +821,7 @@ func casgstatus(gp *g, oldval, newval uint32) {
})
}
- // See http://golang.org/cl/21503 for justification of the yield delay.
+ // See https://golang.org/cl/21503 for justification of the yield delay.
const yieldDelay = 5 * 1000
var nextYield int64
@@ -786,9 +829,7 @@ func casgstatus(gp *g, oldval, newval uint32) {
// GC time to finish and change the state to oldval.
for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
- systemstack(func() {
- throw("casgstatus: waiting for Gwaiting but is Grunnable")
- })
+ throw("casgstatus: waiting for Gwaiting but is Grunnable")
}
// Help GC if needed.
// if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
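The loop above retries atomic.Cas until the expected old status is published, yielding once a short delay has passed (the CL 21503 reference). A self-contained sketch of that compare-and-swap-with-yield pattern using sync/atomic; runtime.Gosched and the time package stand in for the runtime-internal osyield and nanotime:

package main

import (
    "runtime"
    "sync/atomic"
    "time"
)

// casWithYield retries a compare-and-swap, yielding the processor after a
// short delay so the goroutine that must publish the old value can run.
func casWithYield(addr *uint32, old, new uint32) {
    const yieldDelay = 5 * time.Microsecond
    start := time.Now()
    for !atomic.CompareAndSwapUint32(addr, old, new) {
        if time.Since(start) > yieldDelay {
            runtime.Gosched()
        }
    }
}

func main() {
    var status uint32 = 1
    go func() {
        time.Sleep(time.Millisecond)
        atomic.StoreUint32(&status, 2) // publish the value the CAS is waiting for
    }()
    casWithYield(&status, 2, 3) // spins, then yields, until status becomes 2
}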
@@ -826,7 +867,7 @@ func scang(gp *g, gcw *gcWork) {
gp.gcscandone = false
- // See http://golang.org/cl/21503 for justification of the yield delay.
+ // See https://golang.org/cl/21503 for justification of the yield delay.
const yieldDelay = 10 * 1000
var nextYield int64
@@ -1212,7 +1253,9 @@ func mstart1() {
//go:yeswritebarrierrec
func mstartm0() {
// Create an extra M for callbacks on threads not created by Go.
- if iscgo && !cgoHasExtraM {
+ // An extra M is also needed on Windows for callbacks created by
+ // syscall.NewCallback. See issue #6751 for details.
+ if (iscgo || GOOS == "windows") && !cgoHasExtraM {
cgoHasExtraM = true
newextram()
}
@@ -1517,8 +1560,12 @@ func allocm(_p_ *p, fn func(), allocatestack bool) (mp *m, g0Stack unsafe.Pointe
// put the m back on the list.
//go:nosplit
func needm(x byte) {
- if iscgo && !cgoHasExtraM {
+ if (iscgo || GOOS == "windows") && !cgoHasExtraM {
// Can happen if C/C++ code calls Go from a global ctor.
+ // Can also happen on Windows if a global ctor uses a
+ // callback created by syscall.NewCallback. See issue #6751
+ // for details.
+ //
// Can not throw, because scheduler is not initialized yet.
write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
exit(1)
@@ -1814,13 +1861,16 @@ func newm1(mp *m) {
//
// The calling thread must itself be in a known-good state.
func startTemplateThread() {
+ if GOARCH == "wasm" { // no threads on wasm yet
+ return
+ }
if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
return
}
newm(templateThread, nil)
}
-// tmeplateThread is a thread in a known-good state that exists solely
+// templateThread is a thread in a known-good state that exists solely
// to start new threads in known-good states when the calling thread
// may not be in a good state.
//
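startTemplateThread relies on a compare-and-swap of newmHandoff.haveTemplateThread so that only the first caller actually creates the thread. The same first-CAS-wins idiom in a standalone program (ensureStarted and the worker are illustrative names, not runtime functions):

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

var haveWorker uint32 // 0 = not started, 1 = started

// ensureStarted starts the background worker exactly once; callers that
// lose the compare-and-swap return immediately, as startTemplateThread does.
func ensureStarted(start func()) {
    if !atomic.CompareAndSwapUint32(&haveWorker, 0, 1) {
        return // someone else already started it
    }
    go start()
}

func main() {
    var wg sync.WaitGroup
    wg.Add(1)
    for i := 0; i < 3; i++ {
        ensureStarted(func() { // only the first call spawns the worker
            fmt.Println("worker started once")
            wg.Done()
        })
    }
    wg.Wait()
}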
@@ -2232,6 +2282,14 @@ stop:
return gp, false
}
+ // wasm only:
+ // Check if a goroutine is waiting for a callback from the WebAssembly host.
+	// If so, pause execution until a callback is triggered.
+ if pauseSchedulerUntilCallback() {
+ // A callback was triggered and caused at least one goroutine to wake up.
+ goto top
+ }
+
// Before we drop our P, make a snapshot of the allp slice,
// which can change underfoot once we no longer block
// safe-points. We don't need to snapshot the contents because
@@ -2616,7 +2674,7 @@ func goexit0(gp *g) {
gp._defer = nil // should be true already but just in case.
gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
gp.writebuf = nil
- gp.waitreason = ""
+ gp.waitreason = 0
gp.param = nil
gp.labels = nil
gp.timer = nil
@@ -2635,6 +2693,11 @@ func goexit0(gp *g) {
gp.gcscanvalid = true
dropg()
+ if GOARCH == "wasm" { // no threads yet on wasm
+ gfput(_g_.m.p.ptr(), gp)
+ schedule() // never returns
+ }
+
if _g_.m.lockedInt != 0 {
print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
throw("internal lockOSThread error")
@@ -2743,8 +2806,6 @@ func entersyscall_gcwait() {
unlock(&sched.lock)
}
-// The same as reentersyscall(), but with a hint that the syscall is blocking.
-//go:nosplit
func reentersyscallblock(pc, sp uintptr) {
_g_ := getg()
@@ -2789,9 +2850,7 @@ func exitsyscall() {
oldp := _g_.m.p.ptr()
if exitsyscallfast() {
if _g_.m.mcache == nil {
- systemstack(func() {
- throw("lost mcache")
- })
+ throw("lost mcache")
}
if trace.enabled {
if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
@@ -2836,9 +2895,7 @@ func exitsyscall() {
mcall(exitsyscall0)
if _g_.m.mcache == nil {
- systemstack(func() {
- throw("lost mcache")
- })
+ throw("lost mcache")
}
// Scheduler returned, so we're allowed to run now.
@@ -3188,6 +3245,42 @@ func setSystemGoroutine() {
atomic.Xadd(&expectedSystemGoroutines, -1)
}
+// saveAncestors copies previous ancestors of the given caller g and
+// includes info for the current caller into a new set of tracebacks for
+// a g being created.
+func saveAncestors(callergp *g) *[]ancestorInfo {
+ // Copy all prior info, except for the root goroutine (goid 0).
+ if debug.tracebackancestors <= 0 || callergp.goid == 0 {
+ return nil
+ }
+ var callerAncestors []ancestorInfo
+ if callergp.ancestors != nil {
+ callerAncestors = *callergp.ancestors
+ }
+ n := int32(len(callerAncestors)) + 1
+ if n > debug.tracebackancestors {
+ n = debug.tracebackancestors
+ }
+ ancestors := make([]ancestorInfo, n)
+ copy(ancestors[1:], callerAncestors)
+
+ var pcs [_TracebackMaxFrames]uintptr
+ // FIXME: This should get a traceback of callergp.
+ // npcs := gcallers(callergp, 0, pcs[:])
+ npcs := 0
+ ipcs := make([]uintptr, npcs)
+ copy(ipcs, pcs[:])
+ ancestors[0] = ancestorInfo{
+ pcs: ipcs,
+ goid: callergp.goid,
+ gopc: callergp.gopc,
+ }
+
+ ancestorsp := new([]ancestorInfo)
+ *ancestorsp = ancestors
+ return ancestorsp
+}
+
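saveAncestors keeps at most debug.tracebackancestors entries, with the immediate caller at index 0 and the older ancestors shifted into ancestors[1:], the oldest falling off the end. A trimmed, standalone sketch of that capped, newest-first copy (the struct here carries only a goid for illustration; the <= 0 case is handled earlier in the runtime code):

package main

import "fmt"

// ancestorInfo is a trimmed illustration of the runtime struct: just the
// goroutine id of each ancestor, newest first.
type ancestorInfo struct {
    goid int64
}

// saveAncestors prepends the caller and keeps at most max entries,
// mirroring the copy logic in the runtime's saveAncestors.
func saveAncestors(caller ancestorInfo, prior []ancestorInfo, max int) []ancestorInfo {
    n := len(prior) + 1
    if n > max {
        n = max
    }
    ancestors := make([]ancestorInfo, n)
    copy(ancestors[1:], prior) // older ancestors shift right, oldest falls off
    ancestors[0] = caller
    return ancestors
}

func main() {
    chain := []ancestorInfo{{goid: 7}, {goid: 3}}
    fmt.Println(saveAncestors(ancestorInfo{goid: 12}, chain, 2)) // [{12} {7}]
}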
// Put on gfree list.
// If local list is too long, transfer a batch to the global list.
func gfput(_p_ *p, gp *g) {
@@ -3265,6 +3358,9 @@ func Breakpoint() {
// or else the m might be different in this function than in the caller.
//go:nosplit
func dolockOSThread() {
+ if GOARCH == "wasm" {
+ return // no threads on wasm yet
+ }
_g_ := getg()
_g_.m.lockedg.set(_g_)
_g_.lockedm.set(_g_.m)
@@ -3280,6 +3376,10 @@ func dolockOSThread() {
// If the calling goroutine exits without unlocking the thread,
// the thread will be terminated.
//
+// All init functions are run on the startup thread. Calling LockOSThread
+// from an init function will cause the main function to be invoked on
+// that thread.
+//
// A goroutine should call LockOSThread before calling OS services or
// non-Go library functions that depend on per-thread state.
func LockOSThread() {
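The new paragraph documents that calling LockOSThread from an init function pins main to the startup thread, which matters for C GUI frameworks and similar libraries that must run on the main OS thread. A minimal program using the documented public API:

package main

import (
    "fmt"
    "runtime"
)

func init() {
    // Per the documentation added above: locking from init keeps main()
    // on the startup thread.
    runtime.LockOSThread()
}

func main() {
    defer runtime.UnlockOSThread()
    fmt.Println("main is running on the startup OS thread")
}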
@@ -3309,6 +3409,9 @@ func lockOSThread() {
// or else the m might be different in this function than in the caller.
//go:nosplit
func dounlockOSThread() {
+ if GOARCH == "wasm" {
+ return // no threads on wasm yet
+ }
_g_ := getg()
if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 {
return
@@ -3382,6 +3485,7 @@ func _ExternalCode() { _ExternalCode() }
func _LostExternalCode() { _LostExternalCode() }
func _GC() { _GC() }
func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
+func _VDSO() { _VDSO() }
// Counts SIGPROFs received while in atomic64 critical section, on mips{,le}
var lostAtomic64Count uint64
@@ -3470,7 +3574,7 @@ func sigprof(pc uintptr, gp *g, mp *m) {
}
if prof.hz != 0 {
- if (GOARCH == "mips" || GOARCH == "mipsle") && lostAtomic64Count > 0 {
+ if (GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm") && lostAtomic64Count > 0 {
cpuprof.addLostAtomic64(lostAtomic64Count)
lostAtomic64Count = 0
}
@@ -3818,8 +3922,17 @@ func checkdead() {
return
}
+	// If we are not running under cgo, but we have an extra M, then account
+ // for it. (It is possible to have an extra M on Windows without cgo to
+ // accommodate callbacks created by syscall.NewCallback. See issue #6751
+ // for details.)
+ var run0 int32
+ if !iscgo && cgoHasExtraM {
+ run0 = 1
+ }
+
run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
- if run > 0 {
+ if run > run0 {
return
}
if run < 0 {
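The change lets checkdead tolerate one running M that exists only to service syscall.NewCallback callbacks on Windows without cgo. The accounting rule in isolation (field names simplified; the real code throws rather than panicking):

package main

import "fmt"

// mayBeDeadlocked reports whether no M beyond the tolerated baseline is
// running, i.e. whether checkdead must keep looking for a real deadlock.
func mayBeDeadlocked(mcount, nmidle, nmidlelocked, nmsys int32, iscgo, hasExtraM bool) bool {
    var run0 int32
    if !iscgo && hasExtraM {
        run0 = 1 // the extra M kept for syscall.NewCallback callbacks on Windows
    }
    run := mcount - nmidle - nmidlelocked - nmsys
    if run < 0 {
        panic("checkdead: inconsistent counts") // the runtime throws here
    }
    return run <= run0
}

func main() {
    // Only the extra callback M is left: previously a spurious deadlock,
    // now checkdead keeps scanning the goroutines instead of bailing out.
    fmt.Println(mayBeDeadlocked(1, 0, 0, 0, false, true)) // true
    // A second M is still running user code, so checkdead returns early.
    fmt.Println(mayBeDeadlocked(2, 0, 0, 0, false, true)) // false
}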
@@ -4215,7 +4328,7 @@ func schedtrace(detailed bool) {
if lockedm != nil {
id2 = lockedm.id
}
- print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason, ") m=", id1, " lockedm=", id2, "\n")
+ print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=", id1, " lockedm=", id2, "\n")
}
unlock(&allglock)
unlock(&sched.lock)
@@ -4375,7 +4488,7 @@ func runqempty(_p_ *p) bool {
const randomizeScheduler = raceenabled
// runqput tries to put g on the local runnable queue.
-// If next if false, runqput adds g to the tail of the runnable queue.
+// If next is false, runqput adds g to the tail of the runnable queue.
// If next is true, runqput puts g in the _p_.runnext slot.
// If the run queue is full, runnext puts g on the global queue.
// Executed only by the owner P.
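The corrected comment describes runqput's two modes: the tail of the P's local queue versus the runnext slot, with a displaced runnext demoted to the tail. A much-simplified, single-goroutine model of that structure (the real queue is a fixed-size ring accessed with atomics and spills to the global queue when full):

package main

import "fmt"

// runQueue is a simplified model of a P's local run queue:
// a FIFO slice of goroutine ids plus the runnext slot.
type runQueue struct {
    next *int  // runnext: runs before anything in q
    q    []int // local FIFO of goroutine ids
}

// put mirrors the documented behavior: next==false appends to the tail,
// next==true installs g in runnext and demotes the previous runnext.
func (r *runQueue) put(g int, next bool) {
    if !next {
        r.q = append(r.q, g)
        return
    }
    if r.next != nil {
        r.q = append(r.q, *r.next) // old runnext goes to the tail
    }
    r.next = &g
}

func main() {
    var r runQueue
    r.put(1, false)
    r.put(2, true)
    r.put(3, true)            // demotes 2 to the tail
    fmt.Println(*r.next, r.q) // 3 [1 2]
}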
@@ -4571,6 +4684,11 @@ func setMaxThreads(in int) (out int) {
return
}
+func haveexperiment(name string) bool {
+ // The gofrontend does not support experiments.
+ return false
+}
+
//go:nosplit
func procPin() int {
_g_ := getg()
@@ -4618,7 +4736,7 @@ func sync_runtime_canSpin(i int) bool {
// Spin only few times and only if running on a multicore machine and
// GOMAXPROCS>1 and there is at least one other running P and local runq is empty.
// As opposed to runtime mutex we don't do passive spinning here,
- // because there can be work on global runq on on other Ps.
+ // because there can be work on global runq or on other Ps.
if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
return false
}