path: root/libgo/go/runtime/proc.go
author    Ian Lance Taylor <iant@golang.org>  2022-02-11 14:53:56 -0800
committer Ian Lance Taylor <iant@golang.org>  2022-02-11 15:01:19 -0800
commit    8dc2499aa62f768c6395c9754b8cabc1ce25c494 (patch)
tree      43d7fd2bbfd7ad8c9625a718a5e8718889351994 /libgo/go/runtime/proc.go
parent    9a56779dbc4e2d9c15be8d31e36f2f59be7331a8 (diff)
libgo: update to Go1.18beta2
gotools/
	* Makefile.am (go_cmd_cgo_files): Add ast_go118.go
	(check-go-tool): Copy golang.org/x/tools directories.
	* Makefile.in: Regenerate.

Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/384695
Diffstat (limited to 'libgo/go/runtime/proc.go')
-rw-r--r--  libgo/go/runtime/proc.go  171
1 file changed, 64 insertions, 107 deletions
diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go
index 343f13b..db1e2b4 100644
--- a/libgo/go/runtime/proc.go
+++ b/libgo/go/runtime/proc.go
@@ -7,6 +7,7 @@ package runtime
import (
"internal/abi"
"internal/cpu"
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -195,7 +196,7 @@ func main(unsafe.Pointer) {
// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
// Using decimal instead of binary GB and MB because
// they look nicer in the stack overflow failure message.
- if sys.PtrSize == 8 {
+ if goarch.PtrSize == 8 {
maxstacksize = 1000000000
} else {
maxstacksize = 250000000
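The goarch.PtrSize constant used above is internal to the runtime; as a rough sketch of the same word-size check in ordinary Go (illustrative only, not part of the patch), the pointer width can be derived like this:

package main

import (
	"fmt"
	"unsafe"
)

// Classic constant idiom: 8 on 64-bit targets, 4 on 32-bit targets.
const ptrSize = 4 << (^uintptr(0) >> 63)

func main() {
	// unsafe.Sizeof gives the same answer at run time.
	fmt.Println(ptrSize, unsafe.Sizeof(uintptr(0))) // e.g. 8 8 on amd64
}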
@@ -503,32 +504,6 @@ func releaseSudog(s *sudog) {
releasem(mp)
}
-// funcPC returns the entry PC of the function f.
-// It assumes that f is a func value. Otherwise the behavior is undefined.
-// CAREFUL: In programs with plugins, funcPC can return different values
-// for the same function (because there are actually multiple copies of
-// the same function in the address space). To be safe, don't use the
-// results of this function in any == expression. It is only safe to
-// use the result as an address at which to start executing code.
-//
-// For gccgo note that this differs from the gc implementation; the gc
-// implementation adds sys.PtrSize to the address of the interface
-// value, but GCC's alias analysis decides that that can not be a
-// reference to the second field of the interface, and in some cases
-// it drops the initialization of the second field as a dead store.
-//go:nosplit
-func funcPC(f interface{}) uintptr {
- i := (*iface)(unsafe.Pointer(&f))
- r := *(*uintptr)(i.data)
- if cpu.FunctionDescriptors {
- // With PPC64 ELF ABI v1 function descriptors the
- // function address is a pointer to a struct whose
- // first field is the actual PC.
- r = *(*uintptr)(unsafe.Pointer(r))
- }
- return r
-}
-
func lockedOSThread() bool {
gp := getg()
return gp.lockedm != 0 && gp.m.lockedg != 0
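The deleted funcPC helper extracted the entry PC from a func value by reinterpreting the interface header; callers in this patch switch to abi.FuncPCABIInternal instead. As a loose user-level illustration of "entry PC of a func value" (internal/abi is not importable outside the runtime, so this sketch goes through reflect and is not the patch's mechanism):

package main

import (
	"fmt"
	"reflect"
	"runtime"
)

func hello() {}

func main() {
	// reflect.Value.Pointer on a func value yields its code pointer,
	// which runtime.FuncForPC can map back to a symbol name.
	pc := reflect.ValueOf(hello).Pointer()
	fmt.Println(runtime.FuncForPC(pc).Name()) // main.hello
}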
@@ -574,6 +549,20 @@ func allgadd(gp *g) {
unlock(&allglock)
}
+// allGsSnapshot returns a snapshot of the slice of all Gs.
+//
+// The world must be stopped or allglock must be held.
+func allGsSnapshot() []*g {
+ assertWorldStoppedOrLockHeld(&allglock)
+
+ // Because the world is stopped or allglock is held, allgadd
+ // cannot happen concurrently with this. allgs grows
+ // monotonically and existing entries never change, so we can
+ // simply return a copy of the slice header. For added safety,
+ // we trim everything past len because that can still change.
+ return allgs[:len(allgs):len(allgs)]
+}
+
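The snapshot relies on a three-index ("full") slice expression. A minimal sketch of why capping the capacity at len makes the returned header safe to hand out (illustrative, using ints in place of *g):

package main

import "fmt"

func snapshot(s []int) []int {
	// Same shape as allgs[:len(allgs):len(allgs)]: copy only the header,
	// and cap the capacity at the current length.
	return s[:len(s):len(s)]
}

func main() {
	all := make([]int, 3, 8)
	snap := snapshot(all)
	all = append(all, 4)    // grows in place using the spare capacity
	snap = append(snap, 99) // cap == len forces a fresh backing array
	fmt.Println(len(all), len(snap), snap[3]) // 4 4 99
}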
// atomicAllG returns &allgs[0] and len(allgs) for use with atomicAllGIndex.
func atomicAllG() (**g, uintptr) {
length := atomic.Loaduintptr(&allglen)
@@ -583,7 +572,7 @@ func atomicAllG() (**g, uintptr) {
// atomicAllGIndex returns ptr[i] with the allgptr returned from atomicAllG.
func atomicAllGIndex(ptr **g, i uintptr) *g {
- return *(**g)(add(unsafe.Pointer(ptr), i*sys.PtrSize))
+ return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
}
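The add(..., i*goarch.PtrSize) arithmetic above indexes an array of pointers by hand. A small sketch of the same computation with unsafe.Add (Go 1.17+), using *int in place of *g (illustrative only):

package main

import (
	"fmt"
	"unsafe"
)

// index returns element i of the pointer array starting at base:
// the element lives at base + i*sizeof(pointer).
func index(base **int, i uintptr) *int {
	return *(**int)(unsafe.Add(unsafe.Pointer(base), i*unsafe.Sizeof(base)))
}

func main() {
	a, b, c := 1, 2, 3
	s := []*int{&a, &b, &c}
	fmt.Println(*index(&s[0], 2)) // 3
}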
// forEachG calls fn on every G from allgs.
@@ -695,10 +684,10 @@ func schedinit() {
worldStopped()
mallocinit()
+ cpuinit() // must run before alginit
+ alginit() // maps, hash, fastrand must not be used before this call
fastrandinit() // must run before mcommoninit
mcommoninit(_g_.m, -1)
- cpuinit() // must run before alginit
- alginit() // maps must not be used before this call
sigsave(&_g_.m.sigmask)
initSigmask = _g_.m.sigmask
@@ -720,7 +709,7 @@ func schedinit() {
// In 32-bit mode, we can burn a lot of memory on thread stacks.
// Try to avoid this by limiting the number of threads we run
// by default.
- if sys.PtrSize == 4 && procs > 32 {
+ if goarch.PtrSize == 4 && procs > 32 {
procs = 32
}
@@ -807,11 +796,12 @@ func mcommoninit(mp *m, id int64) {
mp.id = mReserveID()
}
- mp.fastrand[0] = uint32(int64Hash(uint64(mp.id), fastrandseed))
- mp.fastrand[1] = uint32(int64Hash(uint64(cputicks()), ^fastrandseed))
- if mp.fastrand[0]|mp.fastrand[1] == 0 {
- mp.fastrand[1] = 1
+ lo := uint32(int64Hash(uint64(mp.id), fastrandseed))
+ hi := uint32(int64Hash(uint64(cputicks()), ^fastrandseed))
+ if lo|hi == 0 {
+ hi = 1
}
+ mp.fastrand = uint64(hi)<<32 | uint64(lo)
mpreinit(mp)
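The hunk above replaces the two-word fastrand state with a single uint64 holding both 32-bit halves. A standalone sketch of the packing (packSeed is a hypothetical helper name, not the runtime's code):

package main

import "fmt"

// packSeed mirrors the new seeding: hi occupies the top 32 bits, lo the
// bottom 32, and hi is forced non-zero so the combined state is never 0.
func packSeed(lo, hi uint32) uint64 {
	if lo|hi == 0 {
		hi = 1
	}
	return uint64(hi)<<32 | uint64(lo)
}

func main() {
	fmt.Printf("%x\n", packSeed(0xdeadbeef, 0x1234)) // 1234deadbeef
	fmt.Printf("%x\n", packSeed(0, 0))               // 100000000
}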
@@ -994,17 +984,18 @@ func casgstatus(gp *g, oldval, newval uint32) {
gp.trackingSeq++
}
if gp.tracking {
- now := nanotime()
if oldval == _Grunnable {
// We transitioned out of runnable, so measure how much
// time we spent in this state and add it to
// runnableTime.
+ now := nanotime()
gp.runnableTime += now - gp.runnableStamp
gp.runnableStamp = 0
}
if newval == _Grunnable {
// We just transitioned into runnable, so record what
// time that happened.
+ now := nanotime()
gp.runnableStamp = now
} else if newval == _Grunning {
// We're transitioning into running, so turn off
@@ -3382,8 +3373,10 @@ func goexit1() {
// goexit continuation on g0.
func goexit0(gp *g) {
_g_ := getg()
+ _p_ := _g_.m.p.ptr()
casgstatus(gp, _Grunning, _Gdead)
+ // gcController.addScannableStack(_p_, -int64(gp.stack.hi-gp.stack.lo))
if isSystemGoroutine(gp, false) {
atomic.Xadd(&sched.ngsys, -1)
gp.isSystemGoroutine = false
@@ -3407,7 +3400,7 @@ func goexit0(gp *g) {
// Flush assist credit to the global pool. This gives
// better information to pacing if the application is
// rapidly creating and exiting goroutines.
- assistWorkPerByte := float64frombits(atomic.Load64(&gcController.assistWorkPerByte))
+ assistWorkPerByte := gcController.assistWorkPerByte.Load()
scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
atomic.Xaddint64(&gcController.bgScanCredit, scanCredit)
gp.gcAssistBytes = 0
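assistWorkPerByte.Load() now hides the bit-twiddling that the removed line spelled out inline. A minimal sketch of that pattern, storing a float64 atomically via its bit pattern (names here are illustrative, not the runtime's types):

package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

type atomicFloat64 struct{ bits uint64 }

func (f *atomicFloat64) Load() float64 {
	return math.Float64frombits(atomic.LoadUint64(&f.bits))
}

func (f *atomicFloat64) Store(v float64) {
	atomic.StoreUint64(&f.bits, math.Float64bits(v))
}

func main() {
	var f atomicFloat64
	f.Store(0.375)
	fmt.Println(f.Load()) // 0.375
}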
@@ -3416,7 +3409,7 @@ func goexit0(gp *g) {
dropg()
if GOARCH == "wasm" { // no threads yet on wasm
- gfput(_g_.m.p.ptr(), gp)
+ gfput(_p_, gp)
schedule() // never returns
}
@@ -3424,7 +3417,7 @@ func goexit0(gp *g) {
print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
throw("internal lockOSThread error")
}
- gfput(_g_.m.p.ptr(), gp)
+ gfput(_p_, gp)
if locked {
// The goroutine may have locked this thread because
// it put it in an unusual kernel state. Kill it
@@ -3927,6 +3920,11 @@ func newproc(fn uintptr, arg unsafe.Pointer) *g {
}
if isSystemGoroutine(newg, false) {
atomic.Xadd(&sched.ngsys, +1)
+ } else {
+ // Only user goroutines inherit pprof labels.
+ if _g_.m.curg != nil {
+ newg.labels = _g_.m.curg.labels
+ }
}
// Track initial transition?
newg.trackingSeq = uint8(fastrand())
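The new else branch copies the creating goroutine's profiler labels onto the child. From user code this is the documented behaviour of runtime/pprof label sets; a small illustrative sketch:

package main

import (
	"context"
	"fmt"
	"runtime/pprof"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	// Goroutines spawned inside the labeled region inherit the label set;
	// newproc's copy of curg.labels is what implements that inheritance.
	pprof.Do(context.Background(), pprof.Labels("worker", "demo"), func(ctx context.Context) {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("child goroutine carries the worker=demo labels")
		}()
	})
	wg.Wait()
}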
@@ -3934,6 +3932,7 @@ func newproc(fn uintptr, arg unsafe.Pointer) *g {
newg.tracking = true
}
casgstatus(newg, _Gdead, _Grunnable)
+ // gcController.addScannableStack(_p_, int64(newg.stack.hi-newg.stack.lo))
if _p_.goidcache == _p_.goidcacheend {
// Sched.goidgen is the last allocated id,
@@ -4244,12 +4243,6 @@ func _GC() { _GC() }
func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
func _VDSO() { _VDSO() }
-var _SystemPC = abi.FuncPCABIInternal(_System)
-var _ExternalCodePC = abi.FuncPCABIInternal(_ExternalCode)
-var _LostExternalCodePC = abi.FuncPCABIInternal(_LostExternalCode)
-var _GCPC = abi.FuncPCABIInternal(_GC)
-var _LostSIGPROFDuringAtomic64PC = abi.FuncPCABIInternal(_LostSIGPROFDuringAtomic64)
-
// Called if we receive a SIGPROF signal.
// Called by the signal handler, may run during STW.
//go:nowritebarrierrec
@@ -4286,7 +4279,7 @@ func sigprof(pc uintptr, gp *g, mp *m) {
n := 0
if traceback {
var stklocs [maxCPUProfStack]location
- n = callers(0, stklocs[:])
+ n = callers(1, stklocs[:])
// Issue 26595: the stack trace we've just collected is going
// to include frames that we don't want to report in the CPU
@@ -4333,61 +4326,23 @@ func sigprof(pc uintptr, gp *g, mp *m) {
n = 2
stk[0] = pc
if mp.preemptoff != "" {
- stk[1] = _GCPC + sys.PCQuantum
+ stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
} else {
- stk[1] = _SystemPC + sys.PCQuantum
- }
- }
-
- if prof.hz != 0 {
- cpuprof.add(gp, stk[:n])
- }
- getg().m.mallocing--
-}
-
-// Use global arrays rather than using up lots of stack space in the
-// signal handler. This is safe since while we are executing a SIGPROF
-// signal other SIGPROF signals are blocked.
-var nonprofGoStklocs [maxCPUProfStack]location
-var nonprofGoStk [maxCPUProfStack]uintptr
-
-// sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread,
-// and the signal handler collected a stack trace in sigprofCallers.
-// When this is called, sigprofCallersUse will be non-zero.
-// g is nil, and what we can do is very limited.
-//go:nosplit
-//go:nowritebarrierrec
-func sigprofNonGo(pc uintptr) {
- if prof.hz != 0 {
- n := callers(0, nonprofGoStklocs[:])
-
- for i := 0; i < n; i++ {
- nonprofGoStk[i] = nonprofGoStklocs[i].pc
- }
-
- if n <= 0 {
- n = 2
- nonprofGoStk[0] = pc
- nonprofGoStk[1] = _ExternalCodePC + sys.PCQuantum
+ stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
}
-
- cpuprof.addNonGo(nonprofGoStk[:n])
}
-}
-// sigprofNonGoPC is called when a profiling signal arrived on a
-// non-Go thread and we have a single PC value, not a stack trace.
-// g is nil, and what we can do is very limited.
-//go:nosplit
-//go:nowritebarrierrec
-func sigprofNonGoPC(pc uintptr) {
if prof.hz != 0 {
- stk := []uintptr{
- pc,
- _ExternalCodePC + sys.PCQuantum,
+ // Note: it can happen on Windows that we interrupted a system thread
+ // with no g, so gp could be nil. The other nil checks are done out of
+ // caution, but not expected to be nil in practice.
+ var tagPtr *unsafe.Pointer
+ if gp != nil && gp.m != nil && gp.m.curg != nil {
+ tagPtr = &gp.m.curg.labels
}
- cpuprof.addNonGo(stk)
+ cpuprof.add(tagPtr, stk[:n])
}
+ getg().m.mallocing--
}
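With cpuprof.add now taking a pointer to the current goroutine's label map, CPU samples are tagged with any active pprof labels. A rough end-to-end sketch from the user side (illustrative; the label tags appear in the resulting profile):

package main

import (
	"bytes"
	"context"
	"fmt"
	"runtime/pprof"
)

func main() {
	var buf bytes.Buffer
	if err := pprof.StartCPUProfile(&buf); err != nil {
		panic(err)
	}
	pprof.Do(context.Background(), pprof.Labels("phase", "spin"), func(context.Context) {
		x := 0
		for i := 0; i < 1e8; i++ { // burn CPU so the profiler collects samples
			x += i
		}
		_ = x
	})
	pprof.StopCPUProfile()
	fmt.Println("profile bytes:", buf.Len())
}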
// setcpuprofilerate sets the CPU profiling rate to hz times per second.
@@ -4511,8 +4466,8 @@ func (pp *p) destroy() {
pp.sudogbuf[i] = nil
}
pp.sudogcache = pp.sudogbuf[:0]
- for i := range pp.deferpoolbuf {
- pp.deferpoolbuf[i] = nil
+ for j := range pp.deferpoolbuf {
+ pp.deferpoolbuf[j] = nil
}
pp.deferpool = pp.deferpoolbuf[:0]
systemstack(func() {
@@ -4858,6 +4813,10 @@ func checkdead() {
// This is a variable for testing purposes. It normally doesn't change.
var forcegcperiod int64 = 2 * 60 * 1e9
+// needSysmonWorkaround is true if the workaround for
+// golang.org/issue/42515 is needed on NetBSD.
+var needSysmonWorkaround bool = false
+
// Always runs without a P, so write barriers are not allowed.
//
//go:nowritebarrierrec
@@ -4966,7 +4925,7 @@ func sysmon() {
}
}
mDoFixup()
- if GOOS == "netbsd" {
+ if GOOS == "netbsd" && needSysmonWorkaround {
// netpoll is responsible for waiting for timer
// expiration, so we typically don't have to worry
// about starting an M to service timers. (Note that
@@ -5510,7 +5469,7 @@ const randomizeScheduler = raceenabled
// If the run queue is full, runnext puts g on the global queue.
// Executed only by the owner P.
func runqput(_p_ *p, gp *g, next bool) {
- if randomizeScheduler && next && fastrand()%2 == 0 {
+ if randomizeScheduler && next && fastrandn(2) == 0 {
next = false
}
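fastrandn(2) replaces the modulo with a multiply-and-shift reduction. A sketch of that technique outside the runtime (randUint32n is an illustrative stand-in, not the runtime's fastrandn):

package main

import (
	"fmt"
	"math/rand"
)

// randUint32n maps a 32-bit random value into [0, n) with one multiply
// and a shift instead of a divide (Lemire-style reduction).
func randUint32n(r, n uint32) uint32 {
	return uint32(uint64(r) * uint64(n) >> 32)
}

func main() {
	counts := [2]int{}
	for i := 0; i < 1000; i++ {
		counts[randUint32n(rand.Uint32(), 2)]++
	}
	fmt.Println(counts) // roughly even split across the two buckets
}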
@@ -5623,14 +5582,12 @@ func runqputbatch(pp *p, q *gQueue, qsize int) {
// Executed only by the owner P.
func runqget(_p_ *p) (gp *g, inheritTime bool) {
// If there's a runnext, it's the next G to run.
- for {
- next := _p_.runnext
- if next == 0 {
- break
- }
- if _p_.runnext.cas(next, 0) {
- return next.ptr(), true
- }
+ next := _p_.runnext
+ // If the runnext is non-0 and the CAS fails, it could only have been stolen by another P,
+ // because other Ps can race to set runnext to 0, but only the current P can set it to non-0.
+ // Hence, there's no need to retry this CAS if it fails.
+ if next != 0 && _p_.runnext.cas(next, 0) {
+ return next.ptr(), true
}
for {
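The comment above argues the owner never needs to retry the CAS, because only the owner stores non-zero values into runnext while any P may zero it. A tiny sketch of that single-slot discipline with sync/atomic (illustrative types, not the runtime's guintptr):

package main

import (
	"fmt"
	"sync/atomic"
)

type slot struct{ v uint64 }

// put is called only by the owner, so a plain atomic store suffices.
func (s *slot) put(x uint64) { atomic.StoreUint64(&s.v, x) }

// take may be called by the owner or a thief. If the CAS from a non-zero
// value fails, someone else already emptied the slot; there is nothing
// left to retry for, mirroring the runqget reasoning.
func (s *slot) take() (uint64, bool) {
	x := atomic.LoadUint64(&s.v)
	return x, x != 0 && atomic.CompareAndSwapUint64(&s.v, x, 0)
}

func main() {
	var s slot
	s.put(42)
	if x, ok := s.take(); ok {
		fmt.Println("took", x) // took 42
	}
	_, ok := s.take()
	fmt.Println(ok) // false: the slot is already empty
}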
@@ -5795,7 +5752,7 @@ func (q *gQueue) pushBack(gp *g) {
q.tail.set(gp)
}
-// pushBackAll adds all Gs in l2 to the tail of q. After this q2 must
+// pushBackAll adds all Gs in q2 to the tail of q. After this q2 must
// not be used.
func (q *gQueue) pushBackAll(q2 gQueue) {
if q2.tail == 0 {