author    Ian Lance Taylor <iant@golang.org>  2020-10-24 14:47:44 -0700
committer Ian Lance Taylor <iant@golang.org>  2020-10-27 13:58:02 -0700
commit    668894d7b584b40ddb46e9e2e2ffa637f4d732e5 (patch)
tree      6c97d325215c8462f375f142d1e91099cc4edb68 /libgo/go/runtime
parent    2b3e722a3ca1b9dcfff1c016e651d0d681de1af0 (diff)
libgo: update to Go 1.15.3 release
Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/265717
Diffstat (limited to 'libgo/go/runtime')
-rw-r--r--  libgo/go/runtime/chan.go                         |  22
-rw-r--r--  libgo/go/runtime/chan_test.go                    |  56
-rw-r--r--  libgo/go/runtime/export_test.go                  |  24
-rw-r--r--  libgo/go/runtime/internal/atomic/atomic_test.go  |  10
-rw-r--r--  libgo/go/runtime/mpagealloc.go                   |  13
-rw-r--r--  libgo/go/runtime/proc_test.go                    |  10
-rw-r--r--  libgo/go/runtime/runtime2.go                     |   9
-rw-r--r--  libgo/go/runtime/select.go                       |  19
-rw-r--r--  libgo/go/runtime/trace/trace_stack_test.go       |   1
9 files changed, 147 insertions, 17 deletions
diff --git a/libgo/go/runtime/chan.go b/libgo/go/runtime/chan.go
index de1d80a..b909d47 100644
--- a/libgo/go/runtime/chan.go
+++ b/libgo/go/runtime/chan.go
@@ -267,6 +267,11 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
gp.waiting = mysg
gp.param = nil
c.sendq.enqueue(mysg)
+ // Signal to anyone trying to shrink our stack that we're about
+ // to park on a channel. The window between when this G's status
+ // changes and when we set gp.activeStackChans is not safe for
+ // stack shrinking.
+ atomic.Store8(&gp.parkingOnChan, 1)
gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanSend, traceEvGoBlockSend, 2)
// Ensure the value being sent is kept alive until the
// receiver copies it out. The sudog has a pointer to the
@@ -586,6 +591,11 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool)
mysg.c = c
gp.param = nil
c.recvq.enqueue(mysg)
+ // Signal to anyone trying to shrink our stack that we're about
+ // to park on a channel. The window between when this G's status
+ // changes and when we set gp.activeStackChans is not safe for
+ // stack shrinking.
+ atomic.Store8(&gp.parkingOnChan, 1)
gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanReceive, traceEvGoBlockRecv, 2)
// someone woke us up
@@ -663,7 +673,19 @@ func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
func chanparkcommit(gp *g, chanLock unsafe.Pointer) bool {
// There are unlocked sudogs that point into gp's stack. Stack
// copying must lock the channels of those sudogs.
+ // Set activeStackChans here instead of before we try parking
+ // because we could self-deadlock in stack growth on the
+ // channel lock.
gp.activeStackChans = true
+ // Mark that it's safe for stack shrinking to occur now,
+ // because any thread acquiring this G's stack for shrinking
+ // is guaranteed to observe activeStackChans after this store.
+ atomic.Store8(&gp.parkingOnChan, 0)
+ // Make sure we unlock after setting activeStackChans and
+ // unsetting parkingOnChan. The moment we unlock chanLock
+ // we risk gp getting readied by a channel operation and
+ // so gp could continue running before everything before
+ // the unlock is visible (even to gp itself).
unlock((*mutex)(chanLock))
return true
}
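The ordering above is the heart of the fix: parkingOnChan is set before the G's status changes and cleared only after activeStackChans is published, so a shrinker that observes parkingOnChan == 0 either runs before the unsafe window opens or is guaranteed to also see activeStackChans == true. A minimal user-space sketch of that handshake, using sync/atomic in place of the runtime's internal atomics; the names parking, active, and shrinkSafe are illustrative, not the runtime's:

package main

import (
	"fmt"
	"sync/atomic"
)

var (
	parking uint32 // stands in for gp.parkingOnChan
	active  uint32 // stands in for gp.activeStackChans
)

// park mirrors chansend/chanrecv plus the park-commit callback:
// announce the unsafe window, publish the state the shrinker
// needs, then close the window.
func park() {
	atomic.StoreUint32(&parking, 1) // about to park; shrinking unsafe
	// ... gopark runs here; its commit function then does:
	atomic.StoreUint32(&active, 1)  // activeStackChans = true
	atomic.StoreUint32(&parking, 0) // window closed; shrinking safe
}

// shrinkSafe mirrors the shrinker's side of the protocol: it must
// refuse to act while parking is set, and once parking reads 0
// after a park, the earlier store to active is visible to it.
func shrinkSafe() bool {
	return atomic.LoadUint32(&parking) == 0
}

func main() {
	park()
	fmt.Println("safe to shrink:", shrinkSafe(),
		"active:", atomic.LoadUint32(&active) == 1)
}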
diff --git a/libgo/go/runtime/chan_test.go b/libgo/go/runtime/chan_test.go
index ac81d40..85d3e04 100644
--- a/libgo/go/runtime/chan_test.go
+++ b/libgo/go/runtime/chan_test.go
@@ -628,6 +628,62 @@ func TestShrinkStackDuringBlockedSend(t *testing.T) {
<-done
}
+func TestNoShrinkStackWhileParking(t *testing.T) {
+ // The goal of this test is to trigger a "racy sudog adjustment"
+ // throw. Basically, there's a window after a goroutine becomes
+ // available for preemption for stack scanning (and thus, stack
+ // shrinking) but before it has fully parked on a channel. See
+ // issue 40641 for more details on the problem.
+ //
+ // The way we try to induce this failure is to set up two
+ goroutines: a sender and a receiver that communicate across
+ // a channel. We try to set up a situation where the sender
+ // grows its stack temporarily then *fully* blocks on a channel
+ // often. Meanwhile a GC is triggered so that we try to get a
+ // mark worker to shrink the sender's stack and race with the
+ // sender parking.
+ //
+ // Unfortunately the race window here is so small that we
+ // either need a ridiculous number of iterations, or we add
+ // "usleep(1000)" to park_m, just before the unlockf call.
+ const n = 10
+ send := func(c chan<- int, done chan struct{}) {
+ for i := 0; i < n; i++ {
+ c <- i
+ // Use lots of stack briefly so that
+ // the GC is going to want to shrink us
+ // when it scans us. Make sure not to
+ // do any other function calls, in
+ // order to avoid shrinking ourselves
+ // when we're preempted.
+ stackGrowthRecursive(20)
+ }
+ done <- struct{}{}
+ }
+ recv := func(c <-chan int, done chan struct{}) {
+ for i := 0; i < n; i++ {
+ // Sleep here so that the sender always
+ // fully blocks.
+ time.Sleep(10 * time.Microsecond)
+ <-c
+ }
+ done <- struct{}{}
+ }
+ for i := 0; i < n*20; i++ {
+ c := make(chan int)
+ done := make(chan struct{})
+ go recv(c, done)
+ go send(c, done)
+ // Wait a little bit before triggering
+ // the GC to make sure the sender and
+ // receiver have gotten into their groove.
+ time.Sleep(50 * time.Microsecond)
+ runtime.GC()
+ <-done
+ <-done
+ }
+}
+
func TestSelectDuplicateChannel(t *testing.T) {
// This test makes sure we can queue a G on
// the same channel multiple times.
diff --git a/libgo/go/runtime/export_test.go b/libgo/go/runtime/export_test.go
index 482d014..369230a 100644
--- a/libgo/go/runtime/export_test.go
+++ b/libgo/go/runtime/export_test.go
@@ -355,7 +355,11 @@ func ReadMemStatsSlow() (base, slow MemStats) {
}
for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
- pg := mheap_.pages.chunkOf(i).scavenged.popcntRange(0, pallocChunkPages)
+ chunk := mheap_.pages.tryChunkOf(i)
+ if chunk == nil {
+ continue
+ }
+ pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
slow.HeapReleased += uint64(pg) * pageSize
}
for _, p := range allp {
@@ -752,11 +756,7 @@ func (p *PageAlloc) InUse() []AddrRange {
// Returns nil if the PallocData's L2 is missing.
func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
ci := chunkIdx(i)
- l2 := (*pageAlloc)(p).chunks[ci.l1()]
- if l2 == nil {
- return nil
- }
- return (*PallocData)(&l2[ci.l2()])
+ return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
}
// AddrRange represents a range over addresses.
@@ -896,7 +896,10 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
lock(&mheap_.lock)
chunkLoop:
for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
- chunk := mheap_.pages.chunkOf(i)
+ chunk := mheap_.pages.tryChunkOf(i)
+ if chunk == nil {
+ continue
+ }
for j := 0; j < pallocChunkPages/64; j++ {
// Run over each 64-bit bitmap section and ensure
// scavenged is being cleared properly on allocation.
@@ -977,10 +980,9 @@ func MapHashCheck(m interface{}, k interface{}) (uintptr, uintptr) {
}
func MSpanCountAlloc(bits []byte) int {
- s := mspan{
- nelems: uintptr(len(bits) * 8),
- gcmarkBits: (*gcBits)(unsafe.Pointer(&bits[0])),
- }
+ s := (*mspan)(mheap_.spanalloc.alloc())
+ s.nelems = uintptr(len(bits) * 8)
+ s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
return s.countAlloc()
}
diff --git a/libgo/go/runtime/internal/atomic/atomic_test.go b/libgo/go/runtime/internal/atomic/atomic_test.go
index 0c1125c..b0a8fa0 100644
--- a/libgo/go/runtime/internal/atomic/atomic_test.go
+++ b/libgo/go/runtime/internal/atomic/atomic_test.go
@@ -220,3 +220,13 @@ func TestBitwiseContended(t *testing.T) {
}
}
}
+
+func TestStorepNoWB(t *testing.T) {
+ var p [2]*int
+ for i := range p {
+ atomic.StorepNoWB(unsafe.Pointer(&p[i]), unsafe.Pointer(new(int)))
+ }
+ if p[0] == p[1] {
+ t.Error("Bad escape analysis of StorepNoWB")
+ }
+}
diff --git a/libgo/go/runtime/mpagealloc.go b/libgo/go/runtime/mpagealloc.go
index 8b3c62c..c90a637 100644
--- a/libgo/go/runtime/mpagealloc.go
+++ b/libgo/go/runtime/mpagealloc.go
@@ -326,7 +326,20 @@ func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
s.scav.scavLWM = maxSearchAddr
}
+// tryChunkOf returns the bitmap data for the given chunk.
+//
+// Returns nil if the chunk data has not been mapped.
+func (s *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData {
+ l2 := s.chunks[ci.l1()]
+ if l2 == nil {
+ return nil
+ }
+ return &l2[ci.l2()]
+}
+
// chunkOf returns the chunk at the given chunk index.
+//
+// The chunk index must be valid or this method may throw.
func (s *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
return &s.chunks[ci.l1()][ci.l2()]
}
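tryChunkOf exists because s.chunks is a sparse two-level array: the L1 slot for an address range stays nil until that range has been mapped, and chunkOf would fault (or throw) on such an index. A standalone sketch of the same lookup shape, with made-up sizes and a plain int index standing in for chunkIdx:

package main

import "fmt"

const l2Size = 8 // entries per L2 block; illustrative only

type pallocData struct{ scavenged uint64 } // stand-in for the real bitmap

// chunks is sparse: an L1 slot stays nil until its range is mapped.
var chunks [4]*[l2Size]pallocData

// tryChunkOf nil-checks the L1 entry instead of faulting on
// unmapped regions, mirroring the new runtime helper.
func tryChunkOf(ci int) *pallocData {
	l2 := chunks[ci/l2Size]
	if l2 == nil {
		return nil
	}
	return &l2[ci%l2Size]
}

func main() {
	chunks[1] = new([l2Size]pallocData) // map one range only
	for ci := 0; ci < len(chunks)*l2Size; ci++ {
		if c := tryChunkOf(ci); c != nil {
			fmt.Printf("chunk %d mapped: %+v\n", ci, *c)
		}
	}
}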
diff --git a/libgo/go/runtime/proc_test.go b/libgo/go/runtime/proc_test.go
index b9828d9..a8f0dc3 100644
--- a/libgo/go/runtime/proc_test.go
+++ b/libgo/go/runtime/proc_test.go
@@ -529,9 +529,17 @@ func BenchmarkPingPongHog(b *testing.B) {
<-done
}
+var padData [128]uint64
+
func stackGrowthRecursive(i int) {
var pad [128]uint64
- if i != 0 && pad[0] == 0 {
+ pad = padData
+ for j := range pad {
+ if pad[j] != 0 {
+ return
+ }
+ }
+ if i != 0 {
stackGrowthRecursive(i - 1)
}
}
diff --git a/libgo/go/runtime/runtime2.go b/libgo/go/runtime/runtime2.go
index 5029dba..bf3fbac 100644
--- a/libgo/go/runtime/runtime2.go
+++ b/libgo/go/runtime/runtime2.go
@@ -450,6 +450,10 @@ type g struct {
// copying needs to acquire channel locks to protect these
// areas of the stack.
activeStackChans bool
+ // parkingOnChan indicates that the goroutine is about to
+ // park on a chansend or chanrecv. Used to signal an unsafe point
+ // for stack shrinking. It's a boolean value, but is updated atomically.
+ parkingOnChan uint8
raceignore int8 // ignore race detection events
sysblocktraced bool // StartTrace has emitted EvGoInSyscall about this goroutine
@@ -940,11 +944,6 @@ type _defer struct {
// panics
// This is the gccgo version.
-//
-// This is marked go:notinheap because _panic values must only ever
-// live on the stack.
-//
-//go:notinheap
type _panic struct {
// The next entry in the stack.
link *_panic
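The consumer of the new parkingOnChan field is the stack shrinker, which this diff does not touch. For context, the corresponding check in the upstream Go 1.15 runtime's stack.go looks roughly like the following; treat it as a sketch reproduced from memory rather than the exact libgo code (gccgo's stack handling differs), and note that isShrinkStackSafe, gp.syscallsp, and gp.asyncSafePoint are upstream names that do not appear in this diff:

// isShrinkStackSafe returns whether it's safe to attempt to shrink
// gp's stack. A goroutine parking on a channel is an unsafe point:
// between the status change and the activeStackChans store, sudog
// pointers into the stack are not yet protected by channel locks.
func isShrinkStackSafe(gp *g) bool {
	return gp.syscallsp == 0 &&
		!gp.asyncSafePoint &&
		atomic.Load8(&gp.parkingOnChan) == 0
}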
diff --git a/libgo/go/runtime/select.go b/libgo/go/runtime/select.go
index cf5d0c7..c8910b8 100644
--- a/libgo/go/runtime/select.go
+++ b/libgo/go/runtime/select.go
@@ -7,6 +7,7 @@ package runtime
// This file contains the implementation of Go select statements.
import (
+ "runtime/internal/atomic"
"unsafe"
)
@@ -72,7 +73,20 @@ func selunlock(scases []scase, lockorder []uint16) {
func selparkcommit(gp *g, _ unsafe.Pointer) bool {
// There are unlocked sudogs that point into gp's stack. Stack
// copying must lock the channels of those sudogs.
+ // Set activeStackChans here instead of before we try parking
+ // because we could self-deadlock in stack growth on a
+ // channel lock.
gp.activeStackChans = true
+ // Mark that it's safe for stack shrinking to occur now,
+ // because any thread acquiring this G's stack for shrinking
+ // is guaranteed to observe activeStackChans after this store.
+ atomic.Store8(&gp.parkingOnChan, 0)
+ // Make sure we unlock after setting activeStackChans and
+ // unsetting parkingOnChan. The moment we unlock any of the
+ // channel locks we risk gp getting readied by a channel operation
+ // and so gp could continue running before everything before the
+ // unlock is visible (even to gp itself).
+
// This must not access gp's stack (see gopark). In
// particular, it must not access the *hselect. That's okay,
// because by the time this is called, gp.waiting has all
@@ -313,6 +327,11 @@ loop:
// wait for someone to wake us up
gp.param = nil
+ // Signal to anyone trying to shrink our stack that we're about
+ // to park on a channel. The window between when this G's status
+ // changes and when we set gp.activeStackChans is not safe for
+ // stack shrinking.
+ atomic.Store8(&gp.parkingOnChan, 1)
gopark(selparkcommit, nil, waitReasonSelect, traceEvGoBlockSelect, 1)
gp.activeStackChans = false
diff --git a/libgo/go/runtime/trace/trace_stack_test.go b/libgo/go/runtime/trace/trace_stack_test.go
index cfc0419..f856fdc 100644
--- a/libgo/go/runtime/trace/trace_stack_test.go
+++ b/libgo/go/runtime/trace/trace_stack_test.go
@@ -252,6 +252,7 @@ func TestTraceSymbolize(t *testing.T) {
{trace.EvGoSysCall, []frame{
{"syscall.read", 0},
{"syscall.Read", 0},
+ {"internal/poll.(*FD).Read.func1", 0},
{"internal/poll.ignoringEINTR", 0},
{"internal/poll.(*FD).Read", 0},
{"os.(*File).read", 0},