author    Ian Lance Taylor <iant@golang.org>    2020-02-26 11:15:50 -0800
committer Ian Lance Taylor <iant@golang.org>    2020-02-26 12:19:13 -0800
commit    c5decc83e4eb06103c801fd4f8215301ce746109 (patch)
tree      5443f7ec2e16fac152fe1af564d10b0d29ff1b95 /libgo/go/runtime
parent    051b9873e78fe1acb1a3fecd0c6e5685b6c12fb3 (diff)
libgo: update to final Go1.14 release
Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/221158
Diffstat (limited to 'libgo/go/runtime')
-rw-r--r--  libgo/go/runtime/malloc.go       |  7
-rw-r--r--  libgo/go/runtime/mkpreempt.go    |  9
-rw-r--r--  libgo/go/runtime/netpoll_stub.go | 17
3 files changed, 27 insertions(+), 6 deletions(-)
diff --git a/libgo/go/runtime/malloc.go b/libgo/go/runtime/malloc.go
index 35ace7f..266f5eb 100644
--- a/libgo/go/runtime/malloc.go
+++ b/libgo/go/runtime/malloc.go
@@ -62,9 +62,10 @@
// Allocating and freeing a large object uses the mheap
// directly, bypassing the mcache and mcentral.
//
-// Free object slots in an mspan are zeroed only if mspan.needzero is
-// false. If needzero is true, objects are zeroed as they are
-// allocated. There are various benefits to delaying zeroing this way:
+// If mspan.needzero is false, then free object slots in the mspan are
+// already zeroed. Otherwise, if needzero is true, objects are zeroed as
+// they are allocated. There are various benefits to delaying zeroing
+// this way:
//
// 1. Stack frame allocation can avoid zeroing altogether.
//
diff --git a/libgo/go/runtime/mkpreempt.go b/libgo/go/runtime/mkpreempt.go
index 64e2207..31b6f5c 100644
--- a/libgo/go/runtime/mkpreempt.go
+++ b/libgo/go/runtime/mkpreempt.go
@@ -244,6 +244,15 @@ func genAMD64() {
// TODO: MXCSR register?
+ // Apparently, the signal handling code path in the darwin kernel leaves
+ // the upper bits of the Y registers in a dirty state, which causes
+ // many SSE operations (128-bit and narrower) to become much slower.
+ // Clear the upper bits to get back to a clean state. See issue #37174.
+ // It is safe here as Go code doesn't use the upper bits of the Y registers.
+ p("#ifdef GOOS_darwin")
+ p("VZEROUPPER")
+ p("#endif")
+
p("PUSHQ BP")
p("MOVQ SP, BP")
p("// Save flags before clobbering them")
diff --git a/libgo/go/runtime/netpoll_stub.go b/libgo/go/runtime/netpoll_stub.go
index fe45cfb..f86f2f6 100644
--- a/libgo/go/runtime/netpoll_stub.go
+++ b/libgo/go/runtime/netpoll_stub.go
@@ -13,16 +13,23 @@ var netpollWaiters uint32
var netpollStubLock mutex
var netpollNote note
-var netpollBroken uint32
+
+// netpollBroken, protected by netpollBrokenLock, avoids a double notewakeup.
+var netpollBrokenLock mutex
+var netpollBroken bool
func netpollGenericInit() {
atomic.Store(&netpollInited, 1)
}
func netpollBreak() {
- if atomic.Cas(&netpollBroken, 0, 1) {
+ lock(&netpollBrokenLock)
+ broken := netpollBroken
+ netpollBroken = true
+ if !broken {
notewakeup(&netpollNote)
}
+ unlock(&netpollBrokenLock)
}
// Polls for ready network connections.
@@ -34,8 +41,12 @@ func netpoll(delay int64) gList {
// This lock ensures that only one goroutine tries to use
// the note. It should normally be completely uncontended.
lock(&netpollStubLock)
+
+ lock(&netpollBrokenLock)
noteclear(&netpollNote)
- atomic.Store(&netpollBroken, 0)
+ netpollBroken = false
+ unlock(&netpollBrokenLock)
+
notetsleep(&netpollNote, delay)
unlock(&netpollStubLock)
}
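The netpoll_stub change replaces the atomic CAS on netpollBroken with a bool guarded by its own mutex, so netpollBreak wakes the note at most once per round and netpoll can reset the flag under the same lock before sleeping again. A hedged sketch of that pattern, using sync.Mutex and a buffered channel in place of the runtime's mutex and note (the poller type and its names are illustrative):

// Illustrative stand-in for the netpollBroken/netpollBrokenLock pattern;
// the buffered channel plays the role of the runtime note.
package main

import (
	"fmt"
	"sync"
)

type poller struct {
	mu     sync.Mutex
	broken bool
	wake   chan struct{}
}

// breakPoll wakes a sleeping poll at most once until poll resets the flag,
// like netpollBreak's check-and-set under netpollBrokenLock.
func (p *poller) breakPoll() {
	p.mu.Lock()
	if !p.broken {
		p.broken = true
		p.wake <- struct{}{} // capacity 1, so this never blocks
	}
	p.mu.Unlock()
}

// poll clears the flag under the same lock, then sleeps until woken,
// analogous to noteclear followed by notetsleep.
func (p *poller) poll() {
	p.mu.Lock()
	p.broken = false
	p.mu.Unlock()
	<-p.wake
	fmt.Println("poll woken")
}

func main() {
	p := &poller{wake: make(chan struct{}, 1)}
	p.breakPoll()
	p.breakPoll() // no-op: the flag is already set, so no second wakeup
	p.poll()
}

Keeping the check and the set under one lock is what makes the second breakPoll a no-op; the earlier CAS gave the same guarantee for the wakeup, but the flag reset in poll was a separate atomic store, which the lock now ties into the same critical section discipline.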