author    Ian Lance Taylor <iant@golang.org>  2020-01-02 15:05:27 -0800
committer Ian Lance Taylor <iant@golang.org>  2020-01-21 23:53:22 -0800
commit    5a8ea165926cb0737ab03bc48c18dc5198ab5305 (patch)
tree      962dc3357c57f019f85658f99e2e753e30201c27 /libgo/go/sync
parent    6ac6529e155c9baa0aaaed7aca06bd38ebda5b43 (diff)
libgo: update to Go1.14beta1
Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/214297
Diffstat (limited to 'libgo/go/sync')
-rw-r--r--  libgo/go/sync/atomic/atomic_test.go  14
-rw-r--r--  libgo/go/sync/mutex.go                3
-rw-r--r--  libgo/go/sync/waitgroup_test.go      11
3 files changed, 15 insertions(+), 13 deletions(-)
diff --git a/libgo/go/sync/atomic/atomic_test.go b/libgo/go/sync/atomic/atomic_test.go
index 39c40c6..286eadc 100644
--- a/libgo/go/sync/atomic/atomic_test.go
+++ b/libgo/go/sync/atomic/atomic_test.go
@@ -1140,6 +1140,9 @@ func hammerStoreLoadUintptr(t *testing.T, paddr unsafe.Pointer) {
 	StoreUintptr(addr, new)
 }
 
+//go:nocheckptr
+// This code is just testing that LoadPointer/StorePointer operate
+// atomically; it's not actually calculating pointers.
 func hammerStoreLoadPointer(t *testing.T, paddr unsafe.Pointer) {
 	addr := (*unsafe.Pointer)(paddr)
 	v := uintptr(LoadPointer(addr))
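
The //go:nocheckptr directive is needed because Go 1.14 added checkptr instrumentation (enabled by default under -race and -msan) that can flag unsafe.Pointer values fabricated from integers, which is exactly what this hammer test does on purpose: it stores a counter through StorePointer. The directive exempts only the function it precedes. Below is a minimal standalone sketch of the same pattern; the helper name hammerCounter, the iteration count, and the use of the public sync/atomic API are illustrative choices, not the upstream test code.

package main

import (
	"sync/atomic"
	"unsafe"
)

// hammerCounter round-trips a small integer counter through
// unsafe.Pointer. The checkptr instrumentation can flag this kind of
// integer-to-pointer conversion, so //go:nocheckptr exempts the
// function (hypothetical helper, mirroring the test above).
//
//go:nocheckptr
func hammerCounter(addr *unsafe.Pointer, iters int) {
	for i := 0; i < iters; i++ {
		v := uintptr(atomic.LoadPointer(addr))
		atomic.StorePointer(addr, unsafe.Pointer(v+1))
	}
}

func main() {
	var p unsafe.Pointer
	hammerCounter(&p, 100)
}

Note that the directive applies only to the declaration that immediately follows it, which is why it sits directly above hammerStoreLoadPointer in the diff.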
@@ -1391,15 +1394,8 @@ func TestUnaligned64(t *testing.T) {
 	// Unaligned 64-bit atomics on 32-bit systems are
 	// a continual source of pain. Test that on 32-bit systems they crash
 	// instead of failing silently.
-
-	switch runtime.GOARCH {
-	default:
-		if !arch32 {
-			t.Skip("test only runs on 32-bit systems")
-		}
-	case "amd64p32":
-		// amd64p32 can handle unaligned atomics.
-		t.Skipf("test not needed on %v", runtime.GOARCH)
+	if !arch32 {
+		t.Skip("test only runs on 32-bit systems")
 	}
 	x := make([]uint32, 4)
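
The replacement drops the amd64p32 special case entirely: Go 1.14 removed the Native Client (NaCl) ports, and amd64p32 (64-bit instructions with 32-bit pointers) was the only such target that could handle unaligned 64-bit atomics, so the generic arch32 guard is all that remains. For reference, here is a sketch of how a flag like arch32 can be derived from the pointer size; the name comes from the diff, but this exact definition is an assumption rather than a quote of the test file.

package main

import (
	"fmt"
	"unsafe"
)

// arch32 reports whether this build uses 4-byte pointers, i.e. a
// 32-bit platform where unaligned 64-bit atomics should crash rather
// than fail silently. Assumed definition for this sketch only.
const arch32 = unsafe.Sizeof(uintptr(0)) == 4

func main() {
	fmt.Println("32-bit platform:", arch32)
}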
diff --git a/libgo/go/sync/mutex.go b/libgo/go/sync/mutex.go
index 11ad20c..3028552 100644
--- a/libgo/go/sync/mutex.go
+++ b/libgo/go/sync/mutex.go
@@ -216,7 +216,8 @@ func (m *Mutex) unlockSlow(new int32) {
 			old = m.state
 		}
 	} else {
-		// Starving mode: handoff mutex ownership to the next waiter.
+		// Starving mode: handoff mutex ownership to the next waiter, and yield
+		// our time slice so that the next waiter can start to run immediately.
 		// Note: mutexLocked is not set, the waiter will set it after wakeup.
 		// But mutex is still considered locked if mutexStarving is set,
 		// so new coming goroutines won't acquire it.
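
The expanded comment documents a Go 1.14 scheduling change rather than new logic in this function: in starving mode, unlockSlow passes ownership straight to the waiter at the head of the queue (via runtime_Semrelease with its handoff argument set), and the releasing goroutine now also gives up its remaining time slice so the waiter starts running immediately instead of after the releaser's quantum expires. The sketch below is only a toy model of that handoff-plus-yield idea, built from a channel and runtime.Gosched; every name in it is invented, and it is not how the runtime semaphore actually works.

package main

import (
	"fmt"
	"runtime"
)

// handoffMutex is a toy lock whose Unlock hands the single ownership
// token directly to one waiter and then yields, loosely mirroring the
// behavior the new comment describes.
type handoffMutex struct {
	ch chan struct{}
}

func newHandoffMutex() *handoffMutex {
	m := &handoffMutex{ch: make(chan struct{}, 1)}
	m.ch <- struct{}{} // start unlocked: one token in the buffer
	return m
}

func (m *handoffMutex) Lock() { <-m.ch }

func (m *handoffMutex) Unlock() {
	m.ch <- struct{}{} // wake exactly one waiter with the token
	runtime.Gosched()  // yield our time slice so that waiter can run now
}

func main() {
	m := newHandoffMutex()
	done := make(chan struct{})
	m.Lock()
	go func() {
		m.Lock() // blocks until main's Unlock hands over the token
		fmt.Println("waiter got the lock after the handoff")
		m.Unlock()
		close(done)
	}()
	m.Unlock()
	<-done
}

A channel send wakes a single blocked receiver, and the runtime queues blocked receivers first-in first-out, which is a rough analogue of the direct-handoff property; runtime.Gosched stands in for the yield.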
diff --git a/libgo/go/sync/waitgroup_test.go b/libgo/go/sync/waitgroup_test.go
index 4ab438c..c569e0f 100644
--- a/libgo/go/sync/waitgroup_test.go
+++ b/libgo/go/sync/waitgroup_test.go
@@ -147,7 +147,7 @@ func TestWaitGroupMisuse3(t *testing.T) {
 		}
 	}()
 	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
-	done := make(chan interface{}, 2)
+	done := make(chan interface{}, 3)
 	// The detection is opportunistic, so we want it to panic
 	// at least in one run out of a million.
 	for i := 0; i < 1e6; i++ {
@@ -171,8 +171,13 @@ func TestWaitGroupMisuse3(t *testing.T) {
 			}()
 			wg.Wait()
 		}()
-		wg.Wait()
-		for j := 0; j < 2; j++ {
+		go func() {
+			defer func() {
+				done <- recover()
+			}()
+			wg.Wait()
+		}()
+		for j := 0; j < 3; j++ {
 			if err := <-done; err != nil {
 				panic(err)
 			}
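
Both waitgroup_test.go hunks serve one change: the test now runs a second concurrent Wait in its own goroutine, so up to three goroutines can panic and report through done, which is why the channel buffer and the drain loop both grow from 2 to 3. The pattern worth noting is how panics are collected: every goroutine that may trip the misuse detection defers a send of recover() into done, and the parent re-panics on any non-nil value so the function-level recover can check the message. Here is a standalone sketch of that pattern, cut down to two goroutines and a single iteration; since the detection is opportunistic, a single run will usually see no panic at all, which is why the real test loops a million times.

package main

import (
	"fmt"
	"sync"
)

func main() {
	// One buffer slot per goroutine that may panic, so no send blocks.
	done := make(chan interface{}, 2)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer func() { done <- recover() }()
		wg.Done()
		wg.Add(1) // deliberate misuse: reuse that can race with Wait below
		wg.Done()
	}()
	go func() {
		defer func() { done <- recover() }()
		wg.Wait()
	}()
	// Drain one recover() result per goroutine; non-nil means the
	// runtime's opportunistic misuse detection fired.
	for j := 0; j < 2; j++ {
		if err := <-done; err != nil {
			fmt.Println("caught:", err)
		}
	}
}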