author     Ian Lance Taylor <iant@golang.org>    2020-12-23 09:57:37 -0800
committer  Ian Lance Taylor <iant@golang.org>    2020-12-30 15:13:24 -0800
commit     cfcbb4227fb20191e04eb8d7766ae6202f526afd (patch)
tree       e2effea96f6f204451779f044415c2385e45042b /libgo/go/sync
parent     0696141107d61483f38482b941549959a0d7f613 (diff)
libgo: update to Go1.16beta1 release
This does not yet include support for the //go:embed directive added
in this release.

	* Makefile.am (check-runtime): Don't create check-runtime-dir.
	(mostlyclean-local): Don't remove check-runtime-dir.
	(check-go-tool, check-vet): Copy in go.mod and modules.txt.
	(check-cgo-test, check-carchive-test): Add go.mod file.
	* Makefile.in: Regenerate.

Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/280172
Diffstat (limited to 'libgo/go/sync')
-rw-r--r--  libgo/go/sync/atomic/atomic.c        24
-rw-r--r--  libgo/go/sync/atomic/atomic_test.go   9
-rw-r--r--  libgo/go/sync/atomic/doc.go          11
-rw-r--r--  libgo/go/sync/atomic/unaligned.go    14
-rw-r--r--  libgo/go/sync/cond_test.go            1
-rw-r--r--  libgo/go/sync/mutex_test.go           2
-rw-r--r--  libgo/go/sync/once.go                 4
-rw-r--r--  libgo/go/sync/pool.go                20
-rw-r--r--  libgo/go/sync/poolqueue.go            2
-rw-r--r--  libgo/go/sync/rwmutex.go             13
10 files changed, 75 insertions, 25 deletions
diff --git a/libgo/go/sync/atomic/atomic.c b/libgo/go/sync/atomic/atomic.c
index 71d51aa..bc45ba9 100644
--- a/libgo/go/sync/atomic/atomic.c
+++ b/libgo/go/sync/atomic/atomic.c
@@ -8,6 +8,10 @@
#include "runtime.h"
+extern void panicUnaligned(void)
+ __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.panicUnaligned")
+ __attribute__ ((noreturn));
+
int32_t SwapInt32 (int32_t *, int32_t)
__asm__ (GOSYM_PREFIX "sync_1atomic.SwapInt32")
__attribute__ ((no_split_stack));
@@ -26,7 +30,7 @@ int64_t
SwapInt64 (int64_t *addr, int64_t new)
{
if (((uintptr_t) addr & 7) != 0)
- panicmem ();
+ panicUnaligned ();
return __atomic_exchange_n (addr, new, __ATOMIC_SEQ_CST);
}
@@ -48,7 +52,7 @@ uint64_t
SwapUint64 (uint64_t *addr, uint64_t new)
{
if (((uintptr_t) addr & 7) != 0)
- panicmem ();
+ panicUnaligned ();
return __atomic_exchange_n (addr, new, __ATOMIC_SEQ_CST);
}
@@ -81,7 +85,7 @@ _Bool
CompareAndSwapInt64 (int64_t *val, int64_t old, int64_t new)
{
if (((uintptr_t) val & 7) != 0)
- val = NULL;
+ panicUnaligned ();
return __atomic_compare_exchange_n (val, &old, new, false, __ATOMIC_SEQ_CST,
__ATOMIC_RELAXED);
}
@@ -105,7 +109,7 @@ _Bool
CompareAndSwapUint64 (uint64_t *val, uint64_t old, uint64_t new)
{
if (((uintptr_t) val & 7) != 0)
- val = NULL;
+ panicUnaligned ();
return __atomic_compare_exchange_n (val, &old, new, false, __ATOMIC_SEQ_CST,
__ATOMIC_RELAXED);
}
@@ -149,7 +153,7 @@ int64_t
AddInt64 (int64_t *val, int64_t delta)
{
if (((uintptr_t) val & 7) != 0)
- val = NULL;
+ panicUnaligned ();
return __atomic_add_fetch (val, delta, __ATOMIC_SEQ_CST);
}
@@ -161,7 +165,7 @@ uint64_t
AddUint64 (uint64_t *val, uint64_t delta)
{
if (((uintptr_t) val & 7) != 0)
- val = NULL;
+ panicUnaligned ();
return __atomic_add_fetch (val, delta, __ATOMIC_SEQ_CST);
}
@@ -193,7 +197,7 @@ int64_t
LoadInt64 (int64_t *addr)
{
if (((uintptr_t) addr & 7) != 0)
- panicmem ();
+ panicUnaligned ();
return __atomic_load_n (addr, __ATOMIC_SEQ_CST);
}
@@ -215,7 +219,7 @@ uint64_t
LoadUint64 (uint64_t *addr)
{
if (((uintptr_t) addr & 7) != 0)
- panicmem ();
+ panicUnaligned ();
return __atomic_load_n (addr, __ATOMIC_SEQ_CST);
}
@@ -257,7 +261,7 @@ void
StoreInt64 (int64_t *addr, int64_t val)
{
if (((uintptr_t) addr & 7) != 0)
- panicmem ();
+ panicUnaligned ();
__atomic_store_n (addr, val, __ATOMIC_SEQ_CST);
}
@@ -279,7 +283,7 @@ void
StoreUint64 (uint64_t *addr, uint64_t val)
{
if (((uintptr_t) addr & 7) != 0)
- panicmem ();
+ panicUnaligned ();
__atomic_store_n (addr, val, __ATOMIC_SEQ_CST);
}
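
Taken together, the hunks above make every 64-bit atomic entry point fail loudly on a misaligned address: the load, store, and swap paths previously called panicmem, and the compare-and-swap and add paths silently nulled the pointer to force a fault. A minimal sketch of Go code that reaches this path, assuming a 32-bit target (386, ARM, 32-bit MIPS); on a 64-bit platform the field is naturally aligned and nothing panics:

package main

import (
	"fmt"
	"sync/atomic"
)

// On 32-bit targets the heap allocation is 8-byte aligned but y sits
// at offset 4, so &v.y is not 64-bit aligned.
type t struct {
	x int32
	y int64
}

func main() {
	defer func() {
		// With this patch the message is the precise
		// "unaligned 64-bit atomic operation" rather than a
		// generic memory fault.
		if r := recover(); r != nil {
			fmt.Println("recovered:", r)
		}
	}()
	v := new(t)
	atomic.AddInt64(&v.y, 1)
	fmt.Println("no panic (64-bit platform):", v.y)
}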
diff --git a/libgo/go/sync/atomic/atomic_test.go b/libgo/go/sync/atomic/atomic_test.go
index 83e7c8d..eadc962 100644
--- a/libgo/go/sync/atomic/atomic_test.go
+++ b/libgo/go/sync/atomic/atomic_test.go
@@ -1397,8 +1397,15 @@ func TestStoreLoadRelAcq64(t *testing.T) {
func shouldPanic(t *testing.T, name string, f func()) {
defer func() {
- if recover() == nil {
+ // Check that all GC maps are sane.
+ runtime.GC()
+
+ err := recover()
+ want := "unaligned 64-bit atomic operation"
+ if err == nil {
t.Errorf("%s did not panic", name)
+ } else if s, _ := err.(string); s != want {
+ t.Errorf("%s: wanted panic %q, got %q", name, want, err)
}
}()
f()
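
The helper now asserts the exact panic value rather than merely that some panic occurred, so a wrong panic (say, a nil dereference) no longer passes. The same check works outside a test; expectPanic below is an illustrative name, not part of the patch:

package main

import "fmt"

// expectPanic runs f and verifies it panics with exactly want.
func expectPanic(want string, f func()) error {
	var err error
	func() {
		defer func() {
			r := recover()
			if r == nil {
				err = fmt.Errorf("did not panic")
			} else if s, _ := r.(string); s != want {
				err = fmt.Errorf("wanted panic %q, got %v", want, r)
			}
		}()
		f()
	}()
	return err
}

func main() {
	fmt.Println(expectPanic("boom", func() { panic("boom") })) // <nil>
	fmt.Println(expectPanic("boom", func() {}))                // did not panic
}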
diff --git a/libgo/go/sync/atomic/doc.go b/libgo/go/sync/atomic/doc.go
index ff4ad80..805ef95 100644
--- a/libgo/go/sync/atomic/doc.go
+++ b/libgo/go/sync/atomic/doc.go
@@ -43,15 +43,14 @@ import (
"unsafe"
)
-// BUG(rsc): On x86-32, the 64-bit functions use instructions unavailable before the Pentium MMX.
+// BUG(rsc): On 386, the 64-bit functions use instructions unavailable before the Pentium MMX.
//
// On non-Linux ARM, the 64-bit functions use instructions unavailable before the ARMv6k core.
//
-// On ARM, x86-32, and 32-bit MIPS,
-// it is the caller's responsibility to arrange for 64-bit
-// alignment of 64-bit words accessed atomically. The first word in a
-// variable or in an allocated struct, array, or slice can be relied upon to be
-// 64-bit aligned.
+// On ARM, 386, and 32-bit MIPS, it is the caller's responsibility
+// to arrange for 64-bit alignment of 64-bit words accessed atomically.
+// The first word in a variable or in an allocated struct, array, or slice can
+// be relied upon to be 64-bit aligned.
// SwapInt32 atomically stores new into *addr and returns the previous *addr value.
func SwapInt32(addr *int32, new int32) (old int32)
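
The rewrapped paragraph preserves the practical rule: on 32-bit targets the caller must arrange 64-bit alignment, and the first word of a variable or allocation is guaranteed to be aligned. The conventional fix is therefore to put the 64-bit field first; a small sketch (counter is illustrative, not from the patch):

package main

import (
	"fmt"
	"sync/atomic"
)

// counter relies on the documented guarantee: the first word of an
// allocated struct is 64-bit aligned, so atomic access to n is safe
// even on ARM, 386, and 32-bit MIPS.
type counter struct {
	n     int64 // keep first for alignment
	label string
}

func main() {
	c := new(counter)
	atomic.AddInt64(&c.n, 1)
	fmt.Println(atomic.LoadInt64(&c.n))
}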
diff --git a/libgo/go/sync/atomic/unaligned.go b/libgo/go/sync/atomic/unaligned.go
new file mode 100644
index 0000000..fbfe125
--- /dev/null
+++ b/libgo/go/sync/atomic/unaligned.go
@@ -0,0 +1,14 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+import _ "unsafe" // for go:linkname
+
+// Let the C code call this function.
+//go:linkname panicUnaligned
+
+func panicUnaligned() {
+ panic("unaligned 64-bit atomic operation")
+}
diff --git a/libgo/go/sync/cond_test.go b/libgo/go/sync/cond_test.go
index 9d0d9ad..859cae5 100644
--- a/libgo/go/sync/cond_test.go
+++ b/libgo/go/sync/cond_test.go
@@ -1,6 +1,7 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+
package sync_test
import (
diff --git a/libgo/go/sync/mutex_test.go b/libgo/go/sync/mutex_test.go
index e61a853..98c1bf2 100644
--- a/libgo/go/sync/mutex_test.go
+++ b/libgo/go/sync/mutex_test.go
@@ -194,7 +194,7 @@ func TestMutexFairness(t *testing.T) {
}
}
}()
- done := make(chan bool)
+ done := make(chan bool, 1)
go func() {
for i := 0; i < 10; i++ {
time.Sleep(100 * time.Microsecond)
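
Giving done a buffer of one is the standard fix for a leaked goroutine: if the test times out and the receiver goes away, the sender's final done <- true still completes instead of blocking forever. A standalone sketch of the idiom (not the test itself):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Buffer of 1: the send below succeeds even if nobody is
	// receiving anymore, so the goroutine always exits.
	done := make(chan bool, 1)
	go func() {
		time.Sleep(50 * time.Millisecond) // simulated work
		done <- true                      // never blocks
	}()
	select {
	case <-done:
		fmt.Println("worker finished in time")
	case <-time.After(10 * time.Millisecond):
		fmt.Println("timed out; worker will still exit cleanly")
	}
}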
diff --git a/libgo/go/sync/once.go b/libgo/go/sync/once.go
index ca04408..8844314 100644
--- a/libgo/go/sync/once.go
+++ b/libgo/go/sync/once.go
@@ -9,11 +9,13 @@ import (
)
// Once is an object that will perform exactly one action.
+//
+// A Once must not be copied after first use.
type Once struct {
// done indicates whether the action has been performed.
// It is first in the struct because it is used in the hot path.
// The hot path is inlined at every call site.
- // Placing done first allows more compact instructions on some architectures (amd64/x86),
+ // Placing done first allows more compact instructions on some architectures (amd64/386),
// and fewer instructions (to calculate offset) on other architectures.
done uint32
m Mutex
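
The new doc sentence brings Once in line with Mutex, WaitGroup, and the other sync types: it embeds a Mutex, so copying it after first use can skip or repeat the action, and go vet's copylocks check flags such copies. Basic usage for reference (a sketch, not from the patch):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var once sync.Once
	for i := 0; i < 3; i++ {
		once.Do(func() { fmt.Println("runs exactly once") })
	}
	// other := once // copying after use: flagged by `go vet`
}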
diff --git a/libgo/go/sync/pool.go b/libgo/go/sync/pool.go
index ca7afdb..a892fa7 100644
--- a/libgo/go/sync/pool.go
+++ b/libgo/go/sync/pool.go
@@ -152,8 +152,8 @@ func (p *Pool) Get() interface{} {
func (p *Pool) getSlow(pid int) interface{} {
// See the comment in pin regarding ordering of the loads.
- size := atomic.LoadUintptr(&p.localSize) // load-acquire
- locals := p.local // load-consume
+ size := runtime_LoadAcquintptr(&p.localSize) // load-acquire
+ locals := p.local // load-consume
// Try to steal one element from other procs.
for i := 0; i < int(size); i++ {
l := indexLocal(locals, (pid+i+1)%int(size))
@@ -198,8 +198,8 @@ func (p *Pool) pin() (*poolLocal, int) {
// Since we've disabled preemption, GC cannot happen in between.
// Thus here we must observe local at least as large localSize.
// We can observe a newer/larger local, it is fine (we must observe its zero-initialized-ness).
- s := atomic.LoadUintptr(&p.localSize) // load-acquire
- l := p.local // load-consume
+ s := runtime_LoadAcquintptr(&p.localSize) // load-acquire
+ l := p.local // load-consume
if uintptr(pid) < s {
return indexLocal(l, pid), pid
}
@@ -226,7 +226,7 @@ func (p *Pool) pinSlow() (*poolLocal, int) {
size := runtime.GOMAXPROCS(0)
local := make([]poolLocal, size)
atomic.StorePointer(&p.local, unsafe.Pointer(&local[0])) // store-release
- atomic.StoreUintptr(&p.localSize, uintptr(size)) // store-release
+ runtime_StoreReluintptr(&p.localSize, uintptr(size)) // store-release
return &local[pid], pid
}
@@ -282,3 +282,13 @@ func indexLocal(l unsafe.Pointer, i int) *poolLocal {
func runtime_registerPoolCleanup(cleanup func())
func runtime_procPin() int
func runtime_procUnpin()
+
+// The below are implemented in runtime/internal/atomic and the
+// compiler also knows to intrinsify the symbol we linkname into this
+// package.
+
+//go:linkname runtime_LoadAcquintptr runtime_1internal_1atomic.LoadAcquintptr
+func runtime_LoadAcquintptr(ptr *uintptr) uintptr
+
+//go:linkname runtime_StoreReluintptr runtime_1internal_1atomic.StoreReluintptr
+func runtime_StoreReluintptr(ptr *uintptr, val uintptr) uintptr
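
These linknamed intrinsics give Pool exactly the acquire/release ordering that its local/localSize publication protocol needs, where the plain sync/atomic calls they replace are sequentially consistent and therefore stronger than required. The protocol can be sketched with the portable API (all names below are illustrative; unsafe.Slice needs Go 1.17 or later): the writer publishes the pointer before the size, and the reader loads the size before the pointer, so a nonzero size proves the pointer is already visible:

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// Illustrative stand-ins for Pool's local/localSize pair.
var (
	local     unsafe.Pointer // first element of a published []int
	localSize uintptr
)

// publish mirrors pinSlow: pointer first, then size (store-release).
func publish(vals []int) {
	atomic.StorePointer(&local, unsafe.Pointer(&vals[0])) // store-release
	atomic.StoreUintptr(&localSize, uintptr(len(vals)))   // store-release
}

// read mirrors pin/getSlow: size first (load-acquire), then pointer.
func read() []int {
	n := atomic.LoadUintptr(&localSize) // load-acquire
	p := atomic.LoadPointer(&local)     // load-consume
	if n == 0 {
		return nil
	}
	return unsafe.Slice((*int)(p), int(n))
}

func main() {
	publish([]int{1, 2, 3})
	fmt.Println(read()) // [1 2 3]
}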
diff --git a/libgo/go/sync/poolqueue.go b/libgo/go/sync/poolqueue.go
index 22f7496..9be83e9 100644
--- a/libgo/go/sync/poolqueue.go
+++ b/libgo/go/sync/poolqueue.go
@@ -57,7 +57,7 @@ const dequeueBits = 32
// the index. We divide by 4 so this fits in an int on 32-bit.
const dequeueLimit = (1 << dequeueBits) / 4
-// dequeueNil is used in poolDeqeue to represent interface{}(nil).
+// dequeueNil is used in poolDequeue to represent interface{}(nil).
// Since we use nil to represent empty slots, we need a sentinel value
// to represent nil.
type dequeueNil *struct{}
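
Besides the typo fix, the surrounding comment describes a common trick: nil already means "empty slot" inside the ring buffer, so a stored interface{}(nil) needs a distinct sentinel. A self-contained sketch of the encoding (store and load are illustrative helpers):

package main

import "fmt"

// dequeueNil stands in for a stored interface{}(nil), because nil
// itself marks an empty slot.
type dequeueNil *struct{}

func store(slot *interface{}, val interface{}) {
	if val == nil {
		val = dequeueNil(nil) // encode nil as the sentinel
	}
	*slot = val
}

func load(slot *interface{}) (val interface{}, ok bool) {
	v := *slot
	if v == nil {
		return nil, false // empty slot
	}
	if _, isNil := v.(dequeueNil); isNil {
		return nil, true // a stored nil
	}
	return v, true
}

func main() {
	var slot interface{}
	store(&slot, nil)
	fmt.Println(load(&slot)) // <nil> true
}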
diff --git a/libgo/go/sync/rwmutex.go b/libgo/go/sync/rwmutex.go
index dc0faf6..3012b55 100644
--- a/libgo/go/sync/rwmutex.go
+++ b/libgo/go/sync/rwmutex.go
@@ -35,6 +35,19 @@ type RWMutex struct {
const rwmutexMaxReaders = 1 << 30
+// Happens-before relationships are indicated to the race detector via:
+// - Unlock -> Lock: readerSem
+// - Unlock -> RLock: readerSem
+// - RUnlock -> Lock: writerSem
+//
+// The methods below temporarily disable handling of race synchronization
+// events in order to provide the more precise model above to the race
+// detector.
+//
+// For example, atomic.AddInt32 in RLock should not appear to provide
+// acquire-release semantics, which would incorrectly synchronize racing
+// readers, thus potentially missing races.
+
// RLock locks rw for reading.
//
// It should not be used for recursive read locking; a blocked Lock
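
The comment block added in the rwmutex.go hunk above spells out the three happens-before edges RWMutex promises to the race detector. The Unlock -> RLock edge, for example, means a reader that acquires the lock after a writer releases it must observe the writer's stores; a minimal sketch:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var mu sync.RWMutex
	data := 0

	mu.Lock() // held until the writer goroutine finishes
	go func() {
		data = 42 // write under the write lock
		mu.Unlock()
	}()

	// RLock blocks until Unlock; the Unlock -> RLock happens-before
	// edge guarantees the write to data is visible here.
	mu.RLock()
	fmt.Println(data) // 42
	mu.RUnlock()
}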