Diffstat (limited to 'libgo/go/runtime/internal/atomic')
-rw-r--r--  libgo/go/runtime/internal/atomic/atomic.c       |  82
-rw-r--r--  libgo/go/runtime/internal/atomic/atomic_test.go | 128
-rw-r--r--  libgo/go/runtime/internal/atomic/bench_test.go  |  91
-rw-r--r--  libgo/go/runtime/internal/atomic/gccgo.go       |  18
-rw-r--r--  libgo/go/runtime/internal/atomic/unaligned.go   |  14
5 files changed, 324 insertions, 9 deletions
diff --git a/libgo/go/runtime/internal/atomic/atomic.c b/libgo/go/runtime/internal/atomic/atomic.c
index 9fed1a8..569e56e 100644
--- a/libgo/go/runtime/internal/atomic/atomic.c
+++ b/libgo/go/runtime/internal/atomic/atomic.c
@@ -6,6 +6,10 @@
#include "runtime.h"
+extern void panicUnaligned(void)
+ __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.panicUnaligned")
+ __attribute__ ((noreturn));
+
uint32_t Load (uint32_t *ptr)
__asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Load")
__attribute__ ((no_split_stack));
@@ -44,7 +48,7 @@ uint64_t
Load64 (uint64_t *ptr)
{
if (((uintptr_t) ptr & 7) != 0)
- panicmem ();
+ panicUnaligned ();
return __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
}
@@ -58,6 +62,28 @@ LoadAcq (uint32_t *ptr)
return __atomic_load_n (ptr, __ATOMIC_ACQUIRE);
}
+uint64_t LoadAcq64 (uint64_t *ptr)
+ __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.LoadAcq64")
+ __attribute__ ((no_split_stack));
+
+uint64_t
+LoadAcq64 (uint64_t *ptr)
+{
+ if (((uintptr_t) ptr & 7) != 0)
+ panicUnaligned ();
+ return __atomic_load_n (ptr, __ATOMIC_ACQUIRE);
+}
+
+uintptr_t LoadAcquintptr (uintptr_t *ptr)
+ __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.LoadAcquintptr")
+ __attribute__ ((no_split_stack));
+
+uintptr_t
+LoadAcquintptr (uintptr_t *ptr)
+{
+ return __atomic_load_n (ptr, __ATOMIC_ACQUIRE);
+}
+
uintptr_t Loaduintptr (uintptr_t *ptr)
__asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Loaduintptr")
__attribute__ ((no_split_stack));
@@ -86,7 +112,7 @@ int64_t
Loadint64 (int64_t *ptr)
{
if (((uintptr_t) ptr & 7) != 0)
- panicmem ();
+ panicUnaligned ();
return __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
}
@@ -108,7 +134,7 @@ uint64_t
Xadd64 (uint64_t *ptr, int64_t delta)
{
if (((uintptr_t) ptr & 7) != 0)
- panicmem ();
+ panicUnaligned ();
return __atomic_add_fetch (ptr, (uint64_t) delta, __ATOMIC_SEQ_CST);
}
@@ -130,7 +156,7 @@ int64_t
Xaddint64 (int64_t *ptr, int64_t delta)
{
if (((uintptr_t) ptr & 7) != 0)
- panicmem ();
+ panicUnaligned ();
return __atomic_add_fetch (ptr, delta, __ATOMIC_SEQ_CST);
}
@@ -152,7 +178,7 @@ uint64_t
Xchg64 (uint64_t *ptr, uint64_t new)
{
if (((uintptr_t) ptr & 7) != 0)
- panicmem ();
+ panicUnaligned ();
return __atomic_exchange_n (ptr, new, __ATOMIC_SEQ_CST);
}
@@ -186,6 +212,26 @@ Or8 (uint8_t *ptr, uint8_t val)
__atomic_or_fetch (ptr, val, __ATOMIC_SEQ_CST);
}
+void And (uint32_t *ptr, uint32_t val)
+ __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.And")
+ __attribute__ ((no_split_stack));
+
+void
+And (uint32_t *ptr, uint32_t val)
+{
+ __atomic_and_fetch (ptr, val, __ATOMIC_SEQ_CST);
+}
+
+void Or (uint32_t *ptr, uint32_t val)
+ __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Or")
+ __attribute__ ((no_split_stack));
+
+void
+Or (uint32_t *ptr, uint32_t val)
+{
+ __atomic_or_fetch (ptr, val, __ATOMIC_SEQ_CST);
+}
+
_Bool Cas (uint32_t *ptr, uint32_t old, uint32_t new)
__asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Cas")
__attribute__ ((no_split_stack));
@@ -204,7 +250,7 @@ _Bool
Cas64 (uint64_t *ptr, uint64_t old, uint64_t new)
{
if (((uintptr_t) ptr & 7) != 0)
- panicmem ();
+ panicUnaligned ();
return __atomic_compare_exchange_n (ptr, &old, new, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
}
@@ -266,7 +312,7 @@ void
Store64 (uint64_t *ptr, uint64_t val)
{
if (((uintptr_t) ptr & 7) != 0)
- panicmem ();
+ panicUnaligned ();
__atomic_store_n (ptr, val, __ATOMIC_SEQ_CST);
}
@@ -280,6 +326,28 @@ StoreRel (uint32_t *ptr, uint32_t val)
__atomic_store_n (ptr, val, __ATOMIC_RELEASE);
}
+void StoreRel64 (uint64_t *ptr, uint64_t val)
+ __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.StoreRel64")
+ __attribute__ ((no_split_stack));
+
+void
+StoreRel64 (uint64_t *ptr, uint64_t val)
+{
+ if (((uintptr_t) ptr & 7) != 0)
+ panicUnaligned ();
+ __atomic_store_n (ptr, val, __ATOMIC_RELEASE);
+}
+
+void StoreReluintptr (uintptr_t *ptr, uintptr_t val)
+ __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.StoreReluintptr")
+ __attribute__ ((no_split_stack));
+
+void
+StoreReluintptr (uintptr_t *ptr, uintptr_t val)
+{
+ __atomic_store_n (ptr, val, __ATOMIC_RELEASE);
+}
+
void Storeuintptr (uintptr_t *ptr, uintptr_t val)
__asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Storeuintptr")
__attribute__ ((no_split_stack));
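
[Note on the C side: every entry point maps directly onto the matching GCC __atomic builtin with the stated memory order, and every 64-bit operation first checks 8-byte alignment, now panicking through panicUnaligned rather than the misleading panicmem. A minimal Go sketch of the rule being enforced -- not part of the patch, and using sync/atomic since runtime/internal/atomic is not importable from user code:]

package main

import "sync/atomic"

// On 32-bit targets a uint64 struct field may get only 4-byte alignment,
// so n below can sit at offset 4; the atomic access then panics with
// "unaligned 64-bit atomic operation", the message panicUnaligned raises.
type pair struct {
	flag uint32
	n    uint64 // possibly not 8-byte aligned on 32-bit targets
}

func main() {
	var p pair
	atomic.AddUint64(&p.n, 1) // fine on 64-bit; may panic on GOARCH=386 or arm
}
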
diff --git a/libgo/go/runtime/internal/atomic/atomic_test.go b/libgo/go/runtime/internal/atomic/atomic_test.go
index b0a8fa0..c9c2eba 100644
--- a/libgo/go/runtime/internal/atomic/atomic_test.go
+++ b/libgo/go/runtime/internal/atomic/atomic_test.go
@@ -73,8 +73,15 @@ func TestXadduintptrOnUint64(t *testing.T) {
func shouldPanic(t *testing.T, name string, f func()) {
defer func() {
- if recover() == nil {
+ // Check that all GC maps are sane.
+ runtime.GC()
+
+ err := recover()
+ want := "unaligned 64-bit atomic operation"
+ if err == nil {
t.Errorf("%s did not panic", name)
+ } else if s, _ := err.(string); s != want {
+ t.Errorf("%s: wanted panic %q, got %q", name, want, err)
}
}()
f()
@@ -143,6 +150,45 @@ func TestAnd8(t *testing.T) {
}
}
+func TestAnd(t *testing.T) {
+ // Basic sanity check.
+ x := uint32(0xffffffff)
+ for i := uint32(0); i < 32; i++ {
+ atomic.And(&x, ^(1 << i))
+ if r := uint32(0xffffffff) << (i + 1); x != r {
+ t.Fatalf("clearing bit %#x: want %#x, got %#x", uint32(1<<i), r, x)
+ }
+ }
+
+ // Set every bit in array to 1.
+ a := make([]uint32, 1<<12)
+ for i := range a {
+ a[i] = 0xffffffff
+ }
+
+ // Clear array bit-by-bit in different goroutines.
+ done := make(chan bool)
+ for i := 0; i < 32; i++ {
+ m := ^uint32(1 << i)
+ go func() {
+ for i := range a {
+ atomic.And(&a[i], m)
+ }
+ done <- true
+ }()
+ }
+ for i := 0; i < 32; i++ {
+ <-done
+ }
+
+ // Check that the array has been totally cleared.
+ for i, v := range a {
+ if v != 0 {
+ t.Fatalf("a[%v] not cleared: want %#x, got %#x", i, uint32(0), v)
+ }
+ }
+}
+
func TestOr8(t *testing.T) {
// Basic sanity check.
x := uint8(0)
@@ -179,7 +225,43 @@ func TestOr8(t *testing.T) {
}
}
-func TestBitwiseContended(t *testing.T) {
+func TestOr(t *testing.T) {
+ // Basic sanity check.
+ x := uint32(0)
+ for i := uint32(0); i < 32; i++ {
+ atomic.Or(&x, 1<<i)
+ if r := (uint32(1) << (i + 1)) - 1; x != r {
+ t.Fatalf("setting bit %#x: want %#x, got %#x", uint32(1)<<i, r, x)
+ }
+ }
+
+ // Start with every bit in array set to 0.
+ a := make([]uint32, 1<<12)
+
+ // Set every bit in array bit-by-bit in different goroutines.
+ done := make(chan bool)
+ for i := 0; i < 32; i++ {
+ m := uint32(1 << i)
+ go func() {
+ for i := range a {
+ atomic.Or(&a[i], m)
+ }
+ done <- true
+ }()
+ }
+ for i := 0; i < 32; i++ {
+ <-done
+ }
+
+ // Check that the array has been totally set.
+ for i, v := range a {
+ if v != 0xffffffff {
+ t.Fatalf("a[%v] not fully set: want %#x, got %#x", i, uint32(0xffffffff), v)
+ }
+ }
+}
+
+func TestBitwiseContended8(t *testing.T) {
// Start with every bit in array set to 0.
a := make([]uint8, 16)
@@ -221,6 +303,48 @@ func TestBitwiseContended(t *testing.T) {
}
}
+func TestBitwiseContended(t *testing.T) {
+ // Start with every bit in array set to 0.
+ a := make([]uint32, 16)
+
+ // Iterations to try.
+ N := 1 << 16
+ if testing.Short() {
+ N = 1 << 10
+ }
+
+ // Set and then clear every bit in the array bit-by-bit in different goroutines.
+ done := make(chan bool)
+ for i := 0; i < 32; i++ {
+ m := uint32(1 << i)
+ go func() {
+ for n := 0; n < N; n++ {
+ for i := range a {
+ atomic.Or(&a[i], m)
+ if atomic.Load(&a[i])&m != m {
+ t.Errorf("a[%v] bit %#x not set", i, m)
+ }
+ atomic.And(&a[i], ^m)
+ if atomic.Load(&a[i])&m != 0 {
+ t.Errorf("a[%v] bit %#x not clear", i, m)
+ }
+ }
+ }
+ done <- true
+ }()
+ }
+ for i := 0; i < 32; i++ {
+ <-done
+ }
+
+ // Check that the array has been totally cleared.
+ for i, v := range a {
+ if v != 0 {
+ t.Fatalf("a[%v] not cleared: want %#x, got %#x", i, uint32(0), v)
+ }
+ }
+}
+
func TestStorepNoWB(t *testing.T) {
var p [2]*int
for i := range p {
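
[The new TestAnd/TestOr mirror the existing 8-bit tests: a serial bit-by-bit pass, then 32 goroutines, one per bit position, racing over a shared array -- which only ends up fully cleared or fully set if And/Or are single indivisible read-modify-write operations. An illustrative sketch of why, using sync/atomic stand-ins; orUint32 is a hypothetical helper, not part of the patch:]

package atomicsketch

import "sync/atomic"

// orUint32 emulates an atomic OR with a compare-and-swap retry loop.
// A plain load-modify-store without the CAS would let two goroutines
// setting different bits overwrite each other's update -- exactly the
// failure mode the contended tests above would catch.
func orUint32(addr *uint32, mask uint32) {
	for {
		old := atomic.LoadUint32(addr)
		if atomic.CompareAndSwapUint32(addr, old, old|mask) {
			return
		}
	}
}
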
diff --git a/libgo/go/runtime/internal/atomic/bench_test.go b/libgo/go/runtime/internal/atomic/bench_test.go
index de71b0f..2476c06 100644
--- a/libgo/go/runtime/internal/atomic/bench_test.go
+++ b/libgo/go/runtime/internal/atomic/bench_test.go
@@ -51,6 +51,14 @@ func BenchmarkAnd8(b *testing.B) {
}
}
+func BenchmarkAnd(b *testing.B) {
+ var x [128]uint32 // give x its own cache line
+ sink = &x
+ for i := 0; i < b.N; i++ {
+ atomic.And(&x[63], uint32(i))
+ }
+}
+
func BenchmarkAnd8Parallel(b *testing.B) {
var x [512]uint8 // give byte its own cache line
sink = &x
@@ -63,6 +71,18 @@ func BenchmarkAnd8Parallel(b *testing.B) {
})
}
+func BenchmarkAndParallel(b *testing.B) {
+ var x [128]uint32 // give x its own cache line
+ sink = &x
+ b.RunParallel(func(pb *testing.PB) {
+ i := uint32(0)
+ for pb.Next() {
+ atomic.And(&x[63], i)
+ i++
+ }
+ })
+}
+
func BenchmarkOr8(b *testing.B) {
var x [512]uint8 // give byte its own cache line
sink = &x
@@ -71,6 +91,14 @@ func BenchmarkOr8(b *testing.B) {
}
}
+func BenchmarkOr(b *testing.B) {
+ var x [128]uint32 // give x its own cache line
+ sink = &x
+ for i := 0; i < b.N; i++ {
+ atomic.Or(&x[63], uint32(i))
+ }
+}
+
func BenchmarkOr8Parallel(b *testing.B) {
var x [512]uint8 // give byte its own cache line
sink = &x
@@ -83,6 +111,18 @@ func BenchmarkOr8Parallel(b *testing.B) {
})
}
+func BenchmarkOrParallel(b *testing.B) {
+ var x [128]uint32 // give x its own cache line
+ sink = &x
+ b.RunParallel(func(pb *testing.PB) {
+ i := uint32(0)
+ for pb.Next() {
+ atomic.Or(&x[63], i)
+ i++
+ }
+ })
+}
+
func BenchmarkXadd(b *testing.B) {
var x uint32
ptr := &x
@@ -102,3 +142,54 @@ func BenchmarkXadd64(b *testing.B) {
}
})
}
+
+func BenchmarkCas(b *testing.B) {
+ var x uint32
+ x = 1
+ ptr := &x
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ atomic.Cas(ptr, 1, 0)
+ atomic.Cas(ptr, 0, 1)
+ }
+ })
+}
+
+func BenchmarkCas64(b *testing.B) {
+ var x uint64
+ x = 1
+ ptr := &x
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ atomic.Cas64(ptr, 1, 0)
+ atomic.Cas64(ptr, 0, 1)
+ }
+ })
+}
+func BenchmarkXchg(b *testing.B) {
+ var x uint32
+ x = 1
+ ptr := &x
+ b.RunParallel(func(pb *testing.PB) {
+ var y uint32
+ y = 1
+ for pb.Next() {
+ y = atomic.Xchg(ptr, y)
+ y += 1
+ }
+ })
+}
+
+func BenchmarkXchg64(b *testing.B) {
+ var x uint64
+ x = 1
+ ptr := &x
+ b.RunParallel(func(pb *testing.PB) {
+ var y uint64
+ y = 1
+ for pb.Next() {
+ y = atomic.Xchg64(ptr, y)
+ y += 1
+ }
+ })
+}
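
[All of the new benchmarks share one pattern: an oversized array so the contended word x[63] sits on its own cache line, assignment to the file's package-level sink so the compiler cannot prove the stores dead, and b.RunParallel to measure the operation under contention. A rough sketch of the same pattern with a sync/atomic stand-in, since the runtime-internal ops are not importable from user code:]

package atomicsketch

import (
	"sync/atomic"
	"testing"
)

var sink interface{} // keeps x reachable so the stores cannot be elided

func BenchmarkAddParallel(b *testing.B) {
	var x [128]uint32 // padding gives the contended word its own cache line
	sink = &x
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			atomic.AddUint32(&x[63], 1)
		}
	})
}

[With the gc toolchain the patch's benchmarks would run via something like go test -run='^$' -bench=. in runtime/internal/atomic; under gccgo they exercise the __atomic builtins above the same way.]
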
diff --git a/libgo/go/runtime/internal/atomic/gccgo.go b/libgo/go/runtime/internal/atomic/gccgo.go
index 4df8346..c423803 100644
--- a/libgo/go/runtime/internal/atomic/gccgo.go
+++ b/libgo/go/runtime/internal/atomic/gccgo.go
@@ -24,6 +24,12 @@ func Load64(ptr *uint64) uint64
func LoadAcq(ptr *uint32) uint32
//go:noescape
+func LoadAcq64(ptr *uint64) uint64
+
+//go:noescape
+func LoadAcquintptr(ptr *uintptr) uintptr
+
+//go:noescape
func Xadd(ptr *uint32, delta int32) uint32
//go:noescape
@@ -47,6 +53,12 @@ func And8(ptr *uint8, val uint8)
//go:noescape
func Or8(ptr *uint8, val uint8)
+//go:noescape
+func And(ptr *uint32, val uint32)
+
+//go:noescape
+func Or(ptr *uint32, val uint32)
+
// NOTE: Do not add atomicxor8 (XOR is not idempotent).
//go:noescape
@@ -67,6 +79,12 @@ func Store64(ptr *uint64, val uint64)
//go:noescape
func StoreRel(ptr *uint32, val uint32)
+//go:noescape
+func StoreRel64(ptr *uint64, val uint64)
+
+//go:noescape
+func StoreReluintptr(ptr *uintptr, val uintptr)
+
// StorepNoWB performs *ptr = val atomically and without a write
// barrier.
//
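
[gccgo.go only declares signatures: each body-less function resolves to its C definition in atomic.c through the mangled assembler name (the / separators of runtime/internal/atomic appear as _1), and //go:noescape lets callers keep their operands on the stack. The file's NOTE also explains why And/Or are acceptable where an atomic XOR is not: And and Or are idempotent, so a replayed operation is harmless. A small runnable illustration of that distinction:]

package main

import "fmt"

func main() {
	const mask = uint32(1 << 3)

	x := uint32(0)
	x |= mask
	x |= mask // a replayed OR changes nothing: idempotent
	fmt.Printf("after or, or:   %#x\n", x) // 0x8, same as a single OR

	y := uint32(0)
	y ^= mask
	y ^= mask // a replayed XOR cancels itself: not idempotent
	fmt.Printf("after xor, xor: %#x\n", y) // 0x0, not the 0x8 a single XOR gives
}
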
diff --git a/libgo/go/runtime/internal/atomic/unaligned.go b/libgo/go/runtime/internal/atomic/unaligned.go
new file mode 100644
index 0000000..fbfe125
--- /dev/null
+++ b/libgo/go/runtime/internal/atomic/unaligned.go
@@ -0,0 +1,14 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+import _ "unsafe" // for go:linkname
+
+// Let the C code call this function.
+//go:linkname panicUnaligned
+
+func panicUnaligned() {
+ panic("unaligned 64-bit atomic operation")
+}
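
[unaligned.go supplies the Go half of the panic path. The one-argument //go:linkname form does not rename panicUnaligned; per the file's own comment, it marks the unexported function as referenceable from outside the package, which is how the extern declaration at the top of atomic.c reaches it. A generic sketch of the same pattern -- the package name and helper are hypothetical, for illustration only:]

package example

import _ "unsafe" // the blank unsafe import is what licenses go:linkname

// One-argument go:linkname form: no renaming, it simply allows the
// unexported helper to be referenced from outside this package, the same
// way panicUnaligned is exposed to the C code in atomic.c.
//go:linkname helper
func helper() {
	panic("reached from foreign code")
}
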