author     Ian Lance Taylor <ian@gcc.gnu.org>   2019-02-01 21:55:38 +0000
committer  Ian Lance Taylor <ian@gcc.gnu.org>   2019-02-01 21:55:38 +0000
commit     b52a3881f04799d410f7ec70d022179c8d734459 (patch)
tree       eace57a9fb5df73173371815f0a0e1a5913a77a8 /libgo/runtime/runtime.h
parent     a53a893b4fe04ec966a4ec178ee8f394426a5dad (diff)
runtime, sync: use __atomic intrinsics instead of __sync
GCC has supported the __atomic intrinsics since 4.7. They are better
than the __sync intrinsics in that they specify a memory model and,
more importantly for our purposes, they are reliably implemented
either in the compiler or in libatomic.
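As a rough illustration (not part of this patch; the counter below is hypothetical, not code from libgo), the practical difference is that the __atomic builtins take an explicit memory-model argument and are backed by libatomic when the target lacks native support, whereas the __sync builtins take no such argument and may be left unresolved at link time on some targets:

    /* Minimal sketch contrasting the two GCC intrinsic families. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t counter;

    int main(void)
    {
      /* __sync form: implicitly a full barrier, no memory-model choice;
         where the compiler cannot expand it inline, it emits a __sync_*
         library call that may not exist for the target. */
      uint32_t a = __sync_add_and_fetch(&counter, 1);

      /* __atomic form: the memory model is explicit, and GCC either
         inlines the operation or calls libatomic. */
      uint32_t b = __atomic_add_fetch(&counter, 1, __ATOMIC_SEQ_CST);

      printf("%u %u\n", a, b);  /* prints "1 2" */
      return 0;
    }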
Fixes https://gcc.gnu.org/PR52084
Reviewed-on: https://go-review.googlesource.com/c/160820
From-SVN: r268458
Diffstat (limited to 'libgo/runtime/runtime.h')
libgo/runtime/runtime.h | 16 +---------------
1 file changed, 1 insertion(+), 15 deletions(-)
diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h
index 3bb1e55..5da34fb 100644
--- a/libgo/runtime/runtime.h
+++ b/libgo/runtime/runtime.h
@@ -276,22 +276,8 @@ int32 runtime_timediv(int64, int32, int32*)
 int32 runtime_round2(int32 x); // round x up to a power of 2.
 
 // atomic operations
-#define runtime_cas(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
-#define runtime_cas64(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
-#define runtime_casp(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
-// Don't confuse with XADD x86 instruction,
-// this one is actually 'addx', that is, add-and-fetch.
-#define runtime_xadd(p, v) __sync_add_and_fetch (p, v)
-#define runtime_xadd64(p, v) __sync_add_and_fetch (p, v)
-#define runtime_xchg(p, v) __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST)
-#define runtime_xchg64(p, v) __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST)
-#define runtime_xchgp(p, v) __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST)
+#define runtime_xadd(p, v) __atomic_add_fetch (p, v, __ATOMIC_SEQ_CST)
 #define runtime_atomicload(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
-#define runtime_atomicstore(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
-#define runtime_atomicstore64(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
-#define runtime_atomicload64(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
-#define runtime_atomicloadp(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
-#define runtime_atomicstorep(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
 
 void runtime_setg(G*) __asm__ (GOSYM_PREFIX "runtime.setg");
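The comment removed above is worth keeping in mind when reading the surviving runtime_xadd macro: it is add-and-fetch, returning the updated value, whereas the x86 XADD instruction (and the __atomic_fetch_add builtin) returns the value the location held before the addition. A standalone sketch, assuming nothing beyond the GCC builtins already used in this file:

    #include <stdint.h>
    #include <assert.h>

    static uint32_t n;

    int main(void)
    {
      /* add-and-fetch: yields the new value, as runtime_xadd does */
      assert(__atomic_add_fetch(&n, 5, __ATOMIC_SEQ_CST) == 5);

      /* fetch-and-add: yields the old value, as x86 XADD does */
      assert(__atomic_fetch_add(&n, 5, __ATOMIC_SEQ_CST) == 5);

      assert(n == 10);
      return 0;
    }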