author    Ian Lance Taylor <ian@gcc.gnu.org>  2019-02-01 21:55:38 +0000
committer Ian Lance Taylor <ian@gcc.gnu.org>  2019-02-01 21:55:38 +0000
commit    b52a3881f04799d410f7ec70d022179c8d734459 (patch)
tree      eace57a9fb5df73173371815f0a0e1a5913a77a8 /libgo/runtime
parent    a53a893b4fe04ec966a4ec178ee8f394426a5dad (diff)
runtime, sync: use __atomic intrinsics instead of __sync
GCC has supported the __atomic intrinsics since 4.7.  They are better
than the __sync intrinsics in that they specify a memory model and,
more importantly for our purposes, they are reliably implemented
either in the compiler or in libatomic.

Fixes https://gcc.gnu.org/PR52084

Reviewed-on: https://go-review.googlesource.com/c/160820

From-SVN: r268458
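As a standalone illustration (not part of the patch; cas_sync, cas_atomic, and the example types are made up), the two intrinsic families differ like this:

#include <stdint.h>

/* Old style: __sync is implicitly sequentially consistent, and on targets
   without native compare-and-swap the compiler may simply not provide it.  */
_Bool
cas_sync (uint32_t *p, uint32_t old, uint32_t new)
{
  return __sync_bool_compare_and_swap (p, old, new);
}

/* New style: __atomic takes explicit memory-order arguments and is always
   available, either expanded inline or as a call into libatomic.  */
_Bool
cas_atomic (uint32_t *p, uint32_t old, uint32_t new)
{
  return __atomic_compare_exchange_n (p, &old, new, 0,
                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}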
Diffstat (limited to 'libgo/runtime')
-rw-r--r--  libgo/runtime/runtime.h   16
-rw-r--r--  libgo/runtime/thread.c   161
2 files changed, 1 insertion(+), 176 deletions(-)
diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h
index 3bb1e55..5da34fb 100644
--- a/libgo/runtime/runtime.h
+++ b/libgo/runtime/runtime.h
@@ -276,22 +276,8 @@ int32 runtime_timediv(int64, int32, int32*)
int32 runtime_round2(int32 x); // round x up to a power of 2.
// atomic operations
-#define runtime_cas(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
-#define runtime_cas64(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
-#define runtime_casp(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
-// Don't confuse with XADD x86 instruction,
-// this one is actually 'addx', that is, add-and-fetch.
-#define runtime_xadd(p, v) __sync_add_and_fetch (p, v)
-#define runtime_xadd64(p, v) __sync_add_and_fetch (p, v)
-#define runtime_xchg(p, v) __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST)
-#define runtime_xchg64(p, v) __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST)
-#define runtime_xchgp(p, v) __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST)
+#define runtime_xadd(p, v) __atomic_add_fetch (p, v, __ATOMIC_SEQ_CST)
#define runtime_atomicload(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
-#define runtime_atomicstore(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
-#define runtime_atomicstore64(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
-#define runtime_atomicload64(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
-#define runtime_atomicloadp(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
-#define runtime_atomicstorep(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
void runtime_setg(G*)
__asm__ (GOSYM_PREFIX "runtime.setg");
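Callers of the deleted macros were converted in other parts of the commit (the diffstat above is limited to libgo/runtime, so those changes are not shown). For comparison only, a minimal sketch of a __atomic-based runtime_cas, not the definition the commit installs; the old__ temporary is needed because __atomic_compare_exchange_n writes the value it observed back through its second argument:

#define runtime_cas(pval, old, new)					\
  __extension__ ({							\
    __typeof__ (*(pval)) old__ = (old);					\
    __atomic_compare_exchange_n ((pval), &old__, (new), 0,		\
				 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);	\
  })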
diff --git a/libgo/runtime/thread.c b/libgo/runtime/thread.c
deleted file mode 100644
index 83ee006..0000000
--- a/libgo/runtime/thread.c
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include <errno.h>
-#include <signal.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-
-#include "runtime.h"
-#include "go-assert.h"
-
-/* For targets which don't have the required sync support. Really
- these should be provided by gcc itself. FIXME. */
-
-#if !defined (HAVE_SYNC_BOOL_COMPARE_AND_SWAP_4) || !defined (HAVE_SYNC_BOOL_COMPARE_AND_SWAP_8) || !defined (HAVE_SYNC_FETCH_AND_ADD_4) || !defined (HAVE_SYNC_ADD_AND_FETCH_8)
-
-static pthread_mutex_t sync_lock = PTHREAD_MUTEX_INITIALIZER;
-
-#endif
-
-#ifndef HAVE_SYNC_BOOL_COMPARE_AND_SWAP_4
-
-_Bool
-__sync_bool_compare_and_swap_4 (uint32*, uint32, uint32)
- __attribute__ ((visibility ("hidden")));
-
-_Bool
-__sync_bool_compare_and_swap_4 (uint32* ptr, uint32 old, uint32 new)
-{
- int i;
- _Bool ret;
-
- i = pthread_mutex_lock (&sync_lock);
- __go_assert (i == 0);
-
- if (*ptr != old)
- ret = 0;
- else
- {
- *ptr = new;
- ret = 1;
- }
-
- i = pthread_mutex_unlock (&sync_lock);
- __go_assert (i == 0);
-
- return ret;
-}
-
-#endif
-
-#ifndef HAVE_SYNC_BOOL_COMPARE_AND_SWAP_8
-
-_Bool
-__sync_bool_compare_and_swap_8 (uint64*, uint64, uint64)
- __attribute__ ((visibility ("hidden")));
-
-_Bool
-__sync_bool_compare_and_swap_8 (uint64* ptr, uint64 old, uint64 new)
-{
- int i;
- _Bool ret;
-
- i = pthread_mutex_lock (&sync_lock);
- __go_assert (i == 0);
-
- if (*ptr != old)
- ret = 0;
- else
- {
- *ptr = new;
- ret = 1;
- }
-
- i = pthread_mutex_unlock (&sync_lock);
- __go_assert (i == 0);
-
- return ret;
-}
-
-#endif
-
-#ifndef HAVE_SYNC_FETCH_AND_ADD_4
-
-uint32
-__sync_fetch_and_add_4 (uint32*, uint32)
- __attribute__ ((visibility ("hidden")));
-
-uint32
-__sync_fetch_and_add_4 (uint32* ptr, uint32 add)
-{
- int i;
- uint32 ret;
-
- i = pthread_mutex_lock (&sync_lock);
- __go_assert (i == 0);
-
- ret = *ptr;
- *ptr += add;
-
- i = pthread_mutex_unlock (&sync_lock);
- __go_assert (i == 0);
-
- return ret;
-}
-
-#endif
-
-#ifndef HAVE_SYNC_ADD_AND_FETCH_8
-
-uint64
-__sync_add_and_fetch_8 (uint64*, uint64)
- __attribute__ ((visibility ("hidden")));
-
-uint64
-__sync_add_and_fetch_8 (uint64* ptr, uint64 add)
-{
- int i;
- uint64 ret;
-
- i = pthread_mutex_lock (&sync_lock);
- __go_assert (i == 0);
-
- *ptr += add;
- ret = *ptr;
-
- i = pthread_mutex_unlock (&sync_lock);
- __go_assert (i == 0);
-
- return ret;
-}
-
-#endif
-
-uintptr
-runtime_memlimit(void)
-{
- struct rlimit rl;
- uintptr used;
-
- if(getrlimit(RLIMIT_AS, &rl) != 0)
- return 0;
- if(rl.rlim_cur >= 0x7fffffff)
- return 0;
-
- // Estimate our VM footprint excluding the heap.
- // Not an exact science: use size of binary plus
- // some room for thread stacks.
- used = (64<<20);
- if(used >= rl.rlim_cur)
- return 0;
-
- // If there's not at least 16 MB left, we're probably
- // not going to be able to do much. Treat as no limit.
- rl.rlim_cur -= used;
- if(rl.rlim_cur < (16<<20))
- return 0;
-
- return rl.rlim_cur;
-}
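These mutex-based fallbacks are exactly what the commit makes unnecessary: when a target lacks native atomic instructions, GCC lowers the __atomic built-ins to library calls, and libatomic performs the same lock-protected read-modify-write internally. A minimal sketch of the pattern that now replaces them (xadd64 is a made-up name):

#include <stdint.h>

uint64_t
xadd64 (uint64_t *p, uint64_t v)
{
  /* Compiles to a native atomic instruction where one exists; otherwise
     GCC emits a call resolved by libatomic (link with -latomic).  */
  return __atomic_add_fetch (p, v, __ATOMIC_SEQ_CST);
}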
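To make runtime_memlimit's arithmetic concrete: an address-space limit of 512 MB yields 512 - 64 = 448 MB of heap budget, and any limit below 80 MB (the 64 MB reserve plus the 16 MB floor) is treated as no limit at all. A standalone sketch of the same computation, with memlimit_for as a made-up driver in place of the getrlimit call:

#include <stdint.h>
#include <stdio.h>

/* Reproduces runtime_memlimit's arithmetic for a given RLIMIT_AS
   value in bytes, without calling getrlimit.  */
static uintptr_t
memlimit_for (uintptr_t rlim_cur)
{
  uintptr_t used = 64 << 20;	/* reserve for binary + thread stacks */

  if (rlim_cur >= 0x7fffffff)	/* effectively unlimited */
    return 0;
  if (used >= rlim_cur)
    return 0;
  rlim_cur -= used;
  if (rlim_cur < (16 << 20))	/* under 16 MB left: not worth limiting */
    return 0;
  return rlim_cur;
}

int
main (void)
{
  /* Prints 469762048 (448 MB) for a 512 MB address-space limit.  */
  printf ("%lu\n", (unsigned long) memlimit_for ((uintptr_t) 512 << 20));
  return 0;
}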