author	David Edelsohn <dje.gcc@gmail.com>	2012-06-20 13:02:56 +0000
committer	David Edelsohn <dje@gcc.gnu.org>	2012-06-20 09:02:56 -0400
commit	54af9de73f589c38c02a64dc1ed6d2c0c6554972 (patch)
tree	e879acf299bb2060d99c8b970fb37dc1bdb03b6e /libjava
parent	a75f501709fc1562a96064688ca925d48562f131 (diff)
download	gcc-54af9de73f589c38c02a64dc1ed6d2c0c6554972.zip
	gcc-54af9de73f589c38c02a64dc1ed6d2c0c6554972.tar.gz
	gcc-54af9de73f589c38c02a64dc1ed6d2c0c6554972.tar.bz2
locks.h (compare_and_swap): Use GCC atomic intrinsics.
2012-06-20  David Edelsohn  <dje.gcc@gmail.com>
	    Alan Modra  <amodra@gmail.com>

	* sysdep/powerpc/locks.h (compare_and_swap): Use GCC atomic
	intrinsics.
	(release_set): Same.
	(compare_and_swap_release): Same.
	(read_barrier): Same.
	(write_barrier): Same.

Co-Authored-By: Alan Modra <amodra@gmail.com>

From-SVN: r188829
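For readers unfamiliar with the builtins, here is a minimal standalone sketch (not part of the commit; cas_acquire_demo is a hypothetical name) of the pattern the patch adopts: __atomic_compare_exchange_n performs the compare-and-swap that the removed lwarx/stwcx. inline assembly spelled out by hand, and the __ATOMIC_ACQUIRE ordering takes the place of the trailing isync.

/* Minimal sketch, not from the patch: a strong acquire CAS via the
   GCC __atomic builtins.  The fourth argument (0) requests a strong
   CAS that cannot fail spuriously; on failure, 'expected' is updated
   with the value actually observed, which this caller discards.  */
#include <stddef.h>
#include <stdbool.h>

typedef size_t obj_addr_t;

static inline bool
cas_acquire_demo (volatile obj_addr_t *addr, obj_addr_t expected,
		  obj_addr_t new_val)
{
  return __atomic_compare_exchange_n (addr, &expected, new_val, 0,
				      __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}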
Diffstat (limited to 'libjava')
-rw-r--r--	libjava/ChangeLog	10
-rw-r--r--	libjava/sysdep/powerpc/locks.h	76
2 files changed, 36 insertions, 50 deletions
diff --git a/libjava/ChangeLog b/libjava/ChangeLog
index 8a6dd0f..152e9d7 100644
--- a/libjava/ChangeLog
+++ b/libjava/ChangeLog
@@ -1,3 +1,13 @@
+2012-06-20 David Edelsohn <dje.gcc@gmail.com>
+ Alan Modra <amodra@gmail.com>
+
+ * sysdep/powerpc/locks.h (compare_and_swap): Use GCC atomic
+ intrinsics.
+ (release_set): Same.
+ (compare_and_swap_release): Same.
+ (read_barrier): Same.
+ (write_barrier): Same.
+
2012-06-15 Andreas Schwab <schwab@linux-m68k.org>
* sysdep/m68k/locks.h (compare_and_swap): Use
diff --git a/libjava/sysdep/powerpc/locks.h b/libjava/sysdep/powerpc/locks.h
index 2e9eb0e..ecff4e2 100644
--- a/libjava/sysdep/powerpc/locks.h
+++ b/libjava/sysdep/powerpc/locks.h
@@ -11,87 +11,63 @@ details. */
#ifndef __SYSDEP_LOCKS_H__
#define __SYSDEP_LOCKS_H__
-#ifdef __LP64__
-#define _LARX "ldarx "
-#define _STCX "stdcx. "
-#else
-#define _LARX "lwarx "
-#ifdef __PPC405__
-#define _STCX "sync; stwcx. "
-#else
-#define _STCX "stwcx. "
-#endif
-#endif
-
typedef size_t obj_addr_t; /* Integer type big enough for object */
/* address. */
+// Atomically replace *addr by new_val if it was initially equal to old.
+// Return true if the comparison succeeded.
+// Assumed to have acquire semantics, i.e. later memory operations
+// cannot execute before the compare_and_swap finishes.
+
inline static bool
-compare_and_swap (volatile obj_addr_t *addr, obj_addr_t old,
+compare_and_swap (volatile obj_addr_t *addr,
+ obj_addr_t old,
obj_addr_t new_val)
{
- obj_addr_t ret;
-
- __asm__ __volatile__ (
- " " _LARX "%0,0,%1 \n"
- " xor. %0,%3,%0\n"
- " bne $+12\n"
- " " _STCX "%2,0,%1\n"
- " bne- $-16\n"
- : "=&r" (ret)
- : "r" (addr), "r" (new_val), "r" (old)
- : "cr0", "memory");
-
- /* This version of __compare_and_swap is to be used when acquiring
- a lock, so we don't need to worry about whether other memory
- operations have completed, but we do need to be sure that any loads
- after this point really occur after we have acquired the lock. */
- __asm__ __volatile__ ("isync" : : : "memory");
- return ret == 0;
+ return __atomic_compare_exchange_n (addr, &old, new_val, 0,
+ __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}
+
+// Set *addr to new_val with release semantics, i.e. making sure
+// that prior loads and stores complete before this
+// assignment.
+
inline static void
release_set (volatile obj_addr_t *addr, obj_addr_t new_val)
{
- __asm__ __volatile__ ("sync" : : : "memory");
- *addr = new_val;
+  __atomic_store_n (addr, new_val, __ATOMIC_RELEASE);
}
+
+// Compare_and_swap with release semantics instead of acquire semantics.
+
inline static bool
compare_and_swap_release (volatile obj_addr_t *addr, obj_addr_t old,
obj_addr_t new_val)
{
- obj_addr_t ret;
-
- __asm__ __volatile__ ("sync" : : : "memory");
-
- __asm__ __volatile__ (
- " " _LARX "%0,0,%1 \n"
- " xor. %0,%3,%0\n"
- " bne $+12\n"
- " " _STCX "%2,0,%1\n"
- " bne- $-16\n"
- : "=&r" (ret)
- : "r" (addr), "r" (new_val), "r" (old)
- : "cr0", "memory");
-
- return ret == 0;
+ return __atomic_compare_exchange_n (addr, &old, new_val, 0,
+ __ATOMIC_RELEASE, __ATOMIC_RELAXED);
}
+
// Ensure that subsequent instructions do not execute on stale
// data that was loaded from memory before the barrier.
+
inline static void
read_barrier ()
{
- __asm__ __volatile__ ("isync" : : : "memory");
+ __atomic_thread_fence (__ATOMIC_ACQUIRE);
}
+
// Ensure that prior stores to memory are completed with respect to other
// processors.
+
inline static void
write_barrier ()
{
- __asm__ __volatile__ ("sync" : : : "memory");
+ __atomic_thread_fence (__ATOMIC_RELEASE);
}
#endif
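As a usage note (a hypothetical sketch, not part of the commit): these primitives are meant to pair up as a simple spinlock, which is why compare_and_swap carries acquire semantics and release_set carries release semantics.

/* Hypothetical usage sketch: acquiring with compare_and_swap and
   releasing with release_set forms a minimal spinlock.  Acquire
   ordering keeps the critical section from floating above the CAS;
   release ordering makes its stores visible before the unlock.  */
static volatile obj_addr_t lock_word = 0;

static void
spin_lock_demo (void)
{
  while (!compare_and_swap (&lock_word, 0, 1))
    ;  /* retry until we atomically replace 0 with 1 */
}

static void
spin_unlock_demo (void)
{
  release_set (&lock_word, 0);
}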