author     H.J. Lu <hongjiu.lu@intel.com>        2011-05-31 13:02:40 +0000
committer  Dodji Seketeli <dodji@gcc.gnu.org>    2011-05-31 15:02:40 +0200
commit     e67e39c2b5d1cbd37d9d3cd8821794fe2d45b97b (patch)
tree       4fa9d97f5218f181ac98f29a03a79c2bc912d0b4 /libjava/sysdep/i386
parent     cea8c6deef9e4b39260a8c21091238c85eedc1eb (diff)
Revert accidental svn commit r174473
From-SVN: r174480
Diffstat (limited to 'libjava/sysdep/i386')
-rw-r--r--    libjava/sysdep/i386/locks.h    54
1 file changed, 21 insertions(+), 33 deletions(-)
diff --git a/libjava/sysdep/i386/locks.h b/libjava/sysdep/i386/locks.h
index 9d130b0..7b99f0b 100644
--- a/libjava/sysdep/i386/locks.h
+++ b/libjava/sysdep/i386/locks.h
@@ -1,6 +1,6 @@
/* locks.h - Thread synchronization primitives. X86/x86-64 implementation.
- Copyright (C) 2002 Free Software Foundation
+ Copyright (C) 2002, 2011 Free Software Foundation
This file is part of libgcj.
@@ -23,19 +23,25 @@ compare_and_swap(volatile obj_addr_t *addr,
obj_addr_t old,
obj_addr_t new_val)
{
- char result;
-#ifdef __x86_64__
- __asm__ __volatile__("lock; cmpxchgq %2, %0; setz %1"
- : "=m"(*(addr)), "=q"(result)
- : "r" (new_val), "a"(old), "m"(*addr)
- : "memory");
-#else
- __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
- : "=m"(*addr), "=q"(result)
- : "r" (new_val), "a"(old), "m"(*addr)
- : "memory");
-#endif
- return (bool) result;
+ return __sync_bool_compare_and_swap (addr, old, new_val);
+}
+
+// Ensure that subsequent instructions do not execute on stale
+// data that was loaded from memory before the barrier.
+// On X86/x86-64, the hardware ensures that reads are properly ordered.
+inline static void
+read_barrier()
+{
+}
+
+// Ensure that prior stores to memory are completed with respect to other
+// processors.
+inline static void
+write_barrier()
+{
+ /* x86-64/X86 does not reorder writes. We just need to ensure that
+ gcc also doesn't. */
+ __asm__ __volatile__(" " : : : "memory");
}
// Set *addr to new_val with release semantics, i.e. making sure
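For context beyond the patch itself: the restored compare_and_swap leans on GCC's __sync_bool_compare_and_swap builtin, which atomically compares *addr against old and stores new_val only on a match, returning whether the store happened. On x86/x86-64 it compiles to the same lock; cmpxchg the removed assembly spelled out by hand, and it acts as a full memory barrier. A minimal standalone sketch; the obj_addr_t stand-in typedef and the demo in main are illustrative assumptions, not part of the commit:

#include <cstdio>

typedef unsigned long obj_addr_t;   /* stand-in for libgcj's typedef */

static volatile obj_addr_t lock_word = 0;

inline static bool
compare_and_swap (volatile obj_addr_t *addr, obj_addr_t old,
                  obj_addr_t new_val)
{
  /* Compiles to lock; cmpxchg{l,q} on x86/x86-64 and is a full
     barrier, so no separate fence is needed around it.  */
  return __sync_bool_compare_and_swap (addr, old, new_val);
}

int
main ()
{
  bool first = compare_and_swap (&lock_word, 0, 1);  /* succeeds: 0 -> 1 */
  bool again = compare_and_swap (&lock_word, 0, 1);  /* fails: *addr is 1 */
  std::printf ("first=%d again=%d lock_word=%lu\n",
               first, again, (unsigned long) lock_word);
  return 0;
}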
@@ -46,7 +52,7 @@ compare_and_swap(volatile obj_addr_t *addr,
inline static void
release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
{
- __asm__ __volatile__(" " : : : "memory");
+ write_barrier ();
*(addr) = new_val;
}
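Illustration, not part of the commit: release_set is the classic publish pattern. The compiler barrier keeps GCC from sinking earlier payload stores below the flag store, and x86's hardware already keeps stores in program order, so the plain assignment behaves as a release store. A hedged sketch with hypothetical data/ready variables, inlining the two helpers exactly as the patch restores them:

typedef unsigned long obj_addr_t;      /* stand-in for libgcj's typedef */

static int data;                       /* payload (hypothetical) */
static volatile obj_addr_t ready = 0;  /* publication flag (hypothetical) */

inline static void
write_barrier (void)
{
  __asm__ __volatile__(" " : : : "memory");
}

inline static void
release_set (volatile obj_addr_t *addr, obj_addr_t new_val)
{
  write_barrier ();
  *(addr) = new_val;
}

static void
publish (int value)
{
  data = value;             /* payload store first */
  release_set (&ready, 1);  /* barrier, then flag store */
}

static int
consume (void)
{
  while (ready == 0)
    ;                       /* volatile read forces a reload each pass */
  /* read_barrier() would go here; it is a no-op on x86 because loads
     are not reordered with earlier loads.  */
  return data;              /* sees publish's payload */
}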
@@ -60,22 +66,4 @@ compare_and_swap_release(volatile obj_addr_t *addr,
{
return compare_and_swap(addr, old, new_val);
}
-
-// Ensure that subsequent instructions do not execute on stale
-// data that was loaded from memory before the barrier.
-// On X86/x86-64, the hardware ensures that reads are properly ordered.
-inline static void
-read_barrier()
-{
-}
-
-// Ensure that prior stores to memory are completed with respect to other
-// processors.
-inline static void
-write_barrier()
-{
- /* x86-64/X86 does not reorder writes. We just need to ensure that
- gcc also doesn't. */
- __asm__ __volatile__(" " : : : "memory");
-}
#endif
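One further note outside the patch: the empty asm with a "memory" clobber in write_barrier emits no instruction at all. It only tells GCC that memory may have changed, so the compiler can neither cache values across it nor reorder stores past it. A hedged sketch of the effect:

static int a, b;

static void
ordered_stores (void)
{
  a = 1;
  /* Compiler-only fence: zero machine instructions, but GCC must
     emit the store to a before the store to b.  */
  __asm__ __volatile__(" " : : : "memory");
  b = 1;  /* x86 does not reorder stores with older stores, so any
             observer that sees b == 1 also sees a == 1 */
}

Later GCC releases spell the same compiler-only fence as __atomic_signal_fence (__ATOMIC_SEQ_CST).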