aboutsummaryrefslogtreecommitdiff
path: root/libjava/sysdep/i386/locks.h
diff options
context:
space:
mode:
authorH.J. Lu <hongjiu.lu@intel.com>2011-05-31 11:53:34 +0000
committerH.J. Lu <hjl@gcc.gnu.org>2011-05-31 04:53:34 -0700
commit755b3b7cf7c635a5c766400865c2bfa52d6aa16d (patch)
treea7e67035fdec4a3fe4aa5320fcddb6ecacc5b568 /libjava/sysdep/i386/locks.h
parent2e87621cb04e341cd1d10f45ae62b5310393e08e (diff)
downloadgcc-755b3b7cf7c635a5c766400865c2bfa52d6aa16d.zip
gcc-755b3b7cf7c635a5c766400865c2bfa52d6aa16d.tar.gz
gcc-755b3b7cf7c635a5c766400865c2bfa52d6aa16d.tar.bz2
Use __sync_bool_compare_and_swap in x86 locks.h.
2011-05-31  H.J. Lu  <hongjiu.lu@intel.com>

	PR libgcj/49193
	* configure.host (sysdeps_dir): Set to i386 for x86_64.
	* sysdep/i386/locks.h (compare_and_swap): Call
	__sync_bool_compare_and_swap.
	(release_set): Call write_barrier ().
	* sysdep/x86-64/locks.h: Removed.

From-SVN: r174471
Diffstat (limited to 'libjava/sysdep/i386/locks.h')
-rw-r--r--  libjava/sysdep/i386/locks.h | 54
1 files changed, 21 insertions, 33 deletions
diff --git a/libjava/sysdep/i386/locks.h b/libjava/sysdep/i386/locks.h
index 9d130b0..7b99f0b 100644
--- a/libjava/sysdep/i386/locks.h
+++ b/libjava/sysdep/i386/locks.h
@@ -1,6 +1,6 @@
/* locks.h - Thread synchronization primitives. X86/x86-64 implementation.
- Copyright (C) 2002 Free Software Foundation
+ Copyright (C) 2002, 2011 Free Software Foundation
This file is part of libgcj.
@@ -23,19 +23,25 @@ compare_and_swap(volatile obj_addr_t *addr,
obj_addr_t old,
obj_addr_t new_val)
{
- char result;
-#ifdef __x86_64__
- __asm__ __volatile__("lock; cmpxchgq %2, %0; setz %1"
- : "=m"(*(addr)), "=q"(result)
- : "r" (new_val), "a"(old), "m"(*addr)
- : "memory");
-#else
- __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
- : "=m"(*addr), "=q"(result)
- : "r" (new_val), "a"(old), "m"(*addr)
- : "memory");
-#endif
- return (bool) result;
+ return __sync_bool_compare_and_swap (addr, old, new_val);
+}
+
+// Ensure that subsequent instructions do not execute on stale
+// data that was loaded from memory before the barrier.
+// On X86/x86-64, the hardware ensures that reads are properly ordered.
+inline static void
+read_barrier()
+{
+}
+
+// Ensure that prior stores to memory are completed with respect to other
+// processors.
+inline static void
+write_barrier()
+{
+ /* x86-64/X86 does not reorder writes. We just need to ensure that
+ gcc also doesn't. */
+ __asm__ __volatile__(" " : : : "memory");
}
// Set *addr to new_val with release semantics, i.e. making sure
@@ -46,7 +52,7 @@ compare_and_swap(volatile obj_addr_t *addr,
inline static void
release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
{
- __asm__ __volatile__(" " : : : "memory");
+ write_barrier ();
*(addr) = new_val;
}
@@ -60,22 +66,4 @@ compare_and_swap_release(volatile obj_addr_t *addr,
{
return compare_and_swap(addr, old, new_val);
}
-
-// Ensure that subsequent instructions do not execute on stale
-// data that was loaded from memory before the barrier.
-// On X86/x86-64, the hardware ensures that reads are properly ordered.
-inline static void
-read_barrier()
-{
-}
-
-// Ensure that prior stores to memory are completed with respect to other
-// processors.
-inline static void
-write_barrier()
-{
- /* x86-64/X86 does not reorder writes. We just need to ensure that
- gcc also doesn't. */
- __asm__ __volatile__(" " : : : "memory");
-}
#endif