author     Richard Henderson <rth@redhat.com>   2011-12-13 11:11:25 -0800
committer  Richard Henderson <rth@gcc.gnu.org>  2011-12-13 11:11:25 -0800
commit     36cfbee133027429a681ce585643d38228ab1213 (patch)
tree       7506d65c4a5b0a1a5cd4450e48e4943360f9ab19 /libitm/config/posix
parent     c36cc670b57fa6ebfcc387732fb7e34b7881eb14 (diff)
libitm: Conversion to C++11 atomics.
	* local_atomic: New file.
	* libitm_i.h: Include it.
	(gtm_thread::shared_state): Use atomic template.
	* beginend.cc (GTM::gtm_clock): Use atomic template.
	(global_tid): Use atomic template if 64-bit atomics available.
	(gtm_thread::gtm_thread): Update shared_state access.
	(gtm_thread::trycommit): Likewise.
	(choose_code_path): Update global_tid access.
	* method-gl.cc (gl_mg::orec): Use atomic template.  Update all users.
	* stmlock.h (GTM::gtm_clock): Use atomic template.
	(gtm_get_clock, gtm_inc_clock): Update accesses.
	* config/linux/rwlock.cc (gtm_rwlock::read_lock): Remove redundant
	__sync_synchronize after atomic shared_state access.
	* config/posix/rwlock.cc (gtm_rwlock::read_lock): Likewise.
	(gtm_rwlock::write_lock_generic): Likewise.
	(gtm_rwlock::read_unlock): Likewise.
	* config/alpha/target.h (atomic_read_barrier): Remove.
	(atomic_write_barrier): Remove.
	* config/x86/target.h (atomic_read_barrier): Remove.
	(atomic_write_barrier): Remove.

From-SVN: r182294
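For orientation, here is a minimal sketch of the conversion pattern the
ChangeLog describes, using a hypothetical stand-in for
gtm_thread::shared_state rather than libitm's actual declarations (which
live in libitm_i.h and the new local_atomic header):

    #include <atomic>

    // Before the patch: a plain word plus explicit full barriers
    // (__sync_synchronize) wherever ordering mattered.  After the
    // patch: the word becomes an atomic object, plain assignment is a
    // memory_order_seq_cst store, and weaker orderings are requested
    // case by case via .load()/.store().
    std::atomic<unsigned long> shared_state{0};  // hypothetical stand-in

    void announce_intent_to_read()
    {
      shared_state = 0;  // seq_cst store; no separate fence needed
    }

    unsigned long read_where_mutex_orders()
    {
      // Relaxed is enough when a mutex already provides the ordering.
      return shared_state.load(std::memory_order_relaxed);
    }

The trade-off is the usual one: seq_cst by default keeps the conversion
safe, and each relaxation is a separate, reviewable decision.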
Diffstat (limited to 'libitm/config/posix')
-rw-r--r--  libitm/config/posix/rwlock.cc | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/libitm/config/posix/rwlock.cc b/libitm/config/posix/rwlock.cc
index f379383..e1e3dcf 100644
--- a/libitm/config/posix/rwlock.cc
+++ b/libitm/config/posix/rwlock.cc
@@ -53,10 +53,9 @@ void
gtm_rwlock::read_lock (gtm_thread *tx)
{
// Fast path: first announce our intent to read, then check for conflicting
- // intents to write. The barrier makes sure that this happens in exactly
- // this order.
+ // intents to write. Note that direct assignment to an atomic object
+ // is memory_order_seq_cst.
tx->shared_state = 0;
- __sync_synchronize();
unsigned int sum = this->summary;
if (likely(!(sum & (a_writer | w_writer))))
return;
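
Review note on the fast path above: the seq_cst assignment takes over
the job of the deleted __sync_synchronize. Reader and writer each
publish their intent and then check for the other, a Dekker-style
store-then-load pattern. A minimal sketch, with hypothetical names
standing in for tx->shared_state and this->summary:

    #include <atomic>

    std::atomic<unsigned> shared_state{~0u};  // reader's announcement
    std::atomic<unsigned> summary{0};         // writer flags

    bool reader_fast_path()
    {
      shared_state = 0;  // seq_cst: announce intent to read
      // Both accesses are seq_cst, so this load cannot be reordered
      // before the store above: either we see the writer's flag, or
      // the writer sees our announcement.
      return (summary.load() & 1u) == 0;  // 1u stands in for a_writer | w_writer
    }
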
@@ -69,7 +68,7 @@ gtm_rwlock::read_lock (gtm_thread *tx)
// to happen before we leave the slow path and before we wait for any
// writer).
// ??? Add a barrier to enforce early visibility of this?
- tx->shared_state = ~(typeof tx->shared_state)0;
+ tx->shared_state.store(-1, memory_order_relaxed);
pthread_mutex_lock (&this->mutex);
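
The relaxed store on the slow path is safe because the mutex acquired
right afterwards takes over the ordering: once the reader releases
this->mutex, a writer that later acquires it is guaranteed to observe
the store. The ??? comment only asks whether the store should become
visible even earlier. A sketch under those assumptions, with
hypothetical names:

    #include <atomic>
    #include <mutex>

    std::atomic<unsigned> shared_state{0};
    std::mutex slow_path_mutex;  // standing in for this->mutex

    void enter_read_slow_path()
    {
      // Relaxed: visibility is guaranteed no later than our unlock of
      // the mutex below, which is all the slow path relies on.
      shared_state.store(~0u, std::memory_order_relaxed);
      std::lock_guard<std::mutex> guard(slow_path_mutex);
      // ... re-check the writer flags and wait on the condvars ...
    }
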
@@ -101,7 +100,7 @@ gtm_rwlock::read_lock (gtm_thread *tx)
}
// Otherwise we can acquire the lock for read.
- tx->shared_state = 0;
+ tx->shared_state.store(0, memory_order_relaxed);
pthread_mutex_unlock (&this->mutex);
}
@@ -153,11 +152,11 @@ gtm_rwlock::write_lock_generic (gtm_thread *tx)
// sure that we first set our write intent and check for active readers
// after that, in strictly this order (similar to the barrier in the fast
// path of read_lock()).
- __sync_synchronize();
+ atomic_thread_fence(memory_order_acq_rel);
// If this is an upgrade, we are not a reader anymore.
if (tx != 0)
- tx->shared_state = ~(typeof tx->shared_state)0;
+ tx->shared_state.store(-1, memory_order_relaxed);
// Count the number of active readers to be able to decrease the number of
// wake-ups and wait calls that are necessary.
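
Here the acq_rel fence stands in for the removed __sync_synchronize
between the writer's announcement and its scan of the readers. One
hedge: in the strict C++11 model, ordering a store before a later load
(the Dekker shape again) calls for a seq_cst fence, so the sketch below
mirrors the patch as committed rather than the strictly portable
choice. Hypothetical names:

    #include <atomic>

    std::atomic<unsigned> summary{0};
    std::atomic<unsigned> reader_state{~0u};  // one per reader in the real code

    bool writer_sees_active_reader()
    {
      summary.store(1u);  // announce intent to write (w_writer stand-in)
      // Keeps the store above ordered against the load below, as the
      // patch intends; memory_order_seq_cst would be the strictly
      // portable fence for a store-then-load ordering.
      std::atomic_thread_fence(std::memory_order_acq_rel);
      return reader_state.load(std::memory_order_relaxed) != ~0u;
    }
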
@@ -194,7 +193,7 @@ gtm_rwlock::write_lock_generic (gtm_thread *tx)
it = it->next_thread)
{
// Don't count ourself if this is an upgrade.
- if (it->shared_state != ~(typeof it->shared_state)0)
+ if (it->shared_state.load(memory_order_relaxed) != -1)
readers++;
}
@@ -236,8 +235,7 @@ gtm_rwlock::write_upgrade (gtm_thread *tx)
void
gtm_rwlock::read_unlock (gtm_thread *tx)
{
- tx->shared_state = ~(typeof tx->shared_state)0;
- __sync_synchronize();
+ tx->shared_state = -1;
unsigned int sum = this->summary;
if (likely(!(sum & (a_writer | w_writer))))
return;
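
In read_unlock the patch keeps a plain assignment because operator= on
an atomic object is a memory_order_seq_cst store, which is exactly the
ordering the deleted __sync_synchronize used to provide. A one-line
sketch:

    #include <atomic>

    std::atomic<unsigned> shared_state{0};

    void read_unlock_sketch()
    {
      shared_state = ~0u;  // operator= defaults to memory_order_seq_cst
      // ... then check summary for waiting writers, as in the hunk above ...
    }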