author     Jonathan Wakely <jwakely.gcc@gmail.com>  2009-06-24 07:06:17 +0000
committer  Jonathan Wakely <redi@gcc.gnu.org>       2009-06-24 08:06:17 +0100
commit     27f41cfea599bae1acce055104ccefd715f71c9d (patch)
tree       3fc355db48ed7c447465433669872b14a3efdb7a
parent     fb6234e0f88b9623472927b15277da9f3c54518e (diff)
re PR libstdc++/40297 ([C++0x] debug mode vs atomics)
2009-06-24  Jonathan Wakely  <jwakely.gcc@gmail.com>

	PR libstdc++/40297
	* include/bits/atomic_0.h: Reverse debug assertions.
	* include/bits/atomic_2.h: Likewise.

From-SVN: r148893
-rw-r--r--  libstdc++-v3/ChangeLog                |  6
-rw-r--r--  libstdc++-v3/include/bits/atomic_0.h  | 36
-rw-r--r--  libstdc++-v3/include/bits/atomic_2.h  | 32
3 files changed, 42 insertions(+), 32 deletions(-)
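The substance of the fix: the debug-mode assertions were written with ==, so each one could only pass when the caller used one of the *forbidden* memory orders, and every valid call fired an assertion. Reversing them to != makes each assertion reject exactly the orders the C++0x draft disallows (acquire, acq_rel and consume for store; release and acq_rel for load; and, for compare_exchange, a failure order stronger than the success order). A minimal standalone sketch of the intended behaviour, using hypothetical check_* helpers and plain assert rather than the libstdc++ internals:

    #include <atomic>
    #include <cassert>

    // Reject only the orders that are invalid for a store.
    void check_store_order(std::memory_order m)
    {
      assert(m != std::memory_order_acquire);
      assert(m != std::memory_order_acq_rel);
      assert(m != std::memory_order_consume);
    }

    // Reject only the orders that are invalid for a load.
    void check_load_order(std::memory_order m)
    {
      assert(m != std::memory_order_release);
      assert(m != std::memory_order_acq_rel);
    }

    int main()
    {
      check_store_order(std::memory_order_release);  // valid store order: passes
      check_load_order(std::memory_order_acquire);   // valid load order: passes
      // With the old '==' form, both calls above would have aborted.
    }

The __glibcxx_assert(__m2 <= __m1) check in the compare_exchange overloads is left unchanged by the patch: the failure order must not be stronger than the success order, and that assertion was already written the right way round.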
diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog
index 38f7f79..db7c50f 100644
--- a/libstdc++-v3/ChangeLog
+++ b/libstdc++-v3/ChangeLog
@@ -1,3 +1,9 @@
+2009-06-24 Jonathan Wakely <jwakely.gcc@gmail.com>
+
+ PR libstdc++/40297
+ * include/bits/atomic_0.h: Reverse debug assertions.
+ * include/bits/atomic_2.h: Likewise.
+
2009-06-23 DJ Delorie <dj@redhat.com>
Add MeP port.
diff --git a/libstdc++-v3/include/bits/atomic_0.h b/libstdc++-v3/include/bits/atomic_0.h
index 5d2631d..a493ea6 100644
--- a/libstdc++-v3/include/bits/atomic_0.h
+++ b/libstdc++-v3/include/bits/atomic_0.h
@@ -119,17 +119,17 @@ namespace __atomic0
void
store(void* __v, memory_order __m = memory_order_seq_cst) volatile
{
- __glibcxx_assert(__m == memory_order_acquire);
- __glibcxx_assert(__m == memory_order_acq_rel);
- __glibcxx_assert(__m == memory_order_consume);
+ __glibcxx_assert(__m != memory_order_acquire);
+ __glibcxx_assert(__m != memory_order_acq_rel);
+ __glibcxx_assert(__m != memory_order_consume);
_ATOMIC_STORE_(this, __v, __m);
}
void*
load(memory_order __m = memory_order_seq_cst) const volatile
{
- __glibcxx_assert(__m == memory_order_release);
- __glibcxx_assert(__m == memory_order_acq_rel);
+ __glibcxx_assert(__m != memory_order_release);
+ __glibcxx_assert(__m != memory_order_acq_rel);
return _ATOMIC_LOAD_(this, __m);
}
@@ -141,8 +141,8 @@ namespace __atomic0
compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
memory_order __m2) volatile
{
- __glibcxx_assert(__m2 == memory_order_release);
- __glibcxx_assert(__m2 == memory_order_acq_rel);
+ __glibcxx_assert(__m2 != memory_order_release);
+ __glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
}
@@ -159,8 +159,8 @@ namespace __atomic0
compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
memory_order __m2) volatile
{
- __glibcxx_assert(__m2 == memory_order_release);
- __glibcxx_assert(__m2 == memory_order_acq_rel);
+ __glibcxx_assert(__m2 != memory_order_release);
+ __glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
}
@@ -310,17 +310,17 @@ namespace __atomic0
store(__integral_type __i,
memory_order __m = memory_order_seq_cst) volatile
{
- __glibcxx_assert(__m == memory_order_acquire);
- __glibcxx_assert(__m == memory_order_acq_rel);
- __glibcxx_assert(__m == memory_order_consume);
+ __glibcxx_assert(__m != memory_order_acquire);
+ __glibcxx_assert(__m != memory_order_acq_rel);
+ __glibcxx_assert(__m != memory_order_consume);
_ATOMIC_STORE_(this, __i, __m);
}
__integral_type
load(memory_order __m = memory_order_seq_cst) const volatile
{
- __glibcxx_assert(__m == memory_order_release);
- __glibcxx_assert(__m == memory_order_acq_rel);
+ __glibcxx_assert(__m != memory_order_release);
+ __glibcxx_assert(__m != memory_order_acq_rel);
return _ATOMIC_LOAD_(this, __m);
}
@@ -333,8 +333,8 @@ namespace __atomic0
compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
memory_order __m1, memory_order __m2) volatile
{
- __glibcxx_assert(__m2 == memory_order_release);
- __glibcxx_assert(__m2 == memory_order_acq_rel);
+ __glibcxx_assert(__m2 != memory_order_release);
+ __glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
}
@@ -351,8 +351,8 @@ namespace __atomic0
compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
memory_order __m1, memory_order __m2) volatile
{
- __glibcxx_assert(__m2 == memory_order_release);
- __glibcxx_assert(__m2 == memory_order_acq_rel);
+ __glibcxx_assert(__m2 != memory_order_release);
+ __glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
}
diff --git a/libstdc++-v3/include/bits/atomic_2.h b/libstdc++-v3/include/bits/atomic_2.h
index c172767..8e8e7ff 100644
--- a/libstdc++-v3/include/bits/atomic_2.h
+++ b/libstdc++-v3/include/bits/atomic_2.h
@@ -65,6 +65,10 @@ namespace __atomic2
void
clear(memory_order __m = memory_order_seq_cst) volatile
{
+ __glibcxx_assert(__m != memory_order_consume);
+ __glibcxx_assert(__m != memory_order_acquire);
+ __glibcxx_assert(__m != memory_order_acq_rel);
+
__sync_lock_release(&_M_i);
if (__m != memory_order_acquire && __m != memory_order_acq_rel)
__sync_synchronize();
@@ -93,9 +97,9 @@ namespace __atomic2
void
store(void* __v, memory_order __m = memory_order_seq_cst) volatile
{
- __glibcxx_assert(__m == memory_order_acquire);
- __glibcxx_assert(__m == memory_order_acq_rel);
- __glibcxx_assert(__m == memory_order_consume);
+ __glibcxx_assert(__m != memory_order_acquire);
+ __glibcxx_assert(__m != memory_order_acq_rel);
+ __glibcxx_assert(__m != memory_order_consume);
if (__m == memory_order_relaxed)
_M_i = __v;
@@ -111,8 +115,8 @@ namespace __atomic2
void*
load(memory_order __m = memory_order_seq_cst) const volatile
{
- __glibcxx_assert(__m == memory_order_release);
- __glibcxx_assert(__m == memory_order_acq_rel);
+ __glibcxx_assert(__m != memory_order_release);
+ __glibcxx_assert(__m != memory_order_acq_rel);
__sync_synchronize();
void* __ret = _M_i;
@@ -144,8 +148,8 @@ namespace __atomic2
compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
memory_order __m2) volatile
{
- __glibcxx_assert(__m2 == memory_order_release);
- __glibcxx_assert(__m2 == memory_order_acq_rel);
+ __glibcxx_assert(__m2 != memory_order_release);
+ __glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
void* __v1o = __v1;
@@ -284,9 +288,9 @@ namespace __atomic2
store(__integral_type __i,
memory_order __m = memory_order_seq_cst) volatile
{
- __glibcxx_assert(__m == memory_order_acquire);
- __glibcxx_assert(__m == memory_order_acq_rel);
- __glibcxx_assert(__m == memory_order_consume);
+ __glibcxx_assert(__m != memory_order_acquire);
+ __glibcxx_assert(__m != memory_order_acq_rel);
+ __glibcxx_assert(__m != memory_order_consume);
if (__m == memory_order_relaxed)
_M_i = __i;
@@ -302,8 +306,8 @@ namespace __atomic2
__integral_type
load(memory_order __m = memory_order_seq_cst) const volatile
{
- __glibcxx_assert(__m == memory_order_release);
- __glibcxx_assert(__m == memory_order_acq_rel);
+ __glibcxx_assert(__m != memory_order_release);
+ __glibcxx_assert(__m != memory_order_acq_rel);
__sync_synchronize();
__integral_type __ret = _M_i;
@@ -336,8 +340,8 @@ namespace __atomic2
compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
memory_order __m1, memory_order __m2) volatile
{
- __glibcxx_assert(__m2 == memory_order_release);
- __glibcxx_assert(__m2 == memory_order_acq_rel);
+ __glibcxx_assert(__m2 != memory_order_release);
+ __glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
__integral_type __i1o = __i1;