author     Andi Kleen <ak@linux.intel.com>  2013-01-20 19:03:22 +0000
committer  Andi Kleen <ak@gcc.gnu.org>      2013-01-20 19:03:22 +0000
commit     d76b6ea41791f792e202cde0eb5227cd1de80201
tree       0b2492617f97e63077e8a77faae69484cc2f7b75 /libstdc++-v3
parent     fe6035536ae0297dc80c38b679042265f4810286
libstdc++: Add mem_order_hle_acquire/release to atomic.h v2
The underlying compiler supports additional __ATOMIC_HLE_ACQUIRE/RELEASE
memmodel flags for TSX, but these were not exposed to the C++ wrapper.
Handle them there. These are additional flags, so some of the assert checks
need to mask off the flags before checking the memory model type.

libstdc++-v3/:

2013-01-12  Andi Kleen  <ak@linux.intel.com>
            Jonathan Wakely  <jwakely.gcc@gmail.com>

	PR libstdc++/55223
	* include/bits/atomic_base.h (__memory_order_modifier): Add
	__memory_order_mask, __memory_order_modifier_mask,
	__memory_order_hle_acquire, __memory_order_hle_release.
	(operator|,operator&): Add.
	(__cmpexch_failure_order): Rename to __cmpexch_failure_order2.
	(__cmpexch_failure_order): Add.
	(clear, store, load, compare_exchange_weak, compare_exchange_strong):
	Handle flags.
	* testsuite/29_atomics/atomic_flag/test_and_set/explicit-hle.cc:
	Add.

Co-Authored-By: Jonathan Wakely <jwakely.gcc@gmail.com>

From-SVN: r195321
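For orientation, a minimal sketch of the usage this patch enables, mirroring
the new testsuite file below (the __-prefixed modifiers are a GNU extension,
not standard C++):

    #include <atomic>

    int main()
    {
      using namespace std;
      atomic_flag af = ATOMIC_FLAG_INIT;

      // OR the HLE modifier onto a standard order; the combined value,
      // extra bit included, is passed through to the __atomic_* built-ins.
      if (!af.test_and_set(memory_order_acquire | __memory_order_hle_acquire))
        af.clear(memory_order_release | __memory_order_hle_release);
      return 0;
    }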
Diffstat (limited to 'libstdc++-v3')
-rw-r--r--  libstdc++-v3/ChangeLog                                                      |  15
-rw-r--r--  libstdc++-v3/include/bits/atomic_base.h                                     | 142
-rw-r--r--  libstdc++-v3/testsuite/29_atomics/atomic_flag/test_and_set/explicit-hle.cc  | 120
3 files changed, 232 insertions(+), 45 deletions(-)
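The recurring change in the hunks below is that the assertions mask the
modifier bits off before validating the base order. A compile-time sketch of
the bit layout those masks rely on, using only names added by this patch
(the new operators are constexpr, so static_assert can check the layout):

    #include <atomic>

    // 0x20003 & 0x0ffff == 0x3: the mask recovers memory_order_release.
    static_assert(((std::memory_order_release | std::__memory_order_hle_release)
                   & std::__memory_order_mask) == std::memory_order_release,
                  "modifier bits live above the base-order bits");

    int main() { return 0; }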
diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog
index e8ccef0..416b7e9 100644
--- a/libstdc++-v3/ChangeLog
+++ b/libstdc++-v3/ChangeLog
@@ -1,3 +1,18 @@
+2013-01-12 Andi Kleen <ak@linux.intel.com>
+ Jonathan Wakely <jwakely.gcc@gmail.com>
+
+ PR libstdc++/55223
+ * include/bits/atomic_base.h (__memory_order_modifier): Add
+ __memory_order_mask, __memory_order_modifier_mask,
+ __memory_order_hle_acquire, __memory_order_hle_release.
+ (operator|,operator&): Add.
+ (__cmpexch_failure_order): Rename to __cmpexch_failure_order2.
+ (__cmpexch_failure_order): Add.
+ (clear, store, load, compare_exchange_weak, compare_exchange_strong):
+ Handle flags.
+ * testsuite/29_atomics/atomic_flag/test_and_set/explicit-hle.cc:
+ Add.
+
2013-01-19 Jonathan Wakely <jwakely.gcc@gmail.com>
PR libstdc++/55861
diff --git a/libstdc++-v3/include/bits/atomic_base.h b/libstdc++-v3/include/bits/atomic_base.h
index 8ce5553..d69bc76 100644
--- a/libstdc++-v3/include/bits/atomic_base.h
+++ b/libstdc++-v3/include/bits/atomic_base.h
@@ -59,14 +59,41 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
memory_order_seq_cst
} memory_order;
+ enum __memory_order_modifier
+ {
+ __memory_order_mask = 0x0ffff,
+ __memory_order_modifier_mask = 0xffff0000,
+ __memory_order_hle_acquire = 0x10000,
+ __memory_order_hle_release = 0x20000
+ };
+
+ constexpr memory_order
+ operator|(memory_order __m, __memory_order_modifier __mod)
+ {
+ return memory_order(__m | int(__mod));
+ }
+
+ constexpr memory_order
+ operator&(memory_order __m, __memory_order_modifier __mod)
+ {
+ return memory_order(__m & int(__mod));
+ }
+
// Drop release ordering as per [atomics.types.operations.req]/21
constexpr memory_order
- __cmpexch_failure_order(memory_order __m) noexcept
+ __cmpexch_failure_order2(memory_order __m) noexcept
{
return __m == memory_order_acq_rel ? memory_order_acquire
: __m == memory_order_release ? memory_order_relaxed : __m;
}
+ constexpr memory_order
+ __cmpexch_failure_order(memory_order __m) noexcept
+ {
+ return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
+ | (__m & __memory_order_modifier_mask));
+ }
+
inline void
atomic_thread_fence(memory_order __m) noexcept
{ __atomic_thread_fence(__m); }
@@ -268,9 +295,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
void
clear(memory_order __m = memory_order_seq_cst) noexcept
{
- __glibcxx_assert(__m != memory_order_consume);
- __glibcxx_assert(__m != memory_order_acquire);
- __glibcxx_assert(__m != memory_order_acq_rel);
+ memory_order __b = __m & __memory_order_mask;
+ __glibcxx_assert(__b != memory_order_consume);
+ __glibcxx_assert(__b != memory_order_acquire);
+ __glibcxx_assert(__b != memory_order_acq_rel);
__atomic_clear (&_M_i, __m);
}
@@ -278,9 +306,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
void
clear(memory_order __m = memory_order_seq_cst) volatile noexcept
{
- __glibcxx_assert(__m != memory_order_consume);
- __glibcxx_assert(__m != memory_order_acquire);
- __glibcxx_assert(__m != memory_order_acq_rel);
+ memory_order __b = __m & __memory_order_mask;
+ __glibcxx_assert(__b != memory_order_consume);
+ __glibcxx_assert(__b != memory_order_acquire);
+ __glibcxx_assert(__b != memory_order_acq_rel);
__atomic_clear (&_M_i, __m);
}
@@ -431,9 +460,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
void
store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
{
- __glibcxx_assert(__m != memory_order_acquire);
- __glibcxx_assert(__m != memory_order_acq_rel);
- __glibcxx_assert(__m != memory_order_consume);
+ memory_order __b = __m & __memory_order_mask;
+ __glibcxx_assert(__b != memory_order_acquire);
+ __glibcxx_assert(__b != memory_order_acq_rel);
+ __glibcxx_assert(__b != memory_order_consume);
__atomic_store_n(&_M_i, __i, __m);
}
@@ -442,9 +472,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
store(__int_type __i,
memory_order __m = memory_order_seq_cst) volatile noexcept
{
- __glibcxx_assert(__m != memory_order_acquire);
- __glibcxx_assert(__m != memory_order_acq_rel);
- __glibcxx_assert(__m != memory_order_consume);
+ memory_order __b = __m & __memory_order_mask;
+ __glibcxx_assert(__b != memory_order_acquire);
+ __glibcxx_assert(__b != memory_order_acq_rel);
+ __glibcxx_assert(__b != memory_order_consume);
__atomic_store_n(&_M_i, __i, __m);
}
@@ -452,8 +483,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__int_type
load(memory_order __m = memory_order_seq_cst) const noexcept
{
- __glibcxx_assert(__m != memory_order_release);
- __glibcxx_assert(__m != memory_order_acq_rel);
+ memory_order __b = __m & __memory_order_mask;
+ __glibcxx_assert(__b != memory_order_release);
+ __glibcxx_assert(__b != memory_order_acq_rel);
return __atomic_load_n(&_M_i, __m);
}
@@ -461,8 +493,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__int_type
load(memory_order __m = memory_order_seq_cst) const volatile noexcept
{
- __glibcxx_assert(__m != memory_order_release);
- __glibcxx_assert(__m != memory_order_acq_rel);
+ memory_order __b = __m & __memory_order_mask;
+ __glibcxx_assert(__b != memory_order_release);
+ __glibcxx_assert(__b != memory_order_acq_rel);
return __atomic_load_n(&_M_i, __m);
}
@@ -486,9 +519,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
compare_exchange_weak(__int_type& __i1, __int_type __i2,
memory_order __m1, memory_order __m2) noexcept
{
- __glibcxx_assert(__m2 != memory_order_release);
- __glibcxx_assert(__m2 != memory_order_acq_rel);
- __glibcxx_assert(__m2 <= __m1);
+ memory_order __b2 = __m2 & __memory_order_mask;
+ memory_order __b1 = __m1 & __memory_order_mask;
+ __glibcxx_assert(__b2 != memory_order_release);
+ __glibcxx_assert(__b2 != memory_order_acq_rel);
+ __glibcxx_assert(__b2 <= __b1);
return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2);
}
@@ -498,9 +533,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
memory_order __m1,
memory_order __m2) volatile noexcept
{
- __glibcxx_assert(__m2 != memory_order_release);
- __glibcxx_assert(__m2 != memory_order_acq_rel);
- __glibcxx_assert(__m2 <= __m1);
+ memory_order __b2 = __m2 & __memory_order_mask;
+ memory_order __b1 = __m1 & __memory_order_mask;
+ __glibcxx_assert(__b2 != memory_order_release);
+ __glibcxx_assert(__b2 != memory_order_acq_rel);
+ __glibcxx_assert(__b2 <= __b1);
return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2);
}
@@ -525,9 +562,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
compare_exchange_strong(__int_type& __i1, __int_type __i2,
memory_order __m1, memory_order __m2) noexcept
{
- __glibcxx_assert(__m2 != memory_order_release);
- __glibcxx_assert(__m2 != memory_order_acq_rel);
- __glibcxx_assert(__m2 <= __m1);
+ memory_order __b2 = __m2 & __memory_order_mask;
+ memory_order __b1 = __m1 & __memory_order_mask;
+ __glibcxx_assert(__b2 != memory_order_release);
+ __glibcxx_assert(__b2 != memory_order_acq_rel);
+ __glibcxx_assert(__b2 <= __b1);
return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2);
}
@@ -537,9 +576,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
memory_order __m1,
memory_order __m2) volatile noexcept
{
- __glibcxx_assert(__m2 != memory_order_release);
- __glibcxx_assert(__m2 != memory_order_acq_rel);
- __glibcxx_assert(__m2 <= __m1);
+ memory_order __b2 = __m2 & __memory_order_mask;
+ memory_order __b1 = __m1 & __memory_order_mask;
+
+ __glibcxx_assert(__b2 != memory_order_release);
+ __glibcxx_assert(__b2 != memory_order_acq_rel);
+ __glibcxx_assert(__b2 <= __b1);
return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2);
}
@@ -726,9 +768,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
store(__pointer_type __p,
memory_order __m = memory_order_seq_cst) noexcept
{
- __glibcxx_assert(__m != memory_order_acquire);
- __glibcxx_assert(__m != memory_order_acq_rel);
- __glibcxx_assert(__m != memory_order_consume);
+ memory_order __b = __m & __memory_order_mask;
+
+ __glibcxx_assert(__b != memory_order_acquire);
+ __glibcxx_assert(__b != memory_order_acq_rel);
+ __glibcxx_assert(__b != memory_order_consume);
__atomic_store_n(&_M_p, __p, __m);
}
@@ -737,9 +781,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
store(__pointer_type __p,
memory_order __m = memory_order_seq_cst) volatile noexcept
{
- __glibcxx_assert(__m != memory_order_acquire);
- __glibcxx_assert(__m != memory_order_acq_rel);
- __glibcxx_assert(__m != memory_order_consume);
+ memory_order __b = __m & __memory_order_mask;
+ __glibcxx_assert(__b != memory_order_acquire);
+ __glibcxx_assert(__b != memory_order_acq_rel);
+ __glibcxx_assert(__b != memory_order_consume);
__atomic_store_n(&_M_p, __p, __m);
}
@@ -747,8 +792,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__pointer_type
load(memory_order __m = memory_order_seq_cst) const noexcept
{
- __glibcxx_assert(__m != memory_order_release);
- __glibcxx_assert(__m != memory_order_acq_rel);
+ memory_order __b = __m & __memory_order_mask;
+ __glibcxx_assert(__b != memory_order_release);
+ __glibcxx_assert(__b != memory_order_acq_rel);
return __atomic_load_n(&_M_p, __m);
}
@@ -756,8 +802,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__pointer_type
load(memory_order __m = memory_order_seq_cst) const volatile noexcept
{
- __glibcxx_assert(__m != memory_order_release);
- __glibcxx_assert(__m != memory_order_acq_rel);
+ memory_order __b = __m & __memory_order_mask;
+ __glibcxx_assert(__b != memory_order_release);
+ __glibcxx_assert(__b != memory_order_acq_rel);
return __atomic_load_n(&_M_p, __m);
}
@@ -782,9 +829,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
memory_order __m1,
memory_order __m2) noexcept
{
- __glibcxx_assert(__m2 != memory_order_release);
- __glibcxx_assert(__m2 != memory_order_acq_rel);
- __glibcxx_assert(__m2 <= __m1);
+ memory_order __b2 = __m2 & __memory_order_mask;
+ memory_order __b1 = __m1 & __memory_order_mask;
+ __glibcxx_assert(__b2 != memory_order_release);
+ __glibcxx_assert(__b2 != memory_order_acq_rel);
+ __glibcxx_assert(__b2 <= __b1);
return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2);
}
@@ -794,9 +843,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
memory_order __m1,
memory_order __m2) volatile noexcept
{
- __glibcxx_assert(__m2 != memory_order_release);
- __glibcxx_assert(__m2 != memory_order_acq_rel);
- __glibcxx_assert(__m2 <= __m1);
+ memory_order __b2 = __m2 & __memory_order_mask;
+ memory_order __b1 = __m1 & __memory_order_mask;
+
+ __glibcxx_assert(__b2 != memory_order_release);
+ __glibcxx_assert(__b2 != memory_order_acq_rel);
+ __glibcxx_assert(__b2 <= __b1);
return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2);
}
diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_flag/test_and_set/explicit-hle.cc b/libstdc++-v3/testsuite/29_atomics/atomic_flag/test_and_set/explicit-hle.cc
new file mode 100644
index 0000000..916a5e2
--- /dev/null
+++ b/libstdc++-v3/testsuite/29_atomics/atomic_flag/test_and_set/explicit-hle.cc
@@ -0,0 +1,120 @@
+// { dg-options "-std=gnu++0x" }
+// { dg-do compile { target i?86-*-* x86_64-*-* } }
+// { dg-final { scan-assembler-times "\(xacquire\|\.byte.*0xf2\)" 14 } }
+// { dg-final { scan-assembler-times "\(xrelease\|\.byte.*0xf3\)" 14 } }
+
+// Copyright (C) 2008, 2009, 2013 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include <atomic>
+
+#define ACQ memory_order_acquire | __memory_order_hle_acquire
+#define REL memory_order_release | __memory_order_hle_release
+
+int main()
+{
+ unsigned zero, one;
+ using namespace std;
+ atomic_flag af = ATOMIC_FLAG_INIT;
+
+ if (!af.test_and_set(ACQ))
+ af.clear(REL);
+
+ atomic_uint au = ATOMIC_VAR_INIT(0);
+
+ if (au.exchange(1, ACQ))
+ au.store(0, REL);
+
+ if (au.exchange(1, ACQ))
+ au.exchange(0, REL);
+
+ zero = 0;
+ one = 1;
+ if (au.compare_exchange_weak(zero, 1, ACQ, memory_order_consume))
+ au.compare_exchange_weak(one, 0, REL, memory_order_consume);
+
+ zero = 0;
+ one = 1;
+ if (au.compare_exchange_strong(zero, 1, ACQ, memory_order_consume))
+ au.compare_exchange_strong(one, 0, REL, memory_order_consume);
+
+ if (!au.fetch_add(1, ACQ))
+ au.fetch_add(-1, REL);
+
+ if (!au.fetch_sub(1, ACQ))
+ au.fetch_sub(-1, REL);
+
+#if 0 /* broken in underlying target */
+ if (!au.fetch_and(1, ACQ))
+ au.fetch_and(-1, REL);
+
+ if (!au.fetch_or(1, ACQ))
+ au.fetch_or(-1, REL);
+
+ if (!au.fetch_xor(1, ACQ))
+ au.fetch_xor(-1, REL);
+
+ if (!au.fetch_nand(1, ACQ))
+ au.fetch_nand(-1, REL);
+#endif
+
+ volatile atomic_flag vaf = ATOMIC_FLAG_INIT;
+
+ if (!vaf.test_and_set(ACQ))
+ vaf.clear(REL);
+
+ volatile atomic_uint vau = ATOMIC_VAR_INIT(0);
+
+ if (!vau.exchange(1, ACQ))
+ vau.store(0, REL);
+
+ if (!vau.exchange(1, ACQ))
+ vau.exchange(0, REL);
+
+ zero = 0;
+ one = 1;
+ if (vau.compare_exchange_weak(zero, 1, ACQ, memory_order_consume))
+ vau.compare_exchange_weak(one, 0, REL, memory_order_consume);
+
+ zero = 0;
+ one = 1;
+ if (vau.compare_exchange_strong(zero, 1, ACQ, memory_order_consume))
+ vau.compare_exchange_strong(one, 0, REL, memory_order_consume);
+
+ if (!vau.fetch_add(1, ACQ))
+ vau.fetch_add(-1, REL);
+
+ if (!vau.fetch_sub(1, ACQ))
+ vau.fetch_sub(-1, REL);
+
+#if 0 /* broken in underlying target */
+
+ if (!vau.fetch_and(1, ACQ))
+ vau.fetch_and(-1, REL);
+
+ if (!vau.fetch_or(1, ACQ))
+ vau.fetch_or(-1, REL);
+
+ if (!vau.fetch_xor(1, ACQ))
+ vau.fetch_xor(-1, REL);
+
+ if (!vau.fetch_nand(1, ACQ))
+ vau.fetch_nand(-1, REL);
+#endif
+
+ return 0;
+}