aboutsummaryrefslogtreecommitdiff
path: root/libstdc++-v3/testsuite/29_atomics
diff options
context:
space:
mode:
authorAndi Kleen <ak@linux.intel.com>2013-01-20 19:03:22 +0000
committerAndi Kleen <ak@gcc.gnu.org>2013-01-20 19:03:22 +0000
commitd76b6ea41791f792e202cde0eb5227cd1de80201 (patch)
tree0b2492617f97e63077e8a77faae69484cc2f7b75 /libstdc++-v3/testsuite/29_atomics
parentfe6035536ae0297dc80c38b679042265f4810286 (diff)
downloadgcc-d76b6ea41791f792e202cde0eb5227cd1de80201.zip
gcc-d76b6ea41791f792e202cde0eb5227cd1de80201.tar.gz
gcc-d76b6ea41791f792e202cde0eb5227cd1de80201.tar.bz2
libstdc++: Add mem_order_hle_acquire/release to atomic.h v2
The underlying compiler supports additional __ATOMIC_HLE_ACQUIRE/RELEASE memmodel flags for TSX, but this was not exposed to the C++ wrapper. Handle it there. These are additional flags, so some of the assert checks need to mask off the flags before checking the memory model type. libstdc++-v3/: 2013-01-12 Andi Kleen <ak@linux.intel.com> Jonathan Wakely <jwakely.gcc@gmail.com> PR libstdc++/55223 * include/bits/atomic_base.h (__memory_order_modifier): Add __memory_order_mask, __memory_order_modifier_mask, __memory_order_hle_acquire, __memory_order_hle_release. (operator|,operator&): Add. (__cmpexch_failure_order): Rename to __cmpexch_failure_order2. (__cmpexch_failure_order): Add. (clear, store, load, compare_exchange_weak, compare_exchange_strong): Handle flags. * testsuite/29_atomics/atomic_flag/test_and_set/explicit-hle.cc: Add. Co-Authored-By: Jonathan Wakely <jwakely.gcc@gmail.com> From-SVN: r195321
Diffstat (limited to 'libstdc++-v3/testsuite/29_atomics')
-rw-r--r--libstdc++-v3/testsuite/29_atomics/atomic_flag/test_and_set/explicit-hle.cc | 120
1 files changed, 120 insertions, 0 deletions
diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_flag/test_and_set/explicit-hle.cc b/libstdc++-v3/testsuite/29_atomics/atomic_flag/test_and_set/explicit-hle.cc
new file mode 100644
index 0000000..916a5e2
--- /dev/null
+++ b/libstdc++-v3/testsuite/29_atomics/atomic_flag/test_and_set/explicit-hle.cc
@@ -0,0 +1,120 @@
+// { dg-options "-std=gnu++0x" }
+// { dg-do compile { target i?86-*-* x86_64-*-* } }
+// { dg-final { scan-assembler-times "\(xacquire\|\.byte.*0xf2\)" 14 } }
+// { dg-final { scan-assembler-times "\(xrelease\|\.byte.*0xf3\)" 14 } }
+
+// Copyright (C) 2008, 2009, 2013 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include <atomic>
+
+// Convenience macros: combine a standard C++11 memory order with the
+// matching libstdc++ HLE (hardware lock elision) modifier bit.  Per the
+// commit message, __memory_order_hle_acquire/release expose the
+// compiler's __ATOMIC_HLE_ACQUIRE/RELEASE memmodel flags for TSX.
+#define ACQ memory_order_acquire | __memory_order_hle_acquire
+#define REL memory_order_release | __memory_order_hle_release
+
+int main()
+{
+ // Compile-only test (see the dg-do directive above): every operation
+ // passed an HLE-annotated memory order should emit an xacquire or
+ // xrelease prefix.  There are 7 ACQ and 7 REL call sites on the plain
+ // objects and the same 7 + 7 on the volatile overloads, matching the
+ // "14" counts in the dg-final scan-assembler-times lines.
+ unsigned zero, one;
+ using namespace std;
+ atomic_flag af = ATOMIC_FLAG_INIT;
+
+ // atomic_flag: test_and_set with HLE-acquire, clear with HLE-release.
+ if (!af.test_and_set(ACQ))
+ af.clear(REL);
+
+ atomic_uint au = ATOMIC_VAR_INIT(0);
+
+ // atomic_uint: exchange/store with the HLE-annotated orders.
+ // NOTE(review): the volatile counterpart below tests !vau.exchange(...)
+ // while this one tests au.exchange(...) without '!' — harmless for a
+ // compile-only instruction count (both branches are compiled), but
+ // worth confirming the asymmetry is intentional.
+ if (au.exchange(1, ACQ))
+ au.store(0, REL);
+
+ if (au.exchange(1, ACQ))
+ au.exchange(0, REL);
+
+ // compare_exchange takes a separate failure order; only the success
+ // order carries the HLE modifier here.
+ zero = 0;
+ one = 1;
+ if (au.compare_exchange_weak(zero, 1, ACQ, memory_order_consume))
+ au.compare_exchange_weak(one, 0, REL, memory_order_consume);
+
+ zero = 0;
+ one = 1;
+ if (au.compare_exchange_strong(zero, 1, ACQ, memory_order_consume))
+ au.compare_exchange_strong(one, 0, REL, memory_order_consume);
+
+ // Read-modify-write arithmetic ops with HLE-annotated orders.
+ if (!au.fetch_add(1, ACQ))
+ au.fetch_add(-1, REL);
+
+ if (!au.fetch_sub(1, ACQ))
+ au.fetch_sub(-1, REL);
+
+// Bitwise/nand RMW ops deliberately excluded from the instruction
+// counts: marked broken in the underlying target at commit time.
+#if 0 /* broken in underlying target */
+ if (!au.fetch_and(1, ACQ))
+ au.fetch_and(-1, REL);
+
+ if (!au.fetch_or(1, ACQ))
+ au.fetch_or(-1, REL);
+
+ if (!au.fetch_xor(1, ACQ))
+ au.fetch_xor(-1, REL);
+
+ if (!au.fetch_nand(1, ACQ))
+ au.fetch_nand(-1, REL);
+#endif
+
+ // Repeat the same sequence through the volatile member overloads so
+ // they are covered by the same scan-assembler counts.
+ volatile atomic_flag vaf = ATOMIC_FLAG_INIT;
+
+ if (!vaf.test_and_set(ACQ))
+ vaf.clear(REL);
+
+ volatile atomic_uint vau = ATOMIC_VAR_INIT(0);
+
+ if (!vau.exchange(1, ACQ))
+ vau.store(0, REL);
+
+ if (!vau.exchange(1, ACQ))
+ vau.exchange(0, REL);
+
+ zero = 0;
+ one = 1;
+ if (vau.compare_exchange_weak(zero, 1, ACQ, memory_order_consume))
+ vau.compare_exchange_weak(one, 0, REL, memory_order_consume);
+
+ zero = 0;
+ one = 1;
+ if (vau.compare_exchange_strong(zero, 1, ACQ, memory_order_consume))
+ vau.compare_exchange_strong(one, 0, REL, memory_order_consume);
+
+ if (!vau.fetch_add(1, ACQ))
+ vau.fetch_add(-1, REL);
+
+ if (!vau.fetch_sub(1, ACQ))
+ vau.fetch_sub(-1, REL);
+
+// Same exclusion as above for the volatile overloads.
+#if 0 /* broken in underlying target */
+
+ if (!vau.fetch_and(1, ACQ))
+ vau.fetch_and(-1, REL);
+
+ if (!vau.fetch_or(1, ACQ))
+ vau.fetch_or(-1, REL);
+
+ if (!vau.fetch_xor(1, ACQ))
+ vau.fetch_xor(-1, REL);
+
+ if (!vau.fetch_nand(1, ACQ))
+ vau.fetch_nand(-1, REL);
+#endif
+
+ return 0;
+}