author     Torvald Riegel <torvald@gcc.gnu.org>  2017-02-01 17:21:59 +0000
committer  Torvald Riegel <torvald@gcc.gnu.org>  2017-02-01 17:21:59 +0000
commit     969a32ce9354585f5f2b89df2e025f52eb0e1644 (patch)
tree       ba5dc4787f7d4f9d23224810508207f4fcc188dc /gcc/testsuite/lib
parent     55e75c7c6bcfe386d0ecbf4611cff81040af00b3 (diff)
Fix __atomic to not implement atomic loads with CAS.
gcc/
	* builtins.c (fold_builtin_atomic_always_lock_free): Make
	"lock-free" conditional on existence of a fast atomic load.
	* optabs-query.c (can_atomic_load_p): New function.
	* optabs-query.h (can_atomic_load_p): Declare it.
	* optabs.c (expand_atomic_exchange): Always delegate to libatomic
	if no fast atomic load is available for the particular size of
	access.
	(expand_atomic_compare_and_swap): Likewise.
	(expand_atomic_load): Likewise.
	(expand_atomic_store): Likewise.
	(expand_atomic_fetch_op): Likewise.
	* testsuite/lib/target-supports.exp
	(check_effective_target_sync_int_128): Remove x86 because it
	provides no fast atomic load.
	(check_effective_target_sync_int_128_runtime): Likewise.

libatomic/
	* acinclude.m4: Add #define FAST_ATOMIC_LDST_*.
	* auto-config.h.in: Regenerate.
	* config/x86/host-config.h (FAST_ATOMIC_LDST_16): Define to 0.
	(atomic_compare_exchange_n): New.
	* glfree.c (EXACT, LARGER): Change condition and add comments.

From-SVN: r245098
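Background (not part of the commit message): implementing an __atomic load with a compare-and-swap is problematic because the CAS performs a store even when the value is unchanged, so it can fault on objects placed in read-only memory and causes needless cache-line contention between readers. A minimal sketch of the user-visible case on x86-64, where a 16-byte atomic load formerly could expand to a cmpxchg16b loop and, after this change, is routed through libatomic; the file and variable names here are illustrative only:

    /* Illustrative sketch, not from the commit.  Build on x86-64 with:
       gcc -O2 atomic-load-16.c -latomic  */
    #include <stdio.h>

    /* 16-byte object with static storage, placed in a read-only section.
       Expanding the load below as a cmpxchg16b loop would have to write to
       this object and could fault; with this change GCC instead calls
       libatomic's 16-byte load, which is lock-free only if the target has a
       genuine 16-byte atomic load.  */
    static const __int128 ro_value = 42;

    int
    main (void)
    {
      __int128 v = __atomic_load_n (&ro_value, __ATOMIC_SEQ_CST);
      printf ("low 64 bits: %llu\n", (unsigned long long) v);
      return 0;
    }

This is also the reason the two effective-target procs in the diff below drop x86: the target has no fast 128-bit atomic load, so 128-bit __atomic operations are no longer treated as lock-free there and tests gated on sync_int_128 are skipped.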
Diffstat (limited to 'gcc/testsuite/lib')
-rw-r--r--  gcc/testsuite/lib/target-supports.exp | 21
1 file changed, 3 insertions(+), 18 deletions(-)
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index 95a1c50..7a26008 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -6514,9 +6514,7 @@ proc check_effective_target_section_anchors { } {
# Return 1 if the target supports atomic operations on "int_128" values.
proc check_effective_target_sync_int_128 { } {
- if { (([istarget i?86-*-*] || [istarget x86_64-*-*])
- && ![is-effective-target ia32])
- || [istarget spu-*-*] } {
+ if { [istarget spu-*-*] } {
return 1
} else {
return 0
@@ -6525,23 +6523,10 @@ proc check_effective_target_sync_int_128 { } {
# Return 1 if the target supports atomic operations on "int_128" values
# and can execute them.
+# This requires support for both compare-and-swap and true atomic loads.
proc check_effective_target_sync_int_128_runtime { } {
- if { (([istarget i?86-*-*] || [istarget x86_64-*-*])
- && ![is-effective-target ia32]
- && [check_cached_effective_target sync_int_128_available {
- check_runtime_nocache sync_int_128_available {
- #include "cpuid.h"
- int main ()
- {
- unsigned int eax, ebx, ecx, edx;
- if (__get_cpuid (1, &eax, &ebx, &ecx, &edx))
- return !(ecx & bit_CMPXCHG16B);
- return 1;
- }
- } ""
- }])
- || [istarget spu-*-*] } {
+ if { [istarget spu-*-*] } {
return 1
} else {
return 0