Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog                          18
-rw-r--r--  gcc/builtins.c                          5
-rw-r--r--  gcc/optabs-query.c                     19
-rw-r--r--  gcc/optabs-query.h                      1
-rw-r--r--  gcc/optabs.c                           63
-rw-r--r--  gcc/testsuite/lib/target-supports.exp  21
6 files changed, 86 insertions, 41 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index bb31a8f..594cc3b 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,21 @@
+2017-02-01 Torvald Riegel <triegel@redhat.com>
+ Richard Henderson <rth@redhat.com>
+
+ * builtins.c (fold_builtin_atomic_always_lock_free): Make "lock-free"
+	conditional on existence of a fast atomic load.
+ * optabs-query.c (can_atomic_load_p): New function.
+ * optabs-query.h (can_atomic_load_p): Declare it.
+ * optabs.c (expand_atomic_exchange): Always delegate to libatomic if
+ no fast atomic load is available for the particular size of access.
+ (expand_atomic_compare_and_swap): Likewise.
+ (expand_atomic_load): Likewise.
+ (expand_atomic_store): Likewise.
+ (expand_atomic_fetch_op): Likewise.
+ * testsuite/lib/target-supports.exp
+ (check_effective_target_sync_int_128): Remove x86 because it provides
+ no fast atomic load.
+ (check_effective_target_sync_int_128_runtime): Likewise.
+
2017-02-01 Richard Biener <rguenther@suse.de>
* graphite.c: Include tree-vectorizer.h for find_loop_location.
diff --git a/gcc/builtins.c b/gcc/builtins.c
index bf68e31..0a0e8b9 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -6157,8 +6157,9 @@ fold_builtin_atomic_always_lock_free (tree arg0, tree arg1)
/* Check if a compare_and_swap pattern exists for the mode which represents
the required size. The pattern is not allowed to fail, so the existence
- of the pattern indicates support is present. */
- if (can_compare_and_swap_p (mode, true))
+ of the pattern indicates support is present. Also require that an
+ atomic load exists for the required size. */
+ if (can_compare_and_swap_p (mode, true) && can_atomic_load_p (mode))
return boolean_true_node;
else
return boolean_false_node;
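
The user-visible effect of this hunk is that __atomic_always_lock_free (and the __atomic_is_lock_free and C11 atomic_is_lock_free queries built on top of it) no longer reports 16-byte objects as lock-free on targets such as x86-64, where cmpxchg16b provides a compare-and-swap but no plain 16-byte atomic load. A minimal probe, assuming an x86-64 target compiled with -mcx16 (the exact answers are target- and flag-dependent):

  /* Sketch, not part of the patch: after this change the 16-byte query
     is expected to print 0 even with -mcx16, while the word-sized one
     still prints 1.  */
  #include <stdio.h>

  int
  main (void)
  {
    printf ("8-byte:  %d\n", __atomic_always_lock_free (8, (void *) 0));
    printf ("16-byte: %d\n", __atomic_always_lock_free (16, (void *) 0));
    return 0;
  }
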
diff --git a/gcc/optabs-query.c b/gcc/optabs-query.c
index 6c34a4e..4899333 100644
--- a/gcc/optabs-query.c
+++ b/gcc/optabs-query.c
@@ -584,6 +584,25 @@ can_atomic_exchange_p (machine_mode mode, bool allow_libcall)
return can_compare_and_swap_p (mode, allow_libcall);
}
+/* Return true if an atomic load can be performed without falling back to
+ a compare-and-swap. */
+
+bool
+can_atomic_load_p (machine_mode mode)
+{
+ enum insn_code icode;
+
+  /* Does the target support the load directly?  */
+ icode = direct_optab_handler (atomic_load_optab, mode);
+ if (icode != CODE_FOR_nothing)
+ return true;
+
+ /* If the size of the object is greater than word size on this target,
+ then we assume that a load will not be atomic. Also see
+ expand_atomic_load. */
+ return GET_MODE_PRECISION (mode) <= BITS_PER_WORD;
+}
+
/* Determine whether "1 << x" is relatively cheap in word_mode. */
bool
diff --git a/gcc/optabs-query.h b/gcc/optabs-query.h
index a80a0e7..e85a7f1 100644
--- a/gcc/optabs-query.h
+++ b/gcc/optabs-query.h
@@ -176,6 +176,7 @@ int can_mult_highpart_p (machine_mode, bool);
bool can_vec_mask_load_store_p (machine_mode, machine_mode, bool);
bool can_compare_and_swap_p (machine_mode, bool);
bool can_atomic_exchange_p (machine_mode, bool);
+bool can_atomic_load_p (machine_mode);
bool lshift_cheap_p (bool);
#endif
diff --git a/gcc/optabs.c b/gcc/optabs.c
index d8831a8..1afd593 100644
--- a/gcc/optabs.c
+++ b/gcc/optabs.c
@@ -6086,8 +6086,15 @@ expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
rtx
expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
{
+ machine_mode mode = GET_MODE (mem);
rtx ret;
+ /* If loads are not atomic for the required size and we are not called to
+ provide a __sync builtin, do not do anything so that we stay consistent
+ with atomic loads of the same size. */
+ if (!can_atomic_load_p (mode) && !is_mm_sync (model))
+ return NULL_RTX;
+
ret = maybe_emit_atomic_exchange (target, mem, val, model);
/* Next try a compare-and-swap loop for the exchange. */
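
With this guard, a generic __atomic exchange on a double-word object returns NULL_RTX here, so the builtin expander emits a library call instead of an inline compare-and-swap loop. A hedged example of code this affects, assuming a 64-bit target without a fast 16-byte atomic load (link with -latomic):

  /* After the patch this is expected to compile to a call such as
     __atomic_exchange_16 in libatomic rather than an inline
     cmpxchg16b loop.  */
  __int128
  swap_in (__int128 *p, __int128 v)
  {
    return __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST);
  }
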
@@ -6121,6 +6128,12 @@ expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
rtx target_oval, target_bool = NULL_RTX;
rtx libfunc;
+ /* If loads are not atomic for the required size and we are not called to
+ provide a __sync builtin, do not do anything so that we stay consistent
+ with atomic loads of the same size. */
+ if (!can_atomic_load_p (mode) && !is_mm_sync (succ_model))
+ return false;
+
/* Load expected into a register for the compare and swap. */
if (MEM_P (expected))
expected = copy_to_reg (expected);
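
The same gate on compare-and-swap is what keeps mixed operations on one object coherent: if a 16-byte compare-and-swap were expanded inline while 16-byte loads went through libatomic's locks, a reader could run concurrently with a writer without any synchronization between them. A sketch of the pattern being protected, assuming a 64-bit target where both calls now end up in libatomic (link with -latomic):

  /* Both the load and the compare-exchange below go through libatomic
     on such targets, so they synchronize with each other.  */
  #include <stdatomic.h>

  _Atomic __int128 counter;

  void
  bump (void)
  {
    __int128 old = atomic_load (&counter);
    while (!atomic_compare_exchange_weak (&counter, &old, old + 1))
      ;  /* old is refreshed by each failed compare-exchange.  */
  }
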
@@ -6316,19 +6329,13 @@ expand_atomic_load (rtx target, rtx mem, enum memmodel model)
}
/* If the size of the object is greater than word size on this target,
- then we assume that a load will not be atomic. */
+     then we assume that a load will not be atomic.  We could try to
+     emulate a load with a compare-and-swap operation, but the store that
+     this could result in would be incorrect if this is a volatile atomic
+     load or targets read-only-mapped memory.  */
if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
- {
- /* Issue val = compare_and_swap (mem, 0, 0).
- This may cause the occasional harmless store of 0 when the value is
- already 0, but it seems to be OK according to the standards guys. */
- if (expand_atomic_compare_and_swap (NULL, &target, mem, const0_rtx,
- const0_rtx, false, model, model))
- return target;
- else
- /* Otherwise there is no atomic load, leave the library call. */
- return NULL_RTX;
- }
+ /* If there is no atomic load, leave the library call. */
+ return NULL_RTX;
/* Otherwise assume loads are atomic, and emit the proper barriers. */
if (!target || target == const0_rtx)
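
The read-only-memory hazard named in the new comment is concrete: a const-qualified object can be placed in a read-only section, and emulating its load as compare_and_swap (mem, 0, 0) would trap on the store even though the stored value equals the old one. A hedged illustration, assuming the object lands in .rodata:

  /* With the compare-and-swap fallback removed, this becomes a
     libatomic call that takes a side-table lock instead of writing
     to the (read-only) object itself.  */
  static const __int128 table_id = 42;

  __int128
  read_id (void)
  {
    return __atomic_load_n (&table_id, __ATOMIC_SEQ_CST);
  }
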
@@ -6370,7 +6377,9 @@ expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
return const0_rtx;
}
- /* If using __sync_lock_release is a viable alternative, try it. */
+ /* If using __sync_lock_release is a viable alternative, try it.
+ Note that this will not be set to true if we are expanding a generic
+ __atomic_store_n. */
if (use_release)
{
icode = direct_optab_handler (sync_lock_release_optab, mode);
@@ -6389,16 +6398,22 @@ expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
}
/* If the size of the object is greater than word size on this target,
- a default store will not be atomic, Try a mem_exchange and throw away
- the result. If that doesn't work, don't do anything. */
+ a default store will not be atomic. */
if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
{
- rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
- if (!target)
- target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem, val);
- if (target)
- return const0_rtx;
- else
+ /* If loads are atomic or we are called to provide a __sync builtin,
+	  we can try an atomic_exchange and throw away the result.  Otherwise,
+ don't do anything so that we do not create an inconsistency between
+ loads and stores. */
+ if (can_atomic_load_p (mode) || is_mm_sync (model))
+ {
+ rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
+ if (!target)
+ target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem,
+ val);
+ if (target)
+ return const0_rtx;
+ }
return NULL_RTX;
}
@@ -6713,6 +6728,12 @@ expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
rtx result;
bool unused_result = (target == const0_rtx);
+ /* If loads are not atomic for the required size and we are not called to
+ provide a __sync builtin, do not do anything so that we stay consistent
+ with atomic loads of the same size. */
+ if (!can_atomic_load_p (mode) && !is_mm_sync (model))
+ return NULL_RTX;
+
result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
after);
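
The fetch_op gate completes the set: every generic __atomic read-modify-write operation on a size without a fast atomic load now funnels into libatomic, for the same consistency reason. For example, assuming a 64-bit target without 16-byte atomic loads (link with -latomic):

  /* Expected to compile to a call such as __atomic_fetch_add_16 in
     libatomic rather than an inline compare-and-swap loop.  */
  __int128
  fetch_add (__int128 *p, __int128 v)
  {
    return __atomic_fetch_add (p, v, __ATOMIC_SEQ_CST);
  }
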
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index 95a1c50..7a26008 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -6514,9 +6514,7 @@ proc check_effective_target_section_anchors { } {
# Return 1 if the target supports atomic operations on "int_128" values.
proc check_effective_target_sync_int_128 { } {
- if { (([istarget i?86-*-*] || [istarget x86_64-*-*])
- && ![is-effective-target ia32])
- || [istarget spu-*-*] } {
+ if { [istarget spu-*-*] } {
return 1
} else {
return 0
@@ -6525,23 +6523,10 @@ proc check_effective_target_sync_int_128 { } {
# Return 1 if the target supports atomic operations on "int_128" values
# and can execute them.
+# This requires support for both compare-and-swap and true atomic loads.
proc check_effective_target_sync_int_128_runtime { } {
- if { (([istarget i?86-*-*] || [istarget x86_64-*-*])
- && ![is-effective-target ia32]
- && [check_cached_effective_target sync_int_128_available {
- check_runtime_nocache sync_int_128_available {
- #include "cpuid.h"
- int main ()
- {
- unsigned int eax, ebx, ecx, edx;
- if (__get_cpuid (1, &eax, &ebx, &ecx, &edx))
- return !(ecx & bit_CMPXCHG16B);
- return 1;
- }
- } ""
- }])
- || [istarget spu-*-*] } {
+ if { [istarget spu-*-*] } {
return 1
} else {
return 0
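
For reference, execution tests consume these predicates through dg directives, so the practical effect of this hunk is that 128-bit atomic execution tests now skip x86 targets; a typical guard in a test file looks like:

  /* { dg-do run } */
  /* { dg-require-effective-target sync_int_128_runtime } */
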