author     Bernd Edlinger <bernd.edlinger@hotmail.de>  2020-12-01 18:54:48 +0100
committer  Bernd Edlinger <bernd.edlinger@hotmail.de>  2020-12-08 21:07:02 +0100
commit     447f99b3b8baabbfb33b29123113637e54c5e652 (patch)
tree       59eb5f7abb8bd21cd8d2eed6f71c55cc657070ba /gcc/cp/decl2.c
parent     f7251a2c103bc48775cb9726a4bebeaebde96684 (diff)
Avoid atomic for guard acquire when that is expensive

When the atomic access involves a call to __sync_synchronize, it is
better to call __cxa_guard_acquire unconditionally, since it handles
the atomics too, or is a non-threaded implementation when there is no
gthread support for this target.

This also fixes a bug for the ARM EABI big-endian target: previously
the wrong bit was checked.

2020-12-08  Bernd Edlinger  <bernd.edlinger@hotmail.de>

	* decl2.c (is_atomic_expensive_p): New helper function.
	(build_atomic_load_byte): Rename to...
	(build_atomic_load_type): ... and add new parameter type.
	(get_guard_cond): Skip the atomic here if that is expensive.
	Use the correct type for the atomic load on certain targets.
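
For context, this is roughly the shape of the guarded initialization
the compiler emits for a function-local static.  The following is a
hand-written sketch for illustration only, not GCC's actual lowering;
guard_type, obj_guard and lazy_init are made-up stand-ins, while
__atomic_load_n, __cxa_guard_acquire and __cxa_guard_release are the
real builtin/runtime entry points:

// Illustrative sketch of guarded static initialization; the guard
// type and extern declarations are simplified stand-ins, not the
// exact Itanium C++ ABI declarations.
typedef long long guard_type;
extern "C" int  __cxa_guard_acquire (guard_type *);
extern "C" void __cxa_guard_release (guard_type *);

static guard_type obj_guard;

void
lazy_init ()
{
  // Fast path: inline acquire load of the guard's "initialized" flag.
  // When this load is expensive (it would expand to a call to
  // __sync_synchronize), the patch emits constant 0 here instead, so
  // control always reaches the slow path below.
  if (__atomic_load_n ((char *) &obj_guard, __ATOMIC_ACQUIRE) == 0)
    {
      // Slow path: __cxa_guard_acquire performs the atomics itself,
      // or is a plain non-threaded check when the target has no
      // gthread support.
      if (__cxa_guard_acquire (&obj_guard))
        {
          // ... construct the static object here ...
          __cxa_guard_release (&obj_guard);
        }
    }
}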
Diffstat (limited to 'gcc/cp/decl2.c')
-rw-r--r--  gcc/cp/decl2.c  33
1 file changed, 29 insertions(+), 4 deletions(-)
diff --git a/gcc/cp/decl2.c b/gcc/cp/decl2.c
index e713033..eec0c92 100644
--- a/gcc/cp/decl2.c
+++ b/gcc/cp/decl2.c
@@ -48,6 +48,7 @@ along with GCC; see the file COPYING3. If not see
#include "intl.h"
#include "c-family/c-ada-spec.h"
#include "asan.h"
+#include "optabs-query.h"
/* Id for dumping the raw trees. */
int raw_dump_id;
@@ -3297,18 +3298,34 @@ get_guard (tree decl)
   return guard;
 }
 
+/* Returns true if accessing the GUARD atomic is expensive,
+   i.e. involves a call to __sync_synchronize or similar.
+   In this case let __cxa_guard_acquire handle the atomics.  */
+
+static bool
+is_atomic_expensive_p (machine_mode mode)
+{
+  if (!flag_inline_atomics)
+    return true;
+
+  if (!can_compare_and_swap_p (mode, false) || !can_atomic_load_p (mode))
+    return true;
+
+  return false;
+}
+
 /* Return an atomic load of src with the appropriate memory model.  */
 
 static tree
-build_atomic_load_byte (tree src, HOST_WIDE_INT model)
+build_atomic_load_type (tree src, HOST_WIDE_INT model, tree type)
 {
-  tree ptr_type = build_pointer_type (char_type_node);
+  tree ptr_type = build_pointer_type (type);
   tree mem_model = build_int_cst (integer_type_node, model);
   tree t, addr, val;
   unsigned int size;
   int fncode;
 
-  size = tree_to_uhwi (TYPE_SIZE_UNIT (char_type_node));
+  size = tree_to_uhwi (TYPE_SIZE_UNIT (type));
   fncode = BUILT_IN_ATOMIC_LOAD_N + exact_log2 (size) + 1;
   t = builtin_decl_implicit ((enum built_in_function) fncode);
@@ -3351,7 +3368,15 @@ get_guard_cond (tree guard, bool thread_safe)
   if (!thread_safe)
     guard = get_guard_bits (guard);
   else
-    guard = build_atomic_load_byte (guard, MEMMODEL_ACQUIRE);
+    {
+      tree type = targetm.cxx.guard_mask_bit ()
+                  ? TREE_TYPE (guard) : char_type_node;
+
+      if (is_atomic_expensive_p (TYPE_MODE (type)))
+        guard = integer_zero_node;
+      else
+        guard = build_atomic_load_type (guard, MEMMODEL_ACQUIRE, type);
+    }
 
   /* Mask off all but the low bit.  */
   if (targetm.cxx.guard_mask_bit ())
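
Regarding the ARM EABI big-endian fix: when targetm.cxx.guard_mask_bit ()
is true, the "initialized" flag is bit 0 of the entire 32-bit guard
word, so a one-byte load of the guard only observes that bit on
little-endian layouts.  A standalone sketch of the pitfall (our own
demonstration, not GCC code):

// Shows why a 1-byte load of a 32-bit guard is wrong on big-endian
// targets: the byte at the lowest address is the most significant
// byte there and never contains bit 0 of the word.
#include <cstdio>
#include <cstring>
#include <cstdint>

int
main ()
{
  std::uint32_t guard = 1;              // bit 0 set: "initialized"
  unsigned char first_byte;
  std::memcpy (&first_byte, &guard, 1); // what a byte-wide load reads

  // Little-endian: prints 1, the flag is visible in byte 0.
  // Big-endian: prints 0, so the old byte-wide atomic load tested the
  // wrong bit; loading the full guard type (this patch) fixes that.
  std::printf ("first byte = %u\n", (unsigned) first_byte);
  return 0;
}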