author     Jonathan Wakely <jwakely@redhat.com>  2023-09-11 16:42:54 +0100
committer  Jonathan Wakely <jwakely@redhat.com>  2023-09-11 17:07:24 +0100
commit     4a2766ed00a47904dc8b85bf0538aa116d8e658b
tree       b3ebf7823e2c8ce3f2d5f238aa5a318f21548dcd
parent     c7db9000fa7caceadb4e72dcc6226abebf7a6239
libstdc++: Remove unconditional use of atomics in Debug Mode
The fix for PR 91910 (r10-3426-gf7a3a382279585) introduced unconditional uses
of atomics into src/c++11/debug.cc, which causes linker errors for arm4t where
GCC emits an unresolved reference to __sync_synchronize.

By making the uses of atomics depend on _GLIBCXX_HAS_GTHREADS we can avoid
those unconditional references to __sync_synchronize for targets where the
atomics are unnecessary. As a minor performance optimization we can also check
the __gnu_cxx::__is_single_threaded function to avoid atomics for
single-threaded programs even where they don't cause linker errors.

libstdc++-v3/ChangeLog:

	* src/c++11/debug.cc (acquire_sequence_ptr_for_lock): New
	function.
	(reset_sequence_ptr): New function.
	(_Safe_iterator_base::_M_detach)
	(_Safe_local_iterator_base::_M_detach): Replace bare atomic_load
	with acquire_sequence_ptr_for_lock.
	(_Safe_iterator_base::_M_reset): Replace bare atomic_store with
	reset_sequence_ptr.
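For context, here is a minimal stand-alone sketch of the dispatch pattern the
patch introduces (the names 'node', 'guarded_load' and 'guarded_clear' are
illustrative only; the real helpers operate on
__gnu_debug::_Safe_sequence_base pointers). The preprocessor check removes the
atomics entirely when the target has no gthreads support, which is what avoids
the unresolved __sync_synchronize reference, while the
__gnu_cxx::__is_single_threaded() test is only a runtime optimization on
threaded targets:

    // Illustrative sketch, not part of the patch.
    #include <ext/atomicity.h> // __gnu_cxx::__is_single_threaded, pulls in <bits/gthr.h>

    struct node { };

    node*
    guarded_load(node*& p)
    {
    #ifdef __GTHREADS
      // Threads are possible: acquire-load so the caller observes writes
      // made before a concurrent release-store of this pointer.
      if (!__gnu_cxx::__is_single_threaded())
	return __atomic_load_n(&p, __ATOMIC_ACQUIRE);
    #endif
      // Threads impossible (or gthreads absent): a plain load suffices
      // and no barrier code is emitted.
      return p;
    }

    void
    guarded_clear(node*& p)
    {
    #ifdef __GTHREADS
      if (!__gnu_cxx::__is_single_threaded())
	{
	  __atomic_store_n(&p, static_cast<node*>(nullptr), __ATOMIC_RELEASE);
	  return;
	}
    #endif
      p = nullptr;
    }

As in the patch, the acquire load in the _M_detach paths pairs with the
release store in _M_reset, which is why the two helpers use matching memory
orders.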
-rw-r--r--  libstdc++-v3/src/c++11/debug.cc | 32
 1 file changed, 29 insertions(+), 3 deletions(-)
diff --git a/libstdc++-v3/src/c++11/debug.cc b/libstdc++-v3/src/c++11/debug.cc
index f40e995..bb0d0db 100644
--- a/libstdc++-v3/src/c++11/debug.cc
+++ b/libstdc++-v3/src/c++11/debug.cc
@@ -24,6 +24,7 @@
#include <bits/move.h>
#include <bits/stl_iterator_base_types.h>
+#include <ext/atomicity.h> // __is_single_threaded
#include <debug/formatter.h>
#include <debug/safe_base.h>
@@ -156,6 +157,31 @@ namespace
__old->_M_reset();
}
}
+
+ void*
+ acquire_sequence_ptr_for_lock(__gnu_debug::_Safe_sequence_base*& seq)
+ {
+#ifdef __GTHREADS
+ if (!__gnu_cxx::__is_single_threaded())
+ return __atomic_load_n(&seq, __ATOMIC_ACQUIRE);
+#endif
+ return seq;
+ }
+
+ void
+ reset_sequence_ptr(__gnu_debug::_Safe_sequence_base*& seq)
+ {
+#ifdef __GTHREADS
+ if (!__gnu_cxx::__is_single_threaded())
+ {
+ __atomic_store_n(&seq, (__gnu_debug::_Safe_sequence_base*)nullptr,
+ __ATOMIC_RELEASE);
+ return;
+ }
+#endif
+ seq = nullptr;
+ }
+
} // anonymous namespace
namespace __gnu_debug
@@ -439,7 +465,7 @@ namespace __gnu_debug
// If the sequence destructor runs between loading the pointer and
// locking the mutex, it will detach this iterator and set _M_sequence
// to null, and then _M_detach_single() will do nothing.
- if (auto seq = __atomic_load_n(&_M_sequence, __ATOMIC_ACQUIRE))
+ if (auto seq = acquire_sequence_ptr_for_lock(_M_sequence))
{
__gnu_cxx::__scoped_lock sentry(get_safe_base_mutex(seq));
_M_detach_single();
@@ -461,7 +487,7 @@ namespace __gnu_debug
_Safe_iterator_base::
_M_reset() throw ()
{
- __atomic_store_n(&_M_sequence, (_Safe_sequence_base*)0, __ATOMIC_RELEASE);
+ reset_sequence_ptr(_M_sequence);
// Do not reset version, so that a detached iterator does not look like a
// value-initialized one.
// _M_version = 0;
@@ -523,7 +549,7 @@ namespace __gnu_debug
_Safe_local_iterator_base::
_M_detach()
{
- if (auto seq = __atomic_load_n(&_M_sequence, __ATOMIC_ACQUIRE))
+ if (auto seq = acquire_sequence_ptr_for_lock(_M_sequence))
{
__gnu_cxx::__scoped_lock sentry(get_safe_base_mutex(seq));
_M_detach_single();