author    Jonathan Wakely <jwakely@redhat.com>  2023-01-06 18:39:14 +0000
committer Jonathan Wakely <jwakely@redhat.com>  2023-01-06 20:56:43 +0000
commit    61da01772a3dff61cf23ba2ffba33bccb68d1063 (patch)
tree      fe41530662b6fea6c1819485c2bc3663f126961a
parent    5b68fb47bd24abc2e6a65e5617f586d1c1b81d4e (diff)
libstdc++: Refactor time_zone::_Impl::rules_counter [PR108235]
Abstract the atomic counter used to synchronize access to the time_zone
infos behind a Lockable class API, and use atomic_signed_lock_free
instead of atomic<int_least32_t>, as that should be the most efficient
type. (For futex-supporting targets it makes no difference, but it might
benefit other targets in future.)

The new API allows the calling code to be simpler, without needing to
repeat the same error-prone preprocessor conditions in multiple places.
It also allows using template metaprogramming to decide whether to use
the atomic or a mutex, which gives us more flexibility than using
preprocessor conditions alone. That allows us to choose the mutex
implementation for targets such as hppa-hp-hpux11.11, where 32-bit
atomics are not lock-free and so would introduce an unwanted dependency
on libatomic.

libstdc++-v3/ChangeLog:

        PR libstdc++/108235
        * src/c++20/tzdb.cc (time_zone::_Impl::RulesCounter): New class
        template and partial specialization for synchronizing access to
        time_zone::_Impl::infos.
        (time_zone::_M_get_sys_info, reload_tzdb): Adjust uses of
        rules_counter.
-rw-r--r--   libstdc++-v3/src/c++20/tzdb.cc   137
1 file changed, 84 insertions(+), 53 deletions(-)
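To illustrate the approach described in the commit message, here is a minimal, self-contained sketch, not the libstdc++ sources: the name LockableCounter, the use of std::atomic<int>, and the dropped __GTHREADS check are simplifications. It shows how a constrained partial specialization can select an atomic implementation only when that atomic is guaranteed lock-free, falling back to a mutex otherwise, so the caller needs no preprocessor conditions at the use site.

#include <atomic>
#include <mutex>

// Primary template: used when the atomic type is not guaranteed lock-free.
// No counting is done; a plain mutex serializes every access.
template<typename Atomic>
struct LockableCounter
{
  void increment() { }          // no-ops: the mutex path never counts
  void decrement() { }

  std::mutex m;
  void lock()   { m.lock(); }
  void unlock() { m.unlock(); }
};

#ifdef __cpp_lib_atomic_wait
// Constrained partial specialization: only chosen when the atomic is
// always lock-free, so selecting it can never pull in libatomic.
template<typename Atomic> requires Atomic::is_always_lock_free
struct LockableCounter<Atomic>
{
  Atomic counter{0};            // > 0: unexpanded entries, < 0: locked

  void increment()
  { counter.fetch_add(1, std::memory_order::relaxed); }

  void decrement()
  {
    // The counter is negative while the lock is held, so adding one
    // reduces the count of unexpanded entries; at zero, wake waiters.
    if (++counter == 0)
      counter.notify_all();
  }

  void lock()
  {
    for (auto c = counter.load(std::memory_order::relaxed); c != 0;)
      {
        // Negating the counter marks the lock as held by this thread.
        if (c > 0 && counter.compare_exchange_strong(c, -c))
          return;
        if (c < 0)
          {
            counter.wait(c);    // another thread holds the lock
            c = counter.load();
          }
      }
    // c == 0: everything is expanded, concurrent read access is fine.
  }

  void unlock()
  {
    if (auto c = counter.load(std::memory_order::relaxed); c < 0)
      {
        counter.store(-c, std::memory_order::release);
        counter.notify_one();
      }
  }
};
#endif

// One declaration at the use site, no #if needed. (The real code uses
// std::atomic_signed_lock_free; std::atomic<int> keeps this sketch
// portable to implementations that do not define that alias.)
LockableCounter<std::atomic<int>> rules_counter;

The template argument only has to expose is_always_lock_free for the constraint check, which is why the fallback can be chosen at compile time on targets like hppa-hp-hpux11.11 without any target-specific preprocessor logic.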
diff --git a/libstdc++-v3/src/c++20/tzdb.cc b/libstdc++-v3/src/c++20/tzdb.cc
index 9103b15..fa4f4c7 100644
--- a/libstdc++-v3/src/c++20/tzdb.cc
+++ b/libstdc++-v3/src/c++20/tzdb.cc
@@ -29,7 +29,7 @@
#include <fstream> // ifstream
#include <sstream> // istringstream
#include <algorithm> // ranges::upper_bound, ranges::lower_bound, ranges::sort
-#include <atomic> // atomic<T*>, atomic<int_least32_t>
+#include <atomic> // atomic<T*>, atomic<int>
#include <memory> // atomic<shared_ptr<T>>
#include <mutex> // mutex
#include <filesystem> // filesystem::read_symlink
@@ -598,13 +598,86 @@ namespace std::chrono
// Needed to access the list of rules for the time zones.
weak_ptr<tzdb_list::_Node> node;
-#ifndef __GTHREADS
- // Don't need synchronization for accessing the infos vector.
-#elif __cpp_lib_atomic_wait
- atomic<int_least32_t> rules_counter;
-#else
- mutex infos_mutex;
+ // In the simple case, we don't actually keep count. No concurrent access
+ // to the infos vector is possible, even if all infos are expanded.
+ template<typename _Tp>
+ struct RulesCounter
+ {
+ // Called for each rule-based ZoneInfo added to the infos vector.
+ // Called when the time_zone::_Impl is created, so no concurrent calls.
+ void increment() { }
+ // Called when a rule-based ZoneInfo is expanded.
+ // The caller must have called lock() for exclusive access to infos.
+ void decrement() { }
+
+ // Use a mutex to synchronize all access to the infos vector.
+ mutex infos_mutex;
+
+ void lock() { infos_mutex.lock(); }
+ void unlock() { infos_mutex.unlock(); }
+ };
+
+#if defined __GTHREADS && __cpp_lib_atomic_wait
+ // Atomic count of unexpanded ZoneInfo objects in the infos vector.
+ // Concurrent access is allowed when all objects have been expanded.
+ // Only use an atomic counter if it would not require libatomic,
+ // because we don't want libstdc++.so to depend on libatomic.
+ template<typename _Tp> requires _Tp::is_always_lock_free
+ struct RulesCounter<_Tp>
+ {
+ atomic_signed_lock_free counter{0};
+
+ void
+ increment()
+ { counter.fetch_add(1, memory_order::relaxed); }
+
+ void
+ decrement()
+ {
+ // The current thread holds the lock, so the counter is negative
+ // and so we need to increment it to decrement it!
+ // If the count reaches zero then there are no more unexpanded infos,
+ // so notify all waiting threads that they can access the infos.
+ // We must do this here, because unlock() is a no-op if counter==0.
+ if (++counter == 0)
+ counter.notify_all();
+ }
+
+ void
+ lock()
+ {
+ // If counter is zero then concurrent access is allowed, so lock()
+ // and unlock() are no-ops and multiple threads can "lock" at once.
+ // If counter is non-zero then the contents of the infos vector might
+ // need to be changed, so only one thread is allowed to access it.
+ for (auto c = counter.load(memory_order::relaxed); c != 0;)
+ {
+ // Setting counter to negative means this thread has the lock.
+ if (c > 0 && counter.compare_exchange_strong(c, -c))
+ return;
+
+ if (c < 0)
+ {
+ // Counter is negative, another thread already has the lock.
+ counter.wait(c);
+ c = counter.load();
+ }
+ }
+ }
+
+ void
+ unlock()
+ {
+ if (auto c = counter.load(memory_order::relaxed); c < 0)
+ {
+ counter.store(-c, memory_order::release);
+ counter.notify_one();
+ }
+ }
+ };
#endif
+
+ RulesCounter<atomic_signed_lock_free> rules_counter;
};
namespace
@@ -666,46 +739,8 @@ namespace std::chrono
const auto node = _M_impl->node.lock();
auto& infos = _M_impl->infos;
-#ifndef __GTHREADS
-#elif __cpp_lib_atomic_wait
// Prevent concurrent access to _M_impl->infos if it might need to change.
- struct Lock
- {
- Lock(atomic<int_least32_t>& counter) : counter(counter)
- {
- // If counter is non-zero then the contents of _M_impl->info might
- // need to be changed, so only one thread is allowed to access it.
- for (auto c = counter.load(memory_order::relaxed); c != 0;)
- {
- // Setting counter to negative means this thread has the lock.
- if (c > 0 && counter.compare_exchange_strong(c, -c))
- return;
-
- if (c < 0)
- {
- // Counter is negative, another thread already has the lock.
- counter.wait(c);
- c = counter.load();
- }
- }
- }
-
- ~Lock()
- {
- if (auto c = counter.load(memory_order::relaxed); c < 0)
- {
- counter.store(-c, memory_order::release);
- counter.notify_one();
- }
- }
-
- atomic<int_least32_t>& counter;
- };
- Lock lock{_M_impl->rules_counter};
-#else
- // Keep it simple, just use a mutex for all access.
- lock_guard<mutex> lock(_M_impl->infos_mutex);
-#endif
+ lock_guard lock(_M_impl->rules_counter);
// Find the transition info for the time point.
auto i = ranges::upper_bound(infos, tp, ranges::less{}, &ZoneInfo::until);
@@ -894,10 +929,8 @@ namespace std::chrono
std::rotate(infos.begin() + result_index + 1, i, infos.end());
// Then replace the original rules_info object with new_infos.front():
infos[result_index] = ZoneInfo(new_infos.front());
-#if defined __GTHREADS && __cpp_lib_atomic_wait
- if (++_M_impl->rules_counter == 0) // No more unexpanded infos.
- _M_impl->rules_counter.notify_all();
-#endif
+ // Decrement count of rule-based infos (might also release lock).
+ _M_impl->rules_counter.decrement();
}
return info;
@@ -1339,11 +1372,9 @@ namespace std::chrono
ZoneInfo& info = impl.infos.emplace_back();
is >> info;
-#if defined __GTHREADS && __cpp_lib_atomic_wait
// Keep count of ZoneInfo objects that refer to named Rules.
if (!info.rules().empty())
- impl.rules_counter.fetch_add(1, memory_order::relaxed);
-#endif
+ impl.rules_counter.increment();
}
}
}
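A note on the simplified call site above: std::lock_guard only requires its argument to satisfy BasicLockable (member lock() and unlock()), and class template argument deduction supplies the template argument, which is why "lock_guard lock(_M_impl->rules_counter);" works with either RulesCounter variant. A small hypothetical demonstration of that mechanism (ToyCounter is not part of libstdc++):

#include <iostream>
#include <mutex>

// Hypothetical stand-in for RulesCounter: any type with lock()/unlock()
// members meets BasicLockable and can be used with std::lock_guard.
struct ToyCounter
{
  bool locked = false;          // just to show when the calls happen
  void lock()   { locked = true; }
  void unlock() { locked = false; }
};

int main()
{
  ToyCounter rules_counter;
  {
    // CTAD deduces std::lock_guard<ToyCounter>; lock() runs here.
    std::lock_guard lock(rules_counter);
    std::cout << "locked: " << rules_counter.locked << '\n';   // 1
  }                             // unlock() runs when lock leaves scope
  std::cout << "locked: " << rules_counter.locked << '\n';     // 0
}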