author     Mike Crowe <mac@mcrowe.com>          2020-09-11 14:25:00 +0100
committer  Jonathan Wakely <jwakely@redhat.com> 2020-09-11 14:25:00 +0100
commit     01d412ef36f56c6961858f4d3d395d000e3f1c06 (patch)
tree       697e8fb22e6e0d068d0503c2e09fc2eba98b7446
parent     5bad23ceec0bfc9fea7c3da10b187366052de369 (diff)
libstdc++: Support futex waiting on chrono::steady_clock directly
The user-visible effect of this change is for std::future::wait_until to use
CLOCK_MONOTONIC when passed a timeout of std::chrono::steady_clock type. This
makes it immune to any changes made to the system clock CLOCK_REALTIME.

Add an overload of __atomic_futex_unsigned::_M_load_and_test_until_impl that
accepts a std::chrono::steady_clock time_point, and correctly passes this
through to __atomic_futex_unsigned_base::_M_futex_wait_until_steady, which
uses CLOCK_MONOTONIC for the timeout within the futex system call. These
functions are mostly just copies of the std::chrono::system_clock versions
with small tweaks.

Prior to this commit, a std::chrono::steady_clock timeout would be converted
via std::chrono::system_clock, which risks reducing or increasing the timeout
if someone changes CLOCK_REALTIME whilst the wait is happening. (The commit
immediately prior to this one increases the window of opportunity for that
from a short period during the calculation of a relative timeout to the
entire duration of the wait.)

FUTEX_WAIT_BITSET was added in kernel v2.6.25. If futex reports ENOSYS to
indicate that this operation is not supported, the code falls back to using
clock_gettime(2) to calculate a relative time to wait for.

I believe that I've added this functionality in a way that doesn't break ABI
compatibility, but that has made it more verbose and less type safe. I
believe that it would be better to maintain the timeout as an instance of the
correct clock type all the way down to a single _M_futex_wait_until function
with an overload for each clock. The current scheme of separating out the
seconds and nanoseconds early risks accidentally calling the wait function
for the wrong clock. Unfortunately, doing this would break code that compiled
against the old header.

libstdc++-v3/ChangeLog:

	* config/abi/pre/gnu.ver: Update for addition of
	__atomic_futex_unsigned_base::_M_futex_wait_until_steady.
	* include/bits/atomic_futex.h (__atomic_futex_unsigned_base):
	Add comments to clarify that _M_futex_wait_until and
	_M_load_and_test_until use CLOCK_REALTIME.
	(__atomic_futex_unsigned_base::_M_futex_wait_until_steady)
	(__atomic_futex_unsigned_base::_M_load_and_test_until_steady):
	New member functions that use CLOCK_MONOTONIC.
	(__atomic_futex_unsigned_base::_M_load_and_test_until_impl)
	(__atomic_futex_unsigned_base::_M_load_when_equal_until): Add
	overloads that accept a steady_clock time_point and use the new
	member functions.
	* src/c++11/futex.cc: Include headers required for clock_gettime.
	(futex_clock_monotonic_flag): New constant to tell futex to use
	CLOCK_MONOTONIC to match existing futex_clock_realtime_flag.
	(futex_clock_monotonic_unavailable): New global to store the
	result of trying to use CLOCK_MONOTONIC.
	(__atomic_futex_unsigned_base::_M_futex_wait_until_steady): Add
	new variant of _M_futex_wait_until that uses CLOCK_MONOTONIC to
	support waiting using steady_clock.
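As a rough illustration of the user-visible effect described above, here is a
minimal sketch (not part of the patch) of a caller that waits on a
std::future with a std::chrono::steady_clock deadline. With this change the
underlying futex wait uses CLOCK_MONOTONIC, so setting CLOCK_REALTIME
backwards or forwards while the wait is in progress no longer shortens or
lengthens it:

    #include <chrono>
    #include <future>
    #include <iostream>

    int main()
    {
      std::promise<int> p;
      std::future<int> f = p.get_future();

      // Deadline expressed against steady_clock; before this patch it was
      // converted via system_clock before reaching the futex wait.
      auto deadline = std::chrono::steady_clock::now()
                      + std::chrono::milliseconds(100);

      // The promise is never satisfied, so this wait should time out after
      // roughly 100ms of steady_clock time regardless of changes to the
      // system clock.
      if (f.wait_until(deadline) == std::future_status::timeout)
        std::cout << "timed out\n";
    }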
-rw-r--r--  libstdc++-v3/config/abi/pre/gnu.ver      | 10
-rw-r--r--  libstdc++-v3/include/bits/atomic_futex.h | 67
-rw-r--r--  libstdc++-v3/src/c++11/futex.cc          | 82
3 files changed, 154 insertions(+), 5 deletions(-)
diff --git a/libstdc++-v3/config/abi/pre/gnu.ver b/libstdc++-v3/config/abi/pre/gnu.ver
index 5ffe9d9..87a48a2 100644
--- a/libstdc++-v3/config/abi/pre/gnu.ver
+++ b/libstdc++-v3/config/abi/pre/gnu.ver
@@ -1922,10 +1922,9 @@ GLIBCXX_3.4.21 {
_ZNSt7codecvtID[is]c*;
_ZT[ISV]St7codecvtID[is]c*E;
- extern "C++"
- {
- std::__atomic_futex_unsigned_base*;
- };
+ # std::__atomic_futex_unsigned_base members
+ _ZNSt28__atomic_futex_unsigned_base19_M_futex_notify_all*;
+ _ZNSt28__atomic_futex_unsigned_base19_M_futex_wait_until*;
# codecvt_utf8 etc.
_ZNKSt19__codecvt_utf8_base*;
@@ -2320,6 +2319,9 @@ GLIBCXX_3.4.29 {
_ZNSbIwSt11char_traitsIwESaIwEE7reserveEv;
_ZNSt7__cxx1112basic_stringI[cw]St11char_traitsI[cw]ESaI[cw]EE7reserveEv;
+ # std::__atomic_futex_unsigned_base::_M_futex_wait_until_steady
+ _ZNSt28__atomic_futex_unsigned_base26_M_futex_wait_until_steady*;
+
} GLIBCXX_3.4.28;
# Symbols in the support library (libsupc++) have their own tag.
diff --git a/libstdc++-v3/include/bits/atomic_futex.h b/libstdc++-v3/include/bits/atomic_futex.h
index 886fc63..507c5c9 100644
--- a/libstdc++-v3/include/bits/atomic_futex.h
+++ b/libstdc++-v3/include/bits/atomic_futex.h
@@ -52,11 +52,18 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
#if defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1
struct __atomic_futex_unsigned_base
{
- // Returns false iff a timeout occurred.
+ // __s and __ns are measured against CLOCK_REALTIME. Returns false
+ // iff a timeout occurred.
bool
_M_futex_wait_until(unsigned *__addr, unsigned __val, bool __has_timeout,
chrono::seconds __s, chrono::nanoseconds __ns);
+ // __s and __ns are measured against CLOCK_MONOTONIC. Returns
+ // false iff a timeout occurred.
+ bool
+ _M_futex_wait_until_steady(unsigned *__addr, unsigned __val,
+ bool __has_timeout, chrono::seconds __s, chrono::nanoseconds __ns);
+
// This can be executed after the object has been destroyed.
static void _M_futex_notify_all(unsigned* __addr);
};
@@ -86,6 +93,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// value if equal is false.
// The assumed value is the caller's assumption about the current value
// when making the call.
+ // __s and __ns are measured against CLOCK_REALTIME.
unsigned
_M_load_and_test_until(unsigned __assumed, unsigned __operand,
bool __equal, memory_order __mo, bool __has_timeout,
@@ -110,6 +118,36 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
}
+ // If a timeout occurs, returns a current value after the timeout;
+ // otherwise, returns the operand's value if equal is true or a different
+ // value if equal is false.
+ // The assumed value is the caller's assumption about the current value
+ // when making the call.
+ // __s and __ns are measured against CLOCK_MONOTONIC.
+ unsigned
+ _M_load_and_test_until_steady(unsigned __assumed, unsigned __operand,
+ bool __equal, memory_order __mo, bool __has_timeout,
+ chrono::seconds __s, chrono::nanoseconds __ns)
+ {
+ for (;;)
+ {
+ // Don't bother checking the value again because we expect the caller
+ // to have done it recently.
+ // memory_order_relaxed is sufficient because we can rely on just the
+ // modification order (store_notify uses an atomic RMW operation too),
+ // and the futex syscalls synchronize between themselves.
+ _M_data.fetch_or(_Waiter_bit, memory_order_relaxed);
+ bool __ret = _M_futex_wait_until_steady((unsigned*)(void*)&_M_data,
+ __assumed | _Waiter_bit,
+ __has_timeout, __s, __ns);
+ // Fetch the current value after waiting (clears _Waiter_bit).
+ __assumed = _M_load(__mo);
+ if (!__ret || ((__operand == __assumed) == __equal))
+ return __assumed;
+ // TODO adapt wait time
+ }
+ }
+
// Returns the operand's value if equal is true or a different value if
// equal is false.
// The assumed value is the caller's assumption about the current value
@@ -140,6 +178,19 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
true, __s.time_since_epoch(), __ns);
}
+ template<typename _Dur>
+ unsigned
+ _M_load_and_test_until_impl(unsigned __assumed, unsigned __operand,
+ bool __equal, memory_order __mo,
+ const chrono::time_point<std::chrono::steady_clock, _Dur>& __atime)
+ {
+ auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
+ auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
+ // XXX correct?
+ return _M_load_and_test_until_steady(__assumed, __operand, __equal, __mo,
+ true, __s.time_since_epoch(), __ns);
+ }
+
public:
_GLIBCXX_ALWAYS_INLINE unsigned
@@ -200,6 +251,20 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
return (__i & ~_Waiter_bit) == __val;
}
+ // Returns false iff a timeout occurred.
+ template<typename _Duration>
+ _GLIBCXX_ALWAYS_INLINE bool
+ _M_load_when_equal_until(unsigned __val, memory_order __mo,
+ const chrono::time_point<std::chrono::steady_clock, _Duration>& __atime)
+ {
+ unsigned __i = _M_load(__mo);
+ if ((__i & ~_Waiter_bit) == __val)
+ return true;
+ // TODO Spin-wait first. Ignore effect on timeout.
+ __i = _M_load_and_test_until_impl(__i, __val, true, __mo, __atime);
+ return (__i & ~_Waiter_bit) == __val;
+ }
+
_GLIBCXX_ALWAYS_INLINE void
_M_store_notify_all(unsigned __val, memory_order __mo)
{
diff --git a/libstdc++-v3/src/c++11/futex.cc b/libstdc++-v3/src/c++11/futex.cc
index 25b3e05..0331bd6 100644
--- a/libstdc++-v3/src/c++11/futex.cc
+++ b/libstdc++-v3/src/c++11/futex.cc
@@ -33,9 +33,15 @@
#include <errno.h>
#include <debug/debug.h>
+#ifdef _GLIBCXX_USE_CLOCK_GETTIME_SYSCALL
+#include <unistd.h>
+#include <sys/syscall.h>
+#endif
+
// Constants for the wait/wake futex syscall operations
const unsigned futex_wait_op = 0;
const unsigned futex_wait_bitset_op = 9;
+const unsigned futex_clock_monotonic_flag = 0;
const unsigned futex_clock_realtime_flag = 256;
const unsigned futex_bitset_match_any = ~0;
const unsigned futex_wake_op = 1;
@@ -43,6 +49,7 @@ const unsigned futex_wake_op = 1;
namespace
{
std::atomic<bool> futex_clock_realtime_unavailable;
+ std::atomic<bool> futex_clock_monotonic_unavailable;
}
namespace std _GLIBCXX_VISIBILITY(default)
@@ -121,6 +128,81 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
}
+ bool
+ __atomic_futex_unsigned_base::_M_futex_wait_until_steady(unsigned *__addr,
+ unsigned __val,
+ bool __has_timeout, chrono::seconds __s, chrono::nanoseconds __ns)
+ {
+ if (!__has_timeout)
+ {
+ // Ignore whether we actually succeeded to block because at worst,
+ // we will fall back to spin-waiting. The only thing we could do
+ // here on errors is abort.
+ int ret __attribute__((unused));
+ ret = syscall (SYS_futex, __addr, futex_wait_op, __val, nullptr);
+ __glibcxx_assert(ret == 0 || errno == EINTR || errno == EAGAIN);
+ return true;
+ }
+ else
+ {
+ if (!futex_clock_monotonic_unavailable.load(std::memory_order_relaxed))
+ {
+ struct timespec rt;
+ rt.tv_sec = __s.count();
+ rt.tv_nsec = __ns.count();
+
+ if (syscall (SYS_futex, __addr,
+ futex_wait_bitset_op | futex_clock_monotonic_flag,
+ __val, &rt, nullptr, futex_bitset_match_any) == -1)
+ {
+ __glibcxx_assert(errno == EINTR || errno == EAGAIN
+ || errno == ETIMEDOUT || errno == ENOSYS);
+ if (errno == ETIMEDOUT)
+ return false;
+ else if (errno == ENOSYS)
+ {
+ futex_clock_monotonic_unavailable.store(true,
+ std::memory_order_relaxed);
+ // Fall through to legacy implementation if the system
+ // call is unavailable.
+ }
+ else
+ return true;
+ }
+ }
+
+ // We only get to here if futex_clock_monotonic_unavailable was
+ // true or has just been set to true.
+ struct timespec ts;
+#ifdef _GLIBCXX_USE_CLOCK_GETTIME_SYSCALL
+ syscall(SYS_clock_gettime, CLOCK_MONOTONIC, &ts);
+#else
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+#endif
+ // Convert the absolute timeout value to a relative timeout
+ struct timespec rt;
+ rt.tv_sec = __s.count() - ts.tv_sec;
+ rt.tv_nsec = __ns.count() - ts.tv_nsec;
+ if (rt.tv_nsec < 0)
+ {
+ rt.tv_nsec += 1000000000;
+ --rt.tv_sec;
+ }
+ // Did we already time out?
+ if (rt.tv_sec < 0)
+ return false;
+
+ if (syscall (SYS_futex, __addr, futex_wait_op, __val, &rt) == -1)
+ {
+ __glibcxx_assert(errno == EINTR || errno == EAGAIN
+ || errno == ETIMEDOUT);
+ if (errno == ETIMEDOUT)
+ return false;
+ }
+ return true;
+ }
+ }
+
void
__atomic_futex_unsigned_base::_M_futex_notify_all(unsigned* __addr)
{