author    Jonathan Wakely <jwakely@redhat.com>  2022-01-16 20:47:09 +0000
committer Jonathan Wakely <jwakely@redhat.com>  2022-01-17 00:06:28 +0000
commit    2ac0649d7bf3eacbf92add1ec2b54045c401a4c2 (patch)
tree      c858f264e67f6e875b9a122ebe865ed95bc6f154 /libstdc++-v3/include
parent    86e3b476d5defaa79c94d40b76cbeec21cd02e5f (diff)
download  gcc-2ac0649d7bf3eacbf92add1ec2b54045c401a4c2.zip
          gcc-2ac0649d7bf3eacbf92add1ec2b54045c401a4c2.tar.gz
          gcc-2ac0649d7bf3eacbf92add1ec2b54045c401a4c2.tar.bz2
libstdc++: Implement C++20 atomic<shared_ptr> and atomic<weak_ptr>
This adds another piece of C++20, the std::atomic specializations for
std::shared_ptr and std::weak_ptr.

The new _Sp_atomic type mimics the structure of shared_ptr<T> and
weak_ptr<T>, holding a T* pointer (the one returned by get() on a
shared_ptr/weak_ptr) and a _Sp_counted_base<>* pointer to the
ref-counted control block. For _Sp_atomic the low bit of the control
block pointer is used as a lock bit, to ensure only one thread will
access the object at a time. The pointer is actually stored as a
uintptr_t to avoid accidental dereferences of the pointer when unlocked
(which would be a race) or when locked (which would dereference the
wrong pointer value due to the low bit being set). To get a raw pointer
to the control block, the lock must be acquired.

Converting between a _Sp_atomic and a shared_ptr or weak_ptr requires
manually adjusting the T* and _Sp_counted_base<>* members of the
shared/weak ptr, instead of going through the public API. This must be
done carefully to ensure that any change in the number of owners is
reflected in a ref-count update.

Co-authored-by: Thomas Rodgers <trodgers@redhat.com>
Signed-off-by: Thomas Rodgers <trodgers@redhat.com>

libstdc++-v3/ChangeLog:

	* include/bits/shared_ptr_atomic.h (__cpp_lib_atomic_shared_ptr):
	New macro.
	(_Sp_atomic): New class template.
	(atomic<shared_ptr<T>>, atomic<weak_ptr<T>>): New partial
	specializations.
	* include/bits/shared_ptr_base.h (__shared_count, __weak_count)
	(__shared_ptr, __weak_ptr): Declare _Sp_atomic as a friend.
	* include/std/version (__cpp_lib_atomic_shared_ptr): New macro.
	* testsuite/20_util/shared_ptr/atomic/atomic_shared_ptr.cc: New test.
	* testsuite/20_util/weak_ptr/atomic_weak_ptr.cc: New test.
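As a usage sketch (not part of this patch; the Widget type and the CAS
loop below are hypothetical), the new specialization can be exercised
roughly like this on a toolchain that ships it:

    // Sketch only: assumes a C++20 toolchain with this commit applied.
    #include <atomic>
    #include <memory>

    struct Widget { int value; };

    std::atomic<std::shared_ptr<Widget>> cfg{std::make_shared<Widget>(Widget{1})};

    void reader()
    {
      // load() takes a strong reference while holding the internal lock
      // bit, so the Widget stays alive even if a writer replaces it
      // concurrently.
      std::shared_ptr<Widget> p = cfg.load();
      (void) p->value;
    }

    void writer(int v)
    {
      // Standard CAS loop: on failure, expected is refreshed with the
      // currently stored value, so the next iteration retries against
      // the latest pointer.
      auto expected = cfg.load();
      auto desired = std::make_shared<Widget>(Widget{v});
      while (!cfg.compare_exchange_weak(expected, desired))
        { }
    }

Note that is_always_lock_free is false: every operation goes through the
lock bit described above, unlike atomic<T*>.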
Diffstat (limited to 'libstdc++-v3/include')
-rw-r--r--  libstdc++-v3/include/bits/shared_ptr_atomic.h  455
-rw-r--r--  libstdc++-v3/include/bits/shared_ptr_base.h     17
-rw-r--r--  libstdc++-v3/include/std/version                  1
3 files changed, 473 insertions, 0 deletions
diff --git a/libstdc++-v3/include/bits/shared_ptr_atomic.h b/libstdc++-v3/include/bits/shared_ptr_atomic.h
index 0e1c289..900499b 100644
--- a/libstdc++-v3/include/bits/shared_ptr_atomic.h
+++ b/libstdc++-v3/include/bits/shared_ptr_atomic.h
@@ -327,6 +327,461 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
/// @}
+#if __cplusplus >= 202002L
+# define __cpp_lib_atomic_shared_ptr 201711L
+ template<typename _Tp>
+ class atomic;
+
+ template<typename _Up>
+ static constexpr bool __is_shared_ptr = false;
+ template<typename _Up>
+ static constexpr bool __is_shared_ptr<shared_ptr<_Up>> = true;
+
+ template<typename _Tp>
+ class _Sp_atomic
+ {
+ using value_type = _Tp;
+
+ friend class atomic<_Tp>;
+
+ // An atomic version of __shared_count<> and __weak_count<>.
+ // Stores a _Sp_counted_base<>* but uses the LSB as a lock.
+ struct _Atomic_count
+ {
+ // Either __shared_count<> or __weak_count<>
+ using __count_type = decltype(_Tp::_M_refcount);
+
+ // _Sp_counted_base<>*
+ using pointer = decltype(__count_type::_M_pi);
+
+ // Ensure we can use the LSB as the lock bit.
+ static_assert(alignof(remove_pointer_t<pointer>) > 1);
+
+ _Atomic_count() : _M_val(0) { }
+
+ explicit
+ _Atomic_count(__count_type&& __c) noexcept
+ : _M_val(reinterpret_cast<uintptr_t>(__c._M_pi))
+ {
+ __c._M_pi = nullptr;
+ }
+
+ ~_Atomic_count()
+ {
+ auto __val = _M_val.load(memory_order_relaxed);
+ __glibcxx_assert(!(__val & _S_lock_bit));
+ if (auto __pi = reinterpret_cast<pointer>(__val))
+ {
+ if constexpr (__is_shared_ptr<_Tp>)
+ __pi->_M_release();
+ else
+ __pi->_M_weak_release();
+ }
+ }
+
+ _Atomic_count(const _Atomic_count&) = delete;
+ _Atomic_count& operator=(const _Atomic_count&) = delete;
+
+ // Precondition: Caller does not hold lock!
+ // Returns the raw pointer value without the lock bit set.
+ pointer
+ lock(memory_order __o) const noexcept
+ {
+ // To acquire the lock we flip the LSB from 0 to 1.
+
+ auto __current = _M_val.load(memory_order_relaxed);
+ while (__current & _S_lock_bit)
+ {
+ __detail::__thread_relax();
+ __current = _M_val.load(memory_order_relaxed);
+ }
+
+ while (!_M_val.compare_exchange_strong(__current,
+ __current | _S_lock_bit,
+ __o,
+ memory_order_relaxed))
+ {
+ __detail::__thread_relax();
+ __current = __current & ~_S_lock_bit;
+ }
+ return reinterpret_cast<pointer>(__current);
+ }
+
+ // Precondition: caller holds lock!
+ void
+ unlock(memory_order __o) const noexcept
+ {
+ _M_val.fetch_sub(1, __o);
+ }
+
+ // Swaps the values of *this and __c, and unlocks *this.
+ // Precondition: caller holds lock!
+ void
+ _M_swap_unlock(__count_type& __c, memory_order __o) noexcept
+ {
+ if (__o != memory_order_seq_cst)
+ __o = memory_order_release;
+ auto __x = reinterpret_cast<uintptr_t>(__c._M_pi);
+ __x = _M_val.exchange(__x, __o);
+ __c._M_pi = reinterpret_cast<pointer>(__x & ~_S_lock_bit);
+ }
+
+#if __cpp_lib_atomic_wait
+ // Precondition: caller holds lock!
+ void
+ _M_wait_unlock(memory_order __o) const noexcept
+ {
+ auto __v = _M_val.fetch_sub(1, memory_order_relaxed);
+ _M_val.wait(__v & ~_S_lock_bit, __o);
+ }
+
+ void
+ notify_one() noexcept
+ {
+ _M_val.notify_one();
+ }
+
+ void
+ notify_all() noexcept
+ {
+ _M_val.notify_all();
+ }
+#endif
+
+ private:
+ mutable __atomic_base<uintptr_t> _M_val{0};
+ static constexpr uintptr_t _S_lock_bit{1};
+ };
+
+ typename _Tp::element_type* _M_ptr;
+ _Atomic_count _M_refcount;
+
+ static _Atomic_count::pointer
+ _S_add_ref(_Atomic_count::pointer __p)
+ {
+ if (__p)
+ {
+ if constexpr (__is_shared_ptr<_Tp>)
+ __p->_M_add_ref_copy();
+ else
+ __p->_M_weak_add_ref();
+ }
+ return __p;
+ }
+
+ constexpr _Sp_atomic() noexcept = default;
+
+ explicit
+ _Sp_atomic(value_type __r) noexcept
+ : _M_ptr(__r._M_ptr), _M_refcount(std::move(__r._M_refcount))
+ { }
+
+ ~_Sp_atomic() = default;
+
+ _Sp_atomic(const _Sp_atomic&) = delete;
+ void operator=(const _Sp_atomic&) = delete;
+
+ value_type
+ load(memory_order __o) const noexcept
+ {
+ __glibcxx_assert(__o != memory_order_release
+ && __o != memory_order_acq_rel);
+ // Ensure that the correct value of _M_ptr is visible after locking,
+ // by upgrading relaxed or consume to acquire.
+ if (__o != memory_order_seq_cst)
+ __o = memory_order_acquire;
+
+ value_type __ret;
+ auto __pi = _M_refcount.lock(__o);
+ __ret._M_ptr = _M_ptr;
+ __ret._M_refcount._M_pi = _S_add_ref(__pi);
+ _M_refcount.unlock(memory_order_relaxed);
+ return __ret;
+ }
+
+ void
+ swap(value_type& __r, memory_order __o) noexcept
+ {
+ _M_refcount.lock(memory_order_acquire);
+ std::swap(_M_ptr, __r._M_ptr);
+ _M_refcount._M_swap_unlock(__r._M_refcount, __o);
+ }
+
+ bool
+ compare_exchange_strong(value_type& __expected, value_type __desired,
+ memory_order __o, memory_order __o2) noexcept
+ {
+ bool __result = true;
+ auto __pi = _M_refcount.lock(memory_order_acquire);
+ if (_M_ptr == __expected._M_ptr
+ && __pi == __expected._M_refcount._M_pi)
+ {
+ _M_ptr = __desired._M_ptr;
+ _M_refcount._M_swap_unlock(__desired._M_refcount, __o);
+ }
+ else
+ {
+ _Tp __sink = std::move(__expected);
+ __expected._M_ptr = _M_ptr;
+ __expected._M_refcount._M_pi = _S_add_ref(__pi);
+ _M_refcount.unlock(__o2);
+ __result = false;
+ }
+ return __result;
+ }
+
+#if __cpp_lib_atomic_wait
+ void
+ wait(value_type __old, memory_order __o) const noexcept
+ {
+ auto __pi = _M_refcount.lock(memory_order_acquire);
+ if (_M_ptr == __old._M_ptr && __pi == __old._M_refcount._M_pi)
+ _M_refcount._M_wait_unlock(__o);
+ else
+ _M_refcount.unlock(memory_order_relaxed);
+ }
+
+ void
+ notify_one() noexcept
+ {
+ _M_refcount.notify_one();
+ }
+
+ void
+ notify_all() noexcept
+ {
+ _M_refcount.notify_all();
+ }
+#endif
+ };
+
+ template<typename _Tp>
+ class atomic<shared_ptr<_Tp>>
+ {
+ public:
+ using value_type = shared_ptr<_Tp>;
+
+ static constexpr bool is_always_lock_free = false;
+
+ bool
+ is_lock_free() const noexcept
+ { return false; }
+
+ constexpr atomic() noexcept = default;
+
+ atomic(shared_ptr<_Tp> __r) noexcept
+ : _M_impl(std::move(__r))
+ { }
+
+ atomic(const atomic&) = delete;
+ void operator=(const atomic&) = delete;
+
+ shared_ptr<_Tp>
+ load(memory_order __o = memory_order_seq_cst) const noexcept
+ { return _M_impl.load(__o); }
+
+ operator shared_ptr<_Tp>() const noexcept
+ { return _M_impl.load(memory_order_seq_cst); }
+
+ void
+ store(shared_ptr<_Tp> __desired,
+ memory_order __o = memory_order_seq_cst) noexcept
+ { _M_impl.swap(__desired, __o); }
+
+ void
+ operator=(shared_ptr<_Tp> __desired) noexcept
+ { _M_impl.swap(__desired, memory_order_seq_cst); }
+
+ shared_ptr<_Tp>
+ exchange(shared_ptr<_Tp> __desired,
+ memory_order __o = memory_order_seq_cst) noexcept
+ {
+ _M_impl.swap(__desired, __o);
+ return __desired;
+ }
+
+ bool
+ compare_exchange_strong(shared_ptr<_Tp>& __expected,
+ shared_ptr<_Tp> __desired,
+ memory_order __o, memory_order __o2) noexcept
+ {
+ return _M_impl.compare_exchange_strong(__expected, __desired, __o, __o2);
+ }
+
+ bool
+ compare_exchange_strong(value_type& __expected, value_type __desired,
+ memory_order __o = memory_order_seq_cst) noexcept
+ {
+ memory_order __o2;
+ switch (__o)
+ {
+ case memory_order_acq_rel:
+ __o2 = memory_order_acquire;
+ break;
+ case memory_order_release:
+ __o2 = memory_order_relaxed;
+ break;
+ default:
+ __o2 = __o;
+ }
+ return compare_exchange_strong(__expected, std::move(__desired),
+ __o, __o2);
+ }
+
+ bool
+ compare_exchange_weak(value_type& __expected, value_type __desired,
+ memory_order __o, memory_order __o2) noexcept
+ {
+ return compare_exchange_strong(__expected, std::move(__desired),
+ __o, __o2);
+ }
+
+ bool
+ compare_exchange_weak(value_type& __expected, value_type __desired,
+ memory_order __o = memory_order_seq_cst) noexcept
+ {
+ return compare_exchange_strong(__expected, std::move(__desired), __o);
+ }
+
+#if __cpp_lib_atomic_wait
+ void
+ wait(value_type __old,
+ memory_order __o = memory_order_seq_cst) const noexcept
+ {
+ _M_impl.wait(std::move(__old), __o);
+ }
+
+ void
+ notify_one() noexcept
+ {
+ _M_impl.notify_one();
+ }
+
+ void
+ notify_all() noexcept
+ {
+ _M_impl.notify_all();
+ }
+#endif
+
+ private:
+ _Sp_atomic<shared_ptr<_Tp>> _M_impl;
+ };
+
+ template<typename _Tp>
+ class atomic<weak_ptr<_Tp>>
+ {
+ public:
+ using value_type = weak_ptr<_Tp>;
+
+ static constexpr bool is_always_lock_free = false;
+
+ bool
+ is_lock_free() const noexcept
+ { return false; }
+
+ constexpr atomic() noexcept = default;
+
+ atomic(weak_ptr<_Tp> __r) noexcept
+ : _M_impl(std::move(__r))
+ { }
+
+ atomic(const atomic&) = delete;
+ void operator=(const atomic&) = delete;
+
+ weak_ptr<_Tp>
+ load(memory_order __o = memory_order_seq_cst) const noexcept
+ { return _M_impl.load(__o); }
+
+ operator weak_ptr<_Tp>() const noexcept
+ { return _M_impl.load(memory_order_seq_cst); }
+
+ void
+ store(weak_ptr<_Tp> __desired,
+ memory_order __o = memory_order_seq_cst) noexcept
+ { _M_impl.swap(__desired, __o); }
+
+ void
+ operator=(weak_ptr<_Tp> __desired) noexcept
+ { _M_impl.swap(__desired, memory_order_seq_cst); }
+
+ weak_ptr<_Tp>
+ exchange(weak_ptr<_Tp> __desired,
+ memory_order __o = memory_order_seq_cst) noexcept
+ {
+ _M_impl.swap(__desired, __o);
+ return __desired;
+ }
+
+ bool
+ compare_exchange_strong(weak_ptr<_Tp>& __expected,
+ weak_ptr<_Tp> __desired,
+ memory_order __o, memory_order __o2) noexcept
+ {
+ return _M_impl.compare_exchange_strong(__expected, __desired, __o, __o2);
+ }
+
+ bool
+ compare_exchange_strong(value_type& __expected, value_type __desired,
+ memory_order __o = memory_order_seq_cst) noexcept
+ {
+ memory_order __o2;
+ switch (__o)
+ {
+ case memory_order_acq_rel:
+ __o2 = memory_order_acquire;
+ break;
+ case memory_order_release:
+ __o2 = memory_order_relaxed;
+ break;
+ default:
+ __o2 = __o;
+ }
+ return compare_exchange_strong(__expected, std::move(__desired),
+ __o, __o2);
+ }
+
+ bool
+ compare_exchange_weak(value_type& __expected, value_type __desired,
+ memory_order __o, memory_order __o2) noexcept
+ {
+ return compare_exchange_strong(__expected, std::move(__desired),
+ __o, __o2);
+ }
+
+ bool
+ compare_exchange_weak(value_type& __expected, value_type __desired,
+ memory_order __o = memory_order_seq_cst) noexcept
+ {
+ return compare_exchange_strong(__expected, std::move(__desired), __o);
+ }
+
+#if __cpp_lib_atomic_wait
+ void
+ wait(value_type __old,
+ memory_order __o = memory_order_seq_cst) const noexcept
+ {
+ _M_impl.wait(std::move(__old), __o);
+ }
+
+ void
+ notify_one() noexcept
+ {
+ _M_impl.notify_one();
+ }
+
+ void
+ notify_all() noexcept
+ {
+ _M_impl.notify_all();
+ }
+#endif
+
+ private:
+ _Sp_atomic<weak_ptr<_Tp>> _M_impl;
+ };
+#endif // C++20
+
/// @} relates shared_ptr
/// @} group pointer_abstractions
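Before the shared_ptr_base.h hunks, a standalone sketch of the locking
scheme used by _Atomic_count above may help: the control-block pointer
lives in a uintptr_t whose LSB serves as a spin lock. The ControlBlock
and LockedPtr names are illustrative assumptions, and the loop is
simplified relative to the patch, which first spins on plain loads with
__detail::__thread_relax() before attempting the CAS:

    #include <atomic>
    #include <cstdint>

    // Alignment > 1 guarantees the real pointer's LSB is always 0,
    // mirroring the static_assert in _Atomic_count.
    struct alignas(2) ControlBlock { long use_count; };

    class LockedPtr
    {
      std::atomic<std::uintptr_t> val{0};
      static constexpr std::uintptr_t lock_bit = 1;

    public:
      // Acquire: flip the LSB from 0 to 1 and return the raw pointer
      // that was stored (with the lock bit masked off).
      ControlBlock* lock(std::memory_order o = std::memory_order_acquire)
      {
        auto cur = val.load(std::memory_order_relaxed);
        for (;;)
          {
            cur &= ~lock_bit;  // only CAS against an unlocked value
            if (val.compare_exchange_weak(cur, cur | lock_bit, o,
                                          std::memory_order_relaxed))
              return reinterpret_cast<ControlBlock*>(cur);
            // on failure, cur was reloaded; the loop strips the lock
            // bit and retries until the holder releases it
          }
      }

      // Release: clear the LSB. fetch_sub(1) suffices because the bit
      // is known to be set, which is also how unlock() works above.
      void unlock(std::memory_order o = std::memory_order_release)
      { val.fetch_sub(1, o); }
    };

A store is then a whole-word exchange performed while the lock is held,
which is what _M_swap_unlock does: the exchanged-in value has a clear
LSB, so it releases the lock and publishes the new pointer in one step.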
diff --git a/libstdc++-v3/include/bits/shared_ptr_base.h b/libstdc++-v3/include/bits/shared_ptr_base.h
index 9e80aab..5b8f84b 100644
--- a/libstdc++-v3/include/bits/shared_ptr_base.h
+++ b/libstdc++-v3/include/bits/shared_ptr_base.h
@@ -409,6 +409,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
template<_Lock_policy _Lp = __default_lock_policy>
class __shared_count;
+#if __cplusplus >= 202002L
+ template<typename>
+ class _Sp_atomic;
+#endif
// Counted ptr with no deleter or allocator support
template<typename _Ptr, _Lock_policy _Lp>
@@ -1121,6 +1125,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
private:
friend class __weak_count<_Lp>;
+#if __cplusplus >= 202002L
+ template<typename> friend class _Sp_atomic;
+#endif
_Sp_counted_base<_Lp>* _M_pi;
};
@@ -1218,6 +1225,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
private:
friend class __shared_count<_Lp>;
+#if __cplusplus >= 202002L
+ template<typename> friend class _Sp_atomic;
+#endif
_Sp_counted_base<_Lp>* _M_pi;
};
@@ -1765,6 +1775,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
template<typename _Del, typename _Tp1>
friend _Del* get_deleter(const shared_ptr<_Tp1>&) noexcept;
+#if __cplusplus >= 202002L
+ friend _Sp_atomic<shared_ptr<_Tp>>;
+#endif
+
element_type* _M_ptr; // Contained pointer.
__shared_count<_Lp> _M_refcount; // Reference counter.
};
@@ -2097,6 +2111,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
template<typename _Tp1, _Lock_policy _Lp1> friend class __weak_ptr;
friend class __enable_shared_from_this<_Tp, _Lp>;
friend class enable_shared_from_this<_Tp>;
+#if __cplusplus >= 202002L
+ friend _Sp_atomic<weak_ptr<_Tp>>;
+#endif
element_type* _M_ptr; // Contained pointer.
__weak_count<_Lp> _M_refcount; // Reference counter.
diff --git a/libstdc++-v3/include/std/version b/libstdc++-v3/include/std/version
index a8b792e..7bd32f6 100644
--- a/libstdc++-v3/include/std/version
+++ b/libstdc++-v3/include/std/version
@@ -215,6 +215,7 @@
#if _GLIBCXX_HOSTED
#define __cpp_lib_array_constexpr 201811L
#define __cpp_lib_assume_aligned 201811L
+#define __cpp_lib_atomic_shared_ptr 201711L
#if defined _GLIBCXX_HAS_GTHREADS || defined _GLIBCXX_HAVE_LINUX_FUTEX
# define __cpp_lib_atomic_wait 201907L
# if __cpp_aligned_new
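Downstream code can detect the macro defined above through <version>; a
minimal, purely illustrative probe:

    // Illustrative check for the feature-test macro added above.
    #include <version>
    #include <cstdio>

    int main()
    {
    #if defined __cpp_lib_atomic_shared_ptr && __cpp_lib_atomic_shared_ptr >= 201711L
      std::puts("atomic<shared_ptr<T>> is available");
    #else
      std::puts("atomic<shared_ptr<T>> is not available");
    #endif
    }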