aboutsummaryrefslogtreecommitdiff
path: root/libphobos
diff options
context:
space:
mode:
authorIain Buclaw <ibuclaw@gdcproject.org>2024-01-16 19:57:40 +0100
committerIain Buclaw <ibuclaw@gdcproject.org>2024-02-03 00:16:55 +0100
commit838e706fa55b1798fb5f0242dbd90cd4d9817bbe (patch)
treeba9e9519442f8edea295b389e77e1183403d87d8 /libphobos
parentcfc6d9ae8143cf0e903384bc63e8d659ca1c9fe7 (diff)
downloadgcc-838e706fa55b1798fb5f0242dbd90cd4d9817bbe.zip
gcc-838e706fa55b1798fb5f0242dbd90cd4d9817bbe.tar.gz
gcc-838e706fa55b1798fb5f0242dbd90cd4d9817bbe.tar.bz2
d: Merge upstream dmd, druntime f1a045928e
D front-end changes: - Import dmd v2.106.1-rc.1. - Unrecognized pragmas are no longer an error by default. D runtime changes: - Import druntime v2.106.1-rc.1. gcc/d/ChangeLog: * dmd/MERGE: Merge upstream dmd f1a045928e. * dmd/VERSION: Bump version to v2.106.1-rc.1. * gdc.texi (fignore-unknown-pragmas): Update documentation. * d-builtins.cc (covariant_with_builtin_type_p): Update for new front-end interface. * d-lang.cc (d_parse_file): Likewise. * typeinfo.cc (make_frontend_typeinfo): Likewise. libphobos/ChangeLog: * libdruntime/MERGE: Merge upstream druntime f1a045928e. * libdruntime/Makefile.am (DRUNTIME_DSOURCES): Add core/stdc/stdatomic.d. * libdruntime/Makefile.in: Regenerate.
Diffstat (limited to 'libphobos')
-rw-r--r--libphobos/libdruntime/MERGE2
-rw-r--r--libphobos/libdruntime/Makefile.am40
-rw-r--r--libphobos/libdruntime/Makefile.in76
-rw-r--r--libphobos/libdruntime/core/internal/array/operations.d35
-rw-r--r--libphobos/libdruntime/core/internal/atomic.d105
-rw-r--r--libphobos/libdruntime/core/stdc/stdatomic.d1124
-rw-r--r--libphobos/libdruntime/core/thread/osthread.d7
-rw-r--r--libphobos/libdruntime/object.d6
8 files changed, 1308 insertions, 87 deletions
diff --git a/libphobos/libdruntime/MERGE b/libphobos/libdruntime/MERGE
index 5edcee1..fa7004b 100644
--- a/libphobos/libdruntime/MERGE
+++ b/libphobos/libdruntime/MERGE
@@ -1,4 +1,4 @@
-2bbf64907cbbb483d003e0a8fcf8b502e4883799
+f1a045928e03239b9477f9497f43f2cf0e61e959
The first line of this file holds the git revision number of the last
merge done from the dlang/dmd repository.
diff --git a/libphobos/libdruntime/Makefile.am b/libphobos/libdruntime/Makefile.am
index 5e4c5ac..3ef98dc 100644
--- a/libphobos/libdruntime/Makefile.am
+++ b/libphobos/libdruntime/Makefile.am
@@ -202,26 +202,26 @@ DRUNTIME_DSOURCES = core/atomic.d core/attribute.d core/bitop.d \
core/stdc/ctype.d core/stdc/errno.d core/stdc/fenv.d \
core/stdc/float_.d core/stdc/inttypes.d core/stdc/limits.d \
core/stdc/locale.d core/stdc/math.d core/stdc/signal.d \
- core/stdc/stdarg.d core/stdc/stddef.d core/stdc/stdint.d \
- core/stdc/stdio.d core/stdc/stdlib.d core/stdc/string.d \
- core/stdc/tgmath.d core/stdc/time.d core/stdc/wchar_.d \
- core/stdc/wctype.d core/sync/barrier.d core/sync/condition.d \
- core/sync/config.d core/sync/event.d core/sync/exception.d \
- core/sync/mutex.d core/sync/package.d core/sync/rwmutex.d \
- core/sync/semaphore.d core/thread/context.d core/thread/fiber.d \
- core/thread/osthread.d core/thread/package.d core/thread/threadbase.d \
- core/thread/threadgroup.d core/thread/types.d core/time.d \
- core/vararg.d core/volatile.d etc/valgrind/valgrind.d gcc/attribute.d \
- gcc/attributes.d gcc/backtrace.d gcc/builtins.d gcc/deh.d gcc/emutls.d \
- gcc/gthread.d gcc/sections/common.d gcc/sections/elf.d \
- gcc/sections/macho.d gcc/sections/package.d gcc/sections/pecoff.d \
- gcc/simd.d gcc/unwind/arm.d gcc/unwind/arm_common.d gcc/unwind/c6x.d \
- gcc/unwind/generic.d gcc/unwind/package.d gcc/unwind/pe.d object.d \
- rt/aApply.d rt/aApplyR.d rt/aaA.d rt/adi.d rt/arraycat.d rt/cast_.d \
- rt/config.d rt/critical_.d rt/deh.d rt/dmain2.d rt/ehalloc.d \
- rt/invariant.d rt/lifetime.d rt/memory.d rt/minfo.d rt/monitor_.d \
- rt/profilegc.d rt/sections.d rt/tlsgc.d rt/util/typeinfo.d \
- rt/util/utility.d
+ core/stdc/stdarg.d core/stdc/stdatomic.d core/stdc/stddef.d \
+ core/stdc/stdint.d core/stdc/stdio.d core/stdc/stdlib.d \
+ core/stdc/string.d core/stdc/tgmath.d core/stdc/time.d \
+ core/stdc/wchar_.d core/stdc/wctype.d core/sync/barrier.d \
+ core/sync/condition.d core/sync/config.d core/sync/event.d \
+ core/sync/exception.d core/sync/mutex.d core/sync/package.d \
+ core/sync/rwmutex.d core/sync/semaphore.d core/thread/context.d \
+ core/thread/fiber.d core/thread/osthread.d core/thread/package.d \
+ core/thread/threadbase.d core/thread/threadgroup.d core/thread/types.d \
+ core/time.d core/vararg.d core/volatile.d etc/valgrind/valgrind.d \
+ gcc/attribute.d gcc/attributes.d gcc/backtrace.d gcc/builtins.d \
+ gcc/deh.d gcc/emutls.d gcc/gthread.d gcc/sections/common.d \
+ gcc/sections/elf.d gcc/sections/macho.d gcc/sections/package.d \
+ gcc/sections/pecoff.d gcc/simd.d gcc/unwind/arm.d \
+ gcc/unwind/arm_common.d gcc/unwind/c6x.d gcc/unwind/generic.d \
+ gcc/unwind/package.d gcc/unwind/pe.d object.d rt/aApply.d rt/aApplyR.d \
+ rt/aaA.d rt/adi.d rt/arraycat.d rt/cast_.d rt/config.d rt/critical_.d \
+ rt/deh.d rt/dmain2.d rt/ehalloc.d rt/invariant.d rt/lifetime.d \
+ rt/memory.d rt/minfo.d rt/monitor_.d rt/profilegc.d rt/sections.d \
+ rt/tlsgc.d rt/util/typeinfo.d rt/util/utility.d
DRUNTIME_DSOURCES_STDCXX = core/stdcpp/allocator.d core/stdcpp/array.d \
core/stdcpp/exception.d core/stdcpp/memory.d core/stdcpp/new_.d \
diff --git a/libphobos/libdruntime/Makefile.in b/libphobos/libdruntime/Makefile.in
index 9c29e20..1d2bd66 100644
--- a/libphobos/libdruntime/Makefile.in
+++ b/libphobos/libdruntime/Makefile.in
@@ -225,23 +225,24 @@ am__objects_1 = core/atomic.lo core/attribute.lo core/bitop.lo \
core/stdc/config.lo core/stdc/ctype.lo core/stdc/errno.lo \
core/stdc/fenv.lo core/stdc/float_.lo core/stdc/inttypes.lo \
core/stdc/limits.lo core/stdc/locale.lo core/stdc/math.lo \
- core/stdc/signal.lo core/stdc/stdarg.lo core/stdc/stddef.lo \
- core/stdc/stdint.lo core/stdc/stdio.lo core/stdc/stdlib.lo \
- core/stdc/string.lo core/stdc/tgmath.lo core/stdc/time.lo \
- core/stdc/wchar_.lo core/stdc/wctype.lo core/sync/barrier.lo \
- core/sync/condition.lo core/sync/config.lo core/sync/event.lo \
- core/sync/exception.lo core/sync/mutex.lo core/sync/package.lo \
- core/sync/rwmutex.lo core/sync/semaphore.lo \
- core/thread/context.lo core/thread/fiber.lo \
- core/thread/osthread.lo core/thread/package.lo \
- core/thread/threadbase.lo core/thread/threadgroup.lo \
- core/thread/types.lo core/time.lo core/vararg.lo \
- core/volatile.lo etc/valgrind/valgrind.lo gcc/attribute.lo \
- gcc/attributes.lo gcc/backtrace.lo gcc/builtins.lo gcc/deh.lo \
- gcc/emutls.lo gcc/gthread.lo gcc/sections/common.lo \
- gcc/sections/elf.lo gcc/sections/macho.lo \
- gcc/sections/package.lo gcc/sections/pecoff.lo gcc/simd.lo \
- gcc/unwind/arm.lo gcc/unwind/arm_common.lo gcc/unwind/c6x.lo \
+ core/stdc/signal.lo core/stdc/stdarg.lo core/stdc/stdatomic.lo \
+ core/stdc/stddef.lo core/stdc/stdint.lo core/stdc/stdio.lo \
+ core/stdc/stdlib.lo core/stdc/string.lo core/stdc/tgmath.lo \
+ core/stdc/time.lo core/stdc/wchar_.lo core/stdc/wctype.lo \
+ core/sync/barrier.lo core/sync/condition.lo \
+ core/sync/config.lo core/sync/event.lo core/sync/exception.lo \
+ core/sync/mutex.lo core/sync/package.lo core/sync/rwmutex.lo \
+ core/sync/semaphore.lo core/thread/context.lo \
+ core/thread/fiber.lo core/thread/osthread.lo \
+ core/thread/package.lo core/thread/threadbase.lo \
+ core/thread/threadgroup.lo core/thread/types.lo core/time.lo \
+ core/vararg.lo core/volatile.lo etc/valgrind/valgrind.lo \
+ gcc/attribute.lo gcc/attributes.lo gcc/backtrace.lo \
+ gcc/builtins.lo gcc/deh.lo gcc/emutls.lo gcc/gthread.lo \
+ gcc/sections/common.lo gcc/sections/elf.lo \
+ gcc/sections/macho.lo gcc/sections/package.lo \
+ gcc/sections/pecoff.lo gcc/simd.lo gcc/unwind/arm.lo \
+ gcc/unwind/arm_common.lo gcc/unwind/c6x.lo \
gcc/unwind/generic.lo gcc/unwind/package.lo gcc/unwind/pe.lo \
object.lo rt/aApply.lo rt/aApplyR.lo rt/aaA.lo rt/adi.lo \
rt/arraycat.lo rt/cast_.lo rt/config.lo rt/critical_.lo \
@@ -879,26 +880,26 @@ DRUNTIME_DSOURCES = core/atomic.d core/attribute.d core/bitop.d \
core/stdc/ctype.d core/stdc/errno.d core/stdc/fenv.d \
core/stdc/float_.d core/stdc/inttypes.d core/stdc/limits.d \
core/stdc/locale.d core/stdc/math.d core/stdc/signal.d \
- core/stdc/stdarg.d core/stdc/stddef.d core/stdc/stdint.d \
- core/stdc/stdio.d core/stdc/stdlib.d core/stdc/string.d \
- core/stdc/tgmath.d core/stdc/time.d core/stdc/wchar_.d \
- core/stdc/wctype.d core/sync/barrier.d core/sync/condition.d \
- core/sync/config.d core/sync/event.d core/sync/exception.d \
- core/sync/mutex.d core/sync/package.d core/sync/rwmutex.d \
- core/sync/semaphore.d core/thread/context.d core/thread/fiber.d \
- core/thread/osthread.d core/thread/package.d core/thread/threadbase.d \
- core/thread/threadgroup.d core/thread/types.d core/time.d \
- core/vararg.d core/volatile.d etc/valgrind/valgrind.d gcc/attribute.d \
- gcc/attributes.d gcc/backtrace.d gcc/builtins.d gcc/deh.d gcc/emutls.d \
- gcc/gthread.d gcc/sections/common.d gcc/sections/elf.d \
- gcc/sections/macho.d gcc/sections/package.d gcc/sections/pecoff.d \
- gcc/simd.d gcc/unwind/arm.d gcc/unwind/arm_common.d gcc/unwind/c6x.d \
- gcc/unwind/generic.d gcc/unwind/package.d gcc/unwind/pe.d object.d \
- rt/aApply.d rt/aApplyR.d rt/aaA.d rt/adi.d rt/arraycat.d rt/cast_.d \
- rt/config.d rt/critical_.d rt/deh.d rt/dmain2.d rt/ehalloc.d \
- rt/invariant.d rt/lifetime.d rt/memory.d rt/minfo.d rt/monitor_.d \
- rt/profilegc.d rt/sections.d rt/tlsgc.d rt/util/typeinfo.d \
- rt/util/utility.d
+ core/stdc/stdarg.d core/stdc/stdatomic.d core/stdc/stddef.d \
+ core/stdc/stdint.d core/stdc/stdio.d core/stdc/stdlib.d \
+ core/stdc/string.d core/stdc/tgmath.d core/stdc/time.d \
+ core/stdc/wchar_.d core/stdc/wctype.d core/sync/barrier.d \
+ core/sync/condition.d core/sync/config.d core/sync/event.d \
+ core/sync/exception.d core/sync/mutex.d core/sync/package.d \
+ core/sync/rwmutex.d core/sync/semaphore.d core/thread/context.d \
+ core/thread/fiber.d core/thread/osthread.d core/thread/package.d \
+ core/thread/threadbase.d core/thread/threadgroup.d core/thread/types.d \
+ core/time.d core/vararg.d core/volatile.d etc/valgrind/valgrind.d \
+ gcc/attribute.d gcc/attributes.d gcc/backtrace.d gcc/builtins.d \
+ gcc/deh.d gcc/emutls.d gcc/gthread.d gcc/sections/common.d \
+ gcc/sections/elf.d gcc/sections/macho.d gcc/sections/package.d \
+ gcc/sections/pecoff.d gcc/simd.d gcc/unwind/arm.d \
+ gcc/unwind/arm_common.d gcc/unwind/c6x.d gcc/unwind/generic.d \
+ gcc/unwind/package.d gcc/unwind/pe.d object.d rt/aApply.d rt/aApplyR.d \
+ rt/aaA.d rt/adi.d rt/arraycat.d rt/cast_.d rt/config.d rt/critical_.d \
+ rt/deh.d rt/dmain2.d rt/ehalloc.d rt/invariant.d rt/lifetime.d \
+ rt/memory.d rt/minfo.d rt/monitor_.d rt/profilegc.d rt/sections.d \
+ rt/tlsgc.d rt/util/typeinfo.d rt/util/utility.d
DRUNTIME_DSOURCES_STDCXX = core/stdcpp/allocator.d core/stdcpp/array.d \
core/stdcpp/exception.d core/stdcpp/memory.d core/stdcpp/new_.d \
@@ -1316,6 +1317,7 @@ core/stdc/locale.lo: core/stdc/$(am__dirstamp)
core/stdc/math.lo: core/stdc/$(am__dirstamp)
core/stdc/signal.lo: core/stdc/$(am__dirstamp)
core/stdc/stdarg.lo: core/stdc/$(am__dirstamp)
+core/stdc/stdatomic.lo: core/stdc/$(am__dirstamp)
core/stdc/stddef.lo: core/stdc/$(am__dirstamp)
core/stdc/stdint.lo: core/stdc/$(am__dirstamp)
core/stdc/stdio.lo: core/stdc/$(am__dirstamp)
diff --git a/libphobos/libdruntime/core/internal/array/operations.d b/libphobos/libdruntime/core/internal/array/operations.d
index 3e23314..7e5b5f4 100644
--- a/libphobos/libdruntime/core/internal/array/operations.d
+++ b/libphobos/libdruntime/core/internal/array/operations.d
@@ -33,7 +33,7 @@ version (LDC) version = GNU_OR_LDC;
*
* Returns: the slice containing the result
*/
-T[] arrayOp(T : T[], Args...)(T[] res, Filter!(isType, Args) args) @trusted @nogc pure nothrow
+T[] arrayOp(T : T[], Args...)(T[] res, Filter!(isType, Args) args) @trusted
{
alias scalarizedExp = staticMap!(toElementType, Args);
alias check = typeCheck!(true, T, scalarizedExp); // must support all scalar ops
@@ -541,7 +541,7 @@ unittest
}
// test handling of v op= exp
-unittest
+@nogc nothrow pure @safe unittest
{
uint[32] c;
arrayOp!(uint[], uint, "+=")(c[], 2);
@@ -556,7 +556,7 @@ unittest
}
// proper error message for UDT lacking certain ops
-unittest
+@nogc nothrow pure @safe unittest
{
static assert(!is(typeof(&arrayOp!(int[4][], int[4], "+="))));
static assert(!is(typeof(&arrayOp!(int[4][], int[4], "u-", "="))));
@@ -585,7 +585,7 @@ unittest
}
// test mixed type array op
-unittest
+@nogc nothrow pure @safe unittest
{
uint[32] a = 0xF;
float[32] res = 2.0f;
@@ -595,7 +595,7 @@ unittest
}
// test mixed type array op
-unittest
+@nogc nothrow pure @safe unittest
{
static struct S
{
@@ -613,7 +613,7 @@ unittest
}
// test scalar after operation argument
-unittest
+@nogc nothrow pure @safe unittest
{
float[32] res, a = 2, b = 3;
float c = 4;
@@ -622,7 +622,7 @@ unittest
assert(v == 2 * 3 + 4);
}
-unittest
+@nogc nothrow pure @safe unittest
{
// https://issues.dlang.org/show_bug.cgi?id=17964
uint bug(){
@@ -635,7 +635,7 @@ unittest
}
// https://issues.dlang.org/show_bug.cgi?id=19796
-unittest
+nothrow pure @safe unittest
{
double[] data = [0.5];
double[] result;
@@ -645,7 +645,7 @@ unittest
}
// https://issues.dlang.org/show_bug.cgi?id=21110
-unittest
+pure unittest
{
import core.exception;
@@ -668,3 +668,20 @@ unittest
void func() { dst[] = a[] + b[]; }
assertThrown!AssertError(func(), "Array operations with mismatched lengths must throw an error");
}
+
+// https://issues.dlang.org/show_bug.cgi?id=24272
+unittest
+{
+ static struct B
+ {
+ B opOpAssign(string op)(B other)
+ {
+ static int g;
+ g++;
+ throw new Exception("");
+ }
+ }
+
+ B[] bArr;
+ bArr[] += B();
+}
diff --git a/libphobos/libdruntime/core/internal/atomic.d b/libphobos/libdruntime/core/internal/atomic.d
index eebf94e..3fd5d4a 100644
--- a/libphobos/libdruntime/core/internal/atomic.d
+++ b/libphobos/libdruntime/core/internal/atomic.d
@@ -49,6 +49,8 @@ version (DigitalMars)
enum SizedReg(int reg, T = size_t) = registerNames[reg][RegIndex!T];
}
+ enum IsAtomicLockFree(T) = T.sizeof <= size_t.sizeof * 2;
+
inout(T) atomicLoad(MemoryOrder order = MemoryOrder.seq, T)(inout(T)* src) pure nothrow @nogc @trusted
if (CanCAS!T)
{
@@ -649,6 +651,11 @@ version (DigitalMars)
}
}
+ void atomicSignalFence(MemoryOrder order = MemoryOrder.seq)() pure nothrow @nogc @trusted
+ {
+ // no-op, dmd doesn't reorder instructions
+ }
+
void pause() pure nothrow @nogc @trusted
{
version (D_InlineAsm_X86)
@@ -681,37 +688,57 @@ else version (GNU)
import gcc.builtins;
import gcc.config;
+ // Targets where MemoryOrder.acq_rel is sufficiently cheaper than using
+ // MemoryOrder.seq, used when the MemoryOrder requested is not valid for
+ // a given atomic operation.
+ version (IA64)
+ private enum PreferAcquireRelease = true;
+ else version (PPC)
+ private enum PreferAcquireRelease = true;
+ else version (PPC64)
+ private enum PreferAcquireRelease = true;
+ else
+ private enum PreferAcquireRelease = false;
+
+ enum IsAtomicLockFree(T) = __atomic_is_lock_free(T.sizeof, null);
+
inout(T) atomicLoad(MemoryOrder order = MemoryOrder.seq, T)(inout(T)* src) pure nothrow @nogc @trusted
if (CanCAS!T)
{
+ // MemoryOrder.rel and MemoryOrder.acq_rel are not valid for load.
static assert(order != MemoryOrder.rel, "invalid MemoryOrder for atomicLoad()");
+ static if (order == MemoryOrder.acq_rel)
+ enum smodel = PreferAcquireRelease ? MemoryOrder.acq : MemoryOrder.seq;
+ else
+ enum smodel = order;
+
static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
{
static if (T.sizeof == ubyte.sizeof)
{
- ubyte value = __atomic_load_1(cast(shared)src, order);
+ ubyte value = __atomic_load_1(cast(shared)src, smodel);
return *cast(typeof(return)*)&value;
}
else static if (T.sizeof == ushort.sizeof)
{
- ushort value = __atomic_load_2(cast(shared)src, order);
+ ushort value = __atomic_load_2(cast(shared)src, smodel);
return *cast(typeof(return)*)&value;
}
else static if (T.sizeof == uint.sizeof)
{
- uint value = __atomic_load_4(cast(shared)src, order);
+ uint value = __atomic_load_4(cast(shared)src, smodel);
return *cast(typeof(return)*)&value;
}
else static if (T.sizeof == ulong.sizeof && GNU_Have_64Bit_Atomics)
{
- ulong value = __atomic_load_8(cast(shared)src, order);
+ ulong value = __atomic_load_8(cast(shared)src, smodel);
return *cast(typeof(return)*)&value;
}
else static if (GNU_Have_LibAtomic)
{
T value;
- __atomic_load(T.sizeof, cast(shared)src, &value, order);
+ __atomic_load(T.sizeof, cast(shared)src, &value, smodel);
return *cast(typeof(return)*)&value;
}
else
@@ -728,20 +755,26 @@ else version (GNU)
void atomicStore(MemoryOrder order = MemoryOrder.seq, T)(T* dest, T value) pure nothrow @nogc @trusted
if (CanCAS!T)
{
+ // MemoryOrder.acq and MemoryOrder.acq_rel are not valid for store.
static assert(order != MemoryOrder.acq, "Invalid MemoryOrder for atomicStore()");
+ static if (order == MemoryOrder.acq_rel)
+ enum smodel = PreferAcquireRelease ? MemoryOrder.rel : MemoryOrder.seq;
+ else
+ enum smodel = order;
+
static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
{
static if (T.sizeof == ubyte.sizeof)
- __atomic_store_1(cast(shared)dest, *cast(ubyte*)&value, order);
+ __atomic_store_1(cast(shared)dest, *cast(ubyte*)&value, smodel);
else static if (T.sizeof == ushort.sizeof)
- __atomic_store_2(cast(shared)dest, *cast(ushort*)&value, order);
+ __atomic_store_2(cast(shared)dest, *cast(ushort*)&value, smodel);
else static if (T.sizeof == uint.sizeof)
- __atomic_store_4(cast(shared)dest, *cast(uint*)&value, order);
+ __atomic_store_4(cast(shared)dest, *cast(uint*)&value, smodel);
else static if (T.sizeof == ulong.sizeof && GNU_Have_64Bit_Atomics)
- __atomic_store_8(cast(shared)dest, *cast(ulong*)&value, order);
+ __atomic_store_8(cast(shared)dest, *cast(ulong*)&value, smodel);
else static if (GNU_Have_LibAtomic)
- __atomic_store(T.sizeof, cast(shared)dest, cast(void*)&value, order);
+ __atomic_store(T.sizeof, cast(shared)dest, cast(void*)&value, smodel);
else
static assert(0, "Invalid template type specified.");
}
@@ -814,30 +847,36 @@ else version (GNU)
{
static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
{
+ // MemoryOrder.acq is not valid for exchange.
+ static if (order == MemoryOrder.acq)
+ enum smodel = PreferAcquireRelease ? MemoryOrder.acq_rel : MemoryOrder.seq;
+ else
+ enum smodel = order;
+
static if (T.sizeof == byte.sizeof)
{
- ubyte res = __atomic_exchange_1(cast(shared)dest, *cast(ubyte*)&value, order);
+ ubyte res = __atomic_exchange_1(cast(shared)dest, *cast(ubyte*)&value, smodel);
return *cast(typeof(return)*)&res;
}
else static if (T.sizeof == short.sizeof)
{
- ushort res = __atomic_exchange_2(cast(shared)dest, *cast(ushort*)&value, order);
+ ushort res = __atomic_exchange_2(cast(shared)dest, *cast(ushort*)&value, smodel);
return *cast(typeof(return)*)&res;
}
else static if (T.sizeof == int.sizeof)
{
- uint res = __atomic_exchange_4(cast(shared)dest, *cast(uint*)&value, order);
+ uint res = __atomic_exchange_4(cast(shared)dest, *cast(uint*)&value, smodel);
return *cast(typeof(return)*)&res;
}
else static if (T.sizeof == long.sizeof && GNU_Have_64Bit_Atomics)
{
- ulong res = __atomic_exchange_8(cast(shared)dest, *cast(ulong*)&value, order);
+ ulong res = __atomic_exchange_8(cast(shared)dest, *cast(ulong*)&value, smodel);
return *cast(typeof(return)*)&res;
}
else static if (GNU_Have_LibAtomic)
{
T res = void;
- __atomic_exchange(T.sizeof, cast(shared)dest, cast(void*)&value, &res, order);
+ __atomic_exchange(T.sizeof, cast(shared)dest, cast(void*)&value, &res, smodel);
return res;
}
else
@@ -885,21 +924,42 @@ else version (GNU)
static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
{
+ static if (fail == MemoryOrder.rel || fail == MemoryOrder.acq_rel)
+ {
+ // MemoryOrder.rel and MemoryOrder.acq_rel are not valid failure models.
+ enum smodel = (succ != MemoryOrder.seq && PreferAcquireRelease)
+ ? MemoryOrder.acq_rel : MemoryOrder.seq;
+ enum fmodel = (succ != MemoryOrder.seq && PreferAcquireRelease)
+ ? MemoryOrder.raw : MemoryOrder.seq;
+ }
+ else static if (fail > succ)
+ {
+ // Failure memory model cannot be stronger than success.
+ enum smodel = (fail != MemoryOrder.seq && PreferAcquireRelease)
+ ? MemoryOrder.acq_rel : MemoryOrder.seq;
+ enum fmodel = fail;
+ }
+ else
+ {
+ enum smodel = succ;
+ enum fmodel = fail;
+ }
+
static if (T.sizeof == byte.sizeof)
res = __atomic_compare_exchange_1(cast(shared)dest, compare, *cast(ubyte*)&value,
- weak, succ, fail);
+ weak, smodel, fmodel);
else static if (T.sizeof == short.sizeof)
res = __atomic_compare_exchange_2(cast(shared)dest, compare, *cast(ushort*)&value,
- weak, succ, fail);
+ weak, smodel, fmodel);
else static if (T.sizeof == int.sizeof)
res = __atomic_compare_exchange_4(cast(shared)dest, compare, *cast(uint*)&value,
- weak, succ, fail);
+ weak, smodel, fmodel);
else static if (T.sizeof == long.sizeof && GNU_Have_64Bit_Atomics)
res = __atomic_compare_exchange_8(cast(shared)dest, compare, *cast(ulong*)&value,
- weak, succ, fail);
+ weak, smodel, fmodel);
else static if (GNU_Have_LibAtomic)
res = __atomic_compare_exchange(T.sizeof, cast(shared)dest, compare, cast(void*)&value,
- succ, fail);
+ smodel, fmodel);
else
static assert(0, "Invalid template type specified.");
}
@@ -945,6 +1005,11 @@ else version (GNU)
}
}
+ void atomicSignalFence(MemoryOrder order = MemoryOrder.seq)() pure nothrow @nogc @trusted
+ {
+ __atomic_signal_fence(order);
+ }
+
void pause() pure nothrow @nogc @trusted
{
version (X86)
diff --git a/libphobos/libdruntime/core/stdc/stdatomic.d b/libphobos/libdruntime/core/stdc/stdatomic.d
new file mode 100644
index 0000000..ae17e04
--- /dev/null
+++ b/libphobos/libdruntime/core/stdc/stdatomic.d
@@ -0,0 +1,1124 @@
+/**
+ * A D implementation of the C stdatomic.h header.
+ *
+ * $(NOTE If it compiles it should produce similar assembly to the system C toolchain
+ * and should not introduce when optimizing unnecessary behaviors,
+ * if you do not care about this guarantee use the _impl suffix.)
+ *
+ * $(NOTE The D shared type qualifier is the closest to the _Atomic type qualifier from C. It may be changed from shared in the future.)
+ *
+ * Copyright: Copyright Richard (Rikki) Andrew Cattermole 2023.
+ * License: $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost License 1.0)
+ * Authors: Richard (Rikki) Andrew cattermole
+ * Source: $(DRUNTIMESRC core/stdc/stdatomic.d)
+ */
+module core.stdc.stdatomic;
+import core.atomic : MemoryOrder;
+import core.internal.atomic;
+import core.stdc.config;
+import core.stdc.stdint;
+
+@safe nothrow @nogc:
+
+///
+enum memory_order
+{
+ /// No ordering provided
+ memory_order_relaxed = MemoryOrder.raw,
+ /// As per cppreference.com circa 2015 no compiler supports consume memory order and in practice it devolves to acquire.
+ memory_order_consume = MemoryOrder.acq,
+ /// Prevent reordering before operation
+ memory_order_acquire = MemoryOrder.acq,
+ /// Prevent reordering after operation
+ memory_order_release = MemoryOrder.rel,
+ /// Prevent reordering before and after operation
+ memory_order_acq_rel = MemoryOrder.acq_rel,
+ /// Prevent reordering before for read operations and after for writes.
+ memory_order_seq_cst = MemoryOrder.seq
+}
+
+///
+enum
+{
+ ///
+ __STDC_VERSION_STDATOMIC_H__ = 202311,
+
+ ///
+ ATOMIC_BOOL_LOCK_FREE = IsAtomicLockFree!bool ? 2 : 0,
+ ///
+ ATOMIC_CHAR_LOCK_FREE = IsAtomicLockFree!char ? 2 : 0,
+ ///
+ ATOMIC_CHAR16_T_LOCK_FREE = IsAtomicLockFree!wchar ? 2 : 0,
+ ///
+ ATOMIC_CHAR32_T_LOCK_FREE = IsAtomicLockFree!dchar ? 2 : 0,
+ ///
+ ATOMIC_WCHAR_T_LOCK_FREE = ATOMIC_CHAR16_T_LOCK_FREE,
+ ///
+ ATOMIC_SHORT_LOCK_FREE = IsAtomicLockFree!short ? 2 : 0,
+ ///
+ ATOMIC_INT_LOCK_FREE = IsAtomicLockFree!int ? 2 : 0,
+ ///
+ ATOMIC_LONG_LOCK_FREE = IsAtomicLockFree!c_long ? 2 : 0,
+ ///
+ ATOMIC_LLONG_LOCK_FREE = IsAtomicLockFree!ulong ? 2 : 0,
+ ///
+ ATOMIC_POINTER_LOCK_FREE = IsAtomicLockFree!(void*) ? 2 : 0,
+ ///
+ ATOMIC_CHAR8_T_LOCK_FREE = ATOMIC_CHAR_LOCK_FREE,
+}
+
+version (DigitalMars)
+{
+ alias atomic_signal_fence = atomic_signal_fence_impl; ///
+
+ // these all use inline assembly, so will unlikely produce the codegen a user will expect
+ version(none)
+ {
+ alias atomic_flag_clear = atomic_flag_clear_impl; ///
+ alias atomic_flag_clear_explicit = atomic_flag_clear_explicit_impl; ///
+ alias atomic_flag_test_and_set = atomic_flag_test_and_set_impl; ///
+ alias atomic_flag_test_and_set_explicit = atomic_flag_test_and_set_explicit_impl; ///
+ alias atomic_thread_fence = atomic_thread_fence_impl; ///
+ alias atomic_store = atomic_store_impl; ///
+ alias atomic_store_explicit = atomic_store_explicit_impl; ///
+ alias atomic_load = atomic_load_impl; ///
+ alias atomic_load_explicit = atomic_load_explicit_impl; ///
+ alias atomic_exchange = atomic_exchange_impl; ///
+ alias atomic_exchange_explicit = atomic_exchange_explicit_impl; ///
+ alias atomic_compare_exchange_strong = atomic_compare_exchange_strong_impl; ///
+ alias atomic_compare_exchange_weak = atomic_compare_exchange_weak_impl; ///
+ alias atomic_compare_exchange_strong_explicit = atomic_compare_exchange_strong_explicit_impl; ///
+ alias atomic_compare_exchange_weak_explicit = atomic_compare_exchange_weak_explicit_impl; ///
+ alias atomic_fetch_add = atomic_fetch_add_impl; ///
+ alias atomic_fetch_add_explicit = atomic_fetch_add_explicit_impl; ///
+ alias atomic_fetch_sub = atomic_fetch_sub_impl; ///
+ alias atomic_fetch_sub_explicit = atomic_fetch_sub_explicit_impl; ///
+ alias atomic_fetch_or = atomic_fetch_or_impl; ///
+ alias atomic_fetch_or_explicit = atomic_fetch_or_explicit_impl; ///
+ alias atomic_fetch_xor = atomic_fetch_xor_impl; ///
+ alias atomic_fetch_xor_explicit = atomic_fetch_xor_explicit_impl; ///
+ alias atomic_fetch_and = atomic_fetch_and_impl; ///
+ alias atomic_fetch_and_explicit = atomic_fetch_and_explicit_impl; ///
+ }
+}
+else version(GNU)
+{
+ alias atomic_flag_clear = atomic_flag_clear_impl; ///
+ alias atomic_flag_clear_explicit = atomic_flag_clear_explicit_impl; ///
+ alias atomic_flag_test_and_set = atomic_flag_test_and_set_impl; ///
+ alias atomic_flag_test_and_set_explicit = atomic_flag_test_and_set_explicit_impl; ///
+ alias atomic_signal_fence = atomic_signal_fence_impl; ///
+ alias atomic_thread_fence = atomic_thread_fence_impl; ///
+ alias atomic_store = atomic_store_impl; ///
+ alias atomic_store_explicit = atomic_store_explicit_impl; ///
+ alias atomic_load = atomic_load_impl; ///
+ alias atomic_load_explicit = atomic_load_explicit_impl; ///
+ alias atomic_exchange = atomic_exchange_impl; ///
+ alias atomic_exchange_explicit = atomic_exchange_explicit_impl; ///
+ alias atomic_compare_exchange_strong = atomic_compare_exchange_strong_impl; ///
+ alias atomic_compare_exchange_weak = atomic_compare_exchange_weak_impl; ///
+ alias atomic_compare_exchange_strong_explicit = atomic_compare_exchange_strong_explicit_impl; ///
+ alias atomic_compare_exchange_weak_explicit = atomic_compare_exchange_weak_explicit_impl; ///
+ alias atomic_fetch_add = atomic_fetch_add_impl; ///
+ alias atomic_fetch_add_explicit = atomic_fetch_add_explicit_impl; ///
+ alias atomic_fetch_sub = atomic_fetch_sub_impl; ///
+ alias atomic_fetch_sub_explicit = atomic_fetch_sub_explicit_impl; ///
+ alias atomic_fetch_or = atomic_fetch_or_impl; ///
+ alias atomic_fetch_or_explicit = atomic_fetch_or_explicit_impl; ///
+ alias atomic_fetch_xor = atomic_fetch_xor_impl; ///
+ alias atomic_fetch_xor_explicit = atomic_fetch_xor_explicit_impl; ///
+ alias atomic_fetch_and = atomic_fetch_and_impl; ///
+ alias atomic_fetch_and_explicit = atomic_fetch_and_explicit_impl; ///
+}
+
+///
+pragma(inline, true)
+bool atomic_is_lock_free(A)(const shared(A)* obj)
+{
+ return IsAtomicLockFree!A;
+}
+
+/// Guaranteed to be a atomic boolean type
+struct atomic_flag
+{
+ private bool b;
+}
+
+///
+enum ATOMIC_FLAG_INIT = atomic_flag.init;
+
+///
+pragma(inline, true)
+void atomic_flag_clear_impl()(atomic_flag* obj)
+{
+ assert(obj !is null);
+
+ atomicStore(&obj.b, false);
+}
+
+///
+pragma(inline, true)
+void atomic_flag_clear_explicit_impl()(atomic_flag* obj, memory_order order)
+{
+ assert(obj !is null);
+
+ final switch (order)
+ {
+ case memory_order.memory_order_relaxed:
+ atomicStore!(memory_order.memory_order_relaxed)(&obj.b, false);
+ break;
+
+ case memory_order.memory_order_acquire:
+ // Ideally this would error at compile time but alas it is not an intrinsic.
+ // Note: this is not a valid memory order for this operation.
+ atomicStore!(memory_order.memory_order_seq_cst)(&obj.b, false);
+ break;
+
+ case memory_order.memory_order_release:
+ atomicStore!(memory_order.memory_order_release)(&obj.b, false);
+ break;
+
+ case memory_order.memory_order_acq_rel:
+ atomicStore!(memory_order.memory_order_acq_rel)(&obj.b, false);
+ break;
+
+ case memory_order.memory_order_seq_cst:
+ atomicStore(&obj.b, false);
+ break;
+ }
+}
+
+///
+pragma(inline, true)
+bool atomic_flag_test_and_set_impl()(atomic_flag* obj)
+{
+ assert(obj !is null);
+ return atomicExchange(&obj.b, true);
+}
+
+///
+unittest
+{
+ atomic_flag flag;
+ assert(!atomic_flag_test_and_set_impl(&flag));
+ atomic_flag_clear_impl(&flag);
+}
+
+///
+pragma(inline, true)
+bool atomic_flag_test_and_set_explicit_impl()(atomic_flag* obj, memory_order order)
+{
+ assert(obj !is null);
+
+ final switch (order)
+ {
+ case memory_order.memory_order_relaxed:
+ return atomicExchange!(memory_order.memory_order_relaxed)(&obj.b, true);
+
+ case memory_order.memory_order_acquire:
+ return atomicExchange!(memory_order.memory_order_acquire)(&obj.b, true);
+
+ case memory_order.memory_order_release:
+ return atomicExchange!(memory_order.memory_order_release)(&obj.b, true);
+
+ case memory_order.memory_order_acq_rel:
+ return atomicExchange!(memory_order.memory_order_acq_rel)(&obj.b, true);
+
+ case memory_order.memory_order_seq_cst:
+ return atomicExchange(&obj.b, true);
+ }
+}
+
+///
+unittest
+{
+ atomic_flag flag;
+ assert(!atomic_flag_test_and_set_explicit_impl(&flag, memory_order.memory_order_seq_cst));
+ atomic_flag_clear_explicit_impl(&flag, memory_order.memory_order_seq_cst);
+}
+
+/**
+ * Initializes an atomic variable, the destination should not have any expression associated with it prior to this call.
+ *
+ * We use an out parameter instead of a pointer for destination in an attempt to communicate to the compiler that it initializers.
+ */
+pragma(inline, true)
+void atomic_init(A, C)(out shared(A) obj, C desired) @trusted
+{
+ obj = cast(shared) desired;
+}
+
+///
+unittest
+{
+ shared int val;
+ atomic_init(val, 2);
+}
+
+/// No-op function, doesn't apply to D
+pragma(inline, true)
+A kill_dependency(A)(A y) @trusted
+{
+ return y;
+}
+
+/// Don't allow reordering, does not emit any instructions.
+pragma(inline, true)
+void atomic_signal_fence_impl()(memory_order order)
+{
+ final switch (order)
+ {
+ case memory_order.memory_order_relaxed:
+ atomicSignalFence!(memory_order.memory_order_relaxed);
+ break;
+
+ case memory_order.memory_order_acquire:
+ atomicSignalFence!(memory_order.memory_order_acquire);
+ break;
+
+ case memory_order.memory_order_release:
+ atomicSignalFence!(memory_order.memory_order_release);
+ break;
+
+ case memory_order.memory_order_acq_rel:
+ atomicSignalFence!(memory_order.memory_order_acq_rel);
+ break;
+
+ case memory_order.memory_order_seq_cst:
+ atomicSignalFence!(memory_order.memory_order_seq_cst);
+ break;
+ }
+}
+
+///
+unittest
+{
+ atomic_signal_fence_impl(memory_order.memory_order_seq_cst);
+}
+
+/// Don't allow reordering, and emit a fence instruction.
+/// The final switch maps the runtime `order` argument onto the compile-time
+/// template parameter required by core.atomic.atomicFence.
+pragma(inline, true)
+void atomic_thread_fence_impl()(memory_order order)
+{
+ final switch (order)
+ {
+ case memory_order.memory_order_relaxed:
+ atomicFence!(memory_order.memory_order_relaxed);
+ break;
+
+ case memory_order.memory_order_acquire:
+ atomicFence!(memory_order.memory_order_acquire);
+ break;
+
+ case memory_order.memory_order_release:
+ atomicFence!(memory_order.memory_order_release);
+ break;
+
+ case memory_order.memory_order_acq_rel:
+ atomicFence!(memory_order.memory_order_acq_rel);
+ break;
+
+ case memory_order.memory_order_seq_cst:
+ atomicFence!(memory_order.memory_order_seq_cst);
+ break;
+ }
+}
+
+///
+unittest
+{
+ atomic_thread_fence_impl(memory_order.memory_order_seq_cst);
+}
+
+// C11 <stdatomic.h> typedef equivalents. These are plain shared-qualified D
+// types, not wrapper structs; atomicity comes from using the atomic_* functions
+// in this module on them, not from the types themselves.
+///
+alias atomic_bool = shared(bool);
+///
+alias atomic_char = shared(char);
+///
+alias atomic_schar = shared(byte);
+///
+alias atomic_uchar = shared(ubyte);
+///
+alias atomic_short = shared(short);
+///
+alias atomic_ushort = shared(ushort);
+///
+alias atomic_int = shared(int);
+///
+alias atomic_uint = shared(uint);
+///
+alias atomic_long = shared(c_long);
+///
+alias atomic_ulong = shared(c_ulong);
+///
+alias atomic_llong = shared(long);
+///
+alias atomic_ullong = shared(ulong);
+///
+alias atomic_char8_t = shared(char);
+///
+alias atomic_char16_t = shared(wchar);
+///
+alias atomic_char32_t = shared(dchar);
+///
+alias atomic_wchar_t = shared(wchar);
+
+///
+alias atomic_int_least8_t = shared(int_least8_t);
+///
+alias atomic_uint_least8_t = shared(uint_least8_t);
+///
+alias atomic_int_least16_t = shared(int_least16_t);
+///
+alias atomic_uint_least16_t = shared(uint_least16_t);
+///
+alias atomic_int_least32_t = shared(int_least32_t);
+///
+alias atomic_uint_least32_t = shared(uint_least32_t);
+///
+alias atomic_int_least64_t = shared(int_least64_t);
+///
+alias atomic_uint_least64_t = shared(uint_least64_t);
+///
+alias atomic_int_fast8_t = shared(int_fast8_t);
+///
+alias atomic_uint_fast8_t = shared(uint_fast8_t);
+///
+alias atomic_int_fast16_t = shared(int_fast16_t);
+///
+alias atomic_uint_fast16_t = shared(uint_fast16_t);
+///
+alias atomic_int_fast32_t = shared(int_fast32_t);
+///
+alias atomic_uint_fast32_t = shared(uint_fast32_t);
+///
+alias atomic_int_fast64_t = shared(int_fast64_t);
+///
+alias atomic_uint_fast64_t = shared(uint_fast64_t);
+///
+alias atomic_intptr_t = shared(intptr_t);
+///
+alias atomic_uintptr_t = shared(uintptr_t);
+///
+alias atomic_size_t = shared(size_t);
+///
+alias atomic_ptrdiff_t = shared(ptrdiff_t);
+///
+alias atomic_intmax_t = shared(intmax_t);
+///
+alias atomic_uintmax_t = shared(uintmax_t);
+
+/// C11 atomic_store: sequentially-consistent store of `desired` into `*obj`.
+pragma(inline, true)
+void atomic_store_impl(A, C)(shared(A)* obj, C desired) @trusted
+{
+ assert(obj !is null);
+ atomicStore(obj, cast(A)desired);
+}
+
+///
+unittest
+{
+ shared(int) obj;
+ atomic_store_impl(&obj, 3);
+}
+
+/// C11 atomic_store_explicit: maps the runtime memory order onto the
+/// compile-time template argument of core.atomic.atomicStore.
+pragma(inline, true)
+void atomic_store_explicit_impl(A, C)(shared(A)* obj, C desired, memory_order order) @trusted
+{
+ assert(obj !is null);
+
+ final switch (order)
+ {
+ case memory_order.memory_order_relaxed:
+ atomicStore!(memory_order.memory_order_relaxed)(obj, cast(A)desired);
+ break;
+
+ case memory_order.memory_order_acquire:
+ // Ideally this would error at compile time but alas it is not an intrinsic.
+ // Note: this is not a valid memory order for this operation.
+ // We strengthen the invalid acquire request to release instead of failing.
+ atomicStore!(memory_order.memory_order_release)(obj, cast(A)desired);
+ break;
+
+ case memory_order.memory_order_release:
+ atomicStore!(memory_order.memory_order_release)(obj, cast(A)desired);
+ break;
+
+ case memory_order.memory_order_acq_rel:
+ atomicStore!(memory_order.memory_order_acq_rel)(obj, cast(A)desired);
+ break;
+
+ case memory_order.memory_order_seq_cst:
+ atomicStore!(memory_order.memory_order_seq_cst)(obj, cast(A)desired);
+ break;
+ }
+}
+
+///
+unittest
+{
+ shared(int) obj;
+ atomic_store_explicit_impl(&obj, 3, memory_order.memory_order_seq_cst);
+}
+
+/// C11 atomic_load: sequentially-consistent load of `*obj`.
+pragma(inline, true)
+A atomic_load_impl(A)(const shared(A)* obj) @trusted
+{
+ assert(obj !is null);
+ // Casts away const: core.atomic.atomicLoad expects a mutable shared pointer,
+ // but the load itself does not modify the object.
+ return atomicLoad(cast(shared(A)*)obj);
+}
+
+///
+unittest
+{
+ shared(int) obj = 3;
+ assert(atomic_load_impl(&obj) == 3);
+}
+
+/// C11 atomic_load_explicit: maps the runtime memory order onto the
+/// compile-time template argument of core.atomic.atomicLoad.
+pragma(inline, true)
+A atomic_load_explicit_impl(A)(const shared(A)* obj, memory_order order) @trusted
+{
+ assert(obj !is null);
+
+ final switch (order)
+ {
+ case memory_order.memory_order_relaxed:
+ return atomicLoad!(memory_order.memory_order_relaxed)(obj);
+
+ case memory_order.memory_order_acquire:
+ return atomicLoad!(memory_order.memory_order_acquire)(obj);
+
+ case memory_order.memory_order_release:
+ // Ideally this would error at compile time but alas it is not an intrinsic.
+ // Note: this is not a valid memory order for this operation.
+ return atomicLoad!(memory_order.memory_order_acquire)(obj);
+
+ case memory_order.memory_order_acq_rel:
+ // NOTE(review): acq_rel is also not a valid order for a pure load in
+ // C11; presumably core.atomic tolerates it — confirm upstream intent.
+ return atomicLoad!(memory_order.memory_order_acq_rel)(obj);
+
+ case memory_order.memory_order_seq_cst:
+ return atomicLoad!(memory_order.memory_order_seq_cst)(obj);
+ }
+}
+
+///
+unittest
+{
+ shared(int) obj = 3;
+ assert(atomic_load_explicit_impl(&obj, memory_order.memory_order_seq_cst) == 3);
+}
+
+/// C11 atomic_exchange: stores `desired` into `*obj` and returns the previous
+/// value, with sequentially-consistent ordering.
+pragma(inline, true)
+A atomic_exchange_impl(A, C)(shared(A)* obj, C desired) @trusted
+{
+ assert(obj !is null);
+ return atomicExchange(cast(shared(A)*)obj, cast(A)desired);
+}
+
+///
+unittest
+{
+ shared(int) obj = 3;
+ assert(atomic_exchange_impl(&obj, 2) == 3);
+}
+
+/// C11 atomic_exchange_explicit: maps the runtime memory order onto the
+/// compile-time template argument of core.atomic.atomicExchange.
+pragma(inline, true)
+A atomic_exchange_explicit_impl(A, C)(shared(A)* obj, C desired, memory_order order) @trusted
+{
+ assert(obj !is null);
+
+ final switch (order)
+ {
+ case memory_order.memory_order_relaxed:
+ return atomicExchange!(memory_order.memory_order_relaxed)(obj, cast(A)desired);
+
+ case memory_order.memory_order_acquire:
+ return atomicExchange!(memory_order.memory_order_acquire)(obj, cast(A)desired);
+
+ case memory_order.memory_order_release:
+ return atomicExchange!(memory_order.memory_order_release)(obj, cast(A)desired);
+
+ case memory_order.memory_order_acq_rel:
+ return atomicExchange!(memory_order.memory_order_acq_rel)(obj, cast(A)desired);
+
+ case memory_order.memory_order_seq_cst:
+ return atomicExchange!(memory_order.memory_order_seq_cst)(obj, cast(A)desired);
+ }
+}
+
+///
+unittest
+{
+ shared(int) obj = 3;
+ assert(atomic_exchange_explicit_impl(&obj, 2, memory_order.memory_order_seq_cst) == 3);
+}
+
+/**
+ * C11 atomic_compare_exchange_strong with seq_cst ordering.
+ *
+ * Params:
+ *  obj = target atomic variable, must not be null
+ *  expected = compared against `*obj`; receives the observed value on failure
+ *  desired = value stored into `*obj` when the comparison succeeds
+ * Returns: true if the exchange was performed
+ */
+pragma(inline, true)
+bool atomic_compare_exchange_strong_impl(A, C)(shared(A)* obj, A* expected, C desired) @trusted
+{
+ assert(obj !is null); // consistency: every other *_impl entry point checks this
+ return atomicCompareExchangeStrong(cast(A*)obj, expected, cast(A)desired);
+}
+
+///
+unittest
+{
+ shared(int) obj = 3;
+ int expected = 3;
+ assert(atomic_compare_exchange_strong_impl(&obj, &expected, 2));
+}
+
+/**
+ * C11 atomic_compare_exchange_weak with seq_cst ordering.
+ * Unlike the strong variant this may fail spuriously, so callers must loop.
+ *
+ * Params:
+ *  obj = target atomic variable, must not be null
+ *  expected = compared against `*obj`; receives the observed value on failure
+ *  desired = value stored into `*obj` when the comparison succeeds
+ * Returns: true if the exchange was performed
+ */
+pragma(inline, true)
+bool atomic_compare_exchange_weak_impl(A, C)(shared(A)* obj, A* expected, C desired) @trusted
+{
+ assert(obj !is null);
+ // Use the weak CAS to match the C11 contract (and the explicit weak variant
+ // below); the previous call to atomicCompareExchangeStrong was a conforming
+ // but needlessly strong fallback.
+ return atomicCompareExchangeWeak(cast(A*)obj, expected, cast(A)desired);
+}
+
+///
+unittest
+{
+ shared(int) obj = 3;
+ int expected = 3;
+ // Weak CAS may fail spuriously, so only check that the call compiles.
+ static assert(__traits(compiles, {atomic_compare_exchange_weak_impl(&obj, &expected, 2);}));
+}
+
+/// C11 atomic_compare_exchange_strong_explicit: dispatches both the success
+/// and failure runtime orders onto compile-time template arguments.
+pragma(inline, true)
+bool atomic_compare_exchange_strong_explicit_impl(A, C)(shared(A)* obj, A* expected, C desired, memory_order succ, memory_order fail) @trusted
+{
+ assert(obj !is null);
+ // We use these giant switch-inside-switch statements
+ // because as of 2023 they are capable of being for the most part inlined by gdc & ldc when using literal arguments for memory_order.
+
+ final switch(succ)
+ {
+ case memory_order.memory_order_relaxed:
+ final switch(fail)
+ {
+ case memory_order.memory_order_relaxed:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_relaxed, memory_order.memory_order_relaxed)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_acquire:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_relaxed, memory_order.memory_order_acquire)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_release:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_relaxed, memory_order.memory_order_release)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_acq_rel:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_relaxed, memory_order.memory_order_acq_rel)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_seq_cst:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_relaxed, memory_order.memory_order_seq_cst)(cast(A*)obj, expected, cast(A)desired);
+ }
+ case memory_order.memory_order_acquire:
+ final switch(fail)
+ {
+ case memory_order.memory_order_relaxed:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_acquire, memory_order.memory_order_relaxed)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_acquire:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_acquire, memory_order.memory_order_acquire)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_release:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_acquire, memory_order.memory_order_release)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_acq_rel:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_acquire, memory_order.memory_order_acq_rel)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_seq_cst:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_acquire, memory_order.memory_order_seq_cst)(cast(A*)obj, expected, cast(A)desired);
+ }
+ case memory_order.memory_order_release:
+ final switch(fail)
+ {
+ case memory_order.memory_order_relaxed:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_release, memory_order.memory_order_relaxed)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_acquire:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_release, memory_order.memory_order_acquire)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_release:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_release, memory_order.memory_order_release)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_acq_rel:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_release, memory_order.memory_order_acq_rel)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_seq_cst:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_release, memory_order.memory_order_seq_cst)(cast(A*)obj, expected, cast(A)desired);
+ }
+ case memory_order.memory_order_acq_rel:
+ final switch(fail)
+ {
+ case memory_order.memory_order_relaxed:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_acq_rel, memory_order.memory_order_relaxed)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_acquire:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_acq_rel, memory_order.memory_order_acquire)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_release:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_acq_rel, memory_order.memory_order_release)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_acq_rel:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_acq_rel, memory_order.memory_order_acq_rel)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_seq_cst:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_acq_rel, memory_order.memory_order_seq_cst)(cast(A*)obj, expected, cast(A)desired);
+ }
+ case memory_order.memory_order_seq_cst:
+ final switch(fail)
+ {
+ case memory_order.memory_order_relaxed:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_seq_cst, memory_order.memory_order_relaxed)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_acquire:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_seq_cst, memory_order.memory_order_acquire)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_release:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_seq_cst, memory_order.memory_order_release)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_acq_rel:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_seq_cst, memory_order.memory_order_acq_rel)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_seq_cst:
+ return atomicCompareExchangeStrong!(memory_order.memory_order_seq_cst, memory_order.memory_order_seq_cst)(cast(A*)obj, expected, cast(A)desired);
+ }
+ }
+}
+
+///
+unittest
+{
+ shared(int) obj = 3;
+ int expected = 3;
+ assert(atomic_compare_exchange_strong_explicit_impl(&obj, &expected, 2, memory_order.memory_order_seq_cst, memory_order.memory_order_seq_cst));
+}
+
+/// C11 atomic_compare_exchange_weak_explicit: dispatches both the success
+/// and failure runtime orders onto compile-time template arguments.
+pragma(inline, true)
+bool atomic_compare_exchange_weak_explicit_impl(A, C)(shared(A)* obj, A* expected, C desired, memory_order succ, memory_order fail) @trusted
+{
+ assert(obj !is null);
+ // We use these giant switch-inside-switch statements
+ // because as of 2023 they are capable of being for the most part inlined by gdc & ldc when using literal arguments for memory_order.
+
+ final switch(succ)
+ {
+ case memory_order.memory_order_relaxed:
+ final switch(fail)
+ {
+ // NOTE(review): in this branch every fail order collapses to
+ // relaxed/relaxed. C11 forbids a failure order stronger than the
+ // success order, so clamping is defensible, but it is inconsistent
+ // with the strong variant above — confirm which behaviour is intended.
+ case memory_order.memory_order_relaxed:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_relaxed, memory_order.memory_order_relaxed)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_acquire:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_relaxed, memory_order.memory_order_relaxed)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_release:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_relaxed, memory_order.memory_order_relaxed)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_acq_rel:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_relaxed, memory_order.memory_order_relaxed)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_seq_cst:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_relaxed, memory_order.memory_order_relaxed)(cast(A*)obj, expected, cast(A)desired);
+ }
+ case memory_order.memory_order_acquire:
+ final switch(fail)
+ {
+ case memory_order.memory_order_relaxed:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_acquire, memory_order.memory_order_relaxed)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_acquire:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_acquire, memory_order.memory_order_acquire)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_release:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_acquire, memory_order.memory_order_release)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_acq_rel:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_acquire, memory_order.memory_order_acq_rel)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_seq_cst:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_acquire, memory_order.memory_order_seq_cst)(cast(A*)obj, expected, cast(A)desired);
+ }
+ case memory_order.memory_order_release:
+ final switch(fail)
+ {
+ case memory_order.memory_order_relaxed:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_release, memory_order.memory_order_relaxed)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_acquire:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_release, memory_order.memory_order_acquire)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_release:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_release, memory_order.memory_order_release)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_acq_rel:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_release, memory_order.memory_order_acq_rel)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_seq_cst:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_release, memory_order.memory_order_seq_cst)(cast(A*)obj, expected, cast(A)desired);
+ }
+ case memory_order.memory_order_acq_rel:
+ final switch(fail)
+ {
+ case memory_order.memory_order_relaxed:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_acq_rel, memory_order.memory_order_relaxed)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_acquire:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_acq_rel, memory_order.memory_order_acquire)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_release:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_acq_rel, memory_order.memory_order_release)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_acq_rel:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_acq_rel, memory_order.memory_order_acq_rel)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_seq_cst:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_acq_rel, memory_order.memory_order_seq_cst)(cast(A*)obj, expected, cast(A)desired);
+ }
+ case memory_order.memory_order_seq_cst:
+ final switch(fail)
+ {
+ case memory_order.memory_order_relaxed:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_seq_cst, memory_order.memory_order_relaxed)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_acquire:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_seq_cst, memory_order.memory_order_acquire)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_release:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_seq_cst, memory_order.memory_order_release)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_acq_rel:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_seq_cst, memory_order.memory_order_acq_rel)(cast(A*)obj, expected, cast(A)desired);
+ case memory_order.memory_order_seq_cst:
+ return atomicCompareExchangeWeak!(memory_order.memory_order_seq_cst, memory_order.memory_order_seq_cst)(cast(A*)obj, expected, cast(A)desired);
+ }
+ }
+}
+
+///
+unittest
+{
+ shared(int) obj = 3;
+ int expected = 3;
+ // Result unchecked: the weak CAS may fail spuriously.
+ atomic_compare_exchange_weak_explicit_impl(&obj, &expected, 2, memory_order.memory_order_seq_cst, memory_order.memory_order_seq_cst);
+}
+
+/// C11 atomic_fetch_add: atomically adds `arg` to `*obj` with seq_cst ordering
+/// and returns the value held previously.
+pragma(inline, true)
+A atomic_fetch_add_impl(A, M)(shared(A)* obj, M arg) @trusted
+{
+ assert(obj !is null);
+ return atomicFetchAdd(cast(A*)obj, arg);
+}
+
+///
+unittest
+{
+ shared(int) val;
+ atomic_fetch_add_impl(&val, 3);
+ assert(atomic_load_impl(&val) == 3);
+}
+
+/// C11 atomic_fetch_sub: atomically subtracts `arg` from `*obj` with seq_cst
+/// ordering and returns the value held previously.
+pragma(inline, true)
+A atomic_fetch_sub_impl(A, M)(shared(A)* obj, M arg) @trusted
+{
+ assert(obj !is null);
+ return atomicFetchSub(cast(A*)obj, arg);
+}
+
+///
+unittest
+{
+ shared(int) val = 3;
+ atomic_fetch_sub_impl(&val, 3);
+ assert(atomic_load_impl(&val) == 0);
+}
+
+/// C11 atomic_fetch_add_explicit: maps the runtime memory order onto the
+/// compile-time template argument of core.atomic.atomicFetchAdd.
+pragma(inline, true)
+A atomic_fetch_add_explicit_impl(A, M)(shared(A)* obj, M arg, memory_order order) @trusted
+{
+ assert(obj !is null);
+
+ final switch(order)
+ {
+ case memory_order.memory_order_relaxed:
+ return atomicFetchAdd!(memory_order.memory_order_relaxed)(cast(A*)obj, arg);
+ case memory_order.memory_order_acquire:
+ return atomicFetchAdd!(memory_order.memory_order_acquire)(cast(A*)obj, arg);
+ case memory_order.memory_order_release:
+ return atomicFetchAdd!(memory_order.memory_order_release)(cast(A*)obj, arg);
+ case memory_order.memory_order_acq_rel:
+ return atomicFetchAdd!(memory_order.memory_order_acq_rel)(cast(A*)obj, arg);
+ case memory_order.memory_order_seq_cst:
+ return atomicFetchAdd!(memory_order.memory_order_seq_cst)(cast(A*)obj, arg);
+ }
+}
+
+///
+unittest
+{
+ shared(int) val;
+ atomic_fetch_add_explicit_impl(&val, 3, memory_order.memory_order_seq_cst);
+ assert(atomic_load_impl(&val) == 3);
+}
+
+/// C11 atomic_fetch_sub_explicit: maps the runtime memory order onto the
+/// compile-time template argument of core.atomic.atomicFetchSub.
+pragma(inline, true)
+A atomic_fetch_sub_explicit_impl(A, M)(shared(A)* obj, M arg, memory_order order) @trusted
+{
+ assert(obj !is null);
+
+ final switch(order)
+ {
+ case memory_order.memory_order_relaxed:
+ return atomicFetchSub!(memory_order.memory_order_relaxed)(cast(A*)obj, arg);
+ case memory_order.memory_order_acquire:
+ return atomicFetchSub!(memory_order.memory_order_acquire)(cast(A*)obj, arg);
+ case memory_order.memory_order_release:
+ return atomicFetchSub!(memory_order.memory_order_release)(cast(A*)obj, arg);
+ case memory_order.memory_order_acq_rel:
+ return atomicFetchSub!(memory_order.memory_order_acq_rel)(cast(A*)obj, arg);
+ case memory_order.memory_order_seq_cst:
+ return atomicFetchSub!(memory_order.memory_order_seq_cst)(cast(A*)obj, arg);
+ }
+}
+
+///
+unittest
+{
+ shared(int) val = 3;
+ atomic_fetch_sub_explicit_impl(&val, 3, memory_order.memory_order_seq_cst);
+ assert(atomic_load_impl(&val) == 0);
+}
+
+/// C11 atomic_fetch_or: atomically ORs `arg` into `*obj` with seq_cst ordering
+/// and returns the value held previously. Implemented as a CAS loop since
+/// core.atomic has no fetch-or intrinsic.
+pragma(inline, true)
+A atomic_fetch_or_impl(A, M)(shared(A)* obj, M arg) @trusted
+{
+ assert(obj !is null);
+
+ // copied from atomicOp
+
+ A set, get = atomicLoad(cast(A*)obj);
+
+ do
+ {
+ set = get | arg;
+ // On CAS failure `get` is refreshed with the observed value, so the loop retries.
+ } while (!atomicCompareExchangeWeak!(memory_order.memory_order_seq_cst, memory_order.memory_order_seq_cst)(cast(A*)obj, &get, cast(A)set));
+
+ return get;
+}
+
+///
+unittest
+{
+ shared(int) val = 5;
+ atomic_fetch_or_impl(&val, 3);
+ assert(atomic_load_impl(&val) == 7);
+}
+
+/// C11 atomic_fetch_or_explicit: CAS-loop fetch-or with the requested memory
+/// order applied to the compare-exchange; the initial load may be weaker since
+/// the CAS provides the ordering guarantee.
+pragma(inline, true)
+A atomic_fetch_or_explicit_impl(A, M)(shared(A)* obj, M arg, memory_order order) @trusted
+{
+ assert(obj !is null);
+
+ A set, get;
+
+ final switch(order)
+ {
+ case memory_order.memory_order_relaxed:
+ get = atomicLoad!(memory_order.memory_order_relaxed)(cast(A*)obj);
+ do
+ {
+ set = get | arg;
+ } while (!atomicCompareExchangeWeak!(memory_order.memory_order_relaxed, memory_order.memory_order_relaxed)(cast(A*)obj, &get, cast(A)set));
+ break;
+
+ case memory_order.memory_order_acquire:
+ get = atomicLoad!(memory_order.memory_order_acquire)(cast(A*)obj);
+ do
+ {
+ set = get | arg;
+ } while (!atomicCompareExchangeWeak!(memory_order.memory_order_acquire, memory_order.memory_order_acquire)(cast(A*)obj, &get, cast(A)set));
+ break;
+
+ case memory_order.memory_order_release:
+ // Relaxed initial load is sufficient; the release CAS publishes the result.
+ get = atomicLoad!(memory_order.memory_order_relaxed)(cast(A*)obj);
+ do
+ {
+ set = get | arg;
+ } while (!atomicCompareExchangeWeak!(memory_order.memory_order_release, memory_order.memory_order_release)(cast(A*)obj, &get, cast(A)set));
+ break;
+
+ case memory_order.memory_order_acq_rel:
+ // NOTE(review): acq_rel is not a valid order for a pure load in C11 —
+ // presumably core.atomic tolerates it; confirm upstream intent.
+ get = atomicLoad!(memory_order.memory_order_acq_rel)(cast(A*)obj);
+ do
+ {
+ set = get | arg;
+ } while (!atomicCompareExchangeWeak!(memory_order.memory_order_acq_rel, memory_order.memory_order_acq_rel)(cast(A*)obj, &get, cast(A)set));
+ break;
+
+ case memory_order.memory_order_seq_cst:
+ get = atomicLoad!(memory_order.memory_order_relaxed)(cast(A*)obj);
+ do
+ {
+ set = get | arg;
+ } while (!atomicCompareExchangeWeak!(memory_order.memory_order_seq_cst, memory_order.memory_order_seq_cst)(cast(A*)obj, &get, cast(A)set));
+ break;
+ }
+
+ return get;
+}
+
+///
+unittest
+{
+ shared(int) val = 5;
+ atomic_fetch_or_explicit_impl(&val, 3, memory_order.memory_order_seq_cst);
+ assert(atomic_load_impl(&val) == 7);
+}
+
+/// C11 atomic_fetch_xor: atomically XORs `arg` into `*obj` with seq_cst
+/// ordering and returns the value held previously. CAS-loop based, since
+/// core.atomic has no fetch-xor intrinsic.
+pragma(inline, true)
+A atomic_fetch_xor_impl(A, M)(shared(A)* obj, M arg) @trusted
+{
+ assert(obj !is null);
+
+ // copied from atomicOp
+
+ A set, get = atomicLoad(cast(A*)obj);
+
+ do
+ {
+ set = get ^ arg;
+ // On CAS failure `get` is refreshed with the observed value, so the loop retries.
+ } while (!atomicCompareExchangeWeak!(memory_order.memory_order_seq_cst, memory_order.memory_order_seq_cst)(cast(A*)obj, &get, cast(A)set));
+
+ return get;
+}
+
+///
+unittest
+{
+ shared(int) val = 5;
+ atomic_fetch_xor_impl(&val, 3);
+ assert(atomic_load_impl(&val) == 6);
+}
+
+/// C11 atomic_fetch_xor_explicit: CAS-loop fetch-xor with the requested memory
+/// order applied to the compare-exchange; the initial load may be weaker since
+/// the CAS provides the ordering guarantee.
+pragma(inline, true)
+A atomic_fetch_xor_explicit_impl(A, M)(shared(A)* obj, M arg, memory_order order) @trusted
+{
+ assert(obj !is null);
+
+ A set, get;
+
+ final switch(order)
+ {
+ case memory_order.memory_order_relaxed:
+ get = atomicLoad!(memory_order.memory_order_relaxed)(cast(A*)obj);
+ do
+ {
+ set = get ^ arg;
+ } while (!atomicCompareExchangeWeak!(memory_order.memory_order_relaxed, memory_order.memory_order_relaxed)(cast(A*)obj, &get, cast(A)set));
+ break;
+
+ case memory_order.memory_order_acquire:
+ get = atomicLoad!(memory_order.memory_order_acquire)(cast(A*)obj);
+ do
+ {
+ set = get ^ arg;
+ } while (!atomicCompareExchangeWeak!(memory_order.memory_order_acquire, memory_order.memory_order_acquire)(cast(A*)obj, &get, cast(A)set));
+ break;
+
+ case memory_order.memory_order_release:
+ // Relaxed initial load is sufficient; the release CAS publishes the result.
+ get = atomicLoad!(memory_order.memory_order_relaxed)(cast(A*)obj);
+ do
+ {
+ set = get ^ arg;
+ } while (!atomicCompareExchangeWeak!(memory_order.memory_order_release, memory_order.memory_order_release)(cast(A*)obj, &get, cast(A)set));
+ break;
+
+ case memory_order.memory_order_acq_rel:
+ // NOTE(review): acq_rel is not a valid order for a pure load in C11 —
+ // presumably core.atomic tolerates it; confirm upstream intent.
+ get = atomicLoad!(memory_order.memory_order_acq_rel)(cast(A*)obj);
+ do
+ {
+ set = get ^ arg;
+ } while (!atomicCompareExchangeWeak!(memory_order.memory_order_acq_rel, memory_order.memory_order_acq_rel)(cast(A*)obj, &get, cast(A)set));
+ break;
+
+ case memory_order.memory_order_seq_cst:
+ get = atomicLoad!(memory_order.memory_order_relaxed)(cast(A*)obj);
+ do
+ {
+ set = get ^ arg;
+ } while (!atomicCompareExchangeWeak!(memory_order.memory_order_seq_cst, memory_order.memory_order_seq_cst)(cast(A*)obj, &get, cast(A)set));
+ break;
+ }
+
+ return get;
+}
+
+///
+unittest
+{
+ shared(int) val = 5;
+ atomic_fetch_xor_explicit_impl(&val, 3, memory_order.memory_order_seq_cst);
+ assert(atomic_load_impl(&val) == 6);
+}
+
+/// C11 atomic_fetch_and: atomically ANDs `arg` into `*obj` with seq_cst
+/// ordering and returns the value held previously. CAS-loop based, since
+/// core.atomic has no fetch-and intrinsic.
+pragma(inline, true)
+A atomic_fetch_and_impl(A, M)(shared(A)* obj, M arg) @trusted
+{
+ assert(obj !is null);
+
+ // copied from atomicOp
+
+ A set, get = atomicLoad(cast(A*)obj);
+
+ do
+ {
+ set = get & arg;
+ // On CAS failure `get` is refreshed with the observed value, so the loop retries.
+ } while (!atomicCompareExchangeWeak!(memory_order.memory_order_seq_cst, memory_order.memory_order_seq_cst)(cast(A*)obj, &get, cast(A)set));
+
+ return get;
+}
+
+///
+unittest
+{
+ shared(int) val = 5;
+ atomic_fetch_and_impl(&val, 3);
+ assert(atomic_load_impl(&val) == 1);
+}
+
+/// C11 atomic_fetch_and_explicit: CAS-loop fetch-and with the requested memory
+/// order applied to the compare-exchange; the initial load may be weaker since
+/// the CAS provides the ordering guarantee.
+pragma(inline, true)
+A atomic_fetch_and_explicit_impl(A, M)(shared(A)* obj, M arg, memory_order order) @trusted
+{
+ assert(obj !is null);
+
+ A set, get;
+
+ final switch(order)
+ {
+ case memory_order.memory_order_relaxed:
+ get = atomicLoad!(memory_order.memory_order_relaxed)(cast(A*)obj);
+ do
+ {
+ set = get & arg;
+ } while (!atomicCompareExchangeWeak!(memory_order.memory_order_relaxed, memory_order.memory_order_relaxed)(cast(A*)obj, &get, cast(A)set));
+ break;
+
+ case memory_order.memory_order_acquire:
+ get = atomicLoad!(memory_order.memory_order_acquire)(cast(A*)obj);
+ do
+ {
+ set = get & arg;
+ } while (!atomicCompareExchangeWeak!(memory_order.memory_order_acquire, memory_order.memory_order_acquire)(cast(A*)obj, &get, cast(A)set));
+ break;
+
+ case memory_order.memory_order_release:
+ // Relaxed initial load is sufficient; the release CAS publishes the result.
+ get = atomicLoad!(memory_order.memory_order_relaxed)(cast(A*)obj);
+ do
+ {
+ set = get & arg;
+ } while (!atomicCompareExchangeWeak!(memory_order.memory_order_release, memory_order.memory_order_release)(cast(A*)obj, &get, cast(A)set));
+ break;
+
+ case memory_order.memory_order_acq_rel:
+ // NOTE(review): acq_rel is not a valid order for a pure load in C11 —
+ // presumably core.atomic tolerates it; confirm upstream intent.
+ get = atomicLoad!(memory_order.memory_order_acq_rel)(cast(A*)obj);
+ do
+ {
+ set = get & arg;
+ } while (!atomicCompareExchangeWeak!(memory_order.memory_order_acq_rel, memory_order.memory_order_acq_rel)(cast(A*)obj, &get, cast(A)set));
+ break;
+
+ case memory_order.memory_order_seq_cst:
+ get = atomicLoad!(memory_order.memory_order_relaxed)(cast(A*)obj);
+ do
+ {
+ set = get & arg;
+ } while (!atomicCompareExchangeWeak!(memory_order.memory_order_seq_cst, memory_order.memory_order_seq_cst)(cast(A*)obj, &get, cast(A)set));
+ break;
+ }
+
+ return get;
+}
+
+///
+unittest
+{
+ shared(int) val = 5;
+ atomic_fetch_and_explicit_impl(&val, 3, memory_order.memory_order_seq_cst);
+ assert(atomic_load_impl(&val) == 1);
+}
diff --git a/libphobos/libdruntime/core/thread/osthread.d b/libphobos/libdruntime/core/thread/osthread.d
index 066f39e..9ddc187 100644
--- a/libphobos/libdruntime/core/thread/osthread.d
+++ b/libphobos/libdruntime/core/thread/osthread.d
@@ -2129,6 +2129,13 @@ extern (C) void thread_init() @nogc nothrow
static extern(C) void initChildAfterFork()
{
auto thisThread = Thread.getThis();
+ if (!thisThread)
+ {
+ // It is possible that runtime was not properly initialized in the current process or thread -
+ // it may happen after `fork` call when using a dynamically loaded shared library written in D from a multithreaded non-D program.
+ // In such case getThis will return null.
+ return;
+ }
thisThread.m_addr = pthread_self();
assert( thisThread.m_addr != thisThread.m_addr.init );
thisThread.m_tmach = pthread_mach_thread_np( thisThread.m_addr );
diff --git a/libphobos/libdruntime/object.d b/libphobos/libdruntime/object.d
index 5589c0a..1b39a27 100644
--- a/libphobos/libdruntime/object.d
+++ b/libphobos/libdruntime/object.d
@@ -526,6 +526,12 @@ unittest
private extern(C) void _d_setSameMutex(shared Object ownee, shared Object owner) nothrow;
+/** Makes ownee use owner's mutex.
+ * This will initialize owner's mutex if it hasn't been set yet.
+ * Params:
+ * ownee = object to change
+ * owner = source object
+ */
void setSameMutex(shared Object ownee, shared Object owner)
{
import core.atomic : atomicLoad;