aboutsummaryrefslogtreecommitdiff
path: root/libphobos
diff options
context:
space:
mode:
Diffstat (limited to 'libphobos')
-rw-r--r--libphobos/libdruntime/MERGE2
-rw-r--r--libphobos/libdruntime/Makefile.am1
-rw-r--r--libphobos/libdruntime/Makefile.in4
-rw-r--r--libphobos/libdruntime/core/internal/array/arrayassign.d16
-rw-r--r--libphobos/libdruntime/core/internal/array/utils.d108
-rw-r--r--libphobos/libdruntime/core/internal/gc/blkcache.d243
-rw-r--r--libphobos/libdruntime/core/internal/gc/blockmeta.d209
-rw-r--r--libphobos/libdruntime/core/internal/gc/impl/conservative/gc.d23
-rw-r--r--libphobos/libdruntime/core/internal/traits.d212
-rw-r--r--libphobos/libdruntime/core/lifetime.d5
-rw-r--r--libphobos/libdruntime/core/memory.d22
-rw-r--r--libphobos/libdruntime/core/thread/osthread.d6
-rw-r--r--libphobos/libdruntime/core/thread/threadbase.d61
-rw-r--r--libphobos/libdruntime/rt/lifetime.d398
-rw-r--r--libphobos/libdruntime/rt/tlsgc.d16
-rw-r--r--libphobos/src/MERGE2
-rw-r--r--libphobos/src/std/algorithm/searching.d82
-rw-r--r--libphobos/src/std/array.d59
-rw-r--r--libphobos/src/std/bitmanip.d23
-rw-r--r--libphobos/src/std/container/dlist.d25
-rw-r--r--libphobos/src/std/conv.d11
-rw-r--r--libphobos/src/std/digest/package.d342
-rw-r--r--libphobos/src/std/format/internal/write.d33
-rw-r--r--libphobos/src/std/format/read.d115
-rw-r--r--libphobos/src/std/logger/core.d4
-rw-r--r--libphobos/src/std/logger/filelogger.d16
-rw-r--r--libphobos/src/std/logger/package.d2
-rw-r--r--libphobos/src/std/numeric.d25
-rw-r--r--libphobos/src/std/process.d11
-rw-r--r--libphobos/src/std/socket.d22
-rw-r--r--libphobos/src/std/sumtype.d182
-rw-r--r--libphobos/src/std/traits.d38
-rw-r--r--libphobos/src/std/typecons.d345
-rw-r--r--libphobos/src/std/windows/syserror.d1
34 files changed, 1945 insertions, 719 deletions
diff --git a/libphobos/libdruntime/MERGE b/libphobos/libdruntime/MERGE
index f660884..bfdc9ea 100644
--- a/libphobos/libdruntime/MERGE
+++ b/libphobos/libdruntime/MERGE
@@ -1,4 +1,4 @@
-2b89c2909de239bd603d6f36379658fe902667db
+82a5d2a7c4dd3d270537bcede2981e047bfd0e6a
The first line of this file holds the git revision number of the last
merge done from the dlang/dmd repository.
diff --git a/libphobos/libdruntime/Makefile.am b/libphobos/libdruntime/Makefile.am
index a20bf6b..7713c8c 100644
--- a/libphobos/libdruntime/Makefile.am
+++ b/libphobos/libdruntime/Makefile.am
@@ -188,6 +188,7 @@ DRUNTIME_DSOURCES = core/atomic.d core/attribute.d core/bitop.d \
core/internal/container/treap.d core/internal/convert.d \
core/internal/dassert.d core/internal/destruction.d \
core/internal/entrypoint.d core/internal/gc/bits.d \
+ core/internal/gc/blkcache.d core/internal/gc/blockmeta.d \
core/internal/gc/impl/conservative/gc.d \
core/internal/gc/impl/manual/gc.d core/internal/gc/impl/proto/gc.d \
core/internal/gc/os.d core/internal/gc/pooltable.d \
diff --git a/libphobos/libdruntime/Makefile.in b/libphobos/libdruntime/Makefile.in
index 53c197e..f4d5552 100644
--- a/libphobos/libdruntime/Makefile.in
+++ b/libphobos/libdruntime/Makefile.in
@@ -209,6 +209,7 @@ am__objects_1 = core/atomic.lo core/attribute.lo core/bitop.lo \
core/internal/container/treap.lo core/internal/convert.lo \
core/internal/dassert.lo core/internal/destruction.lo \
core/internal/entrypoint.lo core/internal/gc/bits.lo \
+ core/internal/gc/blkcache.lo core/internal/gc/blockmeta.lo \
core/internal/gc/impl/conservative/gc.lo \
core/internal/gc/impl/manual/gc.lo \
core/internal/gc/impl/proto/gc.lo core/internal/gc/os.lo \
@@ -868,6 +869,7 @@ DRUNTIME_DSOURCES = core/atomic.d core/attribute.d core/bitop.d \
core/internal/container/treap.d core/internal/convert.d \
core/internal/dassert.d core/internal/destruction.d \
core/internal/entrypoint.d core/internal/gc/bits.d \
+ core/internal/gc/blkcache.d core/internal/gc/blockmeta.d \
core/internal/gc/impl/conservative/gc.d \
core/internal/gc/impl/manual/gc.d core/internal/gc/impl/proto/gc.d \
core/internal/gc/os.d core/internal/gc/pooltable.d \
@@ -1265,6 +1267,8 @@ core/internal/gc/$(am__dirstamp):
@$(MKDIR_P) core/internal/gc
@: > core/internal/gc/$(am__dirstamp)
core/internal/gc/bits.lo: core/internal/gc/$(am__dirstamp)
+core/internal/gc/blkcache.lo: core/internal/gc/$(am__dirstamp)
+core/internal/gc/blockmeta.lo: core/internal/gc/$(am__dirstamp)
core/internal/gc/impl/conservative/$(am__dirstamp):
@$(MKDIR_P) core/internal/gc/impl/conservative
@: > core/internal/gc/impl/conservative/$(am__dirstamp)
diff --git a/libphobos/libdruntime/core/internal/array/arrayassign.d b/libphobos/libdruntime/core/internal/array/arrayassign.d
index 6e3c1fd..21690ca 100644
--- a/libphobos/libdruntime/core/internal/array/arrayassign.d
+++ b/libphobos/libdruntime/core/internal/array/arrayassign.d
@@ -347,7 +347,7 @@ Tarr _d_arraysetassign(Tarr : T[], T)(return scope Tarr to, scope ref T value) @
static if (__traits(isCopyable, T))
copyEmplace(value, dst);
else
- memcpy(cast(void*) &value, cast(void*) &dst, elemSize);
+ memcpy(cast(void*) &dst, cast(void*) &value, elemSize);
auto elem = cast(Unqual!T*) &tmp;
destroy(*elem);
}
@@ -395,6 +395,20 @@ Tarr _d_arraysetassign(Tarr : T[], T)(return scope Tarr to, scope ref T value) @
assert(arr == [S(1234), S(1234), S(1234), S(1234)]);
}
+// disabled copy constructor
+@safe unittest
+{
+ static struct S
+ {
+ int val;
+ @disable this(ref S);
+ }
+ S[1] arr;
+ S s = S(1234);
+ _d_arraysetassign(arr[], s);
+ assert(arr[0].val == 1234);
+}
+
// throwing and `nothrow`
@safe nothrow unittest
{
diff --git a/libphobos/libdruntime/core/internal/array/utils.d b/libphobos/libdruntime/core/internal/array/utils.d
index 89ce6ca..c45913d 100644
--- a/libphobos/libdruntime/core/internal/array/utils.d
+++ b/libphobos/libdruntime/core/internal/array/utils.d
@@ -263,111 +263,7 @@ void *__arrayStart()(return scope BlkInfo info) nothrow pure
*/
bool __setArrayAllocLength(T)(ref BlkInfo info, size_t newLength, bool isShared, size_t oldLength = ~0)
{
- import core.atomic;
import core.lifetime : TypeInfoSize;
-
- size_t typeInfoSize = TypeInfoSize!T;
-
- if (info.size <= 256)
- {
- import core.checkedint;
-
- bool overflow;
- auto newLengthPadded = addu(newLength,
- addu(SMALLPAD, typeInfoSize, overflow),
- overflow);
-
- if (newLengthPadded > info.size || overflow)
- // new size does not fit inside block
- return false;
-
- auto length = cast(ubyte *)(info.base + info.size - typeInfoSize - SMALLPAD);
- if (oldLength != ~0)
- {
- if (isShared)
- {
- return cas(cast(shared)length, cast(ubyte)oldLength, cast(ubyte)newLength);
- }
- else
- {
- if (*length == cast(ubyte)oldLength)
- *length = cast(ubyte)newLength;
- else
- return false;
- }
- }
- else
- {
- // setting the initial length, no cas needed
- *length = cast(ubyte)newLength;
- }
- if (typeInfoSize)
- {
- auto typeInfo = cast(TypeInfo*)(info.base + info.size - size_t.sizeof);
- *typeInfo = cast()typeid(T);
- }
- }
- else if (info.size < PAGESIZE)
- {
- if (newLength + MEDPAD + typeInfoSize > info.size)
- // new size does not fit inside block
- return false;
- auto length = cast(ushort *)(info.base + info.size - typeInfoSize - MEDPAD);
- if (oldLength != ~0)
- {
- if (isShared)
- {
- return cas(cast(shared)length, cast(ushort)oldLength, cast(ushort)newLength);
- }
- else
- {
- if (*length == oldLength)
- *length = cast(ushort)newLength;
- else
- return false;
- }
- }
- else
- {
- // setting the initial length, no cas needed
- *length = cast(ushort)newLength;
- }
- if (typeInfoSize)
- {
- auto typeInfo = cast(TypeInfo*)(info.base + info.size - size_t.sizeof);
- *typeInfo = cast()typeid(T);
- }
- }
- else
- {
- if (newLength + LARGEPAD > info.size)
- // new size does not fit inside block
- return false;
- auto length = cast(size_t *)(info.base);
- if (oldLength != ~0)
- {
- if (isShared)
- {
- return cas(cast(shared)length, cast(size_t)oldLength, cast(size_t)newLength);
- }
- else
- {
- if (*length == oldLength)
- *length = newLength;
- else
- return false;
- }
- }
- else
- {
- // setting the initial length, no cas needed
- *length = newLength;
- }
- if (typeInfoSize)
- {
- auto typeInfo = cast(TypeInfo*)(info.base + size_t.sizeof);
- *typeInfo = cast()typeid(T);
- }
- }
- return true; // resize succeeded
+ import core.internal.gc.blockmeta : __setArrayAllocLengthImpl;
+ return __setArrayAllocLengthImpl(info, newLength, isShared, typeid(T), oldLength, TypeInfoSize!T);
}
diff --git a/libphobos/libdruntime/core/internal/gc/blkcache.d b/libphobos/libdruntime/core/internal/gc/blkcache.d
new file mode 100644
index 0000000..c555c22
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/gc/blkcache.d
@@ -0,0 +1,243 @@
+/**
+BlkInfo thread-local cache. Used for array appending in the conservative GC to avoid the lock when possible.
+
+Note: this used to be in rt.lifetime, but was moved here to allow GCs to take over array operations.
+*/
+module core.internal.gc.blkcache;
+
+import core.memory;
+import core.attribute;
+
+alias BlkInfo = GC.BlkInfo;
+alias BlkAttr = GC.BlkAttr;
+
+/**
+ cache for the lookup of the block info
+ */
+private enum N_CACHE_BLOCKS = 8;
+
+// note this is TLS, so no need to sync.
+BlkInfo *__blkcache_storage;
+
+static if (N_CACHE_BLOCKS == 1)
+{
+ version=single_cache;
+}
+else
+{
+ //version=simple_cache; // uncomment to test simple cache strategy
+ //version=random_cache; // uncomment to test random cache strategy
+
+ // ensure N_CACHE_BLOCKS is power of 2.
+ static assert(!((N_CACHE_BLOCKS - 1) & N_CACHE_BLOCKS));
+
+ version (random_cache)
+ {
+ int __nextRndNum = 0;
+ }
+ int __nextBlkIdx;
+}
+
+@property BlkInfo *__blkcache() nothrow
+{
+ if (!__blkcache_storage)
+ {
+ import core.stdc.stdlib;
+ import core.stdc.string;
+ import core.thread.threadbase;
+ auto tBase = ThreadBase.getThis();
+ if (tBase is null)
+ // if we don't have a thread object, this is a detached thread, and
+ // this won't be properly maintained by the GC.
+ return null;
+
+ // allocate the block cache for the first time
+ immutable size = BlkInfo.sizeof * N_CACHE_BLOCKS;
+ // use C alloc, because this may become a detached thread, and the GC
+ // would then clean up the cache without zeroing this pointer.
+ __blkcache_storage = cast(BlkInfo*) calloc(size, 1);
+ tBase.tlsGCData = __blkcache_storage;
+ }
+ return __blkcache_storage;
+}
+
+// free the allocation on thread exit.
+@standalone static ~this()
+{
+ if (__blkcache_storage)
+ {
+ import core.stdc.stdlib;
+ import core.thread.threadbase;
+ auto tBase = ThreadBase.getThis();
+ if (tBase !is null)
+ tBase.tlsGCData = null;
+ free(__blkcache_storage);
+ __blkcache_storage = null;
+ }
+}
+
+/**
+ * Indicates whether an address has been marked by the GC.
+ */
+enum IsMarked : int
+{
+ no, /// Address is not marked.
+ yes, /// Address is marked.
+ unknown, /// Address is not managed by the GC.
+}
+
+alias IsMarkedDg = IsMarked delegate(void* addr) nothrow; /// The isMarked callback function.
+
+// we expect this to be called with the lock in place
+void processGCMarks(void* data, scope IsMarkedDg isMarked) nothrow
+{
+ if (!data)
+ return;
+
+ auto cache = cast(BlkInfo*) data;
+ // called after the mark routine to eliminate block cache data when it
+ // might be ready to sweep
+
+ debug(PRINTF) printf("processing GC Marks, %x\n", cache);
+ debug(PRINTF) foreach (i; 0 .. N_CACHE_BLOCKS)
+ {
+ printf("cache entry %d has base ptr %x\tsize %d\tflags %x\n", i, cache[i].base, cache[i].size, cache[i].attr);
+ }
+ auto cache_end = cache + N_CACHE_BLOCKS;
+ for (;cache < cache_end; ++cache)
+ {
+ if (cache.base != null && isMarked(cache.base) == IsMarked.no)
+ {
+ debug(PRINTF) printf("clearing cache entry at %x\n", cache.base);
+ cache.base = null; // clear that data.
+ }
+ }
+}
+
+unittest
+{
+ // Bugzilla 10701 - segfault in GC
+ ubyte[] result; result.length = 4096;
+ GC.free(result.ptr);
+ GC.collect();
+}
+
+/**
+ Get the cached block info of an interior pointer. Returns null if the
+ interior pointer's block is not cached.
+
+ NOTE: The following note was not valid, but is retained for historical
+ purposes. The data cannot be cleared because the stack contains a
+ reference to the affected block (e.g. through `interior`). Therefore,
+ the element will not be collected, and the data will remain valid.
+
+ ORIGINAL: The base ptr in this struct can be cleared asynchronously by the GC,
+ so any use of the returned BlkInfo should copy it and then check the
+ base ptr of the copy before actually using it.
+ */
+BlkInfo *__getBlkInfo(void *interior) nothrow
+{
+ BlkInfo *ptr = __blkcache;
+ if (ptr is null)
+ // if for some reason we don't have a cache, return null.
+ return null;
+ version (single_cache)
+ {
+ if (ptr.base && ptr.base <= interior && (interior - ptr.base) < ptr.size)
+ return ptr;
+ return null; // not in cache.
+ }
+ else version (simple_cache)
+ {
+ foreach (i; 0..N_CACHE_BLOCKS)
+ {
+ if (ptr.base && ptr.base <= interior && (interior - ptr.base) < ptr.size)
+ return ptr;
+ ptr++;
+ }
+ }
+ else
+ {
+ // try to do a smart lookup, using __nextBlkIdx as the "head"
+ auto curi = ptr + __nextBlkIdx;
+ for (auto i = curi; i >= ptr; --i)
+ {
+ if (i.base && i.base <= interior && cast(size_t)(interior - i.base) < i.size)
+ return i;
+ }
+
+ for (auto i = ptr + N_CACHE_BLOCKS - 1; i > curi; --i)
+ {
+ if (i.base && i.base <= interior && cast(size_t)(interior - i.base) < i.size)
+ return i;
+ }
+ }
+ return null; // not in cache.
+}
+
+void __insertBlkInfoCache(BlkInfo bi, BlkInfo *curpos) nothrow
+{
+ auto cache = __blkcache;
+ if (cache is null)
+ // no cache to use.
+ return;
+
+ version (single_cache)
+ {
+ *cache = bi;
+ return;
+ }
+ else
+ {
+ version (simple_cache)
+ {
+ if (curpos)
+ *curpos = bi;
+ else
+ {
+ // note, this is a super-simple algorithm that does not care about
+ // most recently used. It simply uses a round-robin technique to
+ // cache block info. This means that the ordering of the cache
+ // doesn't mean anything. Certain patterns of allocation may
+ // render the cache near-useless.
+ cache[__nextBlkIdx] = bi;
+ __nextBlkIdx = (__nextBlkIdx+1) & (N_CACHE_BLOCKS - 1);
+ }
+ }
+ else version (random_cache)
+ {
+ // strategy: if the block currently is in the cache, move the
+ // current block index to the a random element and evict that
+ // element.
+ if (!curpos)
+ {
+ __nextBlkIdx = (__nextRndNum = 1664525 * __nextRndNum + 1013904223) & (N_CACHE_BLOCKS - 1);
+ curpos = cache + __nextBlkIdx;
+ }
+ else
+ {
+ __nextBlkIdx = curpos - cache;
+ }
+ *curpos = bi;
+ }
+ else
+ {
+ //
+ // strategy: If the block currently is in the cache, swap it with
+ // the head element. Otherwise, move the head element up by one,
+ // and insert it there.
+ //
+ if (!curpos)
+ {
+ __nextBlkIdx = (__nextBlkIdx+1) & (N_CACHE_BLOCKS - 1);
+ curpos = cache + __nextBlkIdx;
+ }
+ else if (curpos !is cache + __nextBlkIdx)
+ {
+ *curpos = cache[__nextBlkIdx];
+ curpos = cache + __nextBlkIdx;
+ }
+ *curpos = bi;
+ }
+ }
+}
diff --git a/libphobos/libdruntime/core/internal/gc/blockmeta.d b/libphobos/libdruntime/core/internal/gc/blockmeta.d
new file mode 100644
index 0000000..c7dfeb6
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/gc/blockmeta.d
@@ -0,0 +1,209 @@
+/**
+ Functions to manipulate metadata in-block.
+
+ functionality was moved from rt.lifetime
+ */
+module core.internal.gc.blockmeta;
+
+import core.memory;
+
+alias BlkInfo = GC.BlkInfo;
+alias BlkAttr = GC.BlkAttr;
+
+enum : size_t
+{
+ PAGESIZE = 4096,
+ BIGLENGTHMASK = ~(PAGESIZE - 1),
+ SMALLPAD = 1,
+ MEDPAD = ushort.sizeof,
+ LARGEPREFIX = 16, // 16 bytes padding at the front of the array
+ LARGEPAD = LARGEPREFIX + 1,
+ MAXSMALLSIZE = 256-SMALLPAD,
+ MAXMEDSIZE = (PAGESIZE / 2) - MEDPAD
+}
+
+// size used to store the TypeInfo at the end of an allocation for structs that have a destructor
+size_t structTypeInfoSize(const TypeInfo ti) pure nothrow @nogc
+{
+ if (ti && typeid(ti) is typeid(TypeInfo_Struct)) // avoid a complete dynamic type cast
+ {
+ auto sti = cast(TypeInfo_Struct)cast(void*)ti;
+ if (sti.xdtor)
+ return size_t.sizeof;
+ }
+ return 0;
+}
+
+/**
+ Set the allocated length of the array block. This is called
+ any time an array is appended to or its length is set.
+
+ The allocated block looks like this for blocks < PAGESIZE:
+
+ |elem0|elem1|elem2|...|elemN-1|emptyspace|N*elemsize|
+
+
+ The size of the allocated length at the end depends on the block size:
+
+ a block of 16 to 256 bytes has an 8-bit length.
+
+ a block with 512 to pagesize/2 bytes has a 16-bit length.
+
+ For blocks >= pagesize, the length is a size_t and is at the beginning of the
+ block. The reason we have to do this is because the block can extend into
+ more pages, so we cannot trust the block length if it sits at the end of the
+ block, because it might have just been extended. If we can prove in the
+ future that the block is unshared, we may be able to change this, but I'm not
+ sure it's important.
+
+ In order to do put the length at the front, we have to provide 16 bytes
+ buffer space in case the block has to be aligned properly. In x86, certain
+ SSE instructions will only work if the data is 16-byte aligned. In addition,
+ we need the sentinel byte to prevent accidental pointers to the next block.
+ Because of the extra overhead, we only do this for page size and above, where
+ the overhead is minimal compared to the block size.
+
+ So for those blocks, it looks like:
+
+ |N*elemsize|padding|elem0|elem1|...|elemN-1|emptyspace|sentinelbyte|
+
+ where elem0 starts 16 bytes after the first byte.
+ */
+bool __setArrayAllocLength(ref BlkInfo info, size_t newlength, bool isshared, const TypeInfo tinext, size_t oldlength = ~0) pure nothrow
+{
+ size_t typeInfoSize = structTypeInfoSize(tinext);
+ return __setArrayAllocLengthImpl(info, newlength, isshared, tinext, oldlength, typeInfoSize);
+}
+
+// the impl function, used both above and in core.internal.array.utils
+bool __setArrayAllocLengthImpl(ref BlkInfo info, size_t newlength, bool isshared, const TypeInfo tinext, size_t oldlength, size_t typeInfoSize) pure nothrow
+{
+ import core.atomic;
+
+ if (info.size <= 256)
+ {
+ import core.checkedint;
+
+ bool overflow;
+ auto newlength_padded = addu(newlength,
+ addu(SMALLPAD, typeInfoSize, overflow),
+ overflow);
+
+ if (newlength_padded > info.size || overflow)
+ // new size does not fit inside block
+ return false;
+
+ auto length = cast(ubyte *)(info.base + info.size - typeInfoSize - SMALLPAD);
+ if (oldlength != ~0)
+ {
+ if (isshared)
+ {
+ return cas(cast(shared)length, cast(ubyte)oldlength, cast(ubyte)newlength);
+ }
+ else
+ {
+ if (*length == cast(ubyte)oldlength)
+ *length = cast(ubyte)newlength;
+ else
+ return false;
+ }
+ }
+ else
+ {
+ // setting the initial length, no cas needed
+ *length = cast(ubyte)newlength;
+ }
+ if (typeInfoSize)
+ {
+ auto typeInfo = cast(TypeInfo*)(info.base + info.size - size_t.sizeof);
+ *typeInfo = cast() tinext;
+ }
+ }
+ else if (info.size < PAGESIZE)
+ {
+ if (newlength + MEDPAD + typeInfoSize > info.size)
+ // new size does not fit inside block
+ return false;
+ auto length = cast(ushort *)(info.base + info.size - typeInfoSize - MEDPAD);
+ if (oldlength != ~0)
+ {
+ if (isshared)
+ {
+ return cas(cast(shared)length, cast(ushort)oldlength, cast(ushort)newlength);
+ }
+ else
+ {
+ if (*length == oldlength)
+ *length = cast(ushort)newlength;
+ else
+ return false;
+ }
+ }
+ else
+ {
+ // setting the initial length, no cas needed
+ *length = cast(ushort)newlength;
+ }
+ if (typeInfoSize)
+ {
+ auto typeInfo = cast(TypeInfo*)(info.base + info.size - size_t.sizeof);
+ *typeInfo = cast() tinext;
+ }
+ }
+ else
+ {
+ if (newlength + LARGEPAD > info.size)
+ // new size does not fit inside block
+ return false;
+ auto length = cast(size_t *)(info.base);
+ if (oldlength != ~0)
+ {
+ if (isshared)
+ {
+ return cas(cast(shared)length, cast(size_t)oldlength, cast(size_t)newlength);
+ }
+ else
+ {
+ if (*length == oldlength)
+ *length = newlength;
+ else
+ return false;
+ }
+ }
+ else
+ {
+ // setting the initial length, no cas needed
+ *length = newlength;
+ }
+ if (typeInfoSize)
+ {
+ auto typeInfo = cast(TypeInfo*)(info.base + size_t.sizeof);
+ *typeInfo = cast()tinext;
+ }
+ }
+ return true; // resize succeeded
+}
+
+/**
+ get the allocation size of the array for the given block (without padding or type info)
+ */
+size_t __arrayAllocLength(ref BlkInfo info, const TypeInfo tinext) pure nothrow
+{
+ if (info.size <= 256)
+ return *cast(ubyte *)(info.base + info.size - structTypeInfoSize(tinext) - SMALLPAD);
+
+ if (info.size < PAGESIZE)
+ return *cast(ushort *)(info.base + info.size - structTypeInfoSize(tinext) - MEDPAD);
+
+ return *cast(size_t *)(info.base);
+}
+
+/**
+ get the padding required to allocate size bytes. Note that the padding is
+ NOT included in the passed in size. Therefore, do NOT call this function
+ with the size of an allocated block.
+ */
+size_t __arrayPad(size_t size, const TypeInfo tinext) nothrow pure @trusted
+{
+ return size > MAXMEDSIZE ? LARGEPAD : ((size > MAXSMALLSIZE ? MEDPAD : SMALLPAD) + structTypeInfoSize(tinext));
+}
diff --git a/libphobos/libdruntime/core/internal/gc/impl/conservative/gc.d b/libphobos/libdruntime/core/internal/gc/impl/conservative/gc.d
index dd6f92a..149cc5d 100644
--- a/libphobos/libdruntime/core/internal/gc/impl/conservative/gc.d
+++ b/libphobos/libdruntime/core/internal/gc/impl/conservative/gc.d
@@ -41,6 +41,7 @@ import core.gc.gcinterface;
import core.internal.container.treap;
import core.internal.spinlock;
import core.internal.gc.pooltable;
+import core.internal.gc.blkcache;
import cstdlib = core.stdc.stdlib : calloc, free, malloc, realloc;
import core.stdc.string : memcpy, memset, memmove;
@@ -1426,7 +1427,7 @@ short[PAGESIZE / 16][Bins.B_NUMSMALL + 1] calcBinBase()
foreach (i, size; binsize)
{
- short end = (PAGESIZE / size) * size;
+ short end = cast(short) ((PAGESIZE / size) * size);
short bsz = size / 16;
foreach (off; 0..PAGESIZE/16)
{
@@ -2873,7 +2874,7 @@ struct Gcx
markProcPid = 0;
// process GC marks then sweep
thread_suspendAll();
- thread_processGCMarks(&isMarked);
+ thread_processTLSGCData(&clearBlkCacheData);
thread_resumeAll();
break;
case ChildStatus.running:
@@ -3108,7 +3109,7 @@ Lmark:
markAll!(markConservative!false)();
}
- thread_processGCMarks(&isMarked);
+ thread_processTLSGCData(&clearBlkCacheData);
thread_resumeAll();
isFinal = false;
}
@@ -3162,12 +3163,26 @@ Lmark:
}
/**
+ * Clear the block cache data if it exists, given the data which is the
+ * block info cache.
+ *
+ * Warning! This should only be called while the world is stopped inside
+ * the fullcollect function after all live objects have been marked, but
+ * before sweeping.
+ */
+ void *clearBlkCacheData(void* data) scope nothrow
+ {
+ processGCMarks(data, &isMarked);
+ return data;
+ }
+
+ /**
* Returns true if the addr lies within a marked block.
*
* Warning! This should only be called while the world is stopped inside
* the fullcollect function after all live objects have been marked, but before sweeping.
*/
- int isMarked(void *addr) scope nothrow
+ IsMarked isMarked(void *addr) scope nothrow
{
// first, we find the Pool this block is in, then check to see if the
// mark bit is clear.
diff --git a/libphobos/libdruntime/core/internal/traits.d b/libphobos/libdruntime/core/internal/traits.d
index f0d9ebc..0289808 100644
--- a/libphobos/libdruntime/core/internal/traits.d
+++ b/libphobos/libdruntime/core/internal/traits.d
@@ -267,8 +267,12 @@ template hasElaborateDestructor(S)
}
else static if (is(S == struct))
{
- enum hasElaborateDestructor = __traits(hasMember, S, "__dtor")
- || anySatisfy!(.hasElaborateDestructor, Fields!S);
+ // Once https://issues.dlang.org/show_bug.cgi?id=24865 is fixed, then
+ // this should be the implementation, but until that's fixed, we need the
+ // uncommented code.
+ // enum hasElaborateDestructor = __traits(hasMember, S, "__xdtor");
+
+ enum hasElaborateDestructor = hasDtor([__traits(allMembers, S)]);
}
else
{
@@ -276,6 +280,64 @@ template hasElaborateDestructor(S)
}
}
+private bool hasDtor(string[] members)
+{
+ foreach (name; members)
+ {
+ if (name == "__xdtor")
+ return true;
+ }
+
+ return false;
+}
+
+@safe unittest
+{
+ static struct NoDestructor {}
+ static assert(!hasElaborateDestructor!NoDestructor);
+ static assert(!hasElaborateDestructor!(NoDestructor[42]));
+ static assert(!hasElaborateDestructor!(NoDestructor[0]));
+ static assert(!hasElaborateDestructor!(NoDestructor[]));
+
+ static struct HasDestructor { ~this() {} }
+ static assert( hasElaborateDestructor!HasDestructor);
+ static assert( hasElaborateDestructor!(HasDestructor[42]));
+ static assert(!hasElaborateDestructor!(HasDestructor[0]));
+ static assert(!hasElaborateDestructor!(HasDestructor[]));
+
+ static struct HasDestructor2 { HasDestructor s; }
+ static assert( hasElaborateDestructor!HasDestructor2);
+ static assert( hasElaborateDestructor!(HasDestructor2[42]));
+ static assert(!hasElaborateDestructor!(HasDestructor2[0]));
+ static assert(!hasElaborateDestructor!(HasDestructor2[]));
+
+ static class HasFinalizer { ~this() {} }
+ static assert(!hasElaborateDestructor!HasFinalizer);
+
+ static struct HasUnion { union { HasDestructor s; } }
+ static assert(!hasElaborateDestructor!HasUnion);
+ static assert(!hasElaborateDestructor!(HasUnion[42]));
+ static assert(!hasElaborateDestructor!(HasUnion[0]));
+ static assert(!hasElaborateDestructor!(HasUnion[]));
+
+ static assert(!hasElaborateDestructor!int);
+ static assert(!hasElaborateDestructor!(int[0]));
+ static assert(!hasElaborateDestructor!(int[42]));
+ static assert(!hasElaborateDestructor!(int[]));
+}
+
+// https://issues.dlang.org/show_bug.cgi?id=24865
+@safe unittest
+{
+ static struct S2 { ~this() {} }
+ static struct S3 { S2 field; }
+ static struct S6 { S3[0] field; }
+
+ static assert( hasElaborateDestructor!S2);
+ static assert( hasElaborateDestructor!S3);
+ static assert(!hasElaborateDestructor!S6);
+}
+
// std.traits.hasElaborateCopyDestructor
template hasElaborateCopyConstructor(S)
{
@@ -302,7 +364,7 @@ template hasElaborateCopyConstructor(S)
this(int x, int y) {}
}
- static assert(hasElaborateCopyConstructor!S);
+ static assert( hasElaborateCopyConstructor!S);
static assert(!hasElaborateCopyConstructor!(S[0][1]));
static struct S2
@@ -320,7 +382,11 @@ template hasElaborateCopyConstructor(S)
this(int x, int y) {}
}
- static assert(hasElaborateCopyConstructor!S3);
+ static assert( hasElaborateCopyConstructor!S3);
+
+ static struct S4 { union { S s; } }
+
+ static assert(!hasElaborateCopyConstructor!S4);
}
template hasElaborateAssign(S)
@@ -332,8 +398,7 @@ template hasElaborateAssign(S)
else static if (is(S == struct))
{
enum hasElaborateAssign = is(typeof(S.init.opAssign(rvalueOf!S))) ||
- is(typeof(S.init.opAssign(lvalueOf!S))) ||
- anySatisfy!(.hasElaborateAssign, Fields!S);
+ is(typeof(S.init.opAssign(lvalueOf!S)));
}
else
{
@@ -341,17 +406,148 @@ template hasElaborateAssign(S)
}
}
+unittest
+{
+ {
+ static struct S {}
+ static assert(!hasElaborateAssign!S);
+ static assert(!hasElaborateAssign!(S[10]));
+ static assert(!hasElaborateAssign!(S[0]));
+ static assert(!hasElaborateAssign!(S[]));
+ }
+ {
+ static struct S { int i; }
+ static assert(!hasElaborateAssign!S);
+ static assert(!hasElaborateAssign!(S[10]));
+ static assert(!hasElaborateAssign!(S[0]));
+ static assert(!hasElaborateAssign!(S[]));
+ }
+ {
+ static struct S { void opAssign(S) {} }
+ static assert( hasElaborateAssign!S);
+ static assert( hasElaborateAssign!(S[10]));
+ static assert(!hasElaborateAssign!(S[0]));
+ static assert(!hasElaborateAssign!(S[]));
+ }
+ {
+ static struct S { void opAssign(ref S) {} }
+ static assert( hasElaborateAssign!S);
+ static assert( hasElaborateAssign!(S[10]));
+ static assert(!hasElaborateAssign!(S[0]));
+ static assert(!hasElaborateAssign!(S[]));
+ }
+ {
+ static struct S { void opAssign(int) {} }
+ static assert(!hasElaborateAssign!S);
+ static assert(!hasElaborateAssign!(S[10]));
+ static assert(!hasElaborateAssign!(S[0]));
+ static assert(!hasElaborateAssign!(S[]));
+ }
+ {
+ static struct S { this(this) {} }
+ static assert( hasElaborateAssign!S);
+ static assert( hasElaborateAssign!(S[10]));
+ static assert(!hasElaborateAssign!(S[0]));
+ static assert(!hasElaborateAssign!(S[]));
+ }
+ // https://issues.dlang.org/show_bug.cgi?id=24834
+ /+
+ {
+ static struct S { this(ref S) {} }
+ static assert( hasElaborateAssign!S);
+ static assert( hasElaborateAssign!(S[10]));
+ static assert(!hasElaborateAssign!(S[0]));
+ static assert(!hasElaborateAssign!(S[]));
+ }
+ +/
+ {
+ static struct S { ~this() {} }
+ static assert( hasElaborateAssign!S);
+ static assert( hasElaborateAssign!(S[10]));
+ static assert(!hasElaborateAssign!(S[0]));
+ static assert(!hasElaborateAssign!(S[]));
+ }
+ {
+ static struct S { @disable void opAssign(S); }
+ static assert(!hasElaborateAssign!S);
+ static assert(!hasElaborateAssign!(S[10]));
+ static assert(!hasElaborateAssign!(S[0]));
+ static assert(!hasElaborateAssign!(S[]));
+ }
+ {
+ static struct Member {}
+ static struct S { Member member; }
+ static assert(!hasElaborateAssign!S);
+ static assert(!hasElaborateAssign!(S[10]));
+ static assert(!hasElaborateAssign!(S[0]));
+ static assert(!hasElaborateAssign!(S[]));
+ }
+ {
+ static struct Member { void opAssign(Member) {} }
+ static struct S { Member member; }
+ static assert( hasElaborateAssign!S);
+ static assert( hasElaborateAssign!(S[10]));
+ static assert(!hasElaborateAssign!(S[0]));
+ static assert(!hasElaborateAssign!(S[]));
+ }
+ {
+ static struct Member {}
+ static struct S { Member member; void opAssign(S) {} }
+ static assert( hasElaborateAssign!S);
+ static assert( hasElaborateAssign!(S[10]));
+ static assert(!hasElaborateAssign!(S[0]));
+ static assert(!hasElaborateAssign!(S[]));
+ }
+ {
+ static struct Member { @disable void opAssign(Member); }
+ static struct S { Member member; }
+ static assert(!hasElaborateAssign!S);
+ static assert(!hasElaborateAssign!(S[10]));
+ static assert(!hasElaborateAssign!(S[0]));
+ static assert(!hasElaborateAssign!(S[]));
+ }
+ {
+ static struct Member { @disable void opAssign(Member); }
+ static struct S { Member member; void opAssign(S) {} }
+ static assert( hasElaborateAssign!S);
+ static assert( hasElaborateAssign!(S[10]));
+ static assert(!hasElaborateAssign!(S[0]));
+ static assert(!hasElaborateAssign!(S[]));
+ }
+ {
+ static struct Member { void opAssign(Member) {} }
+ static struct S { Member member; @disable void opAssign(S); }
+ static assert(!hasElaborateAssign!S);
+ static assert(!hasElaborateAssign!(S[10]));
+ static assert(!hasElaborateAssign!(S[0]));
+ static assert(!hasElaborateAssign!(S[]));
+ }
+ {
+ static struct Member { void opAssign(Member) {} }
+ static struct S { union { Member member; } }
+ static assert(!hasElaborateAssign!S);
+ static assert(!hasElaborateAssign!(S[10]));
+ static assert(!hasElaborateAssign!(S[0]));
+ static assert(!hasElaborateAssign!(S[]));
+ }
+
+ static assert(!hasElaborateAssign!int);
+ static assert(!hasElaborateAssign!(string[]));
+ static assert(!hasElaborateAssign!Object);
+}
+
template hasIndirections(T)
{
static if (is(T == struct) || is(T == union))
enum hasIndirections = anySatisfy!(.hasIndirections, typeof(T.tupleof));
+ else static if (__traits(isAssociativeArray, T) || is(T == class) || is(T == interface))
+ enum hasIndirections = true;
else static if (is(T == E[N], E, size_t N))
enum hasIndirections = T.sizeof && is(E == void) ? true : hasIndirections!(BaseElemOf!E);
else static if (isFunctionPointer!T)
enum hasIndirections = false;
else
- enum hasIndirections = isPointer!T || isDelegate!T || isDynamicArray!T ||
- __traits(isAssociativeArray, T) || is (T == class) || is(T == interface);
+ enum hasIndirections = isPointer!T || isDelegate!T || isDynamicArray!T;
}
template hasUnsharedIndirections(T)
diff --git a/libphobos/libdruntime/core/lifetime.d b/libphobos/libdruntime/core/lifetime.d
index 7010d2a..84ffdde 100644
--- a/libphobos/libdruntime/core/lifetime.d
+++ b/libphobos/libdruntime/core/lifetime.d
@@ -2739,8 +2739,11 @@ if (is(T == class))
auto init = __traits(initSymbol, T);
void* p;
- static if (__traits(getLinkage, T) == "Windows")
+ static if (__traits(isCOMClass, T))
{
+ // If this is a COM class we allocate it using malloc.
+ // This allows the reference counting to outlive the reference known about by the GC.
+
p = pureMalloc(init.length);
if (!p)
onOutOfMemoryError();
diff --git a/libphobos/libdruntime/core/memory.d b/libphobos/libdruntime/core/memory.d
index 001c315..63a3c2e 100644
--- a/libphobos/libdruntime/core/memory.d
+++ b/libphobos/libdruntime/core/memory.d
@@ -100,6 +100,10 @@
* License: $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost License 1.0)
* Authors: Sean Kelly, Alex Rønne Petersen
* Source: $(DRUNTIMESRC core/_memory.d)
+ * Macros:
+ * WARN_UNINITIALIZED=$(RED Warning):
+ * $1 will be uninitialized, and may happen to hold pointers to GC memory.
+ * Consider zeroing out any uninitialized bytes which won't be immediately written to.
*/
module core.memory;
@@ -400,7 +404,7 @@ extern(D):
* a = A bit field containing any bits to set for this memory block.
*
* Returns:
- * The result of a call to getAttr after the specified bits have been
+ * The result of a call to $(LREF getAttr) after the specified bits have been
* set.
*/
static uint setAttr( const scope void* p, uint a ) nothrow
@@ -427,7 +431,7 @@ extern(D):
* a = A bit field containing any bits to clear for this memory block.
*
* Returns:
- * The result of a call to getAttr after the specified bits have been
+ * The result of a call to $(LREF getAttr) after the specified bits have been
* cleared.
*/
static uint clrAttr( const scope void* p, uint a ) nothrow
@@ -461,6 +465,8 @@ extern(C):
* A reference to the allocated memory or null if insufficient memory
* is available.
*
+ * $(WARN_UNINITIALIZED Allocated memory)
+ *
* Throws:
* OutOfMemoryError on allocation failure.
*/
@@ -472,7 +478,7 @@ extern(C):
/**
* Requests an aligned block of managed memory from the garbage collector.
- * This memory may be deleted at will with a call to free, or it may be
+ * This memory may be deleted at will with a call to $(LREF free), or it may be
* discarded and cleaned up automatically during a collection run. If
* allocation fails, this function will call onOutOfMemory which is
* expected to throw an OutOfMemoryError.
@@ -487,6 +493,8 @@ extern(C):
* Information regarding the allocated memory block or BlkInfo.init on
* error.
*
+ * $(WARN_UNINITIALIZED Allocated memory)
+ *
* Throws:
* OutOfMemoryError on allocation failure.
*/
@@ -564,6 +572,8 @@ extern(C):
* zero or the pointer does not point to the base of an GC allocated
* memory block.
*
+ * $(WARN_UNINITIALIZED Any extra bytes past the initial size)
+ *
* Throws:
* `OutOfMemoryError` on allocation failure.
*/
@@ -608,6 +618,8 @@ extern(C):
* The size in bytes of the extended memory block referenced by p or zero
* if no extension occurred.
*
+ * $(WARN_UNINITIALIZED Any extension bytes)
+ *
* Note:
* Extend may also be used to extend slices (or memory blocks with
* $(LREF APPENDABLE) info). However, use the return value only
@@ -669,7 +681,7 @@ extern(C):
* If p references memory not originally allocated by this garbage
* collector, if p points to the interior of a memory block, or if this
* method is called from a finalizer, no action will be taken. The block
- * will not be finalized regardless of whether the FINALIZE attribute is
+ * will not be finalized regardless of whether the $(LREF FINALIZE) attribute is
* set. If finalization is desired, call $(REF1 destroy, object) prior to `GC.free`.
*
* Params:
@@ -707,7 +719,7 @@ extern(D):
/**
* Returns the true size of the memory block referenced by p. This value
- * represents the maximum number of bytes for which a call to realloc may
+ * represents the maximum number of bytes for which a call to $(LREF realloc) may
* resize the existing block in place. If p references memory not
* originally allocated by this garbage collector, points to the interior
* of a memory block, or if p is null, zero will be returned.
diff --git a/libphobos/libdruntime/core/thread/osthread.d b/libphobos/libdruntime/core/thread/osthread.d
index cf93094..2379f79 100644
--- a/libphobos/libdruntime/core/thread/osthread.d
+++ b/libphobos/libdruntime/core/thread/osthread.d
@@ -1237,7 +1237,7 @@ private extern (D) ThreadBase attachThread(ThreadBase _thisThread) @nogc nothrow
atomicStore!(MemoryOrder.raw)(thisThread.toThread.m_isRunning, true);
}
thisThread.m_isDaemon = true;
- thisThread.tlsGCdataInit();
+ thisThread.tlsRTdataInit();
Thread.setThis( thisThread );
version (Darwin)
@@ -1312,7 +1312,7 @@ version (Windows)
if ( addr == GetCurrentThreadId() )
{
thisThread.m_hndl = GetCurrentThreadHandle();
- thisThread.tlsGCdataInit();
+ thisThread.tlsRTdataInit();
Thread.setThis( thisThread );
}
else
@@ -1320,7 +1320,7 @@ version (Windows)
thisThread.m_hndl = OpenThreadHandle( addr );
impersonate_thread(addr,
{
- thisThread.tlsGCdataInit();
+ thisThread.tlsRTdataInit();
Thread.setThis( thisThread );
});
}
diff --git a/libphobos/libdruntime/core/thread/threadbase.d b/libphobos/libdruntime/core/thread/threadbase.d
index cb13e9a..f385407 100644
--- a/libphobos/libdruntime/core/thread/threadbase.d
+++ b/libphobos/libdruntime/core/thread/threadbase.d
@@ -32,9 +32,6 @@ private
alias ScanDg = void delegate(void* pstart, void* pend) nothrow;
alias rt_tlsgc_scan =
externDFunc!("rt.tlsgc.scan", void function(void*, scope ScanDg) nothrow);
-
- alias rt_tlsgc_processGCMarks =
- externDFunc!("rt.tlsgc.processGCMarks", void function(void*, scope IsMarkedDg) nothrow);
}
@@ -131,9 +128,14 @@ class ThreadBase
return (no_context || not_registered);
}
- package void tlsGCdataInit() nothrow @nogc
+ ref void* tlsGCData() nothrow @nogc
{
- m_tlsgcdata = rt_tlsgc_init();
+ return m_tlsgcdata;
+ }
+
+ package void tlsRTdataInit() nothrow @nogc
+ {
+ m_tlsrtdata = rt_tlsgc_init();
}
package void initDataStorage() nothrow
@@ -142,18 +144,18 @@ class ThreadBase
m_main.bstack = getStackBottom();
m_main.tstack = m_main.bstack;
- tlsGCdataInit();
+ tlsRTdataInit();
}
package void destroyDataStorage() nothrow @nogc
{
- rt_tlsgc_destroy(m_tlsgcdata);
- m_tlsgcdata = null;
+ rt_tlsgc_destroy(m_tlsrtdata);
+ m_tlsrtdata = null;
}
package void destroyDataStorageIfAvail() nothrow @nogc
{
- if (m_tlsgcdata)
+ if (m_tlsrtdata)
destroyDataStorage();
}
@@ -477,6 +479,7 @@ package(core.thread):
StackContext* m_curr;
bool m_lock;
private void* m_tlsgcdata;
+ private void* m_tlsrtdata;
///////////////////////////////////////////////////////////////////////////
// Thread Context and GC Scanning Support
@@ -1112,8 +1115,8 @@ private void scanAllTypeImpl(scope ScanAllThreadsTypeFn scan, void* curStackTop)
scanWindowsOnly(scan, t);
}
- if (t.m_tlsgcdata !is null)
- rt_tlsgc_scan(t.m_tlsgcdata, (p1, p2) => scan(ScanType.tls, p1, p2));
+ if (t.m_tlsrtdata !is null)
+ rt_tlsgc_scan(t.m_tlsrtdata, (p1, p2) => scan(ScanType.tls, p1, p2));
}
}
@@ -1163,43 +1166,15 @@ package void onThreadError(string msg) nothrow @nogc
}
-/**
- * Indicates whether an address has been marked by the GC.
- */
-enum IsMarked : int
-{
- no, /// Address is not marked.
- yes, /// Address is marked.
- unknown, /// Address is not managed by the GC.
-}
-
-alias IsMarkedDg = int delegate(void* addr) nothrow; /// The isMarked callback function.
+// GC-specific processing of TLSGC data.
+alias ProcessTLSGCDataDg = void* delegate(void* data) nothrow;
-/**
- * This routine allows the runtime to process any special per-thread handling
- * for the GC. This is needed for taking into account any memory that is
- * referenced by non-scanned pointers but is about to be freed. That currently
- * means the array append cache.
- *
- * Params:
- * isMarked = The function used to check if $(D addr) is marked.
- *
- * In:
- * This routine must be called just prior to resuming all threads.
- */
-extern(C) void thread_processGCMarks(scope IsMarkedDg isMarked) nothrow
+void thread_processTLSGCData(ProcessTLSGCDataDg dg) nothrow
{
for (ThreadBase t = ThreadBase.sm_tbeg; t; t = t.next)
- {
- /* Can be null if collection was triggered between adding a
- * thread and calling rt_tlsgc_init.
- */
- if (t.m_tlsgcdata !is null)
- rt_tlsgc_processGCMarks(t.m_tlsgcdata, isMarked);
- }
+ t.m_tlsgcdata = dg(t.m_tlsgcdata);
}
-
/**
* Returns the stack top of the currently active stack within the calling
* thread.
diff --git a/libphobos/libdruntime/rt/lifetime.d b/libphobos/libdruntime/rt/lifetime.d
index 1de993c..86f5f82 100644
--- a/libphobos/libdruntime/rt/lifetime.d
+++ b/libphobos/libdruntime/rt/lifetime.d
@@ -15,6 +15,8 @@ module rt.lifetime;
import core.attribute : weak;
import core.internal.array.utils : __arrayStart, __arrayClearPad;
import core.memory;
+import core.internal.gc.blkcache;
+import core.internal.gc.blockmeta;
debug(PRINTF) import core.stdc.stdio;
static import rt.tlsgc;
@@ -28,17 +30,6 @@ private
extern (C) void _d_monitordelete(Object h, bool det);
- enum : size_t
- {
- PAGESIZE = 4096,
- BIGLENGTHMASK = ~(PAGESIZE - 1),
- SMALLPAD = 1,
- MEDPAD = ushort.sizeof,
- LARGEPREFIX = 16, // 16 bytes padding at the front of the array
- LARGEPAD = LARGEPREFIX + 1,
- MAXSMALLSIZE = 256-SMALLPAD,
- MAXMEDSIZE = (PAGESIZE / 2) - MEDPAD
- }
}
// Now-removed symbol, kept around for ABI
@@ -211,191 +202,6 @@ inout(TypeInfo) unqualify(return scope inout(TypeInfo) cti) pure nothrow @nogc
return ti;
}
-// size used to store the TypeInfo at the end of an allocation for structs that have a destructor
-size_t structTypeInfoSize(const TypeInfo ti) pure nothrow @nogc
-{
- if (ti && typeid(ti) is typeid(TypeInfo_Struct)) // avoid a complete dynamic type cast
- {
- auto sti = cast(TypeInfo_Struct)cast(void*)ti;
- if (sti.xdtor)
- return size_t.sizeof;
- }
- return 0;
-}
-
-/** dummy class used to lock for shared array appending */
-private class ArrayAllocLengthLock
-{}
-
-/**
- Set the allocated length of the array block. This is called
- any time an array is appended to or its length is set.
-
- The allocated block looks like this for blocks < PAGESIZE:
-
- |elem0|elem1|elem2|...|elemN-1|emptyspace|N*elemsize|
-
-
- The size of the allocated length at the end depends on the block size:
-
- a block of 16 to 256 bytes has an 8-bit length.
-
- a block with 512 to pagesize/2 bytes has a 16-bit length.
-
- For blocks >= pagesize, the length is a size_t and is at the beginning of the
- block. The reason we have to do this is because the block can extend into
- more pages, so we cannot trust the block length if it sits at the end of the
- block, because it might have just been extended. If we can prove in the
- future that the block is unshared, we may be able to change this, but I'm not
- sure it's important.
-
- In order to do put the length at the front, we have to provide 16 bytes
- buffer space in case the block has to be aligned properly. In x86, certain
- SSE instructions will only work if the data is 16-byte aligned. In addition,
- we need the sentinel byte to prevent accidental pointers to the next block.
- Because of the extra overhead, we only do this for page size and above, where
- the overhead is minimal compared to the block size.
-
- So for those blocks, it looks like:
-
- |N*elemsize|padding|elem0|elem1|...|elemN-1|emptyspace|sentinelbyte|
-
- where elem0 starts 16 bytes after the first byte.
- */
-bool __setArrayAllocLength(ref BlkInfo info, size_t newlength, bool isshared, const TypeInfo tinext, size_t oldlength = ~0) pure nothrow
-{
- import core.atomic;
-
- size_t typeInfoSize = structTypeInfoSize(tinext);
-
- if (info.size <= 256)
- {
- import core.checkedint;
-
- bool overflow;
- auto newlength_padded = addu(newlength,
- addu(SMALLPAD, typeInfoSize, overflow),
- overflow);
-
- if (newlength_padded > info.size || overflow)
- // new size does not fit inside block
- return false;
-
- auto length = cast(ubyte *)(info.base + info.size - typeInfoSize - SMALLPAD);
- if (oldlength != ~0)
- {
- if (isshared)
- {
- return cas(cast(shared)length, cast(ubyte)oldlength, cast(ubyte)newlength);
- }
- else
- {
- if (*length == cast(ubyte)oldlength)
- *length = cast(ubyte)newlength;
- else
- return false;
- }
- }
- else
- {
- // setting the initial length, no cas needed
- *length = cast(ubyte)newlength;
- }
- if (typeInfoSize)
- {
- auto typeInfo = cast(TypeInfo*)(info.base + info.size - size_t.sizeof);
- *typeInfo = cast() tinext;
- }
- }
- else if (info.size < PAGESIZE)
- {
- if (newlength + MEDPAD + typeInfoSize > info.size)
- // new size does not fit inside block
- return false;
- auto length = cast(ushort *)(info.base + info.size - typeInfoSize - MEDPAD);
- if (oldlength != ~0)
- {
- if (isshared)
- {
- return cas(cast(shared)length, cast(ushort)oldlength, cast(ushort)newlength);
- }
- else
- {
- if (*length == oldlength)
- *length = cast(ushort)newlength;
- else
- return false;
- }
- }
- else
- {
- // setting the initial length, no cas needed
- *length = cast(ushort)newlength;
- }
- if (typeInfoSize)
- {
- auto typeInfo = cast(TypeInfo*)(info.base + info.size - size_t.sizeof);
- *typeInfo = cast() tinext;
- }
- }
- else
- {
- if (newlength + LARGEPAD > info.size)
- // new size does not fit inside block
- return false;
- auto length = cast(size_t *)(info.base);
- if (oldlength != ~0)
- {
- if (isshared)
- {
- return cas(cast(shared)length, cast(size_t)oldlength, cast(size_t)newlength);
- }
- else
- {
- if (*length == oldlength)
- *length = newlength;
- else
- return false;
- }
- }
- else
- {
- // setting the initial length, no cas needed
- *length = newlength;
- }
- if (typeInfoSize)
- {
- auto typeInfo = cast(TypeInfo*)(info.base + size_t.sizeof);
- *typeInfo = cast()tinext;
- }
- }
- return true; // resize succeeded
-}
-
-/**
- get the allocation size of the array for the given block (without padding or type info)
- */
-private size_t __arrayAllocLength(ref BlkInfo info, const TypeInfo tinext) pure nothrow
-{
- if (info.size <= 256)
- return *cast(ubyte *)(info.base + info.size - structTypeInfoSize(tinext) - SMALLPAD);
-
- if (info.size < PAGESIZE)
- return *cast(ushort *)(info.base + info.size - structTypeInfoSize(tinext) - MEDPAD);
-
- return *cast(size_t *)(info.base);
-}
-
-/**
- get the padding required to allocate size bytes. Note that the padding is
- NOT included in the passed in size. Therefore, do NOT call this function
- with the size of an allocated block.
- */
-private size_t __arrayPad(size_t size, const TypeInfo tinext) nothrow pure @trusted
-{
- return size > MAXMEDSIZE ? LARGEPAD : ((size > MAXSMALLSIZE ? MEDPAD : SMALLPAD) + structTypeInfoSize(tinext));
-}
-
/**
allocate an array memory block by applying the proper padding and
assigning block attributes if not inherited from the existing block
@@ -443,206 +249,6 @@ private BlkInfo __arrayAlloc(size_t arrsize, ref BlkInfo info, const scope TypeI
}
/**
- cache for the lookup of the block info
- */
-private enum N_CACHE_BLOCKS=8;
-
-// note this is TLS, so no need to sync.
-BlkInfo *__blkcache_storage;
-
-static if (N_CACHE_BLOCKS==1)
-{
- version=single_cache;
-}
-else
-{
- //version=simple_cache; // uncomment to test simple cache strategy
- //version=random_cache; // uncomment to test random cache strategy
-
- // ensure N_CACHE_BLOCKS is power of 2.
- static assert(!((N_CACHE_BLOCKS - 1) & N_CACHE_BLOCKS));
-
- version (random_cache)
- {
- int __nextRndNum = 0;
- }
- int __nextBlkIdx;
-}
-
-@property BlkInfo *__blkcache() nothrow
-{
- if (!__blkcache_storage)
- {
- import core.stdc.stdlib;
- import core.stdc.string;
- // allocate the block cache for the first time
- immutable size = BlkInfo.sizeof * N_CACHE_BLOCKS;
- __blkcache_storage = cast(BlkInfo *)malloc(size);
- memset(__blkcache_storage, 0, size);
- }
- return __blkcache_storage;
-}
-
-// called when thread is exiting.
-static ~this()
-{
- // free the blkcache
- if (__blkcache_storage)
- {
- import core.stdc.stdlib;
- free(__blkcache_storage);
- __blkcache_storage = null;
- }
-}
-
-
-// we expect this to be called with the lock in place
-void processGCMarks(BlkInfo* cache, scope rt.tlsgc.IsMarkedDg isMarked) nothrow
-{
- // called after the mark routine to eliminate block cache data when it
- // might be ready to sweep
-
- debug(PRINTF) printf("processing GC Marks, %x\n", cache);
- if (cache)
- {
- debug(PRINTF) foreach (i; 0 .. N_CACHE_BLOCKS)
- {
- printf("cache entry %d has base ptr %x\tsize %d\tflags %x\n", i, cache[i].base, cache[i].size, cache[i].attr);
- }
- auto cache_end = cache + N_CACHE_BLOCKS;
- for (;cache < cache_end; ++cache)
- {
- if (cache.base != null && !isMarked(cache.base))
- {
- debug(PRINTF) printf("clearing cache entry at %x\n", cache.base);
- cache.base = null; // clear that data.
- }
- }
- }
-}
-
-unittest
-{
- // Bugzilla 10701 - segfault in GC
- ubyte[] result; result.length = 4096;
- GC.free(result.ptr);
- GC.collect();
-}
-
-/**
- Get the cached block info of an interior pointer. Returns null if the
- interior pointer's block is not cached.
-
- NOTE: The base ptr in this struct can be cleared asynchronously by the GC,
- so any use of the returned BlkInfo should copy it and then check the
- base ptr of the copy before actually using it.
-
- TODO: Change this function so the caller doesn't have to be aware of this
- issue. Either return by value and expect the caller to always check
- the base ptr as an indication of whether the struct is valid, or set
- the BlkInfo as a side-effect and return a bool to indicate success.
- */
-BlkInfo *__getBlkInfo(void *interior) nothrow
-{
- BlkInfo *ptr = __blkcache;
- version (single_cache)
- {
- if (ptr.base && ptr.base <= interior && (interior - ptr.base) < ptr.size)
- return ptr;
- return null; // not in cache.
- }
- else version (simple_cache)
- {
- foreach (i; 0..N_CACHE_BLOCKS)
- {
- if (ptr.base && ptr.base <= interior && (interior - ptr.base) < ptr.size)
- return ptr;
- ptr++;
- }
- }
- else
- {
- // try to do a smart lookup, using __nextBlkIdx as the "head"
- auto curi = ptr + __nextBlkIdx;
- for (auto i = curi; i >= ptr; --i)
- {
- if (i.base && i.base <= interior && cast(size_t)(interior - i.base) < i.size)
- return i;
- }
-
- for (auto i = ptr + N_CACHE_BLOCKS - 1; i > curi; --i)
- {
- if (i.base && i.base <= interior && cast(size_t)(interior - i.base) < i.size)
- return i;
- }
- }
- return null; // not in cache.
-}
-
-void __insertBlkInfoCache(BlkInfo bi, BlkInfo *curpos) nothrow
-{
- version (single_cache)
- {
- *__blkcache = bi;
- }
- else
- {
- version (simple_cache)
- {
- if (curpos)
- *curpos = bi;
- else
- {
- // note, this is a super-simple algorithm that does not care about
- // most recently used. It simply uses a round-robin technique to
- // cache block info. This means that the ordering of the cache
- // doesn't mean anything. Certain patterns of allocation may
- // render the cache near-useless.
- __blkcache[__nextBlkIdx] = bi;
- __nextBlkIdx = (__nextBlkIdx+1) & (N_CACHE_BLOCKS - 1);
- }
- }
- else version (random_cache)
- {
- // strategy: if the block currently is in the cache, move the
- // current block index to the a random element and evict that
- // element.
- auto cache = __blkcache;
- if (!curpos)
- {
- __nextBlkIdx = (__nextRndNum = 1664525 * __nextRndNum + 1013904223) & (N_CACHE_BLOCKS - 1);
- curpos = cache + __nextBlkIdx;
- }
- else
- {
- __nextBlkIdx = curpos - cache;
- }
- *curpos = bi;
- }
- else
- {
- //
- // strategy: If the block currently is in the cache, swap it with
- // the head element. Otherwise, move the head element up by one,
- // and insert it there.
- //
- auto cache = __blkcache;
- if (!curpos)
- {
- __nextBlkIdx = (__nextBlkIdx+1) & (N_CACHE_BLOCKS - 1);
- curpos = cache + __nextBlkIdx;
- }
- else if (curpos !is cache + __nextBlkIdx)
- {
- *curpos = cache[__nextBlkIdx];
- curpos = cache + __nextBlkIdx;
- }
- *curpos = bi;
- }
- }
-}
-
-/**
Shrink the "allocated" length of an array to be the exact size of the array.
It doesn't matter what the current allocated length of the array is, the
diff --git a/libphobos/libdruntime/rt/tlsgc.d b/libphobos/libdruntime/rt/tlsgc.d
index b13a1b3..f1dcc59 100644
--- a/libphobos/libdruntime/rt/tlsgc.d
+++ b/libphobos/libdruntime/rt/tlsgc.d
@@ -23,7 +23,6 @@ static import rt.lifetime, rt.sections;
struct Data
{
typeof(rt.sections.initTLSRanges()) tlsRanges;
- rt.lifetime.BlkInfo** blockInfoCache;
}
/**
@@ -39,8 +38,6 @@ void* init() nothrow @nogc
// do module specific initialization
data.tlsRanges = rt.sections.initTLSRanges();
- data.blockInfoCache = &rt.lifetime.__blkcache_storage;
-
return data;
}
@@ -67,16 +64,3 @@ void scan(void* data, scope ScanDg dg) nothrow
// do module specific marking
rt.sections.scanTLSRanges((cast(Data*)data).tlsRanges, dg);
}
-
-alias int delegate(void* addr) nothrow IsMarkedDg;
-
-/**
- * GC sweep hook, called FOR each thread. Can be used to free
- * additional thread local memory or associated data structures. Note
- * that only memory allocated from the GC can have marks.
- */
-void processGCMarks(void* data, scope IsMarkedDg dg) nothrow
-{
- // do module specific sweeping
- rt.lifetime.processGCMarks(*(cast(Data*)data).blockInfoCache, dg);
-}
diff --git a/libphobos/src/MERGE b/libphobos/src/MERGE
index f7c5223..0522cf8 100644
--- a/libphobos/src/MERGE
+++ b/libphobos/src/MERGE
@@ -1,4 +1,4 @@
-2a730adc07b0a708b31dd8e592f56df4adbaf4be
+dbc09d8230f0e273af8a78546e5431a7783478b5
The first line of this file holds the git revision number of the last
merge done from the dlang/phobos repository.
diff --git a/libphobos/src/std/algorithm/searching.d b/libphobos/src/std/algorithm/searching.d
index 42a9df5..b7119d2 100644
--- a/libphobos/src/std/algorithm/searching.d
+++ b/libphobos/src/std/algorithm/searching.d
@@ -3735,6 +3735,47 @@ if (isInputRange!Range && !isInfinite!Range &&
assert(arr.minElement!"a.val".val == 0);
}
+// https://issues.dlang.org/show_bug.cgi?id=24827
+@safe unittest
+{
+ static struct S
+ {
+ int i;
+ bool destroyed;
+
+ this(int i) @safe
+ {
+ this.i = i;
+ }
+
+ ~this() @safe
+ {
+ destroyed = true;
+ }
+
+ bool opEquals()(auto ref S rhs)
+ {
+ return this.i == rhs.i;
+ }
+
+ int opCmp()(auto ref S rhs)
+ {
+ if (this.i < rhs.i)
+ return -1;
+
+ return this.i == rhs.i ? 0 : 1;
+ }
+
+ @safe invariant
+ {
+ assert(!destroyed);
+ }
+ }
+
+ auto arr = [S(19), S(2), S(145), S(7)];
+ assert(minElement(arr) == S(2));
+}
+
/**
Iterates the passed range and returns the maximal element.
A custom mapping function can be passed to `map`.
@@ -3888,6 +3929,47 @@ if (isInputRange!Range && !isInfinite!Range &&
assert(arr[0].getI == 2);
}
+// https://issues.dlang.org/show_bug.cgi?id=24827
+@safe unittest
+{
+ static struct S
+ {
+ int i;
+ bool destroyed;
+
+ this(int i) @safe
+ {
+ this.i = i;
+ }
+
+ ~this() @safe
+ {
+ destroyed = true;
+ }
+
+ bool opEquals()(auto ref S rhs)
+ {
+ return this.i == rhs.i;
+ }
+
+ int opCmp()(auto ref S rhs)
+ {
+ if (this.i < rhs.i)
+ return -1;
+
+ return this.i == rhs.i ? 0 : 1;
+ }
+
+ @safe invariant
+ {
+ assert(!destroyed);
+ }
+ }
+
+ auto arr = [S(19), S(2), S(145), S(7)];
+ assert(maxElement(arr) == S(145));
+}
+
// minPos
/**
Computes a subrange of `range` starting at the first occurrence of `range`'s
diff --git a/libphobos/src/std/array.d b/libphobos/src/std/array.d
index acd5311..3313dbb 100644
--- a/libphobos/src/std/array.d
+++ b/libphobos/src/std/array.d
@@ -3639,6 +3639,7 @@ if (isDynamicArray!A)
}
else
{
+ import core.stdc.string : memcpy, memset;
// Time to reallocate.
// We need to almost duplicate what's in druntime, except we
// have better access to the capacity field.
@@ -3650,6 +3651,15 @@ if (isDynamicArray!A)
if (u)
{
// extend worked, update the capacity
+ // if the type has indirections, we need to zero any new
+ // data that we requested, as the existing data may point
+ // at large unused blocks.
+ static if (hasIndirections!T)
+ {
+ immutable addedSize = u - (_data.capacity * T.sizeof);
+ () @trusted { memset(_data.arr.ptr + _data.capacity, 0, addedSize); }();
+ }
+
_data.capacity = u / T.sizeof;
return;
}
@@ -3665,10 +3675,20 @@ if (isDynamicArray!A)
auto bi = (() @trusted => GC.qalloc(nbytes, blockAttribute!T))();
_data.capacity = bi.size / T.sizeof;
- import core.stdc.string : memcpy;
if (len)
() @trusted { memcpy(bi.base, _data.arr.ptr, len * T.sizeof); }();
+
_data.arr = (() @trusted => (cast(Unqual!T*) bi.base)[0 .. len])();
+
+ // we requested new bytes that are not in the existing
+ // data. If T has pointers, then this new data could point at stale
+ // objects from the last time this block was allocated. Zero that
+ // new data out, it may point at large unused blocks!
+ static if (hasIndirections!T)
+ () @trusted {
+ memset(bi.base + (len * T.sizeof), 0, (newlen - len) * T.sizeof);
+ }();
+
_data.tryExtendBlock = true;
// leave the old data, for safety reasons
}
@@ -4047,6 +4067,43 @@ if (isDynamicArray!A)
app2.toString();
}
+// https://issues.dlang.org/show_bug.cgi?id=24856
+@system unittest
+{
+ import core.memory : GC;
+ import std.stdio : writeln;
+ import std.algorithm.searching : canFind;
+ GC.disable();
+ scope(exit) GC.enable();
+ void*[] freeme;
+ // generate some poison blocks to allocate from.
+ auto poison = cast(void*) 0xdeadbeef;
+ foreach (i; 0 .. 10)
+ {
+ auto blk = new void*[7];
+ blk[] = poison;
+ freeme ~= blk.ptr;
+ }
+
+ foreach (p; freeme)
+ GC.free(p);
+
+ int tests = 0;
+ foreach (i; 0 .. 10)
+ {
+ Appender!(void*[]) app;
+ app.put(null);
+ // if not a realloc of one of the deadbeef pointers, continue
+ if (!freeme.canFind(app.data.ptr))
+ continue;
+ ++tests;
+ assert(!app.data.ptr[0 .. app.capacity].canFind(poison), "Appender not zeroing data!");
+ }
+ // just notify in the log whether this test actually could be done.
+ if (tests == 0)
+ writeln("WARNING: test of Appender zeroing did not occur");
+}
+
//Calculates an efficient growth scheme based on the old capacity
//of data, and the minimum requested capacity.
//arg curLen: The current length
diff --git a/libphobos/src/std/bitmanip.d b/libphobos/src/std/bitmanip.d
index 15211a3..f8a97df 100644
--- a/libphobos/src/std/bitmanip.d
+++ b/libphobos/src/std/bitmanip.d
@@ -106,7 +106,7 @@ private template createAccessors(
enum RightShiftOp = ">>>=";
}
- static if (is(T == bool))
+ static if (is(T : bool))
{
enum createAccessors =
// getter
@@ -4676,3 +4676,24 @@ if (isIntegral!T)
foreach (i; 0 .. 63)
assert(bitsSet(1UL << i).equal([i]));
}
+
+// Fix https://issues.dlang.org/show_bug.cgi?id=24095
+@safe @nogc pure unittest
+{
+ enum Bar : bool
+ {
+ a,
+ b,
+ }
+
+ struct Foo
+ {
+ mixin(bitfields!(Bar, "bar", 1, ubyte, "", 7,));
+ }
+
+ Foo foo;
+ foo.bar = Bar.a;
+ assert(foo.bar == Bar.a);
+ foo.bar = Bar.b;
+ assert(foo.bar == Bar.b);
+}
diff --git a/libphobos/src/std/container/dlist.d b/libphobos/src/std/container/dlist.d
index 728aacd..8f7df10 100644
--- a/libphobos/src/std/container/dlist.d
+++ b/libphobos/src/std/container/dlist.d
@@ -185,6 +185,7 @@ Implements a doubly-linked list.
struct DList(T)
{
import std.range : Take;
+ import std.traits : isMutable;
/*
A Node with a Payload. A PayNode.
@@ -220,7 +221,10 @@ struct DList(T)
{
import std.algorithm.mutation : move;
- return (new PayNode(BaseNode(prev, next), move(arg))).asBaseNode();
+ static if (isMutable!Stuff)
+ return (new PayNode(BaseNode(prev, next), move(arg))).asBaseNode();
+ else
+ return (new PayNode(BaseNode(prev, next), arg)).asBaseNode();
}
void initialize() nothrow @safe pure
@@ -1149,3 +1153,22 @@ private:
list.removeFront();
assert(list[].walkLength == 0);
}
+
+// https://issues.dlang.org/show_bug.cgi?id=24637
+@safe unittest
+{
+ import std.algorithm.comparison : equal;
+
+ struct A
+ {
+ int c;
+ }
+
+ DList!A B;
+ B.insert(A(1));
+ assert(B[].equal([A(1)]));
+
+ const a = A(3);
+ B.insert(a);
+ assert(B[].equal([A(1), A(3)]));
+}
diff --git a/libphobos/src/std/conv.d b/libphobos/src/std/conv.d
index 9c9d8db..5e0165c 100644
--- a/libphobos/src/std/conv.d
+++ b/libphobos/src/std/conv.d
@@ -2560,9 +2560,6 @@ Lerr:
string s1 = "123";
auto a1 = parse!(int, string, Yes.doCount)(s1);
assert(a1.data == 123 && a1.count == 3);
-
- // parse only accepts lvalues
- static assert(!__traits(compiles, parse!int("123")));
}
///
@@ -5611,6 +5608,14 @@ Params:
Returns:
a `string`, a `wstring` or a `dstring`, according to the type of hexData.
+
+See_Also:
+ Use $(REF fromHexString, std, digest) for run time conversions.
+ Note, these functions are not drop-in replacements and have different
+ input requirements.
+ This template inherits its data syntax from builtin
+ $(LINK2 $(ROOT_DIR)spec/lex.html#hex_string, hex strings).
+ See $(REF fromHexString, std, digest) for its own respective requirements.
*/
template hexString(string hexData)
if (hexData.isHexLiteral)
diff --git a/libphobos/src/std/digest/package.d b/libphobos/src/std/digest/package.d
index ea3738b..8274680 100644
--- a/libphobos/src/std/digest/package.d
+++ b/libphobos/src/std/digest/package.d
@@ -1212,3 +1212,345 @@ if (isInputRange!R1 && isInputRange!R2 && !isInfinite!R1 && !isInfinite!R2 &&
assert(!secureEqual(hex1, hex2));
}
}
+
+/**
+ * Validates a hex string.
+ *
+ * Checks whether all characters following an optional "0x" prefix
+ * are valid hexadecimal digits.
+ *
+ * Params:
+ *     hex = hexadecimal-encoded byte array
+ * Returns:
+ *     true, if the input is a valid hex string
+ */
+bool isHexString(String)(String hex) @safe pure nothrow @nogc
+if (isSomeString!String)
+{
+ import std.ascii : isHexDigit;
+
+ if ((hex.length >= 2) && (hex[0 .. 2] == "0x"))
+ {
+ hex = hex[2 .. $];
+ }
+
+ foreach (digit; hex)
+ {
+ if (!digit.isHexDigit)
+ {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+///
+@safe unittest
+{
+ assert(isHexString("0x0123456789ABCDEFabcdef"));
+ assert(isHexString("0123456789ABCDEFabcdef"));
+ assert(!isHexString("g"));
+ assert(!isHexString("#"));
+}
+
+/**
+ * Converts a hex text string to a range of bytes.
+ *
+ * The input to this function MUST be valid.
+ * $(REF isHexString, std, digest) can be used to check for this if needed.
+ *
+ * Params:
+ *     hex = String representation of a hexadecimal-encoded byte array.
+ * Returns:
+ * A forward range of bytes.
+ */
+auto fromHexStringAsRange(String)(String hex) @safe pure nothrow @nogc
+if (isSomeString!String)
+{
+ return HexStringDecoder!String(hex);
+}
+
+///
+@safe unittest
+{
+ import std.range.primitives : ElementType, isForwardRange;
+ import std.traits : ReturnType;
+
+ // The decoder implements a forward range.
+ static assert(isForwardRange!(ReturnType!(fromHexStringAsRange!string)));
+ static assert(isForwardRange!(ReturnType!(fromHexStringAsRange!wstring)));
+ static assert(isForwardRange!(ReturnType!(fromHexStringAsRange!dstring)));
+
+ // The element type of the range is always `ubyte`.
+ static assert(
+ is(ElementType!(ReturnType!(fromHexStringAsRange!string)) == ubyte)
+ );
+ static assert(
+ is(ElementType!(ReturnType!(fromHexStringAsRange!wstring)) == ubyte)
+ );
+ static assert(
+ is(ElementType!(ReturnType!(fromHexStringAsRange!dstring)) == ubyte)
+ );
+}
+
+@safe unittest
+{
+ import std.array : staticArray;
+
+ // `staticArray` consumes the range returned by `fromHexStringAsRange`.
+ assert("0x0000ff".fromHexStringAsRange.staticArray!3 == [0, 0, 0xFF]);
+ assert("0x0000ff"w.fromHexStringAsRange.staticArray!3 == [0, 0, 0xFF]);
+ assert("0x0000ff"d.fromHexStringAsRange.staticArray!3 == [0, 0, 0xFF]);
+ assert("0xff12ff".fromHexStringAsRange.staticArray!1 == [0xFF]);
+ assert("0x12ff".fromHexStringAsRange.staticArray!2 == [0x12, 255]);
+ assert(
+ "0x3AaAA".fromHexStringAsRange.staticArray!4 == [0x3, 0xAA, 0xAA, 0x00]
+ );
+}
+
+/**
+ * Converts a hex text string to an array of bytes.
+ *
+ * Params:
+ *     hex = String representation of a hexadecimal-encoded byte array.
+ * Returns:
+ *     A newly allocated array of bytes.
+ * Throws:
+ * Exception on invalid input.
+ * Example:
+ * ---
+ * ubyte[] dby = "0xBA".fromHexString;
+ * ---
+ * See_Also:
+ *     $(REF fromHexStringAsRange, std, digest) for a range version of the function.
+ */
+ubyte[] fromHexString(String)(String hex) @safe pure
+if (isSomeString!String)
+{
+ // This function is trivial, yet necessary for consistency.
+ // It provides a similar API to its `toHexString` counterpart.
+
+ if (!hex.isHexString)
+ {
+ import std.conv : text;
+
+ throw new Exception(
+ "The provided character sequence `"
+ ~ hex.text
+ ~ "` is not a valid hex string."
+ );
+ }
+
+ if ((hex.length >= 2) && (hex[0 .. 2] == "0x"))
+ {
+ hex = hex[2 .. $];
+ }
+
+ auto decoder = HexStringDecoder!String(hex);
+ auto result = new ubyte[](decoder.length);
+
+ size_t idx = 0;
+ foreach (b; decoder)
+ {
+ result[idx++] = b;
+ }
+ return result;
+}
+
+///
+@safe unittest
+{
+ // Single byte
+ assert("0xff".fromHexString == [255]);
+ assert("0xff"w.fromHexString == [255]);
+ assert("0xff"d.fromHexString == [255]);
+ assert("0xC0".fromHexString == [192]);
+ assert("0x00".fromHexString == [0]);
+
+ // Nothing
+ assert("".fromHexString == []);
+ assert(""w.fromHexString == []);
+ assert(""d.fromHexString == []);
+
+ // Nothing but a prefix
+ assert("0x".fromHexString == []);
+ assert("0x"w.fromHexString == []);
+ assert("0x"d.fromHexString == []);
+
+ // Half a byte
+ assert("0x1".fromHexString == [0x01]);
+ assert("0x1"w.fromHexString == [0x01]);
+ assert("0x1"d.fromHexString == [0x01]);
+
+ // Mixed case is fine.
+ assert("0xAf".fromHexString == [0xAF]);
+ assert("0xaF".fromHexString == [0xAF]);
+
+ // Multiple bytes
+ assert("0xfff".fromHexString == [0x0F, 0xFF]);
+ assert("0x123AaAa".fromHexString == [0x01, 0x23, 0xAA, 0xAA]);
+ assert("EBBBBF".fromHexString == [0xEB, 0xBB, 0xBF]);
+
+ // md5 sum
+ assert("d41d8cd98f00b204e9800998ecf8427e".fromHexString == [
+ 0xD4, 0x1D, 0x8C, 0xD9, 0x8F, 0x00, 0xB2, 0x04,
+ 0xE9, 0x80, 0x09, 0x98, 0xEC, 0xF8, 0x42, 0x7E,
+ ]);
+}
+
+///
+@safe unittest
+{
+ // Cycle self-test
+ const ubyte[] initial = [0x00, 0x12, 0x34, 0xEB];
+ assert(initial == initial.toHexString().fromHexString());
+}
+
+private ubyte hexDigitToByte(dchar hexDigit) @safe pure nothrow @nogc
+{
+ static int hexDigitToByteImpl(dchar hexDigit)
+ {
+ if (hexDigit >= '0' && hexDigit <= '9')
+ {
+ return hexDigit - '0';
+ }
+ else if (hexDigit >= 'A' && hexDigit <= 'F')
+ {
+ return hexDigit - 'A' + 10;
+ }
+ else if (hexDigit >= 'a' && hexDigit <= 'f')
+ {
+ return hexDigit - 'a' + 10;
+ }
+
+ assert(false, "Cannot convert invalid hex digit.");
+ }
+
+ return hexDigitToByteImpl(hexDigit) & 0xFF;
+}
+
+@safe unittest
+{
+ assert(hexDigitToByte('0') == 0x0);
+ assert(hexDigitToByte('9') == 0x9);
+ assert(hexDigitToByte('a') == 0xA);
+ assert(hexDigitToByte('b') == 0xB);
+ assert(hexDigitToByte('A') == 0xA);
+ assert(hexDigitToByte('C') == 0xC);
+}
+
+private struct HexStringDecoder(String)
+if (isSomeString!String)
+{
+ String hex;
+ ubyte front;
+ bool empty;
+
+ this(String hex)
+ {
+ if ((hex.length >= 2) && (hex[0 .. 2] == "0x"))
+ {
+ hex = hex[2 .. $];
+ }
+
+ if (hex.length == 0)
+ {
+ empty = true;
+ return;
+ }
+
+ const oddInputLength = (hex.length % 2 == 1);
+
+ if (oddInputLength)
+ {
+ front = hexDigitToByte(hex[0]);
+ hex = hex[1 .. $];
+ }
+ else
+ {
+ front = cast(ubyte)(hexDigitToByte(hex[0]) << 4 | hexDigitToByte(hex[1]));
+ hex = hex[2 .. $];
+ }
+
+ this.hex = hex;
+ }
+
+ void popFront()
+ {
+ if (hex.length == 0)
+ {
+ empty = true;
+ return;
+ }
+
+ front = cast(ubyte)(hexDigitToByte(hex[0]) << 4 | hexDigitToByte(hex[1]));
+ hex = hex[2 .. $];
+ }
+
+ typeof(this) save()
+ {
+ return this;
+ }
+
+ size_t length() const
+ {
+ if (this.empty)
+ {
+ return 0;
+ }
+
+ // current front + remainder
+ return 1 + (hex.length >> 1);
+ }
+}
+
+@safe unittest
+{
+ auto decoder = HexStringDecoder!string("");
+ assert(decoder.empty);
+ assert(decoder.length == 0);
+
+ decoder = HexStringDecoder!string("0x");
+ assert(decoder.empty);
+ assert(decoder.length == 0);
+}
+
+@safe unittest
+{
+ auto decoder = HexStringDecoder!string("0x0077FF");
+ assert(!decoder.empty);
+ assert(decoder.length == 3);
+ assert(decoder.front == 0x00);
+
+ decoder.popFront();
+ assert(!decoder.empty);
+ assert(decoder.length == 2);
+ assert(decoder.front == 0x77);
+
+ decoder.popFront();
+ assert(!decoder.empty);
+ assert(decoder.length == 1);
+ assert(decoder.front == 0xFF);
+
+ decoder.popFront();
+ assert(decoder.length == 0);
+ assert(decoder.empty);
+}
+
+@safe unittest
+{
+ auto decoder = HexStringDecoder!string("0x7FF");
+ assert(!decoder.empty);
+ assert(decoder.length == 2);
+ assert(decoder.front == 0x07);
+
+ decoder.popFront();
+ assert(!decoder.empty);
+ assert(decoder.length == 1);
+ assert(decoder.front == 0xFF);
+
+ decoder.popFront();
+ assert(decoder.length == 0);
+ assert(decoder.empty);
+}
diff --git a/libphobos/src/std/format/internal/write.d b/libphobos/src/std/format/internal/write.d
index 8b60565..6fd468d 100644
--- a/libphobos/src/std/format/internal/write.d
+++ b/libphobos/src/std/format/internal/write.d
@@ -1839,24 +1839,26 @@ template hasToString(T, Char)
else static if (is(typeof(
(T val) {
const FormatSpec!Char f;
- static struct S {void put(scope Char s){}}
+ static struct S
+ {
+ @disable this(this);
+ void put(scope Char s){}
+ }
S s;
val.toString(s, f);
- static assert(!__traits(compiles, val.toString(s, FormatSpec!Char())),
- "force toString to take parameters by ref");
- static assert(!__traits(compiles, val.toString(S(), f)),
- "force toString to take parameters by ref");
})))
{
enum hasToString = HasToStringResult.customPutWriterFormatSpec;
}
else static if (is(typeof(
(T val) {
- static struct S {void put(scope Char s){}}
+ static struct S
+ {
+ @disable this(this);
+ void put(scope Char s){}
+ }
S s;
val.toString(s);
- static assert(!__traits(compiles, val.toString(S())),
- "force toString to take parameters by ref");
})))
{
enum hasToString = HasToStringResult.customPutWriter;
@@ -1996,9 +1998,10 @@ template hasToString(T, Char)
static assert(hasToString!(G, char) == customPutWriter);
static assert(hasToString!(H, char) == customPutWriterFormatSpec);
static assert(hasToString!(I, char) == customPutWriterFormatSpec);
- static assert(hasToString!(J, char) == hasSomeToString);
+ static assert(hasToString!(J, char) == hasSomeToString
+ || hasToString!(J, char) == constCharSinkFormatSpec); // depends on -preview=rvaluerefparam
static assert(hasToString!(K, char) == constCharSinkFormatSpec);
- static assert(hasToString!(L, char) == none);
+ static assert(hasToString!(L, char) == customPutWriterFormatSpec);
static if (hasPreviewIn)
{
static assert(hasToString!(M, char) == inCharSinkFormatSpec);
@@ -2105,9 +2108,10 @@ template hasToString(T, Char)
static assert(hasToString!(G, char) == customPutWriter);
static assert(hasToString!(H, char) == customPutWriterFormatSpec);
static assert(hasToString!(I, char) == customPutWriterFormatSpec);
- static assert(hasToString!(J, char) == hasSomeToString);
+ static assert(hasToString!(J, char) == hasSomeToString
+ || hasToString!(J, char) == constCharSinkFormatSpec); // depends on -preview=rvaluerefparam
static assert(hasToString!(K, char) == constCharSinkFormatSpec);
- static assert(hasToString!(L, char) == none);
+ static assert(hasToString!(L, char) == HasToStringResult.customPutWriterFormatSpec);
static if (hasPreviewIn)
{
static assert(hasToString!(M, char) == inCharSinkFormatSpec);
@@ -2125,9 +2129,10 @@ template hasToString(T, Char)
static assert(hasToString!(inout(G), char) == customPutWriter);
static assert(hasToString!(inout(H), char) == customPutWriterFormatSpec);
static assert(hasToString!(inout(I), char) == customPutWriterFormatSpec);
- static assert(hasToString!(inout(J), char) == hasSomeToString);
+ static assert(hasToString!(inout(J), char) == hasSomeToString
+ || hasToString!(inout(J), char) == constCharSinkFormatSpec); // depends on -preview=rvaluerefparam
static assert(hasToString!(inout(K), char) == constCharSinkFormatSpec);
- static assert(hasToString!(inout(L), char) == none);
+ static assert(hasToString!(inout(L), char) == customPutWriterFormatSpec);
static if (hasPreviewIn)
{
static assert(hasToString!(inout(M), char) == inCharSinkFormatSpec);
diff --git a/libphobos/src/std/format/read.d b/libphobos/src/std/format/read.d
index da9d0dc..e2f9b94 100644
--- a/libphobos/src/std/format/read.d
+++ b/libphobos/src/std/format/read.d
@@ -198,7 +198,8 @@ module std.format.read;
import std.format.spec : FormatSpec;
import std.format.internal.read;
-import std.traits : isSomeString;
+import std.meta : allSatisfy;
+import std.traits : isSomeString, isType;
/**
Reads an input range according to a format string and stores the read
@@ -300,7 +301,7 @@ uint formattedRead(Range, Char, Args...)(auto ref Range r, const(Char)[] fmt, au
/// ditto
uint formattedRead(alias fmt, Range, Args...)(auto ref Range r, auto ref Args args)
-if (isSomeString!(typeof(fmt)))
+if (!isType!fmt && isSomeString!(typeof(fmt)))
{
import std.format : checkFormatException;
import std.meta : staticMap;
@@ -693,6 +694,116 @@ if (isSomeString!(typeof(fmt)))
}
/**
+Reads an input range according to a format string and returns a tuple of Args
+with the read values.
+
+Format specifiers with format character $(B 'd'), $(B 'u') and $(B
+'c') can take a $(B '*') parameter for skipping values.
+
+The second version of `formattedRead` takes the format string as
+template argument. In this case, it is checked for consistency at
+compile-time.
+
+Params:
+ Args = a variadic list of types of the arguments
+ */
+template formattedRead(Args...)
+if (Args.length && allSatisfy!(isType, Args))
+{
+ import std.typecons : Tuple;
+
+ /**
+ Params:
+ r = an $(REF_ALTTEXT input range, isInputRange, std, range, primitives),
+ where the formatted input is read from
+ fmt = a $(MREF_ALTTEXT format string, std,format)
+ Range = the type of the input range `r`
+ Char = the character type used for `fmt`
+
+ Returns:
+ A Tuple!Args with the elements filled.
+
+ Throws:
+ A $(REF_ALTTEXT FormatException, FormatException, std, format)
+ if reading did not succeed.
+ */
+ Tuple!Args formattedRead(Range, Char)(auto ref Range r, const(Char)[] fmt)
+ {
+ import core.lifetime : forward;
+ import std.format : enforceFmt;
+
+ Tuple!Args args;
+ const numArgsFilled = .formattedRead(forward!r, fmt, args.expand);
+ enforceFmt(numArgsFilled == Args.length, "Failed reading into all format arguments");
+ return args;
+ }
+}
+
+///
+@safe pure unittest
+{
+ import std.exception : assertThrown;
+ import std.format : FormatException;
+ import std.typecons : tuple;
+
+ auto complete = "hello!34.5:124".formattedRead!(string, double, int)("%s!%s:%s");
+ assert(complete == tuple("hello", 34.5, 124));
+
+ // reading ends early
+ assertThrown!FormatException("hello!34.5:".formattedRead!(string, double, int)("%s!%s:%s"));
+}
+
+/// Skipping values
+@safe pure unittest
+{
+ import std.format : FormatException;
+ import std.typecons : tuple;
+
+ auto result = "orange: (12%) 15.25".formattedRead!(string, double)("%s: (%*d%%) %f");
+ assert(result == tuple("orange", 15.25));
+}
+
+/// ditto
+template formattedRead(alias fmt, Args...)
+if (!isType!fmt && isSomeString!(typeof(fmt)) && Args.length && allSatisfy!(isType, Args))
+{
+ import std.typecons : Flag, Tuple, Yes;
+ Tuple!Args formattedRead(Range)(auto ref Range r)
+ {
+ import core.lifetime : forward;
+ import std.format : enforceFmt;
+
+ Tuple!Args args;
+ const numArgsFilled = .formattedRead!fmt(forward!r, args.expand);
+ enforceFmt(numArgsFilled == Args.length, "Failed reading into all format arguments");
+ return args;
+ }
+}
+
+/// The format string can be checked at compile-time
+@safe pure unittest
+{
+ import std.exception : assertThrown;
+ import std.format : FormatException;
+ import std.typecons : tuple;
+
+ auto expected = tuple("hello", 124, 34.5);
+ auto result = "hello!124:34.5".formattedRead!("%s!%s:%s", string, int, double);
+ assert(result == expected);
+
+ assertThrown!FormatException("hello!34.5:".formattedRead!("%s!%s:%s", string, double, int));
+}
+
+/// Compile-time consistency check
+@safe pure unittest
+{
+ import std.format : FormatException;
+ import std.typecons : tuple;
+
+ static assert(!__traits(compiles, "orange: (12%) 15.25".formattedRead!("%s: (%*d%%) %f", string, double)));
+}
+
+/**
Reads a value from the given _input range and converts it according to a
format specifier.
diff --git a/libphobos/src/std/logger/core.d b/libphobos/src/std/logger/core.d
index cc938d4..1e879fd 100644
--- a/libphobos/src/std/logger/core.d
+++ b/libphobos/src/std/logger/core.d
@@ -1433,7 +1433,7 @@ logger by the user, the default logger's log level is LogLevel.info.
Example:
-------------
-sharedLog = new FileLogger(yourFile);
+sharedLog = new shared FileLogger(yourFile);
-------------
The example sets a new `FileLogger` as new `sharedLog`.
@@ -1450,7 +1450,7 @@ writing `sharedLog`.
The default `Logger` is thread-safe.
-------------
if (sharedLog !is myLogger)
- sharedLog = new myLogger;
+ sharedLog = new shared myLogger;
-------------
*/
@property shared(Logger) sharedLog() @safe
diff --git a/libphobos/src/std/logger/filelogger.d b/libphobos/src/std/logger/filelogger.d
index c662ca7..5ba167c 100644
--- a/libphobos/src/std/logger/filelogger.d
+++ b/libphobos/src/std/logger/filelogger.d
@@ -37,7 +37,7 @@ class FileLogger : Logger
auto l3 = new FileLogger("logFile", LogLevel.fatal, CreateFolder.yes);
-------------
*/
- this(const string fn, const LogLevel lv = LogLevel.all) @safe
+ this(this This)(const string fn, const LogLevel lv = LogLevel.all)
{
this(fn, lv, CreateFolder.yes);
}
@@ -63,7 +63,7 @@ class FileLogger : Logger
auto l2 = new FileLogger(file, LogLevel.fatal);
-------------
*/
- this(const string fn, const LogLevel lv, CreateFolder createFileNameFolder) @safe
+ this(this This)(const string fn, const LogLevel lv, CreateFolder createFileNameFolder)
{
import std.file : exists, mkdirRecurse;
import std.path : dirName;
@@ -80,7 +80,8 @@ class FileLogger : Logger
" created in '", d,"' could not be created."));
}
- this.file_.open(this.filename, "a");
+ // Cast away `shared` when the constructor is inferred shared.
+ () @trusted { (cast() this.file_).open(this.filename, "a"); }();
}
/** A constructor for the `FileLogger` Logger that takes a reference to
@@ -270,3 +271,12 @@ class FileLogger : Logger
assert(tl !is null);
stdThreadLocalLog.logLevel = LogLevel.all;
}
+
+@safe unittest
+{
+ // we don't need to actually run the code, only make sure
+ // it compiles
+ static _() {
+ auto l = new shared FileLogger("");
+ }
+}
diff --git a/libphobos/src/std/logger/package.d b/libphobos/src/std/logger/package.d
index 14a4394..215ca20 100644
--- a/libphobos/src/std/logger/package.d
+++ b/libphobos/src/std/logger/package.d
@@ -64,7 +64,7 @@ using the property called `sharedLog`. This property is a reference to the
current default `Logger`. This reference can be used to assign a new
default `Logger`.
-------------
-sharedLog = new FileLogger("New_Default_Log_File.log");
+sharedLog = new shared FileLogger("New_Default_Log_File.log");
-------------
Additional `Logger` can be created by creating a new instance of the
diff --git a/libphobos/src/std/numeric.d b/libphobos/src/std/numeric.d
index 3fef8e4..9966b1c 100644
--- a/libphobos/src/std/numeric.d
+++ b/libphobos/src/std/numeric.d
@@ -223,7 +223,7 @@ private:
}
// Convert the current value to signed exponent, normalized form
- void toNormalized(T,U)(ref T sig, ref U exp)
+ void toNormalized(T,U)(ref T sig, ref U exp) const
{
sig = significand;
auto shift = (T.sizeof*8) - precision;
@@ -490,7 +490,7 @@ public:
}
/// Returns: real part
- @property CustomFloat re() { return this; }
+ @property CustomFloat re() const { return this; }
/// Returns: imaginary part
static @property CustomFloat im() { return CustomFloat(0.0f); }
@@ -546,7 +546,7 @@ public:
}
/// Fetches the stored value either as a `float`, `double` or `real`.
- @property F get(F)()
+ @property F get(F)() const
if (staticIndexOf!(immutable F, immutable float, immutable double, immutable real) >= 0)
{
import std.conv : text;
@@ -591,14 +591,14 @@ public:
// Define an opBinary `CustomFloat op CustomFloat` so that those below
// do not match equally, which is disallowed by the spec:
// https://dlang.org/spec/operatoroverloading.html#binary
- real opBinary(string op,T)(T b)
+ real opBinary(string op,T)(T b) const
if (__traits(compiles, mixin(`get!real`~op~`b.get!real`)))
{
return mixin(`get!real`~op~`b.get!real`);
}
/// ditto
- real opBinary(string op,T)(T b)
+ real opBinary(string op,T)(T b) const
if ( __traits(compiles, mixin(`get!real`~op~`b`)) &&
!__traits(compiles, mixin(`get!real`~op~`b.get!real`)))
{
@@ -606,7 +606,7 @@ public:
}
/// ditto
- real opBinaryRight(string op,T)(T a)
+ real opBinaryRight(string op,T)(T a) const
if ( __traits(compiles, mixin(`a`~op~`get!real`)) &&
!__traits(compiles, mixin(`get!real`~op~`b`)) &&
!__traits(compiles, mixin(`get!real`~op~`b.get!real`)))
@@ -615,7 +615,7 @@ public:
}
/// ditto
- int opCmp(T)(auto ref T b)
+ int opCmp(T)(auto ref T b) const
if (__traits(compiles, cast(real) b))
{
auto x = get!real;
@@ -949,6 +949,17 @@ public:
assertThrown!AssertError(a = float.infinity);
}
+@safe unittest
+{
+ const CustomFloat!16 x = CustomFloat!16(3);
+ assert(x.get!float == 3);
+ assert(x.re.get!float == 3);
+ assert(x + x == 6);
+ assert(x + 1 == 4);
+ assert(2 + x == 5);
+ assert(x < 4);
+}
+
private bool isCorrectCustomFloat(uint precision, uint exponentWidth, CustomFloatFlags flags) @safe pure nothrow @nogc
{
// Restrictions from bitfield
diff --git a/libphobos/src/std/process.d b/libphobos/src/std/process.d
index 4f593bd..2efbcaa 100644
--- a/libphobos/src/std/process.d
+++ b/libphobos/src/std/process.d
@@ -4631,11 +4631,12 @@ else version (Posix)
if (childpid == 0)
{
// Trusted because args and all entries are always zero-terminated
- (() @trusted =>
- core.sys.posix.unistd.execvp(args[0], &args[0]) ||
- perror(args[0]) // failed to execute
- )();
- return;
+ (() @trusted {
+ core.sys.posix.unistd.execvp(args[0], &args[0]);
+ perror(args[0]);
+ core.sys.posix.unistd._exit(1);
+ })();
+ assert(0, "Child failed to exec");
}
if (browser)
// Trusted because it's allocated via strdup above
diff --git a/libphobos/src/std/socket.d b/libphobos/src/std/socket.d
index 52fd33b..7fa9974 100644
--- a/libphobos/src/std/socket.d
+++ b/libphobos/src/std/socket.d
@@ -54,6 +54,12 @@ version (Windows)
enum socket_t : SOCKET { INVALID_SOCKET }
private const int _SOCKET_ERROR = SOCKET_ERROR;
+ /**
+ * On Windows, there is no `SO_REUSEPORT`.
+ * However, `SO_REUSEADDR` is equivalent to `SO_REUSEPORT` there.
+ * $(LINK https://learn.microsoft.com/en-us/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse)
+ */
+ private enum SO_REUSEPORT = SO_REUSEADDR;
private int _lasterr() nothrow @nogc
{
@@ -2589,6 +2595,22 @@ enum SocketOption: int
DEBUG = SO_DEBUG, /// Record debugging information
BROADCAST = SO_BROADCAST, /// Allow transmission of broadcast messages
REUSEADDR = SO_REUSEADDR, /// Allow local reuse of address
+ /**
+ * Allow local reuse of port
+ *
+ * On Windows, this is equivalent to `SocketOption.REUSEADDR`.
+ * There is in fact no option named `REUSEPORT`.
+ * However, `SocketOption.REUSEADDR` matches the behavior of
+ * `SocketOption.REUSEPORT` on other platforms. Further details on this
+ * topic can be found here:
+ * $(LINK https://learn.microsoft.com/en-us/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse)
+ *
+     * On Linux, this ensures fair distribution of incoming connections across threads.
+ *
+ * See_Also:
+ * https://lwn.net/Articles/542629/
+ */
+ REUSEPORT = SO_REUSEPORT,
LINGER = SO_LINGER, /// Linger on close if unsent data is present
OOBINLINE = SO_OOBINLINE, /// Receive out-of-band data in band
SNDBUF = SO_SNDBUF, /// Send buffer size
diff --git a/libphobos/src/std/sumtype.d b/libphobos/src/std/sumtype.d
index 69c2a49..ad29428 100644
--- a/libphobos/src/std/sumtype.d
+++ b/libphobos/src/std/sumtype.d
@@ -1860,88 +1860,65 @@ private template Iota(size_t n)
assert(Iota!3 == AliasSeq!(0, 1, 2));
}
-/* The number that the dim-th argument's tag is multiplied by when
- * converting TagTuples to and from case indices ("caseIds").
- *
- * Named by analogy to the stride that the dim-th index into a
- * multidimensional static array is multiplied by to calculate the
- * offset of a specific element.
- */
-private size_t stride(size_t dim, lengths...)()
-{
- import core.checkedint : mulu;
-
- size_t result = 1;
- bool overflow = false;
-
- static foreach (i; 0 .. dim)
- {
- result = mulu(result, lengths[i], overflow);
- }
-
- /* The largest number matchImpl uses, numCases, is calculated with
- * stride!(SumTypes.length), so as long as this overflow check
- * passes, we don't need to check for overflow anywhere else.
- */
- assert(!overflow, "Integer overflow");
- return result;
-}
-
private template matchImpl(Flag!"exhaustive" exhaustive, handlers...)
{
auto ref matchImpl(SumTypes...)(auto ref SumTypes args)
if (allSatisfy!(isSumType, SumTypes) && args.length > 0)
{
- alias stride(size_t i) = .stride!(i, Map!(typeCount, SumTypes));
- alias TagTuple = .TagTuple!(SumTypes);
-
- /*
- * A list of arguments to be passed to a handler needed for the case
- * labeled with `caseId`.
- */
- template handlerArgs(size_t caseId)
+ // Single dispatch (fast path)
+ static if (args.length == 1)
{
- enum tags = TagTuple.fromCaseId(caseId);
- enum argsFrom(size_t i : tags.length) = "";
- enum argsFrom(size_t i) = "args[" ~ toCtString!i ~ "].get!(SumTypes[" ~ toCtString!i ~ "]" ~
- ".Types[" ~ toCtString!(tags[i]) ~ "])(), " ~ argsFrom!(i + 1);
- enum handlerArgs = argsFrom!0;
- }
+ /* When there's only one argument, the caseId is just that
+ * argument's tag, so there's no need for TagTuple.
+ */
+ enum handlerArgs(size_t caseId) =
+ "args[0].get!(SumTypes[0].Types[" ~ toCtString!caseId ~ "])()";
- /* An AliasSeq of the types of the member values in the argument list
- * returned by `handlerArgs!caseId`.
- *
- * Note that these are the actual (that is, qualified) types of the
- * member values, which may not be the same as the types listed in
- * the arguments' `.Types` properties.
- */
- template valueTypes(size_t caseId)
+ alias valueTypes(size_t caseId) =
+ typeof(args[0].get!(SumTypes[0].Types[caseId])());
+
+ enum numCases = SumTypes[0].Types.length;
+ }
+ // Multiple dispatch (slow path)
+ else
{
- enum tags = TagTuple.fromCaseId(caseId);
+ alias typeCounts = Map!(typeCount, SumTypes);
+ alias stride(size_t i) = .stride!(i, typeCounts);
+ alias TagTuple = .TagTuple!typeCounts;
+
+ alias handlerArgs(size_t caseId) = .handlerArgs!(caseId, typeCounts);
- template getType(size_t i)
+ /* An AliasSeq of the types of the member values in the argument list
+ * returned by `handlerArgs!caseId`.
+ *
+ * Note that these are the actual (that is, qualified) types of the
+ * member values, which may not be the same as the types listed in
+ * the arguments' `.Types` properties.
+ */
+ template valueTypes(size_t caseId)
{
- enum tid = tags[i];
- alias T = SumTypes[i].Types[tid];
- alias getType = typeof(args[i].get!T());
+ enum tags = TagTuple.fromCaseId(caseId);
+
+ template getType(size_t i)
+ {
+ enum tid = tags[i];
+ alias T = SumTypes[i].Types[tid];
+ alias getType = typeof(args[i].get!T());
+ }
+
+ alias valueTypes = Map!(getType, Iota!(tags.length));
}
- alias valueTypes = Map!(getType, Iota!(tags.length));
+ /* The total number of cases is
+ *
+ * Π SumTypes[i].Types.length for 0 ≤ i < SumTypes.length
+ *
+ * Conveniently, this is equal to stride!(SumTypes.length), so we can
+ * use that function to compute it.
+ */
+ enum numCases = stride!(SumTypes.length);
}
- /* The total number of cases is
- *
- * Π SumTypes[i].Types.length for 0 ≤ i < SumTypes.length
- *
- * Or, equivalently,
- *
- * ubyte[SumTypes[0].Types.length]...[SumTypes[$-1].Types.length].sizeof
- *
- * Conveniently, this is equal to stride!(SumTypes.length), so we can
- * use that function to compute it.
- */
- enum numCases = stride!(SumTypes.length);
-
/* Guaranteed to never be a valid handler index, since
* handlers.length <= size_t.max.
*/
@@ -1998,7 +1975,12 @@ private template matchImpl(Flag!"exhaustive" exhaustive, handlers...)
mixin("alias ", handlerName!hid, " = handler;");
}
- immutable argsId = TagTuple(args).toCaseId;
+ // Single dispatch (fast path)
+ static if (args.length == 1)
+ immutable argsId = args[0].tag;
+ // Multiple dispatch (slow path)
+ else
+ immutable argsId = TagTuple(args).toCaseId;
final switch (argsId)
{
@@ -2029,10 +2011,11 @@ private template matchImpl(Flag!"exhaustive" exhaustive, handlers...)
}
}
+// Predicate for staticMap
private enum typeCount(SumType) = SumType.Types.length;
-/* A TagTuple represents a single possible set of tags that `args`
- * could have at runtime.
+/* A TagTuple represents a single possible set of tags that the arguments to
+ * `matchImpl` could have at runtime.
*
* Because D does not allow a struct to be the controlling expression
* of a switch statement, we cannot dispatch on the TagTuple directly.
@@ -2054,22 +2037,23 @@ private enum typeCount(SumType) = SumType.Types.length;
* When there is only one argument, the caseId is equal to that
* argument's tag.
*/
-private struct TagTuple(SumTypes...)
+private struct TagTuple(typeCounts...)
{
- size_t[SumTypes.length] tags;
+ size_t[typeCounts.length] tags;
alias tags this;
- alias stride(size_t i) = .stride!(i, Map!(typeCount, SumTypes));
+ alias stride(size_t i) = .stride!(i, typeCounts);
invariant
{
static foreach (i; 0 .. tags.length)
{
- assert(tags[i] < SumTypes[i].Types.length, "Invalid tag");
+ assert(tags[i] < typeCounts[i], "Invalid tag");
}
}
- this(ref const(SumTypes) args)
+ this(SumTypes...)(ref const SumTypes args)
+ if (allSatisfy!(isSumType, SumTypes) && args.length == typeCounts.length)
{
static foreach (i; 0 .. tags.length)
{
@@ -2104,6 +2088,52 @@ private struct TagTuple(SumTypes...)
}
}
+/* The number that the dim-th argument's tag is multiplied by when
+ * converting TagTuples to and from case indices ("caseIds").
+ *
+ * Named by analogy to the stride that the dim-th index into a
+ * multidimensional static array is multiplied by to calculate the
+ * offset of a specific element.
+ */
+private size_t stride(size_t dim, lengths...)()
+{
+ import core.checkedint : mulu;
+
+ size_t result = 1;
+ bool overflow = false;
+
+ static foreach (i; 0 .. dim)
+ {
+ result = mulu(result, lengths[i], overflow);
+ }
+
+ /* The largest number matchImpl uses, numCases, is calculated with
+ * stride!(SumTypes.length), so as long as this overflow check
+ * passes, we don't need to check for overflow anywhere else.
+ */
+ assert(!overflow, "Integer overflow");
+ return result;
+}
+
+/* A list of arguments to be passed to a handler needed for the case
+ * labeled with `caseId`.
+ */
+private template handlerArgs(size_t caseId, typeCounts...)
+{
+ enum tags = TagTuple!typeCounts.fromCaseId(caseId);
+
+ alias handlerArgs = AliasSeq!();
+
+ static foreach (i; 0 .. tags.length)
+ {
+ handlerArgs = AliasSeq!(
+ handlerArgs,
+ "args[" ~ toCtString!i ~ "].get!(SumTypes[" ~ toCtString!i ~ "]" ~
+ ".Types[" ~ toCtString!(tags[i]) ~ "])(), "
+ );
+ }
+}
+
// Matching
@safe unittest
{
diff --git a/libphobos/src/std/traits.d b/libphobos/src/std/traits.d
index 69362c0..f230aa3 100644
--- a/libphobos/src/std/traits.d
+++ b/libphobos/src/std/traits.d
@@ -7251,16 +7251,21 @@ alias PointerTarget(T : T*) = T;
/**
* Detect whether type `T` is an aggregate type.
*/
-enum bool isAggregateType(T) = is(T == struct) || is(T == union) ||
- is(T == class) || is(T == interface);
+template isAggregateType(T)
+{
+ static if (is(T == enum))
+ enum isAggregateType = isAggregateType!(OriginalType!T);
+ else
+ enum isAggregateType = is(T == struct) || is(T == class) || is(T == interface) || is(T == union);
+}
///
@safe unittest
{
- class C;
- union U;
- struct S;
- interface I;
+ class C {}
+ union U {}
+ struct S {}
+ interface I {}
static assert( isAggregateType!C);
static assert( isAggregateType!U);
@@ -7271,6 +7276,16 @@ enum bool isAggregateType(T) = is(T == struct) || is(T == union) ||
static assert(!isAggregateType!(int[]));
static assert(!isAggregateType!(C[string]));
static assert(!isAggregateType!(void delegate(int)));
+
+ enum ES : S { a = S.init }
+ enum EC : C { a = C.init }
+ enum EI : I { a = I.init }
+ enum EU : U { a = U.init }
+
+ static assert( isAggregateType!ES);
+ static assert( isAggregateType!EC);
+ static assert( isAggregateType!EI);
+ static assert( isAggregateType!EU);
}
/**
@@ -9238,12 +9253,16 @@ enum isCopyable(S) = __traits(isCopyable, S);
* is the same as `T`. For pointer and slice types, it is `T` with the
* outer-most layer of qualifiers dropped.
*/
-package(std) template DeducedParameterType(T)
+package(std) alias DeducedParameterType(T) = DeducedParameterTypeImpl!T;
+/// ditto
+package(std) alias DeducedParameterType(alias T) = DeducedParameterTypeImpl!T;
+
+private template DeducedParameterTypeImpl(T)
{
static if (is(T == U*, U) || is(T == U[], U))
- alias DeducedParameterType = Unqual!T;
+ alias DeducedParameterTypeImpl = Unqual!T;
else
- alias DeducedParameterType = T;
+ alias DeducedParameterTypeImpl = T;
}
@safe unittest
@@ -9263,6 +9282,7 @@ package(std) template DeducedParameterType(T)
}
static assert(is(DeducedParameterType!NoCopy == NoCopy));
+ static assert(is(DeducedParameterType!(inout(NoCopy)) == inout(NoCopy)));
}
@safe unittest
diff --git a/libphobos/src/std/typecons.d b/libphobos/src/std/typecons.d
index c874c0f..bd462f5 100644
--- a/libphobos/src/std/typecons.d
+++ b/libphobos/src/std/typecons.d
@@ -3104,17 +3104,18 @@ private:
{
static if (useQualifierCast)
{
- this.data = cast() value;
+ static if (hasElaborateAssign!T)
+ {
+ import core.lifetime : copyEmplace;
+ copyEmplace(cast() value, this.data);
+ }
+ else
+ this.data = cast() value;
}
else
{
- // As we're escaping a copy of `value`, deliberately leak a copy:
- static union DontCallDestructor
- {
- T value;
- }
- DontCallDestructor copy = DontCallDestructor(value);
- this.data = *cast(Payload*) &copy;
+ import core.lifetime : copyEmplace;
+ copyEmplace(cast() value, cast() *cast(T*) &this.data);
}
}
@@ -3139,6 +3140,334 @@ package(std) Rebindable2!T rebindable2(T)(T value)
return Rebindable2!T(value);
}
+// Verify that the destructor is called properly if there is one.
+@system unittest
+{
+ {
+ bool destroyed;
+
+ struct S
+ {
+ int i;
+
+ this(int i) @safe
+ {
+ this.i = i;
+ }
+
+ ~this() @safe
+ {
+ destroyed = true;
+ }
+ }
+
+ {
+ auto foo = rebindable2(S(42));
+
+ // Whether destruction has occurred here depends on whether the
+ // temporary gets moved or not, so we won't assume that it has or
+ // hasn't happened. What we care about here is that foo gets destroyed
+ // properly when it leaves the scope.
+ destroyed = false;
+ }
+ assert(destroyed);
+
+ {
+ auto foo = rebindable2(const S(42));
+ destroyed = false;
+ }
+ assert(destroyed);
+ }
+
+ // Test for double destruction with qualifier cast being used
+ {
+ static struct S
+ {
+ int i;
+ bool destroyed;
+
+ this(int i) @safe
+ {
+ this.i = i;
+ }
+
+ ~this() @safe
+ {
+ destroyed = true;
+ }
+
+ @safe invariant
+ {
+ assert(!destroyed);
+ }
+ }
+
+ {
+ auto foo = rebindable2(S(42));
+ assert(typeof(foo).useQualifierCast);
+ assert(foo.data.i == 42);
+ assert(!foo.data.destroyed);
+ }
+ {
+ auto foo = rebindable2(S(42));
+ destroy(foo);
+ }
+ {
+ auto foo = rebindable2(const S(42));
+ assert(typeof(foo).useQualifierCast);
+ assert(foo.data.i == 42);
+ assert(!foo.data.destroyed);
+ }
+ {
+ auto foo = rebindable2(const S(42));
+ destroy(foo);
+ }
+ }
+
+ // Test for double destruction without qualifier cast being used
+ {
+ static struct S
+ {
+ int i;
+ bool destroyed;
+
+ this(int i) @safe
+ {
+ this.i = i;
+ }
+
+ ~this() @safe
+ {
+ destroyed = true;
+ }
+
+ @disable ref S opAssign()(auto ref S rhs);
+
+ @safe invariant
+ {
+ assert(!destroyed);
+ }
+ }
+
+ {
+ auto foo = rebindable2(S(42));
+ assert(!typeof(foo).useQualifierCast);
+ assert((cast(S*)&(foo.data)).i == 42);
+ assert(!(cast(S*)&(foo.data)).destroyed);
+ }
+ {
+ auto foo = rebindable2(S(42));
+ destroy(foo);
+ }
+ }
+}
+
+// Verify that if there is an overloaded assignment operator, it's not assigned
+// to garbage.
+@safe unittest
+{
+ static struct S
+ {
+ int i;
+ bool destroyed;
+
+ this(int i) @safe
+ {
+ this.i = i;
+ }
+
+ ~this() @safe
+ {
+ destroyed = true;
+ }
+
+ ref opAssign()(auto ref S rhs)
+ {
+ assert(!this.destroyed);
+ this.i = rhs.i;
+ return this;
+ }
+ }
+
+ {
+ auto foo = rebindable2(S(42));
+ foo = S(99);
+ assert(foo.data.i == 99);
+ }
+ {
+ auto foo = rebindable2(S(42));
+ foo = const S(99);
+ assert(foo.data.i == 99);
+ }
+}
+
+// Verify that postblit or copy constructor is called properly if there is one.
+@system unittest
+{
+ // postblit with type qualifier cast
+ {
+ static struct S
+ {
+ int i;
+ static bool copied;
+
+ this(this) @safe
+ {
+ copied = true;
+ }
+ }
+
+ {
+ auto foo = rebindable2(S(42));
+
+ // Whether a copy has occurred here depends on whether the
+ // temporary gets moved or not, so we won't assume that it has or
+ // hasn't happened. What we care about here is that foo gets copied
+ // properly when we copy it below.
+ S.copied = false;
+
+ auto bar = foo;
+ assert(S.copied);
+ }
+ {
+ auto foo = rebindable2(const S(42));
+ assert(typeof(foo).useQualifierCast);
+ S.copied = false;
+
+ auto bar = foo;
+ assert(S.copied);
+ }
+ }
+
+ // copy constructor with type qualifier cast
+ {
+ static struct S
+ {
+ int i;
+ static bool copied;
+
+ this(ref inout S rhs) @safe inout
+ {
+ this.i = i;
+ copied = true;
+ }
+ }
+
+ {
+ auto foo = rebindable2(S(42));
+ assert(typeof(foo).useQualifierCast);
+ S.copied = false;
+
+ auto bar = foo;
+ assert(S.copied);
+ }
+ {
+ auto foo = rebindable2(const S(42));
+ S.copied = false;
+
+ auto bar = foo;
+ assert(S.copied);
+ }
+ }
+
+ // FIXME https://issues.dlang.org/show_bug.cgi?id=24829
+
+ // Making this work requires either reworking how the !useQualifierCast
+ // version works so that the compiler can correctly generate postblit
+ // constructors and copy constructors as appropriate, or an explicit
+ // postblit or copy constructor needs to be added for such cases, which
+ // gets pretty complicated if we want to correctly add the same attributes
+ // that T's postblit or copy constructor has.
+
+ /+
+ // postblit without type qualifier cast
+ {
+ static struct S
+ {
+ int* ptr;
+ static bool copied;
+
+ this(int i)
+ {
+ ptr = new int(i);
+ }
+
+ this(this) @safe
+ {
+ if (ptr !is null)
+ ptr = new int(*ptr);
+ copied = true;
+ }
+
+ @disable ref S opAssign()(auto ref S rhs);
+ }
+
+ {
+ auto foo = rebindable2(S(42));
+ assert(!typeof(foo).useQualifierCast);
+ S.copied = false;
+
+ auto bar = foo;
+ assert(S.copied);
+ assert(*(cast(S*)&(foo.data)).ptr == *(cast(S*)&(bar.data)).ptr);
+ assert((cast(S*)&(foo.data)).ptr !is (cast(S*)&(bar.data)).ptr);
+ }
+ {
+ auto foo = rebindable2(const S(42));
+ S.copied = false;
+
+ auto bar = foo;
+ assert(S.copied);
+ assert(*(cast(S*)&(foo.data)).ptr == *(cast(S*)&(bar.data)).ptr);
+ assert((cast(S*)&(foo.data)).ptr !is (cast(S*)&(bar.data)).ptr);
+ }
+ }
+
+ // copy constructor without type qualifier cast
+ {
+ static struct S
+ {
+ int* ptr;
+ static bool copied;
+
+ this(int i)
+ {
+ ptr = new int(i);
+ }
+
+ this(ref inout S rhs) @safe inout
+ {
+ if (rhs.ptr !is null)
+ ptr = new inout int(*rhs.ptr);
+ copied = true;
+ }
+
+ @disable ref S opAssign()(auto ref S rhs);
+ }
+
+ {
+ auto foo = rebindable2(S(42));
+ assert(!typeof(foo).useQualifierCast);
+ S.copied = false;
+
+ auto bar = foo;
+ assert(S.copied);
+ assert(*(cast(S*)&(foo.data)).ptr == *(cast(S*)&(bar.data)).ptr);
+ assert((cast(S*)&(foo.data)).ptr !is (cast(S*)&(bar.data)).ptr);
+ }
+ {
+ auto foo = rebindable2(const S(42));
+ S.copied = false;
+
+ auto bar = foo;
+ assert(S.copied);
+ assert(*(cast(S*)&(foo.data)).ptr == *(cast(S*)&(bar.data)).ptr);
+ assert((cast(S*)&(foo.data)).ptr !is (cast(S*)&(bar.data)).ptr);
+ }
+ }
+ +/
+}
+
/**
Similar to `Rebindable!(T)` but strips all qualifiers from the reference as
opposed to just constness / immutability. Primary intended use case is with
diff --git a/libphobos/src/std/windows/syserror.d b/libphobos/src/std/windows/syserror.d
index 3d8c5e7..dadf0e8 100644
--- a/libphobos/src/std/windows/syserror.d
+++ b/libphobos/src/std/windows/syserror.d
@@ -69,7 +69,6 @@ import core.sys.windows.winbase, core.sys.windows.winnt;
import std.array : appender, Appender;
import std.conv : to, toTextRange, text;
import std.exception;
-import std.windows.charset;
string sysErrorString(
DWORD errCode,