Diffstat (limited to 'libphobos/libdruntime')
-rw-r--r--  libphobos/libdruntime/LICENSE.txt (renamed from libphobos/libdruntime/LICENSE)  3
-rw-r--r--  libphobos/libdruntime/MERGE  2
-rw-r--r--  libphobos/libdruntime/Makefile.am  96
-rw-r--r--  libphobos/libdruntime/Makefile.in  508
-rw-r--r--  libphobos/libdruntime/__entrypoint.di  56
-rw-r--r--  libphobos/libdruntime/core/atomic.d  2428
-rw-r--r--  libphobos/libdruntime/core/attribute.d  188
-rw-r--r--  libphobos/libdruntime/core/bitop.d  19
-rw-r--r--  libphobos/libdruntime/core/builtins.d  19
-rw-r--r--  libphobos/libdruntime/core/checkedint.d  114
-rw-r--r--  libphobos/libdruntime/core/demangle.d  184
-rw-r--r--  libphobos/libdruntime/core/exception.d  347
-rw-r--r--  libphobos/libdruntime/core/gc/config.d  129
-rw-r--r--  libphobos/libdruntime/core/gc/gcinterface.d (renamed from libphobos/libdruntime/gc/gcinterface.d)  36
-rw-r--r--  libphobos/libdruntime/core/gc/registry.d  87
-rw-r--r--  libphobos/libdruntime/core/internal/abort.d  20
-rw-r--r--  libphobos/libdruntime/core/internal/array/appending.d  222
-rw-r--r--  libphobos/libdruntime/core/internal/array/capacity.d  85
-rw-r--r--  libphobos/libdruntime/core/internal/array/casting.d  115
-rw-r--r--  libphobos/libdruntime/core/internal/array/comparison.d  242
-rw-r--r--  libphobos/libdruntime/core/internal/array/concatenation.d  75
-rw-r--r--  libphobos/libdruntime/core/internal/array/construction.d  307
-rw-r--r--  libphobos/libdruntime/core/internal/array/equality.d  237
-rw-r--r--  libphobos/libdruntime/core/internal/array/operations.d  670
-rw-r--r--  libphobos/libdruntime/core/internal/array/utils.d  121
-rw-r--r--  libphobos/libdruntime/core/internal/arrayop.d  451
-rw-r--r--  libphobos/libdruntime/core/internal/atomic.d  1141
-rw-r--r--  libphobos/libdruntime/core/internal/container/array.d (renamed from libphobos/libdruntime/rt/util/container/array.d)  16
-rw-r--r--  libphobos/libdruntime/core/internal/container/common.d (renamed from libphobos/libdruntime/rt/util/container/common.d)  13
-rw-r--r--  libphobos/libdruntime/core/internal/container/hashtab.d (renamed from libphobos/libdruntime/rt/util/container/hashtab.d)  23
-rw-r--r--  libphobos/libdruntime/core/internal/container/treap.d (renamed from libphobos/libdruntime/rt/util/container/treap.d)  56
-rw-r--r--  libphobos/libdruntime/core/internal/convert.d  56
-rw-r--r--  libphobos/libdruntime/core/internal/dassert.d  590
-rw-r--r--  libphobos/libdruntime/core/internal/destruction.d  47
-rw-r--r--  libphobos/libdruntime/core/internal/entrypoint.d  41
-rw-r--r--  libphobos/libdruntime/core/internal/gc/bits.d  493
-rw-r--r--  libphobos/libdruntime/core/internal/gc/impl/conservative/gc.d  4836
-rw-r--r--  libphobos/libdruntime/core/internal/gc/impl/manual/gc.d (renamed from libphobos/libdruntime/gc/impl/manual/gc.d)  85
-rw-r--r--  libphobos/libdruntime/core/internal/gc/impl/proto/gc.d  248
-rw-r--r--  libphobos/libdruntime/core/internal/gc/os.d (renamed from libphobos/libdruntime/gc/os.d)  146
-rw-r--r--  libphobos/libdruntime/core/internal/gc/pooltable.d (renamed from libphobos/libdruntime/gc/pooltable.d)  24
-rw-r--r--  libphobos/libdruntime/core/internal/gc/proxy.d  296
-rw-r--r--  libphobos/libdruntime/core/internal/hash.d  453
-rw-r--r--  libphobos/libdruntime/core/internal/lifetime.d  213
-rw-r--r--  libphobos/libdruntime/core/internal/moving.d  147
-rw-r--r--  libphobos/libdruntime/core/internal/parseoptions.d  422
-rw-r--r--  libphobos/libdruntime/core/internal/postblit.d  274
-rw-r--r--  libphobos/libdruntime/core/internal/qsort.d (renamed from libphobos/libdruntime/rt/qsort.d)  52
-rw-r--r--  libphobos/libdruntime/core/internal/spinlock.d  23
-rw-r--r--  libphobos/libdruntime/core/internal/string.d  166
-rw-r--r--  libphobos/libdruntime/core/internal/switch_.d  190
-rw-r--r--  libphobos/libdruntime/core/internal/traits.d  609
-rw-r--r--  libphobos/libdruntime/core/internal/utf.d (renamed from libphobos/libdruntime/rt/util/utf.d)  112
-rw-r--r--  libphobos/libdruntime/core/internal/util/array.d (renamed from libphobos/libdruntime/rt/util/array.d)  38
-rw-r--r--  libphobos/libdruntime/core/internal/util/math.d  53
-rw-r--r--  libphobos/libdruntime/core/lifetime.d  2201
-rw-r--r--  libphobos/libdruntime/core/memory.d  926
-rw-r--r--  libphobos/libdruntime/core/runtime.d  848
-rw-r--r--  libphobos/libdruntime/core/stdc/math.d  395
-rw-r--r--  libphobos/libdruntime/core/stdc/stdint.d  91
-rw-r--r--  libphobos/libdruntime/core/stdcpp/allocator.d  373
-rw-r--r--  libphobos/libdruntime/core/stdcpp/array.d  133
-rw-r--r--  libphobos/libdruntime/core/stdcpp/exception.d  161
-rw-r--r--  libphobos/libdruntime/core/stdcpp/memory.d  163
-rw-r--r--  libphobos/libdruntime/core/stdcpp/new_.d  186
-rw-r--r--  libphobos/libdruntime/core/stdcpp/string.d  2593
-rw-r--r--  libphobos/libdruntime/core/stdcpp/string_view.d  130
-rw-r--r--  libphobos/libdruntime/core/stdcpp/type_traits.d  50
-rw-r--r--  libphobos/libdruntime/core/stdcpp/typeinfo.d  87
-rw-r--r--  libphobos/libdruntime/core/stdcpp/utility.d  50
-rw-r--r--  libphobos/libdruntime/core/stdcpp/vector.d  850
-rw-r--r--  libphobos/libdruntime/core/stdcpp/xutility.d  427
-rw-r--r--  libphobos/libdruntime/core/sync/barrier.d  61
-rw-r--r--  libphobos/libdruntime/core/sync/condition.d  450
-rw-r--r--  libphobos/libdruntime/core/sync/config.d  19
-rw-r--r--  libphobos/libdruntime/core/sync/event.d  345
-rw-r--r--  libphobos/libdruntime/core/sync/mutex.d  16
-rw-r--r--  libphobos/libdruntime/core/sync/rwmutex.d  173
-rw-r--r--  libphobos/libdruntime/core/sync/semaphore.d  42
-rw-r--r--  libphobos/libdruntime/core/sys/darwin/dlfcn.d  5
-rw-r--r--  libphobos/libdruntime/core/sys/dragonflybsd/sys/elf32.d  2
-rw-r--r--  libphobos/libdruntime/core/sys/dragonflybsd/sys/elf64.d  2
-rw-r--r--  libphobos/libdruntime/core/sys/freebsd/sys/elf32.d  2
-rw-r--r--  libphobos/libdruntime/core/sys/freebsd/sys/elf64.d  2
-rw-r--r--  libphobos/libdruntime/core/sys/linux/fs.d  265
-rw-r--r--  libphobos/libdruntime/core/sys/linux/io_uring.d  414
-rw-r--r--  libphobos/libdruntime/core/sys/linux/perf_event.d  2515
-rw-r--r--  libphobos/libdruntime/core/sys/linux/sys/procfs.d  15
-rw-r--r--  libphobos/libdruntime/core/sys/netbsd/sys/elf32.d  2
-rw-r--r--  libphobos/libdruntime/core/sys/netbsd/sys/elf64.d  2
-rw-r--r--  libphobos/libdruntime/core/sys/openbsd/execinfo.d  147
-rw-r--r--  libphobos/libdruntime/core/sys/openbsd/sys/elf32.d  2
-rw-r--r--  libphobos/libdruntime/core/sys/openbsd/sys/elf64.d  2
-rw-r--r--  libphobos/libdruntime/core/sys/posix/arpa/inet.d  116
-rw-r--r--  libphobos/libdruntime/core/sys/posix/fcntl.d  16
-rw-r--r--  libphobos/libdruntime/core/sys/posix/net/if_.d  2
-rw-r--r--  libphobos/libdruntime/core/sys/posix/semaphore.d  2
-rw-r--r--  libphobos/libdruntime/core/sys/posix/setjmp.d  4
-rw-r--r--  libphobos/libdruntime/core/sys/posix/stdio.d  10
-rw-r--r--  libphobos/libdruntime/core/sys/posix/string.d  8
-rw-r--r--  libphobos/libdruntime/core/sys/windows/basetsd.d  2
-rw-r--r--  libphobos/libdruntime/core/sys/windows/dll.d  1
-rw-r--r--  libphobos/libdruntime/core/sys/windows/sqlext.d  2
-rw-r--r--  libphobos/libdruntime/core/thread/fiber.d  2
-rw-r--r--  libphobos/libdruntime/core/thread/osthread.d  34
-rw-r--r--  libphobos/libdruntime/core/thread/threadbase.d  12
-rw-r--r--  libphobos/libdruntime/core/time.d  1201
-rw-r--r--  libphobos/libdruntime/gc/bits.d  129
-rw-r--r--  libphobos/libdruntime/gc/config.d  291
-rw-r--r--  libphobos/libdruntime/gc/impl/conservative/gc.d  3413
-rw-r--r--  libphobos/libdruntime/gc/proxy.d  239
-rw-r--r--  libphobos/libdruntime/gcc/deh.d  22
-rw-r--r--  libphobos/libdruntime/gcc/emutls.d  3
-rw-r--r--  libphobos/libdruntime/gcc/sections/elf.d  6
-rw-r--r--  libphobos/libdruntime/gcc/sections/macho.d  6
-rw-r--r--  libphobos/libdruntime/gcc/sections/pecoff.d  6
-rw-r--r--  libphobos/libdruntime/object.d  3555
-rw-r--r--  libphobos/libdruntime/rt/aApply.d  6
-rw-r--r--  libphobos/libdruntime/rt/aApplyR.d  5
-rw-r--r--  libphobos/libdruntime/rt/aaA.d  272
-rw-r--r--  libphobos/libdruntime/rt/adi.d  306
-rw-r--r--  libphobos/libdruntime/rt/arrayassign.d  4
-rw-r--r--  libphobos/libdruntime/rt/arraycast.d  52
-rw-r--r--  libphobos/libdruntime/rt/arraycat.d  4
-rw-r--r--  libphobos/libdruntime/rt/cast_.d  51
-rw-r--r--  libphobos/libdruntime/rt/config.d  85
-rw-r--r--  libphobos/libdruntime/rt/critical_.d  3
-rw-r--r--  libphobos/libdruntime/rt/deh.d  36
-rw-r--r--  libphobos/libdruntime/rt/dmain2.d  333
-rw-r--r--  libphobos/libdruntime/rt/dylib_fixes.c  2
-rw-r--r--  libphobos/libdruntime/rt/ehalloc.d  125
-rw-r--r--  libphobos/libdruntime/rt/invariant.d  3
-rw-r--r--  libphobos/libdruntime/rt/lifetime.d  896
-rw-r--r--  libphobos/libdruntime/rt/memory.d  2
-rw-r--r--  libphobos/libdruntime/rt/minfo.d  10
-rw-r--r--  libphobos/libdruntime/rt/monitor_.d  10
-rw-r--r--  libphobos/libdruntime/rt/obj.d  35
-rw-r--r--  libphobos/libdruntime/rt/profilegc.d  170
-rw-r--r--  libphobos/libdruntime/rt/sections.d  17
-rw-r--r--  libphobos/libdruntime/rt/switch_.d  424
-rw-r--r--  libphobos/libdruntime/rt/tlsgc.d  3
-rw-r--r--  libphobos/libdruntime/rt/util/random.d  51
-rw-r--r--  libphobos/libdruntime/rt/util/typeinfo.d  304
-rw-r--r--  libphobos/libdruntime/rt/util/utility.d  44
144 files changed, 31728 insertions, 12378 deletions
diff --git a/libphobos/libdruntime/LICENSE b/libphobos/libdruntime/LICENSE.txt
index c83ac89..36b7cd9 100644
--- a/libphobos/libdruntime/LICENSE
+++ b/libphobos/libdruntime/LICENSE.txt
@@ -1,6 +1,3 @@
-DRuntime: Runtime Library for the D Programming Language
-========================================================
-
Boost Software License - Version 1.0 - August 17th, 2003
Permission is hereby granted, free of charge, to any person or organization
diff --git a/libphobos/libdruntime/MERGE b/libphobos/libdruntime/MERGE
index 0d554e0..11bef0f 100644
--- a/libphobos/libdruntime/MERGE
+++ b/libphobos/libdruntime/MERGE
@@ -1,4 +1,4 @@
-98c6ff0cf1241a0cfac196bf8a0523b1d4ecd3ac
+e6caaab9d359198b760c698dcb6d253afb3f81f6
The first line of this file holds the git revision number of the last
merge done from the dlang/druntime repository.
diff --git a/libphobos/libdruntime/Makefile.am b/libphobos/libdruntime/Makefile.am
index a2e2bff..80c7567 100644
--- a/libphobos/libdruntime/Makefile.am
+++ b/libphobos/libdruntime/Makefile.am
@@ -19,7 +19,8 @@
include $(top_srcdir)/d_rules.am
# Make sure GDC can find libdruntime include files
-D_EXTRA_DFLAGS=-nostdinc -I $(srcdir) -I .
+D_EXTRA_DFLAGS=-fpreview=dip1000 -fpreview=fieldwise -fpreview=dtorfields \
+ -nostdinc -I $(srcdir) -I .
# D flags for compilation
AM_DFLAGS= \
@@ -119,6 +120,7 @@ endif
DRUNTIME_DSOURCES_GENERATED = gcc/config.d gcc/libbacktrace.d
ALL_DRUNTIME_SOURCES = $(DRUNTIME_DSOURCES) $(DRUNTIME_CSOURCES) \
+ $(DRUNTIME_DSOURCES_STDCXX) \
$(DRUNTIME_SOURCES_CONFIGURED) $(DRUNTIME_DSOURCES_GENERATED)
# Need this library to both be part of libgphobos.a, and installed separately.
@@ -166,12 +168,30 @@ install-data-local:
DRUNTIME_CSOURCES = core/stdc/errno_.c
DRUNTIME_DSOURCES = core/atomic.d core/attribute.d core/bitop.d \
- core/checkedint.d core/cpuid.d core/demangle.d core/exception.d \
- core/internal/abort.d core/internal/arrayop.d \
- core/internal/attributes.d core/internal/convert.d \
- core/internal/hash.d core/internal/spinlock.d core/internal/string.d \
- core/internal/traits.d core/math.d core/memory.d core/runtime.d \
- core/simd.d core/stdc/assert_.d core/stdc/complex.d core/stdc/config.d \
+ core/builtins.d core/checkedint.d core/cpuid.d core/demangle.d \
+ core/exception.d core/gc/config.d core/gc/gcinterface.d \
+ core/gc/registry.d core/internal/abort.d \
+ core/internal/array/appending.d core/internal/array/capacity.d \
+ core/internal/array/casting.d core/internal/array/comparison.d \
+ core/internal/array/concatenation.d core/internal/array/construction.d \
+ core/internal/array/equality.d core/internal/array/operations.d \
+ core/internal/array/utils.d core/internal/atomic.d \
+ core/internal/attributes.d core/internal/container/array.d \
+ core/internal/container/common.d core/internal/container/hashtab.d \
+ core/internal/container/treap.d core/internal/convert.d \
+ core/internal/dassert.d core/internal/destruction.d \
+ core/internal/entrypoint.d core/internal/gc/bits.d \
+ core/internal/gc/impl/conservative/gc.d \
+ core/internal/gc/impl/manual/gc.d core/internal/gc/impl/proto/gc.d \
+ core/internal/gc/os.d core/internal/gc/pooltable.d \
+ core/internal/gc/proxy.d core/internal/hash.d core/internal/lifetime.d \
+ core/internal/moving.d core/internal/parseoptions.d \
+ core/internal/postblit.d core/internal/qsort.d \
+ core/internal/spinlock.d core/internal/string.d \
+ core/internal/switch_.d core/internal/traits.d core/internal/utf.d \
+ core/internal/util/array.d core/internal/util/math.d core/lifetime.d \
+ core/math.d core/memory.d core/runtime.d core/simd.d \
+ core/stdc/assert_.d core/stdc/complex.d core/stdc/config.d \
core/stdc/ctype.d core/stdc/errno.d core/stdc/fenv.d \
core/stdc/float_.d core/stdc/inttypes.d core/stdc/limits.d \
core/stdc/locale.d core/stdc/math.d core/stdc/signal.d \
@@ -179,28 +199,28 @@ DRUNTIME_DSOURCES = core/atomic.d core/attribute.d core/bitop.d \
core/stdc/stdio.d core/stdc/stdlib.d core/stdc/string.d \
core/stdc/tgmath.d core/stdc/time.d core/stdc/wchar_.d \
core/stdc/wctype.d core/sync/barrier.d core/sync/condition.d \
- core/sync/config.d core/sync/exception.d core/sync/mutex.d \
- core/sync/rwmutex.d core/sync/semaphore.d core/thread/context.d \
- core/thread/fiber.d core/thread/osthread.d core/thread/package.d \
- core/thread/threadbase.d core/thread/threadgroup.d core/thread/types.d \
- core/time.d core/vararg.d core/volatile.d gc/bits.d gc/config.d \
- gc/gcinterface.d gc/impl/conservative/gc.d gc/impl/manual/gc.d gc/os.d \
- gc/pooltable.d gc/proxy.d gcc/attribute.d gcc/attributes.d \
+ core/sync/config.d core/sync/event.d core/sync/exception.d \
+ core/sync/mutex.d core/sync/rwmutex.d core/sync/semaphore.d \
+ core/thread/context.d core/thread/fiber.d core/thread/osthread.d \
+ core/thread/package.d core/thread/threadbase.d \
+ core/thread/threadgroup.d core/thread/types.d core/time.d \
+ core/vararg.d core/volatile.d gcc/attribute.d gcc/attributes.d \
gcc/backtrace.d gcc/builtins.d gcc/deh.d gcc/emutls.d gcc/gthread.d \
gcc/sections/common.d gcc/sections/elf.d gcc/sections/macho.d \
gcc/sections/package.d gcc/sections/pecoff.d gcc/unwind/arm.d \
gcc/unwind/arm_common.d gcc/unwind/c6x.d gcc/unwind/generic.d \
gcc/unwind/package.d gcc/unwind/pe.d object.d rt/aApply.d rt/aApplyR.d \
- rt/aaA.d rt/adi.d rt/arrayassign.d rt/arraycast.d rt/arraycat.d \
- rt/cast_.d rt/config.d rt/critical_.d rt/deh.d rt/dmain2.d \
+ rt/aaA.d rt/adi.d rt/arrayassign.d rt/arraycat.d rt/cast_.d \
+ rt/config.d rt/critical_.d rt/deh.d rt/dmain2.d rt/ehalloc.d \
rt/invariant.d rt/lifetime.d rt/memory.d rt/minfo.d rt/monitor_.d \
- rt/obj.d rt/qsort.d rt/sections.d rt/switch_.d rt/tlsgc.d \
- rt/util/array.d rt/util/container/array.d rt/util/container/common.d \
- rt/util/container/hashtab.d rt/util/container/treap.d rt/util/random.d \
- rt/util/typeinfo.d rt/util/utf.d
+ rt/profilegc.d rt/sections.d rt/tlsgc.d rt/util/typeinfo.d \
+ rt/util/utility.d
-DRUNTIME_DSOURCES_STDCXX = core/stdcpp/exception.d \
- core/stdcpp/typeinfo.d
+DRUNTIME_DSOURCES_STDCXX = core/stdcpp/allocator.d core/stdcpp/array.d \
+ core/stdcpp/exception.d core/stdcpp/memory.d core/stdcpp/new_.d \
+ core/stdcpp/string.d core/stdcpp/string_view.d \
+ core/stdcpp/type_traits.d core/stdcpp/typeinfo.d core/stdcpp/utility.d \
+ core/stdcpp/vector.d core/stdcpp/xutility.d
DRUNTIME_DSOURCES_BIONIC = core/sys/bionic/err.d \
core/sys/bionic/fcntl.d core/sys/bionic/stdlib.d \
@@ -249,17 +269,19 @@ DRUNTIME_DSOURCES_FREEBSD = core/sys/freebsd/config.d \
DRUNTIME_DSOURCES_LINUX = core/sys/linux/config.d \
core/sys/linux/dlfcn.d core/sys/linux/elf.d core/sys/linux/epoll.d \
core/sys/linux/err.d core/sys/linux/errno.d core/sys/linux/execinfo.d \
- core/sys/linux/fcntl.d core/sys/linux/ifaddrs.d core/sys/linux/link.d \
+ core/sys/linux/fcntl.d core/sys/linux/fs.d core/sys/linux/ifaddrs.d \
+ core/sys/linux/io_uring.d core/sys/linux/link.d \
core/sys/linux/netinet/in_.d core/sys/linux/netinet/tcp.d \
- core/sys/linux/sched.d core/sys/linux/stdio.d core/sys/linux/string.d \
+ core/sys/linux/perf_event.d core/sys/linux/sched.d \
+ core/sys/linux/stdio.d core/sys/linux/string.d \
core/sys/linux/sys/auxv.d core/sys/linux/sys/eventfd.d \
core/sys/linux/sys/file.d core/sys/linux/sys/inotify.d \
core/sys/linux/sys/mman.d core/sys/linux/sys/prctl.d \
- core/sys/linux/sys/signalfd.d core/sys/linux/sys/socket.d \
- core/sys/linux/sys/sysinfo.d core/sys/linux/sys/time.d \
- core/sys/linux/sys/xattr.d core/sys/linux/termios.d \
- core/sys/linux/time.d core/sys/linux/timerfd.d core/sys/linux/tipc.d \
- core/sys/linux/unistd.d
+ core/sys/linux/sys/procfs.d core/sys/linux/sys/signalfd.d \
+ core/sys/linux/sys/socket.d core/sys/linux/sys/sysinfo.d \
+ core/sys/linux/sys/time.d core/sys/linux/sys/xattr.d \
+ core/sys/linux/termios.d core/sys/linux/time.d \
+ core/sys/linux/timerfd.d core/sys/linux/tipc.d core/sys/linux/unistd.d
DRUNTIME_DSOURCES_NETBSD = core/sys/netbsd/dlfcn.d \
core/sys/netbsd/err.d core/sys/netbsd/execinfo.d \
@@ -271,13 +293,13 @@ DRUNTIME_DSOURCES_NETBSD = core/sys/netbsd/dlfcn.d \
core/sys/netbsd/sys/sysctl.d core/sys/netbsd/time.d
DRUNTIME_DSOURCES_OPENBSD = core/sys/openbsd/dlfcn.d \
- core/sys/openbsd/err.d core/sys/openbsd/stdlib.d \
- core/sys/openbsd/string.d core/sys/openbsd/sys/cdefs.d \
- core/sys/openbsd/sys/elf.d core/sys/openbsd/sys/elf32.d \
- core/sys/openbsd/sys/elf64.d core/sys/openbsd/sys/elf_common.d \
- core/sys/openbsd/sys/link_elf.d core/sys/openbsd/sys/mman.d \
- core/sys/openbsd/sys/sysctl.d core/sys/openbsd/time.d \
- core/sys/openbsd/unistd.d
+ core/sys/openbsd/err.d core/sys/openbsd/execinfo.d \
+ core/sys/openbsd/stdlib.d core/sys/openbsd/string.d \
+ core/sys/openbsd/sys/cdefs.d core/sys/openbsd/sys/elf.d \
+ core/sys/openbsd/sys/elf32.d core/sys/openbsd/sys/elf64.d \
+ core/sys/openbsd/sys/elf_common.d core/sys/openbsd/sys/link_elf.d \
+ core/sys/openbsd/sys/mman.d core/sys/openbsd/sys/sysctl.d \
+ core/sys/openbsd/time.d core/sys/openbsd/unistd.d
DRUNTIME_DSOURCES_POSIX = core/sys/posix/aio.d \
core/sys/posix/arpa/inet.d core/sys/posix/config.d \
@@ -402,4 +424,4 @@ DRUNTIME_DSOURCES_WINDOWS = core/sys/windows/accctrl.d \
core/sys/windows/winuser.d core/sys/windows/winver.d \
core/sys/windows/wtsapi32.d core/sys/windows/wtypes.d
-DRUNTIME_DISOURCES = __entrypoint.di __main.di
+DRUNTIME_DISOURCES = __main.di
diff --git a/libphobos/libdruntime/Makefile.in b/libphobos/libdruntime/Makefile.in
index cb2e372..b5f29da 100644
--- a/libphobos/libdruntime/Makefile.in
+++ b/libphobos/libdruntime/Makefile.in
@@ -188,47 +188,70 @@ LTLIBRARIES = $(noinst_LTLIBRARIES) $(toolexeclib_LTLIBRARIES)
am__DEPENDENCIES_1 =
am__dirstamp = $(am__leading_dot)dirstamp
am__objects_1 = core/atomic.lo core/attribute.lo core/bitop.lo \
- core/checkedint.lo core/cpuid.lo core/demangle.lo \
- core/exception.lo core/internal/abort.lo \
- core/internal/arrayop.lo core/internal/attributes.lo \
- core/internal/convert.lo core/internal/hash.lo \
+ core/builtins.lo core/checkedint.lo core/cpuid.lo \
+ core/demangle.lo core/exception.lo core/gc/config.lo \
+ core/gc/gcinterface.lo core/gc/registry.lo \
+ core/internal/abort.lo core/internal/array/appending.lo \
+ core/internal/array/capacity.lo core/internal/array/casting.lo \
+ core/internal/array/comparison.lo \
+ core/internal/array/concatenation.lo \
+ core/internal/array/construction.lo \
+ core/internal/array/equality.lo \
+ core/internal/array/operations.lo core/internal/array/utils.lo \
+ core/internal/atomic.lo core/internal/attributes.lo \
+ core/internal/container/array.lo \
+ core/internal/container/common.lo \
+ core/internal/container/hashtab.lo \
+ core/internal/container/treap.lo core/internal/convert.lo \
+ core/internal/dassert.lo core/internal/destruction.lo \
+ core/internal/entrypoint.lo core/internal/gc/bits.lo \
+ core/internal/gc/impl/conservative/gc.lo \
+ core/internal/gc/impl/manual/gc.lo \
+ core/internal/gc/impl/proto/gc.lo core/internal/gc/os.lo \
+ core/internal/gc/pooltable.lo core/internal/gc/proxy.lo \
+ core/internal/hash.lo core/internal/lifetime.lo \
+ core/internal/moving.lo core/internal/parseoptions.lo \
+ core/internal/postblit.lo core/internal/qsort.lo \
core/internal/spinlock.lo core/internal/string.lo \
- core/internal/traits.lo core/math.lo core/memory.lo \
- core/runtime.lo core/simd.lo core/stdc/assert_.lo \
- core/stdc/complex.lo core/stdc/config.lo core/stdc/ctype.lo \
- core/stdc/errno.lo core/stdc/fenv.lo core/stdc/float_.lo \
- core/stdc/inttypes.lo core/stdc/limits.lo core/stdc/locale.lo \
- core/stdc/math.lo core/stdc/signal.lo core/stdc/stdarg.lo \
- core/stdc/stddef.lo core/stdc/stdint.lo core/stdc/stdio.lo \
- core/stdc/stdlib.lo core/stdc/string.lo core/stdc/tgmath.lo \
- core/stdc/time.lo core/stdc/wchar_.lo core/stdc/wctype.lo \
- core/sync/barrier.lo core/sync/condition.lo \
- core/sync/config.lo core/sync/exception.lo core/sync/mutex.lo \
- core/sync/rwmutex.lo core/sync/semaphore.lo \
- core/thread/context.lo core/thread/fiber.lo \
- core/thread/osthread.lo core/thread/package.lo \
- core/thread/threadbase.lo core/thread/threadgroup.lo \
- core/thread/types.lo core/time.lo core/vararg.lo \
- core/volatile.lo gc/bits.lo gc/config.lo gc/gcinterface.lo \
- gc/impl/conservative/gc.lo gc/impl/manual/gc.lo gc/os.lo \
- gc/pooltable.lo gc/proxy.lo gcc/attribute.lo gcc/attributes.lo \
- gcc/backtrace.lo gcc/builtins.lo gcc/deh.lo gcc/emutls.lo \
- gcc/gthread.lo gcc/sections/common.lo gcc/sections/elf.lo \
- gcc/sections/macho.lo gcc/sections/package.lo \
- gcc/sections/pecoff.lo gcc/unwind/arm.lo \
- gcc/unwind/arm_common.lo gcc/unwind/c6x.lo \
+ core/internal/switch_.lo core/internal/traits.lo \
+ core/internal/utf.lo core/internal/util/array.lo \
+ core/internal/util/math.lo core/lifetime.lo core/math.lo \
+ core/memory.lo core/runtime.lo core/simd.lo \
+ core/stdc/assert_.lo core/stdc/complex.lo core/stdc/config.lo \
+ core/stdc/ctype.lo core/stdc/errno.lo core/stdc/fenv.lo \
+ core/stdc/float_.lo core/stdc/inttypes.lo core/stdc/limits.lo \
+ core/stdc/locale.lo core/stdc/math.lo core/stdc/signal.lo \
+ core/stdc/stdarg.lo core/stdc/stddef.lo core/stdc/stdint.lo \
+ core/stdc/stdio.lo core/stdc/stdlib.lo core/stdc/string.lo \
+ core/stdc/tgmath.lo core/stdc/time.lo core/stdc/wchar_.lo \
+ core/stdc/wctype.lo core/sync/barrier.lo \
+ core/sync/condition.lo core/sync/config.lo core/sync/event.lo \
+ core/sync/exception.lo core/sync/mutex.lo core/sync/rwmutex.lo \
+ core/sync/semaphore.lo core/thread/context.lo \
+ core/thread/fiber.lo core/thread/osthread.lo \
+ core/thread/package.lo core/thread/threadbase.lo \
+ core/thread/threadgroup.lo core/thread/types.lo core/time.lo \
+ core/vararg.lo core/volatile.lo gcc/attribute.lo \
+ gcc/attributes.lo gcc/backtrace.lo gcc/builtins.lo gcc/deh.lo \
+ gcc/emutls.lo gcc/gthread.lo gcc/sections/common.lo \
+ gcc/sections/elf.lo gcc/sections/macho.lo \
+ gcc/sections/package.lo gcc/sections/pecoff.lo \
+ gcc/unwind/arm.lo gcc/unwind/arm_common.lo gcc/unwind/c6x.lo \
gcc/unwind/generic.lo gcc/unwind/package.lo gcc/unwind/pe.lo \
object.lo rt/aApply.lo rt/aApplyR.lo rt/aaA.lo rt/adi.lo \
- rt/arrayassign.lo rt/arraycast.lo rt/arraycat.lo rt/cast_.lo \
- rt/config.lo rt/critical_.lo rt/deh.lo rt/dmain2.lo \
+ rt/arrayassign.lo rt/arraycat.lo rt/cast_.lo rt/config.lo \
+ rt/critical_.lo rt/deh.lo rt/dmain2.lo rt/ehalloc.lo \
rt/invariant.lo rt/lifetime.lo rt/memory.lo rt/minfo.lo \
- rt/monitor_.lo rt/obj.lo rt/qsort.lo rt/sections.lo \
- rt/switch_.lo rt/tlsgc.lo rt/util/array.lo \
- rt/util/container/array.lo rt/util/container/common.lo \
- rt/util/container/hashtab.lo rt/util/container/treap.lo \
- rt/util/random.lo rt/util/typeinfo.lo rt/util/utf.lo
+ rt/monitor_.lo rt/profilegc.lo rt/sections.lo rt/tlsgc.lo \
+ rt/util/typeinfo.lo rt/util/utility.lo
am__objects_2 = core/stdc/libgdruntime_la-errno_.lo
-am__objects_3 = core/sys/posix/aio.lo core/sys/posix/arpa/inet.lo \
+am__objects_3 = core/stdcpp/allocator.lo core/stdcpp/array.lo \
+ core/stdcpp/exception.lo core/stdcpp/memory.lo \
+ core/stdcpp/new_.lo core/stdcpp/string.lo \
+ core/stdcpp/string_view.lo core/stdcpp/type_traits.lo \
+ core/stdcpp/typeinfo.lo core/stdcpp/utility.lo \
+ core/stdcpp/vector.lo core/stdcpp/xutility.lo
+am__objects_4 = core/sys/posix/aio.lo core/sys/posix/arpa/inet.lo \
core/sys/posix/config.lo core/sys/posix/dirent.lo \
core/sys/posix/dlfcn.lo core/sys/posix/fcntl.lo \
core/sys/posix/grp.lo core/sys/posix/iconv.lo \
@@ -255,8 +278,8 @@ am__objects_3 = core/sys/posix/aio.lo core/sys/posix/arpa/inet.lo \
core/sys/posix/syslog.lo core/sys/posix/termios.lo \
core/sys/posix/time.lo core/sys/posix/ucontext.lo \
core/sys/posix/unistd.lo core/sys/posix/utime.lo
-@DRUNTIME_OS_POSIX_TRUE@am__objects_4 = $(am__objects_3)
-am__objects_5 = core/sys/darwin/config.lo \
+@DRUNTIME_OS_POSIX_TRUE@am__objects_5 = $(am__objects_4)
+am__objects_6 = core/sys/darwin/config.lo \
core/sys/darwin/crt_externs.lo core/sys/darwin/dlfcn.lo \
core/sys/darwin/err.lo core/sys/darwin/execinfo.lo \
core/sys/darwin/fcntl.lo core/sys/darwin/ifaddrs.lo \
@@ -271,8 +294,8 @@ am__objects_5 = core/sys/darwin/config.lo \
core/sys/darwin/sys/attr.lo core/sys/darwin/sys/cdefs.lo \
core/sys/darwin/sys/event.lo core/sys/darwin/sys/mman.lo \
core/sys/darwin/sys/sysctl.lo
-@DRUNTIME_OS_DARWIN_TRUE@am__objects_6 = $(am__objects_5)
-am__objects_7 = core/sys/dragonflybsd/dlfcn.lo \
+@DRUNTIME_OS_DARWIN_TRUE@am__objects_7 = $(am__objects_6)
+am__objects_8 = core/sys/dragonflybsd/dlfcn.lo \
core/sys/dragonflybsd/err.lo core/sys/dragonflybsd/execinfo.lo \
core/sys/dragonflybsd/netinet/in_.lo \
core/sys/dragonflybsd/pthread_np.lo \
@@ -291,12 +314,12 @@ am__objects_7 = core/sys/dragonflybsd/dlfcn.lo \
core/sys/dragonflybsd/sys/socket.lo \
core/sys/dragonflybsd/sys/sysctl.lo \
core/sys/dragonflybsd/time.lo
-@DRUNTIME_OS_DRAGONFLYBSD_TRUE@am__objects_8 = $(am__objects_7)
-am__objects_9 = core/sys/bionic/err.lo core/sys/bionic/fcntl.lo \
+@DRUNTIME_OS_DRAGONFLYBSD_TRUE@am__objects_9 = $(am__objects_8)
+am__objects_10 = core/sys/bionic/err.lo core/sys/bionic/fcntl.lo \
core/sys/bionic/stdlib.lo core/sys/bionic/string.lo \
core/sys/bionic/unistd.lo
-@DRUNTIME_OS_ANDROID_TRUE@am__objects_10 = $(am__objects_9)
-am__objects_11 = core/sys/freebsd/config.lo core/sys/freebsd/dlfcn.lo \
+@DRUNTIME_OS_ANDROID_TRUE@am__objects_11 = $(am__objects_10)
+am__objects_12 = core/sys/freebsd/config.lo core/sys/freebsd/dlfcn.lo \
core/sys/freebsd/err.lo core/sys/freebsd/execinfo.lo \
core/sys/freebsd/netinet/in_.lo core/sys/freebsd/pthread_np.lo \
core/sys/freebsd/stdlib.lo core/sys/freebsd/string.lo \
@@ -309,8 +332,8 @@ am__objects_11 = core/sys/freebsd/config.lo core/sys/freebsd/dlfcn.lo \
core/sys/freebsd/sys/mman.lo core/sys/freebsd/sys/mount.lo \
core/sys/freebsd/sys/sysctl.lo core/sys/freebsd/time.lo \
core/sys/freebsd/unistd.lo
-@DRUNTIME_OS_FREEBSD_TRUE@am__objects_12 = $(am__objects_11)
-am__objects_13 = core/sys/netbsd/dlfcn.lo core/sys/netbsd/err.lo \
+@DRUNTIME_OS_FREEBSD_TRUE@am__objects_13 = $(am__objects_12)
+am__objects_14 = core/sys/netbsd/dlfcn.lo core/sys/netbsd/err.lo \
core/sys/netbsd/execinfo.lo core/sys/netbsd/stdlib.lo \
core/sys/netbsd/string.lo core/sys/netbsd/sys/elf.lo \
core/sys/netbsd/sys/elf32.lo core/sys/netbsd/sys/elf64.lo \
@@ -318,34 +341,37 @@ am__objects_13 = core/sys/netbsd/dlfcn.lo core/sys/netbsd/err.lo \
core/sys/netbsd/sys/featuretest.lo \
core/sys/netbsd/sys/link_elf.lo core/sys/netbsd/sys/mman.lo \
core/sys/netbsd/sys/sysctl.lo core/sys/netbsd/time.lo
-@DRUNTIME_OS_NETBSD_TRUE@am__objects_14 = $(am__objects_13)
-am__objects_15 = core/sys/openbsd/dlfcn.lo core/sys/openbsd/err.lo \
- core/sys/openbsd/stdlib.lo core/sys/openbsd/string.lo \
- core/sys/openbsd/sys/cdefs.lo core/sys/openbsd/sys/elf.lo \
- core/sys/openbsd/sys/elf32.lo core/sys/openbsd/sys/elf64.lo \
+@DRUNTIME_OS_NETBSD_TRUE@am__objects_15 = $(am__objects_14)
+am__objects_16 = core/sys/openbsd/dlfcn.lo core/sys/openbsd/err.lo \
+ core/sys/openbsd/execinfo.lo core/sys/openbsd/stdlib.lo \
+ core/sys/openbsd/string.lo core/sys/openbsd/sys/cdefs.lo \
+ core/sys/openbsd/sys/elf.lo core/sys/openbsd/sys/elf32.lo \
+ core/sys/openbsd/sys/elf64.lo \
core/sys/openbsd/sys/elf_common.lo \
core/sys/openbsd/sys/link_elf.lo core/sys/openbsd/sys/mman.lo \
core/sys/openbsd/sys/sysctl.lo core/sys/openbsd/time.lo \
core/sys/openbsd/unistd.lo
-@DRUNTIME_OS_OPENBSD_TRUE@am__objects_16 = $(am__objects_15)
-am__objects_17 = core/sys/linux/config.lo core/sys/linux/dlfcn.lo \
+@DRUNTIME_OS_OPENBSD_TRUE@am__objects_17 = $(am__objects_16)
+am__objects_18 = core/sys/linux/config.lo core/sys/linux/dlfcn.lo \
core/sys/linux/elf.lo core/sys/linux/epoll.lo \
core/sys/linux/err.lo core/sys/linux/errno.lo \
core/sys/linux/execinfo.lo core/sys/linux/fcntl.lo \
- core/sys/linux/ifaddrs.lo core/sys/linux/link.lo \
+ core/sys/linux/fs.lo core/sys/linux/ifaddrs.lo \
+ core/sys/linux/io_uring.lo core/sys/linux/link.lo \
core/sys/linux/netinet/in_.lo core/sys/linux/netinet/tcp.lo \
- core/sys/linux/sched.lo core/sys/linux/stdio.lo \
- core/sys/linux/string.lo core/sys/linux/sys/auxv.lo \
- core/sys/linux/sys/eventfd.lo core/sys/linux/sys/file.lo \
- core/sys/linux/sys/inotify.lo core/sys/linux/sys/mman.lo \
- core/sys/linux/sys/prctl.lo core/sys/linux/sys/signalfd.lo \
+ core/sys/linux/perf_event.lo core/sys/linux/sched.lo \
+ core/sys/linux/stdio.lo core/sys/linux/string.lo \
+ core/sys/linux/sys/auxv.lo core/sys/linux/sys/eventfd.lo \
+ core/sys/linux/sys/file.lo core/sys/linux/sys/inotify.lo \
+ core/sys/linux/sys/mman.lo core/sys/linux/sys/prctl.lo \
+ core/sys/linux/sys/procfs.lo core/sys/linux/sys/signalfd.lo \
core/sys/linux/sys/socket.lo core/sys/linux/sys/sysinfo.lo \
core/sys/linux/sys/time.lo core/sys/linux/sys/xattr.lo \
core/sys/linux/termios.lo core/sys/linux/time.lo \
core/sys/linux/timerfd.lo core/sys/linux/tipc.lo \
core/sys/linux/unistd.lo
-@DRUNTIME_OS_LINUX_TRUE@am__objects_18 = $(am__objects_17)
-am__objects_19 = core/sys/windows/accctrl.lo \
+@DRUNTIME_OS_LINUX_TRUE@am__objects_19 = $(am__objects_18)
+am__objects_20 = core/sys/windows/accctrl.lo \
core/sys/windows/aclapi.lo core/sys/windows/aclui.lo \
core/sys/windows/basetsd.lo core/sys/windows/basetyps.lo \
core/sys/windows/cderr.lo core/sys/windows/cguid.lo \
@@ -430,9 +456,9 @@ am__objects_19 = core/sys/windows/accctrl.lo \
core/sys/windows/winsvc.lo core/sys/windows/winuser.lo \
core/sys/windows/winver.lo core/sys/windows/wtsapi32.lo \
core/sys/windows/wtypes.lo
-@DRUNTIME_OS_MINGW_TRUE@am__objects_20 = $(am__objects_19) \
+@DRUNTIME_OS_MINGW_TRUE@am__objects_21 = $(am__objects_20) \
@DRUNTIME_OS_MINGW_TRUE@ config/mingw/libgdruntime_la-msvc.lo
-am__objects_21 = core/sys/solaris/dlfcn.lo core/sys/solaris/elf.lo \
+am__objects_22 = core/sys/solaris/dlfcn.lo core/sys/solaris/elf.lo \
core/sys/solaris/err.lo core/sys/solaris/execinfo.lo \
core/sys/solaris/libelf.lo core/sys/solaris/link.lo \
core/sys/solaris/stdlib.lo core/sys/solaris/sys/elf.lo \
@@ -444,48 +470,48 @@ am__objects_21 = core/sys/solaris/dlfcn.lo core/sys/solaris/elf.lo \
core/sys/solaris/sys/priocntl.lo \
core/sys/solaris/sys/procset.lo core/sys/solaris/sys/types.lo \
core/sys/solaris/time.lo
-@DRUNTIME_OS_SOLARIS_TRUE@am__objects_22 = $(am__objects_21)
-@DRUNTIME_CPU_AARCH64_TRUE@am__objects_23 = config/aarch64/libgdruntime_la-switchcontext.lo
-@DRUNTIME_CPU_ARM_TRUE@am__objects_24 = config/arm/libgdruntime_la-switchcontext.lo
-@DRUNTIME_CPU_MIPS_TRUE@am__objects_25 = config/mips/libgdruntime_la-switchcontext.lo
-@DRUNTIME_CPU_POWERPC_TRUE@am__objects_26 = config/powerpc/libgdruntime_la-switchcontext.lo
-@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_TRUE@am__objects_27 = config/mingw/libgdruntime_la-switchcontext.lo
-@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_FALSE@am__objects_28 = config/x86/libgdruntime_la-switchcontext.lo
-@DRUNTIME_CPU_SYSTEMZ_TRUE@am__objects_29 = config/systemz/libgdruntime_la-get_tls_offset.lo
-@DRUNTIME_CPU_S390_TRUE@am__objects_30 = config/s390/libgdruntime_la-get_tls_offset.lo
-am__objects_31 = $(am__objects_4) $(am__objects_6) $(am__objects_8) \
- $(am__objects_10) $(am__objects_12) $(am__objects_14) \
- $(am__objects_16) $(am__objects_18) $(am__objects_20) \
- $(am__objects_22) $(am__objects_23) $(am__objects_24) \
- $(am__objects_25) $(am__objects_26) $(am__objects_27) \
- $(am__objects_28) $(am__objects_29) $(am__objects_30)
-am__objects_32 = gcc/config.lo gcc/libbacktrace.lo
-am__objects_33 = $(am__objects_1) $(am__objects_2) $(am__objects_31) \
- $(am__objects_32)
-am_libgdruntime_la_OBJECTS = $(am__objects_33)
+@DRUNTIME_OS_SOLARIS_TRUE@am__objects_23 = $(am__objects_22)
+@DRUNTIME_CPU_AARCH64_TRUE@am__objects_24 = config/aarch64/libgdruntime_la-switchcontext.lo
+@DRUNTIME_CPU_ARM_TRUE@am__objects_25 = config/arm/libgdruntime_la-switchcontext.lo
+@DRUNTIME_CPU_MIPS_TRUE@am__objects_26 = config/mips/libgdruntime_la-switchcontext.lo
+@DRUNTIME_CPU_POWERPC_TRUE@am__objects_27 = config/powerpc/libgdruntime_la-switchcontext.lo
+@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_TRUE@am__objects_28 = config/mingw/libgdruntime_la-switchcontext.lo
+@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_FALSE@am__objects_29 = config/x86/libgdruntime_la-switchcontext.lo
+@DRUNTIME_CPU_SYSTEMZ_TRUE@am__objects_30 = config/systemz/libgdruntime_la-get_tls_offset.lo
+@DRUNTIME_CPU_S390_TRUE@am__objects_31 = config/s390/libgdruntime_la-get_tls_offset.lo
+am__objects_32 = $(am__objects_5) $(am__objects_7) $(am__objects_9) \
+ $(am__objects_11) $(am__objects_13) $(am__objects_15) \
+ $(am__objects_17) $(am__objects_19) $(am__objects_21) \
+ $(am__objects_23) $(am__objects_24) $(am__objects_25) \
+ $(am__objects_26) $(am__objects_27) $(am__objects_28) \
+ $(am__objects_29) $(am__objects_30) $(am__objects_31)
+am__objects_33 = gcc/config.lo gcc/libbacktrace.lo
+am__objects_34 = $(am__objects_1) $(am__objects_2) $(am__objects_3) \
+ $(am__objects_32) $(am__objects_33)
+am_libgdruntime_la_OBJECTS = $(am__objects_34)
libgdruntime_la_OBJECTS = $(am_libgdruntime_la_OBJECTS)
am__DEPENDENCIES_2 = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1)
-am__objects_34 = core/stdc/libgdruntime_convenience_la-errno_.lo
-@DRUNTIME_OS_MINGW_TRUE@am__objects_35 = $(am__objects_19) \
+am__objects_35 = core/stdc/libgdruntime_convenience_la-errno_.lo
+@DRUNTIME_OS_MINGW_TRUE@am__objects_36 = $(am__objects_20) \
@DRUNTIME_OS_MINGW_TRUE@ config/mingw/libgdruntime_convenience_la-msvc.lo
-@DRUNTIME_CPU_AARCH64_TRUE@am__objects_36 = config/aarch64/libgdruntime_convenience_la-switchcontext.lo
-@DRUNTIME_CPU_ARM_TRUE@am__objects_37 = config/arm/libgdruntime_convenience_la-switchcontext.lo
-@DRUNTIME_CPU_MIPS_TRUE@am__objects_38 = config/mips/libgdruntime_convenience_la-switchcontext.lo
-@DRUNTIME_CPU_POWERPC_TRUE@am__objects_39 = config/powerpc/libgdruntime_convenience_la-switchcontext.lo
-@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_TRUE@am__objects_40 = config/mingw/libgdruntime_convenience_la-switchcontext.lo
-@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_FALSE@am__objects_41 = config/x86/libgdruntime_convenience_la-switchcontext.lo
-@DRUNTIME_CPU_SYSTEMZ_TRUE@am__objects_42 = config/systemz/libgdruntime_convenience_la-get_tls_offset.lo
-@DRUNTIME_CPU_S390_TRUE@am__objects_43 = config/s390/libgdruntime_convenience_la-get_tls_offset.lo
-am__objects_44 = $(am__objects_4) $(am__objects_6) $(am__objects_8) \
- $(am__objects_10) $(am__objects_12) $(am__objects_14) \
- $(am__objects_16) $(am__objects_18) $(am__objects_35) \
- $(am__objects_22) $(am__objects_36) $(am__objects_37) \
- $(am__objects_38) $(am__objects_39) $(am__objects_40) \
- $(am__objects_41) $(am__objects_42) $(am__objects_43)
-am__objects_45 = $(am__objects_1) $(am__objects_34) $(am__objects_44) \
- $(am__objects_32)
-am__objects_46 = $(am__objects_45)
-am_libgdruntime_convenience_la_OBJECTS = $(am__objects_46)
+@DRUNTIME_CPU_AARCH64_TRUE@am__objects_37 = config/aarch64/libgdruntime_convenience_la-switchcontext.lo
+@DRUNTIME_CPU_ARM_TRUE@am__objects_38 = config/arm/libgdruntime_convenience_la-switchcontext.lo
+@DRUNTIME_CPU_MIPS_TRUE@am__objects_39 = config/mips/libgdruntime_convenience_la-switchcontext.lo
+@DRUNTIME_CPU_POWERPC_TRUE@am__objects_40 = config/powerpc/libgdruntime_convenience_la-switchcontext.lo
+@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_TRUE@am__objects_41 = config/mingw/libgdruntime_convenience_la-switchcontext.lo
+@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_FALSE@am__objects_42 = config/x86/libgdruntime_convenience_la-switchcontext.lo
+@DRUNTIME_CPU_SYSTEMZ_TRUE@am__objects_43 = config/systemz/libgdruntime_convenience_la-get_tls_offset.lo
+@DRUNTIME_CPU_S390_TRUE@am__objects_44 = config/s390/libgdruntime_convenience_la-get_tls_offset.lo
+am__objects_45 = $(am__objects_5) $(am__objects_7) $(am__objects_9) \
+ $(am__objects_11) $(am__objects_13) $(am__objects_15) \
+ $(am__objects_17) $(am__objects_19) $(am__objects_36) \
+ $(am__objects_23) $(am__objects_37) $(am__objects_38) \
+ $(am__objects_39) $(am__objects_40) $(am__objects_41) \
+ $(am__objects_42) $(am__objects_43) $(am__objects_44)
+am__objects_46 = $(am__objects_1) $(am__objects_35) $(am__objects_3) \
+ $(am__objects_45) $(am__objects_33)
+am__objects_47 = $(am__objects_46)
+am_libgdruntime_convenience_la_OBJECTS = $(am__objects_47)
libgdruntime_convenience_la_OBJECTS = \
$(am_libgdruntime_convenience_la_OBJECTS)
AM_V_P = $(am__v_P_@AM_V@)
@@ -728,7 +754,9 @@ LTDCOMPILE = $(LIBTOOL) --tag=D $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
# Include D build rules
# Make sure GDC can find libdruntime include files
-D_EXTRA_DFLAGS = -nostdinc -I $(srcdir) -I .
+D_EXTRA_DFLAGS = -fpreview=dip1000 -fpreview=fieldwise -fpreview=dtorfields \
+ -nostdinc -I $(srcdir) -I .
+
# D flags for compilation
AM_DFLAGS = \
@@ -767,6 +795,7 @@ DRUNTIME_SOURCES_CONFIGURED = $(am__append_1) $(am__append_2) \
# Generated by configure
DRUNTIME_DSOURCES_GENERATED = gcc/config.d gcc/libbacktrace.d
ALL_DRUNTIME_SOURCES = $(DRUNTIME_DSOURCES) $(DRUNTIME_CSOURCES) \
+ $(DRUNTIME_DSOURCES_STDCXX) \
$(DRUNTIME_SOURCES_CONFIGURED) $(DRUNTIME_DSOURCES_GENERATED)
@@ -803,12 +832,30 @@ libgdruntime_convenience_la_LINK = $(libgdruntime_la_LINK)
# https://www.gnu.org/software/automake/manual/html_node/Wildcards.html
DRUNTIME_CSOURCES = core/stdc/errno_.c
DRUNTIME_DSOURCES = core/atomic.d core/attribute.d core/bitop.d \
- core/checkedint.d core/cpuid.d core/demangle.d core/exception.d \
- core/internal/abort.d core/internal/arrayop.d \
- core/internal/attributes.d core/internal/convert.d \
- core/internal/hash.d core/internal/spinlock.d core/internal/string.d \
- core/internal/traits.d core/math.d core/memory.d core/runtime.d \
- core/simd.d core/stdc/assert_.d core/stdc/complex.d core/stdc/config.d \
+ core/builtins.d core/checkedint.d core/cpuid.d core/demangle.d \
+ core/exception.d core/gc/config.d core/gc/gcinterface.d \
+ core/gc/registry.d core/internal/abort.d \
+ core/internal/array/appending.d core/internal/array/capacity.d \
+ core/internal/array/casting.d core/internal/array/comparison.d \
+ core/internal/array/concatenation.d core/internal/array/construction.d \
+ core/internal/array/equality.d core/internal/array/operations.d \
+ core/internal/array/utils.d core/internal/atomic.d \
+ core/internal/attributes.d core/internal/container/array.d \
+ core/internal/container/common.d core/internal/container/hashtab.d \
+ core/internal/container/treap.d core/internal/convert.d \
+ core/internal/dassert.d core/internal/destruction.d \
+ core/internal/entrypoint.d core/internal/gc/bits.d \
+ core/internal/gc/impl/conservative/gc.d \
+ core/internal/gc/impl/manual/gc.d core/internal/gc/impl/proto/gc.d \
+ core/internal/gc/os.d core/internal/gc/pooltable.d \
+ core/internal/gc/proxy.d core/internal/hash.d core/internal/lifetime.d \
+ core/internal/moving.d core/internal/parseoptions.d \
+ core/internal/postblit.d core/internal/qsort.d \
+ core/internal/spinlock.d core/internal/string.d \
+ core/internal/switch_.d core/internal/traits.d core/internal/utf.d \
+ core/internal/util/array.d core/internal/util/math.d core/lifetime.d \
+ core/math.d core/memory.d core/runtime.d core/simd.d \
+ core/stdc/assert_.d core/stdc/complex.d core/stdc/config.d \
core/stdc/ctype.d core/stdc/errno.d core/stdc/fenv.d \
core/stdc/float_.d core/stdc/inttypes.d core/stdc/limits.d \
core/stdc/locale.d core/stdc/math.d core/stdc/signal.d \
@@ -816,28 +863,28 @@ DRUNTIME_DSOURCES = core/atomic.d core/attribute.d core/bitop.d \
core/stdc/stdio.d core/stdc/stdlib.d core/stdc/string.d \
core/stdc/tgmath.d core/stdc/time.d core/stdc/wchar_.d \
core/stdc/wctype.d core/sync/barrier.d core/sync/condition.d \
- core/sync/config.d core/sync/exception.d core/sync/mutex.d \
- core/sync/rwmutex.d core/sync/semaphore.d core/thread/context.d \
- core/thread/fiber.d core/thread/osthread.d core/thread/package.d \
- core/thread/threadbase.d core/thread/threadgroup.d core/thread/types.d \
- core/time.d core/vararg.d core/volatile.d gc/bits.d gc/config.d \
- gc/gcinterface.d gc/impl/conservative/gc.d gc/impl/manual/gc.d gc/os.d \
- gc/pooltable.d gc/proxy.d gcc/attribute.d gcc/attributes.d \
+ core/sync/config.d core/sync/event.d core/sync/exception.d \
+ core/sync/mutex.d core/sync/rwmutex.d core/sync/semaphore.d \
+ core/thread/context.d core/thread/fiber.d core/thread/osthread.d \
+ core/thread/package.d core/thread/threadbase.d \
+ core/thread/threadgroup.d core/thread/types.d core/time.d \
+ core/vararg.d core/volatile.d gcc/attribute.d gcc/attributes.d \
gcc/backtrace.d gcc/builtins.d gcc/deh.d gcc/emutls.d gcc/gthread.d \
gcc/sections/common.d gcc/sections/elf.d gcc/sections/macho.d \
gcc/sections/package.d gcc/sections/pecoff.d gcc/unwind/arm.d \
gcc/unwind/arm_common.d gcc/unwind/c6x.d gcc/unwind/generic.d \
gcc/unwind/package.d gcc/unwind/pe.d object.d rt/aApply.d rt/aApplyR.d \
- rt/aaA.d rt/adi.d rt/arrayassign.d rt/arraycast.d rt/arraycat.d \
- rt/cast_.d rt/config.d rt/critical_.d rt/deh.d rt/dmain2.d \
+ rt/aaA.d rt/adi.d rt/arrayassign.d rt/arraycat.d rt/cast_.d \
+ rt/config.d rt/critical_.d rt/deh.d rt/dmain2.d rt/ehalloc.d \
rt/invariant.d rt/lifetime.d rt/memory.d rt/minfo.d rt/monitor_.d \
- rt/obj.d rt/qsort.d rt/sections.d rt/switch_.d rt/tlsgc.d \
- rt/util/array.d rt/util/container/array.d rt/util/container/common.d \
- rt/util/container/hashtab.d rt/util/container/treap.d rt/util/random.d \
- rt/util/typeinfo.d rt/util/utf.d
+ rt/profilegc.d rt/sections.d rt/tlsgc.d rt/util/typeinfo.d \
+ rt/util/utility.d
-DRUNTIME_DSOURCES_STDCXX = core/stdcpp/exception.d \
- core/stdcpp/typeinfo.d
+DRUNTIME_DSOURCES_STDCXX = core/stdcpp/allocator.d core/stdcpp/array.d \
+ core/stdcpp/exception.d core/stdcpp/memory.d core/stdcpp/new_.d \
+ core/stdcpp/string.d core/stdcpp/string_view.d \
+ core/stdcpp/type_traits.d core/stdcpp/typeinfo.d core/stdcpp/utility.d \
+ core/stdcpp/vector.d core/stdcpp/xutility.d
DRUNTIME_DSOURCES_BIONIC = core/sys/bionic/err.d \
core/sys/bionic/fcntl.d core/sys/bionic/stdlib.d \
@@ -886,17 +933,19 @@ DRUNTIME_DSOURCES_FREEBSD = core/sys/freebsd/config.d \
DRUNTIME_DSOURCES_LINUX = core/sys/linux/config.d \
core/sys/linux/dlfcn.d core/sys/linux/elf.d core/sys/linux/epoll.d \
core/sys/linux/err.d core/sys/linux/errno.d core/sys/linux/execinfo.d \
- core/sys/linux/fcntl.d core/sys/linux/ifaddrs.d core/sys/linux/link.d \
+ core/sys/linux/fcntl.d core/sys/linux/fs.d core/sys/linux/ifaddrs.d \
+ core/sys/linux/io_uring.d core/sys/linux/link.d \
core/sys/linux/netinet/in_.d core/sys/linux/netinet/tcp.d \
- core/sys/linux/sched.d core/sys/linux/stdio.d core/sys/linux/string.d \
+ core/sys/linux/perf_event.d core/sys/linux/sched.d \
+ core/sys/linux/stdio.d core/sys/linux/string.d \
core/sys/linux/sys/auxv.d core/sys/linux/sys/eventfd.d \
core/sys/linux/sys/file.d core/sys/linux/sys/inotify.d \
core/sys/linux/sys/mman.d core/sys/linux/sys/prctl.d \
- core/sys/linux/sys/signalfd.d core/sys/linux/sys/socket.d \
- core/sys/linux/sys/sysinfo.d core/sys/linux/sys/time.d \
- core/sys/linux/sys/xattr.d core/sys/linux/termios.d \
- core/sys/linux/time.d core/sys/linux/timerfd.d core/sys/linux/tipc.d \
- core/sys/linux/unistd.d
+ core/sys/linux/sys/procfs.d core/sys/linux/sys/signalfd.d \
+ core/sys/linux/sys/socket.d core/sys/linux/sys/sysinfo.d \
+ core/sys/linux/sys/time.d core/sys/linux/sys/xattr.d \
+ core/sys/linux/termios.d core/sys/linux/time.d \
+ core/sys/linux/timerfd.d core/sys/linux/tipc.d core/sys/linux/unistd.d
DRUNTIME_DSOURCES_NETBSD = core/sys/netbsd/dlfcn.d \
core/sys/netbsd/err.d core/sys/netbsd/execinfo.d \
@@ -908,13 +957,13 @@ DRUNTIME_DSOURCES_NETBSD = core/sys/netbsd/dlfcn.d \
core/sys/netbsd/sys/sysctl.d core/sys/netbsd/time.d
DRUNTIME_DSOURCES_OPENBSD = core/sys/openbsd/dlfcn.d \
- core/sys/openbsd/err.d core/sys/openbsd/stdlib.d \
- core/sys/openbsd/string.d core/sys/openbsd/sys/cdefs.d \
- core/sys/openbsd/sys/elf.d core/sys/openbsd/sys/elf32.d \
- core/sys/openbsd/sys/elf64.d core/sys/openbsd/sys/elf_common.d \
- core/sys/openbsd/sys/link_elf.d core/sys/openbsd/sys/mman.d \
- core/sys/openbsd/sys/sysctl.d core/sys/openbsd/time.d \
- core/sys/openbsd/unistd.d
+ core/sys/openbsd/err.d core/sys/openbsd/execinfo.d \
+ core/sys/openbsd/stdlib.d core/sys/openbsd/string.d \
+ core/sys/openbsd/sys/cdefs.d core/sys/openbsd/sys/elf.d \
+ core/sys/openbsd/sys/elf32.d core/sys/openbsd/sys/elf64.d \
+ core/sys/openbsd/sys/elf_common.d core/sys/openbsd/sys/link_elf.d \
+ core/sys/openbsd/sys/mman.d core/sys/openbsd/sys/sysctl.d \
+ core/sys/openbsd/time.d core/sys/openbsd/unistd.d
DRUNTIME_DSOURCES_POSIX = core/sys/posix/aio.d \
core/sys/posix/arpa/inet.d core/sys/posix/config.d \
@@ -1039,7 +1088,7 @@ DRUNTIME_DSOURCES_WINDOWS = core/sys/windows/accctrl.d \
core/sys/windows/winuser.d core/sys/windows/winver.d \
core/sys/windows/wtsapi32.d core/sys/windows/wtypes.d
-DRUNTIME_DISOURCES = __entrypoint.di __main.di
+DRUNTIME_DISOURCES = __main.di
all: all-am
.SUFFIXES:
@@ -1126,21 +1175,93 @@ core/$(am__dirstamp):
core/atomic.lo: core/$(am__dirstamp)
core/attribute.lo: core/$(am__dirstamp)
core/bitop.lo: core/$(am__dirstamp)
+core/builtins.lo: core/$(am__dirstamp)
core/checkedint.lo: core/$(am__dirstamp)
core/cpuid.lo: core/$(am__dirstamp)
core/demangle.lo: core/$(am__dirstamp)
core/exception.lo: core/$(am__dirstamp)
+core/gc/$(am__dirstamp):
+ @$(MKDIR_P) core/gc
+ @: > core/gc/$(am__dirstamp)
+core/gc/config.lo: core/gc/$(am__dirstamp)
+core/gc/gcinterface.lo: core/gc/$(am__dirstamp)
+core/gc/registry.lo: core/gc/$(am__dirstamp)
core/internal/$(am__dirstamp):
@$(MKDIR_P) core/internal
@: > core/internal/$(am__dirstamp)
core/internal/abort.lo: core/internal/$(am__dirstamp)
-core/internal/arrayop.lo: core/internal/$(am__dirstamp)
+core/internal/array/$(am__dirstamp):
+ @$(MKDIR_P) core/internal/array
+ @: > core/internal/array/$(am__dirstamp)
+core/internal/array/appending.lo: core/internal/array/$(am__dirstamp)
+core/internal/array/capacity.lo: core/internal/array/$(am__dirstamp)
+core/internal/array/casting.lo: core/internal/array/$(am__dirstamp)
+core/internal/array/comparison.lo: \
+ core/internal/array/$(am__dirstamp)
+core/internal/array/concatenation.lo: \
+ core/internal/array/$(am__dirstamp)
+core/internal/array/construction.lo: \
+ core/internal/array/$(am__dirstamp)
+core/internal/array/equality.lo: core/internal/array/$(am__dirstamp)
+core/internal/array/operations.lo: \
+ core/internal/array/$(am__dirstamp)
+core/internal/array/utils.lo: core/internal/array/$(am__dirstamp)
+core/internal/atomic.lo: core/internal/$(am__dirstamp)
core/internal/attributes.lo: core/internal/$(am__dirstamp)
+core/internal/container/$(am__dirstamp):
+ @$(MKDIR_P) core/internal/container
+ @: > core/internal/container/$(am__dirstamp)
+core/internal/container/array.lo: \
+ core/internal/container/$(am__dirstamp)
+core/internal/container/common.lo: \
+ core/internal/container/$(am__dirstamp)
+core/internal/container/hashtab.lo: \
+ core/internal/container/$(am__dirstamp)
+core/internal/container/treap.lo: \
+ core/internal/container/$(am__dirstamp)
core/internal/convert.lo: core/internal/$(am__dirstamp)
+core/internal/dassert.lo: core/internal/$(am__dirstamp)
+core/internal/destruction.lo: core/internal/$(am__dirstamp)
+core/internal/entrypoint.lo: core/internal/$(am__dirstamp)
+core/internal/gc/$(am__dirstamp):
+ @$(MKDIR_P) core/internal/gc
+ @: > core/internal/gc/$(am__dirstamp)
+core/internal/gc/bits.lo: core/internal/gc/$(am__dirstamp)
+core/internal/gc/impl/conservative/$(am__dirstamp):
+ @$(MKDIR_P) core/internal/gc/impl/conservative
+ @: > core/internal/gc/impl/conservative/$(am__dirstamp)
+core/internal/gc/impl/conservative/gc.lo: \
+ core/internal/gc/impl/conservative/$(am__dirstamp)
+core/internal/gc/impl/manual/$(am__dirstamp):
+ @$(MKDIR_P) core/internal/gc/impl/manual
+ @: > core/internal/gc/impl/manual/$(am__dirstamp)
+core/internal/gc/impl/manual/gc.lo: \
+ core/internal/gc/impl/manual/$(am__dirstamp)
+core/internal/gc/impl/proto/$(am__dirstamp):
+ @$(MKDIR_P) core/internal/gc/impl/proto
+ @: > core/internal/gc/impl/proto/$(am__dirstamp)
+core/internal/gc/impl/proto/gc.lo: \
+ core/internal/gc/impl/proto/$(am__dirstamp)
+core/internal/gc/os.lo: core/internal/gc/$(am__dirstamp)
+core/internal/gc/pooltable.lo: core/internal/gc/$(am__dirstamp)
+core/internal/gc/proxy.lo: core/internal/gc/$(am__dirstamp)
core/internal/hash.lo: core/internal/$(am__dirstamp)
+core/internal/lifetime.lo: core/internal/$(am__dirstamp)
+core/internal/moving.lo: core/internal/$(am__dirstamp)
+core/internal/parseoptions.lo: core/internal/$(am__dirstamp)
+core/internal/postblit.lo: core/internal/$(am__dirstamp)
+core/internal/qsort.lo: core/internal/$(am__dirstamp)
core/internal/spinlock.lo: core/internal/$(am__dirstamp)
core/internal/string.lo: core/internal/$(am__dirstamp)
+core/internal/switch_.lo: core/internal/$(am__dirstamp)
core/internal/traits.lo: core/internal/$(am__dirstamp)
+core/internal/utf.lo: core/internal/$(am__dirstamp)
+core/internal/util/$(am__dirstamp):
+ @$(MKDIR_P) core/internal/util
+ @: > core/internal/util/$(am__dirstamp)
+core/internal/util/array.lo: core/internal/util/$(am__dirstamp)
+core/internal/util/math.lo: core/internal/util/$(am__dirstamp)
+core/lifetime.lo: core/$(am__dirstamp)
core/math.lo: core/$(am__dirstamp)
core/memory.lo: core/$(am__dirstamp)
core/runtime.lo: core/$(am__dirstamp)
@@ -1176,6 +1297,7 @@ core/sync/$(am__dirstamp):
core/sync/barrier.lo: core/sync/$(am__dirstamp)
core/sync/condition.lo: core/sync/$(am__dirstamp)
core/sync/config.lo: core/sync/$(am__dirstamp)
+core/sync/event.lo: core/sync/$(am__dirstamp)
core/sync/exception.lo: core/sync/$(am__dirstamp)
core/sync/mutex.lo: core/sync/$(am__dirstamp)
core/sync/rwmutex.lo: core/sync/$(am__dirstamp)
@@ -1193,23 +1315,6 @@ core/thread/types.lo: core/thread/$(am__dirstamp)
core/time.lo: core/$(am__dirstamp)
core/vararg.lo: core/$(am__dirstamp)
core/volatile.lo: core/$(am__dirstamp)
-gc/$(am__dirstamp):
- @$(MKDIR_P) gc
- @: > gc/$(am__dirstamp)
-gc/bits.lo: gc/$(am__dirstamp)
-gc/config.lo: gc/$(am__dirstamp)
-gc/gcinterface.lo: gc/$(am__dirstamp)
-gc/impl/conservative/$(am__dirstamp):
- @$(MKDIR_P) gc/impl/conservative
- @: > gc/impl/conservative/$(am__dirstamp)
-gc/impl/conservative/gc.lo: gc/impl/conservative/$(am__dirstamp)
-gc/impl/manual/$(am__dirstamp):
- @$(MKDIR_P) gc/impl/manual
- @: > gc/impl/manual/$(am__dirstamp)
-gc/impl/manual/gc.lo: gc/impl/manual/$(am__dirstamp)
-gc/os.lo: gc/$(am__dirstamp)
-gc/pooltable.lo: gc/$(am__dirstamp)
-gc/proxy.lo: gc/$(am__dirstamp)
gcc/$(am__dirstamp):
@$(MKDIR_P) gcc
@: > gcc/$(am__dirstamp)
@@ -1245,38 +1350,42 @@ rt/aApplyR.lo: rt/$(am__dirstamp)
rt/aaA.lo: rt/$(am__dirstamp)
rt/adi.lo: rt/$(am__dirstamp)
rt/arrayassign.lo: rt/$(am__dirstamp)
-rt/arraycast.lo: rt/$(am__dirstamp)
rt/arraycat.lo: rt/$(am__dirstamp)
rt/cast_.lo: rt/$(am__dirstamp)
rt/config.lo: rt/$(am__dirstamp)
rt/critical_.lo: rt/$(am__dirstamp)
rt/deh.lo: rt/$(am__dirstamp)
rt/dmain2.lo: rt/$(am__dirstamp)
+rt/ehalloc.lo: rt/$(am__dirstamp)
rt/invariant.lo: rt/$(am__dirstamp)
rt/lifetime.lo: rt/$(am__dirstamp)
rt/memory.lo: rt/$(am__dirstamp)
rt/minfo.lo: rt/$(am__dirstamp)
rt/monitor_.lo: rt/$(am__dirstamp)
-rt/obj.lo: rt/$(am__dirstamp)
-rt/qsort.lo: rt/$(am__dirstamp)
+rt/profilegc.lo: rt/$(am__dirstamp)
rt/sections.lo: rt/$(am__dirstamp)
-rt/switch_.lo: rt/$(am__dirstamp)
rt/tlsgc.lo: rt/$(am__dirstamp)
rt/util/$(am__dirstamp):
@$(MKDIR_P) rt/util
@: > rt/util/$(am__dirstamp)
-rt/util/array.lo: rt/util/$(am__dirstamp)
-rt/util/container/$(am__dirstamp):
- @$(MKDIR_P) rt/util/container
- @: > rt/util/container/$(am__dirstamp)
-rt/util/container/array.lo: rt/util/container/$(am__dirstamp)
-rt/util/container/common.lo: rt/util/container/$(am__dirstamp)
-rt/util/container/hashtab.lo: rt/util/container/$(am__dirstamp)
-rt/util/container/treap.lo: rt/util/container/$(am__dirstamp)
-rt/util/random.lo: rt/util/$(am__dirstamp)
rt/util/typeinfo.lo: rt/util/$(am__dirstamp)
-rt/util/utf.lo: rt/util/$(am__dirstamp)
+rt/util/utility.lo: rt/util/$(am__dirstamp)
core/stdc/libgdruntime_la-errno_.lo: core/stdc/$(am__dirstamp)
+core/stdcpp/$(am__dirstamp):
+ @$(MKDIR_P) core/stdcpp
+ @: > core/stdcpp/$(am__dirstamp)
+core/stdcpp/allocator.lo: core/stdcpp/$(am__dirstamp)
+core/stdcpp/array.lo: core/stdcpp/$(am__dirstamp)
+core/stdcpp/exception.lo: core/stdcpp/$(am__dirstamp)
+core/stdcpp/memory.lo: core/stdcpp/$(am__dirstamp)
+core/stdcpp/new_.lo: core/stdcpp/$(am__dirstamp)
+core/stdcpp/string.lo: core/stdcpp/$(am__dirstamp)
+core/stdcpp/string_view.lo: core/stdcpp/$(am__dirstamp)
+core/stdcpp/type_traits.lo: core/stdcpp/$(am__dirstamp)
+core/stdcpp/typeinfo.lo: core/stdcpp/$(am__dirstamp)
+core/stdcpp/utility.lo: core/stdcpp/$(am__dirstamp)
+core/stdcpp/vector.lo: core/stdcpp/$(am__dirstamp)
+core/stdcpp/xutility.lo: core/stdcpp/$(am__dirstamp)
core/sys/posix/$(am__dirstamp):
@$(MKDIR_P) core/sys/posix
@: > core/sys/posix/$(am__dirstamp)
@@ -1506,6 +1615,7 @@ core/sys/openbsd/$(am__dirstamp):
@: > core/sys/openbsd/$(am__dirstamp)
core/sys/openbsd/dlfcn.lo: core/sys/openbsd/$(am__dirstamp)
core/sys/openbsd/err.lo: core/sys/openbsd/$(am__dirstamp)
+core/sys/openbsd/execinfo.lo: core/sys/openbsd/$(am__dirstamp)
core/sys/openbsd/stdlib.lo: core/sys/openbsd/$(am__dirstamp)
core/sys/openbsd/string.lo: core/sys/openbsd/$(am__dirstamp)
core/sys/openbsd/sys/$(am__dirstamp):
@@ -1534,13 +1644,16 @@ core/sys/linux/err.lo: core/sys/linux/$(am__dirstamp)
core/sys/linux/errno.lo: core/sys/linux/$(am__dirstamp)
core/sys/linux/execinfo.lo: core/sys/linux/$(am__dirstamp)
core/sys/linux/fcntl.lo: core/sys/linux/$(am__dirstamp)
+core/sys/linux/fs.lo: core/sys/linux/$(am__dirstamp)
core/sys/linux/ifaddrs.lo: core/sys/linux/$(am__dirstamp)
+core/sys/linux/io_uring.lo: core/sys/linux/$(am__dirstamp)
core/sys/linux/link.lo: core/sys/linux/$(am__dirstamp)
core/sys/linux/netinet/$(am__dirstamp):
@$(MKDIR_P) core/sys/linux/netinet
@: > core/sys/linux/netinet/$(am__dirstamp)
core/sys/linux/netinet/in_.lo: core/sys/linux/netinet/$(am__dirstamp)
core/sys/linux/netinet/tcp.lo: core/sys/linux/netinet/$(am__dirstamp)
+core/sys/linux/perf_event.lo: core/sys/linux/$(am__dirstamp)
core/sys/linux/sched.lo: core/sys/linux/$(am__dirstamp)
core/sys/linux/stdio.lo: core/sys/linux/$(am__dirstamp)
core/sys/linux/string.lo: core/sys/linux/$(am__dirstamp)
@@ -1553,6 +1666,7 @@ core/sys/linux/sys/file.lo: core/sys/linux/sys/$(am__dirstamp)
core/sys/linux/sys/inotify.lo: core/sys/linux/sys/$(am__dirstamp)
core/sys/linux/sys/mman.lo: core/sys/linux/sys/$(am__dirstamp)
core/sys/linux/sys/prctl.lo: core/sys/linux/sys/$(am__dirstamp)
+core/sys/linux/sys/procfs.lo: core/sys/linux/sys/$(am__dirstamp)
core/sys/linux/sys/signalfd.lo: core/sys/linux/sys/$(am__dirstamp)
core/sys/linux/sys/socket.lo: core/sys/linux/sys/$(am__dirstamp)
core/sys/linux/sys/sysinfo.lo: core/sys/linux/sys/$(am__dirstamp)
@@ -1857,10 +1971,28 @@ mostlyclean-compile:
-rm -f config/x86/*.lo
-rm -f core/*.$(OBJEXT)
-rm -f core/*.lo
+ -rm -f core/gc/*.$(OBJEXT)
+ -rm -f core/gc/*.lo
-rm -f core/internal/*.$(OBJEXT)
-rm -f core/internal/*.lo
+ -rm -f core/internal/array/*.$(OBJEXT)
+ -rm -f core/internal/array/*.lo
+ -rm -f core/internal/container/*.$(OBJEXT)
+ -rm -f core/internal/container/*.lo
+ -rm -f core/internal/gc/*.$(OBJEXT)
+ -rm -f core/internal/gc/*.lo
+ -rm -f core/internal/gc/impl/conservative/*.$(OBJEXT)
+ -rm -f core/internal/gc/impl/conservative/*.lo
+ -rm -f core/internal/gc/impl/manual/*.$(OBJEXT)
+ -rm -f core/internal/gc/impl/manual/*.lo
+ -rm -f core/internal/gc/impl/proto/*.$(OBJEXT)
+ -rm -f core/internal/gc/impl/proto/*.lo
+ -rm -f core/internal/util/*.$(OBJEXT)
+ -rm -f core/internal/util/*.lo
-rm -f core/stdc/*.$(OBJEXT)
-rm -f core/stdc/*.lo
+ -rm -f core/stdcpp/*.$(OBJEXT)
+ -rm -f core/stdcpp/*.lo
-rm -f core/sync/*.$(OBJEXT)
-rm -f core/sync/*.lo
-rm -f core/sys/bionic/*.$(OBJEXT)
@@ -1921,12 +2053,6 @@ mostlyclean-compile:
-rm -f core/sys/windows/stdc/*.lo
-rm -f core/thread/*.$(OBJEXT)
-rm -f core/thread/*.lo
- -rm -f gc/*.$(OBJEXT)
- -rm -f gc/*.lo
- -rm -f gc/impl/conservative/*.$(OBJEXT)
- -rm -f gc/impl/conservative/*.lo
- -rm -f gc/impl/manual/*.$(OBJEXT)
- -rm -f gc/impl/manual/*.lo
-rm -f gcc/*.$(OBJEXT)
-rm -f gcc/*.lo
-rm -f gcc/sections/*.$(OBJEXT)
@@ -1937,8 +2063,6 @@ mostlyclean-compile:
-rm -f rt/*.lo
-rm -f rt/util/*.$(OBJEXT)
-rm -f rt/util/*.lo
- -rm -f rt/util/container/*.$(OBJEXT)
- -rm -f rt/util/container/*.lo
distclean-compile:
-rm -f *.tab.c
@@ -2035,8 +2159,17 @@ clean-libtool:
-rm -rf config/systemz/.libs config/systemz/_libs
-rm -rf config/x86/.libs config/x86/_libs
-rm -rf core/.libs core/_libs
+ -rm -rf core/gc/.libs core/gc/_libs
-rm -rf core/internal/.libs core/internal/_libs
+ -rm -rf core/internal/array/.libs core/internal/array/_libs
+ -rm -rf core/internal/container/.libs core/internal/container/_libs
+ -rm -rf core/internal/gc/.libs core/internal/gc/_libs
+ -rm -rf core/internal/gc/impl/conservative/.libs core/internal/gc/impl/conservative/_libs
+ -rm -rf core/internal/gc/impl/manual/.libs core/internal/gc/impl/manual/_libs
+ -rm -rf core/internal/gc/impl/proto/.libs core/internal/gc/impl/proto/_libs
+ -rm -rf core/internal/util/.libs core/internal/util/_libs
-rm -rf core/stdc/.libs core/stdc/_libs
+ -rm -rf core/stdcpp/.libs core/stdcpp/_libs
-rm -rf core/sync/.libs core/sync/_libs
-rm -rf core/sys/bionic/.libs core/sys/bionic/_libs
-rm -rf core/sys/darwin/.libs core/sys/darwin/_libs
@@ -2067,15 +2200,11 @@ clean-libtool:
-rm -rf core/sys/windows/.libs core/sys/windows/_libs
-rm -rf core/sys/windows/stdc/.libs core/sys/windows/stdc/_libs
-rm -rf core/thread/.libs core/thread/_libs
- -rm -rf gc/.libs gc/_libs
- -rm -rf gc/impl/conservative/.libs gc/impl/conservative/_libs
- -rm -rf gc/impl/manual/.libs gc/impl/manual/_libs
-rm -rf gcc/.libs gcc/_libs
-rm -rf gcc/sections/.libs gcc/sections/_libs
-rm -rf gcc/unwind/.libs gcc/unwind/_libs
-rm -rf rt/.libs rt/_libs
-rm -rf rt/util/.libs rt/util/_libs
- -rm -rf rt/util/container/.libs rt/util/container/_libs
install-toolexeclibDATA: $(toolexeclib_DATA)
@$(NORMAL_INSTALL)
@list='$(toolexeclib_DATA)'; test -n "$(toolexeclibdir)" || list=; \
@@ -2191,8 +2320,17 @@ distclean-generic:
-rm -f config/systemz/$(am__dirstamp)
-rm -f config/x86/$(am__dirstamp)
-rm -f core/$(am__dirstamp)
+ -rm -f core/gc/$(am__dirstamp)
-rm -f core/internal/$(am__dirstamp)
+ -rm -f core/internal/array/$(am__dirstamp)
+ -rm -f core/internal/container/$(am__dirstamp)
+ -rm -f core/internal/gc/$(am__dirstamp)
+ -rm -f core/internal/gc/impl/conservative/$(am__dirstamp)
+ -rm -f core/internal/gc/impl/manual/$(am__dirstamp)
+ -rm -f core/internal/gc/impl/proto/$(am__dirstamp)
+ -rm -f core/internal/util/$(am__dirstamp)
-rm -f core/stdc/$(am__dirstamp)
+ -rm -f core/stdcpp/$(am__dirstamp)
-rm -f core/sync/$(am__dirstamp)
-rm -f core/sys/bionic/$(am__dirstamp)
-rm -f core/sys/darwin/$(am__dirstamp)
@@ -2223,15 +2361,11 @@ distclean-generic:
-rm -f core/sys/windows/$(am__dirstamp)
-rm -f core/sys/windows/stdc/$(am__dirstamp)
-rm -f core/thread/$(am__dirstamp)
- -rm -f gc/$(am__dirstamp)
- -rm -f gc/impl/conservative/$(am__dirstamp)
- -rm -f gc/impl/manual/$(am__dirstamp)
-rm -f gcc/$(am__dirstamp)
-rm -f gcc/sections/$(am__dirstamp)
-rm -f gcc/unwind/$(am__dirstamp)
-rm -f rt/$(am__dirstamp)
-rm -f rt/util/$(am__dirstamp)
- -rm -f rt/util/container/$(am__dirstamp)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
diff --git a/libphobos/libdruntime/__entrypoint.di b/libphobos/libdruntime/__entrypoint.di
deleted file mode 100644
index fba2ae28..0000000
--- a/libphobos/libdruntime/__entrypoint.di
+++ /dev/null
@@ -1,56 +0,0 @@
-/* GDC -- D front-end for GCC
- Copyright (C) 2013-2021 Free Software Foundation, Inc.
-
- GCC is free software; you can redistribute it and/or modify it under
- the terms of the GNU General Public License as published by the Free
- Software Foundation; either version 3, or (at your option) any later
- version.
-
- GCC is distributed in the hope that it will be useful, but WITHOUT ANY
- WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- for more details.
-
- You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING3. If not see
- <http://www.gnu.org/licenses/>.
-*/
-
-/* This module provides the C main() function supplied by the user's program. */
-
-module __entrypoint;
-
-extern(C):
-
-/* The D main() function supplied by the user's program
-
- It always has `_Dmain` symbol name and uses C calling convention.
- But D frontend returns its type as `extern(D)` because of Issue 9028.
- As we need to deal with actual calling convention we have to mark it
- as `extern(C)` and use its symbol name.
-*/
-
-int _Dmain(char[][] args);
-int _d_run_main(int argc, char **argv, void* mainFunc);
-
-/* Substitutes for the C main() function. Just calls into d_run_main with
- the default main function. Applications are free to implement their own
- main function and call the _d_run_main function themselves with any main
- function.
-*/
-
-int main(int argc, char **argv)
-{
- return _d_run_main(argc, argv, &_Dmain);
-}
-
-/* This is apparently needed on Solaris because the C tool chain seems to
- expect the main function to be called _main. It needs both not just one!
-*/
-
-version (Solaris)
-int _main(int argc, char** argv)
-{
- return main(argc, argv);
-}
-
diff --git a/libphobos/libdruntime/core/atomic.d b/libphobos/libdruntime/core/atomic.d
index 1d0a2ea..e6a82e5 100644
--- a/libphobos/libdruntime/core/atomic.d
+++ b/libphobos/libdruntime/core/atomic.d
@@ -4,1691 +4,915 @@
*
* Copyright: Copyright Sean Kelly 2005 - 2016.
* License: $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost License 1.0)
- * Authors: Sean Kelly, Alex Rønne Petersen
+ * Authors: Sean Kelly, Alex Rønne Petersen, Manu Evans
* Source: $(DRUNTIMESRC core/_atomic.d)
*/
-
-/* NOTE: This file has been patched from the original DMD distribution to
- * work with the GDC compiler.
- */
module core.atomic;
-version (D_InlineAsm_X86)
+import core.internal.atomic;
+import core.internal.attributes : betterC;
+import core.internal.traits : hasUnsharedIndirections;
+
+/**
+ * Specifies the memory ordering semantics of an atomic operation.
+ *
+ * See_Also:
+ * $(HTTP en.cppreference.com/w/cpp/atomic/memory_order)
+ */
+enum MemoryOrder
{
- version = AsmX86;
- version = AsmX86_32;
- enum has64BitCAS = true;
- enum has128BitCAS = false;
+ /**
+ * Not sequenced.
+ * Corresponds to $(LINK2 https://llvm.org/docs/Atomics.html#monotonic, LLVM AtomicOrdering.Monotonic)
+ * and C++11/C11 `memory_order_relaxed`.
+ */
+ raw = 0,
+ /**
+ * Hoist-load + hoist-store barrier.
+ * Corresponds to $(LINK2 https://llvm.org/docs/Atomics.html#acquire, LLVM AtomicOrdering.Acquire)
+ * and C++11/C11 `memory_order_acquire`.
+ */
+ acq = 2,
+ /**
+ * Sink-load + sink-store barrier.
+ * Corresponds to $(LINK2 https://llvm.org/docs/Atomics.html#release, LLVM AtomicOrdering.Release)
+ * and C++11/C11 `memory_order_release`.
+ */
+ rel = 3,
+ /**
+ * Acquire + release barrier.
+ * Corresponds to $(LINK2 https://llvm.org/docs/Atomics.html#acquirerelease, LLVM AtomicOrdering.AcquireRelease)
+ * and C++11/C11 `memory_order_acq_rel`.
+ */
+ acq_rel = 4,
+ /**
+ * Fully sequenced (acquire + release). Corresponds to
+ * $(LINK2 https://llvm.org/docs/Atomics.html#sequentiallyconsistent, LLVM AtomicOrdering.SequentiallyConsistent)
+ * and C++11/C11 `memory_order_seq_cst`.
+ */
+ seq = 5,
}
-else version (D_InlineAsm_X86_64)
+
+/**
+ * Loads 'val' from memory and returns it. The memory barrier specified
+ * by 'ms' is applied to the operation, which is fully sequenced by
+ * default. Valid memory orders are MemoryOrder.raw, MemoryOrder.acq,
+ * and MemoryOrder.seq.
+ *
+ * Params:
+ * val = The target variable.
+ *
+ * Returns:
+ * The value of 'val'.
+ */
+T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope const T val) pure nothrow @nogc @trusted
+ if (!is(T == shared U, U) && !is(T == shared inout U, U) && !is(T == shared const U, U))
{
- version = AsmX86;
- version = AsmX86_64;
- enum has64BitCAS = true;
- enum has128BitCAS = true;
+ static if (__traits(isFloating, T))
+ {
+ alias IntTy = IntForFloat!T;
+ IntTy r = core.internal.atomic.atomicLoad!ms(cast(IntTy*)&val);
+ return *cast(T*)&r;
+ }
+ else
+ return core.internal.atomic.atomicLoad!ms(cast(T*)&val);
}
-else version (GNU)
+
+/// Ditto
+T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope shared const T val) pure nothrow @nogc @trusted
+ if (!hasUnsharedIndirections!T)
{
- import gcc.config;
- enum has64BitCAS = GNU_Have_64Bit_Atomics;
- enum has128BitCAS = GNU_Have_LibAtomic;
+ import core.internal.traits : hasUnsharedIndirections;
+ static assert(!hasUnsharedIndirections!T, "Copying `" ~ shared(const(T)).stringof ~ "` would violate shared.");
+
+ return atomicLoad!ms(*cast(T*)&val);
}
-else
+
+/// Ditto
+TailShared!T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)(ref shared const T val) pure nothrow @nogc @trusted
+ if (hasUnsharedIndirections!T)
{
- enum has64BitCAS = false;
- enum has128BitCAS = false;
+ // HACK: DEPRECATE THIS FUNCTION, IT IS INVALID TO DO ATOMIC LOAD OF SHARED CLASS
+ // this is here because code exists in the wild that does this...
+
+ return core.internal.atomic.atomicLoad!ms(cast(TailShared!T*)&val);
}
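+
+// Editor's illustrative sketch (not part of the upstream patch): basic use of
+// atomicLoad on a shared integer, with the default and an explicit memory order.
+unittest
+{
+    shared int counter = 42;
+    assert(atomicLoad(counter) == 42);                    // MemoryOrder.seq by default
+    assert(atomicLoad!(MemoryOrder.raw)(counter) == 42);  // relaxed load
+}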
-private
+/**
+ * Writes 'newval' into 'val'. The memory barrier specified by 'ms' is
+ * applied to the operation, which is fully sequenced by default.
+ * Valid memory orders are MemoryOrder.raw, MemoryOrder.rel, and
+ * MemoryOrder.seq.
+ *
+ * Params:
+ * val = The target variable.
+ * newval = The value to store.
+ */
+void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)(ref T val, V newval) pure nothrow @nogc @trusted
+ if (!is(T == shared) && !is(V == shared))
{
- template HeadUnshared(T)
- {
- static if ( is( T U : shared(U*) ) )
- alias shared(U)* HeadUnshared;
- else
- alias T HeadUnshared;
- }
-}
+ import core.internal.traits : hasElaborateCopyConstructor;
+ static assert (!hasElaborateCopyConstructor!T, "`T` may not have an elaborate copy: atomic operations override regular copying semantics.");
+ // resolve implicit conversions
+ T arg = newval;
-version (AsmX86)
-{
- // NOTE: Strictly speaking, the x86 supports atomic operations on
- // unaligned values. However, this is far slower than the
- // common case, so such behavior should be prohibited.
- private bool atomicValueIsProperlyAligned(T)( ref T val ) pure nothrow @nogc @trusted
+ static if (__traits(isFloating, T))
{
- return atomicPtrIsProperlyAligned(&val);
+ alias IntTy = IntForFloat!T;
+ core.internal.atomic.atomicStore!ms(cast(IntTy*)&val, *cast(IntTy*)&arg);
}
+ else
+ core.internal.atomic.atomicStore!ms(&val, arg);
+}
- private bool atomicPtrIsProperlyAligned(T)( T* ptr ) pure nothrow @nogc @safe
+/// Ditto
+void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)(ref shared T val, V newval) pure nothrow @nogc @trusted
+ if (!is(T == class))
+{
+ static if (is (V == shared U, U))
+ alias Thunk = U;
+ else
{
- // NOTE: 32 bit x86 systems support 8 byte CAS, which only requires
- // 4 byte alignment, so use size_t as the align type here.
- static if ( T.sizeof > size_t.sizeof )
- return cast(size_t)ptr % size_t.sizeof == 0;
- else
- return cast(size_t)ptr % T.sizeof == 0;
+ import core.internal.traits : hasUnsharedIndirections;
+ static assert(!hasUnsharedIndirections!V, "Copying argument `" ~ V.stringof ~ " newval` to `" ~ shared(T).stringof ~ " here` would violate shared.");
+ alias Thunk = V;
}
+ atomicStore!ms(*cast(T*)&val, *cast(Thunk*)&newval);
}
-
-version (CoreDdoc)
+/// Ditto
+void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)(ref shared T val, shared V newval) pure nothrow @nogc @trusted
+ if (is(T == class))
{
- /**
- * Performs the binary operation 'op' on val using 'mod' as the modifier.
- *
- * Params:
- * val = The target variable.
- * mod = The modifier to apply.
- *
- * Returns:
- * The result of the operation.
- */
- HeadUnshared!(T) atomicOp(string op, T, V1)( ref shared T val, V1 mod ) pure nothrow @nogc @safe
- if ( __traits( compiles, mixin( "*cast(T*)&val" ~ op ~ "mod" ) ) )
- {
- return HeadUnshared!(T).init;
- }
+ static assert (is (V : T), "Can't assign `newval` of type `shared " ~ V.stringof ~ "` to `shared " ~ T.stringof ~ "`.");
+ core.internal.atomic.atomicStore!ms(cast(T*)&val, cast(V)newval);
+}
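+
+// Editor's illustrative sketch (not part of the upstream patch): publishing a
+// value with release ordering and reading it back with acquire ordering.
+unittest
+{
+    shared int flag;
+    atomicStore!(MemoryOrder.rel)(flag, 1);
+    assert(atomicLoad!(MemoryOrder.acq)(flag) == 1);
+}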
- /**
- * Stores 'writeThis' to the memory referenced by 'here' if the value
- * referenced by 'here' is equal to 'ifThis'. This operation is both
- * lock-free and atomic.
- *
- * Params:
- * here = The address of the destination variable.
- * writeThis = The value to store.
- * ifThis = The comparison value.
- *
- * Returns:
- * true if the store occurred, false if not.
- */
- bool cas(T,V1,V2)( shared(T)* here, const V1 ifThis, V2 writeThis ) pure nothrow @nogc @safe
- if ( !is(T == class) && !is(T U : U*) && __traits( compiles, { *here = writeThis; } ) );
+/**
+ * Atomically adds `mod` to the value referenced by `val` and returns the value `val` held previously.
+ * This operation is both lock-free and atomic.
+ *
+ * Params:
+ * val = Reference to the value to modify.
+ * mod = The value to add.
+ *
+ * Returns:
+ * The value held previously by `val`.
+ */
+T atomicFetchAdd(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope T val, size_t mod) pure nothrow @nogc @trusted
+ if ((__traits(isIntegral, T) || is(T == U*, U)) && !is(T == shared))
+in (atomicValueIsProperlyAligned(val))
+{
+ static if (is(T == U*, U))
+ return cast(T)core.internal.atomic.atomicFetchAdd!ms(cast(size_t*)&val, mod * U.sizeof);
+ else
+ return core.internal.atomic.atomicFetchAdd!ms(&val, cast(T)mod);
+}
- /// Ditto
- bool cas(T,V1,V2)( shared(T)* here, const shared(V1) ifThis, shared(V2) writeThis ) pure nothrow @nogc @safe
- if ( is(T == class) && __traits( compiles, { *here = writeThis; } ) );
+/// Ditto
+T atomicFetchAdd(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope shared T val, size_t mod) pure nothrow @nogc @trusted
+ if (__traits(isIntegral, T) || is(T == U*, U))
+in (atomicValueIsProperlyAligned(val))
+{
+ return atomicFetchAdd!ms(*cast(T*)&val, mod);
+}
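+
+// Editor's illustrative sketch (not part of the upstream patch): atomicFetchAdd
+// returns the value held *before* the addition.
+unittest
+{
+    shared int x = 5;
+    assert(atomicFetchAdd(x, 2) == 5);
+    assert(atomicLoad(x) == 7);
+}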
- /// Ditto
- bool cas(T,V1,V2)( shared(T)* here, const shared(V1)* ifThis, shared(V2)* writeThis ) pure nothrow @nogc @safe
- if ( is(T U : U*) && __traits( compiles, { *here = writeThis; } ) );
+/**
+ * Atomically subtracts `mod` from the value referenced by `val` and returns the value `val` held previously.
+ * This operation is both lock-free and atomic.
+ *
+ * Params:
+ * val = Reference to the value to modify.
+ * mod = The value to subtract.
+ *
+ * Returns:
+ * The value held previously by `val`.
+ */
+T atomicFetchSub(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope T val, size_t mod) pure nothrow @nogc @trusted
+ if ((__traits(isIntegral, T) || is(T == U*, U)) && !is(T == shared))
+in (atomicValueIsProperlyAligned(val))
+{
+ static if (is(T == U*, U))
+ return cast(T)core.internal.atomic.atomicFetchSub!ms(cast(size_t*)&val, mod * U.sizeof);
+ else
+ return core.internal.atomic.atomicFetchSub!ms(&val, cast(T)mod);
+}
- /**
- * Loads 'val' from memory and returns it. The memory barrier specified
- * by 'ms' is applied to the operation, which is fully sequenced by
- * default. Valid memory orders are MemoryOrder.raw, MemoryOrder.acq,
- * and MemoryOrder.seq.
- *
- * Params:
- * val = The target variable.
- *
- * Returns:
- * The value of 'val'.
- */
- HeadUnshared!(T) atomicLoad(MemoryOrder ms = MemoryOrder.seq,T)( ref const shared T val ) pure nothrow @nogc @safe
- {
- return HeadUnshared!(T).init;
- }
+/// Ditto
+T atomicFetchSub(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope shared T val, size_t mod) pure nothrow @nogc @trusted
+ if (__traits(isIntegral, T) || is(T == U*, U))
+in (atomicValueIsProperlyAligned(val))
+{
+ return atomicFetchSub!ms(*cast(T*)&val, mod);
+}
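+
+// Editor's illustrative sketch (not part of the upstream patch): atomicFetchSub
+// likewise returns the value held before the subtraction.
+unittest
+{
+    shared int x = 5;
+    assert(atomicFetchSub(x, 2) == 5);
+    assert(atomicLoad(x) == 3);
+}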
+/**
+ * Exchanges `exchangeWith` with the value referenced by `here`.
+ * This operation is both lock-free and atomic.
+ *
+ * Params:
+ * here = The address of the destination variable.
+ * exchangeWith = The value to exchange.
+ *
+ * Returns:
+ * The value held previously by `here`.
+ */
+T atomicExchange(MemoryOrder ms = MemoryOrder.seq,T,V)(T* here, V exchangeWith) pure nothrow @nogc @trusted
+ if (!is(T == shared) && !is(V == shared))
+in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
+{
+ // resolve implicit conversions
+ T arg = exchangeWith;
- /**
- * Writes 'newval' into 'val'. The memory barrier specified by 'ms' is
- * applied to the operation, which is fully sequenced by default.
- * Valid memory orders are MemoryOrder.raw, MemoryOrder.rel, and
- * MemoryOrder.seq.
- *
- * Params:
- * val = The target variable.
- * newval = The value to store.
- */
- void atomicStore(MemoryOrder ms = MemoryOrder.seq,T,V1)( ref shared T val, V1 newval ) pure nothrow @nogc @safe
- if ( __traits( compiles, { val = newval; } ) )
+ static if (__traits(isFloating, T))
{
-
+ alias IntTy = IntForFloat!T;
+ IntTy r = core.internal.atomic.atomicExchange!ms(cast(IntTy*)here, *cast(IntTy*)&arg);
+ return *cast(shared(T)*)&r;
}
+ else
+ return core.internal.atomic.atomicExchange!ms(here, arg);
+}
-
- /**
- * Specifies the memory ordering semantics of an atomic operation.
- */
- enum MemoryOrder
+/// Ditto
+TailShared!T atomicExchange(MemoryOrder ms = MemoryOrder.seq,T,V)(shared(T)* here, V exchangeWith) pure nothrow @nogc @trusted
+ if (!is(T == class) && !is(T == interface))
+in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
+{
+ static if (is (V == shared U, U))
+ alias Thunk = U;
+ else
{
- raw, /// Not sequenced.
- acq, /// Hoist-load + hoist-store barrier.
- rel, /// Sink-load + sink-store barrier.
- seq, /// Fully sequenced (acquire + release).
+ import core.internal.traits : hasUnsharedIndirections;
+ static assert(!hasUnsharedIndirections!V, "Copying `exchangeWith` of type `" ~ V.stringof ~ "` to `" ~ shared(T).stringof ~ "` would violate shared.");
+ alias Thunk = V;
}
+ return atomicExchange!ms(cast(T*)here, *cast(Thunk*)&exchangeWith);
+}
- deprecated("Please use MemoryOrder instead.")
- alias MemoryOrder msync;
+/// Ditto
+shared(T) atomicExchange(MemoryOrder ms = MemoryOrder.seq,T,V)(shared(T)* here, shared(V) exchangeWith) pure nothrow @nogc @trusted
+ if (is(T == class) || is(T == interface))
+in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
+{
+ static assert (is (V : T), "Can't assign `exchangeWith` of type `" ~ shared(V).stringof ~ "` to `" ~ shared(T).stringof ~ "`.");
- /**
- * Inserts a full load/store memory fence (on platforms that need it). This ensures
- * that all loads and stores before a call to this function are executed before any
- * loads and stores after the call.
- */
- void atomicFence() nothrow @nogc;
+ return cast(shared)core.internal.atomic.atomicExchange!ms(cast(T*)here, cast(V)exchangeWith);
}
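+
+// Editor's illustrative sketch (not part of the upstream patch): swap in a new
+// value and observe the previous one in a single atomic step.
+unittest
+{
+    shared int x = 1;
+    assert(atomicExchange(&x, 2) == 1);
+    assert(atomicLoad(x) == 2);
+}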
-else version (AsmX86_32)
+
+/**
+ * Performs either compare-and-set or compare-and-swap (or exchange).
+ *
+ * There are two categories of overloads in this template:
+ * The first category does a simple compare-and-set.
+ * The comparison value (`ifThis`) is treated as an rvalue.
+ *
+ * The second category does a compare-and-swap (a.k.a. compare-and-exchange),
+ * and expects `ifThis` to be a pointer type, to which the previous value
+ * of `here` will be written.
+ *
+ * This operation is both lock-free and atomic.
+ *
+ * Params:
+ * here = The address of the destination variable.
+ * writeThis = The value to store.
+ * ifThis = The comparison value.
+ *
+ * Returns:
+ * true if the store occurred, false if not.
+ */
+template cas(MemoryOrder succ = MemoryOrder.seq, MemoryOrder fail = MemoryOrder.seq)
{
- // Uses specialized asm for fast fetch and add operations
- private HeadUnshared!(T) atomicFetchAdd(T)( ref shared T val, size_t mod ) pure nothrow @nogc @safe
- if ( T.sizeof <= 4 )
+ /// Compare-and-set for non-shared values
+ bool cas(T, V1, V2)(T* here, V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
+ if (!is(T == shared) && is(T : V1))
+ in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
{
- size_t tmp = mod;
- asm pure nothrow @nogc @trusted
- {
- mov EAX, tmp;
- mov EDX, val;
- }
- static if (T.sizeof == 1) asm pure nothrow @nogc @trusted { lock; xadd[EDX], AL; }
- else static if (T.sizeof == 2) asm pure nothrow @nogc @trusted { lock; xadd[EDX], AX; }
- else static if (T.sizeof == 4) asm pure nothrow @nogc @trusted { lock; xadd[EDX], EAX; }
+ // resolve implicit conversions
+ T arg1 = ifThis;
+ T arg2 = writeThis;
- asm pure nothrow @nogc @trusted
+ static if (__traits(isFloating, T))
{
- mov tmp, EAX;
+ alias IntTy = IntForFloat!T;
+ return atomicCompareExchangeStrongNoResult!(succ, fail)(
+ cast(IntTy*)here, *cast(IntTy*)&arg1, *cast(IntTy*)&arg2);
}
-
- return cast(T)tmp;
- }
-
- private HeadUnshared!(T) atomicFetchSub(T)( ref shared T val, size_t mod ) pure nothrow @nogc @safe
- if ( T.sizeof <= 4)
- {
- return atomicFetchAdd(val, -mod);
+ else
+ return atomicCompareExchangeStrongNoResult!(succ, fail)(here, arg1, arg2);
}
- HeadUnshared!(T) atomicOp(string op, T, V1)( ref shared T val, V1 mod ) pure nothrow @nogc
- if ( __traits( compiles, mixin( "*cast(T*)&val" ~ op ~ "mod" ) ) )
- in
- {
- assert(atomicValueIsProperlyAligned(val));
- }
- body
+ /// Compare-and-set for shared value type
+ bool cas(T, V1, V2)(shared(T)* here, V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
+ if (!is(T == class) && (is(T : V1) || is(shared T : V1)))
+ in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
{
- // binary operators
- //
- // + - * / % ^^ &
- // | ^ << >> >>> ~ in
- // == != < <= > >=
- static if ( op == "+" || op == "-" || op == "*" || op == "/" ||
- op == "%" || op == "^^" || op == "&" || op == "|" ||
- op == "^" || op == "<<" || op == ">>" || op == ">>>" ||
- op == "~" || // skip "in"
- op == "==" || op == "!=" || op == "<" || op == "<=" ||
- op == ">" || op == ">=" )
- {
- HeadUnshared!(T) get = atomicLoad!(MemoryOrder.raw)( val );
- mixin( "return get " ~ op ~ " mod;" );
- }
+ static if (is (V1 == shared U1, U1))
+ alias Thunk1 = U1;
else
- // assignment operators
- //
- // += -= *= /= %= ^^= &=
- // |= ^= <<= >>= >>>= ~=
- static if ( op == "+=" && __traits(isIntegral, T) && T.sizeof <= 4 && V1.sizeof <= 4)
- {
- return cast(T)(atomicFetchAdd!(T)(val, mod) + mod);
- }
- else static if ( op == "-=" && __traits(isIntegral, T) && T.sizeof <= 4 && V1.sizeof <= 4)
- {
- return cast(T)(atomicFetchSub!(T)(val, mod) - mod);
- }
- else static if ( op == "+=" || op == "-=" || op == "*=" || op == "/=" ||
- op == "%=" || op == "^^=" || op == "&=" || op == "|=" ||
- op == "^=" || op == "<<=" || op == ">>=" || op == ">>>=" ) // skip "~="
- {
- HeadUnshared!(T) get, set;
-
- do
- {
- get = set = atomicLoad!(MemoryOrder.raw)( val );
- mixin( "set " ~ op ~ " mod;" );
- } while ( !casByRef( val, get, set ) );
- return set;
- }
+ alias Thunk1 = V1;
+ static if (is (V2 == shared U2, U2))
+ alias Thunk2 = U2;
else
{
- static assert( false, "Operation not supported." );
+ import core.internal.traits : hasUnsharedIndirections;
+ static assert(!hasUnsharedIndirections!V2,
+ "Copying `" ~ V2.stringof ~ "* writeThis` to `" ~
+ shared(T).stringof ~ "* here` would violate shared.");
+ alias Thunk2 = V2;
}
+ return cas(cast(T*)here, *cast(Thunk1*)&ifThis, *cast(Thunk2*)&writeThis);
}
- bool casByRef(T,V1,V2)( ref T value, V1 ifThis, V2 writeThis ) pure nothrow @nogc @trusted
+ /// Compare-and-set for `shared` reference type (`class`)
+ bool cas(T, V1, V2)(shared(T)* here, shared(V1) ifThis, shared(V2) writeThis)
+ pure nothrow @nogc @trusted
+ if (is(T == class))
+ in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
{
- return cas(&value, ifThis, writeThis);
+ return atomicCompareExchangeStrongNoResult!(succ, fail)(
+ cast(T*)here, cast(V1)ifThis, cast(V2)writeThis);
}
- bool cas(T,V1,V2)( shared(T)* here, const V1 ifThis, V2 writeThis ) pure nothrow @nogc @safe
- if ( !is(T == class) && !is(T U : U*) && __traits( compiles, { *here = writeThis; } ) )
+ /// Compare-and-exchange for non-`shared` types
+ bool cas(T, V)(T* here, T* ifThis, V writeThis) pure nothrow @nogc @trusted
+ if (!is(T == shared) && !is(V == shared))
+ in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
{
- return casImpl(here, ifThis, writeThis);
- }
-
- bool cas(T,V1,V2)( shared(T)* here, const shared(V1) ifThis, shared(V2) writeThis ) pure nothrow @nogc @safe
- if ( is(T == class) && __traits( compiles, { *here = writeThis; } ) )
- {
- return casImpl(here, ifThis, writeThis);
- }
-
- bool cas(T,V1,V2)( shared(T)* here, const shared(V1)* ifThis, shared(V2)* writeThis ) pure nothrow @nogc @safe
- if ( is(T U : U*) && __traits( compiles, { *here = writeThis; } ) )
- {
- return casImpl(here, ifThis, writeThis);
- }
+ // resolve implicit conversions
+ T arg1 = writeThis;
- private bool casImpl(T,V1,V2)( shared(T)* here, V1 ifThis, V2 writeThis ) pure nothrow @nogc @safe
- in
- {
- assert( atomicPtrIsProperlyAligned( here ) );
- }
- body
- {
- static if ( T.sizeof == byte.sizeof )
- {
- //////////////////////////////////////////////////////////////////
- // 1 Byte CAS
- //////////////////////////////////////////////////////////////////
-
- asm pure nothrow @nogc @trusted
- {
- mov DL, writeThis;
- mov AL, ifThis;
- mov ECX, here;
- lock; // lock always needed to make this op atomic
- cmpxchg [ECX], DL;
- setz AL;
- }
- }
- else static if ( T.sizeof == short.sizeof )
+ static if (__traits(isFloating, T))
{
- //////////////////////////////////////////////////////////////////
- // 2 Byte CAS
- //////////////////////////////////////////////////////////////////
-
- asm pure nothrow @nogc @trusted
- {
- mov DX, writeThis;
- mov AX, ifThis;
- mov ECX, here;
- lock; // lock always needed to make this op atomic
- cmpxchg [ECX], DX;
- setz AL;
- }
- }
- else static if ( T.sizeof == int.sizeof )
- {
- //////////////////////////////////////////////////////////////////
- // 4 Byte CAS
- //////////////////////////////////////////////////////////////////
-
- asm pure nothrow @nogc @trusted
- {
- mov EDX, writeThis;
- mov EAX, ifThis;
- mov ECX, here;
- lock; // lock always needed to make this op atomic
- cmpxchg [ECX], EDX;
- setz AL;
- }
- }
- else static if ( T.sizeof == long.sizeof && has64BitCAS )
- {
-
- //////////////////////////////////////////////////////////////////
- // 8 Byte CAS on a 32-Bit Processor
- //////////////////////////////////////////////////////////////////
-
- asm pure nothrow @nogc @trusted
- {
- push EDI;
- push EBX;
- lea EDI, writeThis;
- mov EBX, [EDI];
- mov ECX, 4[EDI];
- lea EDI, ifThis;
- mov EAX, [EDI];
- mov EDX, 4[EDI];
- mov EDI, here;
- lock; // lock always needed to make this op atomic
- cmpxchg8b [EDI];
- setz AL;
- pop EBX;
- pop EDI;
-
- }
-
+ alias IntTy = IntForFloat!T;
+ return atomicCompareExchangeStrong!(succ, fail)(
+ cast(IntTy*)here, cast(IntTy*)ifThis, *cast(IntTy*)&writeThis);
}
else
- {
- static assert( false, "Invalid template type specified." );
- }
- }
-
-
- enum MemoryOrder
- {
- raw,
- acq,
- rel,
- seq,
+ return atomicCompareExchangeStrong!(succ, fail)(here, ifThis, writeThis);
}
- deprecated("Please use MemoryOrder instead.")
- alias MemoryOrder msync;
-
-
- private
+ /// Compare and exchange for mixed-`shared`ness types
+ bool cas(T, V1, V2)(shared(T)* here, V1* ifThis, V2 writeThis) pure nothrow @nogc @trusted
+ if (!is(T == class) && (is(T : V1) || is(shared T : V1)))
+ in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
{
- // NOTE: x86 loads implicitly have acquire semantics so a memory
- // barrier is only necessary on releases.
- template needsLoadBarrier( MemoryOrder ms )
- {
- enum bool needsLoadBarrier = ms == MemoryOrder.seq;
- }
-
-
- // NOTE: x86 stores implicitly have release semantics so a memory
- // barrier is only necessary on acquires.
- template needsStoreBarrier( MemoryOrder ms )
- {
- enum bool needsStoreBarrier = ms == MemoryOrder.seq;
- }
- }
-
-
- HeadUnshared!(T) atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)( ref const shared T val ) pure nothrow @nogc @safe
- if (!__traits(isFloating, T))
- {
- static assert( ms != MemoryOrder.rel, "invalid MemoryOrder for atomicLoad()" );
- static assert( __traits(isPOD, T), "argument to atomicLoad() must be POD" );
-
- static if ( T.sizeof == byte.sizeof )
- {
- //////////////////////////////////////////////////////////////////
- // 1 Byte Load
- //////////////////////////////////////////////////////////////////
-
- static if ( needsLoadBarrier!(ms) )
- {
- asm pure nothrow @nogc @trusted
- {
- mov DL, 0;
- mov AL, 0;
- mov ECX, val;
- lock; // lock always needed to make this op atomic
- cmpxchg [ECX], DL;
- }
- }
- else
- {
- asm pure nothrow @nogc @trusted
- {
- mov EAX, val;
- mov AL, [EAX];
- }
- }
- }
- else static if ( T.sizeof == short.sizeof )
- {
- //////////////////////////////////////////////////////////////////
- // 2 Byte Load
- //////////////////////////////////////////////////////////////////
-
- static if ( needsLoadBarrier!(ms) )
- {
- asm pure nothrow @nogc @trusted
- {
- mov DX, 0;
- mov AX, 0;
- mov ECX, val;
- lock; // lock always needed to make this op atomic
- cmpxchg [ECX], DX;
- }
- }
- else
- {
- asm pure nothrow @nogc @trusted
- {
- mov EAX, val;
- mov AX, [EAX];
- }
- }
- }
- else static if ( T.sizeof == int.sizeof )
- {
- //////////////////////////////////////////////////////////////////
- // 4 Byte Load
- //////////////////////////////////////////////////////////////////
-
- static if ( needsLoadBarrier!(ms) )
- {
- asm pure nothrow @nogc @trusted
- {
- mov EDX, 0;
- mov EAX, 0;
- mov ECX, val;
- lock; // lock always needed to make this op atomic
- cmpxchg [ECX], EDX;
- }
- }
- else
- {
- asm pure nothrow @nogc @trusted
- {
- mov EAX, val;
- mov EAX, [EAX];
- }
- }
- }
- else static if ( T.sizeof == long.sizeof && has64BitCAS )
+ static if (is (V1 == shared U1, U1))
+ alias Thunk1 = U1;
+ else
{
- //////////////////////////////////////////////////////////////////
- // 8 Byte Load on a 32-Bit Processor
- //////////////////////////////////////////////////////////////////
-
- asm pure nothrow @nogc @trusted
- {
- push EDI;
- push EBX;
- mov EBX, 0;
- mov ECX, 0;
- mov EAX, 0;
- mov EDX, 0;
- mov EDI, val;
- lock; // lock always needed to make this op atomic
- cmpxchg8b [EDI];
- pop EBX;
- pop EDI;
- }
+ import core.internal.traits : hasUnsharedIndirections;
+ static assert(!hasUnsharedIndirections!V1,
+ "Copying `" ~ shared(T).stringof ~ "* here` to `" ~
+ V1.stringof ~ "* ifThis` would violate shared.");
+ alias Thunk1 = V1;
}
+ static if (is (V2 == shared U2, U2))
+ alias Thunk2 = U2;
else
{
- static assert( false, "Invalid template type specified." );
+ import core.internal.traits : hasUnsharedIndirections;
+ static assert(!hasUnsharedIndirections!V2,
+ "Copying `" ~ V2.stringof ~ "* writeThis` to `" ~
+ shared(T).stringof ~ "* here` would violate shared.");
+ alias Thunk2 = V2;
}
+ static assert (is(T : Thunk1),
+ "Mismatching types for `here` and `ifThis`: `" ~
+ shared(T).stringof ~ "` and `" ~ V1.stringof ~ "`.");
+ return cas(cast(T*)here, cast(Thunk1*)ifThis, *cast(Thunk2*)&writeThis);
}
- void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V1)( ref shared T val, V1 newval ) pure nothrow @nogc @safe
- if ( __traits( compiles, { val = newval; } ) )
+ /// Compare-and-exchange for `class`
+ bool cas(T, V)(shared(T)* here, shared(T)* ifThis, shared(V) writeThis)
+ pure nothrow @nogc @trusted
+ if (is(T == class))
+ in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
{
- static assert( ms != MemoryOrder.acq, "invalid MemoryOrder for atomicStore()" );
- static assert( __traits(isPOD, T), "argument to atomicStore() must be POD" );
-
- static if ( T.sizeof == byte.sizeof )
- {
- //////////////////////////////////////////////////////////////////
- // 1 Byte Store
- //////////////////////////////////////////////////////////////////
-
- static if ( needsStoreBarrier!(ms) )
- {
- asm pure nothrow @nogc @trusted
- {
- mov EAX, val;
- mov DL, newval;
- lock;
- xchg [EAX], DL;
- }
- }
- else
- {
- asm pure nothrow @nogc @trusted
- {
- mov EAX, val;
- mov DL, newval;
- mov [EAX], DL;
- }
- }
- }
- else static if ( T.sizeof == short.sizeof )
- {
- //////////////////////////////////////////////////////////////////
- // 2 Byte Store
- //////////////////////////////////////////////////////////////////
-
- static if ( needsStoreBarrier!(ms) )
- {
- asm pure nothrow @nogc @trusted
- {
- mov EAX, val;
- mov DX, newval;
- lock;
- xchg [EAX], DX;
- }
- }
- else
- {
- asm pure nothrow @nogc @trusted
- {
- mov EAX, val;
- mov DX, newval;
- mov [EAX], DX;
- }
- }
- }
- else static if ( T.sizeof == int.sizeof )
- {
- //////////////////////////////////////////////////////////////////
- // 4 Byte Store
- //////////////////////////////////////////////////////////////////
-
- static if ( needsStoreBarrier!(ms) )
- {
- asm pure nothrow @nogc @trusted
- {
- mov EAX, val;
- mov EDX, newval;
- lock;
- xchg [EAX], EDX;
- }
- }
- else
- {
- asm pure nothrow @nogc @trusted
- {
- mov EAX, val;
- mov EDX, newval;
- mov [EAX], EDX;
- }
- }
- }
- else static if ( T.sizeof == long.sizeof && has64BitCAS )
- {
- //////////////////////////////////////////////////////////////////
- // 8 Byte Store on a 32-Bit Processor
- //////////////////////////////////////////////////////////////////
-
- asm pure nothrow @nogc @trusted
- {
- push EDI;
- push EBX;
- lea EDI, newval;
- mov EBX, [EDI];
- mov ECX, 4[EDI];
- mov EDI, val;
- mov EAX, [EDI];
- mov EDX, 4[EDI];
- L1: lock; // lock always needed to make this op atomic
- cmpxchg8b [EDI];
- jne L1;
- pop EBX;
- pop EDI;
- }
- }
- else
- {
- static assert( false, "Invalid template type specified." );
- }
+ return atomicCompareExchangeStrong!(succ, fail)(
+ cast(T*)here, cast(T*)ifThis, cast(V)writeThis);
}
+}
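+
+// Editor's illustrative sketch (not part of the upstream patch): compare-and-set
+// succeeds only while the current value still matches `ifThis`.
+unittest
+{
+    shared int atom = 0;
+    assert( cas(&atom, 0, 1));      // 0 -> 1
+    assert(!cas(&atom, 0, 2));      // fails, atom is already 1
+    assert(atomicLoad(atom) == 1);
+}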
+/**
+ * Stores 'writeThis' to the memory referenced by 'here' if the value
+ * referenced by 'here' is equal to 'ifThis'.
+ * The 'weak' version of cas may spuriously fail. It is recommended to
+ * use `casWeak` only when `cas` would be used in a loop.
+ * This operation is both lock-free and atomic.
+ *
+ * Params:
+ * here = The address of the destination variable.
+ * writeThis = The value to store.
+ * ifThis = The comparison value.
+ *
+ * Returns:
+ * true if the store occurred, false if not.
+ */
+bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V1,V2)(T* here, V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
+ if (!is(T == shared) && is(T : V1))
+in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
+{
+ // resolve implicit conversions
+ T arg1 = ifThis;
+ T arg2 = writeThis;
- void atomicFence() nothrow @nogc @safe
+ static if (__traits(isFloating, T))
{
- import core.cpuid;
-
- asm pure nothrow @nogc @trusted
- {
- naked;
-
- call sse2;
- test AL, AL;
- jne Lcpuid;
-
- // Fast path: We have SSE2, so just use mfence.
- mfence;
- jmp Lend;
-
- Lcpuid:
-
- // Slow path: We use cpuid to serialize. This is
- // significantly slower than mfence, but is the
- // only serialization facility we have available
- // on older non-SSE2 chips.
- push EBX;
-
- mov EAX, 0;
- cpuid;
-
- pop EBX;
-
- Lend:
-
- ret;
- }
+ alias IntTy = IntForFloat!T;
+ return atomicCompareExchangeWeakNoResult!(succ, fail)(cast(IntTy*)here, *cast(IntTy*)&arg1, *cast(IntTy*)&arg2);
}
+ else
+ return atomicCompareExchangeWeakNoResult!(succ, fail)(here, arg1, arg2);
}
-else version (AsmX86_64)
+
+/// Ditto
+bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V1,V2)(shared(T)* here, V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
+ if (!is(T == class) && (is(T : V1) || is(shared T : V1)))
+in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
{
- // Uses specialized asm for fast fetch and add operations
- private HeadUnshared!(T) atomicFetchAdd(T)( ref shared T val, size_t mod ) pure nothrow @nogc @trusted
- if ( __traits(isIntegral, T) )
- in
+ static if (is (V1 == shared U1, U1))
+ alias Thunk1 = U1;
+ else
+ alias Thunk1 = V1;
+ static if (is (V2 == shared U2, U2))
+ alias Thunk2 = U2;
+ else
{
- assert( atomicValueIsProperlyAligned(val));
+ import core.internal.traits : hasUnsharedIndirections;
+ static assert(!hasUnsharedIndirections!V2, "Copying `" ~ V2.stringof ~ "* writeThis` to `" ~ shared(T).stringof ~ "* here` would violate shared.");
+ alias Thunk2 = V2;
}
- body
- {
- size_t tmp = mod;
- asm pure nothrow @nogc @trusted
- {
- mov RAX, tmp;
- mov RDX, val;
- }
- static if (T.sizeof == 1) asm pure nothrow @nogc @trusted { lock; xadd[RDX], AL; }
- else static if (T.sizeof == 2) asm pure nothrow @nogc @trusted { lock; xadd[RDX], AX; }
- else static if (T.sizeof == 4) asm pure nothrow @nogc @trusted { lock; xadd[RDX], EAX; }
- else static if (T.sizeof == 8) asm pure nothrow @nogc @trusted { lock; xadd[RDX], RAX; }
+ return casWeak!(succ, fail)(cast(T*)here, *cast(Thunk1*)&ifThis, *cast(Thunk2*)&writeThis);
+}
- asm pure nothrow @nogc @trusted
- {
- mov tmp, RAX;
- }
+/// Ditto
+bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V1,V2)(shared(T)* here, shared(V1) ifThis, shared(V2) writeThis) pure nothrow @nogc @trusted
+ if (is(T == class))
+in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
+{
+ return atomicCompareExchangeWeakNoResult!(succ, fail)(cast(T*)here, cast(V1)ifThis, cast(V2)writeThis);
+}
- return cast(T)tmp;
- }
+/**
+ * Stores 'writeThis' to the memory referenced by 'here' if the value
+ * referenced by 'here' is equal to the value referenced by 'ifThis'.
+ * The prior value referenced by 'here' is written to `ifThis` and
+ * returned to the user.
+ * The 'weak' version of cas may spuriously fail. It is recommended to
+ * use `casWeak` only when `cas` would be used in a loop.
+ * This operation is both lock-free and atomic.
+ *
+ * Params:
+ * here = The address of the destination variable.
+ * writeThis = The value to store.
+ * ifThis = The address of the value to compare, and receives the prior value of `here` as output.
+ *
+ * Returns:
+ * true if the store occurred, false if not.
+ */
+bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V)(T* here, T* ifThis, V writeThis) pure nothrow @nogc @trusted
+ if (!is(T == shared S, S) && !is(V == shared U, U))
+in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
+{
+ // resolve implicit conversions
+ T arg1 = writeThis;
- private HeadUnshared!(T) atomicFetchSub(T)( ref shared T val, size_t mod ) pure nothrow @nogc @safe
- if ( __traits(isIntegral, T) )
+ static if (__traits(isFloating, T))
{
- return atomicFetchAdd(val, -mod);
+ alias IntTy = IntForFloat!T;
+ return atomicCompareExchangeWeak!(succ, fail)(cast(IntTy*)here, cast(IntTy*)ifThis, *cast(IntTy*)&writeThis);
}
+ else
+ return atomicCompareExchangeWeak!(succ, fail)(here, ifThis, writeThis);
+}
- HeadUnshared!(T) atomicOp(string op, T, V1)( ref shared T val, V1 mod ) pure nothrow @nogc
- if ( __traits( compiles, mixin( "*cast(T*)&val" ~ op ~ "mod" ) ) )
- in
+/// Ditto
+bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V1,V2)(shared(T)* here, V1* ifThis, V2 writeThis) pure nothrow @nogc @trusted
+ if (!is(T == class) && (is(T : V1) || is(shared T : V1)))
+in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
+{
+ static if (is (V1 == shared U1, U1))
+ alias Thunk1 = U1;
+ else
{
- assert( atomicValueIsProperlyAligned(val));
+ import core.internal.traits : hasUnsharedIndirections;
+ static assert(!hasUnsharedIndirections!V1, "Copying `" ~ shared(T).stringof ~ "* here` to `" ~ V1.stringof ~ "* ifThis` would violate shared.");
+ alias Thunk1 = V1;
}
- body
+ static if (is (V2 == shared U2, U2))
+ alias Thunk2 = U2;
+ else
{
- // binary operators
- //
- // + - * / % ^^ &
- // | ^ << >> >>> ~ in
- // == != < <= > >=
- static if ( op == "+" || op == "-" || op == "*" || op == "/" ||
- op == "%" || op == "^^" || op == "&" || op == "|" ||
- op == "^" || op == "<<" || op == ">>" || op == ">>>" ||
- op == "~" || // skip "in"
- op == "==" || op == "!=" || op == "<" || op == "<=" ||
- op == ">" || op == ">=" )
- {
- HeadUnshared!(T) get = atomicLoad!(MemoryOrder.raw)( val );
- mixin( "return get " ~ op ~ " mod;" );
- }
- else
- // assignment operators
- //
- // += -= *= /= %= ^^= &=
- // |= ^= <<= >>= >>>= ~=
- static if ( op == "+=" && __traits(isIntegral, T) && __traits(isIntegral, V1))
- {
- return cast(T)(atomicFetchAdd!(T)(val, mod) + mod);
- }
- else static if ( op == "-=" && __traits(isIntegral, T) && __traits(isIntegral, V1))
- {
- return cast(T)(atomicFetchSub!(T)(val, mod) - mod);
- }
- else static if ( op == "+=" || op == "-=" || op == "*=" || op == "/=" ||
- op == "%=" || op == "^^=" || op == "&=" || op == "|=" ||
- op == "^=" || op == "<<=" || op == ">>=" || op == ">>>=" ) // skip "~="
- {
- HeadUnshared!(T) get, set;
-
- do
- {
- get = set = atomicLoad!(MemoryOrder.raw)( val );
- mixin( "set " ~ op ~ " mod;" );
- } while ( !casByRef( val, get, set ) );
- return set;
- }
- else
- {
- static assert( false, "Operation not supported." );
- }
+ import core.internal.traits : hasUnsharedIndirections;
+ static assert(!hasUnsharedIndirections!V2, "Copying `" ~ V2.stringof ~ "* writeThis` to `" ~ shared(T).stringof ~ "* here` would violate shared.");
+ alias Thunk2 = V2;
}
+ static assert (is(T : Thunk1), "Mismatching types for `here` and `ifThis`: `" ~ shared(T).stringof ~ "` and `" ~ V1.stringof ~ "`.");
+ return casWeak!(succ, fail)(cast(T*)here, cast(Thunk1*)ifThis, *cast(Thunk2*)&writeThis);
+}
+/// Ditto
+bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V)(shared(T)* here, shared(T)* ifThis, shared(V) writeThis) pure nothrow @nogc @trusted
+ if (is(T == class))
+in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
+{
+ return atomicCompareExchangeWeak!(succ, fail)(cast(T*)here, cast(T*)ifThis, cast(V)writeThis);
+}
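+
+// Editor's illustrative sketch (not part of the upstream patch): casWeak may fail
+// spuriously, so it is retried in a loop as recommended above.
+unittest
+{
+    shared int atom = 5;
+    int expected = atomicLoad(atom);
+    while (!casWeak(&atom, expected, expected + 1))
+        expected = atomicLoad(atom);    // reload and retry on failure
+    assert(atomicLoad(atom) == 6);
+}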
- bool casByRef(T,V1,V2)( ref T value, V1 ifThis, V2 writeThis ) pure nothrow @nogc @trusted
- {
- return cas(&value, ifThis, writeThis);
- }
+/**
+ * Inserts a full load/store memory fence (on platforms that need it). This ensures
+ * that all loads and stores before a call to this function are executed before any
+ * loads and stores after the call.
+ */
+void atomicFence(MemoryOrder order = MemoryOrder.seq)() pure nothrow @nogc @safe
+{
+ core.internal.atomic.atomicFence!order();
+}
- bool cas(T,V1,V2)( shared(T)* here, const V1 ifThis, V2 writeThis ) pure nothrow @nogc @safe
- if ( !is(T == class) && !is(T U : U*) && __traits( compiles, { *here = writeThis; } ) )
- {
- return casImpl(here, ifThis, writeThis);
- }
+/**
+ * Gives a hint to the processor that the calling thread is in a 'spin-wait' loop,
+ * allowing it to allocate resources more efficiently.
+ */
+void pause() pure nothrow @nogc @safe
+{
+ core.internal.atomic.pause();
+}
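+
+// Editor's illustrative sketch (not part of the upstream patch): a minimal
+// spin-wait combining atomicLoad, pause and atomicFence. In real code another
+// thread would publish `ready`.
+unittest
+{
+    shared bool ready = true;
+    while (!atomicLoad!(MemoryOrder.acq)(ready))
+        pause();        // hint to the CPU that this thread is spinning
+    atomicFence();      // full barrier, shown here only to demonstrate the call
+}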
- bool cas(T,V1,V2)( shared(T)* here, const shared(V1) ifThis, shared(V2) writeThis ) pure nothrow @nogc @safe
- if ( is(T == class) && __traits( compiles, { *here = writeThis; } ) )
+/**
+ * Performs the binary operation 'op' on val using 'mod' as the modifier.
+ *
+ * Params:
+ * val = The target variable.
+ * mod = The modifier to apply.
+ *
+ * Returns:
+ * The result of the operation.
+ */
+TailShared!T atomicOp(string op, T, V1)(ref shared T val, V1 mod) pure nothrow @nogc @safe
+ if (__traits(compiles, mixin("*cast(T*)&val" ~ op ~ "mod")))
+in (atomicValueIsProperlyAligned(val))
+{
+ // binary operators
+ //
+ // + - * / % ^^ &
+ // | ^ << >> >>> ~ in
+ // == != < <= > >=
+ static if (op == "+" || op == "-" || op == "*" || op == "/" ||
+ op == "%" || op == "^^" || op == "&" || op == "|" ||
+ op == "^" || op == "<<" || op == ">>" || op == ">>>" ||
+ op == "~" || // skip "in"
+ op == "==" || op == "!=" || op == "<" || op == "<=" ||
+ op == ">" || op == ">=")
{
- return casImpl(here, ifThis, writeThis);
+ T get = atomicLoad!(MemoryOrder.raw, T)(val);
+ mixin("return get " ~ op ~ " mod;");
}
-
- bool cas(T,V1,V2)( shared(T)* here, const shared(V1)* ifThis, shared(V2)* writeThis ) pure nothrow @nogc @safe
- if ( is(T U : U*) && __traits( compiles, { *here = writeThis; } ) )
+ else
+ // assignment operators
+ //
+ // += -= *= /= %= ^^= &=
+ // |= ^= <<= >>= >>>= ~=
+ static if (op == "+=" && __traits(isIntegral, T) && __traits(isIntegral, V1) && T.sizeof <= size_t.sizeof && V1.sizeof <= size_t.sizeof)
{
- return casImpl(here, ifThis, writeThis);
+ return cast(T)(atomicFetchAdd(val, mod) + mod);
}
-
- private bool casImpl(T,V1,V2)( shared(T)* here, V1 ifThis, V2 writeThis ) pure nothrow @nogc @safe
- in
+ else static if (op == "-=" && __traits(isIntegral, T) && __traits(isIntegral, V1) && T.sizeof <= size_t.sizeof && V1.sizeof <= size_t.sizeof)
{
- assert( atomicPtrIsProperlyAligned( here ) );
+ return cast(T)(atomicFetchSub(val, mod) - mod);
}
- body
+ else static if (op == "+=" || op == "-=" || op == "*=" || op == "/=" ||
+ op == "%=" || op == "^^=" || op == "&=" || op == "|=" ||
+ op == "^=" || op == "<<=" || op == ">>=" || op == ">>>=") // skip "~="
{
- static if ( T.sizeof == byte.sizeof )
- {
- //////////////////////////////////////////////////////////////////
- // 1 Byte CAS
- //////////////////////////////////////////////////////////////////
-
- asm pure nothrow @nogc @trusted
- {
- mov DL, writeThis;
- mov AL, ifThis;
- mov RCX, here;
- lock; // lock always needed to make this op atomic
- cmpxchg [RCX], DL;
- setz AL;
- }
- }
- else static if ( T.sizeof == short.sizeof )
- {
- //////////////////////////////////////////////////////////////////
- // 2 Byte CAS
- //////////////////////////////////////////////////////////////////
-
- asm pure nothrow @nogc @trusted
- {
- mov DX, writeThis;
- mov AX, ifThis;
- mov RCX, here;
- lock; // lock always needed to make this op atomic
- cmpxchg [RCX], DX;
- setz AL;
- }
- }
- else static if ( T.sizeof == int.sizeof )
- {
- //////////////////////////////////////////////////////////////////
- // 4 Byte CAS
- //////////////////////////////////////////////////////////////////
-
- asm pure nothrow @nogc @trusted
- {
- mov EDX, writeThis;
- mov EAX, ifThis;
- mov RCX, here;
- lock; // lock always needed to make this op atomic
- cmpxchg [RCX], EDX;
- setz AL;
- }
- }
- else static if ( T.sizeof == long.sizeof )
- {
- //////////////////////////////////////////////////////////////////
- // 8 Byte CAS on a 64-Bit Processor
- //////////////////////////////////////////////////////////////////
-
- asm pure nothrow @nogc @trusted
- {
- mov RDX, writeThis;
- mov RAX, ifThis;
- mov RCX, here;
- lock; // lock always needed to make this op atomic
- cmpxchg [RCX], RDX;
- setz AL;
- }
- }
- else static if ( T.sizeof == long.sizeof*2 && has128BitCAS)
- {
- //////////////////////////////////////////////////////////////////
- // 16 Byte CAS on a 64-Bit Processor
- //////////////////////////////////////////////////////////////////
- version (Win64){
- //Windows 64 calling convention uses different registers.
- //DMD appears to reverse the register order.
- asm pure nothrow @nogc @trusted
- {
- push RDI;
- push RBX;
- mov R9, writeThis;
- mov R10, ifThis;
- mov R11, here;
-
- mov RDI, R9;
- mov RBX, [RDI];
- mov RCX, 8[RDI];
-
- mov RDI, R10;
- mov RAX, [RDI];
- mov RDX, 8[RDI];
-
- mov RDI, R11;
- lock;
- cmpxchg16b [RDI];
- setz AL;
- pop RBX;
- pop RDI;
- }
-
- }else{
-
- asm pure nothrow @nogc @trusted
- {
- push RDI;
- push RBX;
- lea RDI, writeThis;
- mov RBX, [RDI];
- mov RCX, 8[RDI];
- lea RDI, ifThis;
- mov RAX, [RDI];
- mov RDX, 8[RDI];
- mov RDI, here;
- lock; // lock always needed to make this op atomic
- cmpxchg16b [RDI];
- setz AL;
- pop RBX;
- pop RDI;
- }
- }
- }
- else
+ T set, get = atomicLoad!(MemoryOrder.raw, T)(val);
+ do
{
- static assert( false, "Invalid template type specified." );
- }
+ set = get;
+ mixin("set " ~ op ~ " mod;");
+ } while (!casWeakByRef(val, get, set));
+ return set;
}
-
-
- enum MemoryOrder
+ else
{
- raw,
- acq,
- rel,
- seq,
+ static assert(false, "Operation not supported.");
}
+}
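+
+// Editor's illustrative sketch (not part of the upstream patch): atomicOp performs
+// a read-modify-write and returns the new value.
+unittest
+{
+    shared int x = 10;
+    assert(atomicOp!"+="(x, 5) == 15);
+    assert(atomicOp!"-="(x, 3) == 12);
+    assert(atomicLoad(x) == 12);
+}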
- deprecated("Please use MemoryOrder instead.")
- alias MemoryOrder msync;
+version (D_InlineAsm_X86)
+{
+ enum has64BitXCHG = false;
+ enum has64BitCAS = true;
+ enum has128BitCAS = false;
+}
+else version (D_InlineAsm_X86_64)
+{
+ enum has64BitXCHG = true;
+ enum has64BitCAS = true;
+ enum has128BitCAS = true;
+}
+else version (GNU)
+{
+ import gcc.config;
+ enum has64BitCAS = GNU_Have_64Bit_Atomics;
+ enum has64BitXCHG = GNU_Have_64Bit_Atomics;
+ enum has128BitCAS = GNU_Have_LibAtomic;
+}
+else
+{
+ enum has64BitXCHG = false;
+ enum has64BitCAS = false;
+ enum has128BitCAS = false;
+}
- private
+private
+{
+ bool atomicValueIsProperlyAligned(T)(ref T val) pure nothrow @nogc @trusted
{
- // NOTE: x86 loads implicitly have acquire semantics so a memory
- // barrier is only necessary on releases.
- template needsLoadBarrier( MemoryOrder ms )
- {
- enum bool needsLoadBarrier = ms == MemoryOrder.seq;
- }
-
-
- // NOTE: x86 stores implicitly have release semantics so a memory
- // barrier is only necessary on acquires.
- template needsStoreBarrier( MemoryOrder ms )
- {
- enum bool needsStoreBarrier = ms == MemoryOrder.seq;
- }
+ return atomicPtrIsProperlyAligned(&val);
}
-
- HeadUnshared!(T) atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)( ref const shared T val ) pure nothrow @nogc @safe
- if (!__traits(isFloating, T))
+ bool atomicPtrIsProperlyAligned(T)(T* ptr) pure nothrow @nogc @safe
{
- static assert( ms != MemoryOrder.rel, "invalid MemoryOrder for atomicLoad()" );
- static assert( __traits(isPOD, T), "argument to atomicLoad() must be POD" );
-
- static if ( T.sizeof == byte.sizeof )
- {
- //////////////////////////////////////////////////////////////////
- // 1 Byte Load
- //////////////////////////////////////////////////////////////////
-
- static if ( needsLoadBarrier!(ms) )
- {
- asm pure nothrow @nogc @trusted
- {
- mov DL, 0;
- mov AL, 0;
- mov RCX, val;
- lock; // lock always needed to make this op atomic
- cmpxchg [RCX], DL;
- }
- }
- else
- {
- asm pure nothrow @nogc @trusted
- {
- mov RAX, val;
- mov AL, [RAX];
- }
- }
- }
- else static if ( T.sizeof == short.sizeof )
- {
- //////////////////////////////////////////////////////////////////
- // 2 Byte Load
- //////////////////////////////////////////////////////////////////
-
- static if ( needsLoadBarrier!(ms) )
- {
- asm pure nothrow @nogc @trusted
- {
- mov DX, 0;
- mov AX, 0;
- mov RCX, val;
- lock; // lock always needed to make this op atomic
- cmpxchg [RCX], DX;
- }
- }
- else
- {
- asm pure nothrow @nogc @trusted
- {
- mov RAX, val;
- mov AX, [RAX];
- }
- }
- }
- else static if ( T.sizeof == int.sizeof )
+ // NOTE: Strictly speaking, the x86 supports atomic operations on
+ // unaligned values. However, this is far slower than the
+ // common case, so such behavior should be prohibited.
+ static if (T.sizeof > size_t.sizeof)
{
- //////////////////////////////////////////////////////////////////
- // 4 Byte Load
- //////////////////////////////////////////////////////////////////
-
- static if ( needsLoadBarrier!(ms) )
+ version (X86)
{
- asm pure nothrow @nogc @trusted
- {
- mov EDX, 0;
- mov EAX, 0;
- mov RCX, val;
- lock; // lock always needed to make this op atomic
- cmpxchg [RCX], EDX;
- }
+ // cmpxchg8b only requires 4-byte alignment
+ return cast(size_t)ptr % size_t.sizeof == 0;
}
else
{
- asm pure nothrow @nogc @trusted
- {
- mov RAX, val;
- mov EAX, [RAX];
- }
- }
- }
- else static if ( T.sizeof == long.sizeof )
- {
- //////////////////////////////////////////////////////////////////
- // 8 Byte Load
- //////////////////////////////////////////////////////////////////
-
- static if ( needsLoadBarrier!(ms) )
- {
- asm pure nothrow @nogc @trusted
- {
- mov RDX, 0;
- mov RAX, 0;
- mov RCX, val;
- lock; // lock always needed to make this op atomic
- cmpxchg [RCX], RDX;
- }
- }
- else
- {
- asm pure nothrow @nogc @trusted
- {
- mov RAX, val;
- mov RAX, [RAX];
- }
- }
- }
- else static if ( T.sizeof == long.sizeof*2 && has128BitCAS )
- {
- //////////////////////////////////////////////////////////////////
- // 16 Byte Load on a 64-Bit Processor
- //////////////////////////////////////////////////////////////////
- version (Win64){
- size_t[2] retVal;
- asm pure nothrow @nogc @trusted
- {
- push RDI;
- push RBX;
- mov RDI, val;
- mov RBX, 0;
- mov RCX, 0;
- mov RAX, 0;
- mov RDX, 0;
- lock; // lock always needed to make this op atomic
- cmpxchg16b [RDI];
- lea RDI, retVal;
- mov [RDI], RAX;
- mov 8[RDI], RDX;
- pop RBX;
- pop RDI;
- }
-
- static if (is(T:U[], U))
- {
- pragma(inline, true)
- static typeof(return) toTrusted(size_t[2] retVal) @trusted
- {
- return *(cast(typeof(return)*) retVal.ptr);
- }
-
- return toTrusted(retVal);
- }
- else
- {
- return cast(typeof(return)) retVal;
- }
- }else{
- asm pure nothrow @nogc @trusted
- {
- push RDI;
- push RBX;
- mov RBX, 0;
- mov RCX, 0;
- mov RAX, 0;
- mov RDX, 0;
- mov RDI, val;
- lock; // lock always needed to make this op atomic
- cmpxchg16b [RDI];
- pop RBX;
- pop RDI;
- }
+ // e.g., x86_64 cmpxchg16b requires 16-byte alignment
+ return cast(size_t)ptr % T.sizeof == 0;
}
}
else
{
- static assert( false, "Invalid template type specified." );
+ return cast(size_t)ptr % T.sizeof == 0;
}
}
-
- void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V1)( ref shared T val, V1 newval ) pure nothrow @nogc @safe
- if ( __traits( compiles, { val = newval; } ) )
+ template IntForFloat(F)
+ if (__traits(isFloating, F))
{
- static assert( ms != MemoryOrder.acq, "invalid MemoryOrder for atomicStore()" );
- static assert( __traits(isPOD, T), "argument to atomicStore() must be POD" );
-
- static if ( T.sizeof == byte.sizeof )
- {
- //////////////////////////////////////////////////////////////////
- // 1 Byte Store
- //////////////////////////////////////////////////////////////////
-
- static if ( needsStoreBarrier!(ms) )
- {
- asm pure nothrow @nogc @trusted
- {
- mov RAX, val;
- mov DL, newval;
- lock;
- xchg [RAX], DL;
- }
- }
- else
- {
- asm pure nothrow @nogc @trusted
- {
- mov RAX, val;
- mov DL, newval;
- mov [RAX], DL;
- }
- }
- }
- else static if ( T.sizeof == short.sizeof )
- {
- //////////////////////////////////////////////////////////////////
- // 2 Byte Store
- //////////////////////////////////////////////////////////////////
-
- static if ( needsStoreBarrier!(ms) )
- {
- asm pure nothrow @nogc @trusted
- {
- mov RAX, val;
- mov DX, newval;
- lock;
- xchg [RAX], DX;
- }
- }
- else
- {
- asm pure nothrow @nogc @trusted
- {
- mov RAX, val;
- mov DX, newval;
- mov [RAX], DX;
- }
- }
- }
- else static if ( T.sizeof == int.sizeof )
- {
- //////////////////////////////////////////////////////////////////
- // 4 Byte Store
- //////////////////////////////////////////////////////////////////
-
- static if ( needsStoreBarrier!(ms) )
- {
- asm pure nothrow @nogc @trusted
- {
- mov RAX, val;
- mov EDX, newval;
- lock;
- xchg [RAX], EDX;
- }
- }
- else
- {
- asm pure nothrow @nogc @trusted
- {
- mov RAX, val;
- mov EDX, newval;
- mov [RAX], EDX;
- }
- }
- }
- else static if ( T.sizeof == long.sizeof && has64BitCAS )
- {
- //////////////////////////////////////////////////////////////////
- // 8 Byte Store on a 64-Bit Processor
- //////////////////////////////////////////////////////////////////
-
- static if ( needsStoreBarrier!(ms) )
- {
- asm pure nothrow @nogc @trusted
- {
- mov RAX, val;
- mov RDX, newval;
- lock;
- xchg [RAX], RDX;
- }
- }
- else
- {
- asm pure nothrow @nogc @trusted
- {
- mov RAX, val;
- mov RDX, newval;
- mov [RAX], RDX;
- }
- }
- }
- else static if ( T.sizeof == long.sizeof*2 && has128BitCAS )
- {
- //////////////////////////////////////////////////////////////////
- // 16 Byte Store on a 64-Bit Processor
- //////////////////////////////////////////////////////////////////
- version (Win64){
- asm pure nothrow @nogc @trusted
- {
- push RDI;
- push RBX;
- mov R9, val;
- mov R10, newval;
-
- mov RDI, R10;
- mov RBX, [RDI];
- mov RCX, 8[RDI];
-
- mov RDI, R9;
- mov RAX, [RDI];
- mov RDX, 8[RDI];
-
- L1: lock; // lock always needed to make this op atomic
- cmpxchg16b [RDI];
- jne L1;
- pop RBX;
- pop RDI;
- }
- }else{
- asm pure nothrow @nogc @trusted
- {
- push RDI;
- push RBX;
- lea RDI, newval;
- mov RBX, [RDI];
- mov RCX, 8[RDI];
- mov RDI, val;
- mov RAX, [RDI];
- mov RDX, 8[RDI];
- L1: lock; // lock always needed to make this op atomic
- cmpxchg16b [RDI];
- jne L1;
- pop RBX;
- pop RDI;
- }
- }
- }
+ static if (F.sizeof == 4)
+ alias IntForFloat = uint;
+ else static if (F.sizeof == 8)
+ alias IntForFloat = ulong;
else
- {
- static assert( false, "Invalid template type specified." );
- }
+            static assert (false, "Invalid floating point type: " ~ F.stringof ~ ", only `float` and `double` are supported.");
}
-
- void atomicFence() nothrow @nogc @safe
+ template IntForStruct(S)
+ if (is(S == struct))
{
- // SSE2 is always present in 64-bit x86 chips.
- asm nothrow @nogc @trusted
- {
- naked;
-
- mfence;
- ret;
- }
+        static if (S.sizeof == 1)
+            alias IntForStruct = ubyte;
+        else static if (S.sizeof == 2)
+            alias IntForStruct = ushort;
+        else static if (S.sizeof == 4)
+            alias IntForStruct = uint;
+        else static if (S.sizeof == 8)
+            alias IntForStruct = ulong;
+        else static if (S.sizeof == 16)
+            alias IntForStruct = ulong[2]; // TODO: what's the best type here? slice/delegates pass in registers...
+ else
+ static assert (ValidateStruct!S);
}
-}
-else version (GNU)
-{
- import gcc.builtins;
- HeadUnshared!(T) atomicOp(string op, T, V1)( ref shared T val, V1 mod ) pure nothrow @nogc @trusted
- if ( __traits( compiles, mixin( "*cast(T*)&val" ~ op ~ "mod" ) ) )
+ template ValidateStruct(S)
+ if (is(S == struct))
{
- // binary operators
- //
- // + - * / % ^^ &
- // | ^ << >> >>> ~ in
- // == != < <= > >=
- static if ( op == "+" || op == "-" || op == "*" || op == "/" ||
- op == "%" || op == "^^" || op == "&" || op == "|" ||
- op == "^" || op == "<<" || op == ">>" || op == ">>>" ||
- op == "~" || // skip "in"
- op == "==" || op == "!=" || op == "<" || op == "<=" ||
- op == ">" || op == ">=" )
- {
- HeadUnshared!(T) get = atomicLoad!(MemoryOrder.raw)( val );
- mixin( "return get " ~ op ~ " mod;" );
- }
- else
- // assignment operators
- //
- // += -= *= /= %= ^^= &=
- // |= ^= <<= >>= >>>= ~=
- static if ( op == "+=" || op == "-=" || op == "*=" || op == "/=" ||
- op == "%=" || op == "^^=" || op == "&=" || op == "|=" ||
- op == "^=" || op == "<<=" || op == ">>=" || op == ">>>=" ) // skip "~="
- {
- HeadUnshared!(T) get, set;
-
- do
- {
- get = set = atomicLoad!(MemoryOrder.raw)( val );
- mixin( "set " ~ op ~ " mod;" );
- } while ( !cas( &val, get, set ) );
- return set;
- }
- else
- {
- static assert( false, "Operation not supported." );
- }
- }
+ import core.internal.traits : hasElaborateAssign;
+ // `(x & (x-1)) == 0` checks that x is a power of 2.
+ static assert (S.sizeof <= size_t.sizeof * 2
+ && (S.sizeof & (S.sizeof - 1)) == 0,
+ S.stringof ~ " has invalid size for atomic operations.");
+ static assert (!hasElaborateAssign!S, S.stringof ~ " may not have an elaborate assignment when used with atomic operations.");
- bool cas(T,V1,V2)( shared(T)* here, const V1 ifThis, V2 writeThis ) pure nothrow @nogc @safe
- if ( !is(T == class) && !is(T U : U*) && __traits( compiles, { *here = writeThis; } ) )
- {
- return casImpl(here, ifThis, writeThis);
+ enum ValidateStruct = true;
}
- bool cas(T,V1,V2)( shared(T)* here, const shared(V1) ifThis, shared(V2) writeThis ) pure nothrow @nogc @safe
- if ( is(T == class) && __traits( compiles, { *here = writeThis; } ) )
+ // TODO: it'd be nice if we had @trusted scopes; we could remove this...
+ bool casWeakByRef(T,V1,V2)(ref T value, ref V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
{
- return casImpl(here, ifThis, writeThis);
+ return casWeak(&value, &ifThis, writeThis);
}
- bool cas(T,V1,V2)( shared(T)* here, const shared(V1)* ifThis, shared(V2)* writeThis ) pure nothrow @nogc @safe
- if ( is(T U : U*) && __traits( compiles, { *here = writeThis; } ) )
+ /* Construct a type with a shared tail, and if possible with an unshared
+ head. */
+ template TailShared(U) if (!is(U == shared))
{
- return casImpl(here, ifThis, writeThis);
+ alias TailShared = .TailShared!(shared U);
}
-
- private bool casImpl(T,V1,V2)( shared(T)* here, V1 ifThis, V2 writeThis ) pure nothrow @nogc @trusted
+ template TailShared(S) if (is(S == shared))
{
- bool res = void;
-
- static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
- {
- static if (T.sizeof == byte.sizeof)
- {
- res = __atomic_compare_exchange_1(here, cast(void*) &ifThis, *cast(ubyte*) &writeThis,
- false, MemoryOrder.seq, MemoryOrder.seq);
- }
- else static if (T.sizeof == short.sizeof)
- {
- res = __atomic_compare_exchange_2(here, cast(void*) &ifThis, *cast(ushort*) &writeThis,
- false, MemoryOrder.seq, MemoryOrder.seq);
- }
- else static if (T.sizeof == int.sizeof)
- {
- res = __atomic_compare_exchange_4(here, cast(void*) &ifThis, *cast(uint*) &writeThis,
- false, MemoryOrder.seq, MemoryOrder.seq);
- }
- else static if (T.sizeof == long.sizeof && GNU_Have_64Bit_Atomics)
- {
- res = __atomic_compare_exchange_8(here, cast(void*) &ifThis, *cast(ulong*) &writeThis,
- false, MemoryOrder.seq, MemoryOrder.seq);
- }
- else static if (GNU_Have_LibAtomic)
+ // Get the unshared variant of S.
+ static if (is(S U == shared U)) {}
+ else static assert(false, "Should never be triggered. The `static " ~
+ "if` declares `U` as the unshared version of the shared type " ~
+ "`S`. `S` is explicitly declared as shared, so getting `U` " ~
+ "should always work.");
+
+ static if (is(S : U))
+ alias TailShared = U;
+ else static if (is(S == struct))
+ {
+ enum implName = () {
+ /* Start with "_impl". If S has a field with that name, append
+ underscores until the clash is resolved. */
+ string name = "_impl";
+ string[] fieldNames;
+ static foreach (alias field; S.tupleof)
+ {
+ fieldNames ~= __traits(identifier, field);
+ }
+ static bool canFind(string[] haystack, string needle)
+ {
+ foreach (candidate; haystack)
+ {
+ if (candidate == needle) return true;
+ }
+ return false;
+ }
+ while (canFind(fieldNames, name)) name ~= "_";
+ return name;
+ } ();
+ struct TailShared
{
- res = __atomic_compare_exchange(T.sizeof, here, cast(void*) &ifThis, cast(void*) &writeThis,
- MemoryOrder.seq, MemoryOrder.seq);
+ static foreach (i, alias field; S.tupleof)
+ {
+ /* On @trusted: This is casting the field from shared(Foo)
+ to TailShared!Foo. The cast is safe because the field has
+ been loaded and is not shared anymore. */
+ mixin("
+ @trusted @property
+ ref " ~ __traits(identifier, field) ~ "()
+ {
+ alias R = TailShared!(typeof(field));
+ return * cast(R*) &" ~ implName ~ ".tupleof[i];
+ }
+ ");
+ }
+ mixin("
+ S " ~ implName ~ ";
+ alias " ~ implName ~ " this;
+ ");
}
- else
- static assert(0, "Invalid template type specified.");
}
else
- {
- static if (T.sizeof == byte.sizeof)
- alias U = byte;
- else static if (T.sizeof == short.sizeof)
- alias U = short;
- else static if (T.sizeof == int.sizeof)
- alias U = int;
- else static if (T.sizeof == long.sizeof)
- alias U = long;
- else
- static assert(0, "Invalid template type specified.");
-
- getAtomicMutex.lock();
- scope(exit) getAtomicMutex.unlock();
-
- if (*cast(U*)here == *cast(U*)&ifThis)
- {
- *here = writeThis;
- res = true;
- }
- else
- res = false;
- }
-
- return res;
+ alias TailShared = S;
}
-
-
- // Memory model types for the __atomic* builtins.
- enum MemoryOrder
+ @safe unittest
{
- raw = 0,
- acq = 2,
- rel = 3,
- seq = 5,
- }
+ // No tail (no indirections) -> fully unshared.
- deprecated("Please use MemoryOrder instead.")
- alias MemoryOrder msync;
+ static assert(is(TailShared!int == int));
+ static assert(is(TailShared!(shared int) == int));
+ static struct NoIndir { int i; }
+ static assert(is(TailShared!NoIndir == NoIndir));
+ static assert(is(TailShared!(shared NoIndir) == NoIndir));
- HeadUnshared!(T) atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)( ref const shared T val ) pure nothrow @nogc @trusted
- if (!__traits(isFloating, T))
- {
- static assert(ms != MemoryOrder.rel, "Invalid MemoryOrder for atomicLoad");
- static assert(__traits(isPOD, T), "argument to atomicLoad() must be POD");
-
- static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
- {
- static if (T.sizeof == ubyte.sizeof)
- {
- ubyte value = __atomic_load_1(&val, ms);
- return *cast(HeadUnshared!T*) &value;
- }
- else static if (T.sizeof == ushort.sizeof)
- {
- ushort value = __atomic_load_2(&val, ms);
- return *cast(HeadUnshared!T*) &value;
- }
- else static if (T.sizeof == uint.sizeof)
- {
- uint value = __atomic_load_4(&val, ms);
- return *cast(HeadUnshared!T*) &value;
- }
- else static if (T.sizeof == ulong.sizeof && GNU_Have_64Bit_Atomics)
- {
- ulong value = __atomic_load_8(&val, ms);
- return *cast(HeadUnshared!T*) &value;
- }
- else static if (GNU_Have_LibAtomic)
- {
- T value;
- __atomic_load(T.sizeof, &val, cast(void*)&value, ms);
- return *cast(HeadUnshared!T*) &value;
- }
- else
- static assert(0, "Invalid template type specified.");
- }
- else
- {
- getAtomicMutex.lock();
- scope(exit) getAtomicMutex.unlock();
- return *cast(HeadUnshared!T*)&val;
- }
- }
+ // Tail can be independently shared or is already -> tail-shared.
+ static assert(is(TailShared!(int*) == shared(int)*));
+ static assert(is(TailShared!(shared int*) == shared(int)*));
+ static assert(is(TailShared!(shared(int)*) == shared(int)*));
- void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V1)( ref shared T val, V1 newval ) pure nothrow @nogc @trusted
- if ( __traits( compiles, { val = newval; } ) )
- {
- static assert(ms != MemoryOrder.acq, "Invalid MemoryOrder for atomicStore");
- static assert(__traits(isPOD, T), "argument to atomicLoad() must be POD");
-
- static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
- {
- static if (T.sizeof == ubyte.sizeof)
- {
- __atomic_store_1(&val, *cast(ubyte*) &newval, ms);
- }
- else static if (T.sizeof == ushort.sizeof)
- {
- __atomic_store_2(&val, *cast(ushort*) &newval, ms);
- }
- else static if (T.sizeof == uint.sizeof)
- {
- __atomic_store_4(&val, *cast(uint*) &newval, ms);
- }
- else static if (T.sizeof == ulong.sizeof && GNU_Have_64Bit_Atomics)
- {
- __atomic_store_8(&val, *cast(ulong*) &newval, ms);
- }
- else static if (GNU_Have_LibAtomic)
- {
- __atomic_store(T.sizeof, &val, cast(void*)&newval, ms);
- }
- else
- static assert(0, "Invalid template type specified.");
- }
- else
- {
- getAtomicMutex.lock();
- val = newval;
- getAtomicMutex.unlock();
- }
- }
+ static assert(is(TailShared!(int[]) == shared(int)[]));
+ static assert(is(TailShared!(shared int[]) == shared(int)[]));
+ static assert(is(TailShared!(shared(int)[]) == shared(int)[]));
+ static struct S1 { shared int* p; }
+ static assert(is(TailShared!S1 == S1));
+ static assert(is(TailShared!(shared S1) == S1));
- void atomicFence() nothrow @nogc
- {
- static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
- __atomic_thread_fence(MemoryOrder.seq);
- else
- {
- getAtomicMutex.lock();
- getAtomicMutex.unlock();
- }
- }
+ static struct S2 { shared(int)* p; }
+ static assert(is(TailShared!S2 == S2));
+ static assert(is(TailShared!(shared S2) == S2));
- static if (!GNU_Have_Atomics && !GNU_Have_LibAtomic)
- {
- // Use system mutex for atomics, faking the purity of the functions so
- // that they can be used in pure/nothrow/@safe code.
- extern (C) private pure @trusted @nogc nothrow
- {
- static if (GNU_Thread_Model == ThreadModel.Posix)
- {
- import core.sys.posix.pthread;
- alias atomicMutexHandle = pthread_mutex_t;
+ // Tail follows shared-ness of head -> fully shared.
- pragma(mangle, "pthread_mutex_init") int fakePureMutexInit(pthread_mutex_t*, pthread_mutexattr_t*);
- pragma(mangle, "pthread_mutex_lock") int fakePureMutexLock(pthread_mutex_t*);
- pragma(mangle, "pthread_mutex_unlock") int fakePureMutexUnlock(pthread_mutex_t*);
- }
- else static if (GNU_Thread_Model == ThreadModel.Win32)
- {
- import core.sys.windows.winbase;
- alias atomicMutexHandle = CRITICAL_SECTION;
+ static class C { int i; }
+ static assert(is(TailShared!C == shared C));
+ static assert(is(TailShared!(shared C) == shared C));
- pragma(mangle, "InitializeCriticalSection") int fakePureMutexInit(CRITICAL_SECTION*);
- pragma(mangle, "EnterCriticalSection") void fakePureMutexLock(CRITICAL_SECTION*);
- pragma(mangle, "LeaveCriticalSection") int fakePureMutexUnlock(CRITICAL_SECTION*);
- }
- else
- {
- alias atomicMutexHandle = int;
- }
- }
+ /* However, structs get a wrapper that has getters which cast to
+ TailShared. */
- // Implements lock/unlock operations.
- private struct AtomicMutex
- {
- int lock() pure @trusted @nogc nothrow
- {
- static if (GNU_Thread_Model == ThreadModel.Posix)
- {
- if (!_inited)
- {
- fakePureMutexInit(&_handle, null);
- _inited = true;
- }
- return fakePureMutexLock(&_handle);
- }
- else
- {
- static if (GNU_Thread_Model == ThreadModel.Win32)
- {
- if (!_inited)
- {
- fakePureMutexInit(&_handle);
- _inited = true;
- }
- fakePureMutexLock(&_handle);
- }
- return 0;
- }
- }
+ static struct S3 { int* p; int _impl; int _impl_; int _impl__; }
+ static assert(!is(TailShared!S3 : S3));
+ static assert(is(TailShared!S3 : shared S3));
+ static assert(is(TailShared!(shared S3) == TailShared!S3));
- int unlock() pure @trusted @nogc nothrow
- {
- static if (GNU_Thread_Model == ThreadModel.Posix)
- return fakePureMutexUnlock(&_handle);
- else
- {
- static if (GNU_Thread_Model == ThreadModel.Win32)
- fakePureMutexUnlock(&_handle);
- return 0;
- }
- }
+ static struct S4 { shared(int)** p; }
+ static assert(!is(TailShared!S4 : S4));
+ static assert(is(TailShared!S4 : shared S4));
+ static assert(is(TailShared!(shared S4) == TailShared!S4));
+ }
+}
- private:
- atomicMutexHandle _handle;
- bool _inited;
- }
- // Internal static mutex reference.
- private AtomicMutex* _getAtomicMutex() @trusted @nogc nothrow
- {
- __gshared static AtomicMutex mutex;
- return &mutex;
- }
+////////////////////////////////////////////////////////////////////////////////
+// Unit Tests
+////////////////////////////////////////////////////////////////////////////////
- // Pure alias for _getAtomicMutex.
- pragma(mangle, _getAtomicMutex.mangleof)
- private AtomicMutex* getAtomicMutex() pure @trusted @nogc nothrow @property;
- }
-}
-// This is an ABI adapter that works on all architectures. It type puns
-// floats and doubles to ints and longs, atomically loads them, then puns
-// them back. This is necessary so that they get returned in floating
-// point instead of integer registers.
-HeadUnshared!(T) atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)( ref const shared T val ) pure nothrow @nogc @trusted
-if (__traits(isFloating, T))
+version (CoreUnittest)
{
- static if (T.sizeof == int.sizeof)
+ version (D_LP64)
{
- static assert(is(T : float));
- auto ptr = cast(const shared int*) &val;
- auto asInt = atomicLoad!(ms)(*ptr);
- return *(cast(typeof(return)*) &asInt);
+ enum hasDWCAS = has128BitCAS;
}
- else static if (T.sizeof == long.sizeof)
+ else
{
- static assert(is(T : double));
- auto ptr = cast(const shared long*) &val;
- auto asLong = atomicLoad!(ms)(*ptr);
- return *(cast(typeof(return)*) &asLong);
+ enum hasDWCAS = has64BitCAS;
}
- else
+
+ void testXCHG(T)(T val) pure nothrow @nogc @trusted
+ in
{
- static assert(0, "Cannot atomically load 80-bit reals.");
+ assert(val !is T.init);
}
-}
+ do
+ {
+ T base = cast(T)null;
+ shared(T) atom = cast(shared(T))null;
-////////////////////////////////////////////////////////////////////////////////
-// Unit Tests
-////////////////////////////////////////////////////////////////////////////////
+ assert(base !is val, T.stringof);
+ assert(atom is base, T.stringof);
+ assert(atomicExchange(&atom, val) is base, T.stringof);
+ assert(atom is val, T.stringof);
+ }
-version (unittest)
-{
- void testCAS(T)( T val ) pure nothrow @nogc @trusted
+ void testCAS(T)(T val) pure nothrow @nogc @trusted
in
{
assert(val !is T.init);
}
- body
+ do
{
T base = cast(T)null;
shared(T) atom = cast(shared(T))null;
- assert( base !is val, T.stringof );
- assert( atom is base, T.stringof );
+ assert(base !is val, T.stringof);
+ assert(atom is base, T.stringof);
+
+ assert(cas(&atom, base, val), T.stringof);
+ assert(atom is val, T.stringof);
+ assert(!cas(&atom, base, base), T.stringof);
+ assert(atom is val, T.stringof);
- assert( cas( &atom, base, val ), T.stringof );
- assert( atom is val, T.stringof );
- assert( !cas( &atom, base, base ), T.stringof );
- assert( atom is val, T.stringof );
+ atom = cast(shared(T))null;
+
+ shared(T) arg = base;
+ assert(cas(&atom, &arg, val), T.stringof);
+ assert(arg is base, T.stringof);
+ assert(atom is val, T.stringof);
+
+ arg = base;
+ assert(!cas(&atom, &arg, base), T.stringof);
+ assert(arg is val, T.stringof);
+ assert(atom is val, T.stringof);
}
- void testLoadStore(MemoryOrder ms = MemoryOrder.seq, T)( T val = T.init + 1 ) pure nothrow @nogc @trusted
+ void testLoadStore(MemoryOrder ms = MemoryOrder.seq, T)(T val = T.init + 1) pure nothrow @nogc @trusted
{
T base = cast(T) 0;
shared(T) atom = cast(T) 0;
- assert( base !is val );
- assert( atom is base );
- atomicStore!(ms)( atom, val );
- base = atomicLoad!(ms)( atom );
+ assert(base !is val);
+ assert(atom is base);
+ atomicStore!(ms)(atom, val);
+ base = atomicLoad!(ms)(atom);
- assert( base is val, T.stringof );
- assert( atom is val );
+ assert(base is val, T.stringof);
+ assert(atom is val);
}
- void testType(T)( T val = T.init + 1 ) pure nothrow @nogc @safe
+ void testType(T)(T val = T.init + 1) pure nothrow @nogc @safe
{
- testCAS!(T)( val );
- testLoadStore!(MemoryOrder.seq, T)( val );
- testLoadStore!(MemoryOrder.raw, T)( val );
+ static if (T.sizeof < 8 || has64BitXCHG)
+ testXCHG!(T)(val);
+ testCAS!(T)(val);
+ testLoadStore!(MemoryOrder.seq, T)(val);
+ testLoadStore!(MemoryOrder.raw, T)(val);
}
- @safe pure nothrow unittest
+ @betterC @safe pure nothrow unittest
{
testType!(bool)();
@@ -1700,42 +924,81 @@ version (unittest)
testType!(int)();
testType!(uint)();
+ }
+
+ @safe pure nothrow unittest
+ {
testType!(shared int*)();
+ static interface Inter {}
+ static class KlassImpl : Inter {}
+ testXCHG!(shared Inter)(new shared(KlassImpl));
+ testCAS!(shared Inter)(new shared(KlassImpl));
+
static class Klass {}
- testCAS!(shared Klass)( new shared(Klass) );
+ testXCHG!(shared Klass)(new shared(Klass));
+ testCAS!(shared Klass)(new shared(Klass));
+
+ testXCHG!(shared int)(42);
- testType!(float)(1.0f);
+ testType!(float)(0.1f);
- static if ( has64BitCAS )
+ static if (has64BitCAS)
{
- testType!(double)(1.0);
+ testType!(double)(0.1);
testType!(long)();
testType!(ulong)();
}
+ static if (has128BitCAS)
+ {
+ () @trusted
+ {
+ align(16) struct Big { long a, b; }
+
+ shared(Big) atom;
+ shared(Big) base;
+ shared(Big) arg;
+ shared(Big) val = Big(1, 2);
+
+ assert(cas(&atom, arg, val), Big.stringof);
+ assert(atom is val, Big.stringof);
+ assert(!cas(&atom, arg, val), Big.stringof);
+ assert(atom is val, Big.stringof);
+
+ atom = Big();
+ assert(cas(&atom, &arg, val), Big.stringof);
+ assert(arg is base, Big.stringof);
+ assert(atom is val, Big.stringof);
+
+ arg = Big();
+ assert(!cas(&atom, &arg, base), Big.stringof);
+ assert(arg is val, Big.stringof);
+ assert(atom is val, Big.stringof);
+ }();
+ }
shared(size_t) i;
- atomicOp!"+="( i, cast(size_t) 1 );
- assert( i == 1 );
+ atomicOp!"+="(i, cast(size_t) 1);
+ assert(i == 1);
- atomicOp!"-="( i, cast(size_t) 1 );
- assert( i == 0 );
+ atomicOp!"-="(i, cast(size_t) 1);
+ assert(i == 0);
- shared float f = 0;
- atomicOp!"+="( f, 1 );
- assert( f == 1 );
+ shared float f = 0.1f;
+ atomicOp!"+="(f, 0.1f);
+ assert(f > 0.1999f && f < 0.2001f);
- static if ( has64BitCAS )
+ static if (has64BitCAS)
{
- shared double d = 0;
- atomicOp!"+="( d, 1 );
- assert( d == 1 );
+ shared double d = 0.1;
+ atomicOp!"+="(d, 0.1);
+ assert(d > 0.1999 && d < 0.2001);
}
}
- pure nothrow unittest
+ @betterC pure nothrow unittest
{
static if (has128BitCAS)
{
@@ -1756,15 +1019,6 @@ version (unittest)
assert(b.value1 == 3 && b.value2 ==4);
}
- version (D_LP64)
- {
- enum hasDWCAS = has128BitCAS;
- }
- else
- {
- enum hasDWCAS = has64BitCAS;
- }
-
static if (hasDWCAS)
{
static struct List { size_t gen; List* next; }
@@ -1773,9 +1027,48 @@ version (unittest)
assert(head.gen == 1);
assert(cast(size_t)head.next == 1);
}
+
+ // https://issues.dlang.org/show_bug.cgi?id=20629
+ static struct Struct
+ {
+ uint a, b;
+ }
+ shared Struct s1 = Struct(1, 2);
+ atomicStore(s1, Struct(3, 4));
+ assert(cast(uint) s1.a == 3);
+ assert(cast(uint) s1.b == 4);
+ }
+
+ // https://issues.dlang.org/show_bug.cgi?id=20844
+ static if (hasDWCAS)
+ {
+ debug: // tests CAS in-contract
+
+ pure nothrow unittest
+ {
+ import core.exception : AssertError;
+
+ align(16) shared ubyte[2 * size_t.sizeof + 1] data;
+ auto misalignedPointer = cast(size_t[2]*) &data[1];
+ size_t[2] x;
+
+ try
+ cas(misalignedPointer, x, x);
+ catch (AssertError)
+ return;
+
+ assert(0, "should have failed");
+ }
}
- pure nothrow unittest
+ @betterC pure nothrow @nogc @safe unittest
+ {
+ int a;
+ if (casWeak!(MemoryOrder.acq_rel, MemoryOrder.raw)(&a, 0, 4))
+ assert(a == 4);
+ }
+
+ @betterC pure nothrow unittest
{
static struct S { int val; }
auto s = shared(S)(1);
@@ -1794,11 +1087,6 @@ version (unittest)
shared(S*) writeThis2 = null;
assert(cas(&ptr, ifThis2, writeThis2));
assert(ptr is null);
-
- // head unshared target doesn't want atomic CAS
- shared(S)* ptr2;
- static assert(!__traits(compiles, cas(&ptr2, ifThis, writeThis)));
- static assert(!__traits(compiles, cas(&ptr2, ifThis2, writeThis2)));
}
unittest
@@ -1838,7 +1126,7 @@ version (unittest)
}
// === atomicFetchAdd and atomicFetchSub operations ====
- pure nothrow @nogc @safe unittest
+ @betterC pure nothrow @nogc @safe unittest
{
shared ubyte u8 = 1;
shared ushort u16 = 2;
@@ -1853,7 +1141,7 @@ version (unittest)
assert(atomicOp!"+="(i8, 8) == 13);
assert(atomicOp!"+="(i16, 8) == 14);
assert(atomicOp!"+="(i32, 8) == 15);
- version (AsmX86_64)
+ version (D_LP64)
{
shared ulong u64 = 4;
shared long i64 = 8;
@@ -1862,7 +1150,30 @@ version (unittest)
}
}
- pure nothrow @nogc @safe unittest
+ @betterC pure nothrow @nogc unittest
+ {
+ byte[10] byteArray = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19];
+ ulong[10] ulongArray = [2, 4, 6, 8, 10, 12, 14, 16, 19, 20];
+
+ {
+ auto array = byteArray;
+ byte* ptr = &array[0];
+ byte* prevPtr = atomicFetchAdd(ptr, 3);
+ assert(prevPtr == &array[0]);
+ assert(*prevPtr == 1);
+ assert(*ptr == 7);
+ }
+ {
+ auto array = ulongArray;
+ ulong* ptr = &array[0];
+ ulong* prevPtr = atomicFetchAdd(ptr, 3);
+ assert(prevPtr == &array[0]);
+ assert(*prevPtr == 2);
+ assert(*ptr == 8);
+ }
+ }
+
+ @betterC pure nothrow @nogc @safe unittest
{
shared ubyte u8 = 1;
shared ushort u16 = 2;
@@ -1877,7 +1188,7 @@ version (unittest)
assert(atomicOp!"-="(i8, 1) == 4);
assert(atomicOp!"-="(i16, 1) == 5);
assert(atomicOp!"-="(i32, 1) == 6);
- version (AsmX86_64)
+ version (D_LP64)
{
shared ulong u64 = 4;
shared long i64 = 8;
@@ -1886,16 +1197,83 @@ version (unittest)
}
}
- pure nothrow @nogc @safe unittest // issue 16651
+ @betterC pure nothrow @nogc unittest
+ {
+ byte[10] byteArray = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19];
+ ulong[10] ulongArray = [2, 4, 6, 8, 10, 12, 14, 16, 19, 20];
+
+ {
+ auto array = byteArray;
+ byte* ptr = &array[5];
+ byte* prevPtr = atomicFetchSub(ptr, 4);
+ assert(prevPtr == &array[5]);
+ assert(*prevPtr == 11);
+ assert(*ptr == 3); // https://issues.dlang.org/show_bug.cgi?id=21578
+ }
+ {
+ auto array = ulongArray;
+ ulong* ptr = &array[5];
+ ulong* prevPtr = atomicFetchSub(ptr, 4);
+ assert(prevPtr == &array[5]);
+ assert(*prevPtr == 12);
+ assert(*ptr == 4); // https://issues.dlang.org/show_bug.cgi?id=21578
+ }
+ }
+
+ @betterC pure nothrow @nogc @safe unittest // issue 16651
{
shared ulong a = 2;
uint b = 1;
- atomicOp!"-="( a, b );
+ atomicOp!"-="(a, b);
assert(a == 1);
shared uint c = 2;
ubyte d = 1;
- atomicOp!"-="( c, d );
+ atomicOp!"-="(c, d);
assert(c == 1);
}
+
+ pure nothrow @safe unittest // issue 16230
+ {
+ shared int i;
+ static assert(is(typeof(atomicLoad(i)) == int));
+
+ shared int* p;
+ static assert(is(typeof(atomicLoad(p)) == shared(int)*));
+
+ shared int[] a;
+ static if (__traits(compiles, atomicLoad(a)))
+ {
+ static assert(is(typeof(atomicLoad(a)) == shared(int)[]));
+ }
+
+ static struct S { int* _impl; }
+ shared S s;
+ static assert(is(typeof(atomicLoad(s)) : shared S));
+ static assert(is(typeof(atomicLoad(s)._impl) == shared(int)*));
+ auto u = atomicLoad(s);
+ assert(u._impl is null);
+ u._impl = new shared int(42);
+ assert(atomicLoad(*u._impl) == 42);
+
+ static struct S2 { S s; }
+ shared S2 s2;
+ static assert(is(typeof(atomicLoad(s2).s) == TailShared!S));
+
+ static struct S3 { size_t head; int* tail; }
+ shared S3 s3;
+ static if (__traits(compiles, atomicLoad(s3)))
+ {
+ static assert(is(typeof(atomicLoad(s3).head) == size_t));
+ static assert(is(typeof(atomicLoad(s3).tail) == shared(int)*));
+ }
+
+ static class C { int i; }
+ shared C c;
+ static assert(is(typeof(atomicLoad(c)) == shared C));
+
+ static struct NoIndirections { int i; }
+ shared NoIndirections n;
+ static assert(is(typeof(atomicLoad(n)) == NoIndirections));
+ }
}
diff --git a/libphobos/libdruntime/core/attribute.d b/libphobos/libdruntime/core/attribute.d
index 9d350d8..b0b973f 100644
--- a/libphobos/libdruntime/core/attribute.d
+++ b/libphobos/libdruntime/core/attribute.d
@@ -15,6 +15,56 @@
*/
module core.attribute;
+version (GNU)
+ public import gcc.attributes;
+
+version (LDC)
+ public import ldc.attributes;
+
+version (D_ObjectiveC)
+{
+ version = UdaOptional;
+ version = UdaSelector;
+}
+
+version (Posix)
+ version = UdaGNUAbiTag;
+
+version (CoreDdoc)
+{
+ version = UdaGNUAbiTag;
+ version = UdaOptional;
+ version = UdaSelector;
+}
+
+/**
+ * Use this attribute to specify that a global symbol should be emitted with
+ * weak linkage. This is primarily useful in defining library functions that
+ * can be overridden by user code, though it can also be used with shared and
+ * static variables.
+ *
+ * The overriding symbol must have the same type as the weak symbol. In
+ * addition, if it designates a variable it must also have the same size and
+ * alignment as the weak symbol.
+ *
+ * Quote from the LLVM manual: "Note that weak linkage does not actually allow
+ * the optimizer to inline the body of this function into callers because it
+ * doesn’t know if this definition of the function is the definitive definition
+ * within the program or whether it will be overridden by a stronger
+ * definition."
+ *
+ * This attribute is only meaningful to the GNU and LLVM D compilers. The
+ * Digital Mars D compiler emits all symbols with weak linkage by default.
+ */
+version (DigitalMars)
+{
+ enum weak;
+}
+else
+{
+ // GDC and LDC declare this attribute in their own modules.
+}
+
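+// A minimal usage sketch, illustrative only: the symbol name below is made up,
+// and the attribute is only effective with compilers that support it (see the
+// note above).
+version (none)
+{
+    import core.attribute : weak;
+
+    // Library-provided default; a non-weak definition of the same symbol
+    // elsewhere in the program takes precedence at link time.
+    @weak extern (C) int libraryHook() { return 0; }
+}
+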
/**
* Use this attribute to attach an Objective-C selector to a method.
*
@@ -51,7 +101,143 @@ module core.attribute;
* }
* ---
*/
-version (D_ObjectiveC) struct selector
+version (UdaSelector) struct selector
{
string selector;
}
+
+/**
+ * Use this attribute to make an Objective-C interface method optional.
+ *
+ * An optional method is a method that does **not** have to be implemented in
+ * the class that implements the interface. To safely call an optional method,
+ * a runtime check should be performed to make sure the receiver implements the
+ * method.
+ *
+ * This is a special compiler-recognized attribute; it has several
+ * requirements, all of which are enforced by the compiler:
+ *
+ * * The attribute can only be attached to methods which have Objective-C
+ * linkage. That is, a method inside an interface declared as `extern (Objective-C)`
+ *
+ * * It can only be used for methods that are declared inside an interface
+ * * It can only be used once in a method declaration
+ * * It cannot be attached to a method that is a template
+ *
+ * Examples:
+ * ---
+ * import core.attribute : optional, selector;
+ *
+ * extern (Objective-C):
+ *
+ * struct objc_selector;
+ * alias SEL = objc_selector*;
+ *
+ * SEL sel_registerName(in char* str);
+ *
+ * extern class NSObject
+ * {
+ * bool respondsToSelector(SEL sel) @selector("respondsToSelector:");
+ * }
+ *
+ * interface Foo
+ * {
+ * @optional void foo() @selector("foo");
+ * @optional void bar() @selector("bar");
+ * }
+ *
+ * class Bar : NSObject
+ * {
+ * static Bar alloc() @selector("alloc");
+ * Bar init() @selector("init");
+ *
+ * void bar() @selector("bar")
+ * {
+ * }
+ * }
+ *
+ * extern (D) void main()
+ * {
+ * auto bar = Bar.alloc.init;
+ *
+ * if (bar.respondsToSelector(sel_registerName("bar")))
+ * bar.bar();
+ * }
+ * ---
+ */
+version (UdaOptional)
+ enum optional;
+
+/**
+ * Use this attribute to declare an ABI tag on a C++ symbol.
+ *
+ * ABI tag is an attribute introduced by the GNU C++ compiler.
+ * It modifies the mangled name of the symbol to incorporate the tag name,
+ * in order to distinguish from an earlier version with a different ABI.
+ *
+ * This is a special compiler-recognized attribute; it has a few
+ * requirements, all of which are enforced by the compiler:
+ *
+ * $(UL
+ * $(LI
+ * There can only be one such attribute per symbol.
+ * ),
+ * $(LI
+ * The attribute can only be attached to an `extern(C++)` symbol
+ * (`struct`, `class`, `enum`, function, and their templated counterparts).
+ * ),
+ * $(LI
+ * The attribute cannot be applied to C++ namespaces.
+ * This is to prevent confusion with the C++ semantic, which allows it to
+ * be applied to namespaces.
+ * ),
+ * $(LI
+ * The string arguments must only contain valid characters
+ * for C++ name mangling which currently include alphanumerics
+ * and the underscore character.
+ * ),
+ * )
+ *
+ * This UDA is not transitive, and inner scopes do not inherit outer scopes'
+ * ABI tag. See examples below for how to translate a C++ declaration to D.
+ * Also note that entries in this UDA will be automatically sorted
+ * alphabetically, hence `gnuAbiTag("c", "b", "a")` will appear as
+ * `@gnuAbiTag("a", "b", "c")`.
+ *
+ * See_Also:
+ * $(LINK2 https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangle.abi-tag, Itanium ABI spec)
+ * $(LINK2 https://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Attributes.html, GCC attributes documentation).
+ *
+ * Examples:
+ * ---
+ * // ---- foo.cpp
+ * struct [[gnu::abi_tag ("tag1", "tag2")]] Tagged1_2
+ * {
+ * struct [[gnu::abi_tag ("tag3")]] Tagged3
+ * {
+ * [[gnu::abi_tag ("tag4")]]
+ * int Tagged4 () { return 42; }
+ * }
+ * }
+ * Tagged1_2 inst1;
+ * // ---- foo.d
+ * @gnuAbiTag("tag1", "tag2") struct Tagged1_2
+ * {
+ * // Notice the repetition
+ * @gnuAbiTag("tag1", "tag2", "tag3") struct Tagged3
+ * {
+ * @gnuAbiTag("tag1", "tag2", "tag3", "tag4") int Tagged4 ();
+ * }
+ * }
+ * extern __gshared Tagged1_2 inst1;
+ * ---
+ */
+version (UdaGNUAbiTag) struct gnuAbiTag
+{
+ string[] tags;
+
+ this(string[] tags...)
+ {
+ this.tags = tags;
+ }
+}
diff --git a/libphobos/libdruntime/core/bitop.d b/libphobos/libdruntime/core/bitop.d
index 25b5cd5..40f2242 100644
--- a/libphobos/libdruntime/core/bitop.d
+++ b/libphobos/libdruntime/core/bitop.d
@@ -758,11 +758,13 @@ version (DigitalMars) version (AnyX86)
}
+// @@@DEPRECATED_2.099@@@
deprecated("volatileLoad has been moved to core.volatile. Use core.volatile.volatileLoad instead.")
{
public import core.volatile : volatileLoad;
}
+// @@@DEPRECATED_2.099@@@
deprecated("volatileStore has been moved to core.volatile. Use core.volatile.volatileStore instead.")
{
public import core.volatile : volatileStore;
@@ -951,6 +953,9 @@ pure T rol(T)(const T value, const uint count)
if (__traits(isIntegral, T) && __traits(isUnsigned, T))
{
assert(count < 8 * T.sizeof);
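+    // A rotation by 0 would make the complementary shift below equal the full
+    // bit width, which is not allowed; return the value unchanged instead.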
+ if (count == 0)
+ return cast(T) value;
+
return cast(T) ((value << count) | (value >> (T.sizeof * 8 - count)));
}
/// ditto
@@ -958,6 +963,9 @@ pure T ror(T)(const T value, const uint count)
if (__traits(isIntegral, T) && __traits(isUnsigned, T))
{
assert(count < 8 * T.sizeof);
+ if (count == 0)
+ return cast(T) value;
+
return cast(T) ((value >> count) | (value << (T.sizeof * 8 - count)));
}
/// ditto
@@ -965,6 +973,9 @@ pure T rol(uint count, T)(const T value)
if (__traits(isIntegral, T) && __traits(isUnsigned, T))
{
static assert(count < 8 * T.sizeof);
+ static if (count == 0)
+ return cast(T) value;
+
return cast(T) ((value << count) | (value >> (T.sizeof * 8 - count)));
}
/// ditto
@@ -972,6 +983,9 @@ pure T ror(uint count, T)(const T value)
if (__traits(isIntegral, T) && __traits(isUnsigned, T))
{
static assert(count < 8 * T.sizeof);
+ static if (count == 0)
+ return cast(T) value;
+
return cast(T) ((value >> count) | (value << (T.sizeof * 8 - count)));
}
@@ -994,4 +1008,9 @@ unittest
assert(rol!3(a) == 0b10000111);
assert(ror!3(a) == 0b00011110);
+
+ enum c = rol(uint(1), 0);
+ enum d = ror(uint(1), 0);
+ assert(c == uint(1));
+ assert(d == uint(1));
}
diff --git a/libphobos/libdruntime/core/builtins.d b/libphobos/libdruntime/core/builtins.d
new file mode 100644
index 0000000..f2ca503
--- /dev/null
+++ b/libphobos/libdruntime/core/builtins.d
@@ -0,0 +1,19 @@
+/**********************************************
+ * This module implements common builtins for the D frontend.
+ *
+ * Copyright: Copyright © 2019, The D Language Foundation
+ * License: $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost License 1.0)
+ * Authors: Walter Bright
+ * Source: $(DRUNTIMESRC core/builtins.d)
+ */
+
+module core.builtins;
+
+version (GNU)
+ public import gcc.builtins;
+
+version (LDC)
+ public import ldc.intrinsics;
+
+/// Writes `s` to `stderr` during CTFE (does nothing at runtime).
+void __ctfeWrite(scope const(char)[] s) @nogc @safe pure nothrow {}
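+
+// Illustrative sketch only (hypothetical names): per the documentation above,
+// the message is emitted while the call is evaluated at compile time, and the
+// call is a no-op at run time.
+version (none)
+{
+    int answer() { __ctfeWrite("computing answer during CTFE\n"); return 42; }
+    enum a = answer(); // forces CTFE, printing the message at compile time
+}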
diff --git a/libphobos/libdruntime/core/checkedint.d b/libphobos/libdruntime/core/checkedint.d
index 57209ad..49a5c11 100644
--- a/libphobos/libdruntime/core/checkedint.d
+++ b/libphobos/libdruntime/core/checkedint.d
@@ -19,6 +19,9 @@
* relative to the cost of the operation itself, compiler implementations are free
* to recognize them and generate equivalent and faster code.
*
+ * The functions here are templates so they can be used with -betterC,
+ * as -betterC programs do not link against this library.
+ *
* References: $(LINK2 http://blog.regehr.org/archives/1139, Fast Integer Overflow Checks)
* Copyright: Copyright (c) Walter Bright 2014.
* License: $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost License 1.0)
@@ -28,6 +31,8 @@
module core.checkedint;
+import core.internal.attributes : betterC;
+
nothrow:
@safe:
@nogc:
@@ -55,20 +60,27 @@ int adds()(int x, int y, ref bool overflow)
return cast(int)r;
}
+///
+@betterC
unittest
{
bool overflow;
assert(adds(2, 3, overflow) == 5);
assert(!overflow);
+
assert(adds(1, int.max - 1, overflow) == int.max);
assert(!overflow);
+
assert(adds(int.min + 1, -1, overflow) == int.min);
assert(!overflow);
+
assert(adds(int.max, 1, overflow) == int.min);
assert(overflow);
+
overflow = false;
assert(adds(int.min, -1, overflow) == int.max);
assert(overflow);
+
assert(adds(0, 0, overflow) == 0);
assert(overflow); // sticky
}
@@ -84,20 +96,27 @@ long adds()(long x, long y, ref bool overflow)
return r;
}
+///
+@betterC
unittest
{
bool overflow;
assert(adds(2L, 3L, overflow) == 5);
assert(!overflow);
+
assert(adds(1L, long.max - 1, overflow) == long.max);
assert(!overflow);
+
assert(adds(long.min + 1, -1, overflow) == long.min);
assert(!overflow);
+
assert(adds(long.max, 1, overflow) == long.min);
assert(overflow);
+
overflow = false;
assert(adds(long.min, -1, overflow) == long.max);
assert(overflow);
+
assert(adds(0L, 0L, overflow) == 0);
assert(overflow); // sticky
}
@@ -152,25 +171,42 @@ pragma(inline, true)
uint addu()(uint x, uint y, ref bool overflow)
{
immutable uint r = x + y;
- if (r < x || r < y)
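+    // With wrap-around unsigned addition, overflow occurred iff the sum is
+    // smaller than either operand, so checking against one operand suffices.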
+ immutable bool o = r < x;
+ assert(o == (r < y));
+ if (o)
overflow = true;
return r;
}
+///
+@betterC
unittest
{
+ for (uint i = 0; i < 10; ++i)
+ {
+ bool overflow;
+ immutable uint r = addu (uint.max - i, uint.max - i, overflow);
+ assert (r == 2 * (uint.max - i));
+ assert (overflow);
+ }
+
bool overflow;
assert(addu(2, 3, overflow) == 5);
assert(!overflow);
+
assert(addu(1, uint.max - 1, overflow) == uint.max);
assert(!overflow);
+
assert(addu(uint.min, -1, overflow) == uint.max);
assert(!overflow);
+
assert(addu(uint.max, 1, overflow) == uint.min);
assert(overflow);
+
overflow = false;
assert(addu(uint.min + 1, -1, overflow) == uint.min);
assert(overflow);
+
assert(addu(0, 0, overflow) == 0);
assert(overflow); // sticky
}
@@ -180,25 +216,34 @@ pragma(inline, true)
ulong addu()(ulong x, ulong y, ref bool overflow)
{
immutable ulong r = x + y;
- if (r < x || r < y)
+ immutable bool o = r < x;
+ assert(o == (r < y));
+ if (o)
overflow = true;
return r;
}
+///
+@betterC
unittest
{
bool overflow;
assert(addu(2L, 3L, overflow) == 5);
assert(!overflow);
+
assert(addu(1, ulong.max - 1, overflow) == ulong.max);
assert(!overflow);
+
assert(addu(ulong.min, -1L, overflow) == ulong.max);
assert(!overflow);
+
assert(addu(ulong.max, 1, overflow) == ulong.min);
assert(overflow);
+
overflow = false;
assert(addu(ulong.min + 1, -1L, overflow) == ulong.min);
assert(overflow);
+
assert(addu(0L, 0L, overflow) == 0);
assert(overflow); // sticky
}
@@ -210,7 +255,9 @@ pragma(inline, true)
ucent addu()(ucent x, ucent y, ref bool overflow)
{
immutable ucent r = x + y;
- if (r < x || r < y)
+ immutable bool o = r < x;
+ assert(o == (r < y));
+ if (o)
overflow = true;
return r;
}
@@ -257,20 +304,27 @@ int subs()(int x, int y, ref bool overflow)
return cast(int)r;
}
+///
+@betterC
unittest
{
bool overflow;
assert(subs(2, -3, overflow) == 5);
assert(!overflow);
+
assert(subs(1, -int.max + 1, overflow) == int.max);
assert(!overflow);
+
assert(subs(int.min + 1, 1, overflow) == int.min);
assert(!overflow);
+
assert(subs(int.max, -1, overflow) == int.min);
assert(overflow);
+
overflow = false;
assert(subs(int.min, 1, overflow) == int.max);
assert(overflow);
+
assert(subs(0, 0, overflow) == 0);
assert(overflow); // sticky
}
@@ -286,22 +340,30 @@ long subs()(long x, long y, ref bool overflow)
return r;
}
+///
+@betterC
unittest
{
bool overflow;
assert(subs(2L, -3L, overflow) == 5);
assert(!overflow);
+
assert(subs(1L, -long.max + 1, overflow) == long.max);
assert(!overflow);
+
assert(subs(long.min + 1, 1, overflow) == long.min);
assert(!overflow);
+
assert(subs(-1L, long.min, overflow) == long.max);
assert(!overflow);
+
assert(subs(long.max, -1, overflow) == long.min);
assert(overflow);
+
overflow = false;
assert(subs(long.min, 1, overflow) == long.max);
assert(overflow);
+
assert(subs(0L, 0L, overflow) == 0);
assert(overflow); // sticky
}
@@ -362,20 +424,27 @@ uint subu()(uint x, uint y, ref bool overflow)
return x - y;
}
+///
+@betterC
unittest
{
bool overflow;
assert(subu(3, 2, overflow) == 1);
assert(!overflow);
+
assert(subu(uint.max, 1, overflow) == uint.max - 1);
assert(!overflow);
+
assert(subu(1, 1, overflow) == uint.min);
assert(!overflow);
+
assert(subu(0, 1, overflow) == uint.max);
assert(overflow);
+
overflow = false;
assert(subu(uint.max - 1, uint.max, overflow) == uint.max);
assert(overflow);
+
assert(subu(0, 0, overflow) == 0);
assert(overflow); // sticky
}
@@ -390,20 +459,27 @@ ulong subu()(ulong x, ulong y, ref bool overflow)
return x - y;
}
+///
+@betterC
unittest
{
bool overflow;
assert(subu(3UL, 2UL, overflow) == 1);
assert(!overflow);
+
assert(subu(ulong.max, 1, overflow) == ulong.max - 1);
assert(!overflow);
+
assert(subu(1UL, 1UL, overflow) == ulong.min);
assert(!overflow);
+
assert(subu(0UL, 1UL, overflow) == ulong.max);
assert(overflow);
+
overflow = false;
assert(subu(ulong.max - 1, ulong.max, overflow) == ulong.max);
assert(overflow);
+
assert(subu(0UL, 0UL, overflow) == 0);
assert(overflow); // sticky
}
@@ -457,17 +533,23 @@ int negs()(int x, ref bool overflow)
return -x;
}
+///
+@betterC
unittest
{
bool overflow;
assert(negs(0, overflow) == -0);
assert(!overflow);
+
assert(negs(1234, overflow) == -1234);
assert(!overflow);
+
assert(negs(-5678, overflow) == 5678);
assert(!overflow);
+
assert(negs(int.min, overflow) == -int.min);
assert(overflow);
+
assert(negs(0, overflow) == -0);
assert(overflow); // sticky
}
@@ -481,17 +563,23 @@ long negs()(long x, ref bool overflow)
return -x;
}
+///
+@betterC
unittest
{
bool overflow;
assert(negs(0L, overflow) == -0);
assert(!overflow);
+
assert(negs(1234L, overflow) == -1234);
assert(!overflow);
+
assert(negs(-5678L, overflow) == 5678);
assert(!overflow);
+
assert(negs(long.min, overflow) == -long.min);
assert(overflow);
+
assert(negs(0L, overflow) == -0);
assert(overflow); // sticky
}
@@ -546,22 +634,30 @@ int muls()(int x, int y, ref bool overflow)
return cast(int)r;
}
+///
+@betterC
unittest
{
bool overflow;
assert(muls(2, 3, overflow) == 6);
assert(!overflow);
+
assert(muls(-200, 300, overflow) == -60_000);
assert(!overflow);
+
assert(muls(1, int.max, overflow) == int.max);
assert(!overflow);
+
assert(muls(int.min, 1, overflow) == int.min);
assert(!overflow);
+
assert(muls(int.max, 2, overflow) == (int.max * 2));
assert(overflow);
+
overflow = false;
assert(muls(int.min, -1, overflow) == int.min);
assert(overflow);
+
assert(muls(0, 0, overflow) == 0);
assert(overflow); // sticky
}
@@ -579,25 +675,34 @@ long muls()(long x, long y, ref bool overflow)
return r;
}
+///
+@betterC
unittest
{
bool overflow;
assert(muls(2L, 3L, overflow) == 6);
assert(!overflow);
+
assert(muls(-200L, 300L, overflow) == -60_000);
assert(!overflow);
+
assert(muls(1, long.max, overflow) == long.max);
assert(!overflow);
+
assert(muls(long.min, 1L, overflow) == long.min);
assert(!overflow);
+
assert(muls(long.max, 2L, overflow) == (long.max * 2));
assert(overflow);
overflow = false;
+
assert(muls(-1L, long.min, overflow) == long.min);
assert(overflow);
+
overflow = false;
assert(muls(long.min, -1L, overflow) == long.min);
assert(overflow);
+
assert(muls(0L, 0L, overflow) == 0);
assert(overflow); // sticky
}
@@ -652,7 +757,6 @@ unittest
* Returns:
* the product
*/
-
pragma(inline, true)
uint mulu()(uint x, uint y, ref bool overflow)
{
@@ -662,6 +766,7 @@ uint mulu()(uint x, uint y, ref bool overflow)
return cast(uint) r;
}
+@betterC
unittest
{
void test(uint x, uint y, uint r, bool overflow) @nogc nothrow
@@ -705,6 +810,7 @@ ulong mulu()(ulong x, ulong y, ref bool overflow)
return r;
}
+@betterC
unittest
{
void test(T, U)(T x, U y, ulong r, bool overflow) @nogc nothrow
diff --git a/libphobos/libdruntime/core/demangle.d b/libphobos/libdruntime/core/demangle.d
index 4458b70..ad9b44a 100644
--- a/libphobos/libdruntime/core/demangle.d
+++ b/libphobos/libdruntime/core/demangle.d
@@ -54,13 +54,13 @@ pure @safe:
enum AddType { no, yes }
- this( const(char)[] buf_, char[] dst_ = null )
+ this( return const(char)[] buf_, return char[] dst_ = null )
{
this( buf_, AddType.yes, dst_ );
}
- this( const(char)[] buf_, AddType addType_, char[] dst_ = null )
+ this( return const(char)[] buf_, AddType addType_, return char[] dst_ = null )
{
buf = buf_;
addType = addType_;
@@ -208,15 +208,15 @@ pure @safe:
{
assert( contains( dst[0 .. len], val ) );
debug(info) printf( "removing (%.*s)\n", cast(int) val.length, val.ptr );
-
size_t v = &val[0] - &dst[0];
+ assert( len >= val.length && len <= dst.length );
+ len -= val.length;
for (size_t p = v; p < len; p++)
dst[p] = dst[p + val.length];
- len -= val.length;
}
}
- char[] append( const(char)[] val )
+ char[] append( const(char)[] val ) return scope
{
pragma(inline, false); // tame dmd inliner
@@ -227,8 +227,7 @@ pure @safe:
assert( !contains( dst[0 .. len], val ) );
debug(info) printf( "appending (%.*s)\n", cast(int) val.length, val.ptr );
- if ( &dst[len] == &val[0] &&
- dst.length - len >= val.length )
+ if ( dst.length - len >= val.length && &dst[len] == &val[0] )
{
// data is already in place
auto t = dst[len .. len + val.length];
@@ -254,13 +253,13 @@ pure @safe:
put(", ");
}
- char[] put(char c)
+ char[] put(char c) return scope
{
char[1] val = c;
return put(val[]);
}
- char[] put( const(char)[] val )
+ char[] put( scope const(char)[] val ) return scope
{
pragma(inline, false); // tame dmd inliner
@@ -278,9 +277,9 @@ pure @safe:
{
import core.internal.string;
- UnsignedStringBuf buf;
+ UnsignedStringBuf buf = void;
- auto s = unsignedToTempString(val, buf, 16);
+ auto s = unsignedToTempString!16(val, buf);
int slen = cast(int)s.length;
if (slen < width)
{
@@ -301,7 +300,7 @@ pure @safe:
}
- void silent( lazy void dg )
+ void silent( void delegate() pure @safe dg )
{
debug(trace) printf( "silent+\n" );
debug(trace) scope(success) printf( "silent-\n" );
@@ -431,7 +430,7 @@ pure @safe:
Digit
Digit Number
*/
- const(char)[] sliceNumber()
+ const(char)[] sliceNumber() return scope
{
debug(trace) printf( "sliceNumber+\n" );
debug(trace) scope(success) printf( "sliceNumber-\n" );
@@ -449,7 +448,7 @@ pure @safe:
}
- size_t decodeNumber()
+ size_t decodeNumber() scope
{
debug(trace) printf( "decodeNumber+\n" );
debug(trace) scope(success) printf( "decodeNumber-\n" );
@@ -458,7 +457,7 @@ pure @safe:
}
- size_t decodeNumber( const(char)[] num )
+ size_t decodeNumber( scope const(char)[] num ) scope
{
debug(trace) printf( "decodeNumber+\n" );
debug(trace) scope(success) printf( "decodeNumber-\n" );
@@ -479,7 +478,7 @@ pure @safe:
}
- void parseReal()
+ void parseReal() scope
{
debug(trace) printf( "parseReal+\n" );
debug(trace) scope(success) printf( "parseReal-\n" );
@@ -570,7 +569,7 @@ pure @safe:
Namechar
Namechar Namechars
*/
- void parseLName()
+ void parseLName() scope
{
debug(trace) printf( "parseLName+\n" );
debug(trace) scope(success) printf( "parseLName-\n" );
@@ -788,7 +787,7 @@ pure @safe:
TypeTuple:
B Number Arguments
*/
- char[] parseType( char[] name = null )
+ char[] parseType( char[] name = null ) return scope
{
static immutable string[23] primitives = [
"char", // a
@@ -924,7 +923,6 @@ pure @safe:
return dst[beg .. len];
case 'F': case 'U': case 'W': case 'V': case 'R': // TypeFunction
return parseTypeFunction( name );
- case 'I': // TypeIdent (I LName)
case 'C': // TypeClass (C LName)
case 'S': // TypeStruct (S LName)
case 'E': // TypeEnum (E LName)
@@ -1186,13 +1184,17 @@ pure @safe:
popFront();
put( "scope " );
continue;
+ case 'm': // FuncAttrLive
+ popFront();
+ put( "@live " );
+ continue;
default:
error();
}
}
}
- void parseFuncArguments()
+ void parseFuncArguments() scope
{
// Arguments
for ( size_t n = 0; true; n++ )
@@ -1233,9 +1235,11 @@ pure @safe:
}
switch ( front )
{
- case 'J': // out (J Type)
+ case 'I': // in (I Type)
popFront();
- put( "out " );
+ put("in ");
+ if (front == 'K')
+ goto case;
parseType();
continue;
case 'K': // ref (K Type)
@@ -1243,6 +1247,11 @@ pure @safe:
put( "ref " );
parseType();
continue;
+ case 'J': // out (J Type)
+ popFront();
+ put( "out " );
+ parseType();
+ continue;
case 'L': // lazy (L Type)
popFront();
put( "lazy " );
@@ -1260,7 +1269,7 @@ pure @safe:
TypeFunction:
CallConvention FuncAttrs Arguments ArgClose Type
*/
- char[] parseTypeFunction( char[] name = null, IsDelegate isdg = IsDelegate.no )
+ char[] parseTypeFunction( char[] name = null, IsDelegate isdg = IsDelegate.no ) return
{
debug(trace) printf( "parseTypeFunction+\n" );
debug(trace) scope(success) printf( "parseTypeFunction-\n" );
@@ -1349,7 +1358,7 @@ pure @safe:
E
F
*/
- void parseValue( char[] name = null, char type = '\0' )
+ void parseValue(scope char[] name = null, char type = '\0' ) scope
{
debug(trace) printf( "parseValue+\n" );
debug(trace) scope(success) printf( "parseValue-\n" );
@@ -1464,13 +1473,19 @@ pure @safe:
}
put( ')' );
return;
+ case 'f':
+ // f MangledName
+ // A function literal symbol
+ popFront();
+ parseMangledName(false, 1);
+ return;
default:
error();
}
}
- void parseIntegerValue( char[] name = null, char type = '\0' )
+ void parseIntegerValue( scope char[] name = null, char type = '\0' ) scope
{
debug(trace) printf( "parseIntegerValue+\n" );
debug(trace) scope(success) printf( "parseIntegerValue-\n" );
@@ -1580,7 +1595,7 @@ pure @safe:
S Number_opt QualifiedName
X ExternallyMangledName
*/
- void parseTemplateArgs()
+ void parseTemplateArgs() scope
{
debug(trace) printf( "parseTemplateArgs+\n" );
debug(trace) scope(success) printf( "parseTemplateArgs-\n" );
@@ -1608,7 +1623,7 @@ pure @safe:
char t = front; // peek at type for parseValue
if ( t == 'Q' )
t = peekBackref();
- char[] name; silent( name = parseType() );
+ char[] name; silent( delegate void() { name = parseType(); } );
parseValue( name, t );
continue;
case 'S':
@@ -1714,7 +1729,7 @@ pure @safe:
TemplateInstanceName:
Number __T LName TemplateArgs Z
*/
- void parseTemplateInstanceName(bool hasNumber)
+ void parseTemplateInstanceName(bool hasNumber) scope
{
debug(trace) printf( "parseTemplateInstanceName+\n" );
debug(trace) scope(success) printf( "parseTemplateInstanceName-\n" );
@@ -1739,7 +1754,7 @@ pure @safe:
}
- bool mayBeTemplateInstanceName()
+ bool mayBeTemplateInstanceName() scope
{
debug(trace) printf( "mayBeTemplateInstanceName+\n" );
debug(trace) scope(success) printf( "mayBeTemplateInstanceName-\n" );
@@ -1759,7 +1774,7 @@ pure @safe:
LName
TemplateInstanceName
*/
- void parseSymbolName()
+ void parseSymbolName() scope
{
debug(trace) printf( "parseSymbolName+\n" );
debug(trace) scope(success) printf( "parseSymbolName-\n" );
@@ -1801,7 +1816,7 @@ pure @safe:
// parse optional function arguments as part of a symbol name, i.e without return type
// if keepAttr, the calling convention and function attributes are not discarded, but returned
- char[] parseFunctionTypeNoReturn( bool keepAttr = false )
+ char[] parseFunctionTypeNoReturn( bool keepAttr = false ) return scope
{
// try to demangle a function, in case we are pointing to some function local
auto prevpos = pos;
@@ -1852,7 +1867,7 @@ pure @safe:
SymbolName
SymbolName QualifiedName
*/
- char[] parseQualifiedName()
+ char[] parseQualifiedName() return scope
{
debug(trace) printf( "parseQualifiedName+\n" );
debug(trace) scope(success) printf( "parseQualifiedName-\n" );
@@ -1876,7 +1891,7 @@ pure @safe:
_D QualifiedName Type
_D QualifiedName M Type
*/
- void parseMangledName( bool displayType, size_t n = 0 )
+ void parseMangledName( bool displayType, size_t n = 0 ) scope
{
debug(trace) printf( "parseMangledName+\n" );
debug(trace) scope(success) printf( "parseMangledName-\n" );
@@ -1951,7 +1966,16 @@ pure @safe:
parseMangledName( AddType.yes == addType );
}
- char[] doDemangle(alias FUNC)()
+ char[] copyInput() return scope
+ {
+ if (dst.length < buf.length)
+ dst.length = buf.length;
+ char[] r = dst[0 .. buf.length];
+ r[] = buf[];
+ return r;
+ }
+
+ char[] doDemangle(alias FUNC)() return scope
{
while ( true )
{
@@ -1979,10 +2003,7 @@ pure @safe:
auto msg = e.toString();
printf( "error: %.*s\n", cast(int) msg.length, msg.ptr );
}
- if ( dst.length < buf.length )
- dst.length = buf.length;
- dst[0 .. buf.length] = buf[];
- return dst[0 .. buf.length];
+ return copyInput();
}
catch ( Exception e )
{
@@ -2015,10 +2036,13 @@ pure @safe:
* The demangled name or the original string if the name is not a mangled D
* name.
*/
-char[] demangle( const(char)[] buf, char[] dst = null ) nothrow pure @safe
+char[] demangle(return scope const(char)[] buf, return scope char[] dst = null ) nothrow pure @safe
{
- //return Demangle(buf, dst)();
auto d = Demangle!()(buf, dst);
+ // fast path (avoiding throwing & catching exception) for obvious
+ // non-D mangled names
+ if (buf.length < 2 || !(buf[0] == 'D' || buf[0..2] == "_D"))
+ return d.copyInput();
return d.demangleName();
}
@@ -2051,7 +2075,7 @@ char[] demangleType( const(char)[] buf, char[] dst = null ) nothrow pure @safe
* Returns:
* The mangled name with deduplicated identifiers
*/
-char[] reencodeMangled(const(char)[] mangled) nothrow pure @safe
+char[] reencodeMangled(return scope const(char)[] mangled) nothrow pure @safe
{
static struct PrependHooks
{
@@ -2067,7 +2091,7 @@ char[] reencodeMangled(const(char)[] mangled) nothrow pure @safe
Replacement [] replacements;
pure @safe:
- size_t positionInResult(size_t pos)
+ size_t positionInResult(size_t pos) scope
{
foreach_reverse (r; replacements)
if (pos >= r.pos)
@@ -2077,7 +2101,7 @@ char[] reencodeMangled(const(char)[] mangled) nothrow pure @safe
alias Remangle = Demangle!(PrependHooks);
- void flushPosition(ref Remangle d)
+ void flushPosition(ref Remangle d) scope
{
if (lastpos < d.pos)
{
@@ -2096,7 +2120,7 @@ char[] reencodeMangled(const(char)[] mangled) nothrow pure @safe
}
}
- bool parseLName(ref Remangle d)
+ bool parseLName(scope ref Remangle d) scope
{
flushPosition(d);
@@ -2127,7 +2151,8 @@ char[] reencodeMangled(const(char)[] mangled) nothrow pure @safe
npos = positionInResult(*pid);
}
encodeBackref(reslen - npos);
- replacements ~= Replacement(d.pos, result.length);
+ const pos = d.pos; // work around issues.dlang.org/show_bug.cgi?id=20675
+ replacements ~= Replacement(pos, result.length);
}
else
{
@@ -2141,7 +2166,8 @@ char[] reencodeMangled(const(char)[] mangled) nothrow pure @safe
size_t npos = positionInResult(*pid);
result.length = reslen;
encodeBackref(reslen - npos);
- replacements ~= Replacement(d.pos, result.length);
+ const pos = d.pos; // work around issues.dlang.org/show_bug.cgi?id=20675
+ replacements ~= Replacement(pos, result.length);
}
else
{
@@ -2153,7 +2179,7 @@ char[] reencodeMangled(const(char)[] mangled) nothrow pure @safe
return true;
}
- char[] parseType( ref Remangle d, char[] name = null )
+ char[] parseType( ref Remangle d, char[] name = null ) return scope
{
if (d.front != 'Q')
return null;
@@ -2174,7 +2200,7 @@ char[] reencodeMangled(const(char)[] mangled) nothrow pure @safe
return result[reslen .. $]; // anything but null
}
- void encodeBackref(size_t relpos)
+ void encodeBackref(size_t relpos) scope
{
result ~= 'Q';
enum base = 26;
@@ -2221,7 +2247,7 @@ char[] reencodeMangled(const(char)[] mangled) nothrow pure @safe
* The mangled name for a symbols of type T and the given fully
* qualified name.
*/
-char[] mangle(T)(const(char)[] fqn, char[] dst = null) @safe pure nothrow
+char[] mangle(T)(return scope const(char)[] fqn, return scope char[] dst = null) @safe pure nothrow
{
import core.internal.string : numDigits, unsignedToTempString;
@@ -2232,19 +2258,19 @@ char[] mangle(T)(const(char)[] fqn, char[] dst = null) @safe pure nothrow
@property bool empty() const { return !s.length; }
- @property const(char)[] front() const
+ @property const(char)[] front() const return
{
immutable i = indexOfDot();
return i == -1 ? s[0 .. $] : s[0 .. i];
}
- void popFront()
+ void popFront() scope
{
immutable i = indexOfDot();
s = i == -1 ? s[$ .. $] : s[i+1 .. $];
}
- private ptrdiff_t indexOfDot() const
+ private ptrdiff_t indexOfDot() const scope
{
foreach (i, c; s) if (c == '.') return i;
return -1;
@@ -2311,7 +2337,7 @@ char[] mangle(T)(const(char)[] fqn, char[] dst = null) @safe pure nothrow
* The mangled name for a function with function pointer type T and
* the given fully qualified name.
*/
-char[] mangleFunc(T:FT*, FT)(const(char)[] fqn, char[] dst = null) @safe pure nothrow if (is(FT == function))
+char[] mangleFunc(T:FT*, FT)(return scope const(char)[] fqn, return scope char[] dst = null) @safe pure nothrow if (is(FT == function))
{
static if (isExternD!FT)
{
@@ -2335,7 +2361,6 @@ char[] mangleFunc(T:FT*, FT)(const(char)[] fqn, char[] dst = null) @safe pure no
private enum hasTypeBackRef = (int function(void**,void**)).mangleof[$-4 .. $] == "QdZi";
-///
@safe pure nothrow unittest
{
assert(mangleFunc!(int function(int))("a.b") == "_D1a1bFiZi");
@@ -2412,13 +2437,15 @@ else version (Darwin)
else
enum string cPrefix = "";
-version (unittest)
+@safe pure nothrow unittest
{
immutable string[2][] table =
[
["printf", "printf"],
["_foo", "_foo"],
["_D88", "_D88"],
+ ["_D3fooQeFIAyaZv", "void foo.foo(in immutable(char)[])" ],
+ ["_D3barQeFIKAyaZv", "void bar.bar(in ref immutable(char)[])" ],
["_D4test3fooAa", "char[] test.foo"],
["_D8demangle8demangleFAaZAa", "char[] demangle.demangle(char[])"],
["_D6object6Object8opEqualsFC6ObjectZi", "int object.Object.opEquals(Object)"],
@@ -2470,7 +2497,7 @@ version (unittest)
["_D3foo7__arrayZ", "foo.__array"],
["_D8link657428__T3fooVE8link65746Methodi0Z3fooFZi", "int link6574.foo!(0).foo()"],
["_D8link657429__T3fooHVE8link65746Methodi0Z3fooFZi", "int link6574.foo!(0).foo()"],
- ["_D4test22__T4funcVAyaa3_610a62Z4funcFNaNbNiNfZAya", `pure nothrow @nogc @safe immutable(char)[] test.func!("a\x0ab").func()`],
+ ["_D4test22__T4funcVAyaa3_610a62Z4funcFNaNbNiNmNfZAya", `pure nothrow @nogc @live @safe immutable(char)[] test.func!("a\x0ab").func()`],
["_D3foo3barFzkZzi", "cent foo.bar(ucent)"],
["_D5bug145Class3fooMFNlZPv", "scope void* bug14.Class.foo()"],
["_D5bug145Class3barMFNjZPv", "return void* bug14.Class.bar()"],
@@ -2482,6 +2509,8 @@ version (unittest)
"pure @safe int std.format.getNth!(\"integer width\", std.traits.isIntegral, int, uint, uint).getNth(uint, uint, uint)"],
["_D3std11parallelism42__T16RoundRobinBufferTDFKAaZvTDxFNaNdNeZbZ16RoundRobinBuffer5primeMFZv",
"void std.parallelism.RoundRobinBuffer!(void delegate(ref char[]), bool delegate() pure @property @trusted const).RoundRobinBuffer.prime()"],
+ ["_D6mangle__T8fun21753VSQv6S21753S1f_DQBj10__lambda71MFNaNbNiNfZvZQCbQp",
+ "void function() pure nothrow @nogc @safe mangle.fun21753!(mangle.S21753(mangle.__lambda71())).fun21753"],
// Lname '0'
["_D3std9algorithm9iteration__T9MapResultSQBmQBlQBe005stripTAAyaZQBi7opSliceMFNaNbNiNfmmZSQDiQDhQDa__TQCtSQDyQDxQDq00QCmTQCjZQDq",
"pure nothrow @nogc @safe std.algorithm.iteration.MapResult!(std.algorithm.iteration.__anonymous.strip, "
@@ -2541,23 +2570,56 @@ version (unittest)
else
alias staticIota = Seq!(staticIota!(x - 1), x - 1);
}
-}
-@safe pure nothrow unittest
-{
foreach ( i, name; table )
{
auto r = demangle( name[0] );
assert( r == name[1],
- "demangled \"" ~ name[0] ~ "\" as \"" ~ r ~ "\" but expected \"" ~ name[1] ~ "\"");
+ "demangled `" ~ name[0] ~ "` as `" ~ r ~ "` but expected `" ~ name[1] ~ "`");
}
foreach ( i; staticIota!(table.length) )
{
enum r = demangle( table[i][0] );
static assert( r == table[i][1],
- "demangled \"" ~ table[i][0] ~ "\" as \"" ~ r ~ "\" but expected \"" ~ table[i][1] ~ "\"");
+ "demangled `" ~ table[i][0] ~ "` as `" ~ r ~ "` but expected `" ~ table[i][1] ~ "`");
+ }
+
+ {
+ // https://issues.dlang.org/show_bug.cgi?id=18531
+ auto symbol = `_D3std3uni__T6toCaseS_DQvQt12toLowerIndexFNaNbNiNewZtVii1043S_DQCjQCi10toLowerTabFNaNbNiNemZwSQDo5ascii7toLowerTAyaZQDzFNaNeQmZ14__foreachbody2MFNaNeKmKwZ14__foreachbody3MFNaNeKwZi`;
+ auto demangled = `pure @trusted int std.uni.toCase!(std.uni.toLowerIndex(dchar), 1043, std.uni.toLowerTab(ulong), std.ascii.toLower, immutable(char)[]).toCase(immutable(char)[]).__foreachbody2(ref ulong, ref dchar).__foreachbody3(ref dchar)`;
+ auto dst = new char[200];
+ auto ret = demangle( symbol, dst);
+ assert( ret == demangled );
}
}
+unittest
+{
+ // https://issues.dlang.org/show_bug.cgi?id=18300
+ string s = demangle.mangleof;
+ foreach (i; 1..77)
+ {
+ char[] buf = new char[i];
+ auto ds = demangle(s, buf);
+ assert(ds == "pure nothrow @safe char[] core.demangle.demangle(scope return const(char)[], scope return char[])" ||
+ ds == "pure nothrow @safe char[] core.demangle.demangle(return scope const(char)[], return scope char[])");
+ }
+}
+
+unittest
+{
+ // https://issues.dlang.org/show_bug.cgi?id=18300
+ string s = "_D1";
+ string expected = "int ";
+ foreach (_; 0..10_000)
+ {
+ s ~= "a1";
+ expected ~= "a.";
+ }
+ s ~= "FiZi";
+ expected ~= "F";
+ assert(s.demangle == expected);
+}
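For reference, a hedged usage sketch (not part of the patch) contrasting the two templates whose signatures gain `return scope` above; the expected strings follow the module's own documented examples:

    @safe pure nothrow unittest
    {
        // mangle mangles the function-pointer type itself ...
        assert(mangle!(int function(int))("a.b") == "_D1a1bPFiZi");
        // ... while mangleFunc mangles the pointed-to function type.
        assert(mangleFunc!(int function(int))("a.b") == "_D1a1bFiZi");
    }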
/*
*
diff --git a/libphobos/libdruntime/core/exception.d b/libphobos/libdruntime/core/exception.d
index f21c43e..fe298d4 100644
--- a/libphobos/libdruntime/core/exception.d
+++ b/libphobos/libdruntime/core/exception.d
@@ -1,29 +1,61 @@
/**
- * The exception module defines all system-level exceptions and provides a
- * mechanism to alter system-level error handling.
- *
- * Copyright: Copyright Sean Kelly 2005 - 2013.
- * License: Distributed under the
- * $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
- * (See accompanying file LICENSE)
- * Authors: Sean Kelly and Jonathan M Davis
- * Source: $(DRUNTIMESRC core/_exception.d)
- */
+ The exception module defines all system-level exceptions and provides a
+ mechanism to alter system-level error handling.
-/* NOTE: This file has been patched from the original DMD distribution to
- * work with the GDC compiler.
+ Copyright: Copyright Sean Kelly 2005 - 2013.
+ License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ Authors: Sean Kelly and $(HTTP jmdavisprog.com, Jonathan M Davis)
+ Source: $(DRUNTIMESRC core/_exception.d)
*/
module core.exception;
+// Compiler lowers final switch default case to this (which is a runtime error)
+void __switch_errorT()(string file = __FILE__, size_t line = __LINE__) @trusted
+{
+ // Consider making this a compile time check.
+ version (D_Exceptions)
+ throw staticError!SwitchError(file, line, null);
+ else
+ assert(0, "No appropriate switch clause found");
+}
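To illustrate the lowering described above, a minimal sketch (not part of the patch) of user code whose hidden default branch ends up in `__switch_errorT`:

    enum Color { red, green }

    void handle(Color c) @safe
    {
        final switch (c)
        {
            case Color.red:   break;
            case Color.green: break;
        }
    }

    void main()
    {
        // An out-of-range value reaches the hidden default branch, which the
        // compiler lowers to a call equivalent to __switch_errorT(), raising
        // SwitchError (or asserting under -betterC).
        handle(cast(Color) 42);
    }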
+
+version (D_BetterC)
+{
+ // When compiling with -betterC we use template functions so that, if they
+ // are used, their bodies are copied into the user's program and the D
+ // runtime is not needed during linking.
+
+ // In the future we might want to convert all functions in this module to
+ // templates even for ordinary builds instead of providing them as an
+ // extern(C) library.
+
+ void onOutOfMemoryError()(void* pretend_sideffect = null) @nogc nothrow pure @trusted
+ {
+ assert(0, "Memory allocation failed");
+ }
+ alias onOutOfMemoryErrorNoGC = onOutOfMemoryError;
+
+ void onInvalidMemoryOperationError()(void* pretend_sideffect = null) @nogc nothrow pure @trusted
+ {
+ assert(0, "Invalid memory operation");
+ }
+}
+else:
+
/**
* Thrown on a range error.
*/
class RangeError : Error
{
- @safe pure nothrow this( string file = __FILE__, size_t line = __LINE__, Throwable next = null )
+ this( string file = __FILE__, size_t line = __LINE__, Throwable next = null ) @nogc nothrow pure @safe
{
super( "Range violation", file, line, next );
}
+
+ protected this( string msg, string file, size_t line, Throwable next = null ) @nogc nothrow pure @safe
+ {
+ super( msg, file, line, next );
+ }
}
unittest
@@ -45,6 +77,142 @@ unittest
}
}
+/**
+ * Thrown when an out of bounds array index is accessed.
+ */
+class ArrayIndexError : RangeError
+{
+ /// Index into array
+ const size_t index;
+ /// Length of indexed array
+ const size_t length;
+
+ // Buffer to avoid GC allocations
+ private immutable char[100] msgBuf = '\0';
+
+ this(size_t index, size_t length, string file = __FILE__,
+ size_t line = __LINE__, Throwable next = null) @nogc nothrow pure @safe
+ {
+ this.index = index;
+ this.length = length;
+
+ // Constructing the message is a bit clumsy:
+ // It's essentially `printf("index [%zu] exceeds array of length [%zu]", index, length)`,
+ // but even `snprintf` isn't `pure`.
+ // Also string concatenation isn't `@nogc`, and casting to/from immutable isn't `@safe`
+ import core.internal.string : unsignedToTempString;
+ char[msgBuf.length] buf = void;
+ char[20] tmpBuf = void;
+ char[] sink = buf[];
+ sink.rangeMsgPut("index [");
+ sink.rangeMsgPut(unsignedToTempString!10(index, tmpBuf));
+ sink.rangeMsgPut("]");
+ sink.rangeMsgPut(" exceeds array of length ");
+ sink.rangeMsgPut(unsignedToTempString!10(length, tmpBuf));
+ this.msgBuf = buf;
+ super(msgBuf[0..$-sink.length], file, line, next);
+ }
+}
+
+@safe pure unittest
+{
+ assert(new ArrayIndexError(900, 700).msg == "index [900] exceeds array of length 700");
+ // Ensure msg buffer doesn't overflow on large numbers
+ assert(new ArrayIndexError(size_t.max, size_t.max-1).msg);
+}
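As a small aside, an illustration (not part of the patch) of the `unsignedToTempString` helper used in the constructor above; formatting into a caller-provided buffer is what keeps the message construction `@nogc nothrow pure`:

    @safe pure nothrow @nogc unittest
    {
        import core.internal.string : unsignedToTempString;

        char[20] tmp = void;  // caller-provided buffer, no GC allocation
        assert(unsignedToTempString!10(900, tmp) == "900");
    }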
+
+unittest
+{
+ try
+ {
+ _d_arraybounds_indexp("test", 400, 9, 3);
+ assert(0, "no ArrayIndexError thrown");
+ }
+ catch (ArrayIndexError re)
+ {
+ assert(re.file == "test");
+ assert(re.line == 400);
+ assert(re.index == 9);
+ assert(re.length == 3);
+ }
+}
+
+/**
+ * Thrown when an out of bounds array slice is created
+ */
+class ArraySliceError : RangeError
+{
+ /// Lower/upper bound passed to slice: `array[lower .. upper]`
+ const size_t lower, upper;
+ /// Length of sliced array
+ const size_t length;
+
+ private immutable char[120] msgBuf = '\0';
+
+ this(size_t lower, size_t upper, size_t length, string file = __FILE__,
+ size_t line = __LINE__, Throwable next = null) @nogc nothrow pure @safe
+ {
+ this.lower = lower;
+ this.upper = upper;
+ this.length = length;
+
+ // Constructing the message is a bit clumsy for the same reasons as ArrayIndexError
+ import core.internal.string : unsignedToTempString;
+ char[msgBuf.length] buf = void;
+ char[20] tmpBuf = void;
+ char[] sink = buf;
+ sink.rangeMsgPut("slice [");
+ sink.rangeMsgPut(unsignedToTempString!10(lower, tmpBuf));
+ sink.rangeMsgPut(" .. ");
+ sink.rangeMsgPut(unsignedToTempString!10(upper, tmpBuf));
+ sink.rangeMsgPut("] ");
+ if (lower > upper)
+ {
+ sink.rangeMsgPut("has a larger lower index than upper index");
+ }
+ else
+ {
+ sink.rangeMsgPut("extends past source array of length ");
+ sink.rangeMsgPut(unsignedToTempString!10(length, tmpBuf));
+ }
+
+ this.msgBuf = buf;
+ super(msgBuf[0..$-sink.length], file, line, next);
+ }
+}
+
+@safe pure unittest
+{
+ assert(new ArraySliceError(40, 80, 20).msg == "slice [40 .. 80] extends past source array of length 20");
+ assert(new ArraySliceError(90, 70, 20).msg == "slice [90 .. 70] has a larger lower index than upper index");
+ // Ensure msg buffer doesn't overflow on large numbers
+ assert(new ArraySliceError(size_t.max, size_t.max, size_t.max-1).msg);
+}
+
+unittest
+{
+ try
+ {
+ _d_arraybounds_slicep("test", 400, 1, 7, 3);
+ assert(0, "no ArraySliceError thrown");
+ }
+ catch (ArraySliceError re)
+ {
+ assert(re.file == "test");
+ assert(re.line == 400);
+ assert(re.lower == 1);
+ assert(re.upper == 7);
+ assert(re.length == 3);
+ }
+}
+
+/// Mini `std.range.primitives: put` for constructor of ArraySliceError / ArrayIndexError
+private void rangeMsgPut(ref char[] r, scope const(char)[] e) @nogc nothrow pure @safe
+{
+ assert(r.length >= e.length); // don't throw ArraySliceError inside ArrayIndexError ctor
+ r[0 .. e.length] = e[];
+ r = r[e.length .. $];
+}
/**
* Thrown on an assert error.
@@ -187,32 +355,6 @@ unittest
}
/**
- * Thrown on hidden function error.
- * $(RED Deprecated.
- * This feature is not longer part of the language.)
- */
-deprecated class HiddenFuncError : Error
-{
- @safe pure nothrow this( ClassInfo ci )
- {
- super( "Hidden method called for " ~ ci.name );
- }
-}
-
-deprecated unittest
-{
- ClassInfo info = new ClassInfo;
- info.name = "testInfo";
-
- {
- auto hfe = new HiddenFuncError(info);
- assert(hfe.next is null);
- assert(hfe.msg == "Hidden method called for testInfo");
- }
-}
-
-
-/**
* Thrown on an out of memory error.
*/
class OutOfMemoryError : Error
@@ -312,11 +454,23 @@ unittest
/**
+* Thrown on a failed fork() call.
+*/
+class ForkError : Error
+{
+ this( string file = __FILE__, size_t line = __LINE__, Throwable next = null ) @nogc nothrow pure @safe
+ {
+ super( "fork() failed", file, line, next );
+ }
+}
+
+
+/**
* Thrown on a switch error.
*/
class SwitchError : Error
{
- @safe pure nothrow this( string file = __FILE__, size_t line = __LINE__, Throwable next = null )
+ @safe pure nothrow @nogc this( string file = __FILE__, size_t line = __LINE__, Throwable next = null )
{
super( "No appropriate switch clause found", file, line, next );
}
@@ -349,7 +503,7 @@ class UnicodeException : Exception
{
size_t idx;
- this( string msg, size_t idx, string file = __FILE__, size_t line = __LINE__, Throwable next = null ) @safe pure nothrow
+ this( string msg, size_t idx, string file = __FILE__, size_t line = __LINE__, Throwable next = null ) @safe pure nothrow @nogc
{
super( msg, file, line, next );
this.idx = idx;
@@ -407,19 +561,6 @@ alias AssertHandler = void function(string file, size_t line, string msg) nothro
_assertHandler = handler;
}
-/**
- * Overrides the default assert hander with a user-supplied version.
- * $(RED Deprecated.
- * Please use $(LREF assertHandler) instead.)
- *
- * Params:
- * h = The new assert handler. Set to null to use the default handler.
- */
-deprecated void setAssertHandler( AssertHandler h ) @trusted nothrow @nogc
-{
- assertHandler = h;
-}
-
///////////////////////////////////////////////////////////////////////////////
// Overridable Callbacks
@@ -438,7 +579,7 @@ deprecated void setAssertHandler( AssertHandler h ) @trusted nothrow @nogc
extern (C) void onAssertError( string file = __FILE__, size_t line = __LINE__ ) nothrow
{
if ( _assertHandler is null )
- throw new AssertError( file, line );
+ throw staticError!AssertError(file, line);
_assertHandler( file, line, null);
}
@@ -456,7 +597,7 @@ extern (C) void onAssertError( string file = __FILE__, size_t line = __LINE__ )
extern (C) void onAssertErrorMsg( string file, size_t line, string msg ) nothrow
{
if ( _assertHandler is null )
- throw new AssertError( msg, file, line );
+ throw staticError!AssertError(msg, file, line);
_assertHandler( file, line, msg );
}
@@ -482,7 +623,7 @@ extern (C) void onUnittestErrorMsg( string file, size_t line, string msg ) nothr
///////////////////////////////////////////////////////////////////////////////
/**
- * A callback for array bounds errors in D. A $(LREF RangeError) will be thrown.
+ * A callback for general array bounds errors in D. A $(LREF RangeError) will be thrown.
*
* Params:
* file = The name of the file that signaled this error.
@@ -491,11 +632,47 @@ extern (C) void onUnittestErrorMsg( string file, size_t line, string msg ) nothr
* Throws:
* $(LREF RangeError).
*/
-extern (C) void onRangeError( string file = __FILE__, size_t line = __LINE__ ) @safe pure nothrow
+extern (C) void onRangeError( string file = __FILE__, size_t line = __LINE__ ) @trusted pure nothrow @nogc
+{
+ throw staticError!RangeError(file, line, null);
+}
+
+/**
+ * A callback for array slice out of bounds errors in D.
+ *
+ * Params:
+ * lower = the lower bound passed to the slice expression
+ * upper = the upper bound passed to the slice expression
+ * length = length of the array
+ * file = The name of the file that signaled this error.
+ * line = The line number on which this error occurred.
+ *
+ * Throws:
+ * $(LREF ArraySliceError).
+ */
+extern (C) void onArraySliceError( size_t lower = 0, size_t upper = 0, size_t length = 0,
+ string file = __FILE__, size_t line = __LINE__ ) @trusted pure nothrow @nogc
{
- throw new RangeError( file, line, null );
+ throw staticError!ArraySliceError(lower, upper, length, file, line, null);
}
+/**
+ * A callback for array index out of bounds errors in D.
+ *
+ * Params:
+ * index = index in the array
+ * length = length of the array
+ * file = The name of the file that signaled this error.
+ * line = The line number on which this error occurred.
+ *
+ * Throws:
+ * $(LREF ArrayIndexError).
+ */
+extern (C) void onArrayIndexError( size_t index = 0, size_t length = 0,
+ string file = __FILE__, size_t line = __LINE__ ) @trusted pure nothrow @nogc
+{
+ throw staticError!ArrayIndexError(index, length, file, line, null);
+}
/**
* A callback for finalize errors in D. A $(LREF FinalizeError) will be thrown.
@@ -516,22 +693,6 @@ extern (C) void onFinalizeError( TypeInfo info, Throwable e, string file = __FIL
throw staticError!FinalizeError(info, e, file, line);
}
-
-/**
- * A callback for hidden function errors in D. A $(LREF HiddenFuncError) will be
- * thrown.
- * $(RED Deprecated.
- * This feature is not longer part of the language.)
- *
- * Throws:
- * $(LREF HiddenFuncError).
- */
-deprecated extern (C) void onHiddenFuncError( Object o ) @safe pure nothrow
-{
- throw new HiddenFuncError( typeid(o) );
-}
-
-
/**
* A callback for out of memory errors in D. An $(LREF OutOfMemoryError) will be
* thrown.
@@ -569,21 +730,20 @@ extern (C) void onInvalidMemoryOperationError(void* pretend_sideffect = null) @t
/**
- * A callback for switch errors in D. A $(LREF SwitchError) will be thrown.
+ * A callback for a failed fork() call in D. A $(LREF ForkError) will be thrown.
*
* Params:
* file = The name of the file that signaled this error.
* line = The line number on which this error occurred.
*
* Throws:
- * $(LREF SwitchError).
+ * $(LREF ForkError).
*/
-extern (C) void onSwitchError( string file = __FILE__, size_t line = __LINE__ ) @safe pure nothrow
+extern (C) void onForkError( string file = __FILE__, size_t line = __LINE__ ) @trusted pure nothrow @nogc
{
- throw new SwitchError( file, line, null );
+ throw staticError!ForkError( file, line, null );
}
-
/**
* A callback for unicode errors in D. A $(LREF UnicodeException) will be thrown.
*
@@ -611,7 +771,6 @@ extern (C) void onAssertErrorMsg(string file, size_t line, string msg);
extern (C) void onUnittestErrorMsg(string file, size_t line, string msg);
extern (C) void onRangeError(string file, size_t line);
extern (C) void onHiddenFuncError(Object o);
-extern (C) void onSwitchError(string file, size_t line);
+/
/***********************************
@@ -621,8 +780,6 @@ extern (C) void onSwitchError(string file, size_t line);
extern (C)
{
- // Use ModuleInfo to get file name for "m" versions
-
/* One of these three is called upon an assert() fail.
*/
void _d_assertp(immutable(char)* file, uint line)
@@ -659,34 +816,36 @@ extern (C)
_d_unittest_msg("unittest failure", file, line);
}
- /* Called when an array index is out of bounds
- */
+ /// Called when an invalid array index/slice or associative array key is accessed
void _d_arrayboundsp(immutable(char*) file, uint line)
{
import core.stdc.string : strlen;
onRangeError(file[0 .. strlen(file)], line);
}
+ /// ditto
void _d_arraybounds(string file, uint line)
{
onRangeError(file, line);
}
- /* Called when a switch statement has no DefaultStatement, yet none of the cases match
- */
- void _d_switch_errorm(immutable(ModuleInfo)* m, uint line)
+ /// Called when an out of range slice of an array is created
+ void _d_arraybounds_slicep(immutable(char*) file, uint line, size_t lower, size_t upper, size_t length)
{
- onSwitchError(m.name, line);
+ import core.stdc.string : strlen;
+ onArraySliceError(lower, upper, length, file[0 .. strlen(file)], line);
}
- void _d_switch_error(string file, uint line)
+ /// Called when an out of range array index is accessed
+ void _d_arraybounds_indexp(immutable(char*) file, uint line, size_t index, size_t length)
{
- onSwitchError(file, line);
+ import core.stdc.string : strlen;
+ onArrayIndexError(index, length, file[0 .. strlen(file)], line);
}
}
// TLS storage shared for all errors, chaining might create circular reference
-private void[128] _store;
+private align(2 * size_t.sizeof) void[256] _store;
// only Errors for now as those are rarely chained
private T staticError(T, Args...)(auto ref Args args)
@@ -698,11 +857,11 @@ private T staticError(T, Args...)(auto ref Args args)
static assert(__traits(classInstanceSize, T) <= _store.length,
T.stringof ~ " is too large for staticError()");
- _store[0 .. __traits(classInstanceSize, T)] = typeid(T).initializer[];
return cast(T) _store.ptr;
}
auto res = (cast(T function() @trusted pure nothrow @nogc) &get)();
- res.__ctor(args);
+ import core.lifetime : emplace;
+ emplace(res, args);
return res;
}
diff --git a/libphobos/libdruntime/core/gc/config.d b/libphobos/libdruntime/core/gc/config.d
new file mode 100644
index 0000000..258183f
--- /dev/null
+++ b/libphobos/libdruntime/core/gc/config.d
@@ -0,0 +1,129 @@
+/**
+* Contains the garbage collector configuration.
+*
+* Copyright: Copyright Digital Mars 2016
+* License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+*/
+
+module core.gc.config;
+
+import core.stdc.stdio;
+import core.internal.parseoptions;
+
+__gshared Config config;
+
+struct Config
+{
+ bool disable; // start disabled
+ bool fork = false; // optional concurrent behaviour
+ ubyte profile; // enable profiling with summary when terminating program
+ string gc = "conservative"; // select gc implementation conservative|precise|manual
+
+ @MemVal size_t initReserve; // initial reserve (bytes)
+ @MemVal size_t minPoolSize = 1 << 20; // initial and minimum pool size (bytes)
+ @MemVal size_t maxPoolSize = 64 << 20; // maximum pool size (bytes)
+ @MemVal size_t incPoolSize = 3 << 20; // pool size increment (bytes)
+ uint parallel = 99; // number of additional threads for marking (limited by cpuid.threadsPerCPU-1)
+ float heapSizeFactor = 2.0; // heap size to used memory ratio
+ string cleanup = "collect"; // select gc cleanup method none|collect|finalize
+
+@nogc nothrow:
+
+ bool initialize()
+ {
+ return initConfigOptions(this, "gcopt");
+ }
+
+ void help() @nogc nothrow
+ {
+ import core.gc.registry : registeredGCFactories;
+
+ printf("GC options are specified as white space separated assignments:
+ disable:0|1 - start disabled (%d)
+ fork:0|1 - set fork behaviour (%d)
+ profile:0|1|2 - enable profiling with summary when terminating program (%d)
+ gc:".ptr, disable, fork, profile);
+ foreach (i, entry; registeredGCFactories)
+ {
+ if (i) printf("|");
+ printf("%.*s", cast(int) entry.name.length, entry.name.ptr);
+ }
+ auto _initReserve = initReserve.bytes2prettyStruct;
+ auto _minPoolSize = minPoolSize.bytes2prettyStruct;
+ auto _maxPoolSize = maxPoolSize.bytes2prettyStruct;
+ auto _incPoolSize = incPoolSize.bytes2prettyStruct;
+ printf(" - select gc implementation (default = conservative)
+
+ initReserve:N - initial memory to reserve in MB (%lld%c)
+ minPoolSize:N - initial and minimum pool size in MB (%lld%c)
+ maxPoolSize:N - maximum pool size in MB (%lld%c)
+ incPoolSize:N - pool size increment MB (%lld%c)
+ parallel:N - number of additional threads for marking (%lld)
+ heapSizeFactor:N - targeted heap size to used memory ratio (%g)
+ cleanup:none|collect|finalize - how to treat live objects when terminating (collect)
+
+ Memory-related values can use B, K, M or G suffixes.
+".ptr,
+ _initReserve.v, _initReserve.u,
+ _minPoolSize.v, _minPoolSize.u,
+ _maxPoolSize.v, _maxPoolSize.u,
+ _incPoolSize.v, _incPoolSize.u,
+ cast(long)parallel, heapSizeFactor);
+ }
+
+ string errorName() @nogc nothrow { return "GC"; }
+}
+
+private struct PrettyBytes
+{
+ long v;
+ char u; /// unit
+}
+
+pure @nogc nothrow:
+
+private PrettyBytes bytes2prettyStruct(size_t val)
+{
+ char c = prettyBytes(val);
+
+ return PrettyBytes(val, c);
+}
+
+private static char prettyBytes(ref size_t val)
+{
+ char sym = 'B';
+
+ if (val == 0)
+ return sym;
+
+ char[3] units = ['K', 'M', 'G'];
+
+ foreach (u; units)
+ if (val % (1 << 10) == 0)
+ {
+ val /= (1 << 10);
+ sym = u;
+ }
+ else if (sym != 'B')
+ break;
+
+ return sym;
+}
+unittest
+{
+ size_t v = 1024;
+ assert(prettyBytes(v) == 'K');
+ assert(v == 1);
+
+ v = 1025;
+ assert(prettyBytes(v) == 'B');
+ assert(v == 1025);
+
+ v = 1024UL * 1024 * 1024 * 3;
+ assert(prettyBytes(v) == 'G');
+ assert(v == 3);
+
+ v = 1024 * 1024 + 1;
+ assert(prettyBytes(v) == 'B');
+ assert(v == 1024 * 1024 + 1);
+}
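A hedged usage sketch (not part of the patch) for the options above: `rt_options` is the documented way to bake GC options into a binary, and the `16M` value relies on the B/K/M/G suffix support mentioned in `help()`:

    // Compiled-in defaults; the runtime also accepts --DRT-gcopt=... on the
    // command line unless that mechanism is disabled.
    extern (C) __gshared string[] rt_options =
        [ "gcopt=gc:precise minPoolSize:16M profile:1" ];

    void main() {}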
diff --git a/libphobos/libdruntime/gc/gcinterface.d b/libphobos/libdruntime/core/gc/gcinterface.d
index abe88f1..e8cdf11 100644
--- a/libphobos/libdruntime/gc/gcinterface.d
+++ b/libphobos/libdruntime/core/gc/gcinterface.d
@@ -2,7 +2,7 @@
* Contains the internal GC interface.
*
* Copyright: Copyright Digital Mars 2016.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Walter Bright, Sean Kelly, Jeremy DeHaan
*/
@@ -11,7 +11,7 @@
* (See accompanying file LICENSE or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*/
-module gc.gcinterface;
+module core.gc.gcinterface;
static import core.memory;
alias BlkAttr = core.memory.GC.BlkAttr;
@@ -33,16 +33,11 @@ struct Range
void* ptop;
TypeInfo ti; // should be tail const, but doesn't exist for references
alias pbot this; // only consider pbot for relative ordering (opCmp)
+ bool opEquals(const scope Range rhs) nothrow const { return pbot == rhs.pbot; }
}
interface GC
{
-
- /*
- *
- */
- void Dtor();
-
/**
*
*/
@@ -91,7 +86,7 @@ interface GC
/*
*
*/
- BlkInfo qalloc(size_t size, uint bits, const TypeInfo ti) nothrow;
+ BlkInfo qalloc(size_t size, uint bits, const scope TypeInfo ti) nothrow;
/*
*
@@ -122,19 +117,19 @@ interface GC
/**
*
*/
- void free(void* p) nothrow;
+ void free(void* p) nothrow @nogc;
/**
* Determine the base address of the block containing p. If p is not a gc
* allocated pointer, return null.
*/
- void* addrOf(void* p) nothrow;
+ void* addrOf(void* p) nothrow @nogc;
/**
* Determine the allocated size of pointer p. If p is an interior pointer
* or not a gc allocated pointer, return 0.
*/
- size_t sizeOf(void* p) nothrow;
+ size_t sizeOf(void* p) nothrow @nogc;
/**
* Determine the base address of the block containing p. If p is not a gc
@@ -149,6 +144,12 @@ interface GC
core.memory.GC.Stats stats() nothrow;
/**
+ * Retrieve profile statistics about garbage collection.
+ * Useful for debugging and tuning.
+ */
+ core.memory.GC.ProfileStats profileStats() nothrow @safe;
+
+ /**
* add p to list of roots
*/
void addRoot(void* p) nothrow @nogc;
@@ -181,10 +182,17 @@ interface GC
/**
* run finalizers
*/
- void runFinalizers(in void[] segment) nothrow;
+ void runFinalizers(const scope void[] segment) nothrow;
/*
*
*/
- bool inFinalizer() nothrow;
+ bool inFinalizer() nothrow @nogc @safe;
+
+ /**
+ * Returns the number of bytes allocated for the current thread
+ * since program start. It is the same as
+ * GC.stats().allocatedInCurrentThread, but faster.
+ */
+ ulong allocatedInCurrentThread() nothrow;
}
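A hedged sketch (not part of the patch) of how the two interface additions above surface to user code, assuming the matching wrappers in `core.memory.GC` exist at this revision:

    unittest
    {
        import core.memory : GC;

        const before = GC.allocatedInCurrentThread();
        auto arr = new int[64];                  // GC allocation in this thread
        assert(GC.allocatedInCurrentThread() > before);

        const stats = GC.profileStats();         // aggregate collection/pause timings
    }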
diff --git a/libphobos/libdruntime/core/gc/registry.d b/libphobos/libdruntime/core/gc/registry.d
new file mode 100644
index 0000000..da2dcff
--- /dev/null
+++ b/libphobos/libdruntime/core/gc/registry.d
@@ -0,0 +1,87 @@
+/**
+ * Contains a registry for GC factories.
+ *
+ * Copyright: Copyright Digital Mars 2016.
+ * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * Authors: Martin Nowak
+ */
+module core.gc.registry;
+
+import core.gc.gcinterface : GC;
+
+/*@nogc nothrow:*/
+
+/**
+ * A factory function that instantiates an implementation of the GC interface.
+ * If the instance was allocated on the C heap, it is expected to
+ * free itself when its destructor is called.
+ *
+ * The factory should print an error and abort the program if it
+ * cannot successfully initialize the GC instance.
+ */
+alias GCFactory = GC function();
+
+/**
+ * Register a GC factory under the given `name`. This function must be called
+ * from a C constructor before druntime is initialized.
+ *
+ * To use the registered GC, its name must be specified via the gcopt runtime option,
+ * e.g. by passing $(TT, --DRT-gcopt=gc:my_gc_name) as an application argument.
+ *
+ * Params:
+ * name = name of the GC implementation; should be unique
+ * factory = function to instantiate the implementation
+ * Note: The registry does not perform synchronization, as registration is
+ * assumed to be executed serially, as is the case for C constructors.
+ * See_Also:
+ * $(LINK2 https://dlang.org/spec/garbage.html#gc_config, Configuring the Garbage Collector)
+ */
+void registerGCFactory(string name, GCFactory factory) nothrow @nogc
+{
+ import core.stdc.stdlib : realloc;
+
+ auto ptr = cast(Entry*)realloc(entries.ptr, (entries.length + 1) * Entry.sizeof);
+ entries = ptr[0 .. entries.length + 1];
+ entries[$ - 1] = Entry(name, factory);
+}
+
+/**
+ * Called during runtime initialization to initialize a GC instance of given `name`.
+ *
+ * Params:
+ * name = name of the GC to instantiate
+ * Returns:
+ * The created GC instance or `null` if no factory for that name was registered
+ */
+GC createGCInstance(string name)
+{
+ import core.stdc.stdlib : free;
+
+ foreach (entry; entries)
+ {
+ if (entry.name != name)
+ continue;
+ auto instance = entry.factory();
+ // only one GC at a time for now, so free the registry to not leak
+ free(entries.ptr);
+ entries = null;
+ return instance;
+ }
+ return null;
+}
+
+// list of all registered GCs
+const(Entry[]) registeredGCFactories(scope int dummy=0) nothrow @nogc
+{
+ return entries;
+}
+
+private:
+
+struct Entry
+{
+ string name;
+ GCFactory factory;
+}
+
+__gshared Entry[] entries;
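A hedged sketch (not part of the patch) of the consumer side of this registry, mirroring how the runtime proxy might pick an implementation by name:

    import core.gc.gcinterface : GC;
    import core.gc.registry : createGCInstance, registeredGCFactories;

    GC selectGC(string name)
    {
        if (auto gc = createGCInstance(name))
            return gc;

        // Unknown name: report the registered choices and abort.
        import core.stdc.stdio : fprintf, stderr;
        foreach (entry; registeredGCFactories())
            fprintf(stderr, "registered GC: %.*s\n",
                    cast(int) entry.name.length, entry.name.ptr);
        assert(0, "no GC registered under the requested name");
    }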
diff --git a/libphobos/libdruntime/core/internal/abort.d b/libphobos/libdruntime/core/internal/abort.d
index 8ee1684..6942f7e 100644
--- a/libphobos/libdruntime/core/internal/abort.d
+++ b/libphobos/libdruntime/core/internal/abort.d
@@ -11,7 +11,7 @@ void abort(scope string msg, scope string filename = __FILE__, size_t line = __L
version (Posix)
{
import core.sys.posix.unistd: write;
- static void writeStr(const(char)[][] m...) @nogc nothrow @trusted
+ static void writeStr(scope const(char)[][] m...) @nogc nothrow @trusted
{
foreach (s; m)
write(2, s.ptr, s.length);
@@ -19,12 +19,20 @@ void abort(scope string msg, scope string filename = __FILE__, size_t line = __L
}
else version (Windows)
{
- import core.sys.windows.windows: GetStdHandle, STD_ERROR_HANDLE, WriteFile, INVALID_HANDLE_VALUE;
+ import core.sys.windows.winbase : GetStdHandle, STD_ERROR_HANDLE, WriteFile, INVALID_HANDLE_VALUE;
auto h = (() @trusted => GetStdHandle(STD_ERROR_HANDLE))();
if (h == INVALID_HANDLE_VALUE)
+ {
// attempt best we can to print the message
- assert(0, msg);
- void writeStr(const(char)[][] m...) @nogc nothrow @trusted
+
+ /* Note that msg is scope.
+ * assert() calls _d_assert_msg() calls onAssertErrorMsg() calls _assertHandler() but
+ * msg parameter isn't scope and can escape.
+ * Give up and use our own immutable message instead.
+ */
+ assert(0, "Cannot get stderr handle for message");
+ }
+ void writeStr(scope const(char)[][] m...) @nogc nothrow @trusted
{
foreach (s; m)
{
@@ -37,9 +45,9 @@ void abort(scope string msg, scope string filename = __FILE__, size_t line = __L
static assert(0, "Unsupported OS");
import core.internal.string;
- UnsignedStringBuf strbuff;
+ UnsignedStringBuf strbuff = void;
// write an appropriate message, then abort the program
- writeStr("Aborting from ", filename, "(", line.unsignedToTempString(strbuff, 10), ") ", msg);
+ writeStr("Aborting from ", filename, "(", line.unsignedToTempString(strbuff), ") ", msg);
c_abort();
}
diff --git a/libphobos/libdruntime/core/internal/array/appending.d b/libphobos/libdruntime/core/internal/array/appending.d
new file mode 100644
index 0000000..1e58ddc
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/array/appending.d
@@ -0,0 +1,222 @@
+/**
+ This module contains support for controlling dynamic arrays' appending
+
+ Copyright: Copyright Digital Mars 2000 - 2019.
+ License: Distributed under the
+ $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ (See accompanying file LICENSE)
+ Source: $(DRUNTIMESRC core/_internal/_array/_appending.d)
+*/
+module core.internal.array.appending;
+
+/// See $(REF _d_arrayappendcTX, rt,lifetime,_d_arrayappendcTX)
+private extern (C) byte[] _d_arrayappendcTX(const TypeInfo ti, ref return scope byte[] px, size_t n) @trusted pure nothrow;
+
+private enum isCopyingNothrow(T) = __traits(compiles, (ref T rhs) nothrow { T lhs = rhs; });
+
+/// Implementation of `_d_arrayappendcTX` and `_d_arrayappendcTXTrace`
+template _d_arrayappendcTXImpl(Tarr : T[], T)
+{
+ import core.internal.array.utils : _d_HookTraceImpl;
+
+ private enum errorMessage = "Cannot append to array if compiling without support for runtime type information!";
+
+ /**
+ * Extend an array `px` by `n` elements.
+ * Caller must initialize those elements.
+ * Params:
+ * px = the array that will be extended, taken as a reference
+ * n = how many new elements to extend it with
+ * Returns:
+ * The new value of `px`
+ * Bugs:
+ * This function template was ported from a much older runtime hook that bypassed safety,
+ * purity, and throwability checks. To prevent breaking existing code, this function template
+ * is temporarily declared `@trusted pure` until the implementation can be brought up to modern D expectations.
+ */
+ static if (isCopyingNothrow!T) // `nothrow` deduction doesn't work, so this is needed
+ ref Tarr _d_arrayappendcTX(return scope ref Tarr px, size_t n) @trusted pure nothrow
+ {
+ pragma(inline, false);
+
+ mixin(_d_arrayappendcTXBody);
+ }
+ else
+ ref Tarr _d_arrayappendcTX(return scope ref Tarr px, size_t n) @trusted pure nothrow
+ {
+ pragma(inline, false);
+
+ mixin(_d_arrayappendcTXBody);
+ }
+
+ private enum _d_arrayappendcTXBody = q{
+ version (D_TypeInfo)
+ {
+ auto ti = typeid(Tarr);
+
+ // _d_arrayappendcTX takes the `px` as a ref byte[], but its length
+ // should still be the original length
+ auto pxx = (cast(byte*)px.ptr)[0 .. px.length];
+ ._d_arrayappendcTX(ti, pxx, n);
+ px = (cast(T*)pxx.ptr)[0 .. pxx.length];
+
+ return px;
+ }
+ else
+ assert(0, "Cannot append arrays if compiling without support for runtime type information!");
+ };
+
+ /**
+ * TraceGC wrapper around $(REF _d_arrayappendcTX, rt,array,appending,_d_arrayappendcTXImpl).
+ * Bugs:
+ * This function template was ported from a much older runtime hook that bypassed safety,
+ * purity, and throwability checks. To prevent breaking existing code, this function template
+ * is temporarily declared `@trusted pure` until the implementation can be brought up to modern D expectations.
+ */
+ alias _d_arrayappendcTXTrace = _d_HookTraceImpl!(Tarr, _d_arrayappendcTX, errorMessage);
+}
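A hedged usage sketch (not part of the patch) of the hook above, in the style of the unittests further down: extend the array by one slot, then initialize it, since the hook leaves the new elements unconstructed:

    @safe unittest
    {
        import core.internal.array.appending : _d_arrayappendcTXImpl;

        int[] arr = [1, 2, 3];
        _d_arrayappendcTXImpl!(int[])._d_arrayappendcTX(arr, 1);
        arr[$ - 1] = 4;  // caller initializes the appended element
        assert(arr == [1, 2, 3, 4]);
    }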
+
+/// Implementation of `_d_arrayappendT` and `_d_arrayappendTTrace`
+template _d_arrayappendTImpl(Tarr : T[], T)
+{
+ import core.internal.array.utils : _d_HookTraceImpl;
+
+ private enum errorMessage = "Cannot append to array if compiling without support for runtime type information!";
+
+ /**
+ * Append array `y` to array `x`.
+ * Params:
+ * x = what array to append to, taken as a reference
+ * y = what should be appended
+ * Returns:
+ * The new value of `x`
+ * Bugs:
+ * This function template was ported from a much older runtime hook that bypassed safety,
+ * purity, and throwability checks. To prevent breaking existing code, this function template
+ * is temporarily declared `@trusted pure` until the implementation can be brought up to modern D expectations.
+ */
+ static if (isCopyingNothrow!T)
+ ref Tarr _d_arrayappendT(return scope ref Tarr x, scope Tarr y) @trusted pure nothrow
+ {
+ pragma(inline, false);
+
+ mixin(_d_arrayappendTBody);
+ }
+ else
+ ref Tarr _d_arrayappendT(return scope ref Tarr x, scope Tarr y) @trusted pure
+ {
+ pragma(inline, false);
+
+ mixin(_d_arrayappendTBody);
+ }
+
+ private enum _d_arrayappendTBody = q{
+ import core.stdc.string : memcpy;
+ import core.internal.traits : hasElaborateCopyConstructor, Unqual;
+ import core.lifetime : copyEmplace;
+
+ auto length = x.length;
+
+ _d_arrayappendcTXImpl!Tarr._d_arrayappendcTX(x, y.length);
+
+ static if (hasElaborateCopyConstructor!T)
+ {
+ foreach (i; 0 .. y.length)
+ copyEmplace(y[i], x[length + i]);
+ }
+ else
+ {
+ // blit all elements at once
+ if (y.length)
+ memcpy(cast(Unqual!T *)&x[length], cast(Unqual!T *)&y[0], y.length * T.sizeof);
+ }
+
+ return x;
+ };
+
+ /**
+ * TraceGC wrapper around $(REF _d_arrayappendT, rt,array,appending,_d_arrayappendTImpl).
+ * Bugs:
+ * This function template was ported from a much older runtime hook that bypassed safety,
+ * purity, and throwability checks. To prevent breaking existing code, this function template
+ * is temporarily declared `@trusted pure` until the implementation can be brought up to modern D expectations.
+ */
+ alias _d_arrayappendTTrace = _d_HookTraceImpl!(Tarr, _d_arrayappendT, errorMessage);
+}
+
+@safe unittest
+{
+ double[] arr1;
+ foreach (i; 0 .. 4)
+ _d_arrayappendTImpl!(typeof(arr1))._d_arrayappendT(arr1, [cast(double)i]);
+ assert(arr1 == [0.0, 1.0, 2.0, 3.0]);
+}
+
+@safe unittest
+{
+ int blitted;
+ struct Item
+ {
+ this(this)
+ {
+ blitted++;
+ }
+ }
+
+ Item[] arr1 = [Item(), Item()];
+ Item[] arr2 = [Item(), Item()];
+ Item[] arr1_org = [Item(), Item()];
+ arr1_org ~= arr2;
+ _d_arrayappendTImpl!(typeof(arr1))._d_arrayappendT(arr1, arr2);
+
+ // postblit should have triggered on at least the items in arr2
+ assert(blitted >= arr2.length);
+}
+
+@safe nothrow unittest
+{
+ int blitted;
+ struct Item
+ {
+ this(this) nothrow
+ {
+ blitted++;
+ }
+ }
+
+ Item[][] arr1 = [[Item()]];
+ Item[][] arr2 = [[Item()]];
+
+ _d_arrayappendTImpl!(typeof(arr1))._d_arrayappendT(arr1, arr2);
+
+ // no postblit should have happened because arr{1,2} contain dynamic arrays
+ assert(blitted == 0);
+}
+
+@safe nothrow unittest
+{
+ int copied;
+ struct Item
+ {
+ this(const scope ref Item) nothrow
+ {
+ copied++;
+ }
+ }
+
+ Item[1][] arr1 = [[Item()]];
+ Item[1][] arr2 = [[Item()]];
+
+ _d_arrayappendTImpl!(typeof(arr1))._d_arrayappendT(arr1, arr2);
+ // copy constructor should have been invoked because arr{1,2} contain static arrays
+ assert(copied >= arr2.length);
+}
+
+@safe nothrow unittest
+{
+ string str;
+ _d_arrayappendTImpl!(typeof(str))._d_arrayappendT(str, "a");
+ _d_arrayappendTImpl!(typeof(str))._d_arrayappendT(str, "b");
+ _d_arrayappendTImpl!(typeof(str))._d_arrayappendT(str, "c");
+ assert(str == "abc");
+}
diff --git a/libphobos/libdruntime/core/internal/array/capacity.d b/libphobos/libdruntime/core/internal/array/capacity.d
new file mode 100644
index 0000000..9440428
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/array/capacity.d
@@ -0,0 +1,85 @@
+/**
+ This module contains support for controlling dynamic arrays' capacity and length
+
+ Copyright: Copyright Digital Mars 2000 - 2019.
+ License: Distributed under the
+ $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ (See accompanying file LICENSE)
+ Source: $(DRUNTIMESRC core/internal/_array/_capacity.d)
+*/
+module core.internal.array.capacity;
+
+// HACK: `nothrow` and `pure` is faked.
+private extern (C) void[] _d_arraysetlengthT(const TypeInfo ti, size_t newlength, void[]* p) nothrow pure;
+private extern (C) void[] _d_arraysetlengthiT(const TypeInfo ti, size_t newlength, void[]* p) nothrow pure;
+
+/*
+ * This template is needed because there needs to be a `_d_arraysetlengthTTrace!Tarr` instance for every
+ * `_d_arraysetlengthT!Tarr`. By wrapping both of these functions inside this template, we force the
+ * compiler to create an instance of both functions for every type that is used.
+ */
+
+/// Implementation of `_d_arraysetlengthT` and `_d_arraysetlengthTTrace`
+template _d_arraysetlengthTImpl(Tarr : T[], T)
+{
+ import core.internal.array.utils : _d_HookTraceImpl;
+
+ private enum errorMessage = "Cannot resize arrays if compiling without support for runtime type information!";
+
+ /**
+ * Resize dynamic array
+ * Params:
+ * arr = the array that will be resized, taken as a reference
+ * newlength = new length of array
+ * Returns:
+ * The new length of the array
+ * Bugs:
+ * The safety level of this function is faked. It shows itself as `@trusted pure nothrow` to not break existing code.
+ */
+ size_t _d_arraysetlengthT(return scope ref Tarr arr, size_t newlength) @trusted pure nothrow
+ {
+ pragma(inline, false);
+ version (D_TypeInfo)
+ {
+ auto ti = typeid(Tarr);
+
+ static if (__traits(isZeroInit, T))
+ ._d_arraysetlengthT(ti, newlength, cast(void[]*)&arr);
+ else
+ ._d_arraysetlengthiT(ti, newlength, cast(void[]*)&arr);
+
+ return arr.length;
+ }
+ else
+ assert(0, errorMessage);
+ }
+
+ /**
+ * TraceGC wrapper around $(REF _d_arraysetlengthT, core,internal,array,core.internal.array.capacity).
+ * Bugs:
+ * This function template was ported from a much older runtime hook that bypassed safety,
+ * purity, and throwability checks. To prevent breaking existing code, this function template
+ * is temporarily declared `@trusted pure nothrow` until the implementation can be brought up to modern D expectations.
+ */
+ alias _d_arraysetlengthTTrace = _d_HookTraceImpl!(Tarr, _d_arraysetlengthT, errorMessage);
+}
+
+@safe unittest
+{
+ struct S
+ {
+ float f = 1.0;
+ }
+
+ int[] arr;
+ _d_arraysetlengthTImpl!(typeof(arr))._d_arraysetlengthT(arr, 16);
+ assert(arr.length == 16);
+ foreach (int i; arr)
+ assert(i == int.init);
+
+ shared S[] arr2;
+ _d_arraysetlengthTImpl!(typeof(arr2))._d_arraysetlengthT(arr2, 16);
+ assert(arr2.length == 16);
+ foreach (s; arr2)
+ assert(s == S.init);
+}
diff --git a/libphobos/libdruntime/core/internal/array/casting.d b/libphobos/libdruntime/core/internal/array/casting.d
new file mode 100644
index 0000000..e862f8e
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/array/casting.d
@@ -0,0 +1,115 @@
+/**
+ This module contains compiler support for casting dynamic arrays
+
+ Copyright: Copyright Digital Mars 2000 - 2019.
+ License: Distributed under the
+ $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ (See accompanying file LICENSE)
+ Source: $(DRUNTIMESRC core/internal/_array/_casting.d)
+*/
+module core.internal.array.casting;
+
+/**
+Used by `__ArrayCast` to emit a descriptive error message.
+
+It is a template so it can be used by `__ArrayCast` in -betterC
+builds. It is separate from `__ArrayCast` to minimize code
+bloat.
+
+Params:
+ fromType = name of the type being cast from
+ fromSize = total size in bytes of the array being cast from
+ toType = name of the type being cast to
+ toSize = total size in bytes of the array being cast to
+ */
+private void onArrayCastError()(string fromType, size_t fromSize, string toType, size_t toSize) @trusted
+{
+ import core.internal.string : unsignedToTempString;
+ import core.memory : pureMalloc;
+
+ // build the error message as one contiguous string on the C heap
+ enum msgLength = 2048;
+ // note: never freed!
+ char* msg = cast(char *)pureMalloc(msgLength);
+
+ size_t index = 0;
+ void add(const(char)[] m)
+ {
+ import core.stdc.string : memcpy;
+
+ auto N = msgLength - 1 - index;
+ if (N > m.length)
+ N = m.length;
+ // prevent superfluous and betterC-unfriendly checks via direct memcpy
+ memcpy(msg + index, m.ptr, N);
+ index += N;
+ }
+
+ add("An array of size ");
+ auto s = unsignedToTempString(fromSize);
+ add(s[]);
+ add(" does not align on an array of size ");
+ s = unsignedToTempString(toSize);
+ add(s[]);
+ add(", so `");
+ add(fromType);
+ add("` cannot be cast to `");
+ add(toType);
+ add("`");
+ msg[index] = '\0'; // null-termination
+
+ // first argument must evaluate to `false` at compile-time to maintain memory safety in release builds
+ assert(false, msg[0 .. index]);
+}
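For illustration (not part of the patch), the kind of program that reaches this error path through the `__ArrayCast` lowering below:

    void main()
    {
        byte[] b = new byte[5];
        // 5 bytes cannot be reinterpreted as whole ints (5 % int.sizeof != 0),
        // so the lowered __ArrayCast call ends in onArrayCastError.
        auto ints = cast(int[]) b;
    }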
+
+/**
+The compiler lowers expressions of `cast(TTo[])TFrom[]` to
+this implementation.
+
+Params:
+ from = the array to reinterpret-cast
+
+Returns:
+ `from` reinterpreted as `TTo[]`
+ */
+TTo[] __ArrayCast(TFrom, TTo)(return scope TFrom[] from) @nogc pure @trusted
+{
+ const fromSize = from.length * TFrom.sizeof;
+ const toLength = fromSize / TTo.sizeof;
+
+ if ((fromSize % TTo.sizeof) != 0)
+ {
+ onArrayCastError(TFrom.stringof, fromSize, TTo.stringof, toLength * TTo.sizeof);
+ }
+
+ struct Array
+ {
+ size_t length;
+ void* ptr;
+ }
+ auto a = cast(Array*)&from;
+ a.length = toLength; // jam new length
+ return *cast(TTo[]*)a;
+}
+
+@safe @nogc pure nothrow unittest
+{
+ byte[int.sizeof * 3] b = cast(byte) 0xab;
+ int[] i;
+ short[] s;
+
+ i = __ArrayCast!(byte, int)(b);
+ assert(i.length == 3);
+ foreach (v; i)
+ assert(v == cast(int) 0xabab_abab);
+
+ s = __ArrayCast!(byte, short)(b);
+ assert(s.length == 6);
+ foreach (v; s)
+ assert(v == cast(short) 0xabab);
+
+ s = __ArrayCast!(int, short)(i);
+ assert(s.length == 6);
+ foreach (v; s)
+ assert(v == cast(short) 0xabab);
+}
diff --git a/libphobos/libdruntime/core/internal/array/comparison.d b/libphobos/libdruntime/core/internal/array/comparison.d
new file mode 100644
index 0000000..1a68b9b
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/array/comparison.d
@@ -0,0 +1,242 @@
+/**
+ * This module contains compiler support for comparing dynamic arrays
+ *
+ * Copyright: Copyright Digital Mars 2000 - 2019.
+ * License: Distributed under the
+ * $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ * (See accompanying file LICENSE)
+ * Source: $(DRUNTIMESRC core/internal/_array/_comparison.d)
+ */
+
+module core.internal.array.comparison;
+
+int __cmp(T)(scope const T[] lhs, scope const T[] rhs) @trusted
+ if (__traits(isScalar, T))
+{
+ // Compute U as the implementation type for T
+ static if (is(T == ubyte) || is(T == void) || is(T == bool))
+ alias U = char;
+ else static if (is(T == wchar))
+ alias U = ushort;
+ else static if (is(T == dchar))
+ alias U = uint;
+ else static if (is(T == ifloat))
+ alias U = float;
+ else static if (is(T == idouble))
+ alias U = double;
+ else static if (is(T == ireal))
+ alias U = real;
+ else
+ alias U = T;
+
+ static if (is(U == char))
+ {
+ import core.internal.string : dstrcmp;
+ return dstrcmp(cast(char[]) lhs, cast(char[]) rhs);
+ }
+ else static if (!is(U == T))
+ {
+ // Reuse another implementation
+ return __cmp(cast(U[]) lhs, cast(U[]) rhs);
+ }
+ else
+ {
+ version (BigEndian)
+ static if (__traits(isUnsigned, T) ? !is(T == __vector) : is(T : P*, P))
+ {
+ if (!__ctfe)
+ {
+ import core.stdc.string : memcmp;
+ int c = memcmp(lhs.ptr, rhs.ptr, (lhs.length <= rhs.length ? lhs.length : rhs.length) * T.sizeof);
+ if (c)
+ return c;
+ static if (size_t.sizeof <= uint.sizeof && T.sizeof >= 2)
+ return cast(int) lhs.length - cast(int) rhs.length;
+ else
+ return int(lhs.length > rhs.length) - int(lhs.length < rhs.length);
+ }
+ }
+
+ immutable len = lhs.length <= rhs.length ? lhs.length : rhs.length;
+ foreach (const u; 0 .. len)
+ {
+ static if (__traits(isFloating, T))
+ {
+ immutable a = lhs.ptr[u], b = rhs.ptr[u];
+ static if (is(T == cfloat) || is(T == cdouble)
+ || is(T == creal))
+ {
+ // Use rt.cmath2._Ccmp instead ?
+ auto r = (a.re > b.re) - (a.re < b.re);
+ if (!r) r = (a.im > b.im) - (a.im < b.im);
+ }
+ else
+ {
+ const r = (a > b) - (a < b);
+ }
+ if (r) return r;
+ }
+ else if (lhs.ptr[u] != rhs.ptr[u])
+ return lhs.ptr[u] < rhs.ptr[u] ? -1 : 1;
+ }
+ return (lhs.length > rhs.length) - (lhs.length < rhs.length);
+ }
+}
+
+// This function is called by the compiler when dealing with array
+// comparisons in the semantic analysis phase of CmpExp. The ordering
+// comparison is lowered to a call to this template.
+int __cmp(T1, T2)(T1[] s1, T2[] s2)
+if (!__traits(isScalar, T1) && !__traits(isScalar, T2))
+{
+ import core.internal.traits : Unqual;
+ alias U1 = Unqual!T1;
+ alias U2 = Unqual!T2;
+
+ static if (is(U1 == void) && is(U2 == void))
+ static @trusted ref inout(ubyte) at(inout(void)[] r, size_t i) { return (cast(inout(ubyte)*) r.ptr)[i]; }
+ else
+ static @trusted ref R at(R)(R[] r, size_t i) { return r.ptr[i]; }
+
+ // All unsigned byte-wide types => dstrcmp
+ immutable len = s1.length <= s2.length ? s1.length : s2.length;
+
+ foreach (const u; 0 .. len)
+ {
+ static if (__traits(compiles, __cmp(at(s1, u), at(s2, u))))
+ {
+ auto c = __cmp(at(s1, u), at(s2, u));
+ if (c != 0)
+ return c;
+ }
+ else static if (__traits(compiles, at(s1, u).opCmp(at(s2, u))))
+ {
+ auto c = at(s1, u).opCmp(at(s2, u));
+ if (c != 0)
+ return c;
+ }
+ else static if (__traits(compiles, at(s1, u) < at(s2, u)))
+ {
+ if (at(s1, u) != at(s2, u))
+ return at(s1, u) < at(s2, u) ? -1 : 1;
+ }
+ else
+ {
+ // TODO: fix this legacy bad behavior, see
+ // https://issues.dlang.org/show_bug.cgi?id=17244
+ static assert(is(U1 == U2), "Internal error.");
+ import core.stdc.string : memcmp;
+ auto c = (() @trusted => memcmp(&at(s1, u), &at(s2, u), U1.sizeof))();
+ if (c != 0)
+ return c;
+ }
+ }
+ return (s1.length > s2.length) - (s1.length < s2.length);
+}
+
+// integral types
+@safe unittest
+{
+ void compareMinMax(T)()
+ {
+ T[2] a = [T.max, T.max];
+ T[2] b = [T.min, T.min];
+
+ assert(__cmp(a, b) > 0);
+ assert(__cmp(b, a) < 0);
+ }
+
+ compareMinMax!int;
+ compareMinMax!uint;
+ compareMinMax!long;
+ compareMinMax!ulong;
+ compareMinMax!short;
+ compareMinMax!ushort;
+ compareMinMax!byte;
+ compareMinMax!dchar;
+ compareMinMax!wchar;
+}
+
+// char types (dstrcmp)
+@safe unittest
+{
+ void compareMinMax(T)()
+ {
+ T[2] a = [T.max, T.max];
+ T[2] b = [T.min, T.min];
+
+ assert(__cmp(a, b) > 0);
+ assert(__cmp(b, a) < 0);
+ }
+
+ compareMinMax!ubyte;
+ compareMinMax!bool;
+ compareMinMax!char;
+ compareMinMax!(const char);
+
+ string s1 = "aaaa";
+ string s2 = "bbbb";
+ assert(__cmp(s2, s1) > 0);
+ assert(__cmp(s1, s2) < 0);
+}
+
+// fp types
+@safe unittest
+{
+ void compareMinMax(T)()
+ {
+ T[2] a = [T.max, T.max];
+ T[2] b = [T.min_normal, T.min_normal];
+ T[2] c = [T.max, T.min_normal];
+ T[1] d = [T.max];
+
+ assert(__cmp(a, b) > 0);
+ assert(__cmp(b, a) < 0);
+ assert(__cmp(a, c) > 0);
+ assert(__cmp(a, d) > 0);
+ assert(__cmp(d, c) < 0);
+ assert(__cmp(c, c) == 0);
+ }
+
+ compareMinMax!real;
+ compareMinMax!float;
+ compareMinMax!double;
+
+ // qualifiers
+ compareMinMax!(const real);
+ compareMinMax!(immutable real);
+}
+
+// void[]
+@safe unittest
+{
+ void[] a;
+ const(void)[] b;
+
+ (() @trusted
+ {
+ a = cast(void[]) "bb";
+ b = cast(const(void)[]) "aa";
+ })();
+
+ assert(__cmp(a, b) > 0);
+ assert(__cmp(b, a) < 0);
+}
+
+// arrays of arrays with mixed modifiers
+@safe unittest
+{
+ // https://issues.dlang.org/show_bug.cgi?id=17876
+ bool less1(immutable size_t[][] a, size_t[][] b) { return a < b; }
+ bool less2(const void[][] a, void[][] b) { return a < b; }
+ bool less3(inout size_t[][] a, size_t[][] b) { return a < b; }
+
+ immutable size_t[][] a = [[1, 2], [3, 4]];
+ size_t[][] b = [[1, 2], [3, 5]];
+ assert(less1(a, b));
+ assert(less3(a, b));
+
+ auto va = [cast(immutable void[])a[0], a[1]];
+ auto vb = [cast(void[])b[0], b[1]];
+ assert(less2(va, vb));
+}
diff --git a/libphobos/libdruntime/core/internal/array/concatenation.d b/libphobos/libdruntime/core/internal/array/concatenation.d
new file mode 100644
index 0000000..955e381
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/array/concatenation.d
@@ -0,0 +1,75 @@
+/**
+ This module contains support for controlling dynamic arrays' concatenation
+ Copyright: Copyright Digital Mars 2000 - 2019.
+ License: Distributed under the
+ $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ (See accompanying file LICENSE)
+ Source: $(DRUNTIMESRC core/internal/_array/_concatenation.d)
+*/
+module core.internal.array.concatenation;
+
+/// See $(REF _d_arraycatnTX, rt,lifetime)
+private extern (C) void[] _d_arraycatnTX(const TypeInfo ti, scope byte[][] arrs) pure nothrow;
+
+/// Implementation of `_d_arraycatnTX` and `_d_arraycatnTXTrace`
+template _d_arraycatnTXImpl(Tarr : ResultArrT[], ResultArrT : T[], T)
+{
+ import core.internal.array.utils : _d_HookTraceImpl;
+
+ private enum errorMessage = "Cannot concatenate arrays if compiling without support for runtime type information!";
+
+ /**
+ * Concatenates the arrays contained in `arrs`.
+ * `_d_arraycatnTX([a, b, c])` means `a ~ b ~ c`.
+ * Params:
+ * arrs = Array containing arrays that will be concatenated.
+ * Returns:
+ * A newly allocated array that contains all the elements from all the arrays in `arrs`.
+ * Bugs:
+ * This function template was ported from a much older runtime hook that bypassed safety,
+ * purity, and throwability checks. To prevent breaking existing code, this function template
+ * is temporarily declared `@trusted pure nothrow` until the implementation can be brought up to modern D expectations.
+ */
+ ResultArrT _d_arraycatnTX(scope const Tarr arrs) @trusted pure nothrow
+ {
+ pragma(inline, false);
+ version (D_TypeInfo)
+ {
+ auto ti = typeid(ResultArrT);
+
+ byte[][] arrs2 = (cast(byte[]*)arrs.ptr)[0 .. arrs.length];
+ void[] result = ._d_arraycatnTX(ti, arrs2);
+ return (cast(T*)result.ptr)[0 .. result.length];
+ }
+ else
+ assert(0, errorMessage);
+ }
+
+ /**
+ * TraceGC wrapper around $(REF _d_arraycatnTX, core,internal,array,concat).
+ * Bugs:
+ * This function template was ported from a much older runtime hook that bypassed safety,
+ * purity, and throwability checks. To prevent breaking existing code, this function template
+ * is temporarily declared `@trusted pure nothrow` until the implementation can be brought up to modern D expectations.
+ */
+ alias _d_arraycatnTXTrace = _d_HookTraceImpl!(ResultArrT, _d_arraycatnTX, errorMessage);
+}
+
+@safe unittest
+{
+ int counter;
+ struct S
+ {
+ int val;
+ this(this)
+ {
+ counter++;
+ }
+ }
+
+ S[][] arr = [[S(0), S(1), S(2), S(3)], [S(4), S(5), S(6), S(7)]];
+ S[] result = _d_arraycatnTXImpl!(typeof(arr))._d_arraycatnTX(arr);
+
+ assert(counter == 8);
+ assert(result == [S(0), S(1), S(2), S(3), S(4), S(5), S(6), S(7)]);
+}
diff --git a/libphobos/libdruntime/core/internal/array/construction.d b/libphobos/libdruntime/core/internal/array/construction.d
new file mode 100644
index 0000000..b58ed51
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/array/construction.d
@@ -0,0 +1,307 @@
+/**
+ This module contains compiler support for constructing dynamic arrays
+
+ Copyright: Copyright Digital Mars 2000 - 2019.
+ License: Distributed under the
+ $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ (See accompanying file LICENSE)
+ Source: $(DRUNTIMESRC core/internal/_array/_construction.d)
+*/
+module core.internal.array.construction;
+
+/**
+ * Does array initialization (not assignment) from another array of the same element type.
+ * Params:
+ * to = what array to initialize
+ * from = what data the array should be initialized with
+ * Returns:
+ * The constructed `to`
+ * Bugs:
+ * This function template was ported from a much older runtime hook that bypassed safety,
+ * purity, and throwability checks. To prevent breaking existing code, this function template
+ * is temporarily declared `@trusted` until the implementation can be brought up to modern D expectations.
+ */
+Tarr _d_arrayctor(Tarr : T[], T)(return scope Tarr to, scope Tarr from) @trusted
+{
+ pragma(inline, false);
+ import core.internal.traits : hasElaborateCopyConstructor, Unqual;
+ import core.lifetime : copyEmplace;
+ import core.stdc.string : memcpy;
+ debug(PRINTF) import core.stdc.stdio;
+
+ // Force `enforceRawArraysConformable` to be `pure`
+ void enforceRawArraysConformable(const char[] action, const size_t elementSize, const void[] a1, const void[] a2, in bool allowOverlap = false) @trusted
+ {
+ import core.internal.util.array : enforceRawArraysConformable;
+
+ alias Type = void function(const char[] action, const size_t elementSize, const void[] a1, const void[] a2, in bool allowOverlap = false) pure nothrow;
+ (cast(Type)&enforceRawArraysConformable)(action, elementSize, a1, a2, allowOverlap);
+ }
+
+ debug(PRINTF) printf("_d_arrayctor(to = %p,%d, from = %p,%d) size = %d\n", to.ptr, cast(int) to.length, from.ptr, cast(int) from.length, cast(int) T.sizeof);
+
+ auto element_size = T.sizeof;
+
+ void[] vFrom = (cast(void*)from.ptr)[0..from.length];
+ void[] vTo = (cast(void*)to.ptr)[0..to.length];
+ enforceRawArraysConformable("initialization", element_size, vFrom, vTo, false);
+
+ static if (hasElaborateCopyConstructor!T)
+ {
+ size_t i;
+ try
+ {
+ for (i = 0; i < to.length; i++)
+ copyEmplace(from[i], to[i]);
+ }
+ catch (Exception o)
+ {
+ /* Destroy, in reverse order, what we've constructed so far
+ */
+ while (i--)
+ {
+ auto elem = cast(Unqual!T*)&to[i];
+ destroy(*elem);
+ }
+
+ throw o;
+ }
+ }
+ else
+ {
+ // blit all elements at once
+ memcpy(cast(void*) to.ptr, from.ptr, to.length * T.sizeof);
+ }
+
+ return to;
+}
+
+// postblit
+@safe unittest
+{
+ int counter;
+ struct S
+ {
+ int val;
+ this(this) { counter++; }
+ }
+
+ S[4] arr1;
+ S[4] arr2 = [S(0), S(1), S(2), S(3)];
+ _d_arrayctor(arr1[], arr2[]);
+
+ assert(counter == 4);
+ assert(arr1 == arr2);
+}
+
+// copy constructor
+@safe unittest
+{
+ int counter;
+ struct S
+ {
+ int val;
+ this(int val) { this.val = val; }
+ this(const scope ref S rhs)
+ {
+ val = rhs.val;
+ counter++;
+ }
+ }
+
+ S[4] arr1;
+ S[4] arr2 = [S(0), S(1), S(2), S(3)];
+ _d_arrayctor(arr1[], arr2[]);
+
+ assert(counter == 4);
+ assert(arr1 == arr2);
+}
+
+@safe nothrow unittest
+{
+ // Test that throwing works
+ int counter;
+ bool didThrow;
+
+ struct Throw
+ {
+ int val;
+ this(this)
+ {
+ counter++;
+ if (counter == 2)
+ throw new Exception("");
+ }
+ }
+ try
+ {
+ Throw[4] a;
+ Throw[4] b = [Throw(1), Throw(2), Throw(3), Throw(4)];
+ _d_arrayctor(a[], b[]);
+ }
+ catch (Exception)
+ {
+ didThrow = true;
+ }
+ assert(didThrow);
+ assert(counter == 2);
+
+
+ // Test that `nothrow` works
+ didThrow = false;
+ counter = 0;
+ struct NoThrow
+ {
+ int val;
+ this(this)
+ {
+ counter++;
+ }
+ }
+ try
+ {
+ NoThrow[4] a;
+ NoThrow[4] b = [NoThrow(1), NoThrow(2), NoThrow(3), NoThrow(4)];
+ _d_arrayctor(a[], b[]);
+ }
+ catch (Exception)
+ {
+ didThrow = false;
+ }
+ assert(!didThrow);
+ assert(counter == 4);
+}
+
+/**
+ * Do construction of an array.
+ * ti[count] p = value;
+ * Params:
+ * p = what array to initialize
+ * value = what data to construct the array with
+ * Bugs:
+ * This function template was ported from a much older runtime hook that bypassed safety,
+ * purity, and throwability checks. To prevent breaking existing code, this function template
+ * is temporarily declared `@trusted` until the implementation can be brought up to modern D expectations.
+ */
+void _d_arraysetctor(Tarr : T[], T)(scope Tarr p, scope ref T value) @trusted
+{
+ pragma(inline, false);
+ import core.internal.traits : Unqual;
+ import core.lifetime : copyEmplace;
+
+ size_t i;
+ try
+ {
+ for (i = 0; i < p.length; i++)
+ copyEmplace(value, p[i]);
+ }
+ catch (Exception o)
+ {
+ // Destroy, in reverse order, what we've constructed so far
+ while (i--)
+ {
+ auto elem = cast(Unqual!T*)&p[i];
+ destroy(*elem);
+ }
+
+ throw o;
+ }
+}
+
+// postblit
+@safe unittest
+{
+ int counter;
+ struct S
+ {
+ int val;
+ this(this)
+ {
+ counter++;
+ }
+ }
+
+ S[4] arr;
+ S s = S(1234);
+ _d_arraysetctor(arr[], s);
+ assert(counter == arr.length);
+ assert(arr == [S(1234), S(1234), S(1234), S(1234)]);
+}
+
+// copy constructor
+@safe unittest
+{
+ int counter;
+ struct S
+ {
+ int val;
+ this(int val) { this.val = val; }
+ this(const scope ref S rhs)
+ {
+ val = rhs.val;
+ counter++;
+ }
+ }
+
+ S[4] arr;
+ S s = S(1234);
+ _d_arraysetctor(arr[], s);
+ assert(counter == arr.length);
+ assert(arr == [S(1234), S(1234), S(1234), S(1234)]);
+}
+
+@safe nothrow unittest
+{
+ // Test that throwing works
+ int counter;
+ bool didThrow;
+ struct Throw
+ {
+ int val;
+ this(this)
+ {
+ counter++;
+ if (counter == 2)
+ throw new Exception("Oh no.");
+ }
+ }
+ try
+ {
+ Throw[4] a;
+ Throw[4] b = [Throw(1), Throw(2), Throw(3), Throw(4)];
+ _d_arrayctor(a[], b[]);
+ }
+ catch (Exception)
+ {
+ didThrow = true;
+ }
+ assert(didThrow);
+ assert(counter == 2);
+
+
+ // Test that `nothrow` works
+ didThrow = false;
+ counter = 0;
+ struct NoThrow
+ {
+ int val;
+ this(this)
+ {
+ counter++;
+ }
+ }
+ try
+ {
+ NoThrow[4] a;
+ NoThrow b = NoThrow(1);
+ _d_arraysetctor(a[], b);
+ foreach (ref e; a)
+ assert(e == NoThrow(1));
+ }
+ catch (Exception)
+ {
+ didThrow = false;
+ }
+ assert(!didThrow);
+ assert(counter == 4);
+}
diff --git a/libphobos/libdruntime/core/internal/array/equality.d b/libphobos/libdruntime/core/internal/array/equality.d
new file mode 100644
index 0000000..b12e2f2
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/array/equality.d
@@ -0,0 +1,237 @@
+/**
+ * This module contains compiler support for determining equality of arrays.
+ *
+ * Copyright: Copyright Digital Mars 2000 - 2020.
+ * License: Distributed under the
+ * $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ * (See accompanying file LICENSE)
+ * Source: $(DRUNTIMESRC core/internal/_array/_equality.d)
+ */
+
+module core.internal.array.equality;
+
+// The compiler lowers `lhs == rhs` to `__equals(lhs, rhs)` for
+// * dynamic arrays,
+// * (most) arrays of different (unqualified) element types, and
+// * arrays of structs with custom opEquals.
+
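+// Illustrative example (values chosen for demonstration only): assuming the
+// lowering described above, the operator form and a direct call to `__equals`
+// agree for dynamic arrays of scalars.
+@safe unittest
+{
+ int[] a = [1, 2, 3];
+ int[] b = [1, 2, 3];
+ assert(a == b); // lowered by the compiler to __equals(a, b)
+ assert(__equals(a, b)); // calling the hook directly gives the same result
+}
+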
+ // The scalar-only overload takes advantage of known properties of scalars to
+ // reduce template instantiation. This is expected to be the most common case.
+bool __equals(T1, T2)(scope const T1[] lhs, scope const T2[] rhs)
+@nogc nothrow pure @trusted
+if (__traits(isScalar, T1) && __traits(isScalar, T2))
+{
+ if (lhs.length != rhs.length)
+ return false;
+
+ static if (T1.sizeof == T2.sizeof
+ // Signedness needs to match for types that promote to int.
+ // (Actually it would be okay to memcmp bool[] and byte[] but that is
+ // probably too uncommon to be worth checking for.)
+ && (T1.sizeof >= 4 || __traits(isUnsigned, T1) == __traits(isUnsigned, T2))
+ && !__traits(isFloating, T1) && !__traits(isFloating, T2))
+ {
+ if (!__ctfe)
+ {
+ // This would improperly allow equality of integers and pointers
+ // but the CTFE branch will stop this function from compiling then.
+ import core.stdc.string : memcmp;
+ return lhs.length == 0 ||
+ 0 == memcmp(cast(const void*) lhs.ptr, cast(const void*) rhs.ptr, lhs.length * T1.sizeof);
+ }
+ }
+
+ foreach (const i; 0 .. lhs.length)
+ if (lhs.ptr[i] != rhs.ptr[i])
+ return false;
+ return true;
+}
+
+bool __equals(T1, T2)(scope T1[] lhs, scope T2[] rhs)
+if (!__traits(isScalar, T1) || !__traits(isScalar, T2))
+{
+ if (lhs.length != rhs.length)
+ return false;
+
+ if (lhs.length == 0)
+ return true;
+
+ static if (useMemcmp!(T1, T2))
+ {
+ if (!__ctfe)
+ {
+ static bool trustedMemcmp(scope T1[] lhs, scope T2[] rhs) @trusted @nogc nothrow pure
+ {
+ pragma(inline, true);
+ import core.stdc.string : memcmp;
+ return memcmp(cast(void*) lhs.ptr, cast(void*) rhs.ptr, lhs.length * T1.sizeof) == 0;
+ }
+ return trustedMemcmp(lhs, rhs);
+ }
+ else
+ {
+ foreach (const i; 0 .. lhs.length)
+ {
+ if (at(lhs, i) != at(rhs, i))
+ return false;
+ }
+ return true;
+ }
+ }
+ else
+ {
+ foreach (const i; 0 .. lhs.length)
+ {
+ if (at(lhs, i) != at(rhs, i))
+ return false;
+ }
+ return true;
+ }
+}
+
+@safe unittest
+{
+ assert(__equals([], []));
+ assert(!__equals([1, 2], [1, 2, 3]));
+}
+
+@safe unittest
+{
+ auto a = "hello"c;
+
+ assert(a != "hel");
+ assert(a != "helloo");
+ assert(a != "betty");
+ assert(a == "hello");
+ assert(a != "hxxxx");
+
+ float[] fa = [float.nan];
+ assert(fa != fa);
+}
+
+@safe unittest
+{
+ struct A
+ {
+ int a;
+ }
+
+ auto arr1 = [A(0), A(2)];
+ auto arr2 = [A(0), A(1)];
+ auto arr3 = [A(0), A(1)];
+
+ assert(arr1 != arr2);
+ assert(arr2 == arr3);
+}
+
+@safe unittest
+{
+ struct A
+ {
+ int a;
+ int b;
+
+ bool opEquals(const A other)
+ {
+ return this.a == other.b && this.b == other.a;
+ }
+ }
+
+ auto arr1 = [A(1, 0), A(0, 1)];
+ auto arr2 = [A(1, 0), A(0, 1)];
+ auto arr3 = [A(0, 1), A(1, 0)];
+
+ assert(arr1 != arr2);
+ assert(arr2 == arr3);
+}
+
+// https://issues.dlang.org/show_bug.cgi?id=18252
+@safe unittest
+{
+ string[int][] a1, a2;
+ assert(__equals(a1, a2));
+ assert(a1 == a2);
+ a1 ~= [0: "zero"];
+ a2 ~= [0: "zero"];
+ assert(__equals(a1, a2));
+ assert(a1 == a2);
+ a2[0][1] = "one";
+ assert(!__equals(a1, a2));
+ assert(a1 != a2);
+}
+
+
+private:
+
+// - Recursively folds static array types to their element type,
+// - maps void to ubyte, and
+// - pointers to size_t.
+template BaseType(T)
+{
+ static if (__traits(isStaticArray, T))
+ alias BaseType = BaseType!(typeof(T.init[0]));
+ else static if (is(immutable T == immutable void))
+ alias BaseType = ubyte;
+ else static if (is(T == E*, E))
+ alias BaseType = size_t;
+ else
+ alias BaseType = T;
+}
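+
+// Illustrative instantiations (examples only):
+static assert(is(BaseType!(int[3][2]) == int)); // static arrays fold to their element type
+static assert(is(BaseType!(const void) == ubyte)); // void is compared as raw bytes
+static assert(is(BaseType!(double*) == size_t)); // pointers are compared as addresses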
+
+// Use memcmp if the element sizes match and both base element types are integral.
+// Due to int promotion, disallow small integers of diverging signed-ness though.
+template useMemcmp(T1, T2)
+{
+ static if (T1.sizeof != T2.sizeof)
+ enum useMemcmp = false;
+ else
+ {
+ alias B1 = BaseType!T1;
+ alias B2 = BaseType!T2;
+ enum useMemcmp = __traits(isIntegral, B1) && __traits(isIntegral, B2)
+ && !( (B1.sizeof < 4 || B2.sizeof < 4) && __traits(isUnsigned, B1) != __traits(isUnsigned, B2) );
+ }
+}
+
+unittest
+{
+ enum E { foo, bar }
+
+ static assert(useMemcmp!(byte, byte));
+ static assert(useMemcmp!(ubyte, ubyte));
+ static assert(useMemcmp!(void, const void));
+ static assert(useMemcmp!(void, immutable bool));
+ static assert(useMemcmp!(void, inout char));
+ static assert(useMemcmp!(void, shared ubyte));
+ static assert(!useMemcmp!(void, byte)); // differing signed-ness
+ static assert(!useMemcmp!(char[8], byte[8])); // ditto
+
+ static assert(useMemcmp!(short, short));
+ static assert(useMemcmp!(wchar, ushort));
+ static assert(!useMemcmp!(wchar, short)); // differing signed-ness
+
+ static assert(useMemcmp!(int, uint)); // no promotion, ignoring signed-ness
+ static assert(useMemcmp!(dchar, E));
+
+ static assert(useMemcmp!(immutable void*, size_t));
+ static assert(useMemcmp!(double*, ptrdiff_t));
+ static assert(useMemcmp!(long[2][3], const(ulong)[2][3]));
+
+ static assert(!useMemcmp!(float, float));
+ static assert(!useMemcmp!(double[2], double[2]));
+ static assert(!useMemcmp!(Object, Object));
+ static assert(!useMemcmp!(int[], int[]));
+}
+
+// Returns a reference to an array element, eliding bounds check and
+// casting void to ubyte.
+pragma(inline, true)
+ref at(T)(T[] r, size_t i) @trusted
+ // exclude opaque structs due to https://issues.dlang.org/show_bug.cgi?id=20959
+ if (!(is(T == struct) && !is(typeof(T.sizeof))))
+{
+ static if (is(immutable T == immutable void))
+ return (cast(ubyte*) r.ptr)[i];
+ else
+ return r.ptr[i];
+}
diff --git a/libphobos/libdruntime/core/internal/array/operations.d b/libphobos/libdruntime/core/internal/array/operations.d
new file mode 100644
index 0000000..3e23314
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/array/operations.d
@@ -0,0 +1,670 @@
+/**
+ This module contains support for array (vector) operations.
+ Copyright: Copyright Digital Mars 2000 - 2019.
+ License: Distributed under the
+ $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ (See accompanying file LICENSE)
+ Source: $(DRUNTIMESRC core/_internal/_array/_operations.d)
+*/
+module core.internal.array.operations;
+import core.internal.traits : Filter, staticMap, Unqual;
+
+version (GNU) version = GNU_OR_LDC;
+version (LDC) version = GNU_OR_LDC;
+
+/**
+ * Perform array (vector) operations and store the result in `res`. Operand
+ * types and operations are passed as template arguments in Reverse Polish
+ * Notation (RPN).
+ *
+ * Operands can be slices or scalar types. The element types of all
+ * slices and all scalar types must be implicitly convertible to `T`.
+ *
+ * Operations are encoded as strings, e.g. `"+"`, `"%"`, `"*="`. Unary
+ * operations are prefixed with "u", e.g. `"u-"`, `"u~"`. Only the last
+ * operation can and must be an assignment (`"="`) or op-assignment (`"op="`).
+ *
+ * All slice operands must have the same length as the result slice.
+ *
+ * Params: T[] = type of result slice
+ * Args = operand types and operations in RPN
+ * res = the slice in which to store the results
+ * args = operand values
+ *
+ * Returns: the slice containing the result
+ */
+T[] arrayOp(T : T[], Args...)(T[] res, Filter!(isType, Args) args) @trusted @nogc pure nothrow
+{
+ alias scalarizedExp = staticMap!(toElementType, Args);
+ alias check = typeCheck!(true, T, scalarizedExp); // must support all scalar ops
+
+ foreach (argsIdx, arg; typeof(args))
+ {
+ static if (is(arg == U[], U))
+ {
+ assert(res.length == args[argsIdx].length, "Mismatched array lengths for vector operation");
+ }
+ }
+
+ size_t pos;
+ static if (vectorizeable!(T[], Args))
+ {
+ alias vec = .vec!T;
+ alias load = .load!(T, vec.length);
+ alias store = .store!(T, vec.length);
+
+ // Given that there are at most as many scalars broadcast as there are
+ // operations in any `ary[] = ary[] op const op const`, it should always be
+ // worthwhile to choose vector operations.
+ if (!__ctfe && res.length >= vec.length)
+ {
+ mixin(initScalarVecs!Args);
+
+ auto n = res.length / vec.length;
+ do
+ {
+ mixin(vectorExp!Args ~ ";");
+ pos += vec.length;
+ }
+ while (--n);
+ }
+ }
+ for (; pos < res.length; ++pos)
+ mixin(scalarExp!Args ~ ";");
+
+ return res;
+}
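+
+// Illustrative example (element type and values chosen for demonstration only):
+// assuming the compiler encodes `res[] = a[] * 3 + b[]` in RPN roughly as the
+// call below, each operator follows its operands and the final `"="` performs
+// the assignment into `res`.
+unittest
+{
+ int[4] a = [1, 2, 3, 4], b = [10, 20, 30, 40], res;
+ arrayOp!(int[], const(int)[], int, "*", const(int)[], "+", "=")(res[], a[], 3, b[]);
+ assert(res == [13, 26, 39, 52]);
+}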
+
+private:
+
+// SIMD helpers
+
+version (DigitalMars)
+{
+ import core.simd;
+
+ template vec(T)
+ {
+ enum regsz = 16; // SSE2
+ enum N = regsz / T.sizeof;
+ alias vec = __vector(T[N]);
+ }
+
+ void store(T, size_t N)(T* p, const scope __vector(T[N]) val)
+ {
+ pragma(inline, true);
+ alias vec = __vector(T[N]);
+
+ static if (is(T == float))
+ cast(void) __simd_sto(XMM.STOUPS, *cast(vec*) p, val);
+ else static if (is(T == double))
+ cast(void) __simd_sto(XMM.STOUPD, *cast(vec*) p, val);
+ else
+ cast(void) __simd_sto(XMM.STODQU, *cast(vec*) p, val);
+ }
+
+ const(__vector(T[N])) load(T, size_t N)(const scope T* p)
+ {
+ import core.simd;
+
+ pragma(inline, true);
+ alias vec = __vector(T[N]);
+
+ static if (is(T == float))
+ return cast(typeof(return)) __simd(XMM.LODUPS, *cast(const vec*) p);
+ else static if (is(T == double))
+ return cast(typeof(return)) __simd(XMM.LODUPD, *cast(const vec*) p);
+ else
+ return cast(typeof(return)) __simd(XMM.LODDQU, *cast(const vec*) p);
+ }
+
+ __vector(T[N]) binop(string op, T, size_t N)(const scope __vector(T[N]) a, const scope __vector(T[N]) b)
+ {
+ pragma(inline, true);
+ return mixin("a " ~ op ~ " b");
+ }
+
+ __vector(T[N]) unaop(string op, T, size_t N)(const scope __vector(T[N]) a)
+ if (op[0] == 'u')
+ {
+ pragma(inline, true);
+ return mixin(op[1 .. $] ~ "a");
+ }
+}
+
+// mixin gen
+
+/**
+Check whether operations on operand types are supported. This
+template recursively reduces the expression tree and determines
+intermediate types.
+Type checking is done here rather than in the compiler to provide more
+detailed error messages.
+
+Params:
+ fail = whether to fail (static assert) with a human-friendly error message
+ T = type of result
+ Args = operand types and operations in RPN
+Returns:
+ The resulting type of the expression
+See_Also:
+ $(LREF arrayOp)
+*/
+template typeCheck(bool fail, T, Args...)
+{
+ enum idx = staticIndexOf!(not!isType, Args);
+ static if (isUnaryOp(Args[idx]))
+ {
+ alias UT = Args[idx - 1];
+ enum op = Args[idx][1 .. $];
+ static if (is(typeof((UT a) => mixin(op ~ "cast(int) a")) RT == return))
+ alias typeCheck = typeCheck!(fail, T, Args[0 .. idx - 1], RT, Args[idx + 1 .. $]);
+ else static if (fail)
+ static assert(0, "Unary `" ~ op ~ "` not supported for type `" ~ UT.stringof ~ "`.");
+ }
+ else static if (isBinaryOp(Args[idx]))
+ {
+ alias LHT = Args[idx - 2];
+ alias RHT = Args[idx - 1];
+ enum op = Args[idx];
+ static if (is(typeof((LHT a, RHT b) => mixin("a " ~ op ~ " b")) RT == return))
+ alias typeCheck = typeCheck!(fail, T, Args[0 .. idx - 2], RT, Args[idx + 1 .. $]);
+ else static if (fail)
+ static assert(0,
+ "Binary `" ~ op ~ "` not supported for types `"
+ ~ LHT.stringof ~ "` and `" ~ RHT.stringof ~ "`.");
+ }
+ else static if (Args[idx] == "=" || isBinaryAssignOp(Args[idx]))
+ {
+ alias RHT = Args[idx - 1];
+ enum op = Args[idx];
+ static if (is(T == __vector(ET[N]), ET, size_t N))
+ {
+ // no `cast(T)` before assignment for vectors
+ static if (is(typeof((T res, RHT b) => mixin("res " ~ op ~ " b")) RT == return)
+ && // workaround https://issues.dlang.org/show_bug.cgi?id=17758
+ (op != "=" || is(Unqual!T == Unqual!RHT)))
+ alias typeCheck = typeCheck!(fail, T, Args[0 .. idx - 1], RT, Args[idx + 1 .. $]);
+ else static if (fail)
+ static assert(0,
+ "Binary op `" ~ op ~ "` not supported for types `"
+ ~ T.stringof ~ "` and `" ~ RHT.stringof ~ "`.");
+ }
+ else
+ {
+ static if (is(typeof((RHT b) => mixin("cast(T) b"))))
+ {
+ static if (is(typeof((T res, T b) => mixin("res " ~ op ~ " b")) RT == return))
+ alias typeCheck = typeCheck!(fail, T, Args[0 .. idx - 1], RT, Args[idx + 1 .. $]);
+ else static if (fail)
+ static assert(0,
+ "Binary op `" ~ op ~ "` not supported for types `"
+ ~ T.stringof ~ "` and `" ~ T.stringof ~ "`.");
+ }
+ else static if (fail)
+ static assert(0,
+ "`cast(" ~ T.stringof ~ ")` not supported for type `" ~ RHT.stringof ~ "`.");
+ }
+ }
+ else
+ static assert(0);
+}
+/// ditto
+template typeCheck(bool fail, T, ResultType)
+{
+ alias typeCheck = ResultType;
+}
+
+version (GNU_OR_LDC)
+{
+ // leave it to the auto-vectorizer
+ enum vectorizeable(E : E[], Args...) = false;
+}
+else
+{
+ // check whether arrayOp is vectorizable
+ template vectorizeable(E : E[], Args...)
+ {
+ static if (is(vec!E))
+ {
+ // type check with vector types
+ enum vectorizeable = is(typeCheck!(false, vec!E, staticMap!(toVecType, Args)));
+ }
+ else
+ enum vectorizeable = false;
+ }
+
+ version (X86_64) unittest
+ {
+ static assert(vectorizeable!(double[], const(double)[], double[], "+", "="));
+ static assert(!vectorizeable!(double[], const(ulong)[], double[], "+", "="));
+ // Vector types are (atm.) not implicitly convertible and would require
+ // lots of SIMD intrinsics. Therefore leave mixed-type array ops to
+ // GDC/LDC's auto-vectorizers.
+ static assert(!vectorizeable!(double[], const(uint)[], uint, "+", "="));
+ }
+}
+
+bool isUnaryOp(scope string op) pure nothrow @safe @nogc
+{
+ return op[0] == 'u';
+}
+
+bool isBinaryOp(scope string op) pure nothrow @safe @nogc
+{
+ if (op == "^^")
+ return true;
+ if (op.length != 1)
+ return false;
+ switch (op[0])
+ {
+ case '+', '-', '*', '/', '%', '|', '&', '^':
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool isBinaryAssignOp(string op)
+{
+ return op.length >= 2 && op[$ - 1] == '=' && isBinaryOp(op[0 .. $ - 1]);
+}
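+
+// Examples (for clarity): `"u-"` is a unary op, `"+"` a binary op, and `"+="` a
+// binary assignment op.
+static assert(isUnaryOp("u-") && isBinaryOp("+") && isBinaryAssignOp("+="));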
+
+// Generate mixin expression to perform scalar arrayOp loop expression, assumes
+// `pos` to be the current slice index, `args` to contain operand values, and
+// `res` the target slice.
+enum scalarExp(Args...) =
+(){
+ string[] stack;
+ size_t argsIdx;
+
+ static if (is(Args[0] == U[], U))
+ alias Type = U;
+ else
+ alias Type = Args[0];
+
+ foreach (i, arg; Args)
+ {
+ static if (is(arg == T[], T))
+ stack ~= "args[" ~ argsIdx++.toString ~ "][pos]";
+ else static if (is(arg))
+ stack ~= "args[" ~ argsIdx++.toString ~ "]";
+ else static if (isUnaryOp(arg))
+ {
+ auto op = arg[0] == 'u' ? arg[1 .. $] : arg;
+ // Explicitly use the old integral promotion rules
+ // See also: https://dlang.org/changelog/2.078.0.html#fix16997
+ static if (is(Type : int))
+ stack[$ - 1] = "cast(typeof(" ~ stack[$ -1] ~ "))" ~ op ~ "cast(int)("~ stack[$ - 1] ~ ")";
+ else
+ stack[$ - 1] = op ~ stack[$ - 1];
+ }
+ else static if (arg == "=")
+ {
+ stack[$ - 1] = "res[pos] = cast(T)(" ~ stack[$ - 1] ~ ")";
+ }
+ else static if (isBinaryAssignOp(arg))
+ {
+ stack[$ - 1] = "res[pos] " ~ arg ~ " cast(T)(" ~ stack[$ - 1] ~ ")";
+ }
+ else static if (isBinaryOp(arg))
+ {
+ stack[$ - 2] = "(" ~ stack[$ - 2] ~ " " ~ arg ~ " " ~ stack[$ - 1] ~ ")";
+ stack.length -= 1;
+ }
+ else
+ assert(0, "Unexpected op " ~ arg);
+ }
+ assert(stack.length == 1);
+ return stack[0];
+}();
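+
+// For example, for `Args = (const(int)[], int, "*", "=")` the generated
+// expression is expected to be roughly
+// `res[pos] = cast(T)((args[0][pos] * args[1]))`.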
+
+// Generate mixin statement to perform vector loop initialization, assumes
+// `args` to contain operand values.
+enum initScalarVecs(Args...) =
+() {
+ size_t scalarsIdx, argsIdx;
+ string res;
+ foreach (arg; Args)
+ {
+ static if (is(arg == T[], T))
+ {
+ ++argsIdx;
+ }
+ else static if (is(arg))
+ res ~= "immutable vec scalar" ~ scalarsIdx++.toString ~ " = args["
+ ~ argsIdx++.toString ~ "];\n";
+ }
+ return res;
+}();
+
+// Generate mixin expression to perform vector arrayOp loop expression, assumes
+// `pos` to be the current slice index, `args` to contain operand values, and
+// `res` the target slice.
+enum vectorExp(Args...) =
+() {
+ size_t scalarsIdx, argsIdx;
+ string[] stack;
+ foreach (arg; Args)
+ {
+ static if (is(arg == T[], T))
+ stack ~= "load(&args[" ~ argsIdx++.toString ~ "][pos])";
+ else static if (is(arg))
+ {
+ ++argsIdx;
+ stack ~= "scalar" ~ scalarsIdx++.toString;
+ }
+ else static if (isUnaryOp(arg))
+ {
+ auto op = arg[0] == 'u' ? arg[1 .. $] : arg;
+ stack[$ - 1] = "unaop!\"" ~ arg ~ "\"(" ~ stack[$ - 1] ~ ")";
+ }
+ else static if (arg == "=")
+ {
+ stack[$ - 1] = "store(&res[pos], " ~ stack[$ - 1] ~ ")";
+ }
+ else static if (isBinaryAssignOp(arg))
+ {
+ stack[$ - 1] = "store(&res[pos], binop!\"" ~ arg[0 .. $ - 1]
+ ~ "\"(load(&res[pos]), " ~ stack[$ - 1] ~ "))";
+ }
+ else static if (isBinaryOp(arg))
+ {
+ stack[$ - 2] = "binop!\"" ~ arg ~ "\"(" ~ stack[$ - 2] ~ ", " ~ stack[$ - 1] ~ ")";
+ stack.length -= 1;
+ }
+ else
+ assert(0, "Unexpected op " ~ arg);
+ }
+ assert(stack.length == 1);
+ return stack[0];
+}();
+
+// other helpers
+
+enum isType(T) = true;
+enum isType(alias a) = false;
+template not(alias tmlp)
+{
+ enum not(Args...) = !tmlp!Args;
+}
+/**
+Find element in `haystack` for which `pred` is true.
+
+Params:
+ pred = the template predicate
+ haystack = elements to search
+Returns:
+ The first index for which `pred!(haystack[index])` is true, or -1 if no element matches.
+ */
+template staticIndexOf(alias pred, haystack...)
+{
+ static if (pred!(haystack[0]))
+ enum staticIndexOf = 0;
+ else
+ {
+ enum next = staticIndexOf!(pred, haystack[1 .. $]);
+ enum staticIndexOf = next == -1 ? -1 : next + 1;
+ }
+}
+/// converts slice types to their element type, preserves anything else
+alias toElementType(E : E[]) = E;
+alias toElementType(S) = S;
+alias toElementType(alias op) = op;
+/// converts slice types to their element type, preserves anything else
+alias toVecType(E : E[]) = vec!E;
+alias toVecType(S) = vec!S;
+alias toVecType(alias op) = op;
+
+string toString(size_t num)
+{
+ import core.internal.string : unsignedToTempString;
+ version (D_BetterC)
+ {
+ // Workaround for https://issues.dlang.org/show_bug.cgi?id=19268
+ if (__ctfe)
+ {
+ char[20] fixedbuf = void;
+ char[] buf = unsignedToTempString(num, fixedbuf);
+ char[] result = new char[buf.length];
+ result[] = buf[];
+ return (() @trusted => cast(string) result)();
+ }
+ else
+ {
+ // Failing at execution rather than during compilation is
+ // not good, but this is in `core.internal` so it should
+ // not be used by the unwary.
+ assert(0, __FUNCTION__ ~ " not available in -betterC except during CTFE.");
+ }
+ }
+ else
+ {
+ char[20] buf = void;
+ return unsignedToTempString(num, buf).idup;
+ }
+}
+
+bool contains(T)(const scope T[] ary, const scope T[] vals...)
+{
+ foreach (v1; ary)
+ foreach (v2; vals)
+ if (v1 == v2)
+ return true;
+ return false;
+}
+
+// tests
+
+version (CoreUnittest) template TT(T...)
+{
+ alias TT = T;
+}
+
+version (CoreUnittest) template _arrayOp(Args...)
+{
+ alias _arrayOp = arrayOp!Args;
+}
+
+unittest
+{
+ static void check(string op, TA, TB, T, size_t N)(TA a, TB b, const scope ref T[N] exp)
+ {
+ T[N] res;
+ _arrayOp!(T[], TA, TB, op, "=")(res[], a, b);
+ foreach (i; 0 .. N)
+ assert(res[i] == exp[i]);
+ }
+
+ static void check2(string unaOp, string binOp, TA, TB, T, size_t N)(TA a, TB b, const scope ref T[N] exp)
+ {
+ T[N] res;
+ _arrayOp!(T[], TA, TB, unaOp, binOp, "=")(res[], a, b);
+ foreach (i; 0 .. N)
+ assert(res[i] == exp[i]);
+ }
+
+ static void test(T, string op, size_t N = 16)(T a, T b, T exp)
+ {
+ T[N] va = a, vb = b, vexp = exp;
+
+ check!op(va[], vb[], vexp);
+ check!op(va[], b, vexp);
+ check!op(a, vb[], vexp);
+ }
+
+ static void test2(T, string unaOp, string binOp, size_t N = 16)(T a, T b, T exp)
+ {
+ T[N] va = a, vb = b, vexp = exp;
+
+ check2!(unaOp, binOp)(va[], vb[], vexp);
+ check2!(unaOp, binOp)(va[], b, vexp);
+ check2!(unaOp, binOp)(a, vb[], vexp);
+ }
+
+ alias UINTS = TT!(ubyte, ushort, uint, ulong);
+ alias INTS = TT!(byte, short, int, long);
+ alias FLOATS = TT!(float, double);
+
+ foreach (T; TT!(UINTS, INTS, FLOATS))
+ {
+ test!(T, "+")(1, 2, 3);
+ test!(T, "-")(3, 2, 1);
+ static if (__traits(compiles, { import std.math; }))
+ test!(T, "^^")(2, 3, 8);
+
+ test2!(T, "u-", "+")(3, 2, 1);
+ }
+
+ foreach (T; TT!(UINTS, INTS))
+ {
+ test!(T, "|")(1, 2, 3);
+ test!(T, "&")(3, 1, 1);
+ test!(T, "^")(3, 1, 2);
+
+ test2!(T, "u~", "+")(3, cast(T)~2, 5);
+ }
+
+ foreach (T; TT!(INTS, FLOATS))
+ {
+ test!(T, "-")(1, 2, -1);
+ test2!(T, "u-", "+")(-3, -2, -1);
+ test2!(T, "u-", "*")(-3, -2, -6);
+ }
+
+ foreach (T; TT!(UINTS, INTS, FLOATS))
+ {
+ test!(T, "*")(2, 3, 6);
+ test!(T, "/")(8, 4, 2);
+ test!(T, "%")(8, 6, 2);
+ }
+}
+
+// test handling of v op= exp
+unittest
+{
+ uint[32] c;
+ arrayOp!(uint[], uint, "+=")(c[], 2);
+ foreach (v; c)
+ assert(v == 2);
+ static if (__traits(compiles, { import std.math; }))
+ {
+ arrayOp!(uint[], uint, "^^=")(c[], 3);
+ foreach (v; c)
+ assert(v == 8);
+ }
+}
+
+// proper error message for UDT lacking certain ops
+unittest
+{
+ static assert(!is(typeof(&arrayOp!(int[4][], int[4], "+="))));
+ static assert(!is(typeof(&arrayOp!(int[4][], int[4], "u-", "="))));
+
+ static struct S
+ {
+ }
+
+ static assert(!is(typeof(&arrayOp!(S[], S, "+="))));
+ static assert(!is(typeof(&arrayOp!(S[], S[], "*", S, "+="))));
+ static struct S2
+ {
+ S2 opBinary(string op)(in S2) @nogc pure nothrow
+ {
+ return this;
+ }
+
+ ref S2 opOpAssign(string op)(in S2) @nogc pure nothrow
+ {
+ return this;
+ }
+ }
+
+ static assert(is(typeof(&arrayOp!(S2[], S2[], S2[], S2, "*", "+", "="))));
+ static assert(is(typeof(&arrayOp!(S2[], S2[], S2, "*", "+="))));
+}
+
+// test mixed type array op
+unittest
+{
+ uint[32] a = 0xF;
+ float[32] res = 2.0f;
+ arrayOp!(float[], const(uint)[], uint, "&", "*=")(res[], a[], 12);
+ foreach (v; res[])
+ assert(v == 24.0f);
+}
+
+// test mixed type array op
+unittest
+{
+ static struct S
+ {
+ float opBinary(string op)(in S) @nogc const pure nothrow
+ {
+ return 2.0f;
+ }
+ }
+
+ float[32] res = 24.0f;
+ S[32] s;
+ arrayOp!(float[], const(S)[], const(S)[], "+", "/=")(res[], s[], s[]);
+ foreach (v; res[])
+ assert(v == 12.0f);
+}
+
+// test scalar after operation argument
+unittest
+{
+ float[32] res, a = 2, b = 3;
+ float c = 4;
+ arrayOp!(float[], const(float)[], const(float)[], "*", float, "+", "=")(res[], a[], b[], c);
+ foreach (v; res[])
+ assert(v == 2 * 3 + 4);
+}
+
+unittest
+{
+ // https://issues.dlang.org/show_bug.cgi?id=17964
+ uint bug(){
+ uint[] a = [1, 2, 3, 5, 6, 7];
+ uint[] b = [1, 2, 3, 5, 6, 7];
+ a[] |= ~b[];
+ return a[1];
+ }
+ enum x = bug();
+}
+
+// https://issues.dlang.org/show_bug.cgi?id=19796
+unittest
+{
+ double[] data = [0.5];
+ double[] result;
+ result.length = data.length;
+ result[] = -data[];
+ assert(result[0] == -0.5);
+}
+
+// https://issues.dlang.org/show_bug.cgi?id=21110
+unittest
+{
+ import core.exception;
+
+ static void assertThrown(T : Throwable, E)(lazy E expression, string msg)
+ {
+ try
+ expression;
+ catch (T)
+ return;
+ assert(0, msg);
+ }
+
+ int[] dst;
+ int[] a;
+ int[] b;
+ a.length = 3;
+ b.length = 3;
+ dst.length = 4;
+
+ void func() { dst[] = a[] + b[]; }
+ assertThrown!AssertError(func(), "Array operations with mismatched lengths must throw an error");
+}
diff --git a/libphobos/libdruntime/core/internal/array/utils.d b/libphobos/libdruntime/core/internal/array/utils.d
new file mode 100644
index 0000000..7a829a0
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/array/utils.d
@@ -0,0 +1,121 @@
+/**
+ This module contains utility functions to help the implementation of the runtime hooks.
+
+ Copyright: Copyright Digital Mars 2000 - 2019.
+ License: Distributed under the
+ $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ (See accompanying file LICENSE)
+ Source: $(DRUNTIMESRC core/internal/_array/_utils.d)
+*/
+module core.internal.array.utils;
+
+import core.internal.traits : Parameters;
+
+private auto gcStatsPure() nothrow pure
+{
+ import core.memory : GC;
+
+ auto impureBypass = cast(GC.Stats function() pure nothrow)&GC.stats;
+ return impureBypass();
+}
+
+private ulong accumulatePure(string file, int line, string funcname, string name, ulong size) nothrow pure
+{
+ static ulong impureBypass(string file, int line, string funcname, string name, ulong size) @nogc nothrow
+ {
+ import core.internal.traits : externDFunc;
+
+ alias accumulate = externDFunc!("rt.profilegc.accumulate", void function(string file, uint line, string funcname, string type, ulong sz) @nogc nothrow);
+ accumulate(file, line, funcname, name, size);
+ return size;
+ }
+
+ auto func = cast(ulong function(string file, int line, string funcname, string name, ulong size) @nogc nothrow pure)&impureBypass;
+ return func(file, line, funcname, name, size);
+}
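+
+// Note: gcStatsPure and accumulatePure deliberately cast impure function
+// pointers to `pure` ones so that the tracing code can be reached from the
+// `pure` hook below; this purity bypass is confined to this module.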
+
+/**
+ * TraceGC wrapper around runtime hook `Hook`.
+ * Params:
+ * T = Type to report to the accumulator
+ * Hook = The hook to wrap
+ * errorMessage = The error message used in case the `D_TypeInfo` version is not defined
+ * file = File that called `_d_HookTraceImpl`
+ * line = Line inside of `file` that called `_d_HookTraceImpl`
+ * funcname = Function that called `_d_HookTraceImpl`
+ * parameters = Parameters that will be used to call `Hook`
+ * Bugs:
+ * This function template sits between the compiler and a much older runtime hook that bypassed safety,
+ * purity, and throwability checks. To prevent breaking existing code, this function template
+ * is temporarily declared `@trusted pure` until the implementation can be brought up to modern D expectations.
+*/
+auto _d_HookTraceImpl(T, alias Hook, string errorMessage)(string file, int line, string funcname, Parameters!Hook parameters) @trusted pure
+{
+ version (D_TypeInfo)
+ {
+ pragma(inline, false);
+ string name = T.stringof;
+
+ // FIXME: use rt.tracegc.accumulator when it is accessible in the future.
+ version (tracegc)
+ {
+ import core.stdc.stdio;
+
+ printf("%sTrace file = '%.*s' line = %d function = '%.*s' type = %.*s\n",
+ Hook.stringof.ptr,
+ file.length, file.ptr,
+ line,
+ funcname.length, funcname.ptr,
+ name.length, name.ptr
+ );
+ }
+
+ ulong currentlyAllocated = gcStatsPure().allocatedInCurrentThread;
+
+ scope(exit)
+ {
+ ulong size = gcStatsPure().allocatedInCurrentThread - currentlyAllocated;
+ if (size > 0)
+ if (!accumulatePure(file, line, funcname, name, size)) {
+ // This 'if' and 'assert' are needed to force the compiler not to remove the call to
+ // `accumulatePure`. It really wants to do that while optimizing, as the function is
+ // `pure` and does not influence the result of this hook.
+
+ // `accumulatePure` returns the value of `size`, which can never be zero due to the
+ // previous 'if'. So this assert will never be triggered.
+ assert(0);
+ }
+ }
+ return Hook(parameters);
+ }
+ else
+ assert(0, errorMessage);
+}
+
+/**
+ * Check if the function `F` is callable in a `nothrow` scope.
+ * Params:
+ * F = Function that does not take any parameters
+ * Returns:
+ * `true` if the function is callable in a `nothrow` scope.
+ */
+enum isNoThrow(alias F) = is(typeof(() nothrow { F(); }));
+
+/**
+ * Check if the type `T`'s postblit can be called in a `nothrow` scope, if it exists.
+ * Params:
+ * T = Type to check
+ * Returns:
+ * `true` if the postblit is callable in a `nothrow` scope, or if it does not exist.
+ */
+template isPostblitNoThrow(T) {
+ static if (__traits(isStaticArray, T))
+ enum isPostblitNoThrow = isPostblitNoThrow!(typeof(T.init[0]));
+ else static if (__traits(hasMember, T, "__xpostblit") &&
+ // Bugzilla 14746: Check that it's the exact member of S.
+ __traits(isSame, T, __traits(parent, T.init.__xpostblit)))
+ enum isPostblitNoThrow = isNoThrow!(T.init.__xpostblit);
+ else
+ enum isPostblitNoThrow = true;
+}
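+
+// Illustrative example (struct names are arbitrary): a type whose postblit may
+// throw is reported as not nothrow, while a type without a postblit is.
+unittest
+{
+ static struct ThrowingCopy
+ {
+ this(this) { throw new Exception("copy failed"); }
+ }
+ static struct PlainData { int x; }
+
+ static assert(!isPostblitNoThrow!ThrowingCopy);
+ static assert( isPostblitNoThrow!PlainData);
+}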
diff --git a/libphobos/libdruntime/core/internal/arrayop.d b/libphobos/libdruntime/core/internal/arrayop.d
deleted file mode 100644
index 34531d8..0000000
--- a/libphobos/libdruntime/core/internal/arrayop.d
+++ /dev/null
@@ -1,451 +0,0 @@
-module core.internal.arrayop;
-import core.internal.traits : Filter, Unqual;
-
-version (GNU) version = GNU_OR_LDC;
-version (LDC) version = GNU_OR_LDC;
-
-/**
- * Perform array (vector) operations and store the result in `res`. Operand
- * types and operations are passed as template arguments in Reverse Polish
- * Notation (RPN).
-
- * Operands can be slices or scalar types. The unqualified element types of all
- * slices must be `T`, scalar types must be implicitly convertible to `T`.
- *
- * Operations are encoded as strings, e.g. `"+"`, `"%"`, `"*="`. Unary
- * operations are prefixed with "u", e.g. `"u-"`, `"u~"`. Only the last
- * operation can and must be an assignment (`"="`) or op-assignment (`"op="`).
- *
- * All slice operands must have the same length as the result slice.
- *
- * Params: T[] = type of result slice
- * Args = operand types and operations in RPN
- * res = the slice in which to store the results
- * args = operand values
- *
- * Returns: the slice containing the result
- */
-T[] arrayOp(T : T[], Args...)(T[] res, Filter!(isType, Args) args) @trusted @nogc pure nothrow
-{
- enum check = opsSupported!(true, T, Filter!(not!isType, Args)); // must support all scalar ops
-
- size_t pos;
- static if (vectorizeable!(T[], Args))
- {
- alias vec = .vec!T;
- alias load = .load!(T, vec.length);
- alias store = .store!(T, vec.length);
-
- // Given that there are at most as many scalars broadcast as there are
- // operations in any `ary[] = ary[] op const op const`, it should always be
- // worthwhile to choose vector operations.
- if (res.length >= vec.length)
- {
- mixin(initScalarVecs!Args);
-
- auto n = res.length / vec.length;
- do
- {
- mixin(vectorExp!Args ~ ";");
- pos += vec.length;
- }
- while (--n);
- }
- }
- for (; pos < res.length; ++pos)
- mixin(scalarExp!Args ~ ";");
-
- return res;
-}
-
-private:
-
-// SIMD helpers
-
-version (DigitalMars)
-{
- import core.simd;
-
- template vec(T)
- {
- enum regsz = 16; // SSE2
- enum N = regsz / T.sizeof;
- alias vec = __vector(T[N]);
- }
-
- void store(T, size_t N)(T* p, in __vector(T[N]) val)
- {
- pragma(inline, true);
- alias vec = __vector(T[N]);
-
- static if (is(T == float))
- cast(void) __simd_sto(XMM.STOUPS, *cast(vec*) p, val);
- else static if (is(T == double))
- cast(void) __simd_sto(XMM.STOUPD, *cast(vec*) p, val);
- else
- cast(void) __simd_sto(XMM.STODQU, *cast(vec*) p, val);
- }
-
- const(__vector(T[N])) load(T, size_t N)(in T* p)
- {
- import core.simd;
-
- pragma(inline, true);
- alias vec = __vector(T[N]);
-
- static if (is(T == float))
- return __simd(XMM.LODUPS, *cast(const vec*) p);
- else static if (is(T == double))
- return __simd(XMM.LODUPD, *cast(const vec*) p);
- else
- return __simd(XMM.LODDQU, *cast(const vec*) p);
- }
-
- __vector(T[N]) binop(string op, T, size_t N)(in __vector(T[N]) a, in __vector(T[N]) b)
- {
- pragma(inline, true);
- return mixin("a " ~ op ~ " b");
- }
-
- __vector(T[N]) unaop(string op, T, size_t N)(in __vector(T[N]) a)
- if (op[0] == 'u')
- {
- pragma(inline, true);
- return mixin(op[1 .. $] ~ "a");
- }
-}
-
-// mixin gen
-
-// Check whether operations `ops` are supported for type `T`. Fails with a human-friendly static assert message, if `fail` is true.
-template opsSupported(bool fail, T, ops...) if (ops.length > 1)
-{
- enum opsSupported = opsSupported!(fail, T, ops[0 .. $ / 2])
- && opsSupported!(fail, T, ops[$ / 2 .. $]);
-}
-
-template opsSupported(bool fail, T, string op)
-{
- static if (isUnaryOp(op))
- {
- enum opsSupported = is(typeof((T a) => mixin(op[1 .. $] ~ " a")));
- static assert(!fail || opsSupported,
- "Unary op `" ~ op[1 .. $] ~ "` not supported for element type " ~ T.stringof ~ ".");
- }
- else
- {
- enum opsSupported = is(typeof((T a, T b) => mixin("a " ~ op ~ " b")));
- static assert(!fail || opsSupported,
- "Binary op `" ~ op ~ "` not supported for element type " ~ T.stringof ~ ".");
- }
-}
-
-// check whether slices have the unqualified element type `E` and scalars are implicitly convertible to `E`
-// i.e. filter out things like float[] = float[] / size_t[]
-enum compatibleVecTypes(E, T : T[]) = is(Unqual!T == Unqual!E); // array elem types must be same (maybe add cvtpi2ps)
-enum compatibleVecTypes(E, T) = is(T : E); // scalar must be convertible to target elem type
-enum compatibleVecTypes(E, Types...) = compatibleVecTypes!(E, Types[0 .. $ / 2])
- && compatibleVecTypes!(E, Types[$ / 2 .. $]);
-
-version (GNU_OR_LDC)
-{
- // leave it to the auto-vectorizer
- enum vectorizeable(E : E[], Args...) = false;
-}
-else
-{
- // check whether arrayOp is vectorizable
- template vectorizeable(E : E[], Args...)
- {
- static if (is(vec!E))
- enum vectorizeable = opsSupported!(false, vec!E, Filter!(not!isType, Args))
- && compatibleVecTypes!(E, Filter!(isType, Args));
- else
- enum vectorizeable = false;
- }
-
- version (X86_64) unittest
- {
- static assert(vectorizeable!(double[], const(double)[], double[], "+", "="));
- static assert(!vectorizeable!(double[], const(ulong)[], double[], "+", "="));
- }
-}
-
-bool isUnaryOp(string op)
-{
- return op[0] == 'u';
-}
-
-bool isBinaryOp(string op)
-{
- if (op == "^^")
- return true;
- if (op.length != 1)
- return false;
- switch (op[0])
- {
- case '+', '-', '*', '/', '%', '|', '&', '^':
- return true;
- default:
- return false;
- }
-}
-
-bool isBinaryAssignOp(string op)
-{
- return op.length >= 2 && op[$ - 1] == '=' && isBinaryOp(op[0 .. $ - 1]);
-}
-
-// Generate mixin expression to perform scalar arrayOp loop expression, assumes
-// `pos` to be the current slice index, `args` to contain operand values, and
-// `res` the target slice.
-string scalarExp(Args...)()
-{
- string[] stack;
- size_t argsIdx;
- foreach (i, arg; Args)
- {
- static if (is(arg == T[], T))
- stack ~= "args[" ~ argsIdx++.toString ~ "][pos]";
- else static if (is(arg))
- stack ~= "args[" ~ argsIdx++.toString ~ "]";
- else static if (isUnaryOp(arg))
- {
- auto op = arg[0] == 'u' ? arg[1 .. $] : arg;
- stack[$ - 1] = op ~ stack[$ - 1];
- }
- else static if (arg == "=")
- {
- stack[$ - 1] = "res[pos] = cast(T)(" ~ stack[$ - 1] ~ ")";
- }
- else static if (isBinaryAssignOp(arg))
- {
- stack[$ - 1] = "res[pos] " ~ arg ~ " cast(T)(" ~ stack[$ - 1] ~ ")";
- }
- else static if (isBinaryOp(arg))
- {
- stack[$ - 2] = "(cast(T)(" ~ stack[$ - 2] ~ " " ~ arg ~ " " ~ stack[$ - 1] ~ "))";
- stack.length -= 1;
- }
- else
- assert(0, "Unexpected op " ~ arg);
- }
- assert(stack.length == 1);
- return stack[0];
-}
-
-// Generate mixin statement to perform vector loop initialization, assumes
-// `args` to contain operand values.
-string initScalarVecs(Args...)()
-{
- size_t scalarsIdx;
- string res;
- foreach (aidx, arg; Args)
- {
- static if (is(arg == T[], T))
- {
- }
- else static if (is(arg))
- res ~= "immutable vec scalar" ~ scalarsIdx++.toString ~ " = args["
- ~ aidx.toString ~ "];\n";
- }
- return res;
-}
-
-// Generate mixin expression to perform vector arrayOp loop expression, assumes
-// `pos` to be the current slice index, `args` to contain operand values, and
-// `res` the target slice.
-string vectorExp(Args...)()
-{
- size_t scalarsIdx, argsIdx;
- string[] stack;
- foreach (i, arg; Args)
- {
- static if (is(arg == T[], T))
- stack ~= "load(&args[" ~ argsIdx++.toString ~ "][pos])";
- else static if (is(arg))
- {
- ++argsIdx;
- stack ~= "scalar" ~ scalarsIdx++.toString;
- }
- else static if (isUnaryOp(arg))
- {
- auto op = arg[0] == 'u' ? arg[1 .. $] : arg;
- stack[$ - 1] = "unaop!\"" ~ arg ~ "\"(" ~ stack[$ - 1] ~ ")";
- }
- else static if (arg == "=")
- {
- stack[$ - 1] = "store(&res[pos], " ~ stack[$ - 1] ~ ")";
- }
- else static if (isBinaryAssignOp(arg))
- {
- stack[$ - 1] = "store(&res[pos], binop!\"" ~ arg[0 .. $ - 1]
- ~ "\"(load(&res[pos]), " ~ stack[$ - 1] ~ "))";
- }
- else static if (isBinaryOp(arg))
- {
- stack[$ - 2] = "binop!\"" ~ arg ~ "\"(" ~ stack[$ - 2] ~ ", " ~ stack[$ - 1] ~ ")";
- stack.length -= 1;
- }
- else
- assert(0, "Unexpected op " ~ arg);
- }
- assert(stack.length == 1);
- return stack[0];
-}
-
-// other helpers
-
-enum isType(T) = true;
-enum isType(alias a) = false;
-template not(alias tmlp)
-{
- enum not(Args...) = !tmlp!Args;
-}
-
-string toString(size_t num)
-{
- import core.internal.string : unsignedToTempString;
-
- char[20] buf = void;
- return unsignedToTempString(num, buf).idup;
-}
-
-bool contains(T)(in T[] ary, in T[] vals...)
-{
- foreach (v1; ary)
- foreach (v2; vals)
- if (v1 == v2)
- return true;
- return false;
-}
-
-// tests
-
-version (unittest) template TT(T...)
-{
- alias TT = T;
-}
-
-version (unittest) template _arrayOp(Args...)
-{
- alias _arrayOp = arrayOp!Args;
-}
-
-unittest
-{
- static void check(string op, TA, TB, T, size_t N)(TA a, TB b, in ref T[N] exp)
- {
- T[N] res;
- _arrayOp!(T[], TA, TB, op, "=")(res[], a, b);
- foreach (i; 0 .. N)
- assert(res[i] == exp[i]);
- }
-
- static void check2(string unaOp, string binOp, TA, TB, T, size_t N)(TA a, TB b, in ref T[N] exp)
- {
- T[N] res;
- _arrayOp!(T[], TA, TB, unaOp, binOp, "=")(res[], a, b);
- foreach (i; 0 .. N)
- assert(res[i] == exp[i]);
- }
-
- static void test(T, string op, size_t N = 16)(T a, T b, T exp)
- {
- T[N] va = a, vb = b, vexp = exp;
-
- check!op(va[], vb[], vexp);
- check!op(va[], b, vexp);
- check!op(a, vb[], vexp);
- }
-
- static void test2(T, string unaOp, string binOp, size_t N = 16)(T a, T b, T exp)
- {
- T[N] va = a, vb = b, vexp = exp;
-
- check2!(unaOp, binOp)(va[], vb[], vexp);
- check2!(unaOp, binOp)(va[], b, vexp);
- check2!(unaOp, binOp)(a, vb[], vexp);
- }
-
- alias UINTS = TT!(ubyte, ushort, uint, ulong);
- alias INTS = TT!(byte, short, int, long);
- alias FLOATS = TT!(float, double);
-
- foreach (T; TT!(UINTS, INTS, FLOATS))
- {
- test!(T, "+")(1, 2, 3);
- test!(T, "-")(3, 2, 1);
- static if (__traits(compiles, { import std.math; }))
- test!(T, "^^")(2, 3, 8);
-
- test2!(T, "u-", "+")(3, 2, 1);
- }
-
- foreach (T; TT!(UINTS, INTS))
- {
- test!(T, "|")(1, 2, 3);
- test!(T, "&")(3, 1, 1);
- test!(T, "^")(3, 1, 2);
-
- test2!(T, "u~", "+")(3, cast(T)~2, 5);
- }
-
- foreach (T; TT!(INTS, FLOATS))
- {
- test!(T, "-")(1, 2, -1);
- test2!(T, "u-", "+")(-3, -2, -1);
- test2!(T, "u-", "*")(-3, -2, -6);
- }
-
- foreach (T; TT!(UINTS, INTS, FLOATS))
- {
- test!(T, "*")(2, 3, 6);
- test!(T, "/")(8, 4, 2);
- test!(T, "%")(8, 6, 2);
- }
-}
-
-// test handling of v op= exp
-unittest
-{
- uint[32] c;
- arrayOp!(uint[], uint, "+=")(c[], 2);
- foreach (v; c)
- assert(v == 2);
- static if (__traits(compiles, { import std.math; }))
- {
- arrayOp!(uint[], uint, "^^=")(c[], 3);
- foreach (v; c)
- assert(v == 8);
- }
-}
-
-// proper error message for UDT lacking certain ops
-unittest
-{
- static assert(!is(typeof(&arrayOp!(int[4][], int[4], "+="))));
- static assert(!is(typeof(&arrayOp!(int[4][], int[4], "u-", "="))));
-
- static struct S
- {
- }
-
- static assert(!is(typeof(&arrayOp!(S[], S, "+="))));
- static assert(!is(typeof(&arrayOp!(S[], S[], "*", S, "+="))));
- static struct S2
- {
- S2 opBinary(string op)(in S2) @nogc pure nothrow
- {
- return this;
- }
-
- ref S2 opOpAssign(string op)(in S2) @nogc pure nothrow
- {
- return this;
- }
- }
-
- static assert(is(typeof(&arrayOp!(S2[], S2[], S2[], S2, "*", "+", "="))));
- static assert(is(typeof(&arrayOp!(S2[], S2[], S2, "*", "+="))));
-}
diff --git a/libphobos/libdruntime/core/internal/atomic.d b/libphobos/libdruntime/core/internal/atomic.d
new file mode 100644
index 0000000..3036ea7
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/atomic.d
@@ -0,0 +1,1141 @@
+/**
+* The core.internal.atomic module contains the low-level atomic features available in hardware.
+* This module may be a routing layer for compiler intrinsics.
+*
+* Copyright: Copyright Manu Evans 2019.
+* License: $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost License 1.0)
+* Authors: Sean Kelly, Alex Rønne Petersen, Manu Evans
+* Source: $(DRUNTIMESRC core/internal/_atomic.d)
+*/
+
+module core.internal.atomic;
+
+import core.atomic : MemoryOrder, has128BitCAS;
+
+version (DigitalMars)
+{
+ private
+ {
+ enum : int
+ {
+ AX, BX, CX, DX, DI, SI, R8, R9
+ }
+
+ immutable string[4][8] registerNames = [
+ [ "AL", "AX", "EAX", "RAX" ],
+ [ "BL", "BX", "EBX", "RBX" ],
+ [ "CL", "CX", "ECX", "RCX" ],
+ [ "DL", "DX", "EDX", "RDX" ],
+ [ "DIL", "DI", "EDI", "RDI" ],
+ [ "SIL", "SI", "ESI", "RSI" ],
+ [ "R8B", "R8W", "R8D", "R8" ],
+ [ "R9B", "R9W", "R9D", "R9" ],
+ ];
+
+ template RegIndex(T)
+ {
+ static if (T.sizeof == 1)
+ enum RegIndex = 0;
+ else static if (T.sizeof == 2)
+ enum RegIndex = 1;
+ else static if (T.sizeof == 4)
+ enum RegIndex = 2;
+ else static if (T.sizeof == 8)
+ enum RegIndex = 3;
+ else
+ static assert(false, "Invalid type");
+ }
+
+ enum SizedReg(int reg, T = size_t) = registerNames[reg][RegIndex!T];
+ }
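+
+ // Illustrative checks (examples only): SizedReg selects the register name
+ // matching the operand size, e.g. the 8-bit and 64-bit forms of the
+ // accumulator register.
+ static assert(SizedReg!(AX, ubyte) == "AL");
+ static assert(SizedReg!(AX, ulong) == "RAX");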
+
+ inout(T) atomicLoad(MemoryOrder order = MemoryOrder.seq, T)(inout(T)* src) pure nothrow @nogc @trusted
+ if (CanCAS!T)
+ {
+ static assert(order != MemoryOrder.rel, "invalid MemoryOrder for atomicLoad()");
+
+ static if (T.sizeof == size_t.sizeof * 2)
+ {
+ version (D_InlineAsm_X86)
+ {
+ asm pure nothrow @nogc @trusted
+ {
+ push EDI;
+ push EBX;
+ mov EBX, 0;
+ mov ECX, 0;
+ mov EAX, 0;
+ mov EDX, 0;
+ mov EDI, src;
+ lock; cmpxchg8b [EDI];
+ pop EBX;
+ pop EDI;
+ }
+ }
+ else version (D_InlineAsm_X86_64)
+ {
+ version (Windows)
+ {
+ static if (RegisterReturn!T)
+ {
+ enum SrcPtr = SizedReg!CX;
+ enum RetPtr = null;
+ }
+ else
+ {
+ enum SrcPtr = SizedReg!DX;
+ enum RetPtr = SizedReg!CX;
+ }
+
+ mixin (simpleFormat(q{
+ asm pure nothrow @nogc @trusted
+ {
+ naked;
+ push RBX;
+ mov R8, %0;
+ ?1 mov R9, %1;
+ mov RBX, 0;
+ mov RCX, 0;
+ mov RAX, 0;
+ mov RDX, 0;
+ lock; cmpxchg16b [R8];
+ ?1 mov [R9], RAX;
+ ?1 mov 8[R9], RDX;
+ pop RBX;
+ ret;
+ }
+ }, SrcPtr, RetPtr));
+ }
+ else
+ {
+ asm pure nothrow @nogc @trusted
+ {
+ naked;
+ push RBX;
+ mov RBX, 0;
+ mov RCX, 0;
+ mov RAX, 0;
+ mov RDX, 0;
+ lock; cmpxchg16b [RDI];
+ pop RBX;
+ ret;
+ }
+ }
+ }
+ }
+ else static if (needsLoadBarrier!order)
+ {
+ version (D_InlineAsm_X86)
+ {
+ enum SrcReg = SizedReg!CX;
+ enum ZeroReg = SizedReg!(DX, T);
+ enum ResReg = SizedReg!(AX, T);
+
+ mixin (simpleFormat(q{
+ asm pure nothrow @nogc @trusted
+ {
+ mov %1, 0;
+ mov %2, 0;
+ mov %0, src;
+ lock; cmpxchg [%0], %1;
+ }
+ }, SrcReg, ZeroReg, ResReg));
+ }
+ else version (D_InlineAsm_X86_64)
+ {
+ version (Windows)
+ enum SrcReg = SizedReg!CX;
+ else
+ enum SrcReg = SizedReg!DI;
+ enum ZeroReg = SizedReg!(DX, T);
+ enum ResReg = SizedReg!(AX, T);
+
+ mixin (simpleFormat(q{
+ asm pure nothrow @nogc @trusted
+ {
+ naked;
+ mov %1, 0;
+ mov %2, 0;
+ lock; cmpxchg [%0], %1;
+ ret;
+ }
+ }, SrcReg, ZeroReg, ResReg));
+ }
+ }
+ else
+ return *src;
+ }
+
+ void atomicStore(MemoryOrder order = MemoryOrder.seq, T)(T* dest, T value) pure nothrow @nogc @trusted
+ if (CanCAS!T)
+ {
+ static assert(order != MemoryOrder.acq, "Invalid MemoryOrder for atomicStore()");
+
+ static if (T.sizeof == size_t.sizeof * 2)
+ {
+ version (D_InlineAsm_X86)
+ {
+ asm pure nothrow @nogc @trusted
+ {
+ push EDI;
+ push EBX;
+ lea EDI, value;
+ mov EBX, [EDI];
+ mov ECX, 4[EDI];
+ mov EDI, dest;
+ mov EAX, [EDI];
+ mov EDX, 4[EDI];
+ L1: lock; cmpxchg8b [EDI];
+ jne L1;
+ pop EBX;
+ pop EDI;
+ }
+ }
+ else version (D_InlineAsm_X86_64)
+ {
+ version (Windows)
+ {
+ asm pure nothrow @nogc @trusted
+ {
+ naked;
+ push RBX;
+ mov R8, RDX;
+ mov RAX, [RDX];
+ mov RDX, 8[RDX];
+ mov RBX, [RCX];
+ mov RCX, 8[RCX];
+ L1: lock; cmpxchg16b [R8];
+ jne L1;
+ pop RBX;
+ ret;
+ }
+ }
+ else
+ {
+ asm pure nothrow @nogc @trusted
+ {
+ naked;
+ push RBX;
+ mov RBX, RDI;
+ mov RCX, RSI;
+ mov RDI, RDX;
+ mov RAX, [RDX];
+ mov RDX, 8[RDX];
+ L1: lock; cmpxchg16b [RDI];
+ jne L1;
+ pop RBX;
+ ret;
+ }
+ }
+ }
+ }
+ else static if (needsStoreBarrier!order)
+ atomicExchange!(order, false)(dest, value);
+ else
+ *dest = value;
+ }
+
+ T atomicFetchAdd(MemoryOrder order = MemoryOrder.seq, bool result = true, T)(T* dest, T value) pure nothrow @nogc @trusted
+ if (is(T : ulong))
+ {
+ version (D_InlineAsm_X86)
+ {
+ static assert(T.sizeof <= 4, "64bit atomicFetchAdd not supported on 32bit target." );
+
+ enum DestReg = SizedReg!DX;
+ enum ValReg = SizedReg!(AX, T);
+
+ mixin (simpleFormat(q{
+ asm pure nothrow @nogc @trusted
+ {
+ mov %1, value;
+ mov %0, dest;
+ lock; xadd[%0], %1;
+ }
+ }, DestReg, ValReg));
+ }
+ else version (D_InlineAsm_X86_64)
+ {
+ version (Windows)
+ {
+ enum DestReg = SizedReg!DX;
+ enum ValReg = SizedReg!(CX, T);
+ }
+ else
+ {
+ enum DestReg = SizedReg!SI;
+ enum ValReg = SizedReg!(DI, T);
+ }
+ enum ResReg = result ? SizedReg!(AX, T) : null;
+
+ mixin (simpleFormat(q{
+ asm pure nothrow @nogc @trusted
+ {
+ naked;
+ lock; xadd[%0], %1;
+ ?2 mov %2, %1;
+ ret;
+ }
+ }, DestReg, ValReg, ResReg));
+ }
+ else
+ static assert (false, "Unsupported architecture.");
+ }
+
+ T atomicFetchSub(MemoryOrder order = MemoryOrder.seq, bool result = true, T)(T* dest, T value) pure nothrow @nogc @trusted
+ if (is(T : ulong))
+ {
+ return atomicFetchAdd(dest, cast(T)-cast(IntOrLong!T)value);
+ }
+
+ T atomicExchange(MemoryOrder order = MemoryOrder.seq, bool result = true, T)(T* dest, T value) pure nothrow @nogc @trusted
+ if (CanCAS!T)
+ {
+ version (D_InlineAsm_X86)
+ {
+ static assert(T.sizeof <= 4, "64bit atomicExchange not supported on 32bit target." );
+
+ enum DestReg = SizedReg!CX;
+ enum ValReg = SizedReg!(AX, T);
+
+ mixin (simpleFormat(q{
+ asm pure nothrow @nogc @trusted
+ {
+ mov %1, value;
+ mov %0, dest;
+ xchg [%0], %1;
+ }
+ }, DestReg, ValReg));
+ }
+ else version (D_InlineAsm_X86_64)
+ {
+ version (Windows)
+ {
+ enum DestReg = SizedReg!DX;
+ enum ValReg = SizedReg!(CX, T);
+ }
+ else
+ {
+ enum DestReg = SizedReg!SI;
+ enum ValReg = SizedReg!(DI, T);
+ }
+ enum ResReg = result ? SizedReg!(AX, T) : null;
+
+ mixin (simpleFormat(q{
+ asm pure nothrow @nogc @trusted
+ {
+ naked;
+ xchg [%0], %1;
+ ?2 mov %2, %1;
+ ret;
+ }
+ }, DestReg, ValReg, ResReg));
+ }
+ else
+ static assert (false, "Unsupported architecture.");
+ }
+
+ alias atomicCompareExchangeWeak = atomicCompareExchangeStrong;
+
+ bool atomicCompareExchangeStrong(MemoryOrder succ = MemoryOrder.seq, MemoryOrder fail = MemoryOrder.seq, T)(T* dest, T* compare, T value) pure nothrow @nogc @trusted
+ if (CanCAS!T)
+ {
+ version (D_InlineAsm_X86)
+ {
+ static if (T.sizeof <= 4)
+ {
+ enum DestAddr = SizedReg!CX;
+ enum CmpAddr = SizedReg!DI;
+ enum Val = SizedReg!(DX, T);
+ enum Cmp = SizedReg!(AX, T);
+
+ mixin (simpleFormat(q{
+ asm pure nothrow @nogc @trusted
+ {
+ push %1;
+ mov %2, value;
+ mov %1, compare;
+ mov %3, [%1];
+ mov %0, dest;
+ lock; cmpxchg [%0], %2;
+ mov [%1], %3;
+ setz AL;
+ pop %1;
+ }
+ }, DestAddr, CmpAddr, Val, Cmp));
+ }
+ else static if (T.sizeof == 8)
+ {
+ asm pure nothrow @nogc @trusted
+ {
+ push EDI;
+ push EBX;
+ lea EDI, value;
+ mov EBX, [EDI];
+ mov ECX, 4[EDI];
+ mov EDI, compare;
+ mov EAX, [EDI];
+ mov EDX, 4[EDI];
+ mov EDI, dest;
+ lock; cmpxchg8b [EDI];
+ mov EDI, compare;
+ mov [EDI], EAX;
+ mov 4[EDI], EDX;
+ setz AL;
+ pop EBX;
+ pop EDI;
+ }
+ }
+ else
+ static assert(T.sizeof <= 8, "128bit atomicCompareExchangeStrong not supported on 32bit target." );
+ }
+ else version (D_InlineAsm_X86_64)
+ {
+ static if (T.sizeof <= 8)
+ {
+ version (Windows)
+ {
+ enum DestAddr = SizedReg!R8;
+ enum CmpAddr = SizedReg!DX;
+ enum Val = SizedReg!(CX, T);
+ }
+ else
+ {
+ enum DestAddr = SizedReg!DX;
+ enum CmpAddr = SizedReg!SI;
+ enum Val = SizedReg!(DI, T);
+ }
+ enum Res = SizedReg!(AX, T);
+
+ mixin (simpleFormat(q{
+ asm pure nothrow @nogc @trusted
+ {
+ naked;
+ mov %3, [%1];
+ lock; cmpxchg [%0], %2;
+ jne compare_fail;
+ mov AL, 1;
+ ret;
+ compare_fail:
+ mov [%1], %3;
+ xor AL, AL;
+ ret;
+ }
+ }, DestAddr, CmpAddr, Val, Res));
+ }
+ else
+ {
+ version (Windows)
+ {
+ asm pure nothrow @nogc @trusted
+ {
+ naked;
+ push RBX;
+ mov R9, RDX;
+ mov RAX, [RDX];
+ mov RDX, 8[RDX];
+ mov RBX, [RCX];
+ mov RCX, 8[RCX];
+ lock; cmpxchg16b [R8];
+ pop RBX;
+ jne compare_fail;
+ mov AL, 1;
+ ret;
+ compare_fail:
+ mov [R9], RAX;
+ mov 8[R9], RDX;
+ xor AL, AL;
+ ret;
+ }
+ }
+ else
+ {
+ asm pure nothrow @nogc @trusted
+ {
+ naked;
+ push RBX;
+ mov R8, RCX;
+ mov R9, RDX;
+ mov RAX, [RDX];
+ mov RDX, 8[RDX];
+ mov RBX, RDI;
+ mov RCX, RSI;
+ lock; cmpxchg16b [R8];
+ pop RBX;
+ jne compare_fail;
+ mov AL, 1;
+ ret;
+ compare_fail:
+ mov [R9], RAX;
+ mov 8[R9], RDX;
+ xor AL, AL;
+ ret;
+ }
+ }
+ }
+ }
+ else
+ static assert (false, "Unsupported architecture.");
+ }
+
+ alias atomicCompareExchangeWeakNoResult = atomicCompareExchangeStrongNoResult;
+
+ bool atomicCompareExchangeStrongNoResult(MemoryOrder succ = MemoryOrder.seq, MemoryOrder fail = MemoryOrder.seq, T)(T* dest, const T compare, T value) pure nothrow @nogc @trusted
+ if (CanCAS!T)
+ {
+ version (D_InlineAsm_X86)
+ {
+ static if (T.sizeof <= 4)
+ {
+ enum DestAddr = SizedReg!CX;
+ enum Cmp = SizedReg!(AX, T);
+ enum Val = SizedReg!(DX, T);
+
+ mixin (simpleFormat(q{
+ asm pure nothrow @nogc @trusted
+ {
+ mov %2, value;
+ mov %1, compare;
+ mov %0, dest;
+ lock; cmpxchg [%0], %2;
+ setz AL;
+ }
+ }, DestAddr, Cmp, Val));
+ }
+ else static if (T.sizeof == 8)
+ {
+ asm pure nothrow @nogc @trusted
+ {
+ push EDI;
+ push EBX;
+ lea EDI, value;
+ mov EBX, [EDI];
+ mov ECX, 4[EDI];
+ lea EDI, compare;
+ mov EAX, [EDI];
+ mov EDX, 4[EDI];
+ mov EDI, dest;
+ lock; cmpxchg8b [EDI];
+ setz AL;
+ pop EBX;
+ pop EDI;
+ }
+ }
+ else
+ static assert(T.sizeof <= 8, "128bit atomicCompareExchangeStrong not supported on 32bit target." );
+ }
+ else version (D_InlineAsm_X86_64)
+ {
+ static if (T.sizeof <= 8)
+ {
+ version (Windows)
+ {
+ enum DestAddr = SizedReg!R8;
+ enum Cmp = SizedReg!(DX, T);
+ enum Val = SizedReg!(CX, T);
+ }
+ else
+ {
+ enum DestAddr = SizedReg!DX;
+ enum Cmp = SizedReg!(SI, T);
+ enum Val = SizedReg!(DI, T);
+ }
+ enum AXReg = SizedReg!(AX, T);
+
+ mixin (simpleFormat(q{
+ asm pure nothrow @nogc @trusted
+ {
+ naked;
+ mov %3, %1;
+ lock; cmpxchg [%0], %2;
+ setz AL;
+ ret;
+ }
+ }, DestAddr, Cmp, Val, AXReg));
+ }
+ else
+ {
+ version (Windows)
+ {
+ asm pure nothrow @nogc @trusted
+ {
+ naked;
+ push RBX;
+ mov RAX, [RDX];
+ mov RDX, 8[RDX];
+ mov RBX, [RCX];
+ mov RCX, 8[RCX];
+ lock; cmpxchg16b [R8];
+ setz AL;
+ pop RBX;
+ ret;
+ }
+ }
+ else
+ {
+ asm pure nothrow @nogc @trusted
+ {
+ naked;
+ push RBX;
+ mov RAX, RDX;
+ mov RDX, RCX;
+ mov RBX, RDI;
+ mov RCX, RSI;
+ lock; cmpxchg16b [R8];
+ setz AL;
+ pop RBX;
+ ret;
+ }
+ }
+ }
+ }
+ else
+ static assert (false, "Unsupported architecture.");
+ }
+
+ void atomicFence(MemoryOrder order = MemoryOrder.seq)() pure nothrow @nogc @trusted
+ {
+ // TODO: `mfence` should only be required for seq_cst operations, but this depends on
+ // the compiler's backend knowledge to not reorder code inappropriately,
+ // so we'll apply it conservatively.
+ static if (order != MemoryOrder.raw)
+ {
+ version (D_InlineAsm_X86)
+ {
+ import core.cpuid;
+
+ // TODO: review this implementation; it seems way overly complicated
+ asm pure nothrow @nogc @trusted
+ {
+ naked;
+
+ call sse2;
+ test AL, AL;
+ jne Lcpuid;
+
+ // Fast path: We have SSE2, so just use mfence.
+ mfence;
+ jmp Lend;
+
+ Lcpuid:
+
+ // Slow path: We use cpuid to serialize. This is
+ // significantly slower than mfence, but is the
+ // only serialization facility we have available
+ // on older non-SSE2 chips.
+ push EBX;
+
+ mov EAX, 0;
+ cpuid;
+
+ pop EBX;
+
+ Lend:
+
+ ret;
+ }
+ }
+ else version (D_InlineAsm_X86_64)
+ {
+ asm pure nothrow @nogc @trusted
+ {
+ naked;
+ mfence;
+ ret;
+ }
+ }
+ else
+ static assert (false, "Unsupported architecture.");
+ }
+ }
+
+ void pause() pure nothrow @nogc @trusted
+ {
+ version (D_InlineAsm_X86)
+ {
+ asm pure nothrow @nogc @trusted
+ {
+ naked;
+ rep; nop;
+ ret;
+ }
+ }
+ else version (D_InlineAsm_X86_64)
+ {
+ asm pure nothrow @nogc @trusted
+ {
+ naked;
+ // pause; // TODO: DMD should add this opcode to its inline asm
+ rep; nop;
+ ret;
+ }
+ }
+ else
+ {
+ // ARM should `yield`
+ // other architectures? otherwise some sort of nop...
+ }
+ }
+}
+else version (GNU)
+{
+ import gcc.builtins;
+ import gcc.config;
+
+ inout(T) atomicLoad(MemoryOrder order = MemoryOrder.seq, T)(inout(T)* src) pure nothrow @nogc @trusted
+ if (CanCAS!T)
+ {
+ static assert(order != MemoryOrder.rel, "invalid MemoryOrder for atomicLoad()");
+
+ static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
+ {
+ static if (T.sizeof == ubyte.sizeof)
+ {
+ ubyte value = __atomic_load_1(cast(shared)src, order);
+ return *cast(typeof(return)*)&value;
+ }
+ else static if (T.sizeof == ushort.sizeof)
+ {
+ ushort value = __atomic_load_2(cast(shared)src, order);
+ return *cast(typeof(return)*)&value;
+ }
+ else static if (T.sizeof == uint.sizeof)
+ {
+ uint value = __atomic_load_4(cast(shared)src, order);
+ return *cast(typeof(return)*)&value;
+ }
+ else static if (T.sizeof == ulong.sizeof && GNU_Have_64Bit_Atomics)
+ {
+ ulong value = __atomic_load_8(cast(shared)src, order);
+ return *cast(typeof(return)*)&value;
+ }
+ else static if (GNU_Have_LibAtomic)
+ {
+ T value;
+ __atomic_load(T.sizeof, cast(shared)src, &value, order);
+ return *cast(typeof(return)*)&value;
+ }
+ else
+ static assert(0, "Invalid template type specified.");
+ }
+ else
+ {
+ getAtomicMutex.lock();
+ scope(exit) getAtomicMutex.unlock();
+ return *cast(typeof(return)*)src;
+ }
+ }
+
+ void atomicStore(MemoryOrder order = MemoryOrder.seq, T)(T* dest, T value) pure nothrow @nogc @trusted
+ if (CanCAS!T)
+ {
+ static assert(order != MemoryOrder.acq, "Invalid MemoryOrder for atomicStore()");
+
+ static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
+ {
+ static if (T.sizeof == ubyte.sizeof)
+ __atomic_store_1(cast(shared)dest, *cast(ubyte*)&value, order);
+ else static if (T.sizeof == ushort.sizeof)
+ __atomic_store_2(cast(shared)dest, *cast(ushort*)&value, order);
+ else static if (T.sizeof == uint.sizeof)
+ __atomic_store_4(cast(shared)dest, *cast(uint*)&value, order);
+ else static if (T.sizeof == ulong.sizeof && GNU_Have_64Bit_Atomics)
+ __atomic_store_8(cast(shared)dest, *cast(ulong*)&value, order);
+ else static if (GNU_Have_LibAtomic)
+ __atomic_store(T.sizeof, cast(shared)dest, cast(void*)&value, order);
+ else
+ static assert(0, "Invalid template type specified.");
+ }
+ else
+ {
+ getAtomicMutex.lock();
+ *dest = value;
+ getAtomicMutex.unlock();
+ }
+ }
+
+ T atomicFetchAdd(MemoryOrder order = MemoryOrder.seq, bool result = true, T)(T* dest, T value) pure nothrow @nogc @trusted
+ if (is(T : ulong))
+ {
+ static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
+ {
+ static if (T.sizeof == ubyte.sizeof)
+ return __atomic_fetch_add_1(cast(shared)dest, value, order);
+ else static if (T.sizeof == ushort.sizeof)
+ return __atomic_fetch_add_2(cast(shared)dest, value, order);
+ else static if (T.sizeof == uint.sizeof)
+ return __atomic_fetch_add_4(cast(shared)dest, value, order);
+ else static if (T.sizeof == ulong.sizeof && GNU_Have_64Bit_Atomics)
+ return __atomic_fetch_add_8(cast(shared)dest, value, order);
+ else static if (GNU_Have_LibAtomic)
+ return __atomic_fetch_add(T.sizeof, cast(shared)dest, cast(void*)&value, order);
+ else
+ static assert(0, "Invalid template type specified.");
+ }
+ else
+ {
+ getAtomicMutex.lock();
+ scope(exit) getAtomicMutex.unlock();
+ T tmp = *dest;
+ *dest += value;
+ return tmp;
+ }
+ }
+
+ T atomicFetchSub(MemoryOrder order = MemoryOrder.seq, bool result = true, T)(T* dest, T value) pure nothrow @nogc @trusted
+ if (is(T : ulong))
+ {
+ static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
+ {
+ static if (T.sizeof == ubyte.sizeof)
+ return __atomic_fetch_sub_1(cast(shared)dest, value, order);
+ else static if (T.sizeof == ushort.sizeof)
+ return __atomic_fetch_sub_2(cast(shared)dest, value, order);
+ else static if (T.sizeof == uint.sizeof)
+ return __atomic_fetch_sub_4(cast(shared)dest, value, order);
+ else static if (T.sizeof == ulong.sizeof && GNU_Have_64Bit_Atomics)
+ return __atomic_fetch_sub_8(cast(shared)dest, value, order);
+ else static if (GNU_Have_LibAtomic)
+ return __atomic_fetch_sub(T.sizeof, cast(shared)dest, cast(void*)&value, order);
+ else
+ static assert(0, "Invalid template type specified.");
+ }
+ else
+ {
+ getAtomicMutex.lock();
+ scope(exit) getAtomicMutex.unlock();
+ T tmp = *dest;
+ *dest -= value;
+ return tmp;
+ }
+ }
+
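+ // Atomically replaces `*dest` with `value` and returns the previous
+ // contents of `*dest`.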
+ T atomicExchange(MemoryOrder order = MemoryOrder.seq, bool result = true, T)(T* dest, T value) pure nothrow @nogc @trusted
+ if (is(T : ulong) || is(T == class) || is(T == interface) || is(T U : U*))
+ {
+ static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
+ {
+ static if (T.sizeof == byte.sizeof)
+ {
+ ubyte res = __atomic_exchange_1(cast(shared)dest, *cast(ubyte*)&value, order);
+ return *cast(typeof(return)*)&res;
+ }
+ else static if (T.sizeof == short.sizeof)
+ {
+ ushort res = __atomic_exchange_2(cast(shared)dest, *cast(ushort*)&value, order);
+ return *cast(typeof(return)*)&res;
+ }
+ else static if (T.sizeof == int.sizeof)
+ {
+ uint res = __atomic_exchange_4(cast(shared)dest, *cast(uint*)&value, order);
+ return *cast(typeof(return)*)&res;
+ }
+ else static if (T.sizeof == long.sizeof && GNU_Have_64Bit_Atomics)
+ {
+ ulong res = __atomic_exchange_8(cast(shared)dest, *cast(ulong*)&value, order);
+ return *cast(typeof(return)*)&res;
+ }
+ else static if (GNU_Have_LibAtomic)
+ {
+ T res = void;
+ __atomic_exchange(T.sizeof, cast(shared)dest, cast(void*)&value, &res, order);
+ return res;
+ }
+ else
+ static assert(0, "Invalid template type specified.");
+ }
+ else
+ {
+ getAtomicMutex.lock();
+ scope(exit) getAtomicMutex.unlock();
+
+ T res = *dest;
+ *dest = value;
+ return res;
+ }
+ }
+
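+ // The weak variants map to the compare-exchange builtins with weak=true
+ // and may fail spuriously (intended for retry loops); the strong variants
+ // fail only when `*dest != *compare`. On failure, `*compare` is updated
+ // with the value observed in `*dest`.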
+ bool atomicCompareExchangeWeak(MemoryOrder succ = MemoryOrder.seq, MemoryOrder fail = MemoryOrder.seq, T)(T* dest, T* compare, T value) pure nothrow @nogc @trusted
+ if (CanCAS!T)
+ {
+ return atomicCompareExchangeImpl!(succ, fail, true)(dest, compare, value);
+ }
+
+ bool atomicCompareExchangeStrong(MemoryOrder succ = MemoryOrder.seq, MemoryOrder fail = MemoryOrder.seq, T)(T* dest, T* compare, T value) pure nothrow @nogc @trusted
+ if (CanCAS!T)
+ {
+ return atomicCompareExchangeImpl!(succ, fail, false)(dest, compare, value);
+ }
+
+ bool atomicCompareExchangeStrongNoResult(MemoryOrder succ = MemoryOrder.seq, MemoryOrder fail = MemoryOrder.seq, T)(T* dest, const T compare, T value) pure nothrow @nogc @trusted
+ if (CanCAS!T)
+ {
+ return atomicCompareExchangeImpl!(succ, fail, false)(dest, cast(T*)&compare, value);
+ }
+
+ bool atomicCompareExchangeWeakNoResult(MemoryOrder succ = MemoryOrder.seq, MemoryOrder fail = MemoryOrder.seq, T)(T* dest, const T compare, T value) pure nothrow @nogc @trusted
+ if (CanCAS!T)
+ {
+ return atomicCompareExchangeImpl!(succ, fail, true)(dest, cast(T*)&compare, value);
+ }
+
+ private bool atomicCompareExchangeImpl(MemoryOrder succ = MemoryOrder.seq, MemoryOrder fail = MemoryOrder.seq, bool weak, T)(T* dest, T* compare, T value) pure nothrow @nogc @trusted
+ if (CanCAS!T)
+ {
+ bool res = void;
+
+ static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
+ {
+ static if (T.sizeof == byte.sizeof)
+ res = __atomic_compare_exchange_1(cast(shared)dest, compare, *cast(ubyte*)&value,
+ weak, succ, fail);
+ else static if (T.sizeof == short.sizeof)
+ res = __atomic_compare_exchange_2(cast(shared)dest, compare, *cast(ushort*)&value,
+ weak, succ, fail);
+ else static if (T.sizeof == int.sizeof)
+ res = __atomic_compare_exchange_4(cast(shared)dest, compare, *cast(uint*)&value,
+ weak, succ, fail);
+ else static if (T.sizeof == long.sizeof && GNU_Have_64Bit_Atomics)
+ res = __atomic_compare_exchange_8(cast(shared)dest, compare, *cast(ulong*)&value,
+ weak, succ, fail);
+ else static if (GNU_Have_LibAtomic)
+ res = __atomic_compare_exchange(T.sizeof, cast(shared)dest, compare, cast(void*)&value,
+ succ, fail);
+ else
+ static assert(0, "Invalid template type specified.");
+ }
+ else
+ {
+ static if (T.sizeof == byte.sizeof)
+ alias U = byte;
+ else static if (T.sizeof == short.sizeof)
+ alias U = short;
+ else static if (T.sizeof == int.sizeof)
+ alias U = int;
+ else static if (T.sizeof == long.sizeof)
+ alias U = long;
+ else
+ static assert(0, "Invalid template type specified.");
+
+ getAtomicMutex.lock();
+ scope(exit) getAtomicMutex.unlock();
+
+ if (*cast(U*)dest == *cast(U*)compare)
+ {
+ *dest = value;
+ res = true;
+ }
+ else
+ {
+ *compare = *dest;
+ res = false;
+ }
+ }
+
+ return res;
+ }
+
+ void atomicFence(MemoryOrder order = MemoryOrder.seq)() pure nothrow @nogc @trusted
+ {
+ static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
+ __atomic_thread_fence(order);
+ else
+ {
+ getAtomicMutex.lock();
+ getAtomicMutex.unlock();
+ }
+ }
+
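+ // Hint that the current thread is busy-waiting; on x86 and x86_64 this
+ // emits the `pause` instruction via __builtin_ia32_pause, on other
+ // targets it is currently a no-op.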
+ void pause() pure nothrow @nogc @trusted
+ {
+ version (X86)
+ {
+ __builtin_ia32_pause();
+ }
+ else version (X86_64)
+ {
+ __builtin_ia32_pause();
+ }
+ else
+ {
+ // Other architectures? Some sort of nop or barrier.
+ }
+ }
+
+ static if (!GNU_Have_Atomics && !GNU_Have_LibAtomic)
+ {
+ // Use system mutex for atomics, faking the purity of the functions so
+ // that they can be used in pure/nothrow/@safe code.
+ extern (C) private pure @trusted @nogc nothrow
+ {
+ static if (GNU_Thread_Model == ThreadModel.Posix)
+ {
+ import core.sys.posix.pthread;
+ alias atomicMutexHandle = pthread_mutex_t;
+
+ pragma(mangle, "pthread_mutex_init") int fakePureMutexInit(pthread_mutex_t*, pthread_mutexattr_t*);
+ pragma(mangle, "pthread_mutex_lock") int fakePureMutexLock(pthread_mutex_t*);
+ pragma(mangle, "pthread_mutex_unlock") int fakePureMutexUnlock(pthread_mutex_t*);
+ }
+ else static if (GNU_Thread_Model == ThreadModel.Win32)
+ {
+ import core.sys.windows.winbase;
+ alias atomicMutexHandle = CRITICAL_SECTION;
+
+ pragma(mangle, "InitializeCriticalSection") int fakePureMutexInit(CRITICAL_SECTION*);
+ pragma(mangle, "EnterCriticalSection") void fakePureMutexLock(CRITICAL_SECTION*);
+ pragma(mangle, "LeaveCriticalSection") int fakePureMutexUnlock(CRITICAL_SECTION*);
+ }
+ else
+ {
+ alias atomicMutexHandle = int;
+ }
+ }
+
+ // Implements lock/unlock operations.
+ private struct AtomicMutex
+ {
+ int lock() pure @trusted @nogc nothrow
+ {
+ static if (GNU_Thread_Model == ThreadModel.Posix)
+ {
+ if (!_inited)
+ {
+ fakePureMutexInit(&_handle, null);
+ _inited = true;
+ }
+ return fakePureMutexLock(&_handle);
+ }
+ else
+ {
+ static if (GNU_Thread_Model == ThreadModel.Win32)
+ {
+ if (!_inited)
+ {
+ fakePureMutexInit(&_handle);
+ _inited = true;
+ }
+ fakePureMutexLock(&_handle);
+ }
+ return 0;
+ }
+ }
+
+ int unlock() pure @trusted @nogc nothrow
+ {
+ static if (GNU_Thread_Model == ThreadModel.Posix)
+ return fakePureMutexUnlock(&_handle);
+ else
+ {
+ static if (GNU_Thread_Model == ThreadModel.Win32)
+ fakePureMutexUnlock(&_handle);
+ return 0;
+ }
+ }
+
+ private:
+ atomicMutexHandle _handle;
+ bool _inited;
+ }
+
+ // Internal static mutex reference.
+ private AtomicMutex* _getAtomicMutex() @trusted @nogc nothrow
+ {
+ __gshared static AtomicMutex mutex;
+ return &mutex;
+ }
+
+ // Pure alias for _getAtomicMutex.
+ pragma(mangle, _getAtomicMutex.mangleof)
+ private AtomicMutex* getAtomicMutex() pure @trusted @nogc nothrow @property;
+ }
+}
+
+private:
+
+version (Windows)
+{
+ enum RegisterReturn(T) = is(T : U[], U) || is(T : R delegate(A), R, A...);
+}
+
+enum CanCAS(T) = is(T : ulong) ||
+ is(T == class) ||
+ is(T == interface) ||
+ is(T : U*, U) ||
+ is(T : U[], U) ||
+ is(T : R delegate(A), R, A...) ||
+ (is(T == struct) && __traits(isPOD, T) &&
+ (T.sizeof <= size_t.sizeof*2 || // no more than 2 words
+ (T.sizeof == 16 && has128BitCAS)) && // or supports 128-bit CAS
+ (T.sizeof & (T.sizeof - 1)) == 0 // is power of 2
+ );
+
+template IntOrLong(T)
+{
+ static if (T.sizeof > 4)
+ alias IntOrLong = long;
+ else
+ alias IntOrLong = int;
+}
+
+// NOTE: x86 loads implicitly have acquire semantics so a memory
+// barrier is only necessary on releases.
+template needsLoadBarrier( MemoryOrder ms )
+{
+ enum bool needsLoadBarrier = ms == MemoryOrder.seq;
+}
+
+
+// NOTE: x86 stores implicitly have release semantics so a memory
+// barrier is only necessary on acquires.
+template needsStoreBarrier( MemoryOrder ms )
+{
+ enum bool needsStoreBarrier = ms == MemoryOrder.seq;
+}
+
+// this is a helper to build asm blocks
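+// For illustration: simpleFormat("mov %0, [%1];", ["EAX", "EBX"]) yields
+// "mov EAX, [EBX];"; when args[N] is empty, the text following a "?N"
+// marker up to the end of that line is omitted.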
+string simpleFormat(string format, string[] args...)
+{
+ string result;
+ outer: while (format.length)
+ {
+ foreach (i; 0 .. format.length)
+ {
+ if (format[i] == '%' || format[i] == '?')
+ {
+ bool isQ = format[i] == '?';
+ result ~= format[0 .. i++];
+ assert (i < format.length, "Invalid format string");
+ if (format[i] == '%' || format[i] == '?')
+ {
+ assert(!isQ, "Invalid format string");
+ result ~= format[i++];
+ }
+ else
+ {
+ int index = 0;
+ assert (format[i] >= '0' && format[i] <= '9', "Invalid format string");
+ while (i < format.length && format[i] >= '0' && format[i] <= '9')
+ index = index * 10 + (ubyte(format[i++]) - ubyte('0'));
+ if (!isQ)
+ result ~= args[index];
+ else if (!args[index])
+ {
+ size_t j = i;
+ for (; j < format.length;)
+ {
+ if (format[j++] == '\n')
+ break;
+ }
+ i = j;
+ }
+ }
+ format = format[i .. $];
+ continue outer;
+ }
+ }
+ result ~= format;
+ break;
+ }
+ return result;
+}
diff --git a/libphobos/libdruntime/rt/util/container/array.d b/libphobos/libdruntime/core/internal/container/array.d
index f5aa3d7..27292cd 100644
--- a/libphobos/libdruntime/rt/util/container/array.d
+++ b/libphobos/libdruntime/core/internal/container/array.d
@@ -2,12 +2,12 @@
* Array container for internal usage.
*
* Copyright: Copyright Martin Nowak 2013.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Martin Nowak
*/
-module rt.util.container.array;
+module core.internal.container.array;
-static import common = rt.util.container.common;
+static import common = core.internal.container.common;
import core.exception : onOutOfMemoryErrorNoGC;
@@ -58,21 +58,21 @@ nothrow:
@property ref inout(T) front() inout
in { assert(!empty); }
- body
+ do
{
return _ptr[0];
}
@property ref inout(T) back() inout
in { assert(!empty); }
- body
+ do
{
return _ptr[_length - 1];
}
ref inout(T) opIndex(size_t idx) inout
in { assert(idx < length); }
- body
+ do
{
return _ptr[idx];
}
@@ -84,7 +84,7 @@ nothrow:
inout(T)[] opSlice(size_t a, size_t b) inout
in { assert(a < b && b <= length); }
- body
+ do
{
return _ptr[a .. b];
}
@@ -113,7 +113,7 @@ nothrow:
void remove(size_t idx)
in { assert(idx < length); }
- body
+ do
{
foreach (i; idx .. length - 1)
_ptr[i] = _ptr[i+1];
diff --git a/libphobos/libdruntime/rt/util/container/common.d b/libphobos/libdruntime/core/internal/container/common.d
index 9e6c013..582d63b 100644
--- a/libphobos/libdruntime/rt/util/container/common.d
+++ b/libphobos/libdruntime/core/internal/container/common.d
@@ -2,10 +2,10 @@
* Common code for writing containers.
*
* Copyright: Copyright Martin Nowak 2013.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Martin Nowak
*/
-module rt.util.container.common;
+module core.internal.container.common;
import core.stdc.stdlib : malloc, realloc;
public import core.stdc.stdlib : free;
@@ -44,11 +44,8 @@ void destroy(T)(ref T t) if (!is(T == struct))
void initialize(T)(ref T t) if (is(T == struct))
{
- import core.stdc.string;
- if (auto p = typeid(T).initializer().ptr)
- memcpy(&t, p, T.sizeof);
- else
- memset(&t, 0, T.sizeof);
+ import core.internal.lifetime : emplaceInitializer;
+ emplaceInitializer(t);
}
void initialize(T)(ref T t) if (!is(T == struct))
@@ -56,7 +53,7 @@ void initialize(T)(ref T t) if (!is(T == struct))
t = T.init;
}
-version (unittest) struct RC()
+version (CoreUnittest) struct RC()
{
nothrow:
this(size_t* cnt) { ++*(_cnt = cnt); }
diff --git a/libphobos/libdruntime/rt/util/container/hashtab.d b/libphobos/libdruntime/core/internal/container/hashtab.d
index fd9f0f7..5e91193 100644
--- a/libphobos/libdruntime/rt/util/container/hashtab.d
+++ b/libphobos/libdruntime/core/internal/container/hashtab.d
@@ -2,13 +2,13 @@
* HashTab container for internal usage.
*
* Copyright: Copyright Martin Nowak 2013.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Martin Nowak
*/
-module rt.util.container.hashtab;
+module core.internal.container.hashtab;
-import rt.util.container.array;
-static import common = rt.util.container.common;
+import core.internal.container.array;
+static import common = core.internal.container.common;
struct HashTab(Key, Value)
{
@@ -54,7 +54,7 @@ struct HashTab(Key, Value)
void remove(in Key key)
in { assert(key in this); }
- body
+ do
{
ensureNotInOpApply();
@@ -82,7 +82,7 @@ struct HashTab(Key, Value)
ref inout(Value) opIndex(Key key) inout
{
- return *opIn_r(key);
+ return *opBinaryRight!("in")(key);
}
void opIndexAssign(Value value, Key key)
@@ -90,7 +90,8 @@ struct HashTab(Key, Value)
*get(key) = value;
}
- inout(Value)* opIn_r(in Key key) inout
+ inout(Value)* opBinaryRight(string op)(const scope Key key) inout
+ if (op == "in")
{
if (_buckets.length)
{
@@ -125,7 +126,7 @@ private:
Value* get(Key key)
{
- if (auto p = opIn_r(key))
+ if (auto p = opBinaryRight!("in")(key))
return p;
ensureNotInOpApply();
@@ -144,7 +145,7 @@ private:
return &p._value;
}
- static hash_t hashOf(in ref Key key) @trusted
+ static hash_t hashOf(const scope ref Key key) @trusted
{
static if (is(Key U : U[]))
return .hashOf(key, 0);
@@ -162,7 +163,7 @@ private:
{
assert(_buckets.length);
}
- body
+ do
{
immutable ocnt = _buckets.length;
immutable nmask = 2 * ocnt - 1;
@@ -194,7 +195,7 @@ private:
{
assert(_buckets.length >= 2);
}
- body
+ do
{
immutable ocnt = _buckets.length;
immutable ncnt = ocnt >> 1;
diff --git a/libphobos/libdruntime/rt/util/container/treap.d b/libphobos/libdruntime/core/internal/container/treap.d
index f0c04fd..1202b85 100644
--- a/libphobos/libdruntime/rt/util/container/treap.d
+++ b/libphobos/libdruntime/core/internal/container/treap.d
@@ -2,13 +2,12 @@
* Treap container for internal usage.
*
* Copyright: Copyright Digital Mars 2014 - 2014.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
*/
-module rt.util.container.treap;
+module core.internal.container.treap;
-static import common = rt.util.container.common;
-import rt.util.random;
-import rt.qsort;
+static import common = core.internal.container.common;
+import core.internal.qsort;
struct Treap(E)
{
@@ -27,9 +26,10 @@ nothrow:
removeAll();
}
- void initialize()
+ void initialize(ulong randSeed)
{
- rand48.defaultSeed();
+ Rand _rand = { randSeed };
+ rand = _rand;
}
void insert(E element) @nogc
@@ -52,7 +52,7 @@ nothrow:
return opApplyHelper(root, dg);
}
- version (unittest)
+ version (CoreUnittest)
bool opEquals(E[] elements)
{
size_t i;
@@ -72,7 +72,7 @@ nothrow:
root = null;
}
- version (unittest)
+ version (CoreUnittest)
bool valid()
{
return valid(root);
@@ -108,13 +108,13 @@ nothrow:
private:
Node* root;
- Rand48 rand48;
+ Rand rand;
Node* allocNode(E element) @nogc
{
Node* node = cast(Node*)common.xmalloc(Node.sizeof);
node.left = node.right = null;
- node.priority = rand48();
+ node.priority = rand();
node.element = element;
return node;
}
@@ -224,7 +224,7 @@ static:
return opApplyHelper(node.right, dg);
}
- version (unittest)
+ version (CoreUnittest)
bool valid(Node* node)
{
if (!node)
@@ -262,8 +262,8 @@ unittest
OP[] ops;
uint[] opdata;
- treap.initialize();
srand(cast(uint)time(null));
+ treap.initialize(rand());
uint[] data;
initialLoop:
@@ -336,3 +336,33 @@ initialLoop:
data.length = 0;
assert(treap == data);
}
+
+/// Random number generators for internal usage.
+private struct Rand
+{
+ private ulong rng_state;
+
+@safe @nogc nothrow:
+pure:
+
+ auto opCall()
+ {
+ auto result = front;
+ popFront();
+ return result;
+ }
+
+ @property uint front()
+ {
+ return cast(uint)(rng_state >> 32);
+ }
+
+ void popFront()
+ {
+ immutable ulong a = 2862933555777941757;
+ immutable ulong c = 1;
+ rng_state = a * rng_state + c;
+ }
+
+ enum empty = false;
+}
diff --git a/libphobos/libdruntime/core/internal/convert.d b/libphobos/libdruntime/core/internal/convert.d
index d922049..2789d29 100644
--- a/libphobos/libdruntime/core/internal/convert.d
+++ b/libphobos/libdruntime/core/internal/convert.d
@@ -3,18 +3,17 @@
* This module provides functions to converting different values to const(ubyte)[]
*
* Copyright: Copyright Igor Stepanov 2013-2013.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Igor Stepanov
* Source: $(DRUNTIMESRC core/internal/_convert.d)
*/
module core.internal.convert;
-import core.internal.traits : Unqual;
/+
A @nogc function can allocate memory during CTFE.
+/
@nogc nothrow pure @trusted
-private ubyte[] ctfe_alloc()(size_t n)
+private ubyte[] ctfe_alloc(size_t n)
{
if (!__ctfe)
{
@@ -34,8 +33,7 @@ private ubyte[] ctfe_alloc()(size_t n)
}
@trusted pure nothrow @nogc
-const(ubyte)[] toUbyte(T)(const ref T val) if (is(Unqual!T == float) || is(Unqual!T == double) || is(Unqual!T == real) ||
- is(Unqual!T == ifloat) || is(Unqual!T == idouble) || is(Unqual!T == ireal))
+const(ubyte)[] toUbyte(T)(const scope ref T val) if (__traits(isFloating, T) && (is(T : real) || is(T : ireal)))
{
if (__ctfe)
{
@@ -84,7 +82,7 @@ const(ubyte)[] toUbyte(T)(const ref T val) if (is(Unqual!T == float) || is(Unqua
ubyte[] buff = ctfe_alloc(T.sizeof);
enum msbSize = double.sizeof;
- static if (is(Unqual!T == ireal))
+ static if (is(T : ireal))
double hi = toPrec!double(val.im);
else
double hi = toPrec!double(val);
@@ -101,7 +99,7 @@ const(ubyte)[] toUbyte(T)(const ref T val) if (is(Unqual!T == float) || is(Unqua
}
else
{
- static if (is(Unqual!T == ireal))
+ static if (is(T : ireal))
double low = toPrec!double(val.im - hi);
else
double low = toPrec!double(val - hi);
@@ -183,7 +181,7 @@ const(ubyte)[] toUbyte(T)(const ref T val) if (is(Unqual!T == float) || is(Unqua
}
@safe pure nothrow @nogc
-private Float parse(bool is_denormalized = false, T)(T x) if (is(Unqual!T == ifloat) || is(Unqual!T == idouble) || is(Unqual!T == ireal))
+private Float parse(bool is_denormalized = false, T:ireal)(T x)
{
return parse(x.im);
}
@@ -191,6 +189,7 @@ private Float parse(bool is_denormalized = false, T)(T x) if (is(Unqual!T == ifl
@safe pure nothrow @nogc
private Float parse(bool is_denormalized = false, T:real)(T x_) if (floatFormat!T != FloatFormat.Real80)
{
+ import core.internal.traits : Unqual;
Unqual!T x = x_;
static assert(floatFormat!T != FloatFormat.DoubleDouble,
"doubledouble float format not supported in CTFE");
@@ -249,6 +248,7 @@ private Float parse(bool is_denormalized = false, T:real)(T x_) if (floatFormat!
@safe pure nothrow @nogc
private Float parse(bool _ = false, T:real)(T x_) if (floatFormat!T == FloatFormat.Real80)
{
+ import core.internal.traits : Unqual;
Unqual!T x = x_;
//HACK @@@3632@@@
@@ -472,14 +472,14 @@ private Float denormalizedMantissa(T)(T x, uint sign) if (floatFormat!T == Float
return Float(fl.mantissa2 & 0x00FFFFFFFFFFFFFFUL , 0, sign, 1);
}
-version (unittest)
+@system unittest
{
- private const(ubyte)[] toUbyte2(T)(T val)
+ static const(ubyte)[] toUbyte2(T)(T val)
{
return toUbyte(val).dup;
}
- private void testNumberConvert(string v)()
+ static void testNumberConvert(string v)()
{
enum ctval = mixin(v);
@@ -495,7 +495,7 @@ version (unittest)
assert(rtbytes[0..testsize] == ctbytes[0..testsize]);
}
- private void testConvert()
+ static void testConvert()
{
/**Test special values*/
testNumberConvert!("-float.infinity");
@@ -572,11 +572,6 @@ version (unittest)
testNumberConvert!("real.min_normal/19");
testNumberConvert!("real.min_normal/17");
- /**Test imaginary values: convert algorithm is same with real values*/
- testNumberConvert!("0.0Fi");
- testNumberConvert!("0.0i");
- testNumberConvert!("0.0Li");
-
/**True random values*/
testNumberConvert!("-0x9.0f7ee55df77618fp-13829L");
testNumberConvert!("0x7.36e6e2640120d28p+8797L");
@@ -605,11 +600,7 @@ version (unittest)
testNumberConvert!("cast(float)0x9.54bb0d88806f714p-7088L");
}
-
- unittest
- {
- testConvert();
- }
+ testConvert();
}
@@ -654,13 +645,13 @@ package template floatSize(T) if (is(T:real) || is(T:ireal))
// all toUbyte functions must be evaluable at compile time
@trusted pure nothrow @nogc
-const(ubyte)[] toUbyte(T)(const T[] arr) if (T.sizeof == 1)
+const(ubyte)[] toUbyte(T)(return scope const T[] arr) if (T.sizeof == 1)
{
return cast(const(ubyte)[])arr;
}
@trusted pure nothrow @nogc
-const(ubyte)[] toUbyte(T)(const T[] arr) if (T.sizeof > 1)
+const(ubyte)[] toUbyte(T)(return scope const T[] arr) if (T.sizeof > 1)
{
if (__ctfe)
{
@@ -692,7 +683,7 @@ const(ubyte)[] toUbyte(T)(const T[] arr) if (T.sizeof > 1)
}
@trusted pure nothrow @nogc
-const(ubyte)[] toUbyte(T)(const ref T val) if (__traits(isIntegral, T) && !is(T == enum) && !is(T == __vector))
+const(ubyte)[] toUbyte(T)(const ref scope T val) if (__traits(isIntegral, T) && !is(T == enum) && !is(T == __vector))
{
static if (T.sizeof == 1)
{
@@ -709,6 +700,7 @@ const(ubyte)[] toUbyte(T)(const ref T val) if (__traits(isIntegral, T) && !is(T
}
else if (__ctfe)
{
+ import core.internal.traits : Unqual;
ubyte[] tmp = ctfe_alloc(T.sizeof);
Unqual!T val_ = val;
for (size_t i = 0; i < T.sizeof; ++i)
@@ -728,7 +720,7 @@ const(ubyte)[] toUbyte(T)(const ref T val) if (__traits(isIntegral, T) && !is(T
}
@trusted pure nothrow @nogc
-const(ubyte)[] toUbyte(T)(const ref T val) if (is(T == __vector))
+const(ubyte)[] toUbyte(T)(const ref scope T val) if (is(T == __vector))
{
if (!__ctfe)
return (cast(const ubyte*) &val)[0 .. T.sizeof];
@@ -749,8 +741,10 @@ const(ubyte)[] toUbyte(T)(const ref T val) if (is(T == __vector))
}
}
+// @@@DEPRECATED_2022-02@@@
+deprecated
@trusted pure nothrow @nogc
-const(ubyte)[] toUbyte(T)(const ref T val) if (is(Unqual!T == cfloat) || is(Unqual!T == cdouble) ||is(Unqual!T == creal))
+const(ubyte)[] toUbyte(T)(const ref return scope T val) if (__traits(isFloating, T) && is(T : creal))
{
if (__ctfe)
{
@@ -770,12 +764,12 @@ const(ubyte)[] toUbyte(T)(const ref T val) if (is(Unqual!T == cfloat) || is(Unqu
}
@trusted pure nothrow @nogc
-const(ubyte)[] toUbyte(T)(const ref T val) if (is(T == enum))
+const(ubyte)[] toUbyte(T)(const ref return scope T val) if (is(T == enum))
{
if (__ctfe)
{
static if (is(T V == enum)){}
- return toUbyte(cast(const V) val);
+ return toUbyte(*cast(const V*) &val);
}
else
{
@@ -789,7 +783,7 @@ nothrow pure @safe unittest
enum Month : uint { jan = 1}
Month m = Month.jan;
const bytes = toUbyte(m);
- enum ctfe_works = (() => { Month x = Month.jan; return toUbyte(x).length > 0; })();
+ enum ctfe_works = (() { Month x = Month.jan; return toUbyte(x).length > 0; })();
}
@trusted pure nothrow @nogc
@@ -807,7 +801,7 @@ const(ubyte)[] toUbyte(T)(const ref T val) if (is(T == delegate) || is(T : V*, V
}
@trusted pure nothrow @nogc
-const(ubyte)[] toUbyte(T)(const ref T val) if (is(T == struct) || is(T == union))
+const(ubyte)[] toUbyte(T)(const ref return scope T val) if (is(T == struct) || is(T == union))
{
if (__ctfe)
{
diff --git a/libphobos/libdruntime/core/internal/dassert.d b/libphobos/libdruntime/core/internal/dassert.d
new file mode 100644
index 0000000..ac7600f
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/dassert.d
@@ -0,0 +1,590 @@
+/*
+ * Support for rich error message generation with `assert`
+ *
+ * This module provides the `_d_assert_fail` hooks which are instantiated
+ * by the compiler whenever `-checkaction=context` is used.
+ * There are two hooks, one for unary expressions, and one for binary.
+ * When used, the compiler will rewrite `assert(a >= b)` as
+ * `assert(a >= b, _d_assert_fail!(typeof(a))(">=", a, b))`.
+ * Temporaries will be created to avoid side effects if deemed necessary
+ * by the compiler.
+ *
+ * For more information, refer to the implementation in DMD frontend
+ * for `AssertExpression`'s semantic analysis.
+ *
+ * Copyright: D Language Foundation 2018 - 2020
+ * License: $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost License 1.0)
+ * Source: $(LINK2 https://github.com/dlang/druntime/blob/master/src/core/internal/dassert.d, _dassert.d)
+ * Documentation: https://dlang.org/phobos/core_internal_dassert.html
+ */
+module core.internal.dassert;
+
+/**
+ * Generates rich assert error messages for unary expressions
+ *
+ * The unary expression `assert(!una)` will be turned into
+ * `assert(!una, _d_assert_fail("!", una))`.
+ * This routine simply acts as if the user wrote `assert(una == false)`.
+ *
+ * Params:
+ * op = Operator that was used in the expression, currently only "!"
+ * is supported.
+ * a = Result of the expression that was used in `assert` before
+ * its implicit conversion to `bool`.
+ *
+ * Returns:
+ * A string such as "$a != true" or "$a == true".
+ */
+string _d_assert_fail(A)(const scope string op, auto ref const scope A a)
+{
+ // Prevent InvalidMemoryOperationError when triggered from a finalizer
+ if (inFinalizer())
+ return "Assertion failed (rich formatting is disabled in finalizers)";
+
+ string[2] vals = [ miniFormatFakeAttributes(a), "true" ];
+ immutable token = op == "!" ? "==" : "!=";
+ return combine(vals[0 .. 1], token, vals[1 .. $]);
+}
+
+/**
+ * Generates rich assert error messages for binary expressions
+ *
+ * The binary expression `assert(x == y)` will be turned into
+ * `assert(x == y, _d_assert_fail!(typeof(x))("==", x, y))`.
+ *
+ * Params:
+ * comp = Comparison operator that was used in the expression.
+ * a = Left hand side operand (can be a tuple).
+ * b = Right hand side operand (can be a tuple).
+ *
+ * Returns:
+ * A string such as "$a $comp $b".
+ */
+template _d_assert_fail(A...)
+{
+ string _d_assert_fail(B...)(
+ const scope string comp, auto ref const scope A a, auto ref const scope B b)
+ if (B.length != 0 || A.length != 1) // Resolve ambiguity with unary overload
+ {
+ // Prevent InvalidMemoryOperationError when triggered from a finalizer
+ if (inFinalizer())
+ return "Assertion failed (rich formatting is disabled in finalizers)";
+
+ string[A.length + B.length] vals;
+ static foreach (idx; 0 .. A.length)
+ vals[idx] = miniFormatFakeAttributes(a[idx]);
+ static foreach (idx; 0 .. B.length)
+ vals[A.length + idx] = miniFormatFakeAttributes(b[idx]);
+ immutable token = invertCompToken(comp);
+ return combine(vals[0 .. A.length], token, vals[A.length .. $]);
+ }
+}
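+
+// Illustrative example: for `int x = 1, y = 2;`, a failing `assert(x == y)`
+// is lowered to a call equivalent to `_d_assert_fail!(typeof(x))("==", x, y)`,
+// which returns the string "1 != 2" (the comparison token is inverted to
+// describe the failure).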
+
+/// Combines the supplied arguments into one string `"valA token valB"`
+private string combine(const scope string[] valA, const scope string token,
+ const scope string[] valB) pure nothrow @nogc @safe
+{
+ // Each separator is 2 chars (", "), plus the two spaces around the token.
+ size_t totalLen = (valA.length - 1) * 2 +
+ (valB.length - 1) * 2 + 2 + token.length;
+
+ // Empty arrays are printed as ()
+ if (valA.length == 0) totalLen += 2;
+ if (valB.length == 0) totalLen += 2;
+
+ foreach (v; valA) totalLen += v.length;
+ foreach (v; valB) totalLen += v.length;
+
+ // Include braces when printing tuples
+ const printBraces = (valA.length + valB.length) != 2;
+ if (printBraces) totalLen += 4; // '(', ')' for both tuples
+
+ char[] buffer = cast(char[]) pureAlloc(totalLen)[0 .. totalLen];
+ // @nogc-concat of "<valA> <comp> <valB>"
+ static void formatTuple (scope char[] buffer, ref size_t n, in string[] vals, in bool printBraces)
+ {
+ if (printBraces) buffer[n++] = '(';
+ foreach (idx, v; vals)
+ {
+ if (idx)
+ {
+ buffer[n++] = ',';
+ buffer[n++] = ' ';
+ }
+ buffer[n .. n + v.length] = v;
+ n += v.length;
+ }
+ if (printBraces) buffer[n++] = ')';
+ }
+
+ size_t n;
+ formatTuple(buffer, n, valA, printBraces);
+ buffer[n++] = ' ';
+ buffer[n .. n + token.length] = token;
+ n += token.length;
+ buffer[n++] = ' ';
+ formatTuple(buffer, n, valB, printBraces);
+ return (() @trusted => cast(string) buffer)();
+}
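+
+// For illustration: combine(["1"], "!=", ["2"]) yields "1 != 2", while
+// combine(["1", "2"], "==", ["3"]) prints tuples with braces and yields
+// "(1, 2) == (3)".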
+
+/// Yields the appropriate `printf` format token for a type `T`
+private template getPrintfFormat(T)
+{
+ static if (is(T == long))
+ {
+ enum getPrintfFormat = "%lld";
+ }
+ else static if (is(T == ulong))
+ {
+ enum getPrintfFormat = "%llu";
+ }
+ else static if (__traits(isIntegral, T))
+ {
+ static if (__traits(isUnsigned, T))
+ {
+ enum getPrintfFormat = "%u";
+ }
+ else
+ {
+ enum getPrintfFormat = "%d";
+ }
+ }
+ else
+ {
+ static assert(0, "Unknown format");
+ }
+}
+
+/**
+ * Generates a textual representation of `v` without relying on Phobos.
+ * The value is formatted as follows:
+ *
+ * - primitive types and arrays yield their respective literals
+ * - pointers are printed as hexadecimal numbers
+ * - enum members are represented by their name
+ * - user-defined types are formatted by either calling `toString`
+ * if defined or printing all members, e.g. `S(1, 2)`
+ *
+ * Note that unions are rejected because this function cannot determine which
+ * member is valid at the time of the call.
+ *
+ * Params:
+ * v = the value to print
+ *
+ * Returns: a string representing `v`, or `V.stringof` if `V` is not supported
+ */
+private string miniFormat(V)(const scope ref V v)
+{
+ import core.internal.traits: isAggregateType;
+
+ /// `shared` values are formatted as their base type
+ static if (is(V == shared T, T))
+ {
+ // Use atomics to avoid race conditions whenever possible
+ static if (__traits(compiles, atomicLoad(v)))
+ {
+ if (!__ctfe)
+ {
+ T tmp = cast(T) atomicLoad(v);
+ return miniFormat(tmp);
+ }
+ }
+
+ // Fall back to a simple cast - we're violating the type system anyways
+ return miniFormat(__ctfe ? cast(const T) v : *cast(const T*) &v);
+ }
+ // Format enum members using their name
+ else static if (is(V BaseType == enum))
+ {
+ // Always generate repeated if's instead of switch to skip the detection
+ // of non-integral enums. This method doesn't need to be fast.
+ static foreach (mem; __traits(allMembers, V))
+ {
+ if (v == __traits(getMember, V, mem))
+ return mem;
+ }
+
+ // Format invalid enum values as their base type
+ enum cast_ = "cast(" ~ V.stringof ~ ")";
+ const val = miniFormat(__ctfe ? cast(const BaseType) v : *cast(const BaseType*) &v);
+ return combine([ cast_ ], "", [ val ]);
+ }
+ else static if (is(V == bool))
+ {
+ return v ? "true" : "false";
+ }
+ // Detect vectors which match isIntegral / isFloating
+ else static if (is(V == __vector(ET[N]), ET, size_t N))
+ {
+ string msg = "[";
+ foreach (i; 0 .. N)
+ {
+ if (i > 0)
+ msg ~= ", ";
+
+ msg ~= miniFormat(v[i]);
+ }
+ msg ~= "]";
+ return msg;
+ }
+ else static if (__traits(isIntegral, V))
+ {
+ static if (is(V == char))
+ {
+ // Avoid invalid code points
+ if (v < 0x7F)
+ return ['\'', v, '\''];
+
+ uint tmp = v;
+ return "cast(char) " ~ miniFormat(tmp);
+ }
+ else static if (is(V == wchar) || is(V == dchar))
+ {
+ import core.internal.utf: isValidDchar, toUTF8;
+
+ // Avoid invalid code points
+ if (isValidDchar(v))
+ return toUTF8(['\'', v, '\'']);
+
+ uint tmp = v;
+ return "cast(" ~ V.stringof ~ ") " ~ miniFormat(tmp);
+ }
+ else
+ {
+ import core.internal.string;
+ static if (__traits(isUnsigned, V))
+ const val = unsignedToTempString(v);
+ else
+ const val = signedToTempString(v);
+ return val.get().idup();
+ }
+ }
+ else static if (__traits(isFloating, V))
+ {
+ import core.stdc.stdio : sprintf;
+ import core.stdc.config : LD = c_long_double;
+
+ // No suitable replacement for sprintf in druntime ATM
+ if (__ctfe)
+ return '<' ~ V.stringof ~ " not supported>";
+
+ // Workaround for https://issues.dlang.org/show_bug.cgi?id=20759
+ static if (is(LD == real))
+ enum realFmt = "%Lg";
+ else
+ enum realFmt = "%g";
+
+ char[60] val;
+ int len;
+ static if (is(V == float) || is(V == double))
+ len = sprintf(&val[0], "%g", v);
+ else static if (is(V == real))
+ len = sprintf(&val[0], realFmt, cast(LD) v);
+ else static if (is(V == cfloat) || is(V == cdouble))
+ len = sprintf(&val[0], "%g + %gi", v.re, v.im);
+ else static if (is(V == creal))
+ len = sprintf(&val[0], realFmt ~ " + " ~ realFmt ~ 'i', cast(LD) v.re, cast(LD) v.im);
+ else static if (is(V == ifloat) || is(V == idouble))
+ len = sprintf(&val[0], "%gi", v);
+ else // ireal
+ {
+ static assert(is(V == ireal));
+ static if (is(LD == real))
+ alias R = ireal;
+ else
+ alias R = idouble;
+ len = sprintf(&val[0], realFmt ~ 'i', cast(R) v);
+ }
+ return val.idup[0 .. len];
+ }
+ // special handling for the `null` literal (typeof(null))
+ else static if (is(V == typeof(null)))
+ {
+ return "`null`";
+ }
+ else static if (is(V == U*, U))
+ {
+ // Format as ulong and prepend a 0x for pointers
+ import core.internal.string;
+ return cast(immutable) ("0x" ~ unsignedToTempString!16(cast(ulong) v));
+ }
+ // toString() isn't always const, e.g. classes inheriting from Object
+ else static if (__traits(compiles, { string s = V.init.toString(); }))
+ {
+ // Object references / struct pointers may be null
+ static if (is(V == class) || is(V == interface))
+ {
+ if (v is null)
+ return "`null`";
+ }
+
+ try
+ {
+ // Prefer const overload of toString
+ static if (__traits(compiles, { string s = v.toString(); }))
+ return v.toString();
+ else
+ return (cast() v).toString();
+ }
+ catch (Exception e)
+ {
+ return `<toString() failed: "` ~ e.msg ~ `", called on ` ~ formatMembers(v) ~`>`;
+ }
+ }
+ // Static arrays or slices (but not aggregates with `alias this`)
+ else static if (is(V : U[], U) && !isAggregateType!V)
+ {
+ import core.internal.traits: Unqual;
+ alias E = Unqual!U;
+
+ // special-handling for void-arrays
+ static if (is(E == void))
+ {
+ if (__ctfe)
+ return "<void[] not supported>";
+
+ const bytes = cast(byte[]) v;
+ return miniFormat(bytes);
+ }
+ // anything string-like
+ else static if (is(E == char) || is(E == dchar) || is(E == wchar))
+ {
+ const s = `"` ~ v ~ `"`;
+
+ // v could be a char[], dchar[] or wchar[]
+ static if (is(typeof(s) : const char[]))
+ return cast(immutable) s;
+ else
+ {
+ import core.internal.utf: toUTF8;
+ return toUTF8(s);
+ }
+ }
+ else
+ {
+ string msg = "[";
+ foreach (i, ref el; v)
+ {
+ if (i > 0)
+ msg ~= ", ";
+
+ // don't fully print big arrays
+ if (i >= 30)
+ {
+ msg ~= "...";
+ break;
+ }
+ msg ~= miniFormat(el);
+ }
+ msg ~= "]";
+ return msg;
+ }
+ }
+ else static if (is(V : Val[K], K, Val))
+ {
+ size_t i;
+ string msg = "[";
+ foreach (ref k, ref val; v)
+ {
+ if (i > 0)
+ msg ~= ", ";
+ // don't fully print big AAs
+ if (i++ >= 30)
+ {
+ msg ~= "...";
+ break;
+ }
+ msg ~= miniFormat(k) ~ ": " ~ miniFormat(val);
+ }
+ msg ~= "]";
+ return msg;
+ }
+ else static if (is(V == struct))
+ {
+ return formatMembers(v);
+ }
+ // Extern C++ classes don't have a toString by default
+ else static if (is(V == class) || is(V == interface))
+ {
+ if (v is null)
+ return "null";
+
+ // Extern classes might be opaque
+ static if (is(typeof(v.tupleof)))
+ return formatMembers(v);
+ else
+ return '<' ~ V.stringof ~ '>';
+ }
+ else
+ {
+ return V.stringof;
+ }
+}
+
+/// Formats `v`'s members as `V(<member 1>, <member 2>, ...)`
+private string formatMembers(V)(const scope ref V v)
+{
+ enum ctxPtr = __traits(isNested, V);
+ enum isOverlapped = calcFieldOverlap([ v.tupleof.offsetof ]);
+
+ string msg = V.stringof ~ "(";
+ foreach (i, ref field; v.tupleof)
+ {
+ if (i > 0)
+ msg ~= ", ";
+
+ static if (isOverlapped[i])
+ {
+ msg ~= "<overlapped field>";
+ }
+ else
+ {
+ // Mark context pointer
+ static if (ctxPtr && i == v.tupleof.length - 1)
+ msg ~= "<context>: ";
+
+ msg ~= miniFormat(field);
+ }
+ }
+ msg ~= ")";
+ return msg;
+}
+
+/**
+ * Calculates whether fields are overlapped based on the passed offsets.
+ *
+ * Params:
+ * offsets = offsets of all fields matching the order of `.tupleof`
+ *
+ * Returns: an array such that arr[n] = true indicates that the n'th field
+ * overlaps with an adjacent field
+ **/
+private bool[] calcFieldOverlap(const scope size_t[] offsets)
+{
+ bool[] overlaps = new bool[](offsets.length);
+
+ foreach (const idx; 1 .. overlaps.length)
+ {
+ if (offsets[idx - 1] == offsets[idx])
+ overlaps[idx - 1] = overlaps[idx] = true;
+ }
+
+ return overlaps;
+}
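+
+// For illustration: field offsets [0, 0, 8] (e.g. two members of an
+// anonymous union followed by a regular field) yield [true, true, false].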
+
+// This should be a local import in miniFormat but fails with a cyclic dependency error
+// core.thread.osthread -> core.time -> object -> core.internal.array.capacity
+// -> core.atomic -> core.thread -> core.thread.osthread
+import core.atomic : atomicLoad;
+
+/// Negates a comparison token, e.g. `==` is mapped to `!=`
+private string invertCompToken(scope string comp) pure nothrow @nogc @safe
+{
+ switch (comp)
+ {
+ case "==":
+ return "!=";
+ case "!=":
+ return "==";
+ case "<":
+ return ">=";
+ case "<=":
+ return ">";
+ case ">":
+ return "<=";
+ case ">=":
+ return "<";
+ case "is":
+ return "!is";
+ case "!is":
+ return "is";
+ case "in":
+ return "!in";
+ case "!in":
+ return "in";
+ default:
+ assert(0, combine(["Invalid comparison operator '"], comp, ["'"]));
+ }
+}
+
+/// Casts the function pointer to include `@safe`, `@nogc`, ...
+private auto assumeFakeAttributes(T)(T t) @trusted
+{
+ import core.internal.traits : Parameters, ReturnType;
+ alias RT = ReturnType!T;
+ alias P = Parameters!T;
+ alias type = RT function(P) nothrow @nogc @safe pure;
+ return cast(type) t;
+}
+
+/// Wrapper for `miniFormat` which assumes that the implementation is `@safe`, `@nogc`, ...
+/// s.t. it does not violate the constraints of the function containing the `assert`.
+private string miniFormatFakeAttributes(T)(const scope ref T t)
+{
+ alias miniT = miniFormat!T;
+ return assumeFakeAttributes(&miniT)(t);
+}
+
+/// Allocates an array of `t` bytes while pretending to be `@safe`, `@nogc`, ...
+private auto pureAlloc(size_t t)
+{
+ static auto alloc(size_t len)
+ {
+ return new ubyte[len];
+ }
+ return assumeFakeAttributes(&alloc)(t);
+}
+
+/// Wrapper for GC.inFinalizer that fakes purity
+private bool inFinalizer()() pure nothrow @nogc @safe
+{
+ // CTFE doesn't trigger InvalidMemoryOperationErrors
+ import core.memory : GC;
+ return !__ctfe && assumeFakeAttributes(&GC.inFinalizer)();
+}
+
+// https://issues.dlang.org/show_bug.cgi?id=21544
+unittest
+{
+ // Normal enum values
+ enum E { A, BCDE }
+ E e = E.A;
+ assert(miniFormat(e) == "A");
+ e = E.BCDE;
+ assert(miniFormat(e) == "BCDE");
+
+ // Invalid enum values are printed as their implicit base type (int)
+ e = cast(E) 3;
+ assert(miniFormat(e) == "cast(E) 3");
+
+ // Non-integral enums work as well
+ static struct S
+ {
+ int a;
+ string str;
+ }
+
+ enum E2 : S { a2 = S(1, "Hello") }
+ E2 es = E2.a2;
+ assert(miniFormat(es) == `a2`);
+
+ // Even invalid values
+ es = cast(E2) S(2, "World");
+ assert(miniFormat(es) == `cast(E2) S(2, "World")`);
+}
+
+// vectors
+unittest
+{
+ static if (is(__vector(float[4])))
+ {
+ __vector(float[4]) f = [-1.5f, 0.5f, 1.0f, 0.125f];
+ assert(miniFormat(f) == "[-1.5, 0.5, 1, 0.125]");
+ }
+
+ static if (is(__vector(int[4])))
+ {
+ __vector(int[4]) i = [-1, 0, 1, 3];
+ assert(miniFormat(i) == "[-1, 0, 1, 3]");
+ }
+}
diff --git a/libphobos/libdruntime/core/internal/destruction.d b/libphobos/libdruntime/core/internal/destruction.d
new file mode 100644
index 0000000..5c5932d
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/destruction.d
@@ -0,0 +1,47 @@
+/**
+ This module contains implementations for destroying instances of types
+
+ Copyright: Copyright Digital Mars 2000 - 2019.
+ License: Distributed under the
+ $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ (See accompanying file LICENSE)
+ Source: $(DRUNTIMESRC core/_internal/_destruction.d)
+*/
+module core.internal.destruction;
+
+// compiler frontend lowers dynamic array destruction to this
+void __ArrayDtor(T)(scope T[] a)
+{
+ foreach_reverse (ref T e; a)
+ e.__xdtor();
+}
+
+public void destructRecurse(E, size_t n)(ref E[n] arr)
+{
+ import core.internal.traits : hasElaborateDestructor;
+
+ static if (hasElaborateDestructor!E)
+ {
+ foreach_reverse (ref elem; arr)
+ destructRecurse(elem);
+ }
+}
+
+public void destructRecurse(S)(ref S s)
+ if (is(S == struct))
+{
+ static if (__traits(hasMember, S, "__xdtor") &&
+ // Bugzilla 14746: Check that it's the exact member of S.
+ __traits(isSame, S, __traits(parent, s.__xdtor)))
+ s.__xdtor();
+}
+
+// Test static struct
+nothrow @safe @nogc unittest
+{
+ static int i = 0;
+ static struct S { ~this() nothrow @safe @nogc { i = 42; } }
+ S s;
+ destructRecurse(s);
+ assert(i == 42);
+}
diff --git a/libphobos/libdruntime/core/internal/entrypoint.d b/libphobos/libdruntime/core/internal/entrypoint.d
new file mode 100644
index 0000000..839c120
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/entrypoint.d
@@ -0,0 +1,41 @@
+/**
+ This module contains the code for C main and any call(s) to initialize the
+ D runtime and call D main.
+
+ Copyright: Copyright Digital Mars 2000 - 2019.
+ License: Distributed under the
+ $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ (See accompanying file LICENSE)
+ Source: $(DRUNTIMESRC core/_internal/_entrypoint.d)
+*/
+module core.internal.entrypoint;
+
+/**
+A template containing C main and any call(s) to initialize druntime and
+call D main. Any module containing a D main function declaration will
+cause the compiler to generate a `mixin _d_cmain();` statement to inject
+this code into the module.
+*/
+template _d_cmain()
+{
+ extern(C)
+ {
+ int _d_run_main(int argc, char **argv, void* mainFunc);
+
+ int _Dmain(char[][] args);
+
+ int main(int argc, char **argv)
+ {
+ return _d_run_main(argc, argv, &_Dmain);
+ }
+
+ // Solaris, for unknown reasons, requires both a main() and an _main()
+ version (Solaris)
+ {
+ int _main(int argc, char** argv)
+ {
+ return main(argc, argv);
+ }
+ }
+ }
+}
diff --git a/libphobos/libdruntime/core/internal/gc/bits.d b/libphobos/libdruntime/core/internal/gc/bits.d
new file mode 100644
index 0000000..d50c38f
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/gc/bits.d
@@ -0,0 +1,493 @@
+/**
+ * Contains a bitfield used by the GC.
+ *
+ * Copyright: D Language Foundation 2005 - 2021.
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * Authors: Walter Bright, David Friedman, Sean Kelly
+ */
+module core.internal.gc.bits;
+
+import core.internal.gc.os : os_mem_map, os_mem_unmap, HaveFork;
+
+import core.bitop;
+import core.stdc.string;
+import core.stdc.stdlib;
+import core.exception : onOutOfMemoryError;
+
+// use version gcbitsSingleBitOperation to disable optimizations that use
+// word operands in the bulk operations copyRange, setRange, clrRange, etc.
+// version = gcbitsSingleBitOperation;
+
+struct GCBits
+{
+@nogc:
+ alias size_t wordtype;
+
+ enum BITS_PER_WORD = (wordtype.sizeof * 8);
+ enum BITS_SHIFT = (wordtype.sizeof == 8 ? 6 : 5);
+ enum BITS_MASK = (BITS_PER_WORD - 1);
+ enum BITS_0 = cast(wordtype)0;
+ enum BITS_1 = cast(wordtype)1;
+ enum BITS_2 = cast(wordtype)2;
+
+ wordtype* data;
+ size_t nbits;
+
+ void Dtor(bool share = false) nothrow
+ {
+ if (data)
+ {
+ static if (!HaveFork)
+ free(data);
+ else if (share)
+ os_mem_unmap(data, nwords * data[0].sizeof);
+ else
+ free(data);
+ data = null;
+ }
+ }
+
+ void alloc(size_t nbits, bool share = false) nothrow
+ {
+ this.nbits = nbits;
+ static if (!HaveFork)
+ data = cast(typeof(data[0])*)calloc(nwords, data[0].sizeof);
+ else if (share)
+ data = cast(typeof(data[0])*)os_mem_map(nwords * data[0].sizeof, true); // Allocate as MAP_SHARED
+ else
+ data = cast(typeof(data[0])*)calloc(nwords, data[0].sizeof);
+ if (!data)
+ onOutOfMemoryError();
+ }
+
+ wordtype test(size_t i) const nothrow
+ in
+ {
+ assert(i < nbits);
+ }
+ do
+ {
+ return core.bitop.bt(data, i);
+ }
+
+ int set(size_t i) nothrow
+ in
+ {
+ assert(i < nbits);
+ }
+ do
+ {
+ return core.bitop.bts(data, i);
+ }
+
+ int clear(size_t i) nothrow
+ in
+ {
+ assert(i <= nbits);
+ }
+ do
+ {
+ return core.bitop.btr(data, i);
+ }
+
+ // return non-zero if bit already set
+ size_t setLocked(size_t i) nothrow
+ {
+ version (GNU)
+ {
+ import gcc.builtins;
+ const pos = i >> BITS_SHIFT;
+ const mask = BITS_1 << (i & BITS_MASK);
+ mixin("auto val = __atomic_fetch_or_" ~ size_t.sizeof.stringof[0]
+ ~ "(cast(shared)(data + pos), mask, 3);");
+ return (val & mask) != 0;
+ }
+ else version (LDC)
+ {
+ import ldc.intrinsics;
+ const pos = i >> BITS_SHIFT;
+ const mask = BITS_1 << (i & BITS_MASK);
+ auto val = llvm_atomic_rmw_or(cast(shared)(data + pos), mask);
+ return (val & mask) != 0;
+ }
+ else version (D_InlineAsm_X86)
+ {
+ asm @nogc nothrow {
+ mov EAX, this;
+ mov ECX, data[EAX];
+ mov EDX, i;
+ lock;
+ bts dword ptr[ECX], EDX;
+ sbb EAX,EAX;
+ }
+ }
+ else version (D_InlineAsm_X86_64)
+ {
+ asm @nogc nothrow {
+ mov RAX, this;
+ mov RAX, data[RAX];
+ mov RDX, i;
+ lock;
+ bts qword ptr[RAX], RDX;
+ sbb RAX,RAX;
+ }
+ }
+ else
+ {
+ auto pos = i >> BITS_SHIFT;
+ auto pdata = cast(shared)(data + pos);
+ auto mask = BITS_1 << (i & BITS_MASK);
+ auto state = *pdata;
+ if (state & mask)
+ return state;
+
+ import core.atomic;
+ auto newstate = state | mask;
+ while (!cas(pdata, state, newstate))
+ {
+ state = *pdata;
+ if (state & mask)
+ return state;
+ newstate = state | mask;
+ }
+ return 0;
+ }
+ }
+
+ template testAndSet(bool locked)
+ {
+ static if (locked)
+ alias testAndSet = setLocked;
+ else
+ alias testAndSet = set;
+ }
+
+
+ mixin template RangeVars()
+ {
+ size_t firstWord = (target >> BITS_SHIFT);
+ size_t firstOff = target & BITS_MASK;
+ size_t last = target + len - 1;
+ size_t lastWord = (last >> BITS_SHIFT);
+ size_t lastOff = last & BITS_MASK;
+ }
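+
+ // For illustration: with 64-bit words, target = 70 and len = 10 give
+ // firstWord = 1, firstOff = 6, last = 79, lastWord = 1, lastOff = 15,
+ // i.e. the whole range lies within data[1].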
+
+ // extract loops to allow inlining the rest
+ void clearWords(size_t firstWord, size_t lastWord) nothrow
+ {
+ for (size_t w = firstWord; w < lastWord; w++)
+ data[w] = 0;
+ }
+
+ void setWords(size_t firstWord, size_t lastWord) nothrow
+ {
+ for (size_t w = firstWord; w < lastWord; w++)
+ data[w] = ~0;
+ }
+
+ void copyWords(size_t firstWord, size_t lastWord, const(wordtype)* source) nothrow
+ {
+ for (size_t w = firstWord; w < lastWord; w++)
+ data[w] = source[w - firstWord];
+ }
+
+ void copyWordsShifted(size_t firstWord, size_t cntWords, size_t firstOff, const(wordtype)* source) nothrow
+ {
+ wordtype mask = ~BITS_0 << firstOff;
+ data[firstWord] = (data[firstWord] & ~mask) | (source[0] << firstOff);
+ for (size_t w = 1; w < cntWords; w++)
+ data[firstWord + w] = (source[w - 1] >> (BITS_PER_WORD - firstOff)) | (source[w] << firstOff);
+ }
+
+ // target = the bit index at which to start the copy
+ // len = the number of bits to copy from source
+ void copyRange(size_t target, size_t len, const(wordtype)* source) nothrow
+ {
+ version (gcbitsSingleBitOperation)
+ {
+ for (size_t i = 0; i < len; i++)
+ if (source[(i >> BITS_SHIFT)] & (BITS_1 << (i & BITS_MASK)))
+ set(target+i);
+ else
+ clear(target+i);
+ }
+ else
+ {
+ if (len > 0)
+ copyRangeZ(target, len, source);
+ }
+ }
+
+ void copyRangeZ(size_t target, size_t len, const(wordtype)* source) nothrow
+ {
+ mixin RangeVars!();
+
+ if (firstWord == lastWord)
+ {
+ wordtype mask = ((BITS_2 << (lastOff - firstOff)) - 1) << firstOff;
+ data[firstWord] = (data[firstWord] & ~mask) | ((source[0] << firstOff) & mask);
+ }
+ else if (firstOff == 0)
+ {
+ copyWords(firstWord, lastWord, source);
+
+ wordtype mask = (BITS_2 << lastOff) - 1;
+ data[lastWord] = (data[lastWord] & ~mask) | (source[lastWord - firstWord] & mask);
+ }
+ else
+ {
+ size_t cntWords = lastWord - firstWord;
+ copyWordsShifted(firstWord, cntWords, firstOff, source);
+
+ wordtype src = (source[cntWords - 1] >> (BITS_PER_WORD - firstOff)) | (source[cntWords] << firstOff);
+ wordtype mask = (BITS_2 << lastOff) - 1;
+ data[lastWord] = (data[lastWord] & ~mask) | (src & mask);
+ }
+ }
+
+ void copyRangeRepeating(size_t target, size_t destlen, const(wordtype)* source, size_t sourcelen) nothrow
+ {
+ version (gcbitsSingleBitOperation)
+ {
+ for (size_t i=0; i < destlen; i++)
+ {
+ bool b;
+ size_t j = i % sourcelen;
+ b = (source[j >> BITS_SHIFT] & (BITS_1 << (j & BITS_MASK))) != 0;
+ if (b) set(target+i);
+ else clear(target+i);
+ }
+ }
+ else
+ {
+ while (destlen > sourcelen)
+ {
+ copyRange(target, sourcelen, source);
+ target += sourcelen;
+ destlen -= sourcelen;
+ }
+ copyRange(target, destlen, source);
+ }
+ }
+
+ unittest
+ {
+ // simulate broken array append test case in vibe.d
+ GCBits bits;
+ bits.alloc(10000);
+ auto data = bits.data;
+
+ GCBits src;
+ src.alloc(67);
+ src.data[0] = 0x4;
+
+ bits.copyRangeRepeating(2, 10000, src.data, 67);
+
+ foreach (i; 0 .. 10000)
+ if ((i - 2) % 67 == 2)
+ assert(bits.test(i));
+ else
+ assert(!bits.test(i));
+ }
+
+ void setRange(size_t target, size_t len) nothrow
+ {
+ version (gcbitsSingleBitOperation)
+ {
+ for (size_t i = 0; i < len; i++)
+ set(target+i);
+ }
+ else
+ {
+ if (len > 0)
+ setRangeZ(target, len);
+ }
+ }
+
+ void setRangeZ(size_t target, size_t len) nothrow
+ {
+ mixin RangeVars!();
+
+ if (firstWord == lastWord)
+ {
+ wordtype mask = ((BITS_2 << (lastOff - firstOff)) - 1) << firstOff;
+ data[firstWord] |= mask;
+ }
+ else
+ {
+ data[firstWord] |= ~BITS_0 << firstOff;
+ setWords(firstWord + 1, lastWord);
+ wordtype mask = (BITS_2 << lastOff) - 1;
+ data[lastWord] |= mask;
+ }
+ }
+
+ void clrRange(size_t target, size_t len) nothrow
+ {
+ version (gcbitsSingleBitOperation)
+ {
+ for (size_t i = 0; i < len; i++)
+ clear(target+i);
+ }
+ else
+ {
+ if (len > 0)
+ clrRangeZ(target, len);
+ }
+ }
+
+ void clrRangeZ(size_t target, size_t len) nothrow
+ {
+ mixin RangeVars!();
+ if (firstWord == lastWord)
+ {
+ wordtype mask = ((BITS_2 << (lastOff - firstOff)) - 1) << firstOff;
+ data[firstWord] &= ~mask;
+ }
+ else
+ {
+ data[firstWord] &= ~(~BITS_0 << firstOff);
+ clearWords(firstWord + 1, lastWord);
+ wordtype mask = (BITS_2 << lastOff) - 1;
+ data[lastWord] &= ~mask;
+ }
+ }
+
+ unittest
+ {
+ GCBits bits;
+ bits.alloc(1000);
+ auto data = bits.data;
+
+ bits.setRange(0,1);
+ assert(data[0] == 1);
+
+ bits.clrRange(0,1);
+ assert(data[0] == 0);
+
+ bits.setRange(BITS_PER_WORD-1,1);
+ assert(data[0] == BITS_1 << (BITS_PER_WORD-1));
+
+ bits.clrRange(BITS_PER_WORD-1,1);
+ assert(data[0] == 0);
+
+ bits.setRange(12,7);
+ assert(data[0] == 0b0111_1111_0000_0000_0000);
+
+ bits.clrRange(14,4);
+ assert(data[0] == 0b0100_0011_0000_0000_0000);
+
+ bits.clrRange(0,BITS_PER_WORD);
+ assert(data[0] == 0);
+
+ bits.setRange(0,BITS_PER_WORD);
+ assert(data[0] == ~0);
+ assert(data[1] == 0);
+
+ bits.setRange(BITS_PER_WORD,BITS_PER_WORD);
+ assert(data[0] == ~0);
+ assert(data[1] == ~0);
+ assert(data[2] == 0);
+ bits.clrRange(BITS_PER_WORD/2,BITS_PER_WORD);
+ assert(data[0] == (BITS_1 << (BITS_PER_WORD/2)) - 1);
+ assert(data[1] == ~data[0]);
+ assert(data[2] == 0);
+
+ bits.setRange(8*BITS_PER_WORD+1,4*BITS_PER_WORD-2);
+ assert(data[8] == ~0 << 1);
+ assert(data[9] == ~0);
+ assert(data[10] == ~0);
+ assert(data[11] == cast(wordtype)~0 >> 1);
+
+ bits.clrRange(9*BITS_PER_WORD+1,2*BITS_PER_WORD);
+ assert(data[8] == ~0 << 1);
+ assert(data[9] == 1);
+ assert(data[10] == 0);
+ assert(data[11] == ((cast(wordtype)~0 >> 1) & ~1));
+
+ wordtype[4] src = [ 0xa, 0x5, 0xaa, 0x55 ];
+
+ void testCopyRange(size_t start, size_t len, int repeat = 1)
+ {
+ bits.setRange(0, bits.nbits);
+ if (repeat > 1)
+ bits.copyRangeRepeating(start, repeat * len, src.ptr, len);
+ else
+ bits.copyRange(start, len, src.ptr);
+ foreach (i; 0 .. start)
+ assert(bits.test(i));
+ foreach (r; 0 .. repeat)
+ foreach (i; 0 .. len)
+ assert(!bits.test(start + r*len + i) == !core.bitop.bt(src.ptr, i));
+ foreach (i; start + repeat*len .. 10*BITS_PER_WORD)
+ assert(bits.test(i));
+ }
+
+ testCopyRange(20, 10); // short copy range within same word
+ testCopyRange(50, 20); // short copy range spanning two words
+ testCopyRange(64, 3 * BITS_PER_WORD + 3); // aligned copy range
+ testCopyRange(77, 2 * BITS_PER_WORD + 15); // unaligned copy range
+ testCopyRange(64, 127); // copy range within critical end alignment
+
+ testCopyRange(10, 4, 5); // repeating small range within same word
+ testCopyRange(20, 5, 10); // repeating small range spanning two words
+ testCopyRange(40, 21, 7); // repeating medium range
+ testCopyRange(73, 2 * BITS_PER_WORD + 15, 5); // repeating multi-word range
+
+ testCopyRange(2, 3, 166); // failed with assert
+ }
+
+ void zero() nothrow
+ {
+ memset(data, 0, nwords * wordtype.sizeof);
+ }
+
+ void setAll() nothrow
+ {
+ memset(data, 0xFF, nwords * wordtype.sizeof);
+ }
+
+ void copy(GCBits *f) nothrow
+ in
+ {
+ assert(nwords == f.nwords);
+ }
+ do
+ {
+ memcpy(data, f.data, nwords * wordtype.sizeof);
+ }
+
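+ // Number of words needed to hold `nbits` bits, rounded up; e.g. on a
+ // 64-bit target, 786 bits require (786 + 63) >> 6 == 13 words.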
+ @property size_t nwords() const pure nothrow
+ {
+ return (nbits + (BITS_PER_WORD - 1)) >> BITS_SHIFT;
+ }
+}
+
+unittest
+{
+ GCBits b;
+
+ b.alloc(786);
+ assert(!b.test(123));
+ assert(!b.clear(123));
+ assert(!b.set(123));
+ assert(b.test(123));
+ assert(b.clear(123));
+ assert(!b.test(123));
+
+ b.set(785);
+ b.set(0);
+ assert(b.test(785));
+ assert(b.test(0));
+ b.zero();
+ assert(!b.test(785));
+ assert(!b.test(0));
+
+ GCBits b2;
+ b2.alloc(786);
+ b2.set(38);
+ b.copy(&b2);
+ assert(b.test(38));
+ b2.Dtor();
+ b.Dtor();
+}
diff --git a/libphobos/libdruntime/core/internal/gc/impl/conservative/gc.d b/libphobos/libdruntime/core/internal/gc/impl/conservative/gc.d
new file mode 100644
index 0000000..0c49955
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/gc/impl/conservative/gc.d
@@ -0,0 +1,4836 @@
+/**
+ * Contains the garbage collector implementation.
+ *
+ * Copyright: D Language Foundation 2001 - 2021.
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * Authors: Walter Bright, David Friedman, Sean Kelly
+ */
+module core.internal.gc.impl.conservative.gc;
+
+// D Programming Language Garbage Collector implementation
+
+/************** Debugging ***************************/
+
+//debug = PRINTF; // turn on printf's
+//debug = PARALLEL_PRINTF; // turn on printf's
+//debug = COLLECT_PRINTF; // turn on printf's
+//debug = MARK_PRINTF; // turn on printf's
+//debug = PRINTF_TO_FILE; // redirect printf's output to file "gcx.log"
+//debug = LOGGING; // log allocations / frees
+//debug = MEMSTOMP; // stomp on memory
+//debug = SENTINEL; // add underrun/overrun protection
+ // NOTE: this needs to be enabled globally in the makefiles
+ // (-debug=SENTINEL) to pass druntime's unittests.
+//debug = PTRCHECK; // more pointer checking
+//debug = PTRCHECK2; // thorough but slow pointer checking
+//debug = INVARIANT; // enable invariants
+//debug = PROFILE_API; // profile API calls for config.profile > 1
+//debug = GC_RECURSIVE_LOCK; // check for recursive locking on the same thread
+
+/***************************************************/
+version = COLLECT_PARALLEL; // parallel scanning
+version (Posix)
+ version = COLLECT_FORK;
+
+import core.internal.gc.bits;
+import core.internal.gc.os;
+import core.gc.config;
+import core.gc.gcinterface;
+
+import core.internal.container.treap;
+
+import cstdlib = core.stdc.stdlib : calloc, free, malloc, realloc;
+import core.stdc.string : memcpy, memset, memmove;
+import core.bitop;
+import core.thread;
+static import core.memory;
+
+version (GNU) import gcc.builtins;
+
+debug (PRINTF_TO_FILE) import core.stdc.stdio : sprintf, fprintf, fopen, fflush, FILE;
+else import core.stdc.stdio : sprintf, printf; // needed to output profiling results
+
+import core.time;
+alias currTime = MonoTime.currTime;
+
+// Track total time spent preparing for GC,
+// marking, sweeping and recovering pages.
+__gshared Duration prepTime;
+__gshared Duration markTime;
+__gshared Duration sweepTime;
+__gshared Duration pauseTime;
+__gshared Duration maxPauseTime;
+__gshared Duration maxCollectionTime;
+__gshared size_t numCollections;
+__gshared size_t maxPoolMemory;
+
+__gshared long numMallocs;
+__gshared long numFrees;
+__gshared long numReallocs;
+__gshared long numExtends;
+__gshared long numOthers;
+__gshared long mallocTime; // using ticks instead of MonoTime for better performance
+__gshared long freeTime;
+__gshared long reallocTime;
+__gshared long extendTime;
+__gshared long otherTime;
+__gshared long lockTime;
+
+ulong bytesAllocated; // thread local counter
+
+private
+{
+ extern (C)
+ {
+ // to allow compilation of this module without access to the rt package,
+ // make these functions available from rt.lifetime
+ void rt_finalizeFromGC(void* p, size_t size, uint attr) nothrow;
+ int rt_hasFinalizerInSegment(void* p, size_t size, uint attr, const scope void[] segment) nothrow;
+
+ // Declared as an extern instead of importing core.exception
+ // to avoid inlining - see issue 13725.
+ void onInvalidMemoryOperationError(void* pretend_sideffect = null) @trusted pure nothrow @nogc;
+ void onOutOfMemoryErrorNoGC() @trusted nothrow @nogc;
+
+ version (COLLECT_FORK)
+ version (OSX)
+ pid_t __fork() nothrow;
+ }
+
+ enum
+ {
+ OPFAIL = ~cast(size_t)0
+ }
+}
+
+alias GC gc_t;
+
+/* ============================ GC =============================== */
+
+// register GC in C constructor (_STI_)
+extern(C) pragma(crt_constructor) void _d_register_conservative_gc()
+{
+ import core.gc.registry;
+ registerGCFactory("conservative", &initialize);
+}
+
+extern(C) pragma(crt_constructor) void _d_register_precise_gc()
+{
+ import core.gc.registry;
+ registerGCFactory("precise", &initialize_precise);
+}
+
+private GC initialize()
+{
+ import core.lifetime : emplace;
+
+ auto gc = cast(ConservativeGC) cstdlib.malloc(__traits(classInstanceSize, ConservativeGC));
+ if (!gc)
+ onOutOfMemoryErrorNoGC();
+
+ return emplace(gc);
+}
+
+private GC initialize_precise()
+{
+ ConservativeGC.isPrecise = true;
+ return initialize();
+}
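+
+// The factories registered above are looked up by name at startup (see
+// core.gc.registry and core.gc.config); for example, a program can select the
+// precise variant with the runtime option --DRT-gcopt=gc:precise.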
+
+class ConservativeGC : GC
+{
+ // For passing to debug code (not thread safe)
+ __gshared size_t line;
+ __gshared char* file;
+
+ Gcx *gcx; // implementation
+
+ import core.internal.spinlock;
+ static gcLock = shared(AlignedSpinLock)(SpinLock.Contention.lengthy);
+ static bool _inFinalizer;
+ __gshared bool isPrecise = false;
+
+ // lock GC, throw InvalidMemoryOperationError on recursive locking during finalization
+ static void lockNR() @nogc nothrow
+ {
+ if (_inFinalizer)
+ onInvalidMemoryOperationError();
+ gcLock.lock();
+ }
+
+ this()
+ {
+ //config is assumed to have already been initialized
+
+ gcx = cast(Gcx*)cstdlib.calloc(1, Gcx.sizeof);
+ if (!gcx)
+ onOutOfMemoryErrorNoGC();
+ gcx.initialize();
+
+ if (config.initReserve)
+ gcx.reserve(config.initReserve);
+ if (config.disable)
+ gcx.disabled++;
+ }
+
+
+ ~this()
+ {
+ version (linux)
+ {
+ //debug(PRINTF) printf("Thread %x ", pthread_self());
+ //debug(PRINTF) printf("GC.Dtor()\n");
+ }
+
+ if (gcx)
+ {
+ gcx.Dtor();
+ cstdlib.free(gcx);
+ gcx = null;
+ }
+ // TODO: cannot free as memory is overwritten and
+ // the monitor is still read in rt_finalize (called by destroy)
+ // cstdlib.free(cast(void*) this);
+ }
+
+
+ void enable()
+ {
+ static void go(Gcx* gcx) nothrow
+ {
+ assert(gcx.disabled > 0);
+ gcx.disabled--;
+ }
+ runLocked!(go, otherTime, numOthers)(gcx);
+ }
+
+
+ void disable()
+ {
+ static void go(Gcx* gcx) nothrow
+ {
+ gcx.disabled++;
+ }
+ runLocked!(go, otherTime, numOthers)(gcx);
+ }
+
+ debug (GC_RECURSIVE_LOCK) static bool lockedOnThisThread;
+
+ auto runLocked(alias func, Args...)(auto ref Args args)
+ {
+ debug(PROFILE_API) immutable tm = (config.profile > 1 ? currTime.ticks : 0);
+ debug(GC_RECURSIVE_LOCK)
+ {
+ if (lockedOnThisThread)
+ onInvalidMemoryOperationError();
+ lockedOnThisThread = true;
+ }
+ lockNR();
+ scope (failure) gcLock.unlock();
+ debug(PROFILE_API) immutable tm2 = (config.profile > 1 ? currTime.ticks : 0);
+
+ static if (is(typeof(func(args)) == void))
+ func(args);
+ else
+ auto res = func(args);
+
+ debug(PROFILE_API) if (config.profile > 1)
+ lockTime += tm2 - tm;
+ gcLock.unlock();
+ debug(GC_RECURSIVE_LOCK)
+ {
+ if (!lockedOnThisThread)
+ onInvalidMemoryOperationError();
+ lockedOnThisThread = false;
+ }
+
+ static if (!is(typeof(func(args)) == void))
+ return res;
+ }
+
+
+ auto runLocked(alias func, alias time, alias count, Args...)(auto ref Args args)
+ {
+ debug(PROFILE_API) immutable tm = (config.profile > 1 ? currTime.ticks : 0);
+ debug(GC_RECURSIVE_LOCK)
+ {
+ if (lockedOnThisThread)
+ onInvalidMemoryOperationError();
+ lockedOnThisThread = true;
+ }
+ lockNR();
+ scope (failure) gcLock.unlock();
+ debug(PROFILE_API) immutable tm2 = (config.profile > 1 ? currTime.ticks : 0);
+
+ static if (is(typeof(func(args)) == void))
+ func(args);
+ else
+ auto res = func(args);
+
+ debug(PROFILE_API) if (config.profile > 1)
+ {
+ count++;
+ immutable now = currTime.ticks;
+ lockTime += tm2 - tm;
+ time += now - tm2;
+ }
+ gcLock.unlock();
+ debug(GC_RECURSIVE_LOCK)
+ {
+ if (!lockedOnThisThread)
+ onInvalidMemoryOperationError();
+ lockedOnThisThread = false;
+ }
+
+ static if (!is(typeof(func(args)) == void))
+ return res;
+ }
+
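+ // Typical use (as in enable/disable above): wrap a nothrow helper and let
+ // runLocked take the global GC lock and update the profiling counters, e.g.
+ //     static void go(Gcx* gcx) nothrow { gcx.disabled++; }
+ //     runLocked!(go, otherTime, numOthers)(gcx);
+ // The counters are only accumulated in debug=PROFILE_API builds when
+ // config.profile > 1.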
+
+ uint getAttr(void* p) nothrow
+ {
+ if (!p)
+ {
+ return 0;
+ }
+
+ static uint go(Gcx* gcx, void* p) nothrow
+ {
+ Pool* pool = gcx.findPool(p);
+ uint oldb = 0;
+
+ if (pool)
+ {
+ p = sentinel_sub(p);
+ if (p != pool.findBase(p))
+ return 0;
+ auto biti = cast(size_t)(p - pool.baseAddr) >> pool.shiftBy;
+
+ oldb = pool.getBits(biti);
+ }
+ return oldb;
+ }
+
+ return runLocked!(go, otherTime, numOthers)(gcx, p);
+ }
+
+
+ uint setAttr(void* p, uint mask) nothrow
+ {
+ if (!p)
+ {
+ return 0;
+ }
+
+ static uint go(Gcx* gcx, void* p, uint mask) nothrow
+ {
+ Pool* pool = gcx.findPool(p);
+ uint oldb = 0;
+
+ if (pool)
+ {
+ p = sentinel_sub(p);
+ if (p != pool.findBase(p))
+ return 0;
+ auto biti = cast(size_t)(p - pool.baseAddr) >> pool.shiftBy;
+
+ oldb = pool.getBits(biti);
+ pool.setBits(biti, mask);
+ }
+ return oldb;
+ }
+
+ return runLocked!(go, otherTime, numOthers)(gcx, p, mask);
+ }
+
+
+ uint clrAttr(void* p, uint mask) nothrow
+ {
+ if (!p)
+ {
+ return 0;
+ }
+
+ static uint go(Gcx* gcx, void* p, uint mask) nothrow
+ {
+ Pool* pool = gcx.findPool(p);
+ uint oldb = 0;
+
+ if (pool)
+ {
+ p = sentinel_sub(p);
+ if (p != pool.findBase(p))
+ return 0;
+ auto biti = cast(size_t)(p - pool.baseAddr) >> pool.shiftBy;
+
+ oldb = pool.getBits(biti);
+ pool.clrBits(biti, mask);
+ }
+ return oldb;
+ }
+
+ return runLocked!(go, otherTime, numOthers)(gcx, p, mask);
+ }
+
+
+ void *malloc(size_t size, uint bits, const TypeInfo ti) nothrow
+ {
+ if (!size)
+ {
+ return null;
+ }
+
+ size_t localAllocSize = void;
+
+ auto p = runLocked!(mallocNoSync, mallocTime, numMallocs)(size, bits, localAllocSize, ti);
+
+ if (!(bits & BlkAttr.NO_SCAN))
+ {
+ memset(p + size, 0, localAllocSize - size);
+ }
+
+ return p;
+ }
+
+
+ //
+ //
+ //
+ private void *mallocNoSync(size_t size, uint bits, ref size_t alloc_size, const TypeInfo ti = null) nothrow
+ {
+ assert(size != 0);
+
+ debug(PRINTF)
+ printf("GC::malloc(gcx = %p, size = %d bits = %x, ti = %s)\n", gcx, size, bits, debugTypeName(ti).ptr);
+
+ assert(gcx);
+ //debug(PRINTF) printf("gcx.self = %x, pthread_self() = %x\n", gcx.self, pthread_self());
+
+ auto p = gcx.alloc(size + SENTINEL_EXTRA, alloc_size, bits, ti);
+ if (!p)
+ onOutOfMemoryErrorNoGC();
+
+ debug (SENTINEL)
+ {
+ p = sentinel_add(p);
+ sentinel_init(p, size);
+ alloc_size = size;
+ }
+ gcx.leakDetector.log_malloc(p, size);
+ bytesAllocated += alloc_size;
+
+ debug(PRINTF) printf(" => p = %p\n", p);
+ return p;
+ }
+
+
+ BlkInfo qalloc( size_t size, uint bits, const scope TypeInfo ti) nothrow
+ {
+
+ if (!size)
+ {
+ return BlkInfo.init;
+ }
+
+ BlkInfo retval;
+
+ retval.base = runLocked!(mallocNoSync, mallocTime, numMallocs)(size, bits, retval.size, ti);
+
+ if (!(bits & BlkAttr.NO_SCAN))
+ {
+ memset(retval.base + size, 0, retval.size - size);
+ }
+
+ retval.attr = bits;
+ return retval;
+ }
+
+
+ void *calloc(size_t size, uint bits, const TypeInfo ti) nothrow
+ {
+ if (!size)
+ {
+ return null;
+ }
+
+ size_t localAllocSize = void;
+
+ auto p = runLocked!(mallocNoSync, mallocTime, numMallocs)(size, bits, localAllocSize, ti);
+
+ memset(p, 0, size);
+ if (!(bits & BlkAttr.NO_SCAN))
+ {
+ memset(p + size, 0, localAllocSize - size);
+ }
+
+ return p;
+ }
+
+
+ void *realloc(void *p, size_t size, uint bits, const TypeInfo ti) nothrow
+ {
+ size_t localAllocSize = void;
+ auto oldp = p;
+
+ p = runLocked!(reallocNoSync, mallocTime, numMallocs)(p, size, bits, localAllocSize, ti);
+
+ if (p && !(bits & BlkAttr.NO_SCAN))
+ {
+ memset(p + size, 0, localAllocSize - size);
+ }
+
+ return p;
+ }
+
+
+ //
+ // bits will be set to the resulting bits of the new block
+ //
+ private void *reallocNoSync(void *p, size_t size, ref uint bits, ref size_t alloc_size, const TypeInfo ti = null) nothrow
+ {
+ if (!size)
+ {
+ if (p)
+ freeNoSync(p);
+ alloc_size = 0;
+ return null;
+ }
+ if (!p)
+ return mallocNoSync(size, bits, alloc_size, ti);
+
+ debug(PRINTF) printf("GC::realloc(p = %p, size = %llu)\n", p, cast(ulong)size);
+
+ Pool *pool = gcx.findPool(p);
+ if (!pool)
+ return null;
+
+ size_t psize;
+ size_t biti;
+
+ debug(SENTINEL)
+ {
+ void* q = p;
+ p = sentinel_sub(p);
+ bool alwaysMalloc = true;
+ }
+ else
+ {
+ alias q = p;
+ enum alwaysMalloc = false;
+ }
+
+ void* doMalloc()
+ {
+ if (!bits)
+ bits = pool.getBits(biti);
+
+ void* p2 = mallocNoSync(size, bits, alloc_size, ti);
+ debug (SENTINEL)
+ psize = sentinel_size(q, psize);
+ if (psize < size)
+ size = psize;
+ //debug(PRINTF) printf("\tcopying %d bytes\n",size);
+ memcpy(p2, q, size);
+ freeNoSync(q);
+ return p2;
+ }
+
+ if (pool.isLargeObject)
+ {
+ auto lpool = cast(LargeObjectPool*) pool;
+ auto psz = lpool.getPages(p); // get allocated size
+ if (psz == 0)
+ return null; // interior pointer
+ psize = psz * PAGESIZE;
+
+ alias pagenum = biti; // happens to be the same, but rename for clarity
+ pagenum = lpool.pagenumOf(p);
+
+ if (size <= PAGESIZE / 2 || alwaysMalloc)
+ return doMalloc(); // switching from large object pool to small object pool
+
+ auto newsz = lpool.numPages(size);
+ if (newsz == psz)
+ {
+ // nothing to do
+ }
+ else if (newsz < psz)
+ {
+ // Shrink in place
+ debug (MEMSTOMP) memset(p + size, 0xF2, psize - size);
+ lpool.freePages(pagenum + newsz, psz - newsz);
+ lpool.mergeFreePageOffsets!(false, true)(pagenum + newsz, psz - newsz);
+ lpool.bPageOffsets[pagenum] = cast(uint) newsz;
+ }
+ else if (pagenum + newsz <= pool.npages)
+ {
+ // Attempt to expand in place (TODO: merge with extend)
+ if (lpool.pagetable[pagenum + psz] != B_FREE)
+ return doMalloc();
+
+ auto newPages = newsz - psz;
+ auto freesz = lpool.bPageOffsets[pagenum + psz];
+ if (freesz < newPages)
+ return doMalloc(); // free range too small
+
+ debug (MEMSTOMP) memset(p + psize, 0xF0, size - psize);
+ debug (PRINTF) printFreeInfo(pool);
+ memset(&lpool.pagetable[pagenum + psz], B_PAGEPLUS, newPages);
+ lpool.bPageOffsets[pagenum] = cast(uint) newsz;
+ for (auto offset = psz; offset < newsz; offset++)
+ lpool.bPageOffsets[pagenum + offset] = cast(uint) offset;
+ if (freesz > newPages)
+ lpool.setFreePageOffsets(pagenum + newsz, freesz - newPages);
+ gcx.usedLargePages += newPages;
+ lpool.freepages -= newPages;
+ debug (PRINTF) printFreeInfo(pool);
+ }
+ else
+ return doMalloc(); // does not fit into current pool
+
+ alloc_size = newsz * PAGESIZE;
+ }
+ else
+ {
+ psize = (cast(SmallObjectPool*) pool).getSize(p); // get allocated bin size
+ if (psize == 0)
+ return null; // interior pointer
+ biti = cast(size_t)(p - pool.baseAddr) >> Pool.ShiftBy.Small;
+ if (pool.freebits.test (biti))
+ return null;
+
+ // allocate if new size is bigger or less than half
+ if (psize < size || psize > size * 2 || alwaysMalloc)
+ return doMalloc();
+
+ alloc_size = psize;
+ if (isPrecise)
+ pool.setPointerBitmapSmall(p, size, psize, bits, ti);
+ }
+
+ if (bits)
+ {
+ pool.clrBits(biti, ~BlkAttr.NONE);
+ pool.setBits(biti, bits);
+ }
+ return p;
+ }
+
+
+ size_t extend(void* p, size_t minsize, size_t maxsize, const TypeInfo ti) nothrow
+ {
+ return runLocked!(extendNoSync, extendTime, numExtends)(p, minsize, maxsize, ti);
+ }
+
+
+ //
+ //
+ //
+ private size_t extendNoSync(void* p, size_t minsize, size_t maxsize, const TypeInfo ti = null) nothrow
+ in
+ {
+ assert(minsize <= maxsize);
+ }
+ do
+ {
+ debug(PRINTF) printf("GC::extend(p = %p, minsize = %zu, maxsize = %zu)\n", p, minsize, maxsize);
+ debug (SENTINEL)
+ {
+ return 0;
+ }
+ else
+ {
+ auto pool = gcx.findPool(p);
+ if (!pool || !pool.isLargeObject)
+ return 0;
+
+ auto lpool = cast(LargeObjectPool*) pool;
+ size_t pagenum = lpool.pagenumOf(p);
+ if (lpool.pagetable[pagenum] != B_PAGE)
+ return 0;
+
+ size_t psz = lpool.bPageOffsets[pagenum];
+ assert(psz > 0);
+
+ auto minsz = lpool.numPages(minsize);
+ auto maxsz = lpool.numPages(maxsize);
+
+ if (pagenum + psz >= lpool.npages)
+ return 0;
+ if (lpool.pagetable[pagenum + psz] != B_FREE)
+ return 0;
+
+ size_t freesz = lpool.bPageOffsets[pagenum + psz];
+ if (freesz < minsz)
+ return 0;
+ size_t sz = freesz > maxsz ? maxsz : freesz;
+ debug (MEMSTOMP) memset(pool.baseAddr + (pagenum + psz) * PAGESIZE, 0xF0, sz * PAGESIZE);
+ memset(lpool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
+ lpool.bPageOffsets[pagenum] = cast(uint) (psz + sz);
+ for (auto offset = psz; offset < psz + sz; offset++)
+ lpool.bPageOffsets[pagenum + offset] = cast(uint) offset;
+ if (freesz > sz)
+ lpool.setFreePageOffsets(pagenum + psz + sz, freesz - sz);
+ lpool.freepages -= sz;
+ gcx.usedLargePages += sz;
+ return (psz + sz) * PAGESIZE;
+ }
+ }
+
+
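+ // Backs core.memory.GC.reserve; e.g. GC.reserve(16 << 20) asks the collector
+ // to pre-allocate at least about 16 MB of pool memory up front (illustrative
+ // use of the public API in core.memory).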
+ size_t reserve(size_t size) nothrow
+ {
+ if (!size)
+ {
+ return 0;
+ }
+
+ return runLocked!(reserveNoSync, otherTime, numOthers)(size);
+ }
+
+
+ //
+ //
+ //
+ private size_t reserveNoSync(size_t size) nothrow
+ {
+ assert(size != 0);
+ assert(gcx);
+
+ return gcx.reserve(size);
+ }
+
+
+ void free(void *p) nothrow @nogc
+ {
+ if (!p || _inFinalizer)
+ {
+ return;
+ }
+
+ return runLocked!(freeNoSync, freeTime, numFrees)(p);
+ }
+
+
+ //
+ //
+ //
+ private void freeNoSync(void *p) nothrow @nogc
+ {
+ debug(PRINTF) printf("Freeing %p\n", cast(size_t) p);
+ assert (p);
+
+ Pool* pool;
+ size_t pagenum;
+ Bins bin;
+ size_t biti;
+
+ // Find which page it is in
+ pool = gcx.findPool(p);
+ if (!pool) // if not one of ours
+ return; // ignore
+
+ pagenum = pool.pagenumOf(p);
+
+ debug(PRINTF) printf("pool base = %p, PAGENUM = %d of %d, bin = %d\n", pool.baseAddr, pagenum, pool.npages, pool.pagetable[pagenum]);
+ debug(PRINTF) if (pool.isLargeObject) printf("Block size = %d\n", pool.bPageOffsets[pagenum]);
+
+ bin = cast(Bins)pool.pagetable[pagenum];
+
+ // Verify that the pointer is at the beginning of a block;
+ // no action should be taken if p is an interior pointer
+ if (bin > B_PAGE) // B_PAGEPLUS or B_FREE
+ return;
+ size_t off = (sentinel_sub(p) - pool.baseAddr);
+ size_t base = baseOffset(off, bin);
+ if (off != base)
+ return;
+
+ sentinel_Invariant(p);
+ auto q = p;
+ p = sentinel_sub(p);
+ size_t ssize;
+
+ if (pool.isLargeObject) // if large alloc
+ {
+ biti = cast(size_t)(p - pool.baseAddr) >> pool.ShiftBy.Large;
+ assert(bin == B_PAGE);
+ auto lpool = cast(LargeObjectPool*) pool;
+
+ // Free pages
+ size_t npages = lpool.bPageOffsets[pagenum];
+ auto size = npages * PAGESIZE;
+ ssize = sentinel_size(q, size);
+ debug (MEMSTOMP) memset(p, 0xF2, size);
+ lpool.freePages(pagenum, npages);
+ lpool.mergeFreePageOffsets!(true, true)(pagenum, npages);
+ }
+ else
+ {
+ biti = cast(size_t)(p - pool.baseAddr) >> pool.ShiftBy.Small;
+ if (pool.freebits.test (biti))
+ return;
+ // Add to free list
+ List *list = cast(List*)p;
+
+ auto size = binsize[bin];
+ ssize = sentinel_size(q, size);
+ debug (MEMSTOMP) memset(p, 0xF2, size);
+
+ // in case the page hasn't been recovered yet, don't add the object to the free list
+ if (!gcx.recoverPool[bin] || pool.binPageChain[pagenum] == Pool.PageRecovered)
+ {
+ list.next = gcx.bucket[bin];
+ list.pool = pool;
+ gcx.bucket[bin] = list;
+ }
+ pool.freebits.set(biti);
+ }
+ pool.clrBits(biti, ~BlkAttr.NONE);
+
+ gcx.leakDetector.log_free(sentinel_add(p), ssize);
+ }
+
+
+ void* addrOf(void *p) nothrow @nogc
+ {
+ if (!p)
+ {
+ return null;
+ }
+
+ return runLocked!(addrOfNoSync, otherTime, numOthers)(p);
+ }
+
+
+ //
+ //
+ //
+ void* addrOfNoSync(void *p) nothrow @nogc
+ {
+ if (!p)
+ {
+ return null;
+ }
+
+ auto q = gcx.findBase(p);
+ if (q)
+ q = sentinel_add(q);
+ return q;
+ }
+
+
+ size_t sizeOf(void *p) nothrow @nogc
+ {
+ if (!p)
+ {
+ return 0;
+ }
+
+ return runLocked!(sizeOfNoSync, otherTime, numOthers)(p);
+ }
+
+
+ //
+ //
+ //
+ private size_t sizeOfNoSync(void *p) nothrow @nogc
+ {
+ assert (p);
+
+ debug (SENTINEL)
+ {
+ p = sentinel_sub(p);
+ size_t size = gcx.findSize(p);
+ return size ? size - SENTINEL_EXTRA : 0;
+ }
+ else
+ {
+ size_t size = gcx.findSize(p);
+ return size;
+ }
+ }
+
+
+ BlkInfo query(void *p) nothrow
+ {
+ if (!p)
+ {
+ BlkInfo i;
+ return i;
+ }
+
+ return runLocked!(queryNoSync, otherTime, numOthers)(p);
+ }
+
+ //
+ //
+ //
+ BlkInfo queryNoSync(void *p) nothrow
+ {
+ assert(p);
+
+ BlkInfo info = gcx.getInfo(p);
+ debug(SENTINEL)
+ {
+ if (info.base)
+ {
+ info.base = sentinel_add(info.base);
+ info.size = *sentinel_psize(info.base);
+ }
+ }
+ return info;
+ }
+
+
+ /**
+ * Verify that pointer p:
+ * 1) belongs to this memory pool
+ * 2) points to the start of an allocated piece of memory
+ * 3) is not on a free list
+ */
+ void check(void *p) nothrow
+ {
+ if (!p)
+ {
+ return;
+ }
+
+ return runLocked!(checkNoSync, otherTime, numOthers)(p);
+ }
+
+
+ //
+ //
+ //
+ private void checkNoSync(void *p) nothrow
+ {
+ assert(p);
+
+ sentinel_Invariant(p);
+ debug (PTRCHECK)
+ {
+ Pool* pool;
+ size_t pagenum;
+ Bins bin;
+
+ p = sentinel_sub(p);
+ pool = gcx.findPool(p);
+ assert(pool);
+ pagenum = pool.pagenumOf(p);
+ bin = cast(Bins)pool.pagetable[pagenum];
+ assert(bin <= B_PAGE);
+ assert(p == cast(void*)baseOffset(cast(size_t)p, bin));
+
+ debug (PTRCHECK2)
+ {
+ if (bin < B_PAGE)
+ {
+ // Check that p is not on a free list
+ List *list;
+
+ for (list = gcx.bucket[bin]; list; list = list.next)
+ {
+ assert(cast(void*)list != p);
+ }
+ }
+ }
+ }
+ }
+
+
+ void addRoot(void *p) nothrow @nogc
+ {
+ if (!p)
+ {
+ return;
+ }
+
+ gcx.addRoot(p);
+ }
+
+
+ void removeRoot(void *p) nothrow @nogc
+ {
+ if (!p)
+ {
+ return;
+ }
+
+ gcx.removeRoot(p);
+ }
+
+
+ @property RootIterator rootIter() @nogc
+ {
+ return &gcx.rootsApply;
+ }
+
+
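+ // Memory allocated outside the GC (e.g. with C malloc) that stores pointers
+ // into the GC heap should be registered through the public
+ // core.memory.GC.addRange so it is scanned during collections; removeRange
+ // undoes the registration.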
+ void addRange(void *p, size_t sz, const TypeInfo ti = null) nothrow @nogc
+ {
+ if (!p || !sz)
+ {
+ return;
+ }
+
+ gcx.addRange(p, p + sz, ti);
+ }
+
+
+ void removeRange(void *p) nothrow @nogc
+ {
+ if (!p)
+ {
+ return;
+ }
+
+ gcx.removeRange(p);
+ }
+
+
+ @property RangeIterator rangeIter() @nogc
+ {
+ return &gcx.rangesApply;
+ }
+
+
+ void runFinalizers(const scope void[] segment) nothrow
+ {
+ static void go(Gcx* gcx, const scope void[] segment) nothrow
+ {
+ gcx.runFinalizers(segment);
+ }
+ return runLocked!(go, otherTime, numOthers)(gcx, segment);
+ }
+
+
+ bool inFinalizer() nothrow @nogc
+ {
+ return _inFinalizer;
+ }
+
+
+ void collect() nothrow
+ {
+ fullCollect();
+ }
+
+
+ void collectNoStack() nothrow
+ {
+ fullCollectNoStack();
+ }
+
+
+ /**
+ * Do full garbage collection.
+ * Return number of pages free'd.
+ */
+ size_t fullCollect() nothrow
+ {
+ debug(PRINTF) printf("GC.fullCollect()\n");
+
+ // Since a finalizer could launch a new thread, we always need to lock
+ // when collecting.
+ static size_t go(Gcx* gcx) nothrow
+ {
+ return gcx.fullcollect(false, true); // standard stop the world
+ }
+ immutable result = runLocked!go(gcx);
+
+ version (none)
+ {
+ GCStats stats;
+
+ getStats(stats);
+ debug(PRINTF) printf("heapSize = %zx, freeSize = %zx\n",
+ stats.heapSize, stats.freeSize);
+ }
+
+ gcx.leakDetector.log_collect();
+ return result;
+ }
+
+
+ /**
+ * Do a full garbage collection without scanning thread stacks.
+ */
+ void fullCollectNoStack() nothrow
+ {
+ // Since a finalizer could launch a new thread, we always need to lock
+ // when collecting.
+ static size_t go(Gcx* gcx) nothrow
+ {
+ return gcx.fullcollect(true, true, true); // standard stop the world
+ }
+ runLocked!go(gcx);
+ }
+
+
+ void minimize() nothrow
+ {
+ static void go(Gcx* gcx) nothrow
+ {
+ gcx.minimize();
+ }
+ runLocked!(go, otherTime, numOthers)(gcx);
+ }
+
+
+ core.memory.GC.Stats stats() nothrow
+ {
+ typeof(return) ret;
+
+ runLocked!(getStatsNoSync, otherTime, numOthers)(ret);
+
+ return ret;
+ }
+
+
+ core.memory.GC.ProfileStats profileStats() nothrow @trusted
+ {
+ typeof(return) ret;
+
+ ret.numCollections = numCollections;
+ ret.totalCollectionTime = prepTime + markTime + sweepTime;
+ ret.totalPauseTime = pauseTime;
+ ret.maxCollectionTime = maxCollectionTime;
+ ret.maxPauseTime = maxPauseTime;
+
+ return ret;
+ }
+
+
+ ulong allocatedInCurrentThread() nothrow
+ {
+ return bytesAllocated;
+ }
+
+
+ //
+ //
+ //
+ private void getStatsNoSync(out core.memory.GC.Stats stats) nothrow
+ {
+ foreach (pool; gcx.pooltable[0 .. gcx.npools])
+ {
+ foreach (bin; pool.pagetable[0 .. pool.npages])
+ {
+ if (bin == B_FREE)
+ stats.freeSize += PAGESIZE;
+ else
+ stats.usedSize += PAGESIZE;
+ }
+ }
+
+ size_t freeListSize;
+ foreach (n; 0 .. B_PAGE)
+ {
+ immutable sz = binsize[n];
+ for (List *list = gcx.bucket[n]; list; list = list.next)
+ freeListSize += sz;
+
+ foreach (pool; gcx.pooltable[0 .. gcx.npools])
+ {
+ if (pool.isLargeObject)
+ continue;
+ for (uint pn = pool.recoverPageFirst[n]; pn < pool.npages; pn = pool.binPageChain[pn])
+ {
+ const bitbase = pn * PAGESIZE / 16;
+ const top = PAGESIZE - sz + 1; // ensure <size> bytes available even if unaligned
+ for (size_t u = 0; u < top; u += sz)
+ if (pool.freebits.test(bitbase + u / 16))
+ freeListSize += sz;
+ }
+ }
+ }
+
+ stats.usedSize -= freeListSize;
+ stats.freeSize += freeListSize;
+ stats.allocatedInCurrentThread = bytesAllocated;
+ }
+}
+
+
+/* ============================ Gcx =============================== */
+
+enum
+{ PAGESIZE = 4096,
+}
+
+
+enum
+{
+ B_16,
+ B_32,
+ B_48,
+ B_64,
+ B_96,
+ B_128,
+ B_176,
+ B_256,
+ B_368,
+ B_512,
+ B_816,
+ B_1024,
+ B_1360,
+ B_2048,
+ B_NUMSMALL,
+
+ B_PAGE = B_NUMSMALL,// start of large alloc
+ B_PAGEPLUS, // continuation of large alloc
+ B_FREE, // free page
+ B_MAX,
+}
+
+
+alias ubyte Bins;
+
+
+struct List
+{
+ List *next;
+ Pool *pool;
+}
+
+// non-power-of-2 sizes chosen so that only a small remainder of each page (<= 64 bytes) is left unused
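+// (e.g. 42 * 96 == 4032 and 5 * 816 == 4080, so at most 64 of the 4096 bytes
+// of a page are left unused by any of these bin sizes)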
+immutable short[B_NUMSMALL + 1] binsize = [ 16, 32, 48, 64, 96, 128, 176, 256, 368, 512, 816, 1024, 1360, 2048, 4096 ];
+immutable short[PAGESIZE / 16][B_NUMSMALL + 1] binbase = calcBinBase();
+
+short[PAGESIZE / 16][B_NUMSMALL + 1] calcBinBase()
+{
+ short[PAGESIZE / 16][B_NUMSMALL + 1] bin;
+
+ foreach (i, size; binsize)
+ {
+ short end = (PAGESIZE / size) * size;
+ short bsz = size / 16;
+ foreach (off; 0..PAGESIZE/16)
+ {
+ // add the remainder to the last bin, so no check during scanning
+ // is needed if a false pointer targets that area
+ const base = (off - off % bsz) * 16;
+ bin[i][off] = cast(short)(base < end ? base : end - size);
+ }
+ }
+ return bin;
+}
+
+size_t baseOffset(size_t offset, Bins bin) @nogc nothrow
+{
+ assert(bin <= B_PAGE);
+ return (offset & ~(PAGESIZE - 1)) + binbase[bin][(offset & (PAGESIZE - 1)) >> 4];
+}
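+
+// For illustration: baseOffset(0x1008, B_16) == 0x1000, while
+// baseOffset(0xFFF, B_48) == 4032: the 16-byte tail that does not fit a full
+// 48-byte slot is folded into the last slot, as computed by calcBinBase above.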
+
+alias PageBits = GCBits.wordtype[PAGESIZE / 16 / GCBits.BITS_PER_WORD];
+static assert(PAGESIZE % (GCBits.BITS_PER_WORD * 16) == 0);
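+// With 4096-byte pages and 16-byte granules this is 256 bits per page, i.e.
+// 4 words on a typical 64-bit target (assuming BITS_PER_WORD == 64).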
+
+// bitmask with bits set at base offsets of objects
+immutable PageBits[B_NUMSMALL] baseOffsetBits = (){
+ PageBits[B_NUMSMALL] bits;
+ foreach (bin; 0..B_NUMSMALL)
+ {
+ size_t size = binsize[bin];
+ const top = PAGESIZE - size + 1; // ensure <size> bytes available even if unaligned
+ for (size_t u = 0; u < top; u += size)
+ {
+ size_t biti = u / 16;
+ size_t off = biti / GCBits.BITS_PER_WORD;
+ size_t mod = biti % GCBits.BITS_PER_WORD;
+ bits[bin][off] |= GCBits.BITS_1 << mod;
+ }
+ }
+ return bits;
+}();
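+
+// For example, B_16 sets every granule bit of the page, while B_32 sets every
+// other bit (0, 2, 4, ...), marking the start of each 32-byte slot.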
+
+private void set(ref PageBits bits, size_t i) @nogc pure nothrow
+{
+ assert(i < PageBits.sizeof * 8);
+ bts(bits.ptr, i);
+}
+
+/* ============================ Gcx =============================== */
+
+struct Gcx
+{
+ import core.internal.spinlock;
+ auto rootsLock = shared(AlignedSpinLock)(SpinLock.Contention.brief);
+ auto rangesLock = shared(AlignedSpinLock)(SpinLock.Contention.brief);
+ Treap!Root roots;
+ Treap!Range ranges;
+ bool minimizeAfterNextCollection = false;
+ version (COLLECT_FORK)
+ {
+ private pid_t markProcPid = 0;
+ bool shouldFork;
+ }
+
+ debug(INVARIANT) bool initialized;
+ debug(INVARIANT) bool inCollection;
+ uint disabled; // turn off collections if >0
+
+ import core.internal.gc.pooltable;
+ private @property size_t npools() pure const nothrow { return pooltable.length; }
+ PoolTable!Pool pooltable;
+
+ List*[B_NUMSMALL] bucket; // free list for each small size
+
+ // run a collection when reaching those thresholds (number of used pages)
+ float smallCollectThreshold, largeCollectThreshold;
+ uint usedSmallPages, usedLargePages;
+ // total number of mapped pages
+ uint mappedPages;
+
+ debug (LOGGING)
+ LeakDetector leakDetector;
+ else
+ alias leakDetector = LeakDetector;
+
+ SmallObjectPool*[B_NUMSMALL] recoverPool;
+ version (Posix) __gshared Gcx* instance;
+
+ void initialize()
+ {
+ (cast(byte*)&this)[0 .. Gcx.sizeof] = 0;
+ leakDetector.initialize(&this);
+ roots.initialize(0x243F6A8885A308D3UL);
+ ranges.initialize(0x13198A2E03707344UL);
+ smallCollectThreshold = largeCollectThreshold = 0.0f;
+ usedSmallPages = usedLargePages = 0;
+ mappedPages = 0;
+ //printf("gcx = %p, self = %x\n", &this, self);
+ version (Posix)
+ {
+ import core.sys.posix.pthread : pthread_atfork;
+ instance = &this;
+ __gshared atforkHandlersInstalled = false;
+ if (!atforkHandlersInstalled)
+ {
+ pthread_atfork(
+ &_d_gcx_atfork_prepare,
+ &_d_gcx_atfork_parent,
+ &_d_gcx_atfork_child);
+ atforkHandlersInstalled = true;
+ }
+ }
+ debug(INVARIANT) initialized = true;
+ version (COLLECT_FORK)
+ shouldFork = config.fork;
+
+ }
+
+ void Dtor()
+ {
+ if (config.profile)
+ {
+ printf("\tNumber of collections: %llu\n", cast(ulong)numCollections);
+ printf("\tTotal GC prep time: %lld milliseconds\n",
+ prepTime.total!("msecs"));
+ printf("\tTotal mark time: %lld milliseconds\n",
+ markTime.total!("msecs"));
+ printf("\tTotal sweep time: %lld milliseconds\n",
+ sweepTime.total!("msecs"));
+ long maxPause = maxPauseTime.total!("msecs");
+ printf("\tMax Pause Time: %lld milliseconds\n", maxPause);
+ long gcTime = (sweepTime + markTime + prepTime).total!("msecs");
+ printf("\tGrand total GC time: %lld milliseconds\n", gcTime);
+ long pauseTime = (markTime + prepTime).total!("msecs");
+
+ char[30] apitxt = void;
+ apitxt[0] = 0;
+ debug(PROFILE_API) if (config.profile > 1)
+ {
+ static Duration toDuration(long dur)
+ {
+ return MonoTime(dur) - MonoTime(0);
+ }
+
+ printf("\n");
+ printf("\tmalloc: %llu calls, %lld ms\n", cast(ulong)numMallocs, toDuration(mallocTime).total!"msecs");
+ printf("\trealloc: %llu calls, %lld ms\n", cast(ulong)numReallocs, toDuration(reallocTime).total!"msecs");
+ printf("\tfree: %llu calls, %lld ms\n", cast(ulong)numFrees, toDuration(freeTime).total!"msecs");
+ printf("\textend: %llu calls, %lld ms\n", cast(ulong)numExtends, toDuration(extendTime).total!"msecs");
+ printf("\tother: %llu calls, %lld ms\n", cast(ulong)numOthers, toDuration(otherTime).total!"msecs");
+ printf("\tlock time: %lld ms\n", toDuration(lockTime).total!"msecs");
+
+ long apiTime = mallocTime + reallocTime + freeTime + extendTime + otherTime + lockTime;
+ printf("\tGC API: %lld ms\n", toDuration(apiTime).total!"msecs");
+ sprintf(apitxt.ptr, " API%5ld ms", toDuration(apiTime).total!"msecs");
+ }
+
+ printf("GC summary:%5lld MB,%5lld GC%5lld ms, Pauses%5lld ms <%5lld ms%s\n",
+ cast(long) maxPoolMemory >> 20, cast(ulong)numCollections, gcTime,
+ pauseTime, maxPause, apitxt.ptr);
+ }
+
+ version (Posix)
+ instance = null;
+ version (COLLECT_PARALLEL)
+ stopScanThreads();
+
+ debug(INVARIANT) initialized = false;
+
+ for (size_t i = 0; i < npools; i++)
+ {
+ Pool *pool = pooltable[i];
+ mappedPages -= pool.npages;
+ pool.Dtor();
+ cstdlib.free(pool);
+ }
+ assert(!mappedPages);
+ pooltable.Dtor();
+
+ roots.removeAll();
+ ranges.removeAll();
+ toscanConservative.reset();
+ toscanPrecise.reset();
+ }
+
+
+ void Invariant() const { }
+
+ debug(INVARIANT)
+ invariant()
+ {
+ if (initialized)
+ {
+ //printf("Gcx.invariant(): this = %p\n", &this);
+ pooltable.Invariant();
+ for (size_t p = 0; p < pooltable.length; p++)
+ if (pooltable.pools[p].isLargeObject)
+ (cast(LargeObjectPool*)(pooltable.pools[p])).Invariant();
+ else
+ (cast(SmallObjectPool*)(pooltable.pools[p])).Invariant();
+
+ if (!inCollection)
+ (cast()rangesLock).lock();
+ foreach (range; ranges)
+ {
+ assert(range.pbot);
+ assert(range.ptop);
+ assert(range.pbot <= range.ptop);
+ }
+ if (!inCollection)
+ (cast()rangesLock).unlock();
+
+ for (size_t i = 0; i < B_NUMSMALL; i++)
+ {
+ size_t j = 0;
+ List* prev, pprev, ppprev; // keep a short history to inspect in the debugger
+ for (auto list = cast(List*)bucket[i]; list; list = list.next)
+ {
+ auto pool = list.pool;
+ auto biti = cast(size_t)(cast(void*)list - pool.baseAddr) >> Pool.ShiftBy.Small;
+ assert(pool.freebits.test(biti));
+ ppprev = pprev;
+ pprev = prev;
+ prev = list;
+ }
+ }
+ }
+ }
+
+ @property bool collectInProgress() const nothrow
+ {
+ version (COLLECT_FORK)
+ return markProcPid != 0;
+ else
+ return false;
+ }
+
+
+ /**
+ *
+ */
+ void addRoot(void *p) nothrow @nogc
+ {
+ rootsLock.lock();
+ scope (failure) rootsLock.unlock();
+ roots.insert(Root(p));
+ rootsLock.unlock();
+ }
+
+
+ /**
+ *
+ */
+ void removeRoot(void *p) nothrow @nogc
+ {
+ rootsLock.lock();
+ scope (failure) rootsLock.unlock();
+ roots.remove(Root(p));
+ rootsLock.unlock();
+ }
+
+
+ /**
+ *
+ */
+ int rootsApply(scope int delegate(ref Root) nothrow dg) nothrow
+ {
+ rootsLock.lock();
+ scope (failure) rootsLock.unlock();
+ auto ret = roots.opApply(dg);
+ rootsLock.unlock();
+ return ret;
+ }
+
+
+ /**
+ *
+ */
+ void addRange(void *pbot, void *ptop, const TypeInfo ti) nothrow @nogc
+ {
+ //debug(PRINTF) printf("Thread %x ", pthread_self());
+ debug(PRINTF) printf("%p.Gcx::addRange(%p, %p)\n", &this, pbot, ptop);
+ rangesLock.lock();
+ scope (failure) rangesLock.unlock();
+ ranges.insert(Range(pbot, ptop));
+ rangesLock.unlock();
+ }
+
+
+ /**
+ *
+ */
+ void removeRange(void *pbot) nothrow @nogc
+ {
+ //debug(PRINTF) printf("Thread %x ", pthread_self());
+ debug(PRINTF) printf("Gcx.removeRange(%p)\n", pbot);
+ rangesLock.lock();
+ scope (failure) rangesLock.unlock();
+ ranges.remove(Range(pbot, pbot)); // only pbot is used, see Range.opCmp
+ rangesLock.unlock();
+
+ // debug(PRINTF) printf("Wrong thread\n");
+ // This is a fatal error, but ignore it.
+ // The problem is that we can get a Close() call on a thread
+ // other than the one the range was allocated on.
+ //assert(zero);
+ }
+
+ /**
+ *
+ */
+ int rangesApply(scope int delegate(ref Range) nothrow dg) nothrow
+ {
+ rangesLock.lock();
+ scope (failure) rangesLock.unlock();
+ auto ret = ranges.opApply(dg);
+ rangesLock.unlock();
+ return ret;
+ }
+
+
+ /**
+ *
+ */
+ void runFinalizers(const scope void[] segment) nothrow
+ {
+ ConservativeGC._inFinalizer = true;
+ scope (failure) ConservativeGC._inFinalizer = false;
+
+ foreach (pool; pooltable[0 .. npools])
+ {
+ if (!pool.finals.nbits) continue;
+
+ if (pool.isLargeObject)
+ {
+ auto lpool = cast(LargeObjectPool*) pool;
+ lpool.runFinalizers(segment);
+ }
+ else
+ {
+ auto spool = cast(SmallObjectPool*) pool;
+ spool.runFinalizers(segment);
+ }
+ }
+ ConservativeGC._inFinalizer = false;
+ }
+
+ Pool* findPool(void* p) pure nothrow @nogc
+ {
+ return pooltable.findPool(p);
+ }
+
+ /**
+ * Find base address of block containing pointer p.
+ * Returns null if not a gc'd pointer
+ */
+ void* findBase(void *p) nothrow @nogc
+ {
+ Pool *pool;
+
+ pool = findPool(p);
+ if (pool)
+ return pool.findBase(p);
+ return null;
+ }
+
+
+ /**
+ * Find size of pointer p.
+ * Returns 0 if not a gc'd pointer
+ */
+ size_t findSize(void *p) nothrow @nogc
+ {
+ Pool* pool = findPool(p);
+ if (pool)
+ return pool.slGetSize(p);
+ return 0;
+ }
+
+ /**
+ *
+ */
+ BlkInfo getInfo(void* p) nothrow
+ {
+ Pool* pool = findPool(p);
+ if (pool)
+ return pool.slGetInfo(p);
+ return BlkInfo();
+ }
+
+ /**
+ * Computes the bin table using CTFE.
+ */
+ static byte[2049] ctfeBins() nothrow
+ {
+ byte[2049] ret;
+ size_t p = 0;
+ for (Bins b = B_16; b <= B_2048; b++)
+ for ( ; p <= binsize[b]; p++)
+ ret[p] = b;
+
+ return ret;
+ }
+
+ static const byte[2049] binTable = ctfeBins();
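+ // e.g. binTable[16] == B_16, binTable[17] == B_32 and binTable[2048] == B_2048;
+ // requests larger than 2048 bytes go to the large object pools instead.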
+
+ /**
+ * Allocate a new pool of at least size bytes.
+ * Sort it into pooltable[].
+ * Mark all memory in the pool as B_FREE.
+ * Return the actual number of bytes reserved or 0 on error.
+ */
+ size_t reserve(size_t size) nothrow
+ {
+ size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
+
+ // Assume reserve() is for small objects.
+ Pool* pool = newPool(npages, false);
+
+ if (!pool)
+ return 0;
+ return pool.npages * PAGESIZE;
+ }
+
+ /**
+ * Update the thresholds for when to collect the next time
+ */
+ void updateCollectThresholds() nothrow
+ {
+ static float max(float a, float b) nothrow
+ {
+ return a >= b ? a : b;
+ }
+
+ // instantly increases, slowly decreases
+ static float smoothDecay(float oldVal, float newVal) nothrow
+ {
+ // decay to 63.2% of newVal over 5 collections
+ // http://en.wikipedia.org/wiki/Low-pass_filter#Simple_infinite_impulse_response_filter
+ enum alpha = 1.0 / (5 + 1);
+ immutable decay = (newVal - oldVal) * alpha + oldVal;
+ return max(newVal, decay);
+ }
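+
+ // Worked example: with oldVal = 100 pages and newVal = 40 pages the threshold
+ // becomes max(40, 100 + (40 - 100) / 6) = 90, while newVal = 200 raises it to
+ // 200 immediately ("instantly increases, slowly decreases").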
+
+ immutable smTarget = usedSmallPages * config.heapSizeFactor;
+ smallCollectThreshold = smoothDecay(smallCollectThreshold, smTarget);
+ immutable lgTarget = usedLargePages * config.heapSizeFactor;
+ largeCollectThreshold = smoothDecay(largeCollectThreshold, lgTarget);
+ }
+
+ /**
+ * Minimizes physical memory usage by returning free pools to the OS.
+ */
+ void minimize() nothrow
+ {
+ debug(PRINTF) printf("Minimizing.\n");
+
+ foreach (pool; pooltable.minimize())
+ {
+ debug(PRINTF) printFreeInfo(pool);
+ mappedPages -= pool.npages;
+ pool.Dtor();
+ cstdlib.free(pool);
+ }
+
+ debug(PRINTF) printf("Done minimizing.\n");
+ }
+
+ private @property bool lowMem() const nothrow
+ {
+ return isLowOnMem(cast(size_t)mappedPages * PAGESIZE);
+ }
+
+ void* alloc(size_t size, ref size_t alloc_size, uint bits, const TypeInfo ti) nothrow
+ {
+ return size <= PAGESIZE/2 ? smallAlloc(size, alloc_size, bits, ti)
+ : bigAlloc(size, alloc_size, bits, ti);
+ }
+
+ void* smallAlloc(size_t size, ref size_t alloc_size, uint bits, const TypeInfo ti) nothrow
+ {
+ immutable bin = binTable[size];
+ alloc_size = binsize[bin];
+
+ void* p = bucket[bin];
+ if (p)
+ goto L_hasBin;
+
+ if (recoverPool[bin])
+ recoverNextPage(bin);
+
+ bool tryAlloc() nothrow
+ {
+ if (!bucket[bin])
+ {
+ bucket[bin] = allocPage(bin);
+ if (!bucket[bin])
+ return false;
+ }
+ p = bucket[bin];
+ return true;
+ }
+
+ if (!tryAlloc())
+ {
+ if (!lowMem && (disabled || usedSmallPages < smallCollectThreshold))
+ {
+ // disabled or threshold not reached => allocate a new pool instead of collecting
+ if (!newPool(1, false))
+ {
+ // out of memory => try to free some memory
+ fullcollect(false, true); // stop the world
+ if (lowMem)
+ minimize();
+ recoverNextPage(bin);
+ }
+ }
+ else if (usedSmallPages > 0)
+ {
+ fullcollect();
+ if (lowMem)
+ minimize();
+ recoverNextPage(bin);
+ }
+ // tryAlloc will succeed if a new pool was allocated above; if it still fails, allocate a new pool now
+ if (!tryAlloc() && (!newPool(1, false) || !tryAlloc()))
+ // out of luck or memory
+ onOutOfMemoryErrorNoGC();
+ }
+ assert(p !is null);
+ L_hasBin:
+ // Return next item from free list
+ bucket[bin] = (cast(List*)p).next;
+ auto pool = (cast(List*)p).pool;
+
+ auto biti = (p - pool.baseAddr) >> pool.shiftBy;
+ assert(pool.freebits.test(biti));
+ if (collectInProgress)
+ pool.mark.setLocked(biti); // be sure that the child is aware of the page being used
+ pool.freebits.clear(biti);
+ if (bits)
+ pool.setBits(biti, bits);
+ //debug(PRINTF) printf("\tmalloc => %p\n", p);
+ debug (MEMSTOMP) memset(p, 0xF0, alloc_size);
+
+ if (ConservativeGC.isPrecise)
+ {
+ debug(SENTINEL)
+ pool.setPointerBitmapSmall(sentinel_add(p), size - SENTINEL_EXTRA, size - SENTINEL_EXTRA, bits, ti);
+ else
+ pool.setPointerBitmapSmall(p, size, alloc_size, bits, ti);
+ }
+ return p;
+ }
+
+ /**
+ * Allocate a chunk of memory that is larger than a page.
+ * Return null if out of memory.
+ */
+ void* bigAlloc(size_t size, ref size_t alloc_size, uint bits, const TypeInfo ti = null) nothrow
+ {
+ debug(PRINTF) printf("In bigAlloc. Size: %d\n", size);
+
+ LargeObjectPool* pool;
+ size_t pn;
+ immutable npages = LargeObjectPool.numPages(size);
+ if (npages == size_t.max)
+ onOutOfMemoryErrorNoGC(); // size just below size_t.max requested
+
+ bool tryAlloc() nothrow
+ {
+ foreach (p; pooltable[0 .. npools])
+ {
+ if (!p.isLargeObject || p.freepages < npages)
+ continue;
+ auto lpool = cast(LargeObjectPool*) p;
+ if ((pn = lpool.allocPages(npages)) == OPFAIL)
+ continue;
+ pool = lpool;
+ return true;
+ }
+ return false;
+ }
+
+ bool tryAllocNewPool() nothrow
+ {
+ pool = cast(LargeObjectPool*) newPool(npages, true);
+ if (!pool) return false;
+ pn = pool.allocPages(npages);
+ assert(pn != OPFAIL);
+ return true;
+ }
+
+ if (!tryAlloc())
+ {
+ if (!lowMem && (disabled || usedLargePages < largeCollectThreshold))
+ {
+ // disabled or threshold not reached => allocate a new pool instead of collecting
+ if (!tryAllocNewPool())
+ {
+ // disabled but out of memory => try to free some memory
+ minimizeAfterNextCollection = true;
+ fullcollect(false, true);
+ }
+ }
+ else if (usedLargePages > 0)
+ {
+ minimizeAfterNextCollection = true;
+ fullcollect();
+ }
+ // If alloc didn't yet succeed, retry now that we collected/minimized
+ if (!pool && !tryAlloc() && !tryAllocNewPool())
+ // out of luck or memory
+ return null;
+ }
+ assert(pool);
+
+ debug(PRINTF) printFreeInfo(&pool.base);
+ if (collectInProgress)
+ pool.mark.setLocked(pn);
+ usedLargePages += npages;
+
+ debug(PRINTF) printFreeInfo(&pool.base);
+
+ auto p = pool.baseAddr + pn * PAGESIZE;
+ debug(PRINTF) printf("Got large alloc: %p, pt = %d, np = %d\n", p, pool.pagetable[pn], npages);
+ debug (MEMSTOMP) memset(p, 0xF1, size);
+ alloc_size = npages * PAGESIZE;
+ //debug(PRINTF) printf("\tp = %p\n", p);
+
+ if (bits)
+ pool.setBits(pn, bits);
+
+ if (ConservativeGC.isPrecise)
+ {
+ // an array of classes is in fact an array of pointers
+ immutable(void)* rtinfo;
+ if (!ti)
+ rtinfo = rtinfoHasPointers;
+ else if ((bits & BlkAttr.APPENDABLE) && (typeid(ti) is typeid(TypeInfo_Class)))
+ rtinfo = rtinfoHasPointers;
+ else
+ rtinfo = ti.rtInfo();
+ pool.rtinfo[pn] = cast(immutable(size_t)*)rtinfo;
+ }
+
+ return p;
+ }
+
+
+ /**
+ * Allocate a new pool with at least npages in it.
+ * Sort it into pooltable[].
+ * Return null if failed.
+ */
+ Pool *newPool(size_t npages, bool isLargeObject) nothrow
+ {
+ //debug(PRINTF) printf("************Gcx::newPool(npages = %d)****************\n", npages);
+
+ // Minimum of POOLSIZE
+ size_t minPages = config.minPoolSize / PAGESIZE;
+ if (npages < minPages)
+ npages = minPages;
+ else if (npages > minPages)
+ { // Give us 150% of requested size, so there's room to extend
+ auto n = npages + (npages >> 1);
+ if (n < size_t.max/PAGESIZE)
+ npages = n;
+ }
+
+ // Allocate successively larger pools up to 8 megs
+ if (npools)
+ { size_t n;
+
+ n = config.minPoolSize + config.incPoolSize * npools;
+ if (n > config.maxPoolSize)
+ n = config.maxPoolSize; // cap pool size
+ n /= PAGESIZE; // convert bytes to pages
+ if (npages < n)
+ npages = n;
+ }
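+
+ // For illustration (hypothetical config values): with minPoolSize = 1 MB and
+ // incPoolSize = 3 MB, the pool created while 2 pools already exist is sized
+ // at least 1 + 2 * 3 = 7 MB, capped at maxPoolSize.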
+
+ //printf("npages = %d\n", npages);
+
+ auto pool = cast(Pool *)cstdlib.calloc(1, isLargeObject ? LargeObjectPool.sizeof : SmallObjectPool.sizeof);
+ if (pool)
+ {
+ pool.initialize(npages, isLargeObject);
+ if (collectInProgress)
+ pool.mark.setAll();
+ if (!pool.baseAddr || !pooltable.insert(pool))
+ {
+ pool.Dtor();
+ cstdlib.free(pool);
+ return null;
+ }
+ }
+
+ mappedPages += npages;
+
+ if (config.profile)
+ {
+ if (cast(size_t)mappedPages * PAGESIZE > maxPoolMemory)
+ maxPoolMemory = cast(size_t)mappedPages * PAGESIZE;
+ }
+ return pool;
+ }
+
+ /**
+ * Allocate a page of bins.
+ * Returns:
+ * head of a singly linked list of new entries
+ */
+ List* allocPage(Bins bin) nothrow
+ {
+ //debug(PRINTF) printf("Gcx::allocPage(bin = %d)\n", bin);
+ for (size_t n = 0; n < npools; n++)
+ {
+ Pool* pool = pooltable[n];
+ if (pool.isLargeObject)
+ continue;
+ if (List* p = (cast(SmallObjectPool*)pool).allocPage(bin))
+ {
+ ++usedSmallPages;
+ return p;
+ }
+ }
+ return null;
+ }
+
+ static struct ScanRange(bool precise)
+ {
+ void* pbot;
+ void* ptop;
+ static if (precise)
+ {
+ void** pbase; // start of memory described by ptrbitmap
+ size_t* ptrbmp; // bits from is_pointer or rtinfo
+ size_t bmplength; // number of valid bits
+ }
+ }
+
+ static struct ToScanStack(RANGE)
+ {
+ nothrow:
+ @disable this(this);
+ auto stackLock = shared(AlignedSpinLock)(SpinLock.Contention.brief);
+
+ void reset()
+ {
+ _length = 0;
+ if (_p)
+ {
+ os_mem_unmap(_p, _cap * RANGE.sizeof);
+ _p = null;
+ }
+ _cap = 0;
+ }
+ void clear()
+ {
+ _length = 0;
+ }
+
+ void push(RANGE rng)
+ {
+ if (_length == _cap) grow();
+ _p[_length++] = rng;
+ }
+
+ RANGE pop()
+ in { assert(!empty); }
+ do
+ {
+ return _p[--_length];
+ }
+
+ bool popLocked(ref RANGE rng)
+ {
+ if (_length == 0)
+ return false;
+
+ stackLock.lock();
+ scope(exit) stackLock.unlock();
+ if (_length == 0)
+ return false;
+ rng = _p[--_length];
+ return true;
+ }
+
+ ref inout(RANGE) opIndex(size_t idx) inout
+ in { assert(idx < _length); }
+ do
+ {
+ return _p[idx];
+ }
+
+ @property size_t length() const { return _length; }
+ @property bool empty() const { return !length; }
+
+ private:
+ void grow()
+ {
+ pragma(inline, false);
+
+ enum initSize = 64 * 1024; // Windows VirtualAlloc granularity
+ immutable ncap = _cap ? 2 * _cap : initSize / RANGE.sizeof;
+ auto p = cast(RANGE*)os_mem_map(ncap * RANGE.sizeof);
+ if (p is null) onOutOfMemoryErrorNoGC();
+ if (_p !is null)
+ {
+ p[0 .. _length] = _p[0 .. _length];
+ os_mem_unmap(_p, _cap * RANGE.sizeof);
+ }
+ _p = p;
+ _cap = ncap;
+ }
+
+ size_t _length;
+ RANGE* _p;
+ size_t _cap;
+ }
+
+ ToScanStack!(ScanRange!false) toscanConservative;
+ ToScanStack!(ScanRange!true) toscanPrecise;
+
+ template scanStack(bool precise)
+ {
+ static if (precise)
+ alias scanStack = toscanPrecise;
+ else
+ alias scanStack = toscanConservative;
+ }
+
+ /**
+ * Search a range of memory values and mark any pointers into the GC pool.
+ */
+ private void mark(bool precise, bool parallel, bool shared_mem)(ScanRange!precise rng) scope nothrow
+ {
+ alias toscan = scanStack!precise;
+
+ debug(MARK_PRINTF)
+ printf("marking range: [%p..%p] (%#llx)\n", pbot, ptop, cast(long)(ptop - pbot));
+
+ // limit the number of ranges added to the toscan stack
+ enum FANOUT_LIMIT = 32;
+ size_t stackPos;
+ ScanRange!precise[FANOUT_LIMIT] stack = void;
+
+ size_t pcache = 0;
+
+ // let dmd allocate a register for this.pools
+ auto pools = pooltable.pools;
+ const highpool = pooltable.npools - 1;
+ const minAddr = pooltable.minAddr;
+ size_t memSize = pooltable.maxAddr - minAddr;
+ Pool* pool = null;
+
+ // properties of allocation pointed to
+ ScanRange!precise tgt = void;
+
+ for (;;)
+ {
+ auto p = *cast(void**)(rng.pbot);
+
+ debug(MARK_PRINTF) printf("\tmark %p: %p\n", rng.pbot, p);
+
+ if (cast(size_t)(p - minAddr) < memSize &&
+ (cast(size_t)p & ~cast(size_t)(PAGESIZE-1)) != pcache)
+ {
+ static if (precise) if (rng.pbase)
+ {
+ size_t bitpos = cast(void**)rng.pbot - rng.pbase;
+ while (bitpos >= rng.bmplength)
+ {
+ bitpos -= rng.bmplength;
+ rng.pbase += rng.bmplength;
+ }
+ import core.bitop;
+ if (!core.bitop.bt(rng.ptrbmp, bitpos))
+ {
+ debug(MARK_PRINTF) printf("\t\tskipping non-pointer\n");
+ goto LnextPtr;
+ }
+ }
+
+ if (!pool || p < pool.baseAddr || p >= pool.topAddr)
+ {
+ size_t low = 0;
+ size_t high = highpool;
+ while (true)
+ {
+ size_t mid = (low + high) >> 1;
+ pool = pools[mid];
+ if (p < pool.baseAddr)
+ high = mid - 1;
+ else if (p >= pool.topAddr)
+ low = mid + 1;
+ else break;
+
+ if (low > high)
+ goto LnextPtr;
+ }
+ }
+ size_t offset = cast(size_t)(p - pool.baseAddr);
+ size_t biti = void;
+ size_t pn = offset / PAGESIZE;
+ size_t bin = pool.pagetable[pn]; // not Bins to avoid multiple size extension instructions
+
+ debug(MARK_PRINTF)
+ printf("\t\tfound pool %p, base=%p, pn = %lld, bin = %d\n", pool, pool.baseAddr, cast(long)pn, bin);
+
+ // Adjust bit to be at start of allocated memory block
+ if (bin < B_PAGE)
+ {
+ // We don't care about setting pointsToBase correctly
+ // because it's ignored for small object pools anyhow.
+ auto offsetBase = baseOffset(offset, cast(Bins)bin);
+ biti = offsetBase >> Pool.ShiftBy.Small;
+ //debug(PRINTF) printf("\t\tbiti = x%x\n", biti);
+
+ if (!pool.mark.testAndSet!shared_mem(biti) && !pool.noscan.test(biti))
+ {
+ tgt.pbot = pool.baseAddr + offsetBase;
+ tgt.ptop = tgt.pbot + binsize[bin];
+ static if (precise)
+ {
+ tgt.pbase = cast(void**)pool.baseAddr;
+ tgt.ptrbmp = pool.is_pointer.data;
+ tgt.bmplength = size_t.max; // no repetition
+ }
+ goto LaddRange;
+ }
+ }
+ else if (bin == B_PAGE)
+ {
+ biti = offset >> Pool.ShiftBy.Large;
+ //debug(PRINTF) printf("\t\tbiti = x%x\n", biti);
+
+ pcache = cast(size_t)p & ~cast(size_t)(PAGESIZE-1);
+ tgt.pbot = cast(void*)pcache;
+
+ // For the NO_INTERIOR attribute. This tracks whether
+ // the pointer is an interior pointer or points to the
+ // base address of a block.
+ if (tgt.pbot != sentinel_sub(p) && pool.nointerior.nbits && pool.nointerior.test(biti))
+ goto LnextPtr;
+
+ if (!pool.mark.testAndSet!shared_mem(biti) && !pool.noscan.test(biti))
+ {
+ tgt.ptop = tgt.pbot + (cast(LargeObjectPool*)pool).getSize(pn);
+ goto LaddLargeRange;
+ }
+ }
+ else if (bin == B_PAGEPLUS)
+ {
+ pn -= pool.bPageOffsets[pn];
+ biti = pn * (PAGESIZE >> Pool.ShiftBy.Large);
+
+ pcache = cast(size_t)p & ~cast(size_t)(PAGESIZE-1);
+ if (pool.nointerior.nbits && pool.nointerior.test(biti))
+ goto LnextPtr;
+
+ if (!pool.mark.testAndSet!shared_mem(biti) && !pool.noscan.test(biti))
+ {
+ tgt.pbot = pool.baseAddr + (pn * PAGESIZE);
+ tgt.ptop = tgt.pbot + (cast(LargeObjectPool*)pool).getSize(pn);
+ LaddLargeRange:
+ static if (precise)
+ {
+ auto rtinfo = pool.rtinfo[biti];
+ if (rtinfo is rtinfoNoPointers)
+ goto LnextPtr; // only if inconsistent with noscan
+ if (rtinfo is rtinfoHasPointers)
+ {
+ tgt.pbase = null; // conservative
+ }
+ else
+ {
+ tgt.ptrbmp = cast(size_t*)rtinfo;
+ size_t element_size = *tgt.ptrbmp++;
+ tgt.bmplength = (element_size + (void*).sizeof - 1) / (void*).sizeof;
+ assert(tgt.bmplength);
+
+ debug(SENTINEL)
+ tgt.pbot = sentinel_add(tgt.pbot);
+ if (pool.appendable.test(biti))
+ {
+ // take advantage of knowing array layout in rt.lifetime
+ void* arrtop = tgt.pbot + 16 + *cast(size_t*)tgt.pbot;
+ assert (arrtop > tgt.pbot && arrtop <= tgt.ptop);
+ tgt.pbot += 16;
+ tgt.ptop = arrtop;
+ }
+ else
+ {
+ tgt.ptop = tgt.pbot + element_size;
+ }
+ tgt.pbase = cast(void**)tgt.pbot;
+ }
+ }
+ goto LaddRange;
+ }
+ }
+ else
+ {
+ // Don't mark bits in B_FREE pages
+ assert(bin == B_FREE);
+ }
+ }
+ LnextPtr:
+ rng.pbot += (void*).sizeof;
+ if (rng.pbot < rng.ptop)
+ continue;
+
+ LnextRange:
+ if (stackPos)
+ {
+ // pop range from local stack and recurse
+ rng = stack[--stackPos];
+ }
+ else
+ {
+ static if (parallel)
+ {
+ if (!toscan.popLocked(rng))
+ break; // nothing more to do
+ }
+ else
+ {
+ if (toscan.empty)
+ break; // nothing more to do
+
+ // pop range from global stack and recurse
+ rng = toscan.pop();
+ }
+ }
+ // printf(" pop [%p..%p] (%#zx)\n", p1, p2, cast(size_t)p2 - cast(size_t)p1);
+ goto LcontRange;
+
+ LaddRange:
+ rng.pbot += (void*).sizeof;
+ if (rng.pbot < rng.ptop)
+ {
+ if (stackPos < stack.length)
+ {
+ stack[stackPos] = tgt;
+ stackPos++;
+ continue;
+ }
+ static if (parallel)
+ {
+ toscan.stackLock.lock();
+ scope(exit) toscan.stackLock.unlock();
+ }
+ toscan.push(rng);
+ // reverse order for depth-first-order traversal
+ foreach_reverse (ref range; stack)
+ toscan.push(range);
+ stackPos = 0;
+ }
+ LendOfRange:
+ // continue with last found range
+ rng = tgt;
+
+ LcontRange:
+ pcache = 0;
+ }
+ }
+
+ void markConservative(bool shared_mem)(void *pbot, void *ptop) scope nothrow
+ {
+ if (pbot < ptop)
+ mark!(false, false, shared_mem)(ScanRange!false(pbot, ptop));
+ }
+
+ void markPrecise(bool shared_mem)(void *pbot, void *ptop) scope nothrow
+ {
+ if (pbot < ptop)
+ mark!(true, false, shared_mem)(ScanRange!true(pbot, ptop, null));
+ }
+
+ version (COLLECT_PARALLEL)
+ ToScanStack!(void*) toscanRoots;
+
+ version (COLLECT_PARALLEL)
+ void collectRoots(void *pbot, void *ptop) scope nothrow
+ {
+ const minAddr = pooltable.minAddr;
+ size_t memSize = pooltable.maxAddr - minAddr;
+
+ for (auto p = cast(void**)pbot; cast(void*)p < ptop; p++)
+ {
+ auto ptr = *p;
+ if (cast(size_t)(ptr - minAddr) < memSize)
+ toscanRoots.push(ptr);
+ }
+ }
+
+ // collection step 1: prepare freebits and mark bits
+ void prepare() nothrow
+ {
+ debug(COLLECT_PRINTF) printf("preparing mark.\n");
+
+ for (size_t n = 0; n < npools; n++)
+ {
+ Pool* pool = pooltable[n];
+ if (pool.isLargeObject)
+ pool.mark.zero();
+ else
+ pool.mark.copy(&pool.freebits);
+ }
+ }
+
+ // collection step 2: mark roots and heap
+ void markAll(alias markFn)(bool nostack) nothrow
+ {
+ if (!nostack)
+ {
+ debug(COLLECT_PRINTF) printf("\tscan stacks.\n");
+ // Scan stacks and registers for each paused thread
+ thread_scanAll(&markFn);
+ }
+
+ // Scan roots[]
+ debug(COLLECT_PRINTF) printf("\tscan roots[]\n");
+ foreach (root; roots)
+ {
+ markFn(cast(void*)&root.proot, cast(void*)(&root.proot + 1));
+ }
+
+ // Scan ranges[]
+ debug(COLLECT_PRINTF) printf("\tscan ranges[]\n");
+ //log++;
+ foreach (range; ranges)
+ {
+ debug(COLLECT_PRINTF) printf("\t\t%p .. %p\n", range.pbot, range.ptop);
+ markFn(range.pbot, range.ptop);
+ }
+ //log--;
+ }
+
+ version (COLLECT_PARALLEL)
+ void collectAllRoots(bool nostack) nothrow
+ {
+ if (!nostack)
+ {
+ debug(COLLECT_PRINTF) printf("\tcollect stacks.\n");
+ // Scan stacks and registers for each paused thread
+ thread_scanAll(&collectRoots);
+ }
+
+ // Scan roots[]
+ debug(COLLECT_PRINTF) printf("\tcollect roots[]\n");
+ foreach (root; roots)
+ {
+ toscanRoots.push(root);
+ }
+
+ // Scan ranges[]
+ debug(COLLECT_PRINTF) printf("\tcollect ranges[]\n");
+ foreach (range; ranges)
+ {
+ debug(COLLECT_PRINTF) printf("\t\t%p .. %p\n", range.pbot, range.ptop);
+ collectRoots(range.pbot, range.ptop);
+ }
+ }
+
+ // collection step 3: finalize unreferenced objects, recover full pages with no live objects
+ size_t sweep() nothrow
+ {
+ // Free up everything not marked
+ debug(COLLECT_PRINTF) printf("\tfree'ing\n");
+ size_t freedLargePages;
+ size_t freedSmallPages;
+ size_t freed;
+ for (size_t n = 0; n < npools; n++)
+ {
+ size_t pn;
+ Pool* pool = pooltable[n];
+
+ if (pool.isLargeObject)
+ {
+ auto lpool = cast(LargeObjectPool*)pool;
+ size_t numFree = 0;
+ size_t npages;
+ for (pn = 0; pn < pool.npages; pn += npages)
+ {
+ npages = pool.bPageOffsets[pn];
+ Bins bin = cast(Bins)pool.pagetable[pn];
+ if (bin == B_FREE)
+ {
+ numFree += npages;
+ continue;
+ }
+ assert(bin == B_PAGE);
+ size_t biti = pn;
+
+ if (!pool.mark.test(biti))
+ {
+ void *p = pool.baseAddr + pn * PAGESIZE;
+ void* q = sentinel_add(p);
+ sentinel_Invariant(q);
+
+ if (pool.finals.nbits && pool.finals.clear(biti))
+ {
+ size_t size = npages * PAGESIZE - SENTINEL_EXTRA;
+ uint attr = pool.getBits(biti);
+ rt_finalizeFromGC(q, sentinel_size(q, size), attr);
+ }
+
+ pool.clrBits(biti, ~BlkAttr.NONE ^ BlkAttr.FINALIZE);
+
+ debug(COLLECT_PRINTF) printf("\tcollecting big %p\n", p);
+ leakDetector.log_free(q, sentinel_size(q, npages * PAGESIZE - SENTINEL_EXTRA));
+ pool.pagetable[pn..pn+npages] = B_FREE;
+ if (pn < pool.searchStart) pool.searchStart = pn;
+ freedLargePages += npages;
+ pool.freepages += npages;
+ numFree += npages;
+
+ debug (MEMSTOMP) memset(p, 0xF3, npages * PAGESIZE);
+ // Don't need to update searchStart here because
+ // pn is guaranteed to be greater than last time
+ // we updated it.
+
+ pool.largestFree = pool.freepages; // invalidate
+ }
+ else
+ {
+ if (numFree > 0)
+ {
+ lpool.setFreePageOffsets(pn - numFree, numFree);
+ numFree = 0;
+ }
+ }
+ }
+ if (numFree > 0)
+ lpool.setFreePageOffsets(pn - numFree, numFree);
+ }
+ else
+ {
+ // reinit chain of pages to rebuild free list
+ pool.recoverPageFirst[] = cast(uint)pool.npages;
+
+ for (pn = 0; pn < pool.npages; pn++)
+ {
+ Bins bin = cast(Bins)pool.pagetable[pn];
+
+ if (bin < B_PAGE)
+ {
+ auto freebitsdata = pool.freebits.data + pn * PageBits.length;
+ auto markdata = pool.mark.data + pn * PageBits.length;
+
+ // the entries to free are allocated objects (freebits == false)
+ // that are not marked (mark == false)
+ PageBits toFree;
+ static foreach (w; 0 .. PageBits.length)
+ toFree[w] = (~freebitsdata[w] & ~markdata[w]);
+
+ // the page is unchanged if there is nothing to free
+ bool unchanged = true;
+ static foreach (w; 0 .. PageBits.length)
+ unchanged = unchanged && (toFree[w] == 0);
+ if (unchanged)
+ {
+ bool hasDead = false;
+ static foreach (w; 0 .. PageBits.length)
+ hasDead = hasDead || (~freebitsdata[w] != baseOffsetBits[bin][w]);
+ if (hasDead)
+ {
+ // add to recover chain
+ pool.binPageChain[pn] = pool.recoverPageFirst[bin];
+ pool.recoverPageFirst[bin] = cast(uint)pn;
+ }
+ else
+ {
+ pool.binPageChain[pn] = Pool.PageRecovered;
+ }
+ continue;
+ }
+
+ // the page can be recovered if all of the allocated objects (freebits == false)
+ // are freed
+ bool recoverPage = true;
+ static foreach (w; 0 .. PageBits.length)
+ recoverPage = recoverPage && (~freebitsdata[w] == toFree[w]);
+
+ // We need to loop through each object if any have a finalizer,
+ // or, if any of the debug hooks are enabled.
+ bool doLoop = false;
+ debug (SENTINEL)
+ doLoop = true;
+ else version (assert)
+ doLoop = true;
+ else debug (COLLECT_PRINTF) // need output for each object
+ doLoop = true;
+ else debug (LOGGING)
+ doLoop = true;
+ else debug (MEMSTOMP)
+ doLoop = true;
+ else if (pool.finals.data)
+ {
+ // finalizers must be called on objects that are about to be freed
+ auto finalsdata = pool.finals.data + pn * PageBits.length;
+ static foreach (w; 0 .. PageBits.length)
+ doLoop = doLoop || (toFree[w] & finalsdata[w]) != 0;
+ }
+
+ if (doLoop)
+ {
+ immutable size = binsize[bin];
+ void *p = pool.baseAddr + pn * PAGESIZE;
+ immutable base = pn * (PAGESIZE/16);
+ immutable bitstride = size / 16;
+
+ // ensure that there are at least <size> bytes for every address
+ // below ptop even if unaligned
+ void *ptop = p + PAGESIZE - size + 1;
+ for (size_t i; p < ptop; p += size, i += bitstride)
+ {
+ immutable biti = base + i;
+
+ if (!pool.mark.test(biti))
+ {
+ void* q = sentinel_add(p);
+ sentinel_Invariant(q);
+
+ if (pool.finals.nbits && pool.finals.test(biti))
+ rt_finalizeFromGC(q, sentinel_size(q, size), pool.getBits(biti));
+
+ assert(core.bitop.bt(toFree.ptr, i));
+
+ debug(COLLECT_PRINTF) printf("\tcollecting %p\n", p);
+ leakDetector.log_free(q, sentinel_size(q, size));
+
+ debug (MEMSTOMP) memset(p, 0xF3, size);
+ }
+ }
+ }
+
+ if (recoverPage)
+ {
+ pool.freeAllPageBits(pn);
+
+ pool.pagetable[pn] = B_FREE;
+ // add to free chain
+ pool.binPageChain[pn] = cast(uint) pool.searchStart;
+ pool.searchStart = pn;
+ pool.freepages++;
+ freedSmallPages++;
+ }
+ else
+ {
+ pool.freePageBits(pn, toFree);
+
+ // add to recover chain
+ pool.binPageChain[pn] = pool.recoverPageFirst[bin];
+ pool.recoverPageFirst[bin] = cast(uint)pn;
+ }
+ }
+ }
+ }
+ }
+
+ assert(freedLargePages <= usedLargePages);
+ usedLargePages -= freedLargePages;
+ debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedLargePages, npools);
+
+ assert(freedSmallPages <= usedSmallPages);
+ usedSmallPages -= freedSmallPages;
+ debug(COLLECT_PRINTF) printf("\trecovered small pages = %d\n", freedSmallPages);
+
+ return freedLargePages + freedSmallPages;
+ }
+
+ bool recoverPage(SmallObjectPool* pool, size_t pn, Bins bin) nothrow
+ {
+ size_t size = binsize[bin];
+ size_t bitbase = pn * (PAGESIZE / 16);
+
+ auto freebitsdata = pool.freebits.data + pn * PageBits.length;
+
+ // the page had dead objects when collecting; these cannot have been resurrected
+ bool hasDead = false;
+ static foreach (w; 0 .. PageBits.length)
+ hasDead = hasDead || (freebitsdata[w] != 0);
+ assert(hasDead);
+
+ // prepend to buckets, but with forward addresses inside the page
+ assert(bucket[bin] is null);
+ List** bucketTail = &bucket[bin];
+
+ void* p = pool.baseAddr + pn * PAGESIZE;
+ const top = PAGESIZE - size + 1; // ensure <size> bytes available even if unaligned
+ for (size_t u = 0; u < top; u += size)
+ {
+ if (!core.bitop.bt(freebitsdata, u / 16))
+ continue;
+ auto elem = cast(List *)(p + u);
+ elem.pool = &pool.base;
+ *bucketTail = elem;
+ bucketTail = &elem.next;
+ }
+ *bucketTail = null;
+ assert(bucket[bin] !is null);
+ return true;
+ }
+
+ bool recoverNextPage(Bins bin) nothrow
+ {
+ SmallObjectPool* pool = recoverPool[bin];
+ while (pool)
+ {
+ auto pn = pool.recoverPageFirst[bin];
+ while (pn < pool.npages)
+ {
+ auto next = pool.binPageChain[pn];
+ pool.binPageChain[pn] = Pool.PageRecovered;
+ pool.recoverPageFirst[bin] = next;
+ if (recoverPage(pool, pn, bin))
+ return true;
+ pn = next;
+ }
+ pool = setNextRecoverPool(bin, pool.ptIndex + 1);
+ }
+ return false;
+ }
+
+ private SmallObjectPool* setNextRecoverPool(Bins bin, size_t poolIndex) nothrow
+ {
+ Pool* pool;
+ while (poolIndex < npools &&
+ ((pool = pooltable[poolIndex]).isLargeObject ||
+ pool.recoverPageFirst[bin] >= pool.npages))
+ poolIndex++;
+
+ return recoverPool[bin] = poolIndex < npools ? cast(SmallObjectPool*)pool : null;
+ }
+
+ version (COLLECT_FORK)
+ void disableFork() nothrow
+ {
+ markProcPid = 0;
+ shouldFork = false;
+ }
+
+ version (COLLECT_FORK)
+ ChildStatus collectFork(bool block) nothrow
+ {
+ typeof(return) rc = wait_pid(markProcPid, block);
+ final switch (rc)
+ {
+ case ChildStatus.done:
+ debug(COLLECT_PRINTF) printf("\t\tmark proc DONE (block=%d)\n",
+ cast(int) block);
+ markProcPid = 0;
+ // process GC marks then sweep
+ thread_suspendAll();
+ thread_processGCMarks(&isMarked);
+ thread_resumeAll();
+ break;
+ case ChildStatus.running:
+ debug(COLLECT_PRINTF) printf("\t\tmark proc RUNNING\n");
+ if (!block)
+ break;
+ // Something went wrong: if block is true, wait() should never
+ // return RUNNING.
+ goto case ChildStatus.error;
+ case ChildStatus.error:
+ debug(COLLECT_PRINTF) printf("\t\tmark proc ERROR\n");
+ // Try to keep going without forking
+ // and do the marking in this thread
+ break;
+ }
+ return rc;
+ }
+
+ version (COLLECT_FORK)
+ ChildStatus markFork(bool nostack, bool block, bool doParallel) nothrow
+ {
+ // Forking is enabled, so we fork() and start a new concurrent mark phase
+ // in the child. If the collection should not block, the parent process
+ // tells the caller no memory could be recycled immediately (if this collection
+ // was triggered by an allocation, the caller should allocate more memory
+ // to fulfill the request).
+ // If the collection should block, the parent will wait for the mark phase
+ // to finish before returning control to the mutator,
+ // but other threads are restarted and may run in parallel with the mark phase
+ // (unless they allocate or use the GC themselves, in which case
+ // the global GC lock will stop them).
+ // fork now and sweep later
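+ // Overall flow: fork/clone a child that runs child_mark() and exits; the parent
+ // either returns ChildStatus.running immediately (non-blocking case) or waits
+ // for the child via wait_pid() and then proceeds to the sweep.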
+ int child_mark() scope
+ {
+ if (doParallel)
+ markParallel(nostack);
+ else if (ConservativeGC.isPrecise)
+ markAll!(markPrecise!true)(nostack);
+ else
+ markAll!(markConservative!true)(nostack);
+ return 0;
+ }
+
+ import core.stdc.stdlib : _Exit;
+ debug (PRINTF_TO_FILE)
+ {
+ import core.stdc.stdio : fflush;
+ fflush(null); // avoid duplicated FILE* output
+ }
+ version (OSX)
+ {
+ auto pid = __fork(); // avoids calling handlers (from libc source code)
+ }
+ else version (linux)
+ {
+ // clone() fits better as we don't want to do anything but scanning in the child process.
+ // no fork handlers are called, so we avoid deadlocks due to malloc locks. Probably related:
+ // https://sourceware.org/bugzilla/show_bug.cgi?id=4737
+ import core.sys.linux.sched : clone;
+ import core.sys.posix.signal : SIGCHLD;
+ enum CLONE_CHILD_CLEARTID = 0x00200000; /* Register exit futex and memory location to clear */
+ const flags = CLONE_CHILD_CLEARTID | SIGCHLD; // child thread id not needed
+ scope int delegate() scope dg = &child_mark;
+ extern(C) static int wrap_delegate(void* arg)
+ {
+ auto dg = cast(int delegate() scope*)arg;
+ return (*dg)();
+ }
+ char[256] stackbuf; // enough stack space for clone() to place some info for the child without stomping the parent stack
+ auto stack = stackbuf.ptr + (isStackGrowingDown ? stackbuf.length : 0);
+ auto pid = clone(&wrap_delegate, stack, flags, &dg);
+ }
+ else
+ {
+ fork_needs_lock = false;
+ auto pid = fork();
+ fork_needs_lock = true;
+ }
+ assert(pid != -1);
+ switch (pid)
+ {
+ case -1: // fork() failed, retry without forking
+ return ChildStatus.error;
+ case 0: // child process (not reached when using clone)
+ child_mark();
+ _Exit(0);
+ default: // the parent
+ thread_resumeAll();
+ if (!block)
+ {
+ markProcPid = pid;
+ return ChildStatus.running;
+ }
+ ChildStatus r = wait_pid(pid); // block until marking is done
+ if (r == ChildStatus.error)
+ {
+ thread_suspendAll();
+ // there was an error
+ // do the marking in this thread
+ disableFork();
+ if (doParallel)
+ markParallel(nostack);
+ else if (ConservativeGC.isPrecise)
+ markAll!(markPrecise!false)(nostack);
+ else
+ markAll!(markConservative!false)(nostack);
+ } else {
+ assert(r == ChildStatus.done);
+ assert(r != ChildStatus.running);
+ }
+ }
+ return ChildStatus.done; // waited for the child
+ }
+
+ /**
+ * Returns the number of full pages freed.
+ * The collection is done concurrently only if block and isFinal are false.
+ */
+ size_t fullcollect(bool nostack = false, bool block = false, bool isFinal = false) nothrow
+ {
+ // It is possible that `fullcollect` will be called from a thread which
+ // is not yet registered with the runtime (because allocating `new Thread`
+ // is part of the `thread_attachThis` implementation). In that case it is
+ // better not to attempt any collection at all.
+
+ if (Thread.getThis() is null)
+ return 0;
+
+ MonoTime start, stop, begin;
+ begin = start = currTime;
+
+ debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n");
+ version (COLLECT_PARALLEL)
+ {
+ bool doParallel = config.parallel > 0 && !config.fork;
+ if (doParallel && !scanThreadData)
+ {
+ if (isFinal) // avoid starting threads for parallel marking
+ doParallel = false;
+ else
+ startScanThreads();
+ }
+ }
+ else
+ enum doParallel = false;
+
+ //printf("\tpool address range = %p .. %p\n", minAddr, maxAddr);
+
+ version (COLLECT_FORK)
+ bool doFork = shouldFork;
+ else
+ enum doFork = false;
+
+ if (doFork && collectInProgress)
+ {
+ version (COLLECT_FORK)
+ {
+ // If there is a mark process running, check if it already finished.
+ // If that is the case, we move to the sweep phase.
+ // If it's still running, either we block until the mark phase is
+ // done (and then sweep to finish the collection), or in case of error
+ // we redo the mark phase without forking.
+ ChildStatus rc = collectFork(block);
+ final switch (rc)
+ {
+ case ChildStatus.done:
+ break;
+ case ChildStatus.running:
+ return 0;
+ case ChildStatus.error:
+ disableFork();
+ goto Lmark;
+ }
+ }
+ }
+ else
+ {
+Lmark:
+ // lock roots and ranges around suspending threads because they're not reentrant-safe
+ rangesLock.lock();
+ rootsLock.lock();
+ debug(INVARIANT) inCollection = true;
+ scope (exit)
+ {
+ debug(INVARIANT) inCollection = false;
+ rangesLock.unlock();
+ rootsLock.unlock();
+ }
+ thread_suspendAll();
+
+ prepare();
+
+ stop = currTime;
+ prepTime += (stop - start);
+ start = stop;
+
+ if (doFork && !isFinal && !block) // don't start a new fork during termination
+ {
+ version (COLLECT_FORK)
+ {
+ auto forkResult = markFork(nostack, block, doParallel);
+ final switch (forkResult)
+ {
+ case ChildStatus.error:
+ disableFork();
+ goto Lmark;
+ case ChildStatus.running:
+ // update profiling information
+ stop = currTime;
+ markTime += (stop - start);
+ Duration pause = stop - begin;
+ if (pause > maxPauseTime)
+ maxPauseTime = pause;
+ pauseTime += pause;
+ return 0;
+ case ChildStatus.done:
+ break;
+ }
+ // if we get here, forking failed and a standard STW collection got issued
+ // threads were suspended again, restart them
+ thread_suspendAll();
+ }
+ }
+ else if (doParallel)
+ {
+ version (COLLECT_PARALLEL)
+ markParallel(nostack);
+ }
+ else
+ {
+ if (ConservativeGC.isPrecise)
+ markAll!(markPrecise!false)(nostack);
+ else
+ markAll!(markConservative!false)(nostack);
+ }
+
+ thread_processGCMarks(&isMarked);
+ thread_resumeAll();
+ isFinal = false;
+ }
+
+ // If we get here with the forking GC, either the child process has finished the
+ // marking phase, or block == true and a standard stop-the-world collection was used.
+ // It is time to sweep.
+
+ stop = currTime;
+ markTime += (stop - start);
+ Duration pause = stop - begin;
+ if (pause > maxPauseTime)
+ maxPauseTime = pause;
+ pauseTime += pause;
+ start = stop;
+
+ ConservativeGC._inFinalizer = true;
+ size_t freedPages = void;
+ {
+ scope (failure) ConservativeGC._inFinalizer = false;
+ freedPages = sweep();
+ ConservativeGC._inFinalizer = false;
+ }
+
+ // minimize() should be called only after a call to fullcollect
+ // terminates with a sweep
+ if (minimizeAfterNextCollection || lowMem)
+ {
+ minimizeAfterNextCollection = false;
+ minimize();
+ }
+
+ // init bucket lists
+ bucket[] = null;
+ foreach (Bins bin; 0..B_NUMSMALL)
+ setNextRecoverPool(bin, 0);
+
+ stop = currTime;
+ sweepTime += (stop - start);
+
+ Duration collectionTime = stop - begin;
+ if (collectionTime > maxCollectionTime)
+ maxCollectionTime = collectionTime;
+
+ ++numCollections;
+
+ updateCollectThresholds();
+ if (doFork && isFinal)
+ return fullcollect(true, true, false);
+ return freedPages;
+ }
+
+ /**
+ * Returns true if the addr lies within a marked block.
+ *
+ * Warning! This should only be called while the world is stopped inside
+ * the fullcollect function after all live objects have been marked, but before sweeping.
+ */
+ int isMarked(void *addr) scope nothrow
+ {
+ // first, we find the Pool this block is in, then check to see if the
+ // mark bit is clear.
+ auto pool = findPool(addr);
+ if (pool)
+ {
+ auto offset = cast(size_t)(addr - pool.baseAddr);
+ auto pn = offset / PAGESIZE;
+ auto bins = cast(Bins)pool.pagetable[pn];
+ size_t biti = void;
+ if (bins < B_PAGE)
+ {
+ biti = baseOffset(offset, bins) >> pool.ShiftBy.Small;
+ // doesn't need to check freebits because no pointer can exist
+ // to a block that was already free when the collection started
+ }
+ else if (bins == B_PAGE)
+ {
+ biti = pn * (PAGESIZE >> pool.ShiftBy.Large);
+ }
+ else if (bins == B_PAGEPLUS)
+ {
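+ // step back to the B_PAGE that starts this large allocation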
+ pn -= pool.bPageOffsets[pn];
+ biti = pn * (PAGESIZE >> pool.ShiftBy.Large);
+ }
+ else // bins == B_FREE
+ {
+ assert(bins == B_FREE);
+ return IsMarked.no;
+ }
+ return pool.mark.test(biti) ? IsMarked.yes : IsMarked.no;
+ }
+ return IsMarked.unknown;
+ }
+
+ version (Posix)
+ {
+ // A fork might happen while GC code is running in a different thread.
+ // Because that would leave the GC in an inconsistent state,
+ // make sure no GC code is running by acquiring the lock here,
+ // before a fork.
+ // This must not happen if fork is called from the GC with the lock already held
+
+ __gshared bool fork_needs_lock = true; // race condition with concurrent calls of fork?
+
+
+ extern(C) static void _d_gcx_atfork_prepare()
+ {
+ if (instance && fork_needs_lock)
+ ConservativeGC.lockNR();
+ }
+
+ extern(C) static void _d_gcx_atfork_parent()
+ {
+ if (instance && fork_needs_lock)
+ ConservativeGC.gcLock.unlock();
+ }
+
+ extern(C) static void _d_gcx_atfork_child()
+ {
+ if (instance && fork_needs_lock)
+ {
+ ConservativeGC.gcLock.unlock();
+
+ // make sure the threads and event handles are reinitialized in the forked child
+ version (COLLECT_PARALLEL)
+ {
+ if (Gcx.instance.scanThreadData)
+ {
+ cstdlib.free(Gcx.instance.scanThreadData);
+ Gcx.instance.numScanThreads = 0;
+ Gcx.instance.scanThreadData = null;
+ Gcx.instance.busyThreads = 0;
+
+ memset(&Gcx.instance.evStart, 0, Gcx.instance.evStart.sizeof);
+ memset(&Gcx.instance.evDone, 0, Gcx.instance.evDone.sizeof);
+ }
+ }
+ }
+ }
+ }
+
+ /* ============================ Parallel scanning =============================== */
+ version (COLLECT_PARALLEL):
+ import core.sync.event;
+ import core.atomic;
+ private: // disable invariants for background threads
+
+ static struct ScanThreadData
+ {
+ ThreadID tid;
+ }
+ uint numScanThreads;
+ ScanThreadData* scanThreadData;
+
+ Event evStart;
+ Event evDone;
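+ // evStart wakes the background scan threads; evDone is signaled when a
+ // scan pass has finished (see scanBackground and stopScanThreads)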
+
+ shared uint busyThreads;
+ shared uint stoppedThreads;
+ bool stopGC;
+
+ void markParallel(bool nostack) nothrow
+ {
+ toscanRoots.clear();
+ collectAllRoots(nostack);
+ if (toscanRoots.empty)
+ return;
+
+ void** pbot = toscanRoots._p;
+ void** ptop = toscanRoots._p + toscanRoots._length;
+
+ debug(PARALLEL_PRINTF) printf("markParallel\n");
+
+ size_t pointersPerThread = toscanRoots._length / (numScanThreads + 1);
+ if (pointersPerThread > 0)
+ {
+ void pushRanges(bool precise)()
+ {
+ alias toscan = scanStack!precise;
+ toscan.stackLock.lock();
+
+ for (int idx = 0; idx < numScanThreads; idx++)
+ {
+ toscan.push(ScanRange!precise(pbot, pbot + pointersPerThread));
+ pbot += pointersPerThread;
+ }
+ toscan.stackLock.unlock();
+ }
+ if (ConservativeGC.isPrecise)
+ pushRanges!true();
+ else
+ pushRanges!false();
+ }
+ assert(pbot < ptop);
+
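+ // the main thread scans the remaining roots (including any division remainder) itself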
+ busyThreads.atomicOp!"+="(1); // main thread is busy
+
+ evStart.set();
+
+ debug(PARALLEL_PRINTF) printf("mark %lld roots\n", cast(ulong)(ptop - pbot));
+
+ if (ConservativeGC.isPrecise)
+ mark!(true, true, true)(ScanRange!true(pbot, ptop, null));
+ else
+ mark!(false, true, true)(ScanRange!false(pbot, ptop));
+
+ busyThreads.atomicOp!"-="(1);
+
+ debug(PARALLEL_PRINTF) printf("waitForScanDone\n");
+ pullFromScanStack();
+ debug(PARALLEL_PRINTF) printf("waitForScanDone done\n");
+ }
+
+ int maxParallelThreads() nothrow
+ {
+ import core.cpuid;
+ auto threads = threadsPerCPU();
+
+ if (threads == 0)
+ {
+ // If the GC is called by module ctors, no explicit import
+ // dependency on the GC is generated, so the GC module is not
+ // correctly inserted into the module initialization chain.
+ // As it relies on core.cpuid being initialized, force that
+ // initialization here.
+ try
+ {
+ foreach (m; ModuleInfo)
+ if (m.name == "core.cpuid")
+ if (auto ctor = m.ctor())
+ {
+ ctor();
+ threads = threadsPerCPU();
+ break;
+ }
+ }
+ catch (Exception)
+ {
+ assert(false, "unexpected exception iterating ModuleInfo");
+ }
+ }
+ return threads;
+ }
+
+
+ void startScanThreads() nothrow
+ {
+ auto threads = maxParallelThreads();
+ debug(PARALLEL_PRINTF) printf("startScanThreads: %d threads per CPU\n", threads);
+ if (threads <= 1)
+ return; // either core.cpuid not initialized or single core
+
+ numScanThreads = threads >= config.parallel ? config.parallel : threads - 1;
+
+ scanThreadData = cast(ScanThreadData*) cstdlib.calloc(numScanThreads, ScanThreadData.sizeof);
+ if (!scanThreadData)
+ onOutOfMemoryErrorNoGC();
+
+ evStart.initialize(false, false);
+ evDone.initialize(false, false);
+
+ version (Posix)
+ {
+ import core.sys.posix.signal;
+ // block all signals; scanBackground inherits this mask.
+ // see https://issues.dlang.org/show_bug.cgi?id=20256
+ sigset_t new_mask, old_mask;
+ sigfillset(&new_mask);
+ auto sigmask_rc = pthread_sigmask(SIG_BLOCK, &new_mask, &old_mask);
+ assert(sigmask_rc == 0, "failed to set up GC scan thread sigmask");
+ }
+
+ for (int idx = 0; idx < numScanThreads; idx++)
+ scanThreadData[idx].tid = createLowLevelThread(&scanBackground, 0x4000, &stopScanThreads);
+
+ version (Posix)
+ {
+ sigmask_rc = pthread_sigmask(SIG_SETMASK, &old_mask, null);
+ assert(sigmask_rc == 0, "failed to set up GC scan thread sigmask");
+ }
+ }
+
+ void stopScanThreads() nothrow
+ {
+ if (!numScanThreads)
+ return;
+
+ debug(PARALLEL_PRINTF) printf("stopScanThreads\n");
+ int startedThreads = 0;
+ for (int idx = 0; idx < numScanThreads; idx++)
+ if (scanThreadData[idx].tid != scanThreadData[idx].tid.init)
+ startedThreads++;
+
+ version (Windows)
+ alias allThreadsDead = thread_DLLProcessDetaching;
+ else
+ enum allThreadsDead = false;
+ stopGC = true;
+ while (atomicLoad(stoppedThreads) < startedThreads && !allThreadsDead)
+ {
+ evStart.set();
+ evDone.wait(dur!"msecs"(1));
+ }
+
+ for (int idx = 0; idx < numScanThreads; idx++)
+ {
+ if (scanThreadData[idx].tid != scanThreadData[idx].tid.init)
+ {
+ joinLowLevelThread(scanThreadData[idx].tid);
+ scanThreadData[idx].tid = scanThreadData[idx].tid.init;
+ }
+ }
+
+ evDone.terminate();
+ evStart.terminate();
+
+ cstdlib.free(scanThreadData);
+ // scanThreadData = null; // keep non-null to not start again after shutdown
+ numScanThreads = 0;
+
+ debug(PARALLEL_PRINTF) printf("stopScanThreads done\n");
+ }
+
+ void scanBackground() nothrow
+ {
+ while (!stopGC)
+ {
+ evStart.wait();
+ pullFromScanStack();
+ evDone.set();
+ }
+ stoppedThreads.atomicOp!"+="(1);
+ }
+
+ void pullFromScanStack() nothrow
+ {
+ if (ConservativeGC.isPrecise)
+ pullFromScanStackImpl!true();
+ else
+ pullFromScanStackImpl!false();
+ }
+
+ void pullFromScanStackImpl(bool precise)() nothrow
+ {
+ if (atomicLoad(busyThreads) == 0)
+ return;
+
+ debug(PARALLEL_PRINTF)
+ pthread_t threadId = pthread_self();
+ debug(PARALLEL_PRINTF) printf("scanBackground thread %d start\n", threadId);
+
+ ScanRange!precise rng;
+ alias toscan = scanStack!precise;
+
+ while (atomicLoad(busyThreads) > 0)
+ {
+ if (toscan.empty)
+ {
+ evDone.wait(dur!"msecs"(1));
+ continue;
+ }
+
+ busyThreads.atomicOp!"+="(1);
+ if (toscan.popLocked(rng))
+ {
+ debug(PARALLEL_PRINTF) printf("scanBackground thread %d scanning range [%p,%lld] from stack\n", threadId,
+ rng.pbot, cast(long) (rng.ptop - rng.pbot));
+ mark!(precise, true, true)(rng);
+ }
+ busyThreads.atomicOp!"-="(1);
+ }
+ debug(PARALLEL_PRINTF) printf("scanBackground thread %d done\n", threadId);
+ }
+}
+
+/* ============================ Pool =============================== */
+
+struct Pool
+{
+ void* baseAddr;
+ void* topAddr;
+ size_t ptIndex; // index in pool table
+ GCBits mark; // entries already scanned, or should not be scanned
+ GCBits freebits; // entries that are on the free list (all bits set but for allocated objects at their base offset)
+ GCBits finals; // entries that need finalizer run on them
+ GCBits structFinals; // struct entries that need a finalizer run on them
+ GCBits noscan; // entries that should not be scanned
+ GCBits appendable; // entries that are appendable
+ GCBits nointerior; // interior pointers should be ignored.
+ // Only implemented for large object pools.
+ GCBits is_pointer; // precise GC only: per-word, not per-block like the rest of them (SmallObjectPool only)
+ size_t npages;
+ size_t freepages; // The number of pages not in use.
+ ubyte* pagetable;
+
+ bool isLargeObject;
+
+ enum ShiftBy
+ {
+ Small = 4,
+ Large = 12
+ }
+ ShiftBy shiftBy; // shift count for the divisor used for determining bit indices.
+
+ // This tracks how far back we have to go to find the nearest B_PAGE at
+ // a smaller address than a B_PAGEPLUS. To save space, we use a uint.
+ // This limits individual allocations to 16 terabytes, assuming a 4k
+ // pagesize. (LargeObjectPool only)
+ // For B_PAGE and B_FREE, this specifies the number of pages in this block.
+ // As an optimization, a contiguous range of free pages tracks this information
+ // only for the first and the last page.
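+ // Example: a 3-page allocation starting at page 5 is stored as
+ // pagetable[5 .. 8] = [B_PAGE, B_PAGEPLUS, B_PAGEPLUS] and
+ // bPageOffsets[5 .. 8] = [3, 1, 2]; a free run of 4 pages starting at
+ // page 8 stores bPageOffsets[8] == bPageOffsets[11] == 4
+ // (see allocPages and setFreePageOffsets below).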
+ uint* bPageOffsets;
+
+ // The small object pool uses the same array to keep a chain of
+ // - pages with the same bin size that are still to be recovered
+ // - free pages (searchStart is first free page)
+ // other pages are marked by value PageRecovered
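+ // each chain is terminated by an index >= npages (see recoverNextPage and allocPage)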
+ alias binPageChain = bPageOffsets;
+
+ enum PageRecovered = uint.max;
+
+ // first of chain of pages to recover (SmallObjectPool only)
+ uint[B_NUMSMALL] recoverPageFirst;
+
+ // precise GC: TypeInfo.rtInfo for allocation (LargeObjectPool only)
+ immutable(size_t)** rtinfo;
+
+ // This variable tracks a conservative estimate of where the first free
+ // page in this pool is, so that if a lot of pages towards the beginning
+ // are occupied, we can bypass them in O(1).
+ size_t searchStart;
+ size_t largestFree; // upper limit for largest free chunk in large object pool
+
+ void initialize(size_t npages, bool isLargeObject) nothrow
+ {
+ assert(npages >= 256);
+
+ this.isLargeObject = isLargeObject;
+ size_t poolsize;
+
+ shiftBy = isLargeObject ? ShiftBy.Large : ShiftBy.Small;
+
+ //debug(PRINTF) printf("Pool::Pool(%u)\n", npages);
+ poolsize = npages * PAGESIZE;
+ baseAddr = cast(byte *)os_mem_map(poolsize);
+
+ // Some of the code depends on page alignment of memory pools
+ assert((cast(size_t)baseAddr & (PAGESIZE - 1)) == 0);
+
+ if (!baseAddr)
+ {
+ //debug(PRINTF) printf("GC fail: poolsize = x%zx, errno = %d\n", poolsize, errno);
+ //debug(PRINTF) printf("message = '%s'\n", sys_errlist[errno]);
+
+ npages = 0;
+ poolsize = 0;
+ }
+ //assert(baseAddr);
+ topAddr = baseAddr + poolsize;
+ auto nbits = cast(size_t)poolsize >> shiftBy;
+
+ version (COLLECT_FORK)
+ mark.alloc(nbits, config.fork);
+ else
+ mark.alloc(nbits);
+ if (ConservativeGC.isPrecise)
+ {
+ if (isLargeObject)
+ {
+ rtinfo = cast(immutable(size_t)**)cstdlib.malloc(npages * (size_t*).sizeof);
+ if (!rtinfo)
+ onOutOfMemoryErrorNoGC();
+ memset(rtinfo, 0, npages * (size_t*).sizeof);
+ }
+ else
+ {
+ is_pointer.alloc(cast(size_t)poolsize/(void*).sizeof);
+ is_pointer.clrRange(0, is_pointer.nbits);
+ }
+ }
+
+ // pagetable already keeps track of what's free for the large object
+ // pool.
+ if (!isLargeObject)
+ {
+ freebits.alloc(nbits);
+ freebits.setRange(0, nbits);
+ }
+
+ noscan.alloc(nbits);
+ appendable.alloc(nbits);
+
+ pagetable = cast(ubyte*)cstdlib.malloc(npages);
+ if (!pagetable)
+ onOutOfMemoryErrorNoGC();
+
+ if (npages > 0)
+ {
+ bPageOffsets = cast(uint*)cstdlib.malloc(npages * uint.sizeof);
+ if (!bPageOffsets)
+ onOutOfMemoryErrorNoGC();
+
+ if (isLargeObject)
+ {
+ bPageOffsets[0] = cast(uint)npages;
+ bPageOffsets[npages-1] = cast(uint)npages;
+ }
+ else
+ {
+ // all pages free
+ foreach (n; 0..npages)
+ binPageChain[n] = cast(uint)(n + 1);
+ recoverPageFirst[] = cast(uint)npages;
+ }
+ }
+
+ memset(pagetable, B_FREE, npages);
+
+ this.npages = npages;
+ this.freepages = npages;
+ this.searchStart = 0;
+ this.largestFree = npages;
+ }
+
+
+ void Dtor() nothrow
+ {
+ if (baseAddr)
+ {
+ int result;
+
+ if (npages)
+ {
+ result = os_mem_unmap(baseAddr, npages * PAGESIZE);
+ assert(result == 0);
+ npages = 0;
+ }
+
+ baseAddr = null;
+ topAddr = null;
+ }
+ if (pagetable)
+ {
+ cstdlib.free(pagetable);
+ pagetable = null;
+ }
+
+ if (bPageOffsets)
+ {
+ cstdlib.free(bPageOffsets);
+ bPageOffsets = null;
+ }
+
+ mark.Dtor(config.fork);
+ if (ConservativeGC.isPrecise)
+ {
+ if (isLargeObject)
+ cstdlib.free(rtinfo);
+ else
+ is_pointer.Dtor();
+ }
+ if (isLargeObject)
+ {
+ nointerior.Dtor();
+ }
+ else
+ {
+ freebits.Dtor();
+ }
+ finals.Dtor();
+ structFinals.Dtor();
+ noscan.Dtor();
+ appendable.Dtor();
+ }
+
+ /**
+ *
+ */
+ uint getBits(size_t biti) nothrow
+ {
+ uint bits;
+
+ if (finals.nbits && finals.test(biti))
+ bits |= BlkAttr.FINALIZE;
+ if (structFinals.nbits && structFinals.test(biti))
+ bits |= BlkAttr.STRUCTFINAL;
+ if (noscan.test(biti))
+ bits |= BlkAttr.NO_SCAN;
+ if (nointerior.nbits && nointerior.test(biti))
+ bits |= BlkAttr.NO_INTERIOR;
+ if (appendable.test(biti))
+ bits |= BlkAttr.APPENDABLE;
+ return bits;
+ }
+
+ /**
+ *
+ */
+ void clrBits(size_t biti, uint mask) nothrow @nogc
+ {
+ immutable dataIndex = biti >> GCBits.BITS_SHIFT;
+ immutable bitOffset = biti & GCBits.BITS_MASK;
+ immutable keep = ~(GCBits.BITS_1 << bitOffset);
+
+ if (mask & BlkAttr.FINALIZE && finals.nbits)
+ finals.data[dataIndex] &= keep;
+
+ if (structFinals.nbits && (mask & BlkAttr.STRUCTFINAL))
+ structFinals.data[dataIndex] &= keep;
+
+ if (mask & BlkAttr.NO_SCAN)
+ noscan.data[dataIndex] &= keep;
+ if (mask & BlkAttr.APPENDABLE)
+ appendable.data[dataIndex] &= keep;
+ if (nointerior.nbits && (mask & BlkAttr.NO_INTERIOR))
+ nointerior.data[dataIndex] &= keep;
+ }
+
+ /**
+ *
+ */
+ void setBits(size_t biti, uint mask) nothrow
+ {
+ // Calculate the mask and bit offset once and then use it to
+ // set all of the bits we need to set.
+ immutable dataIndex = biti >> GCBits.BITS_SHIFT;
+ immutable bitOffset = biti & GCBits.BITS_MASK;
+ immutable orWith = GCBits.BITS_1 << bitOffset;
+
+ if (mask & BlkAttr.STRUCTFINAL)
+ {
+ if (!structFinals.nbits)
+ structFinals.alloc(mark.nbits);
+ structFinals.data[dataIndex] |= orWith;
+ }
+
+ if (mask & BlkAttr.FINALIZE)
+ {
+ if (!finals.nbits)
+ finals.alloc(mark.nbits);
+ finals.data[dataIndex] |= orWith;
+ }
+
+ if (mask & BlkAttr.NO_SCAN)
+ {
+ noscan.data[dataIndex] |= orWith;
+ }
+// if (mask & BlkAttr.NO_MOVE)
+// {
+// if (!nomove.nbits)
+// nomove.alloc(mark.nbits);
+// nomove.data[dataIndex] |= orWith;
+// }
+ if (mask & BlkAttr.APPENDABLE)
+ {
+ appendable.data[dataIndex] |= orWith;
+ }
+
+ if (isLargeObject && (mask & BlkAttr.NO_INTERIOR))
+ {
+ if (!nointerior.nbits)
+ nointerior.alloc(mark.nbits);
+ nointerior.data[dataIndex] |= orWith;
+ }
+ }
+
+ void freePageBits(size_t pagenum, const scope ref PageBits toFree) nothrow
+ {
+ assert(!isLargeObject);
+ assert(!nointerior.nbits); // only for large objects
+
+ import core.internal.traits : staticIota;
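+ // beg indexes the first bit word of this page (PageBits.length words per page,
+ // the same stride used by freeAllPageBits)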
+ immutable beg = pagenum * (PAGESIZE / 16 / GCBits.BITS_PER_WORD);
+ foreach (i; staticIota!(0, PageBits.length))
+ {
+ immutable w = toFree[i];
+ if (!w) continue;
+
+ immutable wi = beg + i;
+ freebits.data[wi] |= w;
+ noscan.data[wi] &= ~w;
+ appendable.data[wi] &= ~w;
+ }
+
+ if (finals.nbits)
+ {
+ foreach (i; staticIota!(0, PageBits.length))
+ if (toFree[i])
+ finals.data[beg + i] &= ~toFree[i];
+ }
+
+ if (structFinals.nbits)
+ {
+ foreach (i; staticIota!(0, PageBits.length))
+ if (toFree[i])
+ structFinals.data[beg + i] &= ~toFree[i];
+ }
+ }
+
+ void freeAllPageBits(size_t pagenum) nothrow
+ {
+ assert(!isLargeObject);
+ assert(!nointerior.nbits); // only for large objects
+
+ immutable beg = pagenum * PageBits.length;
+ static foreach (i; 0 .. PageBits.length)
+ {{
+ immutable w = beg + i;
+ freebits.data[w] = ~0;
+ noscan.data[w] = 0;
+ appendable.data[w] = 0;
+ if (finals.data)
+ finals.data[w] = 0;
+ if (structFinals.data)
+ structFinals.data[w] = 0;
+ }}
+ }
+
+ /**
+ * Given a pointer p in the pool, return the page number.
+ */
+ size_t pagenumOf(void *p) const nothrow @nogc
+ in
+ {
+ assert(p >= baseAddr);
+ assert(p < topAddr);
+ }
+ do
+ {
+ return cast(size_t)(p - baseAddr) / PAGESIZE;
+ }
+
+ public
+ @property bool isFree() const pure nothrow
+ {
+ return npages == freepages;
+ }
+
+ /**
+ * Return number of pages necessary for an allocation of the given size
+ *
+ * returns size_t.max if more than uint.max pages are requested
+ * (return type is still size_t to avoid truncation when being used
+ * in calculations, e.g. npages * PAGESIZE)
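+ * Example: numPages(1) == 1, numPages(PAGESIZE + 1) == 2.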
+ */
+ static size_t numPages(size_t size) nothrow @nogc
+ {
+ version (D_LP64)
+ {
+ if (size > PAGESIZE * cast(size_t)uint.max)
+ return size_t.max;
+ }
+ else
+ {
+ if (size > size_t.max - PAGESIZE)
+ return size_t.max;
+ }
+ return (size + PAGESIZE - 1) / PAGESIZE;
+ }
+
+ void* findBase(void* p) nothrow @nogc
+ {
+ size_t offset = cast(size_t)(p - baseAddr);
+ size_t pn = offset / PAGESIZE;
+ Bins bin = cast(Bins)pagetable[pn];
+
+ // Adjust bit to be at start of allocated memory block
+ if (bin < B_NUMSMALL)
+ {
+ auto baseOff = baseOffset(offset, bin);
+ const biti = baseOff >> Pool.ShiftBy.Small;
+ if (freebits.test (biti))
+ return null;
+ return baseAddr + baseOff;
+ }
+ if (bin == B_PAGE)
+ {
+ return baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
+ }
+ if (bin == B_PAGEPLUS)
+ {
+ size_t pageOffset = bPageOffsets[pn];
+ offset -= pageOffset * PAGESIZE;
+ pn -= pageOffset;
+
+ return baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
+ }
+ // we are in a B_FREE page
+ assert(bin == B_FREE);
+ return null;
+ }
+
+ size_t slGetSize(void* p) nothrow @nogc
+ {
+ if (isLargeObject)
+ return (cast(LargeObjectPool*)&this).getPages(p) * PAGESIZE;
+ else
+ return (cast(SmallObjectPool*)&this).getSize(p);
+ }
+
+ BlkInfo slGetInfo(void* p) nothrow
+ {
+ if (isLargeObject)
+ return (cast(LargeObjectPool*)&this).getInfo(p);
+ else
+ return (cast(SmallObjectPool*)&this).getInfo(p);
+ }
+
+
+ void Invariant() const {}
+
+ debug(INVARIANT)
+ invariant()
+ {
+ if (baseAddr)
+ {
+ //if (baseAddr + npages * PAGESIZE != topAddr)
+ //printf("baseAddr = %p, npages = %d, topAddr = %p\n", baseAddr, npages, topAddr);
+ assert(baseAddr + npages * PAGESIZE == topAddr);
+ }
+
+ if (pagetable !is null)
+ {
+ for (size_t i = 0; i < npages; i++)
+ {
+ Bins bin = cast(Bins)pagetable[i];
+ assert(bin < B_MAX);
+ }
+ }
+ }
+
+ void setPointerBitmapSmall(void* p, size_t s, size_t allocSize, uint attr, const TypeInfo ti) nothrow
+ {
+ if (!(attr & BlkAttr.NO_SCAN))
+ setPointerBitmap(p, s, allocSize, ti, attr);
+ }
+
+ pragma(inline,false)
+ void setPointerBitmap(void* p, size_t s, size_t allocSize, const TypeInfo ti, uint attr) nothrow
+ {
+ size_t offset = p - baseAddr;
+ //debug(PRINTF) printGCBits(&pool.is_pointer);
+
+ debug(PRINTF)
+ printf("Setting a pointer bitmap for %s at %p + %llu\n", debugTypeName(ti).ptr, p, cast(ulong)s);
+
+ if (ti)
+ {
+ if (attr & BlkAttr.APPENDABLE)
+ {
+ // an array of classes is in fact an array of pointers
+ if (typeid(ti) is typeid(TypeInfo_Class))
+ goto L_conservative;
+ s = allocSize;
+ }
+
+ auto rtInfo = cast(const(size_t)*)ti.rtInfo();
+
+ if (rtInfo is rtinfoNoPointers)
+ {
+ debug(PRINTF) printf("\tCompiler generated rtInfo: no pointers\n");
+ is_pointer.clrRange(offset/(void*).sizeof, s/(void*).sizeof);
+ }
+ else if (rtInfo is rtinfoHasPointers)
+ {
+ debug(PRINTF) printf("\tCompiler generated rtInfo: has pointers\n");
+ is_pointer.setRange(offset/(void*).sizeof, s/(void*).sizeof);
+ }
+ else
+ {
+ const(size_t)* bitmap = cast (size_t*) rtInfo;
+ //first element of rtInfo is the size of the object the bitmap encodes
+ size_t element_size = * bitmap;
+ bitmap++;
+ size_t tocopy;
+ if (attr & BlkAttr.APPENDABLE)
+ {
+ tocopy = s/(void*).sizeof;
+ is_pointer.copyRangeRepeating(offset/(void*).sizeof, tocopy, bitmap, element_size/(void*).sizeof);
+ }
+ else
+ {
+ tocopy = (s < element_size ? s : element_size)/(void*).sizeof;
+ is_pointer.copyRange(offset/(void*).sizeof, tocopy, bitmap);
+ }
+
+ debug(PRINTF) printf("\tSetting bitmap for new object (%s)\n\t\tat %p\t\tcopying from %p + %llu: ",
+ debugTypeName(ti).ptr, p, bitmap, cast(ulong)element_size);
+ debug(PRINTF)
+ for (size_t i = 0; i < element_size/((void*).sizeof); i++)
+ printf("%d", (bitmap[i/(8*size_t.sizeof)] >> (i%(8*size_t.sizeof))) & 1);
+ debug(PRINTF) printf("\n");
+
+ if (tocopy * (void*).sizeof < s) // better safe than sorry: if allocated more, assume pointers inside
+ {
+ debug(PRINTF) printf(" Appending %d pointer bits\n", s/(void*).sizeof - tocopy);
+ is_pointer.setRange(offset/(void*).sizeof + tocopy, s/(void*).sizeof - tocopy);
+ }
+ }
+
+ if (s < allocSize)
+ {
+ offset = (offset + s + (void*).sizeof - 1) & ~((void*).sizeof - 1);
+ is_pointer.clrRange(offset/(void*).sizeof, (allocSize - s)/(void*).sizeof);
+ }
+ }
+ else
+ {
+ L_conservative:
+ // limit pointers to actual size of allocation? might fail for arrays that append
+ // without notifying the GC
+ s = allocSize;
+
+ debug(PRINTF) printf("Allocating a block without TypeInfo\n");
+ is_pointer.setRange(offset/(void*).sizeof, s/(void*).sizeof);
+ }
+ //debug(PRINTF) printGCBits(&pool.is_pointer);
+ }
+}
+
+struct LargeObjectPool
+{
+ Pool base;
+ alias base this;
+
+ debug(INVARIANT)
+ void Invariant()
+ {
+ //base.Invariant();
+ for (size_t n = 0; n < npages; )
+ {
+ uint np = bPageOffsets[n];
+ assert(np > 0 && np <= npages - n);
+
+ if (pagetable[n] == B_PAGE)
+ {
+ for (uint p = 1; p < np; p++)
+ {
+ assert(pagetable[n + p] == B_PAGEPLUS);
+ assert(bPageOffsets[n + p] == p);
+ }
+ }
+ else if (pagetable[n] == B_FREE)
+ {
+ for (uint p = 1; p < np; p++)
+ {
+ assert(pagetable[n + p] == B_FREE);
+ }
+ assert(bPageOffsets[n + np - 1] == np);
+ }
+ else
+ assert(false);
+ n += np;
+ }
+ }
+
+ /**
+ * Allocate n pages from Pool.
+ * Returns OPFAIL on failure.
+ */
+ size_t allocPages(size_t n) nothrow
+ {
+ if (largestFree < n || searchStart + n > npages)
+ return OPFAIL;
+
+ //debug(PRINTF) printf("Pool::allocPages(n = %d)\n", n);
+ size_t largest = 0;
+ if (pagetable[searchStart] == B_PAGEPLUS)
+ {
+ searchStart -= bPageOffsets[searchStart]; // jump to B_PAGE
+ searchStart += bPageOffsets[searchStart];
+ }
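+ // skip over allocated pages using their stored sizes until the first free page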
+ while (searchStart < npages && pagetable[searchStart] == B_PAGE)
+ searchStart += bPageOffsets[searchStart];
+
+ for (size_t i = searchStart; i < npages; )
+ {
+ assert(pagetable[i] == B_FREE);
+
+ auto p = bPageOffsets[i];
+ if (p > n)
+ {
+ setFreePageOffsets(i + n, p - n);
+ goto L_found;
+ }
+ if (p == n)
+ {
+ L_found:
+ pagetable[i] = B_PAGE;
+ bPageOffsets[i] = cast(uint) n;
+ if (n > 1)
+ {
+ memset(&pagetable[i + 1], B_PAGEPLUS, n - 1);
+ for (auto offset = 1; offset < n; offset++)
+ bPageOffsets[i + offset] = cast(uint) offset;
+ }
+ freepages -= n;
+ return i;
+ }
+ if (p > largest)
+ largest = p;
+
+ i += p;
+ while (i < npages && pagetable[i] == B_PAGE)
+ {
+ // we have the size information, so we skip a whole bunch of pages.
+ i += bPageOffsets[i];
+ }
+ }
+
+ // not enough free pages found, remember largest free chunk
+ largestFree = largest;
+ return OPFAIL;
+ }
+
+ /**
+ * Free npages pages starting with pagenum.
+ */
+ void freePages(size_t pagenum, size_t npages) nothrow @nogc
+ {
+ //memset(&pagetable[pagenum], B_FREE, npages);
+ if (pagenum < searchStart)
+ searchStart = pagenum;
+
+ for (size_t i = pagenum; i < npages + pagenum; i++)
+ {
+ assert(pagetable[i] < B_FREE);
+ pagetable[i] = B_FREE;
+ }
+ freepages += npages;
+ largestFree = freepages; // invalidate
+ }
+
+ /**
+ * Set the first and the last entry of a B_FREE block to the size
+ */
+ void setFreePageOffsets(size_t page, size_t num) nothrow @nogc
+ {
+ assert(pagetable[page] == B_FREE);
+ assert(pagetable[page + num - 1] == B_FREE);
+ bPageOffsets[page] = cast(uint)num;
+ if (num > 1)
+ bPageOffsets[page + num - 1] = cast(uint)num;
+ }
+
+ void mergeFreePageOffsets(bool bwd, bool fwd)(size_t page, size_t num) nothrow @nogc
+ {
+ static if (bwd)
+ {
+ if (page > 0 && pagetable[page - 1] == B_FREE)
+ {
+ auto sz = bPageOffsets[page - 1];
+ page -= sz;
+ num += sz;
+ }
+ }
+ static if (fwd)
+ {
+ if (page + num < npages && pagetable[page + num] == B_FREE)
+ num += bPageOffsets[page + num];
+ }
+ setFreePageOffsets(page, num);
+ }
+
+ /**
+ * Get pages of allocation at pointer p in pool.
+ */
+ size_t getPages(void *p) const nothrow @nogc
+ in
+ {
+ assert(p >= baseAddr);
+ assert(p < topAddr);
+ }
+ do
+ {
+ if (cast(size_t)p & (PAGESIZE - 1)) // check for interior pointer
+ return 0;
+ size_t pagenum = pagenumOf(p);
+ Bins bin = cast(Bins)pagetable[pagenum];
+ if (bin != B_PAGE)
+ return 0;
+ return bPageOffsets[pagenum];
+ }
+
+ /**
+ * Get size of allocation at page pn in pool.
+ */
+ size_t getSize(size_t pn) const nothrow @nogc
+ {
+ assert(pagetable[pn] == B_PAGE);
+ return cast(size_t) bPageOffsets[pn] * PAGESIZE;
+ }
+
+ /**
+ *
+ */
+ BlkInfo getInfo(void* p) nothrow
+ {
+ BlkInfo info;
+
+ size_t offset = cast(size_t)(p - baseAddr);
+ size_t pn = offset / PAGESIZE;
+ Bins bin = cast(Bins)pagetable[pn];
+
+ if (bin == B_PAGEPLUS)
+ pn -= bPageOffsets[pn];
+ else if (bin != B_PAGE)
+ return info; // no info for free pages
+
+ info.base = baseAddr + pn * PAGESIZE;
+ info.size = getSize(pn);
+ info.attr = getBits(pn);
+ return info;
+ }
+
+ void runFinalizers(const scope void[] segment) nothrow
+ {
+ foreach (pn; 0 .. npages)
+ {
+ Bins bin = cast(Bins)pagetable[pn];
+ if (bin > B_PAGE)
+ continue;
+ size_t biti = pn;
+
+ if (!finals.test(biti))
+ continue;
+
+ auto p = sentinel_add(baseAddr + pn * PAGESIZE);
+ size_t size = sentinel_size(p, getSize(pn));
+ uint attr = getBits(biti);
+
+ if (!rt_hasFinalizerInSegment(p, size, attr, segment))
+ continue;
+
+ rt_finalizeFromGC(p, size, attr);
+
+ clrBits(biti, ~BlkAttr.NONE);
+
+ if (pn < searchStart)
+ searchStart = pn;
+
+ debug(COLLECT_PRINTF) printf("\tcollecting big %p\n", p);
+ //log_free(sentinel_add(p));
+
+ size_t n = 1;
+ for (; pn + n < npages; ++n)
+ if (pagetable[pn + n] != B_PAGEPLUS)
+ break;
+ debug (MEMSTOMP) memset(baseAddr + pn * PAGESIZE, 0xF3, n * PAGESIZE);
+ freePages(pn, n);
+ mergeFreePageOffsets!(true, true)(pn, n);
+ }
+ }
+}
+
+
+struct SmallObjectPool
+{
+ Pool base;
+ alias base this;
+
+ debug(INVARIANT)
+ void Invariant()
+ {
+ //base.Invariant();
+ uint cntRecover = 0;
+ foreach (Bins bin; 0 .. B_NUMSMALL)
+ {
+ for (auto pn = recoverPageFirst[bin]; pn < npages; pn = binPageChain[pn])
+ {
+ assert(pagetable[pn] == bin);
+ cntRecover++;
+ }
+ }
+ uint cntFree = 0;
+ for (auto pn = searchStart; pn < npages; pn = binPageChain[pn])
+ {
+ assert(pagetable[pn] == B_FREE);
+ cntFree++;
+ }
+ assert(cntFree == freepages);
+ assert(cntFree + cntRecover <= npages);
+ }
+
+ /**
+ * Get size of pointer p in pool.
+ */
+ size_t getSize(void *p) const nothrow @nogc
+ in
+ {
+ assert(p >= baseAddr);
+ assert(p < topAddr);
+ }
+ do
+ {
+ size_t pagenum = pagenumOf(p);
+ Bins bin = cast(Bins)pagetable[pagenum];
+ assert(bin < B_PAGE);
+ if (p != cast(void*)baseOffset(cast(size_t)p, bin)) // check for interior pointer
+ return 0;
+ const biti = cast(size_t)(p - baseAddr) >> ShiftBy.Small;
+ if (freebits.test (biti))
+ return 0;
+ return binsize[bin];
+ }
+
+ BlkInfo getInfo(void* p) nothrow
+ {
+ BlkInfo info;
+ size_t offset = cast(size_t)(p - baseAddr);
+ size_t pn = offset / PAGESIZE;
+ Bins bin = cast(Bins)pagetable[pn];
+
+ if (bin >= B_PAGE)
+ return info;
+
+ auto base = cast(void*)baseOffset(cast(size_t)p, bin);
+ const biti = cast(size_t)(base - baseAddr) >> ShiftBy.Small;
+ if (freebits.test (biti))
+ return info;
+
+ info.base = base;
+ info.size = binsize[bin];
+ offset = info.base - baseAddr;
+ info.attr = getBits(biti);
+
+ return info;
+ }
+
+ void runFinalizers(const scope void[] segment) nothrow
+ {
+ foreach (pn; 0 .. npages)
+ {
+ Bins bin = cast(Bins)pagetable[pn];
+ if (bin >= B_PAGE)
+ continue;
+
+ immutable size = binsize[bin];
+ auto p = baseAddr + pn * PAGESIZE;
+ const ptop = p + PAGESIZE - size + 1;
+ immutable base = pn * (PAGESIZE/16);
+ immutable bitstride = size / 16;
+
+ bool freeBits;
+ PageBits toFree;
+
+ for (size_t i; p < ptop; p += size, i += bitstride)
+ {
+ immutable biti = base + i;
+
+ if (!finals.test(biti))
+ continue;
+
+ auto q = sentinel_add(p);
+ uint attr = getBits(biti);
+ const ssize = sentinel_size(q, size);
+ if (!rt_hasFinalizerInSegment(q, ssize, attr, segment))
+ continue;
+
+ rt_finalizeFromGC(q, ssize, attr);
+
+ freeBits = true;
+ toFree.set(i);
+
+ debug(COLLECT_PRINTF) printf("\tcollecting %p\n", p);
+ //log_free(sentinel_add(p));
+
+ debug (MEMSTOMP) memset(p, 0xF3, size);
+ }
+
+ if (freeBits)
+ freePageBits(pn, toFree);
+ }
+ }
+
+ /**
+ * Allocate a page of blocks of size binsize[bin].
+ * Returns:
+ * head of a singly linked list of new entries
+ */
+ List* allocPage(Bins bin) nothrow
+ {
+ if (searchStart >= npages)
+ return null;
+
+ assert(pagetable[searchStart] == B_FREE);
+
+ L1:
+ size_t pn = searchStart;
+ searchStart = binPageChain[searchStart];
+ binPageChain[pn] = Pool.PageRecovered;
+ pagetable[pn] = cast(ubyte)bin;
+ freepages--;
+
+ // Convert page to free list
+ size_t size = binsize[bin];
+ void* p = baseAddr + pn * PAGESIZE;
+ auto first = cast(List*) p;
+
+ // ensure 2 blocks of <size> bytes are available below ptop: one
+ // linked in the loop, and one left for the tail block
+ void* ptop = p + PAGESIZE - 2 * size + 1;
+ for (; p < ptop; p += size)
+ {
+ (cast(List *)p).next = cast(List *)(p + size);
+ (cast(List *)p).pool = &base;
+ }
+ (cast(List *)p).next = null;
+ (cast(List *)p).pool = &base;
+ return first;
+ }
+}
+
+debug(SENTINEL) {} else // no additional capacity with SENTINEL
+unittest // bugzilla 14467
+{
+ int[] arr = new int[10];
+ assert(arr.capacity);
+ arr = arr[$..$];
+ assert(arr.capacity);
+}
+
+unittest // bugzilla 15353
+{
+ import core.memory : GC;
+
+ static struct Foo
+ {
+ ~this()
+ {
+ GC.free(buf); // ignored in finalizer
+ }
+
+ void* buf;
+ }
+ new Foo(GC.malloc(10));
+ GC.collect();
+}
+
+unittest // bugzilla 15822
+{
+ import core.memory : GC;
+
+ __gshared ubyte[16] buf;
+ static struct Foo
+ {
+ ~this()
+ {
+ GC.removeRange(ptr);
+ GC.removeRoot(ptr);
+ }
+
+ ubyte* ptr;
+ }
+ GC.addRoot(buf.ptr);
+ GC.addRange(buf.ptr, buf.length);
+ new Foo(buf.ptr);
+ GC.collect();
+}
+
+unittest // bugzilla 1180
+{
+ import core.exception;
+ try
+ {
+ size_t x = size_t.max - 100;
+ byte[] big_buf = new byte[x];
+ }
+ catch (OutOfMemoryError)
+ {
+ }
+}
+
+/* ============================ PRINTF =============================== */
+
+debug(PRINTF_TO_FILE)
+{
+ private __gshared MonoTime gcStartTick;
+ private __gshared FILE* gcx_fh;
+ private __gshared bool hadNewline = false;
+ import core.internal.spinlock;
+ static printLock = shared(AlignedSpinLock)(SpinLock.Contention.lengthy);
+
+ private int printf(ARGS...)(const char* fmt, ARGS args) nothrow
+ {
+ printLock.lock();
+ scope(exit) printLock.unlock();
+
+ if (!gcx_fh)
+ gcx_fh = fopen("gcx.log", "w");
+ if (!gcx_fh)
+ return 0;
+
+ int len;
+ if (MonoTime.ticksPerSecond == 0)
+ {
+ len = fprintf(gcx_fh, "before init: ");
+ }
+ else if (hadNewline)
+ {
+ if (gcStartTick == MonoTime.init)
+ gcStartTick = MonoTime.currTime;
+ immutable timeElapsed = MonoTime.currTime - gcStartTick;
+ immutable secondsAsDouble = timeElapsed.total!"hnsecs" / cast(double)convert!("seconds", "hnsecs")(1);
+ len = fprintf(gcx_fh, "%10.6f: ", secondsAsDouble);
+ }
+ len += fprintf(gcx_fh, fmt, args);
+ fflush(gcx_fh);
+ import core.stdc.string;
+ hadNewline = fmt && fmt[0] && fmt[strlen(fmt) - 1] == '\n';
+ return len;
+ }
+}
+
+debug(PRINTF) void printFreeInfo(Pool* pool) nothrow
+{
+ uint nReallyFree;
+ foreach (i; 0..pool.npages) {
+ if (pool.pagetable[i] >= B_FREE) nReallyFree++;
+ }
+
+ printf("Pool %p: %d really free, %d supposedly free\n", pool, nReallyFree, pool.freepages);
+}
+
+debug(PRINTF)
+void printGCBits(GCBits* bits)
+{
+ for (size_t i = 0; i < bits.nwords; i++)
+ {
+ if (i % 32 == 0) printf("\n\t");
+ printf("%x ", bits.data[i]);
+ }
+ printf("\n");
+}
+
+// we can assume the name is always from a literal, so it is zero terminated
+debug(PRINTF)
+string debugTypeName(const(TypeInfo) ti) nothrow
+{
+ string name;
+ if (ti is null)
+ name = "null";
+ else if (auto ci = cast(TypeInfo_Class)ti)
+ name = ci.name;
+ else if (auto si = cast(TypeInfo_Struct)ti)
+ name = si.mangledName; // .name() might GC-allocate, avoid deadlock
+ else if (auto ci = cast(TypeInfo_Const)ti)
+ static if (__traits(compiles,ci.base)) // different whether compiled with object.di or object.d
+ return debugTypeName(ci.base);
+ else
+ return debugTypeName(ci.next);
+ else
+ name = ti.classinfo.name;
+ return name;
+}
+
+/* ======================= Leak Detector =========================== */
+
+debug (LOGGING)
+{
+ struct Log
+ {
+ void* p;
+ size_t size;
+ size_t line;
+ char* file;
+ void* parent;
+
+ void print() nothrow
+ {
+ printf(" p = %p, size = %lld, parent = %p ", p, cast(ulong)size, parent);
+ if (file)
+ {
+ printf("%s(%u)", file, cast(uint)line);
+ }
+ printf("\n");
+ }
+ }
+
+
+ struct LogArray
+ {
+ size_t dim;
+ size_t allocdim;
+ Log *data;
+
+ void Dtor() nothrow @nogc
+ {
+ if (data)
+ cstdlib.free(data);
+ data = null;
+ }
+
+ void reserve(size_t nentries) nothrow @nogc
+ {
+ assert(dim <= allocdim);
+ if (allocdim - dim < nentries)
+ {
+ allocdim = (dim + nentries) * 2;
+ assert(dim + nentries <= allocdim);
+ if (!data)
+ {
+ data = cast(Log*)cstdlib.malloc(allocdim * Log.sizeof);
+ if (!data && allocdim)
+ onOutOfMemoryErrorNoGC();
+ }
+ else
+ { Log *newdata;
+
+ newdata = cast(Log*)cstdlib.malloc(allocdim * Log.sizeof);
+ if (!newdata && allocdim)
+ onOutOfMemoryErrorNoGC();
+ memcpy(newdata, data, dim * Log.sizeof);
+ cstdlib.free(data);
+ data = newdata;
+ }
+ }
+ }
+
+
+ void push(Log log) nothrow @nogc
+ {
+ reserve(1);
+ data[dim++] = log;
+ }
+
+ void remove(size_t i) nothrow @nogc
+ {
+ memmove(data + i, data + i + 1, (dim - i) * Log.sizeof);
+ dim--;
+ }
+
+
+ size_t find(void *p) nothrow @nogc
+ {
+ for (size_t i = 0; i < dim; i++)
+ {
+ if (data[i].p == p)
+ return i;
+ }
+ return OPFAIL; // not found
+ }
+
+
+ void copy(LogArray *from) nothrow @nogc
+ {
+ if (allocdim < from.dim)
+ reserve(from.dim - dim);
+ assert(from.dim <= allocdim);
+ memcpy(data, from.data, from.dim * Log.sizeof);
+ dim = from.dim;
+ }
+ }
+
+ struct LeakDetector
+ {
+ Gcx* gcx;
+ LogArray current;
+ LogArray prev;
+
+ private void initialize(Gcx* gc)
+ {
+ gcx = gc;
+ //debug(PRINTF) printf("+log_init()\n");
+ current.reserve(1000);
+ prev.reserve(1000);
+ //debug(PRINTF) printf("-log_init()\n");
+ }
+
+
+ private void log_malloc(void *p, size_t size) nothrow
+ {
+ //debug(PRINTF) printf("+log_malloc(p = %p, size = %zd)\n", p, size);
+ Log log;
+
+ log.p = p;
+ log.size = size;
+ log.line = ConservativeGC.line;
+ log.file = ConservativeGC.file;
+ log.parent = null;
+
+ ConservativeGC.line = 0;
+ ConservativeGC.file = null;
+
+ current.push(log);
+ //debug(PRINTF) printf("-log_malloc()\n");
+ }
+
+
+ private void log_free(void *p, size_t size) nothrow @nogc
+ {
+ //debug(PRINTF) printf("+log_free(%p)\n", p);
+ auto i = current.find(p);
+ if (i == OPFAIL)
+ {
+ debug(PRINTF) printf("free'ing unallocated memory %p (size %zu)\n", p, size);
+ }
+ else
+ current.remove(i);
+ //debug(PRINTF) printf("-log_free()\n");
+ }
+
+
+ private void log_collect() nothrow
+ {
+ //debug(PRINTF) printf("+log_collect()\n");
+ // Print everything in current that is not in prev
+
+ debug(PRINTF) printf("New pointers this cycle: --------------------------------\n");
+ size_t used = 0;
+ for (size_t i = 0; i < current.dim; i++)
+ {
+ auto j = prev.find(current.data[i].p);
+ if (j == OPFAIL)
+ current.data[i].print();
+ else
+ used++;
+ }
+
+ debug(PRINTF) printf("All roots this cycle: --------------------------------\n");
+ for (size_t i = 0; i < current.dim; i++)
+ {
+ void* p = current.data[i].p;
+ if (!gcx.findPool(current.data[i].parent))
+ {
+ auto j = prev.find(current.data[i].p);
+ debug(PRINTF) printf(j == OPFAIL ? "N" : " ");
+ current.data[i].print();
+ }
+ }
+
+ debug(PRINTF) printf("Used = %d-------------------------------------------------\n", used);
+ prev.copy(&current);
+
+ debug(PRINTF) printf("-log_collect()\n");
+ }
+
+
+ private void log_parent(void *p, void *parent) nothrow
+ {
+ //debug(PRINTF) printf("+log_parent()\n");
+ auto i = current.find(p);
+ if (i == OPFAIL)
+ {
+ debug(PRINTF) printf("parent'ing unallocated memory %p, parent = %p\n", p, parent);
+ Pool *pool;
+ pool = gcx.findPool(p);
+ assert(pool);
+ size_t offset = cast(size_t)(p - pool.baseAddr);
+ size_t biti;
+ size_t pn = offset / PAGESIZE;
+ Bins bin = cast(Bins)pool.pagetable[pn];
+ biti = (offset & (PAGESIZE - 1)) >> pool.shiftBy;
+ debug(PRINTF) printf("\tbin = %d, offset = x%x, biti = x%x\n", bin, offset, biti);
+ }
+ else
+ {
+ current.data[i].parent = parent;
+ }
+ //debug(PRINTF) printf("-log_parent()\n");
+ }
+ }
+}
+else
+{
+ struct LeakDetector
+ {
+ static void initialize(Gcx* gcx) nothrow { }
+ static void log_malloc(void *p, size_t size) nothrow { }
+ static void log_free(void *p, size_t size) nothrow @nogc {}
+ static void log_collect() nothrow { }
+ static void log_parent(void *p, void *parent) nothrow { }
+ }
+}
+
+/* ============================ SENTINEL =============================== */
+
+debug (SENTINEL)
+{
+ // The pre-sentinel must be smaller than 16 bytes so that the same GC bits
+ // are used for the allocated pointer and the user pointer.
+ // Use uint on both 32 and 64 bit platforms, limiting allocation sizes to < 4GB.
+ const uint SENTINEL_PRE = 0xF4F4F4F4;
+ const ubyte SENTINEL_POST = 0xF5; // 8 bits
+ const uint SENTINEL_EXTRA = 2 * uint.sizeof + 1;
+
+
+ inout(uint*) sentinel_psize(inout void *p) nothrow @nogc { return &(cast(inout uint *)p)[-2]; }
+ inout(uint*) sentinel_pre(inout void *p) nothrow @nogc { return &(cast(inout uint *)p)[-1]; }
+ inout(ubyte*) sentinel_post(inout void *p) nothrow @nogc { return &(cast(inout ubyte *)p)[*sentinel_psize(p)]; }
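+ // resulting layout: [size:uint][SENTINEL_PRE:uint][user data ...][SENTINEL_POST:ubyte],
+ // with sentinel_add/sentinel_sub converting between the raw and the user pointer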
+
+
+ void sentinel_init(void *p, size_t size) nothrow @nogc
+ {
+ assert(size <= uint.max);
+ *sentinel_psize(p) = cast(uint)size;
+ *sentinel_pre(p) = SENTINEL_PRE;
+ *sentinel_post(p) = SENTINEL_POST;
+ }
+
+
+ void sentinel_Invariant(const void *p) nothrow @nogc
+ {
+ debug
+ {
+ assert(*sentinel_pre(p) == SENTINEL_PRE);
+ assert(*sentinel_post(p) == SENTINEL_POST);
+ }
+ else if (*sentinel_pre(p) != SENTINEL_PRE || *sentinel_post(p) != SENTINEL_POST)
+ onInvalidMemoryOperationError(); // also trigger in release build
+ }
+
+ size_t sentinel_size(const void *p, size_t alloc_size) nothrow @nogc
+ {
+ return *sentinel_psize(p);
+ }
+
+ void *sentinel_add(void *p) nothrow @nogc
+ {
+ return p + 2 * uint.sizeof;
+ }
+
+
+ void *sentinel_sub(void *p) nothrow @nogc
+ {
+ return p - 2 * uint.sizeof;
+ }
+}
+else
+{
+ const uint SENTINEL_EXTRA = 0;
+
+
+ void sentinel_init(void *p, size_t size) nothrow @nogc
+ {
+ }
+
+
+ void sentinel_Invariant(const void *p) nothrow @nogc
+ {
+ }
+
+ size_t sentinel_size(const void *p, size_t alloc_size) nothrow @nogc
+ {
+ return alloc_size;
+ }
+
+ void *sentinel_add(void *p) nothrow @nogc
+ {
+ return p;
+ }
+
+
+ void *sentinel_sub(void *p) nothrow @nogc
+ {
+ return p;
+ }
+}
+
+debug (MEMSTOMP)
+unittest
+{
+ import core.memory;
+ auto p = cast(size_t*)GC.malloc(size_t.sizeof*3);
+ assert(*p == cast(size_t)0xF0F0F0F0F0F0F0F0);
+ p[2] = 0; // the first two words will be used for the free list
+ GC.free(p);
+ assert(p[2] == cast(size_t)0xF2F2F2F2F2F2F2F2);
+}
+
+debug (SENTINEL)
+unittest
+{
+ import core.memory;
+ auto p = cast(ubyte*)GC.malloc(1);
+ assert(p[-1] == 0xF4);
+ assert(p[ 1] == 0xF5);
+
+ // See also stand-alone tests in test/gc
+}
+
+unittest
+{
+ import core.memory;
+
+ // https://issues.dlang.org/show_bug.cgi?id=9275
+ GC.removeRoot(null);
+ GC.removeRoot(cast(void*)13);
+}
+
+// improve predictability of coverage of code that is otherwise not hit by other tests
+debug (SENTINEL) {} else // cannot extend with SENTINEL
+debug (MARK_PRINTF) {} else // takes forever
+unittest
+{
+ import core.memory;
+ auto p = GC.malloc(260 << 20); // new pool has 390 MB
+ auto q = GC.malloc(65 << 20); // next chunk (larger than 64MB to ensure the same pool is used)
+ auto r = GC.malloc(65 << 20); // another chunk in same pool
+ assert(p + (260 << 20) == q);
+ assert(q + (65 << 20) == r);
+ GC.free(q);
+ // should trigger "assert(bin == B_FREE);" in mark due to dangling pointer q:
+ GC.collect();
+ // should trigger "break;" in extendNoSync:
+ size_t sz = GC.extend(p, 64 << 20, 66 << 20); // trigger size after p large enough (but limited)
+ assert(sz == 325 << 20);
+ GC.free(p);
+ GC.free(r);
+ r = q; // ensure q is not trashed before collection above
+
+ p = GC.malloc(70 << 20); // from the same pool
+ q = GC.malloc(70 << 20);
+ r = GC.malloc(70 << 20);
+ auto s = GC.malloc(70 << 20);
+ auto t = GC.malloc(70 << 20); // 350 MB of 390 MB used
+ assert(p + (70 << 20) == q);
+ assert(q + (70 << 20) == r);
+ assert(r + (70 << 20) == s);
+ assert(s + (70 << 20) == t);
+ GC.free(r); // ensure recalculation of largestFree in the next allocPages
+ auto z = GC.malloc(75 << 20); // needs new pool
+
+ GC.free(p);
+ GC.free(q);
+ GC.free(s);
+ GC.free(t);
+ GC.free(z);
+ GC.minimize(); // release huge pool
+}
+
+// https://issues.dlang.org/show_bug.cgi?id=19281
+debug (SENTINEL) {} else // cannot allow >= 4 GB with SENTINEL
+debug (MEMSTOMP) {} else // might take too long to actually touch the memory
+version (D_LP64) unittest
+{
+ static if (__traits(compiles, os_physical_mem))
+ {
+ // only run if the system has enough physical memory
+ size_t sz = 2L^^32;
+ //import core.stdc.stdio;
+ //printf("availphys = %lld", os_physical_mem());
+ if (os_physical_mem() > sz)
+ {
+ import core.memory;
+ GC.collect();
+ GC.minimize();
+ auto stats = GC.stats();
+ auto ptr = GC.malloc(sz, BlkAttr.NO_SCAN);
+ auto info = GC.query(ptr);
+ //printf("info.size = %lld", info.size);
+ assert(info.size >= sz);
+ GC.free(ptr);
+ GC.minimize();
+ auto nstats = GC.stats();
+ assert(nstats.usedSize == stats.usedSize);
+ assert(nstats.freeSize == stats.freeSize);
+ assert(nstats.allocatedInCurrentThread - sz == stats.allocatedInCurrentThread);
+ }
+ }
+}
+
+// https://issues.dlang.org/show_bug.cgi?id=19522
+unittest
+{
+ import core.memory;
+
+ void test(void* p)
+ {
+ assert(GC.getAttr(p) == BlkAttr.NO_SCAN);
+ assert(GC.setAttr(p + 4, BlkAttr.NO_SCAN) == 0); // interior pointer should fail
+ assert(GC.clrAttr(p + 4, BlkAttr.NO_SCAN) == 0); // interior pointer should fail
+ GC.free(p);
+ assert(GC.query(p).base == null);
+ assert(GC.query(p).size == 0);
+ assert(GC.addrOf(p) == null);
+ assert(GC.sizeOf(p) == 0); // fails
+ assert(GC.getAttr(p) == 0);
+ assert(GC.setAttr(p, BlkAttr.NO_SCAN) == 0);
+ assert(GC.clrAttr(p, BlkAttr.NO_SCAN) == 0);
+ }
+ void* large = GC.malloc(10000, BlkAttr.NO_SCAN);
+ test(large);
+
+ void* small = GC.malloc(100, BlkAttr.NO_SCAN);
+ test(small);
+}
+
+unittest
+{
+ import core.memory;
+
+ auto now = currTime;
+ GC.ProfileStats stats1 = GC.profileStats();
+ GC.collect();
+ GC.ProfileStats stats2 = GC.profileStats();
+ auto diff = currTime - now;
+
+ assert(stats2.totalCollectionTime - stats1.totalCollectionTime <= diff);
+ assert(stats2.totalPauseTime - stats1.totalPauseTime <= stats2.totalCollectionTime - stats1.totalCollectionTime);
+
+ assert(stats2.maxPauseTime >= stats1.maxPauseTime);
+ assert(stats2.maxCollectionTime >= stats1.maxCollectionTime);
+}
+
+// https://issues.dlang.org/show_bug.cgi?id=20214
+unittest
+{
+ import core.memory;
+ import core.stdc.stdio;
+
+ // allocate from large pool
+ auto o = GC.malloc(10);
+ auto p = (cast(void**)GC.malloc(4096 * (void*).sizeof))[0 .. 4096];
+ auto q = (cast(void**)GC.malloc(4096 * (void*).sizeof))[0 .. 4096];
+ if (p.ptr + p.length is q.ptr)
+ {
+ q[] = o; // fill with pointers
+
+ // shrink, unused area cleared?
+ auto nq = (cast(void**)GC.realloc(q.ptr, 4000 * (void*).sizeof))[0 .. 4000];
+ assert(q.ptr is nq.ptr);
+ assert(q.ptr[4095] !is o);
+
+ GC.free(q.ptr);
+ // expected to extend in place
+ auto np = (cast(void**)GC.realloc(p.ptr, 4200 * (void*).sizeof))[0 .. 4200];
+ assert(p.ptr is np.ptr);
+ assert(q.ptr[4200] !is o);
+ }
+ else
+ {
+ // adjacent allocations likely but not guaranteed
+ printf("unexpected pointers %p and %p\n", p.ptr, q.ptr);
+ }
+}
diff --git a/libphobos/libdruntime/gc/impl/manual/gc.d b/libphobos/libdruntime/core/internal/gc/impl/manual/gc.d
index f5c6b77..a65c636 100644
--- a/libphobos/libdruntime/gc/impl/manual/gc.d
+++ b/libphobos/libdruntime/core/internal/gc/impl/manual/gc.d
@@ -14,67 +14,52 @@
* GC.
*
* Copyright: Copyright Sean Kelly 2005 - 2016.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Sean Kelly
*/
+module core.internal.gc.impl.manual.gc;
-/* Copyright Sean Kelly 2005 - 2016.
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying file LICENSE or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- */
-module gc.impl.manual.gc;
-
-import gc.config;
-import gc.gcinterface;
+import core.gc.gcinterface;
-import rt.util.container.array;
+import core.internal.container.array;
import cstdlib = core.stdc.stdlib : calloc, free, malloc, realloc;
static import core.memory;
extern (C) void onOutOfMemoryError(void* pretend_sideffect = null) @trusted pure nothrow @nogc; /* dmd @@@BUG11461@@@ */
-class ManualGC : GC
+// register GC in C constructor (_STI_)
+extern(C) pragma(crt_constructor) void _d_register_manual_gc()
{
- __gshared Array!Root roots;
- __gshared Array!Range ranges;
-
- static void initialize(ref GC gc)
- {
- import core.stdc.string;
-
- if (config.gc != "manual")
- return;
-
- auto p = cstdlib.malloc(__traits(classInstanceSize, ManualGC));
- if (!p)
- onOutOfMemoryError();
+ import core.gc.registry;
+ registerGCFactory("manual", &initialize);
+}
- auto init = typeid(ManualGC).initializer();
- assert(init.length == __traits(classInstanceSize, ManualGC));
- auto instance = cast(ManualGC) memcpy(p, init.ptr, init.length);
- instance.__ctor();
+private GC initialize()
+{
+ import core.lifetime : emplace;
- gc = instance;
- }
+ auto gc = cast(ManualGC) cstdlib.malloc(__traits(classInstanceSize, ManualGC));
+ if (!gc)
+ onOutOfMemoryError();
- static void finalize(ref GC gc)
- {
- if (config.gc != "manual")
- return;
+ return emplace(gc);
+}
- auto instance = cast(ManualGC) gc;
- instance.Dtor();
- cstdlib.free(cast(void*) instance);
- }
+class ManualGC : GC
+{
+ Array!Root roots;
+ Array!Range ranges;
this()
{
}
- void Dtor()
+ ~this()
{
+ // TODO: cannot free as memory is overwritten and
+ // the monitor is still read in rt_finalize (called by destroy)
+ // cstdlib.free(cast(void*) this);
}
void enable()
@@ -121,7 +106,7 @@ class ManualGC : GC
return p;
}
- BlkInfo qalloc(size_t size, uint bits, const TypeInfo ti) nothrow
+ BlkInfo qalloc(size_t size, uint bits, const scope TypeInfo ti) nothrow
{
BlkInfo retval;
retval.base = malloc(size, bits, ti);
@@ -158,7 +143,7 @@ class ManualGC : GC
return 0;
}
- void free(void* p) nothrow
+ void free(void* p) nothrow @nogc
{
cstdlib.free(p);
}
@@ -167,7 +152,7 @@ class ManualGC : GC
* Determine the base address of the block containing p. If p is not a gc
* allocated pointer, return null.
*/
- void* addrOf(void* p) nothrow
+ void* addrOf(void* p) nothrow @nogc
{
return null;
}
@@ -176,7 +161,7 @@ class ManualGC : GC
* Determine the allocated size of pointer p. If p is an interior pointer
* or not a gc allocated pointer, return 0.
*/
- size_t sizeOf(void* p) nothrow
+ size_t sizeOf(void* p) nothrow @nogc
{
return 0;
}
@@ -195,6 +180,11 @@ class ManualGC : GC
return typeof(return).init;
}
+ core.memory.GC.ProfileStats profileStats() nothrow
+ {
+ return typeof(return).init;
+ }
+
void addRoot(void* p) nothrow @nogc
{
roots.insertBack(Root(p));
@@ -263,7 +253,7 @@ class ManualGC : GC
return 0;
}
- void runFinalizers(in void[] segment) nothrow
+ void runFinalizers(const scope void[] segment) nothrow
{
}
@@ -271,4 +261,9 @@ class ManualGC : GC
{
return false;
}
+
+ ulong allocatedInCurrentThread() nothrow
+ {
+ return typeof(return).init;
+ }
}
diff --git a/libphobos/libdruntime/core/internal/gc/impl/proto/gc.d b/libphobos/libdruntime/core/internal/gc/impl/proto/gc.d
new file mode 100644
index 0000000..ff044d9
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/gc/impl/proto/gc.d
@@ -0,0 +1,248 @@
+
+module core.internal.gc.impl.proto.gc;
+
+import core.gc.gcinterface;
+
+import core.internal.container.array;
+
+import cstdlib = core.stdc.stdlib : calloc, free, malloc, realloc;
+static import core.memory;
+
+extern (C) void onOutOfMemoryError(void* pretend_sideffect = null) @trusted pure nothrow @nogc; /* dmd @@@BUG11461@@@ */
+
+private
+{
+ extern (C) void gc_init_nothrow() nothrow @nogc;
+ extern (C) void gc_term();
+
+ extern (C) void gc_enable() nothrow;
+ extern (C) void gc_disable() nothrow;
+
+ extern (C) void* gc_malloc( size_t sz, uint ba = 0, const scope TypeInfo = null ) pure nothrow;
+ extern (C) void* gc_calloc( size_t sz, uint ba = 0, const scope TypeInfo = null ) pure nothrow;
+ extern (C) BlkInfo gc_qalloc( size_t sz, uint ba = 0, const scope TypeInfo = null ) pure nothrow;
+ extern (C) void* gc_realloc(return scope void* p, size_t sz, uint ba = 0, const scope TypeInfo = null ) pure nothrow;
+ extern (C) size_t gc_reserve( size_t sz ) nothrow;
+
+ extern (C) void gc_addRange(const void* p, size_t sz, const scope TypeInfo ti = null ) nothrow @nogc;
+ extern (C) void gc_addRoot(const void* p ) nothrow @nogc;
+}
+
+class ProtoGC : GC
+{
+ Array!Root roots;
+ Array!Range ranges;
+
+ // Call this function while the real GC is being initialized and the
+ // ProtoGC is being retired; it must run after the real GC is in
+ // place so the recorded roots and ranges can be handed over to it.
+ void transferRangesAndRoots()
+ {
+ // Transfer all ranges
+ foreach (ref r; ranges)
+ {
+ // Range(p, p + sz, cast() ti)
+ gc_addRange(r.pbot, r.ptop - r.pbot, r.ti);
+ }
+
+ // Transfer all roots
+ foreach (ref r; roots)
+ {
+ gc_addRoot(r.proot);
+ }
+ }
+
+ this()
+ {
+ }
+
+ void Dtor()
+ {
+ }
+
+ void enable()
+ {
+ .gc_init_nothrow();
+ .gc_enable();
+ }
+
+ void disable()
+ {
+ .gc_init_nothrow();
+ .gc_disable();
+ }
+
+ void collect() nothrow
+ {
+ }
+
+ void collectNoStack() nothrow
+ {
+ }
+
+ void minimize() nothrow
+ {
+ }
+
+ uint getAttr(void* p) nothrow
+ {
+ return 0;
+ }
+
+ uint setAttr(void* p, uint mask) nothrow
+ {
+ return 0;
+ }
+
+ uint clrAttr(void* p, uint mask) nothrow
+ {
+ return 0;
+ }
+
+ void* malloc(size_t size, uint bits, const scope TypeInfo ti) nothrow
+ {
+ .gc_init_nothrow();
+ return .gc_malloc(size, bits, ti);
+ }
+
+ BlkInfo qalloc(size_t size, uint bits, const scope TypeInfo ti) nothrow
+ {
+ .gc_init_nothrow();
+ return .gc_qalloc(size, bits, ti);
+ }
+
+ void* calloc(size_t size, uint bits, const scope TypeInfo ti) nothrow
+ {
+ .gc_init_nothrow();
+ return .gc_calloc(size, bits, ti);
+ }
+
+ void* realloc(void* p, size_t size, uint bits, const scope TypeInfo ti) nothrow
+ {
+ .gc_init_nothrow();
+ return .gc_realloc(p, size, bits, ti);
+ }
+
+ size_t extend(void* p, size_t minsize, size_t maxsize, const scope TypeInfo ti) nothrow
+ {
+ return 0;
+ }
+
+ size_t reserve(size_t size) nothrow
+ {
+ .gc_init_nothrow();
+ return .gc_reserve(size);
+ }
+
+ void free(void* p) nothrow @nogc
+ {
+ if (p) assert(false, "Invalid memory deallocation");
+ }
+
+ void* addrOf(void* p) nothrow @nogc
+ {
+ return null;
+ }
+
+ size_t sizeOf(void* p) nothrow @nogc
+ {
+ return 0;
+ }
+
+ BlkInfo query(void* p) nothrow
+ {
+ return BlkInfo.init;
+ }
+
+ core.memory.GC.Stats stats() nothrow
+ {
+ return typeof(return).init;
+ }
+
+
+ core.memory.GC.ProfileStats profileStats() nothrow
+ {
+ return typeof(return).init;
+ }
+
+
+ void addRoot(void* p) nothrow @nogc
+ {
+ roots.insertBack(Root(p));
+ }
+
+ void removeRoot(void* p) nothrow @nogc
+ {
+ foreach (ref r; roots)
+ {
+ if (r is p)
+ {
+ r = roots.back;
+ roots.popBack();
+ return;
+ }
+ }
+ }
+
+ @property RootIterator rootIter() return @nogc
+ {
+ return &rootsApply;
+ }
+
+ private int rootsApply(scope int delegate(ref Root) nothrow dg)
+ {
+ foreach (ref r; roots)
+ {
+ if (auto result = dg(r))
+ return result;
+ }
+ return 0;
+ }
+
+ void addRange(void* p, size_t sz, const TypeInfo ti = null) nothrow @nogc
+ {
+ ranges.insertBack(Range(p, p + sz, cast() ti));
+ }
+
+ void removeRange(void* p) nothrow @nogc
+ {
+ foreach (ref r; ranges)
+ {
+ if (r.pbot is p)
+ {
+ r = ranges.back;
+ ranges.popBack();
+ return;
+ }
+ }
+ }
+
+ @property RangeIterator rangeIter() return @nogc
+ {
+ return &rangesApply;
+ }
+
+ private int rangesApply(scope int delegate(ref Range) nothrow dg)
+ {
+ foreach (ref r; ranges)
+ {
+ if (auto result = dg(r))
+ return result;
+ }
+ return 0;
+ }
+
+ void runFinalizers(const scope void[] segment) nothrow
+ {
+ }
+
+ bool inFinalizer() nothrow
+ {
+ return false;
+ }
+
+ ulong allocatedInCurrentThread() nothrow
+ {
+ return stats().allocatedInCurrentThread;
+ }
+}
diff --git a/libphobos/libdruntime/gc/os.d b/libphobos/libdruntime/core/internal/gc/os.d
index 337f134..ca4cbe2 100644
--- a/libphobos/libdruntime/gc/os.d
+++ b/libphobos/libdruntime/core/internal/gc/os.d
@@ -1,17 +1,11 @@
/**
* Contains OS-level routines needed by the garbage collector.
*
- * Copyright: Copyright Digital Mars 2005 - 2013.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * Copyright: D Language Foundation 2005 - 2021.
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Walter Bright, David Friedman, Sean Kelly, Leandro Lucarella
*/
-
-/* Copyright Digital Mars 2005 - 2013.
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying file LICENSE or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- */
-module gc.os;
+module core.internal.gc.os;
version (Windows)
@@ -40,15 +34,53 @@ else version (Posix)
version = Darwin;
import core.sys.posix.sys.mman;
- version (FreeBSD) import core.sys.freebsd.sys.mman : MAP_ANON;
- version (DragonFlyBSD) import core.sys.dragonflybsd.sys.mman : MAP_ANON;
- version (NetBSD) import core.sys.netbsd.sys.mman : MAP_ANON;
- version (OpenBSD) import core.sys.openbsd.sys.mman : MAP_ANON;
- version (CRuntime_Glibc) import core.sys.linux.sys.mman : MAP_ANON;
- version (Darwin) import core.sys.darwin.sys.mman : MAP_ANON;
- version (CRuntime_UClibc) import core.sys.linux.sys.mman : MAP_ANON;
import core.stdc.stdlib;
+
+ /// Possible results for the wait_pid() function.
+ enum ChildStatus
+ {
+ done, /// The process has finished successfully
+ running, /// The process is still running
+ error /// There was an error waiting for the process
+ }
+
+ /**
+ * Wait for a process with PID pid to finish.
+ *
+ * If block is false, this function will not block, and will return ChildStatus.running if
+ * the process is still running. Otherwise it will always return ChildStatus.done
+ * (unless there is an error, in which case ChildStatus.error is returned).
+ */
+ ChildStatus wait_pid(pid_t pid, bool block = true) nothrow @nogc
+ {
+ import core.exception : onForkError;
+
+ int status = void;
+ pid_t waited_pid = void;
+ // In the case where we are blocking, we need to consider signals
+ // arriving while we wait, and resume the waiting if EINTR is returned
+ do {
+ errno = 0;
+ waited_pid = waitpid(pid, &status, block ? 0 : WNOHANG);
+ }
+ while (waited_pid == -1 && errno == EINTR);
+ if (waited_pid == 0)
+ return ChildStatus.running;
+ else if (errno == ECHILD)
+ return ChildStatus.done; // someone called posix.syswait
+ else if (waited_pid != pid || status != 0)
+ {
+ onForkError();
+ return ChildStatus.error;
+ }
+ return ChildStatus.done;
+ }
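// Illustrative sketch of the non-blocking mode described above (Posix only):
// poll a forked child until it exits instead of blocking in waitpid.
version (Posix) unittest
{
    import core.sys.posix.unistd : fork, _exit;
    import core.thread : Thread;
    import core.time : msecs;

    const child = fork();
    if (child == 0)
        _exit(0);                              // child: terminate right away
    ChildStatus st;
    while ((st = wait_pid(child, false)) == ChildStatus.running)
        Thread.sleep(1.msecs);                 // parent: poll without blocking
    assert(st == ChildStatus.done);
}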
+
+ public import core.sys.posix.unistd: pid_t, fork;
+ import core.sys.posix.sys.wait: waitpid, WNOHANG;
+ import core.stdc.errno: errno, EINTR, ECHILD;
+
//version = GC_Use_Alloc_MMap;
}
else
@@ -73,9 +105,18 @@ else static assert(false, "No supported allocation methods available.");
static if (is(typeof(VirtualAlloc))) // version (GC_Use_Alloc_Win32)
{
/**
+ * Indicates if an implementation supports fork().
+ *
+ * The value shown here is just demonstrative; the real value is defined based
+ * on the OS the code is being compiled for.
+ * enum HaveFork = true;
+ */
+ enum HaveFork = false;
+
+ /**
* Map memory.
*/
- void *os_mem_map(size_t nbytes) nothrow
+ void *os_mem_map(size_t nbytes) nothrow @nogc
{
return VirtualAlloc(null, nbytes, MEM_RESERVE | MEM_COMMIT,
PAGE_READWRITE);
@@ -88,35 +129,40 @@ static if (is(typeof(VirtualAlloc))) // version (GC_Use_Alloc_Win32)
* 0 success
* !=0 failure
*/
- int os_mem_unmap(void *base, size_t nbytes) nothrow
+ int os_mem_unmap(void *base, size_t nbytes) nothrow @nogc
{
return cast(int)(VirtualFree(base, 0, MEM_RELEASE) == 0);
}
}
else static if (is(typeof(mmap))) // else version (GC_Use_Alloc_MMap)
{
- void *os_mem_map(size_t nbytes) nothrow
+ enum HaveFork = true;
+
+ void *os_mem_map(size_t nbytes, bool share = false) nothrow @nogc
{ void *p;
- p = mmap(null, nbytes, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+ auto map_f = share ? MAP_SHARED : MAP_PRIVATE;
+ p = mmap(null, nbytes, PROT_READ | PROT_WRITE, map_f | MAP_ANON, -1, 0);
return (p == MAP_FAILED) ? null : p;
}
- int os_mem_unmap(void *base, size_t nbytes) nothrow
+ int os_mem_unmap(void *base, size_t nbytes) nothrow @nogc
{
return munmap(base, nbytes);
}
}
else static if (is(typeof(valloc))) // else version (GC_Use_Alloc_Valloc)
{
- void *os_mem_map(size_t nbytes) nothrow
+ enum HaveFork = false;
+
+ void *os_mem_map(size_t nbytes) nothrow @nogc
{
return valloc(nbytes);
}
- int os_mem_unmap(void *base, size_t nbytes) nothrow
+ int os_mem_unmap(void *base, size_t nbytes) nothrow @nogc
{
free(base);
return 0;
@@ -129,23 +175,26 @@ else static if (is(typeof(malloc))) // else version (GC_Use_Alloc_Malloc)
// to PAGESIZE alignment, there will be space for a void* at the end
// after PAGESIZE bytes used by the GC.
+ enum HaveFork = false;
- import gc.gc;
+ import core.internal.gc.impl.conservative.gc;
const size_t PAGE_MASK = PAGESIZE - 1;
- void *os_mem_map(size_t nbytes) nothrow
+ void *os_mem_map(size_t nbytes) nothrow @nogc
{ byte *p, q;
p = cast(byte *) malloc(nbytes + PAGESIZE);
+ if (!p)
+ return null;
q = p + ((PAGESIZE - ((cast(size_t) p & PAGE_MASK))) & PAGE_MASK);
* cast(void**)(q + nbytes) = p;
return q;
}
- int os_mem_unmap(void *base, size_t nbytes) nothrow
+ int os_mem_unmap(void *base, size_t nbytes) nothrow @nogc
{
free( *cast(void**)( cast(byte*) base + nbytes ) );
return 0;
@@ -212,3 +261,48 @@ else
}
}
}
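// Brief usage sketch of the mapping primitives above: whichever branch is
// compiled in, a successful os_mem_map is paired with an os_mem_unmap that
// returns 0 on success.
unittest
{
    enum size_t nbytes = 4096;
    void* p = os_mem_map(nbytes);
    if (p !is null)                        // mapping may fail (address space, etc.)
    {
        (cast(ubyte*) p)[0 .. nbytes] = 0; // the block is readable and writable
        assert(os_mem_unmap(p, nbytes) == 0);
    }
}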
+
+/**
+ Get the size of available physical memory
+
+ Returns:
+ size of installed physical RAM
+*/
+version (Windows)
+{
+ ulong os_physical_mem() nothrow @nogc
+ {
+ import core.sys.windows.winbase : GlobalMemoryStatus, MEMORYSTATUS;
+ MEMORYSTATUS stat;
+ GlobalMemoryStatus(&stat);
+ return stat.dwTotalPhys; // limited to 4GB for Win32
+ }
+}
+else version (Darwin)
+{
+ extern (C) int sysctl(const int* name, uint namelen, void* oldp, size_t* oldlenp, const void* newp, size_t newlen) @nogc nothrow;
+ ulong os_physical_mem() nothrow @nogc
+ {
+ enum
+ {
+ CTL_HW = 6,
+ HW_MEMSIZE = 24,
+ }
+ int[2] mib = [ CTL_HW, HW_MEMSIZE ];
+ ulong system_memory_bytes;
+ size_t len = system_memory_bytes.sizeof;
+ if (sysctl(mib.ptr, 2, &system_memory_bytes, &len, null, 0) != 0)
+ return 0;
+ return system_memory_bytes;
+ }
+}
+else version (Posix)
+{
+ ulong os_physical_mem() nothrow @nogc
+ {
+ import core.sys.posix.unistd : sysconf, _SC_PAGESIZE, _SC_PHYS_PAGES;
+ const pageSize = sysconf(_SC_PAGESIZE);
+ const pages = sysconf(_SC_PHYS_PAGES);
+ return pageSize * pages;
+ }
+}
diff --git a/libphobos/libdruntime/gc/pooltable.d b/libphobos/libdruntime/core/internal/gc/pooltable.d
index cfbe465..5924f9c 100644
--- a/libphobos/libdruntime/gc/pooltable.d
+++ b/libphobos/libdruntime/core/internal/gc/pooltable.d
@@ -1,11 +1,11 @@
/**
* A sorted array to quickly lookup pools.
*
- * Copyright: Copyright Digital Mars 2001 -.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * Copyright: D Language Foundation 2001 - 2021
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Walter Bright, David Friedman, Sean Kelly, Martin Nowak
*/
-module gc.pooltable;
+module core.internal.gc.pooltable;
static import cstdlib=core.stdc.stdlib;
@@ -42,6 +42,9 @@ nothrow:
++npools;
+ foreach (idx; i .. npools)
+ pools[idx].ptIndex = idx;
+
_minAddr = pools[0].baseAddr;
_maxAddr = pools[npools - 1].topAddr;
@@ -55,14 +58,14 @@ nothrow:
ref inout(Pool*) opIndex(size_t idx) inout pure
in { assert(idx < length); }
- body
+ do
{
return pools[idx];
}
inout(Pool*)[] opSlice(size_t a, size_t b) inout pure
in { assert(a <= length && b <= length); }
- body
+ do
{
return pools[a .. b];
}
@@ -123,7 +126,11 @@ nothrow:
for (; j < npools; ++j)
{
if (!pools[j].isFree) // keep
- swap(pools[i++], pools[j]);
+ {
+ swap(pools[i], pools[j]);
+ pools[i].ptIndex = i;
+ ++i;
+ }
}
// npooltable[0 .. i] => used pools
// npooltable[i .. npools] => free pools
@@ -148,6 +155,9 @@ nothrow:
{
if (!npools) return;
+ foreach (i; 0 .. npools)
+ assert(pools[i].ptIndex == i);
+
foreach (i, pool; pools[0 .. npools - 1])
assert(pool.baseAddr < pools[i + 1].baseAddr);
@@ -173,7 +183,7 @@ unittest
static struct MockPool
{
byte* baseAddr, topAddr;
- size_t freepages, npages;
+ size_t freepages, npages, ptIndex;
@property bool isFree() const pure nothrow { return freepages == npages; }
}
PoolTable!MockPool pooltable;
diff --git a/libphobos/libdruntime/core/internal/gc/proxy.d b/libphobos/libdruntime/core/internal/gc/proxy.d
new file mode 100644
index 0000000..2c89472
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/gc/proxy.d
@@ -0,0 +1,296 @@
+/**
+ * Contains the external GC interface.
+ *
+ * Copyright: D Language Foundation 2005 - 2021.
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * Authors: Walter Bright, Sean Kelly
+ */
+module core.internal.gc.proxy;
+
+import core.internal.gc.impl.proto.gc;
+import core.gc.config;
+import core.gc.gcinterface;
+import core.gc.registry : createGCInstance;
+
+static import core.memory;
+
+private
+{
+ static import core.memory;
+ alias BlkInfo = core.memory.GC.BlkInfo;
+
+ import core.internal.spinlock;
+ static SpinLock instanceLock;
+
+ __gshared bool isInstanceInit = false;
+ __gshared GC _instance = new ProtoGC();
+ __gshared GC proxiedGC; // used to iterate roots of Windows DLLs
+
+ pragma (inline, true) @trusted @nogc nothrow
+ GC instance() { return _instance; }
+}
+
+extern (C)
+{
+ import core.attribute : weak;
+
+ // do not import GC modules, they might add a dependency to this whole module
+ void _d_register_conservative_gc();
+ void _d_register_manual_gc();
+
+ // if you don't want to include the default GCs, replace this function at link time with another implementation
+ void* register_default_gcs() @weak
+ {
+ pragma(inline, false);
+ // do not call, they register implicitly through pragma(crt_constructor)
+ // avoid being optimized away
+ auto reg1 = &_d_register_conservative_gc;
+ auto reg2 = &_d_register_manual_gc;
+ return reg1 < reg2 ? reg1 : reg2;
+ }
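// Illustrative sketch of the link-time replacement mentioned above: user code
// can provide a strong extern(C) register_default_gcs that overrides the @weak
// one in this module, so only the collectors it references get linked in.
// _d_register_my_gc is a placeholder for a registration function analogous to
// the two declared above.
extern (C) void _d_register_my_gc();
extern (C) void* register_default_gcs()
{
    auto reg = &_d_register_my_gc; // referenced only so it isn't optimized away
    return reg;
}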
+
+ void gc_init()
+ {
+ instanceLock.lock();
+ if (!isInstanceInit)
+ {
+ register_default_gcs();
+ config.initialize();
+ auto protoInstance = instance;
+ auto newInstance = createGCInstance(config.gc);
+ if (newInstance is null)
+ {
+ import core.stdc.stdio : fprintf, stderr;
+ import core.stdc.stdlib : exit;
+
+ fprintf(stderr, "No GC was initialized, please recheck the name of the selected GC ('%.*s').\n", cast(int)config.gc.length, config.gc.ptr);
+ instanceLock.unlock();
+ exit(1);
+
+ // Shouldn't get here.
+ assert(0);
+ }
+ _instance = newInstance;
+ // Transfer all ranges and roots to the real GC.
+ (cast(ProtoGC) protoInstance).transferRangesAndRoots();
+ isInstanceInit = true;
+ }
+ instanceLock.unlock();
+ }
+
+ void gc_init_nothrow() nothrow
+ {
+ scope(failure)
+ {
+ import core.internal.abort;
+ abort("Cannot initialize the garbage collector.\n");
+ assert(0);
+ }
+ gc_init();
+ }
+
+ void gc_term()
+ {
+ if (isInstanceInit)
+ {
+ switch (config.cleanup)
+ {
+ default:
+ import core.stdc.stdio : fprintf, stderr;
+ fprintf(stderr, "Unknown GC cleanup method, please recheck ('%.*s').\n",
+ cast(int)config.cleanup.length, config.cleanup.ptr);
+ break;
+ case "none":
+ break;
+ case "collect":
+ // NOTE: There may be daemon threads still running when this routine is
+ // called. If so, cleaning memory out from under them is a good
+ // way to make them crash horribly. This probably doesn't matter
+ // much since the app is supposed to be shutting down anyway, but
+ // I'm disabling cleanup for now until I can think about it some
+ // more.
+ //
+ // NOTE: Due to popular demand, this has been re-enabled. It still has
+ // the problems mentioned above though, so I guess we'll see.
+
+ instance.collectNoStack(); // not really a 'collect all' -- still scans
+ // static data area, roots, and ranges.
+ break;
+ case "finalize":
+ instance.runFinalizers((cast(ubyte*)null)[0 .. size_t.max]);
+ break;
+ }
+ destroy(instance);
+ }
+ }
+
+ void gc_enable()
+ {
+ instance.enable();
+ }
+
+ void gc_disable()
+ {
+ instance.disable();
+ }
+
+ void gc_collect() nothrow
+ {
+ instance.collect();
+ }
+
+ void gc_minimize() nothrow
+ {
+ instance.minimize();
+ }
+
+ uint gc_getAttr( void* p ) nothrow
+ {
+ return instance.getAttr(p);
+ }
+
+ uint gc_setAttr( void* p, uint a ) nothrow
+ {
+ return instance.setAttr(p, a);
+ }
+
+ uint gc_clrAttr( void* p, uint a ) nothrow
+ {
+ return instance.clrAttr(p, a);
+ }
+
+ void* gc_malloc( size_t sz, uint ba = 0, const scope TypeInfo ti = null ) nothrow
+ {
+ return instance.malloc(sz, ba, ti);
+ }
+
+ BlkInfo gc_qalloc( size_t sz, uint ba = 0, const scope TypeInfo ti = null ) nothrow
+ {
+ return instance.qalloc( sz, ba, ti );
+ }
+
+ void* gc_calloc( size_t sz, uint ba = 0, const scope TypeInfo ti = null ) nothrow
+ {
+ return instance.calloc( sz, ba, ti );
+ }
+
+ void* gc_realloc( void* p, size_t sz, uint ba = 0, const scope TypeInfo ti = null ) nothrow
+ {
+ return instance.realloc( p, sz, ba, ti );
+ }
+
+ size_t gc_extend( void* p, size_t mx, size_t sz, const scope TypeInfo ti = null ) nothrow
+ {
+ return instance.extend( p, mx, sz,ti );
+ }
+
+ size_t gc_reserve( size_t sz ) nothrow
+ {
+ return instance.reserve( sz );
+ }
+
+ void gc_free( void* p ) nothrow @nogc
+ {
+ return instance.free( p );
+ }
+
+ void* gc_addrOf( void* p ) nothrow @nogc
+ {
+ return instance.addrOf( p );
+ }
+
+ size_t gc_sizeOf( void* p ) nothrow @nogc
+ {
+ return instance.sizeOf( p );
+ }
+
+ BlkInfo gc_query( void* p ) nothrow
+ {
+ return instance.query( p );
+ }
+
+ core.memory.GC.Stats gc_stats() nothrow
+ {
+ return instance.stats();
+ }
+
+ core.memory.GC.ProfileStats gc_profileStats() nothrow @safe
+ {
+ return instance.profileStats();
+ }
+
+ void gc_addRoot( void* p ) nothrow @nogc
+ {
+ return instance.addRoot( p );
+ }
+
+ void gc_addRange( void* p, size_t sz, const TypeInfo ti = null ) nothrow @nogc
+ {
+ return instance.addRange( p, sz, ti );
+ }
+
+ void gc_removeRoot( void* p ) nothrow
+ {
+ return instance.removeRoot( p );
+ }
+
+ void gc_removeRange( void* p ) nothrow
+ {
+ return instance.removeRange( p );
+ }
+
+ void gc_runFinalizers(const scope void[] segment ) nothrow
+ {
+ return instance.runFinalizers( segment );
+ }
+
+ bool gc_inFinalizer() nothrow @nogc @safe
+ {
+ return instance.inFinalizer();
+ }
+
+ ulong gc_allocatedInCurrentThread() nothrow
+ {
+ return instance.allocatedInCurrentThread();
+ }
+
+ GC gc_getProxy() nothrow
+ {
+ return instance;
+ }
+
+ export
+ {
+ void gc_setProxy( GC proxy )
+ {
+ foreach (root; instance.rootIter)
+ {
+ proxy.addRoot(root);
+ }
+
+ foreach (range; instance.rangeIter)
+ {
+ proxy.addRange(range.pbot, range.ptop - range.pbot, range.ti);
+ }
+
+ proxiedGC = instance; // remember initial GC to later remove roots
+ _instance = proxy;
+ }
+
+ void gc_clrProxy()
+ {
+ foreach (root; proxiedGC.rootIter)
+ {
+ instance.removeRoot(root);
+ }
+
+ foreach (range; proxiedGC.rangeIter)
+ {
+ instance.removeRange(range);
+ }
+
+ _instance = proxiedGC;
+ proxiedGC = null;
+ }
+ }
+}
diff --git a/libphobos/libdruntime/core/internal/hash.d b/libphobos/libdruntime/core/internal/hash.d
index 8d0067e..e999f0c 100644
--- a/libphobos/libdruntime/core/internal/hash.d
+++ b/libphobos/libdruntime/core/internal/hash.d
@@ -3,14 +3,13 @@
* This module provides functions for uniformly calculating hash values for different types
*
* Copyright: Copyright Igor Stepanov 2013-2013.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Igor Stepanov
* Source: $(DRUNTIMESRC core/internal/_hash.d)
*/
module core.internal.hash;
-import core.internal.convert;
-import core.internal.traits : allSatisfy;
+import core.internal.traits : Unconst;
// If true ensure that positive zero and negative zero have the same hash.
// Historically typeid(float).getHash did this but hashOf(float) did not.
@@ -57,6 +56,15 @@ private enum isFinalClassWithAddressBasedHash(T) = __traits(isFinalClass, T)
static assert(!isFinalClassWithAddressBasedHash!C3);
}
+private template isCppClassWithoutHash(T)
+{
+ static if (!is(T == class) && !is(T == interface))
+ enum isCppClassWithoutHash = false;
+ else
+ enum bool isCppClassWithoutHash = __traits(getLinkage, T) == "C++"
+ && !is(immutable T* : immutable Object*) && !hasCallableToHash!T;
+}
+
/+
Is it valid to calculate a hash code for T based on the bits of its
representation? Always false for interfaces, dynamic arrays, and
@@ -80,18 +88,21 @@ private template canBitwiseHash(T)
enum canBitwiseHash = true;
else static if (is(T == class))
{
- enum canBitwiseHash = isFinalClassWithAddressBasedHash!T;
+ enum canBitwiseHash = isFinalClassWithAddressBasedHash!T || isCppClassWithoutHash!T;
}
else static if (is(T == interface))
{
- enum canBitwiseHash = false;
+ enum canBitwiseHash = isCppClassWithoutHash!T;
}
else static if (is(T == struct))
{
static if (hasCallableToHash!T || __traits(isNested, T))
enum canBitwiseHash = false;
else
+ {
+ import core.internal.traits : allSatisfy;
enum canBitwiseHash = allSatisfy!(.canBitwiseHash, typeof(T.tupleof));
+ }
}
else static if (is(T == union))
{
@@ -117,107 +128,19 @@ private template canBitwiseHash(T)
}
}
-// Overly restrictive for simplicity: has false negatives but no false positives.
-private template useScopeConstPassByValue(T)
-{
- static if (__traits(isScalar, T))
- enum useScopeConstPassByValue = true;
- else static if (is(T == class) || is(T == interface))
- // Overly restrictive for simplicity.
- enum useScopeConstPassByValue = isFinalClassWithAddressBasedHash!T;
- else static if (is(T == struct) || is(T == union))
- {
- // Overly restrictive for simplicity.
- enum useScopeConstPassByValue = T.sizeof <= (int[]).sizeof &&
- __traits(isPOD, T) && // "isPOD" just to check there's no dtor or postblit.
- canBitwiseHash!T; // We can't verify toHash doesn't leak.
- }
- else static if (is(T : E[], E))
- {
- static if (!__traits(isStaticArray, T))
- // Overly restrictive for simplicity.
- enum useScopeConstPassByValue = .useScopeConstPassByValue!E;
- else static if (T.length == 0)
- enum useScopeConstPassByValue = true;
- else
- enum useScopeConstPassByValue = T.sizeof <= (uint[]).sizeof
- && .useScopeConstPassByValue!(typeof(T.init[0]));
- }
- else static if (is(T : V[K], K, V))
- {
- // Overly restrictive for simplicity.
- enum useScopeConstPassByValue = .useScopeConstPassByValue!K
- && .useScopeConstPassByValue!V;
- }
- else
- {
- static assert(is(T == delegate) || is(T : void) || is(T : typeof(null)),
- "Internal error: unanticipated type "~T.stringof);
- enum useScopeConstPassByValue = true;
- }
-}
-
-@safe unittest
-{
- static assert(useScopeConstPassByValue!int);
- static assert(useScopeConstPassByValue!string);
-
- static int ctr;
- static struct S1 { ~this() { ctr++; } }
- static struct S2 { this(this) { ctr++; } }
- static assert(!useScopeConstPassByValue!S1,
- "Don't default pass by value a struct with a non-vacuous destructor.");
- static assert(!useScopeConstPassByValue!S2,
- "Don't default pass by value a struct with a non-vacuous postblit.");
-}
-
-//enum hash. CTFE depends on base type
-size_t hashOf(T)(scope const T val)
-if (is(T EType == enum) && useScopeConstPassByValue!EType)
-{
- static if (is(T EType == enum)) //for EType
- {
- return hashOf(cast(const EType) val);
- }
- else
- {
- static assert(0);
- }
-}
-
-//enum hash. CTFE depends on base type
-size_t hashOf(T)(scope const T val, size_t seed)
-if (is(T EType == enum) && useScopeConstPassByValue!EType)
-{
- static if (is(T EType == enum)) //for EType
- {
- return hashOf(cast(const EType) val, seed);
- }
- else
- {
- static assert(0);
- }
-}
-
//enum hash. CTFE depends on base type
size_t hashOf(T)(auto ref T val, size_t seed = 0)
-if (is(T EType == enum) && !useScopeConstPassByValue!EType)
+if (is(T == enum) && !__traits(isScalar, T))
{
- static if (is(T EType == enum)) //for EType
- {
- EType e_val = cast(EType)val;
- return hashOf(e_val, seed);
- }
- else
- {
- static assert(0);
- }
+ static if (is(T EType == enum)) {} //for EType
+ return hashOf(cast(EType) val, seed);
}
//CTFE ready (depends on base type).
size_t hashOf(T)(scope const auto ref T val, size_t seed = 0)
if (!is(T == enum) && __traits(isStaticArray, T) && canBitwiseHash!T)
{
+ import core.internal.convert : toUbyte;
// FIXME:
// We would like to do this:
//
@@ -268,10 +191,9 @@ if (!is(T == enum) && __traits(isStaticArray, T) && !canBitwiseHash!T)
//dynamic array hash
size_t hashOf(T)(scope const T val, size_t seed = 0)
-if (!is(T == enum) && !is(T : typeof(null)) && is(T S: S[]) && !__traits(isStaticArray, T)
- && !is(T == struct) && !is(T == class) && !is(T == union)
- && (__traits(isScalar, S) || canBitwiseHash!S))
+if (is(T == S[], S) && (__traits(isScalar, S) || canBitwiseHash!S)) // excludes enum types
{
+ import core.internal.convert : toUbyte;
alias ElementType = typeof(val[0]);
static if (!canBitwiseHash!ElementType)
{
@@ -296,9 +218,7 @@ if (!is(T == enum) && !is(T : typeof(null)) && is(T S: S[]) && !__traits(isStati
//dynamic array hash
size_t hashOf(T)(T val, size_t seed = 0)
-if (!is(T == enum) && !is(T : typeof(null)) && is(T S: S[]) && !__traits(isStaticArray, T)
- && !is(T == struct) && !is(T == class) && !is(T == union)
- && !(__traits(isScalar, S) || canBitwiseHash!S))
+if (is(T == S[], S) && !(__traits(isScalar, S) || canBitwiseHash!S)) // excludes enum types
{
size_t hash = seed;
foreach (ref o; val)
@@ -308,117 +228,148 @@ if (!is(T == enum) && !is(T : typeof(null)) && is(T S: S[]) && !__traits(isStati
return hash;
}
-//arithmetic type hash
-@trusted @nogc nothrow pure
-size_t hashOf(T)(scope const T val) if (!is(T == enum) && __traits(isArithmetic, T)
- && __traits(isIntegral, T) && T.sizeof <= size_t.sizeof && !is(T == __vector))
+// Coalesces +0.0/-0.0 and all NaN bit patterns of a floating-point value to
+// a single representative, as controlled by the coalescing flags above.
+private F coalesceFloat(F)(const F val)
+if (__traits(isFloating, val) && !is(F == __vector) && !is(F : creal))
{
+ static if (floatCoalesceZeroes)
+ if (val == cast(F) 0)
+ return cast(F) 0;
+ static if (floatCoalesceNaNs)
+ if (val != val)
+ return F.nan;
return val;
}
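// A quick check of the coalescing above: with the floatCoalesceZeroes and
// floatCoalesceNaNs flags enabled, +0.0/-0.0 and every NaN bit pattern hash
// to the same value.
@safe unittest
{
    static if (floatCoalesceZeroes)
        assert(hashOf(0.0) == hashOf(-0.0));
    static if (floatCoalesceNaNs)
        assert(hashOf(double.nan) == hashOf(-double.nan));
}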
-//arithmetic type hash
+//scalar type hash
@trusted @nogc nothrow pure
-size_t hashOf(T)(scope const T val, size_t seed) if (!is(T == enum) && __traits(isArithmetic, T)
- && __traits(isIntegral, T) && T.sizeof <= size_t.sizeof && !is(T == __vector))
+size_t hashOf(T)(scope const T val) if (__traits(isScalar, T) && !is(T == __vector))
{
- static if (size_t.sizeof < ulong.sizeof)
+ static if (is(T V : V*))
{
- //MurmurHash3 32-bit single round
- enum uint c1 = 0xcc9e2d51;
- enum uint c2 = 0x1b873593;
- enum uint c3 = 0xe6546b64;
- enum uint r1 = 15;
- enum uint r2 = 13;
+ if (__ctfe)
+ {
+ if (val is null) return 0;
+ assert(0, "Unable to calculate hash of non-null pointer at compile time");
+ }
+ size_t result = cast(size_t) val;
+ return result ^ (result >> 4);
+ }
+ else static if (is(T EType == enum) && is(typeof(val[0])))
+ {
+ // Enum type whose base type is a vector.
+ return hashOf(cast(EType) val);
+ }
+ else static if (__traits(isIntegral, T))
+ {
+ static if (T.sizeof <= size_t.sizeof)
+ return val;
+ else
+ return cast(size_t) (val ^ (val >>> (size_t.sizeof * 8)));
+ }
+ else static if (is(T : creal))
+ {
+ return hashOf(coalesceFloat(val.re), hashOf(coalesceFloat(val.im)));
}
else
{
- //Half of MurmurHash3 64-bit single round
- //(omits second interleaved update)
- enum ulong c1 = 0x87c37b91114253d5;
- enum ulong c2 = 0x4cf5ad432745937f;
- enum ulong c3 = 0x52dce729;
- enum uint r1 = 31;
- enum uint r2 = 27;
+ static assert(__traits(isFloating, T));
+ auto data = coalesceFloat(val);
+ static if (T.sizeof == float.sizeof && T.mant_dig == float.mant_dig)
+ return *cast(const uint*) &data;
+ else static if (T.sizeof == double.sizeof && T.mant_dig == double.mant_dig)
+ return hashOf(*cast(const ulong*) &data);
+ else
+ {
+ import core.internal.convert : floatSize, toUbyte;
+ return bytesHashWithExactSizeAndAlignment!T(toUbyte(data)[0 .. floatSize!T], 0);
+ }
}
- size_t h = c1 * val;
- h = (h << r1) | (h >>> (size_t.sizeof * 8 - r1));
- h = (h * c2) ^ seed;
- h = (h << r2) | (h >>> (size_t.sizeof * 8 - r2));
- return h * 5 + c3;
}
-//arithmetic type hash
+//scalar type hash
@trusted @nogc nothrow pure
-size_t hashOf(T)(scope const T val, size_t seed = 0) if (!is(T == enum) && __traits(isArithmetic, T)
- && (!__traits(isIntegral, T) || T.sizeof > size_t.sizeof) && !is(T == __vector))
+size_t hashOf(T)(scope const T val, size_t seed) if (__traits(isScalar, T) && !is(T == __vector))
{
- static if (__traits(isFloating, val))
+ static if (is(T V : V*))
{
- import core.internal.convert : floatSize;
-
- static if (floatCoalesceZeroes || floatCoalesceNaNs)
+ if (__ctfe)
{
- import core.internal.traits : Unqual;
- Unqual!T data = val;
- // +0.0 and -0.0 become the same.
- static if (floatCoalesceZeroes && is(typeof(data = 0)))
- if (data == 0) data = 0;
- static if (floatCoalesceZeroes && is(typeof(data = 0.0i)))
- if (data == 0.0i) data = 0.0i;
- static if (floatCoalesceZeroes && is(typeof(data = 0.0 + 0.0i)))
- {
- if (data.re == 0.0) data = 0.0 + (data.im * 1.0i);
- if (data.im == 0.0i) data = data.re + 0.0i;
- }
- static if (floatCoalesceNaNs)
- if (data != data) data = T.nan; // All NaN patterns become the same.
+ if (val is null) return hashOf(size_t(0), seed);
+ assert(0, "Unable to calculate hash of non-null pointer at compile time");
+ }
+ return hashOf(cast(size_t) val, seed);
+ }
+ else static if (is(T EType == enum) && is(typeof(val[0])))
+ {
+ // Enum type whose base type is a vector.
+ return hashOf(cast(EType) val, seed);
+ }
+ else static if (__traits(isIntegral, val) && T.sizeof <= size_t.sizeof)
+ {
+ static if (size_t.sizeof < ulong.sizeof)
+ {
+ //MurmurHash3 32-bit single round
+ enum uint c1 = 0xcc9e2d51;
+ enum uint c2 = 0x1b873593;
+ enum uint c3 = 0xe6546b64;
+ enum uint r1 = 15;
+ enum uint r2 = 13;
}
else
{
- alias data = val;
+ //Half of MurmurHash3 64-bit single round
+ //(omits second interleaved update)
+ enum ulong c1 = 0x87c37b91114253d5;
+ enum ulong c2 = 0x4cf5ad432745937f;
+ enum ulong c3 = 0x52dce729;
+ enum uint r1 = 31;
+ enum uint r2 = 27;
}
-
- static if (T.mant_dig == float.mant_dig && T.sizeof == uint.sizeof)
+ size_t h = c1 * val;
+ h = (h << r1) | (h >>> (size_t.sizeof * 8 - r1));
+ h = (h * c2) ^ seed;
+ h = (h << r2) | (h >>> (size_t.sizeof * 8 - r2));
+ return h * 5 + c3;
+ }
+ else static if (__traits(isIntegral, val) && T.sizeof > size_t.sizeof)
+ {
+ static foreach (i; 0 .. T.sizeof / size_t.sizeof)
+ seed = hashOf(cast(size_t) (val >>> (size_t.sizeof * 8 * i)), seed);
+ return seed;
+ }
+ else static if (is(T : creal))
+ {
+ return hashOf(val.re, hashOf(val.im, seed));
+ }
+ else static if (__traits(isFloating, T))
+ {
+ auto data = coalesceFloat(val);
+ static if (T.sizeof == float.sizeof && T.mant_dig == float.mant_dig)
return hashOf(*cast(const uint*) &data, seed);
- else static if (T.mant_dig == double.mant_dig && T.sizeof == ulong.sizeof)
+ else static if (T.sizeof == double.sizeof && T.mant_dig == double.mant_dig)
return hashOf(*cast(const ulong*) &data, seed);
else
{
- static if (is(T : creal) && T.sizeof != 2 * floatSize!(typeof(T.re)))
- {
- auto h1 = hashOf(data.re);
- return hashOf(data.im, h1);
- }
- else static if (is(T : real) || is(T : ireal))
- {
- // Ignore trailing padding
- auto bytes = toUbyte(data)[0 .. floatSize!T];
- return bytesHashWithExactSizeAndAlignment!T(bytes, seed);
- }
- else
- {
- return bytesHashWithExactSizeAndAlignment!T(toUbyte(data), seed);
- }
+ import core.internal.convert : floatSize, toUbyte;
+ return bytesHashWithExactSizeAndAlignment!T(toUbyte(data)[0 .. floatSize!T], seed);
}
}
else
{
- static assert(T.sizeof > size_t.sizeof && __traits(isIntegral, T));
- foreach (i; 0 .. T.sizeof / size_t.sizeof)
- seed = hashOf(cast(size_t) (val >>> (size_t.sizeof * 8 * i)), seed);
- return seed;
+ static assert(0);
}
}
-size_t hashOf(T)(scope const auto ref T val, size_t seed = 0) @safe @nogc nothrow pure
-if (is(T == __vector) && !is(T == enum))
+size_t hashOf(T)(scope const T val, size_t seed = 0) @safe @nogc nothrow pure
+if (is(T == __vector)) // excludes enum types
{
static if (__traits(isFloating, T) && (floatCoalesceZeroes || floatCoalesceNaNs))
{
if (__ctfe)
{
// Workaround for CTFE bug.
- alias E = Unqual!(typeof(val[0]));
+ static if (is(immutable typeof(val[0]) == immutable E, E)) {} // Get E.
E[T.sizeof / E.sizeof] array;
foreach (i; 0 .. T.sizeof / E.sizeof)
array[i] = val[i];
@@ -428,6 +379,7 @@ if (is(T == __vector) && !is(T == enum))
}
else
{
+ import core.internal.convert : toUbyte;
return bytesHashAlignedBy!T(toUbyte(val), seed);
}
}
@@ -446,65 +398,48 @@ size_t hashOf(T)(scope const T val, size_t seed) if (!is(T == enum) && is(T : ty
return hashOf(size_t(0), seed);
}
-//Pointers hash. CTFE unsupported if not null
-@trusted @nogc nothrow pure
-size_t hashOf(T)(scope const T val)
-if (!is(T == enum) && is(T V : V*) && !is(T : typeof(null))
- && !is(T == struct) && !is(T == class) && !is(T == union))
-{
- if (__ctfe)
- {
- if (val is null)
- {
- return 0;
- }
- else
- {
- assert(0, "Unable to calculate hash of non-null pointer at compile time");
- }
-
- }
- auto addr = cast(size_t) val;
- return addr ^ (addr >>> 4);
-}
-
-//Pointers hash. CTFE unsupported if not null
-@trusted @nogc nothrow pure
-size_t hashOf(T)(scope const T val, size_t seed)
-if (!is(T == enum) && is(T V : V*) && !is(T : typeof(null))
- && !is(T == struct) && !is(T == class) && !is(T == union))
-{
- if (__ctfe)
- {
- if (val is null)
- {
- return hashOf(cast(size_t)0, seed);
- }
- else
- {
- assert(0, "Unable to calculate hash of non-null pointer at compile time");
- }
-
- }
- return hashOf(cast(size_t)val, seed);
-}
-
private enum _hashOfStruct =
q{
enum bool isChained = is(typeof(seed) : size_t);
static if (!isChained) enum size_t seed = 0;
- static if (hasCallableToHash!T) //CTFE depends on toHash()
+ static if (hasCallableToHash!(typeof(val))) //CTFE depends on toHash()
{
- static if (isChained)
- return hashOf(cast(size_t) val.toHash(), seed);
+ static if (!__traits(isSame, typeof(val), __traits(parent, val.toHash))
+ && is(typeof(val is null)))
+ {
+ static if (isChained)
+ return hashOf(__traits(getMember, val, __traits(getAliasThis, typeof(val))), seed);
+ else
+ return hashOf(__traits(getMember, val, __traits(getAliasThis, typeof(val))));
+ }
else
- return val.toHash();
+ {
+ static if (isChained)
+ return hashOf(cast(size_t) val.toHash(), seed);
+ else
+ return val.toHash();
+ }
}
else
{
+ import core.internal.convert : toUbyte;
static if (__traits(hasMember, T, "toHash") && is(typeof(T.toHash) == function))
{
- pragma(msg, "Warning: struct "~__traits(identifier, T)~" has method toHash, however it cannot be called with "~T.stringof~" this.");
+ // TODO: in the future maybe this should be changed to a static
+ // assert(0), because if there's a `toHash` the programmer probably
+ // expected it to be called and a compilation failure here will
+ // expose a bug in his code.
+ // In the future we also might want to disallow non-const toHash
+ // altogether.
+ pragma(msg, "Warning: struct "~__traits(identifier, T)
+ ~" has method toHash, however it cannot be called with "
+ ~typeof(val).stringof~" this.");
+ static if (__traits(compiles, __traits(getLocation, T.toHash)))
+ {
+ enum file = __traits(getLocation, T.toHash)[0];
+ enum line = __traits(getLocation, T.toHash)[1].stringof;
+ pragma(msg, " ",__traits(identifier, T),".toHash defined here: ",file,"(",line,")");
+ }
}
static if (T.tupleof.length == 0)
@@ -513,13 +448,12 @@ q{
}
else static if ((is(T == struct) && !canBitwiseHash!T) || T.tupleof.length == 1)
{
- size_t h = void;
- static if (isChained) h = seed;
- foreach (i, F; typeof(val.tupleof))
+ static if (isChained) size_t h = seed;
+ static foreach (i, F; typeof(val.tupleof))
{
static if (__traits(isStaticArray, F))
{
- static if (i == 0 && !isChained) h = 0;
+ static if (i == 0 && !isChained) size_t h = 0;
static if (F.sizeof > 0 && canBitwiseHash!F)
// May use smallBytesHash instead of bytesHash.
h = bytesHashWithExactSizeAndAlignment!F(toUbyte(val.tupleof[i]), h);
@@ -533,30 +467,41 @@ q{
{
static if (hasCallableToHash!F)
{
- static if (i == 0 && !isChained)
- h = val.tupleof[i].toHash();
+ static if (!__traits(isSame, F, __traits(parent, val.tupleof[i].toHash))
+ && is(typeof(val.tupleof[i] is null)))
+ {
+ static if (i == 0 && !isChained)
+ size_t h = hashOf(__traits(getMember, val.tupleof[i], __traits(getAliasThis, F)));
+ else
+ h = hashOf(__traits(getMember, val.tupleof[i], __traits(getAliasThis, F)), h);
+ }
else
- h = hashOf(cast(size_t) val.tupleof[i].toHash(), h);
+ {
+ static if (i == 0 && !isChained)
+ size_t h = val.tupleof[i].toHash();
+ else
+ h = hashOf(cast(size_t) val.tupleof[i].toHash(), h);
+ }
}
else static if (F.tupleof.length == 1)
{
// Handle the single member case separately to avoid unnecessarily using bytesHash.
static if (i == 0 && !isChained)
- h = hashOf(val.tupleof[i].tupleof[0]);
+ size_t h = hashOf(val.tupleof[i].tupleof[0]);
else
h = hashOf(val.tupleof[i].tupleof[0], h);
}
else static if (canBitwiseHash!F)
{
// May use smallBytesHash instead of bytesHash.
- static if (i == 0 && !isChained) h = 0;
+ static if (i == 0 && !isChained) size_t h = 0;
h = bytesHashWithExactSizeAndAlignment!F(toUbyte(val.tupleof[i]), h);
}
else
{
// Nothing special happening.
static if (i == 0 && !isChained)
- h = hashOf(val.tupleof[i]);
+ size_t h = hashOf(val.tupleof[i]);
else
h = hashOf(val.tupleof[i], h);
}
@@ -565,7 +510,7 @@ q{
{
// Nothing special happening.
static if (i == 0 && !isChained)
- h = hashOf(val.tupleof[i]);
+ size_t h = hashOf(val.tupleof[i]);
else
h = hashOf(val.tupleof[i], h);
}
@@ -592,6 +537,7 @@ q{
//struct or union hash
size_t hashOf(T)(scope const auto ref T val, size_t seed = 0)
if (!is(T == enum) && (is(T == struct) || is(T == union))
+ && !is(T == const) && !is(T == immutable)
&& canBitwiseHash!T)
{
mixin(_hashOfStruct);
@@ -613,13 +559,25 @@ if (!is(T == enum) && (is(T == struct) || is(T == union))
mixin(_hashOfStruct);
}
-//delegate hash. CTFE unsupported
+//struct or union hash - https://issues.dlang.org/show_bug.cgi?id=19332 (support might be removed in future)
+size_t hashOf(T)(scope auto ref T val, size_t seed = 0)
+if (!is(T == enum) && (is(T == struct) || is(T == union))
+ && (is(T == const) || is(T == immutable))
+ && canBitwiseHash!T && !canBitwiseHash!(Unconst!T))
+{
+ mixin(_hashOfStruct);
+}
+
+//delegate hash. CTFE only if null.
@trusted @nogc nothrow pure
size_t hashOf(T)(scope const T val, size_t seed = 0) if (!is(T == enum) && is(T == delegate))
{
- assert(!__ctfe, "unable to compute hash of "~T.stringof~" at compile time");
- const(ubyte)[] bytes = (cast(const(ubyte)*)&val)[0 .. T.sizeof];
- return bytesHashWithExactSizeAndAlignment!T(bytes, seed);
+ if (__ctfe)
+ {
+ if (val is null) return hashOf(size_t(0), hashOf(size_t(0), seed));
+ assert(0, "unable to compute hash of "~T.stringof~" at compile time");
+ }
+ return hashOf(val.ptr, hashOf(cast(void*) val.funcptr, seed));
}
//address-based class hash. CTFE only if null.
@@ -648,7 +606,13 @@ if (!is(T == enum) && (is(T == interface) || is(T == class))
&& !canBitwiseHash!T)
{
static if (__traits(compiles, {size_t h = val.toHash();}))
- return val ? val.toHash() : 0;
+ {
+ static if (is(__traits(parent, val.toHash) P) && !is(immutable T* : immutable P*)
+ && is(typeof((ref P p) => p is null)))
+ return val ? hashOf(__traits(getMember, val, __traits(getAliasThis, T))) : 0;
+ else
+ return val ? val.toHash() : 0;
+ }
else
return val ? (cast(Object)val).toHash() : 0;
}
@@ -659,7 +623,14 @@ if (!is(T == enum) && (is(T == interface) || is(T == class))
&& !canBitwiseHash!T)
{
static if (__traits(compiles, {size_t h = val.toHash();}))
- return hashOf(val ? cast(size_t) val.toHash() : size_t(0), seed);
+ {
+ static if (is(__traits(parent, val.toHash) P) && !is(immutable T* : immutable P*)
+ && is(typeof((ref P p) => p is null)))
+ return hashOf(val ? hashOf(__traits(getMember, val, __traits(getAliasThis, T)))
+ : size_t(0), seed);
+ else
+ return hashOf(val ? cast(size_t) val.toHash() : size_t(0), seed);
+ }
else
return hashOf(val ? (cast(Object)val).toHash() : 0, seed);
}
diff --git a/libphobos/libdruntime/core/internal/lifetime.d b/libphobos/libdruntime/core/internal/lifetime.d
new file mode 100644
index 0000000..7e9b5f2
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/lifetime.d
@@ -0,0 +1,213 @@
+module core.internal.lifetime;
+
+import core.lifetime : forward;
+
+/+
+emplaceRef is a package function for druntime internal use. It works like
+emplace, but takes its argument by ref (as opposed to "by pointer").
+This makes it easier to use, easier to make safe, and faster in a non-inline
+build.
+Furthermore, emplaceRef optionally takes a type parameter, which specifies
+the type we want to build. This helps to build qualified objects on a mutable
+buffer, without breaking the type system with unsafe casts.
++/
+void emplaceRef(T, UT, Args...)(ref UT chunk, auto ref Args args)
+{
+ static if (args.length == 0)
+ {
+ static assert(is(typeof({static T i;})),
+ "Cannot emplace a " ~ T.stringof ~ " because " ~ T.stringof ~
+ ".this() is annotated with @disable.");
+ static if (is(T == class)) static assert(!__traits(isAbstractClass, T),
+ T.stringof ~ " is abstract and it can't be emplaced");
+ emplaceInitializer(chunk);
+ }
+ else static if (
+ !is(T == struct) && Args.length == 1 /* primitives, enums, arrays */
+ ||
+ Args.length == 1 && is(typeof({T t = forward!(args[0]);})) /* conversions */
+ ||
+ is(typeof(T(forward!args))) /* general constructors */)
+ {
+ static struct S
+ {
+ T payload;
+ this()(auto ref Args args)
+ {
+ static if (__traits(compiles, payload = forward!args))
+ payload = forward!args;
+ else
+ payload = T(forward!args);
+ }
+ }
+ if (__ctfe)
+ {
+ static if (__traits(compiles, chunk = T(forward!args)))
+ chunk = T(forward!args);
+ else static if (args.length == 1 && __traits(compiles, chunk = forward!(args[0])))
+ chunk = forward!(args[0]);
+ else assert(0, "CTFE emplace doesn't support "
+ ~ T.stringof ~ " from " ~ Args.stringof);
+ }
+ else
+ {
+ S* p = () @trusted { return cast(S*) &chunk; }();
+ static if (UT.sizeof > 0)
+ emplaceInitializer(*p);
+ p.__ctor(forward!args);
+ }
+ }
+ else static if (is(typeof(chunk.__ctor(forward!args))))
+ {
+ // This catches the rare case of local types that keep a frame pointer
+ emplaceInitializer(chunk);
+ chunk.__ctor(forward!args);
+ }
+ else
+ {
+ //We can't emplace. Try to diagnose a disabled postblit.
+ static assert(!(Args.length == 1 && is(Args[0] : T)),
+ "Cannot emplace a " ~ T.stringof ~ " because " ~ T.stringof ~
+ ".this(this) is annotated with @disable.");
+
+ //We can't emplace.
+ static assert(false,
+ T.stringof ~ " cannot be emplaced from " ~ Args[].stringof ~ ".");
+ }
+}
+
+// ditto
+static import core.internal.traits;
+void emplaceRef(UT, Args...)(ref UT chunk, auto ref Args args)
+if (is(UT == core.internal.traits.Unqual!UT))
+{
+ emplaceRef!(UT, UT)(chunk, forward!args);
+}
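// Short usage sketch of emplaceRef: construct a value directly in void
// (uninitialized) storage through a reference, as described above.
unittest
{
    static struct S
    {
        int x;
        this(int n) { x = n; }
    }

    S s = void;        // raw storage, not yet a valid S
    emplaceRef(s, 42); // runs S's constructor in place
    assert(s.x == 42);

    int i = void;
    emplaceRef(i, 7);  // non-struct types are simply assigned
    assert(i == 7);
}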
+
+/+
+Emplaces T.init.
+In contrast to `emplaceRef(chunk)`, there are no checks for disabled default
+constructors etc.
++/
+template emplaceInitializer(T)
+if (!is(T == const) && !is(T == immutable) && !is(T == inout))
+{
+ import core.internal.traits : hasElaborateAssign, Unqual;
+
+ // Avoid stack allocation by hacking to get to the struct/union init symbol.
+ static if (is(T == struct) || is(T == union))
+ {
+ pragma(mangle, "_D" ~ Unqual!T.mangleof[1..$] ~ "6__initZ")
+ __gshared extern immutable T initializer;
+ }
+
+ void emplaceInitializer(scope ref T chunk) nothrow pure @trusted
+ {
+ static if (__traits(isZeroInit, T))
+ {
+ import core.stdc.string : memset;
+ memset(cast(void*) &chunk, 0, T.sizeof);
+ }
+ else static if (__traits(isScalar, T) ||
+ T.sizeof <= 16 && !hasElaborateAssign!T && __traits(compiles, (){ T chunk; chunk = T.init; }))
+ {
+ chunk = T.init;
+ }
+ else static if (__traits(isStaticArray, T))
+ {
+ // For static arrays there is no initializer symbol created. Instead, we emplace elements one-by-one.
+ foreach (i; 0 .. T.length)
+ {
+ emplaceInitializer(chunk[i]);
+ }
+ }
+ else
+ {
+ import core.stdc.string : memcpy;
+ memcpy(cast(void*)&chunk, &initializer, T.sizeof);
+ }
+ }
+}
+
+@safe unittest
+{
+ static void testInitializer(T)()
+ {
+ // mutable T
+ {
+ T dst = void;
+ emplaceInitializer(dst);
+ assert(dst is T.init);
+ }
+
+ // shared T
+ {
+ shared T dst = void;
+ emplaceInitializer(dst);
+ assert(dst is shared(T).init);
+ }
+
+ // const T
+ {
+ const T dst = void;
+ static assert(!__traits(compiles, emplaceInitializer(dst)));
+ }
+ }
+
+ static struct ElaborateAndZero
+ {
+ int a;
+ this(this) {}
+ }
+
+ static struct ElaborateAndNonZero
+ {
+ int a = 42;
+ this(this) {}
+ }
+
+ static union LargeNonZeroUnion
+ {
+ byte[128] a = 1;
+ }
+
+ testInitializer!int();
+ testInitializer!double();
+ testInitializer!ElaborateAndZero();
+ testInitializer!ElaborateAndNonZero();
+ testInitializer!LargeNonZeroUnion();
+
+ static if (is(__vector(double[4])))
+ {
+ // DMD 2.096 and GDC 11.1 can't compare vectors with `is` so can't use
+ // testInitializer.
+ enum VE : __vector(double[4])
+ {
+ a = [1.0, 2.0, 3.0, double.nan],
+ b = [4.0, 5.0, 6.0, double.nan],
+ }
+ const VE expected = VE.a;
+ VE dst = VE.b;
+ shared VE sharedDst = VE.b;
+ emplaceInitializer(dst);
+ emplaceInitializer(sharedDst);
+ () @trusted {
+ import core.stdc.string : memcmp;
+ assert(memcmp(&expected, &dst, VE.sizeof) == 0);
+ assert(memcmp(&expected, cast(void*) &sharedDst, VE.sizeof) == 0);
+ }();
+ static assert(!__traits(compiles, emplaceInitializer(expected)));
+ }
+}
+
+/*
+Simple swap function.
+*/
+void swap(T)(ref T lhs, ref T rhs)
+{
+ import core.lifetime : move, moveEmplace;
+
+ T tmp = move(lhs);
+ moveEmplace(rhs, lhs);
+ moveEmplace(tmp, rhs);
+}
diff --git a/libphobos/libdruntime/core/internal/moving.d b/libphobos/libdruntime/core/internal/moving.d
new file mode 100644
index 0000000..9c97d29
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/moving.d
@@ -0,0 +1,147 @@
+/**
+ This module contains the implementation of move semantics of DIP 1014
+
+ Copyright: Copyright Digital Mars 2000 - 2019.
+ License: Distributed under the
+ $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ (See accompanying file LICENSE)
+ Source: $(DRUNTIMESRC core/_internal/_moving.d)
+*/
+module core.internal.moving;
+
+/**
+Recursively calls the `opPostMove` callbacks of a struct and its members if
+they're defined.
+
+When moving a struct instance, the compiler emits a call to this function
+after blitting the instance and before releasing the original instance's
+memory.
+
+Params:
+ newLocation = reference to struct instance being moved into
+ oldLocation = reference to the original instance
+
+Note:
+ This function is tentatively defined as `nothrow` to prevent
+ `opPostMove` from being defined without `nothrow`, which would allow
+ for possibly confusing changes in program flow.
+*/
+void __move_post_blt(S)(ref S newLocation, ref S oldLocation) nothrow
+ if (is(S == struct))
+{
+ import core.internal.traits : hasElaborateMove;
+ static foreach (i, M; typeof(S.tupleof))
+ {
+ static if (hasElaborateMove!M)
+ {
+ __move_post_blt(newLocation.tupleof[i], oldLocation.tupleof[i]);
+ }
+ }
+
+ static if (__traits(hasMember, S, "opPostMove"))
+ {
+ import core.internal.traits : lvalueOf, rvalueOf;
+ static assert( is(typeof(S.init.opPostMove(lvalueOf!S))) &&
+ !is(typeof(S.init.opPostMove(rvalueOf!S))),
+ "`" ~ S.stringof ~ ".opPostMove` must take exactly one argument of type `" ~ S.stringof ~ "` by reference");
+
+ newLocation.opPostMove(oldLocation);
+ }
+}
+
+void __move_post_blt(S)(ref S newLocation, ref S oldLocation) nothrow
+ if (__traits(isStaticArray, S))
+{
+ import core.internal.traits : hasElaborateMove;
+ static if (S.length && hasElaborateMove!(typeof(newLocation[0])))
+ {
+ foreach (i; 0 .. S.length)
+ __move_post_blt(newLocation[i], oldLocation[i]);
+ }
+}
+
+@safe nothrow unittest
+{
+ struct A
+ {
+ bool movedInto;
+ void opPostMove(const ref A oldLocation)
+ {
+ movedInto = true;
+ }
+ }
+ A src, dest;
+ __move_post_blt(dest, src);
+ assert(dest.movedInto);
+}
+
+@safe nothrow unittest
+{
+ struct A
+ {
+ bool movedInto;
+ void opPostMove(const ref A oldLocation)
+ {
+ movedInto = true;
+ }
+ }
+ struct B
+ {
+ A a;
+
+ bool movedInto;
+ void opPostMove(const ref B oldLocation)
+ {
+ movedInto = true;
+ }
+ }
+ B src, dest;
+ __move_post_blt(dest, src);
+ assert(dest.movedInto && dest.a.movedInto);
+}
+
+@safe nothrow unittest
+{
+ static struct DoNotMove
+ {
+ bool movedInto;
+ void opPostMove(const ref DoNotMove oldLocation)
+ {
+ movedInto = true;
+ }
+ }
+ static DoNotMove doNotMove;
+
+ struct A
+ {
+ @property ref DoNotMove member()
+ {
+ return doNotMove;
+ }
+ }
+ A src, dest;
+ __move_post_blt(dest, src);
+ assert(!doNotMove.movedInto);
+}
+
+@safe nothrow unittest
+{
+ static struct A
+ {
+ bool movedInto;
+ void opPostMove(const ref A oldLocation)
+ {
+ movedInto = true;
+ }
+ }
+ static struct B
+ {
+ A[2] a;
+ }
+ B src, dest;
+ __move_post_blt(dest, src);
+ foreach (ref a; src.a)
+ assert(!a.movedInto);
+ foreach (ref a; dest.a)
+ assert(a.movedInto);
+}
diff --git a/libphobos/libdruntime/core/internal/parseoptions.d b/libphobos/libdruntime/core/internal/parseoptions.d
new file mode 100644
index 0000000..4e5105d
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/parseoptions.d
@@ -0,0 +1,422 @@
+/**
+* parse configuration options
+*
+* Copyright: Copyright Digital Mars 2017
+* License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+*
+* Source: $(DRUNTIMESRC src/core/internal/parseoptions.d)
+*/
+
+module core.internal.parseoptions;
+
+import core.stdc.stdlib;
+import core.stdc.stdio;
+import core.stdc.ctype;
+import core.stdc.string;
+import core.vararg;
+import core.internal.traits : externDFunc, hasUDA;
+
+
+@nogc nothrow:
+extern extern(C) string[] rt_args() @system;
+
+extern extern(C) __gshared bool rt_envvars_enabled;
+extern extern(C) __gshared bool rt_cmdline_enabled;
+extern extern(C) __gshared string[] rt_options;
+
+alias rt_configCallBack = string delegate(string) @nogc nothrow;
+alias fn_configOption = string function(string opt, scope rt_configCallBack dg, bool reverse) @nogc nothrow;
+alias rt_configOption = externDFunc!("rt.config.rt_configOption", fn_configOption);
+
+/// UDA for field treated as memory value
+struct MemVal {}
+
+/**
+* initialize members of struct CFG from rt_config options
+*
+* options will be read from the environment, the command line, or from options
+* embedded into the executable, as configured (see rt.config)
+*
+* fields of the struct are populated by parseOptions().
+*/
+bool initConfigOptions(CFG)(ref CFG cfg, string cfgname)
+{
+ string parse(string opt) @nogc nothrow
+ {
+ if (!parseOptions(cfg, opt))
+ return "err";
+ return null; // continue processing
+ }
+ string s = rt_configOption(cfgname, &parse, true);
+ return s is null;
+}
+
+/**
+* initialize members of struct CFG from a string of sub-options.
+*
+* fields of the struct are populated by listing them as space separated
+* sub-options <field-name>:value, e.g. "precise:1 profile:1"
+*
+* supported field value types:
+* - strings (without spaces)
+* - integer types (positive values only)
+* - bool
+* - float
+*
+* If the struct has a member "help" it is called if it is found as a sub-option.
+* If the struct has a member "errorName", is used as the name reported in error
+* messages. Otherwise the struct name is used.
+*/
+bool parseOptions(CFG)(ref CFG cfg, string opt)
+{
+ static if (is(typeof(__traits(getMember, CFG, "errorName"))))
+ string errName = cfg.errorName;
+ else
+ string errName = CFG.stringof;
+ opt = skip!isspace(opt);
+ while (opt.length)
+ {
+ auto tail = find!(c => c == ':' || c == '=' || c == ' ')(opt);
+ auto name = opt[0 .. $ - tail.length];
+ static if (is(typeof(__traits(getMember, CFG, "help"))))
+ if (name == "help")
+ {
+ version (CoreUnittest) {} else
+ cfg.help();
+ opt = skip!isspace(tail);
+ continue;
+ }
+ if (tail.length <= 1 || tail[0] == ' ')
+ return optError("Missing argument for", name, errName);
+ tail = tail[1 .. $];
+
+ NAMES_SWITCH:
+ switch (name)
+ {
+ static foreach (field; __traits(allMembers, CFG))
+ {
+ static if (!is(typeof(__traits(getMember, cfg, field)) == function))
+ {
+ case field:
+ bool r;
+
+ static if (hasUDA!(__traits(getMember, cfg, field), MemVal))
+ r = parse(name, tail, __traits(getMember, cfg, field), errName, true);
+ else
+ r = parse(name, tail, __traits(getMember, cfg, field), errName);
+
+ if (!r)
+ return false;
+
+ break NAMES_SWITCH;
+ }
+ }
+
+ default:
+ return optError("Unknown", name, errName);
+ }
+ opt = skip!isspace(tail);
+ }
+ return true;
+}
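// Worked example of the sub-option format documented above, using field names
// modelled on the GC configuration (illustrative only):
unittest
{
    static struct Config
    {
        bool disable;
        string gc = "conservative";
        @MemVal size_t minPoolSize = 1 << 20;
        float heapSizeFactor = 2.0;
    }
    Config cfg;
    assert(parseOptions(cfg, "disable:1 gc:manual minPoolSize:16M heapSizeFactor:1.5"));
    assert(cfg.disable && cfg.gc == "manual");
    assert(cfg.minPoolSize == 16 * 1024 * 1024); // @MemVal fields accept size suffixes
    assert(cfg.heapSizeFactor == 1.5f);
}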
+
+/**
+Parses an individual option `optname` value from a provided string `str`.
+The option type is given by the type `T` of the field `res` to which the parsed
+value will be written to.
+In case of an error, `errName` will be used to display an error message and
+the failure of the parsing will be indicated by a `false` return value.
+
+For boolean values, '0/n/N' (false) or '1/y/Y' (true) are accepted.
+
+Params:
+ optname = name of the option to parse
+ str = raw string to parse the option value from
+ res = reference to the resulting data field that the option should be parsed into
+ errName = full-text name of the option which should be displayed in case of errors
+
+Returns: `false` if a parsing error happened.
+*/
+bool rt_parseOption(T)(const(char)[] optname, ref inout(char)[] str, ref T res, const(char)[] errName)
+{
+ return parse(optname, str, res, errName);
+}
+
+private:
+
+bool optError(const scope char[] msg, const scope char[] name, const(char)[] errName)
+{
+ version (CoreUnittest) if (inUnittest) return false;
+
+ fprintf(stderr, "%.*s %.*s option '%.*s'.\n",
+ cast(int)msg.length, msg.ptr,
+ cast(int)errName.length, errName.ptr,
+ cast(int)name.length, name.ptr);
+ return false;
+}
+
+inout(char)[] skip(alias pred)(inout(char)[] str)
+{
+ return find!(c => !pred(c))(str);
+}
+
+inout(char)[] find(alias pred)(inout(char)[] str)
+{
+ foreach (i; 0 .. str.length)
+ if (pred(str[i])) return str[i .. $];
+ return null;
+}
+
+bool parse(T : size_t)(const(char)[] optname, ref inout(char)[] str, ref T res, const(char)[] errName, bool mayHaveSuffix = false)
+in { assert(str.length); }
+do
+{
+ size_t i, v;
+
+ auto tail = find!(c => c == ' ')(str);
+ size_t len = str.length - tail.length;
+
+ import core.checkedint : mulu;
+
+ bool overflowed;
+
+ for (; i < len; i++)
+ {
+ char c = str[i];
+
+ if (isdigit(c))
+ v = 10 * v + c - '0';
+ else // non-digit
+ {
+ if (mayHaveSuffix && i == len-1) // suffix
+ {
+ switch (c)
+ {
+
+ case 'G':
+ v = mulu(v, 1024 * 1024 * 1024, overflowed);
+ break;
+
+ case 'M':
+ v = mulu(v, 1024 * 1024, overflowed);
+ break;
+
+ case 'K':
+ v = mulu(v, 1024, overflowed);
+ break;
+
+ case 'B':
+ break;
+
+ default:
+ return parseError("value with unit type M, K or B", optname, str, "with suffix");
+ }
+
+ if (overflowed)
+ return overflowedError(optname, str);
+
+ i++;
+ break;
+ }
+ else // unexpected non-digit character
+ {
+ i = 0;
+ break;
+ }
+ }
+ }
+
+ if (!i)
+ return parseError("a number", optname, str, errName);
+
+ if (mayHaveSuffix && isdigit(str[len-1]))
+ {
+ // No suffix found, default to megabytes
+
+ v = mulu(v, 1024 * 1024, overflowed);
+
+ if (overflowed)
+ return overflowedError(optname, str);
+ }
+
+ if (v > res.max)
+ return parseError("a number " ~ T.max.stringof ~ " or below", optname, str[0 .. i], errName);
+ str = str[i .. $];
+ res = cast(T) v;
+ return true;
+}
+
+bool parse(const(char)[] optname, ref inout(char)[] str, ref bool res, const(char)[] errName)
+in { assert(str.length); }
+do
+{
+ if (str[0] == '1' || str[0] == 'y' || str[0] == 'Y')
+ res = true;
+ else if (str[0] == '0' || str[0] == 'n' || str[0] == 'N')
+ res = false;
+ else
+ return parseError("'0/n/N' or '1/y/Y'", optname, str, errName);
+ str = str[1 .. $];
+ return true;
+}
+
+bool parse(const(char)[] optname, ref inout(char)[] str, ref float res, const(char)[] errName)
+in { assert(str.length); }
+do
+{
+ // % uint f %n \0
+ char[1 + 10 + 1 + 2 + 1] fmt=void;
+ // specify max-width
+ immutable n = snprintf(fmt.ptr, fmt.length, "%%%uf%%n", cast(uint)str.length);
+ assert(n > 4 && n < fmt.length);
+
+ int nscanned;
+ version (CRuntime_DigitalMars)
+ {
+    /* Older versions of sscanf in snn.lib can write to their first argument, causing a crash
+ * if the string is in readonly memory. Recent updates to DMD
+ * https://github.com/dlang/dmd/pull/6546
+ * put string literals in readonly memory.
+ * Although sscanf has been fixed,
+ * http://ftp.digitalmars.com/snn.lib
+ * this workaround is here so it still works with the older snn.lib.
+ */
+ // Create mutable copy of str
+ const length = str.length;
+ char* mptr = cast(char*)malloc(length + 1);
+ assert(mptr);
+ memcpy(mptr, str.ptr, length);
+ mptr[length] = 0;
+ const result = sscanf(mptr, fmt.ptr, &res, &nscanned);
+ free(mptr);
+ if (result < 1)
+ return parseError("a float", optname, str, errName);
+ }
+ else
+ {
+ if (sscanf(str.ptr, fmt.ptr, &res, &nscanned) < 1)
+ return parseError("a float", optname, str, errName);
+ }
+ str = str[nscanned .. $];
+ return true;
+}
+
+bool parse(const(char)[] optname, ref inout(char)[] str, ref inout(char)[] res, const(char)[] errName)
+in { assert(str.length); }
+do
+{
+ auto tail = str.find!(c => c == ' ');
+ res = str[0 .. $ - tail.length];
+ if (!res.length)
+ return parseError("an identifier", optname, str, errName);
+ str = tail;
+ return true;
+}
+
+bool parseError(const scope char[] exp, const scope char[] opt, const scope char[] got, const(char)[] errName)
+{
+ version (CoreUnittest) if (inUnittest) return false;
+
+ fprintf(stderr, "Expecting %.*s as argument for %.*s option '%.*s', got '%.*s' instead.\n",
+ cast(int)exp.length, exp.ptr,
+ cast(int)errName.length, errName.ptr,
+ cast(int)opt.length, opt.ptr,
+ cast(int)got.length, got.ptr);
+ return false;
+}
+
+bool overflowedError(const scope char[] opt, const scope char[] got)
+{
+ version (CoreUnittest) if (inUnittest) return false;
+
+ fprintf(stderr, "Argument for %.*s option '%.*s' is too big.\n",
+ cast(int)opt.length, opt.ptr,
+ cast(int)got.length, got.ptr);
+ return false;
+}
+
+size_t min(size_t a, size_t b) { return a <= b ? a : b; }
+
+version (CoreUnittest) __gshared bool inUnittest;
+
+unittest
+{
+ inUnittest = true;
+ scope (exit) inUnittest = false;
+
+ static struct Config
+ {
+ bool disable; // start disabled
+ ubyte profile; // enable profiling with summary when terminating program
+ string gc = "conservative"; // select gc implementation conservative|manual
+
+ @MemVal size_t initReserve; // initial reserve (bytes)
+ @MemVal size_t minPoolSize = 1 << 20; // initial and minimum pool size (bytes)
+ float heapSizeFactor = 2.0; // heap size to used memory ratio
+
+ @nogc nothrow:
+ void help();
+ string errorName() @nogc nothrow { return "GC"; }
+ }
+ Config conf;
+
+ assert(!conf.parseOptions("disable"));
+ assert(!conf.parseOptions("disable:"));
+ assert(!conf.parseOptions("disable:5"));
+ assert(conf.parseOptions("disable:y") && conf.disable);
+ assert(conf.parseOptions("disable:n") && !conf.disable);
+ assert(conf.parseOptions("disable:Y") && conf.disable);
+ assert(conf.parseOptions("disable:N") && !conf.disable);
+ assert(conf.parseOptions("disable:1") && conf.disable);
+ assert(conf.parseOptions("disable:0") && !conf.disable);
+
+ assert(conf.parseOptions("disable=y") && conf.disable);
+ assert(conf.parseOptions("disable=n") && !conf.disable);
+
+ assert(conf.parseOptions("profile=0") && conf.profile == 0);
+ assert(conf.parseOptions("profile=1") && conf.profile == 1);
+ assert(conf.parseOptions("profile=2") && conf.profile == 2);
+ assert(!conf.parseOptions("profile=256"));
+
+ assert(conf.parseOptions("disable:1 minPoolSize:16"));
+ assert(conf.disable);
+ assert(conf.minPoolSize == 1024 * 1024 * 16);
+
+ assert(conf.parseOptions("disable:1 minPoolSize:4096B"));
+ assert(conf.disable);
+ assert(conf.minPoolSize == 4096);
+
+ assert(conf.parseOptions("disable:1 minPoolSize:2K help"));
+ assert(conf.disable);
+ assert(conf.minPoolSize == 2048);
+
+ assert(conf.parseOptions("minPoolSize:3G help"));
+ assert(conf.disable);
+ assert(conf.minPoolSize == 1024UL * 1024 * 1024 * 3);
+
+ assert(!conf.parseOptions("minPoolSize:922337203685477G"), "size_t overflow");
+
+ assert(conf.parseOptions("heapSizeFactor:3.1"));
+ assert(conf.heapSizeFactor == 3.1f);
+ assert(conf.parseOptions("heapSizeFactor:3.1234567890 disable:0"));
+ assert(conf.heapSizeFactor > 3.123f);
+ assert(!conf.disable);
+ assert(!conf.parseOptions("heapSizeFactor:3.0.2.5"));
+ assert(conf.parseOptions("heapSizeFactor:2"));
+ assert(conf.heapSizeFactor == 2.0f);
+
+ assert(!conf.parseOptions("initReserve:foo"));
+ assert(!conf.parseOptions("initReserve:y"));
+ assert(!conf.parseOptions("initReserve:20.5"));
+
+ assert(conf.parseOptions("help"));
+ assert(conf.parseOptions("help profile:1"));
+ assert(conf.parseOptions("help profile:1 help"));
+
+ assert(conf.parseOptions("gc:manual") && conf.gc == "manual");
+ assert(conf.parseOptions("gc:my-gc~modified") && conf.gc == "my-gc~modified");
+ assert(conf.parseOptions("gc:conservative help profile:1") && conf.gc == "conservative" && conf.profile == 1);
+
+    // the config parser doesn't know all available GC names, so it should accept unknown ones
+ assert(conf.parseOptions("gc:whatever"));
+}
diff --git a/libphobos/libdruntime/core/internal/postblit.d b/libphobos/libdruntime/core/internal/postblit.d
new file mode 100644
index 0000000..ed4ec1b
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/postblit.d
@@ -0,0 +1,274 @@
+/**
+ This module contains support for D's postblit feature
+
+ Copyright: Copyright Digital Mars 2000 - 2019.
+ License: Distributed under the
+ $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ (See accompanying file LICENSE)
+ Source: $(DRUNTIMESRC core/_internal/_postblit.d)
+*/
+module core.internal.postblit;
+
+// compiler frontend lowers struct array postblitting to this
+void __ArrayPostblit(T)(T[] a)
+{
+ foreach (ref T e; a)
+ e.__xpostblit();
+}
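+
+// Minimal sketch of what the lowering does: calling `__ArrayPostblit` runs the
+// postblit of every element in place (the compiler emits this after the raw
+// copy of a struct array).
+unittest
+{
+    static struct S { int copies; this(this) { ++copies; } }
+    S[3] a;
+    __ArrayPostblit(a[]);
+    assert(a[0].copies == 1 && a[2].copies == 1);
+}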
+
+package void postblitRecurse(S)(ref S s)
+ if (is(S == struct))
+{
+ static if (__traits(hasMember, S, "__xpostblit") &&
+ // Bugzilla 14746: Check that it's the exact member of S.
+ __traits(isSame, S, __traits(parent, s.__xpostblit)))
+ s.__xpostblit();
+}
+
+package void postblitRecurse(E, size_t n)(ref E[n] arr)
+{
+ import core.internal.destruction: destructRecurse;
+ import core.internal.traits : hasElaborateCopyConstructor;
+
+ static if (hasElaborateCopyConstructor!E)
+ {
+ size_t i;
+ scope(failure)
+ {
+ for (; i != 0; --i)
+ {
+ destructRecurse(arr[i - 1]); // What to do if this throws?
+ }
+ }
+
+ for (i = 0; i < arr.length; ++i)
+ postblitRecurse(arr[i]);
+ }
+}
+
+// Test destruction/postblit order
+@safe nothrow pure unittest
+{
+ string[] order;
+
+ struct InnerTop
+ {
+ ~this() @safe nothrow pure
+ {
+ order ~= "destroy inner top";
+ }
+
+ this(this) @safe nothrow pure
+ {
+ order ~= "copy inner top";
+ }
+ }
+
+ struct InnerMiddle {}
+
+ version (none) // https://issues.dlang.org/show_bug.cgi?id=14242
+ struct InnerElement
+ {
+ static char counter = '1';
+
+ ~this() @safe nothrow pure
+ {
+ order ~= "destroy inner element #" ~ counter++;
+ }
+
+ this(this) @safe nothrow pure
+ {
+ order ~= "copy inner element #" ~ counter++;
+ }
+ }
+
+ struct InnerBottom
+ {
+ ~this() @safe nothrow pure
+ {
+ order ~= "destroy inner bottom";
+ }
+
+ this(this) @safe nothrow pure
+ {
+ order ~= "copy inner bottom";
+ }
+ }
+
+ struct S
+ {
+ char[] s;
+ InnerTop top;
+ InnerMiddle middle;
+ version (none) InnerElement[3] array; // https://issues.dlang.org/show_bug.cgi?id=14242
+ int a;
+ InnerBottom bottom;
+ ~this() @safe nothrow pure { order ~= "destroy outer"; }
+ this(this) @safe nothrow pure { order ~= "copy outer"; }
+ }
+
+ string[] destructRecurseOrder;
+ {
+ import core.internal.destruction: destructRecurse;
+
+ S s;
+ destructRecurse(s);
+ destructRecurseOrder = order;
+ order = null;
+ }
+
+ assert(order.length);
+ assert(destructRecurseOrder == order);
+ order = null;
+
+ S s;
+ postblitRecurse(s);
+ assert(order.length);
+ auto postblitRecurseOrder = order;
+ order = null;
+ S s2 = s;
+ assert(order.length);
+ assert(postblitRecurseOrder == order);
+}
+
+@safe unittest
+{
+ // Bugzilla 14746
+ static struct HasPostblit
+ {
+ this(this) { assert(0); }
+ }
+ static struct Owner
+ {
+ HasPostblit* ptr;
+ alias ptr this;
+ }
+
+ Owner o;
+ assert(o.ptr is null);
+    postblitRecurse(o); // must not reach HasPostblit.__postblit() through the alias this
+}
+
+// Test handling of fixed-length arrays
+// Separate from first test because of https://issues.dlang.org/show_bug.cgi?id=14242
+@safe unittest
+{
+ string[] order;
+
+ struct S
+ {
+ char id;
+
+ this(this)
+ {
+ order ~= "copy #" ~ id;
+ }
+
+ ~this()
+ {
+ order ~= "destroy #" ~ id;
+ }
+ }
+
+ string[] destructRecurseOrder;
+ {
+ import core.internal.destruction: destructRecurse;
+
+ S[3] arr = [S('1'), S('2'), S('3')];
+ destructRecurse(arr);
+ destructRecurseOrder = order;
+ order = null;
+ }
+ assert(order.length);
+ assert(destructRecurseOrder == order);
+ order = null;
+
+ S[3] arr = [S('1'), S('2'), S('3')];
+ postblitRecurse(arr);
+ assert(order.length);
+ auto postblitRecurseOrder = order;
+ order = null;
+
+ auto arrCopy = arr;
+ assert(order.length);
+ assert(postblitRecurseOrder == order);
+}
+
+// Test handling of failed postblit
+// Not nothrow or @safe because of https://issues.dlang.org/show_bug.cgi?id=14242
+/+ nothrow @safe +/ unittest
+{
+ static class FailedPostblitException : Exception { this() nothrow @safe { super(null); } }
+ static string[] order;
+ static struct Inner
+ {
+ char id;
+
+ @safe:
+ this(this)
+ {
+ order ~= "copy inner #" ~ id;
+ if (id == '2')
+ throw new FailedPostblitException();
+ }
+
+ ~this() nothrow
+ {
+ order ~= "destroy inner #" ~ id;
+ }
+ }
+
+ static struct Outer
+ {
+ Inner inner1, inner2, inner3;
+
+ nothrow @safe:
+ this(char first, char second, char third)
+ {
+ inner1 = Inner(first);
+ inner2 = Inner(second);
+ inner3 = Inner(third);
+ }
+
+ this(this)
+ {
+ order ~= "copy outer";
+ }
+
+ ~this()
+ {
+ order ~= "destroy outer";
+ }
+ }
+
+ auto outer = Outer('1', '2', '3');
+
+ try postblitRecurse(outer);
+ catch (FailedPostblitException) {}
+ catch (Exception) assert(false);
+
+ auto postblitRecurseOrder = order;
+ order = null;
+
+ try auto copy = outer;
+ catch (FailedPostblitException) {}
+ catch (Exception) assert(false);
+
+ assert(postblitRecurseOrder == order);
+ order = null;
+
+ Outer[3] arr = [Outer('1', '1', '1'), Outer('1', '2', '3'), Outer('3', '3', '3')];
+
+ try postblitRecurse(arr);
+ catch (FailedPostblitException) {}
+ catch (Exception) assert(false);
+
+ postblitRecurseOrder = order;
+ order = null;
+
+ try auto arrCopy = arr;
+ catch (FailedPostblitException) {}
+ catch (Exception) assert(false);
+
+ assert(postblitRecurseOrder == order);
+}
diff --git a/libphobos/libdruntime/rt/qsort.d b/libphobos/libdruntime/core/internal/qsort.d
index 079a0f5..ad8307a 100644
--- a/libphobos/libdruntime/rt/qsort.d
+++ b/libphobos/libdruntime/core/internal/qsort.d
@@ -3,20 +3,14 @@
* qsort().
*
* Copyright: Copyright Digital Mars 2000 - 2010.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Walter Bright, Martin Nowak
*/
-
-/* Copyright Digital Mars 2000 - 2010.
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- */
-module rt.qsort;
+module core.internal.qsort;
//debug=qsort;
-private import core.stdc.stdlib;
+import core.stdc.stdlib;
version (OSX)
version = Darwin;
@@ -136,8 +130,6 @@ else
}
}
-
-
unittest
{
debug(qsort) printf("array.sort.unittest()\n");
@@ -164,3 +156,41 @@ unittest
assert(a[i] <= a[i + 1]);
}
}
+
+unittest
+{
+ debug(qsort) printf("struct.sort.unittest()\n");
+
+ static struct TestStruct
+ {
+ int value;
+
+ int opCmp(const TestStruct rhs) const
+ {
+ return value <= rhs.value ?
+ value < rhs.value ? -1 : 0 : 1;
+ }
+ }
+
+ TestStruct[] a = new TestStruct[10];
+
+ a[0] = TestStruct(23);
+ a[1] = TestStruct(1);
+ a[2] = TestStruct(64);
+ a[3] = TestStruct(5);
+ a[4] = TestStruct(6);
+ a[5] = TestStruct(5);
+ a[6] = TestStruct(17);
+ a[7] = TestStruct(3);
+ a[8] = TestStruct(0);
+ a[9] = TestStruct(-1);
+
+ _adSort(*cast(void[]*)&a, typeid(TestStruct));
+
+ for (int i = 0; i < a.length - 1; i++)
+ {
+ //printf("i = %d", i);
+ //printf(" %d %d\n", a[i], a[i + 1]);
+ assert(a[i] <= a[i + 1]);
+ }
+}
diff --git a/libphobos/libdruntime/core/internal/spinlock.d b/libphobos/libdruntime/core/internal/spinlock.d
index fb689dc..36d806a 100644
--- a/libphobos/libdruntime/core/internal/spinlock.d
+++ b/libphobos/libdruntime/core/internal/spinlock.d
@@ -2,7 +2,7 @@
* SpinLock for runtime internal usage.
*
* Copyright: Copyright Digital Mars 2015 -.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Martin Nowak
* Source: $(DRUNTIMESRC core/internal/_spinlock.d)
*/
@@ -50,8 +50,9 @@ shared struct SpinLock
/// yield with backoff
void yield(size_t k)
{
+ import core.time;
if (k < pauseThresh)
- return pause();
+ return core.atomic.pause();
else if (k < 32)
return Thread.yield();
Thread.sleep(1.msecs);
@@ -66,25 +67,9 @@ private:
enum X86 = false;
static if (X86)
- {
enum pauseThresh = 16;
- void pause()
- {
- asm @trusted @nogc nothrow
- {
- // pause instruction
- rep;
- nop;
- }
- }
- }
else
- {
enum pauseThresh = 4;
- void pause()
- {
- }
- }
size_t val;
Contention contention;
@@ -93,7 +78,7 @@ private:
// aligned to cacheline to avoid false sharing
shared align(64) struct AlignedSpinLock
{
- this(SpinLock.Contention contention)
+ this(SpinLock.Contention contention) @trusted @nogc nothrow
{
impl = shared(SpinLock)(contention);
}
diff --git a/libphobos/libdruntime/core/internal/string.d b/libphobos/libdruntime/core/internal/string.d
index d2144c7..529fee4 100644
--- a/libphobos/libdruntime/core/internal/string.d
+++ b/libphobos/libdruntime/core/internal/string.d
@@ -2,9 +2,9 @@
* String manipulation and comparison utilities.
*
* Copyright: Copyright Sean Kelly 2005 - 2009.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Sean Kelly, Walter Bright
- * Source: $(DRUNTIMESRC src/rt/util/_string.d)
+ * Source: $(DRUNTIMESRC rt/util/_string.d)
*/
module core.internal.string;
@@ -15,66 +15,107 @@ nothrow:
alias UnsignedStringBuf = char[20];
-char[] unsignedToTempString(ulong value, return char[] buf, uint radix = 10) @safe
+/**
+Converts an unsigned integer value to a string of characters.
+
+This implementation is a template so it can be used when compiling with -betterC.
+
+Params:
+ value = the unsigned integer value to convert
+ buf = the pre-allocated buffer used to store the result
+ radix = the numeric base to use in the conversion (defaults to 10)
+
+Returns:
+ The unsigned integer value as a string of characters
+*/
+char[] unsignedToTempString(uint radix = 10)(ulong value, return scope char[] buf) @safe
+if (radix >= 2 && radix <= 16)
{
size_t i = buf.length;
do
{
- ubyte x = cast(ubyte)(value % radix);
- value = value / radix;
- buf[--i] = cast(char)((x < 10) ? x + '0' : x - 10 + 'a');
+ uint x = void;
+ if (value < radix)
+ {
+ x = cast(uint)value;
+ value = 0;
+ }
+ else
+ {
+ x = cast(uint)(value % radix);
+ value /= radix;
+ }
+ buf[--i] = cast(char)((radix <= 10 || x < 10) ? x + '0' : x - 10 + 'a');
} while (value);
return buf[i .. $];
}
-private struct TempStringNoAlloc
+private struct TempStringNoAlloc(ubyte N)
{
- // need to handle 65 bytes for radix of 2 with negative sign.
- private char[65] _buf;
+ private char[N] _buf = void;
private ubyte _len;
- auto get() return
+ inout(char)[] get() inout return
{
return _buf[$-_len..$];
}
alias get this;
}
-auto unsignedToTempString(ulong value, uint radix) @safe
+/**
+Converts an unsigned integer value to a string of characters.
+
+This implementation is a template so it can be used when compiling with -betterC.
+
+Params:
+ value = the unsigned integer value to convert
+ radix = the numeric base to use in the conversion (defaults to 10)
+
+Returns:
+ The unsigned integer value as a string of characters
+*/
+auto unsignedToTempString(uint radix = 10)(ulong value) @safe
{
- TempStringNoAlloc result = void;
- result._len = unsignedToTempString(value, result._buf, radix).length & 0xff;
+ // Need a buffer of 65 bytes for radix of 2 with room for
+ // signedToTempString to possibly add a negative sign.
+ enum bufferSize = radix >= 10 ? 20 : 65;
+ TempStringNoAlloc!bufferSize result = void;
+ result._len = unsignedToTempString!radix(value, result._buf).length & 0xff;
return result;
}
unittest
{
UnsignedStringBuf buf;
- assert(0.unsignedToTempString(buf, 10) == "0");
- assert(1.unsignedToTempString(buf, 10) == "1");
- assert(12.unsignedToTempString(buf, 10) == "12");
- assert(0x12ABCF .unsignedToTempString(buf, 16) == "12abcf");
- assert(long.sizeof.unsignedToTempString(buf, 10) == "8");
- assert(uint.max.unsignedToTempString(buf, 10) == "4294967295");
- assert(ulong.max.unsignedToTempString(buf, 10) == "18446744073709551615");
+ assert(0.unsignedToTempString(buf) == "0");
+ assert(1.unsignedToTempString(buf) == "1");
+ assert(12.unsignedToTempString(buf) == "12");
+ assert(0x12ABCF .unsignedToTempString!16(buf) == "12abcf");
+ assert(long.sizeof.unsignedToTempString(buf) == "8");
+ assert(uint.max.unsignedToTempString(buf) == "4294967295");
+ assert(ulong.max.unsignedToTempString(buf) == "18446744073709551615");
// use stack allocated struct version
- assert(0.unsignedToTempString(10) == "0");
- assert(1.unsignedToTempString(10) == "1");
- assert(12.unsignedToTempString(10) == "12");
- assert(0x12ABCF .unsignedToTempString(16) == "12abcf");
- assert(long.sizeof.unsignedToTempString(10) == "8");
- assert(uint.max.unsignedToTempString(10) == "4294967295");
- assert(ulong.max.unsignedToTempString(10) == "18446744073709551615");
+ assert(0.unsignedToTempString == "0");
+ assert(1.unsignedToTempString == "1");
+ assert(12.unsignedToTempString == "12");
+ assert(0x12ABCF .unsignedToTempString!16 == "12abcf");
+ assert(long.sizeof.unsignedToTempString == "8");
+ assert(uint.max.unsignedToTempString == "4294967295");
+ assert(ulong.max.unsignedToTempString == "18446744073709551615");
+
+ // test bad radices
+ assert(!is(typeof(100.unsignedToTempString!1(buf))));
+ assert(!is(typeof(100.unsignedToTempString!0(buf) == "")));
}
alias SignedStringBuf = char[20];
-char[] signedToTempString(long value, return char[] buf, uint radix) @safe
+char[] signedToTempString(uint radix = 10)(long value, return scope char[] buf) @safe
{
bool neg = value < 0;
if (neg)
value = cast(ulong)-value;
- auto r = unsignedToTempString(value, buf, radix);
+ auto r = unsignedToTempString!radix(value, buf);
if (neg)
{
// about to do a slice without a bounds check
@@ -85,12 +126,12 @@ char[] signedToTempString(long value, return char[] buf, uint radix) @safe
return r;
}
-auto signedToTempString(long value, uint radix) @safe
+auto signedToTempString(uint radix = 10)(long value) @safe
{
bool neg = value < 0;
if (neg)
value = cast(ulong)-value;
- auto r = unsignedToTempString(value, radix);
+ auto r = unsignedToTempString!radix(value);
if (neg)
{
r._len++;
@@ -102,34 +143,34 @@ auto signedToTempString(long value, uint radix) @safe
unittest
{
SignedStringBuf buf;
- assert(0.signedToTempString(buf, 10) == "0");
- assert(1.signedToTempString(buf, 10) == "1");
- assert((-1).signedToTempString(buf, 10) == "-1");
- assert(12.signedToTempString(buf, 10) == "12");
- assert((-12).signedToTempString(buf, 10) == "-12");
- assert(0x12ABCF .signedToTempString(buf, 16) == "12abcf");
- assert((-0x12ABCF) .signedToTempString(buf, 16) == "-12abcf");
- assert(long.sizeof.signedToTempString(buf, 10) == "8");
- assert(int.max.signedToTempString(buf, 10) == "2147483647");
- assert(int.min.signedToTempString(buf, 10) == "-2147483648");
- assert(long.max.signedToTempString(buf, 10) == "9223372036854775807");
- assert(long.min.signedToTempString(buf, 10) == "-9223372036854775808");
+ assert(0.signedToTempString(buf) == "0");
+ assert(1.signedToTempString(buf) == "1");
+ assert((-1).signedToTempString(buf) == "-1");
+ assert(12.signedToTempString(buf) == "12");
+ assert((-12).signedToTempString(buf) == "-12");
+ assert(0x12ABCF .signedToTempString!16(buf) == "12abcf");
+ assert((-0x12ABCF) .signedToTempString!16(buf) == "-12abcf");
+ assert(long.sizeof.signedToTempString(buf) == "8");
+ assert(int.max.signedToTempString(buf) == "2147483647");
+ assert(int.min.signedToTempString(buf) == "-2147483648");
+ assert(long.max.signedToTempString(buf) == "9223372036854775807");
+ assert(long.min.signedToTempString(buf) == "-9223372036854775808");
// use stack allocated struct version
- assert(0.signedToTempString(10) == "0");
- assert(1.signedToTempString(10) == "1");
- assert((-1).signedToTempString(10) == "-1");
- assert(12.signedToTempString(10) == "12");
- assert((-12).signedToTempString(10) == "-12");
- assert(0x12ABCF .signedToTempString(16) == "12abcf");
- assert((-0x12ABCF) .signedToTempString(16) == "-12abcf");
- assert(long.sizeof.signedToTempString(10) == "8");
- assert(int.max.signedToTempString(10) == "2147483647");
- assert(int.min.signedToTempString(10) == "-2147483648");
- assert(long.max.signedToTempString(10) == "9223372036854775807");
- assert(long.min.signedToTempString(10) == "-9223372036854775808");
- assert(long.max.signedToTempString(2) == "111111111111111111111111111111111111111111111111111111111111111");
- assert(long.min.signedToTempString(2) == "-1000000000000000000000000000000000000000000000000000000000000000");
+ assert(0.signedToTempString() == "0");
+ assert(1.signedToTempString == "1");
+ assert((-1).signedToTempString == "-1");
+ assert(12.signedToTempString == "12");
+ assert((-12).signedToTempString == "-12");
+ assert(0x12ABCF .signedToTempString!16 == "12abcf");
+ assert((-0x12ABCF) .signedToTempString!16 == "-12abcf");
+ assert(long.sizeof.signedToTempString == "8");
+ assert(int.max.signedToTempString == "2147483647");
+ assert(int.min.signedToTempString == "-2147483648");
+ assert(long.max.signedToTempString == "9223372036854775807");
+ assert(long.min.signedToTempString == "-9223372036854775808");
+ assert(long.max.signedToTempString!2 == "111111111111111111111111111111111111111111111111111111111111111");
+ assert(long.min.signedToTempString!2 == "-1000000000000000000000000000000000000000000000000000000000000000");
}
@@ -142,7 +183,7 @@ unittest
* Returns:
* number of digits
*/
-int numDigits(uint radix = 10)(ulong value) @safe
+int numDigits(uint radix = 10)(ulong value) @safe if (radix >= 2 && radix <= 36)
{
int n = 1;
while (1)
@@ -188,9 +229,14 @@ unittest
assert(1.numDigits!2 == 1);
assert(2.numDigits!2 == 2);
assert(3.numDigits!2 == 2);
+
+ // test bad radices
+ static assert(!__traits(compiles, 100.numDigits!1()));
+ static assert(!__traits(compiles, 100.numDigits!0()));
+ static assert(!__traits(compiles, 100.numDigits!37()));
}
-int dstrcmp( scope const char[] s1, scope const char[] s2 ) @trusted
+int dstrcmp()( scope const char[] s1, scope const char[] s2 ) @trusted
{
immutable len = s1.length <= s2.length ? s1.length : s2.length;
if (__ctfe)
@@ -209,5 +255,5 @@ int dstrcmp( scope const char[] s1, scope const char[] s2 ) @trusted
if ( ret )
return ret;
}
- return s1.length < s2.length ? -1 : (s1.length > s2.length);
+ return (s1.length > s2.length) - (s1.length < s2.length);
}
diff --git a/libphobos/libdruntime/core/internal/switch_.d b/libphobos/libdruntime/core/internal/switch_.d
new file mode 100644
index 0000000..c052c58
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/switch_.d
@@ -0,0 +1,190 @@
+/**
+This module contains compiler support for switch...case statements
+
+Copyright: Copyright Digital Mars 2000 - 2019.
+License: Distributed under the
+ $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ (See accompanying file LICENSE)
+Source: $(DRUNTIMESRC core/internal/_switch_.d)
+*/
+module core.internal.switch_;
+
+/**
+Support for switch statements switching on strings.
+Params:
+    caseLabels = sorted array of strings generated by the compiler. Note that
+                 the strings are sorted by length first, and then lexicographically.
+ condition = string to look up in table
+Returns:
+ index of match in caseLabels, a negative integer if not found
+*/
+int __switch(T, caseLabels...)(/*in*/ const scope T[] condition) pure nothrow @safe @nogc
+{
+ // This closes recursion for other cases.
+ static if (caseLabels.length == 0)
+ {
+ return int.min;
+ }
+ else static if (caseLabels.length == 1)
+ {
+ return __cmp(condition, caseLabels[0]) == 0 ? 0 : int.min;
+ }
+ // To be adjusted after measurements
+ // Compile-time inlined binary search.
+ else static if (caseLabels.length < 7)
+ {
+ int r = void;
+ enum mid = cast(int)caseLabels.length / 2;
+ if (condition.length == caseLabels[mid].length)
+ {
+ r = __cmp(condition, caseLabels[mid]);
+ if (r == 0) return mid;
+ }
+ else
+ {
+ // Equivalent to (but faster than) condition.length > caseLabels[$ / 2].length ? 1 : -1
+ r = ((condition.length > caseLabels[mid].length) << 1) - 1;
+ }
+
+ if (r < 0)
+ {
+ // Search the left side
+ return __switch!(T, caseLabels[0 .. mid])(condition);
+ }
+ else
+ {
+ // Search the right side
+ return __switch!(T, caseLabels[mid + 1 .. $])(condition) + mid + 1;
+ }
+ }
+ else
+ {
+ // Need immutable array to be accessible in pure code, but case labels are
+ // currently coerced to the switch condition type (e.g. const(char)[]).
+ pure @trusted nothrow @nogc asImmutable(scope const(T[])[] items)
+ {
+ assert(__ctfe); // only @safe for CTFE
+ immutable T[][caseLabels.length] result = cast(immutable)(items[]);
+ return result;
+ }
+ static immutable T[][caseLabels.length] cases = asImmutable([caseLabels]);
+
+ // Run-time binary search in a static array of labels.
+ return __switchSearch!T(cases[], condition);
+ }
+}
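+
+// Illustrative sketch of the generated call for a switch over the "bar" and
+// "foo" case labels (labels are passed sorted by length, then lexicographically).
+unittest
+{
+    assert(__switch!(char, "bar", "foo")("bar") == 0);
+    assert(__switch!(char, "bar", "foo")("foo") == 1);
+    assert(__switch!(char, "bar", "foo")("baz") < 0);
+}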
+
+// binary search in sorted string cases, also see `__switch`.
+private int __switchSearch(T)(/*in*/ const scope T[][] cases, /*in*/ const scope T[] condition) pure nothrow @safe @nogc
+{
+ size_t low = 0;
+ size_t high = cases.length;
+
+ do
+ {
+ auto mid = (low + high) / 2;
+ int r = void;
+ if (condition.length == cases[mid].length)
+ {
+ r = __cmp(condition, cases[mid]);
+ if (r == 0) return cast(int) mid;
+ }
+ else
+ {
+ // Generates better code than "expr ? 1 : -1" on dmd and gdc, same with ldc
+ r = ((condition.length > cases[mid].length) << 1) - 1;
+ }
+
+ if (r > 0) low = mid + 1;
+ else high = mid;
+ }
+ while (low < high);
+
+ // Not found
+ return -1;
+}
+
+@system unittest
+{
+ static void testSwitch(T)()
+ {
+ switch (cast(T[]) "c")
+ {
+ case "coo":
+ default:
+ break;
+ }
+
+ static int bug5381(immutable(T)[] s)
+ {
+ switch (s)
+ {
+ case "unittest": return 1;
+ case "D_Version2": return 2;
+ case "nonenone": return 3;
+ case "none": return 4;
+ case "all": return 5;
+ default: return 6;
+ }
+ }
+
+ int rc = bug5381("unittest");
+ assert(rc == 1);
+
+ rc = bug5381("D_Version2");
+ assert(rc == 2);
+
+ rc = bug5381("nonenone");
+ assert(rc == 3);
+
+ rc = bug5381("none");
+ assert(rc == 4);
+
+ rc = bug5381("all");
+ assert(rc == 5);
+
+ rc = bug5381("nonerandom");
+ assert(rc == 6);
+
+ static int binarySearch(immutable(T)[] s)
+ {
+ switch (s)
+ {
+ static foreach (i; 0 .. 16)
+ case i.stringof: return i;
+ default: return -1;
+ }
+ }
+ static foreach (i; 0 .. 16)
+ assert(binarySearch(i.stringof) == i);
+ assert(binarySearch("") == -1);
+ assert(binarySearch("sth.") == -1);
+ assert(binarySearch(null) == -1);
+
+ static int bug16739(immutable(T)[] s)
+ {
+ switch (s)
+ {
+ case "\u0100": return 1;
+ case "a": return 2;
+ default: return 3;
+ }
+ }
+ assert(bug16739("\u0100") == 1);
+ assert(bug16739("a") == 2);
+ assert(bug16739("foo") == 3);
+ }
+ testSwitch!char;
+ testSwitch!wchar;
+ testSwitch!dchar;
+}
+
+/**
+The compiler lowers the default case of a final switch to this (which is a runtime error).
+The old implementation is in core/exception.d.
+*/
+void __switch_error()(string file = __FILE__, size_t line = __LINE__)
+{
+ import core.exception : __switch_errorT;
+ __switch_errorT(file, line);
+}
diff --git a/libphobos/libdruntime/core/internal/traits.d b/libphobos/libdruntime/core/internal/traits.d
index 9f79dd0..1856eb8 100644
--- a/libphobos/libdruntime/core/internal/traits.d
+++ b/libphobos/libdruntime/core/internal/traits.d
@@ -2,7 +2,7 @@
* Contains traits for runtime internal usage.
*
* Copyright: Copyright Digital Mars 2014 -.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Martin Nowak
* Source: $(DRUNTIMESRC core/internal/_traits.d)
*/
@@ -25,38 +25,59 @@ T trustedCast(T, U)(auto ref U u) @trusted pure nothrow
return cast(T)u;
}
-template Unconst(T)
+alias Unconst(T : const U, U) = U;
+
+/// taken from std.traits.Unqual
+template Unqual(T : const U, U)
{
- static if (is(T U == immutable U)) alias Unconst = U;
- else static if (is(T U == inout const U)) alias Unconst = U;
- else static if (is(T U == inout U)) alias Unconst = U;
- else static if (is(T U == const U)) alias Unconst = U;
- else alias Unconst = T;
+ static if (is(U == shared V, V))
+ alias Unqual = V;
+ else
+ alias Unqual = U;
}
-/// taken from std.traits.Unqual
-template Unqual(T)
+template BaseElemOf(T)
{
- version (none) // Error: recursive alias declaration @@@BUG1308@@@
- {
- static if (is(T U == const U)) alias Unqual = Unqual!U;
- else static if (is(T U == immutable U)) alias Unqual = Unqual!U;
- else static if (is(T U == inout U)) alias Unqual = Unqual!U;
- else static if (is(T U == shared U)) alias Unqual = Unqual!U;
- else alias Unqual = T;
- }
- else // workaround
- {
- static if (is(T U == immutable U)) alias Unqual = U;
- else static if (is(T U == shared inout const U)) alias Unqual = U;
- else static if (is(T U == shared inout U)) alias Unqual = U;
- else static if (is(T U == shared const U)) alias Unqual = U;
- else static if (is(T U == shared U)) alias Unqual = U;
- else static if (is(T U == inout const U)) alias Unqual = U;
- else static if (is(T U == inout U)) alias Unqual = U;
- else static if (is(T U == const U)) alias Unqual = U;
- else alias Unqual = T;
- }
+ static if (is(T == E[N], E, size_t N))
+ alias BaseElemOf = BaseElemOf!E;
+ else
+ alias BaseElemOf = T;
+}
+
+unittest
+{
+ static assert(is(BaseElemOf!(int) == int));
+ static assert(is(BaseElemOf!(int[1]) == int));
+ static assert(is(BaseElemOf!(int[1][2]) == int));
+ static assert(is(BaseElemOf!(int[1][]) == int[1][]));
+ static assert(is(BaseElemOf!(int[][1]) == int[]));
+}
+
+// [For internal use]
+template ModifyTypePreservingTQ(alias Modifier, T)
+{
+ static if (is(T U == immutable U)) alias ModifyTypePreservingTQ = immutable Modifier!U;
+ else static if (is(T U == shared inout const U)) alias ModifyTypePreservingTQ = shared inout const Modifier!U;
+ else static if (is(T U == shared inout U)) alias ModifyTypePreservingTQ = shared inout Modifier!U;
+ else static if (is(T U == shared const U)) alias ModifyTypePreservingTQ = shared const Modifier!U;
+ else static if (is(T U == shared U)) alias ModifyTypePreservingTQ = shared Modifier!U;
+ else static if (is(T U == inout const U)) alias ModifyTypePreservingTQ = inout const Modifier!U;
+ else static if (is(T U == inout U)) alias ModifyTypePreservingTQ = inout Modifier!U;
+ else static if (is(T U == const U)) alias ModifyTypePreservingTQ = const Modifier!U;
+ else alias ModifyTypePreservingTQ = Modifier!T;
+}
+@safe unittest
+{
+ alias Intify(T) = int;
+ static assert(is(ModifyTypePreservingTQ!(Intify, real) == int));
+ static assert(is(ModifyTypePreservingTQ!(Intify, const real) == const int));
+ static assert(is(ModifyTypePreservingTQ!(Intify, inout real) == inout int));
+ static assert(is(ModifyTypePreservingTQ!(Intify, inout const real) == inout const int));
+ static assert(is(ModifyTypePreservingTQ!(Intify, shared real) == shared int));
+ static assert(is(ModifyTypePreservingTQ!(Intify, shared const real) == shared const int));
+ static assert(is(ModifyTypePreservingTQ!(Intify, shared inout real) == shared inout int));
+ static assert(is(ModifyTypePreservingTQ!(Intify, shared inout const real) == shared inout const int));
+ static assert(is(ModifyTypePreservingTQ!(Intify, immutable real) == immutable int));
}
// Substitute all `inout` qualifiers that appears in T to `const`
@@ -129,116 +150,190 @@ template staticIota(int beg, int end)
}
}
+private struct __InoutWorkaroundStruct {}
+@property T rvalueOf(T)(T val) { return val; }
+@property T rvalueOf(T)(inout __InoutWorkaroundStruct = __InoutWorkaroundStruct.init);
+@property ref T lvalueOf(T)(inout __InoutWorkaroundStruct = __InoutWorkaroundStruct.init);
+
+// taken from std.traits.isAssignable
+template isAssignable(Lhs, Rhs = Lhs)
+{
+ enum isAssignable = __traits(compiles, lvalueOf!Lhs = rvalueOf!Rhs) && __traits(compiles, lvalueOf!Lhs = lvalueOf!Rhs);
+}
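+
+// A few sanity checks for the reduced isAssignable above:
+unittest
+{
+    static assert( isAssignable!int);
+    static assert( isAssignable!(long, int));
+    static assert(!isAssignable!(int, string));
+    static assert(!isAssignable!(immutable int, int));
+}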
+
+// taken from std.traits.isInnerClass
+template isInnerClass(T) if (is(T == class))
+{
+ static if (is(typeof(T.outer)))
+ {
+ template hasOuterMember(T...)
+ {
+ static if (T.length == 0)
+ enum hasOuterMember = false;
+ else
+ enum hasOuterMember = T[0] == "outer" || hasOuterMember!(T[1 .. $]);
+ }
+ enum isInnerClass = __traits(isSame, typeof(T.outer), __traits(parent, T)) && !hasOuterMember!(__traits(allMembers, T));
+ }
+ else
+ enum isInnerClass = false;
+}
+
template dtorIsNothrow(T)
{
enum dtorIsNothrow = is(typeof(function{T t=void;}) : void function() nothrow);
}
-/*
-Tests whether all given items satisfy a template predicate, i.e. evaluates to
-$(D F!(T[0]) && F!(T[1]) && ... && F!(T[$ - 1])).
-*/
-package(core.internal)
-template allSatisfy(alias F, T...)
+// taken from std.meta.allSatisfy
+enum allSatisfy(alias pred, items...) =
{
- static if (T.length == 0)
+ static foreach (item; items)
+ static if (!pred!item)
+ if (__ctfe) return false;
+ return true;
+}();
+
+// taken from std.meta.anySatisfy
+enum anySatisfy(alias pred, items...) =
+{
+ static foreach (item; items)
+ static if (pred!item)
+ if (__ctfe) return true;
+ return false;
+}();
+
+// simplified from std.traits.maxAlignment
+template maxAlignment(Ts...)
+if (Ts.length > 0)
+{
+ enum maxAlignment =
{
- enum allSatisfy = true;
+ size_t result = 0;
+ static foreach (T; Ts)
+ if (T.alignof > result) result = T.alignof;
+ return result;
+ }();
+}
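+
+// Quick check of the simplified maxAlignment above:
+unittest
+{
+    static assert(maxAlignment!(int, long) == long.alignof);
+    static assert(maxAlignment!(bool, byte, short) == short.alignof);
+}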
+
+template classInstanceAlignment(T)
+if (is(T == class))
+{
+ alias classInstanceAlignment = maxAlignment!(void*, typeof(T.tupleof));
+}
+
+/// See $(REF hasElaborateMove, std,traits)
+template hasElaborateMove(S)
+{
+ static if (__traits(isStaticArray, S))
+ {
+ enum bool hasElaborateMove = S.sizeof && hasElaborateMove!(BaseElemOf!S);
}
- else static if (T.length == 1)
+ else static if (is(S == struct))
{
- enum allSatisfy = F!(T[0]);
+ enum hasElaborateMove = (is(typeof(S.init.opPostMove(lvalueOf!S))) &&
+ !is(typeof(S.init.opPostMove(rvalueOf!S)))) ||
+ anySatisfy!(.hasElaborateMove, Fields!S);
}
else
{
- static if (allSatisfy!(F, T[0 .. $/2]))
- enum allSatisfy = allSatisfy!(F, T[$/2 .. $]);
- else
- enum allSatisfy = false;
+ enum bool hasElaborateMove = false;
}
}
-template anySatisfy(alias F, T...)
+// std.traits.hasElaborateDestructor
+template hasElaborateDestructor(S)
{
- static if (T.length == 0)
+ static if (__traits(isStaticArray, S))
{
- enum anySatisfy = false;
+ enum bool hasElaborateDestructor = S.sizeof && hasElaborateDestructor!(BaseElemOf!S);
}
- else static if (T.length == 1)
+ else static if (is(S == struct))
{
- enum anySatisfy = F!(T[0]);
+ enum hasElaborateDestructor = __traits(hasMember, S, "__dtor")
+ || anySatisfy!(.hasElaborateDestructor, Fields!S);
}
else
{
- enum anySatisfy =
- anySatisfy!(F, T[ 0 .. $/2]) ||
- anySatisfy!(F, T[$/2 .. $ ]);
+ enum bool hasElaborateDestructor = false;
}
}
-// simplified from std.traits.maxAlignment
-template maxAlignment(U...)
+// std.traits.hasElaborateCopyDestructor
+template hasElaborateCopyConstructor(S)
{
- static if (U.length == 0)
- static assert(0);
- else static if (U.length == 1)
- enum maxAlignment = U[0].alignof;
- else static if (U.length == 2)
- enum maxAlignment = U[0].alignof > U[1].alignof ? U[0].alignof : U[1].alignof;
+ static if (__traits(isStaticArray, S))
+ {
+ enum bool hasElaborateCopyConstructor = S.sizeof && hasElaborateCopyConstructor!(BaseElemOf!S);
+ }
+ else static if (is(S == struct))
+ {
+ enum hasElaborateCopyConstructor = __traits(hasCopyConstructor, S) || __traits(hasPostblit, S);
+ }
else
{
- enum a = maxAlignment!(U[0 .. ($+1)/2]);
- enum b = maxAlignment!(U[($+1)/2 .. $]);
- enum maxAlignment = a > b ? a : b;
+ enum bool hasElaborateCopyConstructor = false;
}
}
-template classInstanceAlignment(T)
-if (is(T == class))
+@safe unittest
{
- alias classInstanceAlignment = maxAlignment!(void*, typeof(T.tupleof));
-}
+ static struct S
+ {
+ int x;
+ this(return scope ref typeof(this) rhs) { }
+ this(int x, int y) {}
+ }
-// Somehow fails for non-static nested structs without support for aliases
-template hasElaborateDestructor(T...)
-{
- static if (is(T[0]))
- alias S = T[0];
- else
- alias S = typeof(T[0]);
+ static assert(hasElaborateCopyConstructor!S);
+ static assert(!hasElaborateCopyConstructor!(S[0][1]));
- static if (is(S : E[n], E, size_t n) && S.length)
+ static struct S2
{
- enum bool hasElaborateDestructor = hasElaborateDestructor!E;
+ int x;
+ this(int x, int y) {}
}
- else static if (is(S == struct))
+
+ static assert(!hasElaborateCopyConstructor!S2);
+
+ static struct S3
{
- enum hasElaborateDestructor = __traits(hasMember, S, "__dtor")
- || anySatisfy!(.hasElaborateDestructor, S.tupleof);
+ int x;
+ this(return scope ref typeof(this) rhs, int x = 42) { }
+ this(int x, int y) {}
}
- else
- enum bool hasElaborateDestructor = false;
+
+ static assert(hasElaborateCopyConstructor!S3);
}
-// Somehow fails for non-static nested structs without support for aliases
-template hasElaborateCopyConstructor(T...)
+template hasElaborateAssign(S)
{
- static if (is(T[0]))
- alias S = T[0];
- else
- alias S = typeof(T[0]);
-
- static if (is(S : E[n], E, size_t n) && S.length)
+ static if (__traits(isStaticArray, S))
{
- enum bool hasElaborateCopyConstructor = hasElaborateCopyConstructor!E;
+ enum bool hasElaborateAssign = S.sizeof && hasElaborateAssign!(BaseElemOf!S);
}
else static if (is(S == struct))
{
- enum hasElaborateCopyConstructor = __traits(hasMember, S, "__postblit")
- || anySatisfy!(.hasElaborateCopyConstructor, S.tupleof);
+ enum hasElaborateAssign = is(typeof(S.init.opAssign(rvalueOf!S))) ||
+ is(typeof(S.init.opAssign(lvalueOf!S))) ||
+ anySatisfy!(.hasElaborateAssign, Fields!S);
}
else
- enum bool hasElaborateCopyConstructor = false;
+ {
+ enum bool hasElaborateAssign = false;
+ }
+}
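+
+// Brief sketch: a user-defined opAssign, on the type itself or on a field,
+// counts as elaborate; plain value types do not.
+unittest
+{
+    static struct HasAssign { void opAssign(HasAssign rhs) {} }
+    static struct Wraps { HasAssign inner; }
+    static struct Plain { int x; }
+    static assert( hasElaborateAssign!HasAssign);
+    static assert( hasElaborateAssign!Wraps);
+    static assert(!hasElaborateAssign!Plain);
+    static assert(!hasElaborateAssign!int);
+}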
+
+template hasIndirections(T)
+{
+ static if (is(T == struct) || is(T == union))
+ enum hasIndirections = anySatisfy!(.hasIndirections, Fields!T);
+ else static if (is(T == E[N], E, size_t N))
+ enum hasIndirections = T.sizeof && is(E == void) ? true : hasIndirections!(BaseElemOf!E);
+ else static if (isFunctionPointer!T)
+ enum hasIndirections = false;
+ else
+ enum hasIndirections = isPointer!T || isDelegate!T || isDynamicArray!T ||
+ __traits(isAssociativeArray, T) || is (T == class) || is(T == interface);
}
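+
+// Quick sketch: pointers, dynamic arrays and class references count as
+// indirections; plain value aggregates and static arrays of them do not.
+unittest
+{
+    static struct Plain { int x; double y; }
+    static struct Boxed { int* p; }
+    static assert(!hasIndirections!Plain);
+    static assert( hasIndirections!Boxed);
+    static assert( hasIndirections!(int[]));
+    static assert(!hasIndirections!(int[4]));
+    static assert( hasIndirections!Object);
+}
+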
template hasUnsharedIndirections(T)
@@ -389,3 +484,333 @@ template Filter(alias pred, TList...)
Filter!(pred, TList[$/2 .. $ ]));
}
}
+
+// std.meta.staticMap
+template staticMap(alias F, T...)
+{
+ static if (T.length == 0)
+ {
+ alias staticMap = AliasSeq!();
+ }
+ else static if (T.length == 1)
+ {
+ alias staticMap = AliasSeq!(F!(T[0]));
+ }
+ /* Cases 2 to 8 improve compile performance by reducing
+ * the number of recursive instantiations of staticMap
+ */
+ else static if (T.length == 2)
+ {
+ alias staticMap = AliasSeq!(F!(T[0]), F!(T[1]));
+ }
+ else static if (T.length == 3)
+ {
+ alias staticMap = AliasSeq!(F!(T[0]), F!(T[1]), F!(T[2]));
+ }
+ else static if (T.length == 4)
+ {
+ alias staticMap = AliasSeq!(F!(T[0]), F!(T[1]), F!(T[2]), F!(T[3]));
+ }
+ else static if (T.length == 5)
+ {
+ alias staticMap = AliasSeq!(F!(T[0]), F!(T[1]), F!(T[2]), F!(T[3]), F!(T[4]));
+ }
+ else static if (T.length == 6)
+ {
+ alias staticMap = AliasSeq!(F!(T[0]), F!(T[1]), F!(T[2]), F!(T[3]), F!(T[4]), F!(T[5]));
+ }
+ else static if (T.length == 7)
+ {
+ alias staticMap = AliasSeq!(F!(T[0]), F!(T[1]), F!(T[2]), F!(T[3]), F!(T[4]), F!(T[5]), F!(T[6]));
+ }
+ else static if (T.length == 8)
+ {
+ alias staticMap = AliasSeq!(F!(T[0]), F!(T[1]), F!(T[2]), F!(T[3]), F!(T[4]), F!(T[5]), F!(T[6]), F!(T[7]));
+ }
+ else
+ {
+ alias staticMap =
+ AliasSeq!(
+ staticMap!(F, T[ 0 .. $/2]),
+ staticMap!(F, T[$/2 .. $ ]));
+ }
+}
+
+// std.exception.assertCTFEable
+version (CoreUnittest) package(core)
+void assertCTFEable(alias dg)()
+{
+ static assert({ cast(void) dg(); return true; }());
+ cast(void) dg();
+}
+
+// std.traits.FunctionTypeOf
+/*
+Get the function type from a callable object `func`.
+
+Using builtin `typeof` on a property function yields the types of the
+property value, not of the property function itself. Still,
+`FunctionTypeOf` is able to obtain function types of properties.
+
+Note:
+Do not confuse function types with function pointer types; function types are
+usually used for compile-time reflection purposes.
+ */
+template FunctionTypeOf(func...)
+if (func.length == 1 /*&& isCallable!func*/)
+{
+ static if (is(typeof(& func[0]) Fsym : Fsym*) && is(Fsym == function) || is(typeof(& func[0]) Fsym == delegate))
+ {
+ alias FunctionTypeOf = Fsym; // HIT: (nested) function symbol
+ }
+ else static if (is(typeof(& func[0].opCall) Fobj == delegate))
+ {
+ alias FunctionTypeOf = Fobj; // HIT: callable object
+ }
+ else static if (is(typeof(& func[0].opCall) Ftyp : Ftyp*) && is(Ftyp == function))
+ {
+ alias FunctionTypeOf = Ftyp; // HIT: callable type
+ }
+ else static if (is(func[0] T) || is(typeof(func[0]) T))
+ {
+ static if (is(T == function))
+ alias FunctionTypeOf = T; // HIT: function
+ else static if (is(T Fptr : Fptr*) && is(Fptr == function))
+ alias FunctionTypeOf = Fptr; // HIT: function pointer
+ else static if (is(T Fdlg == delegate))
+ alias FunctionTypeOf = Fdlg; // HIT: delegate
+ else
+ static assert(0);
+ }
+ else
+ static assert(0);
+}
+
+@safe unittest
+{
+ class C
+ {
+ int value() @property { return 0; }
+ }
+ static assert(is( typeof(C.value) == int ));
+ static assert(is( FunctionTypeOf!(C.value) == function ));
+}
+
+@system unittest
+{
+ int test(int a);
+ int propGet() @property;
+ int propSet(int a) @property;
+ int function(int) test_fp;
+ int delegate(int) test_dg;
+ static assert(is( typeof(test) == FunctionTypeOf!(typeof(test)) ));
+ static assert(is( typeof(test) == FunctionTypeOf!test ));
+ static assert(is( typeof(test) == FunctionTypeOf!test_fp ));
+ static assert(is( typeof(test) == FunctionTypeOf!test_dg ));
+ alias int GetterType() @property;
+ alias int SetterType(int) @property;
+ static assert(is( FunctionTypeOf!propGet == GetterType ));
+ static assert(is( FunctionTypeOf!propSet == SetterType ));
+
+ interface Prop { int prop() @property; }
+ Prop prop;
+ static assert(is( FunctionTypeOf!(Prop.prop) == GetterType ));
+ static assert(is( FunctionTypeOf!(prop.prop) == GetterType ));
+
+ class Callable { int opCall(int) { return 0; } }
+ auto call = new Callable;
+ static assert(is( FunctionTypeOf!call == typeof(test) ));
+
+ struct StaticCallable { static int opCall(int) { return 0; } }
+ StaticCallable stcall_val;
+ StaticCallable* stcall_ptr;
+ static assert(is( FunctionTypeOf!stcall_val == typeof(test) ));
+ static assert(is( FunctionTypeOf!stcall_ptr == typeof(test) ));
+
+ interface Overloads
+ {
+ void test(string);
+ real test(real);
+ int test(int);
+ int test() @property;
+ }
+ alias ov = __traits(getVirtualFunctions, Overloads, "test");
+ alias F_ov0 = FunctionTypeOf!(ov[0]);
+ alias F_ov1 = FunctionTypeOf!(ov[1]);
+ alias F_ov2 = FunctionTypeOf!(ov[2]);
+ alias F_ov3 = FunctionTypeOf!(ov[3]);
+ static assert(is(F_ov0* == void function(string)));
+ static assert(is(F_ov1* == real function(real)));
+ static assert(is(F_ov2* == int function(int)));
+ static assert(is(F_ov3* == int function() @property));
+
+ alias F_dglit = FunctionTypeOf!((int a){ return a; });
+ static assert(is(F_dglit* : int function(int)));
+}
+
+// std.traits.ReturnType
+/*
+Get the type of the return value from a function,
+a pointer to function, a delegate, a struct
+with an opCall, a pointer to a struct with an opCall,
+or a class with an `opCall`. Please note that $(D_KEYWORD ref)
+is not part of a type, but the attribute of the function
+(see template $(LREF functionAttributes)).
+*/
+template ReturnType(func...)
+if (func.length == 1 /*&& isCallable!func*/)
+{
+ static if (is(FunctionTypeOf!func R == return))
+ alias ReturnType = R;
+ else
+ static assert(0, "argument has no return type");
+}
+
+//
+@safe unittest
+{
+ int foo();
+ ReturnType!foo x; // x is declared as int
+}
+
+@safe unittest
+{
+ struct G
+ {
+ int opCall (int i) { return 1;}
+ }
+
+ alias ShouldBeInt = ReturnType!G;
+ static assert(is(ShouldBeInt == int));
+
+ G g;
+ static assert(is(ReturnType!g == int));
+
+ G* p;
+ alias pg = ReturnType!p;
+ static assert(is(pg == int));
+
+ class C
+ {
+ int opCall (int i) { return 1;}
+ }
+
+ static assert(is(ReturnType!C == int));
+
+ C c;
+ static assert(is(ReturnType!c == int));
+
+ class Test
+ {
+ int prop() @property { return 0; }
+ }
+ alias R_Test_prop = ReturnType!(Test.prop);
+ static assert(is(R_Test_prop == int));
+
+ alias R_dglit = ReturnType!((int a) { return a; });
+ static assert(is(R_dglit == int));
+}
+
+// std.traits.Parameters
+/*
+Get, as a tuple, the types of the parameters to a function, a pointer
+to function, a delegate, a struct with an `opCall`, a pointer to a
+struct with an `opCall`, or a class with an `opCall`.
+*/
+template Parameters(func...)
+if (func.length == 1 /*&& isCallable!func*/)
+{
+ static if (is(FunctionTypeOf!func P == function))
+ alias Parameters = P;
+ else
+ static assert(0, "argument has no parameters");
+}
+
+//
+@safe unittest
+{
+ int foo(int, long);
+ void bar(Parameters!foo); // declares void bar(int, long);
+ void abc(Parameters!foo[1]); // declares void abc(long);
+}
+
+@safe unittest
+{
+ int foo(int i, bool b) { return 0; }
+ static assert(is(Parameters!foo == AliasSeq!(int, bool)));
+ static assert(is(Parameters!(typeof(&foo)) == AliasSeq!(int, bool)));
+
+ struct S { real opCall(real r, int i) { return 0.0; } }
+ S s;
+ static assert(is(Parameters!S == AliasSeq!(real, int)));
+ static assert(is(Parameters!(S*) == AliasSeq!(real, int)));
+ static assert(is(Parameters!s == AliasSeq!(real, int)));
+
+ class Test
+ {
+ int prop() @property { return 0; }
+ }
+ alias P_Test_prop = Parameters!(Test.prop);
+ static assert(P_Test_prop.length == 0);
+
+ alias P_dglit = Parameters!((int a){});
+ static assert(P_dglit.length == 1);
+ static assert(is(P_dglit[0] == int));
+}
+
+// Return `true` if `Type` has a member `member` that evaluates to `true` in a `static if` condition
+enum isTrue(Type, string member) = __traits(compiles, { static if (__traits(getMember, Type, member)) {} else static assert(0); });
+
+unittest
+{
+ static struct T
+ {
+ enum a = true;
+ enum b = false;
+ enum c = 1;
+ enum d = 45;
+ enum e = "true";
+ enum f = "";
+ enum g = null;
+ alias h = bool;
+ }
+
+ static assert( isTrue!(T, "a"));
+ static assert(!isTrue!(T, "b"));
+ static assert( isTrue!(T, "c"));
+ static assert( isTrue!(T, "d"));
+ static assert( isTrue!(T, "e"));
+ static assert( isTrue!(T, "f"));
+ static assert(!isTrue!(T, "g"));
+ static assert(!isTrue!(T, "h"));
+}
+
+template hasUDA(alias symbol, alias attribute)
+{
+ alias attrs = __traits(getAttributes, symbol);
+
+ static foreach (a; attrs)
+ {
+ static if (is(a == attribute))
+ {
+ enum hasUDA = true;
+ }
+ }
+
+ static if (!__traits(compiles, (hasUDA == true)))
+ enum hasUDA = false;
+}
+
+unittest
+{
+ struct SomeUDA{}
+
+ struct Test
+ {
+ int woUDA;
+ @SomeUDA int withUDA;
+ }
+
+ static assert(hasUDA!(Test.withUDA, SomeUDA));
+ static assert(!hasUDA!(Test.woUDA, SomeUDA));
+}
diff --git a/libphobos/libdruntime/rt/util/utf.d b/libphobos/libdruntime/core/internal/utf.d
index 55869b3..ca0f7f5 100644
--- a/libphobos/libdruntime/rt/util/utf.d
+++ b/libphobos/libdruntime/core/internal/utf.d
@@ -14,12 +14,12 @@
* $(LINK http://anubis.dkuug.dk/JTC1/SC2/WG2/docs/n1335)
*
* Copyright: Copyright Digital Mars 2003 - 2016.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Walter Bright, Sean Kelly
- * Source: $(DRUNTIMESRC src/rt/util/_utf.d)
+ * Source: $(DRUNTIMESRC core/internal/_utf.d)
*/
-module rt.util.utf;
+module core.internal.utf;
extern (C) void onUnicodeError( string msg, size_t idx, string file = __FILE__, size_t line = __LINE__ ) @safe pure;
@@ -84,7 +84,7 @@ static immutable UTF8stride =
 * 0xFF meaning s[i] is not the start of a UTF-8 sequence.
*/
@safe @nogc pure nothrow
-uint stride(in char[] s, size_t i)
+uint stride(const scope char[] s, size_t i)
{
return UTF8stride[s[i]];
}
@@ -94,7 +94,7 @@ uint stride(in char[] s, size_t i)
* in string s.
*/
@safe @nogc pure nothrow
-uint stride(in wchar[] s, size_t i)
+uint stride(const scope wchar[] s, size_t i)
{ uint u = s[i];
return 1 + (u >= 0xD800 && u <= 0xDBFF);
}
@@ -105,7 +105,7 @@ uint stride(in wchar[] s, size_t i)
* Returns: The return value will always be 1.
*/
@safe @nogc pure nothrow
-uint stride(in dchar[] s, size_t i)
+uint stride(const scope dchar[] s, size_t i)
{
return 1;
}
@@ -116,7 +116,7 @@ uint stride(in dchar[] s, size_t i)
* determine the number of UCS characters up to that index i.
*/
@safe pure
-size_t toUCSindex(in char[] s, size_t i)
+size_t toUCSindex(const scope char[] s, size_t i)
{
size_t n;
size_t j;
@@ -135,7 +135,7 @@ size_t toUCSindex(in char[] s, size_t i)
/** ditto */
@safe pure
-size_t toUCSindex(in wchar[] s, size_t i)
+size_t toUCSindex(const scope wchar[] s, size_t i)
{
size_t n;
size_t j;
@@ -154,7 +154,7 @@ size_t toUCSindex(in wchar[] s, size_t i)
/** ditto */
@safe @nogc pure nothrow
-size_t toUCSindex(in dchar[] s, size_t i)
+size_t toUCSindex(const scope dchar[] s, size_t i)
{
return i;
}
@@ -163,7 +163,7 @@ size_t toUCSindex(in dchar[] s, size_t i)
* Given a UCS index n into an array of characters s[], return the UTF index.
*/
@safe pure
-size_t toUTFindex(in char[] s, size_t n)
+size_t toUTFindex(const scope char[] s, size_t n)
{
size_t i;
@@ -179,7 +179,7 @@ size_t toUTFindex(in char[] s, size_t n)
/** ditto */
@safe @nogc pure nothrow
-size_t toUTFindex(in wchar[] s, size_t n)
+size_t toUTFindex(const scope wchar[] s, size_t n)
{
size_t i;
@@ -193,7 +193,7 @@ size_t toUTFindex(in wchar[] s, size_t n)
/** ditto */
@safe @nogc pure nothrow
-size_t toUTFindex(in dchar[] s, size_t n)
+size_t toUTFindex(const scope dchar[] s, size_t n)
{
return n;
}
@@ -206,7 +206,7 @@ size_t toUTFindex(in dchar[] s, size_t n)
* thrown and idx remains unchanged.
*/
@safe pure
-dchar decode(in char[] s, ref size_t idx)
+dchar decode(const scope char[] s, ref size_t idx)
in
{
assert(idx >= 0 && idx < s.length);
@@ -215,7 +215,7 @@ dchar decode(in char[] s, ref size_t idx)
{
assert(isValidDchar(result));
}
- body
+ do
{
size_t len = s.length;
dchar V;
@@ -347,7 +347,7 @@ unittest
/** ditto */
@safe pure
-dchar decode(in wchar[] s, ref size_t idx)
+dchar decode(const scope wchar[] s, ref size_t idx)
in
{
assert(idx >= 0 && idx < s.length);
@@ -356,7 +356,7 @@ dchar decode(in wchar[] s, ref size_t idx)
{
assert(isValidDchar(result));
}
- body
+ do
{
string msg;
dchar V;
@@ -405,12 +405,12 @@ dchar decode(in wchar[] s, ref size_t idx)
/** ditto */
@safe pure
-dchar decode(in dchar[] s, ref size_t idx)
+dchar decode(const scope dchar[] s, ref size_t idx)
in
{
assert(idx >= 0 && idx < s.length);
}
- body
+ do
{
size_t i = idx;
dchar c = s[i];
@@ -437,7 +437,7 @@ void encode(ref char[] s, dchar c)
{
assert(isValidDchar(c));
}
- body
+ do
{
char[] r = s;
@@ -447,7 +447,7 @@ void encode(ref char[] s, dchar c)
}
else
{
- char[4] buf;
+ char[4] buf = void;
uint L;
if (c <= 0x7FF)
@@ -506,7 +506,7 @@ void encode(ref wchar[] s, dchar c)
{
assert(isValidDchar(c));
}
- body
+ do
{
wchar[] r = s;
@@ -516,7 +516,7 @@ void encode(ref wchar[] s, dchar c)
}
else
{
- wchar[2] buf;
+ wchar[2] buf = void;
buf[0] = cast(wchar) ((((c - 0x10000) >> 10) & 0x3FF) + 0xD800);
buf[1] = cast(wchar) (((c - 0x10000) & 0x3FF) + 0xDC00);
@@ -532,7 +532,7 @@ void encode(ref dchar[] s, dchar c)
{
assert(isValidDchar(c));
}
- body
+ do
{
s ~= c;
}
@@ -571,7 +571,7 @@ Checks to see if string is well formed or not. $(D S) can be an array
if it is not. Use to check all untrusted input for correctness.
*/
@safe pure
-void validate(S)(in S s)
+void validate(S)(const scope S s)
{
auto len = s.length;
for (size_t i = 0; i < len; )
@@ -583,12 +583,12 @@ void validate(S)(in S s)
/* =================== Conversion to UTF8 ======================= */
@safe pure nothrow @nogc
-char[] toUTF8(char[] buf, dchar c)
+char[] toUTF8(return char[] buf, dchar c)
in
{
assert(isValidDchar(c));
}
- body
+ do
{
if (c <= 0x7F)
{
@@ -623,19 +623,19 @@ char[] toUTF8(char[] buf, dchar c)
* Encodes string s into UTF-8 and returns the encoded string.
*/
@safe pure nothrow
-string toUTF8(string s)
+string toUTF8(return string s)
in
{
validate(s);
}
- body
+ do
{
return s;
}
/** ditto */
@trusted pure
-string toUTF8(in wchar[] s)
+string toUTF8(const scope wchar[] s)
{
char[] r;
size_t i;
@@ -663,7 +663,7 @@ string toUTF8(in wchar[] s)
/** ditto */
@trusted pure
-string toUTF8(in dchar[] s)
+string toUTF8(const scope dchar[] s)
{
char[] r;
size_t i;
@@ -692,12 +692,12 @@ string toUTF8(in dchar[] s)
/* =================== Conversion to UTF16 ======================= */
@safe pure nothrow @nogc
-wchar[] toUTF16(wchar[] buf, dchar c)
+wchar[] toUTF16(return wchar[] buf, dchar c)
in
{
assert(isValidDchar(c));
}
- body
+ do
{
if (c <= 0xFFFF)
{
@@ -718,13 +718,19 @@ wchar[] toUTF16(wchar[] buf, dchar c)
* an LPWSTR or LPCWSTR argument.
*/
@trusted pure
-wstring toUTF16(in char[] s)
+wstring toUTF16(const scope char[] s)
{
wchar[] r;
size_t slen = s.length;
- r.length = slen;
- r.length = 0;
+ if (!__ctfe)
+ {
+ // Reserve still does a lot if slen is zero.
+ // Return early for that case.
+ if (0 == slen)
+ return ""w;
+ r.reserve(slen);
+ }
for (size_t i = 0; i < slen; )
{
dchar c = s[i];
@@ -745,13 +751,19 @@ wstring toUTF16(in char[] s)
alias const(wchar)* wptr;
/** ditto */
@safe pure
-wptr toUTF16z(in char[] s)
+wptr toUTF16z(const scope char[] s)
{
wchar[] r;
size_t slen = s.length;
- r.length = slen + 1;
- r.length = 0;
+ if (!__ctfe)
+ {
+ // Reserve still does a lot if slen is zero.
+ // Return early for that case.
+ if (0 == slen)
+ return &"\0"w[0];
+ r.reserve(slen + 1);
+ }
for (size_t i = 0; i < slen; )
{
dchar c = s[i];
@@ -772,25 +784,31 @@ wptr toUTF16z(in char[] s)
/** ditto */
@safe pure nothrow
-wstring toUTF16(wstring s)
+wstring toUTF16(return wstring s)
in
{
validate(s);
}
- body
+ do
{
return s;
}
/** ditto */
@trusted pure nothrow
-wstring toUTF16(in dchar[] s)
+wstring toUTF16(const scope dchar[] s)
{
wchar[] r;
size_t slen = s.length;
- r.length = slen;
- r.length = 0;
+ if (!__ctfe)
+ {
+ // Reserve still does a lot if slen is zero.
+ // Return early for that case.
+ if (0 == slen)
+ return ""w;
+ r.reserve(slen);
+ }
for (size_t i = 0; i < slen; i++)
{
encode(r, s[i]);
@@ -804,7 +822,7 @@ wstring toUTF16(in dchar[] s)
* Encodes string s into UTF-32 and returns the encoded string.
*/
@trusted pure
-dstring toUTF32(in char[] s)
+dstring toUTF32(const scope char[] s)
{
dchar[] r;
size_t slen = s.length;
@@ -825,7 +843,7 @@ dstring toUTF32(in char[] s)
/** ditto */
@trusted pure
-dstring toUTF32(in wchar[] s)
+dstring toUTF32(const scope wchar[] s)
{
dchar[] r;
size_t slen = s.length;
@@ -846,12 +864,12 @@ dstring toUTF32(in wchar[] s)
/** ditto */
@safe pure nothrow
-dstring toUTF32(dstring s)
+dstring toUTF32(return dstring s)
in
{
validate(s);
}
- body
+ do
{
return s;
}
diff --git a/libphobos/libdruntime/rt/util/array.d b/libphobos/libdruntime/core/internal/util/array.d
index b2cfb8d..bc9b72c 100644
--- a/libphobos/libdruntime/rt/util/array.d
+++ b/libphobos/libdruntime/core/internal/util/array.d
@@ -1,12 +1,12 @@
/**
-Array utilities.
-
-Copyright: Denis Shelomovskij 2013
-License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0).
-Authors: Denis Shelomovskij
-Source: $(DRUNTIMESRC src/rt/util/_array.d)
-*/
-module rt.util.array;
+ * Array utilities.
+ *
+ * Copyright: Denis Shelomovskij 2013
+ * License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * Authors: Denis Shelomovskij
+ * Source: $(DRUNTIMESRC core/internal/util/_array.d)
+ */
+module core.internal.util.array;
import core.internal.string;
@@ -16,15 +16,15 @@ import core.stdc.stdint;
@safe /* pure dmd @@@BUG11461@@@ */ nothrow:
void enforceTypedArraysConformable(T)(const char[] action,
- const T[] a1, const T[] a2, in bool allowOverlap = false)
+ const T[] a1, const T[] a2, const bool allowOverlap = false)
{
_enforceSameLength(action, a1.length, a2.length);
if (!allowOverlap)
_enforceNoOverlap(action, arrayToPtr(a1), arrayToPtr(a2), T.sizeof * a1.length);
}
-void enforceRawArraysConformable(const char[] action, in size_t elementSize,
- const void[] a1, const void[] a2, in bool allowOverlap = false)
+void enforceRawArraysConformable(const char[] action, const size_t elementSize,
+ const void[] a1, const void[] a2, const bool allowOverlap = false)
{
_enforceSameLength(action, a1.length, a2.length);
if (!allowOverlap)
@@ -32,7 +32,7 @@ void enforceRawArraysConformable(const char[] action, in size_t elementSize,
}
private void _enforceSameLength(const char[] action,
- in size_t length1, in size_t length2)
+ const size_t length1, const size_t length2)
{
if (length1 == length2)
return;
@@ -41,14 +41,14 @@ private void _enforceSameLength(const char[] action,
string msg = "Array lengths don't match for ";
msg ~= action;
msg ~= ": ";
- msg ~= length1.unsignedToTempString(tmpBuff, 10);
+ msg ~= length1.unsignedToTempString(tmpBuff);
msg ~= " != ";
- msg ~= length2.unsignedToTempString(tmpBuff, 10);
- throw new Error(msg);
+ msg ~= length2.unsignedToTempString(tmpBuff);
+ assert(0, msg);
}
private void _enforceNoOverlap(const char[] action,
- uintptr_t ptr1, uintptr_t ptr2, in size_t bytes)
+ uintptr_t ptr1, uintptr_t ptr2, const size_t bytes)
{
const d = ptr1 > ptr2 ? ptr1 - ptr2 : ptr2 - ptr1;
if (d >= bytes)
@@ -59,10 +59,10 @@ private void _enforceNoOverlap(const char[] action,
string msg = "Overlapping arrays in ";
msg ~= action;
msg ~= ": ";
- msg ~= overlappedBytes.unsignedToTempString(tmpBuff, 10);
+ msg ~= overlappedBytes.unsignedToTempString(tmpBuff);
msg ~= " byte(s) overlap of ";
- msg ~= bytes.unsignedToTempString(tmpBuff, 10);
- throw new Error(msg);
+ msg ~= bytes.unsignedToTempString(tmpBuff);
+ assert(0, msg);
}
private uintptr_t arrayToPtr(const void[] array) @trusted
diff --git a/libphobos/libdruntime/core/internal/util/math.d b/libphobos/libdruntime/core/internal/util/math.d
new file mode 100644
index 0000000..416e370
--- /dev/null
+++ b/libphobos/libdruntime/core/internal/util/math.d
@@ -0,0 +1,53 @@
+// Written in the D programming language
+
+/**
+ * Internal math utilities.
+ *
+ * Copyright: The D Language Foundation 2021.
+ * License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * Authors: Luís Ferreira
+ * Source: $(DRUNTIMESRC core/internal/util/_math.d)
+ */
+module core.internal.util.math;
+
+/**
+ * Calculates the maximum of the passed arguments
+ * Params:
+ * a = first value to select the maximum from
+ * b = second value to select the maximum from
+ * Returns: The maximum of the passed-in values.
+ */
+T max(T)(T a, T b) pure nothrow @nogc @safe
+{
+ return b > a ? b : a;
+}
+
+/**
+ * Calculates the minimum of the passed arguments
+ * Params:
+ * a = first value to select the minimum from
+ * b = second value to select the minimum from
+ * Returns: The minimum of the passed-in values.
+ */
+T min(T)(T a, T b) pure nothrow @nogc @safe
+{
+ return b < a ? b : a;
+}
+
+///
+@safe pure @nogc nothrow
+unittest
+{
+ assert(max(1,3) == 3);
+ assert(max(3,1) == 3);
+ assert(max(1,1) == 1);
+}
+
+///
+@safe pure @nogc nothrow
+unittest
+{
+ assert(min(1,3) == 1);
+ assert(min(3,1) == 1);
+ assert(min(1,1) == 1);
+}
diff --git a/libphobos/libdruntime/core/lifetime.d b/libphobos/libdruntime/core/lifetime.d
new file mode 100644
index 0000000..fc47b1d
--- /dev/null
+++ b/libphobos/libdruntime/core/lifetime.d
@@ -0,0 +1,2201 @@
+module core.lifetime;
+
+import core.internal.attributes : betterC;
+
+// emplace
+/**
+Given a pointer `chunk` to uninitialized memory (but already typed
+as `T`), constructs an object of non-`class` type `T` at that
+address. If `T` is a class, initializes the class reference to null.
+Returns: A pointer to the newly constructed object (which is the same
+as `chunk`).
+ */
+T* emplace(T)(T* chunk) @safe pure nothrow
+{
+ import core.internal.lifetime : emplaceRef;
+
+ emplaceRef!T(*chunk);
+ return chunk;
+}
+
+///
+@betterC
+@system unittest
+{
+ static struct S
+ {
+ int i = 42;
+ }
+ S[2] s2 = void;
+ emplace(&s2);
+ assert(s2[0].i == 42 && s2[1].i == 42);
+}
+
+///
+@system unittest
+{
+ interface I {}
+ class K : I {}
+
+ K k = void;
+ emplace(&k);
+ assert(k is null);
+
+ I i = void;
+ emplace(&i);
+ assert(i is null);
+}
+
+/**
+Given a pointer `chunk` to uninitialized memory (but already typed
+as a non-class type `T`), constructs an object of type `T` at
+that address from arguments `args`. If `T` is a class, initializes
+the class reference to `args[0]`.
+This function can be `@trusted` if the corresponding constructor of
+`T` is `@safe`.
+Returns: A pointer to the newly constructed object (which is the same
+as `chunk`).
+ */
+T* emplace(T, Args...)(T* chunk, auto ref Args args)
+ if (is(T == struct) || Args.length == 1)
+{
+ import core.internal.lifetime : emplaceRef;
+
+ emplaceRef!T(*chunk, forward!args);
+ return chunk;
+}
+
+///
+@betterC
+@system unittest
+{
+ int a;
+ int b = 42;
+ assert(*emplace!int(&a, b) == 42);
+}
+
+@betterC
+@system unittest
+{
+ shared int i;
+ emplace(&i, 42);
+ assert(i == 42);
+}
+
+/**
+Given a raw memory area `chunk` (but already typed as a class type `T`),
+constructs an object of `class` type `T` at that address. The constructor
+is passed the arguments `Args`.
+If `T` is an inner class whose `outer` field can be used to access an instance
+of the enclosing class, then `Args` must not be empty, and the first member of it
+must be a valid initializer for that `outer` field. Correct initialization of
+this field is essential to access members of the outer class inside `T` methods.
+Note:
+This function is `@safe` if the corresponding constructor of `T` is `@safe`.
+Returns: The newly constructed object.
+ */
+T emplace(T, Args...)(T chunk, auto ref Args args)
+ if (is(T == class))
+{
+ import core.internal.traits : isInnerClass;
+
+ static assert(!__traits(isAbstractClass, T), T.stringof ~
+ " is abstract and it can't be emplaced");
+
+ // Initialize the object in its pre-ctor state
+ enum classSize = __traits(classInstanceSize, T);
+ (() @trusted => (cast(void*) chunk)[0 .. classSize] = typeid(T).initializer[])();
+
+ static if (isInnerClass!T)
+ {
+ static assert(Args.length > 0,
+ "Initializing an inner class requires a pointer to the outer class");
+ static assert(is(Args[0] : typeof(T.outer)),
+ "The first argument must be a pointer to the outer class");
+
+ chunk.outer = args[0];
+ alias args1 = args[1..$];
+ }
+ else alias args1 = args;
+
+ // Call the ctor if any
+ static if (is(typeof(chunk.__ctor(forward!args1))))
+ {
+ // T defines a genuine constructor accepting args
+ // Go the classic route: write .init first, then call ctor
+ chunk.__ctor(forward!args1);
+ }
+ else
+ {
+ static assert(args1.length == 0 && !is(typeof(&T.__ctor)),
+ "Don't know how to initialize an object of type "
+ ~ T.stringof ~ " with arguments " ~ typeof(args1).stringof);
+ }
+ return chunk;
+}
+
+///
+@safe unittest
+{
+ () @safe {
+ class SafeClass
+ {
+ int x;
+ @safe this(int x) { this.x = x; }
+ }
+
+ auto buf = new void[__traits(classInstanceSize, SafeClass)];
+ auto support = (() @trusted => cast(SafeClass)(buf.ptr))();
+ auto safeClass = emplace!SafeClass(support, 5);
+ assert(safeClass.x == 5);
+
+ class UnsafeClass
+ {
+ int x;
+ @system this(int x) { this.x = x; }
+ }
+
+ auto buf2 = new void[__traits(classInstanceSize, UnsafeClass)];
+ auto support2 = (() @trusted => cast(UnsafeClass)(buf2.ptr))();
+ static assert(!__traits(compiles, emplace!UnsafeClass(support2, 5)));
+ static assert(!__traits(compiles, emplace!UnsafeClass(buf2, 5)));
+ }();
+}
+
+@safe unittest
+{
+ class Outer
+ {
+ int i = 3;
+ class Inner
+ {
+ @safe auto getI() { return i; }
+ }
+ }
+ auto outerBuf = new void[__traits(classInstanceSize, Outer)];
+ auto outerSupport = (() @trusted => cast(Outer)(outerBuf.ptr))();
+
+ auto innerBuf = new void[__traits(classInstanceSize, Outer.Inner)];
+ auto innerSupport = (() @trusted => cast(Outer.Inner)(innerBuf.ptr))();
+
+ auto inner = innerSupport.emplace!(Outer.Inner)(outerSupport.emplace!Outer);
+ assert(inner.getI == 3);
+}
+
+/**
+Given a raw memory area `chunk`, constructs an object of `class` type `T` at
+that address. The constructor is passed the arguments `Args`.
+If `T` is an inner class whose `outer` field can be used to access an instance
+of the enclosing class, then `Args` must not be empty, and the first member of it
+must be a valid initializer for that `outer` field. Correct initialization of
+this field is essential to access members of the outer class inside `T` methods.
+Preconditions:
+`chunk` must be at least as large as `T` needs and should have an alignment
+multiple of `T`'s alignment. (The size of a `class` instance is obtained by using
+$(D __traits(classInstanceSize, T))).
+Note:
+This function can be `@trusted` if the corresponding constructor of `T` is `@safe`.
+Returns: The newly constructed object.
+ */
+T emplace(T, Args...)(void[] chunk, auto ref Args args)
+ if (is(T == class))
+{
+ import core.internal.traits : maxAlignment;
+
+ enum classSize = __traits(classInstanceSize, T);
+ assert(chunk.length >= classSize, "chunk size too small.");
+
+ enum alignment = maxAlignment!(void*, typeof(T.tupleof));
+ assert((cast(size_t) chunk.ptr) % alignment == 0, "chunk is not aligned.");
+
+ return emplace!T(cast(T)(chunk.ptr), forward!args);
+}
+
+///
+@system unittest
+{
+ static class C
+ {
+ int i;
+ this(int i){this.i = i;}
+ }
+ auto buf = new void[__traits(classInstanceSize, C)];
+ auto c = emplace!C(buf, 5);
+ assert(c.i == 5);
+}
+
+@system unittest
+{
+ class Outer
+ {
+ int i = 3;
+ class Inner
+ {
+ auto getI() { return i; }
+ }
+ }
+ auto outerBuf = new void[__traits(classInstanceSize, Outer)];
+ auto innerBuf = new void[__traits(classInstanceSize, Outer.Inner)];
+ auto inner = innerBuf.emplace!(Outer.Inner)(outerBuf.emplace!Outer);
+ assert(inner.getI == 3);
+}
+
+@nogc pure nothrow @safe unittest
+{
+ static class __conv_EmplaceTestClass
+ {
+ @nogc @safe pure nothrow:
+ int i = 3;
+ this(int i)
+ {
+ assert(this.i == 3);
+ this.i = 10 + i;
+ }
+ this(ref int i)
+ {
+ assert(this.i == 3);
+ this.i = 20 + i;
+ }
+ this(int i, ref int j)
+ {
+ assert(this.i == 3 && i == 5 && j == 6);
+ this.i = i;
+ ++j;
+ }
+ }
+
+ int var = 6;
+ align(__conv_EmplaceTestClass.alignof) ubyte[__traits(classInstanceSize, __conv_EmplaceTestClass)] buf;
+ auto support = (() @trusted => cast(__conv_EmplaceTestClass)(buf.ptr))();
+
+ auto fromRval = emplace!__conv_EmplaceTestClass(support, 1);
+ assert(fromRval.i == 11);
+
+ auto fromLval = emplace!__conv_EmplaceTestClass(support, var);
+ assert(fromLval.i == 26);
+
+ auto k = emplace!__conv_EmplaceTestClass(support, 5, var);
+ assert(k.i == 5);
+ assert(var == 7);
+}
+
+/**
+Given a raw memory area `chunk`, constructs an object of non-$(D
+class) type `T` at that address. The constructor is passed the
+arguments `args`, if any.
+Preconditions:
+`chunk` must be at least as large
+as `T` needs and should have an alignment multiple of `T`'s
+alignment.
+Note:
+This function can be `@trusted` if the corresponding constructor of
+`T` is `@safe`.
+Returns: A pointer to the newly constructed object.
+ */
+T* emplace(T, Args...)(void[] chunk, auto ref Args args)
+ if (!is(T == class))
+{
+ import core.internal.traits : Unqual;
+ import core.internal.lifetime : emplaceRef;
+
+ assert(chunk.length >= T.sizeof, "chunk size too small.");
+ assert((cast(size_t) chunk.ptr) % T.alignof == 0, "emplace: Chunk is not aligned.");
+
+ emplaceRef!(T, Unqual!T)(*cast(Unqual!T*) chunk.ptr, forward!args);
+ return cast(T*) chunk.ptr;
+}
+
+///
+@betterC
+@system unittest
+{
+ struct S
+ {
+ int a, b;
+ }
+ void[S.sizeof] buf = void;
+ S s;
+ s.a = 42;
+ s.b = 43;
+ auto s1 = emplace!S(buf, s);
+ assert(s1.a == 42 && s1.b == 43);
+}
+
+// Bulk of emplace unittests starts here
+
+@betterC
+@system unittest /* unions */
+{
+ static union U
+ {
+ string a;
+ int b;
+ struct
+ {
+ long c;
+ int[] d;
+ }
+ }
+ U u1 = void;
+ U u2 = { "hello" };
+ emplace(&u1, u2);
+ assert(u1.a == "hello");
+}
+
+@system unittest // bugzilla 15772
+{
+ abstract class Foo {}
+ class Bar: Foo {}
+ void[] memory;
+ // test in emplaceInitializer
+ static assert(!is(typeof(emplace!Foo(cast(Foo*) memory.ptr))));
+ static assert( is(typeof(emplace!Bar(cast(Bar*) memory.ptr))));
+ // test in the emplace overload that takes void[]
+ static assert(!is(typeof(emplace!Foo(memory))));
+ static assert( is(typeof(emplace!Bar(memory))));
+}
+
+@betterC
+@system unittest
+{
+ struct S { @disable this(); }
+ S s = void;
+ static assert(!__traits(compiles, emplace(&s)));
+ emplace(&s, S.init);
+}
+
+@betterC
+@system unittest
+{
+ struct S1
+ {}
+
+ struct S2
+ {
+ void opAssign(S2);
+ }
+
+ S1 s1 = void;
+ S2 s2 = void;
+ S1[2] as1 = void;
+ S2[2] as2 = void;
+ emplace(&s1);
+ emplace(&s2);
+ emplace(&as1);
+ emplace(&as2);
+}
+
+@system unittest
+{
+ static struct S1
+ {
+ this(this) @disable;
+ }
+ static struct S2
+ {
+ this() @disable;
+ }
+ S1[2] ss1 = void;
+ S2[2] ss2 = void;
+ emplace(&ss1);
+ static assert(!__traits(compiles, emplace(&ss2)));
+ S1 s1 = S1.init;
+ S2 s2 = S2.init;
+ static assert(!__traits(compiles, emplace(&ss1, s1)));
+ emplace(&ss2, s2);
+}
+
+@system unittest
+{
+ struct S
+ {
+ immutable int i;
+ }
+ S s = void;
+ S[2] ss1 = void;
+ S[2] ss2 = void;
+ emplace(&s, 5);
+ assert(s.i == 5);
+ emplace(&ss1, s);
+ assert(ss1[0].i == 5 && ss1[1].i == 5);
+ emplace(&ss2, ss1);
+ assert(ss2 == ss1);
+}
+
+//Start testing emplace-args here
+
+@system unittest
+{
+ interface I {}
+ class K : I {}
+
+ K k = null, k2 = new K;
+ assert(k !is k2);
+ emplace!K(&k, k2);
+ assert(k is k2);
+
+ I i = null;
+ assert(i !is k);
+ emplace!I(&i, k);
+ assert(i is k);
+}
+
+@system unittest
+{
+ static struct S
+ {
+ int i = 5;
+ void opAssign(S){assert(0);}
+ }
+ S[2] sa = void;
+ S[2] sb;
+ emplace(&sa, sb);
+ assert(sa[0].i == 5 && sa[1].i == 5);
+}
+
+//Start testing emplace-struct here
+
+// Test constructor branch
+@betterC
+@system unittest
+{
+ struct S
+ {
+ double x = 5, y = 6;
+ this(int a, int b)
+ {
+ assert(x == 5 && y == 6);
+ x = a;
+ y = b;
+ }
+ }
+
+ void[S.sizeof] s1 = void;
+ auto s2 = S(42, 43);
+ assert(*emplace!S(cast(S*) s1.ptr, s2) == s2);
+ assert(*emplace!S(cast(S*) s1, 44, 45) == S(44, 45));
+}
+
+@system unittest
+{
+ static struct __conv_EmplaceTest
+ {
+ int i = 3;
+ this(int i)
+ {
+ assert(this.i == 3 && i == 5);
+ this.i = i;
+ }
+ this(int i, ref int j)
+ {
+ assert(i == 5 && j == 6);
+ this.i = i;
+ ++j;
+ }
+
+ @disable:
+ this();
+ this(this);
+ void opAssign();
+ }
+
+ __conv_EmplaceTest k = void;
+ emplace(&k, 5);
+ assert(k.i == 5);
+
+ int var = 6;
+ __conv_EmplaceTest x = void;
+ emplace(&x, 5, var);
+ assert(x.i == 5);
+ assert(var == 7);
+
+ var = 6;
+ auto z = emplace!__conv_EmplaceTest(new void[__conv_EmplaceTest.sizeof], 5, var);
+ assert(z.i == 5);
+ assert(var == 7);
+}
+
+// Test matching fields branch
+@betterC
+@system unittest
+{
+ struct S { uint n; }
+ S s;
+ emplace!S(&s, 2U);
+ assert(s.n == 2);
+}
+
+@betterC
+@safe unittest
+{
+ struct S { int a, b; this(int){} }
+ S s;
+ static assert(!__traits(compiles, emplace!S(&s, 2, 3)));
+}
+
+@betterC
+@system unittest
+{
+ struct S { int a, b = 7; }
+ S s1 = void, s2 = void;
+
+ emplace!S(&s1, 2);
+ assert(s1.a == 2 && s1.b == 7);
+
+ emplace!S(&s2, 2, 3);
+ assert(s2.a == 2 && s2.b == 3);
+}
+
+//opAssign
+@betterC
+@system unittest
+{
+ static struct S
+ {
+ int i = 5;
+ void opAssign(int){assert(0);}
+ void opAssign(S){assert(0);}
+ }
+ S sa1 = void;
+ S sa2 = void;
+ S sb1 = S(1);
+ emplace(&sa1, sb1);
+ emplace(&sa2, 2);
+ assert(sa1.i == 1);
+ assert(sa2.i == 2);
+}
+
+//postblit precedence
+@betterC
+@system unittest
+{
+ //Works, but breaks in "-w -O" because of @@@9332@@@.
+ //Uncomment test when 9332 is fixed.
+ static struct S
+ {
+ int i;
+
+ this(S other){assert(false);}
+ this(int i){this.i = i;}
+ this(this){}
+ }
+ S a = void;
+ assert(is(typeof({S b = a;}))); //Postblit
+ assert(is(typeof({S b = S(a);}))); //Constructor
+ auto b = S(5);
+ emplace(&a, b);
+ assert(a.i == 5);
+
+ static struct S2
+ {
+ int* p;
+ this(const S2){}
+ }
+ static assert(!is(immutable S2 : S2));
+ S2 s2 = void;
+ immutable is2 = (immutable S2).init;
+ emplace(&s2, is2);
+}
+
+//nested structs and postblit
+@system unittest
+{
+ static struct S
+ {
+ int* p;
+ this(int i){p = [i].ptr;}
+ this(this)
+ {
+ if (p)
+ p = [*p].ptr;
+ }
+ }
+ static struct SS
+ {
+ S s;
+ void opAssign(const SS)
+ {
+ assert(0);
+ }
+ }
+ SS ssa = void;
+ SS ssb = SS(S(5));
+ emplace(&ssa, ssb);
+ assert(*ssa.s.p == 5);
+ assert(ssa.s.p != ssb.s.p);
+}
+
+//disabled postblit
+@betterC
+@system unittest
+{
+ static struct S1
+ {
+ int i;
+ @disable this(this);
+ }
+ S1 s1 = void;
+ emplace(&s1, 1);
+ assert(s1.i == 1);
+ static assert(!__traits(compiles, emplace(&s1, s1))); // copy disabled
+ static assert(__traits(compiles, emplace(&s1, move(s1)))); // move not affected
+
+ static struct S2
+ {
+ int i;
+ @disable this(this);
+ this(ref S2){}
+ }
+ S2 s2 = void;
+ //static assert(!__traits(compiles, emplace(&s2, 1)));
+ emplace(&s2, S2.init);
+
+ static struct SS1
+ {
+ S1 s;
+ }
+ SS1 ss1 = void;
+ emplace(&ss1);
+ static assert(!__traits(compiles, emplace(&ss1, ss1))); // copying disabled
+ static assert(__traits(compiles, emplace(&ss1, move(ss1)))); // move unaffected
+
+ static struct SS2
+ {
+ S2 s;
+ }
+ SS2 ss2 = void;
+ emplace(&ss2);
+ static assert(!__traits(compiles, emplace(&ss2, ss2))); // copying disabled
+ static assert(__traits(compiles, emplace(&ss2, SS2.init))); // move is OK
+
+
+ // SS1 sss1 = s1; //This doesn't compile
+ // SS1 sss1 = SS1(s1); //This doesn't compile
+ // So emplace shouldn't compile either
+ static assert(!__traits(compiles, emplace(&sss1, s1)));
+ static assert(!__traits(compiles, emplace(&sss2, s2)));
+}
+
+//Immutability
+@betterC
+@system unittest
+{
+ //Castable immutability
+ {
+ static struct S1
+ {
+ int i;
+ }
+ static assert(is( immutable(S1) : S1));
+ S1 sa = void;
+ auto sb = immutable(S1)(5);
+ emplace(&sa, sb);
+ assert(sa.i == 5);
+ }
+ //Un-castable immutability
+ {
+ static struct S2
+ {
+ int* p;
+ }
+ static assert(!is(immutable(S2) : S2));
+ S2 sa = void;
+ auto sb = immutable(S2)(null);
+ assert(!__traits(compiles, emplace(&sa, sb)));
+ }
+}
+
+@betterC
+@system unittest
+{
+ static struct S
+ {
+ immutable int i;
+ immutable(int)* j;
+ }
+ S s = void;
+ emplace(&s, 1, null);
+ emplace(&s, 2, &s.i);
+ assert(s is S(2, &s.i));
+}
+
+//Context pointer
+@system unittest
+{
+ int i = 0;
+ {
+ struct S1
+ {
+ void foo(){++i;}
+ }
+ S1 sa = void;
+ S1 sb;
+ emplace(&sa, sb);
+ sa.foo();
+ assert(i == 1);
+ }
+ {
+ struct S2
+ {
+ void foo(){++i;}
+ this(this){}
+ }
+ S2 sa = void;
+ S2 sb;
+ emplace(&sa, sb);
+ sa.foo();
+ assert(i == 2);
+ }
+}
+
+//Alias this
+@betterC
+@system unittest
+{
+ static struct S
+ {
+ int i;
+ }
+ //By Ref
+ {
+ static struct SS1
+ {
+ int j;
+ S s;
+ alias s this;
+ }
+ S s = void;
+ SS1 ss = SS1(1, S(2));
+ emplace(&s, ss);
+ assert(s.i == 2);
+ }
+ //By Value
+ {
+ static struct SS2
+ {
+ int j;
+ S s;
+ S foo() @property{return s;}
+ alias foo this;
+ }
+ S s = void;
+ SS2 ss = SS2(1, S(2));
+ emplace(&s, ss);
+ assert(s.i == 2);
+ }
+}
+
+version (CoreUnittest)
+{
+ //Ambiguity
+ private struct __std_conv_S
+ {
+ int i;
+ this(__std_conv_SS ss) {assert(0);}
+ static opCall(__std_conv_SS ss)
+ {
+ __std_conv_S s; s.i = ss.j;
+ return s;
+ }
+ }
+ private struct __std_conv_SS
+ {
+ int j;
+ __std_conv_S s;
+ ref __std_conv_S foo() return @property {s.i = j; return s;}
+ alias foo this;
+ }
+}
+
+@system unittest
+{
+ static assert(is(__std_conv_SS : __std_conv_S));
+ __std_conv_S s = void;
+ __std_conv_SS ss = __std_conv_SS(1);
+
+ __std_conv_S sTest1 = ss; //this calls "SS alias this" (and not "S.this(SS)")
+ emplace(&s, ss); //"alias this" should take precedence in emplace over "opCall"
+ assert(s.i == 1);
+}
+
+//Nested classes
+@system unittest
+{
+ class A{}
+ static struct S
+ {
+ A a;
+ }
+ S s1 = void;
+ S s2 = S(new A);
+ emplace(&s1, s2);
+ assert(s1.a is s2.a);
+}
+
+//safety & nothrow & CTFE
+@betterC
+@system unittest
+{
+    //emplace should be safe for anything with no elaborate opAssign
+ static struct S1
+ {
+ int i;
+ }
+ static struct S2
+ {
+ int i;
+ this(int j)@safe nothrow{i = j;}
+ }
+
+ int i;
+ S1 s1 = void;
+ S2 s2 = void;
+
+ auto pi = &i;
+ auto ps1 = &s1;
+ auto ps2 = &s2;
+
+ void foo() @safe nothrow
+ {
+ emplace(pi);
+ emplace(pi, 5);
+ emplace(ps1);
+ emplace(ps1, 5);
+ emplace(ps1, S1.init);
+ emplace(ps2);
+ emplace(ps2, 5);
+ emplace(ps2, S2.init);
+ }
+ foo();
+
+ T bar(T)() @property
+ {
+ T t/+ = void+/; //CTFE void illegal
+ emplace(&t, 5);
+ return t;
+ }
+ // CTFE
+ enum a = bar!int;
+ static assert(a == 5);
+ enum b = bar!S1;
+ static assert(b.i == 5);
+ enum c = bar!S2;
+ static assert(c.i == 5);
+ // runtime
+ auto aa = bar!int;
+ assert(aa == 5);
+ auto bb = bar!S1;
+ assert(bb.i == 5);
+ auto cc = bar!S2;
+ assert(cc.i == 5);
+}
+
+@betterC
+@system unittest
+{
+ struct S
+ {
+ int[2] get(){return [1, 2];}
+ alias get this;
+ }
+ struct SS
+ {
+ int[2] ii;
+ }
+ struct ISS
+ {
+ int[2] ii;
+ }
+ S s;
+ SS ss = void;
+ ISS iss = void;
+ emplace(&ss, s);
+ emplace(&iss, s);
+ assert(ss.ii == [1, 2]);
+ assert(iss.ii == [1, 2]);
+}
+
+//disable opAssign
+@betterC
+@system unittest
+{
+ static struct S
+ {
+ @disable void opAssign(S);
+ }
+ S s;
+ emplace(&s, S.init);
+}
+
+//opCall
+@betterC
+@system unittest
+{
+ int i;
+ //Without constructor
+ {
+ static struct S1
+ {
+ int i;
+ static S1 opCall(int*){assert(0);}
+ }
+ S1 s = void;
+ static assert(!__traits(compiles, emplace(&s, 1)));
+ }
+ //With constructor
+ {
+ static struct S2
+ {
+ int i = 0;
+ static S2 opCall(int*){assert(0);}
+ static S2 opCall(int){assert(0);}
+ this(int i){this.i = i;}
+ }
+ S2 s = void;
+ emplace(&s, 1);
+ assert(s.i == 1);
+ }
+ //With postblit ambiguity
+ {
+ static struct S3
+ {
+ int i = 0;
+ static S3 opCall(ref S3){assert(0);}
+ }
+ S3 s = void;
+ emplace(&s, S3.init);
+ }
+}
+
+//static arrays
+@system unittest
+{
+ static struct S
+ {
+ int[2] ii;
+ }
+ static struct IS
+ {
+ immutable int[2] ii;
+ }
+ int[2] ii;
+ S s = void;
+ IS ims = void;
+ ubyte ub = 2;
+ emplace(&s, ub);
+ emplace(&s, ii);
+ emplace(&ims, ub);
+ emplace(&ims, ii);
+ uint[2] uu;
+ static assert(!__traits(compiles, {S ss = S(uu);}));
+ static assert(!__traits(compiles, emplace(&s, uu)));
+}
+
+@system unittest
+{
+ int[2] sii;
+ int[2] sii2;
+ uint[2] uii;
+ uint[2] uii2;
+ emplace(&sii, 1);
+ emplace(&sii, 1U);
+ emplace(&uii, 1);
+ emplace(&uii, 1U);
+ emplace(&sii, sii2);
+ //emplace(&sii, uii2); //Sorry, this implementation doesn't know how to...
+ //emplace(&uii, sii2); //Sorry, this implementation doesn't know how to...
+ emplace(&uii, uii2);
+ emplace(&sii, sii2[]);
+ //emplace(&sii, uii2[]); //Sorry, this implementation doesn't know how to...
+ //emplace(&uii, sii2[]); //Sorry, this implementation doesn't know how to...
+ emplace(&uii, uii2[]);
+}
+
+@system unittest
+{
+ bool allowDestruction = false;
+ struct S
+ {
+ int i;
+ this(this){}
+ ~this(){assert(allowDestruction);}
+ }
+ S s = S(1);
+ S[2] ss1 = void;
+ S[2] ss2 = void;
+ S[2] ss3 = void;
+ emplace(&ss1, s);
+ emplace(&ss2, ss1);
+ emplace(&ss3, ss2[]);
+ assert(ss1[1] == s);
+ assert(ss2[1] == s);
+ assert(ss3[1] == s);
+ allowDestruction = true;
+}
+
+@system unittest
+{
+ //Checks postblit, construction, and context pointer
+ int count = 0;
+ struct S
+ {
+ this(this)
+ {
+ ++count;
+ }
+ ~this()
+ {
+ --count;
+ }
+ }
+
+ S s;
+ {
+ S[4] ss = void;
+ emplace(&ss, s);
+ assert(count == 4);
+ }
+ assert(count == 0);
+}
+
+@system unittest
+{
+ struct S
+ {
+ int i;
+ }
+ S s;
+ S[2][2][2] sss = void;
+ emplace(&sss, s);
+}
+
+@system unittest //Constness
+{
+ import core.internal.lifetime : emplaceRef;
+
+ int a = void;
+ emplaceRef!(const int)(a, 5);
+
+ immutable i = 5;
+ const(int)* p = void;
+ emplaceRef!(const int*)(p, &i);
+
+ struct S
+ {
+ int* p;
+ }
+ alias IS = immutable(S);
+ S s = void;
+ emplaceRef!IS(s, IS());
+ S[2] ss = void;
+ emplaceRef!(IS[2])(ss, IS());
+
+ IS[2] iss = IS.init;
+ emplaceRef!(IS[2])(ss, iss);
+ emplaceRef!(IS[2])(ss, iss[]);
+}
+
+@betterC
+pure nothrow @safe @nogc unittest
+{
+ import core.internal.lifetime : emplaceRef;
+
+ int i;
+ emplaceRef(i);
+ emplaceRef!int(i);
+ emplaceRef(i, 5);
+ emplaceRef!int(i, 5);
+}
+
+// Test attribute propagation for UDTs
+pure nothrow @safe /* @nogc */ unittest
+{
+ import core.internal.lifetime : emplaceRef;
+
+ static struct Safe
+ {
+ this(this) pure nothrow @safe @nogc {}
+ }
+
+ Safe safe = void;
+ emplaceRef(safe, Safe());
+
+ Safe[1] safeArr = [Safe()];
+ Safe[1] uninitializedSafeArr = void;
+ emplaceRef(uninitializedSafeArr, safe);
+ emplaceRef(uninitializedSafeArr, safeArr);
+
+ static struct Unsafe
+ {
+ this(this) @system {}
+ }
+
+ Unsafe unsafe = void;
+ static assert(!__traits(compiles, emplaceRef(unsafe, unsafe)));
+
+ Unsafe[1] unsafeArr = [Unsafe()];
+ Unsafe[1] uninitializedUnsafeArr = void;
+ static assert(!__traits(compiles, emplaceRef(uninitializedUnsafeArr, unsafe)));
+ static assert(!__traits(compiles, emplaceRef(uninitializedUnsafeArr, unsafeArr)));
+}
+
+@betterC
+@system unittest
+{
+ // Issue 15313
+ static struct Node
+ {
+ int payload;
+ Node* next;
+ uint refs;
+ }
+
+ import core.stdc.stdlib : malloc;
+ void[] buf = malloc(Node.sizeof)[0 .. Node.sizeof];
+
+ const Node* n = emplace!(const Node)(buf, 42, null, 10);
+ assert(n.payload == 42);
+ assert(n.next == null);
+ assert(n.refs == 10);
+}
+
+@system unittest
+{
+ class A
+ {
+ int x = 5;
+ int y = 42;
+ this(int z)
+ {
+ assert(x == 5 && y == 42);
+ x = y = z;
+ }
+ }
+ void[] buf;
+
+ static align(A.alignof) byte[__traits(classInstanceSize, A)] sbuf;
+ buf = sbuf[];
+ auto a = emplace!A(buf, 55);
+ assert(a.x == 55 && a.y == 55);
+
+ // emplace in bigger buffer
+ buf = new byte[](__traits(classInstanceSize, A) + 10);
+ a = emplace!A(buf, 55);
+ assert(a.x == 55 && a.y == 55);
+
+ // need ctor args
+ static assert(!is(typeof(emplace!A(buf))));
+}
+
+//constructor arguments forwarding
+@betterC
+@system unittest
+{
+ static struct S
+ {
+ this()(auto ref long arg)
+ {
+ // assert that arg is an lvalue
+ static assert(__traits(isRef, arg));
+ }
+        this()(auto ref double arg)
+        {
+            // assert that arg is an rvalue
+ static assert(!__traits(isRef, arg));
+ }
+ }
+ S obj = void;
+ long i;
+ emplace(&obj, i); // lvalue
+ emplace(&obj, 0.0); // rvalue
+}
+// Bulk of emplace unittests ends here
+
+/**
+ * Emplaces a copy of the specified source value into uninitialized memory,
+ * i.e., simulates `T target = source` copy-construction for cases where the
+ * target memory is already allocated and to be initialized with a copy.
+ *
+ * Params:
+ * source = value to be copied into target
+ * target = uninitialized value to be initialized with a copy of source
+ */
+void copyEmplace(S, T)(ref S source, ref T target) @system
+ if (is(immutable S == immutable T))
+{
+ import core.internal.traits : BaseElemOf, hasElaborateCopyConstructor, Unconst, Unqual;
+
+ // cannot have the following as simple template constraint due to nested-struct special case...
+ static if (!__traits(compiles, (ref S src) { T tgt = src; }))
+ {
+ alias B = BaseElemOf!T;
+ enum isNestedStruct = is(B == struct) && __traits(isNested, B);
+ static assert(isNestedStruct, "cannot copy-construct " ~ T.stringof ~ " from " ~ S.stringof);
+ }
+
+ void blit()
+ {
+ import core.stdc.string : memcpy;
+ memcpy(cast(Unqual!(T)*) &target, cast(Unqual!(T)*) &source, T.sizeof);
+ }
+
+ static if (is(T == struct))
+ {
+ static if (__traits(hasPostblit, T))
+ {
+ blit();
+ (cast() target).__xpostblit();
+ }
+ else static if (__traits(hasCopyConstructor, T))
+ {
+ emplace(cast(Unqual!(T)*) &target); // blit T.init
+ static if (__traits(isNested, T))
+ {
+ // copy context pointer
+ *(cast(void**) &target.tupleof[$-1]) = cast(void*) source.tupleof[$-1];
+ }
+ target.__ctor(source); // invoke copy ctor
+ }
+ else
+ {
+ blit(); // no opAssign
+ }
+ }
+ else static if (is(T == E[n], E, size_t n))
+ {
+ static if (hasElaborateCopyConstructor!E)
+ {
+ size_t i;
+ try
+ {
+ for (i = 0; i < n; i++)
+ copyEmplace(source[i], target[i]);
+ }
+ catch (Exception e)
+ {
+ // destroy, in reverse order, what we've constructed so far
+ while (i--)
+ destroy(*cast(Unconst!(E)*) &target[i]);
+ throw e;
+ }
+ }
+ else // trivial copy
+ {
+ blit(); // all elements at once
+ }
+ }
+ else
+ {
+ *cast(Unconst!(T)*) &target = *cast(Unconst!(T)*) &source;
+ }
+}
+
+///
+@betterC
+@system pure nothrow @nogc unittest
+{
+ int source = 123;
+ int target = void;
+ copyEmplace(source, target);
+ assert(target == 123);
+}
+
+///
+@betterC
+@system pure nothrow @nogc unittest
+{
+ immutable int[1][1] source = [ [123] ];
+ immutable int[1][1] target = void;
+ copyEmplace(source, target);
+ assert(target[0][0] == 123);
+}
+
+///
+@betterC
+@system pure nothrow @nogc unittest
+{
+ struct S
+ {
+ int x;
+ void opAssign(const scope ref S rhs) @safe pure nothrow @nogc
+ {
+ assert(0);
+ }
+ }
+
+ S source = S(42);
+ S target = void;
+ copyEmplace(source, target);
+ assert(target.x == 42);
+}
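
A further doc-style sketch of the postblit branch of copyEmplace (illustrative only; the non-doc tests further below exercise the same behaviour more thoroughly):

    void postblitRuns() @system     // hypothetical, mirrors the examples above
    {
        import core.lifetime : copyEmplace;

        static struct Tracked
        {
            int copies;
            this(this) { ++copies; }
        }

        Tracked source;
        Tracked target = void;
        copyEmplace(source, target);
        assert(target.copies == 1);   // the postblit ran exactly once
    }
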
+
+// preserve shared-ness
+@system pure nothrow unittest
+{
+ auto s = new Object();
+ auto ss = new shared Object();
+
+ Object t;
+ shared Object st;
+
+ copyEmplace(s, t);
+ assert(t is s);
+
+ copyEmplace(ss, st);
+ assert(st is ss);
+
+ static assert(!__traits(compiles, copyEmplace(s, st)));
+ static assert(!__traits(compiles, copyEmplace(ss, t)));
+}
+
+version (DigitalMars) version (X86) version (Posix) version = DMD_X86_Posix;
+
+// don't violate immutability for reference types
+@system pure nothrow unittest
+{
+ auto s = new Object();
+ auto si = new immutable Object();
+
+ Object t;
+ immutable Object ti;
+
+ copyEmplace(s, t);
+ assert(t is s);
+
+ copyEmplace(si, ti);
+ version (DMD_X86_Posix) { /* wrongly fails without -O */ } else
+ assert(ti is si);
+
+ static assert(!__traits(compiles, copyEmplace(s, ti)));
+ static assert(!__traits(compiles, copyEmplace(si, t)));
+}
+
+version (CoreUnittest)
+{
+ private void testCopyEmplace(S, T)(const scope T* expected = null)
+ {
+ S source;
+ T target = void;
+ copyEmplace(source, target);
+ if (expected)
+ assert(target == *expected);
+ else
+ {
+ T expectedCopy = source;
+ assert(target == expectedCopy);
+ }
+ }
+}
+
+// postblit
+@system pure nothrow @nogc unittest
+{
+ static struct S
+ {
+ @safe pure nothrow @nogc:
+ int x = 42;
+ this(this) { x += 10; }
+ }
+
+ testCopyEmplace!(S, S)();
+ testCopyEmplace!(immutable S, S)();
+ testCopyEmplace!(S, immutable S)();
+ testCopyEmplace!(immutable S, immutable S)();
+
+ testCopyEmplace!(S[1], S[1])();
+ testCopyEmplace!(immutable S[1], S[1])();
+
+ // copying to an immutable static array works, but `T expected = source`
+ // wrongly ignores the postblit: https://issues.dlang.org/show_bug.cgi?id=8950
+ immutable S[1] expectedImmutable = [S(52)];
+ testCopyEmplace!(S[1], immutable S[1])(&expectedImmutable);
+ testCopyEmplace!(immutable S[1], immutable S[1])(&expectedImmutable);
+}
+
+// copy constructors
+@system pure nothrow @nogc unittest
+{
+ static struct S
+ {
+ @safe pure nothrow @nogc:
+ int x = 42;
+ this(int x) { this.x = x; }
+ this(const scope ref S rhs) { x = rhs.x + 10; }
+ this(const scope ref S rhs) immutable { x = rhs.x + 20; }
+ }
+
+ testCopyEmplace!(S, S)();
+ testCopyEmplace!(immutable S, S)();
+ testCopyEmplace!(S, immutable S)();
+ testCopyEmplace!(immutable S, immutable S)();
+
+ // static arrays work, but `T expected = source` wrongly ignores copy ctors
+ // https://issues.dlang.org/show_bug.cgi?id=20365
+ S[1] expectedMutable = [S(52)];
+ immutable S[1] expectedImmutable = [immutable S(62)];
+ testCopyEmplace!(S[1], S[1])(&expectedMutable);
+ testCopyEmplace!(immutable S[1], S[1])(&expectedMutable);
+ testCopyEmplace!(S[1], immutable S[1])(&expectedImmutable);
+ testCopyEmplace!(immutable S[1], immutable S[1])(&expectedImmutable);
+}
+
+// copy constructor in nested struct
+@system pure nothrow unittest
+{
+ int copies;
+ struct S
+ {
+ @safe pure nothrow @nogc:
+ size_t x = 42;
+ this(size_t x) { this.x = x; }
+ this(const scope ref S rhs)
+ {
+ assert(x == 42); // T.init
+ x = rhs.x;
+ ++copies;
+ }
+ }
+
+ {
+ copies = 0;
+ S source = S(123);
+ immutable S target = void;
+ copyEmplace(source, target);
+ assert(target is source);
+ assert(copies == 1);
+ }
+
+ {
+ copies = 0;
+ immutable S[1] source = [immutable S(456)];
+ S[1] target = void;
+ copyEmplace(source, target);
+ assert(target[0] is source[0]);
+ assert(copies == 1);
+ }
+}
+
+// destruction of partially copied static array
+@system unittest
+{
+ static struct S
+ {
+ __gshared int[] deletions;
+ int x;
+ this(this) { if (x == 5) throw new Exception(""); }
+ ~this() { deletions ~= x; }
+ }
+
+ alias T = immutable S[3][2];
+ T source = [ [S(1), S(2), S(3)], [S(4), S(5), S(6)] ];
+ T target = void;
+ try
+ {
+ copyEmplace(source, target);
+ assert(0);
+ }
+ catch (Exception)
+ {
+ static immutable expectedDeletions = [ 4, 3, 2, 1 ];
+ version (DigitalMars)
+ {
+ assert(S.deletions == expectedDeletions ||
+ S.deletions == [ 4 ]); // FIXME: happens with -O
+ }
+ else
+ assert(S.deletions == expectedDeletions);
+ }
+}
+
+/**
+Forwards function arguments while keeping `out`, `ref`, and `lazy` on
+the parameters.
+
+Params:
+ args = a parameter list or an $(REF AliasSeq,std,meta).
+Returns:
+ An `AliasSeq` of `args` with `out`, `ref`, and `lazy` saved.
+*/
+template forward(args...)
+{
+ import core.internal.traits : AliasSeq;
+
+ static if (args.length)
+ {
+ alias arg = args[0];
+ // by ref || lazy || const/immutable
+ static if (__traits(isRef, arg) ||
+ __traits(isOut, arg) ||
+ __traits(isLazy, arg) ||
+ !is(typeof(move(arg))))
+ alias fwd = arg;
+ // (r)value
+ else
+ @property auto fwd(){ return move(arg); }
+
+ static if (args.length == 1)
+ alias forward = fwd;
+ else
+ alias forward = AliasSeq!(fwd, forward!(args[1..$]));
+ }
+ else
+ alias forward = AliasSeq!();
+}
+
+///
+@safe unittest
+{
+ class C
+ {
+ static int foo(int n) { return 1; }
+ static int foo(ref int n) { return 2; }
+ }
+
+ // with forward
+ int bar()(auto ref int x) { return C.foo(forward!x); }
+
+ // without forward
+ int baz()(auto ref int x) { return C.foo(x); }
+
+ int i;
+ assert(bar(1) == 1);
+ assert(bar(i) == 2);
+
+ assert(baz(1) == 2);
+ assert(baz(i) == 2);
+}
+
+///
+@safe unittest
+{
+ void foo(int n, ref string s) { s = null; foreach (i; 0 .. n) s ~= "Hello"; }
+
+ // forwards all arguments which are bound to parameter tuple
+ void bar(Args...)(auto ref Args args) { return foo(forward!args); }
+
+ // forwards all arguments with swapping order
+ void baz(Args...)(auto ref Args args) { return foo(forward!args[$/2..$], forward!args[0..$/2]); }
+
+ string s;
+ bar(1, s);
+ assert(s == "Hello");
+ baz(s, 2);
+ assert(s == "HelloHello");
+}
+
+@safe unittest
+{
+ auto foo(TL...)(auto ref TL args)
+ {
+ string result = "";
+ foreach (i, _; args)
+ {
+ //pragma(msg, "[",i,"] ", __traits(isRef, args[i]) ? "L" : "R");
+ result ~= __traits(isRef, args[i]) ? "L" : "R";
+ }
+ return result;
+ }
+
+ string bar(TL...)(auto ref TL args)
+ {
+ return foo(forward!args);
+ }
+ string baz(TL...)(auto ref TL args)
+ {
+ int x;
+ return foo(forward!args[3], forward!args[2], 1, forward!args[1], forward!args[0], x);
+ }
+
+ struct S {}
+ S makeS(){ return S(); }
+ int n;
+ string s;
+ assert(bar(S(), makeS(), n, s) == "RRLL");
+ assert(baz(S(), makeS(), n, s) == "LLRRRL");
+}
+
+@betterC
+@safe unittest
+{
+ ref int foo(return ref int a) { return a; }
+ ref int bar(Args)(auto ref Args args)
+ {
+ return foo(forward!args);
+ }
+ static assert(!__traits(compiles, { auto x1 = bar(3); })); // case of NG
+ int value = 3;
+ auto x2 = bar(value); // case of OK
+}
+
+///
+@betterC
+@safe unittest
+{
+ struct X {
+ int i;
+ this(this)
+ {
+ ++i;
+ }
+ }
+
+ struct Y
+ {
+ private X x_;
+ this()(auto ref X x)
+ {
+ x_ = forward!x;
+ }
+ }
+
+ struct Z
+ {
+ private const X x_;
+ this()(auto ref X x)
+ {
+ x_ = forward!x;
+ }
+ this()(auto const ref X x)
+ {
+ x_ = forward!x;
+ }
+ }
+
+ X x;
+ const X cx;
+ auto constX = (){ const X x; return x; };
+ static assert(__traits(compiles, { Y y = x; }));
+ static assert(__traits(compiles, { Y y = X(); }));
+ static assert(!__traits(compiles, { Y y = cx; }));
+ static assert(!__traits(compiles, { Y y = constX(); }));
+ static assert(__traits(compiles, { Z z = x; }));
+ static assert(__traits(compiles, { Z z = X(); }));
+ static assert(__traits(compiles, { Z z = cx; }));
+ static assert(__traits(compiles, { Z z = constX(); }));
+
+
+ Y y1 = x;
+ // ref lvalue, copy
+ assert(y1.x_.i == 1);
+ Y y2 = X();
+ // rvalue, move
+ assert(y2.x_.i == 0);
+
+ Z z1 = x;
+ // ref lvalue, copy
+ assert(z1.x_.i == 1);
+ Z z2 = X();
+ // rvalue, move
+ assert(z2.x_.i == 0);
+ Z z3 = cx;
+ // ref const lvalue, copy
+ assert(z3.x_.i == 1);
+ Z z4 = constX();
+ // const rvalue, copy
+ assert(z4.x_.i == 1);
+}
+
+// lazy -> lazy
+@betterC
+@safe unittest
+{
+ int foo1(lazy int i) { return i; }
+ int foo2(A)(auto ref A i) { return foo1(forward!i); }
+ int foo3(lazy int i) { return foo2(i); }
+
+ int numCalls = 0;
+ assert(foo3({ ++numCalls; return 42; }()) == 42);
+ assert(numCalls == 1);
+}
+
+// lazy -> non-lazy
+@betterC
+@safe unittest
+{
+ int foo1(int a, int b) { return a + b; }
+ int foo2(A...)(auto ref A args) { return foo1(forward!args); }
+ int foo3(int a, lazy int b) { return foo2(a, b); }
+
+ int numCalls;
+ assert(foo3(11, { ++numCalls; return 31; }()) == 42);
+ assert(numCalls == 1);
+}
+
+// non-lazy -> lazy
+@betterC
+@safe unittest
+{
+ int foo1(int a, lazy int b) { return a + b; }
+ int foo2(A...)(auto ref A args) { return foo1(forward!args); }
+ int foo3(int a, int b) { return foo2(a, b); }
+
+ assert(foo3(11, 31) == 42);
+}
+
+// out
+@betterC
+@safe unittest
+{
+ void foo1(int a, out int b) { b = a; }
+ void foo2(A...)(auto ref A args) { foo1(forward!args); }
+ void foo3(int a, out int b) { foo2(a, b); }
+
+ int b;
+ foo3(42, b);
+ assert(b == 42);
+}
+
+// move
+/**
+Moves `source` into `target`, via a destructive copy when necessary.
+
+If `T` is a struct with a destructor or postblit defined, source is reset
+to its `.init` value after it is moved into target, otherwise it is
+left unchanged.
+
+Preconditions:
+If source has internal pointers that point to itself and doesn't define
+opPostMove, it cannot be moved, and will trigger an assertion failure.
+
+Params:
+ source = Data to copy.
+ target = Where to copy into. The destructor, if any, is invoked before the
+ copy is performed.
+*/
+void move(T)(ref T source, ref T target)
+{
+ moveImpl(target, source);
+}
+
+/// For non-struct types, `move` just performs `target = source`:
+@safe unittest
+{
+ Object obj1 = new Object;
+ Object obj2 = obj1;
+ Object obj3;
+
+ move(obj2, obj3);
+ assert(obj3 is obj1);
+ // obj2 unchanged
+ assert(obj2 is obj1);
+}
+
+///
+pure nothrow @safe @nogc unittest
+{
+ // Structs without destructors are simply copied
+ struct S1
+ {
+ int a = 1;
+ int b = 2;
+ }
+ S1 s11 = { 10, 11 };
+ S1 s12;
+
+ move(s11, s12);
+
+ assert(s12 == S1(10, 11));
+ assert(s11 == s12);
+
+ // But structs with destructors or postblits are reset to their .init value
+ // after copying to the target.
+ struct S2
+ {
+ int a = 1;
+ int b = 2;
+
+ ~this() pure nothrow @safe @nogc { }
+ }
+ S2 s21 = { 3, 4 };
+ S2 s22;
+
+ move(s21, s22);
+
+ assert(s21 == S2(1, 2));
+ assert(s22 == S2(3, 4));
+}
+
+@safe unittest
+{
+ import core.internal.traits;
+
+ assertCTFEable!((){
+ Object obj1 = new Object;
+ Object obj2 = obj1;
+ Object obj3;
+ move(obj2, obj3);
+ assert(obj3 is obj1);
+
+ static struct S1 { int a = 1, b = 2; }
+ S1 s11 = { 10, 11 };
+ S1 s12;
+ move(s11, s12);
+ assert(s11.a == 10 && s11.b == 11 && s12.a == 10 && s12.b == 11);
+
+ static struct S2 { int a = 1; int * b; }
+ S2 s21 = { 10, null };
+ s21.b = new int;
+ S2 s22;
+ move(s21, s22);
+ assert(s21 == s22);
+ });
+ // Issue 5661 test(1)
+ static struct S3
+ {
+ static struct X { int n = 0; ~this(){n = 0;} }
+ X x;
+ }
+ static assert(hasElaborateDestructor!S3);
+ S3 s31, s32;
+ s31.x.n = 1;
+ move(s31, s32);
+ assert(s31.x.n == 0);
+ assert(s32.x.n == 1);
+
+ // Issue 5661 test(2)
+ static struct S4
+ {
+ static struct X { int n = 0; this(this){n = 0;} }
+ X x;
+ }
+ static assert(hasElaborateCopyConstructor!S4);
+ S4 s41, s42;
+ s41.x.n = 1;
+ move(s41, s42);
+ assert(s41.x.n == 0);
+ assert(s42.x.n == 1);
+
+ // Issue 13990 test
+ class S5;
+
+ S5 s51;
+ S5 s52 = s51;
+ S5 s53;
+ move(s52, s53);
+ assert(s53 is s51);
+}
+
+/// Ditto
+T move(T)(return scope ref T source)
+{
+ return moveImpl(source);
+}
+
+/// Non-copyable structs can still be moved:
+pure nothrow @safe @nogc unittest
+{
+ struct S
+ {
+ int a = 1;
+ @disable this(this);
+ ~this() pure nothrow @safe @nogc {}
+ }
+ S s1;
+ s1.a = 2;
+ S s2 = move(s1);
+ assert(s1.a == 1);
+ assert(s2.a == 2);
+}
+
+// https://issues.dlang.org/show_bug.cgi?id=20869
+// `move` should propagate the attributes of `opPostMove`
+@system unittest
+{
+ static struct S
+ {
+ void opPostMove(const ref S old) nothrow @system
+ {
+ __gshared int i;
+ new int(i++); // Force @gc impure @system
+ }
+ }
+
+ alias T = void function() @system nothrow;
+ static assert(is(typeof({ S s; move(s); }) == T));
+ static assert(is(typeof({ S s; move(s, s); }) == T));
+}
+
+private void moveImpl(T)(scope ref T target, return scope ref T source)
+{
+ import core.internal.traits : hasElaborateDestructor;
+
+ static if (is(T == struct))
+ {
+ // Unsafe when compiling without -dip1000
+ if ((() @trusted => &source == &target)()) return;
+ // Destroy target before overwriting it
+ static if (hasElaborateDestructor!T) target.__xdtor();
+ }
+ // move and emplace source into target
+ moveEmplaceImpl(target, source);
+}
+
+private T moveImpl(T)(return scope ref T source)
+{
+ // Properly infer safety from moveEmplaceImpl as the implementation below
+ // might void-initialize pointers in result and hence needs to be @trusted
+ if (false) moveEmplaceImpl(source, source);
+
+ return trustedMoveImpl(source);
+}
+
+private T trustedMoveImpl(T)(return scope ref T source) @trusted
+{
+ T result = void;
+ moveEmplaceImpl(result, source);
+ return result;
+}
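
The `if (false) moveEmplaceImpl(source, source);` call above is the usual attribute-inference idiom: the dead call lets the template wrapper inherit safety/purity/nothrow from the checked implementation instead of from the @trusted shim that void-initializes its result. A self-contained sketch of the idiom with hypothetical names:

    private void implementation(T)(ref T dst, ref T src) { dst = src; }

    private T trustedShim(T)(ref T src) @trusted
    {
        T result = void;                 // void-init is why this must be @trusted
        implementation(result, src);
        return result;
    }

    T publicEntry(T)(ref T src)
    {
        // never executed; present only so attributes are inferred from
        // `implementation` rather than blindly taken from the trusted shim
        if (false) implementation(src, src);
        return trustedShim(src);
    }
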
+
+@safe unittest
+{
+ import core.internal.traits;
+
+ assertCTFEable!((){
+ Object obj1 = new Object;
+ Object obj2 = obj1;
+ Object obj3 = move(obj2);
+ assert(obj3 is obj1);
+
+ static struct S1 { int a = 1, b = 2; }
+ S1 s11 = { 10, 11 };
+ S1 s12 = move(s11);
+ assert(s11.a == 10 && s11.b == 11 && s12.a == 10 && s12.b == 11);
+
+ static struct S2 { int a = 1; int * b; }
+ S2 s21 = { 10, null };
+ s21.b = new int;
+ S2 s22 = move(s21);
+ assert(s21 == s22);
+ });
+
+ // Issue 5661 test(1)
+ static struct S3
+ {
+ static struct X { int n = 0; ~this(){n = 0;} }
+ X x;
+ }
+ static assert(hasElaborateDestructor!S3);
+ S3 s31;
+ s31.x.n = 1;
+ S3 s32 = move(s31);
+ assert(s31.x.n == 0);
+ assert(s32.x.n == 1);
+
+ // Issue 5661 test(2)
+ static struct S4
+ {
+ static struct X { int n = 0; this(this){n = 0;} }
+ X x;
+ }
+ static assert(hasElaborateCopyConstructor!S4);
+ S4 s41;
+ s41.x.n = 1;
+ S4 s42 = move(s41);
+ assert(s41.x.n == 0);
+ assert(s42.x.n == 1);
+
+ // Issue 13990 test
+ class S5;
+
+ S5 s51;
+ S5 s52 = s51;
+ S5 s53;
+ s53 = move(s52);
+ assert(s53 is s51);
+}
+
+@betterC
+@system unittest
+{
+ static struct S { int n = 0; ~this() @system { n = 0; } }
+ S a, b;
+ static assert(!__traits(compiles, () @safe { move(a, b); }));
+ static assert(!__traits(compiles, () @safe { move(a); }));
+ a.n = 1;
+ () @trusted { move(a, b); }();
+ assert(a.n == 0);
+ a.n = 1;
+ () @trusted { move(a); }();
+ assert(a.n == 0);
+}
+/+ this can't be tested in druntime, tests are still run in phobos
+@safe unittest//Issue 6217
+{
+ import std.algorithm.iteration : map;
+ auto x = map!"a"([1,2,3]);
+ x = move(x);
+}
++/
+@betterC
+@safe unittest// Issue 8055
+{
+ static struct S
+ {
+ int x;
+ ~this()
+ {
+ assert(x == 0);
+ }
+ }
+ S foo(S s)
+ {
+ return move(s);
+ }
+ S a;
+ a.x = 0;
+ auto b = foo(a);
+ assert(b.x == 0);
+}
+
+@system unittest// Issue 8057
+{
+ int n = 10;
+ struct S
+ {
+ int x;
+ ~this()
+ {
+ // Access to enclosing scope
+ assert(n == 10);
+ }
+ }
+ S foo(S s)
+ {
+ // Move nested struct
+ return move(s);
+ }
+ S a;
+ a.x = 1;
+ auto b = foo(a);
+ assert(b.x == 1);
+
+ // Regression 8171
+ static struct Array(T)
+ {
+ // nested struct has no member
+ struct Payload
+ {
+ ~this() {}
+ }
+ }
+ Array!int.Payload x = void;
+ move(x);
+ move(x, x);
+}
+
+// target must be the first parameter: in void functions, DMD with -dip1000 allows it to take the place of a return scope
+private void moveEmplaceImpl(T)(scope ref T target, return scope ref T source)
+{
+ import core.stdc.string : memcpy, memset;
+ import core.internal.traits;
+
+ // TODO: this assert pulls in half of phobos. we need to work out an alternative assert strategy.
+// static if (!is(T == class) && hasAliasing!T) if (!__ctfe)
+// {
+// import std.exception : doesPointTo;
+// assert(!doesPointTo(source, source) && !hasElaborateMove!T),
+// "Cannot move object with internal pointer unless `opPostMove` is defined.");
+// }
+
+ static if (is(T == struct))
+ {
+ // Unsafe when compiling without -dip1000
+ assert((() @trusted => &source !is &target)(), "source and target must not be identical");
+
+ static if (hasElaborateAssign!T || !isAssignable!T)
+ () @trusted { memcpy(&target, &source, T.sizeof); }();
+ else
+ target = source;
+
+ static if (hasElaborateMove!T)
+ __move_post_blt(target, source);
+
+ // If the source defines a destructor or a postblit hook, we must obliterate the
+ // object in order to avoid double freeing and undue aliasing
+ static if (hasElaborateDestructor!T || hasElaborateCopyConstructor!T)
+ {
+ // If T is nested struct, keep original context pointer
+ static if (__traits(isNested, T))
+ enum sz = T.sizeof - (void*).sizeof;
+ else
+ enum sz = T.sizeof;
+
+ static if (__traits(isZeroInit, T))
+ () @trusted { memset(&source, 0, sz); }();
+ else
+ {
+ auto init = typeid(T).initializer();
+ () @trusted { memcpy(&source, init.ptr, sz); }();
+ }
+ }
+ }
+ else static if (__traits(isStaticArray, T))
+ {
+ for (size_t i = 0; i < source.length; ++i)
+ move(source[i], target[i]);
+ }
+ else
+ {
+ // Primitive data (including pointers and arrays) or class -
+ // assignment works great
+ target = source;
+ }
+}
+
+/**
+ * Similar to $(LREF move) but assumes `target` is uninitialized. This
+ * is more efficient because `source` can be blitted over `target`
+ * without destroying or initializing it first.
+ *
+ * Params:
+ * source = value to be moved into target
+ * target = uninitialized value to be filled by source
+ */
+void moveEmplace(T)(ref T source, ref T target) @system
+{
+ moveEmplaceImpl(target, source);
+}
+
+///
+@betterC
+pure nothrow @nogc @system unittest
+{
+ static struct Foo
+ {
+ pure nothrow @nogc:
+ this(int* ptr) { _ptr = ptr; }
+ ~this() { if (_ptr) ++*_ptr; }
+ int* _ptr;
+ }
+
+ int val;
+ Foo foo1 = void; // uninitialized
+ auto foo2 = Foo(&val); // initialized
+ assert(foo2._ptr is &val);
+
+ // Using `move(foo2, foo1)` would have an undefined effect because it would destroy
+ // the uninitialized foo1.
+ // moveEmplace directly overwrites foo1 without destroying or initializing it first.
+ moveEmplace(foo2, foo1);
+ assert(foo1._ptr is &val);
+ assert(foo2._ptr is null);
+ assert(val == 0);
+}
+
+// issue 18913
+@safe unittest
+{
+ static struct NoCopy
+ {
+ int payload;
+ ~this() { }
+ @disable this(this);
+ }
+
+ static void f(NoCopy[2]) { }
+
+ NoCopy[2] ncarray = [ NoCopy(1), NoCopy(2) ];
+
+ static assert(!__traits(compiles, f(ncarray)));
+ f(move(ncarray));
+}
diff --git a/libphobos/libdruntime/core/memory.d b/libphobos/libdruntime/core/memory.d
index af0fee1..3770c13 100644
--- a/libphobos/libdruntime/core/memory.d
+++ b/libphobos/libdruntime/core/memory.d
@@ -104,31 +104,26 @@
module core.memory;
+version (ARM)
+ version = AnyARM;
+else version (AArch64)
+ version = AnyARM;
+
+version (iOS)
+ version = iOSDerived;
+else version (TVOS)
+ version = iOSDerived;
+else version (WatchOS)
+ version = iOSDerived;
private
{
- extern (C) void gc_init();
- extern (C) void gc_term();
-
- extern (C) void gc_enable() nothrow;
- extern (C) void gc_disable() nothrow;
- extern (C) void gc_collect() nothrow;
- extern (C) void gc_minimize() nothrow;
-
extern (C) uint gc_getAttr( void* p ) pure nothrow;
extern (C) uint gc_setAttr( void* p, uint a ) pure nothrow;
extern (C) uint gc_clrAttr( void* p, uint a ) pure nothrow;
- extern (C) void* gc_malloc( size_t sz, uint ba = 0, const TypeInfo = null ) pure nothrow;
- extern (C) void* gc_calloc( size_t sz, uint ba = 0, const TypeInfo = null ) pure nothrow;
- extern (C) BlkInfo_ gc_qalloc( size_t sz, uint ba = 0, const TypeInfo = null ) pure nothrow;
- extern (C) void* gc_realloc( void* p, size_t sz, uint ba = 0, const TypeInfo = null ) pure nothrow;
- extern (C) size_t gc_extend( void* p, size_t mx, size_t sz, const TypeInfo = null ) pure nothrow;
- extern (C) size_t gc_reserve( size_t sz ) nothrow;
- extern (C) void gc_free( void* p ) pure nothrow;
-
- extern (C) void* gc_addrOf( void* p ) pure nothrow;
- extern (C) size_t gc_sizeOf( void* p ) pure nothrow;
+ extern (C) void* gc_addrOf( void* p ) pure nothrow @nogc;
+ extern (C) size_t gc_sizeOf( void* p ) pure nothrow @nogc;
struct BlkInfo_
{
@@ -137,19 +132,96 @@ private
uint attr;
}
- extern (C) BlkInfo_ gc_query( void* p ) pure nothrow;
+ extern (C) BlkInfo_ gc_query(return scope void* p) pure nothrow;
extern (C) GC.Stats gc_stats ( ) nothrow @nogc;
+ extern (C) GC.ProfileStats gc_profileStats ( ) nothrow @nogc @safe;
+}
- extern (C) void gc_addRoot( in void* p ) nothrow @nogc;
- extern (C) void gc_addRange( in void* p, size_t sz, const TypeInfo ti = null ) nothrow @nogc;
+version (CoreDoc)
+{
+ /**
+ * The minimum size of a system page in bytes.
+ *
+     * This is a compile-time, platform-specific value. It might not match the
+     * actual page size at run time, since some systems allow the page size to
+     * be configured. Whenever possible, please use $(LREF pageSize) instead,
+     * which is initialized at runtime.
+ *
+ * The minimum size is useful when the context requires a compile time known
+ * value, like the size of a static array: `ubyte[minimumPageSize] buffer`.
+ */
+ enum minimumPageSize : size_t;
+}
+else version (AnyARM)
+{
+ version (iOSDerived)
+ enum size_t minimumPageSize = 16384;
+ else
+ enum size_t minimumPageSize = 4096;
+}
+else
+ enum size_t minimumPageSize = 4096;
- extern (C) void gc_removeRoot( in void* p ) nothrow @nogc;
- extern (C) void gc_removeRange( in void* p ) nothrow @nogc;
- extern (C) void gc_runFinalizers( in void[] segment );
+///
+unittest
+{
+ ubyte[minimumPageSize] buffer;
+}
- package extern (C) bool gc_inFinalizer();
+/**
+ * The size of a system page in bytes.
+ *
+ * This value is set at startup time of the application. It's safe to use
+ * early in the start process, like in shared module constructors and
+ * initialization of the D runtime itself.
+ */
+immutable size_t pageSize;
+
+///
+unittest
+{
+ ubyte[] buffer = new ubyte[pageSize];
}
+// The reason for this elaborated way of declaring a function is:
+//
+// * `pragma(crt_constructor)` is used to declare a constructor that is called by
+// the C runtime, before C main. This allows the `pageSize` value to be used
+// during initialization of the D runtime. This also avoids any issues with
+// static module constructors and circular references.
+//
+// * `pragma(mangle)` is used because `pragma(crt_constructor)` requires a
+// function with C linkage. To avoid any name conflict with other C symbols,
+// standard D mangling is used.
+//
+// * The extra bodyless function declaration exists so that the D mangling of
+//   the function can be obtained without hardcoding it.
+//
+// * That declaration also has the side effect of making it impossible to call
+//   the function with standard syntax, which makes it harder to invoke the
+//   initializer a second time by accident.
+private void initialize();
+pragma(crt_constructor)
+pragma(mangle, `_D` ~ initialize.mangleof)
+private extern (C) void initialize() @system
+{
+ version (Posix)
+ {
+ import core.sys.posix.unistd : sysconf, _SC_PAGESIZE;
+
+ (cast() pageSize) = cast(size_t) sysconf(_SC_PAGESIZE);
+ }
+ else version (Windows)
+ {
+ import core.sys.windows.winbase : GetSystemInfo, SYSTEM_INFO;
+
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ (cast() pageSize) = cast(size_t) si.dwPageSize;
+ }
+ else
+ static assert(false, __FUNCTION__ ~ " is not implemented on this platform");
+}
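
Because pageSize is already valid this early, a typical use is rounding allocation requests up to whole pages; a hedged sketch with a hypothetical helper name (outside this module it would need `import core.memory : pageSize;`):

    size_t roundUpToPageSize(size_t bytes)
    {
        // pageSize is immutable once the crt_constructor above has run
        return (bytes + pageSize - 1) / pageSize * pageSize;
    }
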
/**
* This struct encapsulates all garbage collection functionality for the D
@@ -168,18 +240,37 @@ struct GC
size_t usedSize;
/// number of free bytes on the GC heap (might only get updated after a collection)
size_t freeSize;
+ /// number of bytes allocated for current thread since program start
+ ulong allocatedInCurrentThread;
}
/**
+ * Aggregation of current profile information
+ */
+ static struct ProfileStats
+ {
+ import core.time : Duration;
+ /// total number of GC cycles
+ size_t numCollections;
+ /// total time spent doing GC
+ Duration totalCollectionTime;
+ /// total time threads were paused doing GC
+ Duration totalPauseTime;
+ /// largest time threads were paused during one GC cycle
+ Duration maxPauseTime;
+ /// largest time spent doing one GC cycle
+ Duration maxCollectionTime;
+ }
+
+extern(C):
+
+ /**
* Enables automatic garbage collection behavior if collections have
* previously been suspended by a call to disable. This function is
* reentrant, and must be called once for every call to disable before
* automatic collections are enabled.
*/
- static void enable() nothrow /* FIXME pure */
- {
- gc_enable();
- }
+ pragma(mangle, "gc_enable") static void enable() nothrow; /* FIXME pure */
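
A hedged sketch of how the new statistics and the re-declared enable/disable pair might be used together; the GC.stats() and GC.profileStats() accessors are assumed to be declared elsewhere in this file, since only their extern(C) hooks appear in these hunks:

    import core.memory : GC;
    import core.stdc.stdio : printf;

    void reportGC()
    {
        GC.disable();                    // pause automatic collections...
        scope (exit) GC.enable();        // ...and re-enable on every exit path

        const s = GC.stats();
        const p = GC.profileStats();
        printf("allocated in this thread: %llu bytes\n",
               s.allocatedInCurrentThread);
        printf("collections: %zu, total pause: %lld ms\n",
               p.numCollections, p.totalPauseTime.total!"msecs");
    }
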
/**
@@ -189,10 +280,7 @@ struct GC
* such as during an out of memory condition. This function is reentrant,
* but enable must be called once for each call to disable.
*/
- static void disable() nothrow /* FIXME pure */
- {
- gc_disable();
- }
+ pragma(mangle, "gc_disable") static void disable() nothrow; /* FIXME pure */
/**
@@ -202,21 +290,16 @@ struct GC
* and then to reclaim free space. This action may need to suspend all
* running threads for at least part of the collection process.
*/
- static void collect() nothrow /* FIXME pure */
- {
- gc_collect();
- }
+ pragma(mangle, "gc_collect") static void collect() nothrow; /* FIXME pure */
/**
* Indicates that the managed memory space be minimized by returning free
* physical memory to the operating system. The amount of free memory
* returned depends on the allocator design and on program behavior.
*/
- static void minimize() nothrow /* FIXME pure */
- {
- gc_minimize();
- }
+ pragma(mangle, "gc_minimize") static void minimize() nothrow; /* FIXME pure */
+extern(D):
/**
* Elements for a bit field representing memory block attributes. These
@@ -288,9 +371,9 @@ struct GC
* A bit field containing any bits set for the memory block referenced by
* p or zero on error.
*/
- static uint getAttr( in void* p ) nothrow
+ static uint getAttr( const scope void* p ) nothrow
{
- return getAttr(cast()p);
+ return gc_getAttr(cast(void*) p);
}
@@ -315,9 +398,9 @@ struct GC
* The result of a call to getAttr after the specified bits have been
* set.
*/
- static uint setAttr( in void* p, uint a ) nothrow
+ static uint setAttr( const scope void* p, uint a ) nothrow
{
- return setAttr(cast()p, a);
+ return gc_setAttr(cast(void*) p, a);
}
@@ -342,9 +425,9 @@ struct GC
* The result of a call to getAttr after the specified bits have been
* cleared.
*/
- static uint clrAttr( in void* p, uint a ) nothrow
+ static uint clrAttr( const scope void* p, uint a ) nothrow
{
- return clrAttr(cast()p, a);
+ return gc_clrAttr(cast(void*) p, a);
}
@@ -354,6 +437,7 @@ struct GC
return gc_clrAttr( p, a );
}
+extern(C):
/**
* Requests an aligned block of managed memory from the garbage collector.
@@ -375,10 +459,7 @@ struct GC
* Throws:
* OutOfMemoryError on allocation failure.
*/
- static void* malloc( size_t sz, uint ba = 0, const TypeInfo ti = null ) pure nothrow
- {
- return gc_malloc( sz, ba, ti );
- }
+ pragma(mangle, "gc_malloc") static void* malloc(size_t sz, uint ba = 0, const scope TypeInfo ti = null) pure nothrow;
/**
@@ -401,10 +482,7 @@ struct GC
* Throws:
* OutOfMemoryError on allocation failure.
*/
- static BlkInfo qalloc( size_t sz, uint ba = 0, const TypeInfo ti = null ) pure nothrow
- {
- return gc_qalloc( sz, ba, ti );
- }
+ pragma(mangle, "gc_qalloc") static BlkInfo qalloc(size_t sz, uint ba = 0, const scope TypeInfo ti = null) pure nothrow;
/**
@@ -428,52 +506,55 @@ struct GC
* Throws:
* OutOfMemoryError on allocation failure.
*/
- static void* calloc( size_t sz, uint ba = 0, const TypeInfo ti = null ) pure nothrow
- {
- return gc_calloc( sz, ba, ti );
- }
+ pragma(mangle, "gc_calloc") static void* calloc(size_t sz, uint ba = 0, const TypeInfo ti = null) pure nothrow;
/**
- * If sz is zero, the memory referenced by p will be deallocated as if
- * by a call to free. A new memory block of size sz will then be
- * allocated as if by a call to malloc, or the implementation may instead
- * resize the memory block in place. The contents of the new memory block
- * will be the same as the contents of the old memory block, up to the
- * lesser of the new and old sizes. Note that existing memory will only
- * be freed by realloc if sz is equal to zero. The garbage collector is
- * otherwise expected to later reclaim the memory block if it is unused.
- * If allocation fails, this function will call onOutOfMemory which is
- * expected to throw an OutOfMemoryError. If p references memory not
- * originally allocated by this garbage collector, or if it points to the
- * interior of a memory block, no action will be taken. If ba is zero
- * (the default) and p references the head of a valid, known memory block
- * then any bits set on the current block will be set on the new block if a
- * reallocation is required. If ba is not zero and p references the head
- * of a valid, known memory block then the bits in ba will replace those on
- * the current memory block and will also be set on the new block if a
- * reallocation is required.
+ * Extend, shrink or allocate a new block of memory, keeping the contents of
+ * an existing block
+ *
+ * If `sz` is zero, the memory referenced by p will be deallocated as if
+ * by a call to `free`.
+ * If `p` is `null`, new memory will be allocated via `malloc`.
+ * If `p` is pointing to memory not allocated from the GC or to the interior
+ * of an allocated memory block, no operation is performed and null is returned.
+ *
+ * Otherwise, a new memory block of size `sz` will be allocated as if by a
+ * call to `malloc`, or the implementation may instead resize or shrink the memory
+ * block in place.
+ * The contents of the new memory block will be the same as the contents
+ * of the old memory block, up to the lesser of the new and old sizes.
+ *
+ * The caller guarantees that there are no other live pointers to the
+ * passed memory block; even so, the block might not be freed immediately by `realloc`.
+ * The garbage collector can reclaim the memory block in a later
+ * collection if it is unused.
+ * If allocation fails, this function will throw an `OutOfMemoryError`.
+ *
+ * If `ba` is zero (the default) the attributes of the existing memory
+ * will be used for an allocation.
+ * If `ba` is not zero and no new memory is allocated, the bits in ba will
+ * replace those of the current memory block.
*
* Params:
- * p = A pointer to the root of a valid memory block or to null.
+ * p = A pointer to the base of a valid memory block or to `null`.
* sz = The desired allocation size in bytes.
- * ba = A bitmask of the attributes to set on this block.
+ * ba = A bitmask of the BlkAttr attributes to set on this block.
* ti = TypeInfo to describe the memory. The GC might use this information
* to improve scanning for pointers or to call finalizers.
*
* Returns:
- * A reference to the allocated memory on success or null if sz is
- * zero. On failure, the original value of p is returned.
+ * A reference to the allocated memory on success or `null` if `sz` is
+ * zero or the pointer does not point to the base of a GC-allocated
+ * memory block.
*
* Throws:
- * OutOfMemoryError on allocation failure.
+ * `OutOfMemoryError` on allocation failure.
*/
- static void* realloc( void* p, size_t sz, uint ba = 0, const TypeInfo ti = null ) pure nothrow
- {
- return gc_realloc( p, sz, ba, ti );
- }
+ pragma(mangle, "gc_realloc") static void* realloc(return void* p, size_t sz, uint ba = 0, const TypeInfo ti = null) pure nothrow;
- /// Issue 13111
+ // https://issues.dlang.org/show_bug.cgi?id=13111
+ ///
unittest
{
enum size1 = 1 << 11 + 1; // page in large object pool
@@ -482,7 +563,7 @@ struct GC
auto data1 = cast(ubyte*)GC.calloc(size1);
auto data2 = cast(ubyte*)GC.realloc(data1, size2);
- BlkInfo info = query(data2);
+ GC.BlkInfo info = GC.query(data2);
assert(info.size >= size2);
}
@@ -512,10 +593,7 @@ struct GC
* as an indicator of success. $(LREF capacity) should be used to
* retrieve actual usable slice capacity.
*/
- static size_t extend( void* p, size_t mx, size_t sz, const TypeInfo ti = null ) pure nothrow
- {
- return gc_extend( p, mx, sz, ti );
- }
+ pragma(mangle, "gc_extend") static size_t extend(void* p, size_t mx, size_t sz, const TypeInfo ti = null) pure nothrow;
/// Standard extending
unittest
{
@@ -557,10 +635,7 @@ struct GC
* Returns:
* The actual number of bytes reserved or zero on error.
*/
- static size_t reserve( size_t sz ) nothrow /* FIXME pure */
- {
- return gc_reserve( sz );
- }
+ pragma(mangle, "gc_reserve") static size_t reserve(size_t sz) nothrow; /* FIXME pure */
/**
@@ -569,16 +644,14 @@ struct GC
* collector, if p points to the interior of a memory block, or if this
* method is called from a finalizer, no action will be taken. The block
* will not be finalized regardless of whether the FINALIZE attribute is
- * set. If finalization is desired, use delete instead.
+ * set. If finalization is desired, call $(REF1 destroy, object) prior to `GC.free`.
*
* Params:
* p = A pointer to the root of a valid memory block or to null.
*/
- static void free( void* p ) pure nothrow
- {
- gc_free( p );
- }
+ pragma(mangle, "gc_free") static void free(void* p) pure nothrow @nogc;
+extern(D):
/**
* Returns the base address of the memory block containing p. This value
@@ -595,19 +668,17 @@ struct GC
* Returns:
* The base address of the memory block referenced by p or null on error.
*/
- static inout(void)* addrOf( inout(void)* p ) nothrow /* FIXME pure */
+ static inout(void)* addrOf( inout(void)* p ) nothrow @nogc pure @trusted
{
return cast(inout(void)*)gc_addrOf(cast(void*)p);
}
-
/// ditto
- static void* addrOf(void* p) pure nothrow
+ static void* addrOf(void* p) pure nothrow @nogc @trusted
{
return gc_addrOf(p);
}
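For illustration, an interior pointer resolves to the same base address as a pointer to the start of the block:

---
import core.memory : GC;

unittest
{
    auto arr = new int[](16);
    void* interior = cast(void*) (arr.ptr + 4); // points into the middle of the block
    assert(GC.addrOf(interior) !is null);
    assert(GC.addrOf(interior) is GC.addrOf(cast(void*) arr.ptr));
}
---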
-
/**
* Returns the true size of the memory block referenced by p. This value
* represents the maximum number of bytes for which a call to realloc may
@@ -621,14 +692,14 @@ struct GC
* Returns:
* The size in bytes of the memory block referenced by p or zero on error.
*/
- static size_t sizeOf( in void* p ) nothrow
+ static size_t sizeOf( const scope void* p ) nothrow @nogc /* FIXME pure */
{
return gc_sizeOf(cast(void*)p);
}
/// ditto
- static size_t sizeOf(void* p) pure nothrow
+ static size_t sizeOf(void* p) pure nothrow @nogc
{
return gc_sizeOf( p );
}
@@ -659,14 +730,14 @@ struct GC
* Information regarding the memory block referenced by p or BlkInfo.init
* on error.
*/
- static BlkInfo query( in void* p ) nothrow
+ static BlkInfo query(return scope const void* p) nothrow
{
return gc_query(cast(void*)p);
}
/// ditto
- static BlkInfo query(void* p) pure nothrow
+ static BlkInfo query(return scope void* p) pure nothrow
{
return gc_query( p );
}
@@ -681,6 +752,17 @@ struct GC
}
/**
+ * Returns runtime profile stats for the currently active GC implementation.
+ * See `core.memory.GC.ProfileStats` for the list of available metrics.
+ */
+ static ProfileStats profileStats() nothrow @nogc @safe
+ {
+ return gc_profileStats();
+ }
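A short sketch of consuming the returned `ProfileStats`, converting its `Duration` fields for display:

---
import core.memory : GC;
import core.stdc.stdio : printf;

void reportGCProfile() nothrow
{
    auto p = GC.profileStats();
    printf("collections: %llu, total pause: %lld ms, max pause: %lld ms\n",
           cast(ulong) p.numCollections,
           p.totalPauseTime.total!"msecs",
           p.maxPauseTime.total!"msecs");
}
---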
+
+extern(C):
+
+ /**
* Adds an internal root pointing to the GC memory block referenced by p.
* As a result, the block referenced by p itself and any blocks accessible
* via it will be considered live until the root is removed again.
@@ -725,10 +807,7 @@ struct GC
* }
* ---
*/
- static void addRoot( in void* p ) nothrow @nogc /* FIXME pure */
- {
- gc_addRoot( p );
- }
+ pragma(mangle, "gc_addRoot") static void addRoot(const void* p) nothrow @nogc; /* FIXME pure */
/**
@@ -739,10 +818,7 @@ struct GC
* Params:
* p = A pointer into a GC-managed memory block or null.
*/
- static void removeRoot( in void* p ) nothrow @nogc /* FIXME pure */
- {
- gc_removeRoot( p );
- }
+ pragma(mangle, "gc_removeRoot") static void removeRoot(const void* p) nothrow @nogc; /* FIXME pure */
/**
@@ -773,10 +849,7 @@ struct GC
* // rawMemory will be recognized on collection.
* ---
*/
- static void addRange( in void* p, size_t sz, const TypeInfo ti = null ) @nogc nothrow /* FIXME pure */
- {
- gc_addRange( p, sz, ti );
- }
+ pragma(mangle, "gc_addRange") static void addRange(const void* p, size_t sz, const TypeInfo ti = null) @nogc nothrow; /* FIXME pure */
/**
@@ -788,10 +861,7 @@ struct GC
* Params:
* p = A pointer to a valid memory address or to null.
*/
- static void removeRange( in void* p ) nothrow @nogc /* FIXME pure */
- {
- gc_removeRange( p );
- }
+ pragma(mangle, "gc_removeRange") static void removeRange(const void* p) nothrow @nogc; /* FIXME pure */
/**
@@ -804,9 +874,133 @@ struct GC
* Params:
* segment = address range of a code segment.
*/
- static void runFinalizers( in void[] segment )
+ pragma(mangle, "gc_runFinalizers") static void runFinalizers(const scope void[] segment);
+
+ /**
+ * Queries the GC whether the current thread is running object finalization
+ * as part of a GC collection, or an explicit call to runFinalizers.
+ *
+ * As some GC implementations (such as the current conservative one) don't
+ * support GC memory allocation during object finalization, this function
+ * can be used to guard against such programming errors.
+ *
+ * Returns:
+ * true if the current thread is in a finalizer, a destructor invoked by
+ * the GC.
+ */
+ pragma(mangle, "gc_inFinalizer") static bool inFinalizer() nothrow @nogc @safe;
+
+ ///
+ @safe nothrow @nogc unittest
{
- gc_runFinalizers( segment );
+ // Only code called from a destructor is executed during finalization.
+ assert(!GC.inFinalizer);
+ }
+
+ ///
+ unittest
+ {
+ enum Outcome
+ {
+ notCalled,
+ calledManually,
+ calledFromDruntime
+ }
+
+ static class Resource
+ {
+ static Outcome outcome;
+
+ this()
+ {
+ outcome = Outcome.notCalled;
+ }
+
+ ~this()
+ {
+ if (GC.inFinalizer)
+ {
+ outcome = Outcome.calledFromDruntime;
+
+ import core.exception : InvalidMemoryOperationError;
+ try
+ {
+ /*
+ * Presently, allocating GC memory during finalization
+ * is forbidden and leads to
+ * `InvalidMemoryOperationError` being thrown.
+ *
+ * `GC.inFinalizer` can be used to guard against
+ * programming errors such as these and is also a more
+ * efficient way to verify whether a destructor was
+ * invoked by the GC.
+ */
+ cast(void) GC.malloc(1);
+ assert(false);
+ }
+ catch (InvalidMemoryOperationError e)
+ {
+ return;
+ }
+ assert(false);
+ }
+ else
+ outcome = Outcome.calledManually;
+ }
+ }
+
+ static void createGarbage()
+ {
+ auto r = new Resource;
+ r = null;
+ }
+
+ assert(Resource.outcome == Outcome.notCalled);
+ createGarbage();
+ GC.collect;
+ assert(
+ Resource.outcome == Outcome.notCalled ||
+ Resource.outcome == Outcome.calledFromDruntime);
+
+ auto r = new Resource;
+ GC.runFinalizers((cast(const void*)typeid(Resource).destructor)[0..1]);
+ assert(Resource.outcome == Outcome.calledFromDruntime);
+ Resource.outcome = Outcome.notCalled;
+
+ debug(MEMSTOMP) {} else
+ {
+ // assume Resource data is still available
+ r.destroy;
+ assert(Resource.outcome == Outcome.notCalled);
+ }
+
+ r = new Resource;
+ assert(Resource.outcome == Outcome.notCalled);
+ r.destroy;
+ assert(Resource.outcome == Outcome.calledManually);
+ }
+
+ /**
+ * Returns the number of bytes allocated for the current thread
+ * since program start. It is the same as
+ * GC.stats().allocatedInCurrentThread, but faster.
+ */
+ pragma(mangle, "gc_allocatedInCurrentThread") static ulong allocatedInCurrentThread() nothrow;
+
+ /// Using allocatedInCurrentThread
+ nothrow unittest
+ {
+ ulong currentlyAllocated = GC.allocatedInCurrentThread();
+ struct DataStruct
+ {
+ long l1;
+ long l2;
+ long l3;
+ long l4;
+ }
+ DataStruct* unused = new DataStruct;
+ assert(GC.allocatedInCurrentThread() == currentlyAllocated + 32);
+ assert(GC.stats().allocatedInCurrentThread == currentlyAllocated + 32);
}
}
@@ -814,6 +1008,7 @@ struct GC
* Pure variants of C's memory allocation functions `malloc`, `calloc`, and
* `realloc` and deallocation function `free`.
*
+ * UNIX 98 requires that errno be set to ENOMEM upon failure.
* Purity is achieved by saving and restoring the value of `errno`, thus
* behaving as if it were never changed.
*
@@ -821,44 +1016,44 @@ struct GC
* $(LINK2 https://dlang.org/spec/function.html#pure-functions, D's rules for purity),
* which allow for memory allocation under specific circumstances.
*/
-void* pureMalloc(size_t size) @trusted pure @nogc nothrow
+void* pureMalloc()(size_t size) @trusted pure @nogc nothrow
{
- const errno = fakePureGetErrno();
+ const errnosave = fakePureErrno;
void* ret = fakePureMalloc(size);
- if (!ret || errno != 0)
- {
- cast(void)fakePureSetErrno(errno);
- }
+ fakePureErrno = errnosave;
return ret;
}
/// ditto
-void* pureCalloc(size_t nmemb, size_t size) @trusted pure @nogc nothrow
+void* pureCalloc()(size_t nmemb, size_t size) @trusted pure @nogc nothrow
{
- const errno = fakePureGetErrno();
+ const errnosave = fakePureErrno;
void* ret = fakePureCalloc(nmemb, size);
- if (!ret || errno != 0)
- {
- cast(void)fakePureSetErrno(errno);
- }
+ fakePureErrno = errnosave;
return ret;
}
/// ditto
-void* pureRealloc(void* ptr, size_t size) @system pure @nogc nothrow
+void* pureRealloc()(void* ptr, size_t size) @system pure @nogc nothrow
{
- const errno = fakePureGetErrno();
+ const errnosave = fakePureErrno;
void* ret = fakePureRealloc(ptr, size);
- if (!ret || errno != 0)
- {
- cast(void)fakePureSetErrno(errno);
- }
+ fakePureErrno = errnosave;
return ret;
}
+
/// ditto
-void pureFree(void* ptr) @system pure @nogc nothrow
+void pureFree()(void* ptr) @system pure @nogc nothrow
{
- const errno = fakePureGetErrno();
- fakePureFree(ptr);
- cast(void)fakePureSetErrno(errno);
+ version (Posix)
+ {
+ // POSIX free doesn't set errno
+ fakePureFree(ptr);
+ }
+ else
+ {
+ const errnosave = fakePureErrno;
+ fakePureFree(ptr);
+ fakePureErrno = errnosave;
+ }
}
///
@@ -881,40 +1076,451 @@ void pureFree(void* ptr) @system pure @nogc nothrow
@system pure nothrow @nogc unittest
{
- const int errno = fakePureGetErrno();
+ const int errno = fakePureErrno();
void* x = pureMalloc(10); // normal allocation
- assert(errno == fakePureGetErrno()); // errno shouldn't change
+ assert(errno == fakePureErrno()); // errno shouldn't change
assert(x !is null); // allocation should succeed
x = pureRealloc(x, 10); // normal reallocation
- assert(errno == fakePureGetErrno()); // errno shouldn't change
+ assert(errno == fakePureErrno()); // errno shouldn't change
assert(x !is null); // allocation should succeed
fakePureFree(x);
void* y = pureCalloc(10, 1); // normal zeroed allocation
- assert(errno == fakePureGetErrno()); // errno shouldn't change
+ assert(errno == fakePureErrno()); // errno shouldn't change
assert(y !is null); // allocation should succeed
fakePureFree(y);
- // subtract 2 because snn.lib adds 2 unconditionally before passing
- // the size to the Windows API
- void* z = pureMalloc(size_t.max - 2); // won't affect `errno`
- assert(errno == fakePureGetErrno()); // errno shouldn't change
+ // Workaround bug in glibc 2.26
+ // See also: https://issues.dlang.org/show_bug.cgi?id=17956
+ void* z = pureMalloc(size_t.max & ~255); // won't affect `errno`
+ assert(errno == fakePureErrno()); // errno shouldn't change
assert(z is null);
}
// locally purified for internal use here only
-extern (C) private pure @system @nogc nothrow
+
+static import core.stdc.errno;
+static if (__traits(getOverloads, core.stdc.errno, "errno").length == 1
+ && __traits(getLinkage, core.stdc.errno.errno) == "C")
{
- pragma(mangle, "getErrno") int fakePureGetErrno();
- pragma(mangle, "setErrno") int fakePureSetErrno(int);
+ extern(C) pragma(mangle, __traits(identifier, core.stdc.errno.errno))
+ private ref int fakePureErrno() @nogc nothrow pure @system;
+}
+else
+{
+ extern(C) private @nogc nothrow pure @system
+ {
+ pragma(mangle, __traits(identifier, core.stdc.errno.getErrno))
+ @property int fakePureErrno();
+ pragma(mangle, __traits(identifier, core.stdc.errno.setErrno))
+ @property int fakePureErrno(int);
+ }
+}
+
+version (D_BetterC) {}
+else // TODO: remove this function after Phobos no longer needs it.
+extern (C) private @system @nogc nothrow
+{
+ ref int fakePureErrnoImpl()
+ {
+ import core.stdc.errno;
+ return errno();
+ }
+}
+
+extern (C) private pure @system @nogc nothrow
+{
pragma(mangle, "malloc") void* fakePureMalloc(size_t);
pragma(mangle, "calloc") void* fakePureCalloc(size_t nmemb, size_t size);
pragma(mangle, "realloc") void* fakePureRealloc(void* ptr, size_t size);
pragma(mangle, "free") void fakePureFree(void* ptr);
}
+
+/**
+Destroys and then deallocates an object.
+
+In detail, `__delete(x)` returns with no effect if `x` is `null`. Otherwise, it
+performs the following actions in sequence:
+$(UL
+ $(LI
+ Calls the destructor `~this()` for the object referred to by `x`
+ (if `x` is a class or interface reference) or
+ for the object pointed to by `x` (if `x` is a pointer to a `struct`).
+ Arrays of structs call the destructor, if defined, for each element in the array.
+ If no destructor is defined, this step has no effect.
+ )
+ $(LI
+ Frees the memory allocated for `x`. If `x` is a reference to a class
+ or interface, the memory allocated for the underlying instance is freed. If `x` is
+ a pointer, the memory allocated for the pointed-to object is freed. If `x` is a
+ built-in array, the memory allocated for the array is freed.
+ If `x` does not refer to memory previously allocated with `new` (or the lower-level
+ equivalents in the GC API), the behavior is undefined.
+ )
+ $(LI
+ Lastly, `x` is set to `null`. Any attempt to read or write the freed memory via
+ other references will result in undefined behavior.
+ )
+)
+
+Note: Users should prefer $(REF1 destroy, object) to explicitly finalize objects,
+and only resort to $(REF __delete, core,memory) when $(REF destroy, object)
+wouldn't be a feasible option.
+
+Params:
+ x = aggregate object that should be destroyed
+
+See_Also: $(REF1 destroy, object), $(REF free, core,GC)
+
+History:
+
+The `delete` keyword allowed GC-allocated memory to be freed.
+As this is inherently not `@safe`, it has been deprecated.
+This function has been added to provide an easy transition from `delete`.
+It performs the same functionality as the former `delete` keyword.
+*/
+void __delete(T)(ref T x) @system
+{
+ static void _destructRecurse(S)(ref S s)
+ if (is(S == struct))
+ {
+ static if (__traits(hasMember, S, "__xdtor") &&
+ // Bugzilla 14746: Check that it's the exact member of S.
+ __traits(isSame, S, __traits(parent, s.__xdtor)))
+ s.__xdtor();
+ }
+
+ // See also: https://github.com/dlang/dmd/blob/v2.078.0/src/dmd/e2ir.d#L3886
+ static if (is(T == interface))
+ {
+ .object.destroy(x);
+ }
+ else static if (is(T == class))
+ {
+ .object.destroy(x);
+ }
+ else static if (is(T == U*, U))
+ {
+ static if (is(U == struct))
+ _destructRecurse(*x);
+ }
+ else static if (is(T : E[], E))
+ {
+ static if (is(E == struct))
+ {
+ foreach_reverse (ref e; x)
+ _destructRecurse(e);
+ }
+ }
+ else
+ {
+ static assert(0, "It is not possible to delete: `" ~ T.stringof ~ "`");
+ }
+
+ static if (is(T == interface) ||
+ is(T == class) ||
+ is(T == U2*, U2))
+ {
+ GC.free(GC.addrOf(cast(void*) x));
+ x = null;
+ }
+ else static if (is(T : E2[], E2))
+ {
+ GC.free(GC.addrOf(cast(void*) x.ptr));
+ x = null;
+ }
+}
+
+/// Deleting classes
+unittest
+{
+ bool dtorCalled;
+ class B
+ {
+ int test;
+ ~this()
+ {
+ dtorCalled = true;
+ }
+ }
+ B b = new B();
+ B a = b;
+ b.test = 10;
+
+ assert(GC.addrOf(cast(void*) b) != null);
+ __delete(b);
+ assert(b is null);
+ assert(dtorCalled);
+ assert(GC.addrOf(cast(void*) b) == null);
+ // but be careful, a still points to it
+ assert(a !is null);
+ assert(GC.addrOf(cast(void*) a) == null); // but not a valid GC pointer
+}
+
+/// Deleting interfaces
+unittest
+{
+ bool dtorCalled;
+ interface A
+ {
+ int quack();
+ }
+ class B : A
+ {
+ int a;
+ int quack()
+ {
+ a++;
+ return a;
+ }
+ ~this()
+ {
+ dtorCalled = true;
+ }
+ }
+ A a = new B();
+ a.quack();
+
+ assert(GC.addrOf(cast(void*) a) != null);
+ __delete(a);
+ assert(a is null);
+ assert(dtorCalled);
+ assert(GC.addrOf(cast(void*) a) == null);
+}
+
+/// Deleting structs
+unittest
+{
+ bool dtorCalled;
+ struct A
+ {
+ string test;
+ ~this()
+ {
+ dtorCalled = true;
+ }
+ }
+ auto a = new A("foo");
+
+ assert(GC.addrOf(cast(void*) a) != null);
+ __delete(a);
+ assert(a is null);
+ assert(dtorCalled);
+ assert(GC.addrOf(cast(void*) a) == null);
+}
+
+/// Deleting arrays
+unittest
+{
+ int[] a = [1, 2, 3];
+ auto b = a;
+
+ assert(GC.addrOf(b.ptr) != null);
+ __delete(b);
+ assert(b is null);
+ assert(GC.addrOf(b.ptr) == null);
+ // but be careful, a still points to it
+ assert(a !is null);
+ assert(GC.addrOf(a.ptr) == null); // but not a valid GC pointer
+}
+
+/// Deleting arrays of structs
+unittest
+{
+ int dtorCalled;
+ struct A
+ {
+ int a;
+ ~this()
+ {
+ assert(dtorCalled == a);
+ dtorCalled++;
+ }
+ }
+ auto arr = [A(1), A(2), A(3)];
+ arr[0].a = 2;
+ arr[1].a = 1;
+ arr[2].a = 0;
+
+ assert(GC.addrOf(arr.ptr) != null);
+ __delete(arr);
+ assert(dtorCalled == 3);
+ assert(GC.addrOf(arr.ptr) == null);
+}
+
+// Deleting raw memory
+unittest
+{
+ import core.memory : GC;
+ auto a = GC.malloc(5);
+ assert(GC.addrOf(cast(void*) a) != null);
+ __delete(a);
+ assert(a is null);
+ assert(GC.addrOf(cast(void*) a) == null);
+}
+
+// __delete returns with no effect if x is null
+unittest
+{
+ Object x = null;
+ __delete(x);
+
+ struct S { ~this() { } }
+ class C { }
+ interface I { }
+
+ int[] a; __delete(a);
+ S[] as; __delete(as);
+ C c; __delete(c);
+ I i; __delete(i);
+ C* pc = &c; __delete(*pc);
+ I* pi = &i; __delete(*pi);
+ int* pint; __delete(pint);
+ S* ps; __delete(ps);
+}
+
+// https://issues.dlang.org/show_bug.cgi?id=19092
+unittest
+{
+ const(int)[] x = [1, 2, 3];
+ assert(GC.addrOf(x.ptr) != null);
+ __delete(x);
+ assert(x is null);
+ assert(GC.addrOf(x.ptr) == null);
+
+ immutable(int)[] y = [1, 2, 3];
+ assert(GC.addrOf(y.ptr) != null);
+ __delete(y);
+ assert(y is null);
+ assert(GC.addrOf(y.ptr) == null);
+}
+
+// test realloc behaviour
+unittest
+{
+ static void set(int* p, size_t size)
+ {
+ foreach (i; 0 .. size)
+ *p++ = cast(int) i;
+ }
+ static void verify(int* p, size_t size)
+ {
+ foreach (i; 0 .. size)
+ assert(*p++ == i);
+ }
+ static void test(size_t memsize)
+ {
+ int* p = cast(int*) GC.malloc(memsize * int.sizeof);
+ assert(p);
+ set(p, memsize);
+ verify(p, memsize);
+
+ int* q = cast(int*) GC.realloc(p + 4, 2 * memsize * int.sizeof);
+ assert(q == null);
+
+ q = cast(int*) GC.realloc(p + memsize / 2, 2 * memsize * int.sizeof);
+ assert(q == null);
+
+ q = cast(int*) GC.realloc(p + memsize - 1, 2 * memsize * int.sizeof);
+ assert(q == null);
+
+ int* r = cast(int*) GC.realloc(p, 5 * memsize * int.sizeof);
+ verify(r, memsize);
+ set(r, 5 * memsize);
+
+ int* s = cast(int*) GC.realloc(r, 2 * memsize * int.sizeof);
+ verify(s, 2 * memsize);
+
+ assert(GC.realloc(s, 0) == null); // free
+ assert(GC.addrOf(p) == null);
+ }
+
+ test(16);
+ test(200);
+ test(800); // spans large and small pools
+ test(1200);
+ test(8000);
+
+ void* p = GC.malloc(100);
+ assert(GC.realloc(&p, 50) == null); // non-GC pointer
+}
+
+// test GC.profileStats
+unittest
+{
+ auto stats = GC.profileStats();
+ GC.collect();
+ auto nstats = GC.profileStats();
+ assert(nstats.numCollections > stats.numCollections);
+}
+
+// in rt.lifetime:
+private extern (C) void* _d_newitemU(scope const TypeInfo _ti) @system pure nothrow;
+
+/**
+Moves a value to a new GC allocation.
+
+Params:
+ value = Value to be moved. If the argument is an lvalue and a struct with a
+ destructor or postblit, it will be reset to its `.init` value.
+
+Returns:
+ A pointer to the new GC-allocated value.
+*/
+T* moveToGC(T)(auto ref T value)
+{
+ static T* doIt(ref T value) @trusted
+ {
+ import core.lifetime : moveEmplace;
+ auto mem = cast(T*) _d_newitemU(typeid(T)); // allocate but don't initialize
+ moveEmplace(value, *mem);
+ return mem;
+ }
+
+ return doIt(value); // T dtor might be @system
+}
+
+///
+@safe pure nothrow unittest
+{
+ struct S
+ {
+ int x;
+ this(this) @disable;
+ ~this() @safe pure nothrow @nogc {}
+ }
+
+ S* p;
+
+ // rvalue
+ p = moveToGC(S(123));
+ assert(p.x == 123);
+
+ // lvalue
+ auto lval = S(456);
+ p = moveToGC(lval);
+ assert(p.x == 456);
+ assert(lval.x == 0);
+}
+
+// @system dtor
+unittest
+{
+ struct S
+ {
+ int x;
+ ~this() @system {}
+ }
+
+ // lvalue case is @safe, ref param isn't destructed
+ static assert(__traits(compiles, (ref S lval) @safe { moveToGC(lval); }));
+
+ // rvalue case is @system, value param is destructed
+ static assert(!__traits(compiles, () @safe { moveToGC(S(0)); }));
+}
diff --git a/libphobos/libdruntime/core/runtime.d b/libphobos/libdruntime/core/runtime.d
index 5fc9904..bfb72e0 100644
--- a/libphobos/libdruntime/core/runtime.d
+++ b/libphobos/libdruntime/core/runtime.d
@@ -4,13 +4,8 @@
* Copyright: Copyright Sean Kelly 2005 - 2009.
* License: $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost License 1.0)
* Authors: Sean Kelly
- * Source: $(DRUNTIMESRC core/_runtime.d)
- */
-
-/* Copyright Sean Kelly 2005 - 2009.
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying file LICENSE or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
+ * Source: $(LINK2 https://github.com/dlang/druntime/blob/master/src/core/runtime.d, _runtime.d)
+ * Documentation: https://dlang.org/phobos/core_runtime.html
*/
/* NOTE: This file has been patched from the original DMD distribution to
@@ -18,8 +13,6 @@
*/
module core.runtime;
-version (Windows) import core.stdc.wchar_ : wchar_t;
-
version (OSX)
version = Darwin;
else version (iOS)
@@ -29,10 +22,30 @@ else version (TVOS)
else version (WatchOS)
version = Darwin;
+version (GNU)
+{
+ import gcc.backtrace;
+ // This shouldn't be necessary but ensure that code doesn't get mixed
+ // It does however prevent the unittest SEGV handler to be installed,
+ // which is desireable as it uses backtrace directly.
+ private enum hasExecinfo = false;
+}
+else version (DRuntime_Use_Libunwind)
+{
+ import core.internal.backtrace.libunwind;
+ // This shouldn't be necessary but ensure that code doesn't get mixed
+ // It does however prevent the unittest SEGV handler to be installed,
+ // which is desireable as it uses backtrace directly.
+ private enum hasExecinfo = false;
+}
+else
+ import core.internal.execinfo;
+
/// C interface for Runtime.loadLibrary
extern (C) void* rt_loadLibrary(const char* name);
/// ditto
-version (Windows) extern (C) void* rt_loadLibraryW(const wchar_t* name);
+version (Windows) extern (C) void* rt_loadLibraryW(const wchar* name);
+
/// C interface for Runtime.unloadLibrary, returns 1/0 instead of bool
extern (C) int rt_unloadLibrary(void* ptr);
@@ -41,29 +54,69 @@ extern(C) int rt_init();
/// C interface for Runtime.terminate, returns 1/0 instead of bool
extern(C) int rt_term();
+/**
+ * This type is returned by the module unit test handler to indicate testing
+ * results.
+ */
+struct UnitTestResult
+{
+ /**
+ * Number of modules which were tested
+ */
+ size_t executed;
+
+ /**
+ * Number of modules that passed their unittests
+ */
+ size_t passed;
+
+ /**
+ * Should the main function be run or not? This is ignored if any tests
+ * failed.
+ */
+ bool runMain;
+
+ /**
+ * Should we print a summary of the results?
+ */
+ bool summarize;
+
+ /**
+ * Simple check for whether execution should continue after unit tests
+ * have been run. Works with legacy code that expected a bool return.
+ *
+ * Returns:
+ * true if execution should continue after testing is complete, false if
+ * not.
+ */
+ bool opCast(T : bool)() const
+ {
+ return runMain && (executed == passed);
+ }
+
+ /// Simple return code that says unit tests pass, and main should be run
+ enum UnitTestResult pass = UnitTestResult(0, 0, true, false);
+ /// Simple return code that says unit tests failed.
+ enum UnitTestResult fail = UnitTestResult(1, 0, false, false);
+}
+
+/// Legacy module unit test handler
+alias bool function() ModuleUnitTester;
+/// Module unit test handler
+alias UnitTestResult function() ExtendedModuleUnitTester;
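For illustration, a minimal extended handler that skips all tests and lets `main` run; a hedged sketch of user code, not part of this patch:

---
import core.runtime : Runtime, UnitTestResult;

shared static this()
{
    // Equivalent to a legacy handler returning true, but with the richer result type.
    Runtime.extendedModuleUnitTester = function UnitTestResult()
    {
        return UnitTestResult.pass;
    };
}
---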
private
{
- alias bool function() ModuleUnitTester;
alias bool function(Object) CollectHandler;
alias Throwable.TraceInfo function( void* ptr ) TraceHandler;
- extern (C) void rt_setCollectHandler( CollectHandler h );
- extern (C) CollectHandler rt_getCollectHandler();
-
- extern (C) void rt_setTraceHandler( TraceHandler h );
- extern (C) TraceHandler rt_getTraceHandler();
-
alias void delegate( Throwable ) ExceptionHandler;
extern (C) void _d_print_throwable(Throwable t);
extern (C) void* thread_stackBottom();
-
- extern (C) string[] rt_args();
- extern (C) CArgs rt_cArgs() @nogc;
}
-static this()
+shared static this()
{
// NOTE: Some module ctors will run before this handler is set, so it's
// still possible the app could exit without a stack trace. If
@@ -108,13 +161,6 @@ struct Runtime
return !!rt_init();
}
- deprecated("Please use the overload of Runtime.initialize that takes no argument.")
- static bool initialize(ExceptionHandler dg = null)
- {
- return !!rt_init();
- }
-
-
/**
* Terminates the runtime. This call is to be used in instances where the
 * standard program termination process will not be executed. This is
@@ -129,23 +175,13 @@ struct Runtime
return !!rt_term();
}
- deprecated("Please use the overload of Runtime.terminate that takes no argument.")
- static bool terminate(ExceptionHandler dg = null)
- {
- return !!rt_term();
- }
-
-
/**
* Returns the arguments supplied when the process was started.
*
* Returns:
* The arguments supplied when this process was started.
*/
- static @property string[] args()
- {
- return rt_args();
- }
+ extern(C) pragma(mangle, "rt_args") static @property string[] args();
/**
* Returns the unprocessed C arguments supplied when the process was started.
@@ -168,10 +204,7 @@ struct Runtime
* }
* ---
*/
- static @property CArgs cArgs() @nogc
- {
- return rt_cArgs();
- }
+ extern(C) pragma(mangle, "rt_cArgs") static @property CArgs cArgs() @nogc;
/**
* Locates a dynamic library with the supplied library name and dynamically
@@ -184,12 +217,13 @@ struct Runtime
* Returns:
* A reference to the library or null on error.
*/
- static void* loadLibrary()(in char[] name)
+ static void* loadLibrary()(const scope char[] name)
{
import core.stdc.stdlib : free, malloc;
version (Windows)
{
- import core.sys.windows.windows;
+ import core.sys.windows.winnls : CP_UTF8, MultiByteToWideChar;
+ import core.sys.windows.winnt : WCHAR;
if (name.length == 0) return null;
// Load a DLL at runtime
@@ -198,7 +232,7 @@ struct Runtime
if (len == 0)
return null;
- auto buf = cast(wchar_t*)malloc((len+1) * wchar_t.sizeof);
+ auto buf = cast(WCHAR*)malloc((len+1) * WCHAR.sizeof);
if (buf is null) return null;
scope (exit) free(buf);
@@ -253,10 +287,7 @@ struct Runtime
* Params:
* h = The new trace handler. Set to null to use the default handler.
*/
- static @property void traceHandler( TraceHandler h )
- {
- rt_setTraceHandler( h );
- }
+ extern(C) pragma(mangle, "rt_setTraceHandler") static @property void traceHandler(TraceHandler h);
/**
* Gets the current trace handler.
@@ -264,10 +295,7 @@ struct Runtime
* Returns:
* The current trace handler or null if none has been set.
*/
- static @property TraceHandler traceHandler()
- {
- return rt_getTraceHandler();
- }
+ extern(C) pragma(mangle, "rt_getTraceHandler") static @property TraceHandler traceHandler();
/**
 * Overrides the default collect handler with a user-supplied version. This
@@ -280,10 +308,7 @@ struct Runtime
* Params:
* h = The new collect handler. Set to null to use the default handler.
*/
- static @property void collectHandler( CollectHandler h )
- {
- rt_setCollectHandler( h );
- }
+ extern(C) pragma(mangle, "rt_setCollectHandler") static @property void collectHandler( CollectHandler h );
/**
@@ -292,10 +317,7 @@ struct Runtime
* Returns:
* The current collect handler or null if none has been set.
*/
- static @property CollectHandler collectHandler()
- {
- return rt_getCollectHandler();
- }
+ extern(C) pragma(mangle, "rt_getCollectHandler") static @property CollectHandler collectHandler();
/**
@@ -304,26 +326,39 @@ struct Runtime
* value of this routine indicates to the runtime whether the tests ran
* without error.
*
+ * There are two options for handlers. The `bool` version is deprecated but
+ * will be kept for legacy support. Returning `true` from the handler is
+ * equivalent to returning `UnitTestResult.pass` from the extended version.
+ * Returning `false` from the handler is equivalent to returning
+ * `UnitTestResult.fail` from the extended version.
+ *
+ * See the documentation for `UnitTestResult` to see how you should set up
+ * the return structure.
+ *
+ * See the documentation for `runModuleUnitTests` for how the default
+ * algorithm works, or read the example below.
+ *
* Params:
- * h = The new unit tester. Set to null to use the default unit tester.
+ * h = The new unit tester. Set both to null to use the default unit
+ * tester.
*
* Example:
* ---------
- * version (unittest) shared static this()
+ * shared static this()
* {
* import core.runtime;
*
- * Runtime.moduleUnitTester = &customModuleUnitTester;
+ * Runtime.extendedModuleUnitTester = &customModuleUnitTester;
* }
*
- * bool customModuleUnitTester()
+ * UnitTestResult customModuleUnitTester()
* {
* import std.stdio;
*
* writeln("Using customModuleUnitTester");
*
* // Do the same thing as the default moduleUnitTester:
- * size_t failed = 0;
+ * UnitTestResult result;
* foreach (m; ModuleInfo)
* {
* if (m)
@@ -332,45 +367,82 @@ struct Runtime
*
* if (fp)
* {
+ * ++result.executed;
* try
* {
* fp();
+ * ++result.passed;
* }
* catch (Throwable e)
* {
* writeln(e);
- * failed++;
* }
* }
* }
* }
- * return failed == 0;
+ * if (result.executed != result.passed)
+ * {
+ * result.runMain = false; // don't run main
+ * result.summarize = true; // print failure
+ * }
+ * else
+ * {
+ * result.runMain = true; // all UT passed
+ * result.summarize = false; // be quiet about it.
+ * }
+ * return result;
* }
* ---------
*/
+ static @property void extendedModuleUnitTester( ExtendedModuleUnitTester h )
+ {
+ sm_extModuleUnitTester = h;
+ }
+
+ /// Ditto
static @property void moduleUnitTester( ModuleUnitTester h )
{
sm_moduleUnitTester = h;
}
-
/**
- * Gets the current module unit tester.
+ * Gets the current legacy module unit tester.
+ *
+ * This property should not be used, but is supported for legacy purposes.
+ *
+ * Note that if the extended unit test handler is set, this handler will
+ * be ignored.
*
* Returns:
- * The current module unit tester handler or null if none has been set.
+ * The current legacy module unit tester handler or null if none has been
+ * set.
*/
static @property ModuleUnitTester moduleUnitTester()
{
return sm_moduleUnitTester;
}
+ /**
+ * Gets the current module unit tester.
+ *
+ * This handler overrides any legacy module unit tester set by the
+ * moduleUnitTester property.
+ *
+ * Returns:
+ * The current module unit tester handler or null if none has been
+ * set.
+ */
+ static @property ExtendedModuleUnitTester extendedModuleUnitTester()
+ {
+ return sm_extModuleUnitTester;
+ }
private:
// NOTE: This field will only ever be set in a static ctor and should
// never occur within any but the main thread, so it is safe to
// make it __gshared.
+ __gshared ExtendedModuleUnitTester sm_extModuleUnitTester = null;
__gshared ModuleUnitTester sm_moduleUnitTester = null;
}
@@ -444,40 +516,52 @@ extern (C) void profilegc_setlogfilename(string name);
/**
* This routine is called by the runtime to run module unit tests on startup.
- * The user-supplied unit tester will be called if one has been supplied,
+ * The user-supplied unit tester will be called if one has been set,
* otherwise all unit tests will be run in sequence.
*
+ * If the extended unittest handler is registered, this function returns the
+ * result from that handler directly.
+ *
+ * If a legacy boolean-returning custom handler is used, `false` maps to
+ * `UnitTestResult.fail`, and `true` maps to `UnitTestResult.pass`. This was
+ * the original behavior of the unit testing system.
+ *
+ * If no unittest custom handlers are registered, the following algorithm is
+ * executed (the behavior can be affected by the `--DRT-testmode` switch
+ * below):
+ * 1. Execute any unittests present. For each that fails, print the stack
+ * trace and continue.
+ * 2. If no unittests were present, set summarize to false, and runMain to
+ * true.
+ * 3. Otherwise, set summarize to true, and runMain to false.
+ *
+ * See the documentation for `UnitTestResult` for details on how the runtime
+ * treats the return value from this function.
+ *
+ * If the switch `--DRT-testmode` is passed to the executable, it can have
+ * one of 3 values:
+ * 1. "run-main": even if unit tests are run (and all pass), runMain is set
+ to true.
+ * 2. "test-or-main": any unit tests present will cause the program to
+ * summarize the results and exit regardless of the result. This is the
+ * default.
+ * 3. "test-only", runMain is set to false, even with no tests present.
+ *
+ * This command-line parameter does not affect custom unit test handlers.
+ *
* Returns:
- * true if execution should continue after testing is complete and false if
- * not. Default behavior is to return true.
+ * A `UnitTestResult` struct indicating the result of running unit tests.
*/
-extern (C) bool runModuleUnitTests()
+extern (C) UnitTestResult runModuleUnitTests()
{
- // backtrace
- version (GNU)
- import gcc.backtrace;
- version (CRuntime_Glibc)
- import core.sys.linux.execinfo;
- else version (Darwin)
- import core.sys.darwin.execinfo;
- else version (FreeBSD)
- import core.sys.freebsd.execinfo;
- else version (NetBSD)
- import core.sys.netbsd.execinfo;
- else version (DragonFlyBSD)
- import core.sys.dragonflybsd.execinfo;
- else version (Windows)
+ version (Windows)
import core.sys.windows.stacktrace;
- else version (Solaris)
- import core.sys.solaris.execinfo;
- else version (CRuntime_UClibc)
- import core.sys.linux.execinfo;
- static if ( __traits( compiles, new LibBacktrace(0) ) )
+ static if (__traits(compiles, new LibBacktrace(0)))
{
import core.sys.posix.signal; // segv handler
- static extern (C) void unittestSegvHandler( int signum, siginfo_t* info, void* ptr )
+ static extern (C) void unittestSegvHandler(int signum, siginfo_t* info, void* ptr)
{
import core.stdc.stdio;
fprintf(stderr, "Segmentation fault while running unittests:\n");
@@ -496,18 +580,18 @@ extern (C) bool runModuleUnitTests()
sigaction_t oldbus = void;
(cast(byte*) &action)[0 .. action.sizeof] = 0;
- sigfillset( &action.sa_mask ); // block other signals
+ sigfillset(&action.sa_mask); // block other signals
action.sa_flags = SA_SIGINFO | SA_RESETHAND;
action.sa_sigaction = &unittestSegvHandler;
- sigaction( SIGSEGV, &action, &oldseg );
- sigaction( SIGBUS, &action, &oldbus );
- scope( exit )
+ sigaction(SIGSEGV, &action, &oldseg);
+ sigaction(SIGBUS, &action, &oldbus);
+ scope (exit)
{
- sigaction( SIGSEGV, &oldseg, null );
- sigaction( SIGBUS, &oldbus, null );
+ sigaction(SIGSEGV, &oldseg, null);
+ sigaction(SIGBUS, &oldbus, null);
}
}
- else static if ( __traits( compiles, backtrace ) )
+ else static if (hasExecinfo)
{
import core.sys.posix.signal; // segv handler
@@ -537,385 +621,311 @@ extern (C) bool runModuleUnitTests()
}
}
- if ( Runtime.sm_moduleUnitTester is null )
+ if (Runtime.sm_extModuleUnitTester !is null)
+ return Runtime.sm_extModuleUnitTester();
+ else if (Runtime.sm_moduleUnitTester !is null)
+ return Runtime.sm_moduleUnitTester() ? UnitTestResult.pass : UnitTestResult.fail;
+ UnitTestResult results;
+ foreach ( m; ModuleInfo )
{
- size_t failed = 0;
- foreach ( m; ModuleInfo )
+ if ( !m )
+ continue;
+ auto fp = m.unitTest;
+ if ( !fp )
+ continue;
+
+ import core.exception;
+ ++results.executed;
+ try
{
- if ( m )
+ fp();
+ ++results.passed;
+ }
+ catch ( Throwable e )
+ {
+ if ( typeid(e) == typeid(AssertError) )
{
- auto fp = m.unitTest;
-
- if ( fp )
+ // Crude heuristic to figure whether the assertion originates in
+ // the unittested module. TODO: improve.
+ auto moduleName = m.name;
+ if (moduleName.length && e.file.length > moduleName.length
+ && e.file[0 .. moduleName.length] == moduleName)
{
- try
- {
- fp();
- }
- catch ( Throwable e )
- {
- _d_print_throwable(e);
- failed++;
- }
+ import core.stdc.stdio;
+ printf("%.*s(%llu): [unittest] %.*s\n",
+ cast(int) e.file.length, e.file.ptr, cast(ulong) e.line,
+ cast(int) e.message.length, e.message.ptr);
+
+ // Exception originates in the same module, don't print
+ // the stack trace.
+ // TODO: omit stack trace only if assert was thrown
+ // directly by the unittest.
+ continue;
}
}
+ // TODO: perhaps indent all of this stuff.
+ _d_print_throwable(e);
}
- return failed == 0;
}
- return Runtime.sm_moduleUnitTester();
-}
+ import core.internal.parseoptions : rt_configOption;
-///////////////////////////////////////////////////////////////////////////////
-// Default Implementations
-///////////////////////////////////////////////////////////////////////////////
+ if (results.passed != results.executed)
+ {
+ // by default, we always print a summary if there are failures.
+ results.summarize = true;
+ }
+ else switch (rt_configOption("testmode", null, false))
+ {
+ case "run-main":
+ results.runMain = true;
+ break;
+ case "test-only":
+ // Never run main, always summarize
+ results.summarize = true;
+ break;
+ case "":
+ // By default, do not run main if tests are present.
+ case "test-or-main":
+ // only run main if there were no tests. Only summarize if we are not
+ // running main.
+ results.runMain = (results.executed == 0);
+ results.summarize = !results.runMain;
+ break;
+ default:
+ assert(0, "Unknown --DRT-testmode option: " ~ rt_configOption("testmode", null, false));
+ }
+ return results;
+}
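For reference, assumed invocation forms for the `--DRT-testmode` switch handled above (the binary name is hypothetical):

---
./app --DRT-testmode=run-main      # run unittests, then run main() if all passed
./app --DRT-testmode=test-only     # run unittests, summarize, and never run main()
./app --DRT-testmode=test-or-main  # the default: run main() only when no tests exist
---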
/**
+ * Get the default `Throwable.TraceInfo` implementation for the platform
+ *
+ * This function returns a trace handler, allowing the caller to inspect the
+ * current stack trace.
+ *
+ * Params:
+ * ptr = (Windows only) The context to get the stack trace from.
+ * When `null` (the default), start from the current frame.
*
+ * Returns:
+ * A `Throwable.TraceInfo` implementation suitable to iterate over the stack,
+ * or `null`. If called from a finalizer (destructor), always returns `null`
+ * as trace handlers allocate.
*/
Throwable.TraceInfo defaultTraceHandler( void* ptr = null )
{
- // backtrace
- version (GNU)
- import gcc.backtrace;
- version (CRuntime_Glibc)
- import core.sys.linux.execinfo;
- else version (Darwin)
- import core.sys.darwin.execinfo;
- else version (FreeBSD)
- import core.sys.freebsd.execinfo;
- else version (NetBSD)
- import core.sys.netbsd.execinfo;
- else version (DragonFlyBSD)
- import core.sys.dragonflybsd.execinfo;
- else version (Windows)
- import core.sys.windows.stacktrace;
- else version (Solaris)
- import core.sys.solaris.execinfo;
- else version (CRuntime_UClibc)
- import core.sys.linux.execinfo;
-
// avoid recursive GC calls in finalizer, trace handlers should be made @nogc instead
- import core.memory : gc_inFinalizer;
- if (gc_inFinalizer)
+ import core.memory : GC;
+ if (GC.inFinalizer)
return null;
- //printf("runtime.defaultTraceHandler()\n");
- static if ( __traits( compiles, new LibBacktrace(0) ) )
+ static if (__traits(compiles, new LibBacktrace(0)))
{
version (Posix)
- {
static enum FIRSTFRAME = 4;
- }
else version (Win64)
- {
static enum FIRSTFRAME = 4;
- }
else
- {
static enum FIRSTFRAME = 0;
- }
return new LibBacktrace(FIRSTFRAME);
}
- else static if ( __traits( compiles, new UnwindBacktrace(0) ) )
+ else static if (__traits(compiles, new UnwindBacktrace(0)))
{
version (Posix)
- {
static enum FIRSTFRAME = 5;
- }
else version (Win64)
- {
static enum FIRSTFRAME = 4;
- }
else
- {
static enum FIRSTFRAME = 0;
- }
return new UnwindBacktrace(FIRSTFRAME);
}
- else static if ( __traits( compiles, backtrace ) )
+ else version (Windows)
{
- import core.demangle;
- import core.stdc.stdlib : free;
- import core.stdc.string : strlen, memchr, memmove;
-
- class DefaultTraceInfo : Throwable.TraceInfo
+ import core.sys.windows.stacktrace;
+ static if (__traits(compiles, new StackTrace(0, null)))
{
- this()
- {
- numframes = 0; //backtrace( callstack, MAXFRAMES );
- if (numframes < 2) // backtrace() failed, do it ourselves
- {
- static void** getBasePtr()
- {
- version (D_InlineAsm_X86)
- asm { naked; mov EAX, EBP; ret; }
- else
- version (D_InlineAsm_X86_64)
- asm { naked; mov RAX, RBP; ret; }
- else
- return null;
- }
-
- auto stackTop = getBasePtr();
- auto stackBottom = cast(void**) thread_stackBottom();
- void* dummy;
-
- if ( stackTop && &dummy < stackTop && stackTop < stackBottom )
- {
- auto stackPtr = stackTop;
-
- for ( numframes = 0; stackTop <= stackPtr &&
- stackPtr < stackBottom &&
- numframes < MAXFRAMES; )
- {
- enum CALL_INSTRUCTION_SIZE = 1; // it may not be 1 but it is good enough to get
- // in CALL instruction address range for backtrace
- callstack[numframes++] = *(stackPtr + 1) - CALL_INSTRUCTION_SIZE;
- stackPtr = cast(void**) *stackPtr;
- }
- }
- }
- }
-
- override int opApply( scope int delegate(ref const(char[])) dg ) const
- {
- return opApply( (ref size_t, ref const(char[]) buf)
- {
- return dg( buf );
- } );
- }
+ import core.sys.windows.winnt : CONTEXT;
+ version (Win64)
+ enum FIRSTFRAME = 4;
+ else version (Win32)
+ enum FIRSTFRAME = 0;
+ return new StackTrace(FIRSTFRAME, cast(CONTEXT*)ptr);
+ }
+ else
+ return null;
+ }
+ else static if (__traits(compiles, new DefaultTraceInfo()))
+ return new DefaultTraceInfo();
+ else
+ return null;
+}
- override int opApply( scope int delegate(ref size_t, ref const(char[])) dg ) const
- {
- version (Posix)
- {
- // NOTE: The first 4 frames with the current implementation are
- // inside core.runtime and the object code, so eliminate
- // these for readability. The alternative would be to
- // exclude the first N frames that are in a list of
- // mangled function names.
- enum FIRSTFRAME = 4;
- }
- else version (Windows)
- {
- // NOTE: On Windows, the number of frames to exclude is based on
- // whether the exception is user or system-generated, so
- // it may be necessary to exclude a list of function names
- // instead.
- enum FIRSTFRAME = 0;
- }
+/// Example of a simple program printing its stack trace
+unittest
+{
+ import core.runtime;
+ import core.stdc.stdio;
- version (linux) enum enableDwarf = true;
- else version (FreeBSD) enum enableDwarf = true;
- else version (DragonFlyBSD) enum enableDwarf = true;
- else version (Darwin) enum enableDwarf = true;
- else enum enableDwarf = false;
+ void main()
+ {
+ auto trace = defaultTraceHandler(null);
+ foreach (line; trace)
+ {
+ printf("%.*s\n", cast(int)line.length, line.ptr);
+ }
+ }
+}
- static if (enableDwarf)
- {
- import core.internal.traits : externDFunc;
-
- alias traceHandlerOpApplyImpl = externDFunc!(
- "rt.backtrace.dwarf.traceHandlerOpApplyImpl",
- int function(const void*[], scope int delegate(ref size_t, ref const(char[])))
- );
-
- if (numframes >= FIRSTFRAME)
- {
- return traceHandlerOpApplyImpl(
- callstack[FIRSTFRAME .. numframes],
- dg
- );
- }
- else
- {
- return 0;
- }
- }
- else
- {
- const framelist = backtrace_symbols( callstack.ptr, numframes );
- scope(exit) free(cast(void*) framelist);
-
- int ret = 0;
- for ( int i = FIRSTFRAME; i < numframes; ++i )
- {
- char[4096] fixbuf;
- auto buf = framelist[i][0 .. strlen(framelist[i])];
- auto pos = cast(size_t)(i - FIRSTFRAME);
- buf = fixline( buf, fixbuf );
- ret = dg( pos, buf );
- if ( ret )
- break;
- }
- return ret;
- }
+version (DRuntime_Use_Libunwind)
+{
+ import core.internal.backtrace.handler;
- }
+ alias DefaultTraceInfo = LibunwindHandler;
+}
+/// Default implementation for most POSIX systems
+else static if (hasExecinfo) private class DefaultTraceInfo : Throwable.TraceInfo
+{
+ import core.demangle;
+ import core.stdc.stdlib : free;
+ import core.stdc.string : strlen, memchr, memmove;
- override string toString() const
+ this()
+ {
+ // it may not be 1 but it is good enough to get
+ // in CALL instruction address range for backtrace
+ enum CALL_INSTRUCTION_SIZE = 1;
+
+ static if (__traits(compiles, backtrace((void**).init, int.init)))
+ numframes = backtrace(this.callstack.ptr, MAXFRAMES);
+ // Backtrace succeeded, adjust the frame to point to the caller
+ if (numframes >= 2)
+ foreach (ref elem; this.callstack)
+ elem -= CALL_INSTRUCTION_SIZE;
+ else // backtrace() failed, do it ourselves
+ {
+ static void** getBasePtr()
{
- string buf;
- foreach ( i, line; this )
- buf ~= i ? "\n" ~ line : line;
- return buf;
+ version (D_InlineAsm_X86)
+ asm { naked; mov EAX, EBP; ret; }
+ else
+ version (D_InlineAsm_X86_64)
+ asm { naked; mov RAX, RBP; ret; }
+ else
+ return null;
}
- private:
- int numframes;
- static enum MAXFRAMES = 128;
- void*[MAXFRAMES] callstack = void;
+ auto stackTop = getBasePtr();
+ auto stackBottom = cast(void**) thread_stackBottom();
+ void* dummy;
- private:
- const(char)[] fixline( const(char)[] buf, return ref char[4096] fixbuf ) const
+ if ( stackTop && &dummy < stackTop && stackTop < stackBottom )
{
- size_t symBeg, symEnd;
- version (Darwin)
- {
- // format is:
- // 1 module 0x00000000 D6module4funcAFZv + 0
- for ( size_t i = 0, n = 0; i < buf.length; i++ )
- {
- if ( ' ' == buf[i] )
- {
- n++;
- while ( i < buf.length && ' ' == buf[i] )
- i++;
- if ( 3 > n )
- continue;
- symBeg = i;
- while ( i < buf.length && ' ' != buf[i] )
- i++;
- symEnd = i;
- break;
- }
- }
- }
- else version (CRuntime_Glibc)
- {
- // format is: module(_D6module4funcAFZv) [0x00000000]
- // or: module(_D6module4funcAFZv+0x78) [0x00000000]
- auto bptr = cast(char*) memchr( buf.ptr, '(', buf.length );
- auto eptr = cast(char*) memchr( buf.ptr, ')', buf.length );
- auto pptr = cast(char*) memchr( buf.ptr, '+', buf.length );
-
- if (pptr && pptr < eptr)
- eptr = pptr;
-
- if ( bptr++ && eptr )
- {
- symBeg = bptr - buf.ptr;
- symEnd = eptr - buf.ptr;
- }
- }
- else version (FreeBSD)
- {
- // format is: 0x00000000 <_D6module4funcAFZv+0x78> at module
- auto bptr = cast(char*) memchr( buf.ptr, '<', buf.length );
- auto eptr = cast(char*) memchr( buf.ptr, '+', buf.length );
-
- if ( bptr++ && eptr )
- {
- symBeg = bptr - buf.ptr;
- symEnd = eptr - buf.ptr;
- }
- }
- else version (NetBSD)
- {
- // format is: 0x00000000 <_D6module4funcAFZv+0x78> at module
- auto bptr = cast(char*) memchr( buf.ptr, '<', buf.length );
- auto eptr = cast(char*) memchr( buf.ptr, '+', buf.length );
-
- if ( bptr++ && eptr )
- {
- symBeg = bptr - buf.ptr;
- symEnd = eptr - buf.ptr;
- }
- }
- else version (DragonFlyBSD)
- {
- // format is: 0x00000000 <_D6module4funcAFZv+0x78> at module
- auto bptr = cast(char*) memchr( buf.ptr, '<', buf.length );
- auto eptr = cast(char*) memchr( buf.ptr, '+', buf.length );
-
- if ( bptr++ && eptr )
- {
- symBeg = bptr - buf.ptr;
- symEnd = eptr - buf.ptr;
- }
- }
- else version (Solaris)
- {
- // format is object'symbol+offset [pc]
- auto bptr = cast(char*) memchr( buf.ptr, '\'', buf.length );
- auto eptr = cast(char*) memchr( buf.ptr, '+', buf.length );
-
- if ( bptr++ && eptr )
- {
- symBeg = bptr - buf.ptr;
- symEnd = eptr - buf.ptr;
- }
- }
- else
- {
- // fallthrough
- }
+ auto stackPtr = stackTop;
- assert(symBeg < buf.length && symEnd < buf.length);
- assert(symBeg <= symEnd);
-
- enum min = (size_t a, size_t b) => a <= b ? a : b;
- if (symBeg == symEnd || symBeg >= fixbuf.length)
+ for ( numframes = 0; stackTop <= stackPtr &&
+ stackPtr < stackBottom &&
+ numframes < MAXFRAMES; )
{
- immutable len = min(buf.length, fixbuf.length);
- fixbuf[0 .. len] = buf[0 .. len];
- return fixbuf[0 .. len];
- }
- else
- {
- fixbuf[0 .. symBeg] = buf[0 .. symBeg];
-
- auto sym = demangle(buf[symBeg .. symEnd], fixbuf[symBeg .. $]);
-
- if (sym.ptr !is fixbuf.ptr + symBeg)
- {
- // demangle reallocated the buffer, copy the symbol to fixbuf
- immutable len = min(fixbuf.length - symBeg, sym.length);
- memmove(fixbuf.ptr + symBeg, sym.ptr, len);
- if (symBeg + len == fixbuf.length)
- return fixbuf[];
- }
-
- immutable pos = symBeg + sym.length;
- assert(pos < fixbuf.length);
- immutable tail = buf.length - symEnd;
- immutable len = min(fixbuf.length - pos, tail);
- fixbuf[pos .. pos + len] = buf[symEnd .. symEnd + len];
- return fixbuf[0 .. pos + len];
+ callstack[numframes++] = *(stackPtr + 1) - CALL_INSTRUCTION_SIZE;
+ stackPtr = cast(void**) *stackPtr;
}
}
}
+ }
- return new DefaultTraceInfo;
+ override int opApply( scope int delegate(ref const(char[])) dg ) const
+ {
+ return opApply( (ref size_t, ref const(char[]) buf)
+ {
+ return dg( buf );
+ } );
}
- else static if ( __traits( compiles, new StackTrace(0, null) ) )
+
+ override int opApply( scope int delegate(ref size_t, ref const(char[])) dg ) const
{
- version (Win64)
+ version (linux) enum enableDwarf = true;
+ else version (FreeBSD) enum enableDwarf = true;
+ else version (DragonFlyBSD) enum enableDwarf = true;
+ else version (Darwin) enum enableDwarf = true;
+ else enum enableDwarf = false;
+
+ const framelist = backtrace_symbols( callstack.ptr, numframes );
+ scope(exit) free(cast(void*) framelist);
+
+ static if (enableDwarf)
{
- static enum FIRSTFRAME = 4;
+ import core.internal.backtrace.dwarf;
+ return traceHandlerOpApplyImpl(numframes,
+ i => callstack[i],
+ (i) { auto str = framelist[i][0 .. strlen(framelist[i])]; return getMangledSymbolName(str); },
+ dg);
}
- else version (Win32)
+ else
{
- static enum FIRSTFRAME = 0;
+ int ret = 0;
+ for (size_t pos = 0; pos < numframes; ++pos)
+ {
+ char[4096] fixbuf = void;
+ auto buf = framelist[pos][0 .. strlen(framelist[pos])];
+ buf = fixline( buf, fixbuf );
+ ret = dg( pos, buf );
+ if ( ret )
+ break;
+ }
+ return ret;
}
- import core.sys.windows.winnt : CONTEXT;
- auto s = new StackTrace(FIRSTFRAME, cast(CONTEXT*)ptr);
- return s;
}
- else
+
+ override string toString() const
{
- return null;
+ string buf;
+ foreach ( i, line; this )
+ buf ~= i ? "\n" ~ line : line;
+ return buf;
+ }
+
+private:
+ int numframes;
+ static enum MAXFRAMES = 128;
+ void*[MAXFRAMES] callstack = void;
+
+private:
+ const(char)[] fixline( const(char)[] buf, return ref char[4096] fixbuf ) const
+ {
+ size_t symBeg, symEnd;
+
+ getMangledSymbolName(buf, symBeg, symEnd);
+
+ enum min = (size_t a, size_t b) => a <= b ? a : b;
+ if (symBeg == symEnd || symBeg >= fixbuf.length)
+ {
+ immutable len = min(buf.length, fixbuf.length);
+ fixbuf[0 .. len] = buf[0 .. len];
+ return fixbuf[0 .. len];
+ }
+ else
+ {
+ fixbuf[0 .. symBeg] = buf[0 .. symBeg];
+
+ auto sym = demangle(buf[symBeg .. symEnd], fixbuf[symBeg .. $]);
+
+ if (sym.ptr !is fixbuf.ptr + symBeg)
+ {
+ // demangle reallocated the buffer, copy the symbol to fixbuf
+ immutable len = min(fixbuf.length - symBeg, sym.length);
+ memmove(fixbuf.ptr + symBeg, sym.ptr, len);
+ if (symBeg + len == fixbuf.length)
+ return fixbuf[];
+ }
+
+ immutable pos = symBeg + sym.length;
+ assert(pos < fixbuf.length);
+ immutable tail = buf.length - symEnd;
+ immutable len = min(fixbuf.length - pos, tail);
+ fixbuf[pos .. pos + len] = buf[symEnd .. symEnd + len];
+ return fixbuf[0 .. pos + len];
+ }
}
}
diff --git a/libphobos/libdruntime/core/stdc/math.d b/libphobos/libdruntime/core/stdc/math.d
index 2de6e57..de029c4 100644
--- a/libphobos/libdruntime/core/stdc/math.d
+++ b/libphobos/libdruntime/core/stdc/math.d
@@ -315,17 +315,17 @@ version (CRuntime_DigitalMars)
pure uint __fpclassify_d(double x);
pure uint __fpclassify_ld(real x);
- extern (D)
- {
//int fpclassify(real-floating x);
///
- extern(C) pragma(mangle, "__fpclassify_f") pure int fpclassify(float x);
+ pragma(mangle, "__fpclassify_f") pure int fpclassify(float x);
///
- extern(C) pragma(mangle, "__fpclassify_d") pure int fpclassify(double x);
+ pragma(mangle, "__fpclassify_d") pure int fpclassify(double x);
///
- extern(C) pragma(mangle, real.sizeof == double.sizeof ? "__fpclassify_d" : "__fpclassify_ld")
+ pragma(mangle, real.sizeof == double.sizeof ? "__fpclassify_d" : "__fpclassify_ld")
pure int fpclassify(real x);
+ extern (D)
+ {
//int isfinite(real-floating x);
///
pure int isfinite(float x) { return fpclassify(x) >= FP_NORMAL; }
@@ -452,17 +452,17 @@ else version (CRuntime_Microsoft) // fully supported since MSVCRT 12 (VS 2013) o
pure int __signbit(double x);
pure int __signbitl(real x);
+ //int fpclassify(real-floating x);
+ ///
+ pragma(mangle, "__fpclassifyf") pure int fpclassify(float x);
+ ///
+ pragma(mangle, "__fpclassify") pure int fpclassify(double x);
+ ///
+ pragma(mangle, real.sizeof == double.sizeof ? "__fpclassify" : "__fpclassifyl")
+ pure int fpclassify(real x);
+
extern (D)
{
- //int fpclassify(real-floating x);
- ///
- extern(C) pragma(mangle, "__fpclassifyf") pure int fpclassify(float x);
- ///
- extern(C) pragma(mangle, "__fpclassify") pure int fpclassify(double x);
- ///
- extern(C) pragma(mangle, real.sizeof == double.sizeof ? "__fpclassify" : "__fpclassifyl")
- pure int fpclassify(real x);
-
//int isfinite(real-floating x);
///
pure int isfinite(float x) { return (fpclassify(x) & FP_NORMAL) == 0; }
@@ -478,16 +478,19 @@ else version (CRuntime_Microsoft) // fully supported since MSVCRT 12 (VS 2013) o
pure int isinf(double x) { return fpclassify(x) == FP_INFINITE; }
///
pure int isinf(real x) { return fpclassify(x) == FP_INFINITE; }
+ }
- //int isnan(real-floating x);
- ///
- extern(C) pragma(mangle, "__isnanf") pure int isnan(float x);
- ///
- extern(C) pragma(mangle, "__isnan") pure int isnan(double x);
- ///
- extern(C) pragma(mangle, real.sizeof == double.sizeof ? "__isnan" : "__isnanl")
- pure int isnan(real x);
+ //int isnan(real-floating x);
+ ///
+ pragma(mangle, "__isnanf") pure int isnan(float x);
+ ///
+ pragma(mangle, "__isnan") pure int isnan(double x);
+ ///
+ pragma(mangle, real.sizeof == double.sizeof ? "__isnan" : "__isnanl")
+ pure int isnan(real x);
+ extern (D)
+ {
//int isnormal(real-floating x);
///
int isnormal(float x) { return fpclassify(x) == FP_NORMAL; }
@@ -495,16 +498,16 @@ else version (CRuntime_Microsoft) // fully supported since MSVCRT 12 (VS 2013) o
int isnormal(double x) { return fpclassify(x) == FP_NORMAL; }
///
int isnormal(real x) { return fpclassify(x) == FP_NORMAL; }
-
- //int signbit(real-floating x);
- ///
- extern(C) pragma(mangle, "__signbitf") pure int signbit(float x);
- ///
- extern(C) pragma(mangle, "__signbit") pure int signbit(double x);
- ///
- extern(C) pragma(mangle, real.sizeof == double.sizeof ? "__signbit" : "__signbitl")
- int signbit(real x);
}
+
+ //int signbit(real-floating x);
+ ///
+ pragma(mangle, "__signbitf") pure int signbit(float x);
+ ///
+ pragma(mangle, "__signbit") pure int signbit(double x);
+ ///
+ pragma(mangle, real.sizeof == double.sizeof ? "__signbit" : "__signbitl")
+ int signbit(real x);
}
else
{
@@ -644,61 +647,58 @@ else version (CRuntime_Glibc)
pure int __signbit(double x);
pure int __signbitl(real x);
- extern (D)
- {
//int fpclassify(real-floating x);
///
- extern(C) pragma(mangle, "__fpclassifyf") pure int fpclassify(float x);
+ pragma(mangle, "__fpclassifyf") pure int fpclassify(float x);
///
- extern(C) pragma(mangle, "__fpclassify") pure int fpclassify(double x);
+ pragma(mangle, "__fpclassify") pure int fpclassify(double x);
///
- extern(C) pragma(mangle, real.sizeof == double.sizeof ? "__fpclassify" : "__fpclassifyl")
+ pragma(mangle, real.sizeof == double.sizeof ? "__fpclassify" : "__fpclassifyl")
pure int fpclassify(real x);
//int isfinite(real-floating x);
///
- extern(C) pragma(mangle, "__finitef") pure int isfinite(float x);
+ pragma(mangle, "__finitef") pure int isfinite(float x);
///
- extern(C) pragma(mangle, "__finite") pure int isfinite(double x);
+ pragma(mangle, "__finite") pure int isfinite(double x);
///
- extern(C) pragma(mangle, real.sizeof == double.sizeof ? "__finite" : "__finitel")
+ pragma(mangle, real.sizeof == double.sizeof ? "__finite" : "__finitel")
pure int isfinite(real x);
//int isinf(real-floating x);
///
- extern(C) pragma(mangle, "__isinff") pure int isinf(float x);
+ pragma(mangle, "__isinff") pure int isinf(float x);
///
- extern(C) pragma(mangle, "__isinf") pure int isinf(double x);
+ pragma(mangle, "__isinf") pure int isinf(double x);
///
- extern(C) pragma(mangle, real.sizeof == double.sizeof ? "__isinf" : "__isinfl")
+ pragma(mangle, real.sizeof == double.sizeof ? "__isinf" : "__isinfl")
pure int isinf(real x);
//int isnan(real-floating x);
///
- extern(C) pragma(mangle, "__isnanf") pure int isnan(float x);
+ pragma(mangle, "__isnanf") pure int isnan(float x);
///
- extern(C) pragma(mangle, "__isnan") pure int isnan(double x);
+ pragma(mangle, "__isnan") pure int isnan(double x);
///
- extern(C) pragma(mangle, real.sizeof == double.sizeof ? "__isnan" : "__isnanl")
+ pragma(mangle, real.sizeof == double.sizeof ? "__isnan" : "__isnanl")
pure int isnan(real x);
//int isnormal(real-floating x);
///
- pure int isnormal(float x) { return fpclassify(x) == FP_NORMAL; }
+ extern (D) pure int isnormal(float x) { return fpclassify(x) == FP_NORMAL; }
///
- pure int isnormal(double x) { return fpclassify(x) == FP_NORMAL; }
+ extern (D) pure int isnormal(double x) { return fpclassify(x) == FP_NORMAL; }
///
- pure int isnormal(real x) { return fpclassify(x) == FP_NORMAL; }
+ extern (D) pure int isnormal(real x) { return fpclassify(x) == FP_NORMAL; }
//int signbit(real-floating x);
///
- extern(C) pragma(mangle, "__signbitf") pure int signbit(float x);
+ pragma(mangle, "__signbitf") pure int signbit(float x);
///
- extern(C) pragma(mangle, "__signbit") pure int signbit(double x);
+ pragma(mangle, "__signbit") pure int signbit(double x);
///
- extern(C) pragma(mangle, real.sizeof == double.sizeof ? "__signbit" : "__signbitl")
+ pragma(mangle, real.sizeof == double.sizeof ? "__signbit" : "__signbitl")
pure int signbit(real x);
- }
}
else version (CRuntime_Musl)
{
@@ -736,16 +736,16 @@ else version (CRuntime_Musl)
int __signbitl(real x);
}
- extern (D) pure
- {
//int fpclassify(real-floating x);
///
- extern(C) pragma(mangle, "__fpclassifyf") int fpclassify(float x);
+ pragma(mangle, "__fpclassifyf") pure int fpclassify(float x);
///
- extern(C) pragma(mangle, "__fpclassify") int fpclassify(double x);
+ pragma(mangle, "__fpclassify") pure int fpclassify(double x);
///
- extern(C) pragma(mangle, real.sizeof == double.sizeof ? "__fpclassify" : "__fpclassifyl")
- int fpclassify(real x);
+ pragma(mangle, real.sizeof == double.sizeof ? "__fpclassify" : "__fpclassifyl")
+ pure int fpclassify(real x);
+ extern (D) pure
+ {
private uint __FLOAT_BITS(float __f)
{
union __u_t {
@@ -813,16 +813,16 @@ else version (CRuntime_Musl)
int isnormal(double x) { return fpclassify(x) == FP_NORMAL; }
///
int isnormal(real x) { return fpclassify(x) == FP_NORMAL; }
+ }
//int signbit(real-floating x);
///
- extern(C) pragma(mangle, "__signbitf") int signbit(float x);
+ pragma(mangle, "__signbitf") pure int signbit(float x);
///
- extern(C) pragma(mangle, "__signbit") int signbit(double x);
+ pragma(mangle, "__signbit") pure int signbit(double x);
///
- extern(C) pragma(mangle, real.sizeof == double.sizeof ? "__signbit" : "__signbitl")
- int signbit(real x);
- }
+ pragma(mangle, real.sizeof == double.sizeof ? "__signbit" : "__signbitl")
+ pure int signbit(real x);
}
else version (CRuntime_UClibc)
{
@@ -870,55 +870,55 @@ else version (CRuntime_UClibc)
int __signbit(double x);
int __signbitl(real x);
- extern (D)
- {
///
- extern(C) pragma(mangle, "__fpclassifyf") int fpclassify(float x);
+ pragma(mangle, "__fpclassifyf") int fpclassify(float x);
///
- extern(C) pragma(mangle, "__fpclassify") int fpclassify(double x);
+ pragma(mangle, "__fpclassify") int fpclassify(double x);
///
- extern(C) pragma(mangle, real.sizeof == double.sizeof ? "__fpclassify" : "__fpclassifyl")
+ pragma(mangle, real.sizeof == double.sizeof ? "__fpclassify" : "__fpclassifyl")
int fpclassify(real x);
///
- extern(C) pragma(mangle, "__finitef") int isfinite(float x);
+ pragma(mangle, "__finitef") int isfinite(float x);
///
- extern(C) pragma(mangle, "__finite") int isfinite(double x);
+ pragma(mangle, "__finite") int isfinite(double x);
///
- extern(C) pragma(mangle, real.sizeof == double.sizeof ? "__finite" : "__finitel")
+ pragma(mangle, real.sizeof == double.sizeof ? "__finite" : "__finitel")
int isfinite(real x);
///
- extern(C) pragma(mangle, "__isinff") int isinf(float x);
+ pragma(mangle, "__isinff") int isinf(float x);
///
- extern(C) pragma(mangle, "__isinf") int isinf(double x);
+ pragma(mangle, "__isinf") int isinf(double x);
///
- extern(C) pragma(mangle, real.sizeof == double.sizeof ? "__isinf" : "__isinfl")
+ pragma(mangle, real.sizeof == double.sizeof ? "__isinf" : "__isinfl")
int isinf(real x);
///
- extern(C) pragma(mangle, "__isnanf") int isnan(float x);
+ pragma(mangle, "__isnanf") int isnan(float x);
///
- extern(C) pragma(mangle, "__isnan") int isnan(double x);
+ pragma(mangle, "__isnan") int isnan(double x);
///
- extern(C) pragma(mangle, real.sizeof == double.sizeof ? "__isnan" : "__isnanl")
+ pragma(mangle, real.sizeof == double.sizeof ? "__isnan" : "__isnanl")
int isnan(real x);
+ extern (D)
+ {
///
int isnormal(float x) { return fpclassify(x) == FP_NORMAL; }
///
int isnormal(double x) { return fpclassify(x) == FP_NORMAL; }
///
int isnormal(real x) { return fpclassify(x) == FP_NORMAL; }
+ }
///
- extern(C) pragma(mangle, "__signbitf") int signbit(float x);
+ pragma(mangle, "__signbitf") int signbit(float x);
///
- extern(C) pragma(mangle, "__signbit") int signbit(double x);
+ pragma(mangle, "__signbit") int signbit(double x);
///
- extern(C) pragma(mangle, real.sizeof == double.sizeof ? "__signbit" : "__signbitl")
+ pragma(mangle, real.sizeof == double.sizeof ? "__signbit" : "__signbitl")
int signbit(real x);
- }
}
else version (Darwin)
{
@@ -999,40 +999,40 @@ else version (Darwin)
pure int __isnanl(real x);
}
- extern (D)
- {
//int fpclassify(real-floating x);
///
- extern(C) pragma(mangle, "__fpclassifyf") pure int fpclassify(float x);
+ pragma(mangle, "__fpclassifyf") pure int fpclassify(float x);
///
- extern(C) pragma(mangle, "__fpclassifyd") pure int fpclassify(double x);
+ pragma(mangle, "__fpclassifyd") pure int fpclassify(double x);
///
- extern(C) pragma(mangle, __fpclassifyl.mangleof) pure int fpclassify(real x);
+ pragma(mangle, __fpclassifyl.mangleof) pure int fpclassify(real x);
//int isfinite(real-floating x);
///
- extern(C) pragma(mangle, "__isfinitef") pure int isfinite(float x);
+ pragma(mangle, "__isfinitef") pure int isfinite(float x);
///
- extern(C) pragma(mangle, "__isfinited") pure int isfinite(double x);
+ pragma(mangle, "__isfinited") pure int isfinite(double x);
///
- extern(C) pragma(mangle, __isfinitel.mangleof) pure int isfinite(real x);
+ pragma(mangle, __isfinitel.mangleof) pure int isfinite(real x);
//int isinf(real-floating x);
///
- extern(C) pragma(mangle, "__isinff") pure int isinf(float x);
+ pragma(mangle, "__isinff") pure int isinf(float x);
///
- extern(C) pragma(mangle, "__isinfd") pure int isinf(double x);
+ pragma(mangle, "__isinfd") pure int isinf(double x);
///
- extern(C) pragma(mangle, __isinfl.mangleof) pure int isinf(real x);
+ pragma(mangle, __isinfl.mangleof) pure int isinf(real x);
//int isnan(real-floating x);
///
- extern(C) pragma(mangle, "__isnanf") pure int isnan(float x);
+ pragma(mangle, "__isnanf") pure int isnan(float x);
///
- extern(C) pragma(mangle, "__isnand") pure int isnan(double x);
+ pragma(mangle, "__isnand") pure int isnan(double x);
///
- extern(C) pragma(mangle, __isnanl.mangleof) pure int isnan(real x);
+ pragma(mangle, __isnanl.mangleof) pure int isnan(real x);
+ extern (D)
+ {
//int isnormal(real-floating x);
///
pure int isnormal(float x) { return fpclassify(x) == FP_NORMAL; }
@@ -1040,15 +1040,15 @@ else version (Darwin)
pure int isnormal(double x) { return fpclassify(x) == FP_NORMAL; }
///
pure int isnormal(real x) { return fpclassify(x) == FP_NORMAL; }
+ }
//int signbit(real-floating x);
///
- extern(C) pragma(mangle, "__signbitf") pure int signbit(float x);
+ pragma(mangle, "__signbitf") pure int signbit(float x);
///
- extern(C) pragma(mangle, "__signbitd") pure int signbit(double x);
+ pragma(mangle, "__signbitd") pure int signbit(double x);
///
- extern(C) pragma(mangle, "__signbitl") pure int signbit(real x);
- }
+ pragma(mangle, "__signbitl") pure int signbit(real x);
}
else version (FreeBSD)
{
@@ -1092,56 +1092,53 @@ else version (FreeBSD)
pure int __signbitf(float);
pure int __signbitl(real);
- extern (D)
- {
//int fpclassify(real-floating x);
///
- extern(C) pragma(mangle, "__fpclassifyf") pure int fpclassify(float x);
+ pragma(mangle, "__fpclassifyf") pure int fpclassify(float x);
///
- extern(C) pragma(mangle, "__fpclassifyd") pure int fpclassify(double x);
+ pragma(mangle, "__fpclassifyd") pure int fpclassify(double x);
///
- extern(C) pragma(mangle, "__fpclassifyl") pure int fpclassify(real x);
+ pragma(mangle, "__fpclassifyl") pure int fpclassify(real x);
//int isfinite(real-floating x);
///
- extern(C) pragma(mangle, "__isfinitef") pure int isfinite(float x);
+ pragma(mangle, "__isfinitef") pure int isfinite(float x);
///
- extern(C) pragma(mangle, "__isfinite") pure int isfinite(double x);
+ pragma(mangle, "__isfinite") pure int isfinite(double x);
///
- extern(C) pragma(mangle, "__isfinitel") pure int isfinite(real x);
+ pragma(mangle, "__isfinitel") pure int isfinite(real x);
//int isinf(real-floating x);
///
- extern(C) pragma(mangle, "__isinff") pure int isinf(float x);
+ pragma(mangle, "__isinff") pure int isinf(float x);
///
- pure int isinf(double x) { return __isinfl(x); }
+ extern (D) pure int isinf(double x) { return __isinfl(x); }
///
- extern(C) pragma(mangle, "__isinfl") pure int isinf(real x);
+ pragma(mangle, "__isinfl") pure int isinf(real x);
//int isnan(real-floating x);
///
- pure int isnan(float x) { return __isnanl(x); }
+ extern (D) pure int isnan(float x) { return __isnanl(x); }
///
- pure int isnan(double x) { return __isnanl(x); }
+ extern (D) pure int isnan(double x) { return __isnanl(x); }
///
- extern(C) pragma(mangle, "__isnanl") pure int isnan(real x);
+ pragma(mangle, "__isnanl") pure int isnan(real x);
//int isnormal(real-floating x);
///
- extern(C) pragma(mangle, "__isnormalf") pure int isnormal(float x);
+ pragma(mangle, "__isnormalf") pure int isnormal(float x);
///
- extern(C) pragma(mangle, "__isnormal") pure int isnormal(double x);
+ pragma(mangle, "__isnormal") pure int isnormal(double x);
///
- extern(C) pragma(mangle, "__isnormall") pure int isnormal(real x);
+ pragma(mangle, "__isnormall") pure int isnormal(real x);
//int signbit(real-floating x);
///
- extern(C) pragma(mangle, "__signbitf") pure int signbit(float x);
+ pragma(mangle, "__signbitf") pure int signbit(float x);
///
- extern(C) pragma(mangle, "__signbit") pure int signbit(double x);
+ pragma(mangle, "__signbit") pure int signbit(double x);
///
- pure int signbit(real x) { return __signbit(x); }
- }
+ extern (D) pure int signbit(real x) { return __signbit(x); }
}
else version (OpenBSD)
{
@@ -1185,56 +1182,53 @@ else version (OpenBSD)
pure int __signbitf(float);
pure int __signbitl(real);
- extern (D)
- {
//int fpclassify(real-floating x);
///
- extern(C) pragma(mangle, "__fpclassifyf") pure int fpclassify(float x);
+ pragma(mangle, "__fpclassifyf") pure int fpclassify(float x);
///
- extern(C) pragma(mangle, "__fpclassify") pure int fpclassify(double x);
+ pragma(mangle, "__fpclassify") pure int fpclassify(double x);
///
- extern(C) pragma(mangle, "__fpclassifyl") pure int fpclassify(real x);
+ pragma(mangle, "__fpclassifyl") pure int fpclassify(real x);
//int isfinite(real-floating x);
///
- extern(C) pragma(mangle, "__isfinitef") pure int isfinite(float x);
+ pragma(mangle, "__isfinitef") pure int isfinite(float x);
///
- extern(C) pragma(mangle, "__isfinite") pure int isfinite(double x);
+ pragma(mangle, "__isfinite") pure int isfinite(double x);
///
- extern(C) pragma(mangle, "__isfinitel") pure int isfinite(real x);
+ pragma(mangle, "__isfinitel") pure int isfinite(real x);
//int isinf(real-floating x);
///
- extern(C) pragma(mangle, "__isinff") pure int isinf(float x);
+ pragma(mangle, "__isinff") pure int isinf(float x);
///
- pure int isinf(double x) { return __isinfl(x); }
+ extern (D) pure int isinf(double x) { return __isinfl(x); }
///
- extern(C) pragma(mangle, "__isinfl") pure int isinf(real x);
+ pragma(mangle, "__isinfl") pure int isinf(real x);
//int isnan(real-floating x);
///
- pure int isnan(float x) { return __isnanl(x); }
+ extern (D) pure int isnan(float x) { return __isnanl(x); }
///
- pure int isnan(double x) { return __isnanl(x); }
+ extern (D) pure int isnan(double x) { return __isnanl(x); }
///
- extern(C) pragma(mangle, "__isnanl") pure int isnan(real x);
+ pragma(mangle, "__isnanl") pure int isnan(real x);
//int isnormal(real-floating x);
///
- extern(C) pragma(mangle, "__isnormalf") pure int isnormal(float x);
+ pragma(mangle, "__isnormalf") pure int isnormal(float x);
///
- extern(C) pragma(mangle, "__isnormal") pure int isnormal(double x);
+ pragma(mangle, "__isnormal") pure int isnormal(double x);
///
- extern(C) pragma(mangle, "__isnormall") pure int isnormal(real x);
+ pragma(mangle, "__isnormall") pure int isnormal(real x);
//int signbit(real-floating x);
///
- extern(C) pragma(mangle, "__signbitf") pure int signbit(float x);
+ pragma(mangle, "__signbitf") pure int signbit(float x);
///
- extern(C) pragma(mangle, "__signbit") pure int signbit(double x);
+ pragma(mangle, "__signbit") pure int signbit(double x);
///
- pure int signbit(real x) { return __signbit(x); }
- }
+ extern (D) pure int signbit(real x) { return __signbit(x); }
}
else version (NetBSD)
{
@@ -1266,17 +1260,17 @@ else version (NetBSD)
pure uint __fpclassifyd(double x);
pure uint __fpclassifyl(real x);
- extern (D)
- {
//int fpclassify(real-floating x);
///
- extern(C) pragma(mangle, "__fpclassifyf") pure int fpclassify(float x);
+ pragma(mangle, "__fpclassifyf") pure int fpclassify(float x);
///
- extern(C) pragma(mangle, "__fpclassifyd") pure int fpclassify(double x);
+ pragma(mangle, "__fpclassifyd") pure int fpclassify(double x);
///
- extern(C) pragma(mangle, real.sizeof == double.sizeof ? "__fpclassifyd" : "__fpclassifyl")
+ pragma(mangle, real.sizeof == double.sizeof ? "__fpclassifyd" : "__fpclassifyl")
pure int fpclassify(real x);
+ extern (D)
+ {
//int isfinite(real-floating x);
///
pure int isfinite(float x) { return fpclassify(x) >= FP_NORMAL; }
@@ -1360,32 +1354,29 @@ else version (DragonFlyBSD)
pure int __signbitf(float);
pure int __signbitl(real);
- extern (D)
- {
- extern(C) pragma(mangle, "__fpclassifyf") pure int fpclassify(float x);
- extern(C) pragma(mangle, "__fpclassifyd") pure int fpclassify(double x);
- extern(C) pragma(mangle, "__fpclassifyl") pure int fpclassify(real x);
+ pragma(mangle, "__fpclassifyf") pure int fpclassify(float x);
+ pragma(mangle, "__fpclassifyd") pure int fpclassify(double x);
+ pragma(mangle, "__fpclassifyl") pure int fpclassify(real x);
- extern(C) pragma(mangle, "__isfinitef") pure int isfinite(float x);
- extern(C) pragma(mangle, "__isfinite") pure int isfinite(double x);
- extern(C) pragma(mangle, "__isfinitel") pure int isfinite(real x);
+ pragma(mangle, "__isfinitef") pure int isfinite(float x);
+ pragma(mangle, "__isfinite") pure int isfinite(double x);
+ pragma(mangle, "__isfinitel") pure int isfinite(real x);
- extern(C) pragma(mangle, "__isinff") pure int isinf(float x);
- extern(C) pragma(mangle, "__isinf") pure int isinf(double x);
- extern(C) pragma(mangle, "__isinfl") pure int isinf(real x);
+ pragma(mangle, "__isinff") pure int isinf(float x);
+ pragma(mangle, "__isinf") pure int isinf(double x);
+ pragma(mangle, "__isinfl") pure int isinf(real x);
- extern(C) pragma(mangle, "__isnanf") pure int isnan(float x);
- extern(C) pragma(mangle, "__isnan") pure int isnan(double x);
- extern(C) pragma(mangle, "__isnanl") pure int isnan(real x);
+ pragma(mangle, "__isnanf") pure int isnan(float x);
+ pragma(mangle, "__isnan") pure int isnan(double x);
+ pragma(mangle, "__isnanl") pure int isnan(real x);
- extern(C) pragma(mangle, "__isnormalf") pure int isnormal(float x);
- extern(C) pragma(mangle, "__isnormal") pure int isnormal(double x);
- extern(C) pragma(mangle, "__isnormall") pure int isnormal(real x);
+ pragma(mangle, "__isnormalf") pure int isnormal(float x);
+ pragma(mangle, "__isnormal") pure int isnormal(double x);
+ pragma(mangle, "__isnormall") pure int isnormal(real x);
- extern(C) pragma(mangle, "__signbitf") pure int signbit(float x);
- extern(C) pragma(mangle, "__signbit") pure int signbit(double x);
- extern(C) pragma(mangle, "__signbitl") pure int signbit(real x);
- }
+ pragma(mangle, "__signbitf") pure int signbit(float x);
+ pragma(mangle, "__signbit") pure int signbit(double x);
+ pragma(mangle, "__signbitl") pure int signbit(real x);
}
else version (Solaris)
{
@@ -1393,17 +1384,14 @@ else version (Solaris)
pure int __isnan(double x);
pure int __isnanl(real x);
- extern (D)
- {
//int isnan(real-floating x);
///
- extern(C) pragma(mangle, "__isnanf") pure int isnan(float x);
+ pragma(mangle, "__isnanf") pure int isnan(float x);
///
- extern(C) pragma(mangle, "__isnan") pure int isnan(double x);
+ pragma(mangle, "__isnan") pure int isnan(double x);
///
- extern(C) pragma(mangle, real.sizeof == double.sizeof ? "__isnan" : "__isnanl")
+ pragma(mangle, real.sizeof == double.sizeof ? "__isnan" : "__isnanl")
pure int isnan(real x);
- }
}
else version (CRuntime_Bionic)
{
@@ -1448,89 +1436,86 @@ else version (CRuntime_Bionic)
pure int __signbitf(float);
pure int __signbitl(real);
- extern (D)
- {
//int fpclassify(real-floating x);
///
- extern(C) pragma(mangle, "__fpclassifyf") pure int fpclassify(float x);
+ pragma(mangle, "__fpclassifyf") pure int fpclassify(float x);
///
- extern(C) pragma(mangle, "__fpclassifyd") pure int fpclassify(double x);
+ pragma(mangle, "__fpclassifyd") pure int fpclassify(double x);
///
- extern(C) pragma(mangle, "__fpclassifyl") pure int fpclassify(real x);
+ pragma(mangle, "__fpclassifyl") pure int fpclassify(real x);
//int isfinite(real-floating x);
///
- extern(C) pragma(mangle, "__isfinitef") pure int isfinite(float x);
+ pragma(mangle, "__isfinitef") pure int isfinite(float x);
///
- extern(C) pragma(mangle, "__isfinite") pure int isfinite(double x);
+ pragma(mangle, "__isfinite") pure int isfinite(double x);
///
- extern(C) pragma(mangle, "__isfinitel") pure int isfinite(real x);
+ pragma(mangle, "__isfinitel") pure int isfinite(real x);
//int isinf(real-floating x);
///
- extern(C) pragma(mangle, "__isinff") pure int isinf(float x);
+ pragma(mangle, "__isinff") pure int isinf(float x);
///
- extern(C) pragma(mangle, "__isinf") pure int isinf(double x);
+ pragma(mangle, "__isinf") pure int isinf(double x);
///
- extern(C) pragma(mangle, "__isinfl") pure int isinf(real x);
+ pragma(mangle, "__isinfl") pure int isinf(real x);
//int isnan(real-floating x);
///
- extern(C) pragma(mangle, "isnanf") pure int isnan(float x);
+ pragma(mangle, "isnanf") pure int isnan(float x);
///
- extern(C) pragma(mangle, "__isnanl") pure int isnan(real x);
+ pragma(mangle, "__isnanl") pure int isnan(real x);
//int isnormal(real-floating x);
///
- extern(C) pragma(mangle, "__isnormalf") pure int isnormal(float x);
+ pragma(mangle, "__isnormalf") pure int isnormal(float x);
///
- extern(C) pragma(mangle, "__isnormal") pure int isnormal(double x);
+ pragma(mangle, "__isnormal") pure int isnormal(double x);
///
- extern(C) pragma(mangle, "__isnormall") pure int isnormal(real x);
+ pragma(mangle, "__isnormall") pure int isnormal(real x);
//int signbit(real-floating x);
///
- extern(C) pragma(mangle, "__signbitf") pure int signbit(float x);
+ pragma(mangle, "__signbitf") pure int signbit(float x);
///
- extern(C) pragma(mangle, "__signbit") pure int signbit(double x);
+ pragma(mangle, "__signbit") pure int signbit(double x);
///
- extern(C) pragma(mangle, "__signbitl") pure int signbit(real x);
- }
+ pragma(mangle, "__signbitl") pure int signbit(real x);
}
extern (D)
{
//int isgreater(real-floating x, real-floating y);
///
- pure int isgreater(float x, float y) { return x > y && !isunordered(x, y); }
+ pure int isgreater(float x, float y) { return x > y; }
///
- pure int isgreater(double x, double y) { return x > y && !isunordered(x, y); }
+ pure int isgreater(double x, double y) { return x > y; }
///
- pure int isgreater(real x, real y) { return x > y && !isunordered(x, y); }
+ pure int isgreater(real x, real y) { return x > y; }
//int isgreaterequal(real-floating x, real-floating y);
///
- pure int isgreaterequal(float x, float y) { return x >= y && !isunordered(x, y); }
+ pure int isgreaterequal(float x, float y) { return x >= y; }
///
- pure int isgreaterequal(double x, double y) { return x >= y && !isunordered(x, y); }
+ pure int isgreaterequal(double x, double y) { return x >= y; }
///
- pure int isgreaterequal(real x, real y) { return x >= y && !isunordered(x, y); }
+ pure int isgreaterequal(real x, real y) { return x >= y; }
//int isless(real-floating x, real-floating y);
///
- pure int isless(float x, float y) { return x < y && !isunordered(x, y); }
+ pure int isless(float x, float y) { return x < y; }
///
- pure int isless(double x, double y) { return x < y && !isunordered(x, y); }
+ pure int isless(double x, double y) { return x < y; }
///
- pure int isless(real x, real y) { return x < y && !isunordered(x, y); }
+ pure int isless(real x, real y) { return x < y; }
//int islessequal(real-floating x, real-floating y);
///
- pure int islessequal(float x, float y) { return x <= y && !isunordered(x, y); }
+ pure int islessequal(float x, float y) { return x <= y; }
///
- pure int islessequal(double x, double y) { return x <= y && !isunordered(x, y); }
+ pure int islessequal(double x, double y) { return x <= y; }
///
- pure int islessequal(real x, real y) { return x <= y && !isunordered(x, y); }
+ pure int islessequal(real x, real y) { return x <= y; }
//int islessgreater(real-floating x, real-floating y);
///
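
    The isgreater/isless family above drops the explicit !isunordered guards. In D, as
    in IEEE 754, the ordered comparisons >, >=, <, <= already evaluate to false when
    either operand is NaN, so the results are unchanged; only the FE_INVALID-signalling
    behaviour of the raw operators differs from the C macros. A small check of that
    assumption, not part of the patch:

        import core.stdc.math : isgreater, isless, isunordered;

        unittest
        {
            immutable nan = double.nan;
            assert(!isgreater(nan, 1.0));       // unordered operands: x > y is false
            assert(!isless(1.0, nan));          // ... and so is x < y
            assert(isunordered(nan, 1.0));      // unorderedness is still detectable
            assert(isgreater(2.0, 1.0) && isless(1.0, 2.0));
        }
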
diff --git a/libphobos/libdruntime/core/stdc/stdint.d b/libphobos/libdruntime/core/stdc/stdint.d
index ac71b1d..9db2fda 100644
--- a/libphobos/libdruntime/core/stdc/stdint.d
+++ b/libphobos/libdruntime/core/stdc/stdint.d
@@ -36,7 +36,20 @@ extern (C):
nothrow:
@nogc:
-
+// These are defined the same way as D basic types, so the definition is
+// platform-independent
+alias int8_t = byte; ///
+alias int16_t = short; ///
+alias uint8_t = ubyte; ///
+alias uint16_t = ushort; ///
+
+// The 32-bit types need to be defined on a per-platform basis, because
+// they might have C++ binary mangling of `int` or `long`.
+// Likewise, the 64-bit types might be mangled as `long` or `long long`.
+
+// It would seem correct to define intmax_t and uintmax_t here, but C and C++
+// compilers don't in practice always set them to the maximum supported value.
+// See https://quuxplusone.github.io/blog/2019/02/28/is-int128-integral/
static if (is(ucent))
{
alias int128_t = cent; ///
@@ -45,10 +58,6 @@ static if (is(ucent))
version (Windows)
{
- alias int8_t = byte; ///
- alias int16_t = short; ///
- alias uint8_t = ubyte; ///
- alias uint16_t = ushort; ///
version (CRuntime_DigitalMars)
{
alias int32_t = cpp_long; ///
@@ -62,23 +71,23 @@ version (Windows)
alias int64_t = long; ///
alias uint64_t = ulong; ///
- alias int_least8_t = byte; ///
- alias uint_least8_t = ubyte; ///
- alias int_least16_t = short; ///
- alias uint_least16_t = ushort; ///
- alias int_least32_t = int32_t; ///
+ alias int_least8_t = byte; ///
+ alias uint_least8_t = ubyte; ///
+ alias int_least16_t = short; ///
+ alias uint_least16_t = ushort; ///
+ alias int_least32_t = int32_t; ///
alias uint_least32_t = uint32_t; ///
- alias int_least64_t = long; ///
- alias uint_least64_t = ulong; ///
-
- alias int_fast8_t = byte; ///
- alias uint_fast8_t = ubyte; ///
- alias int_fast16_t = int; ///
- alias uint_fast16_t = uint; ///
- alias int_fast32_t = int32_t; ///
+ alias int_least64_t = long; ///
+ alias uint_least64_t = ulong; ///
+
+ alias int_fast8_t = byte; ///
+ alias uint_fast8_t = ubyte; ///
+ alias int_fast16_t = int; ///
+ alias uint_fast16_t = uint; ///
+ alias int_fast32_t = int32_t; ///
alias uint_fast32_t = uint32_t; ///
- alias int_fast64_t = long; ///
- alias uint_fast64_t = ulong; ///
+ alias int_fast64_t = long; ///
+ alias uint_fast64_t = ulong; ///
alias intptr_t = ptrdiff_t; ///
alias uintptr_t = size_t; ///
@@ -87,10 +96,6 @@ version (Windows)
}
else version (Darwin)
{
- alias int8_t = byte; ///
- alias int16_t = short; ///
- alias uint8_t = ubyte; ///
- alias uint16_t = ushort; ///
alias int32_t = int; ///
alias uint32_t = uint; ///
alias int64_t = cpp_longlong; ///
@@ -121,23 +126,19 @@ else version (Darwin)
}
else version (Posix)
{
- alias int8_t = byte; ///
- alias int16_t = short; ///
- alias uint8_t = ubyte; ///
- alias uint16_t = ushort; ///
- alias int32_t = int; ///
- alias uint32_t = uint; ///
- alias int64_t = long; ///
- alias uint64_t = ulong; ///
-
- alias int_least8_t = byte; ///
- alias uint_least8_t = ubyte; ///
- alias int_least16_t = short; ///
+ alias int32_t = int; ///
+ alias uint32_t = uint; ///
+ alias int64_t = long; ///
+ alias uint64_t = ulong; ///
+
+ alias int_least8_t = byte; ///
+ alias uint_least8_t = ubyte; ///
+ alias int_least16_t = short; ///
alias uint_least16_t = ushort; ///
- alias int_least32_t = int; ///
- alias uint_least32_t = uint; ///
- alias int_least64_t = long; ///
- alias uint_least64_t = ulong; ///
+ alias int_least32_t = int; ///
+ alias uint_least32_t = uint; ///
+ alias int_least64_t = long; ///
+ alias uint_least64_t = ulong;///
version (FreeBSD)
{
@@ -166,13 +167,13 @@ else version (Posix)
alias int_fast32_t = ptrdiff_t; ///
alias uint_fast32_t = size_t; ///
}
- alias int_fast64_t = long; ///
- alias uint_fast64_t = ulong; ///
+ alias int_fast64_t = long; ///
+ alias uint_fast64_t = ulong; ///
alias intptr_t = ptrdiff_t; ///
- alias uintptr_t = size_t; ///
- alias intmax_t = long; ///
- alias uintmax_t = ulong; ///
+ alias uintptr_t = size_t; ///
+ alias intmax_t = long; ///
+ alias uintmax_t = ulong; ///
}
else
{
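
    The comment block introduced at the top of this file explains the split: the
    8/16-bit aliases are identical D basic types everywhere, while the 32/64-bit ones
    are declared per platform because C++ interop may require them to mangle as int,
    long or long long. A hedged illustration of why that matters when binding a C++
    signature (the function sum here is hypothetical):

        import core.stdc.stdint : int32_t, int64_t, uint8_t;

        // A C++ declaration such as `std::int64_t sum(const std::int32_t*, std::uint8_t)`
        // must be bound with the platform's aliases so the D-side mangling matches,
        // e.g. int64_t may be plain `long` on Posix but `cpp_longlong` on Darwin.
        extern(C++) int64_t sum(const(int32_t)* values, uint8_t count);
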
diff --git a/libphobos/libdruntime/core/stdcpp/allocator.d b/libphobos/libdruntime/core/stdcpp/allocator.d
new file mode 100644
index 0000000..abf97c4
--- /dev/null
+++ b/libphobos/libdruntime/core/stdcpp/allocator.d
@@ -0,0 +1,373 @@
+/**
+ * D binding to C++ std::allocator.
+ *
+ * Copyright: Copyright (c) 2019 D Language Foundation
+ * License: Distributed under the
+ * $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ * (See accompanying file LICENSE)
+ * Authors: Manu Evans
+ * Source: $(DRUNTIMESRC core/stdcpp/allocator.d)
+ */
+
+module core.stdcpp.allocator;
+
+import core.stdcpp.new_;
+import core.stdcpp.xutility : StdNamespace, __cpp_sized_deallocation, __cpp_aligned_new;
+
+extern(C++, (StdNamespace)):
+
+/**
+ * Allocators are classes that define memory models to be used by some parts of
+ * the C++ Standard Library, and most specifically, by STL containers.
+ */
+extern(C++, class)
+struct allocator(T)
+{
+ static assert(!is(T == const), "The C++ Standard forbids containers of const elements because allocator!(const T) is ill-formed.");
+ static assert(!is(T == immutable), "immutable is not representable in C++");
+ static assert(!is(T == class), "Instantiation with `class` is not supported; D can't mangle the base (non-pointer) type of a class. Use `extern (C++, class) struct T { ... }` instead.");
+extern(D):
+
+ ///
+ this(U)(ref allocator!U) {}
+
+ ///
+ alias size_type = size_t;
+ ///
+ alias difference_type = ptrdiff_t;
+ ///
+ alias pointer = T*;
+ ///
+ alias value_type = T;
+
+ ///
+ enum propagate_on_container_move_assignment = true;
+ ///
+ enum is_always_equal = true;
+
+ ///
+ alias rebind(U) = allocator!U;
+
+ version (CppRuntime_Microsoft)
+ {
+ import core.stdcpp.xutility : _MSC_VER;
+
+ ///
+ T* allocate(size_t count) @nogc
+ {
+ static if (_MSC_VER <= 1800)
+ {
+ import core.stdcpp.xutility : _Xbad_alloc;
+ if (count == 0)
+ return null;
+ void* mem;
+ if ((size_t.max / T.sizeof < count) || (mem = __cpp_new(count * T.sizeof)) is null)
+ _Xbad_alloc();
+ return cast(T*)mem;
+ }
+ else
+ {
+ enum _Align = _New_alignof!T;
+
+ static size_t _Get_size_of_n(T)(const size_t _Count)
+ {
+ static if (T.sizeof == 1)
+ return _Count;
+ else
+ {
+ enum size_t _Max_possible = size_t.max / T.sizeof;
+ return _Max_possible < _Count ? size_t.max : _Count * T.sizeof;
+ }
+ }
+
+ const size_t _Bytes = _Get_size_of_n!T(count);
+ if (_Bytes == 0)
+ return null;
+
+ static if (!__cpp_aligned_new || _Align <= __STDCPP_DEFAULT_NEW_ALIGNMENT__)
+ {
+ version (INTEL_ARCH)
+ {
+ if (_Bytes >= _Big_allocation_threshold)
+ return cast(T*)_Allocate_manually_vector_aligned(_Bytes);
+ }
+ return cast(T*)__cpp_new(_Bytes);
+ }
+ else
+ {
+ size_t _Passed_align = _Align;
+ version (INTEL_ARCH)
+ {
+ if (_Bytes >= _Big_allocation_threshold)
+ _Passed_align = _Align < _Big_allocation_alignment ? _Big_allocation_alignment : _Align;
+ }
+ return cast(T*)__cpp_new_aligned(_Bytes, cast(align_val_t)_Passed_align);
+ }
+ }
+ }
+ ///
+ void deallocate(T* ptr, size_t count) @nogc
+ {
+ static if (_MSC_VER <= 1800)
+ {
+ __cpp_delete(ptr);
+ }
+ else
+ {
+            // this matches the behaviour observed in VS2017
+ void* _Ptr = ptr;
+ size_t _Bytes = T.sizeof * count;
+
+ enum _Align = _New_alignof!T;
+ static if (!__cpp_aligned_new || _Align <= __STDCPP_DEFAULT_NEW_ALIGNMENT__)
+ {
+ version (INTEL_ARCH)
+ {
+ if (_Bytes >= _Big_allocation_threshold)
+ _Adjust_manually_vector_aligned(_Ptr, _Bytes);
+ }
+ static if (_MSC_VER <= 1900)
+ __cpp_delete(ptr);
+ else
+ __cpp_delete_size(_Ptr, _Bytes);
+ }
+ else
+ {
+ size_t _Passed_align = _Align;
+ version (INTEL_ARCH)
+ {
+ if (_Bytes >= _Big_allocation_threshold)
+ _Passed_align = _Align < _Big_allocation_alignment ? _Big_allocation_alignment : _Align;
+ }
+ __cpp_delete_size_aligned(_Ptr, _Bytes, cast(align_val_t)_Passed_align);
+ }
+ }
+ }
+
+ ///
+ enum size_t max_size = size_t.max / T.sizeof;
+ }
+ else version (CppRuntime_Gcc)
+ {
+ ///
+ T* allocate(size_t count, const(void)* = null) @nogc
+ {
+// if (count > max_size)
+// std::__throw_bad_alloc();
+
+ static if (__cpp_aligned_new && T.alignof > __STDCPP_DEFAULT_NEW_ALIGNMENT__)
+ return cast(T*)__cpp_new_aligned(count * T.sizeof, cast(align_val_t)T.alignof);
+ else
+ return cast(T*)__cpp_new(count * T.sizeof);
+ }
+ ///
+ void deallocate(T* ptr, size_t count) @nogc
+ {
+ // NOTE: GCC doesn't seem to use the sized delete when it's available...
+
+ static if (__cpp_aligned_new && T.alignof > __STDCPP_DEFAULT_NEW_ALIGNMENT__)
+ __cpp_delete_aligned(cast(void*)ptr, cast(align_val_t)T.alignof);
+ else
+ __cpp_delete(cast(void*)ptr);
+ }
+
+ ///
+ enum size_t max_size = (ptrdiff_t.max < size_t.max ? cast(size_t)ptrdiff_t.max : size_t.max) / T.sizeof;
+ }
+ else version (CppRuntime_Clang)
+ {
+ ///
+ T* allocate(size_t count, const(void)* = null) @nogc
+ {
+// if (count > max_size)
+// __throw_length_error("allocator!T.allocate(size_t n) 'n' exceeds maximum supported size");
+
+ static if (__cpp_aligned_new && T.alignof > __STDCPP_DEFAULT_NEW_ALIGNMENT__)
+ return cast(T*)__cpp_new_aligned(count * T.sizeof, cast(align_val_t)T.alignof);
+ else
+ return cast(T*)__cpp_new(count * T.sizeof);
+ }
+ ///
+ void deallocate(T* ptr, size_t count) @nogc
+ {
+ static if (__cpp_aligned_new && T.alignof > __STDCPP_DEFAULT_NEW_ALIGNMENT__)
+ {
+ static if (__cpp_sized_deallocation)
+ return __cpp_delete_size_aligned(cast(void*)ptr, count * T.sizeof, cast(align_val_t)T.alignof);
+ else
+ return __cpp_delete_aligned(cast(void*)ptr, cast(align_val_t)T.alignof);
+ }
+ else static if (__cpp_sized_deallocation)
+ return __cpp_delete_size(cast(void*)ptr, count * T.sizeof);
+ else
+ return __cpp_delete(cast(void*)ptr);
+ }
+
+ ///
+ enum size_t max_size = size_t.max / T.sizeof;
+ }
+ else
+ {
+ static assert(false, "C++ runtime not supported");
+ }
+}
+
+///
+extern(C++, (StdNamespace))
+struct allocator_traits(Alloc)
+{
+ import core.internal.traits : isTrue;
+
+ ///
+ alias allocator_type = Alloc;
+ ///
+ alias value_type = allocator_type.value_type;
+ ///
+ alias size_type = allocator_type.size_type;
+ ///
+ alias difference_type = allocator_type.difference_type;
+ ///
+ alias pointer = allocator_type.pointer;
+
+ ///
+ enum propagate_on_container_copy_assignment = isTrue!(allocator_type, "propagate_on_container_copy_assignment");
+ ///
+ enum propagate_on_container_move_assignment = isTrue!(allocator_type, "propagate_on_container_move_assignment");
+ ///
+ enum propagate_on_container_swap = isTrue!(allocator_type, "propagate_on_container_swap");
+ ///
+ enum is_always_equal = isTrue!(allocator_type, "is_always_equal");
+
+ ///
+ template rebind_alloc(U)
+ {
+ static if (__traits(hasMember, allocator_type, "rebind"))
+ alias rebind_alloc = allocator_type.rebind!U;
+ else
+ alias rebind_alloc = allocator_type!U;
+ }
+ ///
+ alias rebind_traits(U) = allocator_traits!(rebind_alloc!U);
+
+ ///
+ static size_type max_size()(auto ref allocator_type a)
+ {
+ static if (__traits(hasMember, allocator_type, "max_size"))
+ return a.max_size();
+ else
+ return size_type.max / value_type.sizeof;
+ }
+
+ ///
+ static allocator_type select_on_container_copy_construction()(auto ref allocator_type a)
+ {
+ static if (__traits(hasMember, allocator_type, "select_on_container_copy_construction"))
+ return a.select_on_container_copy_construction();
+ else
+ return a;
+ }
+}
+
+private:
+
+// MSVC has some bonus complexity!
+version (CppRuntime_Microsoft)
+{
+ // some versions of VS require a `* const` pointer mangling hack
+    // we need a way to supply the target VS version to the compiler
+ version = NeedsMangleHack;
+
+ version (X86)
+ version = INTEL_ARCH;
+ version (X86_64)
+ version = INTEL_ARCH;
+
+ // HACK: should we guess _DEBUG for `debug` builds?
+ version (_DEBUG)
+ enum _DEBUG = true;
+ else version (NDEBUG)
+ enum _DEBUG = false;
+ else
+ {
+ import core.stdcpp.xutility : __CXXLIB__;
+ enum _DEBUG = __CXXLIB__.length && 'd' == __CXXLIB__[$-1]; // libcmtd, msvcrtd
+ }
+
+ enum _New_alignof(T) = T.alignof > __STDCPP_DEFAULT_NEW_ALIGNMENT__ ? T.alignof : __STDCPP_DEFAULT_NEW_ALIGNMENT__;
+
+ version (INTEL_ARCH)
+ {
+ enum size_t _Big_allocation_threshold = 4096;
+ enum size_t _Big_allocation_alignment = 32;
+
+ static assert(2 * (void*).sizeof <= _Big_allocation_alignment, "Big allocation alignment should at least match vector register alignment");
+ static assert((v => v != 0 && (v & (v - 1)) == 0)(_Big_allocation_alignment), "Big allocation alignment must be a power of two");
+ static assert(size_t.sizeof == (void*).sizeof, "uintptr_t is not the same size as size_t");
+
+ // NOTE: this must track `_DEBUG` macro used in C++...
+ static if (_DEBUG)
+ enum size_t _Non_user_size = 2 * (void*).sizeof + _Big_allocation_alignment - 1;
+ else
+ enum size_t _Non_user_size = (void*).sizeof + _Big_allocation_alignment - 1;
+
+ version (Win64)
+ enum size_t _Big_allocation_sentinel = 0xFAFAFAFAFAFAFAFA;
+ else
+ enum size_t _Big_allocation_sentinel = 0xFAFAFAFA;
+
+ extern(D) // Template so it gets compiled according to _DEBUG.
+ void* _Allocate_manually_vector_aligned()(const size_t _Bytes) @nogc
+ {
+ size_t _Block_size = _Non_user_size + _Bytes;
+ if (_Block_size <= _Bytes)
+ _Block_size = size_t.max;
+
+ const size_t _Ptr_container = cast(size_t)__cpp_new(_Block_size);
+ if (!(_Ptr_container != 0))
+ assert(false, "invalid argument");
+ void* _Ptr = cast(void*)((_Ptr_container + _Non_user_size) & ~(_Big_allocation_alignment - 1));
+ (cast(size_t*)_Ptr)[-1] = _Ptr_container;
+
+ static if (_DEBUG)
+ (cast(size_t*)_Ptr)[-2] = _Big_allocation_sentinel;
+ return (_Ptr);
+ }
+
+ extern(D) // Template so it gets compiled according to _DEBUG.
+ void _Adjust_manually_vector_aligned()(ref void* _Ptr, ref size_t _Bytes) pure nothrow @nogc
+ {
+ _Bytes += _Non_user_size;
+
+ const size_t* _Ptr_user = cast(size_t*)_Ptr;
+ const size_t _Ptr_container = _Ptr_user[-1];
+
+ // If the following asserts, it likely means that we are performing
+ // an aligned delete on memory coming from an unaligned allocation.
+ static if (_DEBUG)
+ assert(_Ptr_user[-2] == _Big_allocation_sentinel, "invalid argument");
+
+ // Extra paranoia on aligned allocation/deallocation; ensure _Ptr_container is
+ // in range [_Min_back_shift, _Non_user_size]
+ static if (_DEBUG)
+ enum size_t _Min_back_shift = 2 * (void*).sizeof;
+ else
+ enum size_t _Min_back_shift = (void*).sizeof;
+
+ const size_t _Back_shift = cast(size_t)_Ptr - _Ptr_container;
+ if (!(_Back_shift >= _Min_back_shift && _Back_shift <= _Non_user_size))
+ assert(false, "invalid argument");
+ _Ptr = cast(void*)_Ptr_container;
+ }
+ }
+}
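
    The _Allocate_manually_vector_aligned/_Adjust_manually_vector_aligned pair above
    over-allocates, rounds the user pointer up to a 32-byte boundary, and stashes the
    original block pointer in the size_t slot just below it. A hypothetical standalone
    illustration of that layout (release-mode variant with a single hidden slot; malloc
    stands in for __cpp_new and error handling is omitted):

        import core.stdc.stdlib : malloc, free;

        enum size_t alignment = 32;                     // _Big_allocation_alignment
        enum size_t hidden    = size_t.sizeof + alignment - 1;

        void* alignedAlloc(size_t bytes) @nogc nothrow
        {
            auto raw  = cast(size_t) malloc(hidden + bytes);
            auto user = (raw + hidden) & ~(alignment - 1);  // round down, past the hidden slot
            (cast(size_t*) user)[-1] = raw;                 // remember the real block start
            return cast(void*) user;
        }

        void alignedFree(void* user) @nogc nothrow
        {
            free(cast(void*) (cast(size_t*) user)[-1]);     // recover and free the real block
        }
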
+version (CppRuntime_Clang)
+{
+ // Helper for container swap
+ package(core.stdcpp) void __swap_allocator(Alloc)(ref Alloc __a1, ref Alloc __a2)
+ {
+ import core.internal.lifetime : swap;
+
+ static if (allocator_traits!Alloc.propagate_on_container_swap)
+ swap(__a1, __a2);
+ }
+}
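
    A minimal usage sketch for the binding above, assuming only the allocate/deallocate
    members shown in this file; the big-allocation and alignment handling on MSVC
    happens internally, the caller just has to pass the same element count back to
    deallocate:

        import core.stdcpp.allocator : allocator;

        @nogc unittest
        {
            allocator!int alloc;
            int* p = alloc.allocate(16);    // forwards to ::operator new (aligned if needed)
            p[0 .. 16] = 0;
            alloc.deallocate(p, 16);        // count must match the original allocation
        }
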
diff --git a/libphobos/libdruntime/core/stdcpp/array.d b/libphobos/libdruntime/core/stdcpp/array.d
new file mode 100644
index 0000000..eb63d4c
--- /dev/null
+++ b/libphobos/libdruntime/core/stdcpp/array.d
@@ -0,0 +1,133 @@
+/**
+ * D header file for interaction with C++ std::array.
+ *
+ * Copyright: Copyright (c) 2018 D Language Foundation
+ * License: Distributed under the
+ * $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ * (See accompanying file LICENSE)
+ * Authors: Manu Evans
+ * Source: $(DRUNTIMESRC core/stdcpp/array.d)
+ */
+
+module core.stdcpp.array;
+
+import core.stdcpp.xutility : StdNamespace;
+
+// hacks to support DMD on Win32
+version (CppRuntime_Microsoft)
+{
+ version = CppRuntime_Windows; // use the MS runtime ABI for win32
+}
+else version (CppRuntime_DigitalMars)
+{
+ version = CppRuntime_Windows; // use the MS runtime ABI for win32
+ pragma(msg, "std::array not supported by DMC");
+}
+
+extern(C++, (StdNamespace)):
+
+/**
+ * D language counterpart to C++ std::array.
+ *
+ * C++ reference: $(LINK2 https://en.cppreference.com/w/cpp/container/array)
+ */
+extern(C++, class) struct array(T, size_t N)
+{
+extern(D):
+pragma(inline, true):
+
+ ///
+ alias size_type = size_t;
+ ///
+ alias difference_type = ptrdiff_t;
+ ///
+ alias value_type = T;
+ ///
+ alias pointer = T*;
+ ///
+ alias const_pointer = const(T)*;
+
+ ///
+ alias as_array this;
+
+ /// Variadic constructor
+ this(T[N] args ...) { this[] = args[]; }
+
+ ///
+ void fill()(auto ref const(T) value) { this[] = value; }
+
+pure nothrow @nogc:
+ ///
+ size_type size() const @safe { return N; }
+ ///
+ alias length = size;
+ ///
+ alias opDollar = length;
+ ///
+ size_type max_size() const @safe { return N; }
+ ///
+ bool empty() const @safe { return N == 0; }
+
+ ///
+ ref inout(T) front() inout @safe { static if (N > 0) { return this[0]; } else { return as_array()[][0]; /* HACK: force OOB */ } }
+ ///
+ ref inout(T) back() inout @safe { static if (N > 0) { return this[N-1]; } else { return as_array()[][0]; /* HACK: force OOB */ } }
+
+ version (CppRuntime_Windows)
+ {
+ ///
+ inout(T)* data() inout @safe { return &_Elems[0]; }
+ ///
+ ref inout(T)[N] as_array() inout @safe { return _Elems[0 .. N]; }
+ ///
+ ref inout(T) at(size_type i) inout @safe { return _Elems[0 .. N][i]; }
+
+ private:
+ T[N ? N : 1] _Elems;
+ }
+ else version (CppRuntime_Gcc)
+ {
+ ///
+ inout(T)* data() inout @safe { static if (N > 0) { return &_M_elems[0]; } else { return null; } }
+ ///
+ ref inout(T)[N] as_array() inout @trusted { return data()[0 .. N]; }
+ ///
+ ref inout(T) at(size_type i) inout @trusted { return data()[0 .. N][i]; }
+
+ private:
+ static if (N > 0)
+ {
+ T[N] _M_elems;
+ }
+ else
+ {
+ struct _Placeholder {}
+ _Placeholder _M_placeholder;
+ }
+ }
+ else version (CppRuntime_Clang)
+ {
+ ///
+ inout(T)* data() inout @trusted { static if (N > 0) { return &__elems_[0]; } else { return cast(inout(T)*)__elems_.ptr; } }
+ ///
+ ref inout(T)[N] as_array() inout @trusted { return data()[0 .. N]; }
+ ///
+ ref inout(T) at(size_type i) inout @trusted { return data()[0 .. N][i]; }
+
+ private:
+ static if (N > 0)
+ {
+ T[N] __elems_;
+ }
+ else
+ {
+ struct _ArrayInStructT { T[1] __data_; }
+ align(_ArrayInStructT.alignof)
+ byte[_ArrayInStructT.sizeof] __elems_ = void;
+ }
+ }
+ else
+ {
+ static assert(false, "C++ runtime not supported");
+ }
+}
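
    A short usage sketch of the binding above; nothing here is specific to one C++
    runtime, it only relies on the members declared in this file:

        import core.stdcpp.array : array;

        unittest
        {
            auto a = array!(int, 4)(1, 2, 3, 4);    // variadic constructor
            assert(a.size() == 4 && a[0] == 1);     // indexing via `alias as_array this`
            a.fill(9);
            assert(a.front == 9 && a.back == 9);
            int[] view = a.as_array[];              // borrow the storage as a D slice
            assert(view.length == 4);
        }
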
diff --git a/libphobos/libdruntime/core/stdcpp/exception.d b/libphobos/libdruntime/core/stdcpp/exception.d
index 14203b0..f920057 100644
--- a/libphobos/libdruntime/core/stdcpp/exception.d
+++ b/libphobos/libdruntime/core/stdcpp/exception.d
@@ -6,95 +6,134 @@
* Copyright: Copyright (c) 2016 D Language Foundation
* License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: $(HTTP digitalmars.com, Walter Bright)
+ * Manu Evans
* Source: $(DRUNTIMESRC core/stdcpp/_exception.d)
*/
module core.stdcpp.exception;
-extern (C++, "std"):
+import core.stdcpp.xutility : __cplusplus, CppStdRevision;
+import core.attribute : weak;
-version (CppRuntime_DigitalMars)
-{
- import core.stdcpp.typeinfo;
+version (CppRuntime_Gcc)
+ version = GenericBaseException;
+version (CppRuntime_Clang)
+ version = GenericBaseException;
- alias void function() unexpected_handler;
- unexpected_handler set_unexpected(unexpected_handler f) nothrow;
- void unexpected();
+extern (C++, "std"):
+@nogc:
- alias void function() terminate_handler;
- terminate_handler set_terminate(terminate_handler f) nothrow;
- void terminate();
+///
+alias terminate_handler = void function() nothrow;
+///
+terminate_handler set_terminate(terminate_handler f) nothrow;
+///
+terminate_handler get_terminate() nothrow;
+///
+void terminate() nothrow;
- bool uncaught_exception();
+static if (__cplusplus < CppStdRevision.cpp17)
+{
+ ///
+ alias unexpected_handler = void function();
+ ///
+ deprecated unexpected_handler set_unexpected(unexpected_handler f) nothrow;
+ ///
+ deprecated unexpected_handler get_unexpected() nothrow;
+ ///
+ deprecated void unexpected();
+}
+static if (__cplusplus < CppStdRevision.cpp17)
+{
+ ///
+ bool uncaught_exception() nothrow;
+}
+else static if (__cplusplus == CppStdRevision.cpp17)
+{
+ ///
+ deprecated bool uncaught_exception() nothrow;
+}
+static if (__cplusplus >= CppStdRevision.cpp17)
+{
+ ///
+ int uncaught_exceptions() nothrow;
+}
+
+version (GenericBaseException)
+{
+ ///
class exception
{
- this() nothrow { }
- this(const exception) nothrow { }
- //exception operator=(const exception) nothrow { return this; }
- //virtual ~this() nothrow;
- void dtor() { }
- const(char)* what() const nothrow;
- }
+ @nogc:
+ ///
+ this() nothrow {}
+ ///
+        @weak ~this() nothrow {} // HACK: this should be extern, but then we have link errors!
- class bad_exception : exception
- {
- this() nothrow { }
- this(const bad_exception) nothrow { }
- //bad_exception operator=(const bad_exception) nothrow { return this; }
- //virtual ~this() nothrow;
- override const(char)* what() const nothrow;
+ ///
+        @weak const(char)* what() const nothrow { return "unknown"; } // HACK: this should be extern, but then we have link errors!
+
+ protected:
+ this(const(char)*, int = 1) nothrow { this(); } // compat with MS derived classes
}
}
-else version (CppRuntime_Gcc)
+else version (CppRuntime_DigitalMars)
{
- alias void function() unexpected_handler;
- unexpected_handler set_unexpected(unexpected_handler f) nothrow;
- void unexpected();
-
- alias void function() terminate_handler;
- terminate_handler set_terminate(terminate_handler f) nothrow;
- void terminate();
-
- pure bool uncaught_exception();
-
+ ///
class exception
{
- this();
+ @nogc:
+ ///
+ this() nothrow {}
//virtual ~this();
- void dtor1();
- void dtor2();
- const(char)* what() const;
- }
+ void dtor() { } // reserve slot in vtbl[]
- class bad_exception : exception
- {
- this();
- //virtual ~this();
- override const(char)* what() const;
+ ///
+ const(char)* what() const nothrow;
+
+ protected:
+ this(const(char)*, int = 1) nothrow { this(); } // compat with MS derived classes
}
}
else version (CppRuntime_Microsoft)
{
+ ///
class exception
{
- this();
- this(const exception);
- //exception operator=(const exception) { return this; }
- //virtual ~this();
- void dtor() { }
- const(char)* what() const;
-
- private:
- const(char)* mywhat;
- bool dofree;
- }
+ @nogc:
+ ///
+ this(const(char)* message = "unknown", int = 1) nothrow { msg = message; }
+ ///
+ @weak ~this() nothrow {}
- class bad_exception : exception
- {
- this(const(char)* msg = "bad exception");
- //virtual ~this();
+ ///
+ @weak const(char)* what() const nothrow { return msg != null ? msg : "unknown exception"; }
+
+ // TODO: do we want this? exceptions are classes... ref types.
+// final ref exception opAssign(ref const(exception) e) nothrow { msg = e.msg; return this; }
+
+ protected:
+ @weak void _Doraise() const { assert(0); }
+
+ protected:
+ const(char)* msg;
}
+
}
else
static assert(0, "Missing std::exception binding for this platform");
+
+///
+class bad_exception : exception
+{
+@nogc:
+ ///
+ this(const(char)* message = "bad exception") { super(message); }
+
+ version (GenericBaseException)
+ {
+ ///
+ @weak override const(char)* what() const nothrow { return "bad exception"; }
+ }
+}
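
    A hedged sketch of consuming the rebound declarations, assuming a C++11-capable
    runtime so that get_terminate/set_terminate link; the classes are ordinary
    extern(C++) classes, so they can also be instantiated from D:

        import core.stdcpp.exception : exception, bad_exception,
                                       terminate_handler, get_terminate, set_terminate;

        unittest
        {
            exception e = new bad_exception();
            assert(e.what() !is null);              // "bad exception" via the virtual call

            terminate_handler prev = get_terminate();
            set_terminate(prev);                    // re-install the active handler
            assert(get_terminate() is prev);
        }
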
diff --git a/libphobos/libdruntime/core/stdcpp/memory.d b/libphobos/libdruntime/core/stdcpp/memory.d
new file mode 100644
index 0000000..bd7976c
--- /dev/null
+++ b/libphobos/libdruntime/core/stdcpp/memory.d
@@ -0,0 +1,163 @@
+/**
+ * D binding to C++ <memory>.
+ *
+ * Copyright: Copyright (c) 2019 D Language Foundation
+ * License: Distributed under the
+ *    $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ *    (See accompanying file LICENSE)
+ * Authors: Manu Evans
+ * Source: $(DRUNTIMESRC core/stdcpp/memory.d)
+ */
+
+module core.stdcpp.memory;
+
+public import core.stdcpp.allocator;
+
+import core.stdcpp.xutility : StdNamespace;
+
+extern(C++, (StdNamespace)):
+
+///
+unique_ptr!T make_unique(T, Args...)(auto ref Args args)
+{
+ import core.lifetime : forward;
+ import core.stdcpp.new_ : cpp_new;
+
+ return unique_ptr!T(cpp_new!T(forward!args));
+}
+
+///
+struct default_delete(T)
+{
+ ///
+ alias pointer = ClassOrPtr!T;
+
+ ///
+ void opCall()(pointer ptr) const
+ {
+ import core.stdcpp.new_ : cpp_delete;
+
+ cpp_delete(ptr);
+ }
+}
+
+///
+extern(C++, class)
+struct unique_ptr(T, Deleter = default_delete!T)
+{
+extern(D):
+ ///
+ this(this) @disable;
+
+ ///
+ ~this()
+ {
+ reset();
+ }
+
+ ///
+ ref unique_ptr opAssign(typeof(null))
+ {
+ reset();
+ return this;
+ }
+
+ ///
+ void reset(pointer p = null)
+ {
+ pointer t = __ptr();
+ __ptr() = p;
+ if (t)
+ get_deleter()(t);
+ }
+
+nothrow pure @safe @nogc:
+ ///
+ alias pointer = ClassOrPtr!T;
+ ///
+ alias element_type = T;
+ ///
+ alias deleter_type = Deleter;
+
+ ///
+ this(pointer ptr)
+ {
+ __ptr() = ptr;
+ }
+
+ ///
+ inout(pointer) get() inout nothrow
+ {
+ return __ptr();
+ }
+
+ ///
+ bool opCast(T : bool)() const nothrow
+ {
+ return __ptr() != null;
+ }
+
+ ///
+ pointer release() nothrow
+ {
+ pointer t = __ptr();
+ __ptr() = null;
+ return t;
+ }
+
+// void swap(ref unique_ptr u) nothrow
+// {
+// __ptr_.swap(__u.__ptr_);
+// }
+
+ version (CppRuntime_Microsoft)
+ {
+ ///
+ ref inout(deleter_type) get_deleter() inout nothrow { return _Mypair._Myval1; }
+
+ private:
+ import core.stdcpp.xutility : _Compressed_pair;
+
+ ref pointer __ptr() nothrow { return _Mypair._Myval2; }
+ inout(pointer) __ptr() inout nothrow { return _Mypair._Myval2; }
+
+ _Compressed_pair!(Deleter, pointer) _Mypair;
+ }
+ else version (CppRuntime_Gcc)
+ {
+ ///
+ ref inout(deleter_type) get_deleter() inout nothrow { return _M_t.get!1; }
+
+ private:
+ import core.stdcpp.tuple : tuple, get;
+
+ ref pointer __ptr() nothrow { return _M_t.get!0; }
+ inout(pointer) __ptr() inout nothrow { return _M_t.get!0; }
+
+ tuple!(pointer, Deleter) _M_t;
+ }
+ else version (CppRuntime_Clang)
+ {
+ ///
+ ref inout(deleter_type) get_deleter() inout nothrow { return __ptr_.second; }
+
+ private:
+ import core.stdcpp.xutility : __compressed_pair;
+
+ ref pointer __ptr() nothrow { return __ptr_.first; }
+ inout(pointer) __ptr() inout nothrow { return __ptr_.first; }
+
+ __compressed_pair!(pointer, deleter_type) __ptr_;
+ }
+}
+
+
+private:
+
+template ClassOrPtr(T)
+{
+ static if (is(T == class))
+ alias ClassOrPtr = T;
+ else
+ alias ClassOrPtr = T*;
+}
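
    A usage sketch for the smart-pointer binding above; Vec2 is a hypothetical payload
    type, and the sequence only exercises members declared in this file (make_unique,
    get, release, the bool conversion) plus cpp_delete from core.stdcpp.new_:

        import core.stdcpp.memory : make_unique, unique_ptr;
        import core.stdcpp.new_ : cpp_delete;

        struct Vec2 { float x, y; }

        unittest
        {
            // allocated with ::operator new, freed by default_delete on scope exit
            unique_ptr!Vec2 p = make_unique!Vec2(1.0f, 2.0f);
            assert(p && p.get().x == 1.0f);

            Vec2* raw = p.release();        // ownership leaves the smart pointer
            assert(!p);                     // opCast!bool is now false
            cpp_delete(raw);                // the caller must free it explicitly
        }
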
diff --git a/libphobos/libdruntime/core/stdcpp/new_.d b/libphobos/libdruntime/core/stdcpp/new_.d
new file mode 100644
index 0000000..77c179c
--- /dev/null
+++ b/libphobos/libdruntime/core/stdcpp/new_.d
@@ -0,0 +1,186 @@
+/**
+ * D binding to C++ <new>
+ *
+ * Copyright: Copyright (c) 2019 D Language Foundation
+ * License: Distributed under the
+ * $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ * (See accompanying file LICENSE)
+ * Authors: Manu Evans
+ * Source: $(DRUNTIMESRC core/stdcpp/new_.d)
+ */
+
+module core.stdcpp.new_;
+
+import core.stdcpp.xutility : __cpp_sized_deallocation, __cpp_aligned_new;
+import core.stdcpp.exception : exception;
+
+// TODO: this really should come from __traits(getTargetInfo, "defaultNewAlignment")
+version (D_LP64)
+ enum size_t __STDCPP_DEFAULT_NEW_ALIGNMENT__ = 16;
+else
+ enum size_t __STDCPP_DEFAULT_NEW_ALIGNMENT__ = 8;
+
+extern (C++, "std")
+{
+ ///
+ struct nothrow_t {}
+
+ ///
+ enum align_val_t : size_t { defaultAlignment = __STDCPP_DEFAULT_NEW_ALIGNMENT__ };
+
+ ///
+ class bad_alloc : exception
+ {
+ @nogc:
+ ///
+ this() { super("bad allocation", 1); }
+ }
+}
+
+
+///
+T* cpp_new(T, Args...)(auto ref Args args) if (!is(T == class))
+{
+ import core.lifetime : emplace, forward;
+
+ T* mem = cast(T*)__cpp_new(T.sizeof);
+ return mem.emplace(forward!args);
+}
+
+///
+T cpp_new(T, Args...)(auto ref Args args) if (is(T == class))
+{
+ import core.lifetime : emplace, forward;
+
+ T mem = cast(T)__cpp_new(__traits(classInstanceSize, T));
+ return mem.emplace(forward!args);
+}
+
+///
+void cpp_delete(T)(T* ptr) if (!is(T == class))
+{
+ destroy!false(*ptr);
+ __cpp_delete(ptr);
+}
+
+///
+void cpp_delete(T)(T instance) if (is(T == class))
+{
+ destroy!false(instance);
+ __cpp_delete(cast(void*) instance);
+}
+
+
+// raw C++ functions
+extern(C++):
+@nogc:
+
+/// Binding for ::operator new(std::size_t count)
+pragma(mangle, __new_mangle)
+void* __cpp_new(size_t count);
+
+/// Binding for ::operator new(std::size_t count, const std::nothrow_t&)
+pragma(mangle, __new_nothrow_mangle)
+void* __cpp_new_nothrow(size_t count, ref const(nothrow_t) = std_nothrow) nothrow;
+
+/// Binding for ::operator delete(void* ptr)
+pragma(mangle, __delete_mangle)
+void __cpp_delete(void* ptr);
+
+/// Binding for ::operator delete(void* ptr, const std::nothrow_t& tag)
+pragma(mangle, __delete_nothrow_mangle)
+void __cpp_delete_nothrow(void* ptr, ref const(nothrow_t) = std_nothrow) nothrow;
+
+static if (__cpp_sized_deallocation)
+{
+ /// Binding for ::operator delete(void* ptr, size_t size)
+ pragma(mangle, __delete_size_mangle)
+ void __cpp_delete_size(void* ptr, size_t size);
+}
+static if (__cpp_aligned_new)
+{
+ /// Binding for ::operator new(std::size_t count, std::align_val_t al)
+ pragma(mangle, __new_align_mangle)
+ void* __cpp_new_aligned(size_t count, align_val_t alignment);
+
+ /// Binding for ::operator new(std::size_t count, std::align_val_t al, const std::nothrow_t&)
+ pragma(mangle, __new_aligned_nothrow_mangle)
+ void* __cpp_new_aligned_nothrow(size_t count, align_val_t alignment, ref const(nothrow_t) = std_nothrow) nothrow;
+
+ /// Binding for ::operator delete(void* ptr, std::align_val_t al)
+ pragma(mangle, __delete_align_mangle)
+ void __cpp_delete_aligned(void* ptr, align_val_t alignment);
+
+ /// Binding for ::operator delete(void* ptr, std::align_val_t al, const std::nothrow_t& tag)
+ pragma(mangle, __delete_align_nothrow_mangle)
+ void __cpp_delete_align_nothrow(void* ptr, align_val_t alignment, ref const(nothrow_t) = std_nothrow) nothrow;
+
+ /// Binding for ::operator delete(void* ptr, size_t size, std::align_val_t al)
+ pragma(mangle, __delete_size_align_mangle)
+ void __cpp_delete_size_aligned(void* ptr, size_t size, align_val_t alignment);
+}
+
+private:
+extern (D):
+
+__gshared immutable nothrow_t std_nothrow;
+
+// we have to hard-code the mangling for the global new/delete operators
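+// (For reference, the Itanium names below decode as: "_Znwm"/"_Znwj" = operator new(size_t)
+// on 64-/32-bit targets, "_ZdlPv" = operator delete(void*), with the "St11align_val_t" and
+// "RKSt9nothrow_t" suffixes encoding the std::align_val_t and const std::nothrow_t& parameters.)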
+version (CppRuntime_Microsoft)
+{
+ version (D_LP64)
+ {
+ enum __new_mangle = "??2@YAPEAX_K@Z";
+ enum __new_nothrow_mangle = "??2@YAPEAX_KAEBUnothrow_t@std@@@Z";
+ enum __delete_mangle = "??3@YAXPEAX@Z";
+ enum __delete_nothrow_mangle = "??3@YAXPEAXAEBUnothrow_t@std@@@Z";
+ enum __delete_size_mangle = "??3@YAXPEAX_K@Z";
+ enum __new_align_mangle = "??2@YAPEAX_KW4align_val_t@std@@@Z";
+ enum __new_aligned_nothrow_mangle = "??2@YAPEAX_KW4align_val_t@std@@AEBUnothrow_t@1@@Z";
+ enum __delete_align_mangle = "??3@YAXPEAXW4align_val_t@std@@@Z";
+ enum __delete_align_nothrow_mangle = "??3@YAXPEAXW4align_val_t@std@@AEBUnothrow_t@1@@Z";
+ enum __delete_size_align_mangle = "??3@YAXPEAX_KW4align_val_t@std@@@Z";
+ }
+ else
+ {
+ enum __new_mangle = "??2@YAPAXI@Z";
+ enum __new_nothrow_mangle = "??2@YAPAXIABUnothrow_t@std@@@Z";
+ enum __delete_mangle = "??3@YAXPAX@Z";
+ enum __delete_nothrow_mangle = "??3@YAXPAXABUnothrow_t@std@@@Z";
+ enum __delete_size_mangle = "??3@YAXPAXI@Z";
+ enum __new_align_mangle = "??2@YAPAXIW4align_val_t@std@@@Z";
+ enum __new_aligned_nothrow_mangle = "??2@YAPAXIW4align_val_t@std@@ABUnothrow_t@1@@Z";
+ enum __delete_align_mangle = "??3@YAXPAXW4align_val_t@std@@@Z";
+ enum __delete_align_nothrow_mangle = "??3@YAXPAXW4align_val_t@std@@ABUnothrow_t@1@@Z";
+ enum __delete_size_align_mangle = "??3@YAXPAXIW4align_val_t@std@@@Z";
+ }
+}
+else
+{
+ version (D_LP64)
+ {
+ enum __new_mangle = "_Znwm";
+ enum __new_nothrow_mangle = "_ZnwmRKSt9nothrow_t";
+ enum __delete_mangle = "_ZdlPv";
+ enum __delete_nothrow_mangle = "_ZdlPvRKSt9nothrow_t";
+ enum __delete_size_mangle = "_ZdlPvm";
+ enum __new_align_mangle = "_ZnwmSt11align_val_t";
+ enum __new_aligned_nothrow_mangle = "_ZnwmSt11align_val_tRKSt9nothrow_t";
+ enum __delete_align_mangle = "_ZdlPvSt11align_val_t";
+ enum __delete_align_nothrow_mangle = "_ZdlPvSt11align_val_tRKSt9nothrow_t";
+ enum __delete_size_align_mangle = "_ZdlPvmSt11align_val_t";
+ }
+ else
+ {
+ enum __new_mangle = "_Znwj";
+ enum __new_nothrow_mangle = "_ZnwjRKSt9nothrow_t";
+ enum __delete_mangle = "_ZdlPv";
+ enum __delete_nothrow_mangle = "_ZdlPvRKSt9nothrow_t";
+ enum __delete_size_mangle = "_ZdlPvj";
+ enum __new_align_mangle = "_ZnwjSt11align_val_t";
+ enum __new_aligned_nothrow_mangle = "_ZnwjSt11align_val_tRKSt9nothrow_t";
+ enum __delete_align_mangle = "_ZdlPvSt11align_val_t";
+ enum __delete_align_nothrow_mangle = "_ZdlPvSt11align_val_tRKSt9nothrow_t";
+ enum __delete_size_align_mangle = "_ZdlPvjSt11align_val_t";
+ }
+}
diff --git a/libphobos/libdruntime/core/stdcpp/string.d b/libphobos/libdruntime/core/stdcpp/string.d
new file mode 100644
index 0000000..1cdb0f4
--- /dev/null
+++ b/libphobos/libdruntime/core/stdcpp/string.d
@@ -0,0 +1,2593 @@
+/**
+ * D header file for interaction with C++ std::string.
+ *
+ * Copyright: Copyright (c) 2019 D Language Foundation
+ * License: Distributed under the
+ * $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ * (See accompanying file LICENSE)
+ * Authors: Guillaume Chatelet
+ * Manu Evans
+ * Source: $(DRUNTIMESRC core/stdcpp/string.d)
+ */
+
+module core.stdcpp.string;
+
+import core.stdcpp.allocator;
+import core.stdcpp.xutility : StdNamespace;
+import core.stdc.stddef : wchar_t;
+
+version (OSX)
+ version = Darwin;
+else version (iOS)
+ version = Darwin;
+else version (TVOS)
+ version = Darwin;
+else version (WatchOS)
+ version = Darwin;
+
+version (Darwin)
+{
+ // Apple decided to rock a different ABI... good for them!
+ version = _LIBCPP_ABI_ALTERNATE_STRING_LAYOUT;
+}
+
+version (CppRuntime_Gcc)
+{
+ version (_GLIBCXX_USE_CXX98_ABI)
+ {
+ private enum StringNamespace = "std";
+ version = __GTHREADS;
+ }
+ else
+ {
+ import core.internal.traits : AliasSeq;
+ private enum StringNamespace = AliasSeq!("std", "__cxx11");
+ }
+}
+else
+ alias StringNamespace = StdNamespace;
+
+enum DefaultConstruct { value }
+
+/// Constructor argument for default construction
+enum Default = DefaultConstruct();
+
+@nogc:
+
+/**
+ * Character traits classes specify character properties and provide specific
+ * semantics for certain operations on characters and sequences of characters.
+ */
+extern(C++, (StdNamespace)) struct char_traits(CharT)
+{
+ alias char_type = CharT;
+
+ static size_t length(const(char_type)* s) @trusted pure nothrow @nogc
+ {
+ static if (is(char_type == char) || is(char_type == ubyte))
+ {
+ import core.stdc.string : strlen;
+ return strlen(s);
+ }
+ else
+ {
+ size_t len = 0;
+ for (; *s != char_type(0); ++s)
+ ++len;
+ return len;
+ }
+ }
+
+ static char_type* move(char_type* s1, const char_type* s2, size_t n) @trusted pure nothrow @nogc
+ {
+ import core.stdc.string : memmove;
+ import core.stdc.wchar_ : wmemmove;
+ import core.stdc.stddef : wchar_t;
+
+ if (n == 0)
+ return s1;
+
+ version (CRuntime_Microsoft)
+ {
+ enum crt = __traits(getTargetInfo, "cppRuntimeLibrary");
+ static if (crt.length >= 6 && crt[0 .. 6] == "msvcrt")
+ enum use_wmemmove = false; // https://issues.dlang.org/show_bug.cgi?id=20456
+ else
+ enum use_wmemmove = true;
+ }
+ else
+ enum use_wmemmove = true;
+
+ static if (use_wmemmove
+ && (is(char_type == wchar_t)
+ || is(char_type == ushort) && wchar_t.sizeof == ushort.sizeof // Windows
+ || is(char_type == uint) && wchar_t.sizeof == uint.sizeof)) // POSIX
+ return cast(char_type*) wmemmove(s1, s2, n);
+ else
+ return cast(char_type*) memmove(s1, s2, n * char_type.sizeof);
+ }
+}
+
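+// Minimal sketch of using the traits directly (illustrative only; assumes a zero-terminated buffer):
+//
+//     const(char)* s = "hello"; // D string literals are zero-terminated
+//     assert(char_traits!char.length(s) == 5);
+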
+// I don't think we can have these here; otherwise the symbols would be emitted into druntime, and we don't want that...
+//alias std_string = basic_string!char;
+//alias std_u16string = basic_string!wchar; // TODO: can't mangle these yet either...
+//alias std_u32string = basic_string!dchar;
+//alias std_wstring = basic_string!wchar_t; // TODO: we can't mangle wchar_t properly (yet?)
+
+/**
+ * D language counterpart to C++ std::basic_string.
+ *
+ * C++ reference: $(LINK2 https://en.cppreference.com/w/cpp/string/basic_string)
+ */
+extern(C++, class)
+extern(C++, (StringNamespace))
+struct basic_string(T, Traits = char_traits!T, Alloc = allocator!T)
+{
+extern(D):
+@nogc:
+
+ ///
+ enum size_type npos = size_type.max;
+
+ ///
+ alias size_type = size_t;
+ ///
+ alias difference_type = ptrdiff_t;
+ ///
+ alias value_type = T;
+ ///
+ alias traits_type = Traits;
+ ///
+ alias allocator_type = Alloc;
+ ///
+ alias pointer = value_type*;
+ ///
+ alias const_pointer = const(value_type)*;
+
+ ///
+ alias toString = as_array;
+
+ /// MSVC allocates on default initialisation in debug, which can't be modelled by D `struct`
+ @disable this();
+
+ ///
+ alias length = size;
+ ///
+ alias opDollar = length;
+ ///
+ bool empty() const nothrow @safe { return size() == 0; }
+
+ ///
+ size_t[2] opSlice(size_t dim : 0)(size_t start, size_t end) const pure nothrow @safe @nogc { return [start, end]; }
+
+ ///
+ ref inout(T) opIndex(size_t index) inout pure nothrow @safe @nogc { return as_array[index]; }
+ ///
+ inout(T)[] opIndex(size_t[2] slice) inout pure nothrow @safe @nogc { return as_array[slice[0] .. slice[1]]; }
+ ///
+ inout(T)[] opIndex() inout pure nothrow @safe @nogc { return as_array(); }
+
+ /// Two `basic_string`s are equal if they represent the same sequence of code units.
+ bool opEquals(scope const ref basic_string s) const pure nothrow @safe { return as_array == s.as_array; }
+ /// ditto
+ bool opEquals(scope const T[] s) const pure nothrow @safe { return as_array == s; }
+
+ /// Performs lexicographical comparison.
+ int opCmp(scope const ref basic_string rhs) const pure nothrow @safe { return __cmp(as_array, rhs.as_array); }
+ /// ditto
+ int opCmp(scope const T[] rhs) const pure nothrow @safe { return __cmp(as_array, rhs); }
+
+ /// Hash to allow `basic_string`s to be used as keys for built-in associative arrays.
+ /// **The result will generally not be the same as C++ `std::hash<std::basic_string<T>>`.**
+ size_t toHash() const @nogc nothrow pure @safe { return .hashOf(as_array); }
+
+ ///
+ void clear() { eos(0); } // TODO: bounds-check
+ ///
+ void resize(size_type n, T c = T(0)) @trusted
+ {
+ if (n <= size())
+ eos(n);
+ else
+ append(n - size(), c);
+ }
+
+ ///
+ ref inout(T) front() inout nothrow @safe { return this[0]; }
+ ///
+ ref inout(T) back() inout nothrow @safe { return this[$-1]; }
+
+ ///
+ const(T)* c_str() const nothrow @safe { return data(); }
+
+ // Modifiers
+ ///
+ ref basic_string opAssign()(auto ref basic_string str) { return assign(str); }
+// ref basic_string assign(size_type n, T c);
+ ///
+ ref basic_string opAssign(const(T)[] str) { return assign(str); }
+ ///
+ ref basic_string opAssign(T c) { return assign((&c)[0 .. 1]); }
+
+ ///
+ ref basic_string opIndexAssign(T c, size_t index) { as_array[index] = c; return this; }
+ ///
+ ref basic_string opIndexAssign(T c, size_t[2] slice) { as_array[slice[0] .. slice[1]] = c; return this; }
+ ///
+ ref basic_string opIndexAssign(const(T)[] str, size_t[2] slice) { as_array[slice[0] .. slice[1]] = str[]; return this; }
+ ///
+ ref basic_string opIndexAssign(T c) { as_array[] = c; return this; }
+ ///
+ ref basic_string opIndexAssign(const(T)[] str) { as_array[] = str[]; return this; }
+
+ ///
+ ref basic_string opIndexOpAssign(string op)(T c, size_t index) { mixin("as_array[index] " ~ op ~ "= c;"); return this; }
+ ///
+ ref basic_string opIndexOpAssign(string op)(T c, size_t[2] slice) { mixin("as_array[slice[0] .. slice[1]] " ~ op ~ "= c;"); return this; }
+ ///
+ ref basic_string opIndexOpAssign(string op)(const(T)[] str, size_t[2] slice) { mixin("as_array[slice[0] .. slice[1]] " ~ op ~ "= str[];"); return this; }
+ ///
+ ref basic_string opIndexOpAssign(string op)(T c) { mixin("as_array[] " ~ op ~ "= c;"); return this; }
+ ///
+ ref basic_string opIndexOpAssign(string op)(const(T)[] str) { mixin("as_array[] " ~ op ~ "= str[];"); return this; }
+ ///
+ ref basic_string append(T c) { return append((&c)[0 .. 1]); }
+ ///
+ ref basic_string opOpAssign(string op : "~")(const(T)[] str) { return append(str); }
+ ///
+ ref basic_string opOpAssign(string op : "~")(T c) { return append((&c)[0 .. 1]); }
+
+ ///
+ ref basic_string insert(size_type pos, ref const(basic_string) str) { return insert(pos, str.data(), str.size()); }
+ ///
+ ref basic_string insert(size_type pos, ref const(basic_string) str, size_type subpos, size_type sublen) @trusted
+ {
+ const _strsz = str.size();
+ assert(subpos <= _strsz);
+// if (subpos > _strsz)
+// throw new RangeError("subpos exceeds length of str");
+ return insert(pos, str.data() + subpos, min(sublen, _strsz - subpos));
+ }
+ ///
+ ref basic_string insert(S : size_type)(S pos, const(T)* s)
+ {
+ // This overload is declared as a template to give precedence to the slice overload const(T)[] in case of conflict.
+ assert(s);
+ return insert(pos, s, traits_type.length(s));
+ }
+ ///
+ ref basic_string insert(size_type pos, const(T)[] s) { insert(pos, &s[0], s.length); return this; }
+
+ ///
+ ref basic_string erase(size_type pos = 0) // TODO: bounds-check
+ {
+// _My_data._Check_offset(pos);
+ eos(pos);
+ return this;
+ }
+ ///
+ ref basic_string erase(size_type pos, size_type len) // TODO: bounds-check
+ {
+// _My_data._Check_offset(pos);
+ T[] str = as_array();
+ size_type new_len = str.length - len;
+ this[pos .. new_len] = this[pos + len .. str.length]; // TODO: should be memmove!
+ eos(new_len);
+ return this;
+ }
+
+ ///
+ ref basic_string replace()(size_type pos, size_type len, auto ref basic_string str) { return replace(pos, len, str.data(), str.size()); }
+ ///
+ ref basic_string replace()(size_type pos, size_type len, auto ref basic_string str,
+ size_type subpos, size_type sublen=npos)
+ {
+ size_type strsz = str.size();
+ assert(subpos <= strsz);
+// if (subpos > strsz)
+// throw new RangeError("subpos exceeds size of str");
+ return replace(pos, len, str.data() + subpos, min(sublen, strsz - subpos));
+ }
+ ///
+ ref basic_string replace(size_type pos, size_type len, const(value_type)[] s) { return replace(pos, len, s.ptr, s.length); }
+ ///
+ ref basic_string replace(S : size_type)(S pos, size_type len, const(value_type)* s)
+ {
+ // This overload is declared as a template to give precedence to the slice overload const(T)[] in case of conflict.
+ assert(s !is null, "string::replace received null");
+ return replace(pos, len, s, traits_type.length(s));
+ }
+
+ ///
+ void push_back(T c) @trusted { append((&c)[0 .. 1]); }
+ ///
+ void pop_back() { erase(size() - 1); }
+
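+    // Illustrative usage sketch (not compiled here); `std_string` is a hypothetical local alias,
+    // since this module deliberately does not declare one:
+    //
+    //     alias std_string = basic_string!char;
+    //
+    //     auto e = std_string(Default);       // empty string (plain default construction is @disable'd)
+    //     auto s = std_string("hello");       // construct from a D slice
+    //     s ~= ' ';                           // append a single code unit
+    //     s ~= "world";                       // append a slice
+    //     assert(s.size() == 11 && s[0 .. 5] == "hello");
+    //     const(char)* p = s.c_str();         // NUL-terminated pointer for C/C++ APIs
+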
+ version (CppRuntime_Microsoft)
+ {
+ //----------------------------------------------------------------------------------
+ // Microsoft runtime
+ //----------------------------------------------------------------------------------
+
+ ///
+ this(DefaultConstruct) { _Alloc_proxy(); _Tidy_init(); }
+ ///
+ this(const(T)[] str) { _Alloc_proxy(); _Tidy_init(); assign(str); }
+ ///
+ this(const(T)[] str, ref const(allocator_type) al) { _Alloc_proxy(); _AssignAllocator(al); _Tidy_init(); assign(str); }
+ ///
+ this(this)
+ {
+ _Alloc_proxy();
+ if (_Get_data()._IsAllocated())
+ {
+ T[] _Str = _Get_data()._Mystr;
+ _Tidy_init();
+ assign(_Str);
+ }
+ }
+
+ ///
+ ~this() { _Tidy_deallocate(); }
+
+ ///
+ ref inout(Alloc) get_allocator() inout { return _Getal(); }
+
+ ///
+ size_type max_size() const nothrow @safe { return ((size_t.max / T.sizeof) - 1) / 2; } // HACK: clone the windows version precisely?
+
+ ///
+ size_type size() const nothrow @safe { return _Get_data()._Mysize; }
+ ///
+ size_type capacity() const nothrow @safe { return _Get_data()._Myres; }
+ ///
+ inout(T)* data() inout @safe { return _Get_data()._Myptr; }
+ ///
+ inout(T)[] as_array() inout nothrow @trusted { return _Get_data()._Myptr[0 .. _Get_data()._Mysize]; }
+ ///
+ ref inout(T) at(size_type i) inout nothrow @trusted { return _Get_data()._Myptr[0 .. _Get_data()._Mysize][i]; }
+
+ ///
+ ref basic_string assign(const(T)[] str)
+ {
+ size_type _Count = str.length;
+ auto _My_data = &_Get_data();
+ if (_Count <= _My_data._Myres)
+ {
+ T* _Old_ptr = _My_data._Myptr;
+ _My_data._Mysize = _Count;
+ _Old_ptr[0 .. _Count] = str[]; // TODO: this needs to be a memmove(), does that work here?
+ _Old_ptr[_Count] = T(0);
+ return this;
+ }
+ return _Reallocate_for(_Count, (T* _New_ptr, size_type _Count, const(T)* _Ptr) nothrow {
+ _New_ptr[0 .. _Count] = _Ptr[0 .. _Count];
+ _New_ptr[_Count] = T(0);
+ }, str.ptr);
+ }
+
+ ///
+ ref basic_string assign(const ref basic_string str)
+ {
+ if (&this != &str)
+ assign(str.as_array);
+ return this;
+ }
+
+ ///
+ ref basic_string append(const(T)[] str)
+ {
+ size_type _Count = str.length;
+ auto _My_data = &_Get_data();
+ size_type _Old_size = _My_data._Mysize;
+ if (_Count <= _My_data._Myres - _Old_size)
+ {
+ pointer _Old_ptr = _My_data._Myptr;
+ _My_data._Mysize = _Old_size + _Count;
+ _Old_ptr[_Old_size .. _Old_size + _Count] = str[]; // TODO: this needs to be a memmove(), does that work here?
+ _Old_ptr[_Old_size + _Count] = T(0);
+ return this;
+ }
+ return _Reallocate_grow_by(_Count, (T* _New_ptr, const(T)[] _Old_str, const(T)[] _Str) {
+ _New_ptr[0 .. _Old_str.length] = _Old_str[];
+ _New_ptr[_Old_str.length .. _Old_str.length + _Str.length] = _Str[];
+ _New_ptr[_Old_str.length + _Str.length] = T(0);
+ }, str);
+ }
+
+ ///
+ ref basic_string append(size_type n, T c)
+ {
+ alias _Count = n;
+ alias _Ch = c;
+ auto _My_data = &_Get_data();
+ const size_type _Old_size = _My_data._Mysize;
+ if (_Count <= _My_data._Myres - _Old_size)
+ {
+ _My_data._Mysize = _Old_size + _Count;
+ pointer _Old_ptr = _My_data._Myptr();
+ _Old_ptr[_Old_size .. _Old_size + _Count] = _Ch;
+ _Old_ptr[_Old_size + _Count] = T(0);
+ return this;
+ }
+
+ return _Reallocate_grow_by(_Count, (T* _New_ptr, const(T)[] _Old_str, size_type _Count, T _Ch) {
+ _New_ptr[0 .. _Old_str.length] = _Old_str[];
+ _New_ptr[_Old_str.length .. _Old_str.length + _Count] = _Ch;
+ _New_ptr[_Old_str.length + _Count] = T(0);
+ }, _Count, _Ch);
+ }
+
+ ///
+ void reserve(size_type _Newcap = 0)
+ {
+ // determine new minimum length of allocated storage
+
+ auto _My_data = &_Get_data();
+
+ if (_My_data._Mysize > _Newcap)
+ {
+ // requested capacity is not large enough for current size, ignore
+ return; // nothing to do
+ }
+
+ if (_My_data._Myres == _Newcap)
+ {
+ // we're already at the requested capacity
+ return; // nothing to do
+ }
+
+ if (_My_data._Myres < _Newcap)
+ {
+ // reallocate to grow
+ const size_type _Old_size = _My_data._Mysize;
+ _Reallocate_grow_by(
+ _Newcap - _Old_size, (T* _New_ptr, const(T)[] _Old_str) {
+ _New_ptr[0 .. _Old_str.length] = _Old_str[];
+ _New_ptr[_Old_str.length] = _Old_str.ptr[_Old_str.length];
+ });
+
+ _My_data._Mysize = _Old_size;
+ return;
+ }
+
+ if (_My_data._BUF_SIZE > _Newcap && _My_data._Large_string_engaged())
+ {
+ // deallocate everything; switch back to "small" mode
+ _Become_small();
+ return;
+ }
+
+ // ignore requests to reserve to [_BUF_SIZE, _Myres)
+ }
+
+ ///
+ void shrink_to_fit()
+ {
+ // reduce capacity
+
+ auto _My_data = &_Get_data();
+ if (!_My_data._Large_string_engaged())
+ {
+ // can't shrink from small mode
+ return;
+ }
+
+ if (_My_data._Mysize < _My_data._BUF_SIZE)
+ {
+ _Become_small();
+ return;
+ }
+
+ const size_type _Target_capacity = min(_My_data._Mysize | _My_data._ALLOC_MASK, max_size());
+ if (_Target_capacity < _My_data._Myres)
+ {
+ // worth shrinking, do it
+ auto _Al = &_Getal();
+ pointer _New_ptr = _Al.allocate(_Target_capacity + 1); // throws
+ _Base._Orphan_all();
+ _New_ptr[0 .. _My_data._Mysize + 1] = _My_data._Bx._Ptr[0 .. _My_data._Mysize + 1];
+ _Al.deallocate(_My_data._Bx._Ptr, _My_data._Myres + 1);
+ _My_data._Bx._Ptr = _New_ptr;
+ _My_data._Myres = _Target_capacity;
+ }
+ }
+
+ ///
+ ref basic_string insert(size_type pos, const(T)* s, size_type n)
+ {
+ // insert [_Ptr, _Ptr + _Count) at _Off
+ alias _Off = pos;
+ alias _Ptr = s;
+ alias _Count = n;
+ auto _My_data = &_Get_data();
+// _My_data._Check_offset(_Off);
+ const size_type _Old_size = _My_data._Mysize;
+ if (_Count <= _My_data._Myres - _Old_size)
+ {
+ _My_data._Mysize = _Old_size + _Count;
+ T* _Old_ptr = _My_data._Myptr();
+ T* _Insert_at = _Old_ptr + _Off;
+ // the range [_Ptr, _Ptr + _Ptr_shifted_after) is left alone by moving the suffix out,
+ // while the range [_Ptr + _Ptr_shifted_after, _Ptr + _Count) shifts down by _Count
+ size_type _Ptr_shifted_after;
+ if (_Ptr + _Count <= _Insert_at || _Ptr > _Old_ptr + _Old_size)
+ {
+ // inserted content is before the shifted region, or does not alias
+ _Ptr_shifted_after = _Count; // none of _Ptr's data shifts
+ }
+ else if (_Insert_at <= _Ptr)
+ {
+ // all of [_Ptr, _Ptr + _Count) shifts
+ _Ptr_shifted_after = 0;
+ }
+ else
+ {
+ // [_Ptr, _Ptr + _Count) contains _Insert_at, so only the part after _Insert_at shifts
+ _Ptr_shifted_after = cast(size_type)(_Insert_at - _Ptr);
+ }
+
+ _Traits.move(_Insert_at + _Count, _Insert_at, _Old_size - _Off + 1); // move suffix + null down
+ _Insert_at[0 .. _Ptr_shifted_after] = _Ptr[0 .. _Ptr_shifted_after];
+ (_Insert_at + _Ptr_shifted_after)[0 .. _Count - _Ptr_shifted_after] = (_Ptr + _Count + _Ptr_shifted_after)[0 .. _Count - _Ptr_shifted_after];
+ return this;
+ }
+
+ return _Reallocate_grow_by(
+ _Count,
+ (T* _New_ptr, const(T)[] _Old_str, size_type _Off, const(T)* _Ptr, size_type _Count) {
+ _New_ptr[0 .. _Off] = _Old_str[0 .. _Off];
+ _New_ptr[_Off .. _Off + _Count] = _Ptr[0 .. _Count];
+ _New_ptr[_Off + _Count .. _Old_str.length + _Count + 1] = _Old_str.ptr[_Off .. _Old_str.length + 1];
+ },
+ _Off, _Ptr, _Count);
+ }
+
+ ///
+ ref basic_string insert(size_type pos, size_type n, T c)
+ {
+ // insert _Count * _Ch at _Off
+ alias _Off = pos;
+ alias _Count = n;
+ alias _Ch = c;
+ auto _My_data = &_Get_data();
+// _My_data._Check_offset(_Off);
+ const size_type _Old_size = _My_data._Mysize;
+ if (_Count <= _My_data._Myres - _Old_size)
+ {
+ _My_data._Mysize = _Old_size + _Count;
+ T* _Old_ptr = _My_data._Myptr();
+ T* _Insert_at = _Old_ptr + _Off;
+ _Traits.move(_Insert_at + _Count, _Insert_at, _Old_size - _Off + 1); // move suffix + null down
+ _Insert_at[0 .. _Count] = _Ch; // fill hole
+ return this;
+ }
+
+ return _Reallocate_grow_by(
+ _Count,
+ (T* _New_ptr, const(T)[] _Old_str, size_type _Off, size_type _Count, T _Ch)
+ {
+ _New_ptr[0 .. _Off] = _Old_str[0 .. _Off];
+ _New_ptr[_Off .. _Off + _Count] = _Ch;
+                _New_ptr[_Off + _Count .. _Old_str.length + _Count + 1] = _Old_str.ptr[_Off .. _Old_str.length + 1];
+ },
+ _Off, _Count, _Ch);
+ }
+
+ ///
+ ref basic_string replace(size_type pos, size_type len, const(T)* s, size_type slen)
+ {
+ // replace [_Off, _Off + _N0) with [_Ptr, _Ptr + _Count)
+ alias _Off = pos;
+ alias _N0 = len;
+ alias _Ptr = s;
+ alias _Count = slen;
+ auto _My_data = &_Get_data();
+// _Mypair._Myval2._Check_offset(_Off);
+ _N0 = _My_data._Clamp_suffix_size(_Off, _N0);
+ if (_N0 == _Count)
+ {
+ // size doesn't change, so a single move does the trick
+ _Traits.move(_My_data._Myptr() + _Off, _Ptr, _Count);
+ return this;
+ }
+
+ const size_type _Old_size = _My_data._Mysize;
+ const size_type _Suffix_size = _Old_size - _N0 - _Off + 1;
+ if (_Count < _N0)
+ {
+ // suffix shifts backwards; we don't have to move anything out of the way
+ _My_data._Mysize = _Old_size - (_N0 - _Count);
+ T* _Old_ptr = _My_data._Myptr();
+ T* _Insert_at = _Old_ptr + _Off;
+ _Traits.move(_Insert_at, _Ptr, _Count);
+ _Traits.move(_Insert_at + _Count, _Insert_at + _N0, _Suffix_size);
+ return this;
+ }
+
+ const size_type _Growth = cast(size_type)(_Count - _N0);
+ if (_Growth <= _My_data._Myres - _Old_size)
+ {
+ // growth fits
+ _My_data._Mysize = _Old_size + _Growth;
+ T* _Old_ptr = _My_data._Myptr();
+ T* _Insert_at = _Old_ptr + _Off;
+ T* _Suffix_at = _Insert_at + _N0;
+
+ size_type _Ptr_shifted_after; // see rationale in insert
+ if (_Ptr + _Count <= _Insert_at || _Ptr > _Old_ptr + _Old_size)
+ _Ptr_shifted_after = _Count;
+ else if (_Suffix_at <= _Ptr)
+ _Ptr_shifted_after = 0;
+ else
+ _Ptr_shifted_after = cast(size_type)(_Suffix_at - _Ptr);
+
+ _Traits.move(_Suffix_at + _Growth, _Suffix_at, _Suffix_size);
+ // next case must be move, in case _Ptr begins before _Insert_at and contains part of the hole;
+ // this case doesn't occur in insert because the new content must come from outside the removed
+ // content there (because in insert there is no removed content)
+ _Traits.move(_Insert_at, _Ptr, _Ptr_shifted_after);
+ // the next case can be copy, because it comes from the chunk moved out of the way in the
+ // first move, and the hole we're filling can't alias the chunk we moved out of the way
+ _Insert_at[_Ptr_shifted_after .. _Count] = _Ptr[_Growth + _Ptr_shifted_after .. _Growth + _Count];
+ return this;
+ }
+
+ return _Reallocate_grow_by(
+ _Growth,
+ (T* _New_ptr, const(T)[] _Old_str, size_type _Off, size_type _N0, const(T)* _Ptr, size_type _Count) {
+ _New_ptr[0 .. _Off] = _Old_str[0 .. _Off];
+                _New_ptr[_Off .. _Off + _Count] = _Ptr[0 .. _Count];
+ const __n = _Old_str.length - _N0 - _Off + 1;
+ (_New_ptr + _Off + _Count)[0 .. __n] = (_Old_str.ptr + _Off + _N0)[0 .. __n];
+ },
+ _Off, _N0, _Ptr, _Count);
+ }
+
+ ///
+ ref basic_string replace(size_type _Off, size_type _N0, size_type _Count, T _Ch)
+ {
+ // replace [_Off, _Off + _N0) with _Count * _Ch
+ auto _My_data = &_Get_data();
+// _My_data._Check_offset(_Off);
+ _N0 = _My_data._Clamp_suffix_size(_Off, _N0);
+ if (_Count == _N0)
+ {
+ _My_data._Myptr()[_Off .. _Off + _Count] = _Ch;
+ return this;
+ }
+
+ const size_type _Old_size = _My_data._Mysize;
+ if (_Count < _N0 || _Count - _N0 <= _My_data._Myres - _Old_size)
+ {
+ // either we are shrinking, or the growth fits
+ _My_data._Mysize = _Old_size + _Count - _N0; // may temporarily overflow;
+ // OK because size_type must be unsigned
+ T* _Old_ptr = _My_data._Myptr();
+ T* _Insert_at = _Old_ptr + _Off;
+ _Traits.move(_Insert_at + _Count, _Insert_at + _N0, _Old_size - _N0 - _Off + 1);
+ _Insert_at[0 .. _Count] = _Ch;
+ return this;
+ }
+
+ return _Reallocate_grow_by(
+ _Count - _N0,
+ (T* _New_ptr, const(T)[] _Old_str, size_type _Off, size_type _N0, size_type _Count, T _Ch) {
+ _New_ptr[0 .. _Off] = _Old_str[0 .. _Off];
+ _New_ptr[_Off .. _Off + _Count] = _Ch;
+ const __n = _Old_str.length - _N0 - _Off + 1;
+ (_New_ptr + _Off + _Count)[0 .. __n] = (_Old_str.ptr + _Off + _N0)[0 .. __n];
+ },
+ _Off, _N0, _Count, _Ch);
+ }
+
+ ///
+ void swap(ref basic_string _Right)
+ {
+ import core.internal.lifetime : swap;
+ import core.stdcpp.type_traits : is_empty;
+
+ if (&this != &_Right)
+ {
+ static if (!is_empty!allocator_type.value
+ && allocator_traits!allocator_type.propagate_on_container_swap)
+ {
+ swap(_Getal(), _Right._Getal());
+ }
+
+ static if (_ITERATOR_DEBUG_LEVEL != 0)
+ {
+ auto _My_data = &_Get_data();
+ const bool _My_large = _My_data._Large_string_engaged();
+ const bool _Right_large = _Right._Get_data()._Large_string_engaged();
+ if (!_My_large)
+ _Base._Orphan_all();
+
+ if (!_Right_large)
+ _Right._Base._Orphan_all();
+
+ if (_My_large || _Right_large)
+ _My_data._Base._Swap_proxy_and_iterators(_Right._Get_data()._Base);
+ } // _ITERATOR_DEBUG_LEVEL != 0
+ }
+
+ _Swap_data!_Can_memcpy_val(_Right);
+ }
+
+ private:
+ import core.stdcpp.xutility : MSVCLinkDirectives;
+ import core.stdcpp.xutility : _Container_base;
+
+ alias _Traits = traits_type;
+ alias _Scary_val = _String_val!T;
+
+ enum bool _Can_memcpy_val = is(_Traits == char_traits!E, E) && is(pointer == U*, U);
+ // This offset skips over the _Container_base members, if any
+ enum size_t _Memcpy_val_offset = _Size_after_ebco_v!_Container_base;
+ enum size_t _Memcpy_val_size = _Scary_val.sizeof - _Memcpy_val_offset;
+
+        // Make sure the object files won't link against mismatching objects
+ mixin MSVCLinkDirectives!true;
+
+ pragma (inline, true)
+ {
+ void eos(size_type offset) nothrow { _Get_data()._Myptr[_Get_data()._Mysize = offset] = T(0); }
+
+ ref inout(_Base.Alloc) _Getal() inout nothrow @safe { return _Base._Mypair._Myval1; }
+ ref inout(_Base.ValTy) _Get_data() inout nothrow @safe { return _Base._Mypair._Myval2; }
+ }
+
+ void _Alloc_proxy() nothrow
+ {
+ static if (_ITERATOR_DEBUG_LEVEL > 0)
+ _Base._Alloc_proxy();
+ }
+
+ void _AssignAllocator(ref const(allocator_type) al) nothrow
+ {
+ static if (_Base._Mypair._HasFirst)
+ _Getal() = al;
+ }
+
+ void _Become_small()
+ {
+ // release any held storage and return to small string mode
+ // pre: *this is in large string mode
+ // pre: this is small enough to return to small string mode
+ auto _My_data = &_Get_data();
+ _Base._Orphan_all();
+ pointer _Ptr = _My_data._Bx._Ptr;
+ auto _Al = &_Getal();
+ _My_data._Bx._Buf[0 .. _My_data._Mysize + 1] = _Ptr[0 .. _My_data._Mysize + 1];
+ _Al.deallocate(_Ptr, _My_data._Myres + 1);
+ _My_data._Myres = _My_data._BUF_SIZE - 1;
+ }
+
+ void _Tidy_init() nothrow
+ {
+ auto _My_data = &_Get_data();
+ _My_data._Mysize = 0;
+ _My_data._Myres = _My_data._BUF_SIZE - 1;
+ _My_data._Bx._Buf[0] = T(0);
+ }
+
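+        // Growth policy (mirrors the MSVC STL): take the larger of 1.5x the current capacity
+        // and the requested size with its low bits filled in via _ALLOC_MASK.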
+ size_type _Calculate_growth(size_type _Requested) const nothrow
+ {
+ auto _My_data = &_Get_data();
+ size_type _Masked = _Requested | _My_data._ALLOC_MASK;
+ size_type _Old = _My_data._Myres;
+ size_type _Expanded = _Old + _Old / 2;
+ return _Masked > _Expanded ? _Masked : _Expanded;
+ }
+
+ ref basic_string _Reallocate_for(_ArgTys...)(size_type _New_size, void function(pointer, size_type, _ArgTys) nothrow @nogc _Fn, _ArgTys _Args)
+ {
+ auto _My_data = &_Get_data();
+ size_type _Old_capacity = _My_data._Myres;
+ size_type _New_capacity = _Calculate_growth(_New_size);
+ auto _Al = &_Getal();
+ pointer _New_ptr = _Al.allocate(_New_capacity + 1); // throws
+ _Base._Orphan_all();
+ _My_data._Mysize = _New_size;
+ _My_data._Myres = _New_capacity;
+ _Fn(_New_ptr, _New_size, _Args);
+ if (_My_data._BUF_SIZE <= _Old_capacity)
+ _Al.deallocate(_My_data._Bx._Ptr, _Old_capacity + 1);
+ _My_data._Bx._Ptr = _New_ptr;
+ return this;
+ }
+
+ ref basic_string _Reallocate_grow_by(_ArgTys...)(size_type _Size_increase, void function(pointer, const(T)[], _ArgTys) nothrow @nogc _Fn, _ArgTys _Args)
+ {
+ auto _My_data = &_Get_data();
+ size_type _Old_size = _My_data._Mysize;
+ size_type _New_size = _Old_size + _Size_increase;
+ size_type _Old_capacity = _My_data._Myres;
+ size_type _New_capacity = _Calculate_growth(_New_size);
+ auto _Al = &_Getal();
+ pointer _New_ptr = _Al.allocate(_New_capacity + 1); // throws
+ _Base._Orphan_all();
+ _My_data._Mysize = _New_size;
+ _My_data._Myres = _New_capacity;
+ if (_My_data._BUF_SIZE <= _Old_capacity)
+ {
+ pointer _Old_ptr = _My_data._Bx._Ptr;
+ _Fn(_New_ptr, _Old_ptr[0 .. _Old_size], _Args);
+ _Al.deallocate(_Old_ptr, _Old_capacity + 1);
+ }
+ else
+ _Fn(_New_ptr, _My_data._Bx._Buf[0 .. _Old_size], _Args);
+ _My_data._Bx._Ptr = _New_ptr;
+ return this;
+ }
+
+ void _Tidy_deallocate()
+ {
+ _Base._Orphan_all();
+ auto _My_data = &_Get_data();
+ if (_My_data._BUF_SIZE <= _My_data._Myres)
+ {
+ pointer _Ptr = _My_data._Bx._Ptr;
+ auto _Al = &_Getal();
+ _Al.deallocate(_Ptr, _My_data._Myres + 1);
+ }
+ _My_data._Mysize = 0;
+ _My_data._Myres = _My_data._BUF_SIZE - 1;
+ _My_data._Bx._Buf[0] = T(0);
+ }
+
+ void _Swap_data(bool _memcpy : true)(ref basic_string _Right)
+ {
+ import core.stdc.string : memcpy;
+
+ // exchange _String_val instances with _Right, memcpy optimization
+ auto _My_data = &_Get_data();
+ auto _My_data_mem = cast(ubyte*)_My_data + _Memcpy_val_offset;
+ auto _Right_data_mem = cast(ubyte*)(&_Right._Get_data()) + _Memcpy_val_offset;
+ ubyte[_Memcpy_val_size] _Temp_mem;
+ memcpy(_Temp_mem.ptr, _My_data_mem, _Memcpy_val_size);
+ memcpy(_My_data_mem, _Right_data_mem, _Memcpy_val_size);
+ memcpy(_Right_data_mem, _Temp_mem.ptr, _Memcpy_val_size);
+ }
+
+ void _Swap_data(bool _memcpy : false)(ref basic_string _Right)
+ {
+ import core.lifetime : swap;
+
+ // exchange _String_val instances with _Right, general case
+ auto _My_data = &_Get_data();
+ auto _Right_data = &_Right._Get_data();
+ const bool _My_large = _My_data._Large_string_engaged();
+ const bool _Right_large = _Right_data._Large_string_engaged();
+ if (_My_large)
+ {
+ if (_Right_large) // swap buffers, iterators preserved
+ swap(_My_data._Bx._Ptr, _Right_data._Bx._Ptr);
+ else // swap large with small
+ _Swap_bx_large_with_small(*_My_data, *_Right_data);
+ }
+ else
+ {
+ if (_Right_large) // swap small with large
+ _Swap_bx_large_with_small(*_Right_data, *_My_data);
+ else
+ {
+ enum _BUF_SIZE = _My_data._BUF_SIZE;
+ T[_BUF_SIZE] _Temp_buf;
+ _Temp_buf[0 .. _BUF_SIZE] = _My_data._Bx._Buf[0 .. _BUF_SIZE];
+ _My_data._Bx._Buf[0 .. _BUF_SIZE] = _Right_data._Bx._Buf[0 .. _BUF_SIZE];
+ _Right_data._Bx._Buf[0 .. _BUF_SIZE] = _Temp_buf[0 .. _BUF_SIZE];
+ }
+ }
+
+ swap(_My_data._Mysize, _Right_data._Mysize);
+ swap(_My_data._Myres, _Right_data._Myres);
+ }
+
+ void _Swap_bx_large_with_small(ref _Scary_val _Starts_large, ref _Scary_val _Starts_small)
+ {
+ // exchange a string in large mode with one in small mode
+ pointer _Ptr = _Starts_large._Bx._Ptr;
+ _Starts_large._Bx._Buf[] = _Starts_small._Bx._Buf[];
+ _Starts_small._Bx._Ptr = _Ptr;
+ }
+
+ _String_alloc!(_String_base_types!(T, Alloc)) _Base;
+ }
+ else version (CppRuntime_Gcc)
+ {
+ version (_GLIBCXX_USE_CXX98_ABI)
+ {
+ //----------------------------------------------------------------------------------
+ // Old GCC/libstdc++ ref-counted implementation
+ //----------------------------------------------------------------------------------
+
+ ///
+ this(DefaultConstruct)
+ {
+ version (_GLIBCXX_FULLY_DYNAMIC_STRING)
+                static assert(false, "DO WE NEED THIS?");
+ else
+ _M_data = _S_empty_rep()._M_refdata();
+ }
+ ///
+ this(const(T)[] str, ref const(allocator_type) al) { _M_assign_allocator(al); this(str); }
+ ///
+ this(const(T)[] str)
+ {
+ _M_data = _S_construct(str.ptr, str.ptr + str.length, _M_get_allocator);
+ }
+ ///
+ this(const ref basic_string str)
+ {
+ import core.stdcpp.type_traits : is_empty;
+
+ static if (!is_empty!allocator_type.value)
+ _M_Alloc = str.get_allocator();
+ _M_data = str._M_rep()._M_grab(get_allocator(), str.get_allocator());
+ }
+
+ ///
+ ~this() { _M_rep()._M_dispose(get_allocator()); }
+
+ ///
+ ref inout(Alloc) get_allocator() inout { return _M_get_allocator(); }
+
+ ///
+ size_type max_size() const nothrow @safe { return _Rep._S_max_size; }
+
+ ///
+ size_type size() const nothrow @safe { return _M_rep()._M_length; }
+ ///
+ size_type capacity() const nothrow { return _M_rep()._M_capacity; }
+ ///
+ inout(T)* data() inout @safe { return _M_data; }
+ ///
+ inout(T)[] as_array() inout nothrow @trusted { return _M_data[0 .. _M_rep()._M_length]; }
+ ///
+ ref inout(T) at(size_type i) inout nothrow { return _M_data[0 .. _M_rep()._M_length][i]; }
+
+ ///
+ ref basic_string assign(const(T)[] str)
+ {
+ const(T)* __s = str.ptr;
+ size_t __n = str.length;
+// __glibcxx_requires_string_len(__s, __n);
+ _M_check_length(size(), __n, "basic_string::assign");
+ if (_M_disjunct(__s) || _M_rep()._M_is_shared())
+ return _M_replace_safe(size_type(0), this.size(), __s, __n);
+ else
+ {
+ const size_type __pos = __s - _M_data;
+ if (__pos >= __n)
+ _S_copy(_M_data, __s, __n);
+ else if (__pos)
+ _S_move(_M_data, __s, __n);
+ _M_rep()._M_set_length_and_sharable(__n);
+ return this;
+ }
+ }
+
+ ///
+ ref basic_string assign(const ref basic_string str)
+ {
+ if (_M_rep() != str._M_rep())
+ {
+ // XXX MT
+ allocator_type __a = this.get_allocator();
+ T* __tmp = str._M_rep()._M_grab(__a, str.get_allocator());
+ _M_rep()._M_dispose(__a);
+ _M_data = __tmp;
+ }
+ return this;
+ }
+
+ ///
+ ref basic_string append(const(T)[] str)
+ {
+ const(T)* __s = str.ptr;
+ size_t __n = str.length;
+// __glibcxx_requires_string_len(__s, __n);
+ if (__n)
+ {
+ _M_check_length(size_type(0), __n, "basic_string::append");
+ const size_type __len = __n + size();
+ if (__len > capacity() || _M_rep()._M_is_shared())
+ {
+ if (_M_disjunct(__s))
+ reserve(__len);
+ else
+ {
+ const size_type __off = __s - _M_data;
+ reserve(__len);
+ __s = _M_data + __off;
+ }
+ }
+ _S_copy(_M_data + size(), __s, __n);
+ _M_rep()._M_set_length_and_sharable(__len);
+ }
+ return this;
+ }
+
+ ///
+ ref basic_string append(size_type __n, T __c)
+ {
+ if (__n)
+ {
+ _M_check_length(size_type(0), __n, "basic_string::append");
+ const size_type __len = __n + size();
+ if (__len > capacity() || _M_rep()._M_is_shared())
+ reserve(__len);
+ const __sz = size();
+ _M_data[__sz .. __sz + __n] = __c;
+ _M_rep()._M_set_length_and_sharable(__len);
+ }
+ return this;
+ }
+
+ ///
+ void reserve(size_type __res = 0)
+ {
+ if (__res != capacity() || _M_rep()._M_is_shared())
+ {
+ // Make sure we don't shrink below the current size
+ if (__res < size())
+ __res = size();
+ allocator_type __a = get_allocator();
+ T* __tmp = _M_rep()._M_clone(__a, __res - size());
+ _M_rep()._M_dispose(__a);
+ _M_data = __tmp;
+ }
+ }
+
+ ///
+ void shrink_to_fit() nothrow
+ {
+ if (capacity() > size())
+ {
+ try reserve(0);
+ catch (Throwable) {}
+ }
+ }
+
+ ///
+ ref basic_string insert(size_type __pos, const(T)* __s, size_type __n)
+ {
+// __glibcxx_requires_string_len(__s, __n);
+ cast(void) _M_check(__pos, "basic_string::insert");
+ _M_check_length(size_type(0), __n, "basic_string::insert");
+ if (_M_disjunct(__s) || _M_rep()._M_is_shared())
+ return _M_replace_safe(__pos, size_type(0), __s, __n);
+ else
+ {
+ // Work in-place.
+ const size_type __off = __s - _M_data;
+ _M_mutate(__pos, 0, __n);
+ __s = _M_data + __off;
+ T* __p = _M_data + __pos;
+ if (__s + __n <= __p)
+ __p[0 .. __n] = __s[0 .. __n];
+ else if (__s >= __p)
+ __p[0 .. __n] = (__s + __n)[0 .. __n];
+ else
+ {
+ const size_type __nleft = __p - __s;
+                        __p[0 .. __nleft] = __s[0 .. __nleft];
+ (__p + __nleft)[0 .. __n - __nleft] = (__p + __n)[0 .. __n - __nleft];
+ }
+ return this;
+ }
+ }
+
+ ///
+ ref basic_string insert(size_type pos, size_type n, T c)
+ {
+ return _M_replace_aux(_M_check(pos, "basic_string::insert"), size_type(0), n, c);
+ }
+
+ ///
+ ref basic_string replace(size_type __pos, size_type __n1, const(T)* __s, size_type __n2)
+ {
+// __glibcxx_requires_string_len(__s, __n2);
+ cast(void) _M_check(__pos, "basic_string::replace");
+ __n1 = _M_limit(__pos, __n1);
+ _M_check_length(__n1, __n2, "basic_string::replace");
+ bool __left;
+ if (_M_disjunct(__s) || _M_rep()._M_is_shared())
+ return _M_replace_safe(__pos, __n1, __s, __n2);
+ else if ((__left = __s + __n2 <= _M_data + __pos) == true || _M_data + __pos + __n1 <= __s)
+ {
+ // Work in-place: non-overlapping case.
+ size_type __off = __s - _M_data;
+                if (!__left)
+                    __off += __n2 - __n1;
+ _M_mutate(__pos, __n1, __n2);
+ (_M_data + __pos)[0 .. __n2] = (_M_data + __off)[0 .. __n2];
+ return this;
+ }
+ else
+ {
+ // Todo: overlapping case.
+ auto __tmp = basic_string(__s[0 .. __n2]);
+ return _M_replace_safe(__pos, __n1, __tmp._M_data, __n2);
+ }
+ }
+
+ ///
+ ref basic_string replace(size_type pos, size_type n1, size_type n2, T c)
+ {
+ return _M_replace_aux(_M_check(pos, "basic_string::replace"), _M_limit(pos, n1), n2, c);
+ }
+
+ ///
+ void swap(ref basic_string __s)
+ {
+ if (_M_rep()._M_is_leaked())
+ _M_rep()._M_set_sharable();
+ if (__s._M_rep()._M_is_leaked())
+ __s._M_rep()._M_set_sharable();
+ if (this.get_allocator() == __s.get_allocator())
+ {
+ T* __tmp = _M_data;
+ _M_data = __s._M_data;
+ __s._M_data = __tmp;
+ }
+ // The code below can usually be optimized away.
+ else
+ {
+ import core.lifetime : move;
+
+ auto __tmp1 = basic_string(this[], __s.get_allocator());
+ auto __tmp2 = basic_string(__s[], this.get_allocator());
+ this = move(__tmp2);
+ __s = move(__tmp1);
+ }
+ }
+
+ private:
+ import core.stdcpp.type_traits : is_empty;
+
+ version (__GTHREADS)
+ {
+ import core.atomic;
+ alias _Atomic_word = int; // should we use atomic!int?
+ }
+ else
+ alias _Atomic_word = int;
+
+ struct _Rep_base
+ {
+ size_type _M_length;
+ size_type _M_capacity;
+ _Atomic_word _M_refcount;
+ }
+
+ struct _Rep
+ {
+ _Rep_base base;
+ alias base this;
+
+ alias _Raw_bytes_alloc = Alloc.rebind!char;
+
+ enum size_type _S_max_size = (((npos - _Rep_base.sizeof) / T.sizeof) - 1) / 4;
+ enum T _S_terminal = T(0);
+
+ __gshared size_type[(_Rep_base.sizeof + T.sizeof + size_type.sizeof - 1) / size_type.sizeof] _S_empty_rep_storage;
+
+ static ref _Rep _S_empty_rep() nothrow @trusted { return *cast(_Rep*)_S_empty_rep_storage.ptr; }
+
+ void _M_set_sharable() nothrow
+ {
+ _M_refcount = 0;
+ }
+
+ void _M_set_length_and_sharable(size_type __n) nothrow
+ {
+ if (&this != &_S_empty_rep())
+ {
+ _M_set_sharable();
+ _M_length = __n;
+ _M_refdata()[__n] = _S_terminal;
+ }
+ }
+
+ bool _M_is_leaked() const nothrow
+ {
+ import core.atomic : atomicLoad;
+
+ version (__GTHREADS)
+ return atomicLoad!(MemoryOrder.raw)(this._M_refcount) < 0;
+ else
+ return _M_refcount < 0;
+ }
+//
+ bool _M_is_shared() const nothrow
+ {
+ import core.atomic : atomicLoad;
+
+ version (__GTHREADS)
+ return atomicLoad!(MemoryOrder.acq)(this._M_refcount) > 0;
+ else
+ return _M_refcount > 0;
+ }
+
+ T* _M_refdata() nothrow @trusted { return cast(T*)(&this + 1); }
+
+ T* _M_grab(ref allocator_type __alloc1, const ref allocator_type __alloc2)
+ {
+ return (!_M_is_leaked() && __alloc1 == __alloc2)
+ ? _M_refcopy() : _M_clone(__alloc1);
+ }
+
+ static _Rep* _S_create(size_type __capacity, size_type __old_capacity, ref Alloc __alloc)
+ {
+ assert(__capacity <= _S_max_size);
+// if (__capacity > _S_max_size)
+// __throw_length_error(__N("basic_string::_S_create"));
+
+ enum __pagesize = 4096;
+ enum __malloc_header_size = 4 * pointer.sizeof;
+
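+                // Growth/rounding policy (mirrors libstdc++): at least double the previous
+                // capacity, and for requests larger than a page, round up so the malloc block
+                // (including an assumed 4-pointer allocator header) fills whole pages.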
+ if (__capacity > __old_capacity && __capacity < 2 * __old_capacity)
+ __capacity = 2 * __old_capacity;
+
+ size_type __size = (__capacity + 1) * T.sizeof + _Rep.sizeof;
+
+ const size_type __adj_size = __size + __malloc_header_size;
+ if (__adj_size > __pagesize && __capacity > __old_capacity)
+ {
+ const size_type __extra = __pagesize - __adj_size % __pagesize;
+ __capacity += __extra / T.sizeof;
+ if (__capacity > _S_max_size)
+ __capacity = _S_max_size;
+ __size = (__capacity + 1) * T.sizeof + _Rep.sizeof;
+ }
+
+ _Rep* __p = cast(_Rep*)_Raw_bytes_alloc(__alloc).allocate(__size);
+ *__p = _Rep.init;
+ __p._M_capacity = __capacity;
+ __p._M_set_sharable();
+ return __p;
+ }
+
+ void _M_dispose(ref Alloc __a)
+ {
+ import core.stdcpp.xutility : __exchange_and_add_dispatch;
+
+ if (&this != &_S_empty_rep())
+ {
+ // Be race-detector-friendly. For more info see bits/c++config.
+// _GLIBCXX_SYNCHRONIZATION_HAPPENS_BEFORE(&this._M_refcount);
+ // Decrement of _M_refcount is acq_rel, because:
+ // - all but last decrements need to release to synchronize with
+ // the last decrement that will delete the object.
+ // - the last decrement needs to acquire to synchronize with
+ // all the previous decrements.
+ // - last but one decrement needs to release to synchronize with
+ // the acquire load in _M_is_shared that will conclude that
+ // the object is not shared anymore.
+ if (__exchange_and_add_dispatch(&this._M_refcount, -1) <= 0)
+ {
+// _GLIBCXX_SYNCHRONIZATION_HAPPENS_AFTER(&this._M_refcount);
+ _M_destroy(__a);
+ }
+ }
+ }
+
+ void _M_destroy(ref Alloc __a)
+ {
+ const size_type __size = _Rep_base.sizeof + (_M_capacity + 1) * T.sizeof;
+ _Raw_bytes_alloc(__a).deallocate(cast(char*)&this, __size);
+ }
+
+ T* _M_refcopy() nothrow @trusted
+ {
+ import core.stdcpp.xutility : __atomic_add_dispatch;
+
+ if (&this != &_S_empty_rep())
+ __atomic_add_dispatch(&this._M_refcount, 1);
+ return _M_refdata();
+ // XXX MT
+ }
+
+ T* _M_clone(ref Alloc __alloc, size_type __res = 0)
+ {
+ const size_type __requested_cap = _M_length + __res;
+ _Rep* __r = _S_create(__requested_cap, _M_capacity, __alloc);
+ if (_M_length)
+ _S_copy(__r._M_refdata(), _M_refdata(), _M_length);
+
+ __r._M_set_length_and_sharable(_M_length);
+ return __r._M_refdata();
+ }
+ }
+
+ static if (!is_empty!allocator_type.value)
+ allocator_type _M_Alloc;
+ T* _M_p; // The actual data.
+
+ alias _M_data = _M_p;
+
+ pragma (inline, true)
+ {
+ void eos(size_type offset)
+ {
+ _M_mutate(offset, size() - offset, size_type(0));
+ }
+
+ ref inout(allocator_type) _M_get_allocator() inout
+ {
+ static if (!is_empty!allocator_type.value)
+ return _M_Alloc;
+ else
+ return *cast(inout(allocator_type)*)&this;
+ }
+
+ _Rep* _M_rep() const nothrow @trusted { return &(cast(_Rep*)_M_data)[-1]; }
+
+ size_type _M_limit(size_type __pos, size_type __off) const @safe nothrow @nogc pure
+ {
+ const bool __testoff = __off < size() - __pos;
+ return __testoff ? __off : size() - __pos;
+ }
+ }
+
+ size_type _M_check(size_type __pos, const char* __s) const
+ {
+ assert(__pos <= size());
+// if (__pos > size())
+// __throw_out_of_range_fmt(__N("%s: __pos (which is %zu) > "
+// "this->size() (which is %zu)"),
+// __s, __pos, this->size());
+ return __pos;
+ }
+
+ static ref _Rep _S_empty_rep() nothrow
+ {
+ return _Rep._S_empty_rep();
+ }
+
+ static T* _S_construct(const(T)* __beg, const(T)* __end, ref Alloc __a)
+ {
+ version (_GLIBCXX_FULLY_DYNAMIC_STRING) {} else
+ {
+ if (__beg == __end && __a == Alloc())
+ return _S_empty_rep()._M_refdata();
+ }
+
+ const size_type __dnew = __end - __beg;
+
+ _Rep* __r = _Rep._S_create(__dnew, size_type(0), __a);
+ _S_copy(__r._M_refdata(), __beg, __end - __beg);
+ __r._M_set_length_and_sharable(__dnew);
+ return __r._M_refdata();
+ }
+
+ ref basic_string _M_replace_safe(size_type __pos1, size_type __n1, const(T)* __s, size_type __n2)
+ {
+ _M_mutate(__pos1, __n1, __n2);
+ if (__n2)
+ _S_copy(_M_data + __pos1, __s, __n2);
+ return this;
+ }
+
+ ref basic_string _M_replace_aux(size_type __pos1, size_type __n1, size_type __n2, T __c)
+ {
+ _M_check_length(__n1, __n2, "basic_string::_M_replace_aux");
+ _M_mutate(__pos1, __n1, __n2);
+ if (__n2)
+ _M_data[__pos1 .. __pos1 + __n2] = __c;
+ return this;
+ }
+
+ void _M_mutate(size_type __pos, size_type __len1, size_type __len2)
+ {
+ const size_type __old_size = size();
+ const size_type __new_size = __old_size + __len2 - __len1;
+ const size_type __how_much = __old_size - __pos - __len1;
+
+ if (__new_size > capacity() || _M_rep()._M_is_shared())
+ {
+ allocator_type __a = get_allocator();
+ _Rep* __r = _Rep._S_create(__new_size, capacity(), __a);
+
+ if (__pos)
+ _S_copy(__r._M_refdata(), _M_data, __pos);
+ if (__how_much)
+ _S_copy(__r._M_refdata() + __pos + __len2, _M_data + __pos + __len1, __how_much);
+
+ allocator_type* __al = cast() &__a;
+ _M_rep()._M_dispose(*__al);
+ _M_data = __r._M_refdata();
+ }
+ else if (__how_much && __len1 != __len2)
+ _S_move(_M_data + __pos + __len2, _M_data + __pos + __len1, __how_much);
+ _M_rep()._M_set_length_and_sharable(__new_size);
+ }
+ }
+ else
+ {
+ pragma(msg, "libstdc++ std::__cxx11::basic_string is not yet supported; the struct contains an interior pointer which breaks D move semantics!");
+
+ //----------------------------------------------------------------------------------
+ // GCC/libstdc++ modern implementation
+ //----------------------------------------------------------------------------------
+
+ ///
+ this(DefaultConstruct) { _M_p = _M_local_data(); _M_set_length(0); }
+ ///
+ this(const(T)[] str, ref const(allocator_type) al) { _M_assign_allocator(al); this(str); }
+ ///
+ this(const(T)[] str)
+ {
+ _M_p = _M_local_data();
+ _M_construct(str.ptr, str.length);
+ }
+ ///
+ this(this)
+ {
+ assert(false);
+ // TODO: how do I know if it was local before?!
+ }
+
+ ///
+ ~this() { _M_dispose(); }
+
+ ///
+ ref inout(Alloc) get_allocator() inout { return _M_get_allocator(); }
+
+ ///
+ size_type max_size() const nothrow @safe { return ((size_t.max / T.sizeof) - 1) / 2; }
+
+ ///
+ size_type size() const nothrow @safe { return _M_string_length; }
+ ///
+ size_type capacity() const nothrow { return _M_is_local ? _S_local_capacity : _M_allocated_capacity; }
+ ///
+ inout(T)* data() inout @safe { return _M_data; }
+ ///
+ inout(T)[] as_array() inout nothrow @trusted { return _M_data[0 .. _M_string_length]; }
+ ///
+ ref inout(T) at(size_type i) inout nothrow { return _M_data[0 .. _M_string_length][i]; }
+
+ ///
+ ref basic_string assign(const(T)[] str)
+ {
+// __glibcxx_requires_string_len(str.ptr, str.length);
+ return _M_replace(size_type(0), size(), str.ptr, str.length);
+ }
+
+ ///
+ ref basic_string assign(const ref basic_string str)
+ {
+ if (&this != &str)
+ assign(str.as_array);
+ return this;
+ }
+
+ ///
+ ref basic_string append(const(T)[] str)
+ {
+// __glibcxx_requires_string_len(str.ptr, str.length);
+ _M_check_length(size_type(0), str.length, "basic_string::append");
+ return _M_append(str.ptr, str.length);
+ }
+
+ ///
+ ref basic_string append(size_type n, T c)
+ {
+ return _M_replace_aux(size(), size_type(0), n, c);
+ }
+
+ ///
+ void reserve(size_type __res = 0)
+ {
+ // Make sure we don't shrink below the current size.
+ if (__res < length())
+ __res = length();
+
+ const size_type __capacity = capacity();
+ if (__res != __capacity)
+ {
+ if (__res > __capacity || __res > size_type(_S_local_capacity))
+ {
+ pointer __tmp = _M_create(__res, __capacity);
+ _S_copy(__tmp, _M_data, length() + 1);
+ _M_dispose();
+ _M_data = __tmp;
+ _M_capacity = __res;
+ }
+ else if (!_M_is_local())
+ {
+ _S_copy(_M_local_data(), _M_data, length() + 1);
+ _M_destroy(__capacity);
+ _M_data = _M_local_data();
+ }
+ }
+ }
+
+ ///
+ void shrink_to_fit() nothrow
+ {
+ if (capacity() > size())
+ {
+ try reserve(0);
+ catch (Throwable) {}
+ }
+ }
+
+ ///
+ ref basic_string insert(size_type pos, const(T)* s, size_type n)
+ {
+ return replace(pos, size_type(0), s, n);
+ }
+
+ ///
+ ref basic_string insert(size_type pos, size_type n, T c)
+ {
+ return _M_replace_aux(_M_check(pos, "basic_string::insert"), size_type(0), n, c);
+ }
+
+ ///
+ ref basic_string replace(size_type pos, size_type n1, const(T)* s, size_type n2)
+ {
+// __glibcxx_requires_string_len(s, n2);
+ return _M_replace(_M_check(pos, "basic_string::replace"), _M_limit(pos, n1), s, n2);
+ }
+
+ ///
+ ref basic_string replace(size_type pos, size_type n1, size_type n2, T c)
+ {
+ return _M_replace_aux(_M_check(pos, "basic_string::replace"), _M_limit(pos, n1), n2, c);
+ }
+
+ ///
+ void swap(ref basic_string __s)
+ {
+ if (&this == &__s)
+ return;
+
+ __alloc_on_swap(__s._M_get_allocator());
+
+ if (_M_is_local())
+ {
+ if (__s._M_is_local())
+ {
+ if (length() && __s.length())
+ {
+ T[_S_local_capacity + 1] __tmp_data;
+ __tmp_data[] = __s._M_local_buf[];
+ __s._M_local_buf[] = _M_local_buf[];
+ _M_local_buf[] = __tmp_data[];
+ }
+ else if (__s.length())
+ {
+ _M_local_buf[] = __s._M_local_buf[];
+ _M_length = __s.length();
+ __s._M_set_length(0);
+ return;
+ }
+ else if (length())
+ {
+ __s._M_local_buf[] = _M_local_buf[];
+ __s._M_length = length();
+ _M_set_length(0);
+ return;
+ }
+ }
+ else
+ {
+ const size_type __tmp_capacity = __s._M_allocated_capacity;
+ __s._M_local_buf[] = _M_local_buf[];
+ _M_data = __s._M_data;
+ __s._M_data = __s._M_local_buf.ptr;
+ _M_capacity = __tmp_capacity;
+ }
+ }
+ else
+ {
+ const size_type __tmp_capacity = _M_allocated_capacity;
+ if (__s._M_is_local())
+ {
+ _M_local_buf[] = __s._M_local_buf[];
+ __s._M_data = _M_data;
+ _M_data = _M_local_buf.ptr;
+ }
+ else
+ {
+ pointer __tmp_ptr = _M_data;
+ _M_data = __s._M_data;
+ __s._M_data = __tmp_ptr;
+ _M_capacity = __s._M_allocated_capacity;
+ }
+ __s._M_capacity = __tmp_capacity;
+ }
+
+ const size_type __tmp_length = length();
+ _M_length = __s.length();
+ __s._M_length = __tmp_length;
+ }
+
+ private:
+// import core.exception : RangeError;
+ import core.stdcpp.type_traits : is_empty;
+
+ static if (!is_empty!allocator_type.value)
+ allocator_type _M_Alloc;
+ pointer _M_p; // The actual data.
+ size_type _M_string_length;
+
+ enum size_type _S_local_capacity = 15 / T.sizeof;
+ union
+ {
+ T[_S_local_capacity + 1] _M_local_buf;
+ size_type _M_allocated_capacity;
+ }
+
+ alias _M_length = _M_string_length;
+ alias _M_capacity = _M_allocated_capacity;
+ alias _M_data = _M_p;
+
+ pragma (inline, true)
+ {
+ void eos(size_type offset) nothrow { _M_set_length(offset); }
+
+ inout(pointer) _M_local_data() inout { return _M_local_buf.ptr; }
+ bool _M_is_local() const { return _M_data == _M_local_data; }
+
+ ref inout(allocator_type) _M_get_allocator() inout
+ {
+ static if (!is_empty!allocator_type.value)
+ return _M_Alloc;
+ else
+ return *cast(inout(allocator_type)*)&this;
+ }
+
+ void _M_set_length(size_type __n)
+ {
+ _M_length = __n;
+ _M_data[__n] = T(0);
+ }
+
+ size_type _M_check(size_type __pos, const char* __s) const
+ {
+ assert(__pos <= size());
+// if (__pos > size())
+// __throw_out_of_range_fmt(__N("%s: __pos (which is %zu) > "
+// "this->size() (which is %zu)"),
+// __s, __pos, this->size());
+ return __pos;
+ }
+
+ // NB: _M_limit doesn't check for a bad __pos value.
+ size_type _M_limit(size_type __pos, size_type __off) const nothrow pure @nogc @safe
+ {
+ const bool __testoff = __off < size() - __pos;
+ return __testoff ? __off : size() - __pos;
+ }
+
+ void __alloc_on_swap()(ref allocator_type __a)
+ if (!is_empty!allocator_type.value)
+ {
+ import core.internal.lifetime : swap;
+
+ static if (allocator_traits!allocator_type.propagate_on_container_swap)
+ swap(_M_get_allocator(), __a);
+ }
+
+ void __alloc_on_swap()(ref allocator_type __a)
+ if (is_empty!allocator_type.value)
+ {
+ import core.internal.lifetime : swap;
+ import core.lifetime : move;
+
+ static if (allocator_traits!allocator_type.propagate_on_container_swap)
+ {
+ static if (is(typeof(_M_get_allocator().opAssign(move(__a)))))
+ swap(_M_get_allocator(), __a);
+ }
+ }
+ }
+
+ void _M_construct(const(T)* __beg, size_type __dnew)
+ {
+ if (__dnew > _S_local_capacity)
+ {
+ _M_data = _M_create(__dnew, size_type(0));
+ _M_capacity = __dnew;
+ }
+ _M_data[0 .. __dnew] = __beg[0 .. __dnew];
+ _M_set_length(__dnew);
+ }
+
+ pointer _M_create(ref size_type __capacity, size_type __old_capacity)
+ {
+ assert(__capacity <= max_size());
+// if (__capacity > max_size())
+// throw new RangeError("Length exceeds `max_size()`"); // std::__throw_length_error(__N("basic_string::_M_create"));
+ if (__capacity > __old_capacity && __capacity < 2 * __old_capacity)
+ {
+ __capacity = 2 * __old_capacity;
+ if (__capacity > max_size())
+ __capacity = max_size();
+ }
+ return _M_get_allocator().allocate(__capacity + 1);
+ }
+
+ ref basic_string _M_replace(size_type __pos, size_type __len1, const(T)* __s, const size_type __len2)
+ {
+ _M_check_length(__len1, __len2, "basic_string::_M_replace");
+
+ const size_type __old_size = size();
+ const size_type __new_size = __old_size + __len2 - __len1;
+
+ if (__new_size <= capacity())
+ {
+ pointer __p = _M_data + __pos;
+
+ const size_type __how_much = __old_size - __pos - __len1;
+ if (_M_disjunct(__s))
+ {
+ if (__how_much && __len1 != __len2)
+ _S_move(__p + __len2, __p + __len1, __how_much);
+ if (__len2)
+ _S_copy(__p, __s, __len2);
+ }
+ else
+ {
+ // Work in-place.
+ if (__len2 && __len2 <= __len1)
+ _S_move(__p, __s, __len2);
+ if (__how_much && __len1 != __len2)
+ _S_move(__p + __len2, __p + __len1, __how_much);
+ if (__len2 > __len1)
+ {
+ if (__s + __len2 <= __p + __len1)
+ _S_move(__p, __s, __len2);
+ else if (__s >= __p + __len1)
+ _S_copy(__p, __s + __len2 - __len1, __len2);
+ else
+ {
+ const size_type __nleft = (__p + __len1) - __s;
+ _S_move(__p, __s, __nleft);
+ _S_copy(__p + __nleft, __p + __len2,
+ __len2 - __nleft);
+ }
+ }
+ }
+ }
+ else
+ _M_mutate(__pos, __len1, __s, __len2);
+
+ _M_set_length(__new_size);
+ return this;
+ }
+
+ ref basic_string _M_replace_aux(size_type __pos1, size_type __n1, size_type __n2, T __c)
+ {
+ _M_check_length(__n1, __n2, "basic_string::_M_replace_aux");
+
+ const size_type __old_size = size();
+ const size_type __new_size = __old_size + __n2 - __n1;
+
+ if (__new_size <= capacity())
+ {
+ pointer __p = _M_data + __pos1;
+
+ const size_type __how_much = __old_size - __pos1 - __n1;
+ if (__how_much && __n1 != __n2)
+ _S_move(__p + __n2, __p + __n1, __how_much);
+ }
+ else
+ _M_mutate(__pos1, __n1, null, __n2);
+
+ if (__n2)
+ _M_data[__pos1 .. __pos1 + __n2] = __c;
+
+ _M_set_length(__new_size);
+ return this;
+ }
+
+ ref basic_string _M_append(const(T)* __s, size_type __n)
+ {
+ const size_type __len = __n + size();
+ if (__len <= capacity())
+ {
+ if (__n)
+ _S_copy(_M_data + size(), __s, __n);
+ }
+ else
+ _M_mutate(size(), size_type(0), __s, __n);
+ _M_set_length(__len);
+ return this;
+ }
+
+ void _M_mutate(size_type __pos, size_type __len1, const(T)* __s, size_type __len2)
+ {
+ const size_type __how_much = length() - __pos - __len1;
+
+ size_type __new_capacity = length() + __len2 - __len1;
+ pointer __r = _M_create(__new_capacity, capacity());
+
+ if (__pos)
+ _S_copy(__r, _M_data, __pos);
+ if (__s && __len2)
+ _S_copy(__r + __pos, __s, __len2);
+ if (__how_much)
+ _S_copy(__r + __pos + __len2,
+ _M_data + __pos + __len1, __how_much);
+
+ _M_dispose();
+ _M_data = __r;
+ _M_capacity = __new_capacity;
+ }
+
+ void _M_dispose()
+ {
+ if (!_M_is_local)
+ _M_destroy(_M_allocated_capacity);
+ }
+
+ void _M_destroy(size_type __size)
+ {
+ _M_get_allocator().deallocate(_M_data, __size + 1);
+ }
+ }
+
+        // common GCC/libstdc++ code
+
+ void _M_check_length(size_type __n1, size_type __n2, const char* __s) const
+ {
+ assert (!(max_size() - (size() - __n1) < __n2));
+// if (max_size() - (size() - __n1) < __n2)
+// __throw_length_error(__N(__s));
+ }
+
+ void _M_assign_allocator(ref const(allocator_type) al) nothrow
+ {
+ static if (!is_empty!allocator_type.value)
+ _M_Alloc = al;
+ }
+
+ bool _M_disjunct(const(T)* __s) const nothrow
+ {
+ return __s < _M_data || _M_data + size() < __s;
+ }
+
+ static void _S_move(T* __d, const(T)* __s, size_type __n)
+ {
+ if (__d == __s)
+ return;
+ if (__d < __s)
+ {
+ for (size_t i = 0; i < __n; ++i)
+ __d[i] = __s[i];
+ }
+ else
+ {
+ for (ptrdiff_t i = __n - 1; i >= 0; --i)
+ __d[i] = __s[i];
+ }
+ }
+ static void _S_copy(T* __d, const(T)* __s, size_type __n)
+ {
+ __d[0 .. __n] = __s[0 .. __n];
+ }
+ }
+ else version (CppRuntime_Clang)
+ {
+ //----------------------------------------------------------------------------------
+ // Clang/libc++ implementation
+ //----------------------------------------------------------------------------------
+
+ ///
+ this(DefaultConstruct) { __zero(); }
+ ///
+ this(const(T)[] str, ref const(allocator_type) al) { __assign_allocator(al); this(str); }
+ ///
+ this(const(T)[] str) { __init(str.ptr, str.length); }
+ ///
+ this(this)
+ {
+ if (__is_long())
+ __init(__get_long_pointer(), __get_long_size());
+ }
+
+ ///
+ ~this()
+ {
+// __get_db()->__erase_c(this); // TODO: support `_LIBCPP_DEBUG_LEVEL >= 2` ??
+ if (__is_long())
+ __alloc().deallocate(__get_long_pointer(), __get_long_cap());
+ }
+
+ ///
+ ref inout(Alloc) get_allocator() inout { return __alloc(); }
+
+ ///
+ size_type max_size() const nothrow @safe
+ {
+ size_type __m = size_t.max; // TODO: __alloc_traits::max_size(__alloc());
+ version (BigEndian)
+ return (__m <= ~__long_mask ? __m : __m/2) - __alignment;
+ else
+ return __m - __alignment;
+ }
+
+ ///
+ size_type size() const nothrow { return __is_long() ? __get_long_size() : __get_short_size(); }
+ ///
+ size_type capacity() const nothrow { return (__is_long() ? __get_long_cap() : __min_cap) - 1; }
+ ///
+ inout(T)* data() inout @safe { return __get_pointer(); }
+ ///
+ inout(T)[] as_array() inout nothrow @trusted { return __get_pointer()[0 .. size()]; }
+ ///
+ ref inout(T) at(size_type i) inout nothrow @trusted { return __get_pointer()[0 .. size()][i]; }
+
+ ///
+ ref basic_string assign(const(T)[] str)
+ {
+ const(value_type)* __s = str.ptr;
+ size_type __n = str.length;
+ size_type __cap = capacity();
+ if (__cap >= __n)
+ {
+ value_type* __p = __get_pointer();
+ __p[0 .. __n] = __s[0 .. __n]; // TODO: is memmove?
+ __p[__n] = value_type(0);
+ __set_size(__n);
+// __invalidate_iterators_past(__n); // TODO: support `_LIBCPP_DEBUG_LEVEL >= 2` ??
+ }
+ else
+ {
+ size_type __sz = size();
+ __grow_by_and_replace(__cap, __n - __cap, __sz, 0, __sz, __n, __s);
+ }
+ return this;
+ }
+
+ ///
+ ref basic_string assign(const ref basic_string str)
+ {
+ if (&this != &str)
+ assign(str.as_array);
+ return this;
+ }
+
+ ///
+ ref basic_string append(const(T)[] str)
+ {
+ const(value_type)* __s = str.ptr;
+ size_type __n = str.length;
+ size_type __cap = capacity();
+ size_type __sz = size();
+ if (__cap - __sz >= __n)
+ {
+ if (__n)
+ {
+ value_type* __p = __get_pointer();
+ (__p + __sz)[0 .. __n] = __s[0 .. __n];
+ __sz += __n;
+ __set_size(__sz);
+ __p[__sz] = value_type(0);
+ }
+ }
+ else
+ __grow_by_and_replace(__cap, __sz + __n - __cap, __sz, __sz, 0, __n, __s);
+ return this;
+ }
+
+ ///
+ ref basic_string append(size_type __n, value_type __c)
+ {
+ if (__n)
+ {
+ size_type __cap = capacity();
+ size_type __sz = size();
+ if (__cap - __sz < __n)
+ __grow_by(__cap, __sz + __n - __cap, __sz, __sz, 0);
+ pointer __p = __get_pointer();
+ __p[__sz .. __sz + __n] = __c;
+ __sz += __n;
+ __set_size(__sz);
+ __p[__sz] = value_type(0);
+ }
+ return this;
+ }
+
+ ///
+ void reserve(size_type __res_arg = 0)
+ {
+ assert(__res_arg <= max_size());
+// if (__res_arg > max_size())
+// __throw_length_error();
+ size_type __cap = capacity();
+ size_type __sz = size();
+ __res_arg = max(__res_arg, __sz);
+ __res_arg = __recommend(__res_arg);
+ if (__res_arg != __cap)
+ {
+ pointer __new_data, __p;
+ bool __was_long, __now_long;
+ if (__res_arg == __min_cap - 1)
+ {
+ __was_long = true;
+ __now_long = false;
+ __new_data = __get_short_pointer();
+ __p = __get_long_pointer();
+ }
+ else
+ {
+ if (__res_arg > __cap)
+ __new_data = __alloc().allocate(__res_arg+1);
+ else
+ {
+ try
+ __new_data = __alloc().allocate(__res_arg+1);
+ catch (Throwable)
+ return;
+ }
+ __now_long = true;
+ __was_long = __is_long();
+ __p = __get_pointer();
+ }
+ __new_data[0 .. size()+1] = __p[0 .. size()+1];
+ if (__was_long)
+ __alloc().deallocate(__p, __cap+1);
+ if (__now_long)
+ {
+ __set_long_cap(__res_arg+1);
+ __set_long_size(__sz);
+ __set_long_pointer(__new_data);
+ }
+ else
+ __set_short_size(__sz);
+// __invalidate_all_iterators(); // TODO:
+ }
+ }
+
+ ///
+ void shrink_to_fit()
+ {
+ reserve();
+ }
+
+ ///
+ ref basic_string insert(size_type __pos, const(value_type)* __s, size_type __n)
+ {
+ assert(__n == 0 || __s != null, "string::insert received null");
+ size_type __sz = size();
+ assert(__pos <= __sz);
+// if (__pos > __sz)
+// this->__throw_out_of_range();
+ size_type __cap = capacity();
+ if (__cap - __sz >= __n)
+ {
+ if (__n)
+ {
+ value_type* __p = __get_pointer();
+ size_type __n_move = __sz - __pos;
+ if (__n_move != 0)
+ {
+ if (__p + __pos <= __s && __s < __p + __sz)
+ __s += __n;
+ traits_type.move(__p + __pos + __n, __p + __pos, __n_move);
+ }
+ traits_type.move(__p + __pos, __s, __n);
+ __sz += __n;
+ __set_size(__sz);
+ __p[__sz] = value_type(0);
+ }
+ }
+ else
+ __grow_by_and_replace(__cap, __sz + __n - __cap, __sz, __pos, 0, __n, __s);
+ return this;
+ }
+
+ ///
+ ref basic_string insert(size_type pos, size_type n, value_type c)
+ {
+ alias __pos = pos;
+ alias __n = n;
+ alias __c = c;
+ size_type __sz = size();
+ assert(__pos <= __sz);
+// if (__pos > __sz)
+// __throw_out_of_range();
+ if (__n)
+ {
+ size_type __cap = capacity();
+ value_type* __p;
+ if (__cap - __sz >= __n)
+ {
+ __p = __get_pointer();
+ size_type __n_move = __sz - __pos;
+ if (__n_move != 0)
+ traits_type.move(__p + __pos + __n, __p + __pos, __n_move);
+ }
+ else
+ {
+ __grow_by(__cap, __sz + __n - __cap, __sz, __pos, 0, __n);
+ __p = __get_long_pointer();
+ }
+ __p[__pos .. __pos + __n] = __c;
+ __sz += __n;
+ __set_size(__sz);
+ __p[__sz] = value_type(0);
+ }
+ return this;
+ }
+
+ ///
+ ref basic_string replace(size_type __pos, size_type __n1, const(T)* __s, size_type __n2)
+ {
+ assert(__n2 == 0 || __s != null, "string::replace received null");
+ size_type __sz = size();
+ assert(__pos <= __sz);
+// if (__pos > __sz)
+// __throw_out_of_range();
+ __n1 = min(__n1, __sz - __pos);
+ size_type __cap = capacity();
+ if (__cap - __sz + __n1 >= __n2)
+ {
+ value_type* __p = __get_pointer();
+ if (__n1 != __n2)
+ {
+ size_type __n_move = __sz - __pos - __n1;
+ if (__n_move != 0)
+ {
+ if (__n1 > __n2)
+ {
+ traits_type.move(__p + __pos, __s, __n2);
+ traits_type.move(__p + __pos + __n2, __p + __pos + __n1, __n_move);
+ goto __finish;
+ }
+ if (__p + __pos < __s && __s < __p + __sz)
+ {
+ if (__p + __pos + __n1 <= __s)
+ __s += __n2 - __n1;
+ else // __p + __pos < __s < __p + __pos + __n1
+ {
+ traits_type.move(__p + __pos, __s, __n1);
+ __pos += __n1;
+ __s += __n2;
+ __n2 -= __n1;
+ __n1 = 0;
+ }
+ }
+ traits_type.move(__p + __pos + __n2, __p + __pos + __n1, __n_move);
+ }
+ }
+ traits_type.move(__p + __pos, __s, __n2);
+ __finish:
+ // `__sz += __n2 - __n1` in this function and the one below can cause unsigned integer overflow,
+ // but this is a safe operation, so we disable the check.
+ __sz += __n2 - __n1;
+ __set_size(__sz);
+// __invalidate_iterators_past(__sz); // TODO
+ __p[__sz] = value_type(0);
+ }
+ else
+ __grow_by_and_replace(__cap, __sz - __n1 + __n2 - __cap, __sz, __pos, __n1, __n2, __s);
+ return this;
+ }
+
+ ///
+ ref basic_string replace(size_type __pos, size_type __n1, size_type __n2, value_type __c)
+ {
+ size_type __sz = size();
+ assert(__pos <= __sz);
+// if (__pos > __sz)
+// __throw_out_of_range();
+ __n1 = min(__n1, __sz - __pos);
+ size_type __cap = capacity();
+ value_type* __p;
+ if (__cap - __sz + __n1 >= __n2)
+ {
+ __p = __get_pointer();
+ if (__n1 != __n2)
+ {
+ size_type __n_move = __sz - __pos - __n1;
+ if (__n_move != 0)
+ traits_type.move(__p + __pos + __n2, __p + __pos + __n1, __n_move);
+ }
+ }
+ else
+ {
+ __grow_by(__cap, __sz - __n1 + __n2 - __cap, __sz, __pos, __n1, __n2);
+ __p = __get_long_pointer();
+ }
+ __p[__pos .. __pos + __n2] = __c;
+ __sz += __n2 - __n1;
+ __set_size(__sz);
+// __invalidate_iterators_past(__sz); // TODO
+ __p[__sz] = value_type(0);
+ return this;
+ }
+
+ ///
+ void swap(ref basic_string __str)
+ {
+ import core.internal.lifetime : swap;
+// static if (_LIBCPP_DEBUG_LEVEL >= 2)
+// {
+// if (!__is_long())
+// __get_db().__invalidate_all(&this);
+// if (!__str.__is_long())
+// __get_db().__invalidate_all(&__str);
+// __get_db().swap(&this, &__str);
+// }
+ assert(
+ __alloc_traits.propagate_on_container_swap ||
+ __alloc_traits.is_always_equal ||
+ __alloc() == __str.__alloc(), "swapping non-equal allocators");
+ swap(__r_.first(), __str.__r_.first());
+ __swap_allocator(__alloc(), __str.__alloc());
+ }
+
+ private:
+// import core.exception : RangeError;
+ import core.stdcpp.xutility : __compressed_pair;
+
+ alias __alloc_traits = allocator_traits!allocator_type;
+
+ enum __alignment = 16;
+
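+ // libc++ small-string optimisation: a single flag bit in the short-string size byte
+ // (its position depends on the layout variant and on endianness) marks the
+ // representation as "long" (heap-allocated); __short_mask/__long_mask below encode
+ // that bit for __is_long(), __set_long_cap() and __get_long_cap().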
+ version (_LIBCPP_ABI_ALTERNATE_STRING_LAYOUT)
+ {
+ struct __long
+ {
+ pointer __data_;
+ size_type __size_;
+ size_type __cap_;
+ }
+
+ version (BigEndian)
+ {
+ enum size_type __short_mask = 0x01;
+ enum size_type __long_mask = 0x1;
+ }
+ else
+ {
+ enum size_type __short_mask = 0x80;
+ enum size_type __long_mask = ~(size_type(~0) >> 1);
+ }
+
+ enum size_type __min_cap = (__long.sizeof - 1)/value_type.sizeof > 2 ? (__long.sizeof - 1)/value_type.sizeof : 2;
+
+ struct __short
+ {
+ value_type[__min_cap] __data_;
+ struct
+ {
+ static if (value_type.sizeof > 1)
+ ubyte[value_type.sizeof-1] __xx; // __padding<value_type>
+ ubyte __size_;
+ }
+ }
+ }
+ else
+ {
+ struct __long
+ {
+ size_type __cap_;
+ size_type __size_;
+ pointer __data_;
+ }
+
+ version (BigEndian)
+ {
+ enum size_type __short_mask = 0x80;
+ enum size_type __long_mask = ~(size_type(~0) >> 1);
+ }
+ else
+ {
+ enum size_type __short_mask = 0x01;
+ enum size_type __long_mask = 0x1;
+ }
+
+ enum size_type __min_cap = (__long.sizeof - 1)/value_type.sizeof > 2 ? (__long.sizeof - 1)/value_type.sizeof : 2;
+
+ struct __short
+ {
+ union
+ {
+ ubyte __size_;
+ value_type __lx;
+ }
+ value_type[__min_cap] __data_;
+ }
+ }
+
+ union __ulx { __long __lx; __short __lxx; }
+ enum __n_words = __ulx.sizeof / size_type.sizeof;
+
+ struct __raw
+ {
+ size_type[__n_words] __words;
+ }
+
+ struct __rep
+ {
+ union
+ {
+ __long __l;
+ __short __s;
+ __raw __r;
+ }
+ }
+
+ __compressed_pair!(__rep, allocator_type) __r_;
+
+ pragma (inline, true)
+ {
+ void eos(size_type offset) nothrow
+ {
+ __set_size(offset);
+// __invalidate_iterators_past(__sz); // TODO: support `_LIBCPP_DEBUG_LEVEL >= 2` ??
+ __get_pointer()[offset] = value_type(0);
+ }
+
+ version (_LIBCPP_ABI_ALTERNATE_STRING_LAYOUT)
+ {
+ version (BigEndian)
+ {
+ void __set_short_size(size_type __s) nothrow @safe { __r_.first().__s.__size_ = cast(ubyte)(__s << 1); }
+ size_type __get_short_size() const nothrow @safe { return __r_.first().__s.__size_ >> 1; }
+ }
+ else
+ {
+ void __set_short_size(size_type __s) nothrow @safe { __r_.first().__s.__size_ = cast(ubyte)(__s);}
+ size_type __get_short_size() const nothrow @safe { return __r_.first().__s.__size_;}
+ }
+ }
+ else
+ {
+ version (BigEndian)
+ {
+ void __set_short_size(size_type __s) nothrow @safe { __r_.first().__s.__size_ = cast(ubyte)(__s); }
+ size_type __get_short_size() const nothrow @safe { return __r_.first().__s.__size_; }
+ }
+ else
+ {
+ void __set_short_size(size_type __s) nothrow @safe { __r_.first().__s.__size_ = cast(ubyte)(__s << 1); }
+ size_type __get_short_size() const nothrow @safe { return __r_.first().__s.__size_ >> 1; }
+ }
+ }
+ void __set_long_size(size_type __s) nothrow { __r_.first().__l.__size_ = __s; }
+ size_type __get_long_size() const nothrow { return __r_.first().__l.__size_; }
+ void __set_size(size_type __s) nothrow { if (__is_long()) __set_long_size(__s); else __set_short_size(__s); }
+
+ void __set_long_cap(size_type __s) nothrow { __r_.first().__l.__cap_ = __long_mask | __s; }
+ size_type __get_long_cap() const nothrow { return __r_.first().__l.__cap_ & size_type(~__long_mask); }
+
+ void __set_long_pointer(pointer __p) nothrow { __r_.first().__l.__data_ = __p; }
+ inout(T)* __get_long_pointer() inout nothrow { return __r_.first().__l.__data_; }
+ inout(T)* __get_short_pointer() inout nothrow @safe { return &__r_.first().__s.__data_[0]; }
+ inout(T)* __get_pointer() inout nothrow { return __is_long() ? __get_long_pointer() : __get_short_pointer(); }
+
+ bool __is_long() const nothrow @safe { return (__r_.first().__s.__size_ & __short_mask) != 0; }
+
+ void __zero() nothrow @safe { __r_.first().__r.__words[] = 0; }
+
+ ref inout(allocator_type) __alloc() inout nothrow @safe { return __r_.second(); }
+
+ void __init(const(value_type)* __s, size_type __sz) { return __init(__s, __sz, __sz); }
+ }
+
+ void __assign_allocator(ref const(allocator_type) al) nothrow
+ {
+ static if (!__r_.Ty2Empty)
+ __alloc() = al;
+ }
+
+ void __init(const(value_type)* __s, size_type __sz, size_type __reserve)
+ {
+ assert(__reserve <= max_size());
+// if (__reserve > max_size())
+// throw new RangeError("Length exceeds `max_size()`"); // this->__throw_length_error();
+ pointer __p;
+ if (__reserve < __min_cap)
+ {
+ __set_short_size(__sz);
+ __p = __get_short_pointer();
+ }
+ else
+ {
+ size_type __cap = __recommend(__reserve);
+ __p = __alloc().allocate(__cap+1, null);
+ __set_long_pointer(__p);
+ __set_long_cap(__cap+1);
+ __set_long_size(__sz);
+ }
+ __p[0 .. __sz] = __s[0 .. __sz];
+ __p[__sz] = value_type(0);
+ }
+
+ static size_type __recommend(size_type __s) nothrow @safe
+ {
+ static size_type __align_it(size_type __a)(size_type __s) nothrow @safe { return (__s + (__a-1)) & ~(__a-1); }
+ if (__s < __min_cap) return __min_cap - 1;
+ size_type __guess = __align_it!(value_type.sizeof < __alignment ? __alignment/value_type.sizeof : 1)(__s+1) - 1;
+ if (__guess == __min_cap) ++__guess;
+ return __guess;
+ }
+
+ void __grow_by_and_replace(size_type __old_cap, size_type __delta_cap, size_type __old_sz, size_type __n_copy,
+ size_type __n_del, size_type __n_add, const(value_type)* __p_new_stuff)
+ {
+ size_type __ms = max_size();
+ assert(__delta_cap <= __ms - __old_cap - 1);
+// if (__delta_cap > __ms - __old_cap - 1)
+// throw new RangeError("Length exceeds `max_size()`"); // this->__throw_length_error();
+ pointer __old_p = __get_pointer();
+ size_type __cap = __old_cap < __ms / 2 - __alignment ?
+ __recommend(max(__old_cap + __delta_cap, 2 * __old_cap)) :
+ __ms - 1;
+ pointer __p = __alloc().allocate(__cap+1);
+// __invalidate_all_iterators(); // TODO: support `_LIBCPP_DEBUG_LEVEL >= 2` ??
+ if (__n_copy != 0)
+ __p[0 .. __n_copy] = __old_p[0 .. __n_copy];
+ if (__n_add != 0)
+ (__p + __n_copy)[0 .. __n_add] = __p_new_stuff[0 .. __n_add];
+ size_type __sec_cp_sz = __old_sz - __n_del - __n_copy;
+ if (__sec_cp_sz != 0)
+ (__p + __n_copy + __n_add)[0 .. __sec_cp_sz] = (__old_p + __n_copy + __n_del)[0 .. __sec_cp_sz];
+ if (__old_cap+1 != __min_cap)
+ __alloc().deallocate(__old_p, __old_cap+1);
+ __set_long_pointer(__p);
+ __set_long_cap(__cap+1);
+ __old_sz = __n_copy + __n_add + __sec_cp_sz;
+ __set_long_size(__old_sz);
+ __p[__old_sz] = value_type(0);
+ }
+
+ void __grow_by(size_type __old_cap, size_type __delta_cap, size_type __old_sz,
+ size_type __n_copy, size_type __n_del, size_type __n_add = 0)
+ {
+ size_type __ms = max_size();
+ assert(__delta_cap <= __ms - __old_cap);
+// if (__delta_cap > __ms - __old_cap)
+// __throw_length_error();
+ pointer __old_p = __get_pointer();
+ size_type __cap = __old_cap < __ms / 2 - __alignment ?
+ __recommend(max(__old_cap + __delta_cap, 2 * __old_cap)) :
+ __ms - 1;
+ pointer __p = __alloc().allocate(__cap+1);
+// __invalidate_all_iterators(); // TODO:
+ if (__n_copy != 0)
+ __p[0 .. __n_copy] = __old_p[0 .. __n_copy];
+ size_type __sec_cp_sz = __old_sz - __n_del - __n_copy;
+ if (__sec_cp_sz != 0)
+ (__p + __n_copy + __n_add)[0 .. __sec_cp_sz] = (__old_p + __n_copy + __n_del)[0 .. __sec_cp_sz];
+ if (__old_cap+1 != __min_cap)
+ __alloc().deallocate(__old_p, __old_cap+1);
+ __set_long_pointer(__p);
+ __set_long_cap(__cap+1);
+ }
+ }
+ else
+ {
+ static assert(false, "C++ runtime not supported");
+ }
+}
+
+
+// platform detail
+private:
+version (CppRuntime_Microsoft)
+{
+ import core.stdcpp.xutility : _ITERATOR_DEBUG_LEVEL;
+
+extern(C++, (StdNamespace)):
+ extern (C++) struct _String_base_types(_Elem, _Alloc)
+ {
+ alias Ty = _Elem;
+ alias Alloc = _Alloc;
+ }
+
+ extern (C++, class) struct _String_alloc(_Alloc_types)
+ {
+ import core.stdcpp.xutility : _Compressed_pair;
+
+ alias Ty = _Alloc_types.Ty;
+ alias Alloc = _Alloc_types.Alloc;
+ alias ValTy = _String_val!Ty;
+
+ extern(D) @safe @nogc:
+ pragma(inline, true)
+ {
+ ref inout(Alloc) _Getal() inout pure nothrow { return _Mypair._Myval1; }
+ ref inout(ValTy) _Get_data() inout pure nothrow { return _Mypair._Myval2; }
+ }
+
+ void _Orphan_all() nothrow { _Get_data._Base._Orphan_all(); }
+
+ static if (_ITERATOR_DEBUG_LEVEL > 0)
+ {
+ import core.stdcpp.xutility : _Container_proxy;
+
+ ~this()
+ {
+ _Free_proxy();
+ }
+
+ pragma(inline, true)
+ ref inout(_Container_proxy*) _Myproxy() inout pure nothrow { return _Get_data._Base._Myproxy; }
+
+ void _Alloc_proxy() nothrow @trusted
+ {
+ import core.lifetime : emplace;
+
+ alias _Alproxy = Alloc.rebind!_Container_proxy;
+ try // TODO: or should we make allocator<T>::allocate() `nothrow`?
+ _Myproxy() = _Alproxy(_Getal()).allocate(1);
+ catch (Throwable)
+ assert(false, "Failed to allocate iterator debug container proxy");
+ emplace!_Container_proxy(_Myproxy());
+ _Myproxy()._Mycont = &_Get_data()._Base;
+ }
+ void _Free_proxy() nothrow @trusted
+ {
+ alias _Alproxy = Alloc.rebind!_Container_proxy;
+ _Orphan_all();
+ destroy!false(*_Myproxy());
+ try // TODO: or should we make allocator<T>::deallocate() `nothrow`?
+ _Alproxy(_Getal()).deallocate(_Myproxy(), 1);
+ catch (Throwable)
+ assert(false, "Failed to deallocate iterator debug container proxy");
+ _Myproxy() = null;
+ }
+ }
+
+ _Compressed_pair!(Alloc, ValTy) _Mypair;
+ }
+
+ extern (C++, class) struct _String_val(T)
+ {
+ import core.stdcpp.xutility : _Container_base;
+ import core.stdcpp.type_traits : is_empty;
+
+ enum _BUF_SIZE = 16 / T.sizeof < 1 ? 1 : 16 / T.sizeof;
+ enum _ALLOC_MASK = T.sizeof <= 1 ? 15 : T.sizeof <= 2 ? 7 : T.sizeof <= 4 ? 3 : T.sizeof <= 8 ? 1 : 0;
+
+ static if (!is_empty!_Container_base.value)
+ _Container_base _Base;
+ else
+ ref inout(_Container_base) _Base() inout { return *cast(inout(_Container_base)*)&this; }
+
+ union _Bxty
+ {
+ T[_BUF_SIZE] _Buf;
+ T* _Ptr;
+ }
+
+ _Bxty _Bx;
+ size_t _Mysize = 0; // current length of string
+ size_t _Myres = _BUF_SIZE - 1; // current storage reserved for string
+
+ pragma (inline, true):
+ extern (D):
+ pure nothrow @nogc:
+ bool _IsAllocated() const @safe { return _BUF_SIZE <= _Myres; }
+ alias _Large_string_engaged = _IsAllocated;
+ @property inout(T)* _Myptr() inout @trusted { return _BUF_SIZE <= _Myres ? _Bx._Ptr : _Bx._Buf.ptr; }
+ @property inout(T)[] _Mystr() inout @trusted { return _BUF_SIZE <= _Myres ? _Bx._Ptr[0 .. _Mysize] : _Bx._Buf[0 .. _Mysize]; }
+
+ auto _Clamp_suffix_size(T)(const T _Off, const T _Size) const
+ {
+ // trims _Size to the longest it can be assuming a string at/after _Off
+ return min(_Size, _Mysize - _Off);
+ }
+ }
+
+ template _Size_after_ebco_v(_Ty)
+ {
+ import core.stdcpp.type_traits : is_empty;
+
+ enum size_t _Size_after_ebco_v = is_empty!_Ty.value ? 0 : _Ty.sizeof; // get _Ty's size after being EBCO'd
+ }
+}
+
+auto ref T max(T)(auto ref T a, auto ref T b) { return b > a ? b : a; }
+auto ref T min(T)(auto ref T a, auto ref T b) { return b < a ? b : a; }
diff --git a/libphobos/libdruntime/core/stdcpp/string_view.d b/libphobos/libdruntime/core/stdcpp/string_view.d
new file mode 100644
index 0000000..172c170
--- /dev/null
+++ b/libphobos/libdruntime/core/stdcpp/string_view.d
@@ -0,0 +1,130 @@
+/**
+ * D header file for interaction with C++ std::string_view.
+ *
+ * Copyright: Copyright (c) 2018 D Language Foundation
+ * License: Distributed under the
+ * $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ * (See accompanying file LICENSE)
+ * Authors: Manu Evans
+ * Source: $(DRUNTIMESRC core/stdcpp/string_view.d)
+ */
+
+module core.stdcpp.string_view;
+
+import core.stdc.stddef : wchar_t;
+import core.stdcpp.xutility : StdNamespace;
+
+// hacks to support DMD on Win32
+version (CppRuntime_Microsoft)
+{
+ version = CppRuntime_Windows; // use the MS runtime ABI for win32
+}
+else version (CppRuntime_DigitalMars)
+{
+ version = CppRuntime_Windows; // use the MS runtime ABI for win32
+ pragma(msg, "std::basic_string_view not supported by DMC");
+}
+
+extern(C++, (StdNamespace)):
+@nogc:
+
+///
+alias string_view = basic_string_view!char;
+///
+alias u16string_view = basic_string_view!wchar;
+///
+alias u32string_view = basic_string_view!dchar;
+///
+alias wstring_view = basic_string_view!wchar_t;
+
+
+/**
+ * Character traits classes specify character properties and provide specific
+ * semantics for certain operations on characters and sequences of characters.
+ */
+extern(C++, struct) struct char_traits(CharT) {}
+
+
+/**
+* D language counterpart to C++ std::basic_string_view.
+*
+* C++ reference: $(LINK2 https://en.cppreference.com/w/cpp/string/basic_string_view)
+*/
+extern(C++, class) struct basic_string_view(T, Traits = char_traits!T)
+{
+extern(D):
+pragma(inline, true):
+pure nothrow @nogc:
+
+ ///
+ enum size_type npos = size_type.max;
+
+ ///
+ alias size_type = size_t;
+ ///
+ alias difference_type = ptrdiff_t;
+ ///
+ alias value_type = T;
+ ///
+ alias pointer = T*;
+ ///
+ alias const_pointer = const(T)*;
+
+ ///
+ alias as_array this;
+ ///
+ alias toString = as_array;
+
+ ///
+ this(const(T)[] str) @trusted { __data = str.ptr; __size = str.length; }
+
+ ///
+ alias length = size;
+ ///
+ alias opDollar = length;
+ ///
+ size_type size() const @safe { return __size; }
+ ///
+ bool empty() const @safe { return __size == 0; }
+
+ ///
+ const(T)* data() const @safe { return __data; }
+ ///
+ const(T)[] as_array() const @trusted { return __data[0 .. __size]; }
+
+ ///
+ ref const(T) at(size_type i) const @trusted { return __data[0 .. __size][i]; }
+
+ ///
+ ref const(T) front() const @safe { return this[0]; }
+ ///
+ ref const(T) back() const @safe { return this[$-1]; }
+
+private:
+ // use the proper field names from C++ so debugging doesn't get weird
+ version (CppRuntime_Windows)
+ {
+ const_pointer _Mydata;
+ size_type _Mysize;
+
+ alias __data = _Mydata;
+ alias __size = _Mysize;
+ }
+ else version (CppRuntime_Gcc)
+ {
+ size_t _M_len;
+ const(T)* _M_str;
+
+ alias __data = _M_str;
+ alias __size = _M_len;
+ }
+ else version (CppRuntime_Clang)
+ {
+ const value_type* __data;
+ size_type __size;
+ }
+ else
+ {
+ static assert(false, "C++ runtime not supported");
+ }
+}
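+
+// Illustrative usage sketch (kept out of the build with `version (none)`); the
+// extern(C++) function `cppTakesView` is hypothetical and assumed to be supplied
+// by C++ code linked into the same binary.
+version (none)
+{
+    extern(C++) void cppTakesView(string_view sv);
+
+    unittest
+    {
+        const(char)[] msg = "hello from D";
+        auto sv = string_view(msg);            // wraps ptr + length, no copy
+        assert(sv.size() == msg.length);
+        assert(!sv.empty && sv.front == 'h' && sv.back == 'D');
+        cppTakesView(sv);
+    }
+}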
diff --git a/libphobos/libdruntime/core/stdcpp/type_traits.d b/libphobos/libdruntime/core/stdcpp/type_traits.d
new file mode 100644
index 0000000..e4bf41c
--- /dev/null
+++ b/libphobos/libdruntime/core/stdcpp/type_traits.d
@@ -0,0 +1,50 @@
+/**
+ * D header file for interaction with C++ std::type_traits.
+ *
+ * Copyright: Copyright Digital Mars 2018.
+ * License: Distributed under the
+ * $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ * (See accompanying file LICENSE)
+ * Authors: Manu Evans
+ * Source: $(DRUNTIMESRC core/stdcpp/type_traits.d)
+ */
+
+module core.stdcpp.type_traits;
+
+extern(C++, "std"):
+
+///
+struct integral_constant(T, T Val)
+{
+ ///
+ enum T value = Val;
+ ///
+ alias value_type = T;
+ ///
+ alias type = typeof(this);
+}
+
+///
+alias bool_constant(bool b) = integral_constant!(bool, b);
+
+// Useful for dealing with enable_if constraints.
+///
+alias true_type = bool_constant!true;
+///
+alias false_type = bool_constant!false;
+
+///
+struct is_empty(T)
+{
+ static if (is(T == struct))
+ private enum __is_empty = T.tupleof.length == 0;
+ else
+ private enum __is_empty = false;
+
+ ///
+ enum bool value = __is_empty;
+ ///
+ alias value_type = bool;
+ ///
+ alias type = integral_constant!(bool, value);
+}
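+
+// Illustrative usage sketch (kept out of the build with `version (none)`): the
+// trait reports true only for field-less structs, mirroring the C++ behaviour
+// the bindings rely on for EBCO decisions.
+version (none) unittest
+{
+    struct Empty {}
+    struct NonEmpty { int x; }
+    static assert( is_empty!Empty.value);
+    static assert(!is_empty!NonEmpty.value);
+    static assert(!is_empty!int.value);
+}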
diff --git a/libphobos/libdruntime/core/stdcpp/typeinfo.d b/libphobos/libdruntime/core/stdcpp/typeinfo.d
index 693b8cd..53b25c5 100644
--- a/libphobos/libdruntime/core/stdcpp/typeinfo.d
+++ b/libphobos/libdruntime/core/stdcpp/typeinfo.d
@@ -11,11 +11,14 @@
module core.stdcpp.typeinfo;
+import core.attribute : weak;
+
version (CppRuntime_DigitalMars)
{
import core.stdcpp.exception;
extern (C++, "std"):
+ @nogc:
class type_info
{
@@ -27,8 +30,8 @@ version (CppRuntime_DigitalMars)
//bool operator==(const type_info rhs) const;
//bool operator!=(const type_info rhs) const;
- final bool before(const type_info rhs) const;
- final const(char)* name() const;
+ final bool before(const type_info rhs) const nothrow;
+ final const(char)* name() const nothrow;
protected:
//type_info();
private:
@@ -59,6 +62,7 @@ else version (CppRuntime_Microsoft)
import core.stdcpp.exception;
extern (C++, "std"):
+ @nogc:
struct __type_info_node
{
@@ -70,12 +74,11 @@ else version (CppRuntime_Microsoft)
class type_info
{
- //virtual ~this();
- void dtor() { } // reserve slot in vtbl[]
+ @weak ~this() nothrow {}
//bool operator==(const type_info rhs) const;
//bool operator!=(const type_info rhs) const;
- final bool before(const type_info rhs) const;
- final const(char)* name(__type_info_node* p = &__type_info_root_node) const;
+ final bool before(const type_info rhs) const nothrow;
+ final const(char)* name(__type_info_node* p = &__type_info_root_node) const nothrow;
private:
void* pdata;
@@ -85,13 +88,13 @@ else version (CppRuntime_Microsoft)
class bad_cast : exception
{
- this(const(char)* msg = "bad cast");
+ this(const(char)* msg = "bad cast") @nogc nothrow { super(msg); }
//virtual ~this();
}
class bad_typeid : exception
{
- this(const(char)* msg = "bad typeid");
+ this(const(char)* msg = "bad typeid") @nogc nothrow { super(msg); }
//virtual ~this();
}
}
@@ -101,19 +104,21 @@ else version (CppRuntime_Gcc)
extern (C++, "__cxxabiv1")
{
- class __class_type_info;
+ extern(C++, class) struct __class_type_info;
}
extern (C++, "std"):
+ @nogc:
- class type_info
+ abstract class type_info
{
- void dtor1(); // consume destructor slot in vtbl[]
- void dtor2(); // consume destructor slot in vtbl[]
- final const(char)* name()() const nothrow {
+ @weak ~this() {}
+ @weak final const(char)* name() const nothrow
+ {
return _name[0] == '*' ? _name + 1 : _name;
}
- final bool before()(const type_info _arg) const {
+ @weak final bool before(const type_info _arg) const nothrow
+ {
import core.stdc.string : strcmp;
return (_name[0] == '*' && _arg._name[0] == '*')
? _name < _arg._name
@@ -123,24 +128,66 @@ else version (CppRuntime_Gcc)
bool __is_pointer_p() const;
bool __is_function_p() const;
bool __do_catch(const type_info, void**, uint) const;
- bool __do_upcast(const __class_type_info, void**) const;
+ bool __do_upcast(const __class_type_info*, void**) const;
+ protected:
const(char)* _name;
- this(const(char)*);
+
+ this(const(char)* name) { _name = name; }
+ }
+
+ class bad_cast : exception
+ {
+ this() nothrow {}
+ //~this();
+ @weak override const(char)* what() const nothrow { return "bad cast"; }
+ }
+
+ class bad_typeid : exception
+ {
+ this() nothrow {}
+ //~this();
+ @weak override const(char)* what() const nothrow { return "bad typeid"; }
+ }
+}
+else version (CppRuntime_Clang)
+{
+ import core.stdcpp.exception;
+
+ extern (C++, "std"):
+ @nogc:
+
+ abstract class type_info
+ {
+ @weak ~this() {}
+ @weak final const(char)* name() const nothrow
+ {
+ return __type_name;
+ }
+ @weak final bool before(const type_info __arg) const nothrow
+ {
+ return __type_name < __arg.__type_name;
+ }
+ //bool operator==(const type_info) const;
+
+ protected:
+ const(char)* __type_name;
+
+ this(const(char)* __n) { __type_name = __n; }
}
class bad_cast : exception
{
- this();
+ this() nothrow {}
//~this();
- override const(char)* what() const;
+ @weak override const(char)* what() const nothrow { return "bad cast"; }
}
class bad_typeid : exception
{
- this();
+ this() nothrow {}
//~this();
- override const(char)* what() const;
+ @weak override const(char)* what() const nothrow { return "bad typeid"; }
}
}
else
diff --git a/libphobos/libdruntime/core/stdcpp/utility.d b/libphobos/libdruntime/core/stdcpp/utility.d
new file mode 100644
index 0000000..a34e356
--- /dev/null
+++ b/libphobos/libdruntime/core/stdcpp/utility.d
@@ -0,0 +1,50 @@
+/**
+ * D header file for interaction with Microsoft C++ <utility>
+ *
+ * Copyright: Copyright (c) 2018 D Language Foundation
+ * License: Distributed under the
+ * $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ * (See accompanying file LICENSE)
+ * Authors: Manu Evans
+ * Source: $(DRUNTIMESRC core/stdcpp/utility.d)
+ */
+
+module core.stdcpp.utility;
+
+import core.stdcpp.xutility : StdNamespace;
+
+extern(C++, (StdNamespace)):
+@nogc:
+
+/**
+* D language counterpart to C++ std::pair.
+*
+* C++ reference: $(LINK2 https://en.cppreference.com/w/cpp/utility/pair)
+*/
+struct pair(T1, T2)
+{
+ ///
+ alias first_type = T1;
+ ///
+ alias second_type = T2;
+
+ ///
+ T1 first;
+ ///
+ T2 second;
+
+ // FreeBSD has pair as non-POD so add a constructor
+ version (FreeBSD)
+ {
+ this(T1 t1, T2 t2) inout
+ {
+ first = t1;
+ second = t2;
+ }
+ this(ref return scope inout pair!(T1, T2) src) inout
+ {
+ first = src.first;
+ second = src.second;
+ }
+ }
+}
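+
+// Illustrative usage sketch (kept out of the build with `version (none)`): pair
+// maps field-for-field onto C++ std::pair, so it behaves like a plain D struct.
+version (none) unittest
+{
+    auto p = pair!(int, double)(1, 2.5);
+    assert(p.first == 1 && p.second == 2.5);
+    p.second += 0.5;
+    assert(p.second == 3.0);
+}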
diff --git a/libphobos/libdruntime/core/stdcpp/vector.d b/libphobos/libdruntime/core/stdcpp/vector.d
new file mode 100644
index 0000000..c64dbf6
--- /dev/null
+++ b/libphobos/libdruntime/core/stdcpp/vector.d
@@ -0,0 +1,850 @@
+/**
+ * D header file for interaction with C++ std::vector.
+ *
+ * Copyright: Copyright (c) 2018 D Language Foundation
+ * License: Distributed under the
+ * $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ * (See accompanying file LICENSE)
+ * Authors: Guillaume Chatelet
+ * Manu Evans
+ * Source: $(DRUNTIMESRC core/stdcpp/vector.d)
+ */
+
+module core.stdcpp.vector;
+
+///////////////////////////////////////////////////////////////////////////////
+// std::vector declaration.
+//
+// Current caveats:
+// - missing noexcept
+// - nothrow @trusted @nogc for most functions depend on knowledge
+// of T's construction/destruction/assignment semantics
+///////////////////////////////////////////////////////////////////////////////
+
+import core.stdcpp.allocator;
+
+enum DefaultConstruct { value }
+
+/// Constructor argument for default construction
+enum Default = DefaultConstruct();
+
+extern(C++, "std"):
+
+alias vector(T) = vector!(T, allocator!T);
+extern(C++, class) struct vector(T, Alloc)
+{
+ import core.lifetime : forward, move, core_emplace = emplace;
+
+ static assert(!is(T == bool), "vector!bool not supported!");
+extern(D):
+
+ ///
+ alias size_type = size_t;
+ ///
+ alias difference_type = ptrdiff_t;
+ ///
+ alias value_type = T;
+ ///
+ alias allocator_type = Alloc;
+ ///
+ alias pointer = T*;
+ ///
+ alias const_pointer = const(T)*;
+
+ /// MSVC allocates on default initialisation in debug, which can't be modelled by D `struct`
+ @disable this();
+
+ ///
+ alias length = size;
+ ///
+ alias opDollar = length;
+
+ ///
+ size_t[2] opSlice(size_t dim : 0)(size_t start, size_t end) const pure nothrow @safe @nogc { return [start, end]; }
+
+ ///
+ ref inout(T) opIndex(size_t index) inout pure nothrow @safe @nogc { return as_array[index]; }
+ ///
+ inout(T)[] opIndex(size_t[2] slice) inout pure nothrow @safe @nogc { return as_array[slice[0] .. slice[1]]; }
+ ///
+ inout(T)[] opIndex() inout pure nothrow @safe @nogc { return as_array(); }
+
+ ///
+ ref vector opAssign(U)(auto ref vector!(U, Alloc) s) { opAssign(s.as_array); return this; }
+ ///
+ ref vector opAssign(T[] array)
+ {
+ clear();
+ reserve(array.length);
+ insert(0, array);
+ return this;
+ }
+
+ ///
+ void opIndexAssign()(auto ref T val, size_t index) { as_array[index] = val; }
+ ///
+ void opIndexAssign()(auto ref T val, size_t[2] slice) { as_array[slice[0] .. slice[1]] = val; }
+ ///
+ void opIndexAssign(T[] val, size_t[2] slice) { as_array[slice[0] .. slice[1]] = val[]; }
+ ///
+ void opIndexAssign()(auto ref T val) { as_array[] = val; }
+ ///
+ void opIndexAssign(T[] val) { as_array[] = val[]; }
+
+ ///
+ void opIndexOpAssign(string op)(auto ref T val, size_t index) { mixin("as_array[index] " ~ op ~ "= val;"); }
+ ///
+ void opIndexOpAssign(string op)(auto ref T val, size_t[2] slice) { mixin("as_array[slice[0] .. slice[1]] " ~ op ~ "= val;"); }
+ ///
+ void opIndexOpAssign(string op)(T[] val, size_t[2] slice) { mixin("as_array[slice[0] .. slice[1]] " ~ op ~ "= val[];"); }
+ ///
+ void opIndexOpAssign(string op)(auto ref T val) { mixin("as_array[] " ~ op ~ "= val;"); }
+ ///
+ void opIndexOpAssign(string op)(T[] val) { mixin("as_array[] " ~ op ~ "= val[];"); }
+
+ ///
+ ref inout(T) front() inout pure nothrow @safe @nogc { return as_array[0]; }
+ ///
+ ref inout(T) back() inout pure nothrow @safe @nogc { return as_array[$-1]; }
+
+ ///
+ ref vector opOpAssign(string op : "~")(auto ref T item) { push_back(forward!item); return this; }
+ ///
+ ref vector opOpAssign(string op : "~")(T[] array) { insert(length, array); return this; }
+
+ ///
+ void append(T[] array) { insert(length, array); }
+
+ /// Performs elementwise equality check.
+ bool opEquals(this This, That)(auto ref That rhs)
+ if (is(immutable That == immutable vector)) { return as_array == rhs.as_array; }
+
+ /// Performs lexicographical comparison.
+ static if (is(typeof((ref T a, ref T b) => a < b)))
+ int opCmp(this This, That)(auto ref That rhs)
+ if (is(immutable That == immutable vector)) { return __cmp(as_array, rhs.as_array); }
+
+ /// Hash to allow `vector`s to be used as keys for built-in associative arrays.
+ /// **The result will generally not be the same as C++ `std::hash<std::vector<T>>`.**
+ size_t toHash() const { return .hashOf(as_array); }
+
+ // Modifiers
+ ///
+ void push_back(U)(auto ref U element)
+ {
+ emplace_back(forward!element);
+ }
+
+ version (CppRuntime_Microsoft)
+ {
+ //----------------------------------------------------------------------------------
+ // Microsoft runtime
+ //----------------------------------------------------------------------------------
+
+ ///
+ this(DefaultConstruct) @nogc { _Alloc_proxy(); }
+ ///
+ this()(size_t count)
+ {
+ _Alloc_proxy();
+ _Buy(count);
+ scope(failure) _Tidy();
+ _Get_data()._Mylast = _Udefault(_Get_data()._Myfirst, count);
+ }
+ ///
+ this()(size_t count, auto ref T val)
+ {
+ _Alloc_proxy();
+ _Buy(count);
+ scope(failure) _Tidy();
+ _Get_data()._Mylast = _Ufill(_Get_data()._Myfirst, count, val);
+ }
+ ///
+ this()(T[] array)
+ {
+ _Alloc_proxy();
+ _Buy(array.length);
+ scope(failure) _Tidy();
+ _Get_data()._Mylast = _Utransfer!false(array.ptr, array.ptr + array.length, _Get_data()._Myfirst);
+ }
+ ///
+ this(this)
+ {
+ _Alloc_proxy();
+ pointer _First = _Get_data()._Myfirst;
+ pointer _Last = _Get_data()._Mylast;
+ _Buy(_Last - _First);
+ scope(failure) _Tidy();
+ _Get_data()._Mylast = _Utransfer!false(_First, _Last, _Get_data()._Myfirst);
+ }
+
+ ///
+ ~this() { _Tidy(); }
+
+ ///
+ ref inout(Alloc) get_allocator() inout pure nothrow @safe @nogc { return _Getal(); }
+
+ ///
+ size_type max_size() const pure nothrow @safe @nogc { return ((size_t.max / T.sizeof) - 1) / 2; } // HACK: clone the windows version precisely?
+
+ ///
+ size_type size() const pure nothrow @safe @nogc { return _Get_data()._Mylast - _Get_data()._Myfirst; }
+ ///
+ size_type capacity() const pure nothrow @safe @nogc { return _Get_data()._Myend - _Get_data()._Myfirst; }
+ ///
+ bool empty() const pure nothrow @safe @nogc { return _Get_data()._Myfirst == _Get_data()._Mylast; }
+ ///
+ inout(T)* data() inout pure nothrow @safe @nogc { return _Get_data()._Myfirst; }
+ ///
+ inout(T)[] as_array() inout pure nothrow @trusted @nogc { return _Get_data()._Myfirst[0 .. size()]; }
+ ///
+ ref inout(T) at(size_type i) inout pure nothrow @trusted @nogc { return _Get_data()._Myfirst[0 .. size()][i]; }
+
+ ///
+ ref T emplace_back(Args...)(auto ref Args args)
+ {
+ if (_Has_unused_capacity())
+ return _Emplace_back_with_unused_capacity(forward!args);
+ return *_Emplace_reallocate(_Get_data()._Mylast, forward!args);
+ }
+
+ ///
+ void reserve(const size_type newCapacity)
+ {
+ if (newCapacity > capacity())
+ {
+// if (newCapacity > max_size())
+// _Xlength();
+ _Reallocate_exactly(newCapacity);
+ }
+ }
+
+ ///
+ void shrink_to_fit()
+ {
+ if (_Has_unused_capacity())
+ {
+ if (empty())
+ _Tidy();
+ else
+ _Reallocate_exactly(size());
+ }
+ }
+
+ ///
+ void pop_back()
+ {
+ static if (_ITERATOR_DEBUG_LEVEL == 2)
+ {
+ assert(!empty(), "vector empty before pop");
+ _Orphan_range(_Get_data()._Mylast - 1, _Get_data()._Mylast);
+ }
+ destroy!false(_Get_data()._Mylast[-1]);
+ --_Get_data()._Mylast;
+ }
+
+ ///
+ void clear()
+ {
+ _Base._Orphan_all();
+ _Destroy(_Get_data()._Myfirst, _Get_data()._Mylast);
+ _Get_data()._Mylast = _Get_data()._Myfirst;
+ }
+
+ ///
+ void resize()(const size_type newsize)
+ {
+ static assert(is(typeof({static T i;})), T.stringof ~ ".this() is annotated with @disable.");
+ _Resize(newsize, (pointer _Dest, size_type _Count) => _Udefault(_Dest, _Count));
+ }
+
+ ///
+ void resize()(const size_type newsize, auto ref T val)
+ {
+ _Resize(newsize, (pointer _Dest, size_type _Count) => _Ufill(_Dest, _Count, forward!val));
+ }
+
+ void emplace(Args...)(size_t offset, auto ref Args args)
+ {
+ pointer _Whereptr = _Get_data()._Myfirst + offset;
+ pointer _Oldlast = _Get_data()._Mylast;
+ if (_Has_unused_capacity())
+ {
+ if (_Whereptr == _Oldlast)
+ _Emplace_back_with_unused_capacity(forward!args);
+ else
+ {
+ T _Obj = T(forward!args);
+ static if (_ITERATOR_DEBUG_LEVEL == 2)
+ _Orphan_range(_Whereptr, _Oldlast);
+ move(_Oldlast[-1], *_Oldlast);
+ ++_Get_data()._Mylast;
+ _Move_backward_unchecked(_Whereptr, _Oldlast - 1, _Oldlast);
+ move(_Obj, *_Whereptr);
+ }
+ return;
+ }
+ _Emplace_reallocate(_Whereptr, forward!args);
+ }
+
+ ///
+ void insert(size_t offset, T[] array)
+ {
+ pointer _Where = _Get_data()._Myfirst + offset;
+ pointer _First = array.ptr;
+ pointer _Last = _First + array.length;
+
+ const size_type _Count = array.length;
+ const size_type _Whereoff = offset;
+ const bool _One_at_back = _Count == 1 && _Get_data()._Myfirst + _Whereoff == _Get_data()._Mylast;
+
+ if (_Count == 0)
+ {
+ // nothing to do, avoid invalidating iterators
+ }
+ else if (_Count > _Unused_capacity())
+ { // reallocate
+ const size_type _Oldsize = size();
+
+// if (_Count > max_size() - _Oldsize)
+// _Xlength();
+
+ const size_type _Newsize = _Oldsize + _Count;
+ const size_type _Newcapacity = _Calculate_growth(_Newsize);
+
+ pointer _Newvec = _Getal().allocate(_Newcapacity);
+ pointer _Constructed_last = _Newvec + _Whereoff + _Count;
+ pointer _Constructed_first = _Constructed_last;
+
+ try
+ {
+ _Utransfer!false(_First, _Last, _Newvec + _Whereoff);
+ _Constructed_first = _Newvec + _Whereoff;
+
+ if (_One_at_back)
+ {
+ _Utransfer!(true, true)(_Get_data()._Myfirst, _Get_data()._Mylast, _Newvec);
+ }
+ else
+ {
+ _Utransfer!true(_Get_data()._Myfirst, _Where, _Newvec);
+ _Constructed_first = _Newvec;
+ _Utransfer!true(_Where, _Get_data()._Mylast, _Newvec + _Whereoff + _Count);
+ }
+ }
+ catch (Throwable e)
+ {
+ _Destroy(_Constructed_first, _Constructed_last);
+ _Getal().deallocate(_Newvec, _Newcapacity);
+ throw e;
+ }
+
+ _Change_array(_Newvec, _Newsize, _Newcapacity);
+ }
+ else
+ { // Attempt to provide the strong guarantee for EmplaceConstructible failure.
+ // If we encounter copy/move construction/assignment failure, provide the basic guarantee.
+ // (For one-at-back, this provides the strong guarantee.)
+
+ pointer _Oldlast = _Get_data()._Mylast;
+ const size_type _Affected_elements = cast(size_type)(_Oldlast - _Where);
+
+ if (_Count < _Affected_elements)
+ { // some affected elements must be assigned
+ _Get_data()._Mylast = _Utransfer!true(_Oldlast - _Count, _Oldlast, _Oldlast);
+ _Move_backward_unchecked(_Where, _Oldlast - _Count, _Oldlast);
+ _Destroy(_Where, _Where + _Count);
+
+ try
+ {
+ _Utransfer!false(_First, _Last, _Where);
+ }
+ catch (Throwable e)
+ {
+ // glue the broken pieces back together
+ try
+ {
+ _Utransfer!true(_Where + _Count, _Where + 2 * _Count, _Where);
+ }
+ catch (Throwable e)
+ {
+ // vaporize the detached piece
+ static if (_ITERATOR_DEBUG_LEVEL == 2)
+ _Orphan_range(_Where, _Oldlast);
+ _Destroy(_Where + _Count, _Get_data()._Mylast);
+ _Get_data()._Mylast = _Where;
+ throw e;
+ }
+
+ _Move_unchecked(_Where + 2 * _Count, _Get_data()._Mylast, _Where + _Count);
+ _Destroy(_Oldlast, _Get_data()._Mylast);
+ _Get_data()._Mylast = _Oldlast;
+ throw e;
+ }
+ }
+ else
+ { // affected elements don't overlap before/after
+ pointer _Relocated = _Where + _Count;
+ _Get_data()._Mylast = _Utransfer!true(_Where, _Oldlast, _Relocated);
+ _Destroy(_Where, _Oldlast);
+
+ try
+ {
+ _Utransfer!false(_First, _Last, _Where);
+ }
+ catch (Throwable e)
+ {
+ // glue the broken pieces back together
+ try
+ {
+ _Utransfer!true(_Relocated, _Get_data()._Mylast, _Where);
+ }
+ catch (Throwable e)
+ {
+ // vaporize the detached piece
+ static if (_ITERATOR_DEBUG_LEVEL == 2)
+ _Orphan_range(_Where, _Oldlast);
+ _Destroy(_Relocated, _Get_data()._Mylast);
+ _Get_data()._Mylast = _Where;
+ throw e;
+ }
+
+ _Destroy(_Relocated, _Get_data()._Mylast);
+ _Get_data()._Mylast = _Oldlast;
+ throw e;
+ }
+ }
+ static if (_ITERATOR_DEBUG_LEVEL == 2)
+ _Orphan_range(_Where, _Oldlast);
+ }
+ }
+
+ private:
+ import core.stdcpp.xutility : MSVCLinkDirectives;
+
+ // Make sure the object files won't link against mismatching objects
+ mixin MSVCLinkDirectives!true;
+
+ pragma(inline, true)
+ {
+ ref inout(_Base.Alloc) _Getal() inout pure nothrow @safe @nogc { return _Base._Mypair._Myval1; }
+ ref inout(_Base.ValTy) _Get_data() inout pure nothrow @safe @nogc { return _Base._Mypair._Myval2; }
+ }
+
+ void _Alloc_proxy() @nogc
+ {
+ static if (_ITERATOR_DEBUG_LEVEL > 0)
+ _Base._Alloc_proxy();
+ }
+
+ void _AssignAllocator(ref const(allocator_type) al) nothrow @nogc
+ {
+ static if (_Base._Mypair._HasFirst)
+ _Getal() = al;
+ }
+
+ bool _Buy(size_type _Newcapacity) @trusted @nogc
+ {
+ _Get_data()._Myfirst = null;
+ _Get_data()._Mylast = null;
+ _Get_data()._Myend = null;
+
+ if (_Newcapacity == 0)
+ return false;
+
+ // TODO: how to handle this in D? kinda like a range exception...
+// if (_Newcapacity > max_size())
+// _Xlength();
+
+ _Get_data()._Myfirst = _Getal().allocate(_Newcapacity);
+ _Get_data()._Mylast = _Get_data()._Myfirst;
+ _Get_data()._Myend = _Get_data()._Myfirst + _Newcapacity;
+
+ return true;
+ }
+
+ static void _Destroy(pointer _First, pointer _Last)
+ {
+ for (; _First != _Last; ++_First)
+ destroy!false(*_First);
+ }
+
+ void _Tidy()
+ {
+ _Base._Orphan_all();
+ if (_Get_data()._Myfirst)
+ {
+ _Destroy(_Get_data()._Myfirst, _Get_data()._Mylast);
+ _Getal().deallocate(_Get_data()._Myfirst, capacity());
+ _Get_data()._Myfirst = null;
+ _Get_data()._Mylast = null;
+ _Get_data()._Myend = null;
+ }
+ }
+
+ size_type _Unused_capacity() const pure nothrow @safe @nogc
+ {
+ return _Get_data()._Myend - _Get_data()._Mylast;
+ }
+
+ bool _Has_unused_capacity() const pure nothrow @safe @nogc
+ {
+ return _Get_data()._Myend != _Get_data()._Mylast;
+ }
+
+ ref T _Emplace_back_with_unused_capacity(Args...)(auto ref Args args)
+ {
+ core_emplace(_Get_data()._Mylast, forward!args);
+ static if (_ITERATOR_DEBUG_LEVEL == 2)
+ _Orphan_range(_Get_data()._Mylast, _Get_data()._Mylast);
+ return *_Get_data()._Mylast++;
+ }
+
+ pointer _Emplace_reallocate(_Valty...)(pointer _Whereptr, auto ref _Valty _Val)
+ {
+ const size_type _Whereoff = _Whereptr - _Get_data()._Myfirst;
+ const size_type _Oldsize = size();
+
+ // TODO: what should we do in D? kinda like a range overflow?
+// if (_Oldsize == max_size())
+// _Xlength();
+
+ const size_type _Newsize = _Oldsize + 1;
+ const size_type _Newcapacity = _Calculate_growth(_Newsize);
+
+ pointer _Newvec = _Getal().allocate(_Newcapacity);
+ pointer _Constructed_last = _Newvec + _Whereoff + 1;
+ pointer _Constructed_first = _Constructed_last;
+
+ try
+ {
+ core_emplace(_Newvec + _Whereoff, forward!_Val);
+ _Constructed_first = _Newvec + _Whereoff;
+ if (_Whereptr == _Get_data()._Mylast)
+ _Utransfer!(true, true)(_Get_data()._Myfirst, _Get_data()._Mylast, _Newvec);
+ else
+ {
+ _Utransfer!true(_Get_data()._Myfirst, _Whereptr, _Newvec);
+ _Constructed_first = _Newvec;
+ _Utransfer!true(_Whereptr, _Get_data()._Mylast, _Newvec + _Whereoff + 1);
+ }
+ }
+ catch (Throwable e)
+ {
+ _Destroy(_Constructed_first, _Constructed_last);
+ _Getal().deallocate(_Newvec, _Newcapacity);
+ throw e;
+ }
+
+ _Change_array(_Newvec, _Newsize, _Newcapacity);
+ return _Get_data()._Myfirst + _Whereoff;
+ }
+
+ void _Resize(_Lambda)(const size_type _Newsize, _Lambda _Udefault_or_fill)
+ {
+ const size_type _Oldsize = size();
+ const size_type _Oldcapacity = capacity();
+
+ if (_Newsize > _Oldcapacity)
+ {
+// if (_Newsize > max_size())
+// _Xlength();
+
+ const size_type _Newcapacity = _Calculate_growth(_Newsize);
+
+ pointer _Newvec = _Getal().allocate(_Newcapacity);
+ pointer _Appended_first = _Newvec + _Oldsize;
+ pointer _Appended_last = _Appended_first;
+
+ try
+ {
+ _Appended_last = _Udefault_or_fill(_Appended_first, _Newsize - _Oldsize);
+ _Utransfer!(true, true)(_Get_data()._Myfirst, _Get_data()._Mylast, _Newvec);
+ }
+ catch (Throwable e)
+ {
+ _Destroy(_Appended_first, _Appended_last);
+ _Getal().deallocate(_Newvec, _Newcapacity);
+ throw e;
+ }
+ _Change_array(_Newvec, _Newsize, _Newcapacity);
+ }
+ else if (_Newsize > _Oldsize)
+ {
+ pointer _Oldlast = _Get_data()._Mylast;
+ _Get_data()._Mylast = _Udefault_or_fill(_Oldlast, _Newsize - _Oldsize);
+ static if (_ITERATOR_DEBUG_LEVEL == 2)
+ _Orphan_range(_Oldlast, _Oldlast);
+ }
+ else if (_Newsize == _Oldsize)
+ {
+ // nothing to do, avoid invalidating iterators
+ }
+ else
+ {
+ pointer _Newlast = _Get_data()._Myfirst + _Newsize;
+ static if (_ITERATOR_DEBUG_LEVEL == 2)
+ _Orphan_range(_Newlast, _Get_data()._Mylast);
+ _Destroy(_Newlast, _Get_data()._Mylast);
+ _Get_data()._Mylast = _Newlast;
+ }
+ }
+
+ void _Reallocate_exactly(const size_type _Newcapacity)
+ {
+ import core.lifetime : moveEmplace;
+
+ const size_type _Size = size();
+ pointer _Newvec = _Getal().allocate(_Newcapacity);
+
+ try
+ {
+ for (size_t i = _Size; i > 0; )
+ {
+ --i;
+ moveEmplace(_Get_data()._Myfirst[i], _Newvec[i]);
+ }
+ }
+ catch (Throwable e)
+ {
+ _Getal().deallocate(_Newvec, _Newcapacity);
+ throw e;
+ }
+
+ _Change_array(_Newvec, _Size, _Newcapacity);
+ }
+
+ void _Change_array(pointer _Newvec, const size_type _Newsize, const size_type _Newcapacity)
+ {
+ _Base._Orphan_all();
+
+ if (_Get_data()._Myfirst != null)
+ {
+ _Destroy(_Get_data()._Myfirst, _Get_data()._Mylast);
+ _Getal().deallocate(_Get_data()._Myfirst, capacity());
+ }
+
+ _Get_data()._Myfirst = _Newvec;
+ _Get_data()._Mylast = _Newvec + _Newsize;
+ _Get_data()._Myend = _Newvec + _Newcapacity;
+ }
+
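+ // Geometric growth by 1.5x (the MSVC STL policy), falling back to the exact
+ // requested size when 1.5x would overflow max_size() or would still be too small.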
+ size_type _Calculate_growth(const size_type _Newsize) const pure nothrow @nogc @safe
+ {
+ const size_type _Oldcapacity = capacity();
+ if (_Oldcapacity > max_size() - _Oldcapacity/2)
+ return _Newsize;
+ const size_type _Geometric = _Oldcapacity + _Oldcapacity/2;
+ if (_Geometric < _Newsize)
+ return _Newsize;
+ return _Geometric;
+ }
+
+ struct _Uninitialized_backout
+ {
+ this() @disable;
+ this(pointer _Dest)
+ {
+ _First = _Dest;
+ _Last = _Dest;
+ }
+ ~this()
+ {
+ _Destroy(_First, _Last);
+ }
+ void _Emplace_back(Args...)(auto ref Args args)
+ {
+ core_emplace(_Last, forward!args);
+ ++_Last;
+ }
+ pointer _Release()
+ {
+ _First = _Last;
+ return _Last;
+ }
+ private:
+ pointer _First;
+ pointer _Last;
+ }
+ pointer _Utransfer(bool _move, bool _ifNothrow = false)(pointer _First, pointer _Last, pointer _Dest)
+ {
+ // TODO: if copy/move are trivial, then we can memcpy/memmove
+ auto _Backout = _Uninitialized_backout(_Dest);
+ for (; _First != _Last; ++_First)
+ {
+ static if (_move && (!_ifNothrow || true)) // isNothrow!T (move in D is always nothrow! ...until opPostMove)
+ _Backout._Emplace_back(move(*_First));
+ else
+ _Backout._Emplace_back(*_First);
+ }
+ return _Backout._Release();
+ }
+ pointer _Ufill()(pointer _Dest, size_t _Count, auto ref T val)
+ {
+ // TODO: if T.sizeof == 1 and no elaborate constructor, fast-path to memset
+ // TODO: if copy ctor/postblit are nothrow, just range assign
+ auto _Backout = _Uninitialized_backout(_Dest);
+ for (; 0 < _Count; --_Count)
+ _Backout._Emplace_back(val);
+ return _Backout._Release();
+ }
+ pointer _Udefault()(pointer _Dest, size_t _Count)
+ {
+ // TODO: if zero init, then fast-path to zeromem
+ auto _Backout = _Uninitialized_backout(_Dest);
+ for (; 0 < _Count; --_Count)
+ _Backout._Emplace_back();
+ return _Backout._Release();
+ }
+ pointer _Move_unchecked(pointer _First, pointer _Last, pointer _Dest)
+ {
+ // TODO: can `memmove` if conditions are right...
+ for (; _First != _Last; ++_Dest, ++_First)
+ move(*_First, *_Dest);
+ return _Dest;
+ }
+ pointer _Move_backward_unchecked(pointer _First, pointer _Last, pointer _Dest)
+ {
+ while (_First != _Last)
+ move(*--_Last, *--_Dest);
+ return _Dest;
+ }
+
+ static if (_ITERATOR_DEBUG_LEVEL == 2)
+ {
+ void _Orphan_range(pointer _First, pointer _Last) const @nogc
+ {
+ import core.stdcpp.xutility : _Lockit, _LOCK_DEBUG;
+
+ alias const_iterator = _Base.const_iterator;
+ auto _Lock = _Lockit(_LOCK_DEBUG);
+
+ const_iterator** _Pnext = cast(const_iterator**)_Get_data()._Base._Getpfirst();
+ if (!_Pnext)
+ return;
+
+ while (*_Pnext)
+ {
+ if ((*_Pnext)._Ptr < _First || _Last < (*_Pnext)._Ptr)
+ {
+ _Pnext = cast(const_iterator**)(*_Pnext)._Base._Getpnext();
+ }
+ else
+ {
+ (*_Pnext)._Base._Clrcont();
+ *_Pnext = *cast(const_iterator**)(*_Pnext)._Base._Getpnext();
+ }
+ }
+ }
+ }
+
+ _Vector_alloc!(_Vec_base_types!(T, Alloc)) _Base;
+ }
+ else version (None)
+ {
+ size_type size() const pure nothrow @safe @nogc { return 0; }
+ size_type capacity() const pure nothrow @safe @nogc { return 0; }
+ bool empty() const pure nothrow @safe @nogc { return true; }
+
+ inout(T)* data() inout pure nothrow @safe @nogc { return null; }
+ inout(T)[] as_array() inout pure nothrow @trusted @nogc { return null; }
+ ref inout(T) at(size_type i) inout pure nothrow @trusted @nogc { return data()[i]; }
+ }
+ else
+ {
+ static assert(false, "C++ runtime not supported");
+ }
+}
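+
+// Illustrative usage sketch (kept out of the build with `version (none)`): basic
+// D-side manipulation of a bound std::vector; `Default` is required because the
+// default constructor is @disable'd.
+version (none) unittest
+{
+    auto v = vector!int(Default);
+    v ~= 1;                     // push_back
+    v.append([2, 3]);           // insert at the end
+    assert(v.size() == 3 && v[0] == 1 && v.as_array == [1, 2, 3]);
+    v.pop_back();
+    assert(v.length == 2 && v.back == 2);
+}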
+
+
+// platform detail
+private:
+version (CppRuntime_Microsoft)
+{
+ import core.stdcpp.xutility : _ITERATOR_DEBUG_LEVEL;
+
+ extern (C++, struct) struct _Vec_base_types(_Ty, _Alloc0)
+ {
+ alias Ty = _Ty;
+ alias Alloc = _Alloc0;
+ }
+
+ extern (C++, class) struct _Vector_alloc(_Alloc_types)
+ {
+ import core.stdcpp.xutility : _Compressed_pair;
+ extern(D):
+ @nogc:
+
+ alias Ty = _Alloc_types.Ty;
+ alias Alloc = _Alloc_types.Alloc;
+ alias ValTy = _Vector_val!Ty;
+
+ void _Orphan_all() nothrow @safe
+ {
+ static if (is(typeof(ValTy._Base)))
+ _Mypair._Myval2._Base._Orphan_all();
+ }
+
+ static if (_ITERATOR_DEBUG_LEVEL != 0)
+ {
+ import core.stdcpp.xutility : _Container_proxy;
+
+ alias const_iterator = _Vector_const_iterator!(ValTy);
+
+ ~this()
+ {
+ _Free_proxy();
+ }
+
+ void _Alloc_proxy() @trusted
+ {
+ import core.lifetime : emplace;
+
+ alias _Alproxy = Alloc.rebind!_Container_proxy;
+ _Alproxy _Proxy_allocator = _Alproxy(_Mypair._Myval1);
+ _Mypair._Myval2._Base._Myproxy = _Proxy_allocator.allocate(1);
+ emplace(_Mypair._Myval2._Base._Myproxy);
+ _Mypair._Myval2._Base._Myproxy._Mycont = &_Mypair._Myval2._Base;
+ }
+ void _Free_proxy()
+ {
+ alias _Alproxy = Alloc.rebind!_Container_proxy;
+ _Alproxy _Proxy_allocator = _Alproxy(_Mypair._Myval1);
+ _Orphan_all();
+ destroy!false(_Mypair._Myval2._Base._Myproxy);
+ _Proxy_allocator.deallocate(_Mypair._Myval2._Base._Myproxy, 1);
+ _Mypair._Myval2._Base._Myproxy = null;
+ }
+ }
+
+ _Compressed_pair!(Alloc, ValTy) _Mypair;
+ }
+
+ extern (C++, class) struct _Vector_val(T)
+ {
+ import core.stdcpp.xutility : _Container_base;
+ import core.stdcpp.type_traits : is_empty;
+
+ alias pointer = T*;
+
+ static if (!is_empty!_Container_base.value)
+ _Container_base _Base;
+
+ pointer _Myfirst; // pointer to beginning of array
+ pointer _Mylast; // pointer to current end of sequence
+ pointer _Myend; // pointer to end of array
+ }
+
+ static if (_ITERATOR_DEBUG_LEVEL > 0)
+ {
+ extern (C++, class) struct _Vector_const_iterator(_Myvec)
+ {
+ import core.stdcpp.xutility : _Iterator_base;
+ import core.stdcpp.type_traits : is_empty;
+
+ static if (!is_empty!_Iterator_base.value)
+ _Iterator_base _Base;
+ _Myvec.pointer _Ptr;
+ }
+ }
+}
diff --git a/libphobos/libdruntime/core/stdcpp/xutility.d b/libphobos/libdruntime/core/stdcpp/xutility.d
new file mode 100644
index 0000000..fa61701
--- /dev/null
+++ b/libphobos/libdruntime/core/stdcpp/xutility.d
@@ -0,0 +1,427 @@
+/**
+ * D header file for interaction with Microsoft C++ <xutility>
+ *
+ * Copyright: Copyright (c) 2018 D Language Foundation
+ * License: Distributed under the
+ * $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ * (See accompanying file LICENSE)
+ * Authors: Manu Evans
+ * Source: $(DRUNTIMESRC core/stdcpp/xutility.d)
+ */
+
+module core.stdcpp.xutility;
+
+@nogc:
+
+version (CppRuntime_Clang)
+{
+ import core.internal.traits : AliasSeq;
+ enum StdNamespace = AliasSeq!("std", "__1");
+}
+else
+{
+ enum StdNamespace = "std";
+}
+
+enum CppStdRevision : uint
+{
+ cpp98 = 199711,
+ cpp11 = 201103,
+ cpp14 = 201402,
+ cpp17 = 201703
+}
+
+enum __cplusplus = __traits(getTargetInfo, "cppStd");
+
+// wrangle C++ features
+enum __cpp_sized_deallocation = __cplusplus >= CppStdRevision.cpp14 || is(typeof(_MSC_VER)) ? 201309 : 0;
+enum __cpp_aligned_new = __cplusplus >= CppStdRevision.cpp17 ? 201606 : 0;
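+
+// For illustration only: bindings can gate C++17-specific surface area on the
+// target's C++ language level, e.g.
+//     static if (__cplusplus >= CppStdRevision.cpp17) { /* C++17-only overloads */ }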
+
+
+version (CppRuntime_Microsoft)
+{
+ import core.stdcpp.type_traits : is_empty;
+
+ version (_MSC_VER_1200)
+ enum _MSC_VER = 1200;
+ else version (_MSC_VER_1300)
+ enum _MSC_VER = 1300;
+ else version (_MSC_VER_1310)
+ enum _MSC_VER = 1310;
+ else version (_MSC_VER_1400)
+ enum _MSC_VER = 1400;
+ else version (_MSC_VER_1500)
+ enum _MSC_VER = 1500;
+ else version (_MSC_VER_1600)
+ enum _MSC_VER = 1600;
+ else version (_MSC_VER_1700)
+ enum _MSC_VER = 1700;
+ else version (_MSC_VER_1800)
+ enum _MSC_VER = 1800;
+ else version (_MSC_VER_1900)
+ enum _MSC_VER = 1900;
+ else version (_MSC_VER_1910)
+ enum _MSC_VER = 1910;
+ else version (_MSC_VER_1911)
+ enum _MSC_VER = 1911;
+ else version (_MSC_VER_1912)
+ enum _MSC_VER = 1912;
+ else version (_MSC_VER_1913)
+ enum _MSC_VER = 1913;
+ else version (_MSC_VER_1914)
+ enum _MSC_VER = 1914;
+ else version (_MSC_VER_1915)
+ enum _MSC_VER = 1915;
+ else version (_MSC_VER_1916)
+ enum _MSC_VER = 1916;
+ else version (_MSC_VER_1920)
+ enum _MSC_VER = 1920;
+ else version (_MSC_VER_1921)
+ enum _MSC_VER = 1921;
+ else version (_MSC_VER_1922)
+ enum _MSC_VER = 1922;
+ else version (_MSC_VER_1923)
+ enum _MSC_VER = 1923;
+ else
+ enum _MSC_VER = 1923; // assume most recent compiler version
+
+ // Client code can mixin the set of MSVC linker directives
+ mixin template MSVCLinkDirectives(bool failMismatch = false)
+ {
+ import core.stdcpp.xutility : __CXXLIB__, _ITERATOR_DEBUG_LEVEL;
+
+ static if (__CXXLIB__ == "libcmtd")
+ {
+ pragma(lib, "libcpmtd");
+ static if (failMismatch)
+ pragma(linkerDirective, "/FAILIFMISMATCH:RuntimeLibrary=MTd_StaticDebug");
+ }
+ else static if (__CXXLIB__ == "msvcrtd")
+ {
+ pragma(lib, "msvcprtd");
+ static if (failMismatch)
+ pragma(linkerDirective, "/FAILIFMISMATCH:RuntimeLibrary=MDd_DynamicDebug");
+ }
+ else static if (__CXXLIB__ == "libcmt")
+ {
+ pragma(lib, "libcpmt");
+ static if (failMismatch)
+ pragma(linkerDirective, "/FAILIFMISMATCH:RuntimeLibrary=MT_StaticRelease");
+ }
+ else static if (__CXXLIB__ == "msvcrt")
+ {
+ pragma(lib, "msvcprt");
+ static if (failMismatch)
+ pragma(linkerDirective, "/FAILIFMISMATCH:RuntimeLibrary=MD_DynamicRelease");
+ }
+ static if (failMismatch)
+ pragma(linkerDirective, "/FAILIFMISMATCH:_ITERATOR_DEBUG_LEVEL=" ~ ('0' + _ITERATOR_DEBUG_LEVEL));
+ }
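+
+ // Illustrative usage sketch (kept out of the build with `version (none)`): a
+ // binding for an MSVC STL type would mix the directives in so its object file
+ // carries the same /FAILIFMISMATCH records as the C++ translation units.
+ version (none) struct ExampleMsvcBinding
+ {
+     mixin MSVCLinkDirectives!true;
+ }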
+
+ // HACK: should we guess _DEBUG for `debug` builds?
+ version (NDEBUG) {}
+ else debug version = _DEBUG;
+
+ // By specific user request
+ version (_ITERATOR_DEBUG_LEVEL_0)
+ enum _ITERATOR_DEBUG_LEVEL = 0;
+ else version (_ITERATOR_DEBUG_LEVEL_1)
+ enum _ITERATOR_DEBUG_LEVEL = 1;
+ else version (_ITERATOR_DEBUG_LEVEL_2)
+ enum _ITERATOR_DEBUG_LEVEL = 2;
+ else
+ {
+ // Match the C Runtime
+ static if (__CXXLIB__ == "libcmtd" || __CXXLIB__ == "msvcrtd")
+ enum _ITERATOR_DEBUG_LEVEL = 2;
+ else static if (__CXXLIB__ == "libcmt" || __CXXLIB__ == "msvcrt" ||
+ __CXXLIB__ == "msvcrt100" || __CXXLIB__ == "msvcrt110" || __CXXLIB__ == "msvcrt120")
+ enum _ITERATOR_DEBUG_LEVEL = 0;
+ else
+ {
+ static if (__CXXLIB__.length > 0)
+ pragma(msg, "Unrecognised C++ runtime library '" ~ __CXXLIB__ ~ "'");
+
+ // No runtime specified; as a best-guess, -release will produce code that matches the MSVC release CRT
+ version (_DEBUG)
+ enum _ITERATOR_DEBUG_LEVEL = 2;
+ else
+ enum _ITERATOR_DEBUG_LEVEL = 0;
+ }
+ }
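+
+ // For illustration only: the level can also be forced from the compiler command
+ // line to match an explicit C++ _ITERATOR_DEBUG_LEVEL setting, e.g.
+ //     dmd -version=_ITERATOR_DEBUG_LEVEL_2   (gdc: -fversion=..., ldc2: -d-version=...)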
+
+ // convenient alias for the C++ std library name
+ enum __CXXLIB__ = __traits(getTargetInfo, "cppRuntimeLibrary");
+
+extern(C++, "std"):
+package:
+ enum _LOCK_DEBUG = 3;
+
+ extern(C++, class) struct _Lockit
+ {
+ this(int) nothrow @nogc @safe;
+ ~this() nothrow @nogc @safe;
+
+ private:
+ int _Locktype;
+ }
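+ // If the linked C++ runtime does not provide ~_Lockit, redirect its mangled symbol
+ // to a local stub; the stub must never actually run (hence the assert(false)).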
+ void dummyDtor() { assert(false); }
+ pragma(linkerDirective, "/ALTERNATENAME:" ~ _Lockit.__dtor.mangleof ~ "=" ~ dummyDtor.mangleof);
+
+ struct _Container_base0
+ {
+ extern(D):
+ void _Orphan_all()() nothrow @nogc @safe {}
+ void _Swap_all()(ref _Container_base0) nothrow @nogc @safe {}
+ void _Swap_proxy_and_iterators()(ref _Container_base0) nothrow {}
+ }
+ struct _Iterator_base0
+ {
+ extern(D):
+ void _Adopt()(const(void)*) nothrow @nogc @safe {}
+ const(_Container_base0)* _Getcont()() const nothrow @nogc @safe { return null; }
+
+ enum bool _Unwrap_when_unverified = true;
+ }
+
+ struct _Container_proxy
+ {
+ const(_Container_base12)* _Mycont;
+ _Iterator_base12* _Myfirstiter;
+ }
+
+ struct _Container_base12
+ {
+ extern(D):
+ inout(_Iterator_base12*)*_Getpfirst()() inout nothrow @nogc @safe
+ {
+ return _Myproxy == null ? null : &_Myproxy._Myfirstiter;
+ }
+ void _Orphan_all()() nothrow @nogc @safe
+ {
+ static if (_ITERATOR_DEBUG_LEVEL == 2)
+ {
+ if (_Myproxy != null)
+ {
+ auto _Lock = _Lockit(_LOCK_DEBUG);
+ for (_Iterator_base12 **_Pnext = &_Myproxy._Myfirstiter; *_Pnext != null; *_Pnext = (*_Pnext)._Mynextiter)
+ (*_Pnext)._Myproxy = null;
+ _Myproxy._Myfirstiter = null;
+ }
+ }
+ }
+// void _Swap_all()(ref _Container_base12) nothrow @nogc;
+
+ void _Swap_proxy_and_iterators()(ref _Container_base12 _Right) nothrow
+ {
+ static if (_ITERATOR_DEBUG_LEVEL == 2)
+ auto _Lock = _Lockit(_LOCK_DEBUG);
+
+ _Container_proxy* _Temp = _Myproxy;
+ _Myproxy = _Right._Myproxy;
+ _Right._Myproxy = _Temp;
+
+ if (_Myproxy)
+ _Myproxy._Mycont = &this;
+
+ if (_Right._Myproxy)
+ _Right._Myproxy._Mycont = &_Right;
+ }
+
+ _Container_proxy* _Myproxy;
+ }
+
+ struct _Iterator_base12
+ {
+ extern(D):
+ void _Adopt()(_Container_base12 *_Parent) nothrow @nogc @safe
+ {
+ if (_Parent == null)
+ {
+ static if (_ITERATOR_DEBUG_LEVEL == 2)
+ {
+ auto _Lock = _Lockit(_LOCK_DEBUG);
+ _Orphan_me();
+ }
+ }
+ else
+ {
+ _Container_proxy *_Parent_proxy = _Parent._Myproxy;
+
+ static if (_ITERATOR_DEBUG_LEVEL == 2)
+ {
+ if (_Myproxy != _Parent_proxy)
+ {
+ auto _Lock = _Lockit(_LOCK_DEBUG);
+ _Orphan_me();
+ _Mynextiter = _Parent_proxy._Myfirstiter;
+ _Parent_proxy._Myfirstiter = &this;
+ _Myproxy = _Parent_proxy;
+ }
+ }
+ else
+ _Myproxy = _Parent_proxy;
+ }
+ }
+ void _Clrcont()() nothrow @nogc @safe
+ {
+ _Myproxy = null;
+ }
+ const(_Container_base12)* _Getcont()() const nothrow @nogc @safe
+ {
+ return _Myproxy == null ? null : _Myproxy._Mycont;
+ }
+ inout(_Iterator_base12*)*_Getpnext()() inout nothrow @nogc @safe
+ {
+ return &_Mynextiter;
+ }
+ void _Orphan_me()() nothrow @nogc @safe
+ {
+ static if (_ITERATOR_DEBUG_LEVEL == 2)
+ {
+ if (_Myproxy != null)
+ {
+ _Iterator_base12 **_Pnext = &_Myproxy._Myfirstiter;
+ while (*_Pnext != null && *_Pnext != &this)
+ _Pnext = &(*_Pnext)._Mynextiter;
+ assert(*_Pnext, "ITERATOR LIST CORRUPTED!");
+ *_Pnext = _Mynextiter;
+ _Myproxy = null;
+ }
+ }
+ }
+
+ enum bool _Unwrap_when_unverified = _ITERATOR_DEBUG_LEVEL == 0;
+
+ _Container_proxy *_Myproxy;
+ _Iterator_base12 *_Mynextiter;
+ }
+
+ static if (_ITERATOR_DEBUG_LEVEL == 0)
+ {
+ alias _Container_base = _Container_base0;
+ alias _Iterator_base = _Iterator_base0;
+ }
+ else
+ {
+ alias _Container_base = _Container_base12;
+ alias _Iterator_base = _Iterator_base12;
+ }
+
+ extern (C++, class) struct _Compressed_pair(_Ty1, _Ty2, bool Ty1Empty = is_empty!_Ty1.value)
+ {
+ pragma (inline, true):
+ extern(D):
+ pure nothrow @nogc:
+ enum _HasFirst = !Ty1Empty;
+
+ ref inout(_Ty1) first() inout @safe { return _Myval1; }
+ ref inout(_Ty2) second() inout @safe { return _Myval2; }
+
+ static if (!Ty1Empty)
+ _Ty1 _Myval1;
+ else
+ {
+ @property ref inout(_Ty1) _Myval1() inout @trusted { return *_GetBase(); }
+ private inout(_Ty1)* _GetBase() inout @trusted { return cast(inout(_Ty1)*)&this; }
+ }
+ _Ty2 _Myval2;
+ }
+
+ // these are all [[noreturn]]
+ void _Xbad_alloc() nothrow;
+ void _Xinvalid_argument(const(char)* message) nothrow;
+ void _Xlength_error(const(char)* message) nothrow;
+ void _Xout_of_range(const(char)* message) nothrow;
+ void _Xoverflow_error(const(char)* message) nothrow;
+ void _Xruntime_error(const(char)* message) nothrow;
+}
+else version (CppRuntime_Clang)
+{
+ import core.stdcpp.type_traits : is_empty;
+
+extern(C++, "std"):
+
+ extern (C++, class) struct __compressed_pair(_T1, _T2)
+ {
+ pragma (inline, true):
+ extern(D):
+ enum Ty1Empty = is_empty!_T1.value;
+ enum Ty2Empty = is_empty!_T2.value;
+
+ ref inout(_T1) first() inout nothrow @safe @nogc { return __value1_; }
+ ref inout(_T2) second() inout nothrow @safe @nogc { return __value2_; }
+
+ private:
+ private inout(_T1)* __get_base1() inout { return cast(inout(_T1)*)&this; }
+ private inout(_T2)* __get_base2() inout { return cast(inout(_T2)*)&__get_base1()[Ty1Empty ? 0 : 1]; }
+
+ static if (!Ty1Empty)
+ _T1 __value1_;
+ else
+ @property ref inout(_T1) __value1_() inout nothrow @trusted @nogc { return *__get_base1(); }
+ static if (!Ty2Empty)
+ _T2 __value2_;
+ else
+ @property ref inout(_T2) __value2_() inout nothrow @trusted @nogc { return *__get_base2(); }
+ }
+}
+version (CppRuntime_Gcc)
+{
+ import core.atomic;
+
+ alias _Atomic_word = int;
+
+ void __atomic_add_dispatch()(_Atomic_word* __mem, int __val) nothrow @nogc @safe
+ {
+ version (__GTHREADS)
+ {
+ // TODO: check __gthread_active_p()
+// if (__gthread_active_p())
+ __atomic_add(__mem, __val);
+// }
+// else
+// __atomic_add_single(__mem, __val);
+ }
+ else
+ __atomic_add_single(__mem, __val);
+ }
+
+ void __atomic_add()(_Atomic_word* __mem, int __val) nothrow @nogc @safe
+ {
+ atomicFetchAdd!(MemoryOrder.acq_rel)(*__mem, __val);
+ }
+
+ void __atomic_add_single()(_Atomic_word* __mem, int __val) nothrow @nogc @safe
+ {
+ *__mem += __val;
+ }
+
+ _Atomic_word __exchange_and_add_dispatch()(_Atomic_word* __mem, int __val) nothrow @nogc @safe
+ {
+ version (__GTHREADS)
+ {
+ // TODO: check __gthread_active_p()
+ return __exchange_and_add(__mem, __val);
+
+// if (__gthread_active_p())
+// return __exchange_and_add(__mem, __val);
+// else
+// return __exchange_and_add_single(__mem, __val);
+ }
+ else
+ return __exchange_and_add_single(__mem, __val);
+ }
+
+ _Atomic_word __exchange_and_add()(_Atomic_word* __mem, int __val) nothrow @nogc @safe
+ {
+ return atomicFetchAdd!(MemoryOrder.acq_rel)(*__mem, __val);
+ }
+
+ _Atomic_word __exchange_and_add_single()(_Atomic_word* __mem, int __val) nothrow @nogc @safe
+ {
+ _Atomic_word __result = *__mem;
+ *__mem += __val;
+ return __result;
+ }
+}
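A minimal sketch of how the GCC dispatch helpers above might be used, mirroring the reference-count updates libstdc++ performs; the module path core.stdcpp.xutility and the RefCounted type are assumptions made for illustration.
---
// Assumes the CppRuntime_Gcc branch above is the active one.
import core.stdcpp.xutility : _Atomic_word,
    __atomic_add_dispatch, __exchange_and_add_dispatch;

struct RefCounted
{
    _Atomic_word useCount = 1;

    void retain() nothrow @nogc
    {
        // atomic increment when threads are enabled, plain add otherwise
        __atomic_add_dispatch(&useCount, 1);
    }

    // Returns true when the last reference was dropped.
    bool release() nothrow @nogc
    {
        return __exchange_and_add_dispatch(&useCount, -1) == 1;
    }
}
---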
diff --git a/libphobos/libdruntime/core/sync/barrier.d b/libphobos/libdruntime/core/sync/barrier.d
index dd54d5c..1d04421 100644
--- a/libphobos/libdruntime/core/sync/barrier.d
+++ b/libphobos/libdruntime/core/sync/barrier.d
@@ -17,14 +17,8 @@ module core.sync.barrier;
public import core.sync.exception;
-private import core.sync.condition;
-private import core.sync.mutex;
-
-version (Posix)
-{
- private import core.stdc.errno;
- private import core.sys.posix.pthread;
-}
+import core.sync.condition;
+import core.sync.mutex;
////////////////////////////////////////////////////////////////////////////////
@@ -60,7 +54,7 @@ class Barrier
{
assert( limit > 0 );
}
- body
+ do
{
m_lock = new Mutex;
m_cond = new Condition( m_lock );
@@ -112,40 +106,35 @@ private:
// Unit Tests
////////////////////////////////////////////////////////////////////////////////
-
-version (unittest)
+unittest
{
- private import core.thread;
+ import core.thread;
+ int numThreads = 10;
+ auto barrier = new Barrier( numThreads );
+ auto synInfo = new Object;
+ int numReady = 0;
+ int numPassed = 0;
- unittest
+ void threadFn()
{
- int numThreads = 10;
- auto barrier = new Barrier( numThreads );
- auto synInfo = new Object;
- int numReady = 0;
- int numPassed = 0;
-
- void threadFn()
+ synchronized( synInfo )
{
- synchronized( synInfo )
- {
- ++numReady;
- }
- barrier.wait();
- synchronized( synInfo )
- {
- ++numPassed;
- }
+ ++numReady;
}
-
- auto group = new ThreadGroup;
-
- for ( int i = 0; i < numThreads; ++i )
+ barrier.wait();
+ synchronized( synInfo )
{
- group.create( &threadFn );
+ ++numPassed;
}
- group.joinAll();
- assert( numReady == numThreads && numPassed == numThreads );
}
+
+ auto group = new ThreadGroup;
+
+ for ( int i = 0; i < numThreads; ++i )
+ {
+ group.create( &threadFn );
+ }
+ group.joinAll();
+ assert( numReady == numThreads && numPassed == numThreads );
}
diff --git a/libphobos/libdruntime/core/sync/condition.d b/libphobos/libdruntime/core/sync/condition.d
index 8afa8f7..674d78d 100644
--- a/libphobos/libdruntime/core/sync/condition.d
+++ b/libphobos/libdruntime/core/sync/condition.d
@@ -22,20 +22,20 @@ public import core.time;
version (Windows)
{
- private import core.sync.semaphore;
- private import core.sys.windows.basetsd /+: HANDLE+/;
- private import core.sys.windows.winbase /+: CloseHandle, CreateSemaphoreA, CRITICAL_SECTION,
+ import core.sync.semaphore;
+ import core.sys.windows.basetsd /+: HANDLE+/;
+ import core.sys.windows.winbase /+: CloseHandle, CreateSemaphoreA, CRITICAL_SECTION,
DeleteCriticalSection, EnterCriticalSection, INFINITE, InitializeCriticalSection,
LeaveCriticalSection, ReleaseSemaphore, WAIT_OBJECT_0, WaitForSingleObject+/;
- private import core.sys.windows.windef /+: BOOL, DWORD+/;
- private import core.sys.windows.winerror /+: WAIT_TIMEOUT+/;
+ import core.sys.windows.windef /+: BOOL, DWORD+/;
+ import core.sys.windows.winerror /+: WAIT_TIMEOUT+/;
}
else version (Posix)
{
- private import core.sync.config;
- private import core.stdc.errno;
- private import core.sys.posix.pthread;
- private import core.sys.posix.time;
+ import core.sync.config;
+ import core.stdc.errno;
+ import core.sys.posix.pthread;
+ import core.sys.posix.time;
}
else
{
@@ -76,27 +76,71 @@ class Condition
*/
this( Mutex m ) nothrow @safe
{
+ this(m, true);
+ }
+
+ /// ditto
+ this( shared Mutex m ) shared nothrow @safe
+ {
+ this(m, true);
+ }
+
+ //
+ private this(this Q, M)( M m, bool _unused_ ) nothrow @trusted
+ if ((is(Q == Condition) && is(M == Mutex)) ||
+ (is(Q == shared Condition) && is(M == shared Mutex)))
+ {
version (Windows)
{
- m_blockLock = CreateSemaphoreA( null, 1, 1, null );
+ static if (is(Q == Condition))
+ {
+ alias HANDLE_TYPE = void*;
+ }
+ else
+ {
+ alias HANDLE_TYPE = shared(void*);
+ }
+ m_blockLock = cast(HANDLE_TYPE) CreateSemaphoreA( null, 1, 1, null );
if ( m_blockLock == m_blockLock.init )
throw new SyncError( "Unable to initialize condition" );
- scope(failure) CloseHandle( m_blockLock );
+ scope(failure) CloseHandle( cast(void*) m_blockLock );
- m_blockQueue = CreateSemaphoreA( null, 0, int.max, null );
+ m_blockQueue = cast(HANDLE_TYPE) CreateSemaphoreA( null, 0, int.max, null );
if ( m_blockQueue == m_blockQueue.init )
throw new SyncError( "Unable to initialize condition" );
- scope(failure) CloseHandle( m_blockQueue );
+ scope(failure) CloseHandle( cast(void*) m_blockQueue );
- InitializeCriticalSection( &m_unblockLock );
+ InitializeCriticalSection( cast(RTL_CRITICAL_SECTION*) &m_unblockLock );
m_assocMutex = m;
}
else version (Posix)
{
m_assocMutex = m;
- int rc = pthread_cond_init( &m_hndl, null );
- if ( rc )
- throw new SyncError( "Unable to initialize condition" );
+ static if ( is( typeof( pthread_condattr_setclock ) ) )
+ {
+ () @trusted
+ {
+ pthread_condattr_t attr = void;
+ int rc = pthread_condattr_init( &attr );
+ if ( rc )
+ throw new SyncError( "Unable to initialize condition" );
+ rc = pthread_condattr_setclock( &attr, CLOCK_MONOTONIC );
+ if ( rc )
+ throw new SyncError( "Unable to initialize condition" );
+ rc = pthread_cond_init( cast(pthread_cond_t*) &m_hndl, &attr );
+ if ( rc )
+ throw new SyncError( "Unable to initialize condition" );
+ rc = pthread_condattr_destroy( &attr );
+ if ( rc )
+ throw new SyncError( "Unable to initialize condition" );
+ } ();
+ }
+ else
+ {
+ int rc = pthread_cond_init( cast(pthread_cond_t*) &m_hndl, null );
+ if ( rc )
+ throw new SyncError( "Unable to initialize condition" );
+ }
}
}
@@ -135,12 +179,23 @@ class Condition
return m_assocMutex;
}
+ /// ditto
+ @property shared(Mutex) mutex() shared
+ {
+ return m_assocMutex;
+ }
+
// undocumented function for internal use
final @property Mutex mutex_nothrow() pure nothrow @safe @nogc
{
return m_assocMutex;
}
+ // ditto
+ final @property shared(Mutex) mutex_nothrow() shared pure nothrow @safe @nogc
+ {
+ return m_assocMutex;
+ }
////////////////////////////////////////////////////////////////////////////
// General Actions
@@ -155,19 +210,31 @@ class Condition
*/
void wait()
{
+ wait!(typeof(this))(true);
+ }
+
+ /// ditto
+ void wait() shared
+ {
+ wait!(typeof(this))(true);
+ }
+
+ /// ditto
+ void wait(this Q)( bool _unused_ )
+ if (is(Q == Condition) || is(Q == shared Condition))
+ {
version (Windows)
{
timedWait( INFINITE );
}
else version (Posix)
{
- int rc = pthread_cond_wait( &m_hndl, m_assocMutex.handleAddr() );
+ int rc = pthread_cond_wait( cast(pthread_cond_t*) &m_hndl, (cast(Mutex) m_assocMutex).handleAddr() );
if ( rc )
throw new SyncError( "Unable to wait for condition" );
}
}
-
/**
* Suspends the calling thread until a notification occurs or until the
* supplied time period has elapsed.
@@ -185,11 +252,24 @@ class Condition
* true if notified before the timeout and false if not.
*/
bool wait( Duration val )
+ {
+ return wait!(typeof(this))(val, true);
+ }
+
+ /// ditto
+ bool wait( Duration val ) shared
+ {
+ return wait!(typeof(this))(val, true);
+ }
+
+ /// ditto
+ bool wait(this Q)( Duration val, bool _unused_ )
+ if (is(Q == Condition) || is(Q == shared Condition))
in
{
assert( !val.isNegative );
}
- body
+ do
{
version (Windows)
{
@@ -209,8 +289,8 @@ class Condition
timespec t = void;
mktspec( t, val );
- int rc = pthread_cond_timedwait( &m_hndl,
- m_assocMutex.handleAddr(),
+ int rc = pthread_cond_timedwait( cast(pthread_cond_t*) &m_hndl,
+ (cast(Mutex) m_assocMutex).handleAddr(),
&t );
if ( !rc )
return true;
@@ -220,7 +300,6 @@ class Condition
}
}
-
/**
* Notifies one waiter.
*
@@ -229,19 +308,46 @@ class Condition
*/
void notify()
{
+ notify!(typeof(this))(true);
+ }
+
+ /// ditto
+ void notify() shared
+ {
+ notify!(typeof(this))(true);
+ }
+
+ /// ditto
+ void notify(this Q)( bool _unused_ )
+ if (is(Q == Condition) || is(Q == shared Condition))
+ {
version (Windows)
{
- notify( false );
+ notify_( false );
}
else version (Posix)
{
- int rc = pthread_cond_signal( &m_hndl );
+ // Since OS X 10.7 (Lion), pthread_cond_signal returns EAGAIN after retrying 8192 times,
+ // so we need to keep retrying while it returns EAGAIN.
+ //
+ // 10.7.0 (Lion): http://www.opensource.apple.com/source/Libc/Libc-763.11/pthreads/pthread_cond.c
+ // 10.8.0 (Mountain Lion): http://www.opensource.apple.com/source/Libc/Libc-825.24/pthreads/pthread_cond.c
+ // 10.10.0 (Yosemite): http://www.opensource.apple.com/source/libpthread/libpthread-105.1.4/src/pthread_cond.c
+ // 10.11.0 (El Capitan): http://www.opensource.apple.com/source/libpthread/libpthread-137.1.1/src/pthread_cond.c
+ // 10.12.0 (Sierra): http://www.opensource.apple.com/source/libpthread/libpthread-218.1.3/src/pthread_cond.c
+ // 10.13.0 (High Sierra): http://www.opensource.apple.com/source/libpthread/libpthread-301.1.6/src/pthread_cond.c
+ // 10.14.0 (Mojave): http://www.opensource.apple.com/source/libpthread/libpthread-330.201.1/src/pthread_cond.c
+ // 10.14.1 (Mojave): http://www.opensource.apple.com/source/libpthread/libpthread-330.220.2/src/pthread_cond.c
+
+ int rc;
+ do {
+ rc = pthread_cond_signal( cast(pthread_cond_t*) &m_hndl );
+ } while ( rc == EAGAIN );
if ( rc )
throw new SyncError( "Unable to notify condition" );
}
}
-
/**
* Notifies all waiters.
*
@@ -250,40 +356,84 @@ class Condition
*/
void notifyAll()
{
+ notifyAll!(typeof(this))(true);
+ }
+
+ /// ditto
+ void notifyAll() shared
+ {
+ notifyAll!(typeof(this))(true);
+ }
+
+ /// ditto
+ void notifyAll(this Q)( bool _unused_ )
+ if (is(Q == Condition) || is(Q == shared Condition))
+ {
version (Windows)
{
- notify( true );
+ notify_( true );
}
else version (Posix)
{
- int rc = pthread_cond_broadcast( &m_hndl );
+ // Since OS X 10.7 (Lion), pthread_cond_broadcast returns EAGAIN after retrying 8192 times,
+ // so we need to keep retrying while it returns EAGAIN.
+ //
+ // 10.7.0 (Lion): http://www.opensource.apple.com/source/Libc/Libc-763.11/pthreads/pthread_cond.c
+ // 10.8.0 (Mountain Lion): http://www.opensource.apple.com/source/Libc/Libc-825.24/pthreads/pthread_cond.c
+ // 10.10.0 (Yosemite): http://www.opensource.apple.com/source/libpthread/libpthread-105.1.4/src/pthread_cond.c
+ // 10.11.0 (El Capitan): http://www.opensource.apple.com/source/libpthread/libpthread-137.1.1/src/pthread_cond.c
+ // 10.12.0 (Sierra): http://www.opensource.apple.com/source/libpthread/libpthread-218.1.3/src/pthread_cond.c
+ // 10.13.0 (High Sierra): http://www.opensource.apple.com/source/libpthread/libpthread-301.1.6/src/pthread_cond.c
+ // 10.14.0 (Mojave): http://www.opensource.apple.com/source/libpthread/libpthread-330.201.1/src/pthread_cond.c
+ // 10.14.1 (Mojave): http://www.opensource.apple.com/source/libpthread/libpthread-330.220.2/src/pthread_cond.c
+
+ int rc;
+ do {
+ rc = pthread_cond_broadcast( cast(pthread_cond_t*) &m_hndl );
+ } while ( rc == EAGAIN );
if ( rc )
throw new SyncError( "Unable to notify condition" );
}
}
-
private:
version (Windows)
{
- bool timedWait( DWORD timeout )
+ bool timedWait(this Q)( DWORD timeout )
+ if (is(Q == Condition) || is(Q == shared Condition))
{
+ static if (is(Q == Condition))
+ {
+ auto op(string o, T, V1)(ref T val, V1 mod)
+ {
+ return mixin("val " ~ o ~ "mod");
+ }
+ }
+ else
+ {
+ auto op(string o, T, V1)(ref shared T val, V1 mod)
+ {
+ import core.atomic: atomicOp;
+ return atomicOp!o(val, mod);
+ }
+ }
+
int numSignalsLeft;
int numWaitersGone;
DWORD rc;
- rc = WaitForSingleObject( m_blockLock, INFINITE );
+ rc = WaitForSingleObject( cast(HANDLE) m_blockLock, INFINITE );
assert( rc == WAIT_OBJECT_0 );
- m_numWaitersBlocked++;
+ op!"+="(m_numWaitersBlocked, 1);
- rc = ReleaseSemaphore( m_blockLock, 1, null );
+ rc = ReleaseSemaphore( cast(HANDLE) m_blockLock, 1, null );
assert( rc );
m_assocMutex.unlock();
scope(failure) m_assocMutex.lock();
- rc = WaitForSingleObject( m_blockQueue, timeout );
+ rc = WaitForSingleObject( cast(HANDLE) m_blockQueue, timeout );
assert( rc == WAIT_OBJECT_0 || rc == WAIT_TIMEOUT );
bool timedOut = (rc == WAIT_TIMEOUT);
@@ -297,7 +447,7 @@ private:
// timeout (or canceled)
if ( m_numWaitersBlocked != 0 )
{
- m_numWaitersBlocked--;
+ op!"-="(m_numWaitersBlocked, 1);
// do not unblock next waiter below (already unblocked)
numSignalsLeft = 0;
}
@@ -307,12 +457,12 @@ private:
m_numWaitersGone = 1;
}
}
- if ( --m_numWaitersToUnblock == 0 )
+ if ( op!"-="(m_numWaitersToUnblock, 1) == 0 )
{
if ( m_numWaitersBlocked != 0 )
{
// open the gate
- rc = ReleaseSemaphore( m_blockLock, 1, null );
+ rc = ReleaseSemaphore( cast(HANDLE) m_blockLock, 1, null );
assert( rc );
// do not open the gate below again
numSignalsLeft = 0;
@@ -323,14 +473,14 @@ private:
}
}
}
- else if ( ++m_numWaitersGone == int.max / 2 )
+ else if ( op!"+="(m_numWaitersGone, 1) == int.max / 2 )
{
// timeout/canceled or spurious event :-)
- rc = WaitForSingleObject( m_blockLock, INFINITE );
+ rc = WaitForSingleObject( cast(HANDLE) m_blockLock, INFINITE );
assert( rc == WAIT_OBJECT_0 );
// something is going on here - test of timeouts?
- m_numWaitersBlocked -= m_numWaitersGone;
- rc = ReleaseSemaphore( m_blockLock, 1, null );
+ op!"-="(m_numWaitersBlocked, m_numWaitersGone);
+ rc = ReleaseSemaphore( cast(HANDLE) m_blockLock, 1, null );
assert( rc == WAIT_OBJECT_0 );
m_numWaitersGone = 0;
}
@@ -342,17 +492,17 @@ private:
// better now than spurious later (same as ResetEvent)
for ( ; numWaitersGone > 0; --numWaitersGone )
{
- rc = WaitForSingleObject( m_blockQueue, INFINITE );
+ rc = WaitForSingleObject( cast(HANDLE) m_blockQueue, INFINITE );
assert( rc == WAIT_OBJECT_0 );
}
// open the gate
- rc = ReleaseSemaphore( m_blockLock, 1, null );
+ rc = ReleaseSemaphore( cast(HANDLE) m_blockLock, 1, null );
assert( rc );
}
else if ( numSignalsLeft != 0 )
{
// unblock next waiter
- rc = ReleaseSemaphore( m_blockQueue, 1, null );
+ rc = ReleaseSemaphore( cast(HANDLE) m_blockQueue, 1, null );
assert( rc );
}
m_assocMutex.lock();
@@ -360,8 +510,25 @@ private:
}
- void notify( bool all )
+ void notify_(this Q)( bool all )
+ if (is(Q == Condition) || is(Q == shared Condition))
{
+ static if (is(Q == Condition))
+ {
+ auto op(string o, T, V1)(ref T val, V1 mod)
+ {
+ return mixin("val " ~ o ~ "mod");
+ }
+ }
+ else
+ {
+ auto op(string o, T, V1)(ref shared T val, V1 mod)
+ {
+ import core.atomic: atomicOp;
+ return atomicOp!o(val, mod);
+ }
+ }
+
DWORD rc;
EnterCriticalSection( &m_unblockLock );
@@ -376,23 +543,23 @@ private:
}
if ( all )
{
- m_numWaitersToUnblock += m_numWaitersBlocked;
+ op!"+="(m_numWaitersToUnblock, m_numWaitersBlocked);
m_numWaitersBlocked = 0;
}
else
{
- m_numWaitersToUnblock++;
- m_numWaitersBlocked--;
+ op!"+="(m_numWaitersToUnblock, 1);
+ op!"-="(m_numWaitersBlocked, 1);
}
LeaveCriticalSection( &m_unblockLock );
}
else if ( m_numWaitersBlocked > m_numWaitersGone )
{
- rc = WaitForSingleObject( m_blockLock, INFINITE );
+ rc = WaitForSingleObject( cast(HANDLE) m_blockLock, INFINITE );
assert( rc == WAIT_OBJECT_0 );
if ( 0 != m_numWaitersGone )
{
- m_numWaitersBlocked -= m_numWaitersGone;
+ op!"-="(m_numWaitersBlocked, m_numWaitersGone);
m_numWaitersGone = 0;
}
if ( all )
@@ -403,10 +570,10 @@ private:
else
{
m_numWaitersToUnblock = 1;
- m_numWaitersBlocked--;
+ op!"-="(m_numWaitersBlocked, 1);
}
LeaveCriticalSection( &m_unblockLock );
- rc = ReleaseSemaphore( m_blockQueue, 1, null );
+ rc = ReleaseSemaphore( cast(HANDLE) m_blockQueue, 1, null );
assert( rc );
}
else
@@ -439,12 +606,11 @@ private:
// Unit Tests
////////////////////////////////////////////////////////////////////////////////
-
-version (unittest)
+unittest
{
- private import core.thread;
- private import core.sync.mutex;
- private import core.sync.semaphore;
+ import core.thread;
+ import core.sync.mutex;
+ import core.sync.semaphore;
void testNotify()
@@ -601,11 +767,173 @@ version (unittest)
assert( !alertedTwo );
}
+ testNotify();
+ testNotifyAll();
+ testWaitTimeout();
+}
+
+unittest
+{
+ import core.thread;
+ import core.sync.mutex;
+ import core.sync.semaphore;
+
- unittest
+ void testNotify()
{
- testNotify();
- testNotifyAll();
- testWaitTimeout();
+ auto mutex = new shared Mutex;
+ auto condReady = new shared Condition( mutex );
+ auto semDone = new Semaphore;
+ auto synLoop = new Object;
+ int numWaiters = 10;
+ int numTries = 10;
+ int numReady = 0;
+ int numTotal = 0;
+ int numDone = 0;
+ int numPost = 0;
+
+ void waiter()
+ {
+ for ( int i = 0; i < numTries; ++i )
+ {
+ synchronized( mutex )
+ {
+ while ( numReady < 1 )
+ {
+ condReady.wait();
+ }
+ --numReady;
+ ++numTotal;
+ }
+
+ synchronized( synLoop )
+ {
+ ++numDone;
+ }
+ semDone.wait();
+ }
+ }
+
+ auto group = new ThreadGroup;
+
+ for ( int i = 0; i < numWaiters; ++i )
+ group.create( &waiter );
+
+ for ( int i = 0; i < numTries; ++i )
+ {
+ for ( int j = 0; j < numWaiters; ++j )
+ {
+ synchronized( mutex )
+ {
+ ++numReady;
+ condReady.notify();
+ }
+ }
+ while ( true )
+ {
+ synchronized( synLoop )
+ {
+ if ( numDone >= numWaiters )
+ break;
+ }
+ Thread.yield();
+ }
+ for ( int j = 0; j < numWaiters; ++j )
+ {
+ semDone.notify();
+ }
+ }
+
+ group.joinAll();
+ assert( numTotal == numWaiters * numTries );
+ }
+
+
+ void testNotifyAll()
+ {
+ auto mutex = new shared Mutex;
+ auto condReady = new shared Condition( mutex );
+ int numWaiters = 10;
+ int numReady = 0;
+ int numDone = 0;
+ bool alert = false;
+
+ void waiter()
+ {
+ synchronized( mutex )
+ {
+ ++numReady;
+ while ( !alert )
+ condReady.wait();
+ ++numDone;
+ }
+ }
+
+ auto group = new ThreadGroup;
+
+ for ( int i = 0; i < numWaiters; ++i )
+ group.create( &waiter );
+
+ while ( true )
+ {
+ synchronized( mutex )
+ {
+ if ( numReady >= numWaiters )
+ {
+ alert = true;
+ condReady.notifyAll();
+ break;
+ }
+ }
+ Thread.yield();
+ }
+ group.joinAll();
+ assert( numReady == numWaiters && numDone == numWaiters );
+ }
+
+
+ void testWaitTimeout()
+ {
+ auto mutex = new shared Mutex;
+ auto condReady = new shared Condition( mutex );
+ bool waiting = false;
+ bool alertedOne = true;
+ bool alertedTwo = true;
+
+ void waiter()
+ {
+ synchronized( mutex )
+ {
+ waiting = true;
+ // we never want to miss the notification (30s)
+ alertedOne = condReady.wait( dur!"seconds"(30) );
+ // but we don't want to wait long for the timeout (10ms)
+ alertedTwo = condReady.wait( dur!"msecs"(10) );
+ }
+ }
+
+ auto thread = new Thread( &waiter );
+ thread.start();
+
+ while ( true )
+ {
+ synchronized( mutex )
+ {
+ if ( waiting )
+ {
+ condReady.notify();
+ break;
+ }
+ }
+ Thread.yield();
+ }
+ thread.join();
+ assert( waiting );
+ assert( alertedOne );
+ assert( !alertedTwo );
}
+
+ testNotify();
+ testNotifyAll();
+ testWaitTimeout();
}
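The shared overloads added above all forward to a single implementation through a template this parameter (the `this Q` idiom), choosing atomic or plain updates at compile time. A minimal standalone sketch of that idiom, with a hypothetical Counter type:
---
class Counter
{
    private int n;

    void bump()        { bumpImpl!(typeof(this))(); }
    void bump() shared { bumpImpl!(typeof(this))(); }

    // One body serves both the shared and the unshared receiver; Q is the
    // static type of `this`, so the right update is selected at compile time.
    private void bumpImpl(this Q)()
        if (is(Q == Counter) || is(Q == shared Counter))
    {
        static if (is(Q == shared Counter))
        {
            import core.atomic : atomicOp;
            atomicOp!"+="(n, 1);
        }
        else
            ++n;
    }
}
---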
diff --git a/libphobos/libdruntime/core/sync/config.d b/libphobos/libdruntime/core/sync/config.d
index b2de225..39f7a8c 100644
--- a/libphobos/libdruntime/core/sync/config.d
+++ b/libphobos/libdruntime/core/sync/config.d
@@ -3,7 +3,7 @@
* specific to this package.
*
* Copyright: Copyright Sean Kelly 2005 - 2009.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Sean Kelly
* Source: $(DRUNTIMESRC core/sync/_config.d)
*/
@@ -18,16 +18,17 @@ module core.sync.config;
version (Posix)
{
- private import core.sys.posix.time;
- private import core.sys.posix.sys.time;
- private import core.time;
+ import core.sys.posix.pthread;
+ import core.sys.posix.time;
+ import core.sys.posix.sys.time;
+ import core.time;
- void mktspec( ref timespec t ) nothrow
+ void mktspec( ref timespec t ) nothrow @nogc
{
- static if ( false && is( typeof( clock_gettime ) ) )
+ static if ( is (typeof ( pthread_condattr_setclock ) ) )
{
- clock_gettime( CLOCK_REALTIME, &t );
+ clock_gettime( CLOCK_MONOTONIC, &t );
}
else
{
@@ -41,14 +42,14 @@ version (Posix)
}
- void mktspec( ref timespec t, Duration delta ) nothrow
+ void mktspec( ref timespec t, Duration delta ) nothrow @nogc
{
mktspec( t );
mvtspec( t, delta );
}
- void mvtspec( ref timespec t, Duration delta ) nothrow
+ void mvtspec( ref timespec t, Duration delta ) nothrow @nogc
{
auto val = delta;
val += dur!"seconds"( t.tv_sec );
diff --git a/libphobos/libdruntime/core/sync/event.d b/libphobos/libdruntime/core/sync/event.d
new file mode 100644
index 0000000..3795106
--- /dev/null
+++ b/libphobos/libdruntime/core/sync/event.d
@@ -0,0 +1,345 @@
+/**
+ * The event module provides a primitive for lightweight signaling of other threads
+ * (emulating Windows events on Posix)
+ *
+ * Copyright: Copyright (c) 2019 D Language Foundation
+ * License: Distributed under the
+ * $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ * (See accompanying file LICENSE)
+ * Authors: Rainer Schuetze
+ * Source: $(DRUNTIMESRC core/sync/event.d)
+ */
+module core.sync.event;
+
+version (Windows)
+{
+ import core.sys.windows.basetsd /+: HANDLE +/;
+ import core.sys.windows.winerror /+: WAIT_TIMEOUT +/;
+ import core.sys.windows.winbase /+: CreateEvent, CloseHandle, SetEvent, ResetEvent,
+ WaitForSingleObject, INFINITE, WAIT_OBJECT_0+/;
+}
+else version (Posix)
+{
+ import core.sys.posix.pthread;
+ import core.sys.posix.sys.types;
+ import core.sys.posix.time;
+}
+else
+{
+ static assert(false, "Platform not supported");
+}
+
+import core.time;
+import core.internal.abort : abort;
+
+/**
+ * represents an event. Clients of an event are suspended while waiting
+ * for the event to be "signaled".
+ *
+ * Implemented using `pthread_mutex` and `pthread_condition` on Posix and
+ * `CreateEvent` and `SetEvent` on Windows.
+---
+import core.sync.event, core.thread, std.file;
+
+struct ProcessFile
+{
+ ThreadGroup group;
+ Event event;
+ void[] buffer;
+
+ void doProcess()
+ {
+ event.wait();
+ // process buffer
+ }
+
+ void process(string filename)
+ {
+ event.initialize(true, false);
+ group = new ThreadGroup;
+ for (int i = 0; i < 10; ++i)
+ group.create(&doProcess);
+
+ buffer = std.file.read(filename);
+ event.set();
+ group.joinAll();
+ event.terminate();
+ }
+}
+---
+ */
+struct Event
+{
+nothrow @nogc:
+ /**
+ * Creates an event object.
+ *
+ * Params:
+ * manualReset = the state of the event is not reset automatically after resuming waiting clients
+ * initialState = initial state of the signal
+ */
+ this(bool manualReset, bool initialState)
+ {
+ initialize(manualReset, initialState);
+ }
+
+ /**
+ * Initializes an event object. Does nothing if the event is already initialized.
+ *
+ * Params:
+ * manualReset = the state of the event is not reset automatically after resuming waiting clients
+ * initialState = initial state of the signal
+ */
+ void initialize(bool manualReset, bool initialState)
+ {
+ version (Windows)
+ {
+ if (m_event)
+ return;
+ m_event = CreateEvent(null, manualReset, initialState, null);
+ m_event || abort("Error: CreateEvent failed.");
+ }
+ else version (Posix)
+ {
+ if (m_initalized)
+ return;
+ pthread_mutex_init(cast(pthread_mutex_t*) &m_mutex, null) == 0 ||
+ abort("Error: pthread_mutex_init failed.");
+ static if ( is( typeof( pthread_condattr_setclock ) ) )
+ {
+ pthread_condattr_t attr = void;
+ pthread_condattr_init(&attr) == 0 ||
+ abort("Error: pthread_condattr_init failed.");
+ pthread_condattr_setclock(&attr, CLOCK_MONOTONIC) == 0 ||
+ abort("Error: pthread_condattr_setclock failed.");
+ pthread_cond_init(&m_cond, &attr) == 0 ||
+ abort("Error: pthread_cond_init failed.");
+ pthread_condattr_destroy(&attr) == 0 ||
+ abort("Error: pthread_condattr_destroy failed.");
+ }
+ else
+ {
+ pthread_cond_init(&m_cond, null) == 0 ||
+ abort("Error: pthread_cond_init failed.");
+ }
+ m_state = initialState;
+ m_manualReset = manualReset;
+ m_initalized = true;
+ }
+ }
+
+ // copying not allowed, can produce resource leaks
+ @disable this(this);
+ @disable void opAssign(Event);
+
+ ~this()
+ {
+ terminate();
+ }
+
+ /**
+ * Deinitializes the event. Does nothing if the event is not initialized. There must not be
+ * threads currently waiting for the event to be signaled.
+ */
+ void terminate()
+ {
+ version (Windows)
+ {
+ if (m_event)
+ CloseHandle(m_event);
+ m_event = null;
+ }
+ else version (Posix)
+ {
+ if (m_initalized)
+ {
+ pthread_mutex_destroy(&m_mutex) == 0 ||
+ abort("Error: pthread_mutex_destroy failed.");
+ pthread_cond_destroy(&m_cond) == 0 ||
+ abort("Error: pthread_cond_destroy failed.");
+ m_initalized = false;
+ }
+ }
+ }
+
+
+ /// Set the event to "signaled", so that waiting clients are resumed
+ void set()
+ {
+ version (Windows)
+ {
+ if (m_event)
+ SetEvent(m_event);
+ }
+ else version (Posix)
+ {
+ if (m_initalized)
+ {
+ pthread_mutex_lock(&m_mutex);
+ m_state = true;
+ pthread_cond_broadcast(&m_cond);
+ pthread_mutex_unlock(&m_mutex);
+ }
+ }
+ }
+
+ /// Reset the event manually
+ void reset()
+ {
+ version (Windows)
+ {
+ if (m_event)
+ ResetEvent(m_event);
+ }
+ else version (Posix)
+ {
+ if (m_initalized)
+ {
+ pthread_mutex_lock(&m_mutex);
+ m_state = false;
+ pthread_mutex_unlock(&m_mutex);
+ }
+ }
+ }
+
+ /**
+ * Wait for the event to be signaled without timeout.
+ *
+ * Returns:
+ * `true` if the event is in signaled state, `false` if the event is uninitialized or another error occurred
+ */
+ bool wait()
+ {
+ version (Windows)
+ {
+ return m_event && WaitForSingleObject(m_event, INFINITE) == WAIT_OBJECT_0;
+ }
+ else version (Posix)
+ {
+ return wait(Duration.max);
+ }
+ }
+
+ /**
+ * Wait for the event to be signaled with timeout.
+ *
+ * Params:
+ * tmout = the maximum time to wait
+ * Returns:
+ * `true` if the event is in signaled state, `false` if the event was nonsignaled for the given time or
+ * the event is uninitialized or another error occurred
+ */
+ bool wait(Duration tmout)
+ {
+ version (Windows)
+ {
+ if (!m_event)
+ return false;
+
+ auto maxWaitMillis = dur!("msecs")(uint.max - 1);
+
+ while (tmout > maxWaitMillis)
+ {
+ auto res = WaitForSingleObject(m_event, uint.max - 1);
+ if (res != WAIT_TIMEOUT)
+ return res == WAIT_OBJECT_0;
+ tmout -= maxWaitMillis;
+ }
+ auto ms = cast(uint)(tmout.total!"msecs");
+ return WaitForSingleObject(m_event, ms) == WAIT_OBJECT_0;
+ }
+ else version (Posix)
+ {
+ if (!m_initalized)
+ return false;
+
+ pthread_mutex_lock(&m_mutex);
+
+ int result = 0;
+ if (!m_state)
+ {
+ if (tmout == Duration.max)
+ {
+ result = pthread_cond_wait(&m_cond, &m_mutex);
+ }
+ else
+ {
+ import core.sync.config;
+
+ timespec t = void;
+ mktspec(t, tmout);
+
+ result = pthread_cond_timedwait(&m_cond, &m_mutex, &t);
+ }
+ }
+ if (result == 0 && !m_manualReset)
+ m_state = false;
+
+ pthread_mutex_unlock(&m_mutex);
+
+ return result == 0;
+ }
+ }
+
+private:
+ version (Windows)
+ {
+ HANDLE m_event;
+ }
+ else version (Posix)
+ {
+ pthread_mutex_t m_mutex;
+ pthread_cond_t m_cond;
+ bool m_initalized;
+ bool m_state;
+ bool m_manualReset;
+ }
+}
+
+// Test single-thread (non-shared) use.
+@nogc nothrow unittest
+{
+ // auto-reset, initial state false
+ Event ev1 = Event(false, false);
+ assert(!ev1.wait(1.dur!"msecs"));
+ ev1.set();
+ assert(ev1.wait());
+ assert(!ev1.wait(1.dur!"msecs"));
+
+ // manual-reset, initial state true
+ Event ev2 = Event(true, true);
+ assert(ev2.wait());
+ assert(ev2.wait());
+ ev2.reset();
+ assert(!ev2.wait(1.dur!"msecs"));
+}
+
+unittest
+{
+ import core.thread, core.atomic;
+
+ scope event = new Event(true, false);
+ int numThreads = 10;
+ shared int numRunning = 0;
+
+ void testFn()
+ {
+ event.wait(8.dur!"seconds"); // timeout below limit for druntime test_runner
+ numRunning.atomicOp!"+="(1);
+ }
+
+ auto group = new ThreadGroup;
+
+ for (int i = 0; i < numThreads; ++i)
+ group.create(&testFn);
+
+ auto start = MonoTime.currTime;
+ assert(numRunning == 0);
+
+ event.set();
+ group.joinAll();
+
+ assert(numRunning == numThreads);
+
+ assert(MonoTime.currTime - start < 5.dur!"seconds");
+}
diff --git a/libphobos/libdruntime/core/sync/mutex.d b/libphobos/libdruntime/core/sync/mutex.d
index 024009f..b153ab9 100644
--- a/libphobos/libdruntime/core/sync/mutex.d
+++ b/libphobos/libdruntime/core/sync/mutex.d
@@ -20,13 +20,13 @@ public import core.sync.exception;
version (Windows)
{
- private import core.sys.windows.winbase /+: CRITICAL_SECTION, DeleteCriticalSection,
+ import core.sys.windows.winbase /+: CRITICAL_SECTION, DeleteCriticalSection,
EnterCriticalSection, InitializeCriticalSection, LeaveCriticalSection,
TryEnterCriticalSection+/;
}
else version (Posix)
{
- private import core.sys.posix.pthread;
+ import core.sys.posix.pthread;
}
else
{
@@ -129,7 +129,7 @@ class Mutex :
assert(obj.__monitor is null,
"The provided object has a monitor already set!");
}
- body
+ do
{
this();
obj.__monitor = cast(void*) &m_proxy;
@@ -345,14 +345,10 @@ unittest
@system @nogc nothrow unittest
{
import core.stdc.stdlib : malloc, free;
+ import core.lifetime : emplace;
- void* p = malloc(__traits(classInstanceSize, Mutex));
-
- auto ti = typeid(Mutex);
- p[0 .. ti.initializer.length] = ti.initializer[];
-
- shared Mutex mtx = cast(shared(Mutex)) p;
- mtx.__ctor();
+ auto mtx = cast(shared Mutex) malloc(__traits(classInstanceSize, Mutex));
+ emplace(mtx);
mtx.lock_nothrow();
diff --git a/libphobos/libdruntime/core/sync/rwmutex.d b/libphobos/libdruntime/core/sync/rwmutex.d
index ba94a9e..89ef667 100644
--- a/libphobos/libdruntime/core/sync/rwmutex.d
+++ b/libphobos/libdruntime/core/sync/rwmutex.d
@@ -17,13 +17,13 @@ module core.sync.rwmutex;
public import core.sync.exception;
-private import core.sync.condition;
-private import core.sync.mutex;
-private import core.memory;
+import core.sync.condition;
+import core.sync.mutex;
+import core.memory;
version (Posix)
{
- private import core.sys.posix.pthread;
+ import core.sys.posix.pthread;
}
@@ -225,6 +225,51 @@ class ReadWriteMutex
}
}
+ /**
+ * Attempts to acquire a read lock on the enclosing mutex. If one can
+ * be obtained without blocking, the lock is acquired and true is
+ * returned. If not, the function blocks until either the lock can be
+ * obtained or the time elapsed exceeds $(D_PARAM timeout), returning
+ * true if the lock was acquired and false if the function timed out.
+ *
+ * Params:
+ * timeout = maximum amount of time to wait for the lock
+ * Returns:
+ * true if the lock was acquired and false if not.
+ */
+ bool tryLock(Duration timeout)
+ {
+ synchronized( m_commonMutex )
+ {
+ if (!shouldQueueReader)
+ {
+ ++m_numActiveReaders;
+ return true;
+ }
+
+ enum zero = Duration.zero();
+ if (timeout <= zero)
+ return false;
+
+ ++m_numQueuedReaders;
+ scope(exit) --m_numQueuedReaders;
+
+ enum maxWaitPerCall = dur!"hours"(24 * 365); // Avoid problems calling wait with huge Duration.
+ const initialTime = MonoTime.currTime;
+ m_readerQueue.wait(timeout < maxWaitPerCall ? timeout : maxWaitPerCall);
+ while (shouldQueueReader)
+ {
+ const timeElapsed = MonoTime.currTime - initialTime;
+ if (timeElapsed >= timeout)
+ return false;
+ auto nextWait = timeout - timeElapsed;
+ m_readerQueue.wait(nextWait < maxWaitPerCall ? nextWait : maxWaitPerCall);
+ }
+ ++m_numActiveReaders;
+ return true;
+ }
+ }
+
private:
@property bool shouldQueueReader()
@@ -341,6 +386,50 @@ class ReadWriteMutex
}
}
+ /**
+ * Attempts to acquire a write lock on the enclosing mutex. If one can
+ * be obtained without blocking, the lock is acquired and true is
+ * returned. If not, the function blocks until either the lock can be
+ * obtained or the time elapsed exceeds $(D_PARAM timeout), returning
+ * true if the lock was acquired and false if the function timed out.
+ *
+ * Params:
+ * timeout = maximum amount of time to wait for the lock
+ * Returns:
+ * true if the lock was acquired and false if not.
+ */
+ bool tryLock(Duration timeout)
+ {
+ synchronized( m_commonMutex )
+ {
+ if (!shouldQueueWriter)
+ {
+ ++m_numActiveWriters;
+ return true;
+ }
+
+ enum zero = Duration.zero();
+ if (timeout <= zero)
+ return false;
+
+ ++m_numQueuedWriters;
+ scope(exit) --m_numQueuedWriters;
+
+ enum maxWaitPerCall = dur!"hours"(24 * 365); // Avoid problems calling wait with huge Duration.
+ const initialTime = MonoTime.currTime;
+ m_writerQueue.wait(timeout < maxWaitPerCall ? timeout : maxWaitPerCall);
+ while (shouldQueueWriter)
+ {
+ const timeElapsed = MonoTime.currTime - initialTime;
+ if (timeElapsed >= timeout)
+ return false;
+ auto nextWait = timeout - timeElapsed;
+ m_writerQueue.wait(nextWait < maxWaitPerCall ? nextWait : maxWaitPerCall);
+ }
+ ++m_numActiveWriters;
+ return true;
+ }
+ }
private:
@property bool shouldQueueWriter()
@@ -526,3 +615,79 @@ unittest
runTest(ReadWriteMutex.Policy.PREFER_READERS);
runTest(ReadWriteMutex.Policy.PREFER_WRITERS);
}
+
+unittest
+{
+ import core.atomic, core.thread;
+ __gshared ReadWriteMutex rwmutex;
+ shared static bool threadTriedOnceToGetLock;
+ shared static bool threadFinallyGotLock;
+
+ rwmutex = new ReadWriteMutex();
+ atomicFence;
+ const maxTimeAllowedForTest = dur!"seconds"(20);
+ // Test ReadWriteMutex.Reader.tryLock(Duration).
+ {
+ static void testReaderTryLock()
+ {
+ assert(!rwmutex.reader.tryLock(Duration.min));
+ threadTriedOnceToGetLock.atomicStore(true);
+ assert(rwmutex.reader.tryLock(Duration.max));
+ threadFinallyGotLock.atomicStore(true);
+ rwmutex.reader.unlock;
+ }
+ assert(rwmutex.writer.tryLock(Duration.zero), "should have been able to obtain lock without blocking");
+ auto otherThread = new Thread(&testReaderTryLock).start;
+ const failIfThisTimeisReached = MonoTime.currTime + maxTimeAllowedForTest;
+ Thread.yield;
+ // We started otherThread with the writer lock held so otherThread's
+ // first rwlock.reader.tryLock with timeout Duration.min should fail.
+ while (!threadTriedOnceToGetLock.atomicLoad)
+ {
+ assert(MonoTime.currTime < failIfThisTimeisReached, "timed out");
+ Thread.yield;
+ }
+ rwmutex.writer.unlock;
+ // Soon after we release the writer lock otherThread's second
+ // rwlock.reader.tryLock with timeout Duration.max should succeed.
+ while (!threadFinallyGotLock.atomicLoad)
+ {
+ assert(MonoTime.currTime < failIfThisTimeisReached, "timed out");
+ Thread.yield;
+ }
+ otherThread.join;
+ }
+ threadTriedOnceToGetLock.atomicStore(false); // Reset.
+ threadFinallyGotLock.atomicStore(false); // Reset.
+ // Test ReadWriteMutex.Writer.tryLock(Duration).
+ {
+ static void testWriterTryLock()
+ {
+ assert(!rwmutex.writer.tryLock(Duration.min));
+ threadTriedOnceToGetLock.atomicStore(true);
+ assert(rwmutex.writer.tryLock(Duration.max));
+ threadFinallyGotLock.atomicStore(true);
+ rwmutex.writer.unlock;
+ }
+ assert(rwmutex.reader.tryLock(Duration.zero), "should have been able to obtain lock without blocking");
+ auto otherThread = new Thread(&testWriterTryLock).start;
+ const failIfThisTimeisReached = MonoTime.currTime + maxTimeAllowedForTest;
+ Thread.yield;
+ // We started otherThread with the reader lock held so otherThread's
+ // first rwlock.writer.tryLock with timeout Duration.min should fail.
+ while (!threadTriedOnceToGetLock.atomicLoad)
+ {
+ assert(MonoTime.currTime < failIfThisTimeisReached, "timed out");
+ Thread.yield;
+ }
+ rwmutex.reader.unlock;
+ // Soon after we release the reader lock otherThread's second
+ // rwlock.writer.tryLock with timeout Duration.max should succeed.
+ while (!threadFinallyGotLock.atomicLoad)
+ {
+ assert(MonoTime.currTime < failIfThisTimeisReached, "timed out");
+ Thread.yield;
+ }
+ otherThread.join;
+ }
+}
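A short usage sketch of the new tryLock(Duration) overloads (function and variable names below are illustrative): attempt a read lock for at most 100 ms and skip the work on timeout.
---
import core.sync.rwmutex;
import core.time : dur;

bool tryReadSnapshot(ReadWriteMutex guard)
{
    // Block for at most 100 ms waiting for the read lock.
    if (guard.reader.tryLock(dur!"msecs"(100)))
    {
        scope (exit) guard.reader.unlock();
        // ... read the shared state here ...
        return true;
    }
    return false;   // timed out; the caller can retry later
}
---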
diff --git a/libphobos/libdruntime/core/sync/semaphore.d b/libphobos/libdruntime/core/sync/semaphore.d
index 56ac7dc..cf2bddb 100644
--- a/libphobos/libdruntime/core/sync/semaphore.d
+++ b/libphobos/libdruntime/core/sync/semaphore.d
@@ -29,25 +29,25 @@ else version (WatchOS)
version (Windows)
{
- private import core.sys.windows.basetsd /+: HANDLE+/;
- private import core.sys.windows.winbase /+: CloseHandle, CreateSemaphoreA, INFINITE,
+ import core.sys.windows.basetsd /+: HANDLE+/;
+ import core.sys.windows.winbase /+: CloseHandle, CreateSemaphoreA, INFINITE,
ReleaseSemaphore, WAIT_OBJECT_0, WaitForSingleObject+/;
- private import core.sys.windows.windef /+: BOOL, DWORD+/;
- private import core.sys.windows.winerror /+: WAIT_TIMEOUT+/;
+ import core.sys.windows.windef /+: BOOL, DWORD+/;
+ import core.sys.windows.winerror /+: WAIT_TIMEOUT+/;
}
else version (Darwin)
{
- private import core.sync.config;
- private import core.stdc.errno;
- private import core.sys.posix.time;
- private import core.sys.darwin.mach.semaphore;
+ import core.sync.config;
+ import core.stdc.errno;
+ import core.sys.posix.time;
+ import core.sys.darwin.mach.semaphore;
}
else version (Posix)
{
- private import core.sync.config;
- private import core.stdc.errno;
- private import core.sys.posix.pthread;
- private import core.sys.posix.semaphore;
+ import core.sync.config;
+ import core.stdc.errno;
+ import core.sys.posix.pthread;
+ import core.sys.posix.semaphore;
}
else
{
@@ -197,7 +197,7 @@ class Semaphore
{
assert( !period.isNegative );
}
- body
+ do
{
version (Windows)
{
@@ -253,8 +253,11 @@ class Semaphore
}
else version (Posix)
{
+ import core.sys.posix.time : clock_gettime, CLOCK_REALTIME;
+
timespec t = void;
- mktspec( t, period );
+ clock_gettime( CLOCK_REALTIME, &t );
+ mvtspec( t, period );
while ( true )
{
@@ -359,8 +362,7 @@ protected:
// Unit Tests
////////////////////////////////////////////////////////////////////////////////
-
-version (unittest)
+unittest
{
import core.thread, core.atomic;
@@ -447,10 +449,6 @@ version (unittest)
assert(alertedOne && !alertedTwo);
}
-
- unittest
- {
- testWait();
- testWaitTimeout();
- }
+ testWait();
+ testWaitTimeout();
}
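For reference, a minimal sketch of the timed-wait path touched above: wait(Duration) returns true only if a permit was obtained before the timeout elapses (the helper name is illustrative).
---
import core.sync.semaphore;
import core.time : dur;

bool tryConsume(Semaphore sem)
{
    // Returns false if no permit became available within 100 ms.
    return sem.wait(dur!"msecs"(100));
}
---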
diff --git a/libphobos/libdruntime/core/sys/darwin/dlfcn.d b/libphobos/libdruntime/core/sys/darwin/dlfcn.d
index a38d900..406d588 100644
--- a/libphobos/libdruntime/core/sys/darwin/dlfcn.d
+++ b/libphobos/libdruntime/core/sys/darwin/dlfcn.d
@@ -38,3 +38,8 @@ int dladdr(const scope void* addr, Dl_info* info);
enum RTLD_NOLOAD = 0x10;
enum RTLD_NODELETE = 0x80;
enum RTLD_FIRST = 0x100;
+
+enum RTLD_NEXT = cast(void*) -1;
+enum RTLD_DEFAULT = cast(void*) -2;
+enum RTLD_SELF = cast(void*) -3;
+enum RTLD_MAIN_ONLY = cast(void*) -5;
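A hedged sketch of using the new pseudo-handles, assuming dlsym is reachable through the Posix dlfcn bindings on Darwin; the helper name is illustrative.
---
version (Darwin)
{
    import core.sys.darwin.dlfcn : RTLD_DEFAULT;
    import core.sys.posix.dlfcn : dlsym;

    // RTLD_DEFAULT searches every image already loaded into the process.
    void* findMalloc()
    {
        return dlsym(RTLD_DEFAULT, "malloc");
    }
}
---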
diff --git a/libphobos/libdruntime/core/sys/dragonflybsd/sys/elf32.d b/libphobos/libdruntime/core/sys/dragonflybsd/sys/elf32.d
index 2c35d0b..035bba5 100644
--- a/libphobos/libdruntime/core/sys/dragonflybsd/sys/elf32.d
+++ b/libphobos/libdruntime/core/sys/dragonflybsd/sys/elf32.d
@@ -112,7 +112,7 @@ extern (D) pure
{
auto ELF32_M_SYM(I)(I info) @safe { return info >> 8; }
auto ELF32_M_SIZE(I)(I info) { return cast(ubyte)info; }
- auto ELF32_M_INFO(S, SZ)(S sym, SZ size) { return (sym << 8) + cast(ubye)size; }
+ auto ELF32_M_INFO(S, SZ)(S sym, SZ size) { return (sym << 8) + cast(ubyte)size; }
}
struct Elf32_Cap
diff --git a/libphobos/libdruntime/core/sys/dragonflybsd/sys/elf64.d b/libphobos/libdruntime/core/sys/dragonflybsd/sys/elf64.d
index 94b7e42..f7d9247 100644
--- a/libphobos/libdruntime/core/sys/dragonflybsd/sys/elf64.d
+++ b/libphobos/libdruntime/core/sys/dragonflybsd/sys/elf64.d
@@ -118,7 +118,7 @@ extern (D) pure
{
auto ELF64_M_SYM(I)(I info) @safe { return info >> 8; }
auto ELF64_M_SIZE(I)(I info) { return cast(ubyte)info; }
- auto ELF64_M_INFO(S, SZ)(S sym, SZ size) @safe { return (sym << 8) + cast(ubye)size; }
+ auto ELF64_M_INFO(S, SZ)(S sym, SZ size) @safe { return (sym << 8) + cast(ubyte)size; }
}
struct Elf64_Cap
diff --git a/libphobos/libdruntime/core/sys/freebsd/sys/elf32.d b/libphobos/libdruntime/core/sys/freebsd/sys/elf32.d
index 6145522..63cc4f9 100644
--- a/libphobos/libdruntime/core/sys/freebsd/sys/elf32.d
+++ b/libphobos/libdruntime/core/sys/freebsd/sys/elf32.d
@@ -112,7 +112,7 @@ extern (D)
{
auto ELF32_M_SYM(I)(I info) { return info >> 8; }
auto ELF32_M_SIZE(I)(I info) { return cast(ubyte)info; }
- auto ELF32_M_INFO(S, SZ)(S sym, SZ size) { return (sym << 8) + cast(ubye)size; }
+ auto ELF32_M_INFO(S, SZ)(S sym, SZ size) { return (sym << 8) + cast(ubyte)size; }
}
struct Elf32_Cap
diff --git a/libphobos/libdruntime/core/sys/freebsd/sys/elf64.d b/libphobos/libdruntime/core/sys/freebsd/sys/elf64.d
index f208b01..8c63e04 100644
--- a/libphobos/libdruntime/core/sys/freebsd/sys/elf64.d
+++ b/libphobos/libdruntime/core/sys/freebsd/sys/elf64.d
@@ -127,7 +127,7 @@ extern (D)
{
auto ELF64_M_SYM(I)(I info) { return info >> 8; }
auto ELF64_M_SIZE(I)(I info) { return cast(ubyte)info; }
- auto ELF64_M_INFO(S, SZ)(S sym, SZ size) { return (sym << 8) + cast(ubye)size; }
+ auto ELF64_M_INFO(S, SZ)(S sym, SZ size) { return (sym << 8) + cast(ubyte)size; }
}
struct Elf64_Cap
diff --git a/libphobos/libdruntime/core/sys/linux/fs.d b/libphobos/libdruntime/core/sys/linux/fs.d
new file mode 100644
index 0000000..5faa756
--- /dev/null
+++ b/libphobos/libdruntime/core/sys/linux/fs.d
@@ -0,0 +1,265 @@
+/**
+ * D header file for the linux/fs.h interface.
+ *
+ * This file has definitions for some important file table structures
+ * and constants and structures used by various generic file system
+ * ioctl's.
+ *
+ * Copyright: The D Language Foundation 2021.
+ * License : $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost License 1.0)
+ * Authors : Luís Ferreira
+ */
+module core.sys.linux.fs;
+
+version (linux):
+
+public import core.sys.posix.sys.ioctl;
+
+import core.stdc.config : c_ulong, c_long;
+
+extern (C):
+@system:
+@nogc:
+nothrow:
+
+enum INR_OPEN_CUR = 1024; /// Initial setting for nfile rlimits
+enum INR_OPEN_MAX = 4096; /// Hard limit for nfile rlimits
+
+enum BLOCK_SIZE_BITS = 10; ///
+enum BLOCK_SIZE = 1 << BLOCK_SIZE_BITS; ///
+
+enum
+{
+ SEEK_SET = 0, /// seek relative to beginning of file
+ SEEK_CUR = 1, /// seek relative to current file position
+ SEEK_END = 2, /// seek relative to end of file
+ SEEK_DATA = 3, /// seek to the next data
+ SEEK_HOLE = 4, /// seek to the next hole
+ SEEK_MAX = SEEK_HOLE, ///
+}
+
+enum
+{
+ RENAME_NOREPLACE = 1 << 0, /// Don't overwrite target
+ RENAME_EXCHANGE = 1 << 1, /// Exchange source and dest
+ RENAME_WHITEOUT = 1 << 2, /// Whiteout source
+}
+
+struct file_clone_range
+{
+ long src_fd;
+ ulong src_offset;
+ ulong src_length;
+ ulong dest_offset;
+}
+
+struct fstrim_range
+{
+ ulong start;
+ ulong len;
+ ulong minlen;
+}
+
+/**
+ * extent-same (dedupe) ioctls; these MUST match the btrfs ioctl definitions
+ */
+enum
+{
+ FILE_DEDUPE_RANGE_SAME = 0,
+ FILE_DEDUPE_RANGE_DIFFERS = 1,
+}
+
+/**
+ * from struct btrfs_ioctl_file_extent_same_info
+ */
+struct file_dedupe_range_info
+{
+ long dest_fd; /// in - destination file
+ ulong dest_offset; /// in - start of extent in destination
+ ulong bytes_deduped; /// out - total # of bytes we were able to dedupe from this file.
+ /** status of this dedupe operation:
+ * < 0 for error
+ * == FILE_DEDUPE_RANGE_SAME if dedupe succeeds
+ * == FILE_DEDUPE_RANGE_DIFFERS if data differs
+ */
+ int status;
+ uint reserved; /// must be zero
+}
+
+/**
+ * from struct btrfs_ioctl_file_extent_same_args
+ */
+struct file_dedupe_range
+{
+ ulong src_offset; /// in - start of extent in source
+ ulong src_length; /// in - length of extent
+ ushort dest_count; /// in - total elements in info array
+ ushort reserved1; /// must be zero
+ uint reserved2; /// must be zero
+ file_dedupe_range_info[0] info;
+}
+
+/**
+ * And dynamically-tunable limits and defaults:
+ */
+struct files_stat_struct
+{
+ c_ulong nr_files; /// read only
+ c_ulong nr_free_files; /// read only
+ c_ulong max_files; /// tunable
+}
+
+struct inodes_stat_t
+{
+ c_long nr_inodes;
+ c_long nr_unused;
+ c_long[5] dummy; /// padding for sysctl ABI compatibility
+}
+
+enum NR_FILE = 8192;
+
+/**
+ * Structure for FS_IOC_FSGETXATTR[A] and FS_IOC_FSSETXATTR.
+ */
+struct fsxattr
+{
+ uint fsx_xflags;
+ uint fsx_extsize;
+ uint fsx_nextents;
+ uint fsx_projid; /// project identifier
+ uint fsx_cowextsize; /// CoW extsize
+ ubyte[8] fsx_pad;
+}
+
+/*
+ * Flags for the fsx_xflags field
+ */
+enum {
+ S_XFLAG_REALTIME = 0x00000001, /// data in realtime volume
+ S_XFLAG_PREALLOC = 0x00000002, /// preallocated file extents
+ S_XFLAG_IMMUTABLE = 0x00000008, /// file cannot be modified
+ S_XFLAG_APPEND = 0x00000010, /// all writes append
+ S_XFLAG_SYNC = 0x00000020, /// all writes synchronous
+ S_XFLAG_NOATIME = 0x00000040, /// do not update access time
+ S_XFLAG_NODUMP = 0x00000080, /// do not include in backups
+ S_XFLAG_RTINHERIT = 0x00000100, /// create with rt bit set
+ S_XFLAG_PROJINHERIT = 0x00000200, /// create with parents projid
+ S_XFLAG_NOSYMLINKS = 0x00000400, /// disallow symlink creation
+ S_XFLAG_EXTSIZE = 0x00000800, /// extent size allocator hint
+ S_XFLAG_EXTSZINHERIT = 0x00001000, /// inherit inode extent size
+ S_XFLAG_NODEFRAG = 0x00002000, /// do not defragment
+ S_XFLAG_FILESTREAM = 0x00004000, /// use filestream allocator
+ S_XFLAG_DAX = 0x00008000, /// use DAX for IO
+ S_XFLAG_COWEXTSIZE = 0x00010000, /// CoW extent size allocator hint
+ S_XFLAG_HASATTR = 0x80000000, /// no DIFLAG for this
+}
+
+enum BLKROSET = _IO(0x12, 93); /// set device read-only
+enum BLKROGET = _IO(0x12, 94); /// get read-only status
+enum BLKRRPART = _IO(0x12, 95); /// re-read partition table
+enum BLKGETSIZE = _IO(0x12, 96); /// return device size in 512-byte sectors
+enum BLKFLSBUF = _IO(0x12, 97); /// flush buffer cache
+enum BLKRASET = _IO(0x12, 98); /// set read ahead for block device
+enum BLKRAGET = _IO(0x12, 99); /// get current read ahead setting
+enum BLKFRASET = _IO(0x12, 100); /// set filesystem read-ahead
+enum BLKFRAGET = _IO(0x12, 101); /// get filesystem read-ahead
+enum BLKSECTSET = _IO(0x12, 102); /// set max sectors per request
+enum BLKSECTGET = _IO(0x12, 103); /// get max sectors per request
+enum BLKSSZGET = _IO(0x12, 104); /// get block device sector size
+
+
+enum BLKBSZGET = _IOR!size_t(0x12, 112);
+enum BLKBSZSET = _IOW!size_t(0x12, 113);
+enum BLKGETSIZE64 = _IOR!size_t(0x12, 114);
+enum BLKTRACESTART = _IO(0x12, 116);
+enum BLKTRACESTOP = _IO(0x12, 117);
+enum BLKTRACETEARDOWN = _IO(0x12, 118);
+enum BLKDISCARD = _IO(0x12, 119);
+enum BLKIOMIN = _IO(0x12, 120);
+enum BLKIOOPT = _IO(0x12, 121);
+enum BLKALIGNOFF = _IO(0x12, 122);
+enum BLKPBSZGET = _IO(0x12, 123);
+enum BLKDISCARDZEROES = _IO(0x12, 124);
+enum BLKSECDISCARD = _IO(0x12, 125);
+enum BLKROTATIONAL = _IO(0x12, 126);
+enum BLKZEROOUT = _IO(0x12, 127);
+
+enum BMAP_IOCTL = 1; /// obsolete - kept for compatibility
+enum FIBMAP = _IO(0x00, 1); /// bmap access
+enum FIGETBSZ = _IO(0x00, 2); /// get the block size used for bmap
+
+enum FSLABEL_MAX = 256; /// Max chars for the interface; each fs may differ
+
+/**
+ * Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
+ *
+ * Note: for historical reasons, these flags were originally used and
+ * defined for use by ext2/ext3, and then other file systems started
+ * using these flags so they wouldn't need to write their own version
+ * of chattr/lsattr (which was shipped as part of e2fsprogs). You
+ * should think twice before trying to use these flags in new
+ * contexts, or trying to assign these flags, since they are used both
+ * as the UAPI and the on-disk encoding for ext2/3/4. Also, we are
+ * almost out of 32-bit flags. :-)
+ *
+ * We have recently hoisted FS_IOC_FSGETXATTR / FS_IOC_FSSETXATTR from
+ * XFS to the generic FS level interface. This uses a structure that
+ * has padding and hence has more room to grow, so it may be more
+ * appropriate for many new use cases.
+ */
+enum {
+ FS_SECRM_FL = 0x00000001, /// Secure deletion
+ FS_UNRM_FL = 0x00000002, /// Undelete
+ FS_COMPR_FL = 0x00000004, /// Compress file
+ FS_SYNC_FL = 0x00000008, /// Synchronous updates
+ FS_IMMUTABLE_FL = 0x00000010, /// Immutable file
+ FS_APPEND_FL = 0x00000020, /// writes to file may only append
+ FS_NODUMP_FL = 0x00000040, /// do not dump file
+ FS_NOATIME_FL = 0x00000080, /// do not update atime
+ FS_DIRTY_FL = 0x00000100, /// Reserved for compression usage
+ FS_COMPRBLK_FL = 0x00000200, /// One or more compressed clusters
+ FS_NOCOMP_FL = 0x00000400, /// Don't compress
+ FS_ENCRYPT_FL = 0x00000800, /// Encrypted file
+ FS_BTREE_FL = 0x00001000, /// btree format dir
+ FS_INDEX_FL = 0x00001000, /// hash-indexed directory
+ FS_IMAGIC_FL = 0x00002000, /// AFS directory
+ FS_JOURNAL_DATA_FL = 0x00004000, /// Reserved for ext3
+ FS_NOTAIL_FL = 0x00008000, /// file tail should not be merged
+ FS_DIRSYNC_FL = 0x00010000, /// dirsync behaviour (directories only)
+ FS_TOPDIR_FL = 0x00020000, /// Top of directory hierarchies
+ FS_HUGE_FILE_FL = 0x00040000, /// Reserved for ext4
+ FS_EXTENT_FL = 0x00080000, /// Extents
+ FS_VERITY_FL = 0x00100000, /// Verity protected inode
+ FS_EA_INODE_FL = 0x00200000, /// Inode used for large EA
+ FS_EOFBLOCKS_FL = 0x00400000, /// Reserved for ext4
+ FS_NOCOW_FL = 0x00800000, /// Do not cow file
+ FS_DAX_FL = 0x02000000, /// Inode is DAX
+ FS_INLINE_DATA_FL = 0x10000000, /// Reserved for ext4
+ FS_PROJINHERIT_FL = 0x20000000, /// Create with parents projid
+ FS_CASEFOLD_FL = 0x40000000, /// Folder is case insensitive
+ FS_RESERVED_FL = 0x80000000, /// reserved for ext2 lib
+}
+
+enum FS_FL_USER_VISIBLE = 0x0003DFFF; /// User visible flags
+enum FS_FL_USER_MODIFIABLE = 0x000380FF; /// User modifiable flags
+
+enum SYNC_FILE_RANGE_WAIT_BEFORE = 1;
+enum SYNC_FILE_RANGE_WRITE = 2;
+enum SYNC_FILE_RANGE_WAIT_AFTER = 4;
+enum SYNC_FILE_RANGE_WRITE_AND_WAIT = SYNC_FILE_RANGE_WRITE | SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WAIT_AFTER;
+
+alias __kernel_rwf_t = int;
+
+/**
+ * Flags for preadv2/pwritev2:
+ */
+enum : __kernel_rwf_t {
+ RWF_HIPRI = 0x00000001, /// high priority request, poll if possible
+ RWF_DSYNC = 0x00000002, /// per-IO O_DSYNC
+ RWF_SYNC = 0x00000004, /// per-IO O_SYNC
+ RWF_NOWAIT = 0x00000008, /// per-IO, return -EAGAIN if operation would block
+ RWF_APPEND = 0x00000010, /// per-IO O_APPEND
+}
+
+/// mask of flags supported by the kernel
+enum RWF_SUPPORTED = RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT | RWF_APPEND;
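A hedged usage sketch for the BLK* ioctls defined above: query a block device's size in bytes via BLKGETSIZE64, relying on the ioctl binding publicly imported by this module. Names and error handling are illustrative only.
---
version (linux)
{
    import core.sys.linux.fs : BLKGETSIZE64;
    import core.sys.posix.sys.ioctl : ioctl;
    import core.sys.posix.fcntl : open, O_RDONLY;
    import core.sys.posix.unistd : close;

    // Returns the device size in bytes, or 0 on any error.
    ulong blockDeviceSize(const(char)* path)
    {
        int fd = open(path, O_RDONLY);
        if (fd < 0)
            return 0;
        scope (exit) close(fd);

        ulong size;
        if (ioctl(fd, BLKGETSIZE64, &size) != 0)
            return 0;
        return size;
    }
}
---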
diff --git a/libphobos/libdruntime/core/sys/linux/io_uring.d b/libphobos/libdruntime/core/sys/linux/io_uring.d
new file mode 100644
index 0000000..5e1a20c
--- /dev/null
+++ b/libphobos/libdruntime/core/sys/linux/io_uring.d
@@ -0,0 +1,414 @@
+/**
+ * D header file for the io_uring interface.
+ * Available since Linux 5.1
+ *
+ * Copyright: Copyright Jens Axboe 2019,
+ * Copyright Christoph Hellwig 2019.
+ * License : $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost License 1.0)
+ * Authors : Luís Ferreira
+ */
+module core.sys.linux.io_uring;
+
+version (linux):
+
+import core.sys.linux.fs : __kernel_rwf_t;
+
+extern (C):
+@system:
+@nogc:
+nothrow:
+
+/**
+ * IO submission data structure (Submission Queue Entry)
+ */
+struct io_uring_sqe
+{
+ /// type of operation for this sqe
+ ubyte opcode;
+ /// IOSQE_* flags
+ ubyte flags;
+ /// ioprio for the request
+ ushort ioprio;
+ /// file descriptor to do IO on
+ int fd;
+ union
+ {
+ /// offset into file
+ ulong off;
+ ulong addr2;
+ }
+
+ union
+ {
+ /// pointer to buffer or iovecs
+ ulong addr;
+ ulong splice_off_in;
+ }
+
+ /// buffer size or number of iovecs
+ uint len;
+ union
+ {
+ __kernel_rwf_t rw_flags;
+ uint fsync_flags;
+
+ /// compatibility
+ ushort poll_events;
+ /// word-reversed for BE
+ uint poll32_events;
+
+ uint sync_range_flags;
+ uint msg_flags;
+ uint timeout_flags;
+ uint accept_flags;
+ uint cancel_flags;
+ uint open_flags;
+ uint statx_flags;
+ uint fadvise_advice;
+ uint splice_flags;
+ uint rename_flags;
+ uint unlink_flags;
+ }
+
+ /// data to be passed back at completion time
+ ulong user_data;
+ union
+ {
+ struct
+ {
+ /**
+ * pack this to avoid bogus arm OABI complaints
+ */
+ union
+ {
+ align (1):
+
+ /// index into fixed buffers, if used
+ ushort buf_index;
+ /// for grouped buffer selection
+ ushort buf_group;
+ }
+
+ /// personality to use, if used
+ ushort personality;
+ int splice_fd_in;
+ }
+
+ ulong[3] __pad2;
+ }
+}
+
+enum
+{
+ IOSQE_FIXED_FILE_BIT = 0,
+ IOSQE_IO_DRAIN_BIT = 1,
+ IOSQE_IO_LINK_BIT = 2,
+ IOSQE_IO_HARDLINK_BIT = 3,
+ IOSQE_ASYNC_BIT = 4,
+ IOSQE_BUFFER_SELECT_BIT = 5
+}
+
+enum
+{
+ /// use fixed fileset
+ IOSQE_FIXED_FILE = 1U << IOSQE_FIXED_FILE_BIT,
+ /// issue after inflight IO
+ IOSQE_IO_DRAIN = 1U << IOSQE_IO_DRAIN_BIT,
+ /// links next sqe
+ IOSQE_IO_LINK = 1U << IOSQE_IO_LINK_BIT,
+ /// like LINK, but stronger
+ IOSQE_IO_HARDLINK = 1U << IOSQE_IO_HARDLINK_BIT,
+ /// always go async
+ IOSQE_ASYNC = 1U << IOSQE_ASYNC_BIT,
+ /// select buffer from sqe.buf_group
+ IOSQE_BUFFER_SELECT = 1U << IOSQE_BUFFER_SELECT_BIT,
+}
+
+/**
+ * io_uring_setup() flags
+ */
+enum
+{
+ /// io_context is polled
+ IORING_SETUP_IOPOLL = 1U << 0,
+ /// SQ poll thread
+ IORING_SETUP_SQPOLL = 1U << 1,
+ /// sq_thread_cpu is valid
+ IORING_SETUP_SQ_AFF = 1U << 2,
+ /// app defines CQ size
+ IORING_SETUP_CQSIZE = 1U << 3,
+ /// clamp SQ/CQ ring sizes
+ IORING_SETUP_CLAMP = 1U << 4,
+ /// attach to existing wq
+ IORING_SETUP_ATTACH_WQ = 1U << 5,
+ /// start with ring disabled
+ IORING_SETUP_R_DISABLED = 1U << 6,
+}
+
+enum
+{
+ IORING_OP_NOP = 0,
+ IORING_OP_READV = 1,
+ IORING_OP_WRITEV = 2,
+ IORING_OP_FSYNC = 3,
+ IORING_OP_READ_FIXED = 4,
+ IORING_OP_WRITE_FIXED = 5,
+ IORING_OP_POLL_ADD = 6,
+ IORING_OP_POLL_REMOVE = 7,
+ IORING_OP_SYNC_FILE_RANGE = 8,
+ IORING_OP_SENDMSG = 9,
+ IORING_OP_RECVMSG = 10,
+ IORING_OP_TIMEOUT = 11,
+ IORING_OP_TIMEOUT_REMOVE = 12,
+ IORING_OP_ACCEPT = 13,
+ IORING_OP_ASYNC_CANCEL = 14,
+ IORING_OP_LINK_TIMEOUT = 15,
+ IORING_OP_CONNECT = 16,
+ IORING_OP_FALLOCATE = 17,
+ IORING_OP_OPENAT = 18,
+ IORING_OP_CLOSE = 19,
+ IORING_OP_FILES_UPDATE = 20,
+ IORING_OP_STATX = 21,
+ IORING_OP_READ = 22,
+ IORING_OP_WRITE = 23,
+ IORING_OP_FADVISE = 24,
+ IORING_OP_MADVISE = 25,
+ IORING_OP_SEND = 26,
+ IORING_OP_RECV = 27,
+ IORING_OP_OPENAT2 = 28,
+ IORING_OP_EPOLL_CTL = 29,
+ IORING_OP_SPLICE = 30,
+ IORING_OP_PROVIDE_BUFFERS = 31,
+ IORING_OP_REMOVE_BUFFERS = 32,
+ IORING_OP_TEE = 33,
+ IORING_OP_SHUTDOWN = 34,
+ IORING_OP_RENAMEAT = 35,
+ IORING_OP_UNLINKAT = 36,
+
+ IORING_OP_LAST = 37
+}
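+
+/*
+ * Illustrative sketch (not part of the kernel header): filling a submission
+ * queue entry for a read, assuming `fd` is an open file descriptor and `buf`
+ * is a ubyte[] destination buffer. Ring setup and submission are omitted.
+ * ---
+ * io_uring_sqe sqe;
+ * sqe.opcode = IORING_OP_READ;
+ * sqe.fd = fd;                       // file to read from
+ * sqe.addr = cast(ulong) buf.ptr;    // destination buffer
+ * sqe.len = cast(uint) buf.length;   // number of bytes to read
+ * sqe.off = 0;                       // file offset
+ * sqe.user_data = 42;                // echoed back in the matching cqe
+ * ---
+ */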
+
+enum
+{
+ IORING_FSYNC_DATASYNC = 1U << 0,
+}
+
+enum
+{
+ IORING_TIMEOUT_ABS = 1U << 0,
+ IORING_TIMEOUT_UPDATE = 1U << 1,
+}
+
+enum SPLICE_F_FD_IN_FIXED = 1U << 31;
+
+/**
+ * IO completion data structure (Completion Queue Entry)
+ */
+struct io_uring_cqe
+{
+ /// submission passed back
+ ulong user_data;
+ /// result code for this event
+ int res;
+
+ uint flags;
+}
+
+/**
+ * If set, the upper 16 bits are the buffer ID
+ */
+enum IORING_CQE_F_BUFFER = 1U << 0;
+
+enum
+{
+ IORING_CQE_BUFFER_SHIFT = 16,
+}
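+
+/*
+ * Illustrative sketch (not part of the kernel header): when IORING_CQE_F_BUFFER
+ * is set on a completion `cqe`, the ID of the provided buffer that was used is
+ * carried in the upper 16 bits of cqe.flags:
+ * ---
+ * if (cqe.flags & IORING_CQE_F_BUFFER)
+ * {
+ *     ushort bid = cast(ushort) (cqe.flags >> IORING_CQE_BUFFER_SHIFT);
+ *     // bid identifies a buffer registered via IORING_OP_PROVIDE_BUFFERS
+ * }
+ * ---
+ */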
+
+/**
+ * Magic offsets for the application to mmap the data it needs
+ */
+enum
+{
+ IORING_OFF_SQ_RING = 0UL,
+ IORING_OFF_CQ_RING = 0x8000000UL,
+ IORING_OFF_SQES = 0x10000000UL,
+}
+
+/**
+ * Filled with the offset for mmap(2)
+ */
+struct io_sqring_offsets
+{
+ uint head;
+ uint tail;
+ uint ring_mask;
+ uint ring_entries;
+ uint flags;
+ uint dropped;
+ uint array;
+ uint resv1;
+ ulong resv2;
+}
+
+enum
+{
+ /// needs io_uring_enter wakeup
+ IORING_SQ_NEED_WAKEUP = 1U << 0,
+ /// CQ ring is overflown
+ IORING_SQ_CQ_OVERFLOW = 1U << 1,
+}
+
+struct io_cqring_offsets
+{
+ uint head;
+ uint tail;
+ uint ring_mask;
+ uint ring_entries;
+ uint overflow;
+ uint cqes;
+ uint flags;
+ uint resv1;
+ ulong resv2;
+}
+
+enum
+{
+ /// disable eventfd notifications
+ IORING_CQ_EVENTFD_DISABLED = 1U << 0,
+}
+
+/**
+ * io_uring_enter(2) flags
+ */
+enum
+{
+ IORING_ENTER_GETEVENTS = 1U << 0,
+ IORING_ENTER_SQ_WAKEUP = 1U << 1,
+ IORING_ENTER_SQ_WAIT = 1U << 2,
+ IORING_ENTER_EXT_ARG = 1U << 3,
+}
+
+/**
+ * Passed in for io_uring_setup(2)
+ */
+struct io_uring_params
+{
+ uint sq_entries;
+ uint cq_entries;
+ uint flags;
+ uint sq_thread_cpu;
+ uint sq_thread_idle;
+ uint features;
+ uint wq_fd;
+ uint[3] resv;
+ io_sqring_offsets sq_off;
+ io_cqring_offsets cq_off;
+}
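+
+/*
+ * Illustrative sketch (not part of the kernel header): after io_uring_setup(2)
+ * fills in `params` and returns a ring file descriptor (no wrapper for that
+ * syscall is declared in this module), the submission queue ring is mapped at
+ * the magic IORING_OFF_SQ_RING offset and indexed through params.sq_off.
+ * mmap, PROT_* and MAP_SHARED come from core.sys.posix.sys.mman.
+ * ---
+ * io_uring_params params;
+ * int ring_fd;                       // from io_uring_setup(entries, &params)
+ * size_t sq_len = params.sq_off.array + params.sq_entries * uint.sizeof;
+ * void* sq_ptr = mmap(null, sq_len, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *                     ring_fd, IORING_OFF_SQ_RING);
+ * uint* sq_head = cast(uint*) (cast(ubyte*) sq_ptr + params.sq_off.head);
+ * uint* sq_tail = cast(uint*) (cast(ubyte*) sq_ptr + params.sq_off.tail);
+ * ---
+ */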
+
+enum
+{
+ IORING_FEAT_SINGLE_MMAP = 1U << 0,
+ IORING_FEAT_NODROP = 1U << 1,
+ IORING_FEAT_SUBMIT_STABLE = 1U << 2,
+ IORING_FEAT_RW_CUR_POS = 1U << 3,
+ IORING_FEAT_CUR_PERSONALITY = 1U << 4,
+ IORING_FEAT_FAST_POLL = 1U << 5,
+ IORING_FEAT_POLL_32BITS = 1U << 6,
+ IORING_FEAT_SQPOLL_NONFIXED = 1U << 7,
+ IORING_FEAT_EXT_ARG = 1U << 8,
+}
+
+/**
+ * io_uring_register(2) opcodes and arguments
+ */
+enum
+{
+ IORING_REGISTER_BUFFERS = 0,
+ IORING_UNREGISTER_BUFFERS = 1,
+ IORING_REGISTER_FILES = 2,
+ IORING_UNREGISTER_FILES = 3,
+ IORING_REGISTER_EVENTFD = 4,
+ IORING_UNREGISTER_EVENTFD = 5,
+ IORING_REGISTER_FILES_UPDATE = 6,
+ IORING_REGISTER_EVENTFD_ASYNC = 7,
+ IORING_REGISTER_PROBE = 8,
+ IORING_REGISTER_PERSONALITY = 9,
+ IORING_UNREGISTER_PERSONALITY = 10,
+ IORING_REGISTER_RESTRICTIONS = 11,
+ IORING_REGISTER_ENABLE_RINGS = 12,
+
+ IORING_REGISTER_LAST = 13
+}
+
+struct io_uring_files_update
+{
+ uint offset;
+ uint resv;
+ ulong fds;
+}
+
+enum IO_URING_OP_SUPPORTED = 1U << 0;
+
+struct io_uring_probe_op
+{
+ ubyte op;
+ ubyte resv;
+
+ /// IO_URING_OP_* flags
+ ushort flags;
+ uint resv2;
+}
+
+struct io_uring_probe
+{
+ /// last opcode supported
+ ubyte last_op;
+
+ /// length of ops[] array below
+ ubyte ops_len;
+
+ ushort resv;
+ uint[3] resv2;
+ io_uring_probe_op[0] ops;
+}
+
+struct io_uring_restriction
+{
+ ushort opcode;
+
+ union
+ {
+ ubyte register_op;
+ ubyte sqe_op;
+ ubyte sqe_flags;
+ }
+
+ ubyte resv;
+ uint[3] resv2;
+}
+
+enum
+{
+ /// Allow an io_uring_register(2) opcode
+ IORING_RESTRICTION_REGISTER_OP = 0,
+
+ /// Allow an sqe opcode
+ IORING_RESTRICTION_SQE_OP = 1,
+
+ /// Allow sqe flags
+ IORING_RESTRICTION_SQE_FLAGS_ALLOWED = 2,
+
+ /// Require sqe flags (these flags must be set on each submission)
+ IORING_RESTRICTION_SQE_FLAGS_REQUIRED = 3,
+
+ IORING_RESTRICTION_LAST = 4
+}
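+
+/*
+ * Illustrative sketch (not part of the kernel header): on a ring created with
+ * IORING_SETUP_R_DISABLED, restrictions limit which sqe opcodes are accepted
+ * once the ring is enabled. The array below would be passed to
+ * io_uring_register(2) with IORING_REGISTER_RESTRICTIONS (no wrapper for that
+ * syscall is declared in this module), followed by IORING_REGISTER_ENABLE_RINGS.
+ * ---
+ * io_uring_restriction[2] res;
+ * res[0].opcode = IORING_RESTRICTION_SQE_OP;
+ * res[0].sqe_op = IORING_OP_READ;    // allow read
+ * res[1].opcode = IORING_RESTRICTION_SQE_OP;
+ * res[1].sqe_op = IORING_OP_WRITE;   // allow write
+ * ---
+ */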
+
+struct io_uring_getevents_arg
+{
+ ulong sigmask;
+ uint sigmask_sz;
+ uint pad;
+ ulong ts;
+}
diff --git a/libphobos/libdruntime/core/sys/linux/perf_event.d b/libphobos/libdruntime/core/sys/linux/perf_event.d
new file mode 100644
index 0000000..805b47e
--- /dev/null
+++ b/libphobos/libdruntime/core/sys/linux/perf_event.d
@@ -0,0 +1,2515 @@
+/**
+ * D header file for perf_event_open system call.
+ *
+ * Converted from linux userspace header, comments included.
+ *
+ * Authors: Max Haughton
+ */
+module core.sys.linux.perf_event;
+version (linux):
+extern (C):
+@nogc:
+nothrow:
+@system:
+
+import core.sys.posix.sys.ioctl;
+import core.sys.posix.unistd;
+
+version (HPPA) version = HPPA_Any;
+version (HPPA64) version = HPPA_Any;
+version (PPC) version = PPC_Any;
+version (PPC64) version = PPC_Any;
+version (RISCV32) version = RISCV_Any;
+version (RISCV64) version = RISCV_Any;
+version (S390) version = IBMZ_Any;
+version (SPARC) version = SPARC_Any;
+version (SPARC64) version = SPARC_Any;
+version (SystemZ) version = IBMZ_Any;
+
+version (X86_64)
+{
+ version (D_X32)
+ enum __NR_perf_event_open = 0x40000000 + 298;
+ else
+ enum __NR_perf_event_open = 298;
+}
+else version (X86)
+{
+ enum __NR_perf_event_open = 336;
+}
+else version (ARM)
+{
+ enum __NR_perf_event_open = 364;
+}
+else version (AArch64)
+{
+ enum __NR_perf_event_open = 241;
+}
+else version (HPPA_Any)
+{
+ enum __NR_perf_event_open = 318;
+}
+else version (IBMZ_Any)
+{
+ enum __NR_perf_event_open = 331;
+}
+else version (MIPS32)
+{
+ enum __NR_perf_event_open = 4333;
+}
+else version (MIPS64)
+{
+ version (MIPS_N32)
+ enum __NR_perf_event_open = 6296;
+ else version (MIPS_N64)
+ enum __NR_perf_event_open = 5292;
+ else
+ static assert(0, "Architecture not supported");
+}
+else version (PPC_Any)
+{
+ enum __NR_perf_event_open = 319;
+}
+else version (RISCV_Any)
+{
+ enum __NR_perf_event_open = 241;
+}
+else version (SPARC_Any)
+{
+ enum __NR_perf_event_open = 327;
+}
+else
+{
+ static assert(0, "Architecture not supported");
+}
+extern (C) extern long syscall(long __sysno, ...);
+static long perf_event_open(perf_event_attr* hw_event, pid_t pid, int cpu, int group_fd, ulong flags)
+{
+ return syscall(__NR_perf_event_open, hw_event, pid, cpu, group_fd, flags);
+}
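+
+/*
+ * Illustrative sketch (not part of the kernel header): counting retired
+ * instructions for a stretch of code in the calling process, following the
+ * perf_event_open(2) man page example. ioctl and read come from the modules
+ * imported above; the PERF_EVENT_IOC_* ioctls are defined further below.
+ * ---
+ * perf_event_attr attr;
+ * attr.type = perf_type_id.PERF_TYPE_HARDWARE;
+ * attr.size = cast(uint) perf_event_attr.sizeof;
+ * attr.config = perf_hw_id.PERF_COUNT_HW_INSTRUCTIONS;
+ * attr.disabled = 1;
+ * attr.exclude_kernel = 1;
+ * attr.exclude_hv = 1;
+ *
+ * int fd = cast(int) perf_event_open(&attr, 0, -1, -1, 0);
+ * ioctl(fd, PERF_EVENT_IOC_RESET, 0);
+ * ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
+ * // ... code under measurement ...
+ * ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
+ * ulong count;
+ * read(fd, &count, count.sizeof);
+ * ---
+ */
+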
+/*
+ * User-space ABI bits:
+ */
+
+/**
+ * attr.type
+ */
+enum perf_type_id
+{
+ PERF_TYPE_HARDWARE = 0,
+ PERF_TYPE_SOFTWARE = 1,
+ PERF_TYPE_TRACEPOINT = 2,
+ PERF_TYPE_HW_CACHE = 3,
+ PERF_TYPE_RAW = 4,
+ PERF_TYPE_BREAKPOINT = 5,
+
+ PERF_TYPE_MAX = 6 /* non-ABI */
+}
+/**
+ * Generalized performance event event_id types, used by the
+ * attr.event_id parameter of the sys_perf_event_open()
+ * syscall:
+ */
+enum perf_hw_id
+{
+ ///
+ PERF_COUNT_HW_CPU_CYCLES = 0,
+ ///
+ PERF_COUNT_HW_INSTRUCTIONS = 1,
+ ///
+ PERF_COUNT_HW_CACHE_REFERENCES = 2,
+ ///
+ PERF_COUNT_HW_CACHE_MISSES = 3,
+ ///
+ PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
+ ///
+ PERF_COUNT_HW_BRANCH_MISSES = 5,
+ ///
+ PERF_COUNT_HW_BUS_CYCLES = 6,
+ ///
+ PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
+ ///
+ PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
+ ///
+ PERF_COUNT_HW_REF_CPU_CYCLES = 9,
+ ///
+ PERF_COUNT_HW_MAX = 10 /* non-ABI */
+}
+
+/**
+ * Generalized hardware cache events:
+ *
+ * { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
+ * { read, write, prefetch } x
+ * { accesses, misses }
+ */
+enum perf_hw_cache_id
+{
+ ///
+ PERF_COUNT_HW_CACHE_L1D = 0,
+ ///
+ PERF_COUNT_HW_CACHE_L1I = 1,
+ ///
+ PERF_COUNT_HW_CACHE_LL = 2,
+ ///
+ PERF_COUNT_HW_CACHE_DTLB = 3,
+ ///
+ PERF_COUNT_HW_CACHE_ITLB = 4,
+ ///
+ PERF_COUNT_HW_CACHE_BPU = 5,
+ ///
+ PERF_COUNT_HW_CACHE_NODE = 6,
+ ///
+ PERF_COUNT_HW_CACHE_MAX = 7 /* non-ABI */
+}
+///
+enum perf_hw_cache_op_id
+{
+ ///
+ PERF_COUNT_HW_CACHE_OP_READ = 0,
+ ///
+ PERF_COUNT_HW_CACHE_OP_WRITE = 1,
+ ///
+ PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
+ ///
+ PERF_COUNT_HW_CACHE_OP_MAX = 3 /* non-ABI */
+}
+///
+enum perf_hw_cache_op_result_id
+{
+ ///
+ PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
+ ///
+ PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
+ ///
+ PERF_COUNT_HW_CACHE_RESULT_MAX = 2 /* non-ABI */
+}
+
+/**
+ * Special "software" events provided by the kernel, even if the hardware
+ * does not support performance events. These events measure various
+ * physical and sw events of the kernel (and allow the profiling of them as
+ * well):
+ */
+enum perf_sw_ids
+{
+ ///
+ PERF_COUNT_SW_CPU_CLOCK = 0,
+ ///
+ PERF_COUNT_SW_TASK_CLOCK = 1,
+ ///
+ PERF_COUNT_SW_PAGE_FAULTS = 2,
+ ///
+ PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
+ ///
+ PERF_COUNT_SW_CPU_MIGRATIONS = 4,
+ ///
+ PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
+ ///
+ PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
+ ///
+ PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
+ ///
+ PERF_COUNT_SW_EMULATION_FAULTS = 8,
+ ///
+ PERF_COUNT_SW_DUMMY = 9,
+ ///
+ PERF_COUNT_SW_BPF_OUTPUT = 10,
+ ///
+ PERF_COUNT_SW_MAX = 11 /* non-ABI */
+}
+
+/**
+ * Bits that can be set in attr.sample_type to request information
+ * in the overflow packets.
+ */
+enum perf_event_sample_format
+{
+ ///
+ PERF_SAMPLE_IP = 1U << 0,
+ ///
+ PERF_SAMPLE_TID = 1U << 1,
+ ///
+ PERF_SAMPLE_TIME = 1U << 2,
+ ///
+ PERF_SAMPLE_ADDR = 1U << 3,
+ ///
+ PERF_SAMPLE_READ = 1U << 4,
+ ///
+ PERF_SAMPLE_CALLCHAIN = 1U << 5,
+ ///
+ PERF_SAMPLE_ID = 1U << 6,
+ ///
+ PERF_SAMPLE_CPU = 1U << 7,
+ ///
+ PERF_SAMPLE_PERIOD = 1U << 8,
+ ///
+ PERF_SAMPLE_STREAM_ID = 1U << 9,
+ ///
+ PERF_SAMPLE_RAW = 1U << 10,
+ ///
+ PERF_SAMPLE_BRANCH_STACK = 1U << 11,
+ ///
+ PERF_SAMPLE_REGS_USER = 1U << 12,
+ ///
+ PERF_SAMPLE_STACK_USER = 1U << 13,
+ ///
+ PERF_SAMPLE_WEIGHT = 1U << 14,
+ ///
+ PERF_SAMPLE_DATA_SRC = 1U << 15,
+ ///
+ PERF_SAMPLE_IDENTIFIER = 1U << 16,
+ ///
+ PERF_SAMPLE_TRANSACTION = 1U << 17,
+ ///
+ PERF_SAMPLE_REGS_INTR = 1U << 18,
+ ///
+ PERF_SAMPLE_PHYS_ADDR = 1U << 19,
+ ///
+ PERF_SAMPLE_MAX = 1U << 20 /* non-ABI */
+}
+
+/**
+ * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
+ *
+ * If the user does not pass priv level information via branch_sample_type,
+ * the kernel uses the event's priv level. Branch and event priv levels do
+ * not have to match. Branch priv level is checked for permissions.
+ *
+ * The branch types can be combined, however BRANCH_ANY covers all types
+ * of branches and therefore it supersedes all the other types.
+ */
+enum perf_branch_sample_type_shift
+{
+ PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /** user branches */
+ PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /** kernel branches */
+ PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /** hypervisor branches */
+
+ PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /** any branch types */
+ PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /** any call branch */
+ PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /** any return branch */
+ PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /** indirect calls */
+ PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /** transaction aborts */
+ PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /** in transaction */
+ PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /** not in transaction */
+ PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /** conditional branches */
+
+ PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /** call/ret stack */
+ PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /** indirect jumps */
+ PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, /** direct call */
+
+ PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, /** no flags */
+ PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, /** no cycles */
+
+ PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, /** save branch type */
+
+ PERF_SAMPLE_BRANCH_MAX_SHIFT = 17 /** non-ABI */
+}
+///
+enum perf_branch_sample_type
+{
+ PERF_SAMPLE_BRANCH_USER = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_USER_SHIFT,
+ PERF_SAMPLE_BRANCH_KERNEL = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
+ PERF_SAMPLE_BRANCH_HV = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_HV_SHIFT,
+ PERF_SAMPLE_BRANCH_ANY = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_ANY_SHIFT,
+ PERF_SAMPLE_BRANCH_ANY_CALL = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
+ PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
+ PERF_SAMPLE_BRANCH_IND_CALL = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
+ PERF_SAMPLE_BRANCH_ABORT_TX = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
+ PERF_SAMPLE_BRANCH_IN_TX = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
+ PERF_SAMPLE_BRANCH_NO_TX = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
+ PERF_SAMPLE_BRANCH_COND = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_COND_SHIFT,
+ PERF_SAMPLE_BRANCH_CALL_STACK = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
+ PERF_SAMPLE_BRANCH_IND_JUMP = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
+ PERF_SAMPLE_BRANCH_CALL = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_CALL_SHIFT,
+ PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
+ PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,
+ PERF_SAMPLE_BRANCH_TYPE_SAVE = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,
+ PERF_SAMPLE_BRANCH_MAX = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_MAX_SHIFT
+}
+
+/**
+ * Common flow change classification
+ */
+enum
+{
+ PERF_BR_UNKNOWN = 0, /** unknown */
+ PERF_BR_COND = 1, /** conditional */
+ PERF_BR_UNCOND = 2, /** unconditional */
+ PERF_BR_IND = 3, /** indirect */
+ PERF_BR_CALL = 4, /** function call */
+ PERF_BR_IND_CALL = 5, /** indirect function call */
+ PERF_BR_RET = 6, /** function return */
+ PERF_BR_SYSCALL = 7, /** syscall */
+ PERF_BR_SYSRET = 8, /** syscall return */
+ PERF_BR_COND_CALL = 9, /** conditional function call */
+ PERF_BR_COND_RET = 10, /** conditional function return */
+ PERF_BR_MAX = 11
+}
+
+///
+enum PERF_SAMPLE_BRANCH_PLM_ALL = perf_branch_sample_type.PERF_SAMPLE_BRANCH_USER
+ | perf_branch_sample_type.PERF_SAMPLE_BRANCH_KERNEL
+ | perf_branch_sample_type.PERF_SAMPLE_BRANCH_HV;
+
+/**
+ * Values to determine ABI of the registers dump.
+ */
+enum perf_sample_regs_abi
+{
+ ///
+ PERF_SAMPLE_REGS_ABI_NONE = 0,
+ ///
+ PERF_SAMPLE_REGS_ABI_32 = 1,
+ ///
+ PERF_SAMPLE_REGS_ABI_64 = 2
+}
+
+/**
+ * Values for the memory transaction event qualifier, mostly for
+ * abort events. Multiple bits can be set.
+ */
+enum
+{
+ PERF_TXN_ELISION = 1 << 0, /** From elision */
+ PERF_TXN_TRANSACTION = 1 << 1, /** From transaction */
+ PERF_TXN_SYNC = 1 << 2, /** Instruction is related */
+ PERF_TXN_ASYNC = 1 << 3, /** Instruction not related */
+ PERF_TXN_RETRY = 1 << 4, /** Retry possible */
+ PERF_TXN_CONFLICT = 1 << 5, /** Conflict abort */
+ PERF_TXN_CAPACITY_WRITE = 1 << 6, /** Capacity write abort */
+ PERF_TXN_CAPACITY_READ = 1 << 7, /** Capacity read abort */
+
+ PERF_TXN_MAX = 1 << 8, /** non-ABI */
+
+ /** bits 32..63 are reserved for the abort code */
+
+ ///PERF_TXN_ABORT_MASK = 0xffffffff << 32,
+ PERF_TXN_ABORT_SHIFT = 32
+}
+
+/**
+ * The format of the data returned by read() on a perf event fd,
+ * as specified by attr.read_format:
+ * ---
+ * struct read_format {
+ * { u64 value;
+ * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
+ * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
+ * { u64 id; } && PERF_FORMAT_ID
+ * } && !PERF_FORMAT_GROUP
+ *
+ * { u64 nr;
+ * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
+ * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
+ * { u64 value;
+ * { u64 id; } && PERF_FORMAT_ID
+ * } cntr[nr];
+ * } && PERF_FORMAT_GROUP
+ * };
+ * ---
+ */
+enum perf_event_read_format
+{
+ ///
+ PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
+ ///
+ PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
+ ///
+ PERF_FORMAT_ID = 1U << 2,
+ ///
+ PERF_FORMAT_GROUP = 1U << 3,
+ PERF_FORMAT_MAX = 1U << 4 /** non-ABI */
+}
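+
+/*
+ * Illustrative sketch (not part of the kernel header): for a non-group event
+ * opened with read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+ * PERF_FORMAT_TOTAL_TIME_RUNNING, the layout documented above reduces to the
+ * hypothetical helper struct below. The two time fields allow scaling the
+ * count when the event was multiplexed:
+ * scaled = value * time_enabled / time_running.
+ * ---
+ * struct ReadFormat
+ * {
+ *     ulong value;         // event count
+ *     ulong time_enabled;  // PERF_FORMAT_TOTAL_TIME_ENABLED
+ *     ulong time_running;  // PERF_FORMAT_TOTAL_TIME_RUNNING
+ * }
+ * ---
+ */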
+
+enum PERF_ATTR_SIZE_VER0 = 64; /** sizeof first published struct */
+enum PERF_ATTR_SIZE_VER1 = 72; /** add: config2 */
+enum PERF_ATTR_SIZE_VER2 = 80; /** add: branch_sample_type */
+enum PERF_ATTR_SIZE_VER3 = 96; /** add: sample_regs_user */
+/* add: sample_stack_user */
+enum PERF_ATTR_SIZE_VER4 = 104; /** add: sample_regs_intr */
+enum PERF_ATTR_SIZE_VER5 = 112; /** add: aux_watermark */
+
+/**
+ * Hardware event_id to monitor via a performance monitoring event:
+ *
+ * @sample_max_stack: Max number of frame pointers in a callchain,
+ * should be < /proc/sys/kernel/perf_event_max_stack
+ */
+struct perf_event_attr
+{
+ /**
+ * Major type: hardware/software/tracepoint/etc.
+ */
+ uint type;
+
+ /**
+ * Size of the attr structure, for fwd/bwd compat.
+ */
+ uint size;
+
+ /**
+ * Type specific configuration information.
+ */
+ ulong config;
+ ///
+ union
+ {
+ ///
+ ulong sample_period;
+ ///
+ ulong sample_freq;
+ }
+ ///
+ ulong sample_type;
+ ///
+ ulong read_format;
+
+ // mixin(bitfields!(
+ // ulong, "disabled", 1,
+ // ulong, "inherit", 1,
+ // ulong, "pinned", 1,
+ // ulong, "exclusive", 1,
+ // ulong, "exclude_user", 1,
+ // ulong, "exclude_kernel", 1,
+ // ulong, "exclude_hv", 1,
+ // ulong, "exclude_idle", 1,
+ // ulong, "mmap", 1,
+ // ulong, "comm", 1,
+ // ulong, "freq", 1,
+ // ulong, "inherit_stat", 1,
+ // ulong, "enable_on_exec", 1,
+ // ulong, "task", 1,
+ // ulong, "watermark", 1,
+ // ulong, "precise_ip", 2,
+ // ulong, "mmap_data", 1,
+ // ulong, "sample_id_all", 1,
+ // ulong, "exclude_host", 1,
+ // ulong, "exclude_guest", 1,
+ // ulong, "exclude_callchain_kernel", 1,
+ // ulong, "exclude_callchain_user", 1,
+ // ulong, "mmap2", 1,
+ // ulong, "comm_exec", 1,
+ // ulong, "use_clockid", 1,
+ // ulong, "context_switch", 1,
+ // ulong, "write_backward", 1,
+ // ulong, "namespaces", 1,
+ // ulong, "__reserved_1", 35));
+ private ulong perf_event_attr_bitmanip;
+ ///
+ @property ulong disabled() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 1U) >> 0U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void disabled(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= disabled_min,
+ "Value is smaller than the minimum value of bitfield 'disabled'");
+ assert(v <= disabled_max,
+ "Value is greater than the maximum value of bitfield 'disabled'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 1U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 0U) & 1U));
+ }
+
+ enum ulong disabled_min = cast(ulong) 0U;
+ enum ulong disabled_max = cast(ulong) 1U;
+ ///
+ @property ulong inherit() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 2U) >> 1U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void inherit(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= inherit_min,
+ "Value is smaller than the minimum value of bitfield 'inherit'");
+ assert(v <= inherit_max,
+ "Value is greater than the maximum value of bitfield 'inherit'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 2U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 1U) & 2U));
+ }
+
+ enum ulong inherit_min = cast(ulong) 0U;
+ enum ulong inherit_max = cast(ulong) 1U;
+ ///
+ @property ulong pinned() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 4U) >> 2U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void pinned(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= pinned_min,
+ "Value is smaller than the minimum value of bitfield 'pinned'");
+ assert(v <= pinned_max,
+ "Value is greater than the maximum value of bitfield 'pinned'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 4U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 2U) & 4U));
+ }
+
+ enum ulong pinned_min = cast(ulong) 0U;
+ enum ulong pinned_max = cast(ulong) 1U;
+ ///
+ @property ulong exclusive() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 8U) >> 3U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void exclusive(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= exclusive_min,
+ "Value is smaller than the minimum value of bitfield 'exclusive'");
+ assert(v <= exclusive_max,
+ "Value is greater than the maximum value of bitfield 'exclusive'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 8U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 3U) & 8U));
+ }
+
+ enum ulong exclusive_min = cast(ulong) 0U;
+ enum ulong exclusive_max = cast(ulong) 1U;
+ ///
+ @property ulong exclude_user() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 16U) >> 4U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void exclude_user(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= exclude_user_min,
+ "Value is smaller than the minimum value of bitfield 'exclude_user'");
+ assert(v <= exclude_user_max,
+ "Value is greater than the maximum value of bitfield 'exclude_user'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 16U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 4U) & 16U));
+ }
+
+ enum ulong exclude_user_min = cast(ulong) 0U;
+ enum ulong exclude_user_max = cast(ulong) 1U;
+ ///
+ @property ulong exclude_kernel() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 32U) >> 5U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void exclude_kernel(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= exclude_kernel_min,
+ "Value is smaller than the minimum value of bitfield 'exclude_kernel'");
+ assert(v <= exclude_kernel_max,
+ "Value is greater than the maximum value of bitfield 'exclude_kernel'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 32U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 5U) & 32U));
+ }
+
+ enum ulong exclude_kernel_min = cast(ulong) 0U;
+ enum ulong exclude_kernel_max = cast(ulong) 1U;
+ ///
+ @property ulong exclude_hv() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 64U) >> 6U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void exclude_hv(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= exclude_hv_min,
+ "Value is smaller than the minimum value of bitfield 'exclude_hv'");
+ assert(v <= exclude_hv_max,
+ "Value is greater than the maximum value of bitfield 'exclude_hv'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 64U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 6U) & 64U));
+ }
+
+ enum ulong exclude_hv_min = cast(ulong) 0U;
+ enum ulong exclude_hv_max = cast(ulong) 1U;
+ ///
+ @property ulong exclude_idle() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 128U) >> 7U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void exclude_idle(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= exclude_idle_min,
+ "Value is smaller than the minimum value of bitfield 'exclude_idle'");
+ assert(v <= exclude_idle_max,
+ "Value is greater than the maximum value of bitfield 'exclude_idle'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 128U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 7U) & 128U));
+ }
+
+ enum ulong exclude_idle_min = cast(ulong) 0U;
+ enum ulong exclude_idle_max = cast(ulong) 1U;
+ ///
+ @property ulong mmap() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 256U) >> 8U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void mmap(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= mmap_min, "Value is smaller than the minimum value of bitfield 'mmap'");
+ assert(v <= mmap_max, "Value is greater than the maximum value of bitfield 'mmap'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 256U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 8U) & 256U));
+ }
+
+ enum ulong mmap_min = cast(ulong) 0U;
+ enum ulong mmap_max = cast(ulong) 1U;
+ ///
+ @property ulong comm() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 512U) >> 9U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void comm(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= comm_min, "Value is smaller than the minimum value of bitfield 'comm'");
+ assert(v <= comm_max, "Value is greater than the maximum value of bitfield 'comm'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 512U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 9U) & 512U));
+ }
+
+ enum ulong comm_min = cast(ulong) 0U;
+ enum ulong comm_max = cast(ulong) 1U;
+ ///
+ @property ulong freq() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 1024U) >> 10U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void freq(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= freq_min, "Value is smaller than the minimum value of bitfield 'freq'");
+ assert(v <= freq_max, "Value is greater than the maximum value of bitfield 'freq'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 1024U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 10U) & 1024U));
+ }
+
+ enum ulong freq_min = cast(ulong) 0U;
+ enum ulong freq_max = cast(ulong) 1U;
+ ///
+ @property ulong inherit_stat() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 2048U) >> 11U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void inherit_stat(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= inherit_stat_min,
+ "Value is smaller than the minimum value of bitfield 'inherit_stat'");
+ assert(v <= inherit_stat_max,
+ "Value is greater than the maximum value of bitfield 'inherit_stat'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 2048U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 11U) & 2048U));
+ }
+
+ enum ulong inherit_stat_min = cast(ulong) 0U;
+ enum ulong inherit_stat_max = cast(ulong) 1U;
+ ///
+ @property ulong enable_on_exec() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 4096U) >> 12U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void enable_on_exec(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= enable_on_exec_min,
+ "Value is smaller than the minimum value of bitfield 'enable_on_exec'");
+ assert(v <= enable_on_exec_max,
+ "Value is greater than the maximum value of bitfield 'enable_on_exec'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 4096U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 12U) & 4096U));
+ }
+
+ enum ulong enable_on_exec_min = cast(ulong) 0U;
+ enum ulong enable_on_exec_max = cast(ulong) 1U;
+ ///
+ @property ulong task() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 8192U) >> 13U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void task(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= task_min, "Value is smaller than the minimum value of bitfield 'task'");
+ assert(v <= task_max, "Value is greater than the maximum value of bitfield 'task'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 8192U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 13U) & 8192U));
+ }
+
+ enum ulong task_min = cast(ulong) 0U;
+ enum ulong task_max = cast(ulong) 1U;
+ ///
+ @property ulong watermark() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 16384U) >> 14U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void watermark(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= watermark_min,
+ "Value is smaller than the minimum value of bitfield 'watermark'");
+ assert(v <= watermark_max,
+ "Value is greater than the maximum value of bitfield 'watermark'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 16384U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 14U) & 16384U));
+ }
+
+ enum ulong watermark_min = cast(ulong) 0U;
+ enum ulong watermark_max = cast(ulong) 1U;
+ ///
+ @property ulong precise_ip() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 98304U) >> 15U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void precise_ip(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= precise_ip_min,
+ "Value is smaller than the minimum value of bitfield 'precise_ip'");
+ assert(v <= precise_ip_max,
+ "Value is greater than the maximum value of bitfield 'precise_ip'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 98304U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 15U) & 98304U));
+ }
+
+ enum ulong precise_ip_min = cast(ulong) 0U;
+ enum ulong precise_ip_max = cast(ulong) 3U;
+ ///
+ @property ulong mmap_data() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 131072U) >> 17U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void mmap_data(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= mmap_data_min,
+ "Value is smaller than the minimum value of bitfield 'mmap_data'");
+ assert(v <= mmap_data_max,
+ "Value is greater than the maximum value of bitfield 'mmap_data'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 131072U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 17U) & 131072U));
+ }
+
+ enum ulong mmap_data_min = cast(ulong) 0U;
+ enum ulong mmap_data_max = cast(ulong) 1U;
+ ///
+ @property ulong sample_id_all() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 262144U) >> 18U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void sample_id_all(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= sample_id_all_min,
+ "Value is smaller than the minimum value of bitfield 'sample_id_all'");
+ assert(v <= sample_id_all_max,
+ "Value is greater than the maximum value of bitfield 'sample_id_all'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 262144U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 18U) & 262144U));
+ }
+
+ enum ulong sample_id_all_min = cast(ulong) 0U;
+ enum ulong sample_id_all_max = cast(ulong) 1U;
+ ///
+ @property ulong exclude_host() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 524288U) >> 19U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void exclude_host(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= exclude_host_min,
+ "Value is smaller than the minimum value of bitfield 'exclude_host'");
+ assert(v <= exclude_host_max,
+ "Value is greater than the maximum value of bitfield 'exclude_host'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 524288U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 19U) & 524288U));
+ }
+
+ enum ulong exclude_host_min = cast(ulong) 0U;
+ enum ulong exclude_host_max = cast(ulong) 1U;
+ ///
+ @property ulong exclude_guest() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 1048576U) >> 20U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void exclude_guest(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= exclude_guest_min,
+ "Value is smaller than the minimum value of bitfield 'exclude_guest'");
+ assert(v <= exclude_guest_max,
+ "Value is greater than the maximum value of bitfield 'exclude_guest'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 1048576U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 20U) & 1048576U));
+ }
+
+ enum ulong exclude_guest_min = cast(ulong) 0U;
+ enum ulong exclude_guest_max = cast(ulong) 1U;
+ ///
+ @property ulong exclude_callchain_kernel() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 2097152U) >> 21U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void exclude_callchain_kernel(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= exclude_callchain_kernel_min,
+ "Value is smaller than the minimum value of bitfield 'exclude_callchain_kernel'");
+ assert(v <= exclude_callchain_kernel_max,
+ "Value is greater than the maximum value of bitfield 'exclude_callchain_kernel'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 2097152U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 21U) & 2097152U));
+ }
+
+ enum ulong exclude_callchain_kernel_min = cast(ulong) 0U;
+ enum ulong exclude_callchain_kernel_max = cast(ulong) 1U;
+ ///
+ @property ulong exclude_callchain_user() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 4194304U) >> 22U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void exclude_callchain_user(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= exclude_callchain_user_min,
+ "Value is smaller than the minimum value of bitfield 'exclude_callchain_user'");
+ assert(v <= exclude_callchain_user_max,
+ "Value is greater than the maximum value of bitfield 'exclude_callchain_user'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 4194304U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 22U) & 4194304U));
+ }
+
+ enum ulong exclude_callchain_user_min = cast(ulong) 0U;
+ enum ulong exclude_callchain_user_max = cast(ulong) 1U;
+ ///
+ @property ulong mmap2() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 8388608U) >> 23U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void mmap2(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= mmap2_min,
+ "Value is smaller than the minimum value of bitfield 'mmap2'");
+ assert(v <= mmap2_max,
+ "Value is greater than the maximum value of bitfield 'mmap2'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 8388608U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 23U) & 8388608U));
+ }
+
+ enum ulong mmap2_min = cast(ulong) 0U;
+ enum ulong mmap2_max = cast(ulong) 1U;
+ ///
+ @property ulong comm_exec() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 16777216U) >> 24U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void comm_exec(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= comm_exec_min,
+ "Value is smaller than the minimum value of bitfield 'comm_exec'");
+ assert(v <= comm_exec_max,
+ "Value is greater than the maximum value of bitfield 'comm_exec'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 16777216U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 24U) & 16777216U));
+ }
+
+ enum ulong comm_exec_min = cast(ulong) 0U;
+ enum ulong comm_exec_max = cast(ulong) 1U;
+ ///
+ @property ulong use_clockid() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 33554432U) >> 25U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void use_clockid(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= use_clockid_min,
+ "Value is smaller than the minimum value of bitfield 'use_clockid'");
+ assert(v <= use_clockid_max,
+ "Value is greater than the maximum value of bitfield 'use_clockid'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 33554432U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 25U) & 33554432U));
+ }
+
+ enum ulong use_clockid_min = cast(ulong) 0U;
+ enum ulong use_clockid_max = cast(ulong) 1U;
+ ///
+ @property ulong context_switch() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 67108864U) >> 26U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void context_switch(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= context_switch_min,
+ "Value is smaller than the minimum value of bitfield 'context_switch'");
+ assert(v <= context_switch_max,
+ "Value is greater than the maximum value of bitfield 'context_switch'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 67108864U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 26U) & 67108864U));
+ }
+
+ enum ulong context_switch_min = cast(ulong) 0U;
+ enum ulong context_switch_max = cast(ulong) 1U;
+ ///
+ @property ulong write_backward() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 134217728U) >> 27U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void write_backward(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= write_backward_min,
+ "Value is smaller than the minimum value of bitfield 'write_backward'");
+ assert(v <= write_backward_max,
+ "Value is greater than the maximum value of bitfield 'write_backward'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 134217728U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 27U) & 134217728U));
+ }
+
+ enum ulong write_backward_min = cast(ulong) 0U;
+ enum ulong write_backward_max = cast(ulong) 1U;
+ ///
+ @property ulong namespaces() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 268435456U) >> 28U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void namespaces(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= namespaces_min,
+ "Value is smaller than the minimum value of bitfield 'namespaces'");
+ assert(v <= namespaces_max,
+ "Value is greater than the maximum value of bitfield 'namespaces'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 268435456U)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 28U) & 268435456U));
+ }
+
+ enum ulong namespaces_min = cast(ulong) 0U;
+ enum ulong namespaces_max = cast(ulong) 1U;
+ ///
+ @property ulong __reserved_1() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_event_attr_bitmanip & 18446744073172680704UL) >> 29U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void __reserved_1(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= __reserved_1_min,
+ "Value is smaller than the minimum value of bitfield '__reserved_1'");
+ assert(v <= __reserved_1_max,
+ "Value is greater than the maximum value of bitfield '__reserved_1'");
+ perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
+ (perf_event_attr_bitmanip & (-1 - cast(
+ typeof(perf_event_attr_bitmanip)) 18446744073172680704UL)) | (
+ (cast(typeof(perf_event_attr_bitmanip)) v << 29U) & 18446744073172680704UL));
+ }
+
+ enum ulong __reserved_1_min = cast(ulong) 0U;
+ enum ulong __reserved_1_max = cast(ulong) 34359738367UL;
+ ///
+ union
+ {
+ uint wakeup_events; /** wakeup every n events */
+ uint wakeup_watermark; /** bytes before wakeup */
+ }
+ ///
+ uint bp_type;
+
+ union
+ {
+ ///
+ ulong bp_addr;
+ ulong config1; /** extension of config */
+ }
+
+ union
+ {
+ ///
+ ulong bp_len;
+ ulong config2; /** extension of config1 */
+ }
+
+ ulong branch_sample_type; /** enum perf_branch_sample_type */
+
+ /**
+ * Defines set of user regs to dump on samples.
+ * See asm/perf_regs.h for details.
+ */
+ ulong sample_regs_user;
+
+ /**
+ * Defines size of the user stack to dump on samples.
+ */
+ uint sample_stack_user;
+ ///
+ int clockid;
+
+ /**
+ * Defines set of regs to dump for each sample
+ * state captured on:
+ * - precise = 0: PMU interrupt
+ * - precise > 0: sampled instruction
+ *
+ * See asm/perf_regs.h for details.
+ */
+ ulong sample_regs_intr;
+
+ /**
+ * Wakeup watermark for AUX area
+ */
+ uint aux_watermark;
+ ///
+ ushort sample_max_stack;
+ /** align to __u64 */
+ ushort __reserved_2;
+}
+///
+extern (D) auto perf_flags(T)(auto ref T attr)
+{
+ return *(&attr.read_format + 1);
+}
+
+/**
+ * Ioctls that can be done on a perf event fd:
+ */
+enum PERF_EVENT_IOC_ENABLE = _IO('$', 0);
+///
+enum PERF_EVENT_IOC_DISABLE = _IO('$', 1);
+///
+enum PERF_EVENT_IOC_REFRESH = _IO('$', 2);
+///
+enum PERF_EVENT_IOC_RESET = _IO('$', 3);
+///
+enum PERF_EVENT_IOC_PERIOD = _IOW!ulong('$', 4);
+///
+enum PERF_EVENT_IOC_SET_OUTPUT = _IO('$', 5);
+///
+enum PERF_EVENT_IOC_SET_FILTER = _IOW!(char*)('$', 6);
+///
+enum PERF_EVENT_IOC_ID = _IOR!(ulong*)('$', 7);
+///
+enum PERF_EVENT_IOC_SET_BPF = _IOW!uint('$', 8);
+///
+enum PERF_EVENT_IOC_PAUSE_OUTPUT = _IOW!uint('$', 9);
+
+///
+enum perf_event_ioc_flags
+{
+ PERF_IOC_FLAG_GROUP = 1U << 0
+}
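+
+/*
+ * Illustrative sketch (not part of the kernel header): passing
+ * PERF_IOC_FLAG_GROUP as the ioctl argument applies the operation to every
+ * member of the event group led by the descriptor, e.g.
+ * ---
+ * ioctl(group_fd, PERF_EVENT_IOC_ENABLE, perf_event_ioc_flags.PERF_IOC_FLAG_GROUP);
+ * ---
+ */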
+
+/**
+ * Structure of the page that can be mapped via mmap
+ */
+struct perf_event_mmap_page
+{
+ uint version_; /** version number of this structure */
+ uint compat_version; /** lowest version this is compat with */
+
+ /**
+ * Bits needed to read the hw events in user-space.
+ * ---
+ * u32 seq, time_mult, time_shift, index, width;
+ * u64 count, enabled, running;
+ * u64 cyc, time_offset;
+ * s64 pmc = 0;
+ *
+ * do {
+ * seq = pc->lock;
+ * barrier()
+ *
+ * enabled = pc->time_enabled;
+ * running = pc->time_running;
+ *
+ * if (pc->cap_usr_time && enabled != running) {
+ * cyc = rdtsc();
+ * time_offset = pc->time_offset;
+ * time_mult = pc->time_mult;
+ * time_shift = pc->time_shift;
+ * }
+ *
+ * index = pc->index;
+ * count = pc->offset;
+ * if (pc->cap_user_rdpmc && index) {
+ * width = pc->pmc_width;
+ * pmc = rdpmc(index - 1);
+ * }
+ *
+ * barrier();
+ * } while (pc->lock != seq);
+ * ---
+ * NOTE: for obvious reasons this only works on self-monitoring
+ * processes.
+ */
+ uint lock; /** seqlock for synchronization */
+ uint index; /** hardware event identifier */
+ long offset; /** add to hardware event value */
+ ulong time_enabled; /** time event active */
+ ulong time_running; /** time event on cpu */
+ ///
+ union
+ {
+ ///
+ ulong capabilities;
+
+ struct
+ {
+ /* mixin(bitfields!(ulong, "cap_bit0", 1, ulong, "cap_bit0_is_deprecated", 1, ulong,
+ "cap_user_rdpmc", 1, ulong, "cap_user_time", 1, ulong,
+ "cap_user_time_zero", 1, ulong, "cap_____res", 59)); */
+
+ private ulong mmap_page_bitmanip;
+ ///
+ @property ulong cap_bit0() @safe pure nothrow @nogc const
+ {
+ auto result = (mmap_page_bitmanip & 1U) >> 0U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void cap_bit0(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= cap_bit0_min,
+ "Value is smaller than the minimum value of bitfield 'cap_bit0'");
+ assert(v <= cap_bit0_max,
+ "Value is greater than the maximum value of bitfield 'cap_bit0'");
+ mmap_page_bitmanip = cast(typeof(mmap_page_bitmanip))(
+ (mmap_page_bitmanip & (-1 - cast(typeof(mmap_page_bitmanip)) 1U)) | (
+ (cast(typeof(mmap_page_bitmanip)) v << 0U) & 1U));
+ }
+
+ enum ulong cap_bit0_min = cast(ulong) 0U;
+ enum ulong cap_bit0_max = cast(ulong) 1U;
+ ///
+ @property ulong cap_bit0_is_deprecated() @safe pure nothrow @nogc const
+ {
+ auto result = (mmap_page_bitmanip & 2U) >> 1U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void cap_bit0_is_deprecated(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= cap_bit0_is_deprecated_min,
+ "Value is smaller than the minimum value of bitfield 'cap_bit0_is_deprecated'");
+ assert(v <= cap_bit0_is_deprecated_max,
+ "Value is greater than the maximum value of bitfield 'cap_bit0_is_deprecated'");
+ mmap_page_bitmanip = cast(typeof(mmap_page_bitmanip))(
+ (mmap_page_bitmanip & (-1 - cast(typeof(mmap_page_bitmanip)) 2U)) | (
+ (cast(typeof(mmap_page_bitmanip)) v << 1U) & 2U));
+ }
+
+ enum ulong cap_bit0_is_deprecated_min = cast(ulong) 0U;
+ enum ulong cap_bit0_is_deprecated_max = cast(ulong) 1U;
+ ///
+ @property ulong cap_user_rdpmc() @safe pure nothrow @nogc const
+ {
+ auto result = (mmap_page_bitmanip & 4U) >> 2U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void cap_user_rdpmc(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= cap_user_rdpmc_min,
+ "Value is smaller than the minimum value of bitfield 'cap_user_rdpmc'");
+ assert(v <= cap_user_rdpmc_max,
+ "Value is greater than the maximum value of bitfield 'cap_user_rdpmc'");
+ mmap_page_bitmanip = cast(typeof(mmap_page_bitmanip))(
+ (mmap_page_bitmanip & (-1 - cast(typeof(mmap_page_bitmanip)) 4U)) | (
+ (cast(typeof(mmap_page_bitmanip)) v << 2U) & 4U));
+ }
+
+ enum ulong cap_user_rdpmc_min = cast(ulong) 0U;
+ enum ulong cap_user_rdpmc_max = cast(ulong) 1U;
+ ///
+ @property ulong cap_user_time() @safe pure nothrow @nogc const
+ {
+ auto result = (mmap_page_bitmanip & 8U) >> 3U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void cap_user_time(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= cap_user_time_min,
+ "Value is smaller than the minimum value of bitfield 'cap_user_time'");
+ assert(v <= cap_user_time_max,
+ "Value is greater than the maximum value of bitfield 'cap_user_time'");
+ mmap_page_bitmanip = cast(typeof(mmap_page_bitmanip))(
+ (mmap_page_bitmanip & (-1 - cast(typeof(mmap_page_bitmanip)) 8U)) | (
+ (cast(typeof(mmap_page_bitmanip)) v << 3U) & 8U));
+ }
+
+ enum ulong cap_user_time_min = cast(ulong) 0U;
+ enum ulong cap_user_time_max = cast(ulong) 1U;
+ ///
+ @property ulong cap_user_time_zero() @safe pure nothrow @nogc const
+ {
+ auto result = (mmap_page_bitmanip & 16U) >> 4U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void cap_user_time_zero(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= cap_user_time_zero_min,
+ "Value is smaller than the minimum value of bitfield 'cap_user_time_zero'");
+ assert(v <= cap_user_time_zero_max,
+ "Value is greater than the maximum value of bitfield 'cap_user_time_zero'");
+ mmap_page_bitmanip = cast(typeof(mmap_page_bitmanip))(
+ (mmap_page_bitmanip & (-1 - cast(typeof(mmap_page_bitmanip)) 16U)) | (
+ (cast(typeof(mmap_page_bitmanip)) v << 4U) & 16U));
+ }
+
+ enum ulong cap_user_time_zero_min = cast(ulong) 0U;
+ enum ulong cap_user_time_zero_max = cast(ulong) 1U;
+ ///
+ @property ulong cap_____res() @safe pure nothrow @nogc const
+ {
+ auto result = (mmap_page_bitmanip & 18446744073709551584UL) >> 5U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void cap_____res(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= cap_____res_min,
+ "Value is smaller than the minimum value of bitfield 'cap_____res'");
+ assert(v <= cap_____res_max,
+ "Value is greater than the maximum value of bitfield 'cap_____res'");
+ mmap_page_bitmanip = cast(typeof(mmap_page_bitmanip))((mmap_page_bitmanip & (
+ -1 - cast(typeof(mmap_page_bitmanip)) 18446744073709551584UL)) | (
+ (cast(typeof(mmap_page_bitmanip)) v << 5U) & 18446744073709551584UL));
+ }
+
+ enum ulong cap_____res_min = cast(ulong) 0U;
+ enum ulong cap_____res_max = cast(ulong) 576460752303423487UL;
+ }
+ }
+
+ /**
+ * If cap_user_rdpmc this field provides the bit-width of the value
+ * read using the rdpmc() or equivalent instruction. This can be used
+ * to sign extend the result like:
+ *
+ * pmc <<= 64 - width;
+ * pmc >>= 64 - width; // signed shift right
+ * count += pmc;
+ */
+ ushort pmc_width;
+
+ /**
+ * If cap_usr_time the below fields can be used to compute the time
+ * delta since time_enabled (in ns) using rdtsc or similar.
+ *
+ * u64 quot, rem;
+ * u64 delta;
+ *
+ * quot = (cyc >> time_shift);
+ * rem = cyc & (((u64)1 << time_shift) - 1);
+ * delta = time_offset + quot * time_mult +
+ * ((rem * time_mult) >> time_shift);
+ *
+ * Where time_offset,time_mult,time_shift and cyc are read in the
+ * seqcount loop described above. This delta can then be added to
+ * enabled and possible running (if index), improving the scaling:
+ *
+ * enabled += delta;
+ * if (index)
+ * running += delta;
+ *
+ * quot = count / running;
+ * rem = count % running;
+ * count = quot * enabled + (rem * enabled) / running;
+ */
+ ushort time_shift;
+ ///
+ uint time_mult;
+ ///
+ ulong time_offset;
+ /**
+ * If cap_usr_time_zero, the hardware clock (e.g. TSC) can be calculated
+ * from sample timestamps.
+ *
+ * time = timestamp - time_zero;
+ * quot = time / time_mult;
+ * rem = time % time_mult;
+ * cyc = (quot << time_shift) + (rem << time_shift) / time_mult;
+ *
+ * And vice versa:
+ *
+ * quot = cyc >> time_shift;
+ * rem = cyc & (((u64)1 << time_shift) - 1);
+ * timestamp = time_zero + quot * time_mult +
+ * ((rem * time_mult) >> time_shift);
+ */
+ ulong time_zero;
+ uint size; /** Header size up to __reserved[] fields. */
+
+ /**
+ * Hole for extension of the self monitor capabilities
+ */
+
+ ubyte[948] __reserved; /** align to 1k. */
+
+ /**
+ * Control data for the mmap() data buffer.
+ *
+ * User-space reading the @data_head value should issue an smp_rmb(),
+ * after reading this value.
+ *
+ * When the mapping is PROT_WRITE the @data_tail value should be
+ * written by userspace to reflect the last read data, after issuing
+ * an smp_mb() to separate the data read from the ->data_tail store.
+ * In this case the kernel will not over-write unread data.
+ *
+ * See perf_output_put_handle() for the data ordering.
+ *
+ * data_{offset,size} indicate the location and size of the perf record
+ * buffer within the mmapped area.
+ */
+ ulong data_head; /** head in the data section */
+ ulong data_tail; /** user-space written tail */
+ ulong data_offset; /** where the buffer starts */
+ ulong data_size; /** data buffer size */
+
+ /**
+ * AUX area is defined by aux_{offset,size} fields that should be set
+ * by the userspace, so that
+ * ---
+ * aux_offset >= data_offset + data_size
+ * ---
+ * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
+ *
+ * Ring buffer pointers aux_{head,tail} have the same semantics as
+ * data_{head,tail} and same ordering rules apply.
+ */
+ ulong aux_head;
+ ///
+ ulong aux_tail;
+ ///
+ ulong aux_offset;
+ ///
+ ulong aux_size;
+}
+///
+enum PERF_RECORD_MISC_CPUMODE_MASK = 7 << 0;
+///
+enum PERF_RECORD_MISC_CPUMODE_UNKNOWN = 0 << 0;
+///
+enum PERF_RECORD_MISC_KERNEL = 1 << 0;
+///
+enum PERF_RECORD_MISC_USER = 2 << 0;
+///
+enum PERF_RECORD_MISC_HYPERVISOR = 3 << 0;
+///
+enum PERF_RECORD_MISC_GUEST_KERNEL = 4 << 0;
+///
+enum PERF_RECORD_MISC_GUEST_USER = 5 << 0;
+
+/**
+ * Indicates that /proc/PID/maps parsing was truncated by a timeout.
+ */
+enum PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT = 1 << 12;
+/**
+ * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
+ * different events so can reuse the same bit position.
+ * Ditto PERF_RECORD_MISC_SWITCH_OUT.
+ */
+enum PERF_RECORD_MISC_MMAP_DATA = 1 << 13;
+///
+enum PERF_RECORD_MISC_COMM_EXEC = 1 << 13;
+///
+enum PERF_RECORD_MISC_SWITCH_OUT = 1 << 13;
+/**
+ * Indicates that the content of PERF_SAMPLE_IP points to
+ * the actual instruction that triggered the event. See also
+ * perf_event_attr::precise_ip.
+ */
+enum PERF_RECORD_MISC_EXACT_IP = 1 << 14;
+/**
+ * Reserve the last bit to indicate some extended misc field
+ */
+enum PERF_RECORD_MISC_EXT_RESERVED = 1 << 15;
+///
+struct perf_event_header
+{
+ ///
+ uint type;
+ ///
+ ushort misc;
+ ///
+ ushort size;
+}
+///
+struct perf_ns_link_info
+{
+ ///
+ ulong dev;
+ ///
+ ulong ino;
+}
+
+enum
+{
+ ///
+ NET_NS_INDEX = 0,
+ ///
+ UTS_NS_INDEX = 1,
+ ///
+ IPC_NS_INDEX = 2,
+ ///
+ PID_NS_INDEX = 3,
+ ///
+ USER_NS_INDEX = 4,
+ ///
+ MNT_NS_INDEX = 5,
+ ///
+ CGROUP_NS_INDEX = 6,
+ NR_NAMESPACES = 7 /** number of available namespaces */
+}
+///
+enum perf_event_type
+{
+ /**
+ * If perf_event_attr.sample_id_all is set then all event types will
+ * have the sample_type selected fields related to where/when
+ * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
+ * IDENTIFIER) described in PERF_RECORD_SAMPLE below; they will be stashed
+ * just after the perf_event_header and the fields already present for
+ * the existing fields, i.e. at the end of the payload. That way a newer
+ * perf.data file will be supported by older perf tools, with these new
+ * optional fields being ignored.
+ * ---
+ * struct sample_id {
+ * { u32 pid, tid; } && PERF_SAMPLE_TID
+ * { u64 time; } && PERF_SAMPLE_TIME
+ * { u64 id; } && PERF_SAMPLE_ID
+ * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
+ * { u32 cpu, res; } && PERF_SAMPLE_CPU
+ * { u64 id; } && PERF_SAMPLE_IDENTIFIER
+ * } && perf_event_attr::sample_id_all
+ * ---
+ * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
+ * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
+ * relative to header.size.
+ */
+
+ /**
+ * The MMAP events record the PROT_EXEC mappings so that we can
+ * correlate userspace IPs to code. They have the following structure:
+ * ---
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u32 pid, tid;
+ * u64 addr;
+ * u64 len;
+ * u64 pgoff;
+ * char filename[];
+ * struct sample_id sample_id;
+ * };
+ * ---
+ */
+ PERF_RECORD_MMAP = 1,
+
+ /**
+ * ---
+ * struct {
+ * struct perf_event_header header;
+ * u64 id;
+ * u64 lost;
+ * struct sample_id sample_id;
+ * };
+ * ---
+ */
+ PERF_RECORD_LOST = 2,
+
+ /**
+ * ---
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u32 pid, tid;
+ * char comm[];
+ * struct sample_id sample_id;
+ * };
+ * ---
+ */
+ PERF_RECORD_COMM = 3,
+
+ /**
+ * ---
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid, ppid;
+ * u32 tid, ptid;
+ * u64 time;
+ * struct sample_id sample_id;
+ * };
+ * ---
+ */
+ PERF_RECORD_EXIT = 4,
+
+ /**
+ * ---
+ * struct {
+ * struct perf_event_header header;
+ * u64 time;
+ * u64 id;
+ * u64 stream_id;
+ * struct sample_id sample_id;
+ * };
+ * ---
+ */
+ PERF_RECORD_THROTTLE = 5,
+ PERF_RECORD_UNTHROTTLE = 6,
+ /**
+ * ---
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid, ppid;
+ * u32 tid, ptid;
+ * u64 time;
+ * struct sample_id sample_id;
+ * };
+ * ---
+ */
+ PERF_RECORD_FORK = 7,
+ /**
+ * ---
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid, tid;
+ *
+ * struct read_format values;
+ * struct sample_id sample_id;
+ * };
+ * ---
+ */
+ PERF_RECORD_READ = 8,
+ /**
+ * ---
+ * struct {
+ * struct perf_event_header header;
+ *
+ * #
+ * # Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
+ * # The advantage of PERF_SAMPLE_IDENTIFIER is that its position
+ * # is fixed relative to header.
+ * #
+ *
+ * { u64 id; } && PERF_SAMPLE_IDENTIFIER
+ * { u64 ip; } && PERF_SAMPLE_IP
+ * { u32 pid, tid; } && PERF_SAMPLE_TID
+ * { u64 time; } && PERF_SAMPLE_TIME
+ * { u64 addr; } && PERF_SAMPLE_ADDR
+ * { u64 id; } && PERF_SAMPLE_ID
+ * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
+ * { u32 cpu, res; } && PERF_SAMPLE_CPU
+ * { u64 period; } && PERF_SAMPLE_PERIOD
+ *
+ * { struct read_format values; } && PERF_SAMPLE_READ
+ *
+ * { u64 nr,
+ * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
+ *
+ * #
+ * # The RAW record below is opaque data wrt the ABI
+ * #
+ * # That is, the ABI doesn't make any promises wrt to
+ * # the stability of its content, it may vary depending
+ * # on event, hardware, kernel version and phase of
+ * # the moon.
+ * #
+ * # In other words, PERF_SAMPLE_RAW contents are not an ABI.
+ * #
+ *
+ * { u32 size;
+ * char data[size];}&& PERF_SAMPLE_RAW
+ *
+ * { u64 nr;
+ * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
+ *
+ * { u64 abi; # enum perf_sample_regs_abi
+ * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
+ *
+ * { u64 size;
+ * char data[size];
+ * u64 dyn_size; } && PERF_SAMPLE_STACK_USER
+ *
+ * { u64 weight; } && PERF_SAMPLE_WEIGHT
+ * { u64 data_src; } && PERF_SAMPLE_DATA_SRC
+ * { u64 transaction; } && PERF_SAMPLE_TRANSACTION
+ * { u64 abi; # enum perf_sample_regs_abi
+ * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
+ * { u64 phys_addr;} && PERF_SAMPLE_PHYS_ADDR
+ * };
+ * ---
+ */
+ PERF_RECORD_SAMPLE = 9,
+
+ /**
+ * ---
+ * The MMAP2 records are an augmented version of MMAP; they add
+ * maj, min and ino numbers that uniquely identify each mapping.
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u32 pid, tid;
+ * u64 addr;
+ * u64 len;
+ * u64 pgoff;
+ * u32 maj;
+ * u32 min;
+ * u64 ino;
+ * u64 ino_generation;
+ * u32 prot, flags;
+ * char filename[];
+ * struct sample_id sample_id;
+ * };
+ * ---
+ */
+ PERF_RECORD_MMAP2 = 10,
+
+ /**
+ * Records that new data landed in the AUX buffer part.
+ * ---
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u64 aux_offset;
+ * u64 aux_size;
+ * u64 flags;
+ * struct sample_id sample_id;
+ * };
+ * ---
+ */
+ PERF_RECORD_AUX = 11,
+
+ /**
+ * ---
+ * Indicates that instruction trace has started
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid;
+ * u32 tid;
+ * };
+ * ---
+ */
+ PERF_RECORD_ITRACE_START = 12,
+
+ /**
+ * Records the dropped/lost sample number.
+ * ---
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u64 lost;
+ * struct sample_id sample_id;
+ * };
+ * ---
+ */
+ PERF_RECORD_LOST_SAMPLES = 13,
+
+ /**
+ *
+ * Records a context switch in or out (flagged by
+ * PERF_RECORD_MISC_SWITCH_OUT). See also
+ * PERF_RECORD_SWITCH_CPU_WIDE.
+ * ---
+ * struct {
+ * struct perf_event_header header;
+ * struct sample_id sample_id;
+ * };
+ * ---
+ */
+ PERF_RECORD_SWITCH = 14,
+
+ /**
+ * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
+ * next_prev_tid that are the next (switching out) or previous
+ * (switching in) pid/tid.
+ * ---
+ * struct {
+ * struct perf_event_header header;
+ * u32 next_prev_pid;
+ * u32 next_prev_tid;
+ * struct sample_id sample_id;
+ * };
+ * ---
+ */
+ PERF_RECORD_SWITCH_CPU_WIDE = 15,
+
+ /**
+ * ---
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid;
+ * u32 tid;
+ * u64 nr_namespaces;
+ * { u64 dev, inode; } [nr_namespaces];
+ * struct sample_id sample_id;
+ * };
+ * ---
+ */
+ PERF_RECORD_NAMESPACES = 16,
+
+ PERF_RECORD_MAX = 17 /* non-ABI */
+}
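For illustration, a minimal sketch of how these record types are consumed: a hypothetical walkRecords helper that dispatches on perf_event_header.type, assuming the variable-length records have already been copied out of the perf mmap ring buffer into a flat byte slice (ring-buffer wrap-around and data_head/data_tail handling are deliberately omitted).

    void walkRecords(const(ubyte)[] buf)
    {
        while (buf.length >= perf_event_header.sizeof)
        {
            auto hdr = cast(const(perf_event_header)*) buf.ptr;
            // header.size covers the whole record, header included
            if (hdr.size < perf_event_header.sizeof || hdr.size > buf.length)
                break;                                  // malformed or truncated record
            switch (hdr.type)
            {
                case perf_event_type.PERF_RECORD_SAMPLE:
                    // payload layout is selected by perf_event_attr.sample_type
                    break;
                case perf_event_type.PERF_RECORD_MMAP2:
                    // pid/tid, addr, len, pgoff, maj/min/ino, prot/flags, filename, ...
                    break;
                default:
                    break;
            }
            buf = buf[hdr.size .. $];                   // records are laid out back to back
        }
    }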
+///
+enum PERF_MAX_STACK_DEPTH = 127;
+///
+enum PERF_MAX_CONTEXTS_PER_STACK = 8;
+///
+enum perf_callchain_context
+{
+ ///
+ PERF_CONTEXT_HV = cast(ulong)-32,
+ ///
+ PERF_CONTEXT_KERNEL = cast(ulong)-128,
+ ///
+ PERF_CONTEXT_USER = cast(ulong)-512,
+ ///
+ PERF_CONTEXT_GUEST = cast(ulong)-2048,
+ ///
+ PERF_CONTEXT_GUEST_KERNEL = cast(ulong)-2176,
+ ///
+ PERF_CONTEXT_GUEST_USER = cast(ulong)-2560,
+ ///
+ PERF_CONTEXT_MAX = cast(ulong)-4095
+}
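As a hedged illustration of how these values are typically used (this follows the convention of the perf tool, it is not mandated by the header itself): the context markers are interleaved with real addresses in a PERF_SAMPLE_CALLCHAIN ips[] array, and any value at or above PERF_CONTEXT_MAX can be treated as a marker rather than an instruction pointer. isContextMarker is a hypothetical helper name.

    bool isContextMarker(ulong ip)
    {
        return ip >= perf_callchain_context.PERF_CONTEXT_MAX;
    }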
+
+/**
+ * PERF_RECORD_AUX::flags bits
+ */
+enum PERF_AUX_FLAG_TRUNCATED = 0x01; /** record was truncated to fit */
+enum PERF_AUX_FLAG_OVERWRITE = 0x02; /** snapshot from overwrite mode */
+enum PERF_AUX_FLAG_PARTIAL = 0x04; /** record contains gaps */
+enum PERF_AUX_FLAG_COLLISION = 0x08; /** sample collided with another */
+///
+enum PERF_FLAG_FD_NO_GROUP = 1UL << 0;
+///
+enum PERF_FLAG_FD_OUTPUT = 1UL << 1;
+enum PERF_FLAG_PID_CGROUP = 1UL << 2; /** pid=cgroup id, per-cpu mode only */
+enum PERF_FLAG_FD_CLOEXEC = 1UL << 3; /** O_CLOEXEC */
+/// perf_mem_data_src is endian-specific.
+version (LittleEndian)
+{
+ ///
+ union perf_mem_data_src
+ {
+ ///
+ ulong val;
+
+ struct
+ {
+ /* mixin(bitfields!(ulong, "mem_op", 5, ulong, "mem_lvl", 14, ulong,
+ "mem_snoop", 5, ulong, "mem_lock", 2, ulong, "mem_dtlb", 7, ulong,
+ "mem_lvl_num", 4, ulong, "mem_remote", 1, ulong,
+ "mem_snoopx", 2, ulong, "mem_rsvd", 24)); */
+
+ private ulong perf_mem_data_src_bitmanip;
+ ///
+ @property ulong mem_op() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_mem_data_src_bitmanip & 31U) >> 0U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void mem_op(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= mem_op_min,
+ "Value is smaller than the minimum value of bitfield 'mem_op'");
+ assert(v <= mem_op_max,
+ "Value is greater than the maximum value of bitfield 'mem_op'");
+ perf_mem_data_src_bitmanip = cast(
+ typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & (
+ -1 - cast(typeof(perf_mem_data_src_bitmanip)) 31U)) | (
+ (cast(typeof(perf_mem_data_src_bitmanip)) v << 0U) & 31U));
+ }
+
+ enum ulong mem_op_min = cast(ulong) 0U;
+ enum ulong mem_op_max = cast(ulong) 31U;
+ ///
+ @property ulong mem_lvl() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_mem_data_src_bitmanip & 524256U) >> 5U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void mem_lvl(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= mem_lvl_min,
+ "Value is smaller than the minimum value of bitfield 'mem_lvl'");
+ assert(v <= mem_lvl_max,
+ "Value is greater than the maximum value of bitfield 'mem_lvl'");
+ perf_mem_data_src_bitmanip = cast(
+ typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & (
+ -1 - cast(typeof(perf_mem_data_src_bitmanip)) 524256U)) | (
+ (cast(typeof(perf_mem_data_src_bitmanip)) v << 5U) & 524256U));
+ }
+
+ enum ulong mem_lvl_min = cast(ulong) 0U;
+ enum ulong mem_lvl_max = cast(ulong) 16383U;
+ ///
+ @property ulong mem_snoop() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_mem_data_src_bitmanip & 16252928U) >> 19U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void mem_snoop(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= mem_snoop_min,
+ "Value is smaller than the minimum value of bitfield 'mem_snoop'");
+ assert(v <= mem_snoop_max,
+ "Value is greater than the maximum value of bitfield 'mem_snoop'");
+ perf_mem_data_src_bitmanip = cast(
+ typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & (
+ -1 - cast(typeof(perf_mem_data_src_bitmanip)) 16252928U)) | (
+ (cast(typeof(perf_mem_data_src_bitmanip)) v << 19U) & 16252928U));
+ }
+
+ enum ulong mem_snoop_min = cast(ulong) 0U;
+ enum ulong mem_snoop_max = cast(ulong) 31U;
+ ///
+ @property ulong mem_lock() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_mem_data_src_bitmanip & 50331648U) >> 24U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void mem_lock(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= mem_lock_min,
+ "Value is smaller than the minimum value of bitfield 'mem_lock'");
+ assert(v <= mem_lock_max,
+ "Value is greater than the maximum value of bitfield 'mem_lock'");
+ perf_mem_data_src_bitmanip = cast(
+ typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & (
+ -1 - cast(typeof(perf_mem_data_src_bitmanip)) 50331648U)) | (
+ (cast(typeof(perf_mem_data_src_bitmanip)) v << 24U) & 50331648U));
+ }
+
+ enum ulong mem_lock_min = cast(ulong) 0U;
+ enum ulong mem_lock_max = cast(ulong) 3U;
+ ///
+ @property ulong mem_dtlb() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_mem_data_src_bitmanip & 8522825728UL) >> 26U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void mem_dtlb(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= mem_dtlb_min,
+ "Value is smaller than the minimum value of bitfield 'mem_dtlb'");
+ assert(v <= mem_dtlb_max,
+ "Value is greater than the maximum value of bitfield 'mem_dtlb'");
+ perf_mem_data_src_bitmanip = cast(
+ typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & (
+ -1 - cast(typeof(perf_mem_data_src_bitmanip)) 8522825728UL)) | (
+ (cast(typeof(perf_mem_data_src_bitmanip)) v << 26U) & 8522825728UL));
+ }
+
+ enum ulong mem_dtlb_min = cast(ulong) 0U;
+ enum ulong mem_dtlb_max = cast(ulong) 127U;
+ ///
+ @property ulong mem_lvl_num() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_mem_data_src_bitmanip & 128849018880UL) >> 33U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void mem_lvl_num(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= mem_lvl_num_min,
+ "Value is smaller than the minimum value of bitfield 'mem_lvl_num'");
+ assert(v <= mem_lvl_num_max,
+ "Value is greater than the maximum value of bitfield 'mem_lvl_num'");
+ perf_mem_data_src_bitmanip = cast(
+ typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & (
+ -1 - cast(typeof(perf_mem_data_src_bitmanip)) 128849018880UL)) | (
+ (cast(typeof(perf_mem_data_src_bitmanip)) v << 33U) & 128849018880UL));
+ }
+
+ enum ulong mem_lvl_num_min = cast(ulong) 0U;
+ enum ulong mem_lvl_num_max = cast(ulong) 15U;
+ ///
+ @property ulong mem_remote() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_mem_data_src_bitmanip & 137438953472UL) >> 37U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void mem_remote(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= mem_remote_min,
+ "Value is smaller than the minimum value of bitfield 'mem_remote'");
+ assert(v <= mem_remote_max,
+ "Value is greater than the maximum value of bitfield 'mem_remote'");
+ perf_mem_data_src_bitmanip = cast(
+ typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & (
+ -1 - cast(typeof(perf_mem_data_src_bitmanip)) 137438953472UL)) | (
+ (cast(typeof(perf_mem_data_src_bitmanip)) v << 37U) & 137438953472UL));
+ }
+
+ enum ulong mem_remote_min = cast(ulong) 0U;
+ enum ulong mem_remote_max = cast(ulong) 1U;
+ ///
+ @property ulong mem_snoopx() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_mem_data_src_bitmanip & 824633720832UL) >> 38U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void mem_snoopx(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= mem_snoopx_min,
+ "Value is smaller than the minimum value of bitfield 'mem_snoopx'");
+ assert(v <= mem_snoopx_max,
+ "Value is greater than the maximum value of bitfield 'mem_snoopx'");
+ perf_mem_data_src_bitmanip = cast(
+ typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & (
+ -1 - cast(typeof(perf_mem_data_src_bitmanip)) 824633720832UL)) | (
+ (cast(typeof(perf_mem_data_src_bitmanip)) v << 38U) & 824633720832UL));
+ }
+
+ enum ulong mem_snoopx_min = cast(ulong) 0U;
+ enum ulong mem_snoopx_max = cast(ulong) 3U;
+ ///
+ @property ulong mem_rsvd() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_mem_data_src_bitmanip & 18446742974197923840UL) >> 40U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void mem_rsvd(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= mem_rsvd_min,
+ "Value is smaller than the minimum value of bitfield 'mem_rsvd'");
+ assert(v <= mem_rsvd_max,
+ "Value is greater than the maximum value of bitfield 'mem_rsvd'");
+ perf_mem_data_src_bitmanip = cast(
+ typeof(perf_mem_data_src_bitmanip))(
+ (perf_mem_data_src_bitmanip & (-1 - cast(
+ typeof(perf_mem_data_src_bitmanip)) 18446742974197923840UL)) | (
+ (cast(typeof(perf_mem_data_src_bitmanip)) v << 40U) & 18446742974197923840UL));
+ }
+
+ enum ulong mem_rsvd_min = cast(ulong) 0U;
+ enum ulong mem_rsvd_max = cast(ulong) 16777215U;
+
+ }
+ }
+}
+else
+{
+ ///
+ union perf_mem_data_src
+ {
+ ///
+ ulong val;
+
+ struct
+ {
+ import std.bitmanip : bitfields;
+
+ /* mixin(bitfields!(ulong, "mem_rsvd", 24, ulong, "mem_snoopx", 2, ulong,
+ "mem_remote", 1, ulong, "mem_lvl_num", 4, ulong, "mem_dtlb", 7, ulong,
+ "mem_lock", 2, ulong, "mem_snoop", 5, ulong, "mem_lvl",
+ 14, ulong, "mem_op", 5)); */
+ private ulong perf_mem_data_src;
+ ///
+ @property ulong mem_rsvd() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_mem_data_src & 16777215U) >> 0U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void mem_rsvd(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= mem_rsvd_min,
+ "Value is smaller than the minimum value of bitfield 'mem_rsvd'");
+ assert(v <= mem_rsvd_max,
+ "Value is greater than the maximum value of bitfield 'mem_rsvd'");
+ perf_mem_data_src = cast(typeof(perf_mem_data_src))(
+ (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 16777215U)) | (
+ (cast(typeof(perf_mem_data_src)) v << 0U) & 16777215U));
+ }
+
+ enum ulong mem_rsvd_min = cast(ulong) 0U;
+ enum ulong mem_rsvd_max = cast(ulong) 16777215U;
+ ///
+ @property ulong mem_snoopx() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_mem_data_src & 50331648U) >> 24U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void mem_snoopx(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= mem_snoopx_min,
+ "Value is smaller than the minimum value of bitfield 'mem_snoopx'");
+ assert(v <= mem_snoopx_max,
+ "Value is greater than the maximum value of bitfield 'mem_snoopx'");
+ perf_mem_data_src = cast(typeof(perf_mem_data_src))(
+ (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 50331648U)) | (
+ (cast(typeof(perf_mem_data_src)) v << 24U) & 50331648U));
+ }
+
+ enum ulong mem_snoopx_min = cast(ulong) 0U;
+ enum ulong mem_snoopx_max = cast(ulong) 3U;
+ ///
+ @property ulong mem_remote() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_mem_data_src & 67108864U) >> 26U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void mem_remote(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= mem_remote_min,
+ "Value is smaller than the minimum value of bitfield 'mem_remote'");
+ assert(v <= mem_remote_max,
+ "Value is greater than the maximum value of bitfield 'mem_remote'");
+ perf_mem_data_src = cast(typeof(perf_mem_data_src))(
+ (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 67108864U)) | (
+ (cast(typeof(perf_mem_data_src)) v << 26U) & 67108864U));
+ }
+
+ enum ulong mem_remote_min = cast(ulong) 0U;
+ enum ulong mem_remote_max = cast(ulong) 1U;
+ ///
+ @property ulong mem_lvl_num() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_mem_data_src & 2013265920U) >> 27U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void mem_lvl_num(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= mem_lvl_num_min,
+ "Value is smaller than the minimum value of bitfield 'mem_lvl_num'");
+ assert(v <= mem_lvl_num_max,
+ "Value is greater than the maximum value of bitfield 'mem_lvl_num'");
+ perf_mem_data_src = cast(typeof(perf_mem_data_src))(
+ (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 2013265920U)) | (
+ (cast(typeof(perf_mem_data_src)) v << 27U) & 2013265920U));
+ }
+
+ enum ulong mem_lvl_num_min = cast(ulong) 0U;
+ enum ulong mem_lvl_num_max = cast(ulong) 15U;
+ ///
+ @property ulong mem_dtlb() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_mem_data_src & 272730423296UL) >> 31U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void mem_dtlb(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= mem_dtlb_min,
+ "Value is smaller than the minimum value of bitfield 'mem_dtlb'");
+ assert(v <= mem_dtlb_max,
+ "Value is greater than the maximum value of bitfield 'mem_dtlb'");
+ perf_mem_data_src = cast(typeof(perf_mem_data_src))(
+ (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 272730423296UL)) | (
+ (cast(typeof(perf_mem_data_src)) v << 31U) & 272730423296UL));
+ }
+
+ enum ulong mem_dtlb_min = cast(ulong) 0U;
+ enum ulong mem_dtlb_max = cast(ulong) 127U;
+ ///
+ @property ulong mem_lock() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_mem_data_src & 824633720832UL) >> 38U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void mem_lock(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= mem_lock_min,
+ "Value is smaller than the minimum value of bitfield 'mem_lock'");
+ assert(v <= mem_lock_max,
+ "Value is greater than the maximum value of bitfield 'mem_lock'");
+ perf_mem_data_src = cast(typeof(perf_mem_data_src))(
+ (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 824633720832UL)) | (
+ (cast(typeof(perf_mem_data_src)) v << 38U) & 824633720832UL));
+ }
+
+ enum ulong mem_lock_min = cast(ulong) 0U;
+ enum ulong mem_lock_max = cast(ulong) 3U;
+ ///
+ @property ulong mem_snoop() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_mem_data_src & 34084860461056UL) >> 40U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void mem_snoop(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= mem_snoop_min,
+ "Value is smaller than the minimum value of bitfield 'mem_snoop'");
+ assert(v <= mem_snoop_max,
+ "Value is greater than the maximum value of bitfield 'mem_snoop'");
+ perf_mem_data_src = cast(typeof(perf_mem_data_src))(
+ (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 34084860461056UL)) | (
+ (cast(typeof(perf_mem_data_src)) v << 40U) & 34084860461056UL));
+ }
+
+ enum ulong mem_snoop_min = cast(ulong) 0U;
+ enum ulong mem_snoop_max = cast(ulong) 31U;
+ ///
+ @property ulong mem_lvl() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_mem_data_src & 576425567931334656UL) >> 45U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void mem_lvl(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= mem_lvl_min,
+ "Value is smaller than the minimum value of bitfield 'mem_lvl'");
+ assert(v <= mem_lvl_max,
+ "Value is greater than the maximum value of bitfield 'mem_lvl'");
+ perf_mem_data_src = cast(typeof(perf_mem_data_src))((perf_mem_data_src & (
+ -1 - cast(typeof(perf_mem_data_src)) 576425567931334656UL)) | (
+ (cast(typeof(perf_mem_data_src)) v << 45U) & 576425567931334656UL));
+ }
+
+ enum ulong mem_lvl_min = cast(ulong) 0U;
+ enum ulong mem_lvl_max = cast(ulong) 16383U;
+ ///
+ @property ulong mem_op() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_mem_data_src & 17870283321406128128UL) >> 59U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void mem_op(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= mem_op_min,
+ "Value is smaller than the minimum value of bitfield 'mem_op'");
+ assert(v <= mem_op_max,
+ "Value is greater than the maximum value of bitfield 'mem_op'");
+ perf_mem_data_src = cast(typeof(perf_mem_data_src))((perf_mem_data_src & (
+ -1 - cast(typeof(perf_mem_data_src)) 17870283321406128128UL)) | (
+ (cast(typeof(perf_mem_data_src)) v << 59U) & 17870283321406128128UL));
+ }
+
+ enum ulong mem_op_min = cast(ulong) 0U;
+ enum ulong mem_op_max = cast(ulong) 31U;
+ }
+ }
+}
+
+/* snoop mode, ext */
+/* remote */
+/* memory hierarchy level number */
+/* tlb access */
+/* lock instr */
+/* snoop mode */
+/* memory hierarchy level */
+/* type of opcode */
+
+/** type of opcode (load/store/prefetch, code) */
+enum PERF_MEM_OP_NA = 0x01; /** not available */
+enum PERF_MEM_OP_LOAD = 0x02; /** load instruction */
+enum PERF_MEM_OP_STORE = 0x04; /** store instruction */
+enum PERF_MEM_OP_PFETCH = 0x08; /** prefetch */
+enum PERF_MEM_OP_EXEC = 0x10; /** code (execution) */
+enum PERF_MEM_OP_SHIFT = 0;
+
+/* memory hierarchy (memory level, hit or miss) */
+enum PERF_MEM_LVL_NA = 0x01; /** not available */
+enum PERF_MEM_LVL_HIT = 0x02; /** hit level */
+enum PERF_MEM_LVL_MISS = 0x04; /** miss level */
+enum PERF_MEM_LVL_L1 = 0x08; /** L1 */
+enum PERF_MEM_LVL_LFB = 0x10; /** Line Fill Buffer */
+enum PERF_MEM_LVL_L2 = 0x20; /** L2 */
+enum PERF_MEM_LVL_L3 = 0x40; /** L3 */
+enum PERF_MEM_LVL_LOC_RAM = 0x80; /** Local DRAM */
+enum PERF_MEM_LVL_REM_RAM1 = 0x100; /** Remote DRAM (1 hop) */
+enum PERF_MEM_LVL_REM_RAM2 = 0x200; /** Remote DRAM (2 hops) */
+enum PERF_MEM_LVL_REM_CCE1 = 0x400; /** Remote Cache (1 hop) */
+enum PERF_MEM_LVL_REM_CCE2 = 0x800; /** Remote Cache (2 hops) */
+enum PERF_MEM_LVL_IO = 0x1000; /** I/O memory */
+enum PERF_MEM_LVL_UNC = 0x2000; /** Uncached memory */
+///
+enum PERF_MEM_LVL_SHIFT = 5;
+
+enum PERF_MEM_REMOTE_REMOTE = 0x01; /** Remote */
+///
+enum PERF_MEM_REMOTE_SHIFT = 37;
+
+enum PERF_MEM_LVLNUM_L1 = 0x01; /** L1 */
+enum PERF_MEM_LVLNUM_L2 = 0x02; /** L2 */
+enum PERF_MEM_LVLNUM_L3 = 0x03; /** L3 */
+enum PERF_MEM_LVLNUM_L4 = 0x04; /** L4 */
+/* 5-0xa available */
+enum PERF_MEM_LVLNUM_ANY_CACHE = 0x0b; /** Any cache */
+enum PERF_MEM_LVLNUM_LFB = 0x0c; /** LFB */
+enum PERF_MEM_LVLNUM_RAM = 0x0d; /** RAM */
+enum PERF_MEM_LVLNUM_PMEM = 0x0e; /** PMEM */
+enum PERF_MEM_LVLNUM_NA = 0x0f; /** N/A */
+///
+enum PERF_MEM_LVLNUM_SHIFT = 33;
+
+/* snoop mode */
+enum PERF_MEM_SNOOP_NA = 0x01; /** not available */
+enum PERF_MEM_SNOOP_NONE = 0x02; /** no snoop */
+enum PERF_MEM_SNOOP_HIT = 0x04; /** snoop hit */
+enum PERF_MEM_SNOOP_MISS = 0x08; /** snoop miss */
+enum PERF_MEM_SNOOP_HITM = 0x10; /** snoop hit modified */
+///
+enum PERF_MEM_SNOOP_SHIFT = 19;
+
+enum PERF_MEM_SNOOPX_FWD = 0x01; /** forward */
+/** 1 free */
+enum PERF_MEM_SNOOPX_SHIFT = 37;
+
+/** locked instruction */
+enum PERF_MEM_LOCK_NA = 0x01; /** not available */
+enum PERF_MEM_LOCK_LOCKED = 0x02; /** locked transaction */
+///
+enum PERF_MEM_LOCK_SHIFT = 24;
+
+/* TLB access */
+enum PERF_MEM_TLB_NA = 0x01; /** not available */
+enum PERF_MEM_TLB_HIT = 0x02; /** hit level */
+enum PERF_MEM_TLB_MISS = 0x04; /** miss level */
+enum PERF_MEM_TLB_L1 = 0x08; /** L1 */
+enum PERF_MEM_TLB_L2 = 0x10; /** L2 */
+enum PERF_MEM_TLB_WK = 0x20; /** Hardware Walker */
+enum PERF_MEM_TLB_OS = 0x40; /** OS fault handler */
+///
+enum PERF_MEM_TLB_SHIFT = 26;
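A minimal sketch of decoding one PERF_SAMPLE_DATA_SRC value with the bitfield accessors of perf_mem_data_src and the PERF_MEM_* masks above; isLocalL1LoadHit and raw are hypothetical names, and the raw value is assumed to come from a PERF_RECORD_SAMPLE payload.

    bool isLocalL1LoadHit(ulong raw)
    {
        perf_mem_data_src src;
        src.val = raw;                                  // reinterpret via the union
        return (src.mem_op  & PERF_MEM_OP_LOAD) != 0
            && (src.mem_lvl & PERF_MEM_LVL_HIT) != 0
            && (src.mem_lvl & PERF_MEM_LVL_L1)  != 0;
    }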
+
+/**
+ * single taken branch record layout:
+ *
+ * from: source instruction (may not always be a branch insn)
+ * to: branch target
+ * mispred: branch target was mispredicted
+ * predicted: branch target was predicted
+ *
+ * Support for mispred and predicted is optional; if it is
+ * not supported, mispred = predicted = 0.
+ *
+ * in_tx: running in a hardware transaction
+ * abort: aborting a hardware transaction
+ * cycles: cycles from last branch (or 0 if not supported)
+ * type: branch type
+ */
+struct perf_branch_entry
+{
+ ///
+ ulong from;
+ ///
+ ulong to;
+
+ /* mixin(bitfields!(ulong, "mispred", 1, ulong, "predicted", 1, ulong,
+ "in_tx", 1, ulong, "abort", 1, ulong, "cycles", 16, ulong, "type",
+ 4, ulong, "reserved", 40)); */
+ private ulong perf_branch_entry_bitmanip;
+ ///
+ @property ulong mispred() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_branch_entry_bitmanip & 1U) >> 0U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void mispred(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= mispred_min,
+ "Value is smaller than the minimum value of bitfield 'mispred'");
+ assert(v <= mispred_max,
+ "Value is greater than the maximum value of bitfield 'mispred'");
+ perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))(
+ (perf_branch_entry_bitmanip & (-1 - cast(typeof(perf_branch_entry_bitmanip)) 1U)) | (
+ (cast(typeof(perf_branch_entry_bitmanip)) v << 0U) & 1U));
+ }
+
+ enum ulong mispred_min = cast(ulong) 0U;
+ enum ulong mispred_max = cast(ulong) 1U;
+ ///
+ @property ulong predicted() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_branch_entry_bitmanip & 2U) >> 1U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void predicted(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= predicted_min,
+ "Value is smaller than the minimum value of bitfield 'predicted'");
+ assert(v <= predicted_max,
+ "Value is greater than the maximum value of bitfield 'predicted'");
+ perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))(
+ (perf_branch_entry_bitmanip & (-1 - cast(typeof(perf_branch_entry_bitmanip)) 2U)) | (
+ (cast(typeof(perf_branch_entry_bitmanip)) v << 1U) & 2U));
+ }
+
+ enum ulong predicted_min = cast(ulong) 0U;
+ enum ulong predicted_max = cast(ulong) 1U;
+ ///
+ @property ulong in_tx() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_branch_entry_bitmanip & 4U) >> 2U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void in_tx(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= in_tx_min,
+ "Value is smaller than the minimum value of bitfield 'in_tx'");
+ assert(v <= in_tx_max,
+ "Value is greater than the maximum value of bitfield 'in_tx'");
+ perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))(
+ (perf_branch_entry_bitmanip & (-1 - cast(typeof(perf_branch_entry_bitmanip)) 4U)) | (
+ (cast(typeof(perf_branch_entry_bitmanip)) v << 2U) & 4U));
+ }
+
+ enum ulong in_tx_min = cast(ulong) 0U;
+ enum ulong in_tx_max = cast(ulong) 1U;
+ ///
+ @property ulong abort() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_branch_entry_bitmanip & 8U) >> 3U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void abort(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= abort_min,
+ "Value is smaller than the minimum value of bitfield 'abort'");
+ assert(v <= abort_max,
+ "Value is greater than the maximum value of bitfield 'abort'");
+ perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))(
+ (perf_branch_entry_bitmanip & (-1 - cast(typeof(perf_branch_entry_bitmanip)) 8U)) | (
+ (cast(typeof(perf_branch_entry_bitmanip)) v << 3U) & 8U));
+ }
+
+ enum ulong abort_min = cast(ulong) 0U;
+ enum ulong abort_max = cast(ulong) 1U;
+ ///
+ @property ulong cycles() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_branch_entry_bitmanip & 1048560U) >> 4U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void cycles(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= cycles_min,
+ "Value is smaller than the minimum value of bitfield 'cycles'");
+ assert(v <= cycles_max,
+ "Value is greater than the maximum value of bitfield 'cycles'");
+ perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))(
+ (perf_branch_entry_bitmanip & (-1 - cast(typeof(perf_branch_entry_bitmanip)) 1048560U)) | (
+ (cast(typeof(perf_branch_entry_bitmanip)) v << 4U) & 1048560U));
+ }
+
+ enum ulong cycles_min = cast(ulong) 0U;
+ enum ulong cycles_max = cast(ulong) 65535U;
+ ///
+ @property ulong type() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_branch_entry_bitmanip & 15728640U) >> 20U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void type(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= type_min, "Value is smaller than the minimum value of bitfield 'type'");
+ assert(v <= type_max, "Value is greater than the maximum value of bitfield 'type'");
+ perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))(
+ (perf_branch_entry_bitmanip & (-1 - cast(typeof(perf_branch_entry_bitmanip)) 15728640U)) | (
+ (cast(typeof(perf_branch_entry_bitmanip)) v << 20U) & 15728640U));
+ }
+
+ enum ulong type_min = cast(ulong) 0U;
+ enum ulong type_max = cast(ulong) 15U;
+ ///
+ @property ulong reserved() @safe pure nothrow @nogc const
+ {
+ auto result = (perf_branch_entry_bitmanip & 18446744073692774400UL) >> 24U;
+ return cast(ulong) result;
+ }
+ ///
+ @property void reserved(ulong v) @safe pure nothrow @nogc
+ {
+ assert(v >= reserved_min,
+ "Value is smaller than the minimum value of bitfield 'reserved'");
+ assert(v <= reserved_max,
+ "Value is greater than the maximum value of bitfield 'reserved'");
+ perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))(
+ (perf_branch_entry_bitmanip & (-1 - cast(
+ typeof(perf_branch_entry_bitmanip)) 18446744073692774400UL)) | (
+ (cast(typeof(perf_branch_entry_bitmanip)) v << 24U) & 18446744073692774400UL));
+ }
+
+ enum ulong reserved_min = cast(ulong) 0U;
+ enum ulong reserved_max = cast(ulong) 1099511627775UL;
+}
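For illustration, a minimal sketch of summarising a PERF_SAMPLE_BRANCH_STACK payload once its entries have been extracted into a slice; countMispredicted is a hypothetical helper, not part of this header.

    size_t countMispredicted(const(perf_branch_entry)[] lbr)
    {
        size_t n;
        foreach (ref entry; lbr)
        {
            // mispred/predicted stay 0 when the PMU reports no prediction info
            if (entry.mispred)
                ++n;
        }
        return n;
    }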
diff --git a/libphobos/libdruntime/core/sys/linux/sys/procfs.d b/libphobos/libdruntime/core/sys/linux/sys/procfs.d
new file mode 100644
index 0000000..6a113e1
--- /dev/null
+++ b/libphobos/libdruntime/core/sys/linux/sys/procfs.d
@@ -0,0 +1,15 @@
+/**
+ * D header file for GNU/Linux.
+ *
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * Authors: Teodor Dutu
+ */
+
+module core.sys.linux.sys.procfs;
+
+import core.sys.posix.sys.types : pid_t;
+
+version (linux)
+{
+ alias lwpid_t = pid_t;
+}
diff --git a/libphobos/libdruntime/core/sys/netbsd/sys/elf32.d b/libphobos/libdruntime/core/sys/netbsd/sys/elf32.d
index b22b97f..ac623d6 100644
--- a/libphobos/libdruntime/core/sys/netbsd/sys/elf32.d
+++ b/libphobos/libdruntime/core/sys/netbsd/sys/elf32.d
@@ -112,7 +112,7 @@ extern (D)
{
auto ELF32_M_SYM(I)(I info) { return info >> 8; }
auto ELF32_M_SIZE(I)(I info) { return cast(ubyte)info; }
- auto ELF32_M_INFO(S, SZ)(S sym, SZ size) { return (sym << 8) + cast(ubye)size; }
+ auto ELF32_M_INFO(S, SZ)(S sym, SZ size) { return (sym << 8) + cast(ubyte)size; }
}
struct Elf32_Cap
diff --git a/libphobos/libdruntime/core/sys/netbsd/sys/elf64.d b/libphobos/libdruntime/core/sys/netbsd/sys/elf64.d
index f78d066..659ac40 100644
--- a/libphobos/libdruntime/core/sys/netbsd/sys/elf64.d
+++ b/libphobos/libdruntime/core/sys/netbsd/sys/elf64.d
@@ -118,7 +118,7 @@ extern (D)
{
auto ELF64_M_SYM(I)(I info) { return info >> 8; }
auto ELF64_M_SIZE(I)(I info) { return cast(ubyte)info; }
- auto ELF64_M_INFO(S, SZ)(S sym, SZ size) { return (sym << 8) + cast(ubye)size; }
+ auto ELF64_M_INFO(S, SZ)(S sym, SZ size) { return (sym << 8) + cast(ubyte)size; }
}
struct Elf64_Cap
diff --git a/libphobos/libdruntime/core/sys/openbsd/execinfo.d b/libphobos/libdruntime/core/sys/openbsd/execinfo.d
new file mode 100644
index 0000000..f5b317f
--- /dev/null
+++ b/libphobos/libdruntime/core/sys/openbsd/execinfo.d
@@ -0,0 +1,147 @@
+/**
+ * OpenBSD implementation of glibc's $(LINK2 http://www.gnu.org/software/libc/manual/html_node/Backtraces.html backtrace) facility.
+ *
+ * Copyright: Copyright Martin Nowak 2012.
+ * License: $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost License 1.0)
+ * Authors: Martin Nowak
+ * Source: $(DRUNTIMESRC core/sys/openbsd/_execinfo.d)
+ */
+module core.sys.openbsd.execinfo;
+
+version (OpenBSD):
+extern (C):
+nothrow:
+
+version (GNU)
+ version = BacktraceExternal;
+
+version (BacktraceExternal)
+{
+ size_t backtrace(void**, size_t);
+ char** backtrace_symbols(const(void*)*, size_t);
+ void backtrace_symbols_fd(const(void*)*, size_t, int);
+ char** backtrace_symbols_fmt(const(void*)*, size_t, const char*);
+ int backtrace_symbols_fd_fmt(const(void*)*, size_t, int, const char*);
+}
+else
+{
+ import core.sys.openbsd.dlfcn;
+
+ // Use extern (D) so that these functions don't collide with libexecinfo.
+
+ extern (D) int backtrace(void** buffer, int size)
+ {
+ import core.thread : thread_stackBottom;
+
+ void** p, pend=cast(void**)thread_stackBottom();
+ version (D_InlineAsm_X86)
+ asm nothrow @trusted { mov p[EBP], EBP; }
+ else version (D_InlineAsm_X86_64)
+ asm nothrow @trusted { mov p[RBP], RBP; }
+ else
+ static assert(false, "Architecture not supported.");
+
+ int i;
+ for (; i < size && p < pend; ++i)
+ {
+ buffer[i] = *(p + 1);
+ auto pnext = cast(void**)*p;
+ if (pnext <= p) break;
+ p = pnext;
+ }
+ return i;
+ }
+
+
+ extern (D) char** backtrace_symbols(const(void*)* buffer, int size)
+ {
+ static void* realloc(void* p, size_t len) nothrow
+ {
+ static import cstdlib=core.stdc.stdlib;
+ auto res = cstdlib.realloc(p, len);
+ if (res is null) cstdlib.free(p);
+ return res;
+ }
+
+ if (size <= 0) return null;
+
+ size_t pos = size * (char*).sizeof;
+ char** p = cast(char**)realloc(null, pos);
+ if (p is null) return null;
+
+ Dl_info info;
+ foreach (i, addr; buffer[0 .. size])
+ {
+ if (dladdr(addr, &info) == 0)
+ (cast(ubyte*)&info)[0 .. info.sizeof] = 0;
+ fixupDLInfo(addr, info);
+
+ immutable len = formatStackFrame(null, 0, addr, info);
+ assert(len > 0);
+
+ p = cast(char**)realloc(p, pos + len);
+ if (p is null) return null;
+
+ formatStackFrame(cast(char*)p + pos, len, addr, info) == len || assert(0);
+
+ p[i] = cast(char*)pos;
+ pos += len;
+ }
+ foreach (i; 0 .. size)
+ {
+ pos = cast(size_t)p[i];
+ p[i] = cast(char*)p + pos;
+ }
+ return p;
+ }
+
+
+ extern (D) void backtrace_symbols_fd(const(void*)* buffer, int size, int fd)
+ {
+ import core.sys.posix.unistd : write;
+ import core.stdc.stdlib : alloca;
+
+ if (size <= 0) return;
+
+ Dl_info info;
+ foreach (i, addr; buffer[0 .. size])
+ {
+ if (dladdr(addr, &info) == 0)
+ (cast(ubyte*)&info)[0 .. info.sizeof] = 0;
+ fixupDLInfo(addr, info);
+
+ enum maxAlloca = 1024;
+ enum min = (size_t a, size_t b) => a <= b ? a : b;
+ immutable len = min(formatStackFrame(null, 0, addr, info), maxAlloca);
+ assert(len > 0);
+
+ auto p = cast(char*)alloca(len);
+ if (p is null) return;
+
+ formatStackFrame(p, len, addr, info) >= len || assert(0);
+ p[len - 1] = '\n';
+ write(fd, p, len);
+ }
+ }
+
+
+ private void fixupDLInfo(const(void)* addr, ref Dl_info info)
+ {
+ if (info.dli_fname is null) info.dli_fname = "???";
+ if (info.dli_fbase is null) info.dli_fbase = null;
+ if (info.dli_sname is null) info.dli_sname = "???";
+ if (info.dli_saddr is null) info.dli_saddr = cast(void*)addr;
+ }
+
+
+ private size_t formatStackFrame(char* p, size_t plen, const(void)* addr, const ref Dl_info info)
+ {
+ import core.stdc.stdio : snprintf;
+
+ immutable off = addr - info.dli_saddr;
+ immutable len = snprintf(p, plen, "%p <%s+%zd> at %s",
+ addr, info.dli_sname, off, info.dli_fname);
+ assert(len > 0);
+ return cast(size_t)len + 1; // + '\0'
+ }
+}
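A hedged usage sketch for the declarations above; printBacktrace is a hypothetical helper, and the cast(int) keeps the calls valid for both the libexecinfo signatures (size_t parameters) and the extern (D) fallback (int parameters).

    void printBacktrace()
    {
        import core.stdc.stdio : printf;
        import core.stdc.stdlib : free;

        void*[32] frames;
        auto n = backtrace(frames.ptr, cast(int) frames.length);
        auto syms = backtrace_symbols(frames.ptr, cast(int) n);
        if (syms is null)
            return;
        foreach (i; 0 .. n)
            printf("%s\n", syms[i]);
        free(syms);     // a single free() releases the whole block
    }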
diff --git a/libphobos/libdruntime/core/sys/openbsd/sys/elf32.d b/libphobos/libdruntime/core/sys/openbsd/sys/elf32.d
index cefee38..dae977a 100644
--- a/libphobos/libdruntime/core/sys/openbsd/sys/elf32.d
+++ b/libphobos/libdruntime/core/sys/openbsd/sys/elf32.d
@@ -112,7 +112,7 @@ extern (D) pure
{
auto ELF32_M_SYM(I)(I info) @safe { return info >> 8; }
auto ELF32_M_SIZE(I)(I info) { return cast(ubyte)info; }
- auto ELF32_M_INFO(S, SZ)(S sym, SZ size) { return (sym << 8) + cast(ubye)size; }
+ auto ELF32_M_INFO(S, SZ)(S sym, SZ size) { return (sym << 8) + cast(ubyte)size; }
}
struct Elf32_Cap
diff --git a/libphobos/libdruntime/core/sys/openbsd/sys/elf64.d b/libphobos/libdruntime/core/sys/openbsd/sys/elf64.d
index d5e15fc..e26a5fc 100644
--- a/libphobos/libdruntime/core/sys/openbsd/sys/elf64.d
+++ b/libphobos/libdruntime/core/sys/openbsd/sys/elf64.d
@@ -118,7 +118,7 @@ extern (D) pure
{
auto ELF64_M_SYM(I)(I info) @safe { return info >> 8; }
auto ELF64_M_SIZE(I)(I info) { return cast(ubyte)info; }
- auto ELF64_M_INFO(S, SZ)(S sym, SZ size) @safe { return (sym << 8) + cast(ubye)size; }
+ auto ELF64_M_INFO(S, SZ)(S sym, SZ size) @safe { return (sym << 8) + cast(ubyte)size; }
}
struct Elf64_Cap
diff --git a/libphobos/libdruntime/core/sys/posix/arpa/inet.d b/libphobos/libdruntime/core/sys/posix/arpa/inet.d
index 6881142..c602e17 100644
--- a/libphobos/libdruntime/core/sys/posix/arpa/inet.d
+++ b/libphobos/libdruntime/core/sys/posix/arpa/inet.d
@@ -68,8 +68,6 @@ version (CRuntime_Glibc)
in_addr_t s_addr;
}
- enum INET_ADDRSTRLEN = 16;
-
@trusted pure
{
uint32_t htonl(uint32_t);
@@ -93,8 +91,6 @@ else version (Darwin)
in_addr_t s_addr;
}
- enum INET_ADDRSTRLEN = 16;
-
@trusted pure
{
uint32_t htonl(uint32_t);
@@ -118,8 +114,6 @@ else version (FreeBSD)
in_addr_t s_addr;
}
- enum INET_ADDRSTRLEN = 16;
-
@trusted pure
{
uint32_t htonl(uint32_t);
@@ -143,8 +137,6 @@ else version (NetBSD)
in_addr_t s_addr;
}
- enum INET_ADDRSTRLEN = 16;
-
@trusted pure
{
uint32_t htonl(uint32_t);
@@ -168,30 +160,22 @@ else version (OpenBSD)
in_addr_t s_addr;
}
- enum INET_ADDRSTRLEN = 16;
-
@safe pure extern (D)
{
- private
+ version (BigEndian)
{
- uint32_t __swap32( uint32_t x )
- {
- uint32_t byte32_swap = (x & 0xff) << 24 | (x &0xff00) << 8 |
- (x & 0xff0000) >> 8 | (x & 0xff000000) >> 24;
- return byte32_swap;
- }
-
- uint16_t __swap16( uint16_t x )
- {
- uint16_t byte16_swap = (x & 0xff) << 8 | (x & 0xff00) >> 8;
- return byte16_swap;
- }
+ uint32_t htonl(uint32_t x) { return x; }
+ uint16_t htons(uint16_t x) { return x; }
}
+ else
+ {
+ import core.bitop : bswap, byteswap;
- uint32_t htonl(uint32_t x) { return __swap32(x); }
- uint16_t htons(uint16_t x) { return __swap16(x); }
- uint32_t ntohl(uint32_t x) { return __swap32(x); }
- uint16_t ntohs(uint16_t x) { return __swap16(x); }
+ uint32_t htonl(uint32_t x) { return bswap(x); }
+ uint16_t htons(uint16_t x) { return byteswap(x); }
+ }
+ alias ntohl = htonl;
+ alias ntohs = htons;
}
in_addr_t inet_addr(const scope char*);
@@ -209,8 +193,6 @@ else version (DragonFlyBSD)
in_addr_t s_addr;
}
- enum INET_ADDRSTRLEN = 16;
-
@trusted pure
{
uint32_t htonl(uint32_t);
@@ -233,7 +215,6 @@ else version (Solaris)
{
in_addr_t s_addr;
}
- enum INET_ADDRSTRLEN = 16;
@trusted pure
{
@@ -257,30 +238,22 @@ else version (CRuntime_Bionic)
in_addr_t s_addr;
}
- enum INET_ADDRSTRLEN = 16;
-
@safe pure extern (D)
{
- private
+ version (BigEndian)
{
- uint32_t __swap32( uint32_t x )
- {
- uint32_t byte32_swap = (x & 0xff) << 24 | (x &0xff00) << 8 |
- (x & 0xff0000) >> 8 | (x & 0xff000000) >> 24;
- return byte32_swap;
- }
-
- uint16_t __swap16( uint16_t x )
- {
- uint16_t byte16_swap = (x & 0xff) << 8 | (x & 0xff00) >> 8;
- return byte16_swap;
- }
+ uint32_t htonl(uint32_t x) { return x; }
+ uint16_t htons(uint16_t x) { return x; }
}
+ else
+ {
+ import core.bitop : bswap, byteswap;
- uint32_t htonl(uint32_t x) { return __swap32(x); }
- uint16_t htons(uint16_t x) { return __swap16(x); }
- uint32_t ntohl(uint32_t x) { return __swap32(x); }
- uint16_t ntohs(uint16_t x) { return __swap16(x); }
+ uint32_t htonl(uint32_t x) { return bswap(x); }
+ uint16_t htons(uint16_t x) { return byteswap(x); }
+ }
+ alias ntohl = htonl;
+ alias ntohs = htons;
}
in_addr_t inet_addr(const scope char*);
@@ -298,8 +271,6 @@ else version (CRuntime_Musl)
in_addr_t s_addr;
}
- enum INET_ADDRSTRLEN = 16;
-
@trusted pure
{
uint32_t htonl(uint32_t);
@@ -323,8 +294,6 @@ else version (CRuntime_UClibc)
in_addr_t s_addr;
}
- enum INET_ADDRSTRLEN = 16;
-
@trusted pure
{
uint32_t htonl(uint32_t);
@@ -339,9 +308,6 @@ else version (CRuntime_UClibc)
int inet_pton(int, const scope char*, void*);
}
-//
-// IPV6 (IP6)
-//
/*
NOTE: The following must be defined in core.sys.posix.arpa.inet to break
a circular import: INET6_ADDRSTRLEN.
@@ -349,39 +315,5 @@ NOTE: The following must must be defined in core.sys.posix.arpa.inet to break
INET6_ADDRSTRLEN // from core.sys.posix.netinet.in_
*/
-version (CRuntime_Glibc)
-{
- enum INET6_ADDRSTRLEN = 46;
-}
-else version (Darwin)
-{
- enum INET6_ADDRSTRLEN = 46;
-}
-else version (FreeBSD)
-{
- enum INET6_ADDRSTRLEN = 46;
-}
-else version (NetBSD)
-{
- enum INET6_ADDRSTRLEN = 46;
-}
-else version (OpenBSD)
-{
- enum INET6_ADDRSTRLEN = 46;
-}
-else version (DragonFlyBSD)
-{
- enum INET6_ADDRSTRLEN = 46;
-}
-else version (Solaris)
-{
- enum INET6_ADDRSTRLEN = 46;
-}
-else version (CRuntime_Bionic)
-{
- enum INET6_ADDRSTRLEN = 46;
-}
-else version (CRuntime_UClibc)
-{
- enum INET6_ADDRSTRLEN = 46;
-}
+enum INET_ADDRSTRLEN = 16;
+enum INET6_ADDRSTRLEN = 46;
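As a hedged sanity check of the byte-order helpers introduced above: on a little-endian target htonl/htons reduce to the core.bitop swaps, and because byte swapping is its own inverse, aliasing ntohl/ntohs to them is sufficient. A small illustrative unittest:

    unittest
    {
        import core.bitop : bswap, byteswap;

        assert(bswap(0x12345678u) == 0x78563412u);        // what htonl does on little-endian
        assert(byteswap(cast(ushort) 0x1234) == 0x3412);  // what htons does on little-endian
        // hence ntohl(htonl(x)) == x and ntohs(htons(x)) == x
    }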
diff --git a/libphobos/libdruntime/core/sys/posix/fcntl.d b/libphobos/libdruntime/core/sys/posix/fcntl.d
index 59df921..6833f3b 100644
--- a/libphobos/libdruntime/core/sys/posix/fcntl.d
+++ b/libphobos/libdruntime/core/sys/posix/fcntl.d
@@ -159,6 +159,7 @@ version (CRuntime_Glibc)
enum O_APPEND = 0x400; // octal 02000
enum O_NONBLOCK = 0x800; // octal 04000
+ enum O_CLOEXEC = 0x80000; // octal 02000000
enum O_SYNC = 0x101000; // octal 04010000
enum O_DSYNC = 0x1000; // octal 010000
enum O_RSYNC = O_SYNC;
@@ -172,6 +173,7 @@ version (CRuntime_Glibc)
enum O_APPEND = 0x00008; // octal 010
enum O_NONBLOCK = 0x10004; // octal 0200004
+ enum O_CLOEXEC = 0x200000; // octal 010000000
enum O_SYNC = 0x48000; // octal 01100000
enum O_DSYNC = 0x40000; // octal 01000000
enum O_RSYNC = 0x80000; // octal 02000000
@@ -186,6 +188,7 @@ version (CRuntime_Glibc)
enum O_APPEND = 0x0008;
enum O_DSYNC = 0x0010;
enum O_NONBLOCK = 0x0080;
+ enum O_CLOEXEC = 0x80000;
enum O_RSYNC = O_SYNC;
enum O_SYNC = 0x4010;
}
@@ -198,6 +201,7 @@ version (CRuntime_Glibc)
enum O_APPEND = 0x400; // octal 02000
enum O_NONBLOCK = 0x800; // octal 04000
+ enum O_CLOEXEC = 0x80000; // octal 02000000
enum O_SYNC = 0x101000; // octal 04010000
enum O_DSYNC = 0x1000; // octal 010000
enum O_RSYNC = O_SYNC;
@@ -211,6 +215,7 @@ version (CRuntime_Glibc)
enum O_APPEND = 0x400; // octal 02000
enum O_NONBLOCK = 0x800; // octal 04000
+ enum O_CLOEXEC = 0x80000; // octal 02000000
enum O_SYNC = 0x101000; // octal 04010000
enum O_DSYNC = 0x1000; // octal 010000
enum O_RSYNC = O_SYNC;
@@ -224,6 +229,7 @@ version (CRuntime_Glibc)
enum O_APPEND = 0x400; // octal 02000
enum O_NONBLOCK = 0x800; // octal 04000
+ enum O_CLOEXEC = 0x80000; // octal 02000000
enum O_SYNC = 0x101000; // octal 04010000
enum O_DSYNC = 0x1000; // octal 010000
enum O_RSYNC = O_SYNC;
@@ -237,6 +243,7 @@ version (CRuntime_Glibc)
enum O_APPEND = 0x8;
enum O_NONBLOCK = 0x4000;
+ enum O_CLOEXEC = 0x400000;
enum O_SYNC = 0x802000;
enum O_DSYNC = 0x2000;
enum O_RSYNC = O_SYNC;
@@ -250,6 +257,7 @@ version (CRuntime_Glibc)
enum O_APPEND = 0x400; // octal 02000
enum O_NONBLOCK = 0x800; // octal 04000
+ enum O_CLOEXEC = 0x80000; // octal 02000000
enum O_SYNC = 0x101000; // octal 04010000
enum O_DSYNC = 0x1000; // octal 010000
enum O_RSYNC = O_SYNC;
@@ -895,10 +903,10 @@ else version (CRuntime_Musl)
O_SEARCH = O_PATH,
O_EXEC = O_PATH,
- O_ACCMODE = (03|O_SEARCH),
- O_RDONLY = 00,
- O_WRONLY = 01,
- O_RDWR = 02,
+ O_ACCMODE = (3|O_SEARCH),
+ O_RDONLY = 0,
+ O_WRONLY = 1,
+ O_RDWR = 2,
}
enum
{
diff --git a/libphobos/libdruntime/core/sys/posix/net/if_.d b/libphobos/libdruntime/core/sys/posix/net/if_.d
index 3713673..e63af4f 100644
--- a/libphobos/libdruntime/core/sys/posix/net/if_.d
+++ b/libphobos/libdruntime/core/sys/posix/net/if_.d
@@ -157,4 +157,4 @@ else version (CRuntime_UClibc)
char* if_indextoname(uint, char*);
if_nameindex_t* if_nameindex();
void if_freenameindex(if_nameindex_t*);
-}
+}
\ No newline at end of file
diff --git a/libphobos/libdruntime/core/sys/posix/semaphore.d b/libphobos/libdruntime/core/sys/posix/semaphore.d
index 4f6f639..a163e59 100644
--- a/libphobos/libdruntime/core/sys/posix/semaphore.d
+++ b/libphobos/libdruntime/core/sys/posix/semaphore.d
@@ -99,7 +99,7 @@ else version (NetBSD)
}
else version (OpenBSD)
{
- struct __sem { }
+ struct __sem;
alias sem_t = __sem*;
enum SEM_FAILED = cast(sem_t*) null;
diff --git a/libphobos/libdruntime/core/sys/posix/setjmp.d b/libphobos/libdruntime/core/sys/posix/setjmp.d
index b98d321..91e3a19 100644
--- a/libphobos/libdruntime/core/sys/posix/setjmp.d
+++ b/libphobos/libdruntime/core/sys/posix/setjmp.d
@@ -261,6 +261,10 @@ else version (OpenBSD)
{
enum _JBLEN = 64;
}
+ else version (AArch64)
+ {
+ enum _JBLEN = 64;
+ }
else version (PPC)
{
enum _JBLEN = 100;
diff --git a/libphobos/libdruntime/core/sys/posix/stdio.d b/libphobos/libdruntime/core/sys/posix/stdio.d
index 41b52da..c8f92ec 100644
--- a/libphobos/libdruntime/core/sys/posix/stdio.d
+++ b/libphobos/libdruntime/core/sys/posix/stdio.d
@@ -526,6 +526,16 @@ else version (CRuntime_Musl)
int putc_unlocked(int, FILE*);
int putchar_unlocked(int);
}
+else version (CRuntime_Bionic)
+{
+ void flockfile(FILE*);
+ int ftrylockfile(FILE*);
+ void funlockfile(FILE*);
+ int getc_unlocked(FILE*);
+ int getchar_unlocked();
+ int putc_unlocked(int, FILE*);
+ int putchar_unlocked(int);
+}
else version (Darwin)
{
void flockfile(FILE*);
diff --git a/libphobos/libdruntime/core/sys/posix/string.d b/libphobos/libdruntime/core/sys/posix/string.d
index e17dfc6..b9e1c1c 100644
--- a/libphobos/libdruntime/core/sys/posix/string.d
+++ b/libphobos/libdruntime/core/sys/posix/string.d
@@ -31,11 +31,11 @@ public import core.sys.posix.locale : locale_t;
public import core.stdc.string;
/// Copy string until character found
-void* memccpy(return void* dst, scope const void* src, int c, size_t n);
+void* memccpy(return void* dst, scope const void* src, int c, size_t n) pure;
/// Copy string (including terminating '\0')
-char* stpcpy(return char* dst, scope const char* src);
+char* stpcpy(return char* dst, scope const char* src) pure;
/// Ditto
-char* stpncpy(return char* dst, const char* src, size_t len);
+char* stpncpy(return char* dst, const char* src, size_t len) pure;
/// Compare strings according to current collation
int strcoll_l(scope const char* s1, scope const char* s2, locale_t locale);
///
@@ -43,7 +43,7 @@ char* strerror_l(int, locale_t);
/// Save a copy of a string
char* strndup(scope const char* str, size_t len);
/// Find length of string up to `maxlen`
-size_t strnlen(scope const char* str, size_t maxlen);
+size_t strnlen(scope const char* str, size_t maxlen) pure;
/// System signal messages
const(char)* strsignal(int);
/// Isolate sequential tokens in a null-terminated string
diff --git a/libphobos/libdruntime/core/sys/windows/basetsd.d b/libphobos/libdruntime/core/sys/windows/basetsd.d
index 97983c6..3bcac12 100644
--- a/libphobos/libdruntime/core/sys/windows/basetsd.d
+++ b/libphobos/libdruntime/core/sys/windows/basetsd.d
@@ -59,7 +59,7 @@ package mixin template AlignedStr(int alignVal, string name, string memberlist,
mixin( _alignSpec ~ " struct " ~ name ~" { " ~ _alignSpec ~":"~ memberlist~" }" );
}
-version (unittest) {
+version (CoreUnittest) {
private mixin AlignedStr!(16, "_Test_Aligned_Str", q{char a; char b;});
private mixin AlignedStr!(0, "_Test_NoAligned_Str", q{char a; char b;});
}
diff --git a/libphobos/libdruntime/core/sys/windows/dll.d b/libphobos/libdruntime/core/sys/windows/dll.d
index cc2422b..8e9d7a0 100644
--- a/libphobos/libdruntime/core/sys/windows/dll.d
+++ b/libphobos/libdruntime/core/sys/windows/dll.d
@@ -425,7 +425,6 @@ int dll_getRefCount( HINSTANCE hInstance ) nothrow @nogc
mov peb, RAX;
}
}
-
}
else version (Win32)
{
diff --git a/libphobos/libdruntime/core/sys/windows/sqlext.d b/libphobos/libdruntime/core/sys/windows/sqlext.d
index 3acfc5a..1f89105 100644
--- a/libphobos/libdruntime/core/sys/windows/sqlext.d
+++ b/libphobos/libdruntime/core/sys/windows/sqlext.d
@@ -535,7 +535,7 @@ enum SQL_U_UNION_ALL = 2;
enum SQL_UB_OFF = 0UL;
enum SQL_UB_DEFAULT = SQL_UB_OFF;
-enum SQL_UB_ON = 01UL;
+enum SQL_UB_ON = 1UL;
enum SQL_UNION = 96;
enum SQL_UNSEARCHABLE = 0;
diff --git a/libphobos/libdruntime/core/thread/fiber.d b/libphobos/libdruntime/core/thread/fiber.d
index 2f90f17..56f6d67 100644
--- a/libphobos/libdruntime/core/thread/fiber.d
+++ b/libphobos/libdruntime/core/thread/fiber.d
@@ -1710,7 +1710,7 @@ unittest {
assert( composed.state == Fiber.State.TERM );
}
-version (unittest)
+version (CoreUnittest)
{
class TestFiber : Fiber
{
diff --git a/libphobos/libdruntime/core/thread/osthread.d b/libphobos/libdruntime/core/thread/osthread.d
index 9fcd30e..653adb9 100644
--- a/libphobos/libdruntime/core/thread/osthread.d
+++ b/libphobos/libdruntime/core/thread/osthread.d
@@ -1049,7 +1049,7 @@ class Thread : ThreadBase
}
}
-private Thread toThread(ThreadBase t) @trusted nothrow @nogc pure
+private Thread toThread(return scope ThreadBase t) @trusted nothrow @nogc pure
{
return cast(Thread) cast(void*) t;
}
@@ -1209,6 +1209,18 @@ unittest
thr.join();
}
+// https://issues.dlang.org/show_bug.cgi?id=22124
+unittest
+{
+ Thread thread = new Thread({});
+ auto fun(Thread t, int x)
+ {
+ t.__ctor({x = 3;});
+ return t;
+ }
+ static assert(!__traits(compiles, () @nogc => fun(thread, 3) ));
+}
+
unittest
{
import core.sync.semaphore;
@@ -2212,15 +2224,7 @@ version (Windows)
void append( Throwable t )
{
- if ( obj.m_unhandled is null )
- obj.m_unhandled = t;
- else
- {
- Throwable last = obj.m_unhandled;
- while ( last.next !is null )
- last = last.next;
- last.next = t;
- }
+ obj.m_unhandled = Throwable.chainTogether(obj.m_unhandled, t);
}
version (D_InlineAsm_X86)
@@ -2367,15 +2371,7 @@ else version (Posix)
void append( Throwable t )
{
- if ( obj.m_unhandled is null )
- obj.m_unhandled = t;
- else
- {
- Throwable last = obj.m_unhandled;
- while ( last.next !is null )
- last = last.next;
- last.next = t;
- }
+ obj.m_unhandled = Throwable.chainTogether(obj.m_unhandled, t);
}
try
{
diff --git a/libphobos/libdruntime/core/thread/threadbase.d b/libphobos/libdruntime/core/thread/threadbase.d
index 0a8de10..4592bf1 100644
--- a/libphobos/libdruntime/core/thread/threadbase.d
+++ b/libphobos/libdruntime/core/thread/threadbase.d
@@ -573,11 +573,9 @@ package(core.thread):
static void initLocks() @nogc
{
- _slock[] = typeid(Mutex).initializer[];
- (cast(Mutex)_slock.ptr).__ctor();
-
- _criticalRegionLock[] = typeid(Mutex).initializer[];
- (cast(Mutex)_criticalRegionLock.ptr).__ctor();
+ import core.lifetime : emplace;
+ emplace!Mutex(_slock[]);
+ emplace!Mutex(_criticalRegionLock[]);
}
static void termLocks() @nogc
@@ -1338,8 +1336,8 @@ package
void initLowlevelThreads() @nogc
{
- ll_lock[] = typeid(Mutex).initializer[];
- lowlevelLock.__ctor();
+ import core.lifetime : emplace;
+ emplace(lowlevelLock());
}
void termLowlevelThreads() @nogc
diff --git a/libphobos/libdruntime/core/time.d b/libphobos/libdruntime/core/time.d
index e7744c8..26e515b 100644
--- a/libphobos/libdruntime/core/time.d
+++ b/libphobos/libdruntime/core/time.d
@@ -22,8 +22,6 @@
system clock ticks, using the highest precision that the system provides.))
$(TR $(TDNW $(LREF MonoTime)) $(TD Represents a monotonic timestamp in
system clock ticks, using the highest precision that the system provides.))
- $(TR $(TDNW $(LREF FracSec)) $(TD Represents fractional seconds
- (portions of time smaller than a second).))
$(LEADINGROW Functions)
$(TR $(TDNW $(LREF convert)) $(TD Generic way of converting between two time
units.))
@@ -40,37 +38,27 @@
$(TR $(TH )
$(TH From $(LREF Duration))
$(TH From $(LREF TickDuration))
- $(TH From $(LREF FracSec))
$(TH From units)
)
$(TR $(TD $(B To $(LREF Duration)))
$(TD -)
$(TD $(D tickDuration.)$(REF_SHORT to, std,conv)$(D !Duration()))
- $(TD -)
$(TD $(D dur!"msecs"(5)) or $(D 5.msecs()))
)
$(TR $(TD $(B To $(LREF TickDuration)))
$(TD $(D duration.)$(REF_SHORT to, std,conv)$(D !TickDuration()))
$(TD -)
- $(TD -)
$(TD $(D TickDuration.from!"msecs"(msecs)))
)
- $(TR $(TD $(B To $(LREF FracSec)))
- $(TD $(D duration.fracSec))
- $(TD -)
- $(TD -)
- $(TD $(D FracSec.from!"msecs"(msecs)))
- )
$(TR $(TD $(B To units))
$(TD $(D duration.total!"days"))
$(TD $(D tickDuration.msecs))
- $(TD $(D fracSec.msecs))
$(TD $(D convert!("days", "msecs")(msecs)))
))
Copyright: Copyright 2010 - 2012
- License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
- Authors: Jonathan M Davis and Kato Shoichi
+ License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ Authors: $(HTTP jmdavisprog.com, Jonathan M Davis) and Kato Shoichi
Source: $(DRUNTIMESRC core/_time.d)
Macros:
NBSP=&nbsp;
@@ -80,7 +68,6 @@ module core.time;
import core.exception;
import core.stdc.time;
import core.stdc.stdio;
-import core.internal.traits : _Unqual = Unqual;
import core.internal.string;
version (Windows)
@@ -128,13 +115,6 @@ ulong mach_absolute_time();
}
-//To verify that an lvalue isn't required.
-version (unittest) private T copy(T)(T t)
-{
- return t;
-}
-
-
/++
What type of clock to use with $(LREF MonoTime) / $(LREF MonoTimeImpl) or
$(D std.datetime.Clock.currTime). They default to $(D ClockType.normal),
@@ -181,7 +161,7 @@ version (CoreDdoc) enum ClockType
On systems which do not support a coarser clock,
$(D MonoTimeImpl!(ClockType.coarse)) will internally use the same clock
- as $(D Monotime) does, and $(D Clock.currTime!(ClockType.coarse)) will
+ as $(D MonoTime) does, and $(D Clock.currTime!(ClockType.coarse)) will
use the same clock as $(D Clock.currTime). This is because the coarse
clock is doing the same thing as the normal clock (just at lower
precision), whereas some of the other clock types
@@ -536,7 +516,7 @@ public:
+/
static @property nothrow @nogc Duration min() { return Duration(long.min); }
- unittest
+ version (CoreUnittest) unittest
{
assert(zero == dur!"seconds"(0));
assert(Duration.max == Duration(long.max));
@@ -561,30 +541,31 @@ public:
+/
int opCmp(Duration rhs) const nothrow @nogc
{
- if (_hnsecs < rhs._hnsecs)
- return -1;
- if (_hnsecs > rhs._hnsecs)
- return 1;
- return 0;
+ return (_hnsecs > rhs._hnsecs) - (_hnsecs < rhs._hnsecs);
}
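The new body relies on the branchless three-way comparison idiom, where (a > b) - (a < b) maps to exactly -1, 0 or 1; a tiny hypothetical illustration:

    static assert(((3 > 5) - (3 < 5)) == -1);
    static assert(((5 > 5) - (5 < 5)) == 0);
    static assert(((7 > 5) - (7 < 5)) == 1);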
- unittest
+ version (CoreUnittest) unittest
{
- foreach (T; _TypeTuple!(Duration, const Duration, immutable Duration))
+ import core.internal.traits : rvalueOf;
+ foreach (T; AliasSeq!(Duration, const Duration, immutable Duration))
{
- foreach (U; _TypeTuple!(Duration, const Duration, immutable Duration))
+ foreach (U; AliasSeq!(Duration, const Duration, immutable Duration))
{
T t = 42;
- U u = t;
+ // workaround https://issues.dlang.org/show_bug.cgi?id=18296
+ version (D_Coverage)
+ U u = T(t._hnsecs);
+ else
+ U u = t;
assert(t == u);
- assert(copy(t) == u);
- assert(t == copy(u));
+ assert(rvalueOf(t) == u);
+ assert(t == rvalueOf(u));
}
}
- foreach (D; _TypeTuple!(Duration, const Duration, immutable Duration))
+ foreach (D; AliasSeq!(Duration, const Duration, immutable Duration))
{
- foreach (E; _TypeTuple!(Duration, const Duration, immutable Duration))
+ foreach (E; AliasSeq!(Duration, const Duration, immutable Duration))
{
assert((cast(D)Duration(12)).opCmp(cast(E)Duration(12)) == 0);
assert((cast(D)Duration(-12)).opCmp(cast(E)Duration(-12)) == 0);
@@ -595,23 +576,23 @@ public:
assert((cast(D)Duration(12)).opCmp(cast(E)Duration(10)) > 0);
assert((cast(D)Duration(12)).opCmp(cast(E)Duration(-12)) > 0);
- assert(copy(cast(D)Duration(12)).opCmp(cast(E)Duration(12)) == 0);
- assert(copy(cast(D)Duration(-12)).opCmp(cast(E)Duration(-12)) == 0);
+ assert(rvalueOf(cast(D)Duration(12)).opCmp(cast(E)Duration(12)) == 0);
+ assert(rvalueOf(cast(D)Duration(-12)).opCmp(cast(E)Duration(-12)) == 0);
- assert(copy(cast(D)Duration(10)).opCmp(cast(E)Duration(12)) < 0);
- assert(copy(cast(D)Duration(-12)).opCmp(cast(E)Duration(12)) < 0);
+ assert(rvalueOf(cast(D)Duration(10)).opCmp(cast(E)Duration(12)) < 0);
+ assert(rvalueOf(cast(D)Duration(-12)).opCmp(cast(E)Duration(12)) < 0);
- assert(copy(cast(D)Duration(12)).opCmp(cast(E)Duration(10)) > 0);
- assert(copy(cast(D)Duration(12)).opCmp(cast(E)Duration(-12)) > 0);
+ assert(rvalueOf(cast(D)Duration(12)).opCmp(cast(E)Duration(10)) > 0);
+ assert(rvalueOf(cast(D)Duration(12)).opCmp(cast(E)Duration(-12)) > 0);
- assert((cast(D)Duration(12)).opCmp(copy(cast(E)Duration(12))) == 0);
- assert((cast(D)Duration(-12)).opCmp(copy(cast(E)Duration(-12))) == 0);
+ assert((cast(D)Duration(12)).opCmp(rvalueOf(cast(E)Duration(12))) == 0);
+ assert((cast(D)Duration(-12)).opCmp(rvalueOf(cast(E)Duration(-12))) == 0);
- assert((cast(D)Duration(10)).opCmp(copy(cast(E)Duration(12))) < 0);
- assert((cast(D)Duration(-12)).opCmp(copy(cast(E)Duration(12))) < 0);
+ assert((cast(D)Duration(10)).opCmp(rvalueOf(cast(E)Duration(12))) < 0);
+ assert((cast(D)Duration(-12)).opCmp(rvalueOf(cast(E)Duration(12))) < 0);
- assert((cast(D)Duration(12)).opCmp(copy(cast(E)Duration(10))) > 0);
- assert((cast(D)Duration(12)).opCmp(copy(cast(E)Duration(-12))) > 0);
+ assert((cast(D)Duration(12)).opCmp(rvalueOf(cast(E)Duration(10))) > 0);
+ assert((cast(D)Duration(12)).opCmp(rvalueOf(cast(E)Duration(-12))) > 0);
}
}
}
@@ -634,20 +615,20 @@ public:
rhs = The duration to add to or subtract from this $(D Duration).
+/
Duration opBinary(string op, D)(D rhs) const nothrow @nogc
- if (((op == "+" || op == "-" || op == "%") && is(_Unqual!D == Duration)) ||
- ((op == "+" || op == "-") && is(_Unqual!D == TickDuration)))
+ if (((op == "+" || op == "-" || op == "%") && is(immutable D == immutable Duration)) ||
+ ((op == "+" || op == "-") && is(immutable D == immutable TickDuration)))
{
- static if (is(_Unqual!D == Duration))
+ static if (is(immutable D == immutable Duration))
return Duration(mixin("_hnsecs " ~ op ~ " rhs._hnsecs"));
- else if (is(_Unqual!D == TickDuration))
+ else
return Duration(mixin("_hnsecs " ~ op ~ " rhs.hnsecs"));
}
- unittest
+ version (CoreUnittest) unittest
{
- foreach (D; _TypeTuple!(Duration, const Duration, immutable Duration))
+ foreach (D; AliasSeq!(Duration, const Duration, immutable Duration))
{
- foreach (E; _TypeTuple!(Duration, const Duration, immutable Duration))
+ foreach (E; AliasSeq!(Duration, const Duration, immutable Duration))
{
assert((cast(D)Duration(5)) + (cast(E)Duration(7)) == Duration(12));
assert((cast(D)Duration(5)) - (cast(E)Duration(7)) == Duration(-2));
@@ -678,7 +659,7 @@ public:
assert((cast(D)Duration(-7)) % (cast(E)Duration(5)) == Duration(-2));
}
- foreach (T; _TypeTuple!(TickDuration, const TickDuration, immutable TickDuration))
+ foreach (T; AliasSeq!(TickDuration, const TickDuration, immutable TickDuration))
{
assertApprox((cast(D)Duration(5)) + cast(T)TickDuration.from!"usecs"(7), Duration(70), Duration(80));
assertApprox((cast(D)Duration(5)) - cast(T)TickDuration.from!"usecs"(7), Duration(-70), Duration(-60));
@@ -720,16 +701,16 @@ public:
+/
Duration opBinaryRight(string op, D)(D lhs) const nothrow @nogc
if ((op == "+" || op == "-") &&
- is(_Unqual!D == TickDuration))
+ is(immutable D == immutable TickDuration))
{
return Duration(mixin("lhs.hnsecs " ~ op ~ " _hnsecs"));
}
- unittest
+ version (CoreUnittest) unittest
{
- foreach (D; _TypeTuple!(Duration, const Duration, immutable Duration))
+ foreach (D; AliasSeq!(Duration, const Duration, immutable Duration))
{
- foreach (T; _TypeTuple!(TickDuration, const TickDuration, immutable TickDuration))
+ foreach (T; AliasSeq!(TickDuration, const TickDuration, immutable TickDuration))
{
assertApprox((cast(T)TickDuration.from!"usecs"(7)) + cast(D)Duration(5), Duration(70), Duration(80));
assertApprox((cast(T)TickDuration.from!"usecs"(7)) - cast(D)Duration(5), Duration(60), Duration(70));
@@ -772,18 +753,18 @@ public:
Params:
rhs = The duration to add to or subtract from this $(D Duration).
+/
- ref Duration opOpAssign(string op, D)(in D rhs) nothrow @nogc
- if (((op == "+" || op == "-" || op == "%") && is(_Unqual!D == Duration)) ||
- ((op == "+" || op == "-") && is(_Unqual!D == TickDuration)))
+ ref Duration opOpAssign(string op, D)(const scope D rhs) nothrow @nogc
+ if (((op == "+" || op == "-" || op == "%") && is(immutable D == immutable Duration)) ||
+ ((op == "+" || op == "-") && is(immutable D == immutable TickDuration)))
{
- static if (is(_Unqual!D == Duration))
+ static if (is(immutable D == immutable Duration))
mixin("_hnsecs " ~ op ~ "= rhs._hnsecs;");
- else if (is(_Unqual!D == TickDuration))
+ else
mixin("_hnsecs " ~ op ~ "= rhs.hnsecs;");
return this;
}
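
The opOpAssign parameter changes from in D rhs to const scope D rhs. Under the traditional meaning of in, the two are identical; spelling out const scope keeps the signature's meaning fixed regardless of how in is interpreted (for example under the -preview=in switch). A minimal sketch using a hypothetical struct S, not part of core.time:

    struct S
    {
        long value;

        // Old spelling: `in`, which has historically meant const scope.
        ref S addOld(in S rhs) { value += rhs.value; return this; }

        // New spelling: explicit, independent of the compiler's `in` semantics.
        ref S addNew(const scope S rhs) { value += rhs.value; return this; }
    }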
- unittest
+ version (CoreUnittest) unittest
{
static void test1(string op, E)(Duration actual, in E rhs, Duration expected, size_t line = __LINE__)
{
@@ -801,7 +782,7 @@ public:
assertApprox(actual, lower, upper, "op assign failed", line);
}
- foreach (E; _TypeTuple!(Duration, const Duration, immutable Duration))
+ foreach (E; AliasSeq!(Duration, const Duration, immutable Duration))
{
test1!"+="(Duration(5), (cast(E)Duration(7)), Duration(12));
test1!"-="(Duration(5), (cast(E)Duration(7)), Duration(-2));
@@ -832,7 +813,7 @@ public:
test1!"%="(Duration(-7), (cast(E)Duration(-5)), Duration(-2));
}
- foreach (T; _TypeTuple!(TickDuration, const TickDuration, immutable TickDuration))
+ foreach (T; AliasSeq!(TickDuration, const TickDuration, immutable TickDuration))
{
test2!"+="(Duration(5), cast(T)TickDuration.from!"usecs"(7), Duration(70), Duration(80));
test2!"-="(Duration(5), cast(T)TickDuration.from!"usecs"(7), Duration(-70), Duration(-60));
@@ -855,9 +836,9 @@ public:
test2!"-="(Duration(-7), cast(T)TickDuration.from!"usecs"(-5), Duration(38), Duration(48));
}
- foreach (D; _TypeTuple!(const Duration, immutable Duration))
+ foreach (D; AliasSeq!(const Duration, immutable Duration))
{
- foreach (E; _TypeTuple!(Duration, const Duration, immutable Duration,
+ foreach (E; AliasSeq!(Duration, const Duration, immutable Duration,
TickDuration, const TickDuration, immutable TickDuration))
{
D lhs = D(120);
@@ -888,9 +869,9 @@ public:
mixin("return Duration(_hnsecs " ~ op ~ " value);");
}
- unittest
+ version (CoreUnittest) unittest
{
- foreach (D; _TypeTuple!(Duration, const Duration, immutable Duration))
+ foreach (D; AliasSeq!(Duration, const Duration, immutable Duration))
{
assert((cast(D)Duration(5)) * 7 == Duration(35));
assert((cast(D)Duration(7)) * 5 == Duration(35));
@@ -909,9 +890,9 @@ public:
}
}
- unittest
+ version (CoreUnittest) unittest
{
- foreach (D; _TypeTuple!(Duration, const Duration, immutable Duration))
+ foreach (D; AliasSeq!(Duration, const Duration, immutable Duration))
{
assert((cast(D)Duration(5)) / 7 == Duration(0));
assert((cast(D)Duration(7)) / 5 == Duration(1));
@@ -950,7 +931,7 @@ public:
return this;
}
- unittest
+ version (CoreUnittest) unittest
{
static void test(D)(D actual, long value, Duration expected, size_t line = __LINE__)
{
@@ -982,7 +963,7 @@ public:
static assert(!__traits(compiles, idur *= 12));
}
- unittest
+ version (CoreUnittest) unittest
{
static void test(Duration actual, long value, Duration expected, size_t line = __LINE__)
{
@@ -1030,7 +1011,7 @@ public:
return _hnsecs / rhs._hnsecs;
}
- unittest
+ version (CoreUnittest) unittest
{
assert(Duration(5) / Duration(7) == 0);
assert(Duration(7) / Duration(5) == 1);
@@ -1069,9 +1050,9 @@ public:
return opBinary!op(value);
}
- unittest
+ version (CoreUnittest) unittest
{
- foreach (D; _TypeTuple!(Duration, const Duration, immutable Duration))
+ foreach (D; AliasSeq!(Duration, const Duration, immutable Duration))
{
assert(5 * cast(D)Duration(7) == Duration(35));
assert(7 * cast(D)Duration(5) == Duration(35));
@@ -1100,9 +1081,9 @@ public:
return Duration(-_hnsecs);
}
- unittest
+ version (CoreUnittest) unittest
{
- foreach (D; _TypeTuple!(Duration, const Duration, immutable Duration))
+ foreach (D; AliasSeq!(Duration, const Duration, immutable Duration))
{
assert(-(cast(D)Duration(7)) == Duration(-7));
assert(-(cast(D)Duration(5)) == Duration(-5));
@@ -1121,22 +1102,22 @@ public:
$(D duration.to!TickDuration())
+/
TickDuration opCast(T)() const nothrow @nogc
- if (is(_Unqual!T == TickDuration))
+ if (is(immutable T == immutable TickDuration))
{
return TickDuration.from!"hnsecs"(_hnsecs);
}
- unittest
+ version (CoreUnittest) unittest
{
- foreach (D; _TypeTuple!(Duration, const Duration, immutable Duration))
+ foreach (D; AliasSeq!(Duration, const Duration, immutable Duration))
{
- foreach (units; _TypeTuple!("seconds", "msecs", "usecs", "hnsecs"))
+ foreach (units; AliasSeq!("seconds", "msecs", "usecs", "hnsecs"))
{
enum unitsPerSec = convert!("seconds", units)(1);
if (TickDuration.ticksPerSec >= unitsPerSec)
{
- foreach (T; _TypeTuple!(TickDuration, const TickDuration, immutable TickDuration))
+ foreach (T; AliasSeq!(TickDuration, const TickDuration, immutable TickDuration))
{
auto t = TickDuration.from!units(1);
assertApprox(cast(T)cast(D)dur!units(1), t - TickDuration(1), t + TickDuration(1), units);
@@ -1165,7 +1146,7 @@ public:
return _hnsecs != 0;
}
- unittest
+ version (CoreUnittest) unittest
{
auto d = 10.minutes;
assert(d);
@@ -1175,7 +1156,7 @@ public:
//Temporary hack until bug http://d.puremagic.com/issues/show_bug.cgi?id=5747 is fixed.
Duration opCast(T)() const nothrow @nogc
- if (is(_Unqual!T == Duration))
+ if (is(immutable T == immutable Duration))
{
return this;
}
@@ -1299,13 +1280,13 @@ public:
enum allAreMutableIntegralTypes = allAreMutableIntegralTypes!(Args[1 .. $]);
}
- unittest
+ version (CoreUnittest) unittest
{
- foreach (T; _TypeTuple!(long, int, short, byte, ulong, uint, ushort, ubyte))
+ foreach (T; AliasSeq!(long, int, short, byte, ulong, uint, ushort, ubyte))
static assert(allAreMutableIntegralTypes!T);
- foreach (T; _TypeTuple!(long, int, short, byte, ulong, uint, ushort, ubyte))
+ foreach (T; AliasSeq!(long, int, short, byte, ulong, uint, ushort, ubyte))
static assert(!allAreMutableIntegralTypes!(const T));
- foreach (T; _TypeTuple!(char, wchar, dchar, float, double, real, string))
+ foreach (T; AliasSeq!(char, wchar, dchar, float, double, real, string))
static assert(!allAreMutableIntegralTypes!T);
static assert(allAreMutableIntegralTypes!(long, int, short, byte));
static assert(!allAreMutableIntegralTypes!(long, int, short, char, byte));
@@ -1366,9 +1347,9 @@ public:
}
}
- pure nothrow unittest
+ version (CoreUnittest) pure nothrow unittest
{
- foreach (D; _TypeTuple!(const Duration, immutable Duration))
+ foreach (D; AliasSeq!(const Duration, immutable Duration))
{
D d = dur!"weeks"(3) + dur!"days"(5) + dur!"hours"(19) + dur!"minutes"(7) +
dur!"seconds"(2) + dur!"hnsecs"(1234567);
@@ -1474,7 +1455,7 @@ public:
static assert(!is(typeof(d.split("hnsecs", "seconds", "msecs")())));
static assert(!is(typeof(d.split("seconds", "hnecs", "msecs")())));
static assert(!is(typeof(d.split("seconds", "msecs", "msecs")())));
- alias _TypeTuple!("nsecs", "hnsecs", "usecs", "msecs", "seconds",
+ alias AliasSeq!("nsecs", "hnsecs", "usecs", "msecs", "seconds",
"minutes", "hours", "days", "weeks") timeStrs;
foreach (i, str; timeStrs[1 .. $])
static assert(!is(typeof(d.split!(timeStrs[i - 1], str)())));
@@ -1521,10 +1502,7 @@ public:
units == "hnsecs" ||
units == "nsecs")
{
- static if (units == "nsecs")
- return convert!("hnsecs", "nsecs")(_hnsecs);
- else
- return getUnitsFromHNSecs!units(_hnsecs);
+ return convert!("hnsecs", units)(_hnsecs);
}
///
@@ -1543,9 +1521,9 @@ public:
assert(dur!"nsecs"(2007).total!"nsecs" == 2000);
}
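
The body of total!units above loses its special case for "nsecs": convert!("hnsecs", units) already scales up to nanoseconds and truncates for coarser units, so one call covers every accepted unit, as the dur!"nsecs"(2007) example shows. A few more grounded conversions for reference:

    // One hnsec is 100 ns; conversions to coarser units use truncating division.
    assert(convert!("hnsecs", "nsecs")(1) == 100);
    assert(convert!("hnsecs", "usecs")(25) == 2);
    assert(convert!("hnsecs", "seconds")(10_000_000) == 1);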
- unittest
+ version (CoreUnittest) unittest
{
- foreach (D; _TypeTuple!(const Duration, immutable Duration))
+ foreach (D; AliasSeq!(const Duration, immutable Duration))
{
assert((cast(D)dur!"weeks"(12)).total!"weeks" == 12);
assert((cast(D)dur!"weeks"(12)).total!"days" == 84);
@@ -1598,7 +1576,7 @@ public:
unit = "μs";
else
unit = plural ? units : units[0 .. $-1];
- res ~= signedToTempString(val, 10);
+ res ~= signedToTempString(val);
res ~= " ";
res ~= unit;
}
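
Here and throughout the patch, signedToTempString(val, 10) becomes signedToTempString(val); base 10 is the default radix, so the produced text is unchanged. Illustrative usage only, assuming the helper lives in core.internal.string as in current druntime:

    import core.internal.string : signedToTempString;

    string res;
    res ~= signedToTempString(-42);   // same text as signedToTempString(-42, 10)
    assert(res == "-42");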
@@ -1649,9 +1627,9 @@ public:
assert(usecs(-5239492).toString() == "-5 secs, -239 ms, and -492 μs");
}
- unittest
+ version (CoreUnittest) unittest
{
- foreach (D; _TypeTuple!(Duration, const Duration, immutable Duration))
+ foreach (D; AliasSeq!(Duration, const Duration, immutable Duration))
{
assert((cast(D)Duration(0)).toString() == "0 hnsecs");
assert((cast(D)Duration(1)).toString() == "1 hnsec");
@@ -1707,9 +1685,9 @@ public:
return _hnsecs < 0;
}
- unittest
+ version (CoreUnittest) unittest
{
- foreach (D; _TypeTuple!(Duration, const Duration, immutable Duration))
+ foreach (D; AliasSeq!(Duration, const Duration, immutable Duration))
{
assert(!(cast(D)Duration(100)).isNegative);
assert(!(cast(D)Duration(1)).isNegative);
@@ -1766,7 +1744,7 @@ unittest
td = The TickDuration to convert
+/
T to(string units, T, D)(D td) @safe pure nothrow @nogc
- if (is(_Unqual!D == TickDuration) &&
+ if (is(immutable D == immutable TickDuration) &&
(units == "seconds" ||
units == "msecs" ||
units == "usecs" ||
@@ -1803,8 +1781,9 @@ unittest
long tl = to!("seconds",long)(t);
assert(tl == 1000);
+ import core.stdc.math : fabs;
double td = to!("seconds",double)(t);
- assert(_abs(td - 1000) < 0.001);
+ assert(fabs(td - 1000) < 0.001);
}
unittest
@@ -1819,12 +1798,12 @@ unittest
auto _str(F)(F val)
{
static if (is(F == int) || is(F == long))
- return signedToTempString(val, 10);
+ return signedToTempString(val);
else
- return unsignedToTempString(val, 10);
+ return unsignedToTempString(val);
}
- foreach (F; _TypeTuple!(int,uint,long,ulong,float,double,real))
+ foreach (F; AliasSeq!(int,uint,long,ulong,float,double,real))
{
F t1f = to!(U,F)(t1);
F t2f = to!(U,F)(t2);
@@ -1950,7 +1929,7 @@ unittest
unittest
{
- foreach (D; _TypeTuple!(Duration, const Duration, immutable Duration))
+ foreach (D; AliasSeq!(Duration, const Duration, immutable Duration))
{
assert(dur!"weeks"(7).total!"weeks" == 7);
assert(dur!"days"(7).total!"days" == 7);
@@ -2093,7 +2072,7 @@ struct MonoTimeImpl(ClockType clockType)
static assert(0, "Unsupported platform");
// POD value, test mutable/const/immutable conversion
- unittest
+ version (CoreUnittest) unittest
{
MonoTimeImpl m;
const MonoTimeImpl cm = m;
@@ -2130,26 +2109,26 @@ struct MonoTimeImpl(ClockType clockType)
version (Windows)
{
- long ticks;
- if (QueryPerformanceCounter(&ticks) == 0)
- {
- // This probably cannot happen on Windows 95 or later
- import core.internal.abort : abort;
- abort("Call to QueryPerformanceCounter failed.");
- }
+ long ticks = void;
+ QueryPerformanceCounter(&ticks);
return MonoTimeImpl(ticks);
}
else version (Darwin)
return MonoTimeImpl(mach_absolute_time());
else version (Posix)
{
- timespec ts;
- if (clock_gettime(clockArg, &ts) != 0)
+ timespec ts = void;
+ immutable error = clock_gettime(clockArg, &ts);
+ // clockArg is known to be supported, and when tv_sec is long or larger,
+ // overflow cannot occur before roughly 292 billion years A.D., so the
+ // error check below is only compiled in for narrower tv_sec types.
+ static if (ts.tv_sec.max < long.max)
{
- import core.internal.abort : abort;
- abort("Call to clock_gettime failed.");
+ if (error)
+ {
+ import core.internal.abort : abort;
+ abort("Call to clock_gettime failed.");
+ }
}
-
return MonoTimeImpl(convClockFreq(ts.tv_sec * 1_000_000_000L + ts.tv_nsec,
1_000_000_000L,
ticksPerSecond));
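
In the Posix branch above, the timespec is declared = void because clock_gettime overwrites it, and per the comment the failure branch is only compiled in when tv_sec is narrower than long: the clock is known to be supported, and a long tv_sec cannot overflow for roughly 292 billion years. The raw nanosecond total is then rescaled to this clock's tick rate with convClockFreq(ticks, srcTicksPerSecond, dstTicksPerSecond). For example:

    // One second of nanoseconds rescaled to hnsecs (a 10 MHz tick rate):
    assert(convClockFreq(1_000_000_000L, 1_000_000_000L, 10_000_000L) == 10_000_000L);
    // 500 ms of nanoseconds rescaled to a 1 kHz tick source:
    assert(convClockFreq(500_000_000L, 1_000_000_000L, 1_000L) == 500);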
@@ -2176,7 +2155,7 @@ struct MonoTimeImpl(ClockType clockType)
MonoTimeImpl min() { return MonoTimeImpl(long.min); }
}
- unittest
+ version (CoreUnittest) unittest
{
assert(MonoTimeImpl.zero == MonoTimeImpl(0));
assert(MonoTimeImpl.max == MonoTimeImpl(long.max));
@@ -2199,28 +2178,28 @@ struct MonoTimeImpl(ClockType clockType)
+/
int opCmp(MonoTimeImpl rhs) const pure nothrow @nogc
{
- if (_ticks < rhs._ticks)
- return -1;
- return _ticks > rhs._ticks ? 1 : 0;
+ return (_ticks > rhs._ticks) - (_ticks < rhs._ticks);
}
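
The branching comparison above is replaced by the branchless three-way idiom (a > b) - (a < b): each comparison yields 0 or 1, so their difference is exactly -1, 0, or +1, and unlike a naive subtraction of the two longs it can never overflow or truncate. A sketch:

    // Branchless three-way comparison over long values.
    int cmp(long a, long b) { return (a > b) - (a < b); }

    assert(cmp(1, 2) == -1);
    assert(cmp(2, 2) ==  0);
    assert(cmp(3, 2) ==  1);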
- unittest
+ version (CoreUnittest) unittest
{
+ import core.internal.traits : rvalueOf;
const t = MonoTimeImpl.currTime;
- assert(t == copy(t));
+ assert(t == rvalueOf(t));
}
- unittest
+ version (CoreUnittest) unittest
{
+ import core.internal.traits : rvalueOf;
const before = MonoTimeImpl.currTime;
auto after = MonoTimeImpl(before._ticks + 42);
assert(before < after);
- assert(copy(before) <= before);
- assert(copy(after) > before);
- assert(after >= copy(after));
+ assert(rvalueOf(before) <= before);
+ assert(rvalueOf(after) > before);
+ assert(after >= rvalueOf(after));
}
- unittest
+ version (CoreUnittest) unittest
{
const currTime = MonoTimeImpl.currTime;
assert(MonoTimeImpl(long.max) > MonoTimeImpl(0));
@@ -2274,16 +2253,17 @@ assert(before + timeElapsed == after);
return Duration(convClockFreq(diff , ticksPerSecond, hnsecsPer!"seconds"));
}
- unittest
+ version (CoreUnittest) unittest
{
+ import core.internal.traits : rvalueOf;
const t = MonoTimeImpl.currTime;
- assert(t - copy(t) == Duration.zero);
+ assert(t - rvalueOf(t) == Duration.zero);
static assert(!__traits(compiles, t + t));
}
- unittest
+ version (CoreUnittest) unittest
{
- static void test(in MonoTimeImpl before, in MonoTimeImpl after, in Duration min)
+ static void test(const scope MonoTimeImpl before, const scope MonoTimeImpl after, const scope Duration min)
{
immutable diff = after - before;
assert(diff >= min);
@@ -2312,14 +2292,14 @@ assert(before + timeElapsed == after);
mixin("return MonoTimeImpl(_ticks " ~ op ~ " rhsConverted);");
}
- unittest
+ version (CoreUnittest) unittest
{
const t = MonoTimeImpl.currTime;
assert(t + Duration(0) == t);
assert(t - Duration(0) == t);
}
- unittest
+ version (CoreUnittest) unittest
{
const t = MonoTimeImpl.currTime;
@@ -2345,7 +2325,7 @@ assert(before + timeElapsed == after);
return this;
}
- unittest
+ version (CoreUnittest) unittest
{
auto mt = MonoTimeImpl.currTime;
const initial = mt;
@@ -2386,7 +2366,7 @@ assert(before + timeElapsed == after);
return _ticks;
}
- unittest
+ version (CoreUnittest) unittest
{
const mt = MonoTimeImpl.currTime;
assert(mt.ticks == mt._ticks);
@@ -2405,7 +2385,7 @@ assert(before + timeElapsed == after);
return _ticksPerSecond[_clockIdx];
}
- unittest
+ version (CoreUnittest) unittest
{
assert(MonoTimeImpl.ticksPerSecond == _ticksPerSecond[_clockIdx]);
}
@@ -2415,15 +2395,15 @@ assert(before + timeElapsed == after);
string toString() const pure nothrow
{
static if (clockType == ClockType.normal)
- return "MonoTime(" ~ signedToTempString(_ticks, 10) ~ " ticks, " ~ signedToTempString(ticksPerSecond, 10) ~ " ticks per second)";
+ return "MonoTime(" ~ signedToTempString(_ticks) ~ " ticks, " ~ signedToTempString(ticksPerSecond) ~ " ticks per second)";
else
- return "MonoTimeImpl!(ClockType." ~ _clockName ~ ")(" ~ signedToTempString(_ticks, 10) ~ " ticks, " ~
- signedToTempString(ticksPerSecond, 10) ~ " ticks per second)";
+ return "MonoTimeImpl!(ClockType." ~ _clockName ~ ")(" ~ signedToTempString(_ticks) ~ " ticks, " ~
+ signedToTempString(ticksPerSecond) ~ " ticks per second)";
}
- unittest
+ version (CoreUnittest) unittest
{
- static min(T)(T a, T b) { return a < b ? a : b; }
+ import core.internal.util.math : min;
static void eat(ref string s, const(char)[] exp)
{
@@ -2438,9 +2418,9 @@ assert(before + timeElapsed == after);
else
eat(str, "MonoTimeImpl!(ClockType."~_clockName~")(");
- eat(str, signedToTempString(mt._ticks, 10));
+ eat(str, signedToTempString(mt._ticks));
eat(str, " ticks, ");
- eat(str, signedToTempString(ticksPerSecond, 10));
+ eat(str, signedToTempString(ticksPerSecond));
eat(str, " ticks per second)");
}
@@ -2448,7 +2428,7 @@ private:
// static immutable long _ticksPerSecond;
- unittest
+ version (CoreUnittest) unittest
{
assert(_ticksPerSecond[_clockIdx]);
}
@@ -2784,8 +2764,7 @@ struct TickDuration
{
/++
It's the same as $(D TickDuration(0)), but it's provided to be
- consistent with $(D Duration) and $(D FracSec), which provide $(D zero)
- properties.
+ consistent with $(D Duration), which provides a $(D zero) property.
+/
TickDuration zero() { return TickDuration(0); }
@@ -2800,7 +2779,7 @@ struct TickDuration
TickDuration min() { return TickDuration(long.min); }
}
- unittest
+ version (CoreUnittest) unittest
{
assert(zero == TickDuration(0));
assert(TickDuration.max == TickDuration(long.max));
@@ -2851,7 +2830,7 @@ struct TickDuration
appOrigin = TickDuration.currSystemTick;
}
- unittest
+ version (CoreUnittest) unittest
{
assert(ticksPerSec);
}
@@ -2874,9 +2853,9 @@ struct TickDuration
return this.to!("seconds", long)();
}
- unittest
+ version (CoreUnittest) unittest
{
- foreach (T; _TypeTuple!(TickDuration, const TickDuration, immutable TickDuration))
+ foreach (T; AliasSeq!(TickDuration, const TickDuration, immutable TickDuration))
{
assert((cast(T)TickDuration(ticksPerSec)).seconds == 1);
assert((cast(T)TickDuration(ticksPerSec - 1)).seconds == 0);
@@ -2945,11 +2924,11 @@ struct TickDuration
return TickDuration(cast(long)(length * (ticksPerSec / cast(real)unitsPerSec)));
}
- unittest
+ version (CoreUnittest) unittest
{
- foreach (units; _TypeTuple!("seconds", "msecs", "usecs", "nsecs"))
+ foreach (units; AliasSeq!("seconds", "msecs", "usecs", "nsecs"))
{
- foreach (T; _TypeTuple!(TickDuration, const TickDuration, immutable TickDuration))
+ foreach (T; AliasSeq!(TickDuration, const TickDuration, immutable TickDuration))
{
assertApprox((cast(T)TickDuration.from!units(1000)).to!(units, long)(),
500, 1500, units);
@@ -2970,21 +2949,21 @@ struct TickDuration
$(D tickDuration.to!Duration())
+/
Duration opCast(T)() @safe const pure nothrow @nogc
- if (is(_Unqual!T == Duration))
+ if (is(immutable T == immutable Duration))
{
return Duration(hnsecs);
}
- unittest
+ version (CoreUnittest) unittest
{
- foreach (D; _TypeTuple!(Duration, const Duration, immutable Duration))
+ foreach (D; AliasSeq!(Duration, const Duration, immutable Duration))
{
- foreach (T; _TypeTuple!(TickDuration, const TickDuration, immutable TickDuration))
+ foreach (T; AliasSeq!(TickDuration, const TickDuration, immutable TickDuration))
{
auto expected = dur!"seconds"(1);
assert(cast(D)cast(T)TickDuration.from!"seconds"(1) == expected);
- foreach (units; _TypeTuple!("msecs", "usecs", "hnsecs"))
+ foreach (units; AliasSeq!("msecs", "usecs", "hnsecs"))
{
D actual = cast(D)cast(T)TickDuration.from!units(1_000_000);
assertApprox(actual, dur!units(900_000), dur!units(1_100_000));
@@ -2996,7 +2975,7 @@ struct TickDuration
//Temporary hack until bug http://d.puremagic.com/issues/show_bug.cgi?id=5747 is fixed.
TickDuration opCast(T)() @safe const pure nothrow @nogc
- if (is(_Unqual!T == TickDuration))
+ if (is(immutable T == immutable TickDuration))
{
return this;
}
@@ -3025,9 +3004,9 @@ struct TickDuration
return this;
}
- unittest
+ version (CoreUnittest) unittest
{
- foreach (T; _TypeTuple!(TickDuration, const TickDuration, immutable TickDuration))
+ foreach (T; AliasSeq!(TickDuration, const TickDuration, immutable TickDuration))
{
auto a = TickDuration.currSystemTick;
auto result = a += cast(T)TickDuration.currSystemTick;
@@ -3039,7 +3018,7 @@ struct TickDuration
assert(b == result);
assert(b.to!("seconds", real)() <= 0);
- foreach (U; _TypeTuple!(const TickDuration, immutable TickDuration))
+ foreach (U; AliasSeq!(const TickDuration, immutable TickDuration))
{
U u = TickDuration(12);
static assert(!__traits(compiles, u += cast(T)TickDuration.currSystemTick));
@@ -3070,13 +3049,13 @@ struct TickDuration
return TickDuration(mixin("length " ~ op ~ " rhs.length"));
}
- unittest
+ version (CoreUnittest) unittest
{
- foreach (T; _TypeTuple!(TickDuration, const TickDuration, immutable TickDuration))
+ foreach (T; AliasSeq!(TickDuration, const TickDuration, immutable TickDuration))
{
T a = TickDuration.currSystemTick;
T b = TickDuration.currSystemTick;
- assert((a + b).seconds > 0);
+ assert((a + b).usecs > 0);
assert((a - b).seconds <= 0);
}
}
@@ -3091,9 +3070,9 @@ struct TickDuration
return TickDuration(-length);
}
- unittest
+ version (CoreUnittest) unittest
{
- foreach (T; _TypeTuple!(TickDuration, const TickDuration, immutable TickDuration))
+ foreach (T; AliasSeq!(TickDuration, const TickDuration, immutable TickDuration))
{
assert(-(cast(T)TickDuration(7)) == TickDuration(-7));
assert(-(cast(T)TickDuration(5)) == TickDuration(-5));
@@ -3109,26 +3088,27 @@ struct TickDuration
+/
int opCmp(TickDuration rhs) @safe const pure nothrow @nogc
{
- return length < rhs.length ? -1 : (length == rhs.length ? 0 : 1);
+ return (length > rhs.length) - (length < rhs.length);
}
- unittest
+ version (CoreUnittest) unittest
{
- foreach (T; _TypeTuple!(TickDuration, const TickDuration, immutable TickDuration))
+ import core.internal.traits : rvalueOf;
+ foreach (T; AliasSeq!(TickDuration, const TickDuration, immutable TickDuration))
{
- foreach (U; _TypeTuple!(TickDuration, const TickDuration, immutable TickDuration))
+ foreach (U; AliasSeq!(TickDuration, const TickDuration, immutable TickDuration))
{
T t = TickDuration.currSystemTick;
U u = t;
assert(t == u);
- assert(copy(t) == u);
- assert(t == copy(u));
+ assert(rvalueOf(t) == u);
+ assert(t == rvalueOf(u));
}
}
- foreach (T; _TypeTuple!(TickDuration, const TickDuration, immutable TickDuration))
+ foreach (T; AliasSeq!(TickDuration, const TickDuration, immutable TickDuration))
{
- foreach (U; _TypeTuple!(TickDuration, const TickDuration, immutable TickDuration))
+ foreach (U; AliasSeq!(TickDuration, const TickDuration, immutable TickDuration))
{
T t = TickDuration.currSystemTick;
U u = t + t;
@@ -3137,15 +3117,15 @@ struct TickDuration
assert(u > t);
assert(u >= u);
- assert(copy(t) < u);
- assert(copy(t) <= t);
- assert(copy(u) > t);
- assert(copy(u) >= u);
+ assert(rvalueOf(t) < u);
+ assert(rvalueOf(t) <= t);
+ assert(rvalueOf(u) > t);
+ assert(rvalueOf(u) >= u);
- assert(t < copy(u));
- assert(t <= copy(t));
- assert(u > copy(t));
- assert(u >= copy(u));
+ assert(t < rvalueOf(u));
+ assert(t <= rvalueOf(t));
+ assert(u > rvalueOf(t));
+ assert(u >= rvalueOf(u));
}
}
}
@@ -3170,7 +3150,7 @@ struct TickDuration
length = cast(long)(length * value);
}
- unittest
+ version (CoreUnittest) unittest
{
immutable curr = TickDuration.currSystemTick;
TickDuration t1 = curr;
@@ -3187,7 +3167,7 @@ struct TickDuration
t1 *= 2.1;
assert(t1 > t2);
- foreach (T; _TypeTuple!(const TickDuration, immutable TickDuration))
+ foreach (T; AliasSeq!(const TickDuration, immutable TickDuration))
{
T t = TickDuration.currSystemTick;
assert(!__traits(compiles, t *= 12));
@@ -3221,7 +3201,7 @@ struct TickDuration
length = cast(long)(length / value);
}
- unittest
+ version (CoreUnittest) unittest
{
immutable curr = TickDuration.currSystemTick;
immutable t1 = curr;
@@ -3240,7 +3220,7 @@ struct TickDuration
_assertThrown!TimeException(t2 /= 0);
- foreach (T; _TypeTuple!(const TickDuration, immutable TickDuration))
+ foreach (T; AliasSeq!(const TickDuration, immutable TickDuration))
{
T t = TickDuration.currSystemTick;
assert(!__traits(compiles, t /= 12));
@@ -3268,9 +3248,9 @@ struct TickDuration
return TickDuration(cast(long)(length * value));
}
- unittest
+ version (CoreUnittest) unittest
{
- foreach (T; _TypeTuple!(TickDuration, const TickDuration, immutable TickDuration))
+ foreach (T; AliasSeq!(TickDuration, const TickDuration, immutable TickDuration))
{
T t1 = TickDuration.currSystemTick;
T t2 = t1 + t1;
@@ -3307,9 +3287,9 @@ struct TickDuration
return TickDuration(cast(long)(length / value));
}
- unittest
+ version (CoreUnittest) unittest
{
- foreach (T; _TypeTuple!(TickDuration, const TickDuration, immutable TickDuration))
+ foreach (T; AliasSeq!(TickDuration, const TickDuration, immutable TickDuration))
{
T t1 = TickDuration.currSystemTick;
T t2 = t1 + t1;
@@ -3332,7 +3312,7 @@ struct TickDuration
this.length = ticks;
}
- unittest
+ version (CoreUnittest) unittest
{
foreach (i; [-42, 0, 42])
assert(TickDuration(i).length == i);
@@ -3369,10 +3349,8 @@ struct TickDuration
import core.internal.abort : abort;
version (Windows)
{
- ulong ticks;
- if (QueryPerformanceCounter(cast(long*)&ticks) == 0)
- abort("Failed in QueryPerformanceCounter().");
-
+ ulong ticks = void;
+ QueryPerformanceCounter(cast(long*)&ticks);
return TickDuration(ticks);
}
else version (Darwin)
@@ -3381,10 +3359,8 @@ struct TickDuration
return TickDuration(cast(long)mach_absolute_time());
else
{
- timeval tv;
- if (gettimeofday(&tv, null) != 0)
- abort("Failed in gettimeofday().");
-
+ timeval tv = void;
+ gettimeofday(&tv, null);
return TickDuration(tv.tv_sec * TickDuration.ticksPerSec +
tv.tv_usec * TickDuration.ticksPerSec / 1000 / 1000);
}
@@ -3393,26 +3369,32 @@ struct TickDuration
{
static if (is(typeof(clock_gettime)))
{
- timespec ts;
- if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0)
- abort("Failed in clock_gettime().");
-
+ timespec ts = void;
+ immutable error = clock_gettime(CLOCK_MONOTONIC, &ts);
+ // CLOCK_MONOTONIC is known to be supported, and when tv_sec is long or
+ // larger, overflow cannot occur before roughly 292 billion years A.D., so
+ // the error check below is only compiled in for narrower tv_sec types.
+ static if (ts.tv_sec.max < long.max)
+ {
+ if (error)
+ {
+ import core.internal.abort : abort;
+ abort("Call to clock_gettime failed.");
+ }
+ }
return TickDuration(ts.tv_sec * TickDuration.ticksPerSec +
ts.tv_nsec * TickDuration.ticksPerSec / 1000 / 1000 / 1000);
}
else
{
- timeval tv;
- if (gettimeofday(&tv, null) != 0)
- abort("Failed in gettimeofday().");
-
+ timeval tv = void;
+ gettimeofday(&tv, null);
return TickDuration(tv.tv_sec * TickDuration.ticksPerSec +
tv.tv_usec * TickDuration.ticksPerSec / 1000 / 1000);
}
}
}
- @safe nothrow unittest
+ version (CoreUnittest) @safe nothrow unittest
{
assert(TickDuration.currSystemTick.length > 0);
}
@@ -3500,13 +3482,13 @@ unittest
unittest
{
- foreach (units; _TypeTuple!("weeks", "days", "hours", "seconds", "msecs", "usecs", "hnsecs", "nsecs"))
+ foreach (units; AliasSeq!("weeks", "days", "hours", "seconds", "msecs", "usecs", "hnsecs", "nsecs"))
{
static assert(!__traits(compiles, convert!("years", units)(12)), units);
static assert(!__traits(compiles, convert!(units, "years")(12)), units);
}
- foreach (units; _TypeTuple!("years", "months", "weeks", "days",
+ foreach (units; AliasSeq!("years", "months", "weeks", "days",
"hours", "seconds", "msecs", "usecs", "hnsecs", "nsecs"))
{
assert(convert!(units, units)(12) == 12);
@@ -3568,612 +3550,6 @@ unittest
assert(convert!("nsecs", "hnsecs")(100) == 1);
}
-
-/++
- Represents fractional seconds.
-
- This is the portion of the time which is smaller than a second and it cannot
- hold values which would be greater than or equal to a second (or less than
- or equal to a negative second).
-
- It holds hnsecs internally, but you can create it using either milliseconds,
- microseconds, or hnsecs. What it does is allow for a simple way to set or
- adjust the fractional seconds portion of a $(D Duration) or a
- $(REF SysTime, std,datetime) without having to worry about whether you're
- dealing with milliseconds, microseconds, or hnsecs.
-
- $(D FracSec)'s functions which take time unit strings do accept
- $(D "nsecs"), but because the resolution of $(D Duration) and
- $(REF SysTime, std,datetime) is hnsecs, you don't actually get precision higher
- than hnsecs. $(D "nsecs") is accepted merely for convenience. Any values
- given as nsecs will be converted to hnsecs using $(D convert) (which uses
- truncating division when converting to smaller units).
- +/
-struct FracSec
-{
-@safe pure:
-
-public:
-
- /++
- A $(D FracSec) of $(D 0). It's shorter than doing something like
- $(D FracSec.from!"msecs"(0)) and more explicit than $(D FracSec.init).
- +/
- static @property nothrow @nogc FracSec zero() { return FracSec(0); }
-
- unittest
- {
- assert(zero == FracSec.from!"msecs"(0));
- }
-
-
- /++
- Create a $(D FracSec) from the given units ($(D "msecs"), $(D "usecs"),
- or $(D "hnsecs")).
-
- Params:
- units = The units to create a FracSec from.
- value = The number of the given units passed the second.
-
- Throws:
- $(D TimeException) if the given value would result in a $(D FracSec)
- greater than or equal to $(D 1) second or less than or equal to
- $(D -1) seconds.
- +/
- static FracSec from(string units)(long value)
- if (units == "msecs" ||
- units == "usecs" ||
- units == "hnsecs" ||
- units == "nsecs")
- {
- immutable hnsecs = cast(int)convert!(units, "hnsecs")(value);
- _enforceValid(hnsecs);
- return FracSec(hnsecs);
- }
-
- unittest
- {
- assert(FracSec.from!"msecs"(0) == FracSec(0));
- assert(FracSec.from!"usecs"(0) == FracSec(0));
- assert(FracSec.from!"hnsecs"(0) == FracSec(0));
-
- foreach (sign; [1, -1])
- {
- _assertThrown!TimeException(from!"msecs"(1000 * sign));
-
- assert(FracSec.from!"msecs"(1 * sign) == FracSec(10_000 * sign));
- assert(FracSec.from!"msecs"(999 * sign) == FracSec(9_990_000 * sign));
-
- _assertThrown!TimeException(from!"usecs"(1_000_000 * sign));
-
- assert(FracSec.from!"usecs"(1 * sign) == FracSec(10 * sign));
- assert(FracSec.from!"usecs"(999 * sign) == FracSec(9990 * sign));
- assert(FracSec.from!"usecs"(999_999 * sign) == FracSec(9999_990 * sign));
-
- _assertThrown!TimeException(from!"hnsecs"(10_000_000 * sign));
-
- assert(FracSec.from!"hnsecs"(1 * sign) == FracSec(1 * sign));
- assert(FracSec.from!"hnsecs"(999 * sign) == FracSec(999 * sign));
- assert(FracSec.from!"hnsecs"(999_999 * sign) == FracSec(999_999 * sign));
- assert(FracSec.from!"hnsecs"(9_999_999 * sign) == FracSec(9_999_999 * sign));
-
- assert(FracSec.from!"nsecs"(1 * sign) == FracSec(0));
- assert(FracSec.from!"nsecs"(10 * sign) == FracSec(0));
- assert(FracSec.from!"nsecs"(99 * sign) == FracSec(0));
- assert(FracSec.from!"nsecs"(100 * sign) == FracSec(1 * sign));
- assert(FracSec.from!"nsecs"(99_999 * sign) == FracSec(999 * sign));
- assert(FracSec.from!"nsecs"(99_999_999 * sign) == FracSec(999_999 * sign));
- assert(FracSec.from!"nsecs"(999_999_999 * sign) == FracSec(9_999_999 * sign));
- }
- }
-
-
- /++
- Returns the negation of this $(D FracSec).
- +/
- FracSec opUnary(string op)() const nothrow @nogc
- if (op == "-")
- {
- return FracSec(-_hnsecs);
- }
-
- unittest
- {
- foreach (val; [-7, -5, 0, 5, 7])
- {
- foreach (F; _TypeTuple!(FracSec, const FracSec, immutable FracSec))
- {
- F fs = FracSec(val);
- assert(-fs == FracSec(-val));
- }
- }
- }
-
-
- /++
- The value of this $(D FracSec) as milliseconds.
- +/
- @property int msecs() const nothrow @nogc
- {
- return cast(int)convert!("hnsecs", "msecs")(_hnsecs);
- }
-
- unittest
- {
- foreach (F; _TypeTuple!(FracSec, const FracSec, immutable FracSec))
- {
- assert(FracSec(0).msecs == 0);
-
- foreach (sign; [1, -1])
- {
- assert((cast(F)FracSec(1 * sign)).msecs == 0);
- assert((cast(F)FracSec(999 * sign)).msecs == 0);
- assert((cast(F)FracSec(999_999 * sign)).msecs == 99 * sign);
- assert((cast(F)FracSec(9_999_999 * sign)).msecs == 999 * sign);
- }
- }
- }
-
-
- /++
- The value of this $(D FracSec) as milliseconds.
-
- Params:
- milliseconds = The number of milliseconds passed the second.
-
- Throws:
- $(D TimeException) if the given value is not less than $(D 1) second
- and greater than a $(D -1) seconds.
- +/
- @property void msecs(int milliseconds)
- {
- immutable hnsecs = cast(int)convert!("msecs", "hnsecs")(milliseconds);
- _enforceValid(hnsecs);
- _hnsecs = hnsecs;
- }
-
- unittest
- {
- static void test(int msecs, FracSec expected = FracSec.init, size_t line = __LINE__)
- {
- FracSec fs;
- fs.msecs = msecs;
-
- if (fs != expected)
- throw new AssertError("unittest failure", __FILE__, line);
- }
-
- _assertThrown!TimeException(test(-1000));
- _assertThrown!TimeException(test(1000));
-
- test(0, FracSec(0));
-
- foreach (sign; [1, -1])
- {
- test(1 * sign, FracSec(10_000 * sign));
- test(999 * sign, FracSec(9_990_000 * sign));
- }
-
- foreach (F; _TypeTuple!(const FracSec, immutable FracSec))
- {
- F fs = FracSec(1234567);
- static assert(!__traits(compiles, fs.msecs = 12), F.stringof);
- }
- }
-
-
- /++
- The value of this $(D FracSec) as microseconds.
- +/
- @property int usecs() const nothrow @nogc
- {
- return cast(int)convert!("hnsecs", "usecs")(_hnsecs);
- }
-
- unittest
- {
- foreach (F; _TypeTuple!(FracSec, const FracSec, immutable FracSec))
- {
- assert(FracSec(0).usecs == 0);
-
- foreach (sign; [1, -1])
- {
- assert((cast(F)FracSec(1 * sign)).usecs == 0);
- assert((cast(F)FracSec(999 * sign)).usecs == 99 * sign);
- assert((cast(F)FracSec(999_999 * sign)).usecs == 99_999 * sign);
- assert((cast(F)FracSec(9_999_999 * sign)).usecs == 999_999 * sign);
- }
- }
- }
-
-
- /++
- The value of this $(D FracSec) as microseconds.
-
- Params:
- microseconds = The number of microseconds passed the second.
-
- Throws:
- $(D TimeException) if the given value is not less than $(D 1) second
- and greater than a $(D -1) seconds.
- +/
- @property void usecs(int microseconds)
- {
- immutable hnsecs = cast(int)convert!("usecs", "hnsecs")(microseconds);
- _enforceValid(hnsecs);
- _hnsecs = hnsecs;
- }
-
- unittest
- {
- static void test(int usecs, FracSec expected = FracSec.init, size_t line = __LINE__)
- {
- FracSec fs;
- fs.usecs = usecs;
-
- if (fs != expected)
- throw new AssertError("unittest failure", __FILE__, line);
- }
-
- _assertThrown!TimeException(test(-1_000_000));
- _assertThrown!TimeException(test(1_000_000));
-
- test(0, FracSec(0));
-
- foreach (sign; [1, -1])
- {
- test(1 * sign, FracSec(10 * sign));
- test(999 * sign, FracSec(9990 * sign));
- test(999_999 * sign, FracSec(9_999_990 * sign));
- }
-
- foreach (F; _TypeTuple!(const FracSec, immutable FracSec))
- {
- F fs = FracSec(1234567);
- static assert(!__traits(compiles, fs.usecs = 12), F.stringof);
- }
- }
-
-
- /++
- The value of this $(D FracSec) as hnsecs.
- +/
- @property int hnsecs() const nothrow @nogc
- {
- return _hnsecs;
- }
-
- unittest
- {
- foreach (F; _TypeTuple!(FracSec, const FracSec, immutable FracSec))
- {
- assert(FracSec(0).hnsecs == 0);
-
- foreach (sign; [1, -1])
- {
- assert((cast(F)FracSec(1 * sign)).hnsecs == 1 * sign);
- assert((cast(F)FracSec(999 * sign)).hnsecs == 999 * sign);
- assert((cast(F)FracSec(999_999 * sign)).hnsecs == 999_999 * sign);
- assert((cast(F)FracSec(9_999_999 * sign)).hnsecs == 9_999_999 * sign);
- }
- }
- }
-
-
- /++
- The value of this $(D FracSec) as hnsecs.
-
- Params:
- hnsecs = The number of hnsecs passed the second.
-
- Throws:
- $(D TimeException) if the given value is not less than $(D 1) second
- and greater than a $(D -1) seconds.
- +/
- @property void hnsecs(int hnsecs)
- {
- _enforceValid(hnsecs);
- _hnsecs = hnsecs;
- }
-
- unittest
- {
- static void test(int hnsecs, FracSec expected = FracSec.init, size_t line = __LINE__)
- {
- FracSec fs;
- fs.hnsecs = hnsecs;
-
- if (fs != expected)
- throw new AssertError("unittest failure", __FILE__, line);
- }
-
- _assertThrown!TimeException(test(-10_000_000));
- _assertThrown!TimeException(test(10_000_000));
-
- test(0, FracSec(0));
-
- foreach (sign; [1, -1])
- {
- test(1 * sign, FracSec(1 * sign));
- test(999 * sign, FracSec(999 * sign));
- test(999_999 * sign, FracSec(999_999 * sign));
- test(9_999_999 * sign, FracSec(9_999_999 * sign));
- }
-
- foreach (F; _TypeTuple!(const FracSec, immutable FracSec))
- {
- F fs = FracSec(1234567);
- static assert(!__traits(compiles, fs.hnsecs = 12), F.stringof);
- }
- }
-
-
- /++
- The value of this $(D FracSec) as nsecs.
-
- Note that this does not give you any greater precision
- than getting the value of this $(D FracSec) as hnsecs.
- +/
- @property int nsecs() const nothrow @nogc
- {
- return cast(int)convert!("hnsecs", "nsecs")(_hnsecs);
- }
-
- unittest
- {
- foreach (F; _TypeTuple!(FracSec, const FracSec, immutable FracSec))
- {
- assert(FracSec(0).nsecs == 0);
-
- foreach (sign; [1, -1])
- {
- assert((cast(F)FracSec(1 * sign)).nsecs == 100 * sign);
- assert((cast(F)FracSec(999 * sign)).nsecs == 99_900 * sign);
- assert((cast(F)FracSec(999_999 * sign)).nsecs == 99_999_900 * sign);
- assert((cast(F)FracSec(9_999_999 * sign)).nsecs == 999_999_900 * sign);
- }
- }
- }
-
-
- /++
- The value of this $(D FracSec) as nsecs.
-
- Note that this does not give you any greater precision
- than setting the value of this $(D FracSec) as hnsecs.
-
- Params:
- nsecs = The number of nsecs passed the second.
-
- Throws:
- $(D TimeException) if the given value is not less than $(D 1) second
- and greater than a $(D -1) seconds.
- +/
- @property void nsecs(long nsecs)
- {
- immutable hnsecs = cast(int)convert!("nsecs", "hnsecs")(nsecs);
- _enforceValid(hnsecs);
- _hnsecs = hnsecs;
- }
-
- unittest
- {
- static void test(int nsecs, FracSec expected = FracSec.init, size_t line = __LINE__)
- {
- FracSec fs;
- fs.nsecs = nsecs;
-
- if (fs != expected)
- throw new AssertError("unittest failure", __FILE__, line);
- }
-
- _assertThrown!TimeException(test(-1_000_000_000));
- _assertThrown!TimeException(test(1_000_000_000));
-
- test(0, FracSec(0));
-
- foreach (sign; [1, -1])
- {
- test(1 * sign, FracSec(0));
- test(10 * sign, FracSec(0));
- test(100 * sign, FracSec(1 * sign));
- test(999 * sign, FracSec(9 * sign));
- test(999_999 * sign, FracSec(9999 * sign));
- test(9_999_999 * sign, FracSec(99_999 * sign));
- }
-
- foreach (F; _TypeTuple!(const FracSec, immutable FracSec))
- {
- F fs = FracSec(1234567);
- static assert(!__traits(compiles, fs.nsecs = 12), F.stringof);
- }
- }
-
-
- /+
- Converts this $(D TickDuration) to a string.
- +/
- //Due to bug http://d.puremagic.com/issues/show_bug.cgi?id=3715 , we can't
- //have versions of toString() with extra modifiers, so we define one version
- //with modifiers and one without.
- string toString()
- {
- return _toStringImpl();
- }
-
-
- /++
- Converts this $(D TickDuration) to a string.
- +/
- //Due to bug http://d.puremagic.com/issues/show_bug.cgi?id=3715 , we can't
- //have versions of toString() with extra modifiers, so we define one version
- //with modifiers and one without.
- string toString() const nothrow
- {
- return _toStringImpl();
- }
-
- unittest
- {
- auto fs = FracSec(12);
- const cfs = FracSec(12);
- immutable ifs = FracSec(12);
- assert(fs.toString() == "12 hnsecs");
- assert(cfs.toString() == "12 hnsecs");
- assert(ifs.toString() == "12 hnsecs");
- }
-
-
-private:
-
- /+
- Since we have two versions of $(D toString), we have $(D _toStringImpl)
- so that they can share implementations.
- +/
- string _toStringImpl() const nothrow
- {
- long hnsecs = _hnsecs;
-
- immutable milliseconds = splitUnitsFromHNSecs!"msecs"(hnsecs);
- immutable microseconds = splitUnitsFromHNSecs!"usecs"(hnsecs);
-
- if (hnsecs == 0)
- {
- if (microseconds == 0)
- {
- if (milliseconds == 0)
- return "0 hnsecs";
- else
- {
- if (milliseconds == 1)
- return "1 ms";
- else
- {
- auto r = signedToTempString(milliseconds, 10).idup;
- r ~= " ms";
- return r;
- }
- }
- }
- else
- {
- immutable fullMicroseconds = getUnitsFromHNSecs!"usecs"(_hnsecs);
-
- if (fullMicroseconds == 1)
- return "1 μs";
- else
- {
- auto r = signedToTempString(fullMicroseconds, 10).idup;
- r ~= " μs";
- return r;
- }
- }
- }
- else
- {
- if (_hnsecs == 1)
- return "1 hnsec";
- else
- {
- auto r = signedToTempString(_hnsecs, 10).idup;
- r ~= " hnsecs";
- return r;
- }
- }
- }
-
- unittest
- {
- foreach (sign; [1 , -1])
- {
- immutable signStr = sign == 1 ? "" : "-";
-
- assert(FracSec.from!"msecs"(0 * sign).toString() == "0 hnsecs");
- assert(FracSec.from!"msecs"(1 * sign).toString() == signStr ~ "1 ms");
- assert(FracSec.from!"msecs"(2 * sign).toString() == signStr ~ "2 ms");
- assert(FracSec.from!"msecs"(100 * sign).toString() == signStr ~ "100 ms");
- assert(FracSec.from!"msecs"(999 * sign).toString() == signStr ~ "999 ms");
-
- assert(FracSec.from!"usecs"(0* sign).toString() == "0 hnsecs");
- assert(FracSec.from!"usecs"(1* sign).toString() == signStr ~ "1 μs");
- assert(FracSec.from!"usecs"(2* sign).toString() == signStr ~ "2 μs");
- assert(FracSec.from!"usecs"(100* sign).toString() == signStr ~ "100 μs");
- assert(FracSec.from!"usecs"(999* sign).toString() == signStr ~ "999 μs");
- assert(FracSec.from!"usecs"(1000* sign).toString() == signStr ~ "1 ms");
- assert(FracSec.from!"usecs"(2000* sign).toString() == signStr ~ "2 ms");
- assert(FracSec.from!"usecs"(9999* sign).toString() == signStr ~ "9999 μs");
- assert(FracSec.from!"usecs"(10_000* sign).toString() == signStr ~ "10 ms");
- assert(FracSec.from!"usecs"(20_000* sign).toString() == signStr ~ "20 ms");
- assert(FracSec.from!"usecs"(100_000* sign).toString() == signStr ~ "100 ms");
- assert(FracSec.from!"usecs"(100_001* sign).toString() == signStr ~ "100001 μs");
- assert(FracSec.from!"usecs"(999_999* sign).toString() == signStr ~ "999999 μs");
-
- assert(FracSec.from!"hnsecs"(0* sign).toString() == "0 hnsecs");
- assert(FracSec.from!"hnsecs"(1* sign).toString() == (sign == 1 ? "1 hnsec" : "-1 hnsecs"));
- assert(FracSec.from!"hnsecs"(2* sign).toString() == signStr ~ "2 hnsecs");
- assert(FracSec.from!"hnsecs"(100* sign).toString() == signStr ~ "10 μs");
- assert(FracSec.from!"hnsecs"(999* sign).toString() == signStr ~ "999 hnsecs");
- assert(FracSec.from!"hnsecs"(1000* sign).toString() == signStr ~ "100 μs");
- assert(FracSec.from!"hnsecs"(2000* sign).toString() == signStr ~ "200 μs");
- assert(FracSec.from!"hnsecs"(9999* sign).toString() == signStr ~ "9999 hnsecs");
- assert(FracSec.from!"hnsecs"(10_000* sign).toString() == signStr ~ "1 ms");
- assert(FracSec.from!"hnsecs"(20_000* sign).toString() == signStr ~ "2 ms");
- assert(FracSec.from!"hnsecs"(100_000* sign).toString() == signStr ~ "10 ms");
- assert(FracSec.from!"hnsecs"(100_001* sign).toString() == signStr ~ "100001 hnsecs");
- assert(FracSec.from!"hnsecs"(200_000* sign).toString() == signStr ~ "20 ms");
- assert(FracSec.from!"hnsecs"(999_999* sign).toString() == signStr ~ "999999 hnsecs");
- assert(FracSec.from!"hnsecs"(1_000_001* sign).toString() == signStr ~ "1000001 hnsecs");
- assert(FracSec.from!"hnsecs"(9_999_999* sign).toString() == signStr ~ "9999999 hnsecs");
- }
- }
-
-
- /+
- Returns whether the given number of hnsecs fits within the range of
- $(D FracSec).
-
- Params:
- hnsecs = The number of hnsecs.
- +/
- static bool _valid(int hnsecs) nothrow @nogc
- {
- immutable second = convert!("seconds", "hnsecs")(1);
- return hnsecs > -second && hnsecs < second;
- }
-
-
- /+
- Throws:
- $(D TimeException) if $(D valid(hnsecs)) is $(D false).
- +/
- static void _enforceValid(int hnsecs)
- {
- if (!_valid(hnsecs))
- throw new TimeException("FracSec must be greater than equal to 0 and less than 1 second.");
- }
-
-
- /+
- Params:
- hnsecs = The number of hnsecs passed the second.
- +/
- this(int hnsecs) nothrow @nogc
- {
- _hnsecs = hnsecs;
- }
-
-
- invariant()
- {
- if (!_valid(_hnsecs))
- throw new AssertError("Invariant Failure: hnsecs [" ~ signedToTempString(_hnsecs, 10).idup ~ "]", __FILE__, __LINE__);
- }
-
-
- int _hnsecs;
-}
-
-
/++
Exception type used by core.time.
+/
@@ -4318,7 +3694,6 @@ long splitUnitsFromHNSecs(string units)(ref long hnsecs) @safe pure nothrow @nog
return value;
}
-///
unittest
{
auto hnsecs = 2595000000007L;
@@ -4331,83 +3706,6 @@ unittest
assert(hnsecs == 7);
}
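
splitUnitsFromHNSecs extracts the requested unit from an hnsecs total and leaves the remainder in the ref parameter, as the surrounding examples show; the getUnitsFromHNSecs and removeUnitsFromHNSecs helpers deleted below were the non-mutating halves of the same operation and are now expressed directly with convert. A hedged sketch of an equivalent split written in terms of convert (named splitSketch to avoid clashing with the real function):

    long splitSketch(string units)(ref long hnsecs)
    {
        immutable value = convert!("hnsecs", units)(hnsecs); // whole units
        hnsecs -= convert!(units, "hnsecs")(value);          // keep remainder
        return value;
    }

    // Matches the deleted examples below: 2595000000007 hnsecs is 3 days and change.
    long hns = 2595000000007L;
    assert(splitSketch!"days"(hns) == 3);
    assert(hns == 3000000007L);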
-
-/+
- This function is used to split out the units without getting the remaining
- hnsecs.
-
- See_Also:
- $(LREF splitUnitsFromHNSecs)
-
- Params:
- units = The units to split out.
- hnsecs = The current total hnsecs.
-
- Returns:
- The split out value.
- +/
-long getUnitsFromHNSecs(string units)(long hnsecs) @safe pure nothrow @nogc
- if (units == "weeks" ||
- units == "days" ||
- units == "hours" ||
- units == "minutes" ||
- units == "seconds" ||
- units == "msecs" ||
- units == "usecs" ||
- units == "hnsecs")
-{
- return convert!("hnsecs", units)(hnsecs);
-}
-
-///
-unittest
-{
- auto hnsecs = 2595000000007L;
- immutable days = getUnitsFromHNSecs!"days"(hnsecs);
- assert(days == 3);
- assert(hnsecs == 2595000000007L);
-}
-
-
-/+
- This function is used to split out the units without getting the units but
- just the remaining hnsecs.
-
- See_Also:
- $(LREF splitUnitsFromHNSecs)
-
- Params:
- units = The units to split out.
- hnsecs = The current total hnsecs.
-
- Returns:
- The remaining hnsecs.
- +/
-long removeUnitsFromHNSecs(string units)(long hnsecs) @safe pure nothrow @nogc
- if (units == "weeks" ||
- units == "days" ||
- units == "hours" ||
- units == "minutes" ||
- units == "seconds" ||
- units == "msecs" ||
- units == "usecs" ||
- units == "hnsecs")
-{
- immutable value = convert!("hnsecs", units)(hnsecs);
-
- return hnsecs - convert!(units, "hnsecs")(value);
-}
-
-///
-unittest
-{
- auto hnsecs = 2595000000007L;
- auto returned = removeUnitsFromHNSecs!"days"(hnsecs);
- assert(returned == 3000000007);
- assert(hnsecs == 2595000000007L);
-}
-
-
/+
Whether all of the given strings are among the accepted strings.
+/
@@ -4494,63 +3792,6 @@ unittest
assert(!unitsAreInDescendingOrder("days", "hours", "days"));
}
-
-/+
- The time units which are one step larger than the given units.
- +/
-template nextLargerTimeUnits(string units)
- if (units == "days" ||
- units == "hours" ||
- units == "minutes" ||
- units == "seconds" ||
- units == "msecs" ||
- units == "usecs" ||
- units == "hnsecs" ||
- units == "nsecs")
-{
- static if (units == "days")
- enum nextLargerTimeUnits = "weeks";
- else static if (units == "hours")
- enum nextLargerTimeUnits = "days";
- else static if (units == "minutes")
- enum nextLargerTimeUnits = "hours";
- else static if (units == "seconds")
- enum nextLargerTimeUnits = "minutes";
- else static if (units == "msecs")
- enum nextLargerTimeUnits = "seconds";
- else static if (units == "usecs")
- enum nextLargerTimeUnits = "msecs";
- else static if (units == "hnsecs")
- enum nextLargerTimeUnits = "usecs";
- else static if (units == "nsecs")
- enum nextLargerTimeUnits = "hnsecs";
- else
- static assert(0, "Broken template constraint");
-}
-
-///
-unittest
-{
- assert(nextLargerTimeUnits!"minutes" == "hours");
- assert(nextLargerTimeUnits!"hnsecs" == "usecs");
-}
-
-unittest
-{
- assert(nextLargerTimeUnits!"nsecs" == "hnsecs");
- assert(nextLargerTimeUnits!"hnsecs" == "usecs");
- assert(nextLargerTimeUnits!"usecs" == "msecs");
- assert(nextLargerTimeUnits!"msecs" == "seconds");
- assert(nextLargerTimeUnits!"seconds" == "minutes");
- assert(nextLargerTimeUnits!"minutes" == "hours");
- assert(nextLargerTimeUnits!"hours" == "days");
- assert(nextLargerTimeUnits!"days" == "weeks");
-
- static assert(!__traits(compiles, nextLargerTimeUnits!"weeks"));
- static assert(!__traits(compiles, nextLargerTimeUnits!"months"));
- static assert(!__traits(compiles, nextLargerTimeUnits!"years"));
-}
-
version (Darwin)
long machTicksPerSecond()
{
@@ -4581,16 +3822,16 @@ double _abs(double val) @safe pure nothrow @nogc
}
-version (unittest)
+version (CoreUnittest)
string doubleToString(double value) @safe pure nothrow
{
string result;
if (value < 0 && cast(long)value == 0)
result = "-0";
else
- result = signedToTempString(cast(long)value, 10).idup;
+ result = signedToTempString(cast(long)value).idup;
result ~= '.';
- result ~= unsignedToTempString(cast(ulong)(_abs((value - cast(long)value) * 1_000_000) + .5), 10);
+ result ~= unsignedToTempString(cast(ulong)(_abs((value - cast(long)value) * 1_000_000) + .5));
while (result[$-1] == '0')
result = result[0 .. $-1];
@@ -4612,21 +3853,17 @@ unittest
assert(aStr == "-0.337", aStr);
}
-version (unittest) const(char)* numToStringz()(long value) @trusted pure nothrow
+version (CoreUnittest) const(char)* numToStringz()(long value) @trusted pure nothrow
{
- return (signedToTempString(value, 10) ~ "\0").ptr;
+ return (signedToTempString(value) ~ "\0").ptr;
}
-/+ A copy of std.typecons.TypeTuple. +/
-template _TypeTuple(TList...)
-{
- alias TList _TypeTuple;
-}
+import core.internal.traits : AliasSeq;
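
The local _TypeTuple shim above is replaced by importing AliasSeq from core.internal.traits; both are the same trivial alias over a template parameter sequence, AliasSeq simply being the name used across druntime and Phobos. For reference, the whole template amounts to:

    // The canonical definition of an alias sequence:
    template AliasSeq(TList...)
    {
        alias AliasSeq = TList;
    }

    // Usage, as in the unittests above:
    //     foreach (D; AliasSeq!(Duration, const Duration, immutable Duration)) { ... }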
/+ An adjusted copy of std.exception.assertThrown. +/
-version (unittest) void _assertThrown(T : Throwable = Exception, E)
+version (CoreUnittest) void _assertThrown(T : Throwable = Exception, E)
(lazy E expression,
string msg = null,
string file = __FILE__,
@@ -4721,7 +3958,7 @@ unittest
}
-version (unittest) void assertApprox(D, E)(D actual,
+version (CoreUnittest) void assertApprox(D, E)(D actual,
E lower,
E upper,
string msg = "unittest failure",
@@ -4734,7 +3971,7 @@ version (unittest) void assertApprox(D, E)(D actual,
throw new AssertError(msg ~ ": upper: " ~ actual.toString(), __FILE__, line);
}
-version (unittest) void assertApprox(D, E)(D actual,
+version (CoreUnittest) void assertApprox(D, E)(D actual,
E lower,
E upper,
string msg = "unittest failure",
@@ -4743,14 +3980,14 @@ version (unittest) void assertApprox(D, E)(D actual,
{
if (actual.length < lower.length || actual.length > upper.length)
{
- throw new AssertError(msg ~ (": [" ~ signedToTempString(lower.length, 10) ~ "] [" ~
- signedToTempString(actual.length, 10) ~ "] [" ~
- signedToTempString(upper.length, 10) ~ "]").idup,
+ throw new AssertError(msg ~ (": [" ~ signedToTempString(lower.length) ~ "] [" ~
+ signedToTempString(actual.length) ~ "] [" ~
+ signedToTempString(upper.length) ~ "]").idup,
__FILE__, line);
}
}
-version (unittest) void assertApprox(MT)(MT actual,
+version (CoreUnittest) void assertApprox(MT)(MT actual,
MT lower,
MT upper,
string msg = "unittest failure",
@@ -4760,14 +3997,14 @@ version (unittest) void assertApprox(MT)(MT actual,
assertApprox(actual._ticks, lower._ticks, upper._ticks, msg, line);
}
-version (unittest) void assertApprox()(long actual,
+version (CoreUnittest) void assertApprox()(long actual,
long lower,
long upper,
string msg = "unittest failure",
size_t line = __LINE__)
{
if (actual < lower)
- throw new AssertError(msg ~ ": lower: " ~ signedToTempString(actual, 10).idup, __FILE__, line);
+ throw new AssertError(msg ~ ": lower: " ~ signedToTempString(actual).idup, __FILE__, line);
if (actual > upper)
- throw new AssertError(msg ~ ": upper: " ~ signedToTempString(actual, 10).idup, __FILE__, line);
+ throw new AssertError(msg ~ ": upper: " ~ signedToTempString(actual).idup, __FILE__, line);
}
diff --git a/libphobos/libdruntime/gc/bits.d b/libphobos/libdruntime/gc/bits.d
deleted file mode 100644
index 60ba4f4..0000000
--- a/libphobos/libdruntime/gc/bits.d
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Contains a bitfield used by the GC.
- *
- * Copyright: Copyright Digital Mars 2005 - 2013.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
- * Authors: Walter Bright, David Friedman, Sean Kelly
- */
-
-/* Copyright Digital Mars 2005 - 2013.
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying file LICENSE or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- */
-module gc.bits;
-
-
-import core.bitop;
-import core.stdc.string;
-import core.stdc.stdlib;
-import core.exception : onOutOfMemoryError;
-
-struct GCBits
-{
- alias size_t wordtype;
-
- enum BITS_PER_WORD = (wordtype.sizeof * 8);
- enum BITS_SHIFT = (wordtype.sizeof == 8 ? 6 : 5);
- enum BITS_MASK = (BITS_PER_WORD - 1);
- enum BITS_1 = cast(wordtype)1;
-
- wordtype* data;
- size_t nbits;
-
- void Dtor() nothrow
- {
- if (data)
- {
- free(data);
- data = null;
- }
- }
-
- void alloc(size_t nbits) nothrow
- {
- this.nbits = nbits;
- data = cast(typeof(data[0])*)calloc(nwords, data[0].sizeof);
- if (!data)
- onOutOfMemoryError();
- }
-
- wordtype test(size_t i) const nothrow
- in
- {
- assert(i < nbits);
- }
- body
- {
- return core.bitop.bt(data, i);
- }
-
- int set(size_t i) nothrow
- in
- {
- assert(i < nbits);
- }
- body
- {
- return core.bitop.bts(data, i);
- }
-
- int clear(size_t i) nothrow
- in
- {
- assert(i <= nbits);
- }
- body
- {
- return core.bitop.btr(data, i);
- }
-
- void zero() nothrow
- {
- memset(data, 0, nwords * wordtype.sizeof);
- }
-
- void copy(GCBits *f) nothrow
- in
- {
- assert(nwords == f.nwords);
- }
- body
- {
- memcpy(data, f.data, nwords * wordtype.sizeof);
- }
-
- @property size_t nwords() const pure nothrow
- {
- return (nbits + (BITS_PER_WORD - 1)) >> BITS_SHIFT;
- }
-}
-
-unittest
-{
- GCBits b;
-
- b.alloc(786);
- assert(!b.test(123));
- assert(!b.clear(123));
- assert(!b.set(123));
- assert(b.test(123));
- assert(b.clear(123));
- assert(!b.test(123));
-
- b.set(785);
- b.set(0);
- assert(b.test(785));
- assert(b.test(0));
- b.zero();
- assert(!b.test(785));
- assert(!b.test(0));
-
- GCBits b2;
- b2.alloc(786);
- b2.set(38);
- b.copy(&b2);
- assert(b.test(38));
- b2.Dtor();
- b.Dtor();
-}
diff --git a/libphobos/libdruntime/gc/config.d b/libphobos/libdruntime/gc/config.d
deleted file mode 100644
index b0789cd..0000000
--- a/libphobos/libdruntime/gc/config.d
+++ /dev/null
@@ -1,291 +0,0 @@
-/**
-* Contains the garbage collector configuration.
-*
-* Copyright: Copyright Digital Mars 2016
-* License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
-*/
-
-module gc.config;
-
-import core.stdc.stdlib;
-import core.stdc.stdio;
-import core.stdc.ctype;
-import core.stdc.string;
-import core.vararg;
-
-nothrow @nogc:
-extern extern(C) string[] rt_args();
-
-extern extern(C) __gshared bool rt_envvars_enabled;
-extern extern(C) __gshared bool rt_cmdline_enabled;
-extern extern(C) __gshared string[] rt_options;
-
-__gshared Config config;
-
-struct Config
-{
- bool disable; // start disabled
- ubyte profile; // enable profiling with summary when terminating program
- string gc = "conservative"; // select gc implementation conservative|manual
-
- size_t initReserve; // initial reserve (MB)
- size_t minPoolSize = 1; // initial and minimum pool size (MB)
- size_t maxPoolSize = 64; // maximum pool size (MB)
- size_t incPoolSize = 3; // pool size increment (MB)
- float heapSizeFactor = 2.0; // heap size to used memory ratio
-
-@nogc nothrow:
-
- bool initialize()
- {
- import core.internal.traits : externDFunc;
-
- alias rt_configCallBack = string delegate(string) @nogc nothrow;
- alias fn_configOption = string function(string opt, scope rt_configCallBack dg, bool reverse) @nogc nothrow;
-
- alias rt_configOption = externDFunc!("rt.config.rt_configOption", fn_configOption);
-
- string parse(string opt) @nogc nothrow
- {
- if (!parseOptions(opt))
- return "err";
- return null; // continue processing
- }
- string s = rt_configOption("gcopt", &parse, true);
- return s is null;
- }
-
- void help()
- {
- version (unittest) if (inUnittest) return;
-
- string s = "GC options are specified as white space separated assignments:
- disable:0|1 - start disabled (%d)
- profile:0|1|2 - enable profiling with summary when terminating program (%d)
- gc:conservative|manual - select gc implementation (default = conservative)
-
- initReserve:N - initial memory to reserve in MB (%lld)
- minPoolSize:N - initial and minimum pool size in MB (%lld)
- maxPoolSize:N - maximum pool size in MB (%lld)
- incPoolSize:N - pool size increment MB (%lld)
- heapSizeFactor:N - targeted heap size to used memory ratio (%g)
-";
- printf(s.ptr, disable, profile, cast(long)initReserve, cast(long)minPoolSize,
- cast(long)maxPoolSize, cast(long)incPoolSize, heapSizeFactor);
- }
-
- bool parseOptions(string opt)
- {
- opt = skip!isspace(opt);
- while (opt.length)
- {
- auto tail = find!(c => c == ':' || c == '=' || c == ' ')(opt);
- auto name = opt[0 .. $ - tail.length];
- if (name == "help")
- {
- help();
- opt = skip!isspace(tail);
- continue;
- }
- if (tail.length <= 1 || tail[0] == ' ')
- return optError("Missing argument for", name);
- tail = tail[1 .. $];
-
- switch (name)
- {
- foreach (field; __traits(allMembers, Config))
- {
- static if (!is(typeof(__traits(getMember, this, field)) == function))
- {
- case field:
- if (!parse(name, tail, __traits(getMember, this, field)))
- return false;
- break;
- }
- }
- break;
-
- default:
- return optError("Unknown", name);
- }
- opt = skip!isspace(tail);
- }
- return true;
- }
-}
-
-private:
-
-bool optError(in char[] msg, in char[] name)
-{
- version (unittest) if (inUnittest) return false;
-
- fprintf(stderr, "%.*s GC option '%.*s'.\n",
- cast(int)msg.length, msg.ptr,
- cast(int)name.length, name.ptr);
- return false;
-}
-
-inout(char)[] skip(alias pred)(inout(char)[] str)
-{
- return find!(c => !pred(c))(str);
-}
-
-inout(char)[] find(alias pred)(inout(char)[] str)
-{
- foreach (i; 0 .. str.length)
- if (pred(str[i])) return str[i .. $];
- return null;
-}
-
-bool parse(T:size_t)(const(char)[] optname, ref inout(char)[] str, ref T res)
-in { assert(str.length); }
-body
-{
- size_t i, v;
- for (; i < str.length && isdigit(str[i]); ++i)
- v = 10 * v + str[i] - '0';
-
- if (!i)
- return parseError("a number", optname, str);
- if (v > res.max)
- return parseError("a number " ~ T.max.stringof ~ " or below", optname, str[0 .. i]);
- str = str[i .. $];
- res = cast(T) v;
- return true;
-}
-
-bool parse(const(char)[] optname, ref inout(char)[] str, ref bool res)
-in { assert(str.length); }
-body
-{
- if (str[0] == '1' || str[0] == 'y' || str[0] == 'Y')
- res = true;
- else if (str[0] == '0' || str[0] == 'n' || str[0] == 'N')
- res = false;
- else
- return parseError("'0/n/N' or '1/y/Y'", optname, str);
- str = str[1 .. $];
- return true;
-}
-
-bool parse(const(char)[] optname, ref inout(char)[] str, ref float res)
-in { assert(str.length); }
-body
-{
- // % uint f %n \0
- char[1 + 10 + 1 + 2 + 1] fmt=void;
- // specify max-width
- immutable n = snprintf(fmt.ptr, fmt.length, "%%%uf%%n", cast(uint)str.length);
- assert(n > 4 && n < fmt.length);
-
- int nscanned;
- version (CRuntime_DigitalMars)
- {
- /* Older sscanf's in snn.lib can write to its first argument, causing a crash
- * if the string is in readonly memory. Recent updates to DMD
- * https://github.com/dlang/dmd/pull/6546
- * put string literals in readonly memory.
- * Although sscanf has been fixed,
- * http://ftp.digitalmars.com/snn.lib
- * this workaround is here so it still works with the older snn.lib.
- */
- // Create mutable copy of str
- const length = str.length;
- char* mptr = cast(char*)malloc(length + 1);
- assert(mptr);
- memcpy(mptr, str.ptr, length);
- mptr[length] = 0;
- const result = sscanf(mptr, fmt.ptr, &res, &nscanned);
- free(mptr);
- if (result < 1)
- return parseError("a float", optname, str);
- }
- else
- {
- if (sscanf(str.ptr, fmt.ptr, &res, &nscanned) < 1)
- return parseError("a float", optname, str);
- }
- str = str[nscanned .. $];
- return true;
-}
-
-bool parse(const(char)[] optname, ref inout(char)[] str, ref inout(char)[] res)
-in { assert(str.length); }
-body
-{
- auto tail = str.find!(c => c == ':' || c == '=' || c == ' ');
- res = str[0 .. $ - tail.length];
- if (!res.length)
- return parseError("an identifier", optname, str);
- str = tail;
- return true;
-}
-
-bool parseError(in char[] exp, in char[] opt, in char[] got)
-{
- version (unittest) if (inUnittest) return false;
-
- fprintf(stderr, "Expecting %.*s as argument for GC option '%.*s', got '%.*s' instead.\n",
- cast(int)exp.length, exp.ptr,
- cast(int)opt.length, opt.ptr,
- cast(int)got.length, got.ptr);
- return false;
-}
-
-size_t min(size_t a, size_t b) { return a <= b ? a : b; }
-
-version (unittest) __gshared bool inUnittest;
-
-unittest
-{
- inUnittest = true;
- scope (exit) inUnittest = false;
-
- Config conf;
- assert(!conf.parseOptions("disable"));
- assert(!conf.parseOptions("disable:"));
- assert(!conf.parseOptions("disable:5"));
- assert(conf.parseOptions("disable:y") && conf.disable);
- assert(conf.parseOptions("disable:n") && !conf.disable);
- assert(conf.parseOptions("disable:Y") && conf.disable);
- assert(conf.parseOptions("disable:N") && !conf.disable);
- assert(conf.parseOptions("disable:1") && conf.disable);
- assert(conf.parseOptions("disable:0") && !conf.disable);
-
- assert(conf.parseOptions("disable=y") && conf.disable);
- assert(conf.parseOptions("disable=n") && !conf.disable);
-
- assert(conf.parseOptions("profile=0") && conf.profile == 0);
- assert(conf.parseOptions("profile=1") && conf.profile == 1);
- assert(conf.parseOptions("profile=2") && conf.profile == 2);
- assert(!conf.parseOptions("profile=256"));
-
- assert(conf.parseOptions("disable:1 minPoolSize:16"));
- assert(conf.disable);
- assert(conf.minPoolSize == 16);
-
- assert(conf.parseOptions("heapSizeFactor:3.1"));
- assert(conf.heapSizeFactor == 3.1f);
- assert(conf.parseOptions("heapSizeFactor:3.1234567890 disable:0"));
- assert(conf.heapSizeFactor > 3.123f);
- assert(!conf.disable);
- assert(!conf.parseOptions("heapSizeFactor:3.0.2.5"));
- assert(conf.parseOptions("heapSizeFactor:2"));
- assert(conf.heapSizeFactor == 2.0f);
-
- assert(!conf.parseOptions("initReserve:foo"));
- assert(!conf.parseOptions("initReserve:y"));
- assert(!conf.parseOptions("initReserve:20.5"));
-
- assert(conf.parseOptions("help"));
- assert(conf.parseOptions("help profile:1"));
- assert(conf.parseOptions("help profile:1 help"));
-
- assert(conf.parseOptions("gc:manual") && conf.gc == "manual");
- assert(conf.parseOptions("gc:my-gc~modified") && conf.gc == "my-gc~modified");
- assert(conf.parseOptions("gc:conservative help profile:1") && conf.gc == "conservative" && conf.profile == 1);
-
- // the config parse doesn't know all available GC names, so should accept unknown ones
- assert(conf.parseOptions("gc:whatever"));
-}
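For context, the options handled by the deleted gc/config.d above are normally supplied to the program at startup rather than set by calling Config directly. The sketch below is illustrative only and is not part of this change: rt_options is the hook declared in the file above, while the --DRT-gcopt command-line form is assumed from general druntime usage (rt_cmdline_enabled / rt.config), not from this patch.

    // app.d -- hypothetical application embedding default GC options.
    // druntime's rt.config reads rt_options during runtime initialization,
    // and (when enabled) also honors --DRT-gcopt="..." on the command line.
    extern (C) __gshared string[] rt_options =
        [ "gcopt=profile:1 minPoolSize:16 heapSizeFactor:1.5" ];

    void main()
    {
        // Nothing to do here: the GC consumed the options during startup
        // (e.g. profile:1 prints a collection summary when the program exits).
    }
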
diff --git a/libphobos/libdruntime/gc/impl/conservative/gc.d b/libphobos/libdruntime/gc/impl/conservative/gc.d
deleted file mode 100644
index 300a32a..0000000
--- a/libphobos/libdruntime/gc/impl/conservative/gc.d
+++ /dev/null
@@ -1,3413 +0,0 @@
-/**
- * Contains the garbage collector implementation.
- *
- * Copyright: Copyright Digital Mars 2001 - 2016.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
- * Authors: Walter Bright, David Friedman, Sean Kelly
- */
-
-/* Copyright Digital Mars 2005 - 2016.
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying file LICENSE or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- */
-module gc.impl.conservative.gc;
-
-// D Programming Language Garbage Collector implementation
-
-/************** Debugging ***************************/
-
-//debug = PRINTF; // turn on printf's
-//debug = COLLECT_PRINTF; // turn on printf's
-//debug = PRINTF_TO_FILE;      // redirect printf's output to file "gcx.log"
-//debug = LOGGING; // log allocations / frees
-//debug = MEMSTOMP; // stomp on memory
-//debug = SENTINEL;           // add underrun/overrun protection
- // NOTE: this needs to be enabled globally in the makefiles
- // (-debug=SENTINEL) to pass druntime's unittests.
-//debug = PTRCHECK; // more pointer checking
-//debug = PTRCHECK2; // thorough but slow pointer checking
-//debug = INVARIANT; // enable invariants
-//debug = PROFILE_API; // profile API calls for config.profile > 1
-
-/***************************************************/
-
-import gc.bits;
-import gc.os;
-import gc.config;
-import gc.gcinterface;
-
-import rt.util.container.treap;
-
-import cstdlib = core.stdc.stdlib : calloc, free, malloc, realloc;
-import core.stdc.string : memcpy, memset, memmove;
-import core.bitop;
-import core.thread;
-static import core.memory;
-
-version (GNU) import gcc.builtins;
-
-debug (PRINTF_TO_FILE) import core.stdc.stdio : sprintf, fprintf, fopen, fflush, FILE;
-else import core.stdc.stdio : sprintf, printf; // needed to output profiling results
-
-import core.time;
-alias currTime = MonoTime.currTime;
-
-debug(PRINTF_TO_FILE)
-{
- private __gshared MonoTime gcStartTick;
- private __gshared FILE* gcx_fh;
-
- private int printf(ARGS...)(const char* fmt, ARGS args) nothrow
- {
- if (!gcx_fh)
- gcx_fh = fopen("gcx.log", "w");
- if (!gcx_fh)
- return 0;
-
- int len;
- if (MonoTime.ticksPerSecond == 0)
- {
- len = fprintf(gcx_fh, "before init: ");
- }
- else
- {
- if (gcStartTick == MonoTime.init)
- gcStartTick = MonoTime.currTime;
- immutable timeElapsed = MonoTime.currTime - gcStartTick;
- immutable secondsAsDouble = timeElapsed.total!"hnsecs" / cast(double)convert!("seconds", "hnsecs")(1);
- len = fprintf(gcx_fh, "%10.6f: ", secondsAsDouble);
- }
- len += fprintf(gcx_fh, fmt, args);
- fflush(gcx_fh);
- return len;
- }
-}
-
-debug(PRINTF) void printFreeInfo(Pool* pool) nothrow
-{
- uint nReallyFree;
- foreach (i; 0..pool.npages) {
- if (pool.pagetable[i] >= B_FREE) nReallyFree++;
- }
-
- printf("Pool %p: %d really free, %d supposedly free\n", pool, nReallyFree, pool.freepages);
-}
-
-// Track total time spent preparing for GC,
-// marking, sweeping and recovering pages.
-__gshared Duration prepTime;
-__gshared Duration markTime;
-__gshared Duration sweepTime;
-__gshared Duration recoverTime;
-__gshared Duration maxPauseTime;
-__gshared size_t numCollections;
-__gshared size_t maxPoolMemory;
-
-__gshared long numMallocs;
-__gshared long numFrees;
-__gshared long numReallocs;
-__gshared long numExtends;
-__gshared long numOthers;
-__gshared long mallocTime; // using ticks instead of MonoTime for better performance
-__gshared long freeTime;
-__gshared long reallocTime;
-__gshared long extendTime;
-__gshared long otherTime;
-__gshared long lockTime;
-
-private
-{
- extern (C)
- {
- // to allow compilation of this module without access to the rt package,
- // make these functions available from rt.lifetime
- void rt_finalizeFromGC(void* p, size_t size, uint attr) nothrow;
- int rt_hasFinalizerInSegment(void* p, size_t size, uint attr, in void[] segment) nothrow;
-
- // Declared as an extern instead of importing core.exception
- // to avoid inlining - see issue 13725.
- void onInvalidMemoryOperationError() @nogc nothrow;
- void onOutOfMemoryErrorNoGC() @nogc nothrow;
- }
-
- enum
- {
- OPFAIL = ~cast(size_t)0
- }
-}
-
-
-alias GC gc_t;
-
-
-/* ======================= Leak Detector =========================== */
-
-
-debug (LOGGING)
-{
- struct Log
- {
- void* p;
- size_t size;
- size_t line;
- char* file;
- void* parent;
-
- void print() nothrow
- {
- printf(" p = %p, size = %zd, parent = %p ", p, size, parent);
- if (file)
- {
- printf("%s(%u)", file, cast(uint)line);
- }
- printf("\n");
- }
- }
-
-
- struct LogArray
- {
- size_t dim;
- size_t allocdim;
- Log *data;
-
- void Dtor() nothrow
- {
- if (data)
- cstdlib.free(data);
- data = null;
- }
-
- void reserve(size_t nentries) nothrow
- {
- assert(dim <= allocdim);
- if (allocdim - dim < nentries)
- {
- allocdim = (dim + nentries) * 2;
- assert(dim + nentries <= allocdim);
- if (!data)
- {
- data = cast(Log*)cstdlib.malloc(allocdim * Log.sizeof);
- if (!data && allocdim)
- onOutOfMemoryErrorNoGC();
- }
- else
- { Log *newdata;
-
- newdata = cast(Log*)cstdlib.malloc(allocdim * Log.sizeof);
- if (!newdata && allocdim)
- onOutOfMemoryErrorNoGC();
- memcpy(newdata, data, dim * Log.sizeof);
- cstdlib.free(data);
- data = newdata;
- }
- }
- }
-
-
- void push(Log log) nothrow
- {
- reserve(1);
- data[dim++] = log;
- }
-
- void remove(size_t i) nothrow
- {
- memmove(data + i, data + i + 1, (dim - i) * Log.sizeof);
- dim--;
- }
-
-
- size_t find(void *p) nothrow
- {
- for (size_t i = 0; i < dim; i++)
- {
- if (data[i].p == p)
- return i;
- }
- return OPFAIL; // not found
- }
-
-
- void copy(LogArray *from) nothrow
- {
- reserve(from.dim - dim);
- assert(from.dim <= allocdim);
- memcpy(data, from.data, from.dim * Log.sizeof);
- dim = from.dim;
- }
- }
-}
-
-
-/* ============================ GC =============================== */
-
-class ConservativeGC : GC
-{
- // For passing to debug code (not thread safe)
- __gshared size_t line;
- __gshared char* file;
-
- Gcx *gcx; // implementation
-
- import core.internal.spinlock;
- static gcLock = shared(AlignedSpinLock)(SpinLock.Contention.lengthy);
- static bool _inFinalizer;
-
- // lock GC, throw InvalidMemoryOperationError on recursive locking during finalization
- static void lockNR() @nogc nothrow
- {
- if (_inFinalizer)
- onInvalidMemoryOperationError();
- gcLock.lock();
- }
-
-
- static void initialize(ref GC gc)
- {
- import core.stdc.string: memcpy;
-
- if (config.gc != "conservative")
- return;
-
- auto p = cstdlib.malloc(__traits(classInstanceSize,ConservativeGC));
-
- if (!p)
- onOutOfMemoryErrorNoGC();
-
- auto init = typeid(ConservativeGC).initializer();
- assert(init.length == __traits(classInstanceSize, ConservativeGC));
- auto instance = cast(ConservativeGC) memcpy(p, init.ptr, init.length);
- instance.__ctor();
-
- gc = instance;
- }
-
-
- static void finalize(ref GC gc)
- {
- if (config.gc != "conservative")
- return;
-
- auto instance = cast(ConservativeGC) gc;
- instance.Dtor();
- cstdlib.free(cast(void*)instance);
- }
-
-
- this()
- {
- //config is assumed to have already been initialized
-
- gcx = cast(Gcx*)cstdlib.calloc(1, Gcx.sizeof);
- if (!gcx)
- onOutOfMemoryErrorNoGC();
- gcx.initialize();
-
- if (config.initReserve)
- gcx.reserve(config.initReserve << 20);
- if (config.disable)
- gcx.disabled++;
- }
-
-
- void Dtor()
- {
- version (linux)
- {
- //debug(PRINTF) printf("Thread %x ", pthread_self());
- //debug(PRINTF) printf("GC.Dtor()\n");
- }
-
- if (gcx)
- {
- gcx.Dtor();
- cstdlib.free(gcx);
- gcx = null;
- }
- }
-
-
- void enable()
- {
- static void go(Gcx* gcx) nothrow
- {
- assert(gcx.disabled > 0);
- gcx.disabled--;
- }
- runLocked!(go, otherTime, numOthers)(gcx);
- }
-
-
- void disable()
- {
- static void go(Gcx* gcx) nothrow
- {
- gcx.disabled++;
- }
- runLocked!(go, otherTime, numOthers)(gcx);
- }
-
-
- auto runLocked(alias func, Args...)(auto ref Args args)
- {
- debug(PROFILE_API) immutable tm = (config.profile > 1 ? currTime.ticks : 0);
- lockNR();
- scope (failure) gcLock.unlock();
- debug(PROFILE_API) immutable tm2 = (config.profile > 1 ? currTime.ticks : 0);
-
- static if (is(typeof(func(args)) == void))
- func(args);
- else
- auto res = func(args);
-
- debug(PROFILE_API) if (config.profile > 1)
- lockTime += tm2 - tm;
- gcLock.unlock();
-
- static if (!is(typeof(func(args)) == void))
- return res;
- }
-
-
- auto runLocked(alias func, alias time, alias count, Args...)(auto ref Args args)
- {
- debug(PROFILE_API) immutable tm = (config.profile > 1 ? currTime.ticks : 0);
- lockNR();
- scope (failure) gcLock.unlock();
- debug(PROFILE_API) immutable tm2 = (config.profile > 1 ? currTime.ticks : 0);
-
- static if (is(typeof(func(args)) == void))
- func(args);
- else
- auto res = func(args);
-
- debug(PROFILE_API) if (config.profile > 1)
- {
- count++;
- immutable now = currTime.ticks;
- lockTime += tm2 - tm;
- time += now - tm2;
- }
- gcLock.unlock();
-
- static if (!is(typeof(func(args)) == void))
- return res;
- }
-
-
- uint getAttr(void* p) nothrow
- {
- if (!p)
- {
- return 0;
- }
-
- static uint go(Gcx* gcx, void* p) nothrow
- {
- Pool* pool = gcx.findPool(p);
- uint oldb = 0;
-
- if (pool)
- {
- p = sentinel_sub(p);
- auto biti = cast(size_t)(p - pool.baseAddr) >> pool.shiftBy;
-
- oldb = pool.getBits(biti);
- }
- return oldb;
- }
-
- return runLocked!(go, otherTime, numOthers)(gcx, p);
- }
-
-
- uint setAttr(void* p, uint mask) nothrow
- {
- if (!p)
- {
- return 0;
- }
-
- static uint go(Gcx* gcx, void* p, uint mask) nothrow
- {
- Pool* pool = gcx.findPool(p);
- uint oldb = 0;
-
- if (pool)
- {
- p = sentinel_sub(p);
- auto biti = cast(size_t)(p - pool.baseAddr) >> pool.shiftBy;
-
- oldb = pool.getBits(biti);
- pool.setBits(biti, mask);
- }
- return oldb;
- }
-
- return runLocked!(go, otherTime, numOthers)(gcx, p, mask);
- }
-
-
- uint clrAttr(void* p, uint mask) nothrow
- {
- if (!p)
- {
- return 0;
- }
-
- static uint go(Gcx* gcx, void* p, uint mask) nothrow
- {
- Pool* pool = gcx.findPool(p);
- uint oldb = 0;
-
- if (pool)
- {
- p = sentinel_sub(p);
- auto biti = cast(size_t)(p - pool.baseAddr) >> pool.shiftBy;
-
- oldb = pool.getBits(biti);
- pool.clrBits(biti, mask);
- }
- return oldb;
- }
-
- return runLocked!(go, otherTime, numOthers)(gcx, p, mask);
- }
-
-
- void *malloc(size_t size, uint bits, const TypeInfo ti) nothrow
- {
- if (!size)
- {
- return null;
- }
-
- size_t localAllocSize = void;
-
- auto p = runLocked!(mallocNoSync, mallocTime, numMallocs)(size, bits, localAllocSize, ti);
-
- if (!(bits & BlkAttr.NO_SCAN))
- {
- memset(p + size, 0, localAllocSize - size);
- }
-
- return p;
- }
-
-
- //
- //
- //
- private void *mallocNoSync(size_t size, uint bits, ref size_t alloc_size, const TypeInfo ti = null) nothrow
- {
- assert(size != 0);
-
- //debug(PRINTF) printf("GC::malloc(size = %d, gcx = %p)\n", size, gcx);
- assert(gcx);
- //debug(PRINTF) printf("gcx.self = %x, pthread_self() = %x\n", gcx.self, pthread_self());
-
- auto p = gcx.alloc(size + SENTINEL_EXTRA, alloc_size, bits);
- if (!p)
- onOutOfMemoryErrorNoGC();
-
- debug (SENTINEL)
- {
- p = sentinel_add(p);
- sentinel_init(p, size);
- alloc_size = size;
- }
- gcx.log_malloc(p, size);
-
- return p;
- }
-
-
- BlkInfo qalloc( size_t size, uint bits, const TypeInfo ti) nothrow
- {
-
- if (!size)
- {
- return BlkInfo.init;
- }
-
- BlkInfo retval;
-
- retval.base = runLocked!(mallocNoSync, mallocTime, numMallocs)(size, bits, retval.size, ti);
-
- if (!(bits & BlkAttr.NO_SCAN))
- {
- memset(retval.base + size, 0, retval.size - size);
- }
-
- retval.attr = bits;
- return retval;
- }
-
-
- void *calloc(size_t size, uint bits, const TypeInfo ti) nothrow
- {
- if (!size)
- {
- return null;
- }
-
- size_t localAllocSize = void;
-
- auto p = runLocked!(mallocNoSync, mallocTime, numMallocs)(size, bits, localAllocSize, ti);
-
- memset(p, 0, size);
- if (!(bits & BlkAttr.NO_SCAN))
- {
- memset(p + size, 0, localAllocSize - size);
- }
-
- return p;
- }
-
-
- void *realloc(void *p, size_t size, uint bits, const TypeInfo ti) nothrow
- {
- size_t localAllocSize = void;
- auto oldp = p;
-
- p = runLocked!(reallocNoSync, mallocTime, numMallocs)(p, size, bits, localAllocSize, ti);
-
- if (p !is oldp && !(bits & BlkAttr.NO_SCAN))
- {
- memset(p + size, 0, localAllocSize - size);
- }
-
- return p;
- }
-
-
- //
- // bits will be set to the resulting bits of the new block
- //
- private void *reallocNoSync(void *p, size_t size, ref uint bits, ref size_t alloc_size, const TypeInfo ti = null) nothrow
- {
- if (!size)
- { if (p)
- { freeNoSync(p);
- p = null;
- }
- alloc_size = 0;
- }
- else if (!p)
- {
- p = mallocNoSync(size, bits, alloc_size, ti);
- }
- else
- { void *p2;
- size_t psize;
-
- //debug(PRINTF) printf("GC::realloc(p = %p, size = %zu)\n", p, size);
- debug (SENTINEL)
- {
- sentinel_Invariant(p);
- psize = *sentinel_size(p);
- if (psize != size)
- {
- if (psize)
- {
- Pool *pool = gcx.findPool(p);
-
- if (pool)
- {
- auto biti = cast(size_t)(sentinel_sub(p) - pool.baseAddr) >> pool.shiftBy;
-
- if (bits)
- {
- pool.clrBits(biti, ~BlkAttr.NONE);
- pool.setBits(biti, bits);
- }
- else
- {
- bits = pool.getBits(biti);
- }
- }
- }
- p2 = mallocNoSync(size, bits, alloc_size, ti);
- if (psize < size)
- size = psize;
- //debug(PRINTF) printf("\tcopying %d bytes\n",size);
- memcpy(p2, p, size);
- p = p2;
- }
- }
- else
- {
- auto pool = gcx.findPool(p);
- if (pool.isLargeObject)
- {
- auto lpool = cast(LargeObjectPool*) pool;
- psize = lpool.getSize(p); // get allocated size
-
- if (size <= PAGESIZE / 2)
- goto Lmalloc; // switching from large object pool to small object pool
-
- auto psz = psize / PAGESIZE;
- auto newsz = (size + PAGESIZE - 1) / PAGESIZE;
- if (newsz == psz)
- {
- alloc_size = psize;
- return p;
- }
-
- auto pagenum = lpool.pagenumOf(p);
-
- if (newsz < psz)
- { // Shrink in place
- debug (MEMSTOMP) memset(p + size, 0xF2, psize - size);
- lpool.freePages(pagenum + newsz, psz - newsz);
- }
- else if (pagenum + newsz <= pool.npages)
- { // Attempt to expand in place
- foreach (binsz; lpool.pagetable[pagenum + psz .. pagenum + newsz])
- if (binsz != B_FREE)
- goto Lmalloc;
-
- debug (MEMSTOMP) memset(p + psize, 0xF0, size - psize);
- debug(PRINTF) printFreeInfo(pool);
- memset(&lpool.pagetable[pagenum + psz], B_PAGEPLUS, newsz - psz);
- gcx.usedLargePages += newsz - psz;
- lpool.freepages -= (newsz - psz);
- debug(PRINTF) printFreeInfo(pool);
- }
- else
- goto Lmalloc; // does not fit into current pool
-
- lpool.updateOffsets(pagenum);
- if (bits)
- {
- immutable biti = cast(size_t)(p - pool.baseAddr) >> pool.shiftBy;
- pool.clrBits(biti, ~BlkAttr.NONE);
- pool.setBits(biti, bits);
- }
- alloc_size = newsz * PAGESIZE;
- return p;
- }
-
- psize = (cast(SmallObjectPool*) pool).getSize(p); // get allocated size
- if (psize < size || // if new size is bigger
- psize > size * 2) // or less than half
- {
- Lmalloc:
- if (psize && pool)
- {
- auto biti = cast(size_t)(p - pool.baseAddr) >> pool.shiftBy;
-
- if (bits)
- {
- pool.clrBits(biti, ~BlkAttr.NONE);
- pool.setBits(biti, bits);
- }
- else
- {
- bits = pool.getBits(biti);
- }
- }
- p2 = mallocNoSync(size, bits, alloc_size, ti);
- if (psize < size)
- size = psize;
- //debug(PRINTF) printf("\tcopying %d bytes\n",size);
- memcpy(p2, p, size);
- p = p2;
- }
- else
- alloc_size = psize;
- }
- }
- return p;
- }
-
-
- size_t extend(void* p, size_t minsize, size_t maxsize, const TypeInfo ti) nothrow
- {
- return runLocked!(extendNoSync, extendTime, numExtends)(p, minsize, maxsize, ti);
- }
-
-
- //
- //
- //
- private size_t extendNoSync(void* p, size_t minsize, size_t maxsize, const TypeInfo ti = null) nothrow
- in
- {
- assert(minsize <= maxsize);
- }
- body
- {
- //debug(PRINTF) printf("GC::extend(p = %p, minsize = %zu, maxsize = %zu)\n", p, minsize, maxsize);
- debug (SENTINEL)
- {
- return 0;
- }
- else
- {
- auto pool = gcx.findPool(p);
- if (!pool || !pool.isLargeObject)
- return 0;
-
- auto lpool = cast(LargeObjectPool*) pool;
- auto psize = lpool.getSize(p); // get allocated size
- if (psize < PAGESIZE)
- return 0; // cannot extend buckets
-
- auto psz = psize / PAGESIZE;
- auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE;
- auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE;
-
- auto pagenum = lpool.pagenumOf(p);
-
- size_t sz;
- for (sz = 0; sz < maxsz; sz++)
- {
- auto i = pagenum + psz + sz;
- if (i == lpool.npages)
- break;
- if (lpool.pagetable[i] != B_FREE)
- { if (sz < minsz)
- return 0;
- break;
- }
- }
- if (sz < minsz)
- return 0;
- debug (MEMSTOMP) memset(pool.baseAddr + (pagenum + psz) * PAGESIZE, 0xF0, sz * PAGESIZE);
- memset(lpool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
- lpool.updateOffsets(pagenum);
- lpool.freepages -= sz;
- gcx.usedLargePages += sz;
- return (psz + sz) * PAGESIZE;
- }
- }
-
-
- size_t reserve(size_t size) nothrow
- {
- if (!size)
- {
- return 0;
- }
-
- return runLocked!(reserveNoSync, otherTime, numOthers)(size);
- }
-
-
- //
- //
- //
- private size_t reserveNoSync(size_t size) nothrow
- {
- assert(size != 0);
- assert(gcx);
-
- return gcx.reserve(size);
- }
-
-
- void free(void *p) nothrow
- {
- if (!p || _inFinalizer)
- {
- return;
- }
-
- return runLocked!(freeNoSync, freeTime, numFrees)(p);
- }
-
-
- //
- //
- //
- private void freeNoSync(void *p) nothrow
- {
- debug(PRINTF) printf("Freeing %p\n", cast(size_t) p);
- assert (p);
-
- Pool* pool;
- size_t pagenum;
- Bins bin;
- size_t biti;
-
- // Find which page it is in
- pool = gcx.findPool(p);
- if (!pool) // if not one of ours
- return; // ignore
-
- pagenum = pool.pagenumOf(p);
-
- debug(PRINTF) printf("pool base = %p, PAGENUM = %d of %d, bin = %d\n", pool.baseAddr, pagenum, pool.npages, pool.pagetable[pagenum]);
- debug(PRINTF) if (pool.isLargeObject) printf("Block size = %d\n", pool.bPageOffsets[pagenum]);
-
- bin = cast(Bins)pool.pagetable[pagenum];
-
-        // Verify that the pointer is at the beginning of a block;
-        //  no action should be taken if p is an interior pointer
- if (bin > B_PAGE) // B_PAGEPLUS or B_FREE
- return;
- if ((sentinel_sub(p) - pool.baseAddr) & (binsize[bin] - 1))
- return;
-
- sentinel_Invariant(p);
- p = sentinel_sub(p);
- biti = cast(size_t)(p - pool.baseAddr) >> pool.shiftBy;
-
- pool.clrBits(biti, ~BlkAttr.NONE);
-
- if (pool.isLargeObject) // if large alloc
- {
- assert(bin == B_PAGE);
- auto lpool = cast(LargeObjectPool*) pool;
-
- // Free pages
- size_t npages = lpool.bPageOffsets[pagenum];
- debug (MEMSTOMP) memset(p, 0xF2, npages * PAGESIZE);
- lpool.freePages(pagenum, npages);
- }
- else
- { // Add to free list
- List *list = cast(List*)p;
-
- debug (MEMSTOMP) memset(p, 0xF2, binsize[bin]);
-
- list.next = gcx.bucket[bin];
- list.pool = pool;
- gcx.bucket[bin] = list;
- }
-
- gcx.log_free(sentinel_add(p));
- }
-
-
- void* addrOf(void *p) nothrow
- {
- if (!p)
- {
- return null;
- }
-
- return runLocked!(addrOfNoSync, otherTime, numOthers)(p);
- }
-
-
- //
- //
- //
- void* addrOfNoSync(void *p) nothrow
- {
- if (!p)
- {
- return null;
- }
-
- auto q = gcx.findBase(p);
- if (q)
- q = sentinel_add(q);
- return q;
- }
-
-
- size_t sizeOf(void *p) nothrow
- {
- if (!p)
- {
- return 0;
- }
-
- return runLocked!(sizeOfNoSync, otherTime, numOthers)(p);
- }
-
-
- //
- //
- //
- private size_t sizeOfNoSync(void *p) nothrow
- {
- assert (p);
-
- debug (SENTINEL)
- {
- p = sentinel_sub(p);
- size_t size = gcx.findSize(p);
-
- // Check for interior pointer
- // This depends on:
-            // 1) size is a power of 2 for values less than PAGESIZE
- // 2) base of memory pool is aligned on PAGESIZE boundary
- if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
- size = 0;
- return size ? size - SENTINEL_EXTRA : 0;
- }
- else
- {
- size_t size = gcx.findSize(p);
-
- // Check for interior pointer
- // This depends on:
-            // 1) size is a power of 2 for values less than PAGESIZE
- // 2) base of memory pool is aligned on PAGESIZE boundary
- if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
- return 0;
- return size;
- }
- }
-
-
- BlkInfo query(void *p) nothrow
- {
- if (!p)
- {
- BlkInfo i;
- return i;
- }
-
- return runLocked!(queryNoSync, otherTime, numOthers)(p);
- }
-
- //
- //
- //
- BlkInfo queryNoSync(void *p) nothrow
- {
- assert(p);
-
- BlkInfo info = gcx.getInfo(p);
- debug(SENTINEL)
- {
- if (info.base)
- {
- info.base = sentinel_add(info.base);
- info.size = *sentinel_size(info.base);
- }
- }
- return info;
- }
-
-
- /**
- * Verify that pointer p:
- * 1) belongs to this memory pool
- * 2) points to the start of an allocated piece of memory
- * 3) is not on a free list
- */
- void check(void *p) nothrow
- {
- if (!p)
- {
- return;
- }
-
- return runLocked!(checkNoSync, otherTime, numOthers)(p);
- }
-
-
- //
- //
- //
- private void checkNoSync(void *p) nothrow
- {
- assert(p);
-
- sentinel_Invariant(p);
- debug (PTRCHECK)
- {
- Pool* pool;
- size_t pagenum;
- Bins bin;
- size_t size;
-
- p = sentinel_sub(p);
- pool = gcx.findPool(p);
- assert(pool);
- pagenum = pool.pagenumOf(p);
- bin = cast(Bins)pool.pagetable[pagenum];
- assert(bin <= B_PAGE);
- size = binsize[bin];
- assert((cast(size_t)p & (size - 1)) == 0);
-
- debug (PTRCHECK2)
- {
- if (bin < B_PAGE)
- {
- // Check that p is not on a free list
- List *list;
-
- for (list = gcx.bucket[bin]; list; list = list.next)
- {
- assert(cast(void*)list != p);
- }
- }
- }
- }
- }
-
-
- void addRoot(void *p) nothrow @nogc
- {
- if (!p)
- {
- return;
- }
-
- gcx.addRoot(p);
- }
-
-
- void removeRoot(void *p) nothrow @nogc
- {
- if (!p)
- {
- return;
- }
-
- gcx.removeRoot(p);
- }
-
-
- @property RootIterator rootIter() @nogc
- {
- return &gcx.rootsApply;
- }
-
-
- void addRange(void *p, size_t sz, const TypeInfo ti = null) nothrow @nogc
- {
- if (!p || !sz)
- {
- return;
- }
-
- gcx.addRange(p, p + sz, ti);
- }
-
-
- void removeRange(void *p) nothrow @nogc
- {
- if (!p)
- {
- return;
- }
-
- gcx.removeRange(p);
- }
-
-
- @property RangeIterator rangeIter() @nogc
- {
- return &gcx.rangesApply;
- }
-
-
- void runFinalizers(in void[] segment) nothrow
- {
- static void go(Gcx* gcx, in void[] segment) nothrow
- {
- gcx.runFinalizers(segment);
- }
- return runLocked!(go, otherTime, numOthers)(gcx, segment);
- }
-
-
- bool inFinalizer() nothrow
- {
- return _inFinalizer;
- }
-
-
- void collect() nothrow
- {
- fullCollect();
- }
-
-
- void collectNoStack() nothrow
- {
- fullCollectNoStack();
- }
-
-
- /**
- * Do full garbage collection.
- * Return number of pages free'd.
- */
- size_t fullCollect() nothrow
- {
- debug(PRINTF) printf("GC.fullCollect()\n");
-
- // Since a finalizer could launch a new thread, we always need to lock
- // when collecting.
- static size_t go(Gcx* gcx) nothrow
- {
- return gcx.fullcollect();
- }
- immutable result = runLocked!go(gcx);
-
- version (none)
- {
- GCStats stats;
-
- getStats(stats);
- debug(PRINTF) printf("heapSize = %zx, freeSize = %zx\n",
- stats.heapSize, stats.freeSize);
- }
-
- gcx.log_collect();
- return result;
- }
-
-
- /**
-     * Do a full garbage collection, ignoring the thread stacks.
- */
- void fullCollectNoStack() nothrow
- {
- // Since a finalizer could launch a new thread, we always need to lock
- // when collecting.
- static size_t go(Gcx* gcx) nothrow
- {
- return gcx.fullcollect(true);
- }
- runLocked!go(gcx);
- }
-
-
- void minimize() nothrow
- {
- static void go(Gcx* gcx) nothrow
- {
- gcx.minimize();
- }
- runLocked!(go, otherTime, numOthers)(gcx);
- }
-
-
- core.memory.GC.Stats stats() nothrow
- {
- typeof(return) ret;
-
- runLocked!(getStatsNoSync, otherTime, numOthers)(ret);
-
- return ret;
- }
-
-
- //
- //
- //
- private void getStatsNoSync(out core.memory.GC.Stats stats) nothrow
- {
- foreach (pool; gcx.pooltable[0 .. gcx.npools])
- {
- foreach (bin; pool.pagetable[0 .. pool.npages])
- {
- if (bin == B_FREE)
- stats.freeSize += PAGESIZE;
- else
- stats.usedSize += PAGESIZE;
- }
- }
-
- size_t freeListSize;
- foreach (n; 0 .. B_PAGE)
- {
- immutable sz = binsize[n];
- for (List *list = gcx.bucket[n]; list; list = list.next)
- freeListSize += sz;
- }
-
- stats.usedSize -= freeListSize;
- stats.freeSize += freeListSize;
- }
-}
-
-
-/* ============================ Gcx =============================== */
-
-enum
-{ PAGESIZE = 4096,
- POOLSIZE = (4096*256),
-}
-
-
-enum
-{
- B_16,
- B_32,
- B_64,
- B_128,
- B_256,
- B_512,
- B_1024,
- B_2048,
- B_PAGE, // start of large alloc
- B_PAGEPLUS, // continuation of large alloc
- B_FREE, // free page
- B_MAX
-}
-
-
-alias ubyte Bins;
-
-
-struct List
-{
- List *next;
- Pool *pool;
-}
-
-
-immutable uint[B_MAX] binsize = [ 16,32,64,128,256,512,1024,2048,4096 ];
-immutable size_t[B_MAX] notbinsize = [ ~(16-1),~(32-1),~(64-1),~(128-1),~(256-1),
- ~(512-1),~(1024-1),~(2048-1),~(4096-1) ];
-
-alias PageBits = GCBits.wordtype[PAGESIZE / 16 / GCBits.BITS_PER_WORD];
-static assert(PAGESIZE % (GCBits.BITS_PER_WORD * 16) == 0);
-
-private void set(ref PageBits bits, size_t i) @nogc pure nothrow
-{
- assert(i < PageBits.sizeof * 8);
- bts(bits.ptr, i);
-}
-
-/* ============================ Gcx =============================== */
-
-struct Gcx
-{
- import core.internal.spinlock;
- auto rootsLock = shared(AlignedSpinLock)(SpinLock.Contention.brief);
- auto rangesLock = shared(AlignedSpinLock)(SpinLock.Contention.brief);
- Treap!Root roots;
- Treap!Range ranges;
-
- bool log; // turn on logging
- debug(INVARIANT) bool initialized;
- uint disabled; // turn off collections if >0
-
- import gc.pooltable;
- @property size_t npools() pure const nothrow { return pooltable.length; }
- PoolTable!Pool pooltable;
-
- List*[B_PAGE] bucket; // free list for each small size
-
- // run a collection when reaching those thresholds (number of used pages)
- float smallCollectThreshold, largeCollectThreshold;
- uint usedSmallPages, usedLargePages;
- // total number of mapped pages
- uint mappedPages;
-
- void initialize()
- {
- (cast(byte*)&this)[0 .. Gcx.sizeof] = 0;
- log_init();
- roots.initialize();
- ranges.initialize();
- smallCollectThreshold = largeCollectThreshold = 0.0f;
- usedSmallPages = usedLargePages = 0;
- mappedPages = 0;
- //printf("gcx = %p, self = %x\n", &this, self);
- debug(INVARIANT) initialized = true;
- }
-
-
- void Dtor()
- {
- if (config.profile)
- {
- printf("\tNumber of collections: %llu\n", cast(ulong)numCollections);
- printf("\tTotal GC prep time: %lld milliseconds\n",
- prepTime.total!("msecs"));
- printf("\tTotal mark time: %lld milliseconds\n",
- markTime.total!("msecs"));
- printf("\tTotal sweep time: %lld milliseconds\n",
- sweepTime.total!("msecs"));
- printf("\tTotal page recovery time: %lld milliseconds\n",
- recoverTime.total!("msecs"));
- long maxPause = maxPauseTime.total!("msecs");
- printf("\tMax Pause Time: %lld milliseconds\n", maxPause);
- long gcTime = (recoverTime + sweepTime + markTime + prepTime).total!("msecs");
- printf("\tGrand total GC time: %lld milliseconds\n", gcTime);
- long pauseTime = (markTime + prepTime).total!("msecs");
-
- char[30] apitxt;
- apitxt[0] = 0;
- debug(PROFILE_API) if (config.profile > 1)
- {
- static Duration toDuration(long dur)
- {
- return MonoTime(dur) - MonoTime(0);
- }
-
- printf("\n");
- printf("\tmalloc: %llu calls, %lld ms\n", cast(ulong)numMallocs, toDuration(mallocTime).total!"msecs");
- printf("\trealloc: %llu calls, %lld ms\n", cast(ulong)numReallocs, toDuration(reallocTime).total!"msecs");
- printf("\tfree: %llu calls, %lld ms\n", cast(ulong)numFrees, toDuration(freeTime).total!"msecs");
- printf("\textend: %llu calls, %lld ms\n", cast(ulong)numExtends, toDuration(extendTime).total!"msecs");
- printf("\tother: %llu calls, %lld ms\n", cast(ulong)numOthers, toDuration(otherTime).total!"msecs");
- printf("\tlock time: %lld ms\n", toDuration(lockTime).total!"msecs");
-
- long apiTime = mallocTime + reallocTime + freeTime + extendTime + otherTime + lockTime;
- printf("\tGC API: %lld ms\n", toDuration(apiTime).total!"msecs");
- sprintf(apitxt.ptr, " API%5ld ms", toDuration(apiTime).total!"msecs");
- }
-
- printf("GC summary:%5lld MB,%5lld GC%5lld ms, Pauses%5lld ms <%5lld ms%s\n",
- cast(long) maxPoolMemory >> 20, cast(ulong)numCollections, gcTime,
- pauseTime, maxPause, apitxt.ptr);
- }
-
- debug(INVARIANT) initialized = false;
-
- for (size_t i = 0; i < npools; i++)
- {
- Pool *pool = pooltable[i];
- mappedPages -= pool.npages;
- pool.Dtor();
- cstdlib.free(pool);
- }
- assert(!mappedPages);
- pooltable.Dtor();
-
- roots.removeAll();
- ranges.removeAll();
- toscan.reset();
- }
-
-
- void Invariant() const { }
-
- debug(INVARIANT)
- invariant()
- {
- if (initialized)
- {
- //printf("Gcx.invariant(): this = %p\n", &this);
- pooltable.Invariant();
-
- rangesLock.lock();
- foreach (range; ranges)
- {
- assert(range.pbot);
- assert(range.ptop);
- assert(range.pbot <= range.ptop);
- }
- rangesLock.unlock();
-
- for (size_t i = 0; i < B_PAGE; i++)
- {
- for (auto list = cast(List*)bucket[i]; list; list = list.next)
- {
- }
- }
- }
- }
-
-
- /**
- *
- */
- void addRoot(void *p) nothrow @nogc
- {
- rootsLock.lock();
- scope (failure) rootsLock.unlock();
- roots.insert(Root(p));
- rootsLock.unlock();
- }
-
-
- /**
- *
- */
- void removeRoot(void *p) nothrow @nogc
- {
- rootsLock.lock();
- scope (failure) rootsLock.unlock();
- roots.remove(Root(p));
- rootsLock.unlock();
- }
-
-
- /**
- *
- */
- int rootsApply(scope int delegate(ref Root) nothrow dg) nothrow
- {
- rootsLock.lock();
- scope (failure) rootsLock.unlock();
- auto ret = roots.opApply(dg);
- rootsLock.unlock();
- return ret;
- }
-
-
- /**
- *
- */
- void addRange(void *pbot, void *ptop, const TypeInfo ti) nothrow @nogc
- {
- //debug(PRINTF) printf("Thread %x ", pthread_self());
- debug(PRINTF) printf("%p.Gcx::addRange(%p, %p)\n", &this, pbot, ptop);
- rangesLock.lock();
- scope (failure) rangesLock.unlock();
- ranges.insert(Range(pbot, ptop));
- rangesLock.unlock();
- }
-
-
- /**
- *
- */
- void removeRange(void *pbot) nothrow @nogc
- {
- //debug(PRINTF) printf("Thread %x ", pthread_self());
- debug(PRINTF) printf("Gcx.removeRange(%p)\n", pbot);
- rangesLock.lock();
- scope (failure) rangesLock.unlock();
- ranges.remove(Range(pbot, pbot)); // only pbot is used, see Range.opCmp
- rangesLock.unlock();
-
- // debug(PRINTF) printf("Wrong thread\n");
- // This is a fatal error, but ignore it.
- // The problem is that we can get a Close() call on a thread
- // other than the one the range was allocated on.
- //assert(zero);
- }
-
- /**
- *
- */
- int rangesApply(scope int delegate(ref Range) nothrow dg) nothrow
- {
- rangesLock.lock();
- scope (failure) rangesLock.unlock();
- auto ret = ranges.opApply(dg);
- rangesLock.unlock();
- return ret;
- }
-
-
- /**
- *
- */
- void runFinalizers(in void[] segment) nothrow
- {
- ConservativeGC._inFinalizer = true;
- scope (failure) ConservativeGC._inFinalizer = false;
-
- foreach (pool; pooltable[0 .. npools])
- {
- if (!pool.finals.nbits) continue;
-
- if (pool.isLargeObject)
- {
- auto lpool = cast(LargeObjectPool*) pool;
- lpool.runFinalizers(segment);
- }
- else
- {
- auto spool = cast(SmallObjectPool*) pool;
- spool.runFinalizers(segment);
- }
- }
- ConservativeGC._inFinalizer = false;
- }
-
- Pool* findPool(void* p) pure nothrow
- {
- return pooltable.findPool(p);
- }
-
- /**
- * Find base address of block containing pointer p.
- * Returns null if not a gc'd pointer
- */
- void* findBase(void *p) nothrow
- {
- Pool *pool;
-
- pool = findPool(p);
- if (pool)
- {
- size_t offset = cast(size_t)(p - pool.baseAddr);
- size_t pn = offset / PAGESIZE;
- Bins bin = cast(Bins)pool.pagetable[pn];
-
- // Adjust bit to be at start of allocated memory block
- if (bin <= B_PAGE)
- {
- return pool.baseAddr + (offset & notbinsize[bin]);
- }
- else if (bin == B_PAGEPLUS)
- {
- auto pageOffset = pool.bPageOffsets[pn];
- offset -= pageOffset * PAGESIZE;
- pn -= pageOffset;
-
- return pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
- }
- else
- {
- // we are in a B_FREE page
- assert(bin == B_FREE);
- return null;
- }
- }
- return null;
- }
-
-
- /**
- * Find size of pointer p.
- * Returns 0 if not a gc'd pointer
- */
- size_t findSize(void *p) nothrow
- {
- Pool* pool = findPool(p);
- if (pool)
- return pool.slGetSize(p);
- return 0;
- }
-
- /**
- *
- */
- BlkInfo getInfo(void* p) nothrow
- {
- Pool* pool = findPool(p);
- if (pool)
- return pool.slGetInfo(p);
- return BlkInfo();
- }
-
- /**
- * Computes the bin table using CTFE.
- */
- static byte[2049] ctfeBins() nothrow
- {
- byte[2049] ret;
- size_t p = 0;
- for (Bins b = B_16; b <= B_2048; b++)
- for ( ; p <= binsize[b]; p++)
- ret[p] = b;
-
- return ret;
- }
-
- static const byte[2049] binTable = ctfeBins();
-
- /**
- * Allocate a new pool of at least size bytes.
- * Sort it into pooltable[].
- * Mark all memory in the pool as B_FREE.
- * Return the actual number of bytes reserved or 0 on error.
- */
- size_t reserve(size_t size) nothrow
- {
- size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
-
- // Assume reserve() is for small objects.
- Pool* pool = newPool(npages, false);
-
- if (!pool)
- return 0;
- return pool.npages * PAGESIZE;
- }
-
- /**
- * Update the thresholds for when to collect the next time
- */
- void updateCollectThresholds() nothrow
- {
- static float max(float a, float b) nothrow
- {
- return a >= b ? a : b;
- }
-
- // instantly increases, slowly decreases
- static float smoothDecay(float oldVal, float newVal) nothrow
- {
- // decay to 63.2% of newVal over 5 collections
- // http://en.wikipedia.org/wiki/Low-pass_filter#Simple_infinite_impulse_response_filter
- enum alpha = 1.0 / (5 + 1);
- immutable decay = (newVal - oldVal) * alpha + oldVal;
- return max(newVal, decay);
- }
-
- immutable smTarget = usedSmallPages * config.heapSizeFactor;
- smallCollectThreshold = smoothDecay(smallCollectThreshold, smTarget);
- immutable lgTarget = usedLargePages * config.heapSizeFactor;
- largeCollectThreshold = smoothDecay(largeCollectThreshold, lgTarget);
- }
-
- /**
- * Minimizes physical memory usage by returning free pools to the OS.
- */
- void minimize() nothrow
- {
- debug(PRINTF) printf("Minimizing.\n");
-
- foreach (pool; pooltable.minimize())
- {
- debug(PRINTF) printFreeInfo(pool);
- mappedPages -= pool.npages;
- pool.Dtor();
- cstdlib.free(pool);
- }
-
- debug(PRINTF) printf("Done minimizing.\n");
- }
-
- private @property bool lowMem() const nothrow
- {
- return isLowOnMem(mappedPages * PAGESIZE);
- }
-
- void* alloc(size_t size, ref size_t alloc_size, uint bits) nothrow
- {
- return size <= 2048 ? smallAlloc(binTable[size], alloc_size, bits)
- : bigAlloc(size, alloc_size, bits);
- }
-
- void* smallAlloc(Bins bin, ref size_t alloc_size, uint bits) nothrow
- {
- alloc_size = binsize[bin];
-
- void* p;
- bool tryAlloc() nothrow
- {
- if (!bucket[bin])
- {
- bucket[bin] = allocPage(bin);
- if (!bucket[bin])
- return false;
- }
- p = bucket[bin];
- return true;
- }
-
- if (!tryAlloc())
- {
- if (!lowMem && (disabled || usedSmallPages < smallCollectThreshold))
- {
- // disabled or threshold not reached => allocate a new pool instead of collecting
- if (!newPool(1, false))
- {
- // out of memory => try to free some memory
- fullcollect();
- if (lowMem) minimize();
- }
- }
- else
- {
- fullcollect();
- if (lowMem) minimize();
- }
-            // tryAlloc will succeed if a new pool was allocated above; if it fails, allocate a new pool now
- if (!tryAlloc() && (!newPool(1, false) || !tryAlloc()))
- // out of luck or memory
- onOutOfMemoryErrorNoGC();
- }
- assert(p !is null);
-
- // Return next item from free list
- bucket[bin] = (cast(List*)p).next;
- auto pool = (cast(List*)p).pool;
- if (bits)
- pool.setBits((p - pool.baseAddr) >> pool.shiftBy, bits);
- //debug(PRINTF) printf("\tmalloc => %p\n", p);
- debug (MEMSTOMP) memset(p, 0xF0, alloc_size);
- return p;
- }
-
- /**
- * Allocate a chunk of memory that is larger than a page.
- * Return null if out of memory.
- */
- void* bigAlloc(size_t size, ref size_t alloc_size, uint bits, const TypeInfo ti = null) nothrow
- {
- debug(PRINTF) printf("In bigAlloc. Size: %d\n", size);
-
- LargeObjectPool* pool;
- size_t pn;
- immutable npages = (size + PAGESIZE - 1) / PAGESIZE;
- if (npages == 0)
- onOutOfMemoryErrorNoGC(); // size just below size_t.max requested
-
- bool tryAlloc() nothrow
- {
- foreach (p; pooltable[0 .. npools])
- {
- if (!p.isLargeObject || p.freepages < npages)
- continue;
- auto lpool = cast(LargeObjectPool*) p;
- if ((pn = lpool.allocPages(npages)) == OPFAIL)
- continue;
- pool = lpool;
- return true;
- }
- return false;
- }
-
- bool tryAllocNewPool() nothrow
- {
- pool = cast(LargeObjectPool*) newPool(npages, true);
- if (!pool) return false;
- pn = pool.allocPages(npages);
- assert(pn != OPFAIL);
- return true;
- }
-
- if (!tryAlloc())
- {
- if (!lowMem && (disabled || usedLargePages < largeCollectThreshold))
- {
- // disabled or threshold not reached => allocate a new pool instead of collecting
- if (!tryAllocNewPool())
- {
- // disabled but out of memory => try to free some memory
- fullcollect();
- minimize();
- }
- }
- else
- {
- fullcollect();
- minimize();
- }
- // If alloc didn't yet succeed retry now that we collected/minimized
- if (!pool && !tryAlloc() && !tryAllocNewPool())
- // out of luck or memory
- return null;
- }
- assert(pool);
-
- debug(PRINTF) printFreeInfo(&pool.base);
- pool.pagetable[pn] = B_PAGE;
- if (npages > 1)
- memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
- pool.updateOffsets(pn);
- usedLargePages += npages;
- pool.freepages -= npages;
-
- debug(PRINTF) printFreeInfo(&pool.base);
-
- auto p = pool.baseAddr + pn * PAGESIZE;
- debug(PRINTF) printf("Got large alloc: %p, pt = %d, np = %d\n", p, pool.pagetable[pn], npages);
- debug (MEMSTOMP) memset(p, 0xF1, size);
- alloc_size = npages * PAGESIZE;
- //debug(PRINTF) printf("\tp = %p\n", p);
-
- if (bits)
- pool.setBits(pn, bits);
- return p;
- }
-
-
- /**
- * Allocate a new pool with at least npages in it.
- * Sort it into pooltable[].
- * Return null if failed.
- */
- Pool *newPool(size_t npages, bool isLargeObject) nothrow
- {
- //debug(PRINTF) printf("************Gcx::newPool(npages = %d)****************\n", npages);
-
- // Minimum of POOLSIZE
- size_t minPages = (config.minPoolSize << 20) / PAGESIZE;
- if (npages < minPages)
- npages = minPages;
- else if (npages > minPages)
- { // Give us 150% of requested size, so there's room to extend
- auto n = npages + (npages >> 1);
- if (n < size_t.max/PAGESIZE)
- npages = n;
- }
-
-        // Allocate successively larger pools, capped at config.maxPoolSize
- if (npools)
- { size_t n;
-
- n = config.minPoolSize + config.incPoolSize * npools;
- if (n > config.maxPoolSize)
- n = config.maxPoolSize; // cap pool size
- n *= (1 << 20) / PAGESIZE; // convert MB to pages
- if (npages < n)
- npages = n;
- }
-
- //printf("npages = %d\n", npages);
-
- auto pool = cast(Pool *)cstdlib.calloc(1, isLargeObject ? LargeObjectPool.sizeof : SmallObjectPool.sizeof);
- if (pool)
- {
- pool.initialize(npages, isLargeObject);
- if (!pool.baseAddr || !pooltable.insert(pool))
- {
- pool.Dtor();
- cstdlib.free(pool);
- return null;
- }
- }
-
- mappedPages += npages;
-
- if (config.profile)
- {
- if (mappedPages * PAGESIZE > maxPoolMemory)
- maxPoolMemory = mappedPages * PAGESIZE;
- }
- return pool;
- }
-
- /**
-     * Allocate a page of bins.
-     * Returns:
-     *      head of a singly linked list of new entries
- */
- List* allocPage(Bins bin) nothrow
- {
- //debug(PRINTF) printf("Gcx::allocPage(bin = %d)\n", bin);
- for (size_t n = 0; n < npools; n++)
- {
- Pool* pool = pooltable[n];
- if (pool.isLargeObject)
- continue;
- if (List* p = (cast(SmallObjectPool*)pool).allocPage(bin))
- {
- ++usedSmallPages;
- return p;
- }
- }
- return null;
- }
-
- static struct ToScanStack
- {
- nothrow:
- @disable this(this);
-
- void reset()
- {
- _length = 0;
- os_mem_unmap(_p, _cap * Range.sizeof);
- _p = null;
- _cap = 0;
- }
-
- void push(Range rng)
- {
- if (_length == _cap) grow();
- _p[_length++] = rng;
- }
-
- Range pop()
- in { assert(!empty); }
- body
- {
- return _p[--_length];
- }
-
- ref inout(Range) opIndex(size_t idx) inout
- in { assert(idx < _length); }
- body
- {
- return _p[idx];
- }
-
- @property size_t length() const { return _length; }
- @property bool empty() const { return !length; }
-
- private:
- void grow()
- {
- enum initSize = 64 * 1024; // Windows VirtualAlloc granularity
- immutable ncap = _cap ? 2 * _cap : initSize / Range.sizeof;
- auto p = cast(Range*)os_mem_map(ncap * Range.sizeof);
- if (p is null) onOutOfMemoryErrorNoGC();
- if (_p !is null)
- {
- p[0 .. _length] = _p[0 .. _length];
- os_mem_unmap(_p, _cap * Range.sizeof);
- }
- _p = p;
- _cap = ncap;
- }
-
- size_t _length;
- Range* _p;
- size_t _cap;
- }
-
- ToScanStack toscan;
-
- /**
- * Search a range of memory values and mark any pointers into the GC pool.
- */
- void mark(void *pbot, void *ptop) scope nothrow
- {
- void **p1 = cast(void **)pbot;
- void **p2 = cast(void **)ptop;
-
- // limit the amount of ranges added to the toscan stack
- enum FANOUT_LIMIT = 32;
- size_t stackPos;
- Range[FANOUT_LIMIT] stack = void;
-
- Lagain:
- size_t pcache = 0;
-
- // let dmd allocate a register for this.pools
- auto pools = pooltable.pools;
- const highpool = pooltable.npools - 1;
- const minAddr = pooltable.minAddr;
- const maxAddr = pooltable.maxAddr;
-
- //printf("marking range: [%p..%p] (%#zx)\n", p1, p2, cast(size_t)p2 - cast(size_t)p1);
- Lnext: for (; p1 < p2; p1++)
- {
- auto p = *p1;
-
- //if (log) debug(PRINTF) printf("\tmark %p\n", p);
- if (p >= minAddr && p < maxAddr)
- {
- if ((cast(size_t)p & ~cast(size_t)(PAGESIZE-1)) == pcache)
- continue;
-
- Pool* pool = void;
- size_t low = 0;
- size_t high = highpool;
- while (true)
- {
- size_t mid = (low + high) >> 1;
- pool = pools[mid];
- if (p < pool.baseAddr)
- high = mid - 1;
- else if (p >= pool.topAddr)
- low = mid + 1;
- else break;
-
- if (low > high)
- continue Lnext;
- }
- size_t offset = cast(size_t)(p - pool.baseAddr);
- size_t biti = void;
- size_t pn = offset / PAGESIZE;
- Bins bin = cast(Bins)pool.pagetable[pn];
- void* base = void;
-
- //debug(PRINTF) printf("\t\tfound pool %p, base=%p, pn = %zd, bin = %d, biti = x%x\n", pool, pool.baseAddr, pn, bin, biti);
-
- // Adjust bit to be at start of allocated memory block
- if (bin < B_PAGE)
- {
-                    // We don't care about setting pointsToBase correctly
- // because it's ignored for small object pools anyhow.
- auto offsetBase = offset & notbinsize[bin];
- biti = offsetBase >> pool.shiftBy;
- base = pool.baseAddr + offsetBase;
- //debug(PRINTF) printf("\t\tbiti = x%x\n", biti);
-
- if (!pool.mark.set(biti) && !pool.noscan.test(biti)) {
- stack[stackPos++] = Range(base, base + binsize[bin]);
- if (stackPos == stack.length)
- break;
- }
- }
- else if (bin == B_PAGE)
- {
- auto offsetBase = offset & notbinsize[bin];
- base = pool.baseAddr + offsetBase;
- biti = offsetBase >> pool.shiftBy;
- //debug(PRINTF) printf("\t\tbiti = x%x\n", biti);
-
- pcache = cast(size_t)p & ~cast(size_t)(PAGESIZE-1);
-
- // For the NO_INTERIOR attribute. This tracks whether
- // the pointer is an interior pointer or points to the
- // base address of a block.
- bool pointsToBase = (base == sentinel_sub(p));
- if (!pointsToBase && pool.nointerior.nbits && pool.nointerior.test(biti))
- continue;
-
- if (!pool.mark.set(biti) && !pool.noscan.test(biti)) {
- stack[stackPos++] = Range(base, base + pool.bPageOffsets[pn] * PAGESIZE);
- if (stackPos == stack.length)
- break;
- }
- }
- else if (bin == B_PAGEPLUS)
- {
- pn -= pool.bPageOffsets[pn];
- base = pool.baseAddr + (pn * PAGESIZE);
- biti = pn * (PAGESIZE >> pool.shiftBy);
-
- pcache = cast(size_t)p & ~cast(size_t)(PAGESIZE-1);
- if (pool.nointerior.nbits && pool.nointerior.test(biti))
- continue;
-
- if (!pool.mark.set(biti) && !pool.noscan.test(biti)) {
- stack[stackPos++] = Range(base, base + pool.bPageOffsets[pn] * PAGESIZE);
- if (stackPos == stack.length)
- break;
- }
- }
- else
- {
- // Don't mark bits in B_FREE pages
- assert(bin == B_FREE);
- continue;
- }
- }
- }
-
- Range next=void;
- if (p1 < p2)
- {
- // local stack is full, push it to the global stack
- assert(stackPos == stack.length);
- toscan.push(Range(p1, p2));
- // reverse order for depth-first-order traversal
- foreach_reverse (ref rng; stack[0 .. $ - 1])
- toscan.push(rng);
- stackPos = 0;
- next = stack[$-1];
- }
- else if (stackPos)
- {
- // pop range from local stack and recurse
- next = stack[--stackPos];
- }
- else if (!toscan.empty)
- {
- // pop range from global stack and recurse
- next = toscan.pop();
- }
- else
- {
- // nothing more to do
- return;
- }
- p1 = cast(void**)next.pbot;
- p2 = cast(void**)next.ptop;
- // printf(" pop [%p..%p] (%#zx)\n", p1, p2, cast(size_t)p2 - cast(size_t)p1);
- goto Lagain;
- }
-
- // collection step 1: prepare freebits and mark bits
- void prepare() nothrow
- {
- size_t n;
- Pool* pool;
-
- for (n = 0; n < npools; n++)
- {
- pool = pooltable[n];
- pool.mark.zero();
- if (!pool.isLargeObject) pool.freebits.zero();
- }
-
- debug(COLLECT_PRINTF) printf("Set bits\n");
-
- // Mark each free entry, so it doesn't get scanned
- for (n = 0; n < B_PAGE; n++)
- {
- for (List *list = bucket[n]; list; list = list.next)
- {
- pool = list.pool;
- assert(pool);
- pool.freebits.set(cast(size_t)(cast(void*)list - pool.baseAddr) / 16);
- }
- }
-
- debug(COLLECT_PRINTF) printf("Marked free entries.\n");
-
- for (n = 0; n < npools; n++)
- {
- pool = pooltable[n];
- if (!pool.isLargeObject)
- {
- pool.mark.copy(&pool.freebits);
- }
- }
- }
-
- // collection step 2: mark roots and heap
- void markAll(bool nostack) nothrow
- {
- if (!nostack)
- {
- debug(COLLECT_PRINTF) printf("\tscan stacks.\n");
- // Scan stacks and registers for each paused thread
- thread_scanAll(&mark);
- }
-
- // Scan roots[]
- debug(COLLECT_PRINTF) printf("\tscan roots[]\n");
- foreach (root; roots)
- {
- mark(cast(void*)&root.proot, cast(void*)(&root.proot + 1));
- }
-
- // Scan ranges[]
- debug(COLLECT_PRINTF) printf("\tscan ranges[]\n");
- //log++;
- foreach (range; ranges)
- {
- debug(COLLECT_PRINTF) printf("\t\t%p .. %p\n", range.pbot, range.ptop);
- mark(range.pbot, range.ptop);
- }
- //log--;
- }
-
- // collection step 3: free all unreferenced objects
- size_t sweep() nothrow
- {
- // Free up everything not marked
- debug(COLLECT_PRINTF) printf("\tfree'ing\n");
- size_t freedLargePages;
- size_t freed;
- for (size_t n = 0; n < npools; n++)
- {
- size_t pn;
- Pool* pool = pooltable[n];
-
- if (pool.isLargeObject)
- {
- for (pn = 0; pn < pool.npages; pn++)
- {
- Bins bin = cast(Bins)pool.pagetable[pn];
- if (bin > B_PAGE) continue;
- size_t biti = pn;
-
- if (!pool.mark.test(biti))
- {
- void *p = pool.baseAddr + pn * PAGESIZE;
- void* q = sentinel_add(p);
- sentinel_Invariant(q);
-
- if (pool.finals.nbits && pool.finals.clear(biti))
- {
- size_t size = pool.bPageOffsets[pn] * PAGESIZE - SENTINEL_EXTRA;
- uint attr = pool.getBits(biti);
- rt_finalizeFromGC(q, size, attr);
- }
-
- pool.clrBits(biti, ~BlkAttr.NONE ^ BlkAttr.FINALIZE);
-
- debug(COLLECT_PRINTF) printf("\tcollecting big %p\n", p);
- log_free(q);
- pool.pagetable[pn] = B_FREE;
- if (pn < pool.searchStart) pool.searchStart = pn;
- freedLargePages++;
- pool.freepages++;
-
- debug (MEMSTOMP) memset(p, 0xF3, PAGESIZE);
- while (pn + 1 < pool.npages && pool.pagetable[pn + 1] == B_PAGEPLUS)
- {
- pn++;
- pool.pagetable[pn] = B_FREE;
-
- // Don't need to update searchStart here because
- // pn is guaranteed to be greater than last time
- // we updated it.
-
- pool.freepages++;
- freedLargePages++;
-
- debug (MEMSTOMP)
- { p += PAGESIZE;
- memset(p, 0xF3, PAGESIZE);
- }
- }
- pool.largestFree = pool.freepages; // invalidate
- }
- }
- }
- else
- {
-
- for (pn = 0; pn < pool.npages; pn++)
- {
- Bins bin = cast(Bins)pool.pagetable[pn];
-
- if (bin < B_PAGE)
- {
- immutable size = binsize[bin];
- void *p = pool.baseAddr + pn * PAGESIZE;
- void *ptop = p + PAGESIZE;
- immutable base = pn * (PAGESIZE/16);
- immutable bitstride = size / 16;
-
- bool freeBits;
- PageBits toFree;
-
- for (size_t i; p < ptop; p += size, i += bitstride)
- {
- immutable biti = base + i;
-
- if (!pool.mark.test(biti))
- {
- void* q = sentinel_add(p);
- sentinel_Invariant(q);
-
- if (pool.finals.nbits && pool.finals.test(biti))
- rt_finalizeFromGC(q, size - SENTINEL_EXTRA, pool.getBits(biti));
-
- freeBits = true;
- toFree.set(i);
-
- debug(COLLECT_PRINTF) printf("\tcollecting %p\n", p);
- log_free(sentinel_add(p));
-
- debug (MEMSTOMP) memset(p, 0xF3, size);
-
- freed += size;
- }
- }
-
- if (freeBits)
- pool.freePageBits(pn, toFree);
- }
- }
- }
- }
-
- assert(freedLargePages <= usedLargePages);
- usedLargePages -= freedLargePages;
- debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedLargePages, npools);
- return freedLargePages;
- }
-
- // collection step 4: recover pages with no live objects, rebuild free lists
- size_t recover() nothrow
- {
- // init tail list
- List**[B_PAGE] tail = void;
- foreach (i, ref next; tail)
- next = &bucket[i];
-
- // Free complete pages, rebuild free list
- debug(COLLECT_PRINTF) printf("\tfree complete pages\n");
- size_t freedSmallPages;
- for (size_t n = 0; n < npools; n++)
- {
- size_t pn;
- Pool* pool = pooltable[n];
-
- if (pool.isLargeObject)
- continue;
-
- for (pn = 0; pn < pool.npages; pn++)
- {
- Bins bin = cast(Bins)pool.pagetable[pn];
- size_t biti;
- size_t u;
-
- if (bin < B_PAGE)
- {
- size_t size = binsize[bin];
- size_t bitstride = size / 16;
- size_t bitbase = pn * (PAGESIZE / 16);
- size_t bittop = bitbase + (PAGESIZE / 16);
- void* p;
-
- biti = bitbase;
- for (biti = bitbase; biti < bittop; biti += bitstride)
- {
- if (!pool.freebits.test(biti))
- goto Lnotfree;
- }
- pool.pagetable[pn] = B_FREE;
- if (pn < pool.searchStart) pool.searchStart = pn;
- pool.freepages++;
- freedSmallPages++;
- continue;
-
- Lnotfree:
- p = pool.baseAddr + pn * PAGESIZE;
- for (u = 0; u < PAGESIZE; u += size)
- {
- biti = bitbase + u / 16;
- if (!pool.freebits.test(biti))
- continue;
- auto elem = cast(List *)(p + u);
- elem.pool = pool;
- *tail[bin] = elem;
- tail[bin] = &elem.next;
- }
- }
- }
- }
- // terminate tail list
- foreach (ref next; tail)
- *next = null;
-
- assert(freedSmallPages <= usedSmallPages);
- usedSmallPages -= freedSmallPages;
- debug(COLLECT_PRINTF) printf("\trecovered pages = %d\n", freedSmallPages);
- return freedSmallPages;
- }
-
- /**
- * Return number of full pages free'd.
- */
- size_t fullcollect(bool nostack = false) nothrow
- {
- MonoTime start, stop, begin;
-
- if (config.profile)
- {
- begin = start = currTime;
- }
-
- debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n");
- //printf("\tpool address range = %p .. %p\n", minAddr, maxAddr);
-
- {
-            // lock roots and ranges around suspending threads because they're not reentrant-safe
- rangesLock.lock();
- rootsLock.lock();
- scope (exit)
- {
- rangesLock.unlock();
- rootsLock.unlock();
- }
- thread_suspendAll();
-
- prepare();
-
- if (config.profile)
- {
- stop = currTime;
- prepTime += (stop - start);
- start = stop;
- }
-
- markAll(nostack);
-
- thread_processGCMarks(&isMarked);
- thread_resumeAll();
- }
-
- if (config.profile)
- {
- stop = currTime;
- markTime += (stop - start);
- Duration pause = stop - begin;
- if (pause > maxPauseTime)
- maxPauseTime = pause;
- start = stop;
- }
-
- ConservativeGC._inFinalizer = true;
- size_t freedLargePages=void;
- {
- scope (failure) ConservativeGC._inFinalizer = false;
- freedLargePages = sweep();
- ConservativeGC._inFinalizer = false;
- }
-
- if (config.profile)
- {
- stop = currTime;
- sweepTime += (stop - start);
- start = stop;
- }
-
- immutable freedSmallPages = recover();
-
- if (config.profile)
- {
- stop = currTime;
- recoverTime += (stop - start);
- ++numCollections;
- }
-
- updateCollectThresholds();
-
- return freedLargePages + freedSmallPages;
- }
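fullcollect() runs the cycle as prepare, mark (with the world stopped), sweep (running finalizers) and recover, and with config.profile set it accumulates the per-phase times plus the maximum pause. A minimal sketch of driving that cycle from user code through the public core.memory API; the profiling report is assumed to be enabled with the documented `--DRT-gcopt=profile:1` runtime option:

```d
// Run as: ./app --DRT-gcopt=profile:1   (assumed druntime option for the timings)
import core.memory : GC;

void main()
{
    auto buf = new ubyte[](1 << 20); // give the collector something to scan
    buf = null;                      // drop the only reference
    GC.collect();                    // prepare/mark/sweep/recover as above
    GC.minimize();                   // return completely free pools to the OS
}
```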
-
- /**
- * Returns IsMarked.yes if addr lies within a marked block, IsMarked.no if it
- * lies in unmarked GC memory, and IsMarked.unknown if addr is not GC memory.
- *
- * Warning! This should only be called while the world is stopped inside
- * the fullcollect function.
- */
- int isMarked(void *addr) scope nothrow
- {
- // first, we find the Pool this block is in, then check whether its
- // mark bit is set.
- auto pool = findPool(addr);
- if (pool)
- {
- auto offset = cast(size_t)(addr - pool.baseAddr);
- auto pn = offset / PAGESIZE;
- auto bins = cast(Bins)pool.pagetable[pn];
- size_t biti = void;
- if (bins <= B_PAGE)
- {
- biti = (offset & notbinsize[bins]) >> pool.shiftBy;
- }
- else if (bins == B_PAGEPLUS)
- {
- pn -= pool.bPageOffsets[pn];
- biti = pn * (PAGESIZE >> pool.shiftBy);
- }
- else // bins == B_FREE
- {
- assert(bins == B_FREE);
- return IsMarked.no;
- }
- return pool.mark.test(biti) ? IsMarked.yes : IsMarked.no;
- }
- return IsMarked.unknown;
- }
-
-
- /***** Leak Detector ******/
-
-
- debug (LOGGING)
- {
- LogArray current;
- LogArray prev;
-
-
- void log_init()
- {
- //debug(PRINTF) printf("+log_init()\n");
- current.reserve(1000);
- prev.reserve(1000);
- //debug(PRINTF) printf("-log_init()\n");
- }
-
-
- void log_malloc(void *p, size_t size) nothrow
- {
- //debug(PRINTF) printf("+log_malloc(p = %p, size = %zd)\n", p, size);
- Log log;
-
- log.p = p;
- log.size = size;
- log.line = GC.line;
- log.file = GC.file;
- log.parent = null;
-
- GC.line = 0;
- GC.file = null;
-
- current.push(log);
- //debug(PRINTF) printf("-log_malloc()\n");
- }
-
-
- void log_free(void *p) nothrow
- {
- //debug(PRINTF) printf("+log_free(%p)\n", p);
- auto i = current.find(p);
- if (i == OPFAIL)
- {
- debug(PRINTF) printf("free'ing unallocated memory %p\n", p);
- }
- else
- current.remove(i);
- //debug(PRINTF) printf("-log_free()\n");
- }
-
-
- void log_collect() nothrow
- {
- //debug(PRINTF) printf("+log_collect()\n");
- // Print everything in current that is not in prev
-
- debug(PRINTF) printf("New pointers this cycle: --------------------------------\n");
- size_t used = 0;
- for (size_t i = 0; i < current.dim; i++)
- {
- auto j = prev.find(current.data[i].p);
- if (j == OPFAIL)
- current.data[i].print();
- else
- used++;
- }
-
- debug(PRINTF) printf("All roots this cycle: --------------------------------\n");
- for (size_t i = 0; i < current.dim; i++)
- {
- void* p = current.data[i].p;
- if (!findPool(current.data[i].parent))
- {
- auto j = prev.find(current.data[i].p);
- debug(PRINTF) printf(j == OPFAIL ? "N" : " ");
- current.data[i].print();
- }
- }
-
- debug(PRINTF) printf("Used = %d-------------------------------------------------\n", used);
- prev.copy(&current);
-
- debug(PRINTF) printf("-log_collect()\n");
- }
-
-
- void log_parent(void *p, void *parent) nothrow
- {
- //debug(PRINTF) printf("+log_parent()\n");
- auto i = current.find(p);
- if (i == OPFAIL)
- {
- debug(PRINTF) printf("parent'ing unallocated memory %p, parent = %p\n", p, parent);
- Pool *pool;
- pool = findPool(p);
- assert(pool);
- size_t offset = cast(size_t)(p - pool.baseAddr);
- size_t biti;
- size_t pn = offset / PAGESIZE;
- Bins bin = cast(Bins)pool.pagetable[pn];
- biti = (offset & notbinsize[bin]);
- debug(PRINTF) printf("\tbin = %d, offset = x%x, biti = x%x\n", bin, offset, biti);
- }
- else
- {
- current.data[i].parent = parent;
- }
- //debug(PRINTF) printf("-log_parent()\n");
- }
-
- }
- else
- {
- void log_init() nothrow { }
- void log_malloc(void *p, size_t size) nothrow { }
- void log_free(void *p) nothrow { }
- void log_collect() nothrow { }
- void log_parent(void *p, void *parent) nothrow { }
- }
-}
-
-/* ============================ Pool =============================== */
-
-struct Pool
-{
- void* baseAddr;
- void* topAddr;
- GCBits mark; // entries already scanned, or should not be scanned
- GCBits freebits; // entries that are on the free list
- GCBits finals; // entries that need finalizer run on them
- GCBits structFinals;// struct entries that need a finalizer run on them
- GCBits noscan; // entries that should not be scanned
- GCBits appendable; // entries that are appendable
- GCBits nointerior; // interior pointers should be ignored.
- // Only implemented for large object pools.
- size_t npages;
- size_t freepages; // The number of pages not in use.
- ubyte* pagetable;
-
- bool isLargeObject;
-
- uint shiftBy; // shift count for the divisor used for determining bit indices.
-
- // This tracks how far back we have to go to find the nearest B_PAGE at
- // a smaller address than a B_PAGEPLUS. To save space, we use a uint.
- // This limits individual allocations to 16 terabytes, assuming a 4k
- // pagesize.
- uint* bPageOffsets;
-
- // This variable tracks a conservative estimate of where the first free
- // page in this pool is, so that if a lot of pages towards the beginning
- // are occupied, we can bypass them in O(1).
- size_t searchStart;
- size_t largestFree; // upper limit for largest free chunk in large object pool
-
- void initialize(size_t npages, bool isLargeObject) nothrow
- {
- this.isLargeObject = isLargeObject;
- size_t poolsize;
-
- shiftBy = isLargeObject ? 12 : 4;
-
- //debug(PRINTF) printf("Pool::Pool(%u)\n", npages);
- poolsize = npages * PAGESIZE;
- assert(poolsize >= POOLSIZE);
- baseAddr = cast(byte *)os_mem_map(poolsize);
-
- // Some of the code depends on page alignment of memory pools
- assert((cast(size_t)baseAddr & (PAGESIZE - 1)) == 0);
-
- if (!baseAddr)
- {
- //debug(PRINTF) printf("GC fail: poolsize = x%zx, errno = %d\n", poolsize, errno);
- //debug(PRINTF) printf("message = '%s'\n", sys_errlist[errno]);
-
- npages = 0;
- poolsize = 0;
- }
- //assert(baseAddr);
- topAddr = baseAddr + poolsize;
- auto nbits = cast(size_t)poolsize >> shiftBy;
-
- mark.alloc(nbits);
-
- // pagetable already keeps track of what's free for the large object
- // pool.
- if (!isLargeObject)
- {
- freebits.alloc(nbits);
- }
-
- noscan.alloc(nbits);
- appendable.alloc(nbits);
-
- pagetable = cast(ubyte*)cstdlib.malloc(npages);
- if (!pagetable)
- onOutOfMemoryErrorNoGC();
-
- if (isLargeObject)
- {
- bPageOffsets = cast(uint*)cstdlib.malloc(npages * uint.sizeof);
- if (!bPageOffsets)
- onOutOfMemoryErrorNoGC();
- }
-
- memset(pagetable, B_FREE, npages);
-
- this.npages = npages;
- this.freepages = npages;
- this.searchStart = 0;
- this.largestFree = npages;
- }
-
-
- void Dtor() nothrow
- {
- if (baseAddr)
- {
- int result;
-
- if (npages)
- {
- result = os_mem_unmap(baseAddr, npages * PAGESIZE);
- assert(result == 0);
- npages = 0;
- }
-
- baseAddr = null;
- topAddr = null;
- }
- if (pagetable)
- {
- cstdlib.free(pagetable);
- pagetable = null;
- }
-
- if (bPageOffsets)
- cstdlib.free(bPageOffsets);
-
- mark.Dtor();
- if (isLargeObject)
- {
- nointerior.Dtor();
- }
- else
- {
- freebits.Dtor();
- }
- finals.Dtor();
- structFinals.Dtor();
- noscan.Dtor();
- appendable.Dtor();
- }
-
- /**
- * Returns the BlkAttr bit mask currently set for the block at bit index biti.
- */
- uint getBits(size_t biti) nothrow
- {
- uint bits;
-
- if (finals.nbits && finals.test(biti))
- bits |= BlkAttr.FINALIZE;
- if (structFinals.nbits && structFinals.test(biti))
- bits |= BlkAttr.STRUCTFINAL;
- if (noscan.test(biti))
- bits |= BlkAttr.NO_SCAN;
- if (nointerior.nbits && nointerior.test(biti))
- bits |= BlkAttr.NO_INTERIOR;
- if (appendable.test(biti))
- bits |= BlkAttr.APPENDABLE;
- return bits;
- }
-
- /**
- * Clears the BlkAttr bits given in mask for the block at bit index biti.
- */
- void clrBits(size_t biti, uint mask) nothrow
- {
- immutable dataIndex = biti >> GCBits.BITS_SHIFT;
- immutable bitOffset = biti & GCBits.BITS_MASK;
- immutable keep = ~(GCBits.BITS_1 << bitOffset);
-
- if (mask & BlkAttr.FINALIZE && finals.nbits)
- finals.data[dataIndex] &= keep;
-
- if (structFinals.nbits && (mask & BlkAttr.STRUCTFINAL))
- structFinals.data[dataIndex] &= keep;
-
- if (mask & BlkAttr.NO_SCAN)
- noscan.data[dataIndex] &= keep;
- if (mask & BlkAttr.APPENDABLE)
- appendable.data[dataIndex] &= keep;
- if (nointerior.nbits && (mask & BlkAttr.NO_INTERIOR))
- nointerior.data[dataIndex] &= keep;
- }
-
- /**
- * Sets the BlkAttr bits given in mask for the block at bit index biti.
- */
- void setBits(size_t biti, uint mask) nothrow
- {
- // Calculate the mask and bit offset once and then use it to
- // set all of the bits we need to set.
- immutable dataIndex = biti >> GCBits.BITS_SHIFT;
- immutable bitOffset = biti & GCBits.BITS_MASK;
- immutable orWith = GCBits.BITS_1 << bitOffset;
-
- if (mask & BlkAttr.STRUCTFINAL)
- {
- if (!structFinals.nbits)
- structFinals.alloc(mark.nbits);
- structFinals.data[dataIndex] |= orWith;
- }
-
- if (mask & BlkAttr.FINALIZE)
- {
- if (!finals.nbits)
- finals.alloc(mark.nbits);
- finals.data[dataIndex] |= orWith;
- }
-
- if (mask & BlkAttr.NO_SCAN)
- {
- noscan.data[dataIndex] |= orWith;
- }
-// if (mask & BlkAttr.NO_MOVE)
-// {
-// if (!nomove.nbits)
-// nomove.alloc(mark.nbits);
-// nomove.data[dataIndex] |= orWith;
-// }
- if (mask & BlkAttr.APPENDABLE)
- {
- appendable.data[dataIndex] |= orWith;
- }
-
- if (isLargeObject && (mask & BlkAttr.NO_INTERIOR))
- {
- if (!nointerior.nbits)
- nointerior.alloc(mark.nbits);
- nointerior.data[dataIndex] |= orWith;
- }
- }
-
- void freePageBits(size_t pagenum, in ref PageBits toFree) nothrow
- {
- assert(!isLargeObject);
- assert(!nointerior.nbits); // only for large objects
-
- import core.internal.traits : staticIota;
- immutable beg = pagenum * (PAGESIZE / 16 / GCBits.BITS_PER_WORD);
- foreach (i; staticIota!(0, PageBits.length))
- {
- immutable w = toFree[i];
- if (!w) continue;
-
- immutable wi = beg + i;
- freebits.data[wi] |= w;
- noscan.data[wi] &= ~w;
- appendable.data[wi] &= ~w;
- }
-
- if (finals.nbits)
- {
- foreach (i; staticIota!(0, PageBits.length))
- if (toFree[i])
- finals.data[beg + i] &= ~toFree[i];
- }
-
- if (structFinals.nbits)
- {
- foreach (i; staticIota!(0, PageBits.length))
- if (toFree[i])
- structFinals.data[beg + i] &= ~toFree[i];
- }
- }
-
- /**
- * Given a pointer p into the pool, return its page number.
- */
- size_t pagenumOf(void *p) const nothrow
- in
- {
- assert(p >= baseAddr);
- assert(p < topAddr);
- }
- body
- {
- return cast(size_t)(p - baseAddr) / PAGESIZE;
- }
-
- @property bool isFree() const pure nothrow
- {
- return npages == freepages;
- }
-
- size_t slGetSize(void* p) nothrow
- {
- if (isLargeObject)
- return (cast(LargeObjectPool*)&this).getSize(p);
- else
- return (cast(SmallObjectPool*)&this).getSize(p);
- }
-
- BlkInfo slGetInfo(void* p) nothrow
- {
- if (isLargeObject)
- return (cast(LargeObjectPool*)&this).getInfo(p);
- else
- return (cast(SmallObjectPool*)&this).getInfo(p);
- }
-
-
- void Invariant() const {}
-
- debug(INVARIANT)
- invariant()
- {
- //mark.Invariant();
- //scan.Invariant();
- //freebits.Invariant();
- //finals.Invariant();
- //structFinals.Invariant();
- //noscan.Invariant();
- //appendable.Invariant();
- //nointerior.Invariant();
-
- if (baseAddr)
- {
- //if (baseAddr + npages * PAGESIZE != topAddr)
- //printf("baseAddr = %p, npages = %d, topAddr = %p\n", baseAddr, npages, topAddr);
- assert(baseAddr + npages * PAGESIZE == topAddr);
- }
-
- if (pagetable !is null)
- {
- for (size_t i = 0; i < npages; i++)
- {
- Bins bin = cast(Bins)pagetable[i];
- assert(bin < B_MAX);
- }
- }
- }
-}
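The attribute bit arrays above (finals, structFinals, noscan, appendable, nointerior) are what getBits/setBits/clrBits translate to and from BlkAttr masks. A small sketch of the public side of those same bits via core.memory, assuming the mapping shown in getBits:

```d
import core.memory : GC;

void main()
{
    auto p = GC.malloc(64, GC.BlkAttr.NO_SCAN);     // request the noscan bit
    assert(GC.getAttr(p) & GC.BlkAttr.NO_SCAN);

    GC.clrAttr(p, GC.BlkAttr.NO_SCAN);              // clear it again
    assert(!(GC.getAttr(p) & GC.BlkAttr.NO_SCAN));

    GC.free(p);
}
```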
-
-struct LargeObjectPool
-{
- Pool base;
- alias base this;
-
- void updateOffsets(size_t fromWhere) nothrow
- {
- assert(pagetable[fromWhere] == B_PAGE);
- size_t pn = fromWhere + 1;
- for (uint offset = 1; pn < npages; pn++, offset++)
- {
- if (pagetable[pn] != B_PAGEPLUS) break;
- bPageOffsets[pn] = offset;
- }
-
- // Store the size of the block in bPageOffsets[fromWhere].
- bPageOffsets[fromWhere] = cast(uint) (pn - fromWhere);
- }
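The encoding maintained here is: the B_PAGE entry of an allocation stores its length in pages, and every following B_PAGEPLUS entry stores its distance back to that B_PAGE. A hypothetical layout (page numbers and sizes invented) makes the lookup rule concrete:

```d
// A 4-page allocation starting at page 7:
//   pagetable:    [7] B_PAGE   [8] B_PAGEPLUS  [9] B_PAGEPLUS  [10] B_PAGEPLUS
//   bPageOffsets: [7] 4        [8] 1           [9] 2           [10] 3
unittest
{
    size_t[11] offsets = [0, 0, 0, 0, 0, 0, 0, 4, 1, 2, 3];
    size_t pn = 9;                        // some interior (B_PAGEPLUS) page
    size_t start = pn - offsets[pn];      // jump back to the B_PAGE entry
    size_t sizeInPages = offsets[start];  // its value is the allocation length
    assert(start == 7 && sizeInPages == 4);
}
```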
-
- /**
- * Allocate n pages from Pool.
- * Returns OPFAIL on failure.
- */
- size_t allocPages(size_t n) nothrow
- {
- if (largestFree < n || searchStart + n > npages)
- return OPFAIL;
-
- //debug(PRINTF) printf("Pool::allocPages(n = %d)\n", n);
- size_t largest = 0;
- if (pagetable[searchStart] == B_PAGEPLUS)
- {
- searchStart -= bPageOffsets[searchStart]; // jump to B_PAGE
- searchStart += bPageOffsets[searchStart];
- }
- while (searchStart < npages && pagetable[searchStart] == B_PAGE)
- searchStart += bPageOffsets[searchStart];
-
- for (size_t i = searchStart; i < npages; )
- {
- assert(pagetable[i] == B_FREE);
- size_t p = 1;
- while (p < n && i + p < npages && pagetable[i + p] == B_FREE)
- p++;
-
- if (p == n)
- return i;
-
- if (p > largest)
- largest = p;
-
- i += p;
- while (i < npages && pagetable[i] == B_PAGE)
- {
- // we have the size information, so skip over the whole allocation at once.
- i += bPageOffsets[i];
- }
- }
-
- // not enough free pages found, remember largest free chunk
- largestFree = largest;
- return OPFAIL;
- }
-
- /**
- * Free npages pages starting with pagenum.
- */
- void freePages(size_t pagenum, size_t npages) nothrow
- {
- //memset(&pagetable[pagenum], B_FREE, npages);
- if (pagenum < searchStart)
- searchStart = pagenum;
-
- for (size_t i = pagenum; i < npages + pagenum; i++)
- {
- if (pagetable[i] < B_FREE)
- {
- freepages++;
- }
-
- pagetable[i] = B_FREE;
- }
- largestFree = freepages; // invalidate
- }
-
- /**
- * Get the size of the allocation that p points into.
- */
- size_t getSize(void *p) const nothrow
- in
- {
- assert(p >= baseAddr);
- assert(p < topAddr);
- }
- body
- {
- size_t pagenum = pagenumOf(p);
- Bins bin = cast(Bins)pagetable[pagenum];
- assert(bin == B_PAGE);
- return bPageOffsets[pagenum] * PAGESIZE;
- }
-
- /**
- * Return a BlkInfo record describing the allocation containing p.
- */
- BlkInfo getInfo(void* p) nothrow
- {
- BlkInfo info;
-
- size_t offset = cast(size_t)(p - baseAddr);
- size_t pn = offset / PAGESIZE;
- Bins bin = cast(Bins)pagetable[pn];
-
- if (bin == B_PAGEPLUS)
- pn -= bPageOffsets[pn];
- else if (bin != B_PAGE)
- return info; // no info for free pages
-
- info.base = baseAddr + pn * PAGESIZE;
- info.size = bPageOffsets[pn] * PAGESIZE;
-
- info.attr = getBits(pn);
- return info;
- }
-
- void runFinalizers(in void[] segment) nothrow
- {
- foreach (pn; 0 .. npages)
- {
- Bins bin = cast(Bins)pagetable[pn];
- if (bin > B_PAGE)
- continue;
- size_t biti = pn;
-
- if (!finals.test(biti))
- continue;
-
- auto p = sentinel_add(baseAddr + pn * PAGESIZE);
- size_t size = bPageOffsets[pn] * PAGESIZE - SENTINEL_EXTRA;
- uint attr = getBits(biti);
-
- if (!rt_hasFinalizerInSegment(p, size, attr, segment))
- continue;
-
- rt_finalizeFromGC(p, size, attr);
-
- clrBits(biti, ~BlkAttr.NONE);
-
- if (pn < searchStart)
- searchStart = pn;
-
- debug(COLLECT_PRINTF) printf("\tcollecting big %p\n", p);
- //log_free(sentinel_add(p));
-
- size_t n = 1;
- for (; pn + n < npages; ++n)
- if (pagetable[pn + n] != B_PAGEPLUS)
- break;
- debug (MEMSTOMP) memset(baseAddr + pn * PAGESIZE, 0xF3, n * PAGESIZE);
- freePages(pn, n);
- }
- }
-}
-
-
-struct SmallObjectPool
-{
- Pool base;
- alias base this;
-
- /**
- * Get the size of the allocation that p points into.
- */
- size_t getSize(void *p) const nothrow
- in
- {
- assert(p >= baseAddr);
- assert(p < topAddr);
- }
- body
- {
- size_t pagenum = pagenumOf(p);
- Bins bin = cast(Bins)pagetable[pagenum];
- assert(bin < B_PAGE);
- return binsize[bin];
- }
-
- BlkInfo getInfo(void* p) nothrow
- {
- BlkInfo info;
- size_t offset = cast(size_t)(p - baseAddr);
- size_t pn = offset / PAGESIZE;
- Bins bin = cast(Bins)pagetable[pn];
-
- if (bin >= B_PAGE)
- return info;
-
- info.base = cast(void*)((cast(size_t)p) & notbinsize[bin]);
- info.size = binsize[bin];
- offset = info.base - baseAddr;
- info.attr = getBits(cast(size_t)(offset >> shiftBy));
-
- return info;
- }
-
- void runFinalizers(in void[] segment) nothrow
- {
- foreach (pn; 0 .. npages)
- {
- Bins bin = cast(Bins)pagetable[pn];
- if (bin >= B_PAGE)
- continue;
-
- immutable size = binsize[bin];
- auto p = baseAddr + pn * PAGESIZE;
- const ptop = p + PAGESIZE;
- immutable base = pn * (PAGESIZE/16);
- immutable bitstride = size / 16;
-
- bool freeBits;
- PageBits toFree;
-
- for (size_t i; p < ptop; p += size, i += bitstride)
- {
- immutable biti = base + i;
-
- if (!finals.test(biti))
- continue;
-
- auto q = sentinel_add(p);
- uint attr = getBits(biti);
-
- if (!rt_hasFinalizerInSegment(q, size, attr, segment))
- continue;
-
- rt_finalizeFromGC(q, size, attr);
-
- freeBits = true;
- toFree.set(i);
-
- debug(COLLECT_PRINTF) printf("\tcollecting %p\n", p);
- //log_free(sentinel_add(p));
-
- debug (MEMSTOMP) memset(p, 0xF3, size);
- }
-
- if (freeBits)
- freePageBits(pn, toFree);
- }
- }
-
- /**
- * Allocate a page of bin-sized entries.
- * Returns:
- *  head of a singly linked list of new entries
- */
- List* allocPage(Bins bin) nothrow
- {
- size_t pn;
- for (pn = searchStart; pn < npages; pn++)
- if (pagetable[pn] == B_FREE)
- goto L1;
-
- return null;
-
- L1:
- searchStart = pn + 1;
- pagetable[pn] = cast(ubyte)bin;
- freepages--;
-
- // Convert page to free list
- size_t size = binsize[bin];
- void* p = baseAddr + pn * PAGESIZE;
- void* ptop = p + PAGESIZE - size;
- auto first = cast(List*) p;
-
- for (; p < ptop; p += size)
- {
- (cast(List *)p).next = cast(List *)(p + size);
- (cast(List *)p).pool = &base;
- }
- (cast(List *)p).next = null;
- (cast(List *)p).pool = &base;
- return first;
- }
-}
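allocPage turns a fresh page into a singly linked free list of fixed-size entries. A simplified, self-contained sketch of that threading step, with `Node` and the buffer/entry sizes standing in for the runtime's List, PAGESIZE and binsize:

```d
struct Node { Node* next; }

Node* threadFreeList(ubyte[] page, size_t entrySize)
{
    Node* head = null;
    // link the entries back to front so `head` ends up at the page start
    for (size_t off = (page.length / entrySize) * entrySize; off >= entrySize; off -= entrySize)
    {
        auto n = cast(Node*) (page.ptr + off - entrySize);
        n.next = head;
        head = n;
    }
    return head;
}

unittest
{
    auto page = new ubyte[](4096);
    size_t count;
    for (auto n = threadFreeList(page, 64); n !is null; n = n.next)
        ++count;
    assert(count == 4096 / 64);
}
```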
-
-unittest // bugzilla 14467
-{
- int[] arr = new int[10];
- assert(arr.capacity);
- arr = arr[$..$];
- assert(arr.capacity);
-}
-
-unittest // bugzilla 15353
-{
- import core.memory : GC;
-
- static struct Foo
- {
- ~this()
- {
- GC.free(buf); // ignored in finalizer
- }
-
- void* buf;
- }
- new Foo(GC.malloc(10));
- GC.collect();
-}
-
-unittest // bugzilla 15822
-{
- import core.memory : GC;
-
- ubyte[16] buf;
- static struct Foo
- {
- ~this()
- {
- GC.removeRange(ptr);
- GC.removeRoot(ptr);
- }
-
- ubyte* ptr;
- }
- GC.addRoot(buf.ptr);
- GC.addRange(buf.ptr, buf.length);
- new Foo(buf.ptr);
- GC.collect();
-}
-
-unittest // bugzilla 1180
-{
- import core.exception;
- try
- {
- size_t x = size_t.max - 100;
- byte[] big_buf = new byte[x];
- }
- catch (OutOfMemoryError)
- {
- }
-}
-
-/* ============================ SENTINEL =============================== */
-
-
-debug (SENTINEL)
-{
- const size_t SENTINEL_PRE = cast(size_t) 0xF4F4F4F4F4F4F4F4UL; // 32 or 64 bits
- const ubyte SENTINEL_POST = 0xF5; // 8 bits
- const uint SENTINEL_EXTRA = 2 * size_t.sizeof + 1;
-
-
- inout(size_t*) sentinel_size(inout void *p) nothrow { return &(cast(inout size_t *)p)[-2]; }
- inout(size_t*) sentinel_pre(inout void *p) nothrow { return &(cast(inout size_t *)p)[-1]; }
- inout(ubyte*) sentinel_post(inout void *p) nothrow { return &(cast(inout ubyte *)p)[*sentinel_size(p)]; }
-
-
- void sentinel_init(void *p, size_t size) nothrow
- {
- *sentinel_size(p) = size;
- *sentinel_pre(p) = SENTINEL_PRE;
- *sentinel_post(p) = SENTINEL_POST;
- }
-
-
- void sentinel_Invariant(const void *p) nothrow
- {
- debug
- {
- assert(*sentinel_pre(p) == SENTINEL_PRE);
- assert(*sentinel_post(p) == SENTINEL_POST);
- }
- else if (*sentinel_pre(p) != SENTINEL_PRE || *sentinel_post(p) != SENTINEL_POST)
- onInvalidMemoryOperationError(); // also trigger in release build
- }
-
-
- void *sentinel_add(void *p) nothrow
- {
- return p + 2 * size_t.sizeof;
- }
-
-
- void *sentinel_sub(void *p) nothrow
- {
- return p - 2 * size_t.sizeof;
- }
-}
-else
-{
- const uint SENTINEL_EXTRA = 0;
-
-
- void sentinel_init(void *p, size_t size) nothrow
- {
- }
-
-
- void sentinel_Invariant(const void *p) nothrow
- {
- }
-
-
- void *sentinel_add(void *p) nothrow
- {
- return p;
- }
-
-
- void *sentinel_sub(void *p) nothrow
- {
- return p;
- }
-}
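Under debug(SENTINEL) every block carries two leading size_t words (the requested size and SENTINEL_PRE) plus one trailing SENTINEL_POST byte, and sentinel_add/sentinel_sub convert between the raw block and the user pointer. A stand-alone sketch of the same layout; the helper below is only an illustration, not the runtime's implementation:

```d
//   [ size_t size ][ size_t PRE ][ user data ... ][ ubyte POST ]
//   ^ raw block                  ^ user pointer (sentinel_add)
enum size_t PRE  = cast(size_t) 0xF4F4F4F4F4F4F4F4UL; // truncated on 32 bit
enum ubyte  POST = 0xF5;

void* wrapWithSentinel(void[] raw, size_t userSize)
{
    auto words = cast(size_t*) raw.ptr;
    words[0] = userSize;                     // sentinel_size
    words[1] = PRE;                          // sentinel_pre
    void* user = raw.ptr + 2 * size_t.sizeof;
    *(cast(ubyte*) user + userSize) = POST;  // sentinel_post
    return user;
}

unittest
{
    auto raw = new ubyte[](2 * size_t.sizeof + 16 + 1);
    auto user = wrapWithSentinel(raw, 16);
    assert(*cast(size_t*)(user - size_t.sizeof) == PRE);
    assert(*(cast(ubyte*) user + 16) == POST);
}
```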
-
-debug (MEMSTOMP)
-unittest
-{
- import core.memory;
- auto p = cast(uint*)GC.malloc(uint.sizeof*3);
- assert(*p == 0xF0F0F0F0);
- p[2] = 0; // First two will be used for free list
- GC.free(p);
- assert(p[2] == 0xF2F2F2F2);
-}
-
-debug (SENTINEL)
-unittest
-{
- import core.memory;
- auto p = cast(ubyte*)GC.malloc(1);
- assert(p[-1] == 0xF4);
- assert(p[ 1] == 0xF5);
-/*
- p[1] = 0;
- bool thrown;
- try
- GC.free(p);
- catch (Error e)
- thrown = true;
- p[1] = 0xF5;
- assert(thrown);
-*/
-}
-
-unittest
-{
- import core.memory;
-
- // https://issues.dlang.org/show_bug.cgi?id=9275
- GC.removeRoot(null);
- GC.removeRoot(cast(void*)13);
-}
-
-// improve predictability of coverage for code that is otherwise not hit by other tests
-unittest
-{
- import core.memory;
- auto p = GC.malloc(260 << 20); // new pool has 390 MB
- auto q = GC.malloc(65 << 20); // next chunk (larger than 64MB to ensure the same pool is used)
- auto r = GC.malloc(65 << 20); // another chunk in same pool
- assert(p + (260 << 20) == q);
- assert(q + (65 << 20) == r);
- GC.free(q);
- // should trigger "assert(bin == B_FREE);" in mark due to dangling pointer q:
- GC.collect();
- // should trigger "break;" in extendNoSync:
- size_t sz = GC.extend(p, 64 << 20, 66 << 20); // trigger size after p large enough (but limited)
- assert(sz == 325 << 20);
- GC.free(p);
- GC.free(r);
- r = q; // ensure q is not trashed before collection above
-
- p = GC.malloc(70 << 20); // from the same pool
- q = GC.malloc(70 << 20);
- r = GC.malloc(70 << 20);
- auto s = GC.malloc(70 << 20);
- auto t = GC.malloc(70 << 20); // 350 MB of 390 MB used
- assert(p + (70 << 20) == q);
- assert(q + (70 << 20) == r);
- assert(r + (70 << 20) == s);
- assert(s + (70 << 20) == t);
- GC.free(r); // ensure recalculation of largestFree in next allocPages
- auto z = GC.malloc(75 << 20); // needs new pool
-
- GC.free(p);
- GC.free(q);
- GC.free(s);
- GC.free(t);
- GC.free(z);
- GC.minimize(); // release huge pool
-}
-
diff --git a/libphobos/libdruntime/gc/proxy.d b/libphobos/libdruntime/gc/proxy.d
deleted file mode 100644
index 794da90..0000000
--- a/libphobos/libdruntime/gc/proxy.d
+++ /dev/null
@@ -1,239 +0,0 @@
-/**
- * Contains the external GC interface.
- *
- * Copyright: Copyright Digital Mars 2005 - 2016.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
- * Authors: Walter Bright, Sean Kelly
- */
-
-/* Copyright Digital Mars 2005 - 2016.
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying file LICENSE or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- */
-module gc.proxy;
-
-import gc.impl.conservative.gc;
-import gc.impl.manual.gc;
-import gc.config;
-import gc.gcinterface;
-
-static import core.memory;
-
-private
-{
- static import core.memory;
- alias BlkInfo = core.memory.GC.BlkInfo;
-
- extern (C) void thread_init();
- extern (C) void thread_term();
-
- __gshared GC instance;
- __gshared GC proxiedGC; // used to iterate roots of Windows DLLs
-
-}
-
-
-extern (C)
-{
-
- void gc_init()
- {
- config.initialize();
- ManualGC.initialize(instance);
- ConservativeGC.initialize(instance);
- if (instance is null)
- {
- import core.stdc.stdio : fprintf, stderr;
- import core.stdc.stdlib : exit;
-
- fprintf(stderr, "No GC was initialized, please recheck the name of the selected GC ('%.*s').\n", cast(int)config.gc.length, config.gc.ptr);
- exit(1);
- }
-
- // NOTE: The GC must initialize the thread library
- // before its first collection.
- thread_init();
- }
-
- void gc_term()
- {
- // NOTE: There may be daemon threads still running when this routine is
- //       called. If so, cleaning memory out from under them is a good
- // way to make them crash horribly. This probably doesn't matter
- // much since the app is supposed to be shutting down anyway, but
- // I'm disabling cleanup for now until I can think about it some
- // more.
- //
- // NOTE: Due to popular demand, this has been re-enabled. It still has
- // the problems mentioned above though, so I guess we'll see.
-
- instance.collectNoStack(); // not really a 'collect all' -- still scans
- // static data area, roots, and ranges.
-
- thread_term();
-
- ManualGC.finalize(instance);
- ConservativeGC.finalize(instance);
- }
-
- void gc_enable()
- {
- instance.enable();
- }
-
- void gc_disable()
- {
- instance.disable();
- }
-
- void gc_collect() nothrow
- {
- instance.collect();
- }
-
- void gc_minimize() nothrow
- {
- instance.minimize();
- }
-
- uint gc_getAttr( void* p ) nothrow
- {
- return instance.getAttr(p);
- }
-
- uint gc_setAttr( void* p, uint a ) nothrow
- {
- return instance.setAttr(p, a);
- }
-
- uint gc_clrAttr( void* p, uint a ) nothrow
- {
- return instance.clrAttr(p, a);
- }
-
- void* gc_malloc( size_t sz, uint ba = 0, const TypeInfo ti = null ) nothrow
- {
- return instance.malloc(sz, ba, ti);
- }
-
- BlkInfo gc_qalloc( size_t sz, uint ba = 0, const TypeInfo ti = null ) nothrow
- {
- return instance.qalloc( sz, ba, ti );
- }
-
- void* gc_calloc( size_t sz, uint ba = 0, const TypeInfo ti = null ) nothrow
- {
- return instance.calloc( sz, ba, ti );
- }
-
- void* gc_realloc( void* p, size_t sz, uint ba = 0, const TypeInfo ti = null ) nothrow
- {
- return instance.realloc( p, sz, ba, ti );
- }
-
- size_t gc_extend( void* p, size_t mx, size_t sz, const TypeInfo ti = null ) nothrow
- {
- return instance.extend( p, mx, sz,ti );
- }
-
- size_t gc_reserve( size_t sz ) nothrow
- {
- return instance.reserve( sz );
- }
-
- void gc_free( void* p ) nothrow
- {
- return instance.free( p );
- }
-
- void* gc_addrOf( void* p ) nothrow
- {
- return instance.addrOf( p );
- }
-
- size_t gc_sizeOf( void* p ) nothrow
- {
- return instance.sizeOf( p );
- }
-
- BlkInfo gc_query( void* p ) nothrow
- {
- return instance.query( p );
- }
-
- core.memory.GC.Stats gc_stats() nothrow
- {
- return instance.stats();
- }
-
- void gc_addRoot( void* p ) nothrow
- {
- return instance.addRoot( p );
- }
-
- void gc_addRange( void* p, size_t sz, const TypeInfo ti = null ) nothrow
- {
- return instance.addRange( p, sz, ti );
- }
-
- void gc_removeRoot( void* p ) nothrow
- {
- return instance.removeRoot( p );
- }
-
- void gc_removeRange( void* p ) nothrow
- {
- return instance.removeRange( p );
- }
-
- void gc_runFinalizers( in void[] segment ) nothrow
- {
- return instance.runFinalizers( segment );
- }
-
- bool gc_inFinalizer() nothrow
- {
- return instance.inFinalizer();
- }
-
- GC gc_getProxy() nothrow
- {
- return instance;
- }
-
- export
- {
- void gc_setProxy( GC proxy )
- {
- foreach (root; instance.rootIter)
- {
- proxy.addRoot(root);
- }
-
- foreach (range; instance.rangeIter)
- {
- proxy.addRange(range.pbot, range.ptop - range.pbot, range.ti);
- }
-
- proxiedGC = instance; // remember initial GC to later remove roots
- instance = proxy;
- }
-
- void gc_clrProxy()
- {
- foreach (root; proxiedGC.rootIter)
- {
- instance.removeRoot(root);
- }
-
- foreach (range; proxiedGC.rangeIter)
- {
- instance.removeRange(range);
- }
-
- instance = proxiedGC;
- proxiedGC = null;
- }
- }
-}
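The extern(C) gc_* functions deleted above were the dispatch layer reached from core.memory; application code spells the same operations through the GC struct. A minimal sketch of that user-facing side (in the old layout these calls route through gc_malloc, gc_addRoot, gc_collect and friends):

```d
import core.memory : GC;

void main()
{
    auto p = GC.malloc(128);   // dispatched to the selected GC instance
    GC.addRoot(p);             // keep the block alive without a reference
    GC.collect();
    GC.removeRoot(p);
    GC.free(p);
}
```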
diff --git a/libphobos/libdruntime/gcc/deh.d b/libphobos/libdruntime/gcc/deh.d
index bbc351c..85322db 100644
--- a/libphobos/libdruntime/gcc/deh.d
+++ b/libphobos/libdruntime/gcc/deh.d
@@ -32,8 +32,8 @@ import gcc.attributes;
extern(C)
{
- int _d_isbaseof(ClassInfo, ClassInfo);
- void _d_createTrace(Object, void*);
+ int _d_isbaseof(ClassInfo, ClassInfo) @nogc nothrow pure @safe;
+ void _d_createTrace(Throwable, void*);
void _d_print_throwable(Throwable t);
}
@@ -432,6 +432,9 @@ extern(C) void* __gdc_begin_catch(_Unwind_Exception* unwindHeader)
ExceptionHeader* header = ExceptionHeader.toExceptionHeader(unwindHeader);
void* objectp = cast(void*)header.object;
+ // Remove our reference to the exception. We should not decrease its refcount,
+ // because we pass the object on to the caller.
+ header.object = null;
// Something went wrong when stacking up chained headers...
if (header != ExceptionHeader.pop())
@@ -455,6 +458,11 @@ extern(C) void _d_throw(Throwable object)
// Add to thrown exception stack.
eh.push();
+ // Increment reference count if object is a refcounted Throwable.
+ auto refcount = object.refcount();
+ if (refcount)
+ object.refcount() = refcount + 1;
+
// Called by unwinder when exception object needs destruction by other than our code.
extern(C) void exception_cleanup(_Unwind_Reason_Code code, _Unwind_Exception* exc)
{
@@ -976,14 +984,10 @@ private _Unwind_Reason_Code __gdc_personality(_Unwind_Action actions,
if (currentLsd != nextLsd)
break;
- // Add our object onto the end of the existing chain.
- Throwable n = ehn.object;
- while (n.next)
- n = n.next;
- n.next = eh.object;
+ // Add our object onto the end of the existing chain and replace
+ // our exception object with in-flight one.
+ eh.object = Throwable.chainTogether(ehn.object, eh.object);
- // Replace our exception object with in-flight one
- eh.object = ehn.object;
if (nextHandler != handler && !bypassed)
{
handler = nextHandler;
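The switch to Throwable.chainTogether concerns exceptions raised while another exception is already unwinding, for example from a finally block: by the language's collateral-exception rule the later exception is appended to the in-flight one's `next` chain instead of replacing it. A small sketch of the observable behaviour:

```d
void chained()
{
    try
    {
        try
        {
            throw new Exception("first");
        }
        finally
        {
            throw new Exception("second"); // thrown while "first" is in flight
        }
    }
    catch (Exception e)
    {
        assert(e.msg == "first");                          // original keeps propagating
        assert(e.next !is null && e.next.msg == "second"); // collateral is chained
    }
}
```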
diff --git a/libphobos/libdruntime/gcc/emutls.d b/libphobos/libdruntime/gcc/emutls.d
index 4622305..e0eab8c 100644
--- a/libphobos/libdruntime/gcc/emutls.d
+++ b/libphobos/libdruntime/gcc/emutls.d
@@ -25,7 +25,8 @@
module gcc.emutls;
import core.atomic, core.stdc.stdlib, core.stdc.string, core.sync.mutex;
-import rt.util.container.array, rt.util.container.hashtab;
+import core.internal.container.array;
+import core.internal.container.hashtab;
import core.internal.traits : classInstanceAlignment;
import gcc.builtins, gcc.gthread;
diff --git a/libphobos/libdruntime/gcc/sections/elf.d b/libphobos/libdruntime/gcc/sections/elf.d
index 3480fb9..9662cdd 100644
--- a/libphobos/libdruntime/gcc/sections/elf.d
+++ b/libphobos/libdruntime/gcc/sections/elf.d
@@ -90,8 +90,8 @@ import core.sys.posix.pthread;
import rt.deh;
import rt.dmain2;
import rt.minfo;
-import rt.util.container.array;
-import rt.util.container.hashtab;
+import core.internal.container.array;
+import core.internal.container.hashtab;
import gcc.builtins;
import gcc.config;
import gcc.sections.common;
@@ -124,7 +124,7 @@ struct DSO
return _moduleGroup.modules;
}
- @property ref inout(ModuleGroup) moduleGroup() inout nothrow @nogc
+ @property ref inout(ModuleGroup) moduleGroup() inout return nothrow @nogc
{
return _moduleGroup;
}
diff --git a/libphobos/libdruntime/gcc/sections/macho.d b/libphobos/libdruntime/gcc/sections/macho.d
index 3ce58a53..e6e7966 100644
--- a/libphobos/libdruntime/gcc/sections/macho.d
+++ b/libphobos/libdruntime/gcc/sections/macho.d
@@ -31,8 +31,8 @@ import core.sys.darwin.mach.dyld;
import core.sys.darwin.mach.getsect;
import core.sys.posix.pthread;
import rt.minfo;
-import rt.util.container.array;
-import rt.util.container.hashtab;
+import core.internal.container.array;
+import core.internal.container.hashtab;
import gcc.sections.common;
version (GNU_EMUTLS)
@@ -66,7 +66,7 @@ struct DSO
return _moduleGroup.modules;
}
- @property ref inout(ModuleGroup) moduleGroup() inout nothrow @nogc
+ @property ref inout(ModuleGroup) moduleGroup() inout return nothrow @nogc
{
return _moduleGroup;
}
diff --git a/libphobos/libdruntime/gcc/sections/pecoff.d b/libphobos/libdruntime/gcc/sections/pecoff.d
index ed0340e..7361349 100644
--- a/libphobos/libdruntime/gcc/sections/pecoff.d
+++ b/libphobos/libdruntime/gcc/sections/pecoff.d
@@ -30,8 +30,8 @@ import core.sys.windows.winbase;
import core.sys.windows.windef;
import core.sys.windows.winnt;
import rt.minfo;
-import rt.util.container.array;
-import rt.util.container.hashtab;
+import core.internal.container.array;
+import core.internal.container.hashtab;
import gcc.sections.common;
version (GNU_EMUTLS)
@@ -65,7 +65,7 @@ struct DSO
return _moduleGroup.modules;
}
- @property ref inout(ModuleGroup) moduleGroup() inout nothrow @nogc
+ @property ref inout(ModuleGroup) moduleGroup() inout return nothrow @nogc
{
return _moduleGroup;
}
diff --git a/libphobos/libdruntime/object.d b/libphobos/libdruntime/object.d
index e96d1c4..151755f 100644
--- a/libphobos/libdruntime/object.d
+++ b/libphobos/libdruntime/object.d
@@ -1,21 +1,62 @@
/**
+ * $(SCRIPT inhibitQuickIndex = 1;)
+ * $(DIVC quickindex,
+ * $(BOOKTABLE,
+ * $(TR $(TH Category) $(TH Symbols))
+ * $(TR $(TD Arrays) $(TD
+ * $(MYREF assumeSafeAppend)
+ * $(MYREF capacity)
+ * $(MYREF idup)
+ * $(MYREF reserve)
+ * ))
+ * $(TR $(TD Associative arrays) $(TD
+ * $(MYREF byKey)
+ * $(MYREF byKeyValue)
+ * $(MYREF byValue)
+ * $(MYREF clear)
+ * $(MYREF get)
+ * $(MYREF keys)
+ * $(MYREF rehash)
+ * $(MYREF require)
+ * $(MYREF update)
+ * $(MYREF values)
+ * ))
+ * $(TR $(TD General) $(TD
+ * $(MYREF destroy)
+ * $(MYREF dup)
+ * $(MYREF hashOf)
+ * $(MYREF opEquals)
+ * ))
+ * $(TR $(TD Types) $(TD
+ * $(MYREF Error)
+ * $(MYREF Exception)
+ * $(MYREF noreturn)
+ * $(MYREF Object)
+ * $(MYREF Throwable)
+ * ))
+ * $(TR $(TD Type info) $(TD
+ * $(MYREF Interface)
+ * $(MYREF ModuleInfo)
+ * $(MYREF OffsetTypeInfo)
+ * $(MYREF RTInfoImpl)
+ * $(MYREF rtinfoNoPointers)
+ * $(MYREF TypeInfo)
+ * $(MYREF TypeInfo_Class)
+ * ))
+ * ))
+ *
* Forms the symbols available to all D programs. Includes Object, which is
* the root of the class object hierarchy. This module is implicitly
* imported.
*
* Copyright: Copyright Digital Mars 2000 - 2011.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Walter Bright, Sean Kelly
+ * Source: $(DRUNTIMESRC object.d)
*/
module object;
-private
-{
- extern (C) Object _d_newclass(const TypeInfo_Class ci);
- extern (C) void rt_finalize(void *data, bool det=true);
-}
-
alias size_t = typeof(int.sizeof);
alias ptrdiff_t = typeof(cast(void*)0 - cast(void*)0);
@@ -29,7 +70,12 @@ alias string = immutable(char)[];
alias wstring = immutable(wchar)[];
alias dstring = immutable(dchar)[];
-version (D_ObjectiveC) public import core.attribute : selector;
+version (D_ObjectiveC)
+{
+ deprecated("explicitly import `selector` instead using: `import core.attribute : selector;`")
+ public import core.attribute : selector;
+}
+version (Posix) public import core.attribute : gnuAbiTag;
// Some ABIs use a complex varargs implementation requiring TypeInfo.argTypes().
version (GNU)
@@ -65,6 +111,20 @@ class Object
return typeid(this).name;
}
+ @system unittest
+ {
+ enum unittest_sym_name = __traits(identifier, __traits(parent, (){}));
+ enum fqn_unittest = "object.Object." ~ unittest_sym_name; // object.__unittest_LX_CY
+
+ class C {}
+
+ Object obj = new Object;
+ C c = new C;
+
+ assert(obj.toString() == "object.Object");
+ assert(c.toString() == fqn_unittest ~ ".C");
+ }
+
/**
* Compute hash function for Object.
*/
@@ -100,6 +160,23 @@ class Object
//return this !is o;
}
+ @system unittest
+ {
+ Object obj = new Object;
+
+ bool gotCaught;
+ try
+ {
+ obj.opCmp(new Object);
+ }
+ catch (Exception e)
+ {
+ gotCaught = true;
+ assert(e.msg == "need opCmp for class object.Object");
+ }
+ assert(gotCaught);
+ }
+
/**
* Test whether $(D this) is equal to $(D o).
* The default implementation only compares by identity (using the $(D is) operator).
@@ -149,9 +226,18 @@ class Object
}
return null;
}
+
+ @system unittest
+ {
+ Object valid_obj = Object.factory("object.Object");
+ Object invalid_obj = Object.factory("object.__this_class_doesnt_exist__");
+
+ assert(valid_obj !is null);
+ assert(invalid_obj is null);
+ }
}
-auto opEquals(Object lhs, Object rhs)
+bool opEquals(Object lhs, Object rhs)
{
// If aliased to the same object or both null => equal
if (lhs is rhs) return true;
@@ -159,6 +245,8 @@ auto opEquals(Object lhs, Object rhs)
// If either is null => non-equal
if (lhs is null || rhs is null) return false;
+ if (!lhs.opEquals(rhs)) return false;
+
// If same exact type => one call to method opEquals
if (typeid(lhs) is typeid(rhs) ||
!__ctfe && typeid(lhs).opEquals(typeid(rhs)))
@@ -166,22 +254,116 @@ auto opEquals(Object lhs, Object rhs)
(issue 7147). But CTFE also guarantees that equal TypeInfos are
always identical. So, no opEquals needed during CTFE. */
{
- return lhs.opEquals(rhs);
+ return true;
}
// General case => symmetric calls to method opEquals
- return lhs.opEquals(rhs) && rhs.opEquals(lhs);
+ return rhs.opEquals(lhs);
}
/************************
* Returns true if lhs and rhs are equal.
*/
-auto opEquals(const Object lhs, const Object rhs)
+bool opEquals(const Object lhs, const Object rhs)
{
// A hack for the moment.
return opEquals(cast()lhs, cast()rhs);
}
+/// If aliased to the same object or both null => equal
+@system unittest
+{
+ class F { int flag; this(int flag) { this.flag = flag; } }
+
+ F f;
+ assert(f == f); // both null
+ f = new F(1);
+ assert(f == f); // both aliased to the same object
+}
+
+/// If either is null => non-equal
+@system unittest
+{
+ class F { int flag; this(int flag) { this.flag = flag; } }
+ F f;
+ assert(!(new F(0) == f));
+ assert(!(f == new F(0)));
+}
+
+/// If same exact type => one call to method opEquals
+@system unittest
+{
+ class F
+ {
+ int flag;
+
+ this(int flag)
+ {
+ this.flag = flag;
+ }
+
+ override bool opEquals(const Object o)
+ {
+ return flag == (cast(F) o).flag;
+ }
+ }
+
+ F f;
+ assert(new F(0) == new F(0));
+ assert(!(new F(0) == new F(1)));
+}
+
+/// General case => symmetric calls to method opEquals
+@system unittest
+{
+ int fEquals, gEquals;
+
+ class Base
+ {
+ int flag;
+ this(int flag)
+ {
+ this.flag = flag;
+ }
+ }
+
+ class F : Base
+ {
+ this(int flag) { super(flag); }
+
+ override bool opEquals(const Object o)
+ {
+ fEquals++;
+ return flag == (cast(Base) o).flag;
+ }
+ }
+
+ class G : Base
+ {
+ this(int flag) { super(flag); }
+
+ override bool opEquals(const Object o)
+ {
+ gEquals++;
+ return flag == (cast(Base) o).flag;
+ }
+ }
+
+ assert(new F(1) == new G(1));
+ assert(fEquals == 1);
+ assert(gEquals == 1);
+}
+
+// To cover const Object opEquals
+@system unittest
+{
+ const Object obj1 = new Object;
+ const Object obj2 = new Object;
+
+ assert(obj1 == obj1);
+ assert(obj1 != obj2);
+}
+
private extern(C) void _d_setSameMutex(shared Object ownee, shared Object owner) nothrow;
void setSameMutex(shared Object ownee, shared Object owner)
@@ -189,6 +371,24 @@ void setSameMutex(shared Object ownee, shared Object owner)
_d_setSameMutex(ownee, owner);
}
+@system unittest
+{
+ shared Object obj1 = new Object;
+ synchronized class C
+ {
+ void bar() {}
+ }
+ shared C obj2 = new shared(C);
+ obj2.bar();
+
+ assert(obj1.__monitor != obj2.__monitor);
+ assert(obj1.__monitor is null);
+
+ setSameMutex(obj1, obj2);
+ assert(obj1.__monitor == obj2.__monitor);
+ assert(obj1.__monitor !is null);
+}
+
/**
* Information about an interface.
* When an object is accessed via an interface, an Interface* appears as the
@@ -218,7 +418,7 @@ struct OffsetTypeInfo
*/
class TypeInfo
{
- override string toString() const pure @safe nothrow
+ override string toString() const @safe nothrow
{
return typeid(this).name;
}
@@ -228,18 +428,21 @@ class TypeInfo
return hashOf(this.toString());
}
- override int opCmp(Object o)
+ override int opCmp(Object rhs)
{
- import core.internal.traits : externDFunc;
- alias dstrcmp = externDFunc!("core.internal.string.dstrcmp",
- int function(scope const char[] s1, scope const char[] s2) @trusted pure nothrow @nogc);
-
- if (this is o)
+ if (this is rhs)
return 0;
- TypeInfo ti = cast(TypeInfo)o;
+ auto ti = cast(TypeInfo) rhs;
if (ti is null)
return 1;
- return dstrcmp(this.toString(), ti.toString());
+ return __cmp(this.toString(), ti.toString());
+ }
+
+ @system unittest
+ {
+ assert(typeid(void) <= typeid(void));
+ assert(typeid(void).opCmp(null));
+ assert(!typeid(void).opCmp(typeid(void)));
}
override bool opEquals(Object o)
@@ -254,6 +457,14 @@ class TypeInfo
return ti && this.toString() == ti.toString();
}
+ @system unittest
+ {
+ auto anotherObj = new Object();
+
+ assert(typeid(void).opEquals(typeid(void)));
+ assert(!typeid(void).opEquals(anotherObj));
+ }
+
/**
* Computes a hash of the instance of a type.
* Params:
@@ -280,8 +491,22 @@ class TypeInfo
/// Swaps two instances of the type.
void swap(void* p1, void* p2) const
{
- immutable size_t n = tsize;
- for (size_t i = 0; i < n; i++)
+ size_t remaining = tsize;
+ // If the type might contain pointers perform the swap in pointer-sized
+ // chunks in case a garbage collection pass interrupts this function.
+ if ((cast(size_t) p1 | cast(size_t) p2) % (void*).alignof == 0)
+ {
+ while (remaining >= (void*).sizeof)
+ {
+ void* tmp = *cast(void**) p1;
+ *cast(void**) p1 = *cast(void**) p2;
+ *cast(void**) p2 = tmp;
+ p1 += (void*).sizeof;
+ p2 += (void*).sizeof;
+ remaining -= (void*).sizeof;
+ }
+ }
+ for (size_t i = 0; i < remaining; i++)
{
byte t = (cast(byte *)p1)[i];
(cast(byte*)p1)[i] = (cast(byte*)p2)[i];
@@ -289,6 +514,36 @@ class TypeInfo
}
}
+ @system unittest
+ {
+ class _TypeInfo_Dummy : TypeInfo
+ {
+ override const(void)[] initializer() const { return []; }
+ @property override size_t tsize() nothrow pure const @safe @nogc { return tsize_val; }
+
+ size_t tsize_val;
+ }
+ auto dummy = new _TypeInfo_Dummy();
+ cast(void)dummy.initializer(); // For coverage completeness
+
+ int a = 2, b = -2;
+ dummy.swap(&a, &b);
+ // does nothing because tsize is 0
+ assert(a == 2);
+ assert(b == -2);
+
+ dummy.tsize_val = int.sizeof;
+ dummy.swap(&a, &b);
+ assert(a == -2);
+ assert(b == 2);
+
+ void* ptr_a = null, ptr_b = cast(void*)1;
+ dummy.tsize_val = (void*).sizeof;
+ dummy.swap(&ptr_a, &ptr_b);
+ assert(ptr_a is cast(void*)1);
+ assert(ptr_b is null);
+ }
+
/** Get TypeInfo for 'next' type, as defined by what kind of type this is,
null if none. */
@property inout(TypeInfo) next() nothrow pure inout @nogc { return null; }
@@ -302,7 +557,7 @@ class TypeInfo
abstract const(void)[] initializer() nothrow pure const @safe @nogc;
/** Get flags for type: 1 means GC should scan for pointers,
- 2 means arg of this type is passed in XMM register */
+ 2 means arg of this type is passed in SIMD register(s) if available */
@property uint flags() nothrow pure const @safe @nogc { return 0; }
/// Get type information on the contents of the type; null if not available
@@ -330,9 +585,87 @@ class TypeInfo
@property immutable(void)* rtInfo() nothrow pure const @safe @nogc { return rtinfoHasPointers; } // better safe than sorry
}
+@system unittest
+{
+ class _TypeInfo_Dummy : TypeInfo
+ {
+ override const(void)[] initializer() const { return []; }
+ }
+ auto dummy = new _TypeInfo_Dummy();
+ cast(void)dummy.initializer(); // For coverage completeness
+
+ assert(dummy.rtInfo() is rtinfoHasPointers);
+ assert(typeid(void).rtInfo() is rtinfoNoPointers);
+
+ assert(dummy.tsize() == 0);
+
+ bool gotCaught;
+ try
+ {
+ dummy.compare(null, null);
+ } catch (Error e)
+ {
+ gotCaught = true;
+ assert(e.msg == "TypeInfo.compare is not implemented");
+ }
+ assert(gotCaught);
+
+ assert(dummy.equals(null, null));
+ assert(!dummy.equals(cast(void*)1, null));
+}
+
+@system unittest
+{
+ assert(typeid(void).next() is null);
+ assert(typeid(void).offTi() is null);
+ assert(typeid(void).tsize() == 1);
+
+ version (WithArgTypes)
+ {
+ TypeInfo ti1;
+ TypeInfo ti2;
+ assert(typeid(void).argTypes(ti1, ti2) == 0);
+ assert(typeid(void) is ti1);
+
+ assert(ti1 !is null);
+ assert(ti2 is null);
+ }
+}
+
+@system unittest
+{
+ class _ZypeInfo_Dummy : TypeInfo
+ {
+ override const(void)[] initializer() const { return []; }
+ }
+ auto dummy2 = new _ZypeInfo_Dummy();
+ cast(void)dummy2.initializer(); // For coverage completeness
+
+ assert(typeid(void) > dummy2);
+ assert(dummy2 < typeid(void));
+}
+
+@safe unittest
+{
+ enum unittest_sym_name = __traits(identifier, __traits(parent, (){}));
+ enum fqn_unittest = "object." ~ unittest_sym_name; // object.__unittest_LX_CY
+
+ class _TypeInfo_Dummy : TypeInfo
+ {
+ override const(void)[] initializer() const { return []; }
+ }
+
+ auto dummy = new _TypeInfo_Dummy();
+ cast(void)dummy.initializer(); // For coverage completeness
+
+ assert(dummy.toString() == fqn_unittest ~ "._TypeInfo_Dummy");
+ assert(dummy.toHash() == hashOf(dummy.toString()));
+ assert(dummy.getHash(null) == 0);
+}
+
class TypeInfo_Enum : TypeInfo
{
- override string toString() const { return name; }
+ override string toString() const pure { return name; }
override bool opEquals(Object o)
{
@@ -343,15 +676,117 @@ class TypeInfo_Enum : TypeInfo
this.base == c.base;
}
+ @system unittest
+ {
+ enum E { A, B, C }
+ enum EE { A, B, C }
+
+ assert(typeid(E).opEquals(typeid(E)));
+ assert(!typeid(E).opEquals(typeid(EE)));
+ }
+
override size_t getHash(scope const void* p) const { return base.getHash(p); }
+
+ @system unittest
+ {
+ enum E { A, B, C }
+ E e1 = E.A;
+ E e2 = E.B;
+
+ assert(typeid(E).getHash(&e1) == hashOf(E.A));
+ assert(typeid(E).getHash(&e2) == hashOf(E.B));
+
+ enum ES : string { A = "foo", B = "bar" }
+ ES es1 = ES.A;
+ ES es2 = ES.B;
+
+ assert(typeid(ES).getHash(&es1) == hashOf("foo"));
+ assert(typeid(ES).getHash(&es2) == hashOf("bar"));
+ }
+
override bool equals(in void* p1, in void* p2) const { return base.equals(p1, p2); }
+
+ @system unittest
+ {
+ enum E { A, B, C }
+
+ E e1 = E.A;
+ E e2 = E.B;
+
+ assert(typeid(E).equals(&e1, &e1));
+ assert(!typeid(E).equals(&e1, &e2));
+ }
+
override int compare(in void* p1, in void* p2) const { return base.compare(p1, p2); }
+
+ @system unittest
+ {
+ enum E { A, B, C }
+
+ E e1 = E.A;
+ E e2 = E.B;
+
+ assert(typeid(E).compare(&e1, &e1) == 0);
+ assert(typeid(E).compare(&e1, &e2) < 0);
+ assert(typeid(E).compare(&e2, &e1) > 0);
+ }
+
override @property size_t tsize() nothrow pure const { return base.tsize; }
+
+ @safe unittest
+ {
+ enum E { A, B, C }
+ enum ES : string { A = "a", B = "b", C = "c"}
+
+ assert(typeid(E).tsize == E.sizeof);
+ assert(typeid(ES).tsize == ES.sizeof);
+ assert(typeid(E).tsize != ES.sizeof);
+ }
+
override void swap(void* p1, void* p2) const { return base.swap(p1, p2); }
+ @system unittest
+ {
+ enum E { A, B, C }
+
+ E e1 = E.A;
+ E e2 = E.B;
+
+ typeid(E).swap(&e1, &e2);
+ assert(e1 == E.B);
+ assert(e2 == E.A);
+ }
+
override @property inout(TypeInfo) next() nothrow pure inout { return base.next; }
+
+ @system unittest
+ {
+ enum E { A, B, C }
+
+ assert(typeid(E).next is null);
+ }
+
override @property uint flags() nothrow pure const { return base.flags; }
+ @safe unittest
+ {
+ enum E { A, B, C }
+
+ assert(typeid(E).flags == 0);
+ }
+
+ override const(OffsetTypeInfo)[] offTi() const { return base.offTi; }
+
+ @system unittest
+ {
+ enum E { A, B, C }
+
+ assert(typeid(E).offTi is null);
+ }
+
+ override void destroy(void* p) const { return base.destroy(p); }
+ override void postblit(void* p) const { return base.postblit(p); }
+
override const(void)[] initializer() const
{
return m_init.length ? m_init : base.initializer();
@@ -371,7 +806,19 @@ class TypeInfo_Enum : TypeInfo
void[] m_init;
}
-unittest // issue 12233
+@safe unittest
+{
+ enum unittest_sym_name = __traits(identifier, __traits(parent, (){}));
+ enum fqn_unittest = "object." ~ unittest_sym_name; // object.__unittest_LX_CY
+
+ enum E { A, B, C }
+ enum EE { A, B, C }
+
+ assert(typeid(E).toString() == fqn_unittest ~ ".E");
+}
+
+
+@safe unittest // issue 12233
{
static assert(is(typeof(TypeInfo.init) == TypeInfo));
assert(TypeInfo.init is null);
@@ -404,12 +851,8 @@ class TypeInfo_Pointer : TypeInfo
override int compare(in void* p1, in void* p2) const
{
- if (*cast(void**)p1 < *cast(void**)p2)
- return -1;
- else if (*cast(void**)p1 > *cast(void**)p2)
- return 1;
- else
- return 0;
+ const v1 = *cast(void**) p1, v2 = *cast(void**) p2;
+ return (v1 > v2) - (v1 < v2);
}
override @property size_t tsize() nothrow pure const
@@ -483,7 +926,7 @@ class TypeInfo_Array : TypeInfo
if (result)
return result;
}
- return cast(int)a1.length - cast(int)a2.length;
+ return (a1.length > a2.length) - (a1.length < a2.length);
}
override @property size_t tsize() nothrow pure const
@@ -531,12 +974,12 @@ class TypeInfo_StaticArray : TypeInfo
{
override string toString() const
{
- import core.internal.traits : externDFunc;
- alias sizeToTempString = externDFunc!("core.internal.string.unsignedToTempString",
- char[] function(ulong, return char[], uint) @safe pure nothrow @nogc);
+ import core.internal.string : unsignedToTempString;
char[20] tmpBuff = void;
- return value.toString() ~ "[" ~ sizeToTempString(len, tmpBuff, 10) ~ "]";
+ const lenString = unsignedToTempString(len, tmpBuff);
+
+ return (() @trusted => cast(string) (value.toString() ~ "[" ~ lenString ~ "]"))();
}
override bool opEquals(Object o)
@@ -585,28 +1028,22 @@ class TypeInfo_StaticArray : TypeInfo
override void swap(void* p1, void* p2) const
{
- import core.memory;
import core.stdc.string : memcpy;
- void* tmp;
- size_t sz = value.tsize;
- ubyte[16] buffer;
- void* pbuffer;
-
- if (sz < buffer.sizeof)
- tmp = buffer.ptr;
- else
- tmp = pbuffer = (new void[sz]).ptr;
-
- for (size_t u = 0; u < len; u += sz)
+ size_t remaining = value.tsize * len;
+ void[size_t.sizeof * 4] buffer = void;
+ while (remaining > buffer.length)
{
- size_t o = u * sz;
- memcpy(tmp, p1 + o, sz);
- memcpy(p1 + o, p2 + o, sz);
- memcpy(p2 + o, tmp, sz);
+ memcpy(buffer.ptr, p1, buffer.length);
+ memcpy(p1, p2, buffer.length);
+ memcpy(p2, buffer.ptr, buffer.length);
+ p1 += buffer.length;
+ p2 += buffer.length;
+ remaining -= buffer.length;
}
- if (pbuffer)
- GC.free(pbuffer);
+ memcpy(buffer.ptr, p1, remaining);
+ memcpy(p1, p2, remaining);
+ memcpy(p2, buffer.ptr, remaining);
}
override const(void)[] initializer() nothrow pure const
@@ -656,6 +1093,23 @@ class TypeInfo_StaticArray : TypeInfo
override @property immutable(void)* rtInfo() nothrow pure const @safe { return value.rtInfo(); }
}
+// https://issues.dlang.org/show_bug.cgi?id=21315
+@system unittest
+{
+ int[16] a, b;
+ foreach (int i; 0 .. 16)
+ {
+ a[i] = i;
+ b[i] = ~i;
+ }
+ typeid(int[16]).swap(&a, &b);
+ foreach (int i; 0 .. 16)
+ {
+ assert(a[i] == ~i);
+ assert(b[i] == i);
+ }
+}
+
class TypeInfo_AssociativeArray : TypeInfo
{
override string toString() const
@@ -731,7 +1185,7 @@ class TypeInfo_Vector : TypeInfo
override void swap(void* p1, void* p2) const { return base.swap(p1, p2); }
override @property inout(TypeInfo) next() nothrow pure inout { return base.next; }
- override @property uint flags() nothrow pure const { return base.flags; }
+ override @property uint flags() nothrow pure const { return 2; /* passed in SIMD register */ }
override const(void)[] initializer() nothrow pure const
{
@@ -750,14 +1204,14 @@ class TypeInfo_Vector : TypeInfo
class TypeInfo_Function : TypeInfo
{
- override string toString() const
+ override string toString() const pure @trusted
{
import core.demangle : demangleType;
alias SafeDemangleFunctionType = char[] function (const(char)[] buf, char[] dst = null) @safe nothrow pure;
- SafeDemangleFunctionType demangle = ( () @trusted => cast(SafeDemangleFunctionType)(&demangleType) ) ();
+ SafeDemangleFunctionType demangle = cast(SafeDemangleFunctionType) &demangleType;
- return (() @trusted => cast(string)(demangle(deco))) ();
+ return cast(string) demangle(deco);
}
override bool opEquals(Object o)
@@ -790,7 +1244,7 @@ class TypeInfo_Function : TypeInfo
string deco;
}
-unittest
+@safe unittest
{
abstract class C
{
@@ -805,11 +1259,57 @@ unittest
assert(typeid(functionTypes[2]).toString() == "int function(int, int)");
}
+@system unittest
+{
+ abstract class C
+ {
+ void func();
+ void func(int a);
+ }
+
+ alias functionTypes = typeof(__traits(getVirtualFunctions, C, "func"));
+
+ Object obj = typeid(functionTypes[0]);
+ assert(obj.opEquals(typeid(functionTypes[0])));
+ assert(typeid(functionTypes[0]) == typeid(functionTypes[0]));
+ assert(typeid(functionTypes[0]) != typeid(functionTypes[1]));
+
+ assert(typeid(functionTypes[0]).tsize() == 0);
+ assert(typeid(functionTypes[0]).initializer() is null);
+ assert(typeid(functionTypes[0]).rtInfo() is null);
+}
+
class TypeInfo_Delegate : TypeInfo
{
- override string toString() const
+ override string toString() const pure @trusted
+ {
+ import core.demangle : demangleType;
+
+ alias SafeDemangleFunctionType = char[] function (const(char)[] buf, char[] dst = null) @safe nothrow pure;
+ SafeDemangleFunctionType demangle = cast(SafeDemangleFunctionType) &demangleType;
+
+ return cast(string) demangle(deco);
+ }
+
+ @safe unittest
{
- return cast(string)(next.toString() ~ " delegate()");
+ double sqr(double x) { return x * x; }
+ sqr(double.init); // for coverage completeness
+
+ auto delegate_str = "double delegate(double) pure nothrow @nogc @safe";
+
+ assert(typeid(typeof(&sqr)).toString() == delegate_str);
+ assert(delegate_str.hashOf() == typeid(typeof(&sqr)).hashOf());
+ assert(typeid(typeof(&sqr)).toHash() == typeid(typeof(&sqr)).hashOf());
+
+ int g;
+
+ alias delegate_type = typeof((int a, int b) => a + b + g);
+ delegate_str = "int delegate(int, int) pure nothrow @nogc @safe";
+
+ assert(typeid(delegate_type).toString() == delegate_str);
+ assert(delegate_str.hashOf() == typeid(delegate_type).hashOf());
+ assert(typeid(delegate_type).toHash() == typeid(delegate_type).hashOf());
}
override bool opEquals(Object o)
@@ -820,6 +1320,19 @@ class TypeInfo_Delegate : TypeInfo
return c && this.deco == c.deco;
}
+ @system unittest
+ {
+ double sqr(double x) { return x * x; }
+ int dbl(int x) { return x + x; }
+ sqr(double.init); // for coverage completeness
+ dbl(int.init); // for coverage completeness
+
+ Object obj = typeid(typeof(&sqr));
+ assert(obj.opEquals(typeid(typeof(&sqr))));
+ assert(typeid(typeof(&sqr)) == typeid(typeof(&sqr)));
+ assert(typeid(typeof(&dbl)) != typeid(typeof(&sqr)));
+ }
+
override size_t getHash(scope const void* p) @trusted const
{
return hashOf(*cast(void delegate()*)p);
@@ -877,6 +1390,10 @@ class TypeInfo_Delegate : TypeInfo
override @property immutable(void)* rtInfo() nothrow pure const @safe { return RTInfo!(int delegate()); }
}
+private extern (C) Object _d_newclass(const TypeInfo_Class ci);
+private extern (C) int _d_isbaseof(scope TypeInfo_Class child,
+ scope const TypeInfo_Class parent) @nogc nothrow pure @safe; // rt.cast_
+
/**
* Runtime type information about a class.
* Can be retrieved from an object instance by using the
@@ -884,14 +1401,14 @@ class TypeInfo_Delegate : TypeInfo
*/
class TypeInfo_Class : TypeInfo
{
- override string toString() const { return info.name; }
+ override string toString() const pure { return name; }
override bool opEquals(Object o)
{
if (this is o)
return true;
auto c = cast(const TypeInfo_Class)o;
- return c && this.info.name == c.info.name;
+ return c && this.name == c.name;
}
override size_t getHash(scope const void* p) @trusted const
@@ -947,8 +1464,8 @@ class TypeInfo_Class : TypeInfo
return m_offTi;
}
- @property auto info() @safe nothrow pure const { return this; }
- @property auto typeinfo() @safe nothrow pure const { return this; }
+ final @property auto info() @safe @nogc nothrow pure const return { return this; }
+ final @property auto typeinfo() @safe @nogc nothrow pure const return { return this; }
byte[] m_init; /** class static initializer
* (init.length gives size in bytes of class)
@@ -983,7 +1500,7 @@ class TypeInfo_Class : TypeInfo
* Search all modules for TypeInfo_Class corresponding to classname.
* Returns: null if not found
*/
- static const(TypeInfo_Class) find(in char[] classname)
+ static const(TypeInfo_Class) find(const scope char[] classname)
{
foreach (m; ModuleInfo)
{
@@ -1019,11 +1536,41 @@ class TypeInfo_Class : TypeInfo
}
return o;
}
+
+ /**
+ * Returns true if the class described by `child` derives from or is
+ * the class described by this `TypeInfo_Class`. Always returns false
+ * if the argument is null.
+ *
+ * Params:
+ * child = TypeInfo for some class
+ * Returns:
+ * true if the class described by `child` derives from or is the
+ * class described by this `TypeInfo_Class`.
+ */
+ final bool isBaseOf(scope const TypeInfo_Class child) const @nogc nothrow pure @trusted
+ {
+ if (m_init.length)
+ {
+ // If this TypeInfo_Class represents an actual class we only need
+ // to check the child and its direct ancestors.
+ for (auto ti = cast() child; ti !is null; ti = ti.base)
+ if (ti is this)
+ return true;
+ return false;
+ }
+ else
+ {
+ // If this TypeInfo_Class is the .info field of a TypeInfo_Interface
+ // we also need to recursively check the child's interfaces.
+ return child !is null && _d_isbaseof(cast() child, this);
+ }
+ }
}
alias ClassInfo = TypeInfo_Class;
-unittest
+@safe unittest
{
// Bugzilla 14401
static class X
@@ -1039,7 +1586,7 @@ unittest
class TypeInfo_Interface : TypeInfo
{
- override string toString() const { return info.name; }
+ override string toString() const pure { return info.name; }
override bool opEquals(Object o)
{
@@ -1108,19 +1655,67 @@ class TypeInfo_Interface : TypeInfo
override @property uint flags() nothrow pure const { return 1; }
TypeInfo_Class info;
+
+ /**
+ * Returns true if the class described by `child` derives from the
+ * interface described by this `TypeInfo_Interface`. Always returns
+ * false if the argument is null.
+ *
+ * Params:
+ * child = TypeInfo for some class
+ * Returns:
+ * true if the class described by `child` derives from the
+ * interface described by this `TypeInfo_Interface`.
+ */
+ final bool isBaseOf(scope const TypeInfo_Class child) const @nogc nothrow pure @trusted
+ {
+ return child !is null && _d_isbaseof(cast() child, this.info);
+ }
+
+ /**
+ * Returns true if the interface described by `child` derives from
+ * or is the interface described by this `TypeInfo_Interface`.
+ * Always returns false if the argument is null.
+ *
+ * Params:
+ * child = TypeInfo for some interface
+ * Returns:
+ * true if the interface described by `child` derives from or is
+ * the interface described by this `TypeInfo_Interface`.
+ */
+ final bool isBaseOf(scope const TypeInfo_Interface child) const @nogc nothrow pure @trusted
+ {
+ return child !is null && _d_isbaseof(cast() child.info, this.info);
+ }
+}
+
+@safe unittest
+{
+ enum unittest_sym_name = __traits(identifier, __traits(parent, (){}));
+ enum fqn_unittest = "object." ~ unittest_sym_name; // object.__unittest_LX_CY
+
+ interface I {}
+
+ assert(fqn_unittest ~ ".I" == typeid(I).info.name);
+ assert((fqn_unittest ~ ".I").hashOf() == typeid(I).hashOf());
+ assert(typeid(I).toHash() == typeid(I).hashOf());
}
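// [Illustrative sketch, not part of this patch] Hypothetical interfaces I, J and
// class C exercising both TypeInfo_Interface.isBaseOf overloads added above.
@safe unittest
{
    interface I {}
    interface J : I {}
    class C : J {}

    // TypeInfo_Class overload: C implements I through J.
    assert(typeid(I).isBaseOf(typeid(C)));

    // TypeInfo_Interface overload: J derives from I, but not the reverse.
    assert(typeid(I).isBaseOf(typeid(J)));
    assert(!typeid(J).isBaseOf(typeid(I)));
}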
class TypeInfo_Struct : TypeInfo
{
override string toString() const { return name; }
+ override size_t toHash() const
+ {
+ return hashOf(this.mangledName);
+ }
+
override bool opEquals(Object o)
{
if (this is o)
return true;
auto s = cast(const TypeInfo_Struct)o;
- return s && this.name == s.name &&
- this.initializer().length == s.initializer().length;
+ return s && this.mangledName == s.mangledName;
}
override size_t getHash(scope const void* p) @trusted pure nothrow const
@@ -1219,23 +1814,45 @@ class TypeInfo_Struct : TypeInfo
(*xpostblit)(p);
}
- string name;
- void[] m_init; // initializer; m_init.ptr == null if 0 initialize
+ string mangledName;
+
+ final @property string name() nothrow const @trusted
+ {
+ import core.demangle : demangleType;
+
+ if (mangledName is null) // e.g., opaque structs
+ return null;
- @safe pure nothrow
- {
- size_t function(in void*) xtoHash;
- bool function(in void*, in void*) xopEquals;
- int function(in void*, in void*) xopCmp;
- string function(in void*) xtoString;
+ const key = cast(const void*) this; // faster lookup than TypeInfo_Struct, at the cost of potential duplicates per binary
+ static string[typeof(key)] demangledNamesCache; // per thread
+
+ // not nothrow:
+ //return demangledNamesCache.require(key, cast(string) demangleType(mangledName));
+
+ if (auto pDemangled = key in demangledNamesCache)
+ return *pDemangled;
+
+ const demangled = cast(string) demangleType(mangledName);
+ demangledNamesCache[key] = demangled;
+ return demangled;
+ }
+
+ void[] m_init; // initializer; m_init.ptr == null if 0 initialize
- enum StructFlags : uint
+ @safe pure nothrow
{
- hasPointers = 0x1,
- isDynamicType = 0x2, // built at runtime, needs type info in xdtor
+ size_t function(in void*) xtoHash;
+ bool function(in void*, in void*) xopEquals;
+ int function(in void*, in void*) xopCmp;
+ string function(in void*) xtoString;
+
+ enum StructFlags : uint
+ {
+ hasPointers = 0x1,
+ isDynamicType = 0x2, // built at runtime, needs type info in xdtor
+ }
+ StructFlags m_flags;
}
- StructFlags m_flags;
- }
union
{
void function(void*) xdtor;
@@ -1261,7 +1878,7 @@ class TypeInfo_Struct : TypeInfo
immutable(void)* m_RTInfo; // data for precise GC
}
-unittest
+@system unittest
{
struct S
{
@@ -1428,12 +2045,7 @@ class TypeInfo_Inout : TypeInfo_Const
}
}
-
-///////////////////////////////////////////////////////////////////////////////
-// ModuleInfo
-///////////////////////////////////////////////////////////////////////////////
-
-
+// Contents of ModuleInfo._flags
enum
{
MIctorstart = 0x1, // we've started constructing it
@@ -1452,31 +2064,35 @@ enum
MIname = 0x1000,
}
-
+/*****************************************
+ * An instance of ModuleInfo is generated into the object file for each compiled module.
+ *
+ * It provides access to various aspects of the module.
+ * It is not generated for betterC.
+ */
struct ModuleInfo
{
- uint _flags;
+ uint _flags; // MIxxxx
uint _index; // index into _moduleinfo_array[]
version (all)
{
deprecated("ModuleInfo cannot be copy-assigned because it is a variable-sized struct.")
- void opAssign(in ModuleInfo m) { _flags = m._flags; _index = m._index; }
+ void opAssign(const scope ModuleInfo m) { _flags = m._flags; _index = m._index; }
}
else
{
@disable this();
- @disable this(this) const;
}
const:
- private void* addrOf(int flag) nothrow pure @nogc
+ private void* addrOf(int flag) return nothrow pure @nogc
in
{
assert(flag >= MItlsctor && flag <= MIname);
assert(!(flag & (flag - 1)) && !(flag & ~(flag - 1) << 1));
}
- body
+ do
{
import core.stdc.string : strlen;
@@ -1539,42 +2155,74 @@ const:
@property uint flags() nothrow pure @nogc { return _flags; }
+ /************************
+ * Returns:
+ * module constructor for thread locals, `null` if there isn't one
+ */
@property void function() tlsctor() nothrow pure @nogc
{
return flags & MItlsctor ? *cast(typeof(return)*)addrOf(MItlsctor) : null;
}
+ /************************
+ * Returns:
+ * module destructor for thread locals, `null` if there isn't one
+ */
@property void function() tlsdtor() nothrow pure @nogc
{
return flags & MItlsdtor ? *cast(typeof(return)*)addrOf(MItlsdtor) : null;
}
+ /*****************************
+ * Returns:
+ * address of a module's `const(MemberInfo)[] getMembers(string)` function, `null` if there isn't one
+ */
@property void* xgetMembers() nothrow pure @nogc
{
return flags & MIxgetMembers ? *cast(typeof(return)*)addrOf(MIxgetMembers) : null;
}
+ /************************
+ * Returns:
+ * module constructor, `null` if there isn't one
+ */
@property void function() ctor() nothrow pure @nogc
{
return flags & MIctor ? *cast(typeof(return)*)addrOf(MIctor) : null;
}
+ /************************
+ * Returns:
+ * module destructor, `null` if there isn't one
+ */
@property void function() dtor() nothrow pure @nogc
{
return flags & MIdtor ? *cast(typeof(return)*)addrOf(MIdtor) : null;
}
+ /************************
+ * Returns:
+ * module order independent constructor, `null` if there isn't one
+ */
@property void function() ictor() nothrow pure @nogc
{
return flags & MIictor ? *cast(typeof(return)*)addrOf(MIictor) : null;
}
+ /*************
+ * Returns:
+ * address of function that runs the module's unittests, `null` if there isn't one
+ */
@property void function() unitTest() nothrow pure @nogc
{
return flags & MIunitTest ? *cast(typeof(return)*)addrOf(MIunitTest) : null;
}
- @property immutable(ModuleInfo*)[] importedModules() nothrow pure @nogc
+ /****************
+ * Returns:
+ * array of pointers to the ModuleInfo's of modules imported by this one
+ */
+ @property immutable(ModuleInfo*)[] importedModules() return nothrow pure @nogc
{
if (flags & MIimportedModules)
{
@@ -1584,7 +2232,11 @@ const:
return null;
}
- @property TypeInfo_Class[] localClasses() nothrow pure @nogc
+ /****************
+ * Returns:
+ * array of TypeInfo_Class references for classes defined in this module
+ */
+ @property TypeInfo_Class[] localClasses() return nothrow pure @nogc
{
if (flags & MIlocalClasses)
{
@@ -1594,16 +2246,16 @@ const:
return null;
}
- @property string name() nothrow pure @nogc
+ /********************
+ * Returns:
+ * name of module, `null` if no name
+ */
+ @property string name() return nothrow pure @nogc
{
- if (true || flags & MIname) // always available for now
- {
- import core.stdc.string : strlen;
+ import core.stdc.string : strlen;
- auto p = cast(immutable char*)addrOf(MIname);
- return p[0 .. strlen(p)];
- }
- // return null;
+ auto p = cast(immutable char*) addrOf(MIname);
+ return p[0 .. strlen(p)];
}
static int opApply(scope int delegate(ModuleInfo*) dg)
@@ -1617,7 +2269,7 @@ const:
}
}
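// [Illustrative sketch, not part of this patch] Combining the ModuleInfo range
// with the name property to locate a module at run time; druntime's own
// "object" module is assumed to be linked in (it always is).
@system unittest
{
    bool foundObject;
    foreach (m; ModuleInfo)
    {
        if (m !is null && m.name == "object")
            foundObject = true;
    }
    assert(foundObject);
}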
-unittest
+@system unittest
{
ModuleInfo* m1;
foreach (m; ModuleInfo)
@@ -1677,23 +2329,115 @@ class Throwable : Object
* caught $(D Exception) will be chained to the new $(D Throwable) via this
* field.
*/
- Throwable next;
+ private Throwable nextInChain;
- @nogc @safe pure nothrow this(string msg, Throwable next = null)
+ private uint _refcount; // 0 : allocated by GC
+ // 1 : allocated by _d_newThrowable()
+ // 2.. : reference count + 1
+
+ /**
+ * Returns:
+ * A reference to the _next error in the list. This is used when a new
+ * $(D Throwable) is thrown from inside a $(D catch) block. The originally
+ * caught $(D Exception) will be chained to the new $(D Throwable) via this
+ * field.
+ */
+ @property inout(Throwable) next() @safe inout return scope pure nothrow @nogc { return nextInChain; }
+
+ /**
+ * Replace next in chain with `tail`.
+ * Use `chainTogether` instead if at all possible.
+ */
+ @property void next(Throwable tail) @safe scope pure nothrow @nogc
+ {
+ if (tail && tail._refcount)
+ ++tail._refcount; // increment the replacement *first*
+
+ auto n = nextInChain;
+ nextInChain = null; // sever the tail before deleting it
+
+ if (n && n._refcount)
+ _d_delThrowable(n); // now delete the old tail
+
+ nextInChain = tail; // and set the new tail
+ }
+
+ /**
+ * Returns:
+ * a mutable reference to the reference count, where
+ * 0 means allocated by the GC, 1 means allocated by _d_newThrowable(),
+ * and >= 2 is the reference count + 1
+ * Note:
+ * Marked as `@system` to discourage casual use of it.
+ */
+ @system @nogc final pure nothrow ref uint refcount() return { return _refcount; }
+
+ /**
+ * Loop over the chain of Throwables.
+ */
+ int opApply(scope int delegate(Throwable) dg)
+ {
+ int result = 0;
+ for (Throwable t = this; t; t = t.nextInChain)
+ {
+ result = dg(t);
+ if (result)
+ break;
+ }
+ return result;
+ }
+
+ /**
+ * Append `e2` to chain of exceptions that starts with `e1`.
+ * Params:
+ * e1 = start of chain (can be null)
+ * e2 = second part of chain (can be null)
+ * Returns:
+ * Throwable that is at the start of the chain; null if both `e1` and `e2` are null
+ */
+ static @__future @system @nogc pure nothrow Throwable chainTogether(return scope Throwable e1, return scope Throwable e2)
+ {
+ if (!e1)
+ return e2;
+ if (!e2)
+ return e1;
+ if (e2.refcount())
+ ++e2.refcount();
+
+ for (auto e = e1; 1; e = e.nextInChain)
+ {
+ if (!e.nextInChain)
+ {
+ e.nextInChain = e2;
+ break;
+ }
+ }
+ return e1;
+ }
+
+ @nogc @safe pure nothrow this(string msg, Throwable nextInChain = null)
{
this.msg = msg;
- this.next = next;
+ this.nextInChain = nextInChain;
+ if (nextInChain && nextInChain._refcount)
+ ++nextInChain._refcount;
//this.info = _d_traceContext();
}
- @nogc @safe pure nothrow this(string msg, string file, size_t line, Throwable next = null)
+ @nogc @safe pure nothrow this(string msg, string file, size_t line, Throwable nextInChain = null)
{
- this(msg, next);
+ this(msg, nextInChain);
this.file = file;
this.line = line;
//this.info = _d_traceContext();
}
+ @trusted nothrow ~this()
+ {
+ if (nextInChain && nextInChain._refcount)
+ _d_delThrowable(nextInChain);
+ }
+
/**
* Overrides $(D Object.toString) and returns the error message.
* Internally this forwards to the $(D toString) overload that
@@ -1714,15 +2458,13 @@ class Throwable : Object
*/
void toString(scope void delegate(in char[]) sink) const
{
- import core.internal.traits : externDFunc;
- alias sizeToTempString = externDFunc!("core.internal.string.unsignedToTempString",
- char[] function(ulong, return char[], uint) @safe pure nothrow @nogc);
+ import core.internal.string : unsignedToTempString;
char[20] tmpBuff = void;
sink(typeid(this).name);
sink("@"); sink(file);
- sink("("); sink(sizeToTempString(line, tmpBuff, 10)); sink(")");
+ sink("("); sink(unsignedToTempString(line, tmpBuff)); sink(")");
if (msg.length)
{
@@ -1744,6 +2486,19 @@ class Throwable : Object
}
}
}
+
+ /**
+ * Get the message describing the error.
+ * Base behavior is to return the `Throwable.msg` field.
+ * Override to return some other error message.
+ *
+ * Returns:
+ * Error message
+ */
+ @__future const(char)[] message() const
+ {
+ return this.msg;
+ }
}
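// [Illustrative sketch, not part of this patch] chainTogether links two
// GC-allocated Throwables and opApply walks the resulting chain; @system is
// required because chainTogether itself is @system.
@system unittest
{
    auto first = new Exception("first");
    auto second = new Exception("second");

    auto head = Throwable.chainTogether(first, second);
    assert(head is first);
    assert(head.next is second);

    int count;
    foreach (Throwable t; head)  // visits every Throwable in the chain
        ++count;
    assert(count == 2);
}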
@@ -1759,29 +2514,45 @@ class Exception : Throwable
{
/**
- * Creates a new instance of Exception. The next parameter is used
+ * Creates a new instance of Exception. The nextInChain parameter is used
* internally and should always be $(D null) when passed by user code.
* This constructor does not automatically throw the newly-created
* Exception; the $(D throw) statement should be used for that purpose.
*/
- @nogc @safe pure nothrow this(string msg, string file = __FILE__, size_t line = __LINE__, Throwable next = null)
+ @nogc @safe pure nothrow this(string msg, string file = __FILE__, size_t line = __LINE__, Throwable nextInChain = null)
{
- super(msg, file, line, next);
+ super(msg, file, line, nextInChain);
}
- @nogc @safe pure nothrow this(string msg, Throwable next, string file = __FILE__, size_t line = __LINE__)
+ @nogc @safe pure nothrow this(string msg, Throwable nextInChain, string file = __FILE__, size_t line = __LINE__)
{
- super(msg, file, line, next);
+ super(msg, file, line, nextInChain);
}
}
-unittest
+///
+@safe unittest
+{
+ bool gotCaught;
+ try
+ {
+ throw new Exception("msg");
+ }
+ catch (Exception e)
+ {
+ gotCaught = true;
+ assert(e.msg == "msg");
+ }
+ assert(gotCaught);
+}
+
+@system unittest
{
{
auto e = new Exception("msg");
assert(e.file == __FILE__);
assert(e.line == __LINE__ - 2);
- assert(e.next is null);
+ assert(e.nextInChain is null);
assert(e.msg == "msg");
}
@@ -1789,7 +2560,7 @@ unittest
auto e = new Exception("msg", new Exception("It's an Exception!"), "hello", 42);
assert(e.file == "hello");
assert(e.line == 42);
- assert(e.next !is null);
+ assert(e.nextInChain !is null);
assert(e.msg == "msg");
}
@@ -1797,9 +2568,14 @@ unittest
auto e = new Exception("msg", "hello", 42, new Exception("It's an Exception!"));
assert(e.file == "hello");
assert(e.line == 42);
- assert(e.next !is null);
+ assert(e.nextInChain !is null);
assert(e.msg == "msg");
}
+
+ {
+ auto e = new Exception("message");
+ assert(e.message == "message");
+ }
}
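// [Illustrative sketch, not part of this patch] A hypothetical subclass
// overriding the new message() hook to build a richer error text than msg.
@system unittest
{
    static class HypotheticalException : Exception
    {
        this(string msg) { super(msg); }
        override const(char)[] message() const { return "prefix: " ~ msg; }
    }

    auto e = new HypotheticalException("oops");
    assert(e.message == "prefix: oops");
}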
@@ -1815,20 +2591,20 @@ unittest
class Error : Throwable
{
/**
- * Creates a new instance of Error. The next parameter is used
+ * Creates a new instance of Error. The nextInChain parameter is used
* internally and should always be $(D null) when passed by user code.
* This constructor does not automatically throw the newly-created
* Error; the $(D throw) statement should be used for that purpose.
*/
- @nogc @safe pure nothrow this(string msg, Throwable next = null)
+ @nogc @safe pure nothrow this(string msg, Throwable nextInChain = null)
{
- super(msg, next);
+ super(msg, nextInChain);
bypassedException = null;
}
- @nogc @safe pure nothrow this(string msg, string file, size_t line, Throwable next = null)
+ @nogc @safe pure nothrow this(string msg, string file, size_t line, Throwable nextInChain = null)
{
- super(msg, file, line, next);
+ super(msg, file, line, nextInChain);
bypassedException = null;
}
@@ -1837,13 +2613,29 @@ class Error : Throwable
Throwable bypassedException;
}
-unittest
+///
+@system unittest
+{
+ bool gotCaught;
+ try
+ {
+ throw new Error("msg");
+ }
+ catch (Error e)
+ {
+ gotCaught = true;
+ assert(e.msg == "msg");
+ }
+ assert(gotCaught);
+}
+
+@safe unittest
{
{
auto e = new Error("msg");
assert(e.file is null);
assert(e.line == 0);
- assert(e.next is null);
+ assert(e.nextInChain is null);
assert(e.msg == "msg");
assert(e.bypassedException is null);
}
@@ -1852,7 +2644,7 @@ unittest
auto e = new Error("msg", new Exception("It's an Exception!"));
assert(e.file is null);
assert(e.line == 0);
- assert(e.next !is null);
+ assert(e.nextInChain !is null);
assert(e.msg == "msg");
assert(e.bypassedException is null);
}
@@ -1861,32 +2653,24 @@ unittest
auto e = new Error("msg", "hello", 42, new Exception("It's an Exception!"));
assert(e.file == "hello");
assert(e.line == 42);
- assert(e.next !is null);
+ assert(e.nextInChain !is null);
assert(e.msg == "msg");
assert(e.bypassedException is null);
}
}
-/* Used in Exception Handling LSDA tables to 'wrap' C++ type info
- * so it can be distinguished from D TypeInfo
- */
-class __cpp_type_info_ptr
-{
- void* ptr; // opaque pointer to C++ RTTI type info
-}
-
extern (C)
{
// from druntime/src/rt/aaA.d
private struct AA { void* impl; }
// size_t _aaLen(in AA aa) pure nothrow @nogc;
- private void* _aaGetY(AA* paa, const TypeInfo_AssociativeArray ti, in size_t valsz, in void* pkey) pure nothrow;
- private void* _aaGetX(AA* paa, const TypeInfo_AssociativeArray ti, in size_t valsz, in void* pkey, out bool found) pure nothrow;
+ private void* _aaGetY(AA* paa, const TypeInfo_AssociativeArray ti, const size_t valsz, const scope void* pkey) pure nothrow;
+ private void* _aaGetX(AA* paa, const TypeInfo_AssociativeArray ti, const size_t valsz, const scope void* pkey, out bool found) pure nothrow;
// inout(void)* _aaGetRvalueX(inout AA aa, in TypeInfo keyti, in size_t valsz, in void* pkey);
- inout(void[]) _aaValues(inout AA aa, in size_t keysz, in size_t valsz, const TypeInfo tiValueArray) pure nothrow;
- inout(void[]) _aaKeys(inout AA aa, in size_t keysz, const TypeInfo tiKeyArray) pure nothrow;
- void* _aaRehash(AA* paa, in TypeInfo keyti) pure nothrow;
+ inout(void[]) _aaValues(inout AA aa, const size_t keysz, const size_t valsz, const TypeInfo tiValueArray) pure nothrow;
+ inout(void[]) _aaKeys(inout AA aa, const size_t keysz, const TypeInfo tiKeyArray) pure nothrow;
+ void* _aaRehash(AA* paa, const scope TypeInfo keyti) pure nothrow;
void _aaClear(AA aa) pure nothrow;
// alias _dg_t = extern(D) int delegate(void*);
@@ -1902,8 +2686,8 @@ extern (C)
void* _aaRangeFrontValue(AARange r) pure nothrow @nogc @safe;
void _aaRangePopFront(ref AARange r) pure nothrow @nogc @safe;
- int _aaEqual(in TypeInfo tiRaw, in AA aa1, in AA aa2);
- hash_t _aaGetHash(in AA* aa, in TypeInfo tiRaw) nothrow;
+ int _aaEqual(scope const TypeInfo tiRaw, scope const AA aa1, scope const AA aa2);
+ hash_t _aaGetHash(scope const AA* aa, scope const TypeInfo tiRaw) nothrow;
/*
_d_assocarrayliteralTX marked as pure, because aaLiteral can be called from pure code.
@@ -1926,17 +2710,44 @@ alias AssociativeArray(Key, Value) = Value[Key];
* Params:
* aa = The associative array.
*/
-void clear(T : Value[Key], Value, Key)(T aa)
+void clear(Value, Key)(Value[Key] aa)
{
_aaClear(*cast(AA *) &aa);
}
/* ditto */
-void clear(T : Value[Key], Value, Key)(T* aa)
+void clear(Value, Key)(Value[Key]* aa)
{
_aaClear(*cast(AA *) aa);
}
+///
+@system unittest
+{
+ auto aa = ["k1": 2];
+ aa.clear;
+ assert("k1" !in aa);
+}
+
+// Issue 20559
+@system unittest
+{
+ static class Foo
+ {
+ int[string] aa;
+ alias aa this;
+ }
+
+ auto v = new Foo();
+ v["Hello World"] = 42;
+ v.clear;
+ assert("Hello World" !in v);
+
+ // Test for T*
+ static assert(!__traits(compiles, (&v).clear));
+ static assert( __traits(compiles, (*(&v)).clear));
+}
+
/***********************************
* Reorganizes the associative array in place so that lookups are more
* efficient.
@@ -2000,15 +2811,16 @@ V[K] dup(T : V[K], K, V)(T aa)
return *cast(V*)pv;
}
- if (auto postblit = _getPostblit!V())
- {
- foreach (k, ref v; aa)
- postblit(duplicateElem(k, v));
- }
- else
+ foreach (k, ref v; aa)
{
- foreach (k, ref v; aa)
+ static if (!__traits(hasPostblit, V))
duplicateElem(k, v);
+ else static if (__traits(isStaticArray, V))
+ _doPostblit(duplicateElem(k, v)[]);
+ else static if (!is(typeof(v.__xpostblit())) && is(immutable V == immutable UV, UV))
+ (() @trusted => *cast(UV*) &duplicateElem(k, v))().__xpostblit();
+ else
+ duplicateElem(k, v).__xpostblit();
}
return result;
@@ -2020,6 +2832,15 @@ V[K] dup(T : V[K], K, V)(T* aa)
return (*aa).dup;
}
+///
+@safe unittest
+{
+ auto aa = ["k1": 2];
+ auto a2 = aa.dup;
+ aa["k2"] = 3;
+ assert("k2" !in a2);
+}
+
// this should never be made public.
private AARange _aaToRange(T: V[K], K, V)(ref T aa) pure nothrow @nogc @safe
{
@@ -2048,10 +2869,9 @@ auto byKey(T : V[K], K, V)(T aa) pure nothrow @nogc @safe
pure nothrow @nogc:
@property bool empty() @safe { return _aaRangeEmpty(r); }
- @property ref front()
+ @property ref front() @trusted
{
- auto p = (() @trusted => cast(substInout!K*) _aaRangeFrontKey(r)) ();
- return *p;
+ return *cast(substInout!K*) _aaRangeFrontKey(r);
}
void popFront() @safe { _aaRangePopFront(r); }
@property Result save() { return this; }
@@ -2066,6 +2886,17 @@ auto byKey(T : V[K], K, V)(T* aa) pure nothrow @nogc
return (*aa).byKey();
}
+///
+@safe unittest
+{
+ auto dict = [1: 0, 2: 0];
+ int sum;
+ foreach (v; dict.byKey)
+ sum += v;
+
+ assert(sum == 3);
+}
+
/***********************************
* Returns a forward range over the values of the associative array.
* Params:
@@ -2083,10 +2914,9 @@ auto byValue(T : V[K], K, V)(T aa) pure nothrow @nogc @safe
pure nothrow @nogc:
@property bool empty() @safe { return _aaRangeEmpty(r); }
- @property ref front()
+ @property ref front() @trusted
{
- auto p = (() @trusted => cast(substInout!V*) _aaRangeFrontValue(r)) ();
- return *p;
+ return *cast(substInout!V*) _aaRangeFrontValue(r);
}
void popFront() @safe { _aaRangePopFront(r); }
@property Result save() { return this; }
@@ -2101,6 +2931,17 @@ auto byValue(T : V[K], K, V)(T* aa) pure nothrow @nogc
return (*aa).byValue();
}
+///
+@safe unittest
+{
+ auto dict = ["k1": 1, "k2": 2];
+ int sum;
+ foreach (v; dict.byValue)
+ sum += v;
+
+ assert(sum == 3);
+}
+
/***********************************
* Returns a forward range over the key value pairs of the associative array.
* Params:
@@ -2127,15 +2968,13 @@ auto byKeyValue(T : V[K], K, V)(T aa) pure nothrow @nogc @safe
private void* keyp;
private void* valp;
- @property ref key() inout
+ @property ref key() inout @trusted
{
- auto p = (() @trusted => cast(substInout!K*) keyp) ();
- return *p;
+ return *cast(substInout!K*) keyp;
}
- @property ref value() inout
+ @property ref value() inout @trusted
{
- auto p = (() @trusted => cast(substInout!V*) valp) ();
- return *p;
+ return *cast(substInout!V*) valp;
}
}
return Pair(_aaRangeFrontKey(r),
@@ -2154,6 +2993,17 @@ auto byKeyValue(T : V[K], K, V)(T* aa) pure nothrow @nogc
return (*aa).byKeyValue();
}
+///
+@safe unittest
+{
+ auto dict = ["k1": 1, "k2": 2];
+ int sum;
+ foreach (e; dict.byKeyValue)
+ sum += e.value;
+
+ assert(sum == 3);
+}
+
/***********************************
* Returns a dynamic array, the elements of which are the keys in the
* associative array.
@@ -2169,9 +3019,12 @@ Key[] keys(T : Value[Key], Value, Key)(T aa) @property
alias realAA = aa;
else
const(Value[Key]) realAA = aa;
- auto a = cast(void[])_aaKeys(*cast(inout(AA)*)&realAA, Key.sizeof, typeid(Key[]));
- auto res = *cast(Key[]*)&a;
- _doPostblit(res);
+ auto res = () @trusted {
+ auto a = cast(void[])_aaKeys(*cast(inout(AA)*)&realAA, Key.sizeof, typeid(Key[]));
+ return *cast(Key[]*)&a;
+ }();
+ static if (__traits(hasPostblit, Key))
+ _doPostblit(res);
return res;
}
@@ -2181,7 +3034,18 @@ Key[] keys(T : Value[Key], Value, Key)(T *aa) @property
return (*aa).keys;
}
-@system unittest
+///
+@safe unittest
+{
+ auto aa = [1: "v1", 2: "v2"];
+ int sum;
+ foreach (k; aa.keys)
+ sum += k;
+
+ assert(sum == 3);
+}
+
+@safe unittest
{
static struct S
{
@@ -2194,6 +3058,36 @@ Key[] keys(T : Value[Key], Value, Key)(T *aa) @property
assert(s.keys.length == 0);
}
+@safe unittest
+{
+ @safe static struct Key
+ {
+ string str;
+ this(this) @safe {}
+ }
+ string[Key] aa;
+ static assert(__traits(compiles, {
+ void test() @safe {
+ const _ = aa.keys;
+ }
+ }));
+}
+
+@safe unittest
+{
+ static struct Key
+ {
+ string str;
+ this(this) @system {}
+ }
+ string[Key] aa;
+ static assert(!__traits(compiles, {
+ void test() @safe {
+ const _ = aa.keys;
+ }
+ }));
+}
+
/***********************************
* Returns a dynamic array, the elements of which are the values in the
* associative array.
@@ -2209,9 +3103,12 @@ Value[] values(T : Value[Key], Value, Key)(T aa) @property
alias realAA = aa;
else
const(Value[Key]) realAA = aa;
- auto a = cast(void[])_aaValues(*cast(inout(AA)*)&realAA, Key.sizeof, Value.sizeof, typeid(Value[]));
- auto res = *cast(Value[]*)&a;
- _doPostblit(res);
+ auto res = () @trusted {
+ auto a = cast(void[])_aaValues(*cast(inout(AA)*)&realAA, Key.sizeof, Value.sizeof, typeid(Value[]));
+ return *cast(Value[]*)&a;
+ }();
+ static if (__traits(hasPostblit, Value))
+ _doPostblit(res);
return res;
}
@@ -2221,7 +3118,18 @@ Value[] values(T : Value[Key], Value, Key)(T *aa) @property
return (*aa).values;
}
-@system unittest
+///
+@safe unittest
+{
+ auto aa = ["k1": 1, "k2": 2];
+ int sum;
+ foreach (e; aa.values)
+ sum += e;
+
+ assert(sum == 3);
+}
+
+@safe unittest
{
static struct S
{
@@ -2234,6 +3142,36 @@ Value[] values(T : Value[Key], Value, Key)(T *aa) @property
assert(s.values.length == 0);
}
+@safe unittest
+{
+ @safe static struct Value
+ {
+ string str;
+ this(this) @safe {}
+ }
+ Value[string] aa;
+ static assert(__traits(compiles, {
+ void test() @safe {
+ const _ = aa.values;
+ }
+ }));
+}
+
+@safe unittest
+{
+ static struct Value
+ {
+ string str;
+ this(this) @system {}
+ }
+ Value[string] aa;
+ static assert(!__traits(compiles, {
+ void test() @safe {
+ const _ = aa.values;
+ }
+ }));
+}
+
/***********************************
* Looks up key; if it exists returns corresponding value else evaluates and
* returns defaultValue.
@@ -2256,6 +3194,13 @@ inout(V) get(K, V)(inout(V[K])* aa, K key, lazy inout(V) defaultValue)
return (*aa).get(key, defaultValue);
}
+@safe unittest
+{
+ auto aa = ["k1": 1];
+ assert(aa.get("k1", 0) == 1);
+ assert(aa.get("k2", 0) == 0);
+}
+
/***********************************
* Looks up key; if it exists returns corresponding value else evaluates
* value, adds it to the associative array and returns it.
@@ -2281,48 +3226,38 @@ ref V require(K, V)(ref V[K] aa, K key, lazy V value = V.init)
{
auto p = cast(V*) _aaGetX(cast(AA*) &aa, typeid(V[K]), V.sizeof, &key, found);
}
- return found ? *p : (*p = value);
-}
-
-// Constraints for aa update. Delegates, Functions or Functors (classes that
-// provide opCall) are allowed. See unittest for an example.
-private
-{
- template isCreateOperation(C, V)
+ if (found)
+ return *p;
+ else
{
- static if (is(C : V delegate()) || is(C : V function()))
- enum bool isCreateOperation = true;
- else static if (isCreateOperation!(typeof(&C.opCall), V))
- enum bool isCreateOperation = true;
- else
- enum bool isCreateOperation = false;
+ *p = value; // Not `return (*p = value)` since if `=` is overloaded
+ return *p; // this might not return a ref to the left-hand side.
}
+}
- template isUpdateOperation(U, V)
- {
- static if (is(U : V delegate(ref V)) || is(U : V function(ref V)))
- enum bool isUpdateOperation = true;
- else static if (isUpdateOperation!(typeof(&U.opCall), V))
- enum bool isUpdateOperation = true;
- else
- enum bool isUpdateOperation = false;
- }
+///
+@safe unittest
+{
+ auto aa = ["k1": 1];
+ assert(aa.require("k1", 0) == 1);
+ assert(aa.require("k2", 0) == 0);
+ assert(aa["k2"] == 0);
}
// Tests whether T can be @safe-ly copied. Use a union to exclude destructor from the test.
private enum bool isSafeCopyable(T) = is(typeof(() @safe { union U { T x; } T *x; auto u = U(*x); }));
/***********************************
- * Looks up key; if it exists applies the update delegate else evaluates the
- * create delegate and adds it to the associative array
+ * Looks up key; if it exists, applies the update callable, else evaluates the
+ * create callable and adds it to the associative array.
* Params:
* aa = The associative array.
* key = The key.
- * create = The delegate to apply on create.
- * update = The delegate to apply on update.
+ * create = The callable to apply on create.
+ * update = The callable to apply on update.
*/
void update(K, V, C, U)(ref V[K] aa, K key, scope C create, scope U update)
-if (isCreateOperation!(C, V) && isUpdateOperation!(U, V))
+if (is(typeof(create()) : V) && (is(typeof(update(aa[K.init])) : V) || is(typeof(update(aa[K.init])) == void)))
{
bool found;
// if key is @safe-ly copyable, `update` may infer @safe
@@ -2340,10 +3275,35 @@ if (isCreateOperation!(C, V) && isUpdateOperation!(U, V))
if (!found)
*p = create();
else
- *p = update(*p);
+ {
+ static if (is(typeof(update(*p)) == void))
+ update(*p);
+ else
+ *p = update(*p);
+ }
+}
+
+///
+@system unittest
+{
+ auto aa = ["k1": 1];
+
+ aa.update("k1", {
+ return -1; // create (won't be executed)
+ }, (ref int v) {
+ v += 1; // update
+ });
+ assert(aa["k1"] == 2);
+
+ aa.update("k2", {
+ return 0; // create
+ }, (ref int v) {
+ v = -1; // update (won't be executed)
+ });
+ assert(aa["k2"] == 0);
}
-unittest
+@safe unittest
{
static struct S
{
@@ -2373,557 +3333,348 @@ unittest
static assert(!is(typeof(() @safe { aais.update(S(1234), { return 1234; }, (ref int x) { x++; return x; }); })));
}
-private void _destructRecurse(S)(ref S s)
- if (is(S == struct))
-{
- static if (__traits(hasMember, S, "__xdtor") &&
- // Bugzilla 14746: Check that it's the exact member of S.
- __traits(isSame, S, __traits(parent, s.__xdtor)))
- s.__xdtor();
-}
-
-private void _destructRecurse(E, size_t n)(ref E[n] arr)
-{
- import core.internal.traits : hasElaborateDestructor;
-
- static if (hasElaborateDestructor!E)
- {
- foreach_reverse (ref elem; arr)
- _destructRecurse(elem);
- }
-}
-
-// Public and explicitly undocumented
-void _postblitRecurse(S)(ref S s)
- if (is(S == struct))
-{
- static if (__traits(hasMember, S, "__xpostblit") &&
- // Bugzilla 14746: Check that it's the exact member of S.
- __traits(isSame, S, __traits(parent, s.__xpostblit)))
- s.__xpostblit();
-}
-
-// Ditto
-void _postblitRecurse(E, size_t n)(ref E[n] arr)
+@safe unittest
{
- import core.internal.traits : hasElaborateCopyConstructor;
-
- static if (hasElaborateCopyConstructor!E)
+ struct S0
{
- size_t i;
- scope(failure)
+ int opCall(ref int v)
{
- for (; i != 0; --i)
- {
- _destructRecurse(arr[i - 1]); // What to do if this throws?
- }
+ return v + 1;
}
-
- for (i = 0; i < arr.length; ++i)
- _postblitRecurse(arr[i]);
}
-}
-
-// Test destruction/postblit order
-@safe nothrow pure unittest
-{
- string[] order;
- struct InnerTop
+ struct S1
{
- ~this() @safe nothrow pure
+ int opCall()()
{
- order ~= "destroy inner top";
+ return -2;
}
- this(this) @safe nothrow pure
+ T opCall(T)(ref T v)
{
- order ~= "copy inner top";
+ return v + 1;
}
}
- struct InnerMiddle {}
+ int[string] a = ["2" : 1];
+ a.update("2", () => -1, S0.init);
+ assert(a["2"] == 2);
+ a.update("0", () => -1, S0.init);
+ assert(a["0"] == -1);
+ a.update("2", S1.init, S1.init);
+ assert(a["2"] == 3);
+ a.update("1", S1.init, S1.init);
+ assert(a["1"] == -2);
+}
- version (none) // https://issues.dlang.org/show_bug.cgi?id=14242
- struct InnerElement
- {
- static char counter = '1';
+@system unittest
+{
+ int[string] aa;
- ~this() @safe nothrow pure
- {
- order ~= "destroy inner element #" ~ counter++;
- }
+ foreach (n; 0 .. 2)
+ aa.update("k1", {
+ return 7;
+ }, (ref int v) {
+ return v + 3;
+ });
+ assert(aa["k1"] == 10);
+}
- this(this) @safe nothrow pure
- {
- order ~= "copy inner element #" ~ counter++;
- }
- }
+version (CoreDdoc)
+{
+ // This lets DDoc produce better documentation.
- struct InnerBottom
- {
- ~this() @safe nothrow pure
- {
- order ~= "destroy inner bottom";
- }
+ /**
+ Calculates the hash value of `arg` with an optional `seed` initial value.
+ The result might not be equal to `typeid(T).getHash(&arg)`.
- this(this) @safe nothrow pure
- {
- order ~= "copy inner bottom";
- }
- }
+ Params:
+ arg = argument to calculate the hash value of
+ seed = optional `seed` value (may be used for hash chaining)
- struct S
+ Returns: calculated hash value of `arg`
+ */
+ size_t hashOf(T)(auto ref T arg, size_t seed)
{
- char[] s;
- InnerTop top;
- InnerMiddle middle;
- version (none) InnerElement[3] array; // https://issues.dlang.org/show_bug.cgi?id=14242
- int a;
- InnerBottom bottom;
- ~this() @safe nothrow pure { order ~= "destroy outer"; }
- this(this) @safe nothrow pure { order ~= "copy outer"; }
+ static import core.internal.hash;
+ return core.internal.hash.hashOf(arg, seed);
}
-
- string[] destructRecurseOrder;
+ /// ditto
+ size_t hashOf(T)(auto ref T arg)
{
- S s;
- _destructRecurse(s);
- destructRecurseOrder = order;
- order = null;
+ static import core.internal.hash;
+ return core.internal.hash.hashOf(arg);
}
- assert(order.length);
- assert(destructRecurseOrder == order);
- order = null;
-
- S s;
- _postblitRecurse(s);
- assert(order.length);
- auto postblitRecurseOrder = order;
- order = null;
- S s2 = s;
- assert(order.length);
- assert(postblitRecurseOrder == order);
+ @safe unittest
+ {
+ auto h1 = "my.string".hashOf;
+ assert(h1 == "my.string".hashOf);
+ }
}
-
-// Test static struct
-nothrow @safe @nogc unittest
+else
{
- static int i = 0;
- static struct S { ~this() nothrow @safe @nogc { i = 42; } }
- S s;
- _destructRecurse(s);
- assert(i == 42);
+ public import core.internal.hash : hashOf;
}
-unittest
+///
+@system unittest
{
- // Bugzilla 14746
- static struct HasDtor
+ class MyObject
{
- ~this() { assert(0); }
+ size_t myMegaHash() const @safe pure nothrow
+ {
+ return 42;
+ }
}
- static struct Owner
+ struct Test
{
- HasDtor* ptr;
- alias ptr this;
+ int a;
+ string b;
+ MyObject c;
+ size_t toHash() const pure nothrow
+ {
+ size_t hash = a.hashOf();
+ hash = b.hashOf(hash);
+ size_t h1 = c.myMegaHash();
+ hash = h1.hashOf(hash); //Mix two hash values
+ return hash;
+ }
}
+}
- Owner o;
- assert(o.ptr is null);
- destroy(o); // must not reach in HasDtor.__dtor()
+bool _xopEquals(in void*, in void*)
+{
+ throw new Error("TypeInfo.equals is not implemented");
}
-unittest
+bool _xopCmp(in void*, in void*)
{
- // Bugzilla 14746
- static struct HasPostblit
- {
- this(this) { assert(0); }
- }
- static struct Owner
- {
- HasPostblit* ptr;
- alias ptr this;
- }
+ throw new Error("TypeInfo.compare is not implemented");
+}
- Owner o;
- assert(o.ptr is null);
- _postblitRecurse(o); // must not reach in HasPostblit.__postblit()
+/******************************************
+ * Create RTInfo for type T
+ */
+
+template RTInfoImpl(size_t[] pointerBitmap)
+{
+ immutable size_t[pointerBitmap.length] RTInfoImpl = pointerBitmap[];
}
-// Test handling of fixed-length arrays
-// Separate from first test because of https://issues.dlang.org/show_bug.cgi?id=14242
-unittest
+template NoPointersBitmapPayload(size_t N)
{
- string[] order;
+ enum size_t[N] NoPointersBitmapPayload = 0;
+}
- struct S
- {
- char id;
+template RTInfo(T)
+{
+ enum pointerBitmap = __traits(getPointerBitmap, T);
+ static if (pointerBitmap[1 .. $] == NoPointersBitmapPayload!(pointerBitmap.length - 1))
+ enum RTInfo = rtinfoNoPointers;
+ else
+ enum RTInfo = RTInfoImpl!(pointerBitmap).ptr;
+}
- this(this)
- {
- order ~= "copy #" ~ id;
- }
+/**
+* Shortcuts for the precise GC, also generated by the compiler.
+* They are used instead of the actual pointer bitmap.
+*/
+enum immutable(void)* rtinfoNoPointers = null;
+enum immutable(void)* rtinfoHasPointers = cast(void*)1;
- ~this()
- {
- order ~= "destroy #" ~ id;
- }
- }
+// Helper functions
- string[] destructRecurseOrder;
+private inout(TypeInfo) getElement(return inout TypeInfo value) @trusted pure nothrow
+{
+ TypeInfo element = cast() value;
+ for (;;)
{
- S[3] arr = [S('1'), S('2'), S('3')];
- _destructRecurse(arr);
- destructRecurseOrder = order;
- order = null;
+ if (auto qualified = cast(TypeInfo_Const) element)
+ element = qualified.base;
+ else if (auto redefined = cast(TypeInfo_Enum) element)
+ element = redefined.base;
+ else if (auto staticArray = cast(TypeInfo_StaticArray) element)
+ element = staticArray.value;
+ else if (auto vector = cast(TypeInfo_Vector) element)
+ element = vector.base;
+ else
+ break;
}
- assert(order.length);
- assert(destructRecurseOrder == order);
- order = null;
-
- S[3] arr = [S('1'), S('2'), S('3')];
- _postblitRecurse(arr);
- assert(order.length);
- auto postblitRecurseOrder = order;
- order = null;
-
- auto arrCopy = arr;
- assert(order.length);
- assert(postblitRecurseOrder == order);
+ return cast(inout) element;
}
-// Test handling of failed postblit
-// Not nothrow or @safe because of https://issues.dlang.org/show_bug.cgi?id=14242
-/+ nothrow @safe +/ unittest
+private size_t getArrayHash(const scope TypeInfo element, const scope void* ptr, const size_t count) @trusted nothrow
{
- static class FailedPostblitException : Exception { this() nothrow @safe { super(null); } }
- static string[] order;
- static struct Inner
- {
- char id;
-
- @safe:
- this(this)
- {
- order ~= "copy inner #" ~ id;
- if (id == '2')
- throw new FailedPostblitException();
- }
+ if (!count)
+ return 0;
- ~this() nothrow
- {
- order ~= "destroy inner #" ~ id;
- }
- }
+ const size_t elementSize = element.tsize;
+ if (!elementSize)
+ return 0;
- static struct Outer
+ static bool hasCustomToHash(const scope TypeInfo value) @trusted pure nothrow
{
- Inner inner1, inner2, inner3;
-
- nothrow @safe:
- this(char first, char second, char third)
- {
- inner1 = Inner(first);
- inner2 = Inner(second);
- inner3 = Inner(third);
- }
+ const element = getElement(value);
- this(this)
- {
- order ~= "copy outer";
- }
+ if (const struct_ = cast(const TypeInfo_Struct) element)
+ return !!struct_.xtoHash;
- ~this()
- {
- order ~= "destroy outer";
- }
+ return cast(const TypeInfo_Array) element
+ || cast(const TypeInfo_AssociativeArray) element
+ || cast(const ClassInfo) element
+ || cast(const TypeInfo_Interface) element;
}
- auto outer = Outer('1', '2', '3');
-
- try _postblitRecurse(outer);
- catch (FailedPostblitException) {}
- catch (Exception) assert(false);
-
- auto postblitRecurseOrder = order;
- order = null;
-
- try auto copy = outer;
- catch (FailedPostblitException) {}
- catch (Exception) assert(false);
-
- assert(postblitRecurseOrder == order);
- order = null;
-
- Outer[3] arr = [Outer('1', '1', '1'), Outer('1', '2', '3'), Outer('3', '3', '3')];
-
- try _postblitRecurse(arr);
- catch (FailedPostblitException) {}
- catch (Exception) assert(false);
+ if (!hasCustomToHash(element))
+ return hashOf(ptr[0 .. elementSize * count]);
- postblitRecurseOrder = order;
- order = null;
+ size_t hash = 0;
+ foreach (size_t i; 0 .. count)
+ hash = hashOf(element.getHash(ptr + i * elementSize), hash);
+ return hash;
+}
- try auto arrCopy = arr;
- catch (FailedPostblitException) {}
- catch (Exception) assert(false);
+/// Provide the .dup array property.
+@property auto dup(T)(T[] a)
+ if (!is(const(T) : T))
+{
+ import core.internal.traits : Unconst;
+ static assert(is(T : Unconst!T), "Cannot implicitly convert type "~T.stringof~
+ " to "~Unconst!T.stringof~" in dup.");
- assert(postblitRecurseOrder == order);
+ return _dup!(T, Unconst!T)(a);
}
-/++
- Destroys the given object and puts it in an invalid state. It's used to
- _destroy an object so that any cleanup which its destructor or finalizer
- does is done and so that it no longer references any other objects. It does
- $(I not) initiate a GC cycle or free any GC memory.
- +/
-void destroy(T)(T obj) if (is(T == class))
+///
+@safe unittest
{
- rt_finalize(cast(void*)obj);
+ auto arr = [1, 2];
+ auto arr2 = arr.dup;
+ arr[0] = 0;
+ assert(arr == [0, 2]);
+ assert(arr2 == [1, 2]);
}
/// ditto
-void destroy(T)(T obj) if (is(T == interface))
-{
- destroy(cast(Object)obj);
-}
-
-version (unittest) unittest
-{
- interface I { }
- {
- class A: I { string s = "A"; this() {} }
- auto a = new A, b = new A;
- a.s = b.s = "asd";
- destroy(a);
- assert(a.s == "A");
-
- I i = b;
- destroy(i);
- assert(b.s == "A");
- }
- {
- static bool destroyed = false;
- class B: I
- {
- string s = "B";
- this() {}
- ~this()
- {
- destroyed = true;
- }
- }
- auto a = new B, b = new B;
- a.s = b.s = "asd";
- destroy(a);
- assert(destroyed);
- assert(a.s == "B");
-
- destroyed = false;
- I i = b;
- destroy(i);
- assert(destroyed);
- assert(b.s == "B");
- }
- // this test is invalid now that the default ctor is not run after clearing
- version (none)
- {
- class C
- {
- string s;
- this()
- {
- s = "C";
- }
- }
- auto a = new C;
- a.s = "asd";
- destroy(a);
- assert(a.s == "C");
- }
+// const overload to support implicit conversion to immutable (unique result, see DIP29)
+@property T[] dup(T)(const(T)[] a)
+ if (is(const(T) : T))
+{
+ return _dup!(const(T), T)(a);
}
-/// ditto
-void destroy(T)(ref T obj) if (is(T == struct))
-{
- _destructRecurse(obj);
- () @trusted {
- auto buf = (cast(ubyte*) &obj)[0 .. T.sizeof];
- auto init = cast(ubyte[])typeid(T).initializer();
- if (init.ptr is null) // null ptr means initialize to 0s
- buf[] = 0;
- else
- buf[] = init[];
- } ();
-}
-version (unittest) nothrow @safe @nogc unittest
-{
- {
- struct A { string s = "A"; }
- A a;
- a.s = "asd";
- destroy(a);
- assert(a.s == "A");
- }
- {
- static int destroyed = 0;
- struct C
- {
- string s = "C";
- ~this() nothrow @safe @nogc
- {
- destroyed ++;
- }
- }
-
- struct B
- {
- C c;
- string s = "B";
- ~this() nothrow @safe @nogc
- {
- destroyed ++;
- }
- }
- B a;
- a.s = "asd";
- a.c.s = "jkl";
- destroy(a);
- assert(destroyed == 2);
- assert(a.s == "B");
- assert(a.c.s == "C" );
- }
+/// Provide the .idup array property.
+@property immutable(T)[] idup(T)(T[] a)
+{
+ static assert(is(T : immutable(T)), "Cannot implicitly convert type "~T.stringof~
+ " to immutable in idup.");
+ return _dup!(T, immutable(T))(a);
}
/// ditto
-void destroy(T : U[n], U, size_t n)(ref T obj) if (!is(T == struct))
+@property immutable(T)[] idup(T:void)(const(T)[] a)
{
- foreach_reverse (ref e; obj[])
- destroy(e);
+ return a.dup;
}
-version (unittest) unittest
+///
+@safe unittest
{
- int[2] a;
- a[0] = 1;
- a[1] = 2;
- destroy(a);
- assert(a == [ 0, 0 ]);
+ char[] arr = ['a', 'b', 'c'];
+ string s = arr.idup;
+ arr[0] = '.';
+ assert(s == "abc");
}
-unittest
+private U[] _dup(T, U)(scope T[] a) pure nothrow @trusted if (__traits(isPOD, T))
{
- static struct vec2f {
- float[2] values;
- alias values this;
- }
+ if (__ctfe)
+ return _dupCtfe!(T, U)(a);
- vec2f v;
- destroy!vec2f(v);
+ import core.stdc.string : memcpy;
+ auto arr = _d_newarrayU(typeid(T[]), a.length);
+ memcpy(arr.ptr, cast(const(void)*) a.ptr, T.sizeof * a.length);
+ return *cast(U[]*) &arr;
}
-unittest
+private U[] _dupCtfe(T, U)(scope T[] a)
{
- // Bugzilla 15009
- static string op;
- static struct S
- {
- int x;
- this(int x) { op ~= "C" ~ cast(char)('0'+x); this.x = x; }
- this(this) { op ~= "P" ~ cast(char)('0'+x); }
- ~this() { op ~= "D" ~ cast(char)('0'+x); }
- }
-
- {
- S[2] a1 = [S(1), S(2)];
- op = "";
- }
- assert(op == "D2D1"); // built-in scope destruction
- {
- S[2] a1 = [S(1), S(2)];
- op = "";
- destroy(a1);
- assert(op == "D2D1"); // consistent with built-in behavior
- }
-
+ static if (is(T : void))
+ assert(0, "Cannot dup a void[] array at compile time.");
+ else
{
- S[2][2] a2 = [[S(1), S(2)], [S(3), S(4)]];
- op = "";
- }
- assert(op == "D4D3D2D1");
- {
- S[2][2] a2 = [[S(1), S(2)], [S(3), S(4)]];
- op = "";
- destroy(a2);
- assert(op == "D4D3D2D1", op);
+ U[] res;
+ foreach (ref e; a)
+ res ~= e;
+ return res;
}
}
-/// ditto
-void destroy(T)(ref T obj)
- if (!is(T == struct) && !is(T == interface) && !is(T == class) && !_isStaticArray!T)
+private U[] _dup(T, U)(T[] a) if (!__traits(isPOD, T))
{
- obj = T.init;
-}
+ // note: copyEmplace is `@system` inside a `@trusted` block, so the __ctfe branch
+ // has the extra duty to infer _dup `@system` when the copy-constructor is `@system`.
+ if (__ctfe)
+ return _dupCtfe!(T, U)(a);
-template _isStaticArray(T : U[N], U, size_t N)
-{
- enum bool _isStaticArray = true;
-}
+ import core.lifetime: copyEmplace;
+ U[] res = () @trusted {
+ auto arr = cast(U*) _d_newarrayU(typeid(T[]), a.length);
+ size_t i;
+ scope (failure)
+ {
+ import core.internal.lifetime: emplaceInitializer;
+ // Initialize all remaining elements to not destruct garbage
+ foreach (j; i .. a.length)
+ emplaceInitializer(cast() arr[j]);
+ }
+ for (; i < a.length; i++)
+ {
+ copyEmplace(a.ptr[i], arr[i]);
+ }
+ return cast(U[])(arr[0..a.length]);
+ } ();
-template _isStaticArray(T)
-{
- enum bool _isStaticArray = false;
+ return res;
}
-version (unittest) unittest
+// https://issues.dlang.org/show_bug.cgi?id=22107
+@safe unittest
{
- {
- int a = 42;
- destroy(a);
- assert(a == 0);
- }
- {
- float a = 42;
- destroy(a);
- assert(isnan(a));
- }
-}
+ static int i;
+ @safe struct S
+ {
+ this(this) { i++; }
+ }
-version (unittest)
-{
- private bool isnan(float x)
+ void fun(scope S[] values...) @safe
{
- return x != x;
+ values.dup;
}
}
-private
-{
- extern (C) void _d_arrayshrinkfit(const TypeInfo ti, void[] arr) nothrow;
- extern (C) size_t _d_arraysetcapacity(const TypeInfo ti, size_t newcapacity, void *arrptr) pure nothrow;
-}
+// HACK: This is a lie. `_d_arraysetcapacity` is neither `nothrow` nor `pure`, but this lie is
+// necessary for now to prevent breaking code.
+private extern (C) size_t _d_arraysetcapacity(const TypeInfo ti, size_t newcapacity, void[]* arrptr) pure nothrow;
/**
- * (Property) Gets the current _capacity of a slice. The _capacity is the size
- * that the slice can grow to before the underlying array must be
- * reallocated or extended.
- *
- * If an append must reallocate a slice with no possibility of extension, then
- * `0` is returned. This happens when the slice references a static array, or
- * if another slice references elements past the end of the current slice.
- *
- * Note: The _capacity of a slice may be impacted by operations on other slices.
- */
+(Property) Gets the current _capacity of a slice. The _capacity is the size
+that the slice can grow to before the underlying array must be
+reallocated or extended.
+
+If an append must reallocate a slice with no possibility of extension, then
+`0` is returned. This happens when the slice references a static array, or
+if another slice references elements past the end of the current slice.
+
+Note: The _capacity of a slice may be impacted by operations on other slices.
+*/
@property size_t capacity(T)(T[] arr) pure nothrow @trusted
{
- return _d_arraysetcapacity(typeid(T[]), 0, cast(void *)&arr);
+ return _d_arraysetcapacity(typeid(T[]), 0, cast(void[]*)&arr);
}
+
///
@safe unittest
{
@@ -2948,38 +3699,54 @@ private
}
/**
- * Reserves capacity for a slice. The capacity is the size
- * that the slice can grow to before the underlying array must be
- * reallocated or extended.
- *
- * Returns: The new capacity of the array (which may be larger than
- * the requested capacity).
- */
+Reserves capacity for a slice. The capacity is the size
+that the slice can grow to before the underlying array must be
+reallocated or extended.
+
+Returns: The new capacity of the array (which may be larger than
+the requested capacity).
+*/
size_t reserve(T)(ref T[] arr, size_t newcapacity) pure nothrow @trusted
{
- return _d_arraysetcapacity(typeid(T[]), newcapacity, cast(void *)&arr);
+ if (__ctfe)
+ return newcapacity;
+ else
+ return _d_arraysetcapacity(typeid(T[]), newcapacity, cast(void[]*)&arr);
}
+
///
-unittest
+@safe unittest
{
//Static array slice: no capacity. Reserve relocates.
int[4] sarray = [1, 2, 3, 4];
int[] slice = sarray[];
auto u = slice.reserve(8);
assert(u >= 8);
- assert(sarray.ptr !is slice.ptr);
+ assert(&sarray[0] !is &slice[0]);
assert(slice.capacity == u);
//Dynamic array slices
int[] a = [1, 2, 3, 4];
a.reserve(8); //prepare a for appending 4 more items
- auto p = a.ptr;
+ auto p = &a[0];
u = a.capacity;
a ~= [5, 6, 7, 8];
- assert(p == a.ptr); //a should not have been reallocated
+ assert(p == &a[0]); //a should not have been reallocated
assert(u == a.capacity); //a should not have been extended
}
+// https://issues.dlang.org/show_bug.cgi?id=12330, reserve() at CTFE time
+@safe unittest
+{
+ int[] foo() {
+ int[] result;
+ auto a = result.reserve = 5;
+ assert(a == 5);
+ return result;
+ }
+ enum r = foo();
+}
+
// Issue 6646: should be possible to use array.reserve from SafeD.
@safe unittest
{
@@ -2987,28 +3754,33 @@ unittest
a.reserve(10);
}
+// HACK: This is a lie. `_d_arrayshrinkfit` is not `nothrow`, but this lie is necessary
+// for now to prevent breaking code.
+private extern (C) void _d_arrayshrinkfit(const TypeInfo ti, void[] arr) nothrow;
+
/**
- * Assume that it is safe to append to this array. Appends made to this array
- * after calling this function may append in place, even if the array was a
- * slice of a larger array to begin with.
- *
- * Use this only when it is certain there are no elements in use beyond the
- * array in the memory block. If there are, those elements will be
- * overwritten by appending to this array.
- *
- * Warning: Calling this function, and then using references to data located after the
- * given array results in undefined behavior.
- *
- * Returns:
- * The input is returned.
- */
-auto ref inout(T[]) assumeSafeAppend(T)(auto ref inout(T[]) arr) nothrow
+Assume that it is safe to append to this array. Appends made to this array
+after calling this function may append in place, even if the array was a
+slice of a larger array to begin with.
+
+Use this only when it is certain there are no elements in use beyond the
+array in the memory block. If there are, those elements will be
+overwritten by appending to this array.
+
+Warning: Calling this function, and then using references to data located after the
+given array results in undefined behavior.
+
+Returns:
+ The input is returned.
+*/
+auto ref inout(T[]) assumeSafeAppend(T)(auto ref inout(T[]) arr) nothrow @system
{
_d_arrayshrinkfit(typeid(T[]), *(cast(void[]*)&arr));
return arr;
}
+
///
-unittest
+@system unittest
{
int[] a = [1, 2, 3, 4];
@@ -3026,7 +3798,7 @@ unittest
}
}
-unittest
+@system unittest
{
int[] arr;
auto newcap = arr.reserve(2000);
@@ -3042,7 +3814,7 @@ unittest
assert(ptr == arr.ptr);
}
-unittest
+@system unittest
{
int[] arr = [1, 2, 3];
void foo(ref int[] i)
@@ -3056,7 +3828,7 @@ unittest
}
// https://issues.dlang.org/show_bug.cgi?id=10574
-unittest
+@system unittest
{
int[] a;
immutable(int[]) b;
@@ -3070,914 +3842,913 @@ unittest
assert(is(typeof(b3) == immutable(int[])));
}
-version (none)
-{
- // enforce() copied from Phobos std.contracts for destroy(), left out until
- // we decide whether to use it.
-
+private extern (C) void[] _d_newarrayU(const scope TypeInfo ti, size_t length) pure nothrow;
- T _enforce(T, string file = __FILE__, int line = __LINE__)
- (T value, lazy const(char)[] msg = null)
+private void _doPostblit(T)(T[] arr)
+{
+ // infer static postblit type, run postblit if any
+ static if (__traits(hasPostblit, T))
{
- if (!value) bailOut(file, line, msg);
- return value;
+ static if (__traits(isStaticArray, T) && is(T : E[], E))
+ _doPostblit(cast(E[]) arr);
+ else static if (!is(typeof(arr[0].__xpostblit())) && is(immutable T == immutable U, U))
+ foreach (ref elem; (() @trusted => cast(U[]) arr)())
+ elem.__xpostblit();
+ else
+ foreach (ref elem; arr)
+ elem.__xpostblit();
}
+}
- T _enforce(T, string file = __FILE__, int line = __LINE__)
- (T value, scope void delegate() dg)
- {
- if (!value) dg();
- return value;
- }
+@safe unittest
+{
+ static struct S1 { int* p; }
+ static struct S2 { @disable this(); }
+ static struct S3 { @disable this(this); }
- T _enforce(T)(T value, lazy Exception ex)
+ int dg1() pure nothrow @safe
{
- if (!value) throw ex();
- return value;
+ {
+ char[] m;
+ string i;
+ m = m.dup;
+ i = i.idup;
+ m = i.dup;
+ i = m.idup;
+ }
+ {
+ S1[] m;
+ immutable(S1)[] i;
+ m = m.dup;
+ i = i.idup;
+ static assert(!is(typeof(m.idup)));
+ static assert(!is(typeof(i.dup)));
+ }
+ {
+ S3[] m;
+ immutable(S3)[] i;
+ static assert(!is(typeof(m.dup)));
+ static assert(!is(typeof(i.idup)));
+ }
+ {
+ shared(S1)[] m;
+ m = m.dup;
+ static assert(!is(typeof(m.idup)));
+ }
+ {
+ int[] a = (inout(int)) { inout(const(int))[] a; return a.dup; }(0);
+ }
+ return 1;
}
- private void _bailOut(string file, int line, in char[] msg)
+ int dg2() pure nothrow @safe
{
- char[21] buf;
- throw new Exception(cast(string)(file ~ "(" ~ ulongToString(buf[], line) ~ "): " ~ (msg ? msg : "Enforcement failed")));
+ {
+ S2[] m = [S2.init, S2.init];
+ immutable(S2)[] i = [S2.init, S2.init];
+ m = m.dup;
+ m = i.dup;
+ i = m.idup;
+ i = i.idup;
+ }
+ return 2;
}
-}
-
-/***************************************
- * Helper function used to see if two containers of different
- * types have the same contents in the same sequence.
- */
+ enum a = dg1();
+ enum b = dg2();
+ assert(dg1() == a);
+ assert(dg2() == b);
+}
-bool _ArrayEq(T1, T2)(T1[] a1, T2[] a2)
+@system unittest
{
- if (a1.length != a2.length)
- return false;
+ static struct Sunpure { this(this) @safe nothrow {} }
+ static struct Sthrow { this(this) @safe pure {} }
+ static struct Sunsafe { this(this) @system pure nothrow {} }
+ static struct Snocopy { @disable this(this); }
- // This is function is used as a compiler intrinsic and explicitly written
- // in a lowered flavor to use as few CTFE instructions as possible.
- size_t idx = 0;
- immutable length = a1.length;
+ [].dup!Sunpure;
+ [].dup!Sthrow;
+ cast(void) [].dup!Sunsafe;
+ static assert(!__traits(compiles, () pure { [].dup!Sunpure; }));
+ static assert(!__traits(compiles, () nothrow { [].dup!Sthrow; }));
+ static assert(!__traits(compiles, () @safe { [].dup!Sunsafe; }));
+ static assert(!__traits(compiles, () { [].dup!Snocopy; }));
- for (;idx < length;++idx)
- {
- if (a1[idx] != a2[idx])
- return false;
- }
- return true;
+ [].idup!Sunpure;
+ [].idup!Sthrow;
+ [].idup!Sunsafe;
+ static assert(!__traits(compiles, () pure { [].idup!Sunpure; }));
+ static assert(!__traits(compiles, () nothrow { [].idup!Sthrow; }));
+ static assert(!__traits(compiles, () @safe { [].idup!Sunsafe; }));
+ static assert(!__traits(compiles, () { [].idup!Snocopy; }));
}
-version (D_Ddoc)
+@safe unittest
{
- // This lets DDoc produce better documentation.
+ // test that the copy-constructor is called with .dup
+ static struct ArrElem
+ {
+ int a;
+ this(int a)
+ {
+ this.a = a;
+ }
+ this(ref const ArrElem)
+ {
+ a = 2;
+ }
+ this(ref ArrElem) immutable
+ {
+ a = 3;
+ }
+ }
- /**
- Calculates the hash value of `arg` with an optional `seed` initial value.
- The result might not be equal to `typeid(T).getHash(&arg)`.
+ auto arr = [ArrElem(1), ArrElem(1)];
- Params:
- arg = argument to calculate the hash value of
- seed = optional `seed` value (may be used for hash chaining)
+ ArrElem[] b = arr.dup;
+ assert(b[0].a == 2 && b[1].a == 2);
- Return: calculated hash value of `arg`
- */
- size_t hashOf(T)(auto ref T arg, size_t seed)
- {
- static import core.internal.hash;
- return core.internal.hash.hashOf(arg, seed);
- }
- /// ditto
- size_t hashOf(T)(auto ref T arg)
- {
- static import core.internal.hash;
- return core.internal.hash.hashOf(arg);
- }
-}
-else
-{
- public import core.internal.hash : hashOf;
+ immutable ArrElem[] c = arr.idup;
+ assert(c[0].a == 3 && c[1].a == 3);
}
-unittest
+@system unittest
{
- // Issue # 16654 / 16764
- auto a = [1];
- auto b = a.dup;
- assert(hashOf(a) == hashOf(b));
+ static struct Sunpure { this(ref const typeof(this)) @safe nothrow {} }
+ static struct Sthrow { this(ref const typeof(this)) @safe pure {} }
+ static struct Sunsafe { this(ref const typeof(this)) @system pure nothrow {} }
+ [].dup!Sunpure;
+ [].dup!Sthrow;
+ cast(void) [].dup!Sunsafe;
+ static assert(!__traits(compiles, () pure { [].dup!Sunpure; }));
+ static assert(!__traits(compiles, () nothrow { [].dup!Sthrow; }));
+ static assert(!__traits(compiles, () @safe { [].dup!Sunsafe; }));
+
+ // for idup to work on structs that have copy constructors, it is necessary
+ // that the struct defines a copy constructor that creates immutable objects
+ static struct ISunpure { this(ref const typeof(this)) immutable @safe nothrow {} }
+ static struct ISthrow { this(ref const typeof(this)) immutable @safe pure {} }
+ static struct ISunsafe { this(ref const typeof(this)) immutable @system pure nothrow {} }
+ [].idup!ISunpure;
+ [].idup!ISthrow;
+ [].idup!ISunsafe;
+ static assert(!__traits(compiles, () pure { [].idup!ISunpure; }));
+ static assert(!__traits(compiles, () nothrow { [].idup!ISthrow; }));
+ static assert(!__traits(compiles, () @safe { [].idup!ISunsafe; }));
}
-bool _xopEquals(in void*, in void*)
+@safe unittest
{
- throw new Error("TypeInfo.equals is not implemented");
+ static int*[] pureFoo() pure { return null; }
+ { char[] s; immutable x = s.dup; }
+ { immutable x = (cast(int*[])null).dup; }
+ { immutable x = pureFoo(); }
+ { immutable x = pureFoo().dup; }
}
-bool _xopCmp(in void*, in void*)
+@safe unittest
{
- throw new Error("TypeInfo.compare is not implemented");
+ auto a = [1, 2, 3];
+ auto b = a.dup;
+ debug(SENTINEL) {} else
+ assert(b.capacity >= 3);
}
-void __ctfeWrite(const string s) @nogc @safe pure nothrow {}
+@system unittest
+{
+ // Bugzilla 12580
+ void[] m = [0];
+ shared(void)[] s = [cast(shared)1];
+ immutable(void)[] i = [cast(immutable)2];
-/******************************************
- * Create RTInfo for type T
- */
+ s = s.dup;
+ static assert(is(typeof(s.dup) == shared(void)[]));
-template RTInfoImpl(size_t[] pointerBitmap)
-{
- immutable size_t[pointerBitmap.length] RTInfoImpl = pointerBitmap[];
+ m = i.dup;
+ i = m.dup;
+ i = i.idup;
+ i = m.idup;
+ i = s.idup;
+ i = s.dup;
+ static assert(!__traits(compiles, m = s.dup));
}
-template NoPointersBitmapPayload(size_t N)
+@safe unittest
{
- enum size_t[N] NoPointersBitmapPayload = 0;
-}
+ // Bugzilla 13809
+ static struct S
+ {
+ this(this) {}
+ ~this() {}
+ }
-template RTInfo(T)
-{
- enum pointerBitmap = __traits(getPointerBitmap, T);
- static if (pointerBitmap[1 .. $] == NoPointersBitmapPayload!(pointerBitmap.length - 1))
- enum RTInfo = rtinfoNoPointers;
- else
- enum RTInfo = RTInfoImpl!(pointerBitmap).ptr;
+ S[] arr;
+ auto a = arr.dup;
}
-/**
-* shortcuts for the precise GC, also generated by the compiler
-* used instead of the actual pointer bitmap
-*/
-enum immutable(void)* rtinfoNoPointers = null;
-enum immutable(void)* rtinfoHasPointers = cast(void*)1;
-
-// lhs == rhs lowers to __equals(lhs, rhs) for dynamic arrays
-bool __equals(T1, T2)(T1[] lhs, T2[] rhs)
+@system unittest
{
- import core.internal.traits : Unqual;
- alias U1 = Unqual!T1;
- alias U2 = Unqual!T2;
-
- static @trusted ref R at(R)(R[] r, size_t i) { return r.ptr[i]; }
- static @trusted R trustedCast(R, S)(S[] r) { return cast(R) r; }
-
- if (lhs.length != rhs.length)
- return false;
-
- if (lhs.length == 0 && rhs.length == 0)
- return true;
-
- static if (is(U1 == void) && is(U2 == void))
- {
- return __equals(trustedCast!(ubyte[])(lhs), trustedCast!(ubyte[])(rhs));
- }
- else static if (is(U1 == void))
- {
- return __equals(trustedCast!(ubyte[])(lhs), rhs);
- }
- else static if (is(U2 == void))
- {
- return __equals(lhs, trustedCast!(ubyte[])(rhs));
- }
- else static if (!is(U1 == U2))
+ // Bugzilla 16504
+ static struct S
{
- // This should replace src/object.d _ArrayEq which
- // compares arrays of different types such as long & int,
- // char & wchar.
- // Compiler lowers to __ArrayEq in dmd/src/opover.d
- foreach (const u; 0 .. lhs.length)
- {
- if (at(lhs, u) != at(rhs, u))
- return false;
- }
- return true;
+ __gshared int* gp;
+ int* p;
+ // postblit and hence .dup could escape
+ this(this) { gp = p; }
}
- else static if (__traits(isIntegral, U1))
+
+ int p;
+ scope S[1] arr = [S(&p)];
+ auto a = arr.dup; // dup does escape
+}
+
+// https://issues.dlang.org/show_bug.cgi?id=21983
+// dup/idup destroys partially constructed arrays on failure
+@safe unittest
+{
+ static struct SImpl(bool postblit)
{
+ int num;
+ long l = 0xDEADBEEF;
- if (!__ctfe)
+ static if (postblit)
{
- import core.stdc.string : memcmp;
- return () @trusted { return memcmp(cast(void*)lhs.ptr, cast(void*)rhs.ptr, lhs.length * U1.sizeof) == 0; }();
+ this(this)
+ {
+ if (this.num == 3)
+ throw new Exception("");
+ }
}
else
{
- foreach (const u; 0 .. lhs.length)
+ this(scope ref const SImpl other)
{
- if (at(lhs, u) != at(rhs, u))
- return false;
+ if (other.num == 3)
+ throw new Exception("");
+
+ this.num = other.num;
+ this.l = other.l;
}
- return true;
}
- }
- else
- {
- foreach (const u; 0 .. lhs.length)
+
+ ~this() @trusted
{
- static if (__traits(compiles, __equals(at(lhs, u), at(rhs, u))))
- {
- if (!__equals(at(lhs, u), at(rhs, u)))
- return false;
- }
- else static if (__traits(isFloating, U1))
- {
- if (at(lhs, u) != at(rhs, u))
- return false;
- }
- else static if (is(U1 : Object) && is(U2 : Object))
+ if (l != 0xDEADBEEF)
{
- if (!(cast(Object)at(lhs, u) is cast(Object)at(rhs, u)
- || at(lhs, u) && (cast(Object)at(lhs, u)).opEquals(cast(Object)at(rhs, u))))
- return false;
- }
- else static if (__traits(hasMember, U1, "opEquals"))
- {
- if (!at(lhs, u).opEquals(at(rhs, u)))
- return false;
- }
- else static if (is(U1 == delegate))
- {
- if (at(lhs, u) != at(rhs, u))
- return false;
- }
- else static if (is(U1 == U11*, U11))
- {
- if (at(lhs, u) != at(rhs, u))
- return false;
- }
- else
- {
- if (at(lhs, u).tupleof != at(rhs, u).tupleof)
- return false;
+ import core.stdc.stdio;
+ printf("Unexpected value: %lld\n", l);
+ fflush(stdout);
+ assert(false);
}
}
-
- return true;
}
-}
-unittest {
- assert(__equals([], []));
- assert(!__equals([1, 2], [1, 2, 3]));
-}
+ alias Postblit = SImpl!true;
+ alias Copy = SImpl!false;
-unittest
-{
- struct A
+ static int test(S)()
{
- int a;
+ S[4] arr = [ S(1), S(2), S(3), S(4) ];
+ try
+ {
+ arr.dup();
+ assert(false);
+ }
+ catch (Exception)
+ {
+ return 1;
+ }
}
- auto arr1 = [A(0), A(2)];
- auto arr2 = [A(0), A(1)];
- auto arr3 = [A(0), A(1)];
+ static assert(test!Postblit());
+ assert(test!Postblit());
- assert(arr1 != arr2);
- assert(arr2 == arr3);
+ static assert(test!Copy());
+ assert(test!Copy());
}
-unittest
+/**
+Destroys the given object and optionally resets it to its initial state. It's used to
+_destroy an object, calling its destructor or finalizer so it no longer
+references any other objects. It does $(I not) initiate a GC cycle or free
+any GC memory.
+If `initialize` is supplied `false`, the object is considered invalid after
+destruction, and should not be referenced.
+*/
+void destroy(bool initialize = true, T)(ref T obj) if (is(T == struct))
{
- struct A
- {
- int a;
- int b;
-
- bool opEquals(const A other)
- {
- return this.a == other.b && this.b == other.a;
- }
- }
+ import core.internal.destruction : destructRecurse;
- auto arr1 = [A(1, 0), A(0, 1)];
- auto arr2 = [A(1, 0), A(0, 1)];
- auto arr3 = [A(0, 1), A(1, 0)];
+ destructRecurse(obj);
- assert(arr1 != arr2);
- assert(arr2 == arr3);
+ static if (initialize)
+ {
+ import core.internal.lifetime : emplaceInitializer;
+ emplaceInitializer(obj); // emplace T.init
+ }
}
-// Compare class and interface objects for ordering.
-private int __cmp(Obj)(Obj lhs, Obj rhs)
-if (is(Obj : Object))
+@safe unittest
{
- if (lhs is rhs)
- return 0;
- // Regard null references as always being "less than"
- if (!lhs)
- return -1;
- if (!rhs)
- return 1;
- return lhs.opCmp(rhs);
+ struct A { string s = "A"; }
+ A a = {s: "B"};
+ assert(a.s == "B");
+ a.destroy;
+ assert(a.s == "A");
}
-int __cmp(T)(const T[] lhs, const T[] rhs) @trusted
-if (__traits(isScalar, T))
-{
- // Compute U as the implementation type for T
- static if (is(T == ubyte) || is(T == void) || is(T == bool))
- alias U = char;
- else static if (is(T == wchar))
- alias U = ushort;
- else static if (is(T == dchar))
- alias U = uint;
- else static if (is(T == ifloat))
- alias U = float;
- else static if (is(T == idouble))
- alias U = double;
- else static if (is(T == ireal))
- alias U = real;
- else
- alias U = T;
-
- static if (is(U == char))
- {
- import core.internal.string : dstrcmp;
- return dstrcmp(cast(char[]) lhs, cast(char[]) rhs);
- }
- else static if (!is(U == T))
+nothrow @safe @nogc unittest
+{
{
- // Reuse another implementation
- return __cmp(cast(U[]) lhs, cast(U[]) rhs);
+ struct A { string s = "A"; }
+ A a;
+ a.s = "asd";
+ destroy!false(a);
+ assert(a.s == "asd");
+ destroy(a);
+ assert(a.s == "A");
}
- else
{
- immutable len = lhs.length <= rhs.length ? lhs.length : rhs.length;
- foreach (const u; 0 .. len)
+ static int destroyed = 0;
+ struct C
{
- static if (__traits(isFloating, T))
+ string s = "C";
+ ~this() nothrow @safe @nogc
{
- immutable a = lhs.ptr[u], b = rhs.ptr[u];
- static if (is(T == cfloat) || is(T == cdouble)
- || is(T == creal))
- {
- // Use rt.cmath2._Ccmp instead ?
- auto r = (a.re > b.re) - (a.re < b.re);
- if (!r) r = (a.im > b.im) - (a.im < b.im);
- }
- else
- {
- const r = (a > b) - (a < b);
- }
- if (r) return r;
+ destroyed ++;
}
- else if (lhs.ptr[u] != rhs.ptr[u])
- return lhs.ptr[u] < rhs.ptr[u] ? -1 : 1;
}
- return lhs.length < rhs.length ? -1 : (lhs.length > rhs.length);
- }
-}
-
-// This function is called by the compiler when dealing with array
-// comparisons in the semantic analysis phase of CmpExp. The ordering
-// comparison is lowered to a call to this template.
-int __cmp(T1, T2)(T1[] s1, T2[] s2)
-if (!__traits(isScalar, T1) && !__traits(isScalar, T2))
-{
- import core.internal.traits : Unqual;
- alias U1 = Unqual!T1;
- alias U2 = Unqual!T2;
-
- static if (is(U1 == void) && is(U2 == void))
- static @trusted ref inout(ubyte) at(inout(void)[] r, size_t i) { return (cast(inout(ubyte)*) r.ptr)[i]; }
- else
- static @trusted ref R at(R)(R[] r, size_t i) { return r.ptr[i]; }
-
- // All unsigned byte-wide types = > dstrcmp
- immutable len = s1.length <= s2.length ? s1.length : s2.length;
- foreach (const u; 0 .. len)
- {
- static if (__traits(compiles, __cmp(at(s1, u), at(s2, u))))
- {
- auto c = __cmp(at(s1, u), at(s2, u));
- if (c != 0)
- return c;
- }
- else static if (__traits(compiles, at(s1, u).opCmp(at(s2, u))))
- {
- auto c = at(s1, u).opCmp(at(s2, u));
- if (c != 0)
- return c;
- }
- else static if (__traits(compiles, at(s1, u) < at(s2, u)))
- {
- if (at(s1, u) != at(s2, u))
- return at(s1, u) < at(s2, u) ? -1 : 1;
- }
- else
+ struct B
{
- // TODO: fix this legacy bad behavior, see
- // https://issues.dlang.org/show_bug.cgi?id=17244
- static assert(is(U1 == U2), "Internal error.");
- import core.stdc.string : memcmp;
- auto c = (() @trusted => memcmp(&at(s1, u), &at(s2, u), U1.sizeof))();
- if (c != 0)
- return c;
+ C c;
+ string s = "B";
+ ~this() nothrow @safe @nogc
+ {
+ destroyed ++;
+ }
}
+ B a;
+ a.s = "asd";
+ a.c.s = "jkl";
+ destroy!false(a);
+ assert(destroyed == 2);
+ assert(a.s == "asd");
+ assert(a.c.s == "jkl" );
+ destroy(a);
+ assert(destroyed == 4);
+ assert(a.s == "B");
+ assert(a.c.s == "C" );
}
- return s1.length < s2.length ? -1 : (s1.length > s2.length);
}
-// integral types
-@safe unittest
+private extern (C) void rt_finalize(void *data, bool det=true) nothrow;
+
+/// ditto
+void destroy(bool initialize = true, T)(T obj) if (is(T == class))
{
- void compareMinMax(T)()
+ static if (__traits(getLinkage, T) == "C++")
{
- T[2] a = [T.max, T.max];
- T[2] b = [T.min, T.min];
+ static if (__traits(hasMember, T, "__xdtor"))
+ obj.__xdtor();
- assert(__cmp(a, b) > 0);
- assert(__cmp(b, a) < 0);
+ static if (initialize)
+ {
+ enum classSize = __traits(classInstanceSize, T);
+ (cast(void*)obj)[0 .. classSize] = typeid(T).initializer[];
+ }
}
-
- compareMinMax!int;
- compareMinMax!uint;
- compareMinMax!long;
- compareMinMax!ulong;
- compareMinMax!short;
- compareMinMax!ushort;
- compareMinMax!byte;
- compareMinMax!dchar;
- compareMinMax!wchar;
+ else
+ rt_finalize(cast(void*)obj);
}
-// char types (dstrcmp)
-@safe unittest
+/// ditto
+void destroy(bool initialize = true, T)(T obj) if (is(T == interface))
{
- void compareMinMax(T)()
- {
- T[2] a = [T.max, T.max];
- T[2] b = [T.min, T.min];
-
- assert(__cmp(a, b) > 0);
- assert(__cmp(b, a) < 0);
- }
-
- compareMinMax!ubyte;
- compareMinMax!bool;
- compareMinMax!char;
- compareMinMax!(const char);
+ static assert(__traits(getLinkage, T) == "D", "Invalid call to destroy() on extern(" ~ __traits(getLinkage, T) ~ ") interface");
- string s1 = "aaaa";
- string s2 = "bbbb";
- assert(__cmp(s2, s1) > 0);
- assert(__cmp(s1, s2) < 0);
+ destroy!initialize(cast(Object)obj);
}
-// fp types
-@safe unittest
+/// Reference type demonstration
+@system unittest
{
- void compareMinMax(T)()
+ class C
{
- T[2] a = [T.max, T.max];
- T[2] b = [T.min_normal, T.min_normal];
- T[2] c = [T.max, T.min_normal];
- T[1] d = [T.max];
+ struct Agg
+ {
+ static int dtorCount;
- assert(__cmp(a, b) > 0);
- assert(__cmp(b, a) < 0);
- assert(__cmp(a, c) > 0);
- assert(__cmp(a, d) > 0);
- assert(__cmp(d, c) < 0);
- assert(__cmp(c, c) == 0);
- }
+ int x = 10;
+ ~this() { dtorCount++; }
+ }
- compareMinMax!real;
- compareMinMax!float;
- compareMinMax!double;
- compareMinMax!ireal;
- compareMinMax!ifloat;
- compareMinMax!idouble;
- compareMinMax!creal;
- //compareMinMax!cfloat;
- compareMinMax!cdouble;
+ static int dtorCount;
- // qualifiers
- compareMinMax!(const real);
- compareMinMax!(immutable real);
-}
+ string s = "S";
+ Agg a;
+ ~this() { dtorCount++; }
+ }
-// void[]
-@safe unittest
-{
- void[] a;
- const(void)[] b;
+ C c = new C();
+ assert(c.dtorCount == 0); // destructor not yet called
+ assert(c.s == "S"); // initial state `c.s` is `"S"`
+ assert(c.a.dtorCount == 0); // destructor not yet called
+ assert(c.a.x == 10); // initial state `c.a.x` is `10`
+ c.s = "T";
+ c.a.x = 30;
+ assert(c.s == "T"); // `c.s` is `"T"`
+ destroy(c);
+ assert(c.dtorCount == 1); // `c`'s destructor was called
+ assert(c.s == "S"); // `c.s` is back to its initial state, `"S"`
+ assert(c.a.dtorCount == 1); // `c.a`'s destructor was called
+ assert(c.a.x == 10); // `c.a.x` is back to its initial state, `10`
- (() @trusted
+ // check C++ classes work too!
+ extern (C++) class CPP
{
- a = cast(void[]) "bb";
- b = cast(const(void)[]) "aa";
- })();
+ struct Agg
+ {
+ __gshared int dtorCount;
- assert(__cmp(a, b) > 0);
- assert(__cmp(b, a) < 0);
-}
+ int x = 10;
+ ~this() { dtorCount++; }
+ }
-// arrays of arrays with mixed modifiers
+ __gshared int dtorCount;
+
+ string s = "S";
+ Agg a;
+ ~this() { dtorCount++; }
+ }
+
+ CPP cpp = new CPP();
+ assert(cpp.dtorCount == 0); // destructor not yet called
+ assert(cpp.s == "S"); // initial state `cpp.s` is `"S"`
+ assert(cpp.a.dtorCount == 0); // destructor not yet called
+ assert(cpp.a.x == 10); // initial state `cpp.a.x` is `10`
+ cpp.s = "T";
+ cpp.a.x = 30;
+ assert(cpp.s == "T"); // `cpp.s` is `"T"`
+ destroy!false(cpp); // destroy without initialization
+ assert(cpp.dtorCount == 1); // `cpp`'s destructor was called
+ assert(cpp.s == "T"); // `cpp.s` was not reset to its initial value
+ assert(cpp.a.dtorCount == 1); // `cpp.a`'s destructor was called
+ assert(cpp.a.x == 30); // `cpp.a.x` was not reset to its initial value
+ destroy(cpp);
+ assert(cpp.dtorCount == 2); // `cpp`'s destructor was called again
+ assert(cpp.s == "S"); // `cpp.s` is back to its initial state, `"S"`
+ assert(cpp.a.dtorCount == 2); // `cpp.a`'s destructor was called again
+ assert(cpp.a.x == 10); // `cpp.a.x` is back to its initial state, `10`
+}
+
+/// Value type demonstration
@safe unittest
{
- // https://issues.dlang.org/show_bug.cgi?id=17876
- bool less1(immutable size_t[][] a, size_t[][] b) { return a < b; }
- bool less2(const void[][] a, void[][] b) { return a < b; }
- bool less3(inout size_t[][] a, size_t[][] b) { return a < b; }
-
- immutable size_t[][] a = [[1, 2], [3, 4]];
- size_t[][] b = [[1, 2], [3, 5]];
- assert(less1(a, b));
- assert(less3(a, b));
-
- auto va = [cast(immutable void[])a[0], a[1]];
- auto vb = [cast(void[])b[0], b[1]];
- assert(less2(va, vb));
+ int i;
+ assert(i == 0); // `i`'s initial state is `0`
+ i = 1;
+ assert(i == 1); // `i` changed to `1`
+ destroy!false(i);
+ assert(i == 1); // `i` was not reset to its initial value
+ destroy(i);
+ assert(i == 0); // `i` is back to its initial state `0`
}
-// objects
-@safe unittest
+@system unittest
{
- class C
+ extern(C++)
+ static class C
{
- int i;
- this(int i) { this.i = i; }
-
- override int opCmp(Object c) const @safe
- {
- return i - (cast(C)c).i;
- }
+ void* ptr;
+ this() {}
}
- auto c1 = new C(1);
- auto c2 = new C(2);
- assert(__cmp(c1, null) > 0);
- assert(__cmp(null, c1) < 0);
- assert(__cmp(c1, c1) == 0);
- assert(__cmp(c1, c2) < 0);
- assert(__cmp(c2, c1) > 0);
-
- assert(__cmp([c1, c1][], [c2, c2][]) < 0);
- assert(__cmp([c2, c2], [c1, c1]) > 0);
+ destroy!false(new C());
+ destroy!true(new C());
}
-// structs
-@safe unittest
+@system unittest
{
- struct C
+ // class with an `alias this`
+ class A
{
- ubyte i;
- this(ubyte i) { this.i = i; }
+ static int dtorCount;
+ ~this()
+ {
+ dtorCount++;
+ }
}
- auto c1 = C(1);
- auto c2 = C(2);
+ class B
+ {
+ A a;
+ alias a this;
+ this()
+ {
+ a = new A;
+ }
+ static int dtorCount;
+ ~this()
+ {
+ dtorCount++;
+ }
+ }
+ auto b = new B;
+ assert(A.dtorCount == 0);
+ assert(B.dtorCount == 0);
+ destroy(b);
+ assert(A.dtorCount == 0);
+ assert(B.dtorCount == 1);
- assert(__cmp([c1, c1][], [c2, c2][]) < 0);
- assert(__cmp([c2, c2], [c1, c1]) > 0);
- assert(__cmp([c2, c2], [c2, c1]) > 0);
+ auto a = new A;
+ destroy(a);
+ assert(A.dtorCount == 1);
}
-// Compiler hook into the runtime implementation of array (vector) operations.
-template _arrayOp(Args...)
+@system unittest
{
- import core.internal.arrayop;
- alias _arrayOp = arrayOp!Args;
-}
-
-// Helper functions
+ interface I { }
+ {
+ class A: I { string s = "A"; this() {} }
+ auto a = new A, b = new A;
+ a.s = b.s = "asd";
+ destroy(a);
+ assert(a.s == "A");
-private inout(TypeInfo) getElement(inout TypeInfo value) @trusted pure nothrow
-{
- TypeInfo element = cast() value;
- for (;;)
+ I i = b;
+ destroy(i);
+ assert(b.s == "A");
+ }
{
- if (auto qualified = cast(TypeInfo_Const) element)
- element = qualified.base;
- else if (auto redefined = cast(TypeInfo_Enum) element)
- element = redefined.base;
- else if (auto staticArray = cast(TypeInfo_StaticArray) element)
- element = staticArray.value;
- else if (auto vector = cast(TypeInfo_Vector) element)
- element = vector.base;
- else
- break;
+ static bool destroyed = false;
+ class B: I
+ {
+ string s = "B";
+ this() {}
+ ~this()
+ {
+ destroyed = true;
+ }
+ }
+ auto a = new B, b = new B;
+ a.s = b.s = "asd";
+ destroy(a);
+ assert(destroyed);
+ assert(a.s == "B");
+
+ destroyed = false;
+ I i = b;
+ destroy(i);
+ assert(destroyed);
+ assert(b.s == "B");
+ }
+ // this test is invalid now that the default ctor is not run after clearing
+ version (none)
+ {
+ class C
+ {
+ string s;
+ this()
+ {
+ s = "C";
+ }
+ }
+ auto a = new C;
+ a.s = "asd";
+ destroy(a);
+ assert(a.s == "C");
}
- return cast(inout) element;
}
-private size_t getArrayHash(in TypeInfo element, in void* ptr, in size_t count) @trusted nothrow
+nothrow @safe @nogc unittest
{
- if (!count)
- return 0;
-
- const size_t elementSize = element.tsize;
- if (!elementSize)
- return 0;
-
- static bool hasCustomToHash(in TypeInfo value) @trusted pure nothrow
{
- const element = getElement(value);
-
- if (const struct_ = cast(const TypeInfo_Struct) element)
- return !!struct_.xtoHash;
-
- return cast(const TypeInfo_Array) element
- || cast(const TypeInfo_AssociativeArray) element
- || cast(const ClassInfo) element
- || cast(const TypeInfo_Interface) element;
+ struct A { string s = "A"; }
+ A a;
+ a.s = "asd";
+ destroy!false(a);
+ assert(a.s == "asd");
+ destroy(a);
+ assert(a.s == "A");
}
+ {
+ static int destroyed = 0;
+ struct C
+ {
+ string s = "C";
+ ~this() nothrow @safe @nogc
+ {
+ destroyed ++;
+ }
+ }
- import core.internal.traits : externDFunc;
- if (!hasCustomToHash(element))
- return hashOf(ptr[0 .. elementSize * count]);
-
- size_t hash = 0;
- foreach (size_t i; 0 .. count)
- hash = hashOf(element.getHash(ptr + i * elementSize), hash);
- return hash;
+ struct B
+ {
+ C c;
+ string s = "B";
+ ~this() nothrow @safe @nogc
+ {
+ destroyed ++;
+ }
+ }
+ B a;
+ a.s = "asd";
+ a.c.s = "jkl";
+ destroy!false(a);
+ assert(destroyed == 2);
+ assert(a.s == "asd");
+ assert(a.c.s == "jkl" );
+ destroy(a);
+ assert(destroyed == 4);
+ assert(a.s == "B");
+ assert(a.c.s == "C" );
+ }
}
-/// Provide the .dup array property.
-@property auto dup(T)(T[] a)
- if (!is(const(T) : T))
+nothrow unittest
{
- import core.internal.traits : Unconst;
- static assert(is(T : Unconst!T), "Cannot implicitly convert type "~T.stringof~
- " to "~Unconst!T.stringof~" in dup.");
+ // Bugzilla 20049: Test to ensure proper behavior of `nothrow` destructors
+ class C
+ {
+ static int dtorCount = 0;
+ this() nothrow {}
+ ~this() nothrow { dtorCount++; }
+ }
- // wrap unsafe _dup in @trusted to preserve @safe postblit
- static if (__traits(compiles, (T b) @safe { T a = b; }))
- return _trustedDup!(T, Unconst!T)(a);
- else
- return _dup!(T, Unconst!T)(a);
+ auto c = new C;
+ destroy(c);
+ assert(C.dtorCount == 1);
}
/// ditto
-// const overload to support implicit conversion to immutable (unique result, see DIP29)
-@property T[] dup(T)(const(T)[] a)
- if (is(const(T) : T))
+void destroy(bool initialize = true, T)(ref T obj)
+if (__traits(isStaticArray, T))
{
- // wrap unsafe _dup in @trusted to preserve @safe postblit
- static if (__traits(compiles, (T b) @safe { T a = b; }))
- return _trustedDup!(const(T), T)(a);
- else
- return _dup!(const(T), T)(a);
+ foreach_reverse (ref e; obj[])
+ destroy!initialize(e);
}
-
-/// Provide the .idup array property.
-@property immutable(T)[] idup(T)(T[] a)
+@safe unittest
{
- static assert(is(T : immutable(T)), "Cannot implicitly convert type "~T.stringof~
- " to immutable in idup.");
-
- // wrap unsafe _dup in @trusted to preserve @safe postblit
- static if (__traits(compiles, (T b) @safe { T a = b; }))
- return _trustedDup!(T, immutable(T))(a);
- else
- return _dup!(T, immutable(T))(a);
+ int[2] a;
+ a[0] = 1;
+ a[1] = 2;
+ destroy!false(a);
+ assert(a == [ 1, 2 ]);
+ destroy(a);
+ assert(a == [ 0, 0 ]);
}
-/// ditto
-@property immutable(T)[] idup(T:void)(const(T)[] a)
+@safe unittest
{
- return a.dup;
-}
+ static struct vec2f {
+ float[2] values;
+ alias values this;
+ }
-private U[] _trustedDup(T, U)(T[] a) @trusted
-{
- return _dup!(T, U)(a);
+ vec2f v;
+ destroy!(true, vec2f)(v);
}
-private U[] _dup(T, U)(T[] a) // pure nothrow depends on postblit
+@system unittest
{
- if (__ctfe)
+ // Bugzilla 15009
+ static string op;
+ static struct S
{
- static if (is(T : void))
- assert(0, "Cannot dup a void[] array at compile time.");
- else
- {
- U[] res;
- foreach (ref e; a)
- res ~= e;
- return res;
- }
+ int x;
+ this(int x) { op ~= "C" ~ cast(char)('0'+x); this.x = x; }
+ this(this) { op ~= "P" ~ cast(char)('0'+x); }
+ ~this() { op ~= "D" ~ cast(char)('0'+x); }
}
- import core.stdc.string : memcpy;
-
- void[] arr = _d_newarrayU(typeid(T[]), a.length);
- memcpy(arr.ptr, cast(const(void)*)a.ptr, T.sizeof * a.length);
- auto res = *cast(U[]*)&arr;
-
- static if (!is(T : void))
- _doPostblit(res);
- return res;
-}
-
-private extern (C) void[] _d_newarrayU(const TypeInfo ti, size_t length) pure nothrow;
-
-
-/**************
- * Get the postblit for type T.
- * Returns:
- * null if no postblit is necessary
- * function pointer for struct postblits
- * delegate for class postblits
- */
-private auto _getPostblit(T)() @trusted pure nothrow @nogc
-{
- // infer static postblit type, run postblit if any
- static if (is(T == struct))
{
- import core.internal.traits : Unqual;
- // use typeid(Unqual!T) here to skip TypeInfo_Const/Shared/...
- alias _PostBlitType = typeof(function (ref T t){ T a = t; });
- return cast(_PostBlitType)typeid(Unqual!T).xpostblit;
+ S[2] a1 = [S(1), S(2)];
+ op = "";
}
- else if ((&typeid(T).postblit).funcptr !is &TypeInfo.postblit)
+ assert(op == "D2D1"); // built-in scope destruction
{
- alias _PostBlitType = typeof(delegate (ref T t){ T a = t; });
- return cast(_PostBlitType)&typeid(T).postblit;
+ S[2] a1 = [S(1), S(2)];
+ op = "";
+ destroy(a1);
+ assert(op == "D2D1"); // consistent with built-in behavior
}
- else
- return null;
-}
-private void _doPostblit(T)(T[] arr)
-{
- // infer static postblit type, run postblit if any
- if (auto postblit = _getPostblit!T())
{
- foreach (ref elem; arr)
- postblit(elem);
+ S[2][2] a2 = [[S(1), S(2)], [S(3), S(4)]];
+ op = "";
+ }
+ assert(op == "D4D3D2D1");
+ {
+ S[2][2] a2 = [[S(1), S(2)], [S(3), S(4)]];
+ op = "";
+ destroy(a2);
+ assert(op == "D4D3D2D1", op);
}
}
-unittest
+// https://issues.dlang.org/show_bug.cgi?id=19218
+@system unittest
{
- static struct S1 { int* p; }
- static struct S2 { @disable this(); }
- static struct S3 { @disable this(this); }
-
- int dg1() pure nothrow @safe
+ static struct S
{
- {
- char[] m;
- string i;
- m = m.dup;
- i = i.idup;
- m = i.dup;
- i = m.idup;
- }
- {
- S1[] m;
- immutable(S1)[] i;
- m = m.dup;
- i = i.idup;
- static assert(!is(typeof(m.idup)));
- static assert(!is(typeof(i.dup)));
- }
- {
- S3[] m;
- immutable(S3)[] i;
- static assert(!is(typeof(m.dup)));
- static assert(!is(typeof(i.idup)));
- }
- {
- shared(S1)[] m;
- m = m.dup;
- static assert(!is(typeof(m.idup)));
- }
- {
- int[] a = (inout(int)) { inout(const(int))[] a; return a.dup; }(0);
- }
- return 1;
+ static dtorCount = 0;
+ ~this() { ++dtorCount; }
}
- int dg2() pure nothrow @safe
+ static interface I
{
- {
- S2[] m = [S2.init, S2.init];
- immutable(S2)[] i = [S2.init, S2.init];
- m = m.dup;
- m = i.dup;
- i = m.idup;
- i = i.idup;
- }
- return 2;
+ ref S[3] getArray();
+ alias getArray this;
}
- enum a = dg1();
- enum b = dg2();
- assert(dg1() == a);
- assert(dg2() == b);
-}
+ static class C : I
+ {
+ static dtorCount = 0;
+ ~this() { ++dtorCount; }
-unittest
-{
- static struct Sunpure { this(this) @safe nothrow {} }
- static struct Sthrow { this(this) @safe pure {} }
- static struct Sunsafe { this(this) @system pure nothrow {} }
+ S[3] a;
+ alias a this;
- static assert( __traits(compiles, () { [].dup!Sunpure; }));
- static assert(!__traits(compiles, () pure { [].dup!Sunpure; }));
- static assert( __traits(compiles, () { [].dup!Sthrow; }));
- static assert(!__traits(compiles, () nothrow { [].dup!Sthrow; }));
- static assert( __traits(compiles, () { [].dup!Sunsafe; }));
- static assert(!__traits(compiles, () @safe { [].dup!Sunsafe; }));
+ ref S[3] getArray() { return a; }
+ }
- static assert( __traits(compiles, () { [].idup!Sunpure; }));
- static assert(!__traits(compiles, () pure { [].idup!Sunpure; }));
- static assert( __traits(compiles, () { [].idup!Sthrow; }));
- static assert(!__traits(compiles, () nothrow { [].idup!Sthrow; }));
- static assert( __traits(compiles, () { [].idup!Sunsafe; }));
- static assert(!__traits(compiles, () @safe { [].idup!Sunsafe; }));
+ C c = new C();
+ destroy(c);
+ assert(S.dtorCount == 3);
+ assert(C.dtorCount == 1);
+
+ I i = new C();
+ destroy(i);
+ assert(S.dtorCount == 6);
+ assert(C.dtorCount == 2);
}
-unittest
+/// ditto
+void destroy(bool initialize = true, T)(ref T obj)
+ if (!is(T == struct) && !is(T == interface) && !is(T == class) && !__traits(isStaticArray, T))
{
- static int*[] pureFoo() pure { return null; }
- { char[] s; immutable x = s.dup; }
- { immutable x = (cast(int*[])null).dup; }
- { immutable x = pureFoo(); }
- { immutable x = pureFoo().dup; }
+ static if (initialize)
+ obj = T.init;
}
-unittest
+@safe unittest
{
- auto a = [1, 2, 3];
- auto b = a.dup;
- debug(SENTINEL) {} else
- assert(b.capacity >= 3);
+ {
+ int a = 42;
+ destroy!false(a);
+ assert(a == 42);
+ destroy(a);
+ assert(a == 0);
+ }
+ {
+ float a = 42;
+ destroy!false(a);
+ assert(a == 42);
+ destroy(a);
+ assert(a != a); // isnan
+ }
}
-unittest
+@safe unittest
{
- // Bugzilla 12580
- void[] m = [0];
- shared(void)[] s = [cast(shared)1];
- immutable(void)[] i = [cast(immutable)2];
+ // Bugzilla 14746
+ static struct HasDtor
+ {
+ ~this() { assert(0); }
+ }
+ static struct Owner
+ {
+ HasDtor* ptr;
+ alias ptr this;
+ }
- s = s.dup;
- static assert(is(typeof(s.dup) == shared(void)[]));
+ Owner o;
+ assert(o.ptr is null);
+ destroy(o); // must not reach in HasDtor.__dtor()
+}
- m = i.dup;
- i = m.dup;
- i = i.idup;
- i = m.idup;
- i = s.idup;
- i = s.dup;
- static assert(!__traits(compiles, m = s.dup));
+/* ************************************************************************
+ COMPILER SUPPORT
+The compiler lowers certain expressions to instantiations of the following
+templates. They must be implicitly imported, which is why they are here
+in this file. They must also be `public` as they must be visible from the
+scope in which they are instantiated. They are explicitly undocumented as
+they are only intended to be instantiated by the compiler, not the user.
+**************************************************************************/
+
+public import core.internal.entrypoint : _d_cmain;
+
+public import core.internal.array.appending : _d_arrayappendTImpl;
+public import core.internal.array.appending : _d_arrayappendcTXImpl;
+public import core.internal.array.comparison : __cmp;
+public import core.internal.array.equality : __equals;
+public import core.internal.array.casting: __ArrayCast;
+public import core.internal.array.concatenation : _d_arraycatnTXImpl;
+public import core.internal.array.construction : _d_arrayctor;
+public import core.internal.array.construction : _d_arraysetctor;
+public import core.internal.array.capacity: _d_arraysetlengthTImpl;
+
+public import core.internal.dassert: _d_assert_fail;
+
+public import core.internal.destruction: __ArrayDtor;
+
+public import core.internal.moving: __move_post_blt;
+
+public import core.internal.postblit: __ArrayPostblit;
+
+public import core.internal.switch_: __switch;
+public import core.internal.switch_: __switch_error;
+
+public @trusted @nogc nothrow pure extern (C) void _d_delThrowable(scope Throwable);
+
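Editorial note, not part of the patch: the COMPILER SUPPORT block above describes hooks that user-level expressions are lowered to. As an illustrative sketch (assuming a druntime where `__equals` and `__cmp` are publicly imported from `object`, as in the lines above), calling the hooks directly gives the same result as the operators they implement:

    unittest
    {
        int[] a = [1, 2, 3];
        int[] b = [1, 2, 3];

        // `a == b` on dynamic arrays is lowered to __equals(a, b)
        assert(__equals(a, b));

        // `a < [1, 2, 4]` is lowered to a call of __cmp, which returns an ordering
        assert(__cmp(a, [1, 2, 4]) < 0);
    }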
+// Compare class and interface objects for ordering.
+private int __cmp(Obj)(Obj lhs, Obj rhs)
+if (is(Obj : Object))
+{
+ if (lhs is rhs)
+ return 0;
+ // Regard null references as always being "less than"
+ if (!lhs)
+ return -1;
+ if (!rhs)
+ return 1;
+ return lhs.opCmp(rhs);
}
-unittest
+// objects
+@safe unittest
{
- // Bugzilla 13809
- static struct S
+ class C
{
- this(this) {}
- ~this() {}
+ int i;
+ this(int i) { this.i = i; }
+
+ override int opCmp(Object c) const @safe
+ {
+ return i - (cast(C)c).i;
+ }
}
- S[] arr;
- auto a = arr.dup;
+ auto c1 = new C(1);
+ auto c2 = new C(2);
+ assert(__cmp(c1, null) > 0);
+ assert(__cmp(null, c1) < 0);
+ assert(__cmp(c1, c1) == 0);
+ assert(__cmp(c1, c2) < 0);
+ assert(__cmp(c2, c1) > 0);
+
+ assert(__cmp([c1, c1][], [c2, c2][]) < 0);
+ assert(__cmp([c2, c2], [c1, c1]) > 0);
}
-unittest
+// structs
+@safe unittest
{
- // Bugzilla 16504
- static struct S
+ struct C
{
- __gshared int* gp;
- int* p;
- // postblit and hence .dup could escape
- this(this) { gp = p; }
+ ubyte i;
+ this(ubyte i) { this.i = i; }
}
- int p;
- scope arr = [S(&p)];
- auto a = arr.dup; // dup does escape
+ auto c1 = C(1);
+ auto c2 = C(2);
+
+ assert(__cmp([c1, c1][], [c2, c2][]) < 0);
+ assert(__cmp([c2, c2], [c1, c1]) > 0);
+ assert(__cmp([c2, c2], [c2, c1]) > 0);
}
-// compiler frontend lowers dynamic array comparison to this
-bool __ArrayEq(T1, T2)(T1[] a, T2[] b)
+@safe unittest
{
- if (a.length != b.length)
- return false;
- foreach (size_t i; 0 .. a.length)
- {
- if (a[i] != b[i])
- return false;
- }
- return true;
+ auto a = "hello"c;
+
+ assert(a > "hel");
+ assert(a >= "hel");
+ assert(a < "helloo");
+ assert(a <= "helloo");
+ assert(a > "betty");
+ assert(a >= "betty");
+ assert(a == "hello");
+ assert(a <= "hello");
+ assert(a >= "hello");
+ assert(a < "я");
}
-// compiler frontend lowers struct array postblitting to this
-void __ArrayPostblit(T)(T[] a)
+// Used in Exception Handling LSDA tables to 'wrap' C++ type info
+// so it can be distinguished from D TypeInfo
+class __cpp_type_info_ptr
{
- foreach (ref T e; a)
- e.__xpostblit();
+ void* ptr; // opaque pointer to C++ RTTI type info
}
-// compiler frontend lowers dynamic array deconstruction to this
-void __ArrayDtor(T)(T[] a)
+// Compiler hook into the runtime implementation of array (vector) operations.
+template _arrayOp(Args...)
{
- foreach_reverse (ref T e; a)
- e.__xdtor();
+ import core.internal.array.operations;
+ alias _arrayOp = arrayOp!Args;
}
+
+public import core.builtins : __ctfeWrite;
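Editorial note, not part of the patch: `_arrayOp` above is the hook behind array (vector) expressions. A minimal sketch of the kind of user code the compiler rewrites into an `arrayOp` instantiation:

    unittest
    {
        int[3] a;
        int[3] b = [1, 2, 3];
        int[3] c = [4, 5, 6];

        a[] = b[] + c[];    // element-wise vector op, lowered through _arrayOp
        assert(a[] == [5, 7, 9]);

        a[] *= 2;           // compound vector ops take the same path
        assert(a[] == [10, 14, 18]);
    }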
diff --git a/libphobos/libdruntime/rt/aApply.d b/libphobos/libdruntime/rt/aApply.d
index f665702..bea441f 100644
--- a/libphobos/libdruntime/rt/aApply.d
+++ b/libphobos/libdruntime/rt/aApply.d
@@ -4,13 +4,13 @@
* of those.
*
* Copyright: Copyright Digital Mars 2004 - 2010.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Walter Bright
- * Source: $(DRUNTIMESRC src/rt/_aApply.d)
+ * Source: $(DRUNTIMESRC rt/_aApply.d)
*/
module rt.aApply;
-private import rt.util.utf : decode, toUTF8;
+import core.internal.utf : decode, toUTF8;
/**********************************************/
/* 1 argument versions */
diff --git a/libphobos/libdruntime/rt/aApplyR.d b/libphobos/libdruntime/rt/aApplyR.d
index b29d370..6db6530 100644
--- a/libphobos/libdruntime/rt/aApplyR.d
+++ b/libphobos/libdruntime/rt/aApplyR.d
@@ -4,8 +4,9 @@
* of those.
*
* Copyright: Copyright Digital Mars 2004 - 2010.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Walter Bright, Sean Kelly
+ * Source: $(DRUNTIMESRC rt/_aApplyR.d)
*/
/* Copyright Digital Mars 2004 - 2010.
@@ -20,7 +21,7 @@ module rt.aApplyR;
* and dchar, and 2 of each of those.
*/
-private import rt.util.utf;
+import core.internal.utf;
/**********************************************/
/* 1 argument versions */
diff --git a/libphobos/libdruntime/rt/aaA.d b/libphobos/libdruntime/rt/aaA.d
index 0ccf902..0181053 100644
--- a/libphobos/libdruntime/rt/aaA.d
+++ b/libphobos/libdruntime/rt/aaA.d
@@ -2,8 +2,9 @@
* Implementation of associative arrays.
*
* Copyright: Copyright Digital Mars 2000 - 2015.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Martin Nowak
+ * Source: $(DRUNTIMESRC rt/_aaA.d)
*/
module rt.aaA;
@@ -11,6 +12,7 @@ module rt.aaA;
extern (C) immutable int _aaVersion = 1;
import core.memory : GC;
+import core.internal.util.math : min, max;
// grow threshold
private enum GROW_NUM = 4;
@@ -48,13 +50,12 @@ struct AA
private struct Impl
{
private:
- this(in TypeInfo_AssociativeArray ti, size_t sz = INIT_NUM_BUCKETS)
+ this(scope const TypeInfo_AssociativeArray ti, size_t sz = INIT_NUM_BUCKETS)
{
keysz = cast(uint) ti.key.tsize;
valsz = cast(uint) ti.value.tsize;
buckets = allocBuckets(sz);
firstUsed = cast(uint) buckets.length;
- entryTI = fakeEntryTI(ti.key, ti.value);
valoff = cast(uint) talign(keysz, ti.value.talign);
import rt.lifetime : hasPostblit, unqualify;
@@ -63,6 +64,8 @@ private:
flags |= Flags.keyHasPostblit;
if ((ti.key.flags | ti.value.flags) & 1)
flags |= Flags.hasPointers;
+
+ entryTI = fakeEntryTI(this, ti.key, ti.value);
}
Bucket[] buckets;
@@ -110,7 +113,7 @@ private:
}
// lookup a key
- inout(Bucket)* findSlotLookup(size_t hash, in void* pkey, in TypeInfo keyti) inout
+ inout(Bucket)* findSlotLookup(size_t hash, scope const void* pkey, scope const TypeInfo keyti) inout
{
for (size_t i = hash & mask, j = 1;; ++j)
{
@@ -122,7 +125,7 @@ private:
}
}
- void grow(in TypeInfo keyti)
+ void grow(scope const TypeInfo keyti)
{
// If there are so many deleted entries, that growing would push us
// below the shrink threshold, we just purge deleted entries instead.
@@ -132,7 +135,7 @@ private:
resize(GROW_FAC * dim);
}
- void shrink(in TypeInfo keyti)
+ void shrink(scope const TypeInfo keyti)
{
if (dim > INIT_NUM_BUCKETS)
resize(dim / GROW_FAC);
@@ -200,7 +203,7 @@ Bucket[] allocBuckets(size_t dim) @trusted pure nothrow
// Entry
//------------------------------------------------------------------------------
-private void* allocEntry(in Impl* aa, in void* pkey)
+private void* allocEntry(scope const Impl* aa, scope const void* pkey)
{
import rt.lifetime : _d_newitemU;
import core.stdc.string : memcpy, memset;
@@ -243,19 +246,45 @@ private bool hasDtor(const TypeInfo ti)
return false;
}
+private immutable(void)* getRTInfo(const TypeInfo ti)
+{
+ // classes are references
+ const isNoClass = ti && typeid(ti) !is typeid(TypeInfo_Class);
+ return isNoClass ? ti.rtInfo() : rtinfoHasPointers;
+}
+
// build type info for Entry with additional key and value fields
-TypeInfo_Struct fakeEntryTI(const TypeInfo keyti, const TypeInfo valti)
+TypeInfo_Struct fakeEntryTI(ref Impl aa, const TypeInfo keyti, const TypeInfo valti)
{
import rt.lifetime : unqualify;
auto kti = unqualify(keyti);
auto vti = unqualify(valti);
- if (!hasDtor(kti) && !hasDtor(vti))
+
+ // figure out whether RTInfo has to be generated (indicated by rtisize > 0)
+ enum pointersPerWord = 8 * (void*).sizeof * (void*).sizeof;
+ auto rtinfo = rtinfoNoPointers;
+ size_t rtisize = 0;
+ immutable(size_t)* keyinfo = void;
+ immutable(size_t)* valinfo = void;
+ if (aa.flags & Impl.Flags.hasPointers)
+ {
+ // classes are references
+ keyinfo = cast(immutable(size_t)*) getRTInfo(keyti);
+ valinfo = cast(immutable(size_t)*) getRTInfo(valti);
+
+ if (keyinfo is rtinfoHasPointers && valinfo is rtinfoHasPointers)
+ rtinfo = rtinfoHasPointers;
+ else
+ rtisize = 1 + (aa.valoff + aa.valsz + pointersPerWord - 1) / pointersPerWord;
+ }
+ bool entryHasDtor = hasDtor(kti) || hasDtor(vti);
+ if (rtisize == 0 && !entryHasDtor)
return null;
// save kti and vti after type info for struct
enum sizeti = __traits(classInstanceSize, TypeInfo_Struct);
- void* p = GC.malloc(sizeti + 2 * (void*).sizeof);
+ void* p = GC.malloc(sizeti + (2 + rtisize) * (void*).sizeof);
import core.stdc.string : memcpy;
memcpy(p, typeid(TypeInfo_Struct).initializer().ptr, sizeti);
@@ -265,26 +294,146 @@ TypeInfo_Struct fakeEntryTI(const TypeInfo keyti, const TypeInfo valti)
extra[0] = cast() kti;
extra[1] = cast() vti;
- static immutable tiName = __MODULE__ ~ ".Entry!(...)";
- ti.name = tiName;
+ static immutable tiMangledName = "S2rt3aaA__T5EntryZ";
+ ti.mangledName = tiMangledName;
+
+ ti.m_RTInfo = rtisize > 0 ? rtinfoEntry(aa, keyinfo, valinfo, cast(size_t*)(extra + 2), rtisize) : rtinfo;
+ ti.m_flags = ti.m_RTInfo is rtinfoNoPointers ? cast(TypeInfo_Struct.StructFlags)0 : TypeInfo_Struct.StructFlags.hasPointers;
// we don't expect the Entry objects to be used outside of this module, so we have control
// over the non-usage of the callback methods and other entries and can keep these null
// xtoHash, xopEquals, xopCmp, xtoString and xpostblit
- ti.m_RTInfo = rtinfoNoPointers;
- immutable entrySize = talign(kti.tsize, vti.talign) + vti.tsize;
+ immutable entrySize = aa.valoff + aa.valsz;
ti.m_init = (cast(ubyte*) null)[0 .. entrySize]; // init length, but not ptr
- // xdtor needs to be built from the dtors of key and value for the GC
- ti.xdtorti = &entryDtor;
+ if (entryHasDtor)
+ {
+ // xdtor needs to be built from the dtors of key and value for the GC
+ ti.xdtorti = &entryDtor;
+ ti.m_flags |= TypeInfo_Struct.StructFlags.isDynamicType;
+ }
- ti.m_flags = TypeInfo_Struct.StructFlags.isDynamicType;
- ti.m_flags |= (keyti.flags | valti.flags) & TypeInfo_Struct.StructFlags.hasPointers;
ti.m_align = cast(uint) max(kti.talign, vti.talign);
return ti;
}
+// build appropriate RTInfo at runtime
+immutable(void)* rtinfoEntry(ref Impl aa, immutable(size_t)* keyinfo, immutable(size_t)* valinfo, size_t* rtinfoData, size_t rtinfoSize)
+{
+ enum bitsPerWord = 8 * size_t.sizeof;
+
+ rtinfoData[0] = aa.valoff + aa.valsz;
+ rtinfoData[1..rtinfoSize] = 0;
+
+ void copyKeyInfo(string src)()
+ {
+ size_t pos = 1;
+ size_t keybits = aa.keysz / (void*).sizeof;
+ while (keybits >= bitsPerWord)
+ {
+ rtinfoData[pos] = mixin(src);
+ keybits -= bitsPerWord;
+ pos++;
+ }
+ if (keybits > 0)
+ rtinfoData[pos] = mixin(src) & ((cast(size_t) 1 << keybits) - 1);
+ }
+
+ if (keyinfo is rtinfoHasPointers)
+ copyKeyInfo!"~cast(size_t) 0"();
+ else if (keyinfo !is rtinfoNoPointers)
+ copyKeyInfo!"keyinfo[pos]"();
+
+ void copyValInfo(string src)()
+ {
+ size_t bitpos = aa.valoff / (void*).sizeof;
+ size_t pos = 1;
+ size_t dstpos = 1 + bitpos / bitsPerWord;
+ size_t begoff = bitpos % bitsPerWord;
+ size_t valbits = aa.valsz / (void*).sizeof;
+ size_t endoff = (bitpos + valbits) % bitsPerWord;
+ for (;;)
+ {
+ const bits = bitsPerWord - begoff;
+ size_t s = mixin(src);
+ rtinfoData[dstpos] |= s << begoff;
+ if (begoff > 0 && valbits > bits)
+ rtinfoData[dstpos+1] |= s >> bits;
+ if (valbits < bitsPerWord)
+ break;
+ valbits -= bitsPerWord;
+ dstpos++;
+ pos++;
+ }
+ if (endoff > 0)
+ rtinfoData[dstpos] &= ((cast(size_t) 1 << endoff) - 1);
+ }
+
+ if (valinfo is rtinfoHasPointers)
+ copyValInfo!"~cast(size_t) 0"();
+ else if (valinfo !is rtinfoNoPointers)
+ copyValInfo!"valinfo[pos]"();
+
+ return cast(immutable(void)*) rtinfoData;
+}
+
+unittest
+{
+ void test(K, V)()
+ {
+ static struct Entry
+ {
+ K key;
+ V val;
+ }
+ auto keyti = typeid(K);
+ auto valti = typeid(V);
+ auto valrti = getRTInfo(valti);
+ auto keyrti = getRTInfo(keyti);
+
+ auto impl = new Impl(typeid(V[K]));
+ if (valrti is rtinfoNoPointers && keyrti is rtinfoNoPointers)
+ {
+ assert(!(impl.flags & Impl.Flags.hasPointers));
+ assert(impl.entryTI is null);
+ }
+ else if (valrti is rtinfoHasPointers && keyrti is rtinfoHasPointers)
+ {
+ assert(impl.flags & Impl.Flags.hasPointers);
+ assert(impl.entryTI is null);
+ }
+ else
+ {
+ auto rtInfo = cast(size_t*) impl.entryTI.rtInfo();
+ auto refInfo = cast(size_t*) typeid(Entry).rtInfo();
+ assert(rtInfo[0] == refInfo[0]); // size
+ enum bytesPerWord = 8 * size_t.sizeof * (void*).sizeof;
+ size_t words = (rtInfo[0] + bytesPerWord - 1) / bytesPerWord;
+ foreach (i; 0 .. words)
+ assert(rtInfo[1 + i] == refInfo[i + 1]);
+ }
+ }
+ test!(long, int)();
+ test!(string, string);
+ test!(ubyte[16], Object);
+
+ static struct Small
+ {
+ ubyte[16] guid;
+ string name;
+ }
+ test!(string, Small);
+
+ static struct Large
+ {
+ ubyte[1024] data;
+ string[412] names;
+ ubyte[1024] moredata;
+ }
+ test!(Large, Large);
+}
+
//==============================================================================
// Helper functions
//------------------------------------------------------------------------------
@@ -307,14 +456,14 @@ private size_t mix(size_t h) @safe pure nothrow @nogc
return h;
}
-private size_t calcHash(in void* pkey, in TypeInfo keyti)
+private size_t calcHash(scope const void* pkey, scope const TypeInfo keyti)
{
immutable hash = keyti.getHash(pkey);
// highest bit is set to distinguish empty/deleted from filled buckets
return mix(hash) | HASH_FILLED_MARK;
}
-private size_t nextpow2(in size_t n) pure nothrow @nogc
+private size_t nextpow2(const size_t n) pure nothrow @nogc
{
import core.bitop : bsr;
@@ -332,22 +481,12 @@ pure nothrow @nogc unittest
assert(nextpow2(n) == pow2);
}
-private T min(T)(T a, T b) pure nothrow @nogc
-{
- return a < b ? a : b;
-}
-
-private T max(T)(T a, T b) pure nothrow @nogc
-{
- return b < a ? a : b;
-}
-
//==============================================================================
// API Implementation
//------------------------------------------------------------------------------
/// Determine number of entries in associative array.
-extern (C) size_t _aaLen(in AA aa) pure nothrow @nogc
+extern (C) size_t _aaLen(scope const AA aa) pure nothrow @nogc
{
return aa ? aa.length : 0;
}
@@ -356,7 +495,7 @@ extern (C) size_t _aaLen(in AA aa) pure nothrow @nogc
* Lookup *pkey in aa.
* Called only from implementation of (aa[key]) expressions when value is mutable.
* Params:
- * aa = associative array opaque pointer
+ * paa = associative array opaque pointer
* ti = TypeInfo for the associative array
* valsz = ignored
* pkey = pointer to the key value
@@ -365,18 +504,18 @@ extern (C) size_t _aaLen(in AA aa) pure nothrow @nogc
* If key was not in the aa, a mutable pointer to newly inserted value which
* is set to all zeros
*/
-extern (C) void* _aaGetY(AA* aa, const TypeInfo_AssociativeArray ti,
- in size_t valsz, in void* pkey)
+extern (C) void* _aaGetY(AA* paa, const TypeInfo_AssociativeArray ti,
+ const size_t valsz, scope const void* pkey)
{
bool found;
- return _aaGetX(aa, ti, valsz, pkey, found);
+ return _aaGetX(paa, ti, valsz, pkey, found);
}
/******************************
* Lookup *pkey in aa.
* Called only from implementation of require
* Params:
- * aa = associative array opaque pointer
+ * paa = associative array opaque pointer
* ti = TypeInfo for the associative array
* valsz = ignored
* pkey = pointer to the key value
@@ -386,12 +525,16 @@ extern (C) void* _aaGetY(AA* aa, const TypeInfo_AssociativeArray ti,
* If key was not in the aa, a mutable pointer to newly inserted value which
* is set to all zeros
*/
-extern (C) void* _aaGetX(AA* aa, const TypeInfo_AssociativeArray ti,
- in size_t valsz, in void* pkey, out bool found)
+extern (C) void* _aaGetX(AA* paa, const TypeInfo_AssociativeArray ti,
+ const size_t valsz, scope const void* pkey, out bool found)
{
// lazily alloc implementation
- if (aa.impl is null)
- aa.impl = new Impl(ti);
+ AA aa = *paa;
+ if (aa is null)
+ {
+ aa = new Impl(ti);
+ *paa = aa;
+ }
// get hash and bucket for key
immutable hash = calcHash(pkey, ti.key);
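Editorial note, not part of the patch: `_aaGetX` is the runtime backend of `object.require`, which hands back a stable reference to the value and inserts one on a miss. A minimal sketch of that user-level behaviour, assuming a druntime that ships `require`:

    unittest
    {
        int[string] counts;

        // miss: the lazily evaluated default is inserted and a reference to it returned
        counts.require("answer", 41) += 1;
        assert(counts["answer"] == 42);

        // hit: the stored value is returned and the default is not evaluated
        assert(counts.require("answer", 0) == 42);
    }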
@@ -417,7 +560,7 @@ extern (C) void* _aaGetX(AA* aa, const TypeInfo_AssociativeArray ti,
// update search cache and allocate entry
aa.firstUsed = min(aa.firstUsed, cast(uint)(p - aa.buckets.ptr));
p.hash = hash;
- p.entry = allocEntry(aa.impl, pkey);
+ p.entry = allocEntry(aa, pkey);
// postblit for key
if (aa.flags & Impl.Flags.keyHasPostblit)
{
@@ -440,8 +583,8 @@ extern (C) void* _aaGetX(AA* aa, const TypeInfo_AssociativeArray ti,
* Returns:
* pointer to value if present, null otherwise
*/
-extern (C) inout(void)* _aaGetRvalueX(inout AA aa, in TypeInfo keyti, in size_t valsz,
- in void* pkey)
+extern (C) inout(void)* _aaGetRvalueX(inout AA aa, scope const TypeInfo keyti, const size_t valsz,
+ scope const void* pkey)
{
return _aaInX(aa, keyti, pkey);
}
@@ -456,7 +599,7 @@ extern (C) inout(void)* _aaGetRvalueX(inout AA aa, in TypeInfo keyti, in size_t
* Returns:
* pointer to value if present, null otherwise
*/
-extern (C) inout(void)* _aaInX(inout AA aa, in TypeInfo keyti, in void* pkey)
+extern (C) inout(void)* _aaInX(inout AA aa, scope const TypeInfo keyti, scope const void* pkey)
{
if (aa.empty)
return null;
@@ -467,8 +610,8 @@ extern (C) inout(void)* _aaInX(inout AA aa, in TypeInfo keyti, in void* pkey)
return null;
}
-/// Delete entry in AA, return true if it was present
-extern (C) bool _aaDelX(AA aa, in TypeInfo keyti, in void* pkey)
+/// Delete entry in AA, return true if it was present
+extern (C) bool _aaDelX(AA aa, scope const TypeInfo keyti, scope const void* pkey)
{
if (aa.empty)
return false;
@@ -481,7 +624,9 @@ extern (C) bool _aaDelX(AA aa, in TypeInfo keyti, in void* pkey)
p.entry = null;
++aa.deleted;
- if (aa.length * SHRINK_DEN < aa.dim * SHRINK_NUM)
+ // `shrink` reallocates, and allocating from a finalizer leads to
+ // InvalidMemoryError: https://issues.dlang.org/show_bug.cgi?id=21442
+ if (aa.length * SHRINK_DEN < aa.dim * SHRINK_NUM && !GC.inFinalizer())
aa.shrink(keyti);
return true;
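Editorial note, not part of the patch: the `GC.inFinalizer()` guard matters because destructors run during a collection may themselves remove AA entries, and a shrink at that point would allocate. A hypothetical, reduced shape of the pattern being protected (names are illustrative only):

    int[string] registry;   // hypothetical module-level table

    class Tracked
    {
        string key;
        this(string k) { key = k; registry[k] = 1; }

        // May run inside a GC finalizer; the removal must not trigger a
        // reallocating shrink of the table, hence the inFinalizer() check.
        ~this() { registry.remove(key); }
    }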
@@ -494,20 +639,21 @@ extern (C) void _aaClear(AA aa) pure nothrow
{
if (!aa.empty)
{
- aa.impl.clear();
+ aa.clear();
}
}
/// Rehash AA
-extern (C) void* _aaRehash(AA* paa, in TypeInfo keyti) pure nothrow
+extern (C) void* _aaRehash(AA* paa, scope const TypeInfo keyti) pure nothrow
{
- if (!paa.empty)
- paa.resize(nextpow2(INIT_DEN * paa.length / INIT_NUM));
- return *paa;
+ AA aa = *paa;
+ if (!aa.empty)
+ aa.resize(nextpow2(INIT_DEN * aa.length / INIT_NUM));
+ return aa;
}
/// Return a GC allocated array of all values
-extern (C) inout(void[]) _aaValues(inout AA aa, in size_t keysz, in size_t valsz,
+extern (C) inout(void[]) _aaValues(inout AA aa, const size_t keysz, const size_t valsz,
const TypeInfo tiValueArray) pure nothrow
{
if (aa.empty)
@@ -531,7 +677,7 @@ extern (C) inout(void[]) _aaValues(inout AA aa, in size_t keysz, in size_t valsz
}
/// Return a GC allocated array of all keys
-extern (C) inout(void[]) _aaKeys(inout AA aa, in size_t keysz, const TypeInfo tiKeyArray) pure nothrow
+extern (C) inout(void[]) _aaKeys(inout AA aa, const size_t keysz, const TypeInfo tiKeyArray) pure nothrow
{
if (aa.empty)
return null;
@@ -557,7 +703,7 @@ extern (D) alias dg_t = int delegate(void*);
extern (D) alias dg2_t = int delegate(void*, void*);
/// foreach opApply over all values
-extern (C) int _aaApply(AA aa, in size_t keysz, dg_t dg)
+extern (C) int _aaApply(AA aa, const size_t keysz, dg_t dg)
{
if (aa.empty)
return 0;
@@ -574,7 +720,7 @@ extern (C) int _aaApply(AA aa, in size_t keysz, dg_t dg)
}
/// foreach opApply over all key/value pairs
-extern (C) int _aaApply2(AA aa, in size_t keysz, dg2_t dg)
+extern (C) int _aaApply2(AA aa, const size_t keysz, dg2_t dg)
{
if (aa.empty)
return 0;
@@ -639,9 +785,9 @@ extern (C) Impl* _d_assocarrayliteralTX(const TypeInfo_AssociativeArray ti, void
}
/// compares 2 AAs for equality
-extern (C) int _aaEqual(in TypeInfo tiRaw, in AA aa1, in AA aa2)
+extern (C) int _aaEqual(scope const TypeInfo tiRaw, scope const AA aa1, scope const AA aa2)
{
- if (aa1.impl is aa2.impl)
+ if (aa1 is aa2)
return true;
immutable len = _aaLen(aa1);
@@ -669,8 +815,10 @@ extern (C) int _aaEqual(in TypeInfo tiRaw, in AA aa1, in AA aa2)
}
/// compute a hash
-extern (C) hash_t _aaGetHash(in AA* aa, in TypeInfo tiRaw) nothrow
+extern (C) hash_t _aaGetHash(scope const AA* paa, scope const TypeInfo tiRaw) nothrow
{
+ const AA aa = *paa;
+
if (aa.empty)
return 0;
@@ -707,7 +855,7 @@ struct Range
extern (C) pure nothrow @nogc @safe
{
- Range _aaRange(AA aa)
+ Range _aaRange(return AA aa)
{
if (!aa)
return Range();
@@ -715,7 +863,7 @@ extern (C) pure nothrow @nogc @safe
foreach (i; aa.firstUsed .. aa.dim)
{
if (aa.buckets[i].filled)
- return Range(aa.impl, i);
+ return Range(aa, i);
}
return Range(aa, aa.dim);
}
@@ -756,7 +904,7 @@ extern (C) pure nothrow @nogc @safe
}
}
-// Most tests are now in in test_aa.d
+// Most tests are now in test_aa.d
// test postblit for AA literals
unittest
diff --git a/libphobos/libdruntime/rt/adi.d b/libphobos/libdruntime/rt/adi.d
index 44f0e15..ea5a78f 100644
--- a/libphobos/libdruntime/rt/adi.d
+++ b/libphobos/libdruntime/rt/adi.d
@@ -6,7 +6,7 @@
* $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
* (See accompanying file LICENSE)
* Authors: Walter Bright
- * Source: $(DRUNTIMESRC src/rt/_adi.d)
+ * Source: $(DRUNTIMESRC rt/_adi.d)
*/
module rt.adi;
@@ -16,66 +16,6 @@ module rt.adi;
private
{
debug(adi) import core.stdc.stdio;
- import core.stdc.string;
- import core.stdc.stdlib;
- import core.memory;
- import rt.util.utf;
-
- extern (C) void[] _adSort(void[] a, TypeInfo ti);
-}
-
-private dchar[] mallocUTF32(C)(in C[] s)
-{
- size_t j = 0;
- auto p = cast(dchar*)malloc(dchar.sizeof * s.length);
- auto r = p[0..s.length]; // r[] will never be longer than s[]
- foreach (dchar c; s)
- r[j++] = c;
- return r[0 .. j];
-}
-
-/**********************************************
- * Sort array of chars.
- */
-
-extern (C) char[] _adSortChar(char[] a)
-{
- if (a.length > 1)
- {
- auto da = mallocUTF32(a);
- _adSort(*cast(void[]*)&da, typeid(da[0]));
- size_t i = 0;
- foreach (dchar d; da)
- { char[4] buf;
- auto t = toUTF8(buf, d);
- a[i .. i + t.length] = t[];
- i += t.length;
- }
- free(da.ptr);
- }
- return a;
-}
-
-/**********************************************
- * Sort array of wchars.
- */
-
-extern (C) wchar[] _adSortWchar(wchar[] a)
-{
- if (a.length > 1)
- {
- auto da = mallocUTF32(a);
- _adSort(*cast(void[]*)&da, typeid(da[0]));
- size_t i = 0;
- foreach (dchar d; da)
- { wchar[2] buf;
- auto t = toUTF16(buf, d);
- a[i .. i + t.length] = t[];
- i += t.length;
- }
- free(da.ptr);
- }
- return a;
}
/***************************************
@@ -85,27 +25,6 @@ extern (C) wchar[] _adSortWchar(wchar[] a)
* 0 not equal
*/
-extern (C) int _adEq(void[] a1, void[] a2, TypeInfo ti)
-{
- debug(adi) printf("_adEq(a1.length = %d, a2.length = %d)\n", a1.length, a2.length);
- if (a1.length != a2.length)
- return 0; // not equal
- auto sz = ti.tsize;
- auto p1 = a1.ptr;
- auto p2 = a2.ptr;
-
- if (sz == 1)
- // We should really have a ti.isPOD() check for this
- return (memcmp(p1, p2, a1.length) == 0);
-
- for (size_t i = 0; i < a1.length; i++)
- {
- if (!ti.equals(p1 + i * sz, p2 + i * sz))
- return 0; // not equal
- }
- return 1; // equal
-}
-
extern (C) int _adEq2(void[] a1, void[] a2, TypeInfo ti)
{
debug(adi) printf("_adEq2(a1.length = %d, a2.length = %d)\n", a1.length, a2.length);
@@ -115,218 +34,43 @@ extern (C) int _adEq2(void[] a1, void[] a2, TypeInfo ti)
return 0;
return 1;
}
-unittest
+
+@safe unittest
{
debug(adi) printf("array.Eq unittest\n");
- auto a = "hello"c;
+ struct S(T) { T val; }
+ alias String = S!string;
+ alias Float = S!float;
+
+ String[1] a = [String("hello"c)];
- assert(a != "hel");
- assert(a != "helloo");
- assert(a != "betty");
- assert(a == "hello");
- assert(a != "hxxxx");
+ assert(a != [String("hel")]);
+ assert(a != [String("helloo")]);
+ assert(a != [String("betty")]);
+ assert(a == [String("hello")]);
+ assert(a != [String("hxxxx")]);
- float[] fa = [float.nan];
+ Float[1] fa = [Float(float.nan)];
assert(fa != fa);
}
-/***************************************
- * Support for array compare test.
- */
-
-extern (C) int _adCmp(void[] a1, void[] a2, TypeInfo ti)
+unittest
{
- debug(adi) printf("adCmp()\n");
- auto len = a1.length;
- if (a2.length < len)
- len = a2.length;
- auto sz = ti.tsize;
- void *p1 = a1.ptr;
- void *p2 = a2.ptr;
+ debug(adi) printf("struct.Eq unittest\n");
- if (sz == 1)
- { // We should really have a ti.isPOD() check for this
- auto c = memcmp(p1, p2, len);
- if (c)
- return c;
- }
- else
+ static struct TestStruct
{
- for (size_t i = 0; i < len; i++)
+ int value;
+
+ bool opEquals(const TestStruct rhs) const
{
- auto c = ti.compare(p1 + i * sz, p2 + i * sz);
- if (c)
- return c;
+ return value == rhs.value;
}
}
- if (a1.length == a2.length)
- return 0;
- return (a1.length > a2.length) ? 1 : -1;
-}
-
-extern (C) int _adCmp2(void[] a1, void[] a2, TypeInfo ti)
-{
- debug(adi) printf("_adCmp2(a1.length = %d, a2.length = %d)\n", a1.length, a2.length);
- return ti.compare(&a1, &a2);
-}
-unittest
-{
- debug(adi) printf("array.Cmp unittest\n");
-
- auto a = "hello"c;
-
- assert(a > "hel");
- assert(a >= "hel");
- assert(a < "helloo");
- assert(a <= "helloo");
- assert(a > "betty");
- assert(a >= "betty");
- assert(a == "hello");
- assert(a <= "hello");
- assert(a >= "hello");
- assert(a < "я");
-}
-
-/***************************************
- * Support for array compare test.
- */
-
-extern (C) int _adCmpChar(void[] a1, void[] a2)
-{
- version (D_InlineAsm_X86)
- {
- asm
- { naked ;
-
- push EDI ;
- push ESI ;
-
- mov ESI,a1+4[4+ESP] ;
- mov EDI,a2+4[4+ESP] ;
-
- mov ECX,a1[4+ESP] ;
- mov EDX,a2[4+ESP] ;
-
- cmp ECX,EDX ;
- jb GotLength ;
-
- mov ECX,EDX ;
-
-GotLength:
- cmp ECX,4 ;
- jb DoBytes ;
-
- // Do alignment if neither is dword aligned
- test ESI,3 ;
- jz Aligned ;
-
- test EDI,3 ;
- jz Aligned ;
-DoAlign:
- mov AL,[ESI] ; //align ESI to dword bounds
- mov DL,[EDI] ;
-
- cmp AL,DL ;
- jnz Unequal ;
-
- inc ESI ;
- inc EDI ;
-
- test ESI,3 ;
-
- lea ECX,[ECX-1] ;
- jnz DoAlign ;
-Aligned:
- mov EAX,ECX ;
-
- // do multiple of 4 bytes at a time
-
- shr ECX,2 ;
- jz TryOdd ;
-
- repe ;
- cmpsd ;
-
- jnz UnequalQuad ;
-
-TryOdd:
- mov ECX,EAX ;
-DoBytes:
- // if still equal and not end of string, do up to 3 bytes slightly
- // slower.
-
- and ECX,3 ;
- jz Equal ;
-
- repe ;
- cmpsb ;
-
- jnz Unequal ;
-Equal:
- mov EAX,a1[4+ESP] ;
- mov EDX,a2[4+ESP] ;
-
- sub EAX,EDX ;
- pop ESI ;
-
- pop EDI ;
- ret ;
-
-UnequalQuad:
- mov EDX,[EDI-4] ;
- mov EAX,[ESI-4] ;
-
- cmp AL,DL ;
- jnz Unequal ;
-
- cmp AH,DH ;
- jnz Unequal ;
-
- shr EAX,16 ;
-
- shr EDX,16 ;
-
- cmp AL,DL ;
- jnz Unequal ;
-
- cmp AH,DH ;
-Unequal:
- sbb EAX,EAX ;
- pop ESI ;
-
- or EAX,1 ;
- pop EDI ;
-
- ret ;
- }
- }
- else
- {
- debug(adi) printf("adCmpChar()\n");
- auto len = a1.length;
- if (a2.length < len)
- len = a2.length;
- auto c = memcmp(cast(char *)a1.ptr, cast(char *)a2.ptr, len);
- if (!c)
- c = cast(int)a1.length - cast(int)a2.length;
- return c;
- }
-}
-
-unittest
-{
- debug(adi) printf("array.CmpChar unittest\n");
-
- auto a = "hello"c;
- assert(a > "hel");
- assert(a >= "hel");
- assert(a < "helloo");
- assert(a <= "helloo");
- assert(a > "betty");
- assert(a >= "betty");
- assert(a == "hello");
- assert(a <= "hello");
- assert(a >= "hello");
+ TestStruct[] b = [TestStruct(5)];
+ TestStruct[] c = [TestStruct(6)];
+ assert(_adEq2(*cast(void[]*)&b, *cast(void[]*)&c, typeid(TestStruct[])) == false);
+ assert(_adEq2(*cast(void[]*)&b, *cast(void[]*)&b, typeid(TestStruct[])) == true);
}
diff --git a/libphobos/libdruntime/rt/arrayassign.d b/libphobos/libdruntime/rt/arrayassign.d
index 389ff92..21d50b0 100644
--- a/libphobos/libdruntime/rt/arrayassign.d
+++ b/libphobos/libdruntime/rt/arrayassign.d
@@ -6,14 +6,14 @@
* License: Distributed under the
* $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
* Authors: Walter Bright, Kenji Hara
- * Source: $(DRUNTIMESRC src/rt/_arrayassign.d)
+ * Source: $(DRUNTIMESRC rt/_arrayassign.d)
*/
module rt.arrayassign;
private
{
- import rt.util.array;
+ import core.internal.util.array;
import core.stdc.string;
import core.stdc.stdlib;
debug(PRINTF) import core.stdc.stdio;
diff --git a/libphobos/libdruntime/rt/arraycast.d b/libphobos/libdruntime/rt/arraycast.d
deleted file mode 100644
index d16d30d..0000000
--- a/libphobos/libdruntime/rt/arraycast.d
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Implementation of array cast support routines.
- *
- * Copyright: Copyright Digital Mars 2004 - 2016.
- * License: Distributed under the
- * $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
- * Authors: Walter Bright, Sean Kelly
- * Source: $(DRUNTIMESRC src/rt/_arraycast.d)
- */
-
-module rt.arraycast;
-
-/******************************************
- * Runtime helper to convert dynamic array of one
- * type to dynamic array of another.
- * Adjusts the length of the array.
- * Throws an error if new length is not aligned.
- */
-
-extern (C)
-
-@trusted nothrow
-void[] _d_arraycast(size_t tsize, size_t fsize, void[] a)
-{
- auto length = a.length;
-
- auto nbytes = length * fsize;
- if (nbytes % tsize != 0)
- {
- throw new Error("array cast misalignment");
- }
- length = nbytes / tsize;
- *cast(size_t *)&a = length; // jam new length
- return a;
-}
-
-unittest
-{
- byte[int.sizeof * 3] b;
- int[] i;
- short[] s;
-
- i = cast(int[])b;
- assert(i.length == 3);
-
- s = cast(short[])b;
- assert(s.length == 6);
-
- s = cast(short[])i;
- assert(s.length == 6);
-}
-
diff --git a/libphobos/libdruntime/rt/arraycat.d b/libphobos/libdruntime/rt/arraycat.d
index f3f05c3..d879480 100644
--- a/libphobos/libdruntime/rt/arraycat.d
+++ b/libphobos/libdruntime/rt/arraycat.d
@@ -5,7 +5,7 @@
* License: Distributed under the
* $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
* Authors: Walter Bright, Sean Kelly
- * Source: $(DRUNTIMESRC src/rt/_arraycat.d)
+ * Source: $(DRUNTIMESRC rt/_arraycat.d)
*/
module rt.arraycat;
@@ -13,7 +13,7 @@ module rt.arraycat;
private
{
import core.stdc.string;
- import rt.util.array;
+ import core.internal.util.array;
debug(PRINTF) import core.stdc.stdio;
}
diff --git a/libphobos/libdruntime/rt/cast_.d b/libphobos/libdruntime/rt/cast_.d
index f34d82e..dcb4438 100644
--- a/libphobos/libdruntime/rt/cast_.d
+++ b/libphobos/libdruntime/rt/cast_.d
@@ -2,8 +2,9 @@
* Implementation of array assignment support routines.
*
* Copyright: Copyright Digital Mars 2004 - 2010.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Walter Bright, Sean Kelly
+ * Source: $(DRUNTIMESRC rt/_cast_.d)
*/
/* Copyright Digital Mars 2004 - 2010.
@@ -14,6 +15,19 @@
module rt.cast_;
extern (C):
+@nogc:
+nothrow:
+pure:
+
+// Needed because ClassInfo.opEquals(Object) does a dynamic cast,
+// but we are trying to implement dynamic cast.
+extern (D) private bool areClassInfosEqual(scope const ClassInfo a, scope const ClassInfo b) @safe
+{
+ if (a is b)
+ return true;
+ // take care of potential duplicates across binaries
+ return a.name == b.name;
+}
/******************************************
* Given a pointer:
@@ -22,7 +36,7 @@ extern (C):
* If it is null, return null.
* Else, undefined crash
*/
-Object _d_toObject(void* p)
+Object _d_toObject(return void* p)
{
if (!p)
return null;
@@ -74,21 +88,21 @@ void* _d_dynamic_cast(Object o, ClassInfo c)
return res;
}
-int _d_isbaseof2(ClassInfo oc, ClassInfo c, ref size_t offset)
+int _d_isbaseof2(scope ClassInfo oc, scope const ClassInfo c, scope ref size_t offset) @safe
{
- if (oc is c)
+ if (areClassInfosEqual(oc, c))
return true;
do
{
- if (oc.base is c)
+ if (oc.base && areClassInfosEqual(oc.base, c))
return true;
// Bugzilla 2013: Use depth-first search to calculate offset
// from the derived (oc) to the base (c).
foreach (iface; oc.interfaces)
{
- if (iface.classinfo is c || _d_isbaseof2(iface.classinfo, c, offset))
+ if (areClassInfosEqual(iface.classinfo, c) || _d_isbaseof2(iface.classinfo, c, offset))
{
offset += iface.offset;
return true;
@@ -101,19 +115,19 @@ int _d_isbaseof2(ClassInfo oc, ClassInfo c, ref size_t offset)
return false;
}
-int _d_isbaseof(ClassInfo oc, ClassInfo c)
+int _d_isbaseof(scope ClassInfo oc, scope const ClassInfo c) @safe
{
- if (oc is c)
+ if (areClassInfosEqual(oc, c))
return true;
do
{
- if (oc.base is c)
+ if (oc.base && areClassInfosEqual(oc.base, c))
return true;
foreach (iface; oc.interfaces)
{
- if (iface.classinfo is c || _d_isbaseof(iface.classinfo, c))
+ if (areClassInfosEqual(iface.classinfo, c) || _d_isbaseof(iface.classinfo, c))
return true;
}
@@ -122,20 +136,3 @@ int _d_isbaseof(ClassInfo oc, ClassInfo c)
return false;
}
-
-/*********************************
- * Find the vtbl[] associated with Interface ic.
- */
-void* _d_interface_vtbl(ClassInfo ic, Object o)
-{
- debug(cast_) printf("__d_interface_vtbl(o = %p, ic = %p)\n", o, ic);
-
- assert(o);
-
- foreach (iface; typeid(o).interfaces)
- {
- if (iface.classinfo is ic)
- return cast(void*) iface.vtbl;
- }
- assert(0);
-}
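A sketch, not part of the patch above: the class names below are hypothetical, but they illustrate what the name-based `ClassInfo` comparison is for, since a class-to-class `cast` is resolved at run time through `_d_dynamic_cast`/`_d_isbaseof2`.

---
// Hypothetical classes for illustration only.
class Base {}
class Derived : Base {}

void demo()
{
    Object o = new Derived;
    // A downcast like this is resolved at run time by the helpers above.
    assert(cast(Base) o !is null);
    // Duplicated ClassInfo instances (e.g. across shared libraries) still
    // compare equal because areClassInfosEqual falls back to the name.
    assert(typeid(Derived).base is typeid(Base)
           || typeid(Derived).base.name == typeid(Base).name);
}
---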
diff --git a/libphobos/libdruntime/rt/config.d b/libphobos/libdruntime/rt/config.d
index 904f721..f7682f3 100644
--- a/libphobos/libdruntime/rt/config.d
+++ b/libphobos/libdruntime/rt/config.d
@@ -1,68 +1,73 @@
/**
-* Configuration options for druntime
-*
-* Copyright: Copyright Digital Mars 2014.
-* License: Distributed under the
-* $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
-* (See accompanying file LICENSE)
-* Authors: Rainer Schuetze
-* Source: $(DRUNTIMESRC src/rt/_config.d)
+Configuration options for druntime.
+
+The default way to configure the runtime is by passing command line arguments
+starting with `--DRT-` and followed by the option name, e.g. `--DRT-gcopt` to
+configure the GC.
+When command line parsing is enabled, command line options starting
+with `--DRT-` are filtered out before calling main, so the program
+will not see them. They are still available via `rt_args()`.
+
+Configuration via the command line can be disabled by declaring a variable for the
+linker to pick up before it uses its default from the runtime:
+
+---
+extern(C) __gshared bool rt_cmdline_enabled = false;
+---
+
+Likewise, declare a boolean rt_envvars_enabled to enable configuration via the
+environment variable `DRT_` followed by the option name, e.g. `DRT_GCOPT`:
+
+---
+extern(C) __gshared bool rt_envvars_enabled = true;
+---
+
+Setting default configuration properties in the executable can be done by specifying an
+array of options named `rt_options`:
+
+---
+extern(C) __gshared string[] rt_options = [ "gcopt=precise:1 profile:1"];
+---
+
+Evaluation order of options is `rt_options`, then environment variables, then command
+line arguments, i.e. if command line arguments are not disabled, they can override
+options specified through the environment or embedded in the executable.
+
+Copyright: Copyright Digital Mars 2014.
+License: Distributed under the
+ $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ (See accompanying file LICENSE)
+Authors: Rainer Schuetze
+Source: $(DRUNTIMESRC rt/_config.d)
*/
module rt.config;
-// The default way to configure the runtime is by passing command line arguments
-// starting with "--DRT-" and followed by the option name, e.g. "--DRT-gcopt" to
-// configure the GC.
-// Command line options starting with "--DRT-" are filtered out before calling main,
-// so the program will not see them. They are still available via rt_args().
-//
-// Configuration via the command line can be disabled by declaring a variable for the
-// linker to pick up before using it's default from the runtime:
-//
-// extern(C) __gshared bool rt_cmdline_enabled = false;
-//
-// Likewise, declare a boolean rt_envvars_enabled to enable configuration via the
-// environment variable "DRT_" followed by the option name, e.g. "DRT_GCOPT":
-//
-// extern(C) __gshared bool rt_envvars_enabled = true;
-//
-// Setting default configuration properties in the executable can be done by specifying an
-// array of options named rt_options:
-//
-// extern(C) __gshared string[] rt_options = [ "gcopt=precise:1 profile:1"];
-//
-// Evaluation order of options is rt_options, then environment variables, then command
-// line arguments, i.e. if command line arguments are not disabled, they can override
-// options specified through the environment or embedded in the executable.
-
-import core.demangle : cPrefix;
-
// put each variable in its own COMDAT by making them template instances
template rt_envvars_enabled()
{
- pragma(mangle, cPrefix ~ "rt_envvars_enabled") __gshared bool rt_envvars_enabled = false;
+ extern(C) pragma(mangle, "rt_envvars_enabled") __gshared bool rt_envvars_enabled = false;
}
template rt_cmdline_enabled()
{
- pragma(mangle, cPrefix ~ "rt_cmdline_enabled") __gshared bool rt_cmdline_enabled = true;
+ extern(C) pragma(mangle, "rt_cmdline_enabled") __gshared bool rt_cmdline_enabled = true;
}
template rt_options()
{
- pragma(mangle, cPrefix ~ "rt_options") __gshared string[] rt_options = [];
+ extern(C) pragma(mangle, "rt_options") __gshared string[] rt_options = [];
}
import core.stdc.ctype : toupper;
import core.stdc.stdlib : getenv;
import core.stdc.string : strlen;
-extern extern(C) string[] rt_args() @nogc nothrow;
+extern extern(C) string[] rt_args() @nogc nothrow @system;
alias rt_configCallBack = string delegate(string) @nogc nothrow;
/**
* get a druntime config option using standard configuration options
-* opt name of the option to retreive
+* opt name of the option to retrieve
* dg if non-null, passes the option through this
* delegate and only returns its return value if non-null
* reverse reverse the default processing order cmdline/envvar/rt_options
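A short worked example of the evaluation order documented above (a sketch, not part of the patch): an executable can embed a default while still honouring a command-line override.

---
// Embedded default: GC profiling on. Because rt_options is evaluated first,
// an explicit --DRT-gcopt on the command line still overrides it.
extern(C) __gshared string[] rt_options = [ "gcopt=profile:1" ];

void main()
{
    // ./app                         -> profiling enabled (embedded default)
    // ./app --DRT-gcopt=profile:0   -> command line wins
}
---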
diff --git a/libphobos/libdruntime/rt/critical_.d b/libphobos/libdruntime/rt/critical_.d
index 9404261..ae18122 100644
--- a/libphobos/libdruntime/rt/critical_.d
+++ b/libphobos/libdruntime/rt/critical_.d
@@ -2,8 +2,9 @@
* Implementation of support routines for synchronized blocks.
*
* Copyright: Copyright Digital Mars 2000 - 2011.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Walter Bright, Sean Kelly
+ * Source: $(DRUNTIMESRC rt/_critical_.d)
*/
/* Copyright Digital Mars 2000 - 2011.
diff --git a/libphobos/libdruntime/rt/deh.d b/libphobos/libdruntime/rt/deh.d
index 440e242..695f2ce 100644
--- a/libphobos/libdruntime/rt/deh.d
+++ b/libphobos/libdruntime/rt/deh.d
@@ -1,12 +1,37 @@
/**
- * Implementation of exception handling support routines.
+ * Entry point for exception handling support routines.
*
- * Copyright: Copyright Digital Mars 1999 - 2013.
+ * There are three styles of exception handling supported by DMD:
+ * DWARF, Win32, and Win64. The Win64 code also supports POSIX.
+ * Support for those schemes is in `rt.dwarfeh`, `rt.deh_win32`, and
+ * `rt.deh_win64_posix`, respectively, and publicly imported here.
+ *
+ * When an exception is thrown by the user, the compiler translates
+ * code like `throw e;` into either `_d_throwdwarf` (for DWARF exceptions)
+ * or `_d_throwc` (Win32 / Win64), with the `Exception` object as argument.
+ *
+ * During those functions' handling, they eventually call `_d_createTrace`,
+ * which will store inside the `Exception` object the return of
+ * `_d_traceContext`, which is an object implementing `Throwable.TraceInfo`.
+ * `_d_traceContext` is a configurable hook, and by default will call
+ * `core.runtime : defaultTraceHandler`, which itself will call `backtrace`
+ * or something similar to store an array of stack frames (`void*` pointers)
+ * in the object it returns.
+ * Note that `defaultTraceHandler` returns a GC-allocated instance,
+ * hence a GC allocation can happen in the middle of throwing an `Exception`.
+ *
+ * The object implementing `Throwable.TraceInfo` should not resolve function
+ * names, files, and line numbers until its `opApply` function is called,
+ * avoiding the overhead of reading the debug info until the user calls
+ * `toString`.
+ * If the user only calls `Throwable.message` (or uses `Throwable.msg` directly),
+ * only the overhead of `backtrace` is paid, which is minimal.
+ *
+ * Copyright: Copyright Digital Mars 1999 - 2020.
* License: Distributed under the
* $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
* (See accompanying file LICENSE)
* Authors: Walter Bright
- * Source: $(DRUNTIMESRC src/rt/deh.d)
+ * Source: $(DRUNTIMESRC rt/deh.d)
*/
/* NOTE: This file has been patched from the original DMD distribution to
@@ -17,10 +42,8 @@ module rt.deh;
extern (C)
{
Throwable.TraceInfo _d_traceContext(void* ptr = null);
- void _d_createTrace(Object o, void* context)
+ void _d_createTrace(Throwable t, void* context)
{
- auto t = cast(Throwable) o;
-
if (t !is null && t.info is null &&
cast(byte*) t !is typeid(t).initializer.ptr)
{
@@ -39,4 +62,3 @@ else version (Posix)
public import rt.deh_win64_posix;
else
static assert (0, "Unsupported architecture");
-
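A sketch, assuming the matching public hook in `core.runtime`: since `_d_traceContext` is configurable, a program can replace `defaultTraceHandler`, for example to avoid the GC allocation noted above.

---
import core.runtime;

// Returning null opts out of trace collection entirely.
private Throwable.TraceInfo noTrace(void* ptr)
{
    return null;
}

shared static this()
{
    Runtime.traceHandler = &noTrace;
}
---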
diff --git a/libphobos/libdruntime/rt/dmain2.d b/libphobos/libdruntime/rt/dmain2.d
index e6acbd5..328452e 100644
--- a/libphobos/libdruntime/rt/dmain2.d
+++ b/libphobos/libdruntime/rt/dmain2.d
@@ -1,12 +1,12 @@
/**
* Contains druntime startup and shutdown routines.
*
- * Copyright: Copyright Digital Mars 2000 - 2013.
+ * Copyright: Copyright Digital Mars 2000 - 2018.
* License: Distributed under the
* $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
* (See accompanying file LICENSE)
* Authors: Walter Bright, Sean Kelly
- * Source: $(DRUNTIMESRC src/rt/_dmain2.d)
+ * Source: $(DRUNTIMESRC rt/_dmain2.d)
*/
/* NOTE: This file has been patched from the original DMD distribution to
@@ -14,22 +14,27 @@
*/
module rt.dmain2;
-private
-{
- import rt.memory;
- import rt.sections;
- import core.atomic;
- import core.stdc.stddef;
- import core.stdc.stdlib;
- import core.stdc.string;
- import core.stdc.stdio; // for printf()
- import core.stdc.errno : errno;
-}
+import rt.memory;
+import rt.sections;
+import core.atomic;
+import core.stdc.stddef;
+import core.stdc.stdlib;
+import core.stdc.string;
+import core.stdc.stdio; // for printf()
+import core.stdc.errno : errno;
version (Windows)
{
- private import core.stdc.wchar_;
- private import core.sys.windows.windows;
+ import core.stdc.wchar_;
+ import core.sys.windows.basetsd : HANDLE;
+ import core.sys.windows.shellapi : CommandLineToArgvW;
+ import core.sys.windows.winbase : FreeLibrary, GetCommandLineW, GetProcAddress,
+ IsDebuggerPresent, LoadLibraryW, LocalFree, WriteFile;
+ import core.sys.windows.wincon : CONSOLE_SCREEN_BUFFER_INFO, GetConsoleOutputCP,
+ GetConsoleScreenBufferInfo;
+ import core.sys.windows.winnls : CP_UTF8, MultiByteToWideChar, WideCharToMultiByte;
+ import core.sys.windows.winnt : WCHAR;
+ import core.sys.windows.winuser : MB_ICONERROR, MessageBoxW;
pragma(lib, "shell32.lib"); // needed for CommandLineToArgvW
}
@@ -47,27 +52,33 @@ version (DragonFlyBSD)
import core.stdc.fenv;
}
+// Mirrors core.runtime.UnitTestResult; duplicated here to keep this
+// module from importing core.runtime.
+struct UnitTestResult
+{
+ size_t executed;
+ size_t passed;
+ bool runMain;
+ bool summarize;
+}
+
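A sketch, assuming the mirrored `UnitTestResult` and the `extendedModuleUnitTester` hook in `core.runtime`: a custom tester reports its results in exactly this shape.

---
import core.runtime;

shared static this()
{
    Runtime.extendedModuleUnitTester = function()
    {
        UnitTestResult r;
        foreach (m; ModuleInfo)
        {
            if (m is null) continue;
            if (auto test = m.unitTest)
            {
                ++r.executed;
                try { test(); ++r.passed; } catch (Throwable) {}
            }
        }
        r.runMain = true;     // still run main() after the tests
        r.summarize = true;   // let druntime print the pass/fail summary
        return r;
    };
}
---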
extern (C) void _d_monitor_staticctor();
extern (C) void _d_monitor_staticdtor();
extern (C) void _d_critical_init();
extern (C) void _d_critical_term();
extern (C) void gc_init();
extern (C) void gc_term();
+extern (C) void thread_init() @nogc;
+extern (C) void thread_term() @nogc;
extern (C) void lifetime_init();
extern (C) void rt_moduleCtor();
extern (C) void rt_moduleTlsCtor();
extern (C) void rt_moduleDtor();
extern (C) void rt_moduleTlsDtor();
extern (C) void thread_joinAll();
-extern (C) bool runModuleUnitTests();
+extern (C) UnitTestResult runModuleUnitTests();
extern (C) void _d_initMonoTime();
-version (OSX)
-{
- // The bottom of the stack
- extern (C) __gshared void* __osx_stack_end = cast(void*)0xC0000000;
-}
-
version (CRuntime_Microsoft)
{
extern(C) void init_msvc();
@@ -83,9 +94,6 @@ extern (C) string[] rt_args()
return _d_args;
}
-// make arguments passed to main available for being filtered by runtime initializers
-extern(C) __gshared char[][] _d_main_args = null;
-
// This variable is only ever set by a debugger on initialization so it should
// be fine to leave it as __gshared.
extern (C) __gshared bool rt_trapExceptions = true;
@@ -123,7 +131,8 @@ extern (C) int rt_init()
// this initializes mono time before anything else to allow usage
// in other druntime systems.
_d_initMonoTime();
- gc_init();
+ thread_init();
+ // TODO: fixme - calls GC.addRange -> Initializes GC
initStaticDataGC();
lifetime_init();
rt_moduleCtor();
@@ -132,7 +141,7 @@ extern (C) int rt_init()
}
catch (Throwable t)
{
- _initCount = 0;
+ atomicStore!(MemoryOrder.raw)(_initCount, 0);
_d_print_throwable(t);
}
_d_critical_term();
@@ -145,7 +154,7 @@ extern (C) int rt_init()
*/
extern (C) int rt_term()
{
- if (!_initCount) return 0; // was never initialized
+ if (atomicLoad!(MemoryOrder.raw)(_initCount) == 0) return 0; // was never initialized
if (atomicOp!"-="(_initCount, 1)) return 1;
try
@@ -154,6 +163,7 @@ extern (C) int rt_term()
thread_joinAll();
rt_moduleDtor();
gc_term();
+ thread_term();
return 1;
}
catch (Throwable t)
@@ -234,74 +244,22 @@ extern (C) CArgs rt_cArgs() @nogc
return _cArgs;
}
-/***********************************
- * Run the given main function.
- * Its purpose is to wrap the D main()
- * function and catch any unhandled exceptions.
- */
+/// Type of the D main() function (`_Dmain`).
private alias extern(C) int function(char[][] args) MainFunc;
-extern (C) int _d_run_main(int argc, char **argv, MainFunc mainFunc)
+/**
+ * Sets up the D char[][] command-line args, initializes druntime,
+ * runs embedded unittests and then runs the given D main() function,
+ * optionally catching and printing any unhandled exceptions.
+ */
+extern (C) int _d_run_main(int argc, char** argv, MainFunc mainFunc)
{
+ // Set up _cArgs and array of D char[] slices, then forward to _d_run_main2
+
// Remember the original C argc/argv
_cArgs.argc = argc;
_cArgs.argv = argv;
- int result;
-
- version (OSX)
- { /* OSX does not provide a way to get at the top of the
- * stack, except for the magic value 0xC0000000.
- * But as far as the gc is concerned, argv is at the top
- * of the main thread's stack, so save the address of that.
- */
- __osx_stack_end = cast(void*)&argv;
- }
-
- version (FreeBSD) version (D_InlineAsm_X86)
- {
- /*
- * FreeBSD/i386 sets the FPU precision mode to 53 bit double.
- * Make it 64 bit extended.
- */
- ushort fpucw;
- asm
- {
- fstsw fpucw;
- or fpucw, 0b11_00_111111; // 11: use 64 bit extended-precision
- // 111111: mask all FP exceptions
- fldcw fpucw;
- }
- }
- version (CRuntime_Microsoft)
- {
- // enable full precision for reals
- version (D_InlineAsm_X86_64)
- {
- asm
- {
- push RAX;
- fstcw word ptr [RSP];
- or [RSP], 0b11_00_111111; // 11: use 64 bit extended-precision
- // 111111: mask all FP exceptions
- fldcw word ptr [RSP];
- pop RAX;
- }
- }
- else version (D_InlineAsm_X86)
- {
- asm
- {
- push EAX;
- fstcw word ptr [ESP];
- or [ESP], 0b11_00_111111; // 11: use 64 bit extended-precision
- // 111111: mask all FP exceptions
- fldcw word ptr [ESP];
- pop EAX;
- }
- }
- }
-
version (Windows)
{
/* Because we want args[] to be UTF-8, and Windows doesn't guarantee that,
@@ -309,10 +267,10 @@ extern (C) int _d_run_main(int argc, char **argv, MainFunc mainFunc)
* Then, reparse into wargc/wargs, and then use Windows API to convert
* to UTF-8.
*/
- const wchar_t* wCommandLine = GetCommandLineW();
+ const wCommandLine = GetCommandLineW();
immutable size_t wCommandLineLength = wcslen(wCommandLine);
int wargc;
- wchar_t** wargs = CommandLineToArgvW(wCommandLine, &wargc);
+ auto wargs = CommandLineToArgvW(wCommandLine, &wargc);
// assert(wargc == argc); /* argc can be broken by Unicode arguments */
// Allocate args[] on the stack - use wargc
@@ -357,6 +315,114 @@ extern (C) int _d_run_main(int argc, char **argv, MainFunc mainFunc)
else
static assert(0);
+ return _d_run_main2(args, totalArgsLength, mainFunc);
+}
+
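A sketch of the call path above: a hand-written C-style entry point can hand control to druntime the same way the compiler-generated `main` does (`_Dmain` is the fixed runtime name of the user's D `main`).

---
// The user's D main(), under its fixed extern(C) runtime name.
extern(C) int _Dmain(char[][] args);

extern(C) int main(int argc, char** argv)
{
    // Forward to druntime: sets up args, runs rt_init, unittests, _Dmain.
    return _d_run_main(argc, argv, &_Dmain);
}
---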
+/**
+ * Windows-specific version for wide command-line arguments, e.g.,
+ * from a wmain/wWinMain C entry point.
+ * This wide version uses the arguments it is given, unlike the narrow
+ * _d_run_main, which on Windows ignores them and re-reads the actual (wide)
+ * process command line instead.
+ */
+version (Windows)
+extern (C) int _d_wrun_main(int argc, wchar** wargv, MainFunc mainFunc)
+{
+ // Allocate args[] on the stack
+ char[][] args = (cast(char[]*) alloca(argc * (char[]).sizeof))[0 .. argc];
+
+ // 1st pass: compute each argument's length as UTF-16 and UTF-8
+ size_t totalArgsLength = 0;
+ foreach (i; 0 .. argc)
+ {
+ const warg = wargv[i];
+ const size_t wlen = wcslen(warg) + 1; // incl. terminating null
+ assert(wlen <= cast(size_t) int.max, "wlen cannot exceed int.max");
+ const int len = WideCharToMultiByte(CP_UTF8, 0, warg, cast(int) wlen, null, 0, null, null);
+ args[i] = (cast(char*) wlen)[0 .. len]; // args[i].ptr = wlen, args[i].length = len
+ totalArgsLength += len;
+ }
+
+ // Allocate a single buffer for all (null-terminated) argument strings in UTF-8 on the stack
+ char* utf8Buffer = cast(char*) alloca(totalArgsLength);
+
+ // 2nd pass: convert to UTF-8 and finalize `args`
+ char* utf8 = utf8Buffer;
+ foreach (i; 0 .. argc)
+ {
+ const wlen = cast(int) args[i].ptr;
+ const len = cast(int) args[i].length;
+ WideCharToMultiByte(CP_UTF8, 0, wargv[i], wlen, utf8, len, null, null);
+ args[i] = utf8[0 .. len-1]; // excl. terminating null
+ utf8 += len;
+ }
+
+ // Set C argc/argv; argv is a new stack-allocated array of UTF-8 C strings
+ char*[] argv = (cast(char**) alloca(argc * (char*).sizeof))[0 .. argc];
+ foreach (i, ref arg; argv)
+ arg = args[i].ptr;
+ _cArgs.argc = argc;
+ _cArgs.argv = argv.ptr;
+
+ totalArgsLength -= argc; // excl. null terminator per arg
+ return _d_run_main2(args, totalArgsLength, mainFunc);
+}
+
+private extern (C) int _d_run_main2(char[][] args, size_t totalArgsLength, MainFunc mainFunc)
+{
+ int result;
+
+ version (FreeBSD) version (D_InlineAsm_X86)
+ {
+ /*
+ * FreeBSD/i386 sets the FPU precision mode to 53 bit double.
+ * Make it 64 bit extended.
+ */
+ ushort fpucw;
+ asm
+ {
+ fstsw fpucw;
+ or fpucw, 0b11_00_111111; // 11: use 64 bit extended-precision
+ // 111111: mask all FP exceptions
+ fldcw fpucw;
+ }
+ }
+ version (CRuntime_Microsoft)
+ {
+ // enable full precision for reals
+ version (D_InlineAsm_X86_64)
+ {
+ asm
+ {
+ push RAX;
+ fstcw word ptr [RSP];
+ or [RSP], 0b11_00_111111; // 11: use 64 bit extended-precision
+ // 111111: mask all FP exceptions
+ fldcw word ptr [RSP];
+ pop RAX;
+ }
+ }
+ else version (D_InlineAsm_X86)
+ {
+ asm
+ {
+ push EAX;
+ fstcw word ptr [ESP];
+ or [ESP], 0b11_00_111111; // 11: use 64 bit extended-precision
+ // 111111: mask all FP exceptions
+ fldcw word ptr [ESP];
+ pop EAX;
+ }
+ }
+ else version (GNU_InlineAsm)
+ {
+ size_t fpu_cw;
+ asm { "fstcw %0" : "=m" (fpu_cw); }
+ fpu_cw |= 0b11_00_111111; // 11: use 64 bit extended-precision
+ // 111111: mask all FP exceptions
+ asm { "fldcw %0" : "=m" (fpu_cw); }
+ }
+ }
+
/* Create a copy of args[] on the stack to be used for main, so that rt_args()
* cannot be modified by the user.
* Note that when this function returns, _d_args will refer to garbage.
@@ -368,28 +434,33 @@ extern (C) int _d_run_main(int argc, char **argv, MainFunc mainFunc)
char[][] argsCopy = buff[0 .. args.length];
auto argBuff = cast(char*) (buff + args.length);
size_t j = 0;
+ import rt.config : rt_cmdline_enabled;
+ bool parseOpts = rt_cmdline_enabled!();
foreach (arg; args)
{
- if (arg.length < 6 || arg[0..6] != "--DRT-") // skip D runtime options
- {
- argsCopy[j++] = (argBuff[0 .. arg.length] = arg[]);
- argBuff += arg.length;
- }
+ // Do not pass Druntime options to the program
+ if (parseOpts && arg.length >= 6 && arg[0 .. 6] == "--DRT-")
+ continue;
+ // https://issues.dlang.org/show_bug.cgi?id=20459
+ if (arg == "--")
+ parseOpts = false;
+ argsCopy[j++] = (argBuff[0 .. arg.length] = arg[]);
+ argBuff += arg.length;
}
args = argsCopy[0..j];
}
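An illustration of the filtering above (a sketch, not part of the patch), assuming command-line parsing is enabled:

---
//   ./app --DRT-gcopt=profile:1 -- --DRT-literal foo
//
// main() receives   ["./app", "--", "--DRT-literal", "foo"]
// (the --DRT- option before "--" is consumed by the runtime), while
// rt_args() still returns all five original arguments unchanged.
---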
- bool trapExceptions = rt_trapExceptions;
+ auto useExceptionTrap = parseExceptionOptions();
version (Windows)
{
if (IsDebuggerPresent())
- trapExceptions = false;
+ useExceptionTrap = false;
}
void tryExec(scope void delegate() dg)
{
- if (trapExceptions)
+ if (useExceptionTrap)
{
try
{
@@ -417,8 +488,34 @@ extern (C) int _d_run_main(int argc, char **argv, MainFunc mainFunc)
// thrown during cleanup, however, will abort the cleanup process.
void runAll()
{
- if (rt_init() && runModuleUnitTests())
- tryExec({ result = mainFunc(args); });
+ if (rt_init())
+ {
+ auto utResult = runModuleUnitTests();
+ assert(utResult.passed <= utResult.executed);
+ if (utResult.passed == utResult.executed)
+ {
+ if (utResult.summarize)
+ {
+ if (utResult.passed == 0)
+ .fprintf(.stderr, "No unittests run\n");
+ else
+ .fprintf(.stderr, "%d modules passed unittests\n",
+ cast(int)utResult.passed);
+ }
+ if (utResult.runMain)
+ tryExec({ result = mainFunc(args); });
+ else
+ result = EXIT_SUCCESS;
+ }
+ else
+ {
+ if (utResult.summarize)
+ .fprintf(.stderr, "%d/%d modules FAILED unittests\n",
+ cast(int)(utResult.executed - utResult.passed),
+ cast(int)utResult.executed);
+ result = EXIT_FAILURE;
+ }
+ }
else
result = EXIT_FAILURE;
@@ -441,17 +538,17 @@ extern (C) int _d_run_main(int argc, char **argv, MainFunc mainFunc)
return result;
}
-private void formatThrowable(Throwable t, scope void delegate(in char[] s) nothrow sink)
+private void formatThrowable(Throwable t, scope void delegate(const scope char[] s) nothrow sink)
{
- for (; t; t = t.next)
+ foreach (u; t)
{
- t.toString(sink); sink("\n");
+ u.toString(sink); sink("\n");
- auto e = cast(Error)t;
+ auto e = cast(Error)u;
if (e is null || e.bypassedException is null) continue;
sink("=== Bypassed ===\n");
- for (auto t2 = e.bypassedException; t2; t2 = t2.next)
+ foreach (t2; e.bypassedException)
{
t2.toString(sink); sink("\n");
}
@@ -459,6 +556,18 @@ private void formatThrowable(Throwable t, scope void delegate(in char[] s) nothr
}
}
+private auto parseExceptionOptions()
+{
+ import rt.config : rt_configOption;
+ import core.internal.parseoptions : rt_parseOption;
+ const optName = "trapExceptions";
+ auto option = rt_configOption(optName);
+ auto trap = rt_trapExceptions;
+ if (option.length)
+ rt_parseOption(optName, option, trap, "");
+ return trap;
+}
+
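With the hook above, the compiled-in `rt_trapExceptions` default can now be overridden per run through the normal option channels; a sketch:

---
//   ./app --DRT-trapExceptions=0
//
// lets an unhandled exception escape the runtime's try/catch, so a debugger
// or core dump sees the original throw point instead of the printed trace.
---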
extern (C) void _d_print_throwable(Throwable t)
{
// On Windows, a console may not be present to print the output to.
@@ -468,17 +577,17 @@ extern (C) void _d_print_throwable(Throwable t)
{
static struct WSink
{
- wchar_t* ptr; size_t len;
+ WCHAR* ptr; size_t len;
- void sink(in char[] s) scope nothrow
+ void sink(const scope char[] s) scope nothrow
{
if (!s.length) return;
int swlen = MultiByteToWideChar(
CP_UTF8, 0, s.ptr, cast(int)s.length, null, 0);
if (!swlen) return;
- auto newPtr = cast(wchar_t*)realloc(ptr,
- (this.len + swlen + 1) * wchar_t.sizeof);
+ auto newPtr = cast(WCHAR*)realloc(ptr,
+ (this.len + swlen + 1) * WCHAR.sizeof);
if (!newPtr) return;
ptr = newPtr;
auto written = MultiByteToWideChar(
@@ -486,7 +595,7 @@ extern (C) void _d_print_throwable(Throwable t)
len += written;
}
- wchar_t* get() { if (ptr) ptr[len] = 0; return ptr; }
+ typeof(ptr) get() { if (ptr) ptr[len] = 0; return ptr; }
void free() { .free(ptr); }
}
@@ -558,7 +667,7 @@ extern (C) void _d_print_throwable(Throwable t)
}
}
- void sink(in char[] buf) scope nothrow
+ void sink(const scope char[] buf) scope nothrow
{
fprintf(stderr, "%.*s", cast(int)buf.length, buf.ptr);
}
diff --git a/libphobos/libdruntime/rt/dylib_fixes.c b/libphobos/libdruntime/rt/dylib_fixes.c
index 7bcf34b..e484fed 100644
--- a/libphobos/libdruntime/rt/dylib_fixes.c
+++ b/libphobos/libdruntime/rt/dylib_fixes.c
@@ -2,7 +2,7 @@
* OS X support for dynamic libraries.
*
* Copyright: Copyright Digital Mars 2010 - 2010.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Walter Bright
*/
diff --git a/libphobos/libdruntime/rt/ehalloc.d b/libphobos/libdruntime/rt/ehalloc.d
new file mode 100644
index 0000000..1dcd69a
--- /dev/null
+++ b/libphobos/libdruntime/rt/ehalloc.d
@@ -0,0 +1,125 @@
+/**
+ * Exception allocation, cloning, and release compiler support routines.
+ *
+ * Copyright: Copyright (c) 2017 by D Language Foundation
+ * License: Distributed under the
+ * $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ * (See accompanying file LICENSE)
+ * Authors: Walter Bright
+ * Source: $(DRUNTIMESRC rt/_ehalloc.d)
+ */
+
+module rt.ehalloc;
+
+//debug = PRINTF;
+
+debug(PRINTF)
+{
+ import core.stdc.stdio;
+}
+
+/**********************************************
+ * Allocate an exception of type `ci` from the exception pool.
+ * It has the same interface as `rt.lifetime._d_newclass()`.
+ * The class type must be Throwable or derived from it,
+ * and cannot be a COM or C++ class. The compiler must enforce
+ * this.
+ * Returns:
+ * default initialized instance of the type
+ */
+
+extern (C) Throwable _d_newThrowable(const TypeInfo_Class ci)
+{
+ debug(PRINTF) printf("_d_newThrowable(ci = %p, %s)\n", ci, cast(char *)ci.name);
+
+ assert(!(ci.m_flags & TypeInfo_Class.ClassFlags.isCOMclass));
+ assert(!(ci.m_flags & TypeInfo_Class.ClassFlags.isCPPclass));
+
+ import core.stdc.stdlib : malloc;
+ auto init = ci.initializer;
+ void* p = malloc(init.length);
+ if (!p)
+ {
+ import core.exception : onOutOfMemoryError;
+ onOutOfMemoryError();
+ }
+
+ debug(PRINTF) printf(" p = %p\n", p);
+
+ // initialize it
+ p[0 .. init.length] = init[];
+
+ if (!(ci.m_flags & TypeInfo_Class.ClassFlags.noPointers))
+ {
+ // Inform the GC about the pointers in the object instance
+ import core.memory : GC;
+
+ GC.addRange(p, init.length, ci);
+ }
+
+ debug(PRINTF) printf("initialization done\n");
+ Throwable t = cast(Throwable)p;
+ t.refcount() = 1;
+ return t;
+}
+
+
+/********************************************
+ * Delete exception instance `t` from the exception pool.
+ * Must have been allocated with `_d_newThrowable()`.
+ * This is meant to be called at the close of a catch block.
+ * It's nothrow because otherwise any function with a catch block could
+ * not be nothrow.
+ * Input:
+ * t = Throwable
+ */
+
+nothrow extern (C) void _d_delThrowable(Throwable t)
+{
+ if (t)
+ {
+ debug(PRINTF) printf("_d_delThrowable(%p)\n", t);
+
+ /* If allocated by the GC, don't free it.
+ * Let the GC handle it.
+ * Supporting this is necessary while transitioning
+ * to this new scheme for allocating exceptions.
+ */
+ auto refcount = t.refcount();
+ if (refcount == 0)
+ return; // it was allocated by the GC
+
+ if (refcount == 1)
+ assert(0); // no zombie objects
+
+ t.refcount() = --refcount;
+ if (refcount > 1)
+ return;
+
+ TypeInfo_Class **pc = cast(TypeInfo_Class **)t;
+ if (*pc)
+ {
+ TypeInfo_Class ci = **pc;
+
+ if (!(ci.m_flags & TypeInfo_Class.ClassFlags.noPointers))
+ {
+ // Inform the GC about the pointers in the object instance
+ import core.memory : GC;
+ GC.removeRange(cast(void*) t);
+ }
+ }
+
+ try
+ {
+ import rt.lifetime : rt_finalize;
+ rt_finalize(cast(void*) t);
+ }
+ catch (Throwable t)
+ {
+ assert(0); // should never happen since Throwable.~this() is nothrow
+ }
+ import core.stdc.stdlib : free;
+ debug(PRINTF) printf("free(%p)\n", t);
+ free(cast(void*) t);
+ }
+}
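A rough sketch of how these helpers are meant to be driven; the lowering shown is conceptual, not literal compiler output:

---
//   throw new Exception("boom");   // with reference-counted exceptions
//
// becomes, conceptually:
//
//   auto e = _d_newThrowable(typeid(Exception));
//   e.__ctor("boom");
//   _d_throwdwarf(e);              // or the platform's throw entry point
//
// and a catch block that lets the exception die ends with _d_delThrowable(e),
// which frees the instance once the stored refcount drops back to 1.
---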
diff --git a/libphobos/libdruntime/rt/invariant.d b/libphobos/libdruntime/rt/invariant.d
index 4dddfad..e536196 100644
--- a/libphobos/libdruntime/rt/invariant.d
+++ b/libphobos/libdruntime/rt/invariant.d
@@ -2,8 +2,9 @@
* Implementation of invariant support routines.
*
* Copyright: Copyright Digital Mars 2007 - 2010.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Walter Bright
+ * Source: $(DRUNTIMESRC rt/_invariant.d)
*/
/* Copyright Digital Mars 2007 - 2010.
diff --git a/libphobos/libdruntime/rt/lifetime.d b/libphobos/libdruntime/rt/lifetime.d
index 6a6eb50..f1a9d87 100644
--- a/libphobos/libdruntime/rt/lifetime.d
+++ b/libphobos/libdruntime/rt/lifetime.d
@@ -7,22 +7,18 @@
* $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
* (See accompanying file LICENSE)
* Authors: Walter Bright, Sean Kelly, Steven Schveighoffer
- * Source: $(DRUNTIMESRC src/rt/_lifetime.d)
+ * Source: $(DRUNTIMESRC rt/_lifetime.d)
*/
module rt.lifetime;
-import core.stdc.stdlib;
-import core.stdc.string;
-import core.stdc.stdarg;
-import core.bitop;
+import core.attribute : weak;
import core.memory;
debug(PRINTF) import core.stdc.stdio;
static import rt.tlsgc;
alias BlkInfo = GC.BlkInfo;
alias BlkAttr = GC.BlkAttr;
-import core.exception : onOutOfMemoryError, onFinalizeError, onInvalidMemoryOperationError;
private
{
@@ -52,7 +48,7 @@ extern (C) void lifetime_init()
/**
*
*/
-extern (C) void* _d_allocmemory(size_t sz)
+extern (C) void* _d_allocmemory(size_t sz) @weak
{
return GC.malloc(sz);
}
@@ -60,9 +56,12 @@ extern (C) void* _d_allocmemory(size_t sz)
/**
*
*/
-extern (C) Object _d_newclass(const ClassInfo ci)
+extern (C) Object _d_newclass(const ClassInfo ci) @weak
{
+ import core.stdc.stdlib;
+ import core.exception : onOutOfMemoryError;
void* p;
+ auto init = ci.initializer;
debug(PRINTF) printf("_d_newclass(ci = %p, %s)\n", ci, cast(char *)ci.name);
if (ci.m_flags & TypeInfo_Class.ClassFlags.isCOMclass)
@@ -71,7 +70,7 @@ extern (C) Object _d_newclass(const ClassInfo ci)
* function called by Release() when Release()'s reference count goes
* to zero.
*/
- p = malloc(ci.initializer.length);
+ p = malloc(init.length);
if (!p)
onOutOfMemoryError();
}
@@ -85,26 +84,26 @@ extern (C) Object _d_newclass(const ClassInfo ci)
attr |= BlkAttr.FINALIZE;
if (ci.m_flags & TypeInfo_Class.ClassFlags.noPointers)
attr |= BlkAttr.NO_SCAN;
- p = GC.malloc(ci.initializer.length, attr, ci);
+ p = GC.malloc(init.length, attr, ci);
debug(PRINTF) printf(" p = %p\n", p);
}
debug(PRINTF)
{
printf("p = %p\n", p);
- printf("ci = %p, ci.init.ptr = %p, len = %llu\n", ci, ci.initializer.ptr, cast(ulong)ci.initializer.length);
- printf("vptr = %p\n", *cast(void**) ci.initializer);
- printf("vtbl[0] = %p\n", (*cast(void***) ci.initializer)[0]);
- printf("vtbl[1] = %p\n", (*cast(void***) ci.initializer)[1]);
- printf("init[0] = %x\n", (cast(uint*) ci.initializer)[0]);
- printf("init[1] = %x\n", (cast(uint*) ci.initializer)[1]);
- printf("init[2] = %x\n", (cast(uint*) ci.initializer)[2]);
- printf("init[3] = %x\n", (cast(uint*) ci.initializer)[3]);
- printf("init[4] = %x\n", (cast(uint*) ci.initializer)[4]);
+ printf("ci = %p, ci.init.ptr = %p, len = %llu\n", ci, init.ptr, cast(ulong)init.length);
+ printf("vptr = %p\n", *cast(void**) init);
+ printf("vtbl[0] = %p\n", (*cast(void***) init)[0]);
+ printf("vtbl[1] = %p\n", (*cast(void***) init)[1]);
+ printf("init[0] = %x\n", (cast(uint*) init)[0]);
+ printf("init[1] = %x\n", (cast(uint*) init)[1]);
+ printf("init[2] = %x\n", (cast(uint*) init)[2]);
+ printf("init[3] = %x\n", (cast(uint*) init)[3]);
+ printf("init[4] = %x\n", (cast(uint*) init)[4]);
}
// initialize it
- p[0 .. ci.initializer.length] = ci.initializer[];
+ p[0 .. init.length] = init[];
debug(PRINTF) printf("initialization done\n");
return cast(Object) p;
@@ -134,7 +133,7 @@ private extern (D) alias void function (Object) fp_t;
/**
*
*/
-extern (C) void _d_delclass(Object* p)
+extern (C) void _d_delclass(Object* p) @weak
{
if (*p)
{
@@ -169,7 +168,7 @@ extern (C) void _d_delclass(Object* p)
* being deleted is a pointer to a struct with a destructor
* but doesn't have an overloaded delete operator.
*/
-extern (C) void _d_delstruct(void** p, TypeInfo_Struct inf)
+extern (C) void _d_delstruct(void** p, TypeInfo_Struct inf) @weak
{
if (*p)
{
@@ -182,7 +181,7 @@ extern (C) void _d_delstruct(void** p, TypeInfo_Struct inf)
}
// strip const/immutable/shared/inout from type info
-inout(TypeInfo) unqualify(inout(TypeInfo) cti) pure nothrow @nogc
+inout(TypeInfo) unqualify(return inout(TypeInfo) cti) pure nothrow @nogc
{
TypeInfo ti = cast() cti;
while (ti)
@@ -382,7 +381,7 @@ size_t __arrayAllocLength(ref BlkInfo info, const TypeInfo tinext) pure nothrow
/**
get the start of the array for the given block
*/
-void *__arrayStart(BlkInfo info) nothrow pure
+void *__arrayStart(return BlkInfo info) nothrow pure
{
return info.base + ((info.size & BIGLENGTHMASK) ? LARGEPREFIX : 0);
}
@@ -398,10 +397,26 @@ size_t __arrayPad(size_t size, const TypeInfo tinext) nothrow pure @trusted
}
/**
+ clear padding that might not be zeroed by the GC (the GC only zeroes the
+ requested size measured from the start, but the padding sits at the end of the allocated block)
+ */
+private void __arrayClearPad(ref BlkInfo info, size_t arrsize, size_t padsize) nothrow pure
+{
+ import core.stdc.string;
+ if (padsize > MEDPAD && !(info.attr & BlkAttr.NO_SCAN) && info.base)
+ {
+ if (info.size < PAGESIZE)
+ memset(info.base + arrsize, 0, padsize);
+ else
+ memset(info.base, 0, LARGEPREFIX);
+ }
+}
+
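The block layout assumed by this helper (and by `__arrayStart`/`__setArrayAllocLength`) is, roughly:

---
//   block  < PAGESIZE : [ elements ... | pad holding the used length ]
//   block >= PAGESIZE : [ LARGEPREFIX holding the used length | elements ... ]
//
// so the bytes the GC may have left stale sit at the end of small blocks and
// at the front of large ones, matching the two memset calls above.
---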
+/**
allocate an array memory block by applying the proper padding and
assigning block attributes if not inherited from the existing block
*/
-BlkInfo __arrayAlloc(size_t arrsize, const TypeInfo ti, const TypeInfo tinext) nothrow pure
+BlkInfo __arrayAlloc(size_t arrsize, const scope TypeInfo ti, const TypeInfo tinext) nothrow pure
{
import core.checkedint;
@@ -417,24 +432,30 @@ BlkInfo __arrayAlloc(size_t arrsize, const TypeInfo ti, const TypeInfo tinext) n
uint attr = (!(tinext.flags & 1) ? BlkAttr.NO_SCAN : 0) | BlkAttr.APPENDABLE;
if (typeInfoSize)
attr |= BlkAttr.STRUCTFINAL | BlkAttr.FINALIZE;
- return GC.qalloc(padded_size, attr, ti);
+
+ auto bi = GC.qalloc(padded_size, attr, tinext);
+ __arrayClearPad(bi, arrsize, padsize);
+ return bi;
}
-BlkInfo __arrayAlloc(size_t arrsize, ref BlkInfo info, const TypeInfo ti, const TypeInfo tinext)
+BlkInfo __arrayAlloc(size_t arrsize, ref BlkInfo info, const scope TypeInfo ti, const TypeInfo tinext)
{
import core.checkedint;
if (!info.base)
return __arrayAlloc(arrsize, ti, tinext);
+ immutable padsize = __arrayPad(arrsize, tinext);
bool overflow;
- auto padded_size = addu(arrsize, __arrayPad(arrsize, tinext), overflow);
+ auto padded_size = addu(arrsize, padsize, overflow);
if (overflow)
{
return BlkInfo();
}
- return GC.qalloc(padded_size, info.attr, ti);
+ auto bi = GC.qalloc(padded_size, info.attr, tinext);
+ __arrayClearPad(bi, arrsize, padsize);
+ return bi;
}
/**
@@ -468,6 +489,8 @@ else
{
if (!__blkcache_storage)
{
+ import core.stdc.stdlib;
+ import core.stdc.string;
// allocate the block cache for the first time
immutable size = BlkInfo.sizeof * N_CACHE_BLOCKS;
__blkcache_storage = cast(BlkInfo *)malloc(size);
@@ -482,6 +505,7 @@ static ~this()
// free the blkcache
if (__blkcache_storage)
{
+ import core.stdc.stdlib;
free(__blkcache_storage);
__blkcache_storage = null;
}
@@ -670,7 +694,10 @@ extern(C) void _d_arrayshrinkfit(const TypeInfo ti, void[] arr) /+nothrow+/
// Note: Since we "assume" the append is safe, it means it is not shared.
// Since it is not shared, we also know it won't throw (no lock).
if (!__setArrayAllocLength(info, newsize, false, tinext))
+ {
+ import core.exception : onInvalidMemoryOperationError;
onInvalidMemoryOperationError();
+ }
// cache the block if not already done.
if (!isshared && !bic)
@@ -720,14 +747,17 @@ void __doPostblit(void *ptr, size_t len, const TypeInfo ti)
* of 0 to get the current capacity. Returns the number of elements that can
* actually be stored once the resizing is done.
*/
-extern(C) size_t _d_arraysetcapacity(const TypeInfo ti, size_t newcapacity, void[]* p)
+extern(C) size_t _d_arraysetcapacity(const TypeInfo ti, size_t newcapacity, void[]* p) @weak
in
{
assert(ti);
assert(!(*p).length || (*p).ptr);
}
-body
+do
{
+ import core.stdc.string;
+ import core.exception : onOutOfMemoryError;
+
// step 1, get the block
auto isshared = typeid(ti) is typeid(TypeInfo_Shared);
auto bic = isshared ? null : __getBlkInfo((*p).ptr);
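A sketch of what reaches `_d_arraysetcapacity` from user code:

---
//   int[] a;
//   auto cap = a.reserve(100);   // -> _d_arraysetcapacity(typeid(int[]), 100, &a)
//   auto cur = a.capacity;       // -> same entry point with newcapacity == 0
---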
@@ -890,8 +920,10 @@ Lcontinue:
* Allocate a new uninitialized array of length elements.
* ti is the type of the resulting array, or pointer to element.
*/
-extern (C) void[] _d_newarrayU(const TypeInfo ti, size_t length) pure nothrow
+extern (C) void[] _d_newarrayU(const scope TypeInfo ti, size_t length) pure nothrow @weak
{
+ import core.exception : onOutOfMemoryError;
+
auto tinext = unqualify(ti.next);
auto size = tinext.tsize;
@@ -949,8 +981,10 @@ Lcontinue:
* ti is the type of the resulting array, or pointer to element.
* (For when the array is initialized to 0)
*/
-extern (C) void[] _d_newarrayT(const TypeInfo ti, size_t length) pure nothrow
+extern (C) void[] _d_newarrayT(const TypeInfo ti, size_t length) pure nothrow @weak
{
+ import core.stdc.string;
+
void[] result = _d_newarrayU(ti, length);
auto tinext = unqualify(ti.next);
auto size = tinext.tsize;
@@ -962,7 +996,7 @@ extern (C) void[] _d_newarrayT(const TypeInfo ti, size_t length) pure nothrow
/**
* For when the array has a non-zero initializer.
*/
-extern (C) void[] _d_newarrayiT(const TypeInfo ti, size_t length) pure nothrow
+extern (C) void[] _d_newarrayiT(const TypeInfo ti, size_t length) pure nothrow @weak
{
import core.internal.traits : AliasSeq;
@@ -983,6 +1017,7 @@ extern (C) void[] _d_newarrayiT(const TypeInfo ti, size_t length) pure nothrow
default:
{
+ import core.stdc.string;
immutable sz = init.length;
for (size_t u = 0; u < size * length; u += sz)
memcpy(result.ptr + u, init.ptr, sz);
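A sketch of the user-level expressions these allocators back:

---
//   auto a = new int[4];    // zero .init      -> _d_newarrayT(typeid(int[]), 4)
//   auto b = new float[4];  // non-zero .init  -> _d_newarrayiT(typeid(float[]), 4)
//
// Both first obtain uninitialised storage through _d_newarrayU above.
---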
@@ -1036,7 +1071,7 @@ void[] _d_newarrayOpT(alias op)(const TypeInfo ti, size_t[] dims)
/**
*
*/
-extern (C) void[] _d_newarraymTX(const TypeInfo ti, size_t[] dims)
+extern (C) void[] _d_newarraymTX(const TypeInfo ti, size_t[] dims) @weak
{
debug(PRINTF) printf("_d_newarraymT(dims.length = %d)\n", dims.length);
@@ -1052,7 +1087,7 @@ extern (C) void[] _d_newarraymTX(const TypeInfo ti, size_t[] dims)
/**
*
*/
-extern (C) void[] _d_newarraymiTX(const TypeInfo ti, size_t[] dims)
+extern (C) void[] _d_newarraymiTX(const TypeInfo ti, size_t[] dims) @weak
{
debug(PRINTF) printf("_d_newarraymiT(dims.length = %d)\n", dims.length);
@@ -1068,12 +1103,13 @@ extern (C) void[] _d_newarraymiTX(const TypeInfo ti, size_t[] dims)
* Allocate an uninitialized non-array item.
* This is an optimization to avoid things needed for arrays like the __arrayPad(size).
*/
-extern (C) void* _d_newitemU(in TypeInfo _ti)
+extern (C) void* _d_newitemU(scope const TypeInfo _ti) pure nothrow @weak
{
auto ti = unqualify(_ti);
auto flags = !(ti.flags & 1) ? BlkAttr.NO_SCAN : 0;
immutable tiSize = structTypeInfoSize(ti);
- immutable size = ti.tsize + tiSize;
+ immutable itemSize = ti.tsize;
+ immutable size = itemSize + tiSize;
if (tiSize)
flags |= BlkAttr.STRUCTFINAL | BlkAttr.FINALIZE;
@@ -1081,22 +1117,27 @@ extern (C) void* _d_newitemU(in TypeInfo _ti)
auto p = blkInf.base;
if (tiSize)
+ {
+ *cast(TypeInfo*)(p + itemSize) = null; // the GC might not have cleared this area
*cast(TypeInfo*)(p + blkInf.size - tiSize) = cast() ti;
+ }
return p;
}
/// Same as above, zero initializes the item.
-extern (C) void* _d_newitemT(in TypeInfo _ti)
+extern (C) void* _d_newitemT(in TypeInfo _ti) pure nothrow @weak
{
+ import core.stdc.string;
auto p = _d_newitemU(_ti);
memset(p, 0, _ti.tsize);
return p;
}
/// Same as above, for item with non-zero initializer.
-extern (C) void* _d_newitemiT(in TypeInfo _ti)
+extern (C) void* _d_newitemiT(in TypeInfo _ti) pure nothrow @weak
{
+ import core.stdc.string;
auto p = _d_newitemU(_ti);
auto init = _ti.initializer();
assert(init.length <= _ti.tsize);
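A sketch of the `new` expressions that reach these item allocators:

---
//   struct S { int x = 7; }
//   S*   p = new S;      // non-zero S.init -> _d_newitemiT(typeid(S))
//   int* q = new int;    // zero .init      -> _d_newitemT(typeid(int))
//
// Both go through _d_newitemU for the raw, attribute-tagged allocation.
---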
@@ -1113,15 +1154,6 @@ struct Array
byte* data;
}
-
-/**
- * This function has been replaced by _d_delarray_t
- */
-extern (C) void _d_delarray(void[]* p)
-{
- _d_delarray_t(p, null);
-}
-
debug(PRINTF)
{
extern(C) void printArrayCache()
@@ -1138,7 +1170,7 @@ debug(PRINTF)
/**
*
*/
-extern (C) void _d_delarray_t(void[]* p, const TypeInfo_Struct ti)
+extern (C) void _d_delarray_t(void[]* p, const TypeInfo_Struct ti) @weak
{
if (p)
{
@@ -1164,7 +1196,7 @@ extern (C) void _d_delarray_t(void[]* p, const TypeInfo_Struct ti)
}
}
-unittest
+deprecated unittest
{
__gshared size_t countDtor = 0;
struct S
@@ -1176,7 +1208,7 @@ unittest
auto x = new S[10000];
void* p = x.ptr;
assert(GC.addrOf(p) != null);
- delete x;
+ _d_delarray_t(cast(void[]*)&x, typeid(typeof(x[0]))); // delete x;
assert(GC.addrOf(p) == null);
assert(countDtor == 10000);
@@ -1185,7 +1217,7 @@ unittest
auto z = y[200 .. 300];
p = z.ptr;
assert(GC.addrOf(p) != null);
- delete z;
+ _d_delarray_t(cast(void[]*)&z, typeid(typeof(z[0]))); // delete z;
assert(GC.addrOf(p) == null);
assert(countDtor == 10000 + 400);
}
@@ -1193,7 +1225,7 @@ unittest
/**
*
*/
-extern (C) void _d_delmemory(void* *p)
+extern (C) void _d_delmemory(void* *p) @weak
{
if (*p)
{
@@ -1206,7 +1238,7 @@ extern (C) void _d_delmemory(void* *p)
/**
*
*/
-extern (C) void _d_callinterfacefinalizer(void *p)
+extern (C) void _d_callinterfacefinalizer(void *p) @weak
{
if (p)
{
@@ -1220,7 +1252,7 @@ extern (C) void _d_callinterfacefinalizer(void *p)
/**
*
*/
-extern (C) void _d_callfinalizer(void* p)
+extern (C) void _d_callfinalizer(void* p) @weak
{
rt_finalize( p );
}
@@ -1324,6 +1356,7 @@ void finalize_array2(void* p, size_t size) nothrow
}
catch (Exception e)
{
+ import core.exception : onFinalizeError;
onFinalizeError(si, e);
}
}
@@ -1353,6 +1386,7 @@ void finalize_struct(void* p, size_t size) nothrow
}
catch (Exception e)
{
+ import core.exception : onFinalizeError;
onFinalizeError(ti, e);
}
}
@@ -1393,6 +1427,7 @@ extern (C) void rt_finalize2(void* p, bool det = true, bool resetMemory = true)
}
catch (Exception e)
{
+ import core.exception : onFinalizeError;
onFinalizeError(*pc, e);
}
finally
@@ -1401,12 +1436,12 @@ extern (C) void rt_finalize2(void* p, bool det = true, bool resetMemory = true)
}
}
-extern (C) void rt_finalize(void* p, bool det = true)
+extern (C) void rt_finalize(void* p, bool det = true) nothrow
{
rt_finalize2(p, det, true);
}
-extern (C) void rt_finalizeFromGC(void* p, size_t size, uint attr)
+extern (C) void rt_finalizeFromGC(void* p, size_t size, uint attr) nothrow
{
// to verify: reset memory necessary?
if (!(attr & BlkAttr.STRUCTFINAL))
@@ -1421,14 +1456,17 @@ extern (C) void rt_finalizeFromGC(void* p, size_t size, uint attr)
/**
* Resize dynamic arrays with 0 initializers.
*/
-extern (C) void[] _d_arraysetlengthT(const TypeInfo ti, size_t newlength, void[]* p)
+extern (C) void[] _d_arraysetlengthT(const TypeInfo ti, size_t newlength, void[]* p) @weak
in
{
assert(ti);
assert(!(*p).length || (*p).ptr);
}
-body
+do
{
+ import core.stdc.string;
+ import core.exception : onOutOfMemoryError;
+
debug(PRINTF)
{
//printf("_d_arraysetlengthT(p = %p, sizeelem = %d, newlength = %d)\n", p, sizeelem, newlength);
@@ -1436,170 +1474,179 @@ body
printf("\tp.ptr = %p, p.length = %d\n", (*p).ptr, (*p).length);
}
- void* newdata = void;
- if (newlength)
+ if (newlength <= (*p).length)
{
- if (newlength <= (*p).length)
+ *p = (*p)[0 .. newlength];
+ void* newdata = (*p).ptr;
+ return newdata[0 .. newlength];
+ }
+ auto tinext = unqualify(ti.next);
+ size_t sizeelem = tinext.tsize;
+
+ /* Calculate: newsize = newlength * sizeelem
+ */
+ bool overflow = false;
+ version (D_InlineAsm_X86)
+ {
+ size_t newsize = void;
+
+ asm pure nothrow @nogc
{
- *p = (*p)[0 .. newlength];
- newdata = (*p).ptr;
- return newdata[0 .. newlength];
+ mov EAX, newlength;
+ mul EAX, sizeelem;
+ mov newsize, EAX;
+ setc overflow;
}
- auto tinext = unqualify(ti.next);
- size_t sizeelem = tinext.tsize;
- version (D_InlineAsm_X86)
- {
- size_t newsize = void;
+ }
+ else version (D_InlineAsm_X86_64)
+ {
+ size_t newsize = void;
- asm pure nothrow @nogc
- {
- mov EAX, newlength;
- mul EAX, sizeelem;
- mov newsize, EAX;
- jc Loverflow;
- }
- }
- else version (D_InlineAsm_X86_64)
+ asm pure nothrow @nogc
{
- size_t newsize = void;
-
- asm pure nothrow @nogc
- {
- mov RAX, newlength;
- mul RAX, sizeelem;
- mov newsize, RAX;
- jc Loverflow;
- }
+ mov RAX, newlength;
+ mul RAX, sizeelem;
+ mov newsize, RAX;
+ setc overflow;
}
- else
- {
- import core.checkedint : mulu;
+ }
+ else
+ {
+ import core.checkedint : mulu;
+ const size_t newsize = mulu(sizeelem, newlength, overflow);
+ }
+ if (overflow)
+ {
+ onOutOfMemoryError();
+ assert(0);
+ }
- bool overflow = false;
- size_t newsize = mulu(sizeelem, newlength, overflow);
- if (overflow)
- goto Loverflow;
- }
+ debug(PRINTF) printf("newsize = %x, newlength = %x\n", newsize, newlength);
- debug(PRINTF) printf("newsize = %x, newlength = %x\n", newsize, newlength);
+ const isshared = typeid(ti) is typeid(TypeInfo_Shared);
+
+ if (!(*p).ptr)
+ {
+ // pointer was null, need to allocate
+ auto info = __arrayAlloc(newsize, ti, tinext);
+ if (info.base is null)
+ {
+ onOutOfMemoryError();
+ assert(0);
+ }
+ __setArrayAllocLength(info, newsize, isshared, tinext);
+ if (!isshared)
+ __insertBlkInfoCache(info, null);
+ void* newdata = cast(byte *)__arrayStart(info);
+ memset(newdata, 0, newsize);
+ *p = newdata[0 .. newlength];
+ return *p;
+ }
- auto isshared = typeid(ti) is typeid(TypeInfo_Shared);
+ const size_t size = (*p).length * sizeelem;
+ auto bic = isshared ? null : __getBlkInfo((*p).ptr);
+ auto info = bic ? *bic : GC.query((*p).ptr);
- if ((*p).ptr)
+ /* Attempt to extend past the end of the existing array.
+ * If not possible, allocate new space for entire array and copy.
+ */
+ bool allocateAndCopy = false;
+ void* newdata = (*p).ptr;
+ if (info.base && (info.attr & BlkAttr.APPENDABLE))
+ {
+ // calculate the extent of the array given the base.
+ const size_t offset = (*p).ptr - __arrayStart(info);
+ if (info.size >= PAGESIZE)
{
- newdata = (*p).ptr;
- if (newlength > (*p).length)
+ // size of array is at the front of the block
+ if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset))
{
- size_t size = (*p).length * sizeelem;
- auto bic = isshared ? null : __getBlkInfo((*p).ptr);
- auto info = bic ? *bic : GC.query((*p).ptr);
- if (info.base && (info.attr & BlkAttr.APPENDABLE))
+ // check to see if it failed because there is not
+ // enough space
+ if (*(cast(size_t*)info.base) == size + offset)
{
- // calculate the extent of the array given the base.
- size_t offset = (*p).ptr - __arrayStart(info);
- if (info.size >= PAGESIZE)
+ // not enough space, try extending
+ auto extendsize = newsize + offset + LARGEPAD - info.size;
+ auto u = GC.extend(info.base, extendsize, extendsize);
+ if (u)
{
- // size of array is at the front of the block
- if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset))
- {
- // check to see if it failed because there is not
- // enough space
- if (*(cast(size_t*)info.base) == size + offset)
- {
- // not enough space, try extending
- auto extendsize = newsize + offset + LARGEPAD - info.size;
- auto u = GC.extend(info.base, extendsize, extendsize);
- if (u)
- {
- // extend worked, now try setting the length
- // again.
- info.size = u;
- if (__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset))
- {
- if (!isshared)
- __insertBlkInfoCache(info, bic);
- goto L1;
- }
- }
- }
-
- // couldn't do it, reallocate
- goto L2;
- }
- else if (!isshared && !bic)
+ // extend worked, now try setting the length
+ // again.
+ info.size = u;
+ if (__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset))
{
- // add this to the cache, it wasn't present previously.
- __insertBlkInfoCache(info, null);
+ if (!isshared)
+ __insertBlkInfoCache(info, bic);
+ memset(newdata + size, 0, newsize - size);
+ *p = newdata[0 .. newlength];
+ return *p;
}
}
- else if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset))
- {
- // could not resize in place
- goto L2;
- }
- else if (!isshared && !bic)
- {
- // add this to the cache, it wasn't present previously.
- __insertBlkInfoCache(info, null);
- }
}
- else
- {
- if (info.base)
- {
- L2:
- if (bic)
- {
- // a chance that flags have changed since this was cached, we should fetch the most recent flags
- info.attr = GC.getAttr(info.base) | BlkAttr.APPENDABLE;
- }
- info = __arrayAlloc(newsize, info, ti, tinext);
- }
- else
- {
- info = __arrayAlloc(newsize, ti, tinext);
- }
- if (info.base is null)
- goto Loverflow;
-
- __setArrayAllocLength(info, newsize, isshared, tinext);
- if (!isshared)
- __insertBlkInfoCache(info, bic);
- newdata = cast(byte *)__arrayStart(info);
- newdata[0 .. size] = (*p).ptr[0 .. size];
-
- // do postblit processing
- __doPostblit(newdata, size, tinext);
- }
- L1:
- memset(newdata + size, 0, newsize - size);
+ // couldn't do it, reallocate
+ allocateAndCopy = true;
+ }
+ else if (!isshared && !bic)
+ {
+ // add this to the cache, it wasn't present previously.
+ __insertBlkInfoCache(info, null);
}
}
- else
+ else if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset))
{
- // pointer was null, need to allocate
- auto info = __arrayAlloc(newsize, ti, tinext);
- if (info.base is null)
- goto Loverflow;
- __setArrayAllocLength(info, newsize, isshared, tinext);
- if (!isshared)
- __insertBlkInfoCache(info, null);
- newdata = cast(byte *)__arrayStart(info);
- memset(newdata, 0, newsize);
+ // could not resize in place
+ allocateAndCopy = true;
+ }
+ else if (!isshared && !bic)
+ {
+ // add this to the cache, it wasn't present previously.
+ __insertBlkInfoCache(info, null);
}
}
else
+ allocateAndCopy = true;
+
+ if (allocateAndCopy)
{
- newdata = (*p).ptr;
+ if (info.base)
+ {
+ if (bic)
+ {
+ // a chance that flags have changed since this was cached, we should fetch the most recent flags
+ info.attr = GC.getAttr(info.base) | BlkAttr.APPENDABLE;
+ }
+ info = __arrayAlloc(newsize, info, ti, tinext);
+ }
+ else
+ {
+ info = __arrayAlloc(newsize, ti, tinext);
+ }
+
+ if (info.base is null)
+ {
+ onOutOfMemoryError();
+ assert(0);
+ }
+
+ __setArrayAllocLength(info, newsize, isshared, tinext);
+ if (!isshared)
+ __insertBlkInfoCache(info, bic);
+ newdata = cast(byte *)__arrayStart(info);
+ newdata[0 .. size] = (*p).ptr[0 .. size];
+
+ /* Do postblit processing, as we are making a copy and the
+ * original array may have references.
+ * Note that this may throw.
+ */
+ __doPostblit(newdata, size, tinext);
}
+ // Zero the unused portion of the newly allocated space
+ memset(newdata + size, 0, newsize - size);
+
*p = newdata[0 .. newlength];
return *p;
-
-Loverflow:
- onOutOfMemoryError();
- assert(0);
}
@@ -1611,205 +1658,221 @@ Loverflow:
* initsize size of initializer
* ... initializer
*/
-extern (C) void[] _d_arraysetlengthiT(const TypeInfo ti, size_t newlength, void[]* p)
+extern (C) void[] _d_arraysetlengthiT(const TypeInfo ti, size_t newlength, void[]* p) @weak
in
{
assert(!(*p).length || (*p).ptr);
}
-body
+do
{
- void* newdata;
- auto tinext = unqualify(ti.next);
- auto sizeelem = tinext.tsize;
- auto initializer = tinext.initializer();
- auto initsize = initializer.length;
-
- assert(sizeelem);
- assert(initsize);
- assert(initsize <= sizeelem);
- assert((sizeelem / initsize) * initsize == sizeelem);
+ import core.stdc.string;
+ import core.exception : onOutOfMemoryError;
debug(PRINTF)
{
- printf("_d_arraysetlengthiT(p = %p, sizeelem = %d, newlength = %d, initsize = %d)\n", p, sizeelem, newlength, initsize);
+ //printf("_d_arraysetlengthiT(p = %p, sizeelem = %d, newlength = %d)\n", p, sizeelem, newlength);
if (p)
- printf("\tp.data = %p, p.length = %d\n", (*p).ptr, (*p).length);
+ printf("\tp.ptr = %p, p.length = %d\n", (*p).ptr, (*p).length);
}
- if (newlength)
+ if (newlength <= (*p).length)
{
- version (D_InlineAsm_X86)
- {
- size_t newsize = void;
+ *p = (*p)[0 .. newlength];
+ void* newdata = (*p).ptr;
+ return newdata[0 .. newlength];
+ }
+ auto tinext = unqualify(ti.next);
+ size_t sizeelem = tinext.tsize;
- asm
- {
- mov EAX,newlength ;
- mul EAX,sizeelem ;
- mov newsize,EAX ;
- jc Loverflow ;
- }
+ /* Calculate: newsize = newlength * sizeelem
+ */
+ bool overflow = false;
+ version (D_InlineAsm_X86)
+ {
+ size_t newsize = void;
+
+ asm pure nothrow @nogc
+ {
+ mov EAX, newlength;
+ mul EAX, sizeelem;
+ mov newsize, EAX;
+ setc overflow;
}
- else version (D_InlineAsm_X86_64)
+ }
+ else version (D_InlineAsm_X86_64)
+ {
+ size_t newsize = void;
+
+ asm pure nothrow @nogc
{
- size_t newsize = void;
+ mov RAX, newlength;
+ mul RAX, sizeelem;
+ mov newsize, RAX;
+ setc overflow;
+ }
+ }
+ else
+ {
+ import core.checkedint : mulu;
+ const size_t newsize = mulu(sizeelem, newlength, overflow);
+ }
+ if (overflow)
+ {
+ onOutOfMemoryError();
+ assert(0);
+ }
- asm
- {
- mov RAX,newlength ;
- mul RAX,sizeelem ;
- mov newsize,RAX ;
- jc Loverflow ;
- }
+ debug(PRINTF) printf("newsize = %x, newlength = %x\n", newsize, newlength);
+
+ const isshared = typeid(ti) is typeid(TypeInfo_Shared);
+
+ static void doInitialize(void *start, void *end, const void[] initializer)
+ {
+ if (initializer.length == 1)
+ {
+ memset(start, *(cast(ubyte*)initializer.ptr), end - start);
}
else
{
- import core.checkedint : mulu;
+ auto q = initializer.ptr;
+ immutable initsize = initializer.length;
+ for (; start < end; start += initsize)
+ {
+ memcpy(start, q, initsize);
+ }
+ }
+ }
- bool overflow = false;
- size_t newsize = mulu(sizeelem, newlength, overflow);
- if (overflow)
- goto Loverflow;
+ if (!(*p).ptr)
+ {
+ // pointer was null, need to allocate
+ auto info = __arrayAlloc(newsize, ti, tinext);
+ if (info.base is null)
+ {
+ onOutOfMemoryError();
+ assert(0);
}
- debug(PRINTF) printf("newsize = %x, newlength = %x\n", newsize, newlength);
+ __setArrayAllocLength(info, newsize, isshared, tinext);
+ if (!isshared)
+ __insertBlkInfoCache(info, null);
+ void* newdata = cast(byte *)__arrayStart(info);
+ doInitialize(newdata, newdata + newsize, tinext.initializer);
+ *p = newdata[0 .. newlength];
+ return *p;
+ }
+ const size_t size = (*p).length * sizeelem;
+ auto bic = isshared ? null : __getBlkInfo((*p).ptr);
+ auto info = bic ? *bic : GC.query((*p).ptr);
- size_t size = (*p).length * sizeelem;
- auto isshared = typeid(ti) is typeid(TypeInfo_Shared);
- if ((*p).ptr)
+ /* Attempt to extend past the end of the existing array.
+ * If not possible, allocate new space for entire array and copy.
+ */
+ bool allocateAndCopy = false;
+ void* newdata = (*p).ptr;
+
+ if (info.base && (info.attr & BlkAttr.APPENDABLE))
+ {
+ // calculate the extent of the array given the base.
+ const size_t offset = (*p).ptr - __arrayStart(info);
+ if (info.size >= PAGESIZE)
{
- newdata = (*p).ptr;
- if (newlength > (*p).length)
+ // size of array is at the front of the block
+ if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset))
{
- auto bic = isshared ? null : __getBlkInfo((*p).ptr);
- auto info = bic ? *bic : GC.query((*p).ptr);
-
- // calculate the extent of the array given the base.
- size_t offset = (*p).ptr - __arrayStart(info);
- if (info.base && (info.attr & BlkAttr.APPENDABLE))
- {
- if (info.size >= PAGESIZE)
- {
- // size of array is at the front of the block
- if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset))
- {
- // check to see if it failed because there is not
- // enough space
- if (*(cast(size_t*)info.base) == size + offset)
- {
- // not enough space, try extending
- auto extendsize = newsize + offset + LARGEPAD - info.size;
- auto u = GC.extend(info.base, extendsize, extendsize);
- if (u)
- {
- // extend worked, now try setting the length
- // again.
- info.size = u;
- if (__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset))
- {
- if (!isshared)
- __insertBlkInfoCache(info, bic);
- goto L1;
- }
- }
- }
-
- // couldn't do it, reallocate
- goto L2;
- }
- else if (!isshared && !bic)
- {
- // add this to the cache, it wasn't present previously.
- __insertBlkInfoCache(info, null);
- }
- }
- else if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset))
- {
- // could not resize in place
- goto L2;
- }
- else if (!isshared && !bic)
- {
- // add this to the cache, it wasn't present previously.
- __insertBlkInfoCache(info, null);
- }
- }
- else
+ // check to see if it failed because there is not
+ // enough space
+ if (*(cast(size_t*)info.base) == size + offset)
{
- // not appendable or not part of the heap yet.
- if (info.base)
+ // not enough space, try extending
+ auto extendsize = newsize + offset + LARGEPAD - info.size;
+ auto u = GC.extend(info.base, extendsize, extendsize);
+ if (u)
{
- L2:
- if (bic)
+ // extend worked, now try setting the length
+ // again.
+ info.size = u;
+ if (__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset))
{
- // a chance that flags have changed since this was cached, we should fetch the most recent flags
- info.attr = GC.getAttr(info.base) | BlkAttr.APPENDABLE;
+ if (!isshared)
+ __insertBlkInfoCache(info, bic);
+ doInitialize(newdata + size, newdata + newsize, tinext.initializer);
+ *p = newdata[0 .. newlength];
+ return *p;
}
- info = __arrayAlloc(newsize, info, ti, tinext);
}
- else
- {
- info = __arrayAlloc(newsize, ti, tinext);
- }
- __setArrayAllocLength(info, newsize, isshared, tinext);
- if (!isshared)
- __insertBlkInfoCache(info, bic);
- newdata = cast(byte *)__arrayStart(info);
- newdata[0 .. size] = (*p).ptr[0 .. size];
-
- // do postblit processing
- __doPostblit(newdata, size, tinext);
}
- L1: ;
+
+ // couldn't do it, reallocate
+ allocateAndCopy = true;
+ }
+ else if (!isshared && !bic)
+ {
+ // add this to the cache, it wasn't present previously.
+ __insertBlkInfoCache(info, null);
}
}
- else
+ else if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset))
{
- // length was zero, need to allocate
- auto info = __arrayAlloc(newsize, ti, tinext);
- __setArrayAllocLength(info, newsize, isshared, tinext);
- if (!isshared)
- __insertBlkInfoCache(info, null);
- newdata = cast(byte *)__arrayStart(info);
+ // could not resize in place
+ allocateAndCopy = true;
}
-
- auto q = initializer.ptr; // pointer to initializer
-
- if (newsize > size)
+ else if (!isshared && !bic)
{
- if (initsize == 1)
- {
- debug(PRINTF) printf("newdata = %p, size = %d, newsize = %d, *q = %d\n", newdata, size, newsize, *cast(byte*)q);
- memset(newdata + size, *cast(byte*)q, newsize - size);
- }
- else
- {
- for (size_t u = size; u < newsize; u += initsize)
- {
- memcpy(newdata + u, q, initsize);
- }
- }
+ // add this to the cache, it wasn't present previously.
+ __insertBlkInfoCache(info, null);
}
}
else
+ allocateAndCopy = true;
+
+ if (allocateAndCopy)
{
- newdata = (*p).ptr;
+ if (info.base)
+ {
+ if (bic)
+ {
+                // there is a chance that the flags have changed since this was cached; fetch the most recent ones
+ info.attr = GC.getAttr(info.base) | BlkAttr.APPENDABLE;
+ }
+ info = __arrayAlloc(newsize, info, ti, tinext);
+ }
+ else
+ {
+ info = __arrayAlloc(newsize, ti, tinext);
+ }
+
+ if (info.base is null)
+ {
+ onOutOfMemoryError();
+ assert(0);
+ }
+
+ __setArrayAllocLength(info, newsize, isshared, tinext);
+ if (!isshared)
+ __insertBlkInfoCache(info, bic);
+ newdata = cast(byte *)__arrayStart(info);
+ newdata[0 .. size] = (*p).ptr[0 .. size];
+
+ /* Do postblit processing, as we are making a copy and the
+ * original array may have references.
+ * Note that this may throw.
+ */
+ __doPostblit(newdata, size, tinext);
}
+ // Initialize the unused portion of the newly allocated space
+ doInitialize(newdata + size, newdata + newsize, tinext.initializer);
*p = newdata[0 .. newlength];
return *p;
-
-Loverflow:
- onOutOfMemoryError();
- assert(0);
}
-
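[Editor's note — not part of the patch] The rewritten _d_arraysetlengthiT above backs `arr.length = n` for element types whose default initializer is not all zero bits; the new doInitialize helper fills the grown tail with that initializer, and shrinking now returns early without touching the allocation. A minimal caller-side sketch of the observable behaviour, assuming a current D compiler that lowers `length` assignment to this hook (or its templated successor):

unittest
{
    char[] c;
    c.length = 3;                           // char.init is 0xFF, so the new tail is 0xFF-filled
    assert(c[0] == 0xFF && c[1] == 0xFF && c[2] == 0xFF);

    double[] d = [1.0];
    d.length = 3;                           // double.init is NaN, copied element by element
    assert(d[0] == 1.0);
    assert(d[1] != d[1] && d[2] != d[2]);   // NaN compares unequal to itself

    d.length = 1;                           // shrinking takes the early-return path, no reallocation
    assert(d.length == 1 && d[0] == 1.0);
}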
/**
* Append y[] to array x[]
*/
-extern (C) void[] _d_arrayappendT(const TypeInfo ti, ref byte[] x, byte[] y)
+extern (C) void[] _d_arrayappendT(const TypeInfo ti, ref byte[] x, byte[] y) @weak
{
+ import core.stdc.string;
auto length = x.length;
auto tinext = unqualify(ti.next);
auto sizeelem = tinext.tsize; // array element size
@@ -1879,6 +1942,7 @@ size_t newCapacity(size_t newlength, size_t size)
*/
//long mult = 100 + (1000L * size) / (6 * log2plus1(newcap));
//long mult = 100 + (1000L * size) / log2plus1(newcap);
+ import core.bitop;
long mult = 100 + (1000L) / (bsr(newcap) + 1);
// testing shows 1.02 for large arrays is about the point of diminishing return
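[Editor's note — not part of the patch] With the explicit core.bitop import added above, the retained growth heuristic is mult = 100 + 1000/(bsr(newcap)+1) percent, so the relative over-allocation shrinks as blocks grow. A standalone check of that arithmetic, assuming newcap is the requested size in bytes as in the surrounding (partly elided) newCapacity:

unittest
{
    import core.bitop : bsr;

    static long mult(size_t newcap) { return 100 + 1000L / (bsr(newcap) + 1); }

    assert(mult(16) == 300);        // tiny block: grow to ~3.0x the request
    assert(mult(1024) == 190);      // 1 KiB: ~1.9x
    assert(mult(1 << 20) == 147);   // 1 MiB: ~1.47x, falling toward the ~1.02 noted above
}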
@@ -1908,8 +1972,9 @@ size_t newCapacity(size_t newlength, size_t size)
* Caller must initialize those elements.
*/
extern (C)
-byte[] _d_arrayappendcTX(const TypeInfo ti, ref byte[] px, size_t n)
+byte[] _d_arrayappendcTX(const TypeInfo ti, return scope ref byte[] px, size_t n) @weak
{
+ import core.stdc.string;
// This is a cut&paste job from _d_arrayappendT(). Should be refactored.
// only optimize array append where ti is not a shared type
@@ -2011,28 +2076,28 @@ byte[] _d_arrayappendcTX(const TypeInfo ti, ref byte[] px, size_t n)
/**
* Append dchar to char[]
*/
-extern (C) void[] _d_arrayappendcd(ref byte[] x, dchar c)
+extern (C) void[] _d_arrayappendcd(ref byte[] x, dchar c) @weak
{
// c could encode into from 1 to 4 characters
char[4] buf = void;
- byte[] appendthis; // passed to appendT
+ char[] appendthis; // passed to appendT
if (c <= 0x7F)
{
buf.ptr[0] = cast(char)c;
- appendthis = (cast(byte *)buf.ptr)[0..1];
+ appendthis = buf[0..1];
}
else if (c <= 0x7FF)
{
buf.ptr[0] = cast(char)(0xC0 | (c >> 6));
buf.ptr[1] = cast(char)(0x80 | (c & 0x3F));
- appendthis = (cast(byte *)buf.ptr)[0..2];
+ appendthis = buf[0..2];
}
else if (c <= 0xFFFF)
{
buf.ptr[0] = cast(char)(0xE0 | (c >> 12));
buf.ptr[1] = cast(char)(0x80 | ((c >> 6) & 0x3F));
buf.ptr[2] = cast(char)(0x80 | (c & 0x3F));
- appendthis = (cast(byte *)buf.ptr)[0..3];
+ appendthis = buf[0..3];
}
else if (c <= 0x10FFFF)
{
@@ -2040,7 +2105,7 @@ extern (C) void[] _d_arrayappendcd(ref byte[] x, dchar c)
buf.ptr[1] = cast(char)(0x80 | ((c >> 12) & 0x3F));
buf.ptr[2] = cast(char)(0x80 | ((c >> 6) & 0x3F));
buf.ptr[3] = cast(char)(0x80 | (c & 0x3F));
- appendthis = (cast(byte *)buf.ptr)[0..4];
+ appendthis = buf[0..4];
}
else
{
@@ -2053,7 +2118,12 @@ extern (C) void[] _d_arrayappendcd(ref byte[] x, dchar c)
// get a typeinfo from the compiler. Assuming shared is the safest option.
// Once the compiler is fixed, the proper typeinfo should be forwarded.
//
- return _d_arrayappendT(typeid(shared char[]), x, appendthis);
+
+ // Hack because _d_arrayappendT takes `x` as a reference
+ auto xx = cast(shared(char)[])x;
+ object._d_arrayappendTImpl!(shared(char)[])._d_arrayappendT(xx, cast(shared(char)[])appendthis);
+ x = cast(byte[])xx;
+ return x;
}
unittest
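[Editor's note — not part of the patch] _d_arrayappendcd now builds the UTF-8 sequence in the char[4] buffer and forwards to the templated object._d_arrayappendT, using the xx round-trip to work around the by-reference parameter. The branch structure mirrors the standard UTF-8 encoding lengths; a small caller-side illustration:

unittest
{
    char[] s;
    s ~= cast(dchar) 'a';         // U+0061  -> 1 byte
    s ~= cast(dchar) 0x00E9;      // U+00E9  -> 2 bytes (0xC3 0xA9)
    s ~= cast(dchar) 0x20AC;      // U+20AC  -> 3 bytes (0xE2 0x82 0xAC)
    s ~= cast(dchar) 0x1F600;     // U+1F600 -> 4 bytes (0xF0 0x9F 0x98 0x80)
    assert(s.length == 1 + 2 + 3 + 4);
    assert(s[1] == 0xC3 && s[2] == 0xA9);
}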
@@ -2088,25 +2158,21 @@ unittest
/**
* Append dchar to wchar[]
*/
-extern (C) void[] _d_arrayappendwd(ref byte[] x, dchar c)
+extern (C) void[] _d_arrayappendwd(ref byte[] x, dchar c) @weak
{
// c could encode into from 1 to 2 w characters
wchar[2] buf = void;
- byte[] appendthis; // passed to appendT
+ wchar[] appendthis; // passed to appendT
if (c <= 0xFFFF)
{
buf.ptr[0] = cast(wchar) c;
- // note that although we are passing only 1 byte here, appendT
- // interprets this as being an array of wchar, making the necessary
- // casts.
- appendthis = (cast(byte *)buf.ptr)[0..1];
+ appendthis = buf[0..1];
}
else
{
buf.ptr[0] = cast(wchar) ((((c - 0x10000) >> 10) & 0x3FF) + 0xD800);
buf.ptr[1] = cast(wchar) (((c - 0x10000) & 0x3FF) + 0xDC00);
- // ditto from above.
- appendthis = (cast(byte *)buf.ptr)[0..2];
+ appendthis = buf[0..2];
}
//
@@ -2114,14 +2180,18 @@ extern (C) void[] _d_arrayappendwd(ref byte[] x, dchar c)
// get a typeinfo from the compiler. Assuming shared is the safest option.
// Once the compiler is fixed, the proper typeinfo should be forwarded.
//
- return _d_arrayappendT(typeid(shared wchar[]), x, appendthis);
+
+ auto xx = (cast(shared(wchar)*)x.ptr)[0 .. x.length];
+ object._d_arrayappendTImpl!(shared(wchar)[])._d_arrayappendT(xx, cast(shared(wchar)[])appendthis);
+ x = (cast(byte*)xx.ptr)[0 .. xx.length];
+ return x;
}
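[Editor's note — not part of the patch] The wchar variant follows the same pattern, encoding the dchar into at most two UTF-16 code units before forwarding to the templated append. A caller-side illustration of the two branches:

unittest
{
    wchar[] s;
    s ~= cast(dchar) 0x20AC;      // BMP code point: stored as a single wchar
    s ~= cast(dchar) 0x1F600;     // outside the BMP: stored as a surrogate pair
    assert(s.length == 3);
    assert(s[0] == 0x20AC && s[1] == 0xD83D && s[2] == 0xDE00);
}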
/**
*
*/
-extern (C) byte[] _d_arraycatT(const TypeInfo ti, byte[] x, byte[] y)
+extern (C) byte[] _d_arraycatT(const TypeInfo ti, byte[] x, byte[] y) @weak
out (result)
{
auto tinext = unqualify(ti.next);
@@ -2142,8 +2212,9 @@ out (result)
size_t cap = GC.sizeOf(result.ptr);
assert(!cap || cap > result.length * sizeelem);
}
-body
+do
{
+ import core.stdc.string;
version (none)
{
/* Cannot use this optimization because:
@@ -2186,8 +2257,10 @@ body
/**
*
*/
-extern (C) void[] _d_arraycatnTX(const TypeInfo ti, byte[][] arrs)
+extern (C) void[] _d_arraycatnTX(const TypeInfo ti, scope byte[][] arrs) @weak
{
+ import core.stdc.string;
+
size_t length;
auto tinext = unqualify(ti.next);
auto size = tinext.tsize; // array element size
@@ -2225,7 +2298,7 @@ extern (C) void[] _d_arraycatnTX(const TypeInfo ti, byte[][] arrs)
* Allocate the array, rely on the caller to do the initialization of the array.
*/
extern (C)
-void* _d_arrayliteralTX(const TypeInfo ti, size_t length)
+void* _d_arrayliteralTX(const TypeInfo ti, size_t length) @weak
{
auto tinext = unqualify(ti.next);
auto sizeelem = tinext.tsize; // array element size
@@ -2333,7 +2406,7 @@ unittest
}
// cannot define structs inside unit test block, or they become nested structs.
-version (unittest)
+version (CoreUnittest)
{
struct S1
{
@@ -2383,12 +2456,22 @@ unittest
// Bugzilla 3454 - Inconsistent flag setting in GC.realloc()
static void test(size_t multiplier)
{
- auto p = GC.malloc(8 * multiplier, BlkAttr.NO_SCAN);
+ auto p = GC.malloc(8 * multiplier, 0);
+ assert(GC.getAttr(p) == 0);
+
+ // no move, set attr
+ p = GC.realloc(p, 8 * multiplier + 5, BlkAttr.NO_SCAN);
assert(GC.getAttr(p) == BlkAttr.NO_SCAN);
- p = GC.realloc(p, 2 * multiplier, BlkAttr.NO_SCAN);
+
+ // shrink, copy attr
+ p = GC.realloc(p, 2 * multiplier, 0);
+ assert(GC.getAttr(p) == BlkAttr.NO_SCAN);
+
+ // extend, copy attr
+ p = GC.realloc(p, 8 * multiplier, 0);
assert(GC.getAttr(p) == BlkAttr.NO_SCAN);
}
- test(1);
+ test(16);
test(1024 * 1024);
}
@@ -2505,7 +2588,7 @@ unittest
// test struct finalizers
debug(SENTINEL) {} else
-unittest
+deprecated unittest
{
__gshared int dtorCount;
static struct S1
@@ -2520,12 +2603,12 @@ unittest
dtorCount = 0;
S1* s1 = new S1;
- delete s1;
+ _d_delstruct(cast(void**)&s1, typeid(typeof(*s1))); // delete s1;
assert(dtorCount == 1);
dtorCount = 0;
S1[] arr1 = new S1[7];
- delete arr1;
+ _d_delarray_t(cast(void[]*)&arr1, typeid(typeof(arr1[0]))); // delete arr1;
assert(dtorCount == 7);
dtorCount = 0;
@@ -2596,6 +2679,77 @@ unittest
assert(dtorCount == 4);
}
+// test struct dtor handling not causing false pointers
+unittest
+{
+ // for 64-bit, allocate a struct of size 40
+ static struct S
+ {
+ size_t[4] data;
+ S* ptr4;
+ }
+ auto p1 = new S;
+ auto p2 = new S;
+ p2.ptr4 = p1;
+
+    // a struct with a dtor and a size of 32; the dtor will cause the
+    // allocation to be one pointer larger
+ static struct A
+ {
+ size_t[3] data;
+ S* ptr3;
+
+ ~this() {}
+ }
+
+ GC.free(p2);
+ auto a = new A; // reuse same memory
+    if (cast(void*)a is cast(void*)p2) // reuse of the memory is not guaranteed
+ {
+ auto ptr = cast(S**)(a + 1);
+ assert(*ptr != p1); // still same data as p2.ptr4?
+ }
+
+ // small array
+ static struct SArr
+ {
+ void*[10] data;
+ }
+ auto arr1 = new SArr;
+ arr1.data[] = p1;
+ GC.free(arr1);
+
+ // allocates 2*A.sizeof + (void*).sizeof (TypeInfo) + 1 (array length)
+ auto arr2 = new A[2];
+    if (cast(void*)arr1 is cast(void*)arr2.ptr) // reuse of the memory is not guaranteed
+ {
+ auto ptr = cast(S**)(arr2.ptr + 2);
+ assert(*ptr != p1); // still same data as p2.ptr4?
+ }
+
+ // large array
+ static struct LArr
+ {
+ void*[1023] data;
+ }
+ auto larr1 = new LArr;
+ larr1.data[] = p1;
+ GC.free(larr1);
+
+ auto larr2 = new S[255];
+    if (cast(void*)larr1 is cast(void*)larr2.ptr - LARGEPREFIX) // reuse of the memory is not guaranteed
+ {
+ auto ptr = cast(S**)larr1;
+ assert(ptr[0] != p1); // 16 bytes array header
+ assert(ptr[1] != p1);
+ version (D_LP64) {} else
+ {
+ assert(ptr[2] != p1);
+ assert(ptr[3] != p1);
+ }
+ }
+}
+
// test class finalizers exception handling
unittest
{
diff --git a/libphobos/libdruntime/rt/memory.d b/libphobos/libdruntime/rt/memory.d
index 220b3d2..99b00c0 100644
--- a/libphobos/libdruntime/rt/memory.d
+++ b/libphobos/libdruntime/rt/memory.d
@@ -7,7 +7,7 @@
* $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
* (See accompanying file LICENSE)
* Authors: Walter Bright, Sean Kelly
- * Source: $(DRUNTIMESRC src/rt/_memory.d)
+ * Source: $(DRUNTIMESRC rt/_memory.d)
*/
module rt.memory;
diff --git a/libphobos/libdruntime/rt/minfo.d b/libphobos/libdruntime/rt/minfo.d
index 4722866..0d5cd22 100644
--- a/libphobos/libdruntime/rt/minfo.d
+++ b/libphobos/libdruntime/rt/minfo.d
@@ -7,7 +7,7 @@
* $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
* (See accompanying file LICENSE)
* Authors: Walter Bright, Sean Kelly
- * Source: $(DRUNTIMESRC src/rt/_minfo.d)
+ * Source: $(DRUNTIMESRC rt/_minfo.d)
*/
module rt.minfo;
@@ -165,7 +165,7 @@ struct ModuleGroup
void sortCtors(string cycleHandling)
{
import core.bitop : bts, btr, bt, BitRange;
- import rt.util.container.hashtab;
+ import core.internal.container.hashtab;
enum OnCycle
{
@@ -287,7 +287,7 @@ struct ModuleGroup
else
enum EOL = "\n";
- sink("Cyclic dependency between module ");
+ sink("Cyclic dependency between module constructors/destructors of ");
sink(_modules[sourceIdx].name);
sink(" and ");
sink(_modules[cycleIdx].name);
@@ -544,7 +544,7 @@ struct ModuleGroup
* behavior.
*
* Params:
- * edges - The module edges as found in the `importedModules` member of
+ * edges = The module edges as found in the `importedModules` member of
* each ModuleInfo. Generated in sortCtors.
* Returns:
* true if no cycle is found, false if one was.
@@ -566,7 +566,7 @@ struct ModuleGroup
}
auto stack = (cast(StackRec*).calloc(len, StackRec.sizeof))[0 .. len];
- // TODO: reuse GCBits by moving it to rt.util.container or core.internal
+ // TODO: reuse GCBits by moving it to core.internal.container
immutable nwords = (len + 8 * size_t.sizeof - 1) / (8 * size_t.sizeof);
auto ctorstart = cast(size_t*).malloc(nwords * size_t.sizeof);
auto ctordone = cast(size_t*).malloc(nwords * size_t.sizeof);
diff --git a/libphobos/libdruntime/rt/monitor_.d b/libphobos/libdruntime/rt/monitor_.d
index 8cb3c3a..6bfce63 100644
--- a/libphobos/libdruntime/rt/monitor_.d
+++ b/libphobos/libdruntime/rt/monitor_.d
@@ -2,8 +2,9 @@
* Contains the implementation for object monitors.
*
* Copyright: Copyright Digital Mars 2000 - 2015.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Walter Bright, Sean Kelly, Martin Nowak
+ * Source: $(DRUNTIMESRC rt/_monitor_.d)
*/
/* NOTE: This file has been patched from the original DMD distribution to
@@ -25,7 +26,7 @@ in
{
assert(ownee.__monitor is null);
}
-body
+do
{
auto m = ensureMonitor(cast(Object) owner);
if (m.impl is null)
@@ -81,7 +82,7 @@ in
{
assert(h !is null, "Synchronized object must not be null.");
}
-body
+do
{
auto m = cast(Monitor*) ensureMonitor(h);
auto i = m.impl;
@@ -185,6 +186,7 @@ version (GNU)
version (SingleThreaded)
{
+@nogc:
alias Mutex = int;
void initMutex(Mutex* mtx)
@@ -262,7 +264,7 @@ struct Monitor
private:
-@property ref shared(Monitor*) monitor(Object h) pure nothrow @nogc
+@property ref shared(Monitor*) monitor(return Object h) pure nothrow @nogc
{
return *cast(shared Monitor**)&h.__monitor;
}
diff --git a/libphobos/libdruntime/rt/obj.d b/libphobos/libdruntime/rt/obj.d
deleted file mode 100644
index 97dfbb5..0000000
--- a/libphobos/libdruntime/rt/obj.d
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Contains object comparator functions called by generated code.
- *
- * Copyright: Copyright Digital Mars 2002 - 2010.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
- * Authors: Walter Bright
- */
-
-/* Copyright Digital Mars 2000 - 2010.
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying file LICENSE or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- */
-module rt.obj;
-
-extern (C):
-
-/********************************
- * Compiler helper for operator == for class objects.
- */
-
-int _d_obj_eq(Object o1, Object o2)
-{
- return o1 is o2 || (o1 && o1.opEquals(o2));
-}
-
-
-/********************************
- * Compiler helper for operator <, <=, >, >= for class objects.
- */
-
-int _d_obj_cmp(Object o1, Object o2)
-{
- return o1.opCmp(o2);
-}
diff --git a/libphobos/libdruntime/rt/profilegc.d b/libphobos/libdruntime/rt/profilegc.d
new file mode 100644
index 0000000..45e0d51
--- /dev/null
+++ b/libphobos/libdruntime/rt/profilegc.d
@@ -0,0 +1,170 @@
+/*
+ * Data collection and report generation for
+ * -profile=gc
+ * switch
+ *
+ * Copyright: Copyright Digital Mars 2015 - 2015.
+ * License: Distributed under the
+ * $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ * (See accompanying file LICENSE)
+ * Authors: Andrei Alexandrescu and Walter Bright
+ * Source: $(DRUNTIMESRC rt/_profilegc.d)
+ */
+
+module rt.profilegc;
+
+private:
+
+import core.stdc.stdio;
+import core.stdc.stdlib;
+import core.stdc.string;
+
+import core.exception : onOutOfMemoryError;
+import core.internal.container.hashtab;
+
+struct Entry { ulong count, size; }
+
+char[] buffer;
+HashTab!(const(char)[], Entry) newCounts;
+
+__gshared
+{
+ HashTab!(const(char)[], Entry) globalNewCounts;
+ string logfilename = "profilegc.log";
+}
+
+/****
+ * Set file name for output.
+ * A file name of "" means write results to stdout.
+ * Params:
+ * name = file name
+ */
+
+extern (C) void profilegc_setlogfilename(string name)
+{
+ logfilename = name ~ "\0";
+}
+
+public void accumulate(string file, uint line, string funcname, string type, ulong sz) @nogc nothrow
+{
+ if (sz == 0)
+ return;
+
+ char[3 * line.sizeof + 1] buf = void;
+ auto buflen = snprintf(buf.ptr, buf.length, "%u", line);
+
+ auto length = type.length + 1 + funcname.length + 1 + file.length + 1 + buflen;
+ if (length > buffer.length)
+ {
+ // Enlarge buffer[] so it is big enough
+ assert(buffer.length > 0 || buffer.ptr is null);
+ auto p = cast(char*)realloc(buffer.ptr, length);
+ if (!p)
+ onOutOfMemoryError();
+ buffer = p[0 .. length];
+ }
+
+ // "type funcname file:line"
+ buffer[0 .. type.length] = type[];
+ buffer[type.length] = ' ';
+ buffer[type.length + 1 ..
+ type.length + 1 + funcname.length] = funcname[];
+ buffer[type.length + 1 + funcname.length] = ' ';
+ buffer[type.length + 1 + funcname.length + 1 ..
+ type.length + 1 + funcname.length + 1 + file.length] = file[];
+ buffer[type.length + 1 + funcname.length + 1 + file.length] = ':';
+ buffer[type.length + 1 + funcname.length + 1 + file.length + 1 ..
+ type.length + 1 + funcname.length + 1 + file.length + 1 + buflen] = buf[0 .. buflen];
+
+ if (auto pcount = cast(string)buffer[0 .. length] in newCounts)
+ { // existing entry
+ pcount.count++;
+ pcount.size += sz;
+ }
+ else
+ {
+ auto key = (cast(char*) malloc(char.sizeof * length))[0 .. length];
+ key[] = buffer[0..length];
+ newCounts[key] = Entry(1, sz); // new entry
+ }
+}
+
+// Merge thread local newCounts into globalNewCounts
+static ~this()
+{
+ if (newCounts.length)
+ {
+ synchronized
+ {
+ foreach (name, entry; newCounts)
+ {
+ if (!(name in globalNewCounts))
+ globalNewCounts[name] = Entry.init;
+
+ globalNewCounts[name].count += entry.count;
+ globalNewCounts[name].size += entry.size;
+ }
+ }
+ newCounts.reset();
+ }
+ free(buffer.ptr);
+ buffer = null;
+}
+
+// Write report to stderr
+shared static ~this()
+{
+ static struct Result
+ {
+ const(char)[] name;
+ Entry entry;
+
+ // qsort() comparator to sort by count field
+ extern (C) static int qsort_cmp(scope const void *r1, scope const void *r2) @nogc nothrow
+ {
+ auto result1 = cast(Result*)r1;
+ auto result2 = cast(Result*)r2;
+ long cmp = result2.entry.size - result1.entry.size;
+ if (cmp) return cmp < 0 ? -1 : 1;
+ cmp = result2.entry.count - result1.entry.count;
+ if (cmp) return cmp < 0 ? -1 : 1;
+ if (result2.name == result1.name) return 0;
+ // ascending order for names reads better
+ return result2.name > result1.name ? -1 : 1;
+ }
+ }
+
+ size_t size = globalNewCounts.length;
+ Result[] counts = (cast(Result*) malloc(size * Result.sizeof))[0 .. size];
+ scope(exit)
+ free(counts.ptr);
+
+ size_t i;
+ foreach (name, entry; globalNewCounts)
+ {
+ counts[i].name = name;
+ counts[i].entry = entry;
+ ++i;
+ }
+
+ if (counts.length)
+ {
+ qsort(counts.ptr, counts.length, Result.sizeof, &Result.qsort_cmp);
+
+ FILE* fp = logfilename.length == 0 ? stdout : fopen((logfilename).ptr, "w");
+ if (fp)
+ {
+ fprintf(fp, "bytes allocated, allocations, type, function, file:line\n");
+ foreach (ref c; counts)
+ {
+ fprintf(fp, "%15llu\t%15llu\t%8.*s\n",
+ cast(ulong)c.entry.size, cast(ulong)c.entry.count,
+ cast(int) c.name.length, c.name.ptr);
+ }
+ if (logfilename.length)
+ fclose(fp);
+ }
+ else
+ fprintf(stderr, "cannot write profilegc log file '%.*s'", cast(int) logfilename.length, logfilename.ptr);
+ }
+}
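[Editor's note — not part of the patch] The new rt.profilegc module is internal: accumulate() is called by the -profile=gc allocation instrumentation with a "type function file:line" key, thread-local counts are merged into the global table at thread shutdown, and the shared module destructor writes the sorted report. A minimal way to exercise it from user code — the dmd driver and its -profile=gc switch are assumptions here, since this file only provides the collection and reporting side:

// app.d — build with:  dmd -profile=gc app.d
void main()
{
    int[] a;
    foreach (i; 0 .. 1_000)
        a ~= i;                       // one report line for this append site

    auto b = new ubyte[](4096);       // and one for this allocation site
}
// On normal exit the report goes to profilegc.log, or to stdout if
// profilegc_setlogfilename("") was called, sorted by bytes allocated.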
diff --git a/libphobos/libdruntime/rt/sections.d b/libphobos/libdruntime/rt/sections.d
index 6009a79..006d48d 100644
--- a/libphobos/libdruntime/rt/sections.d
+++ b/libphobos/libdruntime/rt/sections.d
@@ -5,7 +5,7 @@
* $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
* (See accompanying file LICENSE)
* Authors: Walter Bright, Sean Kelly, Martin Nowak
- * Source: $(DRUNTIMESRC src/rt/_sections.d)
+ * Source: $(DRUNTIMESRC rt/_sections.d)
*/
/* NOTE: This file has been patched from the original DMD distribution to
@@ -26,10 +26,23 @@ version (GNU)
public import gcc.sections;
else version (CRuntime_Glibc)
public import rt.sections_elf_shared;
+else version (CRuntime_Musl)
+ public import rt.sections_elf_shared;
else version (FreeBSD)
public import rt.sections_elf_shared;
else version (NetBSD)
public import rt.sections_elf_shared;
+else version (OpenBSD)
+{
+ /**
+ * OpenBSD is missing support needed for elf_shared.
+ * See the top of sections_solaris.d for more info.
+ */
+
+ public import rt.sections_solaris;
+}
+else version (DragonFlyBSD)
+ public import rt.sections_elf_shared;
else version (Solaris)
public import rt.sections_solaris;
else version (Darwin)
@@ -47,6 +60,8 @@ else version (CRuntime_Microsoft)
public import rt.sections_win64;
else version (CRuntime_Bionic)
public import rt.sections_android;
+else version (CRuntime_UClibc)
+ public import rt.sections_elf_shared;
else
static assert(0, "unimplemented");
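[Editor's note — not part of the patch] rt/sections.d stays a pure dispatch module: each platform receives its sections implementation through a version-gated public import, and unknown targets fail at compile time via the static assert above. A new port slots in with the same one-line pattern (HypotheticalOS is a placeholder name, used only for illustration):

else version (HypotheticalOS)
    public import rt.sections_elf_shared;   // placeholder: choose the backend matching the target's loader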
diff --git a/libphobos/libdruntime/rt/switch_.d b/libphobos/libdruntime/rt/switch_.d
deleted file mode 100644
index 73ad636..0000000
--- a/libphobos/libdruntime/rt/switch_.d
+++ /dev/null
@@ -1,424 +0,0 @@
-/**
- * Contains support code for switch blocks using string constants.
- *
- * Copyright: Copyright Digital Mars 2004 - 2010.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
- * Authors: Walter Bright, Sean Kelly
- */
-
-/* Copyright Digital Mars 2004 - 2010.
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying file LICENSE or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- */
-module rt.switch_;
-
-private import core.stdc.string;
-
-/******************************************************
- * Support for switch statements switching on strings.
- * Input:
- * table[] sorted array of strings generated by compiler
- * ca string to look up in table
- * Output:
- * result index of match in table[]
- * -1 if not in table
- */
-
-extern (C):
-
-int _d_switch_string(char[][] table, char[] ca)
-in
-{
- //printf("in _d_switch_string()\n");
- assert(table.length >= 0);
- assert(ca.length >= 0);
-
- // Make sure table[] is sorted correctly
- for (size_t j = 1u; j < table.length; j++)
- {
- auto len1 = table[j - 1].length;
- auto len2 = table[j].length;
-
- assert(len1 <= len2);
- if (len1 == len2)
- {
- int ci;
-
- ci = memcmp(table[j - 1].ptr, table[j].ptr, len1);
- assert(ci < 0); // ci==0 means a duplicate
- }
- }
-}
-out (result)
-{
- int cj;
-
- //printf("out _d_switch_string()\n");
- if (result == -1)
- {
- // Not found
- for (auto i = 0u; i < table.length; i++)
- {
- if (table[i].length == ca.length)
- { cj = memcmp(table[i].ptr, ca.ptr, ca.length);
- assert(cj != 0);
- }
- }
- }
- else
- {
- assert(0 <= result && cast(size_t)result < table.length);
- for (auto i = 0u; 1; i++)
- {
- assert(i < table.length);
- if (table[i].length == ca.length)
- {
- cj = memcmp(table[i].ptr, ca.ptr, ca.length);
- if (cj == 0)
- {
- assert(i == result);
- break;
- }
- }
- }
- }
-}
-body
-{
- //printf("body _d_switch_string(%.*s)\n", ca.length, ca.ptr);
- size_t low = 0;
- size_t high = table.length;
-
- version (none)
- {
- // Print table
- printf("ca[] = '%s'\n", ca.length, ca.ptr);
- for (auto i = 0; i < high; i++)
- {
- auto pca = table[i];
- printf("table[%d] = %d, '%.*s'\n", i, pca.length, pca.length, pca.ptr);
- }
- }
- if (high &&
- ca.length >= table[0].length &&
- ca.length <= table[high - 1].length)
- {
- // Looking for 0 length string, which would only be at the beginning
- if (ca.length == 0)
- return 0;
-
- char c1 = ca[0];
-
- // Do binary search
- while (low < high)
- {
- auto mid = (low + high) >> 1;
- auto pca = table[mid];
- auto c = cast(sizediff_t)(ca.length - pca.length);
- if (c == 0)
- {
- c = cast(ubyte)c1 - cast(ubyte)pca[0];
- if (c == 0)
- {
- c = memcmp(ca.ptr, pca.ptr, ca.length);
- if (c == 0)
- { //printf("found %d\n", mid);
- return cast(int)mid;
- }
- }
- }
- if (c < 0)
- {
- high = mid;
- }
- else
- {
- low = mid + 1;
- }
- }
- }
-
- //printf("not found\n");
- return -1; // not found
-}
-
-unittest
-{
- switch (cast(char []) "c")
- {
- case "coo":
- default:
- break;
- }
-
- int bug5381(string s)
- {
- switch (s)
- {
- case "unittest": return 1;
- case "D_Version2": return 2;
- case "none": return 3;
- case "all": return 4;
- default: return 5;
- }
- }
- int rc = bug5381("none");
- assert(rc == 3);
-}
-
-/**********************************
- * Same thing, but for wide chars.
- */
-
-int _d_switch_ustring(wchar[][] table, wchar[] ca)
-in
-{
- //printf("in _d_switch_ustring()\n");
- assert(table.length >= 0);
- assert(ca.length >= 0);
-
- // Make sure table[] is sorted correctly
- for (size_t j = 1u; j < table.length; j++)
- {
- auto len1 = table[j - 1].length;
- auto len2 = table[j].length;
-
- assert(len1 <= len2);
- if (len1 == len2)
- {
- int c;
-
- c = memcmp(table[j - 1].ptr, table[j].ptr, len1 * wchar.sizeof);
- assert(c < 0); // c==0 means a duplicate
- }
- }
-}
-out (result)
-{
- int c;
-
- //printf("out _d_switch_ustring()\n");
- if (result == -1)
- {
- // Not found
- for (auto i = 0u; i < table.length; i++)
- {
- if (table[i].length == ca.length)
- { c = memcmp(table[i].ptr, ca.ptr, ca.length * wchar.sizeof);
- assert(c != 0);
- }
- }
- }
- else
- {
- assert(0 <= result && cast(size_t)result < table.length);
- for (auto i = 0u; 1; i++)
- {
- assert(i < table.length);
- if (table[i].length == ca.length)
- {
- c = memcmp(table[i].ptr, ca.ptr, ca.length * wchar.sizeof);
- if (c == 0)
- {
- assert(i == result);
- break;
- }
- }
- }
- }
-}
-body
-{
- //printf("body _d_switch_ustring()\n");
- size_t low = 0;
- auto high = table.length;
-
- version (none)
- {
- // Print table
- wprintf("ca[] = '%.*s'\n", ca.length, ca.ptr);
- for (auto i = 0; i < high; i++)
- {
- auto pca = table[i];
- wprintf("table[%d] = %d, '%.*s'\n", i, pca.length, pca.length, pca.ptr);
- }
- }
-
- // Do binary search
- while (low < high)
- {
- auto mid = (low + high) >> 1;
- auto pca = table[mid];
- auto c = cast(sizediff_t)(ca.length - pca.length);
- if (c == 0)
- {
- c = memcmp(ca.ptr, pca.ptr, ca.length * wchar.sizeof);
- if (c == 0)
- { //printf("found %d\n", mid);
- return cast(int)mid;
- }
- }
- if (c < 0)
- {
- high = mid;
- }
- else
- {
- low = mid + 1;
- }
- }
- //printf("not found\n");
- return -1; // not found
-}
-
-
-unittest
-{
- switch (cast(wchar []) "c")
- {
- case "coo":
- default:
- break;
- }
-
- int bug5381(wstring ws)
- {
- switch (ws)
- {
- case "unittest": return 1;
- case "D_Version2": return 2;
- case "none": return 3;
- case "all": return 4;
- default: return 5;
- }
- }
- int rc = bug5381("none"w);
- assert(rc == 3);
-}
-
-/**********************************
- * Same thing, but for wide chars.
- */
-
-int _d_switch_dstring(dchar[][] table, dchar[] ca)
-in
-{
- //printf("in _d_switch_dstring()\n");
- assert(table.length >= 0);
- assert(ca.length >= 0);
-
- // Make sure table[] is sorted correctly
- for (auto j = 1u; j < table.length; j++)
- {
- auto len1 = table[j - 1].length;
- auto len2 = table[j].length;
-
- assert(len1 <= len2);
- if (len1 == len2)
- {
- auto c = memcmp(table[j - 1].ptr, table[j].ptr, len1 * dchar.sizeof);
- assert(c < 0); // c==0 means a duplicate
- }
- }
-}
-out (result)
-{
- //printf("out _d_switch_dstring()\n");
- if (result == -1)
- {
- // Not found
- for (auto i = 0u; i < table.length; i++)
- {
- if (table[i].length == ca.length)
- { auto c = memcmp(table[i].ptr, ca.ptr, ca.length * dchar.sizeof);
- assert(c != 0);
- }
- }
- }
- else
- {
- assert(0 <= result && cast(size_t)result < table.length);
- for (auto i = 0u; 1; i++)
- {
- assert(i < table.length);
- if (table[i].length == ca.length)
- {
- auto c = memcmp(table[i].ptr, ca.ptr, ca.length * dchar.sizeof);
- if (c == 0)
- {
- assert(i == result);
- break;
- }
- }
- }
- }
-}
-body
-{
- //printf("body _d_switch_dstring()\n");
- size_t low = 0;
- auto high = table.length;
-
- version (none)
- {
- // Print table
- wprintf("ca[] = '%.*s'\n", ca.length, ca.ptr);
- for (auto i = 0; i < high; i++)
- {
- auto pca = table[i];
- wprintf("table[%d] = %d, '%.*s'\n", i, pca.length, pca.length, pca.ptr);
- }
- }
-
- // Do binary search
- while (low < high)
- {
- auto mid = (low + high) >> 1;
- auto pca = table[mid];
- auto c = cast(sizediff_t)(ca.length - pca.length);
- if (c == 0)
- {
- c = memcmp(ca.ptr, pca.ptr, ca.length * dchar.sizeof);
- if (c == 0)
- { //printf("found %d\n", mid);
- return cast(int)mid;
- }
- }
- if (c < 0)
- {
- high = mid;
- }
- else
- {
- low = mid + 1;
- }
- }
- //printf("not found\n");
- return -1; // not found
-}
-
-
-unittest
-{
- switch (cast(dchar []) "c")
- {
- case "coo":
- default:
- break;
- }
-
- int bug5381(dstring ds)
- {
- switch (ds)
- {
- case "unittest": return 1;
- case "D_Version2": return 2;
- case "none": return 3;
- case "all": return 4;
- default: return 5;
- }
- }
- int rc = bug5381("none"d);
- assert(rc == 3);
-}
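[Editor's note — not part of the patch] rt/switch_.d — the C-ABI _d_switch_string/_d_switch_ustring/_d_switch_dstring helpers and their binary search over the sorted case table — is deleted here. String switch statements keep working because the lowering now targets templated druntime hooks (core.internal.switch_ in current druntime; stated as an assumption, since that file lies outside this section). Behaviour for user code is unchanged:

unittest
{
    static int classify(string s)
    {
        switch (s)                    // still an ordered lookup over the case strings
        {
            case "all":      return 1;
            case "none":     return 2;
            case "unittest": return 3;
            default:         return 0;
        }
    }
    assert(classify("none") == 2);
    assert(classify("other") == 0);
}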
diff --git a/libphobos/libdruntime/rt/tlsgc.d b/libphobos/libdruntime/rt/tlsgc.d
index db7347f..b13a1b3 100644
--- a/libphobos/libdruntime/rt/tlsgc.d
+++ b/libphobos/libdruntime/rt/tlsgc.d
@@ -1,8 +1,9 @@
/**
*
* Copyright: Copyright Digital Mars 2011 - 2012.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
+ * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
* Authors: Martin Nowak
+ * Source: $(DRUNTIMESRC rt/tlsgc.d)
*/
/* Copyright Digital Mars 2011.
diff --git a/libphobos/libdruntime/rt/util/random.d b/libphobos/libdruntime/rt/util/random.d
deleted file mode 100644
index 69e4cfe..0000000
--- a/libphobos/libdruntime/rt/util/random.d
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Random number generators for internal usage.
- *
- * Copyright: Copyright Digital Mars 2014.
- * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
- */
-module rt.util.random;
-
-struct Rand48
-{
- private ulong rng_state;
-
-@safe @nogc nothrow:
-
- void defaultSeed()
- {
- import ctime = core.stdc.time : time;
- seed(cast(uint)ctime.time(null));
- }
-
-pure:
-
- void seed(uint seedval)
- {
- assert(seedval);
- rng_state = cast(ulong)seedval << 16 | 0x330e;
- popFront();
- }
-
- auto opCall()
- {
- auto result = front;
- popFront();
- return result;
- }
-
- @property uint front()
- {
- return cast(uint)(rng_state >> 16);
- }
-
- void popFront()
- {
- immutable ulong a = 25214903917;
- immutable ulong c = 11;
- immutable ulong m_mask = (1uL << 48uL) - 1;
- rng_state = (a*rng_state+c) & m_mask;
- }
-
- enum empty = false;
-}
diff --git a/libphobos/libdruntime/rt/util/typeinfo.d b/libphobos/libdruntime/rt/util/typeinfo.d
index 31770a0..d06254c 100644
--- a/libphobos/libdruntime/rt/util/typeinfo.d
+++ b/libphobos/libdruntime/rt/util/typeinfo.d
@@ -4,8 +4,10 @@
* Copyright: Copyright Kenji Hara 2014-.
* License: <a href="http://www.boost.org/LICENSE_1_0.txt">Boost License 1.0</a>.
* Authors: Kenji Hara
+ * Source: $(DRUNTIMESRC rt/util/_typeinfo.d)
*/
module rt.util.typeinfo;
+import rt.util.utility : d_cfloat, d_cdouble, d_creal, isComplex;
static import core.internal.hash;
template Floating(T)
@@ -35,14 +37,16 @@ if (is(T == float) || is(T == double) || is(T == real))
public alias hashOf = core.internal.hash.hashOf;
}
+
+// @@@DEPRECATED_2.105@@@
template Floating(T)
-if (is(T == cfloat) || is(T == cdouble) || is(T == creal))
+if (isComplex!T)
{
pure nothrow @safe:
bool equals(T f1, T f2)
{
- return f1 == f2;
+ return f1.re == f2.re && f1.im == f2.im;
}
int compare(T f1, T f2)
@@ -62,12 +66,14 @@ if (is(T == cfloat) || is(T == cdouble) || is(T == creal))
return result;
}
- public alias hashOf = core.internal.hash.hashOf;
+ size_t hashOf(scope const T val)
+ {
+ return core.internal.hash.hashOf(val.re, core.internal.hash.hashOf(val.im));
+ }
}
template Array(T)
-if (is(T == float) || is(T == double) || is(T == real) ||
- is(T == cfloat) || is(T == cdouble) || is(T == creal))
+if (is(T == float) || is(T == double) || is(T == real))
{
pure nothrow @safe:
@@ -94,17 +100,56 @@ if (is(T == float) || is(T == double) || is(T == real) ||
if (int c = Floating!T.compare(s1[u], s2[u]))
return c;
}
- if (s1.length < s2.length)
- return -1;
- else if (s1.length > s2.length)
- return 1;
- return 0;
+ return (s1.length > s2.length) - (s1.length < s2.length);
}
public alias hashOf = core.internal.hash.hashOf;
}
-version (unittest)
+// @@@DEPRECATED_2.105@@@
+template Array(T)
+if (isComplex!T)
+{
+ pure nothrow @safe:
+
+ bool equals(T[] s1, T[] s2)
+ {
+ size_t len = s1.length;
+ if (len != s2.length)
+ return false;
+ for (size_t u = 0; u < len; u++)
+ {
+ if (!Floating!T.equals(s1[u], s2[u]))
+ return false;
+ }
+ return true;
+ }
+
+ int compare(T[] s1, T[] s2)
+ {
+ size_t len = s1.length;
+ if (s2.length < len)
+ len = s2.length;
+ for (size_t u = 0; u < len; u++)
+ {
+ if (int c = Floating!T.compare(s1[u], s2[u]))
+ return c;
+ }
+ return (s1.length > s2.length) - (s1.length < s2.length);
+ }
+
+ size_t hashOf(scope const T[] val)
+ {
+ size_t hash = 0;
+ foreach (ref o; val)
+ {
+ hash = core.internal.hash.hashOf(Floating!T.hashOf(o), hash);
+ }
+ return hash;
+ }
+}
+
+version (CoreUnittest)
{
alias TypeTuple(T...) = T;
}
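[Editor's note — not part of the patch] With the built-in complex types on their way out of the language, the hunk above reimplements Floating!T and Array!T for the d_cfloat/d_cdouble/d_creal stand-ins by comparing and hashing the re/im fields directly. A self-contained sketch of that field-wise ordering — the struct below is a stand-in for illustration, and the (re, then im) lexicographic order is what Floating!T.compare presumably applies, its body being elided in this hunk:

unittest
{
    static struct C { double re, im; }      // stand-in for the deprecated complex types

    static int cmp(C a, C b)
    {
        if (a.re != b.re) return a.re < b.re ? -1 : 1;   // order by real part first
        if (a.im != b.im) return a.im < b.im ? -1 : 1;   // then by imaginary part
        return 0;
    }

    assert(cmp(C(1, 2), C(1, 2)) == 0);
    assert(cmp(C(1, 2), C(1, 3)) < 0);
    assert(cmp(C(2, 0), C(1, 9)) > 0);
}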
@@ -162,109 +207,6 @@ unittest
ti = typeid(S[3]);
assert(ti.getHash(&sa1) == ti.getHash(&sa2));
}();
-
- // imaginary types
- foreach (F; TypeTuple!(ifloat, idouble, ireal))
- (){ // workaround #2396
- alias S = SX!F;
- F f1 = +0.0i,
- f2 = -0.0i;
-
- assert(f1 == f2);
- assert(f1 !is f2);
- ti = typeid(F);
- assert(ti.getHash(&f1) == ti.getHash(&f2));
-
- F[] a1 = [f1, f1, f1];
- F[] a2 = [f2, f2, f2];
- assert(a1 == a2);
- assert(a1 !is a2);
- ti = typeid(F[]);
- assert(ti.getHash(&a1) == ti.getHash(&a2));
-
- F[][] aa1 = [a1, a1, a1];
- F[][] aa2 = [a2, a2, a2];
- assert(aa1 == aa2);
- assert(aa1 !is aa2);
- ti = typeid(F[][]);
- assert(ti.getHash(&aa1) == ti.getHash(&aa2));
-
- S s1 = {f1},
- s2 = {f2};
- assert(s1 == s2);
- assert(s1 !is s2);
- ti = typeid(S);
- assert(ti.getHash(&s1) == ti.getHash(&s2));
-
- S[] da1 = [S(f1), S(f1), S(f1)],
- da2 = [S(f2), S(f2), S(f2)];
- assert(da1 == da2);
- assert(da1 !is da2);
- ti = typeid(S[]);
- assert(ti.getHash(&da1) == ti.getHash(&da2));
-
- S[3] sa1 = {f1},
- sa2 = {f2};
- assert(sa1 == sa2);
- assert(sa1[] !is sa2[]);
- ti = typeid(S[3]);
- assert(ti.getHash(&sa1) == ti.getHash(&sa2));
- }();
-
- // complex types
- foreach (F; TypeTuple!(cfloat, cdouble, creal))
- (){ // workaround #2396
- alias S = SX!F;
- F[4] f = [+0.0 + 0.0i,
- +0.0 - 0.0i,
- -0.0 + 0.0i,
- -0.0 - 0.0i];
-
- foreach (i, f1; f) foreach (j, f2; f) if (i != j)
- {
- assert(f1 == 0 + 0i);
-
- assert(f1 == f2);
- assert(f1 !is f2);
- ti = typeid(F);
- assert(ti.getHash(&f1) == ti.getHash(&f2));
-
- F[] a1 = [f1, f1, f1];
- F[] a2 = [f2, f2, f2];
- assert(a1 == a2);
- assert(a1 !is a2);
- ti = typeid(F[]);
- assert(ti.getHash(&a1) == ti.getHash(&a2));
-
- F[][] aa1 = [a1, a1, a1];
- F[][] aa2 = [a2, a2, a2];
- assert(aa1 == aa2);
- assert(aa1 !is aa2);
- ti = typeid(F[][]);
- assert(ti.getHash(&aa1) == ti.getHash(&aa2));
-
- S s1 = {f1},
- s2 = {f2};
- assert(s1 == s2);
- assert(s1 !is s2);
- ti = typeid(S);
- assert(ti.getHash(&s1) == ti.getHash(&s2));
-
- S[] da1 = [S(f1), S(f1), S(f1)],
- da2 = [S(f2), S(f2), S(f2)];
- assert(da1 == da2);
- assert(da1 !is da2);
- ti = typeid(S[]);
- assert(ti.getHash(&da1) == ti.getHash(&da2));
-
- S[3] sa1 = {f1},
- sa2 = {f2};
- assert(sa1 == sa2);
- assert(sa1[] !is sa2[]);
- ti = typeid(S[3]);
- assert(ti.getHash(&sa1) == ti.getHash(&sa2));
- }
- }();
}
// Reduces to `T` if `cond` is `true` or `U` otherwise.
@@ -279,7 +221,7 @@ TypeInfo information for built-in types.
A `Base` type may be specified, which must be a type with the same layout, alignment, hashing, and
equality comparison as type `T`. This saves on code size because parts of `Base` will be reused. Example:
-`float` and `ifloat` or `char` and `ubyte`. The implementation assumes `Base` and `T` hash the same, swap
+`char` and `ubyte`. The implementation assumes `Base` and `T` hash the same, swap
the same, have the same ABI flags, and compare the same for equality. For ordering comparisons, we detect
during compilation whether they have different signedness and override appropriately. For initializer, we
detect if we need to override. The overriding initializer should be nonzero.
@@ -296,7 +238,7 @@ if (T.sizeof == Base.sizeof && T.alignof == Base.alignof)
static if (is(T == Base))
override size_t getHash(scope const void* p)
{
- static if (__traits(isFloating, T))
+ static if (__traits(isFloating, T) || isComplex!T)
return Floating!T.hashOf(*cast(T*)p);
else
return hashOf(*cast(const T *)p);
@@ -306,7 +248,7 @@ if (T.sizeof == Base.sizeof && T.alignof == Base.alignof)
static if (is(T == Base))
override bool equals(in void* p1, in void* p2)
{
- static if (__traits(isFloating, T))
+ static if (__traits(isFloating, T) || isComplex!T)
return Floating!T.equals(*cast(T*)p1, *cast(T*)p2);
else
return *cast(T *)p1 == *cast(T *)p2;
@@ -316,7 +258,7 @@ if (T.sizeof == Base.sizeof && T.alignof == Base.alignof)
static if (is(T == Base) || (__traits(isIntegral, T) && T.max != Base.max))
override int compare(in void* p1, in void* p2)
{
- static if (__traits(isFloating, T))
+ static if (__traits(isFloating, T) || isComplex!T)
{
return Floating!T.compare(*cast(T*)p1, *cast(T*)p2);
}
@@ -375,9 +317,12 @@ if (T.sizeof == Base.sizeof && T.alignof == Base.alignof)
}
static if (is(T == Base))
- static if (__traits(isFloating, T) && T.mant_dig != 64)
+ {
+ static if ((__traits(isFloating, T) && T.mant_dig != 64) ||
+ (isComplex!T && T.re.mant_dig != 64))
// FP types except 80-bit X87 are passed in SIMD register.
override @property uint flags() const { return 2; }
+ }
}
unittest
@@ -414,7 +359,7 @@ TypeInfo information for arrays of built-in types.
A `Base` type may be specified, which must be a type with the same layout, alignment, hashing, and
equality comparison as type `T`. This saves on code size because parts of `Base` will be reused. Example:
-`float` and `ifloat` or `char` and `ubyte`. The implementation assumes `Base` and `T` hash the same, swap
+`char` and `ubyte`. The implementation assumes `Base` and `T` hash the same, swap
the same, have the same ABI flags, and compare the same for equality. For ordering comparisons, we detect
during compilation whether they have different signedness and override appropriately. For initializer, we
detect if we need to override. The overriding initializer should be nonzero.
@@ -429,7 +374,7 @@ private class TypeInfoArrayGeneric(T, Base = T) : Select!(is(T == Base), TypeInf
static if (is(T == Base))
override size_t getHash(scope const void* p) @trusted const
{
- static if (__traits(isFloating, T))
+ static if (__traits(isFloating, T) || isComplex!T)
return Array!T.hashOf(*cast(T[]*)p);
else
return hashOf(*cast(const T[]*) p);
@@ -438,7 +383,7 @@ private class TypeInfoArrayGeneric(T, Base = T) : Select!(is(T == Base), TypeInf
static if (is(T == Base))
override bool equals(in void* p1, in void* p2) const
{
- static if (__traits(isFloating, T))
+ static if (__traits(isFloating, T) || isComplex!T)
{
return Array!T.equals(*cast(T[]*)p1, *cast(T[]*)p2);
}
@@ -455,7 +400,7 @@ private class TypeInfoArrayGeneric(T, Base = T) : Select!(is(T == Base), TypeInf
static if (is(T == Base) || (__traits(isIntegral, T) && T.max != Base.max))
override int compare(in void* p1, in void* p2) const
{
- static if (__traits(isFloating, T))
+ static if (__traits(isFloating, T) || isComplex!T)
{
return Array!T.compare(*cast(T[]*)p1, *cast(T[]*)p2);
}
@@ -519,12 +464,12 @@ class TypeInfo_v : TypeInfoGeneric!ubyte
{
return 1;
}
+}
- unittest
- {
- assert(typeid(void).toString == "void");
- assert(typeid(void).flags == 1);
- }
+unittest
+{
+ assert(typeid(void).toString == "void");
+ assert(typeid(void).flags == 1);
}
// All integrals.
@@ -545,17 +490,36 @@ static if (is(ucent)) class TypeInfo_zk : TypeInfoGeneric!ucent {}
// All simple floating-point types.
class TypeInfo_f : TypeInfoGeneric!float {}
-class TypeInfo_o : TypeInfoGeneric!(ifloat, float) {}
class TypeInfo_d : TypeInfoGeneric!double {}
-class TypeInfo_p : TypeInfoGeneric!(idouble, double) {}
class TypeInfo_e : TypeInfoGeneric!real {}
-class TypeInfo_j : TypeInfoGeneric!(ireal, real) {}
+
+// All imaginary floating-point types.
+
+// ifloat @@@DEPRECATED_2.105@@@
+deprecated class TypeInfo_o : TypeInfoGeneric!float
+{
+ override string toString() const pure nothrow @safe { return "ifloat"; }
+}
+
+// idouble @@@DEPRECATED_2.105@@@
+deprecated class TypeInfo_p : TypeInfoGeneric!double
+{
+ override string toString() const pure nothrow @safe { return "idouble"; }
+}
+
+// ireal @@@DEPRECATED_2.105@@@
+deprecated class TypeInfo_j : TypeInfoGeneric!real
+{
+ override string toString() const pure nothrow @safe { return "ireal"; }
+}
// All complex floating-point types.
-// cfloat
-class TypeInfo_q : TypeInfoGeneric!cfloat
+// cfloat @@@DEPRECATED_2.105@@@
+deprecated class TypeInfo_q : TypeInfoGeneric!d_cfloat
{
+ override string toString() const pure nothrow @safe { return "cfloat"; }
+
const: nothrow: pure: @trusted:
static if (__traits(hasMember, TypeInfo, "argTypes"))
override int argTypes(out TypeInfo arg1, out TypeInfo arg2)
@@ -565,9 +529,11 @@ class TypeInfo_q : TypeInfoGeneric!cfloat
}
}
-// cdouble
-class TypeInfo_r : TypeInfoGeneric!cdouble
+// cdouble @@@DEPRECATED_2.105@@@
+deprecated class TypeInfo_r : TypeInfoGeneric!d_cdouble
{
+ override string toString() const pure nothrow @safe { return "cdouble"; }
+
const: nothrow: pure: @trusted:
static if (__traits(hasMember, TypeInfo, "argTypes"))
override int argTypes(out TypeInfo arg1, out TypeInfo arg2)
@@ -578,9 +544,11 @@ class TypeInfo_r : TypeInfoGeneric!cdouble
}
}
-// creal
-class TypeInfo_c : TypeInfoGeneric!creal
+// creal @@@DEPRECATED_2.105@@@
+deprecated class TypeInfo_c : TypeInfoGeneric!d_creal
{
+ override string toString() const pure nothrow @safe { return "creal"; }
+
const: nothrow: pure: @trusted:
static if (__traits(hasMember, TypeInfo, "argTypes"))
override int argTypes(out TypeInfo arg1, out TypeInfo arg2)
@@ -591,18 +559,6 @@ class TypeInfo_c : TypeInfoGeneric!creal
}
}
-static if (__traits(hasMember, TypeInfo, "argTypes"))
- unittest
- {
- TypeInfo t1, t2;
- assert(typeid(cfloat).argTypes(t1, t2) == 0 && t1 == typeid(double) &&
- t2 is null);
- assert(typeid(cdouble).argTypes(t1, t2) == 0 && t1 == typeid(double) &&
- t2 == typeid(double));
- assert(typeid(creal).argTypes(t1, t2) == 0 && t1 == typeid(real) &&
- t2 == typeid(real));
- }
-
// Arrays of all integrals.
class TypeInfo_Ah : TypeInfoArrayGeneric!ubyte {}
class TypeInfo_Ab : TypeInfoArrayGeneric!(bool, ubyte) {}
@@ -623,7 +579,7 @@ class TypeInfo_Aw : TypeInfoArrayGeneric!(dchar, uint) {}
class TypeInfo_Am : TypeInfoArrayGeneric!ulong {}
class TypeInfo_Al : TypeInfoArrayGeneric!(long, ulong) {}
-version (unittest)
+version (CoreUnittest)
private extern (C) void[] _adSort(void[] a, TypeInfo ti);
unittest
@@ -662,16 +618,50 @@ unittest
assert(!(a1 < b1 && b1 < a1)); // Original failing case
}
-// Arrays of all floating point types.
+// Arrays of all simple floating-point types.
class TypeInfo_Af : TypeInfoArrayGeneric!float {}
-class TypeInfo_Ao : TypeInfoArrayGeneric!(ifloat, float) {}
class TypeInfo_Ad : TypeInfoArrayGeneric!double {}
-class TypeInfo_Ap : TypeInfoArrayGeneric!(idouble, double) {}
class TypeInfo_Ae : TypeInfoArrayGeneric!real {}
-class TypeInfo_Aj : TypeInfoArrayGeneric!(ireal, real) {}
-class TypeInfo_Aq : TypeInfoArrayGeneric!cfloat {}
-class TypeInfo_Ar : TypeInfoArrayGeneric!cdouble {}
-class TypeInfo_Ac : TypeInfoArrayGeneric!creal {}
+
+// Arrays of all imaginary floating-point types.
+
+// ifloat @@@DEPRECATED_2.105@@@
+deprecated class TypeInfo_Ao : TypeInfoArrayGeneric!float
+{
+ override string toString() const pure nothrow @safe { return "ifloat[]"; }
+}
+
+// idouble @@@DEPRECATED_2.105@@@
+deprecated class TypeInfo_Ap : TypeInfoArrayGeneric!double
+{
+ override string toString() const pure nothrow @safe { return "idouble[]"; }
+}
+
+// ireal @@@DEPRECATED_2.105@@@
+deprecated class TypeInfo_Aj : TypeInfoArrayGeneric!real
+{
+ override string toString() const pure nothrow @safe { return "ireal[]"; }
+}
+
+// Arrays of all complex floating-point types.
+
+// cfloat @@@DEPRECATED_2.105@@@
+deprecated class TypeInfo_Aq : TypeInfoArrayGeneric!d_cfloat
+{
+ override string toString() const pure nothrow @safe { return "cfloat[]"; }
+}
+
+// cdouble @@@DEPRECATED_2.105@@@
+deprecated class TypeInfo_Ar : TypeInfoArrayGeneric!d_cdouble
+{
+ override string toString() const pure nothrow @safe { return "cdouble[]"; }
+}
+
+// creal @@@DEPRECATED_2.105@@@
+deprecated class TypeInfo_Ac : TypeInfoArrayGeneric!d_creal
+{
+ override string toString() const pure nothrow @safe { return "creal[]"; }
+}
// void[] is a bit different, behaves like ubyte[] for comparison purposes.
class TypeInfo_Av : TypeInfo_Ah
diff --git a/libphobos/libdruntime/rt/util/utility.d b/libphobos/libdruntime/rt/util/utility.d
new file mode 100644
index 0000000..b1796fd
--- /dev/null
+++ b/libphobos/libdruntime/rt/util/utility.d
@@ -0,0 +1,44 @@
+/**
+ * Contains various utility functions used by the runtime implementation.
+ *
+ * Copyright: Copyright Digital Mars 2016.
+ * License: Distributed under the
+ * $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
+ * (See accompanying file LICENSE)
+ * Authors: Jacob Carlborg
+ * Source: $(DRUNTIMESRC rt/util/_utility.d)
+ */
+module rt.util.utility;
+
+/**
+ * Asserts that the given condition is `true`.
+ *
+ * The assertion is unaffected by -release because it abort()s instead of
+ * throwing. Regular assertions throw an AssertError and thus require an
+ * initialized GC, which might not be the case (yet or anymore) for the
+ * startup/shutdown code in this package (called by CRT ctors/dtors etc.).
+ */
+package(rt) void safeAssert(
+ bool condition, scope string msg, scope string file = __FILE__, size_t line = __LINE__
+) nothrow @nogc @safe
+{
+ import core.internal.abort;
+ condition || abort(msg, file, line);
+}
+
+// @@@DEPRECATED_2.105@@@
+// Remove this when complex types have been removed from the language.
+package(rt)
+{
+ private struct _Complex(T) { T re; T im; }
+
+ enum __c_complex_float : _Complex!float;
+ enum __c_complex_double : _Complex!double;
+ enum __c_complex_real : _Complex!real; // This is why we don't use stdc.config
+
+ alias d_cfloat = __c_complex_float;
+ alias d_cdouble = __c_complex_double;
+ alias d_creal = __c_complex_real;
+
+ enum isComplex(T) = is(T == d_cfloat) || is(T == d_cdouble) || is(T == d_creal);
+}
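[Editor's note — not part of the patch] The new rt/util/utility.d gives the runtime's startup/shutdown paths an assertion that never needs the GC, and the d_cfloat/d_cdouble/d_creal enums keep the old complex TypeInfo classes compilable once cfloat/cdouble/creal leave the language. A sketch of intended use inside the rt package — the module and function names below are made up for illustration, only the imported symbols come from the patch:

module rt.example_usage;             // hypothetical module inside the rt package

import rt.util.utility : safeAssert, d_creal, isComplex;

void initPiece(const(void)* p) nothrow @nogc @safe
{
    // aborts with file/line instead of throwing, so it is usable before the GC exists
    safeAssert(p !is null, "null runtime pointer");
}

static assert(isComplex!d_creal);
static assert(!isComplex!real);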