aboutsummaryrefslogtreecommitdiff
path: root/compiler-rt
diff options
context:
space:
mode:
Diffstat (limited to 'compiler-rt')
-rw-r--r--compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake3
-rw-r--r--compiler-rt/lib/asan/asan_descriptions.cpp62
-rw-r--r--compiler-rt/lib/asan/asan_fake_stack.cpp4
-rw-r--r--compiler-rt/lib/asan/asan_rtl.cpp4
-rw-r--r--compiler-rt/lib/builtins/CMakeLists.txt2
-rw-r--r--compiler-rt/lib/builtins/aarch64/sme-libc-opt-memcpy-memmove-sve.S180
-rw-r--r--compiler-rt/lib/builtins/aarch64/sme-libc-opt-memcpy-memmove.S3
-rw-r--r--compiler-rt/lib/dfsan/dfsan_allocator.cpp15
-rw-r--r--compiler-rt/lib/dfsan/dfsan_custom.cpp13
-rw-r--r--compiler-rt/lib/dfsan/dfsan_platform.h14
-rw-r--r--compiler-rt/lib/hwasan/hwasan.h9
-rw-r--r--compiler-rt/lib/hwasan/hwasan_linux.cpp16
-rw-r--r--compiler-rt/lib/orc/elfnix_platform.cpp103
-rw-r--r--compiler-rt/lib/rtsan/CMakeLists.txt5
-rw-r--r--compiler-rt/lib/rtsan/rtsan_context.cpp2
-rw-r--r--compiler-rt/lib/rtsan/rtsan_suppressions.cpp3
-rwxr-xr-xcompiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh1
-rw-r--r--compiler-rt/lib/sanitizer_common/tests/sanitizer_procmaps_mac_test.cpp1
-rw-r--r--compiler-rt/lib/scudo/standalone/secondary.h36
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_flags.cpp2
-rw-r--r--compiler-rt/test/dfsan/custom.cpp2
-rw-r--r--compiler-rt/test/dfsan/lit.cfg.py8
-rw-r--r--compiler-rt/test/dfsan/origin_endianness.c4
-rw-r--r--compiler-rt/test/dfsan/pair.cpp15
-rw-r--r--compiler-rt/test/dfsan/struct.c9
-rw-r--r--compiler-rt/test/orc/TestCases/Linux/Generic/ctor-dtor.cpp86
-rw-r--r--compiler-rt/test/sanitizer_common/TestCases/sanitizer_coverage_allowlist_ignorelist.cpp8
27 files changed, 533 insertions, 77 deletions
diff --git a/compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake b/compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake
index 0cae5da..c2de0d0 100644
--- a/compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake
+++ b/compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake
@@ -45,7 +45,8 @@ set(ALL_ASAN_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${RISCV64}
${MIPS32} ${MIPS64} ${PPC64} ${S390X} ${SPARC} ${SPARCV9} ${HEXAGON}
${LOONGARCH64})
set(ALL_ASAN_ABI_SUPPORTED_ARCH ${X86_64} ${ARM64} ${ARM64_32})
-set(ALL_DFSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64} ${LOONGARCH64})
+set(ALL_DFSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64} ${LOONGARCH64}
+ ${S390X})
set(ALL_RTSAN_SUPPORTED_ARCH ${X86_64} ${ARM64})
if(ANDROID)
diff --git a/compiler-rt/lib/asan/asan_descriptions.cpp b/compiler-rt/lib/asan/asan_descriptions.cpp
index 18c2a6c..551b819 100644
--- a/compiler-rt/lib/asan/asan_descriptions.cpp
+++ b/compiler-rt/lib/asan/asan_descriptions.cpp
@@ -36,37 +36,43 @@ AsanThreadIdAndName::AsanThreadIdAndName(u32 tid)
asanThreadRegistry().CheckLocked();
}
+// Prints this thread and, if flags()->print_full_thread_history, its ancestors.
void DescribeThread(AsanThreadContext *context) {
- CHECK(context);
- asanThreadRegistry().CheckLocked();
- // No need to announce the main thread.
- if (context->tid == kMainTid || context->announced) {
- return;
- }
- context->announced = true;
-
- InternalScopedString str;
- str.AppendF("Thread %s", AsanThreadIdAndName(context).c_str());
-
- AsanThreadContext *parent_context =
- context->parent_tid == kInvalidTid
- ? nullptr
- : GetThreadContextByTidLocked(context->parent_tid);
-
- // `context->parent_tid` may point to reused slot. Check `unique_id` which
- // is always smaller for the parent, always greater for a new user.
- if (!parent_context || context->unique_id <= parent_context->unique_id) {
- str.Append(" created by unknown thread\n");
+ while (true) {
+ CHECK(context);
+ asanThreadRegistry().CheckLocked();
+ // No need to announce the main thread.
+ if (context->tid == kMainTid || context->announced) {
+ return;
+ }
+ context->announced = true;
+
+ InternalScopedString str;
+ str.AppendF("Thread %s", AsanThreadIdAndName(context).c_str());
+
+ AsanThreadContext* parent_context =
+ context->parent_tid == kInvalidTid
+ ? nullptr
+ : GetThreadContextByTidLocked(context->parent_tid);
+
+ // `context->parent_tid` may point to reused slot. Check `unique_id` which
+ // is always smaller for the parent, always greater for a new user.
+ if (!parent_context || context->unique_id <= parent_context->unique_id) {
+ str.Append(" created by unknown thread\n");
+ Printf("%s", str.data());
+ return;
+ }
+ str.AppendF(" created by %s here:\n",
+ AsanThreadIdAndName(context->parent_tid).c_str());
Printf("%s", str.data());
- return;
+ StackDepotGet(context->stack_id).Print();
+
+ // Describe parent thread if requested
+ if (flags()->print_full_thread_history)
+ context = parent_context;
+ else
+ return;
}
- str.AppendF(" created by %s here:\n",
- AsanThreadIdAndName(context->parent_tid).c_str());
- Printf("%s", str.data());
- StackDepotGet(context->stack_id).Print();
- // Recursively described parent thread if needed.
- if (flags()->print_full_thread_history)
- DescribeThread(parent_context);
}
// Shadow descriptions
diff --git a/compiler-rt/lib/asan/asan_fake_stack.cpp b/compiler-rt/lib/asan/asan_fake_stack.cpp
index af73d31..96228818 100644
--- a/compiler-rt/lib/asan/asan_fake_stack.cpp
+++ b/compiler-rt/lib/asan/asan_fake_stack.cpp
@@ -77,11 +77,11 @@ FakeStack* FakeStack::Create(uptr stack_size_log) {
VReport(1,
"T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
"mmapped %zdK, noreserve=%d, true_start: %p, start of first frame: "
- "0x%zx\n",
+ "%p\n",
GetCurrentTidOrInvalid(), (void*)p,
(void*)(p + FakeStack::RequiredSize(stack_size_log)), stack_size_log,
size >> 10, flags()->uar_noreserve, res->true_start,
- res->GetFrame(stack_size_log, /*class_id*/ 0, /*pos*/ 0));
+ (void*)res->GetFrame(stack_size_log, /*class_id*/ 0, /*pos*/ 0));
return res;
}
diff --git a/compiler-rt/lib/asan/asan_rtl.cpp b/compiler-rt/lib/asan/asan_rtl.cpp
index b9ba250..c036a13 100644
--- a/compiler-rt/lib/asan/asan_rtl.cpp
+++ b/compiler-rt/lib/asan/asan_rtl.cpp
@@ -618,7 +618,9 @@ static void UnpoisonDefaultStack() {
int local_stack;
const uptr page_size = GetPageSizeCached();
top = curr_thread->stack_top();
- bottom = ((uptr)&local_stack - page_size) & ~(page_size - 1);
+ bottom = (uptr)&local_stack & ~(page_size - 1);
+ if (AddrIsInMem(bottom - page_size))
+ bottom -= page_size;
} else {
CHECK(!SANITIZER_FUCHSIA);
// If we haven't seen this thread, try asking the OS for stack bounds.
diff --git a/compiler-rt/lib/builtins/CMakeLists.txt b/compiler-rt/lib/builtins/CMakeLists.txt
index c3dbd65..6e454f23 100644
--- a/compiler-rt/lib/builtins/CMakeLists.txt
+++ b/compiler-rt/lib/builtins/CMakeLists.txt
@@ -660,7 +660,7 @@ if (COMPILER_RT_HAS_AARCH64_SME)
elseif (NOT COMPILER_RT_DISABLE_AARCH64_FMV AND COMPILER_RT_HAS_FNO_BUILTIN_FLAG AND COMPILER_RT_AARCH64_FMV_USES_GLOBAL_CONSTRUCTOR)
list(APPEND aarch64_SOURCES aarch64/sme-abi.S aarch64/sme-abi-assert.c)
if(COMPILER_RT_HAS_ARM_UNALIGNED AND COMPILER_RT_HAS_ARM_FP)
- list(APPEND aarch64_SOURCES aarch64/sme-libc-opt-memset-memchr.S aarch64/sme-libc-opt-memcpy-memmove.S)
+ list(APPEND aarch64_SOURCES aarch64/sme-libc-opt-memset-memchr.S aarch64/sme-libc-opt-memcpy-memmove.S aarch64/sme-libc-opt-memcpy-memmove-sve.S)
elseif(COMPILER_RT_HAS_ARM_UNALIGNED)
list(APPEND aarch64_SOURCES aarch64/sme-libc-memset-memchr.c aarch64/sme-libc-opt-memcpy-memmove.S)
message(WARNING "AArch64 SME ABI assembly-optimized memset/memchr disabled: target does not have hardware floating-point support.")
diff --git a/compiler-rt/lib/builtins/aarch64/sme-libc-opt-memcpy-memmove-sve.S b/compiler-rt/lib/builtins/aarch64/sme-libc-opt-memcpy-memmove-sve.S
new file mode 100644
index 0000000..be9dcf0
--- /dev/null
+++ b/compiler-rt/lib/builtins/aarch64/sme-libc-opt-memcpy-memmove-sve.S
@@ -0,0 +1,180 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains assembly-optimized implementations of Scalable Matrix
+/// Extension (SME) compatible memcpy and memmove functions.
+///
+/// These implementations depend on unaligned access support.
+///
+/// Routines taken from libc/AOR_v20.02/string/aarch64.
+///
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+#if defined(__ARM_FEATURE_SVE)
+
+//
+// __arm_sc_memcpy / __arm_sc_memmove
+//
+
+#define dstin x0
+#define src x1
+#define count x2
+#define dst x3
+#define srcend x4
+#define dstend x5
+#define tmp1 x6
+#define vlen x6
+
+#define A_q q0
+#define B_q q1
+#define C_q q2
+#define D_q q3
+#define E_q q4
+#define F_q q5
+#define G_q q6
+#define H_q q7
+
+/* This implementation handles overlaps and supports both memcpy and memmove
+ from a single entry point. It uses unaligned accesses and branchless
+ sequences to keep the code small, simple and improve performance.
+   SVE vectors are used to speed up small copies.
+
+ Copies are split into 3 main cases: small copies of up to 32 bytes, medium
+ copies of up to 128 bytes, and large copies. The overhead of the overlap
+ check is negligible since it is only required for large copies.
+
+ Large copies use a software pipelined loop processing 64 bytes per iteration.
+ The source pointer is 16-byte aligned to minimize unaligned accesses.
+ The loop tail is handled by always copying 64 bytes from the end.
+*/
+
+DEFINE_COMPILERRT_FUNCTION(__arm_sc_memcpy)
+ cmp count, 128
+ b.hi 3f // copy_long
+ cntb vlen
+ cmp count, vlen, lsl 1
+ b.hi 0f // copy32_128
+
+ whilelo p0.b, xzr, count
+ whilelo p1.b, vlen, count
+ ld1b z0.b, p0/z, [src, 0, mul vl]
+ ld1b z1.b, p1/z, [src, 1, mul vl]
+ st1b z0.b, p0, [dstin, 0, mul vl]
+ st1b z1.b, p1, [dstin, 1, mul vl]
+ ret
+
+ /* Medium copies: 33..128 bytes. */
+0: // copy32_128
+ add srcend, src, count
+ add dstend, dstin, count
+ ldp A_q, B_q, [src]
+ ldp C_q, D_q, [srcend, -32]
+ cmp count, 64
+ b.hi 1f // copy128
+ stp A_q, B_q, [dstin]
+ stp C_q, D_q, [dstend, -32]
+ ret
+
+ /* Copy 65..128 bytes. */
+1: // copy128
+ ldp E_q, F_q, [src, 32]
+ cmp count, 96
+ b.ls 2f // copy96
+ ldp G_q, H_q, [srcend, -64]
+ stp G_q, H_q, [dstend, -64]
+2: // copy96
+ stp A_q, B_q, [dstin]
+ stp E_q, F_q, [dstin, 32]
+ stp C_q, D_q, [dstend, -32]
+ ret
+
+ /* Copy more than 128 bytes. */
+3: // copy_long
+ add srcend, src, count
+ add dstend, dstin, count
+
+ /* Use backwards copy if there is an overlap. */
+ sub tmp1, dstin, src
+ cmp tmp1, count
+ b.lo 6f // copy_long_backwards
+
+ /* Copy 16 bytes and then align src to 16-byte alignment. */
+ ldr D_q, [src]
+ and tmp1, src, 15
+ bic src, src, 15
+ sub dst, dstin, tmp1
+ add count, count, tmp1 /* Count is now 16 too large. */
+ ldp A_q, B_q, [src, 16]
+ str D_q, [dstin]
+ ldp C_q, D_q, [src, 48]
+ subs count, count, 128 + 16 /* Test and readjust count. */
+ b.ls 5f // copy64_from_end
+4: // loop64
+ stp A_q, B_q, [dst, 16]
+ ldp A_q, B_q, [src, 80]
+ stp C_q, D_q, [dst, 48]
+ ldp C_q, D_q, [src, 112]
+ add src, src, 64
+ add dst, dst, 64
+ subs count, count, 64
+ b.hi 4b // loop64
+
+ /* Write the last iteration and copy 64 bytes from the end. */
+5: // copy64_from_end
+ ldp E_q, F_q, [srcend, -64]
+ stp A_q, B_q, [dst, 16]
+ ldp A_q, B_q, [srcend, -32]
+ stp C_q, D_q, [dst, 48]
+ stp E_q, F_q, [dstend, -64]
+ stp A_q, B_q, [dstend, -32]
+ ret
+
+ /* Large backwards copy for overlapping copies.
+ Copy 16 bytes and then align srcend to 16-byte alignment. */
+6: // copy_long_backwards
+ cbz tmp1, 9f // return
+ ldr D_q, [srcend, -16]
+ and tmp1, srcend, 15
+ bic srcend, srcend, 15
+ sub count, count, tmp1
+ ldp A_q, B_q, [srcend, -32]
+ str D_q, [dstend, -16]
+ ldp C_q, D_q, [srcend, -64]
+ sub dstend, dstend, tmp1
+ subs count, count, 128
+ b.ls 8f // copy64_from_start
+
+7: // loop64_backwards
+ str B_q, [dstend, -16]
+ str A_q, [dstend, -32]
+ ldp A_q, B_q, [srcend, -96]
+ str D_q, [dstend, -48]
+ str C_q, [dstend, -64]!
+ ldp C_q, D_q, [srcend, -128]
+ sub srcend, srcend, 64
+ subs count, count, 64
+ b.hi 7b // loop64_backwards
+
+ /* Write the last iteration and copy 64 bytes from the start. */
+8: // copy64_from_start
+ ldp E_q, F_q, [src, 32]
+ stp A_q, B_q, [dstend, -32]
+ ldp A_q, B_q, [src]
+ stp C_q, D_q, [dstend, -64]
+ stp E_q, F_q, [dstin, 32]
+ stp A_q, B_q, [dstin]
+9: // return
+ ret
+END_COMPILERRT_FUNCTION (__arm_sc_memcpy)
+
+DEFINE_COMPILERRT_FUNCTION_ALIAS(__arm_sc_memmove, __arm_sc_memcpy)
+
+#endif // defined(__ARM_FEATURE_SVE)
diff --git a/compiler-rt/lib/builtins/aarch64/sme-libc-opt-memcpy-memmove.S b/compiler-rt/lib/builtins/aarch64/sme-libc-opt-memcpy-memmove.S
index 8bc759a..3d79ca3 100644
--- a/compiler-rt/lib/builtins/aarch64/sme-libc-opt-memcpy-memmove.S
+++ b/compiler-rt/lib/builtins/aarch64/sme-libc-opt-memcpy-memmove.S
@@ -18,6 +18,8 @@
#include "../assembly.h"
+#if !defined(__ARM_FEATURE_SVE)
+
//
// __arm_sc_memcpy / __arm_sc_memmove
//
@@ -246,3 +248,4 @@ END_COMPILERRT_FUNCTION(__arm_sc_memcpy)
DEFINE_COMPILERRT_FUNCTION_ALIAS(__arm_sc_memmove, __arm_sc_memcpy)
+#endif // !defined(__ARM_FEATURE_SVE)
diff --git a/compiler-rt/lib/dfsan/dfsan_allocator.cpp b/compiler-rt/lib/dfsan/dfsan_allocator.cpp
index 160b1a6..c558328 100644
--- a/compiler-rt/lib/dfsan/dfsan_allocator.cpp
+++ b/compiler-rt/lib/dfsan/dfsan_allocator.cpp
@@ -44,17 +44,26 @@ struct DFsanMapUnmapCallback {
// duplicated as MappingDesc::ALLOCATOR in dfsan_platform.h.
#if defined(__aarch64__)
const uptr kAllocatorSpace = 0xE00000000000ULL;
+const uptr kAllocatorSpaceSize = 0x40000000000; // 4T.
+#elif defined(__s390x__)
+const uptr kAllocatorSpace = 0x440000000000ULL;
+const uptr kAllocatorSpaceSize = 0x020000000000; // 2T.
#else
const uptr kAllocatorSpace = 0x700000000000ULL;
+const uptr kAllocatorSpaceSize = 0x40000000000; // 4T.
#endif
+#if defined(__s390x__)
+const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G.
+#else
const uptr kMaxAllowedMallocSize = 1ULL << 40;
+#endif
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = kAllocatorSpace;
- static const uptr kSpaceSize = 0x40000000000; // 4T.
+ static const uptr kSpaceSize = kAllocatorSpaceSize;
static const uptr kMetadataSize = sizeof(Metadata);
- typedef DefaultSizeClassMap SizeClassMap;
- typedef DFsanMapUnmapCallback MapUnmapCallback;
+ using SizeClassMap = DefaultSizeClassMap;
+ using MapUnmapCallback = DFsanMapUnmapCallback;
static const uptr kFlags = 0;
using AddressSpaceView = LocalAddressSpaceView;
};
diff --git a/compiler-rt/lib/dfsan/dfsan_custom.cpp b/compiler-rt/lib/dfsan/dfsan_custom.cpp
index dbc00d7..b060e5c 100644
--- a/compiler-rt/lib/dfsan/dfsan_custom.cpp
+++ b/compiler-rt/lib/dfsan/dfsan_custom.cpp
@@ -2332,7 +2332,20 @@ static int format_buffer(char *str, size_t size, const char *fmt,
case 'g':
case 'G':
if (*(formatter.fmt_cur - 1) == 'L') {
+#if defined(__s390x__)
+          // SystemZ treats a float128 argument as an aggregate type and
+          // copies its shadow and origin into the passed argument temporary,
+          // but the passed va_labels and va_origins are zero. Here we fetch
+          // the shadow/origin corresponding to the in-memory argument and
+          // update va_labels and va_origins from them.
+ long double* arg = va_arg(ap, long double*);
+ *va_labels = *shadow_for(arg);
+ if (va_origins != nullptr)
+ *va_origins = *origin_for(arg);
+ retval = formatter.format(*arg);
+#else
retval = formatter.format(va_arg(ap, long double));
+#endif
} else {
retval = formatter.format(va_arg(ap, double));
}
diff --git a/compiler-rt/lib/dfsan/dfsan_platform.h b/compiler-rt/lib/dfsan/dfsan_platform.h
index 01f0de4..59c39cbe3 100644
--- a/compiler-rt/lib/dfsan/dfsan_platform.h
+++ b/compiler-rt/lib/dfsan/dfsan_platform.h
@@ -67,6 +67,20 @@ const MappingDesc kMemoryLayout[] = {
};
# define MEM_TO_SHADOW(mem) ((uptr)mem ^ 0xB00000000000ULL)
# define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x200000000000ULL)
+# elif SANITIZER_LINUX && SANITIZER_S390_64
+const MappingDesc kMemoryLayout[] = {
+ {0x000000000000ULL, 0x040000000000ULL, MappingDesc::APP, "app-low"},
+ {0x040000000000ULL, 0x080000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x080000000000ULL, 0x180000000000ULL, MappingDesc::SHADOW, "shadow"},
+ {0x180000000000ULL, 0x1C0000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x1C0000000000ULL, 0x2C0000000000ULL, MappingDesc::ORIGIN, "origin"},
+ {0x2C0000000000ULL, 0x440000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x440000000000ULL, 0x460000000000ULL, MappingDesc::ALLOCATOR, "allocator"},
+ {0x460000000000ULL, 0x500000000000ULL, MappingDesc::APP, "app-high"}};
+
+# define MEM_TO_SHADOW(mem) \
+ ((((uptr)(mem)) & ~0xC00000000000ULL) + 0x080000000000ULL)
+# define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x140000000000ULL)
# else
// All of the following configurations are supported.
diff --git a/compiler-rt/lib/hwasan/hwasan.h b/compiler-rt/lib/hwasan/hwasan.h
index 1ae463f..9201ed0 100644
--- a/compiler-rt/lib/hwasan/hwasan.h
+++ b/compiler-rt/lib/hwasan/hwasan.h
@@ -57,11 +57,18 @@ constexpr unsigned kTaggableRegionCheckShift =
// Tags are done in upper bits using Intel LAM.
constexpr unsigned kAddressTagShift = 57;
constexpr unsigned kTagBits = 6;
-#else
+#elif defined(__aarch64__)
// TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in address
// translation and can be used to store a tag.
constexpr unsigned kAddressTagShift = 56;
constexpr unsigned kTagBits = 8;
+#elif SANITIZER_RISCV64
+// Pointer Masking extension for RISC-V: Top PMLEN (16 or 7) bits are ignored in
+// address translation and can be used to store a tag.
+constexpr unsigned kAddressTagShift = 56;
+constexpr unsigned kTagBits = 8;
+#else
+# error Architecture not supported
#endif // defined(HWASAN_ALIASING_MODE)
// Mask for extracting tag bits from the lower 8 bits.
diff --git a/compiler-rt/lib/hwasan/hwasan_linux.cpp b/compiler-rt/lib/hwasan/hwasan_linux.cpp
index 68651d3..716a8d4 100644
--- a/compiler-rt/lib/hwasan/hwasan_linux.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_linux.cpp
@@ -134,6 +134,7 @@ static void MaybeDieIfNoTaggingAbi(const char *message) {
# define PR_SET_TAGGED_ADDR_CTRL 55
# define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
+# define PR_PMLEN_SHIFT 24
# define ARCH_GET_UNTAG_MASK 0x4001
# define ARCH_ENABLE_TAGGED_ADDR 0x4002
# define ARCH_GET_MAX_TAG_BITS 0x4003
@@ -182,7 +183,7 @@ static bool EnableTaggingAbi() {
if (mask & kAddressTagMask)
return false;
return true;
-# else
+# elif defined(__aarch64__)
// Enable ARM TBI tagging for the process. If for some reason tagging is not
// supported, prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE) returns
// -EINVAL.
@@ -194,7 +195,18 @@ static bool EnableTaggingAbi() {
PR_TAGGED_ADDR_ENABLE)
return false;
return true;
-# endif // __x86_64__
+# elif SANITIZER_RISCV64
+ // Enable RISC-V address tagging via pointer masking.
+ uptr req = kTagBits << PR_PMLEN_SHIFT | PR_TAGGED_ADDR_ENABLE;
+ if (internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL, req, 0, 0, 0)))
+ return false;
+ uptr rsp = internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
+ if (internal_iserror(rsp))
+ return false;
+ return rsp & PR_TAGGED_ADDR_ENABLE;
+# else
+# error Architecture not supported
+# endif // __x86_64__
}
void InitializeOsSupport() {
diff --git a/compiler-rt/lib/orc/elfnix_platform.cpp b/compiler-rt/lib/orc/elfnix_platform.cpp
index 0ec628b..54d6a55 100644
--- a/compiler-rt/lib/orc/elfnix_platform.cpp
+++ b/compiler-rt/lib/orc/elfnix_platform.cpp
@@ -73,6 +73,7 @@ private:
AtExitsVector AtExits;
std::vector<PerJITDylibState *> Deps;
RecordSectionsTracker<void (*)()> RecordedInits;
+ RecordSectionsTracker<void (*)()> RecordedFinis;
bool referenced() const {
return LinkedAgainstRefCount != 0 || RefCount != 0;
@@ -100,6 +101,10 @@ public:
std::vector<ExecutorAddrRange> Inits);
Error deregisterInits(ExecutorAddr HeaderAddr,
std::vector<ExecutorAddrRange> Inits);
+ Error registerFinis(ExecutorAddr HeaderAddr,
+ std::vector<ExecutorAddrRange> Finis);
+ Error deregisterFinis(ExecutorAddr HeaderAddr,
+ std::vector<ExecutorAddrRange> Finis);
Error deregisterObjectSections(ELFNixPerObjectSectionsToRegister POSR);
const char *dlerror();
@@ -130,6 +135,8 @@ private:
Error runInits(std::unique_lock<std::recursive_mutex> &JDStatesLock,
PerJITDylibState &JDS);
+ Error runFinis(std::unique_lock<std::recursive_mutex> &JDStatesLock,
+ PerJITDylibState &JDS);
Expected<void *> dlopenImpl(std::string_view Path, int Mode);
Error dlopenFull(std::unique_lock<std::recursive_mutex> &JDStatesLock,
PerJITDylibState &JDS);
@@ -301,6 +308,46 @@ Error ELFNixPlatformRuntimeState::deregisterInits(
return Error::success();
}
+Error ELFNixPlatformRuntimeState::registerFinis(
+ ExecutorAddr HeaderAddr, std::vector<ExecutorAddrRange> Finis) {
+ std::lock_guard<std::recursive_mutex> Lock(JDStatesMutex);
+ PerJITDylibState *JDS =
+ getJITDylibStateByHeaderAddr(HeaderAddr.toPtr<void *>());
+
+ if (!JDS) {
+ std::ostringstream ErrStream;
+ ErrStream << "Could not register fini sections for unrecognized header "
+ << HeaderAddr.toPtr<void *>();
+ return make_error<StringError>(ErrStream.str());
+ }
+
+ for (auto &F : Finis) {
+ JDS->RecordedFinis.add(F.toSpan<void (*)()>());
+ }
+
+ return Error::success();
+}
+
+Error ELFNixPlatformRuntimeState::deregisterFinis(
+ ExecutorAddr HeaderAddr, std::vector<ExecutorAddrRange> Finis) {
+ std::lock_guard<std::recursive_mutex> Lock(JDStatesMutex);
+ PerJITDylibState *JDS =
+ getJITDylibStateByHeaderAddr(HeaderAddr.toPtr<void *>());
+
+ if (!JDS) {
+ std::ostringstream ErrStream;
+ ErrStream << "Could not deregister fini sections for unrecognized header "
+ << HeaderAddr.toPtr<void *>();
+ return make_error<StringError>(ErrStream.str());
+ }
+
+ for (auto &F : Finis) {
+ JDS->RecordedFinis.removeIfPresent(F);
+ }
+
+ return Error::success();
+}
+
const char *ELFNixPlatformRuntimeState::dlerror() { return DLFcnError.c_str(); }
void *ELFNixPlatformRuntimeState::dlopen(std::string_view Path, int Mode) {
@@ -451,6 +498,31 @@ Error ELFNixPlatformRuntimeState::runInits(
return Error::success();
}
+Error ELFNixPlatformRuntimeState::runFinis(
+ std::unique_lock<std::recursive_mutex> &JDStatesLock,
+ PerJITDylibState &JDS) {
+ std::vector<span<void (*)()>> FiniSections;
+
+ // Collect all fini sections (reset to move all to "new" for processing)
+ JDS.RecordedFinis.reset();
+ FiniSections.reserve(JDS.RecordedFinis.numNewSections());
+
+ JDS.RecordedFinis.processNewSections(
+ [&](span<void (*)()> Finis) { FiniSections.push_back(Finis); });
+
+ JDStatesLock.unlock();
+
+ // Run in forward order - sections are already sorted correctly by the JIT:
+ // .dtors first (in order), then .fini_array (in descending priority order)
+ for (auto Sec : FiniSections)
+ for (auto *Fini : Sec)
+ Fini();
+
+ JDStatesLock.lock();
+
+ return Error::success();
+}
+
Expected<void *> ELFNixPlatformRuntimeState::dlopenImpl(std::string_view Path,
int Mode) {
std::unique_lock<std::recursive_mutex> Lock(JDStatesMutex);
@@ -602,8 +674,13 @@ Error ELFNixPlatformRuntimeState::dlcloseImpl(void *DSOHandle) {
Error ELFNixPlatformRuntimeState::dlcloseInitialize(
std::unique_lock<std::recursive_mutex> &JDStatesLock,
PerJITDylibState &JDS) {
+ // Run fini sections BEFORE atexits (mirrors static dtor order)
+ if (auto Err = runFinis(JDStatesLock, JDS))
+ return Err;
+
runAtExits(JDStatesLock, JDS);
JDS.RecordedInits.reset();
+ JDS.RecordedFinis.reset();
for (auto *DepJDS : JDS.Deps)
if (!JDS.referenced())
if (auto Err = dlcloseInitialize(JDStatesLock, *DepJDS))
@@ -726,6 +803,32 @@ __orc_rt_elfnix_deregister_init_sections(char *ArgData, size_t ArgSize) {
.release();
}
+ORC_RT_INTERFACE orc_rt_WrapperFunctionResult
+__orc_rt_elfnix_register_fini_sections(char *ArgData, size_t ArgSize) {
+ return WrapperFunction<SPSError(SPSExecutorAddr,
+ SPSSequence<SPSExecutorAddrRange>)>::
+ handle(ArgData, ArgSize,
+ [](ExecutorAddr HeaderAddr,
+ std::vector<ExecutorAddrRange> &Finis) {
+ return ELFNixPlatformRuntimeState::get().registerFinis(
+ HeaderAddr, std::move(Finis));
+ })
+ .release();
+}
+
+ORC_RT_INTERFACE orc_rt_WrapperFunctionResult
+__orc_rt_elfnix_deregister_fini_sections(char *ArgData, size_t ArgSize) {
+ return WrapperFunction<SPSError(SPSExecutorAddr,
+ SPSSequence<SPSExecutorAddrRange>)>::
+ handle(ArgData, ArgSize,
+ [](ExecutorAddr HeaderAddr,
+ std::vector<ExecutorAddrRange> &Finis) {
+ return ELFNixPlatformRuntimeState::get().deregisterFinis(
+ HeaderAddr, std::move(Finis));
+ })
+ .release();
+}
+
/// Wrapper function for registering metadata on a per-object basis.
ORC_RT_INTERFACE orc_rt_WrapperFunctionResult
__orc_rt_elfnix_register_object_sections(char *ArgData, size_t ArgSize) {
diff --git a/compiler-rt/lib/rtsan/CMakeLists.txt b/compiler-rt/lib/rtsan/CMakeLists.txt
index a4413d9..a74dccc 100644
--- a/compiler-rt/lib/rtsan/CMakeLists.txt
+++ b/compiler-rt/lib/rtsan/CMakeLists.txt
@@ -28,10 +28,9 @@ set(RTSAN_HEADERS
set(RTSAN_DEPS)
set(RTSAN_CFLAGS
- ${COMPILER_RT_COMMON_CFLAGS}
- ${COMPILER_RT_CXX_CFLAGS}
+ ${SANITIZER_COMMON_CFLAGS}
-DSANITIZER_COMMON_NO_REDEFINE_BUILTINS)
-set(RTSAN_LINK_FLAGS ${COMPILER_RT_COMMON_LINK_FLAGS})
+set(RTSAN_LINK_FLAGS ${SANITIZER_COMMON_LINK_FLAGS})
set(RTSAN_DYNAMIC_LIBS
${COMPILER_RT_UNWINDER_LINK_LIBS}
${SANITIZER_CXX_ABI_LIBRARIES}
diff --git a/compiler-rt/lib/rtsan/rtsan_context.cpp b/compiler-rt/lib/rtsan/rtsan_context.cpp
index 536d62e..0903d14 100644
--- a/compiler-rt/lib/rtsan/rtsan_context.cpp
+++ b/compiler-rt/lib/rtsan/rtsan_context.cpp
@@ -12,8 +12,8 @@
#include "rtsan/rtsan.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
-#include <new>
#include <pthread.h>
using namespace __sanitizer;
diff --git a/compiler-rt/lib/rtsan/rtsan_suppressions.cpp b/compiler-rt/lib/rtsan/rtsan_suppressions.cpp
index 2bcfbee..789d8b2 100644
--- a/compiler-rt/lib/rtsan/rtsan_suppressions.cpp
+++ b/compiler-rt/lib/rtsan/rtsan_suppressions.cpp
@@ -16,11 +16,10 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
-#include <new>
-
using namespace __sanitizer;
using namespace __rtsan;
diff --git a/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh b/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
index 1519a9b..3dbc328 100755
--- a/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
+++ b/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
@@ -109,7 +109,6 @@ if [[ ! -f ${LIBCXX_BUILD}/build.ninja ]]; then
-DLIBCXXABI_ENABLE_ASSERTIONS=OFF \
-DLIBCXXABI_ENABLE_EXCEPTIONS=OFF \
-DLIBCXXABI_USE_LLVM_UNWINDER=OFF \
- -DLIBCXX_ENABLE_ASSERTIONS=OFF \
-DLIBCXX_ENABLE_EXCEPTIONS=OFF \
-DLIBCXX_ENABLE_RTTI=OFF \
-DCMAKE_SHARED_LINKER_FLAGS="$LINKFLAGS" \
diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_procmaps_mac_test.cpp b/compiler-rt/lib/sanitizer_common/tests/sanitizer_procmaps_mac_test.cpp
index 7547528..fbb20ad 100644
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_procmaps_mac_test.cpp
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_procmaps_mac_test.cpp
@@ -106,7 +106,6 @@ public:
start_load_cmd_addr = (const char *)(mock_header.data() + header_size);
sizeofcmds = header->sizeofcmds;
- const char *last_byte_load_cmd_addr = (start_load_cmd_addr + sizeofcmds - 1);
data_.current_image = -1; // So the loop in ::Next runs just once
}
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index 04e33c0..bb32110 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -100,6 +100,12 @@ struct CachedBlock {
u16 Next = 0;
u16 Prev = 0;
+ enum CacheFlags : u16 {
+ None = 0,
+ NoAccess = 0x1,
+ };
+ CacheFlags Flags = CachedBlock::None;
+
bool isValid() { return CommitBase != 0; }
void invalidate() { CommitBase = 0; }
@@ -284,6 +290,7 @@ public:
Entry.BlockBegin = BlockBegin;
Entry.MemMap = MemMap;
Entry.Time = UINT64_MAX;
+ Entry.Flags = CachedBlock::None;
bool MemoryTaggingEnabled = useMemoryTagging<Config>(Options);
if (MemoryTaggingEnabled) {
@@ -299,6 +306,7 @@ public:
Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
MAP_NOACCESS);
}
+ Entry.Flags = CachedBlock::NoAccess;
}
// Usually only one entry will be evicted from the cache.
@@ -522,20 +530,18 @@ public:
}
void disableMemoryTagging() EXCLUDES(Mutex) {
+ if (Config::getQuarantineDisabled())
+ return;
+
ScopedLock L(Mutex);
- if (!Config::getQuarantineDisabled()) {
- for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
- if (Quarantine[I].isValid()) {
- MemMapT &MemMap = Quarantine[I].MemMap;
- unmapCallBack(MemMap);
- Quarantine[I].invalidate();
- }
+ for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
+ if (Quarantine[I].isValid()) {
+ MemMapT &MemMap = Quarantine[I].MemMap;
+ unmapCallBack(MemMap);
+ Quarantine[I].invalidate();
}
- QuarantinePos = -1U;
}
-
- for (CachedBlock &Entry : LRUEntries)
- Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
+ QuarantinePos = -1U;
}
void disable() NO_THREAD_SAFETY_ANALYSIS { Mutex.lock(); }
@@ -754,9 +760,15 @@ MapAllocator<Config>::tryAllocateFromCache(const Options &Options, uptr Size,
LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
bool Zeroed = Entry.Time == 0;
+
+ if (UNLIKELY(Entry.Flags & CachedBlock::NoAccess)) {
+ // NOTE: Flags set to 0 actually restores read-write.
+ Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
+ /*Flags=*/0);
+ }
+
if (useMemoryTagging<Config>(Options)) {
uptr NewBlockBegin = reinterpret_cast<uptr>(H + 1);
- Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
if (Zeroed) {
storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
NewBlockBegin);
diff --git a/compiler-rt/lib/tsan/rtl/tsan_flags.cpp b/compiler-rt/lib/tsan/rtl/tsan_flags.cpp
index efaaef8..fba9765 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_flags.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_flags.cpp
@@ -37,7 +37,7 @@ inline bool FlagHandler<LockDuringWriteSetting>::Parse(const char *value) {
*t_ = kNoLockDuringWritesAllProcesses;
return true;
}
- Printf("ERROR: Invalid value for signal handler option: '%s'\n", value);
+ Printf("ERROR: Invalid value for lock_during_write option: '%s'\n", value);
return false;
}
diff --git a/compiler-rt/test/dfsan/custom.cpp b/compiler-rt/test/dfsan/custom.cpp
index 873af5c..b4d6b18 100644
--- a/compiler-rt/test/dfsan/custom.cpp
+++ b/compiler-rt/test/dfsan/custom.cpp
@@ -2240,7 +2240,7 @@ void test_sscanf() {
strcpy(input_buf, "-559038737");
test_sscanf_chunk(-559038737, "%d", input_ptr, 1);
strcpy(input_buf, "3735928559");
- test_sscanf_chunk(3735928559, "%u", input_ptr, 1);
+ test_sscanf_chunk(3735928559, "%lu", input_ptr, 1);
strcpy(input_buf, "12345");
test_sscanf_chunk(12345, "%i", input_ptr, 1);
strcpy(input_buf, "0751");
diff --git a/compiler-rt/test/dfsan/lit.cfg.py b/compiler-rt/test/dfsan/lit.cfg.py
index b26ff3e..ec07db5 100644
--- a/compiler-rt/test/dfsan/lit.cfg.py
+++ b/compiler-rt/test/dfsan/lit.cfg.py
@@ -10,6 +10,9 @@ config.test_source_root = os.path.dirname(__file__)
# Setup default compiler flags used with -fsanitize=dataflow option.
clang_dfsan_cflags = ["-fsanitize=dataflow"] + [config.target_cflags]
+# s390x arch needs -mbackchain to print stack backtrace of origin.
+if config.target_arch == "s390x":
+ clang_dfsan_cflags.append("-mbackchain")
clang_dfsan_cxxflags = config.cxx_mode_flags + clang_dfsan_cflags
@@ -25,5 +28,8 @@ config.substitutions.append(("%clangxx_dfsan ", build_invocation(clang_dfsan_cxx
config.suffixes = [".c", ".cpp"]
# DataFlowSanitizer tests are currently supported on Linux only.
-if not (config.target_os in ["Linux"] and config.target_arch in ["aarch64", "x86_64", "loongarch64"]):
+if not (
+ config.target_os in ["Linux"]
+ and config.target_arch in ["aarch64", "x86_64", "loongarch64", "s390x"]
+):
config.unsupported = True
diff --git a/compiler-rt/test/dfsan/origin_endianness.c b/compiler-rt/test/dfsan/origin_endianness.c
index a73dcda..cd0b198 100644
--- a/compiler-rt/test/dfsan/origin_endianness.c
+++ b/compiler-rt/test/dfsan/origin_endianness.c
@@ -16,10 +16,10 @@ __attribute__((noinline)) FULL_TYPE foo(FULL_TYPE a, FULL_TYPE b) {
int main(int argc, char *argv[]) {
FULL_TYPE a = 1;
FULL_TYPE b = 10;
- dfsan_set_label(4, (HALF_TYPE *)&a, sizeof(HALF_TYPE));
+ dfsan_set_label(4, (HALF_TYPE *)&a + 1, sizeof(HALF_TYPE));
FULL_TYPE c = foo(a, b);
dfsan_print_origin_trace(&c, NULL);
- dfsan_print_origin_trace((HALF_TYPE *)&c, NULL);
+ dfsan_print_origin_trace((HALF_TYPE *)&c + 1, NULL);
}
// CHECK: Taint value 0x4 {{.*}} origin tracking ()
diff --git a/compiler-rt/test/dfsan/pair.cpp b/compiler-rt/test/dfsan/pair.cpp
index 94bbfc7..e3b3bcc 100644
--- a/compiler-rt/test/dfsan/pair.cpp
+++ b/compiler-rt/test/dfsan/pair.cpp
@@ -6,6 +6,9 @@
#include <sanitizer/dfsan_interface.h>
#include <utility>
+// SystemZ identifies labels for struct elements precisely across all
+// optimization levels.
+
__attribute__((noinline))
std::pair<int *, int>
make_pair(int *p, int i) { return {p, i}; }
@@ -65,7 +68,7 @@ void test_simple_constructors() {
int i1 = pair1.second;
int *ptr1 = pair1.first;
-#ifdef O0
+#if defined(O0) && !defined(__s390x__)
assert(dfsan_read_label(&i1, sizeof(i1)) == 10);
assert(dfsan_read_label(&ptr1, sizeof(ptr1)) == 10);
#else
@@ -77,7 +80,7 @@ void test_simple_constructors() {
int i2 = pair2.second;
int *ptr2 = pair2.first;
-#ifdef O0
+#if defined(O0) && !defined(__s390x__)
assert(dfsan_read_label(&i2, sizeof(i2)) == 10);
assert(dfsan_read_label(&ptr2, sizeof(ptr2)) == 10);
#else
@@ -89,7 +92,7 @@ void test_simple_constructors() {
int i3 = pair3.second;
int *ptr3 = pair3.first;
-#ifdef O0
+#if defined(O0) && !defined(__s390x__)
assert(dfsan_read_label(&i3, sizeof(i3)) == 10);
assert(dfsan_read_label(&ptr3, sizeof(ptr3)) == 10);
#else
@@ -101,7 +104,7 @@ void test_simple_constructors() {
int i4 = pair4.second;
int *ptr4 = pair4.first;
-#ifdef O0
+#if defined(O0) && !defined(__s390x__)
assert(dfsan_read_label(&i4, sizeof(i4)) == 10);
assert(dfsan_read_label(&ptr4, sizeof(ptr4)) == 10);
#else
@@ -139,7 +142,7 @@ void test_branches() {
{
std::pair<const char *, uint32_t> r = return_ptr_and_i32(q, res);
-#ifdef O0
+#if defined(O0) && !defined(__s390x__)
assert(dfsan_read_label(&r.first, sizeof(r.first)) == 10);
assert(dfsan_read_label(&r.second, sizeof(r.second)) == 10);
#else
@@ -150,7 +153,7 @@ void test_branches() {
{
std::pair<const char *, uint64_t> r = return_ptr_and_i64(q, res);
-#ifdef O0
+#if defined(O0) && !defined(__s390x__)
assert(dfsan_read_label(&r.first, sizeof(r.first)) == 10);
assert(dfsan_read_label(&r.second, sizeof(r.second)) == 10);
#else
diff --git a/compiler-rt/test/dfsan/struct.c b/compiler-rt/test/dfsan/struct.c
index 7ba0016..fa976434 100644
--- a/compiler-rt/test/dfsan/struct.c
+++ b/compiler-rt/test/dfsan/struct.c
@@ -4,6 +4,9 @@
#include <assert.h>
#include <sanitizer/dfsan_interface.h>
+// SystemZ identifies labels for struct elements precisely across all
+// optimization levels.
+
typedef struct Pair {
int i;
char *ptr;
@@ -47,7 +50,7 @@ int main(void) {
dfsan_label i1_label = dfsan_read_label(&i1, sizeof(i1));
dfsan_label ptr1_label = dfsan_read_label(&ptr1, sizeof(ptr1));
-#if defined(O0)
+#if defined(O0) && !defined(__s390x__)
assert(i1_label == (i_label | ptr_label));
assert(ptr1_label == (i_label | ptr_label));
#else
@@ -61,7 +64,7 @@ int main(void) {
dfsan_label i2_label = dfsan_read_label(&i2, sizeof(i2));
dfsan_label ptr2_label = dfsan_read_label(&ptr2, sizeof(ptr2));
-#if defined(O0)
+#if defined(O0) && !defined(__s390x__)
assert(i2_label == (i_label | ptr_label));
assert(ptr2_label == (i_label | ptr_label));
#else
@@ -75,7 +78,7 @@ int main(void) {
dfsan_label i3_label = dfsan_read_label(&i3, sizeof(i3));
dfsan_label ptr3_label = dfsan_read_label(&ptr3, sizeof(ptr3));
-#if defined(O0)
+#if defined(O0) && !defined(__s390x__)
assert(i3_label == (i_label | ptr_label));
assert(ptr3_label == (i_label | ptr_label));
#else
diff --git a/compiler-rt/test/orc/TestCases/Linux/Generic/ctor-dtor.cpp b/compiler-rt/test/orc/TestCases/Linux/Generic/ctor-dtor.cpp
new file mode 100644
index 0000000..d5a1e80
--- /dev/null
+++ b/compiler-rt/test/orc/TestCases/Linux/Generic/ctor-dtor.cpp
@@ -0,0 +1,86 @@
+// Check that constructors and destructors are run in the expected order.
+//
+// RUN: %clang -c -o %t.o %s
+// RUN: %llvm_jitlink %t.o | FileCheck %s
+//
+// REQUIRES: system-linux && host-arch-compatible
+
+// CHECK: <init_array.101>
+// CHECK: <init_array.102>
+// CHECK: <init_array.103>
+// CHECK: <init_array>
+// CHECK: <ctors.103>
+// CHECK: <ctors.102>
+// CHECK: <ctors.101>
+// CHECK: <ctors>
+// CHECK: <dtors>
+// CHECK: <dtors.101>
+// CHECK: <dtors.102>
+// CHECK: <dtors.103>
+// CHECK: <fini_array>
+// CHECK: <fini_array.103>
+// CHECK: <fini_array.102>
+// CHECK: <fini_array.101>
+#include <stdio.h>
+
+typedef void (*ctor_t)(void);
+typedef void (*dtor_t)(void);
+
+__attribute__((constructor)) void init_array() { puts("<init_array>"); }
+
+__attribute__((constructor(101))) void init_array_101() {
+ puts("<init_array.101>");
+}
+
+__attribute__((constructor(102))) void init_array_102() {
+ puts("<init_array.102>");
+}
+__attribute__((constructor(103))) void init_array_103() {
+ puts("<init_array.103>");
+}
+
+static void ctors(void) { puts("<ctors>"); }
+__attribute__((section(".ctors"), used)) static ctor_t ctors_ptr = ctors;
+
+static void ctors_101(void) { puts("<ctors.101>"); }
+__attribute__((section(".ctors.101"), used)) static ctor_t ctors_1_ptr =
+ ctors_101;
+
+static void ctors_102(void) { puts("<ctors.102>"); }
+__attribute__((section(".ctors.102"), used)) static ctor_t ctors_2_ptr =
+ ctors_102;
+
+static void ctors_103(void) { puts("<ctors.103>"); }
+__attribute__((section(".ctors.103"), used)) static ctor_t ctors_3_ptr =
+ ctors_103;
+
+__attribute__((destructor)) void fini_array() { puts("<fini_array>"); }
+
+__attribute__((destructor(101))) void fini_array_101() {
+ puts("<fini_array.101>");
+}
+
+__attribute__((destructor(102))) void fini_array_102() {
+ puts("<fini_array.102>");
+}
+
+__attribute__((destructor(103))) void fini_array_103() {
+ puts("<fini_array.103>");
+}
+
+static void dtors(void) { puts("<dtors>"); }
+__attribute__((section(".dtors"), used)) static dtor_t dtors_ptr = dtors;
+
+static void dtors_101(void) { puts("<dtors.101>"); }
+__attribute__((section(".dtors.101"), used)) static dtor_t dtors_1_ptr =
+ dtors_101;
+
+static void dtors_102(void) { puts("<dtors.102>"); }
+__attribute__((section(".dtors.102"), used)) static dtor_t dtors_2_ptr =
+ dtors_102;
+
+static void dtors_103(void) { puts("<dtors.103>"); }
+__attribute__((section(".dtors.103"), used)) static dtor_t dtors_3_ptr =
+ dtors_103;
+
+int main(void) { return 0; }
diff --git a/compiler-rt/test/sanitizer_common/TestCases/sanitizer_coverage_allowlist_ignorelist.cpp b/compiler-rt/test/sanitizer_common/TestCases/sanitizer_coverage_allowlist_ignorelist.cpp
index 6fdd23b..933997c 100644
--- a/compiler-rt/test/sanitizer_common/TestCases/sanitizer_coverage_allowlist_ignorelist.cpp
+++ b/compiler-rt/test/sanitizer_common/TestCases/sanitizer_coverage_allowlist_ignorelist.cpp
@@ -24,10 +24,10 @@
// Check inline-8bit-counters
// RUN: echo 'section "__sancov_cntrs"' > patterns.txt
-// RUN: echo '%[0-9]\+ = load i8, ptr @__sancov_gen_' >> patterns.txt
-// RUN: echo 'store i8 %[0-9]\+, ptr @__sancov_gen_' >> patterns.txt
-// RUN: echo '%[0-9]\+ = load i8, ptr getelementptr (\[[0-9]\+ x i8\], ptr @__sancov_gen_' >> patterns.txt
-// RUN: echo 'store i8 %[0-9]\+, ptr getelementptr (\[[0-9]\+ x i8\], ptr @__sancov_gen_' >> patterns.txt
+// RUN: echo '%[0-9][0-9]* = load i8, ptr @__sancov_gen_' >> patterns.txt
+// RUN: echo 'store i8 %[0-9][0-9]*, ptr @__sancov_gen_' >> patterns.txt
+// RUN: echo '%[0-9][0-9]* = load i8, ptr getelementptr (\[[0-9][0-9]* x i8\], ptr @__sancov_gen_' >> patterns.txt
+// RUN: echo 'store i8 %[0-9][0-9]*, ptr getelementptr (\[[0-9][0-9]* x i8\], ptr @__sancov_gen_' >> patterns.txt
// Check indirect-calls
// RUN: echo 'call void @__sanitizer_cov_trace_pc_indir' >> patterns.txt