diff options
Diffstat (limited to 'compiler-rt/lib')
65 files changed, 950 insertions, 351 deletions
diff --git a/compiler-rt/lib/asan/CMakeLists.txt b/compiler-rt/lib/asan/CMakeLists.txt index 97cc5c8..7d07ec7 100644 --- a/compiler-rt/lib/asan/CMakeLists.txt +++ b/compiler-rt/lib/asan/CMakeLists.txt @@ -106,6 +106,12 @@ if(MSVC) endif() set(ASAN_CFLAGS ${SANITIZER_COMMON_CFLAGS}) +# Win/ASan relies on the runtime functions being hotpatchable. See +# https://github.com/llvm/llvm-project/pull/149444 +if(MSVC) + list(APPEND ASAN_CFLAGS /hotpatch) +endif() + append_list_if(MSVC /Zl ASAN_CFLAGS) set(ASAN_COMMON_DEFINITIONS "") @@ -282,7 +288,7 @@ else() endif() # On AIX, we only need the static libraries. - if (NOT ${CMAKE_SYSTEM_NAME} MATCHES "AIX") + if (NOT "${CMAKE_SYSTEM_NAME}" MATCHES "AIX") foreach(arch ${ASAN_SUPPORTED_ARCH}) if (COMPILER_RT_HAS_VERSION_SCRIPT) if(WIN32) @@ -392,7 +398,7 @@ add_compiler_rt_resource_file(asan_ignorelist asan_ignorelist.txt asan) # On AIX, static sanitizer libraries are not added to the DSO, so we need to put # asan.link_with_main_exec.txt and asan_cxx.link_with_main_exec.txt to the build # and install dir for use in resolving undefined sanitizer symbols at runtime. 
-if (${CMAKE_SYSTEM_NAME} MATCHES "AIX") +if ("${CMAKE_SYSTEM_NAME}" MATCHES "AIX") foreach(arch ${ASAN_SUPPORTED_ARCH}) add_compiler_rt_cfg(asan_symbols_${arch} asan.link_with_main_exec.txt asan ${arch}) add_compiler_rt_cfg(asan_cxx_symbols_${arch} asan_cxx.link_with_main_exec.txt asan ${arch}) diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp index 9ebe4d0..752ba9ab 100644 --- a/compiler-rt/lib/asan/asan_allocator.cpp +++ b/compiler-rt/lib/asan/asan_allocator.cpp @@ -547,6 +547,7 @@ struct Allocator { ComputeUserRequestedAlignmentLog(alignment); if (alignment < min_alignment) alignment = min_alignment; + bool upgraded_from_zero = false; if (size == 0) { // We'd be happy to avoid allocating memory for zero-size requests, but // some programs/tests depend on this behavior and assume that malloc @@ -555,6 +556,7 @@ struct Allocator { // consecutive "new" calls must be different even if the allocated size // is zero. size = 1; + upgraded_from_zero = true; } CHECK(IsPowerOfTwo(alignment)); uptr rz_log = ComputeRZLog(size); @@ -637,6 +639,10 @@ struct Allocator { *shadow = fl.poison_partial ? 
(size & (ASAN_SHADOW_GRANULARITY - 1)) : 0; } + if (upgraded_from_zero) + PoisonShadow(user_beg, ASAN_SHADOW_GRANULARITY, + kAsanHeapLeftRedzoneMagic); + AsanStats &thread_stats = GetCurrentThreadStats(); thread_stats.mallocs++; thread_stats.malloced += size; diff --git a/compiler-rt/lib/asan/asan_errors.h b/compiler-rt/lib/asan/asan_errors.h index b3af655..f339b35d 100644 --- a/compiler-rt/lib/asan/asan_errors.h +++ b/compiler-rt/lib/asan/asan_errors.h @@ -362,7 +362,7 @@ struct ErrorBadParamsToCopyContiguousContainerAnnotations : ErrorBase { u32 tid, BufferedStackTrace *stack_, uptr old_storage_beg_, uptr old_storage_end_, uptr new_storage_beg_, uptr new_storage_end_) : ErrorBase(tid, 10, - "bad-__sanitizer_annotate_double_ended_contiguous_container"), + "bad-__sanitizer_copy_contiguous_container_annotations"), stack(stack_), old_storage_beg(old_storage_beg_), old_storage_end(old_storage_end_), diff --git a/compiler-rt/lib/asan/asan_fake_stack.cpp b/compiler-rt/lib/asan/asan_fake_stack.cpp index 0f69607..c3ed252 100644 --- a/compiler-rt/lib/asan/asan_fake_stack.cpp +++ b/compiler-rt/lib/asan/asan_fake_stack.cpp @@ -54,18 +54,34 @@ FakeStack *FakeStack::Create(uptr stack_size_log) { stack_size_log = kMinStackSizeLog; if (stack_size_log > kMaxStackSizeLog) stack_size_log = kMaxStackSizeLog; + CHECK_LE(kMaxStackFrameSizeLog, stack_size_log); uptr size = RequiredSize(stack_size_log); + uptr padded_size = size + kMaxStackFrameSize; + void *true_res = reinterpret_cast<void *>( + flags()->uar_noreserve ? MmapNoReserveOrDie(padded_size, "FakeStack") + : MmapOrDie(padded_size, "FakeStack")); + // GetFrame() requires the property that + // (res + kFlagsOffset + SizeRequiredForFlags(stack_size_log)) is aligned to + // kMaxStackFrameSize. + // We didn't use MmapAlignedOrDieOnFatalError, because it requires that the + // *size* is a power of 2, which is an overly strong condition. 
+ static_assert(alignof(FakeStack) <= kMaxStackFrameSize); FakeStack *res = reinterpret_cast<FakeStack *>( - flags()->uar_noreserve ? MmapNoReserveOrDie(size, "FakeStack") - : MmapOrDie(size, "FakeStack")); + RoundUpTo( + (uptr)true_res + kFlagsOffset + SizeRequiredForFlags(stack_size_log), + kMaxStackFrameSize) - + kFlagsOffset - SizeRequiredForFlags(stack_size_log)); + res->true_start = true_res; res->stack_size_log_ = stack_size_log; u8 *p = reinterpret_cast<u8 *>(res); VReport(1, "T%d: FakeStack created: %p -- %p stack_size_log: %zd; " - "mmapped %zdK, noreserve=%d \n", + "mmapped %zdK, noreserve=%d, true_start: %p, start of first frame: " + "0x%zx\n", GetCurrentTidOrInvalid(), (void *)p, (void *)(p + FakeStack::RequiredSize(stack_size_log)), stack_size_log, - size >> 10, flags()->uar_noreserve); + size >> 10, flags()->uar_noreserve, res->true_start, + res->GetFrame(stack_size_log, /*class_id*/ 0, /*pos*/ 0)); return res; } @@ -79,8 +95,10 @@ void FakeStack::Destroy(int tid) { Report("T%d: FakeStack destroyed: %s\n", tid, str.data()); } uptr size = RequiredSize(stack_size_log_); - FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(this), size); - UnmapOrDie(this, size); + uptr padded_size = size + kMaxStackFrameSize; + FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(true_start), + padded_size); + UnmapOrDie(true_start, padded_size); } void FakeStack::PoisonAll(u8 magic) { diff --git a/compiler-rt/lib/asan/asan_fake_stack.h b/compiler-rt/lib/asan/asan_fake_stack.h index 270a198..50706e6 100644 --- a/compiler-rt/lib/asan/asan_fake_stack.h +++ b/compiler-rt/lib/asan/asan_fake_stack.h @@ -32,12 +32,12 @@ struct FakeFrame { // is not popped but remains there for quite some time until gets used again. // So, we poison the objects on the fake stack when function returns. // It helps us find use-after-return bugs. -// // The FakeStack objects is allocated by a single mmap call and has no other // pointers. 
The size of the fake stack depends on the actual thread stack size // and thus can not be a constant. // stack_size is a power of two greater or equal to the thread's stack size; // we store it as its logarithm (stack_size_log). +// FakeStack is padded such that GetFrame() is aligned to BytesInSizeClass(). // FakeStack has kNumberOfSizeClasses (11) size classes, each size class // is a power of two, starting from 64 bytes. Each size class occupies // stack_size bytes and thus can allocate @@ -56,6 +56,9 @@ struct FakeFrame { class FakeStack { static const uptr kMinStackFrameSizeLog = 6; // Min frame is 64B. static const uptr kMaxStackFrameSizeLog = 16; // Max stack frame is 64K. + static_assert(kMaxStackFrameSizeLog >= kMinStackFrameSizeLog); + + static const u64 kMaxStackFrameSize = 1 << kMaxStackFrameSizeLog; public: static const uptr kNumberOfSizeClasses = @@ -66,7 +69,7 @@ class FakeStack { void Destroy(int tid); - // stack_size_log is at least 15 (stack_size >= 32K). + // min_uar_stack_size_log is 16 (stack_size >= 64KB) static uptr SizeRequiredForFlags(uptr stack_size_log) { return ((uptr)1) << (stack_size_log + 1 - kMinStackFrameSizeLog); } @@ -110,6 +113,28 @@ class FakeStack { } // Get frame by class_id and pos. + // Return values are guaranteed to be aligned to BytesInSizeClass(class_id), + // which is useful in combination with + // ASanStackFrameLayout::ComputeASanStackFrameLayout(). + // + // Note that alignment to 1<<kMaxStackFrameSizeLog (aka + // BytesInSizeClass(max_class_id)) implies alignment to BytesInSizeClass() + // for any class_id, since the class sizes are increasing powers of 2. + // + // 1) (this + kFlagsOffset + SizeRequiredForFlags())) is aligned to + // 1<<kMaxStackFrameSizeLog (see FakeStack::Create) + // + // Note that SizeRequiredForFlags(16) == 2048. If FakeStack::Create() had + // merely returned an address from mmap (4K-aligned), the addition would + // not be 4K-aligned. 
+ // 2) We know that stack_size_log >= kMaxStackFrameSizeLog (otherwise you + // couldn't store a single frame of that size in the entire stack) + // hence (1<<stack_size_log) is aligned to 1<<kMaxStackFrameSizeLog + // and ((1<<stack_size_log) * class_id) is aligned to + // 1<<kMaxStackFrameSizeLog + // 3) BytesInSizeClass(class_id) * pos is aligned to + // BytesInSizeClass(class_id) + // The sum of these is aligned to BytesInSizeClass(class_id). u8 *GetFrame(uptr stack_size_log, uptr class_id, uptr pos) { return reinterpret_cast<u8 *>(this) + kFlagsOffset + SizeRequiredForFlags(stack_size_log) + @@ -156,15 +181,18 @@ class FakeStack { private: FakeStack() { } - static const uptr kFlagsOffset = 4096; // This is were the flags begin. + static const uptr kFlagsOffset = 4096; // This is where the flags begin. // Must match the number of uses of DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID COMPILER_CHECK(kNumberOfSizeClasses == 11); static const uptr kMaxStackMallocSize = ((uptr)1) << kMaxStackFrameSizeLog; uptr hint_position_[kNumberOfSizeClasses]; uptr stack_size_log_; - // a bit is set if something was allocated from the corresponding size class. bool needs_gc_; + // We allocated more memory than needed to ensure the FakeStack (and, by + // extension, each of the fake stack frames) is aligned. We keep track of the + // true start so that we can unmap it. + void *true_start; }; FakeStack *GetTLSFakeStack(); diff --git a/compiler-rt/lib/asan/asan_fuchsia.cpp b/compiler-rt/lib/asan/asan_fuchsia.cpp index 6876be1..a9e5dad 100644 --- a/compiler-rt/lib/asan/asan_fuchsia.cpp +++ b/compiler-rt/lib/asan/asan_fuchsia.cpp @@ -32,11 +32,11 @@ void EarlySanitizerInit() {} namespace __asan { -// The system already set up the shadow memory for us. -// __sanitizer::GetMaxUserVirtualAddress has already been called by -// AsanInitInternal->InitializeHighMemEnd (asan_rtl.cpp). -// Just do some additional sanity checks here. 
void InitializeShadowMemory() { + // Explicitly setup shadow here right before any of the ShadowBounds members + // are used. + InitShadowBounds(); + if (Verbosity()) PrintAddressSpaceLayout(); diff --git a/compiler-rt/lib/asan/asan_mapping.h b/compiler-rt/lib/asan/asan_mapping.h index 91fe60d..bddae9a 100644 --- a/compiler-rt/lib/asan/asan_mapping.h +++ b/compiler-rt/lib/asan/asan_mapping.h @@ -83,18 +83,29 @@ // || `[0x0000000000, 0x0d5554ffff]` || LowMem || // // Default Linux/AArch64 (39-bit VMA) mapping: -// || `[0x2000000000, 0x7fffffffff]` || highmem || -// || `[0x1400000000, 0x1fffffffff]` || highshadow || -// || `[0x1200000000, 0x13ffffffff]` || shadowgap || -// || `[0x1000000000, 0x11ffffffff]` || lowshadow || -// || `[0x0000000000, 0x0fffffffff]` || lowmem || +// TODO: this mapping is ok, but the allocator size is too large on non-Android +// AArch64 platforms (see asan_allocator.h) +// || `[0x2000000000, 0x7fffffffff]` || highmem || 384GB +// || `[0x1400000000, 0x1fffffffff]` || highshadow || 48GB +// || `[0x1200000000, 0x13ffffffff]` || shadowgap || 8GB +// || `[0x1000000000, 0x11ffffffff]` || lowshadow || 8GB +// || `[0x0000000000, 0x0fffffffff]` || lowmem || 64GB // // Default Linux/AArch64 (42-bit VMA) mapping: -// || `[0x10000000000, 0x3ffffffffff]` || highmem || -// || `[0x0a000000000, 0x0ffffffffff]` || highshadow || -// || `[0x09000000000, 0x09fffffffff]` || shadowgap || -// || `[0x08000000000, 0x08fffffffff]` || lowshadow || -// || `[0x00000000000, 0x07fffffffff]` || lowmem || +// TODO: this mapping is ok, but the allocator size is too large on non-Android +// AArch64 platforms (see asan_allocator.h) +// || `[0x09000000000, 0x03ffffffffff]` || highmem || 3520GB +// || `[0x02200000000, 0x008fffffffff]` || highshadow || 440GB +// || `[0x01200000000, 0x0021ffffffff]` || shadowgap || 64GB +// || `[0x01000000000, 0x0011ffffffff]` || lowshadow || 8GB +// || `[0x00000000000, 0x000fffffffff]` || lowmem || 64GB +// +// Default Linux/AArch64 (48-bit 
VMA) mapping: +// || `[0x201000000000, 0xffffffffffff]` || HighMem || 229312GB +// || `[0x041200000000, 0x200fffffffff]` || HighShadow || 28664GB +// || `[0x001200000000, 0x0411ffffffff]` || ShadowGap || 4096GB +// || `[0x001000000000, 0x0011ffffffff]` || LowShadow || 8GB +// || `[0x000000000000, 0x000fffffffff]` || LowMem || 64GB // // Default Linux/S390 mapping: // || `[0x30000000, 0x7fffffff]` || HighMem || diff --git a/compiler-rt/lib/asan/scripts/asan_symbolize.py b/compiler-rt/lib/asan/scripts/asan_symbolize.py index 058a161..8ecd66c 100755 --- a/compiler-rt/lib/asan/scripts/asan_symbolize.py +++ b/compiler-rt/lib/asan/scripts/asan_symbolize.py @@ -507,20 +507,29 @@ class SymbolizationLoop(object): assert result return result - def get_symbolized_lines(self, symbolized_lines, inc_frame_counter=True): + def get_symbolized_lines(self, symbolized_lines): if not symbolized_lines: - if inc_frame_counter: - self.frame_no += 1 - return [self.current_line] - else: - assert inc_frame_counter - result = [] - for symbolized_frame in symbolized_lines: - result.append( - " #%s %s" % (str(self.frame_no), symbolized_frame.rstrip()) + # If it is an unparsable frame, but contains a frame counter and address + # replace the frame counter so the stack is still consistent. + unknown_stack_frame_format = r"^( *#([0-9]+) +)(0x[0-9a-f]+) +.*" + match = re.match(unknown_stack_frame_format, self.current_line) + if match: + rewritten_line = ( + self.current_line[: match.start(2)] + + str(self.frame_no) + + self.current_line[match.end(2) :] ) self.frame_no += 1 - return result + return [rewritten_line] + # Not a frame line so don't increment the frame counter. 
+ return [self.current_line] + result = [] + for symbolized_frame in symbolized_lines: + result.append( + " #%s %s" % (str(self.frame_no), symbolized_frame.rstrip()) + ) + self.frame_no += 1 + return result def process_logfile(self): self.frame_no = 0 @@ -546,8 +555,7 @@ class SymbolizationLoop(object): match = re.match(stack_trace_line_format, line) if not match: logging.debug('Line "{}" does not match regex'.format(line)) - # Not a frame line so don't increment the frame counter. - return self.get_symbolized_lines(None, inc_frame_counter=False) + return self.get_symbolized_lines(None) logging.debug(line) _, frameno_str, addr, binary, offset = match.groups() diff --git a/compiler-rt/lib/asan/tests/asan_fake_stack_test.cpp b/compiler-rt/lib/asan/tests/asan_fake_stack_test.cpp index 504b0aaf..c60e2ea 100644 --- a/compiler-rt/lib/asan/tests/asan_fake_stack_test.cpp +++ b/compiler-rt/lib/asan/tests/asan_fake_stack_test.cpp @@ -113,6 +113,7 @@ TEST(FakeStack, Allocate) { uptr bytes_in_class = FakeStack::BytesInSizeClass(cid); for (uptr j = 0; j < n; j++) { FakeFrame *ff = fs->Allocate(stack_size_log, cid, 0); + EXPECT_EQ(reinterpret_cast<uptr>(ff) % bytes_in_class, 0U); uptr x = reinterpret_cast<uptr>(ff); EXPECT_TRUE(s.insert(std::make_pair(ff, cid)).second); EXPECT_EQ(x, fs->AddrIsInFakeStack(x)); diff --git a/compiler-rt/lib/asan/tests/asan_test.cpp b/compiler-rt/lib/asan/tests/asan_test.cpp index 2d054ee..2d23a12 100644 --- a/compiler-rt/lib/asan/tests/asan_test.cpp +++ b/compiler-rt/lib/asan/tests/asan_test.cpp @@ -395,7 +395,8 @@ TEST(AddressSanitizer, ReallocTest) { } free(ptr); // Realloc pointer returned by malloc(0). 
- int *ptr2 = Ident((int*)malloc(0)); + void *ptr0 = malloc(0); + int *ptr2 = Ident((int *)ptr0); ptr2 = Ident((int*)realloc(ptr2, sizeof(*ptr2))); *ptr2 = 42; EXPECT_EQ(42, *ptr2); diff --git a/compiler-rt/lib/builtins/aarch64/lse.S b/compiler-rt/lib/builtins/aarch64/lse.S index d7c1db7..a444d82 100644 --- a/compiler-rt/lib/builtins/aarch64/lse.S +++ b/compiler-rt/lib/builtins/aarch64/lse.S @@ -264,7 +264,7 @@ END_COMPILERRT_OUTLINE_FUNCTION(NAME(LDNM)) NO_EXEC_STACK_DIRECTIVE -// GNU property note for BTI and PAC -GNU_PROPERTY_BTI_PAC +// GNU property note for BTI, PAC, and GCS +GNU_PROPERTY_BTI_PAC_GCS #endif // defined(__aarch64__) || defined(__arm64ec__) diff --git a/compiler-rt/lib/builtins/aarch64/sme-abi.S b/compiler-rt/lib/builtins/aarch64/sme-abi.S index 7c47336..d5510ac 100644 --- a/compiler-rt/lib/builtins/aarch64/sme-abi.S +++ b/compiler-rt/lib/builtins/aarch64/sme-abi.S @@ -371,5 +371,5 @@ END_COMPILERRT_FUNCTION(__arm_sme_restore) NO_EXEC_STACK_DIRECTIVE -// GNU property note for BTI and PAC -GNU_PROPERTY_BTI_PAC +// GNU property note for BTI, PAC, and GCS +GNU_PROPERTY_BTI_PAC_GCS diff --git a/compiler-rt/lib/builtins/assembly.h b/compiler-rt/lib/builtins/assembly.h index 89372f1..d7db7d8 100644 --- a/compiler-rt/lib/builtins/assembly.h +++ b/compiler-rt/lib/builtins/assembly.h @@ -79,11 +79,12 @@ #define FUNC_ALIGN #endif -// BTI and PAC gnu property note +// BTI, PAC, and GCS gnu property note #define NT_GNU_PROPERTY_TYPE_0 5 #define GNU_PROPERTY_AARCH64_FEATURE_1_AND 0xc0000000 #define GNU_PROPERTY_AARCH64_FEATURE_1_BTI 1 #define GNU_PROPERTY_AARCH64_FEATURE_1_PAC 2 +#define GNU_PROPERTY_AARCH64_FEATURE_1_GCS 4 #if defined(__ARM_FEATURE_BTI_DEFAULT) #define BTI_FLAG GNU_PROPERTY_AARCH64_FEATURE_1_BTI @@ -97,6 +98,12 @@ #define PAC_FLAG 0 #endif +#if defined(__ARM_FEATURE_GCS_DEFAULT) +#define GCS_FLAG GNU_PROPERTY_AARCH64_FEATURE_1_GCS +#else +#define GCS_FLAG 0 +#endif + #define GNU_PROPERTY(type, value) \ .pushsection .note.gnu.property, "a" 
SEPARATOR \ .p2align 3 SEPARATOR \ @@ -118,11 +125,12 @@ #define BTI_J #endif -#if (BTI_FLAG | PAC_FLAG) != 0 -#define GNU_PROPERTY_BTI_PAC \ - GNU_PROPERTY(GNU_PROPERTY_AARCH64_FEATURE_1_AND, BTI_FLAG | PAC_FLAG) +#if (BTI_FLAG | PAC_FLAG | GCS_FLAG) != 0 +#define GNU_PROPERTY_BTI_PAC_GCS \ + GNU_PROPERTY(GNU_PROPERTY_AARCH64_FEATURE_1_AND, \ + BTI_FLAG | PAC_FLAG | GCS_FLAG) #else -#define GNU_PROPERTY_BTI_PAC +#define GNU_PROPERTY_BTI_PAC_GCS #endif #if defined(__clang__) || defined(__GCC_HAVE_DWARF2_CFI_ASM) diff --git a/compiler-rt/lib/builtins/cpu_model/aarch64.c b/compiler-rt/lib/builtins/cpu_model/aarch64.c index be002dd..d788052 100644 --- a/compiler-rt/lib/builtins/cpu_model/aarch64.c +++ b/compiler-rt/lib/builtins/cpu_model/aarch64.c @@ -34,12 +34,12 @@ typedef struct __ifunc_arg_t { _Bool __aarch64_have_lse_atomics __attribute__((visibility("hidden"), nocommon)) = false; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) || defined(__OpenBSD__) // clang-format off: should not reorder sys/auxv.h alphabetically #include <sys/auxv.h> // clang-format on #include "aarch64/hwcap.inc" -#include "aarch64/lse_atomics/freebsd.inc" +#include "aarch64/lse_atomics/elf_aux_info.inc" #elif defined(__Fuchsia__) #include "aarch64/hwcap.inc" #include "aarch64/lse_atomics/fuchsia.inc" @@ -68,9 +68,9 @@ struct { // clang-format off #if defined(__APPLE__) #include "aarch64/fmv/apple.inc" -#elif defined(__FreeBSD__) +#elif defined(__FreeBSD__) || defined(__OpenBSD__) #include "aarch64/fmv/mrs.inc" -#include "aarch64/fmv/freebsd.inc" +#include "aarch64/fmv/elf_aux_info.inc" #elif defined(__Fuchsia__) #include "aarch64/fmv/fuchsia.inc" #elif defined(__ANDROID__) diff --git a/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/freebsd.inc b/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/elf_aux_info.inc index aa975dc..aa975dc 100644 --- a/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/freebsd.inc +++ b/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/elf_aux_info.inc diff --git 
a/compiler-rt/lib/builtins/cpu_model/aarch64/lse_atomics/freebsd.inc b/compiler-rt/lib/builtins/cpu_model/aarch64/lse_atomics/elf_aux_info.inc index 4a1f9c2..4a1f9c2 100644 --- a/compiler-rt/lib/builtins/cpu_model/aarch64/lse_atomics/freebsd.inc +++ b/compiler-rt/lib/builtins/cpu_model/aarch64/lse_atomics/elf_aux_info.inc diff --git a/compiler-rt/lib/builtins/cpu_model/x86.c b/compiler-rt/lib/builtins/cpu_model/x86.c index 606571d..79705ca 100644 --- a/compiler-rt/lib/builtins/cpu_model/x86.c +++ b/compiler-rt/lib/builtins/cpu_model/x86.c @@ -328,7 +328,7 @@ static const char *getIntelProcessorTypeAndSubtype(unsigned Family, const char *CPU = 0; switch (Family) { - case 6: + case 0x6: switch (Model) { case 0x0f: // Intel Core 2 Duo processor, Intel Core 2 Duo mobile // processor, Intel Core 2 Quad processor, Intel Core 2 Quad @@ -626,7 +626,7 @@ static const char *getIntelProcessorTypeAndSubtype(unsigned Family, break; } break; - case 19: + case 0x13: switch (Model) { // Diamond Rapids: case 0x01: diff --git a/compiler-rt/lib/builtins/crtbegin.c b/compiler-rt/lib/builtins/crtbegin.c index 447474b..8b5f98f 100644 --- a/compiler-rt/lib/builtins/crtbegin.c +++ b/compiler-rt/lib/builtins/crtbegin.c @@ -19,7 +19,7 @@ __attribute__((visibility("hidden"))) void *__dso_handle = &__dso_handle; #ifdef EH_USE_FRAME_REGISTRY -__extension__ static void *__EH_FRAME_LIST__[] +__extension__ static void *const __EH_FRAME_LIST__[] __attribute__((section(".eh_frame"), aligned(sizeof(void *)))) = {}; extern void __register_frame_info(const void *, void *) __attribute__((weak)); diff --git a/compiler-rt/lib/dfsan/dfsan.cpp b/compiler-rt/lib/dfsan/dfsan.cpp index 886e93e..d09a9a70 100644 --- a/compiler-rt/lib/dfsan/dfsan.cpp +++ b/compiler-rt/lib/dfsan/dfsan.cpp @@ -792,7 +792,7 @@ static void PrintNoOriginTrackingWarning() { static void PrintNoTaintWarning(const void *address) { Decorator d; - Printf(" %sDFSan: no tainted value at %x%s\n", d.Warning(), address, + Printf(" %sDFSan: no 
tainted value at %zx%s\n", d.Warning(), (uptr)address, d.Default()); } diff --git a/compiler-rt/lib/fuzzer/FuzzerDriver.cpp b/compiler-rt/lib/fuzzer/FuzzerDriver.cpp index ad3a65a..af9c260 100644 --- a/compiler-rt/lib/fuzzer/FuzzerDriver.cpp +++ b/compiler-rt/lib/fuzzer/FuzzerDriver.cpp @@ -306,6 +306,11 @@ static int RunInMultipleProcesses(const std::vector<std::string> &Args, return HasErrors ? 1 : 0; } +void StartRssThread(Fuzzer *F, size_t RssLimitMb); + +// Fuchsia needs to do some book checking before starting the RssThread, +// so it has its own implementation. +#if !LIBFUZZER_FUCHSIA static void RssThread(Fuzzer *F, size_t RssLimitMb) { while (true) { SleepSeconds(1); @@ -315,12 +320,13 @@ static void RssThread(Fuzzer *F, size_t RssLimitMb) { } } -static void StartRssThread(Fuzzer *F, size_t RssLimitMb) { +void StartRssThread(Fuzzer *F, size_t RssLimitMb) { if (!RssLimitMb) return; std::thread T(RssThread, F, RssLimitMb); T.detach(); } +#endif int RunOneTest(Fuzzer *F, const char *InputFilePath, size_t MaxLen) { Unit U = FileToVector(InputFilePath); diff --git a/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp b/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp index 7f065c7..1ae8e66 100644 --- a/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp +++ b/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp @@ -68,6 +68,9 @@ void ExitOnErr(zx_status_t Status, const char *Syscall) { } void AlarmHandler(int Seconds) { + // Signal the alarm thread started. + ExitOnErr(_zx_object_signal(SignalHandlerEvent, 0, ZX_USER_SIGNAL_0), + "_zx_object_signal alarm"); while (true) { SleepSeconds(Seconds); Fuzzer::StaticAlarmCallback(); @@ -282,6 +285,7 @@ void CrashHandler() { Self, ZX_EXCEPTION_CHANNEL_DEBUGGER, &Channel.Handle), "_zx_task_create_exception_channel"); + // Signal the crash thread started. 
ExitOnErr(_zx_object_signal(SignalHandlerEvent, 0, ZX_USER_SIGNAL_0), "_zx_object_signal"); @@ -385,10 +389,49 @@ void StopSignalHandler() { _zx_handle_close(SignalHandlerEvent); } +void RssThread(Fuzzer *F, size_t RssLimitMb) { + // Signal the rss thread started. + // + // We must wait for this thread to start because we could accidentally suspend + // it while the crash handler is attempting to handle the + // ZX_EXCP_THREAD_STARTING exception. If the crash handler is suspended by the + // lsan machinery, then there's no way for this thread to indicate it's + // suspended because it's blocked on waiting for the exception to be handled. + ExitOnErr(_zx_object_signal(SignalHandlerEvent, 0, ZX_USER_SIGNAL_0), + "_zx_object_signal rss"); + while (true) { + SleepSeconds(1); + size_t Peak = GetPeakRSSMb(); + if (Peak > RssLimitMb) + F->RssLimitCallback(); + } +} + } // namespace +void StartRssThread(Fuzzer *F, size_t RssLimitMb) { + // Set up the crash handler and wait until it is ready before proceeding. + assert(SignalHandlerEvent == ZX_HANDLE_INVALID); + ExitOnErr(_zx_event_create(0, &SignalHandlerEvent), "_zx_event_create"); + + if (!RssLimitMb) + return; + std::thread T(RssThread, F, RssLimitMb); + T.detach(); + + // Wait for the rss thread to start. + ExitOnErr(_zx_object_wait_one(SignalHandlerEvent, ZX_USER_SIGNAL_0, + ZX_TIME_INFINITE, nullptr), + "_zx_object_wait_one rss"); + ExitOnErr(_zx_object_signal(SignalHandlerEvent, ZX_USER_SIGNAL_0, 0), + "_zx_object_signal rss clear"); +} + // Platform specific functions. void SetSignalHandler(const FuzzingOptions &Options) { + assert(SignalHandlerEvent != ZX_HANDLE_INVALID && + "This should've been setup by StartRssThread."); + // Make sure information from libFuzzer and the sanitizers are easy to // reassemble. `__sanitizer_log_write` has the added benefit of ensuring the // DSO map is always available for the symbolizer. 
@@ -404,6 +447,20 @@ void SetSignalHandler(const FuzzingOptions &Options) { if (Options.HandleAlrm && Options.UnitTimeoutSec > 0) { std::thread T(AlarmHandler, Options.UnitTimeoutSec / 2 + 1); T.detach(); + + // Wait for the alarm thread to start. + // + // We must wait for this thread to start because we could accidentally + // suspend it while the crash handler is attempting to handle the + // ZX_EXCP_THREAD_STARTING exception. If the crash handler is suspended by + // the lsan machinery, then there's no way for this thread to indicate it's + // suspended because it's blocked on waiting for the exception to be + // handled. + ExitOnErr(_zx_object_wait_one(SignalHandlerEvent, ZX_USER_SIGNAL_0, + ZX_TIME_INFINITE, nullptr), + "_zx_object_wait_one alarm"); + ExitOnErr(_zx_object_signal(SignalHandlerEvent, ZX_USER_SIGNAL_0, 0), + "_zx_object_signal alarm clear"); } // Options.HandleInt and Options.HandleTerm are not supported on Fuchsia @@ -413,9 +470,6 @@ void SetSignalHandler(const FuzzingOptions &Options) { !Options.HandleFpe && !Options.HandleAbrt && !Options.HandleTrap) return; - // Set up the crash handler and wait until it is ready before proceeding. - ExitOnErr(_zx_event_create(0, &SignalHandlerEvent), "_zx_event_create"); - SignalHandler = std::thread(CrashHandler); zx_status_t Status = _zx_object_wait_one(SignalHandlerEvent, ZX_USER_SIGNAL_0, ZX_TIME_INFINITE, nullptr); diff --git a/compiler-rt/lib/gwp_asan/tests/basic.cpp b/compiler-rt/lib/gwp_asan/tests/basic.cpp index 88e7ed1..ec2cacc 100644 --- a/compiler-rt/lib/gwp_asan/tests/basic.cpp +++ b/compiler-rt/lib/gwp_asan/tests/basic.cpp @@ -8,6 +8,8 @@ #include "gwp_asan/tests/harness.h" +#include <unistd.h> + TEST_F(CustomGuardedPoolAllocator, BasicAllocation) { InitNumSlots(1); void *Ptr = GPA.allocate(1); @@ -65,11 +67,12 @@ TEST_F(DefaultGuardedPoolAllocator, NonPowerOfTwoAlignment) { // Added multi-page slots? You'll need to expand this test. 
TEST_F(DefaultGuardedPoolAllocator, TooBigForSinglePageSlots) { - EXPECT_EQ(nullptr, GPA.allocate(0x1001, 0)); - EXPECT_EQ(nullptr, GPA.allocate(0x1001, 1)); - EXPECT_EQ(nullptr, GPA.allocate(0x1001, 0x1000)); - EXPECT_EQ(nullptr, GPA.allocate(1, 0x2000)); - EXPECT_EQ(nullptr, GPA.allocate(0, 0x2000)); + size_t PageSize = sysconf(_SC_PAGESIZE); + EXPECT_EQ(nullptr, GPA.allocate(PageSize + 1, 0)); + EXPECT_EQ(nullptr, GPA.allocate(PageSize + 1, 1)); + EXPECT_EQ(nullptr, GPA.allocate(PageSize + 1, PageSize)); + EXPECT_EQ(nullptr, GPA.allocate(1, 2 * PageSize)); + EXPECT_EQ(nullptr, GPA.allocate(0, 2 * PageSize)); } TEST_F(CustomGuardedPoolAllocator, AllocAllSlots) { diff --git a/compiler-rt/lib/gwp_asan/tests/never_allocated.cpp b/compiler-rt/lib/gwp_asan/tests/never_allocated.cpp index 2f695b4..6e1ee47d 100644 --- a/compiler-rt/lib/gwp_asan/tests/never_allocated.cpp +++ b/compiler-rt/lib/gwp_asan/tests/never_allocated.cpp @@ -12,9 +12,13 @@ #include "gwp_asan/crash_handler.h" #include "gwp_asan/tests/harness.h" +#include <unistd.h> + TEST_P(BacktraceGuardedPoolAllocatorDeathTest, NeverAllocated) { + size_t PageSize = sysconf(_SC_PAGESIZE); + SCOPED_TRACE(""); - void *Ptr = GPA.allocate(0x1000); + void *Ptr = GPA.allocate(PageSize); GPA.deallocate(Ptr); std::string DeathNeedle = @@ -23,7 +27,7 @@ TEST_P(BacktraceGuardedPoolAllocatorDeathTest, NeverAllocated) { // Trigger a guard page in a completely different slot that's never allocated. // Previously, there was a bug that this would result in nullptr-dereference // in the posix crash handler. 
- char *volatile NeverAllocatedPtr = static_cast<char *>(Ptr) + 0x3000; + char *volatile NeverAllocatedPtr = static_cast<char *>(Ptr) + 3 * PageSize; if (!Recoverable) { EXPECT_DEATH(*NeverAllocatedPtr = 0, DeathNeedle); return; @@ -37,8 +41,8 @@ TEST_P(BacktraceGuardedPoolAllocatorDeathTest, NeverAllocated) { GetOutputBuffer().clear(); for (size_t i = 0; i < 100; ++i) { *NeverAllocatedPtr = 0; - *(NeverAllocatedPtr + 0x2000) = 0; - *(NeverAllocatedPtr + 0x3000) = 0; + *(NeverAllocatedPtr + 2 * PageSize) = 0; + *(NeverAllocatedPtr + 3 * PageSize) = 0; ASSERT_TRUE(GetOutputBuffer().empty()); } diff --git a/compiler-rt/lib/hwasan/hwasan.cpp b/compiler-rt/lib/hwasan/hwasan.cpp index 24384d8..615bae4 100644 --- a/compiler-rt/lib/hwasan/hwasan.cpp +++ b/compiler-rt/lib/hwasan/hwasan.cpp @@ -176,7 +176,7 @@ static void HwasanFormatMemoryUsage(InternalScopedString &s) { "HWASAN pid: %d rss: %zd threads: %zd stacks: %zd" " thr_aux: %zd stack_depot: %zd uniq_stacks: %zd" " heap: %zd", - internal_getpid(), GetRSS(), thread_stats.n_live_threads, + (int)internal_getpid(), GetRSS(), thread_stats.n_live_threads, thread_stats.total_stack_size, thread_stats.n_live_threads * thread_list.MemoryUsedPerThread(), sds.allocated, sds.n_uniq_ids, asc[AllocatorStatMapped]); @@ -692,7 +692,7 @@ void __hwasan_handle_longjmp(const void *sp_dst) { "WARNING: HWASan is ignoring requested __hwasan_handle_longjmp: " "stack top: %p; target %p; distance: %p (%zd)\n" "False positive error reports may follow\n", - (void *)sp, (void *)dst, dst - sp, dst - sp); + (void *)sp, (void *)dst, (void *)(dst - sp), dst - sp); return; } TagMemory(sp, dst - sp, 0); diff --git a/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S b/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S index fd20825e..825f411 100644 --- a/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S +++ b/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S @@ -11,4 +11,4 @@ NO_EXEC_STACK_DIRECTIVE -GNU_PROPERTY_BTI_PAC +GNU_PROPERTY_BTI_PAC_GCS 
diff --git a/compiler-rt/lib/hwasan/hwasan_interface_internal.h b/compiler-rt/lib/hwasan/hwasan_interface_internal.h index 8f2f77d..86ddfea 100644 --- a/compiler-rt/lib/hwasan/hwasan_interface_internal.h +++ b/compiler-rt/lib/hwasan/hwasan_interface_internal.h @@ -247,6 +247,13 @@ void *__hwasan_memmove_match_all(void *dest, const void *src, uptr n, u8); SANITIZER_INTERFACE_ATTRIBUTE void __hwasan_set_error_report_callback(void (*callback)(const char *)); + +// hwasan does not need fake stack, so we leave it empty here. +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_start_switch_fiber(void **, const void *bottom, uptr size); +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_finish_switch_fiber(void *, const void **bottom_old, + uptr *size_old); } // extern "C" #endif // HWASAN_INTERFACE_INTERNAL_H diff --git a/compiler-rt/lib/hwasan/hwasan_malloc_bisect.h b/compiler-rt/lib/hwasan/hwasan_malloc_bisect.h index 7d134e8..52a2843 100644 --- a/compiler-rt/lib/hwasan/hwasan_malloc_bisect.h +++ b/compiler-rt/lib/hwasan/hwasan_malloc_bisect.h @@ -41,7 +41,7 @@ static inline bool malloc_bisect(StackTrace *stack, uptr orig_size) { if (h < left || h > right) return false; if (flags()->malloc_bisect_dump) { - Printf("[alloc] %u %zu\n", h, orig_size); + Printf("[alloc] %u %zu\n", (u32)h, orig_size); stack->Print(); } return true; diff --git a/compiler-rt/lib/hwasan/hwasan_report.cpp b/compiler-rt/lib/hwasan/hwasan_report.cpp index bc66e6e..6eafcf9 100644 --- a/compiler-rt/lib/hwasan/hwasan_report.cpp +++ b/compiler-rt/lib/hwasan/hwasan_report.cpp @@ -306,8 +306,9 @@ static void PrintStackAllocations(const StackAllocationsRingBuffer *sa, "%p is located %zd bytes %s a %zd-byte local variable %s " "[%p,%p) " "in %s %s\n", - untagged_addr, offset, whence, local.size, local.name, best_beg, - best_beg + local.size, local.function_name, location.data()); + (void *)untagged_addr, offset, whence, local.size, local.name, + (void *)best_beg, (void *)(best_beg + local.size), + 
local.function_name, location.data()); location.clear(); Printf("%s\n", d.Default()); } @@ -738,8 +739,8 @@ void BaseReport::PrintHeapOrGlobalCandidate() const { Printf("%s", d.Location()); Printf("%p is located %zd bytes %s a %zd-byte region [%p,%p)\n", untagged_addr, offset, whence, - candidate.heap.end - candidate.heap.begin, candidate.heap.begin, - candidate.heap.end); + candidate.heap.end - candidate.heap.begin, + (void *)candidate.heap.begin, (void *)candidate.heap.end); Printf("%s", d.Allocation()); Printf("allocated by thread T%u here:\n", candidate.heap.thread_id); Printf("%s", d.Default()); @@ -762,11 +763,11 @@ void BaseReport::PrintHeapOrGlobalCandidate() const { Printf( "%p is located %zd bytes %s a %zd-byte global variable " "%s [%p,%p) in %s\n", - untagged_addr, + (void *)untagged_addr, candidate.after ? untagged_addr - (info.start + info.size) : info.start - untagged_addr, candidate.after ? "after" : "before", info.size, info.name, - info.start, info.start + info.size, module_name); + (void *)info.start, (void *)(info.start + info.size), module_name); } else { uptr size = GetGlobalSizeFromDescriptor(candidate.untagged_addr); if (size == 0) @@ -774,14 +775,14 @@ void BaseReport::PrintHeapOrGlobalCandidate() const { Printf( "%p is located %s a global variable in " "\n #0 0x%x (%s+0x%x)\n", - untagged_addr, candidate.after ? "after" : "before", - candidate.untagged_addr, module_name, module_address); + (void *)untagged_addr, candidate.after ? "after" : "before", + (void *)candidate.untagged_addr, module_name, (u32)module_address); else Printf( "%p is located %s a %zd-byte global variable in " "\n #0 0x%x (%s+0x%x)\n", - untagged_addr, candidate.after ? "after" : "before", size, - candidate.untagged_addr, module_name, module_address); + (void *)untagged_addr, candidate.after ? 
"after" : "before", size, + (void *)candidate.untagged_addr, module_name, (u32)module_address); } Printf("%s", d.Default()); } @@ -792,8 +793,8 @@ void BaseReport::PrintAddressDescription() const { int num_descriptions_printed = 0; if (MemIsShadow(untagged_addr)) { - Printf("%s%p is HWAsan shadow memory.\n%s", d.Location(), untagged_addr, - d.Default()); + Printf("%s%p is HWAsan shadow memory.\n%s", d.Location(), + (void *)untagged_addr, d.Default()); return; } @@ -802,7 +803,7 @@ void BaseReport::PrintAddressDescription() const { Printf( "%s[%p,%p) is a %s %s heap chunk; " "size: %zd offset: %zd\n%s", - d.Location(), heap.begin, heap.begin + heap.size, + d.Location(), (void *)heap.begin, (void *)(heap.begin + heap.size), heap.from_small_heap ? "small" : "large", heap.is_allocated ? "allocated" : "unallocated", heap.size, untagged_addr - heap.begin, d.Default()); @@ -821,8 +822,8 @@ void BaseReport::PrintAddressDescription() const { Printf("%s", d.Error()); Printf("\nCause: stack tag-mismatch\n"); Printf("%s", d.Location()); - Printf("Address %p is located in stack of thread T%zd\n", untagged_addr, - sa.thread_id()); + Printf("Address %p is located in stack of thread T%zd\n", + (void *)untagged_addr, (ssize)sa.thread_id()); Printf("%s", d.Default()); announce_by_id(sa.thread_id()); PrintStackAllocations(sa.get(), ptr_tag, untagged_addr); @@ -842,9 +843,9 @@ void BaseReport::PrintAddressDescription() const { Printf("\nCause: use-after-free\n"); Printf("%s", d.Location()); Printf("%p is located %zd bytes inside a %zd-byte region [%p,%p)\n", - untagged_addr, untagged_addr - UntagAddr(har.tagged_addr), - har.requested_size, UntagAddr(har.tagged_addr), - UntagAddr(har.tagged_addr) + har.requested_size); + (void *)untagged_addr, untagged_addr - UntagAddr(har.tagged_addr), + (ssize)har.requested_size, UntagAddr(har.tagged_addr), + (void *)(UntagAddr(har.tagged_addr) + har.requested_size)); Printf("%s", d.Allocation()); Printf("freed by thread T%u here:\n", 
ha.free_thread_id); Printf("%s", d.Default()); @@ -858,7 +859,7 @@ void BaseReport::PrintAddressDescription() const { // Print a developer note: the index of this heap object // in the thread's deallocation ring buffer. Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ha.ring_index + 1, - flags()->heap_history_size); + (ssize)flags()->heap_history_size); Printf("hwasan_dev_note_num_matching_addrs: %zd\n", ha.num_matching_addrs); Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n", ha.num_matching_addrs_4b); @@ -915,10 +916,11 @@ InvalidFreeReport::~InvalidFreeReport() { const Thread *thread = GetCurrentThread(); if (thread) { Report("ERROR: %s: %s on address %p at pc %p on thread T%zd\n", - SanitizerToolName, bug_type, untagged_addr, pc, thread->unique_id()); + SanitizerToolName, bug_type, (void *)untagged_addr, (void *)pc, + (ssize)thread->unique_id()); } else { Report("ERROR: %s: %s on address %p at pc %p on unknown thread\n", - SanitizerToolName, bug_type, untagged_addr, pc); + SanitizerToolName, bug_type, (void *)untagged_addr, (void *)pc); } Printf("%s", d.Access()); if (shadow.addr) { @@ -967,7 +969,8 @@ TailOverwrittenReport::~TailOverwrittenReport() { Printf("%s", d.Error()); const char *bug_type = "allocation-tail-overwritten"; Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName, - bug_type, untagged_addr, untagged_addr + orig_size, orig_size); + bug_type, (void *)untagged_addr, (void *)(untagged_addr + orig_size), + orig_size); Printf("\n%s", d.Default()); Printf( "Stack of invalid access unknown. 
Issue detected at deallocation " @@ -1037,7 +1040,7 @@ TagMismatchReport::~TagMismatchReport() { uptr pc = GetTopPc(stack); Printf("%s", d.Error()); Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type, - untagged_addr, pc); + (void *)untagged_addr, (void *)pc); Thread *t = GetCurrentThread(); @@ -1049,12 +1052,12 @@ TagMismatchReport::~TagMismatchReport() { GetShortTagCopy(MemToShadow(untagged_addr + mismatch_offset)); Printf( "%s of size %zu at %p tags: %02x/%02x(%02x) (ptr/mem) in thread T%zd\n", - is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag, - mem_tag, short_tag, t->unique_id()); + is_store ? "WRITE" : "READ", access_size, (void *)untagged_addr, + ptr_tag, mem_tag, short_tag, (ssize)t->unique_id()); } else { Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n", - is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag, - mem_tag, t->unique_id()); + is_store ? "WRITE" : "READ", access_size, (void *)untagged_addr, + ptr_tag, mem_tag, (ssize)t->unique_id()); } if (mismatch_offset) Printf("Invalid access starting at offset %zu\n", mismatch_offset); @@ -1093,7 +1096,7 @@ void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size, // See the frame breakdown defined in __hwasan_tag_mismatch (from // hwasan_tag_mismatch_{aarch64,riscv64}.S). void ReportRegisters(const uptr *frame, uptr pc) { - Printf("\nRegisters where the failure occurred (pc %p):\n", pc); + Printf("\nRegisters where the failure occurred (pc %p):\n", (void *)pc); // We explicitly print a single line (4 registers/line) each iteration to // reduce the amount of logcat error messages printed. 
Each Printf() will diff --git a/compiler-rt/lib/hwasan/hwasan_setjmp_aarch64.S b/compiler-rt/lib/hwasan/hwasan_setjmp_aarch64.S index 0c0abb6..b8d98b0 100644 --- a/compiler-rt/lib/hwasan/hwasan_setjmp_aarch64.S +++ b/compiler-rt/lib/hwasan/hwasan_setjmp_aarch64.S @@ -99,4 +99,4 @@ ASM_TRAMPOLINE_ALIAS(_setjmp, setjmp) // We do not need executable stack. NO_EXEC_STACK_DIRECTIVE -GNU_PROPERTY_BTI_PAC +GNU_PROPERTY_BTI_PAC_GCS diff --git a/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S b/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S index fd060c5..be82475 100644 --- a/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S +++ b/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S @@ -157,4 +157,4 @@ mismatch: // We do not need executable stack. NO_EXEC_STACK_DIRECTIVE -GNU_PROPERTY_BTI_PAC +GNU_PROPERTY_BTI_PAC_GCS diff --git a/compiler-rt/lib/hwasan/hwasan_thread.cpp b/compiler-rt/lib/hwasan/hwasan_thread.cpp index 5c07522..bebc11d 100644 --- a/compiler-rt/lib/hwasan/hwasan_thread.cpp +++ b/compiler-rt/lib/hwasan/hwasan_thread.cpp @@ -119,10 +119,64 @@ void Thread::Destroy() { *GetCurrentThreadLongPtr() = 0; } +void Thread::StartSwitchFiber(uptr bottom, uptr size) { + if (atomic_load(&stack_switching_, memory_order_acquire)) { + Report("ERROR: starting fiber switch while in fiber switch\n"); + Die(); + } + + next_stack_bottom_ = bottom; + next_stack_top_ = bottom + size; + atomic_store(&stack_switching_, 1, memory_order_release); +} + +void Thread::FinishSwitchFiber(uptr *bottom_old, uptr *size_old) { + if (!atomic_load(&stack_switching_, memory_order_acquire)) { + Report("ERROR: finishing a fiber switch that has not started\n"); + Die(); + } + + if (bottom_old) + *bottom_old = stack_bottom_; + if (size_old) + *size_old = stack_top_ - stack_bottom_; + stack_bottom_ = next_stack_bottom_; + stack_top_ = next_stack_top_; + atomic_store(&stack_switching_, 0, memory_order_release); + next_stack_top_ = 0; + next_stack_bottom_ = 0; +} + +inline 
Thread::StackBounds Thread::GetStackBounds() const { + if (!atomic_load(&stack_switching_, memory_order_acquire)) { + // Make sure the stack bounds are fully initialized. + if (stack_bottom_ >= stack_top_) + return {0, 0}; + return {stack_bottom_, stack_top_}; + } + const uptr cur_stack = (uptr)__builtin_frame_address(0); + // Note: need to check next stack first, because FinishSwitchFiber + // may be in process of overwriting stack_top_/bottom_. But in such case + // we are already on the next stack. + if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_) + return {next_stack_bottom_, next_stack_top_}; + return {stack_bottom_, stack_top_}; +} + +uptr Thread::stack_top() { return GetStackBounds().top; } + +uptr Thread::stack_bottom() { return GetStackBounds().bottom; } + +uptr Thread::stack_size() { + const auto bounds = GetStackBounds(); + return bounds.top - bounds.bottom; +} + void Thread::Print(const char *Prefix) { - Printf("%sT%zd %p stack: [%p,%p) sz: %zd tls: [%p,%p)\n", Prefix, unique_id_, - (void *)this, stack_bottom(), stack_top(), - stack_top() - stack_bottom(), tls_begin(), tls_end()); + Printf("%sT%zd %p stack: [%p,%p) sz: %zd tls: [%p,%p)\n", Prefix, + (ssize)unique_id_, (void *)this, (void *)stack_bottom(), + (void *)stack_top(), stack_top() - stack_bottom(), (void *)tls_begin(), + (void *)tls_end()); } static u32 xorshift(u32 state) { @@ -226,3 +280,25 @@ void PrintThreads() { } } // namespace __lsan + +// ---------------------- Interface ---------------- {{{1 +using namespace __hwasan; + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_start_switch_fiber(void **, const void *bottom, uptr size) { + if (auto *t = GetCurrentThread()) + t->StartSwitchFiber((uptr)bottom, size); + else + VReport(1, "__hwasan_start_switch_fiber called from unknown thread\n"); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_finish_switch_fiber(void *, const void **bottom_old, + uptr *size_old) { + if (auto *t = GetCurrentThread()) + 
t->FinishSwitchFiber((uptr *)bottom_old, size_old); + else + VReport(1, "__hwasan_finish_switch_fiber called from unknown thread\n"); +} +} diff --git a/compiler-rt/lib/hwasan/hwasan_thread.h b/compiler-rt/lib/hwasan/hwasan_thread.h index 62d6157..8ef282f 100644 --- a/compiler-rt/lib/hwasan/hwasan_thread.h +++ b/compiler-rt/lib/hwasan/hwasan_thread.h @@ -41,9 +41,9 @@ class Thread { void Destroy(); - uptr stack_top() { return stack_top_; } - uptr stack_bottom() { return stack_bottom_; } - uptr stack_size() { return stack_top() - stack_bottom(); } + uptr stack_top(); + uptr stack_bottom(); + uptr stack_size(); uptr tls_begin() { return tls_begin_; } uptr tls_end() { return tls_end_; } DTLS *dtls() { return dtls_; } @@ -53,6 +53,9 @@ class Thread { return addr >= stack_bottom_ && addr < stack_top_; } + void StartSwitchFiber(uptr bottom, uptr size); + void FinishSwitchFiber(uptr *bottom_old, uptr *size_old); + AllocatorCache *allocator_cache() { return &allocator_cache_; } HeapAllocationsRingBuffer *heap_allocations() { return heap_allocations_; } StackAllocationsRingBuffer *stack_allocations() { return stack_allocations_; } @@ -80,9 +83,22 @@ class Thread { void ClearShadowForThreadStackAndTLS(); void Print(const char *prefix); void InitRandomState(); + + struct StackBounds { + uptr bottom; + uptr top; + }; + StackBounds GetStackBounds() const; + uptr vfork_spill_; uptr stack_top_; uptr stack_bottom_; + // these variables are used when the thread is about to switch stack + uptr next_stack_top_; + uptr next_stack_bottom_; + // true if switching is in progress + atomic_uint8_t stack_switching_; + uptr tls_begin_; uptr tls_end_; DTLS *dtls_; diff --git a/compiler-rt/lib/lsan/lsan_common.cpp b/compiler-rt/lib/lsan/lsan_common.cpp index d9afa45..39537b6 100644 --- a/compiler-rt/lib/lsan/lsan_common.cpp +++ b/compiler-rt/lib/lsan/lsan_common.cpp @@ -806,7 +806,7 @@ static bool ReportUnsuspendedThreads( succeded = false; Report( "Running thread %zu was not suspended. 
False leaks are possible.\n", - os_id); + (usize)os_id); } } return succeded; diff --git a/compiler-rt/lib/memprof/memprof_malloc_linux.cpp b/compiler-rt/lib/memprof/memprof_malloc_linux.cpp index 2a028c7..68fe654 100644 --- a/compiler-rt/lib/memprof/memprof_malloc_linux.cpp +++ b/compiler-rt/lib/memprof/memprof_malloc_linux.cpp @@ -50,6 +50,24 @@ INTERCEPTOR(void, cfree, void *ptr) { } #endif // SANITIZER_INTERCEPT_CFREE +#if SANITIZER_INTERCEPT_FREE_SIZED +INTERCEPTOR(void, free_sized, void *ptr, uptr size) { + if (DlsymAlloc::PointerIsMine(ptr)) + return DlsymAlloc::Free(ptr); + GET_STACK_TRACE_FREE; + memprof_delete(ptr, size, 0, &stack, FROM_MALLOC); +} +#endif // SANITIZER_INTERCEPT_FREE_SIZED + +#if SANITIZER_INTERCEPT_FREE_ALIGNED_SIZED +INTERCEPTOR(void, free_aligned_sized, void *ptr, uptr alignment, uptr size) { + if (DlsymAlloc::PointerIsMine(ptr)) + return DlsymAlloc::Free(ptr); + GET_STACK_TRACE_FREE; + memprof_delete(ptr, size, alignment, &stack, FROM_MALLOC); +} +#endif // SANITIZER_INTERCEPT_FREE_ALIGNED_SIZED + INTERCEPTOR(void *, malloc, uptr size) { if (DlsymAlloc::Use()) return DlsymAlloc::Allocate(size); diff --git a/compiler-rt/lib/memprof/memprof_shadow_setup.cpp b/compiler-rt/lib/memprof/memprof_shadow_setup.cpp index e7832f6..7712a94 100644 --- a/compiler-rt/lib/memprof/memprof_shadow_setup.cpp +++ b/compiler-rt/lib/memprof/memprof_shadow_setup.cpp @@ -29,7 +29,7 @@ static void ProtectGap(uptr addr, uptr size) { Printf("protect_shadow_gap=0:" " not protecting shadow gap, allocating gap's shadow\n" "|| `[%p, %p]` || ShadowGap's shadow ||\n", - GapShadowBeg, GapShadowEnd); + (void *)GapShadowBeg, (void *)GapShadowEnd); ReserveShadowMemoryRange(GapShadowBeg, GapShadowEnd, "unprotected gap shadow"); return; diff --git a/compiler-rt/lib/msan/msan_allocator.cpp b/compiler-rt/lib/msan/msan_allocator.cpp index 2b543db..64df863 100644 --- a/compiler-rt/lib/msan/msan_allocator.cpp +++ b/compiler-rt/lib/msan/msan_allocator.cpp @@ -230,6 +230,12 @@ 
static void *MsanAllocate(BufferedStackTrace *stack, uptr size, uptr alignment, __msan_set_origin(allocated, size, o.raw_id()); } } + + uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(allocated); + // For compatibility, the allocator converted 0-sized allocations into 1 byte + if (size == 0 && actually_allocated_size > 0 && flags()->poison_in_malloc) + __msan_poison(allocated, 1); + UnpoisonParam(2); RunMallocHooks(allocated, size); return allocated; diff --git a/compiler-rt/lib/msan/tests/msan_test.cpp b/compiler-rt/lib/msan/tests/msan_test.cpp index d1c4814..b0d8409 100644 --- a/compiler-rt/lib/msan/tests/msan_test.cpp +++ b/compiler-rt/lib/msan/tests/msan_test.cpp @@ -4271,14 +4271,39 @@ TEST(VectorSadTest, sse2_psad_bw) { } TEST(VectorMaddTest, mmx_pmadd_wd) { - V4x16 a = {Poisoned<U2>(), 1, 2, 3}; + V4x16 a = {Poisoned<U2>(0), 1, 2, 3}; V4x16 b = {100, 101, 102, 103}; V2x32 c = _mm_madd_pi16(a, b); + // Multiply step: + // {Poison * 100, 1 * 101, 2 * 102, 3 * 103} + // == {Poison, 1 * 101, 2 * 102, 3 * 103} + // Notice that for the poisoned value, we ignored the concrete zero value. + // + // Horizontal add step: + // {Poison + 1 * 101, 2 * 102 + 3 * 103} + // == {Poison, 2 * 102 + 3 * 103} EXPECT_POISONED(c[0]); EXPECT_NOT_POISONED(c[1]); EXPECT_EQ((unsigned)(2 * 102 + 3 * 103), c[1]); + + V4x16 d = {Poisoned<U2>(0), 1, 0, 3}; + V4x16 e = {100, 101, Poisoned<U2>(102), 103}; + V2x32 f = _mm_madd_pi16(d, e); + // Multiply step: + // {Poison * 100, 1 * 101, 0 * Poison, 3 * 103} + // == {Poison, 1 * 101, 0 , 3 * 103} + // Notice that 0 * Poison == 0. 
+ // + // Horizontal add step: + // {Poison + 1 * 101, 0 + 3 * 103} + // == {Poison, 3 * 103} + + EXPECT_POISONED(f[0]); + EXPECT_NOT_POISONED(f[1]); + + EXPECT_EQ((unsigned)(3 * 103), f[1]); } TEST(VectorCmpTest, mm_cmpneq_ps) { diff --git a/compiler-rt/lib/rtsan/rtsan_interceptors_posix.cpp b/compiler-rt/lib/rtsan/rtsan_interceptors_posix.cpp index a9d864e..1b1ff9b 100644 --- a/compiler-rt/lib/rtsan/rtsan_interceptors_posix.cpp +++ b/compiler-rt/lib/rtsan/rtsan_interceptors_posix.cpp @@ -15,6 +15,7 @@ #include "interception/interception.h" #include "sanitizer_common/sanitizer_allocator_dlsym.h" +#include "sanitizer_common/sanitizer_glibc_version.h" #include "sanitizer_common/sanitizer_platform_interceptors.h" #include "interception/interception.h" @@ -766,6 +767,12 @@ INTERCEPTOR(int, pthread_join, pthread_t thread, void **value_ptr) { return REAL(pthread_join)(thread, value_ptr); } +INTERCEPTOR(int, pthread_cond_init, pthread_cond_t *cond, + const pthread_condattr_t *a) { + __rtsan_notify_intercepted_call("pthread_cond_init"); + return REAL(pthread_cond_init)(cond, a); +} + INTERCEPTOR(int, pthread_cond_signal, pthread_cond_t *cond) { __rtsan_notify_intercepted_call("pthread_cond_signal"); return REAL(pthread_cond_signal)(cond); @@ -788,6 +795,11 @@ INTERCEPTOR(int, pthread_cond_timedwait, pthread_cond_t *cond, return REAL(pthread_cond_timedwait)(cond, mutex, ts); } +INTERCEPTOR(int, pthread_cond_destroy, pthread_cond_t *cond) { + __rtsan_notify_intercepted_call("pthread_cond_destroy"); + return REAL(pthread_cond_destroy)(cond); +} + INTERCEPTOR(int, pthread_rwlock_rdlock, pthread_rwlock_t *lock) { __rtsan_notify_intercepted_call("pthread_rwlock_rdlock"); return REAL(pthread_rwlock_rdlock)(lock); @@ -1641,10 +1653,26 @@ void __rtsan::InitializeInterceptors() { INTERCEPT_FUNCTION(pthread_mutex_lock); INTERCEPT_FUNCTION(pthread_mutex_unlock); INTERCEPT_FUNCTION(pthread_join); + + // See the comment in tsan_interceptors_posix.cpp. 
+#if SANITIZER_GLIBC && !__GLIBC_PREREQ(2, 36) && \ + (defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 || \ + defined(__s390x__)) + INTERCEPT_FUNCTION_VER(pthread_cond_init, "GLIBC_2.3.2"); + INTERCEPT_FUNCTION_VER(pthread_cond_signal, "GLIBC_2.3.2"); + INTERCEPT_FUNCTION_VER(pthread_cond_broadcast, "GLIBC_2.3.2"); + INTERCEPT_FUNCTION_VER(pthread_cond_wait, "GLIBC_2.3.2"); + INTERCEPT_FUNCTION_VER(pthread_cond_timedwait, "GLIBC_2.3.2"); + INTERCEPT_FUNCTION_VER(pthread_cond_destroy, "GLIBC_2.3.2"); +#else + INTERCEPT_FUNCTION(pthread_cond_init); INTERCEPT_FUNCTION(pthread_cond_signal); INTERCEPT_FUNCTION(pthread_cond_broadcast); INTERCEPT_FUNCTION(pthread_cond_wait); INTERCEPT_FUNCTION(pthread_cond_timedwait); + INTERCEPT_FUNCTION(pthread_cond_destroy); +#endif + INTERCEPT_FUNCTION(pthread_rwlock_rdlock); INTERCEPT_FUNCTION(pthread_rwlock_unlock); INTERCEPT_FUNCTION(pthread_rwlock_wrlock); diff --git a/compiler-rt/lib/rtsan/tests/rtsan_test_interceptors_posix.cpp b/compiler-rt/lib/rtsan/tests/rtsan_test_interceptors_posix.cpp index 9b684e3..20e3b48 100644 --- a/compiler-rt/lib/rtsan/tests/rtsan_test_interceptors_posix.cpp +++ b/compiler-rt/lib/rtsan/tests/rtsan_test_interceptors_posix.cpp @@ -753,15 +753,24 @@ TEST_F(RtsanOpenedFileTest, RewindDieWhenRealtime) { #endif TEST_F(RtsanOpenedFileTest, IoctlDiesWhenRealtime) { - auto Func = [this]() { ioctl(GetOpenFd(), FIONREAD); }; + auto Func = [this]() { + int arg{}; + ioctl(GetOpenFd(), FIONREAD, &arg); + EXPECT_THAT(arg, Ge(0)); + }; ExpectRealtimeDeath(Func, "ioctl"); ExpectNonRealtimeSurvival(Func); } +TEST_F(RtsanOpenedFileTest, IoctlBehavesWithoutOutputArg) { + const int result = ioctl(GetOpenFd(), FIONCLEX); + EXPECT_THAT(result, Ne(-1)); +} + TEST_F(RtsanOpenedFileTest, IoctlBehavesWithOutputArg) { int arg{}; - ioctl(GetOpenFd(), FIONREAD, &arg); - + const int result = ioctl(GetOpenFd(), FIONREAD, &arg); + ASSERT_THAT(result, Ne(-1)); EXPECT_THAT(arg, Ge(0)); } @@ -1232,6 +1241,24 @@ 
TEST(TestRtsanInterceptors, SpinLockLockDiesWhenRealtime) { } #endif +TEST(TestRtsanInterceptors, PthreadCondInitDiesWhenRealtime) { + pthread_cond_t cond{}; + auto Func = [&cond]() { pthread_cond_init(&cond, nullptr); }; + ExpectRealtimeDeath(Func, "pthread_cond_init"); + ExpectNonRealtimeSurvival(Func); +} + +TEST(TestRtsanInterceptors, PthreadCondDestroyDiesWhenRealtime) { + pthread_cond_t cond{}; + ASSERT_EQ(0, pthread_cond_init(&cond, nullptr)); + + auto Func = [&cond]() { pthread_cond_destroy(&cond); }; + ExpectRealtimeDeath(Func, "pthread_cond_destroy"); + ExpectNonRealtimeSurvival(Func); + + pthread_cond_destroy(&cond); +} + TEST(TestRtsanInterceptors, PthreadCondSignalDiesWhenRealtime) { pthread_cond_t cond{}; ASSERT_EQ(0, pthread_cond_init(&cond, nullptr)); diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h index 51ac1b6..b39eb15 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h @@ -113,6 +113,24 @@ class SizeClassAllocator64 { // ~(uptr)0. void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) { uptr TotalSpaceSize = kSpaceSize + AdditionalSize(); + + uptr MaxAddr = GetMaxUserVirtualAddress(); + // VReport does not call the sanitizer allocator. + VReport(3, "Max user virtual address: 0x%zx\n", MaxAddr); + VReport(3, "Total space size for primary allocator: 0x%zx\n", + TotalSpaceSize); + // TODO: revise the check if we ever configure sanitizers to deliberately + // map beyond the 2**48 barrier (note that Linux pretends the VMA is + // limited to 48-bit for backwards compatibility, but allows apps to + // explicitly specify an address beyond that). + if (heap_start + TotalSpaceSize >= MaxAddr) { + // We can't easily adjust the requested heap size, because kSpaceSize is + // const (for optimization) and used throughout the code. 
+ VReport(0, "Error: heap size %zx exceeds max user virtual address %zx\n", + TotalSpaceSize, MaxAddr); + VReport( + 0, "Try using a kernel that allows a larger virtual address space\n"); + } PremappedHeap = heap_start != 0; if (PremappedHeap) { CHECK(!kUsingConstantSpaceBeg); diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S index cdfa6f1..5066953 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S +++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S @@ -43,6 +43,6 @@ ASM_SIZE(vfork) ASM_INTERCEPTOR_TRAMPOLINE(vfork) ASM_TRAMPOLINE_ALIAS(vfork, vfork) -GNU_PROPERTY_BTI_PAC +GNU_PROPERTY_BTI_PAC_GCS #endif diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp index 625f30c..3c61b60 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp @@ -14,6 +14,7 @@ #include "sanitizer_fuchsia.h" #if SANITIZER_FUCHSIA +# include <limits.h> # include <pthread.h> # include <stdlib.h> # include <unistd.h> @@ -117,11 +118,37 @@ uptr GetMmapGranularity() { return _zx_system_get_page_size(); } sanitizer_shadow_bounds_t ShadowBounds; +// Any sanitizer that utilizes shadow should explicitly call whenever it's +// appropriate for that sanitizer to reference shadow bounds. For ASan, this is +// done in `InitializeShadowMemory` and for HWASan, this is done in +// `InitShadow`. void InitShadowBounds() { ShadowBounds = __sanitizer_shadow_bounds(); } +// TODO(leonardchan): It's not immediately clear from a user perspective if +// `GetMaxUserVirtualAddress` should be called exatly once on runtime startup +// or can be called multiple times. 
Currently it looks like most instances of +// `GetMaxUserVirtualAddress` are meant to be called once, but if someone +// decides to call this multiple times in the future, we should have a separate +// function that's ok to call multiple times. Ideally we would just invoke this +// syscall once. Also for Fuchsia, this syscall technically gets invoked twice +// since `__sanitizer_shadow_bounds` also invokes this syscall under the hood. uptr GetMaxUserVirtualAddress() { - InitShadowBounds(); - return ShadowBounds.memory_limit - 1; + zx_info_vmar_t info; + zx_status_t status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, + &info, sizeof(info), NULL, NULL); + CHECK_EQ(status, ZX_OK); + + // Find the top of the accessible address space. + uintptr_t top = info.base + info.len; + + // Round it up to a power-of-two size. There may be some pages at + // the top that can't actually be mapped, but for purposes of the + // the shadow, we'll pretend they could be. + int bit = (sizeof(uintptr_t) * CHAR_BIT) - __builtin_clzl(top); + if (top != (uintptr_t)1 << bit) + top = (uintptr_t)1 << (bit + 1); + + return top - 1; } uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); } diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp index f5cb85b..530ff90 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp @@ -29,6 +29,7 @@ # include "sanitizer_solaris.h" # if SANITIZER_HAIKU +# define _GNU_SOURCE # define _DEFAULT_SOURCE # endif diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp index 3bc24152..d4811ff 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp @@ -769,11 +769,17 @@ void internal_join_thread(void *th) { pthread_join((pthread_t)th, 0); } static Mutex 
syslog_lock; # endif +# if SANITIZER_DRIVERKIT +# define SANITIZER_OS_LOG os_log +# else +# define SANITIZER_OS_LOG os_log_error +# endif + void WriteOneLineToSyslog(const char *s) { #if !SANITIZER_GO syslog_lock.CheckLocked(); if (GetMacosAlignedVersion() >= MacosVersion(10, 12)) { - os_log_error(OS_LOG_DEFAULT, "%{public}s", s); + SANITIZER_OS_LOG(OS_LOG_DEFAULT, "%{public}s", s); } else { #pragma clang diagnostic push // as_log is deprecated. @@ -837,22 +843,22 @@ void LogMessageOnPrintf(const char *str) { void LogFullErrorReport(const char *buffer) { # if !SANITIZER_GO - // Log with os_log_error. This will make it into the crash log. + // When logging with os_log_error this will make it into the crash log. if (internal_strncmp(SanitizerToolName, "AddressSanitizer", sizeof("AddressSanitizer") - 1) == 0) - os_log_error(OS_LOG_DEFAULT, "Address Sanitizer reported a failure."); + SANITIZER_OS_LOG(OS_LOG_DEFAULT, "Address Sanitizer reported a failure."); else if (internal_strncmp(SanitizerToolName, "UndefinedBehaviorSanitizer", sizeof("UndefinedBehaviorSanitizer") - 1) == 0) - os_log_error(OS_LOG_DEFAULT, - "Undefined Behavior Sanitizer reported a failure."); + SANITIZER_OS_LOG(OS_LOG_DEFAULT, + "Undefined Behavior Sanitizer reported a failure."); else if (internal_strncmp(SanitizerToolName, "ThreadSanitizer", sizeof("ThreadSanitizer") - 1) == 0) - os_log_error(OS_LOG_DEFAULT, "Thread Sanitizer reported a failure."); + SANITIZER_OS_LOG(OS_LOG_DEFAULT, "Thread Sanitizer reported a failure."); else - os_log_error(OS_LOG_DEFAULT, "Sanitizer tool reported a failure."); + SANITIZER_OS_LOG(OS_LOG_DEFAULT, "Sanitizer tool reported a failure."); if (common_flags()->log_to_syslog) - os_log_error(OS_LOG_DEFAULT, "Consult syslog for more information."); + SANITIZER_OS_LOG(OS_LOG_DEFAULT, "Consult syslog for more information."); // Log to syslog. 
// The logging on OS X may call pthread_create so we need the threading diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp index 4c1e005..c4fa1e3 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp @@ -71,14 +71,8 @@ #include <semaphore.h> #include <signal.h> #include <stddef.h> -#include <md5.h> -#include <sha224.h> -#include <sha256.h> -#include <sha384.h> -#include <sha512.h> #include <stdio.h> #include <stringlist.h> -#include <term.h> #include <termios.h> #include <time.h> #include <ttyent.h> @@ -370,22 +364,6 @@ const int si_SEGV_MAPERR = SEGV_MAPERR; const int si_SEGV_ACCERR = SEGV_ACCERR; const int unvis_valid = UNVIS_VALID; const int unvis_validpush = UNVIS_VALIDPUSH; - -const unsigned MD5_CTX_sz = sizeof(MD5_CTX); -const unsigned MD5_return_length = MD5_DIGEST_STRING_LENGTH; - -#define SHA2_CONST(LEN) \ - const unsigned SHA##LEN##_CTX_sz = sizeof(SHA##LEN##_CTX); \ - const unsigned SHA##LEN##_return_length = SHA##LEN##_DIGEST_STRING_LENGTH; \ - const unsigned SHA##LEN##_block_length = SHA##LEN##_BLOCK_LENGTH; \ - const unsigned SHA##LEN##_digest_length = SHA##LEN##_DIGEST_LENGTH - -SHA2_CONST(224); -SHA2_CONST(256); -SHA2_CONST(384); -SHA2_CONST(512); - -#undef SHA2_CONST } // namespace __sanitizer using namespace __sanitizer; diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h index 382b67c..1cbb40e 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h @@ -710,22 +710,6 @@ extern unsigned IOCTL_KDSKBMODE; extern const int si_SEGV_MAPERR; extern const int si_SEGV_ACCERR; -extern const unsigned MD5_CTX_sz; -extern const unsigned 
MD5_return_length; - -#define SHA2_EXTERN(LEN) \ - extern const unsigned SHA##LEN##_CTX_sz; \ - extern const unsigned SHA##LEN##_return_length; \ - extern const unsigned SHA##LEN##_block_length; \ - extern const unsigned SHA##LEN##_digest_length - -SHA2_EXTERN(224); -SHA2_EXTERN(256); -SHA2_EXTERN(384); -SHA2_EXTERN(512); - -#undef SHA2_EXTERN - struct __sanitizer_cap_rights { u64 cr_rights[2]; }; diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp index aacd28c..435f3b2 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp @@ -498,7 +498,6 @@ struct urio_command { #include <md5.h> #include <rmd160.h> #include <soundcard.h> -#include <term.h> #include <termios.h> #include <time.h> #include <ttyent.h> @@ -515,7 +514,7 @@ struct urio_command { #include <stringlist.h> #if defined(__x86_64__) -#include <nvmm.h> +#include <dev/nvmm/nvmm_ioctl.h> #endif // clang-format on diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_redefine_builtins.h b/compiler-rt/lib/sanitizer_common/sanitizer_redefine_builtins.h index bda0f04..7d88911 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_redefine_builtins.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_redefine_builtins.h @@ -15,7 +15,7 @@ # define SANITIZER_REDEFINE_BUILTINS_H // The asm hack only works with GCC and Clang. 
-# if !defined(_WIN32) && !defined(_AIX) +# if !defined(_WIN32) && !defined(_AIX) && !defined(__APPLE__) asm(R"( .set memcpy, __sanitizer_internal_memcpy diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp index d5cf0f1..2bf547f 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp @@ -405,10 +405,22 @@ struct ScopedSetTracerPID { // This detects whether ptrace is blocked (e.g., by seccomp), by forking and // then attempting ptrace. -// This separate check is necessary because StopTheWorld() creates a child -// process with a shared virtual address space and shared TLS, and therefore +// This separate check is necessary because StopTheWorld() creates a thread +// with a shared virtual address space and shared TLS, and therefore // cannot use waitpid() due to the shared errno. static void TestPTrace() { +# if SANITIZER_SPARC + // internal_fork() on SPARC actually calls __fork(). We can't safely fork, + // because it's possible seccomp has been configured to disallow fork() but + // allow clone(). + VReport(1, "WARNING: skipping TestPTrace() because this is SPARC\n"); + VReport(1, + "If seccomp blocks ptrace, LeakSanitizer may hang without further " + "notice\n"); + VReport( + 1, + "If seccomp does not block ptrace, you can safely ignore this warning\n"); +# else // Heuristic: only check the first time this is called. This is not always // correct (e.g., user manually triggers leak detection, then updates // seccomp, then leak detection is triggered again). @@ -417,35 +429,46 @@ static void TestPTrace() { return; checked = true; - // We hope that fork() is not too expensive, because of copy-on-write. + // Hopefully internal_fork() is not too expensive, thanks to copy-on-write. // Besides, this is only called the first time. 
+ // Note that internal_fork() on non-SPARC Linux actually calls + // SYSCALL(clone); thus, it is reasonable to use it because if seccomp kills + // TestPTrace(), it would have killed StopTheWorld() anyway. int pid = internal_fork(); if (pid < 0) { int rverrno; - if (internal_iserror(pid, &rverrno)) { - Report("WARNING: TestPTrace() failed to fork (errno %d)\n", rverrno); - } - internal__exit(-1); + if (internal_iserror(pid, &rverrno)) + VReport(0, "WARNING: TestPTrace() failed to fork (errno %d)\n", rverrno); + + // We don't abort the sanitizer - it's still worth letting the sanitizer + // try. + return; } if (pid == 0) { // Child subprocess + + // TODO: consider checking return value of internal_ptrace, to handle + // SCMP_ACT_ERRNO. However, be careful not to consume too many + // resources performing a proper ptrace. internal_ptrace(PTRACE_ATTACH, 0, nullptr, nullptr); internal__exit(0); } else { int wstatus; internal_waitpid(pid, &wstatus, 0); + // Handle SCMP_ACT_KILL if (WIFSIGNALED(wstatus)) { VReport(0, - "Warning: ptrace appears to be blocked (is seccomp enabled?). " + "WARNING: ptrace appears to be blocked (is seccomp enabled?). " "LeakSanitizer may hang.\n"); VReport(0, "Child exited with signal %d.\n", WTERMSIG(wstatus)); // We don't abort the sanitizer - it's still worth letting the sanitizer // try. 
} } +# endif } void StopTheWorld(StopTheWorldCallback callback, void *argument) { diff --git a/compiler-rt/lib/tsan/dd/dd_interceptors.cpp b/compiler-rt/lib/tsan/dd/dd_interceptors.cpp index 2c36f69..50531ea 100644 --- a/compiler-rt/lib/tsan/dd/dd_interceptors.cpp +++ b/compiler-rt/lib/tsan/dd/dd_interceptors.cpp @@ -11,6 +11,7 @@ #include "dd_rtl.h" #include "interception/interception.h" #include "sanitizer_common/sanitizer_allocator_internal.h" +#include "sanitizer_common/sanitizer_glibc_version.h" #include "sanitizer_common/sanitizer_procmaps.h" using namespace __dsan; @@ -312,12 +313,24 @@ void InitializeInterceptors() { INTERCEPT_FUNCTION(pthread_rwlock_timedwrlock); INTERCEPT_FUNCTION(pthread_rwlock_unlock); + // See the comment in tsan_interceptors_posix.cpp. +#if SANITIZER_GLIBC && !__GLIBC_PREREQ(2, 36) && \ + (defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 || \ + defined(__s390x__)) INTERCEPT_FUNCTION_VER(pthread_cond_init, "GLIBC_2.3.2"); INTERCEPT_FUNCTION_VER(pthread_cond_signal, "GLIBC_2.3.2"); INTERCEPT_FUNCTION_VER(pthread_cond_broadcast, "GLIBC_2.3.2"); INTERCEPT_FUNCTION_VER(pthread_cond_wait, "GLIBC_2.3.2"); INTERCEPT_FUNCTION_VER(pthread_cond_timedwait, "GLIBC_2.3.2"); INTERCEPT_FUNCTION_VER(pthread_cond_destroy, "GLIBC_2.3.2"); +#else + INTERCEPT_FUNCTION(pthread_cond_init); + INTERCEPT_FUNCTION(pthread_cond_signal); + INTERCEPT_FUNCTION(pthread_cond_broadcast); + INTERCEPT_FUNCTION(pthread_cond_wait); + INTERCEPT_FUNCTION(pthread_cond_timedwait); + INTERCEPT_FUNCTION(pthread_cond_destroy); +#endif // for symbolizer INTERCEPT_FUNCTION(realpath); diff --git a/compiler-rt/lib/tsan/go/test.c b/compiler-rt/lib/tsan/go/test.c index 1b0d828..d328ab1 100644 --- a/compiler-rt/lib/tsan/go/test.c +++ b/compiler-rt/lib/tsan/go/test.c @@ -63,6 +63,13 @@ int main(void) { __tsan_init(&thr0, &proc0, symbolize_cb); current_proc = proc0; +#if defined(__riscv) && (__riscv_xlen == 64) && defined(__linux__) + // Use correct go_heap for riscv64 sv39. 
+ if (65 - __builtin_clzl((unsigned long)__builtin_frame_address(0)) == 39) { + go_heap = (void *)0x511100000; + } +#endif + // Allocate something resembling a heap in Go. buf0 = mmap(go_heap, 16384, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED | MAP_ANON, -1, 0); diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp index 795e053..b46a810 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp @@ -22,6 +22,7 @@ #include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_linux.h" +#include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_platform_interceptors.h" #include "sanitizer_common/sanitizer_platform_limits_netbsd.h" #include "sanitizer_common/sanitizer_platform_limits_posix.h" @@ -78,17 +79,6 @@ struct ucontext_t { }; #endif -#if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 || \ - defined(__s390x__) -#define PTHREAD_ABI_BASE "GLIBC_2.3.2" -#elif defined(__aarch64__) || SANITIZER_PPC64V2 -#define PTHREAD_ABI_BASE "GLIBC_2.17" -#elif SANITIZER_LOONGARCH64 -#define PTHREAD_ABI_BASE "GLIBC_2.36" -#elif SANITIZER_RISCV64 -# define PTHREAD_ABI_BASE "GLIBC_2.27" -#endif - extern "C" int pthread_attr_init(void *attr); extern "C" int pthread_attr_destroy(void *attr); DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *) @@ -340,11 +330,6 @@ void ScopedInterceptor::DisableIgnoresImpl() { } #define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func) -#if SANITIZER_FREEBSD || SANITIZER_NETBSD -# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func) -#else -# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver) -#endif #if SANITIZER_FREEBSD # define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func) \ INTERCEPT_FUNCTION(_pthread_##func) @@ -2141,13 +2126,29 @@ static void 
ReportErrnoSpoiling(ThreadState *thr, uptr pc, int sig) { // StackTrace::GetNestInstructionPc(pc) is used because return address is // expected, OutputReport() will undo this. ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack); - ThreadRegistryLock l(&ctx->thread_registry); - ScopedReport rep(ReportTypeErrnoInSignal); - rep.SetSigNum(sig); - if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) { - rep.AddStack(stack, true); - OutputReport(thr, rep); + // Use alloca, because malloc during signal handling deadlocks + ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport)); + bool suppressed; + // Take a new scope as Apple platforms require the below locks released + // before symbolizing in order to avoid a deadlock + { + ThreadRegistryLock l(&ctx->thread_registry); + new (rep) ScopedReport(ReportTypeErrnoInSignal); + rep->SetSigNum(sig); + suppressed = IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack); + if (!suppressed) + rep->AddStack(stack, true); +#if SANITIZER_APPLE + } // Close this scope to release the locks before writing report +#endif + if (!suppressed) + OutputReport(thr, *rep); + + // Need to manually destroy this because we used placement new to allocate + rep->~ScopedReport(); +#if !SANITIZER_APPLE } +#endif } static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire, @@ -3024,12 +3025,26 @@ void InitializeInterceptors() { TSAN_INTERCEPT(pthread_timedjoin_np); #endif - TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE); - TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE); - TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE); - TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE); - TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE); - TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE); + // In glibc versions older than 2.36, dlsym(RTLD_NEXT, "pthread_cond_init") + // may return an outdated symbol (max(2.2,base_version)) if the port was + 
// introduced before 2.3.2 (when the new pthread_cond_t was introduced). +#if SANITIZER_GLIBC && !__GLIBC_PREREQ(2, 36) && \ + (defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 || \ + defined(__s390x__)) + INTERCEPT_FUNCTION_VER(pthread_cond_init, "GLIBC_2.3.2"); + INTERCEPT_FUNCTION_VER(pthread_cond_signal, "GLIBC_2.3.2"); + INTERCEPT_FUNCTION_VER(pthread_cond_broadcast, "GLIBC_2.3.2"); + INTERCEPT_FUNCTION_VER(pthread_cond_wait, "GLIBC_2.3.2"); + INTERCEPT_FUNCTION_VER(pthread_cond_timedwait, "GLIBC_2.3.2"); + INTERCEPT_FUNCTION_VER(pthread_cond_destroy, "GLIBC_2.3.2"); +#else + INTERCEPT_FUNCTION(pthread_cond_init); + INTERCEPT_FUNCTION(pthread_cond_signal); + INTERCEPT_FUNCTION(pthread_cond_broadcast); + INTERCEPT_FUNCTION(pthread_cond_wait); + INTERCEPT_FUNCTION(pthread_cond_timedwait); + INTERCEPT_FUNCTION(pthread_cond_destroy); +#endif TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT; diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp index befd6a3..02ca823 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp @@ -437,16 +437,30 @@ void __tsan_mutex_post_divert(void *addr, unsigned flagz) { } static void ReportMutexHeldWrongContext(ThreadState *thr, uptr pc) { - ThreadRegistryLock l(&ctx->thread_registry); - ScopedReport rep(ReportTypeMutexHeldWrongContext); - for (uptr i = 0; i < thr->mset.Size(); ++i) { - MutexSet::Desc desc = thr->mset.Get(i); - rep.AddMutex(desc.addr, desc.stack_id); + // Use alloca, because malloc during signal handling deadlocks + ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport)); + // Take a new scope as Apple platforms require the below locks released + // before symbolizing in order to avoid a deadlock + { + ThreadRegistryLock l(&ctx->thread_registry); + new (rep) ScopedReport(ReportTypeMutexHeldWrongContext); + for (uptr i = 0; i < thr->mset.Size(); ++i) { + MutexSet::Desc desc = 
thr->mset.Get(i); + rep->AddMutex(desc.addr, desc.stack_id); + } + VarSizeStackTrace trace; + ObtainCurrentStack(thr, pc, &trace); + rep->AddStack(trace, true); +#if SANITIZER_APPLE + } // Close this scope to release the locks +#endif + OutputReport(thr, *rep); + + // Need to manually destroy this because we used placement new to allocate + rep->~ScopedReport(); +#if !SANITIZER_APPLE } - VarSizeStackTrace trace; - ObtainCurrentStack(thr, pc, &trace); - rep.AddStack(trace, true); - OutputReport(thr, rep); +#endif } INTERFACE_ATTRIBUTE diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp index 0ea83fb..caacb36 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp @@ -182,10 +182,24 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) { ObtainCurrentStack(thr, pc, &stack); if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack)) return; - ThreadRegistryLock l(&ctx->thread_registry); - ScopedReport rep(ReportTypeSignalUnsafe); - rep.AddStack(stack, true); - OutputReport(thr, rep); + // Use alloca, because malloc during signal handling deadlocks + ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport)); + // Take a new scope as Apple platforms require the below locks released + // before symbolizing in order to avoid a deadlock + { + ThreadRegistryLock l(&ctx->thread_registry); + new (rep) ScopedReport(ReportTypeSignalUnsafe); + rep->AddStack(stack, true); +#if SANITIZER_APPLE + } // Close this scope to release the locks +#endif + OutputReport(thr, *rep); + + // Need to manually destroy this because we used placement new to allocate + rep->~ScopedReport(); +#if !SANITIZER_APPLE + } +#endif } diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform.h b/compiler-rt/lib/tsan/rtl/tsan_platform.h index ada594b..00b493b 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_platform.h +++ b/compiler-rt/lib/tsan/rtl/tsan_platform.h @@ -681,6 +681,32 @@ struct MappingGoMips64_47 { 
static const uptr kShadowAdd = 0x200000000000ull; }; +/* Go on linux/riscv64 (39-bit VMA) +0000 0001 0000 - 000f 0000 0000: executable and heap (60 GiB) +000f 0000 0000 - 0010 0000 0000: - +0010 0000 0000 - 0030 0000 0000: shadow - 128 GiB ( ~ 2 * app) +0030 0000 0000 - 0038 0000 0000: metainfo - 32 GiB ( ~ 0.5 * app) +0038 0000 0000 - 0040 0000 0000: - +*/ +struct MappingGoRiscv64_39 { + static const uptr kMetaShadowBeg = 0x003000000000ull; + static const uptr kMetaShadowEnd = 0x003800000000ull; + static const uptr kShadowBeg = 0x001000000000ull; + static const uptr kShadowEnd = 0x003000000000ull; + static const uptr kLoAppMemBeg = 0x000000010000ull; + static const uptr kLoAppMemEnd = 0x000f00000000ull; + static const uptr kMidAppMemBeg = 0; + static const uptr kMidAppMemEnd = 0; + static const uptr kHiAppMemBeg = 0; + static const uptr kHiAppMemEnd = 0; + static const uptr kHeapMemBeg = 0; + static const uptr kHeapMemEnd = 0; + static const uptr kVdsoBeg = 0; + static const uptr kShadowMsk = 0; + static const uptr kShadowXor = 0; + static const uptr kShadowAdd = 0x001000000000ull; +}; + /* Go on linux/riscv64 (48-bit VMA) 0000 0001 0000 - 00e0 0000 0000: executable and heap (896 GiB) 00e0 0000 0000 - 2000 0000 0000: - @@ -689,13 +715,13 @@ struct MappingGoMips64_47 { 3000 0000 0000 - 3100 0000 0000: metainfo - 1 TiB ( ~ 1 * app) 3100 0000 0000 - 8000 0000 0000: - */ -struct MappingGoRiscv64 { +struct MappingGoRiscv64_48 { static const uptr kMetaShadowBeg = 0x300000000000ull; static const uptr kMetaShadowEnd = 0x310000000000ull; static const uptr kShadowBeg = 0x200000000000ull; static const uptr kShadowEnd = 0x240000000000ull; static const uptr kLoAppMemBeg = 0x000000010000ull; - static const uptr kLoAppMemEnd = 0x000e00000000ull; + static const uptr kLoAppMemEnd = 0x00e000000000ull; static const uptr kMidAppMemBeg = 0; static const uptr kMidAppMemEnd = 0; static const uptr kHiAppMemBeg = 0; @@ -756,7 +782,12 @@ ALWAYS_INLINE auto SelectMapping(Arg arg) { # elif 
defined(__loongarch_lp64) return Func::template Apply<MappingGoLoongArch64_47>(arg); # elif SANITIZER_RISCV64 - return Func::template Apply<MappingGoRiscv64>(arg); + switch (vmaSize) { + case 39: + return Func::template Apply<MappingGoRiscv64_39>(arg); + case 48: + return Func::template Apply<MappingGoRiscv64_48>(arg); + } # elif SANITIZER_WINDOWS return Func::template Apply<MappingGoWindows>(arg); # else @@ -827,7 +858,8 @@ void ForEachMapping() { Func::template Apply<MappingGoAarch64>(); Func::template Apply<MappingGoLoongArch64_47>(); Func::template Apply<MappingGoMips64_47>(); - Func::template Apply<MappingGoRiscv64>(); + Func::template Apply<MappingGoRiscv64_39>(); + Func::template Apply<MappingGoRiscv64_48>(); Func::template Apply<MappingGoS390x>(); } diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp b/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp index 2c55645..4b55aab 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp @@ -393,9 +393,9 @@ void InitializePlatformEarly() { Die(); } # else - if (vmaSize != 48) { + if (vmaSize != 39 && vmaSize != 48) { Printf("FATAL: ThreadSanitizer: unsupported VMA range\n"); - Printf("FATAL: Found %zd - Supported 48\n", vmaSize); + Printf("FATAL: Found %zd - Supported 39 and 48\n", vmaSize); Die(); } # endif diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_aarch64.S b/compiler-rt/lib/tsan/rtl/tsan_rtl_aarch64.S index 7d920be..f1d11a3 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_rtl_aarch64.S +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_aarch64.S @@ -222,6 +222,6 @@ ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp)) NO_EXEC_STACK_DIRECTIVE -GNU_PROPERTY_BTI_PAC +GNU_PROPERTY_BTI_PAC_GCS #endif diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp index 77cba5f..b2e7047 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp @@ -419,6 +419,11 @@ NOINLINE void 
TraceRestartMemoryAccess(ThreadState* thr, uptr pc, uptr addr, ALWAYS_INLINE USED void MemoryAccess(ThreadState* thr, uptr pc, uptr addr, uptr size, AccessType typ) { +#if SANITIZER_APPLE && !SANITIZER_GO + // Swift symbolizer can be intercepted and deadlock without this + if (thr->in_symbolizer) + return; +#endif RawShadow* shadow_mem = MemToShadow(addr); UNUSED char memBuf[4][64]; DPrintf2("#%d: Access: %d@%d %p/%zd typ=0x%x {%s, %s, %s, %s}\n", thr->tid, diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp index 2a2bf42..30f5e96 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp @@ -11,14 +11,15 @@ //===----------------------------------------------------------------------===// #include <sanitizer_common/sanitizer_deadlock_detector_interface.h> +#include <sanitizer_common/sanitizer_placement_new.h> #include <sanitizer_common/sanitizer_stackdepot.h> -#include "tsan_rtl.h" #include "tsan_flags.h" -#include "tsan_sync.h" +#include "tsan_platform.h" #include "tsan_report.h" +#include "tsan_rtl.h" #include "tsan_symbolize.h" -#include "tsan_platform.h" +#include "tsan_sync.h" namespace __tsan { @@ -55,14 +56,28 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ, return; if (!ShouldReport(thr, typ)) return; - ThreadRegistryLock l(&ctx->thread_registry); - ScopedReport rep(typ); - rep.AddMutex(addr, creation_stack_id); - VarSizeStackTrace trace; - ObtainCurrentStack(thr, pc, &trace); - rep.AddStack(trace, true); - rep.AddLocation(addr, 1); - OutputReport(thr, rep); + // Use alloca, because malloc during signal handling deadlocks + ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport)); + // Take a new scope as Apple platforms require the below locks released + // before symbolizing in order to avoid a deadlock + { + ThreadRegistryLock l(&ctx->thread_registry); + new (rep) ScopedReport(typ); + rep->AddMutex(addr, 
creation_stack_id); + VarSizeStackTrace trace; + ObtainCurrentStack(thr, pc, &trace); + rep->AddStack(trace, true); + rep->AddLocation(addr, 1); +#if SANITIZER_APPLE + } // Close this scope to release the locks +#endif + OutputReport(thr, *rep); + + // Need to manually destroy this because we used placement new to allocate + rep->~ScopedReport(); +#if !SANITIZER_APPLE + } +#endif } static void RecordMutexLock(ThreadState *thr, uptr pc, uptr addr, @@ -528,53 +543,81 @@ void AfterSleep(ThreadState *thr, uptr pc) { void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) { if (r == 0 || !ShouldReport(thr, ReportTypeDeadlock)) return; - ThreadRegistryLock l(&ctx->thread_registry); - ScopedReport rep(ReportTypeDeadlock); - for (int i = 0; i < r->n; i++) { - rep.AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]); - rep.AddUniqueTid((int)r->loop[i].thr_ctx); - rep.AddThread((int)r->loop[i].thr_ctx); - } - uptr dummy_pc = 0x42; - for (int i = 0; i < r->n; i++) { - for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) { - u32 stk = r->loop[i].stk[j]; - StackTrace stack; - if (stk && stk != kInvalidStackID) { - stack = StackDepotGet(stk); - } else { - // Sometimes we fail to extract the stack trace (FIXME: investigate), - // but we should still produce some stack trace in the report. 
- stack = StackTrace(&dummy_pc, 1); + // Use alloca, because malloc during signal handling deadlocks + ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport)); + // Take a new scope as Apple platforms require the below locks released + // before symbolizing in order to avoid a deadlock + { + ThreadRegistryLock l(&ctx->thread_registry); + new (rep) ScopedReport(ReportTypeDeadlock); + for (int i = 0; i < r->n; i++) { + rep->AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]); + rep->AddUniqueTid((int)r->loop[i].thr_ctx); + rep->AddThread((int)r->loop[i].thr_ctx); + } + uptr dummy_pc = 0x42; + for (int i = 0; i < r->n; i++) { + for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) { + u32 stk = r->loop[i].stk[j]; + StackTrace stack; + if (stk && stk != kInvalidStackID) { + stack = StackDepotGet(stk); + } else { + // Sometimes we fail to extract the stack trace (FIXME: investigate), + // but we should still produce some stack trace in the report. + stack = StackTrace(&dummy_pc, 1); + } + rep->AddStack(stack, true); } - rep.AddStack(stack, true); } +#if SANITIZER_APPLE + } // Close this scope to release the locks +#endif + OutputReport(thr, *rep); + + // Need to manually destroy this because we used placement new to allocate + rep->~ScopedReport(); +#if !SANITIZER_APPLE } - OutputReport(thr, rep); +#endif } void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr, FastState last_lock, StackID creation_stack_id) { - // We need to lock the slot during RestoreStack because it protects - // the slot journal. 
- Lock slot_lock(&ctx->slots[static_cast<uptr>(last_lock.sid())].mtx); - ThreadRegistryLock l0(&ctx->thread_registry); - Lock slots_lock(&ctx->slot_mtx); - ScopedReport rep(ReportTypeMutexDestroyLocked); - rep.AddMutex(addr, creation_stack_id); - VarSizeStackTrace trace; - ObtainCurrentStack(thr, pc, &trace); - rep.AddStack(trace, true); - - Tid tid; - DynamicMutexSet mset; - uptr tag; - if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(), addr, - 0, kAccessWrite, &tid, &trace, mset, &tag)) - return; - rep.AddStack(trace, true); - rep.AddLocation(addr, 1); - OutputReport(thr, rep); + // Use alloca, because malloc during signal handling deadlocks + ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport)); + // Take a new scope as Apple platforms require the below locks released + // before symbolizing in order to avoid a deadlock + { + // We need to lock the slot during RestoreStack because it protects + // the slot journal. + Lock slot_lock(&ctx->slots[static_cast<uptr>(last_lock.sid())].mtx); + ThreadRegistryLock l0(&ctx->thread_registry); + Lock slots_lock(&ctx->slot_mtx); + new (rep) ScopedReport(ReportTypeMutexDestroyLocked); + rep->AddMutex(addr, creation_stack_id); + VarSizeStackTrace trace; + ObtainCurrentStack(thr, pc, &trace); + rep->AddStack(trace, true); + + Tid tid; + DynamicMutexSet mset; + uptr tag; + if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(), + addr, 0, kAccessWrite, &tid, &trace, mset, &tag)) + return; + rep->AddStack(trace, true); + rep->AddLocation(addr, 1); +#if SANITIZER_APPLE + } // Close this scope to release the locks +#endif + OutputReport(thr, *rep); + + // Need to manually destroy this because we used placement new to allocate + rep->~ScopedReport(); +#if !SANITIZER_APPLE + } +#endif } } // namespace __tsan diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp index e6f0fda..43aef30 100644 --- 
a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp @@ -16,6 +16,7 @@ #include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_stacktrace.h" +#include "tsan_defs.h" #include "tsan_fd.h" #include "tsan_flags.h" #include "tsan_mman.h" @@ -806,65 +807,80 @@ void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old, DynamicMutexSet mset1; MutexSet *mset[kMop] = {&thr->mset, mset1}; - // We need to lock the slot during RestoreStack because it protects - // the slot journal. - Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx); - ThreadRegistryLock l0(&ctx->thread_registry); - Lock slots_lock(&ctx->slot_mtx); - if (SpuriousRace(old)) - return; - if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1, - size1, typ1, &tids[1], &traces[1], mset[1], &tags[1])) { - StoreShadow(&ctx->last_spurious_race, old.raw()); - return; - } + // Use alloca, because malloc during signal handling deadlocks + ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport)); + // Take a new scope as Apple platforms require the below locks released + // before symbolizing in order to avoid a deadlock + { + // We need to lock the slot during RestoreStack because it protects + // the slot journal. 
+ Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx); + ThreadRegistryLock l0(&ctx->thread_registry); + Lock slots_lock(&ctx->slot_mtx); + if (SpuriousRace(old)) + return; + if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1, + size1, typ1, &tids[1], &traces[1], mset[1], &tags[1])) { + StoreShadow(&ctx->last_spurious_race, old.raw()); + return; + } - if (IsFiredSuppression(ctx, rep_typ, traces[1])) - return; + if (IsFiredSuppression(ctx, rep_typ, traces[1])) + return; - if (HandleRacyStacks(thr, traces)) - return; + if (HandleRacyStacks(thr, traces)) + return; - // If any of the accesses has a tag, treat this as an "external" race. - uptr tag = kExternalTagNone; - for (uptr i = 0; i < kMop; i++) { - if (tags[i] != kExternalTagNone) { - rep_typ = ReportTypeExternalRace; - tag = tags[i]; - break; + // If any of the accesses has a tag, treat this as an "external" race. + uptr tag = kExternalTagNone; + for (uptr i = 0; i < kMop; i++) { + if (tags[i] != kExternalTagNone) { + rep_typ = ReportTypeExternalRace; + tag = tags[i]; + break; + } } - } - ScopedReport rep(rep_typ, tag); - for (uptr i = 0; i < kMop; i++) - rep.AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]); + new (rep) ScopedReport(rep_typ, tag); + for (uptr i = 0; i < kMop; i++) + rep->AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]); - for (uptr i = 0; i < kMop; i++) { - ThreadContext *tctx = static_cast<ThreadContext *>( - ctx->thread_registry.GetThreadLocked(tids[i])); - rep.AddThread(tctx); - } + for (uptr i = 0; i < kMop; i++) { + ThreadContext *tctx = static_cast<ThreadContext *>( + ctx->thread_registry.GetThreadLocked(tids[i])); + rep->AddThread(tctx); + } - rep.AddLocation(addr_min, addr_max - addr_min); - - if (flags()->print_full_thread_history) { - const ReportDesc *rep_desc = rep.GetReport(); - for (uptr i = 0; i < rep_desc->threads.Size(); i++) { - Tid parent_tid = rep_desc->threads[i]->parent_tid; - if (parent_tid == kMainTid || 
parent_tid == kInvalidTid) - continue; - ThreadContext *parent_tctx = static_cast<ThreadContext *>( - ctx->thread_registry.GetThreadLocked(parent_tid)); - rep.AddThread(parent_tctx); + rep->AddLocation(addr_min, addr_max - addr_min); + + if (flags()->print_full_thread_history) { + const ReportDesc *rep_desc = rep->GetReport(); + for (uptr i = 0; i < rep_desc->threads.Size(); i++) { + Tid parent_tid = rep_desc->threads[i]->parent_tid; + if (parent_tid == kMainTid || parent_tid == kInvalidTid) + continue; + ThreadContext *parent_tctx = static_cast<ThreadContext *>( + ctx->thread_registry.GetThreadLocked(parent_tid)); + rep->AddThread(parent_tctx); + } } - } #if !SANITIZER_GO - if (!((typ0 | typ1) & kAccessFree) && - s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid())) - rep.AddSleep(thr->last_sleep_stack_id); + if (!((typ0 | typ1) & kAccessFree) && + s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid())) + rep->AddSleep(thr->last_sleep_stack_id); +#endif + +#if SANITIZER_APPLE + } // Close this scope to release the locks +#endif + OutputReport(thr, *rep); + + // Need to manually destroy this because we used placement new to allocate + rep->~ScopedReport(); +#if !SANITIZER_APPLE + } #endif - OutputReport(thr, rep); } void PrintCurrentStack(ThreadState *thr, uptr pc) { diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp index c6a8fd2..b1464cc 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp @@ -88,15 +88,33 @@ void ThreadFinalize(ThreadState *thr) { #if !SANITIZER_GO if (!ShouldReport(thr, ReportTypeThreadLeak)) return; - ThreadRegistryLock l(&ctx->thread_registry); Vector<ThreadLeak> leaks; - ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks, - &leaks); + { + ThreadRegistryLock l(&ctx->thread_registry); + ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks, + &leaks); + } + for (uptr i = 0; i < leaks.Size(); i++) { 
- ScopedReport rep(ReportTypeThreadLeak); - rep.AddThread(leaks[i].tctx, true); - rep.SetCount(leaks[i].count); - OutputReport(thr, rep); + // Use alloca, because malloc during signal handling deadlocks + ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport)); + // Take a new scope as Apple platforms require the below locks released + // before symbolizing in order to avoid a deadlock + { + ThreadRegistryLock l(&ctx->thread_registry); + new (rep) ScopedReport(ReportTypeThreadLeak); + rep->AddThread(leaks[i].tctx, true); + rep->SetCount(leaks[i].count); +# if SANITIZER_APPLE + } // Close this scope to release the locks +# endif + OutputReport(thr, *rep); + + // Need to manually destroy this because we used placement new to allocate + rep->~ScopedReport(); +# if !SANITIZER_APPLE + } +# endif } #endif } diff --git a/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cpp b/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cpp index ebc36a85..1e14307 100644 --- a/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cpp +++ b/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cpp @@ -34,12 +34,16 @@ static char *append_hex(uintptr_t d, char *buf, const char *end) { return buf; } -static void format_msg(const char *kind, uintptr_t caller, char *buf, - const char *end) { +static void format_msg(const char *kind, uintptr_t caller, + const uintptr_t *address, char *buf, const char *end) { buf = append_str("ubsan: ", buf, end); buf = append_str(kind, buf, end); buf = append_str(" by 0x", buf, end); buf = append_hex(caller, buf, end); + if (address) { + buf = append_str(" address 0x", buf, end); + buf = append_hex(*address, buf, end); + } buf = append_str("\n", buf, end); if (buf == end) --buf; // Make sure we don't cause a buffer overflow. 
@@ -47,7 +51,7 @@ static void format_msg(const char *kind, uintptr_t caller, char *buf, } SANITIZER_INTERFACE_WEAK_DEF(void, __ubsan_report_error, const char *kind, - uintptr_t caller) { + uintptr_t caller, const uintptr_t *address) { if (caller == 0) return; while (true) { @@ -80,28 +84,32 @@ SANITIZER_INTERFACE_WEAK_DEF(void, __ubsan_report_error, const char *kind, __sanitizer::atomic_store_relaxed(&caller_pcs[sz], caller); char msg_buf[128]; - format_msg(kind, caller, msg_buf, msg_buf + sizeof(msg_buf)); + format_msg(kind, caller, address, msg_buf, msg_buf + sizeof(msg_buf)); message(msg_buf); } } SANITIZER_INTERFACE_WEAK_DEF(void, __ubsan_report_error_fatal, const char *kind, - uintptr_t caller) { + uintptr_t caller, const uintptr_t *address) { // Use another handlers, in case it's already overriden. - __ubsan_report_error(kind, caller); + __ubsan_report_error(kind, caller, address); } #if defined(__ANDROID__) extern "C" __attribute__((weak)) void android_set_abort_message(const char *); -static void abort_with_message(const char *kind, uintptr_t caller) { +static void abort_with_message(const char *kind, uintptr_t caller, + const uintptr_t *address) { char msg_buf[128]; - format_msg(kind, caller, msg_buf, msg_buf + sizeof(msg_buf)); + format_msg(kind, caller, address, msg_buf, msg_buf + sizeof(msg_buf)); if (&android_set_abort_message) android_set_abort_message(msg_buf); abort(); } #else -static void abort_with_message(const char *kind, uintptr_t caller) { abort(); } +static void abort_with_message(const char *kind, uintptr_t caller, + const uintptr_t *address) { + abort(); +} #endif #if SANITIZER_DEBUG @@ -117,25 +125,42 @@ void NORETURN CheckFailed(const char *file, int, const char *cond, u64, u64) { } // namespace __sanitizer #endif -#define INTERFACE extern "C" __attribute__((visibility("default"))) - #define HANDLER_RECOVER(name, kind) \ - INTERFACE void __ubsan_handle_##name##_minimal() { \ - __ubsan_report_error(kind, GET_CALLER_PC()); \ + 
SANITIZER_INTERFACE_WEAK_DEF(void, __ubsan_handle_##name##_minimal) { \ + __ubsan_report_error(kind, GET_CALLER_PC(), nullptr); \ } #define HANDLER_NORECOVER(name, kind) \ - INTERFACE void __ubsan_handle_##name##_minimal_abort() { \ + SANITIZER_INTERFACE_WEAK_DEF(void, __ubsan_handle_##name##_minimal_abort) { \ uintptr_t caller = GET_CALLER_PC(); \ - __ubsan_report_error_fatal(kind, caller); \ - abort_with_message(kind, caller); \ + __ubsan_report_error_fatal(kind, caller, nullptr); \ + abort_with_message(kind, caller, nullptr); \ } #define HANDLER(name, kind) \ HANDLER_RECOVER(name, kind) \ HANDLER_NORECOVER(name, kind) -HANDLER(type_mismatch, "type-mismatch") +#define HANDLER_RECOVER_PTR(name, kind) \ + SANITIZER_INTERFACE_WEAK_DEF(void, __ubsan_handle_##name##_minimal, \ + const uintptr_t address) { \ + __ubsan_report_error(kind, GET_CALLER_PC(), &address); \ + } + +#define HANDLER_NORECOVER_PTR(name, kind) \ + SANITIZER_INTERFACE_WEAK_DEF(void, __ubsan_handle_##name##_minimal_abort, \ + const uintptr_t address) { \ + uintptr_t caller = GET_CALLER_PC(); \ + __ubsan_report_error_fatal(kind, caller, &address); \ + abort_with_message(kind, caller, &address); \ + } + +// A version of a handler that takes a pointer to a value. +#define HANDLER_PTR(name, kind) \ + HANDLER_RECOVER_PTR(name, kind) \ + HANDLER_NORECOVER_PTR(name, kind) + +HANDLER_PTR(type_mismatch, "type-mismatch") HANDLER(alignment_assumption, "alignment-assumption") HANDLER(add_overflow, "add-overflow") HANDLER(sub_overflow, "sub-overflow") diff --git a/compiler-rt/lib/xray/xray_fdr_logging.cpp b/compiler-rt/lib/xray/xray_fdr_logging.cpp index 7def356..977a0b9 100644 --- a/compiler-rt/lib/xray/xray_fdr_logging.cpp +++ b/compiler-rt/lib/xray/xray_fdr_logging.cpp @@ -73,7 +73,7 @@ static_assert(std::is_trivially_destructible<ThreadLocalData>::value, static pthread_key_t Key; // Global BufferQueue. 
-static std::byte BufferQueueStorage[sizeof(BufferQueue)]; +alignas(BufferQueue) static std::byte BufferQueueStorage[sizeof(BufferQueue)]; static BufferQueue *BQ = nullptr; // Global thresholds for function durations. diff --git a/compiler-rt/lib/xray/xray_init.cpp b/compiler-rt/lib/xray/xray_init.cpp index 020bfe5..9cc6d5f 100644 --- a/compiler-rt/lib/xray/xray_init.cpp +++ b/compiler-rt/lib/xray/xray_init.cpp @@ -105,7 +105,7 @@ __xray_register_sleds(const XRaySledEntry *SledsBegin, } if (Verbosity()) - Report("Registering %d new functions!\n", SledMap.Functions); + Report("Registering %d new functions!\n", (int)SledMap.Functions); { SpinMutexLock Guard(&XRayInstrMapMutex); diff --git a/compiler-rt/lib/xray/xray_interface.cpp b/compiler-rt/lib/xray/xray_interface.cpp index 3f97827..9bf0c56 100644 --- a/compiler-rt/lib/xray/xray_interface.cpp +++ b/compiler-rt/lib/xray/xray_interface.cpp @@ -308,7 +308,8 @@ XRayPatchingStatus controlPatchingObjectUnchecked(bool Enable, int32_t ObjId) { return XRayPatchingStatus::NOT_INITIALIZED; if (Verbosity()) - Report("Patching object %d with %d functions.\n", ObjId, InstrMap.Entries); + Report("Patching object %d with %d functions.\n", ObjId, + (int)InstrMap.Entries); // Check if the corresponding DSO has been unloaded. if (!InstrMap.Loaded) { |