Diffstat (limited to 'compiler-rt/lib')
-rw-r--r--  compiler-rt/lib/asan/asan_mac.cpp                              20
-rw-r--r--  compiler-rt/lib/memprof/memprof_interface_internal.h           10
-rw-r--r--  compiler-rt/lib/memprof/memprof_rawprofile.cpp                 32
-rw-r--r--  compiler-rt/lib/memprof/tests/CMakeLists.txt                    1
-rw-r--r--  compiler-rt/lib/memprof/tests/histogram_encoding.cpp           35
-rw-r--r--  compiler-rt/lib/scudo/standalone/allocator_config.def           3
-rw-r--r--  compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h    12
-rw-r--r--  compiler-rt/lib/scudo/standalone/combined.h                    34
-rw-r--r--  compiler-rt/lib/scudo/standalone/secondary.h                   21
-rw-r--r--  compiler-rt/lib/scudo/standalone/tests/combined_test.cpp      103
-rw-r--r--  compiler-rt/lib/tsan/rtl/tsan_report.h                         13
-rw-r--r--  compiler-rt/lib/tsan/rtl/tsan_rtl.h                             3
-rw-r--r--  compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp                    6
-rw-r--r--  compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp                     6
-rw-r--r--  compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp                   77
15 files changed, 306 insertions, 70 deletions
diff --git a/compiler-rt/lib/asan/asan_mac.cpp b/compiler-rt/lib/asan/asan_mac.cpp
index 1f3c79e..a68e362 100644
--- a/compiler-rt/lib/asan/asan_mac.cpp
+++ b/compiler-rt/lib/asan/asan_mac.cpp
@@ -130,6 +130,7 @@ typedef void* dispatch_queue_t;
 typedef void* dispatch_source_t;
 typedef u64 dispatch_time_t;
 typedef void (*dispatch_function_t)(void *block);
+typedef void (*dispatch_apply_function_t)(void *, size_t);
 typedef void* (*worker_t)(void *block);
 typedef unsigned long dispatch_mach_reason;
 typedef void *dispatch_mach_msg_t;
@@ -149,7 +150,11 @@ typedef void (^dispatch_mach_handler_t)(dispatch_mach_reason reason,
 // A wrapper for the ObjC blocks used to support libdispatch.
 typedef struct {
   void *block;
-  dispatch_function_t func;
+  union {
+    dispatch_function_t dispatch_func;
+    dispatch_apply_function_t dispatch_apply_func;
+    static_assert(sizeof(dispatch_func) == sizeof(dispatch_apply_func));
+  };
   u32 parent_tid;
 } asan_block_context_t;
 
@@ -177,7 +182,7 @@ void asan_dispatch_call_block_and_release(void *block) {
           block, (void*)pthread_self());
   asan_register_worker_thread(context->parent_tid, &stack);
   // Call the original dispatcher for the block.
-  context->func(context->block);
+  context->dispatch_func(context->block);
   asan_free(context, &stack);
 }
 
@@ -193,7 +198,7 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
   asan_block_context_t *asan_ctxt =
       (asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), stack);
   asan_ctxt->block = ctxt;
-  asan_ctxt->func = func;
+  asan_ctxt->dispatch_func = func;
   asan_ctxt->parent_tid = GetCurrentTidOrInvalid();
   return asan_ctxt;
 }
@@ -249,14 +254,17 @@ extern "C" void asan_dispatch_apply_f_work(void *context, size_t iteration) {
   GET_STACK_TRACE_THREAD;
   asan_block_context_t *asan_ctxt = (asan_block_context_t *)context;
   asan_register_worker_thread(asan_ctxt->parent_tid, &stack);
-  ((void (*)(void *, size_t))asan_ctxt->func)(asan_ctxt->block, iteration);
+  asan_ctxt->dispatch_apply_func(asan_ctxt->block, iteration);
 }
 
 INTERCEPTOR(void, dispatch_apply_f, size_t iterations, dispatch_queue_t queue,
-            void *ctxt, void (*work)(void *, size_t)) {
+            void *ctxt, dispatch_apply_function_t work) {
   GET_STACK_TRACE_THREAD;
   asan_block_context_t *asan_ctxt =
-      alloc_asan_context(ctxt, (dispatch_function_t)work, &stack);
+      (asan_block_context_t *)asan_malloc(sizeof(asan_block_context_t), &stack);
+  asan_ctxt->block = ctxt;
+  asan_ctxt->dispatch_apply_func = work;
+  asan_ctxt->parent_tid = GetCurrentTidOrInvalid();
  REAL(dispatch_apply_f)(iterations, queue, (void *)asan_ctxt,
                         asan_dispatch_apply_f_work);
 }
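The point of the union above is that the old code invoked the two-argument dispatch_apply_f callback through a dispatch_function_t cast; calling a function through a pointer of a mismatched type is undefined behavior in C and C++ and can trap under CFI or arm64e pointer authentication. A minimal standalone sketch of the same pattern, with hypothetical names (not the ASan code):

#include <cstddef>

// Keep one context struct but store each callback type in its own union
// member, so every call goes through a pointer of the correct type.
typedef void (*unary_fn)(void *);
typedef void (*binary_fn)(void *, size_t);

struct call_context {
  void *payload;
  union {
    unary_fn call1;   // for dispatch_async_f-style wrappers
    binary_fn call2;  // for dispatch_apply_f-style wrappers
  };
};

void run_apply_iteration(call_context *ctx, size_t i) {
  // Calls through the matching member; no function-pointer cast involved.
  ctx->call2(ctx->payload, i);
}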
diff --git a/compiler-rt/lib/memprof/memprof_interface_internal.h b/compiler-rt/lib/memprof/memprof_interface_internal.h
index 7d3a937..1fd0748 100644
--- a/compiler-rt/lib/memprof/memprof_interface_internal.h
+++ b/compiler-rt/lib/memprof/memprof_interface_internal.h
@@ -36,8 +36,14 @@ SANITIZER_INTERFACE_ATTRIBUTE
 void __memprof_record_access(void const volatile *addr);
 
 SANITIZER_INTERFACE_ATTRIBUTE
+void __memprof_record_access_hist(void const volatile *addr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
 void __memprof_record_access_range(void const volatile *addr, uptr size);
 
+SANITIZER_INTERFACE_ATTRIBUTE
+void __memprof_record_access_range_hist(void const volatile *addr, uptr size);
+
 SANITIZER_INTERFACE_ATTRIBUTE void __memprof_print_accumulated_stats();
 
 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE extern char
@@ -51,6 +57,10 @@ extern uptr __memprof_shadow_memory_dynamic_address;
 
 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE extern char
     __memprof_profile_filename[1];
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE extern bool
+    __memprof_histogram;
+
 SANITIZER_INTERFACE_ATTRIBUTE int __memprof_profile_dump();
 
 SANITIZER_INTERFACE_ATTRIBUTE void __memprof_profile_reset();
diff --git a/compiler-rt/lib/memprof/memprof_rawprofile.cpp b/compiler-rt/lib/memprof/memprof_rawprofile.cpp
index a897648..f579e12 100644
--- a/compiler-rt/lib/memprof/memprof_rawprofile.cpp
+++ b/compiler-rt/lib/memprof/memprof_rawprofile.cpp
@@ -7,10 +7,7 @@
 #include "sanitizer_common/sanitizer_allocator_internal.h"
 #include "sanitizer_common/sanitizer_array_ref.h"
 #include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_linux.h"
-#include "sanitizer_common/sanitizer_procmaps.h"
 #include "sanitizer_common/sanitizer_stackdepot.h"
-#include "sanitizer_common/sanitizer_stackdepotbase.h"
 #include "sanitizer_common/sanitizer_stacktrace.h"
 #include "sanitizer_common/sanitizer_vector.h"
 
@@ -19,10 +16,20 @@ using ::__sanitizer::Vector;
 using ::llvm::memprof::MemInfoBlock;
 using SegmentEntry = ::llvm::memprof::SegmentEntry;
 using Header = ::llvm::memprof::Header;
+using ::llvm::memprof::encodeHistogramCount;
 
 namespace {
 template <class T> char *WriteBytes(const T &Pod, char *Buffer) {
-  *(T *)Buffer = Pod;
+  static_assert(is_trivially_copyable<T>::value, "T must be POD");
+  const uint8_t *Src = reinterpret_cast<const uint8_t *>(&Pod);
+
+  for (size_t I = 0; I < sizeof(T); ++I)
+#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+    // Reverse byte order since reader is little-endian.
+    Buffer[I] = Src[sizeof(T) - 1 - I];
+#else
+    Buffer[I] = Src[I];
+#endif
   return Buffer + sizeof(T);
 }
 
@@ -32,7 +39,6 @@ void RecordStackId(const uptr Key, UNUSED LockedMemInfoBlock *const &MIB,
   auto *StackIds = reinterpret_cast<Vector<u64> *>(Arg);
   StackIds->PushBack(Key);
 }
-} // namespace
 
 u64 SegmentSizeBytes(ArrayRef<LoadedModule> Modules) {
   u64 NumSegmentsToRecord = 0;
@@ -169,18 +175,21 @@ void SerializeMIBInfoToBuffer(MIBMapTy &MIBMap, const Vector<u64> &StackIds,
     // FIXME: We unnecessarily serialize the AccessHistogram pointer. Adding a
     // serialization schema will fix this issue. See also FIXME in
     // deserialization.
-    Ptr = WriteBytes((*h)->mib, Ptr);
-    for (u64 j = 0; j < (*h)->mib.AccessHistogramSize; ++j) {
-      u64 HistogramEntry = ((u64 *)((*h)->mib.AccessHistogram))[j];
+    auto &MIB = (*h)->mib;
+    Ptr = WriteBytes(MIB, Ptr);
+    for (u64 j = 0; j < MIB.AccessHistogramSize; ++j) {
+      u16 HistogramEntry =
+          encodeHistogramCount(((u64 *)(MIB.AccessHistogram))[j]);
       Ptr = WriteBytes(HistogramEntry, Ptr);
     }
-    if ((*h)->mib.AccessHistogramSize > 0) {
-      InternalFree((void *)((*h)->mib.AccessHistogram));
+    if (MIB.AccessHistogramSize > 0) {
+      InternalFree((void *)MIB.AccessHistogram);
     }
   }
   CHECK(ExpectedNumBytes >= static_cast<u64>(Ptr - Buffer) &&
         "Expected num bytes != actual bytes written");
 }
+} // namespace
 
 // Format
 // ---------- Header
@@ -249,7 +258,7 @@ u64 SerializeToRawProfile(MIBMapTy &MIBMap, ArrayRef<LoadedModule> Modules,
       },
       reinterpret_cast<void *>(&TotalAccessHistogramEntries));
   const u64 NumHistogramBytes =
-      RoundUpTo(TotalAccessHistogramEntries * sizeof(uint64_t), 8);
+      RoundUpTo(TotalAccessHistogramEntries * sizeof(uint16_t), 8);
 
   const u64 NumStackBytes = RoundUpTo(StackSizeBytes(StackIds), 8);
 
@@ -285,5 +294,4 @@
   return TotalSizeBytes;
 }
-
 } // namespace __memprof
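The new WriteBytes replaces the old `*(T *)Buffer = Pod;`, which both assumed host byte order and performed a potentially misaligned store; the byte-wise loop fixes both while keeping the on-disk raw profile little-endian. It also pairs with the u64-to-u16 histogram change: each entry now costs two bytes, which is why NumHistogramBytes switches to sizeof(uint16_t). A standalone sketch of the same contract, with hypothetical names:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Emit a trivially copyable value as little-endian bytes regardless of the
// host byte order, with no alignment requirement on Buf. Illustrative only,
// not the memprof code.
template <class T> char *WriteLittleEndian(const T &V, char *Buf) {
  unsigned char Raw[sizeof(T)];
  std::memcpy(Raw, &V, sizeof(T)); // safe for any alignment
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  for (std::size_t I = 0; I < sizeof(T); ++I)
    Buf[I] = static_cast<char>(Raw[sizeof(T) - 1 - I]);
#else
  std::memcpy(Buf, Raw, sizeof(T));
#endif
  return Buf + sizeof(T);
}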
diff --git a/compiler-rt/lib/memprof/tests/CMakeLists.txt b/compiler-rt/lib/memprof/tests/CMakeLists.txt
index 0b5c302..1603d47 100644
--- a/compiler-rt/lib/memprof/tests/CMakeLists.txt
+++ b/compiler-rt/lib/memprof/tests/CMakeLists.txt
@@ -26,6 +26,7 @@ set(MEMPROF_SOURCES
   ../memprof_rawprofile.cpp)
 
 set(MEMPROF_UNITTESTS
+  histogram_encoding.cpp
   rawprofile.cpp
   driver.cpp)
 
diff --git a/compiler-rt/lib/memprof/tests/histogram_encoding.cpp b/compiler-rt/lib/memprof/tests/histogram_encoding.cpp
new file mode 100644
index 0000000..be20595
--- /dev/null
+++ b/compiler-rt/lib/memprof/tests/histogram_encoding.cpp
@@ -0,0 +1,35 @@
+#include <cstdint>
+#include <vector>
+
+#include "profile/MemProfData.inc"
+#include "gtest/gtest.h"
+
+namespace llvm {
+namespace memprof {
+namespace {
+TEST(MemProf, F16EncodeDecode) {
+  const std::vector<uint64_t> TestCases = {
+      0, 100, 4095, 4096, 5000, 8191, 65535, 1000000, 134213640, 200000000,
+  };
+
+  for (const uint64_t TestCase : TestCases) {
+    const uint16_t Encoded = encodeHistogramCount(TestCase);
+    const uint64_t Decoded = decodeHistogramCount(Encoded);
+
+    const uint64_t MaxRepresentable = static_cast<uint64_t>(MaxMantissa)
+                                      << MaxExponent;
+
+    if (TestCase >= MaxRepresentable) {
+      EXPECT_EQ(Decoded, MaxRepresentable);
+    } else if (TestCase <= MaxMantissa) {
+      EXPECT_EQ(Decoded, TestCase);
+    } else {
+      // The decoded value should be close to the original value.
+      // The error should be less than 1/1024 for larger numbers.
+      EXPECT_NEAR(Decoded, TestCase, static_cast<double>(TestCase) / 1024.0);
+    }
+  }
+}
+} // namespace
+} // namespace memprof
+} // namespace llvm
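The encode/decode functions under test live in profile/MemProfData.inc and are not part of this diff. For orientation, here is a sketch consistent with the test's contract: values up to MaxMantissa round-trip exactly, larger values carry bounded relative error, and everything saturates at MaxMantissa << MaxExponent. The 12-bit mantissa / 4-bit exponent split below is an assumption for illustration, not the committed constants:

#include <cstdint>

constexpr uint64_t kMaxMantissa = 0xFFF; // low 12 bits (assumed)
constexpr uint64_t kMaxExponent = 0xF;   // high 4 bits (assumed)

inline uint16_t encodeCount(uint64_t Count) {
  if (Count <= kMaxMantissa)
    return static_cast<uint16_t>(Count); // exact, exponent 0
  uint64_t Exp = 0;
  while (Count > kMaxMantissa) { // drop low bits until the mantissa fits
    Count >>= 1;
    ++Exp;
  }
  if (Exp > kMaxExponent) // saturate at kMaxMantissa << kMaxExponent
    return static_cast<uint16_t>((kMaxExponent << 12) | kMaxMantissa);
  return static_cast<uint16_t>((Exp << 12) | Count);
}

inline uint64_t decodeCount(uint16_t Enc) {
  return static_cast<uint64_t>(Enc & kMaxMantissa) << (Enc >> 12);
}

With this layout the post-shift mantissa is at least 2048, so truncation costs at most 1/2048 of the value, comfortably inside the test's 1/1024 tolerance, and 200000000 decodes to the saturated maximum as the test expects.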
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.def b/compiler-rt/lib/scudo/standalone/allocator_config.def
index 84fcec0..7485308 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config.def
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.def
@@ -54,6 +54,9 @@ BASE_REQUIRED_TEMPLATE_TYPE(SecondaryT)
 // Indicates possible support for Memory Tagging.
 BASE_OPTIONAL(const bool, MaySupportMemoryTagging, false)
 
+// Disable the quarantine code.
+BASE_OPTIONAL(const bool, QuarantineDisabled, false)
+
 // PRIMARY_REQUIRED_TYPE(NAME)
 //
 // SizeClassMap to use with the Primary.
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h b/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h
index ac639ee..5bfa700 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h
+++ b/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h
@@ -60,6 +60,10 @@ template <typename AllocatorConfig> struct PrimaryConfig {
     return BaseConfig<AllocatorConfig>::getMaySupportMemoryTagging();
   }
 
+  static constexpr bool getQuarantineDisabled() {
+    return BaseConfig<AllocatorConfig>::getQuarantineDisabled();
+  }
+
 #define PRIMARY_REQUIRED_TYPE(NAME)                                           \
   using NAME = typename AllocatorConfig::Primary::NAME;
 
@@ -92,6 +96,10 @@ template <typename AllocatorConfig> struct SecondaryConfig {
     return BaseConfig<AllocatorConfig>::getMaySupportMemoryTagging();
   }
 
+  static constexpr bool getQuarantineDisabled() {
+    return BaseConfig<AllocatorConfig>::getQuarantineDisabled();
+  }
+
 #define SECONDARY_REQUIRED_TEMPLATE_TYPE(NAME)                                \
   template <typename T>                                                       \
   using NAME = typename AllocatorConfig::Secondary::template NAME<T>;
 
@@ -111,6 +119,10 @@ template <typename AllocatorConfig> struct SecondaryConfig {
       return BaseConfig<AllocatorConfig>::getMaySupportMemoryTagging();
     }
 
+    static constexpr bool getQuarantineDisabled() {
+      return BaseConfig<AllocatorConfig>::getQuarantineDisabled();
+    }
+
 #define SECONDARY_CACHE_OPTIONAL(TYPE, NAME, DEFAULT)                         \
     OPTIONAL_TEMPLATE(TYPE, NAME, DEFAULT, Cache::NAME)                       \
     static constexpr removeConst<TYPE>::type get##NAME() {                    \
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index 87acdec..985bfb4 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -184,9 +184,11 @@ public:
     const s32 ReleaseToOsIntervalMs = getFlags()->release_to_os_interval_ms;
     Primary.init(ReleaseToOsIntervalMs);
     Secondary.init(&Stats, ReleaseToOsIntervalMs);
-    Quarantine.init(
-        static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
-        static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
+    if (!AllocatorConfig::getQuarantineDisabled()) {
+      Quarantine.init(
+          static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
+          static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
+    }
   }
 
   void enableRingBuffer() NO_THREAD_SAFETY_ANALYSIS {
@@ -276,16 +278,20 @@ public:
   // the last two items).
   void commitBack(TSD<ThisT> *TSD) {
     TSD->assertLocked(/*BypassCheck=*/true);
-    Quarantine.drain(&TSD->getQuarantineCache(),
-                     QuarantineCallback(*this, TSD->getSizeClassAllocator()));
+    if (!AllocatorConfig::getQuarantineDisabled()) {
+      Quarantine.drain(&TSD->getQuarantineCache(),
+                       QuarantineCallback(*this, TSD->getSizeClassAllocator()));
+    }
     TSD->getSizeClassAllocator().destroy(&Stats);
   }
 
   void drainCache(TSD<ThisT> *TSD) {
     TSD->assertLocked(/*BypassCheck=*/true);
-    Quarantine.drainAndRecycle(
-        &TSD->getQuarantineCache(),
-        QuarantineCallback(*this, TSD->getSizeClassAllocator()));
+    if (!AllocatorConfig::getQuarantineDisabled()) {
+      Quarantine.drainAndRecycle(
+          &TSD->getQuarantineCache(),
+          QuarantineCallback(*this, TSD->getSizeClassAllocator()));
+    }
     TSD->getSizeClassAllocator().drain();
   }
 
   void drainCaches() { TSDRegistry.drainCaches(this); }
@@ -612,7 +618,8 @@ public:
 #endif
     TSDRegistry.disable();
     Stats.disable();
-    Quarantine.disable();
+    if (!AllocatorConfig::getQuarantineDisabled())
+      Quarantine.disable();
     Primary.disable();
     Secondary.disable();
     disableRingBuffer();
@@ -623,7 +630,8 @@ public:
     enableRingBuffer();
     Secondary.enable();
     Primary.enable();
-    Quarantine.enable();
+    if (!AllocatorConfig::getQuarantineDisabled())
+      Quarantine.enable();
     Stats.enable();
     TSDRegistry.enable();
 #ifdef GWP_ASAN_HOOKS
@@ -1252,7 +1260,8 @@ private:
     // If the quarantine is disabled, the actual size of a chunk is 0 or larger
    // than the maximum allowed, we return a chunk directly to the backend.
     // This purposefully underflows for Size == 0.
-    const bool BypassQuarantine = !Quarantine.getCacheSize() ||
+    const bool BypassQuarantine = AllocatorConfig::getQuarantineDisabled() ||
+                                  !Quarantine.getCacheSize() ||
                                   ((Size - 1) >= QuarantineMaxChunkSize) ||
                                   !Header->ClassId;
     if (BypassQuarantine)
@@ -1642,7 +1651,8 @@ private:
   uptr getStats(ScopedString *Str) {
     Primary.getStats(Str);
     Secondary.getStats(Str);
-    Quarantine.getStats(Str);
+    if (!AllocatorConfig::getQuarantineDisabled())
+      Quarantine.getStats(Str);
     TSDRegistry.getStats(Str);
     return Str->length();
   }
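Because getQuarantineDisabled() is a static constexpr function of the config, each `if (!AllocatorConfig::getQuarantineDisabled())` guard has a compile-time constant condition, so the compiler can drop the quarantine paths entirely for configs that opt out. A schematic, self-contained illustration of the mechanism (names are hypothetical, not the scudo ones):

struct QuarantineOffConfig {
  static constexpr bool getQuarantineDisabled() { return true; }
};

template <typename Config> struct AllocatorSketch {
  int quarantined = 0;
  void deallocate() {
    // Constant-false condition: this branch is dead code for
    // QuarantineOffConfig and can be eliminated at compile time.
    if (!Config::getQuarantineDisabled())
      ++quarantined;
  }
};

int main() {
  AllocatorSketch<QuarantineOffConfig> A;
  A.deallocate();
  return A.quarantined; // always 0 for this config
}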
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index f04c5b7..38c9a9e 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -312,7 +312,7 @@ public:
       break;
     }
 
-    if (Config::getQuarantineSize()) {
+    if (!Config::getQuarantineDisabled() && Config::getQuarantineSize()) {
       QuarantinePos =
           (QuarantinePos + 1) % Max(Config::getQuarantineSize(), 1u);
       if (!Quarantine[QuarantinePos].isValid()) {
@@ -508,14 +508,16 @@ public:
   void disableMemoryTagging() EXCLUDES(Mutex) {
     ScopedLock L(Mutex);
-    for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
-      if (Quarantine[I].isValid()) {
-        MemMapT &MemMap = Quarantine[I].MemMap;
-        unmapCallBack(MemMap);
-        Quarantine[I].invalidate();
+    if (!Config::getQuarantineDisabled()) {
+      for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
+        if (Quarantine[I].isValid()) {
+          MemMapT &MemMap = Quarantine[I].MemMap;
+          unmapCallBack(MemMap);
+          Quarantine[I].invalidate();
+        }
       }
+      QuarantinePos = -1U;
     }
-    QuarantinePos = -1U;
 
     for (CachedBlock &Entry : LRUEntries)
       Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
@@ -575,8 +577,9 @@ private:
     if (!LRUEntries.size() || OldestTime == 0 || OldestTime > Time)
       return;
     OldestTime = 0;
-    for (uptr I = 0; I < Config::getQuarantineSize(); I++)
-      releaseIfOlderThan(Quarantine[I], Time);
+    if (!Config::getQuarantineDisabled())
+      for (uptr I = 0; I < Config::getQuarantineSize(); I++)
+        releaseIfOlderThan(Quarantine[I], Time);
     for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
       releaseIfOlderThan(Entries[I], Time);
   }
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
index 7e8d5b4..1eff9eb 100644
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -623,20 +623,20 @@ SCUDO_TYPED_TEST(ScudoCombinedDeathTest, DisableMemoryTagging) {
 SCUDO_TYPED_TEST(ScudoCombinedTest, Stats) {
   auto *Allocator = this->Allocator.get();
-  scudo::uptr BufferSize = 8192;
-  std::vector<char> Buffer(BufferSize);
-  scudo::uptr ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
-  while (ActualSize > BufferSize) {
-    BufferSize = ActualSize + 1024;
-    Buffer.resize(BufferSize);
-    ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
+  std::string Stats(10000, '\0');
+  scudo::uptr ActualSize = Allocator->getStats(Stats.data(), Stats.size());
+  if (ActualSize > Stats.size()) {
+    Stats.resize(ActualSize);
+    ActualSize = Allocator->getStats(Stats.data(), Stats.size());
   }
-  std::string Stats(Buffer.begin(), Buffer.end());
+  EXPECT_GE(Stats.size(), ActualSize);
+
   // Basic checks on the contents of the statistics output, which also allows us
   // to verify that we got it all.
   EXPECT_NE(Stats.find("Stats: SizeClassAllocator"), std::string::npos);
   EXPECT_NE(Stats.find("Stats: MapAllocator"), std::string::npos);
-  EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
+  // Do not explicitly check for quarantine stats since a config can disable
+  // them. Other tests verify this (QuarantineEnabled/QuarantineDisabled).
 }
 
 SCUDO_TYPED_TEST_SKIP_THREAD_SAFETY(ScudoCombinedTest, Drain) {
@@ -1076,3 +1076,88 @@
 #endif
 #endif
+
+struct TestQuarantineSizeClassConfig {
+  static const scudo::uptr NumBits = 1;
+  static const scudo::uptr MinSizeLog = 10;
+  static const scudo::uptr MidSizeLog = 10;
+  static const scudo::uptr MaxSizeLog = 13;
+  static const scudo::u16 MaxNumCachedHint = 8;
+  static const scudo::uptr MaxBytesCachedLog = 12;
+  static const scudo::uptr SizeDelta = 0;
+};
+
+struct TestQuarantineConfig {
+  static const bool MaySupportMemoryTagging = false;
+
+  template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;
+
+  struct Primary {
+    // Tiny allocator, its Primary only serves chunks of four sizes.
+    using SizeClassMap = scudo::FixedSizeClassMap<DeathSizeClassConfig>;
+    static const scudo::uptr RegionSizeLog = DeathRegionSizeLog;
+    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
+    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+    typedef scudo::uptr CompactPtrT;
+    static const scudo::uptr CompactPtrScale = 0;
+    static const bool EnableRandomOffset = true;
+    static const scudo::uptr MapSizeIncrement = 1UL << 18;
+    static const scudo::uptr GroupSizeLog = 18;
+  };
+  template <typename Config>
+  using PrimaryT = scudo::SizeClassAllocator64<Config>;
+
+  struct Secondary {
+    template <typename Config>
+    using CacheT = scudo::MapAllocatorNoCache<Config>;
+  };
+
+  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
+};
+
+// Verify that the quarantine exists by default.
+TEST(ScudoCombinedTest, QuarantineEnabled) {
+  using AllocatorT = scudo::Allocator<TestQuarantineConfig>;
+  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+  const scudo::uptr Size = 1000U;
+  void *P = Allocator->allocate(Size, Origin);
+  EXPECT_NE(P, nullptr);
+  Allocator->deallocate(P, Origin);
+
+  std::string Stats(10000, '\0');
+  scudo::uptr ActualSize = Allocator->getStats(Stats.data(), Stats.size());
+  if (ActualSize > Stats.size()) {
+    Stats.resize(ActualSize);
+    ActualSize = Allocator->getStats(Stats.data(), Stats.size());
+  }
+  EXPECT_GE(Stats.size(), ActualSize);
+
+  // Quarantine stats should be present.
+  EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
+}
+
+struct TestQuarantineDisabledConfig : TestQuarantineConfig {
+  static const bool QuarantineDisabled = true;
+};
+
+TEST(ScudoCombinedTest, QuarantineDisabled) {
+  using AllocatorT = scudo::Allocator<TestQuarantineDisabledConfig>;
+  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+  const scudo::uptr Size = 1000U;
+  void *P = Allocator->allocate(Size, Origin);
+  EXPECT_NE(P, nullptr);
+  Allocator->deallocate(P, Origin);
+
+  std::string Stats(10000, '\0');
+  scudo::uptr ActualSize = Allocator->getStats(Stats.data(), Stats.size());
+  if (ActualSize > Stats.size()) {
+    Stats.resize(ActualSize);
+    ActualSize = Allocator->getStats(Stats.data(), Stats.size());
+  }
+  EXPECT_GE(Stats.size(), ActualSize);
+
+  // No quarantine stats should be present.
+  EXPECT_EQ(Stats.find("Stats: Quarantine"), std::string::npos);
+}
diff --git a/compiler-rt/lib/tsan/rtl/tsan_report.h b/compiler-rt/lib/tsan/rtl/tsan_report.h
index 8975540..53bb219 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_report.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_report.h
@@ -12,6 +12,8 @@
 #ifndef TSAN_REPORT_H
 #define TSAN_REPORT_H
 
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
 #include "sanitizer_common/sanitizer_symbolizer.h"
 #include "sanitizer_common/sanitizer_thread_registry.h"
 #include "sanitizer_common/sanitizer_vector.h"
@@ -56,6 +58,7 @@ struct ReportMop {
   bool atomic;
   uptr external_tag;
   Vector<ReportMopMutex> mset;
+  StackTrace stack_trace;
   ReportStack *stack;
 
   ReportMop();
@@ -79,6 +82,7 @@ struct ReportLocation {
   int fd = 0;
   bool fd_closed = false;
   bool suppressable = false;
+  StackID stack_id = 0;
   ReportStack *stack = nullptr;
 };
 
@@ -89,15 +93,23 @@ struct ReportThread {
   ThreadType thread_type;
   char *name;
   Tid parent_tid;
+  StackID stack_id;
   ReportStack *stack;
+  bool suppressable;
 };
 
 struct ReportMutex {
   int id;
   uptr addr;
+  StackID stack_id;
   ReportStack *stack;
 };
 
+struct AddedLocationAddr {
+  uptr addr;
+  usize locs_idx;
+};
+
 class ReportDesc {
  public:
   ReportType typ;
@@ -105,6 +117,7 @@ class ReportDesc {
   Vector<ReportStack*> stacks;
   Vector<ReportMop*> mops;
   Vector<ReportLocation*> locs;
+  Vector<AddedLocationAddr> added_location_addrs;
   Vector<ReportMutex*> mutexes;
   Vector<ReportThread*> threads;
   Vector<Tid> unique_tids;
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
index 46276f2..0b6d5f0 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -420,6 +420,7 @@ class ScopedReportBase {
   void AddSleep(StackID stack_id);
   void SetCount(int count);
   void SetSigNum(int sig);
+  void SymbolizeStackElems(void);
 
   const ReportDesc *GetReport() const;
 
@@ -498,7 +499,7 @@
 void ForkChildAfter(ThreadState *thr, uptr pc, bool start_thread);
 
 void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
                 AccessType typ);
-bool OutputReport(ThreadState *thr, const ScopedReport &srep);
+bool OutputReport(ThreadState *thr, ScopedReport &srep);
 bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
 bool IsExpectedReport(uptr addr, uptr size);
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
index 487fa49..77cba5f 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
@@ -684,7 +684,7 @@ void MemoryAccessRangeT(ThreadState* thr, uptr pc, uptr addr, uptr size) {
     DCHECK(IsAppMem(addr + size - 1));
   }
   if (!IsShadowMem(shadow_mem)) {
-    Printf("Bad shadow start addr: %p (%p)\n", shadow_mem, (void*)addr);
+    Printf("Bad shadow start addr: %p (%p)\n", (void*)shadow_mem, (void*)addr);
     DCHECK(IsShadowMem(shadow_mem));
   }
 
@@ -693,12 +693,12 @@
   RawShadow* shadow_mem_end =
       shadow_mem + rounded_size / kShadowCell * kShadowCnt;
   if (!IsShadowMem(shadow_mem_end - 1)) {
-    Printf("Bad shadow end addr: %p (%p)\n", shadow_mem_end - 1,
+    Printf("Bad shadow end addr: %p (%p)\n", (void*)(shadow_mem_end - 1),
            (void*)(addr + size - 1));
     Printf(
         "Shadow start addr (ok): %p (%p); size: 0x%zx; rounded_size: 0x%zx; "
         "kShadowMultiplier: %zx\n",
-        shadow_mem, (void*)addr, size, rounded_size, kShadowMultiplier);
+        (void*)shadow_mem, (void*)addr, size, rounded_size, kShadowMultiplier);
     DCHECK(IsShadowMem(shadow_mem_end - 1));
   }
 #endif
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
index 2a8aa19..2a2bf42 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
@@ -539,13 +539,15 @@ void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
   for (int i = 0; i < r->n; i++) {
     for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
       u32 stk = r->loop[i].stk[j];
+      StackTrace stack;
       if (stk && stk != kInvalidStackID) {
-        rep.AddStack(StackDepotGet(stk), true);
+        stack = StackDepotGet(stk);
       } else {
         // Sometimes we fail to extract the stack trace (FIXME: investigate),
         // but we should still produce some stack trace in the report.
-        rep.AddStack(StackTrace(&dummy_pc, 1), true);
+        stack = StackTrace(&dummy_pc, 1);
       }
+      rep.AddStack(stack, true);
     }
   }
   OutputReport(thr, rep);
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
index 0820bf1..e6f0fda 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
@@ -11,6 +11,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
 #include "sanitizer_common/sanitizer_libc.h"
 #include "sanitizer_common/sanitizer_placement_new.h"
 #include "sanitizer_common/sanitizer_stackdepot.h"
@@ -187,10 +188,8 @@ void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
   mop->size = size;
   mop->write = !(typ & kAccessRead);
   mop->atomic = typ & kAccessAtomic;
-  mop->stack = SymbolizeStack(stack);
   mop->external_tag = external_tag;
-  if (mop->stack)
-    mop->stack->suppressable = true;
+  mop->stack_trace = stack;
   for (uptr i = 0; i < mset->Size(); i++) {
     MutexSet::Desc d = mset->Get(i);
     int id = this->AddMutex(d.addr, d.stack_id);
@@ -199,6 +198,56 @@
   }
 }
 
+void ScopedReportBase::SymbolizeStackElems() {
+  // symbolize memory ops
+  for (usize i = 0, size = rep_->mops.Size(); i < size; i++) {
+    ReportMop *mop = rep_->mops[i];
+    mop->stack = SymbolizeStack(mop->stack_trace);
+    if (mop->stack)
+      mop->stack->suppressable = true;
+  }
+
+  // symbolize locations
+  for (usize i = 0, size = rep_->locs.Size(); i < size; i++) {
+    // added locations have a NULL placeholder - don't dereference them
+    if (ReportLocation *loc = rep_->locs[i])
+      loc->stack = SymbolizeStackId(loc->stack_id);
+  }
+
+  // symbolize any added locations
+  for (usize i = 0, size = rep_->added_location_addrs.Size(); i < size; i++) {
+    AddedLocationAddr *added_loc = &rep_->added_location_addrs[i];
+    if (ReportLocation *loc = SymbolizeData(added_loc->addr)) {
+      loc->suppressable = true;
+      rep_->locs[added_loc->locs_idx] = loc;
+    }
+  }
+
+  // Filter out any added location placeholders that could not be symbolized
+  usize j = 0;
+  for (usize i = 0, size = rep_->locs.Size(); i < size; i++) {
+    if (rep_->locs[i] != nullptr) {
+      rep_->locs[j] = rep_->locs[i];
+      j++;
+    }
+  }
+  rep_->locs.Resize(j);
+
+  // symbolize threads
+  for (usize i = 0, size = rep_->threads.Size(); i < size; i++) {
+    ReportThread *rt = rep_->threads[i];
+    rt->stack = SymbolizeStackId(rt->stack_id);
+    if (rt->stack)
+      rt->stack->suppressable = rt->suppressable;
+  }
+
+  // symbolize mutexes
+  for (usize i = 0, size = rep_->mutexes.Size(); i < size; i++) {
+    ReportMutex *rm = rep_->mutexes[i];
+    rm->stack = SymbolizeStackId(rm->stack_id);
+  }
+}
+
 void ScopedReportBase::AddUniqueTid(Tid unique_tid) {
   rep_->unique_tids.PushBack(unique_tid);
 }
@@ -216,10 +265,8 @@ void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
   rt->name = internal_strdup(tctx->name);
   rt->parent_tid = tctx->parent_tid;
   rt->thread_type = tctx->thread_type;
-  rt->stack = 0;
-  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
-  if (rt->stack)
-    rt->stack->suppressable = suppressable;
+  rt->stack_id = tctx->creation_stack_id;
+  rt->suppressable = suppressable;
 }
 
 #if !SANITIZER_GO
@@ -270,7 +317,7 @@ int ScopedReportBase::AddMutex(uptr addr, StackID creation_stack_id) {
   rep_->mutexes.PushBack(rm);
   rm->id = rep_->mutexes.Size() - 1;
   rm->addr = addr;
-  rm->stack = SymbolizeStackId(creation_stack_id);
+  rm->stack_id = creation_stack_id;
   return rm->id;
 }
 
@@ -288,7 +335,7 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) {
     loc->fd_closed = closed;
     loc->fd = fd;
     loc->tid = creat_tid;
-    loc->stack = SymbolizeStackId(creat_stack);
+    loc->stack_id = creat_stack;
     rep_->locs.PushBack(loc);
     AddThread(creat_tid);
     return;
@@ -310,7 +357,7 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) {
     loc->heap_chunk_size = b->siz;
     loc->external_tag = b->tag;
     loc->tid = b->tid;
-    loc->stack = SymbolizeStackId(b->stk);
+    loc->stack_id = b->stk;
     rep_->locs.PushBack(loc);
     AddThread(b->tid);
     return;
@@ -324,11 +371,8 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) {
     AddThread(tctx);
   }
 #endif
-  if (ReportLocation *loc = SymbolizeData(addr)) {
-    loc->suppressable = true;
-    rep_->locs.PushBack(loc);
-    return;
-  }
+  rep_->added_location_addrs.PushBack({addr, rep_->locs.Size()});
+  rep_->locs.PushBack(nullptr);
 }
 
 #if !SANITIZER_GO
@@ -628,11 +672,12 @@ static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) {
   return false;
 }
 
-bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
+bool OutputReport(ThreadState *thr, ScopedReport &srep) {
   // These should have been checked in ShouldReport.
   // It's too late to check them here, we have already taken locks.
   CHECK(flags()->report_bugs);
   CHECK(!thr->suppress_reports);
+  srep.SymbolizeStackElems();
   atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
   const ReportDesc *rep = srep.GetReport();
   CHECK_EQ(thr->current_report, nullptr);
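Taken together, the TSan changes split report generation into two phases: the Add* methods (run while the runtime's locks are held) now record only raw StackTrace values and depot StackIDs, and the expensive symbolization is batched into SymbolizeStackElems(), which OutputReport runs once just before printing. A miniature of the pattern with illustrative types, not the TSan ones:

#include <vector>

// Add() records a cheap stack id during collection; SymbolizeAll() does the
// expensive work once, after collection is finished.
struct PendingFrame {
  unsigned stack_id;
  const char *symbolized = nullptr;
};

struct Report {
  std::vector<PendingFrame> frames;
  void Add(unsigned id) { frames.push_back({id}); } // cheap, lock-friendly
  void SymbolizeAll(const char *(*symbolize)(unsigned)) {
    for (PendingFrame &f : frames) // one batched pass, off the hot path
      f.symbolized = symbolize(f.stack_id);
  }
};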