author    Florian Mayer <fmayer@google.com>  2024-02-16 19:19:12 -0800
committer Florian Mayer <fmayer@google.com>  2024-02-16 22:59:53 -0800
commit    3da01663313803530a936aee7ecc592534aeb380 (patch)
tree      b6e5d0a77b570bfe2d3d96c19eafed809f18a656
parent    7106389b25436538f0373484a8ab0428ea681410 (diff)
Reland^2 "[scudo] resize stack depot for allocation ring buffer"
Fix some warnings by matching types.
This reverts commit e1164d063558b1e89f20109d83c079caae1825d8.
-rw-r--r--  compiler-rt/lib/scudo/standalone/combined.h | 121
-rw-r--r--  compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp | 14
-rw-r--r--  compiler-rt/lib/scudo/standalone/platform.h | 10
-rw-r--r--  compiler-rt/lib/scudo/standalone/stack_depot.h | 92
-rw-r--r--  compiler-rt/lib/scudo/standalone/tests/combined_test.cpp | 38
-rw-r--r--  compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp | 9
6 files changed, 215 insertions, 69 deletions
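In short, the patch drops the fixed-size StackDepot member (and the SCUDO_SMALL_STACK_DEPOT knob) and instead maps a depot at runtime whose ring and hash-table sizes are derived from allocation_ring_buffer_size. The sketch below reproduces only the sizing arithmetic from the combined.h hunk further down; computeStackDepotSize, DepotHeaderSize, and the example entry count are illustrative stand-ins, and the u32 overflow guards present in the real code are omitted.

```cpp
#include <cstdint>
#include <cstdio>

// Constants mirrored from the combined.h hunk below.
constexpr uint32_t kStacksPerRingBufferEntry = 2; // one alloc + one free stack
constexpr uint32_t kFramesPerStack = 8;           // ring slots per stack trace

constexpr uint64_t roundUpPowerOfTwo(uint64_t X) {
  uint64_t P = 1;
  while (P < X)
    P <<= 1;
  return P;
}

// DepotHeaderSize stands in for sizeof(scudo::StackDepot); the real code also
// guards every step against u32 overflow, which this sketch omits.
constexpr uint64_t computeStackDepotSize(uint32_t RingBufferEntries,
                                         uint64_t DepotHeaderSize) {
  // Hash-table slots: next power of two of (2 * ring buffer entries).
  uint64_t TabSize = roundUpPowerOfTwo(
      static_cast<uint64_t>(kStacksPerRingBufferEntry) * RingBufferEntries);
  // Ring slots: 8 frames per table slot.
  uint64_t RingSize = TabSize * kFramesPerStack;
  return DepotHeaderSize + sizeof(uint64_t) * RingSize + // atomic_u64 ring
         sizeof(uint32_t) * TabSize;                     // atomic_u32 table
}

int main() {
  // Example only: a 32768-entry ring buffer and a 64-byte depot header.
  std::printf("depot bytes: %llu\n",
              static_cast<unsigned long long>(computeStackDepotSize(32768, 64)));
  return 0;
}
```

The size this computes corresponds to the new depot layout: a StackDepot header followed by RingSize atomic_u64 ring slots and TabSize atomic_u32 table slots, which is exactly the layout StackDepot::isValid() re-checks on the crash-dumper side.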
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index 62473d1..080ba42 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -9,6 +9,7 @@
 #ifndef SCUDO_COMBINED_H_
 #define SCUDO_COMBINED_H_
 
+#include "atomic_helpers.h"
 #include "chunk.h"
 #include "common.h"
 #include "flags.h"
@@ -282,7 +283,7 @@ public:
     return reinterpret_cast<void *>(addHeaderTag(reinterpret_cast<uptr>(Ptr)));
   }
 
-  NOINLINE u32 collectStackTrace() {
+  NOINLINE u32 collectStackTrace(UNUSED StackDepot *Depot) {
 #ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
     // Discard collectStackTrace() frame and allocator function frame.
     constexpr uptr DiscardFrames = 2;
@@ -290,7 +291,7 @@ public:
     uptr Size =
         android_unsafe_frame_pointer_chase(Stack, MaxTraceSize + DiscardFrames);
     Size = Min<uptr>(Size, MaxTraceSize + DiscardFrames);
-    return Depot.insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
+    return Depot->insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
 #else
     return 0;
 #endif
@@ -687,12 +688,12 @@ public:
     Quarantine.disable();
     Primary.disable();
     Secondary.disable();
-    Depot.disable();
+    Depot->disable();
   }
 
   void enable() NO_THREAD_SAFETY_ANALYSIS {
     initThreadMaybe();
-    Depot.enable();
+    Depot->enable();
     Secondary.enable();
     Primary.enable();
     Quarantine.enable();
@@ -915,8 +916,14 @@ public:
       Primary.Options.clear(OptionBit::AddLargeAllocationSlack);
   }
 
-  const char *getStackDepotAddress() const {
-    return reinterpret_cast<const char *>(&Depot);
+  const char *getStackDepotAddress() {
+    initThreadMaybe();
+    return reinterpret_cast<char *>(Depot);
+  }
+
+  uptr getStackDepotSize() {
+    initThreadMaybe();
+    return StackDepotSize;
   }
 
   const char *getRegionInfoArrayAddress() const {
@@ -945,21 +952,35 @@ public:
     if (!Depot->find(Hash, &RingPos, &Size))
       return;
     for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I)
-      Trace[I] = static_cast<uintptr_t>((*Depot)[RingPos + I]);
+      Trace[I] = static_cast<uintptr_t>(Depot->at(RingPos + I));
   }
 
   static void getErrorInfo(struct scudo_error_info *ErrorInfo,
                            uintptr_t FaultAddr, const char *DepotPtr,
-                           const char *RegionInfoPtr, const char *RingBufferPtr,
-                           size_t RingBufferSize, const char *Memory,
-                           const char *MemoryTags, uintptr_t MemoryAddr,
-                           size_t MemorySize) {
+                           size_t DepotSize, const char *RegionInfoPtr,
+                           const char *RingBufferPtr, size_t RingBufferSize,
+                           const char *Memory, const char *MemoryTags,
+                           uintptr_t MemoryAddr, size_t MemorySize) {
+    // N.B. we need to support corrupted data in any of the buffers here. We get
+    // this information from an external process (the crashing process) that
+    // should not be able to crash the crash dumper (crash_dump on Android).
+    // See also the get_error_info_fuzzer.
     *ErrorInfo = {};
     if (!allocatorSupportsMemoryTagging<Config>() ||
         MemoryAddr + MemorySize < MemoryAddr)
       return;
 
-    auto *Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
+    const StackDepot *Depot = nullptr;
+    if (DepotPtr) {
+      // check for corrupted StackDepot. First we need to check whether we can
+      // read the metadata, then whether the metadata matches the size.
+      if (DepotSize < sizeof(*Depot))
+        return;
+      Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
+      if (!Depot->isValid(DepotSize))
+        return;
+    }
+
     size_t NextErrorReport = 0;
 
     // Check for OOB in the current block and the two surrounding blocks. Beyond
@@ -1025,7 +1046,9 @@ private:
   uptr GuardedAllocSlotSize = 0;
 #endif // GWP_ASAN_HOOKS
 
-  StackDepot Depot;
+  StackDepot *Depot = nullptr;
+  uptr StackDepotSize = 0;
+  MemMapT RawStackDepotMap;
 
   struct AllocationRingBuffer {
     struct Entry {
@@ -1234,11 +1257,18 @@ private:
     storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
   }
 
-  void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
+  StackDepot *getDepotIfEnabled(const Options &Options) {
     if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
+      return nullptr;
+    return Depot;
+  }
+
+  void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
+    auto *Depot = getDepotIfEnabled(Options);
+    if (!Depot)
       return;
     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
-    Ptr32[MemTagAllocationTraceIndex] = collectStackTrace();
+    Ptr32[MemTagAllocationTraceIndex] = collectStackTrace(Depot);
     Ptr32[MemTagAllocationTidIndex] = getThreadID();
   }
 
@@ -1268,10 +1298,10 @@ private:
 
   void storeSecondaryAllocationStackMaybe(const Options &Options, void *Ptr,
                                           uptr Size) {
-    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
+    auto *Depot = getDepotIfEnabled(Options);
+    if (!Depot)
       return;
-
-    u32 Trace = collectStackTrace();
+    u32 Trace = collectStackTrace(Depot);
     u32 Tid = getThreadID();
 
     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
@@ -1283,14 +1313,14 @@ private:
 
   void storeDeallocationStackMaybe(const Options &Options, void *Ptr,
                                    u8 PrevTag, uptr Size) {
-    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
+    auto *Depot = getDepotIfEnabled(Options);
+    if (!Depot)
      return;
-
     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
     u32 AllocationTrace = Ptr32[MemTagAllocationTraceIndex];
     u32 AllocationTid = Ptr32[MemTagAllocationTidIndex];
 
-    u32 DeallocationTrace = collectStackTrace();
+    u32 DeallocationTrace = collectStackTrace(Depot);
     u32 DeallocationTid = getThreadID();
 
     storeRingBufferEntry(addFixedTag(untagPointer(Ptr), PrevTag),
@@ -1369,8 +1399,10 @@ private:
           UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW;
       R->allocation_address = ChunkAddr;
      R->allocation_size = Header.SizeOrUnusedBytes;
-      collectTraceMaybe(Depot, R->allocation_trace,
-                        Data[MemTagAllocationTraceIndex]);
+      if (Depot) {
+        collectTraceMaybe(Depot, R->allocation_trace,
+                          Data[MemTagAllocationTraceIndex]);
+      }
       R->allocation_tid = Data[MemTagAllocationTidIndex];
       return NextErrorReport == NumErrorReports;
     };
@@ -1393,7 +1425,7 @@ private:
     auto *RingBuffer =
         reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
     size_t RingBufferElements = ringBufferElementsFromBytes(RingBufferSize);
-    if (!RingBuffer || RingBufferElements == 0)
+    if (!RingBuffer || RingBufferElements == 0 || !Depot)
       return;
 
     uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);
@@ -1483,6 +1515,43 @@ private:
       return;
     u32 AllocationRingBufferSize =
         static_cast<u32>(getFlags()->allocation_ring_buffer_size);
+
+    // We store alloc and free stacks for each entry.
+    constexpr u32 kStacksPerRingBufferEntry = 2;
+    constexpr u32 kMaxU32Pow2 = ~(UINT32_MAX >> 1);
+    static_assert(isPowerOfTwo(kMaxU32Pow2));
+    constexpr u32 kFramesPerStack = 8;
+    static_assert(isPowerOfTwo(kFramesPerStack));
+
+    // We need StackDepot to be aligned to 8-bytes so the ring we store after
+    // is correctly assigned.
+    static_assert(sizeof(StackDepot) % alignof(atomic_u64) == 0);
+
+    // Make sure the maximum sized StackDepot fits withint a uintptr_t to
+    // simplify the overflow checking.
+    static_assert(sizeof(StackDepot) + UINT32_MAX * sizeof(atomic_u64) *
+                                           UINT32_MAX * sizeof(atomic_u32) <
+                  UINTPTR_MAX);
+
+    if (AllocationRingBufferSize > kMaxU32Pow2 / kStacksPerRingBufferEntry)
+      return;
+    u32 TabSize = static_cast<u32>(roundUpPowerOfTwo(kStacksPerRingBufferEntry *
+                                                     AllocationRingBufferSize));
+    if (TabSize > UINT32_MAX / kFramesPerStack)
+      return;
+    u32 RingSize = static_cast<u32>(TabSize * kFramesPerStack);
+    DCHECK(isPowerOfTwo(RingSize));
+
+    StackDepotSize = sizeof(StackDepot) + sizeof(atomic_u64) * RingSize +
+                     sizeof(atomic_u32) * TabSize;
+    MemMapT DepotMap;
+    DepotMap.map(
+        /*Addr=*/0U, roundUp(StackDepotSize, getPageSizeCached()),
+        "scudo:stack_depot");
+    Depot = reinterpret_cast<StackDepot *>(DepotMap.getBase());
+    Depot->init(RingSize, TabSize);
+    RawStackDepotMap = DepotMap;
+
     MemMapT MemMap;
     MemMap.map(
         /*Addr=*/0U,
@@ -1505,6 +1574,10 @@ private:
                              RawRingBufferMap.getCapacity());
     }
     RawRingBuffer = nullptr;
+    if (Depot) {
+      RawStackDepotMap.unmap(RawStackDepotMap.getBase(),
+                             RawStackDepotMap.getCapacity());
+    }
   }
 
   static constexpr size_t ringBufferSizeInBytes(u32 RingBufferElements) {
diff --git a/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp b/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
index 5b01ebe..2cef1c4 100644
--- a/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
+++ b/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
@@ -9,6 +9,7 @@
 #define SCUDO_FUZZ
 #include "allocator_config.h"
 #include "combined.h"
+#include "common.h"
 
 #include <fuzzer/FuzzedDataProvider.h>
 
@@ -31,11 +32,6 @@ extern "C" int LLVMFuzzerTestOneInput(uint8_t *Data, size_t Size) {
 
   std::string StackDepotBytes =
       FDP.ConsumeRandomLengthString(FDP.remaining_bytes());
-  std::vector<char> StackDepot(sizeof(scudo::StackDepot), 0);
-  for (size_t i = 0; i < StackDepotBytes.length() && i < StackDepot.size();
-       ++i) {
-    StackDepot[i] = StackDepotBytes[i];
-  }
 
   std::string RegionInfoBytes =
       FDP.ConsumeRandomLengthString(FDP.remaining_bytes());
@@ -48,9 +44,9 @@ extern "C" int LLVMFuzzerTestOneInput(uint8_t *Data, size_t Size) {
   std::string RingBufferBytes = FDP.ConsumeRemainingBytesAsString();
 
   scudo_error_info ErrorInfo;
-  AllocatorT::getErrorInfo(&ErrorInfo, FaultAddr, StackDepot.data(),
-                           RegionInfo.data(), RingBufferBytes.data(),
-                           RingBufferBytes.size(), Memory, MemoryTags,
-                           MemoryAddr, MemorySize);
+  AllocatorT::getErrorInfo(&ErrorInfo, FaultAddr, StackDepotBytes.data(),
+                           StackDepotBytes.size(), RegionInfo.data(),
+                           RingBufferBytes.data(), RingBufferBytes.size(),
+                           Memory, MemoryTags, MemoryAddr, MemorySize);
   return 0;
 }
diff --git a/compiler-rt/lib/scudo/standalone/platform.h b/compiler-rt/lib/scudo/standalone/platform.h
index b71a86b..5af1275 100644
--- a/compiler-rt/lib/scudo/standalone/platform.h
+++ b/compiler-rt/lib/scudo/standalone/platform.h
@@ -63,16 +63,6 @@
 #define SCUDO_CAN_USE_MTE (SCUDO_LINUX || SCUDO_TRUSTY)
 #endif
 
-// Use smaller table sizes for fuzzing in order to reduce input size.
-// Trusty just has less available memory.
-#ifndef SCUDO_SMALL_STACK_DEPOT
-#if defined(SCUDO_FUZZ) || SCUDO_TRUSTY
-#define SCUDO_SMALL_STACK_DEPOT 1
-#else
-#define SCUDO_SMALL_STACK_DEPOT 0
-#endif
-#endif
-
 #ifndef SCUDO_ENABLE_HOOKS
 #define SCUDO_ENABLE_HOOKS 0
 #endif
diff --git a/compiler-rt/lib/scudo/standalone/stack_depot.h b/compiler-rt/lib/scudo/standalone/stack_depot.h
index e887d1b..620137e 100644
--- a/compiler-rt/lib/scudo/standalone/stack_depot.h
+++ b/compiler-rt/lib/scudo/standalone/stack_depot.h
@@ -10,6 +10,7 @@
 #define SCUDO_STACK_DEPOT_H_
 
 #include "atomic_helpers.h"
+#include "common.h"
 #include "mutex.h"
 
 namespace scudo {
@@ -38,7 +39,7 @@ public:
   }
 };
 
-class StackDepot {
+class alignas(atomic_u64) StackDepot {
   HybridMutex RingEndMu;
   u32 RingEnd = 0;
 
@@ -62,28 +63,77 @@ class StackDepot {
   // This is achieved by re-checking the hash of the stack trace before
   // returning the trace.
 
-#if SCUDO_SMALL_STACK_DEPOT
-  static const uptr TabBits = 4;
-#else
-  static const uptr TabBits = 16;
-#endif
-  static const uptr TabSize = 1 << TabBits;
-  static const uptr TabMask = TabSize - 1;
-  atomic_u32 Tab[TabSize] = {};
-
-#if SCUDO_SMALL_STACK_DEPOT
-  static const uptr RingBits = 4;
-#else
-  static const uptr RingBits = 19;
-#endif
-  static const uptr RingSize = 1 << RingBits;
-  static const uptr RingMask = RingSize - 1;
-  atomic_u64 Ring[RingSize] = {};
+  u32 RingSize = 0;
+  u32 RingMask = 0;
+  u32 TabMask = 0;
+  // This is immediately followed by RingSize atomic_u64 and
+  // (TabMask + 1) atomic_u32.
+
+  atomic_u64 *getRing() {
+    return reinterpret_cast<atomic_u64 *>(reinterpret_cast<char *>(this) +
+                                          sizeof(StackDepot));
+  }
+
+  atomic_u32 *getTab() {
+    return reinterpret_cast<atomic_u32 *>(reinterpret_cast<char *>(this) +
+                                          sizeof(StackDepot) +
+                                          sizeof(atomic_u64) * RingSize);
+  }
+
+  const atomic_u64 *getRing() const {
+    return reinterpret_cast<const atomic_u64 *>(
+        reinterpret_cast<const char *>(this) + sizeof(StackDepot));
+  }
+
+  const atomic_u32 *getTab() const {
+    return reinterpret_cast<const atomic_u32 *>(
+        reinterpret_cast<const char *>(this) + sizeof(StackDepot) +
+        sizeof(atomic_u64) * RingSize);
+  }
 
 public:
+  void init(u32 RingSz, u32 TabSz) {
+    DCHECK(isPowerOfTwo(RingSz));
+    DCHECK(isPowerOfTwo(TabSz));
+    RingSize = RingSz;
+    RingMask = RingSz - 1;
+    TabMask = TabSz - 1;
+  }
+
+  // Ensure that RingSize, RingMask and TabMask are set up in a way that
+  // all accesses are within range of BufSize.
+  bool isValid(uptr BufSize) const {
+    if (RingSize == 0 || !isPowerOfTwo(RingSize))
+      return false;
+    uptr RingBytes = sizeof(atomic_u64) * RingSize;
+    if (RingMask + 1 != RingSize)
+      return false;
+
+    if (TabMask == 0)
+      return false;
+    uptr TabSize = TabMask + 1;
+    if (!isPowerOfTwo(TabSize))
+      return false;
+    uptr TabBytes = sizeof(atomic_u32) * TabSize;
+
+    // Subtract and detect underflow.
+    if (BufSize < sizeof(StackDepot))
+      return false;
+    BufSize -= sizeof(StackDepot);
+    if (BufSize < TabBytes)
+      return false;
+    BufSize -= TabBytes;
+    if (BufSize < RingBytes)
+      return false;
+    return BufSize == RingBytes;
+  }
+
   // Insert hash of the stack trace [Begin, End) into the stack depot, and
   // return the hash.
   u32 insert(uptr *Begin, uptr *End) {
+    auto *Tab = getTab();
+    auto *Ring = getRing();
+
     MurMur2HashBuilder B;
     for (uptr *I = Begin; I != End; ++I)
       B.add(u32(*I) >> 2);
@@ -112,6 +162,9 @@ public:
   // accessed via operator[] passing indexes between *RingPosPtr and
   // *RingPosPtr + *SizePtr.
   bool find(u32 Hash, uptr *RingPosPtr, uptr *SizePtr) const {
+    auto *Tab = getTab();
+    auto *Ring = getRing();
+
     u32 Pos = Hash & TabMask;
     u32 RingPos = atomic_load_relaxed(&Tab[Pos]);
     if (RingPos >= RingSize)
@@ -133,7 +186,8 @@ public:
     return B.get() == Hash;
   }
 
-  u64 operator[](uptr RingPos) const {
+  u64 at(uptr RingPos) const {
+    auto *Ring = getRing();
     return atomic_load_relaxed(&Ring[RingPos & RingMask]);
   }
 
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
index 13f5ac1..434429b 100644
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -7,6 +7,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "memtag.h"
+#include "stack_depot.h"
 #include "tests/scudo_unit_test.h"
 
 #include "allocator_config.h"
@@ -870,8 +871,8 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateInPlaceStress) {
 SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferSize) {
   auto *Allocator = this->Allocator.get();
   auto Size = Allocator->getRingBufferSize();
-  if (Size > 0)
-    EXPECT_EQ(Allocator->getRingBufferAddress()[Size - 1], '\0');
+  ASSERT_GT(Size, 0u);
+  EXPECT_EQ(Allocator->getRingBufferAddress()[Size - 1], '\0');
 }
 
 SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferAddress) {
@@ -881,6 +882,39 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferAddress) {
   EXPECT_EQ(Addr, Allocator->getRingBufferAddress());
 }
 
+SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotSize) {
+  auto *Allocator = this->Allocator.get();
+  auto Size = Allocator->getStackDepotSize();
+  ASSERT_GT(Size, 0u);
+  EXPECT_EQ(Allocator->getStackDepotAddress()[Size - 1], '\0');
+}
+
+SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotAddress) {
+  auto *Allocator = this->Allocator.get();
+  auto *Addr = Allocator->getStackDepotAddress();
+  EXPECT_NE(Addr, nullptr);
+  EXPECT_EQ(Addr, Allocator->getStackDepotAddress());
+}
+
+SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepot) {
+  alignas(scudo::StackDepot) char Buf[sizeof(scudo::StackDepot) +
+                                      1024 * sizeof(scudo::atomic_u64) +
+                                      1024 * sizeof(scudo::atomic_u32)] = {};
+  auto *Depot = reinterpret_cast<scudo::StackDepot *>(Buf);
+  Depot->init(1024, 1024);
+  ASSERT_TRUE(Depot->isValid(sizeof(Buf)));
+  ASSERT_FALSE(Depot->isValid(sizeof(Buf) - 1));
+  scudo::uptr Stack[] = {1, 2, 3};
+  scudo::u32 Elem = Depot->insert(&Stack[0], &Stack[3]);
+  scudo::uptr RingPosPtr = 0;
+  scudo::uptr SizePtr = 0;
+  ASSERT_TRUE(Depot->find(Elem, &RingPosPtr, &SizePtr));
+  ASSERT_EQ(SizePtr, 3);
+  EXPECT_EQ(Depot->at(RingPosPtr), 1);
+  EXPECT_EQ(Depot->at(RingPosPtr + 1), 2);
+  EXPECT_EQ(Depot->at(RingPosPtr + 2), 3);
+}
+
 #if SCUDO_CAN_USE_PRIMARY64
 #if SCUDO_TRUSTY
 
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp b/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
index 21694c3..e9d8c1e 100644
--- a/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
@@ -43,10 +43,9 @@ INTERFACE void __scudo_get_error_info(
     const char *stack_depot, size_t stack_depot_size, const char *region_info,
     const char *ring_buffer, size_t ring_buffer_size, const char *memory,
     const char *memory_tags, uintptr_t memory_addr, size_t memory_size) {
-  (void)(stack_depot_size);
-  Allocator.getErrorInfo(error_info, fault_addr, stack_depot, region_info,
-                         ring_buffer, ring_buffer_size, memory, memory_tags,
-                         memory_addr, memory_size);
+  Allocator.getErrorInfo(error_info, fault_addr, stack_depot, stack_depot_size,
+                         region_info, ring_buffer, ring_buffer_size, memory,
+                         memory_tags, memory_addr, memory_size);
 }
 
 INTERFACE const char *__scudo_get_stack_depot_addr() {
@@ -54,7 +53,7 @@ INTERFACE const char *__scudo_get_stack_depot_addr() {
 }
 
 INTERFACE size_t __scudo_get_stack_depot_size() {
-  return sizeof(scudo::StackDepot);
+  return Allocator.getStackDepotSize();
 }
 
 INTERFACE const char *__scudo_get_region_info_addr() {
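For consumers of the Bionic wrappers, the practical change is that __scudo_get_stack_depot_size() now reports the dynamically mapped size instead of sizeof(scudo::StackDepot), and that size must be forwarded to __scudo_get_error_info() together with the depot pointer. The caller-side sketch below is illustrative only; fill_report and the include path are assumptions, not part of the patch. Per the new getErrorInfo, a null depot pointer still yields reports (just without allocation traces), while a depot whose metadata fails StackDepot::isValid() makes getErrorInfo return without filling any reports.

```cpp
// Hypothetical crash-dumper glue, assuming scudo's public interface header.
#include <scudo/interface.h>

#include <stddef.h>
#include <stdint.h>

void fill_report(scudo_error_info *info, uintptr_t fault_addr,
                 const char *depot, size_t depot_size, const char *region_info,
                 const char *ring_buffer, size_t ring_buffer_size,
                 const char *memory, const char *memory_tags,
                 uintptr_t memory_addr, size_t memory_size) {
  // After this patch the depot size travels with the depot pointer; both come
  // from the crashing process (__scudo_get_stack_depot_addr() and
  // __scudo_get_stack_depot_size()) and may be corrupt, which getErrorInfo
  // now tolerates instead of trusting a fixed-size depot.
  __scudo_get_error_info(info, fault_addr, depot, depot_size, region_info,
                         ring_buffer, ring_buffer_size, memory, memory_tags,
                         memory_addr, memory_size);
}
```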