Diffstat (limited to 'compiler-rt/lib/scudo')
 compiler-rt/lib/scudo/standalone/allocator_config.def          |   4
 compiler-rt/lib/scudo/standalone/combined.h                    |  98
 compiler-rt/lib/scudo/standalone/mem_map_linux.cpp             |   7
 compiler-rt/lib/scudo/standalone/memtag.h                      |   6
 compiler-rt/lib/scudo/standalone/primary64.h                   |   2
 compiler-rt/lib/scudo/standalone/secondary.h                   |  69
 compiler-rt/lib/scudo/standalone/tests/combined_test.cpp       | 336
 compiler-rt/lib/scudo/standalone/tests/map_test.cpp            |  46
 compiler-rt/lib/scudo/standalone/tests/memtag_test.cpp         |   1
 compiler-rt/lib/scudo/standalone/tests/primary_test.cpp        |  36
 compiler-rt/lib/scudo/standalone/tests/quarantine_test.cpp     |  16
 compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp      |  89
 compiler-rt/lib/scudo/standalone/tests/size_class_map_test.cpp |   4
 compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp     |   9
 compiler-rt/lib/scudo/standalone/tsd_exclusive.h               |  19
 compiler-rt/lib/scudo/standalone/tsd_shared.h                  |  48
 16 files changed, 677 insertions(+), 113 deletions(-)
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.def b/compiler-rt/lib/scudo/standalone/allocator_config.def
index 7485308..0aea7b8 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config.def
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.def
@@ -57,6 +57,10 @@ BASE_OPTIONAL(const bool, MaySupportMemoryTagging, false)
// Disable the quarantine code.
BASE_OPTIONAL(const bool, QuarantineDisabled, false)
+// If set to true, malloc_usable_size returns the exact size of the allocation.
+// If set to false, it returns the total available size of the allocation.
+BASE_OPTIONAL(const bool, ExactUsableSize, true)
+
// PRIMARY_REQUIRED_TYPE(NAME)
//
// SizeClassMap to use with the Primary.
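The new ExactUsableSize option defaults to true, preserving the historical behavior where malloc_usable_size reports the requested size. For illustration, a config opting out might look like the following minimal sketch; the member names mirror the BASE_OPTIONAL entries above, while the config struct shape and the surrounding allocator plumbing are assumed:

// Hypothetical config opting out of exact usable sizes; everything outside
// the three flags is assumed, not taken from the patch.
#include <cstdio>

struct FullUsableSizeConfig {
  static const bool MaySupportMemoryTagging = false;
  static const bool QuarantineDisabled = true;
  // malloc_usable_size now reports all bytes available in the chunk, not
  // just the size that was originally requested.
  static const bool ExactUsableSize = false;
};

int main() {
  printf("ExactUsableSize: %d\n", FullUsableSizeConfig::ExactUsableSize ? 1 : 0);
  return 0;
}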
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index 329ec45..5108f02 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -171,8 +171,7 @@ public:
Primary.Options.set(OptionBit::DeallocTypeMismatch);
if (getFlags()->delete_size_mismatch)
Primary.Options.set(OptionBit::DeleteSizeMismatch);
- if (allocatorSupportsMemoryTagging<AllocatorConfig>() &&
- systemSupportsMemoryTagging())
+ if (systemSupportsMemoryTagging())
Primary.Options.set(OptionBit::UseMemoryTagging);
QuarantineMaxChunkSize =
@@ -689,16 +688,15 @@ public:
Base = untagPointer(Base);
const uptr From = Base;
const uptr To = Base + Size;
- bool MayHaveTaggedPrimary =
- allocatorSupportsMemoryTagging<AllocatorConfig>() &&
- systemSupportsMemoryTagging();
+ const Options Options = Primary.Options.load();
+ bool MayHaveTaggedPrimary = useMemoryTagging<AllocatorConfig>(Options);
auto Lambda = [this, From, To, MayHaveTaggedPrimary, Callback,
Arg](uptr Block) {
if (Block < From || Block >= To)
return;
uptr Chunk;
Chunk::UnpackedHeader Header;
- if (MayHaveTaggedPrimary) {
+ if (UNLIKELY(MayHaveTaggedPrimary)) {
// A chunk header can either have a zero tag (tagged primary) or the
// header tag (secondary, or untagged primary). We don't know which so
// try both.
@@ -706,19 +704,26 @@ public:
if (!getChunkFromBlock(Block, &Chunk, &Header) &&
!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
return;
- } else {
- if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
- return;
+ } else if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header)) {
+ return;
}
- if (Header.State == Chunk::State::Allocated) {
- uptr TaggedChunk = Chunk;
- if (allocatorSupportsMemoryTagging<AllocatorConfig>())
- TaggedChunk = untagPointer(TaggedChunk);
- if (useMemoryTagging<AllocatorConfig>(Primary.Options.load()))
- TaggedChunk = loadTag(Chunk);
- Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
- Arg);
+
+ if (Header.State != Chunk::State::Allocated)
+ return;
+
+ uptr TaggedChunk = Chunk;
+ if (allocatorSupportsMemoryTagging<AllocatorConfig>())
+ TaggedChunk = untagPointer(TaggedChunk);
+ uptr Size;
+ if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Primary.Options.load()))) {
+ TaggedChunk = loadTag(Chunk);
+ Size = getSize(reinterpret_cast<void *>(Chunk), &Header);
+ } else if (AllocatorConfig::getExactUsableSize()) {
+ Size = getSize(reinterpret_cast<void *>(Chunk), &Header);
+ } else {
+ Size = getUsableSize(reinterpret_cast<void *>(Chunk), &Header);
}
+ Callback(TaggedChunk, Size, Arg);
};
Primary.iterateOverBlocks(Lambda);
Secondary.iterateOverBlocks(Lambda);
@@ -759,16 +764,50 @@ public:
return false;
}
- // Return the usable size for a given chunk. Technically we lie, as we just
- // report the actual size of a chunk. This is done to counteract code actively
- // writing past the end of a chunk (like sqlite3) when the usable size allows
- // for it, which then forces realloc to copy the usable size of a chunk as
- // opposed to its actual size.
+ ALWAYS_INLINE uptr getUsableSize(const void *Ptr,
+ Chunk::UnpackedHeader *Header) {
+ void *BlockBegin = getBlockBegin(Ptr, Header);
+ if (LIKELY(Header->ClassId)) {
+ return SizeClassMap::getSizeByClassId(Header->ClassId) -
+ (reinterpret_cast<uptr>(Ptr) - reinterpret_cast<uptr>(BlockBegin));
+ }
+
+ uptr UntaggedPtr = reinterpret_cast<uptr>(Ptr);
+ if (allocatorSupportsMemoryTagging<AllocatorConfig>()) {
+ UntaggedPtr = untagPointer(UntaggedPtr);
+ BlockBegin = untagPointer(BlockBegin);
+ }
+ return SecondaryT::getBlockEnd(BlockBegin) - UntaggedPtr;
+ }
+
+ // Return the usable size for a given chunk. If MTE is enabled or the
+ // ExactUsableSize config parameter is true, we report the exact size of
+ // the original allocation. Otherwise, we return the total usable size of
+ // the chunk.
uptr getUsableSize(const void *Ptr) {
if (UNLIKELY(!Ptr))
return 0;
- return getAllocSize(Ptr);
+ if (AllocatorConfig::getExactUsableSize() ||
+ UNLIKELY(useMemoryTagging<AllocatorConfig>(Primary.Options.load())))
+ return getAllocSize(Ptr);
+
+ initThreadMaybe();
+
+#ifdef GWP_ASAN_HOOKS
+ if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
+ return GuardedAlloc.getSize(Ptr);
+#endif // GWP_ASAN_HOOKS
+
+ Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Cookie, Ptr, &Header);
+
+ // Getting the alloc size of a chunk only makes sense if it's allocated.
+ if (UNLIKELY(Header.State != Chunk::State::Allocated))
+ reportInvalidChunkState(AllocatorAction::Sizing, Ptr);
+
+ return getUsableSize(Ptr, &Header);
}
uptr getAllocSize(const void *Ptr) {
@@ -951,6 +990,19 @@ public:
MemorySize, 2, 16);
}
+ uptr getBlockBeginTestOnly(const void *Ptr) {
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Cookie, Ptr, &Header);
+ DCHECK(Header.State == Chunk::State::Allocated);
+
+ if (allocatorSupportsMemoryTagging<AllocatorConfig>())
+ Ptr = untagPointer(const_cast<void *>(Ptr));
+ void *Begin = getBlockBegin(Ptr, &Header);
+ if (allocatorSupportsMemoryTagging<AllocatorConfig>())
+ Begin = untagPointer(Begin);
+ return reinterpret_cast<uptr>(Begin);
+ }
+
private:
typedef typename PrimaryT::SizeClassMap SizeClassMap;
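For the primary path, the new getUsableSize overload computes the bytes remaining in the size class from the user pointer to the end of the block. A small self-contained illustration of that arithmetic, with made-up values (the 16-byte header and the addresses are illustrative assumptions, not Scudo's real layout):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t ClassSize = 1024;   // SizeClassMap::getSizeByClassId(ClassId)
  const uint64_t BlockBegin = 0x10000;           // start of the backing block
  const uint64_t HeaderSize = 16;                // assumed chunk header size
  const uint64_t Ptr = BlockBegin + HeaderSize;  // pointer handed to the user
  // Usable bytes = size-class size minus the pointer's offset into the block.
  const uint64_t Usable = ClassSize - (Ptr - BlockBegin);
  assert(Usable == 1008);  // 1024 - 16
  return 0;
}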
diff --git a/compiler-rt/lib/scudo/standalone/mem_map_linux.cpp b/compiler-rt/lib/scudo/standalone/mem_map_linux.cpp
index 783c4f0..df3e54ca 100644
--- a/compiler-rt/lib/scudo/standalone/mem_map_linux.cpp
+++ b/compiler-rt/lib/scudo/standalone/mem_map_linux.cpp
@@ -122,7 +122,12 @@ void MemMapLinux::setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags) {
void MemMapLinux::releaseAndZeroPagesToOSImpl(uptr From, uptr Size) {
void *Addr = reinterpret_cast<void *>(From);
- while (madvise(Addr, Size, MADV_DONTNEED) == -1 && errno == EAGAIN) {
+ int rc;
+ while ((rc = madvise(Addr, Size, MADV_DONTNEED)) == -1 && errno == EAGAIN) {
+ }
+ if (rc == -1) {
+ // If we can't madvise the memory, then we still need to zero it.
+ memset(Addr, 0, Size);
}
}
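The mem_map_linux.cpp change keeps the guarantee that released pages read back as zero even when madvise fails outright (for example on an mlock()ed range). The same pattern in isolation, as a Linux-only sketch:

// Retry madvise while it reports EAGAIN; if it still fails, zero the pages
// manually so callers can keep assuming released memory reads back as zero.
#include <cerrno>
#include <cstring>
#include <sys/mman.h>

void releaseAndZeroPages(void *Addr, size_t Size) {
  int Rc;
  while ((Rc = madvise(Addr, Size, MADV_DONTNEED)) == -1 && errno == EAGAIN) {
  }
  if (Rc == -1)
    memset(Addr, 0, Size);
}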
diff --git a/compiler-rt/lib/scudo/standalone/memtag.h b/compiler-rt/lib/scudo/standalone/memtag.h
index 83ebe67..073e72c 100644
--- a/compiler-rt/lib/scudo/standalone/memtag.h
+++ b/compiler-rt/lib/scudo/standalone/memtag.h
@@ -108,7 +108,7 @@ inline void enableSystemMemoryTaggingTestOnly() {
#else // !SCUDO_CAN_USE_MTE
-inline bool systemSupportsMemoryTagging() { return false; }
+inline constexpr bool systemSupportsMemoryTagging() { return false; }
inline NORETURN bool systemDetectsMemoryTagFaultsTestOnly() {
UNREACHABLE("memory tagging not supported");
@@ -261,9 +261,7 @@ inline uptr loadTag(uptr Ptr) {
#else
-inline NORETURN bool systemSupportsMemoryTagging() {
- UNREACHABLE("memory tagging not supported");
-}
+inline constexpr bool systemSupportsMemoryTagging() { return false; }
inline NORETURN bool systemDetectsMemoryTagFaultsTestOnly() {
UNREACHABLE("memory tagging not supported");
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index 747b1a2..c2401c8 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -1394,7 +1394,7 @@ uptr SizeClassAllocator64<Config>::releaseToOSMaybe(RegionInfo *Region,
Region->FreeListInfo.PushedBlocks) *
BlockSize;
if (UNLIKELY(BytesInFreeList == 0))
- return false;
+ return 0;
// ==================================================================== //
// 1. Check if we have enough free blocks and if it's worth doing a page
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index f0b7bce..2509db2 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -249,6 +249,7 @@ public:
LRUEntries.clear();
LRUEntries.init(Entries, sizeof(Entries));
+ OldestPresentEntry = nullptr;
AvailEntries.clear();
AvailEntries.init(Entries, sizeof(Entries));
@@ -322,8 +323,6 @@ public:
}
CachedBlock PrevEntry = Quarantine[QuarantinePos];
Quarantine[QuarantinePos] = Entry;
- if (OldestTime == 0)
- OldestTime = Entry.Time;
Entry = PrevEntry;
}
@@ -339,9 +338,6 @@ public:
}
insert(Entry);
-
- if (OldestTime == 0)
- OldestTime = Entry.Time;
} while (0);
for (MemMapT &EvictMemMap : EvictionMemMaps)
@@ -355,7 +351,6 @@ public:
SCUDO_SCOPED_TRACE(
GetSecondaryReleaseToOSTraceName(ReleaseToOS::Normal));
- // TODO: Add ReleaseToOS logic to LRU algorithm
releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
Mutex.unlock();
} else
@@ -535,6 +530,11 @@ public:
void unmapTestOnly() { empty(); }
+ void releaseOlderThanTestOnly(u64 ReleaseTime) {
+ ScopedLock L(Mutex);
+ releaseOlderThan(ReleaseTime);
+ }
+
private:
void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
CachedBlock *AvailEntry = AvailEntries.front();
@@ -542,10 +542,16 @@ private:
*AvailEntry = Entry;
LRUEntries.push_front(AvailEntry);
+ if (OldestPresentEntry == nullptr && AvailEntry->Time != 0)
+ OldestPresentEntry = AvailEntry;
}
void remove(CachedBlock *Entry) REQUIRES(Mutex) {
DCHECK(Entry->isValid());
+ if (OldestPresentEntry == Entry) {
+ OldestPresentEntry = LRUEntries.getPrev(Entry);
+ DCHECK(OldestPresentEntry == nullptr || OldestPresentEntry->Time != 0);
+ }
LRUEntries.remove(Entry);
Entry->invalidate();
AvailEntries.push_front(Entry);
@@ -560,6 +566,7 @@ private:
for (CachedBlock &Entry : LRUEntries)
MapInfo[N++] = Entry.MemMap;
LRUEntries.clear();
+ OldestPresentEntry = nullptr;
}
for (uptr I = 0; I < N; I++) {
MemMapT &MemMap = MapInfo[I];
@@ -567,36 +574,42 @@ private:
}
}
- void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
- if (!Entry.isValid() || !Entry.Time)
- return;
- if (Entry.Time > Time) {
- if (OldestTime == 0 || Entry.Time < OldestTime)
- OldestTime = Entry.Time;
- return;
+ void releaseOlderThan(u64 ReleaseTime) REQUIRES(Mutex) {
+ SCUDO_SCOPED_TRACE(GetSecondaryReleaseOlderThanTraceName());
+
+ if (!Config::getQuarantineDisabled()) {
+ for (uptr I = 0; I < Config::getQuarantineSize(); I++) {
+ auto &Entry = Quarantine[I];
+ if (!Entry.isValid() || Entry.Time == 0 || Entry.Time > ReleaseTime)
+ continue;
+ Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase,
+ Entry.CommitSize);
+ Entry.Time = 0;
+ }
}
- Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
- Entry.Time = 0;
- }
- void releaseOlderThan(u64 Time) REQUIRES(Mutex) {
- SCUDO_SCOPED_TRACE(GetSecondaryReleaseOlderThanTraceName());
+ for (CachedBlock *Entry = OldestPresentEntry; Entry != nullptr;
+ Entry = LRUEntries.getPrev(Entry)) {
+ DCHECK(Entry->isValid());
+ DCHECK(Entry->Time != 0);
+
+ if (Entry->Time > ReleaseTime) {
+ // All entries are newer than this, so no need to keep scanning.
+ OldestPresentEntry = Entry;
+ return;
+ }
- if (!LRUEntries.size() || OldestTime == 0 || OldestTime > Time)
- return;
- OldestTime = 0;
- if (!Config::getQuarantineDisabled())
- for (uptr I = 0; I < Config::getQuarantineSize(); I++)
- releaseIfOlderThan(Quarantine[I], Time);
- for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
- releaseIfOlderThan(Entries[I], Time);
+ Entry->MemMap.releaseAndZeroPagesToOS(Entry->CommitBase,
+ Entry->CommitSize);
+ Entry->Time = 0;
+ }
+ OldestPresentEntry = nullptr;
}
HybridMutex Mutex;
u32 QuarantinePos GUARDED_BY(Mutex) = 0;
atomic_u32 MaxEntriesCount = {};
atomic_uptr MaxEntrySize = {};
- u64 OldestTime GUARDED_BY(Mutex) = 0;
atomic_s32 ReleaseToOsIntervalMs = {};
u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;
@@ -606,6 +619,8 @@ private:
NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
Quarantine GUARDED_BY(Mutex) = {};
+ // The oldest entry in LRUEntries with a non-zero Time.
+ CachedBlock *OldestPresentEntry GUARDED_BY(Mutex) = nullptr;
// Cached blocks stored in LRU order
DoublyLinkedList<CachedBlock> LRUEntries GUARDED_BY(Mutex);
// The unused Entries
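The OldestPresentEntry pointer lets releaseOlderThan walk backwards from the oldest timestamped entry and stop at the first entry newer than the cutoff, instead of scanning the whole Entries array on every release. A minimal model of that scan, using std::list in place of Scudo's intrusive DoublyLinkedList (the names and newest-first ordering come from the diff; the rest is assumed):

#include <cstdint>
#include <list>

struct Entry {
  uint64_t Time;  // 0 means the pages were already released
};

// LRU list with the newest entry at the front, mirroring push_front above.
void releaseOlderThan(std::list<Entry> &LRUEntries, uint64_t ReleaseTime) {
  // Walk from the oldest entry toward the newest.
  for (auto It = LRUEntries.rbegin(); It != LRUEntries.rend(); ++It) {
    if (It->Time == 0)
      continue;  // already released
    if (It->Time > ReleaseTime)
      break;  // every remaining entry is newer; stop scanning
    // The real allocator calls MemMap.releaseAndZeroPagesToOS() here.
    It->Time = 0;
  }
}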
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
index 5fdfd1e..b70b9c9 100644
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -18,6 +18,7 @@
#include "size_class_map.h"
#include <algorithm>
+#include <atomic>
#include <condition_variable>
#include <memory>
#include <mutex>
@@ -326,8 +327,10 @@ void ScudoCombinedTest<Config>::BasicTest(scudo::uptr SizeLog) {
}
}
- Allocator->printStats();
- Allocator->printFragmentationInfo();
+ if (TEST_HAS_FAILURE) {
+ Allocator->printStats();
+ Allocator->printFragmentationInfo();
+ }
}
#define SCUDO_MAKE_BASIC_TEST(SizeLog) \
@@ -1152,6 +1155,249 @@ TEST(ScudoCombinedTest, QuarantineDisabled) {
EXPECT_EQ(Stats.find("Stats: Quarantine"), std::string::npos);
}
+struct UsableSizeClassConfig {
+ static const scudo::uptr NumBits = 1;
+ static const scudo::uptr MinSizeLog = 10;
+ static const scudo::uptr MidSizeLog = 10;
+ static const scudo::uptr MaxSizeLog = 13;
+ static const scudo::u16 MaxNumCachedHint = 8;
+ static const scudo::uptr MaxBytesCachedLog = 12;
+ static const scudo::uptr SizeDelta = 0;
+};
+
+struct TestExactUsableSizeConfig {
+ static const bool MaySupportMemoryTagging = false;
+ static const bool QuarantineDisabled = true;
+
+ template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;
+
+ struct Primary {
+ // In order to properly test the usable size, this Primary config has
+ // four real size classes: 1024, 2048, 4096, 8192.
+ using SizeClassMap = scudo::FixedSizeClassMap<UsableSizeClassConfig>;
+ static const scudo::uptr RegionSizeLog = 21U;
+ static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
+ static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+ typedef scudo::uptr CompactPtrT;
+ static const scudo::uptr CompactPtrScale = 0;
+ static const bool EnableRandomOffset = true;
+ static const scudo::uptr MapSizeIncrement = 1UL << 18;
+ static const scudo::uptr GroupSizeLog = 18;
+ };
+ template <typename Config>
+ using PrimaryT = scudo::SizeClassAllocator64<Config>;
+
+ struct Secondary {
+ template <typename Config>
+ using CacheT = scudo::MapAllocatorNoCache<Config>;
+ };
+
+ template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
+};
+
+template <class AllocatorT> void VerifyExactUsableSize(AllocatorT &Allocator) {
+ // Scan through all sizes up to 10000, then check some larger sizes.
+ for (scudo::uptr Size = 1; Size < 10000; Size++) {
+ void *P = Allocator.allocate(Size, Origin);
+ EXPECT_EQ(Size, Allocator.getUsableSize(P))
+ << "Failed usable size at allocation size " << Size;
+ Allocator.deallocate(P, Origin);
+ }
+
+ // Verify that aligned allocations also return the exact size allocated.
+ const scudo::uptr AllocSize = 313;
+ for (scudo::uptr Align = 1; Align <= 8; Align++) {
+ void *P = Allocator.allocate(AllocSize, Origin, 1U << Align);
+ EXPECT_EQ(AllocSize, Allocator.getUsableSize(P))
+ << "Failed usable size at allocation size " << AllocSize << " at align "
+ << 1 << Align;
+ Allocator.deallocate(P, Origin);
+ }
+
+ // Verify an explicitly large allocation.
+ const scudo::uptr LargeAllocSize = 1000000;
+ void *P = Allocator.allocate(LargeAllocSize, Origin);
+ EXPECT_EQ(LargeAllocSize, Allocator.getUsableSize(P));
+ Allocator.deallocate(P, Origin);
+
+ // Now do it for aligned allocations for large allocations.
+ for (scudo::uptr Align = 1; Align <= 8; Align++) {
+ void *P = Allocator.allocate(LargeAllocSize, Origin, 1U << Align);
+ EXPECT_EQ(LargeAllocSize, Allocator.getUsableSize(P))
+ << "Failed usable size at allocation size " << AllocSize << " at align "
+ << 1 << Align;
+ Allocator.deallocate(P, Origin);
+ }
+}
+
+template <class AllocatorT>
+void VerifyIterateOverUsableSize(AllocatorT &Allocator) {
+ // This does not verify whether the size is the exact size or the size of
+ // the size class. Instead, verify that the size matches the usable size
+ // and assume the other tests have verified getUsableSize.
+ std::unordered_map<void *, size_t> Pointers;
+ Pointers.insert({Allocator.allocate(128, Origin), 0U});
+ Pointers.insert({Allocator.allocate(128, Origin, 32), 0U});
+ Pointers.insert({Allocator.allocate(2000, Origin), 0U});
+ Pointers.insert({Allocator.allocate(2000, Origin, 64), 0U});
+ Pointers.insert({Allocator.allocate(8000, Origin), 0U});
+ Pointers.insert({Allocator.allocate(8000, Origin, 128), 0U});
+ Pointers.insert({Allocator.allocate(2000205, Origin), 0U});
+ Pointers.insert({Allocator.allocate(2000205, Origin, 128), 0U});
+ Pointers.insert({Allocator.allocate(2000205, Origin, 256), 0U});
+
+ Allocator.disable();
+ Allocator.iterateOverChunks(
+ 0, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
+ [](uintptr_t Base, size_t Size, void *Arg) {
+ std::unordered_map<void *, size_t> *Pointers =
+ reinterpret_cast<std::unordered_map<void *, size_t> *>(Arg);
+ (*Pointers)[reinterpret_cast<void *>(Base)] = Size;
+ },
+ reinterpret_cast<void *>(&Pointers));
+ Allocator.enable();
+
+ for (auto [Ptr, IterateSize] : Pointers) {
+ EXPECT_NE(0U, IterateSize)
+ << "Pointer " << Ptr << " not found in iterateOverChunks call.";
+ EXPECT_EQ(IterateSize, Allocator.getUsableSize(Ptr))
+ << "Pointer " << Ptr
+ << " mismatch between iterate size and usable size.";
+ Allocator.deallocate(Ptr, Origin);
+ }
+}
+
+TEST(ScudoCombinedTest, ExactUsableSize) {
+ using AllocatorT = scudo::Allocator<TestExactUsableSizeConfig>;
+ auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+ VerifyExactUsableSize<AllocatorT>(*Allocator);
+ VerifyIterateOverUsableSize<AllocatorT>(*Allocator);
+}
+
+struct TestExactUsableSizeMTEConfig : TestExactUsableSizeConfig {
+ static const bool MaySupportMemoryTagging = true;
+};
+
+TEST(ScudoCombinedTest, ExactUsableSizeMTE) {
+ if (!scudo::archSupportsMemoryTagging() ||
+ !scudo::systemDetectsMemoryTagFaultsTestOnly())
+ TEST_SKIP("Only supported on systems that can enable MTE.");
+
+ scudo::enableSystemMemoryTaggingTestOnly();
+
+ using AllocatorT = scudo::Allocator<TestExactUsableSizeMTEConfig>;
+ auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+ VerifyExactUsableSize<AllocatorT>(*Allocator);
+ VerifyIterateOverUsableSize<AllocatorT>(*Allocator);
+}
+
+template <class AllocatorT>
+void VerifyUsableSizePrimary(AllocatorT &Allocator) {
+ std::vector<scudo::uptr> SizeClasses = {1024U, 2048U, 4096U, 8192U};
+ // Start at 1 and carry the start size across iterations so every size up
+ // to each class's usable size gets covered.
+ scudo::uptr StartSize = 1;
+ for (size_t I = 0; I < SizeClasses.size(); I++) {
+ scudo::uptr SizeClass = SizeClasses[I];
+ scudo::uptr UsableSize = SizeClass - scudo::Chunk::getHeaderSize();
+ for (scudo::uptr Size = StartSize; Size < UsableSize; Size++) {
+ void *P = Allocator.allocate(Size, Origin);
+ EXPECT_EQ(UsableSize, Allocator.getUsableSize(P))
+ << "Failed usable size at allocation size " << Size
+ << " for size class " << SizeClass;
+ memset(P, 0xff, UsableSize);
+ EXPECT_EQ(Allocator.getBlockBeginTestOnly(P) + SizeClass,
+ reinterpret_cast<scudo::uptr>(P) + UsableSize);
+ Allocator.deallocate(P, Origin);
+ }
+
+ StartSize = UsableSize + 1;
+ }
+
+ std::vector<scudo::uptr> Alignments = {32U, 128U};
+ for (size_t I = 0; I < SizeClasses.size(); I++) {
+ scudo::uptr SizeClass = SizeClasses[I];
+ scudo::uptr AllocSize;
+ if (I == 0)
+ AllocSize = 1;
+ else
+ AllocSize = SizeClasses[I - 1] + 1;
+
+ for (auto Alignment : Alignments) {
+ void *P = Allocator.allocate(AllocSize, Origin, Alignment);
+ scudo::uptr UsableSize = Allocator.getUsableSize(P);
+ memset(P, 0xff, UsableSize);
+ EXPECT_EQ(Allocator.getBlockBeginTestOnly(P) + SizeClass,
+ reinterpret_cast<scudo::uptr>(P) + UsableSize)
+ << "Failed usable size at allocation size " << AllocSize
+ << " for size class " << SizeClass << " at alignment " << Alignment;
+ Allocator.deallocate(P, Origin);
+ }
+ }
+}
+
+template <class AllocatorT>
+void VerifyUsableSizeSecondary(AllocatorT &Allocator) {
+ const scudo::uptr LargeAllocSize = 996780;
+ const scudo::uptr PageSize = scudo::getPageSizeCached();
+ void *P = Allocator.allocate(LargeAllocSize, Origin);
+ scudo::uptr UsableSize = Allocator.getUsableSize(P);
+ memset(P, 0xff, UsableSize);
+ // Assumes that the secondary always rounds up allocations to a page boundary.
+ EXPECT_EQ(scudo::roundUp(reinterpret_cast<scudo::uptr>(P) + LargeAllocSize,
+ PageSize),
+ reinterpret_cast<scudo::uptr>(P) + UsableSize);
+ Allocator.deallocate(P, Origin);
+
+ // Check aligned allocations now.
+ for (scudo::uptr Alignment = 1; Alignment <= 8; Alignment++) {
+ void *P = Allocator.allocate(LargeAllocSize, Origin, 1U << Alignment);
+ scudo::uptr UsableSize = Allocator.getUsableSize(P);
+ EXPECT_EQ(scudo::roundUp(reinterpret_cast<scudo::uptr>(P) + LargeAllocSize,
+ PageSize),
+ reinterpret_cast<scudo::uptr>(P) + UsableSize)
+ << "Failed usable size at allocation size " << LargeAllocSize
+ << " at alignment " << Alignment;
+ Allocator.deallocate(P, Origin);
+ }
+}
+
+struct TestFullUsableSizeConfig : TestExactUsableSizeConfig {
+ static const bool ExactUsableSize = false;
+};
+
+TEST(ScudoCombinedTest, FullUsableSize) {
+ using AllocatorT = scudo::Allocator<TestFullUsableSizeConfig>;
+ auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+ VerifyUsableSizePrimary<AllocatorT>(*Allocator);
+ VerifyUsableSizeSecondary<AllocatorT>(*Allocator);
+ VerifyIterateOverUsableSize<AllocatorT>(*Allocator);
+}
+
+struct TestFullUsableSizeMTEConfig : TestFullUsableSizeConfig {
+ static const bool MaySupportMemoryTagging = true;
+};
+
+TEST(ScudoCombinedTest, FullUsableSizeMTE) {
+ if (!scudo::archSupportsMemoryTagging() ||
+ !scudo::systemDetectsMemoryTagFaultsTestOnly())
+ TEST_SKIP("Only supported on systems that can enable MTE.");
+
+ scudo::enableSystemMemoryTaggingTestOnly();
+
+ using AllocatorT = scudo::Allocator<TestFullUsableSizeMTEConfig>;
+ auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+ // When MTE is enabled, you get exact sizes.
+ VerifyExactUsableSize<AllocatorT>(*Allocator);
+ VerifyIterateOverUsableSize<AllocatorT>(*Allocator);
+}
+
// Verify that no special quarantine blocks appear in iterateOverChunks.
TEST(ScudoCombinedTest, QuarantineIterateOverChunks) {
using AllocatorT = TestAllocator<TestQuarantineConfig>;
@@ -1182,3 +1428,89 @@ TEST(ScudoCombinedTest, QuarantineIterateOverChunks) {
<< std::hex << Base << " Size " << std::dec << Size;
}
}
+
+struct InitSizeClassConfig {
+ static const scudo::uptr NumBits = 1;
+ static const scudo::uptr MinSizeLog = 10;
+ static const scudo::uptr MidSizeLog = 10;
+ static const scudo::uptr MaxSizeLog = 13;
+ static const scudo::u16 MaxNumCachedHint = 8;
+ static const scudo::uptr MaxBytesCachedLog = 12;
+ static const scudo::uptr SizeDelta = 0;
+};
+
+struct TestInitSizeConfig {
+ static const bool MaySupportMemoryTagging = false;
+ static const bool QuarantineDisabled = true;
+
+ struct Primary {
+ using SizeClassMap = scudo::FixedSizeClassMap<InitSizeClassConfig>;
+ static const scudo::uptr RegionSizeLog = 21U;
+ static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
+ static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+ typedef scudo::uptr CompactPtrT;
+ static const scudo::uptr CompactPtrScale = 0;
+ static const bool EnableRandomOffset = true;
+ static const scudo::uptr MapSizeIncrement = 1UL << 18;
+ static const scudo::uptr GroupSizeLog = 18;
+ };
+ template <typename Config>
+ using PrimaryT = scudo::SizeClassAllocator64<Config>;
+
+ struct Secondary {
+ template <typename Config>
+ using CacheT = scudo::MapAllocatorNoCache<Config>;
+ };
+
+ template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
+};
+
+struct TestInitSizeTSDSharedConfig : public TestInitSizeConfig {
+ template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 4U, 4U>;
+};
+
+struct TestInitSizeTSDExclusiveConfig : public TestInitSizeConfig {
+ template <class A> using TSDRegistryT = scudo::TSDRegistryExT<A>;
+};
+
+template <class AllocatorT> void RunStress() {
+ auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+ // This test is designed to have many threads trying to initialize the
+ // TSD at the same time. Make sure this doesn't crash.
+ std::atomic_bool StartRunning = false;
+ std::vector<std::thread *> threads;
+ for (size_t I = 0; I < 16; I++) {
+ threads.emplace_back(new std::thread([&Allocator, &StartRunning]() {
+ while (!StartRunning.load())
+ ;
+
+ void *Ptr = Allocator->allocate(10, Origin);
+ EXPECT_TRUE(Ptr != nullptr);
+ // Make sure this value is not optimized away.
+ asm volatile("" : : "r,m"(Ptr) : "memory");
+ Allocator->deallocate(Ptr, Origin);
+ }));
+ }
+
+ StartRunning = true;
+
+ for (auto *thread : threads) {
+ thread->join();
+ delete thread;
+ }
+}
+
+TEST(ScudoCombinedTest, StressThreadInitTSDShared) {
+ using AllocatorT = scudo::Allocator<TestInitSizeTSDSharedConfig>;
+ // Run the stress test a few times.
+ for (size_t I = 0; I < 10; I++)
+ RunStress<AllocatorT>();
+}
+
+TEST(ScudoCombinedTest, StressThreadInitTSDExclusive) {
+ using AllocatorT = scudo::Allocator<TestInitSizeTSDExclusiveConfig>;
+ // Run the stress test a few times.
+ for (size_t I = 0; I < 10; I++)
+ RunStress<AllocatorT>();
+}
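RunStress gates all worker threads on an atomic flag so they reach the allocator's lazy TSD initialization at nearly the same moment. The same start-gate pattern in isolation:

#include <atomic>
#include <thread>
#include <vector>

int main() {
  std::atomic<bool> StartRunning{false};
  std::vector<std::thread> Threads;
  for (int I = 0; I < 16; I++)
    Threads.emplace_back([&StartRunning] {
      while (!StartRunning.load())
        ;  // busy-wait until every thread has been created
      // The racy operation under test (e.g. a first allocation) goes here.
    });
  StartRunning = true;
  for (auto &T : Threads)
    T.join();
  return 0;
}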
diff --git a/compiler-rt/lib/scudo/standalone/tests/map_test.cpp b/compiler-rt/lib/scudo/standalone/tests/map_test.cpp
index cc7d3ee..9d1a35c 100644
--- a/compiler-rt/lib/scudo/standalone/tests/map_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/map_test.cpp
@@ -14,6 +14,10 @@
#include <string.h>
#include <unistd.h>
+#if SCUDO_LINUX
+#include <sys/mman.h>
+#endif
+
static const char *MappingName = "scudo:test";
TEST(ScudoMapTest, PageSize) {
@@ -89,3 +93,45 @@ TEST(ScudoMapTest, MapGrowUnmap) {
memset(reinterpret_cast<void *>(Q), 0xbb, PageSize);
MemMap.unmap();
}
+
+// Verify that zeroing works properly.
+TEST(ScudoMapTest, Zeroing) {
+ scudo::ReservedMemoryT ReservedMemory;
+ const scudo::uptr PageSize = scudo::getPageSizeCached();
+ const scudo::uptr Size = 3 * PageSize;
+ ReservedMemory.create(/*Addr=*/0U, Size, MappingName);
+ ASSERT_TRUE(ReservedMemory.isCreated());
+
+ scudo::MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
+ ReservedMemory.getCapacity());
+ EXPECT_TRUE(
+ MemMap.remap(MemMap.getBase(), MemMap.getCapacity(), MappingName));
+ unsigned char *Data = reinterpret_cast<unsigned char *>(MemMap.getBase());
+ memset(Data, 1U, MemMap.getCapacity());
+ // Spot check some values.
+ EXPECT_EQ(1U, Data[0]);
+ EXPECT_EQ(1U, Data[PageSize]);
+ EXPECT_EQ(1U, Data[PageSize * 2]);
+ MemMap.releaseAndZeroPagesToOS(MemMap.getBase(), MemMap.getCapacity());
+ EXPECT_EQ(0U, Data[0]);
+ EXPECT_EQ(0U, Data[PageSize]);
+ EXPECT_EQ(0U, Data[PageSize * 2]);
+
+#if SCUDO_LINUX
+ // Now verify that if madvise fails, the data is still zeroed.
+ memset(Data, 1U, MemMap.getCapacity());
+ if (mlock(Data, MemMap.getCapacity()) != -1) {
+ EXPECT_EQ(1U, Data[0]);
+ EXPECT_EQ(1U, Data[PageSize]);
+ EXPECT_EQ(1U, Data[PageSize * 2]);
+ MemMap.releaseAndZeroPagesToOS(MemMap.getBase(), MemMap.getCapacity());
+ EXPECT_EQ(0U, Data[0]);
+ EXPECT_EQ(0U, Data[PageSize]);
+ EXPECT_EQ(0U, Data[PageSize * 2]);
+
+ EXPECT_NE(-1, munlock(Data, MemMap.getCapacity()));
+ }
+#endif
+
+ MemMap.unmap();
+}
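The Linux-specific half of the Zeroing test relies on mlock() making madvise(MADV_DONTNEED) fail with EINVAL, which forces the new memset fallback. A standalone reproduction of that failure mode (assumes RLIMIT_MEMLOCK permits locking one page):

#include <cassert>
#include <cerrno>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  const size_t Size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  void *P = mmap(nullptr, Size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(P != MAP_FAILED);
  if (mlock(P, Size) == 0) {
    // madvise(2) documents EINVAL for MADV_DONTNEED on locked pages.
    int Rc = madvise(P, Size, MADV_DONTNEED);
    assert(Rc == -1 && errno == EINVAL);
    (void)Rc;
    munlock(P, Size);
  }
  munmap(P, Size);
  return 0;
}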
diff --git a/compiler-rt/lib/scudo/standalone/tests/memtag_test.cpp b/compiler-rt/lib/scudo/standalone/tests/memtag_test.cpp
index 09093e1..d0d93316 100644
--- a/compiler-rt/lib/scudo/standalone/tests/memtag_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/memtag_test.cpp
@@ -28,7 +28,6 @@ TEST(MemtagBasicDeathTest, Unsupported) {
EXPECT_DEATH(untagPointer((uptr)0), "not supported");
EXPECT_DEATH(extractTag((uptr)0), "not supported");
- EXPECT_DEATH(systemSupportsMemoryTagging(), "not supported");
EXPECT_DEATH(systemDetectsMemoryTagFaultsTestOnly(), "not supported");
EXPECT_DEATH(enableSystemMemoryTaggingTestOnly(), "not supported");
diff --git a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
index 1f5df28..3a087c4 100644
--- a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
@@ -230,9 +230,11 @@ SCUDO_TYPED_TEST(ScudoPrimaryTest, BasicPrimary) {
}
SizeClassAllocator.destroy(nullptr);
Allocator->releaseToOS(scudo::ReleaseToOS::Force);
- scudo::ScopedString Str;
- Allocator->getStats(&Str);
- Str.output();
+ if (TEST_HAS_FAILURE) {
+ scudo::ScopedString Str;
+ Allocator->getStats(&Str);
+ Str.output();
+ }
}
struct SmallRegionsConfig {
@@ -289,10 +291,12 @@ TEST(ScudoPrimaryTest, Primary64OOM) {
SizeClassAllocator.destroy(nullptr);
Allocator.releaseToOS(scudo::ReleaseToOS::Force);
- scudo::ScopedString Str;
- Allocator.getStats(&Str);
- Str.output();
EXPECT_EQ(AllocationFailed, true);
+ if (TEST_HAS_FAILURE) {
+ scudo::ScopedString Str;
+ Allocator.getStats(&Str);
+ Str.output();
+ }
Allocator.unmapTestOnly();
}
@@ -328,9 +332,11 @@ SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryIterate) {
}
SizeClassAllocator.destroy(nullptr);
Allocator->releaseToOS(scudo::ReleaseToOS::Force);
- scudo::ScopedString Str;
- Allocator->getStats(&Str);
- Str.output();
+ if (TEST_HAS_FAILURE) {
+ scudo::ScopedString Str;
+ Allocator->getStats(&Str);
+ Str.output();
+ }
}
SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryThreaded) {
@@ -385,11 +391,13 @@ SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryThreaded) {
for (auto &T : Threads)
T.join();
Allocator->releaseToOS(scudo::ReleaseToOS::Force);
- scudo::ScopedString Str;
- Allocator->getStats(&Str);
- Allocator->getFragmentationInfo(&Str);
- Allocator->getMemoryGroupFragmentationInfo(&Str);
- Str.output();
+ if (TEST_HAS_FAILURE) {
+ scudo::ScopedString Str;
+ Allocator->getStats(&Str);
+ Allocator->getFragmentationInfo(&Str);
+ Allocator->getMemoryGroupFragmentationInfo(&Str);
+ Str.output();
+ }
}
// Through a simple allocation that spans two pages, verify that releaseToOS
diff --git a/compiler-rt/lib/scudo/standalone/tests/quarantine_test.cpp b/compiler-rt/lib/scudo/standalone/tests/quarantine_test.cpp
index 54d42ed..e3e983b 100644
--- a/compiler-rt/lib/scudo/standalone/tests/quarantine_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/quarantine_test.cpp
@@ -216,9 +216,11 @@ TEST(ScudoQuarantineTest, GlobalQuarantine) {
Quarantine.drainAndRecycle(&Cache, Cb);
EXPECT_EQ(Cache.getSize(), 0UL);
- scudo::ScopedString Str;
- Quarantine.getStats(&Str);
- Str.output();
+ if (TEST_HAS_FAILURE) {
+ scudo::ScopedString Str;
+ Quarantine.getStats(&Str);
+ Str.output();
+ }
}
struct PopulateQuarantineThread {
@@ -248,9 +250,11 @@ TEST(ScudoQuarantineTest, ThreadedGlobalQuarantine) {
for (scudo::uptr I = 0; I < NumberOfThreads; I++)
pthread_join(T[I].Thread, 0);
- scudo::ScopedString Str;
- Quarantine.getStats(&Str);
- Str.output();
+ if (TEST_HAS_FAILURE) {
+ scudo::ScopedString Str;
+ Quarantine.getStats(&Str);
+ Str.output();
+ }
for (scudo::uptr I = 0; I < NumberOfThreads; I++)
Quarantine.drainAndRecycle(&T[I].Cache, Cb);
diff --git a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
index d8a7f6b..8741c82 100644
--- a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
@@ -27,9 +27,7 @@
const scudo::uptr PageSize = scudo::getPageSizeCached();
template <typename Config> static scudo::Options getOptionsForConfig() {
- if (!Config::getMaySupportMemoryTagging() ||
- !scudo::archSupportsMemoryTagging() ||
- !scudo::systemSupportsMemoryTagging())
+ if (!scudo::systemSupportsMemoryTagging())
return {};
scudo::AtomicOptions AO;
AO.set(scudo::OptionBit::UseMemoryTagging);
@@ -403,6 +401,11 @@ template <class Config> struct CacheInfoType {
MemMap.getBase(), MemMap);
}
}
+
+ void storeMemMap(scudo::MemMapT &MemMap) {
+ Cache->store(Options, MemMap.getBase(), MemMap.getCapacity(),
+ MemMap.getBase(), MemMap);
+ }
};
TEST(ScudoSecondaryTest, AllocatorCacheEntryOrder) {
@@ -503,3 +506,83 @@ TEST(ScudoSecondaryTest, AllocatorCacheOptions) {
Info.Cache->setOption(scudo::Option::MaxCacheEntrySize, 1UL << 20));
EXPECT_TRUE(Info.Cache->canCache(1UL << 16));
}
+
+TEST(ScudoSecondaryTest, ReleaseOlderThanAllEntries) {
+ CacheInfoType<TestCacheConfig> Info;
+ using CacheConfig = CacheInfoType<TestCacheConfig>::CacheConfig;
+
+ Info.Cache->releaseOlderThanTestOnly(UINT64_MAX);
+
+ Info.fillCacheWithSameSizeBlocks(CacheConfig::getDefaultMaxEntriesCount(),
+ 1024);
+ for (size_t I = 0; I < Info.MemMaps.size(); I++) {
+ // Set the first u32 value to a non-zero value.
+ *reinterpret_cast<scudo::u32 *>(Info.MemMaps[I].getBase()) = 10;
+ }
+
+ Info.Cache->releaseOlderThanTestOnly(UINT64_MAX);
+
+ EXPECT_EQ(Info.MemMaps.size(), CacheConfig::getDefaultMaxEntriesCount());
+ for (size_t I = 0; I < Info.MemMaps.size(); I++) {
+ // All released maps will now be zero.
+ EXPECT_EQ(*reinterpret_cast<scudo::u32 *>(Info.MemMaps[I].getBase()), 0U);
+ }
+}
+
+// This test assumes that the timestamp comes from getMonotonicTimeFast.
+TEST(ScudoSecondaryTest, ReleaseOlderThanGroups) {
+ CacheInfoType<TestCacheConfig> Info;
+
+ // Disable the release interval so we can test the releaseOlderThan
+ // function.
+ Info.Cache->setOption(scudo::Option::ReleaseInterval, -1);
+
+ // Create all of the maps we are going to use.
+ for (size_t I = 0; I < 6; I++) {
+ Info.MemMaps.emplace_back(Info.allocate(1024));
+ // Set the first u32 value to a non-zero value.
+ *reinterpret_cast<scudo::u32 *>(Info.MemMaps[I].getBase()) = 10;
+ }
+
+ // Create three groups of entries at three different intervals.
+ Info.storeMemMap(Info.MemMaps[0]);
+ Info.storeMemMap(Info.MemMaps[1]);
+ scudo::u64 FirstTime = scudo::getMonotonicTimeFast();
+
+ // Need to make sure the next set of entries are stamped with a newer time.
+ while (scudo::getMonotonicTimeFast() <= FirstTime)
+ ;
+
+ Info.storeMemMap(Info.MemMaps[2]);
+ Info.storeMemMap(Info.MemMaps[3]);
+ scudo::u64 SecondTime = scudo::getMonotonicTimeFast();
+
+ // Need to make sure the next set of entries are stamped with a newer time.
+ while (scudo::getMonotonicTimeFast() <= SecondTime)
+ ;
+
+ Info.storeMemMap(Info.MemMaps[4]);
+ Info.storeMemMap(Info.MemMaps[5]);
+ scudo::u64 ThirdTime = scudo::getMonotonicTimeFast();
+
+ Info.Cache->releaseOlderThanTestOnly(FirstTime);
+ for (size_t I = 0; I < 2; I++) {
+ EXPECT_EQ(*reinterpret_cast<scudo::u32 *>(Info.MemMaps[I].getBase()), 0U);
+ }
+ for (size_t I = 2; I < 6; I++) {
+ EXPECT_EQ(*reinterpret_cast<scudo::u32 *>(Info.MemMaps[I].getBase()), 10U);
+ }
+
+ Info.Cache->releaseOlderThanTestOnly(SecondTime);
+ for (size_t I = 0; I < 4; I++) {
+ EXPECT_EQ(*reinterpret_cast<scudo::u32 *>(Info.MemMaps[I].getBase()), 0U);
+ }
+ for (size_t I = 4; I < 6; I++) {
+ EXPECT_EQ(*reinterpret_cast<scudo::u32 *>(Info.MemMaps[I].getBase()), 10U);
+ }
+
+ Info.Cache->releaseOlderThanTestOnly(ThirdTime);
+ for (size_t I = 0; I < 6; I++) {
+ EXPECT_EQ(*reinterpret_cast<scudo::u32 *>(Info.MemMaps[I].getBase()), 0U);
+ }
+}
diff --git a/compiler-rt/lib/scudo/standalone/tests/size_class_map_test.cpp b/compiler-rt/lib/scudo/standalone/tests/size_class_map_test.cpp
index 05b5835..73b2823 100644
--- a/compiler-rt/lib/scudo/standalone/tests/size_class_map_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/size_class_map_test.cpp
@@ -12,8 +12,10 @@
template <class SizeClassMap> void testSizeClassMap() {
typedef SizeClassMap SCMap;
- scudo::printMap<SCMap>();
scudo::validateMap<SCMap>();
+ if (TEST_HAS_FAILURE) {
+ scudo::printMap<SCMap>();
+ }
}
TEST(ScudoSizeClassMapTest, DefaultSizeClassMap) {
diff --git a/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp b/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp
index 612317b..9e5d065 100644
--- a/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp
@@ -588,8 +588,13 @@ TEST_F(ScudoWrappersCTest, MallocInfo) {
EXPECT_EQ(errno, 0);
fclose(F);
EXPECT_EQ(strncmp(Buffer, "<malloc version=\"scudo-", 23), 0);
- EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"1234\" count=\""));
- EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"4321\" count=\""));
+ std::string expected;
+ expected =
+ "<alloc size=\"" + std::to_string(malloc_usable_size(P1)) + "\" count=\"";
+ EXPECT_NE(nullptr, strstr(Buffer, expected.c_str()));
+ expected =
+ "<alloc size=\"" + std::to_string(malloc_usable_size(P2)) + "\" count=\"";
+ EXPECT_NE(nullptr, strstr(Buffer, expected.c_str()));
free(P1);
free(P2);
diff --git a/compiler-rt/lib/scudo/standalone/tsd_exclusive.h b/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
index a58ba65..75921f2 100644
--- a/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
+++ b/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
@@ -52,17 +52,20 @@ template <class Allocator> struct TSDRegistryExT {
bool UnlockRequired;
};
- void init(Allocator *Instance) REQUIRES(Mutex) {
- DCHECK(!Initialized);
+ void init(Allocator *Instance) EXCLUDES(Mutex) {
+ ScopedLock L(Mutex);
+ // If more than one thread is initializing at the exact same moment, the
+ // threads that lose don't need to do anything.
+ if (UNLIKELY(atomic_load_relaxed(&Initialized) != 0))
+ return;
Instance->init();
CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
FallbackTSD.init(Instance);
- Initialized = true;
+ atomic_store_relaxed(&Initialized, 1);
}
- void initOnceMaybe(Allocator *Instance) EXCLUDES(Mutex) {
- ScopedLock L(Mutex);
- if (LIKELY(Initialized))
+ void initOnceMaybe(Allocator *Instance) {
+ if (LIKELY(atomic_load_relaxed(&Initialized) != 0))
return;
init(Instance); // Sets Initialized.
}
@@ -81,7 +84,7 @@ template <class Allocator> struct TSDRegistryExT {
FallbackTSD = {};
State = {};
ScopedLock L(Mutex);
- Initialized = false;
+ atomic_store_relaxed(&Initialized, 0);
}
void drainCaches(Allocator *Instance) {
@@ -158,7 +161,7 @@ private:
}
pthread_key_t PThreadKey = {};
- bool Initialized GUARDED_BY(Mutex) = false;
+ atomic_u8 Initialized = {};
atomic_u8 Disabled = {};
TSD<Allocator> FallbackTSD;
HybridMutex Mutex;
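Both TSD registries now use the same double-checked initialization shape: a relaxed atomic fast path plus a mutex-guarded slow path that re-checks under the lock. A minimal sketch with standard-library primitives (Scudo uses its own HybridMutex and atomics; the relaxed ordering mirrors the diff):

#include <atomic>
#include <cstdint>
#include <mutex>

class Registry {
public:
  void initOnceMaybe() {
    // Fast path: no lock once initialization has completed.
    if (Initialized.load(std::memory_order_relaxed))
      return;
    std::lock_guard<std::mutex> L(Mutex);
    // Re-check under the lock: another thread may have won the race.
    if (Initialized.load(std::memory_order_relaxed))
      return;
    // ... expensive one-time setup goes here ...
    Initialized.store(1, std::memory_order_relaxed);
  }

private:
  std::atomic<uint8_t> Initialized{0};
  std::mutex Mutex;
};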
diff --git a/compiler-rt/lib/scudo/standalone/tsd_shared.h b/compiler-rt/lib/scudo/standalone/tsd_shared.h
index 8b570a77..425a028 100644
--- a/compiler-rt/lib/scudo/standalone/tsd_shared.h
+++ b/compiler-rt/lib/scudo/standalone/tsd_shared.h
@@ -47,20 +47,24 @@ struct TSDRegistrySharedT {
TSD<Allocator> *CurrentTSD;
};
- void init(Allocator *Instance) REQUIRES(Mutex) {
- DCHECK(!Initialized);
+ void init(Allocator *Instance) EXCLUDES(Mutex) {
+ ScopedLock L(Mutex);
+ // If more than one thread is initializing at the exact same moment, the
+ // threads that lose don't need to do anything.
+ if (UNLIKELY(atomic_load_relaxed(&Initialized) != 0))
+ return;
+
Instance->init();
for (u32 I = 0; I < TSDsArraySize; I++)
TSDs[I].init(Instance);
const u32 NumberOfCPUs = getNumberOfCPUs();
setNumberOfTSDs((NumberOfCPUs == 0) ? DefaultTSDCount
: Min(NumberOfCPUs, DefaultTSDCount));
- Initialized = true;
+ atomic_store_relaxed(&Initialized, 1);
}
- void initOnceMaybe(Allocator *Instance) EXCLUDES(Mutex) {
- ScopedLock L(Mutex);
- if (LIKELY(Initialized))
+ void initOnceMaybe(Allocator *Instance) {
+ if (LIKELY(atomic_load_relaxed(&Initialized) != 0))
return;
init(Instance); // Sets Initialized.
}
@@ -72,11 +76,11 @@ struct TSDRegistrySharedT {
}
setCurrentTSD(nullptr);
ScopedLock L(Mutex);
- Initialized = false;
+ atomic_store_relaxed(&Initialized, 0);
}
void drainCaches(Allocator *Instance) {
- ScopedLock L(MutexTSDs);
+ ScopedLock L(Mutex);
for (uptr I = 0; I < NumberOfTSDs; ++I) {
TSDs[I].lock();
Instance->drainCache(&TSDs[I]);
@@ -104,8 +108,10 @@ struct TSDRegistrySharedT {
}
bool setOption(Option O, sptr Value) {
- if (O == Option::MaxTSDsCount)
+ if (O == Option::MaxTSDsCount) {
+ ScopedLock L(Mutex);
return setNumberOfTSDs(static_cast<u32>(Value));
+ }
if (O == Option::ThreadDisableMemInit)
setDisableMemInit(Value);
// Not supported by the TSD Registry, but not an error either.
@@ -114,8 +120,8 @@ struct TSDRegistrySharedT {
bool getDisableMemInit() const { return *getTlsPtr() & 1; }
- void getStats(ScopedString *Str) EXCLUDES(MutexTSDs) {
- ScopedLock L(MutexTSDs);
+ void getStats(ScopedString *Str) EXCLUDES(Mutex) {
+ ScopedLock L(Mutex);
Str->append("Stats: SharedTSDs: %u available; total %u\n", NumberOfTSDs,
TSDsArraySize);
@@ -169,8 +175,7 @@ private:
return reinterpret_cast<TSD<Allocator> *>(*getTlsPtr() & ~1ULL);
}
- bool setNumberOfTSDs(u32 N) EXCLUDES(MutexTSDs) {
- ScopedLock L(MutexTSDs);
+ bool setNumberOfTSDs(u32 N) REQUIRES(Mutex) {
if (N < NumberOfTSDs)
return false;
if (N > TSDsArraySize)
@@ -211,14 +216,14 @@ private:
// TSDs is an array of locks which is not supported for marking thread-safety
// capability.
NOINLINE TSD<Allocator> *getTSDAndLockSlow(TSD<Allocator> *CurrentTSD)
- EXCLUDES(MutexTSDs) {
+ EXCLUDES(Mutex) {
// Use the Precedence of the current TSD as our random seed. Since we are
// in the slow path, it means that tryLock failed, and as a result it's
// very likely that said Precedence is non-zero.
const u32 R = static_cast<u32>(CurrentTSD->getPrecedence());
u32 N, Inc;
{
- ScopedLock L(MutexTSDs);
+ ScopedLock L(Mutex);
N = NumberOfTSDs;
DCHECK_NE(NumberOfCoPrimes, 0U);
Inc = CoPrimes[R % NumberOfCoPrimes];
@@ -255,12 +260,15 @@ private:
}
atomic_u32 CurrentIndex = {};
- u32 NumberOfTSDs GUARDED_BY(MutexTSDs) = 0;
- u32 NumberOfCoPrimes GUARDED_BY(MutexTSDs) = 0;
- u32 CoPrimes[TSDsArraySize] GUARDED_BY(MutexTSDs) = {};
- bool Initialized GUARDED_BY(Mutex) = false;
+ u32 NumberOfTSDs GUARDED_BY(Mutex) = 0;
+ u32 NumberOfCoPrimes GUARDED_BY(Mutex) = 0;
+ u32 CoPrimes[TSDsArraySize] GUARDED_BY(Mutex) = {};
+ atomic_u8 Initialized = {};
+ // Used for global initialization and TSDs access.
+ // Global initialization takes this lock only once in normal operation,
+ // so also using it for TSD access should not cause meaningful contention.
HybridMutex Mutex;
- HybridMutex MutexTSDs;
TSD<Allocator> TSDs[TSDsArraySize];
};