diff options
author | Jakub Jelinek <jakub@redhat.com> | 2023-11-15 12:45:58 +0100 |
---|---|---|
committer | Jakub Jelinek <jakub@redhat.com> | 2023-11-15 12:45:58 +0100 |
commit | 28219f7f99a80519d1c6ab5e5dc83b4c7f8d7251 (patch) | |
tree | 42e3657c58ff08a654f04aeb0f43b3bc75930bbc /libsanitizer/lsan | |
parent | 4d86dc51e34d2a5695b617afeb56e3414836a79a (diff) | |
download | gcc-28219f7f99a80519d1c6ab5e5dc83b4c7f8d7251.zip gcc-28219f7f99a80519d1c6ab5e5dc83b4c7f8d7251.tar.gz gcc-28219f7f99a80519d1c6ab5e5dc83b4c7f8d7251.tar.bz2 |
libsanitizer: merge from upstream (c425db2eb558c263)
The following patch is result of libsanitizer/merge.sh
from c425db2eb558c263 (yesterday evening).
Bootstrapped/regtested on x86_64-linux and i686-linux (together with
the follow-up 3 patches I'm about to post).
BTW, it seems upstream has added riscv64 support (for lsan/tsan, I think),
so if anyone is willing to try it there, it would be a matter of
copying e.g. the s390*-*-linux* libsanitizer/configure.tgt entry
to riscv64-*-linux* with the obvious s/s390x/riscv64/ change in it.
Diffstat (limited to 'libsanitizer/lsan')
-rw-r--r-- | libsanitizer/lsan/lsan.cpp | 2 | ||||
-rw-r--r-- | libsanitizer/lsan/lsan_allocator.cpp | 37 | ||||
-rw-r--r-- | libsanitizer/lsan/lsan_allocator.h | 25 | ||||
-rw-r--r-- | libsanitizer/lsan/lsan_common.cpp | 142 | ||||
-rw-r--r-- | libsanitizer/lsan/lsan_common.h | 27 | ||||
-rw-r--r-- | libsanitizer/lsan/lsan_common_fuchsia.cpp | 3 | ||||
-rw-r--r-- | libsanitizer/lsan/lsan_common_mac.cpp | 15 | ||||
-rw-r--r-- | libsanitizer/lsan/lsan_interceptors.cpp | 117 | ||||
-rw-r--r-- | libsanitizer/lsan/lsan_mac.cpp | 2 | ||||
-rw-r--r-- | libsanitizer/lsan/lsan_thread.cpp | 31 | ||||
-rw-r--r-- | libsanitizer/lsan/lsan_thread.h | 4 |
11 files changed, 255 insertions, 150 deletions
diff --git a/libsanitizer/lsan/lsan.cpp b/libsanitizer/lsan/lsan.cpp index 319f399..6b22360 100644 --- a/libsanitizer/lsan/lsan.cpp +++ b/libsanitizer/lsan/lsan.cpp @@ -97,7 +97,7 @@ extern "C" void __lsan_init() { ReplaceSystemMalloc(); InitTlsSize(); InitializeInterceptors(); - InitializeThreadRegistry(); + InitializeThreads(); InstallDeadlySignalHandlers(LsanOnDeadlySignal); InitializeMainThread(); InstallAtExitCheckLeaks(); diff --git a/libsanitizer/lsan/lsan_allocator.cpp b/libsanitizer/lsan/lsan_allocator.cpp index ee7faca..12d579a 100644 --- a/libsanitizer/lsan/lsan_allocator.cpp +++ b/libsanitizer/lsan/lsan_allocator.cpp @@ -49,8 +49,11 @@ void InitializeAllocator() { max_malloc_size = kMaxAllowedMallocSize; } +void AllocatorThreadStart() { allocator.InitCache(GetAllocatorCache()); } + void AllocatorThreadFinish() { allocator.SwallowCache(GetAllocatorCache()); + allocator.DestroyCache(GetAllocatorCache()); } static ChunkMetadata *Metadata(const void *p) { @@ -65,12 +68,14 @@ static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) { m->stack_trace_id = StackDepotPut(stack); m->requested_size = size; atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed); + RunMallocHooks(p, size); } static void RegisterDeallocation(void *p) { if (!p) return; ChunkMetadata *m = Metadata(p); CHECK(m); + RunFreeHooks(p); atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed); } @@ -104,7 +109,6 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment, if (cleared && allocator.FromPrimary(p)) memset(p, 0, size); RegisterAllocation(stack, p, size); - RunMallocHooks(p, size); return p; } @@ -119,7 +123,6 @@ static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) { } void Deallocate(void *p) { - RunFreeHooks(p); RegisterDeallocation(p); allocator.Deallocate(GetAllocatorCache(), p); } @@ -169,6 +172,10 @@ uptr GetMallocUsableSize(const void *p) { return m->requested_size; } +uptr 
GetMallocUsableSizeFast(const void *p) { + return Metadata(p)->requested_size; +} + int lsan_posix_memalign(void **memptr, uptr alignment, uptr size, const StackTrace &stack) { if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) { @@ -339,15 +346,6 @@ IgnoreObjectResult IgnoreObject(const void *p) { } } -void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) { - // This function can be used to treat memory reachable from `tctx` as live. - // This is useful for threads that have been created but not yet started. - - // This is currently a no-op because the LSan `pthread_create()` interceptor - // blocks until the child thread starts which keeps the thread's `arg` pointer - // live. -} - } // namespace __lsan using namespace __lsan; @@ -368,7 +366,7 @@ uptr __sanitizer_get_heap_size() { } SANITIZER_INTERFACE_ATTRIBUTE -uptr __sanitizer_get_free_bytes() { return 0; } +uptr __sanitizer_get_free_bytes() { return 1; } SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_unmapped_bytes() { return 0; } @@ -377,7 +375,9 @@ SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; } SANITIZER_INTERFACE_ATTRIBUTE -int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; } +int __sanitizer_get_ownership(const void *p) { + return GetMallocBegin(p) != nullptr; +} SANITIZER_INTERFACE_ATTRIBUTE const void * __sanitizer_get_allocated_begin(const void *p) { @@ -389,4 +389,15 @@ uptr __sanitizer_get_allocated_size(const void *p) { return GetMallocUsableSize(p); } +SANITIZER_INTERFACE_ATTRIBUTE +uptr __sanitizer_get_allocated_size_fast(const void *p) { + DCHECK_EQ(p, __sanitizer_get_allocated_begin(p)); + uptr ret = GetMallocUsableSizeFast(p); + DCHECK_EQ(ret, __sanitizer_get_allocated_size(p)); + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); } + } // extern "C" diff --git a/libsanitizer/lsan/lsan_allocator.h 
b/libsanitizer/lsan/lsan_allocator.h index 10c1672..5eed0cb 100644 --- a/libsanitizer/lsan/lsan_allocator.h +++ b/libsanitizer/lsan/lsan_allocator.h @@ -32,6 +32,7 @@ template<typename Callable> void ForEachChunk(const Callable &callback); void GetAllocatorCacheRange(uptr *begin, uptr *end); +void AllocatorThreadStart(); void AllocatorThreadFinish(); void InitializeAllocator(); @@ -67,20 +68,42 @@ using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>; #else # if SANITIZER_FUCHSIA || defined(__powerpc64__) const uptr kAllocatorSpace = ~(uptr)0; +# if SANITIZER_RISCV64 +// See the comments in compiler-rt/lib/asan/asan_allocator.h for why these +// values were chosen. +const uptr kAllocatorSize = UINT64_C(1) << 33; // 8GB +using LSanSizeClassMap = SizeClassMap</*kNumBits=*/2, + /*kMinSizeLog=*/5, + /*kMidSizeLog=*/8, + /*kMaxSizeLog=*/18, + /*kNumCachedHintT=*/8, + /*kMaxBytesCachedLog=*/10>; +static_assert(LSanSizeClassMap::kNumClassesRounded <= 32, + "32 size classes is the optimal number to ensure tests run " + "effieciently on Fuchsia."); +# else const uptr kAllocatorSize = 0x40000000000ULL; // 4T. +using LSanSizeClassMap = DefaultSizeClassMap; +# endif +# elif SANITIZER_RISCV64 +const uptr kAllocatorSpace = ~(uptr)0; +const uptr kAllocatorSize = 0x2000000000ULL; // 128G. +using LSanSizeClassMap = DefaultSizeClassMap; # elif SANITIZER_APPLE const uptr kAllocatorSpace = 0x600000000000ULL; const uptr kAllocatorSize = 0x40000000000ULL; // 4T. +using LSanSizeClassMap = DefaultSizeClassMap; # else const uptr kAllocatorSpace = 0x500000000000ULL; const uptr kAllocatorSize = 0x40000000000ULL; // 4T. +using LSanSizeClassMap = DefaultSizeClassMap; # endif template <typename AddressSpaceViewTy> struct AP64 { // Allocator64 parameters. Deliberately using a short name. 
static const uptr kSpaceBeg = kAllocatorSpace; static const uptr kSpaceSize = kAllocatorSize; static const uptr kMetadataSize = sizeof(ChunkMetadata); - typedef DefaultSizeClassMap SizeClassMap; + using SizeClassMap = LSanSizeClassMap; typedef NoOpMapUnmapCallback MapUnmapCallback; static const uptr kFlags = 0; using AddressSpaceView = AddressSpaceViewTy; diff --git a/libsanitizer/lsan/lsan_common.cpp b/libsanitizer/lsan/lsan_common.cpp index ae29e4a..8b1af5b 100644 --- a/libsanitizer/lsan/lsan_common.cpp +++ b/libsanitizer/lsan/lsan_common.cpp @@ -34,15 +34,13 @@ # else # define OBJC_DATA_MASK 0x00007ffffffffff8UL # endif -// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L139 -# define OBJC_FAST_IS_RW 0x8000000000000000UL # endif namespace __lsan { // This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and // also to protect the global list of root regions. -Mutex global_mutex; +static Mutex global_mutex; Flags lsan_flags; @@ -173,13 +171,11 @@ static uptr GetCallerPC(const StackTrace &stack) { } # if SANITIZER_APPLE -// Objective-C class data pointers are stored with flags in the low bits, so -// they need to be transformed back into something that looks like a pointer. -static inline void *MaybeTransformPointer(void *p) { +// Several pointers in the Objective-C runtime (method cache and class_rw_t, +// for example) are tagged with additional bits we need to strip. 
+static inline void *TransformPointer(void *p) { uptr ptr = reinterpret_cast<uptr>(p); - if ((ptr & OBJC_FAST_IS_RW) == OBJC_FAST_IS_RW) - ptr &= OBJC_DATA_MASK; - return reinterpret_cast<void *>(ptr); + return reinterpret_cast<void *>(ptr & OBJC_DATA_MASK); } # endif @@ -241,12 +237,6 @@ static LeakSuppressionContext *GetSuppressionContext() { return suppression_ctx; } -static InternalMmapVectorNoCtor<RootRegion> root_regions; - -InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions() { - return &root_regions; -} - void InitCommonLsan() { if (common_flags()->detect_leaks) { // Initialization which can fail or print warnings should only be done if @@ -270,9 +260,14 @@ static inline bool MaybeUserPointer(uptr p) { if (p < kMinAddress) return false; # if defined(__x86_64__) - // TODO: add logic similar to ARM when Intel LAM is available. - // Accept only canonical form user-space addresses. - return ((p >> 47) == 0); + // TODO: support LAM48 and 5 level page tables. + // LAM_U57 mask format + // * top byte: 0x81 because the format is: [0] [6-bit tag] [0] + // * top-1 byte: 0xff because it should be 0 + // * top-2 byte: 0x80 because Linux uses 128 TB VMA ending at 0x7fffffffffff + constexpr uptr kLAM_U57Mask = 0x81ff80; + constexpr uptr kPointerMask = kLAM_U57Mask << 40; + return ((p & kPointerMask) == 0); # elif defined(__mips64) return ((p >> 40) == 0); # elif defined(__aarch64__) @@ -307,7 +302,7 @@ void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier, for (; pp + sizeof(void *) <= end; pp += alignment) { void *p = *reinterpret_cast<void **>(pp); # if SANITIZER_APPLE - p = MaybeTransformPointer(p); + p = TransformPointer(p); # endif if (!MaybeUserPointer(reinterpret_cast<uptr>(p))) continue; @@ -527,38 +522,52 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads, # endif // SANITIZER_FUCHSIA -void ScanRootRegion(Frontier *frontier, const RootRegion &root_region, - uptr region_begin, uptr region_end, bool is_readable) { - 
uptr intersection_begin = Max(root_region.begin, region_begin); - uptr intersection_end = Min(region_end, root_region.begin + root_region.size); - if (intersection_begin >= intersection_end) - return; - LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n", - (void *)root_region.begin, - (void *)(root_region.begin + root_region.size), - (void *)region_begin, (void *)region_end, - is_readable ? "readable" : "unreadable"); - if (is_readable) - ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT", - kReachable); +// A map that contains [region_begin, region_end) pairs. +using RootRegions = DenseMap<detail::DenseMapPair<uptr, uptr>, uptr>; + +static RootRegions &GetRootRegionsLocked() { + global_mutex.CheckLocked(); + static RootRegions *regions = nullptr; + alignas(RootRegions) static char placeholder[sizeof(RootRegions)]; + if (!regions) + regions = new (placeholder) RootRegions(); + return *regions; } -static void ProcessRootRegion(Frontier *frontier, - const RootRegion &root_region) { - MemoryMappingLayout proc_maps(/*cache_enabled*/ true); - MemoryMappedSegment segment; - while (proc_maps.Next(&segment)) { - ScanRootRegion(frontier, root_region, segment.start, segment.end, - segment.IsReadable()); +bool HasRootRegions() { return !GetRootRegionsLocked().empty(); } + +void ScanRootRegions(Frontier *frontier, + const InternalMmapVectorNoCtor<Region> &mapped_regions) { + if (!flags()->use_root_regions) + return; + + InternalMmapVector<Region> regions; + GetRootRegionsLocked().forEach([&](const auto &kv) { + regions.push_back({kv.first.first, kv.first.second}); + return true; + }); + + InternalMmapVector<Region> intersection; + Intersect(mapped_regions, regions, intersection); + + for (const Region &r : intersection) { + LOG_POINTERS("Root region intersects with mapped region at %p-%p\n", + (void *)r.begin, (void *)r.end); + ScanRangeForPointers(r.begin, r.end, frontier, "ROOT", kReachable); } } // Scans root regions for 
heap pointers. static void ProcessRootRegions(Frontier *frontier) { - if (!flags()->use_root_regions) + if (!flags()->use_root_regions || !HasRootRegions()) return; - for (uptr i = 0; i < root_regions.size(); i++) - ProcessRootRegion(frontier, root_regions[i]); + MemoryMappingLayout proc_maps(/*cache_enabled*/ true); + MemoryMappedSegment segment; + InternalMmapVector<Region> mapped_regions; + while (proc_maps.Next(&segment)) + if (segment.IsReadable()) + mapped_regions.push_back({segment.start, segment.end}); + ScanRootRegions(frontier, mapped_regions); } static void FloodFillTag(Frontier *frontier, ChunkTag tag) { @@ -941,8 +950,8 @@ void LeakReport::PrintSummary() { allocations += leaks_[i].hit_count; } InternalScopedString summary; - summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes, - allocations); + summary.AppendF("%zu byte(s) leaked in %zu allocation(s).", bytes, + allocations); ReportErrorSummary(summary.data()); } @@ -1013,36 +1022,37 @@ void __lsan_ignore_object(const void *p) { SANITIZER_INTERFACE_ATTRIBUTE void __lsan_register_root_region(const void *begin, uptr size) { #if CAN_SANITIZE_LEAKS - Lock l(&global_mutex); - RootRegion region = {reinterpret_cast<uptr>(begin), size}; - root_regions.push_back(region); VReport(1, "Registered root region at %p of size %zu\n", begin, size); + uptr b = reinterpret_cast<uptr>(begin); + uptr e = b + size; + CHECK_LT(b, e); + + Lock l(&global_mutex); + ++GetRootRegionsLocked()[{b, e}]; #endif // CAN_SANITIZE_LEAKS } SANITIZER_INTERFACE_ATTRIBUTE void __lsan_unregister_root_region(const void *begin, uptr size) { #if CAN_SANITIZE_LEAKS - Lock l(&global_mutex); - bool removed = false; - for (uptr i = 0; i < root_regions.size(); i++) { - RootRegion region = root_regions[i]; - if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) { - removed = true; - uptr last_index = root_regions.size() - 1; - root_regions[i] = root_regions[last_index]; - root_regions.pop_back(); - VReport(1, 
"Unregistered root region at %p of size %zu\n", begin, size); - break; + uptr b = reinterpret_cast<uptr>(begin); + uptr e = b + size; + CHECK_LT(b, e); + VReport(1, "Unregistered root region at %p of size %zu\n", begin, size); + + { + Lock l(&global_mutex); + if (auto *f = GetRootRegionsLocked().find({b, e})) { + if (--(f->second) == 0) + GetRootRegionsLocked().erase(f); + return; } } - if (!removed) { - Report( - "__lsan_unregister_root_region(): region at %p of size %zu has not " - "been registered.\n", - begin, size); - Die(); - } + Report( + "__lsan_unregister_root_region(): region at %p of size %zu has not " + "been registered.\n", + begin, size); + Die(); #endif // CAN_SANITIZE_LEAKS } diff --git a/libsanitizer/lsan/lsan_common.h b/libsanitizer/lsan/lsan_common.h index a1f2d1a..d3e7683 100644 --- a/libsanitizer/lsan/lsan_common.h +++ b/libsanitizer/lsan/lsan_common.h @@ -18,6 +18,7 @@ #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_platform.h" +#include "sanitizer_common/sanitizer_range.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_stoptheworld.h" #include "sanitizer_common/sanitizer_symbolizer.h" @@ -79,11 +80,6 @@ enum IgnoreObjectResult { kIgnoreObjectInvalid }; -struct Range { - uptr begin; - uptr end; -}; - //// -------------------------------------------------------------------------- //// Poisoning prototypes. //// -------------------------------------------------------------------------- @@ -96,8 +92,8 @@ bool WordIsPoisoned(uptr addr); //// -------------------------------------------------------------------------- // Wrappers for ThreadRegistry access. 
-void LockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS; -void UnlockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS; +void LockThreads() SANITIZER_NO_THREAD_SAFETY_ANALYSIS; +void UnlockThreads() SANITIZER_NO_THREAD_SAFETY_ANALYSIS; // If called from the main thread, updates the main thread's TID in the thread // registry. We need this to handle processes that fork() without a subsequent // exec(), which invalidates the recorded TID. To update it, we must call @@ -160,13 +156,13 @@ IgnoreObjectResult IgnoreObject(const void *p); struct ScopedStopTheWorldLock { ScopedStopTheWorldLock() { - LockThreadRegistry(); + LockThreads(); LockAllocator(); } ~ScopedStopTheWorldLock() { UnlockAllocator(); - UnlockThreadRegistry(); + UnlockThreads(); } ScopedStopTheWorldLock &operator=(const ScopedStopTheWorldLock &) = delete; @@ -239,11 +235,6 @@ void InitializePlatformSpecificModules(); void ProcessGlobalRegions(Frontier *frontier); void ProcessPlatformSpecificAllocations(Frontier *frontier); -struct RootRegion { - uptr begin; - uptr size; -}; - // LockStuffAndStopTheWorld can start to use Scan* calls to collect into // this Frontier vector before the StopTheWorldCallback actually runs. // This is used when the OS has a unified callback API for suspending @@ -256,9 +247,11 @@ struct CheckForLeaksParam { bool success = false; }; -InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions(); -void ScanRootRegion(Frontier *frontier, RootRegion const ®ion, - uptr region_begin, uptr region_end, bool is_readable); +using Region = Range; + +bool HasRootRegions(); +void ScanRootRegions(Frontier *frontier, + const InternalMmapVectorNoCtor<Region> ®ion); // Run stoptheworld while holding any platform-specific locks, as well as the // allocator and thread registry locks. 
void LockStuffAndStopTheWorld(StopTheWorldCallback callback, diff --git a/libsanitizer/lsan/lsan_common_fuchsia.cpp b/libsanitizer/lsan/lsan_common_fuchsia.cpp index bcad1c2..cb3fe1f 100644 --- a/libsanitizer/lsan/lsan_common_fuchsia.cpp +++ b/libsanitizer/lsan/lsan_common_fuchsia.cpp @@ -119,7 +119,8 @@ void LockStuffAndStopTheWorld(StopTheWorldCallback callback, auto i = __sanitizer::InternalLowerBound(params->allocator_caches, begin); if (i < params->allocator_caches.size() && params->allocator_caches[i] >= begin && - end - params->allocator_caches[i] <= sizeof(AllocatorCache)) { + params->allocator_caches[i] <= end && + end - params->allocator_caches[i] >= sizeof(AllocatorCache)) { // Split the range in two and omit the allocator cache within. ScanRangeForPointers(begin, params->allocator_caches[i], ¶ms->argument->frontier, "TLS", kReachable); diff --git a/libsanitizer/lsan/lsan_common_mac.cpp b/libsanitizer/lsan/lsan_common_mac.cpp index 9ccf098..4e51989 100644 --- a/libsanitizer/lsan/lsan_common_mac.cpp +++ b/libsanitizer/lsan/lsan_common_mac.cpp @@ -165,7 +165,8 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) { vm_address_t address = 0; kern_return_t err = KERN_SUCCESS; - InternalMmapVectorNoCtor<RootRegion> const *root_regions = GetRootRegions(); + InternalMmapVector<Region> mapped_regions; + bool use_root_regions = flags()->use_root_regions && HasRootRegions(); RegionScanState scan_state; while (err == KERN_SUCCESS) { @@ -203,8 +204,7 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) { // Recursing over the full memory map is very slow, break out // early if we don't need the full iteration. 
- if (scan_state.seen_regions == SeenRegion::All && - !(flags()->use_root_regions && root_regions->size() > 0)) { + if (scan_state.seen_regions == SeenRegion::All && !use_root_regions) { break; } @@ -215,15 +215,12 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) { // // TODO(fjricci) - remove this once sanitizer_procmaps_mac has the same // behavior as sanitizer_procmaps_linux and traverses all memory regions - if (flags()->use_root_regions) { - for (uptr i = 0; i < root_regions->size(); i++) { - ScanRootRegion(frontier, (*root_regions)[i], address, end_address, - info.protection & kProtectionRead); - } - } + if (use_root_regions && (info.protection & kProtectionRead)) + mapped_regions.push_back({address, end_address}); address = end_address; } + ScanRootRegions(frontier, mapped_regions); } // On darwin, we can intercept _exit gracefully, and return a failing exit code diff --git a/libsanitizer/lsan/lsan_interceptors.cpp b/libsanitizer/lsan/lsan_interceptors.cpp index 3f8ef3f..885f7ad 100644 --- a/libsanitizer/lsan/lsan_interceptors.cpp +++ b/libsanitizer/lsan/lsan_interceptors.cpp @@ -197,7 +197,7 @@ INTERCEPTOR(void*, pvalloc, uptr size) { #endif // SANITIZER_INTERCEPT_PVALLOC #if SANITIZER_INTERCEPT_CFREE -INTERCEPTOR(void, cfree, void *p) ALIAS(WRAPPER_NAME(free)); +INTERCEPTOR(void, cfree, void *p) ALIAS(WRAP(free)); #define LSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree) #else #define LSAN_MAYBE_INTERCEPT_CFREE @@ -415,16 +415,10 @@ INTERCEPTOR(char *, strerror, int errnum) { #if SANITIZER_POSIX -struct ThreadParam { - void *(*callback)(void *arg); - void *param; - atomic_uintptr_t tid; -}; - -extern "C" void *__lsan_thread_start_func(void *arg) { - ThreadParam *p = (ThreadParam*)arg; - void* (*callback)(void *arg) = p->callback; - void *param = p->param; +template <bool Detached> +static void *ThreadStartFunc(void *arg) { + u32 parent_tid = (uptr)arg; + uptr tid = ThreadCreate(parent_tid, Detached); // Wait until the last iteration to 
maximize the chance that we are the last // destructor to run. #if !SANITIZER_NETBSD && !SANITIZER_FREEBSD @@ -433,55 +427,103 @@ extern "C" void *__lsan_thread_start_func(void *arg) { Report("LeakSanitizer: failed to set thread key.\n"); Die(); } -#endif - int tid = 0; - while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0) - internal_sched_yield(); +# endif ThreadStart(tid, GetTid()); - atomic_store(&p->tid, 0, memory_order_release); - return callback(param); + auto self = GetThreadSelf(); + auto args = GetThreadArgRetval().GetArgs(self); + void *retval = (*args.routine)(args.arg_retval); + GetThreadArgRetval().Finish(self, retval); + return retval; } INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void *), void *param) { ENSURE_LSAN_INITED; EnsureMainThreadIDIsCorrect(); + + bool detached = [attr]() { + int d = 0; + return attr && !pthread_attr_getdetachstate(attr, &d) && IsStateDetached(d); + }(); + __sanitizer_pthread_attr_t myattr; if (!attr) { pthread_attr_init(&myattr); attr = &myattr; } AdjustStackSize(attr); - int detached = 0; - pthread_attr_getdetachstate(attr, &detached); - ThreadParam p; - p.callback = callback; - p.param = param; - atomic_store(&p.tid, 0, memory_order_relaxed); - int res; + uptr this_tid = GetCurrentThreadId(); + int result; { // Ignore all allocations made by pthread_create: thread stack/TLS may be // stored by pthread for future reuse even after thread destruction, and // the linked list it's stored in doesn't even hold valid pointers to the // objects, the latter are calculated by obscure pointer arithmetic. 
ScopedInterceptorDisabler disabler; - res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p); - } - if (res == 0) { - int tid = ThreadCreate(GetCurrentThreadId(), IsStateDetached(detached)); - CHECK_NE(tid, kMainTid); - atomic_store(&p.tid, tid, memory_order_release); - while (atomic_load(&p.tid, memory_order_acquire) != 0) - internal_sched_yield(); + GetThreadArgRetval().Create(detached, {callback, param}, [&]() -> uptr { + result = REAL(pthread_create)( + th, attr, detached ? ThreadStartFunc<true> : ThreadStartFunc<false>, + (void *)this_tid); + return result ? 0 : *(uptr *)(th); + }); } if (attr == &myattr) pthread_attr_destroy(&myattr); - return res; + return result; } -INTERCEPTOR(int, pthread_join, void *t, void **arg) { - return REAL(pthread_join)(t, arg); +INTERCEPTOR(int, pthread_join, void *thread, void **retval) { + int result; + GetThreadArgRetval().Join((uptr)thread, [&]() { + result = REAL(pthread_join)(thread, retval); + return !result; + }); + return result; +} + +INTERCEPTOR(int, pthread_detach, void *thread) { + int result; + GetThreadArgRetval().Detach((uptr)thread, [&]() { + result = REAL(pthread_detach)(thread); + return !result; + }); + return result; +} + +INTERCEPTOR(void, pthread_exit, void *retval) { + GetThreadArgRetval().Finish(GetThreadSelf(), retval); + REAL(pthread_exit)(retval); +} + +# if SANITIZER_INTERCEPT_TRYJOIN +INTERCEPTOR(int, pthread_tryjoin_np, void *thread, void **ret) { + int result; + GetThreadArgRetval().Join((uptr)thread, [&]() { + result = REAL(pthread_tryjoin_np)(thread, ret); + return !result; + }); + return result; +} +# define LSAN_MAYBE_INTERCEPT_TRYJOIN INTERCEPT_FUNCTION(pthread_tryjoin_np) +# else +# define LSAN_MAYBE_INTERCEPT_TRYJOIN +# endif // SANITIZER_INTERCEPT_TRYJOIN + +# if SANITIZER_INTERCEPT_TIMEDJOIN +INTERCEPTOR(int, pthread_timedjoin_np, void *thread, void **ret, + const struct timespec *abstime) { + int result; + GetThreadArgRetval().Join((uptr)thread, [&]() { + result = 
REAL(pthread_timedjoin_np)(thread, ret, abstime); + return !result; + }); + return result; } +# define LSAN_MAYBE_INTERCEPT_TIMEDJOIN \ + INTERCEPT_FUNCTION(pthread_timedjoin_np) +# else +# define LSAN_MAYBE_INTERCEPT_TIMEDJOIN +# endif // SANITIZER_INTERCEPT_TIMEDJOIN DEFINE_REAL_PTHREAD_FUNCTIONS @@ -491,6 +533,7 @@ INTERCEPTOR(void, _exit, int status) { } #define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name) +#define SIGNAL_INTERCEPTOR_ENTER() ENSURE_LSAN_INITED #include "sanitizer_common/sanitizer_signal_interceptors.inc" #endif // SANITIZER_POSIX @@ -518,6 +561,10 @@ void InitializeInterceptors() { LSAN_MAYBE_INTERCEPT_MALLOPT; INTERCEPT_FUNCTION(pthread_create); INTERCEPT_FUNCTION(pthread_join); + INTERCEPT_FUNCTION(pthread_detach); + INTERCEPT_FUNCTION(pthread_exit); + LSAN_MAYBE_INTERCEPT_TIMEDJOIN; + LSAN_MAYBE_INTERCEPT_TRYJOIN; INTERCEPT_FUNCTION(_exit); LSAN_MAYBE_INTERCEPT__LWP_EXIT; diff --git a/libsanitizer/lsan/lsan_mac.cpp b/libsanitizer/lsan/lsan_mac.cpp index 2bcd005..990954a 100644 --- a/libsanitizer/lsan/lsan_mac.cpp +++ b/libsanitizer/lsan/lsan_mac.cpp @@ -80,7 +80,7 @@ extern "C" void lsan_dispatch_call_block_and_release(void *block) { VReport(2, "lsan_dispatch_call_block_and_release(): " "context: %p, pthread_self: %p\n", - block, pthread_self()); + block, (void*)pthread_self()); lsan_register_worker_thread(context->parent_tid); // Call the original dispatcher for the block. 
context->func(context->block); diff --git a/libsanitizer/lsan/lsan_thread.cpp b/libsanitizer/lsan/lsan_thread.cpp index 9da42f3..8aa3111 100644 --- a/libsanitizer/lsan/lsan_thread.cpp +++ b/libsanitizer/lsan/lsan_thread.cpp @@ -24,6 +24,7 @@ namespace __lsan { static ThreadRegistry *thread_registry; +static ThreadArgRetval *thread_arg_retval; static Mutex mu_for_thread_context; static LowLevelAllocator allocator_for_thread_context; @@ -33,16 +34,26 @@ static ThreadContextBase *CreateThreadContext(u32 tid) { return new (allocator_for_thread_context) ThreadContext(tid); } -void InitializeThreadRegistry() { - static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)]; +void InitializeThreads() { + static ALIGNED(alignof( + ThreadRegistry)) char thread_registry_placeholder[sizeof(ThreadRegistry)]; thread_registry = new (thread_registry_placeholder) ThreadRegistry(CreateThreadContext); + + static ALIGNED(alignof(ThreadArgRetval)) char + thread_arg_retval_placeholder[sizeof(ThreadArgRetval)]; + thread_arg_retval = new (thread_arg_retval_placeholder) ThreadArgRetval(); } +ThreadArgRetval &GetThreadArgRetval() { return *thread_arg_retval; } + ThreadContextLsanBase::ThreadContextLsanBase(int tid) : ThreadContextBase(tid) {} -void ThreadContextLsanBase::OnStarted(void *arg) { SetCurrentThread(this); } +void ThreadContextLsanBase::OnStarted(void *arg) { + SetCurrentThread(this); + AllocatorThreadStart(); +} void ThreadContextLsanBase::OnFinished() { AllocatorThreadFinish(); @@ -72,9 +83,15 @@ void GetThreadExtraStackRangesLocked(tid_t os_id, InternalMmapVector<Range> *ranges) {} void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {} -void LockThreadRegistry() { thread_registry->Lock(); } +void LockThreads() { + thread_registry->Lock(); + thread_arg_retval->Lock(); +} -void UnlockThreadRegistry() { thread_registry->Unlock(); } +void UnlockThreads() { + thread_arg_retval->Unlock(); + thread_registry->Unlock(); +} ThreadRegistry 
*GetLsanThreadRegistryLocked() { thread_registry->CheckLocked(); @@ -92,4 +109,8 @@ void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) { threads); } +void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) { + GetThreadArgRetval().GetAllPtrsLocked(ptrs); +} + } // namespace __lsan diff --git a/libsanitizer/lsan/lsan_thread.h b/libsanitizer/lsan/lsan_thread.h index 709a029..222066e 100644 --- a/libsanitizer/lsan/lsan_thread.h +++ b/libsanitizer/lsan/lsan_thread.h @@ -14,6 +14,7 @@ #ifndef LSAN_THREAD_H #define LSAN_THREAD_H +#include "sanitizer_common/sanitizer_thread_arg_retval.h" #include "sanitizer_common/sanitizer_thread_registry.h" namespace __lsan { @@ -43,10 +44,11 @@ class ThreadContextLsanBase : public ThreadContextBase { // This subclass of ThreadContextLsanBase is declared in an OS-specific header. class ThreadContext; -void InitializeThreadRegistry(); +void InitializeThreads(); void InitializeMainThread(); ThreadRegistry *GetLsanThreadRegistryLocked(); +ThreadArgRetval &GetThreadArgRetval(); u32 ThreadCreate(u32 tid, bool detached, void *arg = nullptr); void ThreadFinish(); |