diff options
author | Martin Liska <mliska@suse.cz> | 2022-11-15 11:11:41 +0100 |
---|---|---|
committer | Martin Liska <mliska@suse.cz> | 2022-11-15 16:44:55 +0100 |
commit | 5f3fa2655cb256d336e90e74e42a2452d0fbf0e8 (patch) | |
tree | 374004f2a4a03bed153fff6a5d4ec156d105e0d9 /libsanitizer/lsan | |
parent | d1288d850944f69a795e4ff444a427eba3fec11b (diff) | |
download | gcc-5f3fa2655cb256d336e90e74e42a2452d0fbf0e8.zip gcc-5f3fa2655cb256d336e90e74e42a2452d0fbf0e8.tar.gz gcc-5f3fa2655cb256d336e90e74e42a2452d0fbf0e8.tar.bz2 |
libsanitizer: merge from upstream ae59131d3ef311fb4b1e50627c6457be00e60dc9
Diffstat (limited to 'libsanitizer/lsan')
-rw-r--r-- | libsanitizer/lsan/lsan_common.cpp | 49 | ||||
-rw-r--r-- | libsanitizer/lsan/lsan_common.h | 2 | ||||
-rw-r--r-- | libsanitizer/lsan/lsan_common_mac.cpp | 74 |
3 files changed, 100 insertions, 25 deletions
diff --git a/libsanitizer/lsan/lsan_common.cpp b/libsanitizer/lsan/lsan_common.cpp index 94bb3cc..5762746 100644 --- a/libsanitizer/lsan/lsan_common.cpp +++ b/libsanitizer/lsan/lsan_common.cpp @@ -26,6 +26,18 @@ #include "sanitizer_common/sanitizer_tls_get_addr.h" #if CAN_SANITIZE_LEAKS + +# if SANITIZER_APPLE +// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L127 +# if SANITIZER_IOS && !SANITIZER_IOSSIM +# define OBJC_DATA_MASK 0x0000007ffffffff8UL +# else +# define OBJC_DATA_MASK 0x00007ffffffffff8UL +# endif +// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L139 +# define OBJC_FAST_IS_RW 0x8000000000000000UL +# endif + namespace __lsan { // This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and @@ -160,6 +172,17 @@ static uptr GetCallerPC(const StackTrace &stack) { return 0; } +# if SANITIZER_APPLE +// Objective-C class data pointers are stored with flags in the low bits, so +// they need to be transformed back into something that looks like a pointer. +static inline void *MaybeTransformPointer(void *p) { + uptr ptr = reinterpret_cast<uptr>(p); + if ((ptr & OBJC_FAST_IS_RW) == OBJC_FAST_IS_RW) + ptr &= OBJC_DATA_MASK; + return reinterpret_cast<void *>(ptr); +} +# endif + // On Linux, treats all chunks allocated from ld-linux.so as reachable, which // covers dynamically allocated TLS blocks, internal dynamic loader's loaded // modules accounting etc. 
@@ -276,6 +299,9 @@ void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier, pp = pp + alignment - pp % alignment; for (; pp + sizeof(void *) <= end; pp += alignment) { void *p = *reinterpret_cast<void **>(pp); +# if SANITIZER_APPLE + p = MaybeTransformPointer(p); +# endif if (!MaybeUserPointer(reinterpret_cast<uptr>(p))) continue; uptr chunk = PointsIntoChunk(p); @@ -332,7 +358,8 @@ void ForEachExtraStackRangeCb(uptr begin, uptr end, void *arg) { # if SANITIZER_FUCHSIA // Fuchsia handles all threads together with its own callback. -static void ProcessThreads(SuspendedThreadsList const &, Frontier *) {} +static void ProcessThreads(SuspendedThreadsList const &, Frontier *, tid_t, + uptr) {} # else @@ -365,7 +392,8 @@ static void ProcessThreadRegistry(Frontier *frontier) { // Scans thread data (stacks and TLS) for heap pointers. static void ProcessThreads(SuspendedThreadsList const &suspended_threads, - Frontier *frontier) { + Frontier *frontier, tid_t caller_tid, + uptr caller_sp) { InternalMmapVector<uptr> registers; for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) { tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i)); @@ -392,6 +420,9 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads, continue; sp = stack_begin; } + if (suspended_threads.GetThreadID(i) == caller_tid) { + sp = caller_sp; + } if (flags()->use_registers && have_registers) { uptr registers_begin = reinterpret_cast<uptr>(registers.data()); @@ -572,7 +603,8 @@ static void CollectIgnoredCb(uptr chunk, void *arg) { // Sets the appropriate tag on each chunk. 
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads, - Frontier *frontier) { + Frontier *frontier, tid_t caller_tid, + uptr caller_sp) { const InternalMmapVector<u32> &suppressed_stacks = GetSuppressionContext()->GetSortedSuppressedStacks(); if (!suppressed_stacks.empty()) { @@ -581,7 +613,7 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads, } ForEachChunk(CollectIgnoredCb, frontier); ProcessGlobalRegions(frontier); - ProcessThreads(suspended_threads, frontier); + ProcessThreads(suspended_threads, frontier, caller_tid, caller_sp); ProcessRootRegions(frontier); FloodFillTag(frontier, kReachable); @@ -677,7 +709,8 @@ static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads, CHECK(param); CHECK(!param->success); ReportUnsuspendedThreads(suspended_threads); - ClassifyAllChunks(suspended_threads, &param->frontier); + ClassifyAllChunks(suspended_threads, &param->frontier, param->caller_tid, + param->caller_sp); ForEachChunk(CollectLeaksCb, &param->leaks); // Clean up for subsequent leak checks. This assumes we did not overwrite any // kIgnored tags. @@ -716,6 +749,12 @@ static bool CheckForLeaks() { for (int i = 0;; ++i) { EnsureMainThreadIDIsCorrect(); CheckForLeaksParam param; + // Capture calling thread's stack pointer early, to avoid false negatives. + // Old frame with dead pointers might be overlapped by new frame inside + // CheckForLeaks which does not use bytes with pointers before the + // threads are suspended and stack pointers captured. 
+ param.caller_tid = GetTid(); + param.caller_sp = reinterpret_cast<uptr>(__builtin_frame_address(0)); LockStuffAndStopTheWorld(CheckForLeaksCallback, &param); if (!param.success) { Report("LeakSanitizer has encountered a fatal error.\n"); diff --git a/libsanitizer/lsan/lsan_common.h b/libsanitizer/lsan/lsan_common.h index d715375..20ef7c4 100644 --- a/libsanitizer/lsan/lsan_common.h +++ b/libsanitizer/lsan/lsan_common.h @@ -145,6 +145,8 @@ struct RootRegion { struct CheckForLeaksParam { Frontier frontier; LeakedChunks leaks; + tid_t caller_tid; + uptr caller_sp; bool success = false; }; diff --git a/libsanitizer/lsan/lsan_common_mac.cpp b/libsanitizer/lsan/lsan_common_mac.cpp index 26b623f..b6b1509 100644 --- a/libsanitizer/lsan/lsan_common_mac.cpp +++ b/libsanitizer/lsan/lsan_common_mac.cpp @@ -17,21 +17,36 @@ #if CAN_SANITIZE_LEAKS && SANITIZER_APPLE -#include "sanitizer_common/sanitizer_allocator_internal.h" -#include "lsan_allocator.h" +# include <mach/mach.h> +# include <mach/vm_statistics.h> +# include <pthread.h> -#include <pthread.h> +# include "lsan_allocator.h" +# include "sanitizer_common/sanitizer_allocator_internal.h" +namespace __lsan { -#include <mach/mach.h> +enum class SeenRegion { + None = 0, + AllocOnce = 1 << 0, + LibDispatch = 1 << 1, + Foundation = 1 << 2, + All = AllocOnce | LibDispatch | Foundation +}; + +inline SeenRegion operator|(SeenRegion left, SeenRegion right) { + return static_cast<SeenRegion>(static_cast<int>(left) | + static_cast<int>(right)); +} -// Only introduced in Mac OS X 10.9. 
-#ifdef VM_MEMORY_OS_ALLOC_ONCE -static const int kSanitizerVmMemoryOsAllocOnce = VM_MEMORY_OS_ALLOC_ONCE; -#else -static const int kSanitizerVmMemoryOsAllocOnce = 73; -#endif +inline SeenRegion &operator|=(SeenRegion &left, const SeenRegion &right) { + left = left | right; + return left; +} -namespace __lsan { +struct RegionScanState { + SeenRegion seen_regions = SeenRegion::None; + bool in_libdispatch = false; +}; typedef struct { int disable_counter; @@ -148,6 +163,7 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) { InternalMmapVectorNoCtor<RootRegion> const *root_regions = GetRootRegions(); + RegionScanState scan_state; while (err == KERN_SUCCESS) { vm_size_t size = 0; unsigned depth = 1; @@ -157,17 +173,35 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) { (vm_region_info_t)&info, &count); uptr end_address = address + size; - - // libxpc stashes some pointers in the Kernel Alloc Once page, - // make sure not to report those as leaks. - if (info.user_tag == kSanitizerVmMemoryOsAllocOnce) { + if (info.user_tag == VM_MEMORY_OS_ALLOC_ONCE) { + // libxpc stashes some pointers in the Kernel Alloc Once page, + // make sure not to report those as leaks. + scan_state.seen_regions |= SeenRegion::AllocOnce; ScanRangeForPointers(address, end_address, frontier, "GLOBAL", kReachable); + } else if (info.user_tag == VM_MEMORY_FOUNDATION) { + // Objective-C block trampolines use the Foundation region. + scan_state.seen_regions |= SeenRegion::Foundation; + ScanRangeForPointers(address, end_address, frontier, "GLOBAL", + kReachable); + } else if (info.user_tag == VM_MEMORY_LIBDISPATCH) { + // Dispatch continuations use the libdispatch region. Empirically, there + // can be more than one region with this tag, so we'll optimistically + // assume that they're contiguous. Otherwise, we would need to scan every + // region to ensure we find them all. 
+ scan_state.in_libdispatch = true; + ScanRangeForPointers(address, end_address, frontier, "GLOBAL", + kReachable); + } else if (scan_state.in_libdispatch) { + scan_state.seen_regions |= SeenRegion::LibDispatch; + scan_state.in_libdispatch = false; + } - // Recursing over the full memory map is very slow, break out - // early if we don't need the full iteration. - if (!flags()->use_root_regions || !root_regions->size()) - break; + // Recursing over the full memory map is very slow, break out + // early if we don't need the full iteration. + if (scan_state.seen_regions == SeenRegion::All && + !(flags()->use_root_regions && root_regions->size() > 0)) { + break; } // This additional root region scan is required on Darwin in order to @@ -199,6 +233,6 @@ void LockStuffAndStopTheWorld(StopTheWorldCallback callback, StopTheWorld(callback, argument); } -} // namespace __lsan +} // namespace __lsan #endif // CAN_SANITIZE_LEAKS && SANITIZER_APPLE |