author     Martin Liska <mliska@suse.cz>        2018-10-31 12:14:23 +0100
committer  Martin Liska <marxin@gcc.gnu.org>    2018-10-31 11:14:23 +0000
commit     eac975312214dccb9d425b3e8d0b93e85c11ddf4 (patch)
tree       431086825b095506e32d4c0b0e479c62254e2b69 /libsanitizer/lsan
parent     95fba530b6f68f336090abb5699ae9f24d1e22e6 (diff)
backport: All source files: Merge from upstream 345033.
Merge from upstream 345033.

2018-10-31  Martin Liska  <mliska@suse.cz>

        * All source files: Merge from upstream 345033.

From-SVN: r265665
Diffstat (limited to 'libsanitizer/lsan')
-rw-r--r--  libsanitizer/lsan/lsan.cc                 9
-rw-r--r--  libsanitizer/lsan/lsan.h                 12
-rw-r--r--  libsanitizer/lsan/lsan_allocator.cc      74
-rw-r--r--  libsanitizer/lsan/lsan_allocator.h       15
-rw-r--r--  libsanitizer/lsan/lsan_common.cc         48
-rw-r--r--  libsanitizer/lsan/lsan_common.h          13
-rw-r--r--  libsanitizer/lsan/lsan_common_linux.cc   20
-rw-r--r--  libsanitizer/lsan/lsan_common_mac.cc     11
-rw-r--r--  libsanitizer/lsan/lsan_interceptors.cc   70
-rw-r--r--  libsanitizer/lsan/lsan_malloc_mac.cc      3
-rw-r--r--  libsanitizer/lsan/lsan_thread.cc          5
11 files changed, 205 insertions, 75 deletions
diff --git a/libsanitizer/lsan/lsan.cc b/libsanitizer/lsan/lsan.cc
index 7540aeb..e926110 100644
--- a/libsanitizer/lsan/lsan.cc
+++ b/libsanitizer/lsan/lsan.cc
@@ -64,16 +64,17 @@ static void InitializeFlags() {
if (Verbosity()) ReportUnrecognizedFlags();
if (common_flags()->help) parser.PrintFlagDescriptions();
+
+ __sanitizer_set_report_path(common_flags()->log_path);
}
static void OnStackUnwind(const SignalContext &sig, const void *,
BufferedStackTrace *stack) {
- GetStackTraceWithPcBpAndContext(stack, kStackTraceMax, sig.pc, sig.bp,
- sig.context,
- common_flags()->fast_unwind_on_fatal);
+ GetStackTrace(stack, kStackTraceMax, sig.pc, sig.bp, sig.context,
+ common_flags()->fast_unwind_on_fatal);
}
-void LsanOnDeadlySignal(int signo, void *siginfo, void *context) {
+static void LsanOnDeadlySignal(int signo, void *siginfo, void *context) {
HandleDeadlySignal(siginfo, context, GetCurrentThread(), &OnStackUnwind,
nullptr);
}
diff --git a/libsanitizer/lsan/lsan.h b/libsanitizer/lsan/lsan.h
index 42d4214..a40493c 100644
--- a/libsanitizer/lsan/lsan.h
+++ b/libsanitizer/lsan/lsan.h
@@ -16,9 +16,8 @@
#define GET_STACK_TRACE(max_size, fast) \
__sanitizer::BufferedStackTrace stack; \
- GetStackTraceWithPcBpAndContext(&stack, max_size, \
- StackTrace::GetCurrentPc(), \
- GET_CURRENT_FRAME(), nullptr, fast);
+ GetStackTrace(&stack, max_size, StackTrace::GetCurrentPc(), \
+ GET_CURRENT_FRAME(), nullptr, fast);
#define GET_STACK_TRACE_FATAL \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
@@ -44,10 +43,9 @@ void ReplaceSystemMalloc();
// The pc will be in the position 0 of the resulting stack trace.
// The bp may refer to the current frame or to the caller's frame.
ALWAYS_INLINE
-void GetStackTraceWithPcBpAndContext(__sanitizer::BufferedStackTrace *stack,
- __sanitizer::uptr max_depth,
- __sanitizer::uptr pc, __sanitizer::uptr bp,
- void *context, bool fast) {
+void GetStackTrace(__sanitizer::BufferedStackTrace *stack,
+ __sanitizer::uptr max_depth, __sanitizer::uptr pc,
+ __sanitizer::uptr bp, void *context, bool fast) {
uptr stack_top = 0, stack_bottom = 0;
ThreadContext *t;
if (fast && (t = CurrentThreadContext())) {
diff --git a/libsanitizer/lsan/lsan_allocator.cc b/libsanitizer/lsan/lsan_allocator.cc
index 9e16680..6b57c50 100644
--- a/libsanitizer/lsan/lsan_allocator.cc
+++ b/libsanitizer/lsan/lsan_allocator.cc
@@ -15,6 +15,7 @@
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
@@ -68,15 +69,27 @@ static void RegisterDeallocation(void *p) {
atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}
+static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
+ if (AllocatorMayReturnNull()) {
+ Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
+ return nullptr;
+ }
+ ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, &stack);
+}
+
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
bool cleared) {
if (size == 0)
size = 1;
- if (size > kMaxAllowedMallocSize) {
- Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
- return Allocator::FailureHandler::OnBadRequest();
- }
+ if (size > kMaxAllowedMallocSize)
+ return ReportAllocationSizeTooBig(size, stack);
void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
+ if (UNLIKELY(!p)) {
+ SetAllocatorOutOfMemory();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportOutOfMemory(size, &stack);
+ }
// Do not rely on the allocator to clear the memory (it's slow).
if (cleared && allocator.FromPrimary(p))
memset(p, 0, size);
@@ -87,8 +100,11 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
}
static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
- if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
- return Allocator::FailureHandler::OnBadRequest();
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportCallocOverflow(nmemb, size, &stack);
+ }
size *= nmemb;
return Allocate(stack, size, 1, true);
}
@@ -104,9 +120,8 @@ void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
uptr alignment) {
RegisterDeallocation(p);
if (new_size > kMaxAllowedMallocSize) {
- Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
allocator.Deallocate(GetAllocatorCache(), p);
- return Allocator::FailureHandler::OnBadRequest();
+ return ReportAllocationSizeTooBig(new_size, stack);
}
p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
RegisterAllocation(stack, p, new_size);
@@ -124,10 +139,38 @@ uptr GetMallocUsableSize(const void *p) {
return m->requested_size;
}
+int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
+ const StackTrace &stack) {
+ if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
+ if (AllocatorMayReturnNull())
+ return errno_EINVAL;
+ ReportInvalidPosixMemalignAlignment(alignment, &stack);
+ }
+ void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
+ if (UNLIKELY(!ptr))
+ // OOM error is already taken care of by Allocate.
+ return errno_ENOMEM;
+ CHECK(IsAligned((uptr)ptr, alignment));
+ *memptr = ptr;
+ return 0;
+}
+
+void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) {
+ if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
+ }
+ return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
+}
+
void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
if (UNLIKELY(!IsPowerOfTwo(alignment))) {
errno = errno_EINVAL;
- return Allocator::FailureHandler::OnBadRequest();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportInvalidAllocationAlignment(alignment, &stack);
}
return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}
@@ -153,6 +196,19 @@ void *lsan_valloc(uptr size, const StackTrace &stack) {
Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
}
+void *lsan_pvalloc(uptr size, const StackTrace &stack) {
+ uptr PageSize = GetPageSizeCached();
+ if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
+ errno = errno_ENOMEM;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportPvallocOverflow(size, &stack);
+ }
+ // pvalloc(0) should allocate one page.
+ size = size ? RoundUpTo(size, PageSize) : PageSize;
+ return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
+}
+
uptr lsan_mz_size(const void *p) {
return GetMallocUsableSize(p);
}
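
The allocator changes above drop Allocator::FailureHandler and instead branch on the allocator's may-return-null mode: oversized or failed allocations either warn and return nullptr, or report a fatal error. The following standalone sketch (not part of the patch; the names are illustrative, not sanitizer APIs) mirrors that control flow for the size-limit and out-of-memory cases.

// Illustrative sketch only -- not sanitizer code. Mirrors the control flow of
// the new Allocate()/ReportAllocationSizeTooBig() paths above.
#include <cstddef>
#include <cstdio>
#include <cstdlib>

static bool allocator_may_return_null = true;   // would come from LSAN_OPTIONS
static const size_t kMaxAllowedMallocSizeDemo = size_t(8) << 30;  // example cap

static void *report_size_too_big(size_t size) {
  if (allocator_may_return_null) {
    fprintf(stderr, "WARNING: failed to allocate 0x%zx bytes\n", size);
    return nullptr;                  // soft failure: caller just sees nullptr
  }
  fprintf(stderr, "allocation size 0x%zx exceeds the maximum supported size\n",
          size);
  abort();                           // hard failure when null is not allowed
}

static void *checked_allocate(size_t size) {
  if (size == 0) size = 1;
  if (size > kMaxAllowedMallocSizeDemo)
    return report_size_too_big(size);
  void *p = malloc(size);
  if (!p) {                          // genuine out-of-memory from the allocator
    if (allocator_may_return_null) return nullptr;
    fprintf(stderr, "out of memory allocating 0x%zx bytes\n", size);
    abort();
  }
  return p;
}

int main() {
  void *ok = checked_allocate(64);
  void *too_big = checked_allocate(kMaxAllowedMallocSizeDemo + 1);  // warns
  printf("ok=%p too_big=%p\n", ok, too_big);
  free(ok);
}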
diff --git a/libsanitizer/lsan/lsan_allocator.h b/libsanitizer/lsan/lsan_allocator.h
index b0c0ec2..37260c0 100644
--- a/libsanitizer/lsan/lsan_allocator.h
+++ b/libsanitizer/lsan/lsan_allocator.h
@@ -66,9 +66,16 @@ struct AP32 {
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#elif defined(__x86_64__) || defined(__powerpc64__)
+# if defined(__powerpc64__)
+const uptr kAllocatorSpace = 0xa0000000000ULL;
+const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
+# else
+const uptr kAllocatorSpace = 0x600000000000ULL;
+const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+# endif
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
- static const uptr kSpaceBeg = 0x600000000000ULL;
- static const uptr kSpaceSize = 0x40000000000ULL; // 4T.
+ static const uptr kSpaceBeg = kAllocatorSpace;
+ static const uptr kSpaceSize = kAllocatorSize;
static const uptr kMetadataSize = sizeof(ChunkMetadata);
typedef DefaultSizeClassMap SizeClassMap;
typedef NoOpMapUnmapCallback MapUnmapCallback;
@@ -81,12 +88,16 @@ typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
AllocatorCache *GetAllocatorCache();
+int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
+ const StackTrace &stack);
+void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack);
void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack);
void *lsan_malloc(uptr size, const StackTrace &stack);
void lsan_free(void *p);
void *lsan_realloc(void *p, uptr size, const StackTrace &stack);
void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack);
void *lsan_valloc(uptr size, const StackTrace &stack);
+void *lsan_pvalloc(uptr size, const StackTrace &stack);
uptr lsan_mz_size(const void *p);
} // namespace __lsan
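
The header now picks the primary allocator region per architecture, with a smaller region on powerpc64. The size comments in the hunk above can be verified with a few lines of arithmetic; a quick standalone check (not part of the patch):

// Quick check of the size comments on the new AP64 constants (not part of the
// patch): 0x20000000000 bytes is 2 TiB and 0x40000000000 bytes is 4 TiB.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t ppc64_size  = 0x20000000000ULL;  // kAllocatorSize, powerpc64
  const uint64_t x86_64_size = 0x40000000000ULL;  // kAllocatorSize, x86_64
  const uint64_t tib = 1ULL << 40;                // bytes per TiB
  printf("powerpc64 allocator region: %llu TiB\n",
         (unsigned long long)(ppc64_size / tib));   // prints 2
  printf("x86_64 allocator region:    %llu TiB\n",
         (unsigned long long)(x86_64_size / tib));  // prints 4
}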
diff --git a/libsanitizer/lsan/lsan_common.cc b/libsanitizer/lsan/lsan_common.cc
index 4afce9d..a4424a8 100644
--- a/libsanitizer/lsan/lsan_common.cc
+++ b/libsanitizer/lsan/lsan_common.cc
@@ -13,14 +13,15 @@
#include "lsan_common.h"
#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
-#include "sanitizer_common/sanitizer_report_decorator.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#if CAN_SANITIZE_LEAKS
@@ -102,7 +103,7 @@ InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }
void InitializeRootRegions() {
CHECK(!root_regions);
ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
- root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
+ root_regions = new (placeholder) InternalMmapVector<RootRegion>(); // NOLINT
}
const char *MaybeCallLsanDefaultOptions() {
@@ -212,9 +213,10 @@ void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
Frontier *frontier) {
- InternalScopedBuffer<uptr> registers(suspended_threads.RegisterCount());
+ InternalMmapVector<uptr> registers(suspended_threads.RegisterCount());
uptr registers_begin = reinterpret_cast<uptr>(registers.data());
- uptr registers_end = registers_begin + registers.size();
+ uptr registers_end =
+ reinterpret_cast<uptr>(registers.data() + registers.size());
for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
LOG_THREADS("Processing thread %d.\n", os_id);
@@ -409,8 +411,9 @@ static void MarkInvalidPCCb(uptr chunk, void *arg) {
}
}
-// On Linux, handles dynamically allocated TLS blocks by treating all chunks
-// allocated from ld-linux.so as reachable.
+// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
+// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
+// modules accounting etc.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
@@ -441,7 +444,7 @@ void ProcessPC(Frontier *frontier) {
// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
// Holds the flood fill frontier.
- Frontier frontier(1);
+ Frontier frontier;
ForEachChunk(CollectIgnoredCb, &frontier);
ProcessGlobalRegions(&frontier);
@@ -503,7 +506,7 @@ static void CollectLeaksCb(uptr chunk, void *arg) {
}
static void PrintMatchedSuppressions() {
- InternalMmapVector<Suppression *> matched(1);
+ InternalMmapVector<Suppression *> matched;
GetSuppressionContext()->GetMatched(&matched);
if (!matched.size())
return;
@@ -522,11 +525,36 @@ struct CheckForLeaksParam {
LeakReport leak_report;
};
+static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
+ const InternalMmapVector<tid_t> &suspended_threads =
+ *(const InternalMmapVector<tid_t> *)arg;
+ if (tctx->status == ThreadStatusRunning) {
+ uptr i = InternalLowerBound(suspended_threads, 0, suspended_threads.size(),
+ tctx->os_id, CompareLess<int>());
+ if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
+ Report("Running thread %d was not suspended. False leaks are possible.\n",
+ tctx->os_id);
+ };
+}
+
+static void ReportUnsuspendedThreads(
+ const SuspendedThreadsList &suspended_threads) {
+ InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
+ for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
+ threads[i] = suspended_threads.GetThreadID(i);
+
+ Sort(threads.data(), threads.size());
+
+ GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+ &ReportIfNotSuspended, &threads);
+}
+
static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
void *arg) {
CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
CHECK(param);
CHECK(!param->success);
+ ReportUnsuspendedThreads(suspended_threads);
ClassifyAllChunks(suspended_threads);
ForEachChunk(CollectLeaksCb, &param->leak_report);
// Clean up for subsequent leak checks. This assumes we did not overwrite any
@@ -681,7 +709,7 @@ void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
uptr unsuppressed_count = UnsuppressedLeakCount();
if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
Printf("The %zu top leak(s):\n", num_leaks_to_report);
- InternalSort(&leaks_, leaks_.size(), LeakComparator);
+ Sort(leaks_.data(), leaks_.size(), &LeakComparator);
uptr leaks_reported = 0;
for (uptr i = 0; i < leaks_.size(); i++) {
if (leaks_[i].is_suppressed) continue;
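
Among the lsan_common.cc changes, the new ReportUnsuspendedThreads() collects the suspended thread ids, sorts them, and binary-searches for each running thread, so threads missed by the stop-the-world machinery are called out before leak classification. A standalone sketch of that check, using the standard library instead of the sanitizer-internal Sort/InternalLowerBound helpers (not part of the patch):

// Sketch (not part of the patch): warn about every running thread whose id is
// absent from the sorted set of suspended thread ids.
#include <algorithm>
#include <cstdio>
#include <vector>

void report_unsuspended(std::vector<unsigned long> suspended,
                        const std::vector<unsigned long> &running) {
  std::sort(suspended.begin(), suspended.end());
  for (unsigned long tid : running) {
    auto it = std::lower_bound(suspended.begin(), suspended.end(), tid);
    if (it == suspended.end() || *it != tid)
      printf("Running thread %lu was not suspended. "
             "False leaks are possible.\n", tid);
  }
}

int main() {
  report_unsuspended({42, 7, 13}, {7, 9, 42});  // warns only about thread 9
}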
diff --git a/libsanitizer/lsan/lsan_common.h b/libsanitizer/lsan/lsan_common.h
index e99cd9e..b82474a 100644
--- a/libsanitizer/lsan/lsan_common.h
+++ b/libsanitizer/lsan/lsan_common.h
@@ -25,9 +25,9 @@
// because of "small" (4 bytes) pointer size that leads to high false negative
// ratio on large leaks. But we still want to have it for some 32 bit arches
// (e.g. x86), see https://github.com/google/sanitizers/issues/403.
-// To enable LeakSanitizer on new architecture, one need to implement
-// internal_clone function as well as (probably) adjust TLS machinery for
-// new architecture inside sanitizer library.
+// To enable LeakSanitizer on a new architecture, one needs to implement the
+// internal_clone function as well as (probably) adjust the TLS machinery for
+// the new architecture inside the sanitizer library.
#if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
(SANITIZER_WORDSIZE == 64) && \
(defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
@@ -45,6 +45,7 @@
namespace __sanitizer {
class FlagParser;
+class ThreadRegistry;
struct DTLS;
}
@@ -93,7 +94,7 @@ struct LeakedObject {
// Aggregates leaks by stack trace prefix.
class LeakReport {
public:
- LeakReport() : next_id_(0), leaks_(1), leaked_objects_(1) {}
+ LeakReport() {}
void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
ChunkTag tag);
void ReportTopLeaks(uptr max_leaks);
@@ -101,12 +102,11 @@ class LeakReport {
void ApplySuppressions();
uptr UnsuppressedLeakCount();
-
private:
void PrintReportForLeak(uptr index);
void PrintLeakedObjectsForLeak(uptr index);
- u32 next_id_;
+ u32 next_id_ = 0;
InternalMmapVector<Leak> leaks_;
InternalMmapVector<LeakedObject> leaked_objects_;
};
@@ -203,6 +203,7 @@ bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access.
void LockThreadRegistry();
void UnlockThreadRegistry();
+ThreadRegistry *GetThreadRegistryLocked();
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls);
diff --git a/libsanitizer/lsan/lsan_common_linux.cc b/libsanitizer/lsan/lsan_common_linux.cc
index 6777272..cffbfc9 100644
--- a/libsanitizer/lsan/lsan_common_linux.cc
+++ b/libsanitizer/lsan/lsan_common_linux.cc
@@ -18,6 +18,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_getauxval.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
@@ -28,8 +29,12 @@ static const char kLinkerName[] = "ld";
static char linker_placeholder[sizeof(LoadedModule)] ALIGNED(64);
static LoadedModule *linker = nullptr;
-static bool IsLinker(const char* full_name) {
- return LibraryNameIs(full_name, kLinkerName);
+static bool IsLinker(const LoadedModule& module) {
+#if SANITIZER_USE_GETAUXVAL
+ return module.base_address() == getauxval(AT_BASE);
+#else
+ return LibraryNameIs(module.full_name(), kLinkerName);
+#endif // SANITIZER_USE_GETAUXVAL
}
__attribute__((tls_model("initial-exec")))
@@ -47,22 +52,25 @@ void InitializePlatformSpecificModules() {
ListOfModules modules;
modules.init();
for (LoadedModule &module : modules) {
- if (!IsLinker(module.full_name())) continue;
+ if (!IsLinker(module))
+ continue;
if (linker == nullptr) {
linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
*linker = module;
module = LoadedModule();
} else {
VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
- "TLS will not be handled correctly.\n", kLinkerName);
+ "TLS and other allocations originating from linker might be "
+ "falsely reported as leaks.\n", kLinkerName);
linker->clear();
linker = nullptr;
return;
}
}
if (linker == nullptr) {
- VReport(1, "LeakSanitizer: Dynamic linker not found. "
- "TLS will not be handled correctly.\n");
+ VReport(1, "LeakSanitizer: Dynamic linker not found. TLS and other "
+ "allocations originating from linker might be falsely reported "
+ "as leaks.\n");
}
}
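
On glibc targets the patch identifies the dynamic linker by comparing each module's base address against getauxval(AT_BASE) rather than matching the module name "ld". A minimal sketch of that auxiliary-vector lookup (Linux-specific, not part of the patch):

// Minimal sketch (not part of the patch): AT_BASE is the load address of the
// program interpreter (ld-linux.so), which the new IsLinker() compares module
// base addresses against when SANITIZER_USE_GETAUXVAL is available.
#include <sys/auxv.h>
#include <cstdio>

int main() {
  unsigned long interp_base = getauxval(AT_BASE);  // 0 if unavailable
  if (interp_base)
    printf("dynamic linker mapped at 0x%lx\n", interp_base);
  else
    printf("AT_BASE not available (e.g. statically linked binary)\n");
}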
diff --git a/libsanitizer/lsan/lsan_common_mac.cc b/libsanitizer/lsan/lsan_common_mac.cc
index 6e763da..8337cd2 100644
--- a/libsanitizer/lsan/lsan_common_mac.cc
+++ b/libsanitizer/lsan/lsan_common_mac.cc
@@ -117,7 +117,8 @@ void ProcessGlobalRegions(Frontier *frontier) {
for (auto name : kSkippedSecNames) CHECK(ARRAY_SIZE(name) < kMaxSegName);
MemoryMappingLayout memory_mapping(false);
- InternalMmapVector<LoadedModule> modules(/*initial_capacity*/ 128);
+ InternalMmapVector<LoadedModule> modules;
+ modules.reserve(128);
memory_mapping.DumpListOfModules(&modules);
for (uptr i = 0; i < modules.size(); ++i) {
// Even when global scanning is disabled, we still need to scan
@@ -139,12 +140,6 @@ void ProcessGlobalRegions(Frontier *frontier) {
}
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
- mach_port_name_t port;
- if (task_for_pid(mach_task_self(), internal_getpid(), &port)
- != KERN_SUCCESS) {
- return;
- }
-
unsigned depth = 1;
vm_size_t size = 0;
vm_address_t address = 0;
@@ -155,7 +150,7 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {
while (err == KERN_SUCCESS) {
struct vm_region_submap_info_64 info;
- err = vm_region_recurse_64(port, &address, &size, &depth,
+ err = vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
(vm_region_info_t)&info, &count);
uptr end_address = address + size;
diff --git a/libsanitizer/lsan/lsan_interceptors.cc b/libsanitizer/lsan/lsan_interceptors.cc
index c9279aa..7c594e5 100644
--- a/libsanitizer/lsan/lsan_interceptors.cc
+++ b/libsanitizer/lsan/lsan_interceptors.cc
@@ -12,6 +12,7 @@
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
@@ -84,9 +85,7 @@ INTERCEPTOR(void*, realloc, void *q, uptr size) {
INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- *memptr = lsan_memalign(alignment, size, stack);
- // FIXME: Return ENOMEM if user requested more than max alloc size.
- return 0;
+ return lsan_posix_memalign(memptr, alignment, size, stack);
}
INTERCEPTOR(void*, valloc, uptr size) {
@@ -121,7 +120,7 @@ INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- return lsan_memalign(alignment, size, stack);
+ return lsan_aligned_alloc(alignment, size, stack);
}
#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC INTERCEPT_FUNCTION(aligned_alloc)
#else
@@ -164,13 +163,7 @@ INTERCEPTOR(int, mallopt, int cmd, int value) {
INTERCEPTOR(void*, pvalloc, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- uptr PageSize = GetPageSizeCached();
- size = RoundUpTo(size, PageSize);
- if (size == 0) {
- // pvalloc(0) should allocate one page.
- size = PageSize;
- }
- return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
+ return lsan_pvalloc(size, stack);
}
#define LSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc)
#else
@@ -200,21 +193,21 @@ INTERCEPTOR(int, mprobe, void *ptr) {
// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
-#define OPERATOR_NEW_BODY(nothrow) \
- ENSURE_LSAN_INITED; \
- GET_STACK_TRACE_MALLOC; \
- void *res = lsan_malloc(size, stack); \
- if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM(); \
+#define OPERATOR_NEW_BODY(nothrow)\
+ ENSURE_LSAN_INITED;\
+ GET_STACK_TRACE_MALLOC;\
+ void *res = lsan_malloc(size, stack);\
+ if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
return res;
-#define OPERATOR_NEW_BODY_ALIGN(nothrow) \
- ENSURE_LSAN_INITED; \
- GET_STACK_TRACE_MALLOC; \
- void *res = lsan_memalign((uptr)align, size, stack); \
- if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM(); \
+#define OPERATOR_NEW_BODY_ALIGN(nothrow)\
+ ENSURE_LSAN_INITED;\
+ GET_STACK_TRACE_MALLOC;\
+ void *res = lsan_memalign((uptr)align, size, stack);\
+ if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
return res;
-#define OPERATOR_DELETE_BODY \
- ENSURE_LSAN_INITED; \
+#define OPERATOR_DELETE_BODY\
+ ENSURE_LSAN_INITED;\
lsan_free(ptr);
// On OS X it's not enough to just provide our own 'operator new' and
@@ -307,6 +300,7 @@ INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
///// Thread initialization and finalization. /////
+#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
static unsigned g_thread_finalize_key;
static void thread_finalize(void *v) {
@@ -320,6 +314,29 @@ static void thread_finalize(void *v) {
}
ThreadFinish();
}
+#endif
+
+#if SANITIZER_NETBSD
+INTERCEPTOR(void, _lwp_exit) {
+ ENSURE_LSAN_INITED;
+ ThreadFinish();
+ REAL(_lwp_exit)();
+}
+#define LSAN_MAYBE_INTERCEPT__LWP_EXIT INTERCEPT_FUNCTION(_lwp_exit)
+#else
+#define LSAN_MAYBE_INTERCEPT__LWP_EXIT
+#endif
+
+#if SANITIZER_INTERCEPT_THR_EXIT
+INTERCEPTOR(void, thr_exit, tid_t *state) {
+ ENSURE_LSAN_INITED;
+ ThreadFinish();
+ REAL(thr_exit)(state);
+}
+#define LSAN_MAYBE_INTERCEPT_THR_EXIT INTERCEPT_FUNCTION(thr_exit)
+#else
+#define LSAN_MAYBE_INTERCEPT_THR_EXIT
+#endif
struct ThreadParam {
void *(*callback)(void *arg);
@@ -333,11 +350,13 @@ extern "C" void *__lsan_thread_start_func(void *arg) {
void *param = p->param;
// Wait until the last iteration to maximize the chance that we are the last
// destructor to run.
+#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
if (pthread_setspecific(g_thread_finalize_key,
(void*)GetPthreadDestructorIterations())) {
Report("LeakSanitizer: failed to set thread key.\n");
Die();
}
+#endif
int tid = 0;
while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
internal_sched_yield();
@@ -425,10 +444,15 @@ void InitializeInterceptors() {
INTERCEPT_FUNCTION(pthread_join);
INTERCEPT_FUNCTION(_exit);
+ LSAN_MAYBE_INTERCEPT__LWP_EXIT;
+ LSAN_MAYBE_INTERCEPT_THR_EXIT;
+
+#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
Report("LeakSanitizer: failed to create thread key.\n");
Die();
}
+#endif
}
} // namespace __lsan
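
The posix_memalign interceptor now delegates to lsan_posix_memalign(), which validates the alignment up front and reports failure through the return value (EINVAL/ENOMEM) instead of always returning 0. A standalone sketch of that contract, using hypothetical helper names rather than sanitizer APIs (not part of the patch):

// Sketch (not part of the patch): posix_memalign semantics -- alignment must
// be a power of two and a multiple of sizeof(void *); errors come back as the
// return code, not via errno, and *memptr is left untouched on failure.
#include <cerrno>
#include <cstdio>
#include <cstdlib>

static bool valid_posix_memalign_alignment(size_t alignment) {
  return alignment >= sizeof(void *) &&       // at least pointer-sized
         (alignment & (alignment - 1)) == 0;  // and a power of two
}

int checked_posix_memalign(void **memptr, size_t alignment, size_t size) {
  if (!valid_posix_memalign_alignment(alignment))
    return EINVAL;
  // Round size up so the aligned_alloc() C11 precondition holds.
  size_t rounded = ((size + alignment - 1) / alignment) * alignment;
  void *p = aligned_alloc(alignment, rounded);
  if (!p)
    return ENOMEM;
  *memptr = p;
  return 0;
}

int main() {
  void *p = nullptr;
  printf("bad alignment  -> %d\n", checked_posix_memalign(&p, 3, 64));   // EINVAL
  printf("good alignment -> %d\n", checked_posix_memalign(&p, 64, 64));  // 0
  free(p);
}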
diff --git a/libsanitizer/lsan/lsan_malloc_mac.cc b/libsanitizer/lsan/lsan_malloc_mac.cc
index 2d810af..2458b50 100644
--- a/libsanitizer/lsan/lsan_malloc_mac.cc
+++ b/libsanitizer/lsan/lsan_malloc_mac.cc
@@ -35,6 +35,9 @@ using namespace __lsan;
#define COMMON_MALLOC_CALLOC(count, size) \
GET_STACK_TRACE_MALLOC; \
void *p = lsan_calloc(count, size, stack)
+#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \
+ GET_STACK_TRACE_MALLOC; \
+ int res = lsan_posix_memalign(memptr, alignment, size, stack)
#define COMMON_MALLOC_VALLOC(size) \
GET_STACK_TRACE_MALLOC; \
void *p = lsan_valloc(size, stack)
diff --git a/libsanitizer/lsan/lsan_thread.cc b/libsanitizer/lsan/lsan_thread.cc
index e03e876..388990b 100644
--- a/libsanitizer/lsan/lsan_thread.cc
+++ b/libsanitizer/lsan/lsan_thread.cc
@@ -153,4 +153,9 @@ void UnlockThreadRegistry() {
thread_registry->Unlock();
}
+ThreadRegistry *GetThreadRegistryLocked() {
+ thread_registry->CheckLocked();
+ return thread_registry;
+}
+
} // namespace __lsan