author:    Kostya Serebryany <kcc@google.com>   2014-05-22 07:09:21 +0000
committer: Kostya Serebryany <kcc@gcc.gnu.org>  2014-05-22 07:09:21 +0000
commit:    dee5ea7a0bfe95367820a443ef4a7c813e598b55
tree:      f90afdf42b3ae78508a5c6422f458a5bb0216aa2 /libsanitizer/lsan
parent:    b95591361e32a755231d99c348f8a43e2aed0187
libsanitizer merge from upstream r209283
From-SVN: r210743
Diffstat (limited to 'libsanitizer/lsan')
 libsanitizer/lsan/lsan.cc              |  12
 libsanitizer/lsan/lsan_allocator.cc    |   6
 libsanitizer/lsan/lsan_common.cc       | 350
 libsanitizer/lsan/lsan_common.h        |  47
 libsanitizer/lsan/lsan_common_linux.cc |  25
 libsanitizer/lsan/lsan_interceptors.cc |  39
 6 files changed, 328 insertions, 151 deletions
diff --git a/libsanitizer/lsan/lsan.cc b/libsanitizer/lsan/lsan.cc
index 270979a..c1481f5 100644
--- a/libsanitizer/lsan/lsan.cc
+++ b/libsanitizer/lsan/lsan.cc
@@ -33,6 +33,11 @@ static void InitializeCommonFlags() {
ParseCommonFlagsFromString(cf, GetEnv("LSAN_OPTIONS"));
}
+///// Interface to the common LSan module. /////
+bool WordIsPoisoned(uptr addr) {
+ return false;
+}
+
} // namespace __lsan
using namespace __lsan; // NOLINT
@@ -53,12 +58,7 @@ extern "C" void __lsan_init() {
ThreadStart(tid, GetTid());
SetCurrentThread(tid);
- // Start symbolizer process if necessary.
- if (common_flags()->symbolize) {
- Symbolizer::Init(common_flags()->external_symbolizer_path);
- } else {
- Symbolizer::Disable();
- }
+ Symbolizer::Init(common_flags()->external_symbolizer_path);
InitCommonLsan();
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)
diff --git a/libsanitizer/lsan/lsan_allocator.cc b/libsanitizer/lsan/lsan_allocator.cc
index ce47dfc..3e81ebe 100644
--- a/libsanitizer/lsan/lsan_allocator.cc
+++ b/libsanitizer/lsan/lsan_allocator.cc
@@ -141,7 +141,11 @@ uptr PointsIntoChunk(void* p) {
if (addr < chunk) return 0;
ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
CHECK(m);
- if (m->allocated && addr < chunk + m->requested_size)
+ if (!m->allocated)
+ return 0;
+ if (addr < chunk + m->requested_size)
+ return chunk;
+ if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
return chunk;
return 0;
}
diff --git a/libsanitizer/lsan/lsan_common.cc b/libsanitizer/lsan/lsan_common.cc
index bbc5b5f..78afa77 100644
--- a/libsanitizer/lsan/lsan_common.cc
+++ b/libsanitizer/lsan/lsan_common.cc
@@ -15,6 +15,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
@@ -24,7 +25,8 @@
#if CAN_SANITIZE_LEAKS
namespace __lsan {
-// This mutex is used to prevent races between DoLeakCheck and IgnoreObject.
+// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
+// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);
THREADLOCAL int disable_counter;
@@ -39,42 +41,56 @@ static void InitializeFlags() {
f->resolution = 0;
f->max_leaks = 0;
f->exitcode = 23;
+ f->print_suppressions = true;
f->suppressions="";
f->use_registers = true;
f->use_globals = true;
f->use_stacks = true;
f->use_tls = true;
+ f->use_root_regions = true;
f->use_unaligned = false;
- f->verbosity = 0;
+ f->use_poisoned = false;
f->log_pointers = false;
f->log_threads = false;
const char *options = GetEnv("LSAN_OPTIONS");
if (options) {
- ParseFlag(options, &f->use_registers, "use_registers");
- ParseFlag(options, &f->use_globals, "use_globals");
- ParseFlag(options, &f->use_stacks, "use_stacks");
- ParseFlag(options, &f->use_tls, "use_tls");
- ParseFlag(options, &f->use_unaligned, "use_unaligned");
- ParseFlag(options, &f->report_objects, "report_objects");
- ParseFlag(options, &f->resolution, "resolution");
+ ParseFlag(options, &f->use_registers, "use_registers", "");
+ ParseFlag(options, &f->use_globals, "use_globals", "");
+ ParseFlag(options, &f->use_stacks, "use_stacks", "");
+ ParseFlag(options, &f->use_tls, "use_tls", "");
+ ParseFlag(options, &f->use_root_regions, "use_root_regions", "");
+ ParseFlag(options, &f->use_unaligned, "use_unaligned", "");
+ ParseFlag(options, &f->use_poisoned, "use_poisoned", "");
+ ParseFlag(options, &f->report_objects, "report_objects", "");
+ ParseFlag(options, &f->resolution, "resolution", "");
CHECK_GE(&f->resolution, 0);
- ParseFlag(options, &f->max_leaks, "max_leaks");
+ ParseFlag(options, &f->max_leaks, "max_leaks", "");
CHECK_GE(&f->max_leaks, 0);
- ParseFlag(options, &f->verbosity, "verbosity");
- ParseFlag(options, &f->log_pointers, "log_pointers");
- ParseFlag(options, &f->log_threads, "log_threads");
- ParseFlag(options, &f->exitcode, "exitcode");
- ParseFlag(options, &f->suppressions, "suppressions");
+ ParseFlag(options, &f->log_pointers, "log_pointers", "");
+ ParseFlag(options, &f->log_threads, "log_threads", "");
+ ParseFlag(options, &f->exitcode, "exitcode", "");
+ ParseFlag(options, &f->print_suppressions, "print_suppressions", "");
+ ParseFlag(options, &f->suppressions, "suppressions", "");
}
}
+#define LOG_POINTERS(...) \
+ do { \
+ if (flags()->log_pointers) Report(__VA_ARGS__); \
+ } while (0);
+
+#define LOG_THREADS(...) \
+ do { \
+ if (flags()->log_threads) Report(__VA_ARGS__); \
+ } while (0);
+
SuppressionContext *suppression_ctx;
void InitializeSuppressions() {
CHECK(!suppression_ctx);
- ALIGNED(64) static char placeholder_[sizeof(SuppressionContext)];
- suppression_ctx = new(placeholder_) SuppressionContext;
+ ALIGNED(64) static char placeholder[sizeof(SuppressionContext)];
+ suppression_ctx = new(placeholder) SuppressionContext;
char *suppressions_from_file;
uptr buffer_size;
if (ReadFileToBuffer(flags()->suppressions, &suppressions_from_file,
@@ -89,8 +105,22 @@ void InitializeSuppressions() {
suppression_ctx->Parse(__lsan_default_suppressions());
}
+struct RootRegion {
+ const void *begin;
+ uptr size;
+};
+
+InternalMmapVector<RootRegion> *root_regions;
+
+void InitializeRootRegions() {
+ CHECK(!root_regions);
+ ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
+ root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
+}
+
void InitCommonLsan() {
InitializeFlags();
+ InitializeRootRegions();
if (common_flags()->detect_leaks) {
// Initialization which can fail or print warnings should only be done if
// LSan is actually enabled.
@@ -130,8 +160,7 @@ void ScanRangeForPointers(uptr begin, uptr end,
Frontier *frontier,
const char *region_type, ChunkTag tag) {
const uptr alignment = flags()->pointer_alignment();
- if (flags()->log_pointers)
- Report("Scanning %s range %p-%p.\n", region_type, begin, end);
+ LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
uptr pp = begin;
if (pp % alignment)
pp = pp + alignment - pp % alignment;
@@ -146,10 +175,19 @@ void ScanRangeForPointers(uptr begin, uptr end,
// Reachable beats ignored beats leaked.
if (m.tag() == kReachable) continue;
if (m.tag() == kIgnored && tag != kReachable) continue;
+
+ // Do this check relatively late so we can log only the interesting cases.
+ if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
+ LOG_POINTERS(
+ "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
+ "%zu.\n",
+ pp, p, chunk, chunk + m.requested_size(), m.requested_size());
+ continue;
+ }
+
m.set_tag(tag);
- if (flags()->log_pointers)
- Report("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
- chunk, chunk + m.requested_size(), m.requested_size());
+ LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
+ chunk, chunk + m.requested_size(), m.requested_size());
if (frontier)
frontier->push_back(chunk);
}
@@ -168,7 +206,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
uptr registers_end = registers_begin + registers.size();
for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
- if (flags()->log_threads) Report("Processing thread %d.\n", os_id);
+ LOG_THREADS("Processing thread %d.\n", os_id);
uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
&tls_begin, &tls_end,
@@ -176,8 +214,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
if (!thread_found) {
// If a thread can't be found in the thread registry, it's probably in the
// process of destruction. Log this event and move on.
- if (flags()->log_threads)
- Report("Thread %d not found in registry.\n", os_id);
+ LOG_THREADS("Thread %d not found in registry.\n", os_id);
continue;
}
uptr sp;
@@ -194,14 +231,12 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
"REGISTERS", kReachable);
if (flags()->use_stacks) {
- if (flags()->log_threads)
- Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp);
+ LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
if (sp < stack_begin || sp >= stack_end) {
// SP is outside the recorded stack range (e.g. the thread is running a
// signal handler on alternate stack). Again, consider the entire stack
// range to be reachable.
- if (flags()->log_threads)
- Report("WARNING: stack pointer not in stack range.\n");
+ LOG_THREADS("WARNING: stack pointer not in stack range.\n");
} else {
// Shrink the stack range to ignore out-of-scope values.
stack_begin = sp;
@@ -212,7 +247,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
}
if (flags()->use_tls) {
- if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end);
+ LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
if (cache_begin == cache_end) {
ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
} else {
@@ -230,6 +265,37 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
}
}
+static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
+ uptr root_end) {
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ uptr begin, end, prot;
+ while (proc_maps.Next(&begin, &end,
+ /*offset*/ 0, /*filename*/ 0, /*filename_size*/ 0,
+ &prot)) {
+ uptr intersection_begin = Max(root_begin, begin);
+ uptr intersection_end = Min(end, root_end);
+ if (intersection_begin >= intersection_end) continue;
+ bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
+ LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
+ root_begin, root_end, begin, end,
+ is_readable ? "readable" : "unreadable");
+ if (is_readable)
+ ScanRangeForPointers(intersection_begin, intersection_end, frontier,
+ "ROOT", kReachable);
+ }
+}
+
+// Scans root regions for heap pointers.
+static void ProcessRootRegions(Frontier *frontier) {
+ if (!flags()->use_root_regions) return;
+ CHECK(root_regions);
+ for (uptr i = 0; i < root_regions->size(); i++) {
+ RootRegion region = (*root_regions)[i];
+ uptr begin_addr = reinterpret_cast<uptr>(region.begin);
+ ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
+ }
+}
+
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
while (frontier->size()) {
uptr next_chunk = frontier->back();
@@ -264,30 +330,27 @@ static void CollectIgnoredCb(uptr chunk, void *arg) {
// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
// Holds the flood fill frontier.
- Frontier frontier(GetPageSizeCached());
+ Frontier frontier(1);
- if (flags()->use_globals)
- ProcessGlobalRegions(&frontier);
+ ProcessGlobalRegions(&frontier);
ProcessThreads(suspended_threads, &frontier);
+ ProcessRootRegions(&frontier);
FloodFillTag(&frontier, kReachable);
// The check here is relatively expensive, so we do this in a separate flood
// fill. That way we can skip the check for chunks that are reachable
// otherwise.
- if (flags()->log_pointers)
- Report("Processing platform-specific allocations.\n");
+ LOG_POINTERS("Processing platform-specific allocations.\n");
ProcessPlatformSpecificAllocations(&frontier);
FloodFillTag(&frontier, kReachable);
- if (flags()->log_pointers)
- Report("Scanning ignored chunks.\n");
+ LOG_POINTERS("Scanning ignored chunks.\n");
CHECK_EQ(0, frontier.size());
ForEachChunk(CollectIgnoredCb, &frontier);
FloodFillTag(&frontier, kIgnored);
// Iterate over leaked chunks and mark those that are reachable from other
// leaked chunks.
- if (flags()->log_pointers)
- Report("Scanning leaked chunks.\n");
+ LOG_POINTERS("Scanning leaked chunks.\n");
ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
}
@@ -298,7 +361,8 @@ static void PrintStackTraceById(u32 stack_trace_id) {
StackTrace::PrintStack(trace, size);
}
-// ForEachChunk callback. Aggregates unreachable chunks into a LeakReport.
+// ForEachChunk callback. Aggregates information about unreachable chunks into
+// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
CHECK(arg);
LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
@@ -307,26 +371,17 @@ static void CollectLeaksCb(uptr chunk, void *arg) {
if (!m.allocated()) return;
if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
uptr resolution = flags()->resolution;
+ u32 stack_trace_id = 0;
if (resolution > 0) {
uptr size = 0;
const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
size = Min(size, resolution);
- leak_report->Add(StackDepotPut(trace, size), m.requested_size(), m.tag());
+ stack_trace_id = StackDepotPut(trace, size);
} else {
- leak_report->Add(m.stack_trace_id(), m.requested_size(), m.tag());
+ stack_trace_id = m.stack_trace_id();
}
- }
-}
-
-// ForEachChunkCallback. Prints addresses of unreachable chunks.
-static void PrintLeakedCb(uptr chunk, void *arg) {
- chunk = GetUserBegin(chunk);
- LsanMetadata m(chunk);
- if (!m.allocated()) return;
- if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
- Printf("%s leaked %zu byte object at %p.\n",
- m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
- m.requested_size(), chunk);
+ leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
+ m.tag());
}
}
@@ -345,12 +400,6 @@ static void PrintMatchedSuppressions() {
Printf("%s\n\n", line);
}
-static void PrintLeaked() {
- Printf("\n");
- Printf("Reporting individual objects:\n");
- ForEachChunk(PrintLeakedCb, 0 /* arg */);
-}
-
struct DoLeakCheckParam {
bool success;
LeakReport leak_report;
@@ -361,11 +410,8 @@ static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
CHECK(param);
CHECK(!param->success);
- CHECK(param->leak_report.IsEmpty());
ClassifyAllChunks(suspended_threads);
ForEachChunk(CollectLeaksCb, &param->leak_report);
- if (!param->leak_report.IsEmpty() && flags()->report_objects)
- PrintLeaked();
param->success = true;
}
@@ -376,7 +422,7 @@ void DoLeakCheck() {
if (already_done) return;
already_done = true;
if (&__lsan_is_turned_off && __lsan_is_turned_off())
- return;
+ return;
DoLeakCheckParam param;
param.success = false;
@@ -390,8 +436,9 @@ void DoLeakCheck() {
Report("LeakSanitizer has encountered a fatal error.\n");
Die();
}
- uptr have_unsuppressed = param.leak_report.ApplySuppressions();
- if (have_unsuppressed) {
+ param.leak_report.ApplySuppressions();
+ uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
+ if (unsuppressed_count > 0) {
Decorator d;
Printf("\n"
"================================================================="
@@ -399,27 +446,37 @@ void DoLeakCheck() {
Printf("%s", d.Error());
Report("ERROR: LeakSanitizer: detected memory leaks\n");
Printf("%s", d.End());
- param.leak_report.PrintLargest(flags()->max_leaks);
+ param.leak_report.ReportTopLeaks(flags()->max_leaks);
}
- if (have_unsuppressed || (flags()->verbosity >= 1)) {
+ if (flags()->print_suppressions)
PrintMatchedSuppressions();
+ if (unsuppressed_count > 0) {
param.leak_report.PrintSummary();
+ if (flags()->exitcode)
+ internal__exit(flags()->exitcode);
}
- if (have_unsuppressed && flags()->exitcode)
- internal__exit(flags()->exitcode);
}
static Suppression *GetSuppressionForAddr(uptr addr) {
+ Suppression *s;
+
+ // Suppress by module name.
+ const char *module_name;
+ uptr module_offset;
+ if (Symbolizer::Get()->GetModuleNameAndOffsetForPC(addr, &module_name,
+ &module_offset) &&
+ suppression_ctx->Match(module_name, SuppressionLeak, &s))
+ return s;
+
+ // Suppress by file or function name.
static const uptr kMaxAddrFrames = 16;
InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo();
- uptr addr_frames_num = Symbolizer::Get()->SymbolizeCode(
+ uptr addr_frames_num = Symbolizer::Get()->SymbolizePC(
addr, addr_frames.data(), kMaxAddrFrames);
for (uptr i = 0; i < addr_frames_num; i++) {
- Suppression* s;
if (suppression_ctx->Match(addr_frames[i].function, SuppressionLeak, &s) ||
- suppression_ctx->Match(addr_frames[i].file, SuppressionLeak, &s) ||
- suppression_ctx->Match(addr_frames[i].module, SuppressionLeak, &s))
+ suppression_ctx->Match(addr_frames[i].file, SuppressionLeak, &s))
return s;
}
return 0;
@@ -439,26 +496,35 @@ static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
///// LeakReport implementation. /////
// A hard limit on the number of distinct leaks, to avoid quadratic complexity
-// in LeakReport::Add(). We don't expect to ever see this many leaks in
-// real-world applications.
+// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
+// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;
-void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) {
+void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
+ uptr leaked_size, ChunkTag tag) {
CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
bool is_directly_leaked = (tag == kDirectlyLeaked);
- for (uptr i = 0; i < leaks_.size(); i++)
+ uptr i;
+ for (i = 0; i < leaks_.size(); i++) {
if (leaks_[i].stack_trace_id == stack_trace_id &&
leaks_[i].is_directly_leaked == is_directly_leaked) {
leaks_[i].hit_count++;
leaks_[i].total_size += leaked_size;
- return;
+ break;
}
- if (leaks_.size() == kMaxLeaksConsidered) return;
- Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id,
- is_directly_leaked, /* is_suppressed */ false };
- leaks_.push_back(leak);
+ }
+ if (i == leaks_.size()) {
+ if (leaks_.size() == kMaxLeaksConsidered) return;
+ Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
+ is_directly_leaked, /* is_suppressed */ false };
+ leaks_.push_back(leak);
+ }
+ if (flags()->report_objects) {
+ LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
+ leaked_objects_.push_back(obj);
+ }
}
static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
@@ -468,7 +534,7 @@ static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
return leak1.is_directly_leaked;
}
-void LeakReport::PrintLargest(uptr num_leaks_to_print) {
+void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
CHECK(leaks_.size() <= kMaxLeaksConsidered);
Printf("\n");
if (leaks_.size() == kMaxLeaksConsidered)
@@ -476,31 +542,49 @@ void LeakReport::PrintLargest(uptr num_leaks_to_print) {
"reported.\n",
kMaxLeaksConsidered);
- uptr unsuppressed_count = 0;
- for (uptr i = 0; i < leaks_.size(); i++)
- if (!leaks_[i].is_suppressed) unsuppressed_count++;
- if (num_leaks_to_print > 0 && num_leaks_to_print < unsuppressed_count)
- Printf("The %zu largest leak(s):\n", num_leaks_to_print);
+ uptr unsuppressed_count = UnsuppressedLeakCount();
+ if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
+ Printf("The %zu top leak(s):\n", num_leaks_to_report);
InternalSort(&leaks_, leaks_.size(), LeakComparator);
- uptr leaks_printed = 0;
- Decorator d;
+ uptr leaks_reported = 0;
for (uptr i = 0; i < leaks_.size(); i++) {
if (leaks_[i].is_suppressed) continue;
- Printf("%s", d.Leak());
- Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
- leaks_[i].is_directly_leaked ? "Direct" : "Indirect",
- leaks_[i].total_size, leaks_[i].hit_count);
- Printf("%s", d.End());
- PrintStackTraceById(leaks_[i].stack_trace_id);
- leaks_printed++;
- if (leaks_printed == num_leaks_to_print) break;
+ PrintReportForLeak(i);
+ leaks_reported++;
+ if (leaks_reported == num_leaks_to_report) break;
}
- if (leaks_printed < unsuppressed_count) {
- uptr remaining = unsuppressed_count - leaks_printed;
+ if (leaks_reported < unsuppressed_count) {
+ uptr remaining = unsuppressed_count - leaks_reported;
Printf("Omitting %zu more leak(s).\n", remaining);
}
}
+void LeakReport::PrintReportForLeak(uptr index) {
+ Decorator d;
+ Printf("%s", d.Leak());
+ Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
+ leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
+ leaks_[index].total_size, leaks_[index].hit_count);
+ Printf("%s", d.End());
+
+ PrintStackTraceById(leaks_[index].stack_trace_id);
+
+ if (flags()->report_objects) {
+ Printf("Objects leaked above:\n");
+ PrintLeakedObjectsForLeak(index);
+ Printf("\n");
+ }
+}
+
+void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
+ u32 leak_id = leaks_[index].id;
+ for (uptr j = 0; j < leaked_objects_.size(); j++) {
+ if (leaked_objects_[j].leak_id == leak_id)
+ Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
+ leaked_objects_[j].size);
+ }
+}
+
void LeakReport::PrintSummary() {
CHECK(leaks_.size() <= kMaxLeaksConsidered);
uptr bytes = 0, allocations = 0;
@@ -516,20 +600,24 @@ void LeakReport::PrintSummary() {
ReportErrorSummary(summary.data());
}
-uptr LeakReport::ApplySuppressions() {
- uptr unsuppressed_count = 0;
+void LeakReport::ApplySuppressions() {
for (uptr i = 0; i < leaks_.size(); i++) {
Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
if (s) {
s->weight += leaks_[i].total_size;
s->hit_count += leaks_[i].hit_count;
leaks_[i].is_suppressed = true;
- } else {
- unsuppressed_count++;
}
}
- return unsuppressed_count;
}
+
+uptr LeakReport::UnsuppressedLeakCount() {
+ uptr result = 0;
+ for (uptr i = 0; i < leaks_.size(); i++)
+ if (!leaks_[i].is_suppressed) result++;
+ return result;
+}
+
} // namespace __lsan
#endif // CAN_SANITIZE_LEAKS
@@ -545,13 +633,51 @@ void __lsan_ignore_object(const void *p) {
// locked.
BlockingMutexLock l(&global_mutex);
IgnoreObjectResult res = IgnoreObjectLocked(p);
- if (res == kIgnoreObjectInvalid && flags()->verbosity >= 2)
- Report("__lsan_ignore_object(): no heap object found at %p", p);
- if (res == kIgnoreObjectAlreadyIgnored && flags()->verbosity >= 2)
- Report("__lsan_ignore_object(): "
+ if (res == kIgnoreObjectInvalid)
+ VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
+ if (res == kIgnoreObjectAlreadyIgnored)
+ VReport(1, "__lsan_ignore_object(): "
"heap object at %p is already being ignored\n", p);
- if (res == kIgnoreObjectSuccess && flags()->verbosity >= 3)
- Report("__lsan_ignore_object(): ignoring heap object at %p\n", p);
+ if (res == kIgnoreObjectSuccess)
+ VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
+#endif // CAN_SANITIZE_LEAKS
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_register_root_region(const void *begin, uptr size) {
+#if CAN_SANITIZE_LEAKS
+ BlockingMutexLock l(&global_mutex);
+ CHECK(root_regions);
+ RootRegion region = {begin, size};
+ root_regions->push_back(region);
+ VReport(1, "Registered root region at %p of size %llu\n", begin, size);
+#endif // CAN_SANITIZE_LEAKS
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_unregister_root_region(const void *begin, uptr size) {
+#if CAN_SANITIZE_LEAKS
+ BlockingMutexLock l(&global_mutex);
+ CHECK(root_regions);
+ bool removed = false;
+ for (uptr i = 0; i < root_regions->size(); i++) {
+ RootRegion region = (*root_regions)[i];
+ if (region.begin == begin && region.size == size) {
+ removed = true;
+ uptr last_index = root_regions->size() - 1;
+ (*root_regions)[i] = (*root_regions)[last_index];
+ root_regions->pop_back();
+ VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
+ break;
+ }
+ }
+ if (!removed) {
+ Report(
+ "__lsan_unregister_root_region(): region at %p of size %llu has not "
+ "been registered.\n",
+ begin, size);
+ Die();
+ }
#endif // CAN_SANITIZE_LEAKS
}
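
The root-region interface added above is the main user-facing change in this file: each registered region is intersected with the readable mappings in the process map at every leak check, and unregistering with a begin/size pair that was never registered is a hard error (Report() followed by Die()). A minimal usage sketch, assuming the declarations from compiler-rt's <sanitizer/lsan_interface.h>; the arena name and size are illustrative, not from the patch:

  #include <sys/mman.h>
  #include <sanitizer/lsan_interface.h>

  void register_arena_example() {
    // Keep heap objects referenced only from a custom-mapped arena alive
    // across leak checks by making the arena a root region.
    const size_t kSize = 1 << 20;
    void *arena = mmap(0, kSize, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    __lsan_register_root_region(arena, kSize);
    // ... store pointers to heap allocations inside 'arena' ...
    __lsan_unregister_root_region(arena, kSize);  // begin and size must match
  }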
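GetSuppressionForAddr() now tries the module name first and only symbolizes the frame to match function and file names if that fails. A hedged sketch of a suppressions file exercising all three match kinds (every pattern below is illustrative):

  # leak.supp -- one "leak:<pattern>" entry per line; '#' starts a comment
  leak:libthirdparty.so        # matched against the module name
  leak:PoolAllocator::Grow     # matched against the function name
  leak:legacy_cache.cc         # matched against the source file name

Run with something like LSAN_OPTIONS=suppressions=leak.supp:print_suppressions=1 so the new print_suppressions flag reports which entries fired.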
diff --git a/libsanitizer/lsan/lsan_common.h b/libsanitizer/lsan/lsan_common.h
index 5d9b4eb..0c84d41 100644
--- a/libsanitizer/lsan/lsan_common.h
+++ b/libsanitizer/lsan/lsan_common.h
@@ -19,7 +19,7 @@
#include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
-#if SANITIZER_LINUX && defined(__x86_64__)
+#if SANITIZER_LINUX && defined(__x86_64__) && (SANITIZER_WORDSIZE == 64)
#define CAN_SANITIZE_LEAKS 1
#else
#define CAN_SANITIZE_LEAKS 0
@@ -49,6 +49,8 @@ struct Flags {
int max_leaks;
// If nonzero kill the process with this exit code upon finding leaks.
int exitcode;
+ // Print matched suppressions after leak checking.
+ bool print_suppressions;
// Suppressions file name.
const char* suppressions;
@@ -61,12 +63,13 @@ struct Flags {
bool use_registers;
// TLS and thread-specific storage.
bool use_tls;
+ // Regions added via __lsan_register_root_region().
+ bool use_root_regions;
// Consider unaligned pointers valid.
bool use_unaligned;
-
- // User-visible verbosity.
- int verbosity;
+ // Consider pointers found in poisoned memory to be valid.
+ bool use_poisoned;
// Debug logging.
bool log_pointers;
@@ -77,6 +80,7 @@ extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; }
struct Leak {
+ u32 id;
uptr hit_count;
uptr total_size;
u32 stack_trace_id;
@@ -84,17 +88,31 @@ struct Leak {
bool is_suppressed;
};
+struct LeakedObject {
+ u32 leak_id;
+ uptr addr;
+ uptr size;
+};
+
// Aggregates leaks by stack trace prefix.
class LeakReport {
public:
- LeakReport() : leaks_(1) {}
- void Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag);
- void PrintLargest(uptr max_leaks);
+ LeakReport() : next_id_(0), leaks_(1), leaked_objects_(1) {}
+ void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
+ ChunkTag tag);
+ void ReportTopLeaks(uptr max_leaks);
void PrintSummary();
- bool IsEmpty() { return leaks_.size() == 0; }
- uptr ApplySuppressions();
+ void ApplySuppressions();
+ uptr UnsuppressedLeakCount();
+
+
private:
+ void PrintReportForLeak(uptr index);
+ void PrintLeakedObjectsForLeak(uptr index);
+
+ u32 next_id_;
InternalMmapVector<Leak> leaks_;
+ InternalMmapVector<LeakedObject> leaked_objects_;
};
typedef InternalMmapVector<uptr> Frontier;
@@ -119,6 +137,15 @@ void InitCommonLsan();
void DoLeakCheck();
bool DisabledInThisThread();
+// Special case for "new T[0]" where T is a type with DTOR.
+// new T[0] will allocate one word for the array size (0) and store a pointer
+// to the end of allocated chunk.
+inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
+ uptr addr) {
+ return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
+ *reinterpret_cast<uptr *>(chunk_beg) == 0;
+}
+
// The following must be implemented in the parent tool.
void ForEachChunk(ForEachChunkCallback callback, void *arg);
@@ -127,6 +154,8 @@ void GetAllocatorGlobalRange(uptr *begin, uptr *end);
// Wrappers for allocator's ForceLock()/ForceUnlock().
void LockAllocator();
void UnlockAllocator();
+// Returns true if [addr, addr + sizeof(void *)) is poisoned.
+bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access.
void LockThreadRegistry();
void UnlockThreadRegistry();
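
IsSpecialCaseOfOperatorNew0() above encodes an Itanium C++ ABI detail: for an array of a type with a destructor, the element count is stored in a one-word cookie at the start of the allocation and the user pointer is returned just past it, so with zero elements the whole chunk is the cookie. A sketch of the case it recognizes (Widget is illustrative):

  struct Widget { ~Widget() {} };  // any type with a destructor gets a cookie

  void cookie_example() {
    Widget *w = new Widget[0];
    // Chunk layout: [ cookie == 0 ] ; chunk_size == sizeof(uptr) and
    // w == chunk_beg + chunk_size (one word past the start of the chunk).
    // PointsIntoChunk() alone would not count 'w' as a reference into the
    // chunk, so without this helper the allocation would be reported leaked.
    delete[] w;
  }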
diff --git a/libsanitizer/lsan/lsan_common_linux.cc b/libsanitizer/lsan/lsan_common_linux.cc
index 80d2459..c318fbc 100644
--- a/libsanitizer/lsan/lsan_common_linux.cc
+++ b/libsanitizer/lsan/lsan_common_linux.cc
@@ -17,6 +17,7 @@
#include <link.h>
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
@@ -41,11 +42,11 @@ void InitializePlatformSpecificModules() {
return;
}
if (num_matches == 0)
- Report("LeakSanitizer: Dynamic linker not found. "
- "TLS will not be handled correctly.\n");
+ VReport(1, "LeakSanitizer: Dynamic linker not found. "
+ "TLS will not be handled correctly.\n");
else if (num_matches > 1)
- Report("LeakSanitizer: Multiple modules match \"%s\". "
- "TLS will not be handled correctly.\n", kLinkerName);
+ VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
+ "TLS will not be handled correctly.\n", kLinkerName);
linker = 0;
}
@@ -81,6 +82,7 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
// Scans global variables for heap pointers.
void ProcessGlobalRegions(Frontier *frontier) {
+ if (!flags()->use_globals) return;
// FIXME: dl_iterate_phdr acquires a linker lock, so we run a risk of
// deadlocking by running this under StopTheWorld. However, the lock is
// reentrant, so we should be able to fix this by acquiring the lock before
@@ -127,6 +129,21 @@ static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
// Handles dynamically allocated TLS blocks by treating all chunks allocated
// from ld-linux.so as reachable.
+// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
+// They are allocated with a __libc_memalign() call in allocate_and_init()
+// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
+// blocks, but we can make sure they come from our own allocator by intercepting
+// __libc_memalign(). On top of that, there is no easy way to reach them. Their
+// addresses are stored in a dynamically allocated array (the DTV) which is
+// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
+// being reachable from the static TLS, and the dynamic TLS being reachable from
+// the DTV. This is because the initial DTV is allocated before our interception
+// mechanism kicks in, and thus we don't recognize it as allocated memory. We
+// can't special-case it either, since we don't know its size.
+// Our solution is to include in the root set all allocations made from
+// ld-linux.so (which is where allocate_and_init() is implemented). This is
+// guaranteed to include all dynamic TLS blocks (and possibly other allocations
+// which we don't care about).
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
if (!flags()->use_tls) return;
if (!linker) return;
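
A sketch of the situation the comment block above describes: in a dlopen()ed module, a dynamic TLS variable can hold the only reference to a heap object, and the TLS block carrying that variable is reachable only through the DTV (the names below are illustrative):

  #include <stdlib.h>

  // Inside a dlopen()ed shared object:
  static __thread void *cache;  // dynamic TLS; the block holding it was
                                // allocated by glibc via __libc_memalign()
                                // in allocate_and_init() (elf/dl-tls.c)

  extern "C" void module_init() {
    cache = malloc(4096);  // reachable only through dynamic TLS; without
                           // rooting ld-linux.so allocations this would be
                           // reported as a false "leak"
  }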
diff --git a/libsanitizer/lsan/lsan_interceptors.cc b/libsanitizer/lsan/lsan_interceptors.cc
index 1940902..dfaad32 100644
--- a/libsanitizer/lsan/lsan_interceptors.cc
+++ b/libsanitizer/lsan/lsan_interceptors.cc
@@ -10,11 +10,11 @@
//
//===----------------------------------------------------------------------===//
-#include "interception/interception.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_interception.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
@@ -32,19 +32,19 @@ int pthread_key_create(unsigned *key, void (*destructor)(void* v));
int pthread_setspecific(unsigned key, const void *v);
}
-#define GET_STACK_TRACE \
- StackTrace stack; \
- { \
- uptr stack_top = 0, stack_bottom = 0; \
- ThreadContext *t; \
- bool fast = common_flags()->fast_unwind_on_malloc; \
- if (fast && (t = CurrentThreadContext())) { \
- stack_top = t->stack_end(); \
- stack_bottom = t->stack_begin(); \
- } \
- stack.Unwind(__sanitizer::common_flags()->malloc_context_size, \
- StackTrace::GetCurrentPc(), \
- GET_CURRENT_FRAME(), stack_top, stack_bottom, fast); \
+#define GET_STACK_TRACE \
+ StackTrace stack; \
+ { \
+ uptr stack_top = 0, stack_bottom = 0; \
+ ThreadContext *t; \
+ bool fast = common_flags()->fast_unwind_on_malloc; \
+ if (fast && (t = CurrentThreadContext())) { \
+ stack_top = t->stack_end(); \
+ stack_bottom = t->stack_begin(); \
+ } \
+ stack.Unwind(__sanitizer::common_flags()->malloc_context_size, \
+ StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), 0, \
+ stack_top, stack_bottom, fast); \
}
#define ENSURE_LSAN_INITED do { \
@@ -150,7 +150,7 @@ INTERCEPTOR(void*, pvalloc, uptr size) {
return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
}
-INTERCEPTOR(void, cfree, void *p) ALIAS("free");
+INTERCEPTOR(void, cfree, void *p) ALIAS(WRAPPER_NAME(free));
#define OPERATOR_NEW_BODY \
ENSURE_LSAN_INITED; \
@@ -171,9 +171,9 @@ void *operator new[](uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
Deallocate(ptr);
INTERCEPTOR_ATTRIBUTE
-void operator delete(void *ptr) { OPERATOR_DELETE_BODY; }
+void operator delete(void *ptr) throw() { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
-void operator delete[](void *ptr) { OPERATOR_DELETE_BODY; }
+void operator delete[](void *ptr) throw() { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
@@ -183,7 +183,8 @@ void operator delete[](void *ptr, std::nothrow_t const &) {
// We need this to intercept the __libc_memalign calls that are used to
// allocate dynamic TLS space in ld-linux.so.
-INTERCEPTOR(void *, __libc_memalign, uptr align, uptr s) ALIAS("memalign");
+INTERCEPTOR(void *, __libc_memalign, uptr align, uptr s)
+ ALIAS(WRAPPER_NAME(memalign));
///// Thread initialization and finalization. /////
@@ -236,7 +237,7 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr,
pthread_attr_init(&myattr);
attr = &myattr;
}
- AdjustStackSizeLinux(attr);
+ AdjustStackSize(attr);
int detached = 0;
pthread_attr_getdetachstate(attr, &detached);
ThreadParam p;
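
The two ALIAS() changes in this file (cfree and __libc_memalign) switch from aliasing the public symbol name to aliasing the interceptor's wrapper symbol. Roughly what the cfree line amounts to, paraphrasing the interception macros rather than quoting them (WRAPPER_NAME(free) is assumed here to expand to the string "__interceptor_free"):

  // cfree and free now share one wrapper definition, instead of cfree
  // aliasing whatever symbol the plain name "free" resolves to at link time.
  extern "C" void cfree(void *p)
      __attribute__((alias("__interceptor_free")));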