author    Martin Liska <mliska@suse.cz>  2023-04-26 09:42:29 +0200
committer Martin Liska <mliska@suse.cz>  2023-04-26 15:51:56 +0200
commit    d53b3d94aaf211ffb2159614f5aaaf03ceb861cc (patch)
tree      4202fe134ccc618029a07e7d1913c608bd7fb77f /libsanitizer/hwasan
parent    a8e1551bdb372aa3cfe754429b5efd6229ae5fdb (diff)
libsanitizer: merge from upstream (3185e47b5a8444e9fd).
Diffstat (limited to 'libsanitizer/hwasan')
-rw-r--r--  libsanitizer/hwasan/hwasan.cpp                       |  36
-rw-r--r--  libsanitizer/hwasan/hwasan.h                         |   2
-rw-r--r--  libsanitizer/hwasan/hwasan_allocation_functions.cpp  |  26
-rw-r--r--  libsanitizer/hwasan/hwasan_allocator.cpp             | 270
-rw-r--r--  libsanitizer/hwasan/hwasan_allocator.h               |  51
-rw-r--r--  libsanitizer/hwasan/hwasan_checks.h                  |  78
-rw-r--r--  libsanitizer/hwasan/hwasan_flags.inc                 |   3
-rw-r--r--  libsanitizer/hwasan/hwasan_fuchsia.cpp               |   8
-rw-r--r--  libsanitizer/hwasan/hwasan_interceptors.cpp          |  16
-rw-r--r--  libsanitizer/hwasan/hwasan_linux.cpp                 |  11
-rw-r--r--  libsanitizer/hwasan/hwasan_new_delete.cpp            |  24
-rw-r--r--  libsanitizer/hwasan/hwasan_poisoning.cpp             |   8
-rw-r--r--  libsanitizer/hwasan/hwasan_registers.h               |  56
-rw-r--r--  libsanitizer/hwasan/hwasan_report.cpp                |  21
-rw-r--r--  libsanitizer/hwasan/hwasan_setjmp_riscv64.S          |   2
-rw-r--r--  libsanitizer/hwasan/hwasan_thread.cpp                |  58
-rw-r--r--  libsanitizer/hwasan/hwasan_thread.h                  |  14
-rw-r--r--  libsanitizer/hwasan/hwasan_thread_list.h             |  43
18 files changed, 626 insertions(+), 101 deletions(-)
diff --git a/libsanitizer/hwasan/hwasan.cpp b/libsanitizer/hwasan/hwasan.cpp
index 9db4fb0..097136f 100644
--- a/libsanitizer/hwasan/hwasan.cpp
+++ b/libsanitizer/hwasan/hwasan.cpp
@@ -86,6 +86,9 @@ static void InitializeFlags() {
cf.clear_shadow_mmap_threshold = 4096 * (SANITIZER_ANDROID ? 2 : 8);
// Sigtrap is used in error reporting.
cf.handle_sigtrap = kHandleSignalExclusive;
+ // For now only tested on Linux. Other platforms can be turned on as they
+ // become ready.
+ cf.detect_leaks = cf.detect_leaks && SANITIZER_LINUX && !SANITIZER_ANDROID;
#if SANITIZER_ANDROID
// Let platform handle other signals. It is better at reporting them than we
@@ -106,6 +109,15 @@ static void InitializeFlags() {
RegisterHwasanFlags(&parser, f);
RegisterCommonFlags(&parser);
+#if CAN_SANITIZE_LEAKS
+ __lsan::Flags *lf = __lsan::flags();
+ lf->SetDefaults();
+
+ FlagParser lsan_parser;
+ __lsan::RegisterLsanFlags(&lsan_parser, lf);
+ RegisterCommonFlags(&lsan_parser);
+#endif
+
#if HWASAN_CONTAINS_UBSAN
__ubsan::Flags *uf = __ubsan::flags();
uf->SetDefaults();
@@ -118,12 +130,18 @@ static void InitializeFlags() {
// Override from user-specified string.
if (__hwasan_default_options)
parser.ParseString(__hwasan_default_options());
+#if CAN_SANITIZE_LEAKS
+ lsan_parser.ParseString(__lsan_default_options());
+#endif
#if HWASAN_CONTAINS_UBSAN
const char *ubsan_default_options = __ubsan_default_options();
ubsan_parser.ParseString(ubsan_default_options);
#endif
parser.ParseStringFromEnv("HWASAN_OPTIONS");
+#if CAN_SANITIZE_LEAKS
+ lsan_parser.ParseStringFromEnv("LSAN_OPTIONS");
+#endif
#if HWASAN_CONTAINS_UBSAN
ubsan_parser.ParseStringFromEnv("UBSAN_OPTIONS");
#endif
@@ -133,6 +151,12 @@ static void InitializeFlags() {
if (Verbosity()) ReportUnrecognizedFlags();
if (common_flags()->help) parser.PrintFlagDescriptions();
+ // Flag validation:
+ if (!CAN_SANITIZE_LEAKS && common_flags()->detect_leaks) {
+ Report("%s: detect_leaks is not supported on this platform.\n",
+ SanitizerToolName);
+ Die();
+ }
}
static void CheckUnwind() {
@@ -368,10 +392,20 @@ __attribute__((constructor(0))) void __hwasan_init() {
HwasanAllocatorInit();
HwasanInstallAtForkHandler();
+ if (CAN_SANITIZE_LEAKS) {
+ __lsan::InitCommonLsan();
+ InstallAtExitCheckLeaks();
+ }
+
#if HWASAN_CONTAINS_UBSAN
__ubsan::InitAsPlugin();
#endif
+ if (CAN_SANITIZE_LEAKS && common_flags()->detect_leaks) {
+ __lsan::ScopedInterceptorDisabler disabler;
+ Symbolizer::LateInitialize();
+ }
+
VPrintf(1, "HWAddressSanitizer init done\n");
hwasan_init_is_running = 0;
@@ -519,7 +553,7 @@ void __hwasan_store16_noabort(uptr p) {
}
void __hwasan_tag_memory(uptr p, u8 tag, uptr sz) {
- TagMemoryAligned(p, sz, tag);
+ TagMemoryAligned(UntagAddr(p), sz, tag);
}
uptr __hwasan_tag_pointer(uptr p, u8 tag) {
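The __hwasan_tag_memory fix above strips the pointer tag before the shadow update. As a hedged sketch of the arithmetic involved (the constants mirror HWASan's AArch64 top-byte layout, but treat the exact names and values here as assumptions, not the runtime's code):

#include <cstdint>

constexpr unsigned kAddressTagShift = 56;  // assumed: the tag lives in the top byte
constexpr uint64_t kAddressTagMask = 0xFFull << kAddressTagShift;

inline uint64_t UntagAddr(uint64_t tagged) {
  // Shadow must be indexed by the raw address, hence the fix above.
  return tagged & ~kAddressTagMask;
}

inline uint64_t AddTagToPointer(uint64_t addr, uint8_t tag) {
  return UntagAddr(addr) | (uint64_t(tag) << kAddressTagShift);
}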
diff --git a/libsanitizer/hwasan/hwasan.h b/libsanitizer/hwasan/hwasan.h
index ef4055a..c3d71a2 100644
--- a/libsanitizer/hwasan/hwasan.h
+++ b/libsanitizer/hwasan/hwasan.h
@@ -144,6 +144,8 @@ void HwasanOnDeadlySignal(int signo, void *info, void *context);
void HwasanInstallAtForkHandler();
+void InstallAtExitCheckLeaks();
+
void UpdateMemoryUsage();
void AppendToErrorMessageBuffer(const char *buffer);
diff --git a/libsanitizer/hwasan/hwasan_allocation_functions.cpp b/libsanitizer/hwasan/hwasan_allocation_functions.cpp
index 9cd82db..59ad633 100644
--- a/libsanitizer/hwasan/hwasan_allocation_functions.cpp
+++ b/libsanitizer/hwasan/hwasan_allocation_functions.cpp
@@ -16,14 +16,25 @@
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_allocator_dlsym.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_mallinfo.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
-#if !SANITIZER_FUCHSIA
-
using namespace __hwasan;
struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
static bool UseImpl() { return !hwasan_inited; }
+ static void OnAllocate(const void *ptr, uptr size) {
+# if CAN_SANITIZE_LEAKS
+ // Suppress leaks from dlerror(). Previously the dlsym hack used a global
+ // array, which the leak sanitizer treated as a root region.
+ __lsan_register_root_region(ptr, size);
+# endif
+ }
+ static void OnFree(const void *ptr, uptr size) {
+# if CAN_SANITIZE_LEAKS
+ __lsan_unregister_root_region(ptr, size);
+# endif
+ }
};
extern "C" {
@@ -143,12 +154,19 @@ void *__sanitizer_malloc(uptr size) {
} // extern "C"
-#if HWASAN_WITH_INTERCEPTORS
+#if HWASAN_WITH_INTERCEPTORS || SANITIZER_FUCHSIA
+#if SANITIZER_FUCHSIA
+// Fuchsia does not use the WRAP/wrapper machinery used by the interceptor
+// infrastructure.
+# define INTERCEPTOR_ALIAS(RET, FN, ARGS...) \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE RET FN( \
+ ARGS) ALIAS("__sanitizer_" #FN)
+#else
# define INTERCEPTOR_ALIAS(RET, FN, ARGS...) \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE RET WRAP(FN)(ARGS) \
ALIAS("__sanitizer_" #FN); \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE RET FN( \
ARGS) ALIAS("__sanitizer_" #FN)
+#endif
INTERCEPTOR_ALIAS(int, posix_memalign, void **memptr, SIZE_T alignment,
SIZE_T size);
@@ -171,5 +189,3 @@ INTERCEPTOR_ALIAS(int, mallopt, int cmd, int value);
INTERCEPTOR_ALIAS(void, malloc_stats, void);
# endif
#endif // #if HWASAN_WITH_INTERCEPTORS
-
-#endif // SANITIZER_FUCHSIA
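The new OnAllocate/OnFree hooks rely on the public LSan root-region interface. A minimal usage sketch of that interface from application code (declared in sanitizer/lsan_interface.h; the arena here is purely illustrative):

#include <sanitizer/lsan_interface.h>
#include <cstdlib>

int main() {
  void *arena = malloc(4096);
  __lsan_register_root_region(arena, 4096);    // pointers stored here count as roots
  // ... stash otherwise-unreachable heap pointers in the arena ...
  __lsan_unregister_root_region(arena, 4096);  // resume normal leak scanning
  free(arena);
  return 0;
}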
diff --git a/libsanitizer/hwasan/hwasan_allocator.cpp b/libsanitizer/hwasan/hwasan_allocator.cpp
index 8424551..d3cb5c8 100644
--- a/libsanitizer/hwasan/hwasan_allocator.cpp
+++ b/libsanitizer/hwasan/hwasan_allocator.cpp
@@ -21,6 +21,7 @@
#include "hwasan_malloc_bisect.h"
#include "hwasan_thread.h"
#include "hwasan_report.h"
+#include "lsan/lsan_common.h"
namespace __hwasan {
@@ -32,40 +33,39 @@ static atomic_uint8_t hwasan_allocator_tagging_enabled;
static constexpr tag_t kFallbackAllocTag = 0xBB & kTagMask;
static constexpr tag_t kFallbackFreeTag = 0xBC;
-enum RightAlignMode {
- kRightAlignNever,
- kRightAlignSometimes,
- kRightAlignAlways
+enum {
+ // Either just allocated by the underlying allocator, but the chunk metadata
+ // is not yet ready, or almost returned to the underlying allocator and the
+ // metadata is already meaningless.
+ CHUNK_INVALID = 0,
+ // The chunk is allocated and not yet freed.
+ CHUNK_ALLOCATED = 1,
};
+
// Initialized in HwasanAllocatorInit, and never changed.
static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];
+static uptr max_malloc_size;
bool HwasanChunkView::IsAllocated() const {
- return metadata_ && metadata_->alloc_context_id &&
- metadata_->get_requested_size();
-}
-
-// Aligns the 'addr' right to the granule boundary.
-static uptr AlignRight(uptr addr, uptr requested_size) {
- uptr tail_size = requested_size % kShadowAlignment;
- if (!tail_size) return addr;
- return addr + kShadowAlignment - tail_size;
+ return metadata_ && metadata_->IsAllocated();
}
uptr HwasanChunkView::Beg() const {
- if (metadata_ && metadata_->right_aligned)
- return AlignRight(block_, metadata_->get_requested_size());
return block_;
}
uptr HwasanChunkView::End() const {
return Beg() + UsedSize();
}
uptr HwasanChunkView::UsedSize() const {
- return metadata_->get_requested_size();
+ return metadata_->GetRequestedSize();
}
u32 HwasanChunkView::GetAllocStackId() const {
- return metadata_->alloc_context_id;
+ return metadata_->GetAllocStackId();
+}
+
+u32 HwasanChunkView::GetAllocThreadId() const {
+ return metadata_->GetAllocThreadId();
}
uptr HwasanChunkView::ActualSize() const {
@@ -76,10 +76,58 @@ bool HwasanChunkView::FromSmallHeap() const {
return allocator.FromPrimary(reinterpret_cast<void *>(block_));
}
+bool HwasanChunkView::AddrIsInside(uptr addr) const {
+ return (addr >= Beg()) && (addr < Beg() + UsedSize());
+}
+
+inline void Metadata::SetAllocated(u32 stack, u64 size) {
+ Thread *t = GetCurrentThread();
+ u64 context = t ? t->unique_id() : kMainTid;
+ context <<= 32;
+ context += stack;
+ requested_size_low = size & ((1ul << 32) - 1);
+ requested_size_high = size >> 32;
+ atomic_store(&alloc_context_id, context, memory_order_relaxed);
+ atomic_store(&chunk_state, CHUNK_ALLOCATED, memory_order_release);
+}
+
+inline void Metadata::SetUnallocated() {
+ atomic_store(&chunk_state, CHUNK_INVALID, memory_order_release);
+ requested_size_low = 0;
+ requested_size_high = 0;
+ atomic_store(&alloc_context_id, 0, memory_order_relaxed);
+}
+
+inline bool Metadata::IsAllocated() const {
+ return atomic_load(&chunk_state, memory_order_relaxed) == CHUNK_ALLOCATED;
+}
+
+inline u64 Metadata::GetRequestedSize() const {
+ return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
+}
+
+inline u32 Metadata::GetAllocStackId() const {
+ return atomic_load(&alloc_context_id, memory_order_relaxed);
+}
+
+inline u32 Metadata::GetAllocThreadId() const {
+ u64 context = atomic_load(&alloc_context_id, memory_order_relaxed);
+ u32 tid = context >> 32;
+ return tid;
+}
+
void GetAllocatorStats(AllocatorStatCounters s) {
allocator.GetStats(s);
}
+inline void Metadata::SetLsanTag(__lsan::ChunkTag tag) {
+ lsan_tag = tag;
+}
+
+inline __lsan::ChunkTag Metadata::GetLsanTag() const {
+ return static_cast<__lsan::ChunkTag>(lsan_tag);
+}
+
uptr GetAliasRegionStart() {
#if defined(HWASAN_ALIASING_MODE)
constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1);
@@ -105,6 +153,12 @@ void HwasanAllocatorInit() {
GetAliasRegionStart());
for (uptr i = 0; i < sizeof(tail_magic); i++)
tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
+ if (common_flags()->max_allocation_size_mb) {
+ max_malloc_size = common_flags()->max_allocation_size_mb << 20;
+ max_malloc_size = Min(max_malloc_size, kMaxAllowedMallocSize);
+ } else {
+ max_malloc_size = kMaxAllowedMallocSize;
+ }
}
void HwasanAllocatorLock() { allocator.ForceLock(); }
@@ -124,13 +178,16 @@ static uptr TaggedSize(uptr size) {
static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
bool zeroise) {
- if (orig_size > kMaxAllowedMallocSize) {
+ // Keep this consistent with LSAN and ASAN behavior.
+ if (UNLIKELY(orig_size == 0))
+ orig_size = 1;
+ if (UNLIKELY(orig_size > max_malloc_size)) {
if (AllocatorMayReturnNull()) {
Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
orig_size);
return nullptr;
}
- ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
+ ReportAllocationSizeTooBig(orig_size, max_malloc_size, stack);
}
if (UNLIKELY(IsRssLimitExceeded())) {
if (AllocatorMayReturnNull())
@@ -155,11 +212,6 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
return nullptr;
ReportOutOfMemory(size, stack);
}
- Metadata *meta =
- reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
- meta->set_requested_size(orig_size);
- meta->alloc_context_id = StackDepotPut(*stack);
- meta->right_aligned = false;
if (zeroise) {
internal_memset(allocated, 0, size);
} else if (flags()->max_malloc_fill_size > 0) {
@@ -199,6 +251,13 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
}
}
+ Metadata *meta =
+ reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
+#if CAN_SANITIZE_LEAKS
+ meta->SetLsanTag(__lsan::DisabledInThisThread() ? __lsan::kIgnored
+ : __lsan::kDirectlyLeaked);
+#endif
+ meta->SetAllocated(StackDepotPut(*stack), orig_size);
RunMallocHooks(user_ptr, size);
return user_ptr;
}
@@ -244,9 +303,10 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
return;
}
- uptr orig_size = meta->get_requested_size();
+ uptr orig_size = meta->GetRequestedSize();
u32 free_context_id = StackDepotPut(*stack);
- u32 alloc_context_id = meta->alloc_context_id;
+ u32 alloc_context_id = meta->GetAllocStackId();
+ u32 alloc_thread_id = meta->GetAllocThreadId();
// Check tail magic.
uptr tagged_size = TaggedSize(orig_size);
@@ -265,8 +325,8 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
orig_size, tail_magic);
}
- meta->set_requested_size(0);
- meta->alloc_context_id = 0;
+ // TODO(kstoimenov): consider meta->SetUnallocated(free_context_id).
+ meta->SetUnallocated();
// This memory will not be reused by anyone else, so we are free to keep it
// poisoned.
Thread *t = GetCurrentThread();
@@ -298,8 +358,9 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
if (t) {
allocator.Deallocate(t->allocator_cache(), aligned_ptr);
if (auto *ha = t->heap_allocations())
- ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
- free_context_id, static_cast<u32>(orig_size)});
+ ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_thread_id,
+ alloc_context_id, free_context_id,
+ static_cast<u32>(orig_size)});
} else {
SpinMutexLock l(&fallback_mutex);
AllocatorCache *cache = &fallback_allocator_cache;
@@ -322,7 +383,7 @@ static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
internal_memcpy(
UntagPtr(tagged_ptr_new), untagged_ptr_old,
- Min(new_size, static_cast<uptr>(meta->get_requested_size())));
+ Min(new_size, static_cast<uptr>(meta->GetRequestedSize())));
HwasanDeallocate(stack, tagged_ptr_old);
}
return tagged_ptr_new;
@@ -348,19 +409,30 @@ HwasanChunkView FindHeapChunkByAddress(uptr address) {
return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}
+static const void *AllocationBegin(const void *p) {
+ const void *untagged_ptr = UntagPtr(p);
+ if (!untagged_ptr)
+ return nullptr;
+
+ const void *beg = allocator.GetBlockBegin(untagged_ptr);
+ if (!beg)
+ return nullptr;
+
+ Metadata *b = (Metadata *)allocator.GetMetaData(beg);
+ if (b->GetRequestedSize() == 0)
+ return nullptr;
+
+ tag_t tag = GetTagFromPointer((uptr)p);
+ return (const void *)AddTagToPointer((uptr)beg, tag);
+}
+
static uptr AllocationSize(const void *tagged_ptr) {
const void *untagged_ptr = UntagPtr(tagged_ptr);
if (!untagged_ptr) return 0;
const void *beg = allocator.GetBlockBegin(untagged_ptr);
Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr);
- if (b->right_aligned) {
- if (beg != reinterpret_cast<void *>(RoundDownTo(
- reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment)))
- return 0;
- } else {
- if (beg != untagged_ptr) return 0;
- }
- return b->get_requested_size();
+ if (beg != untagged_ptr) return 0;
+ return b->GetRequestedSize();
}
void *hwasan_malloc(uptr size, StackTrace *stack) {
@@ -451,6 +523,122 @@ void hwasan_free(void *ptr, StackTrace *stack) {
} // namespace __hwasan
+// --- Implementation of LSan-specific functions --- {{{1
+namespace __lsan {
+
+void LockAllocator() {
+ __hwasan::HwasanAllocatorLock();
+}
+
+void UnlockAllocator() {
+ __hwasan::HwasanAllocatorUnlock();
+}
+
+void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
+ *begin = (uptr)&__hwasan::allocator;
+ *end = *begin + sizeof(__hwasan::allocator);
+}
+
+uptr PointsIntoChunk(void *p) {
+ p = __hwasan::InTaggableRegion(reinterpret_cast<uptr>(p)) ? UntagPtr(p) : p;
+ uptr addr = reinterpret_cast<uptr>(p);
+ uptr chunk =
+ reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBeginFastLocked(p));
+ if (!chunk)
+ return 0;
+ __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
+ __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
+ if (!metadata || !metadata->IsAllocated())
+ return 0;
+ if (addr < chunk + metadata->GetRequestedSize())
+ return chunk;
+ if (IsSpecialCaseOfOperatorNew0(chunk, metadata->GetRequestedSize(), addr))
+ return chunk;
+ return 0;
+}
+
+uptr GetUserBegin(uptr chunk) {
+ if (__hwasan::InTaggableRegion(chunk))
+ CHECK_EQ(UntagAddr(chunk), chunk);
+ void *block = __hwasan::allocator.GetBlockBeginFastLocked(
+ reinterpret_cast<void *>(chunk));
+ if (!block)
+ return 0;
+ __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
+ __hwasan::allocator.GetMetaData(block));
+ if (!metadata || !metadata->IsAllocated())
+ return 0;
+
+ return reinterpret_cast<uptr>(block);
+}
+
+uptr GetUserAddr(uptr chunk) {
+ tag_t mem_tag = *(tag_t *)__hwasan::MemToShadow(chunk);
+ if (!__hwasan::InTaggableRegion(chunk))
+ return chunk;
+ return AddTagToPointer(chunk, mem_tag);
+}
+
+LsanMetadata::LsanMetadata(uptr chunk) {
+ if (__hwasan::InTaggableRegion(chunk))
+ CHECK_EQ(UntagAddr(chunk), chunk);
+ metadata_ =
+ chunk ? __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk))
+ : nullptr;
+}
+
+bool LsanMetadata::allocated() const {
+ if (!metadata_)
+ return false;
+ __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
+ return m->IsAllocated();
+}
+
+ChunkTag LsanMetadata::tag() const {
+ __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
+ return m->GetLsanTag();
+}
+
+void LsanMetadata::set_tag(ChunkTag value) {
+ __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
+ m->SetLsanTag(value);
+}
+
+uptr LsanMetadata::requested_size() const {
+ __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
+ return m->GetRequestedSize();
+}
+
+u32 LsanMetadata::stack_trace_id() const {
+ __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
+ return m->GetAllocStackId();
+}
+
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ __hwasan::allocator.ForEachChunk(callback, arg);
+}
+
+IgnoreObjectResult IgnoreObject(const void *p) {
+ p = __hwasan::InTaggableRegion(reinterpret_cast<uptr>(p)) ? UntagPtr(p) : p;
+ uptr addr = reinterpret_cast<uptr>(p);
+ uptr chunk = reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBegin(p));
+ if (!chunk)
+ return kIgnoreObjectInvalid;
+ __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
+ __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
+ if (!metadata || !metadata->IsAllocated())
+ return kIgnoreObjectInvalid;
+ if (addr >= chunk + metadata->GetRequestedSize())
+ return kIgnoreObjectInvalid;
+ if (metadata->GetLsanTag() == kIgnored)
+ return kIgnoreObjectAlreadyIgnored;
+
+ metadata->SetLsanTag(kIgnored);
+ return kIgnoreObjectSuccess;
+}
+
+} // namespace __lsan
+
using namespace __hwasan;
void __hwasan_enable_allocator_tagging() {
@@ -481,4 +669,8 @@ uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
+const void *__sanitizer_get_allocated_begin(const void *p) {
+ return AllocationBegin(p);
+}
+
uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
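AllocationBegin above backs the exported __sanitizer_get_allocated_begin. A hedged usage sketch, assuming a program built with -fsanitize=hwaddress and the public header sanitizer/allocator_interface.h:

#include <sanitizer/allocator_interface.h>
#include <cstdio>
#include <cstdlib>

int main() {
  char *p = static_cast<char *>(malloc(64));
  const void *beg = __sanitizer_get_allocated_begin(p + 10);  // interior pointer
  size_t size = __sanitizer_get_allocated_size(beg);
  printf("begin=%p size=%zu\n", beg, size);  // expected: begin == p, size == 64
  free(p);
  return 0;
}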
diff --git a/libsanitizer/hwasan/hwasan_allocator.h b/libsanitizer/hwasan/hwasan_allocator.h
index bae53b5..b7a06da 100644
--- a/libsanitizer/hwasan/hwasan_allocator.h
+++ b/libsanitizer/hwasan/hwasan_allocator.h
@@ -17,6 +17,7 @@
#include "hwasan_interface_internal.h"
#include "hwasan_mapping.h"
#include "hwasan_poisoning.h"
+#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
@@ -31,18 +32,25 @@
namespace __hwasan {
struct Metadata {
+ private:
+ atomic_uint64_t alloc_context_id;
u32 requested_size_low;
- u32 requested_size_high : 31;
- u32 right_aligned : 1;
- u32 alloc_context_id;
- u64 get_requested_size() {
- return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
- }
- void set_requested_size(u64 size) {
- requested_size_low = size & ((1ul << 32) - 1);
- requested_size_high = size >> 32;
- }
+ u16 requested_size_high;
+ atomic_uint8_t chunk_state;
+ u8 lsan_tag;
+
+ public:
+ inline void SetAllocated(u32 stack, u64 size);
+ inline void SetUnallocated();
+
+ inline bool IsAllocated() const;
+ inline u64 GetRequestedSize() const;
+ inline u32 GetAllocStackId() const;
+ inline u32 GetAllocThreadId() const;
+ inline void SetLsanTag(__lsan::ChunkTag tag);
+ inline __lsan::ChunkTag GetLsanTag() const;
};
+static_assert(sizeof(Metadata) == 16);
struct HwasanMapUnmapCallback {
void OnMap(uptr p, uptr size) const { UpdateMemoryUsage(); }
@@ -61,15 +69,21 @@ struct AP64 {
#if defined(HWASAN_ALIASING_MODE)
static const uptr kSpaceSize = 1ULL << kAddressTagShift;
+ typedef __sanitizer::DefaultSizeClassMap SizeClassMap;
+#elif SANITIZER_LINUX && !SANITIZER_ANDROID
+ static const uptr kSpaceSize = 0x40000000000ULL; // 4T.
+ typedef __sanitizer::DefaultSizeClassMap SizeClassMap;
#else
- static const uptr kSpaceSize = 0x2000000000ULL;
+ static const uptr kSpaceSize = 0x2000000000ULL; // 128G.
+ typedef __sanitizer::VeryDenseSizeClassMap SizeClassMap;
#endif
+
static const uptr kMetadataSize = sizeof(Metadata);
- typedef __sanitizer::VeryDenseSizeClassMap SizeClassMap;
using AddressSpaceView = LocalAddressSpaceView;
typedef HwasanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
+
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;
@@ -87,8 +101,12 @@ class HwasanChunkView {
uptr UsedSize() const; // Size requested by the user
uptr ActualSize() const; // Size allocated by the allocator.
u32 GetAllocStackId() const;
+ u32 GetAllocThreadId() const;
bool FromSmallHeap() const;
+ bool AddrIsInside(uptr addr) const;
+
private:
+ friend class __lsan::LsanMetadata;
uptr block_;
Metadata *const metadata_;
};
@@ -97,13 +115,12 @@ HwasanChunkView FindHeapChunkByAddress(uptr address);
// Information about one (de)allocation that happened in the past.
// These are recorded in a thread-local ring buffer.
-// TODO: this is currently 24 bytes (20 bytes + alignment).
-// Compress it to 16 bytes or extend it to be more useful.
struct HeapAllocationRecord {
uptr tagged_addr;
- u32 alloc_context_id;
- u32 free_context_id;
- u32 requested_size;
+ u32 alloc_thread_id;
+ u32 alloc_context_id;
+ u32 free_context_id;
+ u32 requested_size;
};
typedef RingBuffer<HeapAllocationRecord> HeapAllocationsRingBuffer;
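The reworked Metadata packs the allocating thread id and the stack depot id into a single 64-bit word, as SetAllocated and GetAllocThreadId show. A standalone restatement of that packing (illustrative helpers, not part of the header):

#include <cstdint>

constexpr uint64_t PackContext(uint32_t thread_id, uint32_t stack_id) {
  return (uint64_t(thread_id) << 32) | stack_id;  // thread id high, stack id low
}
constexpr uint32_t ContextThreadId(uint64_t ctx) { return ctx >> 32; }
constexpr uint32_t ContextStackId(uint64_t ctx) {
  return static_cast<uint32_t>(ctx);
}

static_assert(ContextThreadId(PackContext(7, 42)) == 7, "high half");
static_assert(ContextStackId(PackContext(7, 42)) == 42, "low half");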
diff --git a/libsanitizer/hwasan/hwasan_checks.h b/libsanitizer/hwasan/hwasan_checks.h
index b0b37d7..514d351 100644
--- a/libsanitizer/hwasan/hwasan_checks.h
+++ b/libsanitizer/hwasan/hwasan_checks.h
@@ -15,17 +15,49 @@
#include "hwasan_allocator.h"
#include "hwasan_mapping.h"
+#include "hwasan_registers.h"
#include "sanitizer_common/sanitizer_common.h"
namespace __hwasan {
-template <unsigned X>
+
+enum class ErrorAction { Abort, Recover };
+enum class AccessType { Load, Store };
+
+// Used when the access size is known.
+constexpr unsigned SigTrapEncoding(ErrorAction EA, AccessType AT,
+ unsigned LogSize) {
+ return 0x20 * (EA == ErrorAction::Recover) +
+ 0x10 * (AT == AccessType::Store) + LogSize;
+}
+
+// Used when the access size varies at runtime.
+constexpr unsigned SigTrapEncoding(ErrorAction EA, AccessType AT) {
+ return SigTrapEncoding(EA, AT, 0xf);
+}
+
+template <ErrorAction EA, AccessType AT, size_t LogSize>
__attribute__((always_inline)) static void SigTrap(uptr p) {
-#if defined(__aarch64__)
+ // Other platforms like Linux can use signals for intercepting an exception
+ // and dispatching to HandleTagMismatch. The Fuchsia implementation doesn't
+ // use signals, so we can call it here directly instead.
+#if CAN_GET_REGISTERS && SANITIZER_FUCHSIA
+ auto regs = GetRegisters();
+ size_t size = 2 << LogSize;
+ AccessInfo access_info = {
+ .addr = p,
+ .size = size,
+ .is_store = AT == AccessType::Store,
+ .is_load = AT == AccessType::Load,
+ .recover = EA == ErrorAction::Recover,
+ };
+ HandleTagMismatch(access_info, (uptr)__builtin_return_address(0),
+ (uptr)__builtin_frame_address(0), /*uc=*/nullptr, regs.x);
+#elif defined(__aarch64__)
(void)p;
// 0x900 is added so as not to interfere with the kernel's use of lower
// brk immediate values.
register uptr x0 asm("x0") = p;
- asm("brk %1\n\t" ::"r"(x0), "n"(0x900 + X));
+ asm("brk %1\n\t" ::"r"(x0), "n"(0x900 + SigTrapEncoding(EA, AT, LogSize)));
#elif defined(__x86_64__)
// INT3 + NOP DWORD ptr [EAX + X] to pass X to our signal handler, 5 bytes
// total. The pointer is passed via rdi.
@@ -34,7 +66,7 @@ __attribute__((always_inline)) static void SigTrap(uptr p) {
// different nop command, the three bytes one).
asm volatile(
"int3\n"
- "nopl %c0(%%rax)\n" ::"n"(0x40 + X),
+ "nopl %c0(%%rax)\n" ::"n"(0x40 + SigTrapEncoding(EA, AT, LogSize)),
"D"(p));
#elif SANITIZER_RISCV64
// Put pointer into x10
@@ -44,7 +76,7 @@ __attribute__((always_inline)) static void SigTrap(uptr p) {
asm volatile(
"ebreak\n"
"addiw x0, x0, %1\n" ::"r"(x10),
- "I"(0x40 + X));
+ "I"(0x40 + SigTrapEncoding(EA, AT, LogSize)));
#else
// FIXME: not always sigill.
__builtin_trap();
@@ -53,17 +85,31 @@ __attribute__((always_inline)) static void SigTrap(uptr p) {
}
// Version with access size which is not a power of 2
-template <unsigned X>
+template <ErrorAction EA, AccessType AT>
__attribute__((always_inline)) static void SigTrap(uptr p, uptr size) {
-#if defined(__aarch64__)
+ // Other platforms like Linux can use signals for intercepting an exception
+ // and dispatching to HandleTagMismatch. The Fuchsia implementation doesn't
+ // use signals, so we can call it here directly instead.
+#if CAN_GET_REGISTERS && SANITIZER_FUCHSIA
+ auto regs = GetRegisters();
+ AccessInfo access_info = {
+ .addr = p,
+ .size = size,
+ .is_store = AT == AccessType::Store,
+ .is_load = AT == AccessType::Load,
+ .recover = EA == ErrorAction::Recover,
+ };
+ HandleTagMismatch(access_info, (uptr)__builtin_return_address(0),
+ (uptr)__builtin_frame_address(0), /*uc=*/nullptr, regs.x);
+#elif defined(__aarch64__)
register uptr x0 asm("x0") = p;
register uptr x1 asm("x1") = size;
- asm("brk %2\n\t" ::"r"(x0), "r"(x1), "n"(0x900 + X));
+ asm("brk %2\n\t" ::"r"(x0), "r"(x1), "n"(0x900 + SigTrapEncoding(EA, AT)));
#elif defined(__x86_64__)
// Size is stored in rsi.
asm volatile(
"int3\n"
- "nopl %c0(%%rax)\n" ::"n"(0x40 + X),
+ "nopl %c0(%%rax)\n" ::"n"(0x40 + SigTrapEncoding(EA, AT)),
"D"(p), "S"(size));
#elif SANITIZER_RISCV64
// Put access size into x11
@@ -72,7 +118,7 @@ __attribute__((always_inline)) static void SigTrap(uptr p, uptr size) {
asm volatile(
"ebreak\n"
"addiw x0, x0, %2\n" ::"r"(x10),
- "r"(x11), "I"(0x40 + X));
+ "r"(x11), "I"(0x40 + SigTrapEncoding(EA, AT)));
#else
__builtin_trap();
#endif
@@ -94,9 +140,6 @@ __attribute__((always_inline, nodebug)) static bool PossiblyShortTagMatches(
return *(u8 *)(ptr | (kShadowAlignment - 1)) == ptr_tag;
}
-enum class ErrorAction { Abort, Recover };
-enum class AccessType { Load, Store };
-
template <ErrorAction EA, AccessType AT, unsigned LogSize>
__attribute__((always_inline, nodebug)) static void CheckAddress(uptr p) {
if (!InTaggableRegion(p))
@@ -104,8 +147,7 @@ __attribute__((always_inline, nodebug)) static void CheckAddress(uptr p) {
uptr ptr_raw = p & ~kAddressTagMask;
tag_t mem_tag = *(tag_t *)MemToShadow(ptr_raw);
if (UNLIKELY(!PossiblyShortTagMatches(mem_tag, p, 1 << LogSize))) {
- SigTrap<0x20 * (EA == ErrorAction::Recover) +
- 0x10 * (AT == AccessType::Store) + LogSize>(p);
+ SigTrap<EA, AT, LogSize>(p);
if (EA == ErrorAction::Abort)
__builtin_unreachable();
}
@@ -122,8 +164,7 @@ __attribute__((always_inline, nodebug)) static void CheckAddressSized(uptr p,
tag_t *shadow_last = (tag_t *)MemToShadow(ptr_raw + sz);
for (tag_t *t = shadow_first; t < shadow_last; ++t)
if (UNLIKELY(ptr_tag != *t)) {
- SigTrap<0x20 * (EA == ErrorAction::Recover) +
- 0x10 * (AT == AccessType::Store) + 0xf>(p, sz);
+ SigTrap<EA, AT>(p, sz);
if (EA == ErrorAction::Abort)
__builtin_unreachable();
}
@@ -132,8 +173,7 @@ __attribute__((always_inline, nodebug)) static void CheckAddressSized(uptr p,
if (UNLIKELY(tail_sz != 0 &&
!PossiblyShortTagMatches(
*shadow_last, end & ~(kShadowAlignment - 1), tail_sz))) {
- SigTrap<0x20 * (EA == ErrorAction::Recover) +
- 0x10 * (AT == AccessType::Store) + 0xf>(p, sz);
+ SigTrap<EA, AT>(p, sz);
if (EA == ErrorAction::Abort)
__builtin_unreachable();
}
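With the trap encoding centralized in SigTrapEncoding, values that previously appeared as opaque template arguments can be spelled out. The checks below are illustrative and assume the definitions from the hunk above:

static_assert(SigTrapEncoding(ErrorAction::Abort, AccessType::Load, 0) == 0x00, "");
static_assert(SigTrapEncoding(ErrorAction::Abort, AccessType::Store, 3) == 0x13, "");
static_assert(SigTrapEncoding(ErrorAction::Recover, AccessType::Load, 2) == 0x22, "");
// Runtime-sized accesses use 0xf as the LogSize marker.
static_assert(SigTrapEncoding(ErrorAction::Recover, AccessType::Store) == 0x3f, "");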
diff --git a/libsanitizer/hwasan/hwasan_flags.inc b/libsanitizer/hwasan/hwasan_flags.inc
index 4a226ee..978fa46 100644
--- a/libsanitizer/hwasan/hwasan_flags.inc
+++ b/libsanitizer/hwasan/hwasan_flags.inc
@@ -23,6 +23,9 @@ HWASAN_FLAG(bool, tag_in_free, true, "")
HWASAN_FLAG(bool, print_stats, false, "")
HWASAN_FLAG(bool, halt_on_error, true, "")
HWASAN_FLAG(bool, atexit, false, "")
+HWASAN_FLAG(
+ bool, print_live_threads_info, true,
+ "If set, prints the remaining threads in report as an extra information.")
// Test only flag to disable malloc/realloc/free memory tagging on startup.
// Tagging can be reenabled with __hwasan_enable_allocator_tagging().
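The new flag can be set via the HWASAN_OPTIONS environment variable or, equivalently, through the user-definable defaults hook that InitializeFlags consults before parsing the environment; for example:

// Compiled into the instrumented program; parsed before HWASAN_OPTIONS.
extern "C" const char *__hwasan_default_options() {
  return "print_live_threads_info=0";
}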
diff --git a/libsanitizer/hwasan/hwasan_fuchsia.cpp b/libsanitizer/hwasan/hwasan_fuchsia.cpp
index 858fac0..d1696f8 100644
--- a/libsanitizer/hwasan/hwasan_fuchsia.cpp
+++ b/libsanitizer/hwasan/hwasan_fuchsia.cpp
@@ -185,6 +185,8 @@ void InstallAtExitHandler() {}
void HwasanInstallAtForkHandler() {}
+void InstallAtExitCheckLeaks() {}
+
void InitializeOsSupport() {
#ifdef __aarch64__
uint32_t features = 0;
@@ -202,6 +204,12 @@ void InitializeOsSupport() {
} // namespace __hwasan
+namespace __lsan {
+
+bool UseExitcodeOnLeak() { return __hwasan::flags()->halt_on_error; }
+
+} // namespace __lsan
+
extern "C" {
void *__sanitizer_before_thread_create_hook(thrd_t thread, bool detached,
diff --git a/libsanitizer/hwasan/hwasan_interceptors.cpp b/libsanitizer/hwasan/hwasan_interceptors.cpp
index c67927d..16ac85e 100644
--- a/libsanitizer/hwasan/hwasan_interceptors.cpp
+++ b/libsanitizer/hwasan/hwasan_interceptors.cpp
@@ -39,11 +39,19 @@ static void *HwasanThreadStartFunc(void *arg) {
INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void*),
void * param) {
- ScopedTaggingDisabler disabler;
+ EnsureMainThreadIDIsCorrect();
+ ScopedTaggingDisabler tagging_disabler;
ThreadStartArg *A = reinterpret_cast<ThreadStartArg *> (MmapOrDie(
GetPageSizeCached(), "pthread_create"));
*A = {callback, param};
- int res = REAL(pthread_create)(th, attr, &HwasanThreadStartFunc, A);
+ int res;
+ {
+ // ASAN uses the same approach to disable leaks from pthread_create.
+# if CAN_SANITIZE_LEAKS
+ __lsan::ScopedInterceptorDisabler lsan_disabler;
+# endif
+ res = REAL(pthread_create)(th, attr, &HwasanThreadStartFunc, A);
+ }
return res;
}
@@ -220,6 +228,10 @@ INTERCEPTOR(void, longjmp, __hw_jmp_buf env, int val) {
namespace __hwasan {
int OnExit() {
+ if (CAN_SANITIZE_LEAKS && common_flags()->detect_leaks &&
+ __lsan::HasReportedLeaks()) {
+ return common_flags()->exitcode;
+ }
// FIXME: ask frontend whether we need to return failure.
return 0;
}
diff --git a/libsanitizer/hwasan/hwasan_linux.cpp b/libsanitizer/hwasan/hwasan_linux.cpp
index 88ccfde..d3e4b53 100644
--- a/libsanitizer/hwasan/hwasan_linux.cpp
+++ b/libsanitizer/hwasan/hwasan_linux.cpp
@@ -541,6 +541,17 @@ void HwasanInstallAtForkHandler() {
pthread_atfork(before, after, after);
}
+void InstallAtExitCheckLeaks() {
+ if (CAN_SANITIZE_LEAKS) {
+ if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
+ if (flags()->halt_on_error)
+ Atexit(__lsan::DoLeakCheck);
+ else
+ Atexit(__lsan::DoRecoverableLeakCheckVoid);
+ }
+ }
+}
+
} // namespace __hwasan
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
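Together with the InitializeFlags changes, the atexit hook above means a trivially leaking program now reports at exit on Linux. A minimal sketch of such a test, assuming -fsanitize=hwaddress and detect_leaks=1:

#include <cstdlib>

int main() {
  void *p = malloc(32);  // never freed; flagged by the atexit leak check
  (void)p;
  return 0;  // DoLeakCheck (halt_on_error) or the recoverable variant runs here
}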
diff --git a/libsanitizer/hwasan/hwasan_new_delete.cpp b/libsanitizer/hwasan/hwasan_new_delete.cpp
index 495046a..f0fd372 100644
--- a/libsanitizer/hwasan/hwasan_new_delete.cpp
+++ b/libsanitizer/hwasan/hwasan_new_delete.cpp
@@ -92,6 +92,14 @@ INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
void *ptr, std::nothrow_t const &) {
OPERATOR_DELETE_BODY;
}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+ void *ptr, size_t) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+ void *ptr, size_t) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
#endif // OPERATOR_NEW_BODY
@@ -134,5 +142,21 @@ INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
void *ptr, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
OPERATOR_DELETE_BODY;
}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+ void *ptr, size_t, std::align_val_t) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+ void *ptr, size_t, std::align_val_t) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+ void *ptr, size_t, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+ void *ptr, size_t, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
#endif // OPERATOR_NEW_ALIGN_BODY
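The added overloads cover sized and sized-aligned deallocation. A hedged illustration of when a compiler selects them (under C++17 with sized deallocation enabled; the lowering shown in the comment is typical, not guaranteed):

#include <new>

struct alignas(32) Widget { char buf[64]; };

int main() {
  Widget *w = new Widget;
  delete w;  // may lower to operator delete(ptr, sizeof(Widget), std::align_val_t{32})
  return 0;
}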
diff --git a/libsanitizer/hwasan/hwasan_poisoning.cpp b/libsanitizer/hwasan/hwasan_poisoning.cpp
index 5aafdb1..a4e5935 100644
--- a/libsanitizer/hwasan/hwasan_poisoning.cpp
+++ b/libsanitizer/hwasan/hwasan_poisoning.cpp
@@ -26,3 +26,11 @@ uptr TagMemory(uptr p, uptr size, tag_t tag) {
}
} // namespace __hwasan
+
+// --- Implementation of LSan-specific functions --- {{{1
+namespace __lsan {
+bool WordIsPoisoned(uptr addr) {
+ // FIXME: implement actual tag checking.
+ return false;
+}
+} // namespace __lsan
diff --git a/libsanitizer/hwasan/hwasan_registers.h b/libsanitizer/hwasan/hwasan_registers.h
new file mode 100644
index 0000000..48a140f
--- /dev/null
+++ b/libsanitizer/hwasan/hwasan_registers.h
@@ -0,0 +1,56 @@
+//===-- hwasan_registers.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This describes the register state retrieved by hwasan for error reporting.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_REGISTERS_H
+#define HWASAN_REGISTERS_H
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if defined(__aarch64__)
+
+# define CAN_GET_REGISTERS 1
+
+struct Registers {
+ uptr x[32];
+};
+
+__attribute__((always_inline, unused)) static Registers GetRegisters() {
+ Registers regs;
+ __asm__ volatile(
+ "stp x0, x1, [%1, #(8 * 0)]\n"
+ "stp x2, x3, [%1, #(8 * 2)]\n"
+ "stp x4, x5, [%1, #(8 * 4)]\n"
+ "stp x6, x7, [%1, #(8 * 6)]\n"
+ "stp x8, x9, [%1, #(8 * 8)]\n"
+ "stp x10, x11, [%1, #(8 * 10)]\n"
+ "stp x12, x13, [%1, #(8 * 12)]\n"
+ "stp x14, x15, [%1, #(8 * 14)]\n"
+ "stp x16, x17, [%1, #(8 * 16)]\n"
+ "stp x18, x19, [%1, #(8 * 18)]\n"
+ "stp x20, x21, [%1, #(8 * 20)]\n"
+ "stp x22, x23, [%1, #(8 * 22)]\n"
+ "stp x24, x25, [%1, #(8 * 24)]\n"
+ "stp x26, x27, [%1, #(8 * 26)]\n"
+ "stp x28, x29, [%1, #(8 * 28)]\n"
+ : "=m"(regs)
+ : "r"(regs.x));
+ regs.x[30] = reinterpret_cast<uintptr_t>(__builtin_return_address(0));
+ regs.x[31] = reinterpret_cast<uintptr_t>(__builtin_frame_address(0));
+ return regs;
+}
+
+#else
+# define CAN_GET_REGISTERS 0
+#endif
+
+#endif // HWASAN_REGISTERS_H
diff --git a/libsanitizer/hwasan/hwasan_report.cpp b/libsanitizer/hwasan/hwasan_report.cpp
index de08215..8f9dc6c 100644
--- a/libsanitizer/hwasan/hwasan_report.cpp
+++ b/libsanitizer/hwasan/hwasan_report.cpp
@@ -102,6 +102,15 @@ static StackTrace GetStackTraceFromId(u32 id) {
return res;
}
+static void MaybePrintAndroidHelpUrl() {
+#if SANITIZER_ANDROID
+ Printf(
+ "Learn more about HWASan reports: "
+ "https://source.android.com/docs/security/test/memory-safety/"
+ "hwasan-reports\n");
+#endif
+}
+
// A RAII object that holds a copy of the current thread stack ring buffer.
// The actual stack buffer may change while we are iterating over it (for
// example, Printf may call syslog() which can itself be built with hwasan).
@@ -322,7 +331,7 @@ static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate,
untagged_addr, offset, whence, chunk.UsedSize(), chunk.Beg(),
chunk.End());
Printf("%s", d.Allocation());
- Printf("allocated here:\n");
+ Printf("allocated by thread T%u here:\n", chunk.GetAllocThreadId());
Printf("%s", d.Default());
GetStackTraceFromId(chunk.GetAllocStackId()).Print();
return;
@@ -464,12 +473,12 @@ void PrintAddressDescription(
har.requested_size, UntagAddr(har.tagged_addr),
UntagAddr(har.tagged_addr) + har.requested_size);
Printf("%s", d.Allocation());
- Printf("freed by thread T%zd here:\n", t->unique_id());
+ Printf("freed by thread T%u here:\n", t->unique_id());
Printf("%s", d.Default());
GetStackTraceFromId(har.free_context_id).Print();
Printf("%s", d.Allocation());
- Printf("previously allocated here:\n", t);
+ Printf("previously allocated by thread T%u here:\n", har.alloc_thread_id);
Printf("%s", d.Default());
GetStackTraceFromId(har.alloc_context_id).Print();
@@ -492,7 +501,8 @@ void PrintAddressDescription(
}
// Print the remaining threads as extra information, one line per thread.
- hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });
+ if (flags()->print_live_threads_info)
+ hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });
if (!num_descriptions_printed)
// We exhausted our possibilities. Bail out.
@@ -600,6 +610,7 @@ void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
if (tag_ptr)
PrintTagsAroundAddr(tag_ptr);
+ MaybePrintAndroidHelpUrl();
ReportErrorSummary(bug_type, stack);
}
@@ -673,6 +684,7 @@ void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
PrintTagsAroundAddr(tag_ptr);
+ MaybePrintAndroidHelpUrl();
ReportErrorSummary(bug_type, stack);
}
@@ -742,6 +754,7 @@ void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
if (registers_frame)
ReportRegisters(registers_frame, pc);
+ MaybePrintAndroidHelpUrl();
ReportErrorSummary(bug_type, stack);
}
diff --git a/libsanitizer/hwasan/hwasan_setjmp_riscv64.S b/libsanitizer/hwasan/hwasan_setjmp_riscv64.S
index f33c491..43f9c3c 100644
--- a/libsanitizer/hwasan/hwasan_setjmp_riscv64.S
+++ b/libsanitizer/hwasan/hwasan_setjmp_riscv64.S
@@ -36,7 +36,7 @@ ASM_TYPE_FUNCTION(__interceptor_setjmp)
__interceptor_setjmp:
CFI_STARTPROC
addi x11, x0, 0
- j __interceptor_sigsetjmp
+ tail __interceptor_sigsetjmp
CFI_ENDPROC
ASM_SIZE(__interceptor_setjmp)
diff --git a/libsanitizer/hwasan/hwasan_thread.cpp b/libsanitizer/hwasan/hwasan_thread.cpp
index c776ae1..3375782 100644
--- a/libsanitizer/hwasan/hwasan_thread.cpp
+++ b/libsanitizer/hwasan/hwasan_thread.cpp
@@ -5,6 +5,7 @@
#include "hwasan_interface_internal.h"
#include "hwasan_mapping.h"
#include "hwasan_poisoning.h"
+#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_placement_new.h"
@@ -43,6 +44,8 @@ void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size,
static atomic_uint64_t unique_id;
unique_id_ = atomic_fetch_add(&unique_id, 1, memory_order_relaxed);
+ if (!IsMainThread())
+ os_id_ = GetTid();
if (auto sz = flags()->heap_history_size)
heap_allocations_ = HeapAllocationsRingBuffer::New(sz);
@@ -54,6 +57,7 @@ void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size,
InitStackRingBuffer(stack_buffer_start, stack_buffer_size);
#endif
InitStackAndTls(state);
+ dtls_ = DTLS_Get();
}
void Thread::InitStackRingBuffer(uptr stack_buffer_start,
@@ -147,4 +151,58 @@ tag_t Thread::GenerateRandomTag(uptr num_bits) {
return tag;
}
+void EnsureMainThreadIDIsCorrect() {
+ auto *t = __hwasan::GetCurrentThread();
+ if (t && (t->IsMainThread()))
+ t->set_os_id(GetTid());
+}
+
} // namespace __hwasan
+
+// --- Implementation of LSan-specific functions --- {{{1
+namespace __lsan {
+
+static __hwasan::HwasanThreadList *GetHwasanThreadListLocked() {
+ auto &tl = __hwasan::hwasanThreadList();
+ tl.CheckLocked();
+ return &tl;
+}
+
+static __hwasan::Thread *GetThreadByOsIDLocked(tid_t os_id) {
+ return GetHwasanThreadListLocked()->FindThreadLocked(
+ [os_id](__hwasan::Thread *t) { return t->os_id() == os_id; });
+}
+
+void LockThreadRegistry() { __hwasan::hwasanThreadList().Lock(); }
+
+void UnlockThreadRegistry() { __hwasan::hwasanThreadList().Unlock(); }
+
+void EnsureMainThreadIDIsCorrect() { __hwasan::EnsureMainThreadIDIsCorrect(); }
+
+bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
+ uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
+ uptr *cache_end, DTLS **dtls) {
+ auto *t = GetThreadByOsIDLocked(os_id);
+ if (!t)
+ return false;
+ *stack_begin = t->stack_bottom();
+ *stack_end = t->stack_top();
+ *tls_begin = t->tls_begin();
+ *tls_end = t->tls_end();
+ // FIXME: is this correct for HWASan?
+ *cache_begin = 0;
+ *cache_end = 0;
+ *dtls = t->dtls();
+ return true;
+}
+
+void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}
+
+void GetThreadExtraStackRangesLocked(tid_t os_id,
+ InternalMmapVector<Range> *ranges) {}
+void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {}
+
+void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {}
+void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {}
+
+} // namespace __lsan
diff --git a/libsanitizer/hwasan/hwasan_thread.h b/libsanitizer/hwasan/hwasan_thread.h
index 3db7c1a..9e1b438 100644
--- a/libsanitizer/hwasan/hwasan_thread.h
+++ b/libsanitizer/hwasan/hwasan_thread.h
@@ -46,6 +46,7 @@ class Thread {
uptr stack_size() { return stack_top() - stack_bottom(); }
uptr tls_begin() { return tls_begin_; }
uptr tls_end() { return tls_end_; }
+ DTLS *dtls() { return dtls_; }
bool IsMainThread() { return unique_id_ == 0; }
bool AddrIsInStack(uptr addr) {
@@ -61,13 +62,16 @@ class Thread {
void DisableTagging() { tagging_disabled_++; }
void EnableTagging() { tagging_disabled_--; }
- u64 unique_id() const { return unique_id_; }
+ u32 unique_id() const { return unique_id_; }
void Announce() {
if (announced_) return;
announced_ = true;
Print("Thread: ");
}
+ tid_t os_id() const { return os_id_; }
+ void set_os_id(tid_t os_id) { os_id_ = os_id; }
+
uptr &vfork_spill() { return vfork_spill_; }
private:
@@ -81,6 +85,7 @@ class Thread {
uptr stack_bottom_;
uptr tls_begin_;
uptr tls_end_;
+ DTLS *dtls_;
u32 random_state_;
u32 random_buffer_;
@@ -89,7 +94,9 @@ class Thread {
HeapAllocationsRingBuffer *heap_allocations_;
StackAllocationsRingBuffer *stack_allocations_;
- u64 unique_id_; // counting from zero.
+ u32 unique_id_; // counting from zero.
+
+ tid_t os_id_;
u32 tagging_disabled_; // if non-zero, malloc uses zero tag in this thread.
@@ -103,6 +110,9 @@ class Thread {
Thread *GetCurrentThread();
uptr *GetCurrentThreadLongPtr();
+// Used to handle fork().
+void EnsureMainThreadIDIsCorrect();
+
struct ScopedTaggingDisabler {
ScopedTaggingDisabler() { GetCurrentThread()->DisableTagging(); }
~ScopedTaggingDisabler() { GetCurrentThread()->EnableTagging(); }
diff --git a/libsanitizer/hwasan/hwasan_thread_list.h b/libsanitizer/hwasan/hwasan_thread_list.h
index 15916a8..97485b1 100644
--- a/libsanitizer/hwasan/hwasan_thread_list.h
+++ b/libsanitizer/hwasan/hwasan_thread_list.h
@@ -71,7 +71,7 @@ struct ThreadStats {
uptr total_stack_size;
};
-class HwasanThreadList {
+class SANITIZER_MUTEX HwasanThreadList {
public:
HwasanThreadList(uptr storage, uptr size)
: free_space_(storage), free_space_end_(storage + size) {
@@ -85,7 +85,8 @@ class HwasanThreadList {
RoundUpTo(ring_buffer_size_ + sizeof(Thread), ring_buffer_size_ * 2);
}
- Thread *CreateCurrentThread(const Thread::InitState *state = nullptr) {
+ Thread *CreateCurrentThread(const Thread::InitState *state = nullptr)
+ SANITIZER_EXCLUDES(free_list_mutex_, live_list_mutex_) {
Thread *t = nullptr;
{
SpinMutexLock l(&free_list_mutex_);
@@ -114,7 +115,8 @@ class HwasanThreadList {
ReleaseMemoryPagesToOS(start, start + thread_alloc_size_);
}
- void RemoveThreadFromLiveList(Thread *t) {
+ void RemoveThreadFromLiveList(Thread *t)
+ SANITIZER_EXCLUDES(live_list_mutex_) {
SpinMutexLock l(&live_list_mutex_);
for (Thread *&t2 : live_list_)
if (t2 == t) {
@@ -127,7 +129,7 @@ class HwasanThreadList {
CHECK(0 && "thread not found in live list");
}
- void ReleaseThread(Thread *t) {
+ void ReleaseThread(Thread *t) SANITIZER_EXCLUDES(free_list_mutex_) {
RemoveThreadStats(t);
t->Destroy();
DontNeedThread(t);
@@ -149,30 +151,47 @@ class HwasanThreadList {
}
template <class CB>
- void VisitAllLiveThreads(CB cb) {
+ void VisitAllLiveThreads(CB cb) SANITIZER_EXCLUDES(live_list_mutex_) {
SpinMutexLock l(&live_list_mutex_);
for (Thread *t : live_list_) cb(t);
}
- void AddThreadStats(Thread *t) {
+ template <class CB>
+ Thread *FindThreadLocked(CB cb) SANITIZER_CHECK_LOCKED(live_list_mutex_) {
+ CheckLocked();
+ for (Thread *t : live_list_)
+ if (cb(t))
+ return t;
+ return nullptr;
+ }
+
+ void AddThreadStats(Thread *t) SANITIZER_EXCLUDES(stats_mutex_) {
SpinMutexLock l(&stats_mutex_);
stats_.n_live_threads++;
stats_.total_stack_size += t->stack_size();
}
- void RemoveThreadStats(Thread *t) {
+ void RemoveThreadStats(Thread *t) SANITIZER_EXCLUDES(stats_mutex_) {
SpinMutexLock l(&stats_mutex_);
stats_.n_live_threads--;
stats_.total_stack_size -= t->stack_size();
}
- ThreadStats GetThreadStats() {
+ ThreadStats GetThreadStats() SANITIZER_EXCLUDES(stats_mutex_) {
SpinMutexLock l(&stats_mutex_);
return stats_;
}
uptr GetRingBufferSize() const { return ring_buffer_size_; }
+ void Lock() SANITIZER_ACQUIRE(live_list_mutex_) { live_list_mutex_.Lock(); }
+ void CheckLocked() const SANITIZER_CHECK_LOCKED(live_list_mutex_) {
+ live_list_mutex_.CheckLocked();
+ }
+ void Unlock() SANITIZER_RELEASE(live_list_mutex_) {
+ live_list_mutex_.Unlock();
+ }
+
private:
Thread *AllocThread() {
SpinMutexLock l(&free_space_mutex_);
@@ -191,12 +210,14 @@ class HwasanThreadList {
uptr thread_alloc_size_;
SpinMutex free_list_mutex_;
- InternalMmapVector<Thread *> free_list_;
+ InternalMmapVector<Thread *> free_list_
+ SANITIZER_GUARDED_BY(free_list_mutex_);
SpinMutex live_list_mutex_;
- InternalMmapVector<Thread *> live_list_;
+ InternalMmapVector<Thread *> live_list_
+ SANITIZER_GUARDED_BY(live_list_mutex_);
- ThreadStats stats_;
SpinMutex stats_mutex_;
+ ThreadStats stats_ SANITIZER_GUARDED_BY(stats_mutex_);
};
void InitThreadList(uptr storage, uptr size);
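The SANITIZER_MUTEX / SANITIZER_EXCLUDES / SANITIZER_GUARDED_BY macros used above expand to clang's thread-safety attributes. A minimal standalone sketch of the same idiom, checkable with clang -Wthread-safety (the attribute spellings below are clang's own, used here in place of the sanitizer macros):

#include <mutex>

class __attribute__((capability("mutex"))) Mu {
 public:
  void Lock() __attribute__((acquire_capability())) { m_.lock(); }
  void Unlock() __attribute__((release_capability())) { m_.unlock(); }

 private:
  std::mutex m_;
};

class ThreadStatsDemo {
 public:
  void Inc() __attribute__((locks_excluded(mu_))) {
    mu_.Lock();
    ++n_live_;  // -Wthread-safety verifies mu_ is held here
    mu_.Unlock();
  }

 private:
  Mu mu_;
  int n_live_ __attribute__((guarded_by(mu_))) = 0;
};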