aboutsummaryrefslogtreecommitdiff
path: root/libsanitizer/hwasan
diff options
context:
space:
mode:
authorJakub Jelinek <jakub@redhat.com>2023-11-15 12:45:58 +0100
committerJakub Jelinek <jakub@redhat.com>2023-11-15 12:45:58 +0100
commit28219f7f99a80519d1c6ab5e5dc83b4c7f8d7251 (patch)
tree42e3657c58ff08a654f04aeb0f43b3bc75930bbc /libsanitizer/hwasan
parent4d86dc51e34d2a5695b617afeb56e3414836a79a (diff)
downloadgcc-28219f7f99a80519d1c6ab5e5dc83b4c7f8d7251.zip
gcc-28219f7f99a80519d1c6ab5e5dc83b4c7f8d7251.tar.gz
gcc-28219f7f99a80519d1c6ab5e5dc83b4c7f8d7251.tar.bz2
libsanitizer: merge from upstream (c425db2eb558c263)
The following patch is result of libsanitizer/merge.sh from c425db2eb558c263 (yesterday evening). Bootstrapped/regtested on x86_64-linux and i686-linux (together with the follow-up 3 patches I'm about to post). BTW, seems upstream has added riscv64 support for I think lsan/tsan, so if anyone is willing to try it there, it would be a matter of copying e.g. the s390*-*-linux* libsanitizer/configure.tgt entry to riscv64-*-linux* with the obvious s/s390x/riscv64/ change in it.
Diffstat (limited to 'libsanitizer/hwasan')
-rw-r--r--libsanitizer/hwasan/hwasan.cpp142
-rw-r--r--libsanitizer/hwasan/hwasan_allocation_functions.cpp6
-rw-r--r--libsanitizer/hwasan/hwasan_allocator.cpp71
-rw-r--r--libsanitizer/hwasan/hwasan_allocator.h7
-rw-r--r--libsanitizer/hwasan/hwasan_exceptions.cpp3
-rw-r--r--libsanitizer/hwasan/hwasan_globals.cpp2
-rw-r--r--libsanitizer/hwasan/hwasan_globals.h1
-rw-r--r--libsanitizer/hwasan/hwasan_interceptors.cpp379
-rw-r--r--libsanitizer/hwasan/hwasan_interface_internal.h59
-rw-r--r--libsanitizer/hwasan/hwasan_linux.cpp11
-rw-r--r--libsanitizer/hwasan/hwasan_memintrinsics.cpp30
-rw-r--r--libsanitizer/hwasan/hwasan_platform_interceptors.h1001
-rw-r--r--libsanitizer/hwasan/hwasan_report.cpp856
-rw-r--r--libsanitizer/hwasan/hwasan_report.h2
-rw-r--r--libsanitizer/hwasan/hwasan_setjmp_aarch64.S45
-rw-r--r--libsanitizer/hwasan/hwasan_setjmp_riscv64.S31
-rw-r--r--libsanitizer/hwasan/hwasan_setjmp_x86_64.S28
-rw-r--r--libsanitizer/hwasan/hwasan_tag_mismatch_aarch64.S8
-rw-r--r--libsanitizer/hwasan/hwasan_thread.cpp44
-rw-r--r--libsanitizer/hwasan/hwasan_thread_list.cpp19
-rw-r--r--libsanitizer/hwasan/hwasan_thread_list.h9
21 files changed, 2265 insertions, 489 deletions
diff --git a/libsanitizer/hwasan/hwasan.cpp b/libsanitizer/hwasan/hwasan.cpp
index 26aae9b..2f6cb10 100644
--- a/libsanitizer/hwasan/hwasan.cpp
+++ b/libsanitizer/hwasan/hwasan.cpp
@@ -86,9 +86,11 @@ static void InitializeFlags() {
cf.clear_shadow_mmap_threshold = 4096 * (SANITIZER_ANDROID ? 2 : 8);
// Sigtrap is used in error reporting.
cf.handle_sigtrap = kHandleSignalExclusive;
- // For now only tested on Linux. Other plantforms can be turned on as they
- // become ready.
- cf.detect_leaks = cf.detect_leaks && SANITIZER_LINUX && !SANITIZER_ANDROID;
+ // For now only tested on Linux and Fuchsia. Other platforms can be turned
+ // on as they become ready.
+ constexpr bool can_detect_leaks =
+ (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA;
+ cf.detect_leaks = cf.detect_leaks && can_detect_leaks;
#if SANITIZER_ANDROID
// Let platform handle other signals. It is better at reporting them then we
@@ -170,7 +172,7 @@ static void HwasanFormatMemoryUsage(InternalScopedString &s) {
auto sds = StackDepotGetStats();
AllocatorStatCounters asc;
GetAllocatorStats(asc);
- s.append(
+ s.AppendF(
"HWASAN pid: %d rss: %zd threads: %zd stacks: %zd"
" thr_aux: %zd stack_depot: %zd uniq_stacks: %zd"
" heap: %zd",
@@ -290,14 +292,20 @@ static bool InitializeSingleGlobal(const hwasan_global &global) {
}
static void InitLoadedGlobals() {
- dl_iterate_phdr(
- [](dl_phdr_info *info, size_t /* size */, void * /* data */) -> int {
- for (const hwasan_global &global : HwasanGlobalsFor(
- info->dlpi_addr, info->dlpi_phdr, info->dlpi_phnum))
- InitializeSingleGlobal(global);
- return 0;
- },
- nullptr);
+ // Fuchsia's libc provides a hook (__sanitizer_module_loaded) that runs on
+ // the startup path which calls into __hwasan_library_loaded on all
+ // initially loaded modules, so explicitly registering the globals here
+ // isn't needed.
+ if constexpr (!SANITIZER_FUCHSIA) {
+ dl_iterate_phdr(
+ [](dl_phdr_info *info, size_t /* size */, void * /* data */) -> int {
+ for (const hwasan_global &global : HwasanGlobalsFor(
+ info->dlpi_addr, info->dlpi_phdr, info->dlpi_phnum))
+ InitializeSingleGlobal(global);
+ return 0;
+ },
+ nullptr);
+ }
}
// Prepare to run instrumented code on the main thread.
@@ -364,13 +372,7 @@ __attribute__((constructor(0))) void __hwasan_init() {
DisableCoreDumperIfNecessary();
InitInstrumentation();
- if constexpr (!SANITIZER_FUCHSIA) {
- // Fuchsia's libc provides a hook (__sanitizer_module_loaded) that runs on
- // the startup path which calls into __hwasan_library_loaded on all
- // initially loaded modules, so explicitly registering the globals here
- // isn't needed.
- InitLoadedGlobals();
- }
+ InitLoadedGlobals();
// Needs to be called here because flags()->random_tags might not have been
// initialized when InitInstrumentation() was called.
@@ -530,6 +532,56 @@ void __hwasan_load16_noabort(uptr p) {
CheckAddress<ErrorAction::Recover, AccessType::Load, 4>(p);
}
+void __hwasan_loadN_match_all(uptr p, uptr sz, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddressSized<ErrorAction::Abort, AccessType::Load>(p, sz);
+}
+void __hwasan_load1_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 0>(p);
+}
+void __hwasan_load2_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 1>(p);
+}
+void __hwasan_load4_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 2>(p);
+}
+void __hwasan_load8_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 3>(p);
+}
+void __hwasan_load16_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 4>(p);
+}
+
+void __hwasan_loadN_match_all_noabort(uptr p, uptr sz, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Load>(p, sz);
+}
+void __hwasan_load1_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 0>(p);
+}
+void __hwasan_load2_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 1>(p);
+}
+void __hwasan_load4_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 2>(p);
+}
+void __hwasan_load8_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 3>(p);
+}
+void __hwasan_load16_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 4>(p);
+}
+
void __hwasan_storeN(uptr p, uptr sz) {
CheckAddressSized<ErrorAction::Abort, AccessType::Store>(p, sz);
}
@@ -568,6 +620,56 @@ void __hwasan_store16_noabort(uptr p) {
CheckAddress<ErrorAction::Recover, AccessType::Store, 4>(p);
}
+void __hwasan_storeN_match_all(uptr p, uptr sz, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddressSized<ErrorAction::Abort, AccessType::Store>(p, sz);
+}
+void __hwasan_store1_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 0>(p);
+}
+void __hwasan_store2_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 1>(p);
+}
+void __hwasan_store4_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 2>(p);
+}
+void __hwasan_store8_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 3>(p);
+}
+void __hwasan_store16_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 4>(p);
+}
+
+void __hwasan_storeN_match_all_noabort(uptr p, uptr sz, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(p, sz);
+}
+void __hwasan_store1_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 0>(p);
+}
+void __hwasan_store2_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 1>(p);
+}
+void __hwasan_store4_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 2>(p);
+}
+void __hwasan_store8_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 3>(p);
+}
+void __hwasan_store16_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 4>(p);
+}
+
void __hwasan_tag_memory(uptr p, u8 tag, uptr sz) {
TagMemoryAligned(UntagAddr(p), sz, tag);
}
@@ -579,7 +681,7 @@ uptr __hwasan_tag_pointer(uptr p, u8 tag) {
void __hwasan_handle_longjmp(const void *sp_dst) {
uptr dst = (uptr)sp_dst;
// HWASan does not support tagged SP.
- CHECK(GetTagFromPointer(dst) == 0);
+ CHECK_EQ(GetTagFromPointer(dst), 0);
uptr sp = (uptr)__builtin_frame_address(0);
static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
diff --git a/libsanitizer/hwasan/hwasan_allocation_functions.cpp b/libsanitizer/hwasan/hwasan_allocation_functions.cpp
index 59ad633..75d91ed 100644
--- a/libsanitizer/hwasan/hwasan_allocation_functions.cpp
+++ b/libsanitizer/hwasan/hwasan_allocation_functions.cpp
@@ -159,13 +159,13 @@ void *__sanitizer_malloc(uptr size) {
// Fuchsia does not use WRAP/wrappers used for the interceptor infrastructure.
# define INTERCEPTOR_ALIAS(RET, FN, ARGS...) \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE RET FN( \
- ARGS) ALIAS("__sanitizer_" #FN)
+ ARGS) ALIAS(__sanitizer_##FN)
#else
# define INTERCEPTOR_ALIAS(RET, FN, ARGS...) \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE RET WRAP(FN)(ARGS) \
- ALIAS("__sanitizer_" #FN); \
+ ALIAS(__sanitizer_##FN); \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE RET FN( \
- ARGS) ALIAS("__sanitizer_" #FN)
+ ARGS) ALIAS(__sanitizer_##FN)
#endif
INTERCEPTOR_ALIAS(int, posix_memalign, void **memptr, SIZE_T alignment,
diff --git a/libsanitizer/hwasan/hwasan_allocator.cpp b/libsanitizer/hwasan/hwasan_allocator.cpp
index 3b59741..d21ba02 100644
--- a/libsanitizer/hwasan/hwasan_allocator.cpp
+++ b/libsanitizer/hwasan/hwasan_allocator.cpp
@@ -149,8 +149,9 @@ void HwasanAllocatorInit() {
atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
!flags()->disable_allocator_tagging);
SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
- allocator.Init(common_flags()->allocator_release_to_os_interval_ms,
- GetAliasRegionStart());
+ allocator.InitLinkerInitialized(
+ common_flags()->allocator_release_to_os_interval_ms,
+ GetAliasRegionStart());
for (uptr i = 0; i < sizeof(tail_magic); i++)
tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
if (common_flags()->max_allocation_size_mb) {
@@ -165,8 +166,11 @@ void HwasanAllocatorLock() { allocator.ForceLock(); }
void HwasanAllocatorUnlock() { allocator.ForceUnlock(); }
-void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
+void AllocatorThreadStart(AllocatorCache *cache) { allocator.InitCache(cache); }
+
+void AllocatorThreadFinish(AllocatorCache *cache) {
allocator.SwallowCache(cache);
+ allocator.DestroyCache(cache);
}
static uptr TaggedSize(uptr size) {
@@ -230,28 +234,23 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
}
void *user_ptr = allocated;
- // Tagging can only be skipped when both tag_in_malloc and tag_in_free are
- // false. When tag_in_malloc = false and tag_in_free = true malloc needs to
- // retag to 0.
if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
- (flags()->tag_in_malloc || flags()->tag_in_free) &&
- atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
- if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
- tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
- uptr tag_size = orig_size ? orig_size : 1;
- uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
- user_ptr =
- (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
- if (full_granule_size != tag_size) {
- u8 *short_granule =
- reinterpret_cast<u8 *>(allocated) + full_granule_size;
- TagMemoryAligned((uptr)short_granule, kShadowAlignment,
- tag_size % kShadowAlignment);
- short_granule[kShadowAlignment - 1] = tag;
- }
- } else {
- user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
+ atomic_load_relaxed(&hwasan_allocator_tagging_enabled) &&
+ flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
+ tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
+ uptr tag_size = orig_size ? orig_size : 1;
+ uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
+ user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
+ if (full_granule_size != tag_size) {
+ u8 *short_granule = reinterpret_cast<u8 *>(allocated) + full_granule_size;
+ TagMemoryAligned((uptr)short_granule, kShadowAlignment,
+ tag_size % kShadowAlignment);
+ short_granule[kShadowAlignment - 1] = tag;
}
+ } else {
+ // Tagging cannot be completely skipped. If it's disabled, we need to tag
+ // with zeros.
+ user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
}
Metadata *meta =
@@ -261,7 +260,7 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
: __lsan::kDirectlyLeaked);
#endif
meta->SetAllocated(StackDepotPut(*stack), orig_size);
- RunMallocHooks(user_ptr, size);
+ RunMallocHooks(user_ptr, orig_size);
return user_ptr;
}
@@ -288,8 +287,6 @@ static bool CheckInvalidFree(StackTrace *stack, void *untagged_ptr,
static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
CHECK(tagged_ptr);
- RunFreeHooks(tagged_ptr);
-
void *untagged_ptr = UntagPtr(tagged_ptr);
if (CheckInvalidFree(stack, untagged_ptr, tagged_ptr))
@@ -304,6 +301,9 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
return;
}
+
+ RunFreeHooks(tagged_ptr);
+
uptr orig_size = meta->GetRequestedSize();
u32 free_context_id = StackDepotPut(*stack);
u32 alloc_context_id = meta->GetAllocStackId();
@@ -340,7 +340,8 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
}
if (in_taggable_region && flags()->tag_in_free && malloc_bisect(stack, 0) &&
- atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
+ atomic_load_relaxed(&hwasan_allocator_tagging_enabled) &&
+ allocator.FromPrimary(untagged_ptr) /* Secondary 0-tag and unmap.*/) {
// Always store full 8-bit tags on free to maximize UAF detection.
tag_t tag;
if (t) {
@@ -437,6 +438,15 @@ static uptr AllocationSize(const void *p) {
return b->GetRequestedSize();
}
+static uptr AllocationSizeFast(const void *p) {
+ const void *untagged_ptr = UntagPtr(p);
+ void *aligned_ptr = reinterpret_cast<void *>(
+ RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
+ Metadata *meta =
+ reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
+ return meta->GetRequestedSize();
+}
+
void *hwasan_malloc(uptr size, StackTrace *stack) {
return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
}
@@ -675,4 +685,11 @@ const void *__sanitizer_get_allocated_begin(const void *p) {
uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
+uptr __sanitizer_get_allocated_size_fast(const void *p) {
+ DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
+ uptr ret = AllocationSizeFast(p);
+ DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
+ return ret;
+}
+
void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }
diff --git a/libsanitizer/hwasan/hwasan_allocator.h b/libsanitizer/hwasan/hwasan_allocator.h
index ecf3f68..2ada2a0 100644
--- a/libsanitizer/hwasan/hwasan_allocator.h
+++ b/libsanitizer/hwasan/hwasan_allocator.h
@@ -54,6 +54,10 @@ static_assert(sizeof(Metadata) == 16);
struct HwasanMapUnmapCallback {
void OnMap(uptr p, uptr size) const { UpdateMemoryUsage(); }
+ void OnMapSecondary(uptr p, uptr size, uptr user_begin,
+ uptr user_size) const {
+ UpdateMemoryUsage();
+ }
void OnUnmap(uptr p, uptr size) const {
// We are about to unmap a chunk of user memory.
// It can return as user-requested mmap() or another thread stack.
@@ -88,7 +92,8 @@ typedef SizeClassAllocator64<AP64> PrimaryAllocator;
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;
-void AllocatorSwallowThreadLocalCache(AllocatorCache *cache);
+void AllocatorThreadStart(AllocatorCache *cache);
+void AllocatorThreadFinish(AllocatorCache *cache);
class HwasanChunkView {
public:
diff --git a/libsanitizer/hwasan/hwasan_exceptions.cpp b/libsanitizer/hwasan/hwasan_exceptions.cpp
index c9968a5..bf700bf 100644
--- a/libsanitizer/hwasan/hwasan_exceptions.cpp
+++ b/libsanitizer/hwasan/hwasan_exceptions.cpp
@@ -62,7 +62,8 @@ __hwasan_personality_wrapper(int version, _Unwind_Action actions,
#error Unsupported architecture
#endif
uptr sp = get_cfa(context);
- TagMemory(sp, fp - sp, 0);
+ TagMemory(UntagAddr(sp), UntagAddr(fp) - UntagAddr(sp),
+ GetTagFromPointer(sp));
}
return rc;
diff --git a/libsanitizer/hwasan/hwasan_globals.cpp b/libsanitizer/hwasan/hwasan_globals.cpp
index d71bcd7..7e0f3df 100644
--- a/libsanitizer/hwasan/hwasan_globals.cpp
+++ b/libsanitizer/hwasan/hwasan_globals.cpp
@@ -13,6 +13,8 @@
#include "hwasan_globals.h"
+#include "sanitizer_common/sanitizer_array_ref.h"
+
namespace __hwasan {
enum { NT_LLVM_HWASAN_GLOBALS = 3 };
diff --git a/libsanitizer/hwasan/hwasan_globals.h b/libsanitizer/hwasan/hwasan_globals.h
index fd7adf7..94cd53e 100644
--- a/libsanitizer/hwasan/hwasan_globals.h
+++ b/libsanitizer/hwasan/hwasan_globals.h
@@ -16,6 +16,7 @@
#include <link.h>
+#include "sanitizer_common/sanitizer_array_ref.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
diff --git a/libsanitizer/hwasan/hwasan_interceptors.cpp b/libsanitizer/hwasan/hwasan_interceptors.cpp
index 67edba4..d9237cf 100644
--- a/libsanitizer/hwasan/hwasan_interceptors.cpp
+++ b/libsanitizer/hwasan/hwasan_interceptors.cpp
@@ -14,10 +14,17 @@
// sanitizer_common/sanitizer_common_interceptors.h
//===----------------------------------------------------------------------===//
+#define SANITIZER_COMMON_NO_REDEFINE_BUILTINS
+
#include "hwasan.h"
+#include "hwasan_allocator.h"
#include "hwasan_checks.h"
+#include "hwasan_mapping.h"
+#include "hwasan_platform_interceptors.h"
#include "hwasan_thread.h"
+#include "hwasan_thread_list.h"
#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
@@ -25,21 +32,47 @@
using namespace __hwasan;
-#if HWASAN_WITH_INTERCEPTORS
-
-struct ThreadStartArg {
- thread_callback_t callback;
- void *param;
- __sanitizer_sigset_t starting_sigset_;
+struct HWAsanInterceptorContext {
+ const char *interceptor_name;
};
-static void *HwasanThreadStartFunc(void *arg) {
- __hwasan_thread_enter();
- ThreadStartArg A = *reinterpret_cast<ThreadStartArg*>(arg);
- SetSigProcMask(&A.starting_sigset_, nullptr);
- UnmapOrDie(arg, GetPageSizeCached());
- return A.callback(A.param);
-}
+# define ACCESS_MEMORY_RANGE(ctx, offset, size, access) \
+ do { \
+ __hwasan::CheckAddressSized<ErrorAction::Abort, access>((uptr)offset, \
+ size); \
+ } while (0)
+
+# define HWASAN_READ_RANGE(ctx, offset, size) \
+ ACCESS_MEMORY_RANGE(ctx, offset, size, AccessType::Load)
+# define HWASAN_WRITE_RANGE(ctx, offset, size) \
+ ACCESS_MEMORY_RANGE(ctx, offset, size, AccessType::Store)
+
+# if !SANITIZER_APPLE
+# define HWASAN_INTERCEPT_FUNC(name) \
+ do { \
+ if (!INTERCEPT_FUNCTION(name)) \
+ VReport(1, "HWAddressSanitizer: failed to intercept '%s'\n", #name); \
+ } while (0)
+# define HWASAN_INTERCEPT_FUNC_VER(name, ver) \
+ do { \
+ if (!INTERCEPT_FUNCTION_VER(name, ver)) \
+ VReport(1, "HWAddressSanitizer: failed to intercept '%s@@%s'\n", \
+ #name, ver); \
+ } while (0)
+# define HWASAN_INTERCEPT_FUNC_VER_UNVERSIONED_FALLBACK(name, ver) \
+ do { \
+ if (!INTERCEPT_FUNCTION_VER(name, ver) && !INTERCEPT_FUNCTION(name)) \
+ VReport( \
+ 1, "HWAddressSanitizer: failed to intercept '%s@@%s' or '%s'\n", \
+ #name, ver, #name); \
+ } while (0)
+
+# else
+// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
+# define HWASAN_INTERCEPT_FUNC(name)
+# endif // SANITIZER_APPLE
+
+# if HWASAN_WITH_INTERCEPTORS
# define COMMON_SYSCALL_PRE_READ_RANGE(p, s) __hwasan_loadN((uptr)p, (uptr)s)
# define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
@@ -57,26 +90,251 @@ static void *HwasanThreadStartFunc(void *arg) {
# include "sanitizer_common/sanitizer_common_syscalls.inc"
# include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
-INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void*),
- void * param) {
+# define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
+ HWASAN_WRITE_RANGE(ctx, ptr, size)
+
+# define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
+ HWASAN_READ_RANGE(ctx, ptr, size)
+
+# define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ HWAsanInterceptorContext _ctx = {#func}; \
+ ctx = (void *)&_ctx; \
+ do { \
+ (void)(ctx); \
+ (void)(func); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
+ do { \
+ (void)(ctx); \
+ (void)(path); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
+ do { \
+ (void)(ctx); \
+ (void)(fd); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
+ do { \
+ (void)(ctx); \
+ (void)(fd); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
+ do { \
+ (void)(ctx); \
+ (void)(fd); \
+ (void)(newfd); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
+ do { \
+ (void)(ctx); \
+ (void)(name); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
+ do { \
+ (void)(ctx); \
+ (void)(thread); \
+ (void)(name); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_BLOCK_REAL(name) \
+ do { \
+ (void)(name); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
+ return internal_memset(dst, v, size); \
+ COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \
+ if (MemIsApp(UntagAddr(reinterpret_cast<uptr>(dst))) && \
+ common_flags()->intercept_intrin) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ return REAL(memset)(dst, v, size); \
+ }
+
+# define COMMON_INTERCEPTOR_STRERROR() \
+ do { \
+ } while (false)
+
+# define COMMON_INTERCEPT_FUNCTION(name) HWASAN_INTERCEPT_FUNC(name)
+
+# define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!hwasan_inited)
+
+// The main purpose of the mmap interceptor is to prevent the user from
+// allocating on top of shadow pages.
+//
+// For compatibility, it does not tag pointers, nor does it allow
+// MAP_FIXED in combination with a tagged pointer. (Since mmap itself
+// will not return a tagged pointer, the tagged pointer must have come
+// from elsewhere, such as the secondary allocator, which makes it a
+// very odd usecase.)
+template <class Mmap>
+static void *mmap_interceptor(Mmap real_mmap, void *addr, SIZE_T length,
+ int prot, int flags, int fd, OFF64_T offset) {
+ if (addr) {
+ if (flags & map_fixed) CHECK_EQ(addr, UntagPtr(addr));
+
+ addr = UntagPtr(addr);
+ }
+ SIZE_T rounded_length = RoundUpTo(length, GetPageSize());
+ void *end_addr = (char *)addr + (rounded_length - 1);
+ if (addr && length &&
+ (!MemIsApp(reinterpret_cast<uptr>(addr)) ||
+ !MemIsApp(reinterpret_cast<uptr>(end_addr)))) {
+ // User requested an address that is incompatible with HWASan's
+ // memory layout. Use a different address if allowed, else fail.
+ if (flags & map_fixed) {
+ errno = errno_EINVAL;
+ return (void *)-1;
+ } else {
+ addr = nullptr;
+ }
+ }
+ void *res = real_mmap(addr, length, prot, flags, fd, offset);
+ if (length && res != (void *)-1) {
+ uptr beg = reinterpret_cast<uptr>(res);
+ DCHECK(IsAligned(beg, GetPageSize()));
+ if (!MemIsApp(beg) || !MemIsApp(beg + rounded_length - 1)) {
+ // Application has attempted to map more memory than is supported by
+ // HWASan. Act as if we ran out of memory.
+ internal_munmap(res, length);
+ errno = errno_ENOMEM;
+ return (void *)-1;
+ }
+ __hwasan::TagMemoryAligned(beg, rounded_length, 0);
+ }
+
+ return res;
+}
+
+template <class Munmap>
+static int munmap_interceptor(Munmap real_munmap, void *addr, SIZE_T length) {
+ // We should not tag if munmap fails, but it's too late to tag after
+ // real_munmap, as the pages could be mmaped by another thread.
+ uptr beg = reinterpret_cast<uptr>(addr);
+ if (length && IsAligned(beg, GetPageSize())) {
+ SIZE_T rounded_length = RoundUpTo(length, GetPageSize());
+ // Protect from unmapping the shadow.
+ if (!MemIsApp(beg) || !MemIsApp(beg + rounded_length - 1)) {
+ errno = errno_EINVAL;
+ return -1;
+ }
+ __hwasan::TagMemoryAligned(beg, rounded_length, 0);
+ }
+ return real_munmap(addr, length);
+}
+
+# define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, length, prot, flags, \
+ fd, offset) \
+ do { \
+ (void)(ctx); \
+ return mmap_interceptor(REAL(mmap), addr, sz, prot, flags, fd, off); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, length) \
+ do { \
+ (void)(ctx); \
+ return munmap_interceptor(REAL(munmap), addr, sz); \
+ } while (false)
+
+# include "sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc"
+# include "sanitizer_common/sanitizer_common_interceptors.inc"
+
+struct ThreadStartArg {
+ __sanitizer_sigset_t starting_sigset_;
+};
+
+static void *HwasanThreadStartFunc(void *arg) {
+ __hwasan_thread_enter();
+ SetSigProcMask(&reinterpret_cast<ThreadStartArg *>(arg)->starting_sigset_,
+ nullptr);
+ InternalFree(arg);
+ auto self = GetThreadSelf();
+ auto args = hwasanThreadArgRetval().GetArgs(self);
+ void *retval = (*args.routine)(args.arg_retval);
+ hwasanThreadArgRetval().Finish(self, retval);
+ return retval;
+}
+
+extern "C" {
+int pthread_attr_getdetachstate(void *attr, int *v);
+}
+
+INTERCEPTOR(int, pthread_create, void *thread, void *attr,
+ void *(*callback)(void *), void *param) {
EnsureMainThreadIDIsCorrect();
ScopedTaggingDisabler tagging_disabler;
- ThreadStartArg *A = reinterpret_cast<ThreadStartArg *> (MmapOrDie(
- GetPageSizeCached(), "pthread_create"));
- A->callback = callback;
- A->param = param;
+ bool detached = [attr]() {
+ int d = 0;
+ return attr && !pthread_attr_getdetachstate(attr, &d) && IsStateDetached(d);
+ }();
+ ThreadStartArg *A = (ThreadStartArg *)InternalAlloc(sizeof(ThreadStartArg));
ScopedBlockSignals block(&A->starting_sigset_);
// ASAN uses the same approach to disable leaks from pthread_create.
# if CAN_SANITIZE_LEAKS
__lsan::ScopedInterceptorDisabler lsan_disabler;
# endif
- return REAL(pthread_create)(th, attr, &HwasanThreadStartFunc, A);
+
+ int result;
+ hwasanThreadArgRetval().Create(detached, {callback, param}, [&]() -> uptr {
+ result = REAL(pthread_create)(thread, attr, &HwasanThreadStartFunc, A);
+ return result ? 0 : *(uptr *)(thread);
+ });
+ if (result != 0)
+ InternalFree(A);
+ return result;
+}
+
+INTERCEPTOR(int, pthread_join, void *thread, void **retval) {
+ int result;
+ hwasanThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_join)(thread, retval);
+ return !result;
+ });
+ return result;
}
-INTERCEPTOR(int, pthread_join, void *t, void **arg) {
- return REAL(pthread_join)(t, arg);
+INTERCEPTOR(int, pthread_detach, void *thread) {
+ int result;
+ hwasanThreadArgRetval().Detach((uptr)thread, [&]() {
+ result = REAL(pthread_detach)(thread);
+ return !result;
+ });
+ return result;
}
+INTERCEPTOR(void, pthread_exit, void *retval) {
+ hwasanThreadArgRetval().Finish(GetThreadSelf(), retval);
+ REAL(pthread_exit)(retval);
+}
+
+# if SANITIZER_GLIBC
+INTERCEPTOR(int, pthread_tryjoin_np, void *thread, void **ret) {
+ int result;
+ hwasanThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_tryjoin_np)(thread, ret);
+ return !result;
+ });
+ return result;
+}
+
+INTERCEPTOR(int, pthread_timedjoin_np, void *thread, void **ret,
+ const struct timespec *abstime) {
+ int result;
+ hwasanThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_timedjoin_np)(thread, ret, abstime);
+ return !result;
+ });
+ return result;
+}
+# endif
+
DEFINE_REAL_PTHREAD_FUNCTIONS
DEFINE_REAL(int, vfork)
@@ -85,13 +343,13 @@ DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(int, vfork)
// Get and/or change the set of blocked signals.
extern "C" int sigprocmask(int __how, const __hw_sigset_t *__restrict __set,
__hw_sigset_t *__restrict __oset);
-#define SIG_BLOCK 0
-#define SIG_SETMASK 2
+# define SIG_BLOCK 0
+# define SIG_SETMASK 2
extern "C" int __sigjmp_save(__hw_sigjmp_buf env, int savemask) {
env[0].__magic = kHwJmpBufMagic;
env[0].__mask_was_saved =
- (savemask && sigprocmask(SIG_BLOCK, (__hw_sigset_t *)0,
- &env[0].__saved_mask) == 0);
+ (savemask &&
+ sigprocmask(SIG_BLOCK, (__hw_sigset_t *)0, &env[0].__saved_mask) == 0);
return 0;
}
@@ -120,26 +378,27 @@ InternalLongjmp(__hw_register_buf env, int retval) {
# if defined(__aarch64__)
register long int retval_tmp asm("x1") = retval;
register void *env_address asm("x0") = &env[0];
- asm volatile("ldp x19, x20, [%0, #0<<3];"
- "ldp x21, x22, [%0, #2<<3];"
- "ldp x23, x24, [%0, #4<<3];"
- "ldp x25, x26, [%0, #6<<3];"
- "ldp x27, x28, [%0, #8<<3];"
- "ldp x29, x30, [%0, #10<<3];"
- "ldp d8, d9, [%0, #14<<3];"
- "ldp d10, d11, [%0, #16<<3];"
- "ldp d12, d13, [%0, #18<<3];"
- "ldp d14, d15, [%0, #20<<3];"
- "ldr x5, [%0, #13<<3];"
- "mov sp, x5;"
- // Return the value requested to return through arguments.
- // This should be in x1 given what we requested above.
- "cmp %1, #0;"
- "mov x0, #1;"
- "csel x0, %1, x0, ne;"
- "br x30;"
- : "+r"(env_address)
- : "r"(retval_tmp));
+ asm volatile(
+ "ldp x19, x20, [%0, #0<<3];"
+ "ldp x21, x22, [%0, #2<<3];"
+ "ldp x23, x24, [%0, #4<<3];"
+ "ldp x25, x26, [%0, #6<<3];"
+ "ldp x27, x28, [%0, #8<<3];"
+ "ldp x29, x30, [%0, #10<<3];"
+ "ldp d8, d9, [%0, #14<<3];"
+ "ldp d10, d11, [%0, #16<<3];"
+ "ldp d12, d13, [%0, #18<<3];"
+ "ldp d14, d15, [%0, #20<<3];"
+ "ldr x5, [%0, #13<<3];"
+ "mov sp, x5;"
+ // Return the value requested to return through arguments.
+ // This should be in x1 given what we requested above.
+ "cmp %1, #0;"
+ "mov x0, #1;"
+ "csel x0, %1, x0, ne;"
+ "br x30;"
+ : "+r"(env_address)
+ : "r"(retval_tmp));
# elif defined(__x86_64__)
register long int retval_tmp asm("%rsi") = retval;
register void *env_address asm("%rdi") = &env[0];
@@ -215,8 +474,7 @@ INTERCEPTOR(void, siglongjmp, __hw_sigjmp_buf env, int val) {
if (env[0].__mask_was_saved)
// Restore the saved signal mask.
- (void)sigprocmask(SIG_SETMASK, &env[0].__saved_mask,
- (__hw_sigset_t *)0);
+ (void)sigprocmask(SIG_SETMASK, &env[0].__saved_mask, (__hw_sigset_t *)0);
InternalLongjmp(env[0].__jmpbuf, val);
}
@@ -238,8 +496,8 @@ INTERCEPTOR(void, longjmp, __hw_jmp_buf env, int val) {
}
InternalLongjmp(env[0].__jmpbuf, val);
}
-#undef SIG_BLOCK
-#undef SIG_SETMASK
+# undef SIG_BLOCK
+# undef SIG_SETMASK
# endif // HWASAN_WITH_INTERCEPTORS
@@ -254,7 +512,7 @@ int OnExit() {
return 0;
}
-} // namespace __hwasan
+} // namespace __hwasan
namespace __hwasan {
@@ -262,19 +520,30 @@ void InitializeInterceptors() {
static int inited = 0;
CHECK_EQ(inited, 0);
-#if HWASAN_WITH_INTERCEPTORS
-#if defined(__linux__)
+# if HWASAN_WITH_INTERCEPTORS
+ InitializeCommonInterceptors();
+
+ (void)(read_iovec);
+ (void)(write_iovec);
+
+# if defined(__linux__)
INTERCEPT_FUNCTION(__libc_longjmp);
INTERCEPT_FUNCTION(longjmp);
INTERCEPT_FUNCTION(siglongjmp);
INTERCEPT_FUNCTION(vfork);
-#endif // __linux__
+# endif // __linux__
INTERCEPT_FUNCTION(pthread_create);
INTERCEPT_FUNCTION(pthread_join);
+ INTERCEPT_FUNCTION(pthread_detach);
+ INTERCEPT_FUNCTION(pthread_exit);
+# if SANITIZER_GLIBC
+ INTERCEPT_FUNCTION(pthread_tryjoin_np);
+ INTERCEPT_FUNCTION(pthread_timedjoin_np);
+# endif
# endif
inited = 1;
}
-} // namespace __hwasan
+} // namespace __hwasan
#endif // #if !SANITIZER_FUCHSIA
diff --git a/libsanitizer/hwasan/hwasan_interface_internal.h b/libsanitizer/hwasan/hwasan_interface_internal.h
index d1ecbb5..e7804cc 100644
--- a/libsanitizer/hwasan/hwasan_interface_internal.h
+++ b/libsanitizer/hwasan/hwasan_interface_internal.h
@@ -77,6 +77,32 @@ SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_load16_noabort(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_loadN_match_all(uptr, uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load1_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load2_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load4_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load8_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load16_match_all(uptr, u8);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_loadN_match_all_noabort(uptr, uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load1_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load2_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load4_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load8_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load16_match_all_noabort(uptr, u8);
+
+SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_storeN(uptr, uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_store1(uptr);
@@ -103,6 +129,32 @@ SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_store16_noabort(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_storeN_match_all(uptr, uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store1_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store2_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store4_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store8_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store16_match_all(uptr, u8);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_storeN_match_all_noabort(uptr, uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store1_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store2_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store4_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store8_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store16_match_all_noabort(uptr, u8);
+
+SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_tag_memory(uptr p, u8 tag, uptr sz);
SANITIZER_INTERFACE_ATTRIBUTE
@@ -184,6 +236,13 @@ SANITIZER_INTERFACE_ATTRIBUTE
void *__hwasan_memmove(void *dest, const void *src, uptr n);
SANITIZER_INTERFACE_ATTRIBUTE
+void *__hwasan_memcpy_match_all(void *dst, const void *src, uptr size, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__hwasan_memset_match_all(void *s, int c, uptr n, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__hwasan_memmove_match_all(void *dest, const void *src, uptr n, u8);
+
+SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_set_error_report_callback(void (*callback)(const char *));
} // extern "C"
diff --git a/libsanitizer/hwasan/hwasan_linux.cpp b/libsanitizer/hwasan/hwasan_linux.cpp
index d3e4b53..6f5e943 100644
--- a/libsanitizer/hwasan/hwasan_linux.cpp
+++ b/libsanitizer/hwasan/hwasan_linux.cpp
@@ -283,7 +283,7 @@ void InitThreads() {
bool MemIsApp(uptr p) {
// Memory outside the alias range has non-zero tags.
# if !defined(HWASAN_ALIASING_MODE)
- CHECK(GetTagFromPointer(p) == 0);
+ CHECK_EQ(GetTagFromPointer(p), 0);
# endif
return (p >= kHighMemStart && p <= kHighMemEnd) ||
@@ -302,8 +302,15 @@ extern "C" void __hwasan_thread_exit() {
Thread *t = GetCurrentThread();
// Make sure that signal handler can not see a stale current thread pointer.
atomic_signal_fence(memory_order_seq_cst);
- if (t)
+ if (t) {
+ // Block async signals on the thread as the handler can be instrumented.
+ // After this point instrumented code can't access essential data from TLS
+ // and will crash.
+ // Bionic already calls __hwasan_thread_exit with blocked signals.
+ if (SANITIZER_GLIBC)
+ BlockSignals();
hwasanThreadList().ReleaseThread(t);
+ }
}
# if HWASAN_WITH_INTERCEPTORS
diff --git a/libsanitizer/hwasan/hwasan_memintrinsics.cpp b/libsanitizer/hwasan/hwasan_memintrinsics.cpp
index ea7f5ce..16d6f90 100644
--- a/libsanitizer/hwasan/hwasan_memintrinsics.cpp
+++ b/libsanitizer/hwasan/hwasan_memintrinsics.cpp
@@ -42,3 +42,33 @@ void *__hwasan_memmove(void *to, const void *from, uptr size) {
reinterpret_cast<uptr>(from), size);
return memmove(to, from, size);
}
+
+void *__hwasan_memset_match_all(void *block, int c, uptr size,
+ u8 match_all_tag) {
+ if (GetTagFromPointer(reinterpret_cast<uptr>(block)) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
+ reinterpret_cast<uptr>(block), size);
+ return memset(block, c, size);
+}
+
+void *__hwasan_memcpy_match_all(void *to, const void *from, uptr size,
+ u8 match_all_tag) {
+ if (GetTagFromPointer(reinterpret_cast<uptr>(to)) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
+ reinterpret_cast<uptr>(to), size);
+ if (GetTagFromPointer(reinterpret_cast<uptr>(from)) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Load>(
+ reinterpret_cast<uptr>(from), size);
+ return memcpy(to, from, size);
+}
+
+void *__hwasan_memmove_match_all(void *to, const void *from, uptr size,
+ u8 match_all_tag) {
+ if (GetTagFromPointer(reinterpret_cast<uptr>(to)) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
+ reinterpret_cast<uptr>(to), size);
+ if (GetTagFromPointer(reinterpret_cast<uptr>(from)) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Load>(
+ reinterpret_cast<uptr>(from), size);
+ return memmove(to, from, size);
+}
diff --git a/libsanitizer/hwasan/hwasan_platform_interceptors.h b/libsanitizer/hwasan/hwasan_platform_interceptors.h
new file mode 100644
index 0000000..d92b510
--- /dev/null
+++ b/libsanitizer/hwasan/hwasan_platform_interceptors.h
@@ -0,0 +1,1001 @@
+#ifndef HWASAN_PLATFORM_INTERCEPTORS_H
+#define HWASAN_PLATFORM_INTERCEPTORS_H
+
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+
+// This file cancels out most of the sanitizer_common interception, thus
+// allowing HWASan to selectively reuse some of the interceptors.
+//
+// To re-enable sanitizer_common's interception of a function, comment out
+// the corresponding '#undef SANITIZER_INTERCEPT_fn' and
+// '#define SANITIZER_INTERCEPT_fn 0':
+// - We prefer to comment out rather than delete the lines, to show that
+// it is deliberate, rather than an accidental omission.
+// - We do not use '#define SANITIZE_INTERCEPT_fn 1', because
+// interception is usually conditional (e.g., based on SI_POSIX); we let
+// the condition in sanitizers_platform_interceptors.h take effect.
+
+// Originally generated with:
+// cat ../sanitizer_common/sanitizer_platform_interceptors.h | grep '^#define SANITIZER_INTERCEPT' | cut -d ' ' -f 2 | while read x; do echo "#undef $x"; echo "#define $x 0"; echo; done
+#undef SANITIZER_INTERCEPT_STRLEN
+#define SANITIZER_INTERCEPT_STRLEN 0
+
+#undef SANITIZER_INTERCEPT_STRNLEN
+#define SANITIZER_INTERCEPT_STRNLEN 0
+
+#undef SANITIZER_INTERCEPT_STRCMP
+#define SANITIZER_INTERCEPT_STRCMP 0
+
+#undef SANITIZER_INTERCEPT_STRSTR
+#define SANITIZER_INTERCEPT_STRSTR 0
+
+#undef SANITIZER_INTERCEPT_STRCASESTR
+#define SANITIZER_INTERCEPT_STRCASESTR 0
+
+#undef SANITIZER_INTERCEPT_STRTOK
+#define SANITIZER_INTERCEPT_STRTOK 0
+
+#undef SANITIZER_INTERCEPT_STRCHR
+#define SANITIZER_INTERCEPT_STRCHR 0
+
+#undef SANITIZER_INTERCEPT_STRCHRNUL
+#define SANITIZER_INTERCEPT_STRCHRNUL 0
+
+#undef SANITIZER_INTERCEPT_STRRCHR
+#define SANITIZER_INTERCEPT_STRRCHR 0
+
+#undef SANITIZER_INTERCEPT_STRSPN
+#define SANITIZER_INTERCEPT_STRSPN 0
+
+#undef SANITIZER_INTERCEPT_STRPBRK
+#define SANITIZER_INTERCEPT_STRPBRK 0
+
+#undef SANITIZER_INTERCEPT_TEXTDOMAIN
+#define SANITIZER_INTERCEPT_TEXTDOMAIN 0
+
+#undef SANITIZER_INTERCEPT_STRCASECMP
+#define SANITIZER_INTERCEPT_STRCASECMP 0
+
+// #undef SANITIZER_INTERCEPT_MEMSET
+// #define SANITIZER_INTERCEPT_MEMSET 0
+
+// #undef SANITIZER_INTERCEPT_MEMMOVE
+// #define SANITIZER_INTERCEPT_MEMMOVE 0
+
+// #undef SANITIZER_INTERCEPT_MEMCPY
+// #define SANITIZER_INTERCEPT_MEMCPY 0
+
+// #undef SANITIZER_INTERCEPT_MEMCMP
+// #define SANITIZER_INTERCEPT_MEMCMP 0
+
+// #undef SANITIZER_INTERCEPT_BCMP
+// #define SANITIZER_INTERCEPT_BCMP 0
+
+#undef SANITIZER_INTERCEPT_STRNDUP
+#define SANITIZER_INTERCEPT_STRNDUP 0
+
+#undef SANITIZER_INTERCEPT___STRNDUP
+#define SANITIZER_INTERCEPT___STRNDUP 0
+
+#undef SANITIZER_INTERCEPT_MEMMEM
+#define SANITIZER_INTERCEPT_MEMMEM 0
+
+#undef SANITIZER_INTERCEPT_MEMCHR
+#define SANITIZER_INTERCEPT_MEMCHR 0
+
+#undef SANITIZER_INTERCEPT_MEMRCHR
+#define SANITIZER_INTERCEPT_MEMRCHR 0
+
+#undef SANITIZER_INTERCEPT_READ
+#define SANITIZER_INTERCEPT_READ 0
+
+#undef SANITIZER_INTERCEPT_PREAD
+#define SANITIZER_INTERCEPT_PREAD 0
+
+#undef SANITIZER_INTERCEPT_WRITE
+#define SANITIZER_INTERCEPT_WRITE 0
+
+#undef SANITIZER_INTERCEPT_PWRITE
+#define SANITIZER_INTERCEPT_PWRITE 0
+
+#undef SANITIZER_INTERCEPT_FREAD
+#define SANITIZER_INTERCEPT_FREAD 0
+
+#undef SANITIZER_INTERCEPT_FWRITE
+#define SANITIZER_INTERCEPT_FWRITE 0
+
+#undef SANITIZER_INTERCEPT_FGETS
+#define SANITIZER_INTERCEPT_FGETS 0
+
+#undef SANITIZER_INTERCEPT_FPUTS
+#define SANITIZER_INTERCEPT_FPUTS 0
+
+#undef SANITIZER_INTERCEPT_PUTS
+#define SANITIZER_INTERCEPT_PUTS 0
+
+#undef SANITIZER_INTERCEPT_PREAD64
+#define SANITIZER_INTERCEPT_PREAD64 0
+
+#undef SANITIZER_INTERCEPT_PWRITE64
+#define SANITIZER_INTERCEPT_PWRITE64 0
+
+#undef SANITIZER_INTERCEPT_READV
+#define SANITIZER_INTERCEPT_READV 0
+
+#undef SANITIZER_INTERCEPT_WRITEV
+#define SANITIZER_INTERCEPT_WRITEV 0
+
+#undef SANITIZER_INTERCEPT_PREADV
+#define SANITIZER_INTERCEPT_PREADV 0
+
+#undef SANITIZER_INTERCEPT_PWRITEV
+#define SANITIZER_INTERCEPT_PWRITEV 0
+
+#undef SANITIZER_INTERCEPT_PREADV64
+#define SANITIZER_INTERCEPT_PREADV64 0
+
+#undef SANITIZER_INTERCEPT_PWRITEV64
+#define SANITIZER_INTERCEPT_PWRITEV64 0
+
+#undef SANITIZER_INTERCEPT_PRCTL
+#define SANITIZER_INTERCEPT_PRCTL 0
+
+#undef SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS
+#define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS 0
+
+#undef SANITIZER_INTERCEPT_STRPTIME
+#define SANITIZER_INTERCEPT_STRPTIME 0
+
+#undef SANITIZER_INTERCEPT_SCANF
+#define SANITIZER_INTERCEPT_SCANF 0
+
+#undef SANITIZER_INTERCEPT_ISOC99_SCANF
+#define SANITIZER_INTERCEPT_ISOC99_SCANF 0
+
+#undef SANITIZER_INTERCEPT_PRINTF
+#define SANITIZER_INTERCEPT_PRINTF 0
+
+#undef SANITIZER_INTERCEPT_PRINTF_L
+#define SANITIZER_INTERCEPT_PRINTF_L 0
+
+#undef SANITIZER_INTERCEPT_ISOC99_PRINTF
+#define SANITIZER_INTERCEPT_ISOC99_PRINTF 0
+
+#undef SANITIZER_INTERCEPT___PRINTF_CHK
+#define SANITIZER_INTERCEPT___PRINTF_CHK 0
+
+#undef SANITIZER_INTERCEPT_FREXP
+#define SANITIZER_INTERCEPT_FREXP 0
+
+#undef SANITIZER_INTERCEPT_FREXPF_FREXPL
+#define SANITIZER_INTERCEPT_FREXPF_FREXPL 0
+
+#undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
+#define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS 0
+
+#undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
+#define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS 0
+
+#undef SANITIZER_INTERCEPT_GETPWENT
+#define SANITIZER_INTERCEPT_GETPWENT 0
+
+#undef SANITIZER_INTERCEPT_FGETGRENT_R
+#define SANITIZER_INTERCEPT_FGETGRENT_R 0
+
+#undef SANITIZER_INTERCEPT_FGETPWENT
+#define SANITIZER_INTERCEPT_FGETPWENT 0
+
+#undef SANITIZER_INTERCEPT_GETPWENT_R
+#define SANITIZER_INTERCEPT_GETPWENT_R 0
+
+#undef SANITIZER_INTERCEPT_FGETPWENT_R
+#define SANITIZER_INTERCEPT_FGETPWENT_R 0
+
+#undef SANITIZER_INTERCEPT_SETPWENT
+#define SANITIZER_INTERCEPT_SETPWENT 0
+
+#undef SANITIZER_INTERCEPT_CLOCK_GETTIME
+#define SANITIZER_INTERCEPT_CLOCK_GETTIME 0
+
+#undef SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID
+#define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID 0
+
+#undef SANITIZER_INTERCEPT_GETITIMER
+#define SANITIZER_INTERCEPT_GETITIMER 0
+
+#undef SANITIZER_INTERCEPT_TIME
+#define SANITIZER_INTERCEPT_TIME 0
+
+#undef SANITIZER_INTERCEPT_GLOB
+#define SANITIZER_INTERCEPT_GLOB 0
+
+#undef SANITIZER_INTERCEPT_GLOB64
+#define SANITIZER_INTERCEPT_GLOB64 0
+
+#undef SANITIZER_INTERCEPT___B64_TO
+#define SANITIZER_INTERCEPT___B64_TO 0
+
+#undef SANITIZER_INTERCEPT_DN_COMP_EXPAND
+#define SANITIZER_INTERCEPT_DN_COMP_EXPAND 0
+
+#undef SANITIZER_INTERCEPT_POSIX_SPAWN
+#define SANITIZER_INTERCEPT_POSIX_SPAWN 0
+
+#undef SANITIZER_INTERCEPT_WAIT
+#define SANITIZER_INTERCEPT_WAIT 0
+
+#undef SANITIZER_INTERCEPT_INET
+#define SANITIZER_INTERCEPT_INET 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM
+#define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM 0
+
+#undef SANITIZER_INTERCEPT_GETADDRINFO
+#define SANITIZER_INTERCEPT_GETADDRINFO 0
+
+#undef SANITIZER_INTERCEPT_GETNAMEINFO
+#define SANITIZER_INTERCEPT_GETNAMEINFO 0
+
+#undef SANITIZER_INTERCEPT_GETSOCKNAME
+#define SANITIZER_INTERCEPT_GETSOCKNAME 0
+
+#undef SANITIZER_INTERCEPT_GETHOSTBYNAME
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME 0
+
+#undef SANITIZER_INTERCEPT_GETHOSTBYNAME2
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME2 0
+
+#undef SANITIZER_INTERCEPT_GETHOSTBYNAME_R
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME_R 0
+
+#undef SANITIZER_INTERCEPT_GETHOSTBYNAME2_R
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME2_R 0
+
+#undef SANITIZER_INTERCEPT_GETHOSTBYADDR_R
+#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R 0
+
+#undef SANITIZER_INTERCEPT_GETHOSTENT_R
+#define SANITIZER_INTERCEPT_GETHOSTENT_R 0
+
+#undef SANITIZER_INTERCEPT_GETSOCKOPT
+#define SANITIZER_INTERCEPT_GETSOCKOPT 0
+
+#undef SANITIZER_INTERCEPT_ACCEPT
+#define SANITIZER_INTERCEPT_ACCEPT 0
+
+#undef SANITIZER_INTERCEPT_ACCEPT4
+#define SANITIZER_INTERCEPT_ACCEPT4 0
+
+#undef SANITIZER_INTERCEPT_PACCEPT
+#define SANITIZER_INTERCEPT_PACCEPT 0
+
+#undef SANITIZER_INTERCEPT_MODF
+#define SANITIZER_INTERCEPT_MODF 0
+
+#undef SANITIZER_INTERCEPT_RECVMSG
+#define SANITIZER_INTERCEPT_RECVMSG 0
+
+#undef SANITIZER_INTERCEPT_SENDMSG
+#define SANITIZER_INTERCEPT_SENDMSG 0
+
+#undef SANITIZER_INTERCEPT_RECVMMSG
+#define SANITIZER_INTERCEPT_RECVMMSG 0
+
+#undef SANITIZER_INTERCEPT_SENDMMSG
+#define SANITIZER_INTERCEPT_SENDMMSG 0
+
+#undef SANITIZER_INTERCEPT_SYSMSG
+#define SANITIZER_INTERCEPT_SYSMSG 0
+
+#undef SANITIZER_INTERCEPT_GETPEERNAME
+#define SANITIZER_INTERCEPT_GETPEERNAME 0
+
+#undef SANITIZER_INTERCEPT_IOCTL
+#define SANITIZER_INTERCEPT_IOCTL 0
+
+#undef SANITIZER_INTERCEPT_INET_ATON
+#define SANITIZER_INTERCEPT_INET_ATON 0
+
+#undef SANITIZER_INTERCEPT_SYSINFO
+#define SANITIZER_INTERCEPT_SYSINFO 0
+
+#undef SANITIZER_INTERCEPT_READDIR
+#define SANITIZER_INTERCEPT_READDIR 0
+
+#undef SANITIZER_INTERCEPT_READDIR64
+#define SANITIZER_INTERCEPT_READDIR64 0
+
+#undef SANITIZER_INTERCEPT_PTRACE
+#define SANITIZER_INTERCEPT_PTRACE 0
+
+#undef SANITIZER_INTERCEPT_PTRACE
+#define SANITIZER_INTERCEPT_PTRACE 0
+
+#undef SANITIZER_INTERCEPT_SETLOCALE
+#define SANITIZER_INTERCEPT_SETLOCALE 0
+
+#undef SANITIZER_INTERCEPT_GETCWD
+#define SANITIZER_INTERCEPT_GETCWD 0
+
+#undef SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME
+#define SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME 0
+
+#undef SANITIZER_INTERCEPT_STRTOIMAX
+#define SANITIZER_INTERCEPT_STRTOIMAX 0
+
+#undef SANITIZER_INTERCEPT_MBSTOWCS
+#define SANITIZER_INTERCEPT_MBSTOWCS 0
+
+#undef SANITIZER_INTERCEPT_MBSNRTOWCS
+#define SANITIZER_INTERCEPT_MBSNRTOWCS 0
+
+#undef SANITIZER_INTERCEPT_WCSTOMBS
+#define SANITIZER_INTERCEPT_WCSTOMBS 0
+
+#undef SANITIZER_INTERCEPT_STRXFRM
+#define SANITIZER_INTERCEPT_STRXFRM 0
+
+#undef SANITIZER_INTERCEPT___STRXFRM_L
+#define SANITIZER_INTERCEPT___STRXFRM_L 0
+
+#undef SANITIZER_INTERCEPT_WCSXFRM
+#define SANITIZER_INTERCEPT_WCSXFRM 0
+
+#undef SANITIZER_INTERCEPT___WCSXFRM_L
+#define SANITIZER_INTERCEPT___WCSXFRM_L 0
+
+#undef SANITIZER_INTERCEPT_WCSNRTOMBS
+#define SANITIZER_INTERCEPT_WCSNRTOMBS 0
+
+#undef SANITIZER_INTERCEPT_WCRTOMB
+#define SANITIZER_INTERCEPT_WCRTOMB 0
+
+#undef SANITIZER_INTERCEPT_WCTOMB
+#define SANITIZER_INTERCEPT_WCTOMB 0
+
+#undef SANITIZER_INTERCEPT_TCGETATTR
+#define SANITIZER_INTERCEPT_TCGETATTR 0
+
+#undef SANITIZER_INTERCEPT_REALPATH
+#define SANITIZER_INTERCEPT_REALPATH 0
+
+#undef SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME
+#define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME 0
+
+#undef SANITIZER_INTERCEPT_CONFSTR
+#define SANITIZER_INTERCEPT_CONFSTR 0
+
+#undef SANITIZER_INTERCEPT_SCHED_GETAFFINITY
+#define SANITIZER_INTERCEPT_SCHED_GETAFFINITY 0
+
+#undef SANITIZER_INTERCEPT_SCHED_GETPARAM
+#define SANITIZER_INTERCEPT_SCHED_GETPARAM 0
+
+#undef SANITIZER_INTERCEPT_STRERROR
+#define SANITIZER_INTERCEPT_STRERROR 0
+
+#undef SANITIZER_INTERCEPT_STRERROR_R
+#define SANITIZER_INTERCEPT_STRERROR_R 0
+
+#undef SANITIZER_INTERCEPT_XPG_STRERROR_R
+#define SANITIZER_INTERCEPT_XPG_STRERROR_R 0
+
+#undef SANITIZER_INTERCEPT_SCANDIR
+#define SANITIZER_INTERCEPT_SCANDIR 0
+
+#undef SANITIZER_INTERCEPT_SCANDIR64
+#define SANITIZER_INTERCEPT_SCANDIR64 0
+
+#undef SANITIZER_INTERCEPT_GETGROUPS
+#define SANITIZER_INTERCEPT_GETGROUPS 0
+
+#undef SANITIZER_INTERCEPT_POLL
+#define SANITIZER_INTERCEPT_POLL 0
+
+#undef SANITIZER_INTERCEPT_PPOLL
+#define SANITIZER_INTERCEPT_PPOLL 0
+
+#undef SANITIZER_INTERCEPT_WORDEXP
+#define SANITIZER_INTERCEPT_WORDEXP 0
+
+#undef SANITIZER_INTERCEPT_SIGWAIT
+#define SANITIZER_INTERCEPT_SIGWAIT 0
+
+#undef SANITIZER_INTERCEPT_SIGWAITINFO
+#define SANITIZER_INTERCEPT_SIGWAITINFO 0
+
+#undef SANITIZER_INTERCEPT_SIGTIMEDWAIT
+#define SANITIZER_INTERCEPT_SIGTIMEDWAIT 0
+
+#undef SANITIZER_INTERCEPT_SIGSETOPS
+#define SANITIZER_INTERCEPT_SIGSETOPS 0
+
+#undef SANITIZER_INTERCEPT_SIGSET_LOGICOPS
+#define SANITIZER_INTERCEPT_SIGSET_LOGICOPS 0
+
+#undef SANITIZER_INTERCEPT_SIGPENDING
+#define SANITIZER_INTERCEPT_SIGPENDING 0
+
+#undef SANITIZER_INTERCEPT_SIGPROCMASK
+#define SANITIZER_INTERCEPT_SIGPROCMASK 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK
+#define SANITIZER_INTERCEPT_PTHREAD_SIGMASK 0
+
+#undef SANITIZER_INTERCEPT_BACKTRACE
+#define SANITIZER_INTERCEPT_BACKTRACE 0
+
+#undef SANITIZER_INTERCEPT_GETMNTENT
+#define SANITIZER_INTERCEPT_GETMNTENT 0
+
+#undef SANITIZER_INTERCEPT_GETMNTENT_R
+#define SANITIZER_INTERCEPT_GETMNTENT_R 0
+
+#undef SANITIZER_INTERCEPT_STATFS
+#define SANITIZER_INTERCEPT_STATFS 0
+
+#undef SANITIZER_INTERCEPT_STATFS64
+#define SANITIZER_INTERCEPT_STATFS64 0
+
+#undef SANITIZER_INTERCEPT_STATVFS
+#define SANITIZER_INTERCEPT_STATVFS 0
+
+#undef SANITIZER_INTERCEPT_STATVFS64
+#define SANITIZER_INTERCEPT_STATVFS64 0
+
+#undef SANITIZER_INTERCEPT_INITGROUPS
+#define SANITIZER_INTERCEPT_INITGROUPS 0
+
+#undef SANITIZER_INTERCEPT_ETHER_NTOA_ATON
+#define SANITIZER_INTERCEPT_ETHER_NTOA_ATON 0
+
+#undef SANITIZER_INTERCEPT_ETHER_HOST
+#define SANITIZER_INTERCEPT_ETHER_HOST 0
+
+#undef SANITIZER_INTERCEPT_ETHER_R
+#define SANITIZER_INTERCEPT_ETHER_R 0
+
+#undef SANITIZER_INTERCEPT_SHMCTL
+#define SANITIZER_INTERCEPT_SHMCTL 0
+
+#undef SANITIZER_INTERCEPT_RANDOM_R
+#define SANITIZER_INTERCEPT_RANDOM_R 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_ATTR_GET
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_GETAFFINITY_NP
+#define SANITIZER_INTERCEPT_PTHREAD_GETAFFINITY_NP 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED
+#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP
+#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED
+#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK
+#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED
+#define SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED 0
+
+#undef SANITIZER_INTERCEPT_TRYJOIN
+#define SANITIZER_INTERCEPT_TRYJOIN 0
+
+#undef SANITIZER_INTERCEPT_TIMEDJOIN
+#define SANITIZER_INTERCEPT_TIMEDJOIN 0
+
+#undef SANITIZER_INTERCEPT_THR_EXIT
+#define SANITIZER_INTERCEPT_THR_EXIT 0
+
+#undef SANITIZER_INTERCEPT_TMPNAM
+#define SANITIZER_INTERCEPT_TMPNAM 0
+
+#undef SANITIZER_INTERCEPT_TMPNAM_R
+#define SANITIZER_INTERCEPT_TMPNAM_R 0
+
+#undef SANITIZER_INTERCEPT_PTSNAME
+#define SANITIZER_INTERCEPT_PTSNAME 0
+
+#undef SANITIZER_INTERCEPT_PTSNAME_R
+#define SANITIZER_INTERCEPT_PTSNAME_R 0
+
+#undef SANITIZER_INTERCEPT_TTYNAME
+#define SANITIZER_INTERCEPT_TTYNAME 0
+
+#undef SANITIZER_INTERCEPT_TTYNAME_R
+#define SANITIZER_INTERCEPT_TTYNAME_R 0
+
+#undef SANITIZER_INTERCEPT_TEMPNAM
+#define SANITIZER_INTERCEPT_TEMPNAM 0
+
+#undef SANITIZER_INTERCEPT_SINCOS
+#define SANITIZER_INTERCEPT_SINCOS 0
+
+#undef SANITIZER_INTERCEPT_REMQUO
+#define SANITIZER_INTERCEPT_REMQUO 0
+
+#undef SANITIZER_INTERCEPT_REMQUOL
+#define SANITIZER_INTERCEPT_REMQUOL 0
+
+#undef SANITIZER_INTERCEPT_LGAMMA
+#define SANITIZER_INTERCEPT_LGAMMA 0
+
+#undef SANITIZER_INTERCEPT_LGAMMAL
+#define SANITIZER_INTERCEPT_LGAMMAL 0
+
+#undef SANITIZER_INTERCEPT_LGAMMA_R
+#define SANITIZER_INTERCEPT_LGAMMA_R 0
+
+#undef SANITIZER_INTERCEPT_LGAMMAL_R
+#define SANITIZER_INTERCEPT_LGAMMAL_R 0
+
+#undef SANITIZER_INTERCEPT_DRAND48_R
+#define SANITIZER_INTERCEPT_DRAND48_R 0
+
+#undef SANITIZER_INTERCEPT_RAND_R
+#define SANITIZER_INTERCEPT_RAND_R 0
+
+#undef SANITIZER_INTERCEPT_ICONV
+#define SANITIZER_INTERCEPT_ICONV 0
+
+#undef SANITIZER_INTERCEPT_TIMES
+#define SANITIZER_INTERCEPT_TIMES 0
+
+#undef SANITIZER_INTERCEPT_GETLINE
+#define SANITIZER_INTERCEPT_GETLINE 0
+
+#undef SANITIZER_INTERCEPT__EXIT
+#define SANITIZER_INTERCEPT__EXIT 0
+
+#undef SANITIZER_INTERCEPT___LIBC_MUTEX
+#define SANITIZER_INTERCEPT___LIBC_MUTEX 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP
+#define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_GETNAME_NP
+#define SANITIZER_INTERCEPT_PTHREAD_GETNAME_NP 0
+
+#undef SANITIZER_INTERCEPT_TLS_GET_ADDR
+#define SANITIZER_INTERCEPT_TLS_GET_ADDR 0
+
+#undef SANITIZER_INTERCEPT_LISTXATTR
+#define SANITIZER_INTERCEPT_LISTXATTR 0
+
+#undef SANITIZER_INTERCEPT_GETXATTR
+#define SANITIZER_INTERCEPT_GETXATTR 0
+
+#undef SANITIZER_INTERCEPT_GETRESID
+#define SANITIZER_INTERCEPT_GETRESID 0
+
+#undef SANITIZER_INTERCEPT_GETIFADDRS
+#define SANITIZER_INTERCEPT_GETIFADDRS 0
+
+#undef SANITIZER_INTERCEPT_IF_INDEXTONAME
+#define SANITIZER_INTERCEPT_IF_INDEXTONAME 0
+
+#undef SANITIZER_INTERCEPT_CAPGET
+#define SANITIZER_INTERCEPT_CAPGET 0
+
+#undef SANITIZER_INTERCEPT_AEABI_MEM
+#define SANITIZER_INTERCEPT_AEABI_MEM 0
+
+#undef SANITIZER_INTERCEPT_AEABI_MEM
+#define SANITIZER_INTERCEPT_AEABI_MEM 0
+
+#undef SANITIZER_INTERCEPT___BZERO
+#define SANITIZER_INTERCEPT___BZERO 0
+
+#undef SANITIZER_INTERCEPT_BZERO
+#define SANITIZER_INTERCEPT_BZERO 0
+
+#undef SANITIZER_INTERCEPT_FTIME
+#define SANITIZER_INTERCEPT_FTIME 0
+
+#undef SANITIZER_INTERCEPT_XDR
+#define SANITIZER_INTERCEPT_XDR 0
+
+#undef SANITIZER_INTERCEPT_XDRREC
+#define SANITIZER_INTERCEPT_XDRREC 0
+
+#undef SANITIZER_INTERCEPT_TSEARCH
+#define SANITIZER_INTERCEPT_TSEARCH 0
+
+#undef SANITIZER_INTERCEPT_LIBIO_INTERNALS
+#define SANITIZER_INTERCEPT_LIBIO_INTERNALS 0
+
+#undef SANITIZER_INTERCEPT_FOPEN
+#define SANITIZER_INTERCEPT_FOPEN 0
+
+#undef SANITIZER_INTERCEPT_FOPEN64
+#define SANITIZER_INTERCEPT_FOPEN64 0
+
+#undef SANITIZER_INTERCEPT_OPEN_MEMSTREAM
+#define SANITIZER_INTERCEPT_OPEN_MEMSTREAM 0
+
+#undef SANITIZER_INTERCEPT_OBSTACK
+#define SANITIZER_INTERCEPT_OBSTACK 0
+
+#undef SANITIZER_INTERCEPT_FFLUSH
+#define SANITIZER_INTERCEPT_FFLUSH 0
+
+#undef SANITIZER_INTERCEPT_FCLOSE
+#define SANITIZER_INTERCEPT_FCLOSE 0
+
+#undef SANITIZER_INTERCEPT_DLOPEN_DLCLOSE
+#define SANITIZER_INTERCEPT_DLOPEN_DLCLOSE 0
+
+#undef SANITIZER_INTERCEPT_GETPASS
+#define SANITIZER_INTERCEPT_GETPASS 0
+
+#undef SANITIZER_INTERCEPT_TIMERFD
+#define SANITIZER_INTERCEPT_TIMERFD 0
+
+#undef SANITIZER_INTERCEPT_MLOCKX
+#define SANITIZER_INTERCEPT_MLOCKX 0
+
+#undef SANITIZER_INTERCEPT_FOPENCOOKIE
+#define SANITIZER_INTERCEPT_FOPENCOOKIE 0
+
+#undef SANITIZER_INTERCEPT_SEM
+#define SANITIZER_INTERCEPT_SEM 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_SETCANCEL
+#define SANITIZER_INTERCEPT_PTHREAD_SETCANCEL 0
+
+#undef SANITIZER_INTERCEPT_MINCORE
+#define SANITIZER_INTERCEPT_MINCORE 0
+
+#undef SANITIZER_INTERCEPT_PROCESS_VM_READV
+#define SANITIZER_INTERCEPT_PROCESS_VM_READV 0
+
+#undef SANITIZER_INTERCEPT_CTERMID
+#define SANITIZER_INTERCEPT_CTERMID 0
+
+#undef SANITIZER_INTERCEPT_CTERMID_R
+#define SANITIZER_INTERCEPT_CTERMID_R 0
+
+#undef SANITIZER_INTERCEPTOR_HOOKS
+#define SANITIZER_INTERCEPTOR_HOOKS 0
+
+#undef SANITIZER_INTERCEPT_RECV_RECVFROM
+#define SANITIZER_INTERCEPT_RECV_RECVFROM 0
+
+#undef SANITIZER_INTERCEPT_SEND_SENDTO
+#define SANITIZER_INTERCEPT_SEND_SENDTO 0
+
+#undef SANITIZER_INTERCEPT_EVENTFD_READ_WRITE
+#define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE 0
+
+#undef SANITIZER_INTERCEPT_STAT
+#define SANITIZER_INTERCEPT_STAT 0
+
+#undef SANITIZER_INTERCEPT_STAT64
+#define SANITIZER_INTERCEPT_STAT64 0
+
+#undef SANITIZER_INTERCEPT_LSTAT
+#define SANITIZER_INTERCEPT_LSTAT 0
+
+#undef SANITIZER_INTERCEPT___XSTAT
+#define SANITIZER_INTERCEPT___XSTAT 0
+
+#undef SANITIZER_INTERCEPT___XSTAT64
+#define SANITIZER_INTERCEPT___XSTAT64 0
+
+#undef SANITIZER_INTERCEPT___LXSTAT
+#define SANITIZER_INTERCEPT___LXSTAT 0
+
+#undef SANITIZER_INTERCEPT___LXSTAT64
+#define SANITIZER_INTERCEPT___LXSTAT64 0
+
+#undef SANITIZER_INTERCEPT_UTMP
+#define SANITIZER_INTERCEPT_UTMP 0
+
+#undef SANITIZER_INTERCEPT_UTMPX
+#define SANITIZER_INTERCEPT_UTMPX 0
+
+#undef SANITIZER_INTERCEPT_GETLOADAVG
+#define SANITIZER_INTERCEPT_GETLOADAVG 0
+
+// #undef SANITIZER_INTERCEPT_MMAP
+// #define SANITIZER_INTERCEPT_MMAP 0
+
+#undef SANITIZER_INTERCEPT_MMAP64
+#define SANITIZER_INTERCEPT_MMAP64 0
+
+#undef SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
+#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO 0
+
+#undef SANITIZER_INTERCEPT_MEMALIGN
+#define SANITIZER_INTERCEPT_MEMALIGN 0
+
+#undef SANITIZER_INTERCEPT___LIBC_MEMALIGN
+#define SANITIZER_INTERCEPT___LIBC_MEMALIGN 0
+
+#undef SANITIZER_INTERCEPT_PVALLOC
+#define SANITIZER_INTERCEPT_PVALLOC 0
+
+#undef SANITIZER_INTERCEPT_CFREE
+#define SANITIZER_INTERCEPT_CFREE 0
+
+#undef SANITIZER_INTERCEPT_REALLOCARRAY
+#define SANITIZER_INTERCEPT_REALLOCARRAY 0
+
+#undef SANITIZER_INTERCEPT_ALIGNED_ALLOC
+#define SANITIZER_INTERCEPT_ALIGNED_ALLOC 0
+
+#undef SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE
+#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE 0
+
+#undef SANITIZER_INTERCEPT_MCHECK_MPROBE
+#define SANITIZER_INTERCEPT_MCHECK_MPROBE 0
+
+#undef SANITIZER_INTERCEPT_WCSLEN
+#define SANITIZER_INTERCEPT_WCSLEN 0
+
+#undef SANITIZER_INTERCEPT_WCSCAT
+#define SANITIZER_INTERCEPT_WCSCAT 0
+
+#undef SANITIZER_INTERCEPT_WCSDUP
+#define SANITIZER_INTERCEPT_WCSDUP 0
+
+#undef SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION
+#define SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION 0
+
+#undef SANITIZER_INTERCEPT_BSD_SIGNAL
+#define SANITIZER_INTERCEPT_BSD_SIGNAL 0
+
+#undef SANITIZER_INTERCEPT_ACCT
+#define SANITIZER_INTERCEPT_ACCT 0
+
+#undef SANITIZER_INTERCEPT_USER_FROM_UID
+#define SANITIZER_INTERCEPT_USER_FROM_UID 0
+
+#undef SANITIZER_INTERCEPT_UID_FROM_USER
+#define SANITIZER_INTERCEPT_UID_FROM_USER 0
+
+#undef SANITIZER_INTERCEPT_GROUP_FROM_GID
+#define SANITIZER_INTERCEPT_GROUP_FROM_GID 0
+
+#undef SANITIZER_INTERCEPT_GID_FROM_GROUP
+#define SANITIZER_INTERCEPT_GID_FROM_GROUP 0
+
+#undef SANITIZER_INTERCEPT_ACCESS
+#define SANITIZER_INTERCEPT_ACCESS 0
+
+#undef SANITIZER_INTERCEPT_FACCESSAT
+#define SANITIZER_INTERCEPT_FACCESSAT 0
+
+#undef SANITIZER_INTERCEPT_GETGROUPLIST
+#define SANITIZER_INTERCEPT_GETGROUPLIST 0
+
+#undef SANITIZER_INTERCEPT_STRLCPY
+#define SANITIZER_INTERCEPT_STRLCPY 0
+
+#undef SANITIZER_INTERCEPT_NAME_TO_HANDLE_AT
+#define SANITIZER_INTERCEPT_NAME_TO_HANDLE_AT 0
+
+#undef SANITIZER_INTERCEPT_OPEN_BY_HANDLE_AT
+#define SANITIZER_INTERCEPT_OPEN_BY_HANDLE_AT 0
+
+#undef SANITIZER_INTERCEPT_READLINK
+#define SANITIZER_INTERCEPT_READLINK 0
+
+#undef SANITIZER_INTERCEPT_READLINKAT
+#define SANITIZER_INTERCEPT_READLINKAT 0
+
+#undef SANITIZER_INTERCEPT_DEVNAME
+#define SANITIZER_INTERCEPT_DEVNAME 0
+
+#undef SANITIZER_INTERCEPT_DEVNAME_R
+#define SANITIZER_INTERCEPT_DEVNAME_R 0
+
+#undef SANITIZER_INTERCEPT_FGETLN
+#define SANITIZER_INTERCEPT_FGETLN 0
+
+#undef SANITIZER_INTERCEPT_STRMODE
+#define SANITIZER_INTERCEPT_STRMODE 0
+
+#undef SANITIZER_INTERCEPT_TTYENT
+#define SANITIZER_INTERCEPT_TTYENT 0
+
+#undef SANITIZER_INTERCEPT_TTYENTPATH
+#define SANITIZER_INTERCEPT_TTYENTPATH 0
+
+#undef SANITIZER_INTERCEPT_PROTOENT
+#define SANITIZER_INTERCEPT_PROTOENT 0
+
+#undef SANITIZER_INTERCEPT_PROTOENT_R
+#define SANITIZER_INTERCEPT_PROTOENT_R 0
+
+#undef SANITIZER_INTERCEPT_NETENT
+#define SANITIZER_INTERCEPT_NETENT 0
+
+#undef SANITIZER_INTERCEPT_SETVBUF
+#define SANITIZER_INTERCEPT_SETVBUF 0
+
+#undef SANITIZER_INTERCEPT_GETMNTINFO
+#define SANITIZER_INTERCEPT_GETMNTINFO 0
+
+#undef SANITIZER_INTERCEPT_MI_VECTOR_HASH
+#define SANITIZER_INTERCEPT_MI_VECTOR_HASH 0
+
+#undef SANITIZER_INTERCEPT_GETVFSSTAT
+#define SANITIZER_INTERCEPT_GETVFSSTAT 0
+
+#undef SANITIZER_INTERCEPT_REGEX
+#define SANITIZER_INTERCEPT_REGEX 0
+
+#undef SANITIZER_INTERCEPT_REGEXSUB
+#define SANITIZER_INTERCEPT_REGEXSUB 0
+
+#undef SANITIZER_INTERCEPT_FTS
+#define SANITIZER_INTERCEPT_FTS 0
+
+#undef SANITIZER_INTERCEPT_SYSCTL
+#define SANITIZER_INTERCEPT_SYSCTL 0
+
+#undef SANITIZER_INTERCEPT_ASYSCTL
+#define SANITIZER_INTERCEPT_ASYSCTL 0
+
+#undef SANITIZER_INTERCEPT_SYSCTLGETMIBINFO
+#define SANITIZER_INTERCEPT_SYSCTLGETMIBINFO 0
+
+#undef SANITIZER_INTERCEPT_NL_LANGINFO
+#define SANITIZER_INTERCEPT_NL_LANGINFO 0
+
+#undef SANITIZER_INTERCEPT_MODCTL
+#define SANITIZER_INTERCEPT_MODCTL 0
+
+#undef SANITIZER_INTERCEPT_CAPSICUM
+#define SANITIZER_INTERCEPT_CAPSICUM 0
+
+#undef SANITIZER_INTERCEPT_STRTONUM
+#define SANITIZER_INTERCEPT_STRTONUM 0
+
+#undef SANITIZER_INTERCEPT_FPARSELN
+#define SANITIZER_INTERCEPT_FPARSELN 0
+
+#undef SANITIZER_INTERCEPT_STATVFS1
+#define SANITIZER_INTERCEPT_STATVFS1 0
+
+#undef SANITIZER_INTERCEPT_STRTOI
+#define SANITIZER_INTERCEPT_STRTOI 0
+
+#undef SANITIZER_INTERCEPT_CAPSICUM
+#define SANITIZER_INTERCEPT_CAPSICUM 0
+
+#undef SANITIZER_INTERCEPT_SHA1
+#define SANITIZER_INTERCEPT_SHA1 0
+
+#undef SANITIZER_INTERCEPT_MD4
+#define SANITIZER_INTERCEPT_MD4 0
+
+#undef SANITIZER_INTERCEPT_RMD160
+#define SANITIZER_INTERCEPT_RMD160 0
+
+#undef SANITIZER_INTERCEPT_MD5
+#define SANITIZER_INTERCEPT_MD5 0
+
+#undef SANITIZER_INTERCEPT_FSEEK
+#define SANITIZER_INTERCEPT_FSEEK 0
+
+#undef SANITIZER_INTERCEPT_MD2
+#define SANITIZER_INTERCEPT_MD2 0
+
+#undef SANITIZER_INTERCEPT_SHA2
+#define SANITIZER_INTERCEPT_SHA2 0
+
+#undef SANITIZER_INTERCEPT_CDB
+#define SANITIZER_INTERCEPT_CDB 0
+
+#undef SANITIZER_INTERCEPT_VIS
+#define SANITIZER_INTERCEPT_VIS 0
+
+#undef SANITIZER_INTERCEPT_POPEN
+#define SANITIZER_INTERCEPT_POPEN 0
+
+#undef SANITIZER_INTERCEPT_POPENVE
+#define SANITIZER_INTERCEPT_POPENVE 0
+
+#undef SANITIZER_INTERCEPT_PCLOSE
+#define SANITIZER_INTERCEPT_PCLOSE 0
+
+#undef SANITIZER_INTERCEPT_FUNOPEN
+#define SANITIZER_INTERCEPT_FUNOPEN 0
+
+#undef SANITIZER_INTERCEPT_FUNOPEN2
+#define SANITIZER_INTERCEPT_FUNOPEN2 0
+
+#undef SANITIZER_INTERCEPT_GETFSENT
+#define SANITIZER_INTERCEPT_GETFSENT 0
+
+#undef SANITIZER_INTERCEPT_ARC4RANDOM
+#define SANITIZER_INTERCEPT_ARC4RANDOM 0
+
+#undef SANITIZER_INTERCEPT_FDEVNAME
+#define SANITIZER_INTERCEPT_FDEVNAME 0
+
+#undef SANITIZER_INTERCEPT_GETUSERSHELL
+#define SANITIZER_INTERCEPT_GETUSERSHELL 0
+
+#undef SANITIZER_INTERCEPT_SL_INIT
+#define SANITIZER_INTERCEPT_SL_INIT 0
+
+#undef SANITIZER_INTERCEPT_GETRANDOM
+#define SANITIZER_INTERCEPT_GETRANDOM 0
+
+#undef SANITIZER_INTERCEPT___CXA_ATEXIT
+#define SANITIZER_INTERCEPT___CXA_ATEXIT 0
+
+#undef SANITIZER_INTERCEPT_ATEXIT
+#define SANITIZER_INTERCEPT_ATEXIT 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_ATFORK
+#define SANITIZER_INTERCEPT_PTHREAD_ATFORK 0
+
+#undef SANITIZER_INTERCEPT_GETENTROPY
+#define SANITIZER_INTERCEPT_GETENTROPY 0
+
+#undef SANITIZER_INTERCEPT_QSORT
+#define SANITIZER_INTERCEPT_QSORT 0
+
+#undef SANITIZER_INTERCEPT_QSORT_R
+#define SANITIZER_INTERCEPT_QSORT_R 0
+
+#undef SANITIZER_INTERCEPT_BSEARCH
+#define SANITIZER_INTERCEPT_BSEARCH 0
+
+#undef SANITIZER_INTERCEPT_SIGALTSTACK
+#define SANITIZER_INTERCEPT_SIGALTSTACK 0
+
+#undef SANITIZER_INTERCEPT_UNAME
+#define SANITIZER_INTERCEPT_UNAME 0
+
+#undef SANITIZER_INTERCEPT___XUNAME
+#define SANITIZER_INTERCEPT___XUNAME 0
+
+#undef SANITIZER_INTERCEPT_FLOPEN
+#define SANITIZER_INTERCEPT_FLOPEN 0
+
+#undef SANITIZER_INTERCEPT_PROCCTL
+#define SANITIZER_INTERCEPT_PROCCTL 0
+
+#undef SANITIZER_INTERCEPT_HEXDUMP
+#define SANITIZER_INTERCEPT_HEXDUMP 0
+
+#undef SANITIZER_INTERCEPT_ARGP_PARSE
+#define SANITIZER_INTERCEPT_ARGP_PARSE 0
+
+#endif // HWASAN_PLATFORM_INTERCEPTORS_H
diff --git a/libsanitizer/hwasan/hwasan_report.cpp b/libsanitizer/hwasan/hwasan_report.cpp
index 8f9dc6c..5e8aa31 100644
--- a/libsanitizer/hwasan/hwasan_report.cpp
+++ b/libsanitizer/hwasan/hwasan_report.cpp
@@ -22,8 +22,10 @@
#include "hwasan_thread.h"
#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_array_ref.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
@@ -36,7 +38,7 @@ namespace __hwasan {
class ScopedReport {
public:
- ScopedReport(bool fatal = false) : error_message_(1), fatal(fatal) {
+ explicit ScopedReport(bool fatal) : fatal(fatal) {
Lock lock(&error_message_lock_);
error_message_ptr_ = fatal ? &error_message_ : nullptr;
++hwasan_report_count;
@@ -64,11 +66,7 @@ class ScopedReport {
Lock lock(&error_message_lock_);
if (!error_message_ptr_)
return;
- uptr len = internal_strlen(msg);
- uptr old_size = error_message_ptr_->size();
- error_message_ptr_->resize(old_size + len);
- // overwrite old trailing '\0', keep new trailing '\0' untouched.
- internal_memcpy(&(*error_message_ptr_)[old_size - 1], msg, len);
+ error_message_ptr_->Append(msg);
}
static void SetErrorReportCallback(void (*callback)(const char *)) {
@@ -77,17 +75,17 @@ class ScopedReport {
}
private:
- ScopedErrorReportLock error_report_lock_;
- InternalMmapVector<char> error_message_;
+ InternalScopedString error_message_;
bool fatal;
- static InternalMmapVector<char> *error_message_ptr_;
static Mutex error_message_lock_;
+ static InternalScopedString *error_message_ptr_
+ SANITIZER_GUARDED_BY(error_message_lock_);
static void (*error_report_callback_)(const char *);
};
-InternalMmapVector<char> *ScopedReport::error_message_ptr_;
Mutex ScopedReport::error_message_lock_;
+InternalScopedString *ScopedReport::error_message_ptr_;
void (*ScopedReport::error_report_callback_)(const char *);
// If there is an active ScopedReport, append to its error message.
@@ -111,29 +109,45 @@ static void MaybePrintAndroidHelpUrl() {
#endif
}
+namespace {
// A RAII object that holds a copy of the current thread stack ring buffer.
// The actual stack buffer may change while we are iterating over it (for
// example, Printf may call syslog() which can itself be built with hwasan).
class SavedStackAllocations {
public:
- SavedStackAllocations(StackAllocationsRingBuffer *rb) {
+ SavedStackAllocations() = default;
+
+ explicit SavedStackAllocations(Thread *t) { CopyFrom(t); }
+
+ void CopyFrom(Thread *t) {
+ StackAllocationsRingBuffer *rb = t->stack_allocations();
uptr size = rb->size() * sizeof(uptr);
void *storage =
MmapAlignedOrDieOnFatalError(size, size * 2, "saved stack allocations");
new (&rb_) StackAllocationsRingBuffer(*rb, storage);
+ thread_id_ = t->unique_id();
}
~SavedStackAllocations() {
- StackAllocationsRingBuffer *rb = get();
- UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr));
+ if (rb_) {
+ StackAllocationsRingBuffer *rb = get();
+ UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr));
+ }
+ }
+
+ const StackAllocationsRingBuffer *get() const {
+ return (const StackAllocationsRingBuffer *)&rb_;
}
StackAllocationsRingBuffer *get() {
return (StackAllocationsRingBuffer *)&rb_;
}
+ u32 thread_id() const { return thread_id_; }
+
private:
- uptr rb_;
+ uptr rb_ = 0;
+ u32 thread_id_;
};
class Decorator: public __sanitizer::SanitizerCommonDecorator {
@@ -146,6 +160,7 @@ class Decorator: public __sanitizer::SanitizerCommonDecorator {
const char *Location() { return Green(); }
const char *Thread() { return Green(); }
};
+} // namespace
static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr,
HeapAllocationRecord *har, uptr *ring_index,
@@ -186,7 +201,7 @@ static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr,
return false;
}
-static void PrintStackAllocations(StackAllocationsRingBuffer *sa,
+static void PrintStackAllocations(const StackAllocationsRingBuffer *sa,
tag_t addr_tag, uptr untagged_addr) {
uptr frames = Min((uptr)flags()->stack_history_size, sa->size());
bool found_local = false;
@@ -242,12 +257,13 @@ static void PrintStackAllocations(StackAllocationsRingBuffer *sa,
break;
uptr pc_mask = (1ULL << 48) - 1;
uptr pc = record & pc_mask;
- frame_desc.append(" record_addr:0x%zx record:0x%zx",
- reinterpret_cast<uptr>(record_addr), record);
+ frame_desc.AppendF(" record_addr:0x%zx record:0x%zx",
+ reinterpret_cast<uptr>(record_addr), record);
if (SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc)) {
- RenderFrame(&frame_desc, " %F %L", 0, frame->info.address, &frame->info,
- common_flags()->symbolize_vs_style,
- common_flags()->strip_path_prefix);
+ StackTracePrinter::GetOrInit()->RenderFrame(
+ &frame_desc, " %F %L", 0, frame->info.address, &frame->info,
+ common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
frame->ClearAll();
}
Printf("%s\n", frame_desc.data());
@@ -305,22 +321,342 @@ static uptr GetGlobalSizeFromDescriptor(uptr ptr) {
return 0;
}
-static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate,
- tag_t *left, tag_t *right) {
- Decorator d;
- uptr mem = ShadowToMem(reinterpret_cast<uptr>(candidate));
- HwasanChunkView chunk = FindHeapChunkByAddress(mem);
+void ReportStats() {}
+
+constexpr uptr kDumpWidth = 16;
+constexpr uptr kShadowLines = 17;
+constexpr uptr kShadowDumpSize = kShadowLines * kDumpWidth;
+
+constexpr uptr kShortLines = 3;
+constexpr uptr kShortDumpSize = kShortLines * kDumpWidth;
+constexpr uptr kShortDumpOffset = (kShadowLines - kShortLines) / 2 * kDumpWidth;
+
+static uptr GetPrintTagStart(uptr addr) {
+ addr = MemToShadow(addr);
+ addr = RoundDownTo(addr, kDumpWidth);
+ addr -= kDumpWidth * (kShadowLines / 2);
+ return addr;
+}
+
+template <typename PrintTag>
+static void PrintTagInfoAroundAddr(uptr addr, uptr num_rows,
+ InternalScopedString &s,
+ PrintTag print_tag) {
+ uptr center_row_beg = RoundDownTo(addr, kDumpWidth);
+ uptr beg_row = center_row_beg - kDumpWidth * (num_rows / 2);
+ uptr end_row = center_row_beg + kDumpWidth * ((num_rows + 1) / 2);
+ for (uptr row = beg_row; row < end_row; row += kDumpWidth) {
+ s.Append(row == center_row_beg ? "=>" : " ");
+ s.AppendF("%p:", (void *)ShadowToMem(row));
+ for (uptr i = 0; i < kDumpWidth; i++) {
+ s.Append(row + i == addr ? "[" : " ");
+ print_tag(s, row + i);
+ s.Append(row + i == addr ? "]" : " ");
+ }
+ s.AppendF("\n");
+ }
+}
+
+template <typename GetTag, typename GetShortTag>
+static void PrintTagsAroundAddr(uptr addr, GetTag get_tag,
+ GetShortTag get_short_tag) {
+ InternalScopedString s;
+ addr = MemToShadow(addr);
+ s.AppendF(
+ "Memory tags around the buggy address (one tag corresponds to %zd "
+ "bytes):\n",
+ kShadowAlignment);
+ PrintTagInfoAroundAddr(addr, kShadowLines, s,
+ [&](InternalScopedString &s, uptr tag_addr) {
+ tag_t tag = get_tag(tag_addr);
+ s.AppendF("%02x", tag);
+ });
+
+ s.AppendF(
+ "Tags for short granules around the buggy address (one tag corresponds "
+ "to %zd bytes):\n",
+ kShadowAlignment);
+ PrintTagInfoAroundAddr(addr, kShortLines, s,
+ [&](InternalScopedString &s, uptr tag_addr) {
+ tag_t tag = get_tag(tag_addr);
+ if (tag >= 1 && tag <= kShadowAlignment) {
+ tag_t short_tag = get_short_tag(tag_addr);
+ s.AppendF("%02x", short_tag);
+ } else {
+ s.AppendF("..");
+ }
+ });
+ s.AppendF(
+ "See "
+ "https://clang.llvm.org/docs/"
+ "HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
+ "description of short granule tags\n");
+ Printf("%s", s.data());
+}
+
+static uptr GetTopPc(const StackTrace *stack) {
+ return stack->size ? StackTrace::GetPreviousInstructionPc(stack->trace[0])
+ : 0;
+}
+
+namespace {
+class BaseReport {
+ public:
+ BaseReport(StackTrace *stack, bool fatal, uptr tagged_addr, uptr access_size)
+ : scoped_report(fatal),
+ stack(stack),
+ tagged_addr(tagged_addr),
+ access_size(access_size),
+ untagged_addr(UntagAddr(tagged_addr)),
+ ptr_tag(GetTagFromPointer(tagged_addr)),
+ mismatch_offset(FindMismatchOffset()),
+ heap(CopyHeapChunk()),
+ allocations(CopyAllocations()),
+ candidate(FindBufferOverflowCandidate()),
+ shadow(CopyShadow()) {}
+
+ protected:
+ struct OverflowCandidate {
+ uptr untagged_addr = 0;
+ bool after = false;
+ bool is_close = false;
+
+ struct {
+ uptr begin = 0;
+ uptr end = 0;
+ u32 thread_id = 0;
+ u32 stack_id = 0;
+ bool is_allocated = false;
+ } heap;
+ };
+
+ struct HeapAllocation {
+ HeapAllocationRecord har = {};
+ uptr ring_index = 0;
+ uptr num_matching_addrs = 0;
+ uptr num_matching_addrs_4b = 0;
+ u32 free_thread_id = 0;
+ };
+
+ struct Allocations {
+ ArrayRef<SavedStackAllocations> stack;
+ ArrayRef<HeapAllocation> heap;
+ };
+
+ struct HeapChunk {
+ uptr begin = 0;
+ uptr size = 0;
+ u32 stack_id = 0;
+ bool from_small_heap = false;
+ bool is_allocated = false;
+ };
+
+ struct Shadow {
+ uptr addr = 0;
+ tag_t tags[kShadowDumpSize] = {};
+ tag_t short_tags[kShortDumpSize] = {};
+ };
+
+ sptr FindMismatchOffset() const;
+ Shadow CopyShadow() const;
+ tag_t GetTagCopy(uptr addr) const;
+ tag_t GetShortTagCopy(uptr addr) const;
+ HeapChunk CopyHeapChunk() const;
+ Allocations CopyAllocations();
+ OverflowCandidate FindBufferOverflowCandidate() const;
+ void PrintAddressDescription() const;
+ void PrintHeapOrGlobalCandidate() const;
+ void PrintTags(uptr addr) const;
+
+ SavedStackAllocations stack_allocations_storage[16];
+ HeapAllocation heap_allocations_storage[256];
+
+ const ScopedReport scoped_report;
+ const StackTrace *stack = nullptr;
+ const uptr tagged_addr = 0;
+ const uptr access_size = 0;
+ const uptr untagged_addr = 0;
+ const tag_t ptr_tag = 0;
+ const sptr mismatch_offset = 0;
+
+ const HeapChunk heap;
+ const Allocations allocations;
+ const OverflowCandidate candidate;
+
+ const Shadow shadow;
+};
+
+sptr BaseReport::FindMismatchOffset() const {
+ if (!access_size)
+ return 0;
+ sptr offset =
+ __hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size);
+ CHECK_GE(offset, 0);
+ CHECK_LT(offset, static_cast<sptr>(access_size));
+ tag_t *tag_ptr =
+ reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset));
+ tag_t mem_tag = *tag_ptr;
+
+ if (mem_tag && mem_tag < kShadowAlignment) {
+ tag_t *granule_ptr = reinterpret_cast<tag_t *>((untagged_addr + offset) &
+ ~(kShadowAlignment - 1));
+ // If offset is 0, (untagged_addr + offset) is not aligned to granules.
+ // This is the offset of the leftmost accessed byte within the bad granule.
+ u8 in_granule_offset = (untagged_addr + offset) & (kShadowAlignment - 1);
+ tag_t short_tag = granule_ptr[kShadowAlignment - 1];
+ // The first mismatch was a short granule that matched the ptr_tag.
+ if (short_tag == ptr_tag) {
+ // If the access starts after the end of the short granule, then the first
+ // bad byte is the first byte of the access; otherwise it is the first
+ // byte past the end of the short granule
+ if (mem_tag > in_granule_offset) {
+ offset += mem_tag - in_granule_offset;
+ }
+ }
+ }
+ return offset;
+}
+
+BaseReport::Shadow BaseReport::CopyShadow() const {
+ Shadow result;
+ if (!MemIsApp(untagged_addr))
+ return result;
+
+ result.addr = GetPrintTagStart(untagged_addr + mismatch_offset);
+ uptr tag_addr = result.addr;
+ uptr short_end = kShortDumpOffset + ARRAY_SIZE(shadow.short_tags);
+ for (uptr i = 0; i < ARRAY_SIZE(result.tags); ++i, ++tag_addr) {
+ if (!MemIsShadow(tag_addr))
+ continue;
+ result.tags[i] = *reinterpret_cast<tag_t *>(tag_addr);
+ if (i < kShortDumpOffset || i >= short_end)
+ continue;
+ uptr granule_addr = ShadowToMem(tag_addr);
+ if (1 <= result.tags[i] && result.tags[i] <= kShadowAlignment &&
+ IsAccessibleMemoryRange(granule_addr, kShadowAlignment)) {
+ result.short_tags[i - kShortDumpOffset] =
+ *reinterpret_cast<tag_t *>(granule_addr + kShadowAlignment - 1);
+ }
+ }
+ return result;
+}
+
+tag_t BaseReport::GetTagCopy(uptr addr) const {
+ CHECK_GE(addr, shadow.addr);
+ uptr idx = addr - shadow.addr;
+ CHECK_LT(idx, ARRAY_SIZE(shadow.tags));
+ return shadow.tags[idx];
+}
+
+tag_t BaseReport::GetShortTagCopy(uptr addr) const {
+ CHECK_GE(addr, shadow.addr + kShortDumpOffset);
+ uptr idx = addr - shadow.addr - kShortDumpOffset;
+ CHECK_LT(idx, ARRAY_SIZE(shadow.short_tags));
+ return shadow.short_tags[idx];
+}
+
+BaseReport::HeapChunk BaseReport::CopyHeapChunk() const {
+ HeapChunk result = {};
+ if (MemIsShadow(untagged_addr))
+ return result;
+ HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
+ result.begin = chunk.Beg();
+ if (result.begin) {
+ result.size = chunk.ActualSize();
+ result.from_small_heap = chunk.FromSmallHeap();
+ result.is_allocated = chunk.IsAllocated();
+ result.stack_id = chunk.GetAllocStackId();
+ }
+ return result;
+}
+
+BaseReport::Allocations BaseReport::CopyAllocations() {
+ if (MemIsShadow(untagged_addr))
+ return {};
+ uptr stack_allocations_count = 0;
+ uptr heap_allocations_count = 0;
+ hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
+ if (stack_allocations_count < ARRAY_SIZE(stack_allocations_storage) &&
+ t->AddrIsInStack(untagged_addr)) {
+ stack_allocations_storage[stack_allocations_count++].CopyFrom(t);
+ }
+
+ if (heap_allocations_count < ARRAY_SIZE(heap_allocations_storage)) {
+ // Scan all threads' ring buffers to find if it's a heap-use-after-free.
+ HeapAllocationRecord har;
+ uptr ring_index, num_matching_addrs, num_matching_addrs_4b;
+ if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har,
+ &ring_index, &num_matching_addrs,
+ &num_matching_addrs_4b)) {
+ auto &ha = heap_allocations_storage[heap_allocations_count++];
+ ha.har = har;
+ ha.ring_index = ring_index;
+ ha.num_matching_addrs = num_matching_addrs;
+ ha.num_matching_addrs_4b = num_matching_addrs_4b;
+ ha.free_thread_id = t->unique_id();
+ }
+ }
+ });
+
+ return {{stack_allocations_storage, stack_allocations_count},
+ {heap_allocations_storage, heap_allocations_count}};
+}
+
+BaseReport::OverflowCandidate BaseReport::FindBufferOverflowCandidate() const {
+ OverflowCandidate result = {};
+ if (MemIsShadow(untagged_addr))
+ return result;
+ // Check if this looks like a heap buffer overflow by scanning
+ // the shadow left and right and looking for the first adjacent
+ // object with a different memory tag. If that tag matches ptr_tag,
+ // check the allocator if it has a live chunk there.
+ tag_t *tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
+ tag_t *candidate_tag_ptr = nullptr, *left = tag_ptr, *right = tag_ptr;
+ uptr candidate_distance = 0;
+ for (; candidate_distance < 1000; candidate_distance++) {
+ if (MemIsShadow(reinterpret_cast<uptr>(left)) && TagsEqual(ptr_tag, left)) {
+ candidate_tag_ptr = left;
+ break;
+ }
+ --left;
+ if (MemIsShadow(reinterpret_cast<uptr>(right)) &&
+ TagsEqual(ptr_tag, right)) {
+ candidate_tag_ptr = right;
+ break;
+ }
+ ++right;
+ }
+
+ constexpr auto kCloseCandidateDistance = 1;
+ result.is_close = candidate_distance <= kCloseCandidateDistance;
+
+ result.after = candidate_tag_ptr == left;
+ result.untagged_addr = ShadowToMem(reinterpret_cast<uptr>(candidate_tag_ptr));
+ HwasanChunkView chunk = FindHeapChunkByAddress(result.untagged_addr);
if (chunk.IsAllocated()) {
+ result.heap.is_allocated = true;
+ result.heap.begin = chunk.Beg();
+ result.heap.end = chunk.End();
+ result.heap.thread_id = chunk.GetAllocThreadId();
+ result.heap.stack_id = chunk.GetAllocStackId();
+ }
+ return result;
+}
+
+void BaseReport::PrintHeapOrGlobalCandidate() const {
+ Decorator d;
+ if (candidate.heap.is_allocated) {
uptr offset;
const char *whence;
- if (untagged_addr < chunk.End() && untagged_addr >= chunk.Beg()) {
- offset = untagged_addr - chunk.Beg();
+ if (candidate.heap.begin <= untagged_addr &&
+ untagged_addr < candidate.heap.end) {
+ offset = untagged_addr - candidate.heap.begin;
whence = "inside";
- } else if (candidate == left) {
- offset = untagged_addr - chunk.End();
+ } else if (candidate.after) {
+ offset = untagged_addr - candidate.heap.end;
whence = "after";
} else {
- offset = chunk.Beg() - untagged_addr;
+ offset = candidate.heap.begin - untagged_addr;
whence = "before";
}
Printf("%s", d.Error());
@@ -328,12 +664,13 @@ static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate,
Printf("%s", d.Default());
Printf("%s", d.Location());
Printf("%p is located %zd bytes %s a %zd-byte region [%p,%p)\n",
- untagged_addr, offset, whence, chunk.UsedSize(), chunk.Beg(),
- chunk.End());
+ untagged_addr, offset, whence,
+ candidate.heap.end - candidate.heap.begin, candidate.heap.begin,
+ candidate.heap.end);
Printf("%s", d.Allocation());
- Printf("allocated by thread T%u here:\n", chunk.GetAllocThreadId());
+ Printf("allocated by thread T%u here:\n", candidate.heap.thread_id);
Printf("%s", d.Default());
- GetStackTraceFromId(chunk.GetAllocStackId()).Print();
+ GetStackTraceFromId(candidate.heap.stack_id).Print();
return;
}
// Check whether the address points into a loaded library. If so, this is
@@ -341,47 +678,45 @@ static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate,
const char *module_name;
uptr module_address;
Symbolizer *sym = Symbolizer::GetOrInit();
- if (sym->GetModuleNameAndOffsetForPC(mem, &module_name, &module_address)) {
+ if (sym->GetModuleNameAndOffsetForPC(candidate.untagged_addr, &module_name,
+ &module_address)) {
Printf("%s", d.Error());
Printf("\nCause: global-overflow\n");
Printf("%s", d.Default());
DataInfo info;
Printf("%s", d.Location());
- if (sym->SymbolizeData(mem, &info) && info.start) {
+ if (sym->SymbolizeData(candidate.untagged_addr, &info) && info.start) {
Printf(
"%p is located %zd bytes %s a %zd-byte global variable "
"%s [%p,%p) in %s\n",
untagged_addr,
- candidate == left ? untagged_addr - (info.start + info.size)
- : info.start - untagged_addr,
- candidate == left ? "after" : "before", info.size, info.name,
+ candidate.after ? untagged_addr - (info.start + info.size)
+ : info.start - untagged_addr,
+ candidate.after ? "after" : "before", info.size, info.name,
info.start, info.start + info.size, module_name);
} else {
- uptr size = GetGlobalSizeFromDescriptor(mem);
+ uptr size = GetGlobalSizeFromDescriptor(candidate.untagged_addr);
if (size == 0)
// We couldn't find the size of the global from the descriptors.
Printf(
"%p is located %s a global variable in "
"\n #0 0x%x (%s+0x%x)\n",
- untagged_addr, candidate == left ? "after" : "before", mem,
- module_name, module_address);
+ untagged_addr, candidate.after ? "after" : "before",
+ candidate.untagged_addr, module_name, module_address);
else
Printf(
"%p is located %s a %zd-byte global variable in "
"\n #0 0x%x (%s+0x%x)\n",
- untagged_addr, candidate == left ? "after" : "before", size, mem,
- module_name, module_address);
+ untagged_addr, candidate.after ? "after" : "before", size,
+ candidate.untagged_addr, module_name, module_address);
}
Printf("%s", d.Default());
}
}
-void PrintAddressDescription(
- uptr tagged_addr, uptr access_size,
- StackAllocationsRingBuffer *current_stack_allocations) {
+void BaseReport::PrintAddressDescription() const {
Decorator d;
int num_descriptions_printed = 0;
- uptr untagged_addr = UntagAddr(tagged_addr);
if (MemIsShadow(untagged_addr)) {
Printf("%s%p is HWAsan shadow memory.\n%s", d.Location(), untagged_addr,
@@ -390,113 +725,80 @@ void PrintAddressDescription(
}
// Print some very basic information about the address, if it's a heap.
- HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
- if (uptr beg = chunk.Beg()) {
- uptr size = chunk.ActualSize();
- Printf("%s[%p,%p) is a %s %s heap chunk; "
- "size: %zd offset: %zd\n%s",
- d.Location(),
- beg, beg + size,
- chunk.FromSmallHeap() ? "small" : "large",
- chunk.IsAllocated() ? "allocated" : "unallocated",
- size, untagged_addr - beg,
- d.Default());
+ if (heap.begin) {
+ Printf(
+ "%s[%p,%p) is a %s %s heap chunk; "
+ "size: %zd offset: %zd\n%s",
+ d.Location(), heap.begin, heap.begin + heap.size,
+ heap.from_small_heap ? "small" : "large",
+ heap.is_allocated ? "allocated" : "unallocated", heap.size,
+ untagged_addr - heap.begin, d.Default());
}
- tag_t addr_tag = GetTagFromPointer(tagged_addr);
+ auto announce_by_id = [](u32 thread_id) {
+ hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
+ if (thread_id == t->unique_id())
+ t->Announce();
+ });
+ };
- bool on_stack = false;
// Check stack first. If the address is on the stack of a live thread, we
// know it cannot be a heap / global overflow.
- hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
- if (t->AddrIsInStack(untagged_addr)) {
- on_stack = true;
- // TODO(fmayer): figure out how to distinguish use-after-return and
- // stack-buffer-overflow.
- Printf("%s", d.Error());
- Printf("\nCause: stack tag-mismatch\n");
- Printf("%s", d.Location());
- Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
- t->unique_id());
- Printf("%s", d.Default());
- t->Announce();
-
- auto *sa = (t == GetCurrentThread() && current_stack_allocations)
- ? current_stack_allocations
- : t->stack_allocations();
- PrintStackAllocations(sa, addr_tag, untagged_addr);
- num_descriptions_printed++;
- }
- });
+ for (const auto &sa : allocations.stack) {
+ // TODO(fmayer): figure out how to distinguish use-after-return and
+ // stack-buffer-overflow.
+ Printf("%s", d.Error());
+ Printf("\nCause: stack tag-mismatch\n");
+ Printf("%s", d.Location());
+ Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
+ sa.thread_id());
+ Printf("%s", d.Default());
+ announce_by_id(sa.thread_id());
+ PrintStackAllocations(sa.get(), ptr_tag, untagged_addr);
+ num_descriptions_printed++;
+ }
- // Check if this looks like a heap buffer overflow by scanning
- // the shadow left and right and looking for the first adjacent
- // object with a different memory tag. If that tag matches addr_tag,
- // check the allocator if it has a live chunk there.
- tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
- tag_t *candidate = nullptr, *left = tag_ptr, *right = tag_ptr;
- uptr candidate_distance = 0;
- for (; candidate_distance < 1000; candidate_distance++) {
- if (MemIsShadow(reinterpret_cast<uptr>(left)) &&
- TagsEqual(addr_tag, left)) {
- candidate = left;
- break;
- }
- --left;
- if (MemIsShadow(reinterpret_cast<uptr>(right)) &&
- TagsEqual(addr_tag, right)) {
- candidate = right;
- break;
- }
- ++right;
+ if (allocations.stack.empty() && candidate.untagged_addr &&
+ candidate.is_close) {
+ PrintHeapOrGlobalCandidate();
+ num_descriptions_printed++;
}
- constexpr auto kCloseCandidateDistance = 1;
+ for (const auto &ha : allocations.heap) {
+ const HeapAllocationRecord har = ha.har;
- if (!on_stack && candidate && candidate_distance <= kCloseCandidateDistance) {
- ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right);
+ Printf("%s", d.Error());
+ Printf("\nCause: use-after-free\n");
+ Printf("%s", d.Location());
+ Printf("%p is located %zd bytes inside a %zd-byte region [%p,%p)\n",
+ untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
+ har.requested_size, UntagAddr(har.tagged_addr),
+ UntagAddr(har.tagged_addr) + har.requested_size);
+ Printf("%s", d.Allocation());
+ Printf("freed by thread T%u here:\n", ha.free_thread_id);
+ Printf("%s", d.Default());
+ GetStackTraceFromId(har.free_context_id).Print();
+
+ Printf("%s", d.Allocation());
+ Printf("previously allocated by thread T%u here:\n", har.alloc_thread_id);
+ Printf("%s", d.Default());
+ GetStackTraceFromId(har.alloc_context_id).Print();
+
+ // Print a developer note: the index of this heap object
+ // in the thread's deallocation ring buffer.
+ Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ha.ring_index + 1,
+ flags()->heap_history_size);
+ Printf("hwasan_dev_note_num_matching_addrs: %zd\n", ha.num_matching_addrs);
+ Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n",
+ ha.num_matching_addrs_4b);
+
+ announce_by_id(ha.free_thread_id);
+ // TODO: announce_by_id(har.alloc_thread_id);
num_descriptions_printed++;
}
- hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
- // Scan all threads' ring buffers to find if it's a heap-use-after-free.
- HeapAllocationRecord har;
- uptr ring_index, num_matching_addrs, num_matching_addrs_4b;
- if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har,
- &ring_index, &num_matching_addrs,
- &num_matching_addrs_4b)) {
- Printf("%s", d.Error());
- Printf("\nCause: use-after-free\n");
- Printf("%s", d.Location());
- Printf("%p is located %zd bytes inside a %zd-byte region [%p,%p)\n",
- untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
- har.requested_size, UntagAddr(har.tagged_addr),
- UntagAddr(har.tagged_addr) + har.requested_size);
- Printf("%s", d.Allocation());
- Printf("freed by thread T%u here:\n", t->unique_id());
- Printf("%s", d.Default());
- GetStackTraceFromId(har.free_context_id).Print();
-
- Printf("%s", d.Allocation());
- Printf("previously allocated by thread T%u here:\n", har.alloc_thread_id);
- Printf("%s", d.Default());
- GetStackTraceFromId(har.alloc_context_id).Print();
-
- // Print a developer note: the index of this heap object
- // in the thread's deallocation ring buffer.
- Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ring_index + 1,
- flags()->heap_history_size);
- Printf("hwasan_dev_note_num_matching_addrs: %zd\n", num_matching_addrs);
- Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n",
- num_matching_addrs_4b);
-
- t->Announce();
- num_descriptions_printed++;
- }
- });
-
- if (candidate && num_descriptions_printed == 0) {
- ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right);
+ if (candidate.untagged_addr && num_descriptions_printed == 0) {
+ PrintHeapOrGlobalCandidate();
num_descriptions_printed++;
}
@@ -515,77 +817,24 @@ void PrintAddressDescription(
}
}
-void ReportStats() {}
-
-static void PrintTagInfoAroundAddr(tag_t *tag_ptr, uptr num_rows,
- void (*print_tag)(InternalScopedString &s,
- tag_t *tag)) {
- const uptr row_len = 16; // better be power of two.
- tag_t *center_row_beg = reinterpret_cast<tag_t *>(
- RoundDownTo(reinterpret_cast<uptr>(tag_ptr), row_len));
- tag_t *beg_row = center_row_beg - row_len * (num_rows / 2);
- tag_t *end_row = center_row_beg + row_len * ((num_rows + 1) / 2);
- InternalScopedString s;
- for (tag_t *row = beg_row; row < end_row; row += row_len) {
- s.append("%s", row == center_row_beg ? "=>" : " ");
- s.append("%p:", (void *)ShadowToMem(reinterpret_cast<uptr>(row)));
- for (uptr i = 0; i < row_len; i++) {
- s.append("%s", row + i == tag_ptr ? "[" : " ");
- print_tag(s, &row[i]);
- s.append("%s", row + i == tag_ptr ? "]" : " ");
- }
- s.append("\n");
+void BaseReport::PrintTags(uptr addr) const {
+ if (shadow.addr) {
+ PrintTagsAroundAddr(
+ addr, [&](uptr addr) { return GetTagCopy(addr); },
+ [&](uptr addr) { return GetShortTagCopy(addr); });
}
- Printf("%s", s.data());
}
-static void PrintTagsAroundAddr(tag_t *tag_ptr) {
- Printf(
- "Memory tags around the buggy address (one tag corresponds to %zd "
- "bytes):\n", kShadowAlignment);
- PrintTagInfoAroundAddr(tag_ptr, 17, [](InternalScopedString &s, tag_t *tag) {
- s.append("%02x", *tag);
- });
-
- Printf(
- "Tags for short granules around the buggy address (one tag corresponds "
- "to %zd bytes):\n",
- kShadowAlignment);
- PrintTagInfoAroundAddr(tag_ptr, 3, [](InternalScopedString &s, tag_t *tag) {
- if (*tag >= 1 && *tag <= kShadowAlignment) {
- uptr granule_addr = ShadowToMem(reinterpret_cast<uptr>(tag));
- s.append("%02x",
- *reinterpret_cast<u8 *>(granule_addr + kShadowAlignment - 1));
- } else {
- s.append("..");
- }
- });
- Printf(
- "See "
- "https://clang.llvm.org/docs/"
- "HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
- "description of short granule tags\n");
-}
+class InvalidFreeReport : public BaseReport {
+ public:
+ InvalidFreeReport(StackTrace *stack, uptr tagged_addr)
+ : BaseReport(stack, flags()->halt_on_error, tagged_addr, 0) {}
+ ~InvalidFreeReport();
-uptr GetTopPc(StackTrace *stack) {
- return stack->size ? StackTrace::GetPreviousInstructionPc(stack->trace[0])
- : 0;
-}
+ private:
+};
-void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
- ScopedReport R(flags()->halt_on_error);
-
- uptr untagged_addr = UntagAddr(tagged_addr);
- tag_t ptr_tag = GetTagFromPointer(tagged_addr);
- tag_t *tag_ptr = nullptr;
- tag_t mem_tag = 0;
- if (MemIsApp(untagged_addr)) {
- tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
- if (MemIsShadow(reinterpret_cast<uptr>(tag_ptr)))
- mem_tag = *tag_ptr;
- else
- tag_ptr = nullptr;
- }
+InvalidFreeReport::~InvalidFreeReport() {
Decorator d;
Printf("%s", d.Error());
uptr pc = GetTopPc(stack);
@@ -599,36 +848,49 @@ void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
SanitizerToolName, bug_type, untagged_addr, pc);
}
Printf("%s", d.Access());
- if (tag_ptr)
- Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag, mem_tag);
+ if (shadow.addr) {
+ Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag,
+ GetTagCopy(MemToShadow(untagged_addr)));
+ }
Printf("%s", d.Default());
stack->Print();
- PrintAddressDescription(tagged_addr, 0, nullptr);
-
- if (tag_ptr)
- PrintTagsAroundAddr(tag_ptr);
-
+ PrintAddressDescription();
+ PrintTags(untagged_addr);
MaybePrintAndroidHelpUrl();
ReportErrorSummary(bug_type, stack);
}
-void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
- const u8 *expected) {
- uptr tail_size = kShadowAlignment - (orig_size % kShadowAlignment);
- u8 actual_expected[kShadowAlignment];
- internal_memcpy(actual_expected, expected, tail_size);
- tag_t ptr_tag = GetTagFromPointer(tagged_addr);
- // Short granule is stashed in the last byte of the magic string. To avoid
- // confusion, make the expected magic string contain the short granule tag.
- if (orig_size % kShadowAlignment != 0) {
- actual_expected[tail_size - 1] = ptr_tag;
+class TailOverwrittenReport : public BaseReport {
+ public:
+ explicit TailOverwrittenReport(StackTrace *stack, uptr tagged_addr,
+ uptr orig_size, const u8 *expected)
+ : BaseReport(stack, flags()->halt_on_error, tagged_addr, 0),
+ orig_size(orig_size),
+ tail_size(kShadowAlignment - (orig_size % kShadowAlignment)) {
+ CHECK_GT(tail_size, 0U);
+ CHECK_LT(tail_size, kShadowAlignment);
+ internal_memcpy(tail_copy,
+ reinterpret_cast<u8 *>(untagged_addr + orig_size),
+ tail_size);
+ internal_memcpy(actual_expected, expected, tail_size);
+ // Short granule is stashed in the last byte of the magic string. To avoid
+ // confusion, make the expected magic string contain the short granule tag.
+ if (orig_size % kShadowAlignment != 0)
+ actual_expected[tail_size - 1] = ptr_tag;
}
+ ~TailOverwrittenReport();
+
+ private:
+ const uptr orig_size = 0;
+ const uptr tail_size = 0;
+ u8 actual_expected[kShadowAlignment] = {};
+ u8 tail_copy[kShadowAlignment] = {};
+};
- ScopedReport R(flags()->halt_on_error);
+TailOverwrittenReport::~TailOverwrittenReport() {
Decorator d;
- uptr untagged_addr = UntagAddr(tagged_addr);
Printf("%s", d.Error());
const char *bug_type = "allocation-tail-overwritten";
Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
@@ -641,61 +903,62 @@ void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
Printf("deallocated here:\n");
Printf("%s", d.Default());
stack->Print();
- HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
- if (chunk.Beg()) {
+ if (heap.begin) {
Printf("%s", d.Allocation());
Printf("allocated here:\n");
Printf("%s", d.Default());
- GetStackTraceFromId(chunk.GetAllocStackId()).Print();
+ GetStackTraceFromId(heap.stack_id).Print();
}
InternalScopedString s;
- CHECK_GT(tail_size, 0U);
- CHECK_LT(tail_size, kShadowAlignment);
- u8 *tail = reinterpret_cast<u8*>(untagged_addr + orig_size);
- s.append("Tail contains: ");
- for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
- s.append(".. ");
+ u8 *tail = tail_copy;
+ s.AppendF("Tail contains: ");
+ for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.AppendF(".. ");
+ for (uptr i = 0; i < tail_size; i++) s.AppendF("%02x ", tail[i]);
+ s.AppendF("\n");
+ s.AppendF("Expected: ");
+ for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.AppendF(".. ");
+ for (uptr i = 0; i < tail_size; i++) s.AppendF("%02x ", actual_expected[i]);
+ s.AppendF("\n");
+ s.AppendF(" ");
+ for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.AppendF(" ");
for (uptr i = 0; i < tail_size; i++)
- s.append("%02x ", tail[i]);
- s.append("\n");
- s.append("Expected: ");
- for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
- s.append(".. ");
- for (uptr i = 0; i < tail_size; i++) s.append("%02x ", actual_expected[i]);
- s.append("\n");
- s.append(" ");
- for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
- s.append(" ");
- for (uptr i = 0; i < tail_size; i++)
- s.append("%s ", actual_expected[i] != tail[i] ? "^^" : " ");
-
- s.append("\nThis error occurs when a buffer overflow overwrites memory\n"
- "after a heap object, but within the %zd-byte granule, e.g.\n"
- " char *x = new char[20];\n"
- " x[25] = 42;\n"
- "%s does not detect such bugs in uninstrumented code at the time of write,"
- "\nbut can detect them at the time of free/delete.\n"
- "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n",
- kShadowAlignment, SanitizerToolName);
+ s.AppendF("%s ", actual_expected[i] != tail[i] ? "^^" : " ");
+
+ s.AppendF(
+ "\nThis error occurs when a buffer overflow overwrites memory\n"
+ "after a heap object, but within the %zd-byte granule, e.g.\n"
+ " char *x = new char[20];\n"
+ " x[25] = 42;\n"
+ "%s does not detect such bugs in uninstrumented code at the time of "
+ "write,"
+ "\nbut can detect them at the time of free/delete.\n"
+ "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n",
+ kShadowAlignment, SanitizerToolName);
Printf("%s", s.data());
GetCurrentThread()->Announce();
-
- tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
- PrintTagsAroundAddr(tag_ptr);
-
+ PrintTags(untagged_addr);
MaybePrintAndroidHelpUrl();
ReportErrorSummary(bug_type, stack);
}
-void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
- bool is_store, bool fatal, uptr *registers_frame) {
- ScopedReport R(fatal);
- SavedStackAllocations current_stack_allocations(
- GetCurrentThread()->stack_allocations());
+class TagMismatchReport : public BaseReport {
+ public:
+ explicit TagMismatchReport(StackTrace *stack, uptr tagged_addr,
+ uptr access_size, bool is_store, bool fatal,
+ uptr *registers_frame)
+ : BaseReport(stack, fatal, tagged_addr, access_size),
+ is_store(is_store),
+ registers_frame(registers_frame) {}
+ ~TagMismatchReport();
+
+ private:
+ const bool is_store;
+ const uptr *registers_frame;
+};
+TagMismatchReport::~TagMismatchReport() {
Decorator d;
- uptr untagged_addr = UntagAddr(tagged_addr);
// TODO: when possible, try to print heap-use-after-free, etc.
const char *bug_type = "tag-mismatch";
uptr pc = GetTopPc(stack);
@@ -705,31 +968,12 @@ void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
Thread *t = GetCurrentThread();
- sptr offset =
- __hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size);
- CHECK(offset >= 0 && offset < static_cast<sptr>(access_size));
- tag_t ptr_tag = GetTagFromPointer(tagged_addr);
- tag_t *tag_ptr =
- reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset));
- tag_t mem_tag = *tag_ptr;
+ tag_t mem_tag = GetTagCopy(MemToShadow(untagged_addr + mismatch_offset));
Printf("%s", d.Access());
if (mem_tag && mem_tag < kShadowAlignment) {
- tag_t *granule_ptr = reinterpret_cast<tag_t *>((untagged_addr + offset) &
- ~(kShadowAlignment - 1));
- // If offset is 0, (untagged_addr + offset) is not aligned to granules.
- // This is the offset of the leftmost accessed byte within the bad granule.
- u8 in_granule_offset = (untagged_addr + offset) & (kShadowAlignment - 1);
- tag_t short_tag = granule_ptr[kShadowAlignment - 1];
- // The first mismatch was a short granule that matched the ptr_tag.
- if (short_tag == ptr_tag) {
- // If the access starts after the end of the short granule, then the first
- // bad byte is the first byte of the access; otherwise it is the first
- // byte past the end of the short granule
- if (mem_tag > in_granule_offset) {
- offset += mem_tag - in_granule_offset;
- }
- }
+ tag_t short_tag =
+ GetShortTagCopy(MemToShadow(untagged_addr + mismatch_offset));
Printf(
"%s of size %zu at %p tags: %02x/%02x(%02x) (ptr/mem) in thread T%zd\n",
is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
@@ -739,17 +983,16 @@ void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
mem_tag, t->unique_id());
}
- if (offset != 0)
- Printf("Invalid access starting at offset %zu\n", offset);
+ if (mismatch_offset)
+ Printf("Invalid access starting at offset %zu\n", mismatch_offset);
Printf("%s", d.Default());
stack->Print();
- PrintAddressDescription(tagged_addr, access_size,
- current_stack_allocations.get());
+ PrintAddressDescription();
t->Announce();
- PrintTagsAroundAddr(tag_ptr);
+ PrintTags(untagged_addr + mismatch_offset);
if (registers_frame)
ReportRegisters(registers_frame, pc);
@@ -757,10 +1000,26 @@ void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
MaybePrintAndroidHelpUrl();
ReportErrorSummary(bug_type, stack);
}
+} // namespace
+
+void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
+ InvalidFreeReport R(stack, tagged_addr);
+}
+
+void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
+ const u8 *expected) {
+ TailOverwrittenReport R(stack, tagged_addr, orig_size, expected);
+}
+
+void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
+ bool is_store, bool fatal, uptr *registers_frame) {
+ TagMismatchReport R(stack, tagged_addr, access_size, is_store, fatal,
+ registers_frame);
+}
// See the frame breakdown defined in __hwasan_tag_mismatch (from
// hwasan_tag_mismatch_{aarch64,riscv64}.S).
-void ReportRegisters(uptr *frame, uptr pc) {
+void ReportRegisters(const uptr *frame, uptr pc) {
Printf("Registers where the failure occurred (pc %p):\n", pc);
// We explicitly print a single line (4 registers/line) each iteration to
@@ -772,7 +1031,8 @@ void ReportRegisters(uptr *frame, uptr pc) {
frame[0], frame[1], frame[2], frame[3]);
#elif SANITIZER_RISCV64
Printf(" sp %016llx x1 %016llx x2 %016llx x3 %016llx\n",
- reinterpret_cast<u8 *>(frame) + 256, frame[1], frame[2], frame[3]);
+ reinterpret_cast<const u8 *>(frame) + 256, frame[1], frame[2],
+ frame[3]);
#endif
Printf(" x4 %016llx x5 %016llx x6 %016llx x7 %016llx\n",
frame[4], frame[5], frame[6], frame[7]);
@@ -790,7 +1050,7 @@ void ReportRegisters(uptr *frame, uptr pc) {
// passes it to this function.
#if defined(__aarch64__)
Printf(" x28 %016llx x29 %016llx x30 %016llx sp %016llx\n", frame[28],
- frame[29], frame[30], reinterpret_cast<u8 *>(frame) + 256);
+ frame[29], frame[30], reinterpret_cast<const u8 *>(frame) + 256);
#elif SANITIZER_RISCV64
Printf(" x28 %016llx x29 %016llx x30 %016llx x31 %016llx\n", frame[28],
frame[29], frame[30], frame[31]);
diff --git a/libsanitizer/hwasan/hwasan_report.h b/libsanitizer/hwasan/hwasan_report.h
index de86c38..bb9492a 100644
--- a/libsanitizer/hwasan/hwasan_report.h
+++ b/libsanitizer/hwasan/hwasan_report.h
@@ -26,7 +26,7 @@ void ReportTagMismatch(StackTrace *stack, uptr addr, uptr access_size,
void ReportInvalidFree(StackTrace *stack, uptr addr);
void ReportTailOverwritten(StackTrace *stack, uptr addr, uptr orig_size,
const u8 *expected);
-void ReportRegisters(uptr *registers_frame, uptr pc);
+void ReportRegisters(const uptr *registers_frame, uptr pc);
void ReportAtExitStatistics();
diff --git a/libsanitizer/hwasan/hwasan_setjmp_aarch64.S b/libsanitizer/hwasan/hwasan_setjmp_aarch64.S
index 744748a..0c0abb6 100644
--- a/libsanitizer/hwasan/hwasan_setjmp_aarch64.S
+++ b/libsanitizer/hwasan/hwasan_setjmp_aarch64.S
@@ -31,33 +31,37 @@
.section .text
.file "hwasan_setjmp_aarch64.S"
-.global __interceptor_setjmp
-ASM_TYPE_FUNCTION(__interceptor_setjmp)
-__interceptor_setjmp:
+.global ASM_WRAPPER_NAME(setjmp)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(setjmp))
+ASM_WRAPPER_NAME(setjmp):
CFI_STARTPROC
BTI_C
mov x1, #0
- b __interceptor_sigsetjmp
+ b ASM_WRAPPER_NAME(sigsetjmp)
CFI_ENDPROC
-ASM_SIZE(__interceptor_setjmp)
+ASM_SIZE(ASM_WRAPPER_NAME(setjmp))
+
+ASM_INTERCEPTOR_TRAMPOLINE(setjmp)
#if SANITIZER_ANDROID
// Bionic also defines a function `setjmp` that calls `sigsetjmp` saving the
// current signal.
-.global __interceptor_setjmp_bionic
-ASM_TYPE_FUNCTION(__interceptor_setjmp_bionic)
-__interceptor_setjmp_bionic:
+.global ASM_WRAPPER_NAME(setjmp_bionic)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(setjmp_bionic))
+ASM_WRAPPER_NAME(setjmp_bionic):
CFI_STARTPROC
BTI_C
mov x1, #1
- b __interceptor_sigsetjmp
+ b ASM_WRAPPER_NAME(sigsetjmp)
CFI_ENDPROC
-ASM_SIZE(__interceptor_setjmp_bionic)
+ASM_SIZE(ASM_WRAPPER_NAME(setjmp_bionic))
+
+ASM_INTERCEPTOR_TRAMPOLINE(setjmp_bionic)
#endif
-.global __interceptor_sigsetjmp
-ASM_TYPE_FUNCTION(__interceptor_sigsetjmp)
-__interceptor_sigsetjmp:
+.global ASM_WRAPPER_NAME(sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(sigsetjmp))
+ASM_WRAPPER_NAME(sigsetjmp):
CFI_STARTPROC
BTI_C
stp x19, x20, [x0, #0<<3]
@@ -77,22 +81,19 @@ __interceptor_sigsetjmp:
// This function is defined in hwasan_interceptors.cc
b __sigjmp_save
CFI_ENDPROC
-ASM_SIZE(__interceptor_sigsetjmp)
+ASM_SIZE(ASM_WRAPPER_NAME(sigsetjmp))
+ASM_INTERCEPTOR_TRAMPOLINE(sigsetjmp)
-.macro WEAK_ALIAS first second
- .weak \second
- .equ \second\(), \first
-.endm
#if SANITIZER_ANDROID
-WEAK_ALIAS __interceptor_sigsetjmp, sigsetjmp
-WEAK_ALIAS __interceptor_setjmp_bionic, setjmp
+ASM_TRAMPOLINE_ALIAS(sigsetjmp, sigsetjmp)
+ASM_TRAMPOLINE_ALIAS(setjmp, setjmp_bionic)
#else
-WEAK_ALIAS __interceptor_sigsetjmp, __sigsetjmp
+ASM_TRAMPOLINE_ALIAS(__sigsetjmp, sigsetjmp)
#endif
-WEAK_ALIAS __interceptor_setjmp, _setjmp
+ASM_TRAMPOLINE_ALIAS(_setjmp, setjmp)
#endif
// We do not need executable stack.
diff --git a/libsanitizer/hwasan/hwasan_setjmp_riscv64.S b/libsanitizer/hwasan/hwasan_setjmp_riscv64.S
index 43f9c3c..c01f4e2 100644
--- a/libsanitizer/hwasan/hwasan_setjmp_riscv64.S
+++ b/libsanitizer/hwasan/hwasan_setjmp_riscv64.S
@@ -31,18 +31,18 @@
.section .text
.file "hwasan_setjmp_riscv64.S"
-.global __interceptor_setjmp
-ASM_TYPE_FUNCTION(__interceptor_setjmp)
-__interceptor_setjmp:
+.global ASM_WRAPPER_NAME(setjmp)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(setjmp))
+ASM_WRAPPER_NAME(setjmp):
CFI_STARTPROC
addi x11, x0, 0
- tail __interceptor_sigsetjmp
+ tail ASM_WRAPPER_NAME(sigsetjmp)
CFI_ENDPROC
-ASM_SIZE(__interceptor_setjmp)
+ASM_SIZE(ASM_WRAPPER_NAME(setjmp))
-.global __interceptor_sigsetjmp
-ASM_TYPE_FUNCTION(__interceptor_sigsetjmp)
-__interceptor_sigsetjmp:
+.global ASM_WRAPPER_NAME(sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(sigsetjmp))
+ASM_WRAPPER_NAME(sigsetjmp):
CFI_STARTPROC
sd ra, 0<<3(x10)
sd s0, 1<<3(x10)
@@ -80,17 +80,12 @@ __interceptor_sigsetjmp:
// This function is defined in hwasan_interceptors.cc
tail __sigjmp_save
CFI_ENDPROC
-ASM_SIZE(__interceptor_sigsetjmp)
+ASM_SIZE(ASM_WRAPPER_NAME(sigsetjmp))
-
-.macro WEAK_ALIAS first second
- .weak \second
- .equ \second\(), \first
-.endm
-
-WEAK_ALIAS __interceptor_sigsetjmp, __sigsetjmp
-
-WEAK_ALIAS __interceptor_setjmp, _setjmp
+ASM_INTERCEPTOR_TRAMPOLINE(sigsetjmp)
+ASM_TRAMPOLINE_ALIAS(__sigsetjmp, sigsetjmp)
+ASM_INTERCEPTOR_TRAMPOLINE(setjmp)
+ASM_TRAMPOLINE_ALIAS(_setjmp, setjmp)
#endif
// We do not need executable stack.
diff --git a/libsanitizer/hwasan/hwasan_setjmp_x86_64.S b/libsanitizer/hwasan/hwasan_setjmp_x86_64.S
index a5a3858..9804e8d 100644
--- a/libsanitizer/hwasan/hwasan_setjmp_x86_64.S
+++ b/libsanitizer/hwasan/hwasan_setjmp_x86_64.S
@@ -31,19 +31,19 @@
.section .text
.file "hwasan_setjmp_x86_64.S"
-.global __interceptor_setjmp
-ASM_TYPE_FUNCTION(__interceptor_setjmp)
-__interceptor_setjmp:
+.global ASM_WRAPPER_NAME(setjmp)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(setjmp))
+ASM_WRAPPER_NAME(setjmp):
CFI_STARTPROC
_CET_ENDBR
xorl %esi, %esi
jmp .Linterceptor_sigsetjmp
CFI_ENDPROC
-ASM_SIZE(__interceptor_setjmp)
+ASM_SIZE(ASM_WRAPPER_NAME(setjmp))
-.global __interceptor_sigsetjmp
-ASM_TYPE_FUNCTION(__interceptor_sigsetjmp)
-__interceptor_sigsetjmp:
+.global ASM_WRAPPER_NAME(sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(sigsetjmp))
+ASM_WRAPPER_NAME(sigsetjmp):
.Linterceptor_sigsetjmp:
CFI_STARTPROC
_CET_ENDBR
@@ -67,16 +67,12 @@ __interceptor_sigsetjmp:
jmp __sigjmp_save
CFI_ENDPROC
-ASM_SIZE(__interceptor_sigsetjmp)
+ASM_SIZE(ASM_WRAPPER_NAME(sigsetjmp))
-
-.macro WEAK_ALIAS first second
- .weak \second
- .equ \second\(), \first
-.endm
-
-WEAK_ALIAS __interceptor_sigsetjmp, __sigsetjmp
-WEAK_ALIAS __interceptor_setjmp, _setjmp
+ASM_INTERCEPTOR_TRAMPOLINE(sigsetjmp)
+ASM_TRAMPOLINE_ALIAS(__sigsetjmp, sigsetjmp)
+ASM_INTERCEPTOR_TRAMPOLINE(setjmp)
+ASM_TRAMPOLINE_ALIAS(_setjmp, setjmp)
#endif
// We do not need executable stack.
diff --git a/libsanitizer/hwasan/hwasan_tag_mismatch_aarch64.S b/libsanitizer/hwasan/hwasan_tag_mismatch_aarch64.S
index bcb0df4..fd060c5 100644
--- a/libsanitizer/hwasan/hwasan_tag_mismatch_aarch64.S
+++ b/libsanitizer/hwasan/hwasan_tag_mismatch_aarch64.S
@@ -89,16 +89,16 @@ __hwasan_tag_mismatch:
ubfx x16, x0, #4, #52
ldrb w16, [x9, x16]
cmp w16, #0xf
- b.hi __hwasan_tag_mismatch_v2
+ b.hi mismatch
cmp w16, w17
- b.lo __hwasan_tag_mismatch_v2
+ b.lo mismatch
// Load the real tag from the last byte of the granule and compare against
// the pointer tag.
orr x16, x0, #0xf
ldrb w16, [x16]
cmp x16, x0, lsr #56
- b.ne __hwasan_tag_mismatch_v2
+ b.ne mismatch
// Restore x0, x1 and sp to their values from before the __hwasan_tag_mismatch
// call and resume execution.
@@ -108,6 +108,8 @@ __hwasan_tag_mismatch:
.global __hwasan_tag_mismatch_v2
.type __hwasan_tag_mismatch_v2, %function
__hwasan_tag_mismatch_v2:
+// Avoid using global label, to prevent "relocation out of range".
+mismatch:
CFI_STARTPROC
BTI_J
diff --git a/libsanitizer/hwasan/hwasan_thread.cpp b/libsanitizer/hwasan/hwasan_thread.cpp
index 3375782..ce36547 100644
--- a/libsanitizer/hwasan/hwasan_thread.cpp
+++ b/libsanitizer/hwasan/hwasan_thread.cpp
@@ -58,6 +58,16 @@ void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size,
#endif
InitStackAndTls(state);
dtls_ = DTLS_Get();
+ AllocatorThreadStart(allocator_cache());
+
+ if (flags()->verbose_threads) {
+ if (IsMainThread()) {
+ Printf("sizeof(Thread): %zd sizeof(HeapRB): %zd sizeof(StackRB): %zd\n",
+ sizeof(Thread), heap_allocations_->SizeInBytes(),
+ stack_allocations_->size() * sizeof(uptr));
+ }
+ Print("Creating : ");
+ }
}
void Thread::InitStackRingBuffer(uptr stack_buffer_start,
@@ -79,28 +89,23 @@ void Thread::InitStackRingBuffer(uptr stack_buffer_start,
CHECK(MemIsApp(stack_bottom_));
CHECK(MemIsApp(stack_top_ - 1));
}
-
- if (flags()->verbose_threads) {
- if (IsMainThread()) {
- Printf("sizeof(Thread): %zd sizeof(HeapRB): %zd sizeof(StackRB): %zd\n",
- sizeof(Thread), heap_allocations_->SizeInBytes(),
- stack_allocations_->size() * sizeof(uptr));
- }
- Print("Creating : ");
- }
}
void Thread::ClearShadowForThreadStackAndTLS() {
if (stack_top_ != stack_bottom_)
- TagMemory(stack_bottom_, stack_top_ - stack_bottom_, 0);
+ TagMemory(UntagAddr(stack_bottom_),
+ UntagAddr(stack_top_) - UntagAddr(stack_bottom_),
+ GetTagFromPointer(stack_top_));
if (tls_begin_ != tls_end_)
- TagMemory(tls_begin_, tls_end_ - tls_begin_, 0);
+ TagMemory(UntagAddr(tls_begin_),
+ UntagAddr(tls_end_) - UntagAddr(tls_begin_),
+ GetTagFromPointer(tls_begin_));
}
void Thread::Destroy() {
if (flags()->verbose_threads)
Print("Destroying: ");
- AllocatorSwallowThreadLocalCache(allocator_cache());
+ AllocatorThreadFinish(allocator_cache());
ClearShadowForThreadStackAndTLS();
if (heap_allocations_)
heap_allocations_->Delete();
@@ -173,9 +178,15 @@ static __hwasan::Thread *GetThreadByOsIDLocked(tid_t os_id) {
[os_id](__hwasan::Thread *t) { return t->os_id() == os_id; });
}
-void LockThreadRegistry() { __hwasan::hwasanThreadList().Lock(); }
+void LockThreads() {
+ __hwasan::hwasanThreadList().Lock();
+ __hwasan::hwasanThreadArgRetval().Lock();
+}
-void UnlockThreadRegistry() { __hwasan::hwasanThreadList().Unlock(); }
+void UnlockThreads() {
+ __hwasan::hwasanThreadArgRetval().Unlock();
+ __hwasan::hwasanThreadList().Unlock();
+}
void EnsureMainThreadIDIsCorrect() { __hwasan::EnsureMainThreadIDIsCorrect(); }
@@ -202,7 +213,10 @@ void GetThreadExtraStackRangesLocked(tid_t os_id,
InternalMmapVector<Range> *ranges) {}
void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {}
-void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {}
+void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
+ __hwasan::hwasanThreadArgRetval().GetAllPtrsLocked(ptrs);
+}
+
void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {}
} // namespace __lsan
diff --git a/libsanitizer/hwasan/hwasan_thread_list.cpp b/libsanitizer/hwasan/hwasan_thread_list.cpp
index fa46e65..7df4dd3 100644
--- a/libsanitizer/hwasan/hwasan_thread_list.cpp
+++ b/libsanitizer/hwasan/hwasan_thread_list.cpp
@@ -1,15 +1,28 @@
#include "hwasan_thread_list.h"
+#include "sanitizer_common/sanitizer_thread_arg_retval.h"
+
namespace __hwasan {
-static ALIGNED(16) char thread_list_placeholder[sizeof(HwasanThreadList)];
+
static HwasanThreadList *hwasan_thread_list;
+static ThreadArgRetval *thread_data;
HwasanThreadList &hwasanThreadList() { return *hwasan_thread_list; }
+ThreadArgRetval &hwasanThreadArgRetval() { return *thread_data; }
void InitThreadList(uptr storage, uptr size) {
- CHECK(hwasan_thread_list == nullptr);
+ CHECK_EQ(hwasan_thread_list, nullptr);
+
+ static ALIGNED(alignof(
+ HwasanThreadList)) char thread_list_placeholder[sizeof(HwasanThreadList)];
hwasan_thread_list =
new (thread_list_placeholder) HwasanThreadList(storage, size);
+
+ CHECK_EQ(thread_data, nullptr);
+
+ static ALIGNED(alignof(
+ ThreadArgRetval)) char thread_data_placeholder[sizeof(ThreadArgRetval)];
+ thread_data = new (thread_data_placeholder) ThreadArgRetval();
}
-} // namespace __hwasan
+} // namespace __hwasan
diff --git a/libsanitizer/hwasan/hwasan_thread_list.h b/libsanitizer/hwasan/hwasan_thread_list.h
index 97485b1..82f6c70 100644
--- a/libsanitizer/hwasan/hwasan_thread_list.h
+++ b/libsanitizer/hwasan/hwasan_thread_list.h
@@ -47,8 +47,8 @@
#include "hwasan_allocator.h"
#include "hwasan_flags.h"
#include "hwasan_thread.h"
-
#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_thread_arg_retval.h"
namespace __hwasan {
@@ -131,9 +131,9 @@ class SANITIZER_MUTEX HwasanThreadList {
void ReleaseThread(Thread *t) SANITIZER_EXCLUDES(free_list_mutex_) {
RemoveThreadStats(t);
+ RemoveThreadFromLiveList(t);
t->Destroy();
DontNeedThread(t);
- RemoveThreadFromLiveList(t);
SpinMutexLock l(&free_list_mutex_);
free_list_.push_back(t);
}
@@ -157,7 +157,7 @@ class SANITIZER_MUTEX HwasanThreadList {
}
template <class CB>
- Thread *FindThreadLocked(CB cb) SANITIZER_CHECK_LOCKED(stats_mutex_) {
+ Thread *FindThreadLocked(CB cb) SANITIZER_CHECK_LOCKED(live_list_mutex_) {
CheckLocked();
for (Thread *t : live_list_)
if (cb(t))
@@ -199,7 +199,7 @@ class SANITIZER_MUTEX HwasanThreadList {
CHECK(IsAligned(free_space_, align));
Thread *t = (Thread *)(free_space_ + ring_buffer_size_);
free_space_ += thread_alloc_size_;
- CHECK(free_space_ <= free_space_end_ && "out of thread memory");
+ CHECK_LE(free_space_, free_space_end_);
return t;
}
@@ -222,5 +222,6 @@ class SANITIZER_MUTEX HwasanThreadList {
void InitThreadList(uptr storage, uptr size);
HwasanThreadList &hwasanThreadList();
+ThreadArgRetval &hwasanThreadArgRetval();
} // namespace __hwasan