author    Martin Liska <mliska@suse.cz>    2021-05-12 14:37:22 +0200
committer Martin Liska <mliska@suse.cz>    2021-05-13 09:29:17 +0200
commit    d0fee87e0ce24f066cde3dbf9605abce24dd75e1 (patch)
tree      9172c165d55d36021fa70059ed0e9fef5324119e /libsanitizer/hwasan
parent    810afb0b5fbb9da1e0e51ee9607f275f14c17459 (diff)
libsanitizer: merge from master
Merged revision: f58e0513dd95944b81ce7a6e7b49ba656de7d75f
Diffstat (limited to 'libsanitizer/hwasan')
-rw-r--r--  libsanitizer/hwasan/hwasan.cpp                     | 19
-rw-r--r--  libsanitizer/hwasan/hwasan.h                       | 41
-rw-r--r--  libsanitizer/hwasan/hwasan_allocator.cpp           | 28
-rw-r--r--  libsanitizer/hwasan/hwasan_allocator.h             | 19
-rw-r--r--  libsanitizer/hwasan/hwasan_checks.h                |  5
-rw-r--r--  libsanitizer/hwasan/hwasan_dynamic_shadow.cpp      | 16
-rw-r--r--  libsanitizer/hwasan/hwasan_flags.h                 |  2
-rw-r--r--  libsanitizer/hwasan/hwasan_flags.inc               |  9
-rw-r--r--  libsanitizer/hwasan/hwasan_interceptors.cpp        |  3
-rw-r--r--  libsanitizer/hwasan/hwasan_interceptors_vfork.S    |  3
-rw-r--r--  libsanitizer/hwasan/hwasan_interface_internal.h    |  3
-rw-r--r--  libsanitizer/hwasan/hwasan_linux.cpp               | 41
-rw-r--r--  libsanitizer/hwasan/hwasan_mapping.h               |  2
-rw-r--r--  libsanitizer/hwasan/hwasan_memintrinsics.cpp       |  4
-rw-r--r--  libsanitizer/hwasan/hwasan_new_delete.cpp          | 39
-rw-r--r--  libsanitizer/hwasan/hwasan_report.cpp              | 26
-rw-r--r--  libsanitizer/hwasan/hwasan_setjmp.S                |  6
-rw-r--r--  libsanitizer/hwasan/hwasan_tag_mismatch_aarch64.S  |  6
-rw-r--r--  libsanitizer/hwasan/hwasan_thread.cpp              | 15
-rw-r--r--  libsanitizer/hwasan/hwasan_thread.h                |  4
-rw-r--r--  libsanitizer/hwasan/hwasan_thread_list.h           | 90
21 files changed, 264 insertions, 117 deletions
diff --git a/libsanitizer/hwasan/hwasan.cpp b/libsanitizer/hwasan/hwasan.cpp
index c532211..8d6c252 100644
--- a/libsanitizer/hwasan/hwasan.cpp
+++ b/libsanitizer/hwasan/hwasan.cpp
@@ -128,16 +128,11 @@ static void InitializeFlags() {
if (common_flags()->help) parser.PrintFlagDescriptions();
}
-static void HWAsanCheckFailed(const char *file, int line, const char *cond,
- u64 v1, u64 v2) {
- Report("HWAddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file,
- line, cond, (uptr)v1, (uptr)v2);
- PRINT_CURRENT_STACK_CHECK();
- Die();
+static void CheckUnwind() {
+ GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME());
+ stack.Print();
}
-static constexpr uptr kMemoryUsageBufferSize = 4096;
-
static void HwasanFormatMemoryUsage(InternalScopedString &s) {
HwasanThreadList &thread_list = hwasanThreadList();
auto thread_stats = thread_list.GetThreadStats();
@@ -155,6 +150,8 @@ static void HwasanFormatMemoryUsage(InternalScopedString &s) {
}
#if SANITIZER_ANDROID
+static constexpr uptr kMemoryUsageBufferSize = 4096;
+
static char *memory_usage_buffer = nullptr;
static void InitMemoryUsage() {
@@ -171,7 +168,7 @@ void UpdateMemoryUsage() {
return;
if (!memory_usage_buffer)
InitMemoryUsage();
- InternalScopedString s(kMemoryUsageBufferSize);
+ InternalScopedString s;
HwasanFormatMemoryUsage(s);
internal_strncpy(memory_usage_buffer, s.data(), kMemoryUsageBufferSize - 1);
memory_usage_buffer[kMemoryUsageBufferSize - 1] = '\0';
@@ -271,7 +268,7 @@ void __hwasan_init() {
InitializeFlags();
// Install tool-specific callbacks in sanitizer_common.
- SetCheckFailedCallback(HWAsanCheckFailed);
+ SetCheckUnwindCallback(CheckUnwind);
__sanitizer_set_report_path(common_flags()->log_path);
@@ -493,7 +490,7 @@ extern "C" void *__hwasan_extra_spill_area() {
}
void __hwasan_print_memory_usage() {
- InternalScopedString s(kMemoryUsageBufferSize);
+ InternalScopedString s;
HwasanFormatMemoryUsage(s);
Printf("%s\n", s.data());
}
diff --git a/libsanitizer/hwasan/hwasan.h b/libsanitizer/hwasan/hwasan.h
index d4521ef..8515df5 100644
--- a/libsanitizer/hwasan/hwasan.h
+++ b/libsanitizer/hwasan/hwasan.h
@@ -14,11 +14,12 @@
#ifndef HWASAN_H
#define HWASAN_H
+#include "hwasan_flags.h"
+#include "hwasan_interface_internal.h"
+#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
-#include "hwasan_interface_internal.h"
-#include "hwasan_flags.h"
#include "ubsan/ubsan_platform.h"
#ifndef HWASAN_CONTAINS_UBSAN
@@ -35,10 +36,31 @@
typedef u8 tag_t;
+#if defined(__x86_64__)
+// Tags are done in middle bits using userspace aliasing.
+constexpr unsigned kAddressTagShift = 39;
+constexpr unsigned kTagBits = 3;
+
+// The alias region is placed next to the shadow so the upper bits of all
+// taggable addresses matches the upper bits of the shadow base. This shift
+// value determines which upper bits must match. It has a floor of 44 since the
+// shadow is always 8TB.
+// TODO(morehouse): In alias mode we can shrink the shadow and use a
+// simpler/faster shadow calculation.
+constexpr unsigned kTaggableRegionCheckShift =
+ __sanitizer::Max(kAddressTagShift + kTagBits + 1U, 44U);
+#else
// TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in address
// translation and can be used to store a tag.
-const unsigned kAddressTagShift = 56;
-const uptr kAddressTagMask = 0xFFUL << kAddressTagShift;
+constexpr unsigned kAddressTagShift = 56;
+constexpr unsigned kTagBits = 8;
+#endif // defined(__x86_64__)
+
+// Mask for extracting tag bits from the lower 8 bits.
+constexpr uptr kTagMask = (1UL << kTagBits) - 1;
+
+// Mask for extracting tag bits from full pointers.
+constexpr uptr kAddressTagMask = kTagMask << kAddressTagShift;
// Minimal alignment of the shadow base address. Determines the space available
// for threads and stack histories. This is an ABI constant.
@@ -50,7 +72,7 @@ const unsigned kRecordFPLShift = 4;
const unsigned kRecordFPModulus = 1 << (64 - kRecordFPShift + kRecordFPLShift);
static inline tag_t GetTagFromPointer(uptr p) {
- return p >> kAddressTagShift;
+ return (p >> kAddressTagShift) & kTagMask;
}
static inline uptr UntagAddr(uptr tagged_addr) {
@@ -105,15 +127,6 @@ void InstallAtExitHandler();
if (hwasan_inited) \
stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_fatal)
-#define GET_FATAL_STACK_TRACE_HERE \
- GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME())
-
-#define PRINT_CURRENT_STACK_CHECK() \
- { \
- GET_FATAL_STACK_TRACE_HERE; \
- stack.Print(); \
- }
-
void HwasanTSDInit();
void HwasanTSDThreadInit();
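
The hwasan.h hunk above introduces two tag layouts: 3 tag bits at shift 39 for the x86_64 aliasing mode and the existing 8 top-byte bits at shift 56 for AArch64 TBI. A minimal standalone sketch of the resulting extraction arithmetic follows; the constants mirror the patch, while the pointer and tag values are invented for illustration.

#include <cstdint>
#include <cstdio>

#if defined(__x86_64__)
constexpr unsigned kAddressTagShift = 39;  // aliasing mode: tag lives in bits [41:39]
constexpr unsigned kTagBits = 3;
#else
constexpr unsigned kAddressTagShift = 56;  // AArch64 TBI: tag lives in the top byte
constexpr unsigned kTagBits = 8;
#endif
constexpr uint64_t kTagMask = (1ULL << kTagBits) - 1;
constexpr uint64_t kAddressTagMask = kTagMask << kAddressTagShift;

uint64_t GetTagFromPointer(uint64_t p) { return (p >> kAddressTagShift) & kTagMask; }
uint64_t UntagAddr(uint64_t p) { return p & ~kAddressTagMask; }

int main() {
  uint64_t addr = 0x0000001234567890ULL;                // invented address, tag bits all zero
  uint64_t tagged = addr | (5ULL << kAddressTagShift);  // stamp an example tag of 5
  printf("tag=%llu untagged=%#llx\n",
         (unsigned long long)GetTagFromPointer(tagged),
         (unsigned long long)UntagAddr(tagged));
  return 0;
}
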
diff --git a/libsanitizer/hwasan/hwasan_allocator.cpp b/libsanitizer/hwasan/hwasan_allocator.cpp
index 0b6b734..a6fc794 100644
--- a/libsanitizer/hwasan/hwasan_allocator.cpp
+++ b/libsanitizer/hwasan/hwasan_allocator.cpp
@@ -29,8 +29,8 @@ static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;
-static const tag_t kFallbackAllocTag = 0xBB;
-static const tag_t kFallbackFreeTag = 0xBC;
+static constexpr tag_t kFallbackAllocTag = 0xBB & kTagMask;
+static constexpr tag_t kFallbackFreeTag = 0xBC;
enum RightAlignMode {
kRightAlignNever,
@@ -84,7 +84,8 @@ void HwasanAllocatorInit() {
atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
!flags()->disable_allocator_tagging);
SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
- allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
+ allocator.Init(common_flags()->allocator_release_to_os_interval_ms,
+ kAliasRegionStart);
for (uptr i = 0; i < sizeof(tail_magic); i++)
tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
}
@@ -148,7 +149,8 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
// Tagging can only be skipped when both tag_in_malloc and tag_in_free are
// false. When tag_in_malloc = false and tag_in_free = true malloc needs to
// retag to 0.
- if ((flags()->tag_in_malloc || flags()->tag_in_free) &&
+ if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
+ (flags()->tag_in_malloc || flags()->tag_in_free) &&
atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
@@ -175,6 +177,8 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
CHECK(tagged_ptr);
uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
+ if (!InTaggableRegion(tagged_uptr))
+ return true;
tag_t mem_tag = *reinterpret_cast<tag_t *>(
MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
@@ -187,7 +191,9 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
if (!PointerAndMemoryTagsMatch(tagged_ptr))
ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
- void *untagged_ptr = UntagPtr(tagged_ptr);
+ void *untagged_ptr = InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr))
+ ? UntagPtr(tagged_ptr)
+ : tagged_ptr;
void *aligned_ptr = reinterpret_cast<void *>(
RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
Metadata *meta =
@@ -219,10 +225,14 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
}
- if (flags()->tag_in_free && malloc_bisect(stack, 0) &&
- atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
+ if (InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr)) &&
+ flags()->tag_in_free && malloc_bisect(stack, 0) &&
+ atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
+ // Always store full 8-bit tags on free to maximize UAF detection.
+ tag_t tag = t ? t->GenerateRandomTag(/*num_bits=*/8) : kFallbackFreeTag;
TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
- t ? t->GenerateRandomTag() : kFallbackFreeTag);
+ tag);
+ }
if (t) {
allocator.Deallocate(t->allocator_cache(), aligned_ptr);
if (auto *ha = t->heap_allocations())
@@ -365,7 +375,7 @@ int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
// OOM error is already taken care of by HwasanAllocate.
return errno_ENOMEM;
CHECK(IsAligned((uptr)ptr, alignment));
- *(void **)UntagPtr(memptr) = ptr;
+ *memptr = ptr;
return 0;
}
diff --git a/libsanitizer/hwasan/hwasan_allocator.h b/libsanitizer/hwasan/hwasan_allocator.h
index 43670a6..03bbcff 100644
--- a/libsanitizer/hwasan/hwasan_allocator.h
+++ b/libsanitizer/hwasan/hwasan_allocator.h
@@ -13,13 +13,15 @@
#ifndef HWASAN_ALLOCATOR_H
#define HWASAN_ALLOCATOR_H
+#include "hwasan.h"
+#include "hwasan_interface_internal.h"
+#include "hwasan_poisoning.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_ring_buffer.h"
-#include "hwasan_poisoning.h"
#if !defined(__aarch64__) && !defined(__x86_64__)
#error Unsupported platform
@@ -55,7 +57,12 @@ static const uptr kMaxAllowedMallocSize = 1UL << 40; // 1T
struct AP64 {
static const uptr kSpaceBeg = ~0ULL;
+
+#if defined(__x86_64__)
+ static const uptr kSpaceSize = 1ULL << kAddressTagShift;
+#else
static const uptr kSpaceSize = 0x2000000000ULL;
+#endif
static const uptr kMetadataSize = sizeof(Metadata);
typedef __sanitizer::VeryDenseSizeClassMap SizeClassMap;
using AddressSpaceView = LocalAddressSpaceView;
@@ -102,6 +109,16 @@ typedef RingBuffer<HeapAllocationRecord> HeapAllocationsRingBuffer;
void GetAllocatorStats(AllocatorStatCounters s);
+inline bool InTaggableRegion(uptr addr) {
+#if defined(__x86_64__)
+ // Aliases are mapped next to shadow so that the upper bits match the shadow
+ // base.
+ return (addr >> kTaggableRegionCheckShift) ==
+ (__hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
+#endif
+ return true;
+}
+
} // namespace __hwasan
#endif // HWASAN_ALLOCATOR_H
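
The new InTaggableRegion helper above decides taggability on x86_64 purely by comparing the bits above kTaggableRegionCheckShift against the shadow base. A standalone sketch of that comparison, with an invented shadow address, looks like this:

#include <cassert>
#include <cstdint>

constexpr unsigned kTaggableRegionCheckShift = 44;   // Max(39 + 3 + 1, 44) from the patch
const uint64_t shadow_base = 0x0000100000000000ULL;  // invented stand-in for the dynamic shadow address

bool InTaggableRegion(uint64_t addr) {
  return (addr >> kTaggableRegionCheckShift) ==
         (shadow_base >> kTaggableRegionCheckShift);
}

int main() {
  assert(InTaggableRegion(shadow_base + (1ULL << 43)));  // alias region: same upper bits
  assert(!InTaggableRegion(0x00007fffffff0000ULL));      // an ordinary stack/mmap address
  return 0;
}
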
diff --git a/libsanitizer/hwasan/hwasan_checks.h b/libsanitizer/hwasan/hwasan_checks.h
index a8de0fe..ab543ea 100644
--- a/libsanitizer/hwasan/hwasan_checks.h
+++ b/libsanitizer/hwasan/hwasan_checks.h
@@ -13,6 +13,7 @@
#ifndef HWASAN_CHECKS_H
#define HWASAN_CHECKS_H
+#include "hwasan_allocator.h"
#include "hwasan_mapping.h"
#include "sanitizer_common/sanitizer_common.h"
@@ -81,6 +82,8 @@ enum class AccessType { Load, Store };
template <ErrorAction EA, AccessType AT, unsigned LogSize>
__attribute__((always_inline, nodebug)) static void CheckAddress(uptr p) {
+ if (!InTaggableRegion(p))
+ return;
uptr ptr_raw = p & ~kAddressTagMask;
tag_t mem_tag = *(tag_t *)MemToShadow(ptr_raw);
if (UNLIKELY(!PossiblyShortTagMatches(mem_tag, p, 1 << LogSize))) {
@@ -94,7 +97,7 @@ __attribute__((always_inline, nodebug)) static void CheckAddress(uptr p) {
template <ErrorAction EA, AccessType AT>
__attribute__((always_inline, nodebug)) static void CheckAddressSized(uptr p,
uptr sz) {
- if (sz == 0)
+ if (sz == 0 || !InTaggableRegion(p))
return;
tag_t ptr_tag = GetTagFromPointer(p);
uptr ptr_raw = p & ~kAddressTagMask;
diff --git a/libsanitizer/hwasan/hwasan_dynamic_shadow.cpp b/libsanitizer/hwasan/hwasan_dynamic_shadow.cpp
index 12730b2..f53276e 100644
--- a/libsanitizer/hwasan/hwasan_dynamic_shadow.cpp
+++ b/libsanitizer/hwasan/hwasan_dynamic_shadow.cpp
@@ -12,15 +12,17 @@
///
//===----------------------------------------------------------------------===//
-#include "hwasan.h"
#include "hwasan_dynamic_shadow.h"
-#include "hwasan_mapping.h"
-#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_posix.h"
#include <elf.h>
#include <link.h>
+#include "hwasan.h"
+#include "hwasan_mapping.h"
+#include "hwasan_thread_list.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_posix.h"
+
// The code in this file needs to run in an unrelocated binary. It should not
// access any external symbol, including its own non-hidden globals.
@@ -117,6 +119,12 @@ namespace __hwasan {
void InitShadowGOT() {}
uptr FindDynamicShadowStart(uptr shadow_size_bytes) {
+#if defined(__x86_64__)
+ constexpr uptr kAliasSize = 1ULL << kAddressTagShift;
+ constexpr uptr kNumAliases = 1ULL << kTagBits;
+ return MapDynamicShadowAndAliases(shadow_size_bytes, kAliasSize, kNumAliases,
+ RingBufferSize());
+#endif
return MapDynamicShadow(shadow_size_bytes, kShadowScale, kShadowBaseAlignment,
kHighMemEnd);
}
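
On x86_64 the hunk above requests one 512 GiB alias per possible tag value; the arithmetic behind those two constants is simply:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kAliasSize = 1ULL << 39;  // 1 << kAddressTagShift = 512 GiB per alias
  const uint64_t kNumAliases = 1ULL << 3;  // 1 << kTagBits = 8 aliases, one per tag value
  printf("per-alias size: %llu GiB, total aliased: %llu GiB\n",
         (unsigned long long)(kAliasSize >> 30),
         (unsigned long long)((kAliasSize * kNumAliases) >> 30));
  return 0;
}
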
diff --git a/libsanitizer/hwasan/hwasan_flags.h b/libsanitizer/hwasan/hwasan_flags.h
index 0a6998f..b177501 100644
--- a/libsanitizer/hwasan/hwasan_flags.h
+++ b/libsanitizer/hwasan/hwasan_flags.h
@@ -12,6 +12,8 @@
#ifndef HWASAN_FLAGS_H
#define HWASAN_FLAGS_H
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
namespace __hwasan {
struct Flags {
diff --git a/libsanitizer/hwasan/hwasan_flags.inc b/libsanitizer/hwasan/hwasan_flags.inc
index 8e431d9..18ea47f 100644
--- a/libsanitizer/hwasan/hwasan_flags.inc
+++ b/libsanitizer/hwasan/hwasan_flags.inc
@@ -72,3 +72,12 @@ HWASAN_FLAG(uptr, malloc_bisect_right, 0,
HWASAN_FLAG(bool, malloc_bisect_dump, false,
"Print all allocations within [malloc_bisect_left, "
"malloc_bisect_right] range ")
+
+
+// Exit if we fail to enable the AArch64 kernel ABI relaxation which allows
+// tagged pointers in syscalls. This is the default, but being able to disable
+// that behaviour is useful for running the testsuite on more platforms (the
+// testsuite can run since we manually ensure any pointer arguments to syscalls
+// are untagged before the call.
+HWASAN_FLAG(bool, fail_without_syscall_abi, true,
+ "Exit if fail to request relaxed syscall ABI.")
diff --git a/libsanitizer/hwasan/hwasan_interceptors.cpp b/libsanitizer/hwasan/hwasan_interceptors.cpp
index 44e569e..ad67e27 100644
--- a/libsanitizer/hwasan/hwasan_interceptors.cpp
+++ b/libsanitizer/hwasan/hwasan_interceptors.cpp
@@ -221,8 +221,7 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void*),
ThreadStartArg *A = reinterpret_cast<ThreadStartArg *> (MmapOrDie(
GetPageSizeCached(), "pthread_create"));
*A = {callback, param};
- int res = REAL(pthread_create)(UntagPtr(th), UntagPtr(attr),
- &HwasanThreadStartFunc, A);
+ int res = REAL(pthread_create)(th, attr, &HwasanThreadStartFunc, A);
return res;
}
diff --git a/libsanitizer/hwasan/hwasan_interceptors_vfork.S b/libsanitizer/hwasan/hwasan_interceptors_vfork.S
index 23d5659..fd20825 100644
--- a/libsanitizer/hwasan/hwasan_interceptors_vfork.S
+++ b/libsanitizer/hwasan/hwasan_interceptors_vfork.S
@@ -1,4 +1,5 @@
#include "sanitizer_common/sanitizer_asm.h"
+#include "builtins/assembly.h"
#if defined(__linux__) && HWASAN_WITH_INTERCEPTORS
#define COMMON_INTERCEPTOR_SPILL_AREA __hwasan_extra_spill_area
@@ -9,3 +10,5 @@
#endif
NO_EXEC_STACK_DIRECTIVE
+
+GNU_PROPERTY_BTI_PAC
diff --git a/libsanitizer/hwasan/hwasan_interface_internal.h b/libsanitizer/hwasan/hwasan_interface_internal.h
index aedda31..25c0f94 100644
--- a/libsanitizer/hwasan/hwasan_interface_internal.h
+++ b/libsanitizer/hwasan/hwasan_interface_internal.h
@@ -222,6 +222,9 @@ SANITIZER_INTERFACE_ATTRIBUTE
void *__hwasan_memset(void *s, int c, uptr n);
SANITIZER_INTERFACE_ATTRIBUTE
void *__hwasan_memmove(void *dest, const void *src, uptr n);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_set_error_report_callback(void (*callback)(const char *));
} // extern "C"
#endif // HWASAN_INTERFACE_INTERNAL_H
diff --git a/libsanitizer/hwasan/hwasan_linux.cpp b/libsanitizer/hwasan/hwasan_linux.cpp
index e99926d..8ce0ff7 100644
--- a/libsanitizer/hwasan/hwasan_linux.cpp
+++ b/libsanitizer/hwasan/hwasan_linux.cpp
@@ -76,6 +76,8 @@ uptr kHighShadowEnd;
uptr kHighMemStart;
uptr kHighMemEnd;
+uptr kAliasRegionStart; // Always 0 on non-x86.
+
static void PrintRange(uptr start, uptr end, const char *name) {
Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
}
@@ -119,9 +121,11 @@ void InitPrctl() {
#define PR_GET_TAGGED_ADDR_CTRL 56
#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
// Check we're running on a kernel that can use the tagged address ABI.
- if (internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) == (uptr)-1 &&
- errno == EINVAL) {
-#if SANITIZER_ANDROID
+ int local_errno = 0;
+ if (internal_iserror(internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0),
+ &local_errno) &&
+ local_errno == EINVAL) {
+#if SANITIZER_ANDROID || defined(__x86_64__)
// Some older Android kernels have the tagged pointer ABI on
// unconditionally, and hence don't have the tagged-addr prctl while still
// allow the ABI.
@@ -129,17 +133,20 @@ void InitPrctl() {
// case.
return;
#else
- Printf(
- "FATAL: "
- "HWAddressSanitizer requires a kernel with tagged address ABI.\n");
- Die();
+ if (flags()->fail_without_syscall_abi) {
+ Printf(
+ "FATAL: "
+ "HWAddressSanitizer requires a kernel with tagged address ABI.\n");
+ Die();
+ }
#endif
}
// Turn on the tagged address ABI.
- if (internal_prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) ==
- (uptr)-1 ||
- !internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0)) {
+ if ((internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL,
+ PR_TAGGED_ADDR_ENABLE, 0, 0, 0)) ||
+ !internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0)) &&
+ flags()->fail_without_syscall_abi) {
Printf(
"FATAL: HWAddressSanitizer failed to enable tagged address syscall "
"ABI.\nSuggest check `sysctl abi.tagged_addr_disabled` "
@@ -174,6 +181,18 @@ bool InitShadow() {
// High memory starts where allocated shadow allows.
kHighMemStart = ShadowToMem(kHighShadowStart);
+#if defined(__x86_64__)
+ constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1);
+ kAliasRegionStart =
+ __hwasan_shadow_memory_dynamic_address + kAliasRegionOffset;
+
+ CHECK_EQ(kAliasRegionStart >> kTaggableRegionCheckShift,
+ __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
+ CHECK_EQ(
+ (kAliasRegionStart + kAliasRegionOffset - 1) >> kTaggableRegionCheckShift,
+ __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
+#endif
+
// Check the sanity of the defined memory ranges (there might be gaps).
CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
CHECK_GT(kHighMemStart, kHighShadowEnd);
@@ -217,7 +236,9 @@ void InitThreads() {
}
bool MemIsApp(uptr p) {
+#if !defined(__x86_64__) // Memory outside the alias range has non-zero tags.
CHECK(GetTagFromPointer(p) == 0);
+#endif
return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd);
}
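
The two CHECK_EQ assertions added to InitShadow verify that both ends of the alias region keep the same upper bits as the shadow base. The same arithmetic can be restated standalone; the shadow address below is an invented example, the shifts come from the patch.

#include <cassert>
#include <cstdint>

int main() {
  const unsigned kShift = 44;                                // kTaggableRegionCheckShift
  const uint64_t kAliasRegionOffset = 1ULL << (kShift - 1);  // 8 TiB
  const uint64_t shadow = 0x0000100000000000ULL;             // invented shadow_memory_dynamic_address
  const uint64_t alias_start = shadow + kAliasRegionOffset;

  // Both ends of the alias region share the bits above kShift with the shadow base.
  assert((alias_start >> kShift) == (shadow >> kShift));
  assert(((alias_start + kAliasRegionOffset - 1) >> kShift) == (shadow >> kShift));
  return 0;
}
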
diff --git a/libsanitizer/hwasan/hwasan_mapping.h b/libsanitizer/hwasan/hwasan_mapping.h
index c149687..8243d1e 100644
--- a/libsanitizer/hwasan/hwasan_mapping.h
+++ b/libsanitizer/hwasan/hwasan_mapping.h
@@ -48,6 +48,8 @@ extern uptr kHighShadowEnd;
extern uptr kHighMemStart;
extern uptr kHighMemEnd;
+extern uptr kAliasRegionStart;
+
inline uptr MemToShadow(uptr untagged_addr) {
return (untagged_addr >> kShadowScale) +
__hwasan_shadow_memory_dynamic_address;
diff --git a/libsanitizer/hwasan/hwasan_memintrinsics.cpp b/libsanitizer/hwasan/hwasan_memintrinsics.cpp
index e82d77a..fab017a 100644
--- a/libsanitizer/hwasan/hwasan_memintrinsics.cpp
+++ b/libsanitizer/hwasan/hwasan_memintrinsics.cpp
@@ -24,7 +24,7 @@ using namespace __hwasan;
void *__hwasan_memset(void *block, int c, uptr size) {
CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
reinterpret_cast<uptr>(block), size);
- return memset(UntagPtr(block), c, size);
+ return memset(block, c, size);
}
void *__hwasan_memcpy(void *to, const void *from, uptr size) {
@@ -32,7 +32,7 @@ void *__hwasan_memcpy(void *to, const void *from, uptr size) {
reinterpret_cast<uptr>(to), size);
CheckAddressSized<ErrorAction::Recover, AccessType::Load>(
reinterpret_cast<uptr>(from), size);
- return memcpy(UntagPtr(to), UntagPtr(from), size);
+ return memcpy(to, from, size);
}
void *__hwasan_memmove(void *to, const void *from, uptr size) {
diff --git a/libsanitizer/hwasan/hwasan_new_delete.cpp b/libsanitizer/hwasan/hwasan_new_delete.cpp
index 8d01d39..69cddda 100644
--- a/libsanitizer/hwasan/hwasan_new_delete.cpp
+++ b/libsanitizer/hwasan/hwasan_new_delete.cpp
@@ -27,6 +27,12 @@
void *res = hwasan_malloc(size, &stack);\
if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
return res
+#define OPERATOR_NEW_ALIGN_BODY(nothrow) \
+ GET_MALLOC_STACK_TRACE; \
+ void *res = hwasan_aligned_alloc(static_cast<uptr>(align), size, &stack); \
+ if (!nothrow && UNLIKELY(!res)) \
+ ReportOutOfMemory(size, &stack); \
+ return res
#define OPERATOR_DELETE_BODY \
GET_MALLOC_STACK_TRACE; \
@@ -50,6 +56,7 @@ using namespace __hwasan;
// Fake std::nothrow_t to avoid including <new>.
namespace std {
struct nothrow_t {};
+ enum class align_val_t : size_t {};
} // namespace std
@@ -66,6 +73,22 @@ INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void *operator new[](size_t size, std::nothrow_t const&) {
OPERATOR_NEW_BODY(true /*nothrow*/);
}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new(
+ size_t size, std::align_val_t align) {
+ OPERATOR_NEW_ALIGN_BODY(false /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[](
+ size_t size, std::align_val_t align) {
+ OPERATOR_NEW_ALIGN_BODY(false /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new(
+ size_t size, std::align_val_t align, std::nothrow_t const &) {
+ OPERATOR_NEW_ALIGN_BODY(true /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[](
+ size_t size, std::align_val_t align, std::nothrow_t const &) {
+ OPERATOR_NEW_ALIGN_BODY(true /*nothrow*/);
+}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
@@ -77,5 +100,21 @@ INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void operator delete[](void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY;
}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+ void *ptr, std::align_val_t align) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+ void *ptr, std::align_val_t) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+ void *ptr, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+ void *ptr, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
#endif // OPERATOR_NEW_BODY
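
The added overloads are the entry points the compiler selects for C++17 over-aligned types, so such allocations in instrumented code are now routed through hwasan_aligned_alloc. A short, patch-independent reminder of when those overloads fire:

#include <new>

struct alignas(64) CacheLine { char bytes[64]; };

int main() {
  // Allocating an over-aligned type calls operator new(size_t, std::align_val_t)...
  CacheLine *p = new CacheLine;
  // ...and deleting it calls a matching std::align_val_t operator delete overload.
  delete p;
  return 0;
}
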
diff --git a/libsanitizer/hwasan/hwasan_report.cpp b/libsanitizer/hwasan/hwasan_report.cpp
index 894a149..c021779 100644
--- a/libsanitizer/hwasan/hwasan_report.cpp
+++ b/libsanitizer/hwasan/hwasan_report.cpp
@@ -43,12 +43,16 @@ class ScopedReport {
}
~ScopedReport() {
+ void (*report_cb)(const char *);
{
BlockingMutexLock lock(&error_message_lock_);
- if (fatal)
- SetAbortMessage(error_message_.data());
+ report_cb = error_report_callback_;
error_message_ptr_ = nullptr;
}
+ if (report_cb)
+ report_cb(error_message_.data());
+ if (fatal)
+ SetAbortMessage(error_message_.data());
if (common_flags()->print_module_map >= 2 ||
(fatal && common_flags()->print_module_map))
DumpProcessMap();
@@ -66,6 +70,12 @@ class ScopedReport {
// overwrite old trailing '\0', keep new trailing '\0' untouched.
internal_memcpy(&(*error_message_ptr_)[old_size - 1], msg, len);
}
+
+ static void SetErrorReportCallback(void (*callback)(const char *)) {
+ BlockingMutexLock lock(&error_message_lock_);
+ error_report_callback_ = callback;
+ }
+
private:
ScopedErrorReportLock error_report_lock_;
InternalMmapVector<char> error_message_;
@@ -73,10 +83,12 @@ class ScopedReport {
static InternalMmapVector<char> *error_message_ptr_;
static BlockingMutex error_message_lock_;
+ static void (*error_report_callback_)(const char *);
};
InternalMmapVector<char> *ScopedReport::error_message_ptr_;
BlockingMutex ScopedReport::error_message_lock_;
+void (*ScopedReport::error_report_callback_)(const char *);
// If there is an active ScopedReport, append to its error message.
void AppendToErrorMessageBuffer(const char *buffer) {
@@ -212,7 +224,7 @@ static void PrintStackAllocations(StackAllocationsRingBuffer *sa,
// We didn't find any locals. Most likely we don't have symbols, so dump
// the information that we have for offline analysis.
- InternalScopedString frame_desc(GetPageSizeCached() * 2);
+ InternalScopedString frame_desc;
Printf("Previously allocated frames:\n");
for (uptr i = 0; i < frames; i++) {
const uptr *record_addr = &(*sa)[i];
@@ -447,7 +459,7 @@ static void PrintTagInfoAroundAddr(tag_t *tag_ptr, uptr num_rows,
RoundDownTo(reinterpret_cast<uptr>(tag_ptr), row_len));
tag_t *beg_row = center_row_beg - row_len * (num_rows / 2);
tag_t *end_row = center_row_beg + row_len * ((num_rows + 1) / 2);
- InternalScopedString s(GetPageSizeCached() * 8);
+ InternalScopedString s;
for (tag_t *row = beg_row; row < end_row; row += row_len) {
s.append("%s", row == center_row_beg ? "=>" : " ");
s.append("%p:", row);
@@ -535,7 +547,7 @@ void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
GetStackTraceFromId(chunk.GetAllocStackId()).Print();
}
- InternalScopedString s(GetPageSizeCached() * 8);
+ InternalScopedString s;
CHECK_GT(tail_size, 0U);
CHECK_LT(tail_size, kShadowAlignment);
u8 *tail = reinterpret_cast<u8*>(untagged_addr + orig_size);
@@ -650,3 +662,7 @@ void ReportRegisters(uptr *frame, uptr pc) {
}
} // namespace __hwasan
+
+void __hwasan_set_error_report_callback(void (*callback)(const char *)) {
+ __hwasan::ScopedReport::SetErrorReportCallback(callback);
+}
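
The __hwasan_set_error_report_callback definition above gives embedders a hook that receives the finished report text before SetAbortMessage runs. A hypothetical consumer (declaring the symbol by hand and linking against the HWASan runtime; the logging sink is invented) could look like:

#include <cstdio>

extern "C" void __hwasan_set_error_report_callback(void (*callback)(const char *));

static void SaveReport(const char *report) {
  // Forward the complete HWASan report to the program's own logging sink.
  fprintf(stderr, "captured hwasan report:\n%s", report);
}

int main() {
  __hwasan_set_error_report_callback(SaveReport);
  // ... run the rest of the instrumented program; SaveReport fires on the next report.
  return 0;
}
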
diff --git a/libsanitizer/hwasan/hwasan_setjmp.S b/libsanitizer/hwasan/hwasan_setjmp.S
index 0c13543..381af63 100644
--- a/libsanitizer/hwasan/hwasan_setjmp.S
+++ b/libsanitizer/hwasan/hwasan_setjmp.S
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_asm.h"
+#include "builtins/assembly.h"
#if HWASAN_WITH_INTERCEPTORS && defined(__aarch64__)
#include "sanitizer_common/sanitizer_platform.h"
@@ -34,6 +35,7 @@
ASM_TYPE_FUNCTION(__interceptor_setjmp)
__interceptor_setjmp:
CFI_STARTPROC
+ BTI_C
mov x1, #0
b __interceptor_sigsetjmp
CFI_ENDPROC
@@ -46,6 +48,7 @@ ASM_SIZE(__interceptor_setjmp)
ASM_TYPE_FUNCTION(__interceptor_setjmp_bionic)
__interceptor_setjmp_bionic:
CFI_STARTPROC
+ BTI_C
mov x1, #1
b __interceptor_sigsetjmp
CFI_ENDPROC
@@ -56,6 +59,7 @@ ASM_SIZE(__interceptor_setjmp_bionic)
ASM_TYPE_FUNCTION(__interceptor_sigsetjmp)
__interceptor_sigsetjmp:
CFI_STARTPROC
+ BTI_C
stp x19, x20, [x0, #0<<3]
stp x21, x22, [x0, #2<<3]
stp x23, x24, [x0, #4<<3]
@@ -98,3 +102,5 @@ ALIAS __interceptor_setjmp, _setjmp
// We do not need executable stack.
NO_EXEC_STACK_DIRECTIVE
+
+GNU_PROPERTY_BTI_PAC
diff --git a/libsanitizer/hwasan/hwasan_tag_mismatch_aarch64.S b/libsanitizer/hwasan/hwasan_tag_mismatch_aarch64.S
index 08df127..bcb0df4 100644
--- a/libsanitizer/hwasan/hwasan_tag_mismatch_aarch64.S
+++ b/libsanitizer/hwasan/hwasan_tag_mismatch_aarch64.S
@@ -1,4 +1,5 @@
#include "sanitizer_common/sanitizer_asm.h"
+#include "builtins/assembly.h"
// The content of this file is AArch64-only:
#if defined(__aarch64__)
@@ -74,6 +75,8 @@
.global __hwasan_tag_mismatch
.type __hwasan_tag_mismatch, %function
__hwasan_tag_mismatch:
+ BTI_J
+
// Compute the granule position one past the end of the access.
mov x16, #1
and x17, x1, #0xf
@@ -106,6 +109,7 @@ __hwasan_tag_mismatch:
.type __hwasan_tag_mismatch_v2, %function
__hwasan_tag_mismatch_v2:
CFI_STARTPROC
+ BTI_J
// Set the CFA to be the return address for caller of __hwasan_check_*. Note
// that we do not emit CFI predicates to describe the contents of this stack
@@ -150,3 +154,5 @@ __hwasan_tag_mismatch_v2:
// We do not need executable stack.
NO_EXEC_STACK_DIRECTIVE
+
+GNU_PROPERTY_BTI_PAC
diff --git a/libsanitizer/hwasan/hwasan_thread.cpp b/libsanitizer/hwasan/hwasan_thread.cpp
index b81a635..bb4d56a 100644
--- a/libsanitizer/hwasan/hwasan_thread.cpp
+++ b/libsanitizer/hwasan/hwasan_thread.cpp
@@ -35,6 +35,10 @@ void Thread::InitRandomState() {
}
void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size) {
+ CHECK_EQ(0, unique_id_); // try to catch bad stack reuse
+ CHECK_EQ(0, stack_top_);
+ CHECK_EQ(0, stack_bottom_);
+
static u64 unique_id;
unique_id_ = unique_id++;
if (auto sz = flags()->heap_history_size)
@@ -113,18 +117,21 @@ static u32 xorshift(u32 state) {
}
// Generate a (pseudo-)random non-zero tag.
-tag_t Thread::GenerateRandomTag() {
+tag_t Thread::GenerateRandomTag(uptr num_bits) {
+ DCHECK_GT(num_bits, 0);
if (tagging_disabled_) return 0;
tag_t tag;
+ const uptr tag_mask = (1ULL << num_bits) - 1;
do {
if (flags()->random_tags) {
if (!random_buffer_)
random_buffer_ = random_state_ = xorshift(random_state_);
CHECK(random_buffer_);
- tag = random_buffer_ & 0xFF;
- random_buffer_ >>= 8;
+ tag = random_buffer_ & tag_mask;
+ random_buffer_ >>= num_bits;
} else {
- tag = random_state_ = (random_state_ + 1) & 0xFF;
+ random_state_ += 1;
+ tag = random_state_ & tag_mask;
}
} while (!tag);
return tag;
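
GenerateRandomTag now takes num_bits so malloc can request kTagBits-wide tags while free always asks for full 8-bit tags. A standalone paraphrase of the masking loop (using a plain xorshift32 rather than the runtime's per-thread state):

#include <cstdint>
#include <cstdio>

static uint32_t xorshift(uint32_t state) {
  state ^= state << 13;
  state ^= state >> 17;
  state ^= state << 5;
  return state;
}

static uint8_t GenerateTag(uint32_t &rng, unsigned num_bits) {
  const uint32_t mask = (1u << num_bits) - 1;
  uint8_t tag;
  do {
    rng = xorshift(rng);
    tag = rng & mask;  // keep only num_bits bits
  } while (!tag);      // tags must be non-zero
  return tag;
}

int main() {
  uint32_t rng = 42;  // invented seed
  printf("3-bit tag: %u\n", GenerateTag(rng, 3));  // malloc-style tag in x86_64 alias mode
  printf("8-bit tag: %u\n", GenerateTag(rng, 8));  // free always uses full 8-bit tags
  return 0;
}
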
diff --git a/libsanitizer/hwasan/hwasan_thread.h b/libsanitizer/hwasan/hwasan_thread.h
index ebcdb79..1c71cab 100644
--- a/libsanitizer/hwasan/hwasan_thread.h
+++ b/libsanitizer/hwasan/hwasan_thread.h
@@ -42,7 +42,7 @@ class Thread {
HeapAllocationsRingBuffer *heap_allocations() { return heap_allocations_; }
StackAllocationsRingBuffer *stack_allocations() { return stack_allocations_; }
- tag_t GenerateRandomTag();
+ tag_t GenerateRandomTag(uptr num_bits = kTagBits);
void DisableTagging() { tagging_disabled_++; }
void EnableTagging() { tagging_disabled_--; }
@@ -74,8 +74,6 @@ class Thread {
HeapAllocationsRingBuffer *heap_allocations_;
StackAllocationsRingBuffer *stack_allocations_;
- Thread *next_; // All live threads form a linked list.
-
u64 unique_id_; // counting from zero.
u32 tagging_disabled_; // if non-zero, malloc uses zero tag in this thread.
diff --git a/libsanitizer/hwasan/hwasan_thread_list.h b/libsanitizer/hwasan/hwasan_thread_list.h
index 914b632..11c5863 100644
--- a/libsanitizer/hwasan/hwasan_thread_list.h
+++ b/libsanitizer/hwasan/hwasan_thread_list.h
@@ -66,40 +66,6 @@ static uptr RingBufferSize() {
return 0;
}
-struct ThreadListHead {
- Thread *list_;
-
- ThreadListHead() : list_(nullptr) {}
-
- void Push(Thread *t) {
- t->next_ = list_;
- list_ = t;
- }
-
- Thread *Pop() {
- Thread *t = list_;
- if (t)
- list_ = t->next_;
- return t;
- }
-
- void Remove(Thread *t) {
- Thread **cur = &list_;
- while (*cur != t) cur = &(*cur)->next_;
- CHECK(*cur && "thread not found");
- *cur = (*cur)->next_;
- }
-
- template <class CB>
- void ForEach(CB cb) {
- Thread *t = list_;
- while (t) {
- cb(t);
- t = t->next_;
- }
- }
-};
-
struct ThreadStats {
uptr n_live_threads;
uptr total_stack_size;
@@ -120,17 +86,23 @@ class HwasanThreadList {
}
Thread *CreateCurrentThread() {
- Thread *t;
+ Thread *t = nullptr;
{
- SpinMutexLock l(&list_mutex_);
- t = free_list_.Pop();
- if (t) {
- uptr start = (uptr)t - ring_buffer_size_;
- internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
- } else {
- t = AllocThread();
+ SpinMutexLock l(&free_list_mutex_);
+ if (!free_list_.empty()) {
+ t = free_list_.back();
+ free_list_.pop_back();
}
- live_list_.Push(t);
+ }
+ if (t) {
+ uptr start = (uptr)t - ring_buffer_size_;
+ internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
+ } else {
+ t = AllocThread();
+ }
+ {
+ SpinMutexLock l(&live_list_mutex_);
+ live_list_.push_back(t);
}
t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_);
AddThreadStats(t);
@@ -142,13 +114,26 @@ class HwasanThreadList {
ReleaseMemoryPagesToOS(start, start + thread_alloc_size_);
}
+ void RemoveThreadFromLiveList(Thread *t) {
+ SpinMutexLock l(&live_list_mutex_);
+ for (Thread *&t2 : live_list_)
+ if (t2 == t) {
+ // To remove t2, copy the last element of the list in t2's position, and
+ // pop_back(). This works even if t2 is itself the last element.
+ t2 = live_list_.back();
+ live_list_.pop_back();
+ return;
+ }
+ CHECK(0 && "thread not found in live list");
+ }
+
void ReleaseThread(Thread *t) {
RemoveThreadStats(t);
t->Destroy();
- SpinMutexLock l(&list_mutex_);
- live_list_.Remove(t);
- free_list_.Push(t);
DontNeedThread(t);
+ RemoveThreadFromLiveList(t);
+ SpinMutexLock l(&free_list_mutex_);
+ free_list_.push_back(t);
}
Thread *GetThreadByBufferAddress(uptr p) {
@@ -165,8 +150,8 @@ class HwasanThreadList {
template <class CB>
void VisitAllLiveThreads(CB cb) {
- SpinMutexLock l(&list_mutex_);
- live_list_.ForEach(cb);
+ SpinMutexLock l(&live_list_mutex_);
+ for (Thread *t : live_list_) cb(t);
}
void AddThreadStats(Thread *t) {
@@ -188,6 +173,7 @@ class HwasanThreadList {
private:
Thread *AllocThread() {
+ SpinMutexLock l(&free_space_mutex_);
uptr align = ring_buffer_size_ * 2;
CHECK(IsAligned(free_space_, align));
Thread *t = (Thread *)(free_space_ + ring_buffer_size_);
@@ -196,14 +182,16 @@ class HwasanThreadList {
return t;
}
+ SpinMutex free_space_mutex_;
uptr free_space_;
uptr free_space_end_;
uptr ring_buffer_size_;
uptr thread_alloc_size_;
- ThreadListHead free_list_;
- ThreadListHead live_list_;
- SpinMutex list_mutex_;
+ SpinMutex free_list_mutex_;
+ InternalMmapVector<Thread *> free_list_;
+ SpinMutex live_list_mutex_;
+ InternalMmapVector<Thread *> live_list_;
ThreadStats stats_;
SpinMutex stats_mutex_;
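
RemoveThreadFromLiveList above relies on the usual swap-with-last-then-pop idiom for O(1) removal from an unordered vector; a generic illustration on std::vector:

#include <cassert>
#include <vector>

template <class T>
bool UnorderedRemove(std::vector<T> &v, const T &value) {
  for (T &slot : v)
    if (slot == value) {
      slot = v.back();  // overwrite the found slot with the last element...
      v.pop_back();     // ...then drop the now-duplicated last element.
      return true;      // works even when the found slot is itself the last element
    }
  return false;
}

int main() {
  std::vector<int> v{1, 2, 3, 4};
  assert(UnorderedRemove(v, 2));
  assert(v.size() == 3);
  assert(!UnorderedRemove(v, 99));
  return 0;
}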