Author:    Martin Liska <mliska@suse.cz>  2022-08-30 11:45:34 +0200
Committer: Martin Liska <mliska@suse.cz>  2022-08-30 12:53:50 +0200
Commit:    600413c4f3d70392285192fb99634bcbeb97f83f
Tree:      0586f1cc2feaa4f5a3d632926b08bde261c39786 /libsanitizer/hwasan
Parent:    bdd3547ae4279c14a9db883719c9648ed09dc18a
libsanitizer: merge from master (84a71d5259c2682403cdbd8710592410a2f128ab)
Diffstat (limited to 'libsanitizer/hwasan')
-rw-r--r--  libsanitizer/hwasan/hwasan.cpp                      18
-rw-r--r--  libsanitizer/hwasan/hwasan.h                        11
-rw-r--r--  libsanitizer/hwasan/hwasan_allocator.h               4
-rw-r--r--  libsanitizer/hwasan/hwasan_checks.h                 19
-rw-r--r--  libsanitizer/hwasan/hwasan_exceptions.cpp            2
-rw-r--r--  libsanitizer/hwasan/hwasan_fuchsia.cpp              20
-rw-r--r--  libsanitizer/hwasan/hwasan_interceptors.cpp         45
-rw-r--r--  libsanitizer/hwasan/hwasan_interface_internal.h      8
-rw-r--r--  libsanitizer/hwasan/hwasan_linux.cpp               183
-rw-r--r--  libsanitizer/hwasan/hwasan_report.cpp               13
-rw-r--r--  libsanitizer/hwasan/hwasan_setjmp_riscv64.S         97
-rw-r--r--  libsanitizer/hwasan/hwasan_tag_mismatch_riscv64.S  132
12 files changed, 495 insertions, 57 deletions
diff --git a/libsanitizer/hwasan/hwasan.cpp b/libsanitizer/hwasan/hwasan.cpp
index f8725a1..bb946c2 100644
--- a/libsanitizer/hwasan/hwasan.cpp
+++ b/libsanitizer/hwasan/hwasan.cpp
@@ -218,8 +218,8 @@ void HandleTagMismatch(AccessInfo ai, uptr pc, uptr frame, void *uc,
registers_frame);
}
-void HwasanTagMismatch(uptr addr, uptr access_info, uptr *registers_frame,
- size_t outsize) {
+void HwasanTagMismatch(uptr addr, uptr pc, uptr frame, uptr access_info,
+ uptr *registers_frame, size_t outsize) {
__hwasan::AccessInfo ai;
ai.is_store = access_info & 0x10;
ai.is_load = !ai.is_store;
@@ -230,9 +230,7 @@ void HwasanTagMismatch(uptr addr, uptr access_info, uptr *registers_frame,
else
ai.size = 1 << (access_info & 0xf);
- HandleTagMismatch(ai, (uptr)__builtin_return_address(0),
- (uptr)__builtin_frame_address(0), nullptr, registers_frame);
- __builtin_unreachable();
+ HandleTagMismatch(ai, pc, frame, nullptr, registers_frame);
}
Thread *GetCurrentThread() {
@@ -576,6 +574,12 @@ u8 __hwasan_generate_tag() {
return t->GenerateRandomTag();
}
+void __hwasan_add_frame_record(u64 frame_record_info) {
+ Thread *t = GetCurrentThread();
+ if (t)
+ t->stack_allocations()->push(frame_record_info);
+}
+
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
@@ -594,7 +598,9 @@ void __sanitizer_print_stack_trace() {
// rest of the mismatch handling code (C++).
void __hwasan_tag_mismatch4(uptr addr, uptr access_info, uptr *registers_frame,
size_t outsize) {
- __hwasan::HwasanTagMismatch(addr, access_info, registers_frame, outsize);
+ __hwasan::HwasanTagMismatch(addr, (uptr)__builtin_return_address(0),
+ (uptr)__builtin_frame_address(0), access_info,
+ registers_frame, outsize);
}
} // extern "C"
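
For reference, the access_info word that HwasanTagMismatch unpacks above can be
summarized with a minimal standalone sketch. This is illustrative only -- the
struct and helper names below are not part of the patch, and the 0xf/outsize
case follows the surrounding upstream code:

    // Bit 4 distinguishes stores from loads, bit 5 marks a recoverable access,
    // and the low 4 bits hold log2 of the access size; the special value 0xf
    // means the size is passed out of band (the outsize parameter).
    struct DecodedAccessInfo {
      bool is_store;
      bool recover;
      unsigned long size;
    };

    static inline DecodedAccessInfo DecodeAccessInfo(unsigned long access_info,
                                                     unsigned long outsize) {
      DecodedAccessInfo d;
      d.is_store = access_info & 0x10;
      d.recover = access_info & 0x20;
      d.size = (access_info & 0xf) == 0xf ? outsize
                                          : (1UL << (access_info & 0xf));
      return d;
    }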
diff --git a/libsanitizer/hwasan/hwasan.h b/libsanitizer/hwasan/hwasan.h
index 3cc2fc4..ef4055a 100644
--- a/libsanitizer/hwasan/hwasan.h
+++ b/libsanitizer/hwasan/hwasan.h
@@ -167,8 +167,8 @@ void HandleTagMismatch(AccessInfo ai, uptr pc, uptr frame, void *uc,
// This dispatches to HandleTagMismatch but sets up the AccessInfo, program
// counter, and frame pointer.
-void HwasanTagMismatch(uptr addr, uptr access_info, uptr *registers_frame,
- size_t outsize);
+void HwasanTagMismatch(uptr addr, uptr pc, uptr frame, uptr access_info,
+ uptr *registers_frame, size_t outsize);
} // namespace __hwasan
@@ -181,6 +181,13 @@ typedef unsigned long __hw_sigset_t;
constexpr size_t kHwRegisterBufSize = 22;
# elif defined(__x86_64__)
constexpr size_t kHwRegisterBufSize = 8;
+# elif SANITIZER_RISCV64
+// saving PC, 12 int regs, sp, 12 fp regs
+# ifndef __riscv_float_abi_soft
+constexpr size_t kHwRegisterBufSize = 1 + 12 + 1 + 12;
+# else
+constexpr size_t kHwRegisterBufSize = 1 + 12 + 1;
+# endif
# endif
typedef unsigned long long __hw_register_buf[kHwRegisterBufSize];
struct __hw_jmp_buf_struct {
diff --git a/libsanitizer/hwasan/hwasan_allocator.h b/libsanitizer/hwasan/hwasan_allocator.h
index 35c3d6b..bae53b5 100644
--- a/libsanitizer/hwasan/hwasan_allocator.h
+++ b/libsanitizer/hwasan/hwasan_allocator.h
@@ -24,8 +24,8 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_ring_buffer.h"
-#if !defined(__aarch64__) && !defined(__x86_64__)
-#error Unsupported platform
+#if !defined(__aarch64__) && !defined(__x86_64__) && !(SANITIZER_RISCV64)
+# error Unsupported platform
#endif
namespace __hwasan {
diff --git a/libsanitizer/hwasan/hwasan_checks.h b/libsanitizer/hwasan/hwasan_checks.h
index ab543ea..b0b37d7 100644
--- a/libsanitizer/hwasan/hwasan_checks.h
+++ b/libsanitizer/hwasan/hwasan_checks.h
@@ -36,6 +36,15 @@ __attribute__((always_inline)) static void SigTrap(uptr p) {
"int3\n"
"nopl %c0(%%rax)\n" ::"n"(0x40 + X),
"D"(p));
+#elif SANITIZER_RISCV64
+ // Put pointer into x10
+  // addiw carries an immediate of 0x40 + X, where 0x40 is a magic number and
+  // X encodes the access size
+ register uptr x10 asm("x10") = p;
+ asm volatile(
+ "ebreak\n"
+ "addiw x0, x0, %1\n" ::"r"(x10),
+ "I"(0x40 + X));
#else
// FIXME: not always sigill.
__builtin_trap();
@@ -56,6 +65,14 @@ __attribute__((always_inline)) static void SigTrap(uptr p, uptr size) {
"int3\n"
"nopl %c0(%%rax)\n" ::"n"(0x40 + X),
"D"(p), "S"(size));
+#elif SANITIZER_RISCV64
+ // Put access size into x11
+ register uptr x10 asm("x10") = p;
+ register uptr x11 asm("x11") = size;
+ asm volatile(
+ "ebreak\n"
+ "addiw x0, x0, %2\n" ::"r"(x10),
+ "r"(x11), "I"(0x40 + X));
#else
__builtin_trap();
#endif
@@ -71,7 +88,7 @@ __attribute__((always_inline, nodebug)) static bool PossiblyShortTagMatches(
return false;
if ((ptr & (kShadowAlignment - 1)) + sz > mem_tag)
return false;
-#ifndef __aarch64__
+#if !defined(__aarch64__) && !(SANITIZER_RISCV64)
ptr = UntagAddr(ptr);
#endif
return *(u8 *)(ptr | (kShadowAlignment - 1)) == ptr_tag;
diff --git a/libsanitizer/hwasan/hwasan_exceptions.cpp b/libsanitizer/hwasan/hwasan_exceptions.cpp
index 6ed1da3..c9968a5 100644
--- a/libsanitizer/hwasan/hwasan_exceptions.cpp
+++ b/libsanitizer/hwasan/hwasan_exceptions.cpp
@@ -56,6 +56,8 @@ __hwasan_personality_wrapper(int version, _Unwind_Action actions,
uptr fp = get_gr(context, 6); // rbp
#elif defined(__aarch64__)
uptr fp = get_gr(context, 29); // x29
+#elif SANITIZER_RISCV64
+ uptr fp = get_gr(context, 8); // x8
#else
#error Unsupported architecture
#endif
diff --git a/libsanitizer/hwasan/hwasan_fuchsia.cpp b/libsanitizer/hwasan/hwasan_fuchsia.cpp
index 94e5c5f..967c796 100644
--- a/libsanitizer/hwasan/hwasan_fuchsia.cpp
+++ b/libsanitizer/hwasan/hwasan_fuchsia.cpp
@@ -15,6 +15,9 @@
#include "sanitizer_common/sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA
+#include <zircon/features.h>
+#include <zircon/syscalls.h>
+
#include "hwasan.h"
#include "hwasan_interface_internal.h"
#include "hwasan_report.h"
@@ -182,9 +185,20 @@ void InstallAtExitHandler() {}
void HwasanInstallAtForkHandler() {}
-// TODO(fxbug.dev/81499): Once we finalize the tagged pointer ABI in zircon, we should come back
-// here and implement the appropriate check that TBI is enabled.
-void InitializeOsSupport() {}
+void InitializeOsSupport() {
+#ifdef __aarch64__
+ uint32_t features = 0;
+ CHECK_EQ(zx_system_get_features(ZX_FEATURE_KIND_ADDRESS_TAGGING, &features),
+ ZX_OK);
+ if (!(features & ZX_ARM64_FEATURE_ADDRESS_TAGGING_TBI) &&
+ flags()->fail_without_syscall_abi) {
+ Printf(
+ "FATAL: HWAddressSanitizer requires "
+ "ZX_ARM64_FEATURE_ADDRESS_TAGGING_TBI.\n");
+ Die();
+ }
+#endif
+}
} // namespace __hwasan
diff --git a/libsanitizer/hwasan/hwasan_interceptors.cpp b/libsanitizer/hwasan/hwasan_interceptors.cpp
index 8dc886e..c67927d 100644
--- a/libsanitizer/hwasan/hwasan_interceptors.cpp
+++ b/libsanitizer/hwasan/hwasan_interceptors.cpp
@@ -75,6 +75,8 @@ InternalLongjmp(__hw_register_buf env, int retval) {
constexpr size_t kSpIndex = 13;
# elif defined(__x86_64__)
constexpr size_t kSpIndex = 6;
+# elif SANITIZER_RISCV64
+ constexpr size_t kSpIndex = 13;
# endif
// Clear all memory tags on the stack between here and where we're going.
@@ -131,6 +133,49 @@ InternalLongjmp(__hw_register_buf env, int retval) {
"cmovnz %1,%%rax;"
"jmp *%%rdx;" ::"r"(env_address),
"r"(retval_tmp));
+# elif SANITIZER_RISCV64
+ register long int retval_tmp asm("x11") = retval;
+ register void *env_address asm("x10") = &env[0];
+ asm volatile(
+ "ld ra, 0<<3(%0);"
+ "ld s0, 1<<3(%0);"
+ "ld s1, 2<<3(%0);"
+ "ld s2, 3<<3(%0);"
+ "ld s3, 4<<3(%0);"
+ "ld s4, 5<<3(%0);"
+ "ld s5, 6<<3(%0);"
+ "ld s6, 7<<3(%0);"
+ "ld s7, 8<<3(%0);"
+ "ld s8, 9<<3(%0);"
+ "ld s9, 10<<3(%0);"
+ "ld s10, 11<<3(%0);"
+ "ld s11, 12<<3(%0);"
+# if __riscv_float_abi_double
+ "fld fs0, 14<<3(%0);"
+ "fld fs1, 15<<3(%0);"
+ "fld fs2, 16<<3(%0);"
+ "fld fs3, 17<<3(%0);"
+ "fld fs4, 18<<3(%0);"
+ "fld fs5, 19<<3(%0);"
+ "fld fs6, 20<<3(%0);"
+ "fld fs7, 21<<3(%0);"
+ "fld fs8, 22<<3(%0);"
+ "fld fs9, 23<<3(%0);"
+ "fld fs10, 24<<3(%0);"
+ "fld fs11, 25<<3(%0);"
+# elif __riscv_float_abi_soft
+# else
+# error "Unsupported case"
+# endif
+ "ld a4, 13<<3(%0);"
+ "mv sp, a4;"
+ // Return the value requested to return through arguments.
+ // This should be in x11 given what we requested above.
+ "seqz a0, %1;"
+ "add a0, a0, %1;"
+ "ret;"
+ : "+r"(env_address)
+ : "r"(retval_tmp));
# endif
}
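
The riscv64 slot layout of __hw_register_buf assumed by the InternalLongjmp
sequence above (and by hwasan_setjmp_riscv64.S further down) can be written out
explicitly. This is a descriptive sketch; the enum and constant names are
illustrative, only the indices come from the code:

    // Indices into __hw_register_buf on riscv64, matching
    // kHwRegisterBufSize = 1 + 12 + 1 + 12 from hwasan.h:
    enum RiscvJmpBufSlot : unsigned {
      kRaSlot = 0,              // ra, the saved return address
      kFirstSavedIntSlot = 1,   // s0 .. s11 occupy slots 1..12
      kLastSavedIntSlot = 12,
      kSpSlot = 13,             // stack pointer (kSpIndex in the interceptor)
      kFirstSavedFpSlot = 14,   // fs0 .. fs11 occupy slots 14..25
      kLastSavedFpSlot = 25,    // (only with __riscv_float_abi_double)
    };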
diff --git a/libsanitizer/hwasan/hwasan_interface_internal.h b/libsanitizer/hwasan/hwasan_interface_internal.h
index ef771ad..d1ecbb5 100644
--- a/libsanitizer/hwasan/hwasan_interface_internal.h
+++ b/libsanitizer/hwasan/hwasan_interface_internal.h
@@ -168,6 +168,14 @@ void __hwasan_thread_exit();
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_print_memory_usage();
+// The compiler will generate this when
+// `-hwasan-record-stack-history-with-calls` is added as a flag, which will add
+// frame record information to the stack ring buffer. This is an alternative to
+// the compiler emitting instructions in the prologue for doing the same thing
+// by accessing the ring buffer directly.
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_add_frame_record(u64 frame_record_info);
+
SANITIZER_INTERFACE_ATTRIBUTE
void *__hwasan_memcpy(void *dst, const void *src, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
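
The comment above describes when the compiler emits calls to
__hwasan_add_frame_record. As a rough illustration of the intended call shape
(an assumption, not code from this patch; the packing of frame_record_info is
chosen by the compiler pass and is only a placeholder here):

    extern "C" void __hwasan_add_frame_record(unsigned long long frame_record_info);

    void instrumented_prologue_example(unsigned long long packed_fp_and_pc) {
      // With -hwasan-record-stack-history-with-calls, each instrumented
      // prologue calls the runtime instead of writing the frame record into
      // the thread's stack ring buffer inline.
      __hwasan_add_frame_record(packed_fp_and_pc);
    }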
diff --git a/libsanitizer/hwasan/hwasan_linux.cpp b/libsanitizer/hwasan/hwasan_linux.cpp
index ba9e236..88ccfde 100644
--- a/libsanitizer/hwasan/hwasan_linux.cpp
+++ b/libsanitizer/hwasan/hwasan_linux.cpp
@@ -110,15 +110,84 @@ static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
FindDynamicShadowStart(shadow_size_bytes);
}
-void InitializeOsSupport() {
+static void MaybeDieIfNoTaggingAbi(const char *message) {
+ if (!flags()->fail_without_syscall_abi)
+ return;
+ Printf("FATAL: %s\n", message);
+ Die();
+}
+
# define PR_SET_TAGGED_ADDR_CTRL 55
# define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
+# define ARCH_GET_UNTAG_MASK 0x4001
+# define ARCH_ENABLE_TAGGED_ADDR 0x4002
+# define ARCH_GET_MAX_TAG_BITS 0x4003
+
+static bool CanUseTaggingAbi() {
+# if defined(__x86_64__)
+ unsigned long num_bits = 0;
+ // Check for x86 LAM support. This API is based on a currently unsubmitted
+ // patch to the Linux kernel (as of August 2022) and is thus subject to
+ // change. The patch is here:
+ // https://lore.kernel.org/all/20220815041803.17954-1-kirill.shutemov@linux.intel.com/
+ //
+ // arch_prctl(ARCH_GET_MAX_TAG_BITS, &bits) returns the maximum number of tag
+ // bits the user can request, or zero if LAM is not supported by the hardware.
+ if (internal_iserror(internal_arch_prctl(ARCH_GET_MAX_TAG_BITS,
+ reinterpret_cast<uptr>(&num_bits))))
+ return false;
+ // The platform must provide enough bits for HWASan tags.
+ if (num_bits < kTagBits)
+ return false;
+ return true;
+# else
+ // Check for ARM TBI support.
+ return !internal_iserror(internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
+# endif // __x86_64__
+}
+
+static bool EnableTaggingAbi() {
+# if defined(__x86_64__)
+ // Enable x86 LAM tagging for the process.
+ //
+ // arch_prctl(ARCH_ENABLE_TAGGED_ADDR, bits) enables tagging if the number of
+ // tag bits requested by the user does not exceed that provided by the system.
+ // arch_prctl(ARCH_GET_UNTAG_MASK, &mask) returns the mask of significant
+ // address bits. It is ~0ULL if either LAM is disabled for the process or LAM
+ // is not supported by the hardware.
+ if (internal_iserror(internal_arch_prctl(ARCH_ENABLE_TAGGED_ADDR, kTagBits)))
+ return false;
+ unsigned long mask = 0;
+ // Make sure the tag bits are where we expect them to be.
+ if (internal_iserror(internal_arch_prctl(ARCH_GET_UNTAG_MASK,
+ reinterpret_cast<uptr>(&mask))))
+ return false;
+ // @mask has ones for non-tag bits, whereas @kAddressTagMask has ones for tag
+ // bits. Therefore these masks must not overlap.
+ if (mask & kAddressTagMask)
+ return false;
+ return true;
+# else
+ // Enable ARM TBI tagging for the process. If for some reason tagging is not
+ // supported, prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE) returns
+ // -EINVAL.
+ if (internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL,
+ PR_TAGGED_ADDR_ENABLE, 0, 0, 0)))
+ return false;
+ // Ensure that TBI is enabled.
+ if (internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) !=
+ PR_TAGGED_ADDR_ENABLE)
+ return false;
+ return true;
+# endif // __x86_64__
+}
+
+void InitializeOsSupport() {
// Check we're running on a kernel that can use the tagged address ABI.
- int local_errno = 0;
- if (internal_iserror(internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0),
- &local_errno) &&
- local_errno == EINVAL) {
+ bool has_abi = CanUseTaggingAbi();
+
+ if (!has_abi) {
# if SANITIZER_ANDROID || defined(HWASAN_ALIASING_MODE)
// Some older Android kernels have the tagged pointer ABI on
// unconditionally, and hence don't have the tagged-addr prctl while still
@@ -127,46 +196,22 @@ void InitializeOsSupport() {
// case.
return;
# else
- if (flags()->fail_without_syscall_abi) {
- Printf(
- "FATAL: "
- "HWAddressSanitizer requires a kernel with tagged address ABI.\n");
- Die();
- }
+ MaybeDieIfNoTaggingAbi(
+ "HWAddressSanitizer requires a kernel with tagged address ABI.");
# endif
}
- // Turn on the tagged address ABI.
- if ((internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL,
- PR_TAGGED_ADDR_ENABLE, 0, 0, 0)) ||
- !internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0))) {
-# if defined(__x86_64__) && !defined(HWASAN_ALIASING_MODE)
- // Try the new prctl API for Intel LAM. The API is based on a currently
- // unsubmitted patch to the Linux kernel (as of May 2021) and is thus
- // subject to change. Patch is here:
- // https://lore.kernel.org/linux-mm/20210205151631.43511-12-kirill.shutemov@linux.intel.com/
- int tag_bits = kTagBits;
- int tag_shift = kAddressTagShift;
- if (!internal_iserror(
- internal_prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE,
- reinterpret_cast<unsigned long>(&tag_bits),
- reinterpret_cast<unsigned long>(&tag_shift), 0))) {
- CHECK_EQ(tag_bits, kTagBits);
- CHECK_EQ(tag_shift, kAddressTagShift);
- return;
- }
-# endif // defined(__x86_64__) && !defined(HWASAN_ALIASING_MODE)
- if (flags()->fail_without_syscall_abi) {
- Printf(
- "FATAL: HWAddressSanitizer failed to enable tagged address syscall "
- "ABI.\nSuggest check `sysctl abi.tagged_addr_disabled` "
- "configuration.\n");
- Die();
- }
- }
-# undef PR_SET_TAGGED_ADDR_CTRL
-# undef PR_GET_TAGGED_ADDR_CTRL
-# undef PR_TAGGED_ADDR_ENABLE
+ if (EnableTaggingAbi())
+ return;
+
+# if SANITIZER_ANDROID
+ MaybeDieIfNoTaggingAbi(
+ "HWAddressSanitizer failed to enable tagged address syscall ABI.\n"
+ "Check the `sysctl abi.tagged_addr_disabled` configuration.");
+# else
+ MaybeDieIfNoTaggingAbi(
+ "HWAddressSanitizer failed to enable tagged address syscall ABI.\n");
+# endif
}
bool InitShadow() {
@@ -358,6 +403,47 @@ static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
const uptr size =
size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;
+# elif SANITIZER_RISCV64
+ // Access type is encoded in the instruction following EBREAK as
+ // ADDI x0, x0, [0x40 + 0xXY]. For Y == 0xF, access size is stored in
+ // X11 register. Access address is always in X10 register.
+ uptr pc = (uptr)uc->uc_mcontext.__gregs[REG_PC];
+ uint8_t byte1 = *((u8 *)(pc + 0));
+ uint8_t byte2 = *((u8 *)(pc + 1));
+ uint8_t byte3 = *((u8 *)(pc + 2));
+ uint8_t byte4 = *((u8 *)(pc + 3));
+ uint32_t ebreak = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
+ bool isFaultShort = false;
+ bool isEbreak = (ebreak == 0x100073);
+ bool isShortEbreak = false;
+# if defined(__riscv_compressed)
+ isFaultShort = ((ebreak & 0x3) != 0x3);
+ isShortEbreak = ((ebreak & 0xffff) == 0x9002);
+# endif
+ // faulted insn is not ebreak, not our case
+ if (!(isEbreak || isShortEbreak))
+ return AccessInfo{};
+ // advance pc to point after ebreak and reconstruct addi instruction
+ pc += isFaultShort ? 2 : 4;
+ byte1 = *((u8 *)(pc + 0));
+ byte2 = *((u8 *)(pc + 1));
+ byte3 = *((u8 *)(pc + 2));
+ byte4 = *((u8 *)(pc + 3));
+ // reconstruct instruction
+ uint32_t instr = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
+  // Check that this is really a 32-bit instruction: the access code is
+  // encoded in the top 12 bits, since the instruction is expected to carry it
+  // as an immediate.
+ const unsigned code = (instr >> 20) & 0xffff;
+ const uptr addr = uc->uc_mcontext.__gregs[10];
+ const bool is_store = code & 0x10;
+ const bool recover = code & 0x20;
+ const unsigned size_log = code & 0xf;
+ if (size_log > 4 && size_log != 0xf)
+ return AccessInfo{}; // Not our case
+ const uptr size =
+ size_log == 0xf ? uc->uc_mcontext.__gregs[11] : 1U << size_log;
+
# else
# error Unsupported architecture
# endif
@@ -376,6 +462,19 @@ static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
# if defined(__aarch64__)
uc->uc_mcontext.pc += 4;
# elif defined(__x86_64__)
+# elif SANITIZER_RISCV64
+ // pc points to EBREAK which is 2 bytes long
+ uint8_t *exception_source = (uint8_t *)(uc->uc_mcontext.__gregs[REG_PC]);
+ uint8_t byte1 = (uint8_t)(*(exception_source + 0));
+ uint8_t byte2 = (uint8_t)(*(exception_source + 1));
+ uint8_t byte3 = (uint8_t)(*(exception_source + 2));
+ uint8_t byte4 = (uint8_t)(*(exception_source + 3));
+ uint32_t faulted = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
+ bool isFaultShort = false;
+# if defined(__riscv_compressed)
+ isFaultShort = ((faulted & 0x3) != 0x3);
+# endif
+ uc->uc_mcontext.__gregs[REG_PC] += isFaultShort ? 2 : 4;
# else
# error Unsupported architecture
# endif
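
Both RISC-V paths above (GetAccessInfo and the SIGTRAP recovery) rely on the
same rule for the length of the faulting EBREAK. A minimal sketch of that rule,
assuming the standard RISC-V encoding (the helper name is illustrative):

    // A 32-bit RISC-V instruction has its two least-significant bits equal to
    // 0b11; anything else is a 16-bit compressed instruction, which can only
    // occur when the C extension (__riscv_compressed) is available.
    static inline unsigned RiscvFaultInsnLength(unsigned int first_word) {
    #if defined(__riscv_compressed)
      return (first_word & 0x3) == 0x3 ? 4 : 2;
    #else
      return 4;
    #endif
    }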
diff --git a/libsanitizer/hwasan/hwasan_report.cpp b/libsanitizer/hwasan/hwasan_report.cpp
index 66d3d15..fe769589 100644
--- a/libsanitizer/hwasan/hwasan_report.cpp
+++ b/libsanitizer/hwasan/hwasan_report.cpp
@@ -746,7 +746,7 @@ void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
}
// See the frame breakdown defined in __hwasan_tag_mismatch (from
-// hwasan_tag_mismatch_aarch64.S).
+// hwasan_tag_mismatch_{aarch64,riscv64}.S).
void ReportRegisters(uptr *frame, uptr pc) {
Printf("Registers where the failure occurred (pc %p):\n", pc);
@@ -754,8 +754,13 @@ void ReportRegisters(uptr *frame, uptr pc) {
// reduce the amount of logcat error messages printed. Each Printf() will
// result in a new logcat line, irrespective of whether a newline is present,
// and so we wish to reduce the number of Printf() calls we have to make.
+#if defined(__aarch64__)
Printf(" x0 %016llx x1 %016llx x2 %016llx x3 %016llx\n",
frame[0], frame[1], frame[2], frame[3]);
+#elif SANITIZER_RISCV64
+ Printf(" sp %016llx x1 %016llx x2 %016llx x3 %016llx\n",
+ reinterpret_cast<u8 *>(frame) + 256, frame[1], frame[2], frame[3]);
+#endif
Printf(" x4 %016llx x5 %016llx x6 %016llx x7 %016llx\n",
frame[4], frame[5], frame[6], frame[7]);
Printf(" x8 %016llx x9 %016llx x10 %016llx x11 %016llx\n",
@@ -770,8 +775,14 @@ void ReportRegisters(uptr *frame, uptr pc) {
frame[24], frame[25], frame[26], frame[27]);
// hwasan_check* reduces the stack pointer by 256, then __hwasan_tag_mismatch
// passes it to this function.
+#if defined(__aarch64__)
Printf(" x28 %016llx x29 %016llx x30 %016llx sp %016llx\n", frame[28],
frame[29], frame[30], reinterpret_cast<u8 *>(frame) + 256);
+#elif SANITIZER_RISCV64
+ Printf(" x28 %016llx x29 %016llx x30 %016llx x31 %016llx\n", frame[28],
+ frame[29], frame[30], frame[31]);
+#else
+#endif
}
} // namespace __hwasan
diff --git a/libsanitizer/hwasan/hwasan_setjmp_riscv64.S b/libsanitizer/hwasan/hwasan_setjmp_riscv64.S
new file mode 100644
index 0000000..f33c491
--- /dev/null
+++ b/libsanitizer/hwasan/hwasan_setjmp_riscv64.S
@@ -0,0 +1,97 @@
+//===-- hwasan_setjmp_riscv64.S -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+// setjmp interceptor for risc-v.
+// HWAddressSanitizer runtime.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_asm.h"
+#include "builtins/assembly.h"
+
+#if HWASAN_WITH_INTERCEPTORS && defined(__riscv) && (__riscv_xlen == 64)
+#include "sanitizer_common/sanitizer_platform.h"
+
+// We want to save the context of the calling function.
+// That requires
+// 1) No modification of the link register by this function.
+// 2) No modification of the stack pointer by this function.
+// 3) (no modification of any other saved register, but that's not really going
+// to occur, and hence isn't as much of a worry).
+//
+// There's essentially no way to ensure that the compiler will not modify the
+// stack pointer when compiling a C function.
+// Hence we have to write this function in assembly.
+
+.section .text
+.file "hwasan_setjmp_riscv64.S"
+
+.global __interceptor_setjmp
+ASM_TYPE_FUNCTION(__interceptor_setjmp)
+__interceptor_setjmp:
+ CFI_STARTPROC
+ addi x11, x0, 0
+ j __interceptor_sigsetjmp
+ CFI_ENDPROC
+ASM_SIZE(__interceptor_setjmp)
+
+.global __interceptor_sigsetjmp
+ASM_TYPE_FUNCTION(__interceptor_sigsetjmp)
+__interceptor_sigsetjmp:
+ CFI_STARTPROC
+ sd ra, 0<<3(x10)
+ sd s0, 1<<3(x10)
+ sd s1, 2<<3(x10)
+ sd s2, 3<<3(x10)
+ sd s3, 4<<3(x10)
+ sd s4, 5<<3(x10)
+ sd s5, 6<<3(x10)
+ sd s6, 7<<3(x10)
+ sd s7, 8<<3(x10)
+ sd s8, 9<<3(x10)
+ sd s9, 10<<3(x10)
+ sd s10, 11<<3(x10)
+ sd s11, 12<<3(x10)
+ sd sp, 13<<3(x10)
+#if __riscv_float_abi_double
+ fsd fs0, 14<<3(x10)
+ fsd fs1, 15<<3(x10)
+ fsd fs2, 16<<3(x10)
+ fsd fs3, 17<<3(x10)
+ fsd fs4, 18<<3(x10)
+ fsd fs5, 19<<3(x10)
+ fsd fs6, 20<<3(x10)
+ fsd fs7, 21<<3(x10)
+ fsd fs8, 22<<3(x10)
+ fsd fs9, 23<<3(x10)
+ fsd fs10, 24<<3(x10)
+ fsd fs11, 25<<3(x10)
+#elif __riscv_float_abi_soft
+#else
+# error "Unsupported case"
+#endif
+ // We always have the second argument to __sigjmp_save (savemask) set, since
+ // the _setjmp function above has set it for us as `false`.
+ // This function is defined in hwasan_interceptors.cc
+ tail __sigjmp_save
+ CFI_ENDPROC
+ASM_SIZE(__interceptor_sigsetjmp)
+
+
+.macro WEAK_ALIAS first second
+ .weak \second
+ .equ \second\(), \first
+.endm
+
+WEAK_ALIAS __interceptor_sigsetjmp, __sigsetjmp
+
+WEAK_ALIAS __interceptor_setjmp, _setjmp
+#endif
+
+// We do not need executable stack.
+NO_EXEC_STACK_DIRECTIVE
diff --git a/libsanitizer/hwasan/hwasan_tag_mismatch_riscv64.S b/libsanitizer/hwasan/hwasan_tag_mismatch_riscv64.S
new file mode 100644
index 0000000..487a042
--- /dev/null
+++ b/libsanitizer/hwasan/hwasan_tag_mismatch_riscv64.S
@@ -0,0 +1,132 @@
+#include "sanitizer_common/sanitizer_asm.h"
+
+// The content of this file is RISCV64-only:
+#if defined(__riscv) && (__riscv_xlen == 64)
+
+// The responsibility of the HWASan entry point in compiler-rt is primarily to
+// readjust the stack from the callee and save the current register values to
+// the stack.
+// This entry point function should be called from a __hwasan_check_* symbol.
+// These are generated during a lowering pass in the backend, and are found in
+// RISCVAsmPrinter::EmitHwasanMemaccessSymbols(). Please look there for
+// further information.
+// The __hwasan_check_* caller of this function should have expanded the stack
+// and saved the previous values of x10(arg0), x11(arg1), x1(ra), and x8(fp).
+// This function will "consume" these saved values and treat them as part of its
+// own stack frame. In this sense, the __hwasan_check_* callee and this function
+// "share" a stack frame. This allows us to omit having unwinding information
+// (.cfi_*) present in every __hwasan_check_* function, therefore reducing binary size.
+// This is particularly important as hwasan_check_* instances are duplicated in every
+// translation unit where HWASan is enabled.
+// This function calls HwasanTagMismatch to step back into the C++ code that
+// completes the stack unwinding and error printing. This function is not
+// permitted to return.
+
+
+// | ... |
+// | ... |
+// | Previous stack frames... |
+// +=================================+
+// | ... |
+// | |
+// | Stack frame space for x12 - x31.|
+// | |
+// | ... |
+// +---------------------------------+ <-- [SP + 96]
+// | Saved x11(arg1), as |
+// | __hwasan_check_* clobbers it. |
+// +---------------------------------+ <-- [SP + 88]
+// | Saved x10(arg0), as |
+// | __hwasan_check_* clobbers it. |
+// +---------------------------------+ <-- [SP + 80]
+// | |
+// | Stack frame space for x9. |
+// +---------------------------------+ <-- [SP + 72]
+// | |
+// | Saved x8(fp), as |
+// | __hwasan_check_* clobbers it. |
+// +---------------------------------+ <-- [SP + 64]
+// | ... |
+// | |
+// | Stack frame space for x2 - x7. |
+// | |
+// | ... |
+// +---------------------------------+ <-- [SP + 16]
+// | Return address (x1) for caller |
+// | of __hwasan_check_*. |
+// +---------------------------------+ <-- [SP + 8]
+// | Reserved place for x0, possibly |
+// | junk, since we don't save it. |
+// +---------------------------------+ <-- [x2 / SP]
+
+// This function takes two arguments:
+// * x10/a0: The data address.
+// * x11/a1: The encoded access info for the failing access.
+
+.section .text
+.file "hwasan_tag_mismatch_riscv64.S"
+
+.global __hwasan_tag_mismatch_v2
+ASM_TYPE_FUNCTION(__hwasan_tag_mismatch_v2)
+__hwasan_tag_mismatch_v2:
+ CFI_STARTPROC
+
+ // Set the CFA to be the return address for caller of __hwasan_check_*. Note
+ // that we do not emit CFI predicates to describe the contents of this stack
+ // frame, as this proxy entry point should never be debugged. The contents
+ // are static and are handled by the unwinder after calling
+  // __hwasan_tag_mismatch. The frame pointer is already correctly set up
+  // by __hwasan_check_*.
+ addi fp, sp, 256
+ CFI_DEF_CFA(fp, 0)
+ CFI_OFFSET(ra, -248)
+ CFI_OFFSET(fp, -192)
+
+ // Save the rest of the registers into the preallocated space left by
+ // __hwasan_check.
+ sd x31, 248(sp)
+ sd x30, 240(sp)
+ sd x29, 232(sp)
+ sd x28, 224(sp)
+ sd x27, 216(sp)
+ sd x26, 208(sp)
+ sd x25, 200(sp)
+ sd x24, 192(sp)
+ sd x23, 184(sp)
+ sd x22, 176(sp)
+ sd x21, 168(sp)
+ sd x20, 160(sp)
+ sd x19, 152(sp)
+ sd x18, 144(sp)
+ sd x17, 136(sp)
+ sd x16, 128(sp)
+ sd x15, 120(sp)
+ sd x14, 112(sp)
+ sd x13, 104(sp)
+ sd x12, 96(sp)
+ // sd x11, 88(sp) ; already saved
+ // sd x10, 80(sp) ; already saved
+ sd x9, 72(sp)
+ // sd x8, 64(sp) ; already saved
+ sd x7, 56(sp)
+ sd x6, 48(sp)
+ sd x5, 40(sp)
+ sd x4, 32(sp)
+ sd x3, 24(sp)
+ sd x2, 16(sp)
+ // sd x1, 8(sp) ; already saved
+ // sd x0, 0(sp) ; don't store zero register
+
+ // Pass the address of the frame to __hwasan_tag_mismatch4, so that it can
+ // extract the saved registers from this frame without having to worry about
+ // finding this frame.
+ mv x12, sp
+
+ call __hwasan_tag_mismatch4
+ CFI_ENDPROC
+ASM_SIZE(__hwasan_tag_mismatch_v2)
+
+#endif // defined(__riscv) && (__riscv_xlen == 64)
+
+// We do not need executable stack.
+NO_EXEC_STACK_DIRECTIVE