Diffstat (limited to 'compiler-rt')
-rw-r--r--  compiler-rt/CMakeLists.txt                    2
-rw-r--r--  compiler-rt/lib/asan/asan_fake_stack.cpp    151
-rw-r--r--  compiler-rt/lib/asan/asan_fake_stack.h        3
-rw-r--r--  compiler-rt/lib/asan/asan_thread.cpp          6
-rw-r--r--  compiler-rt/lib/asan/asan_thread.h            2
-rw-r--r--  compiler-rt/lib/asan/tests/CMakeLists.txt    14
-rw-r--r--  compiler-rt/lib/builtins/cpu_model/x86.c      7
7 files changed, 108 insertions, 77 deletions
diff --git a/compiler-rt/CMakeLists.txt b/compiler-rt/CMakeLists.txt
index 9f8e833..5931b60 100644
--- a/compiler-rt/CMakeLists.txt
+++ b/compiler-rt/CMakeLists.txt
@@ -83,6 +83,8 @@ mark_as_advanced(COMPILER_RT_BUILD_ORC)
option(COMPILER_RT_BUILD_GWP_ASAN "Build GWP-ASan, and link it into SCUDO" ON)
mark_as_advanced(COMPILER_RT_BUILD_GWP_ASAN)
option(COMPILER_RT_ENABLE_CET "Build Compiler RT with CET enabled" OFF)
+option(COMPILER_RT_ASAN_UNIT_TESTS_USE_HOST_RUNTIME "Build asan unit tests without depending upon a just-built asan runtime" OFF)
+mark_as_advanced(COMPILER_RT_ASAN_UNIT_TESTS_USE_HOST_RUNTIME)
option(COMPILER_RT_SCUDO_STANDALONE_SYSROOT_PATH "Set custom sysroot for building SCUDO standalone" OFF)
mark_as_advanced(COMPILER_RT_SCUDO_STANDALONE_SYSROOT_PATH)
diff --git a/compiler-rt/lib/asan/asan_fake_stack.cpp b/compiler-rt/lib/asan/asan_fake_stack.cpp
index c3ed252..d3fa953 100644
--- a/compiler-rt/lib/asan/asan_fake_stack.cpp
+++ b/compiler-rt/lib/asan/asan_fake_stack.cpp
@@ -28,7 +28,7 @@ static const u64 kAllocaRedzoneMask = 31UL;
// For small size classes inline PoisonShadow for better performance.
ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
CHECK(AddrIsAlignedByGranularity(ptr + size));
- u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
+ u64* shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
if (ASAN_SHADOW_SCALE == 3 && class_id <= 6) {
// This code expects ASAN_SHADOW_SCALE=3.
for (uptr i = 0; i < (((uptr)1) << class_id); i++) {
@@ -47,7 +47,7 @@ ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
}
}
-FakeStack *FakeStack::Create(uptr stack_size_log) {
+FakeStack* FakeStack::Create(uptr stack_size_log) {
static uptr kMinStackSizeLog = 16;
static uptr kMaxStackSizeLog = FIRST_32_SECOND_64(24, 28);
if (stack_size_log < kMinStackSizeLog)
@@ -57,7 +57,7 @@ FakeStack *FakeStack::Create(uptr stack_size_log) {
CHECK_LE(kMaxStackFrameSizeLog, stack_size_log);
uptr size = RequiredSize(stack_size_log);
uptr padded_size = size + kMaxStackFrameSize;
- void *true_res = reinterpret_cast<void *>(
+ void* true_res = reinterpret_cast<void*>(
flags()->uar_noreserve ? MmapNoReserveOrDie(padded_size, "FakeStack")
: MmapOrDie(padded_size, "FakeStack"));
// GetFrame() requires the property that
@@ -66,20 +66,20 @@ FakeStack *FakeStack::Create(uptr stack_size_log) {
// We didn't use MmapAlignedOrDieOnFatalError, because it requires that the
// *size* is a power of 2, which is an overly strong condition.
static_assert(alignof(FakeStack) <= kMaxStackFrameSize);
- FakeStack *res = reinterpret_cast<FakeStack *>(
+ FakeStack* res = reinterpret_cast<FakeStack*>(
RoundUpTo(
(uptr)true_res + kFlagsOffset + SizeRequiredForFlags(stack_size_log),
kMaxStackFrameSize) -
kFlagsOffset - SizeRequiredForFlags(stack_size_log));
res->true_start = true_res;
res->stack_size_log_ = stack_size_log;
- u8 *p = reinterpret_cast<u8 *>(res);
+ u8* p = reinterpret_cast<u8*>(res);
VReport(1,
"T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
"mmapped %zdK, noreserve=%d, true_start: %p, start of first frame: "
"0x%zx\n",
- GetCurrentTidOrInvalid(), (void *)p,
- (void *)(p + FakeStack::RequiredSize(stack_size_log)), stack_size_log,
+ GetCurrentTidOrInvalid(), (void*)p,
+ (void*)(p + FakeStack::RequiredSize(stack_size_log)), stack_size_log,
size >> 10, flags()->uar_noreserve, res->true_start,
res->GetFrame(stack_size_log, /*class_id*/ 0, /*pos*/ 0));
return res;
@@ -109,14 +109,14 @@ void FakeStack::PoisonAll(u8 magic) {
#if !defined(_MSC_VER) || defined(__clang__)
ALWAYS_INLINE USED
#endif
-FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
- uptr real_stack) {
+ FakeFrame* FakeStack::Allocate(uptr stack_size_log, uptr class_id,
+ uptr real_stack) {
CHECK_LT(class_id, kNumberOfSizeClasses);
if (needs_gc_)
GC(real_stack);
- uptr &hint_position = hint_position_[class_id];
+ uptr& hint_position = hint_position_[class_id];
const int num_iter = NumberOfFrames(stack_size_log, class_id);
- u8 *flags = GetFlags(stack_size_log, class_id);
+ u8* flags = GetFlags(stack_size_log, class_id);
for (int i = 0; i < num_iter; i++) {
uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++);
// This part is tricky. On one hand, checking and setting flags[pos]
@@ -126,22 +126,24 @@ FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
// and so will not touch this particular byte. So, it is safe to do this
// with regular non-atomic load and store (at least I was not able to make
// this code crash).
- if (flags[pos]) continue;
+ if (flags[pos])
+ continue;
flags[pos] = 1;
- FakeFrame *res = reinterpret_cast<FakeFrame *>(
- GetFrame(stack_size_log, class_id, pos));
+ FakeFrame* res =
+ reinterpret_cast<FakeFrame*>(GetFrame(stack_size_log, class_id, pos));
res->real_stack = real_stack;
*SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos];
return res;
}
- return nullptr; // We are out of fake stack.
+ return nullptr; // We are out of fake stack.
}
-uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
+uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr* frame_beg, uptr* frame_end) {
uptr stack_size_log = this->stack_size_log();
uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
- if (ptr < beg || ptr >= end) return 0;
+ if (ptr < beg || ptr >= end)
+ return 0;
uptr class_id = (ptr - beg) >> stack_size_log;
uptr base = beg + (class_id << stack_size_log);
CHECK_LE(base, ptr);
@@ -153,9 +155,7 @@ uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
return res;
}
-void FakeStack::HandleNoReturn() {
- needs_gc_ = true;
-}
+void FakeStack::HandleNoReturn() { needs_gc_ = true; }
// Hack: The statement below is not true if we take into account sigaltstack or
// makecontext. It should be possible to make GC to discard wrong stack frame if
@@ -170,7 +170,7 @@ void FakeStack::HandleNoReturn() {
// We do it based on their 'real_stack' values -- everything that is lower
// than the current real_stack is garbage.
NOINLINE void FakeStack::GC(uptr real_stack) {
- AsanThread *curr_thread = GetCurrentThread();
+ AsanThread* curr_thread = GetCurrentThread();
if (!curr_thread)
return; // Try again when we have a thread.
auto top = curr_thread->stack_top();
@@ -179,12 +179,13 @@ NOINLINE void FakeStack::GC(uptr real_stack) {
return; // Not the default stack.
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
- u8 *flags = GetFlags(stack_size_log(), class_id);
+ u8* flags = GetFlags(stack_size_log(), class_id);
for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
i++) {
- if (flags[i] == 0) continue; // not allocated.
- FakeFrame *ff = reinterpret_cast<FakeFrame *>(
- GetFrame(stack_size_log(), class_id, i));
+ if (flags[i] == 0)
+ continue; // not allocated.
+ FakeFrame* ff =
+ reinterpret_cast<FakeFrame*>(GetFrame(stack_size_log(), class_id, i));
// GC only on the default stack.
if (bottom < ff->real_stack && ff->real_stack < real_stack) {
flags[i] = 0;
@@ -197,14 +198,15 @@ NOINLINE void FakeStack::GC(uptr real_stack) {
needs_gc_ = false;
}
-void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) {
+void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void* arg) {
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
- u8 *flags = GetFlags(stack_size_log(), class_id);
+ u8* flags = GetFlags(stack_size_log(), class_id);
for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
i++) {
- if (flags[i] == 0) continue; // not allocated.
- FakeFrame *ff = reinterpret_cast<FakeFrame *>(
- GetFrame(stack_size_log(), class_id, i));
+ if (flags[i] == 0)
+ continue; // not allocated.
+ FakeFrame* ff =
+ reinterpret_cast<FakeFrame*>(GetFrame(stack_size_log(), class_id, i));
uptr begin = reinterpret_cast<uptr>(ff);
callback(begin, begin + FakeStack::BytesInSizeClass(class_id), arg);
}
@@ -212,44 +214,51 @@ void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) {
}
#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA
-static THREADLOCAL FakeStack *fake_stack_tls;
+static THREADLOCAL FakeStack* fake_stack_tls;
-FakeStack *GetTLSFakeStack() {
- return fake_stack_tls;
-}
-void SetTLSFakeStack(FakeStack *fs) {
- fake_stack_tls = fs;
-}
+static FakeStack* GetTLSFakeStack() { return fake_stack_tls; }
+static void SetTLSFakeStack(FakeStack* fs) { fake_stack_tls = fs; }
+void ResetTLSFakeStack() { fake_stack_tls = nullptr; }
#else
-FakeStack *GetTLSFakeStack() { return 0; }
-void SetTLSFakeStack(FakeStack *fs) { }
+static FakeStack* GetTLSFakeStack() { return nullptr; }
+static void SetTLSFakeStack(FakeStack*) {}
+void ResetTLSFakeStack() {}
#endif // (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA
-static FakeStack *GetFakeStack() {
- AsanThread *t = GetCurrentThread();
- if (!t) return nullptr;
+static FakeStack* GetFakeStack() {
+ AsanThread* t = GetCurrentThread();
+ if (!t)
+ return nullptr;
return t->get_or_create_fake_stack();
}
-static FakeStack *GetFakeStackFast() {
- if (FakeStack *fs = GetTLSFakeStack())
+static FakeStack* GetFakeStackFast() {
+ FakeStack* fs = GetTLSFakeStack();
+ if (LIKELY(fs))
return fs;
if (!__asan_option_detect_stack_use_after_return)
return nullptr;
- return GetFakeStack();
+ fs = GetFakeStack();
+ if (LIKELY(fs))
+ SetTLSFakeStack(fs);
+ return fs;
}
-static FakeStack *GetFakeStackFastAlways() {
- if (FakeStack *fs = GetTLSFakeStack())
+static FakeStack* GetFakeStackFastAlways() {
+ FakeStack* fs = GetTLSFakeStack();
+ if (LIKELY(fs))
return fs;
- return GetFakeStack();
+ fs = GetFakeStack();
+ if (LIKELY(fs))
+ SetTLSFakeStack(fs);
+ return fs;
}
static ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
- FakeStack *fs = GetFakeStackFast();
+ FakeStack* fs = GetFakeStackFast();
if (!fs)
return 0;
- FakeFrame *ff =
+ FakeFrame* ff =
fs->Allocate(fs->stack_size_log(), class_id, GET_CURRENT_FRAME());
if (!ff)
return 0; // Out of fake stack.
@@ -259,10 +268,10 @@ static ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
}
static ALWAYS_INLINE uptr OnMallocAlways(uptr class_id, uptr size) {
- FakeStack *fs = GetFakeStackFastAlways();
+ FakeStack* fs = GetFakeStackFastAlways();
if (!fs)
return 0;
- FakeFrame *ff =
+ FakeFrame* ff =
fs->Allocate(fs->stack_size_log(), class_id, GET_CURRENT_FRAME());
if (!ff)
return 0; // Out of fake stack.
@@ -276,17 +285,17 @@ static ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
SetShadow(ptr, size, class_id, kMagic8);
}
-} // namespace __asan
+} // namespace __asan
// ---------------------- Interface ---------------- {{{1
using namespace __asan;
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
- __asan_stack_malloc_##class_id(uptr size) { \
+ __asan_stack_malloc_##class_id(uptr size) { \
return OnMalloc(class_id, size); \
} \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
- __asan_stack_malloc_always_##class_id(uptr size) { \
+ __asan_stack_malloc_always_##class_id(uptr size) { \
return OnMallocAlways(class_id, size); \
} \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
@@ -311,21 +320,25 @@ extern "C" {
// -asan-use-after-return=never, after modal UAR flag lands
// (https://github.com/google/sanitizers/issues/1394)
SANITIZER_INTERFACE_ATTRIBUTE
-void *__asan_get_current_fake_stack() { return GetFakeStackFast(); }
+void* __asan_get_current_fake_stack() { return GetFakeStackFast(); }
SANITIZER_INTERFACE_ATTRIBUTE
-void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
- void **end) {
- FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack);
- if (!fs) return nullptr;
+void* __asan_addr_is_in_fake_stack(void* fake_stack, void* addr, void** beg,
+ void** end) {
+ FakeStack* fs = reinterpret_cast<FakeStack*>(fake_stack);
+ if (!fs)
+ return nullptr;
uptr frame_beg, frame_end;
- FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack(
+ FakeFrame* frame = reinterpret_cast<FakeFrame*>(fs->AddrIsInFakeStack(
reinterpret_cast<uptr>(addr), &frame_beg, &frame_end));
- if (!frame) return nullptr;
+ if (!frame)
+ return nullptr;
if (frame->magic != kCurrentStackFrameMagic)
return nullptr;
- if (beg) *beg = reinterpret_cast<void*>(frame_beg);
- if (end) *end = reinterpret_cast<void*>(frame_end);
+ if (beg)
+ *beg = reinterpret_cast<void*>(frame_beg);
+ if (end)
+ *end = reinterpret_cast<void*>(frame_end);
return reinterpret_cast<void*>(frame->real_stack);
}
@@ -344,9 +357,9 @@ void __asan_alloca_poison(uptr addr, uptr size) {
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_allocas_unpoison(uptr top, uptr bottom) {
- if ((!top) || (top > bottom)) return;
- REAL(memset)
- (reinterpret_cast<void *>(MemToShadow(top)), 0,
- (bottom - top) / ASAN_SHADOW_GRANULARITY);
+ if ((!top) || (top > bottom))
+ return;
+ REAL(memset)(reinterpret_cast<void*>(MemToShadow(top)), 0,
+ (bottom - top) / ASAN_SHADOW_GRANULARITY);
}
-} // extern "C"
+} // extern "C"
diff --git a/compiler-rt/lib/asan/asan_fake_stack.h b/compiler-rt/lib/asan/asan_fake_stack.h
index 50706e6..593c137 100644
--- a/compiler-rt/lib/asan/asan_fake_stack.h
+++ b/compiler-rt/lib/asan/asan_fake_stack.h
@@ -195,8 +195,7 @@ class FakeStack {
void *true_start;
};
-FakeStack *GetTLSFakeStack();
-void SetTLSFakeStack(FakeStack *fs);
+void ResetTLSFakeStack();
} // namespace __asan
diff --git a/compiler-rt/lib/asan/asan_thread.cpp b/compiler-rt/lib/asan/asan_thread.cpp
index 2627ae1..0ed58bb 100644
--- a/compiler-rt/lib/asan/asan_thread.cpp
+++ b/compiler-rt/lib/asan/asan_thread.cpp
@@ -163,7 +163,7 @@ void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
if (fake_stack_save)
*fake_stack_save = fake_stack_;
fake_stack_ = nullptr;
- SetTLSFakeStack(nullptr);
+ ResetTLSFakeStack();
// if fake_stack_save is null, the fiber will die, delete the fakestack
if (!fake_stack_save && current_fake_stack)
current_fake_stack->Destroy(this->tid());
@@ -177,8 +177,8 @@ void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save, uptr *bottom_old,
}
if (fake_stack_save) {
- SetTLSFakeStack(fake_stack_save);
fake_stack_ = fake_stack_save;
+ ResetTLSFakeStack();
}
if (bottom_old)
@@ -242,7 +242,7 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
fake_stack_ = FakeStack::Create(stack_size_log);
DCHECK_EQ(GetCurrentThread(), this);
- SetTLSFakeStack(fake_stack_);
+ ResetTLSFakeStack();
return fake_stack_;
}
return nullptr;
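
With the setter no longer exported, FinishSwitchFiber() restores fake_stack_ and merely resets the thread-local cache instead of writing the saved pointer back into it; the next fast-path lookup repopulates the cache lazily. A simplified sketch of that save/clear/restore choreography, using hypothetical stub types rather than the real AsanThread/FakeStack code:

#include <cassert>

// Hypothetical, simplified stand-ins; not the real ASan types.
struct FakeStackStub {};

static thread_local FakeStackStub* tls_cache = nullptr;  // fast-path cache
static FakeStackStub* thread_fake_stack = nullptr;       // owned by the thread

static void ResetCache() { tls_cache = nullptr; }

// Mirrors StartSwitchFiber(): stash the current fake stack and make sure a
// stale pointer cannot be served from the cache while the fiber runs on a
// different stack.
static void StartSwitch(FakeStackStub** save) {
  *save = thread_fake_stack;
  thread_fake_stack = nullptr;
  ResetCache();
}

// Mirrors the updated FinishSwitchFiber(): restore the saved fake stack but
// only clear the cache; it is refilled on the next fast-path lookup.
static void FinishSwitch(FakeStackStub* saved) {
  thread_fake_stack = saved;
  ResetCache();
}

int main() {
  FakeStackStub fs;
  thread_fake_stack = &fs;
  FakeStackStub* saved = nullptr;
  StartSwitch(&saved);
  assert(thread_fake_stack == nullptr && tls_cache == nullptr);
  FinishSwitch(saved);
  assert(thread_fake_stack == &fs && tls_cache == nullptr);
}
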
diff --git a/compiler-rt/lib/asan/asan_thread.h b/compiler-rt/lib/asan/asan_thread.h
index 12f0cc7..19b7f34 100644
--- a/compiler-rt/lib/asan/asan_thread.h
+++ b/compiler-rt/lib/asan/asan_thread.h
@@ -104,7 +104,7 @@ class AsanThread {
if (!fake_stack_) return;
FakeStack *t = fake_stack_;
fake_stack_ = nullptr;
- SetTLSFakeStack(nullptr);
+ ResetTLSFakeStack();
t->Destroy(tid);
}
diff --git a/compiler-rt/lib/asan/tests/CMakeLists.txt b/compiler-rt/lib/asan/tests/CMakeLists.txt
index 9cd9c97..6d88c96 100644
--- a/compiler-rt/lib/asan/tests/CMakeLists.txt
+++ b/compiler-rt/lib/asan/tests/CMakeLists.txt
@@ -170,11 +170,21 @@ function(add_asan_tests arch test_runtime)
set(CONFIG_NAME ${ARCH_UPPER_CASE}${OS_NAME}Config)
set(CONFIG_NAME_DYNAMIC ${ARCH_UPPER_CASE}${OS_NAME}DynamicConfig)
+ # On some platforms, unit tests can be run against the runtime that shipped
+ # with the host compiler with COMPILER_RT_TEST_STANDALONE_BUILD_LIBS=OFF.
+ # COMPILER_RT_ASAN_UNIT_TESTS_USE_HOST_RUNTIME=ON removes the dependency
+ # on `asan`, allowing the tests to be run independently without
+ # a newly built asan runtime.
+ set(ASAN_UNIT_TEST_DEPS asan)
+ if(COMPILER_RT_ASAN_UNIT_TESTS_USE_HOST_RUNTIME)
+ set(ASAN_UNIT_TEST_DEPS)
+ endif()
+
# Closure to keep the values.
function(generate_asan_tests test_objects test_suite testname)
generate_compiler_rt_tests(${test_objects} ${test_suite} ${testname} ${arch}
COMPILE_DEPS ${ASAN_UNITTEST_HEADERS} ${ASAN_IGNORELIST_FILE}
- DEPS asan
+ DEPS ${ASAN_UNIT_TEST_DEPS}
KIND ${TEST_KIND}
${ARGN}
)
@@ -215,7 +225,7 @@ function(add_asan_tests arch test_runtime)
add_compiler_rt_test(AsanDynamicUnitTests "${dynamic_test_name}" "${arch}"
SUBDIR "${CONFIG_NAME_DYNAMIC}"
OBJECTS ${ASAN_INST_TEST_OBJECTS}
- DEPS asan ${ASAN_INST_TEST_OBJECTS}
+ DEPS ${ASAN_UNIT_TEST_DEPS} ${ASAN_INST_TEST_OBJECTS}
LINK_FLAGS ${ASAN_DYNAMIC_UNITTEST_INSTRUMENTED_LINK_FLAGS} ${TARGET_LINK_FLAGS} ${DYNAMIC_LINK_FLAGS}
)
endif()
diff --git a/compiler-rt/lib/builtins/cpu_model/x86.c b/compiler-rt/lib/builtins/cpu_model/x86.c
index a40675c..d91e13c 100644
--- a/compiler-rt/lib/builtins/cpu_model/x86.c
+++ b/compiler-rt/lib/builtins/cpu_model/x86.c
@@ -520,6 +520,13 @@ static const char *getIntelProcessorTypeAndSubtype(unsigned Family,
*Subtype = INTEL_COREI7_PANTHERLAKE;
break;
+ // Wildcatlake:
+ case 0xd5:
+ CPU = "wildcatlake";
+ *Type = INTEL_COREI7;
+ *Subtype = INTEL_COREI7_PANTHERLAKE;
+ break;
+
// Icelake Xeon:
case 0x6a:
case 0x6c:
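
For reference, the new case matches CPU model 0xd5 in the family-6 model switch inside getIntelProcessorTypeAndSubtype(), and Wildcatlake reuses the Pantherlake subtype. The model number comes from CPUID leaf 1, where the extended-model bits are folded into the base model for family 6 and 0xf parts. A minimal sketch of that decoding, assuming an x86 host and GCC/Clang's <cpuid.h> (written in C++ for consistency with the earlier sketches; not part of the patch):

#include <cpuid.h>
#include <cstdio>

int main() {
  unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
    return 1;  // CPUID leaf 1 unavailable
  unsigned family = (eax >> 8) & 0xf;   // bits 8-11
  unsigned model = (eax >> 4) & 0xf;    // bits 4-7
  if (family == 0xf)
    family += (eax >> 20) & 0xff;       // extended family, bits 20-27
  if (family == 0x6 || family == 0xf)
    model += ((eax >> 16) & 0xf) << 4;  // extended model, bits 16-19
  // Wildcatlake corresponds to family 0x6, model 0xd5 in the switch above.
  std::printf("family 0x%x model 0x%x\n", family, model);
}
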