Diffstat (limited to 'compiler-rt')
-rw-r--r--  compiler-rt/CMakeLists.txt                    2
-rw-r--r--  compiler-rt/lib/asan/asan_fake_stack.cpp    151
-rw-r--r--  compiler-rt/lib/asan/asan_fake_stack.h        3
-rw-r--r--  compiler-rt/lib/asan/asan_thread.cpp          6
-rw-r--r--  compiler-rt/lib/asan/asan_thread.h            2
-rw-r--r--  compiler-rt/lib/asan/tests/CMakeLists.txt    14
-rw-r--r--  compiler-rt/lib/builtins/cpu_model/x86.c      7
-rw-r--r--  compiler-rt/lib/msan/msan.h                   1
-rw-r--r--  compiler-rt/lib/msan/msan_allocator.cpp      44
-rw-r--r--  compiler-rt/lib/msan/msan_report.cpp          4
-rw-r--r--  compiler-rt/test/msan/allocator_padding.cpp  94
-rw-r--r--  compiler-rt/test/msan/zero_alloc.cpp         11
12 files changed, 253 insertions(+), 86 deletions(-)
diff --git a/compiler-rt/CMakeLists.txt b/compiler-rt/CMakeLists.txt
index 9f8e833..5931b60 100644
--- a/compiler-rt/CMakeLists.txt
+++ b/compiler-rt/CMakeLists.txt
@@ -83,6 +83,8 @@ mark_as_advanced(COMPILER_RT_BUILD_ORC)
option(COMPILER_RT_BUILD_GWP_ASAN "Build GWP-ASan, and link it into SCUDO" ON)
mark_as_advanced(COMPILER_RT_BUILD_GWP_ASAN)
option(COMPILER_RT_ENABLE_CET "Build Compiler RT with CET enabled" OFF)
+option(COMPILER_RT_ASAN_UNIT_TESTS_USE_HOST_RUNTIME "Build asan unit tests without depending upon a just-built asan runtime" OFF)
+mark_as_advanced(COMPILER_RT_ASAN_UNIT_TESTS_USE_HOST_RUNTIME)
option(COMPILER_RT_SCUDO_STANDALONE_SYSROOT_PATH "Set custom sysroot for building SCUDO standalone" OFF)
mark_as_advanced(COMPILER_RT_SCUDO_STANDALONE_SYSROOT_PATH)
diff --git a/compiler-rt/lib/asan/asan_fake_stack.cpp b/compiler-rt/lib/asan/asan_fake_stack.cpp
index c3ed252..d3fa953 100644
--- a/compiler-rt/lib/asan/asan_fake_stack.cpp
+++ b/compiler-rt/lib/asan/asan_fake_stack.cpp
@@ -28,7 +28,7 @@ static const u64 kAllocaRedzoneMask = 31UL;
// For small size classes inline PoisonShadow for better performance.
ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
CHECK(AddrIsAlignedByGranularity(ptr + size));
- u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
+ u64* shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
if (ASAN_SHADOW_SCALE == 3 && class_id <= 6) {
// This code expects ASAN_SHADOW_SCALE=3.
for (uptr i = 0; i < (((uptr)1) << class_id); i++) {
@@ -47,7 +47,7 @@ ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
}
}
-FakeStack *FakeStack::Create(uptr stack_size_log) {
+FakeStack* FakeStack::Create(uptr stack_size_log) {
static uptr kMinStackSizeLog = 16;
static uptr kMaxStackSizeLog = FIRST_32_SECOND_64(24, 28);
if (stack_size_log < kMinStackSizeLog)
@@ -57,7 +57,7 @@ FakeStack *FakeStack::Create(uptr stack_size_log) {
CHECK_LE(kMaxStackFrameSizeLog, stack_size_log);
uptr size = RequiredSize(stack_size_log);
uptr padded_size = size + kMaxStackFrameSize;
- void *true_res = reinterpret_cast<void *>(
+ void* true_res = reinterpret_cast<void*>(
flags()->uar_noreserve ? MmapNoReserveOrDie(padded_size, "FakeStack")
: MmapOrDie(padded_size, "FakeStack"));
// GetFrame() requires the property that
@@ -66,20 +66,20 @@ FakeStack *FakeStack::Create(uptr stack_size_log) {
// We didn't use MmapAlignedOrDieOnFatalError, because it requires that the
// *size* is a power of 2, which is an overly strong condition.
static_assert(alignof(FakeStack) <= kMaxStackFrameSize);
- FakeStack *res = reinterpret_cast<FakeStack *>(
+ FakeStack* res = reinterpret_cast<FakeStack*>(
RoundUpTo(
(uptr)true_res + kFlagsOffset + SizeRequiredForFlags(stack_size_log),
kMaxStackFrameSize) -
kFlagsOffset - SizeRequiredForFlags(stack_size_log));
res->true_start = true_res;
res->stack_size_log_ = stack_size_log;
- u8 *p = reinterpret_cast<u8 *>(res);
+ u8* p = reinterpret_cast<u8*>(res);
VReport(1,
"T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
"mmapped %zdK, noreserve=%d, true_start: %p, start of first frame: "
"0x%zx\n",
- GetCurrentTidOrInvalid(), (void *)p,
- (void *)(p + FakeStack::RequiredSize(stack_size_log)), stack_size_log,
+ GetCurrentTidOrInvalid(), (void*)p,
+ (void*)(p + FakeStack::RequiredSize(stack_size_log)), stack_size_log,
size >> 10, flags()->uar_noreserve, res->true_start,
res->GetFrame(stack_size_log, /*class_id*/ 0, /*pos*/ 0));
return res;
@@ -109,14 +109,14 @@ void FakeStack::PoisonAll(u8 magic) {
#if !defined(_MSC_VER) || defined(__clang__)
ALWAYS_INLINE USED
#endif
-FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
- uptr real_stack) {
+ FakeFrame* FakeStack::Allocate(uptr stack_size_log, uptr class_id,
+ uptr real_stack) {
CHECK_LT(class_id, kNumberOfSizeClasses);
if (needs_gc_)
GC(real_stack);
- uptr &hint_position = hint_position_[class_id];
+ uptr& hint_position = hint_position_[class_id];
const int num_iter = NumberOfFrames(stack_size_log, class_id);
- u8 *flags = GetFlags(stack_size_log, class_id);
+ u8* flags = GetFlags(stack_size_log, class_id);
for (int i = 0; i < num_iter; i++) {
uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++);
// This part is tricky. On one hand, checking and setting flags[pos]
@@ -126,22 +126,24 @@ FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
// and so will not touch this particular byte. So, it is safe to do this
// with regular non-atomic load and store (at least I was not able to make
// this code crash).
- if (flags[pos]) continue;
+ if (flags[pos])
+ continue;
flags[pos] = 1;
- FakeFrame *res = reinterpret_cast<FakeFrame *>(
- GetFrame(stack_size_log, class_id, pos));
+ FakeFrame* res =
+ reinterpret_cast<FakeFrame*>(GetFrame(stack_size_log, class_id, pos));
res->real_stack = real_stack;
*SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos];
return res;
}
- return nullptr; // We are out of fake stack.
+ return nullptr; // We are out of fake stack.
}
-uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
+uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr* frame_beg, uptr* frame_end) {
uptr stack_size_log = this->stack_size_log();
uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
- if (ptr < beg || ptr >= end) return 0;
+ if (ptr < beg || ptr >= end)
+ return 0;
uptr class_id = (ptr - beg) >> stack_size_log;
uptr base = beg + (class_id << stack_size_log);
CHECK_LE(base, ptr);
@@ -153,9 +155,7 @@ uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
return res;
}
-void FakeStack::HandleNoReturn() {
- needs_gc_ = true;
-}
+void FakeStack::HandleNoReturn() { needs_gc_ = true; }
// Hack: The statement below is not true if we take into account sigaltstack or
// makecontext. It should be possible to make GC to discard wrong stack frame if
@@ -170,7 +170,7 @@ void FakeStack::HandleNoReturn() {
// We do it based on their 'real_stack' values -- everything that is lower
// than the current real_stack is garbage.
NOINLINE void FakeStack::GC(uptr real_stack) {
- AsanThread *curr_thread = GetCurrentThread();
+ AsanThread* curr_thread = GetCurrentThread();
if (!curr_thread)
return; // Try again when we have a thread.
auto top = curr_thread->stack_top();
@@ -179,12 +179,13 @@ NOINLINE void FakeStack::GC(uptr real_stack) {
return; // Not the default stack.
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
- u8 *flags = GetFlags(stack_size_log(), class_id);
+ u8* flags = GetFlags(stack_size_log(), class_id);
for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
i++) {
- if (flags[i] == 0) continue; // not allocated.
- FakeFrame *ff = reinterpret_cast<FakeFrame *>(
- GetFrame(stack_size_log(), class_id, i));
+ if (flags[i] == 0)
+ continue; // not allocated.
+ FakeFrame* ff =
+ reinterpret_cast<FakeFrame*>(GetFrame(stack_size_log(), class_id, i));
// GC only on the default stack.
if (bottom < ff->real_stack && ff->real_stack < real_stack) {
flags[i] = 0;
@@ -197,14 +198,15 @@ NOINLINE void FakeStack::GC(uptr real_stack) {
needs_gc_ = false;
}
-void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) {
+void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void* arg) {
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
- u8 *flags = GetFlags(stack_size_log(), class_id);
+ u8* flags = GetFlags(stack_size_log(), class_id);
for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
i++) {
- if (flags[i] == 0) continue; // not allocated.
- FakeFrame *ff = reinterpret_cast<FakeFrame *>(
- GetFrame(stack_size_log(), class_id, i));
+ if (flags[i] == 0)
+ continue; // not allocated.
+ FakeFrame* ff =
+ reinterpret_cast<FakeFrame*>(GetFrame(stack_size_log(), class_id, i));
uptr begin = reinterpret_cast<uptr>(ff);
callback(begin, begin + FakeStack::BytesInSizeClass(class_id), arg);
}
@@ -212,44 +214,51 @@ void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) {
}
#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA
-static THREADLOCAL FakeStack *fake_stack_tls;
+static THREADLOCAL FakeStack* fake_stack_tls;
-FakeStack *GetTLSFakeStack() {
- return fake_stack_tls;
-}
-void SetTLSFakeStack(FakeStack *fs) {
- fake_stack_tls = fs;
-}
+static FakeStack* GetTLSFakeStack() { return fake_stack_tls; }
+static void SetTLSFakeStack(FakeStack* fs) { fake_stack_tls = fs; }
+void ResetTLSFakeStack() { fake_stack_tls = nullptr; }
#else
-FakeStack *GetTLSFakeStack() { return 0; }
-void SetTLSFakeStack(FakeStack *fs) { }
+static FakeStack* GetTLSFakeStack() { return nullptr; }
+static void SetTLSFakeStack(FakeStack*) {}
+void ResetTLSFakeStack() {}
#endif // (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA
-static FakeStack *GetFakeStack() {
- AsanThread *t = GetCurrentThread();
- if (!t) return nullptr;
+static FakeStack* GetFakeStack() {
+ AsanThread* t = GetCurrentThread();
+ if (!t)
+ return nullptr;
return t->get_or_create_fake_stack();
}
-static FakeStack *GetFakeStackFast() {
- if (FakeStack *fs = GetTLSFakeStack())
+static FakeStack* GetFakeStackFast() {
+ FakeStack* fs = GetTLSFakeStack();
+ if (LIKELY(fs))
return fs;
if (!__asan_option_detect_stack_use_after_return)
return nullptr;
- return GetFakeStack();
+ fs = GetFakeStack();
+ if (LIKELY(fs))
+ SetTLSFakeStack(fs);
+ return fs;
}
-static FakeStack *GetFakeStackFastAlways() {
- if (FakeStack *fs = GetTLSFakeStack())
+static FakeStack* GetFakeStackFastAlways() {
+ FakeStack* fs = GetTLSFakeStack();
+ if (LIKELY(fs))
return fs;
- return GetFakeStack();
+ fs = GetFakeStack();
+ if (LIKELY(fs))
+ SetTLSFakeStack(fs);
+ return fs;
}
static ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
- FakeStack *fs = GetFakeStackFast();
+ FakeStack* fs = GetFakeStackFast();
if (!fs)
return 0;
- FakeFrame *ff =
+ FakeFrame* ff =
fs->Allocate(fs->stack_size_log(), class_id, GET_CURRENT_FRAME());
if (!ff)
return 0; // Out of fake stack.
@@ -259,10 +268,10 @@ static ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
}
static ALWAYS_INLINE uptr OnMallocAlways(uptr class_id, uptr size) {
- FakeStack *fs = GetFakeStackFastAlways();
+ FakeStack* fs = GetFakeStackFastAlways();
if (!fs)
return 0;
- FakeFrame *ff =
+ FakeFrame* ff =
fs->Allocate(fs->stack_size_log(), class_id, GET_CURRENT_FRAME());
if (!ff)
return 0; // Out of fake stack.
@@ -276,17 +285,17 @@ static ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
SetShadow(ptr, size, class_id, kMagic8);
}
-} // namespace __asan
+} // namespace __asan
// ---------------------- Interface ---------------- {{{1
using namespace __asan;
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
- __asan_stack_malloc_##class_id(uptr size) { \
+ __asan_stack_malloc_##class_id(uptr size) { \
return OnMalloc(class_id, size); \
} \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
- __asan_stack_malloc_always_##class_id(uptr size) { \
+ __asan_stack_malloc_always_##class_id(uptr size) { \
return OnMallocAlways(class_id, size); \
} \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
@@ -311,21 +320,25 @@ extern "C" {
// -asan-use-after-return=never, after modal UAR flag lands
// (https://github.com/google/sanitizers/issues/1394)
SANITIZER_INTERFACE_ATTRIBUTE
-void *__asan_get_current_fake_stack() { return GetFakeStackFast(); }
+void* __asan_get_current_fake_stack() { return GetFakeStackFast(); }
SANITIZER_INTERFACE_ATTRIBUTE
-void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
- void **end) {
- FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack);
- if (!fs) return nullptr;
+void* __asan_addr_is_in_fake_stack(void* fake_stack, void* addr, void** beg,
+ void** end) {
+ FakeStack* fs = reinterpret_cast<FakeStack*>(fake_stack);
+ if (!fs)
+ return nullptr;
uptr frame_beg, frame_end;
- FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack(
+ FakeFrame* frame = reinterpret_cast<FakeFrame*>(fs->AddrIsInFakeStack(
reinterpret_cast<uptr>(addr), &frame_beg, &frame_end));
- if (!frame) return nullptr;
+ if (!frame)
+ return nullptr;
if (frame->magic != kCurrentStackFrameMagic)
return nullptr;
- if (beg) *beg = reinterpret_cast<void*>(frame_beg);
- if (end) *end = reinterpret_cast<void*>(frame_end);
+ if (beg)
+ *beg = reinterpret_cast<void*>(frame_beg);
+ if (end)
+ *end = reinterpret_cast<void*>(frame_end);
return reinterpret_cast<void*>(frame->real_stack);
}
@@ -344,9 +357,9 @@ void __asan_alloca_poison(uptr addr, uptr size) {
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_allocas_unpoison(uptr top, uptr bottom) {
- if ((!top) || (top > bottom)) return;
- REAL(memset)
- (reinterpret_cast<void *>(MemToShadow(top)), 0,
- (bottom - top) / ASAN_SHADOW_GRANULARITY);
+ if ((!top) || (top > bottom))
+ return;
+ REAL(memset)(reinterpret_cast<void*>(MemToShadow(top)), 0,
+ (bottom - top) / ASAN_SHADOW_GRANULARITY);
}
-} // extern "C"
+} // extern "C"
diff --git a/compiler-rt/lib/asan/asan_fake_stack.h b/compiler-rt/lib/asan/asan_fake_stack.h
index 50706e6..593c137 100644
--- a/compiler-rt/lib/asan/asan_fake_stack.h
+++ b/compiler-rt/lib/asan/asan_fake_stack.h
@@ -195,8 +195,7 @@ class FakeStack {
void *true_start;
};
-FakeStack *GetTLSFakeStack();
-void SetTLSFakeStack(FakeStack *fs);
+void ResetTLSFakeStack();
} // namespace __asan
diff --git a/compiler-rt/lib/asan/asan_thread.cpp b/compiler-rt/lib/asan/asan_thread.cpp
index 2627ae1..0ed58bb 100644
--- a/compiler-rt/lib/asan/asan_thread.cpp
+++ b/compiler-rt/lib/asan/asan_thread.cpp
@@ -163,7 +163,7 @@ void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
if (fake_stack_save)
*fake_stack_save = fake_stack_;
fake_stack_ = nullptr;
- SetTLSFakeStack(nullptr);
+ ResetTLSFakeStack();
// if fake_stack_save is null, the fiber will die, delete the fakestack
if (!fake_stack_save && current_fake_stack)
current_fake_stack->Destroy(this->tid());
@@ -177,8 +177,8 @@ void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save, uptr *bottom_old,
}
if (fake_stack_save) {
- SetTLSFakeStack(fake_stack_save);
fake_stack_ = fake_stack_save;
+ ResetTLSFakeStack();
}
if (bottom_old)
@@ -242,7 +242,7 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
fake_stack_ = FakeStack::Create(stack_size_log);
DCHECK_EQ(GetCurrentThread(), this);
- SetTLSFakeStack(fake_stack_);
+ ResetTLSFakeStack();
return fake_stack_;
}
return nullptr;
diff --git a/compiler-rt/lib/asan/asan_thread.h b/compiler-rt/lib/asan/asan_thread.h
index 12f0cc7..19b7f34 100644
--- a/compiler-rt/lib/asan/asan_thread.h
+++ b/compiler-rt/lib/asan/asan_thread.h
@@ -104,7 +104,7 @@ class AsanThread {
if (!fake_stack_) return;
FakeStack *t = fake_stack_;
fake_stack_ = nullptr;
- SetTLSFakeStack(nullptr);
+ ResetTLSFakeStack();
t->Destroy(tid);
}
diff --git a/compiler-rt/lib/asan/tests/CMakeLists.txt b/compiler-rt/lib/asan/tests/CMakeLists.txt
index 9cd9c97..6d88c96 100644
--- a/compiler-rt/lib/asan/tests/CMakeLists.txt
+++ b/compiler-rt/lib/asan/tests/CMakeLists.txt
@@ -170,11 +170,21 @@ function(add_asan_tests arch test_runtime)
set(CONFIG_NAME ${ARCH_UPPER_CASE}${OS_NAME}Config)
set(CONFIG_NAME_DYNAMIC ${ARCH_UPPER_CASE}${OS_NAME}DynamicConfig)
+ # On some platforms, the unit tests can be run against the runtime that
+ # shipped with the host compiler (COMPILER_RT_TEST_STANDALONE_BUILD_LIBS=OFF).
+ # COMPILER_RT_ASAN_UNIT_TESTS_USE_HOST_RUNTIME=ON removes the dependency
+ # on `asan`, so the tests can be built and run without a just-built asan
+ # runtime.
+ set(ASAN_UNIT_TEST_DEPS asan)
+ if(COMPILER_RT_ASAN_UNIT_TESTS_USE_HOST_RUNTIME)
+ set(ASAN_UNIT_TEST_DEPS)
+ endif()
+
# Closure to keep the values.
function(generate_asan_tests test_objects test_suite testname)
generate_compiler_rt_tests(${test_objects} ${test_suite} ${testname} ${arch}
COMPILE_DEPS ${ASAN_UNITTEST_HEADERS} ${ASAN_IGNORELIST_FILE}
- DEPS asan
+ DEPS ${ASAN_UNIT_TEST_DEPS}
KIND ${TEST_KIND}
${ARGN}
)
@@ -215,7 +225,7 @@ function(add_asan_tests arch test_runtime)
add_compiler_rt_test(AsanDynamicUnitTests "${dynamic_test_name}" "${arch}"
SUBDIR "${CONFIG_NAME_DYNAMIC}"
OBJECTS ${ASAN_INST_TEST_OBJECTS}
- DEPS asan ${ASAN_INST_TEST_OBJECTS}
+ DEPS ${ASAN_UNIT_TEST_DEPS} ${ASAN_INST_TEST_OBJECTS}
LINK_FLAGS ${ASAN_DYNAMIC_UNITTEST_INSTRUMENTED_LINK_FLAGS} ${TARGET_LINK_FLAGS} ${DYNAMIC_LINK_FLAGS}
)
endif()
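With the new option, a standalone compiler-rt configure that runs the asan unit tests against the host toolchain's runtime might look like this (paths are illustrative, and it assumes the host clang ships a compatible asan runtime):

cmake -G Ninja ../compiler-rt \
  -DCOMPILER_RT_TEST_STANDALONE_BUILD_LIBS=OFF \
  -DCOMPILER_RT_ASAN_UNIT_TESTS_USE_HOST_RUNTIME=ON

With both flags set, the unit-test binaries no longer depend on the `asan` build target.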
diff --git a/compiler-rt/lib/builtins/cpu_model/x86.c b/compiler-rt/lib/builtins/cpu_model/x86.c
index a40675c..d91e13c 100644
--- a/compiler-rt/lib/builtins/cpu_model/x86.c
+++ b/compiler-rt/lib/builtins/cpu_model/x86.c
@@ -520,6 +520,13 @@ static const char *getIntelProcessorTypeAndSubtype(unsigned Family,
*Subtype = INTEL_COREI7_PANTHERLAKE;
break;
+ // Wildcatlake:
+ case 0xd5:
+ CPU = "wildcatlake";
+ *Type = INTEL_COREI7;
+ *Subtype = INTEL_COREI7_PANTHERLAKE;
+ break;
+
// Icelake Xeon:
case 0x6a:
case 0x6c:
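Because the new Wildcatlake entry (model 0xd5) reuses the Pantherlake subtype, dispatch keyed on that subtype also covers Wildcatlake parts. A minimal x86-only sketch, assuming a compiler recent enough to accept the "pantherlake" name for __builtin_cpu_is:

#include <cstdio>

int main() {
  __builtin_cpu_init();  // populate the CPU model data used by the builtins
  // Model 0xd5 sets *Subtype = INTEL_COREI7_PANTHERLAKE, so this test is
  // true on Wildcatlake as well as Pantherlake parts.
  if (__builtin_cpu_is("pantherlake"))
    std::printf("pantherlake-class CPU detected\n");
  return 0;
}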
diff --git a/compiler-rt/lib/msan/msan.h b/compiler-rt/lib/msan/msan.h
index 7fb58be..edb2699 100644
--- a/compiler-rt/lib/msan/msan.h
+++ b/compiler-rt/lib/msan/msan.h
@@ -303,6 +303,7 @@ u32 ChainOrigin(u32 id, StackTrace *stack);
const int STACK_TRACE_TAG_POISON = StackTrace::TAG_CUSTOM + 1;
const int STACK_TRACE_TAG_FIELDS = STACK_TRACE_TAG_POISON + 1;
const int STACK_TRACE_TAG_VPTR = STACK_TRACE_TAG_FIELDS + 1;
+const int STACK_TRACE_TAG_ALLOC_PADDING = STACK_TRACE_TAG_VPTR + 1;
#define GET_MALLOC_STACK_TRACE \
UNINITIALIZED BufferedStackTrace stack; \
diff --git a/compiler-rt/lib/msan/msan_allocator.cpp b/compiler-rt/lib/msan/msan_allocator.cpp
index 64df863..80608aa 100644
--- a/compiler-rt/lib/msan/msan_allocator.cpp
+++ b/compiler-rt/lib/msan/msan_allocator.cpp
@@ -217,25 +217,52 @@ static void *MsanAllocate(BufferedStackTrace *stack, uptr size, uptr alignment,
}
auto *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
meta->requested_size = size;
+ uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(allocated);
+ void* padding_start = reinterpret_cast<char*>(allocated) + size;
+ uptr padding_size = actually_allocated_size - size;
+
+ // - With calloc(7,1), we can set the ideal tagging:
+ // bytes 0-6: initialized, origin not set (and irrelevant)
+ // byte 7: uninitialized, origin TAG_ALLOC_PADDING
+ // bytes 8-15: uninitialized, origin TAG_ALLOC_PADDING
+ // - If we have malloc(7) and __msan_get_track_origins() > 1, the 4-byte
+ // origin granularity only allows the slightly suboptimal tagging:
+ // bytes 0-6: uninitialized, origin TAG_ALLOC
+ // byte 7: uninitialized, origin TAG_ALLOC (suboptimal)
+ // bytes 8-15: uninitialized, origin TAG_ALLOC_PADDING
+ // - If we have malloc(7) and __msan_get_track_origins() == 1, we use a
+ // single origin to reduce overhead:
+ // bytes 0-6: uninitialized, origin TAG_ALLOC
+ // byte 7: uninitialized, origin TAG_ALLOC (suboptimal)
+ // bytes 8-15: uninitialized, origin TAG_ALLOC (suboptimal)
+ if (__msan_get_track_origins() && flags()->poison_in_malloc &&
+ (zero || (__msan_get_track_origins() > 1))) {
+ stack->tag = STACK_TRACE_TAG_ALLOC_PADDING;
+ Origin o2 = Origin::CreateHeapOrigin(stack);
+ __msan_set_origin(padding_start, padding_size, o2.raw_id());
+ }
+
if (zero) {
if (allocator.FromPrimary(allocated))
__msan_clear_and_unpoison(allocated, size);
else
__msan_unpoison(allocated, size); // Mem is already zeroed.
+
+ if (flags()->poison_in_malloc)
+ __msan_poison(padding_start, padding_size);
} else if (flags()->poison_in_malloc) {
- __msan_poison(allocated, size);
+ __msan_poison(allocated, actually_allocated_size);
+
if (__msan_get_track_origins()) {
stack->tag = StackTrace::TAG_ALLOC;
Origin o = Origin::CreateHeapOrigin(stack);
- __msan_set_origin(allocated, size, o.raw_id());
+ __msan_set_origin(
+ allocated,
+ __msan_get_track_origins() == 1 ? actually_allocated_size : size,
+ o.raw_id());
}
}
- uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(allocated);
- // For compatibility, the allocator converted 0-sized allocations into 1 byte
- if (size == 0 && actually_allocated_size > 0 && flags()->poison_in_malloc)
- __msan_poison(allocated, 1);
-
UnpoisonParam(2);
RunMallocHooks(allocated, size);
return allocated;
@@ -255,9 +282,10 @@ void __msan::MsanDeallocate(BufferedStackTrace *stack, void *p) {
if (flags()->poison_in_free && allocator.FromPrimary(p)) {
__msan_poison(p, size);
if (__msan_get_track_origins()) {
+ uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(p);
stack->tag = StackTrace::TAG_DEALLOC;
Origin o = Origin::CreateHeapOrigin(stack);
- __msan_set_origin(p, size, o.raw_id());
+ __msan_set_origin(p, actually_allocated_size, o.raw_id());
}
}
if (MsanThread *t = GetCurrentThread()) {
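The tagging table in the comment above can be restated as a tiny standalone planner. This is an illustrative sketch only (the enum, struct, and function are hypothetical, not the runtime's internals); it assumes poison_in_malloc is on and ignores the 4-byte origin granularity at the size boundary:

#include <cstddef>

enum class Tag { Alloc, AllocPadding };
struct OriginPlan {
  size_t user_bytes;     // bytes from the start tagged Tag::Alloc
  size_t padding_bytes;  // bytes after them tagged Tag::AllocPadding
};

// size: requested; actual: allocator.GetActuallyAllocatedSize();
// zeroed: calloc-style allocation; origins: __msan_get_track_origins().
OriginPlan PlanOrigins(size_t size, size_t actual, bool zeroed, int origins) {
  if (origins == 0)
    return {0, 0};                 // origin tracking disabled
  if (zeroed)
    return {0, actual - size};     // user bytes initialized; only the padding needs an origin
  if (origins > 1)
    return {size, actual - size};  // split at the requested size
  return {actual, 0};              // origins == 1: one TAG_ALLOC origin covers everything
}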
diff --git a/compiler-rt/lib/msan/msan_report.cpp b/compiler-rt/lib/msan/msan_report.cpp
index 99bf81f..cd0bf67 100644
--- a/compiler-rt/lib/msan/msan_report.cpp
+++ b/compiler-rt/lib/msan/msan_report.cpp
@@ -90,6 +90,10 @@ static void DescribeOrigin(u32 id) {
Printf(" %sVirtual table ptr was destroyed%s\n", d.Origin(),
d.Default());
break;
+ case STACK_TRACE_TAG_ALLOC_PADDING:
+ Printf(" %sUninitialized value is outside of heap allocation%s\n",
+ d.Origin(), d.Default());
+ break;
default:
Printf(" %sUninitialized value was created%s\n", d.Origin(),
d.Default());
diff --git a/compiler-rt/test/msan/allocator_padding.cpp b/compiler-rt/test/msan/allocator_padding.cpp
new file mode 100644
index 0000000..72acf31
--- /dev/null
+++ b/compiler-rt/test/msan/allocator_padding.cpp
@@ -0,0 +1,94 @@
+// *** malloc: all bytes are uninitialized
+// * malloc byte 0
+// RUN: %clang_msan -fsanitize-memory-track-origins=1 %s -o %t && not %run %t 0 2>&1 \
+// RUN: | FileCheck %s --check-prefixes=CHECK,ORIGIN-ALLOC
+// RUN: %clang_msan -fsanitize-memory-track-origins=2 %s -o %t && not %run %t 0 2>&1 \
+// RUN: | FileCheck %s --check-prefixes=CHECK,ORIGIN-ALLOC
+//
+// * malloc byte 6
+// RUN: %clang_msan -fsanitize-memory-track-origins=2 %s -o %t && not %run %t 6 2>&1 \
+// RUN: | FileCheck %s --check-prefixes=CHECK,ORIGIN-ALLOC
+// RUN: %clang_msan -fsanitize-memory-track-origins=1 %s -o %t && not %run %t 6 2>&1 \
+// RUN: | FileCheck %s --check-prefixes=CHECK,ORIGIN-ALLOC
+//
+// This test assumes the allocator allocates 16 bytes for malloc(7). Bytes
+// 7-15 are padding.
+//
+// * malloc byte 7
+// Edge case: when the origin granularity spans both ALLOC and ALLOC_PADDING,
+// ALLOC always takes precedence.
+// RUN: %clang_msan -fsanitize-memory-track-origins=1 %s -o %t && not %run %t 7 2>&1 \
+// RUN: | FileCheck %s --check-prefixes=CHECK,ORIGIN-ALLOC
+// RUN: %clang_msan -fsanitize-memory-track-origins=2 %s -o %t && not %run %t 7 2>&1 \
+// RUN: | FileCheck %s --check-prefixes=CHECK,ORIGIN-ALLOC
+//
+// Bytes 8-15 are padding
+// For track-origins=1, ALLOC is used instead of ALLOC_PADDING.
+//
+// * malloc byte 8
+// RUN: %clang_msan -fsanitize-memory-track-origins=1 %s -o %t && not %run %t 8 2>&1 \
+// RUN: | FileCheck %s --check-prefixes=CHECK,ORIGIN-ALLOC
+// RUN: %clang_msan -fsanitize-memory-track-origins=2 %s -o %t && not %run %t 8 2>&1 \
+// RUN: | FileCheck %s --check-prefixes=CHECK,ORIGIN-ALLOC-PADDING
+//
+// * malloc byte 15
+// RUN: %clang_msan -fsanitize-memory-track-origins=1 %s -o %t && not %run %t 15 2>&1 \
+// RUN: | FileCheck %s --check-prefixes=CHECK,ORIGIN-ALLOC
+// RUN: %clang_msan -fsanitize-memory-track-origins=2 %s -o %t && not %run %t 15 2>&1 \
+// RUN: | FileCheck %s --check-prefixes=CHECK,ORIGIN-ALLOC-PADDING
+
+// *** calloc
+// Bytes 0-6 are fully initialized, so no MSan report should happen.
+//
+// * calloc byte 0
+// RUN: %clang_msan -fsanitize-memory-track-origins=1 -DUSE_CALLOC %s -o %t && %run %t 0 2>&1
+// RUN: %clang_msan -fsanitize-memory-track-origins=2 -DUSE_CALLOC %s -o %t && %run %t 0 2>&1
+//
+// * calloc byte 6
+// RUN: %clang_msan -fsanitize-memory-track-origins=1 -DUSE_CALLOC %s -o %t && %run %t 6 2>&1
+// RUN: %clang_msan -fsanitize-memory-track-origins=2 -DUSE_CALLOC %s -o %t && %run %t 6 2>&1
+//
+// * calloc byte 7
+// Byte 7 is uninitialized. Unlike malloc, this is tagged as ALLOC_PADDING
+// (since the origin does not need to track bytes 4-6).
+// RUN: %clang_msan -fsanitize-memory-track-origins=1 -DUSE_CALLOC %s -o %t && not %run %t 7 2>&1 \
+// RUN: | FileCheck %s --check-prefixes=CHECK,ORIGIN-ALLOC-PADDING
+// RUN: %clang_msan -fsanitize-memory-track-origins=2 -DUSE_CALLOC %s -o %t && not %run %t 7 2>&1 \
+// RUN: | FileCheck %s --check-prefixes=CHECK,ORIGIN-ALLOC-PADDING
+//
+// * calloc byte 8
+// RUN: %clang_msan -fsanitize-memory-track-origins=1 -DUSE_CALLOC %s -o %t && not %run %t 8 2>&1 \
+// RUN: | FileCheck %s --check-prefixes=CHECK,ORIGIN-ALLOC-PADDING
+// RUN: %clang_msan -fsanitize-memory-track-origins=2 -DUSE_CALLOC %s -o %t && not %run %t 8 2>&1 \
+// RUN: | FileCheck %s --check-prefixes=CHECK,ORIGIN-ALLOC-PADDING
+//
+// * calloc byte 15
+// RUN: %clang_msan -fsanitize-memory-track-origins=1 -DUSE_CALLOC %s -o %t && not %run %t 15 2>&1 \
+// RUN: | FileCheck %s --check-prefixes=CHECK,ORIGIN-ALLOC-PADDING
+// RUN: %clang_msan -fsanitize-memory-track-origins=2 -DUSE_CALLOC %s -o %t && not %run %t 15 2>&1 \
+// RUN: | FileCheck %s --check-prefixes=CHECK,ORIGIN-ALLOC-PADDING
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+int main(int argc, char **argv) {
+#ifdef USE_CALLOC
+ char *p = (char *)calloc(7, 1);
+#else
+ char *p = (char *)malloc(7);
+#endif
+
+ if (argc == 2) {
+ int index = atoi(argv[1]);
+
+ printf("p[%d] = %d\n", index, p[index]);
+ // CHECK: WARNING: MemorySanitizer: use-of-uninitialized-value
+ // CHECK: {{#0 0x.* in main .*allocator_padding.cpp:}}[[@LINE-2]]
+ // ORIGIN-ALLOC: Uninitialized value was created by a heap allocation
+ // ORIGIN-ALLOC-PADDING: Uninitialized value is outside of heap allocation
+ free(p);
+ }
+
+ return 0;
+}
diff --git a/compiler-rt/test/msan/zero_alloc.cpp b/compiler-rt/test/msan/zero_alloc.cpp
index 1451e1e..f4cf1d8 100644
--- a/compiler-rt/test/msan/zero_alloc.cpp
+++ b/compiler-rt/test/msan/zero_alloc.cpp
@@ -1,4 +1,9 @@
-// RUN: %clang_msan -Wno-alloc-size -fsanitize-recover=memory %s -o %t && not %run %t 2>&1 | FileCheck %s
+// RUN: %clang_msan -Wno-alloc-size -fsanitize-recover=memory %s -o %t && not %run %t 2>&1 \
+// RUN: | FileCheck %s --check-prefix=CHECK
+// RUN: %clang_msan -Wno-alloc-size -fsanitize-recover=memory -fsanitize-memory-track-origins=1 %s -o %t && not %run %t 2>&1 \
+// RUN: | FileCheck %s --check-prefixes=CHECK,DISCOUNT
+// RUN: %clang_msan -Wno-alloc-size -fsanitize-recover=memory -fsanitize-memory-track-origins=2 %s -o %t && not %run %t 2>&1 \
+// RUN: | FileCheck %s --check-prefixes=CHECK,ORIGINS
#include <stdio.h>
#include <stdlib.h>
@@ -10,6 +15,8 @@ int main(int argc, char **argv) {
printf("Content of p1 is: %d\n", *p1);
// CHECK: WARNING: MemorySanitizer: use-of-uninitialized-value
// CHECK: {{#0 0x.* in main .*zero_alloc.cpp:}}[[@LINE-2]]
+ // DISCOUNT: Uninitialized value is outside of heap allocation
+ // ORIGINS: Uninitialized value is outside of heap allocation
free(p1);
}
@@ -19,6 +26,8 @@ int main(int argc, char **argv) {
printf("Content of p2 is: %d\n", *p2);
// CHECK: WARNING: MemorySanitizer: use-of-uninitialized-value
// CHECK: {{#0 0x.* in main .*zero_alloc.cpp:}}[[@LINE-2]]
+ // DISCOUNT: Uninitialized value is outside of heap allocation
+ // ORIGINS: Uninitialized value is outside of heap allocation
free(p2);
}
@@ -28,6 +37,8 @@ int main(int argc, char **argv) {
printf("Content of p2 is: %d\n", *p3);
// CHECK: WARNING: MemorySanitizer: use-of-uninitialized-value
// CHECK: {{#0 0x.* in main .*zero_alloc.cpp:}}[[@LINE-2]]
+ // DISCOUNT: Uninitialized value was created by a heap allocation
+ // ORIGINS: Uninitialized value is outside of heap allocation
free(p3);
}