author    Marco Elver <elver@google.com>  2022-08-11 10:40:57 +0200
committer Marco Elver <elver@google.com>  2022-08-11 10:43:49 +0200
commit    c47ec95531619fcc4faaf2c74de49779b71a5b5b
tree      ab0e87f1ee79aab51d5f606a35d4e05e4010d6ff
parent    f695554a2a5550ae40da35af9ac22bfcca5db09a
[MemorySanitizer] Support memcpy.inline and memset.inline
Other sanitizers (ASan, TSan, see added tests) already handle memcpy.inline
and memset.inline by not relying on InstVisitor to turn the intrinsics into
calls. Only MSan instrumentation currently does not support them due to
missing InstVisitor callbacks. Fix it by actually making InstVisitor handle
Mem*InlineInst.

While the mem*.inline intrinsics promise no calls to external functions as an
optimization, for the sanitizers we need to break this guarantee since access
into the runtime is required either way, and performance can no longer be
guaranteed. All other cases, where generating a call is incorrect, should
instead use no_sanitize.

Fixes: https://github.com/llvm/llvm-project/issues/57048

Reviewed By: vitalybuka, dvyukov

Differential Revision: https://reviews.llvm.org/D131577
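For illustration (not part of the commit): Clang's __builtin_memcpy_inline
lowers to the llvm.memcpy.inline intrinsic, so a hypothetical MSan user could
hit the missing-callback path like this:

    // Hypothetical reproducer sketch. __builtin_memcpy_inline requires a
    // constant size and lowers to llvm.memcpy.inline; built with
    // -fsanitize=memory, this intrinsic previously bypassed MSan's
    // InstVisitor handling. With this patch it becomes a __msan_memcpy call.
    void copy16(void *dst, const void *src) {
      __builtin_memcpy_inline(dst, src, 16);
    }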
-rw-r--r--  llvm/include/llvm/IR/InstVisitor.h                            |  9
-rw-r--r--  llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp       | 18
-rw-r--r--  llvm/test/Instrumentation/AddressSanitizer/mem-intrinsics.ll  | 15
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll       | 25
-rw-r--r--  llvm/test/Instrumentation/ThreadSanitizer/tsan_basic.ll       | 20
5 files changed, 80 insertions(+), 7 deletions(-)
diff --git a/llvm/include/llvm/IR/InstVisitor.h b/llvm/include/llvm/IR/InstVisitor.h
index 7fec081..311e0ac 100644
--- a/llvm/include/llvm/IR/InstVisitor.h
+++ b/llvm/include/llvm/IR/InstVisitor.h
@@ -207,10 +207,9 @@ public:
RetTy visitDbgLabelInst(DbgLabelInst &I) { DELEGATE(DbgInfoIntrinsic);}
RetTy visitDbgInfoIntrinsic(DbgInfoIntrinsic &I){ DELEGATE(IntrinsicInst); }
RetTy visitMemSetInst(MemSetInst &I) { DELEGATE(MemIntrinsic); }
+ RetTy visitMemSetInlineInst(MemSetInlineInst &I){ DELEGATE(MemSetInst); }
RetTy visitMemCpyInst(MemCpyInst &I) { DELEGATE(MemTransferInst); }
- RetTy visitMemCpyInlineInst(MemCpyInlineInst &I) {
- DELEGATE(MemTransferInst);
- }
+ RetTy visitMemCpyInlineInst(MemCpyInlineInst &I){ DELEGATE(MemCpyInst); }
RetTy visitMemMoveInst(MemMoveInst &I) { DELEGATE(MemTransferInst); }
RetTy visitMemTransferInst(MemTransferInst &I) { DELEGATE(MemIntrinsic); }
RetTy visitMemIntrinsic(MemIntrinsic &I) { DELEGATE(IntrinsicInst); }
@@ -290,8 +289,12 @@ private:
case Intrinsic::dbg_value: DELEGATE(DbgValueInst);
case Intrinsic::dbg_label: DELEGATE(DbgLabelInst);
case Intrinsic::memcpy: DELEGATE(MemCpyInst);
+ case Intrinsic::memcpy_inline:
+ DELEGATE(MemCpyInlineInst);
case Intrinsic::memmove: DELEGATE(MemMoveInst);
case Intrinsic::memset: DELEGATE(MemSetInst);
+ case Intrinsic::memset_inline:
+ DELEGATE(MemSetInlineInst);
case Intrinsic::vastart: DELEGATE(VAStartInst);
case Intrinsic::vaend: DELEGATE(VAEndInst);
case Intrinsic::vacopy: DELEGATE(VACopyInst);
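The effect of the delegation chain above: memcpy.inline now reaches
visitMemCpyInst (and memset.inline reaches visitMemSetInst) in any visitor
that does not override the .inline callbacks. A minimal sketch, using a
hypothetical visitor that is not part of the patch:

    // Hypothetical visitor: a single override now observes both
    // llvm.memcpy and llvm.memcpy.inline, because MemCpyInlineInst
    // delegates to MemCpyInst after this patch.
    #include "llvm/IR/InstVisitor.h"
    using namespace llvm;

    struct MemCpyCounter : public InstVisitor<MemCpyCounter> {
      unsigned Count = 0;
      void visitMemCpyInst(MemCpyInst &I) { ++Count; }
    };
    // Usage: MemCpyCounter MCC; MCC.visit(F);  // F: llvm::Function &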
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 22085e0..a3c6059 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -2545,10 +2545,20 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
I.eraseFromParent();
}
- // Similar to memmove: avoid copying shadow twice.
- // This is somewhat unfortunate as it may slowdown small constant memcpys.
- // FIXME: consider doing manual inline for small constant sizes and proper
- // alignment.
+ /// Instrument memcpy
+ ///
+ /// Similar to memmove: avoid copying shadow twice. This is somewhat
+ /// unfortunate as it may slow down small constant memcpys.
+ /// FIXME: consider doing manual inline for small constant sizes and proper
+ /// alignment.
+ ///
+ /// Note: This also handles memcpy.inline, which promises no calls to external
+ /// functions as an optimization. However, with instrumentation enabled this
+ /// is difficult to promise; additionally, we know that the MSan runtime
+ /// exists and provides __msan_memcpy(). Therefore, we assume that with
+ /// instrumentation it's safe to turn memcpy.inline into a call to
+ /// __msan_memcpy(). Should this be wrong, such as when implementing memcpy()
+ /// itself, instrumentation should be disabled with the no_sanitize attribute.
void visitMemCpyInst(MemCpyInst &I) {
getShadow(I.getArgOperand(1)); // Ensure shadow initialized
IRBuilder<> IRB(&I);
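The remainder of visitMemCpyInst is unchanged by this patch; for context, a
sketch of what it does, assuming the member names used elsewhere in
MemorySanitizer.cpp (MS.MemcpyFn is bound to __msan_memcpy, MS.IntptrTy to
the target's intptr type):

    // Sketch of the unchanged tail of visitMemCpyInst: rewrite the
    // intrinsic (memcpy or, now, memcpy.inline) into a runtime call and
    // delete the original instruction.
    IRB.CreateCall(
        MS.MemcpyFn,
        {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
    I.eraseFromParent();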
diff --git a/llvm/test/Instrumentation/AddressSanitizer/mem-intrinsics.ll b/llvm/test/Instrumentation/AddressSanitizer/mem-intrinsics.ll
index cc80cbe..8922dc0 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/mem-intrinsics.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/mem-intrinsics.ll
@@ -8,8 +8,10 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
target triple = "x86_64-unknown-linux-gnu"
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
+declare void @llvm.memset.inline.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) nounwind
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) nounwind
+declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) nounwind
define void @memintr_test(i8* %a, i8* %b) nounwind uwtable sanitize_address {
entry:
@@ -27,6 +29,19 @@ define void @memintr_test(i8* %a, i8* %b) nounwind uwtable sanitize_address {
; CHECK-NOPREFIX: @memcpy
; CHECK: ret void
+define void @memintr_inline_test(i8* %a, i8* %b) nounwind uwtable sanitize_address {
+ entry:
+ tail call void @llvm.memset.inline.p0i8.i64(i8* %a, i8 0, i64 100, i1 false)
+ tail call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* %a, i8* %b, i64 100, i1 false)
+ ret void
+}
+; CHECK-LABEL: memintr_inline_test
+; CHECK-PREFIX: @__asan_memset
+; CHECK-PREFIX: @__asan_memcpy
+; CHECK-NOPREFIX: @memset
+; CHECK-NOPREFIX: @memcpy
+; CHECK: ret void
+
define void @memintr_test_nosanitize(i8* %a, i8* %b) nounwind uwtable {
entry:
tail call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 100, i1 false)
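As the commit message notes, ASan does not depend on InstVisitor callbacks
here: it matches MemIntrinsic directly, and the isa<> class checks already
cover the .inline variants. A simplified sketch of that pattern (member names
such as AsanMemcpy follow AddressSanitizer.cpp conventions; treat the details
as approximate):

    // Simplified sketch: any MemIntrinsic -- including memcpy.inline and
    // memset.inline -- is rewritten into a call to the ASan runtime.
    if (auto *MI = dyn_cast<MemIntrinsic>(&Inst)) {
      IRBuilder<> IRB(MI);
      if (isa<MemTransferInst>(MI))  // memcpy, memcpy.inline, memmove
        IRB.CreateCall(isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
                       {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
                        IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
                        IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
      else if (isa<MemSetInst>(MI))  // memset, memset.inline
        IRB.CreateCall(AsanMemset,
                       {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
                        IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
                        IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
      MI->eraseFromParent();
    }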
diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll
index 875d642..6264598 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll
@@ -233,6 +233,31 @@ declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) n
; CHECK: call i8* @__msan_memcpy
; CHECK: ret void
+; memset.inline
+define void @MemSetInline(i8* nocapture %x) nounwind uwtable sanitize_memory {
+entry:
+ call void @llvm.memset.inline.p0i8.i64(i8* %x, i8 42, i64 10, i1 false)
+ ret void
+}
+
+declare void @llvm.memset.inline.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
+
+; CHECK-LABEL: @MemSetInline
+; CHECK: call i8* @__msan_memset
+; CHECK: ret void
+
+; memcpy.inline
+define void @MemCpyInline(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
+entry:
+ call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i1 false)
+ ret void
+}
+
+declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+
+; CHECK-LABEL: @MemCpyInline
+; CHECK: call i8* @__msan_memcpy
+; CHECK: ret void
; memmove is lowered to a call
define void @MemMove(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
diff --git a/llvm/test/Instrumentation/ThreadSanitizer/tsan_basic.ll b/llvm/test/Instrumentation/ThreadSanitizer/tsan_basic.ll
index 62bbbb2..9a1ef9e 100644
--- a/llvm/test/Instrumentation/ThreadSanitizer/tsan_basic.ll
+++ b/llvm/test/Instrumentation/ThreadSanitizer/tsan_basic.ll
@@ -22,8 +22,10 @@ entry:
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1)
+declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1)
declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1)
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
+declare void @llvm.memset.inline.p0i8.i64(i8* nocapture, i8, i64, i1)
; Check that tsan converts mem intrinsics back to function calls.
@@ -37,6 +39,15 @@ entry:
; CHECK: ret void
}
+define void @MemCpyInlineTest(i8* nocapture %x, i8* nocapture %y) sanitize_thread {
+entry:
+ tail call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 4 %x, i8* align 4 %y, i64 16, i1 false)
+ ret void
+; CHECK: define void @MemCpyInlineTest
+; CHECK: call i8* @memcpy
+; CHECK: ret void
+}
+
define void @MemMoveTest(i8* nocapture %x, i8* nocapture %y) sanitize_thread {
entry:
tail call void @llvm.memmove.p0i8.p0i8.i64(i8* align 4 %x, i8* align 4 %y, i64 16, i1 false)
@@ -55,6 +66,15 @@ entry:
; CHECK: ret void
}
+define void @MemSetInlineTest(i8* nocapture %x) sanitize_thread {
+entry:
+ tail call void @llvm.memset.inline.p0i8.i64(i8* align 4 %x, i8 77, i64 16, i1 false)
+ ret void
+; CHECK: define void @MemSetInlineTest
+; CHECK: call i8* @memset
+; CHECK: ret void
+}
+
; CHECK-LABEL: @SwiftError
; CHECK-NOT: __tsan_read
; CHECK-NOT: __tsan_write
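A note on the TSan expectations above: unlike MSan and ASan, TSan rewrites
the intrinsics into calls to the plain libc functions (hence CHECK: call i8*
@memcpy rather than a __tsan_-prefixed callee), which the TSan runtime then
intercepts. A sketch of the difference, assuming ThreadSanitizer.cpp's
MemcpyFn/MemmoveFn bindings (names approximate):

    // In TSan's mem-intrinsic handling, the callees are the libc entry
    // points themselves: MemcpyFn -> memcpy, MemmoveFn -> memmove.
    if (auto *M = dyn_cast<MemTransferInst>(&Inst)) {  // covers memcpy.inline
      IRBuilder<> IRB(M);
      IRB.CreateCall(isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
                     {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
                      IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
                      IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
      M->eraseFromParent();
    }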