diff options
author | Vitaly Buka <vitalybuka@google.com> | 2023-11-17 15:13:11 -0800 |
---|---|---|
committer | GitHub <noreply@github.com> | 2023-11-17 15:13:11 -0800 |
commit | a05e736d288a7f2009ee9d057e78713d9adeeb5f (patch) | |
tree | c14591bbee646878d7fac2c4eacdfbc4d3395784 | |
parent | 506c47df00bbd9e527ecc5ac6e192b5fe5daa2c5 (diff) | |
download | llvm-a05e736d288a7f2009ee9d057e78713d9adeeb5f.zip llvm-a05e736d288a7f2009ee9d057e78713d9adeeb5f.tar.gz llvm-a05e736d288a7f2009ee9d057e78713d9adeeb5f.tar.bz2 |
[msan][x86] Fix shadow if vararg overflow beyond kParamTLSSize
The caller puts argument shadow one by one into __msan_va_arg_tls, until it
reaches kParamTLSSize. After that it still increments OverflowOffset but
does not store the shadow.
Callee needs OverflowOffset to prepare a shadow for the entire overflow
area. It's done by creating "varargs shadow copy" for complete list of
args, copying available shadow from __msan_va_arg_tls, and clearing the
rest.
However, the callee does not know whether the tail of __msan_va_arg_tls was
too small to fit an argument; it will copy that tail shadow into the "varargs
shadow copy", where it is later used as the shadow for an omitted argument.
Therefore the tail of __msan_va_arg_tls must be cleared whenever it is left
unused.
This allows us to enable compiler-rt/test/msan/vararg_shadow.cpp for
x86.
Reviewers: kstoimenov, thurstond
Reviewed By: thurstond
Pull Request: https://github.com/llvm/llvm-project/pull/72707
-rw-r--r-- | compiler-rt/test/msan/vararg_shadow.cpp | 4 | ||||
-rw-r--r-- | llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp | 68 | ||||
-rw-r--r-- | llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll | 1 |
3 files changed, 52 insertions, 21 deletions
diff --git a/compiler-rt/test/msan/vararg_shadow.cpp b/compiler-rt/test/msan/vararg_shadow.cpp index 78f1591..e491a4a 100644 --- a/compiler-rt/test/msan/vararg_shadow.cpp +++ b/compiler-rt/test/msan/vararg_shadow.cpp @@ -3,8 +3,8 @@ // Without -fno-sanitize-memory-param-retval we can't even pass poisoned values. // RUN: %clangxx_msan -fno-sanitize-memory-param-retval -fsanitize-memory-track-origins=0 -O3 %s -o %t -// The most of targets fail the test. -// XFAIL: target={{(x86|aarch64|loongarch64|mips|powerpc64).*}} +// Nothing works yet. +// XFAIL: target={{(aarch64|loongarch64|mips|powerpc64).*}} #include <sanitizer/msan_interface.h> #include <stdarg.h> diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp index a70aa89..06cccab 100644 --- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp @@ -4669,16 +4669,22 @@ struct VarArgHelperBase : public VarArgHelper { /// Compute the shadow address for a given va_arg. Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB, - unsigned ArgOffset, unsigned ArgSize) { - // Make sure we don't overflow __msan_va_arg_tls. - if (ArgOffset + ArgSize > kParamTLSSize) - return nullptr; + unsigned ArgOffset) { Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy); Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset)); return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0), "_msarg_va_s"); } + /// Compute the shadow address for a given va_arg. + Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB, + unsigned ArgOffset, unsigned ArgSize) { + // Make sure we don't overflow __msan_va_arg_tls. + if (ArgOffset + ArgSize > kParamTLSSize) + return nullptr; + return getShadowPtrForVAArgument(Ty, IRB, ArgOffset); + } + /// Compute the origin address for a given va_arg. 
Value *getOriginPtrForVAArgument(IRBuilder<> &IRB, int ArgOffset) { Value *Base = IRB.CreatePointerCast(MS.VAArgOriginTLS, MS.IntptrTy); @@ -4772,6 +4778,24 @@ struct VarArgAMD64Helper : public VarArgHelperBase { unsigned FpOffset = AMD64GpEndOffset; unsigned OverflowOffset = AMD64FpEndOffset; const DataLayout &DL = F.getParent()->getDataLayout(); + + auto CleanUnusedTLS = [&](Value *ShadowBase, unsigned BaseOffset) { + // Make sure we don't overflow __msan_va_arg_tls. + if (OverflowOffset <= kParamTLSSize) + return false; // Not needed, end is not reacheed. + + // The tails of __msan_va_arg_tls is not large enough to fit full + // value shadow, but it will be copied to backup anyway. Make it + // clean. + if (BaseOffset < kParamTLSSize) { + Value *TailSize = ConstantInt::getSigned(IRB.getInt32Ty(), + kParamTLSSize - BaseOffset); + IRB.CreateMemSet(ShadowBase, ConstantInt::getNullValue(IRB.getInt8Ty()), + TailSize, Align(8)); + } + return true; // Incomplete + }; + for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) { bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams(); bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal); @@ -4784,19 +4808,22 @@ struct VarArgAMD64Helper : public VarArgHelperBase { assert(A->getType()->isPointerTy()); Type *RealTy = CB.getParamByValType(ArgNo); uint64_t ArgSize = DL.getTypeAllocSize(RealTy); - Value *ShadowBase = getShadowPtrForVAArgument( - RealTy, IRB, OverflowOffset, alignTo(ArgSize, 8)); + uint64_t AlignedSize = alignTo(ArgSize, 8); + unsigned BaseOffset = OverflowOffset; + Value *ShadowBase = + getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset); Value *OriginBase = nullptr; if (MS.TrackOrigins) OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset); - OverflowOffset += alignTo(ArgSize, 8); - if (!ShadowBase) - continue; + OverflowOffset += AlignedSize; + + if (CleanUnusedTLS(ShadowBase, BaseOffset)) + continue; // We have no space to copy shadow there. 
+ Value *ShadowPtr, *OriginPtr; std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment, /*isStore*/ false); - IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment, ShadowPtr, kShadowTLSAlignment, ArgSize); if (MS.TrackOrigins) @@ -4811,36 +4838,39 @@ struct VarArgAMD64Helper : public VarArgHelperBase { Value *ShadowBase, *OriginBase = nullptr; switch (AK) { case AK_GeneralPurpose: - ShadowBase = - getShadowPtrForVAArgument(A->getType(), IRB, GpOffset, 8); + ShadowBase = getShadowPtrForVAArgument(A->getType(), IRB, GpOffset); if (MS.TrackOrigins) OriginBase = getOriginPtrForVAArgument(IRB, GpOffset); GpOffset += 8; + assert(GpOffset <= kParamTLSSize); break; case AK_FloatingPoint: - ShadowBase = - getShadowPtrForVAArgument(A->getType(), IRB, FpOffset, 16); + ShadowBase = getShadowPtrForVAArgument(A->getType(), IRB, FpOffset); if (MS.TrackOrigins) OriginBase = getOriginPtrForVAArgument(IRB, FpOffset); FpOffset += 16; + assert(FpOffset <= kParamTLSSize); break; case AK_Memory: if (IsFixed) continue; uint64_t ArgSize = DL.getTypeAllocSize(A->getType()); + uint64_t AlignedSize = alignTo(ArgSize, 8); + unsigned BaseOffset = OverflowOffset; ShadowBase = - getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset, 8); - if (MS.TrackOrigins) + getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset); + if (MS.TrackOrigins) { OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset); - OverflowOffset += alignTo(ArgSize, 8); + } + OverflowOffset += AlignedSize; + if (CleanUnusedTLS(ShadowBase, BaseOffset)) + continue; // We have no space to copy shadow there. } // Take fixed arguments into account for GpOffset and FpOffset, // but don't actually store shadows for them. // TODO(glider): don't call get*PtrForVAArgument() for them. 
if (IsFixed) continue; - if (!ShadowBase) - continue; Value *Shadow = MSV.getShadow(A); IRB.CreateAlignedStore(Shadow, ShadowBase, kShadowTLSAlignment); if (MS.TrackOrigins) { diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll index 6ac48c5..aff4d2c5 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll @@ -1303,6 +1303,7 @@ define linkonce_odr dso_local void @_Z4test3I11LongDouble4EvT_(ptr noundef byval ; CHECK-NEXT: [[TMP64:%.*]] = xor i64 [[TMP63]], 87960930222080 ; CHECK-NEXT: [[TMP65:%.*]] = inttoptr i64 [[TMP64]] to ptr ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 688) to ptr), ptr align 8 [[TMP65]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 752) to ptr), i8 0, i32 48, i1 false) ; CHECK-NEXT: store i64 1280, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (ptr, i32, ...) 
@_Z5test2I11LongDouble4EvT_iz(ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], i32 noundef 20, ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]]) ; CHECK-NEXT: ret void |