author    Paul Robinson <paul.robinson@sony.com>  2019-09-30 15:11:23 +0000
committer Tom Stellard <tstellar@redhat.com>      2019-11-21 15:43:45 -0800
commit    28c1f51f14ff62f2b8ad05bd363718c2c2aef29e (patch)
tree      328c9f828895d7c4cefe72f6b98d38f98ff5d109
parent    76817ab1e1043d269f415928fabd3f4a533b7e83 (diff)
Merging r373220:
------------------------------------------------------------------------
r373220 | probinson | 2019-09-30 08:11:23 -0700 (Mon, 30 Sep 2019) | 12 lines

[SSP] [3/3] cmpxchg and addrspacecast instructions can now trigger stack
protectors.
Fixes PR42238.

Add test coverage for llvm.memset, as proxy for all llvm.mem* intrinsics.
There are two issues here: (1) they could be lowered to a libc call,
which could be intercepted, and do Bad Stuff; (2) with a non-constant
size, they could overwrite the current stack frame.

The test was mostly written by Matt Arsenault in r363169, which was
later reverted; I tweaked what he had and added the llvm.memset part.

Differential Revision: https://reviews.llvm.org/D67845
------------------------------------------------------------------------
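
As a rough illustration of issue (2) above, here is a minimal, hypothetical C sketch (not part of this patch): the memset size is not a compile-time constant, so the lowered llvm.memset can write past the local buffer and clobber the current stack frame.

    #include <string.h>

    /* Hypothetical sketch: 'n' is caller-controlled, not a constant. */
    void zero_buffer(size_t n) {
      char buf[16];
      /* If n > sizeof(buf), this write runs past 'buf' and can overwrite
         saved registers or the return address in the current frame. */
      memset(buf, 0, n);
    }

Under the sspstrong attribute used in the new test file, the IR-level equivalent of this pattern is checked by @memset_captures below to receive a stack protector.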
-rw-r--r--   llvm/lib/CodeGen/StackProtector.cpp          23
-rw-r--r--   llvm/test/CodeGen/X86/stack-protector-2.ll  165
2 files changed, 186 insertions, 2 deletions
diff --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp
index 68e902c2..baa57e2 100644
--- a/llvm/lib/CodeGen/StackProtector.cpp
+++ b/llvm/lib/CodeGen/StackProtector.cpp
@@ -165,12 +165,19 @@ bool StackProtector::HasAddressTaken(const Instruction *AI,
if (AI == cast<StoreInst>(I)->getValueOperand())
return true;
break;
+ case Instruction::AtomicCmpXchg:
+ // cmpxchg conceptually includes both a load and store from the same
+ // location. So, like store, the value being stored is what matters.
+ if (AI == cast<AtomicCmpXchgInst>(I)->getNewValOperand())
+ return true;
+ break;
case Instruction::PtrToInt:
if (AI == cast<PtrToIntInst>(I)->getOperand(0))
return true;
break;
case Instruction::Call: {
- // Ignore intrinsics that are not calls. TODO: Use isLoweredToCall().
+ // Ignore intrinsics that do not become real instructions.
+ // TODO: Narrow this to intrinsics that have store-like effects.
const auto *CI = cast<CallInst>(I);
if (!isa<DbgInfoIntrinsic>(CI) && !CI->isLifetimeStartOrEnd())
return true;
@@ -181,6 +188,7 @@ bool StackProtector::HasAddressTaken(const Instruction *AI,
case Instruction::BitCast:
case Instruction::GetElementPtr:
case Instruction::Select:
+ case Instruction::AddrSpaceCast:
if (HasAddressTaken(I, VisitedPHIs))
return true;
break;
@@ -193,8 +201,19 @@ bool StackProtector::HasAddressTaken(const Instruction *AI,
return true;
break;
}
- default:
+ case Instruction::Load:
+ case Instruction::AtomicRMW:
+ case Instruction::Ret:
+ // These instructions take an address operand, but have load-like or
+ // other innocuous behavior that should not trigger a stack protector.
+ // atomicrmw conceptually has both load and store semantics, but the
+ // value being stored must be integer; so if a pointer is being stored,
+ // we'll catch it in the PtrToInt case above.
break;
+ default:
+ // Conservatively return true for any instruction that takes an address
+ // operand, but is not handled above.
+ return true;
}
}
return false;
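
Between the implementation change above and the new tests below, a hypothetical C11 sketch (not part of this patch) of source that can produce a cmpxchg whose new-value operand is the address of a stack variable. Depending on how the frontend lowers pointer atomics, the escape is caught either by the new AtomicCmpXchg case or by the existing PtrToInt case, and the function is instrumented under -fstack-protector-strong.

    #include <stdatomic.h>

    /* Hypothetical sketch: 'local' lives on the stack and its address is
       published as the new value of an atomic compare-exchange. */
    void publish(int *_Atomic *slot) {
      int local = 42;
      int *expected = 0;
      /* Roughly: cmpxchg i32** %slot, i32* null, i32* %local ... */
      atomic_compare_exchange_strong(slot, &expected, &local);
    }

The IR-level equivalent is exercised directly by @cmpxchg_captures in the new test file below.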
diff --git a/llvm/test/CodeGen/X86/stack-protector-2.ll b/llvm/test/CodeGen/X86/stack-protector-2.ll
new file mode 100644
index 0000000..dc86b93
--- /dev/null
+++ b/llvm/test/CodeGen/X86/stack-protector-2.ll
@@ -0,0 +1,165 @@
+; RUN: llc -mtriple=x86_64-pc-linux-gnu -start-before=stack-protector -stop-after=stack-protector -o - < %s | FileCheck %s
+; Bugs 42238/43308: Test some additional situations not caught previously.
+
+define void @store_captures() #0 {
+; CHECK-LABEL: @store_captures(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[STACKGUARDSLOT:%.*]] = alloca i8*
+; CHECK-NEXT: [[STACKGUARD:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
+; CHECK-NEXT: call void @llvm.stackprotector(i8* [[STACKGUARD]], i8** [[STACKGUARDSLOT]])
+; CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[J:%.*]] = alloca i32*, align 8
+; CHECK-NEXT: store i32 0, i32* [[RETVAL]]
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[A]], align 4
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
+; CHECK-NEXT: store i32 [[ADD]], i32* [[A]], align 4
+; CHECK-NEXT: store i32* [[A]], i32** [[J]], align 8
+; CHECK-NEXT: [[STACKGUARD1:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
+; CHECK-NEXT: [[TMP0:%.*]] = load volatile i8*, i8** [[STACKGUARDSLOT]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8* [[STACKGUARD1]], [[TMP0]]
+; CHECK-NEXT: br i1 [[TMP1]], label [[SP_RETURN:%.*]], label [[CALLSTACKCHECKFAILBLK:%.*]], !prof !0
+; CHECK: SP_return:
+; CHECK-NEXT: ret void
+; CHECK: CallStackCheckFailBlk:
+; CHECK-NEXT: call void @__stack_chk_fail()
+; CHECK-NEXT: unreachable
+;
+entry:
+ %retval = alloca i32, align 4
+ %a = alloca i32, align 4
+ %j = alloca i32*, align 8
+ store i32 0, i32* %retval
+ %load = load i32, i32* %a, align 4
+ %add = add nsw i32 %load, 1
+ store i32 %add, i32* %a, align 4
+ store i32* %a, i32** %j, align 8
+ ret void
+}
+
+define i32* @non_captures() #0 {
+; load, atomicrmw, and ret do not trigger a stack protector.
+; CHECK-LABEL: @non_captures(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[A]], align 4
+; CHECK-NEXT: [[ATOM:%.*]] = atomicrmw add i32* [[A]], i32 1 seq_cst
+; CHECK-NEXT: ret i32* [[A]]
+;
+entry:
+ %a = alloca i32, align 4
+ %load = load i32, i32* %a, align 4
+ %atom = atomicrmw add i32* %a, i32 1 seq_cst
+ ret i32* %a
+}
+
+define void @store_addrspacecast_captures() #0 {
+; CHECK-LABEL: @store_addrspacecast_captures(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[STACKGUARDSLOT:%.*]] = alloca i8*
+; CHECK-NEXT: [[STACKGUARD:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
+; CHECK-NEXT: call void @llvm.stackprotector(i8* [[STACKGUARD]], i8** [[STACKGUARDSLOT]])
+; CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[J:%.*]] = alloca i32 addrspace(1)*, align 8
+; CHECK-NEXT: store i32 0, i32* [[RETVAL]]
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[A]], align 4
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
+; CHECK-NEXT: store i32 [[ADD]], i32* [[A]], align 4
+; CHECK-NEXT: [[A_ADDRSPACECAST:%.*]] = addrspacecast i32* [[A]] to i32 addrspace(1)*
+; CHECK-NEXT: store i32 addrspace(1)* [[A_ADDRSPACECAST]], i32 addrspace(1)** [[J]], align 8
+; CHECK-NEXT: [[STACKGUARD1:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
+; CHECK-NEXT: [[TMP0:%.*]] = load volatile i8*, i8** [[STACKGUARDSLOT]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8* [[STACKGUARD1]], [[TMP0]]
+; CHECK-NEXT: br i1 [[TMP1]], label [[SP_RETURN:%.*]], label [[CALLSTACKCHECKFAILBLK:%.*]], !prof !0
+; CHECK: SP_return:
+; CHECK-NEXT: ret void
+; CHECK: CallStackCheckFailBlk:
+; CHECK-NEXT: call void @__stack_chk_fail()
+; CHECK-NEXT: unreachable
+;
+entry:
+ %retval = alloca i32, align 4
+ %a = alloca i32, align 4
+ %j = alloca i32 addrspace(1)*, align 8
+ store i32 0, i32* %retval
+ %load = load i32, i32* %a, align 4
+ %add = add nsw i32 %load, 1
+ store i32 %add, i32* %a, align 4
+ %a.addrspacecast = addrspacecast i32* %a to i32 addrspace(1)*
+ store i32 addrspace(1)* %a.addrspacecast, i32 addrspace(1)** %j, align 8
+ ret void
+}
+
+define void @cmpxchg_captures() #0 {
+; CHECK-LABEL: @cmpxchg_captures(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[STACKGUARDSLOT:%.*]] = alloca i8*
+; CHECK-NEXT: [[STACKGUARD:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
+; CHECK-NEXT: call void @llvm.stackprotector(i8* [[STACKGUARD]], i8** [[STACKGUARDSLOT]])
+; CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[J:%.*]] = alloca i32*, align 8
+; CHECK-NEXT: store i32 0, i32* [[RETVAL]]
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[A]], align 4
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
+; CHECK-NEXT: store i32 [[ADD]], i32* [[A]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = cmpxchg i32** [[J]], i32* null, i32* [[A]] seq_cst monotonic
+; CHECK-NEXT: [[STACKGUARD1:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
+; CHECK-NEXT: [[TMP1:%.*]] = load volatile i8*, i8** [[STACKGUARDSLOT]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8* [[STACKGUARD1]], [[TMP1]]
+; CHECK-NEXT: br i1 [[TMP2]], label [[SP_RETURN:%.*]], label [[CALLSTACKCHECKFAILBLK:%.*]], !prof !0
+; CHECK: SP_return:
+; CHECK-NEXT: ret void
+; CHECK: CallStackCheckFailBlk:
+; CHECK-NEXT: call void @__stack_chk_fail()
+; CHECK-NEXT: unreachable
+;
+entry:
+ %retval = alloca i32, align 4
+ %a = alloca i32, align 4
+ %j = alloca i32*, align 8
+ store i32 0, i32* %retval
+ %load = load i32, i32* %a, align 4
+ %add = add nsw i32 %load, 1
+ store i32 %add, i32* %a, align 4
+
+ cmpxchg i32** %j, i32* null, i32* %a seq_cst monotonic
+ ret void
+}
+
+define void @memset_captures(i64 %c) #0 {
+; CHECK-LABEL: @memset_captures(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[STACKGUARDSLOT:%.*]] = alloca i8*
+; CHECK-NEXT: [[STACKGUARD:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
+; CHECK-NEXT: call void @llvm.stackprotector(i8* [[STACKGUARD]], i8** [[STACKGUARDSLOT]])
+; CHECK-NEXT: [[CADDR:%.*]] = alloca i64, align 8
+; CHECK-NEXT: store i64 %c, i64* [[CADDR]], align 8
+; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[IPTR:%.*]] = bitcast i32* [[I]] to i8*
+; CHECK-NEXT: [[COUNT:%.*]] = load i64, i64* [[CADDR]], align 8
+; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[IPTR]], i8 0, i64 [[COUNT]], i1 false)
+; CHECK-NEXT: [[STACKGUARD1:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
+; CHECK-NEXT: [[TMP1:%.*]] = load volatile i8*, i8** [[STACKGUARDSLOT]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8* [[STACKGUARD1]], [[TMP1]]
+; CHECK-NEXT: br i1 [[TMP2]], label [[SP_RETURN:%.*]], label [[CALLSTACKCHECKFAILBLK:%.*]], !prof !0
+; CHECK: SP_return:
+; CHECK-NEXT: ret void
+; CHECK: CallStackCheckFailBlk:
+; CHECK-NEXT: call void @__stack_chk_fail()
+; CHECK-NEXT: unreachable
+;
+entry:
+ %c.addr = alloca i64, align 8
+ store i64 %c, i64* %c.addr, align 8
+ %i = alloca i32, align 4
+ %i.ptr = bitcast i32* %i to i8*
+ %count = load i64, i64* %c.addr, align 8
+ call void @llvm.memset.p0i8.i64(i8* align 4 %i.ptr, i8 0, i64 %count, i1 false)
+ ret void
+}
+
+declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg)
+
+attributes #0 = { sspstrong }