Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp            |  7
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp         | 63
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp          |  5
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp         | 12
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp         | 43
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.cpp            |  7
-rw-r--r--  llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp  |  2
-rw-r--r--  llvm/lib/Target/AMDGPU/SIISelLowering.cpp               | 10
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp      | 15
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp            |  1
-rw-r--r--  llvm/lib/Target/X86/X86FixupSetCC.cpp                   |  6
-rw-r--r--  llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp | 48
12 files changed, 165 insertions, 54 deletions
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 56e13f0..884c3f1 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -2362,6 +2362,13 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
MachineInstr::copyFlagsFromInstruction(CI));
return true;
}
+ case Intrinsic::modf: {
+ ArrayRef<Register> VRegs = getOrCreateVRegs(CI);
+ MIRBuilder.buildModf(VRegs[0], VRegs[1],
+ getOrCreateVReg(*CI.getArgOperand(0)),
+ MachineInstr::copyFlagsFromInstruction(CI));
+ return true;
+ }
case Intrinsic::sincos: {
ArrayRef<Register> VRegs = getOrCreateVRegs(CI);
MIRBuilder.buildFSincos(VRegs[0], VRegs[1],
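[The llvm.modf intrinsic returns two values, so the translator builds a single G_FMODF with two destination vregs, exactly as the sincos case does. As a point of reference, a minimal standalone C++ sketch of the libm contract being modeled — illustrative only, not code from this patch:

    #include <cmath>
    #include <cstdio>

    int main() {
      double IntPart = 0.0;
      // std::modf splits a value into fractional and integral parts; G_FMODF
      // models the same two-result contract in one generic instruction.
      double FracPart = std::modf(3.75, &IntPart);
      std::printf("frac=%f int=%f\n", FracPart, IntPart); // frac=0.75 int=3.0
      return 0;
    }
]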
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 03dfa6f..cffaf7c 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -471,6 +471,8 @@ static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) {
RTLIBCASE(TANH_F);
case TargetOpcode::G_FSINCOS:
RTLIBCASE(SINCOS_F);
+ case TargetOpcode::G_FMODF:
+ RTLIBCASE(MODF_F);
case TargetOpcode::G_FLOG10:
RTLIBCASE(LOG10_F);
case TargetOpcode::G_FLOG:
@@ -703,6 +705,46 @@ LegalizerHelper::LegalizeResult LegalizerHelper::emitSincosLibcall(
}
LegalizerHelper::LegalizeResult
+LegalizerHelper::emitModfLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder,
+ unsigned Size, Type *OpType,
+ LostDebugLocObserver &LocObserver) {
+ MachineFunction &MF = MIRBuilder.getMF();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ Register DstFrac = MI.getOperand(0).getReg();
+ Register DstInt = MI.getOperand(1).getReg();
+ Register Src = MI.getOperand(2).getReg();
+ LLT DstTy = MRI.getType(DstFrac);
+
+ int MemSize = DstTy.getSizeInBytes();
+ Align Alignment = getStackTemporaryAlignment(DstTy);
+ const DataLayout &DL = MIRBuilder.getDataLayout();
+ unsigned AddrSpace = DL.getAllocaAddrSpace();
+ MachinePointerInfo PtrInfo;
+
+ Register StackPtrInt =
+ createStackTemporary(TypeSize::getFixed(MemSize), Alignment, PtrInfo)
+ .getReg(0);
+
+ auto &Ctx = MF.getFunction().getContext();
+ auto LibcallResult = createLibcall(
+ MIRBuilder, getRTLibDesc(MI.getOpcode(), Size), {DstFrac, OpType, 0},
+ {{Src, OpType, 0}, {StackPtrInt, PointerType::get(Ctx, AddrSpace), 1}},
+ LocObserver, &MI);
+
+ if (LibcallResult != LegalizeResult::Legalized)
+ return LegalizerHelper::UnableToLegalize;
+
+ MachineMemOperand *LoadMMOInt = MF.getMachineMemOperand(
+ PtrInfo, MachineMemOperand::MOLoad, MemSize, Alignment);
+
+ MIRBuilder.buildLoad(DstInt, StackPtrInt, *LoadMMOInt);
+ MI.eraseFromParent();
+
+ return LegalizerHelper::Legalized;
+}
+
+LegalizerHelper::LegalizeResult
llvm::createMemLibcall(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
MachineInstr &MI, LostDebugLocObserver &LocObserver) {
auto &Ctx = MIRBuilder.getMF().getFunction().getContext();
@@ -1341,6 +1383,16 @@ LegalizerHelper::libcall(MachineInstr &MI, LostDebugLocObserver &LocObserver) {
}
return emitSincosLibcall(MI, MIRBuilder, Size, HLTy, LocObserver);
}
+ case TargetOpcode::G_FMODF: {
+ LLT LLTy = MRI.getType(MI.getOperand(0).getReg());
+ unsigned Size = LLTy.getSizeInBits();
+ Type *HLTy = getFloatTypeForLLT(Ctx, LLTy);
+ if (!HLTy || (Size != 32 && Size != 64 && Size != 80 && Size != 128)) {
+ LLVM_DEBUG(dbgs() << "No libcall available for type " << LLTy << ".\n");
+ return UnableToLegalize;
+ }
+ return emitModfLibcall(MI, MIRBuilder, Size, HLTy, LocObserver);
+ }
case TargetOpcode::G_LROUND:
case TargetOpcode::G_LLROUND:
case TargetOpcode::G_INTRINSIC_LRINT:
@@ -3333,6 +3385,16 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC);
Observer.changedInstr(MI);
return Legalized;
+ case TargetOpcode::G_FMODF: {
+ Observer.changingInstr(MI);
+ widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_FPEXT);
+
+ widenScalarDst(MI, WideTy, 1, TargetOpcode::G_FPTRUNC);
+ MIRBuilder.setInsertPt(MIRBuilder.getMBB(), --MIRBuilder.getInsertPt());
+ widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC);
+ Observer.changedInstr(MI);
+ return Legalized;
+ }
case TargetOpcode::G_FPOWI:
case TargetOpcode::G_FLDEXP:
case TargetOpcode::G_STRICT_FLDEXP: {
@@ -5472,6 +5534,7 @@ LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
case G_LROUND:
case G_LLROUND:
case G_INTRINSIC_TRUNC:
+ case G_FMODF:
case G_FCOS:
case G_FSIN:
case G_FTAN:
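[emitModfLibcall lowers G_FMODF the way the sincos path handles its second result: it reserves a stack slot, passes the slot's address as the libcall's pointer argument, and loads the integral part back out afterwards. A minimal sketch of the resulting shape, written as plain C++ rather than MIR; the names here are hypothetical:

    #include <cmath>

    struct ModfParts {
      double Frac;
      double Int;
    };

    // Hypothetical illustration of the lowered shape: the fractional part
    // arrives in the return value, the integral part through a stack slot
    // whose address is handed to the modf libcall.
    ModfParts lowerModf(double X) {
      double Slot;                        // createStackTemporary
      double Frac = std::modf(X, &Slot);  // createLibcall(MODF_F, X, &Slot)
      return {Frac, Slot};                // buildLoad from the stack slot
    }
]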
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 8fc7eab..95f53fe 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -4762,6 +4762,11 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
case ISD::AssertZext:
Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
return VTBits-Tmp;
+ case ISD::FREEZE:
+ if (isGuaranteedNotToBeUndefOrPoison(Op.getOperand(0), DemandedElts,
+ /*PoisonOnly=*/false))
+ return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
+ break;
case ISD::MERGE_VALUES:
return ComputeNumSignBits(Op.getOperand(Op.getResNo()), DemandedElts,
Depth + 1);
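[The new FREEZE case is sound because freeze is the identity on a value that is guaranteed to be neither undef nor poison, so the operand's sign-bit count carries over unchanged. For intuition, a standalone sketch of the quantity ComputeNumSignBits tracks — a toy model, not compiler code:

    #include <cstdint>
    #include <cstdio>

    // Number of leading bits equal to the sign bit, including the sign bit
    // itself. A value with N sign bits survives sext-truncation to 33-N bits.
    unsigned numSignBits(int32_t V) {
      uint32_t UV = static_cast<uint32_t>(V);
      unsigned Sign = (UV >> 31) & 1;
      unsigned N = 1;
      while (N < 32 && ((UV >> (31 - N)) & 1) == Sign)
        ++N;
      return N;
    }

    int main() {
      std::printf("%u\n", numSignBits(-1)); // 32: every bit is a sign copy
      std::printf("%u\n", numSignBits(3));  // 30: bits 31..2 are all zero
      return 0;
    }
]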
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 6a1b06e..177b4b0 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -2089,7 +2089,8 @@ void AArch64DAGToDAGISel::SelectMultiVectorLutiLane(SDNode *Node,
if (!ImmToReg<AArch64::ZT0, 0>(Node->getOperand(2), ZtValue))
return;
- SDValue Ops[] = {ZtValue, Node->getOperand(3), Node->getOperand(4)};
+ SDValue Chain = Node->getOperand(0);
+ SDValue Ops[] = {ZtValue, Node->getOperand(3), Node->getOperand(4), Chain};
SDLoc DL(Node);
EVT VT = Node->getValueType(0);
@@ -2110,14 +2111,15 @@ void AArch64DAGToDAGISel::SelectMultiVectorLutiLane(SDNode *Node,
void AArch64DAGToDAGISel::SelectMultiVectorLuti(SDNode *Node,
unsigned NumOutVecs,
unsigned Opc) {
-
SDValue ZtValue;
- SmallVector<SDValue, 4> Ops;
if (!ImmToReg<AArch64::ZT0, 0>(Node->getOperand(2), ZtValue))
return;
- Ops.push_back(ZtValue);
- Ops.push_back(createZMulTuple({Node->getOperand(3), Node->getOperand(4)}));
+ SDValue Chain = Node->getOperand(0);
+ SDValue Ops[] = {ZtValue,
+ createZMulTuple({Node->getOperand(3), Node->getOperand(4)}),
+ Chain};
+
SDLoc DL(Node);
EVT VT = Node->getValueType(0);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 45f5235..a1f4734 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -27234,6 +27234,21 @@ static bool isLanes1toNKnownZero(SDValue Op) {
}
}
+// Return true if the vector operation can guarantee that the first lane of its
+// result is active.
+static bool isLane0KnownActive(SDValue Op) {
+ switch (Op.getOpcode()) {
+ default:
+ return false;
+ case AArch64ISD::REINTERPRET_CAST:
+ return isLane0KnownActive(Op->getOperand(0));
+ case ISD::SPLAT_VECTOR:
+ return isOneConstant(Op.getOperand(0));
+ case AArch64ISD::PTRUE:
+ return Op.getConstantOperandVal(0) == AArch64SVEPredPattern::all;
+  }
+}
+
static SDValue removeRedundantInsertVectorElt(SDNode *N) {
assert(N->getOpcode() == ISD::INSERT_VECTOR_ELT && "Unexpected node!");
SDValue InsertVec = N->getOperand(0);
@@ -27519,6 +27534,32 @@ static SDValue performMULLCombine(SDNode *N,
return SDValue();
}
+static SDValue performPTestFirstCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ SelectionDAG &DAG) {
+ if (DCI.isBeforeLegalize())
+ return SDValue();
+
+ SDLoc DL(N);
+ auto Mask = N->getOperand(0);
+ auto Pred = N->getOperand(1);
+
+ if (!isLane0KnownActive(Mask))
+ return SDValue();
+
+ if (Pred->getOpcode() == AArch64ISD::REINTERPRET_CAST)
+ Pred = Pred->getOperand(0);
+
+ if (Pred->getOpcode() == ISD::CONCAT_VECTORS) {
+ Pred = Pred->getOperand(0);
+ Pred = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv16i1, Pred);
+ return DAG.getNode(AArch64ISD::PTEST_FIRST, DL, N->getValueType(0), Mask,
+ Pred);
+ }
+
+ return SDValue();
+}
+
static SDValue
performScalarToVectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) {
@@ -27875,6 +27916,8 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
case AArch64ISD::UMULL:
case AArch64ISD::PMULL:
return performMULLCombine(N, DCI, DAG);
+ case AArch64ISD::PTEST_FIRST:
+ return performPTestFirstCombine(N, DCI, DAG);
case ISD::INTRINSIC_VOID:
case ISD::INTRINSIC_W_CHAIN:
switch (N->getConstantOperandVal(1)) {
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 5a51c81..35b27ea 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -1503,6 +1503,13 @@ AArch64InstrInfo::canRemovePTestInstr(MachineInstr *PTest, MachineInstr *Mask,
getElementSizeForOpcode(PredOpcode))
return PredOpcode;
+ // For PTEST_FIRST(PTRUE_ALL, WHILE), the PTEST_FIRST is redundant since
+ // WHILEcc performs an implicit PTEST with an all active mask, setting
+ // the N flag as the PTEST_FIRST would.
+ if (PTest->getOpcode() == AArch64::PTEST_PP_FIRST &&
+ isPTrueOpcode(MaskOpcode) && Mask->getOperand(1).getImm() == 31)
+ return PredOpcode;
+
return {};
}
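[Both AArch64 changes rest on the same architectural fact: PTEST sets the N flag to the value of the first active element of the tested predicate, and the SVE WHILEcc instructions already set NZCV as if a PTEST with an all-active governing predicate had run on their result. A hedged toy model of that flag semantics, not compiler code:

    #include <array>
    #include <cstdio>

    constexpr unsigned VL = 4; // hypothetical 4-lane predicate

    // Toy model: PTEST's N ("first") flag is the value of the first element
    // of the tested predicate that is active under the governing mask.
    bool ptestFirst(const std::array<bool, VL> &Mask,
                    const std::array<bool, VL> &Pred) {
      for (unsigned I = 0; I < VL; ++I)
        if (Mask[I])
          return Pred[I];
      return false;
    }

    int main() {
      std::array<bool, VL> All{true, true, true, true};     // ptrue, pattern all
      std::array<bool, VL> While{true, true, false, false}; // e.g. whilelt 0, 2
      // The WHILE already produced this N flag implicitly, so an explicit
      // PTEST_FIRST(ptrue_all, While) can be removed.
      std::printf("N = %d\n", ptestFirst(All, While)); // N = 1
      return 0;
    }
]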
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 7ee54c5..c197550e 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -438,7 +438,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
getActionDefinitionsBuilder({G_FCOS, G_FSIN, G_FPOW, G_FLOG, G_FLOG2,
G_FLOG10, G_FTAN, G_FEXP, G_FEXP2, G_FEXP10,
G_FACOS, G_FASIN, G_FATAN, G_FATAN2, G_FCOSH,
- G_FSINH, G_FTANH})
+ G_FSINH, G_FTANH, G_FMODF})
// We need a call for these, so we always need to scalarize.
.scalarize(0)
// Regardless of FP16 support, widen 16-bit elements to 32-bits.
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 1653008..f7265c5 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -64,14 +64,6 @@ static cl::opt<bool> UseDivergentRegisterIndexing(
cl::desc("Use indirect register addressing for divergent indexes"),
cl::init(false));
-// TODO: This option should be removed once we switch to always using PTRADD in
-// the SelectionDAG.
-static cl::opt<bool> UseSelectionDAGPTRADD(
- "amdgpu-use-sdag-ptradd", cl::Hidden,
- cl::desc("Generate ISD::PTRADD nodes for 64-bit pointer arithmetic in the "
- "SelectionDAG ISel"),
- cl::init(false));
-
static bool denormalModeIsFlushAllF32(const MachineFunction &MF) {
const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
return Info->getMode().FP32Denormals == DenormalMode::getPreserveSign();
@@ -11466,7 +11458,7 @@ static bool isNoUnsignedWrap(SDValue Addr) {
bool SITargetLowering::shouldPreservePtrArith(const Function &F,
EVT PtrVT) const {
- return UseSelectionDAGPTRADD && PtrVT == MVT::i64;
+ return PtrVT == MVT::i64;
}
bool SITargetLowering::canTransformPtrArithOutOfBounds(const Function &F,
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index 273edf3..0afec42 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -752,6 +752,8 @@ bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
case TargetOpcode::G_FEXP2:
return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);
+ case TargetOpcode::G_FMODF:
+ return selectModf(ResVReg, ResType, I);
case TargetOpcode::G_FLOG:
return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
@@ -3453,9 +3455,6 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
case Intrinsic::spv_discard: {
return selectDiscard(ResVReg, ResType, I);
}
- case Intrinsic::modf: {
- return selectModf(ResVReg, ResType, I);
- }
default: {
std::string DiagMsg;
raw_string_ostream OS(DiagMsg);
@@ -4268,6 +4267,7 @@ bool SPIRVInstructionSelector::selectModf(Register ResVReg,
PtrTyReg,
LLT::pointer(storageClassToAddressSpace(SPIRV::StorageClass::Function),
GR.getPointerSize()));
+
// Assign SPIR-V type of the pointer type of the alloca variable to the
// new register.
GR.assignSPIRVTypeToVReg(PtrType, PtrTyReg, MIRBuilder.getMF());
@@ -4280,10 +4280,7 @@ bool SPIRVInstructionSelector::selectModf(Register ResVReg,
.addUse(GR.getSPIRVTypeID(PtrType))
.addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function));
Register Variable = AllocaMIB->getOperand(0).getReg();
- // Modf must have 4 operands, the first two are the 2 parts of the result,
- // the third is the operand, and the last one is the floating point value.
- assert(I.getNumOperands() == 4 &&
- "Expected 4 operands for modf instruction");
+
MachineBasicBlock &BB = *I.getParent();
// Create the OpenCLLIB::modf instruction.
auto MIB =
@@ -4293,8 +4290,8 @@ bool SPIRVInstructionSelector::selectModf(Register ResVReg,
.addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
.addImm(CL::modf)
.setMIFlags(I.getFlags())
- .add(I.getOperand(3)) // Floating point value.
- .addUse(Variable); // Pointer to integral part.
+ .add(I.getOperand(I.getNumExplicitDefs())) // Floating point value.
+ .addUse(Variable); // Pointer to integral part.
// Assign the integral part stored in the ptr to the second element of the
// result.
Register IntegralPartReg = I.getOperand(1).getReg();
diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
index db85e33..53074ea 100644
--- a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
@@ -300,6 +300,7 @@ SPIRVLegalizerInfo::SPIRVLegalizerInfo(const SPIRVSubtarget &ST) {
getActionDefinitionsBuilder({G_STRICT_FSQRT,
G_FPOW,
G_FEXP,
+ G_FMODF,
G_FEXP2,
G_FLOG,
G_FLOG2,
diff --git a/llvm/lib/Target/X86/X86FixupSetCC.cpp b/llvm/lib/Target/X86/X86FixupSetCC.cpp
index 2de89947..ea93a57 100644
--- a/llvm/lib/Target/X86/X86FixupSetCC.cpp
+++ b/llvm/lib/Target/X86/X86FixupSetCC.cpp
@@ -136,6 +136,12 @@ bool X86FixupSetCCPass::runOnMachineFunction(MachineFunction &MF) {
.addReg(ZeroReg)
.addReg(Reg0)
.addImm(X86::sub_8bit);
+
+ // Redirect the debug-instr-number to the setcc.
+ if (unsigned InstrNum = ZExt->peekDebugInstrNum())
+ MF.makeDebugValueSubstitution({InstrNum, 0},
+ {MI.getDebugInstrNum(), 0});
+
ToErase.push_back(ZExt);
}
}
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index cf076b9a..eff6f0c 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1923,20 +1923,17 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
///
/// Shadow = ParamTLS+ArgOffset.
Value *getShadowPtrForArgument(IRBuilder<> &IRB, int ArgOffset) {
- Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
- if (ArgOffset)
- Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
- return IRB.CreateIntToPtr(Base, IRB.getPtrTy(0), "_msarg");
+ return IRB.CreatePtrAdd(MS.ParamTLS,
+ ConstantInt::get(MS.IntptrTy, ArgOffset), "_msarg");
}
/// Compute the origin address for a given function argument.
Value *getOriginPtrForArgument(IRBuilder<> &IRB, int ArgOffset) {
if (!MS.TrackOrigins)
return nullptr;
- Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
- if (ArgOffset)
- Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
- return IRB.CreateIntToPtr(Base, IRB.getPtrTy(0), "_msarg_o");
+ return IRB.CreatePtrAdd(MS.ParamOriginTLS,
+ ConstantInt::get(MS.IntptrTy, ArgOffset),
+ "_msarg_o");
}
/// Compute the shadow address for a retval.
@@ -7219,9 +7216,8 @@ struct VarArgHelperBase : public VarArgHelper {
/// Compute the shadow address for a given va_arg.
Value *getShadowPtrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
- Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
- Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
- return IRB.CreateIntToPtr(Base, MS.PtrTy, "_msarg_va_s");
+ return IRB.CreatePtrAdd(
+ MS.VAArgTLS, ConstantInt::get(MS.IntptrTy, ArgOffset), "_msarg_va_s");
}
/// Compute the shadow address for a given va_arg.
@@ -7235,12 +7231,12 @@ struct VarArgHelperBase : public VarArgHelper {
/// Compute the origin address for a given va_arg.
Value *getOriginPtrForVAArgument(IRBuilder<> &IRB, int ArgOffset) {
- Value *Base = IRB.CreatePointerCast(MS.VAArgOriginTLS, MS.IntptrTy);
// getOriginPtrForVAArgument() is always called after
// getShadowPtrForVAArgument(), so __msan_va_arg_origin_tls can never
// overflow.
- Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
- return IRB.CreateIntToPtr(Base, MS.PtrTy, "_msarg_va_o");
+ return IRB.CreatePtrAdd(MS.VAArgOriginTLS,
+ ConstantInt::get(MS.IntptrTy, ArgOffset),
+ "_msarg_va_o");
}
void CleanUnusedTLS(IRBuilder<> &IRB, Value *ShadowBase,
@@ -7467,10 +7463,8 @@ struct VarArgAMD64Helper : public VarArgHelperBase {
NextNodeIRBuilder IRB(OrigInst);
Value *VAListTag = OrigInst->getArgOperand(0);
- Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
- IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
- ConstantInt::get(MS.IntptrTy, 16)),
- MS.PtrTy);
+ Value *RegSaveAreaPtrPtr =
+ IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, 16));
Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
const Align Alignment = Align(16);
@@ -7482,10 +7476,8 @@ struct VarArgAMD64Helper : public VarArgHelperBase {
if (MS.TrackOrigins)
IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
Alignment, AMD64FpEndOffset);
- Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
- IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
- ConstantInt::get(MS.IntptrTy, 8)),
- MS.PtrTy);
+ Value *OverflowArgAreaPtrPtr =
+ IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, 8));
Value *OverflowArgAreaPtr =
IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
@@ -7615,19 +7607,15 @@ struct VarArgAArch64Helper : public VarArgHelperBase {
// Retrieve a va_list field of 'void*' size.
Value *getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
- Value *SaveAreaPtrPtr = IRB.CreateIntToPtr(
- IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
- ConstantInt::get(MS.IntptrTy, offset)),
- MS.PtrTy);
+ Value *SaveAreaPtrPtr =
+ IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, offset));
return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
}
// Retrieve a va_list field of 'int' size.
Value *getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
- Value *SaveAreaPtr = IRB.CreateIntToPtr(
- IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
- ConstantInt::get(MS.IntptrTy, offset)),
- MS.PtrTy);
+ Value *SaveAreaPtr =
+ IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, offset));
Value *SaveArea32 = IRB.CreateLoad(IRB.getInt32Ty(), SaveAreaPtr);
return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
}
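[The MemorySanitizer changes are a mechanical cleanup: every ptrtoint + add + inttoptr round-trip becomes a single IRBuilder::CreatePtrAdd, which emits a byte-offset getelementptr and keeps pointer provenance visible to later passes. Both forms compute the same address; a standalone C++ analogue:

    #include <cassert>
    #include <cstdint>

    int main() {
      char Buf[32] = {};
      char *Base = Buf;
      std::ptrdiff_t Off = 16;

      // Old pattern: ptrtoint + add + inttoptr.
      char *ViaInt = reinterpret_cast<char *>(
          reinterpret_cast<std::uintptr_t>(Base) + Off);

      // New pattern: a direct byte-offset pointer add, the C++ analogue of
      // the i8 getelementptr that CreatePtrAdd builds.
      char *ViaGEP = Base + Off;

      assert(ViaInt == ViaGEP); // same address, simpler and more analyzable IR
      return 0;
    }
]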