Diffstat (limited to 'llvm/lib/CodeGen/GlobalISel')
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp  44
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/Utils.cpp          22
2 files changed, 54 insertions(+), 12 deletions(-)
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index fd38c30..bbfae57 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1592,9 +1592,19 @@ bool IRTranslator::translateGetElementPtr(const User &U,
Type *OffsetIRTy = DL->getIndexType(PtrIRTy);
LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
- uint32_t Flags = 0;
+ uint32_t PtrAddFlags = 0;
+ // Each PtrAdd generated to implement the GEP inherits its nuw, nusw, inbounds
+ // flags.
if (const Instruction *I = dyn_cast<Instruction>(&U))
- Flags = MachineInstr::copyFlagsFromInstruction(*I);
+ PtrAddFlags = MachineInstr::copyFlagsFromInstruction(*I);
+
+ auto PtrAddFlagsWithConst = [&](int64_t Offset) {
+ // For nusw/inbounds GEP with an offset that is nonnegative when interpreted
+ // as signed, assume there is no unsigned overflow.
+ if (Offset >= 0 && (PtrAddFlags & MachineInstr::MIFlag::NoUSWrap))
+ return PtrAddFlags | MachineInstr::MIFlag::NoUWrap;
+ return PtrAddFlags;
+ };
// Normalize Vector GEP - all scalar operands should be converted to the
// splat vector.
@@ -1644,7 +1654,9 @@ bool IRTranslator::translateGetElementPtr(const User &U,
if (Offset != 0) {
auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
- BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))
+ BaseReg = MIRBuilder
+ .buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0),
+ PtrAddFlagsWithConst(Offset))
.getReg(0);
Offset = 0;
}
@@ -1668,12 +1680,23 @@ bool IRTranslator::translateGetElementPtr(const User &U,
if (ElementSize != 1) {
auto ElementSizeMIB = MIRBuilder.buildConstant(
getLLTForType(*OffsetIRTy, *DL), ElementSize);
+
+ // The multiplication is NUW if the GEP is NUW and NSW if the GEP is
+ // NUSW.
+ uint32_t ScaleFlags = PtrAddFlags & MachineInstr::MIFlag::NoUWrap;
+ if (PtrAddFlags & MachineInstr::MIFlag::NoUSWrap)
+ ScaleFlags |= MachineInstr::MIFlag::NoSWrap;
+
GepOffsetReg =
- MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0);
- } else
+ MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB, ScaleFlags)
+ .getReg(0);
+ } else {
GepOffsetReg = IdxReg;
+ }
- BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0);
+ BaseReg =
+ MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg, PtrAddFlags)
+ .getReg(0);
}
}
@@ -1681,11 +1704,8 @@ bool IRTranslator::translateGetElementPtr(const User &U,
auto OffsetMIB =
MIRBuilder.buildConstant(OffsetTy, Offset);
- if (Offset >= 0 && cast<GEPOperator>(U).isInBounds())
- Flags |= MachineInstr::MIFlag::NoUWrap;
-
MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0),
- Flags);
+ PtrAddFlagsWithConst(Offset));
return true;
}
@@ -2189,8 +2209,8 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
: TargetOpcode::LIFETIME_END;
- const AllocaInst *AI = cast<AllocaInst>(CI.getArgOperand(1));
- if (!AI->isStaticAlloca())
+ const AllocaInst *AI = dyn_cast<AllocaInst>(CI.getArgOperand(1));
+ if (!AI || !AI->isStaticAlloca())
return true;
MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
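Read together, the IRTranslator hunks above derive the MI flags of the generated instructions from the GEP's wrap flags: every G_PTR_ADD inherits the GEP's nuw/nusw/inbounds flags, a constant non-negative offset on a nusw/inbounds GEP additionally allows nuw on that G_PTR_ADD, and the index-scaling G_MUL is nuw if the GEP is nuw and nsw if the GEP is nusw. Below is a minimal standalone sketch of those two rules; the mask constants are illustrative placeholders rather than the real MachineInstr::MIFlag values, and the helper names merely mirror the lambda and local variable used in the diff.

#include <cstdint>

// Placeholder flag masks; the committed code uses
// MachineInstr::MIFlag::{NoUWrap, NoSWrap, NoUSWrap}.
constexpr uint32_t NoUWrap  = 1u << 0; // "nuw"
constexpr uint32_t NoSWrap  = 1u << 1; // "nsw"
constexpr uint32_t NoUSWrap = 1u << 2; // "nusw"

// Flags for a G_PTR_ADD of a constant offset: keep the GEP's flags, and a
// nusw/inbounds GEP with a non-negative offset also implies nuw.
uint32_t ptrAddFlagsWithConst(uint32_t GEPFlags, int64_t Offset) {
  if (Offset >= 0 && (GEPFlags & NoUSWrap))
    return GEPFlags | NoUWrap;
  return GEPFlags;
}

// Flags for the index-scaling multiply: nuw if the GEP is nuw, nsw if the
// GEP is nusw.
uint32_t scaleFlags(uint32_t GEPFlags) {
  uint32_t Flags = GEPFlags & NoUWrap;
  if (GEPFlags & NoUSWrap)
    Flags |= NoSWrap;
  return Flags;
}
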
diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
index f48bfc0..8955dd0 100644
--- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -1401,6 +1401,21 @@ bool llvm::isBuildVectorConstantSplat(const Register Reg,
return false;
}
+bool llvm::isBuildVectorConstantSplat(const Register Reg,
+ const MachineRegisterInfo &MRI,
+ APInt SplatValue, bool AllowUndef) {
+ if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef)) {
+ if (SplatValAndReg->Value.getBitWidth() < SplatValue.getBitWidth())
+ return APInt::isSameValue(
+ SplatValAndReg->Value.sext(SplatValue.getBitWidth()), SplatValue);
+ return APInt::isSameValue(
+ SplatValAndReg->Value,
+ SplatValue.sext(SplatValAndReg->Value.getBitWidth()));
+ }
+
+ return false;
+}
+
bool llvm::isBuildVectorConstantSplat(const MachineInstr &MI,
const MachineRegisterInfo &MRI,
int64_t SplatValue, bool AllowUndef) {
@@ -1408,6 +1423,13 @@ bool llvm::isBuildVectorConstantSplat(const MachineInstr &MI,
AllowUndef);
}
+bool llvm::isBuildVectorConstantSplat(const MachineInstr &MI,
+ const MachineRegisterInfo &MRI,
+ APInt SplatValue, bool AllowUndef) {
+ return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
+ AllowUndef);
+}
+
std::optional<APInt>
llvm::getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI) {
if (auto SplatValAndReg =
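The new isBuildVectorConstantSplat overloads take the splat constant as an APInt and reconcile differing bit widths by sign-extending the narrower value before comparing, so a caller does not have to pre-truncate or pre-extend the constant to the vector's element width. A small usage sketch under assumptions: the helper name isAllOnesBuildVectorSplat is hypothetical, and the Register and MachineRegisterInfo are expected to come from the surrounding GlobalISel pass.

#include "llvm/ADT/APInt.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

// Hypothetical helper: does Reg define a build-vector splat of -1?
// The APInt here is 64 bits wide; the overload above sign-extends whichever
// side is narrower, so this works for any vector element width.
static bool isAllOnesBuildVectorSplat(llvm::Register Reg,
                                      const llvm::MachineRegisterInfo &MRI) {
  llvm::APInt AllOnes(/*numBits=*/64, /*val=*/-1, /*isSigned=*/true);
  return llvm::isBuildVectorConstantSplat(Reg, MRI, AllOnes,
                                          /*AllowUndef=*/true);
}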