Diffstat (limited to 'llvm/lib/CodeGen')
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp              2
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp        6
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp              17
-rw-r--r--  llvm/lib/CodeGen/BasicBlockSectionsProfileReader.cpp   19
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/Utils.cpp                   2
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp          113
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp        25
7 files changed, 165 insertions, 19 deletions
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 8aa488f..f65d88a 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -1443,7 +1443,7 @@ getBBAddrMapFeature(const MachineFunction &MF, int NumMBBSectionRanges,
MF.hasBBSections() && NumMBBSectionRanges > 1,
// Use static_cast to avoid breakage of tests on windows.
static_cast<bool>(BBAddrMapSkipEmitBBEntries), HasCalls,
- static_cast<bool>(EmitBBHash)};
+ static_cast<bool>(EmitBBHash), false};
}
void AsmPrinter::emitBBAddrMapSection(const MachineFunction &MF) {
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
index 518121e..751d373 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
@@ -1793,9 +1793,13 @@ void DwarfCompileUnit::createBaseTypeDIEs() {
"_" + Twine(Btr.BitSize)).toStringRef(Str));
addUInt(Die, dwarf::DW_AT_encoding, dwarf::DW_FORM_data1, Btr.Encoding);
// Round up to smallest number of bytes that contains this number of bits.
+ // ExprRefedBaseTypes is populated with types referenced by
+ // DW_OP_LLVM_convert operations in location expressions. These are often
+ // byte-sized, but one common counter-example is 1-bit sized conversions
+ // from `i1` types. TODO: Should these use DW_AT_bit_size? See
+ // DwarfUnit::constructTypeDIE.
addUInt(Die, dwarf::DW_AT_byte_size, std::nullopt,
divideCeil(Btr.BitSize, 8));
-
Btr.Die = &Die;
}
}
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
index e40fb76..b16e1315 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
@@ -766,8 +766,19 @@ void DwarfUnit::constructTypeDIE(DIE &Buffer, const DIBasicType *BTy) {
addUInt(Buffer, dwarf::DW_AT_encoding, dwarf::DW_FORM_data1,
BTy->getEncoding());
- uint64_t Size = BTy->getSizeInBits() >> 3;
- addUInt(Buffer, dwarf::DW_AT_byte_size, std::nullopt, Size);
+ uint64_t SizeInBytes = divideCeil(BTy->getSizeInBits(), 8);
+ addUInt(Buffer, dwarf::DW_AT_byte_size, std::nullopt, SizeInBytes);
+ if (BTy->getTag() == dwarf::Tag::DW_TAG_base_type) {
+ // DW_TAG_base_type:
+ // If the value of an object of the given type does not fully occupy the
+ // storage described by a byte size attribute, the base type entry may also
+ // have a DW_AT_bit_size [...] attribute.
+ // TODO: Do big endian targets need DW_AT_data_bit_offset? See discussion in
+ // pull request #164372.
+ if (uint64_t DataSizeInBits = BTy->getDataSizeInBits();
+ DataSizeInBits && DataSizeInBits != SizeInBytes * 8)
+ addUInt(Buffer, dwarf::DW_AT_bit_size, std::nullopt, DataSizeInBits);
+ }
if (BTy->isBigEndian())
addUInt(Buffer, dwarf::DW_AT_endianity, std::nullopt, dwarf::DW_END_big);
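
As a standalone illustration of the byte-size rounding and the new DW_AT_bit_size condition above (a sketch, not part of the patch; the 1-bit values model a base type created for an `i1` conversion):

// Sketch: why divideCeil replaces the old `>> 3` for sub-byte base types.
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>

int main() {
  uint64_t SizeInBits = 1;                        // hypothetical 1-bit type
  assert((SizeInBits >> 3) == 0);                 // old code: byte size 0
  uint64_t SizeInBytes = llvm::divideCeil(SizeInBits, 8);
  assert(SizeInBytes == 1);                       // new code: rounds up to 1
  // The new condition fires because the data size (1 bit) does not fill the
  // rounded storage (8 bits), so DW_AT_bit_size 1 is emitted as well.
  uint64_t DataSizeInBits = 1;
  assert(DataSizeInBits && DataSizeInBits != SizeInBytes * 8);
  return 0;
}
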
@@ -1109,7 +1120,7 @@ void DwarfUnit::constructTypeDIE(DIE &Buffer, const DICompositeType *CTy) {
constructMemberDIE(Buffer, DDTy);
}
} else if (auto *Property = dyn_cast<DIObjCProperty>(Element)) {
- DIE &ElemDie = createAndAddDIE(Property->getTag(), Buffer);
+ DIE &ElemDie = createAndAddDIE(Property->getTag(), Buffer, Property);
StringRef PropertyName = Property->getName();
addString(ElemDie, dwarf::DW_AT_APPLE_property_name, PropertyName);
if (Property->getType())
diff --git a/llvm/lib/CodeGen/BasicBlockSectionsProfileReader.cpp b/llvm/lib/CodeGen/BasicBlockSectionsProfileReader.cpp
index fbcd614..485b44ae 100644
--- a/llvm/lib/CodeGen/BasicBlockSectionsProfileReader.cpp
+++ b/llvm/lib/CodeGen/BasicBlockSectionsProfileReader.cpp
@@ -287,6 +287,25 @@ Error BasicBlockSectionsProfileReader::ReadV1Profile() {
}
continue;
}
+ case 'h': { // Basic block hash specifier.
+ // Skip the profile when the profile iterator (FI) refers to the
+ // past-the-end element.
+ if (FI == ProgramPathAndClusterInfo.end())
+ continue;
+ for (auto BBIDHashStr : Values) {
+ auto [BBIDStr, HashStr] = BBIDHashStr.split(':');
+ unsigned long long BBID = 0, Hash = 0;
+ if (getAsUnsignedInteger(BBIDStr, 10, BBID))
+ return createProfileParseError(Twine("unsigned integer expected: '") +
+ BBIDStr + "'");
+ if (getAsUnsignedInteger(HashStr, 16, Hash))
+ return createProfileParseError(
+ Twine("unsigned integer expected in hex format: '") + HashStr +
+ "'");
+ FI->second.BBHashes[BBID] = Hash;
+ }
+ continue;
+ }
default:
return createProfileParseError(Twine("invalid specifier: '") +
Twine(Specifier) + "'");
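
The loop above accepts values of the form BBID:Hash, with a decimal block ID and a hexadecimal hash. A minimal sketch of that parsing with the same StringRef helpers (the input pair is hypothetical; error handling is reduced to asserts):

// Sketch: parsing one "h" specifier value, mirroring the loop above.
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>

int main() {
  llvm::StringRef BBIDHashStr = "3:1a2b"; // hypothetical BBID:Hash pair
  auto [BBIDStr, HashStr] = BBIDHashStr.split(':');
  unsigned long long BBID = 0, Hash = 0;
  // getAsUnsignedInteger returns true on failure, as in the code above.
  assert(!llvm::getAsUnsignedInteger(BBIDStr, 10, BBID) && BBID == 3);
  assert(!llvm::getAsUnsignedInteger(HashStr, 16, Hash) && Hash == 0x1a2b);
  return 0;
}
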
diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
index ca82857..5fab6ec 100644
--- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -1893,6 +1893,8 @@ static bool canCreateUndefOrPoison(Register Reg, const MachineRegisterInfo &MRI,
case TargetOpcode::G_UADDSAT:
case TargetOpcode::G_SSUBSAT:
case TargetOpcode::G_USUBSAT:
+ case TargetOpcode::G_SBFX:
+ case TargetOpcode::G_UBFX:
return false;
case TargetOpcode::G_SSHLSAT:
case TargetOpcode::G_USHLSAT:
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index cf221bb..bdd6bf0 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2715,6 +2715,12 @@ SDValue DAGCombiner::visitPTRADD(SDNode *N) {
(N->getFlags() & N0->getFlags()) & SDNodeFlags::NoUnsignedWrap;
SDValue Add = DAG.getNode(ISD::ADD, DL, IntVT, {Y, Z}, Flags);
AddToWorklist(Add.getNode());
+ // We can't set InBounds even if both original ptradds were InBounds and
+ // NUW: SDAG usually represents pointers as integers; therefore, the
+ // matched pattern behaves as if it had implicit casts:
+ // (ptradd inbounds (inttoptr (ptrtoint (ptradd inbounds x, y))), z)
+ // The outer inbounds ptradd might therefore rely on a provenance that x
+ // does not have.
return DAG.getMemBasePlusOffset(X, Add, DL, Flags);
}
}
@@ -2740,6 +2746,12 @@ SDValue DAGCombiner::visitPTRADD(SDNode *N) {
// that.
SDNodeFlags Flags =
(N->getFlags() & N0->getFlags()) & SDNodeFlags::NoUnsignedWrap;
+ // We can't set InBounds even if both original ptradds were InBounds and
+ // NUW: SDAG usually represents pointers as integers; therefore, the
+ // matched pattern behaves as if it had implicit casts:
+ // (ptradd inbounds (inttoptr (ptrtoint (ptradd inbounds GA, v))), c)
+ // The outer inbounds ptradd might therefore rely on a provenance that
+ // GA does not have.
SDValue Inner = DAG.getMemBasePlusOffset(GAValue, N1, DL, Flags);
AddToWorklist(Inner.getNode());
return DAG.getMemBasePlusOffset(Inner, N0.getOperand(1), DL, Flags);
@@ -2763,8 +2775,13 @@ SDValue DAGCombiner::visitPTRADD(SDNode *N) {
bool ZIsConstant = DAG.isConstantIntBuildVectorOrConstantInt(Z);
// If both additions in the original were NUW, reassociation preserves that.
- SDNodeFlags ReassocFlags =
- (N->getFlags() & N1->getFlags()) & SDNodeFlags::NoUnsignedWrap;
+ SDNodeFlags CommonFlags = N->getFlags() & N1->getFlags();
+ SDNodeFlags ReassocFlags = CommonFlags & SDNodeFlags::NoUnsignedWrap;
+ if (CommonFlags.hasNoUnsignedWrap()) {
+ // If both operations are NUW and the PTRADD is inbounds, the offsets are
+ // both non-negative, so the reassociated PTRADDs are also inbounds.
+ ReassocFlags |= N->getFlags() & SDNodeFlags::InBounds;
+ }
if (ZIsConstant != YIsConstant) {
if (YIsConstant)
@@ -22743,7 +22760,10 @@ SDValue DAGCombiner::replaceStoreOfInsertLoad(StoreSDNode *ST) {
NewPtr = DAG.getMemBasePlusOffset(Ptr, TypeSize::getFixed(COffset), DL);
PointerInfo = ST->getPointerInfo().getWithOffset(COffset);
} else {
- NewPtr = TLI.getVectorElementPointer(DAG, Ptr, Value.getValueType(), Idx);
+ // The original DAG loaded the entire vector from memory, so arithmetic
+ // within it must be inbounds.
+ NewPtr = TLI.getInboundsVectorElementPointer(DAG, Ptr, Value.getValueType(),
+ Idx);
}
return DAG.getStore(Chain, DL, Elt, NewPtr, PointerInfo, ST->getAlign(),
@@ -23506,6 +23526,93 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
// inselt undef, InVal, EltNo --> build_vector < InVal, InVal, ... >
if (InVec.isUndef() && TLI.shouldSplatInsEltVarIndex(VT))
return DAG.getSplat(VT, DL, InVal);
+
+ // Extend this type to be byte-addressable
+ EVT OldVT = VT;
+ EVT EltVT = VT.getVectorElementType();
+ bool IsByteSized = EltVT.isByteSized();
+ if (!IsByteSized) {
+ EltVT =
+ EltVT.changeTypeToInteger().getRoundIntegerType(*DAG.getContext());
+ VT = VT.changeElementType(EltVT);
+ }
+
+ // Check if this operation will be handled the default way for its type.
+ auto IsTypeDefaultHandled = [this](EVT VT) {
+ return TLI.getTypeAction(*DAG.getContext(), VT) ==
+ TargetLowering::TypeSplitVector ||
+ TLI.isOperationExpand(ISD::INSERT_VECTOR_ELT, VT);
+ };
+
+ // Check if this operation is illegal and will be handled the default way,
+ // even after extending the type to be byte-addressable.
+ if (IsTypeDefaultHandled(OldVT) && IsTypeDefaultHandled(VT)) {
+ // For each dynamic insertelt, the default way will save the vector to
+ // the stack, store at an offset, and load the modified vector. This can
+ // dramatically increase code size if we have a chain of insertelts on a
+ // large vector: requiring O(V*C) stores/loads where V = length of
+ // vector and C is length of chain. If each insertelt is only fed into the
+ // next, the vector is write-only across this chain, and we can just
+ // save once before the chain and load after in O(V + C) operations.
+ SmallVector<SDNode *> Seq{N};
+ unsigned NumDynamic = 1;
+ while (true) {
+ SDValue InVec = Seq.back()->getOperand(0);
+ if (InVec.getOpcode() != ISD::INSERT_VECTOR_ELT)
+ break;
+ Seq.push_back(InVec.getNode());
+ NumDynamic += !isa<ConstantSDNode>(InVec.getOperand(2));
+ }
+
+ // It always and only makes sense to lower this sequence when we have more
+ // than one dynamic insertelt, since we will not have more than V constant
+ // insertelts, so we will be reducing the total number of stores+loads.
+ if (NumDynamic > 1) {
+ // In cases where the vector is illegal it will be broken down into
+ // parts and stored in parts - we should use the alignment for the
+ // smallest part.
+ Align SmallestAlign = DAG.getReducedAlign(VT, /*UseABI=*/false);
+ SDValue StackPtr =
+ DAG.CreateStackTemporary(VT.getStoreSize(), SmallestAlign);
+ auto &MF = DAG.getMachineFunction();
+ int FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
+ auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIndex);
+
+ // Save the vector to the stack
+ SDValue InVec = Seq.back()->getOperand(0);
+ if (!IsByteSized)
+ InVec = DAG.getNode(ISD::ANY_EXTEND, DL, VT, InVec);
+ SDValue Store = DAG.getStore(DAG.getEntryNode(), DL, InVec, StackPtr,
+ PtrInfo, SmallestAlign);
+
+ // Lower each dynamic insertelt to a store
+ for (SDNode *N : reverse(Seq)) {
+ SDValue Elmnt = N->getOperand(1);
+ SDValue Index = N->getOperand(2);
+
+ // Check if we have to extend the element type
+ if (!IsByteSized && Elmnt.getValueType().bitsLT(EltVT))
+ Elmnt = DAG.getNode(ISD::ANY_EXTEND, DL, EltVT, Elmnt);
+
+ // Store the new element. This may be larger than the vector element
+ // type, so use a truncating store.
+ SDValue EltPtr =
+ TLI.getVectorElementPointer(DAG, StackPtr, VT, Index);
+ EVT EltVT = Elmnt.getValueType();
+ Store = DAG.getTruncStore(
+ Store, DL, Elmnt, EltPtr, MachinePointerInfo::getUnknownStack(MF),
+ EltVT,
+ commonAlignment(SmallestAlign, EltVT.getFixedSizeInBits() / 8));
+ }
+
+ // Load the saved vector from the stack
+ SDValue Load =
+ DAG.getLoad(VT, DL, Store, StackPtr, PtrInfo, SmallestAlign);
+ SDValue LoadV = Load.getValue(0);
+ return IsByteSized ? LoadV : DAG.getAnyExtOrTrunc(LoadV, DL, OldVT);
+ }
+ }
+
return SDValue();
}
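
To put rough numbers on the O(V*C) vs. O(V + C) comment above, a toy counting model (illustrative assumptions only: the vector is spilled and reloaded element-by-element, and V and C are arbitrary example sizes):

// Toy model: memory ops for a chain of C dynamic insertelts on a
// V-element vector, before and after the combine above.
#include <cstdio>

int main() {
  unsigned V = 16, C = 8; // hypothetical vector and chain lengths
  // Default expansion: each dynamic insertelt spills the vector, stores
  // one element, and reloads the vector: about C * (2 * V + 1) ops.
  unsigned Default = C * (2 * V + 1);
  // Combined lowering: one spill, C element stores, one reload.
  unsigned Combined = 2 * V + C;
  std::printf("default ~%u ops, combined ~%u ops\n", Default, Combined);
  return 0;
}
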
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index da4e409..9bdf822 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -10668,19 +10668,20 @@ static SDValue clampDynamicVectorIndex(SelectionDAG &DAG, SDValue Idx,
DAG.getConstant(MaxIndex, dl, IdxVT));
}
-SDValue TargetLowering::getVectorElementPointer(SelectionDAG &DAG,
- SDValue VecPtr, EVT VecVT,
- SDValue Index) const {
+SDValue
+TargetLowering::getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr,
+ EVT VecVT, SDValue Index,
+ const SDNodeFlags PtrArithFlags) const {
return getVectorSubVecPointer(
DAG, VecPtr, VecVT,
EVT::getVectorVT(*DAG.getContext(), VecVT.getVectorElementType(), 1),
- Index);
+ Index, PtrArithFlags);
}
-SDValue TargetLowering::getVectorSubVecPointer(SelectionDAG &DAG,
- SDValue VecPtr, EVT VecVT,
- EVT SubVecVT,
- SDValue Index) const {
+SDValue
+TargetLowering::getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr,
+ EVT VecVT, EVT SubVecVT, SDValue Index,
+ const SDNodeFlags PtrArithFlags) const {
SDLoc dl(Index);
// Make sure the index type is big enough to compute in.
Index = DAG.getZExtOrTrunc(Index, dl, VecPtr.getValueType());
@@ -10704,7 +10705,7 @@ SDValue TargetLowering::getVectorSubVecPointer(SelectionDAG &DAG,
Index = DAG.getNode(ISD::MUL, dl, IdxVT, Index,
DAG.getConstant(EltSize, dl, IdxVT));
- return DAG.getMemBasePlusOffset(VecPtr, Index, dl);
+ return DAG.getMemBasePlusOffset(VecPtr, Index, dl, PtrArithFlags);
}
//===----------------------------------------------------------------------===//
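
The getInboundsVectorElementPointer used below and in DAGCombiner.cpp is not shown in this diff; presumably it is a thin wrapper in TargetLowering.h along these lines (a sketch built on the SDNodeFlags::InBounds flag the patch uses elsewhere, not a confirmed signature):

// Presumed header-side companion (sketch): same as getVectorElementPointer,
// but marks the pointer arithmetic as inbounds. Sound when the element is
// known to lie within an object that was accessed as a whole, e.g. by a
// full-vector load or store.
SDValue getInboundsVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr,
                                        EVT VecVT, SDValue Index) const {
  return getVectorElementPointer(DAG, VecPtr, VecVT, Index,
                                 SDNodeFlags::InBounds);
}
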
@@ -12382,8 +12383,10 @@ SDValue TargetLowering::scalarizeExtractedVectorLoad(EVT ResultVT,
!IsFast)
return SDValue();
- SDValue NewPtr =
- getVectorElementPointer(DAG, OriginalLoad->getBasePtr(), InVecVT, EltNo);
+ // The original DAG loaded the entire vector from memory, so arithmetic
+ // within it must be inbounds.
+ SDValue NewPtr = getInboundsVectorElementPointer(
+ DAG, OriginalLoad->getBasePtr(), InVecVT, EltNo);
// We are replacing a vector load with a scalar load. The new load must have
// identical memory op ordering to the original.