aboutsummaryrefslogtreecommitdiff
path: root/llvm/lib
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/lib')
-rw-r--r--llvm/lib/CGData/CodeGenDataWriter.cpp4
-rw-r--r--llvm/lib/CodeGen/RDFLiveness.cpp9
-rw-r--r--llvm/lib/MC/DXContainerRootSignature.cpp5
-rw-r--r--llvm/lib/Object/ArchiveWriter.cpp20
-rw-r--r--llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp12
-rw-r--r--llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp2
-rw-r--r--llvm/lib/ProfileData/InstrProf.cpp2
-rw-r--r--llvm/lib/ProfileData/InstrProfReader.cpp10
-rw-r--r--llvm/lib/Target/ARM/ARMAsmPrinter.cpp19
-rw-r--r--llvm/lib/Target/Hexagon/RDFCopy.cpp2
-rw-r--r--llvm/lib/Target/Hexagon/RDFCopy.h8
-rw-r--r--llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp2
-rw-r--r--llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td16
-rw-r--r--llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td16
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp51
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVBuiltins.td24
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp5
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp11
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVInstrInfo.td33
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp14
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp68
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp24
-rw-r--r--llvm/lib/Transforms/Vectorize/LoopVectorize.cpp7
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlan.cpp15
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp17
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanUtils.cpp4
26 files changed, 340 insertions, 60 deletions
diff --git a/llvm/lib/CGData/CodeGenDataWriter.cpp b/llvm/lib/CGData/CodeGenDataWriter.cpp
index 14a8558..a2bbcee 100644
--- a/llvm/lib/CGData/CodeGenDataWriter.cpp
+++ b/llvm/lib/CGData/CodeGenDataWriter.cpp
@@ -40,7 +40,7 @@ void CGDataOStream::patch(ArrayRef<CGDataPatchItem> P) {
for (const auto &K : P) {
for (size_t I = 0; I < K.D.size(); ++I) {
uint64_t Bytes =
- endian::byte_swap<uint64_t, llvm::endianness::little>(K.D[I]);
+ endian::byte_swap<uint64_t>(K.D[I], llvm::endianness::little);
Data.replace(K.Pos + I * sizeof(uint64_t), sizeof(uint64_t),
reinterpret_cast<const char *>(&Bytes), sizeof(uint64_t));
}
@@ -52,7 +52,7 @@ void CGDataOStream::patch(ArrayRef<CGDataPatchItem> P) {
for (const auto &K : P) {
for (size_t I = 0; I < K.D.size(); ++I) {
uint64_t Bytes =
- endian::byte_swap<uint64_t, llvm::endianness::little>(K.D[I]);
+ endian::byte_swap<uint64_t>(K.D[I], llvm::endianness::little);
VOStream.pwrite(reinterpret_cast<const char *>(&Bytes),
sizeof(uint64_t), K.Pos + I * sizeof(uint64_t));
}
diff --git a/llvm/lib/CodeGen/RDFLiveness.cpp b/llvm/lib/CodeGen/RDFLiveness.cpp
index 318422b..2e1cf49 100644
--- a/llvm/lib/CodeGen/RDFLiveness.cpp
+++ b/llvm/lib/CodeGen/RDFLiveness.cpp
@@ -652,8 +652,9 @@ void Liveness::computePhiInfo() {
// defs, cache the result of subtracting these defs from a given register
// ref.
using RefHash = std::hash<RegisterRef>;
- using RefEqual = std::equal_to<RegisterRef>;
- using SubMap = std::unordered_map<RegisterRef, RegisterRef>;
+ using RefEqual = RegisterRefEqualTo;
+ using SubMap =
+ std::unordered_map<RegisterRef, RegisterRef, RefHash, RefEqual>;
std::unordered_map<RegisterAggr, SubMap> Subs;
auto ClearIn = [](RegisterRef RR, const RegisterAggr &Mid, SubMap &SM) {
if (Mid.empty())
@@ -868,7 +869,7 @@ void Liveness::computeLiveIns() {
std::vector<RegisterRef> LV;
for (const MachineBasicBlock::RegisterMaskPair &LI : B.liveins())
LV.push_back(RegisterRef(LI.PhysReg, LI.LaneMask));
- llvm::sort(LV, std::less<RegisterRef>(PRI));
+ llvm::sort(LV, RegisterRefLess(PRI));
dbgs() << printMBBReference(B) << "\t rec = {";
for (auto I : LV)
dbgs() << ' ' << Print(I, DFG);
@@ -878,7 +879,7 @@ void Liveness::computeLiveIns() {
LV.clear();
for (RegisterRef RR : LiveMap[&B].refs())
LV.push_back(RR);
- llvm::sort(LV, std::less<RegisterRef>(PRI));
+ llvm::sort(LV, RegisterRefLess(PRI));
dbgs() << "\tcomp = {";
for (auto I : LV)
dbgs() << ' ' << Print(I, DFG);
diff --git a/llvm/lib/MC/DXContainerRootSignature.cpp b/llvm/lib/MC/DXContainerRootSignature.cpp
index 2338370..713aa3d8 100644
--- a/llvm/lib/MC/DXContainerRootSignature.cpp
+++ b/llvm/lib/MC/DXContainerRootSignature.cpp
@@ -23,9 +23,8 @@ static uint32_t writePlaceholder(raw_svector_ostream &Stream) {
static uint32_t rewriteOffsetToCurrentByte(raw_svector_ostream &Stream,
uint32_t Offset) {
uint32_t ByteOffset = Stream.tell();
- uint32_t Value =
- support::endian::byte_swap<uint32_t, llvm::endianness::little>(
- ByteOffset);
+ uint32_t Value = support::endian::byte_swap<uint32_t>(
+ ByteOffset, llvm::endianness::little);
Stream.pwrite(reinterpret_cast<const char *>(&Value), sizeof(Value), Offset);
return ByteOffset;
}
diff --git a/llvm/lib/Object/ArchiveWriter.cpp b/llvm/lib/Object/ArchiveWriter.cpp
index 6fc0889..a112597 100644
--- a/llvm/lib/Object/ArchiveWriter.cpp
+++ b/llvm/lib/Object/ArchiveWriter.cpp
@@ -1119,10 +1119,26 @@ Error writeArchiveToStream(raw_ostream &Out,
// to switch to 64-bit. Note that the file can be larger than 4GB as long as
// the last member starts before the 4GB offset.
if (*HeadersSize + LastMemberHeaderOffset >= Sym64Threshold) {
- if (Kind == object::Archive::K_DARWIN)
+ switch (Kind) {
+ case object::Archive::K_COFF:
+ // COFF format has no 64-bit version, so we use GNU64 instead.
+ if (!SymMap.Map.empty() && !SymMap.ECMap.empty())
+      // Only the COFF format supports the ECSYMBOLS section, so don't use
+ // GNU64 when two symbol maps are required.
+ return make_error<object::GenericBinaryError>(
+ "Archive is too large: ARM64X does not support archives larger "
+ "than 4GB");
+ // Since this changes the headers, we need to recalculate everything.
+ return writeArchiveToStream(Out, NewMembers, WriteSymtab,
+ object::Archive::K_GNU64, Deterministic,
+ Thin, IsEC, Warn);
+ case object::Archive::K_DARWIN:
Kind = object::Archive::K_DARWIN64;
- else
+ break;
+ default:
Kind = object::Archive::K_GNU64;
+ break;
+ }
HeadersSize.reset();
}
}
diff --git a/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp b/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp
index fc2577e..075ad8d 100644
--- a/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp
+++ b/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp
@@ -949,9 +949,9 @@ loadTestingFormat(StringRef Data, StringRef CompilationDir) {
if (Data.size() < sizeof(uint64_t))
return make_error<CoverageMapError>(coveragemap_error::malformed,
"the size of data is too small");
- auto TestingVersion =
- support::endian::byte_swap<uint64_t, llvm::endianness::little>(
- *reinterpret_cast<const uint64_t *>(Data.data()));
+ auto TestingVersion = support::endian::byte_swap<uint64_t>(
+ *reinterpret_cast<const uint64_t *>(Data.data()),
+ llvm::endianness::little);
Data = Data.substr(sizeof(uint64_t));
// Read the ProfileNames data.
@@ -1274,9 +1274,9 @@ BinaryCoverageReader::create(
std::vector<std::unique_ptr<BinaryCoverageReader>> Readers;
if (ObjectBuffer.getBuffer().size() > sizeof(TestingFormatMagic)) {
- uint64_t Magic =
- support::endian::byte_swap<uint64_t, llvm::endianness::little>(
- *reinterpret_cast<const uint64_t *>(ObjectBuffer.getBufferStart()));
+ uint64_t Magic = support::endian::byte_swap<uint64_t>(
+ *reinterpret_cast<const uint64_t *>(ObjectBuffer.getBufferStart()),
+ llvm::endianness::little);
if (Magic == TestingFormatMagic) {
// This is a special format used for testing.
auto ReaderOrErr =
diff --git a/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp b/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp
index 12b1687..3875f01 100644
--- a/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp
+++ b/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp
@@ -292,7 +292,7 @@ void CoverageMappingWriter::write(raw_ostream &OS) {
void TestingFormatWriter::write(raw_ostream &OS, TestingFormatVersion Version) {
auto ByteSwap = [](uint64_t N) {
- return support::endian::byte_swap<uint64_t, llvm::endianness::little>(N);
+ return support::endian::byte_swap<uint64_t>(N, llvm::endianness::little);
};
// Output a 64bit magic number.
diff --git a/llvm/lib/ProfileData/InstrProf.cpp b/llvm/lib/ProfileData/InstrProf.cpp
index e1c6315..3c8e44a 100644
--- a/llvm/lib/ProfileData/InstrProf.cpp
+++ b/llvm/lib/ProfileData/InstrProf.cpp
@@ -292,7 +292,7 @@ void ProfOStream::patch(ArrayRef<PatchItem> P) {
for (const auto &K : P) {
for (int I = 0, E = K.D.size(); I != E; I++) {
uint64_t Bytes =
- endian::byte_swap<uint64_t, llvm::endianness::little>(K.D[I]);
+ endian::byte_swap<uint64_t>(K.D[I], llvm::endianness::little);
Data.replace(K.Pos + I * sizeof(uint64_t), sizeof(uint64_t),
(const char *)&Bytes, sizeof(uint64_t));
}
diff --git a/llvm/lib/ProfileData/InstrProfReader.cpp b/llvm/lib/ProfileData/InstrProfReader.cpp
index 1da92ea..d2ae4b5 100644
--- a/llvm/lib/ProfileData/InstrProfReader.cpp
+++ b/llvm/lib/ProfileData/InstrProfReader.cpp
@@ -1186,10 +1186,10 @@ IndexedInstrProfReader::readSummary(IndexedInstrProf::ProfVersion Version,
if (Version >= IndexedInstrProf::Version4) {
const IndexedInstrProf::Summary *SummaryInLE =
reinterpret_cast<const IndexedInstrProf::Summary *>(Cur);
- uint64_t NFields = endian::byte_swap<uint64_t, llvm::endianness::little>(
- SummaryInLE->NumSummaryFields);
- uint64_t NEntries = endian::byte_swap<uint64_t, llvm::endianness::little>(
- SummaryInLE->NumCutoffEntries);
+ uint64_t NFields = endian::byte_swap<uint64_t>(
+ SummaryInLE->NumSummaryFields, llvm::endianness::little);
+ uint64_t NEntries = endian::byte_swap<uint64_t>(
+ SummaryInLE->NumCutoffEntries, llvm::endianness::little);
uint32_t SummarySize =
IndexedInstrProf::Summary::getSize(NFields, NEntries);
std::unique_ptr<IndexedInstrProf::Summary> SummaryData =
@@ -1198,7 +1198,7 @@ IndexedInstrProfReader::readSummary(IndexedInstrProf::ProfVersion Version,
const uint64_t *Src = reinterpret_cast<const uint64_t *>(SummaryInLE);
uint64_t *Dst = reinterpret_cast<uint64_t *>(SummaryData.get());
for (unsigned I = 0; I < SummarySize / sizeof(uint64_t); I++)
- Dst[I] = endian::byte_swap<uint64_t, llvm::endianness::little>(Src[I]);
+ Dst[I] = endian::byte_swap<uint64_t>(Src[I], llvm::endianness::little);
SummaryEntryVector DetailedSummary;
for (unsigned I = 0; I < SummaryData->NumCutoffEntries; I++) {
diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
index 2381effb..1f773e2 100644
--- a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -632,6 +632,19 @@ static bool checkDenormalAttributeConsistency(const Module &M, StringRef Attr,
});
}
+// Returns true if any function's denormal mode differs from the first
+// function's, i.e. the module's denormal modes are inconsistent.
+static bool checkDenormalAttributeInconsistency(const Module &M) {
+ auto F = M.functions().begin();
+ auto E = M.functions().end();
+ if (F == E)
+ return false;
+ DenormalMode Value = F->getDenormalModeRaw();
+ ++F;
+ return std::any_of(F, E, [&](const Function &F) {
+ return !F.isDeclaration() && F.getDenormalModeRaw() != Value;
+ });
+}
+
void ARMAsmPrinter::emitAttributes() {
MCTargetStreamer &TS = *OutStreamer->getTargetStreamer();
ARMTargetStreamer &ATS = static_cast<ARMTargetStreamer &>(TS);
@@ -698,7 +711,9 @@ void ARMAsmPrinter::emitAttributes() {
DenormalMode::getPositiveZero()))
ATS.emitAttribute(ARMBuildAttrs::ABI_FP_denormal,
ARMBuildAttrs::PositiveZero);
- else if (!TM.Options.UnsafeFPMath)
+ else if (checkDenormalAttributeInconsistency(*MMI->getModule()) ||
+ checkDenormalAttributeConsistency(
+ *MMI->getModule(), "denormal-fp-math", DenormalMode::getIEEE()))
ATS.emitAttribute(ARMBuildAttrs::ABI_FP_denormal,
ARMBuildAttrs::IEEEDenormals);
else {
@@ -733,7 +748,7 @@ void ARMAsmPrinter::emitAttributes() {
TM.Options.NoTrappingFPMath)
ATS.emitAttribute(ARMBuildAttrs::ABI_FP_exceptions,
ARMBuildAttrs::Not_Allowed);
- else if (!TM.Options.UnsafeFPMath) {
+ else {
ATS.emitAttribute(ARMBuildAttrs::ABI_FP_exceptions, ARMBuildAttrs::Allowed);
// If the user has permitted this code to choose the IEEE 754
diff --git a/llvm/lib/Target/Hexagon/RDFCopy.cpp b/llvm/lib/Target/Hexagon/RDFCopy.cpp
index fafdad0..3b1d3bd 100644
--- a/llvm/lib/Target/Hexagon/RDFCopy.cpp
+++ b/llvm/lib/Target/Hexagon/RDFCopy.cpp
@@ -108,7 +108,7 @@ bool CopyPropagation::scanBlock(MachineBasicBlock *B) {
for (NodeAddr<InstrNode*> IA : BA.Addr->members(DFG)) {
if (DFG.IsCode<NodeAttrs::Stmt>(IA)) {
NodeAddr<StmtNode*> SA = IA;
- EqualityMap EM(std::less<RegisterRef>(DFG.getPRI()));
+ EqualityMap EM(RegisterRefLess(DFG.getPRI()));
if (interpretAsCopy(SA.Addr->getCode(), EM))
recordCopy(SA, EM);
}
diff --git a/llvm/lib/Target/Hexagon/RDFCopy.h b/llvm/lib/Target/Hexagon/RDFCopy.h
index e4fb898..92b2c65 100644
--- a/llvm/lib/Target/Hexagon/RDFCopy.h
+++ b/llvm/lib/Target/Hexagon/RDFCopy.h
@@ -25,8 +25,8 @@ class MachineInstr;
namespace rdf {
struct CopyPropagation {
- CopyPropagation(DataFlowGraph &dfg) : MDT(dfg.getDT()), DFG(dfg),
- RDefMap(std::less<RegisterRef>(DFG.getPRI())) {}
+ CopyPropagation(DataFlowGraph &dfg)
+ : MDT(dfg.getDT()), DFG(dfg), RDefMap(RegisterRefLess(DFG.getPRI())) {}
virtual ~CopyPropagation() = default;
@@ -35,7 +35,7 @@ namespace rdf {
bool trace() const { return Trace; }
DataFlowGraph &getDFG() { return DFG; }
- using EqualityMap = std::map<RegisterRef, RegisterRef>;
+ using EqualityMap = std::map<RegisterRef, RegisterRef, RegisterRefLess>;
virtual bool interpretAsCopy(const MachineInstr *MI, EqualityMap &EM);
private:
@@ -45,7 +45,7 @@ namespace rdf {
bool Trace = false;
// map: register -> (map: stmt -> reaching def)
- std::map<RegisterRef,std::map<NodeId,NodeId>> RDefMap;
+ std::map<RegisterRef, std::map<NodeId, NodeId>, RegisterRefLess> RDefMap;
// map: statement -> (map: dst reg -> src reg)
std::map<NodeId, EqualityMap> CopyMap;
std::vector<NodeId> Copies;
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 94f53d5..ecd003c 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -340,6 +340,7 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
{ISD::SETNE, ISD::SETGE, ISD::SETGT, ISD::SETUGE, ISD::SETUGT}, VT,
Expand);
setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
+ setOperationAction(ISD::ABS, VT, Legal);
setOperationAction(ISD::ABDS, VT, Legal);
setOperationAction(ISD::ABDU, VT, Legal);
setOperationAction(ISD::SADDSAT, VT, Legal);
@@ -419,6 +420,7 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
{ISD::SETNE, ISD::SETGE, ISD::SETGT, ISD::SETUGE, ISD::SETUGT}, VT,
Expand);
setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
+ setOperationAction(ISD::ABS, VT, Legal);
setOperationAction(ISD::ABDS, VT, Legal);
setOperationAction(ISD::ABDU, VT, Legal);
setOperationAction(ISD::SADDSAT, VT, Legal);
diff --git a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
index adfe990..bbc0489 100644
--- a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
@@ -2015,10 +2015,26 @@ def : Pat<(v4i32(fp_to_uint v4f64:$vj)),
(XVFTINTRZ_LU_D v4f64:$vj)),
sub_128)>;
+// abs
+def : Pat<(abs v32i8:$xj), (XVMAX_B v32i8:$xj, (XVNEG_B v32i8:$xj))>;
+def : Pat<(abs v16i16:$xj), (XVMAX_H v16i16:$xj, (XVNEG_H v16i16:$xj))>;
+def : Pat<(abs v8i32:$xj), (XVMAX_W v8i32:$xj, (XVNEG_W v8i32:$xj))>;
+def : Pat<(abs v4i64:$xj), (XVMAX_D v4i64:$xj, (XVNEG_D v4i64:$xj))>;
+
// XVABSD_{B/H/W/D}[U]
defm : PatXrXr<abds, "XVABSD">;
defm : PatXrXrU<abdu, "XVABSD">;
+// XVADDA_{B/H/W/D}
+def : Pat<(add (v32i8 (abs v32i8:$xj)), (v32i8 (abs v32i8:$xk))),
+ (XVADDA_B v32i8:$xj, v32i8:$xk)>;
+def : Pat<(add (v16i16 (abs v16i16:$xj)), (v16i16 (abs v16i16:$xk))),
+ (XVADDA_H v16i16:$xj, v16i16:$xk)>;
+def : Pat<(add (v8i32 (abs v8i32:$xj)), (v8i32 (abs v8i32:$xk))),
+ (XVADDA_W v8i32:$xj, v8i32:$xk)>;
+def : Pat<(add (v4i64 (abs v4i64:$xj)), (v4i64 (abs v4i64:$xk))),
+ (XVADDA_D v4i64:$xj, v4i64:$xk)>;
+
// XVSADD_{B/H/W/D}[U], XVSSUB_{B/H/W/D}[U]
defm : PatXrXr<saddsat, "XVSADD">;
defm : PatXrXr<ssubsat, "XVSSUB">;
diff --git a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
index 2c36099..8d1dc99 100644
--- a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
@@ -2154,10 +2154,26 @@ def : Pat<(f32 f32imm_vldi:$in),
def : Pat<(f64 f64imm_vldi:$in),
(f64 (EXTRACT_SUBREG (VLDI (to_f64imm_vldi f64imm_vldi:$in)), sub_64))>;
+// abs
+def : Pat<(abs v16i8:$vj), (VMAX_B v16i8:$vj, (VNEG_B v16i8:$vj))>;
+def : Pat<(abs v8i16:$vj), (VMAX_H v8i16:$vj, (VNEG_H v8i16:$vj))>;
+def : Pat<(abs v4i32:$vj), (VMAX_W v4i32:$vj, (VNEG_W v4i32:$vj))>;
+def : Pat<(abs v2i64:$vj), (VMAX_D v2i64:$vj, (VNEG_D v2i64:$vj))>;
+
// VABSD_{B/H/W/D}[U]
defm : PatVrVr<abds, "VABSD">;
defm : PatVrVrU<abdu, "VABSD">;
+// VADDA_{B/H/W/D}
+def : Pat<(add (v16i8 (abs v16i8:$vj)), (v16i8 (abs v16i8:$vk))),
+ (VADDA_B v16i8:$vj, v16i8:$vk)>;
+def : Pat<(add (v8i16 (abs v8i16:$vj)), (v8i16 (abs v8i16:$vk))),
+ (VADDA_H v8i16:$vj, v8i16:$vk)>;
+def : Pat<(add (v4i32 (abs v4i32:$vj)), (v4i32 (abs v4i32:$vk))),
+ (VADDA_W v4i32:$vj, v4i32:$vk)>;
+def : Pat<(add (v2i64 (abs v2i64:$vj)), (v2i64 (abs v2i64:$vk))),
+ (VADDA_D v2i64:$vj, v2i64:$vk)>;
+
// VSADD_{B/H/W/D}[U], VSSUB_{B/H/W/D}[U]
defm : PatVrVr<saddsat, "VSADD">;
defm : PatVrVr<ssubsat, "VSSUB">;
diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
index 86f4459..f704d3a 100644
--- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
@@ -1096,6 +1096,41 @@ static bool build2DBlockIOINTELInst(const SPIRV::IncomingCall *Call,
return true;
}
+static bool buildPipeInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
+ unsigned Scope, MachineIRBuilder &MIRBuilder,
+ SPIRVGlobalRegistry *GR) {
+ switch (Opcode) {
+ case SPIRV::OpCommitReadPipe:
+ case SPIRV::OpCommitWritePipe:
+ return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
+ case SPIRV::OpGroupCommitReadPipe:
+ case SPIRV::OpGroupCommitWritePipe:
+ case SPIRV::OpGroupReserveReadPipePackets:
+ case SPIRV::OpGroupReserveWritePipePackets: {
+ Register ScopeConstReg =
+ MIRBuilder.buildConstant(LLT::scalar(32), Scope).getReg(0);
+ MachineRegisterInfo *MRI = MIRBuilder.getMRI();
+ MRI->setRegClass(ScopeConstReg, &SPIRV::iIDRegClass);
+ MachineInstrBuilder MIB;
+ MIB = MIRBuilder.buildInstr(Opcode);
+ // Add Return register and type.
+ if (Opcode == SPIRV::OpGroupReserveReadPipePackets ||
+ Opcode == SPIRV::OpGroupReserveWritePipePackets)
+ MIB.addDef(Call->ReturnRegister)
+ .addUse(GR->getSPIRVTypeID(Call->ReturnType));
+
+ MIB.addUse(ScopeConstReg);
+ for (unsigned int i = 0; i < Call->Arguments.size(); ++i)
+ MIB.addUse(Call->Arguments[i]);
+
+ return true;
+ }
+ default:
+ return buildOpFromWrapper(MIRBuilder, Opcode, Call,
+ GR->getSPIRVTypeID(Call->ReturnType));
+ }
+}
+
static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) {
switch (dim) {
case SPIRV::Dim::DIM_1D:
@@ -2350,6 +2385,20 @@ static bool generate2DBlockIOINTELInst(const SPIRV::IncomingCall *Call,
return build2DBlockIOINTELInst(Call, Opcode, MIRBuilder, GR);
}
+static bool generatePipeInst(const SPIRV::IncomingCall *Call,
+ MachineIRBuilder &MIRBuilder,
+ SPIRVGlobalRegistry *GR) {
+ const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
+ unsigned Opcode =
+ SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
+
+ unsigned Scope = SPIRV::Scope::Workgroup;
+ if (Builtin->Name.contains("sub_group"))
+ Scope = SPIRV::Scope::Subgroup;
+
+ return buildPipeInst(Call, Opcode, Scope, MIRBuilder, GR);
+}
+
static bool buildNDRange(const SPIRV::IncomingCall *Call,
MachineIRBuilder &MIRBuilder,
SPIRVGlobalRegistry *GR) {
@@ -2948,6 +2997,8 @@ std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
return generateTernaryBitwiseFunctionINTELInst(Call.get(), MIRBuilder, GR);
case SPIRV::Block2DLoadStore:
return generate2DBlockIOINTELInst(Call.get(), MIRBuilder, GR);
+ case SPIRV::Pipe:
+ return generatePipeInst(Call.get(), MIRBuilder, GR);
}
return false;
}
diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.td b/llvm/lib/Target/SPIRV/SPIRVBuiltins.td
index d08560b..2a8deb6 100644
--- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.td
+++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.td
@@ -69,6 +69,7 @@ def ExtendedBitOps : BuiltinGroup;
def BindlessINTEL : BuiltinGroup;
def TernaryBitwiseINTEL : BuiltinGroup;
def Block2DLoadStore : BuiltinGroup;
+def Pipe : BuiltinGroup;
//===----------------------------------------------------------------------===//
// Class defining a demangled builtin record. The information in the record
@@ -633,6 +634,29 @@ defm : DemangledNativeBuiltin<"__spirv_AtomicSMax", OpenCL_std, Atomic, 4, 4, Op
defm : DemangledNativeBuiltin<"__spirv_AtomicUMin", OpenCL_std, Atomic, 4, 4, OpAtomicUMin>;
defm : DemangledNativeBuiltin<"__spirv_AtomicUMax", OpenCL_std, Atomic, 4, 4, OpAtomicUMax>;
+// Pipe Instruction
+defm : DemangledNativeBuiltin<"__read_pipe_2", OpenCL_std, Pipe,2, 2, OpReadPipe>;
+defm : DemangledNativeBuiltin<"__write_pipe_2", OpenCL_std, Pipe, 2, 2, OpWritePipe>;
+defm : DemangledNativeBuiltin<"__read_pipe_4", OpenCL_std, Pipe,4, 4, OpReservedReadPipe>;
+defm : DemangledNativeBuiltin<"__write_pipe_4", OpenCL_std, Pipe, 4, 4, OpReservedWritePipe>;
+defm : DemangledNativeBuiltin<"__reserve_read_pipe", OpenCL_std, Pipe, 2, 2, OpReserveReadPipePackets>;
+defm : DemangledNativeBuiltin<"__reserve_write_pipe", OpenCL_std, Pipe, 2, 2, OpReserveWritePipePackets>;
+defm : DemangledNativeBuiltin<"__commit_read_pipe", OpenCL_std, Pipe, 2, 2, OpCommitReadPipe>;
+defm : DemangledNativeBuiltin<"__commit_write_pipe", OpenCL_std, Pipe, 2, 2, OpCommitWritePipe>;
+defm : DemangledNativeBuiltin<"is_valid_reserve_id", OpenCL_std, Pipe, 1, 1, OpIsValidReserveId>;
+defm : DemangledNativeBuiltin<"__get_pipe_num_packets_ro", OpenCL_std, Pipe, 1, 1, OpGetNumPipePackets>;
+defm : DemangledNativeBuiltin<"__get_pipe_max_packets_ro", OpenCL_std, Pipe, 1, 1, OpGetMaxPipePackets>;
+defm : DemangledNativeBuiltin<"__get_pipe_num_packets_wo", OpenCL_std, Pipe, 1, 1, OpGetNumPipePackets>;
+defm : DemangledNativeBuiltin<"__get_pipe_max_packets_wo", OpenCL_std, Pipe, 1, 1, OpGetMaxPipePackets>;
+defm : DemangledNativeBuiltin<"__work_group_reserve_read_pipe", OpenCL_std, Pipe, 2, 2, OpGroupReserveReadPipePackets>;
+defm : DemangledNativeBuiltin<"__work_group_reserve_write_pipe", OpenCL_std, Pipe, 2, 2, OpGroupReserveWritePipePackets>;
+defm : DemangledNativeBuiltin<"__work_group_commit_read_pipe", OpenCL_std, Pipe, 2, 2, OpGroupCommitReadPipe>;
+defm : DemangledNativeBuiltin<"__work_group_commit_write_pipe", OpenCL_std, Pipe, 2, 2, OpGroupCommitWritePipe>;
+defm : DemangledNativeBuiltin<"__sub_group_reserve_read_pipe", OpenCL_std, Pipe, 2, 2, OpGroupReserveReadPipePackets>;
+defm : DemangledNativeBuiltin<"__sub_group_reserve_write_pipe", OpenCL_std, Pipe, 2, 2, OpGroupReserveWritePipePackets>;
+defm : DemangledNativeBuiltin<"__sub_group_commit_read_pipe", OpenCL_std, Pipe, 2, 2, OpGroupCommitReadPipe>;
+defm : DemangledNativeBuiltin<"__sub_group_commit_write_pipe", OpenCL_std, Pipe, 2, 2, OpGroupCommitWritePipe>;
+
// Barrier builtin records:
defm : DemangledNativeBuiltin<"barrier", OpenCL_std, Barrier, 1, 3, OpControlBarrier>;
defm : DemangledNativeBuiltin<"work_group_barrier", OpenCL_std, Barrier, 1, 3, OpControlBarrier>;
diff --git a/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp b/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp
index 993de9e..85ea9e1 100644
--- a/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp
@@ -148,7 +148,10 @@ static const std::map<std::string, SPIRV::Extension::Extension, std::less<>>
SPIRV::Extension::Extension::SPV_KHR_float_controls2},
{"SPV_INTEL_tensor_float32_conversion",
SPIRV::Extension::Extension::SPV_INTEL_tensor_float32_conversion},
- {"SPV_KHR_bfloat16", SPIRV::Extension::Extension::SPV_KHR_bfloat16}};
+ {"SPV_KHR_bfloat16", SPIRV::Extension::Extension::SPV_KHR_bfloat16},
+ {"SPV_EXT_relaxed_printf_string_address_space",
+ SPIRV::Extension::Extension::
+ SPV_EXT_relaxed_printf_string_address_space}};
bool SPIRVExtensionsParser::parse(cl::Option &O, StringRef ArgName,
StringRef ArgValue,
diff --git a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
index f5a49e2..704edd3 100644
--- a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
@@ -1909,11 +1909,12 @@ Instruction *SPIRVEmitIntrinsics::visitInsertValueInst(InsertValueInst &I) {
B.SetInsertPoint(&I);
SmallVector<Type *, 1> Types = {I.getInsertedValueOperand()->getType()};
SmallVector<Value *> Args;
- for (auto &Op : I.operands())
- if (isa<UndefValue>(Op))
- Args.push_back(UndefValue::get(B.getInt32Ty()));
- else
- Args.push_back(Op);
+ Value *AggregateOp = I.getAggregateOperand();
+ if (isa<UndefValue>(AggregateOp))
+ Args.push_back(UndefValue::get(B.getInt32Ty()));
+ else
+ Args.push_back(AggregateOp);
+ Args.push_back(I.getInsertedValueOperand());
for (auto &Op : I.indices())
Args.push_back(B.getInt32(Op));
Instruction *NewI =
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td
index 496dcba..1723bfb 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td
+++ b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td
@@ -763,7 +763,38 @@ def OpGetDefaultQueue: Op<303, (outs ID:$res), (ins TYPE:$type),
def OpBuildNDRange: Op<304, (outs ID:$res), (ins TYPE:$type, ID:$GWS, ID:$LWS, ID:$GWO),
"$res = OpBuildNDRange $type $GWS $LWS $GWO">;
-// TODO: 3.42.23. Pipe Instructions
+// 3.42.23. Pipe Instructions
+
+def OpReadPipe: Op<274, (outs ID:$res), (ins TYPE:$type, ID:$Pipe, ID:$Pointer, ID:$PcktSize, ID:$PcktAlign),
+ "$res = OpReadPipe $type $Pipe $Pointer $PcktSize $PcktAlign">;
+def OpWritePipe: Op<275, (outs ID:$res), (ins TYPE:$type, ID:$Pipe, ID:$Pointer, ID:$PcktSize, ID:$PcktAlign),
+ "$res = OpWritePipe $type $Pipe $Pointer $PcktSize $PcktAlign">;
+def OpReservedReadPipe : Op<276, (outs ID:$res), (ins TYPE:$type, ID:$Pipe, ID:$ReserveId, ID:$Index, ID:$Pointer, ID:$PcktSize, ID:$PcktAlign),
+ "$res = OpReservedReadPipe $type $Pipe $ReserveId $Index $Pointer $PcktSize $PcktAlign">;
+def OpReservedWritePipe : Op<277, (outs ID:$res), (ins TYPE:$type, ID:$Pipe, ID:$ReserveId, ID:$Index, ID:$Pointer, ID:$PcktSize, ID:$PcktAlign),
+ "$res = OpReservedWritePipe $type $Pipe $ReserveId $Index $Pointer $PcktSize $PcktAlign">;
+def OpReserveReadPipePackets : Op<278, (outs ID:$res), (ins TYPE:$type, ID:$Pipe, ID:$NumPckts, ID:$PcktSize, ID:$PcktAlign),
+ "$res = OpReserveReadPipePackets $type $Pipe $NumPckts $PcktSize $PcktAlign">;
+def OpReserveWritePipePackets : Op<279, (outs ID:$res), (ins TYPE:$type, ID:$Pipe, ID:$NumPckts, ID:$PcktSize, ID:$PcktAlign),
+ "$res = OpReserveWritePipePackets $type $Pipe $NumPckts $PcktSize $PcktAlign">;
+def OpCommitReadPipe : Op<280, (outs), (ins ID:$Pipe, ID:$ReserveId, ID:$PcktSize, ID:$PcktAlign),
+ "OpCommitReadPipe $Pipe $ReserveId $PcktSize $PcktAlign">;
+def OpCommitWritePipe : Op<281, (outs), (ins ID:$Pipe, ID:$ReserveId, ID:$PcktSize, ID:$PcktAlign),
+ "OpCommitWritePipe $Pipe $ReserveId $PcktSize $PcktAlign">;
+def OpIsValidReserveId : Op<282, (outs ID:$res), (ins TYPE:$type, ID:$ReserveId),
+ "$res = OpIsValidReserveId $type $ReserveId">;
+def OpGetNumPipePackets : Op<283, (outs ID:$res), (ins TYPE:$type, ID:$Pipe, ID:$PacketSize, ID:$PacketAlign),
+ "$res = OpGetNumPipePackets $type $Pipe $PacketSize $PacketAlign">;
+def OpGetMaxPipePackets : Op<284, (outs ID:$res), (ins TYPE:$type, ID:$Pipe, ID:$PacketSize, ID:$PacketAlign),
+ "$res = OpGetMaxPipePackets $type $Pipe $PacketSize $PacketAlign">;
+def OpGroupReserveReadPipePackets : Op<285, (outs ID:$res), (ins TYPE:$type, ID:$Scope, ID:$Pipe, ID:$NumPckts, ID:$PacketSize, ID:$PacketAlign),
+ "$res = OpGroupReserveReadPipePackets $type $Scope $Pipe $NumPckts $PacketSize $PacketAlign">;
+def OpGroupReserveWritePipePackets : Op<286, (outs ID:$res), (ins TYPE:$type, ID:$Scope, ID:$Pipe, ID:$NumPckts, ID:$PacketSize, ID:$PacketAlign),
+ "$res = OpGroupReserveWritePipePackets $type $Scope $Pipe $NumPckts $PacketSize $PacketAlign">;
+def OpGroupCommitReadPipe : Op<287, (outs), (ins ID:$Scope, ID:$Pipe, ID:$ReserveId, ID:$PacketSize, ID:$PacketAlign),
+ "OpGroupCommitReadPipe $Scope $Pipe $ReserveId $PacketSize $PacketAlign">;
+def OpGroupCommitWritePipe : Op<288, (outs), (ins ID:$Scope, ID:$Pipe, ID:$ReserveId, ID:$PacketSize, ID:$PacketAlign),
+ "OpGroupCommitWritePipe $Scope $Pipe $ReserveId $PacketSize $PacketAlign">;
// 3.42.24. Non-Uniform Instructions
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index a7b2179..5266e20 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -197,6 +197,8 @@ private:
bool selectOverflowArith(Register ResVReg, const SPIRVType *ResType,
MachineInstr &I, unsigned Opcode) const;
+ bool selectDebugTrap(Register ResVReg, const SPIRVType *ResType,
+ MachineInstr &I) const;
bool selectIntegerDot(Register ResVReg, const SPIRVType *ResType,
MachineInstr &I, bool Signed) const;
@@ -999,16 +1001,26 @@ bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
// represent code after lowering or intrinsics which are not implemented but
// should not crash when found in a customer's LLVM IR input.
case TargetOpcode::G_TRAP:
- case TargetOpcode::G_DEBUGTRAP:
case TargetOpcode::G_UBSANTRAP:
case TargetOpcode::DBG_LABEL:
return true;
+ case TargetOpcode::G_DEBUGTRAP:
+ return selectDebugTrap(ResVReg, ResType, I);
default:
return false;
}
}
+bool SPIRVInstructionSelector::selectDebugTrap(Register ResVReg,
+ const SPIRVType *ResType,
+ MachineInstr &I) const {
+ unsigned Opcode = SPIRV::OpNop;
+ MachineBasicBlock &BB = *I.getParent();
+ return BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
+ .constrainAllUses(TII, TRI, RBI);
+}
+
bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
const SPIRVType *ResType,
MachineInstr &I,
diff --git a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
index a95f393..bc159d5 100644
--- a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
@@ -1222,6 +1222,31 @@ static void AddDotProductRequirements(const MachineInstr &MI,
}
}
+void addPrintfRequirements(const MachineInstr &MI,
+ SPIRV::RequirementHandler &Reqs,
+ const SPIRVSubtarget &ST) {
+ SPIRVGlobalRegistry *GR = ST.getSPIRVGlobalRegistry();
+ const SPIRVType *PtrType = GR->getSPIRVTypeForVReg(MI.getOperand(4).getReg());
+ if (PtrType) {
+ MachineOperand ASOp = PtrType->getOperand(1);
+ if (ASOp.isImm()) {
+ unsigned AddrSpace = ASOp.getImm();
+ if (AddrSpace != SPIRV::StorageClass::UniformConstant) {
+ if (!ST.canUseExtension(
+ SPIRV::Extension::
+ SPV_EXT_relaxed_printf_string_address_space)) {
+ report_fatal_error("SPV_EXT_relaxed_printf_string_address_space is "
+ "required because printf uses a format string not "
+ "in constant address space.",
+ false);
+ }
+ Reqs.addExtension(
+ SPIRV::Extension::SPV_EXT_relaxed_printf_string_address_space);
+ }
+ }
+ }
+}
+
static bool isBFloat16Type(const SPIRVType *TypeDef) {
return TypeDef && TypeDef->getNumOperands() == 3 &&
TypeDef->getOpcode() == SPIRV::OpTypeFloat &&
@@ -1230,8 +1255,9 @@ static bool isBFloat16Type(const SPIRVType *TypeDef) {
}
void addInstrRequirements(const MachineInstr &MI,
- SPIRV::RequirementHandler &Reqs,
+ SPIRV::ModuleAnalysisInfo &MAI,
const SPIRVSubtarget &ST) {
+ SPIRV::RequirementHandler &Reqs = MAI.Reqs;
switch (MI.getOpcode()) {
case SPIRV::OpMemoryModel: {
int64_t Addr = MI.getOperand(0).getImm();
@@ -1321,6 +1347,12 @@ void addInstrRequirements(const MachineInstr &MI,
static_cast<int64_t>(
SPIRV::InstructionSet::NonSemantic_Shader_DebugInfo_100)) {
Reqs.addExtension(SPIRV::Extension::SPV_KHR_non_semantic_info);
+ break;
+ }
+ if (MI.getOperand(3).getImm() ==
+ static_cast<int64_t>(SPIRV::OpenCLExtInst::printf)) {
+ addPrintfRequirements(MI, Reqs, ST);
+ break;
}
break;
}
@@ -1781,15 +1813,45 @@ void addInstrRequirements(const MachineInstr &MI,
break;
case SPIRV::OpConvertHandleToImageINTEL:
case SPIRV::OpConvertHandleToSamplerINTEL:
- case SPIRV::OpConvertHandleToSampledImageINTEL:
+ case SPIRV::OpConvertHandleToSampledImageINTEL: {
if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_bindless_images))
report_fatal_error("OpConvertHandleTo[Image/Sampler/SampledImage]INTEL "
"instructions require the following SPIR-V extension: "
"SPV_INTEL_bindless_images",
false);
+ SPIRVGlobalRegistry *GR = ST.getSPIRVGlobalRegistry();
+ SPIRV::AddressingModel::AddressingModel AddrModel = MAI.Addr;
+ SPIRVType *TyDef = GR->getSPIRVTypeForVReg(MI.getOperand(1).getReg());
+ if (MI.getOpcode() == SPIRV::OpConvertHandleToImageINTEL &&
+ TyDef->getOpcode() != SPIRV::OpTypeImage) {
+ report_fatal_error("Incorrect return type for the instruction "
+ "OpConvertHandleToImageINTEL",
+ false);
+ } else if (MI.getOpcode() == SPIRV::OpConvertHandleToSamplerINTEL &&
+ TyDef->getOpcode() != SPIRV::OpTypeSampler) {
+ report_fatal_error("Incorrect return type for the instruction "
+ "OpConvertHandleToSamplerINTEL",
+ false);
+ } else if (MI.getOpcode() == SPIRV::OpConvertHandleToSampledImageINTEL &&
+ TyDef->getOpcode() != SPIRV::OpTypeSampledImage) {
+ report_fatal_error("Incorrect return type for the instruction "
+ "OpConvertHandleToSampledImageINTEL",
+ false);
+ }
+ SPIRVType *SpvTy = GR->getSPIRVTypeForVReg(MI.getOperand(2).getReg());
+ unsigned Bitwidth = GR->getScalarOrVectorBitWidth(SpvTy);
+ if (!(Bitwidth == 32 && AddrModel == SPIRV::AddressingModel::Physical32) &&
+ !(Bitwidth == 64 && AddrModel == SPIRV::AddressingModel::Physical64)) {
+ report_fatal_error(
+ "Parameter value must be a 32-bit scalar in case of "
+ "Physical32 addressing model or a 64-bit scalar in case of "
+ "Physical64 addressing model",
+ false);
+ }
Reqs.addExtension(SPIRV::Extension::SPV_INTEL_bindless_images);
Reqs.addCapability(SPIRV::Capability::BindlessImagesINTEL);
break;
+ }
case SPIRV::OpSubgroup2DBlockLoadINTEL:
case SPIRV::OpSubgroup2DBlockLoadTransposeINTEL:
case SPIRV::OpSubgroup2DBlockLoadTransformINTEL:
@@ -1927,7 +1989,7 @@ static void collectReqs(const Module &M, SPIRV::ModuleAnalysisInfo &MAI,
continue;
for (const MachineBasicBlock &MBB : *MF)
for (const MachineInstr &MI : MBB)
- addInstrRequirements(MI, MAI.Reqs, ST);
+ addInstrRequirements(MI, MAI, ST);
}
// Collect requirements for OpExecutionMode instructions.
auto Node = M.getNamedMetadata("spirv.ExecutionMode");
diff --git a/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp b/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp
index 2b34f61..4e4e6fb 100644
--- a/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp
@@ -335,6 +335,21 @@ static void lowerFunnelShifts(IntrinsicInst *FSHIntrinsic) {
FSHIntrinsic->setCalledFunction(FSHFunc);
}
+static void lowerConstrainedFPCmpIntrinsic(
+ ConstrainedFPCmpIntrinsic *ConstrainedCmpIntrinsic,
+ SmallVector<Instruction *> &EraseFromParent) {
+ if (!ConstrainedCmpIntrinsic)
+ return;
+ // Extract the floating-point values being compared
+ Value *LHS = ConstrainedCmpIntrinsic->getArgOperand(0);
+ Value *RHS = ConstrainedCmpIntrinsic->getArgOperand(1);
+ FCmpInst::Predicate Pred = ConstrainedCmpIntrinsic->getPredicate();
+ IRBuilder<> Builder(ConstrainedCmpIntrinsic);
+ Value *FCmp = Builder.CreateFCmp(Pred, LHS, RHS);
+ ConstrainedCmpIntrinsic->replaceAllUsesWith(FCmp);
+ EraseFromParent.push_back(dyn_cast<Instruction>(ConstrainedCmpIntrinsic));
+}
+
static void lowerExpectAssume(IntrinsicInst *II) {
// If we cannot use the SPV_KHR_expect_assume extension, then we need to
// ignore the intrinsic and move on. It should be removed later on by LLVM.
@@ -376,6 +391,7 @@ static bool toSpvLifetimeIntrinsic(IntrinsicInst *II, Intrinsic::ID NewID) {
bool SPIRVPrepareFunctions::substituteIntrinsicCalls(Function *F) {
bool Changed = false;
const SPIRVSubtarget &STI = TM.getSubtarget<SPIRVSubtarget>(*F);
+ SmallVector<Instruction *> EraseFromParent;
for (BasicBlock &BB : *F) {
for (Instruction &I : make_early_inc_range(BB)) {
auto Call = dyn_cast<CallInst>(&I);
@@ -423,9 +439,17 @@ bool SPIRVPrepareFunctions::substituteIntrinsicCalls(Function *F) {
lowerPtrAnnotation(II);
Changed = true;
break;
+ case Intrinsic::experimental_constrained_fcmp:
+ case Intrinsic::experimental_constrained_fcmps:
+ lowerConstrainedFPCmpIntrinsic(dyn_cast<ConstrainedFPCmpIntrinsic>(II),
+ EraseFromParent);
+ Changed = true;
+ break;
}
}
}
+ for (auto *I : EraseFromParent)
+ I->eraseFromParent();
return Changed;
}
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 93a5f22..96f52076 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -2438,8 +2438,9 @@ struct CSEDenseMapInfo {
} // end anonymous namespace
-///Perform cse of induction variable instructions.
-static void cse(BasicBlock *BB) {
+/// FIXME: This legacy common-subexpression-elimination routine is scheduled for
+/// removal, in favor of the VPlan-based one.
+static void legacyCSE(BasicBlock *BB) {
// Perform simple cse.
SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
for (Instruction &In : llvm::make_early_inc_range(*BB)) {
@@ -2543,7 +2544,7 @@ void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
BasicBlock *HeaderBB = State.CFG.VPBB2IRBB[HeaderVPBB];
// Remove redundant induction instructions.
- cse(HeaderBB);
+ legacyCSE(HeaderBB);
}
void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp
index a1c6f79..81f1956 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -845,19 +845,10 @@ InstructionCost VPRegionBlock::cost(ElementCount VF, VPCostContext &Ctx) {
if (VF.isScalable())
return InstructionCost::getInvalid();
- // First compute the cost of the conditionally executed recipes, followed by
- // account for the branching cost, except if the mask is a header mask or
- // uniform condition.
- using namespace llvm::VPlanPatternMatch;
+ // Compute and return the cost of the conditionally executed recipes.
+ assert(VF.isVector() && "Can only compute vector cost at the moment.");
VPBasicBlock *Then = cast<VPBasicBlock>(getEntry()->getSuccessors()[0]);
- InstructionCost ThenCost = Then->cost(VF, Ctx);
-
- // For the scalar case, we may not always execute the original predicated
- // block, Thus, scale the block's cost by the probability of executing it.
- if (VF.isScalar())
- return ThenCost / getPredBlockCostDivisor(Ctx.CostKind);
-
- return ThenCost;
+ return Then->cost(VF, Ctx);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 58fab8f..5252e1f 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -2853,6 +2853,7 @@ void VPlanTransforms::replaceSymbolicStrides(
return R->getParent()->getParent() ||
R->getParent() == Plan.getVectorLoopRegion()->getSinglePredecessor();
};
+ ValueToSCEVMapTy RewriteMap;
for (const SCEV *Stride : StridesMap.values()) {
using namespace SCEVPatternMatch;
auto *StrideV = cast<SCEVUnknown>(Stride)->getValue();
@@ -2880,6 +2881,22 @@ void VPlanTransforms::replaceSymbolicStrides(
VPValue *CI = Plan.getOrAddLiveIn(ConstantInt::get(U->getType(), C));
StrideVPV->replaceUsesWithIf(CI, CanUseVersionedStride);
}
+ RewriteMap[StrideV] = PSE.getSCEV(StrideV);
+ }
+
+ for (VPRecipeBase &R : *Plan.getEntry()) {
+ auto *ExpSCEV = dyn_cast<VPExpandSCEVRecipe>(&R);
+ if (!ExpSCEV)
+ continue;
+ const SCEV *ScevExpr = ExpSCEV->getSCEV();
+ auto *NewSCEV =
+ SCEVParameterRewriter::rewrite(ScevExpr, *PSE.getSE(), RewriteMap);
+ if (NewSCEV != ScevExpr) {
+ VPValue *NewExp = vputils::getOrCreateVPValueForSCEVExpr(Plan, NewSCEV);
+ ExpSCEV->replaceAllUsesWith(NewExp);
+ if (Plan.getTripCount() == ExpSCEV)
+ Plan.resetTripCount(NewExp);
+ }
}
}
diff --git a/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp b/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp
index eac0e70..0599930 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp
@@ -13,6 +13,7 @@
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
using namespace llvm;
+using namespace llvm::VPlanPatternMatch;
bool vputils::onlyFirstLaneUsed(const VPValue *Def) {
return all_of(Def->users(),
@@ -63,7 +64,6 @@ bool vputils::isHeaderMask(const VPValue *V, VPlan &Plan) {
};
VPValue *A, *B;
- using namespace VPlanPatternMatch;
if (match(V, m_ActiveLaneMask(m_VPValue(A), m_VPValue(B), m_One())))
return B == Plan.getTripCount() &&
@@ -90,7 +90,6 @@ const SCEV *vputils::getSCEVExprForVPValue(VPValue *V, ScalarEvolution &SE) {
}
bool vputils::isUniformAcrossVFsAndUFs(VPValue *V) {
- using namespace VPlanPatternMatch;
// Live-ins are uniform.
if (V->isLiveIn())
return true;
@@ -159,7 +158,6 @@ std::optional<VPValue *>
vputils::getRecipesForUncountableExit(VPlan &Plan,
SmallVectorImpl<VPRecipeBase *> &Recipes,
SmallVectorImpl<VPRecipeBase *> &GEPs) {
- using namespace llvm::VPlanPatternMatch;
// Given a VPlan like the following (just including the recipes contributing
// to loop control exiting here, not the actual work), we're looking to match
// the recipes contributing to the uncountable exit condition comparison