aboutsummaryrefslogtreecommitdiff
path: root/llvm/lib/Target/SPIRV
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/lib/Target/SPIRV')
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp202
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp26
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVBuiltins.h2
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp6
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp214
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp2
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVInstrInfo.cpp21
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVInstrInfo.h3
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp57
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp8
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp321
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.h7
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp3
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td4
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVUtils.cpp16
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVUtils.h51
16 files changed, 886 insertions, 57 deletions
diff --git a/llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp b/llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp
index c2a6e51..b765fec 100644
--- a/llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp
@@ -81,6 +81,7 @@ public:
void outputExecutionMode(const Module &M);
void outputAnnotations(const Module &M);
void outputModuleSections();
+ void outputFPFastMathDefaultInfo();
bool isHidden() {
return MF->getFunction()
.getFnAttribute(SPIRV_BACKEND_SERVICE_FUN_NAME)
@@ -498,11 +499,27 @@ void SPIRVAsmPrinter::outputExecutionMode(const Module &M) {
NamedMDNode *Node = M.getNamedMetadata("spirv.ExecutionMode");
if (Node) {
for (unsigned i = 0; i < Node->getNumOperands(); i++) {
+ // If SPV_KHR_float_controls2 is enabled and we find any of
+ // FPFastMathDefault, ContractionOff or SignedZeroInfNanPreserve execution
+ // modes, skip it, it'll be done somewhere else.
+ if (ST->canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2)) {
+ const auto EM =
+ cast<ConstantInt>(
+ cast<ConstantAsMetadata>((Node->getOperand(i))->getOperand(1))
+ ->getValue())
+ ->getZExtValue();
+ if (EM == SPIRV::ExecutionMode::FPFastMathDefault ||
+ EM == SPIRV::ExecutionMode::ContractionOff ||
+ EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve)
+ continue;
+ }
+
MCInst Inst;
Inst.setOpcode(SPIRV::OpExecutionMode);
addOpsFromMDNode(cast<MDNode>(Node->getOperand(i)), Inst, MAI);
outputMCInst(Inst);
}
+ outputFPFastMathDefaultInfo();
}
for (auto FI = M.begin(), E = M.end(); FI != E; ++FI) {
const Function &F = *FI;
@@ -552,12 +569,84 @@ void SPIRVAsmPrinter::outputExecutionMode(const Module &M) {
}
if (ST->isKernel() && !M.getNamedMetadata("spirv.ExecutionMode") &&
!M.getNamedMetadata("opencl.enable.FP_CONTRACT")) {
- MCInst Inst;
- Inst.setOpcode(SPIRV::OpExecutionMode);
- Inst.addOperand(MCOperand::createReg(FReg));
- unsigned EM = static_cast<unsigned>(SPIRV::ExecutionMode::ContractionOff);
- Inst.addOperand(MCOperand::createImm(EM));
- outputMCInst(Inst);
+ if (ST->canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2)) {
+ // When SPV_KHR_float_controls2 is enabled, ContractionOff is
+ // deprecated. We need to use FPFastMathDefault with the appropriate
+ // flags instead. Since FPFastMathDefault takes a target type, we need
+ // to emit it for each floating-point type that exists in the module
+ // to match the effect of ContractionOff. As of now, there are 3 FP
+ // types: fp16, fp32 and fp64.
+
+ // We only end up here because there is no "spirv.ExecutionMode"
+ // metadata, so that means no FPFastMathDefault. Therefore, we only
+ // need to make sure AllowContract is set to 0, as the rest of flags.
+ // We still need to emit the OpExecutionMode instruction, otherwise
+ // it's up to the client API to define the flags. Therefore, we need
+ // to find the constant with 0 value.
+
+ // Collect the SPIRVTypes for fp16, fp32, and fp64 and the constant of
+ // type int32 with 0 value to represent the FP Fast Math Mode.
+ std::vector<const MachineInstr *> SPIRVFloatTypes;
+ const MachineInstr *ConstZero = nullptr;
+ for (const MachineInstr *MI :
+ MAI->getMSInstrs(SPIRV::MB_TypeConstVars)) {
+ // Skip if the instruction is not OpTypeFloat or OpConstant.
+ unsigned OpCode = MI->getOpcode();
+ if (OpCode != SPIRV::OpTypeFloat && OpCode != SPIRV::OpConstantNull)
+ continue;
+
+ // Collect the SPIRV type if it's a float.
+ if (OpCode == SPIRV::OpTypeFloat) {
+ // Skip if the target type is not fp16, fp32, fp64.
+ const unsigned OpTypeFloatSize = MI->getOperand(1).getImm();
+ if (OpTypeFloatSize != 16 && OpTypeFloatSize != 32 &&
+ OpTypeFloatSize != 64) {
+ continue;
+ }
+ SPIRVFloatTypes.push_back(MI);
+ } else {
+ // Check if the constant is int32, if not skip it.
+ const MachineRegisterInfo &MRI = MI->getMF()->getRegInfo();
+ MachineInstr *TypeMI = MRI.getVRegDef(MI->getOperand(1).getReg());
+ if (!TypeMI || TypeMI->getOperand(1).getImm() != 32)
+ continue;
+
+ ConstZero = MI;
+ }
+ }
+
+ // When SPV_KHR_float_controls2 is enabled, ContractionOff is
+ // deprecated. We need to use FPFastMathDefault with the appropriate
+ // flags instead. Since FPFastMathDefault takes a target type, we need
+ // to emit it for each floating-point type that exists in the module
+ // to match the effect of ContractionOff. As of now, there are 3 FP
+ // types: fp16, fp32 and fp64.
+ for (const MachineInstr *MI : SPIRVFloatTypes) {
+ MCInst Inst;
+ Inst.setOpcode(SPIRV::OpExecutionModeId);
+ Inst.addOperand(MCOperand::createReg(FReg));
+ unsigned EM =
+ static_cast<unsigned>(SPIRV::ExecutionMode::FPFastMathDefault);
+ Inst.addOperand(MCOperand::createImm(EM));
+ const MachineFunction *MF = MI->getMF();
+ MCRegister TypeReg =
+ MAI->getRegisterAlias(MF, MI->getOperand(0).getReg());
+ Inst.addOperand(MCOperand::createReg(TypeReg));
+ assert(ConstZero && "There should be a constant zero.");
+ MCRegister ConstReg = MAI->getRegisterAlias(
+ ConstZero->getMF(), ConstZero->getOperand(0).getReg());
+ Inst.addOperand(MCOperand::createReg(ConstReg));
+ outputMCInst(Inst);
+ }
+ } else {
+ MCInst Inst;
+ Inst.setOpcode(SPIRV::OpExecutionMode);
+ Inst.addOperand(MCOperand::createReg(FReg));
+ unsigned EM =
+ static_cast<unsigned>(SPIRV::ExecutionMode::ContractionOff);
+ Inst.addOperand(MCOperand::createImm(EM));
+ outputMCInst(Inst);
+ }
}
}
}
@@ -606,6 +695,101 @@ void SPIRVAsmPrinter::outputAnnotations(const Module &M) {
}
}
+void SPIRVAsmPrinter::outputFPFastMathDefaultInfo() {
+ // Collect the SPIRVTypes that are OpTypeFloat and the constants of type
+ // int32, that might be used as FP Fast Math Mode.
+ std::vector<const MachineInstr *> SPIRVFloatTypes;
+ // Hashtable to associate immediate values with the constant holding them.
+ std::unordered_map<int, const MachineInstr *> ConstMap;
+ for (const MachineInstr *MI : MAI->getMSInstrs(SPIRV::MB_TypeConstVars)) {
+ // Skip if the instruction is not OpTypeFloat or OpConstant.
+ unsigned OpCode = MI->getOpcode();
+ if (OpCode != SPIRV::OpTypeFloat && OpCode != SPIRV::OpConstantI &&
+ OpCode != SPIRV::OpConstantNull)
+ continue;
+
+ // Collect the SPIRV type if it's a float.
+ if (OpCode == SPIRV::OpTypeFloat) {
+ SPIRVFloatTypes.push_back(MI);
+ } else {
+ // Check if the constant is int32, if not skip it.
+ const MachineRegisterInfo &MRI = MI->getMF()->getRegInfo();
+ MachineInstr *TypeMI = MRI.getVRegDef(MI->getOperand(1).getReg());
+ if (!TypeMI || TypeMI->getOpcode() != SPIRV::OpTypeInt ||
+ TypeMI->getOperand(1).getImm() != 32)
+ continue;
+
+ if (OpCode == SPIRV::OpConstantI)
+ ConstMap[MI->getOperand(2).getImm()] = MI;
+ else
+ ConstMap[0] = MI;
+ }
+ }
+
+ for (const auto &[Func, FPFastMathDefaultInfoVec] :
+ MAI->FPFastMathDefaultInfoMap) {
+ if (FPFastMathDefaultInfoVec.empty())
+ continue;
+
+ for (const MachineInstr *MI : SPIRVFloatTypes) {
+ unsigned OpTypeFloatSize = MI->getOperand(1).getImm();
+ unsigned Index = SPIRV::FPFastMathDefaultInfoVector::
+ computeFPFastMathDefaultInfoVecIndex(OpTypeFloatSize);
+ assert(Index < FPFastMathDefaultInfoVec.size() &&
+ "Index out of bounds for FPFastMathDefaultInfoVec");
+ const auto &FPFastMathDefaultInfo = FPFastMathDefaultInfoVec[Index];
+ assert(FPFastMathDefaultInfo.Ty &&
+ "Expected target type for FPFastMathDefaultInfo");
+ assert(FPFastMathDefaultInfo.Ty->getScalarSizeInBits() ==
+ OpTypeFloatSize &&
+ "Mismatched float type size");
+ MCInst Inst;
+ Inst.setOpcode(SPIRV::OpExecutionModeId);
+ MCRegister FuncReg = MAI->getFuncReg(Func);
+ assert(FuncReg.isValid());
+ Inst.addOperand(MCOperand::createReg(FuncReg));
+ Inst.addOperand(
+ MCOperand::createImm(SPIRV::ExecutionMode::FPFastMathDefault));
+ MCRegister TypeReg =
+ MAI->getRegisterAlias(MI->getMF(), MI->getOperand(0).getReg());
+ Inst.addOperand(MCOperand::createReg(TypeReg));
+ unsigned Flags = FPFastMathDefaultInfo.FastMathFlags;
+ if (FPFastMathDefaultInfo.ContractionOff &&
+ (Flags & SPIRV::FPFastMathMode::AllowContract))
+ report_fatal_error(
+ "Conflicting FPFastMathFlags: ContractionOff and AllowContract");
+
+ if (FPFastMathDefaultInfo.SignedZeroInfNanPreserve &&
+ !(Flags &
+ (SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf |
+ SPIRV::FPFastMathMode::NSZ))) {
+ if (FPFastMathDefaultInfo.FPFastMathDefault)
+ report_fatal_error("Conflicting FPFastMathFlags: "
+ "SignedZeroInfNanPreserve but at least one of "
+ "NotNaN/NotInf/NSZ is enabled.");
+ }
+
+ // Don't emit if none of the execution modes was used.
+ if (Flags == SPIRV::FPFastMathMode::None &&
+ !FPFastMathDefaultInfo.ContractionOff &&
+ !FPFastMathDefaultInfo.SignedZeroInfNanPreserve &&
+ !FPFastMathDefaultInfo.FPFastMathDefault)
+ continue;
+
+ // Retrieve the constant instruction for the immediate value.
+ auto It = ConstMap.find(Flags);
+ if (It == ConstMap.end())
+ report_fatal_error("Expected constant instruction for FP Fast Math "
+ "Mode operand of FPFastMathDefault execution mode.");
+ const MachineInstr *ConstMI = It->second;
+ MCRegister ConstReg = MAI->getRegisterAlias(
+ ConstMI->getMF(), ConstMI->getOperand(0).getReg());
+ Inst.addOperand(MCOperand::createReg(ConstReg));
+ outputMCInst(Inst);
+ }
+ }
+}
+
void SPIRVAsmPrinter::outputModuleSections() {
const Module *M = MMI->getModule();
// Get the global subtarget to output module-level info.
@@ -614,7 +798,8 @@ void SPIRVAsmPrinter::outputModuleSections() {
MAI = &SPIRVModuleAnalysis::MAI;
assert(ST && TII && MAI && M && "Module analysis is required");
// Output instructions according to the Logical Layout of a Module:
- // 1,2. All OpCapability instructions, then optional OpExtension instructions.
+ // 1,2. All OpCapability instructions, then optional OpExtension
+ // instructions.
outputGlobalRequirements();
// 3. Optional OpExtInstImport instructions.
outputOpExtInstImports(*M);
@@ -622,7 +807,8 @@ void SPIRVAsmPrinter::outputModuleSections() {
outputOpMemoryModel();
// 5. All entry point declarations, using OpEntryPoint.
outputEntryPoints();
- // 6. Execution-mode declarations, using OpExecutionMode or OpExecutionModeId.
+ // 6. Execution-mode declarations, using OpExecutionMode or
+ // OpExecutionModeId.
outputExecutionMode(*M);
// 7a. Debug: all OpString, OpSourceExtension, OpSource, and
// OpSourceContinued, without forward references.
diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
index f704d3a..0e0c454 100644
--- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
@@ -1162,11 +1162,24 @@ static unsigned getNumSizeComponents(SPIRVType *imgType) {
static bool generateExtInst(const SPIRV::IncomingCall *Call,
MachineIRBuilder &MIRBuilder,
- SPIRVGlobalRegistry *GR) {
+ SPIRVGlobalRegistry *GR, const CallBase &CB) {
// Lookup the extended instruction number in the TableGen records.
const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
uint32_t Number =
SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number;
+ // fmin_common and fmax_common are now deprecated, and we should use fmin and
+ // fmax with NotInf and NotNaN flags instead. Keep original number to add
+ // later the NoNans and NoInfs flags.
+ uint32_t OrigNumber = Number;
+ const SPIRVSubtarget &ST =
+ cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
+ if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2) &&
+ (Number == SPIRV::OpenCLExtInst::fmin_common ||
+ Number == SPIRV::OpenCLExtInst::fmax_common)) {
+ Number = (Number == SPIRV::OpenCLExtInst::fmin_common)
+ ? SPIRV::OpenCLExtInst::fmin
+ : SPIRV::OpenCLExtInst::fmax;
+ }
// Build extended instruction.
auto MIB =
@@ -1178,6 +1191,13 @@ static bool generateExtInst(const SPIRV::IncomingCall *Call,
for (auto Argument : Call->Arguments)
MIB.addUse(Argument);
+ MIB.getInstr()->copyIRFlags(CB);
+ if (OrigNumber == SPIRV::OpenCLExtInst::fmin_common ||
+ OrigNumber == SPIRV::OpenCLExtInst::fmax_common) {
+ // Add NoNans and NoInfs flags to fmin/fmax instruction.
+ MIB.getInstr()->setFlag(MachineInstr::MIFlag::FmNoNans);
+ MIB.getInstr()->setFlag(MachineInstr::MIFlag::FmNoInfs);
+ }
return true;
}
@@ -2908,7 +2928,7 @@ std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
MachineIRBuilder &MIRBuilder,
const Register OrigRet, const Type *OrigRetTy,
const SmallVectorImpl<Register> &Args,
- SPIRVGlobalRegistry *GR) {
+ SPIRVGlobalRegistry *GR, const CallBase &CB) {
LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");
// Lookup the builtin in the TableGen records.
@@ -2931,7 +2951,7 @@ std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
// Match the builtin with implementation based on the grouping.
switch (Call->Builtin->Group) {
case SPIRV::Extended:
- return generateExtInst(Call.get(), MIRBuilder, GR);
+ return generateExtInst(Call.get(), MIRBuilder, GR, CB);
case SPIRV::Relational:
return generateRelationalInst(Call.get(), MIRBuilder, GR);
case SPIRV::Group:
diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.h b/llvm/lib/Target/SPIRV/SPIRVBuiltins.h
index 1a8641a..f6a5234 100644
--- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.h
+++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.h
@@ -39,7 +39,7 @@ std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
MachineIRBuilder &MIRBuilder,
const Register OrigRet, const Type *OrigRetTy,
const SmallVectorImpl<Register> &Args,
- SPIRVGlobalRegistry *GR);
+ SPIRVGlobalRegistry *GR, const CallBase &CB);
/// Helper function for finding a builtin function attributes
/// by a demangled function name. Defined in SPIRVBuiltins.cpp.
diff --git a/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp b/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp
index a412887..1a7c02c 100644
--- a/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp
@@ -641,9 +641,9 @@ bool SPIRVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
GR->getPointerSize()));
}
}
- if (auto Res =
- SPIRV::lowerBuiltin(DemangledName, ST->getPreferredInstructionSet(),
- MIRBuilder, ResVReg, OrigRetTy, ArgVRegs, GR))
+ if (auto Res = SPIRV::lowerBuiltin(
+ DemangledName, ST->getPreferredInstructionSet(), MIRBuilder,
+ ResVReg, OrigRetTy, ArgVRegs, GR, *Info.CB))
return *Res;
}
diff --git a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
index 704edd3..9f2e075 100644
--- a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
@@ -25,6 +25,7 @@
#include "llvm/IR/TypedPointerType.h"
#include "llvm/Transforms/Utils/Local.h"
+#include <cassert>
#include <queue>
#include <unordered_set>
@@ -152,6 +153,7 @@ class SPIRVEmitIntrinsics
void insertPtrCastOrAssignTypeInstr(Instruction *I, IRBuilder<> &B);
bool shouldTryToAddMemAliasingDecoration(Instruction *Inst);
void insertSpirvDecorations(Instruction *I, IRBuilder<> &B);
+ void insertConstantsForFPFastMathDefault(Module &M);
void processGlobalValue(GlobalVariable &GV, IRBuilder<> &B);
void processParamTypes(Function *F, IRBuilder<> &B);
void processParamTypesByFunHeader(Function *F, IRBuilder<> &B);
@@ -2249,6 +2251,198 @@ void SPIRVEmitIntrinsics::insertSpirvDecorations(Instruction *I,
}
}
+static SPIRV::FPFastMathDefaultInfoVector &getOrCreateFPFastMathDefaultInfoVec(
+ const Module &M,
+ DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
+ &FPFastMathDefaultInfoMap,
+ Function *F) {
+ auto it = FPFastMathDefaultInfoMap.find(F);
+ if (it != FPFastMathDefaultInfoMap.end())
+ return it->second;
+
+ // If the map does not contain the entry, create a new one. Initialize it to
+ // contain all 3 elements sorted by bit width of target type: {half, float,
+ // double}.
+ SPIRV::FPFastMathDefaultInfoVector FPFastMathDefaultInfoVec;
+ FPFastMathDefaultInfoVec.emplace_back(Type::getHalfTy(M.getContext()),
+ SPIRV::FPFastMathMode::None);
+ FPFastMathDefaultInfoVec.emplace_back(Type::getFloatTy(M.getContext()),
+ SPIRV::FPFastMathMode::None);
+ FPFastMathDefaultInfoVec.emplace_back(Type::getDoubleTy(M.getContext()),
+ SPIRV::FPFastMathMode::None);
+ return FPFastMathDefaultInfoMap[F] = std::move(FPFastMathDefaultInfoVec);
+}
+
+static SPIRV::FPFastMathDefaultInfo &getFPFastMathDefaultInfo(
+ SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec,
+ const Type *Ty) {
+ size_t BitWidth = Ty->getScalarSizeInBits();
+ int Index =
+ SPIRV::FPFastMathDefaultInfoVector::computeFPFastMathDefaultInfoVecIndex(
+ BitWidth);
+ assert(Index >= 0 && Index < 3 &&
+ "Expected FPFastMathDefaultInfo for half, float, or double");
+ assert(FPFastMathDefaultInfoVec.size() == 3 &&
+ "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
+ return FPFastMathDefaultInfoVec[Index];
+}
+
+void SPIRVEmitIntrinsics::insertConstantsForFPFastMathDefault(Module &M) {
+ const SPIRVSubtarget *ST = TM->getSubtargetImpl();
+ if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2))
+ return;
+
+ // Store the FPFastMathDefaultInfo in the FPFastMathDefaultInfoMap.
+ // We need the entry point (function) as the key, and the target
+ // type and flags as the value.
+ // We also need to check ContractionOff and SignedZeroInfNanPreserve
+ // execution modes, as they are now deprecated and must be replaced
+ // with FPFastMathDefaultInfo.
+ auto Node = M.getNamedMetadata("spirv.ExecutionMode");
+ if (!Node) {
+ if (!M.getNamedMetadata("opencl.enable.FP_CONTRACT")) {
+ // This requires emitting ContractionOff. However, because
+ // ContractionOff is now deprecated, we need to replace it with
+ // FPFastMathDefaultInfo with FP Fast Math Mode bitmask set to all 0.
+ // We need to create the constant for that.
+
+ // Create constant instruction with the bitmask flags.
+ Constant *InitValue =
+ ConstantInt::get(Type::getInt32Ty(M.getContext()), 0);
+ // TODO: Reuse constant if there is one already with the required
+ // value.
+ [[maybe_unused]] GlobalVariable *GV =
+ new GlobalVariable(M, // Module
+ Type::getInt32Ty(M.getContext()), // Type
+ true, // isConstant
+ GlobalValue::InternalLinkage, // Linkage
+ InitValue // Initializer
+ );
+ }
+ return;
+ }
+
+ // The table maps function pointers to their default FP fast math info. It
+ // can be assumed that the SmallVector is sorted by the bit width of the
+ // type. The first element is the smallest bit width, and the last element
+ // is the largest bit width, therefore, we will have {half, float, double}
+ // in the order of their bit widths.
+ DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
+ FPFastMathDefaultInfoMap;
+
+ for (unsigned i = 0; i < Node->getNumOperands(); i++) {
+ MDNode *MDN = cast<MDNode>(Node->getOperand(i));
+ assert(MDN->getNumOperands() >= 2 && "Expected at least 2 operands");
+ Function *F = cast<Function>(
+ cast<ConstantAsMetadata>(MDN->getOperand(0))->getValue());
+ const auto EM =
+ cast<ConstantInt>(
+ cast<ConstantAsMetadata>(MDN->getOperand(1))->getValue())
+ ->getZExtValue();
+ if (EM == SPIRV::ExecutionMode::FPFastMathDefault) {
+ assert(MDN->getNumOperands() == 4 &&
+ "Expected 4 operands for FPFastMathDefault");
+ const Type *T = cast<ValueAsMetadata>(MDN->getOperand(2))->getType();
+ unsigned Flags =
+ cast<ConstantInt>(
+ cast<ConstantAsMetadata>(MDN->getOperand(3))->getValue())
+ ->getZExtValue();
+ SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
+ getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
+ SPIRV::FPFastMathDefaultInfo &Info =
+ getFPFastMathDefaultInfo(FPFastMathDefaultInfoVec, T);
+ Info.FastMathFlags = Flags;
+ Info.FPFastMathDefault = true;
+ } else if (EM == SPIRV::ExecutionMode::ContractionOff) {
+ assert(MDN->getNumOperands() == 2 &&
+ "Expected no operands for ContractionOff");
+
+ // We need to save this info for every possible FP type, i.e. {half,
+ // float, double, fp128}.
+ SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
+ getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
+ for (SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
+ Info.ContractionOff = true;
+ }
+ } else if (EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) {
+ assert(MDN->getNumOperands() == 3 &&
+ "Expected 1 operand for SignedZeroInfNanPreserve");
+ unsigned TargetWidth =
+ cast<ConstantInt>(
+ cast<ConstantAsMetadata>(MDN->getOperand(2))->getValue())
+ ->getZExtValue();
+ // We need to save this info only for the FP type with TargetWidth.
+ SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
+ getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
+ int Index = SPIRV::FPFastMathDefaultInfoVector::
+ computeFPFastMathDefaultInfoVecIndex(TargetWidth);
+ assert(Index >= 0 && Index < 3 &&
+ "Expected FPFastMathDefaultInfo for half, float, or double");
+ assert(FPFastMathDefaultInfoVec.size() == 3 &&
+ "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
+ FPFastMathDefaultInfoVec[Index].SignedZeroInfNanPreserve = true;
+ }
+ }
+
+ std::unordered_map<unsigned, GlobalVariable *> GlobalVars;
+ for (auto &[Func, FPFastMathDefaultInfoVec] : FPFastMathDefaultInfoMap) {
+ if (FPFastMathDefaultInfoVec.empty())
+ continue;
+
+ for (const SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
+ assert(Info.Ty && "Expected target type for FPFastMathDefaultInfo");
+ // Skip if none of the execution modes was used.
+ unsigned Flags = Info.FastMathFlags;
+ if (Flags == SPIRV::FPFastMathMode::None && !Info.ContractionOff &&
+ !Info.SignedZeroInfNanPreserve && !Info.FPFastMathDefault)
+ continue;
+
+ // Check if flags are compatible.
+ if (Info.ContractionOff && (Flags & SPIRV::FPFastMathMode::AllowContract))
+ report_fatal_error("Conflicting FPFastMathFlags: ContractionOff "
+ "and AllowContract");
+
+ if (Info.SignedZeroInfNanPreserve &&
+ !(Flags &
+ (SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf |
+ SPIRV::FPFastMathMode::NSZ))) {
+ if (Info.FPFastMathDefault)
+ report_fatal_error("Conflicting FPFastMathFlags: "
+ "SignedZeroInfNanPreserve but at least one of "
+ "NotNaN/NotInf/NSZ is enabled.");
+ }
+
+ if ((Flags & SPIRV::FPFastMathMode::AllowTransform) &&
+ !((Flags & SPIRV::FPFastMathMode::AllowReassoc) &&
+ (Flags & SPIRV::FPFastMathMode::AllowContract))) {
+ report_fatal_error("Conflicting FPFastMathFlags: "
+ "AllowTransform requires AllowReassoc and "
+ "AllowContract to be set.");
+ }
+
+ auto it = GlobalVars.find(Flags);
+ GlobalVariable *GV = nullptr;
+ if (it != GlobalVars.end()) {
+ // Reuse existing global variable.
+ GV = it->second;
+ } else {
+ // Create constant instruction with the bitmask flags.
+ Constant *InitValue =
+ ConstantInt::get(Type::getInt32Ty(M.getContext()), Flags);
+ // TODO: Reuse constant if there is one already with the required
+ // value.
+ GV = new GlobalVariable(M, // Module
+ Type::getInt32Ty(M.getContext()), // Type
+ true, // isConstant
+ GlobalValue::InternalLinkage, // Linkage
+ InitValue // Initializer
+ );
+ GlobalVars[Flags] = GV;
+ }
+ }
+ }
+}
+
void SPIRVEmitIntrinsics::processInstrAfterVisit(Instruction *I,
IRBuilder<> &B) {
auto *II = dyn_cast<IntrinsicInst>(I);
@@ -2569,9 +2763,9 @@ GetElementPtrInst *
SPIRVEmitIntrinsics::simplifyZeroLengthArrayGepInst(GetElementPtrInst *GEP) {
// getelementptr [0 x T], P, 0 (zero), I -> getelementptr T, P, I.
// If type is 0-length array and first index is 0 (zero), drop both the
- // 0-length array type and the first index. This is a common pattern in the
- // IR, e.g. when using a zero-length array as a placeholder for a flexible
- // array such as unbound arrays.
+ // 0-length array type and the first index. This is a common pattern in
+ // the IR, e.g. when using a zero-length array as a placeholder for a
+ // flexible array such as unbound arrays.
assert(GEP && "GEP is null");
Type *SrcTy = GEP->getSourceElementType();
SmallVector<Value *, 8> Indices(GEP->indices());
@@ -2633,8 +2827,9 @@ bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
processParamTypesByFunHeader(CurrF, B);
- // StoreInst's operand type can be changed during the next transformations,
- // so we need to store it in the set. Also store already transformed types.
+ // StoreInst's operand type can be changed during the next
+ // transformations, so we need to store it in the set. Also store already
+ // transformed types.
for (auto &I : instructions(Func)) {
StoreInst *SI = dyn_cast<StoreInst>(&I);
if (!SI)
@@ -2681,8 +2876,8 @@ bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
for (auto &I : llvm::reverse(instructions(Func)))
deduceOperandElementType(&I, &IncompleteRets);
- // Pass forward for PHIs only, their operands are not preceed the instruction
- // in meaning of `instructions(Func)`.
+ // Pass forward for PHIs only, their operands are not preceed the
+ // instruction in meaning of `instructions(Func)`.
for (BasicBlock &BB : Func)
for (PHINode &Phi : BB.phis())
if (isPointerTy(Phi.getType()))
@@ -2692,8 +2887,8 @@ bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
TrackConstants = true;
if (!I->getType()->isVoidTy() || isa<StoreInst>(I))
setInsertPointAfterDef(B, I);
- // Visitors return either the original/newly created instruction for further
- // processing, nullptr otherwise.
+ // Visitors return either the original/newly created instruction for
+ // further processing, nullptr otherwise.
I = visit(*I);
if (!I)
continue;
@@ -2816,6 +3011,7 @@ bool SPIRVEmitIntrinsics::runOnModule(Module &M) {
bool Changed = false;
parseFunDeclarations(M);
+ insertConstantsForFPFastMathDefault(M);
TodoType.clear();
for (auto &F : M)
diff --git a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
index 115766c..6fd1c7e 100644
--- a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
@@ -806,7 +806,7 @@ Register SPIRVGlobalRegistry::buildGlobalVariable(
// arguments.
MDNode *GVarMD = nullptr;
if (GVar && (GVarMD = GVar->getMetadata("spirv.Decorations")) != nullptr)
- buildOpSpirvDecorations(Reg, MIRBuilder, GVarMD);
+ buildOpSpirvDecorations(Reg, MIRBuilder, GVarMD, ST);
return Reg;
}
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.cpp b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.cpp
index 45e88fc..ba95ad8 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.cpp
@@ -132,7 +132,8 @@ bool SPIRVInstrInfo::isHeaderInstr(const MachineInstr &MI) const {
}
}
-bool SPIRVInstrInfo::canUseFastMathFlags(const MachineInstr &MI) const {
+bool SPIRVInstrInfo::canUseFastMathFlags(const MachineInstr &MI,
+ bool KHRFloatControls2) const {
switch (MI.getOpcode()) {
case SPIRV::OpFAddS:
case SPIRV::OpFSubS:
@@ -146,6 +147,24 @@ bool SPIRVInstrInfo::canUseFastMathFlags(const MachineInstr &MI) const {
case SPIRV::OpFRemV:
case SPIRV::OpFMod:
return true;
+ case SPIRV::OpFNegateV:
+ case SPIRV::OpFNegate:
+ case SPIRV::OpOrdered:
+ case SPIRV::OpUnordered:
+ case SPIRV::OpFOrdEqual:
+ case SPIRV::OpFOrdNotEqual:
+ case SPIRV::OpFOrdLessThan:
+ case SPIRV::OpFOrdLessThanEqual:
+ case SPIRV::OpFOrdGreaterThan:
+ case SPIRV::OpFOrdGreaterThanEqual:
+ case SPIRV::OpFUnordEqual:
+ case SPIRV::OpFUnordNotEqual:
+ case SPIRV::OpFUnordLessThan:
+ case SPIRV::OpFUnordLessThanEqual:
+ case SPIRV::OpFUnordGreaterThan:
+ case SPIRV::OpFUnordGreaterThanEqual:
+ case SPIRV::OpExtInst:
+ return KHRFloatControls2 ? true : false;
default:
return false;
}
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.h b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.h
index 72d2243..4de9d6a 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.h
+++ b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.h
@@ -36,7 +36,8 @@ public:
bool isTypeDeclInstr(const MachineInstr &MI) const;
bool isDecorationInstr(const MachineInstr &MI) const;
bool isAliasingInstr(const MachineInstr &MI) const;
- bool canUseFastMathFlags(const MachineInstr &MI) const;
+ bool canUseFastMathFlags(const MachineInstr &MI,
+ bool KHRFloatControls2) const;
bool canUseNSW(const MachineInstr &MI) const;
bool canUseNUW(const MachineInstr &MI) const;
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index 5266e20..273edf3 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -314,7 +314,8 @@ private:
MachineInstr &I) const;
bool selectModf(Register ResVReg, const SPIRVType *ResType,
MachineInstr &I) const;
-
+ bool selectFrexp(Register ResVReg, const SPIRVType *ResType,
+ MachineInstr &I) const;
// Utilities
std::pair<Register, bool>
buildI32Constant(uint32_t Val, MachineInstr &I,
@@ -835,6 +836,9 @@ bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
case TargetOpcode::G_USUBSAT:
return selectExtInst(ResVReg, ResType, I, CL::u_sub_sat);
+ case TargetOpcode::G_FFREXP:
+ return selectFrexp(ResVReg, ResType, I);
+
case TargetOpcode::G_UADDO:
return selectOverflowArith(ResVReg, ResType, I,
ResType->getOpcode() == SPIRV::OpTypeVector
@@ -1069,7 +1073,8 @@ bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addImm(static_cast<uint32_t>(Set))
- .addImm(Opcode);
+ .addImm(Opcode)
+ .setMIFlags(I.getFlags());
const unsigned NumOps = I.getNumOperands();
unsigned Index = 1;
if (Index < NumOps &&
@@ -1119,6 +1124,53 @@ bool SPIRVInstructionSelector::selectExtInstForLRound(
return false;
}
+bool SPIRVInstructionSelector::selectFrexp(Register ResVReg,
+ const SPIRVType *ResType,
+ MachineInstr &I) const {
+ ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CL::frexp},
+ {SPIRV::InstructionSet::GLSL_std_450, GL::Frexp}};
+ for (const auto &Ex : ExtInsts) {
+ SPIRV::InstructionSet::InstructionSet Set = Ex.first;
+ uint32_t Opcode = Ex.second;
+ if (!STI.canUseExtInstSet(Set))
+ continue;
+
+ MachineIRBuilder MIRBuilder(I);
+ SPIRVType *PointeeTy = GR.getSPIRVTypeForVReg(I.getOperand(1).getReg());
+ const SPIRVType *PointerType = GR.getOrCreateSPIRVPointerType(
+ PointeeTy, MIRBuilder, SPIRV::StorageClass::Function);
+ Register PointerVReg =
+ createVirtualRegister(PointerType, &GR, MRI, MRI->getMF());
+
+ auto It = getOpVariableMBBIt(I);
+ auto MIB = BuildMI(*It->getParent(), It, It->getDebugLoc(),
+ TII.get(SPIRV::OpVariable))
+ .addDef(PointerVReg)
+ .addUse(GR.getSPIRVTypeID(PointerType))
+ .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
+ .constrainAllUses(TII, TRI, RBI);
+
+ MIB = MIB &
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
+ .addDef(ResVReg)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addImm(static_cast<uint32_t>(Ex.first))
+ .addImm(Opcode)
+ .add(I.getOperand(2))
+ .addUse(PointerVReg)
+ .constrainAllUses(TII, TRI, RBI);
+
+ MIB = MIB &
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
+ .addDef(I.getOperand(1).getReg())
+ .addUse(GR.getSPIRVTypeID(PointeeTy))
+ .addUse(PointerVReg)
+ .constrainAllUses(TII, TRI, RBI);
+ return MIB;
+ }
+ return false;
+}
+
bool SPIRVInstructionSelector::selectOpWithSrcs(Register ResVReg,
const SPIRVType *ResType,
MachineInstr &I,
@@ -2578,6 +2630,7 @@ bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(Cmp0)
.addUse(Cmp1)
+ .setMIFlags(I.getFlags())
.constrainAllUses(TII, TRI, RBI);
}
diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
index 27bb54c..db85e33 100644
--- a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
@@ -290,6 +290,9 @@ SPIRVLegalizerInfo::SPIRVLegalizerInfo(const SPIRVSubtarget &ST) {
// Control-flow. In some cases (e.g. constants) s1 may be promoted to s32.
getActionDefinitionsBuilder(G_BRCOND).legalFor({s1, s32});
+ getActionDefinitionsBuilder(G_FFREXP).legalForCartesianProduct(
+ allFloatScalarsAndVectors, {s32, v2s32, v3s32, v4s32, v8s32, v16s32});
+
// TODO: Review the target OpenCL and GLSL Extended Instruction Set specs to
// tighten these requirements. Many of these math functions are only legal on
// specific bitwidths, so they are not selectable for
@@ -584,7 +587,8 @@ bool SPIRVLegalizerInfo::legalizeIsFPClass(
}
if (FPClassTest PartialCheck = Mask & fcNan) {
- auto InfWithQnanBitC = buildSPIRVConstant(IntTy, Inf | QNaNBitMask);
+ auto InfWithQnanBitC =
+ buildSPIRVConstant(IntTy, std::move(Inf) | QNaNBitMask);
if (PartialCheck == fcNan) {
// isnan(V) ==> abs(V) u> int(inf)
appendToRes(
@@ -610,7 +614,7 @@ bool SPIRVLegalizerInfo::legalizeIsFPClass(
APInt ExpLSB = ExpMask & ~(ExpMask.shl(1));
auto ExpMinusOne = assignSPIRVTy(
MIRBuilder.buildSub(IntTy, Abs, buildSPIRVConstant(IntTy, ExpLSB)));
- APInt MaxExpMinusOne = ExpMask - ExpLSB;
+ APInt MaxExpMinusOne = std::move(ExpMask) - ExpLSB;
auto NormalRes = assignSPIRVTy(
MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_ULT, DstTy, ExpMinusOne,
buildSPIRVConstant(IntTy, MaxExpMinusOne)));
diff --git a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
index bc159d5..dc717a6 100644
--- a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
@@ -248,6 +248,22 @@ static InstrSignature instrToSignature(const MachineInstr &MI,
Register DefReg;
InstrSignature Signature{MI.getOpcode()};
for (unsigned i = 0; i < MI.getNumOperands(); ++i) {
+ // The only decorations that can be applied more than once to a given <id>
+ // or structure member are UserSemantic(5635), CacheControlLoadINTEL (6442),
+ // and CacheControlStoreINTEL (6443). For all the rest of decorations, we
+ // will only add to the signature the Opcode, the id to which it applies,
+ // and the decoration id, disregarding any decoration flags. This will
+ // ensure that any subsequent decoration with the same id will be deemed as
+ // a duplicate. Then, at the call site, we will be able to handle duplicates
+ // in the best way.
+ unsigned Opcode = MI.getOpcode();
+ if ((Opcode == SPIRV::OpDecorate) && i >= 2) {
+ unsigned DecorationID = MI.getOperand(1).getImm();
+ if (DecorationID != SPIRV::Decoration::UserSemantic &&
+ DecorationID != SPIRV::Decoration::CacheControlLoadINTEL &&
+ DecorationID != SPIRV::Decoration::CacheControlStoreINTEL)
+ continue;
+ }
const MachineOperand &MO = MI.getOperand(i);
size_t h;
if (MO.isReg()) {
@@ -559,8 +575,54 @@ static void collectOtherInstr(MachineInstr &MI, SPIRV::ModuleAnalysisInfo &MAI,
MAI.setSkipEmission(&MI);
InstrSignature MISign = instrToSignature(MI, MAI, true);
auto FoundMI = IS.insert(std::move(MISign));
- if (!FoundMI.second)
+ if (!FoundMI.second) {
+ if (MI.getOpcode() == SPIRV::OpDecorate) {
+ assert(MI.getNumOperands() >= 2 &&
+ "Decoration instructions must have at least 2 operands");
+ assert(MSType == SPIRV::MB_Annotations &&
+ "Only OpDecorate instructions can be duplicates");
+ // For FPFastMathMode decoration, we need to merge the flags of the
+ // duplicate decoration with the original one, so we need to find the
+ // original instruction that has the same signature. For the rest of
+ // instructions, we will simply skip the duplicate.
+ if (MI.getOperand(1).getImm() != SPIRV::Decoration::FPFastMathMode)
+ return; // Skip duplicates of other decorations.
+
+ const SPIRV::InstrList &Decorations = MAI.MS[MSType];
+ for (const MachineInstr *OrigMI : Decorations) {
+ if (instrToSignature(*OrigMI, MAI, true) == MISign) {
+ assert(OrigMI->getNumOperands() == MI.getNumOperands() &&
+ "Original instruction must have the same number of operands");
+ assert(
+ OrigMI->getNumOperands() == 3 &&
+ "FPFastMathMode decoration must have 3 operands for OpDecorate");
+ unsigned OrigFlags = OrigMI->getOperand(2).getImm();
+ unsigned NewFlags = MI.getOperand(2).getImm();
+ if (OrigFlags == NewFlags)
+ return; // No need to merge, the flags are the same.
+
+ // Emit warning about possible conflict between flags.
+ unsigned FinalFlags = OrigFlags | NewFlags;
+ llvm::errs()
+ << "Warning: Conflicting FPFastMathMode decoration flags "
+ "in instruction: "
+ << *OrigMI << "Original flags: " << OrigFlags
+ << ", new flags: " << NewFlags
+ << ". They will be merged on a best effort basis, but not "
+ "validated. Final flags: "
+ << FinalFlags << "\n";
+ MachineInstr *OrigMINonConst = const_cast<MachineInstr *>(OrigMI);
+ MachineOperand &OrigFlagsOp = OrigMINonConst->getOperand(2);
+ OrigFlagsOp =
+ MachineOperand::CreateImm(static_cast<unsigned>(FinalFlags));
+ return; // Merge done, so we found a duplicate; don't add it to MAI.MS
+ }
+ }
+ assert(false && "No original instruction found for the duplicate "
+ "OpDecorate, but we found one in IS.");
+ }
return; // insert failed, so we found a duplicate; don't add it to MAI.MS
+ }
// No duplicates, so add it.
if (Append)
MAI.MS[MSType].push_back(&MI);
@@ -934,6 +996,11 @@ static void addOpDecorateReqs(const MachineInstr &MI, unsigned DecIndex,
} else if (Dec == SPIRV::Decoration::FPMaxErrorDecorationINTEL) {
Reqs.addRequirements(SPIRV::Capability::FPMaxErrorINTEL);
Reqs.addExtension(SPIRV::Extension::SPV_INTEL_fp_max_error);
+ } else if (Dec == SPIRV::Decoration::FPFastMathMode) {
+ if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2)) {
+ Reqs.addRequirements(SPIRV::Capability::FloatControls2);
+ Reqs.addExtension(SPIRV::Extension::SPV_KHR_float_controls2);
+ }
}
}
@@ -1994,10 +2061,13 @@ static void collectReqs(const Module &M, SPIRV::ModuleAnalysisInfo &MAI,
// Collect requirements for OpExecutionMode instructions.
auto Node = M.getNamedMetadata("spirv.ExecutionMode");
if (Node) {
- bool RequireFloatControls = false, RequireFloatControls2 = false,
+ bool RequireFloatControls = false, RequireIntelFloatControls2 = false,
+ RequireKHRFloatControls2 = false,
VerLower14 = !ST.isAtLeastSPIRVVer(VersionTuple(1, 4));
- bool HasFloatControls2 =
+ bool HasIntelFloatControls2 =
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_float_controls2);
+ bool HasKHRFloatControls2 =
+ ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2);
for (unsigned i = 0; i < Node->getNumOperands(); i++) {
MDNode *MDN = cast<MDNode>(Node->getOperand(i));
const MDOperand &MDOp = MDN->getOperand(1);
@@ -2010,7 +2080,6 @@ static void collectReqs(const Module &M, SPIRV::ModuleAnalysisInfo &MAI,
switch (EM) {
case SPIRV::ExecutionMode::DenormPreserve:
case SPIRV::ExecutionMode::DenormFlushToZero:
- case SPIRV::ExecutionMode::SignedZeroInfNanPreserve:
case SPIRV::ExecutionMode::RoundingModeRTE:
case SPIRV::ExecutionMode::RoundingModeRTZ:
RequireFloatControls = VerLower14;
@@ -2021,8 +2090,28 @@ static void collectReqs(const Module &M, SPIRV::ModuleAnalysisInfo &MAI,
case SPIRV::ExecutionMode::RoundingModeRTNINTEL:
case SPIRV::ExecutionMode::FloatingPointModeALTINTEL:
case SPIRV::ExecutionMode::FloatingPointModeIEEEINTEL:
- if (HasFloatControls2) {
- RequireFloatControls2 = true;
+ if (HasIntelFloatControls2) {
+ RequireIntelFloatControls2 = true;
+ MAI.Reqs.getAndAddRequirements(
+ SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
+ }
+ break;
+ case SPIRV::ExecutionMode::FPFastMathDefault: {
+ if (HasKHRFloatControls2) {
+ RequireKHRFloatControls2 = true;
+ MAI.Reqs.getAndAddRequirements(
+ SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
+ }
+ break;
+ }
+ case SPIRV::ExecutionMode::ContractionOff:
+ case SPIRV::ExecutionMode::SignedZeroInfNanPreserve:
+ if (HasKHRFloatControls2) {
+ RequireKHRFloatControls2 = true;
+ MAI.Reqs.getAndAddRequirements(
+ SPIRV::OperandCategory::ExecutionModeOperand,
+ SPIRV::ExecutionMode::FPFastMathDefault, ST);
+ } else {
MAI.Reqs.getAndAddRequirements(
SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
}
@@ -2037,8 +2126,10 @@ static void collectReqs(const Module &M, SPIRV::ModuleAnalysisInfo &MAI,
if (RequireFloatControls &&
ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls))
MAI.Reqs.addExtension(SPIRV::Extension::SPV_KHR_float_controls);
- if (RequireFloatControls2)
+ if (RequireIntelFloatControls2)
MAI.Reqs.addExtension(SPIRV::Extension::SPV_INTEL_float_controls2);
+ if (RequireKHRFloatControls2)
+ MAI.Reqs.addExtension(SPIRV::Extension::SPV_KHR_float_controls2);
}
for (auto FI = M.begin(), E = M.end(); FI != E; ++FI) {
const Function &F = *FI;
@@ -2078,8 +2169,11 @@ static void collectReqs(const Module &M, SPIRV::ModuleAnalysisInfo &MAI,
}
}
-static unsigned getFastMathFlags(const MachineInstr &I) {
+static unsigned getFastMathFlags(const MachineInstr &I,
+ const SPIRVSubtarget &ST) {
unsigned Flags = SPIRV::FPFastMathMode::None;
+ bool CanUseKHRFloatControls2 =
+ ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2);
if (I.getFlag(MachineInstr::MIFlag::FmNoNans))
Flags |= SPIRV::FPFastMathMode::NotNaN;
if (I.getFlag(MachineInstr::MIFlag::FmNoInfs))
@@ -2088,12 +2182,45 @@ static unsigned getFastMathFlags(const MachineInstr &I) {
Flags |= SPIRV::FPFastMathMode::NSZ;
if (I.getFlag(MachineInstr::MIFlag::FmArcp))
Flags |= SPIRV::FPFastMathMode::AllowRecip;
- if (I.getFlag(MachineInstr::MIFlag::FmReassoc))
- Flags |= SPIRV::FPFastMathMode::Fast;
+ if (I.getFlag(MachineInstr::MIFlag::FmContract) && CanUseKHRFloatControls2)
+ Flags |= SPIRV::FPFastMathMode::AllowContract;
+ if (I.getFlag(MachineInstr::MIFlag::FmReassoc)) {
+ if (CanUseKHRFloatControls2)
+ // LLVM reassoc maps to SPIRV transform, see
+ // https://github.com/KhronosGroup/SPIRV-Registry/issues/326 for details.
+ // Because we are enabling AllowTransform, we must enable AllowReassoc and
+ // AllowContract too, as required by SPIRV spec. Also, we used to map
+ // MIFlag::FmReassoc to FPFastMathMode::Fast, which now should instead be
+ // replaced by turning on all the other bits. Therefore, we're
+ // enabling every bit here except None and Fast.
+ Flags |= SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf |
+ SPIRV::FPFastMathMode::NSZ | SPIRV::FPFastMathMode::AllowRecip |
+ SPIRV::FPFastMathMode::AllowTransform |
+ SPIRV::FPFastMathMode::AllowReassoc |
+ SPIRV::FPFastMathMode::AllowContract;
+ else
+ Flags |= SPIRV::FPFastMathMode::Fast;
+ }
+
+ if (CanUseKHRFloatControls2) {
+ // Error out if SPIRV::FPFastMathMode::Fast is enabled.
+ assert(!(Flags & SPIRV::FPFastMathMode::Fast) &&
+ "SPIRV::FPFastMathMode::Fast is deprecated and should not be used "
+ "anymore.");
+
+ // Error out if AllowTransform is enabled without AllowReassoc and
+ // AllowContract.
+ assert((!(Flags & SPIRV::FPFastMathMode::AllowTransform) ||
+ ((Flags & SPIRV::FPFastMathMode::AllowReassoc &&
+ Flags & SPIRV::FPFastMathMode::AllowContract))) &&
+ "SPIRV::FPFastMathMode::AllowTransform requires AllowReassoc and "
+ "AllowContract flags to be enabled as well.");
+ }
+
return Flags;
}
-static bool isFastMathMathModeAvailable(const SPIRVSubtarget &ST) {
+static bool isFastMathModeAvailable(const SPIRVSubtarget &ST) {
if (ST.isKernel())
return true;
if (ST.getSPIRVVersion() < VersionTuple(1, 2))
@@ -2101,9 +2228,10 @@ static bool isFastMathMathModeAvailable(const SPIRVSubtarget &ST) {
return ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2);
}
-static void handleMIFlagDecoration(MachineInstr &I, const SPIRVSubtarget &ST,
- const SPIRVInstrInfo &TII,
- SPIRV::RequirementHandler &Reqs) {
+static void handleMIFlagDecoration(
+ MachineInstr &I, const SPIRVSubtarget &ST, const SPIRVInstrInfo &TII,
+ SPIRV::RequirementHandler &Reqs, const SPIRVGlobalRegistry *GR,
+ SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec) {
if (I.getFlag(MachineInstr::MIFlag::NoSWrap) && TII.canUseNSW(I) &&
getSymbolicOperandRequirements(SPIRV::OperandCategory::DecorationOperand,
SPIRV::Decoration::NoSignedWrap, ST, Reqs)
@@ -2119,13 +2247,53 @@ static void handleMIFlagDecoration(MachineInstr &I, const SPIRVSubtarget &ST,
buildOpDecorate(I.getOperand(0).getReg(), I, TII,
SPIRV::Decoration::NoUnsignedWrap, {});
}
- if (!TII.canUseFastMathFlags(I))
- return;
- unsigned FMFlags = getFastMathFlags(I);
- if (FMFlags == SPIRV::FPFastMathMode::None)
+ if (!TII.canUseFastMathFlags(
+ I, ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2)))
return;
- if (isFastMathMathModeAvailable(ST)) {
+ unsigned FMFlags = getFastMathFlags(I, ST);
+ if (FMFlags == SPIRV::FPFastMathMode::None) {
+ // We also need to check if any FPFastMathDefault info was set for the
+ // types used in this instruction.
+ if (FPFastMathDefaultInfoVec.empty())
+ return;
+
+ // There are three types of instructions that can use fast math flags:
+ // 1. Arithmetic instructions (FAdd, FMul, FSub, FDiv, FRem, etc.)
+ // 2. Relational instructions (FCmp, FOrd, FUnord, etc.)
+ // 3. Extended instructions (ExtInst)
+ // For arithmetic instructions, the floating point type can be in the
+ // result type or in the operands, but they all must be the same.
+ // For the relational and logical instructions, the floating point type
+ // can only be in the operands 1 and 2, not the result type. Also, the
+ // operands must have the same type. For the extended instructions, the
+ // floating point type can be in the result type or in the operands. It's
+ // unclear if the operands and the result type must be the same. Let's
+ // assume they must be. Therefore, for 1. and 2., we can check the first
+ // operand type, and for 3. we can check the result type.
+ assert(I.getNumOperands() >= 3 && "Expected at least 3 operands");
+ Register ResReg = I.getOpcode() == SPIRV::OpExtInst
+ ? I.getOperand(1).getReg()
+ : I.getOperand(2).getReg();
+ SPIRVType *ResType = GR->getSPIRVTypeForVReg(ResReg, I.getMF());
+ const Type *Ty = GR->getTypeForSPIRVType(ResType);
+ Ty = Ty->isVectorTy() ? cast<VectorType>(Ty)->getElementType() : Ty;
+
+ // Match instruction type with the FPFastMathDefaultInfoVec.
+ bool Emit = false;
+ for (SPIRV::FPFastMathDefaultInfo &Elem : FPFastMathDefaultInfoVec) {
+ if (Ty == Elem.Ty) {
+ FMFlags = Elem.FastMathFlags;
+ Emit = Elem.ContractionOff || Elem.SignedZeroInfNanPreserve ||
+ Elem.FPFastMathDefault;
+ break;
+ }
+ }
+
+ if (FMFlags == SPIRV::FPFastMathMode::None && !Emit)
+ return;
+ }
+ if (isFastMathModeAvailable(ST)) {
Register DstReg = I.getOperand(0).getReg();
buildOpDecorate(DstReg, I, TII, SPIRV::Decoration::FPFastMathMode,
{FMFlags});
@@ -2135,14 +2303,17 @@ static void handleMIFlagDecoration(MachineInstr &I, const SPIRVSubtarget &ST,
// Walk all functions and add decorations related to MI flags.
static void addDecorations(const Module &M, const SPIRVInstrInfo &TII,
MachineModuleInfo *MMI, const SPIRVSubtarget &ST,
- SPIRV::ModuleAnalysisInfo &MAI) {
+ SPIRV::ModuleAnalysisInfo &MAI,
+ const SPIRVGlobalRegistry *GR) {
for (auto F = M.begin(), E = M.end(); F != E; ++F) {
MachineFunction *MF = MMI->getMachineFunction(*F);
if (!MF)
continue;
+
for (auto &MBB : *MF)
for (auto &MI : MBB)
- handleMIFlagDecoration(MI, ST, TII, MAI.Reqs);
+ handleMIFlagDecoration(MI, ST, TII, MAI.Reqs, GR,
+ MAI.FPFastMathDefaultInfoMap[&(*F)]);
}
}
@@ -2188,6 +2359,111 @@ static void patchPhis(const Module &M, SPIRVGlobalRegistry *GR,
}
}
+static SPIRV::FPFastMathDefaultInfoVector &getOrCreateFPFastMathDefaultInfoVec(
+ const Module &M, SPIRV::ModuleAnalysisInfo &MAI, const Function *F) {
+ auto it = MAI.FPFastMathDefaultInfoMap.find(F);
+ if (it != MAI.FPFastMathDefaultInfoMap.end())
+ return it->second;
+
+ // If the map does not contain the entry, create a new one. Initialize it to
+ // contain all 3 elements sorted by bit width of target type: {half, float,
+ // double}.
+ SPIRV::FPFastMathDefaultInfoVector FPFastMathDefaultInfoVec;
+ FPFastMathDefaultInfoVec.emplace_back(Type::getHalfTy(M.getContext()),
+ SPIRV::FPFastMathMode::None);
+ FPFastMathDefaultInfoVec.emplace_back(Type::getFloatTy(M.getContext()),
+ SPIRV::FPFastMathMode::None);
+ FPFastMathDefaultInfoVec.emplace_back(Type::getDoubleTy(M.getContext()),
+ SPIRV::FPFastMathMode::None);
+ return MAI.FPFastMathDefaultInfoMap[F] = std::move(FPFastMathDefaultInfoVec);
+}
+
+static SPIRV::FPFastMathDefaultInfo &getFPFastMathDefaultInfo(
+ SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec,
+ const Type *Ty) {
+ size_t BitWidth = Ty->getScalarSizeInBits();
+ int Index =
+ SPIRV::FPFastMathDefaultInfoVector::computeFPFastMathDefaultInfoVecIndex(
+ BitWidth);
+ assert(Index >= 0 && Index < 3 &&
+ "Expected FPFastMathDefaultInfo for half, float, or double");
+ assert(FPFastMathDefaultInfoVec.size() == 3 &&
+ "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
+ return FPFastMathDefaultInfoVec[Index];
+}
+
+static void collectFPFastMathDefaults(const Module &M,
+ SPIRV::ModuleAnalysisInfo &MAI,
+ const SPIRVSubtarget &ST) {
+ if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2))
+ return;
+
+ // Store the FPFastMathDefaultInfo in the FPFastMathDefaultInfoMap.
+ // We need the entry point (function) as the key, and the target
+ // type and flags as the value.
+ // We also need to check ContractionOff and SignedZeroInfNanPreserve
+ // execution modes, as they are now deprecated and must be replaced
+ // with FPFastMathDefaultInfo.
+ auto Node = M.getNamedMetadata("spirv.ExecutionMode");
+ if (!Node)
+ return;
+
+ for (unsigned i = 0; i < Node->getNumOperands(); i++) {
+ MDNode *MDN = cast<MDNode>(Node->getOperand(i));
+ assert(MDN->getNumOperands() >= 2 && "Expected at least 2 operands");
+ const Function *F = cast<Function>(
+ cast<ConstantAsMetadata>(MDN->getOperand(0))->getValue());
+ const auto EM =
+ cast<ConstantInt>(
+ cast<ConstantAsMetadata>(MDN->getOperand(1))->getValue())
+ ->getZExtValue();
+ if (EM == SPIRV::ExecutionMode::FPFastMathDefault) {
+ assert(MDN->getNumOperands() == 4 &&
+ "Expected 4 operands for FPFastMathDefault");
+
+ const Type *T = cast<ValueAsMetadata>(MDN->getOperand(2))->getType();
+ unsigned Flags =
+ cast<ConstantInt>(
+ cast<ConstantAsMetadata>(MDN->getOperand(3))->getValue())
+ ->getZExtValue();
+ SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
+ getOrCreateFPFastMathDefaultInfoVec(M, MAI, F);
+ SPIRV::FPFastMathDefaultInfo &Info =
+ getFPFastMathDefaultInfo(FPFastMathDefaultInfoVec, T);
+ Info.FastMathFlags = Flags;
+ Info.FPFastMathDefault = true;
+ } else if (EM == SPIRV::ExecutionMode::ContractionOff) {
+ assert(MDN->getNumOperands() == 2 &&
+ "Expected no operands for ContractionOff");
+
+ // We need to save this info for every supported FP type, i.e. {half,
+ // float, double}.
+ SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
+ getOrCreateFPFastMathDefaultInfoVec(M, MAI, F);
+ for (SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
+ Info.ContractionOff = true;
+ }
+ } else if (EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) {
+ assert(MDN->getNumOperands() == 3 &&
+ "Expected 1 operand for SignedZeroInfNanPreserve");
+ unsigned TargetWidth =
+ cast<ConstantInt>(
+ cast<ConstantAsMetadata>(MDN->getOperand(2))->getValue())
+ ->getZExtValue();
+ // We need to save this info only for the FP type with TargetWidth.
+ SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
+ getOrCreateFPFastMathDefaultInfoVec(M, MAI, F);
+ int Index = SPIRV::FPFastMathDefaultInfoVector::
+ computeFPFastMathDefaultInfoVecIndex(TargetWidth);
+ assert(Index >= 0 && Index < 3 &&
+ "Expected FPFastMathDefaultInfo for half, float, or double");
+ assert(FPFastMathDefaultInfoVec.size() == 3 &&
+ "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
+ FPFastMathDefaultInfoVec[Index].SignedZeroInfNanPreserve = true;
+ }
+ }
+}
+
struct SPIRV::ModuleAnalysisInfo SPIRVModuleAnalysis::MAI;
void SPIRVModuleAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
@@ -2209,7 +2485,8 @@ bool SPIRVModuleAnalysis::runOnModule(Module &M) {
patchPhis(M, GR, *TII, MMI);
addMBBNames(M, *TII, MMI, *ST, MAI);
- addDecorations(M, *TII, MMI, *ST, MAI);
+ collectFPFastMathDefaults(M, MAI, *ST);
+ addDecorations(M, *TII, MMI, *ST, MAI, GR);
collectReqs(M, MAI, MMI, *ST);
diff --git a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.h b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.h
index 41c792a..d8376cd 100644
--- a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.h
+++ b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.h
@@ -159,6 +159,13 @@ struct ModuleAnalysisInfo {
InstrList MS[NUM_MODULE_SECTIONS];
// The table maps MBB number to SPIR-V unique ID register.
DenseMap<std::pair<const MachineFunction *, int>, MCRegister> BBNumToRegMap;
+ // The table maps function pointers to their default FP fast math info. It can
+ // be assumed that the SmallVector is sorted by the bit width of the type. The
+ // first element is the smallest bit width, and the last element is the
+ // largest bit width, therefore, we will have {half, float, double} in
+ // the order of their bit widths.
+ DenseMap<const Function *, SPIRV::FPFastMathDefaultInfoVector>
+ FPFastMathDefaultInfoMap;
MCRegister getFuncReg(const Function *F) {
assert(F && "Function is null");
diff --git a/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp b/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
index 1a08c6a..db6f2d6 100644
--- a/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
@@ -839,6 +839,7 @@ static uint32_t convertFloatToSPIRVWord(float F) {
static void insertSpirvDecorations(MachineFunction &MF, SPIRVGlobalRegistry *GR,
MachineIRBuilder MIB) {
+ const SPIRVSubtarget &ST = cast<SPIRVSubtarget>(MIB.getMF().getSubtarget());
SmallVector<MachineInstr *, 10> ToErase;
for (MachineBasicBlock &MBB : MF) {
for (MachineInstr &MI : MBB) {
@@ -849,7 +850,7 @@ static void insertSpirvDecorations(MachineFunction &MF, SPIRVGlobalRegistry *GR,
MIB.setInsertPt(*MI.getParent(), MI.getNextNode());
if (isSpvIntrinsic(MI, Intrinsic::spv_assign_decoration)) {
buildOpSpirvDecorations(MI.getOperand(1).getReg(), MIB,
- MI.getOperand(2).getMetadata());
+ MI.getOperand(2).getMetadata(), ST);
} else if (isSpvIntrinsic(MI,
Intrinsic::spv_assign_fpmaxerror_decoration)) {
ConstantFP *OpV = mdconst::dyn_extract<ConstantFP>(
diff --git a/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td b/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td
index 66ce5a2..6a32dba 100644
--- a/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td
+++ b/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td
@@ -802,6 +802,7 @@ defm RoundingModeRTPINTEL : ExecutionModeOperand<5620, [RoundToInfinityINTEL]>;
defm RoundingModeRTNINTEL : ExecutionModeOperand<5621, [RoundToInfinityINTEL]>;
defm FloatingPointModeALTINTEL : ExecutionModeOperand<5622, [FloatingPointModeINTEL]>;
defm FloatingPointModeIEEEINTEL : ExecutionModeOperand<5623, [FloatingPointModeINTEL]>;
+defm FPFastMathDefault : ExecutionModeOperand<6028, [FloatControls2]>;
//===----------------------------------------------------------------------===//
// Multiclass used to define StorageClass enum values and at the same time
@@ -1153,6 +1154,9 @@ defm NotInf : FPFastMathModeOperand<0x2, [Kernel]>;
defm NSZ : FPFastMathModeOperand<0x4, [Kernel]>;
defm AllowRecip : FPFastMathModeOperand<0x8, [Kernel]>;
defm Fast : FPFastMathModeOperand<0x10, [Kernel]>;
+defm AllowContract : FPFastMathModeOperand<0x10000, [FloatControls2]>;
+defm AllowReassoc : FPFastMathModeOperand<0x20000, [FloatControls2]>;
+defm AllowTransform : FPFastMathModeOperand<0x40000, [FloatControls2]>;
//===----------------------------------------------------------------------===//
// Multiclass used to define FPRoundingMode enum values and at the same time
diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp
index 820e56b..327c011 100644
--- a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp
@@ -181,7 +181,7 @@ void buildOpMemberDecorate(Register Reg, MachineInstr &I,
}
void buildOpSpirvDecorations(Register Reg, MachineIRBuilder &MIRBuilder,
- const MDNode *GVarMD) {
+ const MDNode *GVarMD, const SPIRVSubtarget &ST) {
for (unsigned I = 0, E = GVarMD->getNumOperands(); I != E; ++I) {
auto *OpMD = dyn_cast<MDNode>(GVarMD->getOperand(I));
if (!OpMD)
@@ -193,6 +193,20 @@ void buildOpSpirvDecorations(Register Reg, MachineIRBuilder &MIRBuilder,
if (!DecorationId)
report_fatal_error("Expect SPIR-V <Decoration> operand to be the first "
"element of the decoration");
+
+ // The goal of `spirv.Decorations` metadata is to provide a way to
+ // represent SPIR-V entities that do not map to LLVM in an obvious way.
+ // FP flags do have obvious matches between LLVM IR and SPIR-V.
+ // Additionally, we have no guarantee at this point that the flags passed
+ // through the decoration are not violated already in the optimizer passes.
+ // Therefore, we simply ignore FP flags, including NoContraction, and
+ // FPFastMathMode.
+ if (DecorationId->getZExtValue() ==
+ static_cast<uint32_t>(SPIRV::Decoration::NoContraction) ||
+ DecorationId->getZExtValue() ==
+ static_cast<uint32_t>(SPIRV::Decoration::FPFastMathMode)) {
+ continue; // Ignored.
+ }
auto MIB = MIRBuilder.buildInstr(SPIRV::OpDecorate)
.addUse(Reg)
.addImm(static_cast<uint32_t>(DecorationId->getZExtValue()));
diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.h b/llvm/lib/Target/SPIRV/SPIRVUtils.h
index 45c520a..409a0fd 100644
--- a/llvm/lib/Target/SPIRV/SPIRVUtils.h
+++ b/llvm/lib/Target/SPIRV/SPIRVUtils.h
@@ -113,6 +113,54 @@ public:
std::function<bool(BasicBlock *)> Op);
};
+namespace SPIRV {
+struct FPFastMathDefaultInfo {
+ const Type *Ty = nullptr;
+ unsigned FastMathFlags = 0;
+ // With SPV_KHR_float_controls2, ContractionOff and SignedZeroInfNanPreserve are
+ // deprecated, and we replace them with FPFastMathDefault with appropriate flags
+ // instead. However, we have no guarantee about the order in which we will
+ // process execution modes. Therefore it could happen that we first process
+ // ContractionOff, setting AllowContraction bit to 0, and then we process
+ // FPFastMathDefault enabling AllowContraction bit, effectively invalidating
+ // ContractionOff. Because of that, it's best to keep separate bits for the
+ // different execution modes, and we will try and combine them later when we
+ // emit OpExecutionMode instructions.
+ bool ContractionOff = false;
+ bool SignedZeroInfNanPreserve = false;
+ bool FPFastMathDefault = false;
+
+ FPFastMathDefaultInfo() = default;
+ FPFastMathDefaultInfo(const Type *Ty, unsigned FastMathFlags)
+ : Ty(Ty), FastMathFlags(FastMathFlags) {}
+ bool operator==(const FPFastMathDefaultInfo &Other) const {
+ return Ty == Other.Ty && FastMathFlags == Other.FastMathFlags &&
+ ContractionOff == Other.ContractionOff &&
+ SignedZeroInfNanPreserve == Other.SignedZeroInfNanPreserve &&
+ FPFastMathDefault == Other.FPFastMathDefault;
+ }
+};
+
+struct FPFastMathDefaultInfoVector
+ : public SmallVector<SPIRV::FPFastMathDefaultInfo, 3> {
+ static size_t computeFPFastMathDefaultInfoVecIndex(size_t BitWidth) {
+ switch (BitWidth) {
+ case 16: // half
+ return 0;
+ case 32: // float
+ return 1;
+ case 64: // double
+ return 2;
+ default:
+ report_fatal_error("Expected BitWidth to be 16, 32, 64", false);
+ }
+ llvm_unreachable(
+ "Unreachable code in computeFPFastMathDefaultInfoVecIndex");
+ }
+};
+
+} // namespace SPIRV
+
// Add the given string as a series of integer operand, inserting null
// terminators and padding to make sure the operands all have 32-bit
// little-endian words.
@@ -161,7 +209,7 @@ void buildOpMemberDecorate(Register Reg, MachineInstr &I,
// Add an OpDecorate instruction by "spirv.Decorations" metadata node.
void buildOpSpirvDecorations(Register Reg, MachineIRBuilder &MIRBuilder,
- const MDNode *GVarMD);
+ const MDNode *GVarMD, const SPIRVSubtarget &ST);
// Return a valid position for the OpVariable instruction inside a function,
// i.e., at the beginning of the first block of the function.
@@ -508,6 +556,5 @@ unsigned getArrayComponentCount(const MachineRegisterInfo *MRI,
const MachineInstr *ResType);
MachineBasicBlock::iterator
getFirstValidInstructionInsertPoint(MachineBasicBlock &BB);
-
} // namespace llvm
#endif // LLVM_LIB_TARGET_SPIRV_SPIRVUTILS_H