Diffstat (limited to 'llvm/lib/CodeGen')
-rw-r--r-- | llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 1
-rw-r--r-- | llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp | 60
-rw-r--r-- | llvm/lib/CodeGen/CodeGen.cpp | 1
-rw-r--r-- | llvm/lib/CodeGen/CommandFlags.cpp | 9
-rw-r--r-- | llvm/lib/CodeGen/IfConversion.cpp | 2
-rw-r--r-- | llvm/lib/CodeGen/MIR2Vec.cpp | 166
-rw-r--r-- | llvm/lib/CodeGen/MIRFSDiscriminator.cpp | 6
-rw-r--r-- | llvm/lib/CodeGen/MIRSampleProfile.cpp | 4
-rw-r--r-- | llvm/lib/CodeGen/MachineLICM.cpp | 18
-rw-r--r-- | llvm/lib/CodeGen/RegAllocFast.cpp | 2
-rw-r--r-- | llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 48
-rw-r--r-- | llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp | 2
-rw-r--r-- | llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp | 15
-rw-r--r-- | llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 7
-rw-r--r-- | llvm/lib/CodeGen/ShrinkWrap.cpp | 2
-rw-r--r-- | llvm/lib/CodeGen/TargetOptionsImpl.cpp | 2
16 files changed, 263 insertions, 82 deletions
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index e2af0c5..fefde64f 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -1438,6 +1438,7 @@ getBBAddrMapFeature(const MachineFunction &MF, int NumMBBSectionRanges,
           BBFreqEnabled,
           BrProbEnabled,
           MF.hasBBSections() && NumMBBSectionRanges > 1,
+          // Use static_cast to avoid breakage of tests on windows.
           static_cast<bool>(BBAddrMapSkipEmitBBEntries),
           HasCalls,
           false};
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
index f0f0861..c7d45897 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
@@ -566,32 +566,54 @@ bool DwarfExpression::addExpression(
   case dwarf::DW_OP_LLVM_extract_bits_zext: {
     unsigned SizeInBits = Op->getArg(1);
     unsigned BitOffset = Op->getArg(0);
+    unsigned DerefSize = 0;
+    // Operations are done in the DWARF "generic type" whose size
+    // is the size of a pointer.
+    unsigned PtrSizeInBytes = CU.getAsmPrinter()->MAI->getCodePointerSize();
 
     // If we have a memory location then dereference to get the value, though
     // we have to make sure we don't dereference any bytes past the end of the
     // object.
     if (isMemoryLocation()) {
-      emitOp(dwarf::DW_OP_deref_size);
-      emitUnsigned(alignTo(BitOffset + SizeInBits, 8) / 8);
+      DerefSize = alignTo(BitOffset + SizeInBits, 8) / 8;
+      if (DerefSize == PtrSizeInBytes) {
+        emitOp(dwarf::DW_OP_deref);
+      } else {
+        emitOp(dwarf::DW_OP_deref_size);
+        emitUnsigned(DerefSize);
+      }
     }
 
-    // Extract the bits by a shift left (to shift out the bits after what we
-    // want to extract) followed by shift right (to shift the bits to position
-    // 0 and also sign/zero extend). These operations are done in the DWARF
-    // "generic type" whose size is the size of a pointer.
-    unsigned PtrSizeInBytes = CU.getAsmPrinter()->MAI->getCodePointerSize();
-    unsigned LeftShift = PtrSizeInBytes * 8 - (SizeInBits + BitOffset);
-    unsigned RightShift = LeftShift + BitOffset;
-    if (LeftShift) {
-      emitOp(dwarf::DW_OP_constu);
-      emitUnsigned(LeftShift);
-      emitOp(dwarf::DW_OP_shl);
-    }
-    if (RightShift) {
-      emitOp(dwarf::DW_OP_constu);
-      emitUnsigned(RightShift);
-      emitOp(OpNum == dwarf::DW_OP_LLVM_extract_bits_sext ? dwarf::DW_OP_shra
-                                                          : dwarf::DW_OP_shr);
+    // If a dereference was emitted for an unsigned value, and
+    // there's no bit offset, then a bit of optimization is
+    // possible.
+    if (OpNum == dwarf::DW_OP_LLVM_extract_bits_zext && BitOffset == 0) {
+      if (8 * DerefSize == SizeInBits) {
+        // The correct value is already on the stack.
+      } else {
+        // No need to shift, we can just mask off the desired bits.
+        emitOp(dwarf::DW_OP_constu);
+        emitUnsigned((1u << SizeInBits) - 1);
+        emitOp(dwarf::DW_OP_and);
+      }
+    } else {
+      // Extract the bits by a shift left (to shift out the bits after what we
+      // want to extract) followed by shift right (to shift the bits to
+      // position 0 and also sign/zero extend).
+      unsigned LeftShift = PtrSizeInBytes * 8 - (SizeInBits + BitOffset);
+      unsigned RightShift = LeftShift + BitOffset;
+      if (LeftShift) {
+        emitOp(dwarf::DW_OP_constu);
+        emitUnsigned(LeftShift);
+        emitOp(dwarf::DW_OP_shl);
+      }
+      if (RightShift) {
+        emitOp(dwarf::DW_OP_constu);
+        emitUnsigned(RightShift);
+        emitOp(OpNum == dwarf::DW_OP_LLVM_extract_bits_sext
+                   ? dwarf::DW_OP_shra
+                   : dwarf::DW_OP_shr);
+      }
     }
 
     // The value is now at the top of the stack, so set the location to
diff --git a/llvm/lib/CodeGen/CodeGen.cpp b/llvm/lib/CodeGen/CodeGen.cpp
index c438eae..9795a0b 100644
--- a/llvm/lib/CodeGen/CodeGen.cpp
+++ b/llvm/lib/CodeGen/CodeGen.cpp
@@ -98,6 +98,7 @@ void llvm::initializeCodeGen(PassRegistry &Registry) {
   initializeMachineUniformityAnalysisPassPass(Registry);
   initializeMIR2VecVocabLegacyAnalysisPass(Registry);
   initializeMIR2VecVocabPrinterLegacyPassPass(Registry);
+  initializeMIR2VecPrinterLegacyPassPass(Registry);
   initializeMachineUniformityInfoPrinterPassPass(Registry);
   initializeMachineVerifierLegacyPassPass(Registry);
   initializeObjCARCContractLegacyPassPass(Registry);
diff --git a/llvm/lib/CodeGen/CommandFlags.cpp b/llvm/lib/CodeGen/CommandFlags.cpp
index 0522698..c1365f4 100644
--- a/llvm/lib/CodeGen/CommandFlags.cpp
+++ b/llvm/lib/CodeGen/CommandFlags.cpp
@@ -64,7 +64,6 @@ CGOPT_EXP(uint64_t, LargeDataThreshold)
 CGOPT(ExceptionHandling, ExceptionModel)
 CGOPT_EXP(CodeGenFileType, FileType)
 CGOPT(FramePointerKind, FramePointerUsage)
-CGOPT(bool, EnableUnsafeFPMath)
 CGOPT(bool, EnableNoInfsFPMath)
 CGOPT(bool, EnableNoNaNsFPMath)
 CGOPT(bool, EnableNoSignedZerosFPMath)
@@ -219,12 +218,6 @@ codegen::RegisterCodeGenFlags::RegisterCodeGenFlags() {
                              "Enable frame pointer elimination")));
   CGBINDOPT(FramePointerUsage);
 
-  static cl::opt<bool> EnableUnsafeFPMath(
-      "enable-unsafe-fp-math",
-      cl::desc("Enable optimizations that may decrease FP precision"),
-      cl::init(false));
-  CGBINDOPT(EnableUnsafeFPMath);
-
   static cl::opt<bool> EnableNoInfsFPMath(
       "enable-no-infs-fp-math",
       cl::desc("Enable FP math optimizations that assume no +-Infs"),
@@ -552,7 +545,6 @@ TargetOptions
 codegen::InitTargetOptionsFromCodeGenFlags(const Triple &TheTriple) {
   TargetOptions Options;
   Options.AllowFPOpFusion = getFuseFPOps();
-  Options.UnsafeFPMath = getEnableUnsafeFPMath();
   Options.NoInfsFPMath = getEnableNoInfsFPMath();
   Options.NoNaNsFPMath = getEnableNoNaNsFPMath();
   Options.NoSignedZerosFPMath = getEnableNoSignedZerosFPMath();
@@ -706,7 +698,6 @@ void codegen::setFunctionAttributes(StringRef CPU, StringRef Features,
   if (getStackRealign())
     NewAttrs.addAttribute("stackrealign");
 
-  HANDLE_BOOL_ATTR(EnableUnsafeFPMathView, "unsafe-fp-math");
   HANDLE_BOOL_ATTR(EnableNoInfsFPMathView, "no-infs-fp-math");
   HANDLE_BOOL_ATTR(EnableNoNaNsFPMathView, "no-nans-fp-math");
   HANDLE_BOOL_ATTR(EnableNoSignedZerosFPMathView, "no-signed-zeros-fp-math");
diff --git a/llvm/lib/CodeGen/IfConversion.cpp b/llvm/lib/CodeGen/IfConversion.cpp
index f80e1e8..3ac6d2a 100644
--- a/llvm/lib/CodeGen/IfConversion.cpp
+++ b/llvm/lib/CodeGen/IfConversion.cpp
@@ -1498,7 +1498,7 @@ static void UpdatePredRedefs(MachineInstr &MI, LivePhysRegs &Redefs) {
   // Before stepping forward past MI, remember which regs were live
   // before MI. This is needed to set the Undef flag only when reg is
   // dead.
-  SparseSet<MCPhysReg, identity<MCPhysReg>> LiveBeforeMI;
+  SparseSet<MCPhysReg, MCPhysReg> LiveBeforeMI;
   LiveBeforeMI.setUniverse(TRI->getNumRegs());
   for (unsigned Reg : Redefs)
     LiveBeforeMI.insert(Reg);
diff --git a/llvm/lib/CodeGen/MIR2Vec.cpp b/llvm/lib/CodeGen/MIR2Vec.cpp
index 5c78d98..99be1fc0 100644
--- a/llvm/lib/CodeGen/MIR2Vec.cpp
+++ b/llvm/lib/CodeGen/MIR2Vec.cpp
@@ -12,6 +12,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/CodeGen/MIR2Vec.h"
+#include "llvm/ADT/DepthFirstIterator.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/CodeGen/TargetInstrInfo.h"
 #include "llvm/IR/Module.h"
@@ -29,20 +30,30 @@ using namespace mir2vec;
 STATISTIC(MIRVocabMissCounter,
           "Number of lookups to MIR entities not present in the vocabulary");
 
-cl::OptionCategory llvm::mir2vec::MIR2VecCategory("MIR2Vec Options");
+namespace llvm {
+namespace mir2vec {
+cl::OptionCategory MIR2VecCategory("MIR2Vec Options");
 
 // FIXME: Use a default vocab when not specified
 static cl::opt<std::string>
     VocabFile("mir2vec-vocab-path", cl::Optional,
               cl::desc("Path to the vocabulary file for MIR2Vec"), cl::init(""),
               cl::cat(MIR2VecCategory));
 
-cl::opt<float>
-    llvm::mir2vec::OpcWeight("mir2vec-opc-weight", cl::Optional, cl::init(1.0),
-                             cl::desc("Weight for machine opcode embeddings"),
-                             cl::cat(MIR2VecCategory));
+cl::opt<float> OpcWeight("mir2vec-opc-weight", cl::Optional, cl::init(1.0),
+                         cl::desc("Weight for machine opcode embeddings"),
+                         cl::cat(MIR2VecCategory));
+cl::opt<MIR2VecKind> MIR2VecEmbeddingKind(
+    "mir2vec-kind", cl::Optional,
+    cl::values(clEnumValN(MIR2VecKind::Symbolic, "symbolic",
+                          "Generate symbolic embeddings for MIR")),
+    cl::init(MIR2VecKind::Symbolic), cl::desc("MIR2Vec embedding kind"),
+    cl::cat(MIR2VecCategory));
+
+} // namespace mir2vec
+} // namespace llvm
 
 //===----------------------------------------------------------------------===//
-// Vocabulary Implementation
+// Vocabulary
 //===----------------------------------------------------------------------===//
 
@@ -188,6 +199,28 @@ void MIRVocabulary::buildCanonicalOpcodeMapping() {
                     << " unique base opcodes\n");
 }
 
+Expected<MIRVocabulary>
+MIRVocabulary::createDummyVocabForTest(const TargetInstrInfo &TII,
+                                       unsigned Dim) {
+  assert(Dim > 0 && "Dimension must be greater than zero");
+
+  float DummyVal = 0.1f;
+
+  // Create dummy embeddings for all canonical opcode names
+  VocabMap DummyVocabMap;
+  for (unsigned Opcode = 0; Opcode < TII.getNumOpcodes(); ++Opcode) {
+    std::string BaseOpcode = extractBaseOpcodeName(TII.getName(Opcode));
+    if (DummyVocabMap.count(BaseOpcode) == 0) {
+      // Only add if not already present
+      DummyVocabMap[BaseOpcode] = Embedding(Dim, DummyVal);
+      DummyVal += 0.1f;
+    }
+  }
+
+  // Create and return vocabulary with dummy embeddings
+  return MIRVocabulary::create(std::move(DummyVocabMap), TII);
+}
+
 //===----------------------------------------------------------------------===//
 // MIR2VecVocabLegacyAnalysis Implementation
 //===----------------------------------------------------------------------===//
@@ -258,7 +291,73 @@ MIR2VecVocabLegacyAnalysis::getMIR2VecVocabulary(const Module &M) {
 }
 
 //===----------------------------------------------------------------------===//
-// Printer Passes Implementation
+// MIREmbedder and its subclasses
+//===----------------------------------------------------------------------===//
+
+std::unique_ptr<MIREmbedder> MIREmbedder::create(MIR2VecKind Mode,
+                                                 const MachineFunction &MF,
+                                                 const MIRVocabulary &Vocab) {
+  switch (Mode) {
+  case MIR2VecKind::Symbolic:
+    return std::make_unique<SymbolicMIREmbedder>(MF, Vocab);
+  }
+  return nullptr;
+}
+
+Embedding MIREmbedder::computeEmbeddings(const MachineBasicBlock &MBB) const {
+  Embedding MBBVector(Dimension, 0);
+
+  // Get instruction info for opcode name resolution
+  const auto &Subtarget = MF.getSubtarget();
+  const auto *TII = Subtarget.getInstrInfo();
+  if (!TII) {
+    MF.getFunction().getContext().emitError(
+        "MIR2Vec: No TargetInstrInfo available; cannot compute embeddings");
+    return MBBVector;
+  }
+
+  // Process each machine instruction in the basic block
+  for (const auto &MI : MBB) {
+    // Skip debug instructions and other metadata
+    if (MI.isDebugInstr())
+      continue;
+    MBBVector += computeEmbeddings(MI);
+  }
+
+  return MBBVector;
+}
+
+Embedding MIREmbedder::computeEmbeddings() const {
+  Embedding MFuncVector(Dimension, 0);
+
+  // Consider all reachable machine basic blocks in the function
+  for (const auto *MBB : depth_first(&MF))
+    MFuncVector += computeEmbeddings(*MBB);
+  return MFuncVector;
+}
+
+SymbolicMIREmbedder::SymbolicMIREmbedder(const MachineFunction &MF,
+                                         const MIRVocabulary &Vocab)
+    : MIREmbedder(MF, Vocab) {}
+
+std::unique_ptr<SymbolicMIREmbedder>
+SymbolicMIREmbedder::create(const MachineFunction &MF,
+                            const MIRVocabulary &Vocab) {
+  return std::make_unique<SymbolicMIREmbedder>(MF, Vocab);
+}
+
+Embedding SymbolicMIREmbedder::computeEmbeddings(const MachineInstr &MI) const {
+  // Skip debug instructions and other metadata
+  if (MI.isDebugInstr())
+    return Embedding(Dimension, 0);
+
+  // Todo: Add operand/argument contributions
+
+  return Vocab[MI.getOpcode()];
+}
+
+//===----------------------------------------------------------------------===//
+// Printer Passes
 //===----------------------------------------------------------------------===//
 
 char MIR2VecVocabPrinterLegacyPass::ID = 0;
@@ -297,3 +396,56 @@ MachineFunctionPass *
 llvm::createMIR2VecVocabPrinterLegacyPass(raw_ostream &OS) {
   return new MIR2VecVocabPrinterLegacyPass(OS);
 }
+
+char MIR2VecPrinterLegacyPass::ID = 0;
+INITIALIZE_PASS_BEGIN(MIR2VecPrinterLegacyPass, "print-mir2vec",
+                      "MIR2Vec Embedder Printer Pass", false, true)
+INITIALIZE_PASS_DEPENDENCY(MIR2VecVocabLegacyAnalysis)
+INITIALIZE_PASS_DEPENDENCY(MachineModuleInfoWrapperPass)
+INITIALIZE_PASS_END(MIR2VecPrinterLegacyPass, "print-mir2vec",
+                    "MIR2Vec Embedder Printer Pass", false, true)
+
+bool MIR2VecPrinterLegacyPass::runOnMachineFunction(MachineFunction &MF) {
+  auto &Analysis = getAnalysis<MIR2VecVocabLegacyAnalysis>();
+  auto VocabOrErr =
+      Analysis.getMIR2VecVocabulary(*MF.getFunction().getParent());
+  assert(VocabOrErr && "Failed to get MIR2Vec vocabulary");
+  auto &MIRVocab = *VocabOrErr;
+
+  auto Emb = mir2vec::MIREmbedder::create(MIR2VecEmbeddingKind, MF, MIRVocab);
+  if (!Emb) {
+    OS << "Error creating MIR2Vec embeddings for function " << MF.getName()
+       << "\n";
+    return false;
+  }
+
+  OS << "MIR2Vec embeddings for machine function " << MF.getName() << ":\n";
+  OS << "Machine Function vector: ";
+  Emb->getMFunctionVector().print(OS);
+
+  OS << "Machine basic block vectors:\n";
+  for (const MachineBasicBlock &MBB : MF) {
+    OS << "Machine basic block: " << MBB.getFullName() << ":\n";
+    Emb->getMBBVector(MBB).print(OS);
+  }
+
+  OS << "Machine instruction vectors:\n";
+  for (const MachineBasicBlock &MBB : MF) {
+    for (const MachineInstr &MI : MBB) {
+      // Skip debug instructions as they are not
+      // embedded
+      if (MI.isDebugInstr())
+        continue;
+
+      OS << "Machine instruction: ";
+      MI.print(OS);
+      Emb->getMInstVector(MI).print(OS);
+    }
+  }
+
+  return false;
+}
+
+MachineFunctionPass *llvm::createMIR2VecPrinterLegacyPass(raw_ostream &OS) {
+  return new MIR2VecPrinterLegacyPass(OS);
+}
diff --git a/llvm/lib/CodeGen/MIRFSDiscriminator.cpp b/llvm/lib/CodeGen/MIRFSDiscriminator.cpp
index d988a2a..e37f784 100644
--- a/llvm/lib/CodeGen/MIRFSDiscriminator.cpp
+++ b/llvm/lib/CodeGen/MIRFSDiscriminator.cpp
@@ -15,6 +15,7 @@
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/DenseSet.h"
 #include "llvm/Analysis/BlockFrequencyInfoImpl.h"
+#include "llvm/CodeGen/MIRFSDiscriminatorOptions.h"
 #include "llvm/CodeGen/Passes.h"
 #include "llvm/IR/DebugInfoMetadata.h"
 #include "llvm/IR/Function.h"
@@ -35,13 +36,10 @@ using namespace sampleprofutil;
 
 // TODO(xur): Remove this option and related code once we make true as the
 // default.
-namespace llvm {
-cl::opt<bool> ImprovedFSDiscriminator(
+cl::opt<bool> llvm::ImprovedFSDiscriminator(
     "improved-fs-discriminator", cl::Hidden, cl::init(false),
     cl::desc("New FS discriminators encoding (incompatible with the original "
              "encoding)"));
-} // namespace llvm
-
 char MIRAddFSDiscriminators::ID = 0;
 
 INITIALIZE_PASS(MIRAddFSDiscriminators, DEBUG_TYPE,
diff --git a/llvm/lib/CodeGen/MIRSampleProfile.cpp b/llvm/lib/CodeGen/MIRSampleProfile.cpp
index 9bba50e8..d44f577 100644
--- a/llvm/lib/CodeGen/MIRSampleProfile.cpp
+++ b/llvm/lib/CodeGen/MIRSampleProfile.cpp
@@ -15,6 +15,7 @@
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/DenseSet.h"
 #include "llvm/Analysis/BlockFrequencyInfoImpl.h"
+#include "llvm/CodeGen/MIRFSDiscriminatorOptions.h"
 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
 #include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
 #include "llvm/CodeGen/MachineDominators.h"
@@ -62,9 +63,6 @@ static cl::opt<bool> ViewBFIAfter("fs-viewbfi-after", cl::Hidden,
                                   cl::init(false),
                                   cl::desc("View BFI after MIR loader"));
 
-namespace llvm {
-extern cl::opt<bool> ImprovedFSDiscriminator;
-}
 char MIRProfileLoaderPass::ID = 0;
 
 INITIALIZE_PASS_BEGIN(MIRProfileLoaderPass, DEBUG_TYPE,
diff --git a/llvm/lib/CodeGen/MachineLICM.cpp b/llvm/lib/CodeGen/MachineLICM.cpp
index 7acddff..729e73c 100644
--- a/llvm/lib/CodeGen/MachineLICM.cpp
+++ b/llvm/lib/CodeGen/MachineLICM.cpp
@@ -932,12 +932,11 @@ void MachineLICMImpl::InitRegPressure(MachineBasicBlock *BB) {
 void MachineLICMImpl::UpdateRegPressure(const MachineInstr *MI,
                                         bool ConsiderUnseenAsDef) {
   auto Cost = calcRegisterCost(MI, /*ConsiderSeen=*/true, ConsiderUnseenAsDef);
-  for (const auto &RPIdAndCost : Cost) {
-    unsigned Class = RPIdAndCost.first;
-    if (static_cast<int>(RegPressure[Class]) < -RPIdAndCost.second)
+  for (const auto &[Class, Weight] : Cost) {
+    if (static_cast<int>(RegPressure[Class]) < -Weight)
       RegPressure[Class] = 0;
     else
-      RegPressure[Class] += RPIdAndCost.second;
+      RegPressure[Class] += Weight;
   }
 }
 
@@ -1215,11 +1214,10 @@ bool MachineLICMImpl::IsCheapInstruction(MachineInstr &MI) const {
 /// given cost matrix can cause high register pressure.
 bool MachineLICMImpl::CanCauseHighRegPressure(
     const SmallDenseMap<unsigned, int> &Cost, bool CheapInstr) {
-  for (const auto &RPIdAndCost : Cost) {
-    if (RPIdAndCost.second <= 0)
+  for (const auto &[Class, Weight] : Cost) {
+    if (Weight <= 0)
       continue;
 
-    unsigned Class = RPIdAndCost.first;
     int Limit = RegLimit[Class];
 
     // Don't hoist cheap instructions if they would increase register pressure,
@@ -1228,7 +1226,7 @@ bool MachineLICMImpl::CanCauseHighRegPressure(
       return true;
 
     for (const auto &RP : BackTrace)
-      if (static_cast<int>(RP[Class]) + RPIdAndCost.second >= Limit)
+      if (static_cast<int>(RP[Class]) + Weight >= Limit)
         return true;
   }
 
@@ -1246,8 +1244,8 @@ void MachineLICMImpl::UpdateBackTraceRegPressure(const MachineInstr *MI) {
 
   // Update register pressure of blocks from loop header to current block.
   for (auto &RP : BackTrace)
-    for (const auto &RPIdAndCost : Cost)
-      RP[RPIdAndCost.first] += RPIdAndCost.second;
+    for (const auto &[Class, Weight] : Cost)
+      RP[Class] += Weight;
 }
 
 /// Return true if it is potentially profitable to hoist the given loop
diff --git a/llvm/lib/CodeGen/RegAllocFast.cpp b/llvm/lib/CodeGen/RegAllocFast.cpp
index 804480c..72b364c 100644
--- a/llvm/lib/CodeGen/RegAllocFast.cpp
+++ b/llvm/lib/CodeGen/RegAllocFast.cpp
@@ -211,7 +211,7 @@ private:
     unsigned getSparseSetIndex() const { return VirtReg.virtRegIndex(); }
   };
 
-  using LiveRegMap = SparseSet<LiveReg, identity<unsigned>, uint16_t>;
+  using LiveRegMap = SparseSet<LiveReg, unsigned, uint16_t>;
   /// This map contains entries for each virtual register that is currently
   /// available in a physical register.
   LiveRegMap LiveVirtRegs;
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 6bf9008..d2ea652 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2476,16 +2476,17 @@ static bool canFoldInAddressingMode(SDNode *N, SDNode *Use, SelectionDAG &DAG,
 /// masked vector operation if the target supports it.
 static SDValue foldSelectWithIdentityConstant(SDNode *N, SelectionDAG &DAG,
                                               bool ShouldCommuteOperands) {
-  // Match a select as operand 1. The identity constant that we are looking for
-  // is only valid as operand 1 of a non-commutative binop.
   SDValue N0 = N->getOperand(0);
   SDValue N1 = N->getOperand(1);
+
+  // Match a select as operand 1. The identity constant that we are looking for
+  // is only valid as operand 1 of a non-commutative binop.
   if (ShouldCommuteOperands)
     std::swap(N0, N1);
 
-  unsigned SelOpcode = N1.getOpcode();
-  if ((SelOpcode != ISD::VSELECT && SelOpcode != ISD::SELECT) ||
-      !N1.hasOneUse())
+  SDValue Cond, TVal, FVal;
+  if (!sd_match(N1, m_OneUse(m_SelectLike(m_Value(Cond), m_Value(TVal),
+                                          m_Value(FVal)))))
     return SDValue();
 
   // We can't hoist all instructions because of immediate UB (not speculatable).
@@ -2493,11 +2494,9 @@ static SDValue foldSelectWithIdentityConstant(SDNode *N, SelectionDAG &DAG,
   if (!DAG.isSafeToSpeculativelyExecuteNode(N))
     return SDValue();
 
+  unsigned SelOpcode = N1.getOpcode();
   unsigned Opcode = N->getOpcode();
   EVT VT = N->getValueType(0);
-  SDValue Cond = N1.getOperand(0);
-  SDValue TVal = N1.getOperand(1);
-  SDValue FVal = N1.getOperand(2);
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
 
   // This transform increases uses of N0, so freeze it to be safe.
@@ -13856,12 +13855,11 @@ static SDValue tryToFoldExtendSelectLoad(SDNode *N, const TargetLowering &TLI,
           Opcode == ISD::ANY_EXTEND) &&
          "Expected EXTEND dag node in input!");
 
-  if (!(N0->getOpcode() == ISD::SELECT || N0->getOpcode() == ISD::VSELECT) ||
-      !N0.hasOneUse())
+  SDValue Cond, Op1, Op2;
+  if (!sd_match(N0, m_OneUse(m_SelectLike(m_Value(Cond), m_Value(Op1),
+                                          m_Value(Op2)))))
     return SDValue();
 
-  SDValue Op1 = N0->getOperand(1);
-  SDValue Op2 = N0->getOperand(2);
   if (!isCompatibleLoad(Op1, Opcode) || !isCompatibleLoad(Op2, Opcode))
     return SDValue();
 
@@ -13883,7 +13881,7 @@ static SDValue tryToFoldExtendSelectLoad(SDNode *N, const TargetLowering &TLI,
 
   SDValue Ext1 = DAG.getNode(Opcode, DL, VT, Op1);
   SDValue Ext2 = DAG.getNode(Opcode, DL, VT, Op2);
-  return DAG.getSelect(DL, VT, N0->getOperand(0), Ext1, Ext2);
+  return DAG.getSelect(DL, VT, Cond, Ext1, Ext2);
 }
 
 /// Try to fold a sext/zext/aext dag node into a ConstantSDNode or
@@ -16433,7 +16431,8 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
   case ISD::OR:
   case ISD::XOR:
     if (!LegalOperations && N0.hasOneUse() &&
-        (isConstantOrConstantVector(N0.getOperand(0), true) ||
+        (N0.getOperand(0) == N0.getOperand(1) ||
+         isConstantOrConstantVector(N0.getOperand(0), true) ||
          isConstantOrConstantVector(N0.getOperand(1), true))) {
       // TODO: We already restricted this to pre-legalization, but for vectors
       // we are extra cautious to not create an unsupported operation.
@@ -17461,8 +17460,8 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
   // fold (fsub (fpext (fneg (fmul, x, y))), z)
   //   -> (fneg (fma (fpext x), (fpext y), z))
   // Note: This could be removed with appropriate canonicalization of the
-  // input expression into (fneg (fadd (fpext (fmul, x, y)), z). However, the
-  // orthogonal flags -fp-contract=fast and -enable-unsafe-fp-math prevent
+  // input expression into (fneg (fadd (fpext (fmul, x, y)), z)). However, the
+  // command line flag -fp-contract=fast and fast-math flag contract prevent
   // from implementing the canonicalization in visitFSUB.
   if (matcher.match(N0, ISD::FP_EXTEND)) {
     SDValue N00 = N0.getOperand(0);
@@ -17486,7 +17485,7 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
     //   -> (fneg (fma (fpext x)), (fpext y), z)
     // Note: This could be removed with appropriate canonicalization of the
     // input expression into (fneg (fadd (fpext (fmul, x, y)), z). However, the
-    // orthogonal flags -fp-contract=fast and -enable-unsafe-fp-math prevent
+    // command line flag -fp-contract=fast and fast-math flag contract prevent
     // from implementing the canonicalization in visitFSUB.
     if (matcher.match(N0, ISD::FNEG)) {
       SDValue N00 = N0.getOperand(0);
@@ -29619,13 +29618,14 @@ static SDValue takeInexpensiveLog2(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
   }
 
   // c ? X : Y -> c ? Log2(X) : Log2(Y)
-  if ((Op.getOpcode() == ISD::SELECT || Op.getOpcode() == ISD::VSELECT) &&
-      Op.hasOneUse()) {
-    if (SDValue LogX = takeInexpensiveLog2(DAG, DL, VT, Op.getOperand(1),
-                                           Depth + 1, AssumeNonZero))
-      if (SDValue LogY = takeInexpensiveLog2(DAG, DL, VT, Op.getOperand(2),
-                                             Depth + 1, AssumeNonZero))
-        return DAG.getSelect(DL, VT, Op.getOperand(0), LogX, LogY);
+  SDValue Cond, TVal, FVal;
+  if (sd_match(Op, m_OneUse(m_SelectLike(m_Value(Cond), m_Value(TVal),
+                                         m_Value(FVal))))) {
+    if (SDValue LogX =
+            takeInexpensiveLog2(DAG, DL, VT, TVal, Depth + 1, AssumeNonZero))
+      if (SDValue LogY =
+              takeInexpensiveLog2(DAG, DL, VT, FVal, Depth + 1, AssumeNonZero))
+        return DAG.getSelect(DL, VT, Cond, LogX, LogY);
   }
 
   // log2(umin(X, Y)) -> umin(log2(X), log2(Y))
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index 437d0f4..bf1abfe 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -3765,6 +3765,8 @@ bool DAGTypeLegalizer::SoftPromoteHalfOperand(SDNode *N, unsigned OpNo) {
   case ISD::FP_TO_UINT:
   case ISD::LRINT:
   case ISD::LLRINT:
+  case ISD::LROUND:
+  case ISD::LLROUND:
     Res = SoftPromoteHalfOp_Op0WithStrict(N);
     break;
   case ISD::FP_TO_SINT_SAT:
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 88a4a8b..b1776ea 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -429,7 +429,20 @@ SDValue DAGTypeLegalizer::PromoteIntRes_Atomic0(AtomicSDNode *N) {
 }
 
 SDValue DAGTypeLegalizer::PromoteIntRes_Atomic1(AtomicSDNode *N) {
-  SDValue Op2 = GetPromotedInteger(N->getOperand(2));
+  SDValue Op2 = N->getOperand(2);
+  switch (TLI.getExtendForAtomicRMWArg(N->getOpcode())) {
+  case ISD::SIGN_EXTEND:
+    Op2 = SExtPromotedInteger(Op2);
+    break;
+  case ISD::ZERO_EXTEND:
+    Op2 = ZExtPromotedInteger(Op2);
+    break;
+  case ISD::ANY_EXTEND:
+    Op2 = GetPromotedInteger(Op2);
+    break;
+  default:
+    llvm_unreachable("Invalid atomic op extension");
+  }
   SDValue Res = DAG.getAtomic(N->getOpcode(), SDLoc(N),
                               N->getMemoryVT(),
                               N->getChain(), N->getBasePtr(),
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 20a0efd..dcf2df3 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -1977,8 +1977,13 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
   if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
     Register InReg = FuncInfo.InitializeRegForValue(Inst);
 
+    std::optional<CallingConv::ID> CallConv;
+    auto *CI = dyn_cast<CallInst>(Inst);
+    if (CI && !CI->isInlineAsm())
+      CallConv = CI->getCallingConv();
+
     RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
-                     Inst->getType(), std::nullopt);
+                     Inst->getType(), CallConv);
     SDValue Chain = DAG.getEntryNode();
     return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
   }
diff --git a/llvm/lib/CodeGen/ShrinkWrap.cpp b/llvm/lib/CodeGen/ShrinkWrap.cpp
index 826e412..8358105 100644
--- a/llvm/lib/CodeGen/ShrinkWrap.cpp
+++ b/llvm/lib/CodeGen/ShrinkWrap.cpp
@@ -319,7 +319,7 @@ bool ShrinkWrapImpl::useOrDefCSROrFI(const MachineInstr &MI, RegScavenger *RS,
       return isa<GlobalValue>(UO);
     }
     if (const PseudoSourceValue *PSV = Op->getPseudoValue())
-      return PSV->isJumpTable();
+      return PSV->isJumpTable() || PSV->isConstantPool();
     return false;
   };
   // Load/store operations may access the stack indirectly when we previously
diff --git a/llvm/lib/CodeGen/TargetOptionsImpl.cpp b/llvm/lib/CodeGen/TargetOptionsImpl.cpp
index 5eb86e7..049efe8 100644
--- a/llvm/lib/CodeGen/TargetOptionsImpl.cpp
+++ b/llvm/lib/CodeGen/TargetOptionsImpl.cpp
@@ -51,7 +51,7 @@ bool TargetOptions::FramePointerIsReserved(const MachineFunction &MF) const {
 /// HonorSignDependentRoundingFPMath - Return true if the codegen must assume
 /// that the rounding mode of the FPU can change from its default.
 bool TargetOptions::HonorSignDependentRoundingFPMath() const {
-  return !UnsafeFPMath && HonorSignDependentRoundingFPMathOption;
+  return HonorSignDependentRoundingFPMathOption;
 }
 
 /// NOTE: There are targets that still do not support the debug entry values
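
The note below is not part of the change set above; it is an illustrative sketch of the effect of the DwarfExpression.cpp hunk on emitted location expressions, assuming a target with an 8-byte code pointer (64-bit DWARF generic type) and two hypothetical unsigned bit fields read from a memory location:

    16-bit field at bit offset 0:
      before: DW_OP_deref_size 2, DW_OP_constu 48, DW_OP_shl, DW_OP_constu 48, DW_OP_shr
      after:  DW_OP_deref_size 2   (8 * DerefSize == SizeInBits, so the dereferenced value is already correct)
    5-bit field at bit offset 0:
      before: DW_OP_deref_size 1, DW_OP_constu 59, DW_OP_shl, DW_OP_constu 59, DW_OP_shr
      after:  DW_OP_deref_size 1, DW_OP_constu 31, DW_OP_and

A dereference of exactly pointer size now uses plain DW_OP_deref instead of DW_OP_deref_size 8, while signed (sext) extracts and fields with a non-zero bit offset keep the original shift-left/shift-right sequence.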