Diffstat (limited to 'llvm')
-rw-r--r--  llvm/include/llvm/CodeGen/TargetInstrInfo.h | 14
-rw-r--r--  llvm/include/llvm/Support/SpecialCaseList.h | 103
-rw-r--r--  llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp | 4
-rw-r--r--  llvm/lib/CodeGen/BreakFalseDeps.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp | 4
-rw-r--r--  llvm/lib/CodeGen/FixupStatepointCallerSaved.cpp | 6
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/Utils.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/InitUndef.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/InlineSpiller.cpp | 8
-rw-r--r--  llvm/lib/CodeGen/LiveRangeEdit.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/MachineInstr.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/MachineLICM.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/MachineSink.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/MachineVerifier.cpp | 11
-rw-r--r--  llvm/lib/CodeGen/RegAllocFast.cpp | 7
-rw-r--r--  llvm/lib/CodeGen/RegisterCoalescer.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/RegisterScavenging.cpp | 4
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 16
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/FastISel.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp | 9
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/SplitKit.cpp | 7
-rw-r--r--  llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp | 5
-rw-r--r--  llvm/lib/CodeGen/TargetInstrInfo.cpp | 32
-rw-r--r--  llvm/lib/CodeGen/TwoAddressInstructionPass.cpp | 2
-rw-r--r--  llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp | 11
-rw-r--r--  llvm/lib/Passes/PassBuilderPipelines.cpp | 2
-rw-r--r--  llvm/lib/Support/SpecialCaseList.cpp | 158
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp | 10
-rw-r--r--  llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp | 2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 29
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.h | 5
-rw-r--r--  llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp | 12
-rw-r--r--  llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPU.td | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp | 5
-rw-r--r--  llvm/lib/Target/AMDGPU/SIFoldOperands.cpp | 4
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 25
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.h | 14
-rw-r--r--  llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp | 6
-rw-r--r--  llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp | 9
-rw-r--r--  llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 2
-rw-r--r--  llvm/lib/Target/ARC/ARCInstrInfo.cpp | 12
-rw-r--r--  llvm/lib/Target/ARC/ARCInstrInfo.h | 6
-rw-r--r--  llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp | 106
-rw-r--r--  llvm/lib/Target/ARM/ARMBaseInstrInfo.h | 11
-rw-r--r--  llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp | 5
-rw-r--r--  llvm/lib/Target/ARM/ARMFrameLowering.cpp | 3
-rw-r--r--  llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp | 8
-rw-r--r--  llvm/lib/Target/ARM/MLxExpansionPass.cpp | 2
-rw-r--r--  llvm/lib/Target/ARM/Thumb1InstrInfo.cpp | 11
-rw-r--r--  llvm/lib/Target/ARM/Thumb1InstrInfo.h | 5
-rw-r--r--  llvm/lib/Target/ARM/Thumb2InstrInfo.cpp | 26
-rw-r--r--  llvm/lib/Target/ARM/Thumb2InstrInfo.h | 5
-rw-r--r--  llvm/lib/Target/AVR/AVRInstrInfo.cpp | 12
-rw-r--r--  llvm/lib/Target/AVR/AVRInstrInfo.h | 6
-rw-r--r--  llvm/lib/Target/BPF/BPFInstrInfo.cpp | 11
-rw-r--r--  llvm/lib/Target/BPF/BPFInstrInfo.h | 5
-rw-r--r--  llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.cpp | 4
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp | 2
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp | 15
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp | 11
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonInstrInfo.h | 5
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonLoadStoreWidening.cpp | 4
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonSubtarget.h | 2
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp | 4
-rw-r--r--  llvm/lib/Target/Lanai/LanaiInstrInfo.cpp | 6
-rw-r--r--  llvm/lib/Target/Lanai/LanaiInstrInfo.h | 6
-rw-r--r--  llvm/lib/Target/LoongArch/LoongArchDeadRegisterDefinitions.cpp | 3
-rw-r--r--  llvm/lib/Target/LoongArch/LoongArchFrameLowering.cpp | 2
-rw-r--r--  llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp | 16
-rw-r--r--  llvm/lib/Target/LoongArch/LoongArchInstrInfo.h | 6
-rw-r--r--  llvm/lib/Target/M68k/M68kInstrInfo.cpp | 14
-rw-r--r--  llvm/lib/Target/M68k/M68kInstrInfo.h | 6
-rw-r--r--  llvm/lib/Target/MSP430/MSP430InstrInfo.cpp | 13
-rw-r--r--  llvm/lib/Target/MSP430/MSP430InstrInfo.h | 6
-rw-r--r--  llvm/lib/Target/Mips/Mips16InstrInfo.cpp | 11
-rw-r--r--  llvm/lib/Target/Mips/Mips16InstrInfo.h | 5
-rw-r--r--  llvm/lib/Target/Mips/MipsInstrInfo.h | 15
-rw-r--r--  llvm/lib/Target/Mips/MipsSEFrameLowering.cpp | 49
-rw-r--r--  llvm/lib/Target/Mips/MipsSEInstrInfo.cpp | 43
-rw-r--r--  llvm/lib/Target/Mips/MipsSEInstrInfo.h | 5
-rw-r--r--  llvm/lib/Target/PowerPC/PPCFrameLowering.cpp | 11
-rw-r--r--  llvm/lib/Target/PowerPC/PPCInstrInfo.cpp | 23
-rw-r--r--  llvm/lib/Target/PowerPC/PPCInstrInfo.h | 12
-rw-r--r--  llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp | 4
-rw-r--r--  llvm/lib/Target/RISCV/RISCVDeadRegisterDefinitions.cpp | 3
-rw-r--r--  llvm/lib/Target/RISCV/RISCVFrameLowering.cpp | 28
-rw-r--r--  llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 10
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 27
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfo.h | 6
-rw-r--r--  llvm/lib/Target/RISCV/RISCVSubtarget.h | 1
-rw-r--r--  llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp | 4
-rw-r--r--  llvm/lib/Target/Sparc/SparcInstrInfo.cpp | 11
-rw-r--r--  llvm/lib/Target/Sparc/SparcInstrInfo.h | 5
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp | 16
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZHazardRecognizer.cpp | 3
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp | 14
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZInstrInfo.h | 6
-rw-r--r--  llvm/lib/Target/VE/VEInstrInfo.cpp | 11
-rw-r--r--  llvm/lib/Target/VE/VEInstrInfo.h | 6
-rw-r--r--  llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp | 6
-rw-r--r--  llvm/lib/Target/X86/X86DomainReassignment.cpp | 4
-rw-r--r--  llvm/lib/Target/X86/X86FastPreTileConfig.cpp | 3
-rw-r--r--  llvm/lib/Target/X86/X86FrameLowering.cpp | 7
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp | 57
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.h | 14
-rw-r--r--  llvm/lib/Target/X86/X86OptimizeLEAs.cpp | 2
-rw-r--r--  llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp | 2
-rw-r--r--  llvm/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp | 6
-rw-r--r--  llvm/lib/Target/XCore/XCoreFrameLowering.cpp | 5
-rw-r--r--  llvm/lib/Target/XCore/XCoreInstrInfo.cpp | 5
-rw-r--r--  llvm/lib/Target/XCore/XCoreInstrInfo.h | 6
-rw-r--r--  llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp | 2
-rw-r--r--  llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp | 18
-rw-r--r--  llvm/lib/Target/Xtensa/XtensaInstrInfo.h | 5
-rw-r--r--  llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators.ll | 56
-rw-r--r--  llvm/test/CodeGen/AArch64/cgdata-merge-local.ll | 12
-rw-r--r--  llvm/test/CodeGen/AArch64/cgdata-merge-no-params.ll | 12
-rw-r--r--  llvm/test/CodeGen/AArch64/cgdata-no-merge-unnamed.ll | 12
-rw-r--r--  llvm/test/CodeGen/AArch64/divrem.ll | 6
-rw-r--r--  llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/machine-combiner-subregs.mir | 35
-rw-r--r--  llvm/test/CodeGen/AArch64/machine-outliner-iterative.mir | 8
-rw-r--r--  llvm/test/CodeGen/AArch64/misched-fusion-cmp-bcc.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/no-quad-ldp-stp.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/ptrauth-bti-call.ll | 28
-rw-r--r--  llvm/test/CodeGen/AArch64/ptrauth-call-rv-marker.ll | 102
-rw-r--r--  llvm/test/CodeGen/AArch64/ptrauth-reloc.ll | 6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/remat-fp64-constants.ll | 5
-rw-r--r--  llvm/test/CodeGen/AMDGPU/spillv16.ll | 204
-rw-r--r--  llvm/test/CodeGen/Hexagon/late_instr.ll | 2
-rw-r--r--  llvm/test/CodeGen/Hexagon/swp-carried-1.ll | 2
-rw-r--r--  llvm/test/CodeGen/Hexagon/swp-conv3x3-nested.ll | 2
-rw-r--r--  llvm/test/CodeGen/Hexagon/swp-epilog-phi11.ll | 2
-rw-r--r--  llvm/test/CodeGen/Hexagon/swp-epilog-phi12.ll | 2
-rw-r--r--  llvm/test/CodeGen/Hexagon/swp-epilog-phi7.ll | 2
-rw-r--r--  llvm/test/CodeGen/Hexagon/swp-kernel-phi1.ll | 2
-rw-r--r--  llvm/test/CodeGen/Hexagon/swp-matmul-bitext.ll | 2
-rw-r--r--  llvm/test/CodeGen/Hexagon/swp-order-copies.ll | 2
-rw-r--r--  llvm/test/CodeGen/Hexagon/swp-order-deps7.ll | 2
-rw-r--r--  llvm/test/CodeGen/Hexagon/swp-reuse-phi-6.ll | 2
-rw-r--r--  llvm/test/CodeGen/RISCV/branch-on-zero.ll | 16
-rw-r--r--  llvm/test/CodeGen/RISCV/machine-pipeliner.ll | 46
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll | 10
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/machine-combiner-subreg-verifier-error.mir | 39
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/pr95865.ll | 43
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll | 66
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vcpop-shl-zext-opt.ll | 28
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll | 24
-rw-r--r--  llvm/test/CodeGen/RISCV/sra-xor-sra.ll | 32
-rw-r--r--  llvm/test/CodeGen/X86/apx/no-rex2-general.ll | 122
-rw-r--r--  llvm/test/CodeGen/X86/apx/no-rex2-pseudo-amx.ll | 29
-rw-r--r--  llvm/test/CodeGen/X86/apx/no-rex2-pseudo-x87.ll | 31
-rw-r--r--  llvm/test/CodeGen/X86/apx/no-rex2-special.ll | 113
-rw-r--r--  llvm/test/Other/new-pm-defaults.ll | 1
-rw-r--r--  llvm/test/Other/new-pm-thinlto-postlink-defaults.ll | 1
-rw-r--r--  llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll | 1
-rw-r--r--  llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll | 1
-rw-r--r--  llvm/test/Other/new-pm-thinlto-prelink-defaults.ll | 1
-rw-r--r--  llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll | 1
-rw-r--r--  llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll | 1
-rw-r--r--  llvm/unittests/Analysis/AliasAnalysisTest.cpp | 50
-rw-r--r--  llvm/unittests/Analysis/AliasSetTrackerTest.cpp | 6
-rw-r--r--  llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp | 90
-rw-r--r--  llvm/unittests/Analysis/CGSCCPassManagerTest.cpp | 16
-rw-r--r--  llvm/unittests/Analysis/CaptureTrackingTest.cpp | 40
-rw-r--r--  llvm/unittests/Analysis/DDGTest.cpp | 44
-rw-r--r--  llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp | 44
-rw-r--r--  llvm/unittests/Analysis/LazyCallGraphTest.cpp | 208
-rw-r--r--  llvm/unittests/Analysis/SparsePropagation.cpp | 8
-rw-r--r--  llvm/unittests/Analysis/UnrollAnalyzerTest.cpp | 30
-rw-r--r--  llvm/unittests/Analysis/ValueTrackingTest.cpp | 12
-rw-r--r--  llvm/unittests/MIR/MachineMetadata.cpp | 12
-rw-r--r--  llvm/unittests/Target/WebAssembly/WebAssemblyExceptionInfoTest.cpp | 4
-rw-r--r--  llvm/unittests/Transforms/IPO/AttributorTest.cpp | 12
-rw-r--r--  llvm/unittests/Transforms/Scalar/LICMTest.cpp | 12
-rw-r--r--  llvm/unittests/Transforms/Scalar/LoopPassManagerTest.cpp | 44
-rw-r--r--  llvm/unittests/Transforms/Utils/BasicBlockUtilsTest.cpp | 8
-rw-r--r--  llvm/unittests/Transforms/Utils/CloningTest.cpp | 10
-rw-r--r--  llvm/unittests/Transforms/Utils/CodeExtractorTest.cpp | 60
-rw-r--r--  llvm/unittests/Transforms/Utils/CodeMoverUtilsTest.cpp | 134
-rw-r--r--  llvm/unittests/Transforms/Utils/LocalTest.cpp | 14
-rw-r--r--  llvm/unittests/Transforms/Utils/MemTransferLowering.cpp | 36
-rw-r--r--  llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp | 16
-rw-r--r--  llvm/unittests/Transforms/Utils/UnrollLoopTest.cpp | 14
-rw-r--r--  llvm/unittests/Transforms/Utils/ValueMapperTest.cpp | 2
187 files changed, 1858 insertions, 1570 deletions
diff --git a/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
index 02cd925..18142c2c 100644
--- a/llvm/include/llvm/CodeGen/TargetInstrInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
@@ -159,9 +159,8 @@ public:
/// Given a machine instruction descriptor, returns the register
/// class constraint for OpNum, or NULL.
- virtual const TargetRegisterClass *
- getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
- const TargetRegisterInfo *TRI) const;
+ virtual const TargetRegisterClass *getRegClass(const MCInstrDesc &MCID,
+ unsigned OpNum) const;
/// Returns true if MI is an instruction we are unable to reason about
/// (like a call or something with unmodeled side effects).
@@ -464,8 +463,7 @@ public:
/// SubIdx.
virtual void reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, Register DestReg,
- unsigned SubIdx, const MachineInstr &Orig,
- const TargetRegisterInfo &TRI) const;
+ unsigned SubIdx, const MachineInstr &Orig) const;
/// Clones instruction or the whole instruction bundle \p Orig and
/// insert into \p MBB before \p InsertBefore. The target may update operands
@@ -1198,8 +1196,7 @@ public:
/// register spill instruction, part of prologue, during the frame lowering.
virtual void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const {
llvm_unreachable("Target didn't implement "
"TargetInstrInfo::storeRegToStackSlot!");
@@ -1217,8 +1214,7 @@ public:
/// register reload instruction, part of epilogue, during the frame lowering.
virtual void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const {
llvm_unreachable("Target didn't implement "
"TargetInstrInfo::loadRegFromStackSlot!");
diff --git a/llvm/include/llvm/Support/SpecialCaseList.h b/llvm/include/llvm/Support/SpecialCaseList.h
index dee4db5..5a012cf 100644
--- a/llvm/include/llvm/Support/SpecialCaseList.h
+++ b/llvm/include/llvm/Support/SpecialCaseList.h
@@ -12,19 +12,11 @@
#ifndef LLVM_SUPPORT_SPECIALCASELIST_H
#define LLVM_SUPPORT_SPECIALCASELIST_H
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/RadixTree.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Allocator.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/Support/GlobPattern.h"
-#include "llvm/Support/Regex.h"
+#include "llvm/Support/Error.h"
#include <memory>
#include <string>
#include <utility>
-#include <variant>
#include <vector>
namespace llvm {
@@ -125,91 +117,19 @@ protected:
SpecialCaseList(SpecialCaseList const &) = delete;
SpecialCaseList &operator=(SpecialCaseList const &) = delete;
-private:
- using Match = std::pair<StringRef, unsigned>;
- static constexpr Match NotMatched = {"", 0};
-
- // Legacy v1 matcher.
- class RegexMatcher {
- public:
- LLVM_ABI Error insert(StringRef Pattern, unsigned LineNumber);
- LLVM_ABI void preprocess(bool BySize);
-
- LLVM_ABI Match match(StringRef Query) const;
-
- struct Reg {
- Reg(StringRef Name, unsigned LineNo, Regex &&Rg)
- : Name(Name), LineNo(LineNo), Rg(std::move(Rg)) {}
- StringRef Name;
- unsigned LineNo;
- Regex Rg;
- };
-
- std::vector<Reg> RegExes;
- };
-
- class GlobMatcher {
- public:
- LLVM_ABI Error insert(StringRef Pattern, unsigned LineNumber);
- LLVM_ABI void preprocess(bool BySize);
-
- LLVM_ABI Match match(StringRef Query) const;
-
- struct Glob {
- Glob(StringRef Name, unsigned LineNo, GlobPattern &&Pattern)
- : Name(Name), LineNo(LineNo), Pattern(std::move(Pattern)) {}
- StringRef Name;
- unsigned LineNo;
- GlobPattern Pattern;
- };
-
- std::vector<GlobMatcher::Glob> Globs;
-
- RadixTree<iterator_range<StringRef::const_iterator>,
- RadixTree<iterator_range<StringRef::const_reverse_iterator>,
- SmallVector<int, 1>>>
- PrefixSuffixToGlob;
-
- RadixTree<iterator_range<StringRef::const_iterator>, SmallVector<int, 1>>
- SubstrToGlob;
- };
-
- /// Represents a set of patterns and their line numbers
- class Matcher {
- public:
- LLVM_ABI Matcher(bool UseGlobs, bool RemoveDotSlash);
-
- LLVM_ABI Error insert(StringRef Pattern, unsigned LineNumber);
- LLVM_ABI void preprocess(bool BySize);
-
- LLVM_ABI Match match(StringRef Query) const;
-
- LLVM_ABI bool matchAny(StringRef Query) const {
- return match(Query) != NotMatched;
- }
-
- std::variant<RegexMatcher, GlobMatcher> M;
- bool RemoveDotSlash;
- };
-
- using SectionEntries = StringMap<StringMap<Matcher>>;
-
-protected:
class Section {
public:
- Section(StringRef Str, unsigned FileIdx, bool UseGlobs)
- : SectionMatcher(UseGlobs, /*RemoveDotSlash=*/false), SectionStr(Str),
- FileIdx(FileIdx) {}
-
- Section(Section &&) = default;
+ LLVM_ABI Section(StringRef Name, unsigned FileIdx, bool UseGlobs);
+ LLVM_ABI Section(Section &&);
+ LLVM_ABI ~Section();
- // Return name of the section, its entire string in [].
- StringRef name() const { return SectionStr; }
+ // Returns name of the section, its entire string in [].
+ StringRef name() const { return Name; }
// Returns true if string 'Name' matches section name interpreted as a glob.
LLVM_ABI bool matchName(StringRef Name) const;
- // Return sequence number of the file where this section is defined.
+ // Returns sequence number of the file where this section is defined.
unsigned fileIndex() const { return FileIdx; }
// Helper method to search by Prefix, Query, and Category. Returns
@@ -227,14 +147,11 @@ protected:
private:
friend class SpecialCaseList;
- LLVM_ABI void preprocess(bool OrderBySize);
- LLVM_ABI const SpecialCaseList::Matcher *
- findMatcher(StringRef Prefix, StringRef Category) const;
+ class SectionImpl;
- Matcher SectionMatcher;
- std::string SectionStr;
- SectionEntries Entries;
+ StringRef Name;
unsigned FileIdx;
+ std::unique_ptr<SectionImpl> Impl;
};
ArrayRef<const Section> sections() const { return Sections; }
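Hiding the matcher machinery behind a SectionImpl pointer is the classic pimpl idiom, and it is why the patch adds out-of-line Section(Section &&) and ~Section() declarations: std::unique_ptr cannot destroy an incomplete type, so the special members must be defined where SectionImpl is complete. A standalone sketch of the same pattern (illustrative names, not LLVM code):

    #include <memory>
    // widget.h
    class Widget {
    public:
      Widget();
      Widget(Widget &&);  // declared here, defined out of line
      ~Widget();          // '= default' here would not compile: Impl is incomplete
    private:
      class Impl;         // definition lives only in the .cpp file
      std::unique_ptr<Impl> Impl_;
    };
    // widget.cpp
    class Widget::Impl { /* matcher state goes here */ };
    Widget::Widget() : Impl_(std::make_unique<Impl>()) {}
    Widget::Widget(Widget &&) = default;
    Widget::~Widget() = default;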
diff --git a/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp b/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
index 6567bd4..46b5bb7 100644
--- a/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
+++ b/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
@@ -395,7 +395,7 @@ void AggressiveAntiDepBreaker::PrescanInstruction(
// Note register reference...
const TargetRegisterClass *RC = nullptr;
if (i < MI.getDesc().getNumOperands())
- RC = TII->getRegClass(MI.getDesc(), i, TRI);
+ RC = TII->getRegClass(MI.getDesc(), i);
AggressiveAntiDepState::RegisterReference RR = { &MO, RC };
RegRefs.emplace(Reg.asMCReg(), RR);
}
@@ -479,7 +479,7 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr &MI,
// Note register reference...
const TargetRegisterClass *RC = nullptr;
if (i < MI.getDesc().getNumOperands())
- RC = TII->getRegClass(MI.getDesc(), i, TRI);
+ RC = TII->getRegClass(MI.getDesc(), i);
AggressiveAntiDepState::RegisterReference RR = { &MO, RC };
RegRefs.emplace(Reg.asMCReg(), RR);
}
diff --git a/llvm/lib/CodeGen/BreakFalseDeps.cpp b/llvm/lib/CodeGen/BreakFalseDeps.cpp
index 1846880..fead3ee 100644
--- a/llvm/lib/CodeGen/BreakFalseDeps.cpp
+++ b/llvm/lib/CodeGen/BreakFalseDeps.cpp
@@ -133,7 +133,7 @@ bool BreakFalseDeps::pickBestRegisterForUndef(MachineInstr *MI, unsigned OpIdx,
}
// Get the undef operand's register class
- const TargetRegisterClass *OpRC = TII->getRegClass(MI->getDesc(), OpIdx, TRI);
+ const TargetRegisterClass *OpRC = TII->getRegClass(MI->getDesc(), OpIdx);
assert(OpRC && "Not a valid register class");
// If the instruction has a true dependency, we can hide the false dependency
diff --git a/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp b/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
index 86377cf..3259a3e 100644
--- a/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
+++ b/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
@@ -187,7 +187,7 @@ void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr &MI) {
const TargetRegisterClass *NewRC = nullptr;
if (i < MI.getDesc().getNumOperands())
- NewRC = TII->getRegClass(MI.getDesc(), i, TRI);
+ NewRC = TII->getRegClass(MI.getDesc(), i);
// For now, only allow the register to be changed if its register
// class is consistent across all uses.
@@ -316,7 +316,7 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr &MI, unsigned Count) {
const TargetRegisterClass *NewRC = nullptr;
if (i < MI.getDesc().getNumOperands())
- NewRC = TII->getRegClass(MI.getDesc(), i, TRI);
+ NewRC = TII->getRegClass(MI.getDesc(), i);
// For now, only allow the register to be changed if its register
// class is consistent across all uses.
diff --git a/llvm/lib/CodeGen/FixupStatepointCallerSaved.cpp b/llvm/lib/CodeGen/FixupStatepointCallerSaved.cpp
index 8b74dce..c23cac7 100644
--- a/llvm/lib/CodeGen/FixupStatepointCallerSaved.cpp
+++ b/llvm/lib/CodeGen/FixupStatepointCallerSaved.cpp
@@ -420,7 +420,7 @@ public:
LLVM_DEBUG(dbgs() << "Insert spill before " << *InsertBefore);
TII.storeRegToStackSlot(*MI.getParent(), InsertBefore, Reg, IsKill, FI,
- RC, &TRI, Register());
+ RC, Register());
}
}
@@ -429,7 +429,7 @@ public:
const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(Reg);
int FI = RegToSlotIdx[Reg];
if (It != MBB->end()) {
- TII.loadRegFromStackSlot(*MBB, It, Reg, FI, RC, &TRI, Register());
+ TII.loadRegFromStackSlot(*MBB, It, Reg, FI, RC, Register());
return;
}
@@ -437,7 +437,7 @@ public:
// and then swap them.
assert(!MBB->empty() && "Empty block");
--It;
- TII.loadRegFromStackSlot(*MBB, It, Reg, FI, RC, &TRI, Register());
+ TII.loadRegFromStackSlot(*MBB, It, Reg, FI, RC, Register());
MachineInstr *Reload = It->getPrevNode();
int Dummy = 0;
(void)Dummy;
diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
index 5fab6ec..e8954a3 100644
--- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -114,7 +114,7 @@ Register llvm::constrainOperandRegClass(
// Assume physical registers are properly constrained.
assert(Reg.isVirtual() && "PhysReg not implemented");
- const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx, &TRI);
+ const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx);
// Some of the target independent instructions, like COPY, may not impose any
// register class constraints on some of their operands: If it's a use, we can
// skip constraining as the instruction defining the register would constrain
diff --git a/llvm/lib/CodeGen/InitUndef.cpp b/llvm/lib/CodeGen/InitUndef.cpp
index e07e598..12b36f5 100644
--- a/llvm/lib/CodeGen/InitUndef.cpp
+++ b/llvm/lib/CodeGen/InitUndef.cpp
@@ -232,7 +232,7 @@ bool InitUndef::processBasicBlock(MachineFunction &MF, MachineBasicBlock &MBB,
MachineOperand &UseMO = MI.getOperand(UseOpIdx);
if (UseMO.getReg() == MCRegister::NoRegister) {
const TargetRegisterClass *RC =
- TII->getRegClass(MI.getDesc(), UseOpIdx, TRI);
+ TII->getRegClass(MI.getDesc(), UseOpIdx);
Register NewDest = MRI->createVirtualRegister(RC);
// We don't have a way to update dead lanes, so keep track of the
// new register so that we avoid querying it later.
diff --git a/llvm/lib/CodeGen/InlineSpiller.cpp b/llvm/lib/CodeGen/InlineSpiller.cpp
index c3e0964..6837030 100644
--- a/llvm/lib/CodeGen/InlineSpiller.cpp
+++ b/llvm/lib/CodeGen/InlineSpiller.cpp
@@ -473,7 +473,7 @@ bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
MachineInstrSpan MIS(MII, MBB);
// Insert spill without kill flag immediately after def.
TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
- MRI.getRegClass(SrcReg), &TRI, Register());
+ MRI.getRegClass(SrcReg), Register());
LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
for (const MachineInstr &MI : make_range(MIS.begin(), MII))
getVDefInterval(MI, LIS);
@@ -1119,7 +1119,7 @@ void InlineSpiller::insertReload(Register NewVReg,
MachineInstrSpan MIS(MI, &MBB);
TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
- MRI.getRegClass(NewVReg), &TRI, Register());
+ MRI.getRegClass(NewVReg), Register());
LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);
@@ -1155,7 +1155,7 @@ void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
if (IsRealSpill)
TII.storeRegToStackSlot(MBB, SpillBefore, NewVReg, isKill, StackSlot,
- MRI.getRegClass(NewVReg), &TRI, Register());
+ MRI.getRegClass(NewVReg), Register());
else
// Don't spill undef value.
// Anything works for undef, in particular keeping the memory
@@ -1729,7 +1729,7 @@ void HoistSpillHelper::hoistAllSpills() {
MachineBasicBlock::iterator MII = IPA.getLastInsertPointIter(OrigLI, *BB);
MachineInstrSpan MIS(MII, BB);
TII.storeRegToStackSlot(*BB, MII, LiveReg, false, Slot,
- MRI.getRegClass(LiveReg), &TRI, Register());
+ MRI.getRegClass(LiveReg), Register());
LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
for (const MachineInstr &MI : make_range(MIS.begin(), MII))
getVDefInterval(MI, LIS);
diff --git a/llvm/lib/CodeGen/LiveRangeEdit.cpp b/llvm/lib/CodeGen/LiveRangeEdit.cpp
index 5b0365d..6fe1170 100644
--- a/llvm/lib/CodeGen/LiveRangeEdit.cpp
+++ b/llvm/lib/CodeGen/LiveRangeEdit.cpp
@@ -88,7 +88,7 @@ SlotIndex LiveRangeEdit::rematerializeAt(MachineBasicBlock &MBB,
bool Late, unsigned SubIdx,
MachineInstr *ReplaceIndexMI) {
assert(RM.OrigMI && "Invalid remat");
- TII.reMaterialize(MBB, MI, DestReg, SubIdx, *RM.OrigMI, tri);
+ TII.reMaterialize(MBB, MI, DestReg, SubIdx, *RM.OrigMI);
// DestReg of the cloned instruction cannot be Dead. Set isDead of DestReg
// to false anyway in case the isDead flag of RM.OrigMI's dest register
// is true.
diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp
index 37e5c51..eb46124 100644
--- a/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/llvm/lib/CodeGen/MachineInstr.cpp
@@ -978,7 +978,7 @@ MachineInstr::getRegClassConstraint(unsigned OpIdx,
assert(getMF() && "Can't have an MF reference here!");
// Most opcodes have fixed constraints in their MCInstrDesc.
if (!isInlineAsm())
- return TII->getRegClass(getDesc(), OpIdx, TRI);
+ return TII->getRegClass(getDesc(), OpIdx);
if (!getOperand(OpIdx).isReg())
return nullptr;
diff --git a/llvm/lib/CodeGen/MachineLICM.cpp b/llvm/lib/CodeGen/MachineLICM.cpp
index 729e73c..c169467 100644
--- a/llvm/lib/CodeGen/MachineLICM.cpp
+++ b/llvm/lib/CodeGen/MachineLICM.cpp
@@ -1399,7 +1399,7 @@ MachineInstr *MachineLICMImpl::ExtractHoistableLoad(MachineInstr *MI,
if (NewOpc == 0) return nullptr;
const MCInstrDesc &MID = TII->get(NewOpc);
MachineFunction &MF = *MI->getMF();
- const TargetRegisterClass *RC = TII->getRegClass(MID, LoadRegIndex, TRI);
+ const TargetRegisterClass *RC = TII->getRegClass(MID, LoadRegIndex);
// Ok, we're unfolding. Create a temporary register and do the unfold.
Register Reg = MRI->createVirtualRegister(RC);
diff --git a/llvm/lib/CodeGen/MachineSink.cpp b/llvm/lib/CodeGen/MachineSink.cpp
index 94ed82e..0ceeda4 100644
--- a/llvm/lib/CodeGen/MachineSink.cpp
+++ b/llvm/lib/CodeGen/MachineSink.cpp
@@ -569,7 +569,7 @@ bool MachineSinking::PerformSinkAndFold(MachineInstr &MI,
// Sink a copy of the instruction, replacing a COPY instruction.
MachineBasicBlock::iterator InsertPt = SinkDst->getIterator();
Register DstReg = SinkDst->getOperand(0).getReg();
- TII->reMaterialize(*SinkDst->getParent(), InsertPt, DstReg, 0, MI, *TRI);
+ TII->reMaterialize(*SinkDst->getParent(), InsertPt, DstReg, 0, MI);
New = &*std::prev(InsertPt);
if (!New->getDebugLoc())
New->setDebugLoc(SinkDst->getDebugLoc());
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index fdf1048..013f529 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -2657,8 +2657,7 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
return;
}
if (MONum < MCID.getNumOperands()) {
- if (const TargetRegisterClass *DRC =
- TII->getRegClass(MCID, MONum, TRI)) {
+ if (const TargetRegisterClass *DRC = TII->getRegClass(MCID, MONum)) {
if (!DRC->contains(Reg)) {
report("Illegal physical register for instruction", MO, MONum);
OS << printReg(Reg, TRI) << " is not a "
@@ -2742,12 +2741,11 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
// has register class constraint, the virtual register must
// comply to it.
if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
- MONum < MCID.getNumOperands() &&
- TII->getRegClass(MCID, MONum, TRI)) {
+ MONum < MCID.getNumOperands() && TII->getRegClass(MCID, MONum)) {
report("Virtual register does not match instruction constraint", MO,
MONum);
OS << "Expect register class "
- << TRI->getRegClassName(TII->getRegClass(MCID, MONum, TRI))
+ << TRI->getRegClassName(TII->getRegClass(MCID, MONum))
<< " but got nothing\n";
return;
}
@@ -2773,8 +2771,7 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
}
}
if (MONum < MCID.getNumOperands()) {
- if (const TargetRegisterClass *DRC =
- TII->getRegClass(MCID, MONum, TRI)) {
+ if (const TargetRegisterClass *DRC = TII->getRegClass(MCID, MONum)) {
if (SubIdx) {
const TargetRegisterClass *SuperRC =
TRI->getLargestLegalSuperClass(RC, *MF);
diff --git a/llvm/lib/CodeGen/RegAllocFast.cpp b/llvm/lib/CodeGen/RegAllocFast.cpp
index ec6ffd4..9097728 100644
--- a/llvm/lib/CodeGen/RegAllocFast.cpp
+++ b/llvm/lib/CodeGen/RegAllocFast.cpp
@@ -594,8 +594,7 @@ void RegAllocFastImpl::spill(MachineBasicBlock::iterator Before,
LLVM_DEBUG(dbgs() << " to stack slot #" << FI << '\n');
const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
- TII->storeRegToStackSlot(*MBB, Before, AssignedReg, Kill, FI, &RC, TRI,
- VirtReg);
+ TII->storeRegToStackSlot(*MBB, Before, AssignedReg, Kill, FI, &RC, VirtReg);
++NumStores;
MachineBasicBlock::iterator FirstTerm = MBB->getFirstTerminator();
@@ -652,7 +651,7 @@ void RegAllocFastImpl::reload(MachineBasicBlock::iterator Before,
<< printReg(PhysReg, TRI) << '\n');
int FI = getStackSpaceFor(VirtReg);
const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
- TII->loadRegFromStackSlot(*MBB, Before, PhysReg, FI, &RC, TRI, VirtReg);
+ TII->loadRegFromStackSlot(*MBB, Before, PhysReg, FI, &RC, VirtReg);
++NumLoads;
}
@@ -1123,7 +1122,7 @@ bool RegAllocFastImpl::defineVirtReg(MachineInstr &MI, unsigned OpNum,
if (MO.isMBB()) {
MachineBasicBlock *Succ = MO.getMBB();
TII->storeRegToStackSlot(*Succ, Succ->begin(), PhysReg, Kill, FI,
- &RC, TRI, VirtReg);
+ &RC, VirtReg);
++NumStores;
Succ->addLiveIn(PhysReg);
}
diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp
index f93a7f2..005e44f 100644
--- a/llvm/lib/CodeGen/RegisterCoalescer.cpp
+++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp
@@ -1374,7 +1374,7 @@ bool RegisterCoalescer::reMaterializeDef(const CoalescerPair &CP,
}
const unsigned DefSubIdx = DefMI->getOperand(0).getSubReg();
- const TargetRegisterClass *DefRC = TII->getRegClass(MCID, 0, TRI);
+ const TargetRegisterClass *DefRC = TII->getRegClass(MCID, 0);
if (!DefMI->isImplicitDef()) {
if (DstReg.isPhysical()) {
Register NewDstReg = DstReg;
diff --git a/llvm/lib/CodeGen/RegisterScavenging.cpp b/llvm/lib/CodeGen/RegisterScavenging.cpp
index 7e26c2e..d886167 100644
--- a/llvm/lib/CodeGen/RegisterScavenging.cpp
+++ b/llvm/lib/CodeGen/RegisterScavenging.cpp
@@ -276,14 +276,14 @@ RegScavenger::spill(Register Reg, const TargetRegisterClass &RC, int SPAdj,
": Cannot scavenge register without an emergency "
"spill slot!");
}
- TII->storeRegToStackSlot(*MBB, Before, Reg, true, FI, &RC, TRI, Register());
+ TII->storeRegToStackSlot(*MBB, Before, Reg, true, FI, &RC, Register());
MachineBasicBlock::iterator II = std::prev(Before);
unsigned FIOperandNum = getFrameIndexOperandNum(*II);
TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this);
// Restore the scavenged register before its use (or first terminator).
- TII->loadRegFromStackSlot(*MBB, UseMI, Reg, FI, &RC, TRI, Register());
+ TII->loadRegFromStackSlot(*MBB, UseMI, Reg, FI, &RC, Register());
II = std::prev(UseMI);
FIOperandNum = getFrameIndexOperandNum(*II);
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 4f2eb1e..df353c4 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -10988,6 +10988,22 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
}
}
+ // fold (sra (xor (sra x, c1), -1), c2) -> (xor (sra x, c3), -1)
+ // This allows merging two arithmetic shifts even when there's a NOT in
+ // between.
+ SDValue X;
+ APInt C1;
+ if (N1C && sd_match(N0, m_OneUse(m_Not(
+ m_OneUse(m_Sra(m_Value(X), m_ConstInt(C1))))))) {
+ APInt C2 = N1C->getAPIntValue();
+ zeroExtendToMatch(C1, C2, 1 /* Overflow Bit */);
+ APInt Sum = C1 + C2;
+ unsigned ShiftSum = Sum.getLimitedValue(OpSizeInBits - 1);
+ SDValue NewShift = DAG.getNode(
+ ISD::SRA, DL, VT, X, DAG.getShiftAmountConstant(ShiftSum, VT, DL));
+ return DAG.getNOT(DL, NewShift, VT);
+ }
+
// fold (sra (shl X, m), (sub result_size, n))
// -> (sign_extend (trunc (shl X, (sub (sub result_size, n), m)))) for
// result_size - n != m.
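The new fold is sound because an arithmetic right shift commutes with bitwise NOT (~v >> c == ~(v >> c) when the shift sign-extends), so the NOT can be hoisted past the outer shift and the two shift amounts summed, clamped to bit-width minus one via getLimitedValue. A quick standalone check of the identity in plain C++ (assumes the usual arithmetic >> on signed integers; not LLVM code):

    #include <cassert>
    #include <cstdint>
    int main() {
      for (int32_t x : {INT32_MIN, -1234567, -1, 0, 1, 1234567, INT32_MAX}) {
        int32_t original = (~(x >> 3)) >> 7; // sra, NOT, sra
        int32_t folded = ~(x >> 10);         // sra by 3+7, then NOT
        assert(original == folded);
      }
      return 0;
    }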
diff --git a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index 507b2d6..5c84059 100644
--- a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -1965,7 +1965,7 @@ Register FastISel::createResultReg(const TargetRegisterClass *RC) {
Register FastISel::constrainOperandRegClass(const MCInstrDesc &II, Register Op,
unsigned OpNum) {
if (Op.isVirtual()) {
- const TargetRegisterClass *RegClass = TII.getRegClass(II, OpNum, &TRI);
+ const TargetRegisterClass *RegClass = TII.getRegClass(II, OpNum);
if (!MRI.constrainRegClass(Op, RegClass)) {
// If it's not legal to COPY between the register classes, something
// has gone very wrong before we got here.
diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index d84c3fb..72d0c44 100644
--- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -125,7 +125,7 @@ void InstrEmitter::EmitCopyFromReg(SDValue Op, bool IsClone, Register SrcReg,
const TargetRegisterClass *RC = nullptr;
if (i + II.getNumDefs() < II.getNumOperands()) {
RC = TRI->getAllocatableClass(
- TII->getRegClass(II, i + II.getNumDefs(), TRI));
+ TII->getRegClass(II, i + II.getNumDefs()));
}
if (!UseRC)
UseRC = RC;
@@ -197,7 +197,7 @@ void InstrEmitter::CreateVirtualRegisters(SDNode *Node,
// register instead of creating a new vreg.
Register VRBase;
const TargetRegisterClass *RC =
- TRI->getAllocatableClass(TII->getRegClass(II, i, TRI));
+ TRI->getAllocatableClass(TII->getRegClass(II, i));
// Always let the value type influence the used register class. The
// constraints on the instruction may be too lax to represent the value
// type correctly. For example, a 64-bit float (X86::FR64) can't live in
@@ -330,7 +330,7 @@ InstrEmitter::AddRegisterOperand(MachineInstrBuilder &MIB,
if (II) {
const TargetRegisterClass *OpRC = nullptr;
if (IIOpNum < II->getNumOperands())
- OpRC = TII->getRegClass(*II, IIOpNum, TRI);
+ OpRC = TII->getRegClass(*II, IIOpNum);
if (OpRC) {
unsigned MinNumRegs = MinRCSize;
@@ -409,8 +409,7 @@ void InstrEmitter::AddOperand(MachineInstrBuilder &MIB, SDValue Op,
Register VReg = R->getReg();
MVT OpVT = Op.getSimpleValueType();
const TargetRegisterClass *IIRC =
- II ? TRI->getAllocatableClass(TII->getRegClass(*II, IIOpNum, TRI))
- : nullptr;
+ II ? TRI->getAllocatableClass(TII->getRegClass(*II, IIOpNum)) : nullptr;
const TargetRegisterClass *OpRC =
TLI->isTypeLegal(OpVT)
? TLI->getRegClassFor(OpVT,
diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index f70b6cd..12fc26d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -340,7 +340,7 @@ static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
unsigned Idx = RegDefPos.GetIdx();
const MCInstrDesc &Desc = TII->get(Opcode);
- const TargetRegisterClass *RC = TII->getRegClass(Desc, Idx, TRI);
+ const TargetRegisterClass *RC = TII->getRegClass(Desc, Idx);
assert(RC && "Not a valid register class");
RegClass = RC->getID();
// FIXME: Cost arbitrarily set to 1 because there doesn't seem to be a
diff --git a/llvm/lib/CodeGen/SplitKit.cpp b/llvm/lib/CodeGen/SplitKit.cpp
index f9ecb2c..8ec4bfb 100644
--- a/llvm/lib/CodeGen/SplitKit.cpp
+++ b/llvm/lib/CodeGen/SplitKit.cpp
@@ -1509,10 +1509,9 @@ void SplitEditor::forceRecomputeVNI(const VNInfo &ParentVNI) {
}
// Trace value through phis.
- SmallPtrSet<const VNInfo *, 8> Visited; ///< whether VNI was/is in worklist.
- SmallVector<const VNInfo *, 4> WorkList;
- Visited.insert(&ParentVNI);
- WorkList.push_back(&ParentVNI);
+ ///< whether VNI was/is in worklist.
+ SmallPtrSet<const VNInfo *, 8> Visited = {&ParentVNI};
+ SmallVector<const VNInfo *, 4> WorkList = {&ParentVNI};
const LiveInterval &ParentLI = Edit->getParent();
const SlotIndexes &Indexes = *LIS.getSlotIndexes();
diff --git a/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp b/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp
index 70c3b2c..ebf6d1a 100644
--- a/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp
+++ b/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp
@@ -198,7 +198,7 @@ void TargetFrameLowering::spillCalleeSavedRegister(
} else {
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
TII->storeRegToStackSlot(SaveBlock, MI, Reg, true, CS.getFrameIdx(), RC,
- TRI, Register());
+ Register());
}
}
@@ -212,8 +212,7 @@ void TargetFrameLowering::restoreCalleeSavedRegister(
.addReg(CS.getDstReg(), getKillRegState(true));
} else {
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII->loadRegFromStackSlot(MBB, MI, Reg, CS.getFrameIdx(), RC, TRI,
- Register());
+ TII->loadRegFromStackSlot(MBB, MI, Reg, CS.getFrameIdx(), RC, Register());
assert(MI != MBB.begin() && "loadRegFromStackSlot didn't insert any code!");
}
}
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index 7c89e51..d503d7a 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -58,9 +58,8 @@ static cl::opt<unsigned int> MaxAccumulatorWidth(
TargetInstrInfo::~TargetInstrInfo() = default;
-const TargetRegisterClass *
-TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
- const TargetRegisterInfo * /*RemoveMe*/) const {
+const TargetRegisterClass *TargetInstrInfo::getRegClass(const MCInstrDesc &MCID,
+ unsigned OpNum) const {
if (OpNum >= MCID.getNumOperands())
return nullptr;
@@ -448,10 +447,10 @@ bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
return true;
}
-void TargetInstrInfo::reMaterialize(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
- unsigned SubIdx, const MachineInstr &Orig,
- const TargetRegisterInfo & /*Remove me*/) const {
+void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ Register DestReg, unsigned SubIdx,
+ const MachineInstr &Orig) const {
MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
MBB.insert(I, MI);
@@ -795,11 +794,11 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
// code.
BuildMI(*MBB, Pos, MI.getDebugLoc(), get(TargetOpcode::KILL)).add(MO);
} else {
- storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, &TRI,
+ storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC,
Register());
}
} else
- loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, &TRI, Register());
+ loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, Register());
return &*--Pos;
}
@@ -1331,9 +1330,12 @@ void TargetInstrInfo::reassociateOps(
MachineOperand &OpC = Root.getOperand(0);
Register RegA = OpA.getReg();
+ unsigned SubRegA = OpA.getSubReg();
Register RegB = OpB.getReg();
Register RegX = OpX.getReg();
+ unsigned SubRegX = OpX.getSubReg();
Register RegY = OpY.getReg();
+ unsigned SubRegY = OpY.getSubReg();
Register RegC = OpC.getReg();
if (RegA.isVirtual())
@@ -1351,6 +1353,7 @@ void TargetInstrInfo::reassociateOps(
// recycling RegB because the MachineCombiner's computation of the critical
// path requires a new register definition rather than an existing one.
Register NewVR = MRI.createVirtualRegister(RC);
+ unsigned SubRegNewVR = 0;
InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
auto [NewRootOpc, NewPrevOpc] = getReassociationOpcodes(Pattern, Root, Prev);
@@ -1363,6 +1366,7 @@ void TargetInstrInfo::reassociateOps(
if (SwapPrevOperands) {
std::swap(RegX, RegY);
+ std::swap(SubRegX, SubRegY);
std::swap(KillX, KillY);
}
@@ -1415,9 +1419,9 @@ void TargetInstrInfo::reassociateOps(
if (Idx == 0)
continue;
if (Idx == PrevFirstOpIdx)
- MIB1.addReg(RegX, getKillRegState(KillX));
+ MIB1.addReg(RegX, getKillRegState(KillX), SubRegX);
else if (Idx == PrevSecondOpIdx)
- MIB1.addReg(RegY, getKillRegState(KillY));
+ MIB1.addReg(RegY, getKillRegState(KillY), SubRegY);
else
MIB1.add(MO);
}
@@ -1425,6 +1429,7 @@ void TargetInstrInfo::reassociateOps(
if (SwapRootOperands) {
std::swap(RegA, NewVR);
+ std::swap(SubRegA, SubRegNewVR);
std::swap(KillA, KillNewVR);
}
@@ -1436,9 +1441,9 @@ void TargetInstrInfo::reassociateOps(
if (Idx == 0)
continue;
if (Idx == RootFirstOpIdx)
- MIB2 = MIB2.addReg(RegA, getKillRegState(KillA));
+ MIB2 = MIB2.addReg(RegA, getKillRegState(KillA), SubRegA);
else if (Idx == RootSecondOpIdx)
- MIB2 = MIB2.addReg(NewVR, getKillRegState(KillNewVR));
+ MIB2 = MIB2.addReg(NewVR, getKillRegState(KillNewVR), SubRegNewVR);
else
MIB2 = MIB2.add(MO);
}
@@ -1526,6 +1531,7 @@ void TargetInstrInfo::genAlternativeCodeSequence(
if (IndexedReg.index() == 0)
continue;
+ // FIXME: Losing subregisters
MachineInstr *Instr = MRI.getUniqueVRegDef(IndexedReg.value());
MachineInstrBuilder MIB;
Register AccReg;
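The SubRegA/SubRegX/SubRegY plumbing above fixes a machine-verifier failure: operands being reassociated may carry subregister indices, and MachineInstrBuilder::addReg only preserves one if it is passed explicitly as the third argument. A sketch of the failure mode, reusing names from the hunk (the vreg is hypothetical):

    // Original operand: %3.sub_32, a 32-bit view of a 64-bit vreg.
    MIB1.addReg(RegX, getKillRegState(KillX));           // old: reads all of %3
    MIB1.addReg(RegX, getKillRegState(KillX), SubRegX);  // new: reads %3.sub_32

The new machine-combiner-subregs.mir and machine-combiner-subreg-verifier-error.mir tests in the diffstat cover exactly this case.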
diff --git a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
index b99e1c7..3f2961c 100644
--- a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -1402,7 +1402,7 @@ bool TwoAddressInstructionImpl::tryInstructionTransform(
// Unfold the load.
LLVM_DEBUG(dbgs() << "2addr: UNFOLDING: " << MI);
const TargetRegisterClass *RC = TRI->getAllocatableClass(
- TII->getRegClass(UnfoldMCID, LoadRegIndex, TRI));
+ TII->getRegClass(UnfoldMCID, LoadRegIndex));
Register Reg = MRI->createVirtualRegister(RC);
SmallVector<MachineInstr *, 2> NewMIs;
if (!TII->unfoldMemoryOperand(*MF, MI, Reg,
diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index 18a4f0a..ac86fa85 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -8482,8 +8482,15 @@ GlobalVariable *OpenMPIRBuilder::getOrCreateInternalVariable(
// create different versions of the function for different OMP internal
// variables.
const DataLayout &DL = M.getDataLayout();
- unsigned AddressSpaceVal =
- AddressSpace ? *AddressSpace : DL.getDefaultGlobalsAddressSpace();
+ // TODO: Investigate why AMDGPU expects AS 0 for globals even though the
+ // default global AS is 1.
+ // See double-target-call-with-declare-target.f90 and
+ // declare-target-vars-in-target-region.f90 libomptarget
+ // tests.
+ unsigned AddressSpaceVal = AddressSpace ? *AddressSpace
+ : M.getTargetTriple().isAMDGPU()
+ ? 0
+ : DL.getDefaultGlobalsAddressSpace();
auto Linkage = this->M.getTargetTriple().getArch() == Triple::wasm32
? GlobalValue::InternalLinkage
: GlobalValue::CommonLinkage;
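Spelled out, the new address-space selection is a three-way priority: an explicit AddressSpace argument wins, then the AMDGPU workaround pins globals to AS 0, and only then does the data-layout default apply. A behavior-equivalent sketch of the nested conditional above:

    unsigned AddressSpaceVal;
    if (AddressSpace)
      AddressSpaceVal = *AddressSpace;           // caller's explicit choice
    else if (M.getTargetTriple().isAMDGPU())
      AddressSpaceVal = 0;                       // workaround, see TODO above
    else
      AddressSpaceVal = DL.getDefaultGlobalsAddressSpace();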
diff --git a/llvm/lib/Passes/PassBuilderPipelines.cpp b/llvm/lib/Passes/PassBuilderPipelines.cpp
index 2fe963b..dd73c04 100644
--- a/llvm/lib/Passes/PassBuilderPipelines.cpp
+++ b/llvm/lib/Passes/PassBuilderPipelines.cpp
@@ -228,7 +228,7 @@ static cl::opt<bool> EnableLoopHeaderDuplication(
static cl::opt<bool>
EnableDFAJumpThreading("enable-dfa-jump-thread",
cl::desc("Enable DFA jump threading"),
- cl::init(true), cl::Hidden);
+ cl::init(false), cl::Hidden);
static cl::opt<bool>
EnableHotColdSplit("hot-cold-split",
diff --git a/llvm/lib/Support/SpecialCaseList.cpp b/llvm/lib/Support/SpecialCaseList.cpp
index beec8b8..91f98cf 100644
--- a/llvm/lib/Support/SpecialCaseList.cpp
+++ b/llvm/lib/Support/SpecialCaseList.cpp
@@ -14,26 +14,94 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/SpecialCaseList.h"
+#include "llvm/ADT/RadixTree.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SetVector.h"
-#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/GlobPattern.h"
#include "llvm/Support/LineIterator.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Regex.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
-#include <algorithm>
-#include <limits>
#include <memory>
#include <stdio.h>
#include <string>
#include <system_error>
#include <utility>
+#include <variant>
+#include <vector>
namespace llvm {
-Error SpecialCaseList::RegexMatcher::insert(StringRef Pattern,
- unsigned LineNumber) {
+namespace {
+
+using Match = std::pair<StringRef, unsigned>;
+static constexpr Match NotMatched = {"", 0};
+
+// Legacy v1 matcher.
+class RegexMatcher {
+public:
+ Error insert(StringRef Pattern, unsigned LineNumber);
+ void preprocess(bool BySize);
+
+ Match match(StringRef Query) const;
+
+ struct Reg {
+ Reg(StringRef Name, unsigned LineNo, Regex &&Rg)
+ : Name(Name), LineNo(LineNo), Rg(std::move(Rg)) {}
+ StringRef Name;
+ unsigned LineNo;
+ Regex Rg;
+ };
+
+ std::vector<Reg> RegExes;
+};
+
+class GlobMatcher {
+public:
+ Error insert(StringRef Pattern, unsigned LineNumber);
+ void preprocess(bool BySize);
+
+ Match match(StringRef Query) const;
+
+ struct Glob {
+ Glob(StringRef Name, unsigned LineNo, GlobPattern &&Pattern)
+ : Name(Name), LineNo(LineNo), Pattern(std::move(Pattern)) {}
+ StringRef Name;
+ unsigned LineNo;
+ GlobPattern Pattern;
+ };
+
+ std::vector<GlobMatcher::Glob> Globs;
+
+ RadixTree<iterator_range<StringRef::const_iterator>,
+ RadixTree<iterator_range<StringRef::const_reverse_iterator>,
+ SmallVector<int, 1>>>
+ PrefixSuffixToGlob;
+
+ RadixTree<iterator_range<StringRef::const_iterator>, SmallVector<int, 1>>
+ SubstrToGlob;
+};
+
+/// Represents a set of patterns and their line numbers
+class Matcher {
+public:
+ Matcher(bool UseGlobs, bool RemoveDotSlash);
+
+ Error insert(StringRef Pattern, unsigned LineNumber);
+ void preprocess(bool BySize);
+ Match match(StringRef Query) const;
+
+ bool matchAny(StringRef Query) const { return match(Query).second > 0; }
+
+ std::variant<RegexMatcher, GlobMatcher> M;
+ bool RemoveDotSlash;
+};
+
+Error RegexMatcher::insert(StringRef Pattern, unsigned LineNumber) {
if (Pattern.empty())
return createStringError(errc::invalid_argument,
"Supplied regex was blank");
@@ -57,7 +125,7 @@ Error SpecialCaseList::RegexMatcher::insert(StringRef Pattern,
return Error::success();
}
-void SpecialCaseList::RegexMatcher::preprocess(bool BySize) {
+void RegexMatcher::preprocess(bool BySize) {
if (BySize) {
llvm::stable_sort(RegExes, [](const Reg &A, const Reg &B) {
return A.Name.size() < B.Name.size();
@@ -65,16 +133,14 @@ void SpecialCaseList::RegexMatcher::preprocess(bool BySize) {
}
}
-SpecialCaseList::Match
-SpecialCaseList::RegexMatcher::match(StringRef Query) const {
+Match RegexMatcher::match(StringRef Query) const {
for (const auto &R : reverse(RegExes))
if (R.Rg.match(Query))
return {R.Name, R.LineNo};
return NotMatched;
}
-Error SpecialCaseList::GlobMatcher::insert(StringRef Pattern,
- unsigned LineNumber) {
+Error GlobMatcher::insert(StringRef Pattern, unsigned LineNumber) {
if (Pattern.empty())
return createStringError(errc::invalid_argument, "Supplied glob was blank");
@@ -85,7 +151,7 @@ Error SpecialCaseList::GlobMatcher::insert(StringRef Pattern,
return Error::success();
}
-void SpecialCaseList::GlobMatcher::preprocess(bool BySize) {
+void GlobMatcher::preprocess(bool BySize) {
if (BySize) {
llvm::stable_sort(Globs, [](const Glob &A, const Glob &B) {
return A.Name.size() < B.Name.size();
@@ -115,8 +181,7 @@ void SpecialCaseList::GlobMatcher::preprocess(bool BySize) {
}
}
-SpecialCaseList::Match
-SpecialCaseList::GlobMatcher::match(StringRef Query) const {
+Match GlobMatcher::match(StringRef Query) const {
int Best = -1;
if (!PrefixSuffixToGlob.empty()) {
for (const auto &[_, SToGlob] : PrefixSuffixToGlob.find_prefixes(Query)) {
@@ -164,7 +229,7 @@ SpecialCaseList::GlobMatcher::match(StringRef Query) const {
return {Globs[Best].Name, Globs[Best].LineNo};
}
-SpecialCaseList::Matcher::Matcher(bool UseGlobs, bool RemoveDotSlash)
+Matcher::Matcher(bool UseGlobs, bool RemoveDotSlash)
: RemoveDotSlash(RemoveDotSlash) {
if (UseGlobs)
M.emplace<GlobMatcher>();
@@ -172,20 +237,34 @@ SpecialCaseList::Matcher::Matcher(bool UseGlobs, bool RemoveDotSlash)
M.emplace<RegexMatcher>();
}
-Error SpecialCaseList::Matcher::insert(StringRef Pattern, unsigned LineNumber) {
+Error Matcher::insert(StringRef Pattern, unsigned LineNumber) {
return std::visit([&](auto &V) { return V.insert(Pattern, LineNumber); }, M);
}
-void SpecialCaseList::Matcher::preprocess(bool BySize) {
+void Matcher::preprocess(bool BySize) {
return std::visit([&](auto &V) { return V.preprocess(BySize); }, M);
}
-SpecialCaseList::Match SpecialCaseList::Matcher::match(StringRef Query) const {
+Match Matcher::match(StringRef Query) const {
if (RemoveDotSlash)
Query = llvm::sys::path::remove_leading_dotslash(Query);
- return std::visit(
- [&](auto &V) -> SpecialCaseList::Match { return V.match(Query); }, M);
+ return std::visit([&](auto &V) -> Match { return V.match(Query); }, M);
}
+} // namespace
+
+class SpecialCaseList::Section::SectionImpl {
+public:
+ void preprocess(bool OrderBySize);
+ const Matcher *findMatcher(StringRef Prefix, StringRef Category) const;
+
+ using SectionEntries = StringMap<StringMap<Matcher>>;
+
+ explicit SectionImpl(bool UseGlobs)
+ : SectionMatcher(UseGlobs, /*RemoveDotSlash=*/false) {}
+
+ Matcher SectionMatcher;
+ SectionEntries Entries;
+};
// TODO: Refactor this to return Expected<...>
std::unique_ptr<SpecialCaseList>
@@ -243,11 +322,11 @@ bool SpecialCaseList::createInternal(const MemoryBuffer *MB, std::string &Error,
Expected<SpecialCaseList::Section *>
SpecialCaseList::addSection(StringRef SectionStr, unsigned FileNo,
unsigned LineNo, bool UseGlobs) {
+ SectionStr = SectionStr.copy(StrAlloc);
Sections.emplace_back(SectionStr, FileNo, UseGlobs);
auto &Section = Sections.back();
- SectionStr = SectionStr.copy(StrAlloc);
- if (auto Err = Section.SectionMatcher.insert(SectionStr, LineNo)) {
+ if (auto Err = Section.Impl->SectionMatcher.insert(SectionStr, LineNo)) {
return createStringError(errc::invalid_argument,
"malformed section at line " + Twine(LineNo) +
": '" + SectionStr +
@@ -279,7 +358,7 @@ bool SpecialCaseList::parse(unsigned FileIdx, const MemoryBuffer *MB,
Error = toString(std::move(Err));
return false;
}
- Section *CurrentSection = ErrOrSection.get();
+ Section::SectionImpl *CurrentImpl = ErrOrSection.get()->Impl.get();
// This is the current list of prefixes for all existing users matching file
// path. We may need parametrization in constructor in future.
@@ -307,7 +386,7 @@ bool SpecialCaseList::parse(unsigned FileIdx, const MemoryBuffer *MB,
Error = toString(std::move(Err));
return false;
}
- CurrentSection = ErrOrSection.get();
+ CurrentImpl = ErrOrSection.get()->Impl.get();
continue;
}
@@ -320,7 +399,7 @@ bool SpecialCaseList::parse(unsigned FileIdx, const MemoryBuffer *MB,
}
auto [Pattern, Category] = Postfix.split("=");
- auto [It, _] = CurrentSection->Entries[Prefix].try_emplace(
+ auto [It, _] = CurrentImpl->Entries[Prefix].try_emplace(
Category, UseGlobs,
RemoveDotSlash && llvm::is_contained(PathPrefixes, Prefix));
Pattern = Pattern.copy(StrAlloc);
@@ -334,7 +413,7 @@ bool SpecialCaseList::parse(unsigned FileIdx, const MemoryBuffer *MB,
}
for (Section &S : Sections)
- S.preprocess(OrderBySize);
+ S.Impl->preprocess(OrderBySize);
return true;
}
@@ -351,7 +430,7 @@ std::pair<unsigned, unsigned>
SpecialCaseList::inSectionBlame(StringRef Section, StringRef Prefix,
StringRef Query, StringRef Category) const {
for (const auto &S : reverse(Sections)) {
- if (S.SectionMatcher.matchAny(Section)) {
+ if (S.Impl->SectionMatcher.matchAny(Section)) {
unsigned Blame = S.getLastMatch(Prefix, Query, Category);
if (Blame)
return {S.FileIdx, Blame};
@@ -360,13 +439,22 @@ SpecialCaseList::inSectionBlame(StringRef Section, StringRef Prefix,
return NotFound;
}
+SpecialCaseList::Section::Section(StringRef Str, unsigned FileIdx,
+ bool UseGlobs)
+ : Name(Str), FileIdx(FileIdx),
+ Impl(std::make_unique<SectionImpl>(UseGlobs)) {}
+
+SpecialCaseList::Section::Section(Section &&) = default;
+
+SpecialCaseList::Section::~Section() = default;
+
bool SpecialCaseList::Section::matchName(StringRef Name) const {
- return SectionMatcher.matchAny(Name);
+ return Impl->SectionMatcher.matchAny(Name);
}
-const SpecialCaseList::Matcher *
-SpecialCaseList::Section::findMatcher(StringRef Prefix,
- StringRef Category) const {
+const Matcher *
+SpecialCaseList::Section::SectionImpl::findMatcher(StringRef Prefix,
+ StringRef Category) const {
SectionEntries::const_iterator I = Entries.find(Prefix);
if (I == Entries.end())
return nullptr;
@@ -377,7 +465,7 @@ SpecialCaseList::Section::findMatcher(StringRef Prefix,
return &II->second;
}
-LLVM_ABI void SpecialCaseList::Section::preprocess(bool OrderBySize) {
+void SpecialCaseList::Section::SectionImpl::preprocess(bool OrderBySize) {
SectionMatcher.preprocess(false);
for (auto &[K1, E] : Entries)
for (auto &[K2, M] : E)
@@ -387,7 +475,7 @@ LLVM_ABI void SpecialCaseList::Section::preprocess(bool OrderBySize) {
unsigned SpecialCaseList::Section::getLastMatch(StringRef Prefix,
StringRef Query,
StringRef Category) const {
- if (const Matcher *M = findMatcher(Prefix, Category))
+ if (const Matcher *M = Impl->findMatcher(Prefix, Category))
return M->match(Query).second;
return 0;
}
@@ -395,13 +483,13 @@ unsigned SpecialCaseList::Section::getLastMatch(StringRef Prefix,
StringRef SpecialCaseList::Section::getLongestMatch(StringRef Prefix,
StringRef Query,
StringRef Category) const {
- if (const Matcher *M = findMatcher(Prefix, Category))
+ if (const Matcher *M = Impl->findMatcher(Prefix, Category))
return M->match(Query).first;
return {};
}
bool SpecialCaseList::Section::hasPrefix(StringRef Prefix) const {
- return Entries.find(Prefix) != Entries.end();
+ return Impl->Entries.find(Prefix) != Impl->Entries.end();
}
} // namespace llvm
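Externally the refactor is behavior-preserving: sections, prefixes, and categories are queried as before. A hedged usage sketch, assuming the create/inSection entry points declared in SpecialCaseList.h; the ignore-list text follows the usual sanitizer special-case-list format, and the section/prefix names here are made up:

    #include "llvm/Support/MemoryBuffer.h"
    #include "llvm/Support/SpecialCaseList.h"
    using namespace llvm;

    bool isIgnored(StringRef Path) {
      std::string Err;
      std::unique_ptr<MemoryBuffer> MB =
          MemoryBuffer::getMemBuffer("[mytool]\n"
                                     "src:*/third_party/*\n"
                                     "fun:ignored_*=init\n");
      std::unique_ptr<SpecialCaseList> SCL =
          SpecialCaseList::create(MB.get(), Err);
      if (!SCL)
        return false;  // Err holds the parse error
      // Prefix "src", query Path, empty category; matches the glob above.
      return SCL->inSection("mytool", "src", Path);
    }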
diff --git a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
index cb831963..7712d2a 100644
--- a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
@@ -629,8 +629,7 @@ void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
}
const MCInstrDesc &MCID = TII->get(Opc);
// Create a dummy virtual register for the SUBS def.
- Register DestReg =
- MRI->createVirtualRegister(TII->getRegClass(MCID, 0, TRI));
+ Register DestReg = MRI->createVirtualRegister(TII->getRegClass(MCID, 0));
// Insert a SUBS Rn, #0 instruction instead of the cbz / cbnz.
BuildMI(*Head, Head->end(), TermDL, MCID)
.addReg(DestReg, RegState::Define | RegState::Dead)
@@ -638,8 +637,7 @@ void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
.addImm(0)
.addImm(0);
// SUBS uses the GPR*sp register classes.
- MRI->constrainRegClass(HeadCond[2].getReg(),
- TII->getRegClass(MCID, 1, TRI));
+ MRI->constrainRegClass(HeadCond[2].getReg(), TII->getRegClass(MCID, 1));
}
Head->splice(Head->end(), CmpBB, CmpBB->begin(), CmpBB->end());
@@ -686,10 +684,10 @@ void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(CmpBBTailCC);
const MCInstrDesc &MCID = TII->get(Opc);
MRI->constrainRegClass(CmpMI->getOperand(FirstOp).getReg(),
- TII->getRegClass(MCID, 0, TRI));
+ TII->getRegClass(MCID, 0));
if (CmpMI->getOperand(FirstOp + 1).isReg())
MRI->constrainRegClass(CmpMI->getOperand(FirstOp + 1).getReg(),
- TII->getRegClass(MCID, 1, TRI));
+ TII->getRegClass(MCID, 1));
MachineInstrBuilder MIB = BuildMI(*Head, CmpMI, CmpMI->getDebugLoc(), MCID)
.add(CmpMI->getOperand(FirstOp)); // Register Rn
if (isZBranch)
diff --git a/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp b/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
index 75361f5..4ff49a6 100644
--- a/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
+++ b/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
@@ -156,7 +156,7 @@ void AArch64DeadRegisterDefinitions::processMachineBasicBlock(
LLVM_DEBUG(dbgs() << " Ignoring, def is tied operand.\n");
continue;
}
- const TargetRegisterClass *RC = TII->getRegClass(Desc, I, TRI);
+ const TargetRegisterClass *RC = TII->getRegClass(Desc, I);
unsigned NewReg;
if (RC == nullptr) {
LLVM_DEBUG(dbgs() << " Ignoring, register is not a GPR.\n");
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 66e4949..b93e562 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -5664,7 +5664,6 @@ void AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
Register SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
MachineFunction &MF = *MBB.getParent();
@@ -5678,7 +5677,7 @@ void AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
bool Offset = true;
MCRegister PNRReg = MCRegister::NoRegister;
unsigned StackID = TargetStackID::Default;
- switch (TRI->getSpillSize(*RC)) {
+ switch (RI.getSpillSize(*RC)) {
case 1:
if (AArch64::FPR8RegClass.hasSubClassEq(RC))
Opc = AArch64::STRBui;
@@ -5841,10 +5840,12 @@ static void loadRegPairFromStackSlot(const TargetRegisterInfo &TRI,
.addMemOperand(MMO);
}
-void AArch64InstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void AArch64InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ Register DestReg, int FI,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = MF.getFrameInfo();
MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
@@ -5856,7 +5857,7 @@ void AArch64InstrInfo::loadRegFromStackSlot(
bool Offset = true;
unsigned StackID = TargetStackID::Default;
Register PNRReg = MCRegister::NoRegister;
- switch (TRI->getSpillSize(*RC)) {
+ switch (TRI.getSpillSize(*RC)) {
case 1:
if (AArch64::FPR8RegClass.hasSubClassEq(RC))
Opc = AArch64::LDRBui;
@@ -6492,10 +6493,10 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
"Mismatched register size in non subreg COPY");
if (IsSpill)
storeRegToStackSlot(MBB, InsertPt, SrcReg, SrcMO.isKill(), FrameIndex,
- getRegClass(SrcReg), &TRI, Register());
+ getRegClass(SrcReg), Register());
else
loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex,
- getRegClass(DstReg), &TRI, Register());
+ getRegClass(DstReg), Register());
return &*--InsertPt;
}
@@ -6513,8 +6514,7 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
assert(SrcMO.getSubReg() == 0 &&
"Unexpected subreg on physical register");
storeRegToStackSlot(MBB, InsertPt, AArch64::XZR, SrcMO.isKill(),
- FrameIndex, &AArch64::GPR64RegClass, &TRI,
- Register());
+ FrameIndex, &AArch64::GPR64RegClass, Register());
return &*--InsertPt;
}
@@ -6548,7 +6548,7 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
assert(TRI.getRegSizeInBits(*getRegClass(SrcReg)) ==
TRI.getRegSizeInBits(*FillRC) &&
"Mismatched regclass size on folded subreg COPY");
- loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex, FillRC, &TRI,
+ loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex, FillRC,
Register());
MachineInstr &LoadMI = *--InsertPt;
MachineOperand &LoadDst = LoadMI.getOperand(0);
@@ -11063,8 +11063,6 @@ static Register cloneInstr(const MachineInstr *MI, unsigned ReplaceOprNum,
MachineBasicBlock::iterator InsertTo) {
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
const TargetInstrInfo *TII = MBB.getParent()->getSubtarget().getInstrInfo();
- const TargetRegisterInfo *TRI =
- MBB.getParent()->getSubtarget().getRegisterInfo();
MachineInstr *NewMI = MBB.getParent()->CloneMachineInstr(MI);
Register Result = 0;
for (unsigned I = 0; I < NewMI->getNumOperands(); ++I) {
@@ -11073,8 +11071,7 @@ static Register cloneInstr(const MachineInstr *MI, unsigned ReplaceOprNum,
MRI.getRegClass(NewMI->getOperand(0).getReg()));
NewMI->getOperand(I).setReg(Result);
} else if (I == ReplaceOprNum) {
- MRI.constrainRegClass(ReplaceReg,
- TII->getRegClass(NewMI->getDesc(), I, TRI));
+ MRI.constrainRegClass(ReplaceReg, TII->getRegClass(NewMI->getDesc(), I));
NewMI->getOperand(I).setReg(ReplaceReg);
}
}
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index 179574a..979c9ac 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -353,14 +353,13 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
// This tells target independent code that it is okay to pass instructions
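The same parameter drop applies to the spill and reload hooks, so out-of-tree targets must update their overrides as well. A sketch of the adjusted override for a hypothetical target whose instr-info owns an RI member, as AArch64's does (body condensed to the relevant line):

    void MyTargetInstrInfo::storeRegToStackSlot(
        MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
        Register SrcReg, bool isKill, int FrameIndex,
        const TargetRegisterClass *RC, Register VReg,
        MachineInstr::MIFlag Flags) const {
      // Spill sizing now comes from the target's own register info rather
      // than from a caller-supplied TargetRegisterInfo pointer.
      unsigned Size = RI.getSpillSize(*RC);
      (void)Size; // ...select a store opcode from Size, as before.
    }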
diff --git a/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp b/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
index 04e76c7..d25db89 100644
--- a/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
+++ b/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
@@ -595,17 +595,17 @@ bool AArch64MIPeepholeOpt::splitTwoPartImm(
// Determine register classes for destinations and register operands
const TargetRegisterClass *FirstInstrDstRC =
- TII->getRegClass(TII->get(Opcode.first), 0, TRI);
+ TII->getRegClass(TII->get(Opcode.first), 0);
const TargetRegisterClass *FirstInstrOperandRC =
- TII->getRegClass(TII->get(Opcode.first), 1, TRI);
+ TII->getRegClass(TII->get(Opcode.first), 1);
const TargetRegisterClass *SecondInstrDstRC =
(Opcode.first == Opcode.second)
? FirstInstrDstRC
- : TII->getRegClass(TII->get(Opcode.second), 0, TRI);
+ : TII->getRegClass(TII->get(Opcode.second), 0);
const TargetRegisterClass *SecondInstrOperandRC =
(Opcode.first == Opcode.second)
? FirstInstrOperandRC
- : TII->getRegClass(TII->get(Opcode.second), 1, TRI);
+ : TII->getRegClass(TII->get(Opcode.second), 1);
// Get old registers destinations and new register destinations
Register DstReg = MI.getOperand(0).getReg();
@@ -784,14 +784,14 @@ bool AArch64MIPeepholeOpt::visitUBFMXri(MachineInstr &MI) {
}
const TargetRegisterClass *DstRC64 =
- TII->getRegClass(TII->get(MI.getOpcode()), 0, TRI);
+ TII->getRegClass(TII->get(MI.getOpcode()), 0);
const TargetRegisterClass *DstRC32 =
TRI->getSubRegisterClass(DstRC64, AArch64::sub_32);
assert(DstRC32 && "Destination register class of UBFMXri doesn't have a "
"sub_32 subregister class");
const TargetRegisterClass *SrcRC64 =
- TII->getRegClass(TII->get(MI.getOpcode()), 1, TRI);
+ TII->getRegClass(TII->get(MI.getOpcode()), 1);
const TargetRegisterClass *SrcRC32 =
TRI->getSubRegisterClass(SrcRC64, AArch64::sub_32);
assert(SrcRC32 && "Source register class of UBFMXri doesn't have a sub_32 "
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
index eaf8723..f3cf222 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -897,7 +897,7 @@ AArch64RegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
const MCInstrDesc &MCID = TII->get(AArch64::ADDXri);
MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
Register BaseReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
- MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0, this));
+ MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0));
unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
BuildMI(*MBB, Ins, DL, MCID, BaseReg)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index 54d94b1..0b61adf 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -2069,6 +2069,7 @@ def FeatureISAVersion12 : FeatureSet<
FeatureMemoryAtomicFAddF32DenormalSupport,
FeatureBVHDualAndBVH8Insts,
FeatureWaitsBeforeSystemScopeStores,
+ FeatureD16Writes32BitVgpr
]>;
def FeatureISAVersion12_50 : FeatureSet<
@@ -2143,6 +2144,7 @@ def FeatureISAVersion12_50 : FeatureSet<
FeatureSupportsXNACK,
FeatureXNACK,
FeatureClusters,
+ FeatureD16Writes32BitVgpr,
]>;
def FeatureISAVersion12_51 : FeatureSet<
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index 9fbf9e5..23ba4ad 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -2011,7 +2011,7 @@ void PreRARematStage::rematerialize() {
// Rematerialize DefMI to its use block.
TII->reMaterialize(*InsertPos->getParent(), InsertPos, Reg,
- AMDGPU::NoSubRegister, *DefMI, *DAG.TRI);
+ AMDGPU::NoSubRegister, *DefMI);
Remat.RematMI = &*std::prev(InsertPos);
DAG.LIS->InsertMachineInstrInMaps(*Remat.RematMI);
@@ -2163,8 +2163,7 @@ void PreRARematStage::finalizeGCNSchedStage() {
// Re-rematerialize MI at the end of its original region. Note that it may
// not be rematerialized exactly in the same position as originally within
// the region, but it should not matter much.
- TII->reMaterialize(*MBB, InsertPos, Reg, AMDGPU::NoSubRegister, RematMI,
- *DAG.TRI);
+ TII->reMaterialize(*MBB, InsertPos, Reg, AMDGPU::NoSubRegister, RematMI);
MachineInstr *NewMI = &*std::prev(InsertPos);
DAG.LIS->InsertMachineInstrInMaps(*NewMI);
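reMaterialize follows the same pattern: the trailing TargetRegisterInfo reference is removed from the hook, so callers simply drop the last argument. Condensed from the two hunks above (illustrative only):

    // Before:
    //   TII->reMaterialize(MBB, InsertPos, Reg, AMDGPU::NoSubRegister,
    //                      *DefMI, *DAG.TRI);
    // After:
    TII->reMaterialize(MBB, InsertPos, Reg, AMDGPU::NoSubRegister, *DefMI);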
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 964309b..293005c 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -713,7 +713,7 @@ bool SIFoldOperandsImpl::updateOperand(FoldCandidate &Fold) const {
// Verify the register is compatible with the operand.
if (const TargetRegisterClass *OpRC =
- TII->getRegClass(MI->getDesc(), Fold.UseOpNo, TRI)) {
+ TII->getRegClass(MI->getDesc(), Fold.UseOpNo)) {
const TargetRegisterClass *NewRC =
TRI->getRegClassForReg(*MRI, New->getReg());
@@ -2394,7 +2394,7 @@ bool SIFoldOperandsImpl::tryFoldRegSequence(MachineInstr &MI) {
unsigned OpIdx = Op - &UseMI->getOperand(0);
const MCInstrDesc &InstDesc = UseMI->getDesc();
- const TargetRegisterClass *OpRC = TII->getRegClass(InstDesc, OpIdx, TRI);
+ const TargetRegisterClass *OpRC = TII->getRegClass(InstDesc, OpIdx);
if (!OpRC || !TRI->isVectorSuperClass(OpRC))
return false;
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 6b397e0..9c78040 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -1668,8 +1668,7 @@ unsigned SIInstrInfo::getVectorRegSpillSaveOpcode(
void SIInstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags) const {
MachineFunction *MF = MBB.getParent();
SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
@@ -1681,7 +1680,7 @@ void SIInstrInfo::storeRegToStackSlot(
MachineMemOperand *MMO = MF->getMachineMemOperand(
PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FrameIndex),
FrameInfo.getObjectAlign(FrameIndex));
- unsigned SpillSize = TRI->getSpillSize(*RC);
+ unsigned SpillSize = RI.getSpillSize(*RC);
MachineRegisterInfo &MRI = MF->getRegInfo();
if (RI.isSGPRClass(RC)) {
@@ -1863,14 +1862,13 @@ void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
Register DestReg, int FrameIndex,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
MachineFunction *MF = MBB.getParent();
SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
MachineFrameInfo &FrameInfo = MF->getFrameInfo();
const DebugLoc &DL = MBB.findDebugLoc(MI);
- unsigned SpillSize = TRI->getSpillSize(*RC);
+ unsigned SpillSize = RI.getSpillSize(*RC);
MachinePointerInfo PtrInfo
= MachinePointerInfo::getFixedStack(*MF, FrameIndex);
@@ -2519,8 +2517,8 @@ bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
void SIInstrInfo::reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, Register DestReg,
- unsigned SubIdx, const MachineInstr &Orig,
- const TargetRegisterInfo &RI) const {
+ unsigned SubIdx,
+ const MachineInstr &Orig) const {
// Try shrinking the instruction to remat only the part needed for current
// context.
@@ -2570,7 +2568,7 @@ void SIInstrInfo::reMaterialize(MachineBasicBlock &MBB,
const MCInstrDesc &TID = get(NewOpcode);
const TargetRegisterClass *NewRC =
- RI.getAllocatableClass(getRegClass(TID, 0, &RI));
+ RI.getAllocatableClass(getRegClass(TID, 0));
MRI.setRegClass(DestReg, NewRC);
UseMO->setReg(DestReg);
@@ -2600,7 +2598,7 @@ void SIInstrInfo::reMaterialize(MachineBasicBlock &MBB,
break;
}
- TargetInstrInfo::reMaterialize(MBB, I, DestReg, SubIdx, Orig, RI);
+ TargetInstrInfo::reMaterialize(MBB, I, DestReg, SubIdx, Orig);
}
std::pair<MachineInstr*, MachineInstr*>
@@ -3613,7 +3611,7 @@ bool SIInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
AMDGPU::V_MOV_B64_PSEUDO, AMDGPU::V_ACCVGPR_WRITE_B32_e64}) {
const MCInstrDesc &MovDesc = get(MovOp);
- const TargetRegisterClass *MovDstRC = getRegClass(MovDesc, 0, &RI);
+ const TargetRegisterClass *MovDstRC = getRegClass(MovDesc, 0);
if (Is16Bit) {
// We just need to find a correctly sized register class, so the
// subregister index compatibility doesn't matter since we're statically
@@ -6028,9 +6026,8 @@ SIInstrInfo::getWholeWaveFunctionSetup(MachineFunction &MF) const {
// FIXME: This should not be an overridable function. All subtarget dependent
// operand modifications should go through isLookupRegClassByHwMode in the
// generic handling.
-const TargetRegisterClass *
-SIInstrInfo::getRegClass(const MCInstrDesc &TID, unsigned OpNum,
- const TargetRegisterInfo *TRI) const {
+const TargetRegisterClass *SIInstrInfo::getRegClass(const MCInstrDesc &TID,
+ unsigned OpNum) const {
if (OpNum >= TID.getNumOperands())
return nullptr;
const MCOperandInfo &OpInfo = TID.operands()[OpNum];
@@ -6805,7 +6802,7 @@ void SIInstrInfo::legalizeOperandsFLAT(MachineRegisterInfo &MRI,
return;
const TargetRegisterClass *DeclaredRC =
- getRegClass(MI.getDesc(), SAddr->getOperandNo(), &RI);
+ getRegClass(MI.getDesc(), SAddr->getOperandNo());
Register ToSGPR = readlaneVGPRToSGPR(SAddr->getReg(), MI, MRI, DeclaredRC);
SAddr->setReg(ToSGPR);
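On the implementation side, targets that override reMaterialize must update both the signature and the delegating call, since TargetInstrInfo::reMaterialize loses the parameter too. Skeleton of the updated override for a hypothetical target, condensed from the SIInstrInfo hunks above:

    void MyTargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator I,
                                          Register DestReg, unsigned SubIdx,
                                          const MachineInstr &Orig) const {
      // Target-specific rewriting of Orig would happen here; otherwise
      // fall back to the generic copy-based rematerialization.
      TargetInstrInfo::reMaterialize(MBB, I, DestReg, SubIdx, Orig);
    }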
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 8d693b1..c048b85 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -307,22 +307,19 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
Register DestReg, unsigned SubIdx,
- const MachineInstr &Orig,
- const TargetRegisterInfo &TRI) const override;
+ const MachineInstr &Orig) const override;
// Splits a V_MOV_B64_DPP_PSEUDO opcode into a pair of v_mov_b32_dpp
// instructions. Returns a pair of generated instructions.
@@ -1622,9 +1619,8 @@ public:
/// Return true if this opcode should not be used by codegen.
bool isAsmOnlyOpcode(int MCOp) const;
- const TargetRegisterClass *
- getRegClass(const MCInstrDesc &TID, unsigned OpNum,
- const TargetRegisterInfo *TRI) const override;
+ const TargetRegisterClass *getRegClass(const MCInstrDesc &TID,
+ unsigned OpNum) const override;
void fixImplicitOperands(MachineInstr &MI) const;
diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index 00aae2c9..fcf91e0 100644
--- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -1337,11 +1337,9 @@ SILoadStoreOptimizer::checkAndPrepareMerge(CombineInfo &CI,
int Data1Idx = AMDGPU::getNamedOperandIdx(Write2Opc.getOpcode(),
AMDGPU::OpName::data1);
- const TargetRegisterClass *DataRC0 =
- TII->getRegClass(Write2Opc, Data0Idx, TRI);
+ const TargetRegisterClass *DataRC0 = TII->getRegClass(Write2Opc, Data0Idx);
- const TargetRegisterClass *DataRC1 =
- TII->getRegClass(Write2Opc, Data1Idx, TRI);
+ const TargetRegisterClass *DataRC1 = TII->getRegClass(Write2Opc, Data1Idx);
if (unsigned SubReg = Data0->getSubReg()) {
DataRC0 = TRI->getMatchingSuperRegClass(MRI->getRegClass(Data0->getReg()),
diff --git a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
index 40eeeb8..cbd08f0 100644
--- a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
@@ -117,27 +117,26 @@ static void insertCSRSaves(MachineBasicBlock &SaveBlock,
MachineFunction &MF = *SaveBlock.getParent();
const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
- const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
const SIRegisterInfo *RI = ST.getRegisterInfo();
MachineBasicBlock::iterator I = SaveBlock.begin();
- if (!TFI->spillCalleeSavedRegisters(SaveBlock, I, CSI, TRI)) {
+ if (!TFI->spillCalleeSavedRegisters(SaveBlock, I, CSI, RI)) {
for (const CalleeSavedInfo &CS : CSI) {
// Insert the spill to the stack frame.
MCRegister Reg = CS.getReg();
MachineInstrSpan MIS(I, &SaveBlock);
- const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(
+ const TargetRegisterClass *RC = RI->getMinimalPhysRegClass(
Reg, Reg == RI->getReturnAddressReg(MF) ? MVT::i64 : MVT::i32);
// If this value was already livein, we probably have a direct use of the
// incoming register value, so don't kill at the spill point. This happens
// since we pass some special inputs (workgroup IDs) in the callee saved
// range.
- const bool IsLiveIn = isLiveIntoMBB(Reg, SaveBlock, TRI);
+ const bool IsLiveIn = isLiveIntoMBB(Reg, SaveBlock, RI);
TII.storeRegToStackSlot(SaveBlock, I, Reg, !IsLiveIn, CS.getFrameIdx(),
- RC, TRI, Register());
+ RC, Register());
if (Indexes) {
assert(std::distance(MIS.begin(), I) == 1);
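A side effect visible in this hunk: frame-lowering helpers like insertCSRSaves no longer need a generic TargetRegisterInfo local at all; they hand their concrete register info (here SIRegisterInfo) to spillCalleeSavedRegisters and call the spill hook without it. The generic shape of such a CSR save loop after this change, as a sketch:

    for (const CalleeSavedInfo &CS : CSI) {
      MCRegister Reg = CS.getReg();
      const TargetRegisterClass *RC = RI->getMinimalPhysRegClass(Reg);
      // No TargetRegisterInfo argument anymore; the hook finds it itself.
      TII.storeRegToStackSlot(SaveBlock, I, Reg, /*isKill=*/true,
                              CS.getFrameIdx(), RC, Register());
    }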
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index caff354..86ca22c 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -1346,7 +1346,7 @@ void SIPeepholeSDWA::legalizeScalarOperands(MachineInstr &MI,
continue;
unsigned I = Op.getOperandNo();
- const TargetRegisterClass *OpRC = TII->getRegClass(Desc, I, TRI);
+ const TargetRegisterClass *OpRC = TII->getRegClass(Desc, I);
if (!OpRC || !TRI->isVSSuperClass(OpRC))
continue;
diff --git a/llvm/lib/Target/ARC/ARCInstrInfo.cpp b/llvm/lib/Target/ARC/ARCInstrInfo.cpp
index 2dec6ff..e17ecbf 100644
--- a/llvm/lib/Target/ARC/ARCInstrInfo.cpp
+++ b/llvm/lib/Target/ARC/ARCInstrInfo.cpp
@@ -294,8 +294,7 @@ void ARCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
void ARCInstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register SrcReg,
- bool IsKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool IsKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags) const {
DebugLoc DL = MBB.findDebugLoc(I);
MachineFunction &MF = *MBB.getParent();
@@ -307,11 +306,11 @@ void ARCInstrInfo::storeRegToStackSlot(
MFI.getObjectAlign(FrameIndex));
assert(MMO && "Couldn't get MachineMemOperand for store to stack.");
- assert(TRI->getSpillSize(*RC) == 4 &&
+ assert(TRI.getSpillSize(*RC) == 4 &&
"Only support 4-byte stores to stack now.");
assert(ARC::GPR32RegClass.hasSubClassEq(RC) &&
"Only support GPR32 stores to stack now.");
- LLVM_DEBUG(dbgs() << "Created store reg=" << printReg(SrcReg, TRI)
+ LLVM_DEBUG(dbgs() << "Created store reg=" << printReg(SrcReg, &TRI)
<< " to FrameIndex=" << FrameIndex << "\n");
BuildMI(MBB, I, DL, get(ARC::ST_rs9))
.addReg(SrcReg, getKillRegState(IsKill))
@@ -324,7 +323,6 @@ void ARCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register DestReg, int FrameIndex,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
DebugLoc DL = MBB.findDebugLoc(I);
@@ -336,11 +334,11 @@ void ARCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MFI.getObjectAlign(FrameIndex));
assert(MMO && "Couldn't get MachineMemOperand for store to stack.");
- assert(TRI->getSpillSize(*RC) == 4 &&
+ assert(TRI.getSpillSize(*RC) == 4 &&
"Only support 4-byte loads from stack now.");
assert(ARC::GPR32RegClass.hasSubClassEq(RC) &&
"Only support GPR32 stores to stack now.");
- LLVM_DEBUG(dbgs() << "Created load reg=" << printReg(DestReg, TRI)
+ LLVM_DEBUG(dbgs() << "Created load reg=" << printReg(DestReg, &TRI)
<< " from FrameIndex=" << FrameIndex << "\n");
BuildMI(MBB, I, DL, get(ARC::LD_rs9))
.addReg(DestReg, RegState::Define)
diff --git a/llvm/lib/Target/ARC/ARCInstrInfo.h b/llvm/lib/Target/ARC/ARCInstrInfo.h
index 2cf05ba..ebeaf87 100644
--- a/llvm/lib/Target/ARC/ARCInstrInfo.h
+++ b/llvm/lib/Target/ARC/ARCInstrInfo.h
@@ -70,14 +70,12 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool IsKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool IsKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
bool
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index b466ca6f..6077c18 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -929,15 +929,15 @@ ARMBaseInstrInfo::describeLoadedValue(const MachineInstr &MI,
return TargetInstrInfo::describeLoadedValue(MI, Reg);
}
-const MachineInstrBuilder &
-ARMBaseInstrInfo::AddDReg(MachineInstrBuilder &MIB, unsigned Reg,
- unsigned SubIdx, unsigned State,
- const TargetRegisterInfo *TRI) const {
+const MachineInstrBuilder &ARMBaseInstrInfo::AddDReg(MachineInstrBuilder &MIB,
+ unsigned Reg,
+ unsigned SubIdx,
+ unsigned State) const {
if (!SubIdx)
return MIB.addReg(Reg, State);
if (Register::isPhysicalRegister(Reg))
- return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
+ return MIB.addReg(getRegisterInfo().getSubReg(Reg, SubIdx), State);
return MIB.addReg(Reg, State, SubIdx);
}
@@ -945,18 +945,18 @@ void ARMBaseInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = MF.getFrameInfo();
Align Alignment = MFI.getObjectAlign(FI);
+ const ARMBaseRegisterInfo &TRI = getRegisterInfo();
MachineMemOperand *MMO = MF.getMachineMemOperand(
MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
MFI.getObjectSize(FI), Alignment);
- switch (TRI->getSpillSize(*RC)) {
+ switch (TRI.getSpillSize(*RC)) {
case 2:
if (ARM::HPRRegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRH))
@@ -1011,8 +1011,8 @@ void ARMBaseInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
} else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
if (Subtarget.hasV5TEOps()) {
MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::STRD));
- AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
- AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
+ AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill));
+ AddDReg(MIB, SrcReg, ARM::gsub_1, 0);
MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO)
.add(predOps(ARMCC::AL));
} else {
@@ -1022,8 +1022,8 @@ void ARMBaseInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
.addFrameIndex(FI)
.addMemOperand(MMO)
.add(predOps(ARMCC::AL));
- AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
- AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
+ AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill));
+ AddDReg(MIB, SrcReg, ARM::gsub_1, 0);
}
} else
llvm_unreachable("Unknown reg class!");
@@ -1073,9 +1073,9 @@ void ARMBaseInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
.addFrameIndex(FI)
.add(predOps(ARMCC::AL))
.addMemOperand(MMO);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
- AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill));
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0);
+ AddDReg(MIB, SrcReg, ARM::dsub_2, 0);
}
} else
llvm_unreachable("Unknown reg class!");
@@ -1105,10 +1105,10 @@ void ARMBaseInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
.addFrameIndex(FI)
.add(predOps(ARMCC::AL))
.addMemOperand(MMO);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
- AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill));
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0);
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0);
+ AddDReg(MIB, SrcReg, ARM::dsub_3, 0);
}
} else
llvm_unreachable("Unknown reg class!");
@@ -1125,14 +1125,14 @@ void ARMBaseInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
.addFrameIndex(FI)
.add(predOps(ARMCC::AL))
.addMemOperand(MMO);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0, TRI);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI);
- AddDReg(MIB, SrcReg, ARM::dsub_7, 0, TRI);
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill));
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0);
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0);
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0);
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0);
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0);
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0);
+ AddDReg(MIB, SrcReg, ARM::dsub_7, 0);
} else
llvm_unreachable("Unknown reg class!");
break;
@@ -1208,10 +1208,12 @@ Register ARMBaseInstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
return false;
}
-void ARMBaseInstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void ARMBaseInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ Register DestReg, int FI,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
MachineFunction &MF = *MBB.getParent();
@@ -1221,7 +1223,8 @@ void ARMBaseInstrInfo::loadRegFromStackSlot(
MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
MFI.getObjectSize(FI), Alignment);
- switch (TRI->getSpillSize(*RC)) {
+ const ARMBaseRegisterInfo &TRI = getRegisterInfo();
+ switch (TRI.getSpillSize(*RC)) {
case 2:
if (ARM::HPRRegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DL, get(ARM::VLDRH), DestReg)
@@ -1272,8 +1275,8 @@ void ARMBaseInstrInfo::loadRegFromStackSlot(
if (Subtarget.hasV5TEOps()) {
MIB = BuildMI(MBB, I, DL, get(ARM::LDRD));
- AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
- AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
+ AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead);
+ AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead);
MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO)
.add(predOps(ARMCC::AL));
} else {
@@ -1283,8 +1286,8 @@ void ARMBaseInstrInfo::loadRegFromStackSlot(
.addFrameIndex(FI)
.addMemOperand(MMO)
.add(predOps(ARMCC::AL));
- MIB = AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead);
}
if (DestReg.isPhysical())
@@ -1330,9 +1333,9 @@ void ARMBaseInstrInfo::loadRegFromStackSlot(
.addFrameIndex(FI)
.addMemOperand(MMO)
.add(predOps(ARMCC::AL));
- MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead);
if (DestReg.isPhysical())
MIB.addReg(DestReg, RegState::ImplicitDefine);
}
@@ -1359,10 +1362,10 @@ void ARMBaseInstrInfo::loadRegFromStackSlot(
.addFrameIndex(FI)
.add(predOps(ARMCC::AL))
.addMemOperand(MMO);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead);
if (DestReg.isPhysical())
MIB.addReg(DestReg, RegState::ImplicitDefine);
}
@@ -1380,14 +1383,14 @@ void ARMBaseInstrInfo::loadRegFromStackSlot(
.addFrameIndex(FI)
.add(predOps(ARMCC::AL))
.addMemOperand(MMO);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_7, RegState::DefineNoRead, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_7, RegState::DefineNoRead);
if (DestReg.isPhysical())
MIB.addReg(DestReg, RegState::ImplicitDefine);
} else
@@ -1653,8 +1656,7 @@ static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
void ARMBaseInstrInfo::reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register DestReg, unsigned SubIdx,
- const MachineInstr &Orig,
- const TargetRegisterInfo &TRI) const {
+ const MachineInstr &Orig) const {
unsigned Opcode = Orig.getOpcode();
switch (Opcode) {
default: {
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.h b/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
index 27f8e3b..04e2ab0 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
@@ -216,14 +216,13 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
@@ -232,16 +231,14 @@ public:
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
Register DestReg, unsigned SubIdx,
- const MachineInstr &Orig,
- const TargetRegisterInfo &TRI) const override;
+ const MachineInstr &Orig) const override;
MachineInstr &
duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
const MachineInstr &Orig) const override;
const MachineInstrBuilder &AddDReg(MachineInstrBuilder &MIB, unsigned Reg,
- unsigned SubIdx, unsigned State,
- const TargetRegisterInfo *TRI) const;
+ unsigned SubIdx, unsigned State) const;
bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1,
const MachineRegisterInfo *MRI) const override;
diff --git a/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index ce1cdb3..80921ce 100644
--- a/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -708,7 +708,7 @@ ARMBaseRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
const MCInstrDesc &MCID = TII.get(ADDriOpc);
Register BaseReg = MRI.createVirtualRegister(&ARM::GPRRegClass);
- MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this));
+ MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0));
MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg)
.addFrameIndex(FrameIdx).addImm(Offset);
@@ -881,8 +881,7 @@ ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
Register PredReg = (PIdx == -1) ? Register() : MI.getOperand(PIdx+1).getReg();
const MCInstrDesc &MCID = MI.getDesc();
- const TargetRegisterClass *RegClass =
- TII.getRegClass(MCID, FIOperandNum, this);
+ const TargetRegisterClass *RegClass = TII.getRegClass(MCID, FIOperandNum);
if (Offset == 0 && (FrameReg.isVirtual() || RegClass->contains(FrameReg)))
// Must be addrmode4/6.
diff --git a/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
index 138981a..21a1135 100644
--- a/llvm/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -2342,7 +2342,6 @@ static unsigned estimateRSStackSizeLimit(MachineFunction &MF,
const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
const ARMBaseInstrInfo &TII =
*static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
- const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
unsigned Limit = (1 << 12) - 1;
for (auto &MBB : MF) {
for (auto &MI : MBB) {
@@ -2364,7 +2363,7 @@ static unsigned estimateRSStackSizeLimit(MachineFunction &MF,
break;
const MCInstrDesc &MCID = MI.getDesc();
- const TargetRegisterClass *RegClass = TII.getRegClass(MCID, i, TRI);
+ const TargetRegisterClass *RegClass = TII.getRegClass(MCID, i);
if (RegClass && !RegClass->contains(ARM::SP))
HasNonSPFrameIndex = true;
diff --git a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index cd4299b..db37b76 100644
--- a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -2424,7 +2424,7 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(
Ops.pop_back();
const MCInstrDesc &MCID = TII->get(NewOpc);
- const TargetRegisterClass *TRC = TII->getRegClass(MCID, 0, TRI);
+ const TargetRegisterClass *TRC = TII->getRegClass(MCID, 0);
MRI->constrainRegClass(FirstReg, TRC);
MRI->constrainRegClass(SecondReg, TRC);
@@ -3014,7 +3014,7 @@ static void AdjustBaseAndOffset(MachineInstr *MI, Register NewBaseReg,
MachineFunction *MF = MI->getMF();
MachineRegisterInfo &MRI = MF->getRegInfo();
const MCInstrDesc &MCID = TII->get(MI->getOpcode());
- const TargetRegisterClass *TRC = TII->getRegClass(MCID, BaseOp, TRI);
+ const TargetRegisterClass *TRC = TII->getRegClass(MCID, BaseOp);
MRI.constrainRegClass(NewBaseReg, TRC);
int OldOffset = MI->getOperand(BaseOp + 1).getImm();
@@ -3071,10 +3071,10 @@ static MachineInstr *createPostIncLoadStore(MachineInstr *MI, int Offset,
const MCInstrDesc &MCID = TII->get(NewOpcode);
// Constrain the def register class
- const TargetRegisterClass *TRC = TII->getRegClass(MCID, 0, TRI);
+ const TargetRegisterClass *TRC = TII->getRegClass(MCID, 0);
MRI.constrainRegClass(NewReg, TRC);
// And do the same for the base operand
- TRC = TII->getRegClass(MCID, 2, TRI);
+ TRC = TII->getRegClass(MCID, 2);
MRI.constrainRegClass(MI->getOperand(1).getReg(), TRC);
unsigned AddrMode = (MCID.TSFlags & ARMII::AddrModeMask);
diff --git a/llvm/lib/Target/ARM/MLxExpansionPass.cpp b/llvm/lib/Target/ARM/MLxExpansionPass.cpp
index 8e1bf1d..eb237b4 100644
--- a/llvm/lib/Target/ARM/MLxExpansionPass.cpp
+++ b/llvm/lib/Target/ARM/MLxExpansionPass.cpp
@@ -283,7 +283,7 @@ MLxExpansion::ExpandFPMLxInstruction(MachineBasicBlock &MBB, MachineInstr *MI,
const MCInstrDesc &MCID1 = TII->get(MulOpc);
const MCInstrDesc &MCID2 = TII->get(AddSubOpc);
- Register TmpReg = MRI->createVirtualRegister(TII->getRegClass(MCID1, 0, TRI));
+ Register TmpReg = MRI->createVirtualRegister(TII->getRegClass(MCID1, 0));
MachineInstrBuilder MIB = BuildMI(MBB, MI, MI->getDebugLoc(), MCID1, TmpReg)
.addReg(Src1Reg, getKillRegState(Src1Kill))
diff --git a/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp b/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
index f95ba6a4..01f588f 100644
--- a/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
+++ b/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
@@ -116,7 +116,6 @@ void Thumb1InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
assert((RC == &ARM::tGPRRegClass ||
@@ -142,10 +141,12 @@ void Thumb1InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
}
}
-void Thumb1InstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void Thumb1InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ Register DestReg, int FI,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
assert((RC->hasSuperClassEq(&ARM::tGPRRegClass) ||
(DestReg.isPhysical() && isARMLowRegister(DestReg))) &&
"Unknown regclass!");
diff --git a/llvm/lib/Target/ARM/Thumb1InstrInfo.h b/llvm/lib/Target/ARM/Thumb1InstrInfo.h
index 16350a6..289a30a 100644
--- a/llvm/lib/Target/ARM/Thumb1InstrInfo.h
+++ b/llvm/lib/Target/ARM/Thumb1InstrInfo.h
@@ -43,14 +43,13 @@ public:
bool RenamableSrc = false) const override;
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
bool canCopyGluedNodeDuringSchedule(SDNode *N) const override;
diff --git a/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp b/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
index b66e407..efb92c9 100644
--- a/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
+++ b/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
@@ -165,7 +165,6 @@ void Thumb2InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
DebugLoc DL;
@@ -197,20 +196,22 @@ void Thumb2InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
}
MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2STRDi8));
- AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
- AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
+ AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill));
+ AddDReg(MIB, SrcReg, ARM::gsub_1, 0);
MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));
return;
}
- ARMBaseInstrInfo::storeRegToStackSlot(MBB, I, SrcReg, isKill, FI, RC, TRI,
+ ARMBaseInstrInfo::storeRegToStackSlot(MBB, I, SrcReg, isKill, FI, RC,
Register());
}
-void Thumb2InstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void Thumb2InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ Register DestReg, int FI,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = MF.getFrameInfo();
MachineMemOperand *MMO = MF.getMachineMemOperand(
@@ -238,8 +239,8 @@ void Thumb2InstrInfo::loadRegFromStackSlot(
}
MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2LDRDi8));
- AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
- AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
+ AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead);
+ AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead);
MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));
if (DestReg.isPhysical())
@@ -247,8 +248,7 @@ void Thumb2InstrInfo::loadRegFromStackSlot(
return;
}
- ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC, TRI,
- Register());
+ ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC, Register());
}
void Thumb2InstrInfo::expandLoadStackGuard(
@@ -564,7 +564,7 @@ bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
bool isSub = false;
MachineFunction &MF = *MI.getParent()->getParent();
- const TargetRegisterClass *RegClass = TII.getRegClass(Desc, FrameRegIdx, TRI);
+ const TargetRegisterClass *RegClass = TII.getRegClass(Desc, FrameRegIdx);
// Memory operands in inline assembly always use AddrModeT2_i12.
if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
diff --git a/llvm/lib/Target/ARM/Thumb2InstrInfo.h b/llvm/lib/Target/ARM/Thumb2InstrInfo.h
index 59ef39d..1e11cb3 100644
--- a/llvm/lib/Target/ARM/Thumb2InstrInfo.h
+++ b/llvm/lib/Target/ARM/Thumb2InstrInfo.h
@@ -44,14 +44,13 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
diff --git a/llvm/lib/Target/AVR/AVRInstrInfo.cpp b/llvm/lib/Target/AVR/AVRInstrInfo.cpp
index 5e247cb..6c37ba1 100644
--- a/llvm/lib/Target/AVR/AVRInstrInfo.cpp
+++ b/llvm/lib/Target/AVR/AVRInstrInfo.cpp
@@ -126,8 +126,7 @@ Register AVRInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
void AVRInstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags) const {
MachineFunction &MF = *MBB.getParent();
AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
@@ -142,9 +141,9 @@ void AVRInstrInfo::storeRegToStackSlot(
MFI.getObjectAlign(FrameIndex));
unsigned Opcode = 0;
- if (TRI->isTypeLegalForClass(*RC, MVT::i8)) {
+ if (RI.isTypeLegalForClass(*RC, MVT::i8)) {
Opcode = AVR::STDPtrQRr;
- } else if (TRI->isTypeLegalForClass(*RC, MVT::i16)) {
+ } else if (RI.isTypeLegalForClass(*RC, MVT::i16)) {
Opcode = AVR::STDWPtrQRr;
} else {
llvm_unreachable("Cannot store this register into a stack slot!");
@@ -161,7 +160,6 @@ void AVRInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
Register DestReg, int FrameIndex,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
MachineFunction &MF = *MBB.getParent();
@@ -173,9 +171,9 @@ void AVRInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MFI.getObjectAlign(FrameIndex));
unsigned Opcode = 0;
- if (TRI->isTypeLegalForClass(*RC, MVT::i8)) {
+ if (TRI.isTypeLegalForClass(*RC, MVT::i8)) {
Opcode = AVR::LDDRdPtrQ;
- } else if (TRI->isTypeLegalForClass(*RC, MVT::i16)) {
+ } else if (TRI.isTypeLegalForClass(*RC, MVT::i16)) {
// Opcode = AVR::LDDWRdPtrQ;
//: FIXME: remove this once PR13375 gets fixed
Opcode = AVR::LDDWRdYQ;
diff --git a/llvm/lib/Target/AVR/AVRInstrInfo.h b/llvm/lib/Target/AVR/AVRInstrInfo.h
index 759aea2..4db535a 100644
--- a/llvm/lib/Target/AVR/AVRInstrInfo.h
+++ b/llvm/lib/Target/AVR/AVRInstrInfo.h
@@ -79,13 +79,11 @@ public:
bool RenamableSrc = false) const override;
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
Register isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
diff --git a/llvm/lib/Target/BPF/BPFInstrInfo.cpp b/llvm/lib/Target/BPF/BPFInstrInfo.cpp
index 0e56e65..095e249 100644
--- a/llvm/lib/Target/BPF/BPFInstrInfo.cpp
+++ b/llvm/lib/Target/BPF/BPFInstrInfo.cpp
@@ -127,7 +127,6 @@ void BPFInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register SrcReg, bool IsKill, int FI,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
DebugLoc DL;
@@ -148,10 +147,12 @@ void BPFInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
llvm_unreachable("Can't store this register to stack slot");
}
-void BPFInstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void BPFInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ Register DestReg, int FI,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
DebugLoc DL;
if (I != MBB.end())
DL = I->getDebugLoc();
diff --git a/llvm/lib/Target/BPF/BPFInstrInfo.h b/llvm/lib/Target/BPF/BPFInstrInfo.h
index 911e880..d3ef9bc 100644
--- a/llvm/lib/Target/BPF/BPFInstrInfo.h
+++ b/llvm/lib/Target/BPF/BPFInstrInfo.h
@@ -39,14 +39,13 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
diff --git a/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.cpp b/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.cpp
index 26a8728..48a9085 100644
--- a/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.cpp
+++ b/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.cpp
@@ -1169,8 +1169,8 @@ void DXILBitcodeWriter::writeModuleInfo() {
// We need to hardcode a triple and datalayout that's compatible with the
// historical DXIL triple and datalayout from DXC.
StringRef Triple = "dxil-ms-dx";
- StringRef DL = "e-m:e-p:32:32-i1:8-i8:8-i16:32-i32:32-i64:64-"
- "f16:32-f32:32-f64:64-n8:16:32:64";
+ StringRef DL = "e-m:e-p:32:32-i1:32-i8:8-i16:16-i32:32-i64:64-"
+ "f16:16-f32:32-f64:64-n8:16:32:64";
writeStringRecord(Stream, bitc::MODULE_CODE_TRIPLE, Triple, 0 /*TODO*/);
writeStringRecord(Stream, bitc::MODULE_CODE_DATALAYOUT, DL, 0 /*TODO*/);
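Unrelated to the register-info cleanup, this hunk corrects the hardcoded DXIL datalayout: i1 moves from 8-bit to 32-bit ABI alignment, while i16 and f16 drop from 32-bit to their natural 16-bit alignment, matching the DXC layout the comment above references. A standalone sanity check of the corrected string, as a sketch (not part of the patch):

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Type.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      DataLayout DL("e-m:e-p:32:32-i1:32-i8:8-i16:16-i32:32-i64:64-"
                    "f16:16-f32:32-f64:64-n8:16:32:64");
      // Expect ABI alignments of 4, 2, and 2 bytes respectively.
      outs() << DL.getABITypeAlign(Type::getInt1Ty(Ctx)).value() << ' '
             << DL.getABITypeAlign(Type::getInt16Ty(Ctx)).value() << ' '
             << DL.getABITypeAlign(Type::getHalfTy(Ctx)).value() << '\n';
    }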
diff --git a/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp b/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
index 68f5312..b378ce4 100644
--- a/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
@@ -1886,7 +1886,7 @@ bool BitSimplification::matchHalf(unsigned SelfR,
bool BitSimplification::validateReg(BitTracker::RegisterRef R, unsigned Opc,
unsigned OpNum) {
- auto *OpRC = HII.getRegClass(HII.get(Opc), OpNum, &HRI);
+ auto *OpRC = HII.getRegClass(HII.get(Opc), OpNum);
auto *RRC = HBS::getFinalVRegClass(R, MRI);
return OpRC->hasSubClassEq(RRC);
}
diff --git a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
index dd343d9..df61226 100644
--- a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -1405,7 +1405,7 @@ bool HexagonFrameLowering::insertCSRSpillsInBlock(MachineBasicBlock &MBB,
bool IsKill = !HRI.isEHReturnCalleeSaveReg(Reg);
int FI = I.getFrameIdx();
const TargetRegisterClass *RC = HRI.getMinimalPhysRegClass(Reg);
- HII.storeRegToStackSlot(MBB, MI, Reg, IsKill, FI, RC, &HRI, Register());
+ HII.storeRegToStackSlot(MBB, MI, Reg, IsKill, FI, RC, Register());
if (IsKill)
MBB.addLiveIn(Reg);
}
@@ -1470,7 +1470,7 @@ bool HexagonFrameLowering::insertCSRRestoresInBlock(MachineBasicBlock &MBB,
MCRegister Reg = I.getReg();
const TargetRegisterClass *RC = HRI.getMinimalPhysRegClass(Reg);
int FI = I.getFrameIdx();
- HII.loadRegFromStackSlot(MBB, MI, Reg, FI, RC, &HRI, Register());
+ HII.loadRegFromStackSlot(MBB, MI, Reg, FI, RC, Register());
}
return true;
@@ -1814,8 +1814,7 @@ bool HexagonFrameLowering::expandStoreVecPred(MachineBasicBlock &B,
.addReg(SrcR, getKillRegState(IsKill))
.addReg(TmpR0, RegState::Kill);
- auto *HRI = B.getParent()->getSubtarget<HexagonSubtarget>().getRegisterInfo();
- HII.storeRegToStackSlot(B, It, TmpR1, true, FI, RC, HRI, Register());
+ HII.storeRegToStackSlot(B, It, TmpR1, true, FI, RC, Register());
expandStoreVec(B, std::prev(It), MRI, HII, NewRegs);
NewRegs.push_back(TmpR0);
@@ -1844,9 +1843,7 @@ bool HexagonFrameLowering::expandLoadVecPred(MachineBasicBlock &B,
BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)
.addImm(0x01010101);
- MachineFunction &MF = *B.getParent();
- auto *HRI = MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
- HII.loadRegFromStackSlot(B, It, TmpR1, FI, RC, HRI, Register());
+ HII.loadRegFromStackSlot(B, It, TmpR1, FI, RC, Register());
expandLoadVec(B, std::prev(It), MRI, HII, NewRegs);
BuildMI(B, It, DL, HII.get(Hexagon::V6_vandvrt), DstR)
@@ -2225,7 +2222,7 @@ void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF,
if (!Bad) {
// If the addressing mode is ok, check the register class.
unsigned OpNum = Load ? 0 : 2;
- auto *RC = HII.getRegClass(In.getDesc(), OpNum, &HRI);
+ auto *RC = HII.getRegClass(In.getDesc(), OpNum);
RC = getCommonRC(SI.RC, RC);
if (RC == nullptr)
Bad = true;
@@ -2395,7 +2392,7 @@ void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF,
HexagonBlockRanges::RegisterRef SrcRR = { SrcOp.getReg(),
SrcOp.getSubReg() };
- auto *RC = HII.getRegClass(SI.getDesc(), 2, &HRI);
+ auto *RC = HII.getRegClass(SI.getDesc(), 2);
// The this-> is needed to unconfuse MSVC.
Register FoundR = this->findPhysReg(MF, Range, IM, DM, RC);
LLVM_DEBUG(dbgs() << "Replacement reg:" << printReg(FoundR, &HRI)
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index dd9f2fa..7682af4 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -964,7 +964,6 @@ void HexagonInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
DebugLoc DL = MBB.findDebugLoc(I);
@@ -1009,10 +1008,12 @@ void HexagonInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
}
}
-void HexagonInstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void HexagonInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ Register DestReg, int FI,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
DebugLoc DL = MBB.findDebugLoc(I);
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = MF.getFrameInfo();
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.h b/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
index 7a0c77c..796b978 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -188,8 +188,7 @@ public:
/// is true, the register operand is the last use and must be marked kill.
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
/// Load the specified register of the given register class from the specified
@@ -198,7 +197,7 @@ public:
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
/// This function is called for all pseudo instructions
diff --git a/llvm/lib/Target/Hexagon/HexagonLoadStoreWidening.cpp b/llvm/lib/Target/Hexagon/HexagonLoadStoreWidening.cpp
index 7cbd81f..54969b2 100644
--- a/llvm/lib/Target/Hexagon/HexagonLoadStoreWidening.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonLoadStoreWidening.cpp
@@ -646,7 +646,7 @@ bool HexagonLoadStoreWidening::createWideStores(InstrGroup &OG, InstrGroup &NG,
MachineInstr *CombI;
if (Acc != 0) {
const MCInstrDesc &TfrD = TII->get(Hexagon::A2_tfrsi);
- const TargetRegisterClass *RC = TII->getRegClass(TfrD, 0, TRI);
+ const TargetRegisterClass *RC = TII->getRegClass(TfrD, 0);
Register VReg = MF->getRegInfo().createVirtualRegister(RC);
MachineInstr *TfrI = BuildMI(*MF, DL, TfrD, VReg).addImm(LowerAcc);
NG.push_back(TfrI);
@@ -677,7 +677,7 @@ bool HexagonLoadStoreWidening::createWideStores(InstrGroup &OG, InstrGroup &NG,
} else {
// Create vreg = A2_tfrsi #Acc; mem[hw] = vreg
const MCInstrDesc &TfrD = TII->get(Hexagon::A2_tfrsi);
- const TargetRegisterClass *RC = TII->getRegClass(TfrD, 0, TRI);
+ const TargetRegisterClass *RC = TII->getRegClass(TfrD, 0);
Register VReg = MF->getRegInfo().createVirtualRegister(RC);
MachineInstr *TfrI = BuildMI(*MF, DL, TfrD, VReg).addImm(int(Acc));
NG.push_back(TfrI);
diff --git a/llvm/lib/Target/Hexagon/HexagonSubtarget.h b/llvm/lib/Target/Hexagon/HexagonSubtarget.h
index 30794f6..7dfede2 100644
--- a/llvm/lib/Target/Hexagon/HexagonSubtarget.h
+++ b/llvm/lib/Target/Hexagon/HexagonSubtarget.h
@@ -294,6 +294,8 @@ public:
bool useBSBScheduling() const { return UseBSBScheduling; }
bool enableMachineScheduler() const override;
+ bool enableTerminalRule() const override { return true; }
+
// Always use the TargetLowering default scheduler.
// FIXME: This will use the vliw scheduler which is probably just hurting
// compiler time and will be removed eventually anyway.
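
Aside from the TRI-parameter removal, this hunk opts Hexagon into a register-coalescer heuristic that is off by default; RISCVSubtarget.h receives the identical override later in this patch. For any other target the opt-in is the same one-liner, sketched here with a placeholder FooSubtarget:

    // In FooSubtarget.h: opt into the coalescer's terminal rule.
    bool enableTerminalRule() const override { return true; }
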
diff --git a/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
index cb88d1a..d39b79a 100644
--- a/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
@@ -653,7 +653,7 @@ bool HexagonPacketizerList::canPromoteToNewValueStore(const MachineInstr &MI,
const MCInstrDesc& MCID = PacketMI.getDesc();
// First operand is always the result.
- const TargetRegisterClass *PacketRC = HII->getRegClass(MCID, 0, HRI);
+ const TargetRegisterClass *PacketRC = HII->getRegClass(MCID, 0);
// Double regs can not feed into new value store: PRM section: 5.4.2.2.
if (PacketRC == &Hexagon::DoubleRegsRegClass)
return false;
@@ -866,7 +866,7 @@ bool HexagonPacketizerList::canPromoteToDotNew(const MachineInstr &MI,
return false;
const MCInstrDesc& MCID = PI.getDesc();
- const TargetRegisterClass *VecRC = HII->getRegClass(MCID, 0, HRI);
+ const TargetRegisterClass *VecRC = HII->getRegClass(MCID, 0);
if (DisableVecDblNVStores && VecRC == &Hexagon::HvxWRRegClass)
return false;
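
The two packetizer hunks above show the second API change in this patch: TargetInstrInfo::getRegClass now takes only the instruction description and the operand index, presumably because the instruction info can reach its register info without a caller-supplied TRI. A minimal before/after sketch, where SomeTarget::SOME_OPCODE and the surrounding variables are placeholders:

    const MCInstrDesc &Desc = TII->get(SomeTarget::SOME_OPCODE);
    // Before: const TargetRegisterClass *RC = TII->getRegClass(Desc, 0, TRI);
    const TargetRegisterClass *RC = TII->getRegClass(Desc, 0);
    Register VReg = MRI.createVirtualRegister(RC);
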
diff --git a/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp b/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
index b3d2856..14b7557 100644
--- a/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
+++ b/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
@@ -49,8 +49,7 @@ void LanaiInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
void LanaiInstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator Position,
Register SourceRegister, bool IsKill, int FrameIndex,
- const TargetRegisterClass *RegisterClass,
- const TargetRegisterInfo * /*RegisterInfo*/, Register /*VReg*/,
+ const TargetRegisterClass *RegisterClass, Register /*VReg*/,
MachineInstr::MIFlag /*Flags*/) const {
DebugLoc DL;
if (Position != MBB.end()) {
@@ -70,8 +69,7 @@ void LanaiInstrInfo::storeRegToStackSlot(
void LanaiInstrInfo::loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator Position,
Register DestinationRegister, int FrameIndex,
- const TargetRegisterClass *RegisterClass,
- const TargetRegisterInfo * /*RegisterInfo*/, Register /*VReg*/,
+ const TargetRegisterClass *RegisterClass, Register /*VReg*/,
MachineInstr::MIFlag /*Flags*/) const {
DebugLoc DL;
if (Position != MBB.end()) {
diff --git a/llvm/lib/Target/Lanai/LanaiInstrInfo.h b/llvm/lib/Target/Lanai/LanaiInstrInfo.h
index d9827624..155e2f0 100644
--- a/llvm/lib/Target/Lanai/LanaiInstrInfo.h
+++ b/llvm/lib/Target/Lanai/LanaiInstrInfo.h
@@ -58,15 +58,13 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator Position,
Register SourceRegister, bool IsKill, int FrameIndex,
- const TargetRegisterClass *RegisterClass,
- const TargetRegisterInfo *RegisterInfo, Register VReg,
+ const TargetRegisterClass *RegisterClass, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator Position,
Register DestinationRegister, int FrameIndex,
- const TargetRegisterClass *RegisterClass,
- const TargetRegisterInfo *RegisterInfo, Register VReg,
+ const TargetRegisterClass *RegisterClass, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
diff --git a/llvm/lib/Target/LoongArch/LoongArchDeadRegisterDefinitions.cpp b/llvm/lib/Target/LoongArch/LoongArchDeadRegisterDefinitions.cpp
index 0ccebeb3..6358e348 100644
--- a/llvm/lib/Target/LoongArch/LoongArchDeadRegisterDefinitions.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchDeadRegisterDefinitions.cpp
@@ -60,7 +60,6 @@ bool LoongArchDeadRegisterDefinitions::runOnMachineFunction(
return false;
const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
- const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
LiveIntervals &LIS = getAnalysis<LiveIntervalsWrapperPass>().getLIS();
LLVM_DEBUG(dbgs() << "***** LoongArchDeadRegisterDefinitions *****\n");
@@ -86,7 +85,7 @@ bool LoongArchDeadRegisterDefinitions::runOnMachineFunction(
continue;
LLVM_DEBUG(dbgs() << " Dead def operand #" << I << " in:\n ";
MI.print(dbgs()));
- const TargetRegisterClass *RC = TII->getRegClass(Desc, I, TRI);
+ const TargetRegisterClass *RC = TII->getRegClass(Desc, I);
if (!(RC && RC->contains(LoongArch::R0))) {
LLVM_DEBUG(dbgs() << " Ignoring, register is not a GPR.\n");
continue;
diff --git a/llvm/lib/Target/LoongArch/LoongArchFrameLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchFrameLowering.cpp
index 1493bf4..690b063 100644
--- a/llvm/lib/Target/LoongArch/LoongArchFrameLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchFrameLowering.cpp
@@ -449,7 +449,7 @@ bool LoongArchFrameLowering::spillCalleeSavedRegisters(
bool IsKill =
!(Reg == LoongArch::R1 && MF->getFrameInfo().isReturnAddressTaken());
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.storeRegToStackSlot(MBB, MI, Reg, IsKill, CS.getFrameIdx(), RC, TRI,
+ TII.storeRegToStackSlot(MBB, MI, Reg, IsKill, CS.getFrameIdx(), RC,
Register());
}
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp
index 5eb3bd6..9fc862a 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp
@@ -113,14 +113,14 @@ void LoongArchInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
void LoongArchInstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register SrcReg,
bool IsKill, int FI, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
- MachineInstr::MIFlag Flags) const {
+    Register VReg, MachineInstr::MIFlag Flags) const {
MachineFunction *MF = MBB.getParent();
MachineFrameInfo &MFI = MF->getFrameInfo();
unsigned Opcode;
if (LoongArch::GPRRegClass.hasSubClassEq(RC))
- Opcode = TRI->getRegSizeInBits(LoongArch::GPRRegClass) == 32
+ Opcode = TRI.getRegSizeInBits(LoongArch::GPRRegClass) == 32
? LoongArch::ST_W
: LoongArch::ST_D;
else if (LoongArch::FPR32RegClass.hasSubClassEq(RC))
@@ -149,8 +149,8 @@ void LoongArchInstrInfo::storeRegToStackSlot(
void LoongArchInstrInfo::loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DstReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+ int FI, const TargetRegisterClass *RC, Register VReg,
+ MachineInstr::MIFlag Flags) const {
MachineFunction *MF = MBB.getParent();
MachineFrameInfo &MFI = MF->getFrameInfo();
DebugLoc DL;
@@ -159,7 +159,7 @@ void LoongArchInstrInfo::loadRegFromStackSlot(
unsigned Opcode;
if (LoongArch::GPRRegClass.hasSubClassEq(RC))
- Opcode = TRI->getRegSizeInBits(LoongArch::GPRRegClass) == 32
+ Opcode = RegInfo.getRegSizeInBits(LoongArch::GPRRegClass) == 32
? LoongArch::LD_W
: LoongArch::LD_D;
else if (LoongArch::FPR32RegClass.hasSubClassEq(RC))
@@ -665,13 +665,13 @@ void LoongArchInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
if (FrameIndex == -1)
report_fatal_error("The function size is incorrectly estimated.");
storeRegToStackSlot(MBB, PCALAU12I, Scav, /*IsKill=*/true, FrameIndex,
- &LoongArch::GPRRegClass, TRI, Register());
+ &LoongArch::GPRRegClass, Register());
TRI->eliminateFrameIndex(std::prev(PCALAU12I.getIterator()),
/*SpAdj=*/0, /*FIOperandNum=*/1);
PCALAU12I.getOperand(1).setMBB(&RestoreBB);
ADDI.getOperand(2).setMBB(&RestoreBB);
loadRegFromStackSlot(RestoreBB, RestoreBB.end(), Scav, FrameIndex,
- &LoongArch::GPRRegClass, TRI, Register());
+ &LoongArch::GPRRegClass, Register());
TRI->eliminateFrameIndex(RestoreBB.back(),
/*SpAdj=*/0, /*FIOperandNum=*/1);
}
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.h b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.h
index d43d229..9f7a0a2 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.h
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.h
@@ -40,13 +40,11 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
- bool IsKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool IsKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg,
- int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
// Materializes the given integer Val into DstReg.
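
Call sites across the remaining targets follow the same mechanical rewrite seen in the LoongArch hunks: the TRI argument sitting between the register class and the VReg argument is dropped, and no other operand moves. A sketch of a migrated caller, with Reg, FrameIdx, and RC as placeholders:

    // Before: TII.storeRegToStackSlot(MBB, MI, Reg, /*IsKill=*/true,
    //                                 FrameIdx, RC, TRI, Register());
    TII.storeRegToStackSlot(MBB, MI, Reg, /*IsKill=*/true, FrameIdx, RC,
                            Register());
    TII.loadRegFromStackSlot(MBB, MI, Reg, FrameIdx, RC, Register());
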
diff --git a/llvm/lib/Target/M68k/M68kInstrInfo.cpp b/llvm/lib/Target/M68k/M68kInstrInfo.cpp
index c6be190b..91077ff 100644
--- a/llvm/lib/Target/M68k/M68kInstrInfo.cpp
+++ b/llvm/lib/Target/M68k/M68kInstrInfo.cpp
@@ -43,7 +43,7 @@ using namespace llvm;
void M68kInstrInfo::anchor() {}
M68kInstrInfo::M68kInstrInfo(const M68kSubtarget &STI)
- : M68kGenInstrInfo(STI, M68k::ADJCALLSTACKDOWN, M68k::ADJCALLSTACKUP, 0,
+ : M68kGenInstrInfo(STI, RI, M68k::ADJCALLSTACKDOWN, M68k::ADJCALLSTACKUP, 0,
M68k::RET),
Subtarget(STI), RI(STI) {}
@@ -838,15 +838,14 @@ bool M68kInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
void M68kInstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool IsKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool IsKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags) const {
const MachineFrameInfo &MFI = MBB.getParent()->getFrameInfo();
- assert(MFI.getObjectSize(FrameIndex) >= TRI->getSpillSize(*RC) &&
+ assert(MFI.getObjectSize(FrameIndex) >= TRI.getSpillSize(*RC) &&
"Stack slot is too small to store");
(void)MFI;
- unsigned Opc = getStoreRegOpcode(SrcReg, RC, TRI, Subtarget);
+ unsigned Opc = getStoreRegOpcode(SrcReg, RC, &TRI, Subtarget);
DebugLoc DL = MBB.findDebugLoc(MI);
// (0,FrameIndex) <- $reg
M68k::addFrameReference(BuildMI(MBB, MI, DL, get(Opc)), FrameIndex)
@@ -857,15 +856,14 @@ void M68kInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
Register DstReg, int FrameIndex,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
const MachineFrameInfo &MFI = MBB.getParent()->getFrameInfo();
- assert(MFI.getObjectSize(FrameIndex) >= TRI->getSpillSize(*RC) &&
+ assert(MFI.getObjectSize(FrameIndex) >= TRI.getSpillSize(*RC) &&
"Stack slot is too small to load");
(void)MFI;
- unsigned Opc = getLoadRegOpcode(DstReg, RC, TRI, Subtarget);
+ unsigned Opc = getLoadRegOpcode(DstReg, RC, &TRI, Subtarget);
DebugLoc DL = MBB.findDebugLoc(MI);
M68k::addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DstReg), FrameIndex);
}
diff --git a/llvm/lib/Target/M68k/M68kInstrInfo.h b/llvm/lib/Target/M68k/M68kInstrInfo.h
index 97615d6..2b3789d 100644
--- a/llvm/lib/Target/M68k/M68kInstrInfo.h
+++ b/llvm/lib/Target/M68k/M68kInstrInfo.h
@@ -280,14 +280,12 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool IsKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool IsKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
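
Note that the M68k constructor hunk above also threads the register-info member RI into the generated base-class constructor. That appears to be what lets implementations in this patch use a stored reference (TRI here; RI or RegInfo in other targets) in place of the removed parameter. A hypothetical helper in that style, where FooInstrInfo, Foo::SW, and Foo::SD are placeholders:

    // Pick a spill opcode by register size, using the stored TRI reference
    // rather than a caller-supplied pointer.
    unsigned FooInstrInfo::getSpillOpcode(const TargetRegisterClass *RC) const {
      return TRI.getRegSizeInBits(*RC) == 32 ? Foo::SW : Foo::SD;
    }
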
diff --git a/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp b/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
index af053b8..0fb4e9d 100644
--- a/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
+++ b/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
@@ -32,8 +32,7 @@ MSP430InstrInfo::MSP430InstrInfo(const MSP430Subtarget &STI)
void MSP430InstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool isKill, int FrameIdx, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIdx, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags) const {
DebugLoc DL;
if (MI != MBB.end()) DL = MI->getDebugLoc();
@@ -57,10 +56,12 @@ void MSP430InstrInfo::storeRegToStackSlot(
llvm_unreachable("Cannot store this register to stack slot!");
}
-void MSP430InstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIdx, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void MSP430InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ Register DestReg, int FrameIdx,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
DebugLoc DL;
if (MI != MBB.end()) DL = MI->getDebugLoc();
MachineFunction &MF = *MBB.getParent();
diff --git a/llvm/lib/Target/MSP430/MSP430InstrInfo.h b/llvm/lib/Target/MSP430/MSP430InstrInfo.h
index 316c136..c0a3984 100644
--- a/llvm/lib/Target/MSP430/MSP430InstrInfo.h
+++ b/llvm/lib/Target/MSP430/MSP430InstrInfo.h
@@ -42,13 +42,11 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIdx, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ int FrameIdx, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
unsigned getInstSizeInBytes(const MachineInstr &MI) const override;
diff --git a/llvm/lib/Target/Mips/Mips16InstrInfo.cpp b/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
index 69b96cf..d23ec57 100644
--- a/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
+++ b/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
@@ -101,7 +101,6 @@ void Mips16InstrInfo::storeRegToStack(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
int64_t Offset,
MachineInstr::MIFlag Flags) const {
DebugLoc DL;
@@ -116,10 +115,12 @@ void Mips16InstrInfo::storeRegToStack(MachineBasicBlock &MBB,
.addMemOperand(MMO);
}
-void Mips16InstrInfo::loadRegFromStack(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- int64_t Offset, MachineInstr::MIFlag Flags) const {
+void Mips16InstrInfo::loadRegFromStack(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ Register DestReg, int FI,
+ const TargetRegisterClass *RC,
+ int64_t Offset,
+ MachineInstr::MIFlag Flags) const {
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad);
diff --git a/llvm/lib/Target/Mips/Mips16InstrInfo.h b/llvm/lib/Target/Mips/Mips16InstrInfo.h
index 2834fd3..4300d08 100644
--- a/llvm/lib/Target/Mips/Mips16InstrInfo.h
+++ b/llvm/lib/Target/Mips/Mips16InstrInfo.h
@@ -56,13 +56,14 @@ public:
void storeRegToStack(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, int64_t Offset,
+ int64_t Offset,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStack(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, int64_t Offset,
+      int64_t Offset,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
diff --git a/llvm/lib/Target/Mips/MipsInstrInfo.h b/llvm/lib/Target/Mips/MipsInstrInfo.h
index fc94248..0b90972 100644
--- a/llvm/lib/Target/Mips/MipsInstrInfo.h
+++ b/llvm/lib/Target/Mips/MipsInstrInfo.h
@@ -147,31 +147,28 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override {
- storeRegToStack(MBB, MBBI, SrcReg, isKill, FrameIndex, RC, TRI, 0, Flags);
+ storeRegToStack(MBB, MBBI, SrcReg, isKill, FrameIndex, RC, 0, Flags);
}
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override {
- loadRegFromStack(MBB, MBBI, DestReg, FrameIndex, RC, TRI, 0, Flags);
+ loadRegFromStack(MBB, MBBI, DestReg, FrameIndex, RC, 0, Flags);
}
virtual void
storeRegToStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
Register SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- int64_t Offset,
+ const TargetRegisterClass *RC, int64_t Offset,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const = 0;
virtual void loadRegFromStack(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, int64_t Offset,
+ int FrameIndex, const TargetRegisterClass *RC, int64_t Offset,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const = 0;
virtual void adjustStackPtr(unsigned SP, int64_t Amount,
diff --git a/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp b/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
index f08704a..942194c 100644
--- a/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
@@ -172,7 +172,7 @@ void ExpandPseudo::expandLoadCCond(MachineBasicBlock &MBB, Iter I) {
Register VR = MRI.createVirtualRegister(RC);
Register Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
- TII.loadRegFromStack(MBB, I, VR, FI, RC, &RegInfo, 0);
+ TII.loadRegFromStack(MBB, I, VR, FI, RC, 0);
BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), Dst)
.addReg(VR, RegState::Kill);
}
@@ -189,7 +189,7 @@ void ExpandPseudo::expandStoreCCond(MachineBasicBlock &MBB, Iter I) {
BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), VR)
.addReg(Src, getKillRegState(I->getOperand(0).isKill()));
- TII.storeRegToStack(MBB, I, VR, true, FI, RC, &RegInfo, 0);
+ TII.storeRegToStack(MBB, I, VR, true, FI, RC, 0);
}
void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I,
@@ -210,9 +210,9 @@ void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I,
DebugLoc DL = I->getDebugLoc();
const MCInstrDesc &Desc = TII.get(TargetOpcode::COPY);
- TII.loadRegFromStack(MBB, I, VR0, FI, RC, &RegInfo, 0);
+ TII.loadRegFromStack(MBB, I, VR0, FI, RC, 0);
BuildMI(MBB, I, DL, Desc, Lo).addReg(VR0, RegState::Kill);
- TII.loadRegFromStack(MBB, I, VR1, FI, RC, &RegInfo, RegSize);
+ TII.loadRegFromStack(MBB, I, VR1, FI, RC, RegSize);
BuildMI(MBB, I, DL, Desc, Hi).addReg(VR1, RegState::Kill);
}
@@ -234,9 +234,9 @@ void ExpandPseudo::expandStoreACC(MachineBasicBlock &MBB, Iter I,
DebugLoc DL = I->getDebugLoc();
BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
- TII.storeRegToStack(MBB, I, VR0, true, FI, RC, &RegInfo, 0);
+ TII.storeRegToStack(MBB, I, VR0, true, FI, RC, 0);
BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
- TII.storeRegToStack(MBB, I, VR1, true, FI, RC, &RegInfo, RegSize);
+ TII.storeRegToStack(MBB, I, VR1, true, FI, RC, RegSize);
}
bool ExpandPseudo::expandCopy(MachineBasicBlock &MBB, Iter I) {
@@ -321,11 +321,9 @@ bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock &MBB,
int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(MF, RC2);
if (!Subtarget.isLittle())
std::swap(LoReg, HiReg);
- TII.storeRegToStack(MBB, I, LoReg, I->getOperand(1).isKill(), FI, RC,
- &RegInfo, 0);
- TII.storeRegToStack(MBB, I, HiReg, I->getOperand(2).isKill(), FI, RC,
- &RegInfo, 4);
- TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, 0);
+ TII.storeRegToStack(MBB, I, LoReg, I->getOperand(1).isKill(), FI, RC, 0);
+ TII.storeRegToStack(MBB, I, HiReg, I->getOperand(2).isKill(), FI, RC, 4);
+ TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, 0);
return true;
}
@@ -385,8 +383,8 @@ bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB,
// We re-use the same spill slot each time so that the stack frame doesn't
// grow too much in functions with a large number of moves.
int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(MF, RC);
- TII.storeRegToStack(MBB, I, SrcReg, Op1.isKill(), FI, RC, &RegInfo, 0);
- TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, Offset);
+ TII.storeRegToStack(MBB, I, SrcReg, Op1.isKill(), FI, RC, 0);
+ TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, Offset);
return true;
}
@@ -480,8 +478,7 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF,
if (!MBB.isLiveIn(ABI.GetEhDataReg(I)))
MBB.addLiveIn(ABI.GetEhDataReg(I));
TII.storeRegToStackSlot(MBB, MBBI, ABI.GetEhDataReg(I), false,
- MipsFI->getEhDataRegFI(I), RC, &RegInfo,
- Register());
+ MipsFI->getEhDataRegFI(I), RC, Register());
}
// Emit .cfi_offset directives for eh data registers.
@@ -579,8 +576,7 @@ void MipsSEFrameLowering::emitInterruptPrologueStub(
.setMIFlag(MachineInstr::FrameSetup);
STI.getInstrInfo()->storeRegToStack(MBB, MBBI, Mips::K1, false,
- MipsFI->getISRRegFI(0), PtrRC,
- STI.getRegisterInfo(), 0);
+ MipsFI->getISRRegFI(0), PtrRC, 0);
// Fetch and Spill Status
MBB.addLiveIn(Mips::COP012);
@@ -590,8 +586,7 @@ void MipsSEFrameLowering::emitInterruptPrologueStub(
.setMIFlag(MachineInstr::FrameSetup);
STI.getInstrInfo()->storeRegToStack(MBB, MBBI, Mips::K1, false,
- MipsFI->getISRRegFI(1), PtrRC,
- STI.getRegisterInfo(), 0);
+ MipsFI->getISRRegFI(1), PtrRC, 0);
// Build the configuration for disabling lower priority interrupts. Non EIC
// interrupts need to be masked off with zero, EIC from the Cause register.
@@ -657,7 +652,6 @@ void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
const MipsSEInstrInfo &TII =
*static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
- const MipsRegisterInfo &RegInfo = *STI.getRegisterInfo();
DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
MipsABIInfo ABI = STI.getABI();
@@ -690,8 +684,7 @@ void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
// Insert instructions that restore eh data registers.
for (int J = 0; J < 4; ++J) {
TII.loadRegFromStackSlot(MBB, I, ABI.GetEhDataReg(J),
- MipsFI->getEhDataRegFI(J), RC, &RegInfo,
- Register());
+ MipsFI->getEhDataRegFI(J), RC, Register());
}
}
@@ -722,17 +715,15 @@ void MipsSEFrameLowering::emitInterruptEpilogueStub(
BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::EHB));
// Restore EPC
- STI.getInstrInfo()->loadRegFromStackSlot(MBB, MBBI, Mips::K1,
- MipsFI->getISRRegFI(0), PtrRC,
- STI.getRegisterInfo(), Register());
+ STI.getInstrInfo()->loadRegFromStackSlot(
+ MBB, MBBI, Mips::K1, MipsFI->getISRRegFI(0), PtrRC, Register());
BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP014)
.addReg(Mips::K1)
.addImm(0);
// Restore Status
- STI.getInstrInfo()->loadRegFromStackSlot(MBB, MBBI, Mips::K1,
- MipsFI->getISRRegFI(1), PtrRC,
- STI.getRegisterInfo(), Register());
+ STI.getInstrInfo()->loadRegFromStackSlot(
+ MBB, MBBI, Mips::K1, MipsFI->getISRRegFI(1), PtrRC, Register());
BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP012)
.addReg(Mips::K1)
.addImm(0);
@@ -795,7 +786,7 @@ bool MipsSEFrameLowering::spillCalleeSavedRegisters(
// Insert the spill to the stack frame.
bool IsKill = !IsRAAndRetAddrIsTaken;
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.storeRegToStackSlot(MBB, MI, Reg, IsKill, I.getFrameIdx(), RC, TRI,
+ TII.storeRegToStackSlot(MBB, MI, Reg, IsKill, I.getFrameIdx(), RC,
Register());
}
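
Mips differs slightly from the other targets: its spill helpers (storeRegToStack/loadRegFromStack) carry an extra byte offset, and the hunks above drop &RegInfo from those calls while keeping the offset in place. The adjusted pattern, with placeholder values:

    TII.storeRegToStack(MBB, I, VR0, /*isKill=*/true, FI, RC, /*Offset=*/0);
    TII.loadRegFromStack(MBB, I, VR1, FI, RC, /*Offset=*/RegSize);
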
diff --git a/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp b/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp
index 517f489..a1d0aa0 100644
--- a/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp
@@ -209,7 +209,6 @@ void MipsSEInstrInfo::storeRegToStack(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
int64_t Offset,
MachineInstr::MIFlag Flags) const {
DebugLoc DL;
@@ -235,16 +234,16 @@ void MipsSEInstrInfo::storeRegToStack(MachineBasicBlock &MBB,
Opc = Mips::SDC1;
else if (Mips::FGR64RegClass.hasSubClassEq(RC))
Opc = Mips::SDC164;
- else if (TRI->isTypeLegalForClass(*RC, MVT::v16i8))
+ else if (RI.isTypeLegalForClass(*RC, MVT::v16i8))
Opc = Mips::ST_B;
- else if (TRI->isTypeLegalForClass(*RC, MVT::v8i16) ||
- TRI->isTypeLegalForClass(*RC, MVT::v8f16))
+ else if (RI.isTypeLegalForClass(*RC, MVT::v8i16) ||
+ RI.isTypeLegalForClass(*RC, MVT::v8f16))
Opc = Mips::ST_H;
- else if (TRI->isTypeLegalForClass(*RC, MVT::v4i32) ||
- TRI->isTypeLegalForClass(*RC, MVT::v4f32))
+ else if (RI.isTypeLegalForClass(*RC, MVT::v4i32) ||
+ RI.isTypeLegalForClass(*RC, MVT::v4f32))
Opc = Mips::ST_W;
- else if (TRI->isTypeLegalForClass(*RC, MVT::v2i64) ||
- TRI->isTypeLegalForClass(*RC, MVT::v2f64))
+ else if (RI.isTypeLegalForClass(*RC, MVT::v2i64) ||
+ RI.isTypeLegalForClass(*RC, MVT::v2f64))
Opc = Mips::ST_D;
else if (Mips::LO32RegClass.hasSubClassEq(RC))
Opc = Mips::SW;
@@ -281,10 +280,12 @@ void MipsSEInstrInfo::storeRegToStack(MachineBasicBlock &MBB,
.addFrameIndex(FI).addImm(Offset).addMemOperand(MMO);
}
-void MipsSEInstrInfo::loadRegFromStack(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- int64_t Offset, MachineInstr::MIFlag Flags) const {
+void MipsSEInstrInfo::loadRegFromStack(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ Register DestReg, int FI,
+ const TargetRegisterClass *RC,
+ int64_t Offset,
+ MachineInstr::MIFlag Flags) const {
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad);
@@ -313,16 +314,16 @@ void MipsSEInstrInfo::loadRegFromStack(
Opc = Mips::LDC1;
else if (Mips::FGR64RegClass.hasSubClassEq(RC))
Opc = Mips::LDC164;
- else if (TRI->isTypeLegalForClass(*RC, MVT::v16i8))
+ else if (RI.isTypeLegalForClass(*RC, MVT::v16i8))
Opc = Mips::LD_B;
- else if (TRI->isTypeLegalForClass(*RC, MVT::v8i16) ||
- TRI->isTypeLegalForClass(*RC, MVT::v8f16))
+ else if (RI.isTypeLegalForClass(*RC, MVT::v8i16) ||
+ RI.isTypeLegalForClass(*RC, MVT::v8f16))
Opc = Mips::LD_H;
- else if (TRI->isTypeLegalForClass(*RC, MVT::v4i32) ||
- TRI->isTypeLegalForClass(*RC, MVT::v4f32))
+ else if (RI.isTypeLegalForClass(*RC, MVT::v4i32) ||
+ RI.isTypeLegalForClass(*RC, MVT::v4f32))
Opc = Mips::LD_W;
- else if (TRI->isTypeLegalForClass(*RC, MVT::v2i64) ||
- TRI->isTypeLegalForClass(*RC, MVT::v2f64))
+ else if (RI.isTypeLegalForClass(*RC, MVT::v2i64) ||
+ RI.isTypeLegalForClass(*RC, MVT::v2f64))
Opc = Mips::LD_D;
else if (Mips::HI32RegClass.hasSubClassEq(RC))
Opc = Mips::LW;
@@ -678,8 +679,8 @@ MipsSEInstrInfo::compareOpndSize(unsigned Opc,
const MCInstrDesc &Desc = get(Opc);
assert(Desc.NumOperands == 2 && "Unary instruction expected.");
const MipsRegisterInfo *RI = &getRegisterInfo();
- unsigned DstRegSize = RI->getRegSizeInBits(*getRegClass(Desc, 0, RI));
- unsigned SrcRegSize = RI->getRegSizeInBits(*getRegClass(Desc, 1, RI));
+ unsigned DstRegSize = RI->getRegSizeInBits(*getRegClass(Desc, 0));
+ unsigned SrcRegSize = RI->getRegSizeInBits(*getRegClass(Desc, 1));
return std::make_pair(DstRegSize > SrcRegSize, DstRegSize < SrcRegSize);
}
diff --git a/llvm/lib/Target/Mips/MipsSEInstrInfo.h b/llvm/lib/Target/Mips/MipsSEInstrInfo.h
index 0a7a0e5..5c48ccd 100644
--- a/llvm/lib/Target/Mips/MipsSEInstrInfo.h
+++ b/llvm/lib/Target/Mips/MipsSEInstrInfo.h
@@ -50,13 +50,12 @@ public:
void storeRegToStack(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, int64_t Offset,
+ int64_t Offset,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStack(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, int64_t Offset,
+ int FrameIndex, const TargetRegisterClass *RC, int64_t Offset,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
diff --git a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
index 910bc9d..aae3e49 100644
--- a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -2520,11 +2520,11 @@ bool PPCFrameLowering::spillCalleeSavedRegisters(
// saved vector registers.
if (Subtarget.needsSwapsForVSXMemOps() &&
!MF->getFunction().hasFnAttribute(Attribute::NoUnwind))
- TII.storeRegToStackSlotNoUpd(MBB, MI, Reg, !IsLiveIn,
- I.getFrameIdx(), RC, TRI);
+ TII.storeRegToStackSlotNoUpd(MBB, MI, Reg, !IsLiveIn, I.getFrameIdx(),
+ RC);
else
TII.storeRegToStackSlot(MBB, MI, Reg, !IsLiveIn, I.getFrameIdx(), RC,
- TRI, Register());
+ Register());
}
}
}
@@ -2690,10 +2690,9 @@ bool PPCFrameLowering::restoreCalleeSavedRegisters(
// saved vector registers.
if (Subtarget.needsSwapsForVSXMemOps() &&
!MF->getFunction().hasFnAttribute(Attribute::NoUnwind))
- TII.loadRegFromStackSlotNoUpd(MBB, I, Reg, CSI[i].getFrameIdx(), RC,
- TRI);
+ TII.loadRegFromStackSlotNoUpd(MBB, I, Reg, CSI[i].getFrameIdx(), RC);
else
- TII.loadRegFromStackSlot(MBB, I, Reg, CSI[i].getFrameIdx(), RC, TRI,
+ TII.loadRegFromStackSlot(MBB, I, Reg, CSI[i].getFrameIdx(), RC,
Register());
assert(I != MBB.begin() &&
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index 8d9d4c7..366a7b6 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -2014,8 +2014,7 @@ void PPCInstrInfo::StoreRegToStackSlot(
void PPCInstrInfo::storeRegToStackSlotNoUpd(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned SrcReg,
- bool isKill, int FrameIdx, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const {
+ bool isKill, int FrameIdx, const TargetRegisterClass *RC) const {
MachineFunction &MF = *MBB.getParent();
SmallVector<MachineInstr *, 4> NewMIs;
@@ -2034,8 +2033,7 @@ void PPCInstrInfo::storeRegToStackSlotNoUpd(
void PPCInstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool isKill, int FrameIdx, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIdx, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags) const {
// We need to avoid a situation in which the value from a VRRC register is
// spilled using an Altivec instruction and reloaded into a VSRC register
@@ -2045,7 +2043,7 @@ void PPCInstrInfo::storeRegToStackSlot(
// the register is defined using an Altivec instruction and is then used by a
// VSX instruction.
RC = updatedRC(RC);
- storeRegToStackSlotNoUpd(MBB, MI, SrcReg, isKill, FrameIdx, RC, TRI);
+ storeRegToStackSlotNoUpd(MBB, MI, SrcReg, isKill, FrameIdx, RC);
}
void PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, const DebugLoc &DL,
@@ -2060,8 +2058,7 @@ void PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, const DebugLoc &DL,
void PPCInstrInfo::loadRegFromStackSlotNoUpd(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg,
- int FrameIdx, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const {
+ int FrameIdx, const TargetRegisterClass *RC) const {
MachineFunction &MF = *MBB.getParent();
SmallVector<MachineInstr*, 4> NewMIs;
DebugLoc DL;
@@ -2080,10 +2077,12 @@ void PPCInstrInfo::loadRegFromStackSlotNoUpd(
NewMIs.back()->addMemOperand(MF, MMO);
}
-void PPCInstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIdx, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void PPCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ Register DestReg, int FrameIdx,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
// We need to avoid a situation in which the value from a VRRC register is
// spilled using an Altivec instruction and reloaded into a VSRC register
// using a VSX instruction. The issue with this is that the VSX
@@ -2093,7 +2092,7 @@ void PPCInstrInfo::loadRegFromStackSlot(
// VSX instruction.
RC = updatedRC(RC);
- loadRegFromStackSlotNoUpd(MBB, MI, DestReg, FrameIdx, RC, TRI);
+ loadRegFromStackSlotNoUpd(MBB, MI, DestReg, FrameIdx, RC);
}
bool PPCInstrInfo::
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.h b/llvm/lib/Target/PowerPC/PPCInstrInfo.h
index d67fc28..8b824bc 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.h
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.h
@@ -570,7 +570,8 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+      Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
// Emits a register spill without updating the register class for vector
@@ -579,13 +580,13 @@ public:
void storeRegToStackSlotNoUpd(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterClass *RC) const;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+      Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
// Emits a register reload without updating the register class for vector
@@ -594,8 +595,7 @@ public:
void loadRegFromStackSlotNoUpd(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterClass *RC) const;
unsigned getStoreOpcodeForSpill(const TargetRegisterClass *RC) const;
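
PowerPC's non-virtual *NoUpd variants, which deliberately skip the VSX register-class update, lose the parameter as well, keeping both entry points in sync. Per the PPCInstrInfo.cpp hunks above, the virtual hook now forwards like this:

    // storeRegToStackSlot: update RC for VSX first, then forward.
    RC = updatedRC(RC);
    storeRegToStackSlotNoUpd(MBB, MI, SrcReg, isKill, FrameIdx, RC);
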
diff --git a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 85b4072..b3a7c82 100644
--- a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -2023,7 +2023,7 @@ Register PPCRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
const TargetRegisterClass *RC = getPointerRegClass();
Register BaseReg = MRI.createVirtualRegister(RC);
- MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this));
+ MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0));
BuildMI(*MBB, Ins, DL, MCID, BaseReg)
.addFrameIndex(FrameIdx).addImm(Offset);
@@ -2051,7 +2051,7 @@ void PPCRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
const MCInstrDesc &MCID = MI.getDesc();
MachineRegisterInfo &MRI = MF.getRegInfo();
- MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, FIOperandNum, this));
+ MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, FIOperandNum));
}
bool PPCRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
diff --git a/llvm/lib/Target/RISCV/RISCVDeadRegisterDefinitions.cpp b/llvm/lib/Target/RISCV/RISCVDeadRegisterDefinitions.cpp
index 51180f5..5d3d9b5 100644
--- a/llvm/lib/Target/RISCV/RISCVDeadRegisterDefinitions.cpp
+++ b/llvm/lib/Target/RISCV/RISCVDeadRegisterDefinitions.cpp
@@ -59,7 +59,6 @@ bool RISCVDeadRegisterDefinitions::runOnMachineFunction(MachineFunction &MF) {
return false;
const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
- const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
LiveIntervals &LIS = getAnalysis<LiveIntervalsWrapperPass>().getLIS();
LLVM_DEBUG(dbgs() << "***** RISCVDeadRegisterDefinitions *****\n");
@@ -89,7 +88,7 @@ bool RISCVDeadRegisterDefinitions::runOnMachineFunction(MachineFunction &MF) {
LLVM_DEBUG(dbgs() << " Dead def operand #" << I << " in:\n ";
MI.print(dbgs()));
Register X0Reg;
- const TargetRegisterClass *RC = TII->getRegClass(Desc, I, TRI);
+ const TargetRegisterClass *RC = TII->getRegClass(Desc, I);
if (RC && RC->contains(RISCV::X0)) {
X0Reg = RISCV::X0;
} else if (RC && RC->contains(RISCV::X0_W)) {
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index f881c4c..f7fc952 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -291,12 +291,12 @@ static void emitSiFiveCLICPreemptibleSaves(MachineFunction &MF,
// which affects other passes.
TII->storeRegToStackSlot(MBB, MBBI, RISCV::X8, /* IsKill=*/true,
RVFI->getInterruptCSRFrameIndex(0),
- &RISCV::GPRRegClass, STI.getRegisterInfo(),
- Register(), MachineInstr::FrameSetup);
+ &RISCV::GPRRegClass, Register(),
+ MachineInstr::FrameSetup);
TII->storeRegToStackSlot(MBB, MBBI, RISCV::X9, /* IsKill=*/true,
RVFI->getInterruptCSRFrameIndex(1),
- &RISCV::GPRRegClass, STI.getRegisterInfo(),
- Register(), MachineInstr::FrameSetup);
+ &RISCV::GPRRegClass, Register(),
+ MachineInstr::FrameSetup);
// Put `mcause` into X8 (s0), and `mepc` into X9 (s1). If either of these are
// used in the function, then they will appear in `getUnmanagedCSI` and will
@@ -357,14 +357,12 @@ static void emitSiFiveCLICPreemptibleRestores(MachineFunction &MF,
// X8 and X9 need to be restored to their values on function entry, which we
// saved onto the stack in `emitSiFiveCLICPreemptibleSaves`.
- TII->loadRegFromStackSlot(MBB, MBBI, RISCV::X9,
- RVFI->getInterruptCSRFrameIndex(1),
- &RISCV::GPRRegClass, STI.getRegisterInfo(),
- Register(), MachineInstr::FrameSetup);
- TII->loadRegFromStackSlot(MBB, MBBI, RISCV::X8,
- RVFI->getInterruptCSRFrameIndex(0),
- &RISCV::GPRRegClass, STI.getRegisterInfo(),
- Register(), MachineInstr::FrameSetup);
+ TII->loadRegFromStackSlot(
+ MBB, MBBI, RISCV::X9, RVFI->getInterruptCSRFrameIndex(1),
+ &RISCV::GPRRegClass, Register(), MachineInstr::FrameSetup);
+ TII->loadRegFromStackSlot(
+ MBB, MBBI, RISCV::X8, RVFI->getInterruptCSRFrameIndex(0),
+ &RISCV::GPRRegClass, Register(), MachineInstr::FrameSetup);
}
// Get the ID of the libcall used for spilling and restoring callee saved
@@ -2177,7 +2175,7 @@ bool RISCVFrameLowering::spillCalleeSavedRegisters(
MCRegister Reg = CS.getReg();
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
TII.storeRegToStackSlot(MBB, MI, Reg, !MBB.isLiveIn(Reg),
- CS.getFrameIdx(), RC, TRI, Register(),
+ CS.getFrameIdx(), RC, Register(),
MachineInstr::FrameSetup);
}
};
@@ -2267,8 +2265,8 @@ bool RISCVFrameLowering::restoreCalleeSavedRegisters(
for (auto &CS : CSInfo) {
MCRegister Reg = CS.getReg();
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.loadRegFromStackSlot(MBB, MI, Reg, CS.getFrameIdx(), RC, TRI,
- Register(), MachineInstr::FrameDestroy);
+ TII.loadRegFromStackSlot(MBB, MI, Reg, CS.getFrameIdx(), RC, Register(),
+ MachineInstr::FrameDestroy);
assert(MI != MBB.begin() &&
"loadRegFromStackSlot didn't insert any code!");
}
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a3ccbd8..4d86a36 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -22203,8 +22203,7 @@ static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
MachineFunction &MF = *BB->getParent();
DebugLoc DL = MI.getDebugLoc();
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
- const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
+ const RISCVInstrInfo &TII = *MF.getSubtarget<RISCVSubtarget>().getInstrInfo();
Register LoReg = MI.getOperand(0).getReg();
Register HiReg = MI.getOperand(1).getReg();
Register SrcReg = MI.getOperand(2).getReg();
@@ -22213,7 +22212,7 @@ static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
- RI, Register());
+ Register());
MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
MachineMemOperand *MMOLo =
MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
@@ -22239,8 +22238,7 @@ static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
MachineFunction &MF = *BB->getParent();
DebugLoc DL = MI.getDebugLoc();
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
- const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
+ const RISCVInstrInfo &TII = *MF.getSubtarget<RISCVSubtarget>().getInstrInfo();
Register DstReg = MI.getOperand(0).getReg();
Register LoReg = MI.getOperand(1).getReg();
Register HiReg = MI.getOperand(2).getReg();
@@ -22263,7 +22261,7 @@ static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
.addFrameIndex(FI)
.addImm(4)
.addMemOperand(MMOHi);
- TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI, Register());
+ TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, Register());
MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index ce8dd3b..e0cdd11 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -639,7 +639,6 @@ void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register SrcReg, bool IsKill, int FI,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
MachineFunction *MF = MBB.getParent();
@@ -647,8 +646,8 @@ void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
unsigned Opcode;
if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
- Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
- RISCV::SW : RISCV::SD;
+ Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::SW
+ : RISCV::SD;
} else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
Opcode = RISCV::SH_INX;
} else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
@@ -705,7 +704,7 @@ void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
.addFrameIndex(FI)
.addMemOperand(MMO)
.setMIFlag(Flags);
- NumVRegSpilled += TRI->getRegSizeInBits(*RC) / RISCV::RVVBitsPerBlock;
+ NumVRegSpilled += RegInfo.getRegSizeInBits(*RC) / RISCV::RVVBitsPerBlock;
} else {
MachineMemOperand *MMO = MF->getMachineMemOperand(
MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
@@ -720,10 +719,12 @@ void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
}
}
-void RISCVInstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DstReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ Register DstReg, int FI,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
MachineFunction *MF = MBB.getParent();
MachineFrameInfo &MFI = MF->getFrameInfo();
DebugLoc DL =
@@ -731,8 +732,8 @@ void RISCVInstrInfo::loadRegFromStackSlot(
unsigned Opcode;
if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
- Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
- RISCV::LW : RISCV::LD;
+ Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::LW
+ : RISCV::LD;
} else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
Opcode = RISCV::LH_INX;
} else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
@@ -788,7 +789,7 @@ void RISCVInstrInfo::loadRegFromStackSlot(
.addFrameIndex(FI)
.addMemOperand(MMO)
.setMIFlag(Flags);
- NumVRegReloaded += TRI->getRegSizeInBits(*RC) / RISCV::RVVBitsPerBlock;
+ NumVRegReloaded += RegInfo.getRegSizeInBits(*RC) / RISCV::RVVBitsPerBlock;
} else {
MachineMemOperand *MMO = MF->getMachineMemOperand(
MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
@@ -1379,14 +1380,14 @@ void RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
report_fatal_error("underestimated function size");
storeRegToStackSlot(MBB, MI, TmpGPR, /*IsKill=*/true, FrameIndex,
- &RISCV::GPRRegClass, TRI, Register());
+ &RISCV::GPRRegClass, Register());
TRI->eliminateFrameIndex(std::prev(MI.getIterator()),
/*SpAdj=*/0, /*FIOperandNum=*/1);
MI.getOperand(1).setMBB(&RestoreBB);
loadRegFromStackSlot(RestoreBB, RestoreBB.end(), TmpGPR, FrameIndex,
- &RISCV::GPRRegClass, TRI, Register());
+ &RISCV::GPRRegClass, Register());
TRI->eliminateFrameIndex(RestoreBB.back(),
/*SpAdj=*/0, /*FIOperandNum=*/1);
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index 800af26..0ffe015 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -116,13 +116,13 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
bool IsKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+      Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg,
- int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
using TargetInstrInfo::foldMemoryOperandImpl;
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index d5ffa6c..4026364 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -146,6 +146,7 @@ public:
}
bool enableMachineScheduler() const override { return true; }
+ bool enableTerminalRule() const override { return true; }
bool enablePostRAScheduler() const override { return UsePostRAScheduler; }
diff --git a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
index fdf9a4f..e1ff243 100644
--- a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
@@ -455,7 +455,7 @@ bool RISCVVectorPeephole::convertSameMaskVMergeToVMv(MachineInstr &MI) {
True->getOperand(1).setReg(MI.getOperand(2).getReg());
// If True is masked then its passthru needs to be in VRNoV0.
MRI->constrainRegClass(True->getOperand(1).getReg(),
- TII->getRegClass(True->getDesc(), 1, TRI));
+ TII->getRegClass(True->getDesc(), 1));
}
MI.setDesc(TII->get(NewOpc));
@@ -675,7 +675,7 @@ bool RISCVVectorPeephole::foldVMV_V_V(MachineInstr &MI) {
if (Passthru.getReg().isValid())
MRI->constrainRegClass(
Passthru.getReg(),
- TII->getRegClass(Src->getDesc(), SrcPassthru.getOperandNo(), TRI));
+ TII->getRegClass(Src->getDesc(), SrcPassthru.getOperandNo()));
}
if (RISCVII::hasVecPolicyOp(Src->getDesc().TSFlags)) {
diff --git a/llvm/lib/Target/Sparc/SparcInstrInfo.cpp b/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
index fcd6cd7..6596379 100644
--- a/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
+++ b/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
@@ -527,7 +527,6 @@ void SparcInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
DebugLoc DL;
@@ -564,10 +563,12 @@ void SparcInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
llvm_unreachable("Can't store this register to stack slot");
}
-void SparcInstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void SparcInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ Register DestReg, int FI,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
diff --git a/llvm/lib/Target/Sparc/SparcInstrInfo.h b/llvm/lib/Target/Sparc/SparcInstrInfo.h
index 01d0204..273888f 100644
--- a/llvm/lib/Target/Sparc/SparcInstrInfo.h
+++ b/llvm/lib/Target/Sparc/SparcInstrInfo.h
@@ -92,14 +92,13 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
Register getGlobalBaseReg(MachineFunction *MF) const;
diff --git a/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp b/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
index dcefff9..570bbd8 100644
--- a/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
@@ -360,12 +360,12 @@ bool SystemZELFFrameLowering::spillCalleeSavedRegisters(
if (SystemZ::FP64BitRegClass.contains(Reg)) {
MBB.addLiveIn(Reg);
TII->storeRegToStackSlot(MBB, MBBI, Reg, true, I.getFrameIdx(),
- &SystemZ::FP64BitRegClass, TRI, Register());
+ &SystemZ::FP64BitRegClass, Register());
}
if (SystemZ::VR128BitRegClass.contains(Reg)) {
MBB.addLiveIn(Reg);
TII->storeRegToStackSlot(MBB, MBBI, Reg, true, I.getFrameIdx(),
- &SystemZ::VR128BitRegClass, TRI, Register());
+ &SystemZ::VR128BitRegClass, Register());
}
}
@@ -389,10 +389,10 @@ bool SystemZELFFrameLowering::restoreCalleeSavedRegisters(
MCRegister Reg = I.getReg();
if (SystemZ::FP64BitRegClass.contains(Reg))
TII->loadRegFromStackSlot(MBB, MBBI, Reg, I.getFrameIdx(),
- &SystemZ::FP64BitRegClass, TRI, Register());
+ &SystemZ::FP64BitRegClass, Register());
if (SystemZ::VR128BitRegClass.contains(Reg))
TII->loadRegFromStackSlot(MBB, MBBI, Reg, I.getFrameIdx(),
- &SystemZ::VR128BitRegClass, TRI, Register());
+ &SystemZ::VR128BitRegClass, Register());
}
// Restore call-saved GPRs (but not call-clobbered varargs, which at
@@ -1157,12 +1157,12 @@ bool SystemZXPLINKFrameLowering::spillCalleeSavedRegisters(
if (SystemZ::FP64BitRegClass.contains(Reg)) {
MBB.addLiveIn(Reg);
TII->storeRegToStackSlot(MBB, MBBI, Reg, true, I.getFrameIdx(),
- &SystemZ::FP64BitRegClass, TRI, Register());
+ &SystemZ::FP64BitRegClass, Register());
}
if (SystemZ::VR128BitRegClass.contains(Reg)) {
MBB.addLiveIn(Reg);
TII->storeRegToStackSlot(MBB, MBBI, Reg, true, I.getFrameIdx(),
- &SystemZ::VR128BitRegClass, TRI, Register());
+ &SystemZ::VR128BitRegClass, Register());
}
}
@@ -1189,10 +1189,10 @@ bool SystemZXPLINKFrameLowering::restoreCalleeSavedRegisters(
MCRegister Reg = I.getReg();
if (SystemZ::FP64BitRegClass.contains(Reg))
TII->loadRegFromStackSlot(MBB, MBBI, Reg, I.getFrameIdx(),
- &SystemZ::FP64BitRegClass, TRI, Register());
+ &SystemZ::FP64BitRegClass, Register());
if (SystemZ::VR128BitRegClass.contains(Reg))
TII->loadRegFromStackSlot(MBB, MBBI, Reg, I.getFrameIdx(),
- &SystemZ::VR128BitRegClass, TRI, Register());
+ &SystemZ::VR128BitRegClass, Register());
}
// Restore call-saved GPRs (but not call-clobbered varargs, which at
diff --git a/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.cpp b/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.cpp
index 5313fba..8fc339f 100644
--- a/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.cpp
@@ -115,11 +115,10 @@ SystemZHazardRecognizer::fitsIntoCurrentGroup(SUnit *SU) const {
}
bool SystemZHazardRecognizer::has4RegOps(const MachineInstr *MI) const {
- const TargetRegisterInfo *TRI = &TII->getRegisterInfo();
const MCInstrDesc &MID = MI->getDesc();
unsigned Count = 0;
for (unsigned OpIdx = 0; OpIdx < MID.getNumOperands(); OpIdx++) {
- const TargetRegisterClass *RC = TII->getRegClass(MID, OpIdx, TRI);
+ const TargetRegisterClass *RC = TII->getRegClass(MID, OpIdx);
if (RC == nullptr)
continue;
if (OpIdx >= MID.getNumDefs() &&
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index 23e7e7e..eb1ce4a 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -1023,8 +1023,8 @@ void SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
void SystemZInstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
bool isKill, int FrameIdx, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
- MachineInstr::MIFlag Flags) const {
+    Register VReg, MachineInstr::MIFlag Flags) const {
DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
// Callers may expect a single instruction, so keep 128-bit moves
@@ -1036,10 +1036,12 @@ void SystemZInstrInfo::storeRegToStackSlot(
FrameIdx);
}
-void SystemZInstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg,
- int FrameIdx, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void SystemZInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ Register DestReg, int FrameIdx,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
// Callers may expect a single instruction, so keep 128-bit moves
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
index 7b9ad7b..4aecdd7 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -281,12 +281,14 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+      Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIdx, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+      Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
MachineInstr *convertToThreeAddress(MachineInstr &MI, LiveVariables *LV,
LiveIntervals *LIS) const override;
diff --git a/llvm/lib/Target/VE/VEInstrInfo.cpp b/llvm/lib/Target/VE/VEInstrInfo.cpp
index bae703b..b9ac5d6 100644
--- a/llvm/lib/Target/VE/VEInstrInfo.cpp
+++ b/llvm/lib/Target/VE/VEInstrInfo.cpp
@@ -459,7 +459,6 @@ void VEInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
DebugLoc DL;
@@ -519,10 +518,12 @@ void VEInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
report_fatal_error("Can't store this register to stack slot");
}
-void VEInstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void VEInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ Register DestReg, int FI,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
DebugLoc DL;
if (I != MBB.end())
DL = I->getDebugLoc();
diff --git a/llvm/lib/Target/VE/VEInstrInfo.h b/llvm/lib/Target/VE/VEInstrInfo.h
index 408d3ab..cedf7f2 100644
--- a/llvm/lib/Target/VE/VEInstrInfo.h
+++ b/llvm/lib/Target/VE/VEInstrInfo.h
@@ -92,13 +92,13 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
/// } Stack Spill & Reload
diff --git a/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp b/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
index d2e3527..9473e8d 100644
--- a/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
+++ b/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
@@ -387,8 +387,8 @@ void X86AvoidSFBPass::buildCopy(MachineInstr *LoadInst, unsigned NLoadOpcode,
MachineMemOperand *LMMO = *LoadInst->memoperands_begin();
MachineMemOperand *SMMO = *StoreInst->memoperands_begin();
- Register Reg1 = MRI->createVirtualRegister(
- TII->getRegClass(TII->get(NLoadOpcode), 0, TRI));
+ Register Reg1 =
+ MRI->createVirtualRegister(TII->getRegClass(TII->get(NLoadOpcode), 0));
MachineInstr *NewLoad =
BuildMI(*MBB, LoadInst, LoadInst->getDebugLoc(), TII->get(NLoadOpcode),
Reg1)
@@ -553,7 +553,7 @@ void X86AvoidSFBPass::findPotentiallylBlockedCopies(MachineFunction &MF) {
}
unsigned X86AvoidSFBPass::getRegSizeInBytes(MachineInstr *LoadInst) {
- const auto *TRC = TII->getRegClass(TII->get(LoadInst->getOpcode()), 0, TRI);
+ const auto *TRC = TII->getRegClass(TII->get(LoadInst->getOpcode()), 0);
return TRI->getRegSizeInBits(*TRC) / 8;
}
diff --git a/llvm/lib/Target/X86/X86DomainReassignment.cpp b/llvm/lib/Target/X86/X86DomainReassignment.cpp
index 5d19011..2047a53 100644
--- a/llvm/lib/Target/X86/X86DomainReassignment.cpp
+++ b/llvm/lib/Target/X86/X86DomainReassignment.cpp
@@ -174,8 +174,8 @@ public:
MachineBasicBlock *MBB = MI->getParent();
const DebugLoc &DL = MI->getDebugLoc();
- Register Reg = MRI->createVirtualRegister(
- TII->getRegClass(TII->get(DstOpcode), 0, MRI->getTargetRegisterInfo()));
+ Register Reg =
+ MRI->createVirtualRegister(TII->getRegClass(TII->get(DstOpcode), 0));
MachineInstrBuilder Bld = BuildMI(*MBB, MI, DL, TII->get(DstOpcode), Reg);
for (const MachineOperand &MO : llvm::drop_begin(MI->operands()))
Bld.add(MO);
diff --git a/llvm/lib/Target/X86/X86FastPreTileConfig.cpp b/llvm/lib/Target/X86/X86FastPreTileConfig.cpp
index 06f729a..25799f4 100644
--- a/llvm/lib/Target/X86/X86FastPreTileConfig.cpp
+++ b/llvm/lib/Target/X86/X86FastPreTileConfig.cpp
@@ -206,8 +206,7 @@ void X86FastPreTileConfig::spill(MachineBasicBlock::iterator Before,
const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
// Don't need shape information for tile store, because it is adjacent to
// the tile def instruction.
- TII->storeRegToStackSlot(*MBB, Before, VirtReg, Kill, FI, &RC, TRI,
- Register());
+ TII->storeRegToStackSlot(*MBB, Before, VirtReg, Kill, FI, &RC, Register());
++NumStores;
// TODO: update DBG_VALUEs
diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
index a66a321..8bca634 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -3093,8 +3093,8 @@ bool X86FrameLowering::spillCalleeSavedRegisters(
MBB.addLiveIn(Reg);
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
- TII.storeRegToStackSlot(MBB, MI, Reg, true, I.getFrameIdx(), RC, TRI,
- Register(), MachineInstr::FrameSetup);
+ TII.storeRegToStackSlot(MBB, MI, Reg, true, I.getFrameIdx(), RC, Register(),
+ MachineInstr::FrameSetup);
}
return true;
@@ -3166,8 +3166,7 @@ bool X86FrameLowering::restoreCalleeSavedRegisters(
VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
- TII.loadRegFromStackSlot(MBB, MI, Reg, I.getFrameIdx(), RC, TRI,
- Register());
+ TII.loadRegFromStackSlot(MBB, MI, Reg, I.getFrameIdx(), RC, Register());
}
// Clear the stack slot for spill base pointer register.
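On the caller side the rewrite is purely mechanical: drop the TRI argument that used to sit between RC and VReg. Condensed from the two X86FrameLowering hunks above, the spill/restore pair now reads:

const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
TII.storeRegToStackSlot(MBB, MI, Reg, /*isKill=*/true, I.getFrameIdx(), RC,
                        Register(), MachineInstr::FrameSetup);
TII.loadRegFromStackSlot(MBB, MI, Reg, I.getFrameIdx(), RC, Register());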
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 2c6d1af..61d9608 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -93,10 +93,9 @@ X86InstrInfo::X86InstrInfo(const X86Subtarget &STI)
X86::CATCHRET, (STI.is64Bit() ? X86::RET64 : X86::RET32)),
Subtarget(STI), RI(STI.getTargetTriple()) {}
-const TargetRegisterClass *
-X86InstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
- const TargetRegisterInfo *TRI) const {
- auto *RC = TargetInstrInfo::getRegClass(MCID, OpNum, TRI);
+const TargetRegisterClass *X86InstrInfo::getRegClass(const MCInstrDesc &MCID,
+ unsigned OpNum) const {
+ auto *RC = TargetInstrInfo::getRegClass(MCID, OpNum);
// If the target does not have EGPR, then r16-r31 will be reserved for all
// instructions.
if (!RC || !Subtarget.hasEGPR())
@@ -958,8 +957,7 @@ bool X86InstrInfo::isReMaterializableImpl(
void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register DestReg, unsigned SubIdx,
- const MachineInstr &Orig,
- const TargetRegisterInfo &TRI) const {
- bool ClobbersEFLAGS = Orig.modifiesRegister(X86::EFLAGS, &TRI);
- if (ClobbersEFLAGS && MBB.computeRegisterLiveness(&TRI, X86::EFLAGS, I) !=
+ const MachineInstr &Orig) const {
+ bool ClobbersEFLAGS = Orig.modifiesRegister(X86::EFLAGS, &RI);
+ if (ClobbersEFLAGS && MBB.computeRegisterLiveness(&RI, X86::EFLAGS, I) !=
MachineBasicBlock::LQR_Dead) {
@@ -4782,14 +4780,13 @@ void X86InstrInfo::loadStoreTileReg(MachineBasicBlock &MBB,
void X86InstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
bool isKill, int FrameIdx, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
- MachineInstr::MIFlag Flags) const {
+ Register VReg, MachineInstr::MIFlag Flags) const {
const MachineFunction &MF = *MBB.getParent();
const MachineFrameInfo &MFI = MF.getFrameInfo();
- assert(MFI.getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) &&
+ assert(MFI.getObjectSize(FrameIdx) >= RI.getSpillSize(*RC) &&
"Stack slot too small for store");
- unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
+ unsigned Alignment = std::max<uint32_t>(RI.getSpillSize(*RC), 16);
bool isAligned =
(Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
(RI.canRealignStack(MF) && !MFI.isFixedObjectIndex(FrameIdx));
@@ -4803,15 +4801,17 @@ void X86InstrInfo::storeRegToStackSlot(
.setMIFlag(Flags);
}
-void X86InstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIdx, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ Register DestReg, int FrameIdx,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
const MachineFunction &MF = *MBB.getParent();
const MachineFrameInfo &MFI = MF.getFrameInfo();
- assert(MFI.getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) &&
+ assert(MFI.getObjectSize(FrameIdx) >= RI.getSpillSize(*RC) &&
"Load size exceeds stack slot");
- unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
+ unsigned Alignment = std::max<uint32_t>(RI.getSpillSize(*RC), 16);
bool isAligned =
(Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
(RI.canRealignStack(MF) && !MFI.isFixedObjectIndex(FrameIdx));
@@ -5553,7 +5553,7 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
return false;
ShouldUpdateCC = true;
} else if (ImmDelta != 0) {
- unsigned BitWidth = TRI->getRegSizeInBits(*MRI->getRegClass(SrcReg));
+ unsigned BitWidth = RI.getRegSizeInBits(*MRI->getRegClass(SrcReg));
// Shift amount for min/max constants to adjust for 8/16/32 instruction
// sizes.
switch (OldCC) {
@@ -7235,7 +7235,6 @@ static void updateOperandRegConstraints(MachineFunction &MF,
MachineInstr &NewMI,
const TargetInstrInfo &TII) {
MachineRegisterInfo &MRI = MF.getRegInfo();
- const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
for (int Idx : llvm::seq<int>(0, NewMI.getNumOperands())) {
MachineOperand &MO = NewMI.getOperand(Idx);
@@ -7247,7 +7246,7 @@ static void updateOperandRegConstraints(MachineFunction &MF,
continue;
auto *NewRC =
- MRI.constrainRegClass(Reg, TII.getRegClass(NewMI.getDesc(), Idx, &TRI));
+ MRI.constrainRegClass(Reg, TII.getRegClass(NewMI.getDesc(), Idx));
if (!NewRC) {
LLVM_DEBUG(
dbgs() << "WARNING: Unable to update register constraint for operand "
@@ -7345,7 +7344,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
unsigned SrcIdx = (Imm >> 6) & 3;
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
- const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI);
+ const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum);
unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
if ((Size == 0 || Size >= 16) && RCSize >= 16 &&
(MI.getOpcode() != X86::INSERTPSrri || Alignment >= Align(4))) {
@@ -7370,7 +7369,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
// TODO: In most cases AVX doesn't have an 8-byte alignment requirement.
if (OpNum == 2) {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
- const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI);
+ const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum);
unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment >= Align(8)) {
unsigned NewOpCode =
@@ -7389,7 +7388,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
// table twice.
if (OpNum == 2) {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
- const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI);
+ const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum);
unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment < Align(16)) {
MachineInstr *NewMI =
@@ -7524,7 +7523,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
bool NarrowToMOV32rm = false;
if (Size) {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
- const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI);
+ const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum);
unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
// Check if it's safe to fold the load. If the size of the object is
// narrower than the load width, then it's not.
@@ -8118,9 +8117,9 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
RC == &X86::VK32WMRegClass || RC == &X86::VK64WMRegClass;
};
- if (Op1.isReg() && IsVKWMClass(getRegClass(MCID, 1, &RI)))
+ if (Op1.isReg() && IsVKWMClass(getRegClass(MCID, 1)))
MaskReg = Op1.getReg();
- else if (Op2.isReg() && IsVKWMClass(getRegClass(MCID, 2, &RI)))
+ else if (Op2.isReg() && IsVKWMClass(getRegClass(MCID, 2)))
MaskReg = Op2.getReg();
if (MaskReg) {
@@ -8524,7 +8523,7 @@ bool X86InstrInfo::unfoldMemoryOperand(
const MCInstrDesc &MCID = get(Opc);
- const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI);
+ const TargetRegisterClass *RC = getRegClass(MCID, Index);
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
// TODO: Check if 32-byte or greater accesses are slow too?
if (!MI.hasOneMemOperand() && RC == &X86::VR128RegClass &&
@@ -8635,7 +8634,7 @@ bool X86InstrInfo::unfoldMemoryOperand(
// Emit the store instruction.
if (UnfoldStore) {
- const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI);
+ const TargetRegisterClass *DstRC = getRegClass(MCID, 0);
auto MMOs = extractStoreMMOs(MI.memoperands(), MF);
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*DstRC), 16);
bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
@@ -8667,7 +8666,7 @@ bool X86InstrInfo::unfoldMemoryOperand(
const MCInstrDesc &MCID = get(Opc);
MachineFunction &MF = DAG.getMachineFunction();
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
- const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI);
+ const TargetRegisterClass *RC = getRegClass(MCID, Index);
unsigned NumDefs = MCID.NumDefs;
std::vector<SDValue> AddrOps;
std::vector<SDValue> BeforeOps;
@@ -8718,7 +8717,7 @@ bool X86InstrInfo::unfoldMemoryOperand(
std::vector<EVT> VTs;
const TargetRegisterClass *DstRC = nullptr;
if (MCID.getNumDefs() > 0) {
- DstRC = getRegClass(MCID, 0, &RI);
+ DstRC = getRegClass(MCID, 0);
VTs.push_back(*TRI.legalclasstypes_begin(*DstRC));
}
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
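One pattern recurs through the X86 hunks above: getRegClass(MCID, OpNum) loses its TRI argument, while size and alignment queries move to the RI member or to the function's own register info. A condensed sketch of the unfold sequence, stitched together from the hunks above for illustration:

const MCInstrDesc &MCID = get(Opc);
const TargetRegisterClass *RC = getRegClass(MCID, Index);
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
// Only the class lookup changed; spill sizes still come from TRI.
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);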
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index 5f75559..a547fcd 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -246,9 +246,8 @@ public:
/// GR*RegClass (definition in TD file)
/// ->
/// GR*_NOREX2RegClass (Returned register class)
- const TargetRegisterClass *
- getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
- const TargetRegisterInfo *TRI) const override;
+ const TargetRegisterClass *getRegClass(const MCInstrDesc &MCID,
+ unsigned OpNum) const override;
/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
/// such, whenever a client has an instance of instruction info, it should
@@ -343,8 +342,7 @@ public:
bool isReMaterializableImpl(const MachineInstr &MI) const override;
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
Register DestReg, unsigned SubIdx,
- const MachineInstr &Orig,
- const TargetRegisterInfo &TRI) const override;
+ const MachineInstr &Orig) const override;
/// Given an operand within a MachineInstr, insert preceding code to put it
/// into the right format for a particular kind of LEA instruction. This may
@@ -469,14 +467,13 @@
bool RenamableSrc = false) const override;
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadStoreTileReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
diff --git a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
index 167bed1..c964605 100644
--- a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
+++ b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
@@ -359,7 +359,7 @@ bool X86OptimizeLEAPass::chooseBestLEA(
// example MOV8mr_NOREX. We could constrain the register class of the LEA
// def to suit MI, however since this case is very rare and hard to
// reproduce in a test it's just more reliable to skip the LEA.
- if (TII->getRegClass(Desc, MemOpNo + X86::AddrBaseReg, TRI) !=
+ if (TII->getRegClass(Desc, MemOpNo + X86::AddrBaseReg) !=
MRI->getRegClass(DefMI->getOperand(0).getReg()))
continue;
diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index e0b3b61..d0d897e 100644
--- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -841,7 +841,7 @@ getRegClassForUnfoldedLoad(const X86InstrInfo &TII, unsigned Opcode) {
unsigned UnfoldedOpc = TII.getOpcodeAfterMemoryUnfold(
Opcode, /*UnfoldLoad*/ true, /*UnfoldStore*/ false, &Index);
const MCInstrDesc &MCID = TII.get(UnfoldedOpc);
- return TII.getRegClass(MCID, Index, &TII.getRegisterInfo());
+ return TII.getRegClass(MCID, Index);
}
void X86SpeculativeLoadHardeningPass::unfoldCallAndJumpLoads(
diff --git a/llvm/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp b/llvm/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp
index 096ad08..0e00db49 100644
--- a/llvm/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp
+++ b/llvm/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp
@@ -69,7 +69,7 @@ static bool readInstruction32(ArrayRef<uint8_t> Bytes, uint64_t Address,
return true;
}
-static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo) {
+static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo) {
const MCRegisterInfo *RegInfo = D->getContext().getRegisterInfo();
return RegInfo->getRegClass(RC).getRegister(RegNo);
}
@@ -79,7 +79,7 @@ static DecodeStatus DecodeGRRegsRegisterClass(MCInst &Inst, unsigned RegNo,
const MCDisassembler *Decoder) {
if (RegNo > 11)
return MCDisassembler::Fail;
- unsigned Reg = getReg(Decoder, XCore::GRRegsRegClassID, RegNo);
+ MCRegister Reg = getReg(Decoder, XCore::GRRegsRegClassID, RegNo);
Inst.addOperand(MCOperand::createReg(Reg));
return MCDisassembler::Success;
}
@@ -89,7 +89,7 @@ static DecodeStatus DecodeRRegsRegisterClass(MCInst &Inst, unsigned RegNo,
const MCDisassembler *Decoder) {
if (RegNo > 15)
return MCDisassembler::Fail;
- unsigned Reg = getReg(Decoder, XCore::RRegsRegClassID, RegNo);
+ MCRegister Reg = getReg(Decoder, XCore::RRegsRegClassID, RegNo);
Inst.addOperand(MCOperand::createReg(Reg));
return MCDisassembler::Success;
}
diff --git a/llvm/lib/Target/XCore/XCoreFrameLowering.cpp b/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
index cdb5186..351a221 100644
--- a/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
@@ -432,7 +432,7 @@ bool XCoreFrameLowering::spillCalleeSavedRegisters(
// Add the callee-saved register as live-in. It's killed at the spill.
MBB.addLiveIn(Reg);
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.storeRegToStackSlot(MBB, MI, Reg, true, I.getFrameIdx(), RC, TRI,
+ TII.storeRegToStackSlot(MBB, MI, Reg, true, I.getFrameIdx(), RC,
Register());
if (emitFrameMoves) {
auto Store = MI;
@@ -458,8 +458,7 @@ bool XCoreFrameLowering::restoreCalleeSavedRegisters(
"LR & FP are always handled in emitEpilogue");
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.loadRegFromStackSlot(MBB, MI, Reg, CSR.getFrameIdx(), RC, TRI,
- Register());
+ TII.loadRegFromStackSlot(MBB, MI, Reg, CSR.getFrameIdx(), RC, Register());
assert(MI != MBB.begin() &&
"loadRegFromStackSlot didn't insert any code!");
// Insert in reverse order. loadRegFromStackSlot can insert multiple
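The truncated comment above is the key constraint: loadRegFromStackSlot may expand to several machine instructions, so the restore loop must re-anchor its iterator after each call rather than trust a fixed insertion point. Roughly, under the new signature (a simplified sketch, not a verbatim quote of the XCore loop):

for (const CalleeSavedInfo &CSR : CSI) {
  const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(CSR.getReg());
  TII.loadRegFromStackSlot(MBB, MI, CSR.getReg(), CSR.getFrameIdx(), RC,
                           Register());
  assert(MI != MBB.begin() && "loadRegFromStackSlot didn't insert any code!");
  // Step back onto the code just emitted so the next reload goes in front
  // of it -- hence "insert in reverse order".
  MI = std::prev(MI);
}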
diff --git a/llvm/lib/Target/XCore/XCoreInstrInfo.cpp b/llvm/lib/Target/XCore/XCoreInstrInfo.cpp
index 80fda34..075910c 100644
--- a/llvm/lib/Target/XCore/XCoreInstrInfo.cpp
+++ b/llvm/lib/Target/XCore/XCoreInstrInfo.cpp
@@ -355,8 +355,7 @@ void XCoreInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
void XCoreInstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register SrcReg,
bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
- MachineInstr::MIFlag Flags) const {
+ Register VReg, MachineInstr::MIFlag Flags) const {
DebugLoc DL;
if (I != MBB.end() && !I->isDebugInstr())
DL = I->getDebugLoc();
@@ -377,7 +377,6 @@ void XCoreInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register DestReg, int FrameIndex,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
DebugLoc DL;
diff --git a/llvm/lib/Target/XCore/XCoreInstrInfo.h b/llvm/lib/Target/XCore/XCoreInstrInfo.h
index 3543392..c4e399e 100644
--- a/llvm/lib/Target/XCore/XCoreInstrInfo.h
+++ b/llvm/lib/Target/XCore/XCoreInstrInfo.h
@@ -71,13 +71,13 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
bool reverseBranchCondition(
diff --git a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp
index cf9a2a0..1c0dc66 100644
--- a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp
+++ b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp
@@ -314,7 +314,7 @@ bool XtensaFrameLowering::spillCalleeSavedRegisters(
bool IsKill = !IsA0AndRetAddrIsTaken;
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
TII.storeRegToStackSlot(EntryBlock, MI, Reg, IsKill, CSI[i].getFrameIdx(),
- RC, TRI, Register());
+ RC, Register());
}
return true;
diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp
index 6bbebde..d7b05ac 100644
--- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp
+++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp
@@ -145,8 +145,7 @@ void XtensaInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
void XtensaInstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
bool isKill, int FrameIdx, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
- MachineInstr::MIFlag Flags) const {
+ Register VReg, MachineInstr::MIFlag Flags) const {
DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
unsigned LoadOpcode, StoreOpcode;
getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode, FrameIdx);
@@ -155,10 +155,12 @@ void XtensaInstrInfo::storeRegToStackSlot(
addFrameReference(MIB, FrameIdx);
}
-void XtensaInstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg,
- int FrameIdx, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void XtensaInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ Register DestReg, int FrameIdx,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
unsigned LoadOpcode, StoreOpcode;
getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode, FrameIdx);
@@ -544,12 +546,12 @@ void XtensaInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
"function code size is significantly larger than estimated");
storeRegToStackSlot(MBB, L32R, ScavRegister, /*IsKill=*/true, FrameIndex,
- &Xtensa::ARRegClass, &RI, Register());
+ &Xtensa::ARRegClass, Register());
RI.eliminateFrameIndex(std::prev(L32R.getIterator()),
/*SpAdj=*/0, /*FIOperandNum=*/1);
loadRegFromStackSlot(RestoreBB, RestoreBB.end(), ScavRegister, FrameIndex,
- &Xtensa::ARRegClass, &RI, Register());
+ &Xtensa::ARRegClass, Register());
RI.eliminateFrameIndex(RestoreBB.back(),
/*SpAdj=*/0, /*FIOperandNum=*/1);
JumpToMBB = &RestoreBB;
diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.h b/llvm/lib/Target/Xtensa/XtensaInstrInfo.h
index 1808cb3..0b46d6c 100644
--- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.h
+++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.h
@@ -56,14 +56,13 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIdx, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
// Get the load and store opcodes for a given register class and offset.
diff --git a/llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators.ll b/llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators.ll
index a84d666..d1bcad4 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators.ll
@@ -24,8 +24,8 @@ loop:
%acc_phi = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
%ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
- %a = load <8 x i8>, <8 x i8>* %ptr1_i, align 1
- %b = load <8 x i8>, <8 x i8>* %ptr2_i, align 1
+ %a = load <8 x i8>, ptr %ptr1_i, align 1
+ %b = load <8 x i8>, ptr %ptr2_i, align 1
%vabd = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %a, <8 x i8> %b)
%vabd_ext = zext <8 x i8> %vabd to <8 x i16>
%acc_next = add <8 x i16> %vabd_ext, %acc_phi
@@ -65,8 +65,8 @@ loop:
%acc_phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
%ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
- %a = load <4 x i16>, <4 x i16>* %ptr1_i, align 1
- %b = load <4 x i16>, <4 x i16>* %ptr2_i, align 1
+ %a = load <4 x i16>, ptr %ptr1_i, align 1
+ %b = load <4 x i16>, ptr %ptr2_i, align 1
%vabd = tail call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %a, <4 x i16> %b)
%vmov = zext <4 x i16> %vabd to <4 x i32>
%acc_next = add <4 x i32> %vmov, %acc_phi
@@ -116,8 +116,8 @@ loop:
%acc_phi_lo = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next_lo, %loop ]
%ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
- %a = load <16 x i8>, <16 x i8>* %ptr1_i, align 1
- %b = load <16 x i8>, <16 x i8>* %ptr2_i, align 1
+ %a = load <16 x i8>, ptr %ptr1_i, align 1
+ %b = load <16 x i8>, ptr %ptr2_i, align 1
%a_hi = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%b_hi = shufflevector <16 x i8> %b, <16 x i8> zeroinitializer, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%a_lo = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -160,8 +160,8 @@ loop:
%acc_phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
%ptr1_i = getelementptr i32, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i32, ptr %ptr2, i32 %i
- %a = load <4 x i32>, <4 x i32>* %ptr1_i, align 1
- %b = load <4 x i32>, <4 x i32>* %ptr2_i, align 1
+ %a = load <4 x i32>, ptr %ptr1_i, align 1
+ %b = load <4 x i32>, ptr %ptr2_i, align 1
%vabd = tail call <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32> %a, <4 x i32> %b)
%acc_next = add <4 x i32> %acc_phi, %vabd
%next_i = add i32 %i, 4
@@ -198,8 +198,8 @@ loop:
; Load values from ptr1 and ptr2
%ptr1_i = getelementptr i32, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i32, ptr %ptr2, i32 %i
- %a = load <4 x i32>, <4 x i32>* %ptr1_i, align 1
- %b = load <4 x i32>, <4 x i32>* %ptr2_i, align 1
+ %a = load <4 x i32>, ptr %ptr1_i, align 1
+ %b = load <4 x i32>, ptr %ptr2_i, align 1
; Perform the intrinsic operation
%vabd = tail call <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32> %a, <4 x i32> %b)
%acc_next = add <4 x i32> %acc_phi, %vabd
@@ -237,8 +237,8 @@ loop:
%acc_phi = phi <2 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
%ptr1_i = getelementptr i32, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i32, ptr %ptr2, i32 %i
- %a = load <2 x i32>, <2 x i32>* %ptr1_i, align 1
- %b = load <2 x i32>, <2 x i32>* %ptr2_i, align 1
+ %a = load <2 x i32>, ptr %ptr1_i, align 1
+ %b = load <2 x i32>, ptr %ptr2_i, align 1
%vabd = tail call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %a, <2 x i32> %b)
%acc_next = add <2 x i32> %acc_phi, %vabd
%next_i = add i32 %i, 2
@@ -272,8 +272,8 @@ loop:
%acc_phi = phi <8 x i8> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
%ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
- %a = load <8 x i8>, <8 x i8>* %ptr1_i, align 1
- %b = load <8 x i8>, <8 x i8>* %ptr2_i, align 1
+ %a = load <8 x i8>, ptr %ptr1_i, align 1
+ %b = load <8 x i8>, ptr %ptr2_i, align 1
%vabd = tail call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %a, <8 x i8> %b)
%acc_next = add <8 x i8> %acc_phi, %vabd
%next_i = add i32 %i, 8
@@ -307,8 +307,8 @@ loop:
%acc_phi = phi <16 x i8> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
%ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
- %a = load <16 x i8>, <16 x i8>* %ptr1_i, align 1
- %b = load <16 x i8>, <16 x i8>* %ptr2_i, align 1
+ %a = load <16 x i8>, ptr %ptr1_i, align 1
+ %b = load <16 x i8>, ptr %ptr2_i, align 1
%vabd = tail call <16 x i8> @llvm.aarch64.neon.uabd.v16i8(<16 x i8> %a, <16 x i8> %b)
%acc_next = add <16 x i8> %acc_phi, %vabd
%next_i = add i32 %i, 16
@@ -342,8 +342,8 @@ loop:
%acc_phi = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
%ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
- %a = load <8 x i16>, <8 x i16>* %ptr1_i, align 1
- %b = load <8 x i16>, <8 x i16>* %ptr2_i, align 1
+ %a = load <8 x i16>, ptr %ptr1_i, align 1
+ %b = load <8 x i16>, ptr %ptr2_i, align 1
%vabd = tail call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> %a, <8 x i16> %b)
%acc_next = add <8 x i16> %acc_phi, %vabd
%next_i = add i32 %i, 8
@@ -377,8 +377,8 @@ loop:
%acc_phi = phi <8 x i8> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
%ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
- %a = load <8 x i8>, <8 x i8>* %ptr1_i, align 1
- %b = load <8 x i8>, <8 x i8>* %ptr2_i, align 1
+ %a = load <8 x i8>, ptr %ptr1_i, align 1
+ %b = load <8 x i8>, ptr %ptr2_i, align 1
%vabd = tail call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %a, <8 x i8> %b)
%acc_next = add <8 x i8> %acc_phi, %vabd
%next_i = add i32 %i, 8
@@ -411,8 +411,8 @@ loop:
%acc_phi = phi <4 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
%ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
- %a = load <4 x i16>, <4 x i16>* %ptr1_i, align 1
- %b = load <4 x i16>, <4 x i16>* %ptr2_i, align 1
+ %a = load <4 x i16>, ptr %ptr1_i, align 1
+ %b = load <4 x i16>, ptr %ptr2_i, align 1
%vabd = tail call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %a, <4 x i16> %b)
%acc_next = add <4 x i16> %acc_phi, %vabd
%next_i = add i32 %i, 4
@@ -445,8 +445,8 @@ loop:
%acc_phi = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
%ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
- %a = load <8 x i16>, <8 x i16>* %ptr1_i, align 1
- %b = load <8 x i16>, <8 x i16>* %ptr2_i, align 1
+ %a = load <8 x i16>, ptr %ptr1_i, align 1
+ %b = load <8 x i16>, ptr %ptr2_i, align 1
%vabd = tail call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> %a, <8 x i16> %b)
%acc_next = add <8 x i16> %acc_phi, %vabd
%next_i = add i32 %i, 8
@@ -480,8 +480,8 @@ loop:
%acc_phi = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
%ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
- %a = load <8 x i8>, <8 x i8>* %ptr1_i, align 1
- %b = load <8 x i8>, <8 x i8>* %ptr2_i, align 1
+ %a = load <8 x i8>, ptr %ptr1_i, align 1
+ %b = load <8 x i8>, ptr %ptr2_i, align 1
%vabd = tail call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %a, <8 x i8> %b)
%vmov = zext <8 x i8> %vabd to <8 x i16>
%acc_next = add <8 x i16> %vmov, %acc_phi
@@ -516,8 +516,8 @@ loop:
%acc_phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
%ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
- %a = load <4 x i16>, <4 x i16>* %ptr1_i, align 1
- %b = load <4 x i16>, <4 x i16>* %ptr2_i, align 1
+ %a = load <4 x i16>, ptr %ptr1_i, align 1
+ %b = load <4 x i16>, ptr %ptr2_i, align 1
%vabd = tail call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %a, <4 x i16> %b)
%vmov = zext <4 x i16> %vabd to <4 x i32>
%acc_next = add <4 x i32> %vmov, %acc_phi
diff --git a/llvm/test/CodeGen/AArch64/cgdata-merge-local.ll b/llvm/test/CodeGen/AArch64/cgdata-merge-local.ll
index 608fe29..d421b3f 100644
--- a/llvm/test/CodeGen/AArch64/cgdata-merge-local.ll
+++ b/llvm/test/CodeGen/AArch64/cgdata-merge-local.ll
@@ -54,9 +54,9 @@
define i32 @f1(i32 %a) {
entry:
%idxprom = sext i32 %a to i64
- %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @g, i64 0, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
- %1 = load volatile i32, i32* @g1, align 4
+ %arrayidx = getelementptr inbounds [0 x i32], ptr @g, i64 0, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
+ %1 = load volatile i32, ptr @g1, align 4
%mul = mul nsw i32 %1, %0
%add = add nsw i32 %mul, 1
ret i32 %add
@@ -65,9 +65,9 @@ entry:
define i32 @f2(i32 %a) {
entry:
%idxprom = sext i32 %a to i64
- %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @g, i64 0, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
- %1 = load volatile i32, i32* @g2, align 4
+ %arrayidx = getelementptr inbounds [0 x i32], ptr @g, i64 0, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
+ %1 = load volatile i32, ptr @g2, align 4
%mul = mul nsw i32 %1, %0
%add = add nsw i32 %mul, 1
ret i32 %add
diff --git a/llvm/test/CodeGen/AArch64/cgdata-merge-no-params.ll b/llvm/test/CodeGen/AArch64/cgdata-merge-no-params.ll
index 10f0e10..a9da125 100644
--- a/llvm/test/CodeGen/AArch64/cgdata-merge-no-params.ll
+++ b/llvm/test/CodeGen/AArch64/cgdata-merge-no-params.ll
@@ -19,9 +19,9 @@
define i32 @f1(i32 %a) {
entry:
%idxprom = sext i32 %a to i64
- %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @g, i64 0, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
- %1 = load volatile i32, i32* @g1, align 4
+ %arrayidx = getelementptr inbounds [0 x i32], ptr @g, i64 0, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
+ %1 = load volatile i32, ptr @g1, align 4
%mul = mul nsw i32 %1, %0
%add = add nsw i32 %mul, 1
ret i32 %add
@@ -30,9 +30,9 @@ entry:
define i32 @f2(i32 %a) {
entry:
%idxprom = sext i32 %a to i64
- %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @g, i64 0, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
- %1 = load volatile i32, i32* @g1, align 4
+ %arrayidx = getelementptr inbounds [0 x i32], ptr @g, i64 0, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
+ %1 = load volatile i32, ptr @g1, align 4
%mul = mul nsw i32 %1, %0
%add = add nsw i32 %mul, 1
ret i32 %add
diff --git a/llvm/test/CodeGen/AArch64/cgdata-no-merge-unnamed.ll b/llvm/test/CodeGen/AArch64/cgdata-no-merge-unnamed.ll
index 9986af7..7ab2aba 100644
--- a/llvm/test/CodeGen/AArch64/cgdata-no-merge-unnamed.ll
+++ b/llvm/test/CodeGen/AArch64/cgdata-no-merge-unnamed.ll
@@ -12,9 +12,9 @@
define i32 @0(i32 %a) {
entry:
%idxprom = sext i32 %a to i64
- %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @g, i64 0, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
- %1 = load volatile i32, i32* @g1, align 4
+ %arrayidx = getelementptr inbounds [0 x i32], ptr @g, i64 0, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
+ %1 = load volatile i32, ptr @g1, align 4
%mul = mul nsw i32 %1, %0
%add = add nsw i32 %mul, 1
ret i32 %add
@@ -23,9 +23,9 @@ entry:
define i32 @1(i32 %a) {
entry:
%idxprom = sext i32 %a to i64
- %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @g, i64 0, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
- %1 = load volatile i32, i32* @g2, align 4
+ %arrayidx = getelementptr inbounds [0 x i32], ptr @g, i64 0, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
+ %1 = load volatile i32, ptr @g2, align 4
%mul = mul nsw i32 %1, %0
%add = add nsw i32 %mul, 1
ret i32 %add
diff --git a/llvm/test/CodeGen/AArch64/divrem.ll b/llvm/test/CodeGen/AArch64/divrem.ll
index 5cd7e09..e3cbd17 100644
--- a/llvm/test/CodeGen/AArch64/divrem.ll
+++ b/llvm/test/CodeGen/AArch64/divrem.ll
@@ -2,7 +2,7 @@
; SDIVREM/UDIVREM DAG nodes are generated but expanded when lowering and
; should not generate select error.
-define <2 x i32> @test_udivrem(<2 x i32> %x, < 2 x i32> %y, < 2 x i32>* %z) {
+define <2 x i32> @test_udivrem(<2 x i32> %x, < 2 x i32> %y, ptr %z) {
; CHECK-LABEL: test_udivrem
; CHECK-DAG: udivrem
; CHECK-NOT: LLVM ERROR: Cannot select
@@ -12,10 +12,10 @@ define <2 x i32> @test_udivrem(<2 x i32> %x, < 2 x i32> %y, < 2 x i32>* %z) {
ret <2 x i32> %1
}
-define <4 x i32> @test_sdivrem(<4 x i32> %x, ptr %y) {
+define <4 x i32> @test_sdivrem(<4 x i32> %x, ptr %y) {
; CHECK-LABEL: test_sdivrem
; CHECK-DAG: sdivrem
- %div = sdiv <4 x i32> %x, < i32 20, i32 20, i32 20, i32 20 >
+ %div = sdiv <4 x i32> %x, < i32 20, i32 20, i32 20, i32 20 >
store <4 x i32> %div, ptr %y
%1 = srem <4 x i32> %x, < i32 20, i32 20, i32 20, i32 20 >
ret <4 x i32> %1
diff --git a/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll b/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll
index 91cf605..c0c8894 100644
--- a/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll
+++ b/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll
@@ -85,7 +85,7 @@ define i64 @test_ldrsw_ldursw(ptr %p) #0 {
; CHECK-NEXT: add.2d v0, v[[V0]], v[[V1]]
; CHECK-NEXT: ret
define <2 x i64> @test_ldrq_ldruq_invalidoffset(ptr %p) #0 {
- %tmp1 = load <2 x i64>, < 2 x i64>* %p, align 8
+ %tmp1 = load <2 x i64>, ptr %p, align 8
%add.ptr2 = getelementptr inbounds i64, ptr %p, i64 3
%tmp2 = load <2 x i64>, ptr %add.ptr2, align 8
%add = add nsw <2 x i64> %tmp1, %tmp2
diff --git a/llvm/test/CodeGen/AArch64/machine-combiner-subregs.mir b/llvm/test/CodeGen/AArch64/machine-combiner-subregs.mir
new file mode 100644
index 0000000..c96a038
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/machine-combiner-subregs.mir
@@ -0,0 +1,35 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6
+# RUN: llc -mtriple=aarch64-gnu-linux -mcpu=neoverse-n2 -run-pass=machine-combiner -o - %s | FileCheck %s
+
+# Make sure machine combiner doesn't drop subregister indexes.
+
+---
+name: reassociate_adds2_reassoc
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $q0, $q1, $q2, $q3
+
+ ; CHECK-LABEL: name: reassociate_adds2_reassoc
+ ; CHECK: liveins: $q0, $q1, $q2, $q3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr128 = COPY $q2
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:fpr128 = COPY $q3
+ ; CHECK-NEXT: [[FADDSrr:%[0-9]+]]:fpr32 = nsz reassoc nofpexcept FADDSrr [[COPY]].ssub, [[COPY1]].ssub, implicit $fpcr
+ ; CHECK-NEXT: [[FADDSrr1:%[0-9]+]]:fpr32 = nsz reassoc nofpexcept FADDSrr [[COPY2]].ssub, [[COPY3]].ssub, implicit $fpcr
+ ; CHECK-NEXT: [[FADDSrr2:%[0-9]+]]:fpr32 = nsz reassoc nofpexcept FADDSrr killed [[FADDSrr1]], killed [[FADDSrr]], implicit $fpcr
+ ; CHECK-NEXT: $s0 = COPY [[FADDSrr2]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $s0
+ %0:fpr128 = COPY $q0
+ %1:fpr128 = COPY $q1
+ %2:fpr128 = COPY $q2
+ %3:fpr128 = COPY $q3
+ %4:fpr32 = nsz reassoc nofpexcept FADDSrr %0.ssub, %1.ssub, implicit $fpcr
+ %5:fpr32 = nsz reassoc nofpexcept FADDSrr %2.ssub, killed %4, implicit $fpcr
+ %6:fpr32 = nsz reassoc nofpexcept FADDSrr killed %5, %3.ssub, implicit $fpcr
+ $s0 = COPY %6
+ RET_ReallyLR implicit $s0
+
+...
diff --git a/llvm/test/CodeGen/AArch64/machine-outliner-iterative.mir b/llvm/test/CodeGen/AArch64/machine-outliner-iterative.mir
index b7fbdc0..a635231 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-iterative.mir
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-iterative.mir
@@ -6,9 +6,9 @@
#
#; define void @"$s12"(...) { define i64 @"$s5" (...) { define void @"$s13"(...) {
# ... ... ...
-# %8 = load i1, i1* %7 %8 = load i1, i1* %7
-# %9 = load i4, i4*, %6 %9 = load i4, i4*, %6 %9 = load i4, i4*, %6
-# store i4 %9, i4* %5 store i4 %9, i4* %5 store i4 %9, i4* %5
+# %8 = load i1, ptr %7 %8 = load i1, ptr %7
+# %9 = load i4, ptr, %6 %9 = load i4, ptr, %6 %9 = load i4, ptr, %6
+# store i4 %9, ptr %5 store i4 %9, ptr %5 store i4 %9, ptr %5
# ... ... ...
# } } }
#
@@ -16,7 +16,7 @@
#
# define void @"$s12"(...) { define i64 @"$s5" (...) { define void @"$s13"(...) {
# ... ... ...
-# %8 = load i1, i1* %7 %8 = load i1, i1* %7
+# %8 = load i1, ptr %7 %8 = load i1, ptr %7
# call void @outlined_function_1_1 call void @outlined_function_1_1 call void @outlined_function_1_1
# ... ... ...
# } } }
diff --git a/llvm/test/CodeGen/AArch64/misched-fusion-cmp-bcc.ll b/llvm/test/CodeGen/AArch64/misched-fusion-cmp-bcc.ll
index 700a060..0a10e80 100644
--- a/llvm/test/CodeGen/AArch64/misched-fusion-cmp-bcc.ll
+++ b/llvm/test/CodeGen/AArch64/misched-fusion-cmp-bcc.ll
@@ -15,10 +15,10 @@
; RUN: llc %s -o - -O0 -mtriple=aarch64-unknown -mcpu=ampere1b | FileCheck %s
-define void @test_cmp_bcc_fusion(i32 %x, i32 %y, i32* %arr) {
+define void @test_cmp_bcc_fusion(i32 %x, i32 %y, ptr %arr) {
entry:
%cmp = icmp eq i32 %x, %y
- store i32 %x, i32* %arr, align 4
+ store i32 %x, ptr %arr, align 4
br i1 %cmp, label %if_true, label %if_false
if_true:
diff --git a/llvm/test/CodeGen/AArch64/no-quad-ldp-stp.ll b/llvm/test/CodeGen/AArch64/no-quad-ldp-stp.ll
index b7dde88..1a85f80 100644
--- a/llvm/test/CodeGen/AArch64/no-quad-ldp-stp.ll
+++ b/llvm/test/CodeGen/AArch64/no-quad-ldp-stp.ll
@@ -19,7 +19,7 @@ define void @test_nopair_st(ptr %ptr, <2 x double> %v1, <2 x double> %v2) {
; SLOW-NOT: ldp
; FAST: ldp
define <2 x i64> @test_nopair_ld(ptr %p) {
- %tmp1 = load <2 x i64>, < 2 x i64>* %p, align 8
+ %tmp1 = load <2 x i64>, ptr %p, align 8
%add.ptr2 = getelementptr inbounds i64, ptr %p, i64 2
%tmp2 = load <2 x i64>, ptr %add.ptr2, align 8
%add = add nsw <2 x i64> %tmp1, %tmp2
diff --git a/llvm/test/CodeGen/AArch64/ptrauth-bti-call.ll b/llvm/test/CodeGen/AArch64/ptrauth-bti-call.ll
index 0356a46..df5e1a9 100644
--- a/llvm/test/CodeGen/AArch64/ptrauth-bti-call.ll
+++ b/llvm/test/CodeGen/AArch64/ptrauth-bti-call.ll
@@ -17,7 +17,7 @@
; CHECK-NEXT: bti c
; CHECK-NEXT: mov x16, x0
; CHECK-NEXT: braaz x16
-define i32 @test_tailcall_ia_0(i32 ()* %arg0) #0 {
+define i32 @test_tailcall_ia_0(ptr %arg0) #0 {
%tmp0 = tail call i32 %arg0() [ "ptrauth"(i32 0, i64 0) ]
ret i32 %tmp0
}
@@ -26,7 +26,7 @@ define i32 @test_tailcall_ia_0(i32 ()* %arg0) #0 {
; CHECK-NEXT: bti c
; CHECK-NEXT: mov x16, x0
; CHECK-NEXT: brabz x16
-define i32 @test_tailcall_ib_0(i32 ()* %arg0) #0 {
+define i32 @test_tailcall_ib_0(ptr %arg0) #0 {
%tmp0 = tail call i32 %arg0() [ "ptrauth"(i32 1, i64 0) ]
ret i32 %tmp0
}
@@ -36,7 +36,7 @@ define i32 @test_tailcall_ib_0(i32 ()* %arg0) #0 {
; CHECK-NEXT: mov x16, x0
; CHECK-NEXT: mov x17, #42
; CHECK-NEXT: braa x16, x17
-define i32 @test_tailcall_ia_imm(i32 ()* %arg0) #0 {
+define i32 @test_tailcall_ia_imm(ptr %arg0) #0 {
%tmp0 = tail call i32 %arg0() [ "ptrauth"(i32 0, i64 42) ]
ret i32 %tmp0
}
@@ -46,7 +46,7 @@ define i32 @test_tailcall_ia_imm(i32 ()* %arg0) #0 {
; CHECK-NEXT: mov x16, x0
; CHECK-NEXT: mov x17, #42
; CHECK-NEXT: brab x16, x17
-define i32 @test_tailcall_ib_imm(i32 ()* %arg0) #0 {
+define i32 @test_tailcall_ib_imm(ptr %arg0) #0 {
%tmp0 = tail call i32 %arg0() [ "ptrauth"(i32 1, i64 42) ]
ret i32 %tmp0
}
@@ -60,8 +60,8 @@ define i32 @test_tailcall_ib_imm(i32 ()* %arg0) #0 {
; ELF-NEXT: ldr x1, [x1]
; ELF-NEXT: mov x16, x0
; ELF-NEXT: braa x16, x1
-define i32 @test_tailcall_ia_var(i32 ()* %arg0, i64* %arg1) #0 {
- %tmp0 = load i64, i64* %arg1
+define i32 @test_tailcall_ia_var(ptr %arg0, ptr %arg1) #0 {
+ %tmp0 = load i64, ptr %arg1
%tmp1 = tail call i32 %arg0() [ "ptrauth"(i32 0, i64 %tmp0) ]
ret i32 %tmp1
}
@@ -75,8 +75,8 @@ define i32 @test_tailcall_ia_var(i32 ()* %arg0, i64* %arg1) #0 {
; ELF-NEXT: ldr x1, [x1]
; ELF-NEXT: mov x16, x0
; ELF-NEXT: brab x16, x1
-define i32 @test_tailcall_ib_var(i32 ()* %arg0, i64* %arg1) #0 {
- %tmp0 = load i64, i64* %arg1
+define i32 @test_tailcall_ib_var(ptr %arg0, ptr %arg1) #0 {
+ %tmp0 = load i64, ptr %arg1
%tmp1 = tail call i32 %arg0() [ "ptrauth"(i32 1, i64 %tmp0) ]
ret i32 %tmp1
}
@@ -85,7 +85,7 @@ define i32 @test_tailcall_ib_var(i32 ()* %arg0, i64* %arg1) #0 {
; CHECK-NEXT: bti c
; CHECK-NEXT: mov x16, x0
; CHECK-NEXT: braa x16, x1
-define i32 @test_tailcall_ia_arg(i32 ()* %arg0, i64 %arg1) #0 {
+define i32 @test_tailcall_ia_arg(ptr %arg0, i64 %arg1) #0 {
%tmp0 = tail call i32 %arg0() [ "ptrauth"(i32 0, i64 %arg1) ]
ret i32 %tmp0
}
@@ -94,7 +94,7 @@ define i32 @test_tailcall_ia_arg(i32 ()* %arg0, i64 %arg1) #0 {
; CHECK-NEXT: bti c
; CHECK-NEXT: mov x16, x0
; CHECK-NEXT: brab x16, x1
-define i32 @test_tailcall_ib_arg(i32 ()* %arg0, i64 %arg1) #0 {
+define i32 @test_tailcall_ib_arg(ptr %arg0, i64 %arg1) #0 {
%tmp0 = tail call i32 %arg0() [ "ptrauth"(i32 1, i64 %arg1) ]
ret i32 %tmp0
}
@@ -103,8 +103,8 @@ define i32 @test_tailcall_ib_arg(i32 ()* %arg0, i64 %arg1) #0 {
; CHECK-NEXT: bti c
; CHECK-NEXT: ldr x16, [x0]
; CHECK-NEXT: braa x16, x1
-define i32 @test_tailcall_ia_arg_ind(i32 ()** %arg0, i64 %arg1) #0 {
- %tmp0 = load i32 ()*, i32 ()** %arg0
+define i32 @test_tailcall_ia_arg_ind(ptr %arg0, i64 %arg1) #0 {
+ %tmp0 = load ptr, ptr %arg0
%tmp1 = tail call i32 %tmp0() [ "ptrauth"(i32 0, i64 %arg1) ]
ret i32 %tmp1
}
@@ -113,8 +113,8 @@ define i32 @test_tailcall_ia_arg_ind(i32 ()** %arg0, i64 %arg1) #0 {
; CHECK-NEXT: bti c
; CHECK-NEXT: ldr x16, [x0]
; CHECK-NEXT: brab x16, x1
-define i32 @test_tailcall_ib_arg_ind(i32 ()** %arg0, i64 %arg1) #0 {
- %tmp0 = load i32 ()*, i32 ()** %arg0
+define i32 @test_tailcall_ib_arg_ind(ptr %arg0, i64 %arg1) #0 {
+ %tmp0 = load ptr, ptr %arg0
%tmp1 = tail call i32 %tmp0() [ "ptrauth"(i32 1, i64 %arg1) ]
ret i32 %tmp1
}
diff --git a/llvm/test/CodeGen/AArch64/ptrauth-call-rv-marker.ll b/llvm/test/CodeGen/AArch64/ptrauth-call-rv-marker.ll
index 9cf77b1..950db5f 100644
--- a/llvm/test/CodeGen/AArch64/ptrauth-call-rv-marker.ll
+++ b/llvm/test/CodeGen/AArch64/ptrauth-call-rv-marker.ll
@@ -4,18 +4,18 @@
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "arm64e-apple-iphoneos"
-declare i8* @foo0(i32)
-declare i8* @foo1()
+declare ptr @foo0(i32)
+declare ptr @foo1()
-declare void @llvm.objc.release(i8*)
-declare i8* @llvm.objc.retainAutoreleasedReturnValue(i8*)
-declare i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8*)
+declare void @llvm.objc.release(ptr)
+declare ptr @llvm.objc.retainAutoreleasedReturnValue(ptr)
+declare ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue(ptr)
-declare void @foo2(i8*)
+declare void @foo2(ptr)
declare void @foo(i64, i64, i64)
-define void @rv_marker_ptrauth_blraa(i8* ()** %arg0, i64 %arg1) {
+define void @rv_marker_ptrauth_blraa(ptr %arg0, i64 %arg1) {
; CHECK-LABEL: rv_marker_ptrauth_blraa
; CHECK: ldr [[ADDR:x[0-9]+]], [
; CHECK-NEXT: blraa [[ADDR]], x1
@@ -23,14 +23,14 @@ define void @rv_marker_ptrauth_blraa(i8* ()** %arg0, i64 %arg1) {
; CHECK-NEXT: bl objc_retainAutoreleasedReturnValue
;
entry:
- %tmp0 = load i8* ()*, i8* ()** %arg0
- %call0 = call i8* %tmp0() [ "ptrauth"(i32 0, i64 %arg1), "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
- tail call void @foo2(i8* %call0)
- tail call void @llvm.objc.release(i8* %call0)
+ %tmp0 = load ptr, ptr %arg0
+ %call0 = call ptr %tmp0() [ "ptrauth"(i32 0, i64 %arg1), "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
+ tail call void @foo2(ptr %call0)
+ tail call void @llvm.objc.release(ptr %call0)
ret void
}
-define void @rv_marker_ptrauth_blraa_unsafeClaim(i8* ()** %arg0, i64 %arg1) {
+define void @rv_marker_ptrauth_blraa_unsafeClaim(ptr %arg0, i64 %arg1) {
; CHECK-LABEL: rv_marker_ptrauth_blraa_unsafeClaim
; CHECK: ldr [[ADDR:x[0-9]+]], [
; CHECK-NEXT: blraa [[ADDR]], x1
@@ -38,14 +38,14 @@ define void @rv_marker_ptrauth_blraa_unsafeClaim(i8* ()** %arg0, i64 %arg1) {
; CHECK-NEXT: bl objc_unsafeClaimAutoreleasedReturnValue
;
entry:
- %tmp0 = load i8* ()*, i8* ()** %arg0
- %call0 = call i8* %tmp0() [ "ptrauth"(i32 0, i64 %arg1), "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.unsafeClaimAutoreleasedReturnValue) ]
- tail call void @foo2(i8* %call0)
- tail call void @llvm.objc.release(i8* %call0)
+ %tmp0 = load ptr, ptr %arg0
+ %call0 = call ptr %tmp0() [ "ptrauth"(i32 0, i64 %arg1), "clang.arc.attachedcall"(ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue) ]
+ tail call void @foo2(ptr %call0)
+ tail call void @llvm.objc.release(ptr %call0)
ret void
}
-define void @rv_marker_ptrauth_blraa_disc_imm16(i8* ()** %arg0) {
+define void @rv_marker_ptrauth_blraa_disc_imm16(ptr %arg0) {
; CHECK-LABEL: rv_marker_ptrauth_blraa_disc_imm16
; CHECK: ldr [[ADDR:x[0-9]+]], [
; CHECK-NEXT: mov x17, #45431
@@ -53,14 +53,14 @@ define void @rv_marker_ptrauth_blraa_disc_imm16(i8* ()** %arg0) {
; CHECK-NEXT: mov x29, x29
; CHECK-NEXT: bl objc_retainAutoreleasedReturnValue
;
- %tmp0 = load i8* ()*, i8* ()** %arg0
- %call0 = call i8* %tmp0() [ "ptrauth"(i32 1, i64 45431), "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
- tail call void @foo2(i8* %call0)
- tail call void @llvm.objc.release(i8* %call0)
+ %tmp0 = load ptr, ptr %arg0
+ %call0 = call ptr %tmp0() [ "ptrauth"(i32 1, i64 45431), "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
+ tail call void @foo2(ptr %call0)
+ tail call void @llvm.objc.release(ptr %call0)
ret void
}
-define void @rv_marker_ptrauth_blraa_multiarg(i8* (i64, i64, i64)** %arg0, i64 %arg1, i64 %a, i64 %b, i64 %c) {
+define void @rv_marker_ptrauth_blraa_multiarg(ptr %arg0, i64 %arg1, i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: rv_marker_ptrauth_blraa_multiarg
; CHECK: mov [[TMP:x[0-9]+]], x1
; CHECK-DAG: ldr [[ADDR:x[0-9]+]]
@@ -71,28 +71,28 @@ define void @rv_marker_ptrauth_blraa_multiarg(i8* (i64, i64, i64)** %arg0, i64 %
; CHECK-NEXT: bl objc_retainAutoreleasedReturnValue
;
entry:
- %tmp0 = load i8* (i64, i64, i64)*, i8* (i64, i64, i64)** %arg0
- %call0 = call i8* %tmp0(i64 %c, i64 %b, i64 %a) [ "ptrauth"(i32 0, i64 %arg1), "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
- tail call void @foo2(i8* %call0)
- tail call void @llvm.objc.release(i8* %call0)
+ %tmp0 = load ptr, ptr %arg0
+ %call0 = call ptr %tmp0(i64 %c, i64 %b, i64 %a) [ "ptrauth"(i32 0, i64 %arg1), "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
+ tail call void @foo2(ptr %call0)
+ tail call void @llvm.objc.release(ptr %call0)
ret void
}
-define void @rv_marker_ptrauth_blrab(i8* ()** %arg0, i64 %arg1) {
+define void @rv_marker_ptrauth_blrab(ptr %arg0, i64 %arg1) {
; CHECK-LABEL: rv_marker_ptrauth_blrab
; CHECK: ldr [[ADDR:x[0-9]+]], [
; CHECK-NEXT: blrab [[ADDR]], x1
; CHECK-NEXT: mov x29, x29
; CHECK-NEXT: bl objc_retainAutoreleasedReturnValue
;
- %tmp0 = load i8* ()*, i8* ()** %arg0
- %call0 = call i8* %tmp0() [ "ptrauth"(i32 1, i64 %arg1), "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
- tail call void @foo2(i8* %call0)
- tail call void @llvm.objc.release(i8* %call0)
+ %tmp0 = load ptr, ptr %arg0
+ %call0 = call ptr %tmp0() [ "ptrauth"(i32 1, i64 %arg1), "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
+ tail call void @foo2(ptr %call0)
+ tail call void @llvm.objc.release(ptr %call0)
ret void
}
-define void @rv_marker_ptrauth_blrab_disc_imm16(i8* ()** %arg0) {
+define void @rv_marker_ptrauth_blrab_disc_imm16(ptr %arg0) {
; CHECK-LABEL: rv_marker_ptrauth_blrab_disc_imm16
; CHECK: ldr [[ADDR:x[0-9]+]], [
; CHECK-NEXT: mov x17, #256
@@ -100,42 +100,42 @@ define void @rv_marker_ptrauth_blrab_disc_imm16(i8* ()** %arg0) {
; CHECK-NEXT: mov x29, x29
; CHECK-NEXT: bl objc_retainAutoreleasedReturnValue
;
- %tmp0 = load i8* ()*, i8* ()** %arg0
- %call0 = call i8* %tmp0() [ "ptrauth"(i32 1, i64 256), "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
- tail call void @foo2(i8* %call0)
- tail call void @llvm.objc.release(i8* %call0)
+ %tmp0 = load ptr, ptr %arg0
+ %call0 = call ptr %tmp0() [ "ptrauth"(i32 1, i64 256), "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
+ tail call void @foo2(ptr %call0)
+ tail call void @llvm.objc.release(ptr %call0)
ret void
}
-define void @rv_marker_ptrauth_blraaz(i8* ()** %arg0) {
+define void @rv_marker_ptrauth_blraaz(ptr %arg0) {
; CHECK-LABEL: rv_marker_ptrauth_blraaz
; CHECK: ldr [[ADDR:x[0-9]+]], [
; CHECK-NEXT: blraaz [[ADDR]]
; CHECK-NEXT: mov x29, x29
; CHECK-NEXT: bl objc_retainAutoreleasedReturnValue
;
- %tmp0 = load i8* ()*, i8* ()** %arg0
- %call0 = call i8* %tmp0() [ "ptrauth"(i32 0, i64 0), "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
- tail call void @foo2(i8* %call0)
- tail call void @llvm.objc.release(i8* %call0)
+ %tmp0 = load ptr, ptr %arg0
+ %call0 = call ptr %tmp0() [ "ptrauth"(i32 0, i64 0), "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
+ tail call void @foo2(ptr %call0)
+ tail call void @llvm.objc.release(ptr %call0)
ret void
}
-define void @rv_marker_ptrauth_blrabz(i8* ()** %arg0) {
+define void @rv_marker_ptrauth_blrabz(ptr %arg0) {
; CHECK-LABEL: rv_marker_ptrauth_blrabz
; CHECK: ldr [[ADDR:x[0-9]+]], [
; CHECK-NEXT: blrabz [[ADDR]]
; CHECK-NEXT: mov x29, x29
; CHECK-NEXT: bl objc_retainAutoreleasedReturnValue
;
- %tmp0 = load i8* ()*, i8* ()** %arg0
- %call0 = call i8* %tmp0() [ "ptrauth"(i32 1, i64 0), "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
- tail call void @foo2(i8* %call0)
- tail call void @llvm.objc.release(i8* %call0)
+ %tmp0 = load ptr, ptr %arg0
+ %call0 = call ptr %tmp0() [ "ptrauth"(i32 1, i64 0), "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
+ tail call void @foo2(ptr %call0)
+ tail call void @llvm.objc.release(ptr %call0)
ret void
}
-define void @rv_marker_ptrauth_blrabz_multiarg(i8* (i64, i64, i64)** %arg0, i64 %a, i64 %b, i64 %c) {
+define void @rv_marker_ptrauth_blrabz_multiarg(ptr %arg0, i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: rv_marker_ptrauth_blrabz_multiarg
; CHECK: mov [[TMP:x[0-9]+]], x1
; CHECK-DAG: ldr [[ADDR:x[0-9]+]], [
@@ -146,9 +146,9 @@ define void @rv_marker_ptrauth_blrabz_multiarg(i8* (i64, i64, i64)** %arg0, i64
; CHECK-NEXT: mov x29, x29
; CHECK-NEXT: bl objc_retainAutoreleasedReturnValue
;
- %tmp0 = load i8* (i64, i64, i64)*, i8* (i64, i64, i64)** %arg0
- %call0 = call i8* %tmp0(i64 %c, i64 %b, i64 %a) [ "ptrauth"(i32 1, i64 0), "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
- tail call void @foo2(i8* %call0)
- tail call void @llvm.objc.release(i8* %call0)
+ %tmp0 = load ptr, ptr %arg0
+ %call0 = call ptr %tmp0(i64 %c, i64 %b, i64 %a) [ "ptrauth"(i32 1, i64 0), "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
+ tail call void @foo2(ptr %call0)
+ tail call void @llvm.objc.release(ptr %call0)
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/ptrauth-reloc.ll b/llvm/test/CodeGen/AArch64/ptrauth-reloc.ll
index 932cc94..02c643f 100644
--- a/llvm/test/CodeGen/AArch64/ptrauth-reloc.ll
+++ b/llvm/test/CodeGen/AArch64/ptrauth-reloc.ll
@@ -87,7 +87,7 @@
; CHECK-MACHO-NEXT: _g.offset.ref.da.0:
; CHECK-MACHO-NEXT: .quad (_g+16)@AUTH(da,0)
-@g.offset.ref.da.0 = constant ptr ptrauth (i8* getelementptr (i8, ptr @g, i64 16), i32 2)
+@g.offset.ref.da.0 = constant ptr ptrauth (ptr getelementptr (i8, ptr @g, i64 16), i32 2)
; CHECK-ELF-LABEL: .globl g.big_offset.ref.da.0
; CHECK-ELF-NEXT: .p2align 3
@@ -99,7 +99,7 @@
; CHECK-MACHO-NEXT: _g.big_offset.ref.da.0:
; CHECK-MACHO-NEXT: .quad (_g+2147549185)@AUTH(da,0)
-@g.big_offset.ref.da.0 = constant ptr ptrauth (i8* getelementptr (i8, ptr @g, i64 add (i64 2147483648, i64 65537)), i32 2)
+@g.big_offset.ref.da.0 = constant ptr ptrauth (ptr getelementptr (i8, ptr @g, i64 add (i64 2147483648, i64 65537)), i32 2)
; CHECK-ELF-LABEL: .globl g.weird_ref.da.0
; CHECK-ELF-NEXT: .p2align 3
@@ -111,7 +111,7 @@
; CHECK-MACHO-NEXT: _g.weird_ref.da.0:
; CHECK-MACHO-NEXT: .quad (_g+16)@AUTH(da,0)
-@g.weird_ref.da.0 = constant i64 ptrtoint (ptr inttoptr (i64 ptrtoint (ptr ptrauth (i8* getelementptr (i8, ptr @g, i64 16), i32 2) to i64) to ptr) to i64)
+@g.weird_ref.da.0 = constant i64 ptrtoint (ptr inttoptr (i64 ptrtoint (ptr ptrauth (ptr getelementptr (i8, ptr @g, i64 16), i32 2) to i64) to ptr) to i64)
; CHECK-ELF-LABEL: .globl g_weak.ref.ia.42
; CHECK-ELF-NEXT: .p2align 3
diff --git a/llvm/test/CodeGen/AMDGPU/remat-fp64-constants.ll b/llvm/test/CodeGen/AMDGPU/remat-fp64-constants.ll
index c552f9d..88a51e9 100644
--- a/llvm/test/CodeGen/AMDGPU/remat-fp64-constants.ll
+++ b/llvm/test/CodeGen/AMDGPU/remat-fp64-constants.ll
@@ -1,10 +1,13 @@
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 --stress-regalloc=10 < %s | FileCheck -check-prefix=GCN %s
; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 --stress-regalloc=10 < %s | FileCheck -check-prefix=GCN %s
+; Rematerialization test for fp64 constants (with intentionally high register pressure).
+; Check that there are at least six constant MOVs, not necessarily consecutive, inside the loop.
+
; GCN-LABEL: {{^}}test_remat_sgpr:
; GCN-NOT: v_writelane_b32
-; GCN-COUNT-4: s_mov_b32 s{{[0-9]+}}, 0x
; GCN: {{^}}[[LOOP:.LBB[0-9_]+]]:
+; GCN-COUNT-6: {{s_mov_b32|v_mov_b32_e32}} {{[sv]}}{{[0-9]+}}, 0x
; GCN-NOT: v_writelane_b32
; GCN: s_cbranch_{{[^ ]+}} [[LOOP]]
; GCN: .sgpr_spill_count: 0
diff --git a/llvm/test/CodeGen/AMDGPU/spillv16.ll b/llvm/test/CodeGen/AMDGPU/spillv16.ll
index 2d54ac8..9686c9d 100644
--- a/llvm/test/CodeGen/AMDGPU/spillv16.ll
+++ b/llvm/test/CodeGen/AMDGPU/spillv16.ll
@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=+real-true16 -enable-misched=0 -post-RA-scheduler=0 -stress-regalloc=8 < %s | FileCheck %s -check-prefixes=GCN,GCN-TRUE16
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=-real-true16 -enable-misched=0 -post-RA-scheduler=0 -stress-regalloc=8 < %s | FileCheck %s -check-prefixes=GCN,GCN-FAKE16
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -mattr=+real-true16,+d16-write-vgpr32 -enable-misched=0 -post-RA-scheduler=0 -stress-regalloc=8 < %s | FileCheck %s -check-prefixes=GFX12-TRUE16,GFX12-TRUE16-D16W32
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -mattr=+real-true16,-d16-write-vgpr32 -enable-misched=0 -post-RA-scheduler=0 -stress-regalloc=8 < %s | FileCheck %s -check-prefixes=GFX12-TRUE16,GFX12-TRUE16-D16W16
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 -mattr=+real-true16 -enable-misched=0 -post-RA-scheduler=0 -stress-regalloc=8 < %s | FileCheck %s -check-prefixes=GFX1250,GFX1250-TRUE16
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 -mattr=-real-true16 -enable-misched=0 -post-RA-scheduler=0 -stress-regalloc=8 < %s | FileCheck %s -check-prefixes=GFX1250,GFX1250-FAKE16
@@ -35,6 +37,26 @@ define void @spill_i16_alu() {
; GCN-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
+; GFX12-TRUE16-LABEL: spill_i16_alu:
+; GFX12-TRUE16: ; %bb.0: ; %entry
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: scratch_load_d16_b16 v0, off, s32 scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x7b, v0.l
+; GFX12-TRUE16-NEXT: scratch_store_b16 off, v0, s32 offset:2 ; 2-byte Folded Spill
+; GFX12-TRUE16-NEXT: ;;#ASMSTART
+; GFX12-TRUE16-NEXT: ;;#ASMEND
+; GFX12-TRUE16-NEXT: scratch_load_d16_b16 v0, off, s32 offset:2 th:TH_LOAD_LU ; 2-byte Folded Reload
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: scratch_store_b16 off, v0, s32 scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
; GFX1250-TRUE16-LABEL: spill_i16_alu:
; GFX1250-TRUE16: ; %bb.0: ; %entry
; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
@@ -126,6 +148,56 @@ define void @spill_i16_alu_two_vals() {
; GCN-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
+; GFX12-TRUE16-D16W32-LABEL: spill_i16_alu_two_vals:
+; GFX12-TRUE16-D16W32: ; %bb.0: ; %entry
+; GFX12-TRUE16-D16W32-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-D16W32-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-D16W32-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-D16W32-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-D16W32-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-D16W32-NEXT: scratch_load_d16_b16 v0, off, s32 scope:SCOPE_SYS
+; GFX12-TRUE16-D16W32-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-D16W32-NEXT: v_add_nc_u16 v0.l, 0x7b, v0.l
+; GFX12-TRUE16-D16W32-NEXT: scratch_store_b16 off, v0, s32 offset:6 ; 2-byte Folded Spill
+; GFX12-TRUE16-D16W32-NEXT: ;;#ASMSTART
+; GFX12-TRUE16-D16W32-NEXT: ;;#ASMEND
+; GFX12-TRUE16-D16W32-NEXT: scratch_load_d16_b16 v0, off, s32 offset:4 scope:SCOPE_SYS
+; GFX12-TRUE16-D16W32-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-D16W32-NEXT: scratch_load_d16_hi_b16 v0, off, s32 offset:6 th:TH_LOAD_LU ; 2-byte Folded Reload
+; GFX12-TRUE16-D16W32-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-D16W32-NEXT: v_add_nc_u16 v0.l, 0x7b, v0.l
+; GFX12-TRUE16-D16W32-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-D16W32-NEXT: scratch_store_d16_hi_b16 off, v0, s32 scope:SCOPE_SYS
+; GFX12-TRUE16-D16W32-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-D16W32-NEXT: scratch_store_b16 off, v0, s32 offset:4 scope:SCOPE_SYS
+; GFX12-TRUE16-D16W32-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-D16W32-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-TRUE16-D16W16-LABEL: spill_i16_alu_two_vals:
+; GFX12-TRUE16-D16W16: ; %bb.0: ; %entry
+; GFX12-TRUE16-D16W16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-D16W16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-D16W16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-D16W16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-D16W16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-D16W16-NEXT: scratch_load_d16_b16 v0, off, s32 scope:SCOPE_SYS
+; GFX12-TRUE16-D16W16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-D16W16-NEXT: v_add_nc_u16 v0.l, 0x7b, v0.l
+; GFX12-TRUE16-D16W16-NEXT: scratch_store_b16 off, v0, s32 offset:6 ; 2-byte Folded Spill
+; GFX12-TRUE16-D16W16-NEXT: ;;#ASMSTART
+; GFX12-TRUE16-D16W16-NEXT: ;;#ASMEND
+; GFX12-TRUE16-D16W16-NEXT: scratch_load_d16_b16 v0, off, s32 offset:4 scope:SCOPE_SYS
+; GFX12-TRUE16-D16W16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-D16W16-NEXT: scratch_load_d16_hi_b16 v0, off, s32 offset:6 th:TH_LOAD_LU ; 2-byte Folded Reload
+; GFX12-TRUE16-D16W16-NEXT: v_add_nc_u16 v0.l, 0x7b, v0.l
+; GFX12-TRUE16-D16W16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-D16W16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-D16W16-NEXT: scratch_store_d16_hi_b16 off, v0, s32 scope:SCOPE_SYS
+; GFX12-TRUE16-D16W16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-D16W16-NEXT: scratch_store_b16 off, v0, s32 offset:4 scope:SCOPE_SYS
+; GFX12-TRUE16-D16W16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-D16W16-NEXT: s_setpc_b64 s[30:31]
+;
; GFX1250-TRUE16-LABEL: spill_i16_alu_two_vals:
; GFX1250-TRUE16: ; %bb.0: ; %entry
; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
@@ -223,6 +295,25 @@ define void @spill_i16() {
; GCN-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
+; GFX12-TRUE16-LABEL: spill_i16:
+; GFX12-TRUE16: ; %bb.0: ; %entry
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: scratch_load_d16_b16 v0, off, s32 scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: scratch_store_b16 off, v0, s32 offset:2 ; 2-byte Folded Spill
+; GFX12-TRUE16-NEXT: ;;#ASMSTART
+; GFX12-TRUE16-NEXT: ;;#ASMEND
+; GFX12-TRUE16-NEXT: scratch_load_d16_b16 v0, off, s32 offset:2 th:TH_LOAD_LU ; 2-byte Folded Reload
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: scratch_store_b16 off, v0, s32 scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
; GFX1250-LABEL: spill_i16:
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
@@ -282,6 +373,25 @@ define void @spill_half() {
; GCN-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
+; GFX12-TRUE16-LABEL: spill_half:
+; GFX12-TRUE16: ; %bb.0: ; %entry
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: scratch_load_d16_b16 v0, off, s32 scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: scratch_store_b16 off, v0, s32 offset:2 ; 2-byte Folded Spill
+; GFX12-TRUE16-NEXT: ;;#ASMSTART
+; GFX12-TRUE16-NEXT: ;;#ASMEND
+; GFX12-TRUE16-NEXT: scratch_load_d16_b16 v0, off, s32 offset:2 th:TH_LOAD_LU ; 2-byte Folded Reload
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: scratch_store_b16 off, v0, s32 scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
; GFX1250-LABEL: spill_half:
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
@@ -341,6 +451,25 @@ define void @spill_i16_from_v2i16() {
; GCN-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
+; GFX12-TRUE16-LABEL: spill_i16_from_v2i16:
+; GFX12-TRUE16: ; %bb.0: ; %entry
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: scratch_load_d16_b16 v0, off, s32 offset:2 scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: scratch_store_b16 off, v0, s32 offset:8 ; 2-byte Folded Spill
+; GFX12-TRUE16-NEXT: ;;#ASMSTART
+; GFX12-TRUE16-NEXT: ;;#ASMEND
+; GFX12-TRUE16-NEXT: scratch_load_d16_b16 v0, off, s32 offset:8 th:TH_LOAD_LU ; 2-byte Folded Reload
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: scratch_store_b16 off, v0, s32 offset:2 scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
; GFX1250-LABEL: spill_i16_from_v2i16:
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
@@ -414,13 +543,39 @@ define void @spill_2xi16_from_v2i16() {
; GCN-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
+; GFX12-TRUE16-LABEL: spill_2xi16_from_v2i16:
+; GFX12-TRUE16: ; %bb.0: ; %entry
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: scratch_load_d16_b16 v0, off, s32 offset:2 scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: scratch_store_b16 off, v0, s32 offset:8 ; 2-byte Folded Spill
+; GFX12-TRUE16-NEXT: scratch_load_d16_b16 v0, off, s32 scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: scratch_store_b16 off, v0, s32 offset:10 ; 2-byte Folded Spill
+; GFX12-TRUE16-NEXT: ;;#ASMSTART
+; GFX12-TRUE16-NEXT: ;;#ASMEND
+; GFX12-TRUE16-NEXT: scratch_load_d16_b16 v0, off, s32 offset:8 th:TH_LOAD_LU ; 2-byte Folded Reload
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: scratch_store_b16 off, v0, s32 offset:2 scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: scratch_load_d16_b16 v0, off, s32 offset:10 th:TH_LOAD_LU ; 2-byte Folded Reload
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: scratch_store_b16 off, v0, s32 scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
; GFX1250-TRUE16-LABEL: spill_2xi16_from_v2i16:
; GFX1250-TRUE16: ; %bb.0: ; %entry
; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
; GFX1250-TRUE16-NEXT: scratch_load_u16 v0, off, s32 offset:2 scope:SCOPE_SYS
; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX1250-TRUE16-NEXT: s_clause 0x1
+; GFX1250-TRUE16-NEXT: s_clause 0x1 ; 4-byte Folded Spill
; GFX1250-TRUE16-NEXT: scratch_store_b32 off, v0, s32 offset:12
; GFX1250-TRUE16-NEXT: scratch_load_u16 v0, off, s32 scope:SCOPE_SYS
; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
@@ -444,7 +599,7 @@ define void @spill_2xi16_from_v2i16() {
; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
; GFX1250-FAKE16-NEXT: scratch_load_u16 v0, off, s32 offset:2 scope:SCOPE_SYS
; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX1250-FAKE16-NEXT: s_clause 0x1
+; GFX1250-FAKE16-NEXT: s_clause 0x1 ; 4-byte Folded Spill
; GFX1250-FAKE16-NEXT: scratch_store_b32 off, v0, s32 offset:8
; GFX1250-FAKE16-NEXT: scratch_load_u16 v0, off, s32 scope:SCOPE_SYS
; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
@@ -520,6 +675,32 @@ define void @spill_2xi16_from_v2i16_one_free_reg() {
; GCN-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
+; GFX12-TRUE16-LABEL: spill_2xi16_from_v2i16_one_free_reg:
+; GFX12-TRUE16: ; %bb.0: ; %entry
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: scratch_load_d16_b16 v0, off, s32 offset:2 scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: scratch_store_b16 off, v0, s32 offset:8 ; 2-byte Folded Spill
+; GFX12-TRUE16-NEXT: scratch_load_d16_b16 v0, off, s32 scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: scratch_store_b16 off, v0, s32 offset:10 ; 2-byte Folded Spill
+; GFX12-TRUE16-NEXT: ;;#ASMSTART
+; GFX12-TRUE16-NEXT: ;;#ASMEND
+; GFX12-TRUE16-NEXT: scratch_load_d16_b16 v0, off, s32 offset:8 th:TH_LOAD_LU ; 2-byte Folded Reload
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: scratch_store_b16 off, v0, s32 offset:2 scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: scratch_load_d16_b16 v0, off, s32 offset:10 th:TH_LOAD_LU ; 2-byte Folded Reload
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: scratch_store_b16 off, v0, s32 scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
; GFX1250-TRUE16-LABEL: spill_2xi16_from_v2i16_one_free_reg:
; GFX1250-TRUE16: ; %bb.0: ; %entry
; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
@@ -595,6 +776,25 @@ define void @spill_v2i16() {
; GCN-NEXT: s_waitcnt_vscnt null, 0x0
; GCN-NEXT: s_setpc_b64 s[30:31]
;
+; GFX12-TRUE16-LABEL: spill_v2i16:
+; GFX12-TRUE16: ; %bb.0: ; %entry
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: scratch_load_b32 v0, off, s32 offset:4 scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: scratch_store_b32 off, v0, s32 offset:8 ; 4-byte Folded Spill
+; GFX12-TRUE16-NEXT: ;;#ASMSTART
+; GFX12-TRUE16-NEXT: ;;#ASMEND
+; GFX12-TRUE16-NEXT: scratch_load_b32 v0, off, s32 offset:8 th:TH_LOAD_LU ; 4-byte Folded Reload
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: scratch_store_b32 off, v0, s32 offset:4 scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
; GFX1250-LABEL: spill_v2i16:
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
diff --git a/llvm/test/CodeGen/Hexagon/late_instr.ll b/llvm/test/CodeGen/Hexagon/late_instr.ll
index 93e5a7d..6bd1261 100644
--- a/llvm/test/CodeGen/Hexagon/late_instr.ll
+++ b/llvm/test/CodeGen/Hexagon/late_instr.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=hexagon -disable-hsdr < %s | FileCheck %s
+; RUN: llc -mtriple=hexagon -disable-hsdr -terminal-rule=0 < %s | FileCheck %s
; Check if instruction vandqrt.acc and its predecessor are scheduled in consecutive packets.
; CHECK: or(q{{[0-3]+}},q{{[0-3]+}})
diff --git a/llvm/test/CodeGen/Hexagon/swp-carried-1.ll b/llvm/test/CodeGen/Hexagon/swp-carried-1.ll
index 6993bd6..f2beadf 100644
--- a/llvm/test/CodeGen/Hexagon/swp-carried-1.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-carried-1.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=hexagon -rdf-opt=0 -disable-hexagon-misched -hexagon-initial-cfg-cleanup=0 -lsr-setupcost-depth-limit=1 -disable-cgp-delete-phis < %s -pipeliner-experimental-cg=true | FileCheck %s
+; RUN: llc -mtriple=hexagon -rdf-opt=0 -disable-hexagon-misched -hexagon-initial-cfg-cleanup=0 -lsr-setupcost-depth-limit=1 -disable-cgp-delete-phis < %s -pipeliner-experimental-cg=true -terminal-rule=0 | FileCheck %s
; Test that we generate the correct code when a loop-carried value
; is scheduled one stage earlier than its use. The code in
diff --git a/llvm/test/CodeGen/Hexagon/swp-conv3x3-nested.ll b/llvm/test/CodeGen/Hexagon/swp-conv3x3-nested.ll
index 006a8b6..69b89a6 100644
--- a/llvm/test/CodeGen/Hexagon/swp-conv3x3-nested.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-conv3x3-nested.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=hexagon < %s -pipeliner-experimental-cg=true | FileCheck %s
+; RUN: llc -mtriple=hexagon < %s -pipeliner-experimental-cg=true -terminal-rule=0 | FileCheck %s
; This version of the conv3x3 test has both loops. This test checks that the
; inner loop has 14 packets.
diff --git a/llvm/test/CodeGen/Hexagon/swp-epilog-phi11.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-phi11.ll
index d1b9c51..0466b6d 100644
--- a/llvm/test/CodeGen/Hexagon/swp-epilog-phi11.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-epilog-phi11.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=hexagon-unknown-elf -mcpu=hexagonv55 -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s
+; RUN: llc -mtriple=hexagon-unknown-elf -mcpu=hexagonv55 -hexagon-initial-cfg-cleanup=0 -terminal-rule=0 < %s | FileCheck %s
; Test that the pipeliner correctly generates the operands in the
; epilog.
diff --git a/llvm/test/CodeGen/Hexagon/swp-epilog-phi12.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-phi12.ll
index ba479b6..c6631bd 100644
--- a/llvm/test/CodeGen/Hexagon/swp-epilog-phi12.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-epilog-phi12.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=hexagon -hexagon-initial-cfg-cleanup=0 -pipeliner-experimental-cg=true -disable-cgp-delete-phis < %s | FileCheck %s
+; RUN: llc -mtriple=hexagon -hexagon-initial-cfg-cleanup=0 -pipeliner-experimental-cg=true -disable-cgp-delete-phis -terminal-rule=0 < %s | FileCheck %s
; Test epilogue generation when reading a loop-carried dependency from a previous
; stage. The first epilogue should read the value from iteration N-1 of the kernel.
diff --git a/llvm/test/CodeGen/Hexagon/swp-epilog-phi7.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-phi7.ll
index 96a3893..d90e7c4 100644
--- a/llvm/test/CodeGen/Hexagon/swp-epilog-phi7.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-epilog-phi7.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=hexagon -O2 -enable-pipeliner -disable-block-placement=0 < %s | FileCheck %s
+; RUN: llc -mtriple=hexagon -O2 -enable-pipeliner -disable-block-placement=0 -terminal-rule=0 < %s | FileCheck %s
; For the Phis generated in the epilog, test that we generate the correct
; names for the values coming from the prolog stages. The test below
diff --git a/llvm/test/CodeGen/Hexagon/swp-kernel-phi1.ll b/llvm/test/CodeGen/Hexagon/swp-kernel-phi1.ll
index 6ca8e94..2a428ff 100644
--- a/llvm/test/CodeGen/Hexagon/swp-kernel-phi1.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-kernel-phi1.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=hexagon -enable-pipeliner-opt-size -hexagon-initial-cfg-cleanup=0 < %s -pipeliner-experimental-cg=true | FileCheck %s
+; RUN: llc -mtriple=hexagon -enable-pipeliner-opt-size -hexagon-initial-cfg-cleanup=0 -terminal-rule=0 < %s -pipeliner-experimental-cg=true | FileCheck %s
; Test that we generate the correct names for the phis in the kernel for the
; incoming values. In this case, the loop contains a phi and has another phi
diff --git a/llvm/test/CodeGen/Hexagon/swp-matmul-bitext.ll b/llvm/test/CodeGen/Hexagon/swp-matmul-bitext.ll
index 42efe60..a0aeb80 100644
--- a/llvm/test/CodeGen/Hexagon/swp-matmul-bitext.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-matmul-bitext.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=hexagon -mcpu=hexagonv60 -enable-pipeliner < %s | FileCheck %s
+; RUN: llc -mtriple=hexagon -mcpu=hexagonv60 -enable-pipeliner -terminal-rule=0 < %s | FileCheck %s
; From coremark. Test that we pipeline the matrix multiplication bitextract
; function. The pipelined code should have two packets.
diff --git a/llvm/test/CodeGen/Hexagon/swp-order-copies.ll b/llvm/test/CodeGen/Hexagon/swp-order-copies.ll
index 1c9cc4a1..bbaa8cd 100644
--- a/llvm/test/CodeGen/Hexagon/swp-order-copies.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-order-copies.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=hexagon < %s -pipeliner-experimental-cg=true | FileCheck %s
+; RUN: llc -mtriple=hexagon < %s -pipeliner-experimental-cg=true -terminal-rule=0 | FileCheck %s
; Test that the instruction ordering code in the pipeliner fixes up dependences
; between post-increment register definitions and uses so that the register
diff --git a/llvm/test/CodeGen/Hexagon/swp-order-deps7.ll b/llvm/test/CodeGen/Hexagon/swp-order-deps7.ll
index 5f1780f..38893de 100644
--- a/llvm/test/CodeGen/Hexagon/swp-order-deps7.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-order-deps7.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=hexagon < %s -pipeliner-experimental-cg=true | FileCheck %s
+; RUN: llc -mtriple=hexagon < %s -pipeliner-experimental-cg=true -terminal-rule=0 | FileCheck %s
; Test that the pipeliner doesn't cause an assert and correctly pipelines the
; loop.
diff --git a/llvm/test/CodeGen/Hexagon/swp-reuse-phi-6.ll b/llvm/test/CodeGen/Hexagon/swp-reuse-phi-6.ll
index 6c8b063..5189812 100644
--- a/llvm/test/CodeGen/Hexagon/swp-reuse-phi-6.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-reuse-phi-6.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=hexagon < %s -pipeliner-experimental-cg=true | FileCheck %s
+; RUN: llc -mtriple=hexagon < %s -pipeliner-experimental-cg=true -terminal-rule=0 | FileCheck %s
; Test that the pipeliner generates correct code when attempting to reuse
; an existing phi. This test case contains a phi that references another
diff --git a/llvm/test/CodeGen/RISCV/branch-on-zero.ll b/llvm/test/CodeGen/RISCV/branch-on-zero.ll
index 02aeebd..2aec92e 100644
--- a/llvm/test/CodeGen/RISCV/branch-on-zero.ll
+++ b/llvm/test/CodeGen/RISCV/branch-on-zero.ll
@@ -127,13 +127,11 @@ define i32 @test_lshr2(ptr nocapture %x, ptr nocapture readonly %y, i32 %n) {
; RV32-NEXT: .LBB3_2: # %while.body
; RV32-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-NEXT: lw a3, 0(a1)
-; RV32-NEXT: addi a4, a1, 4
+; RV32-NEXT: addi a1, a1, 4
; RV32-NEXT: slli a3, a3, 1
-; RV32-NEXT: addi a1, a0, 4
; RV32-NEXT: sw a3, 0(a0)
-; RV32-NEXT: mv a0, a1
-; RV32-NEXT: mv a1, a4
-; RV32-NEXT: bne a4, a2, .LBB3_2
+; RV32-NEXT: addi a0, a0, 4
+; RV32-NEXT: bne a1, a2, .LBB3_2
; RV32-NEXT: .LBB3_3: # %while.end
; RV32-NEXT: li a0, 0
; RV32-NEXT: ret
@@ -151,13 +149,11 @@ define i32 @test_lshr2(ptr nocapture %x, ptr nocapture readonly %y, i32 %n) {
; RV64-NEXT: .LBB3_2: # %while.body
; RV64-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-NEXT: lw a3, 0(a1)
-; RV64-NEXT: addi a4, a1, 4
+; RV64-NEXT: addi a1, a1, 4
; RV64-NEXT: slli a3, a3, 1
-; RV64-NEXT: addi a1, a0, 4
; RV64-NEXT: sw a3, 0(a0)
-; RV64-NEXT: mv a0, a1
-; RV64-NEXT: mv a1, a4
-; RV64-NEXT: bne a4, a2, .LBB3_2
+; RV64-NEXT: addi a0, a0, 4
+; RV64-NEXT: bne a1, a2, .LBB3_2
; RV64-NEXT: .LBB3_3: # %while.end
; RV64-NEXT: li a0, 0
; RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/machine-pipeliner.ll b/llvm/test/CodeGen/RISCV/machine-pipeliner.ll
index d250098..a2a7da7 100644
--- a/llvm/test/CodeGen/RISCV/machine-pipeliner.ll
+++ b/llvm/test/CodeGen/RISCV/machine-pipeliner.ll
@@ -54,37 +54,37 @@ define void @test_pipelined_1(ptr noalias %in, ptr noalias %out, i32 signext %cn
; CHECK-PIPELINED: # %bb.0: # %entry
; CHECK-PIPELINED-NEXT: blez a2, .LBB1_6
; CHECK-PIPELINED-NEXT: # %bb.1: # %for.body.preheader
-; CHECK-PIPELINED-NEXT: lw a4, 0(a1)
+; CHECK-PIPELINED-NEXT: lw a7, 0(a1)
; CHECK-PIPELINED-NEXT: addi a2, a2, -1
+; CHECK-PIPELINED-NEXT: addi a3, a0, 4
+; CHECK-PIPELINED-NEXT: addi a5, a1, 4
; CHECK-PIPELINED-NEXT: sh2add.uw a6, a2, a1
-; CHECK-PIPELINED-NEXT: addi a2, a0, 4
-; CHECK-PIPELINED-NEXT: addi a1, a1, 4
; CHECK-PIPELINED-NEXT: addi a6, a6, 4
-; CHECK-PIPELINED-NEXT: beq a1, a6, .LBB1_5
+; CHECK-PIPELINED-NEXT: beq a5, a6, .LBB1_5
; CHECK-PIPELINED-NEXT: # %bb.2: # %for.body
-; CHECK-PIPELINED-NEXT: lw a5, 0(a1)
-; CHECK-PIPELINED-NEXT: addi a3, a2, 4
-; CHECK-PIPELINED-NEXT: addi a4, a4, 1
-; CHECK-PIPELINED-NEXT: addi a1, a1, 4
-; CHECK-PIPELINED-NEXT: beq a1, a6, .LBB1_4
+; CHECK-PIPELINED-NEXT: lw a1, 0(a5)
+; CHECK-PIPELINED-NEXT: addi a4, a3, 4
+; CHECK-PIPELINED-NEXT: addi a5, a5, 4
+; CHECK-PIPELINED-NEXT: beq a5, a6, .LBB1_4
; CHECK-PIPELINED-NEXT: .LBB1_3: # %for.body
; CHECK-PIPELINED-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-PIPELINED-NEXT: sw a4, 0(a0)
-; CHECK-PIPELINED-NEXT: mv a4, a5
-; CHECK-PIPELINED-NEXT: lw a5, 0(a1)
-; CHECK-PIPELINED-NEXT: mv a0, a2
-; CHECK-PIPELINED-NEXT: mv a2, a3
-; CHECK-PIPELINED-NEXT: addi a3, a3, 4
-; CHECK-PIPELINED-NEXT: addi a4, a4, 1
-; CHECK-PIPELINED-NEXT: addi a1, a1, 4
-; CHECK-PIPELINED-NEXT: bne a1, a6, .LBB1_3
+; CHECK-PIPELINED-NEXT: addi a2, a7, 1
+; CHECK-PIPELINED-NEXT: mv a7, a1
+; CHECK-PIPELINED-NEXT: lw a1, 0(a5)
+; CHECK-PIPELINED-NEXT: sw a2, 0(a0)
+; CHECK-PIPELINED-NEXT: mv a0, a3
+; CHECK-PIPELINED-NEXT: mv a3, a4
+; CHECK-PIPELINED-NEXT: addi a4, a4, 4
+; CHECK-PIPELINED-NEXT: addi a5, a5, 4
+; CHECK-PIPELINED-NEXT: bne a5, a6, .LBB1_3
; CHECK-PIPELINED-NEXT: .LBB1_4:
-; CHECK-PIPELINED-NEXT: sw a4, 0(a0)
-; CHECK-PIPELINED-NEXT: mv a0, a2
-; CHECK-PIPELINED-NEXT: mv a4, a5
+; CHECK-PIPELINED-NEXT: addi a7, a7, 1
+; CHECK-PIPELINED-NEXT: sw a7, 0(a0)
+; CHECK-PIPELINED-NEXT: mv a0, a3
+; CHECK-PIPELINED-NEXT: mv a7, a1
; CHECK-PIPELINED-NEXT: .LBB1_5:
-; CHECK-PIPELINED-NEXT: addi a4, a4, 1
-; CHECK-PIPELINED-NEXT: sw a4, 0(a0)
+; CHECK-PIPELINED-NEXT: addi a7, a7, 1
+; CHECK-PIPELINED-NEXT: sw a7, 0(a0)
; CHECK-PIPELINED-NEXT: .LBB1_6: # %for.end
; CHECK-PIPELINED-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
index 9c6d77d..c3fe6b3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
@@ -44,9 +44,8 @@ define <4 x i64> @m2_splat_with_tail(<4 x i64> %v1) vscale_range(2,2) {
; CHECK-LABEL: m2_splat_with_tail:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vrgather.vi v10, v8, 0
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vrgather.vi v8, v10, 0
; CHECK-NEXT: ret
%res = shufflevector <4 x i64> %v1, <4 x i64> poison, <4 x i32> <i32 0, i32 0, i32 2, i32 3>
ret <4 x i64> %res
@@ -99,9 +98,8 @@ define <4 x i64> @m2_splat_into_identity(<4 x i64> %v1) vscale_range(2,2) {
; CHECK-LABEL: m2_splat_into_identity:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vrgather.vi v10, v8, 0
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vrgather.vi v8, v10, 0
; CHECK-NEXT: ret
%res = shufflevector <4 x i64> %v1, <4 x i64> poison, <4 x i32> <i32 0, i32 0, i32 2, i32 3>
ret <4 x i64> %res
diff --git a/llvm/test/CodeGen/RISCV/rvv/machine-combiner-subreg-verifier-error.mir b/llvm/test/CodeGen/RISCV/rvv/machine-combiner-subreg-verifier-error.mir
new file mode 100644
index 0000000..76dfd4e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/machine-combiner-subreg-verifier-error.mir
@@ -0,0 +1,39 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs -run-pass=machine-combiner -o - %s | FileCheck %s
+
+# Make sure the verifier doesn't fail due to dropping subregister
+# uses.
+
+---
+name: machine_combiner_subreg_verifier_error
+tracksRegLiveness: true
+isSSA: true
+body: |
+ bb.0:
+ liveins: $v8m4, $v12m4
+
+ ; CHECK-LABEL: name: machine_combiner_subreg_verifier_error
+ ; CHECK: liveins: $v8m4, $v12m4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:gprnox0 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF2:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF3:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF4:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF5:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; CHECK-NEXT: [[PseudoVSLIDEDOWN_VI_M8_:%[0-9]+]]:vrm8 = PseudoVSLIDEDOWN_VI_M8 $noreg, [[DEF2]], 26, 2, 5 /* e32 */, 3 /* ta, ma */
+ ; CHECK-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[DEF2]].sub_vrm1_0, killed [[DEF3]], 2, 5 /* e32 */, 1 /* ta, mu */
+ ; CHECK-NEXT: [[PseudoVADD_VV_MF2_1:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVSLIDEDOWN_VI_M8_]].sub_vrm1_0, killed [[PseudoVADD_VV_MF2_]], 2, 5 /* e32 */, 1 /* ta, mu */
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:vrm4 = IMPLICIT_DEF
+ %1:gprnox0 = IMPLICIT_DEF
+ %2:vrm8 = IMPLICIT_DEF
+ %3:vr = IMPLICIT_DEF
+ %4:vrm2 = IMPLICIT_DEF
+ %5:vr = IMPLICIT_DEF
+ %6:vrm8 = PseudoVSLIDEDOWN_VI_M8 $noreg, %2, 26, 2, 5 /* e32 */, 3 /* ta, ma */
+ %7:vr = PseudoVADD_VV_MF2 $noreg, %6.sub_vrm1_0, %2.sub_vrm1_0, 2, 5 /* e32 */, 1 /* ta, mu */
+ %8:vr = PseudoVADD_VV_MF2 $noreg, killed %7, killed %3, 2, 5 /* e32 */, 1 /* ta, mu */
+ PseudoRET implicit $v8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/rvv/pr95865.ll b/llvm/test/CodeGen/RISCV/rvv/pr95865.ll
index ab98496..a4c793b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/pr95865.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/pr95865.ll
@@ -36,7 +36,7 @@ define i32 @main(i1 %arg.1, i64 %arg.2, i1 %arg.3, i64 %arg.4, i1 %arg.5, <vscal
; CHECK-NEXT: .cfi_offset s10, -96
; CHECK-NEXT: .cfi_offset s11, -104
; CHECK-NEXT: li a6, 0
-; CHECK-NEXT: li s2, 8
+; CHECK-NEXT: li a7, 8
; CHECK-NEXT: li t0, 12
; CHECK-NEXT: li s0, 4
; CHECK-NEXT: li t1, 20
@@ -45,7 +45,7 @@ define i32 @main(i1 %arg.1, i64 %arg.2, i1 %arg.3, i64 %arg.4, i1 %arg.5, <vscal
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: andi t3, a4, 1
-; CHECK-NEXT: li t2, 4
+; CHECK-NEXT: li s2, 4
; CHECK-NEXT: .LBB0_1: # %for.cond1.preheader.i
; CHECK-NEXT: # =>This Loop Header: Depth=1
; CHECK-NEXT: # Child Loop BB0_2 Depth 2
@@ -53,9 +53,9 @@ define i32 @main(i1 %arg.1, i64 %arg.2, i1 %arg.3, i64 %arg.4, i1 %arg.5, <vscal
; CHECK-NEXT: # Child Loop BB0_4 Depth 4
; CHECK-NEXT: # Child Loop BB0_5 Depth 5
; CHECK-NEXT: mv t4, t1
-; CHECK-NEXT: mv t5, t2
+; CHECK-NEXT: mv t2, s2
; CHECK-NEXT: mv t6, t0
-; CHECK-NEXT: mv a7, s2
+; CHECK-NEXT: mv s3, a7
; CHECK-NEXT: mv s4, a6
; CHECK-NEXT: .LBB0_2: # %for.cond5.preheader.i
; CHECK-NEXT: # Parent Loop BB0_1 Depth=1
@@ -64,9 +64,9 @@ define i32 @main(i1 %arg.1, i64 %arg.2, i1 %arg.3, i64 %arg.4, i1 %arg.5, <vscal
; CHECK-NEXT: # Child Loop BB0_4 Depth 4
; CHECK-NEXT: # Child Loop BB0_5 Depth 5
; CHECK-NEXT: mv s5, t4
-; CHECK-NEXT: mv s6, t5
+; CHECK-NEXT: mv t5, t2
; CHECK-NEXT: mv s7, t6
-; CHECK-NEXT: mv s3, a7
+; CHECK-NEXT: mv s8, s3
; CHECK-NEXT: mv s9, s4
; CHECK-NEXT: .LBB0_3: # %for.cond9.preheader.i
; CHECK-NEXT: # Parent Loop BB0_1 Depth=1
@@ -75,9 +75,9 @@ define i32 @main(i1 %arg.1, i64 %arg.2, i1 %arg.3, i64 %arg.4, i1 %arg.5, <vscal
; CHECK-NEXT: # Child Loop BB0_4 Depth 4
; CHECK-NEXT: # Child Loop BB0_5 Depth 5
; CHECK-NEXT: mv s11, s5
-; CHECK-NEXT: mv a3, s6
+; CHECK-NEXT: mv s6, t5
; CHECK-NEXT: mv ra, s7
-; CHECK-NEXT: mv s8, s3
+; CHECK-NEXT: mv a5, s8
; CHECK-NEXT: mv s1, s9
; CHECK-NEXT: .LBB0_4: # %vector.ph.i
; CHECK-NEXT: # Parent Loop BB0_1 Depth=1
@@ -92,45 +92,44 @@ define i32 @main(i1 %arg.1, i64 %arg.2, i1 %arg.3, i64 %arg.4, i1 %arg.5, <vscal
; CHECK-NEXT: # Parent Loop BB0_3 Depth=3
; CHECK-NEXT: # Parent Loop BB0_4 Depth=4
; CHECK-NEXT: # => This Inner Loop Header: Depth=5
-; CHECK-NEXT: addi a5, a1, 4
-; CHECK-NEXT: add a4, s8, a1
-; CHECK-NEXT: add a1, a1, a3
+; CHECK-NEXT: add a4, a5, a1
+; CHECK-NEXT: add a3, s6, a1
+; CHECK-NEXT: addi a1, a1, 4
; CHECK-NEXT: vse32.v v8, (a4), v0.t
-; CHECK-NEXT: vse32.v v8, (a1), v0.t
-; CHECK-NEXT: mv a1, a5
-; CHECK-NEXT: bne a5, s0, .LBB0_5
+; CHECK-NEXT: vse32.v v8, (a3), v0.t
+; CHECK-NEXT: bne a1, s0, .LBB0_5
; CHECK-NEXT: # %bb.6: # %for.cond.cleanup15.i
; CHECK-NEXT: # in Loop: Header=BB0_4 Depth=4
; CHECK-NEXT: addi s1, s1, 4
-; CHECK-NEXT: addi s8, s8, 4
+; CHECK-NEXT: addi a5, a5, 4
; CHECK-NEXT: addi ra, ra, 4
-; CHECK-NEXT: addi a3, a3, 4
+; CHECK-NEXT: addi s6, s6, 4
; CHECK-NEXT: andi s10, a0, 1
; CHECK-NEXT: addi s11, s11, 4
; CHECK-NEXT: beqz s10, .LBB0_4
; CHECK-NEXT: # %bb.7: # %for.cond.cleanup11.i
; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=3
; CHECK-NEXT: addi s9, s9, 4
-; CHECK-NEXT: addi s3, s3, 4
+; CHECK-NEXT: addi s8, s8, 4
; CHECK-NEXT: addi s7, s7, 4
-; CHECK-NEXT: addi s6, s6, 4
+; CHECK-NEXT: addi t5, t5, 4
; CHECK-NEXT: andi a1, a2, 1
; CHECK-NEXT: addi s5, s5, 4
; CHECK-NEXT: beqz a1, .LBB0_3
; CHECK-NEXT: # %bb.8: # %for.cond.cleanup7.i
; CHECK-NEXT: # in Loop: Header=BB0_2 Depth=2
; CHECK-NEXT: addi s4, s4, 4
-; CHECK-NEXT: addi a7, a7, 4
+; CHECK-NEXT: addi s3, s3, 4
; CHECK-NEXT: addi t6, t6, 4
-; CHECK-NEXT: addi t5, t5, 4
+; CHECK-NEXT: addi t2, t2, 4
; CHECK-NEXT: addi t4, t4, 4
; CHECK-NEXT: beqz t3, .LBB0_2
; CHECK-NEXT: # %bb.9: # %for.cond.cleanup3.i
; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1
; CHECK-NEXT: addi a6, a6, 4
-; CHECK-NEXT: addi s2, s2, 4
+; CHECK-NEXT: addi a7, a7, 4
; CHECK-NEXT: addi t0, t0, 4
-; CHECK-NEXT: addi t2, t2, 4
+; CHECK-NEXT: addi s2, s2, 4
; CHECK-NEXT: addi t1, t1, 4
; CHECK-NEXT: beqz a1, .LBB0_1
; CHECK-NEXT: # %bb.10: # %l.exit
diff --git a/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
index f295bd8..386c736 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
@@ -2258,18 +2258,18 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
; CHECK-RV32-NEXT: vsetvli a7, zero, e32, m2, ta, ma
; CHECK-RV32-NEXT: .LBB98_3: # %vector.body
; CHECK-RV32-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-RV32-NEXT: slli a7, a6, 2
-; CHECK-RV32-NEXT: add t0, a6, a4
-; CHECK-RV32-NEXT: add a7, a0, a7
-; CHECK-RV32-NEXT: vl2re32.v v8, (a7)
-; CHECK-RV32-NEXT: sltu a6, t0, a6
-; CHECK-RV32-NEXT: add a5, a5, a6
-; CHECK-RV32-NEXT: xor a6, t0, a3
+; CHECK-RV32-NEXT: mv a7, a6
+; CHECK-RV32-NEXT: slli t0, a6, 2
+; CHECK-RV32-NEXT: add a6, a6, a4
+; CHECK-RV32-NEXT: add t0, a0, t0
+; CHECK-RV32-NEXT: vl2re32.v v8, (t0)
+; CHECK-RV32-NEXT: sltu a7, a6, a7
+; CHECK-RV32-NEXT: add a5, a5, a7
+; CHECK-RV32-NEXT: xor a7, a6, a3
; CHECK-RV32-NEXT: vand.vx v8, v8, a1
-; CHECK-RV32-NEXT: or t1, a6, a5
-; CHECK-RV32-NEXT: vs2r.v v8, (a7)
-; CHECK-RV32-NEXT: mv a6, t0
-; CHECK-RV32-NEXT: bnez t1, .LBB98_3
+; CHECK-RV32-NEXT: or a7, a7, a5
+; CHECK-RV32-NEXT: vs2r.v v8, (t0)
+; CHECK-RV32-NEXT: bnez a7, .LBB98_3
; CHECK-RV32-NEXT: # %bb.4: # %middle.block
; CHECK-RV32-NEXT: bnez a3, .LBB98_6
; CHECK-RV32-NEXT: .LBB98_5: # %for.body
@@ -2350,18 +2350,18 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
; CHECK-ZVKB-NOZBB32-NEXT: vsetvli a7, zero, e32, m2, ta, ma
; CHECK-ZVKB-NOZBB32-NEXT: .LBB98_3: # %vector.body
; CHECK-ZVKB-NOZBB32-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-ZVKB-NOZBB32-NEXT: slli a7, a6, 2
-; CHECK-ZVKB-NOZBB32-NEXT: add t0, a6, a4
-; CHECK-ZVKB-NOZBB32-NEXT: add a7, a0, a7
-; CHECK-ZVKB-NOZBB32-NEXT: vl2re32.v v8, (a7)
-; CHECK-ZVKB-NOZBB32-NEXT: sltu a6, t0, a6
-; CHECK-ZVKB-NOZBB32-NEXT: add a5, a5, a6
-; CHECK-ZVKB-NOZBB32-NEXT: xor a6, t0, a3
+; CHECK-ZVKB-NOZBB32-NEXT: mv a7, a6
+; CHECK-ZVKB-NOZBB32-NEXT: slli t0, a6, 2
+; CHECK-ZVKB-NOZBB32-NEXT: add a6, a6, a4
+; CHECK-ZVKB-NOZBB32-NEXT: add t0, a0, t0
+; CHECK-ZVKB-NOZBB32-NEXT: vl2re32.v v8, (t0)
+; CHECK-ZVKB-NOZBB32-NEXT: sltu a7, a6, a7
+; CHECK-ZVKB-NOZBB32-NEXT: add a5, a5, a7
+; CHECK-ZVKB-NOZBB32-NEXT: xor a7, a6, a3
; CHECK-ZVKB-NOZBB32-NEXT: vandn.vx v8, v8, a1
-; CHECK-ZVKB-NOZBB32-NEXT: or t1, a6, a5
-; CHECK-ZVKB-NOZBB32-NEXT: vs2r.v v8, (a7)
-; CHECK-ZVKB-NOZBB32-NEXT: mv a6, t0
-; CHECK-ZVKB-NOZBB32-NEXT: bnez t1, .LBB98_3
+; CHECK-ZVKB-NOZBB32-NEXT: or a7, a7, a5
+; CHECK-ZVKB-NOZBB32-NEXT: vs2r.v v8, (t0)
+; CHECK-ZVKB-NOZBB32-NEXT: bnez a7, .LBB98_3
; CHECK-ZVKB-NOZBB32-NEXT: # %bb.4: # %middle.block
; CHECK-ZVKB-NOZBB32-NEXT: bnez a3, .LBB98_7
; CHECK-ZVKB-NOZBB32-NEXT: .LBB98_5: # %for.body.preheader
@@ -2444,18 +2444,18 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
; CHECK-ZVKB-ZBB32-NEXT: vsetvli a7, zero, e32, m2, ta, ma
; CHECK-ZVKB-ZBB32-NEXT: .LBB98_3: # %vector.body
; CHECK-ZVKB-ZBB32-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-ZVKB-ZBB32-NEXT: slli a7, a6, 2
-; CHECK-ZVKB-ZBB32-NEXT: add t0, a6, a4
-; CHECK-ZVKB-ZBB32-NEXT: add a7, a0, a7
-; CHECK-ZVKB-ZBB32-NEXT: vl2re32.v v8, (a7)
-; CHECK-ZVKB-ZBB32-NEXT: sltu a6, t0, a6
-; CHECK-ZVKB-ZBB32-NEXT: add a5, a5, a6
-; CHECK-ZVKB-ZBB32-NEXT: xor a6, t0, a3
+; CHECK-ZVKB-ZBB32-NEXT: mv a7, a6
+; CHECK-ZVKB-ZBB32-NEXT: slli t0, a6, 2
+; CHECK-ZVKB-ZBB32-NEXT: add a6, a6, a4
+; CHECK-ZVKB-ZBB32-NEXT: add t0, a0, t0
+; CHECK-ZVKB-ZBB32-NEXT: vl2re32.v v8, (t0)
+; CHECK-ZVKB-ZBB32-NEXT: sltu a7, a6, a7
+; CHECK-ZVKB-ZBB32-NEXT: add a5, a5, a7
+; CHECK-ZVKB-ZBB32-NEXT: xor a7, a6, a3
; CHECK-ZVKB-ZBB32-NEXT: vandn.vx v8, v8, a1
-; CHECK-ZVKB-ZBB32-NEXT: or t1, a6, a5
-; CHECK-ZVKB-ZBB32-NEXT: vs2r.v v8, (a7)
-; CHECK-ZVKB-ZBB32-NEXT: mv a6, t0
-; CHECK-ZVKB-ZBB32-NEXT: bnez t1, .LBB98_3
+; CHECK-ZVKB-ZBB32-NEXT: or a7, a7, a5
+; CHECK-ZVKB-ZBB32-NEXT: vs2r.v v8, (t0)
+; CHECK-ZVKB-ZBB32-NEXT: bnez a7, .LBB98_3
; CHECK-ZVKB-ZBB32-NEXT: # %bb.4: # %middle.block
; CHECK-ZVKB-ZBB32-NEXT: bnez a3, .LBB98_6
; CHECK-ZVKB-ZBB32-NEXT: .LBB98_5: # %for.body
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcpop-shl-zext-opt.ll b/llvm/test/CodeGen/RISCV/rvv/vcpop-shl-zext-opt.ll
index ed6b7f1..1044008 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcpop-shl-zext-opt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcpop-shl-zext-opt.ll
@@ -25,24 +25,24 @@ define dso_local void @test_store1(ptr nocapture noundef writeonly %dst, ptr noc
; RV32-NEXT: li a6, 0
; RV32-NEXT: .LBB0_4: # %vector.body
; RV32-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32-NEXT: slli t0, a7, 2
-; RV32-NEXT: addi t1, a7, 8
-; RV32-NEXT: add t0, a1, t0
+; RV32-NEXT: mv t0, a7
+; RV32-NEXT: slli t1, a7, 2
+; RV32-NEXT: addi a7, a7, 8
+; RV32-NEXT: add t1, a1, t1
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT: vle32.v v8, (t0)
-; RV32-NEXT: sltu a7, t1, a7
-; RV32-NEXT: xor t0, t1, a5
-; RV32-NEXT: add a6, a6, a7
+; RV32-NEXT: vle32.v v8, (t1)
+; RV32-NEXT: sltu t0, a7, t0
+; RV32-NEXT: xor t1, a7, a5
+; RV32-NEXT: add a6, a6, t0
; RV32-NEXT: vmslt.vx v12, v8, a2
; RV32-NEXT: vcompress.vm v10, v8, v12
-; RV32-NEXT: vcpop.m a7, v12
-; RV32-NEXT: vsetvli zero, a7, e32, m2, ta, ma
+; RV32-NEXT: vcpop.m t0, v12
+; RV32-NEXT: vsetvli zero, t0, e32, m2, ta, ma
; RV32-NEXT: vse32.v v10, (a0)
-; RV32-NEXT: slli a7, a7, 2
-; RV32-NEXT: or t0, t0, a6
-; RV32-NEXT: add a0, a0, a7
-; RV32-NEXT: mv a7, t1
-; RV32-NEXT: bnez t0, .LBB0_4
+; RV32-NEXT: slli t0, t0, 2
+; RV32-NEXT: or t1, t1, a6
+; RV32-NEXT: add a0, a0, t0
+; RV32-NEXT: bnez t1, .LBB0_4
; RV32-NEXT: # %bb.5: # %middle.block
; RV32-NEXT: bne a5, a3, .LBB0_9
; RV32-NEXT: .LBB0_6: # %for.cond.cleanup
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll
index ead79fc..af3b0852a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll
@@ -102,20 +102,20 @@ define void @test1(ptr nocapture noundef writeonly %dst, i32 noundef signext %i_
; RV32-NEXT: .LBB0_13: # %vector.body
; RV32-NEXT: # Parent Loop BB0_10 Depth=1
; RV32-NEXT: # => This Inner Loop Header: Depth=2
-; RV32-NEXT: add s0, a2, t6
-; RV32-NEXT: add s1, a4, t6
-; RV32-NEXT: vl2r.v v8, (s0)
-; RV32-NEXT: add s0, a0, t6
+; RV32-NEXT: mv s0, t6
+; RV32-NEXT: add t6, a2, t6
+; RV32-NEXT: add s1, a4, s0
+; RV32-NEXT: vl2r.v v8, (t6)
+; RV32-NEXT: add s2, a0, s0
; RV32-NEXT: vl2r.v v10, (s1)
-; RV32-NEXT: add s1, t6, t2
-; RV32-NEXT: sltu t6, s1, t6
-; RV32-NEXT: add t5, t5, t6
-; RV32-NEXT: xor t6, s1, t4
+; RV32-NEXT: add t6, s0, t2
+; RV32-NEXT: sltu s0, t6, s0
+; RV32-NEXT: add t5, t5, s0
+; RV32-NEXT: xor s0, t6, t4
; RV32-NEXT: vaaddu.vv v8, v8, v10
-; RV32-NEXT: or s2, t6, t5
-; RV32-NEXT: vs2r.v v8, (s0)
-; RV32-NEXT: mv t6, s1
-; RV32-NEXT: bnez s2, .LBB0_13
+; RV32-NEXT: or s0, s0, t5
+; RV32-NEXT: vs2r.v v8, (s2)
+; RV32-NEXT: bnez s0, .LBB0_13
; RV32-NEXT: # %bb.14: # %middle.block
; RV32-NEXT: # in Loop: Header=BB0_10 Depth=1
; RV32-NEXT: beq t4, a6, .LBB0_9
diff --git a/llvm/test/CodeGen/RISCV/sra-xor-sra.ll b/llvm/test/CodeGen/RISCV/sra-xor-sra.ll
new file mode 100644
index 0000000..b04f0a2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/sra-xor-sra.ll
@@ -0,0 +1,32 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck %s
+
+; Test folding of: (sra (xor (sra x, c1), -1), c2) -> (sra (xor x, -1), c3)
+; Original motivating example: the two sra shifts should merge across the xor.
+define i16 @not_invert_signbit_splat_mask(i8 %x, i16 %y) {
+; CHECK-LABEL: not_invert_signbit_splat_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slli a0, a0, 56
+; CHECK-NEXT: srai a0, a0, 62
+; CHECK-NEXT: not a0, a0
+; CHECK-NEXT: and a0, a0, a1
+; CHECK-NEXT: ret
+ %a = ashr i8 %x, 6
+ %n = xor i8 %a, -1
+ %s = sext i8 %n to i16
+ %r = and i16 %s, %y
+ ret i16 %r
+}
+
+; Edge case: the shift amount (10) exceeds the i8 bit width, so the result folds to zero.
+define i16 @sra_xor_sra_overflow(i8 %x, i16 %y) {
+; CHECK-LABEL: sra_xor_sra_overflow:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 0
+; CHECK-NEXT: ret
+ %a = ashr i8 %x, 10
+ %n = xor i8 %a, -1
+ %s = sext i8 %n to i16
+ %r = and i16 %s, %y
+ ret i16 %r
+}
diff --git a/llvm/test/CodeGen/X86/apx/no-rex2-general.ll b/llvm/test/CodeGen/X86/apx/no-rex2-general.ll
index 805fc7c..2b34739 100644
--- a/llvm/test/CodeGen/X86/apx/no-rex2-general.ll
+++ b/llvm/test/CodeGen/X86/apx/no-rex2-general.ll
@@ -1,76 +1,80 @@
-; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=+sse2,+ssse3,+egpr | FileCheck %s --check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=+sse2,+ssse3,+egpr,+avx | FileCheck %s --check-prefix=AVX
-; RUN: llc < %s -enable-new-pm -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=+sse2,+ssse3,+egpr | FileCheck %s --check-prefix=SSE
-; RUN: llc < %s -enable-new-pm -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=+sse2,+ssse3,+egpr,+avx | FileCheck %s --check-prefix=AVX
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2,+ssse3,+egpr | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2,+ssse3,+egpr,+avx | FileCheck %s --check-prefixes=CHECK,AVX
define i32 @map0(ptr nocapture noundef readonly %a, i64 noundef %b) {
- ; SSE-LABEL: name: map0
- ; SSE: bb.0.entry:
- ; SSE-NEXT: liveins: $rdi, $rsi
- ; SSE-NEXT: {{ $}}
- ; SSE-NEXT: [[COPY:%[0-9]+]]:gr64_nosp = COPY $rsi
- ; SSE-NEXT: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
- ; SSE-NEXT: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY1]], 4, [[COPY]], 0, $noreg :: (load (s32) from %ir.add.ptr)
- ; SSE-NEXT: $eax = COPY [[MOV32rm]]
- ; SSE-NEXT: RET 0, $eax
- ; AVX-LABEL: name: map0
- ; AVX: bb.0.entry:
- ; AVX-NEXT: liveins: $rdi, $rsi
- ; AVX-NEXT: {{ $}}
- ; AVX-NEXT: [[COPY:%[0-9]+]]:gr64_nosp = COPY $rsi
- ; AVX-NEXT: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
- ; AVX-NEXT: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY1]], 4, [[COPY]], 0, $noreg :: (load (s32) from %ir.add.ptr)
- ; AVX-NEXT: $eax = COPY [[MOV32rm]]
- ; AVX-NEXT: RET 0, $eax
+; CHECK-LABEL: map0:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rsi, %r16
+; CHECK-NEXT: movq %rdi, %r17
+; CHECK-NEXT: #APP
+; CHECK-NEXT: nop
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: movl (%r17,%r16,4), %eax
+; CHECK-NEXT: retq
entry:
%add.ptr = getelementptr inbounds i32, ptr %a, i64 %b
+ tail call void asm sideeffect "nop", "~{eax},~{ecx},~{edx},~{esi},~{edi},~{r8},~{r9},~{r10},~{r11}"()
%0 = load i32, ptr %add.ptr
ret i32 %0
}
-define i32 @map1_or_vex(<2 x double> noundef %a) {
- ; SSE-LABEL: name: map1_or_vex
- ; SSE: bb.0.entry:
- ; SSE-NEXT: liveins: $xmm0
- ; SSE-NEXT: {{ $}}
- ; SSE-NEXT: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
- ; SSE-NEXT: [[CVTSD2SIrr_Int:%[0-9]+]]:gr32 = nofpexcept CVTSD2SIrr_Int [[COPY]], implicit $mxcsr
- ; SSE-NEXT: $eax = COPY [[CVTSD2SIrr_Int]]
- ; SSE-NEXT: RET 0, $eax
- ; AVX-LABEL: name: map1_or_vex
- ; AVX: bb.0.entry:
- ; AVX-NEXT: liveins: $xmm0
- ; AVX-NEXT: {{ $}}
- ; AVX-NEXT: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
- ; AVX-NEXT: [[VCVTSD2SIrr_Int:%[0-9]+]]:gr32_norex2 = nofpexcept VCVTSD2SIrr_Int [[COPY]], implicit $mxcsr
- ; AVX-NEXT: $eax = COPY [[VCVTSD2SIrr_Int]]
- ; AVX-NEXT: RET 0, $eax
+define i32 @map1_or_vex(<2 x double> noundef %a) nounwind {
+; SSE-LABEL: map1_or_vex:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: cvtsd2si %xmm0, %r16d
+; SSE-NEXT: #APP
+; SSE-NEXT: nop
+; SSE-NEXT: #NO_APP
+; SSE-NEXT: movl %r16d, %eax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: map1_or_vex:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: pushq %rbx
+; AVX-NEXT: vcvtsd2si %xmm0, %ebx
+; AVX-NEXT: #APP
+; AVX-NEXT: nop
+; AVX-NEXT: #NO_APP
+; AVX-NEXT: movl %ebx, %eax
+; AVX-NEXT: popq %rbx
+; AVX-NEXT: retq
entry:
%0 = tail call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %a)
+ tail call void asm sideeffect "nop", "~{eax},~{ecx},~{edx},~{esi},~{edi},~{r8},~{r9},~{r10},~{r11}"()
ret i32 %0
}
-define <2 x i64> @map2_or_vex(ptr nocapture noundef readonly %b, i64 noundef %c) {
- ; SSE-LABEL: name: map2_or_vex
- ; SSE: bb.0.entry:
- ; SSE-NEXT: liveins: $rdi, $rsi
- ; SSE-NEXT: {{ $}}
- ; SSE-NEXT: [[COPY:%[0-9]+]]:gr64_norex2_nosp = COPY $rsi
- ; SSE-NEXT: [[COPY1:%[0-9]+]]:gr64_norex2 = COPY $rdi
- ; SSE-NEXT: [[PABSBrm:%[0-9]+]]:vr128 = PABSBrm [[COPY1]], 4, [[COPY]], 0, $noreg :: (load (s128) from %ir.add.ptr)
- ; SSE-NEXT: $xmm0 = COPY [[PABSBrm]]
- ; SSE-NEXT: RET 0, $xmm0
- ; AVX-LABEL: name: map2_or_vex
- ; AVX: bb.0.entry:
- ; AVX-NEXT: liveins: $rdi, $rsi
- ; AVX-NEXT: {{ $}}
- ; AVX-NEXT: [[COPY:%[0-9]+]]:gr64_norex2_nosp = COPY $rsi
- ; AVX-NEXT: [[COPY1:%[0-9]+]]:gr64_norex2 = COPY $rdi
- ; AVX-NEXT: [[VPABSBrm:%[0-9]+]]:vr128 = VPABSBrm [[COPY1]], 4, [[COPY]], 0, $noreg :: (load (s128) from %ir.add.ptr)
- ; AVX-NEXT: $xmm0 = COPY [[VPABSBrm]]
- ; AVX-NEXT: RET 0, $xmm0
+define <2 x i64> @map2_or_vex(ptr nocapture noundef readonly %b, i64 noundef %c) nounwind {
+; SSE-LABEL: map2_or_vex:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: movq %rsi, %rbx
+; SSE-NEXT: movq %rdi, %r14
+; SSE-NEXT: #APP
+; SSE-NEXT: nop
+; SSE-NEXT: #NO_APP
+; SSE-NEXT: pabsb (%r14,%rbx,4), %xmm0
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r14
+; SSE-NEXT: retq
+;
+; AVX-LABEL: map2_or_vex:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: pushq %r14
+; AVX-NEXT: pushq %rbx
+; AVX-NEXT: movq %rsi, %rbx
+; AVX-NEXT: movq %rdi, %r14
+; AVX-NEXT: #APP
+; AVX-NEXT: nop
+; AVX-NEXT: #NO_APP
+; AVX-NEXT: vpabsb (%r14,%rbx,4), %xmm0
+; AVX-NEXT: popq %rbx
+; AVX-NEXT: popq %r14
+; AVX-NEXT: retq
entry:
+ tail call void asm sideeffect "nop", "~{eax},~{ecx},~{edx},~{esi},~{edi},~{r8},~{r9},~{r10},~{r11}"()
%add.ptr = getelementptr inbounds i32, ptr %b, i64 %c
%a = load <2 x i64>, ptr %add.ptr
%0 = bitcast <2 x i64> %a to <16 x i8>
diff --git a/llvm/test/CodeGen/X86/apx/no-rex2-pseudo-amx.ll b/llvm/test/CodeGen/X86/apx/no-rex2-pseudo-amx.ll
index 5fa4cb4..c193680 100644
--- a/llvm/test/CodeGen/X86/apx/no-rex2-pseudo-amx.ll
+++ b/llvm/test/CodeGen/X86/apx/no-rex2-pseudo-amx.ll
@@ -1,17 +1,20 @@
-; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=+amx-tile,+egpr | FileCheck %s
-; RUN: llc < %s -enable-new-pm -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=+amx-tile,+egpr | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+amx-tile,+egpr | FileCheck %s
-define dso_local void @amx(ptr noundef %data) {
- ; CHECK-LABEL: name: amx
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $rdi
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64_norex2 = COPY $rdi
- ; CHECK-NEXT: [[MOV32ri64_:%[0-9]+]]:gr64_norex2_nosp = MOV32ri64 8
- ; CHECK-NEXT: PTILELOADD 4, [[COPY]], 1, killed [[MOV32ri64_]], 0, $noreg
- ; CHECK-NEXT: RET 0
- entry:
+define dso_local void @amx(ptr noundef %data) nounwind {
+; CHECK-LABEL: amx:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: #APP
+; CHECK-NEXT: nop
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: movl $8, %eax
+; CHECK-NEXT: tileloadd (%rbx,%rax), %tmm4
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
+entry:
+ tail call void asm sideeffect "nop", "~{eax},~{ecx},~{edx},~{esi},~{edi},~{r8},~{r9},~{r10},~{r11}"()
call void @llvm.x86.tileloadd64(i8 4, ptr %data, i64 8)
ret void
}
diff --git a/llvm/test/CodeGen/X86/apx/no-rex2-pseudo-x87.ll b/llvm/test/CodeGen/X86/apx/no-rex2-pseudo-x87.ll
index a9ca591..4692a58 100644
--- a/llvm/test/CodeGen/X86/apx/no-rex2-pseudo-x87.ll
+++ b/llvm/test/CodeGen/X86/apx/no-rex2-pseudo-x87.ll
@@ -1,17 +1,22 @@
-; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=-sse,+egpr | FileCheck %s
-; RUN: llc < %s -enable-new-pm -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=-sse,+egpr | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=-sse,+egpr | FileCheck %s
-define void @x87(ptr %0, ptr %1) {
- ; CHECK-LABEL: name: x87
- ; CHECK: bb.0 (%ir-block.2):
- ; CHECK-NEXT: liveins: $rdi, $rsi
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64_norex2 = COPY $rsi
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64_norex2 = COPY $rdi
- ; CHECK-NEXT: [[LD_Fp32m:%[0-9]+]]:rfp32 = nofpexcept LD_Fp32m [[COPY1]], 1, $noreg, 0, $noreg, implicit-def dead $fpsw, implicit $fpcw :: (load (s32) from %ir.0)
- ; CHECK-NEXT: nofpexcept ST_Fp32m [[COPY]], 1, $noreg, 0, $noreg, killed [[LD_Fp32m]], implicit-def dead $fpsw, implicit $fpcw :: (store (s32) into %ir.1)
- ; CHECK-NEXT: RET 0
+define void @x87(ptr %0, ptr %1) nounwind {
+; CHECK-LABEL: x87:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %r14
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: movq %rsi, %rbx
+; CHECK-NEXT: movq %rdi, %r14
+; CHECK-NEXT: #APP
+; CHECK-NEXT: nop
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: flds (%r14)
+; CHECK-NEXT: fstps (%rbx)
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: popq %r14
+; CHECK-NEXT: retq
+ tail call void asm sideeffect "nop", "~{eax},~{ecx},~{edx},~{esi},~{edi},~{r8},~{r9},~{r10},~{r11}"()
%3 = load float, ptr %0
store float %3, ptr %1
ret void
diff --git a/llvm/test/CodeGen/X86/apx/no-rex2-special.ll b/llvm/test/CodeGen/X86/apx/no-rex2-special.ll
index 8653442..f2025b5 100644
--- a/llvm/test/CodeGen/X86/apx/no-rex2-special.ll
+++ b/llvm/test/CodeGen/X86/apx/no-rex2-special.ll
@@ -1,70 +1,81 @@
-; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=+xsave,+egpr | FileCheck %s
-; RUN: llc < %s -enable-new-pm -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=+xsave,+egpr | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+xsave,+egpr | FileCheck %s
-define void @test_xsave(ptr %ptr, i32 %hi, i32 %lo) {
- ; CHECK-LABEL: name: test_xsave
- ; CHECK: bb.0 (%ir-block.0):
- ; CHECK-NEXT: liveins: $rdi, $esi, $edx
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr32 = COPY $edx
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr64_norex2 = COPY $rdi
- ; CHECK-NEXT: $edx = COPY [[COPY1]]
- ; CHECK-NEXT: $eax = COPY [[COPY]]
- ; CHECK-NEXT: XSAVE [[COPY2]], 1, $noreg, 0, $noreg, implicit $edx, implicit $eax
- ; CHECK-NEXT: RET 0
+define void @test_xsave(ptr %ptr, i32 %hi, i32 %lo) nounwind {
+; CHECK-LABEL: test_xsave:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: movl %edx, %r16d
+; CHECK-NEXT: movl %esi, %edx
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: #APP
+; CHECK-NEXT: nop
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: movl %r16d, %eax
+; CHECK-NEXT: xsave (%rbx)
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
+ tail call void asm sideeffect "nop", "~{eax},~{ecx},~{esi},~{edi},~{r8},~{r9},~{r10},~{r11}"()
call void @llvm.x86.xsave(ptr %ptr, i32 %hi, i32 %lo)
ret void;
}
declare void @llvm.x86.xsave(ptr, i32, i32)
-define void @test_xsave64(ptr %ptr, i32 %hi, i32 %lo) {
- ; CHECK-LABEL: name: test_xsave64
- ; CHECK: bb.0 (%ir-block.0):
- ; CHECK-NEXT: liveins: $rdi, $esi, $edx
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr32 = COPY $edx
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr64_norex2 = COPY $rdi
- ; CHECK-NEXT: $edx = COPY [[COPY1]]
- ; CHECK-NEXT: $eax = COPY [[COPY]]
- ; CHECK-NEXT: XSAVE64 [[COPY2]], 1, $noreg, 0, $noreg, implicit $edx, implicit $eax
- ; CHECK-NEXT: RET 0
+define void @test_xsave64(ptr %ptr, i32 %hi, i32 %lo) nounwind {
+; CHECK-LABEL: test_xsave64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: movl %edx, %r16d
+; CHECK-NEXT: movl %esi, %edx
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: #APP
+; CHECK-NEXT: nop
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: movl %r16d, %eax
+; CHECK-NEXT: xsave64 (%rbx)
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
+ tail call void asm sideeffect "nop", "~{eax},~{ecx},~{esi},~{edi},~{r8},~{r9},~{r10},~{r11}"()
call void @llvm.x86.xsave64(ptr %ptr, i32 %hi, i32 %lo)
ret void;
}
declare void @llvm.x86.xsave64(ptr, i32, i32)
-define void @test_xrstor(ptr %ptr, i32 %hi, i32 %lo) {
- ; CHECK-LABEL: name: test_xrstor
- ; CHECK: bb.0 (%ir-block.0):
- ; CHECK-NEXT: liveins: $rdi, $esi, $edx
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr32 = COPY $edx
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr64_norex2 = COPY $rdi
- ; CHECK-NEXT: $edx = COPY [[COPY1]]
- ; CHECK-NEXT: $eax = COPY [[COPY]]
- ; CHECK-NEXT: XRSTOR [[COPY2]], 1, $noreg, 0, $noreg, implicit $edx, implicit $eax
- ; CHECK-NEXT: RET 0
+define void @test_xrstor(ptr %ptr, i32 %hi, i32 %lo) nounwind {
+; CHECK-LABEL: test_xrstor:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: movl %edx, %r16d
+; CHECK-NEXT: movl %esi, %edx
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: #APP
+; CHECK-NEXT: nop
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: movl %r16d, %eax
+; CHECK-NEXT: xrstor (%rbx)
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
+ tail call void asm sideeffect "nop", "~{eax},~{ecx},~{esi},~{edi},~{r8},~{r9},~{r10},~{r11}"()
call void @llvm.x86.xrstor(ptr %ptr, i32 %hi, i32 %lo)
ret void;
}
declare void @llvm.x86.xrstor(ptr, i32, i32)
-define void @test_xrstor64(ptr %ptr, i32 %hi, i32 %lo) {
- ; CHECK-LABEL: name: test_xrstor64
- ; CHECK: bb.0 (%ir-block.0):
- ; CHECK-NEXT: liveins: $rdi, $esi, $edx
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr32 = COPY $edx
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr64_norex2 = COPY $rdi
- ; CHECK-NEXT: $edx = COPY [[COPY1]]
- ; CHECK-NEXT: $eax = COPY [[COPY]]
- ; CHECK-NEXT: XRSTOR64 [[COPY2]], 1, $noreg, 0, $noreg, implicit $edx, implicit $eax
- ; CHECK-NEXT: RET 0
+define void @test_xrstor64(ptr %ptr, i32 %hi, i32 %lo) nounwind {
+; CHECK-LABEL: test_xrstor64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: movl %edx, %r16d
+; CHECK-NEXT: movl %esi, %edx
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: #APP
+; CHECK-NEXT: nop
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: movl %r16d, %eax
+; CHECK-NEXT: xrstor64 (%rbx)
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
+ tail call void asm sideeffect "nop", "~{eax},~{ecx},~{esi},~{edi},~{r8},~{r9},~{r10},~{r11}"()
call void @llvm.x86.xrstor64(ptr %ptr, i32 %hi, i32 %lo)
ret void;
}
diff --git a/llvm/test/Other/new-pm-defaults.ll b/llvm/test/Other/new-pm-defaults.ll
index b59d4cf6..1f437a6 100644
--- a/llvm/test/Other/new-pm-defaults.ll
+++ b/llvm/test/Other/new-pm-defaults.ll
@@ -208,7 +208,6 @@
; CHECK-O-NEXT: Running analysis: DemandedBitsAnalysis
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-EP-PEEPHOLE-NEXT: Running pass: NoOpFunctionPass
-; CHECK-O23SZ-NEXT: Running pass: DFAJumpThreadingPass
; CHECK-O23SZ-NEXT: Running pass: JumpThreadingPass
; CHECK-O23SZ-NEXT: Running analysis: LazyValueAnalysis
; CHECK-O23SZ-NEXT: Running pass: CorrelatedValuePropagationPass
diff --git a/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll
index c1d8b42..2d8b8f1 100644
--- a/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll
@@ -133,7 +133,6 @@
; CHECK-O-NEXT: Running pass: BDCEPass
; CHECK-O-NEXT: Running analysis: DemandedBitsAnalysis
; CHECK-O-NEXT: Running pass: InstCombinePass
-; CHECK-O23SZ-NEXT: Running pass: DFAJumpThreadingPass
; CHECK-O23SZ-NEXT: Running pass: JumpThreadingPass
; CHECK-O23SZ-NEXT: Running analysis: LazyValueAnalysis
; CHECK-O23SZ-NEXT: Running pass: CorrelatedValuePropagationPass
diff --git a/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
index 45f0902..7cacc17 100644
--- a/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
@@ -118,7 +118,6 @@
; CHECK-O-NEXT: Running pass: BDCEPass
; CHECK-O-NEXT: Running analysis: DemandedBitsAnalysis
; CHECK-O-NEXT: Running pass: InstCombinePass
-; CHECK-O23SZ-NEXT: Running pass: DFAJumpThreadingPass
; CHECK-O23SZ-NEXT: Running pass: JumpThreadingPass
; CHECK-O23SZ-NEXT: Running analysis: LazyValueAnalysis
; CHECK-O23SZ-NEXT: Running pass: CorrelatedValuePropagationPass
diff --git a/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
index 4c330f4..ef6cd83 100644
--- a/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
@@ -127,7 +127,6 @@
; CHECK-O-NEXT: Running pass: BDCEPass
; CHECK-O-NEXT: Running analysis: DemandedBitsAnalysis
; CHECK-O-NEXT: Running pass: InstCombinePass
-; CHECK-O23SZ-NEXT: Running pass: DFAJumpThreadingPass
; CHECK-O23SZ-NEXT: Running pass: JumpThreadingPass
; CHECK-O23SZ-NEXT: Running analysis: LazyValueAnalysis
; CHECK-O23SZ-NEXT: Running pass: CorrelatedValuePropagationPass
diff --git a/llvm/test/Other/new-pm-thinlto-prelink-defaults.ll b/llvm/test/Other/new-pm-thinlto-prelink-defaults.ll
index b61edc8..dd6acd2 100644
--- a/llvm/test/Other/new-pm-thinlto-prelink-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-prelink-defaults.ll
@@ -165,7 +165,6 @@
; CHECK-O-NEXT: Running pass: BDCEPass
; CHECK-O-NEXT: Running analysis: DemandedBitsAnalysis
; CHECK-O-NEXT: Running pass: InstCombinePass
-; CHECK-O23SZ-NEXT: Running pass: DFAJumpThreadingPass
; CHECK-O23SZ-NEXT: Running pass: JumpThreadingPass
; CHECK-O23SZ-NEXT: Running analysis: LazyValueAnalysis
; CHECK-O23SZ-NEXT: Running pass: CorrelatedValuePropagationPass
diff --git a/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll
index acf8c05..ee05452 100644
--- a/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll
@@ -167,7 +167,6 @@
; CHECK-O-NEXT: Running pass: BDCEPass
; CHECK-O-NEXT: Running analysis: DemandedBitsAnalysis
; CHECK-O-NEXT: Running pass: InstCombinePass
-; CHECK-O23SZ-NEXT: Running pass: DFAJumpThreadingPass
; CHECK-O23SZ-NEXT: Running pass: JumpThreadingPass
; CHECK-O23SZ-NEXT: Running analysis: LazyValueAnalysis
; CHECK-O23SZ-NEXT: Running pass: CorrelatedValuePropagationPass
diff --git a/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll
index 6b3c5ca..fd95e94 100644
--- a/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll
@@ -131,7 +131,6 @@
; CHECK-O-NEXT: Running pass: BDCEPass
; CHECK-O-NEXT: Running analysis: DemandedBitsAnalysis
; CHECK-O-NEXT: Running pass: InstCombinePass
-; CHECK-O23SZ-NEXT: Running pass: DFAJumpThreadingPass
; CHECK-O23SZ-NEXT: Running pass: JumpThreadingPass
; CHECK-O23SZ-NEXT: Running analysis: LazyValueAnalysis
; CHECK-O23SZ-NEXT: Running pass: CorrelatedValuePropagationPass
diff --git a/llvm/unittests/Analysis/AliasAnalysisTest.cpp b/llvm/unittests/Analysis/AliasAnalysisTest.cpp
index 06066b1..a28d318 100644
--- a/llvm/unittests/Analysis/AliasAnalysisTest.cpp
+++ b/llvm/unittests/Analysis/AliasAnalysisTest.cpp
@@ -232,18 +232,18 @@ TEST_F(AliasAnalysisTest, BatchAAPhiCycles) {
LLVMContext C;
SMDiagnostic Err;
std::unique_ptr<Module> M = parseAssemblyString(R"(
- define void @f(i8* noalias %a, i1 %c) {
+ define void @f(ptr noalias %a, i1 %c) {
entry:
br label %loop
loop:
- %phi = phi i8* [ null, %entry ], [ %a2, %loop ]
+ %phi = phi ptr [ null, %entry ], [ %a2, %loop ]
%offset1 = phi i64 [ 0, %entry ], [ %offset2, %loop]
%offset2 = add i64 %offset1, 1
- %a1 = getelementptr i8, i8* %a, i64 %offset1
- %a2 = getelementptr i8, i8* %a, i64 %offset2
- %s1 = select i1 %c, i8* %a1, i8* %phi
- %s2 = select i1 %c, i8* %a2, i8* %a1
+ %a1 = getelementptr i8, ptr %a, i64 %offset1
+ %a2 = getelementptr i8, ptr %a, i64 %offset2
+ %s1 = select i1 %c, ptr %a1, ptr %phi
+ %s2 = select i1 %c, ptr %a2, ptr %a1
br label %loop
}
)", Err, C);
@@ -280,15 +280,15 @@ TEST_F(AliasAnalysisTest, BatchAAPhiAssumption) {
LLVMContext C;
SMDiagnostic Err;
std::unique_ptr<Module> M = parseAssemblyString(R"(
- define void @f(i8* %a.base, i8* %b.base, i1 %c) {
+ define void @f(ptr %a.base, ptr %b.base, i1 %c) {
entry:
br label %loop
loop:
- %a = phi i8* [ %a.next, %loop ], [ %a.base, %entry ]
- %b = phi i8* [ %b.next, %loop ], [ %b.base, %entry ]
- %a.next = getelementptr i8, i8* %a, i64 1
- %b.next = getelementptr i8, i8* %b, i64 1
+ %a = phi ptr [ %a.next, %loop ], [ %a.base, %entry ]
+ %b = phi ptr [ %b.next, %loop ], [ %b.base, %entry ]
+ %a.next = getelementptr i8, ptr %a, i64 1
+ %b.next = getelementptr i8, ptr %b, i64 1
br label %loop
}
)", Err, C);
@@ -318,16 +318,16 @@ TEST_F(AliasAnalysisTest, PartialAliasOffset) {
LLVMContext C;
SMDiagnostic Err;
std::unique_ptr<Module> M = parseAssemblyString(R"(
- define void @foo(float* %arg, i32 %i) {
+ define void @foo(ptr %arg, i32 %i) {
bb:
%i2 = zext i32 %i to i64
- %i3 = getelementptr inbounds float, float* %arg, i64 %i2
- %i4 = bitcast float* %i3 to <2 x float>*
- %L1 = load <2 x float>, <2 x float>* %i4, align 16
+ %i3 = getelementptr inbounds float, ptr %arg, i64 %i2
+ %i4 = bitcast ptr %i3 to ptr
+ %L1 = load <2 x float>, ptr %i4, align 16
%i7 = add nuw nsw i32 %i, 1
%i8 = zext i32 %i7 to i64
- %i9 = getelementptr inbounds float, float* %arg, i64 %i8
- %L2 = load float, float* %i9, align 4
+ %i9 = getelementptr inbounds float, ptr %arg, i64 %i8
+ %L2 = load float, ptr %i9, align 4
ret void
}
)",
@@ -353,11 +353,11 @@ TEST_F(AliasAnalysisTest, PartialAliasOffsetSign) {
LLVMContext C;
SMDiagnostic Err;
std::unique_ptr<Module> M = parseAssemblyString(R"(
- define void @f(i64* %p) {
- %L1 = load i64, i64* %p
- %p.i8 = bitcast i64* %p to i8*
- %q = getelementptr i8, i8* %p.i8, i32 1
- %L2 = load i8, i8* %q
+ define void @f(ptr %p) {
+ %L1 = load i64, ptr %p
+ %p.i8 = bitcast ptr %p to ptr
+ %q = getelementptr i8, ptr %p.i8, i32 1
+ %L2 = load i8, ptr %q
ret void
}
)",
@@ -388,10 +388,10 @@ protected:
public:
AAPassInfraTest()
- : M(parseAssemblyString("define i32 @f(i32* %x, i32* %y) {\n"
+ : M(parseAssemblyString("define i32 @f(ptr %x, ptr %y) {\n"
"entry:\n"
- " %lx = load i32, i32* %x\n"
- " %ly = load i32, i32* %y\n"
+ " %lx = load i32, ptr %x\n"
+ " %ly = load i32, ptr %y\n"
" %sum = add i32 %lx, %ly\n"
" ret i32 %sum\n"
"}\n",
diff --git a/llvm/unittests/Analysis/AliasSetTrackerTest.cpp b/llvm/unittests/Analysis/AliasSetTrackerTest.cpp
index e784e6e..b5adc84 100644
--- a/llvm/unittests/Analysis/AliasSetTrackerTest.cpp
+++ b/llvm/unittests/Analysis/AliasSetTrackerTest.cpp
@@ -26,13 +26,13 @@ TEST(AliasSetTracker, AliasUnknownInst) {
; Function Attrs: nounwind ssp uwtable
define i32 @read_a() #0 {
- %1 = load i32, i32* @a, align 4, !tbaa !3
+ %1 = load i32, ptr @a, align 4, !tbaa !3
ret i32 %1
}
; Function Attrs: nounwind ssp uwtable
define void @write_b() #0 {
- store float 1.000000e+01, float* @b, align 4, !tbaa !7
+ store float 1.000000e+01, ptr @b, align 4, !tbaa !7
ret void
}
@@ -72,7 +72,7 @@ TEST(AliasSetTracker, AliasUnknownInst) {
AliasSetTracker AST(BAA);
for (auto &BB : *Test)
AST.add(BB);
- // There should be 2 disjoint alias sets. 1 from each call.
+ // There should be 2 disjoint alias sets. 1 from each call.
ASSERT_EQ((int)AST.getAliasSets().size(), 2);
// Directly test aliasesUnknownInst.
diff --git a/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp b/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp
index 5fd2ecc..921e2aa 100644
--- a/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp
+++ b/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp
@@ -74,18 +74,18 @@ TEST(AssumeQueryAPI, hasAttributeInAssume) {
EnableKnowledgeRetention.setValue(true);
StringRef Head =
"declare void @llvm.assume(i1)\n"
- "declare void @func(i32*, i32*, i32*)\n"
- "declare void @func1(i32*, i32*, i32*, i32*)\n"
- "declare void @func_many(i32*) \"no-jump-tables\" nounwind "
+ "declare void @func(ptr, ptr, ptr)\n"
+ "declare void @func1(ptr, ptr, ptr, ptr)\n"
+ "declare void @func_many(ptr) \"no-jump-tables\" nounwind "
"\"less-precise-fpmad\" willreturn norecurse\n"
- "define void @test(i32* %P, i32* %P1, i32* %P2, i32* %P3) {\n";
+ "define void @test(ptr %P, ptr %P1, ptr %P2, ptr %P3) {\n";
StringRef Tail = "ret void\n"
"}";
std::vector<std::pair<StringRef, llvm::function_ref<void(Instruction *)>>>
Tests;
Tests.push_back(std::make_pair(
- "call void @func(i32* nonnull align 4 dereferenceable(16) %P, i32* align "
- "8 noalias %P1, i32* align 8 noundef %P2)\n",
+ "call void @func(ptr nonnull align 4 dereferenceable(16) %P, ptr align "
+ "8 noalias %P1, ptr align 8 noundef %P2)\n",
[](Instruction *I) {
auto *Assume = buildAssumeFromInst(I);
Assume->insertBefore(I->getIterator());
@@ -103,11 +103,11 @@ TEST(AssumeQueryAPI, hasAttributeInAssume) {
Attribute::AttrKind::Alignment, 4));
}));
Tests.push_back(std::make_pair(
- "call void @func1(i32* nonnull align 32 dereferenceable(48) %P, i32* "
+ "call void @func1(ptr nonnull align 32 dereferenceable(48) %P, ptr "
"nonnull "
- "align 8 dereferenceable(28) %P, i32* nonnull align 64 "
+ "align 8 dereferenceable(28) %P, ptr nonnull align 64 "
"dereferenceable(4) "
- "%P, i32* nonnull align 16 dereferenceable(12) %P)\n",
+ "%P, ptr nonnull align 16 dereferenceable(12) %P)\n",
[](Instruction *I) {
auto *Assume = buildAssumeFromInst(I);
Assume->insertBefore(I->getIterator());
@@ -127,7 +127,7 @@ TEST(AssumeQueryAPI, hasAttributeInAssume) {
Attribute::AttrKind::Alignment, 64));
}));
Tests.push_back(std::make_pair(
- "call void @func_many(i32* align 8 noundef %P1) cold\n", [](Instruction *I) {
+ "call void @func_many(ptr align 8 noundef %P1) cold\n", [](Instruction *I) {
ShouldPreserveAllAttributes.setValue(true);
auto *Assume = buildAssumeFromInst(I);
Assume->insertBefore(I->getIterator());
@@ -142,11 +142,11 @@ TEST(AssumeQueryAPI, hasAttributeInAssume) {
ASSERT_TRUE(hasMatchesExactlyAttributes(Assume, nullptr, ""));
}));
Tests.push_back(std::make_pair(
- "call void @func1(i32* readnone align 32 "
- "dereferenceable(48) noalias %P, i32* "
- "align 8 dereferenceable(28) %P1, i32* align 64 "
+ "call void @func1(ptr readnone align 32 "
+ "dereferenceable(48) noalias %P, ptr "
+ "align 8 dereferenceable(28) %P1, ptr align 64 "
"dereferenceable(4) "
- "%P2, i32* nonnull align 16 dereferenceable(12) %P3)\n",
+ "%P2, ptr nonnull align 16 dereferenceable(12) %P3)\n",
[](Instruction *I) {
auto *Assume = buildAssumeFromInst(I);
Assume->insertBefore(I->getIterator());
@@ -178,11 +178,11 @@ TEST(AssumeQueryAPI, hasAttributeInAssume) {
}));
Tests.push_back(std::make_pair(
- "call void @func1(i32* readnone align 32 "
- "dereferenceable(48) noalias %P, i32* "
- "align 8 dereferenceable(28) %P1, i32* align 64 "
+ "call void @func1(ptr readnone align 32 "
+ "dereferenceable(48) noalias %P, ptr "
+ "align 8 dereferenceable(28) %P1, ptr align 64 "
"dereferenceable(4) "
- "%P2, i32* nonnull align 16 dereferenceable(12) %P3)\n",
+ "%P2, ptr nonnull align 16 dereferenceable(12) %P3)\n",
[](Instruction *I) {
auto *Assume = buildAssumeFromInst(I);
Assume->insertBefore(I->getIterator());
@@ -204,8 +204,8 @@ TEST(AssumeQueryAPI, hasAttributeInAssume) {
Attribute::AttrKind::Dereferenceable, 48));
}));
Tests.push_back(std::make_pair(
- "call void @func(i32* nonnull align 4 dereferenceable(16) %P, i32* align "
- "8 noalias %P1, i32* %P1)\n",
+ "call void @func(ptr nonnull align 4 dereferenceable(16) %P, ptr align "
+ "8 noalias %P1, ptr %P1)\n",
[](Instruction *I) {
auto *Assume = buildAssumeFromInst(I);
Assume->insertBefore(I->getIterator());
@@ -251,18 +251,18 @@ TEST(AssumeQueryAPI, fillMapFromAssume) {
EnableKnowledgeRetention.setValue(true);
StringRef Head =
"declare void @llvm.assume(i1)\n"
- "declare void @func(i32*, i32*, i32*)\n"
- "declare void @func1(i32*, i32*, i32*, i32*)\n"
- "declare void @func_many(i32*) \"no-jump-tables\" nounwind "
+ "declare void @func(ptr, ptr, ptr)\n"
+ "declare void @func1(ptr, ptr, ptr, ptr)\n"
+ "declare void @func_many(ptr) \"no-jump-tables\" nounwind "
"\"less-precise-fpmad\" willreturn norecurse\n"
- "define void @test(i32* %P, i32* %P1, i32* %P2, i32* %P3) {\n";
+ "define void @test(ptr %P, ptr %P1, ptr %P2, ptr %P3) {\n";
StringRef Tail = "ret void\n"
"}";
std::vector<std::pair<StringRef, llvm::function_ref<void(Instruction *)>>>
Tests;
Tests.push_back(std::make_pair(
- "call void @func(i32* nonnull align 4 dereferenceable(16) %P, i32* align "
- "8 noalias %P1, i32* align 8 dereferenceable(8) %P2)\n",
+ "call void @func(ptr nonnull align 4 dereferenceable(16) %P, ptr align "
+ "8 noalias %P1, ptr align 8 dereferenceable(8) %P2)\n",
[](Instruction *I) {
auto *Assume = buildAssumeFromInst(I);
Assume->insertBefore(I->getIterator());
@@ -283,11 +283,11 @@ TEST(AssumeQueryAPI, fillMapFromAssume) {
{4, 4}));
}));
Tests.push_back(std::make_pair(
- "call void @func1(i32* nonnull align 32 dereferenceable(48) %P, i32* "
+ "call void @func1(ptr nonnull align 32 dereferenceable(48) %P, ptr "
"nonnull "
- "align 8 dereferenceable(28) %P, i32* nonnull align 64 "
+ "align 8 dereferenceable(28) %P, ptr nonnull align 64 "
"dereferenceable(4) "
- "%P, i32* nonnull align 16 dereferenceable(12) %P)\n",
+ "%P, ptr nonnull align 16 dereferenceable(12) %P)\n",
[](Instruction *I) {
auto *Assume = buildAssumeFromInst(I);
Assume->insertBefore(I->getIterator());
@@ -310,7 +310,7 @@ TEST(AssumeQueryAPI, fillMapFromAssume) {
Map, Assume, {I->getOperand(0), Attribute::Alignment}, {64, 64}));
}));
Tests.push_back(std::make_pair(
- "call void @func_many(i32* align 8 %P1) cold\n", [](Instruction *I) {
+ "call void @func_many(ptr align 8 %P1) cold\n", [](Instruction *I) {
ShouldPreserveAllAttributes.setValue(true);
auto *Assume = buildAssumeFromInst(I);
Assume->insertBefore(I->getIterator());
@@ -331,11 +331,11 @@ TEST(AssumeQueryAPI, fillMapFromAssume) {
ASSERT_TRUE(Map.empty());
}));
Tests.push_back(std::make_pair(
- "call void @func1(i32* readnone align 32 "
- "dereferenceable(48) noalias %P, i32* "
- "align 8 dereferenceable(28) %P1, i32* align 64 "
+ "call void @func1(ptr readnone align 32 "
+ "dereferenceable(48) noalias %P, ptr "
+ "align 8 dereferenceable(28) %P1, ptr align 64 "
"dereferenceable(4) "
- "%P2, i32* nonnull align 16 dereferenceable(12) %P3)\n",
+ "%P2, ptr nonnull align 16 dereferenceable(12) %P3)\n",
[](Instruction *I) {
auto *Assume = buildAssumeFromInst(I);
Assume->insertBefore(I->getIterator());
@@ -371,8 +371,8 @@ TEST(AssumeQueryAPI, fillMapFromAssume) {
/// Keep this test last as it modifies the function.
Tests.push_back(std::make_pair(
- "call void @func(i32* nonnull align 4 dereferenceable(16) %P, i32* align "
- "8 noalias %P1, i32* %P2)\n",
+ "call void @func(ptr nonnull align 4 dereferenceable(16) %P, ptr align "
+ "8 noalias %P1, ptr %P2)\n",
[](Instruction *I) {
auto *Assume = buildAssumeFromInst(I);
Assume->insertBefore(I->getIterator());
@@ -507,11 +507,11 @@ TEST(AssumeQueryAPI, AssumptionCache) {
SMDiagnostic Err;
std::unique_ptr<Module> Mod = parseAssemblyString(
"declare void @llvm.assume(i1)\n"
- "define void @test(i32* %P, i32* %P1, i32* %P2, i32* %P3, i1 %B) {\n"
- "call void @llvm.assume(i1 true) [\"nonnull\"(i32* %P), \"align\"(i32* "
- "%P2, i32 4), \"align\"(i32* %P, i32 8)]\n"
- "call void @llvm.assume(i1 %B) [\"test\"(i32* %P1), "
- "\"dereferenceable\"(i32* %P, i32 4)]\n"
+ "define void @test(ptr %P, ptr %P1, ptr %P2, ptr %P3, i1 %B) {\n"
+ "call void @llvm.assume(i1 true) [\"nonnull\"(ptr %P), \"align\"(ptr "
+ "%P2, i32 4), \"align\"(ptr %P, i32 8)]\n"
+ "call void @llvm.assume(i1 %B) [\"test\"(ptr %P1), "
+ "\"dereferenceable\"(ptr %P, i32 4)]\n"
"ret void\n}\n",
Err, C);
if (!Mod)
@@ -569,11 +569,11 @@ TEST(AssumeQueryAPI, Alignment) {
SMDiagnostic Err;
std::unique_ptr<Module> Mod = parseAssemblyString(
"declare void @llvm.assume(i1)\n"
- "define void @test(i32* %P, i32* %P1, i32* %P2, i32 %I3, i1 %B) {\n"
- "call void @llvm.assume(i1 true) [\"align\"(i32* %P, i32 8, i32 %I3)]\n"
- "call void @llvm.assume(i1 true) [\"align\"(i32* %P1, i32 %I3, i32 "
+ "define void @test(ptr %P, ptr %P1, ptr %P2, i32 %I3, i1 %B) {\n"
+ "call void @llvm.assume(i1 true) [\"align\"(ptr %P, i32 8, i32 %I3)]\n"
+ "call void @llvm.assume(i1 true) [\"align\"(ptr %P1, i32 %I3, i32 "
"%I3)]\n"
- "call void @llvm.assume(i1 true) [\"align\"(i32* %P2, i32 16, i32 8)]\n"
+ "call void @llvm.assume(i1 true) [\"align\"(ptr %P2, i32 16, i32 8)]\n"
"ret void\n}\n",
Err, C);
if (!Mod)
diff --git a/llvm/unittests/Analysis/CGSCCPassManagerTest.cpp b/llvm/unittests/Analysis/CGSCCPassManagerTest.cpp
index 17240a1..bf5afe8 100644
--- a/llvm/unittests/Analysis/CGSCCPassManagerTest.cpp
+++ b/llvm/unittests/Analysis/CGSCCPassManagerTest.cpp
@@ -1936,26 +1936,26 @@ TEST_F(CGSCCPassManagerTest, TestDeletionOfFunctionInNonTrivialRefSCC) {
TEST_F(CGSCCPassManagerTest, TestInsertionOfNewNonTrivialCallEdge) {
std::unique_ptr<Module> M = parseIR("define void @f1() {\n"
"entry:\n"
- " %a = bitcast void ()* @f4 to i8*\n"
- " %b = bitcast void ()* @f2 to i8*\n"
+ " %a = bitcast ptr @f4 to ptr\n"
+ " %b = bitcast ptr @f2 to ptr\n"
" ret void\n"
"}\n"
"define void @f2() {\n"
"entry:\n"
- " %a = bitcast void ()* @f1 to i8*\n"
- " %b = bitcast void ()* @f3 to i8*\n"
+ " %a = bitcast ptr @f1 to ptr\n"
+ " %b = bitcast ptr @f3 to ptr\n"
" ret void\n"
"}\n"
"define void @f3() {\n"
"entry:\n"
- " %a = bitcast void ()* @f2 to i8*\n"
- " %b = bitcast void ()* @f4 to i8*\n"
+ " %a = bitcast ptr @f2 to ptr\n"
+ " %b = bitcast ptr @f4 to ptr\n"
" ret void\n"
"}\n"
"define void @f4() {\n"
"entry:\n"
- " %a = bitcast void ()* @f3 to i8*\n"
- " %b = bitcast void ()* @f1 to i8*\n"
+ " %a = bitcast ptr @f3 to ptr\n"
+ " %b = bitcast ptr @f1 to ptr\n"
" ret void\n"
"}\n");
diff --git a/llvm/unittests/Analysis/CaptureTrackingTest.cpp b/llvm/unittests/Analysis/CaptureTrackingTest.cpp
index ea3f21e..d7ee525 100644
--- a/llvm/unittests/Analysis/CaptureTrackingTest.cpp
+++ b/llvm/unittests/Analysis/CaptureTrackingTest.cpp
@@ -20,27 +20,27 @@ using namespace llvm;
TEST(CaptureTracking, MaxUsesToExplore) {
StringRef Assembly = R"(
; Function Attrs: nounwind ssp uwtable
- declare void @doesnt_capture(i8* nocapture, i8* nocapture, i8* nocapture,
- i8* nocapture, i8* nocapture)
+ declare void @doesnt_capture(ptr nocapture, ptr nocapture, ptr nocapture,
+ ptr nocapture, ptr nocapture)
; %arg has 5 uses
- define void @test_few_uses(i8* %arg) {
- call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
+ define void @test_few_uses(ptr %arg) {
+ call void @doesnt_capture(ptr %arg, ptr %arg, ptr %arg, ptr %arg, ptr %arg)
ret void
}
; %arg has 50 uses
- define void @test_many_uses(i8* %arg) {
- call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
- call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
- call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
- call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
- call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
- call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
- call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
- call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
- call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
- call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
+ define void @test_many_uses(ptr %arg) {
+ call void @doesnt_capture(ptr %arg, ptr %arg, ptr %arg, ptr %arg, ptr %arg)
+ call void @doesnt_capture(ptr %arg, ptr %arg, ptr %arg, ptr %arg, ptr %arg)
+ call void @doesnt_capture(ptr %arg, ptr %arg, ptr %arg, ptr %arg, ptr %arg)
+ call void @doesnt_capture(ptr %arg, ptr %arg, ptr %arg, ptr %arg, ptr %arg)
+ call void @doesnt_capture(ptr %arg, ptr %arg, ptr %arg, ptr %arg, ptr %arg)
+ call void @doesnt_capture(ptr %arg, ptr %arg, ptr %arg, ptr %arg, ptr %arg)
+ call void @doesnt_capture(ptr %arg, ptr %arg, ptr %arg, ptr %arg, ptr %arg)
+ call void @doesnt_capture(ptr %arg, ptr %arg, ptr %arg, ptr %arg, ptr %arg)
+ call void @doesnt_capture(ptr %arg, ptr %arg, ptr %arg, ptr %arg, ptr %arg)
+ call void @doesnt_capture(ptr %arg, ptr %arg, ptr %arg, ptr %arg, ptr %arg)
ret void
}
)";
@@ -85,12 +85,12 @@ struct CollectingCaptureTracker : public CaptureTracker {
TEST(CaptureTracking, MultipleUsesInSameInstruction) {
StringRef Assembly = R"(
- declare void @call(i8*, i8*, i8*)
+ declare void @call(ptr, ptr, ptr)
- define void @test(i8* %arg, i8** %ptr) {
- call void @call(i8* %arg, i8* nocapture %arg, i8* %arg) [ "bundle"(i8* %arg) ]
- cmpxchg i8** %ptr, i8* %arg, i8* %arg acq_rel monotonic
- icmp eq i8* %arg, %arg
+ define void @test(ptr %arg, ptr %ptr) {
+ call void @call(ptr %arg, ptr nocapture %arg, ptr %arg) [ "bundle"(ptr %arg) ]
+ cmpxchg ptr %ptr, ptr %arg, ptr %arg acq_rel monotonic
+ icmp eq ptr %arg, %arg
ret void
}
)";
diff --git a/llvm/unittests/Analysis/DDGTest.cpp b/llvm/unittests/Analysis/DDGTest.cpp
index 7fcdfdb..12944a3 100644
--- a/llvm/unittests/Analysis/DDGTest.cpp
+++ b/llvm/unittests/Analysis/DDGTest.cpp
@@ -51,7 +51,7 @@ TEST(DDGTest, getDependencies) {
"target datalayout = \"e-m:e-i64:64-n32:64\"\n"
"target triple = \"powerpc64le-unknown-linux-gnu\"\n"
"\n"
- "define dso_local void @foo(i32 signext %n, i32* noalias %A, i32* "
+ "define dso_local void @foo(i32 signext %n, ptr noalias %A, ptr "
"noalias %B) {\n"
"entry:\n"
" %cmp1 = icmp sgt i32 %n, 0\n"
@@ -64,16 +64,16 @@ TEST(DDGTest, getDependencies) {
" for.body:\n"
" %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ "
"%indvars.iv.next, %for.body ]\n"
- " %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv\n"
+ " %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv\n"
" %0 = trunc i64 %indvars.iv to i32\n"
- " store i32 %0, i32* %arrayidx, align 4\n"
+ " store i32 %0, ptr %arrayidx, align 4\n"
" %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1\n"
- " %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 "
+ " %arrayidx2 = getelementptr inbounds i32, ptr %A, i64 "
"%indvars.iv.next\n"
- " %1 = load i32, i32* %arrayidx2, align 4\n"
+ " %1 = load i32, ptr %arrayidx2, align 4\n"
" %add3 = add nsw i32 %1, 1\n"
- " %arrayidx5 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv\n"
- " store i32 %add3, i32* %arrayidx5, align 4\n"
+ " %arrayidx5 = getelementptr inbounds i32, ptr %B, i64 %indvars.iv\n"
+ " store i32 %add3, ptr %arrayidx5, align 4\n"
" %exitcond = icmp ne i64 %indvars.iv.next, %wide.trip.count\n"
" br i1 %exitcond, label %for.body, label %for.end.loopexit\n"
"\n"
@@ -142,8 +142,8 @@ TEST(DDGTest, avoidDuplicateEdgesToFromPiBlocks) {
const char *ModuleStr =
"target datalayout = \"e-m:e-i64:64-n32:64-v256:256:256-v512:512:512\"\n"
"\n"
- "define void @foo(float* noalias %A, float* noalias %B, float* noalias "
- "%C, float* noalias %D, i32 signext %n) {\n"
+ "define void @foo(ptr noalias %A, ptr noalias %B, ptr noalias "
+ "%C, ptr noalias %D, i32 signext %n) {\n"
"entry:\n"
" %cmp1 = icmp sgt i32 %n, 0\n"
" br i1 %cmp1, label %for.body.preheader, label %for.end\n"
@@ -156,26 +156,26 @@ TEST(DDGTest, avoidDuplicateEdgesToFromPiBlocks) {
"%for.body.preheader, %if.end\n"
" %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, "
"%if.end ]\n"
- " %arrayidx = getelementptr inbounds float, float* %A, i64 %indvars.iv\n"
- " %loadASubI = load float, float* %arrayidx, align 4\n"
- " %arrayidx2 = getelementptr inbounds float, float* %B, i64 "
+ " %arrayidx = getelementptr inbounds float, ptr %A, i64 %indvars.iv\n"
+ " %loadASubI = load float, ptr %arrayidx, align 4\n"
+ " %arrayidx2 = getelementptr inbounds float, ptr %B, i64 "
"%indvars.iv\n"
- " %loadBSubI = load float, float* %arrayidx2, align 4\n"
+ " %loadBSubI = load float, ptr %arrayidx2, align 4\n"
" %add = fadd fast float %loadASubI, %loadBSubI\n"
- " %arrayidx4 = getelementptr inbounds float, float* %A, i64 "
+ " %arrayidx4 = getelementptr inbounds float, ptr %A, i64 "
"%indvars.iv\n"
- " store float %add, float* %arrayidx4, align 4\n"
- " %arrayidx6 = getelementptr inbounds float, float* %A, i64 "
+ " store float %add, ptr %arrayidx4, align 4\n"
+ " %arrayidx6 = getelementptr inbounds float, ptr %A, i64 "
"%indvars.iv\n"
- " %0 = load float, float* %arrayidx6, align 4\n"
+ " %0 = load float, ptr %arrayidx6, align 4\n"
" %add7 = fadd fast float %0, 1.000000e+00\n"
" %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1\n"
- " %arrayidx10 = getelementptr inbounds float, float* %B, i64 "
+ " %arrayidx10 = getelementptr inbounds float, ptr %B, i64 "
"%indvars.iv.next\n"
- " store float %add7, float* %arrayidx10, align 4\n"
- " %arrayidx12 = getelementptr inbounds float, float* %A, i64 "
+ " store float %add7, ptr %arrayidx10, align 4\n"
+ " %arrayidx12 = getelementptr inbounds float, ptr %A, i64 "
"%indvars.iv\n"
- " %1 = load float, float* %arrayidx12, align 4\n"
+ " %1 = load float, ptr %arrayidx12, align 4\n"
" %cmp13 = fcmp fast ogt float %1, 1.000000e+02\n"
" br i1 %cmp13, label %if.then, label %if.else\n"
"\n"
@@ -188,7 +188,7 @@ TEST(DDGTest, avoidDuplicateEdgesToFromPiBlocks) {
"if.end: ; preds = %if.else, "
"%if.then\n"
" %ff.0 = phi float [ %add, %if.then ], [ %add7, %if.else ]\n"
- " store float %ff.0, float* %C, align 4\n"
+ " store float %ff.0, ptr %C, align 4\n"
" %exitcond = icmp ne i64 %indvars.iv.next, %wide.trip.count\n"
" br i1 %exitcond, label %for.body, label %for.end.loopexit\n"
"\n"
diff --git a/llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp b/llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp
index 497da8f..dc5d0a8 100644
--- a/llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp
+++ b/llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp
@@ -457,7 +457,7 @@ entry:
ret void
}
-define i32 @caller() personality i32 (...)* @__gxx_personality_v0 {
+define i32 @caller() personality ptr @__gxx_personality_v0 {
entry:
invoke void @callee()
to label %cont unwind label %exc
@@ -466,7 +466,7 @@ cont:
ret i32 0
exc:
- %exn = landingpad {i8*, i32}
+ %exn = landingpad {ptr, i32}
cleanup
ret i32 1
}
@@ -498,7 +498,7 @@ TEST_F(FunctionPropertiesAnalysisTest, InvokeUnreachableHandler) {
R"IR(
declare void @might_throw()
-define internal i32 @callee() personality i32 (...)* @__gxx_personality_v0 {
+define internal i32 @callee() personality ptr @__gxx_personality_v0 {
entry:
invoke void @might_throw()
to label %cont unwind label %exc
@@ -507,12 +507,12 @@ cont:
ret i32 0
exc:
- %exn = landingpad {i8*, i32}
+ %exn = landingpad {ptr, i32}
cleanup
- resume { i8*, i32 } %exn
+ resume { ptr, i32 } %exn
}
-define i32 @caller() personality i32 (...)* @__gxx_personality_v0 {
+define i32 @caller() personality ptr @__gxx_personality_v0 {
entry:
%X = invoke i32 @callee()
to label %cont unwind label %Handler
@@ -521,7 +521,7 @@ cont:
ret i32 %X
Handler:
- %exn = landingpad {i8*, i32}
+ %exn = landingpad {ptr, i32}
cleanup
ret i32 1
}
@@ -554,7 +554,7 @@ TEST_F(FunctionPropertiesAnalysisTest, Rethrow) {
R"IR(
declare void @might_throw()
-define internal i32 @callee() personality i32 (...)* @__gxx_personality_v0 {
+define internal i32 @callee() personality ptr @__gxx_personality_v0 {
entry:
invoke void @might_throw()
to label %cont unwind label %exc
@@ -563,12 +563,12 @@ cont:
ret i32 0
exc:
- %exn = landingpad {i8*, i32}
+ %exn = landingpad {ptr, i32}
cleanup
- resume { i8*, i32 } %exn
+ resume { ptr, i32 } %exn
}
-define i32 @caller() personality i32 (...)* @__gxx_personality_v0 {
+define i32 @caller() personality ptr @__gxx_personality_v0 {
entry:
%X = invoke i32 @callee()
to label %cont unwind label %Handler
@@ -577,7 +577,7 @@ cont:
ret i32 %X
Handler:
- %exn = landingpad {i8*, i32}
+ %exn = landingpad {ptr, i32}
cleanup
ret i32 1
}
@@ -612,18 +612,18 @@ declare void @external_func()
@exception_type2 = external global i8
-define internal void @inner() personality i8* null {
+define internal void @inner() personality ptr null {
invoke void @external_func()
to label %cont unwind label %lpad
cont:
ret void
lpad:
%lp = landingpad i32
- catch i8* @exception_type1
+ catch ptr @exception_type1
resume i32 %lp
}
-define void @outer() personality i8* null {
+define void @outer() personality ptr null {
invoke void @inner()
to label %cont unwind label %lpad
cont:
@@ -631,7 +631,7 @@ cont:
lpad:
%lp = landingpad i32
cleanup
- catch i8* @exception_type2
+ catch ptr @exception_type2
resume i32 %lp
}
@@ -666,18 +666,18 @@ declare void @external_func()
@exception_type2 = external global i8
-define internal void @inner() personality i8* null {
+define internal void @inner() personality ptr null {
invoke void @external_func()
to label %cont unwind label %lpad
cont:
ret void
lpad:
%lp = landingpad i32
- catch i8* @exception_type1
+ catch ptr @exception_type1
resume i32 %lp
}
-define void @outer(i32 %a) personality i8* null {
+define void @outer(i32 %a) personality ptr null {
entry:
%i = icmp slt i32 %a, 0
br i1 %i, label %if.then, label %cont
@@ -689,7 +689,7 @@ cont:
lpad:
%lp = landingpad i32
cleanup
- catch i8* @exception_type2
+ catch ptr @exception_type2
resume i32 %lp
}
@@ -931,9 +931,9 @@ TEST_F(FunctionPropertiesAnalysisTest, DetailedOperandCount) {
@a = global i64 1
define i64 @f1(i64 %e) {
- %b = load i64, i64* @a
+ %b = load i64, ptr @a
%c = add i64 %b, 2
- %d = call i64 asm "mov $1,$0", "=r,r" (i64 %c)
+ %d = call i64 asm "mov $1,$0", "=r,r" (i64 %c)
%f = add i64 %d, %e
ret i64 %f
}
diff --git a/llvm/unittests/Analysis/LazyCallGraphTest.cpp b/llvm/unittests/Analysis/LazyCallGraphTest.cpp
index 4a4ff32..5c0bfbd 100644
--- a/llvm/unittests/Analysis/LazyCallGraphTest.cpp
+++ b/llvm/unittests/Analysis/LazyCallGraphTest.cpp
@@ -142,78 +142,78 @@ static const char DiamondOfTriangles[] =
static const char DiamondOfTrianglesRefGraph[] =
"define void @a1() {\n"
"entry:\n"
- " %a = alloca void ()*\n"
- " store void ()* @a2, void ()** %a\n"
- " store void ()* @b2, void ()** %a\n"
- " store void ()* @c3, void ()** %a\n"
+ " %a = alloca ptr\n"
+ " store ptr @a2, ptr %a\n"
+ " store ptr @b2, ptr %a\n"
+ " store ptr @c3, ptr %a\n"
" ret void\n"
"}\n"
"define void @a2() {\n"
"entry:\n"
- " %a = alloca void ()*\n"
- " store void ()* @a3, void ()** %a\n"
+ " %a = alloca ptr\n"
+ " store ptr @a3, ptr %a\n"
" ret void\n"
"}\n"
"define void @a3() {\n"
"entry:\n"
- " %a = alloca void ()*\n"
- " store void ()* @a1, void ()** %a\n"
+ " %a = alloca ptr\n"
+ " store ptr @a1, ptr %a\n"
" ret void\n"
"}\n"
"define void @b1() {\n"
"entry:\n"
- " %a = alloca void ()*\n"
- " store void ()* @b2, void ()** %a\n"
- " store void ()* @d3, void ()** %a\n"
+ " %a = alloca ptr\n"
+ " store ptr @b2, ptr %a\n"
+ " store ptr @d3, ptr %a\n"
" ret void\n"
"}\n"
"define void @b2() {\n"
"entry:\n"
- " %a = alloca void ()*\n"
- " store void ()* @b3, void ()** %a\n"
+ " %a = alloca ptr\n"
+ " store ptr @b3, ptr %a\n"
" ret void\n"
"}\n"
"define void @b3() {\n"
"entry:\n"
- " %a = alloca void ()*\n"
- " store void ()* @b1, void ()** %a\n"
+ " %a = alloca ptr\n"
+ " store ptr @b1, ptr %a\n"
" ret void\n"
"}\n"
"define void @c1() {\n"
"entry:\n"
- " %a = alloca void ()*\n"
- " store void ()* @c2, void ()** %a\n"
- " store void ()* @d2, void ()** %a\n"
+ " %a = alloca ptr\n"
+ " store ptr @c2, ptr %a\n"
+ " store ptr @d2, ptr %a\n"
" ret void\n"
"}\n"
"define void @c2() {\n"
"entry:\n"
- " %a = alloca void ()*\n"
- " store void ()* @c3, void ()** %a\n"
+ " %a = alloca ptr\n"
+ " store ptr @c3, ptr %a\n"
" ret void\n"
"}\n"
"define void @c3() {\n"
"entry:\n"
- " %a = alloca void ()*\n"
- " store void ()* @c1, void ()** %a\n"
+ " %a = alloca ptr\n"
+ " store ptr @c1, ptr %a\n"
" ret void\n"
"}\n"
"define void @d1() {\n"
"entry:\n"
- " %a = alloca void ()*\n"
- " store void ()* @d2, void ()** %a\n"
+ " %a = alloca ptr\n"
+ " store ptr @d2, ptr %a\n"
" ret void\n"
"}\n"
"define void @d2() {\n"
"entry:\n"
- " %a = alloca void ()*\n"
- " store void ()* @d3, void ()** %a\n"
+ " %a = alloca ptr\n"
+ " store ptr @d3, ptr %a\n"
" ret void\n"
"}\n"
"define void @d3() {\n"
"entry:\n"
- " %a = alloca void ()*\n"
- " store void ()* @d1, void ()** %a\n"
+ " %a = alloca ptr\n"
+ " store ptr @d1, ptr %a\n"
" ret void\n"
"}\n";
@@ -1005,20 +1005,20 @@ TEST(LazyCallGraphTest, IncomingEdgeInsertionLargeRefCycle) {
std::unique_ptr<Module> M =
parseAssembly(Context, "define void @a() {\n"
"entry:\n"
- " %p = alloca void ()*\n"
- " store void ()* @b, void ()** %p\n"
+ " %p = alloca ptr\n"
+ " store ptr @b, ptr %p\n"
" ret void\n"
"}\n"
"define void @b() {\n"
"entry:\n"
- " %p = alloca void ()*\n"
- " store void ()* @c, void ()** %p\n"
+ " %p = alloca ptr\n"
+ " store ptr @c, ptr %p\n"
" ret void\n"
"}\n"
"define void @c() {\n"
"entry:\n"
- " %p = alloca void ()*\n"
- " store void ()* @d, void ()** %p\n"
+ " %p = alloca ptr\n"
+ " store ptr @d, ptr %p\n"
" ret void\n"
"}\n"
"define void @d() {\n"
@@ -1306,25 +1306,25 @@ TEST(LazyCallGraphTest, InternalEdgeRemoval) {
LLVMContext Context;
// A nice fully connected (including self-edges) RefSCC.
std::unique_ptr<Module> M = parseAssembly(
- Context, "define void @a(i8** %ptr) {\n"
+ Context, "define void @a(ptr %ptr) {\n"
"entry:\n"
- " store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
- " store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
- " store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
+ " store ptr @a, ptr %ptr\n"
+ " store ptr @b, ptr %ptr\n"
+ " store ptr @c, ptr %ptr\n"
" ret void\n"
"}\n"
- "define void @b(i8** %ptr) {\n"
+ "define void @b(ptr %ptr) {\n"
"entry:\n"
- " store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
- " store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
- " store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
+ " store ptr @a, ptr %ptr\n"
+ " store ptr @b, ptr %ptr\n"
+ " store ptr @c, ptr %ptr\n"
" ret void\n"
"}\n"
- "define void @c(i8** %ptr) {\n"
+ "define void @c(ptr %ptr) {\n"
"entry:\n"
- " store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
- " store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
- " store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
+ " store ptr @a, ptr %ptr\n"
+ " store ptr @b, ptr %ptr\n"
+ " store ptr @c, ptr %ptr\n"
" ret void\n"
"}\n");
LazyCallGraph CG = buildCG(*M);
@@ -1384,25 +1384,25 @@ TEST(LazyCallGraphTest, InternalMultiEdgeRemoval) {
LLVMContext Context;
// A nice fully connected (including self-edges) RefSCC.
std::unique_ptr<Module> M = parseAssembly(
- Context, "define void @a(i8** %ptr) {\n"
+ Context, "define void @a(ptr %ptr) {\n"
"entry:\n"
- " store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
- " store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
- " store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
+ " store ptr @a, ptr %ptr\n"
+ " store ptr @b, ptr %ptr\n"
+ " store ptr @c, ptr %ptr\n"
" ret void\n"
"}\n"
- "define void @b(i8** %ptr) {\n"
+ "define void @b(ptr %ptr) {\n"
"entry:\n"
- " store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
- " store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
- " store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
+ " store ptr @a, ptr %ptr\n"
+ " store ptr @b, ptr %ptr\n"
+ " store ptr @c, ptr %ptr\n"
" ret void\n"
"}\n"
- "define void @c(i8** %ptr) {\n"
+ "define void @c(ptr %ptr) {\n"
"entry:\n"
- " store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
- " store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
- " store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
+ " store ptr @a, ptr %ptr\n"
+ " store ptr @b, ptr %ptr\n"
+ " store ptr @c, ptr %ptr\n"
" ret void\n"
"}\n");
LazyCallGraph CG = buildCG(*M);
@@ -1454,22 +1454,22 @@ TEST(LazyCallGraphTest, InternalNoOpEdgeRemoval) {
// Reference edges: a -> b -> c -> a
// Call edges: a -> c -> b -> a
std::unique_ptr<Module> M = parseAssembly(
- Context, "define void @a(i8** %ptr) {\n"
+ Context, "define void @a(ptr %ptr) {\n"
"entry:\n"
- " call void @b(i8** %ptr)\n"
- " store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
+ " call void @b(ptr %ptr)\n"
+ " store ptr @c, ptr %ptr\n"
" ret void\n"
"}\n"
- "define void @b(i8** %ptr) {\n"
+ "define void @b(ptr %ptr) {\n"
"entry:\n"
- " store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
- " call void @c(i8** %ptr)\n"
+ " store ptr @a, ptr %ptr\n"
+ " call void @c(ptr %ptr)\n"
" ret void\n"
"}\n"
- "define void @c(i8** %ptr) {\n"
+ "define void @c(ptr %ptr) {\n"
"entry:\n"
- " call void @a(i8** %ptr)\n"
- " store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
+ " call void @a(ptr %ptr)\n"
+ " store ptr @b, ptr %ptr\n"
" ret void\n"
"}\n");
LazyCallGraph CG = buildCG(*M);
@@ -1622,24 +1622,24 @@ TEST(LazyCallGraphTest, InternalRefEdgeToCall) {
"entry:\n"
" call void @b()\n"
" call void @c()\n"
- " store void()* @d, void()** undef\n"
+ " store ptr @d, ptr undef\n"
" ret void\n"
"}\n"
"define void @b() {\n"
"entry:\n"
- " store void()* @c, void()** undef\n"
+ " store ptr @c, ptr undef\n"
" call void @d()\n"
" ret void\n"
"}\n"
"define void @c() {\n"
"entry:\n"
- " store void()* @b, void()** undef\n"
+ " store ptr @b, ptr undef\n"
" call void @d()\n"
" ret void\n"
"}\n"
"define void @d() {\n"
"entry:\n"
- " store void()* @a, void()** undef\n"
+ " store ptr @a, ptr undef\n"
" ret void\n"
"}\n");
LazyCallGraph CG = buildCG(*M);
@@ -1745,13 +1745,13 @@ TEST(LazyCallGraphTest, InternalRefEdgeToCallNoCycleInterleaved) {
"}\n"
"define void @c3() {\n"
"entry:\n"
- " store void()* @b1, void()** undef\n"
+ " store ptr @b1, ptr undef\n"
" call void @d()\n"
" ret void\n"
"}\n"
"define void @d() {\n"
"entry:\n"
- " store void()* @a, void()** undef\n"
+ " store ptr @a, ptr undef\n"
" ret void\n"
"}\n");
LazyCallGraph CG = buildCG(*M);
@@ -1875,13 +1875,13 @@ TEST(LazyCallGraphTest, InternalRefEdgeToCallBothPartitionAndMerge) {
"}\n"
"define void @f() {\n"
"entry:\n"
- " store void()* @b, void()** undef\n"
+ " store ptr @b, ptr undef\n"
" call void @g()\n"
" ret void\n"
"}\n"
"define void @g() {\n"
"entry:\n"
- " store void()* @a, void()** undef\n"
+ " store ptr @a, ptr undef\n"
" ret void\n"
"}\n");
LazyCallGraph CG = buildCG(*M);
@@ -1962,9 +1962,9 @@ TEST(LazyCallGraphTest, HandleBlockAddress) {
"bb:\n"
" unreachable\n"
"}\n"
- "define void @g(i8** %ptr) {\n"
+ "define void @g(ptr %ptr) {\n"
"entry:\n"
- " store i8* blockaddress(@f, %bb), i8** %ptr\n"
+ " store ptr blockaddress(@f, %bb), ptr %ptr\n"
" ret void\n"
"}\n");
LazyCallGraph CG = buildCG(*M);
@@ -1991,9 +1991,9 @@ TEST(LazyCallGraphTest, HandleBlockAddress2) {
parseAssembly(Context, "define void @f() {\n"
" ret void\n"
"}\n"
- "define void @g(i8** %ptr) {\n"
+ "define void @g(ptr %ptr) {\n"
"bb:\n"
- " store i8* blockaddress(@g, %bb), i8** %ptr\n"
+ " store ptr blockaddress(@g, %bb), ptr %ptr\n"
" ret void\n"
"}\n");
LazyCallGraph CG = buildCG(*M);
@@ -2018,31 +2018,31 @@ TEST(LazyCallGraphTest, ReplaceNodeFunction) {
// function.
std::unique_ptr<Module> M =
parseAssembly(Context,
- "define void @a(i8** %ptr) {\n"
+ "define void @a(ptr %ptr) {\n"
"entry:\n"
- " store i8* bitcast (void(i8**)* @d to i8*), i8** %ptr\n"
+ " store ptr @d, ptr %ptr\n"
" ret void\n"
"}\n"
- "define void @b(i8** %ptr) {\n"
+ "define void @b(ptr %ptr) {\n"
"entry:\n"
- " store i8* bitcast (void(i8**)* @d to i8*), i8** %ptr\n"
- " store i8* bitcast (void(i8**)* @d to i8*), i8** %ptr\n"
- " call void @d(i8** %ptr)"
+ " store ptr @d, ptr %ptr\n"
+ " store ptr @d, ptr %ptr\n"
+ " call void @d(ptr %ptr)"
" ret void\n"
"}\n"
- "define void @c(i8** %ptr) {\n"
+ "define void @c(ptr %ptr) {\n"
"entry:\n"
- " call void @d(i8** %ptr)"
- " call void @d(i8** %ptr)"
- " store i8* bitcast (void(i8**)* @d to i8*), i8** %ptr\n"
+ " call void @d(ptr %ptr)"
+ " call void @d(ptr %ptr)"
+ " store ptr @d, ptr %ptr\n"
" ret void\n"
"}\n"
- "define void @d(i8** %ptr) {\n"
+ "define void @d(ptr %ptr) {\n"
"entry:\n"
- " store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
- " call void @c(i8** %ptr)"
- " call void @d(i8** %ptr)"
- " store i8* bitcast (void(i8**)* @d to i8*), i8** %ptr\n"
+ " store ptr @b, ptr %ptr\n"
+ " call void @c(ptr %ptr)"
+ " call void @d(ptr %ptr)"
+ " store ptr @d, ptr %ptr\n"
" ret void\n"
"}\n");
LazyCallGraph CG = buildCG(*M);
@@ -2098,25 +2098,25 @@ TEST(LazyCallGraphTest, RemoveFunctionWithSpuriousRef) {
// A graph with a couple of RefSCCs.
std::unique_ptr<Module> M =
parseAssembly(Context,
- "define void @a(i8** %ptr) {\n"
+ "define void @a(ptr %ptr) {\n"
"entry:\n"
- " store i8* bitcast (void(i8**)* @d to i8*), i8** %ptr\n"
+ " store ptr @d, ptr %ptr\n"
" ret void\n"
"}\n"
- "define void @b(i8** %ptr) {\n"
+ "define void @b(ptr %ptr) {\n"
"entry:\n"
- " store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
+ " store ptr @c, ptr %ptr\n"
" ret void\n"
"}\n"
- "define void @c(i8** %ptr) {\n"
+ "define void @c(ptr %ptr) {\n"
"entry:\n"
- " call void @d(i8** %ptr)"
+ " call void @d(ptr %ptr)"
" ret void\n"
"}\n"
- "define void @d(i8** %ptr) {\n"
+ "define void @d(ptr %ptr) {\n"
"entry:\n"
- " call void @c(i8** %ptr)"
- " store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
+ " call void @c(ptr %ptr)"
+ " store ptr @b, ptr %ptr\n"
" ret void\n"
"}\n"
"define void @dead() {\n"
@@ -2965,7 +2965,7 @@ TEST(LazyCallGraphTest, AddSplitFunctions5) {
LLVMContext Context;
std::unique_ptr<Module> M =
parseAssembly(Context, "define void @f() {\n"
- " %1 = bitcast void ()* @f2 to i8*\n"
+ " %1 = bitcast ptr @f2 to ptr\n"
" ret void\n"
"}\n"
"define void @f2() {\n"
diff --git a/llvm/unittests/Analysis/SparsePropagation.cpp b/llvm/unittests/Analysis/SparsePropagation.cpp
index ca73a48..0cbf5de 100644
--- a/llvm/unittests/Analysis/SparsePropagation.cpp
+++ b/llvm/unittests/Analysis/SparsePropagation.cpp
@@ -357,9 +357,9 @@ TEST_F(SparsePropagationTest, GlobalVariableOverDefined) {
/// Test that we propagate information through function returns.
///
-/// define internal i64 @f(i1* %cond) {
+/// define internal i64 @f(ptr %cond) {
/// if:
-/// %0 = load i1, i1* %cond
+/// %0 = load i1, ptr %cond
/// br i1 %0, label %then, label %else
///
/// then:
@@ -397,9 +397,9 @@ TEST_F(SparsePropagationTest, FunctionDefined) {
/// Test that we propagate information through function returns.
///
-/// define internal i64 @f(i1* %cond) {
+/// define internal i64 @f(ptr %cond) {
/// if:
-/// %0 = load i1, i1* %cond
+/// %0 = load i1, ptr %cond
/// br i1 %0, label %then, label %else
///
/// then:
diff --git a/llvm/unittests/Analysis/UnrollAnalyzerTest.cpp b/llvm/unittests/Analysis/UnrollAnalyzerTest.cpp
index d5ba175..3c7ee7ad 100644
--- a/llvm/unittests/Analysis/UnrollAnalyzerTest.cpp
+++ b/llvm/unittests/Analysis/UnrollAnalyzerTest.cpp
@@ -214,18 +214,18 @@ TEST(UnrollAnalyzerTest, PtrCmpSimplifications) {
"target datalayout = \"e-m:o-i64:64-f80:128-n8:16:32:64-S128\"\n"
"define void @ptr_cmp(i8 *%a) {\n"
"entry:\n"
- " %limit = getelementptr i8, i8* %a, i64 40\n"
- " %start.iv2 = getelementptr i8, i8* %a, i64 7\n"
+ " %limit = getelementptr i8, ptr %a, i64 40\n"
+ " %start.iv2 = getelementptr i8, ptr %a, i64 7\n"
" br label %loop.body\n"
"loop.body:\n"
- " %iv.0 = phi i8* [ %a, %entry ], [ %iv.1, %loop.body ]\n"
- " %iv2.0 = phi i8* [ %start.iv2, %entry ], [ %iv2.1, %loop.body ]\n"
- " %cmp = icmp eq i8* %iv2.0, %iv.0\n"
- " %cmp2 = icmp slt i8* %iv2.0, %iv.0\n"
- " %cmp3 = icmp ult i8* %iv2.0, %iv.0\n"
- " %iv.1 = getelementptr inbounds i8, i8* %iv.0, i64 1\n"
- " %iv2.1 = getelementptr inbounds i8, i8* %iv2.0, i64 1\n"
- " %exitcond = icmp ne i8* %iv.1, %limit\n"
+ " %iv.0 = phi ptr [ %a, %entry ], [ %iv.1, %loop.body ]\n"
+ " %iv2.0 = phi ptr [ %start.iv2, %entry ], [ %iv2.1, %loop.body ]\n"
+ " %cmp = icmp eq ptr %iv2.0, %iv.0\n"
+ " %cmp2 = icmp slt ptr %iv2.0, %iv.0\n"
+ " %cmp3 = icmp ult ptr %iv2.0, %iv.0\n"
+ " %iv.1 = getelementptr inbounds i8, ptr %iv.0, i64 1\n"
+ " %iv2.1 = getelementptr inbounds i8, ptr %iv2.0, i64 1\n"
+ " %exitcond = icmp ne ptr %iv.1, %limit\n"
" br i1 %exitcond, label %loop.body, label %loop.exit\n"
"loop.exit:\n"
" ret void\n"
@@ -248,14 +248,14 @@ TEST(UnrollAnalyzerTest, PtrCmpSimplifications) {
Instruction *Cmp2 = &*BBI++;
Instruction *Cmp3 = &*BBI++;
// Check simplification expected on the 5th iteration.
- // Check that "%cmp = icmp eq i8* %iv2.0, %iv.0" is simplified to 0.
+ // Check that "%cmp = icmp eq ptr %iv2.0, %iv.0" is simplified to 0.
auto I1 = SimplifiedValuesVector[5].find(Cmp1);
EXPECT_TRUE(I1 != SimplifiedValuesVector[5].end());
EXPECT_EQ(cast<ConstantInt>((*I1).second)->getZExtValue(), 0U);
- // Check that "%cmp2 = icmp slt i8* %iv2.0, %iv.0" does not simplify
+ // Check that "%cmp2 = icmp slt ptr %iv2.0, %iv.0" does not simplify
auto I2 = SimplifiedValuesVector[5].find(Cmp2);
EXPECT_TRUE(I2 == SimplifiedValuesVector[5].end());
- // Check that "%cmp3 = icmp ult i8* %iv2.0, %iv.0" is simplified to 0.
+ // Check that "%cmp3 = icmp ult ptr %iv2.0, %iv.0" is simplified to 0.
auto I3 = SimplifiedValuesVector[5].find(Cmp3);
EXPECT_TRUE(I3 != SimplifiedValuesVector[5].end());
  EXPECT_EQ(cast<ConstantInt>((*I3).second)->getZExtValue(), 0U);
@@ -271,8 +271,8 @@ TEST(UnrollAnalyzerTest, CastSimplifications) {
"\n"
"loop:\n"
" %iv = phi i64 [ 0, %entry ], [ %inc, %loop ]\n"
- " %array_const_idx = getelementptr inbounds [10 x i32], [10 x i32]* @known_constant, i64 0, i64 %iv\n"
- " %const_array_element = load i32, i32* %array_const_idx, align 4\n"
+ " %array_const_idx = getelementptr inbounds [10 x i32], ptr @known_constant, i64 0, i64 %iv\n"
+ " %const_array_element = load i32, ptr %array_const_idx, align 4\n"
" %se = sext i32 %const_array_element to i64\n"
" %ze = zext i32 %const_array_element to i64\n"
" %tr = trunc i32 %const_array_element to i8\n"
diff --git a/llvm/unittests/Analysis/ValueTrackingTest.cpp b/llvm/unittests/Analysis/ValueTrackingTest.cpp
index bb0280ee..89a0fae 100644
--- a/llvm/unittests/Analysis/ValueTrackingTest.cpp
+++ b/llvm/unittests/Analysis/ValueTrackingTest.cpp
@@ -2697,9 +2697,9 @@ TEST_F(ComputeKnownBitsTest, ComputeKnownBitsGEPWithRange) {
parseAssembly(
"define void @test(ptr %p) {\n"
" %A = load i64, ptr %p, !range !{i64 64, i64 65536}\n"
- " %APtr = inttoptr i64 %A to float*"
- " %APtrPlus512 = getelementptr float, float* %APtr, i32 128\n"
- " %c = icmp ugt float* %APtrPlus512, inttoptr (i32 523 to float*)\n"
+ " %APtr = inttoptr i64 %A to ptr"
+ " %APtrPlus512 = getelementptr float, ptr %APtr, i32 128\n"
+ " %c = icmp ugt ptr %APtrPlus512, inttoptr (i32 523 to ptr)\n"
" call void @llvm.assume(i1 %c)\n"
" ret void\n"
"}\n"
@@ -2730,9 +2730,9 @@ TEST_F(ComputeKnownBitsTest, ComputeKnownBitsGEPWithRangeNoOverlap) {
parseAssembly(
"define void @test(ptr %p) {\n"
" %A = load i64, ptr %p, !range !{i64 32, i64 64}\n"
- " %APtr = inttoptr i64 %A to float*"
- " %APtrPlus512 = getelementptr float, float* %APtr, i32 128\n"
- " %c = icmp ugt float* %APtrPlus512, inttoptr (i32 523 to float*)\n"
+ " %APtr = inttoptr i64 %A to ptr"
+ " %APtrPlus512 = getelementptr float, ptr %APtr, i32 128\n"
+ " %c = icmp ugt ptr %APtrPlus512, inttoptr (i32 523 to ptr)\n"
" call void @llvm.assume(i1 %c)\n"
" ret void\n"
"}\n"
diff --git a/llvm/unittests/MIR/MachineMetadata.cpp b/llvm/unittests/MIR/MachineMetadata.cpp
index 5875512..8c36377 100644
--- a/llvm/unittests/MIR/MachineMetadata.cpp
+++ b/llvm/unittests/MIR/MachineMetadata.cpp
@@ -205,8 +205,8 @@ TEST_F(MachineMetadataTest, MMSlotTrackerAArch64) {
StringRef MIRString = R"MIR(
--- |
- define i32 @test0(i32* %p) {
- %r = load i32, i32* %p, align 4
+ define i32 @test0(ptr %p) {
+ %r = load i32, ptr %p, align 4
ret i32 %r
}
...
@@ -354,8 +354,8 @@ TEST_F(MachineMetadataTest, MMSlotTrackerX64) {
StringRef MIRString = R"MIR(
--- |
- define i32 @test0(i32* %p) {
- %r = load i32, i32* %p, align 4
+ define i32 @test0(ptr %p) {
+ %r = load i32, ptr %p, align 4
ret i32 %r
}
...
@@ -446,8 +446,8 @@ TEST_F(MachineMetadataTest, MMSlotTrackerAMDGPU) {
StringRef MIRString = R"MIR(
--- |
- define i32 @test0(i32* %p) {
- %r = load i32, i32* %p, align 4
+ define i32 @test0(ptr %p) {
+ %r = load i32, ptr %p, align 4
ret i32 %r
}
...
diff --git a/llvm/unittests/Target/WebAssembly/WebAssemblyExceptionInfoTest.cpp b/llvm/unittests/Target/WebAssembly/WebAssemblyExceptionInfoTest.cpp
index d7f2908..c36ed93 100644
--- a/llvm/unittests/Target/WebAssembly/WebAssemblyExceptionInfoTest.cpp
+++ b/llvm/unittests/Target/WebAssembly/WebAssemblyExceptionInfoTest.cpp
@@ -75,7 +75,7 @@ TEST(WebAssemblyExceptionInfoTest, TEST0) {
declare i32 @__gxx_wasm_personality_v0(...)
- define void @test0() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+ define void @test0() personality ptr @__gxx_wasm_personality_v0 {
unreachable
}
@@ -237,7 +237,7 @@ TEST(WebAssemblyExceptionInfoTest, TEST1) {
declare i32 @__gxx_wasm_personality_v0(...)
- define void @test1() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+ define void @test1() personality ptr @__gxx_wasm_personality_v0 {
unreachable
}
diff --git a/llvm/unittests/Transforms/IPO/AttributorTest.cpp b/llvm/unittests/Transforms/IPO/AttributorTest.cpp
index e442dae..e345c60 100644
--- a/llvm/unittests/Transforms/IPO/AttributorTest.cpp
+++ b/llvm/unittests/Transforms/IPO/AttributorTest.cpp
@@ -78,17 +78,17 @@ TEST_F(AttributorTestBase, AAReachabilityTest) {
const char *ModuleString = R"(
@x = external global i32
define void @func4() {
- store i32 0, i32* @x
+ store i32 0, ptr @x
ret void
}
define internal void @func3() {
- store i32 0, i32* @x
+ store i32 0, ptr @x
ret void
}
define internal void @func8() {
- store i32 0, i32* @x
+ store i32 0, ptr @x
ret void
}
@@ -105,7 +105,7 @@ TEST_F(AttributorTestBase, AAReachabilityTest) {
}
declare void @unknown()
- define internal void @func5(void ()* %ptr) {
+ define internal void @func5(ptr %ptr) {
entry:
call void %ptr()
call void @unknown()
@@ -114,8 +114,8 @@ TEST_F(AttributorTestBase, AAReachabilityTest) {
define void @func6() {
entry:
- store i32 0, i32* @x
- call void @func5(void ()* @func3)
+ store i32 0, ptr @x
+ call void @func5(ptr @func3)
ret void
}
diff --git a/llvm/unittests/Transforms/Scalar/LICMTest.cpp b/llvm/unittests/Transforms/Scalar/LICMTest.cpp
index 98a69bb..a193993b 100644
--- a/llvm/unittests/Transforms/Scalar/LICMTest.cpp
+++ b/llvm/unittests/Transforms/Scalar/LICMTest.cpp
@@ -37,13 +37,13 @@ TEST(LICMTest, TestSCEVInvalidationOnHoisting) {
SMDiagnostic Error;
StringRef Text = R"(
- define void @foo(i64* %ptr) {
+ define void @foo(ptr %ptr) {
entry:
br label %loop
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.inc, %loop ]
- %n = load i64, i64* %ptr, !invariant.load !0
+ %n = load i64, ptr %ptr, !invariant.load !0
%iv.inc = add i64 %iv, 1
%cmp = icmp ult i64 %iv.inc, %n
br i1 %cmp, label %loop, label %exit
@@ -62,17 +62,17 @@ TEST(LICMTest, TestSCEVInvalidationOnHoisting) {
BasicBlock &EntryBB = F->getEntryBlock();
BasicBlock *LoopBB = EntryBB.getUniqueSuccessor();
- // Select `load i64, i64* %ptr`.
+ // Select `load i64, ptr %ptr`.
Instruction *IBefore = &*LoopBB->getFirstNonPHIIt();
// Make sure the right instruction was selected.
ASSERT_TRUE(isa<LoadInst>(IBefore));
- // Upon this query SCEV caches disposition of <load i64, i64* %ptr> SCEV.
+ // Upon this query SCEV caches disposition of <load i64, ptr %ptr> SCEV.
ASSERT_EQ(SE.getBlockDisposition(SE.getSCEV(IBefore), LoopBB),
ScalarEvolution::BlockDisposition::DominatesBlock);
MPM.run(*M, MAM);
- // Select `load i64, i64* %ptr` after it was hoisted.
+ // Select `load i64, ptr %ptr` after it was hoisted.
Instruction *IAfter = &*EntryBB.getFirstNonPHIIt();
// Make sure the right instruction was selected.
ASSERT_TRUE(isa<LoadInst>(IAfter));
@@ -84,7 +84,7 @@ TEST(LICMTest, TestSCEVInvalidationOnHoisting) {
SE.getBlockDisposition(SE.getSCEV(IAfter), LoopBB);
  // If LICM has properly invalidated SCEV,
- // 1. SCEV of <load i64, i64* %ptr> should properly dominate the "loop" BB,
+ // 1. SCEV of <load i64, ptr %ptr> should properly dominate the "loop" BB,
// 2. extra invalidation shouldn't change result of the query.
EXPECT_EQ(DispositionBeforeInvalidation,
ScalarEvolution::BlockDisposition::ProperlyDominatesBlock);
diff --git a/llvm/unittests/Transforms/Scalar/LoopPassManagerTest.cpp b/llvm/unittests/Transforms/Scalar/LoopPassManagerTest.cpp
index cb3d1001..88eaa87 100644
--- a/llvm/unittests/Transforms/Scalar/LoopPassManagerTest.cpp
+++ b/llvm/unittests/Transforms/Scalar/LoopPassManagerTest.cpp
@@ -265,21 +265,21 @@ protected:
public:
LoopPassManagerTest()
: M(parseIR(Context,
- "define void @f(i1* %ptr) {\n"
+ "define void @f(ptr %ptr) {\n"
"entry:\n"
" br label %loop.0\n"
"loop.0:\n"
- " %cond.0 = load volatile i1, i1* %ptr\n"
+ " %cond.0 = load volatile i1, ptr %ptr\n"
" br i1 %cond.0, label %loop.0.0.ph, label %end\n"
"loop.0.0.ph:\n"
" br label %loop.0.0\n"
"loop.0.0:\n"
- " %cond.0.0 = load volatile i1, i1* %ptr\n"
+ " %cond.0.0 = load volatile i1, ptr %ptr\n"
" br i1 %cond.0.0, label %loop.0.0, label %loop.0.1.ph\n"
"loop.0.1.ph:\n"
" br label %loop.0.1\n"
"loop.0.1:\n"
- " %cond.0.1 = load volatile i1, i1* %ptr\n"
+ " %cond.0.1 = load volatile i1, ptr %ptr\n"
" br i1 %cond.0.1, label %loop.0.1, label %loop.0.latch\n"
"loop.0.latch:\n"
" br label %loop.0\n"
@@ -287,11 +287,11 @@ public:
" ret void\n"
"}\n"
"\n"
- "define void @g(i1* %ptr) {\n"
+ "define void @g(ptr %ptr) {\n"
"entry:\n"
" br label %loop.g.0\n"
"loop.g.0:\n"
- " %cond.0 = load volatile i1, i1* %ptr\n"
+ " %cond.0 = load volatile i1, ptr %ptr\n"
" br i1 %cond.0, label %loop.g.0, label %end\n"
"end:\n"
" ret void\n"
@@ -861,26 +861,26 @@ TEST_F(LoopPassManagerTest, IndirectOuterPassInvalidation) {
TEST_F(LoopPassManagerTest, LoopChildInsertion) {
// Super boring module with three loops in a single loop nest.
- M = parseIR(Context, "define void @f(i1* %ptr) {\n"
+ M = parseIR(Context, "define void @f(ptr %ptr) {\n"
"entry:\n"
" br label %loop.0\n"
"loop.0:\n"
- " %cond.0 = load volatile i1, i1* %ptr\n"
+ " %cond.0 = load volatile i1, ptr %ptr\n"
" br i1 %cond.0, label %loop.0.0.ph, label %end\n"
"loop.0.0.ph:\n"
" br label %loop.0.0\n"
"loop.0.0:\n"
- " %cond.0.0 = load volatile i1, i1* %ptr\n"
+ " %cond.0.0 = load volatile i1, ptr %ptr\n"
" br i1 %cond.0.0, label %loop.0.0, label %loop.0.1.ph\n"
"loop.0.1.ph:\n"
" br label %loop.0.1\n"
"loop.0.1:\n"
- " %cond.0.1 = load volatile i1, i1* %ptr\n"
+ " %cond.0.1 = load volatile i1, ptr %ptr\n"
" br i1 %cond.0.1, label %loop.0.1, label %loop.0.2.ph\n"
"loop.0.2.ph:\n"
" br label %loop.0.2\n"
"loop.0.2:\n"
- " %cond.0.2 = load volatile i1, i1* %ptr\n"
+ " %cond.0.2 = load volatile i1, ptr %ptr\n"
" br i1 %cond.0.2, label %loop.0.2, label %loop.0.latch\n"
"loop.0.latch:\n"
" br label %loop.0\n"
@@ -1064,28 +1064,28 @@ TEST_F(LoopPassManagerTest, LoopChildInsertion) {
TEST_F(LoopPassManagerTest, LoopPeerInsertion) {
// Super boring module with two loop nests, one of which has two child
// loops.
- M = parseIR(Context, "define void @f(i1* %ptr) {\n"
+ M = parseIR(Context, "define void @f(ptr %ptr) {\n"
"entry:\n"
" br label %loop.0\n"
"loop.0:\n"
- " %cond.0 = load volatile i1, i1* %ptr\n"
+ " %cond.0 = load volatile i1, ptr %ptr\n"
" br i1 %cond.0, label %loop.0.0.ph, label %loop.2.ph\n"
"loop.0.0.ph:\n"
" br label %loop.0.0\n"
"loop.0.0:\n"
- " %cond.0.0 = load volatile i1, i1* %ptr\n"
+ " %cond.0.0 = load volatile i1, ptr %ptr\n"
" br i1 %cond.0.0, label %loop.0.0, label %loop.0.2.ph\n"
"loop.0.2.ph:\n"
" br label %loop.0.2\n"
"loop.0.2:\n"
- " %cond.0.2 = load volatile i1, i1* %ptr\n"
+ " %cond.0.2 = load volatile i1, ptr %ptr\n"
" br i1 %cond.0.2, label %loop.0.2, label %loop.0.latch\n"
"loop.0.latch:\n"
" br label %loop.0\n"
"loop.2.ph:\n"
" br label %loop.2\n"
"loop.2:\n"
- " %cond.2 = load volatile i1, i1* %ptr\n"
+ " %cond.2 = load volatile i1, ptr %ptr\n"
" br i1 %cond.2, label %loop.2, label %end\n"
"end:\n"
" ret void\n"
@@ -1318,31 +1318,31 @@ TEST_F(LoopPassManagerTest, LoopDeletion) {
// Build a module with a single loop nest that contains one outer loop with
// three subloops, one of which has its own subloop. We will
// incrementally delete all of these to test different deletion scenarios.
- M = parseIR(Context, "define void @f(i1* %ptr) {\n"
+ M = parseIR(Context, "define void @f(ptr %ptr) {\n"
"entry:\n"
" br label %loop.0\n"
"loop.0:\n"
- " %cond.0 = load volatile i1, i1* %ptr\n"
+ " %cond.0 = load volatile i1, ptr %ptr\n"
" br i1 %cond.0, label %loop.0.0.ph, label %end\n"
"loop.0.0.ph:\n"
" br label %loop.0.0\n"
"loop.0.0:\n"
- " %cond.0.0 = load volatile i1, i1* %ptr\n"
+ " %cond.0.0 = load volatile i1, ptr %ptr\n"
" br i1 %cond.0.0, label %loop.0.0, label %loop.0.1.ph\n"
"loop.0.1.ph:\n"
" br label %loop.0.1\n"
"loop.0.1:\n"
- " %cond.0.1 = load volatile i1, i1* %ptr\n"
+ " %cond.0.1 = load volatile i1, ptr %ptr\n"
" br i1 %cond.0.1, label %loop.0.1, label %loop.0.2.ph\n"
"loop.0.2.ph:\n"
" br label %loop.0.2\n"
"loop.0.2:\n"
- " %cond.0.2 = load volatile i1, i1* %ptr\n"
+ " %cond.0.2 = load volatile i1, ptr %ptr\n"
" br i1 %cond.0.2, label %loop.0.2.0.ph, label %loop.0.latch\n"
"loop.0.2.0.ph:\n"
" br label %loop.0.2.0\n"
"loop.0.2.0:\n"
- " %cond.0.2.0 = load volatile i1, i1* %ptr\n"
+ " %cond.0.2.0 = load volatile i1, ptr %ptr\n"
" br i1 %cond.0.2.0, label %loop.0.2.0, label %loop.0.2.latch\n"
"loop.0.2.latch:\n"
" br label %loop.0.2\n"
diff --git a/llvm/unittests/Transforms/Utils/BasicBlockUtilsTest.cpp b/llvm/unittests/Transforms/Utils/BasicBlockUtilsTest.cpp
index 4235c93..00d9e9f 100644
--- a/llvm/unittests/Transforms/Utils/BasicBlockUtilsTest.cpp
+++ b/llvm/unittests/Transforms/Utils/BasicBlockUtilsTest.cpp
@@ -484,9 +484,9 @@ exit:
TEST(BasicBlockUtils, SplitIndirectBrCriticalEdgesIgnorePHIs) {
LLVMContext C;
std::unique_ptr<Module> M = parseIR(C, R"IR(
-define void @crit_edge(i8* %tgt, i1 %cond0, i1 %cond1) {
+define void @crit_edge(ptr %tgt, i1 %cond0, i1 %cond1) {
entry:
- indirectbr i8* %tgt, [label %bb0, label %bb1, label %bb2]
+ indirectbr ptr %tgt, [label %bb0, label %bb1, label %bb2]
bb0:
br i1 %cond0, label %bb1, label %bb2
bb1:
@@ -526,9 +526,9 @@ bb4:
TEST(BasicBlockUtils, SplitIndirectBrCriticalEdges) {
LLVMContext C;
std::unique_ptr<Module> M = parseIR(C, R"IR(
-define void @crit_edge(i8* %tgt, i1 %cond0, i1 %cond1) {
+define void @crit_edge(ptr %tgt, i1 %cond0, i1 %cond1) {
entry:
- indirectbr i8* %tgt, [label %bb0, label %bb1, label %bb2]
+ indirectbr ptr %tgt, [label %bb0, label %bb1, label %bb2]
bb0:
br i1 %cond0, label %bb1, label %bb2
bb1:
diff --git a/llvm/unittests/Transforms/Utils/CloningTest.cpp b/llvm/unittests/Transforms/Utils/CloningTest.cpp
index d990808..237bc6e 100644
--- a/llvm/unittests/Transforms/Utils/CloningTest.cpp
+++ b/llvm/unittests/Transforms/Utils/CloningTest.cpp
@@ -394,7 +394,7 @@ TEST(CloneLoop, CloneLoopNest) {
std::unique_ptr<Module> M = parseIR(
Context,
- R"(define void @foo(i32* %A, i32 %ub) {
+ R"(define void @foo(ptr %A, i32 %ub) {
entry:
%guardcmp = icmp slt i32 0, %ub
br i1 %guardcmp, label %for.outer.preheader, label %for.end
@@ -408,8 +408,8 @@ for.inner.preheader:
for.inner:
%i = phi i32 [ 0, %for.inner.preheader ], [ %inc, %for.inner ]
%idxprom = sext i32 %i to i64
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %idxprom
- store i32 %i, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %idxprom
+ store i32 %i, ptr %arrayidx, align 4
%inc = add nsw i32 %i, 1
%cmp = icmp slt i32 %inc, %ub
br i1 %cmp, label %for.inner, label %for.inner.exit
@@ -728,10 +728,10 @@ TEST(CloneFunction, CloneEmptyFunction) {
TEST(CloneFunction, CloneFunctionWithInalloca) {
StringRef ImplAssembly = R"(
- declare void @a(i32* inalloca(i32))
+ declare void @a(ptr inalloca(i32))
define void @foo() {
%a = alloca inalloca i32
- call void @a(i32* inalloca(i32) %a)
+ call void @a(ptr inalloca(i32) %a)
ret void
}
declare void @bar()
diff --git a/llvm/unittests/Transforms/Utils/CodeExtractorTest.cpp b/llvm/unittests/Transforms/Utils/CodeExtractorTest.cpp
index 9ea8de3..90f0620 100644
--- a/llvm/unittests/Transforms/Utils/CodeExtractorTest.cpp
+++ b/llvm/unittests/Transforms/Utils/CodeExtractorTest.cpp
@@ -154,13 +154,13 @@ TEST(CodeExtractor, ExitBlockOrderingPhis) {
%0 = alloca i32, align 4
br label %test0
test0:
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
br label %test1
test1:
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
br i1 true, label %first, label %test
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
br i1 true, label %first, label %next
first:
%1 = phi i32 [ %c, %test ], [ %e, %test1 ]
@@ -212,13 +212,13 @@ TEST(CodeExtractor, ExitBlockOrdering) {
%0 = alloca i32, align 4
br label %test0
test0:
- %c = load i32, i32* %0, align 4
+ %c = load i32, ptr %0, align 4
br label %test1
test1:
- %e = load i32, i32* %0, align 4
+ %e = load i32, ptr %0, align 4
br i1 true, label %first, label %test
test:
- %d = load i32, i32* %0, align 4
+ %d = load i32, ptr %0, align 4
br i1 true, label %first, label %next
first:
ret void
@@ -317,7 +317,7 @@ TEST(CodeExtractor, StoreOutputInvokeResultAfterEHPad) {
std::unique_ptr<Module> M(parseAssemblyString(R"invalid(
declare i8 @hoge()
- define i32 @foo() personality i8* null {
+ define i32 @foo() personality ptr null {
entry:
%call = invoke i8 @hoge()
to label %invoke.cont unwind label %lpad
@@ -326,8 +326,8 @@ TEST(CodeExtractor, StoreOutputInvokeResultAfterEHPad) {
unreachable
lpad: ; preds = %entry
- %0 = landingpad { i8*, i32 }
- catch i8* null
+ %0 = landingpad { ptr, i32 }
+ catch ptr null
br i1 undef, label %catch, label %finally.catchall
catch: ; preds = %lpad
@@ -342,13 +342,13 @@ TEST(CodeExtractor, StoreOutputInvokeResultAfterEHPad) {
unreachable
lpad2: ; preds = %invoke.cont2, %catch
- %ex.1 = phi i8* [ undef, %invoke.cont2 ], [ null, %catch ]
- %1 = landingpad { i8*, i32 }
- catch i8* null
+ %ex.1 = phi ptr [ undef, %invoke.cont2 ], [ null, %catch ]
+ %1 = landingpad { ptr, i32 }
+ catch ptr null
br label %finally.catchall
finally.catchall: ; preds = %lpad33, %lpad
- %ex.2 = phi i8* [ %ex.1, %lpad2 ], [ null, %lpad ]
+ %ex.2 = phi ptr [ %ex.1, %lpad2 ], [ null, %lpad ]
unreachable
}
)invalid", Err, Ctx));
@@ -384,7 +384,7 @@ TEST(CodeExtractor, StoreOutputInvokeResultInExitStub) {
std::unique_ptr<Module> M(parseAssemblyString(R"invalid(
declare i32 @bar()
- define i32 @foo() personality i8* null {
+ define i32 @foo() personality ptr null {
entry:
%0 = invoke i32 @bar() to label %exit unwind label %lpad
@@ -392,9 +392,9 @@ TEST(CodeExtractor, StoreOutputInvokeResultInExitStub) {
ret i32 %0
lpad:
- %1 = landingpad { i8*, i32 }
+ %1 = landingpad { ptr, i32 }
cleanup
- resume { i8*, i32 } %1
+ resume { ptr, i32 } %1
}
)invalid",
Err, Ctx));
@@ -421,7 +421,7 @@ TEST(CodeExtractor, ExtractAndInvalidateAssumptionCache) {
target triple = "aarch64"
%b = type { i64 }
- declare void @g(i8*)
+ declare void @g(ptr)
declare void @llvm.assume(i1) #0
@@ -430,9 +430,9 @@ TEST(CodeExtractor, ExtractAndInvalidateAssumptionCache) {
br label %label
label:
- %0 = load %b*, %b** inttoptr (i64 8 to %b**), align 8
- %1 = getelementptr inbounds %b, %b* %0, i64 undef, i32 0
- %2 = load i64, i64* %1, align 8
+ %0 = load ptr, ptr inttoptr (i64 8 to ptr), align 8
+ %1 = getelementptr inbounds %b, ptr %0, i64 undef, i32 0
+ %2 = load i64, ptr %1, align 8
%3 = icmp ugt i64 %2, 1
br i1 %3, label %if.then, label %if.else
@@ -440,8 +440,8 @@ TEST(CodeExtractor, ExtractAndInvalidateAssumptionCache) {
unreachable
if.else:
- call void @g(i8* undef)
- store i64 undef, i64* null, align 536870912
+ call void @g(ptr undef)
+ store i64 undef, ptr null, align 536870912
%4 = icmp eq i64 %2, 0
call void @llvm.assume(i1 %4)
unreachable
@@ -473,9 +473,9 @@ TEST(CodeExtractor, RemoveBitcastUsesFromOuterLifetimeMarkers) {
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
- declare void @use(i32*)
- declare void @llvm.lifetime.start.p0i8(i64, i8*)
- declare void @llvm.lifetime.end.p0i8(i64, i8*)
+ declare void @use(ptr)
+ declare void @llvm.lifetime.start.p0i8(i64, ptr)
+ declare void @llvm.lifetime.end.p0i8(i64, ptr)
define void @foo() {
entry:
@@ -483,14 +483,14 @@ TEST(CodeExtractor, RemoveBitcastUsesFromOuterLifetimeMarkers) {
br label %extract
extract:
- %1 = bitcast i32* %0 to i8*
- call void @llvm.lifetime.start.p0i8(i64 4, i8* %1)
- call void @use(i32* %0)
+ %1 = bitcast ptr %0 to ptr
+ call void @llvm.lifetime.start.p0i8(i64 4, ptr %1)
+ call void @use(ptr %0)
br label %exit
exit:
- call void @use(i32* %0)
- call void @llvm.lifetime.end.p0i8(i64 4, i8* %1)
+ call void @use(ptr %0)
+ call void @llvm.lifetime.end.p0i8(i64 4, ptr %1)
ret void
}
)ir",
diff --git a/llvm/unittests/Transforms/Utils/CodeMoverUtilsTest.cpp b/llvm/unittests/Transforms/Utils/CodeMoverUtilsTest.cpp
index 9466977..191ccc3 100644
--- a/llvm/unittests/Transforms/Utils/CodeMoverUtilsTest.cpp
+++ b/llvm/unittests/Transforms/Utils/CodeMoverUtilsTest.cpp
@@ -75,21 +75,21 @@ TEST(CodeMoverUtils, IsControlFlowEquivalentSimpleTest) {
// i = 3;
// }
std::unique_ptr<Module> M =
- parseIR(C, R"(define void @foo(i32* %i, i1 %cond1, i1 %cond2) {
+ parseIR(C, R"(define void @foo(ptr %i, i1 %cond1, i1 %cond2) {
entry:
br i1 %cond1, label %if.first, label %if.first.end
if.first:
- store i32 1, i32* %i, align 4
+ store i32 1, ptr %i, align 4
br label %if.first.end
if.first.end:
br i1 %cond1, label %if.second, label %if.second.end
if.second:
- store i32 2, i32* %i, align 4
+ store i32 2, ptr %i, align 4
br label %if.second.end
if.second.end:
br i1 %cond2, label %if.third, label %if.third.end
if.third:
- store i32 3, i32* %i, align 4
+ store i32 3, ptr %i, align 4
br label %if.third.end
if.third.end:
ret void
@@ -136,51 +136,51 @@ TEST(CodeMoverUtils, IsControlFlowEquivalentOppositeCondTest) {
// i = 9;
// }
std::unique_ptr<Module> M =
- parseIR(C, R"(define void @foo(i32* %i, i32 %X, i32 %Y) {
+ parseIR(C, R"(define void @foo(ptr %i, i32 %X, i32 %Y) {
entry:
%cmp1 = icmp ult i32 %X, %Y
br i1 %cmp1, label %if.first, label %if.first.end
if.first:
- store i32 1, i32* %i, align 4
+ store i32 1, ptr %i, align 4
br label %if.first.end
if.first.end:
%cmp2 = icmp ugt i32 %Y, %X
br i1 %cmp2, label %if.second, label %if.second.end
if.second:
- store i32 2, i32* %i, align 4
+ store i32 2, ptr %i, align 4
br label %if.second.end
if.second.end:
%cmp3 = icmp uge i32 %X, %Y
br i1 %cmp3, label %if.third, label %if.third.else
if.third:
- store i32 3, i32* %i, align 4
+ store i32 3, ptr %i, align 4
br label %if.third.end
if.third.else:
- store i32 4, i32* %i, align 4
+ store i32 4, ptr %i, align 4
br label %if.third.end
if.third.end:
%cmp4 = icmp eq i32 %X, %Y
br i1 %cmp4, label %if.fourth, label %if.fourth.end
if.fourth:
- store i32 5, i32* %i, align 4
+ store i32 5, ptr %i, align 4
br label %if.fourth.end
if.fourth.end:
%cmp5 = icmp eq i32 %Y, %X
br i1 %cmp5, label %if.fifth, label %if.fifth.else
if.fifth:
- store i32 6, i32* %i, align 4
+ store i32 6, ptr %i, align 4
br label %if.fifth.end
if.fifth.else:
- store i32 7, i32* %i, align 4
+ store i32 7, ptr %i, align 4
br label %if.fifth.end
if.fifth.end:
%cmp6 = icmp ne i32 %X, %Y
br i1 %cmp6, label %if.sixth, label %if.sixth.else
if.sixth:
- store i32 8, i32* %i, align 4
+ store i32 8, ptr %i, align 4
br label %if.sixth.end
if.sixth.else:
- store i32 9, i32* %i, align 4
+ store i32 9, ptr %i, align 4
br label %if.sixth.end
if.sixth.end:
ret void
@@ -227,20 +227,20 @@ TEST(CodeMoverUtils, IsControlFlowEquivalentCondNestTest) {
// i = 2;
// }
std::unique_ptr<Module> M =
- parseIR(C, R"(define void @foo(i32* %i, i1 %cond1, i1 %cond2) {
+ parseIR(C, R"(define void @foo(ptr %i, i1 %cond1, i1 %cond2) {
entry:
br i1 %cond1, label %if.outer.first, label %if.first.end
if.outer.first:
br i1 %cond2, label %if.inner.first, label %if.first.end
if.inner.first:
- store i32 1, i32* %i, align 4
+ store i32 1, ptr %i, align 4
br label %if.first.end
if.first.end:
br i1 %cond2, label %if.outer.second, label %if.second.end
if.outer.second:
br i1 %cond1, label %if.inner.second, label %if.second.end
if.inner.second:
- store i32 2, i32* %i, align 4
+ store i32 2, ptr %i, align 4
br label %if.second.end
if.second.end:
ret void
@@ -283,7 +283,7 @@ TEST(CodeMoverUtils, IsControlFlowEquivalentImbalanceTest) {
// i = 4;
// }
std::unique_ptr<Module> M =
- parseIR(C, R"(define void @foo(i32* %i, i1 %cond1, i1 %cond2, i1 %cond3) {
+ parseIR(C, R"(define void @foo(ptr %i, i1 %cond1, i1 %cond2, i1 %cond3) {
entry:
br i1 %cond1, label %if.outer.first, label %if.first.end
if.outer.first:
@@ -291,26 +291,26 @@ TEST(CodeMoverUtils, IsControlFlowEquivalentImbalanceTest) {
if.middle.first:
br i1 %cond3, label %if.inner.first, label %if.first.end
if.inner.first:
- store i32 1, i32* %i, align 4
+ store i32 1, ptr %i, align 4
br label %if.first.end
if.first.end:
br i1 %cond2, label %if.outer.second, label %if.second.end
if.outer.second:
br i1 %cond3, label %if.inner.second, label %if.second.end
if.inner.second:
- store i32 2, i32* %i, align 4
+ store i32 2, ptr %i, align 4
br label %if.second.end
if.second.end:
br i1 %cond1, label %if.outer.third, label %if.third.end
if.outer.third:
br i1 %cond1, label %if.inner.third, label %if.third.end
if.inner.third:
- store i32 3, i32* %i, align 4
+ store i32 3, ptr %i, align 4
br label %if.third.end
if.third.end:
br i1 %cond1, label %if.fourth, label %if.fourth.end
if.fourth:
- store i32 4, i32* %i, align 4
+ store i32 4, ptr %i, align 4
br label %if.fourth.end
if.fourth.end:
ret void
@@ -343,28 +343,28 @@ TEST(CodeMoverUtils, IsControlFlowEquivalentPointerTest) {
// i = 3;
// }
std::unique_ptr<Module> M =
- parseIR(C, R"(define void @foo(i32* %i, i32* %cond) {
+ parseIR(C, R"(define void @foo(ptr %i, ptr %cond) {
entry:
- %0 = load i32, i32* %cond, align 4
+ %0 = load i32, ptr %cond, align 4
%tobool1 = icmp ne i32 %0, 0
br i1 %tobool1, label %if.first, label %if.first.end
if.first:
- store i32 1, i32* %i, align 4
+ store i32 1, ptr %i, align 4
br label %if.first.end
if.first.end:
- %1 = load i32, i32* %cond, align 4
+ %1 = load i32, ptr %cond, align 4
%tobool2 = icmp ne i32 %1, 0
br i1 %tobool2, label %if.second, label %if.second.end
if.second:
- store i32 2, i32* %i, align 4
+ store i32 2, ptr %i, align 4
br label %if.second.end
if.second.end:
- store i32 1, i32* %cond, align 4
- %2 = load i32, i32* %cond, align 4
+ store i32 1, ptr %cond, align 4
+ %2 = load i32, ptr %cond, align 4
%tobool3 = icmp ne i32 %2, 0
br i1 %tobool3, label %if.third, label %if.third.end
if.third:
- store i32 3, i32* %i, align 4
+ store i32 3, ptr %i, align 4
br label %if.third.end
if.third.end:
ret void
@@ -450,7 +450,7 @@ TEST(CodeMoverUtils, IsSafeToMoveTest1) {
// }
// }
std::unique_ptr<Module> M = parseIR(
- C, R"(define void @foo(i32* noalias %A, i32* noalias %B, i32* noalias %C
+ C, R"(define void @foo(ptr noalias %A, ptr noalias %B, ptr noalias %C
, i64 %N) {
entry:
%X = sdiv i64 1, %N
@@ -461,18 +461,18 @@ TEST(CodeMoverUtils, IsSafeToMoveTest1) {
br i1 %cmp1, label %for.body, label %for.end
for.body:
%i = phi i64 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx_A5 = getelementptr inbounds i32, i32* %A, i64 5
- store i32 5, i32* %arrayidx_A5, align 4
- %arrayidx_A = getelementptr inbounds i32, i32* %A, i64 %i
- store i32 0, i32* %arrayidx_A, align 4
- %load1 = load i32, i32* %arrayidx_A, align 4
- %arrayidx_B = getelementptr inbounds i32, i32* %B, i64 %i
- store i32 %load1, i32* %arrayidx_B, align 4
- %load2 = load i32, i32* %arrayidx_A, align 4
- %arrayidx_C = getelementptr inbounds i32, i32* %C, i64 %i
- store i32 %load2, i32* %arrayidx_C, align 4
- %arrayidx_A6 = getelementptr inbounds i32, i32* %A, i64 6
- store i32 6, i32* %arrayidx_A6, align 4
+ %arrayidx_A5 = getelementptr inbounds i32, ptr %A, i64 5
+ store i32 5, ptr %arrayidx_A5, align 4
+ %arrayidx_A = getelementptr inbounds i32, ptr %A, i64 %i
+ store i32 0, ptr %arrayidx_A, align 4
+ %load1 = load i32, ptr %arrayidx_A, align 4
+ %arrayidx_B = getelementptr inbounds i32, ptr %B, i64 %i
+ store i32 %load1, ptr %arrayidx_B, align 4
+ %load2 = load i32, ptr %arrayidx_A, align 4
+ %arrayidx_C = getelementptr inbounds i32, ptr %C, i64 %i
+ store i32 %load2, ptr %arrayidx_C, align 4
+ %arrayidx_A6 = getelementptr inbounds i32, ptr %A, i64 6
+ store i32 6, ptr %arrayidx_A6, align 4
%inc = add nsw i64 %i, 1
%cmp = icmp slt i64 %inc, %N
br i1 %cmp, label %for.body, label %for.end
@@ -686,19 +686,19 @@ TEST(CodeMoverUtils, IsSafeToMoveTest5) {
LLVMContext C;
std::unique_ptr<Module> M =
- parseIR(C, R"(define void @dependence(i32* noalias %A, i32* noalias %B){
+ parseIR(C, R"(define void @dependence(ptr noalias %A, ptr noalias %B){
entry:
- store i32 0, i32* %A, align 4 ; storeA0
- store i32 2, i32* %A, align 4 ; storeA1
- %tmp0 = load i32, i32* %A, align 4 ; loadA0
- store i32 1, i32* %B, align 4 ; storeB0
- %tmp1 = load i32, i32* %A, align 4 ; loadA1
- store i32 2, i32* %A, align 4 ; storeA2
- store i32 4, i32* %B, align 4 ; StoreB1
- %tmp2 = load i32, i32* %A, align 4 ; loadA2
- %tmp3 = load i32, i32* %A, align 4 ; loadA3
- %tmp4 = load i32, i32* %B, align 4 ; loadB2
- %tmp5 = load i32, i32* %B, align 4 ; loadB3
+ store i32 0, ptr %A, align 4 ; storeA0
+ store i32 2, ptr %A, align 4 ; storeA1
+ %tmp0 = load i32, ptr %A, align 4 ; loadA0
+ store i32 1, ptr %B, align 4 ; storeB0
+ %tmp1 = load i32, ptr %A, align 4 ; loadA1
+ store i32 2, ptr %A, align 4 ; storeA2
+ store i32 4, ptr %B, align 4 ; StoreB1
+ %tmp2 = load i32, ptr %A, align 4 ; loadA2
+ %tmp3 = load i32, ptr %A, align 4 ; loadA3
+ %tmp4 = load i32, ptr %B, align 4 ; loadB2
+ %tmp5 = load i32, ptr %B, align 4 ; loadB3
ret void
})");
@@ -763,63 +763,63 @@ TEST(CodeMoverUtils, IsSafeToMoveTest6) {
LLVMContext C;
std::unique_ptr<Module> M = parseIR(
- C, R"(define void @dependence(i1 %cond, i32* noalias %A, i32* noalias %B){
+ C, R"(define void @dependence(i1 %cond, ptr noalias %A, ptr noalias %B){
entry:
br i1 %cond, label %bb0, label %bb1
bb0:
br label %bb1
bb1:
- store i32 0, i32* %A, align 4 ; storeA0
+ store i32 0, ptr %A, align 4 ; storeA0
br i1 %cond, label %bb2, label %bb3
bb2:
br label %bb3
bb3:
- store i32 2, i32* %A, align 4 ; storeA1
+ store i32 2, ptr %A, align 4 ; storeA1
br i1 %cond, label %bb4, label %bb5
bb4:
br label %bb5
bb5:
- %tmp0 = load i32, i32* %A, align 4 ; loadA0
+ %tmp0 = load i32, ptr %A, align 4 ; loadA0
br i1 %cond, label %bb6, label %bb7
bb6:
br label %bb7
bb7:
- store i32 1, i32* %B, align 4 ; storeB0
+ store i32 1, ptr %B, align 4 ; storeB0
br i1 %cond, label %bb8, label %bb9
bb8:
br label %bb9
bb9:
- %tmp1 = load i32, i32* %A, align 4 ; loadA1
+ %tmp1 = load i32, ptr %A, align 4 ; loadA1
br i1 %cond, label %bb10, label %bb11
bb10:
br label %bb11
bb11:
- store i32 2, i32* %A, align 4 ; storeA2
+ store i32 2, ptr %A, align 4 ; storeA2
br i1 %cond, label %bb12, label %bb13
bb12:
br label %bb13
bb13:
- store i32 4, i32* %B, align 4 ; StoreB1
+ store i32 4, ptr %B, align 4 ; StoreB1
br i1 %cond, label %bb14, label %bb15
bb14:
br label %bb15
bb15:
- %tmp2 = load i32, i32* %A, align 4 ; loadA2
+ %tmp2 = load i32, ptr %A, align 4 ; loadA2
br i1 %cond, label %bb16, label %bb17
bb16:
br label %bb17
bb17:
- %tmp3 = load i32, i32* %A, align 4 ; loadA3
+ %tmp3 = load i32, ptr %A, align 4 ; loadA3
br i1 %cond, label %bb18, label %bb19
bb18:
br label %bb19
bb19:
- %tmp4 = load i32, i32* %B, align 4 ; loadB2
+ %tmp4 = load i32, ptr %B, align 4 ; loadB2
br i1 %cond, label %bb20, label %bb21
bb20:
br label %bb21
bb21:
- %tmp5 = load i32, i32* %B, align 4 ; loadB3
+ %tmp5 = load i32, ptr %B, align 4 ; loadB3
ret void
})");
run(*M, "dependence",
diff --git a/llvm/unittests/Transforms/Utils/LocalTest.cpp b/llvm/unittests/Transforms/Utils/LocalTest.cpp
index 4908eda..c37ed5d 100644
--- a/llvm/unittests/Transforms/Utils/LocalTest.cpp
+++ b/llvm/unittests/Transforms/Utils/LocalTest.cpp
@@ -183,7 +183,7 @@ TEST(Local, MergeBasicBlockIntoOnlyPred) {
auto resetIR = [&]() {
M = parseIR(C,
R"(
- define i32 @f(i8* %str) {
+ define i32 @f(ptr %str) {
entry:
br label %bb2.i
bb2.i: ; preds = %bb4.i, %entry
@@ -411,7 +411,7 @@ TEST(Local, ConstantFoldTerminator) {
define void @indirectbr() {
entry:
- indirectbr i8* blockaddress(@indirectbr, %bb0), [label %bb0, label %bb1]
+ indirectbr ptr blockaddress(@indirectbr, %bb0), [label %bb0, label %bb1]
bb0:
ret void
bb1:
@@ -420,14 +420,14 @@ TEST(Local, ConstantFoldTerminator) {
define void @indirectbr_repeated() {
entry:
- indirectbr i8* blockaddress(@indirectbr_repeated, %bb0), [label %bb0, label %bb0]
+ indirectbr ptr blockaddress(@indirectbr_repeated, %bb0), [label %bb0, label %bb0]
bb0:
ret void
}
define void @indirectbr_unreachable() {
entry:
- indirectbr i8* blockaddress(@indirectbr_unreachable, %bb0), [label %bb1]
+ indirectbr ptr blockaddress(@indirectbr_unreachable, %bb0), [label %bb1]
bb0:
ret void
bb1:
@@ -925,7 +925,7 @@ TEST(Local, RemoveUnreachableBlocks) {
declare i32 @__gxx_personality_v0(...)
- define void @invoke_terminator() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+ define void @invoke_terminator() personality ptr @__gxx_personality_v0 {
entry:
br i1 undef, label %invoke.block, label %exit
@@ -943,8 +943,8 @@ TEST(Local, RemoveUnreachableBlocks) {
unreachable
lpad.block:
- %lp = landingpad { i8*, i32 }
- catch i8* null
+ %lp = landingpad { ptr, i32 }
+ catch ptr null
br label %exit
exit:
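One rewrite above goes beyond spelling: since every pointer now has type ptr, the constant bitcast (i32 (...)* @__gxx_personality_v0 to i8*) around the personality function is no longer needed, and the clause shrinks to `personality ptr @__gxx_personality_v0`. A hedged sketch of the resulting form, using the parseIR-style helper these tests define:

    std::unique_ptr<Module> M = parseIR(C,
        "declare i32 @__gxx_personality_v0(...)\n"
        "define void @f() personality ptr @__gxx_personality_v0 {\n"
        "entry:\n"
        "  ret void\n"
        "}\n");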
diff --git a/llvm/unittests/Transforms/Utils/MemTransferLowering.cpp b/llvm/unittests/Transforms/Utils/MemTransferLowering.cpp
index b97bc31..dd03b4f 100644
--- a/llvm/unittests/Transforms/Utils/MemTransferLowering.cpp
+++ b/llvm/unittests/Transforms/Utils/MemTransferLowering.cpp
@@ -98,13 +98,13 @@ struct MemTransferLowerTest : public testing::Test {
// For that reason expandMemCpyAsLoop is expected to explicitly mark
// loads from source and stores to destination as not aliasing.
TEST_F(MemTransferLowerTest, MemCpyKnownLength) {
- ParseAssembly("declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8 *, i64, i1)\n"
- "define void @foo(i8* %dst, i8* %src, i64 %n) optsize {\n"
+ ParseAssembly("declare void @llvm.memcpy.p0i8.p0i8.i64(ptr, ptr, i64, i1)\n"
+ "define void @foo(ptr %dst, ptr %src, i64 %n) optsize {\n"
"entry:\n"
- " %is_not_equal = icmp ne i8* %dst, %src\n"
+ " %is_not_equal = icmp ne ptr %dst, %src\n"
" br i1 %is_not_equal, label %memcpy, label %exit\n"
"memcpy:\n"
- " call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, "
+ " call void @llvm.memcpy.p0i8.p0i8.i64(ptr %dst, ptr %src, "
"i64 1024, i1 false)\n"
" br label %exit\n"
"exit:\n"
@@ -138,13 +138,13 @@ TEST_F(MemTransferLowerTest, MemCpyKnownLength) {
// llvm.memcpy lowering) doesn't alias by making sure the loop can be
// successfully vectorized without additional runtime checks.
TEST_F(MemTransferLowerTest, VecMemCpyKnownLength) {
- ParseAssembly("declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8 *, i64, i1)\n"
- "define void @foo(i8* %dst, i8* %src, i64 %n) optsize {\n"
+ ParseAssembly("declare void @llvm.memcpy.p0i8.p0i8.i64(ptr, ptr, i64, i1)\n"
+ "define void @foo(ptr %dst, ptr %src, i64 %n) optsize {\n"
"entry:\n"
- " %is_not_equal = icmp ne i8* %dst, %src\n"
+ " %is_not_equal = icmp ne ptr %dst, %src\n"
" br i1 %is_not_equal, label %memcpy, label %exit\n"
"memcpy:\n"
- " call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, "
+ " call void @llvm.memcpy.p0i8.p0i8.i64(ptr %dst, ptr %src, "
"i64 1024, i1 false)\n"
" br label %exit\n"
"exit:\n"
@@ -176,16 +176,16 @@ TEST_F(MemTransferLowerTest, VecMemCpyKnownLength) {
TEST_F(MemTransferLowerTest, AtomicMemCpyKnownLength) {
ParseAssembly("declare void "
- "@llvm.memcpy.element.unordered.atomic.p0i32.p0i32.i64(i32*, "
+ "@llvm.memcpy.element.unordered.atomic.p0i32.p0i32.i64(ptr, "
"i32 *, i64, i32)\n"
- "define void @foo(i32* %dst, i32* %src, i64 %n) optsize {\n"
+ "define void @foo(ptr %dst, ptr %src, i64 %n) optsize {\n"
"entry:\n"
- " %is_not_equal = icmp ne i32* %dst, %src\n"
+ " %is_not_equal = icmp ne ptr %dst, %src\n"
" br i1 %is_not_equal, label %memcpy, label %exit\n"
"memcpy:\n"
" call void "
- "@llvm.memcpy.element.unordered.atomic.p0i32.p0i32.i64(i32* "
- "%dst, i32* %src, "
+ "@llvm.memcpy.element.unordered.atomic.p0i32.p0i32.i64(ptr "
+ "%dst, ptr %src, "
"i64 1024, i32 4)\n"
" br label %exit\n"
"exit:\n"
@@ -221,16 +221,16 @@ TEST_F(MemTransferLowerTest, AtomicMemCpyKnownLength) {
TEST_F(MemTransferLowerTest, AtomicMemCpyUnKnownLength) {
ParseAssembly("declare void "
- "@llvm.memcpy.element.unordered.atomic.p0i32.p0i32.i64(i32*, "
+ "@llvm.memcpy.element.unordered.atomic.p0i32.p0i32.i64(ptr, "
"i32 *, i64, i32)\n"
- "define void @foo(i32* %dst, i32* %src, i64 %n) optsize {\n"
+ "define void @foo(ptr %dst, ptr %src, i64 %n) optsize {\n"
"entry:\n"
- " %is_not_equal = icmp ne i32* %dst, %src\n"
+ " %is_not_equal = icmp ne ptr %dst, %src\n"
" br i1 %is_not_equal, label %memcpy, label %exit\n"
"memcpy:\n"
" call void "
- "@llvm.memcpy.element.unordered.atomic.p0i32.p0i32.i64(i32* "
- "%dst, i32* %src, "
+ "@llvm.memcpy.element.unordered.atomic.p0i32.p0i32.i64(ptr "
+ "%dst, ptr %src, "
"i64 %n, i32 4)\n"
" br label %exit\n"
"exit:\n"
diff --git a/llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp b/llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp
index 55eae64..4fe3080 100644
--- a/llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp
+++ b/llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp
@@ -121,18 +121,18 @@ TEST_F(ScalarEvolutionExpanderTest, ExpandPtrTypeSCEV) {
TEST_F(ScalarEvolutionExpanderTest, SCEVZeroExtendExprNonIntegral) {
/*
* Create the following code:
- * func(i64 addrspace(10)* %arg)
+ * func(ptr addrspace(10) %arg)
* top:
* br label %L.ph
* L.ph:
- * %gepbase = getelementptr i64 addrspace(10)* %arg, i64 1
+ * %gepbase = getelementptr ptr addrspace(10) %arg, i64 1
* br label %L
* L:
* %phi = phi i64 [i64 0, %L.ph], [ %add, %L2 ]
* %add = add i64 %phi2, 1
* br i1 undef, label %post, label %L2
* post:
- * #= %gep = getelementptr i64 addrspace(10)* %gepbase, i64 %add =#
+ * #= %gep = getelementptr ptr addrspace(10) %gepbase, i64 %add =#
* ret void
*
* We will create the appropriate SCEV expression for %gep and expand it,
@@ -199,7 +199,7 @@ TEST_F(ScalarEvolutionExpanderTest, SCEVZeroExtendExprNonIntegral) {
TEST_F(ScalarEvolutionExpanderTest, SCEVExpanderIsSafeToExpandAt) {
/*
* Create the following code:
- * func(i64 addrspace(10)* %arg)
+ * func(ptr addrspace(10) %arg)
* top:
* br label %L.ph
* L.ph:
@@ -704,14 +704,14 @@ TEST_F(ScalarEvolutionExpanderTest, SCEVExpanderShlNSW) {
EXPECT_FALSE(I->hasNoSignedWrap());
};
- checkOneCase("define void @f(i16* %arrayidx) { "
- " %1 = load i16, i16* %arrayidx "
+ checkOneCase("define void @f(ptr %arrayidx) { "
+ " %1 = load i16, ptr %arrayidx "
" %2 = and i16 %1, -32768 "
" ret void "
"} ");
- checkOneCase("define void @f(i8* %arrayidx) { "
- " %1 = load i8, i8* %arrayidx "
+ checkOneCase("define void @f(ptr %arrayidx) { "
+ " %1 = load i8, ptr %arrayidx "
" %2 = and i8 %1, -128 "
" ret void "
"} ");
diff --git a/llvm/unittests/Transforms/Utils/UnrollLoopTest.cpp b/llvm/unittests/Transforms/Utils/UnrollLoopTest.cpp
index eec1011..7ba259d 100644
--- a/llvm/unittests/Transforms/Utils/UnrollLoopTest.cpp
+++ b/llvm/unittests/Transforms/Utils/UnrollLoopTest.cpp
@@ -34,7 +34,7 @@ TEST(LoopUnrollRuntime, Latch) {
std::unique_ptr<Module> M = parseIR(
C,
- R"(define i32 @test(i32* %a, i32* %b, i32* %c, i64 %n) {
+ R"(define i32 @test(ptr %a, ptr %b, ptr %c, i64 %n) {
entry:
br label %while.cond
@@ -44,13 +44,13 @@ while.cond: ; preds = %while.body, %entry
br i1 %cmp, label %while.body, label %while.end
while.body: ; preds = %while.cond
- %arrayidx = getelementptr inbounds i32, i32* %b, i64 %i.0
- %0 = load i32, i32* %arrayidx
- %arrayidx1 = getelementptr inbounds i32, i32* %c, i64 %i.0
- %1 = load i32, i32* %arrayidx1
+ %arrayidx = getelementptr inbounds i32, ptr %b, i64 %i.0
+ %0 = load i32, ptr %arrayidx
+ %arrayidx1 = getelementptr inbounds i32, ptr %c, i64 %i.0
+ %1 = load i32, ptr %arrayidx1
%mul = mul nsw i32 %0, %1
- %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %i.0
- store i32 %mul, i32* %arrayidx2
+ %arrayidx2 = getelementptr inbounds i32, ptr %a, i64 %i.0
+ store i32 %mul, ptr %arrayidx2
%inc = add nsw i64 %i.0, 1
br label %while.cond
diff --git a/llvm/unittests/Transforms/Utils/ValueMapperTest.cpp b/llvm/unittests/Transforms/Utils/ValueMapperTest.cpp
index e39cd70..7f12deae 100644
--- a/llvm/unittests/Transforms/Utils/ValueMapperTest.cpp
+++ b/llvm/unittests/Transforms/Utils/ValueMapperTest.cpp
@@ -74,7 +74,7 @@ TEST(ValueMapperTest, mapMDNodeDuplicatedCycle) {
// Create a cycle that references G0.
MDNode *N0; // !0 = !{!1}
- MDNode *N1; // !1 = !{!0, i8* @G0}
+ MDNode *N1; // !1 = !{!0, ptr @G0}
{
auto T0 = MDTuple::getTemporary(Context, nullptr);
Metadata *Ops1[] = {T0.get(), ConstantAsMetadata::get(G0.get())};
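The temporary-node pattern started above is the usual way to build metadata cycles like !0 = !{!1} / !1 = !{!0, ptr @G0}: forward-reference with a temporary node, then RAUW it away. A minimal self-cycle sketch, illustrative only.

    // Create a forward reference, point a real node at it, then resolve the
    // temporary so the node (indirectly) references itself.
    auto Temp = MDTuple::getTemporary(Context, nullptr);
    Metadata *Ops[] = {Temp.get()};
    MDNode *N = MDTuple::get(Context, Ops);
    Temp->replaceAllUsesWith(N);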