Diffstat (limited to 'llvm')
-rw-r--r--  llvm/examples/Kaleidoscope/Chapter8/toy.cpp | 3
-rw-r--r--  llvm/include/llvm/ADT/STLExtras.h | 22
-rw-r--r--  llvm/include/llvm/MC/TargetRegistry.h | 3
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp | 14
-rw-r--r--  llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 76
-rw-r--r--  llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp | 22
-rw-r--r--  llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h | 10
-rw-r--r--  llvm/lib/Target/AMDGPU/VOP3PInstructions.td | 70
-rw-r--r--  llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 134
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 29
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfo.h | 19
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfoZb.td | 4
-rw-r--r--  llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp | 26
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.form.ll | 201
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx90a.ll | 80
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx942.ll | 112
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll | 158
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.ll | 672
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.smfmac.gfx950.ll | 895
-rw-r--r--  llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll | 77
-rw-r--r--  llvm/test/CodeGen/AMDGPU/smfmac_alloc_failure_no_agpr_O0.ll | 119
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64zba.ll | 10
-rw-r--r--  llvm/test/TableGen/RuntimeLibcallEmitter-calling-conv.td | 38
-rw-r--r--  llvm/test/TableGen/RuntimeLibcallEmitter-conflict-warning.td | 18
-rw-r--r--  llvm/test/TableGen/RuntimeLibcallEmitter.td | 60
-rw-r--r--  llvm/test/tools/llvm-reduce/inline-call-sites-cost.ll | 95
-rw-r--r--  llvm/test/tools/llvm-reduce/inline-call-sites.ll | 765
-rw-r--r--  llvm/tools/llvm-profdata/CMakeLists.txt | 5
-rw-r--r--  llvm/tools/llvm-profdata/llvm-profdata.cpp | 5
-rw-r--r--  llvm/tools/llvm-reduce/CMakeLists.txt | 1
-rw-r--r--  llvm/tools/llvm-reduce/DeltaManager.cpp | 1
-rw-r--r--  llvm/tools/llvm-reduce/DeltaPasses.def | 2
-rw-r--r--  llvm/tools/llvm-reduce/deltas/ReduceInlineCallSites.cpp | 103
-rw-r--r--  llvm/tools/llvm-reduce/deltas/ReduceInlineCallSites.h | 18
-rw-r--r--  llvm/unittests/ADT/STLExtrasTest.cpp | 49
-rw-r--r--  llvm/utils/TableGen/Basic/RuntimeLibcallsEmitter.cpp | 40
36 files changed, 2556 insertions(+), 1400 deletions(-)
diff --git a/llvm/examples/Kaleidoscope/Chapter8/toy.cpp b/llvm/examples/Kaleidoscope/Chapter8/toy.cpp
index 739b895..1575211 100644
--- a/llvm/examples/Kaleidoscope/Chapter8/toy.cpp
+++ b/llvm/examples/Kaleidoscope/Chapter8/toy.cpp
@@ -1228,7 +1228,8 @@ int main() {
TheModule->setTargetTriple(Triple(TargetTriple));
std::string Error;
- auto Target = TargetRegistry::lookupTarget(TargetTriple, Error);
+ auto Target =
+ TargetRegistry::lookupTarget(TheModule->getTargetTriple(), Error);
// Print an error and exit if we couldn't find the requested target.
// This generally occurs if we've forgotten to initialise the
diff --git a/llvm/include/llvm/ADT/STLExtras.h b/llvm/include/llvm/ADT/STLExtras.h
index 4a91b06..5b20d6bd 100644
--- a/llvm/include/llvm/ADT/STLExtras.h
+++ b/llvm/include/llvm/ADT/STLExtras.h
@@ -1692,6 +1692,28 @@ template <typename R, typename E> auto accumulate(R &&Range, E &&Init) {
std::forward<E>(Init));
}
+/// Wrapper for std::accumulate with a binary operator.
+template <typename R, typename E, typename BinaryOp>
+auto accumulate(R &&Range, E &&Init, BinaryOp &&Op) {
+ return std::accumulate(adl_begin(Range), adl_end(Range),
+ std::forward<E>(Init), std::forward<BinaryOp>(Op));
+}
+
+/// Returns the sum of all values in `Range`, starting from the initial value
+/// `Init`. The default initial value is 0.
+template <typename R, typename E = detail::ValueOfRange<R>>
+auto sum_of(R &&Range, E Init = E{0}) {
+ return accumulate(std::forward<R>(Range), std::move(Init));
+}
+
+/// Returns the product of all values in `Range`, starting from the initial
+/// value `Init`. The default initial value is 1.
+template <typename R, typename E = detail::ValueOfRange<R>>
+auto product_of(R &&Range, E Init = E{1}) {
+ return accumulate(std::forward<R>(Range), std::move(Init),
+ std::multiplies<>{});
+}
+
/// Provide wrappers to std::for_each which take ranges instead of having to
/// pass begin/end explicitly.
template <typename R, typename UnaryFunction>
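A minimal usage sketch for the helpers added above (the function name and
values here are invented for illustration, not part of the patch):

    #include "llvm/ADT/STLExtras.h"
    #include <algorithm>
    #include <vector>

    int demo() {
      std::vector<int> Values = {1, 2, 3, 4};
      int Sum = llvm::sum_of(Values);      // 0 + 1 + 2 + 3 + 4 = 10
      int Prod = llvm::product_of(Values); // 1 * 1 * 2 * 3 * 4 = 24
      // The new accumulate overload threads an arbitrary binary operator:
      int Max = llvm::accumulate(Values, 0,
                                 [](int A, int B) { return std::max(A, B); });
      return Sum + Prod + Max; // 10 + 24 + 4 = 38
    }
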
diff --git a/llvm/include/llvm/MC/TargetRegistry.h b/llvm/include/llvm/MC/TargetRegistry.h
index 570d4c0..234c587 100644
--- a/llvm/include/llvm/MC/TargetRegistry.h
+++ b/llvm/include/llvm/MC/TargetRegistry.h
@@ -737,7 +737,8 @@ struct TargetRegistry {
/// \param TripleStr - The triple to use for finding a target.
/// \param Error - On failure, an error string describing why no target was
/// found.
- // TODO: Drop this in favor of the method accepting Triple.
+ // TODO(boomanaiden154): Remove this function after LLVM 22 branches.
+ [[deprecated("Use overload accepting Triple instead")]]
static const Target *lookupTarget(StringRef TripleStr, std::string &Error) {
return lookupTarget(Triple(TripleStr), Error);
}
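For callers that hit the new deprecation warning, the migration is mechanical;
a hedged before/after sketch (identifier names invented for illustration):

    std::string Error;
    // Before, now deprecated:
    //   const Target *T = TargetRegistry::lookupTarget(TripleStr, Error);
    // After, constructing the Triple explicitly (or reusing an existing one,
    // as the Kaleidoscope change above does with TheModule->getTargetTriple()):
    const Target *T = TargetRegistry::lookupTarget(Triple(TripleStr), Error);
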
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 848d9a5..557d87f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -5043,6 +5043,9 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case Intrinsic::amdgcn_mfma_i32_16x16x64_i8:
case Intrinsic::amdgcn_mfma_i32_32x32x32_i8:
case Intrinsic::amdgcn_mfma_f32_16x16x32_bf16: {
+ unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
+ unsigned MinNumRegsRequired = DstSize / 32;
+
// Default for MAI intrinsics.
// srcC can also be an immediate which can be folded later.
// FIXME: Should we eventually add an alternative mapping with AGPR src
@@ -5051,29 +5054,32 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
// vdst, srcA, srcB, srcC
const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
OpdsMapping[0] =
- Info->mayNeedAGPRs()
+ Info->getMinNumAGPRs() >= MinNumRegsRequired
? getAGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI)
: getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI);
OpdsMapping[2] = getVGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI);
OpdsMapping[4] =
- Info->mayNeedAGPRs()
+ Info->getMinNumAGPRs() >= MinNumRegsRequired
? getAGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI)
: getVGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI);
break;
}
case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
+ unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
+ unsigned MinNumRegsRequired = DstSize / 32;
+
const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
OpdsMapping[0] =
- Info->mayNeedAGPRs()
+ Info->getMinNumAGPRs() >= MinNumRegsRequired
? getAGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI)
: getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI);
OpdsMapping[2] = getVGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI);
OpdsMapping[4] =
- Info->mayNeedAGPRs()
+ Info->getMinNumAGPRs() >= MinNumRegsRequired
? getAGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI)
: getVGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI);
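For intuition about the new threshold (example numbers, not from the patch): a
<16 x float> MFMA accumulator is 512 bits wide, so MinNumRegsRequired = 512 /
32 = 16, and the AGPR operand mapping is chosen only when the function's
getMinNumAGPRs() reports at least 16 allocatable AGPRs; otherwise the operands
stay in VGPRs.
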
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index e233457..1a686a9 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -17346,74 +17346,24 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
MachineFunction *MF = MI.getParent()->getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
- SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
if (TII->isVOP3(MI.getOpcode())) {
// Make sure constant bus requirements are respected.
TII->legalizeOperandsVOP3(MRI, MI);
- // Prefer VGPRs over AGPRs in mAI instructions where possible.
- // This saves a chain-copy of registers and better balance register
- // use between vgpr and agpr as agpr tuples tend to be big.
- if (!MI.getDesc().operands().empty()) {
- unsigned Opc = MI.getOpcode();
- bool HasAGPRs = Info->mayNeedAGPRs();
- const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
- int16_t Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
- for (auto I :
- {AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
- AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1), Src2Idx}) {
- if (I == -1)
- break;
- if ((I == Src2Idx) && (HasAGPRs))
- break;
- MachineOperand &Op = MI.getOperand(I);
- if (!Op.isReg() || !Op.getReg().isVirtual())
- continue;
- auto *RC = TRI->getRegClassForReg(MRI, Op.getReg());
- if (!TRI->hasAGPRs(RC))
- continue;
- auto *Src = MRI.getUniqueVRegDef(Op.getReg());
- if (!Src || !Src->isCopy() ||
- !TRI->isSGPRReg(MRI, Src->getOperand(1).getReg()))
- continue;
- auto *NewRC = TRI->getEquivalentVGPRClass(RC);
- // All uses of agpr64 and agpr32 can also accept vgpr except for
- // v_accvgpr_read, but we do not produce agpr reads during selection,
- // so no use checks are needed.
- MRI.setRegClass(Op.getReg(), NewRC);
- }
-
- if (TII->isMAI(MI)) {
- // The ordinary src0, src1, src2 were legalized above.
- //
- // We have to also legalize the appended v_mfma_ld_scale_b32 operands,
- // as a separate instruction.
- int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
- AMDGPU::OpName::scale_src0);
- if (Src0Idx != -1) {
- int Src1Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
- AMDGPU::OpName::scale_src1);
- if (TII->usesConstantBus(MRI, MI, Src0Idx) &&
- TII->usesConstantBus(MRI, MI, Src1Idx))
- TII->legalizeOpWithMove(MI, Src1Idx);
- }
- }
-
- if (!HasAGPRs)
- return;
-
- // Resolve the rest of AV operands to AGPRs.
- if (auto *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2)) {
- if (Src2->isReg() && Src2->getReg().isVirtual()) {
- auto *RC = TRI->getRegClassForReg(MRI, Src2->getReg());
- if (TRI->isVectorSuperClass(RC)) {
- auto *NewRC = TRI->getEquivalentAGPRClass(RC);
- MRI.setRegClass(Src2->getReg(), NewRC);
- if (Src2->isTied())
- MRI.setRegClass(MI.getOperand(0).getReg(), NewRC);
- }
- }
+ if (TII->isMAI(MI)) {
+ // The ordinary src0, src1, src2 were legalized above.
+ //
+ // We have to also legalize the appended v_mfma_ld_scale_b32 operands,
+ // as a separate instruction.
+ int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
+ AMDGPU::OpName::scale_src0);
+ if (Src0Idx != -1) {
+ int Src1Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
+ AMDGPU::OpName::scale_src1);
+ if (TII->usesConstantBus(MRI, MI, Src0Idx) &&
+ TII->usesConstantBus(MRI, MI, Src1Idx))
+ TII->legalizeOpWithMove(MI, Src1Idx);
}
}
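What survives in this hook after the deletion is only the constant-bus fix-up
for scaled MFMAs: if both scale_src0 and scale_src1 would read the constant
bus, scale_src1 is rewritten through a move. The VGPR-vs-AGPR class rewriting
that used to live here is now driven by the selection patterns themselves (see
the VOP3PInstructions.td change below).
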
diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
index 908d856..b398db4 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -33,17 +33,20 @@ using namespace llvm;
// optimal RC for Opc and Dest of MFMA. In particular, there are high RP cases
// where it is better to produce the VGPR form (e.g. if there are VGPR users
// of the MFMA result).
-static cl::opt<bool> MFMAVGPRForm(
- "amdgpu-mfma-vgpr-form", cl::Hidden,
+static cl::opt<bool, true> MFMAVGPRFormOpt(
+ "amdgpu-mfma-vgpr-form",
cl::desc("Whether to force use VGPR for Opc and Dest of MFMA. If "
"unspecified, default to compiler heuristics"),
- cl::init(false));
+ cl::location(SIMachineFunctionInfo::MFMAVGPRForm), cl::init(false),
+ cl::Hidden);
const GCNTargetMachine &getTM(const GCNSubtarget *STI) {
const SITargetLowering *TLI = STI->getTargetLowering();
return static_cast<const GCNTargetMachine &>(TLI->getTargetMachine());
}
+bool SIMachineFunctionInfo::MFMAVGPRForm = false;
+
SIMachineFunctionInfo::SIMachineFunctionInfo(const Function &F,
const GCNSubtarget *STI)
: AMDGPUMachineFunction(F, *STI), Mode(F, *STI), GWSResourcePSV(getTM(STI)),
@@ -81,14 +84,13 @@ SIMachineFunctionInfo::SIMachineFunctionInfo(const Function &F,
PSInputAddr = AMDGPU::getInitialPSInputAddr(F);
}
- MayNeedAGPRs = ST.hasMAIInsts();
if (ST.hasGFX90AInsts()) {
- // FIXME: MayNeedAGPRs is a misnomer for how this is used. MFMA selection
- // should be separated from availability of AGPRs
- if (MFMAVGPRForm ||
- (ST.getMaxNumVGPRs(F) <= ST.getAddressableNumArchVGPRs() &&
- !mayUseAGPRs(F)))
- MayNeedAGPRs = false; // We will select all MAI with VGPR operands.
+ // FIXME: Extract logic out of getMaxNumVectorRegs; we need to apply the
+ // allocation granule and clamping.
+ auto [MinNumAGPRAttr, MaxNumAGPRAttr] =
+ AMDGPU::getIntegerPairAttribute(F, "amdgpu-agpr-alloc", {~0u, ~0u},
+ /*OnlyFirstRequired=*/true);
+ MinNumAGPRs = MinNumAGPRAttr;
}
if (AMDGPU::isChainCC(CC)) {
diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
index 4560615..b7dbb59 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
@@ -509,7 +509,9 @@ private:
// user arguments. This is an offset from the KernargSegmentPtr.
bool ImplicitArgPtr : 1;
- bool MayNeedAGPRs : 1;
+ /// Minimum number of AGPRs required to allocate in the function. Only
+ /// relevant for gfx90a-gfx950. For gfx908, this should be infinite.
+ unsigned MinNumAGPRs = ~0u;
// The hard-wired high half of the address of the global information table
// for AMDPAL OS type. 0xffffffff represents no hard-wired high half, since
@@ -537,6 +539,8 @@ private:
void MRI_NoteCloneVirtualRegister(Register NewReg, Register SrcReg) override;
public:
+ static bool MFMAVGPRForm;
+
struct VGPRSpillToAGPR {
SmallVector<MCPhysReg, 32> Lanes;
bool FullyAllocated = false;
@@ -1196,9 +1200,7 @@ public:
unsigned getMaxMemoryClusterDWords() const { return MaxMemoryClusterDWords; }
- bool mayNeedAGPRs() const {
- return MayNeedAGPRs;
- }
+ unsigned getMinNumAGPRs() const { return MinNumAGPRs; }
// \returns true if a function has a use of AGPRs via inline asm or
// has a call which may use it.
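Concretely, MinNumAGPRs now comes from the first component of the
"amdgpu-agpr-alloc" IR attribute, parsed in SIMachineFunctionInfo.cpp above:
"amdgpu-agpr-alloc"="32,256" yields getMinNumAGPRs() == 32, the
"amdgpu-agpr-alloc"="20" used by the new test below yields 20, and "0,0"
yields 0 (never select the AGPR form). When the attribute is absent, the
default {~0u, ~0u} leaves the minimum effectively unlimited, matching the
gfx908 expectation in the comment.
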
diff --git a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
index 5daf860..3a0cc35 100644
--- a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
@@ -67,7 +67,7 @@ class VOP3P_Mix_Profile<VOPProfile P, VOP3Features Features = VOP3_REGULAR,
class VOP3P_Mix_Profile_t16<VOPProfile P, VOP3Features Features = VOP3_REGULAR>
: VOP3P_Mix_Profile<P, Features, 0> {
let IsTrue16 = 1;
- let IsRealTrue16 = 1;
+ let IsRealTrue16 = 1;
let DstRC64 = getVALUDstForVT<P.DstVT, 1 /*IsTrue16*/, 1 /*IsVOP3Encoding*/>.ret;
}
@@ -950,7 +950,7 @@ class MFMA_F8F6F4_WithSizeTable_Helper<VOP3_Pseudo ps, string F8F8Op> :
}
// Currently assumes scaled instructions never have abid
-class MAIFrag<SDPatternOperator Op, code pred, bit HasAbid = true, bit Scaled = false> : PatFrag <
+class MAIFrag<SDPatternOperator Op, bit HasAbid = true, bit Scaled = false> : PatFrag <
!if(Scaled, (ops node:$src0, node:$src1, node:$src2, node:$cbsz, node:$blgp,
node:$src0_modifiers, node:$scale_src0,
node:$src1_modifiers, node:$scale_src1),
@@ -959,37 +959,30 @@ class MAIFrag<SDPatternOperator Op, code pred, bit HasAbid = true, bit Scaled =
(ops node:$blgp))),
!if(Scaled, (Op $src0, $src1, $src2, $cbsz, $blgp, $src0_modifiers, $scale_src0, $src1_modifiers, $scale_src1),
!if(HasAbid, (Op $src0, $src1, $src2, $cbsz, $abid, $blgp),
- (Op $src0, $src1, $src2, $cbsz, $blgp))),
- pred
->;
-
-defvar MayNeedAGPRs = [{
- return MF->getInfo<SIMachineFunctionInfo>()->mayNeedAGPRs();
-}];
-
-defvar MayNeedAGPRs_gisel = [{
- return MF.getInfo<SIMachineFunctionInfo>()->mayNeedAGPRs();
-}];
+ (Op $src0, $src1, $src2, $cbsz, $blgp)))>;
-defvar MayNotNeedAGPRs = [{
- return !MF->getInfo<SIMachineFunctionInfo>()->mayNeedAGPRs();
-}];
+class CanUseAGPR_MAI<ValueType vt> {
+ code PredicateCode = [{
+ return !Subtarget->hasGFX90AInsts() ||
+ (!SIMachineFunctionInfo::MFMAVGPRForm &&
+ MF->getInfo<SIMachineFunctionInfo>()->getMinNumAGPRs() >=
+ }] # !srl(vt.Size, 5) # ");";
-defvar MayNotNeedAGPRs_gisel = [{
- return !MF.getInfo<SIMachineFunctionInfo>()->mayNeedAGPRs();
-}];
+ code GISelPredicateCode = [{
+ return !Subtarget->hasGFX90AInsts() ||
+ (!SIMachineFunctionInfo::MFMAVGPRForm &&
+ MF.getInfo<SIMachineFunctionInfo>()->getMinNumAGPRs() >=
+ }] # !srl(vt.Size, 5) # ");";
+}
-class AgprMAIFrag<SDPatternOperator Op, bit HasAbid = true,
+class AgprMAIFrag<SDPatternOperator Op, ValueType vt, bit HasAbid = true,
bit Scaled = false> :
- MAIFrag<Op, MayNeedAGPRs, HasAbid, Scaled> {
- let GISelPredicateCode = MayNeedAGPRs_gisel;
-}
+ MAIFrag<Op, HasAbid, Scaled>,
+ CanUseAGPR_MAI<vt>;
class VgprMAIFrag<SDPatternOperator Op, bit HasAbid = true,
- bit Scaled = false> :
- MAIFrag<Op, MayNotNeedAGPRs, HasAbid, Scaled> {
- let GISelPredicateCode = MayNotNeedAGPRs_gisel;
-}
+ bit Scaled = false> :
+ MAIFrag<Op, HasAbid, Scaled>;
let isAsCheapAsAMove = 1, isReMaterializable = 1 in {
defm V_ACCVGPR_READ_B32 : VOP3Inst<"v_accvgpr_read_b32", VOPProfileAccRead>;
@@ -1037,16 +1030,19 @@ multiclass MAIInst<string OpName, string P, SDPatternOperator node = null_frag,
bit HasAbid = true,
bit Scaled = false> {
defvar NoDstOverlap = !cast<VOPProfileMAI>("VOPProfileMAI_" # P).NoDstOverlap;
+ defvar ProfileAGPR = !cast<VOPProfileMAI>("VOPProfileMAI_" # P);
+ defvar ProfileVGPR = !cast<VOPProfileMAI>("VOPProfileMAI_" # P # "_VCD");
+
let isConvergent = 1, mayRaiseFPException = 0, ReadsModeReg = 1 in {
// FP32 denorm mode is respected, rounding mode is not. Exceptions are not supported.
let Constraints = !if(NoDstOverlap, "@earlyclobber $vdst", "") in {
- def _e64 : MAIInst<OpName, !cast<VOPProfileMAI>("VOPProfileMAI_" # P),
- !if(!or(NoDstOverlap, !eq(node, null_frag)), null_frag, AgprMAIFrag<node, HasAbid, Scaled>), Scaled>,
+ def _e64 : MAIInst<OpName, ProfileAGPR,
+ !if(!or(NoDstOverlap, !eq(node, null_frag)), null_frag, AgprMAIFrag<node, ProfileAGPR.DstVT, HasAbid, Scaled>), Scaled>,
MFMATable<0, "AGPR", NAME # "_e64">;
let OtherPredicates = [isGFX90APlus], Mnemonic = OpName in
- def _vgprcd_e64 : MAIInst<OpName # "_vgprcd", !cast<VOPProfileMAI>("VOPProfileMAI_" # P # "_VCD"),
+ def _vgprcd_e64 : MAIInst<OpName # "_vgprcd", ProfileVGPR,
!if(!or(NoDstOverlap, !eq(node, null_frag)), null_frag, VgprMAIFrag<node, HasAbid, Scaled>), Scaled>,
MFMATable<0, "VGPR", NAME # "_vgprcd_e64", NAME # "_e64">;
}
@@ -1055,12 +1051,12 @@ multiclass MAIInst<string OpName, string P, SDPatternOperator node = null_frag,
let Constraints = !if(NoDstOverlap, "$vdst = $src2", ""),
isConvertibleToThreeAddress = NoDstOverlap,
Mnemonic = OpName in {
- def "_mac_e64" : MAIInst<OpName # "_mac", !cast<VOPProfileMAI>("VOPProfileMAI_" # P),
- !if(!eq(node, null_frag), null_frag, AgprMAIFrag<node, HasAbid, Scaled>), Scaled>,
+ def "_mac_e64" : MAIInst<OpName # "_mac", ProfileAGPR,
+ !if(!eq(node, null_frag), null_frag, AgprMAIFrag<node, ProfileAGPR.DstVT, HasAbid, Scaled>), Scaled>,
MFMATable<1, "AGPR", NAME # "_e64", NAME # "_mac_e64">;
let OtherPredicates = [isGFX90APlus] in
- def _mac_vgprcd_e64 : MAIInst<OpName # "_mac_vgprcd", !cast<VOPProfileMAI>("VOPProfileMAI_" # P # "_VCD"),
+ def _mac_vgprcd_e64 : MAIInst<OpName # "_mac_vgprcd", ProfileVGPR,
!if(!eq(node, null_frag), null_frag, VgprMAIFrag<node, HasAbid, Scaled>), Scaled>,
MFMATable<1, "VGPR", NAME # "_vgprcd_e64", NAME # "_mac_e64">;
}
@@ -1074,11 +1070,11 @@ multiclass ScaledMAIInst_mc<string OpName, string UnscaledOpName_, SDPatternOper
defvar UnscaledOpName = UnscaledOpName_#VariantSuffix;
defvar HasAbid = false;
-
- defvar NoDstOverlap = !cast<VOPProfileMAI>(!cast<MAIInst>(UnscaledOpName#"_e64").Pfl).NoDstOverlap;
+ defvar Profile = !cast<VOPProfileMAI>(!cast<MAIInst>(UnscaledOpName#"_e64").Pfl);
+ defvar NoDstOverlap = Profile.NoDstOverlap;
def _e64 : ScaledMAIInst<OpName,
- !cast<MAIInst>(UnscaledOpName#"_e64"), !if(NoDstOverlap, null_frag, AgprMAIFrag<node, HasAbid, true>)>,
+ !cast<MAIInst>(UnscaledOpName#"_e64"), !if(NoDstOverlap, null_frag, AgprMAIFrag<node, Profile.DstVT, HasAbid, true>)>,
MFMATable<0, "AGPR", NAME # "_e64">;
def _vgprcd_e64 : ScaledMAIInst<OpName # "_vgprcd",
@@ -1090,7 +1086,7 @@ multiclass ScaledMAIInst_mc<string OpName, string UnscaledOpName_, SDPatternOper
isConvertibleToThreeAddress = NoDstOverlap,
Mnemonic = UnscaledOpName_ in {
def _mac_e64 : ScaledMAIInst<OpName # "_mac",
- !cast<MAIInst>(UnscaledOpName # "_mac_e64"), AgprMAIFrag<node, HasAbid, true>>,
+ !cast<MAIInst>(UnscaledOpName # "_mac_e64"), AgprMAIFrag<node, Profile.DstVT, HasAbid, true>>,
MFMATable<1, "AGPR", NAME # "_e64">;
def _mac_vgprcd_e64 : ScaledMAIInst<OpName # " _mac_vgprcd",
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 6234714..a3a4cf2 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -16498,43 +16498,60 @@ static SDValue expandMul(SDNode *N, SelectionDAG &DAG,
SDValue X = N->getOperand(0);
if (Subtarget.hasShlAdd(3)) {
- for (uint64_t Divisor : {3, 5, 9}) {
- if (MulAmt % Divisor != 0)
- continue;
- uint64_t MulAmt2 = MulAmt / Divisor;
- // 3/5/9 * 2^N -> shl (shXadd X, X), N
- if (isPowerOf2_64(MulAmt2)) {
- SDLoc DL(N);
- SDValue X = N->getOperand(0);
- // Put the shift first if we can fold a zext into the
- // shift forming a slli.uw.
- if (X.getOpcode() == ISD::AND && isa<ConstantSDNode>(X.getOperand(1)) &&
- X.getConstantOperandVal(1) == UINT64_C(0xffffffff)) {
- SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, X,
- DAG.getConstant(Log2_64(MulAmt2), DL, VT));
- return DAG.getNode(RISCVISD::SHL_ADD, DL, VT, Shl,
- DAG.getConstant(Log2_64(Divisor - 1), DL, VT),
- Shl);
- }
- // Otherwise, put rhe shl second so that it can fold with following
- // instructions (e.g. sext or add).
- SDValue Mul359 =
- DAG.getNode(RISCVISD::SHL_ADD, DL, VT, X,
- DAG.getConstant(Log2_64(Divisor - 1), DL, VT), X);
- return DAG.getNode(ISD::SHL, DL, VT, Mul359,
- DAG.getConstant(Log2_64(MulAmt2), DL, VT));
- }
-
- // 3/5/9 * 3/5/9 -> shXadd (shYadd X, X), (shYadd X, X)
- if (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9) {
- SDLoc DL(N);
- SDValue Mul359 =
- DAG.getNode(RISCVISD::SHL_ADD, DL, VT, X,
- DAG.getConstant(Log2_64(Divisor - 1), DL, VT), X);
- return DAG.getNode(RISCVISD::SHL_ADD, DL, VT, Mul359,
- DAG.getConstant(Log2_64(MulAmt2 - 1), DL, VT),
- Mul359);
+ int Shift;
+ if (int ShXAmount = isShifted359(MulAmt, Shift)) {
+ // 3/5/9 * 2^N -> shl (shXadd X, X), N
+ SDLoc DL(N);
+ SDValue X = N->getOperand(0);
+ // Put the shift first if we can fold a zext into the shift forming
+ // a slli.uw.
+ if (X.getOpcode() == ISD::AND && isa<ConstantSDNode>(X.getOperand(1)) &&
+ X.getConstantOperandVal(1) == UINT64_C(0xffffffff)) {
+ SDValue Shl =
+ DAG.getNode(ISD::SHL, DL, VT, X, DAG.getConstant(Shift, DL, VT));
+ return DAG.getNode(RISCVISD::SHL_ADD, DL, VT, Shl,
+ DAG.getConstant(ShXAmount, DL, VT), Shl);
}
+ // Otherwise, put the shl second so that it can fold with following
+ // instructions (e.g. sext or add).
+ SDValue Mul359 = DAG.getNode(RISCVISD::SHL_ADD, DL, VT, X,
+ DAG.getConstant(ShXAmount, DL, VT), X);
+ return DAG.getNode(ISD::SHL, DL, VT, Mul359,
+ DAG.getConstant(Shift, DL, VT));
+ }
+
+ // 3/5/9 * 3/5/9 -> shXadd (shYadd X, X), (shYadd X, X)
+ int ShX;
+ int ShY;
+ switch (MulAmt) {
+ case 3 * 5:
+ ShY = 1;
+ ShX = 2;
+ break;
+ case 3 * 9:
+ ShY = 1;
+ ShX = 3;
+ break;
+ case 5 * 5:
+ ShX = ShY = 2;
+ break;
+ case 5 * 9:
+ ShY = 2;
+ ShX = 3;
+ break;
+ case 9 * 9:
+ ShX = ShY = 3;
+ break;
+ default:
+ ShX = ShY = 0;
+ break;
+ }
+ if (ShX) {
+ SDLoc DL(N);
+ SDValue Mul359 = DAG.getNode(RISCVISD::SHL_ADD, DL, VT, X,
+ DAG.getConstant(ShY, DL, VT), X);
+ return DAG.getNode(RISCVISD::SHL_ADD, DL, VT, Mul359,
+ DAG.getConstant(ShX, DL, VT), Mul359);
}
// If this is a power 2 + 2/4/8, we can use a shift followed by a single
@@ -16557,18 +16574,14 @@ static SDValue expandMul(SDNode *N, SelectionDAG &DAG,
// variants we could implement. e.g.
// (2^(1,2,3) * 3,5,9 + 1) << C2
// 2^(C1>3) * 3,5,9 +/- 1
- for (uint64_t Divisor : {3, 5, 9}) {
- uint64_t C = MulAmt - 1;
- if (C <= Divisor)
- continue;
- unsigned TZ = llvm::countr_zero(C);
- if ((C >> TZ) == Divisor && (TZ == 1 || TZ == 2 || TZ == 3)) {
+ if (int ShXAmount = isShifted359(MulAmt - 1, Shift)) {
+ assert(Shift != 0 && "MulAmt=4,6,10 handled before");
+ if (Shift <= 3) {
SDLoc DL(N);
- SDValue Mul359 =
- DAG.getNode(RISCVISD::SHL_ADD, DL, VT, X,
- DAG.getConstant(Log2_64(Divisor - 1), DL, VT), X);
+ SDValue Mul359 = DAG.getNode(RISCVISD::SHL_ADD, DL, VT, X,
+ DAG.getConstant(ShXAmount, DL, VT), X);
return DAG.getNode(RISCVISD::SHL_ADD, DL, VT, Mul359,
- DAG.getConstant(TZ, DL, VT), X);
+ DAG.getConstant(Shift, DL, VT), X);
}
}
@@ -16576,7 +16589,7 @@ static SDValue expandMul(SDNode *N, SelectionDAG &DAG,
if (MulAmt > 2 && isPowerOf2_64((MulAmt - 1) & (MulAmt - 2))) {
unsigned ScaleShift = llvm::countr_zero(MulAmt - 1);
if (ScaleShift >= 1 && ScaleShift < 4) {
- unsigned ShiftAmt = Log2_64(((MulAmt - 1) & (MulAmt - 2)));
+ unsigned ShiftAmt = llvm::countr_zero((MulAmt - 1) & (MulAmt - 2));
SDLoc DL(N);
SDValue Shift1 =
DAG.getNode(ISD::SHL, DL, VT, X, DAG.getConstant(ShiftAmt, DL, VT));
@@ -16589,7 +16602,7 @@ static SDValue expandMul(SDNode *N, SelectionDAG &DAG,
// 2^N - 3/5/9 --> (sub (shl X, C1), (shXadd X, x))
for (uint64_t Offset : {3, 5, 9}) {
if (isPowerOf2_64(MulAmt + Offset)) {
- unsigned ShAmt = Log2_64(MulAmt + Offset);
+ unsigned ShAmt = llvm::countr_zero(MulAmt + Offset);
if (ShAmt >= VT.getSizeInBits())
continue;
SDLoc DL(N);
@@ -16608,21 +16621,16 @@ static SDValue expandMul(SDNode *N, SelectionDAG &DAG,
uint64_t MulAmt2 = MulAmt / Divisor;
// 3/5/9 * 3/5/9 * 2^N - In particular, this covers multiples
// of 25 which happen to be quite common.
- for (uint64_t Divisor2 : {3, 5, 9}) {
- if (MulAmt2 % Divisor2 != 0)
- continue;
- uint64_t MulAmt3 = MulAmt2 / Divisor2;
- if (isPowerOf2_64(MulAmt3)) {
- SDLoc DL(N);
- SDValue Mul359A =
- DAG.getNode(RISCVISD::SHL_ADD, DL, VT, X,
- DAG.getConstant(Log2_64(Divisor - 1), DL, VT), X);
- SDValue Mul359B = DAG.getNode(
- RISCVISD::SHL_ADD, DL, VT, Mul359A,
- DAG.getConstant(Log2_64(Divisor2 - 1), DL, VT), Mul359A);
- return DAG.getNode(ISD::SHL, DL, VT, Mul359B,
- DAG.getConstant(Log2_64(MulAmt3), DL, VT));
- }
+ if (int ShBAmount = isShifted359(MulAmt2, Shift)) {
+ SDLoc DL(N);
+ SDValue Mul359A =
+ DAG.getNode(RISCVISD::SHL_ADD, DL, VT, X,
+ DAG.getConstant(Log2_64(Divisor - 1), DL, VT), X);
+ SDValue Mul359B =
+ DAG.getNode(RISCVISD::SHL_ADD, DL, VT, Mul359A,
+ DAG.getConstant(ShBAmount, DL, VT), Mul359A);
+ return DAG.getNode(ISD::SHL, DL, VT, Mul359B,
+ DAG.getConstant(Shift, DL, VT));
}
}
}
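A worked example of the rewritten decomposition (multipliers chosen for
illustration): MulAmt = 20 = 5 << 2, so isShifted359 returns ShXAmount = 2
with Shift = 2, and the lowering is sh2add (x*5) followed by slli by 2, i.e.
x*20 in two instructions. MulAmt = 45 hits the 5 * 9 switch case (ShY = 2,
ShX = 3): sh2add produces 5*x, then sh3add produces (5*x)*9 = 45*x.
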
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 7db4832..96e1078 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -4586,24 +4586,23 @@ void RISCVInstrInfo::mulImm(MachineFunction &MF, MachineBasicBlock &MBB,
.addReg(DestReg, RegState::Kill)
.addImm(ShiftAmount)
.setMIFlag(Flag);
- } else if (STI.hasShlAdd(3) &&
- ((Amount % 3 == 0 && isPowerOf2_64(Amount / 3)) ||
- (Amount % 5 == 0 && isPowerOf2_64(Amount / 5)) ||
- (Amount % 9 == 0 && isPowerOf2_64(Amount / 9)))) {
+ } else if (int ShXAmount, ShiftAmount;
+ STI.hasShlAdd(3) &&
+ (ShXAmount = isShifted359(Amount, ShiftAmount)) != 0) {
// We can use Zba SHXADD+SLLI instructions for multiply in some cases.
unsigned Opc;
- uint32_t ShiftAmount;
- if (Amount % 9 == 0) {
- Opc = RISCV::SH3ADD;
- ShiftAmount = Log2_64(Amount / 9);
- } else if (Amount % 5 == 0) {
- Opc = RISCV::SH2ADD;
- ShiftAmount = Log2_64(Amount / 5);
- } else if (Amount % 3 == 0) {
+ switch (ShXAmount) {
+ case 1:
Opc = RISCV::SH1ADD;
- ShiftAmount = Log2_64(Amount / 3);
- } else {
- llvm_unreachable("implied by if-clause");
+ break;
+ case 2:
+ Opc = RISCV::SH2ADD;
+ break;
+ case 3:
+ Opc = RISCV::SH3ADD;
+ break;
+ default:
+ llvm_unreachable("unexpected result of isShifted359");
}
if (ShiftAmount)
BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index 42a0c4c..c5eddb9 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -25,6 +25,25 @@
namespace llvm {
+// If Value is of the form C1<<C2, where C1 = 3, 5 or 9,
+// returns log2(C1 - 1) and assigns Shift = C2.
+// Otherwise, returns 0.
+template <typename T> int isShifted359(T Value, int &Shift) {
+ if (Value == 0)
+ return 0;
+ Shift = llvm::countr_zero(Value);
+ switch (Value >> Shift) {
+ case 3:
+ return 1;
+ case 5:
+ return 2;
+ case 9:
+ return 3;
+ default:
+ return 0;
+ }
+}
+
class RISCVSubtarget;
static const MachineMemOperand::Flags MONontemporalBit0 =
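Illustrative checks of the helper's contract (invented values; assumes
RISCVInstrInfo.h and <cassert> are included):

    int Shift;
    assert(isShifted359(24, Shift) == 1 && Shift == 3); // 24 = 3 << 3: SH1ADD, then SLLI 3
    assert(isShifted359(40, Shift) == 2 && Shift == 3); // 40 = 5 << 3: SH2ADD, then SLLI 3
    assert(isShifted359(9, Shift) == 3 && Shift == 0);  // 9 = 9 << 0: SH3ADD alone
    assert(isShifted359(14, Shift) == 0);               // 14 >> 1 == 7: no 3/5/9 match
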
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
index ce21d83..8d9b777 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
@@ -808,9 +808,9 @@ multiclass Sh2Add_UWPat<Instruction sh2add_uw> {
}
multiclass Sh3Add_UWPat<Instruction sh3add_uw> {
- def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFF8),
+ def : Pat<(i64 (add_like_non_imm12 (and (shl GPR:$rs1, (i64 3)), 0x7FFFFFFFF),
(XLenVT GPR:$rs2))),
- (sh3add_uw (XLenVT (SRLIW GPR:$rs1, 3)), GPR:$rs2)>;
+ (sh3add_uw GPR:$rs1, GPR:$rs2)>;
// Use SRLI to clear the LSBs and SHXADD_UW to mask and shift.
def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0x7FFFFFFF8),
(XLenVT GPR:$rs2))),
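Reasoning behind the rewritten first pattern: sh3add.uw computes
(zext32(rs1) << 3) + rs2, and masking (shl x, 3) with 0x7FFFFFFFF keeps
exactly bits [34:3], which is zext32(x) << 3, so the shl-then-mask form maps
onto the instruction directly; the old form had to recover the unshifted
value with an SRLIW first.
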
diff --git a/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp b/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp
index e9a3e98..41a6c80 100644
--- a/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp
+++ b/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp
@@ -120,6 +120,12 @@ static cl::opt<unsigned>
cl::desc("Maximum cost accepted for the transformation"),
cl::Hidden, cl::init(50));
+static cl::opt<double> MaxClonedRate(
+ "dfa-max-cloned-rate",
+ cl::desc(
+ "Maximum cloned instructions rate accepted for the transformation"),
+ cl::Hidden, cl::init(7.5));
+
namespace {
class SelectInstToUnfold {
@@ -828,6 +834,7 @@ private:
/// also returns false if it is illegal to clone some required block.
bool isLegalAndProfitableToTransform() {
CodeMetrics Metrics;
+ uint64_t NumClonedInst = 0;
SwitchInst *Switch = SwitchPaths->getSwitchInst();
// Don't thread switch without multiple successors.
@@ -837,7 +844,6 @@ private:
// Note that DuplicateBlockMap is not being used as intended here. It is
// just being used to ensure (BB, State) pairs are only counted once.
DuplicateBlockMap DuplicateMap;
-
for (ThreadingPath &TPath : SwitchPaths->getThreadingPaths()) {
PathType PathBBs = TPath.getPath();
APInt NextState = TPath.getExitValue();
@@ -848,6 +854,7 @@ private:
BasicBlock *VisitedBB = getClonedBB(BB, NextState, DuplicateMap);
if (!VisitedBB) {
Metrics.analyzeBasicBlock(BB, *TTI, EphValues);
+ NumClonedInst += BB->sizeWithoutDebug();
DuplicateMap[BB].push_back({BB, NextState});
}
@@ -865,6 +872,7 @@ private:
if (VisitedBB)
continue;
Metrics.analyzeBasicBlock(BB, *TTI, EphValues);
+ NumClonedInst += BB->sizeWithoutDebug();
DuplicateMap[BB].push_back({BB, NextState});
}
@@ -901,6 +909,22 @@ private:
}
}
+ // Too many cloned instructions slow down later optimizations, especially
+ // SLPVectorizer.
+ // TODO: Thread the switch partially before reaching the threshold.
+ uint64_t NumOrigInst = 0;
+ for (auto *BB : DuplicateMap.keys())
+ NumOrigInst += BB->sizeWithoutDebug();
+ if (double(NumClonedInst) / double(NumOrigInst) > MaxClonedRate) {
+ LLVM_DEBUG(dbgs() << "DFA Jump Threading: Not jump threading, too many "
+ "instructions will be cloned\n");
+ ORE->emit([&]() {
+ return OptimizationRemarkMissed(DEBUG_TYPE, "NotProfitable", Switch)
+ << "Too much instructions will be cloned.";
+ });
+ return false;
+ }
+
InstructionCost DuplicationCost = 0;
unsigned JumpTableSize = 0;
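Rough numbers for intuition (invented): if the blocks reachable along all
threading paths together hold NumOrigInst = 40 instructions and duplicating
every (BB, State) pair would create NumClonedInst = 350, the rate is
350 / 40 = 8.75, which exceeds the default dfa-max-cloned-rate of 7.5, so the
transformation is rejected as unprofitable.
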
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.form.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.form.ll
index 87a7c2e..cc4cc8e 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.form.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.form.ll
@@ -72,5 +72,206 @@ define <4 x float> @request_no_agpr(<8 x half> %arg0, <8 x half> %arg1, <4 x flo
ret <4 x float> %result
}
+; Make sure this selects the VGPR form when AGPRs are available, but not
+; enough of them.
+define amdgpu_kernel void @not_enough_agprs(ptr addrspace(1) %arg) #2 {
+; HEURRC-LABEL: not_enough_agprs:
+; HEURRC: ; %bb.0: ; %bb
+; HEURRC-NEXT: s_load_dwordx2 s[34:35], s[4:5], 0x24
+; HEURRC-NEXT: v_mov_b32_e32 v33, 1.0
+; HEURRC-NEXT: v_mov_b32_e32 v34, 2.0
+; HEURRC-NEXT: v_mov_b32_e32 v32, 0
+; HEURRC-NEXT: s_waitcnt lgkmcnt(0)
+; HEURRC-NEXT: s_load_dwordx16 s[16:31], s[34:35], 0x0
+; HEURRC-NEXT: s_load_dwordx16 s[0:15], s[34:35], 0x40
+; HEURRC-NEXT: s_waitcnt lgkmcnt(0)
+; HEURRC-NEXT: v_mov_b32_e32 v0, s16
+; HEURRC-NEXT: v_mov_b32_e32 v1, s17
+; HEURRC-NEXT: v_mov_b32_e32 v2, s18
+; HEURRC-NEXT: v_mov_b32_e32 v3, s19
+; HEURRC-NEXT: v_mov_b32_e32 v4, s20
+; HEURRC-NEXT: v_mov_b32_e32 v5, s21
+; HEURRC-NEXT: v_mov_b32_e32 v6, s22
+; HEURRC-NEXT: v_mov_b32_e32 v7, s23
+; HEURRC-NEXT: v_mov_b32_e32 v8, s24
+; HEURRC-NEXT: v_mov_b32_e32 v9, s25
+; HEURRC-NEXT: v_mov_b32_e32 v10, s26
+; HEURRC-NEXT: v_mov_b32_e32 v11, s27
+; HEURRC-NEXT: v_mov_b32_e32 v12, s28
+; HEURRC-NEXT: v_mov_b32_e32 v13, s29
+; HEURRC-NEXT: v_mov_b32_e32 v14, s30
+; HEURRC-NEXT: v_mov_b32_e32 v15, s31
+; HEURRC-NEXT: v_mov_b32_e32 v16, s0
+; HEURRC-NEXT: v_mov_b32_e32 v17, s1
+; HEURRC-NEXT: v_mov_b32_e32 v18, s2
+; HEURRC-NEXT: v_mov_b32_e32 v19, s3
+; HEURRC-NEXT: v_mov_b32_e32 v20, s4
+; HEURRC-NEXT: v_mov_b32_e32 v21, s5
+; HEURRC-NEXT: v_mov_b32_e32 v22, s6
+; HEURRC-NEXT: v_mov_b32_e32 v23, s7
+; HEURRC-NEXT: v_mov_b32_e32 v24, s8
+; HEURRC-NEXT: v_mov_b32_e32 v25, s9
+; HEURRC-NEXT: v_mov_b32_e32 v26, s10
+; HEURRC-NEXT: v_mov_b32_e32 v27, s11
+; HEURRC-NEXT: v_mov_b32_e32 v28, s12
+; HEURRC-NEXT: v_mov_b32_e32 v29, s13
+; HEURRC-NEXT: v_mov_b32_e32 v30, s14
+; HEURRC-NEXT: v_mov_b32_e32 v31, s15
+; HEURRC-NEXT: s_nop 1
+; HEURRC-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v33, v34, v[0:31] cbsz:1 abid:2 blgp:3
+; HEURRC-NEXT: s_nop 15
+; HEURRC-NEXT: s_nop 1
+; HEURRC-NEXT: global_store_dwordx4 v32, v[24:27], s[34:35] offset:96
+; HEURRC-NEXT: global_store_dwordx4 v32, v[28:31], s[34:35] offset:112
+; HEURRC-NEXT: global_store_dwordx4 v32, v[16:19], s[34:35] offset:64
+; HEURRC-NEXT: global_store_dwordx4 v32, v[20:23], s[34:35] offset:80
+; HEURRC-NEXT: global_store_dwordx4 v32, v[8:11], s[34:35] offset:32
+; HEURRC-NEXT: global_store_dwordx4 v32, v[12:15], s[34:35] offset:48
+; HEURRC-NEXT: global_store_dwordx4 v32, v[0:3], s[34:35]
+; HEURRC-NEXT: global_store_dwordx4 v32, v[4:7], s[34:35] offset:16
+; HEURRC-NEXT: s_endpgm
+;
+; VGPRRC-LABEL: not_enough_agprs:
+; VGPRRC: ; %bb.0: ; %bb
+; VGPRRC-NEXT: s_load_dwordx2 s[34:35], s[4:5], 0x24
+; VGPRRC-NEXT: v_mov_b32_e32 v33, 1.0
+; VGPRRC-NEXT: v_mov_b32_e32 v34, 2.0
+; VGPRRC-NEXT: v_mov_b32_e32 v32, 0
+; VGPRRC-NEXT: s_waitcnt lgkmcnt(0)
+; VGPRRC-NEXT: s_load_dwordx16 s[16:31], s[34:35], 0x0
+; VGPRRC-NEXT: s_load_dwordx16 s[0:15], s[34:35], 0x40
+; VGPRRC-NEXT: s_waitcnt lgkmcnt(0)
+; VGPRRC-NEXT: v_mov_b32_e32 v0, s16
+; VGPRRC-NEXT: v_mov_b32_e32 v1, s17
+; VGPRRC-NEXT: v_mov_b32_e32 v2, s18
+; VGPRRC-NEXT: v_mov_b32_e32 v3, s19
+; VGPRRC-NEXT: v_mov_b32_e32 v4, s20
+; VGPRRC-NEXT: v_mov_b32_e32 v5, s21
+; VGPRRC-NEXT: v_mov_b32_e32 v6, s22
+; VGPRRC-NEXT: v_mov_b32_e32 v7, s23
+; VGPRRC-NEXT: v_mov_b32_e32 v8, s24
+; VGPRRC-NEXT: v_mov_b32_e32 v9, s25
+; VGPRRC-NEXT: v_mov_b32_e32 v10, s26
+; VGPRRC-NEXT: v_mov_b32_e32 v11, s27
+; VGPRRC-NEXT: v_mov_b32_e32 v12, s28
+; VGPRRC-NEXT: v_mov_b32_e32 v13, s29
+; VGPRRC-NEXT: v_mov_b32_e32 v14, s30
+; VGPRRC-NEXT: v_mov_b32_e32 v15, s31
+; VGPRRC-NEXT: v_mov_b32_e32 v16, s0
+; VGPRRC-NEXT: v_mov_b32_e32 v17, s1
+; VGPRRC-NEXT: v_mov_b32_e32 v18, s2
+; VGPRRC-NEXT: v_mov_b32_e32 v19, s3
+; VGPRRC-NEXT: v_mov_b32_e32 v20, s4
+; VGPRRC-NEXT: v_mov_b32_e32 v21, s5
+; VGPRRC-NEXT: v_mov_b32_e32 v22, s6
+; VGPRRC-NEXT: v_mov_b32_e32 v23, s7
+; VGPRRC-NEXT: v_mov_b32_e32 v24, s8
+; VGPRRC-NEXT: v_mov_b32_e32 v25, s9
+; VGPRRC-NEXT: v_mov_b32_e32 v26, s10
+; VGPRRC-NEXT: v_mov_b32_e32 v27, s11
+; VGPRRC-NEXT: v_mov_b32_e32 v28, s12
+; VGPRRC-NEXT: v_mov_b32_e32 v29, s13
+; VGPRRC-NEXT: v_mov_b32_e32 v30, s14
+; VGPRRC-NEXT: v_mov_b32_e32 v31, s15
+; VGPRRC-NEXT: s_nop 1
+; VGPRRC-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v33, v34, v[0:31] cbsz:1 abid:2 blgp:3
+; VGPRRC-NEXT: s_nop 15
+; VGPRRC-NEXT: s_nop 1
+; VGPRRC-NEXT: global_store_dwordx4 v32, v[24:27], s[34:35] offset:96
+; VGPRRC-NEXT: global_store_dwordx4 v32, v[28:31], s[34:35] offset:112
+; VGPRRC-NEXT: global_store_dwordx4 v32, v[16:19], s[34:35] offset:64
+; VGPRRC-NEXT: global_store_dwordx4 v32, v[20:23], s[34:35] offset:80
+; VGPRRC-NEXT: global_store_dwordx4 v32, v[8:11], s[34:35] offset:32
+; VGPRRC-NEXT: global_store_dwordx4 v32, v[12:15], s[34:35] offset:48
+; VGPRRC-NEXT: global_store_dwordx4 v32, v[0:3], s[34:35]
+; VGPRRC-NEXT: global_store_dwordx4 v32, v[4:7], s[34:35] offset:16
+; VGPRRC-NEXT: s_endpgm
+bb:
+ %in.1 = load <32 x float>, ptr addrspace(1) %arg, align 128
+ %mai.1 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.000000e+00, float 2.000000e+00, <32 x float> %in.1, i32 1, i32 2, i32 3)
+ store <32 x float> %mai.1, ptr addrspace(1) %arg, align 128
+ ret void
+}
+
+define <16 x float> @mfma_scale_respect_flag(<8 x i32> %arg0, <8 x i32> %arg1, <16 x float> %arg2, i32 %scale0, i32 %scale1) #2 {
+; HEURRC-LABEL: mfma_scale_respect_flag:
+; HEURRC: ; %bb.0:
+; HEURRC-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; HEURRC-NEXT: scratch_load_dword a15, off, s32
+; HEURRC-NEXT: scratch_load_dword v31, off, s32 offset:8
+; HEURRC-NEXT: scratch_load_dword v32, off, s32 offset:4
+; HEURRC-NEXT: v_accvgpr_write_b32 a0, v16
+; HEURRC-NEXT: v_accvgpr_write_b32 a1, v17
+; HEURRC-NEXT: v_accvgpr_write_b32 a2, v18
+; HEURRC-NEXT: v_accvgpr_write_b32 a3, v19
+; HEURRC-NEXT: v_accvgpr_write_b32 a4, v20
+; HEURRC-NEXT: v_accvgpr_write_b32 a5, v21
+; HEURRC-NEXT: v_accvgpr_write_b32 a6, v22
+; HEURRC-NEXT: v_accvgpr_write_b32 a7, v23
+; HEURRC-NEXT: v_accvgpr_write_b32 a8, v24
+; HEURRC-NEXT: v_accvgpr_write_b32 a9, v25
+; HEURRC-NEXT: v_accvgpr_write_b32 a10, v26
+; HEURRC-NEXT: v_accvgpr_write_b32 a11, v27
+; HEURRC-NEXT: v_accvgpr_write_b32 a12, v28
+; HEURRC-NEXT: v_accvgpr_write_b32 a13, v29
+; HEURRC-NEXT: v_accvgpr_write_b32 a14, v30
+; HEURRC-NEXT: s_waitcnt vmcnt(0)
+; HEURRC-NEXT: s_nop 0
+; HEURRC-NEXT: v_mfma_scale_f32_32x32x64_f8f6f4 a[0:15], v[0:7], v[8:15], a[0:15], v32, v31 op_sel_hi:[0,0,0]
+; HEURRC-NEXT: s_nop 15
+; HEURRC-NEXT: s_nop 3
+; HEURRC-NEXT: v_accvgpr_read_b32 v0, a0
+; HEURRC-NEXT: v_accvgpr_read_b32 v1, a1
+; HEURRC-NEXT: v_accvgpr_read_b32 v2, a2
+; HEURRC-NEXT: v_accvgpr_read_b32 v3, a3
+; HEURRC-NEXT: v_accvgpr_read_b32 v4, a4
+; HEURRC-NEXT: v_accvgpr_read_b32 v5, a5
+; HEURRC-NEXT: v_accvgpr_read_b32 v6, a6
+; HEURRC-NEXT: v_accvgpr_read_b32 v7, a7
+; HEURRC-NEXT: v_accvgpr_read_b32 v8, a8
+; HEURRC-NEXT: v_accvgpr_read_b32 v9, a9
+; HEURRC-NEXT: v_accvgpr_read_b32 v10, a10
+; HEURRC-NEXT: v_accvgpr_read_b32 v11, a11
+; HEURRC-NEXT: v_accvgpr_read_b32 v12, a12
+; HEURRC-NEXT: v_accvgpr_read_b32 v13, a13
+; HEURRC-NEXT: v_accvgpr_read_b32 v14, a14
+; HEURRC-NEXT: v_accvgpr_read_b32 v15, a15
+; HEURRC-NEXT: s_setpc_b64 s[30:31]
+;
+; VGPRRC-LABEL: mfma_scale_respect_flag:
+; VGPRRC: ; %bb.0:
+; VGPRRC-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VGPRRC-NEXT: scratch_load_dword v31, off, s32
+; VGPRRC-NEXT: scratch_load_dword v32, off, s32 offset:8
+; VGPRRC-NEXT: scratch_load_dword v33, off, s32 offset:4
+; VGPRRC-NEXT: s_waitcnt vmcnt(0)
+; VGPRRC-NEXT: v_mfma_scale_f32_32x32x64_f8f6f4 v[16:31], v[0:7], v[8:15], v[16:31], v33, v32 op_sel_hi:[0,0,0]
+; VGPRRC-NEXT: s_nop 15
+; VGPRRC-NEXT: s_nop 3
+; VGPRRC-NEXT: v_mov_b32_e32 v0, v16
+; VGPRRC-NEXT: v_mov_b32_e32 v1, v17
+; VGPRRC-NEXT: v_mov_b32_e32 v2, v18
+; VGPRRC-NEXT: v_mov_b32_e32 v3, v19
+; VGPRRC-NEXT: v_mov_b32_e32 v4, v20
+; VGPRRC-NEXT: v_mov_b32_e32 v5, v21
+; VGPRRC-NEXT: v_mov_b32_e32 v6, v22
+; VGPRRC-NEXT: v_mov_b32_e32 v7, v23
+; VGPRRC-NEXT: v_mov_b32_e32 v8, v24
+; VGPRRC-NEXT: v_mov_b32_e32 v9, v25
+; VGPRRC-NEXT: v_mov_b32_e32 v10, v26
+; VGPRRC-NEXT: v_mov_b32_e32 v11, v27
+; VGPRRC-NEXT: v_mov_b32_e32 v12, v28
+; VGPRRC-NEXT: v_mov_b32_e32 v13, v29
+; VGPRRC-NEXT: v_mov_b32_e32 v14, v30
+; VGPRRC-NEXT: v_mov_b32_e32 v15, v31
+; VGPRRC-NEXT: s_setpc_b64 s[30:31]
+ %result = call <16 x float> @llvm.amdgcn.mfma.scale.f32.32x32x64.f8f6f4.v8i32.v8i32(<8 x i32> %arg0, <8 x i32> %arg1, <16 x float> %arg2,
+ i32 0, ; cbsz
+ i32 0, ; blgp
+ i32 0, i32 %scale0, i32 0, i32 %scale1)
+ ret <16 x float> %result
+}
+
attributes #0 = { "amdgpu-agpr-alloc"="32,256" }
attributes #1 = { "amdgpu-agpr-alloc"="0,0" }
+attributes #2 = { nounwind "amdgpu-agpr-alloc"="20" }
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx90a.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx90a.ll
index 5ab8706..22bc62a 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx90a.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx90a.ll
@@ -726,12 +726,12 @@ define amdgpu_kernel void @test_mfma_f64_4x4x4f64(ptr addrspace(1) %arg, double
; GFX90A-VGPR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX90A-VGPR-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX90A-VGPR-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-VGPR-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
-; GFX90A-VGPR-NEXT: v_pk_mov_b32 v[2:3], s[6:7], s[6:7] op_sel:[0,1]
+; GFX90A-VGPR-NEXT: v_pk_mov_b32 v[2:3], s[2:3], s[2:3] op_sel:[0,1]
+; GFX90A-VGPR-NEXT: v_pk_mov_b32 v[4:5], s[6:7], s[6:7] op_sel:[0,1]
; GFX90A-VGPR-NEXT: s_nop 1
-; GFX90A-VGPR-NEXT: v_mfma_f64_4x4x4f64 v[4:5], v[0:1], v[2:3], 0
+; GFX90A-VGPR-NEXT: v_mfma_f64_4x4x4f64 v[0:1], v[2:3], v[4:5], 0
; GFX90A-VGPR-NEXT: s_nop 3
-; GFX90A-VGPR-NEXT: v_mfma_f64_4x4x4f64 v[0:1], v[0:1], v[2:3], v[4:5] cbsz:1 abid:2 blgp:3
+; GFX90A-VGPR-NEXT: v_mfma_f64_4x4x4f64 v[0:1], v[2:3], v[4:5], v[0:1] cbsz:1 abid:2 blgp:3
; GFX90A-VGPR-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-VGPR-NEXT: s_nop 7
; GFX90A-VGPR-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -742,12 +742,12 @@ define amdgpu_kernel void @test_mfma_f64_4x4x4f64(ptr addrspace(1) %arg, double
; GFX942-VGPR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX942-VGPR-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX942-VGPR-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-VGPR-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX942-VGPR-NEXT: v_mov_b64_e32 v[2:3], s[6:7]
+; GFX942-VGPR-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX942-VGPR-NEXT: v_mov_b64_e32 v[4:5], s[6:7]
; GFX942-VGPR-NEXT: s_nop 1
-; GFX942-VGPR-NEXT: v_mfma_f64_4x4x4_4b_f64 v[4:5], v[0:1], v[2:3], 0
+; GFX942-VGPR-NEXT: v_mfma_f64_4x4x4_4b_f64 v[0:1], v[2:3], v[4:5], 0
; GFX942-VGPR-NEXT: s_nop 3
-; GFX942-VGPR-NEXT: v_mfma_f64_4x4x4_4b_f64 v[0:1], v[0:1], v[2:3], v[4:5] cbsz:1 abid:2 neg:[1,1,0]
+; GFX942-VGPR-NEXT: v_mfma_f64_4x4x4_4b_f64 v[0:1], v[2:3], v[4:5], v[0:1] cbsz:1 abid:2 neg:[1,1,0]
; GFX942-VGPR-NEXT: v_mov_b32_e32 v2, 0
; GFX942-VGPR-NEXT: s_nop 7
; GFX942-VGPR-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -765,10 +765,10 @@ define amdgpu_kernel void @test_mfma_f64_16x16x4f64(ptr addrspace(1) %arg, doubl
; GFX90A-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
; GFX90A-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x34
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_mov_b32_e32 v2, s10
+; GFX90A-NEXT: v_mov_b32_e32 v0, s10
; GFX90A-NEXT: s_load_dwordx8 s[0:7], s[8:9], 0x0
-; GFX90A-NEXT: v_mov_b32_e32 v3, s11
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[12:13], s[12:13] op_sel:[0,1]
+; GFX90A-NEXT: v_mov_b32_e32 v1, s11
+; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[12:13], s[12:13] op_sel:[0,1]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_accvgpr_write_b32 a0, s0
; GFX90A-NEXT: v_accvgpr_write_b32 a1, s1
@@ -779,7 +779,7 @@ define amdgpu_kernel void @test_mfma_f64_16x16x4f64(ptr addrspace(1) %arg, doubl
; GFX90A-NEXT: v_accvgpr_write_b32 a6, s6
; GFX90A-NEXT: v_accvgpr_write_b32 a7, s7
; GFX90A-NEXT: s_nop 1
-; GFX90A-NEXT: v_mfma_f64_16x16x4f64 a[0:7], v[2:3], v[0:1], a[0:7] cbsz:1 abid:2 blgp:3
+; GFX90A-NEXT: v_mfma_f64_16x16x4f64 a[0:7], v[0:1], v[2:3], a[0:7] cbsz:1 abid:2 blgp:3
; GFX90A-NEXT: v_mov_b32_e32 v0, 0
; GFX90A-NEXT: s_nop 15
; GFX90A-NEXT: s_nop 0
@@ -792,10 +792,10 @@ define amdgpu_kernel void @test_mfma_f64_16x16x4f64(ptr addrspace(1) %arg, doubl
; GFX942-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
; GFX942-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x34
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_mov_b32_e32 v2, s10
+; GFX942-NEXT: v_mov_b32_e32 v0, s10
; GFX942-NEXT: s_load_dwordx8 s[0:7], s[8:9], 0x0
-; GFX942-NEXT: v_mov_b32_e32 v3, s11
-; GFX942-NEXT: v_mov_b64_e32 v[0:1], s[12:13]
+; GFX942-NEXT: v_mov_b32_e32 v1, s11
+; GFX942-NEXT: v_mov_b64_e32 v[2:3], s[12:13]
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: v_accvgpr_write_b32 a0, s0
; GFX942-NEXT: v_accvgpr_write_b32 a1, s1
@@ -806,7 +806,7 @@ define amdgpu_kernel void @test_mfma_f64_16x16x4f64(ptr addrspace(1) %arg, doubl
; GFX942-NEXT: v_accvgpr_write_b32 a6, s6
; GFX942-NEXT: v_accvgpr_write_b32 a7, s7
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_mfma_f64_16x16x4_f64 a[0:7], v[2:3], v[0:1], a[0:7] cbsz:1 abid:2 neg:[1,1,0]
+; GFX942-NEXT: v_mfma_f64_16x16x4_f64 a[0:7], v[0:1], v[2:3], a[0:7] cbsz:1 abid:2 neg:[1,1,0]
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: s_nop 15
; GFX942-NEXT: s_nop 0
@@ -819,17 +819,17 @@ define amdgpu_kernel void @test_mfma_f64_16x16x4f64(ptr addrspace(1) %arg, doubl
; GFX90A-VGPR-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
; GFX90A-VGPR-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x34
; GFX90A-VGPR-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-VGPR-NEXT: v_mov_b32_e32 v10, s10
+; GFX90A-VGPR-NEXT: v_mov_b32_e32 v8, s10
; GFX90A-VGPR-NEXT: s_load_dwordx8 s[0:7], s[8:9], 0x0
-; GFX90A-VGPR-NEXT: v_mov_b32_e32 v11, s11
-; GFX90A-VGPR-NEXT: v_pk_mov_b32 v[8:9], s[12:13], s[12:13] op_sel:[0,1]
+; GFX90A-VGPR-NEXT: v_mov_b32_e32 v9, s11
+; GFX90A-VGPR-NEXT: v_pk_mov_b32 v[10:11], s[12:13], s[12:13] op_sel:[0,1]
; GFX90A-VGPR-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-VGPR-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1]
; GFX90A-VGPR-NEXT: v_pk_mov_b32 v[2:3], s[2:3], s[2:3] op_sel:[0,1]
; GFX90A-VGPR-NEXT: v_pk_mov_b32 v[4:5], s[4:5], s[4:5] op_sel:[0,1]
; GFX90A-VGPR-NEXT: v_pk_mov_b32 v[6:7], s[6:7], s[6:7] op_sel:[0,1]
; GFX90A-VGPR-NEXT: s_nop 1
-; GFX90A-VGPR-NEXT: v_mfma_f64_16x16x4f64 v[0:7], v[10:11], v[8:9], v[0:7] cbsz:1 abid:2 blgp:3
+; GFX90A-VGPR-NEXT: v_mfma_f64_16x16x4f64 v[0:7], v[8:9], v[10:11], v[0:7] cbsz:1 abid:2 blgp:3
; GFX90A-VGPR-NEXT: v_mov_b32_e32 v8, 0
; GFX90A-VGPR-NEXT: s_nop 15
; GFX90A-VGPR-NEXT: s_nop 0
@@ -842,17 +842,17 @@ define amdgpu_kernel void @test_mfma_f64_16x16x4f64(ptr addrspace(1) %arg, doubl
; GFX942-VGPR-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
; GFX942-VGPR-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x34
; GFX942-VGPR-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-VGPR-NEXT: v_mov_b32_e32 v10, s10
+; GFX942-VGPR-NEXT: v_mov_b32_e32 v8, s10
; GFX942-VGPR-NEXT: s_load_dwordx8 s[0:7], s[8:9], 0x0
-; GFX942-VGPR-NEXT: v_mov_b32_e32 v11, s11
-; GFX942-VGPR-NEXT: v_mov_b64_e32 v[8:9], s[12:13]
+; GFX942-VGPR-NEXT: v_mov_b32_e32 v9, s11
+; GFX942-VGPR-NEXT: v_mov_b64_e32 v[10:11], s[12:13]
; GFX942-VGPR-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-VGPR-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
; GFX942-VGPR-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX942-VGPR-NEXT: v_mov_b64_e32 v[4:5], s[4:5]
; GFX942-VGPR-NEXT: v_mov_b64_e32 v[6:7], s[6:7]
; GFX942-VGPR-NEXT: s_nop 1
-; GFX942-VGPR-NEXT: v_mfma_f64_16x16x4_f64 v[0:7], v[10:11], v[8:9], v[0:7] cbsz:1 abid:2 neg:[1,1,0]
+; GFX942-VGPR-NEXT: v_mfma_f64_16x16x4_f64 v[0:7], v[8:9], v[10:11], v[0:7] cbsz:1 abid:2 neg:[1,1,0]
; GFX942-VGPR-NEXT: v_mov_b32_e32 v8, 0
; GFX942-VGPR-NEXT: s_nop 15
; GFX942-VGPR-NEXT: s_nop 0
@@ -1629,20 +1629,20 @@ define amdgpu_kernel void @test_mfma_f64_16x16x4f64_imm(ptr addrspace(1) %arg, d
; GFX90A-VGPR-NEXT: v_mov_b32_e32 v7, 0x3ff00000
; GFX90A-VGPR-NEXT: v_mov_b32_e32 v2, v0
; GFX90A-VGPR-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-VGPR-NEXT: v_mov_b32_e32 v12, s2
-; GFX90A-VGPR-NEXT: v_mov_b32_e32 v13, s3
+; GFX90A-VGPR-NEXT: v_mov_b32_e32 v10, s2
+; GFX90A-VGPR-NEXT: v_mov_b32_e32 v11, s3
; GFX90A-VGPR-NEXT: v_mov_b32_e32 v3, v0
; GFX90A-VGPR-NEXT: v_mov_b32_e32 v4, v0
; GFX90A-VGPR-NEXT: v_mov_b32_e32 v5, v0
; GFX90A-VGPR-NEXT: v_mov_b32_e32 v6, v0
; GFX90A-VGPR-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-VGPR-NEXT: v_pk_mov_b32 v[8:9], v[6:7], v[6:7] op_sel:[0,1]
-; GFX90A-VGPR-NEXT: v_pk_mov_b32 v[10:11], s[6:7], s[6:7] op_sel:[0,1]
+; GFX90A-VGPR-NEXT: v_pk_mov_b32 v[12:13], s[6:7], s[6:7] op_sel:[0,1]
; GFX90A-VGPR-NEXT: v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1]
; GFX90A-VGPR-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1]
; GFX90A-VGPR-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-VGPR-NEXT: s_nop 1
-; GFX90A-VGPR-NEXT: v_mfma_f64_16x16x4f64 v[2:9], v[12:13], v[10:11], v[2:9]
+; GFX90A-VGPR-NEXT: v_mfma_f64_16x16x4f64 v[2:9], v[10:11], v[12:13], v[2:9]
; GFX90A-VGPR-NEXT: s_nop 15
; GFX90A-VGPR-NEXT: s_nop 1
; GFX90A-VGPR-NEXT: global_store_dwordx4 v0, v[6:9], s[0:1] offset:16
@@ -1657,20 +1657,20 @@ define amdgpu_kernel void @test_mfma_f64_16x16x4f64_imm(ptr addrspace(1) %arg, d
; GFX942-VGPR-NEXT: v_mov_b32_e32 v7, 0x3ff00000
; GFX942-VGPR-NEXT: v_mov_b32_e32 v2, v0
; GFX942-VGPR-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-VGPR-NEXT: v_mov_b32_e32 v12, s2
-; GFX942-VGPR-NEXT: v_mov_b32_e32 v13, s3
+; GFX942-VGPR-NEXT: v_mov_b32_e32 v10, s2
+; GFX942-VGPR-NEXT: v_mov_b32_e32 v11, s3
; GFX942-VGPR-NEXT: v_mov_b32_e32 v3, v0
; GFX942-VGPR-NEXT: v_mov_b32_e32 v4, v0
; GFX942-VGPR-NEXT: v_mov_b32_e32 v5, v0
; GFX942-VGPR-NEXT: v_mov_b32_e32 v6, v0
; GFX942-VGPR-NEXT: v_mov_b32_e32 v1, v0
; GFX942-VGPR-NEXT: v_mov_b64_e32 v[8:9], v[6:7]
-; GFX942-VGPR-NEXT: v_mov_b64_e32 v[10:11], s[6:7]
+; GFX942-VGPR-NEXT: v_mov_b64_e32 v[12:13], s[6:7]
; GFX942-VGPR-NEXT: v_mov_b64_e32 v[6:7], v[4:5]
; GFX942-VGPR-NEXT: v_mov_b64_e32 v[4:5], v[2:3]
; GFX942-VGPR-NEXT: v_mov_b64_e32 v[2:3], v[0:1]
; GFX942-VGPR-NEXT: s_nop 1
-; GFX942-VGPR-NEXT: v_mfma_f64_16x16x4_f64 v[2:9], v[12:13], v[10:11], v[2:9]
+; GFX942-VGPR-NEXT: v_mfma_f64_16x16x4_f64 v[2:9], v[10:11], v[12:13], v[2:9]
; GFX942-VGPR-NEXT: s_nop 15
; GFX942-VGPR-NEXT: s_nop 1
; GFX942-VGPR-NEXT: global_store_dwordx4 v0, v[6:9], s[0:1] offset:16
@@ -1743,20 +1743,20 @@ define amdgpu_kernel void @test_mfma_f64_16x16x4f64_splat_lit(ptr addrspace(1) %
; GFX90A-VGPR-NEXT: v_mov_b32_e32 v1, 0x405ec000
; GFX90A-VGPR-NEXT: v_mov_b32_e32 v2, v0
; GFX90A-VGPR-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-VGPR-NEXT: v_mov_b32_e32 v12, s2
-; GFX90A-VGPR-NEXT: v_mov_b32_e32 v13, s3
+; GFX90A-VGPR-NEXT: v_mov_b32_e32 v10, s2
+; GFX90A-VGPR-NEXT: v_mov_b32_e32 v11, s3
; GFX90A-VGPR-NEXT: v_mov_b32_e32 v3, v1
; GFX90A-VGPR-NEXT: v_mov_b32_e32 v4, v0
; GFX90A-VGPR-NEXT: v_mov_b32_e32 v5, v1
; GFX90A-VGPR-NEXT: v_mov_b32_e32 v6, v0
; GFX90A-VGPR-NEXT: v_mov_b32_e32 v7, v1
; GFX90A-VGPR-NEXT: v_pk_mov_b32 v[8:9], v[6:7], v[6:7] op_sel:[0,1]
-; GFX90A-VGPR-NEXT: v_pk_mov_b32 v[10:11], s[6:7], s[6:7] op_sel:[0,1]
+; GFX90A-VGPR-NEXT: v_pk_mov_b32 v[12:13], s[6:7], s[6:7] op_sel:[0,1]
; GFX90A-VGPR-NEXT: v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1]
; GFX90A-VGPR-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1]
; GFX90A-VGPR-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-VGPR-NEXT: s_nop 1
-; GFX90A-VGPR-NEXT: v_mfma_f64_16x16x4f64 v[2:9], v[12:13], v[10:11], v[2:9]
+; GFX90A-VGPR-NEXT: v_mfma_f64_16x16x4f64 v[2:9], v[10:11], v[12:13], v[2:9]
; GFX90A-VGPR-NEXT: s_nop 15
; GFX90A-VGPR-NEXT: s_nop 1
; GFX90A-VGPR-NEXT: global_store_dwordx4 v0, v[6:9], s[0:1] offset:16
@@ -1771,20 +1771,20 @@ define amdgpu_kernel void @test_mfma_f64_16x16x4f64_splat_lit(ptr addrspace(1) %
; GFX942-VGPR-NEXT: v_mov_b32_e32 v1, 0x405ec000
; GFX942-VGPR-NEXT: v_mov_b32_e32 v2, v0
; GFX942-VGPR-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-VGPR-NEXT: v_mov_b32_e32 v12, s2
-; GFX942-VGPR-NEXT: v_mov_b32_e32 v13, s3
+; GFX942-VGPR-NEXT: v_mov_b32_e32 v10, s2
+; GFX942-VGPR-NEXT: v_mov_b32_e32 v11, s3
; GFX942-VGPR-NEXT: v_mov_b32_e32 v3, v1
; GFX942-VGPR-NEXT: v_mov_b32_e32 v4, v0
; GFX942-VGPR-NEXT: v_mov_b32_e32 v5, v1
; GFX942-VGPR-NEXT: v_mov_b32_e32 v6, v0
; GFX942-VGPR-NEXT: v_mov_b32_e32 v7, v1
; GFX942-VGPR-NEXT: v_mov_b64_e32 v[8:9], v[6:7]
-; GFX942-VGPR-NEXT: v_mov_b64_e32 v[10:11], s[6:7]
+; GFX942-VGPR-NEXT: v_mov_b64_e32 v[12:13], s[6:7]
; GFX942-VGPR-NEXT: v_mov_b64_e32 v[6:7], v[4:5]
; GFX942-VGPR-NEXT: v_mov_b64_e32 v[4:5], v[2:3]
; GFX942-VGPR-NEXT: v_mov_b64_e32 v[2:3], v[0:1]
; GFX942-VGPR-NEXT: s_nop 1
-; GFX942-VGPR-NEXT: v_mfma_f64_16x16x4_f64 v[2:9], v[12:13], v[10:11], v[2:9]
+; GFX942-VGPR-NEXT: v_mfma_f64_16x16x4_f64 v[2:9], v[10:11], v[12:13], v[2:9]
; GFX942-VGPR-NEXT: s_nop 15
; GFX942-VGPR-NEXT: s_nop 1
; GFX942-VGPR-NEXT: global_store_dwordx4 v0, v[6:9], s[0:1] offset:16
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx942.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx942.ll
index dc4c9291..2fb677e 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx942.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx942.ll
@@ -1445,20 +1445,20 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x32_f16(ptr addrspace(1) %arg, <
; GFX942-SDAG: ; %bb.0: ; %bb
; GFX942-SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
; GFX942-SDAG-NEXT: s_load_dword s6, s[4:5], 0x44
-; GFX942-SDAG-NEXT: v_mov_b32_e32 v6, 0
+; GFX942-SDAG-NEXT: v_mov_b32_e32 v0, 0
; GFX942-SDAG-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-SDAG-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
-; GFX942-SDAG-NEXT: v_mov_b64_e32 v[4:5], s[10:11]
-; GFX942-SDAG-NEXT: v_mov_b64_e32 v[0:1], s[12:13]
-; GFX942-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[14:15]
-; GFX942-SDAG-NEXT: v_mov_b32_e32 v7, s6
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[10:11], s[10:11]
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[12:13]
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[4:5], s[14:15]
+; GFX942-SDAG-NEXT: v_mov_b32_e32 v1, s6
; GFX942-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-SDAG-NEXT: v_mov_b64_e32 v[10:11], s[2:3]
-; GFX942-SDAG-NEXT: v_mov_b64_e32 v[8:9], s[0:1]
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[8:9], s[2:3]
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[6:7], s[0:1]
; GFX942-SDAG-NEXT: s_nop 1
-; GFX942-SDAG-NEXT: v_smfmac_f32_16x16x32_f16 v[8:11], v[4:5], v[0:3], v7 cbsz:1 abid:2
+; GFX942-SDAG-NEXT: v_smfmac_f32_16x16x32_f16 v[6:9], v[10:11], v[2:5], v1 cbsz:1 abid:2
; GFX942-SDAG-NEXT: s_nop 6
-; GFX942-SDAG-NEXT: global_store_dwordx4 v6, v[8:11], s[8:9]
+; GFX942-SDAG-NEXT: global_store_dwordx4 v0, v[6:9], s[8:9]
; GFX942-SDAG-NEXT: s_endpgm
;
; GFX942-GISEL-LABEL: test_smfmac_f32_16x16x32_f16:
@@ -1485,20 +1485,20 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x32_f16(ptr addrspace(1) %arg, <
; GFX950-SDAG: ; %bb.0: ; %bb
; GFX950-SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
; GFX950-SDAG-NEXT: s_load_dword s6, s[4:5], 0x44
-; GFX950-SDAG-NEXT: v_mov_b32_e32 v6, 0
+; GFX950-SDAG-NEXT: v_mov_b32_e32 v0, 0
; GFX950-SDAG-NEXT: s_waitcnt lgkmcnt(0)
; GFX950-SDAG-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
-; GFX950-SDAG-NEXT: v_mov_b64_e32 v[4:5], s[10:11]
-; GFX950-SDAG-NEXT: v_mov_b64_e32 v[0:1], s[12:13]
-; GFX950-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[14:15]
-; GFX950-SDAG-NEXT: v_mov_b32_e32 v7, s6
+; GFX950-SDAG-NEXT: v_mov_b64_e32 v[10:11], s[10:11]
+; GFX950-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[12:13]
+; GFX950-SDAG-NEXT: v_mov_b64_e32 v[4:5], s[14:15]
+; GFX950-SDAG-NEXT: v_mov_b32_e32 v1, s6
; GFX950-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX950-SDAG-NEXT: v_mov_b64_e32 v[10:11], s[2:3]
-; GFX950-SDAG-NEXT: v_mov_b64_e32 v[8:9], s[0:1]
+; GFX950-SDAG-NEXT: v_mov_b64_e32 v[8:9], s[2:3]
+; GFX950-SDAG-NEXT: v_mov_b64_e32 v[6:7], s[0:1]
; GFX950-SDAG-NEXT: s_nop 1
-; GFX950-SDAG-NEXT: v_smfmac_f32_16x16x32_f16 v[8:11], v[4:5], v[0:3], v7 cbsz:1 abid:2
+; GFX950-SDAG-NEXT: v_smfmac_f32_16x16x32_f16 v[6:9], v[10:11], v[2:5], v1 cbsz:1 abid:2
; GFX950-SDAG-NEXT: s_nop 7
-; GFX950-SDAG-NEXT: global_store_dwordx4 v6, v[8:11], s[8:9]
+; GFX950-SDAG-NEXT: global_store_dwordx4 v0, v[6:9], s[8:9]
; GFX950-SDAG-NEXT: s_endpgm
;
; GFX950-GISEL-LABEL: test_smfmac_f32_16x16x32_f16:
@@ -1577,11 +1577,11 @@ define amdgpu_kernel void @test_smfmac_f32_32x32x16_f16(ptr addrspace(1) %arg, <
; GFX942-SDAG-NEXT: s_load_dwordx8 s[16:23], s[4:5], 0x24
; GFX942-SDAG-NEXT: s_load_dword s24, s[4:5], 0x44
; GFX942-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-SDAG-NEXT: v_mov_b64_e32 v[20:21], s[18:19]
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[22:23], s[18:19]
; GFX942-SDAG-NEXT: s_load_dwordx16 s[0:15], s[16:17], 0x0
-; GFX942-SDAG-NEXT: v_mov_b64_e32 v[16:17], s[20:21]
-; GFX942-SDAG-NEXT: v_mov_b64_e32 v[18:19], s[22:23]
-; GFX942-SDAG-NEXT: v_mov_b32_e32 v22, s24
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[18:19], s[20:21]
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[20:21], s[22:23]
+; GFX942-SDAG-NEXT: v_mov_b32_e32 v16, s24
; GFX942-SDAG-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-SDAG-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
; GFX942-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
@@ -1592,7 +1592,7 @@ define amdgpu_kernel void @test_smfmac_f32_32x32x16_f16(ptr addrspace(1) %arg, <
; GFX942-SDAG-NEXT: v_mov_b64_e32 v[12:13], s[12:13]
; GFX942-SDAG-NEXT: v_mov_b64_e32 v[14:15], s[14:15]
; GFX942-SDAG-NEXT: s_nop 1
-; GFX942-SDAG-NEXT: v_smfmac_f32_32x32x16_f16 v[0:15], v[20:21], v[16:19], v22 cbsz:1 abid:2
+; GFX942-SDAG-NEXT: v_smfmac_f32_32x32x16_f16 v[0:15], v[22:23], v[18:21], v16 cbsz:1 abid:2
; GFX942-SDAG-NEXT: v_mov_b32_e32 v16, 0
; GFX942-SDAG-NEXT: s_nop 9
; GFX942-SDAG-NEXT: global_store_dwordx4 v16, v[12:15], s[16:17] offset:48
@@ -1635,11 +1635,11 @@ define amdgpu_kernel void @test_smfmac_f32_32x32x16_f16(ptr addrspace(1) %arg, <
; GFX950-SDAG-NEXT: s_load_dwordx8 s[16:23], s[4:5], 0x24
; GFX950-SDAG-NEXT: s_load_dword s24, s[4:5], 0x44
; GFX950-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX950-SDAG-NEXT: v_mov_b64_e32 v[20:21], s[18:19]
+; GFX950-SDAG-NEXT: v_mov_b64_e32 v[22:23], s[18:19]
; GFX950-SDAG-NEXT: s_load_dwordx16 s[0:15], s[16:17], 0x0
-; GFX950-SDAG-NEXT: v_mov_b64_e32 v[16:17], s[20:21]
-; GFX950-SDAG-NEXT: v_mov_b64_e32 v[18:19], s[22:23]
-; GFX950-SDAG-NEXT: v_mov_b32_e32 v22, s24
+; GFX950-SDAG-NEXT: v_mov_b64_e32 v[18:19], s[20:21]
+; GFX950-SDAG-NEXT: v_mov_b64_e32 v[20:21], s[22:23]
+; GFX950-SDAG-NEXT: v_mov_b32_e32 v16, s24
; GFX950-SDAG-NEXT: s_waitcnt lgkmcnt(0)
; GFX950-SDAG-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
; GFX950-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
@@ -1650,7 +1650,7 @@ define amdgpu_kernel void @test_smfmac_f32_32x32x16_f16(ptr addrspace(1) %arg, <
; GFX950-SDAG-NEXT: v_mov_b64_e32 v[12:13], s[12:13]
; GFX950-SDAG-NEXT: v_mov_b64_e32 v[14:15], s[14:15]
; GFX950-SDAG-NEXT: s_nop 1
-; GFX950-SDAG-NEXT: v_smfmac_f32_32x32x16_f16 v[0:15], v[20:21], v[16:19], v22 cbsz:1 abid:2
+; GFX950-SDAG-NEXT: v_smfmac_f32_32x32x16_f16 v[0:15], v[22:23], v[18:21], v16 cbsz:1 abid:2
; GFX950-SDAG-NEXT: v_mov_b32_e32 v16, 0
; GFX950-SDAG-NEXT: s_nop 10
; GFX950-SDAG-NEXT: global_store_dwordx4 v16, v[12:15], s[16:17] offset:48
@@ -1847,20 +1847,20 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x32_bf16(ptr addrspace(1) %arg,
; GFX942-SDAG: ; %bb.0: ; %bb
; GFX942-SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
; GFX942-SDAG-NEXT: s_load_dword s6, s[4:5], 0x44
-; GFX942-SDAG-NEXT: v_mov_b32_e32 v6, 0
+; GFX942-SDAG-NEXT: v_mov_b32_e32 v0, 0
; GFX942-SDAG-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-SDAG-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
-; GFX942-SDAG-NEXT: v_mov_b64_e32 v[4:5], s[10:11]
-; GFX942-SDAG-NEXT: v_mov_b64_e32 v[0:1], s[12:13]
-; GFX942-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[14:15]
-; GFX942-SDAG-NEXT: v_mov_b32_e32 v7, s6
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[10:11], s[10:11]
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[12:13]
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[4:5], s[14:15]
+; GFX942-SDAG-NEXT: v_mov_b32_e32 v1, s6
; GFX942-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-SDAG-NEXT: v_mov_b64_e32 v[10:11], s[2:3]
-; GFX942-SDAG-NEXT: v_mov_b64_e32 v[8:9], s[0:1]
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[8:9], s[2:3]
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[6:7], s[0:1]
; GFX942-SDAG-NEXT: s_nop 1
-; GFX942-SDAG-NEXT: v_smfmac_f32_16x16x32_bf16 v[8:11], v[4:5], v[0:3], v7 cbsz:1 abid:2
+; GFX942-SDAG-NEXT: v_smfmac_f32_16x16x32_bf16 v[6:9], v[10:11], v[2:5], v1 cbsz:1 abid:2
; GFX942-SDAG-NEXT: s_nop 6
-; GFX942-SDAG-NEXT: global_store_dwordx4 v6, v[8:11], s[8:9]
+; GFX942-SDAG-NEXT: global_store_dwordx4 v0, v[6:9], s[8:9]
; GFX942-SDAG-NEXT: s_endpgm
;
; GFX942-GISEL-LABEL: test_smfmac_f32_16x16x32_bf16:
@@ -1887,20 +1887,20 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x32_bf16(ptr addrspace(1) %arg,
; GFX950-SDAG: ; %bb.0: ; %bb
; GFX950-SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
; GFX950-SDAG-NEXT: s_load_dword s6, s[4:5], 0x44
-; GFX950-SDAG-NEXT: v_mov_b32_e32 v6, 0
+; GFX950-SDAG-NEXT: v_mov_b32_e32 v0, 0
; GFX950-SDAG-NEXT: s_waitcnt lgkmcnt(0)
; GFX950-SDAG-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
-; GFX950-SDAG-NEXT: v_mov_b64_e32 v[4:5], s[10:11]
-; GFX950-SDAG-NEXT: v_mov_b64_e32 v[0:1], s[12:13]
-; GFX950-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[14:15]
-; GFX950-SDAG-NEXT: v_mov_b32_e32 v7, s6
+; GFX950-SDAG-NEXT: v_mov_b64_e32 v[10:11], s[10:11]
+; GFX950-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[12:13]
+; GFX950-SDAG-NEXT: v_mov_b64_e32 v[4:5], s[14:15]
+; GFX950-SDAG-NEXT: v_mov_b32_e32 v1, s6
; GFX950-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX950-SDAG-NEXT: v_mov_b64_e32 v[10:11], s[2:3]
-; GFX950-SDAG-NEXT: v_mov_b64_e32 v[8:9], s[0:1]
+; GFX950-SDAG-NEXT: v_mov_b64_e32 v[8:9], s[2:3]
+; GFX950-SDAG-NEXT: v_mov_b64_e32 v[6:7], s[0:1]
; GFX950-SDAG-NEXT: s_nop 1
-; GFX950-SDAG-NEXT: v_smfmac_f32_16x16x32_bf16 v[8:11], v[4:5], v[0:3], v7 cbsz:1 abid:2
+; GFX950-SDAG-NEXT: v_smfmac_f32_16x16x32_bf16 v[6:9], v[10:11], v[2:5], v1 cbsz:1 abid:2
; GFX950-SDAG-NEXT: s_nop 7
-; GFX950-SDAG-NEXT: global_store_dwordx4 v6, v[8:11], s[8:9]
+; GFX950-SDAG-NEXT: global_store_dwordx4 v0, v[6:9], s[8:9]
; GFX950-SDAG-NEXT: s_endpgm
;
; GFX950-GISEL-LABEL: test_smfmac_f32_16x16x32_bf16:
@@ -1979,11 +1979,11 @@ define amdgpu_kernel void @test_smfmac_f32_32x32x16_bf16(ptr addrspace(1) %arg,
; GFX942-SDAG-NEXT: s_load_dwordx8 s[16:23], s[4:5], 0x24
; GFX942-SDAG-NEXT: s_load_dword s24, s[4:5], 0x44
; GFX942-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-SDAG-NEXT: v_mov_b64_e32 v[20:21], s[18:19]
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[22:23], s[18:19]
; GFX942-SDAG-NEXT: s_load_dwordx16 s[0:15], s[16:17], 0x0
-; GFX942-SDAG-NEXT: v_mov_b64_e32 v[16:17], s[20:21]
-; GFX942-SDAG-NEXT: v_mov_b64_e32 v[18:19], s[22:23]
-; GFX942-SDAG-NEXT: v_mov_b32_e32 v22, s24
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[18:19], s[20:21]
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[20:21], s[22:23]
+; GFX942-SDAG-NEXT: v_mov_b32_e32 v16, s24
; GFX942-SDAG-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-SDAG-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
; GFX942-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
@@ -1994,7 +1994,7 @@ define amdgpu_kernel void @test_smfmac_f32_32x32x16_bf16(ptr addrspace(1) %arg,
; GFX942-SDAG-NEXT: v_mov_b64_e32 v[12:13], s[12:13]
; GFX942-SDAG-NEXT: v_mov_b64_e32 v[14:15], s[14:15]
; GFX942-SDAG-NEXT: s_nop 1
-; GFX942-SDAG-NEXT: v_smfmac_f32_32x32x16_bf16 v[0:15], v[20:21], v[16:19], v22 cbsz:1 abid:2
+; GFX942-SDAG-NEXT: v_smfmac_f32_32x32x16_bf16 v[0:15], v[22:23], v[18:21], v16 cbsz:1 abid:2
; GFX942-SDAG-NEXT: v_mov_b32_e32 v16, 0
; GFX942-SDAG-NEXT: s_nop 9
; GFX942-SDAG-NEXT: global_store_dwordx4 v16, v[12:15], s[16:17] offset:48
@@ -2037,11 +2037,11 @@ define amdgpu_kernel void @test_smfmac_f32_32x32x16_bf16(ptr addrspace(1) %arg,
; GFX950-SDAG-NEXT: s_load_dwordx8 s[16:23], s[4:5], 0x24
; GFX950-SDAG-NEXT: s_load_dword s24, s[4:5], 0x44
; GFX950-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX950-SDAG-NEXT: v_mov_b64_e32 v[20:21], s[18:19]
+; GFX950-SDAG-NEXT: v_mov_b64_e32 v[22:23], s[18:19]
; GFX950-SDAG-NEXT: s_load_dwordx16 s[0:15], s[16:17], 0x0
-; GFX950-SDAG-NEXT: v_mov_b64_e32 v[16:17], s[20:21]
-; GFX950-SDAG-NEXT: v_mov_b64_e32 v[18:19], s[22:23]
-; GFX950-SDAG-NEXT: v_mov_b32_e32 v22, s24
+; GFX950-SDAG-NEXT: v_mov_b64_e32 v[18:19], s[20:21]
+; GFX950-SDAG-NEXT: v_mov_b64_e32 v[20:21], s[22:23]
+; GFX950-SDAG-NEXT: v_mov_b32_e32 v16, s24
; GFX950-SDAG-NEXT: s_waitcnt lgkmcnt(0)
; GFX950-SDAG-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
; GFX950-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
@@ -2052,7 +2052,7 @@ define amdgpu_kernel void @test_smfmac_f32_32x32x16_bf16(ptr addrspace(1) %arg,
; GFX950-SDAG-NEXT: v_mov_b64_e32 v[12:13], s[12:13]
; GFX950-SDAG-NEXT: v_mov_b64_e32 v[14:15], s[14:15]
; GFX950-SDAG-NEXT: s_nop 1
-; GFX950-SDAG-NEXT: v_smfmac_f32_32x32x16_bf16 v[0:15], v[20:21], v[16:19], v22 cbsz:1 abid:2
+; GFX950-SDAG-NEXT: v_smfmac_f32_32x32x16_bf16 v[0:15], v[22:23], v[18:21], v16 cbsz:1 abid:2
; GFX950-SDAG-NEXT: v_mov_b32_e32 v16, 0
; GFX950-SDAG-NEXT: s_nop 10
; GFX950-SDAG-NEXT: global_store_dwordx4 v16, v[12:15], s[16:17] offset:48
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll
index 033a35f..13a96cf 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll
@@ -15,15 +15,15 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_bf16(<8 x bfloat> %arg0, <8 x
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
; GCN-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
-; GCN-NEXT: v_mov_b64_e32 v[8:9], 48
-; GCN-NEXT: v_mov_b64_e32 v[10:11], 32
-; GCN-NEXT: v_mov_b64_e32 v[12:13], 16
+; GCN-NEXT: v_mov_b64_e32 v[0:1], 48
+; GCN-NEXT: v_mov_b64_e32 v[2:3], 32
+; GCN-NEXT: v_mov_b64_e32 v[4:5], 16
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: v_mov_b64_e32 v[0:1], s[24:25]
-; GCN-NEXT: v_mov_b64_e32 v[2:3], s[26:27]
-; GCN-NEXT: v_mov_b64_e32 v[4:5], s[28:29]
+; GCN-NEXT: v_mov_b64_e32 v[8:9], s[24:25]
+; GCN-NEXT: v_mov_b64_e32 v[10:11], s[26:27]
+; GCN-NEXT: v_mov_b64_e32 v[12:13], s[28:29]
; GCN-NEXT: v_accvgpr_write_b32 a0, s8
-; GCN-NEXT: v_mov_b64_e32 v[6:7], s[30:31]
+; GCN-NEXT: v_mov_b64_e32 v[14:15], s[30:31]
; GCN-NEXT: v_accvgpr_write_b32 a1, s9
; GCN-NEXT: v_accvgpr_write_b32 a2, s10
; GCN-NEXT: v_accvgpr_write_b32 a3, s11
@@ -41,40 +41,39 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_bf16(<8 x bfloat> %arg0, <8 x
; GCN-NEXT: v_accvgpr_write_b32 a15, s23
; GCN-NEXT: v_mov_b32_e32 v16, s16
; GCN-NEXT: v_mov_b32_e32 v17, s17
-; GCN-NEXT: v_mfma_f32_32x32x16_bf16 a[16:31], v[0:3], v[4:7], a[0:15]
+; GCN-NEXT: v_mfma_f32_32x32x16_bf16 a[16:31], v[8:11], v[12:15], a[0:15]
; GCN-NEXT: v_mov_b32_e32 v18, s18
; GCN-NEXT: v_mov_b32_e32 v19, s19
-; GCN-NEXT: v_mov_b32_e32 v0, s20
-; GCN-NEXT: v_mov_b32_e32 v1, s21
-; GCN-NEXT: v_mov_b32_e32 v2, s22
-; GCN-NEXT: v_mov_b32_e32 v3, s23
-; GCN-NEXT: v_mov_b64_e32 v[14:15], 0
+; GCN-NEXT: v_mov_b32_e32 v8, s20
+; GCN-NEXT: v_mov_b32_e32 v9, s21
+; GCN-NEXT: v_mov_b32_e32 v10, s22
+; GCN-NEXT: v_mov_b32_e32 v11, s23
+; GCN-NEXT: v_mov_b64_e32 v[6:7], 0
; GCN-NEXT: s_nop 4
-; GCN-NEXT: global_store_dwordx4 v[8:9], a[28:31], off sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v[0:1], a[28:31], off sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: global_store_dwordx4 v[10:11], a[24:27], off sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v[2:3], a[24:27], off sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: global_store_dwordx4 v[12:13], a[20:23], off sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v[4:5], a[20:23], off sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: global_store_dwordx4 v[14:15], a[16:19], off sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v[6:7], a[16:19], off sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: global_store_dwordx4 v[10:11], v[16:19], off sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v[2:3], v[16:19], off sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: global_store_dwordx4 v[8:9], v[0:3], off sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v[0:1], v[8:11], off sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, s8
; GCN-NEXT: v_mov_b32_e32 v1, s9
; GCN-NEXT: v_mov_b32_e32 v2, s10
; GCN-NEXT: v_mov_b32_e32 v3, s11
-; GCN-NEXT: global_store_dwordx4 v[14:15], v[0:3], off sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v[6:7], v[0:3], off sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, s12
; GCN-NEXT: v_mov_b32_e32 v1, s13
; GCN-NEXT: v_mov_b32_e32 v2, s14
; GCN-NEXT: v_mov_b32_e32 v3, s15
-; GCN-NEXT: global_store_dwordx4 v[12:13], v[0:3], off sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v[4:5], v[0:3], off sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_endpgm
%result = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.bf16(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <16 x float> %arg2, i32 0, i32 0, i32 0)
@@ -88,15 +87,15 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_bf16__flags(<8 x bfloat> %arg0
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
; GCN-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
-; GCN-NEXT: v_mov_b64_e32 v[8:9], 48
-; GCN-NEXT: v_mov_b64_e32 v[10:11], 32
-; GCN-NEXT: v_mov_b64_e32 v[12:13], 16
+; GCN-NEXT: v_mov_b64_e32 v[0:1], 48
+; GCN-NEXT: v_mov_b64_e32 v[2:3], 32
+; GCN-NEXT: v_mov_b64_e32 v[4:5], 16
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: v_mov_b64_e32 v[0:1], s[24:25]
-; GCN-NEXT: v_mov_b64_e32 v[2:3], s[26:27]
-; GCN-NEXT: v_mov_b64_e32 v[4:5], s[28:29]
+; GCN-NEXT: v_mov_b64_e32 v[8:9], s[24:25]
+; GCN-NEXT: v_mov_b64_e32 v[10:11], s[26:27]
+; GCN-NEXT: v_mov_b64_e32 v[12:13], s[28:29]
; GCN-NEXT: v_accvgpr_write_b32 a0, s8
-; GCN-NEXT: v_mov_b64_e32 v[6:7], s[30:31]
+; GCN-NEXT: v_mov_b64_e32 v[14:15], s[30:31]
; GCN-NEXT: v_accvgpr_write_b32 a1, s9
; GCN-NEXT: v_accvgpr_write_b32 a2, s10
; GCN-NEXT: v_accvgpr_write_b32 a3, s11
@@ -114,40 +113,39 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_bf16__flags(<8 x bfloat> %arg0
; GCN-NEXT: v_accvgpr_write_b32 a15, s23
; GCN-NEXT: v_mov_b32_e32 v16, s16
; GCN-NEXT: v_mov_b32_e32 v17, s17
-; GCN-NEXT: v_mfma_f32_32x32x16_bf16 a[16:31], v[0:3], v[4:7], a[0:15] cbsz:2 abid:3 blgp:1
+; GCN-NEXT: v_mfma_f32_32x32x16_bf16 a[16:31], v[8:11], v[12:15], a[0:15] cbsz:2 abid:3 blgp:1
; GCN-NEXT: v_mov_b32_e32 v18, s18
; GCN-NEXT: v_mov_b32_e32 v19, s19
-; GCN-NEXT: v_mov_b32_e32 v0, s20
-; GCN-NEXT: v_mov_b32_e32 v1, s21
-; GCN-NEXT: v_mov_b32_e32 v2, s22
-; GCN-NEXT: v_mov_b32_e32 v3, s23
-; GCN-NEXT: v_mov_b64_e32 v[14:15], 0
+; GCN-NEXT: v_mov_b32_e32 v8, s20
+; GCN-NEXT: v_mov_b32_e32 v9, s21
+; GCN-NEXT: v_mov_b32_e32 v10, s22
+; GCN-NEXT: v_mov_b32_e32 v11, s23
+; GCN-NEXT: v_mov_b64_e32 v[6:7], 0
; GCN-NEXT: s_nop 4
-; GCN-NEXT: global_store_dwordx4 v[8:9], a[28:31], off sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v[0:1], a[28:31], off sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: global_store_dwordx4 v[10:11], a[24:27], off sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v[2:3], a[24:27], off sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: global_store_dwordx4 v[12:13], a[20:23], off sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v[4:5], a[20:23], off sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: global_store_dwordx4 v[14:15], a[16:19], off sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v[6:7], a[16:19], off sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: global_store_dwordx4 v[10:11], v[16:19], off sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v[2:3], v[16:19], off sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: global_store_dwordx4 v[8:9], v[0:3], off sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v[0:1], v[8:11], off sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, s8
; GCN-NEXT: v_mov_b32_e32 v1, s9
; GCN-NEXT: v_mov_b32_e32 v2, s10
; GCN-NEXT: v_mov_b32_e32 v3, s11
-; GCN-NEXT: global_store_dwordx4 v[14:15], v[0:3], off sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v[6:7], v[0:3], off sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, s12
; GCN-NEXT: v_mov_b32_e32 v1, s13
; GCN-NEXT: v_mov_b32_e32 v2, s14
; GCN-NEXT: v_mov_b32_e32 v3, s15
-; GCN-NEXT: global_store_dwordx4 v[12:13], v[0:3], off sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v[4:5], v[0:3], off sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_endpgm
%result = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.bf16(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <16 x float> %arg2, i32 2, i32 3, i32 1)
@@ -250,13 +248,13 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_bf16__vgprcd(<8 x bfloat> %arg
; GCN-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
; GCN-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xa4
-; GCN-NEXT: v_mov_b32_e32 v44, 0
+; GCN-NEXT: v_mov_b32_e32 v36, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: v_mov_b64_e32 v[34:35], s[26:27]
-; GCN-NEXT: v_mov_b64_e32 v[32:33], s[24:25]
-; GCN-NEXT: v_mov_b64_e32 v[38:39], s[30:31]
+; GCN-NEXT: v_mov_b64_e32 v[40:41], s[26:27]
+; GCN-NEXT: v_mov_b64_e32 v[38:39], s[24:25]
+; GCN-NEXT: v_mov_b64_e32 v[44:45], s[30:31]
; GCN-NEXT: v_mov_b64_e32 v[30:31], s[22:23]
-; GCN-NEXT: v_mov_b64_e32 v[36:37], s[28:29]
+; GCN-NEXT: v_mov_b64_e32 v[42:43], s[28:29]
; GCN-NEXT: v_mov_b64_e32 v[28:29], s[20:21]
; GCN-NEXT: v_mov_b64_e32 v[26:27], s[18:19]
; GCN-NEXT: v_mov_b64_e32 v[24:25], s[16:17]
@@ -264,41 +262,41 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_bf16__vgprcd(<8 x bfloat> %arg
; GCN-NEXT: v_mov_b64_e32 v[20:21], s[12:13]
; GCN-NEXT: v_mov_b64_e32 v[18:19], s[10:11]
; GCN-NEXT: v_mov_b64_e32 v[16:17], s[8:9]
-; GCN-NEXT: v_mov_b32_e32 v40, s20
-; GCN-NEXT: v_mov_b32_e32 v41, s21
-; GCN-NEXT: v_mfma_f32_32x32x16_bf16 v[0:15], v[32:35], v[36:39], v[16:31]
-; GCN-NEXT: v_mov_b32_e32 v42, s22
-; GCN-NEXT: v_mov_b32_e32 v43, s23
-; GCN-NEXT: global_store_dwordx4 v44, v[40:43], s[0:1] offset:48 sc0 sc1
+; GCN-NEXT: v_mov_b32_e32 v32, s20
+; GCN-NEXT: v_mov_b32_e32 v33, s21
+; GCN-NEXT: v_mfma_f32_32x32x16_bf16 v[0:15], v[38:41], v[42:45], v[16:31]
+; GCN-NEXT: v_mov_b32_e32 v34, s22
+; GCN-NEXT: v_mov_b32_e32 v35, s23
+; GCN-NEXT: global_store_dwordx4 v36, v[32:35], s[0:1] offset:48 sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_nop 2
; GCN-NEXT: v_mov_b32_e32 v16, s16
; GCN-NEXT: v_mov_b32_e32 v17, s17
; GCN-NEXT: v_mov_b32_e32 v18, s18
; GCN-NEXT: v_mov_b32_e32 v19, s19
-; GCN-NEXT: global_store_dwordx4 v44, v[16:19], s[0:1] offset:32 sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v36, v[16:19], s[0:1] offset:32 sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v16, s12
; GCN-NEXT: v_mov_b32_e32 v17, s13
; GCN-NEXT: v_mov_b32_e32 v18, s14
; GCN-NEXT: v_mov_b32_e32 v19, s15
-; GCN-NEXT: global_store_dwordx4 v44, v[16:19], s[0:1] offset:16 sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v36, v[16:19], s[0:1] offset:16 sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v16, s8
; GCN-NEXT: v_mov_b32_e32 v17, s9
; GCN-NEXT: v_mov_b32_e32 v18, s10
; GCN-NEXT: v_mov_b32_e32 v19, s11
-; GCN-NEXT: global_store_dwordx4 v44, v[16:19], s[0:1] sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v36, v[16:19], s[0:1] sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: global_store_dwordx4 v44, v[8:11], s[0:1] offset:32 sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v36, v[8:11], s[0:1] offset:32 sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: global_store_dwordx4 v44, v[12:15], s[0:1] offset:48 sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v36, v[12:15], s[0:1] offset:48 sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: global_store_dwordx4 v44, v[0:3], s[0:1] sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v36, v[0:3], s[0:1] sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: global_store_dwordx4 v44, v[4:7], s[0:1] offset:16 sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v36, v[4:7], s[0:1] offset:16 sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_endpgm
%result = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.bf16(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <16 x float> %arg2, i32 0, i32 0, i32 0)
@@ -313,13 +311,13 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_bf16__vgprcd__flags(<8 x bfloa
; GCN-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
; GCN-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xa4
-; GCN-NEXT: v_mov_b32_e32 v44, 0
+; GCN-NEXT: v_mov_b32_e32 v36, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: v_mov_b64_e32 v[34:35], s[26:27]
-; GCN-NEXT: v_mov_b64_e32 v[32:33], s[24:25]
-; GCN-NEXT: v_mov_b64_e32 v[38:39], s[30:31]
+; GCN-NEXT: v_mov_b64_e32 v[40:41], s[26:27]
+; GCN-NEXT: v_mov_b64_e32 v[38:39], s[24:25]
+; GCN-NEXT: v_mov_b64_e32 v[44:45], s[30:31]
; GCN-NEXT: v_mov_b64_e32 v[30:31], s[22:23]
-; GCN-NEXT: v_mov_b64_e32 v[36:37], s[28:29]
+; GCN-NEXT: v_mov_b64_e32 v[42:43], s[28:29]
; GCN-NEXT: v_mov_b64_e32 v[28:29], s[20:21]
; GCN-NEXT: v_mov_b64_e32 v[26:27], s[18:19]
; GCN-NEXT: v_mov_b64_e32 v[24:25], s[16:17]
@@ -327,41 +325,41 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_bf16__vgprcd__flags(<8 x bfloa
; GCN-NEXT: v_mov_b64_e32 v[20:21], s[12:13]
; GCN-NEXT: v_mov_b64_e32 v[18:19], s[10:11]
; GCN-NEXT: v_mov_b64_e32 v[16:17], s[8:9]
-; GCN-NEXT: v_mov_b32_e32 v40, s20
-; GCN-NEXT: v_mov_b32_e32 v41, s21
-; GCN-NEXT: v_mfma_f32_32x32x16_bf16 v[0:15], v[32:35], v[36:39], v[16:31] cbsz:1 abid:2 blgp:3
-; GCN-NEXT: v_mov_b32_e32 v42, s22
-; GCN-NEXT: v_mov_b32_e32 v43, s23
-; GCN-NEXT: global_store_dwordx4 v44, v[40:43], s[0:1] offset:48 sc0 sc1
+; GCN-NEXT: v_mov_b32_e32 v32, s20
+; GCN-NEXT: v_mov_b32_e32 v33, s21
+; GCN-NEXT: v_mfma_f32_32x32x16_bf16 v[0:15], v[38:41], v[42:45], v[16:31] cbsz:1 abid:2 blgp:3
+; GCN-NEXT: v_mov_b32_e32 v34, s22
+; GCN-NEXT: v_mov_b32_e32 v35, s23
+; GCN-NEXT: global_store_dwordx4 v36, v[32:35], s[0:1] offset:48 sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_nop 2
; GCN-NEXT: v_mov_b32_e32 v16, s16
; GCN-NEXT: v_mov_b32_e32 v17, s17
; GCN-NEXT: v_mov_b32_e32 v18, s18
; GCN-NEXT: v_mov_b32_e32 v19, s19
-; GCN-NEXT: global_store_dwordx4 v44, v[16:19], s[0:1] offset:32 sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v36, v[16:19], s[0:1] offset:32 sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v16, s12
; GCN-NEXT: v_mov_b32_e32 v17, s13
; GCN-NEXT: v_mov_b32_e32 v18, s14
; GCN-NEXT: v_mov_b32_e32 v19, s15
-; GCN-NEXT: global_store_dwordx4 v44, v[16:19], s[0:1] offset:16 sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v36, v[16:19], s[0:1] offset:16 sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v16, s8
; GCN-NEXT: v_mov_b32_e32 v17, s9
; GCN-NEXT: v_mov_b32_e32 v18, s10
; GCN-NEXT: v_mov_b32_e32 v19, s11
-; GCN-NEXT: global_store_dwordx4 v44, v[16:19], s[0:1] sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v36, v[16:19], s[0:1] sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: global_store_dwordx4 v44, v[8:11], s[0:1] offset:32 sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v36, v[8:11], s[0:1] offset:32 sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: global_store_dwordx4 v44, v[12:15], s[0:1] offset:48 sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v36, v[12:15], s[0:1] offset:48 sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: global_store_dwordx4 v44, v[0:3], s[0:1] sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v36, v[0:3], s[0:1] sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: global_store_dwordx4 v44, v[4:7], s[0:1] offset:16 sc0 sc1
+; GCN-NEXT: global_store_dwordx4 v36, v[4:7], s[0:1] offset:16 sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_endpgm
%result = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.bf16(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <16 x float> %arg2, i32 1, i32 2, i32 3)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.ll
index 7532062..ab0000f 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.ll
@@ -141,18 +141,18 @@ define amdgpu_kernel void @test_mfma_f32_16x16x32_f16_no_agpr__vgprcd(ptr addrsp
; SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
; SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
; SDAG-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
-; SDAG-NEXT: v_mov_b32_e32 v12, 0
+; SDAG-NEXT: v_mov_b32_e32 v4, 0
; SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
-; SDAG-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
-; SDAG-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
-; SDAG-NEXT: v_mov_b64_e32 v[10:11], s[2:3]
-; SDAG-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
-; SDAG-NEXT: v_mov_b64_e32 v[8:9], s[0:1]
+; SDAG-NEXT: v_mov_b64_e32 v[6:7], s[8:9]
+; SDAG-NEXT: v_mov_b64_e32 v[8:9], s[10:11]
+; SDAG-NEXT: v_mov_b64_e32 v[10:11], s[12:13]
+; SDAG-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; SDAG-NEXT: v_mov_b64_e32 v[12:13], s[14:15]
+; SDAG-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; SDAG-NEXT: s_nop 1
-; SDAG-NEXT: v_mfma_f32_16x16x32_f16 v[0:3], v[0:3], v[4:7], v[8:11]
+; SDAG-NEXT: v_mfma_f32_16x16x32_f16 v[0:3], v[6:9], v[10:13], v[0:3]
; SDAG-NEXT: s_nop 7
-; SDAG-NEXT: global_store_dwordx4 v12, v[0:3], s[6:7]
+; SDAG-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
; SDAG-NEXT: s_endpgm
;
; GISEL-LABEL: test_mfma_f32_16x16x32_f16_no_agpr__vgprcd:
@@ -179,18 +179,18 @@ define amdgpu_kernel void @test_mfma_f32_16x16x32_f16_no_agpr__vgprcd(ptr addrsp
; HEURRC-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
; HEURRC-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
; HEURRC-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
-; HEURRC-NEXT: v_mov_b32_e32 v12, 0
+; HEURRC-NEXT: v_mov_b32_e32 v4, 0
; HEURRC-NEXT: s_waitcnt lgkmcnt(0)
-; HEURRC-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
-; HEURRC-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
-; HEURRC-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
-; HEURRC-NEXT: v_mov_b64_e32 v[10:11], s[2:3]
-; HEURRC-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
-; HEURRC-NEXT: v_mov_b64_e32 v[8:9], s[0:1]
+; HEURRC-NEXT: v_mov_b64_e32 v[6:7], s[8:9]
+; HEURRC-NEXT: v_mov_b64_e32 v[8:9], s[10:11]
+; HEURRC-NEXT: v_mov_b64_e32 v[10:11], s[12:13]
+; HEURRC-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; HEURRC-NEXT: v_mov_b64_e32 v[12:13], s[14:15]
+; HEURRC-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; HEURRC-NEXT: s_nop 1
-; HEURRC-NEXT: v_mfma_f32_16x16x32_f16 v[0:3], v[0:3], v[4:7], v[8:11]
+; HEURRC-NEXT: v_mfma_f32_16x16x32_f16 v[0:3], v[6:9], v[10:13], v[0:3]
; HEURRC-NEXT: s_nop 7
-; HEURRC-NEXT: global_store_dwordx4 v12, v[0:3], s[6:7]
+; HEURRC-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
; HEURRC-NEXT: s_endpgm
;
; VGPRRC-LABEL: test_mfma_f32_16x16x32_f16_no_agpr__vgprcd:
@@ -198,18 +198,18 @@ define amdgpu_kernel void @test_mfma_f32_16x16x32_f16_no_agpr__vgprcd(ptr addrsp
; VGPRRC-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
; VGPRRC-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
; VGPRRC-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
-; VGPRRC-NEXT: v_mov_b32_e32 v12, 0
+; VGPRRC-NEXT: v_mov_b32_e32 v4, 0
; VGPRRC-NEXT: s_waitcnt lgkmcnt(0)
-; VGPRRC-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
-; VGPRRC-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
-; VGPRRC-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
-; VGPRRC-NEXT: v_mov_b64_e32 v[10:11], s[2:3]
-; VGPRRC-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
-; VGPRRC-NEXT: v_mov_b64_e32 v[8:9], s[0:1]
+; VGPRRC-NEXT: v_mov_b64_e32 v[6:7], s[8:9]
+; VGPRRC-NEXT: v_mov_b64_e32 v[8:9], s[10:11]
+; VGPRRC-NEXT: v_mov_b64_e32 v[10:11], s[12:13]
+; VGPRRC-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; VGPRRC-NEXT: v_mov_b64_e32 v[12:13], s[14:15]
+; VGPRRC-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; VGPRRC-NEXT: s_nop 1
-; VGPRRC-NEXT: v_mfma_f32_16x16x32_f16 v[0:3], v[0:3], v[4:7], v[8:11]
+; VGPRRC-NEXT: v_mfma_f32_16x16x32_f16 v[0:3], v[6:9], v[10:13], v[0:3]
; VGPRRC-NEXT: s_nop 7
-; VGPRRC-NEXT: global_store_dwordx4 v12, v[0:3], s[6:7]
+; VGPRRC-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
; VGPRRC-NEXT: s_endpgm
; AGPR-LABEL: test_mfma_f32_16x16x32_f16_no_agpr__vgprcd:
; AGPR: ; %bb.0:
@@ -260,18 +260,18 @@ define amdgpu_kernel void @test_mfma_f32_16x16x32_f16_no_agpr__vgprcd__flags(ptr
; SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
; SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
; SDAG-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
-; SDAG-NEXT: v_mov_b32_e32 v12, 0
+; SDAG-NEXT: v_mov_b32_e32 v4, 0
; SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
-; SDAG-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
-; SDAG-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
-; SDAG-NEXT: v_mov_b64_e32 v[10:11], s[2:3]
-; SDAG-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
-; SDAG-NEXT: v_mov_b64_e32 v[8:9], s[0:1]
+; SDAG-NEXT: v_mov_b64_e32 v[6:7], s[8:9]
+; SDAG-NEXT: v_mov_b64_e32 v[8:9], s[10:11]
+; SDAG-NEXT: v_mov_b64_e32 v[10:11], s[12:13]
+; SDAG-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; SDAG-NEXT: v_mov_b64_e32 v[12:13], s[14:15]
+; SDAG-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; SDAG-NEXT: s_nop 1
-; SDAG-NEXT: v_mfma_f32_16x16x32_f16 v[0:3], v[0:3], v[4:7], v[8:11] cbsz:3 abid:2 blgp:1
+; SDAG-NEXT: v_mfma_f32_16x16x32_f16 v[0:3], v[6:9], v[10:13], v[0:3] cbsz:3 abid:2 blgp:1
; SDAG-NEXT: s_nop 7
-; SDAG-NEXT: global_store_dwordx4 v12, v[0:3], s[6:7]
+; SDAG-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
; SDAG-NEXT: s_endpgm
;
; GISEL-LABEL: test_mfma_f32_16x16x32_f16_no_agpr__vgprcd__flags:
@@ -298,18 +298,18 @@ define amdgpu_kernel void @test_mfma_f32_16x16x32_f16_no_agpr__vgprcd__flags(ptr
; HEURRC-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
; HEURRC-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
; HEURRC-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
-; HEURRC-NEXT: v_mov_b32_e32 v12, 0
+; HEURRC-NEXT: v_mov_b32_e32 v4, 0
; HEURRC-NEXT: s_waitcnt lgkmcnt(0)
-; HEURRC-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
-; HEURRC-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
-; HEURRC-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
-; HEURRC-NEXT: v_mov_b64_e32 v[10:11], s[2:3]
-; HEURRC-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
-; HEURRC-NEXT: v_mov_b64_e32 v[8:9], s[0:1]
+; HEURRC-NEXT: v_mov_b64_e32 v[6:7], s[8:9]
+; HEURRC-NEXT: v_mov_b64_e32 v[8:9], s[10:11]
+; HEURRC-NEXT: v_mov_b64_e32 v[10:11], s[12:13]
+; HEURRC-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; HEURRC-NEXT: v_mov_b64_e32 v[12:13], s[14:15]
+; HEURRC-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; HEURRC-NEXT: s_nop 1
-; HEURRC-NEXT: v_mfma_f32_16x16x32_f16 v[0:3], v[0:3], v[4:7], v[8:11] cbsz:3 abid:2 blgp:1
+; HEURRC-NEXT: v_mfma_f32_16x16x32_f16 v[0:3], v[6:9], v[10:13], v[0:3] cbsz:3 abid:2 blgp:1
; HEURRC-NEXT: s_nop 7
-; HEURRC-NEXT: global_store_dwordx4 v12, v[0:3], s[6:7]
+; HEURRC-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
; HEURRC-NEXT: s_endpgm
;
; VGPRRC-LABEL: test_mfma_f32_16x16x32_f16_no_agpr__vgprcd__flags:
@@ -317,18 +317,18 @@ define amdgpu_kernel void @test_mfma_f32_16x16x32_f16_no_agpr__vgprcd__flags(ptr
; VGPRRC-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
; VGPRRC-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
; VGPRRC-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
-; VGPRRC-NEXT: v_mov_b32_e32 v12, 0
+; VGPRRC-NEXT: v_mov_b32_e32 v4, 0
; VGPRRC-NEXT: s_waitcnt lgkmcnt(0)
-; VGPRRC-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
-; VGPRRC-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
-; VGPRRC-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
-; VGPRRC-NEXT: v_mov_b64_e32 v[10:11], s[2:3]
-; VGPRRC-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
-; VGPRRC-NEXT: v_mov_b64_e32 v[8:9], s[0:1]
+; VGPRRC-NEXT: v_mov_b64_e32 v[6:7], s[8:9]
+; VGPRRC-NEXT: v_mov_b64_e32 v[8:9], s[10:11]
+; VGPRRC-NEXT: v_mov_b64_e32 v[10:11], s[12:13]
+; VGPRRC-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; VGPRRC-NEXT: v_mov_b64_e32 v[12:13], s[14:15]
+; VGPRRC-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; VGPRRC-NEXT: s_nop 1
-; VGPRRC-NEXT: v_mfma_f32_16x16x32_f16 v[0:3], v[0:3], v[4:7], v[8:11] cbsz:3 abid:2 blgp:1
+; VGPRRC-NEXT: v_mfma_f32_16x16x32_f16 v[0:3], v[6:9], v[10:13], v[0:3] cbsz:3 abid:2 blgp:1
; VGPRRC-NEXT: s_nop 7
-; VGPRRC-NEXT: global_store_dwordx4 v12, v[0:3], s[6:7]
+; VGPRRC-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
; VGPRRC-NEXT: s_endpgm
; AGPR-LABEL: test_mfma_f32_16x16x32_f16_no_agpr__vgprcd__flags:
; AGPR: ; %bb.0:
@@ -382,15 +382,15 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16(<8 x half> %arg0, <8 x hal
; SDAG: ; %bb.0:
; SDAG-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
; SDAG-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
-; SDAG-NEXT: v_mov_b64_e32 v[8:9], 48
-; SDAG-NEXT: v_mov_b64_e32 v[10:11], 32
-; SDAG-NEXT: v_mov_b64_e32 v[12:13], 16
+; SDAG-NEXT: v_mov_b64_e32 v[0:1], 48
+; SDAG-NEXT: v_mov_b64_e32 v[2:3], 32
+; SDAG-NEXT: v_mov_b64_e32 v[4:5], 16
; SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-NEXT: v_mov_b64_e32 v[0:1], s[24:25]
-; SDAG-NEXT: v_mov_b64_e32 v[2:3], s[26:27]
-; SDAG-NEXT: v_mov_b64_e32 v[4:5], s[28:29]
+; SDAG-NEXT: v_mov_b64_e32 v[8:9], s[24:25]
+; SDAG-NEXT: v_mov_b64_e32 v[10:11], s[26:27]
+; SDAG-NEXT: v_mov_b64_e32 v[12:13], s[28:29]
; SDAG-NEXT: v_accvgpr_write_b32 a0, s8
-; SDAG-NEXT: v_mov_b64_e32 v[6:7], s[30:31]
+; SDAG-NEXT: v_mov_b64_e32 v[14:15], s[30:31]
; SDAG-NEXT: v_accvgpr_write_b32 a1, s9
; SDAG-NEXT: v_accvgpr_write_b32 a2, s10
; SDAG-NEXT: v_accvgpr_write_b32 a3, s11
@@ -408,40 +408,39 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16(<8 x half> %arg0, <8 x hal
; SDAG-NEXT: v_accvgpr_write_b32 a15, s23
; SDAG-NEXT: v_mov_b32_e32 v16, s16
; SDAG-NEXT: v_mov_b32_e32 v17, s17
-; SDAG-NEXT: v_mfma_f32_32x32x16_f16 a[16:31], v[0:3], v[4:7], a[0:15]
+; SDAG-NEXT: v_mfma_f32_32x32x16_f16 a[16:31], v[8:11], v[12:15], a[0:15]
; SDAG-NEXT: v_mov_b32_e32 v18, s18
; SDAG-NEXT: v_mov_b32_e32 v19, s19
-; SDAG-NEXT: v_mov_b32_e32 v0, s20
-; SDAG-NEXT: v_mov_b32_e32 v1, s21
-; SDAG-NEXT: v_mov_b32_e32 v2, s22
-; SDAG-NEXT: v_mov_b32_e32 v3, s23
-; SDAG-NEXT: v_mov_b64_e32 v[14:15], 0
+; SDAG-NEXT: v_mov_b32_e32 v8, s20
+; SDAG-NEXT: v_mov_b32_e32 v9, s21
+; SDAG-NEXT: v_mov_b32_e32 v10, s22
+; SDAG-NEXT: v_mov_b32_e32 v11, s23
+; SDAG-NEXT: v_mov_b64_e32 v[6:7], 0
; SDAG-NEXT: s_nop 4
-; SDAG-NEXT: global_store_dwordx4 v[8:9], a[28:31], off sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v[0:1], a[28:31], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: global_store_dwordx4 v[10:11], a[24:27], off sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v[2:3], a[24:27], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: global_store_dwordx4 v[12:13], a[20:23], off sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v[4:5], a[20:23], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: global_store_dwordx4 v[14:15], a[16:19], off sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v[6:7], a[16:19], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: global_store_dwordx4 v[10:11], v[16:19], off sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v[2:3], v[16:19], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: global_store_dwordx4 v[8:9], v[0:3], off sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v[0:1], v[8:11], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: s_nop 0
; SDAG-NEXT: v_mov_b32_e32 v0, s8
; SDAG-NEXT: v_mov_b32_e32 v1, s9
; SDAG-NEXT: v_mov_b32_e32 v2, s10
; SDAG-NEXT: v_mov_b32_e32 v3, s11
-; SDAG-NEXT: global_store_dwordx4 v[14:15], v[0:3], off sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v[6:7], v[0:3], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: s_nop 0
; SDAG-NEXT: v_mov_b32_e32 v0, s12
; SDAG-NEXT: v_mov_b32_e32 v1, s13
; SDAG-NEXT: v_mov_b32_e32 v2, s14
; SDAG-NEXT: v_mov_b32_e32 v3, s15
-; SDAG-NEXT: global_store_dwordx4 v[12:13], v[0:3], off sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v[4:5], v[0:3], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: s_endpgm
;
@@ -508,15 +507,15 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16(<8 x half> %arg0, <8 x hal
; HEURRC: ; %bb.0:
; HEURRC-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
; HEURRC-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
-; HEURRC-NEXT: v_mov_b64_e32 v[8:9], 48
-; HEURRC-NEXT: v_mov_b64_e32 v[10:11], 32
-; HEURRC-NEXT: v_mov_b64_e32 v[12:13], 16
+; HEURRC-NEXT: v_mov_b64_e32 v[0:1], 48
+; HEURRC-NEXT: v_mov_b64_e32 v[2:3], 32
+; HEURRC-NEXT: v_mov_b64_e32 v[4:5], 16
; HEURRC-NEXT: s_waitcnt lgkmcnt(0)
-; HEURRC-NEXT: v_mov_b64_e32 v[0:1], s[24:25]
-; HEURRC-NEXT: v_mov_b64_e32 v[2:3], s[26:27]
-; HEURRC-NEXT: v_mov_b64_e32 v[4:5], s[28:29]
+; HEURRC-NEXT: v_mov_b64_e32 v[8:9], s[24:25]
+; HEURRC-NEXT: v_mov_b64_e32 v[10:11], s[26:27]
+; HEURRC-NEXT: v_mov_b64_e32 v[12:13], s[28:29]
; HEURRC-NEXT: v_accvgpr_write_b32 a0, s8
-; HEURRC-NEXT: v_mov_b64_e32 v[6:7], s[30:31]
+; HEURRC-NEXT: v_mov_b64_e32 v[14:15], s[30:31]
; HEURRC-NEXT: v_accvgpr_write_b32 a1, s9
; HEURRC-NEXT: v_accvgpr_write_b32 a2, s10
; HEURRC-NEXT: v_accvgpr_write_b32 a3, s11
@@ -534,40 +533,39 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16(<8 x half> %arg0, <8 x hal
; HEURRC-NEXT: v_accvgpr_write_b32 a15, s23
; HEURRC-NEXT: v_mov_b32_e32 v16, s16
; HEURRC-NEXT: v_mov_b32_e32 v17, s17
-; HEURRC-NEXT: v_mfma_f32_32x32x16_f16 a[16:31], v[0:3], v[4:7], a[0:15]
+; HEURRC-NEXT: v_mfma_f32_32x32x16_f16 a[16:31], v[8:11], v[12:15], a[0:15]
; HEURRC-NEXT: v_mov_b32_e32 v18, s18
; HEURRC-NEXT: v_mov_b32_e32 v19, s19
-; HEURRC-NEXT: v_mov_b32_e32 v0, s20
-; HEURRC-NEXT: v_mov_b32_e32 v1, s21
-; HEURRC-NEXT: v_mov_b32_e32 v2, s22
-; HEURRC-NEXT: v_mov_b32_e32 v3, s23
-; HEURRC-NEXT: v_mov_b64_e32 v[14:15], 0
+; HEURRC-NEXT: v_mov_b32_e32 v8, s20
+; HEURRC-NEXT: v_mov_b32_e32 v9, s21
+; HEURRC-NEXT: v_mov_b32_e32 v10, s22
+; HEURRC-NEXT: v_mov_b32_e32 v11, s23
+; HEURRC-NEXT: v_mov_b64_e32 v[6:7], 0
; HEURRC-NEXT: s_nop 4
-; HEURRC-NEXT: global_store_dwordx4 v[8:9], a[28:31], off sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v[0:1], a[28:31], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: global_store_dwordx4 v[10:11], a[24:27], off sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v[2:3], a[24:27], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: global_store_dwordx4 v[12:13], a[20:23], off sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v[4:5], a[20:23], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: global_store_dwordx4 v[14:15], a[16:19], off sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v[6:7], a[16:19], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: global_store_dwordx4 v[10:11], v[16:19], off sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v[2:3], v[16:19], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: global_store_dwordx4 v[8:9], v[0:3], off sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v[0:1], v[8:11], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: s_nop 0
; HEURRC-NEXT: v_mov_b32_e32 v0, s8
; HEURRC-NEXT: v_mov_b32_e32 v1, s9
; HEURRC-NEXT: v_mov_b32_e32 v2, s10
; HEURRC-NEXT: v_mov_b32_e32 v3, s11
-; HEURRC-NEXT: global_store_dwordx4 v[14:15], v[0:3], off sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v[6:7], v[0:3], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
; HEURRC-NEXT: s_nop 0
; HEURRC-NEXT: v_mov_b32_e32 v0, s12
; HEURRC-NEXT: v_mov_b32_e32 v1, s13
; HEURRC-NEXT: v_mov_b32_e32 v2, s14
; HEURRC-NEXT: v_mov_b32_e32 v3, s15
-; HEURRC-NEXT: global_store_dwordx4 v[12:13], v[0:3], off sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v[4:5], v[0:3], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
; HEURRC-NEXT: s_endpgm
;
@@ -575,15 +573,15 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16(<8 x half> %arg0, <8 x hal
; VGPRRC: ; %bb.0:
; VGPRRC-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
; VGPRRC-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
-; VGPRRC-NEXT: v_mov_b64_e32 v[40:41], 48
-; VGPRRC-NEXT: v_mov_b64_e32 v[42:43], 32
-; VGPRRC-NEXT: v_mov_b64_e32 v[44:45], 16
+; VGPRRC-NEXT: v_mov_b64_e32 v[32:33], 48
+; VGPRRC-NEXT: v_mov_b64_e32 v[34:35], 32
+; VGPRRC-NEXT: v_mov_b64_e32 v[36:37], 16
; VGPRRC-NEXT: s_waitcnt lgkmcnt(0)
-; VGPRRC-NEXT: v_mov_b64_e32 v[34:35], s[26:27]
-; VGPRRC-NEXT: v_mov_b64_e32 v[32:33], s[24:25]
-; VGPRRC-NEXT: v_mov_b64_e32 v[38:39], s[30:31]
+; VGPRRC-NEXT: v_mov_b64_e32 v[42:43], s[26:27]
+; VGPRRC-NEXT: v_mov_b64_e32 v[40:41], s[24:25]
+; VGPRRC-NEXT: v_mov_b64_e32 v[46:47], s[30:31]
; VGPRRC-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
-; VGPRRC-NEXT: v_mov_b64_e32 v[36:37], s[28:29]
+; VGPRRC-NEXT: v_mov_b64_e32 v[44:45], s[28:29]
; VGPRRC-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
; VGPRRC-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
; VGPRRC-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
@@ -593,40 +591,40 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16(<8 x half> %arg0, <8 x hal
; VGPRRC-NEXT: v_mov_b64_e32 v[14:15], s[22:23]
; VGPRRC-NEXT: v_mov_b32_e32 v48, s16
; VGPRRC-NEXT: v_mov_b32_e32 v49, s17
-; VGPRRC-NEXT: v_mfma_f32_32x32x16_f16 v[16:31], v[32:35], v[36:39], v[0:15]
+; VGPRRC-NEXT: v_mfma_f32_32x32x16_f16 v[16:31], v[40:43], v[44:47], v[0:15]
; VGPRRC-NEXT: v_mov_b32_e32 v50, s18
; VGPRRC-NEXT: v_mov_b32_e32 v51, s19
-; VGPRRC-NEXT: v_mov_b64_e32 v[46:47], 0
+; VGPRRC-NEXT: v_mov_b64_e32 v[38:39], 0
; VGPRRC-NEXT: s_nop 8
-; VGPRRC-NEXT: global_store_dwordx4 v[40:41], v[28:31], off sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v[32:33], v[28:31], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
-; VGPRRC-NEXT: global_store_dwordx4 v[42:43], v[24:27], off sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v[34:35], v[24:27], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
-; VGPRRC-NEXT: global_store_dwordx4 v[44:45], v[20:23], off sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v[36:37], v[20:23], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
-; VGPRRC-NEXT: global_store_dwordx4 v[46:47], v[16:19], off sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v[38:39], v[16:19], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: v_mov_b32_e32 v0, s20
; VGPRRC-NEXT: v_mov_b32_e32 v1, s21
; VGPRRC-NEXT: v_mov_b32_e32 v2, s22
; VGPRRC-NEXT: v_mov_b32_e32 v3, s23
-; VGPRRC-NEXT: global_store_dwordx4 v[42:43], v[48:51], off sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v[34:35], v[48:51], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
-; VGPRRC-NEXT: global_store_dwordx4 v[40:41], v[0:3], off sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v[32:33], v[0:3], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: s_nop 0
; VGPRRC-NEXT: v_mov_b32_e32 v0, s8
; VGPRRC-NEXT: v_mov_b32_e32 v1, s9
; VGPRRC-NEXT: v_mov_b32_e32 v2, s10
; VGPRRC-NEXT: v_mov_b32_e32 v3, s11
-; VGPRRC-NEXT: global_store_dwordx4 v[46:47], v[0:3], off sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v[38:39], v[0:3], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: s_nop 0
; VGPRRC-NEXT: v_mov_b32_e32 v0, s12
; VGPRRC-NEXT: v_mov_b32_e32 v1, s13
; VGPRRC-NEXT: v_mov_b32_e32 v2, s14
; VGPRRC-NEXT: v_mov_b32_e32 v3, s15
-; VGPRRC-NEXT: global_store_dwordx4 v[44:45], v[0:3], off sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v[36:37], v[0:3], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: s_endpgm
; AGPR-LABEL: test_mfma_f32_32x32x16_f16:
@@ -765,15 +763,15 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__flags(<8 x half> %arg0, <
; SDAG: ; %bb.0:
; SDAG-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
; SDAG-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
-; SDAG-NEXT: v_mov_b64_e32 v[8:9], 48
-; SDAG-NEXT: v_mov_b64_e32 v[10:11], 32
-; SDAG-NEXT: v_mov_b64_e32 v[12:13], 16
+; SDAG-NEXT: v_mov_b64_e32 v[0:1], 48
+; SDAG-NEXT: v_mov_b64_e32 v[2:3], 32
+; SDAG-NEXT: v_mov_b64_e32 v[4:5], 16
; SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-NEXT: v_mov_b64_e32 v[0:1], s[24:25]
-; SDAG-NEXT: v_mov_b64_e32 v[2:3], s[26:27]
-; SDAG-NEXT: v_mov_b64_e32 v[4:5], s[28:29]
+; SDAG-NEXT: v_mov_b64_e32 v[8:9], s[24:25]
+; SDAG-NEXT: v_mov_b64_e32 v[10:11], s[26:27]
+; SDAG-NEXT: v_mov_b64_e32 v[12:13], s[28:29]
; SDAG-NEXT: v_accvgpr_write_b32 a0, s8
-; SDAG-NEXT: v_mov_b64_e32 v[6:7], s[30:31]
+; SDAG-NEXT: v_mov_b64_e32 v[14:15], s[30:31]
; SDAG-NEXT: v_accvgpr_write_b32 a1, s9
; SDAG-NEXT: v_accvgpr_write_b32 a2, s10
; SDAG-NEXT: v_accvgpr_write_b32 a3, s11
@@ -791,40 +789,39 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__flags(<8 x half> %arg0, <
; SDAG-NEXT: v_accvgpr_write_b32 a15, s23
; SDAG-NEXT: v_mov_b32_e32 v16, s16
; SDAG-NEXT: v_mov_b32_e32 v17, s17
-; SDAG-NEXT: v_mfma_f32_32x32x16_f16 a[16:31], v[0:3], v[4:7], a[0:15] cbsz:2 abid:3 blgp:1
+; SDAG-NEXT: v_mfma_f32_32x32x16_f16 a[16:31], v[8:11], v[12:15], a[0:15] cbsz:2 abid:3 blgp:1
; SDAG-NEXT: v_mov_b32_e32 v18, s18
; SDAG-NEXT: v_mov_b32_e32 v19, s19
-; SDAG-NEXT: v_mov_b32_e32 v0, s20
-; SDAG-NEXT: v_mov_b32_e32 v1, s21
-; SDAG-NEXT: v_mov_b32_e32 v2, s22
-; SDAG-NEXT: v_mov_b32_e32 v3, s23
-; SDAG-NEXT: v_mov_b64_e32 v[14:15], 0
+; SDAG-NEXT: v_mov_b32_e32 v8, s20
+; SDAG-NEXT: v_mov_b32_e32 v9, s21
+; SDAG-NEXT: v_mov_b32_e32 v10, s22
+; SDAG-NEXT: v_mov_b32_e32 v11, s23
+; SDAG-NEXT: v_mov_b64_e32 v[6:7], 0
; SDAG-NEXT: s_nop 4
-; SDAG-NEXT: global_store_dwordx4 v[8:9], a[28:31], off sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v[0:1], a[28:31], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: global_store_dwordx4 v[10:11], a[24:27], off sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v[2:3], a[24:27], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: global_store_dwordx4 v[12:13], a[20:23], off sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v[4:5], a[20:23], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: global_store_dwordx4 v[14:15], a[16:19], off sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v[6:7], a[16:19], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: global_store_dwordx4 v[10:11], v[16:19], off sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v[2:3], v[16:19], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: global_store_dwordx4 v[8:9], v[0:3], off sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v[0:1], v[8:11], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: s_nop 0
; SDAG-NEXT: v_mov_b32_e32 v0, s8
; SDAG-NEXT: v_mov_b32_e32 v1, s9
; SDAG-NEXT: v_mov_b32_e32 v2, s10
; SDAG-NEXT: v_mov_b32_e32 v3, s11
-; SDAG-NEXT: global_store_dwordx4 v[14:15], v[0:3], off sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v[6:7], v[0:3], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: s_nop 0
; SDAG-NEXT: v_mov_b32_e32 v0, s12
; SDAG-NEXT: v_mov_b32_e32 v1, s13
; SDAG-NEXT: v_mov_b32_e32 v2, s14
; SDAG-NEXT: v_mov_b32_e32 v3, s15
-; SDAG-NEXT: global_store_dwordx4 v[12:13], v[0:3], off sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v[4:5], v[0:3], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: s_endpgm
;
@@ -891,15 +888,15 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__flags(<8 x half> %arg0, <
; HEURRC: ; %bb.0:
; HEURRC-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
; HEURRC-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
-; HEURRC-NEXT: v_mov_b64_e32 v[8:9], 48
-; HEURRC-NEXT: v_mov_b64_e32 v[10:11], 32
-; HEURRC-NEXT: v_mov_b64_e32 v[12:13], 16
+; HEURRC-NEXT: v_mov_b64_e32 v[0:1], 48
+; HEURRC-NEXT: v_mov_b64_e32 v[2:3], 32
+; HEURRC-NEXT: v_mov_b64_e32 v[4:5], 16
; HEURRC-NEXT: s_waitcnt lgkmcnt(0)
-; HEURRC-NEXT: v_mov_b64_e32 v[0:1], s[24:25]
-; HEURRC-NEXT: v_mov_b64_e32 v[2:3], s[26:27]
-; HEURRC-NEXT: v_mov_b64_e32 v[4:5], s[28:29]
+; HEURRC-NEXT: v_mov_b64_e32 v[8:9], s[24:25]
+; HEURRC-NEXT: v_mov_b64_e32 v[10:11], s[26:27]
+; HEURRC-NEXT: v_mov_b64_e32 v[12:13], s[28:29]
; HEURRC-NEXT: v_accvgpr_write_b32 a0, s8
-; HEURRC-NEXT: v_mov_b64_e32 v[6:7], s[30:31]
+; HEURRC-NEXT: v_mov_b64_e32 v[14:15], s[30:31]
; HEURRC-NEXT: v_accvgpr_write_b32 a1, s9
; HEURRC-NEXT: v_accvgpr_write_b32 a2, s10
; HEURRC-NEXT: v_accvgpr_write_b32 a3, s11
@@ -917,40 +914,39 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__flags(<8 x half> %arg0, <
; HEURRC-NEXT: v_accvgpr_write_b32 a15, s23
; HEURRC-NEXT: v_mov_b32_e32 v16, s16
; HEURRC-NEXT: v_mov_b32_e32 v17, s17
-; HEURRC-NEXT: v_mfma_f32_32x32x16_f16 a[16:31], v[0:3], v[4:7], a[0:15] cbsz:2 abid:3 blgp:1
+; HEURRC-NEXT: v_mfma_f32_32x32x16_f16 a[16:31], v[8:11], v[12:15], a[0:15] cbsz:2 abid:3 blgp:1
; HEURRC-NEXT: v_mov_b32_e32 v18, s18
; HEURRC-NEXT: v_mov_b32_e32 v19, s19
-; HEURRC-NEXT: v_mov_b32_e32 v0, s20
-; HEURRC-NEXT: v_mov_b32_e32 v1, s21
-; HEURRC-NEXT: v_mov_b32_e32 v2, s22
-; HEURRC-NEXT: v_mov_b32_e32 v3, s23
-; HEURRC-NEXT: v_mov_b64_e32 v[14:15], 0
+; HEURRC-NEXT: v_mov_b32_e32 v8, s20
+; HEURRC-NEXT: v_mov_b32_e32 v9, s21
+; HEURRC-NEXT: v_mov_b32_e32 v10, s22
+; HEURRC-NEXT: v_mov_b32_e32 v11, s23
+; HEURRC-NEXT: v_mov_b64_e32 v[6:7], 0
; HEURRC-NEXT: s_nop 4
-; HEURRC-NEXT: global_store_dwordx4 v[8:9], a[28:31], off sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v[0:1], a[28:31], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: global_store_dwordx4 v[10:11], a[24:27], off sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v[2:3], a[24:27], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: global_store_dwordx4 v[12:13], a[20:23], off sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v[4:5], a[20:23], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: global_store_dwordx4 v[14:15], a[16:19], off sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v[6:7], a[16:19], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: global_store_dwordx4 v[10:11], v[16:19], off sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v[2:3], v[16:19], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: global_store_dwordx4 v[8:9], v[0:3], off sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v[0:1], v[8:11], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: s_nop 0
; HEURRC-NEXT: v_mov_b32_e32 v0, s8
; HEURRC-NEXT: v_mov_b32_e32 v1, s9
; HEURRC-NEXT: v_mov_b32_e32 v2, s10
; HEURRC-NEXT: v_mov_b32_e32 v3, s11
-; HEURRC-NEXT: global_store_dwordx4 v[14:15], v[0:3], off sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v[6:7], v[0:3], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
; HEURRC-NEXT: s_nop 0
; HEURRC-NEXT: v_mov_b32_e32 v0, s12
; HEURRC-NEXT: v_mov_b32_e32 v1, s13
; HEURRC-NEXT: v_mov_b32_e32 v2, s14
; HEURRC-NEXT: v_mov_b32_e32 v3, s15
-; HEURRC-NEXT: global_store_dwordx4 v[12:13], v[0:3], off sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v[4:5], v[0:3], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
; HEURRC-NEXT: s_endpgm
;
@@ -958,15 +954,15 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__flags(<8 x half> %arg0, <
; VGPRRC: ; %bb.0:
; VGPRRC-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
; VGPRRC-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
-; VGPRRC-NEXT: v_mov_b64_e32 v[40:41], 48
-; VGPRRC-NEXT: v_mov_b64_e32 v[42:43], 32
-; VGPRRC-NEXT: v_mov_b64_e32 v[44:45], 16
+; VGPRRC-NEXT: v_mov_b64_e32 v[32:33], 48
+; VGPRRC-NEXT: v_mov_b64_e32 v[34:35], 32
+; VGPRRC-NEXT: v_mov_b64_e32 v[36:37], 16
; VGPRRC-NEXT: s_waitcnt lgkmcnt(0)
-; VGPRRC-NEXT: v_mov_b64_e32 v[34:35], s[26:27]
-; VGPRRC-NEXT: v_mov_b64_e32 v[32:33], s[24:25]
-; VGPRRC-NEXT: v_mov_b64_e32 v[38:39], s[30:31]
+; VGPRRC-NEXT: v_mov_b64_e32 v[42:43], s[26:27]
+; VGPRRC-NEXT: v_mov_b64_e32 v[40:41], s[24:25]
+; VGPRRC-NEXT: v_mov_b64_e32 v[46:47], s[30:31]
; VGPRRC-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
-; VGPRRC-NEXT: v_mov_b64_e32 v[36:37], s[28:29]
+; VGPRRC-NEXT: v_mov_b64_e32 v[44:45], s[28:29]
; VGPRRC-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
; VGPRRC-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
; VGPRRC-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
@@ -976,40 +972,40 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__flags(<8 x half> %arg0, <
; VGPRRC-NEXT: v_mov_b64_e32 v[14:15], s[22:23]
; VGPRRC-NEXT: v_mov_b32_e32 v48, s16
; VGPRRC-NEXT: v_mov_b32_e32 v49, s17
-; VGPRRC-NEXT: v_mfma_f32_32x32x16_f16 v[16:31], v[32:35], v[36:39], v[0:15] cbsz:2 abid:3 blgp:1
+; VGPRRC-NEXT: v_mfma_f32_32x32x16_f16 v[16:31], v[40:43], v[44:47], v[0:15] cbsz:2 abid:3 blgp:1
; VGPRRC-NEXT: v_mov_b32_e32 v50, s18
; VGPRRC-NEXT: v_mov_b32_e32 v51, s19
-; VGPRRC-NEXT: v_mov_b64_e32 v[46:47], 0
+; VGPRRC-NEXT: v_mov_b64_e32 v[38:39], 0
; VGPRRC-NEXT: s_nop 8
-; VGPRRC-NEXT: global_store_dwordx4 v[40:41], v[28:31], off sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v[32:33], v[28:31], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
-; VGPRRC-NEXT: global_store_dwordx4 v[42:43], v[24:27], off sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v[34:35], v[24:27], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
-; VGPRRC-NEXT: global_store_dwordx4 v[44:45], v[20:23], off sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v[36:37], v[20:23], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
-; VGPRRC-NEXT: global_store_dwordx4 v[46:47], v[16:19], off sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v[38:39], v[16:19], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: v_mov_b32_e32 v0, s20
; VGPRRC-NEXT: v_mov_b32_e32 v1, s21
; VGPRRC-NEXT: v_mov_b32_e32 v2, s22
; VGPRRC-NEXT: v_mov_b32_e32 v3, s23
-; VGPRRC-NEXT: global_store_dwordx4 v[42:43], v[48:51], off sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v[34:35], v[48:51], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
-; VGPRRC-NEXT: global_store_dwordx4 v[40:41], v[0:3], off sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v[32:33], v[0:3], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: s_nop 0
; VGPRRC-NEXT: v_mov_b32_e32 v0, s8
; VGPRRC-NEXT: v_mov_b32_e32 v1, s9
; VGPRRC-NEXT: v_mov_b32_e32 v2, s10
; VGPRRC-NEXT: v_mov_b32_e32 v3, s11
-; VGPRRC-NEXT: global_store_dwordx4 v[46:47], v[0:3], off sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v[38:39], v[0:3], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: s_nop 0
; VGPRRC-NEXT: v_mov_b32_e32 v0, s12
; VGPRRC-NEXT: v_mov_b32_e32 v1, s13
; VGPRRC-NEXT: v_mov_b32_e32 v2, s14
; VGPRRC-NEXT: v_mov_b32_e32 v3, s15
-; VGPRRC-NEXT: global_store_dwordx4 v[44:45], v[0:3], off sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v[36:37], v[0:3], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: s_endpgm
; AGPR-LABEL: test_mfma_f32_32x32x16_f16__flags:
@@ -1489,13 +1485,13 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__vgprcd(<8 x half> %arg0,
; SDAG-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
; SDAG-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
; SDAG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xa4
-; SDAG-NEXT: v_mov_b32_e32 v44, 0
+; SDAG-NEXT: v_mov_b32_e32 v36, 0
; SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-NEXT: v_mov_b64_e32 v[34:35], s[26:27]
-; SDAG-NEXT: v_mov_b64_e32 v[32:33], s[24:25]
-; SDAG-NEXT: v_mov_b64_e32 v[38:39], s[30:31]
+; SDAG-NEXT: v_mov_b64_e32 v[40:41], s[26:27]
+; SDAG-NEXT: v_mov_b64_e32 v[38:39], s[24:25]
+; SDAG-NEXT: v_mov_b64_e32 v[44:45], s[30:31]
; SDAG-NEXT: v_mov_b64_e32 v[30:31], s[22:23]
-; SDAG-NEXT: v_mov_b64_e32 v[36:37], s[28:29]
+; SDAG-NEXT: v_mov_b64_e32 v[42:43], s[28:29]
; SDAG-NEXT: v_mov_b64_e32 v[28:29], s[20:21]
; SDAG-NEXT: v_mov_b64_e32 v[26:27], s[18:19]
; SDAG-NEXT: v_mov_b64_e32 v[24:25], s[16:17]
@@ -1503,41 +1499,41 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__vgprcd(<8 x half> %arg0,
; SDAG-NEXT: v_mov_b64_e32 v[20:21], s[12:13]
; SDAG-NEXT: v_mov_b64_e32 v[18:19], s[10:11]
; SDAG-NEXT: v_mov_b64_e32 v[16:17], s[8:9]
-; SDAG-NEXT: v_mov_b32_e32 v40, s20
-; SDAG-NEXT: v_mov_b32_e32 v41, s21
-; SDAG-NEXT: v_mfma_f32_32x32x16_f16 v[0:15], v[32:35], v[36:39], v[16:31]
-; SDAG-NEXT: v_mov_b32_e32 v42, s22
-; SDAG-NEXT: v_mov_b32_e32 v43, s23
-; SDAG-NEXT: global_store_dwordx4 v44, v[40:43], s[0:1] offset:48 sc0 sc1
+; SDAG-NEXT: v_mov_b32_e32 v32, s20
+; SDAG-NEXT: v_mov_b32_e32 v33, s21
+; SDAG-NEXT: v_mfma_f32_32x32x16_f16 v[0:15], v[38:41], v[42:45], v[16:31]
+; SDAG-NEXT: v_mov_b32_e32 v34, s22
+; SDAG-NEXT: v_mov_b32_e32 v35, s23
+; SDAG-NEXT: global_store_dwordx4 v36, v[32:35], s[0:1] offset:48 sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: s_nop 2
; SDAG-NEXT: v_mov_b32_e32 v16, s16
; SDAG-NEXT: v_mov_b32_e32 v17, s17
; SDAG-NEXT: v_mov_b32_e32 v18, s18
; SDAG-NEXT: v_mov_b32_e32 v19, s19
-; SDAG-NEXT: global_store_dwordx4 v44, v[16:19], s[0:1] offset:32 sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v36, v[16:19], s[0:1] offset:32 sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: s_nop 0
; SDAG-NEXT: v_mov_b32_e32 v16, s12
; SDAG-NEXT: v_mov_b32_e32 v17, s13
; SDAG-NEXT: v_mov_b32_e32 v18, s14
; SDAG-NEXT: v_mov_b32_e32 v19, s15
-; SDAG-NEXT: global_store_dwordx4 v44, v[16:19], s[0:1] offset:16 sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v36, v[16:19], s[0:1] offset:16 sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: s_nop 0
; SDAG-NEXT: v_mov_b32_e32 v16, s8
; SDAG-NEXT: v_mov_b32_e32 v17, s9
; SDAG-NEXT: v_mov_b32_e32 v18, s10
; SDAG-NEXT: v_mov_b32_e32 v19, s11
-; SDAG-NEXT: global_store_dwordx4 v44, v[16:19], s[0:1] sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v36, v[16:19], s[0:1] sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: global_store_dwordx4 v44, v[8:11], s[0:1] offset:32 sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v36, v[8:11], s[0:1] offset:32 sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: global_store_dwordx4 v44, v[12:15], s[0:1] offset:48 sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v36, v[12:15], s[0:1] offset:48 sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: global_store_dwordx4 v44, v[0:3], s[0:1] sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v36, v[0:3], s[0:1] sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: global_store_dwordx4 v44, v[4:7], s[0:1] offset:16 sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v36, v[4:7], s[0:1] offset:16 sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: s_endpgm
;
@@ -1592,13 +1588,13 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__vgprcd(<8 x half> %arg0,
; HEURRC-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
; HEURRC-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
; HEURRC-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xa4
-; HEURRC-NEXT: v_mov_b32_e32 v44, 0
+; HEURRC-NEXT: v_mov_b32_e32 v36, 0
; HEURRC-NEXT: s_waitcnt lgkmcnt(0)
-; HEURRC-NEXT: v_mov_b64_e32 v[34:35], s[26:27]
-; HEURRC-NEXT: v_mov_b64_e32 v[32:33], s[24:25]
-; HEURRC-NEXT: v_mov_b64_e32 v[38:39], s[30:31]
+; HEURRC-NEXT: v_mov_b64_e32 v[40:41], s[26:27]
+; HEURRC-NEXT: v_mov_b64_e32 v[38:39], s[24:25]
+; HEURRC-NEXT: v_mov_b64_e32 v[44:45], s[30:31]
; HEURRC-NEXT: v_mov_b64_e32 v[30:31], s[22:23]
-; HEURRC-NEXT: v_mov_b64_e32 v[36:37], s[28:29]
+; HEURRC-NEXT: v_mov_b64_e32 v[42:43], s[28:29]
; HEURRC-NEXT: v_mov_b64_e32 v[28:29], s[20:21]
; HEURRC-NEXT: v_mov_b64_e32 v[26:27], s[18:19]
; HEURRC-NEXT: v_mov_b64_e32 v[24:25], s[16:17]
@@ -1606,41 +1602,41 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__vgprcd(<8 x half> %arg0,
; HEURRC-NEXT: v_mov_b64_e32 v[20:21], s[12:13]
; HEURRC-NEXT: v_mov_b64_e32 v[18:19], s[10:11]
; HEURRC-NEXT: v_mov_b64_e32 v[16:17], s[8:9]
-; HEURRC-NEXT: v_mov_b32_e32 v40, s20
-; HEURRC-NEXT: v_mov_b32_e32 v41, s21
-; HEURRC-NEXT: v_mfma_f32_32x32x16_f16 v[0:15], v[32:35], v[36:39], v[16:31]
-; HEURRC-NEXT: v_mov_b32_e32 v42, s22
-; HEURRC-NEXT: v_mov_b32_e32 v43, s23
-; HEURRC-NEXT: global_store_dwordx4 v44, v[40:43], s[0:1] offset:48 sc0 sc1
+; HEURRC-NEXT: v_mov_b32_e32 v32, s20
+; HEURRC-NEXT: v_mov_b32_e32 v33, s21
+; HEURRC-NEXT: v_mfma_f32_32x32x16_f16 v[0:15], v[38:41], v[42:45], v[16:31]
+; HEURRC-NEXT: v_mov_b32_e32 v34, s22
+; HEURRC-NEXT: v_mov_b32_e32 v35, s23
+; HEURRC-NEXT: global_store_dwordx4 v36, v[32:35], s[0:1] offset:48 sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
; HEURRC-NEXT: s_nop 2
; HEURRC-NEXT: v_mov_b32_e32 v16, s16
; HEURRC-NEXT: v_mov_b32_e32 v17, s17
; HEURRC-NEXT: v_mov_b32_e32 v18, s18
; HEURRC-NEXT: v_mov_b32_e32 v19, s19
-; HEURRC-NEXT: global_store_dwordx4 v44, v[16:19], s[0:1] offset:32 sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v36, v[16:19], s[0:1] offset:32 sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
; HEURRC-NEXT: s_nop 0
; HEURRC-NEXT: v_mov_b32_e32 v16, s12
; HEURRC-NEXT: v_mov_b32_e32 v17, s13
; HEURRC-NEXT: v_mov_b32_e32 v18, s14
; HEURRC-NEXT: v_mov_b32_e32 v19, s15
-; HEURRC-NEXT: global_store_dwordx4 v44, v[16:19], s[0:1] offset:16 sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v36, v[16:19], s[0:1] offset:16 sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
; HEURRC-NEXT: s_nop 0
; HEURRC-NEXT: v_mov_b32_e32 v16, s8
; HEURRC-NEXT: v_mov_b32_e32 v17, s9
; HEURRC-NEXT: v_mov_b32_e32 v18, s10
; HEURRC-NEXT: v_mov_b32_e32 v19, s11
-; HEURRC-NEXT: global_store_dwordx4 v44, v[16:19], s[0:1] sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v36, v[16:19], s[0:1] sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: global_store_dwordx4 v44, v[8:11], s[0:1] offset:32 sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v36, v[8:11], s[0:1] offset:32 sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: global_store_dwordx4 v44, v[12:15], s[0:1] offset:48 sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v36, v[12:15], s[0:1] offset:48 sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: global_store_dwordx4 v44, v[0:3], s[0:1] sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v36, v[0:3], s[0:1] sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: global_store_dwordx4 v44, v[4:7], s[0:1] offset:16 sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v36, v[4:7], s[0:1] offset:16 sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
; HEURRC-NEXT: s_endpgm
;
@@ -1649,13 +1645,13 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__vgprcd(<8 x half> %arg0,
; VGPRRC-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
; VGPRRC-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
; VGPRRC-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xa4
-; VGPRRC-NEXT: v_mov_b32_e32 v44, 0
+; VGPRRC-NEXT: v_mov_b32_e32 v36, 0
; VGPRRC-NEXT: s_waitcnt lgkmcnt(0)
-; VGPRRC-NEXT: v_mov_b64_e32 v[34:35], s[26:27]
-; VGPRRC-NEXT: v_mov_b64_e32 v[32:33], s[24:25]
-; VGPRRC-NEXT: v_mov_b64_e32 v[38:39], s[30:31]
+; VGPRRC-NEXT: v_mov_b64_e32 v[40:41], s[26:27]
+; VGPRRC-NEXT: v_mov_b64_e32 v[38:39], s[24:25]
+; VGPRRC-NEXT: v_mov_b64_e32 v[44:45], s[30:31]
; VGPRRC-NEXT: v_mov_b64_e32 v[30:31], s[22:23]
-; VGPRRC-NEXT: v_mov_b64_e32 v[36:37], s[28:29]
+; VGPRRC-NEXT: v_mov_b64_e32 v[42:43], s[28:29]
; VGPRRC-NEXT: v_mov_b64_e32 v[28:29], s[20:21]
; VGPRRC-NEXT: v_mov_b64_e32 v[26:27], s[18:19]
; VGPRRC-NEXT: v_mov_b64_e32 v[24:25], s[16:17]
@@ -1663,41 +1659,41 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__vgprcd(<8 x half> %arg0,
; VGPRRC-NEXT: v_mov_b64_e32 v[20:21], s[12:13]
; VGPRRC-NEXT: v_mov_b64_e32 v[18:19], s[10:11]
; VGPRRC-NEXT: v_mov_b64_e32 v[16:17], s[8:9]
-; VGPRRC-NEXT: v_mov_b32_e32 v40, s20
-; VGPRRC-NEXT: v_mov_b32_e32 v41, s21
-; VGPRRC-NEXT: v_mfma_f32_32x32x16_f16 v[0:15], v[32:35], v[36:39], v[16:31]
-; VGPRRC-NEXT: v_mov_b32_e32 v42, s22
-; VGPRRC-NEXT: v_mov_b32_e32 v43, s23
-; VGPRRC-NEXT: global_store_dwordx4 v44, v[40:43], s[0:1] offset:48 sc0 sc1
+; VGPRRC-NEXT: v_mov_b32_e32 v32, s20
+; VGPRRC-NEXT: v_mov_b32_e32 v33, s21
+; VGPRRC-NEXT: v_mfma_f32_32x32x16_f16 v[0:15], v[38:41], v[42:45], v[16:31]
+; VGPRRC-NEXT: v_mov_b32_e32 v34, s22
+; VGPRRC-NEXT: v_mov_b32_e32 v35, s23
+; VGPRRC-NEXT: global_store_dwordx4 v36, v[32:35], s[0:1] offset:48 sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: s_nop 2
; VGPRRC-NEXT: v_mov_b32_e32 v16, s16
; VGPRRC-NEXT: v_mov_b32_e32 v17, s17
; VGPRRC-NEXT: v_mov_b32_e32 v18, s18
; VGPRRC-NEXT: v_mov_b32_e32 v19, s19
-; VGPRRC-NEXT: global_store_dwordx4 v44, v[16:19], s[0:1] offset:32 sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v36, v[16:19], s[0:1] offset:32 sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: s_nop 0
; VGPRRC-NEXT: v_mov_b32_e32 v16, s12
; VGPRRC-NEXT: v_mov_b32_e32 v17, s13
; VGPRRC-NEXT: v_mov_b32_e32 v18, s14
; VGPRRC-NEXT: v_mov_b32_e32 v19, s15
-; VGPRRC-NEXT: global_store_dwordx4 v44, v[16:19], s[0:1] offset:16 sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v36, v[16:19], s[0:1] offset:16 sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: s_nop 0
; VGPRRC-NEXT: v_mov_b32_e32 v16, s8
; VGPRRC-NEXT: v_mov_b32_e32 v17, s9
; VGPRRC-NEXT: v_mov_b32_e32 v18, s10
; VGPRRC-NEXT: v_mov_b32_e32 v19, s11
-; VGPRRC-NEXT: global_store_dwordx4 v44, v[16:19], s[0:1] sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v36, v[16:19], s[0:1] sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
-; VGPRRC-NEXT: global_store_dwordx4 v44, v[8:11], s[0:1] offset:32 sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v36, v[8:11], s[0:1] offset:32 sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
-; VGPRRC-NEXT: global_store_dwordx4 v44, v[12:15], s[0:1] offset:48 sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v36, v[12:15], s[0:1] offset:48 sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
-; VGPRRC-NEXT: global_store_dwordx4 v44, v[0:3], s[0:1] sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v36, v[0:3], s[0:1] sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
-; VGPRRC-NEXT: global_store_dwordx4 v44, v[4:7], s[0:1] offset:16 sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v36, v[4:7], s[0:1] offset:16 sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: s_endpgm
; AGPR-LABEL: test_mfma_f32_32x32x16_f16__vgprcd:
@@ -1831,13 +1827,13 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__vgprcd__flags(<8 x half>
; SDAG-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
; SDAG-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
; SDAG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xa4
-; SDAG-NEXT: v_mov_b32_e32 v44, 0
+; SDAG-NEXT: v_mov_b32_e32 v36, 0
; SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-NEXT: v_mov_b64_e32 v[34:35], s[26:27]
-; SDAG-NEXT: v_mov_b64_e32 v[32:33], s[24:25]
-; SDAG-NEXT: v_mov_b64_e32 v[38:39], s[30:31]
+; SDAG-NEXT: v_mov_b64_e32 v[40:41], s[26:27]
+; SDAG-NEXT: v_mov_b64_e32 v[38:39], s[24:25]
+; SDAG-NEXT: v_mov_b64_e32 v[44:45], s[30:31]
; SDAG-NEXT: v_mov_b64_e32 v[30:31], s[22:23]
-; SDAG-NEXT: v_mov_b64_e32 v[36:37], s[28:29]
+; SDAG-NEXT: v_mov_b64_e32 v[42:43], s[28:29]
; SDAG-NEXT: v_mov_b64_e32 v[28:29], s[20:21]
; SDAG-NEXT: v_mov_b64_e32 v[26:27], s[18:19]
; SDAG-NEXT: v_mov_b64_e32 v[24:25], s[16:17]
@@ -1845,41 +1841,41 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__vgprcd__flags(<8 x half>
; SDAG-NEXT: v_mov_b64_e32 v[20:21], s[12:13]
; SDAG-NEXT: v_mov_b64_e32 v[18:19], s[10:11]
; SDAG-NEXT: v_mov_b64_e32 v[16:17], s[8:9]
-; SDAG-NEXT: v_mov_b32_e32 v40, s20
-; SDAG-NEXT: v_mov_b32_e32 v41, s21
-; SDAG-NEXT: v_mfma_f32_32x32x16_f16 v[0:15], v[32:35], v[36:39], v[16:31] cbsz:1 abid:2 blgp:3
-; SDAG-NEXT: v_mov_b32_e32 v42, s22
-; SDAG-NEXT: v_mov_b32_e32 v43, s23
-; SDAG-NEXT: global_store_dwordx4 v44, v[40:43], s[0:1] offset:48 sc0 sc1
+; SDAG-NEXT: v_mov_b32_e32 v32, s20
+; SDAG-NEXT: v_mov_b32_e32 v33, s21
+; SDAG-NEXT: v_mfma_f32_32x32x16_f16 v[0:15], v[38:41], v[42:45], v[16:31] cbsz:1 abid:2 blgp:3
+; SDAG-NEXT: v_mov_b32_e32 v34, s22
+; SDAG-NEXT: v_mov_b32_e32 v35, s23
+; SDAG-NEXT: global_store_dwordx4 v36, v[32:35], s[0:1] offset:48 sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: s_nop 2
; SDAG-NEXT: v_mov_b32_e32 v16, s16
; SDAG-NEXT: v_mov_b32_e32 v17, s17
; SDAG-NEXT: v_mov_b32_e32 v18, s18
; SDAG-NEXT: v_mov_b32_e32 v19, s19
-; SDAG-NEXT: global_store_dwordx4 v44, v[16:19], s[0:1] offset:32 sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v36, v[16:19], s[0:1] offset:32 sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: s_nop 0
; SDAG-NEXT: v_mov_b32_e32 v16, s12
; SDAG-NEXT: v_mov_b32_e32 v17, s13
; SDAG-NEXT: v_mov_b32_e32 v18, s14
; SDAG-NEXT: v_mov_b32_e32 v19, s15
-; SDAG-NEXT: global_store_dwordx4 v44, v[16:19], s[0:1] offset:16 sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v36, v[16:19], s[0:1] offset:16 sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: s_nop 0
; SDAG-NEXT: v_mov_b32_e32 v16, s8
; SDAG-NEXT: v_mov_b32_e32 v17, s9
; SDAG-NEXT: v_mov_b32_e32 v18, s10
; SDAG-NEXT: v_mov_b32_e32 v19, s11
-; SDAG-NEXT: global_store_dwordx4 v44, v[16:19], s[0:1] sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v36, v[16:19], s[0:1] sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: global_store_dwordx4 v44, v[8:11], s[0:1] offset:32 sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v36, v[8:11], s[0:1] offset:32 sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: global_store_dwordx4 v44, v[12:15], s[0:1] offset:48 sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v36, v[12:15], s[0:1] offset:48 sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: global_store_dwordx4 v44, v[0:3], s[0:1] sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v36, v[0:3], s[0:1] sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: global_store_dwordx4 v44, v[4:7], s[0:1] offset:16 sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v36, v[4:7], s[0:1] offset:16 sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: s_endpgm
;
@@ -1934,13 +1930,13 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__vgprcd__flags(<8 x half>
; HEURRC-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
; HEURRC-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
; HEURRC-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xa4
-; HEURRC-NEXT: v_mov_b32_e32 v44, 0
+; HEURRC-NEXT: v_mov_b32_e32 v36, 0
; HEURRC-NEXT: s_waitcnt lgkmcnt(0)
-; HEURRC-NEXT: v_mov_b64_e32 v[34:35], s[26:27]
-; HEURRC-NEXT: v_mov_b64_e32 v[32:33], s[24:25]
-; HEURRC-NEXT: v_mov_b64_e32 v[38:39], s[30:31]
+; HEURRC-NEXT: v_mov_b64_e32 v[40:41], s[26:27]
+; HEURRC-NEXT: v_mov_b64_e32 v[38:39], s[24:25]
+; HEURRC-NEXT: v_mov_b64_e32 v[44:45], s[30:31]
; HEURRC-NEXT: v_mov_b64_e32 v[30:31], s[22:23]
-; HEURRC-NEXT: v_mov_b64_e32 v[36:37], s[28:29]
+; HEURRC-NEXT: v_mov_b64_e32 v[42:43], s[28:29]
; HEURRC-NEXT: v_mov_b64_e32 v[28:29], s[20:21]
; HEURRC-NEXT: v_mov_b64_e32 v[26:27], s[18:19]
; HEURRC-NEXT: v_mov_b64_e32 v[24:25], s[16:17]
@@ -1948,41 +1944,41 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__vgprcd__flags(<8 x half>
; HEURRC-NEXT: v_mov_b64_e32 v[20:21], s[12:13]
; HEURRC-NEXT: v_mov_b64_e32 v[18:19], s[10:11]
; HEURRC-NEXT: v_mov_b64_e32 v[16:17], s[8:9]
-; HEURRC-NEXT: v_mov_b32_e32 v40, s20
-; HEURRC-NEXT: v_mov_b32_e32 v41, s21
-; HEURRC-NEXT: v_mfma_f32_32x32x16_f16 v[0:15], v[32:35], v[36:39], v[16:31] cbsz:1 abid:2 blgp:3
-; HEURRC-NEXT: v_mov_b32_e32 v42, s22
-; HEURRC-NEXT: v_mov_b32_e32 v43, s23
-; HEURRC-NEXT: global_store_dwordx4 v44, v[40:43], s[0:1] offset:48 sc0 sc1
+; HEURRC-NEXT: v_mov_b32_e32 v32, s20
+; HEURRC-NEXT: v_mov_b32_e32 v33, s21
+; HEURRC-NEXT: v_mfma_f32_32x32x16_f16 v[0:15], v[38:41], v[42:45], v[16:31] cbsz:1 abid:2 blgp:3
+; HEURRC-NEXT: v_mov_b32_e32 v34, s22
+; HEURRC-NEXT: v_mov_b32_e32 v35, s23
+; HEURRC-NEXT: global_store_dwordx4 v36, v[32:35], s[0:1] offset:48 sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
; HEURRC-NEXT: s_nop 2
; HEURRC-NEXT: v_mov_b32_e32 v16, s16
; HEURRC-NEXT: v_mov_b32_e32 v17, s17
; HEURRC-NEXT: v_mov_b32_e32 v18, s18
; HEURRC-NEXT: v_mov_b32_e32 v19, s19
-; HEURRC-NEXT: global_store_dwordx4 v44, v[16:19], s[0:1] offset:32 sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v36, v[16:19], s[0:1] offset:32 sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
; HEURRC-NEXT: s_nop 0
; HEURRC-NEXT: v_mov_b32_e32 v16, s12
; HEURRC-NEXT: v_mov_b32_e32 v17, s13
; HEURRC-NEXT: v_mov_b32_e32 v18, s14
; HEURRC-NEXT: v_mov_b32_e32 v19, s15
-; HEURRC-NEXT: global_store_dwordx4 v44, v[16:19], s[0:1] offset:16 sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v36, v[16:19], s[0:1] offset:16 sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
; HEURRC-NEXT: s_nop 0
; HEURRC-NEXT: v_mov_b32_e32 v16, s8
; HEURRC-NEXT: v_mov_b32_e32 v17, s9
; HEURRC-NEXT: v_mov_b32_e32 v18, s10
; HEURRC-NEXT: v_mov_b32_e32 v19, s11
-; HEURRC-NEXT: global_store_dwordx4 v44, v[16:19], s[0:1] sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v36, v[16:19], s[0:1] sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: global_store_dwordx4 v44, v[8:11], s[0:1] offset:32 sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v36, v[8:11], s[0:1] offset:32 sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: global_store_dwordx4 v44, v[12:15], s[0:1] offset:48 sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v36, v[12:15], s[0:1] offset:48 sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: global_store_dwordx4 v44, v[0:3], s[0:1] sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v36, v[0:3], s[0:1] sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: global_store_dwordx4 v44, v[4:7], s[0:1] offset:16 sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v36, v[4:7], s[0:1] offset:16 sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
; HEURRC-NEXT: s_endpgm
;
@@ -1991,13 +1987,13 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__vgprcd__flags(<8 x half>
; VGPRRC-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
; VGPRRC-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
; VGPRRC-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xa4
-; VGPRRC-NEXT: v_mov_b32_e32 v44, 0
+; VGPRRC-NEXT: v_mov_b32_e32 v36, 0
; VGPRRC-NEXT: s_waitcnt lgkmcnt(0)
-; VGPRRC-NEXT: v_mov_b64_e32 v[34:35], s[26:27]
-; VGPRRC-NEXT: v_mov_b64_e32 v[32:33], s[24:25]
-; VGPRRC-NEXT: v_mov_b64_e32 v[38:39], s[30:31]
+; VGPRRC-NEXT: v_mov_b64_e32 v[40:41], s[26:27]
+; VGPRRC-NEXT: v_mov_b64_e32 v[38:39], s[24:25]
+; VGPRRC-NEXT: v_mov_b64_e32 v[44:45], s[30:31]
; VGPRRC-NEXT: v_mov_b64_e32 v[30:31], s[22:23]
-; VGPRRC-NEXT: v_mov_b64_e32 v[36:37], s[28:29]
+; VGPRRC-NEXT: v_mov_b64_e32 v[42:43], s[28:29]
; VGPRRC-NEXT: v_mov_b64_e32 v[28:29], s[20:21]
; VGPRRC-NEXT: v_mov_b64_e32 v[26:27], s[18:19]
; VGPRRC-NEXT: v_mov_b64_e32 v[24:25], s[16:17]
@@ -2005,41 +2001,41 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__vgprcd__flags(<8 x half>
; VGPRRC-NEXT: v_mov_b64_e32 v[20:21], s[12:13]
; VGPRRC-NEXT: v_mov_b64_e32 v[18:19], s[10:11]
; VGPRRC-NEXT: v_mov_b64_e32 v[16:17], s[8:9]
-; VGPRRC-NEXT: v_mov_b32_e32 v40, s20
-; VGPRRC-NEXT: v_mov_b32_e32 v41, s21
-; VGPRRC-NEXT: v_mfma_f32_32x32x16_f16 v[0:15], v[32:35], v[36:39], v[16:31] cbsz:1 abid:2 blgp:3
-; VGPRRC-NEXT: v_mov_b32_e32 v42, s22
-; VGPRRC-NEXT: v_mov_b32_e32 v43, s23
-; VGPRRC-NEXT: global_store_dwordx4 v44, v[40:43], s[0:1] offset:48 sc0 sc1
+; VGPRRC-NEXT: v_mov_b32_e32 v32, s20
+; VGPRRC-NEXT: v_mov_b32_e32 v33, s21
+; VGPRRC-NEXT: v_mfma_f32_32x32x16_f16 v[0:15], v[38:41], v[42:45], v[16:31] cbsz:1 abid:2 blgp:3
+; VGPRRC-NEXT: v_mov_b32_e32 v34, s22
+; VGPRRC-NEXT: v_mov_b32_e32 v35, s23
+; VGPRRC-NEXT: global_store_dwordx4 v36, v[32:35], s[0:1] offset:48 sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: s_nop 2
; VGPRRC-NEXT: v_mov_b32_e32 v16, s16
; VGPRRC-NEXT: v_mov_b32_e32 v17, s17
; VGPRRC-NEXT: v_mov_b32_e32 v18, s18
; VGPRRC-NEXT: v_mov_b32_e32 v19, s19
-; VGPRRC-NEXT: global_store_dwordx4 v44, v[16:19], s[0:1] offset:32 sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v36, v[16:19], s[0:1] offset:32 sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: s_nop 0
; VGPRRC-NEXT: v_mov_b32_e32 v16, s12
; VGPRRC-NEXT: v_mov_b32_e32 v17, s13
; VGPRRC-NEXT: v_mov_b32_e32 v18, s14
; VGPRRC-NEXT: v_mov_b32_e32 v19, s15
-; VGPRRC-NEXT: global_store_dwordx4 v44, v[16:19], s[0:1] offset:16 sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v36, v[16:19], s[0:1] offset:16 sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: s_nop 0
; VGPRRC-NEXT: v_mov_b32_e32 v16, s8
; VGPRRC-NEXT: v_mov_b32_e32 v17, s9
; VGPRRC-NEXT: v_mov_b32_e32 v18, s10
; VGPRRC-NEXT: v_mov_b32_e32 v19, s11
-; VGPRRC-NEXT: global_store_dwordx4 v44, v[16:19], s[0:1] sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v36, v[16:19], s[0:1] sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
-; VGPRRC-NEXT: global_store_dwordx4 v44, v[8:11], s[0:1] offset:32 sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v36, v[8:11], s[0:1] offset:32 sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
-; VGPRRC-NEXT: global_store_dwordx4 v44, v[12:15], s[0:1] offset:48 sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v36, v[12:15], s[0:1] offset:48 sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
-; VGPRRC-NEXT: global_store_dwordx4 v44, v[0:3], s[0:1] sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v36, v[0:3], s[0:1] sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
-; VGPRRC-NEXT: global_store_dwordx4 v44, v[4:7], s[0:1] offset:16 sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v36, v[4:7], s[0:1] offset:16 sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: s_endpgm
; AGPR-LABEL: test_mfma_f32_32x32x16_f16__vgprcd__flags:
@@ -5425,18 +5421,18 @@ define amdgpu_kernel void @test_mfma_f32_16x16x32_bf16_no_agpr__vgprcd(ptr addrs
; GCN-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
; GCN-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
-; GCN-NEXT: v_mov_b32_e32 v12, 0
+; GCN-NEXT: v_mov_b32_e32 v4, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
-; GCN-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
-; GCN-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
-; GCN-NEXT: v_mov_b64_e32 v[10:11], s[2:3]
-; GCN-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
-; GCN-NEXT: v_mov_b64_e32 v[8:9], s[0:1]
+; GCN-NEXT: v_mov_b64_e32 v[6:7], s[8:9]
+; GCN-NEXT: v_mov_b64_e32 v[8:9], s[10:11]
+; GCN-NEXT: v_mov_b64_e32 v[10:11], s[12:13]
+; GCN-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; GCN-NEXT: v_mov_b64_e32 v[12:13], s[14:15]
+; GCN-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GCN-NEXT: s_nop 1
-; GCN-NEXT: v_mfma_f32_16x16x32_bf16 v[0:3], v[0:3], v[4:7], v[8:11]
+; GCN-NEXT: v_mfma_f32_16x16x32_bf16 v[0:3], v[6:9], v[10:13], v[0:3]
; GCN-NEXT: s_nop 7
-; GCN-NEXT: global_store_dwordx4 v12, v[0:3], s[6:7]
+; GCN-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
; GCN-NEXT: s_endpgm
;
; HEURRC-LABEL: test_mfma_f32_16x16x32_bf16_no_agpr__vgprcd:
@@ -5444,18 +5440,18 @@ define amdgpu_kernel void @test_mfma_f32_16x16x32_bf16_no_agpr__vgprcd(ptr addrs
; HEURRC-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
; HEURRC-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
; HEURRC-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
-; HEURRC-NEXT: v_mov_b32_e32 v12, 0
+; HEURRC-NEXT: v_mov_b32_e32 v4, 0
; HEURRC-NEXT: s_waitcnt lgkmcnt(0)
-; HEURRC-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
-; HEURRC-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
-; HEURRC-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
-; HEURRC-NEXT: v_mov_b64_e32 v[10:11], s[2:3]
-; HEURRC-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
-; HEURRC-NEXT: v_mov_b64_e32 v[8:9], s[0:1]
+; HEURRC-NEXT: v_mov_b64_e32 v[6:7], s[8:9]
+; HEURRC-NEXT: v_mov_b64_e32 v[8:9], s[10:11]
+; HEURRC-NEXT: v_mov_b64_e32 v[10:11], s[12:13]
+; HEURRC-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; HEURRC-NEXT: v_mov_b64_e32 v[12:13], s[14:15]
+; HEURRC-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; HEURRC-NEXT: s_nop 1
-; HEURRC-NEXT: v_mfma_f32_16x16x32_bf16 v[0:3], v[0:3], v[4:7], v[8:11]
+; HEURRC-NEXT: v_mfma_f32_16x16x32_bf16 v[0:3], v[6:9], v[10:13], v[0:3]
; HEURRC-NEXT: s_nop 7
-; HEURRC-NEXT: global_store_dwordx4 v12, v[0:3], s[6:7]
+; HEURRC-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
; HEURRC-NEXT: s_endpgm
;
; VGPRRC-LABEL: test_mfma_f32_16x16x32_bf16_no_agpr__vgprcd:
@@ -5463,18 +5459,18 @@ define amdgpu_kernel void @test_mfma_f32_16x16x32_bf16_no_agpr__vgprcd(ptr addrs
; VGPRRC-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
; VGPRRC-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
; VGPRRC-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
-; VGPRRC-NEXT: v_mov_b32_e32 v12, 0
+; VGPRRC-NEXT: v_mov_b32_e32 v4, 0
; VGPRRC-NEXT: s_waitcnt lgkmcnt(0)
-; VGPRRC-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
-; VGPRRC-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
-; VGPRRC-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
-; VGPRRC-NEXT: v_mov_b64_e32 v[10:11], s[2:3]
-; VGPRRC-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
-; VGPRRC-NEXT: v_mov_b64_e32 v[8:9], s[0:1]
+; VGPRRC-NEXT: v_mov_b64_e32 v[6:7], s[8:9]
+; VGPRRC-NEXT: v_mov_b64_e32 v[8:9], s[10:11]
+; VGPRRC-NEXT: v_mov_b64_e32 v[10:11], s[12:13]
+; VGPRRC-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; VGPRRC-NEXT: v_mov_b64_e32 v[12:13], s[14:15]
+; VGPRRC-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; VGPRRC-NEXT: s_nop 1
-; VGPRRC-NEXT: v_mfma_f32_16x16x32_bf16 v[0:3], v[0:3], v[4:7], v[8:11]
+; VGPRRC-NEXT: v_mfma_f32_16x16x32_bf16 v[0:3], v[6:9], v[10:13], v[0:3]
; VGPRRC-NEXT: s_nop 7
-; VGPRRC-NEXT: global_store_dwordx4 v12, v[0:3], s[6:7]
+; VGPRRC-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
; VGPRRC-NEXT: s_endpgm
; AGPR-LABEL: test_mfma_f32_16x16x32_bf16_no_agpr__vgprcd:
; AGPR: ; %bb.0:
@@ -5525,18 +5521,18 @@ define amdgpu_kernel void @test_mfma_f32_16x16x32_bf16_no_agpr__vgprcd__flags(pt
; GCN-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
; GCN-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
-; GCN-NEXT: v_mov_b32_e32 v12, 0
+; GCN-NEXT: v_mov_b32_e32 v4, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
-; GCN-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
-; GCN-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
-; GCN-NEXT: v_mov_b64_e32 v[10:11], s[2:3]
-; GCN-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
-; GCN-NEXT: v_mov_b64_e32 v[8:9], s[0:1]
+; GCN-NEXT: v_mov_b64_e32 v[6:7], s[8:9]
+; GCN-NEXT: v_mov_b64_e32 v[8:9], s[10:11]
+; GCN-NEXT: v_mov_b64_e32 v[10:11], s[12:13]
+; GCN-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; GCN-NEXT: v_mov_b64_e32 v[12:13], s[14:15]
+; GCN-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GCN-NEXT: s_nop 1
-; GCN-NEXT: v_mfma_f32_16x16x32_bf16 v[0:3], v[0:3], v[4:7], v[8:11] cbsz:3 abid:2 blgp:1
+; GCN-NEXT: v_mfma_f32_16x16x32_bf16 v[0:3], v[6:9], v[10:13], v[0:3] cbsz:3 abid:2 blgp:1
; GCN-NEXT: s_nop 7
-; GCN-NEXT: global_store_dwordx4 v12, v[0:3], s[6:7]
+; GCN-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
; GCN-NEXT: s_endpgm
;
; HEURRC-LABEL: test_mfma_f32_16x16x32_bf16_no_agpr__vgprcd__flags:
@@ -5544,18 +5540,18 @@ define amdgpu_kernel void @test_mfma_f32_16x16x32_bf16_no_agpr__vgprcd__flags(pt
; HEURRC-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
; HEURRC-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
; HEURRC-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
-; HEURRC-NEXT: v_mov_b32_e32 v12, 0
+; HEURRC-NEXT: v_mov_b32_e32 v4, 0
; HEURRC-NEXT: s_waitcnt lgkmcnt(0)
-; HEURRC-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
-; HEURRC-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
-; HEURRC-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
-; HEURRC-NEXT: v_mov_b64_e32 v[10:11], s[2:3]
-; HEURRC-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
-; HEURRC-NEXT: v_mov_b64_e32 v[8:9], s[0:1]
+; HEURRC-NEXT: v_mov_b64_e32 v[6:7], s[8:9]
+; HEURRC-NEXT: v_mov_b64_e32 v[8:9], s[10:11]
+; HEURRC-NEXT: v_mov_b64_e32 v[10:11], s[12:13]
+; HEURRC-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; HEURRC-NEXT: v_mov_b64_e32 v[12:13], s[14:15]
+; HEURRC-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; HEURRC-NEXT: s_nop 1
-; HEURRC-NEXT: v_mfma_f32_16x16x32_bf16 v[0:3], v[0:3], v[4:7], v[8:11] cbsz:3 abid:2 blgp:1
+; HEURRC-NEXT: v_mfma_f32_16x16x32_bf16 v[0:3], v[6:9], v[10:13], v[0:3] cbsz:3 abid:2 blgp:1
; HEURRC-NEXT: s_nop 7
-; HEURRC-NEXT: global_store_dwordx4 v12, v[0:3], s[6:7]
+; HEURRC-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
; HEURRC-NEXT: s_endpgm
;
; VGPRRC-LABEL: test_mfma_f32_16x16x32_bf16_no_agpr__vgprcd__flags:
@@ -5563,18 +5559,18 @@ define amdgpu_kernel void @test_mfma_f32_16x16x32_bf16_no_agpr__vgprcd__flags(pt
; VGPRRC-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
; VGPRRC-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
; VGPRRC-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
-; VGPRRC-NEXT: v_mov_b32_e32 v12, 0
+; VGPRRC-NEXT: v_mov_b32_e32 v4, 0
; VGPRRC-NEXT: s_waitcnt lgkmcnt(0)
-; VGPRRC-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
-; VGPRRC-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
-; VGPRRC-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
-; VGPRRC-NEXT: v_mov_b64_e32 v[10:11], s[2:3]
-; VGPRRC-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
-; VGPRRC-NEXT: v_mov_b64_e32 v[8:9], s[0:1]
+; VGPRRC-NEXT: v_mov_b64_e32 v[6:7], s[8:9]
+; VGPRRC-NEXT: v_mov_b64_e32 v[8:9], s[10:11]
+; VGPRRC-NEXT: v_mov_b64_e32 v[10:11], s[12:13]
+; VGPRRC-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; VGPRRC-NEXT: v_mov_b64_e32 v[12:13], s[14:15]
+; VGPRRC-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; VGPRRC-NEXT: s_nop 1
-; VGPRRC-NEXT: v_mfma_f32_16x16x32_bf16 v[0:3], v[0:3], v[4:7], v[8:11] cbsz:3 abid:2 blgp:1
+; VGPRRC-NEXT: v_mfma_f32_16x16x32_bf16 v[0:3], v[6:9], v[10:13], v[0:3] cbsz:3 abid:2 blgp:1
; VGPRRC-NEXT: s_nop 7
-; VGPRRC-NEXT: global_store_dwordx4 v12, v[0:3], s[6:7]
+; VGPRRC-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
; VGPRRC-NEXT: s_endpgm
; AGPR-LABEL: test_mfma_f32_16x16x32_bf16_no_agpr__vgprcd__flags:
; AGPR: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.smfmac.gfx950.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.smfmac.gfx950.ll
index 6eb9449..ee11b92 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.smfmac.gfx950.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.smfmac.gfx950.ll
@@ -17,24 +17,24 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x64_f16__vgpr(ptr addrspace(1) %
; SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x34
; SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; SDAG-NEXT: v_lshlrev_b32_e32 v0, 4, v0
-; SDAG-NEXT: v_mov_b32_e32 v16, 0
+; SDAG-NEXT: v_mov_b32_e32 v4, 0
; SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-NEXT: global_load_dwordx4 v[8:11], v0, s[6:7]
+; SDAG-NEXT: global_load_dwordx4 v[0:3], v0, s[6:7]
; SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x44
; SDAG-NEXT: s_load_dword s16, s[4:5], 0x64
-; SDAG-NEXT: v_mov_b64_e32 v[14:15], s[2:3]
-; SDAG-NEXT: v_mov_b64_e32 v[12:13], s[0:1]
+; SDAG-NEXT: v_mov_b64_e32 v[16:17], s[2:3]
+; SDAG-NEXT: v_mov_b64_e32 v[14:15], s[0:1]
; SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
-; SDAG-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
-; SDAG-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
-; SDAG-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
-; SDAG-NEXT: v_mov_b32_e32 v17, s16
+; SDAG-NEXT: v_mov_b64_e32 v[6:7], s[8:9]
+; SDAG-NEXT: v_mov_b64_e32 v[8:9], s[10:11]
+; SDAG-NEXT: v_mov_b64_e32 v[10:11], s[12:13]
+; SDAG-NEXT: v_mov_b64_e32 v[12:13], s[14:15]
+; SDAG-NEXT: v_mov_b32_e32 v5, s16
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: s_nop 0
-; SDAG-NEXT: v_smfmac_f32_16x16x64_f16 v[8:11], v[12:15], v[0:7], v17 cbsz:1 abid:2
+; SDAG-NEXT: v_smfmac_f32_16x16x64_f16 v[0:3], v[14:17], v[6:13], v5 cbsz:1 abid:2
; SDAG-NEXT: s_nop 7
-; SDAG-NEXT: global_store_dwordx4 v16, v[8:11], s[6:7]
+; SDAG-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
; SDAG-NEXT: s_endpgm
;
; GISEL-LABEL: test_smfmac_f32_16x16x64_f16__vgpr:
@@ -120,30 +120,25 @@ define <4 x float> @test_smfmac_f32_16x16x64_f16__sgpr(<8 x half> inreg %arg0, <
; SDAG-LABEL: test_smfmac_f32_16x16x64_f16__sgpr:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT: v_mov_b32_e32 v10, s0
-; SDAG-NEXT: v_mov_b32_e32 v11, s1
-; SDAG-NEXT: v_mov_b32_e32 v12, s2
-; SDAG-NEXT: v_mov_b32_e32 v13, s3
-; SDAG-NEXT: v_mov_b32_e32 v2, s16
-; SDAG-NEXT: v_mov_b32_e32 v3, s17
-; SDAG-NEXT: v_mov_b32_e32 v4, s18
-; SDAG-NEXT: v_mov_b32_e32 v5, s19
-; SDAG-NEXT: v_mov_b32_e32 v6, s20
-; SDAG-NEXT: v_mov_b32_e32 v7, s21
-; SDAG-NEXT: v_mov_b32_e32 v8, s22
-; SDAG-NEXT: v_mov_b32_e32 v9, s23
-; SDAG-NEXT: v_accvgpr_write_b32 a0, s24
-; SDAG-NEXT: v_accvgpr_write_b32 a1, s25
-; SDAG-NEXT: v_accvgpr_write_b32 a2, s26
-; SDAG-NEXT: v_accvgpr_write_b32 a3, s27
-; SDAG-NEXT: v_mov_b32_e32 v0, s28
+; SDAG-NEXT: v_mov_b32_e32 v14, s0
+; SDAG-NEXT: v_mov_b32_e32 v15, s1
+; SDAG-NEXT: v_mov_b32_e32 v16, s2
+; SDAG-NEXT: v_mov_b32_e32 v17, s3
+; SDAG-NEXT: v_mov_b32_e32 v6, s16
+; SDAG-NEXT: v_mov_b32_e32 v7, s17
+; SDAG-NEXT: v_mov_b32_e32 v8, s18
+; SDAG-NEXT: v_mov_b32_e32 v9, s19
+; SDAG-NEXT: v_mov_b32_e32 v10, s20
+; SDAG-NEXT: v_mov_b32_e32 v11, s21
+; SDAG-NEXT: v_mov_b32_e32 v12, s22
+; SDAG-NEXT: v_mov_b32_e32 v13, s23
+; SDAG-NEXT: v_mov_b32_e32 v0, s24
+; SDAG-NEXT: v_mov_b32_e32 v1, s25
+; SDAG-NEXT: v_mov_b32_e32 v2, s26
+; SDAG-NEXT: v_mov_b32_e32 v3, s27
+; SDAG-NEXT: v_mov_b32_e32 v4, s28
; SDAG-NEXT: s_nop 1
-; SDAG-NEXT: v_smfmac_f32_16x16x64_f16 a[0:3], v[10:13], v[2:9], v0
-; SDAG-NEXT: s_nop 7
-; SDAG-NEXT: v_accvgpr_read_b32 v0, a0
-; SDAG-NEXT: v_accvgpr_read_b32 v1, a1
-; SDAG-NEXT: v_accvgpr_read_b32 v2, a2
-; SDAG-NEXT: v_accvgpr_read_b32 v3, a3
+; SDAG-NEXT: v_smfmac_f32_16x16x64_f16 v[0:3], v[14:17], v[6:13], v4
; SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: test_smfmac_f32_16x16x64_f16__sgpr:
@@ -187,17 +182,17 @@ define amdgpu_kernel void @test_smfmac_f32_32x32x32_f16__vgpr(ptr addrspace(1) %
; SDAG-NEXT: global_load_dwordx4 v[0:3], v16, s[6:7]
; SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x44
; SDAG-NEXT: s_load_dword s16, s[4:5], 0x64
-; SDAG-NEXT: v_mov_b64_e32 v[26:27], s[2:3]
-; SDAG-NEXT: v_mov_b64_e32 v[24:25], s[0:1]
+; SDAG-NEXT: v_mov_b64_e32 v[28:29], s[2:3]
+; SDAG-NEXT: v_mov_b64_e32 v[26:27], s[0:1]
; SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-NEXT: v_mov_b64_e32 v[22:23], s[14:15]
-; SDAG-NEXT: v_mov_b64_e32 v[20:21], s[12:13]
-; SDAG-NEXT: v_mov_b64_e32 v[18:19], s[10:11]
-; SDAG-NEXT: v_mov_b64_e32 v[16:17], s[8:9]
-; SDAG-NEXT: v_mov_b32_e32 v28, s16
+; SDAG-NEXT: v_mov_b64_e32 v[24:25], s[14:15]
+; SDAG-NEXT: v_mov_b64_e32 v[22:23], s[12:13]
+; SDAG-NEXT: v_mov_b64_e32 v[20:21], s[10:11]
+; SDAG-NEXT: v_mov_b64_e32 v[18:19], s[8:9]
+; SDAG-NEXT: v_mov_b32_e32 v16, s16
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: s_nop 0
-; SDAG-NEXT: v_smfmac_f32_32x32x32_f16 v[0:15], v[24:27], v[16:23], v28 cbsz:1 abid:2
+; SDAG-NEXT: v_smfmac_f32_32x32x32_f16 v[0:15], v[26:29], v[18:25], v16 cbsz:1 abid:2
; SDAG-NEXT: v_mov_b32_e32 v16, 0
; SDAG-NEXT: s_nop 10
; SDAG-NEXT: global_store_dwordx4 v16, v[8:11], s[6:7] offset:32
@@ -436,53 +431,37 @@ define <16 x float> @test_smfmac_f32_32x32x32_f16__sgpr(<8 x half> inreg %arg0,
; SDAG-LABEL: test_smfmac_f32_32x32x32_f16__sgpr:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT: v_mov_b32_e32 v36, s0
-; SDAG-NEXT: v_mov_b32_e32 v37, s1
-; SDAG-NEXT: v_mov_b32_e32 v38, s2
-; SDAG-NEXT: v_mov_b32_e32 v39, s3
-; SDAG-NEXT: v_mov_b32_e32 v13, s25
-; SDAG-NEXT: v_mov_b32_e32 v14, s26
-; SDAG-NEXT: v_mov_b32_e32 v15, s27
-; SDAG-NEXT: v_mov_b32_e32 v16, s28
-; SDAG-NEXT: v_mov_b32_e32 v17, s29
-; SDAG-NEXT: v_mov_b32_e32 v28, s16
-; SDAG-NEXT: v_mov_b32_e32 v29, s17
-; SDAG-NEXT: v_mov_b32_e32 v30, s18
-; SDAG-NEXT: v_mov_b32_e32 v31, s19
-; SDAG-NEXT: v_mov_b32_e32 v32, s20
-; SDAG-NEXT: v_mov_b32_e32 v33, s21
-; SDAG-NEXT: v_mov_b32_e32 v34, s22
-; SDAG-NEXT: v_mov_b32_e32 v35, s23
-; SDAG-NEXT: v_mov_b32_e32 v12, s24
-; SDAG-NEXT: v_mov_b32_e32 v18, v0
-; SDAG-NEXT: v_mov_b32_e32 v19, v1
-; SDAG-NEXT: v_mov_b32_e32 v20, v2
-; SDAG-NEXT: v_mov_b32_e32 v21, v3
-; SDAG-NEXT: v_mov_b32_e32 v22, v4
-; SDAG-NEXT: v_mov_b32_e32 v23, v5
-; SDAG-NEXT: v_mov_b32_e32 v24, v6
-; SDAG-NEXT: v_mov_b32_e32 v25, v7
-; SDAG-NEXT: v_mov_b32_e32 v26, v8
-; SDAG-NEXT: v_mov_b32_e32 v27, v9
+; SDAG-NEXT: v_mov_b32_e32 v26, s0
+; SDAG-NEXT: v_mov_b32_e32 v27, s1
+; SDAG-NEXT: v_mov_b32_e32 v28, s2
+; SDAG-NEXT: v_mov_b32_e32 v29, s3
+; SDAG-NEXT: v_mov_b32_e32 v16, v10
+; SDAG-NEXT: v_mov_b32_e32 v15, v9
+; SDAG-NEXT: v_mov_b32_e32 v14, v8
+; SDAG-NEXT: v_mov_b32_e32 v13, v7
+; SDAG-NEXT: v_mov_b32_e32 v12, v6
+; SDAG-NEXT: v_mov_b32_e32 v11, v5
+; SDAG-NEXT: v_mov_b32_e32 v10, v4
+; SDAG-NEXT: v_mov_b32_e32 v9, v3
+; SDAG-NEXT: v_mov_b32_e32 v8, v2
+; SDAG-NEXT: v_mov_b32_e32 v7, v1
+; SDAG-NEXT: v_mov_b32_e32 v6, v0
+; SDAG-NEXT: v_mov_b32_e32 v0, s24
+; SDAG-NEXT: v_mov_b32_e32 v1, s25
+; SDAG-NEXT: v_mov_b32_e32 v2, s26
+; SDAG-NEXT: v_mov_b32_e32 v3, s27
+; SDAG-NEXT: v_mov_b32_e32 v4, s28
+; SDAG-NEXT: v_mov_b32_e32 v5, s29
+; SDAG-NEXT: v_mov_b32_e32 v18, s16
+; SDAG-NEXT: v_mov_b32_e32 v19, s17
+; SDAG-NEXT: v_mov_b32_e32 v20, s18
+; SDAG-NEXT: v_mov_b32_e32 v21, s19
+; SDAG-NEXT: v_mov_b32_e32 v22, s20
+; SDAG-NEXT: v_mov_b32_e32 v23, s21
+; SDAG-NEXT: v_mov_b32_e32 v24, s22
+; SDAG-NEXT: v_mov_b32_e32 v25, s23
; SDAG-NEXT: s_nop 1
-; SDAG-NEXT: v_smfmac_f32_32x32x32_f16 v[12:27], v[36:39], v[28:35], v10
-; SDAG-NEXT: s_nop 11
-; SDAG-NEXT: v_mov_b32_e32 v0, v12
-; SDAG-NEXT: v_mov_b32_e32 v1, v13
-; SDAG-NEXT: v_mov_b32_e32 v2, v14
-; SDAG-NEXT: v_mov_b32_e32 v3, v15
-; SDAG-NEXT: v_mov_b32_e32 v4, v16
-; SDAG-NEXT: v_mov_b32_e32 v5, v17
-; SDAG-NEXT: v_mov_b32_e32 v6, v18
-; SDAG-NEXT: v_mov_b32_e32 v7, v19
-; SDAG-NEXT: v_mov_b32_e32 v8, v20
-; SDAG-NEXT: v_mov_b32_e32 v9, v21
-; SDAG-NEXT: v_mov_b32_e32 v10, v22
-; SDAG-NEXT: v_mov_b32_e32 v11, v23
-; SDAG-NEXT: v_mov_b32_e32 v12, v24
-; SDAG-NEXT: v_mov_b32_e32 v13, v25
-; SDAG-NEXT: v_mov_b32_e32 v14, v26
-; SDAG-NEXT: v_mov_b32_e32 v15, v27
+; SDAG-NEXT: v_smfmac_f32_32x32x32_f16 v[0:15], v[26:29], v[18:25], v16
; SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: test_smfmac_f32_32x32x32_f16__sgpr:
@@ -541,24 +520,24 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x64_bf16__vgpr(ptr addrspace(1)
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x34
; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GCN-NEXT: v_lshlrev_b32_e32 v0, 4, v0
-; GCN-NEXT: v_mov_b32_e32 v16, 0
+; GCN-NEXT: v_mov_b32_e32 v4, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: global_load_dwordx4 v[8:11], v0, s[6:7]
+; GCN-NEXT: global_load_dwordx4 v[0:3], v0, s[6:7]
; GCN-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x44
; GCN-NEXT: s_load_dword s16, s[4:5], 0x64
-; GCN-NEXT: v_mov_b64_e32 v[14:15], s[2:3]
-; GCN-NEXT: v_mov_b64_e32 v[12:13], s[0:1]
+; GCN-NEXT: v_mov_b64_e32 v[16:17], s[2:3]
+; GCN-NEXT: v_mov_b64_e32 v[14:15], s[0:1]
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
-; GCN-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
-; GCN-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
-; GCN-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
-; GCN-NEXT: v_mov_b32_e32 v17, s16
+; GCN-NEXT: v_mov_b64_e32 v[6:7], s[8:9]
+; GCN-NEXT: v_mov_b64_e32 v[8:9], s[10:11]
+; GCN-NEXT: v_mov_b64_e32 v[10:11], s[12:13]
+; GCN-NEXT: v_mov_b64_e32 v[12:13], s[14:15]
+; GCN-NEXT: v_mov_b32_e32 v5, s16
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_nop 0
-; GCN-NEXT: v_smfmac_f32_16x16x64_bf16 v[8:11], v[12:15], v[0:7], v17 cbsz:1 abid:2
+; GCN-NEXT: v_smfmac_f32_16x16x64_bf16 v[0:3], v[14:17], v[6:13], v5 cbsz:1 abid:2
; GCN-NEXT: s_nop 7
-; GCN-NEXT: global_store_dwordx4 v16, v[8:11], s[6:7]
+; GCN-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
; GCN-NEXT: s_endpgm
bb:
%id = call i32 @llvm.amdgcn.workitem.id.x()
@@ -618,30 +597,25 @@ define <4 x float> @test_smfmac_f32_16x16x64_bf16__sgpr(<8 x bfloat> inreg %arg0
; GCN-LABEL: test_smfmac_f32_16x16x64_bf16__sgpr:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_mov_b32_e32 v10, s0
-; GCN-NEXT: v_mov_b32_e32 v11, s1
-; GCN-NEXT: v_mov_b32_e32 v12, s2
-; GCN-NEXT: v_mov_b32_e32 v13, s3
-; GCN-NEXT: v_mov_b32_e32 v2, s16
-; GCN-NEXT: v_mov_b32_e32 v3, s17
-; GCN-NEXT: v_mov_b32_e32 v4, s18
-; GCN-NEXT: v_mov_b32_e32 v5, s19
-; GCN-NEXT: v_mov_b32_e32 v6, s20
-; GCN-NEXT: v_mov_b32_e32 v7, s21
-; GCN-NEXT: v_mov_b32_e32 v8, s22
-; GCN-NEXT: v_mov_b32_e32 v9, s23
-; GCN-NEXT: v_accvgpr_write_b32 a0, s24
-; GCN-NEXT: v_accvgpr_write_b32 a1, s25
-; GCN-NEXT: v_accvgpr_write_b32 a2, s26
-; GCN-NEXT: v_accvgpr_write_b32 a3, s27
-; GCN-NEXT: v_mov_b32_e32 v0, s28
+; GCN-NEXT: v_mov_b32_e32 v14, s0
+; GCN-NEXT: v_mov_b32_e32 v15, s1
+; GCN-NEXT: v_mov_b32_e32 v16, s2
+; GCN-NEXT: v_mov_b32_e32 v17, s3
+; GCN-NEXT: v_mov_b32_e32 v6, s16
+; GCN-NEXT: v_mov_b32_e32 v7, s17
+; GCN-NEXT: v_mov_b32_e32 v8, s18
+; GCN-NEXT: v_mov_b32_e32 v9, s19
+; GCN-NEXT: v_mov_b32_e32 v10, s20
+; GCN-NEXT: v_mov_b32_e32 v11, s21
+; GCN-NEXT: v_mov_b32_e32 v12, s22
+; GCN-NEXT: v_mov_b32_e32 v13, s23
+; GCN-NEXT: v_mov_b32_e32 v0, s24
+; GCN-NEXT: v_mov_b32_e32 v1, s25
+; GCN-NEXT: v_mov_b32_e32 v2, s26
+; GCN-NEXT: v_mov_b32_e32 v3, s27
+; GCN-NEXT: v_mov_b32_e32 v4, s28
; GCN-NEXT: s_nop 1
-; GCN-NEXT: v_smfmac_f32_16x16x64_bf16 a[0:3], v[10:13], v[2:9], v0
-; GCN-NEXT: s_nop 7
-; GCN-NEXT: v_accvgpr_read_b32 v0, a0
-; GCN-NEXT: v_accvgpr_read_b32 v1, a1
-; GCN-NEXT: v_accvgpr_read_b32 v2, a2
-; GCN-NEXT: v_accvgpr_read_b32 v3, a3
+; GCN-NEXT: v_smfmac_f32_16x16x64_bf16 v[0:3], v[14:17], v[6:13], v4
; GCN-NEXT: s_setpc_b64 s[30:31]
%result = call <4 x float> @llvm.amdgcn.smfmac.f32.16x16x64.bf16(<8 x bfloat> %arg0, <16 x bfloat> %arg1, <4 x float> %arg2, i32 %arg3, i32 immarg 0, i32 immarg 0)
ret <4 x float> %result
@@ -667,17 +641,17 @@ define amdgpu_kernel void @test_smfmac_f32_32x32x32_bf16__vgpr(ptr addrspace(1)
; GCN-NEXT: global_load_dwordx4 v[0:3], v16, s[6:7]
; GCN-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x44
; GCN-NEXT: s_load_dword s16, s[4:5], 0x64
-; GCN-NEXT: v_mov_b64_e32 v[26:27], s[2:3]
-; GCN-NEXT: v_mov_b64_e32 v[24:25], s[0:1]
+; GCN-NEXT: v_mov_b64_e32 v[28:29], s[2:3]
+; GCN-NEXT: v_mov_b64_e32 v[26:27], s[0:1]
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: v_mov_b64_e32 v[22:23], s[14:15]
-; GCN-NEXT: v_mov_b64_e32 v[20:21], s[12:13]
-; GCN-NEXT: v_mov_b64_e32 v[18:19], s[10:11]
-; GCN-NEXT: v_mov_b64_e32 v[16:17], s[8:9]
-; GCN-NEXT: v_mov_b32_e32 v28, s16
+; GCN-NEXT: v_mov_b64_e32 v[24:25], s[14:15]
+; GCN-NEXT: v_mov_b64_e32 v[22:23], s[12:13]
+; GCN-NEXT: v_mov_b64_e32 v[20:21], s[10:11]
+; GCN-NEXT: v_mov_b64_e32 v[18:19], s[8:9]
+; GCN-NEXT: v_mov_b32_e32 v16, s16
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_nop 0
-; GCN-NEXT: v_smfmac_f32_32x32x32_bf16 v[0:15], v[24:27], v[16:23], v28 cbsz:1 abid:2
+; GCN-NEXT: v_smfmac_f32_32x32x32_bf16 v[0:15], v[26:29], v[18:25], v16 cbsz:1 abid:2
; GCN-NEXT: v_mov_b32_e32 v16, 0
; GCN-NEXT: s_nop 10
; GCN-NEXT: global_store_dwordx4 v16, v[8:11], s[6:7] offset:32
@@ -779,53 +753,37 @@ define <16 x float> @test_smfmac_f32_32x32x32_bf16__sgpr(<8 x bfloat> inreg %arg
; GCN-LABEL: test_smfmac_f32_32x32x32_bf16__sgpr:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_mov_b32_e32 v36, s0
-; GCN-NEXT: v_mov_b32_e32 v37, s1
-; GCN-NEXT: v_mov_b32_e32 v38, s2
-; GCN-NEXT: v_mov_b32_e32 v39, s3
-; GCN-NEXT: v_mov_b32_e32 v13, s25
-; GCN-NEXT: v_mov_b32_e32 v14, s26
-; GCN-NEXT: v_mov_b32_e32 v15, s27
-; GCN-NEXT: v_mov_b32_e32 v16, s28
-; GCN-NEXT: v_mov_b32_e32 v17, s29
-; GCN-NEXT: v_mov_b32_e32 v28, s16
-; GCN-NEXT: v_mov_b32_e32 v29, s17
-; GCN-NEXT: v_mov_b32_e32 v30, s18
-; GCN-NEXT: v_mov_b32_e32 v31, s19
-; GCN-NEXT: v_mov_b32_e32 v32, s20
-; GCN-NEXT: v_mov_b32_e32 v33, s21
-; GCN-NEXT: v_mov_b32_e32 v34, s22
-; GCN-NEXT: v_mov_b32_e32 v35, s23
-; GCN-NEXT: v_mov_b32_e32 v12, s24
-; GCN-NEXT: v_mov_b32_e32 v18, v0
-; GCN-NEXT: v_mov_b32_e32 v19, v1
-; GCN-NEXT: v_mov_b32_e32 v20, v2
-; GCN-NEXT: v_mov_b32_e32 v21, v3
-; GCN-NEXT: v_mov_b32_e32 v22, v4
-; GCN-NEXT: v_mov_b32_e32 v23, v5
-; GCN-NEXT: v_mov_b32_e32 v24, v6
-; GCN-NEXT: v_mov_b32_e32 v25, v7
-; GCN-NEXT: v_mov_b32_e32 v26, v8
-; GCN-NEXT: v_mov_b32_e32 v27, v9
+; GCN-NEXT: v_mov_b32_e32 v26, s0
+; GCN-NEXT: v_mov_b32_e32 v27, s1
+; GCN-NEXT: v_mov_b32_e32 v28, s2
+; GCN-NEXT: v_mov_b32_e32 v29, s3
+; GCN-NEXT: v_mov_b32_e32 v16, v10
+; GCN-NEXT: v_mov_b32_e32 v15, v9
+; GCN-NEXT: v_mov_b32_e32 v14, v8
+; GCN-NEXT: v_mov_b32_e32 v13, v7
+; GCN-NEXT: v_mov_b32_e32 v12, v6
+; GCN-NEXT: v_mov_b32_e32 v11, v5
+; GCN-NEXT: v_mov_b32_e32 v10, v4
+; GCN-NEXT: v_mov_b32_e32 v9, v3
+; GCN-NEXT: v_mov_b32_e32 v8, v2
+; GCN-NEXT: v_mov_b32_e32 v7, v1
+; GCN-NEXT: v_mov_b32_e32 v6, v0
+; GCN-NEXT: v_mov_b32_e32 v0, s24
+; GCN-NEXT: v_mov_b32_e32 v1, s25
+; GCN-NEXT: v_mov_b32_e32 v2, s26
+; GCN-NEXT: v_mov_b32_e32 v3, s27
+; GCN-NEXT: v_mov_b32_e32 v4, s28
+; GCN-NEXT: v_mov_b32_e32 v5, s29
+; GCN-NEXT: v_mov_b32_e32 v18, s16
+; GCN-NEXT: v_mov_b32_e32 v19, s17
+; GCN-NEXT: v_mov_b32_e32 v20, s18
+; GCN-NEXT: v_mov_b32_e32 v21, s19
+; GCN-NEXT: v_mov_b32_e32 v22, s20
+; GCN-NEXT: v_mov_b32_e32 v23, s21
+; GCN-NEXT: v_mov_b32_e32 v24, s22
+; GCN-NEXT: v_mov_b32_e32 v25, s23
; GCN-NEXT: s_nop 1
-; GCN-NEXT: v_smfmac_f32_32x32x32_bf16 v[12:27], v[36:39], v[28:35], v10
-; GCN-NEXT: s_nop 11
-; GCN-NEXT: v_mov_b32_e32 v0, v12
-; GCN-NEXT: v_mov_b32_e32 v1, v13
-; GCN-NEXT: v_mov_b32_e32 v2, v14
-; GCN-NEXT: v_mov_b32_e32 v3, v15
-; GCN-NEXT: v_mov_b32_e32 v4, v16
-; GCN-NEXT: v_mov_b32_e32 v5, v17
-; GCN-NEXT: v_mov_b32_e32 v6, v18
-; GCN-NEXT: v_mov_b32_e32 v7, v19
-; GCN-NEXT: v_mov_b32_e32 v8, v20
-; GCN-NEXT: v_mov_b32_e32 v9, v21
-; GCN-NEXT: v_mov_b32_e32 v10, v22
-; GCN-NEXT: v_mov_b32_e32 v11, v23
-; GCN-NEXT: v_mov_b32_e32 v12, v24
-; GCN-NEXT: v_mov_b32_e32 v13, v25
-; GCN-NEXT: v_mov_b32_e32 v14, v26
-; GCN-NEXT: v_mov_b32_e32 v15, v27
+; GCN-NEXT: v_smfmac_f32_32x32x32_bf16 v[0:15], v[26:29], v[18:25], v16
; GCN-NEXT: s_setpc_b64 s[30:31]
%result = call <16 x float> @llvm.amdgcn.smfmac.f32.32x32x32.bf16(<8 x bfloat> %arg0, <16 x bfloat> %arg1, <16 x float> %arg2, i32 %arg3, i32 immarg 0, i32 immarg 0)
ret <16 x float> %result
@@ -953,30 +911,25 @@ define <4 x i32> @test_smfmac_i32_16x16x128_i8__sgpr(<4 x i32> inreg %arg0, <8 x
; SDAG-LABEL: test_smfmac_i32_16x16x128_i8__sgpr:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT: v_mov_b32_e32 v10, s0
-; SDAG-NEXT: v_mov_b32_e32 v11, s1
-; SDAG-NEXT: v_mov_b32_e32 v12, s2
-; SDAG-NEXT: v_mov_b32_e32 v13, s3
-; SDAG-NEXT: v_mov_b32_e32 v2, s16
-; SDAG-NEXT: v_mov_b32_e32 v3, s17
-; SDAG-NEXT: v_mov_b32_e32 v4, s18
-; SDAG-NEXT: v_mov_b32_e32 v5, s19
-; SDAG-NEXT: v_mov_b32_e32 v6, s20
-; SDAG-NEXT: v_mov_b32_e32 v7, s21
-; SDAG-NEXT: v_mov_b32_e32 v8, s22
-; SDAG-NEXT: v_mov_b32_e32 v9, s23
-; SDAG-NEXT: v_accvgpr_write_b32 a0, s24
-; SDAG-NEXT: v_accvgpr_write_b32 a1, s25
-; SDAG-NEXT: v_accvgpr_write_b32 a2, s26
-; SDAG-NEXT: v_accvgpr_write_b32 a3, s27
-; SDAG-NEXT: v_mov_b32_e32 v0, s28
+; SDAG-NEXT: v_mov_b32_e32 v14, s0
+; SDAG-NEXT: v_mov_b32_e32 v15, s1
+; SDAG-NEXT: v_mov_b32_e32 v16, s2
+; SDAG-NEXT: v_mov_b32_e32 v17, s3
+; SDAG-NEXT: v_mov_b32_e32 v6, s16
+; SDAG-NEXT: v_mov_b32_e32 v7, s17
+; SDAG-NEXT: v_mov_b32_e32 v8, s18
+; SDAG-NEXT: v_mov_b32_e32 v9, s19
+; SDAG-NEXT: v_mov_b32_e32 v10, s20
+; SDAG-NEXT: v_mov_b32_e32 v11, s21
+; SDAG-NEXT: v_mov_b32_e32 v12, s22
+; SDAG-NEXT: v_mov_b32_e32 v13, s23
+; SDAG-NEXT: v_mov_b32_e32 v0, s24
+; SDAG-NEXT: v_mov_b32_e32 v1, s25
+; SDAG-NEXT: v_mov_b32_e32 v2, s26
+; SDAG-NEXT: v_mov_b32_e32 v3, s27
+; SDAG-NEXT: v_mov_b32_e32 v4, s28
; SDAG-NEXT: s_nop 1
-; SDAG-NEXT: v_smfmac_i32_16x16x128_i8 a[0:3], v[10:13], v[2:9], v0
-; SDAG-NEXT: s_nop 7
-; SDAG-NEXT: v_accvgpr_read_b32 v0, a0
-; SDAG-NEXT: v_accvgpr_read_b32 v1, a1
-; SDAG-NEXT: v_accvgpr_read_b32 v2, a2
-; SDAG-NEXT: v_accvgpr_read_b32 v3, a3
+; SDAG-NEXT: v_smfmac_i32_16x16x128_i8 v[0:3], v[14:17], v[6:13], v4
; SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: test_smfmac_i32_16x16x128_i8__sgpr:
@@ -1275,53 +1228,37 @@ define <16 x i32> @test_smfmac_i32_32x32x64_i8__sgpr(<4 x i32> inreg %arg0, <8 x
; SDAG-LABEL: test_smfmac_i32_32x32x64_i8__sgpr:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT: v_mov_b32_e32 v36, s0
-; SDAG-NEXT: v_mov_b32_e32 v37, s1
-; SDAG-NEXT: v_mov_b32_e32 v38, s2
-; SDAG-NEXT: v_mov_b32_e32 v39, s3
-; SDAG-NEXT: v_mov_b32_e32 v13, s25
-; SDAG-NEXT: v_mov_b32_e32 v14, s26
-; SDAG-NEXT: v_mov_b32_e32 v15, s27
-; SDAG-NEXT: v_mov_b32_e32 v16, s28
-; SDAG-NEXT: v_mov_b32_e32 v17, s29
-; SDAG-NEXT: v_mov_b32_e32 v28, s16
-; SDAG-NEXT: v_mov_b32_e32 v29, s17
-; SDAG-NEXT: v_mov_b32_e32 v30, s18
-; SDAG-NEXT: v_mov_b32_e32 v31, s19
-; SDAG-NEXT: v_mov_b32_e32 v32, s20
-; SDAG-NEXT: v_mov_b32_e32 v33, s21
-; SDAG-NEXT: v_mov_b32_e32 v34, s22
-; SDAG-NEXT: v_mov_b32_e32 v35, s23
-; SDAG-NEXT: v_mov_b32_e32 v12, s24
-; SDAG-NEXT: v_mov_b32_e32 v18, v0
-; SDAG-NEXT: v_mov_b32_e32 v19, v1
-; SDAG-NEXT: v_mov_b32_e32 v20, v2
-; SDAG-NEXT: v_mov_b32_e32 v21, v3
-; SDAG-NEXT: v_mov_b32_e32 v22, v4
-; SDAG-NEXT: v_mov_b32_e32 v23, v5
-; SDAG-NEXT: v_mov_b32_e32 v24, v6
-; SDAG-NEXT: v_mov_b32_e32 v25, v7
-; SDAG-NEXT: v_mov_b32_e32 v26, v8
-; SDAG-NEXT: v_mov_b32_e32 v27, v9
+; SDAG-NEXT: v_mov_b32_e32 v26, s0
+; SDAG-NEXT: v_mov_b32_e32 v27, s1
+; SDAG-NEXT: v_mov_b32_e32 v28, s2
+; SDAG-NEXT: v_mov_b32_e32 v29, s3
+; SDAG-NEXT: v_mov_b32_e32 v16, v10
+; SDAG-NEXT: v_mov_b32_e32 v15, v9
+; SDAG-NEXT: v_mov_b32_e32 v14, v8
+; SDAG-NEXT: v_mov_b32_e32 v13, v7
+; SDAG-NEXT: v_mov_b32_e32 v12, v6
+; SDAG-NEXT: v_mov_b32_e32 v11, v5
+; SDAG-NEXT: v_mov_b32_e32 v10, v4
+; SDAG-NEXT: v_mov_b32_e32 v9, v3
+; SDAG-NEXT: v_mov_b32_e32 v8, v2
+; SDAG-NEXT: v_mov_b32_e32 v7, v1
+; SDAG-NEXT: v_mov_b32_e32 v6, v0
+; SDAG-NEXT: v_mov_b32_e32 v0, s24
+; SDAG-NEXT: v_mov_b32_e32 v1, s25
+; SDAG-NEXT: v_mov_b32_e32 v2, s26
+; SDAG-NEXT: v_mov_b32_e32 v3, s27
+; SDAG-NEXT: v_mov_b32_e32 v4, s28
+; SDAG-NEXT: v_mov_b32_e32 v5, s29
+; SDAG-NEXT: v_mov_b32_e32 v18, s16
+; SDAG-NEXT: v_mov_b32_e32 v19, s17
+; SDAG-NEXT: v_mov_b32_e32 v20, s18
+; SDAG-NEXT: v_mov_b32_e32 v21, s19
+; SDAG-NEXT: v_mov_b32_e32 v22, s20
+; SDAG-NEXT: v_mov_b32_e32 v23, s21
+; SDAG-NEXT: v_mov_b32_e32 v24, s22
+; SDAG-NEXT: v_mov_b32_e32 v25, s23
; SDAG-NEXT: s_nop 1
-; SDAG-NEXT: v_smfmac_i32_32x32x64_i8 v[12:27], v[36:39], v[28:35], v10
-; SDAG-NEXT: s_nop 11
-; SDAG-NEXT: v_mov_b32_e32 v0, v12
-; SDAG-NEXT: v_mov_b32_e32 v1, v13
-; SDAG-NEXT: v_mov_b32_e32 v2, v14
-; SDAG-NEXT: v_mov_b32_e32 v3, v15
-; SDAG-NEXT: v_mov_b32_e32 v4, v16
-; SDAG-NEXT: v_mov_b32_e32 v5, v17
-; SDAG-NEXT: v_mov_b32_e32 v6, v18
-; SDAG-NEXT: v_mov_b32_e32 v7, v19
-; SDAG-NEXT: v_mov_b32_e32 v8, v20
-; SDAG-NEXT: v_mov_b32_e32 v9, v21
-; SDAG-NEXT: v_mov_b32_e32 v10, v22
-; SDAG-NEXT: v_mov_b32_e32 v11, v23
-; SDAG-NEXT: v_mov_b32_e32 v12, v24
-; SDAG-NEXT: v_mov_b32_e32 v13, v25
-; SDAG-NEXT: v_mov_b32_e32 v14, v26
-; SDAG-NEXT: v_mov_b32_e32 v15, v27
+; SDAG-NEXT: v_smfmac_i32_32x32x64_i8 v[0:15], v[26:29], v[18:25], v16
; SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: test_smfmac_i32_32x32x64_i8__sgpr:
@@ -1489,30 +1426,25 @@ define <4 x float> @test_smfmac_f32_16x16x128_bf8_bf8__sgpr(<4 x i32> inreg %arg
; SDAG-LABEL: test_smfmac_f32_16x16x128_bf8_bf8__sgpr:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT: v_mov_b32_e32 v10, s0
-; SDAG-NEXT: v_mov_b32_e32 v11, s1
-; SDAG-NEXT: v_mov_b32_e32 v12, s2
-; SDAG-NEXT: v_mov_b32_e32 v13, s3
-; SDAG-NEXT: v_mov_b32_e32 v2, s16
-; SDAG-NEXT: v_mov_b32_e32 v3, s17
-; SDAG-NEXT: v_mov_b32_e32 v4, s18
-; SDAG-NEXT: v_mov_b32_e32 v5, s19
-; SDAG-NEXT: v_mov_b32_e32 v6, s20
-; SDAG-NEXT: v_mov_b32_e32 v7, s21
-; SDAG-NEXT: v_mov_b32_e32 v8, s22
-; SDAG-NEXT: v_mov_b32_e32 v9, s23
-; SDAG-NEXT: v_accvgpr_write_b32 a0, s24
-; SDAG-NEXT: v_accvgpr_write_b32 a1, s25
-; SDAG-NEXT: v_accvgpr_write_b32 a2, s26
-; SDAG-NEXT: v_accvgpr_write_b32 a3, s27
-; SDAG-NEXT: v_mov_b32_e32 v0, s28
+; SDAG-NEXT: v_mov_b32_e32 v14, s0
+; SDAG-NEXT: v_mov_b32_e32 v15, s1
+; SDAG-NEXT: v_mov_b32_e32 v16, s2
+; SDAG-NEXT: v_mov_b32_e32 v17, s3
+; SDAG-NEXT: v_mov_b32_e32 v6, s16
+; SDAG-NEXT: v_mov_b32_e32 v7, s17
+; SDAG-NEXT: v_mov_b32_e32 v8, s18
+; SDAG-NEXT: v_mov_b32_e32 v9, s19
+; SDAG-NEXT: v_mov_b32_e32 v10, s20
+; SDAG-NEXT: v_mov_b32_e32 v11, s21
+; SDAG-NEXT: v_mov_b32_e32 v12, s22
+; SDAG-NEXT: v_mov_b32_e32 v13, s23
+; SDAG-NEXT: v_mov_b32_e32 v0, s24
+; SDAG-NEXT: v_mov_b32_e32 v1, s25
+; SDAG-NEXT: v_mov_b32_e32 v2, s26
+; SDAG-NEXT: v_mov_b32_e32 v3, s27
+; SDAG-NEXT: v_mov_b32_e32 v4, s28
; SDAG-NEXT: s_nop 1
-; SDAG-NEXT: v_smfmac_f32_16x16x128_bf8_bf8 a[0:3], v[10:13], v[2:9], v0
-; SDAG-NEXT: s_nop 7
-; SDAG-NEXT: v_accvgpr_read_b32 v0, a0
-; SDAG-NEXT: v_accvgpr_read_b32 v1, a1
-; SDAG-NEXT: v_accvgpr_read_b32 v2, a2
-; SDAG-NEXT: v_accvgpr_read_b32 v3, a3
+; SDAG-NEXT: v_smfmac_f32_16x16x128_bf8_bf8 v[0:3], v[14:17], v[6:13], v4
; SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: test_smfmac_f32_16x16x128_bf8_bf8__sgpr:
@@ -1658,30 +1590,25 @@ define <4 x float> @test_smfmac_f32_16x16x128_bf8_fp8__sgpr(<4 x i32> inreg %arg
; SDAG-LABEL: test_smfmac_f32_16x16x128_bf8_fp8__sgpr:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT: v_mov_b32_e32 v10, s0
-; SDAG-NEXT: v_mov_b32_e32 v11, s1
-; SDAG-NEXT: v_mov_b32_e32 v12, s2
-; SDAG-NEXT: v_mov_b32_e32 v13, s3
-; SDAG-NEXT: v_mov_b32_e32 v2, s16
-; SDAG-NEXT: v_mov_b32_e32 v3, s17
-; SDAG-NEXT: v_mov_b32_e32 v4, s18
-; SDAG-NEXT: v_mov_b32_e32 v5, s19
-; SDAG-NEXT: v_mov_b32_e32 v6, s20
-; SDAG-NEXT: v_mov_b32_e32 v7, s21
-; SDAG-NEXT: v_mov_b32_e32 v8, s22
-; SDAG-NEXT: v_mov_b32_e32 v9, s23
-; SDAG-NEXT: v_accvgpr_write_b32 a0, s24
-; SDAG-NEXT: v_accvgpr_write_b32 a1, s25
-; SDAG-NEXT: v_accvgpr_write_b32 a2, s26
-; SDAG-NEXT: v_accvgpr_write_b32 a3, s27
-; SDAG-NEXT: v_mov_b32_e32 v0, s28
+; SDAG-NEXT: v_mov_b32_e32 v14, s0
+; SDAG-NEXT: v_mov_b32_e32 v15, s1
+; SDAG-NEXT: v_mov_b32_e32 v16, s2
+; SDAG-NEXT: v_mov_b32_e32 v17, s3
+; SDAG-NEXT: v_mov_b32_e32 v6, s16
+; SDAG-NEXT: v_mov_b32_e32 v7, s17
+; SDAG-NEXT: v_mov_b32_e32 v8, s18
+; SDAG-NEXT: v_mov_b32_e32 v9, s19
+; SDAG-NEXT: v_mov_b32_e32 v10, s20
+; SDAG-NEXT: v_mov_b32_e32 v11, s21
+; SDAG-NEXT: v_mov_b32_e32 v12, s22
+; SDAG-NEXT: v_mov_b32_e32 v13, s23
+; SDAG-NEXT: v_mov_b32_e32 v0, s24
+; SDAG-NEXT: v_mov_b32_e32 v1, s25
+; SDAG-NEXT: v_mov_b32_e32 v2, s26
+; SDAG-NEXT: v_mov_b32_e32 v3, s27
+; SDAG-NEXT: v_mov_b32_e32 v4, s28
; SDAG-NEXT: s_nop 1
-; SDAG-NEXT: v_smfmac_f32_16x16x128_bf8_fp8 a[0:3], v[10:13], v[2:9], v0
-; SDAG-NEXT: s_nop 7
-; SDAG-NEXT: v_accvgpr_read_b32 v0, a0
-; SDAG-NEXT: v_accvgpr_read_b32 v1, a1
-; SDAG-NEXT: v_accvgpr_read_b32 v2, a2
-; SDAG-NEXT: v_accvgpr_read_b32 v3, a3
+; SDAG-NEXT: v_smfmac_f32_16x16x128_bf8_fp8 v[0:3], v[14:17], v[6:13], v4
; SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: test_smfmac_f32_16x16x128_bf8_fp8__sgpr:
@@ -1827,30 +1754,25 @@ define <4 x float> @test_smfmac_f32_16x16x128_fp8_bf8__sgpr(<4 x i32> inreg %arg
; SDAG-LABEL: test_smfmac_f32_16x16x128_fp8_bf8__sgpr:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT: v_mov_b32_e32 v10, s0
-; SDAG-NEXT: v_mov_b32_e32 v11, s1
-; SDAG-NEXT: v_mov_b32_e32 v12, s2
-; SDAG-NEXT: v_mov_b32_e32 v13, s3
-; SDAG-NEXT: v_mov_b32_e32 v2, s16
-; SDAG-NEXT: v_mov_b32_e32 v3, s17
-; SDAG-NEXT: v_mov_b32_e32 v4, s18
-; SDAG-NEXT: v_mov_b32_e32 v5, s19
-; SDAG-NEXT: v_mov_b32_e32 v6, s20
-; SDAG-NEXT: v_mov_b32_e32 v7, s21
-; SDAG-NEXT: v_mov_b32_e32 v8, s22
-; SDAG-NEXT: v_mov_b32_e32 v9, s23
-; SDAG-NEXT: v_accvgpr_write_b32 a0, s24
-; SDAG-NEXT: v_accvgpr_write_b32 a1, s25
-; SDAG-NEXT: v_accvgpr_write_b32 a2, s26
-; SDAG-NEXT: v_accvgpr_write_b32 a3, s27
-; SDAG-NEXT: v_mov_b32_e32 v0, s28
+; SDAG-NEXT: v_mov_b32_e32 v14, s0
+; SDAG-NEXT: v_mov_b32_e32 v15, s1
+; SDAG-NEXT: v_mov_b32_e32 v16, s2
+; SDAG-NEXT: v_mov_b32_e32 v17, s3
+; SDAG-NEXT: v_mov_b32_e32 v6, s16
+; SDAG-NEXT: v_mov_b32_e32 v7, s17
+; SDAG-NEXT: v_mov_b32_e32 v8, s18
+; SDAG-NEXT: v_mov_b32_e32 v9, s19
+; SDAG-NEXT: v_mov_b32_e32 v10, s20
+; SDAG-NEXT: v_mov_b32_e32 v11, s21
+; SDAG-NEXT: v_mov_b32_e32 v12, s22
+; SDAG-NEXT: v_mov_b32_e32 v13, s23
+; SDAG-NEXT: v_mov_b32_e32 v0, s24
+; SDAG-NEXT: v_mov_b32_e32 v1, s25
+; SDAG-NEXT: v_mov_b32_e32 v2, s26
+; SDAG-NEXT: v_mov_b32_e32 v3, s27
+; SDAG-NEXT: v_mov_b32_e32 v4, s28
; SDAG-NEXT: s_nop 1
-; SDAG-NEXT: v_smfmac_f32_16x16x128_fp8_bf8 a[0:3], v[10:13], v[2:9], v0
-; SDAG-NEXT: s_nop 7
-; SDAG-NEXT: v_accvgpr_read_b32 v0, a0
-; SDAG-NEXT: v_accvgpr_read_b32 v1, a1
-; SDAG-NEXT: v_accvgpr_read_b32 v2, a2
-; SDAG-NEXT: v_accvgpr_read_b32 v3, a3
+; SDAG-NEXT: v_smfmac_f32_16x16x128_fp8_bf8 v[0:3], v[14:17], v[6:13], v4
; SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: test_smfmac_f32_16x16x128_fp8_bf8__sgpr:
@@ -1996,30 +1918,25 @@ define <4 x float> @test_smfmac_f32_16x16x128_fp8_fp8__sgpr(<4 x i32> inreg %arg
; SDAG-LABEL: test_smfmac_f32_16x16x128_fp8_fp8__sgpr:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT: v_mov_b32_e32 v10, s0
-; SDAG-NEXT: v_mov_b32_e32 v11, s1
-; SDAG-NEXT: v_mov_b32_e32 v12, s2
-; SDAG-NEXT: v_mov_b32_e32 v13, s3
-; SDAG-NEXT: v_mov_b32_e32 v2, s16
-; SDAG-NEXT: v_mov_b32_e32 v3, s17
-; SDAG-NEXT: v_mov_b32_e32 v4, s18
-; SDAG-NEXT: v_mov_b32_e32 v5, s19
-; SDAG-NEXT: v_mov_b32_e32 v6, s20
-; SDAG-NEXT: v_mov_b32_e32 v7, s21
-; SDAG-NEXT: v_mov_b32_e32 v8, s22
-; SDAG-NEXT: v_mov_b32_e32 v9, s23
-; SDAG-NEXT: v_accvgpr_write_b32 a0, s24
-; SDAG-NEXT: v_accvgpr_write_b32 a1, s25
-; SDAG-NEXT: v_accvgpr_write_b32 a2, s26
-; SDAG-NEXT: v_accvgpr_write_b32 a3, s27
-; SDAG-NEXT: v_mov_b32_e32 v0, s28
+; SDAG-NEXT: v_mov_b32_e32 v14, s0
+; SDAG-NEXT: v_mov_b32_e32 v15, s1
+; SDAG-NEXT: v_mov_b32_e32 v16, s2
+; SDAG-NEXT: v_mov_b32_e32 v17, s3
+; SDAG-NEXT: v_mov_b32_e32 v6, s16
+; SDAG-NEXT: v_mov_b32_e32 v7, s17
+; SDAG-NEXT: v_mov_b32_e32 v8, s18
+; SDAG-NEXT: v_mov_b32_e32 v9, s19
+; SDAG-NEXT: v_mov_b32_e32 v10, s20
+; SDAG-NEXT: v_mov_b32_e32 v11, s21
+; SDAG-NEXT: v_mov_b32_e32 v12, s22
+; SDAG-NEXT: v_mov_b32_e32 v13, s23
+; SDAG-NEXT: v_mov_b32_e32 v0, s24
+; SDAG-NEXT: v_mov_b32_e32 v1, s25
+; SDAG-NEXT: v_mov_b32_e32 v2, s26
+; SDAG-NEXT: v_mov_b32_e32 v3, s27
+; SDAG-NEXT: v_mov_b32_e32 v4, s28
; SDAG-NEXT: s_nop 1
-; SDAG-NEXT: v_smfmac_f32_16x16x128_fp8_fp8 a[0:3], v[10:13], v[2:9], v0
-; SDAG-NEXT: s_nop 7
-; SDAG-NEXT: v_accvgpr_read_b32 v0, a0
-; SDAG-NEXT: v_accvgpr_read_b32 v1, a1
-; SDAG-NEXT: v_accvgpr_read_b32 v2, a2
-; SDAG-NEXT: v_accvgpr_read_b32 v3, a3
+; SDAG-NEXT: v_smfmac_f32_16x16x128_fp8_fp8 v[0:3], v[14:17], v[6:13], v4
; SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: test_smfmac_f32_16x16x128_fp8_fp8__sgpr:
@@ -2318,53 +2235,37 @@ define <16 x float> @test_smfmac_f32_32x32x64_bf8_bf8__sgpr(<4 x i32> inreg %arg
; SDAG-LABEL: test_smfmac_f32_32x32x64_bf8_bf8__sgpr:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT: v_mov_b32_e32 v36, s0
-; SDAG-NEXT: v_mov_b32_e32 v37, s1
-; SDAG-NEXT: v_mov_b32_e32 v38, s2
-; SDAG-NEXT: v_mov_b32_e32 v39, s3
-; SDAG-NEXT: v_mov_b32_e32 v13, s25
-; SDAG-NEXT: v_mov_b32_e32 v14, s26
-; SDAG-NEXT: v_mov_b32_e32 v15, s27
-; SDAG-NEXT: v_mov_b32_e32 v16, s28
-; SDAG-NEXT: v_mov_b32_e32 v17, s29
-; SDAG-NEXT: v_mov_b32_e32 v28, s16
-; SDAG-NEXT: v_mov_b32_e32 v29, s17
-; SDAG-NEXT: v_mov_b32_e32 v30, s18
-; SDAG-NEXT: v_mov_b32_e32 v31, s19
-; SDAG-NEXT: v_mov_b32_e32 v32, s20
-; SDAG-NEXT: v_mov_b32_e32 v33, s21
-; SDAG-NEXT: v_mov_b32_e32 v34, s22
-; SDAG-NEXT: v_mov_b32_e32 v35, s23
-; SDAG-NEXT: v_mov_b32_e32 v12, s24
-; SDAG-NEXT: v_mov_b32_e32 v18, v0
-; SDAG-NEXT: v_mov_b32_e32 v19, v1
-; SDAG-NEXT: v_mov_b32_e32 v20, v2
-; SDAG-NEXT: v_mov_b32_e32 v21, v3
-; SDAG-NEXT: v_mov_b32_e32 v22, v4
-; SDAG-NEXT: v_mov_b32_e32 v23, v5
-; SDAG-NEXT: v_mov_b32_e32 v24, v6
-; SDAG-NEXT: v_mov_b32_e32 v25, v7
-; SDAG-NEXT: v_mov_b32_e32 v26, v8
-; SDAG-NEXT: v_mov_b32_e32 v27, v9
+; SDAG-NEXT: v_mov_b32_e32 v26, s0
+; SDAG-NEXT: v_mov_b32_e32 v27, s1
+; SDAG-NEXT: v_mov_b32_e32 v28, s2
+; SDAG-NEXT: v_mov_b32_e32 v29, s3
+; SDAG-NEXT: v_mov_b32_e32 v16, v10
+; SDAG-NEXT: v_mov_b32_e32 v15, v9
+; SDAG-NEXT: v_mov_b32_e32 v14, v8
+; SDAG-NEXT: v_mov_b32_e32 v13, v7
+; SDAG-NEXT: v_mov_b32_e32 v12, v6
+; SDAG-NEXT: v_mov_b32_e32 v11, v5
+; SDAG-NEXT: v_mov_b32_e32 v10, v4
+; SDAG-NEXT: v_mov_b32_e32 v9, v3
+; SDAG-NEXT: v_mov_b32_e32 v8, v2
+; SDAG-NEXT: v_mov_b32_e32 v7, v1
+; SDAG-NEXT: v_mov_b32_e32 v6, v0
+; SDAG-NEXT: v_mov_b32_e32 v0, s24
+; SDAG-NEXT: v_mov_b32_e32 v1, s25
+; SDAG-NEXT: v_mov_b32_e32 v2, s26
+; SDAG-NEXT: v_mov_b32_e32 v3, s27
+; SDAG-NEXT: v_mov_b32_e32 v4, s28
+; SDAG-NEXT: v_mov_b32_e32 v5, s29
+; SDAG-NEXT: v_mov_b32_e32 v18, s16
+; SDAG-NEXT: v_mov_b32_e32 v19, s17
+; SDAG-NEXT: v_mov_b32_e32 v20, s18
+; SDAG-NEXT: v_mov_b32_e32 v21, s19
+; SDAG-NEXT: v_mov_b32_e32 v22, s20
+; SDAG-NEXT: v_mov_b32_e32 v23, s21
+; SDAG-NEXT: v_mov_b32_e32 v24, s22
+; SDAG-NEXT: v_mov_b32_e32 v25, s23
; SDAG-NEXT: s_nop 1
-; SDAG-NEXT: v_smfmac_f32_32x32x64_bf8_bf8 v[12:27], v[36:39], v[28:35], v10
-; SDAG-NEXT: s_nop 11
-; SDAG-NEXT: v_mov_b32_e32 v0, v12
-; SDAG-NEXT: v_mov_b32_e32 v1, v13
-; SDAG-NEXT: v_mov_b32_e32 v2, v14
-; SDAG-NEXT: v_mov_b32_e32 v3, v15
-; SDAG-NEXT: v_mov_b32_e32 v4, v16
-; SDAG-NEXT: v_mov_b32_e32 v5, v17
-; SDAG-NEXT: v_mov_b32_e32 v6, v18
-; SDAG-NEXT: v_mov_b32_e32 v7, v19
-; SDAG-NEXT: v_mov_b32_e32 v8, v20
-; SDAG-NEXT: v_mov_b32_e32 v9, v21
-; SDAG-NEXT: v_mov_b32_e32 v10, v22
-; SDAG-NEXT: v_mov_b32_e32 v11, v23
-; SDAG-NEXT: v_mov_b32_e32 v12, v24
-; SDAG-NEXT: v_mov_b32_e32 v13, v25
-; SDAG-NEXT: v_mov_b32_e32 v14, v26
-; SDAG-NEXT: v_mov_b32_e32 v15, v27
+; SDAG-NEXT: v_smfmac_f32_32x32x64_bf8_bf8 v[0:15], v[26:29], v[18:25], v16
; SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: test_smfmac_f32_32x32x64_bf8_bf8__sgpr:
@@ -2685,53 +2586,37 @@ define <16 x float> @test_smfmac_f32_32x32x64_bf8_fp8__sgpr(<4 x i32> inreg %arg
; SDAG-LABEL: test_smfmac_f32_32x32x64_bf8_fp8__sgpr:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT: v_mov_b32_e32 v36, s0
-; SDAG-NEXT: v_mov_b32_e32 v37, s1
-; SDAG-NEXT: v_mov_b32_e32 v38, s2
-; SDAG-NEXT: v_mov_b32_e32 v39, s3
-; SDAG-NEXT: v_mov_b32_e32 v13, s25
-; SDAG-NEXT: v_mov_b32_e32 v14, s26
-; SDAG-NEXT: v_mov_b32_e32 v15, s27
-; SDAG-NEXT: v_mov_b32_e32 v16, s28
-; SDAG-NEXT: v_mov_b32_e32 v17, s29
-; SDAG-NEXT: v_mov_b32_e32 v28, s16
-; SDAG-NEXT: v_mov_b32_e32 v29, s17
-; SDAG-NEXT: v_mov_b32_e32 v30, s18
-; SDAG-NEXT: v_mov_b32_e32 v31, s19
-; SDAG-NEXT: v_mov_b32_e32 v32, s20
-; SDAG-NEXT: v_mov_b32_e32 v33, s21
-; SDAG-NEXT: v_mov_b32_e32 v34, s22
-; SDAG-NEXT: v_mov_b32_e32 v35, s23
-; SDAG-NEXT: v_mov_b32_e32 v12, s24
-; SDAG-NEXT: v_mov_b32_e32 v18, v0
-; SDAG-NEXT: v_mov_b32_e32 v19, v1
-; SDAG-NEXT: v_mov_b32_e32 v20, v2
-; SDAG-NEXT: v_mov_b32_e32 v21, v3
-; SDAG-NEXT: v_mov_b32_e32 v22, v4
-; SDAG-NEXT: v_mov_b32_e32 v23, v5
-; SDAG-NEXT: v_mov_b32_e32 v24, v6
-; SDAG-NEXT: v_mov_b32_e32 v25, v7
-; SDAG-NEXT: v_mov_b32_e32 v26, v8
-; SDAG-NEXT: v_mov_b32_e32 v27, v9
+; SDAG-NEXT: v_mov_b32_e32 v26, s0
+; SDAG-NEXT: v_mov_b32_e32 v27, s1
+; SDAG-NEXT: v_mov_b32_e32 v28, s2
+; SDAG-NEXT: v_mov_b32_e32 v29, s3
+; SDAG-NEXT: v_mov_b32_e32 v16, v10
+; SDAG-NEXT: v_mov_b32_e32 v15, v9
+; SDAG-NEXT: v_mov_b32_e32 v14, v8
+; SDAG-NEXT: v_mov_b32_e32 v13, v7
+; SDAG-NEXT: v_mov_b32_e32 v12, v6
+; SDAG-NEXT: v_mov_b32_e32 v11, v5
+; SDAG-NEXT: v_mov_b32_e32 v10, v4
+; SDAG-NEXT: v_mov_b32_e32 v9, v3
+; SDAG-NEXT: v_mov_b32_e32 v8, v2
+; SDAG-NEXT: v_mov_b32_e32 v7, v1
+; SDAG-NEXT: v_mov_b32_e32 v6, v0
+; SDAG-NEXT: v_mov_b32_e32 v0, s24
+; SDAG-NEXT: v_mov_b32_e32 v1, s25
+; SDAG-NEXT: v_mov_b32_e32 v2, s26
+; SDAG-NEXT: v_mov_b32_e32 v3, s27
+; SDAG-NEXT: v_mov_b32_e32 v4, s28
+; SDAG-NEXT: v_mov_b32_e32 v5, s29
+; SDAG-NEXT: v_mov_b32_e32 v18, s16
+; SDAG-NEXT: v_mov_b32_e32 v19, s17
+; SDAG-NEXT: v_mov_b32_e32 v20, s18
+; SDAG-NEXT: v_mov_b32_e32 v21, s19
+; SDAG-NEXT: v_mov_b32_e32 v22, s20
+; SDAG-NEXT: v_mov_b32_e32 v23, s21
+; SDAG-NEXT: v_mov_b32_e32 v24, s22
+; SDAG-NEXT: v_mov_b32_e32 v25, s23
; SDAG-NEXT: s_nop 1
-; SDAG-NEXT: v_smfmac_f32_32x32x64_bf8_fp8 v[12:27], v[36:39], v[28:35], v10
-; SDAG-NEXT: s_nop 11
-; SDAG-NEXT: v_mov_b32_e32 v0, v12
-; SDAG-NEXT: v_mov_b32_e32 v1, v13
-; SDAG-NEXT: v_mov_b32_e32 v2, v14
-; SDAG-NEXT: v_mov_b32_e32 v3, v15
-; SDAG-NEXT: v_mov_b32_e32 v4, v16
-; SDAG-NEXT: v_mov_b32_e32 v5, v17
-; SDAG-NEXT: v_mov_b32_e32 v6, v18
-; SDAG-NEXT: v_mov_b32_e32 v7, v19
-; SDAG-NEXT: v_mov_b32_e32 v8, v20
-; SDAG-NEXT: v_mov_b32_e32 v9, v21
-; SDAG-NEXT: v_mov_b32_e32 v10, v22
-; SDAG-NEXT: v_mov_b32_e32 v11, v23
-; SDAG-NEXT: v_mov_b32_e32 v12, v24
-; SDAG-NEXT: v_mov_b32_e32 v13, v25
-; SDAG-NEXT: v_mov_b32_e32 v14, v26
-; SDAG-NEXT: v_mov_b32_e32 v15, v27
+; SDAG-NEXT: v_smfmac_f32_32x32x64_bf8_fp8 v[0:15], v[26:29], v[18:25], v16
; SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: test_smfmac_f32_32x32x64_bf8_fp8__sgpr:
@@ -3052,53 +2937,37 @@ define <16 x float> @test_smfmac_f32_32x32x64_fp8_bf8__sgpr(<4 x i32> inreg %arg
; SDAG-LABEL: test_smfmac_f32_32x32x64_fp8_bf8__sgpr:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT: v_mov_b32_e32 v36, s0
-; SDAG-NEXT: v_mov_b32_e32 v37, s1
-; SDAG-NEXT: v_mov_b32_e32 v38, s2
-; SDAG-NEXT: v_mov_b32_e32 v39, s3
-; SDAG-NEXT: v_mov_b32_e32 v13, s25
-; SDAG-NEXT: v_mov_b32_e32 v14, s26
-; SDAG-NEXT: v_mov_b32_e32 v15, s27
-; SDAG-NEXT: v_mov_b32_e32 v16, s28
-; SDAG-NEXT: v_mov_b32_e32 v17, s29
-; SDAG-NEXT: v_mov_b32_e32 v28, s16
-; SDAG-NEXT: v_mov_b32_e32 v29, s17
-; SDAG-NEXT: v_mov_b32_e32 v30, s18
-; SDAG-NEXT: v_mov_b32_e32 v31, s19
-; SDAG-NEXT: v_mov_b32_e32 v32, s20
-; SDAG-NEXT: v_mov_b32_e32 v33, s21
-; SDAG-NEXT: v_mov_b32_e32 v34, s22
-; SDAG-NEXT: v_mov_b32_e32 v35, s23
-; SDAG-NEXT: v_mov_b32_e32 v12, s24
-; SDAG-NEXT: v_mov_b32_e32 v18, v0
-; SDAG-NEXT: v_mov_b32_e32 v19, v1
-; SDAG-NEXT: v_mov_b32_e32 v20, v2
-; SDAG-NEXT: v_mov_b32_e32 v21, v3
-; SDAG-NEXT: v_mov_b32_e32 v22, v4
-; SDAG-NEXT: v_mov_b32_e32 v23, v5
-; SDAG-NEXT: v_mov_b32_e32 v24, v6
-; SDAG-NEXT: v_mov_b32_e32 v25, v7
-; SDAG-NEXT: v_mov_b32_e32 v26, v8
-; SDAG-NEXT: v_mov_b32_e32 v27, v9
+; SDAG-NEXT: v_mov_b32_e32 v26, s0
+; SDAG-NEXT: v_mov_b32_e32 v27, s1
+; SDAG-NEXT: v_mov_b32_e32 v28, s2
+; SDAG-NEXT: v_mov_b32_e32 v29, s3
+; SDAG-NEXT: v_mov_b32_e32 v16, v10
+; SDAG-NEXT: v_mov_b32_e32 v15, v9
+; SDAG-NEXT: v_mov_b32_e32 v14, v8
+; SDAG-NEXT: v_mov_b32_e32 v13, v7
+; SDAG-NEXT: v_mov_b32_e32 v12, v6
+; SDAG-NEXT: v_mov_b32_e32 v11, v5
+; SDAG-NEXT: v_mov_b32_e32 v10, v4
+; SDAG-NEXT: v_mov_b32_e32 v9, v3
+; SDAG-NEXT: v_mov_b32_e32 v8, v2
+; SDAG-NEXT: v_mov_b32_e32 v7, v1
+; SDAG-NEXT: v_mov_b32_e32 v6, v0
+; SDAG-NEXT: v_mov_b32_e32 v0, s24
+; SDAG-NEXT: v_mov_b32_e32 v1, s25
+; SDAG-NEXT: v_mov_b32_e32 v2, s26
+; SDAG-NEXT: v_mov_b32_e32 v3, s27
+; SDAG-NEXT: v_mov_b32_e32 v4, s28
+; SDAG-NEXT: v_mov_b32_e32 v5, s29
+; SDAG-NEXT: v_mov_b32_e32 v18, s16
+; SDAG-NEXT: v_mov_b32_e32 v19, s17
+; SDAG-NEXT: v_mov_b32_e32 v20, s18
+; SDAG-NEXT: v_mov_b32_e32 v21, s19
+; SDAG-NEXT: v_mov_b32_e32 v22, s20
+; SDAG-NEXT: v_mov_b32_e32 v23, s21
+; SDAG-NEXT: v_mov_b32_e32 v24, s22
+; SDAG-NEXT: v_mov_b32_e32 v25, s23
; SDAG-NEXT: s_nop 1
-; SDAG-NEXT: v_smfmac_f32_32x32x64_fp8_bf8 v[12:27], v[36:39], v[28:35], v10
-; SDAG-NEXT: s_nop 11
-; SDAG-NEXT: v_mov_b32_e32 v0, v12
-; SDAG-NEXT: v_mov_b32_e32 v1, v13
-; SDAG-NEXT: v_mov_b32_e32 v2, v14
-; SDAG-NEXT: v_mov_b32_e32 v3, v15
-; SDAG-NEXT: v_mov_b32_e32 v4, v16
-; SDAG-NEXT: v_mov_b32_e32 v5, v17
-; SDAG-NEXT: v_mov_b32_e32 v6, v18
-; SDAG-NEXT: v_mov_b32_e32 v7, v19
-; SDAG-NEXT: v_mov_b32_e32 v8, v20
-; SDAG-NEXT: v_mov_b32_e32 v9, v21
-; SDAG-NEXT: v_mov_b32_e32 v10, v22
-; SDAG-NEXT: v_mov_b32_e32 v11, v23
-; SDAG-NEXT: v_mov_b32_e32 v12, v24
-; SDAG-NEXT: v_mov_b32_e32 v13, v25
-; SDAG-NEXT: v_mov_b32_e32 v14, v26
-; SDAG-NEXT: v_mov_b32_e32 v15, v27
+; SDAG-NEXT: v_smfmac_f32_32x32x64_fp8_bf8 v[0:15], v[26:29], v[18:25], v16
; SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: test_smfmac_f32_32x32x64_fp8_bf8__sgpr:
@@ -3419,53 +3288,37 @@ define <16 x float> @test_smfmac_f32_32x32x64_fp8_fp8__sgpr(<4 x i32> inreg %arg
; SDAG-LABEL: test_smfmac_f32_32x32x64_fp8_fp8__sgpr:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT: v_mov_b32_e32 v36, s0
-; SDAG-NEXT: v_mov_b32_e32 v37, s1
-; SDAG-NEXT: v_mov_b32_e32 v38, s2
-; SDAG-NEXT: v_mov_b32_e32 v39, s3
-; SDAG-NEXT: v_mov_b32_e32 v13, s25
-; SDAG-NEXT: v_mov_b32_e32 v14, s26
-; SDAG-NEXT: v_mov_b32_e32 v15, s27
-; SDAG-NEXT: v_mov_b32_e32 v16, s28
-; SDAG-NEXT: v_mov_b32_e32 v17, s29
-; SDAG-NEXT: v_mov_b32_e32 v28, s16
-; SDAG-NEXT: v_mov_b32_e32 v29, s17
-; SDAG-NEXT: v_mov_b32_e32 v30, s18
-; SDAG-NEXT: v_mov_b32_e32 v31, s19
-; SDAG-NEXT: v_mov_b32_e32 v32, s20
-; SDAG-NEXT: v_mov_b32_e32 v33, s21
-; SDAG-NEXT: v_mov_b32_e32 v34, s22
-; SDAG-NEXT: v_mov_b32_e32 v35, s23
-; SDAG-NEXT: v_mov_b32_e32 v12, s24
-; SDAG-NEXT: v_mov_b32_e32 v18, v0
-; SDAG-NEXT: v_mov_b32_e32 v19, v1
-; SDAG-NEXT: v_mov_b32_e32 v20, v2
-; SDAG-NEXT: v_mov_b32_e32 v21, v3
-; SDAG-NEXT: v_mov_b32_e32 v22, v4
-; SDAG-NEXT: v_mov_b32_e32 v23, v5
-; SDAG-NEXT: v_mov_b32_e32 v24, v6
-; SDAG-NEXT: v_mov_b32_e32 v25, v7
-; SDAG-NEXT: v_mov_b32_e32 v26, v8
-; SDAG-NEXT: v_mov_b32_e32 v27, v9
+; SDAG-NEXT: v_mov_b32_e32 v26, s0
+; SDAG-NEXT: v_mov_b32_e32 v27, s1
+; SDAG-NEXT: v_mov_b32_e32 v28, s2
+; SDAG-NEXT: v_mov_b32_e32 v29, s3
+; SDAG-NEXT: v_mov_b32_e32 v16, v10
+; SDAG-NEXT: v_mov_b32_e32 v15, v9
+; SDAG-NEXT: v_mov_b32_e32 v14, v8
+; SDAG-NEXT: v_mov_b32_e32 v13, v7
+; SDAG-NEXT: v_mov_b32_e32 v12, v6
+; SDAG-NEXT: v_mov_b32_e32 v11, v5
+; SDAG-NEXT: v_mov_b32_e32 v10, v4
+; SDAG-NEXT: v_mov_b32_e32 v9, v3
+; SDAG-NEXT: v_mov_b32_e32 v8, v2
+; SDAG-NEXT: v_mov_b32_e32 v7, v1
+; SDAG-NEXT: v_mov_b32_e32 v6, v0
+; SDAG-NEXT: v_mov_b32_e32 v0, s24
+; SDAG-NEXT: v_mov_b32_e32 v1, s25
+; SDAG-NEXT: v_mov_b32_e32 v2, s26
+; SDAG-NEXT: v_mov_b32_e32 v3, s27
+; SDAG-NEXT: v_mov_b32_e32 v4, s28
+; SDAG-NEXT: v_mov_b32_e32 v5, s29
+; SDAG-NEXT: v_mov_b32_e32 v18, s16
+; SDAG-NEXT: v_mov_b32_e32 v19, s17
+; SDAG-NEXT: v_mov_b32_e32 v20, s18
+; SDAG-NEXT: v_mov_b32_e32 v21, s19
+; SDAG-NEXT: v_mov_b32_e32 v22, s20
+; SDAG-NEXT: v_mov_b32_e32 v23, s21
+; SDAG-NEXT: v_mov_b32_e32 v24, s22
+; SDAG-NEXT: v_mov_b32_e32 v25, s23
; SDAG-NEXT: s_nop 1
-; SDAG-NEXT: v_smfmac_f32_32x32x64_fp8_fp8 v[12:27], v[36:39], v[28:35], v10
-; SDAG-NEXT: s_nop 11
-; SDAG-NEXT: v_mov_b32_e32 v0, v12
-; SDAG-NEXT: v_mov_b32_e32 v1, v13
-; SDAG-NEXT: v_mov_b32_e32 v2, v14
-; SDAG-NEXT: v_mov_b32_e32 v3, v15
-; SDAG-NEXT: v_mov_b32_e32 v4, v16
-; SDAG-NEXT: v_mov_b32_e32 v5, v17
-; SDAG-NEXT: v_mov_b32_e32 v6, v18
-; SDAG-NEXT: v_mov_b32_e32 v7, v19
-; SDAG-NEXT: v_mov_b32_e32 v8, v20
-; SDAG-NEXT: v_mov_b32_e32 v9, v21
-; SDAG-NEXT: v_mov_b32_e32 v10, v22
-; SDAG-NEXT: v_mov_b32_e32 v11, v23
-; SDAG-NEXT: v_mov_b32_e32 v12, v24
-; SDAG-NEXT: v_mov_b32_e32 v13, v25
-; SDAG-NEXT: v_mov_b32_e32 v14, v26
-; SDAG-NEXT: v_mov_b32_e32 v15, v27
+; SDAG-NEXT: v_smfmac_f32_32x32x64_fp8_fp8 v[0:15], v[26:29], v[18:25], v16
; SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: test_smfmac_f32_32x32x64_fp8_fp8__sgpr:
diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll
index b9e9893..9a23788 100644
--- a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll
@@ -369,7 +369,7 @@ define amdgpu_kernel void @illegal_mfma_after_rewrite() #1 {
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_mov_b32 s0, 0
; CHECK-NEXT: s_mov_b32 s1, s0
-; CHECK-NEXT: v_mov_b64_e32 v[8:9], s[0:1]
+; CHECK-NEXT: v_mov_b64_e32 v[28:29], s[0:1]
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; def s[0:3]
; CHECK-NEXT: ;;#ASMEND
@@ -378,73 +378,66 @@ define amdgpu_kernel void @illegal_mfma_after_rewrite() #1 {
; CHECK-NEXT: v_mov_b64_e32 v[4:5], s[0:1]
; CHECK-NEXT: s_mov_b32 s0, 0x3c003c00
; CHECK-NEXT: s_mov_b32 s1, s0
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[0:3], v[8:9], v[8:9], v[4:7]
-; CHECK-NEXT: v_mov_b64_e32 v[12:13], s[0:1]
+; CHECK-NEXT: v_mov_b64_e32 v[30:31], s[0:1]
; CHECK-NEXT: s_mov_b32 s0, 0x7e007e00
; CHECK-NEXT: s_mov_b32 s1, s0
-; CHECK-NEXT: v_mov_b64_e32 v[10:11], s[0:1]
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[14:17], v[8:9], v[12:13], v[4:7]
-; CHECK-NEXT: s_nop 1
-; CHECK-NEXT: v_accvgpr_write_b32 a0, v0
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[18:21], v[8:9], v[10:11], v[4:7]
-; CHECK-NEXT: v_accvgpr_write_b32 a1, v1
-; CHECK-NEXT: v_accvgpr_write_b32 a2, v2
-; CHECK-NEXT: v_accvgpr_write_b32 a3, v3
+; CHECK-NEXT: v_accvgpr_write_b32 a0, s0
+; CHECK-NEXT: v_accvgpr_write_b32 a1, s1
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[0:3], v[28:29], v[28:29], v[4:7]
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[8:11], v[28:29], v[30:31], v[4:7]
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[12:15], v[28:29], a[0:1], v[4:7]
+; CHECK-NEXT: s_nop 2
; CHECK-NEXT: v_mov_b32_e32 v4, 0x7fc00000
; CHECK-NEXT: v_mov_b32_e32 v5, v4
; CHECK-NEXT: v_mov_b32_e32 v6, v4
; CHECK-NEXT: v_mov_b32_e32 v7, v4
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[14:17], v[8:9], v[8:9], v[14:17]
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[8:11], v[28:29], v[28:29], v[8:11]
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[22:25], v[8:9], v[8:9], v[4:7]
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[16:19], v[28:29], v[28:29], v[4:7]
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; def v[4:7]
; CHECK-NEXT: ;;#ASMEND
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[0:3], v[8:9], v[12:13], v[4:7]
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[26:29], v[8:9], v[8:9], v[4:7]
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[0:3], v[8:9], v[8:9], v[0:3]
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[22:25], v[8:9], v[8:9], v[22:25]
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[4:7], v[8:9], v[8:9], v[26:29]
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[16:19], v[28:29], v[28:29], v[16:19]
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[24:27], v[28:29], v[30:31], v[4:7]
; CHECK-NEXT: s_nop 5
-; CHECK-NEXT: v_cvt_f16_f32_e32 v23, v14
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[14:17], v[8:9], v[8:9], v[18:21]
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[0:3], v[12:13], v[8:9], v[0:3]
-; CHECK-NEXT: s_nop 1
-; CHECK-NEXT: v_accvgpr_read_b32 v19, a3
-; CHECK-NEXT: v_accvgpr_read_b32 v18, a2
-; CHECK-NEXT: v_mov_b64_e32 v[20:21], 0
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: v_accvgpr_read_b32 v17, a1
-; CHECK-NEXT: v_accvgpr_read_b32 v16, a0
-; CHECK-NEXT: v_cvt_f16_f32_e32 v15, v22
-; CHECK-NEXT: v_cvt_f16_f32_e32 v14, v14
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[16:19], v[8:9], v[8:9], v[16:19]
-; CHECK-NEXT: v_cvt_f16_f32_e32 v12, v0
-; CHECK-NEXT: global_store_short v[20:21], v23, off
+; CHECK-NEXT: v_cvt_f16_f32_e32 v17, v8
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[8:11], v[28:29], v[28:29], v[12:15]
+; CHECK-NEXT: s_nop 2
+; CHECK-NEXT: v_mov_b64_e32 v[12:13], 0
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[0:3], v[28:29], v[28:29], v[0:3]
+; CHECK-NEXT: global_store_short v[12:13], v17, off
; CHECK-NEXT: buffer_wbl2 sc0 sc1
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: buffer_inv sc0 sc1
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[0:3], v[10:11], v[8:9], v[4:7]
-; CHECK-NEXT: global_store_short v[20:21], v15, off
+; CHECK-NEXT: v_cvt_f16_f32_e32 v9, v16
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[20:23], v[28:29], v[28:29], v[4:7]
+; CHECK-NEXT: global_store_short v[12:13], v9, off
+; CHECK-NEXT: v_cvt_f16_f32_e32 v1, v8
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[8:11], v[28:29], v[28:29], v[24:27]
; CHECK-NEXT: buffer_wbl2 sc0 sc1
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: buffer_inv sc0 sc1
-; CHECK-NEXT: global_store_short v[20:21], v14, off
-; CHECK-NEXT: v_cvt_f16_f32_e32 v14, v16
+; CHECK-NEXT: v_cvt_f16_f32_e32 v14, v0
+; CHECK-NEXT: global_store_short v[12:13], v1, off
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[4:7], v[28:29], v[28:29], v[20:23]
; CHECK-NEXT: buffer_wbl2 sc0 sc1
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: buffer_inv sc0 sc1
-; CHECK-NEXT: global_store_short v[20:21], v14, off
-; CHECK-NEXT: v_cvt_f16_f32_e32 v0, v0
+; CHECK-NEXT: global_store_short v[12:13], v14, off
; CHECK-NEXT: buffer_wbl2 sc0 sc1
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: buffer_inv sc0 sc1
-; CHECK-NEXT: global_store_short v[20:21], v12, off
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[0:3], v[30:31], v[28:29], v[8:11]
+; CHECK-NEXT: s_nop 6
+; CHECK-NEXT: v_cvt_f16_f32_e32 v8, v0
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[0:3], a[0:1], v[28:29], v[4:7]
+; CHECK-NEXT: global_store_short v[12:13], v8, off
; CHECK-NEXT: buffer_wbl2 sc0 sc1
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: buffer_inv sc0 sc1
-; CHECK-NEXT: global_store_short v[20:21], v0, off
+; CHECK-NEXT: s_nop 2
+; CHECK-NEXT: v_cvt_f16_f32_e32 v0, v0
+; CHECK-NEXT: global_store_short v[12:13], v0, off
; CHECK-NEXT: s_endpgm
entry:
%k0 = call <4 x float> asm sideeffect "; def $0", "=s"()
diff --git a/llvm/test/CodeGen/AMDGPU/smfmac_alloc_failure_no_agpr_O0.ll b/llvm/test/CodeGen/AMDGPU/smfmac_alloc_failure_no_agpr_O0.ll
new file mode 100644
index 0000000..ba0fdc68
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/smfmac_alloc_failure_no_agpr_O0.ll
@@ -0,0 +1,119 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -O0 -mtriple=amdgcn -mcpu=gfx950 -amdgpu-mfma-vgpr-form=0 < %s | FileCheck %s
+; RUN: llc -O0 -mtriple=amdgcn -mcpu=gfx950 -amdgpu-mfma-vgpr-form=1 < %s | FileCheck %s
+
+declare <16 x float> @llvm.amdgcn.smfmac.f32.32x32x32.f16(<8 x half>, <16 x half>, <16 x float>, i32, i32 immarg, i32 immarg)
+
+define amdgpu_kernel void @test_smfmac_f32_32x32x32_f16__vgpr(ptr addrspace(1) %arg, <8 x half> %a, <16 x half> %b, i32 %idx) #0 {
+; CHECK-LABEL: test_smfmac_f32_32x32x32_f16__vgpr:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_mov_b64 s[2:3], s[4:5]
+; CHECK-NEXT: v_mov_b32_e32 v1, v0
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
+; CHECK-NEXT: s_load_dwordx4 s[12:15], s[2:3], 0x34
+; CHECK-NEXT: s_load_dwordx8 s[4:11], s[2:3], 0x44
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_load_dword s2, s[2:3], 0x64
+; CHECK-NEXT: s_mov_b32 s3, 0x3ff
+; CHECK-NEXT: v_and_b32_e64 v1, v1, s3
+; CHECK-NEXT: s_mov_b32 s3, 6
+; CHECK-NEXT: v_lshlrev_b32_e64 v8, s3, v1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v8, s[0:1] offset:48
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v1, v7
+; CHECK-NEXT: v_mov_b32_e32 v2, v6
+; CHECK-NEXT: v_mov_b32_e32 v3, v5
+; CHECK-NEXT: ; kill: def $vgpr4 killed $vgpr4 killed $vgpr4_vgpr5_vgpr6_vgpr7 killed $exec
+; CHECK-NEXT: global_load_dwordx4 v[10:13], v8, s[0:1] offset:32
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v5, v13
+; CHECK-NEXT: v_mov_b32_e32 v6, v12
+; CHECK-NEXT: v_mov_b32_e32 v7, v11
+; CHECK-NEXT: v_mov_b32_e32 v24, v10
+; CHECK-NEXT: global_load_dwordx4 v[10:13], v8, s[0:1] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v25, v13
+; CHECK-NEXT: v_mov_b32_e32 v26, v12
+; CHECK-NEXT: v_mov_b32_e32 v27, v11
+; CHECK-NEXT: v_mov_b32_e32 v28, v10
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v8, s[0:1]
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v29, v11
+; CHECK-NEXT: v_mov_b32_e32 v30, v10
+; CHECK-NEXT: v_mov_b32_e32 v31, v9
+; CHECK-NEXT: ; kill: def $vgpr8 killed $vgpr8 killed $vgpr8_vgpr9_vgpr10_vgpr11 killed $exec
+; CHECK-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 killed $exec
+; CHECK-NEXT: v_mov_b32_e32 v9, v31
+; CHECK-NEXT: v_mov_b32_e32 v10, v30
+; CHECK-NEXT: v_mov_b32_e32 v11, v29
+; CHECK-NEXT: v_mov_b32_e32 v12, v28
+; CHECK-NEXT: v_mov_b32_e32 v13, v27
+; CHECK-NEXT: v_mov_b32_e32 v14, v26
+; CHECK-NEXT: v_mov_b32_e32 v15, v25
+; CHECK-NEXT: v_mov_b32_e32 v16, v24
+; CHECK-NEXT: v_mov_b32_e32 v17, v7
+; CHECK-NEXT: v_mov_b32_e32 v18, v6
+; CHECK-NEXT: v_mov_b32_e32 v19, v5
+; CHECK-NEXT: v_mov_b32_e32 v20, v4
+; CHECK-NEXT: v_mov_b32_e32 v21, v3
+; CHECK-NEXT: v_mov_b32_e32 v22, v2
+; CHECK-NEXT: v_mov_b32_e32 v23, v1
+; CHECK-NEXT: v_mov_b64_e32 v[2:3], s[12:13]
+; CHECK-NEXT: v_mov_b64_e32 v[4:5], s[14:15]
+; CHECK-NEXT: v_mov_b64_e32 v[30:31], s[10:11]
+; CHECK-NEXT: v_mov_b64_e32 v[28:29], s[8:9]
+; CHECK-NEXT: v_mov_b64_e32 v[26:27], s[6:7]
+; CHECK-NEXT: v_mov_b64_e32 v[24:25], s[4:5]
+; CHECK-NEXT: v_mov_b32_e32 v1, s2
+; CHECK-NEXT: s_nop 1
+; CHECK-NEXT: v_smfmac_f32_32x32x32_f16 v[8:23], v[2:5], v[24:31], v1 cbsz:1 abid:2
+; CHECK-NEXT: s_nop 11
+; CHECK-NEXT: v_mov_b32_e32 v1, v23
+; CHECK-NEXT: v_mov_b32_e32 v6, v22
+; CHECK-NEXT: v_mov_b32_e32 v7, v21
+; CHECK-NEXT: v_mov_b32_e32 v2, v20
+; CHECK-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3_vgpr4_vgpr5 killed $exec
+; CHECK-NEXT: v_mov_b32_e32 v3, v7
+; CHECK-NEXT: v_mov_b32_e32 v4, v6
+; CHECK-NEXT: v_mov_b32_e32 v5, v1
+; CHECK-NEXT: global_store_dwordx4 v0, v[2:5], s[0:1] offset:48
+; CHECK-NEXT: v_mov_b32_e32 v1, v19
+; CHECK-NEXT: v_mov_b32_e32 v6, v18
+; CHECK-NEXT: v_mov_b32_e32 v7, v17
+; CHECK-NEXT: v_mov_b32_e32 v2, v16
+; CHECK-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3_vgpr4_vgpr5 killed $exec
+; CHECK-NEXT: v_mov_b32_e32 v3, v7
+; CHECK-NEXT: v_mov_b32_e32 v4, v6
+; CHECK-NEXT: v_mov_b32_e32 v5, v1
+; CHECK-NEXT: global_store_dwordx4 v0, v[2:5], s[0:1] offset:32
+; CHECK-NEXT: v_mov_b32_e32 v1, v15
+; CHECK-NEXT: v_mov_b32_e32 v6, v14
+; CHECK-NEXT: v_mov_b32_e32 v7, v13
+; CHECK-NEXT: v_mov_b32_e32 v2, v12
+; CHECK-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3_vgpr4_vgpr5 killed $exec
+; CHECK-NEXT: v_mov_b32_e32 v3, v7
+; CHECK-NEXT: v_mov_b32_e32 v4, v6
+; CHECK-NEXT: v_mov_b32_e32 v5, v1
+; CHECK-NEXT: global_store_dwordx4 v0, v[2:5], s[0:1] offset:16
+; CHECK-NEXT: v_mov_b32_e32 v1, v11
+; CHECK-NEXT: v_mov_b32_e32 v6, v10
+; CHECK-NEXT: v_mov_b32_e32 v7, v9
+; CHECK-NEXT: v_mov_b32_e32 v2, v8
+; CHECK-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3_vgpr4_vgpr5 killed $exec
+; CHECK-NEXT: v_mov_b32_e32 v3, v7
+; CHECK-NEXT: v_mov_b32_e32 v4, v6
+; CHECK-NEXT: v_mov_b32_e32 v5, v1
+; CHECK-NEXT: global_store_dwordx4 v0, v[2:5], s[0:1]
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <16 x float>, ptr addrspace(1) %arg, i32 %id
+ %in.1 = load <16 x float>, ptr addrspace(1) %gep
+ %mai.1 = tail call <16 x float> @llvm.amdgcn.smfmac.f32.32x32x32.f16(<8 x half> %a, <16 x half> %b, <16 x float> %in.1, i32 %idx, i32 1, i32 2)
+ store <16 x float> %mai.1, ptr addrspace(1) %arg
+ ret void
+}
+
+attributes #0 = { "amdgpu-flat-work-group-size"="1,256" "amdgpu-agpr-alloc"="0,0" }
diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index c028d25..7fd7626 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -409,15 +409,11 @@ define i64 @sh3adduw_2(i64 %0, i64 %1) {
;
; RV64ZBA-LABEL: sh3adduw_2:
; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli a0, a0, 3
-; RV64ZBA-NEXT: srli a0, a0, 3
; RV64ZBA-NEXT: sh3add.uw a0, a0, a1
; RV64ZBA-NEXT: ret
;
; RV64XANDESPERF-LABEL: sh3adduw_2:
; RV64XANDESPERF: # %bb.0:
-; RV64XANDESPERF-NEXT: slli a0, a0, 3
-; RV64XANDESPERF-NEXT: srli a0, a0, 3
; RV64XANDESPERF-NEXT: nds.lea.d.ze a0, a1, a0
; RV64XANDESPERF-NEXT: ret
%3 = shl i64 %0, 3
@@ -436,15 +432,11 @@ define i64 @sh3adduw_3(i64 %0, i64 %1) {
;
; RV64ZBA-LABEL: sh3adduw_3:
; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli a0, a0, 3
-; RV64ZBA-NEXT: srli a0, a0, 3
; RV64ZBA-NEXT: sh3add.uw a0, a0, a1
; RV64ZBA-NEXT: ret
;
; RV64XANDESPERF-LABEL: sh3adduw_3:
; RV64XANDESPERF: # %bb.0:
-; RV64XANDESPERF-NEXT: slli a0, a0, 3
-; RV64XANDESPERF-NEXT: srli a0, a0, 3
; RV64XANDESPERF-NEXT: nds.lea.d.ze a0, a1, a0
; RV64XANDESPERF-NEXT: ret
%3 = shl i64 %0, 3
@@ -2681,7 +2673,7 @@ define i64 @srliw_3_sh3add(ptr %0, i32 signext %1) {
; RV64ZBA-LABEL: srliw_3_sh3add:
; RV64ZBA: # %bb.0:
; RV64ZBA-NEXT: srliw a1, a1, 3
-; RV64ZBA-NEXT: sh3add.uw a0, a1, a0
+; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: ld a0, 0(a0)
; RV64ZBA-NEXT: ret
;
diff --git a/llvm/test/TableGen/RuntimeLibcallEmitter-calling-conv.td b/llvm/test/TableGen/RuntimeLibcallEmitter-calling-conv.td
index c224cd6..7ec70b7 100644
--- a/llvm/test/TableGen/RuntimeLibcallEmitter-calling-conv.td
+++ b/llvm/test/TableGen/RuntimeLibcallEmitter-calling-conv.td
@@ -48,47 +48,39 @@ def MSP430LibraryWithCondCC : SystemRuntimeLibrary<isMSP430,
// CHECK-NEXT: Entry = DefaultCC;
// CHECK-NEXT: }
// CHECK-EMPTY:
-// CHECK-NEXT: setLibcallsImpl({
-// CHECK-NEXT: {RTLIB::MALLOC, RTLIB::impl_malloc}, // malloc
-// CHECK-NEXT: });
+// CHECK-NEXT: setLibcallImpl(RTLIB::MALLOC, RTLIB::impl_malloc); // malloc
// CHECK-EMPTY:
-// CHECK-NEXT: setLibcallsImpl({
-// CHECK-NEXT: {RTLIB::SDIVREM_I8, RTLIB::impl___divmodqi4}, // __divmodqi4
-// CHECK-NEXT: {RTLIB::UDIVREM_I16, RTLIB::impl___udivmodhi4}, // __udivmodhi4
-// CHECK-NEXT: }, CallingConv::AVR_BUILTIN);
+// CHECK-NEXT: setLibcallImpl(RTLIB::SDIVREM_I8, RTLIB::impl___divmodqi4); // __divmodqi4
+// CHECK-NEXT: setLibcallImplCallingConv(RTLIB::impl___divmodqi4, CallingConv::AVR_BUILTIN);
+// CHECK-NEXT: setLibcallImpl(RTLIB::UDIVREM_I16, RTLIB::impl___udivmodhi4); // __udivmodhi4
+// CHECK-NEXT: setLibcallImplCallingConv(RTLIB::impl___udivmodhi4, CallingConv::AVR_BUILTIN);
// CHECK-EMPTY:
// CHECK-NEXT: return;
// CHECK-NEXT: }
// CHECK-EMPTY:
// CHECK-NEXT: if (TT.getArch() == Triple::avr) {
-// CHECK-NEXT: setLibcallsImpl({
-// CHECK-NEXT: {RTLIB::MALLOC, RTLIB::impl_malloc}, // malloc
-// CHECK-NEXT: });
+// CHECK-NEXT: setLibcallImpl(RTLIB::MALLOC, RTLIB::impl_malloc); // malloc
// CHECK-EMPTY:
-// CHECK-NEXT: setLibcallsImpl({
-// CHECK-NEXT: {RTLIB::SDIVREM_I8, RTLIB::impl___divmodqi4}, // __divmodqi4
-// CHECK-NEXT: {RTLIB::UDIVREM_I16, RTLIB::impl___udivmodhi4}, // __udivmodhi4
-// CHECK-NEXT: }, CallingConv::AVR_BUILTIN);
+// CHECK-NEXT: setLibcallImpl(RTLIB::SDIVREM_I8, RTLIB::impl___divmodqi4); // __divmodqi4
+// CHECK-NEXT: setLibcallImplCallingConv(RTLIB::impl___divmodqi4, CallingConv::AVR_BUILTIN);
+// CHECK-NEXT: setLibcallImpl(RTLIB::UDIVREM_I16, RTLIB::impl___udivmodhi4); // __udivmodhi4
+// CHECK-NEXT: setLibcallImplCallingConv(RTLIB::impl___udivmodhi4, CallingConv::AVR_BUILTIN);
// CHECK-EMPTY:
// CHECK-NEXT: return;
// CHECK-NEXT: }
// CHECK-EMPTY:
// CHECK-NEXT: if (TT.getArch() == Triple::msp430) {
-// CHECK-NEXT: setLibcallsImpl({
-// CHECK-NEXT: {RTLIB::MALLOC, RTLIB::impl_malloc}, // malloc
-// CHECK-NEXT: });
+// CHECK-NEXT: setLibcallImpl(RTLIB::MALLOC, RTLIB::impl_malloc); // malloc
// CHECK-EMPTY:
// CHECK-NEXT: if ( isFoo() ) {
-// CHECK-NEXT: setLibcallsImpl({
-// CHECK-NEXT: {RTLIB::SDIVREM_I8, RTLIB::impl___divmodqi4}, // __divmodqi4
-// CHECK-NEXT: }, CallingConv::AVR_BUILTIN);
+// CHECK-NEXT: setLibcallImpl(RTLIB::SDIVREM_I8, RTLIB::impl___divmodqi4); // __divmodqi4
+// CHECK-NEXT: setLibcallImplCallingConv(RTLIB::impl___divmodqi4, CallingConv::AVR_BUILTIN);
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-EMPTY:
// CHECK-NEXT: if ( isBar() ) {
-// CHECK-NEXT: setLibcallsImpl({
-// CHECK-NEXT: {RTLIB::UDIVREM_I16, RTLIB::impl___udivmodhi4}, // __udivmodhi4
-// CHECK-NEXT: }, CallingConv::MSP430_BUILTIN);
+// CHECK-NEXT: setLibcallImpl(RTLIB::UDIVREM_I16, RTLIB::impl___udivmodhi4); // __udivmodhi4
+// CHECK-NEXT: setLibcallImplCallingConv(RTLIB::impl___udivmodhi4, CallingConv::MSP430_BUILTIN);
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-EMPTY:
diff --git a/llvm/test/TableGen/RuntimeLibcallEmitter-conflict-warning.td b/llvm/test/TableGen/RuntimeLibcallEmitter-conflict-warning.td
index 8169f56..112c33e 100644
--- a/llvm/test/TableGen/RuntimeLibcallEmitter-conflict-warning.td
+++ b/llvm/test/TableGen/RuntimeLibcallEmitter-conflict-warning.td
@@ -25,9 +25,7 @@ def dup1 : RuntimeLibcallImpl<ANOTHER_DUP>;
// func_a and func_b both provide SOME_FUNC.
// CHECK: if (isTargetArchA()) {
-// CHECK-NEXT: setLibcallsImpl({
-// CHECK-NEXT: {RTLIB::SOME_FUNC, RTLIB::impl_func_b}, // func_b
-// CHECK-NEXT: });
+// CHECK-NEXT: setLibcallImpl(RTLIB::SOME_FUNC, RTLIB::impl_func_b); // func_b
// ERR: :[[@LINE+1]]:5: warning: conflicting implementations for libcall SOME_FUNC: func_b, func_a
def TheSystemLibraryA : SystemRuntimeLibrary<isTargetArchA,
@@ -35,10 +33,8 @@ def TheSystemLibraryA : SystemRuntimeLibrary<isTargetArchA,
>;
// CHECK: if (isTargetArchB()) {
-// CHECK-NEXT: setLibcallsImpl({
-// CHECK-NEXT: {RTLIB::OTHER_FUNC, RTLIB::impl_other_func}, // other_func
-// CHECK-NEXT: {RTLIB::SOME_FUNC, RTLIB::impl_func_a}, // func_a
-// CHECK-NEXT: });
+// CHECK-NEXT: setLibcallImpl(RTLIB::OTHER_FUNC, RTLIB::impl_other_func); // other_func
+// CHECK-NEXT: setLibcallImpl(RTLIB::SOME_FUNC, RTLIB::impl_func_a); // func_a
// ERR: :[[@LINE+1]]:5: warning: conflicting implementations for libcall SOME_FUNC: func_a, func_b
def TheSystemLibraryB : SystemRuntimeLibrary<isTargetArchB,
@@ -46,11 +42,9 @@ def TheSystemLibraryB : SystemRuntimeLibrary<isTargetArchB,
>;
// CHECK: if (isTargetArchC()) {
-// CHECK-NEXT: setLibcallsImpl({
-// CHECK-NEXT: {RTLIB::ANOTHER_DUP, RTLIB::impl_dup1}, // dup1
-// CHECK-NEXT: {RTLIB::OTHER_FUNC, RTLIB::impl_other_func}, // other_func
-// CHECK-NEXT: {RTLIB::SOME_FUNC, RTLIB::impl_func_a}, // func_a
-// CHECK-NEXT: });
+// CHECK-NEXT: setLibcallImpl(RTLIB::ANOTHER_DUP, RTLIB::impl_dup1); // dup1
+// CHECK-NEXT: setLibcallImpl(RTLIB::OTHER_FUNC, RTLIB::impl_other_func); // other_func
+// CHECK-NEXT: setLibcallImpl(RTLIB::SOME_FUNC, RTLIB::impl_func_a); // func_a
// ERR: :[[@LINE+3]]:5: warning: conflicting implementations for libcall ANOTHER_DUP: dup1, dup0
// ERR: :[[@LINE+2]]:5: warning: conflicting implementations for libcall SOME_FUNC: func_a, func_b
diff --git a/llvm/test/TableGen/RuntimeLibcallEmitter.td b/llvm/test/TableGen/RuntimeLibcallEmitter.td
index 78705e2..f4577f8 100644
--- a/llvm/test/TableGen/RuntimeLibcallEmitter.td
+++ b/llvm/test/TableGen/RuntimeLibcallEmitter.td
@@ -190,40 +190,20 @@ def BlahLibrary : SystemRuntimeLibrary<isBlahArch, (add calloc, LibraryWithCondi
// CHECK-NEXT: }
// CHECK: void llvm::RTLIB::RuntimeLibcallsInfo::setTargetRuntimeLibcallSets(const llvm::Triple &TT, ExceptionHandling ExceptionModel, FloatABI::ABIType FloatABI, EABI EABIVersion, StringRef ABIName) {
-// CHECK-NEXT: struct LibcallImplPair {
-// CHECK-NEXT: RTLIB::Libcall Func;
-// CHECK-NEXT: RTLIB::LibcallImpl Impl;
-// CHECK-NEXT: };
-// CHECK-NEXT: auto setLibcallsImpl = [this](
-// CHECK-NEXT: ArrayRef<LibcallImplPair> Libcalls,
-// CHECK-NEXT: std::optional<llvm::CallingConv::ID> CC = {})
-// CHECK-NEXT: {
-// CHECK-NEXT: for (const auto [Func, Impl] : Libcalls) {
-// CHECK-NEXT: setLibcallImpl(Func, Impl);
-// CHECK-NEXT: if (CC)
-// CHECK-NEXT: setLibcallImplCallingConv(Impl, *CC);
-// CHECK-NEXT: }
-// CHECK-NEXT: };
// CHECK-EMPTY:
// CHECK-NEXT: if (TT.getArch() == Triple::blah) {
-// CHECK-NEXT: setLibcallsImpl({
-// CHECK-NEXT: {RTLIB::BZERO, RTLIB::impl_bzero}, // bzero
-// CHECK-NEXT: {RTLIB::CALLOC, RTLIB::impl_calloc}, // calloc
-// CHECK-NEXT: {RTLIB::SQRT_F128, RTLIB::impl_sqrtl_f128}, // sqrtl
-// CHECK-NEXT: });
+// CHECK-NEXT: setLibcallImpl(RTLIB::BZERO, RTLIB::impl_bzero); // bzero
+// CHECK-NEXT: setLibcallImpl(RTLIB::CALLOC, RTLIB::impl_calloc); // calloc
+// CHECK-NEXT: setLibcallImpl(RTLIB::SQRT_F128, RTLIB::impl_sqrtl_f128); // sqrtl
// CHECK-EMPTY:
// CHECK-NEXT: if (TT.hasCompilerRT()) {
-// CHECK-NEXT: setLibcallsImpl({
-// CHECK-NEXT: {RTLIB::SHL_I32, RTLIB::impl___ashlsi3}, // __ashlsi3
-// CHECK-NEXT: {RTLIB::SRL_I64, RTLIB::impl___lshrdi3}, // __lshrdi3
-// CHECK-NEXT: });
+// CHECK-NEXT: setLibcallImpl(RTLIB::SHL_I32, RTLIB::impl___ashlsi3); // __ashlsi3
+// CHECK-NEXT: setLibcallImpl(RTLIB::SRL_I64, RTLIB::impl___lshrdi3); // __lshrdi3
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-EMPTY:
// CHECK-NEXT: if (TT.getOS() == Triple::bar) {
-// CHECK-NEXT: setLibcallsImpl({
-// CHECK-NEXT: {RTLIB::MEMSET, RTLIB::impl____memset}, // ___memset
-// CHECK-NEXT: });
+// CHECK-NEXT: setLibcallImpl(RTLIB::MEMSET, RTLIB::impl____memset); // ___memset
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-EMPTY:
@@ -231,25 +211,19 @@ def BlahLibrary : SystemRuntimeLibrary<isBlahArch, (add calloc, LibraryWithCondi
// CHECK-NEXT: }
// CHECK-EMPTY:
// CHECK-NEXT: if (TT.getArch() == Triple::buzz) {
-// CHECK-NEXT: setLibcallsImpl({
-// CHECK-NEXT: {RTLIB::SHL_I32, RTLIB::impl___ashlsi3}, // __ashlsi3
-// CHECK-NEXT: {RTLIB::SQRT_F80, RTLIB::impl_sqrtl_f80}, // sqrtl
-// CHECK-NEXT: {RTLIB::SRL_I64, RTLIB::impl___lshrdi3}, // __lshrdi3
-// CHECK-NEXT: });
+// CHECK-NEXT: setLibcallImpl(RTLIB::SHL_I32, RTLIB::impl___ashlsi3); // __ashlsi3
+// CHECK-NEXT: setLibcallImpl(RTLIB::SQRT_F80, RTLIB::impl_sqrtl_f80); // sqrtl
+// CHECK-NEXT: setLibcallImpl(RTLIB::SRL_I64, RTLIB::impl___lshrdi3); // __lshrdi3
// CHECK-EMPTY:
// CHECK-NEXT: return;
// CHECK-NEXT: }
// CHECK-EMPTY:
// CHECK-NEXT: if (TT.getArch() == Triple::foo) {
-// CHECK-NEXT: setLibcallsImpl({
-// CHECK-NEXT: {RTLIB::BZERO, RTLIB::impl_bzero}, // bzero
-// CHECK-NEXT: {RTLIB::SQRT_F128, RTLIB::impl_sqrtl_f128}, // sqrtl
-// CHECK-NEXT: });
+// CHECK-NEXT: setLibcallImpl(RTLIB::BZERO, RTLIB::impl_bzero); // bzero
+// CHECK-NEXT: setLibcallImpl(RTLIB::SQRT_F128, RTLIB::impl_sqrtl_f128); // sqrtl
// CHECK-EMPTY:
// CHECK-NEXT: if (TT.getOS() == Triple::bar) {
-// CHECK-NEXT: setLibcallsImpl({
-// CHECK-NEXT: {RTLIB::MEMSET, RTLIB::impl____memset}, // ___memset
-// CHECK-NEXT: });
+// CHECK-NEXT: setLibcallImpl(RTLIB::MEMSET, RTLIB::impl____memset); // ___memset
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-EMPTY:
@@ -257,12 +231,10 @@ def BlahLibrary : SystemRuntimeLibrary<isBlahArch, (add calloc, LibraryWithCondi
// CHECK-NEXT: }
// CHECK-EMPTY:
// CHECK-NEXT: if (TT.getArch() == Triple::simple) {
-// CHECK-NEXT: setLibcallsImpl({
-// CHECK-NEXT: {RTLIB::CALLOC, RTLIB::impl_calloc}, // calloc
-// CHECK-NEXT: {RTLIB::SHL_I32, RTLIB::impl___ashlsi3}, // __ashlsi3
-// CHECK-NEXT: {RTLIB::SQRT_F80, RTLIB::impl_sqrtl_f80}, // sqrtl
-// CHECK-NEXT: {RTLIB::SRL_I64, RTLIB::impl___lshrdi3}, // __lshrdi3
-// CHECK-NEXT: });
+// CHECK-NEXT: setLibcallImpl(RTLIB::CALLOC, RTLIB::impl_calloc); // calloc
+// CHECK-NEXT: setLibcallImpl(RTLIB::SHL_I32, RTLIB::impl___ashlsi3); // __ashlsi3
+// CHECK-NEXT: setLibcallImpl(RTLIB::SQRT_F80, RTLIB::impl_sqrtl_f80); // sqrtl
+// CHECK-NEXT: setLibcallImpl(RTLIB::SRL_I64, RTLIB::impl___lshrdi3); // __lshrdi3
// CHECK-EMPTY:
// CHECK-NEXT: return;
// CHECK-NEXT: }
diff --git a/llvm/test/tools/llvm-reduce/inline-call-sites-cost.ll b/llvm/test/tools/llvm-reduce/inline-call-sites-cost.ll
new file mode 100644
index 0000000..fc25ca4
--- /dev/null
+++ b/llvm/test/tools/llvm-reduce/inline-call-sites-cost.ll
@@ -0,0 +1,95 @@
+; RUN: llvm-reduce --abort-on-invalid-reduction --delta-passes=inline-call-sites -reduce-callsite-inline-threshold=3 --test FileCheck --test-arg --check-prefix=CHECK --test-arg %s --test-arg --input-file %s -o %t
+; RUN: FileCheck -check-prefixes=RESULT,CHECK %s < %t
+
+declare void @extern_b()
+declare void @extern_a()
+
+; RESULT: @gv_init = global ptr @no_inline_noncall_user
+@gv_init = global ptr @no_inline_noncall_user
+
+
+; CHECK-LABEL: define void @no_inline_noncall_user(
+define void @no_inline_noncall_user() {
+ call void @extern_a()
+ call void @extern_a()
+ call void @extern_a()
+ call void @extern_a()
+ ret void
+}
+
+; RESULT-LABEL: define void @noncall_user_call() {
+; RESULT-NEXT: call void @no_inline_noncall_user()
+; RESULT-NEXT: ret void
+define void @noncall_user_call() {
+ call void @no_inline_noncall_user()
+ ret void
+}
+
+; RESULT-LABEL: define void @big_callee_small_caller_callee() {
+define void @big_callee_small_caller_callee() {
+ call void @extern_a()
+ call void @extern_a()
+ call void @extern_a()
+ call void @extern_a()
+ ret void
+}
+
+; RESULT-LABEL: define void @big_callee_small_caller_caller() {
+; RESULT-NEXT: call void @extern_b()
+; RESULT-NEXT: call void @extern_a()
+; RESULT-NEXT: call void @extern_a()
+; RESULT-NEXT: call void @extern_a()
+; RESULT-NEXT: call void @extern_a()
+; RESULT-NEXT: ret void
+define void @big_callee_small_caller_caller() {
+ call void @extern_b()
+ call void @big_callee_small_caller_callee()
+ ret void
+}
+
+; RESULT-LABEL: define void @small_callee_big_caller_callee() {
+; RESULT-NEXT: call void @extern_a()
+; RESULT-NEXT: ret void
+define void @small_callee_big_caller_callee() {
+ call void @extern_a()
+ ret void
+}
+
+; RESULT-LABEL: define void @small_callee_big_caller_caller() {
+; RESULT-NEXT: call void @extern_b()
+; RESULT-NEXT: call void @extern_a()
+; RESULT-NEXT: call void @extern_b()
+; RESULT-NEXT: call void @extern_b()
+; RESULT-NEXT: ret void
+define void @small_callee_big_caller_caller() {
+ call void @extern_b()
+ call void @small_callee_big_caller_callee()
+ call void @extern_b()
+ call void @extern_b()
+ ret void
+}
+
+; RESULT-LABEL: define void @big_callee_big_caller_callee() {
+define void @big_callee_big_caller_callee() {
+ call void @extern_a()
+ call void @extern_a()
+ call void @extern_a()
+ call void @extern_a()
+ ret void
+}
+
+; RESULT-LABEL: define void @big_callee_big_caller_caller() {
+; RESULT-NEXT: call void @extern_b()
+; RESULT-NEXT: call void @big_callee_big_caller_callee()
+; RESULT-NEXT: call void @extern_b()
+; RESULT-NEXT: call void @extern_b()
+; RESULT-NEXT: call void @extern_b()
+; RESULT-NEXT: ret void
+define void @big_callee_big_caller_caller() {
+ call void @extern_b()
+ call void @big_callee_big_caller_callee()
+ call void @extern_b()
+ call void @extern_b()
+ call void @extern_b()
+ ret void
+}
diff --git a/llvm/test/tools/llvm-reduce/inline-call-sites.ll b/llvm/test/tools/llvm-reduce/inline-call-sites.ll
new file mode 100644
index 0000000..34775d9
--- /dev/null
+++ b/llvm/test/tools/llvm-reduce/inline-call-sites.ll
@@ -0,0 +1,765 @@
+; RUN: llvm-reduce --abort-on-invalid-reduction --delta-passes=inline-call-sites -reduce-callsite-inline-threshold=-1 --test FileCheck --test-arg --check-prefixes=CHECK,INTERESTING --test-arg %s --test-arg --input-file %s -o %t
+; RUN: FileCheck -check-prefixes=RESULT,CHECK %s < %t
+
+; RESULT: @gv = global [2 x ptr] [ptr @only_gv_user, ptr @simple_callee]
+@gv = global [2 x ptr] [ptr @only_gv_user, ptr @simple_callee]
+
+; RESULT: @indirectbr.L = internal unnamed_addr constant [3 x ptr] [ptr blockaddress(@callee_with_indirectbr, %L1), ptr blockaddress(@callee_with_indirectbr, %L2), ptr null], align 8
+@indirectbr.L = internal unnamed_addr constant [3 x ptr] [ptr blockaddress(@callee_with_indirectbr, %L1), ptr blockaddress(@callee_with_indirectbr, %L2), ptr null], align 8
+
+
+; CHECK-LABEL: define void @simple_callee(
+; RESULT-NEXT: store i32 123, ptr %arg, align 4
+; RESULT-NEXT: ret void
+define void @simple_callee(ptr %arg) {
+ store i32 123, ptr %arg
+ ret void
+}
+
+; CHECK-LABEL: define void @simple_caller(
+; RESULT-NEXT: store i32 123, ptr %outer.arg, align 4
+; RESULT-NEXT: ret void
+define void @simple_caller(ptr %outer.arg) {
+ call void @simple_callee(ptr %outer.arg)
+ ret void
+}
+
+; CHECK-LABEL: define void @multi_simple_caller(
+; RESULT-NEXT: store i32 123, ptr %outer.arg, align 4
+; RESULT-NEXT: store i32 123, ptr %outer.arg, align 4
+; RESULT-NEXT: store i32 123, ptr null, align 4
+; RESULT-NEXT: ret void
+define void @multi_simple_caller(ptr %outer.arg) {
+ call void @simple_callee(ptr %outer.arg)
+ call void @simple_callee(ptr %outer.arg)
+ call void @simple_callee(ptr null)
+ ret void
+}
+
+; CHECK-LABEL: define void @only_gv_user(
+; RESULT-NEXT: store i32 666, ptr %arg, align 4
+; RESULT-NEXT: ret void
+define void @only_gv_user(ptr %arg) {
+ store i32 666, ptr %arg
+ ret void
+}
+
+; CHECK-LABEL: define void @recursive(
+; RESULT-NEXT: call void @recursive(ptr %arg)
+; RESULT-NEXT: ret void
+define void @recursive(ptr %arg) {
+ call void @recursive(ptr %arg)
+ ret void
+}
+
+; CHECK-LABEL: define void @recursive_with_wrong_callsite_type(
+; RESULT-NEXT: call void @recursive_with_wrong_callsite_type(ptr %arg, i32 2)
+; RESULT-NEXT: ret void
+define void @recursive_with_wrong_callsite_type(ptr %arg) {
+ call void @recursive_with_wrong_callsite_type(ptr %arg, i32 2)
+ ret void
+}
+
+; CHECK-LABEL: define void @non_callee_use(
+; RESULT-NEXT: store i32 567, ptr %arg, align 4
+; RESULT-NEXT: ret void
+define void @non_callee_use(ptr %arg) {
+ store i32 567, ptr %arg
+ ret void
+}
+
+declare void @extern_ptr_use(ptr)
+
+; CHECK-LABEL: define void @non_callee_user(
+; RESULT-NEXT: call void @extern_ptr_use(ptr @non_callee_use)
+; RESULT-NEXT: ret void
+define void @non_callee_user() {
+ call void @extern_ptr_use(ptr @non_callee_use)
+ ret void
+}
+
+; CHECK-LABEL: define void @non_call_inst_use(
+define void @non_call_inst_use(ptr %arg) {
+ store i32 999, ptr %arg
+ ret void
+}
+
+; CHECK-LABEL: define void @non_call_inst_user(
+; RESULT-NEXT: store ptr @non_call_inst_use, ptr %arg, align 8
+; RESULT-NEXT: ret void
+define void @non_call_inst_user(ptr %arg) {
+ store ptr @non_call_inst_use, ptr %arg
+ ret void
+}
+
+; CHECK-LABEL: define i32 @used_wrong_call_type(
+; RESULT-NEXT: store i32 123, ptr %arg, align 4
+; RESULT-NEXT: ret i32 8
+define i32 @used_wrong_call_type(ptr %arg) {
+ store i32 123, ptr %arg
+ ret i32 8
+}
+
+; Inlining doesn't support the UB cases
+; CHECK-LABEL: define void @use_wrong_call_type(
+; RESULT-NEXT: call void @used_wrong_call_type(ptr %outer.arg)
+; RESULT-NEXT: ret void
+define void @use_wrong_call_type(ptr %outer.arg) {
+ call void @used_wrong_call_type(ptr %outer.arg)
+ ret void
+}
+
+; INTERESTING-LABEL: define void @incompatible_gc_callee(
+
+; RESULT-LABEL: define void @incompatible_gc_callee(ptr %arg) gc "gc0" {
+; RESULT-NEXT: store i32 10000, ptr %arg, align 4
+; RESULT-NEXT: ret void
+define void @incompatible_gc_callee(ptr %arg) gc "gc0" {
+ store i32 10000, ptr %arg
+ ret void
+}
+
+; INTERESTING-LABEL: define void @incompatible_gc_caller(
+
+; RESULT-LABEL: define void @incompatible_gc_caller(ptr %outer.arg) gc "gc1" {
+; RESULT-NEXT: call void @incompatible_gc_callee(ptr %outer.arg)
+; RESULT-NEXT: ret void
+define void @incompatible_gc_caller(ptr %outer.arg) gc "gc1" {
+ call void @incompatible_gc_callee(ptr %outer.arg)
+ ret void
+}
+
+; INTERESTING-LABEL: define void @propagate_callee_gc(
+
+; RESULT-LABEL: define void @propagate_callee_gc(ptr %arg) gc "propagate-gc" {
+; RESULT-NEXT: store i32 10000, ptr %arg, align 4
+; RESULT-NEXT: ret void
+define void @propagate_callee_gc(ptr %arg) gc "propagate-gc" {
+ store i32 10000, ptr %arg
+ ret void
+}
+
+; INTERESTING-LABEL: define void @propagate_caller_gc(
+
+; RESULT-LABEL: define void @propagate_caller_gc(ptr %arg) gc "propagate-gc" {
+; RESULT-NEXT: store i32 10000, ptr %arg, align 4
+; RESULT-NEXT: ret void
+define void @propagate_caller_gc(ptr %arg) {
+ call void @propagate_callee_gc(ptr %arg)
+ ret void
+}
+
+declare i32 @__gxx_personality_v0(...)
+
+; INTERESTING-LABEL: define void @propagate_callee_personality(
+
+; RESULT-LABEL: define void @propagate_callee_personality(ptr %arg) personality ptr @__gxx_personality_v0 {
+; RESULT-NEXT: store i32 2000, ptr %arg, align 4
+; RESULT-NEXT: ret void
+define void @propagate_callee_personality(ptr %arg) personality ptr @__gxx_personality_v0 {
+ store i32 2000, ptr %arg
+ ret void
+}
+
+; INTERESTING-LABEL: define void @propagate_caller_personality(
+
+; RESULT-LABEL: define void @propagate_caller_personality(ptr %arg) personality ptr @__gxx_personality_v0 {
+; RESULT-NEXT: store i32 2000, ptr %arg, align 4
+; RESULT-NEXT: ret void
+define void @propagate_caller_personality(ptr %arg) {
+ call void @propagate_callee_personality(ptr %arg)
+ ret void
+}
+
+; CHECK-LABEL: define void @callee_with_indirectbr(
+define void @callee_with_indirectbr() {
+entry:
+ br label %L1
+
+L1: ; preds = %entry, %L1
+ %i = phi i32 [ 0, %entry ], [ %inc, %L1 ]
+ %inc = add i32 %i, 1
+ %idxprom = zext i32 %i to i64
+ %arrayidx = getelementptr inbounds [3 x ptr], ptr @indirectbr.L, i64 0, i64 %idxprom
+ %brtarget = load ptr, ptr %arrayidx, align 8
+ indirectbr ptr %brtarget, [label %L1, label %L2]
+
+L2: ; preds = %L1
+ ret void
+}
+
+; CHECK-LABEL: define void @calls_func_with_indirectbr(
+
+; RESULT: L1.i:
+; RESULT-NEXT: %i.i = phi i32 [ 0, %call ], [ %inc.i, %L1.i ]
+; RESULT-NEXT: %inc.i = add i32 %i.i, 1
+; RESULT-NEXT: %idxprom.i = zext i32 %i.i to i64
+; RESULT-NEXT: %arrayidx.i = getelementptr inbounds [3 x ptr], ptr @indirectbr.L, i64 0, i64 %idxprom.i
+; RESULT-NEXT: %brtarget.i = load ptr, ptr %arrayidx.i, align 8
+; RESULT-NEXT: indirectbr ptr %brtarget.i, [label %L1.i, label %callee_with_indirectbr.exit]
+
+define void @calls_func_with_indirectbr(i1 %arg0) {
+entry:
+ br i1 %arg0, label %call, label %ret
+
+call:
+ call void @callee_with_indirectbr()
+ br label %ret
+
+ret:
+ ret void
+}
+
+
+; CHECK-LABEL: define ptr @callee_with_blockaddress_use(
+; RESULT: L2:
+; RESULT-NEXT: store ptr blockaddress(@callee_with_blockaddress_use, %L1), ptr %alloca, align 8
+; RESULT-NEXT: store ptr blockaddress(@callee_with_blockaddress_use, %L2), ptr %alloca, align 8
+; RESULT-NEXT: store ptr blockaddress(@callee_with_blockaddress_use, %L3), ptr %alloca, align 8
+; RESULT-NEXT: %cond1 = load volatile i1, ptr addrspace(1) null
+; RESULT-NEXT: br i1 %cond1, label %L1, label %L3
+define ptr @callee_with_blockaddress_use() {
+entry:
+ %alloca = alloca ptr
+ %cond0 = load volatile i1, ptr addrspace(1) null
+ br i1 %cond0, label %L1, label %L2
+
+L1:
+ br label %L2
+
+L2:
+ ; reference an earlier block
+ store ptr blockaddress(@callee_with_blockaddress_use, %L1), ptr %alloca
+
+ ; reference the block itself from the block
+ store ptr blockaddress(@callee_with_blockaddress_use, %L2), ptr %alloca
+
+ ; reference a later block
+ store ptr blockaddress(@callee_with_blockaddress_use, %L3), ptr %alloca
+
+ %cond1 = load volatile i1, ptr addrspace(1) null
+ br i1 %cond1, label %L1, label %L3
+
+L3:
+ %load = load ptr, ptr %alloca
+ ret ptr %load
+}
+
+; FIXME: This is not correctly remapping the blockaddress use
+; CHECK-LABEL: define void @calls_func_with_blockaddress_use(
+; RESULT: entry:
+; RESULT-NEXT: %alloca.i = alloca ptr, align 8
+; RESULT-NEXT: store i32 1000, ptr null, align 4
+; RESULT-NEXT: br i1 %arg0, label %call, label %ret
+
+; RESULT: call:
+; RESULT-NEXT: store i32 2000, ptr null, align 4
+; RESULT-NEXT: call void @llvm.lifetime.start.p0(ptr %alloca.i)
+; RESULT-NEXT: %cond0.i = load volatile i1, ptr addrspace(1) null, align 1
+; RESULT-NEXT: br i1 %cond0.i, label %L1.i, label %L2.i
+
+; RESULT: L1.i: ; preds = %L2.i, %call
+; RESULT-NEXT: br label %L2.i
+
+; RESULT: L2.i: ; preds = %L1.i, %call
+; RESULT-NEXT: store ptr blockaddress(@callee_with_blockaddress_use, %L1), ptr %alloca.i, align 8
+; RESULT-NEXT: store ptr blockaddress(@calls_func_with_blockaddress_use, %L2.i), ptr %alloca.i, align 8
+; RESULT-NEXT: store ptr blockaddress(@callee_with_blockaddress_use, %L3), ptr %alloca.i, align 8
+; RESULT-NEXT: %cond1.i = load volatile i1, ptr addrspace(1) null, align 1
+; RESULT-NEXT: br i1 %cond1.i, label %L1.i, label %callee_with_blockaddress_use.exit
+
+; RESULT: callee_with_blockaddress_use.exit: ; preds = %L2.i
+; RESULT-NEXT: %load.i = load ptr, ptr %alloca.i, align 8
+; RESULT-NEXT: call void @llvm.lifetime.end.p0(ptr %alloca.i)
+; RESULT-NEXT: store i32 3000, ptr null, align 4
+; RESULT-NEXT: br label %ret
+
+; RESULT: ret: ; preds = %callee_with_blockaddress_use.exit, %entry
+; RESULT-NEXT: store i32 4000, ptr null, align 4
+; RESULT-NEXT: ret void
+define void @calls_func_with_blockaddress_use(i1 %arg0) {
+entry:
+ store i32 1000, ptr null
+ br i1 %arg0, label %call, label %ret
+
+call:
+ store i32 2000, ptr null
+ call ptr @callee_with_blockaddress_use()
+ store i32 3000, ptr null
+ br label %ret
+
+ret:
+ store i32 4000, ptr null
+ ret void
+}
+
+; CHECK-LABEL: define void @callee_with_fallthrough_blockaddress_use(
+; RESULT: L2:
+; RESULT-NEXT: store ptr blockaddress(@callee_with_fallthrough_blockaddress_use, %L1), ptr %alloca, align 8
+; RESULT-NEXT: store ptr blockaddress(@callee_with_fallthrough_blockaddress_use, %L2), ptr %alloca, align 8
+; RESULT-NEXT: store ptr blockaddress(@callee_with_fallthrough_blockaddress_use, %L3), ptr %alloca, align 8
+; RESULT-NEXT: br label %L3
+define void @callee_with_fallthrough_blockaddress_use() {
+entry:
+ %alloca = alloca ptr
+ br label %L1
+
+L1:
+ store i32 999, ptr null
+ br label %L2
+
+L2: ; preds = %L1
+ ; reference a block before this block
+ store ptr blockaddress(@callee_with_fallthrough_blockaddress_use, %L1), ptr %alloca
+
+ ; reference the block itself from the block
+ store ptr blockaddress(@callee_with_fallthrough_blockaddress_use, %L2), ptr %alloca
+
+ ; reference a block after this block
+ store ptr blockaddress(@callee_with_fallthrough_blockaddress_use, %L3), ptr %alloca
+ br label %L3
+
+L3: ; preds = %L2
+ %load = load ptr, ptr %alloca
+ ret void
+}
+
+
+; CHECK-LABEL: define void @calls_func_with_fallthrough_blockaddress_use(
+; RESULT: entry:
+; RESULT-NEXT: %alloca.i = alloca ptr, align 8
+; RESULT-NEXT: store i32 1000, ptr null
+; RESULT-NEXT: br i1 %arg0, label %call, label %ret
+
+; RESULT: call:
+; RESULT-NEXT: store i32 2000, ptr null, align 4
+; RESULT-NEXT: call void @llvm.lifetime.start.p0(ptr %alloca.i)
+; RESULT-NEXT: br label %L1.i
+
+; RESULT: L1.i: ; preds = %call
+; RESULT-NEXT: store i32 999, ptr null, align 4
+; RESULT-NEXT: br label %L2.i
+
+; RESULT: L2.i:
+; RESULT-NEXT: store ptr blockaddress(@calls_func_with_fallthrough_blockaddress_use, %L1.i), ptr %alloca.i, align 8
+; RESULT-NEXT: store ptr blockaddress(@calls_func_with_fallthrough_blockaddress_use, %L2.i), ptr %alloca.i, align 8
+; RESULT-NEXT: store ptr blockaddress(@callee_with_fallthrough_blockaddress_use, %L3), ptr %alloca.i, align 8
+; RESULT-NEXT: br label %callee_with_fallthrough_blockaddress_use.exit
+
+; RESULT: callee_with_fallthrough_blockaddress_use.exit: ; preds = %L2.i
+; RESULT-NEXT: %load.i = load ptr, ptr %alloca.i, align 8
+; RESULT-NEXT: call void @llvm.lifetime.end.p0(ptr %alloca.i)
+; RESULT-NEXT: store i32 3000, ptr null, align 4
+; RESULT-NEXT: br label %ret
+
+; RESULT: ret:
+; RESULT-NEXT: store i32 4000, ptr null, align 4
+; RESULT-NEXT: ret void
+define void @calls_func_with_fallthrough_blockaddress_use(i1 %arg0) {
+entry:
+ store i32 1000, ptr null
+ br i1 %arg0, label %call, label %ret
+
+call:
+ store i32 2000, ptr null
+ call void @callee_with_fallthrough_blockaddress_use()
+ store i32 3000, ptr null
+ br label %ret
+
+ret:
+ store i32 4000, ptr null
+ ret void
+}
+
+declare i32 @extern_returns_twice() returns_twice
+
+; CHECK-LABEL: define i32 @callee_returns_twice(
+; RESULT-NEXT: %call = call i32 @extern_returns_twice()
+; RESULT-NEXT: %add = add nsw i32 1, %call
+; RESULT-NEXT: ret i32 %add
+define i32 @callee_returns_twice() {
+ %call = call i32 @extern_returns_twice()
+ %add = add nsw i32 1, %call
+ ret i32 %add
+}
+
+; CHECK-LABEL: define i32 @caller_returns_twice_calls_callee_returns_twice(
+; RESULT-NEXT: %call.i = call i32 @extern_returns_twice()
+; RESULT-NEXT: %add.i = add nsw i32 1, %call.i
+; RESULT-NEXT: %add = add nsw i32 1, %add.i
+; RESULT-NEXT: ret i32 %add
+define i32 @caller_returns_twice_calls_callee_returns_twice() returns_twice {
+ %call = call i32 @callee_returns_twice()
+ %add = add nsw i32 1, %call
+ ret i32 %add
+}
+
+; Inliner usually blocks inlining of returns_twice functions into
+; non-returns_twice functions
+; CHECK-LABEL: define i32 @regular_caller_calls_callee_returns_twice() {
+; RESULT-NEXT: %call.i = call i32 @extern_returns_twice()
+; RESULT-NEXT: %add.i = add nsw i32 1, %call.i
+; RESULT-NEXT: %add = add nsw i32 1, %add.i
+; RESULT-NEXT: ret i32 %add
+define i32 @regular_caller_calls_callee_returns_twice() {
+ %call = call i32 @callee_returns_twice()
+ %add = add nsw i32 1, %call
+ ret i32 %add
+}
+
+; CHECK-LABEL: define void @caller_with_vastart(
+; RESULT-NEXT: %ap = alloca ptr, align 4
+; RESULT-NEXT: %ap2 = alloca ptr, align 4
+; RESULT-NEXT: call void @llvm.va_start.p0(ptr nonnull %ap)
+; RESULT-NEXT: call void @llvm.va_end.p0(ptr nonnull %ap)
+; RESULT-NEXT: call void @llvm.va_start.p0(ptr nonnull %ap)
+; RESULT-NEXT: call void @llvm.va_end.p0(ptr nonnull %ap)
+; RESULT-NEXT: ret void
+define void @caller_with_vastart(ptr noalias nocapture readnone %args, ...) {
+ %ap = alloca ptr, align 4
+ %ap2 = alloca ptr, align 4
+ call void @llvm.va_start.p0(ptr nonnull %ap)
+ call fastcc void @callee_with_vaend(ptr nonnull %ap)
+ call void @llvm.va_start.p0(ptr nonnull %ap)
+ call fastcc void @callee_with_vaend_alwaysinline(ptr nonnull %ap)
+ ret void
+}
+
+; CHECK-LABEL: define fastcc void @callee_with_vaend(
+; RESULT-NEXT: tail call void @llvm.va_end.p0(ptr %a)
+; RESULT-NEXT: ret void
+define fastcc void @callee_with_vaend(ptr %a) {
+ tail call void @llvm.va_end.p0(ptr %a)
+ ret void
+}
+
+; CHECK-LABEL: define internal fastcc void @callee_with_vaend_alwaysinline(
+; RESULT-NEXT: tail call void @llvm.va_end.p0(ptr %a)
+; RESULT-NEXT: ret void
+define internal fastcc void @callee_with_vaend_alwaysinline(ptr %a) alwaysinline {
+ tail call void @llvm.va_end.p0(ptr %a)
+ ret void
+}
+
+; CHECK-LABEL: define i32 @callee_with_va_start(
+define i32 @callee_with_va_start(ptr %a, ...) {
+ %vargs = alloca ptr, align 8
+ tail call void @llvm.va_start.p0(ptr %a)
+ %va1 = va_arg ptr %vargs, i32
+ call void @llvm.va_end(ptr %vargs)
+ ret i32 %va1
+}
+
+; CHECK-LABEL: define i32 @callee_vastart_caller(
+; RESULT-NEXT: %vargs.i = alloca ptr, align 8
+; RESULT-NEXT: %ap = alloca ptr, align 4
+; RESULT-NEXT: %b = load i32, ptr null, align 4
+; RESULT-NEXT: call void @llvm.lifetime.start.p0(ptr %vargs.i)
+; RESULT-NEXT: call void @llvm.va_start.p0(ptr nonnull %ap)
+; RESULT-NEXT: %va1.i = va_arg ptr %vargs.i, i32
+; RESULT-NEXT: call void @llvm.va_end.p0(ptr %vargs.i)
+; RESULT-NEXT: call void @llvm.lifetime.end.p0(ptr %vargs.i)
+; RESULT-NEXT: ret i32 %va1.i
+define i32 @callee_vastart_caller(ptr noalias nocapture readnone %args, ...) {
+ %ap = alloca ptr, align 4
+ %b = load i32, ptr null
+ %result = call i32 (ptr, ...) @callee_with_va_start(ptr nonnull %ap, i32 %b)
+ ret i32 %result
+}
+
+declare void @llvm.localescape(...)
+
+; CHECK-LABEL: define internal void @callee_uses_localrecover(
+define internal void @callee_uses_localrecover(ptr %fp) {
+ %a.i8 = call ptr @llvm.localrecover(ptr @callee_uses_localescape, ptr %fp, i32 0)
+ store i32 42, ptr %a.i8
+ ret void
+}
+
+; CHECK-LABEL: define i32 @callee_uses_localescape(
+; RESULT-NEXT: %a = alloca i32, align 4
+; RESULT-NEXT: call void (...) @llvm.localescape(ptr %a)
+; RESULT-NEXT: %fp = call ptr @llvm.frameaddress.p0(i32 0)
+; RESULT-NEXT: %a.i8.i = call ptr @llvm.localrecover(ptr @callee_uses_localescape, ptr %fp, i32 0)
+; RESULT-NEXT: store i32 42, ptr %a.i8.i, align 4
+; RESULT-NEXT: %r = load i32, ptr %a, align 4
+; RESULT-NEXT: ret i32 %r
+define i32 @callee_uses_localescape() alwaysinline {
+ %a = alloca i32
+ call void (...) @llvm.localescape(ptr %a)
+ %fp = call ptr @llvm.frameaddress(i32 0)
+ tail call void @callee_uses_localrecover(ptr %fp)
+ %r = load i32, ptr %a
+ ret i32 %r
+}
+
+; CHECK-LABEL: define i32 @callee_uses_localescape_caller(
+; RESULT-NEXT: %a.i = alloca i32, align 4
+; RESULT-NEXT: call void @llvm.lifetime.start.p0(ptr %a.i)
+; RESULT-NEXT: call void (...) @llvm.localescape(ptr %a.i)
+; RESULT-NEXT: %fp.i = call ptr @llvm.frameaddress.p0(i32 0)
+; RESULT-NEXT: %a.i8.i.i = call ptr @llvm.localrecover(ptr @callee_uses_localescape, ptr %fp.i, i32 0)
+; RESULT-NEXT: store i32 42, ptr %a.i8.i.i, align 4
+; RESULT-NEXT: %r.i = load i32, ptr %a.i, align 4
+; RESULT-NEXT: call void @llvm.lifetime.end.p0(ptr %a.i)
+; RESULT-NEXT: ret i32 %r.i
+define i32 @callee_uses_localescape_caller() {
+ %r = tail call i32 @callee_uses_localescape()
+ ret i32 %r
+}
+
+declare void @llvm.icall.branch.funnel(...)
+
+; CHECK-LABEL: define void @callee_uses_branch_funnel(
+; RESULT-NEXT: musttail call void (...) @llvm.icall.branch.funnel(...)
+; RESULT-NEXT: ret void
+define void @callee_uses_branch_funnel(...) {
+ musttail call void (...) @llvm.icall.branch.funnel(...)
+ ret void
+}
+
+; FIXME: This should fail the verifier after inlining
+; CHECK-LABEL: define void @callee_branch_funnel_musttail_caller(
+; RESULT-NEXT: call void (...) @llvm.icall.branch.funnel()
+; RESULT-NEXT: ret void
+define void @callee_branch_funnel_musttail_caller() {
+ call void (...) @callee_uses_branch_funnel()
+ ret void
+}
+
+; Ignore noinline on the callee function
+; CHECK-LABEL: define void @noinline_callee(
+; RESULT-NEXT: store i32 123, ptr %arg, align 4
+; RESULT-NEXT: ret void
+define void @noinline_callee(ptr %arg) {
+ store i32 123, ptr %arg
+ ret void
+}
+
+; CHECK-LABEL: define void @calls_noinline_func(
+; RESULT-NEXT: store i32 123, ptr %outer.arg, align 4
+; RESULT-NEXT: ret void
+define void @calls_noinline_func(ptr %outer.arg) {
+ call void @noinline_callee(ptr %outer.arg)
+ ret void
+}
+
+; Ignore noinline on the callsite
+; CHECK-LABEL: define void @calls_noinline_callsite(
+; RESULT-NEXT: store i32 123, ptr %outer.arg, align 4
+; RESULT-NEXT: ret void
+define void @calls_noinline_callsite(ptr %outer.arg) {
+ call void @simple_callee(ptr %outer.arg) noinline
+ ret void
+}
+
+; Ignore optnone
+; CHECK-LABEL: define void @optnone_callee(
+; RESULT-NEXT: store i32 5555, ptr %arg, align 4
+; RESULT-NEXT: ret void
+define void @optnone_callee(ptr %arg) optnone noinline {
+ store i32 5555, ptr %arg
+ ret void
+}
+
+; CHECK-LABEL: define void @calls_optnone_callee(
+; RESULT-NEXT: store i32 5555, ptr %outer.arg, align 4
+; RESULT-NEXT: ret void
+define void @calls_optnone_callee(ptr %outer.arg) {
+ call void @optnone_callee(ptr %outer.arg)
+ ret void
+}
+
+; CHECK-LABEL: define void @optnone_caller(
+; RESULT-NEXT: store i32 123, ptr %outer.arg, align 4
+; RESULT-NEXT: ret void
+define void @optnone_caller(ptr %outer.arg) optnone noinline {
+ call void @simple_callee(ptr %outer.arg)
+ ret void
+}
+
+; CHECK-LABEL: define weak void @interposable_callee(
+; RESULT-NEXT: store i32 2024, ptr %arg, align 4
+; RESULT-NEXT: ret void
+define weak void @interposable_callee(ptr %arg) {
+ store i32 2024, ptr %arg
+ ret void
+}
+
+; Ignore interposable linkage
+; CHECK-LABEL: @calls_interposable_callee(
+; RESULT-NEXT: store i32 2024, ptr %arg, align 4
+; RESULT-NEXT: ret void
+define void @calls_interposable_callee(ptr %arg) {
+ call void @interposable_callee(ptr %arg)
+ ret void
+}
+
+; Ignore null_pointer_is_valid
+; CHECK-LABEL: @null_pointer_is_valid_callee(
+; RESULT-NEXT: store i32 42069, ptr %arg, align 4
+; RESULT-NEXT: ret void
+define void @null_pointer_is_valid_callee(ptr %arg) null_pointer_is_valid {
+ store i32 42069, ptr %arg
+ ret void
+}
+
+; CHECK-LABEL: @calls_null_pointer_is_valid_callee(
+; RESULT-NEXT: store i32 42069, ptr %arg, align 4
+; RESULT-NEXT: ret void
+define void @calls_null_pointer_is_valid_callee(ptr %arg) {
+ call void @null_pointer_is_valid_callee(ptr %arg)
+ ret void
+}
+
+; CHECK-LABEL: @byval_arg_uses_non_alloca_addrspace(
+; RESULT-NEXT: %load = load i32, ptr addrspace(1) %arg, align 4
+; RESULT-NEXT: ret i32 %load
+define i32 @byval_arg_uses_non_alloca_addrspace(ptr addrspace(1) byval(i32) %arg) {
+ %load = load i32, ptr addrspace(1) %arg
+ ret i32 %load
+}
+
+; CHECK-LABEL: @calls_byval_arg_uses_non_alloca_addrspace(
+; RESULT-NEXT: %arg1 = alloca i32, align 4, addrspace(1)
+; RESULT-NEXT: call void @llvm.lifetime.start.p1(ptr addrspace(1) %arg1)
+; RESULT-NEXT: call void @llvm.memcpy.p1.p1.i64(ptr addrspace(1) align 4 %arg1, ptr addrspace(1) %arg, i64 4, i1 false)
+; RESULT-NEXT: %load.i = load i32, ptr addrspace(1) %arg1, align 4
+; RESULT-NEXT: call void @llvm.lifetime.end.p1(ptr addrspace(1) %arg1)
+; RESULT-NEXT: ret i32 %load.i
+define i32 @calls_byval_arg_uses_non_alloca_addrspace(ptr addrspace(1) %arg) {
+ %call = call i32 @byval_arg_uses_non_alloca_addrspace(ptr addrspace(1) byval(i32) %arg)
+ ret i32 %call
+}
+
+; CHECK-LABEL: define void @callee_stacksize(
+; RESULT-NEXT: %alloca = alloca [4096 x i32]
+; RESULT-NEXT: store i32 12345678, ptr %arg
+; RESULT-NEXT: store i32 0, ptr %alloca
+; RESULT-NEXT: ret void
+define void @callee_stacksize(ptr %arg) "inline-max-stacksize"="4" {
+ %alloca = alloca [4096 x i32]
+ store i32 12345678, ptr %arg
+ store i32 0, ptr %alloca
+ ret void
+}
+
+; CHECK-LABEL: define void @caller_stacksize(
+; RESULT-NEXT: %alloca.i = alloca [4096 x i32], align 4
+; RESULT-NEXT: call void @llvm.lifetime.start.p0(ptr %alloca.i)
+; RESULT-NEXT: store i32 12345678, ptr %arg, align 4
+; RESULT-NEXT: store i32 0, ptr %alloca.i, align 4
+; RESULT-NEXT: call void @llvm.lifetime.end.p0(ptr %alloca.i)
+; RESULT-NEXT: ret void
+define void @caller_stacksize(ptr %arg) {
+ call void @callee_stacksize(ptr %arg)
+ ret void
+}
+
+; CHECK-LABEL: define void @callee_dynamic_alloca(
+; RESULT-NEXT: %alloca = alloca i32, i32 %n, align 4
+; RESULT-NEXT: store i32 12345678, ptr %arg, align 4
+; RESULT-NEXT: store i32 0, ptr %alloca, align 4
+; RESULT-NEXT: ret void
+define void @callee_dynamic_alloca(ptr %arg, i32 %n) "inline-max-stacksize"="4" {
+ %alloca = alloca i32, i32 %n
+ store i32 12345678, ptr %arg
+ store i32 0, ptr %alloca
+ ret void
+}
+
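+; Inlining a callee containing a dynamic alloca brackets the inlined body
+; with llvm.stacksave/llvm.stackrestore, as checked below.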
+; CHECK-LABEL: define void @caller_dynamic_alloca(
+; RESULT-NEXT: %savedstack = call ptr @llvm.stacksave.p0()
+; RESULT-NEXT: %alloca.i = alloca i32, i32 %size, align 4
+; RESULT-NEXT: store i32 12345678, ptr %arg, align 4
+; RESULT-NEXT: store i32 0, ptr %alloca.i, align 4
+; RESULT-NEXT: call void @llvm.stackrestore.p0(ptr %savedstack)
+; RESULT-NEXT: ret void
+define void @caller_dynamic_alloca(ptr %arg, i32 %size) {
+ call void @callee_dynamic_alloca(ptr %arg, i32 %size)
+ ret void
+}
+
+declare void @extern_noduplicate() noduplicate
+
+; CHECK-LABEL: define void @callee_noduplicate_calls(
+; RESULT-NEXT: call void @extern_noduplicate()
+; RESULT-NEXT: call void @extern_noduplicate()
+; RESULT-NEXT: ret void
+define void @callee_noduplicate_calls() {
+ call void @extern_noduplicate()
+ call void @extern_noduplicate()
+ ret void
+}
+
+; Ignore noduplicate restrictions
+; CHECK-LABEL: define void @caller_noduplicate_calls_callee(
+; RESULT-NEXT: call void @extern_noduplicate()
+; RESULT-NEXT: call void @extern_noduplicate()
+; RESULT-NEXT: call void @extern_noduplicate()
+; RESULT-NEXT: call void @extern_noduplicate()
+; RESULT-NEXT: ret void
+define void @caller_noduplicate_calls_callee() {
+ call void @callee_noduplicate_calls()
+ call void @callee_noduplicate_calls()
+ ret void
+}
+
+; CHECK-LABEL: define void @sanitize_address_callee(
+; RESULT-NEXT: store i32 333, ptr %arg
+; RESULT-NEXT: ret void
+define void @sanitize_address_callee(ptr %arg) sanitize_address {
+ store i32 333, ptr %arg
+ ret void
+}
+
+; CHECK-LABEL: define void @no_sanitize_address_caller(
+; RESULT-NEXT: store i32 333, ptr %arg
+; RESULT-NEXT: ret void
+define void @no_sanitize_address_caller(ptr %arg) {
+ call void @sanitize_address_callee(ptr %arg)
+ ret void
+}
+
+; CHECK-LABEL: define float @nonstrictfp_callee(
+; RESULT-NEXT: %add = fadd float %a, %a
+; RESULT-NEXT: ret float %add
+define float @nonstrictfp_callee(float %a) {
+ %add = fadd float %a, %a
+ ret float %add
+}
+
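+; Inlining the non-strictfp callee into a strictfp caller rewrites the
+; callee's fadd into @llvm.experimental.constrained.fadd, so two constrained
+; calls appear in the result.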
+; CHECK-LABEL: define float @strictfp_caller(
+; RESULT-NEXT: call float @llvm.experimental.constrained.fadd.f32(
+; RESULT-NEXT: call float @llvm.experimental.constrained.fadd.f32(
+; RESULT-NEXT: ret float %add
+define float @strictfp_caller(float %a) strictfp {
+ %call = call float @nonstrictfp_callee(float %a) strictfp
+ %add = call float @llvm.experimental.constrained.fadd.f32(float %call, float 2.0, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret float %add
+}
+
+; CHECK-LABEL: define float @strictfp_callee(
+; RESULT-NEXT: call float @llvm.experimental.constrained.fadd.f32(
+; RESULT-NEXT: ret float
+define float @strictfp_callee(float %a) strictfp {
+ %add = call float @llvm.experimental.constrained.fadd.f32(float %a, float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret float %add
+}
+
+; FIXME: This should not inline. The inlined case should fail the
+; verifier, but it does not.
+; CHECK-LABEL: define float @nonstrictfp_caller(
+; RESULT-NEXT: call float @llvm.experimental.constrained.fadd.f32(
+; RESULT-NEXT: fadd float
+; RESULT-NEXT: ret float
+define float @nonstrictfp_caller(float %a) {
+ %call = call float @strictfp_callee(float %a)
+ %add1 = fadd float %call, 2.0
+ ret float %add1
+}
+
+define void @caller_also_has_non_callee_use() {
+ call void @simple_callee(ptr @simple_callee)
+ ret void
+}
diff --git a/llvm/tools/llvm-profdata/CMakeLists.txt b/llvm/tools/llvm-profdata/CMakeLists.txt
index 165be9a2..e5aa858 100644
--- a/llvm/tools/llvm-profdata/CMakeLists.txt
+++ b/llvm/tools/llvm-profdata/CMakeLists.txt
@@ -10,9 +10,6 @@ add_llvm_tool(llvm-profdata
DEPENDS
intrinsics_gen
- GENERATE_DRIVER
)
-if(NOT LLVM_TOOL_LLVM_DRIVER_BUILD)
- target_link_libraries(llvm-profdata PRIVATE LLVMDebuginfod)
-endif()
+target_link_libraries(llvm-profdata PRIVATE LLVMDebuginfod)
diff --git a/llvm/tools/llvm-profdata/llvm-profdata.cpp b/llvm/tools/llvm-profdata/llvm-profdata.cpp
index d658ea9..15ddb05 100644
--- a/llvm/tools/llvm-profdata/llvm-profdata.cpp
+++ b/llvm/tools/llvm-profdata/llvm-profdata.cpp
@@ -3464,10 +3464,7 @@ static int order_main() {
return 0;
}
-int llvm_profdata_main(int argc, char **argvNonConst,
- const llvm::ToolContext &) {
- const char **argv = const_cast<const char **>(argvNonConst);
-
+int main(int argc, const char *argv[]) {
StringRef ProgName(sys::path::filename(argv[0]));
if (argc < 2) {
diff --git a/llvm/tools/llvm-reduce/CMakeLists.txt b/llvm/tools/llvm-reduce/CMakeLists.txt
index 7be90bc..c8673b4 100644
--- a/llvm/tools/llvm-reduce/CMakeLists.txt
+++ b/llvm/tools/llvm-reduce/CMakeLists.txt
@@ -39,6 +39,7 @@ add_llvm_tool(llvm-reduce
deltas/ReduceGlobalValues.cpp
deltas/ReduceGlobalVarInitializers.cpp
deltas/ReduceGlobalVars.cpp
+ deltas/ReduceInlineCallSites.cpp
deltas/ReduceInstructions.cpp
deltas/ReduceInstructionFlags.cpp
deltas/ReduceInvokes.cpp
diff --git a/llvm/tools/llvm-reduce/DeltaManager.cpp b/llvm/tools/llvm-reduce/DeltaManager.cpp
index f5c6276..9b13202 100644
--- a/llvm/tools/llvm-reduce/DeltaManager.cpp
+++ b/llvm/tools/llvm-reduce/DeltaManager.cpp
@@ -28,6 +28,7 @@
#include "deltas/ReduceGlobalVarInitializers.h"
#include "deltas/ReduceGlobalVars.h"
#include "deltas/ReduceIRReferences.h"
+#include "deltas/ReduceInlineCallSites.h"
#include "deltas/ReduceInstructionFlags.h"
#include "deltas/ReduceInstructionFlagsMIR.h"
#include "deltas/ReduceInstructions.h"
diff --git a/llvm/tools/llvm-reduce/DeltaPasses.def b/llvm/tools/llvm-reduce/DeltaPasses.def
index 3aed0cc..845b106 100644
--- a/llvm/tools/llvm-reduce/DeltaPasses.def
+++ b/llvm/tools/llvm-reduce/DeltaPasses.def
@@ -58,7 +58,7 @@ DELTA_PASS_IR("volatile", reduceVolatileInstructionsDeltaPass, "Reducing Volatil
DELTA_PASS_IR("atomic-ordering", reduceAtomicOrderingDeltaPass, "Reducing Atomic Ordering")
DELTA_PASS_IR("syncscopes", reduceAtomicSyncScopesDeltaPass, "Reducing Atomic Sync Scopes")
DELTA_PASS_IR("instruction-flags", reduceInstructionFlagsDeltaPass, "Reducing Instruction Flags")
-
+DELTA_PASS_IR("inline-call-sites", reduceInlineCallSitesDeltaPass, "Inlining callsites")
#ifndef DELTA_PASS_MIR
#define DELTA_PASS_MIR(NAME, FUNC, DESC)
diff --git a/llvm/tools/llvm-reduce/deltas/ReduceInlineCallSites.cpp b/llvm/tools/llvm-reduce/deltas/ReduceInlineCallSites.cpp
new file mode 100644
index 0000000..cfef367
--- /dev/null
+++ b/llvm/tools/llvm-reduce/deltas/ReduceInlineCallSites.cpp
@@ -0,0 +1,106 @@
+//===- ReduceInlineCallSites.cpp ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ReduceInlineCallSites.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+
+using namespace llvm;
+
+extern cl::OptionCategory LLVMReduceOptions;
+
+static cl::opt<int> CallsiteInlineThreshold(
+ "reduce-callsite-inline-threshold",
+ cl::desc("Number of instructions in a function to unconditionally inline "
+ "(-1 for inline all)"),
+ cl::init(5), cl::cat(LLVMReduceOptions));
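+// The threshold counts non-terminator instructions per function; see
+// inlineWillReduceComplexity below, which only inlines a callsite when both
+// the caller and the callee stay under this limit (negative inlines all).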
+
+static bool functionHasMoreThanNonTerminatorInsts(const Function &F,
+ uint64_t NumInsts) {
+ uint64_t InstCount = 0;
+ for (const BasicBlock &BB : F) {
+ for (const Instruction &I : make_range(BB.begin(), std::prev(BB.end()))) {
+      (void)I; // Only counting; silence the unused-variable warning.
+ if (InstCount++ > NumInsts)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool hasOnlyOneCallUse(const Function &F) {
+ unsigned UseCount = 0;
+ for (const Use &U : F.uses()) {
+ const CallBase *CB = dyn_cast<CallBase>(U.getUser());
+ if (!CB || !CB->isCallee(&U))
+ return false;
+ if (UseCount++ > 1)
+ return false;
+ }
+
+ return UseCount == 1;
+}
+
+// TODO: This could use more thought.
+static bool inlineWillReduceComplexity(const Function &Caller,
+ const Function &Callee) {
+ // Backdoor to force all possible inlining.
+ if (CallsiteInlineThreshold < 0)
+ return true;
+
+ if (!hasOnlyOneCallUse(Callee))
+ return false;
+
+  // Permit inlining only when both the caller and the callee are small, so
+  // the resulting function stays small.
+ if (!functionHasMoreThanNonTerminatorInsts(Callee, CallsiteInlineThreshold) &&
+ !functionHasMoreThanNonTerminatorInsts(Caller, CallsiteInlineThreshold))
+ return true;
+
+ return false;
+}
+
+static void reduceCallSites(Oracle &O, Function &F) {
+ std::vector<std::pair<CallBase *, InlineFunctionInfo>> CallSitesToInline;
+
+ for (Use &U : F.uses()) {
+ if (CallBase *CB = dyn_cast<CallBase>(U.getUser())) {
+ // Ignore callsites with wrong call type.
+ if (!CB->isCallee(&U))
+ continue;
+
+      // We do not consider isInlineViable here. It is overly conservative in
+      // cases that the inliner should handle correctly (e.g., it disallows
+      // inlining of functions with indirectbr). Some of its other checks guard
+      // correctness issues that we do need to worry about here.
+
+ // TODO: Should we delete the function body?
+ InlineFunctionInfo IFI;
+ if (CanInlineCallSite(*CB, IFI).isSuccess() &&
+ inlineWillReduceComplexity(*CB->getFunction(), F) && !O.shouldKeep())
+ CallSitesToInline.emplace_back(CB, std::move(IFI));
+ }
+ }
+
+ // TODO: InlineFunctionImpl will implicitly perform some simplifications /
+ // optimizations which we should be able to opt-out of.
+  for (auto &[CB, IFI] : CallSitesToInline)
+ InlineFunctionImpl(*CB, IFI);
+}
+
+void llvm::reduceInlineCallSitesDeltaPass(Oracle &O, ReducerWorkItem &Program) {
+ for (Function &F : Program.getModule()) {
+ if (!F.isDeclaration())
+ reduceCallSites(O, F);
+ }
+}
diff --git a/llvm/tools/llvm-reduce/deltas/ReduceInlineCallSites.h b/llvm/tools/llvm-reduce/deltas/ReduceInlineCallSites.h
new file mode 100644
index 0000000..1df31a1
--- /dev/null
+++ b/llvm/tools/llvm-reduce/deltas/ReduceInlineCallSites.h
@@ -0,0 +1,18 @@
+//===- ReduceInlineCallSites.h ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TOOLS_LLVM_REDUCE_DELTAS_REDUCEINLINECALLSITES_H
+#define LLVM_TOOLS_LLVM_REDUCE_DELTAS_REDUCEINLINECALLSITES_H
+
+#include "Delta.h"
+
+namespace llvm {
+void reduceInlineCallSitesDeltaPass(Oracle &O, ReducerWorkItem &Program);
+} // namespace llvm
+
+#endif
diff --git a/llvm/unittests/ADT/STLExtrasTest.cpp b/llvm/unittests/ADT/STLExtrasTest.cpp
index 5020acd..47469983 100644
--- a/llvm/unittests/ADT/STLExtrasTest.cpp
+++ b/llvm/unittests/ADT/STLExtrasTest.cpp
@@ -14,6 +14,7 @@
#include <array>
#include <climits>
#include <cstddef>
+#include <functional>
#include <initializer_list>
#include <iterator>
#include <list>
@@ -1658,6 +1659,54 @@ TEST(STLExtrasTest, Accumulate) {
EXPECT_EQ(accumulate(V1, 10), std::accumulate(V1.begin(), V1.end(), 10));
EXPECT_EQ(accumulate(drop_begin(V1), 7),
std::accumulate(V1.begin() + 1, V1.end(), 7));
+
+ EXPECT_EQ(accumulate(V1, 2, std::multiplies<>{}), 240);
+}
+
+TEST(STLExtrasTest, SumOf) {
+ EXPECT_EQ(sum_of(std::vector<int>()), 0);
+ EXPECT_EQ(sum_of(std::vector<int>(), 1), 1);
+ std::vector<int> V1 = {1, 2, 3, 4, 5};
+ static_assert(std::is_same_v<decltype(sum_of(V1)), int>);
+ static_assert(std::is_same_v<decltype(sum_of(V1, 1)), int>);
+ EXPECT_EQ(sum_of(V1), 15);
+ EXPECT_EQ(sum_of(V1, 1), 16);
+
+ std::vector<float> V2 = {1.0f, 2.0f, 4.0f};
+ static_assert(std::is_same_v<decltype(sum_of(V2)), float>);
+  static_assert(std::is_same_v<decltype(sum_of(V2, 1.0f)), float>);
+  static_assert(std::is_same_v<decltype(sum_of(V2, 1.0)), double>);
+ EXPECT_EQ(sum_of(V2), 7.0f);
+ EXPECT_EQ(sum_of(V2, 1.0f), 8.0f);
+
+ // Make sure that for a const argument the return value is non-const.
+ const std::vector<float> V3 = {1.0f, 2.0f};
+ static_assert(std::is_same_v<decltype(sum_of(V3)), float>);
+ EXPECT_EQ(sum_of(V3), 3.0f);
+}
+
+TEST(STLExtrasTest, ProductOf) {
+ EXPECT_EQ(product_of(std::vector<int>()), 1);
+ EXPECT_EQ(product_of(std::vector<int>(), 0), 0);
+ EXPECT_EQ(product_of(std::vector<int>(), 1), 1);
+ std::vector<int> V1 = {1, 2, 3, 4, 5};
+ static_assert(std::is_same_v<decltype(product_of(V1)), int>);
+ static_assert(std::is_same_v<decltype(product_of(V1, 1)), int>);
+ EXPECT_EQ(product_of(V1), 120);
+ EXPECT_EQ(product_of(V1, 1), 120);
+ EXPECT_EQ(product_of(V1, 2), 240);
+
+ std::vector<float> V2 = {1.0f, 2.0f, 4.0f};
+ static_assert(std::is_same_v<decltype(product_of(V2)), float>);
+  static_assert(std::is_same_v<decltype(product_of(V2, 1.0f)), float>);
+  static_assert(std::is_same_v<decltype(product_of(V2, 1.0)), double>);
+ EXPECT_EQ(product_of(V2), 8.0f);
+ EXPECT_EQ(product_of(V2, 4.0f), 32.0f);
+
+ // Make sure that for a const argument the return value is non-const.
+ const std::vector<float> V3 = {1.0f, 2.0f};
+ static_assert(std::is_same_v<decltype(product_of(V3)), float>);
+ EXPECT_EQ(product_of(V3), 2.0f);
}
struct Foo;
diff --git a/llvm/utils/TableGen/Basic/RuntimeLibcallsEmitter.cpp b/llvm/utils/TableGen/Basic/RuntimeLibcallsEmitter.cpp
index 45cb209..c96331c 100644
--- a/llvm/utils/TableGen/Basic/RuntimeLibcallsEmitter.cpp
+++ b/llvm/utils/TableGen/Basic/RuntimeLibcallsEmitter.cpp
@@ -543,21 +543,8 @@ void RuntimeLibcallEmitter::emitSystemRuntimeLibrarySetCalls(
OS << "void llvm::RTLIB::RuntimeLibcallsInfo::setTargetRuntimeLibcallSets("
"const llvm::Triple &TT, ExceptionHandling ExceptionModel, "
"FloatABI::ABIType FloatABI, EABI EABIVersion, "
- "StringRef ABIName) {\n"
- " struct LibcallImplPair {\n"
- " RTLIB::Libcall Func;\n"
- " RTLIB::LibcallImpl Impl;\n"
- " };\n"
- " auto setLibcallsImpl = [this](\n"
- " ArrayRef<LibcallImplPair> Libcalls,\n"
- " std::optional<llvm::CallingConv::ID> CC = {})\n"
- " {\n"
- " for (const auto [Func, Impl] : Libcalls) {\n"
- " setLibcallImpl(Func, Impl);\n"
- " if (CC)\n"
- " setLibcallImplCallingConv(Impl, *CC);\n"
- " }\n"
- " };\n";
+ "StringRef ABIName) {\n";
+
ArrayRef<const Record *> AllLibs =
Records.getAllDerivedDefinitions("SystemRuntimeLibrary");
@@ -682,18 +669,21 @@ void RuntimeLibcallEmitter::emitSystemRuntimeLibrarySetCalls(
Funcs.erase(UniqueI, Funcs.end());
- OS << indent(IndentDepth + 2) << "setLibcallsImpl({\n";
+ StringRef CCEnum;
+ if (FuncsWithCC.CallingConv)
+ CCEnum = FuncsWithCC.CallingConv->getValueAsString("CallingConv");
+
for (const RuntimeLibcallImpl *LibCallImpl : Funcs) {
- OS << indent(IndentDepth + 4);
- LibCallImpl->emitTableEntry(OS);
- }
- OS << indent(IndentDepth + 2) << "}";
- if (FuncsWithCC.CallingConv) {
- StringRef CCEnum =
- FuncsWithCC.CallingConv->getValueAsString("CallingConv");
- OS << ", " << CCEnum;
+ OS << indent(IndentDepth + 2);
+ LibCallImpl->emitSetImplCall(OS);
+
+ if (FuncsWithCC.CallingConv) {
+ OS << indent(IndentDepth + 2) << "setLibcallImplCallingConv(";
+ LibCallImpl->emitEnumEntry(OS);
+ OS << ", " << CCEnum << ");\n";
+ }
}
- OS << ");\n\n";
+ OS << '\n';
if (!SubsetPredicate.isAlwaysAvailable()) {
OS << indent(IndentDepth);