Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp                   |  12
-rw-r--r--  llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h              |   4
-rw-r--r--  llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp               | 526
-rw-r--r--  llvm/lib/Target/AArch64/AArch64PrologueEpilogue.h                  |  15
-rw-r--r--  llvm/lib/Target/Hexagon/Hexagon.td                                 |   3
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonPatterns.td                         |   7
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonSubtarget.h                         |   2
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp                   |   7
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp                        |   6
-rw-r--r--  llvm/lib/Target/RISCV/RISCVFeatures.td                             |   5
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td                        |   2
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfoZb.td                          |  94
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrPredicates.td                      |   4
-rw-r--r--  llvm/lib/Target/RISCV/RISCVProcessors.td                           |   3
-rw-r--r--  llvm/lib/Target/RISCV/RISCVSchedSiFive7.td                         | 142
-rw-r--r--  llvm/lib/Target/RISCV/RISCVScheduleV.td                            |  16
-rw-r--r--  llvm/lib/Target/SystemZ/MCTargetDesc/SystemZInstPrinterCommon.cpp |   6
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZConstantPoolValue.cpp               |   7
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZHazardRecognizer.cpp                |   7
-rw-r--r--  llvm/lib/Target/X86/X86InstrAVX10.td                               |   4
20 files changed, 452 insertions, 420 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index dc8e7c8..31b3d18 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1458,6 +1458,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setPartialReduceMLAAction(MLAOps, MVT::v4i32, MVT::v16i8, Legal);
setPartialReduceMLAAction(MLAOps, MVT::v2i32, MVT::v8i8, Legal);
+ setPartialReduceMLAAction(MLAOps, MVT::v2i32, MVT::v16i8, Custom);
setPartialReduceMLAAction(MLAOps, MVT::v2i64, MVT::v16i8, Custom);
if (Subtarget->hasMatMulInt8()) {
@@ -30769,6 +30770,17 @@ AArch64TargetLowering::LowerPARTIAL_REDUCE_MLA(SDValue Op,
ResultVT.isFixedLengthVector() &&
useSVEForFixedLengthVectorVT(ResultVT, /*OverrideNEON=*/true);
+ // We can handle this case natively by accumulating into a wider
+ // zero-padded vector.
+ if (!ConvertToScalable && ResultVT == MVT::v2i32 && OpVT == MVT::v16i8) {
+ SDValue ZeroVec = DAG.getConstant(0, DL, MVT::v4i32);
+ SDValue WideAcc = DAG.getInsertSubvector(DL, ZeroVec, Acc, 0);
+ SDValue Wide =
+ DAG.getNode(Op.getOpcode(), DL, MVT::v4i32, WideAcc, LHS, RHS);
+ SDValue Reduced = DAG.getNode(AArch64ISD::ADDP, DL, MVT::v4i32, Wide, Wide);
+ return DAG.getExtractSubvector(DL, MVT::v2i32, Reduced, 0);
+ }
+
if (ConvertToScalable) {
ResultVT = getContainerForFixedLengthVector(DAG, ResultVT);
OpVT = getContainerForFixedLengthVector(DAG, LHS.getValueType());
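For reference, a rough scalar model of what the new v2i32/v16i8 path computes (a hypothetical standalone helper, not part of the patch). A partial reduction only guarantees the sum across the result lanes, and the emitted widen + MLA + ADDP + extract sequence preserves exactly that:

  #include <array>
  #include <cstdint>

  // Hypothetical scalar model of the emitted sequence. The two result lanes
  // together hold Acc[0] + Acc[1] + dot(A, B), which is all a partial
  // reduction guarantees.
  std::array<int32_t, 2> partialReduceMLA(std::array<int32_t, 2> Acc,
                                          std::array<int8_t, 16> A,
                                          std::array<int8_t, 16> B) {
    // getInsertSubvector: widen the accumulator into a zero-padded v4i32.
    std::array<int32_t, 4> Wide = {Acc[0], Acc[1], 0, 0};
    // v4i32/v16i8 partial-reduce MLA: four byte products per i32 lane.
    for (int Lane = 0; Lane < 4; ++Lane)
      for (int J = 0; J < 4; ++J)
        Wide[Lane] += int32_t(A[4 * Lane + J]) * int32_t(B[4 * Lane + J]);
    // ADDP pairwise-adds adjacent lanes; the low v2i32 is the result.
    return {Wide[0] + Wide[1], Wide[2] + Wide[3]};
  }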
diff --git a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
index 91e64e6..bd0a17d 100644
--- a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
@@ -315,6 +315,8 @@ public:
}
void setStackSizeSVE(uint64_t ZPR, uint64_t PPR) {
+ assert(isAligned(Align(16), ZPR) && isAligned(Align(16), PPR) &&
+ "expected SVE stack sizes to be aligned to 16-bytes");
StackSizeZPR = ZPR;
StackSizePPR = PPR;
HasCalculatedStackSizeSVE = true;
@@ -425,6 +427,8 @@ public:
// Saves the CalleeSavedStackSize for SVE vectors in 'scalable bytes'
void setSVECalleeSavedStackSize(unsigned ZPR, unsigned PPR) {
+ assert(isAligned(Align(16), ZPR) && isAligned(Align(16), PPR) &&
+ "expected SVE callee-save sizes to be aligned to 16-bytes");
ZPRCalleeSavedStackSize = ZPR;
PPRCalleeSavedStackSize = PPR;
HasSVECalleeSavedStackSize = true;
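The new asserts rely on llvm::isAligned from llvm/Support/Alignment.h; as a minimal sketch, the recorded invariant amounts to (hypothetical helper):

  #include "llvm/Support/Alignment.h"
  #include <cstdint>

  // Hypothetical standalone form of the new invariant: both bookkeeping
  // values must be multiples of 16 before they are recorded.
  bool sveSizesAreAligned(uint64_t ZPR, uint64_t PPR) {
    return llvm::isAligned(llvm::Align(16), ZPR) &&
           llvm::isAligned(llvm::Align(16), PPR);
  }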
diff --git a/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp b/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp
index 1568161..f110558 100644
--- a/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp
+++ b/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp
@@ -60,7 +60,6 @@ static bool isPartOfZPRCalleeSaves(MachineBasicBlock::iterator I) {
case AArch64::PTRUE_C_B:
return I->getFlag(MachineInstr::FrameSetup) ||
I->getFlag(MachineInstr::FrameDestroy);
- case AArch64::SEH_SavePReg:
case AArch64::SEH_SaveZReg:
return true;
}
@@ -75,6 +74,8 @@ static bool isPartOfPPRCalleeSaves(MachineBasicBlock::iterator I) {
case AArch64::LDR_PXI:
return I->getFlag(MachineInstr::FrameSetup) ||
I->getFlag(MachineInstr::FrameDestroy);
+ case AArch64::SEH_SavePReg:
+ return true;
}
}
@@ -94,6 +95,26 @@ AArch64PrologueEpilogueCommon::AArch64PrologueEpilogueCommon(
HasFP = AFL.hasFP(MF);
NeedsWinCFI = AFL.needsWinCFI(MF);
+
+ // Windows unwind can't represent the required stack adjustments if we have
+ // both SVE callee-saves and dynamic stack allocations, and the frame pointer
+ // is before the SVE spills. The allocation of the frame pointer must be the
+ // last instruction in the prologue so the unwinder can restore the stack
+ // pointer correctly. (And there isn't any unwind opcode for `addvl sp, x29,
+ // -17`.)
+ //
+ // Because of this, we do spills in the opposite order on Windows: first SVE,
+ // then GPRs. The main side-effect of this is that it makes accessing
+ // parameters passed on the stack more expensive.
+ //
+ // We could consider rearranging the spills for simpler cases.
+ if (Subtarget.isTargetWindows() && AFI->getSVECalleeSavedStackSize()) {
+ if (AFI->hasStackHazardSlotIndex())
+ reportFatalUsageError("SME hazard padding is not supported on Windows");
+ SVELayout = SVEStackLayout::CalleeSavesAboveFrameRecord;
+ } else if (AFI->hasSplitSVEObjects()) {
+ SVELayout = SVEStackLayout::Split;
+ }
}
MachineBasicBlock::iterator
@@ -334,6 +355,55 @@ bool AArch64PrologueEpilogueCommon::shouldCombineCSRLocalStackBump(
return true;
}
+SVEFrameSizes AArch64PrologueEpilogueCommon::getSVEStackFrameSizes() const {
+ StackOffset PPRCalleeSavesSize =
+ StackOffset::getScalable(AFI->getPPRCalleeSavedStackSize());
+ StackOffset ZPRCalleeSavesSize =
+ StackOffset::getScalable(AFI->getZPRCalleeSavedStackSize());
+ StackOffset PPRLocalsSize = AFL.getPPRStackSize(MF) - PPRCalleeSavesSize;
+ StackOffset ZPRLocalsSize = AFL.getZPRStackSize(MF) - ZPRCalleeSavesSize;
+ if (SVELayout == SVEStackLayout::Split)
+ return {{PPRCalleeSavesSize, PPRLocalsSize},
+ {ZPRCalleeSavesSize, ZPRLocalsSize}};
+ // For simplicity, attribute all locals to ZPRs when split SVE is disabled.
+ return {{PPRCalleeSavesSize, StackOffset{}},
+ {ZPRCalleeSavesSize, PPRLocalsSize + ZPRLocalsSize}};
+}
+
+struct SVEPartitions {
+ struct {
+ MachineBasicBlock::iterator Begin, End;
+ } PPR, ZPR;
+};
+
+static SVEPartitions partitionSVECS(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ StackOffset PPRCalleeSavesSize,
+ StackOffset ZPRCalleeSavesSize,
+ bool IsEpilogue) {
+ MachineBasicBlock::iterator PPRsI = MBBI;
+ MachineBasicBlock::iterator End =
+ IsEpilogue ? MBB.begin() : MBB.getFirstTerminator();
+ auto AdjustI = [&](auto MBBI) { return IsEpilogue ? std::prev(MBBI) : MBBI; };
+ // Process the SVE CS to find the starts/ends of the ZPR and PPR areas.
+ if (PPRCalleeSavesSize) {
+ PPRsI = AdjustI(PPRsI);
+ assert(isPartOfPPRCalleeSaves(*PPRsI) && "Unexpected instruction");
+ while (PPRsI != End && isPartOfPPRCalleeSaves(AdjustI(PPRsI)))
+ IsEpilogue ? (--PPRsI) : (++PPRsI);
+ }
+ MachineBasicBlock::iterator ZPRsI = PPRsI;
+ if (ZPRCalleeSavesSize) {
+ ZPRsI = AdjustI(ZPRsI);
+ assert(isPartOfZPRCalleeSaves(*ZPRsI) && "Unexpected instruction");
+ while (ZPRsI != End && isPartOfZPRCalleeSaves(AdjustI(ZPRsI)))
+ IsEpilogue ? (--ZPRsI) : (++ZPRsI);
+ }
+ if (IsEpilogue)
+ return {{PPRsI, MBBI}, {ZPRsI, PPRsI}};
+ return {{MBBI, PPRsI}, {PPRsI, ZPRsI}};
+}
+
AArch64PrologueEmitter::AArch64PrologueEmitter(MachineFunction &MF,
MachineBasicBlock &MBB,
const AArch64FrameLowering &AFL)
@@ -613,30 +683,12 @@ void AArch64PrologueEmitter::emitPrologue() {
bool IsWin64 = Subtarget.isCallingConvWin64(F.getCallingConv(), F.isVarArg());
unsigned FixedObject = AFL.getFixedObjectSize(MF, AFI, IsWin64, IsFunclet);
- // Windows unwind can't represent the required stack adjustments if we have
- // both SVE callee-saves and dynamic stack allocations, and the frame
- // pointer is before the SVE spills. The allocation of the frame pointer
- // must be the last instruction in the prologue so the unwinder can restore
- // the stack pointer correctly. (And there isn't any unwind opcode for
- // `addvl sp, x29, -17`.)
- //
- // Because of this, we do spills in the opposite order on Windows: first SVE,
- // then GPRs. The main side-effect of this is that it makes accessing
- // parameters passed on the stack more expensive.
- //
- // We could consider rearranging the spills for simpler cases.
- bool FPAfterSVECalleeSaves =
- Subtarget.isTargetWindows() && AFI->getSVECalleeSavedStackSize();
-
- if (FPAfterSVECalleeSaves && AFI->hasStackHazardSlotIndex())
- reportFatalUsageError("SME hazard padding is not supported on Windows");
-
auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
// All of the remaining stack allocations are for locals.
determineLocalsStackSize(NumBytes, PrologueSaveSize);
MachineBasicBlock::iterator FirstGPRSaveI = PrologueBeginI;
- if (FPAfterSVECalleeSaves) {
+ if (SVELayout == SVEStackLayout::CalleeSavesAboveFrameRecord) {
// If we're doing SVE saves first, we need to immediately allocate space
// for fixed objects, then space for the SVE callee saves.
//
@@ -712,110 +764,66 @@ void AArch64PrologueEmitter::emitPrologue() {
if (AFL.windowsRequiresStackProbe(MF, NumBytes + RealignmentPadding))
emitWindowsStackProbe(AfterGPRSavesI, DL, NumBytes, RealignmentPadding);
- StackOffset PPRCalleeSavesSize =
- StackOffset::getScalable(AFI->getPPRCalleeSavedStackSize());
- StackOffset ZPRCalleeSavesSize =
- StackOffset::getScalable(AFI->getZPRCalleeSavedStackSize());
- StackOffset SVECalleeSavesSize = PPRCalleeSavesSize + ZPRCalleeSavesSize;
- StackOffset PPRLocalsSize = AFL.getPPRStackSize(MF) - PPRCalleeSavesSize;
- StackOffset ZPRLocalsSize = AFL.getZPRStackSize(MF) - ZPRCalleeSavesSize;
-
- std::optional<MachineBasicBlock::iterator> ZPRCalleeSavesBegin,
- ZPRCalleeSavesEnd, PPRCalleeSavesBegin, PPRCalleeSavesEnd;
-
+ auto [PPR, ZPR] = getSVEStackFrameSizes();
+ StackOffset SVECalleeSavesSize = ZPR.CalleeSavesSize + PPR.CalleeSavesSize;
+ StackOffset NonSVELocalsSize = StackOffset::getFixed(NumBytes);
StackOffset CFAOffset =
- StackOffset::getFixed((int64_t)MFI.getStackSize() - NumBytes);
+ StackOffset::getFixed(MFI.getStackSize()) - NonSVELocalsSize;
+
MachineBasicBlock::iterator AfterSVESavesI = AfterGPRSavesI;
- if (!FPAfterSVECalleeSaves) {
- // Process the SVE callee-saves to find the starts/ends of the ZPR and PPR
- // areas.
- PPRCalleeSavesBegin = AfterGPRSavesI;
- if (PPRCalleeSavesSize) {
- LLVM_DEBUG(dbgs() << "PPRCalleeSavedStackSize = "
- << PPRCalleeSavesSize.getScalable() << "\n");
-
- assert(isPartOfPPRCalleeSaves(*PPRCalleeSavesBegin) &&
- "Unexpected instruction");
- while (isPartOfPPRCalleeSaves(AfterSVESavesI) &&
- AfterSVESavesI != MBB.getFirstTerminator())
- ++AfterSVESavesI;
+ // Allocate space for the callee saves and PPR locals (if any).
+ if (SVELayout != SVEStackLayout::CalleeSavesAboveFrameRecord) {
+ auto [PPRRange, ZPRRange] =
+ partitionSVECS(MBB, AfterGPRSavesI, PPR.CalleeSavesSize,
+ ZPR.CalleeSavesSize, /*IsEpilogue=*/false);
+ AfterSVESavesI = ZPRRange.End;
+ if (EmitAsyncCFI)
+ emitCalleeSavedSVELocations(AfterSVESavesI);
+
+ StackOffset AllocateBeforePPRs = SVECalleeSavesSize;
+ StackOffset AllocateAfterPPRs = PPR.LocalsSize;
+ if (SVELayout == SVEStackLayout::Split) {
+ AllocateBeforePPRs = PPR.CalleeSavesSize;
+ AllocateAfterPPRs = PPR.LocalsSize + ZPR.CalleeSavesSize;
}
- PPRCalleeSavesEnd = ZPRCalleeSavesBegin = AfterSVESavesI;
- if (ZPRCalleeSavesSize) {
- LLVM_DEBUG(dbgs() << "ZPRCalleeSavedStackSize = "
- << ZPRCalleeSavesSize.getScalable() << "\n");
- assert(isPartOfZPRCalleeSaves(*ZPRCalleeSavesBegin) &&
- "Unexpected instruction");
- while (isPartOfZPRCalleeSaves(AfterSVESavesI) &&
- AfterSVESavesI != MBB.getFirstTerminator())
- ++AfterSVESavesI;
- }
- ZPRCalleeSavesEnd = AfterSVESavesI;
- }
-
- if (EmitAsyncCFI)
- emitCalleeSavedSVELocations(AfterSVESavesI);
-
- if (AFI->hasSplitSVEObjects()) {
- assert(!FPAfterSVECalleeSaves &&
- "Cannot use FPAfterSVECalleeSaves with aarch64-split-sve-objects");
- assert(!AFL.canUseRedZone(MF) &&
- "Cannot use redzone with aarch64-split-sve-objects");
- // TODO: Handle HasWinCFI/NeedsWinCFI?
- assert(!NeedsWinCFI &&
- "WinCFI with aarch64-split-sve-objects is not supported");
-
- // Split ZPR and PPR allocation.
- // Allocate PPR callee saves
- allocateStackSpace(*PPRCalleeSavesBegin, 0, PPRCalleeSavesSize,
+ allocateStackSpace(PPRRange.Begin, 0, AllocateBeforePPRs,
EmitAsyncCFI && !HasFP, CFAOffset,
- MFI.hasVarSizedObjects() || ZPRCalleeSavesSize ||
- ZPRLocalsSize || PPRLocalsSize);
- CFAOffset += PPRCalleeSavesSize;
-
- // Allocate PPR locals + ZPR callee saves
- assert(PPRCalleeSavesEnd == ZPRCalleeSavesBegin &&
+ MFI.hasVarSizedObjects() || AllocateAfterPPRs ||
+ ZPR.LocalsSize || NonSVELocalsSize);
+ CFAOffset += AllocateBeforePPRs;
+ assert(PPRRange.End == ZPRRange.Begin &&
"Expected ZPR callee saves after PPR locals");
- allocateStackSpace(*PPRCalleeSavesEnd, RealignmentPadding,
- PPRLocalsSize + ZPRCalleeSavesSize,
- EmitAsyncCFI && !HasFP, CFAOffset,
- MFI.hasVarSizedObjects() || ZPRLocalsSize);
- CFAOffset += PPRLocalsSize + ZPRCalleeSavesSize;
-
- // Allocate ZPR locals
- allocateStackSpace(*ZPRCalleeSavesEnd, RealignmentPadding,
- ZPRLocalsSize + StackOffset::getFixed(NumBytes),
+ allocateStackSpace(PPRRange.End, RealignmentPadding, AllocateAfterPPRs,
EmitAsyncCFI && !HasFP, CFAOffset,
- MFI.hasVarSizedObjects());
+ MFI.hasVarSizedObjects() || ZPR.LocalsSize ||
+ NonSVELocalsSize);
+ CFAOffset += AllocateAfterPPRs;
} else {
- // Allocate space for the callee saves (if any).
- StackOffset LocalsSize =
- PPRLocalsSize + ZPRLocalsSize + StackOffset::getFixed(NumBytes);
- if (!FPAfterSVECalleeSaves)
- allocateStackSpace(AfterGPRSavesI, 0, SVECalleeSavesSize,
- EmitAsyncCFI && !HasFP, CFAOffset,
- MFI.hasVarSizedObjects() || LocalsSize);
+ assert(SVELayout == SVEStackLayout::CalleeSavesAboveFrameRecord);
+ // Note: With CalleeSavesAboveFrameRecord, the SVE callee saves have
+ // already been allocated (separate PPR locals are not supported; all SVE
+ // locals, both PPR and ZPR, are within the ZPR locals area).
+ assert(!PPR.LocalsSize && "Unexpected PPR locals!");
CFAOffset += SVECalleeSavesSize;
+ }
- // Allocate space for the rest of the frame including SVE locals. Align the
- // stack as necessary.
- assert(!(AFL.canUseRedZone(MF) && NeedsRealignment) &&
- "Cannot use redzone with stack realignment");
- if (!AFL.canUseRedZone(MF)) {
- // FIXME: in the case of dynamic re-alignment, NumBytes doesn't have
- // the correct value here, as NumBytes also includes padding bytes,
- // which shouldn't be counted here.
- StackOffset SVELocalsSize = PPRLocalsSize + ZPRLocalsSize;
- allocateStackSpace(AfterSVESavesI, RealignmentPadding,
- SVELocalsSize + StackOffset::getFixed(NumBytes),
- EmitAsyncCFI && !HasFP, CFAOffset,
- MFI.hasVarSizedObjects());
- }
+ // Allocate space for the rest of the frame including ZPR locals. Align the
+ // stack as necessary.
+ assert(!(AFL.canUseRedZone(MF) && NeedsRealignment) &&
+ "Cannot use redzone with stack realignment");
+ if (!AFL.canUseRedZone(MF)) {
+ // FIXME: in the case of dynamic re-alignment, NumBytes doesn't have the
+ // correct value here, as NumBytes also includes padding bytes, which
+ // shouldn't be counted here.
+ allocateStackSpace(
+ AfterSVESavesI, RealignmentPadding, ZPR.LocalsSize + NonSVELocalsSize,
+ EmitAsyncCFI && !HasFP, CFAOffset, MFI.hasVarSizedObjects());
}
// If we need a base pointer, set it up here. It's whatever the value of the
- // stack pointer is at this point. Any variable size objects will be allocated
- // after this, so we can still use the base pointer to reference locals.
+ // stack pointer is at this point. Any variable size objects will be
+ // allocated after this, so we can still use the base pointer to reference
+ // locals.
//
// FIXME: Clarify FrameSetup flags here.
// Note: Use emitFrameOffset() like above for FP if the FrameSetup flag is
@@ -1270,7 +1278,9 @@ void AArch64PrologueEmitter::emitCalleeSavedSVELocations(
StackOffset::getScalable(MFI.getObjectOffset(FI)) -
StackOffset::getFixed(AFI->getCalleeSavedStackSize(MFI));
- if (AFI->hasSplitSVEObjects() &&
+ // With split SVE objects, the scalable vectors are below (at a lower
+ // address than) the scalable predicates, so we must subtract the size of
+ // the predicates.
+ if (SVELayout == SVEStackLayout::Split &&
MFI.getStackID(FI) == TargetStackID::ScalableVector)
Offset -= PPRStackSize;
@@ -1349,13 +1359,10 @@ void AArch64EpilogueEmitter::emitEpilogue() {
return;
}
- bool FPAfterSVECalleeSaves =
- Subtarget.isTargetWindows() && AFI->getSVECalleeSavedStackSize();
-
bool CombineSPBump = shouldCombineCSRLocalStackBump(NumBytes);
// Assume we can't combine the last pop with the sp restore.
bool CombineAfterCSRBump = false;
- if (FPAfterSVECalleeSaves) {
+ if (SVELayout == SVEStackLayout::CalleeSavesAboveFrameRecord) {
AfterCSRPopSize += FixedObject;
} else if (!CombineSPBump && PrologueSaveSize != 0) {
MachineBasicBlock::iterator Pop = std::prev(MBB.getFirstTerminator());
@@ -1390,7 +1397,8 @@ void AArch64EpilogueEmitter::emitEpilogue() {
while (FirstGPRRestoreI != Begin) {
--FirstGPRRestoreI;
if (!FirstGPRRestoreI->getFlag(MachineInstr::FrameDestroy) ||
- (!FPAfterSVECalleeSaves && isPartOfSVECalleeSaves(FirstGPRRestoreI))) {
+ (SVELayout != SVEStackLayout::CalleeSavesAboveFrameRecord &&
+ isPartOfSVECalleeSaves(FirstGPRRestoreI))) {
++FirstGPRRestoreI;
break;
} else if (CombineSPBump)
@@ -1414,13 +1422,9 @@ void AArch64EpilogueEmitter::emitEpilogue() {
if (HasFP && AFI->hasSwiftAsyncContext())
emitSwiftAsyncContextFramePointer(EpilogueEndI, DL);
- StackOffset ZPRStackSize = AFL.getZPRStackSize(MF);
- StackOffset PPRStackSize = AFL.getPPRStackSize(MF);
- StackOffset SVEStackSize = ZPRStackSize + PPRStackSize;
-
// If there is a single SP update, insert it before the ret and we're done.
if (CombineSPBump) {
- assert(!SVEStackSize && "Cannot combine SP bump with SVE");
+ assert(!AFI->hasSVEStackSize() && "Cannot combine SP bump with SVE");
// When we are about to restore the CSRs, the CFA register is SP again.
if (EmitCFI && HasFP)
@@ -1437,188 +1441,122 @@ void AArch64EpilogueEmitter::emitEpilogue() {
NumBytes -= PrologueSaveSize;
assert(NumBytes >= 0 && "Negative stack allocation size!?");
- if (!AFI->hasSplitSVEObjects()) {
- // Process the SVE callee-saves to determine what space needs to be
- // deallocated.
- StackOffset DeallocateBefore = {}, DeallocateAfter = SVEStackSize;
- MachineBasicBlock::iterator RestoreBegin = FirstGPRRestoreI,
- RestoreEnd = FirstGPRRestoreI;
- int64_t ZPRCalleeSavedSize = AFI->getZPRCalleeSavedStackSize();
- int64_t PPRCalleeSavedSize = AFI->getPPRCalleeSavedStackSize();
- int64_t SVECalleeSavedSize = ZPRCalleeSavedSize + PPRCalleeSavedSize;
-
- if (SVECalleeSavedSize) {
- if (FPAfterSVECalleeSaves)
- RestoreEnd = MBB.getFirstTerminator();
-
- RestoreBegin = std::prev(RestoreEnd);
- while (RestoreBegin != MBB.begin() &&
- isPartOfSVECalleeSaves(std::prev(RestoreBegin)))
- --RestoreBegin;
-
- assert(isPartOfSVECalleeSaves(RestoreBegin) &&
- isPartOfSVECalleeSaves(std::prev(RestoreEnd)) &&
- "Unexpected instruction");
-
- StackOffset CalleeSavedSizeAsOffset =
- StackOffset::getScalable(SVECalleeSavedSize);
- DeallocateBefore = SVEStackSize - CalleeSavedSizeAsOffset;
- DeallocateAfter = CalleeSavedSizeAsOffset;
+ auto [PPR, ZPR] = getSVEStackFrameSizes();
+ auto [PPRRange, ZPRRange] = partitionSVECS(
+ MBB,
+ SVELayout == SVEStackLayout::CalleeSavesAboveFrameRecord
+ ? MBB.getFirstTerminator()
+ : FirstGPRRestoreI,
+ PPR.CalleeSavesSize, ZPR.CalleeSavesSize, /*IsEpilogue=*/true);
+
+ StackOffset SVECalleeSavesSize = ZPR.CalleeSavesSize + PPR.CalleeSavesSize;
+ StackOffset SVEStackSize =
+ SVECalleeSavesSize + PPR.LocalsSize + ZPR.LocalsSize;
+ MachineBasicBlock::iterator RestoreBegin = ZPRRange.Begin;
+ MachineBasicBlock::iterator RestoreEnd = PPRRange.End;
+
+ // Deallocate the SVE area.
+ if (SVELayout == SVEStackLayout::CalleeSavesAboveFrameRecord) {
+ StackOffset SVELocalsSize = ZPR.LocalsSize + PPR.LocalsSize;
+ // If the callee-save area is before FP, restoring the FP implicitly
+ // deallocates non-callee-save SVE allocations. Otherwise, deallocate them
+ // explicitly.
+ if (!AFI->isStackRealigned() && !MFI.hasVarSizedObjects()) {
+ emitFrameOffset(MBB, FirstGPRRestoreI, DL, AArch64::SP, AArch64::SP,
+ SVELocalsSize, TII, MachineInstr::FrameDestroy, false,
+ NeedsWinCFI, &HasWinCFI);
}
- // Deallocate the SVE area.
- if (FPAfterSVECalleeSaves) {
- // If the callee-save area is before FP, restoring the FP implicitly
- // deallocates non-callee-save SVE allocations. Otherwise, deallocate
- // them explicitly.
- if (!AFI->isStackRealigned() && !MFI.hasVarSizedObjects()) {
- emitFrameOffset(MBB, FirstGPRRestoreI, DL, AArch64::SP, AArch64::SP,
- DeallocateBefore, TII, MachineInstr::FrameDestroy,
- false, NeedsWinCFI, &HasWinCFI);
- }
+ // Deallocate callee-save non-SVE registers.
+ emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
+ StackOffset::getFixed(AFI->getCalleeSavedStackSize()), TII,
+ MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);
- // Deallocate callee-save non-SVE registers.
- emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
- StackOffset::getFixed(AFI->getCalleeSavedStackSize()),
- TII, MachineInstr::FrameDestroy, false, NeedsWinCFI,
- &HasWinCFI);
-
- // Deallocate fixed objects.
- emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP,
- StackOffset::getFixed(FixedObject), TII,
- MachineInstr::FrameDestroy, false, NeedsWinCFI,
- &HasWinCFI);
-
- // Deallocate callee-save SVE registers.
- emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP,
- DeallocateAfter, TII, MachineInstr::FrameDestroy, false,
- NeedsWinCFI, &HasWinCFI);
- } else if (SVEStackSize) {
- int64_t SVECalleeSavedSize = AFI->getSVECalleeSavedStackSize();
- // If we have stack realignment or variable-sized objects we must use the
- // FP to restore SVE callee saves (as there is an unknown amount of
- // data/padding between the SP and SVE CS area).
- Register BaseForSVEDealloc =
- (AFI->isStackRealigned() || MFI.hasVarSizedObjects()) ? AArch64::FP
- : AArch64::SP;
- if (SVECalleeSavedSize && BaseForSVEDealloc == AArch64::FP) {
- Register CalleeSaveBase = AArch64::FP;
- if (int64_t CalleeSaveBaseOffset =
- AFI->getCalleeSaveBaseToFrameRecordOffset()) {
- // If we have have an non-zero offset to the non-SVE CS base we need
- // to compute the base address by subtracting the offest in a
- // temporary register first (to avoid briefly deallocating the SVE
- // CS).
- CalleeSaveBase = MBB.getParent()->getRegInfo().createVirtualRegister(
- &AArch64::GPR64RegClass);
- emitFrameOffset(MBB, RestoreBegin, DL, CalleeSaveBase, AArch64::FP,
- StackOffset::getFixed(-CalleeSaveBaseOffset), TII,
- MachineInstr::FrameDestroy);
- }
- // The code below will deallocate the stack space space by moving the
- // SP to the start of the SVE callee-save area.
- emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, CalleeSaveBase,
- StackOffset::getScalable(-SVECalleeSavedSize), TII,
+ // Deallocate fixed objects.
+ emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP,
+ StackOffset::getFixed(FixedObject), TII,
+ MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);
+
+ // Deallocate callee-save SVE registers.
+ emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP,
+ SVECalleeSavesSize, TII, MachineInstr::FrameDestroy, false,
+ NeedsWinCFI, &HasWinCFI);
+ } else if (AFI->hasSVEStackSize()) {
+ // If we have stack realignment or variable-sized objects we must use the FP
+ // to restore SVE callee saves (as there is an unknown amount of
+ // data/padding between the SP and SVE CS area).
+ Register BaseForSVEDealloc =
+ (AFI->isStackRealigned() || MFI.hasVarSizedObjects()) ? AArch64::FP
+ : AArch64::SP;
+ if (SVECalleeSavesSize && BaseForSVEDealloc == AArch64::FP) {
+ // TODO: Support stack realignment and variable-sized objects.
+ assert(
+ SVELayout != SVEStackLayout::Split &&
+ "unexpected stack realignment or variable sized objects with split "
+ "SVE stack objects");
+
+ Register CalleeSaveBase = AArch64::FP;
+ if (int64_t CalleeSaveBaseOffset =
+ AFI->getCalleeSaveBaseToFrameRecordOffset()) {
+ // If we have a non-zero offset to the non-SVE CS base we need to
+ // compute the base address by subtracting the offset in a temporary
+ // register first (to avoid briefly deallocating the SVE CS).
+ CalleeSaveBase = MBB.getParent()->getRegInfo().createVirtualRegister(
+ &AArch64::GPR64RegClass);
+ emitFrameOffset(MBB, RestoreBegin, DL, CalleeSaveBase, AArch64::FP,
+ StackOffset::getFixed(-CalleeSaveBaseOffset), TII,
MachineInstr::FrameDestroy);
- } else if (BaseForSVEDealloc == AArch64::SP) {
- if (SVECalleeSavedSize) {
- // Deallocate the non-SVE locals first before we can deallocate (and
- // restore callee saves) from the SVE area.
- emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
- StackOffset::getFixed(NumBytes), TII,
- MachineInstr::FrameDestroy, false, NeedsWinCFI,
- &HasWinCFI, EmitCFI && !HasFP,
- SVEStackSize + StackOffset::getFixed(
- NumBytes + PrologueSaveSize));
- NumBytes = 0;
- }
-
- emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
- DeallocateBefore, TII, MachineInstr::FrameDestroy,
- false, NeedsWinCFI, &HasWinCFI, EmitCFI && !HasFP,
- SVEStackSize +
- StackOffset::getFixed(NumBytes + PrologueSaveSize));
-
- emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP,
- DeallocateAfter, TII, MachineInstr::FrameDestroy, false,
- NeedsWinCFI, &HasWinCFI, EmitCFI && !HasFP,
- DeallocateAfter +
- StackOffset::getFixed(NumBytes + PrologueSaveSize));
+ }
+ // The code below will deallocate the stack space by moving the SP
+ // to the start of the SVE callee-save area.
+ emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, CalleeSaveBase,
+ -SVECalleeSavesSize, TII, MachineInstr::FrameDestroy);
+ } else if (BaseForSVEDealloc == AArch64::SP) {
+ auto CFAOffset =
+ SVEStackSize + StackOffset::getFixed(NumBytes + PrologueSaveSize);
+
+ if (SVECalleeSavesSize) {
+ // Deallocate the non-SVE locals first before we can deallocate (and
+ // restore callee saves) from the SVE area.
+ auto NonSVELocals = StackOffset::getFixed(NumBytes);
+ emitFrameOffset(MBB, ZPRRange.Begin, DL, AArch64::SP, AArch64::SP,
+ NonSVELocals, TII, MachineInstr::FrameDestroy, false,
+ NeedsWinCFI, &HasWinCFI, EmitCFI && !HasFP, CFAOffset);
+ CFAOffset -= NonSVELocals;
+ NumBytes = 0;
}
- if (EmitCFI)
- emitCalleeSavedSVERestores(RestoreEnd);
- }
- } else if (AFI->hasSplitSVEObjects() && SVEStackSize) {
- // TODO: Support stack realigment and variable-sized objects.
- assert(!AFI->isStackRealigned() && !MFI.hasVarSizedObjects() &&
- "unexpected stack realignment or variable sized objects with split "
- "SVE stack objects");
- // SplitSVEObjects. Determine the sizes and starts/ends of the ZPR and PPR
- // areas.
- auto ZPRCalleeSavedSize =
- StackOffset::getScalable(AFI->getZPRCalleeSavedStackSize());
- auto PPRCalleeSavedSize =
- StackOffset::getScalable(AFI->getPPRCalleeSavedStackSize());
- StackOffset PPRLocalsSize = PPRStackSize - PPRCalleeSavedSize;
- StackOffset ZPRLocalsSize = ZPRStackSize - ZPRCalleeSavedSize;
-
- MachineBasicBlock::iterator PPRRestoreBegin = FirstGPRRestoreI,
- PPRRestoreEnd = FirstGPRRestoreI;
- if (PPRCalleeSavedSize) {
- PPRRestoreBegin = std::prev(PPRRestoreEnd);
- while (PPRRestoreBegin != MBB.begin() &&
- isPartOfPPRCalleeSaves(std::prev(PPRRestoreBegin)))
- --PPRRestoreBegin;
- }
-
- MachineBasicBlock::iterator ZPRRestoreBegin = PPRRestoreBegin,
- ZPRRestoreEnd = PPRRestoreBegin;
- if (ZPRCalleeSavedSize) {
- ZPRRestoreBegin = std::prev(ZPRRestoreEnd);
- while (ZPRRestoreBegin != MBB.begin() &&
- isPartOfZPRCalleeSaves(std::prev(ZPRRestoreBegin)))
- --ZPRRestoreBegin;
- }
-
- auto CFAOffset =
- SVEStackSize + StackOffset::getFixed(NumBytes + PrologueSaveSize);
- if (PPRCalleeSavedSize || ZPRCalleeSavedSize) {
- // Deallocate the non-SVE locals first before we can deallocate (and
- // restore callee saves) from the SVE area.
- auto NonSVELocals = StackOffset::getFixed(NumBytes);
- emitFrameOffset(MBB, ZPRRestoreBegin, DL, AArch64::SP, AArch64::SP,
- NonSVELocals, TII, MachineInstr::FrameDestroy, false,
- false, nullptr, EmitCFI && !HasFP, CFAOffset);
- NumBytes = 0;
- CFAOffset -= NonSVELocals;
- }
+ if (ZPR.LocalsSize) {
+ emitFrameOffset(MBB, ZPRRange.Begin, DL, AArch64::SP, AArch64::SP,
+ ZPR.LocalsSize, TII, MachineInstr::FrameDestroy, false,
+ NeedsWinCFI, &HasWinCFI, EmitCFI && !HasFP, CFAOffset);
+ CFAOffset -= ZPR.LocalsSize;
+ }
- if (ZPRLocalsSize) {
- emitFrameOffset(MBB, ZPRRestoreBegin, DL, AArch64::SP, AArch64::SP,
- ZPRLocalsSize, TII, MachineInstr::FrameDestroy, false,
- false, nullptr, EmitCFI && !HasFP, CFAOffset);
- CFAOffset -= ZPRLocalsSize;
- }
+ StackOffset SVECalleeSavesToDealloc = SVECalleeSavesSize;
+ if (SVELayout == SVEStackLayout::Split &&
+ (PPR.LocalsSize || ZPR.CalleeSavesSize)) {
+ assert(PPRRange.Begin == ZPRRange.End &&
+ "Expected PPR restores after ZPR");
+ emitFrameOffset(MBB, PPRRange.Begin, DL, AArch64::SP, AArch64::SP,
+ PPR.LocalsSize + ZPR.CalleeSavesSize, TII,
+ MachineInstr::FrameDestroy, false, NeedsWinCFI,
+ &HasWinCFI, EmitCFI && !HasFP, CFAOffset);
+ CFAOffset -= PPR.LocalsSize + ZPR.CalleeSavesSize;
+ SVECalleeSavesToDealloc -= ZPR.CalleeSavesSize;
+ }
- if (PPRLocalsSize || ZPRCalleeSavedSize) {
- assert(PPRRestoreBegin == ZPRRestoreEnd &&
- "Expected PPR restores after ZPR");
- emitFrameOffset(MBB, PPRRestoreBegin, DL, AArch64::SP, AArch64::SP,
- PPRLocalsSize + ZPRCalleeSavedSize, TII,
- MachineInstr::FrameDestroy, false, false, nullptr,
- EmitCFI && !HasFP, CFAOffset);
- CFAOffset -= PPRLocalsSize + ZPRCalleeSavedSize;
- }
- if (PPRCalleeSavedSize) {
- emitFrameOffset(MBB, PPRRestoreEnd, DL, AArch64::SP, AArch64::SP,
- PPRCalleeSavedSize, TII, MachineInstr::FrameDestroy,
- false, false, nullptr, EmitCFI && !HasFP, CFAOffset);
+ // With split SVE this deallocates the PPRs; otherwise it deallocates both
+ // the ZPRs and PPRs:
+ if (SVECalleeSavesToDealloc)
+ emitFrameOffset(MBB, PPRRange.End, DL, AArch64::SP, AArch64::SP,
+ SVECalleeSavesToDealloc, TII,
+ MachineInstr::FrameDestroy, false, NeedsWinCFI,
+ &HasWinCFI, EmitCFI && !HasFP, CFAOffset);
}
- // We only emit CFI information for ZPRs so emit CFI after the ZPR restores.
if (EmitCFI)
- emitCalleeSavedSVERestores(ZPRRestoreEnd);
+ emitCalleeSavedSVERestores(
+ SVELayout == SVEStackLayout::Split ? ZPRRange.End : PPRRange.End);
}
if (!HasFP) {
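For orientation, the sizing policy centralized in the new getSVEStackFrameSizes helper (used by both the prologue and epilogue hunks above) can be summarized as a free-standing sketch; the parameter names are stand-ins for the AFI/AFL accessors:

  #include <cstdint>

  struct StackPair { int64_t CalleeSavesSize, LocalsSize; };
  struct SVEFrameSizes { StackPair PPR, ZPR; };

  // Hypothetical model of getSVEStackFrameSizes(): with split SVE objects
  // the PPR and ZPR areas each keep their own locals; otherwise all SVE
  // locals are attributed to the ZPR area and the PPR area holds only
  // callee saves.
  SVEFrameSizes getSVESizes(bool SplitSVE, int64_t PPRCS, int64_t PPRLocals,
                            int64_t ZPRCS, int64_t ZPRLocals) {
    if (SplitSVE)
      return {{PPRCS, PPRLocals}, {ZPRCS, ZPRLocals}};
    return {{PPRCS, 0}, {ZPRCS, PPRLocals + ZPRLocals}};
  }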
diff --git a/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.h b/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.h
index a1c9b34..bccadda 100644
--- a/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.h
+++ b/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.h
@@ -27,11 +27,23 @@ class AArch64Subtarget;
class AArch64FunctionInfo;
class AArch64FrameLowering;
+struct SVEFrameSizes {
+ struct {
+ StackOffset CalleeSavesSize, LocalsSize;
+ } PPR, ZPR;
+};
+
class AArch64PrologueEpilogueCommon {
public:
AArch64PrologueEpilogueCommon(MachineFunction &MF, MachineBasicBlock &MBB,
const AArch64FrameLowering &AFL);
+ enum class SVEStackLayout {
+ Default,
+ Split,
+ CalleeSavesAboveFrameRecord,
+ };
+
protected:
bool requiresGetVGCall() const;
@@ -53,6 +65,8 @@ protected:
bool shouldCombineCSRLocalStackBump(uint64_t StackBumpBytes) const;
+ SVEFrameSizes getSVEStackFrameSizes() const;
+
MachineFunction &MF;
MachineBasicBlock &MBB;
@@ -68,6 +82,7 @@ protected:
bool IsFunclet = false; // Note: Set in derived constructors.
bool NeedsWinCFI = false; // Note: Can be changed in emitFramePointerSetup.
bool HomPrologEpilog = false; // Note: Set in derived constructors.
+ SVEStackLayout SVELayout = SVEStackLayout::Default;
// Note: "HasWinCFI" is mutable as it can change in any "emit" function.
mutable bool HasWinCFI = false;
diff --git a/llvm/lib/Target/Hexagon/Hexagon.td b/llvm/lib/Target/Hexagon/Hexagon.td
index 6d0529f..fb0928b8 100644
--- a/llvm/lib/Target/Hexagon/Hexagon.td
+++ b/llvm/lib/Target/Hexagon/Hexagon.td
@@ -110,8 +110,6 @@ def FeatureSmallData: SubtargetFeature<"small-data", "UseSmallData", "true",
"Allow GP-relative addressing of global variables">;
def FeatureDuplex: SubtargetFeature<"duplex", "EnableDuplex", "true",
"Enable generation of duplex instruction">;
-def FeatureUnsafeFP: SubtargetFeature<"unsafe-fp", "UseUnsafeMath", "true",
- "Use unsafe FP math">;
def FeatureReservedR19: SubtargetFeature<"reserved-r19", "ReservedR19",
"true", "Reserve register R19">;
def FeatureNoreturnStackElim: SubtargetFeature<"noreturn-stack-elim",
@@ -167,7 +165,6 @@ def UseHVXQFloat : Predicate<"HST->useHVXQFloatOps()">,
def UseHVXFloatingPoint: Predicate<"HST->useHVXFloatingPoint()">;
def HasMemNoShuf : Predicate<"HST->hasMemNoShuf()">,
AssemblerPredicate<(all_of FeatureMemNoShuf)>;
-def UseUnsafeMath : Predicate<"HST->useUnsafeMath()">;
def NotOptTinyCore : Predicate<"!HST->isTinyCore() ||"
"MF->getFunction().hasOptSize()"> {
let RecomputePerFunction = 1;
diff --git a/llvm/lib/Target/Hexagon/HexagonPatterns.td b/llvm/lib/Target/Hexagon/HexagonPatterns.td
index 4b23670..a0acfcf 100644
--- a/llvm/lib/Target/Hexagon/HexagonPatterns.td
+++ b/llvm/lib/Target/Hexagon/HexagonPatterns.td
@@ -1611,8 +1611,11 @@ def DfMpy: OutPatFrag<(ops node:$Rs, node:$Rt),
$Rt, $Rs),
$Rs, $Rt)>;
-let Predicates = [HasV67,UseUnsafeMath], AddedComplexity = 50 in {
- def: Pat<(fmul F64:$Rs, F64:$Rt), (DfMpy $Rs, $Rt)>;
+def fmul_afn : PatFrag<(ops node:$a, node:$b), (fmul node:$a, node:$b), [{
+ return N->getFlags().hasApproximateFuncs();
+}]>;
+let Predicates = [HasV67], AddedComplexity = 50 in {
+ def : Pat<(fmul_afn F64:$Rs, F64:$Rt), (DfMpy $Rs, $Rt)>;
}
let Predicates = [HasV67] in {
def: OpR_RR_pat<F2_dfmin, pf2<fminimumnum>, f64, F64>;
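The new fmul_afn PatFrag gates the DfMpy contraction on the node's own fast-math flags rather than on a function-wide subtarget feature. Roughly, its code fragment amounts to the following check (hypothetical helper, assuming the usual SelectionDAG headers):

  #include "llvm/CodeGen/SelectionDAGNodes.h"
  using namespace llvm;

  // Sketch of the fmul_afn predicate: match only when this particular fmul
  // carries the 'afn' (approximate functions) fast-math flag.
  bool isApproxFMul(const SDNode *N) {
    return N->getOpcode() == ISD::FMUL && N->getFlags().hasApproximateFuncs();
  }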
diff --git a/llvm/lib/Target/Hexagon/HexagonSubtarget.h b/llvm/lib/Target/Hexagon/HexagonSubtarget.h
index b111471..7430567 100644
--- a/llvm/lib/Target/Hexagon/HexagonSubtarget.h
+++ b/llvm/lib/Target/Hexagon/HexagonSubtarget.h
@@ -54,7 +54,6 @@ class HexagonSubtarget : public HexagonGenSubtargetInfo {
bool UseNewValueJumps = false;
bool UseNewValueStores = false;
bool UseSmallData = false;
- bool UseUnsafeMath = false;
bool UseZRegOps = false;
bool UseHVXIEEEFPOps = false;
bool UseHVXQFloatOps = false;
@@ -234,7 +233,6 @@ public:
bool useNewValueJumps() const { return UseNewValueJumps; }
bool useNewValueStores() const { return UseNewValueStores; }
bool useSmallData() const { return UseSmallData; }
- bool useUnsafeMath() const { return UseUnsafeMath; }
bool useZRegOps() const { return UseZRegOps; }
bool useCabac() const { return UseCabac; }
diff --git a/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp b/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
index 0afa04a..f5d8b69 100644
--- a/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
@@ -250,13 +250,6 @@ HexagonTargetMachine::getSubtargetImpl(const Function &F) const {
CPUAttr.isValid() ? CPUAttr.getValueAsString().str() : TargetCPU;
std::string FS =
FSAttr.isValid() ? FSAttr.getValueAsString().str() : TargetFS;
- // Append the preexisting target features last, so that +mattr overrides
- // the "unsafe-fp-math" function attribute.
- // Creating a separate target feature is not strictly necessary, it only
- // exists to make "unsafe-fp-math" force creating a new subtarget.
-
- if (F.getFnAttribute("unsafe-fp-math").getValueAsBool())
- FS = FS.empty() ? "+unsafe-fp" : "+unsafe-fp," + FS;
auto &I = SubtargetMap[CPU + FS];
if (!I) {
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index bc047a4a..a1fb665 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -651,7 +651,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
// Custom conversions to/from v2i8.
setOperationAction(ISD::BITCAST, MVT::v2i8, Custom);
- // Only logical ops can be done on v4i8 directly, others must be done
+ // Only logical ops can be done on v4i8/v2i32 directly; others must be done
// elementwise.
setOperationAction(
{ISD::ABS, ISD::ADD, ISD::ADDC, ISD::ADDE,
@@ -669,7 +669,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
ISD::UMIN, ISD::UMULO, ISD::UMUL_LOHI, ISD::UREM,
ISD::USHLSAT, ISD::USUBO, ISD::USUBO_CARRY, ISD::VSELECT,
ISD::USUBSAT},
- MVT::v4i8, Expand);
+ {MVT::v4i8, MVT::v2i32}, Expand);
// Operations not directly supported by NVPTX.
for (MVT VT : {MVT::bf16, MVT::f16, MVT::v2bf16, MVT::v2f16, MVT::f32,
@@ -689,7 +689,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::v2i16, MVT::v2i32}, Expand);
setOperationAction(ISD::SHL_PARTS, MVT::i32 , Custom);
setOperationAction(ISD::SRA_PARTS, MVT::i32 , Custom);
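SIGN_EXTEND_INREG sign-extends the low bits of each lane in place; marking it, and the arithmetic ops above, Expand for v2i32 directs legalization to lower them with shifts or elementwise. A scalar model of one lane (hypothetical helper; FromBits would be 16 when the lane holds an i16 payload):

  #include <cstdint>

  // Hypothetical scalar model of SIGN_EXTEND_INREG on a single lane: the
  // low FromBits of V are sign-extended to the full 32 bits.
  int32_t signExtendInReg(int32_t V, unsigned FromBits) {
    uint32_t Mask = (FromBits < 32) ? ((1u << FromBits) - 1) : ~0u;
    uint32_t Low = uint32_t(V) & Mask;
    uint32_t Sign = 1u << (FromBits - 1);
    return int32_t((Low ^ Sign) - Sign); // classic sign-extension trick
  }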
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 40c05e8..333b693 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -1823,6 +1823,11 @@ def TuneConditionalCompressedMoveFusion
def HasConditionalMoveFusion : Predicate<"Subtarget->hasConditionalMoveFusion()">;
def NoConditionalMoveFusion : Predicate<"!Subtarget->hasConditionalMoveFusion()">;
+def TuneHasSingleElementVecFP64
+ : SubtargetFeature<"single-element-vec-fp64", "HasSingleElementVectorFP64", "true",
+ "Certain vector FP64 operations produce a single result "
+ "element per cycle">;
+
def TuneMIPSP8700
: SubtargetFeature<"mips-p8700", "RISCVProcFamily", "MIPSP8700",
"MIPS p8700 processor">;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
index 447f05c..f2724c41 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
@@ -1636,7 +1636,7 @@ def : QCISELECTCCIPat<SETNE, QC_SELECTNEI>;
}
let Predicates = [HasVendorXqcilsm, IsRV32] in {
-def : Pat<(qc_setwmi GPR:$rs3, GPR:$rs1, tuimm5nonzero:$uimm5, tuimm7_lsb00:$uimm7),
+def : Pat<(qc_setwmi (i32 GPR:$rs3), GPR:$rs1, tuimm5nonzero:$uimm5, tuimm7_lsb00:$uimm7),
(QC_SETWMI GPR:$rs3, GPR:$rs1, tuimm5nonzero:$uimm5, tuimm7_lsb00:$uimm7)>;
} // Predicates = [HasVendorXqcilsm, IsRV32]
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
index a29b7dd..57fbaa0 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
@@ -634,56 +634,56 @@ def : PatGpr<bswap, REV8_RV64, i64>;
let Predicates = [HasStdExtZbkb] in {
def : Pat<(or (and (shl GPR:$rs2, (XLenVT 8)), 0xFFFF),
- (zexti8 (XLenVT GPR:$rs1))),
- (PACKH GPR:$rs1, GPR:$rs2)>;
-def : Pat<(or (shl (zexti8 (XLenVT GPR:$rs2)), (XLenVT 8)),
- (zexti8 (XLenVT GPR:$rs1))),
- (PACKH GPR:$rs1, GPR:$rs2)>;
+ zexti8:$rs1),
+ (PACKH zexti8:$rs1, GPR:$rs2)>;
+def : Pat<(or (shl zexti8:$rs2, (XLenVT 8)),
+ zexti8:$rs1),
+ (PACKH zexti8:$rs1, zexti8:$rs2)>;
def : Pat<(and (or (shl GPR:$rs2, (XLenVT 8)),
- (zexti8 (XLenVT GPR:$rs1))), 0xFFFF),
- (PACKH GPR:$rs1, GPR:$rs2)>;
+ zexti8:$rs1), 0xFFFF),
+ (PACKH zexti8:$rs1, GPR:$rs2)>;
def : Pat<(binop_allhusers<or> (shl GPR:$rs2, (XLenVT 8)),
- (zexti8 (XLenVT GPR:$rs1))),
- (PACKH GPR:$rs1, GPR:$rs2)>;
+ zexti8:$rs1),
+ (PACKH zexti8:$rs1, GPR:$rs2)>;
} // Predicates = [HasStdExtZbkb]
let Predicates = [HasStdExtZbkb, IsRV32] in {
-def : Pat<(i32 (or (zexti16 (i32 GPR:$rs1)), (shl GPR:$rs2, (i32 16)))),
- (PACK GPR:$rs1, GPR:$rs2)>;
+def : Pat<(i32 (or zexti16:$rs1, (shl GPR:$rs2, (i32 16)))),
+ (PACK zexti16:$rs1, GPR:$rs2)>;
-def : Pat<(or (shl GPR:$rs2, (XLenVT 24)),
- (shl (zexti8 (XLenVT GPR:$rs1)), (XLenVT 16))),
- (SLLI (XLenVT (PACKH GPR:$rs1, GPR:$rs2)), (XLenVT 16))>;
+def : Pat<(i32 (or (shl GPR:$rs2, (XLenVT 24)),
+ (shl zexti8:$rs1, (XLenVT 16)))),
+ (SLLI (XLenVT (PACKH zexti8:$rs1, GPR:$rs2)), (XLenVT 16))>;
// Match a pattern of 2 bytes being inserted into bits [31:16], with bits
// bits [15:0] coming from a zero extended value. We can use pack with packh for
// bits [31:16]. If bits [15:0] can also be a packh, it can be matched
// separately.
-def : Pat<(or (or (shl GPR:$op1rs2, (XLenVT 24)),
- (shl (zexti8 (XLenVT GPR:$op1rs1)), (XLenVT 16))),
- (zexti16 (XLenVT GPR:$rs1))),
- (PACK (XLenVT GPR:$rs1),
- (XLenVT (PACKH GPR:$op1rs1, GPR:$op1rs2)))>;
+def : Pat<(i32 (or (or (shl GPR:$op1rs2, (XLenVT 24)),
+ (shl zexti8:$op1rs1, (XLenVT 16))),
+ zexti16:$rs1)),
+ (PACK zexti16:$rs1,
+ (XLenVT (PACKH zexti8:$op1rs1, GPR:$op1rs2)))>;
}
let Predicates = [HasStdExtZbkb, IsRV64] in {
-def : Pat<(i64 (or (zexti32 (i64 GPR:$rs1)), (shl GPR:$rs2, (i64 32)))),
- (PACK GPR:$rs1, GPR:$rs2)>;
+def : Pat<(i64 (or zexti32:$rs1, (shl GPR:$rs2, (i64 32)))),
+ (PACK zexti32:$rs1, GPR:$rs2)>;
-def : Pat<(or (shl (zexti8 (XLenVT GPR:$rs2)), (XLenVT 24)),
- (shl (zexti8 (XLenVT GPR:$rs1)), (XLenVT 16))),
- (SLLI (XLenVT (PACKH GPR:$rs1, GPR:$rs2)), (XLenVT 16))>;
+def : Pat<(i64 (or (shl zexti8:$rs2, (XLenVT 24)),
+ (shl zexti8:$rs1, (XLenVT 16)))),
+ (SLLI (XLenVT (PACKH zexti8:$rs1, zexti8:$rs2)), (XLenVT 16))>;
def : Pat<(binop_allwusers<or> (shl GPR:$rs2, (XLenVT 24)),
- (shl (zexti8 (XLenVT GPR:$rs1)), (XLenVT 16))),
- (SLLI (XLenVT (PACKH GPR:$rs1, GPR:$rs2)), (XLenVT 16))>;
+ (shl zexti8:$rs1, (XLenVT 16))),
+ (SLLI (XLenVT (PACKH zexti8:$rs1, GPR:$rs2)), (XLenVT 16))>;
def : Pat<(binop_allwusers<or> (shl GPR:$rs2, (i64 16)),
- (zexti16 (i64 GPR:$rs1))),
- (PACKW GPR:$rs1, GPR:$rs2)>;
+ zexti16:$rs1),
+ (PACKW zexti16:$rs1, GPR:$rs2)>;
def : Pat<(i64 (or (sext_inreg (shl GPR:$rs2, (i64 16)), i32),
- (zexti16 (i64 GPR:$rs1)))),
- (PACKW GPR:$rs1, GPR:$rs2)>;
+ zexti16:$rs1)),
+ (PACKW zexti16:$rs1, GPR:$rs2)>;
// Match a pattern of 2 bytes being inserted into bits [31:16], with bits
// bits [15:0] coming from a zero extended value, and bits [63:32] being
@@ -691,35 +691,35 @@ def : Pat<(i64 (or (sext_inreg (shl GPR:$rs2, (i64 16)), i32),
// also be a packh, it can be matched separately.
def : Pat<(binop_allwusers<or>
(or (shl GPR:$op1rs2, (XLenVT 24)),
- (shl (zexti8 (XLenVT GPR:$op1rs1)), (XLenVT 16))),
- (zexti16 (XLenVT GPR:$rs1))),
- (PACKW GPR:$rs1, (XLenVT (PACKH GPR:$op1rs1, GPR:$op1rs2)))>;
+ (shl zexti8:$op1rs1, (XLenVT 16))),
+ zexti16:$rs1),
+ (PACKW zexti16:$rs1, (XLenVT (PACKH zexti8:$op1rs1, GPR:$op1rs2)))>;
// We need to manually reassociate the patterns because of the binop_allwusers.
def : Pat<(binop_allwusers<or>
- (or (zexti16 (XLenVT GPR:$rs1)),
- (shl (zexti8 (XLenVT GPR:$op1rs1)), (XLenVT 16))),
+ (or zexti16:$rs1,
+ (shl zexti8:$op1rs1, (XLenVT 16))),
(shl GPR:$op1rs2, (XLenVT 24))),
- (PACKW GPR:$rs1, (XLenVT (PACKH GPR:$op1rs1, GPR:$op1rs2)))>;
+ (PACKW zexti16:$rs1, (XLenVT (PACKH zexti8:$op1rs1, GPR:$op1rs2)))>;
def : Pat<(binop_allwusers<or>
- (or (zexti16 (XLenVT GPR:$rs1)),
- (shl GPR:$op1rs1, (XLenVT 24))),
- (shl (zexti8 (XLenVT GPR:$op1rs2)), (XLenVT 16))),
- (PACKW GPR:$rs1, (XLenVT (PACKH GPR:$op1rs1, GPR:$op1rs2)))>;
+ (or zexti16:$rs1,
+ (shl GPR:$op1rs2, (XLenVT 24))),
+ (shl zexti8:$op1rs1, (XLenVT 16))),
+ (PACKW zexti16:$rs1, (XLenVT (PACKH zexti8:$op1rs1, GPR:$op1rs2)))>;
def : Pat<(i64 (or (or (zexti16 (XLenVT GPR:$rs1)),
- (shl (zexti8 (XLenVT GPR:$op1rs2)), (XLenVT 16))),
- (sext_inreg (shl GPR:$op1rs1, (XLenVT 24)), i32))),
- (PACKW GPR:$rs1, (XLenVT (PACKH GPR:$op1rs1, GPR:$op1rs2)))>;
+ (shl zexti8:$op1rs1, (XLenVT 16))),
+ (sext_inreg (shl GPR:$op1rs2, (XLenVT 24)), i32))),
+ (PACKW GPR:$rs1, (XLenVT (PACKH zexti8:$op1rs1, GPR:$op1rs2)))>;
// Match a pattern of 2 halfwords being inserted into bits [63:32], with bits
// bits [31:0] coming from a zero extended value. We can use pack with packw for
// bits [63:32]. If bits [63:31] can also be a packw, it can be matched
// separately.
def : Pat<(or (or (shl GPR:$op1rs2, (i64 48)),
- (shl (zexti16 (i64 GPR:$op1rs1)), (i64 32))),
- (zexti32 (i64 GPR:$rs1))),
- (PACK (XLenVT GPR:$rs1),
- (XLenVT (PACKW GPR:$op1rs1, GPR:$op1rs2)))>;
+ (shl zexti16:$op1rs1, (i64 32))),
+ zexti32:$rs1),
+ (PACK zexti32:$rs1,
+ (XLenVT (PACKW zexti16:$op1rs1, GPR:$op1rs2)))>;
} // Predicates = [HasStdExtZbkb, IsRV64]
let Predicates = [HasStdExtZbb, IsRV32] in
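All of these rewrites target the Zbkb pack family, so the instruction semantics are worth keeping at hand. Hypothetical RV64 scalar models, following the Zbkb spec:

  #include <cstdint>

  // PACKH: pack the low bytes of rs1 and rs2 into a zero-extended halfword.
  uint64_t packh(uint64_t Rs1, uint64_t Rs2) {
    return (Rs1 & 0xff) | ((Rs2 & 0xff) << 8);
  }
  // PACK (RV64): low 32 bits of rs1 in the low half, of rs2 in the high half.
  uint64_t pack(uint64_t Rs1, uint64_t Rs2) {
    return (Rs1 & 0xffffffff) | (Rs2 << 32);
  }
  // PACKW (RV64): pack the low halfwords into a word, then sign-extend it.
  uint64_t packw(uint64_t Rs1, uint64_t Rs2) {
    int32_t W = int32_t((Rs1 & 0xffff) | ((Rs2 & 0xffff) << 16));
    return uint64_t(int64_t(W));
  }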
diff --git a/llvm/lib/Target/RISCV/RISCVInstrPredicates.td b/llvm/lib/Target/RISCV/RISCVInstrPredicates.td
index 6d86aff..3658817 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrPredicates.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrPredicates.td
@@ -14,6 +14,10 @@
// otherwise.
def VLDSX0Pred : MCSchedPredicate<CheckRegOperand<3, X0>>;
+// This scheduling predicate is true when subtarget feature TuneHasSingleElementVecFP64
+// is enabled.
+def SingleElementVecFP64SchedPred : FeatureSchedPredicate<TuneHasSingleElementVecFP64>;
+
// Returns true if this is the sext.w pattern, addiw rd, rs1, 0.
def isSEXT_W
: TIIPredicate<"isSEXT_W",
diff --git a/llvm/lib/Target/RISCV/RISCVProcessors.td b/llvm/lib/Target/RISCV/RISCVProcessors.td
index 17a7948..e86431f 100644
--- a/llvm/lib/Target/RISCV/RISCVProcessors.td
+++ b/llvm/lib/Target/RISCV/RISCVProcessors.td
@@ -338,7 +338,8 @@ def SIFIVE_X390 : RISCVProcessorModel<"sifive-x390",
FeatureStdExtZvl1024b,
FeatureVendorXSiFivecdiscarddlone,
FeatureVendorXSiFivecflushdlone],
- SiFiveIntelligenceTuneFeatures>;
+ !listconcat(SiFiveIntelligenceTuneFeatures,
+ [TuneHasSingleElementVecFP64])>;
defvar SiFiveP400TuneFeatures = [TuneNoDefaultUnroll,
TuneConditionalCompressedMoveFusion,
diff --git a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
index 3e07eff..f863392a 100644
--- a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
+++ b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
@@ -317,7 +317,6 @@ multiclass SiFive7WriteResBase<int VLEN,
ProcResourceKind VL, ProcResourceKind VS,
ProcResourceKind VCQ,
SiFive7FPLatencies fpLatencies,
- bit isFP64Throttled = false,
bit hasFastGather = false> {
// Branching
@@ -832,29 +831,56 @@ multiclass SiFive7WriteResBase<int VLEN,
// 13. Vector Floating-Point Instructions
foreach mx = SchedMxListF in {
foreach sew = SchedSEWSet<mx, isF=1>.val in {
- defvar Cycles = !if(!and(isFP64Throttled, !eq(sew, 64)),
- SiFive7GetCyclesOnePerElement<mx, sew, VLEN>.c,
- SiFive7GetCyclesDefault<mx>.c);
- defvar Lat8 = !if(!and(isFP64Throttled, !eq(sew, 64)), Cycles, 8);
- defvar VA = !if(!and(isFP64Throttled, !eq(sew, 64)), VA1, VA1OrVA2);
defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxListF, isF=1>.c;
- let Latency = Lat8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
- defm : LMULSEWWriteResMXSEW<"WriteVFALUV", [VCQ, VA], mx, sew, IsWorstCase>;
- defm : LMULSEWWriteResMXSEW<"WriteVFALUF", [VCQ, VA], mx, sew, IsWorstCase>;
- defm : LMULSEWWriteResMXSEW<"WriteVFMulV", [VCQ, VA], mx, sew, IsWorstCase>;
- defm : LMULSEWWriteResMXSEW<"WriteVFMulF", [VCQ, VA], mx, sew, IsWorstCase>;
- defm : LMULSEWWriteResMXSEW<"WriteVFMulAddV", [VCQ, VA], mx, sew, IsWorstCase>;
- defm : LMULSEWWriteResMXSEW<"WriteVFMulAddF", [VCQ, VA], mx, sew, IsWorstCase>;
- defm : LMULSEWWriteResMXSEW<"WriteVFRecpV", [VCQ, VA1], mx, sew, IsWorstCase>;
- defm : LMULSEWWriteResMXSEW<"WriteVFCvtIToFV", [VCQ, VA1], mx, sew, IsWorstCase>;
- }
- defvar Lat4 = !if(!and(isFP64Throttled, !eq(sew, 64)), Cycles, 4);
- let Latency = Lat4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
- defm : LMULSEWWriteResMXSEW<"WriteVFSgnjV", [VCQ, VA], mx, sew, IsWorstCase>;
- defm : LMULSEWWriteResMXSEW<"WriteVFSgnjF", [VCQ, VA], mx, sew, IsWorstCase>;
- // min max require merge
- defm : LMULSEWWriteResMXSEW<"WriteVFMinMaxV", [VCQ, VA1], mx, sew, IsWorstCase>;
- defm : LMULSEWWriteResMXSEW<"WriteVFMinMaxF", [VCQ, VA1], mx, sew, IsWorstCase>;
+ if !eq(sew, 64) then {
+ defvar SingleElementCycles = SiFive7GetCyclesOnePerElement<mx, sew, VLEN>.c;
+ foreach SchedWriteName = ["WriteVFALUV", "WriteVFALUF", "WriteVFMulV", "WriteVFMulF",
+ "WriteVFMulAddV", "WriteVFMulAddF"] in
+ defm : LMULSEWWriteResMXSEWVariant<SchedWriteName, SingleElementVecFP64SchedPred,
+ // Predicated
+ [VCQ, VA1], !add(SingleElementCycles, 7), [0, 1], [1, !add(1, SingleElementCycles)],
+ // Not Predicated
+ [VCQ, VA1OrVA2], 8, [0, 1], [1, !add(1, SiFive7GetCyclesDefault<mx>.c)],
+ mx, sew, IsWorstCase>;
+ foreach SchedWriteName = ["WriteVFRecpV", "WriteVFCvtIToFV"] in
+ defm : LMULSEWWriteResMXSEWVariant<SchedWriteName, SingleElementVecFP64SchedPred,
+ // Predicated
+ [VCQ, VA1], !add(SingleElementCycles, 7), [0, 1], [1, !add(1, SingleElementCycles)],
+ // Not Predicated
+ [VCQ, VA1], 8, [0, 1], [1, !add(1, SiFive7GetCyclesDefault<mx>.c)],
+ mx, sew, IsWorstCase>;
+ foreach SchedWriteName = ["WriteVFSgnjV", "WriteVFSgnjF"] in
+ defm : LMULSEWWriteResMXSEWVariant<SchedWriteName, SingleElementVecFP64SchedPred,
+ // Predicated
+ [VCQ, VA1], !add(SingleElementCycles, 3), [0, 1], [1, !add(1, SingleElementCycles)],
+ // Not Predicated
+ [VCQ, VA1OrVA2], 4, [0, 1], [1, !add(1, SiFive7GetCyclesDefault<mx>.c)],
+ mx, sew, IsWorstCase>;
+ foreach SchedWriteName = ["WriteVFMinMaxV", "WriteVFMinMaxF"] in
+ defm : LMULSEWWriteResMXSEWVariant<SchedWriteName, SingleElementVecFP64SchedPred,
+ // Predicated
+ [VCQ, VA1], !add(SingleElementCycles, 3), [0, 1], [1, !add(1, SingleElementCycles)],
+ // Not Predicated
+ [VCQ, VA1], 4, [0, 1], [1, !add(1, SiFive7GetCyclesDefault<mx>.c)],
+ mx, sew, IsWorstCase>;
+ } else {
+ let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, SiFive7GetCyclesDefault<mx>.c)] in {
+ defm : LMULSEWWriteResMXSEW<"WriteVFALUV", [VCQ, VA1OrVA2], mx, sew, IsWorstCase>;
+ defm : LMULSEWWriteResMXSEW<"WriteVFALUF", [VCQ, VA1OrVA2], mx, sew, IsWorstCase>;
+ defm : LMULSEWWriteResMXSEW<"WriteVFMulV", [VCQ, VA1OrVA2], mx, sew, IsWorstCase>;
+ defm : LMULSEWWriteResMXSEW<"WriteVFMulF", [VCQ, VA1OrVA2], mx, sew, IsWorstCase>;
+ defm : LMULSEWWriteResMXSEW<"WriteVFMulAddV", [VCQ, VA1OrVA2], mx, sew, IsWorstCase>;
+ defm : LMULSEWWriteResMXSEW<"WriteVFMulAddF", [VCQ, VA1OrVA2], mx, sew, IsWorstCase>;
+ defm : LMULSEWWriteResMXSEW<"WriteVFRecpV", [VCQ, VA1], mx, sew, IsWorstCase>;
+ defm : LMULSEWWriteResMXSEW<"WriteVFCvtIToFV", [VCQ, VA1], mx, sew, IsWorstCase>;
+ }
+ let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, SiFive7GetCyclesDefault<mx>.c)] in {
+ defm : LMULSEWWriteResMXSEW<"WriteVFSgnjV", [VCQ, VA1OrVA2], mx, sew, IsWorstCase>;
+ defm : LMULSEWWriteResMXSEW<"WriteVFSgnjF", [VCQ, VA1OrVA2], mx, sew, IsWorstCase>;
+ // min max require merge
+ defm : LMULSEWWriteResMXSEW<"WriteVFMinMaxV", [VCQ, VA1], mx, sew, IsWorstCase>;
+ defm : LMULSEWWriteResMXSEW<"WriteVFMinMaxF", [VCQ, VA1], mx, sew, IsWorstCase>;
+ }
}
}
}
@@ -892,19 +918,28 @@ multiclass SiFive7WriteResBase<int VLEN,
// Widening
foreach mx = SchedMxListW in {
foreach sew = SchedSEWSet<mx, isF=0, isWidening=1>.val in {
- defvar Cycles = !if(!and(isFP64Throttled, !eq(sew, 32)),
- SiFive7GetCyclesOnePerElement<mx, sew, VLEN>.c,
- SiFive7GetCyclesDefault<mx>.c);
defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxListW>.c;
- let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in
- defm : LMULSEWWriteResMXSEW<"WriteVFWCvtIToFV", [VCQ, VA1], mx, sew, IsWorstCase>;
+ defvar DefaultCycles = SiFive7GetCyclesDefault<mx>.c;
+ if !eq(sew, 32) then {
+ defvar SingleElementCycles = SiFive7GetCyclesOnePerElement<mx, sew, VLEN>.c;
+ defm : LMULSEWWriteResMXSEWVariant<"WriteVFWCvtIToFV", SingleElementVecFP64SchedPred,
+ // Predicated
+ [VCQ, VA1], 8, [0, 1], [1, !add(1, SingleElementCycles)],
+ // Not Predicated
+ [VCQ, VA1], 8, [0, 1], [1, !add(1, DefaultCycles)],
+ mx, sew, IsWorstCase>;
+ } else {
+ let Latency = 8,
+ AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, DefaultCycles)] in
+ defm : LMULSEWWriteResMXSEW<"WriteVFWCvtIToFV", [VCQ, VA1], mx, sew, IsWorstCase>;
+ }
}
}
foreach mx = SchedMxListFW in {
foreach sew = SchedSEWSet<mx, isF=1, isWidening=1>.val in {
- defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
+ defvar DefaultCycles = SiFive7GetCyclesDefault<mx>.c;
defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxListFW, isF=1>.c;
- let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+ let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, DefaultCycles)] in {
defm : LMULSEWWriteResMXSEW<"WriteVFWALUV", [VCQ, VA1OrVA2], mx, sew, IsWorstCase>;
defm : LMULSEWWriteResMXSEW<"WriteVFWALUF", [VCQ, VA1OrVA2], mx, sew, IsWorstCase>;
defm : LMULSEWWriteResMXSEW<"WriteVFWMulV", [VCQ, VA1OrVA2], mx, sew, IsWorstCase>;
@@ -912,11 +947,19 @@ multiclass SiFive7WriteResBase<int VLEN,
defm : LMULSEWWriteResMXSEW<"WriteVFWMulAddV", [VCQ, VA1OrVA2], mx, sew, IsWorstCase>;
defm : LMULSEWWriteResMXSEW<"WriteVFWMulAddF", [VCQ, VA1OrVA2], mx, sew, IsWorstCase>;
}
- defvar CvtCycles = !if(!and(isFP64Throttled, !eq(sew, 32)),
- SiFive7GetCyclesOnePerElement<mx, sew, VLEN>.c,
- SiFive7GetCyclesDefault<mx>.c);
- let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, CvtCycles)] in
- defm "" : LMULSEWWriteResMXSEW<"WriteVFWCvtFToFV", [VCQ, VA1], mx, sew, IsWorstCase>;
+ if !eq(sew, 32) then {
+ defvar SingleElementCycles = SiFive7GetCyclesOnePerElement<mx, sew, VLEN>.c;
+ defm : LMULSEWWriteResMXSEWVariant<"WriteVFWCvtFToFV", SingleElementVecFP64SchedPred,
+ // Predicated
+ [VCQ, VA1], 8, [0, 1], [1, !add(1, SingleElementCycles)],
+ // Not Predicated
+ [VCQ, VA1], 8, [0, 1], [1, !add(1, DefaultCycles)],
+ mx, sew, IsWorstCase>;
+ } else {
+ let Latency = 8,
+ AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, DefaultCycles)] in
+ defm : LMULSEWWriteResMXSEW<"WriteVFWCvtFToFV", [VCQ, VA1], mx, sew, IsWorstCase>;
+ }
}
defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxListFW>.c;
@@ -933,13 +976,23 @@ multiclass SiFive7WriteResBase<int VLEN,
}
foreach mx = SchedMxListFW in {
foreach sew = SchedSEWSet<mx, isF=1, isWidening=1>.val in {
- defvar Cycles = !if(!and(isFP64Throttled, !eq(sew, 32)),
- SiFive7GetCyclesOnePerElement<mx, sew, VLEN>.c,
- SiFive7GetCyclesNarrowing<mx>.c);
defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxListFW, isF=1>.c;
- let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
- defm : LMULSEWWriteResMXSEW<"WriteVFNCvtIToFV", [VCQ, VA1], mx, sew, IsWorstCase>;
- defm : LMULSEWWriteResMXSEW<"WriteVFNCvtFToFV", [VCQ, VA1], mx, sew, IsWorstCase>;
+ defvar DefaultCycles = SiFive7GetCyclesNarrowing<mx>.c;
+ if !eq(sew, 32) then {
+ defvar SingleElementCycles = SiFive7GetCyclesOnePerElement<mx, sew, VLEN>.c;
+ foreach SchedWriteName = ["WriteVFNCvtIToFV", "WriteVFNCvtFToFV"] in
+ defm : LMULSEWWriteResMXSEWVariant<SchedWriteName, SingleElementVecFP64SchedPred,
+ // Predicated
+ [VCQ, VA1], 8, [0, 1], [1, !add(1, SingleElementCycles)],
+ // Not Predicated
+ [VCQ, VA1], 8, [0, 1], [1, !add(1, DefaultCycles)],
+ mx, sew, IsWorstCase>;
+ } else {
+ let Latency = 8,
+ AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, DefaultCycles)] in {
+ defm : LMULSEWWriteResMXSEW<"WriteVFNCvtIToFV", [VCQ, VA1], mx, sew, IsWorstCase>;
+ defm : LMULSEWWriteResMXSEW<"WriteVFNCvtFToFV", [VCQ, VA1], mx, sew, IsWorstCase>;
+ }
}
}
}
@@ -1499,7 +1552,6 @@ multiclass SiFive7ReadAdvance {
/// eventually be supplied by different SchedMachineModels.
multiclass SiFive7SchedResources<int vlen, bit extraVALU,
SiFive7FPLatencies fpLatencies,
- bit isFP64Throttled,
bit hasFastGather> {
defm SiFive7 : SiFive7ProcResources<extraVALU>;
@@ -1527,8 +1579,7 @@ multiclass SiFive7SchedResources<int vlen, bit extraVALU,
: SiFive7WriteResBase<vlen, SiFive7PipeA, SiFive7PipeB, SiFive7PipeAB,
SiFive7IDiv, SiFive7FDiv, SiFive7VA1,
SiFive7VA1OrVA2, SiFive7VL, SiFive7VS,
- SiFive7VCQ, fpLatencies, isFP64Throttled,
- hasFastGather>;
+ SiFive7VCQ, fpLatencies, hasFastGather>;
//===----------------------------------------------------------------------===//
// Bypass and advance
@@ -1560,7 +1611,6 @@ class SiFive7SchedMachineModel<int vlen> : SchedMachineModel {
bit HasExtraVALU = false;
SiFive7FPLatencies FPLatencies;
- bit IsFP64Throttled = false;
bit HasFastGather = false;
string Name = !subst("Model", "", !subst("SiFive7", "", NAME));
@@ -1587,7 +1637,6 @@ def SiFive7VLEN512Model : SiFive7SchedMachineModel<512> {
def SiFive7VLEN1024X300Model : SiFive7SchedMachineModel<1024> {
let HasExtraVALU = true;
let FPLatencies = SiFive7LowFPLatencies;
- let IsFP64Throttled = true;
let HasFastGather = true;
}
@@ -1596,7 +1645,6 @@ foreach model = [SiFive7VLEN512Model, SiFive7VLEN1024X300Model] in {
let SchedModel = model in
defm model.Name : SiFive7SchedResources<model.VLEN, model.HasExtraVALU,
model.FPLatencies,
- model.IsFP64Throttled,
model.HasFastGather>;
}
diff --git a/llvm/lib/Target/RISCV/RISCVScheduleV.td b/llvm/lib/Target/RISCV/RISCVScheduleV.td
index 01a4308..d11b446 100644
--- a/llvm/lib/Target/RISCV/RISCVScheduleV.td
+++ b/llvm/lib/Target/RISCV/RISCVScheduleV.td
@@ -128,6 +128,22 @@ multiclass LMULWriteResMXVariant<string name, SchedPredicateBase Pred,
IsWorstCase>;
}
+multiclass LMULSEWWriteResMXSEWVariant<string name, SchedPredicateBase Pred,
+ list<ProcResourceKind> predResources,
+ int predLat, list<int> predAcquireCycles,
+ list<int> predReleaseCycles,
+ list<ProcResourceKind> noPredResources,
+ int noPredLat, list<int> noPredAcquireCycles,
+ list<int> noPredReleaseCycles,
+ string mx, int sew, bit IsWorstCase> {
+ defm "" : LMULWriteResVariantImpl<name, name # "_" # mx # "_E" # sew, Pred, predResources,
+ predLat, predAcquireCycles,
+ predReleaseCycles, noPredResources,
+ noPredLat, noPredAcquireCycles,
+ noPredReleaseCycles,
+ IsWorstCase>;
+}
+
// Define multiclasses to define SchedWrite, SchedRead, WriteRes, and
// ReadAdvance for each (name, LMUL) pair and for each LMUL in each of the
// SchedMxList variants above. Each multiclass is responsible for defining
diff --git a/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZInstPrinterCommon.cpp b/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZInstPrinterCommon.cpp
index af79070..275165d 100644
--- a/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZInstPrinterCommon.cpp
+++ b/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZInstPrinterCommon.cpp
@@ -184,8 +184,8 @@ void SystemZInstPrinterCommon::printPCRelTLSOperand(const MCInst *MI,
// Output the TLS marker if present.
if ((unsigned)OpNum + 1 < MI->getNumOperands()) {
const MCOperand &MO = MI->getOperand(OpNum + 1);
- const MCSymbolRefExpr &refExp = cast<MCSymbolRefExpr>(*MO.getExpr());
- switch (refExp.getSpecifier()) {
+ const MCSymbolRefExpr &RefExp = cast<MCSymbolRefExpr>(*MO.getExpr());
+ switch (RefExp.getSpecifier()) {
case SystemZ::S_TLSGD:
O << ":tls_gdcall:";
break;
@@ -195,7 +195,7 @@ void SystemZInstPrinterCommon::printPCRelTLSOperand(const MCInst *MI,
default:
llvm_unreachable("Unexpected symbol kind");
}
- O << refExp.getSymbol().getName();
+ O << RefExp.getSymbol().getName();
}
}
diff --git a/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.cpp b/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.cpp
index fce6393..8c31579 100644
--- a/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.cpp
@@ -13,10 +13,9 @@
using namespace llvm;
-SystemZConstantPoolValue::
-SystemZConstantPoolValue(const GlobalValue *gv,
- SystemZCP::SystemZCPModifier modifier)
- : MachineConstantPoolValue(gv->getType()), GV(gv), Modifier(modifier) {}
+SystemZConstantPoolValue::SystemZConstantPoolValue(
+ const GlobalValue *GV, SystemZCP::SystemZCPModifier Modifier)
+ : MachineConstantPoolValue(GV->getType()), GV(GV), Modifier(Modifier) {}
SystemZConstantPoolValue *
SystemZConstantPoolValue::Create(const GlobalValue *GV,
diff --git a/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.cpp b/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.cpp
index 34d58e0..5313fba 100644
--- a/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.cpp
@@ -352,10 +352,9 @@ int SystemZHazardRecognizer::groupingCost(SUnit *SU) const {
// Similarly, a group-ending SU may either fit well (last in group), or
// end the group prematurely.
if (SC->EndGroup) {
- unsigned resultingGroupSize =
- (CurrGroupSize + getNumDecoderSlots(SU));
- if (resultingGroupSize < 3)
- return (3 - resultingGroupSize);
+ unsigned ResultingGroupSize = (CurrGroupSize + getNumDecoderSlots(SU));
+ if (ResultingGroupSize < 3)
+ return (3 - ResultingGroupSize);
return -1;
}
diff --git a/llvm/lib/Target/X86/X86InstrAVX10.td b/llvm/lib/Target/X86/X86InstrAVX10.td
index 764ff998..4b3ddbd 100644
--- a/llvm/lib/Target/X86/X86InstrAVX10.td
+++ b/llvm/lib/Target/X86/X86InstrAVX10.td
@@ -592,10 +592,10 @@ def : Pat<(X86mcvttp2sis (v2f64 (X86VBroadcastld64 addr:$src)),
(VCVTTPD2DQSZ128rmbkz VK2WM:$mask, addr:$src)>;
// Patterns VCVTTPD2UDQSZ128
-def : Pat<(v4i32 (X86cvttp2uis (v2f64 (X86VBroadcastld64 addr:$src)))),
- (VCVTTPD2UDQSZ128rmb addr:$src)>;
def : Pat<(v4i32 (X86cvttp2uis (v2f64 VR128X:$src))),
(VCVTTPD2UDQSZ128rr VR128X:$src)>;
+def : Pat<(v4i32 (X86cvttp2uis (loadv2f64 addr:$src))),
+ (VCVTTPD2UDQSZ128rm addr:$src)>;
def : Pat<(v4i32 (X86cvttp2uis (v2f64 (X86VBroadcastld64 addr:$src)))),
(VCVTTPD2UDQSZ128rmb addr:$src)>;
def : Pat<(X86mcvttp2uis (v2f64 VR128X:$src), (v4i32 VR128X:$src0),