Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64FrameLowering.cpp        25
-rw-r--r--  llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp   7
-rw-r--r--  llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h    11
3 files changed, 39 insertions, 4 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index b91e5c5..2f3cb71 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -335,6 +335,26 @@ static Register findScratchNonCalleeSaveRegister(MachineBasicBlock *MBB,
bool HasCall = false);
static bool requiresSaveVG(const MachineFunction &MF);
+// Conservatively returns true if the function is likely to have SVE vectors
+// on the stack. This function is safe to call before callee-saves or object
+// offsets have been determined.
+static bool isLikelyToHaveSVEStack(MachineFunction &MF) {
+ auto *AFI = MF.getInfo<AArch64FunctionInfo>();
+ if (AFI->isSVECC())
+ return true;
+
+ if (AFI->hasCalculatedStackSizeSVE())
+ return bool(getSVEStackSize(MF));
+
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
+ for (int FI = MFI.getObjectIndexBegin(); FI < MFI.getObjectIndexEnd(); FI++) {
+ if (MFI.getStackID(FI) == TargetStackID::ScalableVector)
+ return true;
+ }
+
+ return false;
+}
+
/// Returns true if a homogeneous prolog or epilog code can be emitted
/// for the size optimization. If possible, a frame helper call is injected.
/// When Exit block is given, this check is for epilog.
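
When StackSizeSVE has not been computed yet, the new helper falls back to scanning frame objects for the ScalableVector stack ID. A minimal sketch (hypothetical helper, not part of this patch) of how such an object would be created, which is what that scan detects:

#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"

// Hypothetical helper (illustration only): allocating a slot with the
// ScalableVector stack ID is what the frame-index scan in
// isLikelyToHaveSVEStack() picks up before SVE offsets are assigned.
static int createScalableStackObject(llvm::MachineFrameInfo &MFI) {
  return MFI.CreateStackObject(/*Size=*/16, llvm::Align(16),
                               /*isSpillSlot=*/false, /*Alloca=*/nullptr,
                               llvm::TargetStackID::ScalableVector);
}
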
@@ -350,8 +370,9 @@ bool AArch64FrameLowering::homogeneousPrologEpilog(
// TODO: Windows is not supported yet.
if (needsWinCFI(MF))
return false;
+
// TODO: SVE is not supported yet.
- if (getSVEStackSize(MF))
+ if (isLikelyToHaveSVEStack(MF))
return false;
// Bail on stack adjustment needed on return for simplicity.
@@ -3039,7 +3060,7 @@ static bool produceCompactUnwindFrame(MachineFunction &MF) {
!(Subtarget.getTargetLowering()->supportSwiftError() &&
Attrs.hasAttrSomewhere(Attribute::SwiftError)) &&
MF.getFunction().getCallingConv() != CallingConv::SwiftTail &&
- !requiresSaveVG(MF) && AFI->getSVECalleeSavedStackSize() == 0;
+ !requiresSaveVG(MF) && !AFI->isSVECC();
}
static bool invalidateWindowsRegisterPairing(unsigned Reg1, unsigned Reg2,
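
The compact-unwind check above switches from the SVE callee-save size (whose getter gains an assertion later in this patch) to the isSVECC() flag, which can be read at any point. A minimal sketch (hypothetical wrapper, not part of the patch) of the substituted condition:

// Hypothetical wrapper (illustration only): the compact-unwind decision now
// relies on the always-available SVE calling-convention flag instead of the
// assert-guarded getSVECalleeSavedStackSize().
static bool sveBlocksCompactUnwind(const llvm::AArch64FunctionInfo &AFI) {
  return AFI.isSVECC();
}
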
diff --git a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp
index 4b04b80..b4197a0 100644
--- a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp
@@ -25,7 +25,10 @@ using namespace llvm;
yaml::AArch64FunctionInfo::AArch64FunctionInfo(
const llvm::AArch64FunctionInfo &MFI)
- : HasRedZone(MFI.hasRedZone()) {}
+ : HasRedZone(MFI.hasRedZone()),
+ StackSizeSVE(MFI.hasCalculatedStackSizeSVE()
+ ? std::optional<uint64_t>(MFI.getStackSizeSVE())
+ : std::nullopt) {}
void yaml::AArch64FunctionInfo::mappingImpl(yaml::IO &YamlIO) {
MappingTraits<AArch64FunctionInfo>::mapping(YamlIO, *this);
@@ -35,6 +38,8 @@ void AArch64FunctionInfo::initializeBaseYamlFields(
const yaml::AArch64FunctionInfo &YamlMFI) {
if (YamlMFI.HasRedZone)
HasRedZone = YamlMFI.HasRedZone;
+ if (YamlMFI.StackSizeSVE)
+ setStackSizeSVE(*YamlMFI.StackSizeSVE);
}
static std::pair<bool, bool> GetSignReturnAddress(const Function &F) {
diff --git a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
index e61f228..800787c 100644
--- a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
@@ -82,6 +82,7 @@ class AArch64FunctionInfo final : public MachineFunctionInfo {
unsigned CalleeSavedStackSize = 0;
unsigned SVECalleeSavedStackSize = 0;
bool HasCalleeSavedStackSize = false;
+ bool HasSVECalleeSavedStackSize = false;
/// Number of TLS accesses using the special (combinable)
/// _TLS_MODULE_BASE_ symbol.
@@ -306,7 +307,10 @@ public:
StackSizeSVE = S;
}
- uint64_t getStackSizeSVE() const { return StackSizeSVE; }
+ uint64_t getStackSizeSVE() const {
+ assert(hasCalculatedStackSizeSVE());
+ return StackSizeSVE;
+ }
bool hasStackFrame() const { return HasStackFrame; }
void setHasStackFrame(bool s) { HasStackFrame = s; }
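
With the getter now asserting, callers that may run before frame finalization are expected to test hasCalculatedStackSizeSVE() first, as the YAML constructor earlier in this patch does. A minimal sketch (hypothetical helper, not part of the patch) of that check-then-read pattern:

// Hypothetical helper (illustration only): read the SVE stack size if it has
// been computed, otherwise fall back to zero instead of tripping the assert.
static uint64_t getStackSizeSVEOrZero(const llvm::AArch64FunctionInfo &AFI) {
  return AFI.hasCalculatedStackSizeSVE() ? AFI.getStackSizeSVE() : 0;
}
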
@@ -400,8 +404,11 @@ public:
// Saves the CalleeSavedStackSize for SVE vectors in 'scalable bytes'
void setSVECalleeSavedStackSize(unsigned Size) {
SVECalleeSavedStackSize = Size;
+ HasSVECalleeSavedStackSize = true;
}
unsigned getSVECalleeSavedStackSize() const {
+ assert(HasSVECalleeSavedStackSize &&
+ "SVECalleeSavedStackSize has not been calculated");
return SVECalleeSavedStackSize;
}
@@ -592,6 +599,7 @@ private:
namespace yaml {
struct AArch64FunctionInfo final : public yaml::MachineFunctionInfo {
std::optional<bool> HasRedZone;
+ std::optional<uint64_t> StackSizeSVE;
AArch64FunctionInfo() = default;
AArch64FunctionInfo(const llvm::AArch64FunctionInfo &MFI);
@@ -603,6 +611,7 @@ struct AArch64FunctionInfo final : public yaml::MachineFunctionInfo {
template <> struct MappingTraits<AArch64FunctionInfo> {
static void mapping(IO &YamlIO, AArch64FunctionInfo &MFI) {
YamlIO.mapOptional("hasRedZone", MFI.HasRedZone);
+ YamlIO.mapOptional("stackSizeSVE", MFI.StackSizeSVE);
}
};