Diffstat (limited to 'llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h | 66
 1 file changed, 46 insertions(+), 20 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
index fe2e849..ecefe2a 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -59,9 +59,17 @@ class AArch64TTIImpl final : public BasicTTIImplBase<AArch64TTIImpl> {
VECTOR_LDST_FOUR_ELEMENTS
};
- bool isWideningInstruction(Type *DstTy, unsigned Opcode,
- ArrayRef<const Value *> Args,
- Type *SrcOverrideTy = nullptr) const;
+ /// Given an add/sub/mul operation, detect a widening addl/subl/mull pattern
+ /// where both operands can be treated as extends. Returns the minimal type
+ /// needed to compute the operation.
+ Type *isBinExtWideningInstruction(unsigned Opcode, Type *DstTy,
+ ArrayRef<const Value *> Args,
+ Type *SrcOverrideTy = nullptr) const;
+ /// Given an add/sub operation with a single extend operand, detect a
+ /// widening addw/subw pattern.
+ bool isSingleExtWideningInstruction(unsigned Opcode, Type *DstTy,
+ ArrayRef<const Value *> Args,
+ Type *SrcOverrideTy = nullptr) const;
// A helper function called by 'getVectorInstrCost'.
//
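Note (not part of the patch): the IR shape the new widening checks target is an add/sub/mul whose operands are extends of a narrower vector, which AArch64 can lower to a single widening instruction such as saddl/uaddl/smull. A minimal IRBuilder sketch of such a candidate follows; the function name is illustrative only, and the saddl mapping assumes the extends and the add stay together through lowering.

// Sketch only: both add operands are sign-extends of <4 x i16>, so the
// <4 x i32> add can be computed as a single widening add (saddl).
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

static Value *buildWideningAddExample(IRBuilder<> &B, Value *A16, Value *B16) {
  Type *V4I32 = FixedVectorType::get(B.getInt32Ty(), 4);
  Value *LHS = B.CreateSExt(A16, V4I32); // sext <4 x i16> -> <4 x i32>
  Value *RHS = B.CreateSExt(B16, V4I32); // sext <4 x i16> -> <4 x i32>
  return B.CreateAdd(LHS, RHS);          // add <4 x i32>, a widening-add candidate
}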
@@ -84,12 +92,13 @@ public:
const Function *Callee) const override;
bool areTypesABICompatible(const Function *Caller, const Function *Callee,
- const ArrayRef<Type *> &Types) const override;
+ ArrayRef<Type *> Types) const override;
unsigned getInlineCallPenalty(const Function *F, const CallBase &Call,
unsigned DefaultCallPenalty) const override;
APInt getFeatureMask(const Function &F) const override;
+ APInt getPriorityMask(const Function &F) const override;
bool isMultiversionedFunction(const Function &F) const override;
@@ -180,15 +189,14 @@ public:
unsigned Opcode2) const;
InstructionCost
- getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
- unsigned AddressSpace,
- TTI::TargetCostKind CostKind) const override;
+ getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA,
+ TTI::TargetCostKind CostKind) const override;
- InstructionCost
- getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
- bool VariableMask, Align Alignment,
- TTI::TargetCostKind CostKind,
- const Instruction *I = nullptr) const override;
+ InstructionCost getMaskedMemoryOpCost(const MemIntrinsicCostAttributes &MICA,
+ TTI::TargetCostKind CostKind) const;
+
+ InstructionCost getGatherScatterOpCost(const MemIntrinsicCostAttributes &MICA,
+ TTI::TargetCostKind CostKind) const;
bool isExtPartOfAvgExpr(const Instruction *ExtUser, Type *Dst,
Type *Src) const;
@@ -304,7 +312,7 @@ public:
}
bool isLegalMaskedLoadStore(Type *DataType, Align Alignment) const {
- if (!ST->hasSVE())
+ if (!ST->isSVEorStreamingSVEAvailable())
return false;
// For fixed vectors, avoid scalarization if using SVE for them.
@@ -316,15 +324,34 @@ public:
}
bool isLegalMaskedLoad(Type *DataType, Align Alignment,
- unsigned /*AddressSpace*/) const override {
+ unsigned /*AddressSpace*/,
+ TTI::MaskKind /*MaskKind*/) const override {
return isLegalMaskedLoadStore(DataType, Alignment);
}
bool isLegalMaskedStore(Type *DataType, Align Alignment,
- unsigned /*AddressSpace*/) const override {
+ unsigned /*AddressSpace*/,
+ TTI::MaskKind /*MaskKind*/) const override {
return isLegalMaskedLoadStore(DataType, Alignment);
}
+ bool isElementTypeLegalForCompressStore(Type *Ty) const {
+ return Ty->isFloatTy() || Ty->isDoubleTy() || Ty->isIntegerTy(32) ||
+ Ty->isIntegerTy(64);
+ }
+
+ bool isLegalMaskedCompressStore(Type *DataType,
+ Align Alignment) const override {
+ if (!ST->isSVEAvailable())
+ return false;
+
+ if (isa<FixedVectorType>(DataType) &&
+ DataType->getPrimitiveSizeInBits() < 128)
+ return false;
+
+ return isElementTypeLegalForCompressStore(DataType->getScalarType());
+ }
+
bool isLegalMaskedGatherScatter(Type *DataType) const {
if (!ST->isSVEAvailable())
return false;
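To make the new compress-store legality concrete, here is an illustrative sketch (not from the patch; the helper name is made up) of how a few types fare against the isLegalMaskedCompressStore checks above, assuming ST->isSVEAvailable() returns true: scalable vectors and fixed vectors of at least 128 bits with f32/f64/i32/i64 elements pass, while narrower fixed vectors and other element types are rejected.

// Illustrative only: example types against the compress-store legality
// logic in the hunk above (assumes SVE is available).
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

static void compressStoreLegalityExamples(LLVMContext &Ctx) {
  // Accepted: scalable vector with an i32 element type.
  Type *NxV4I32 = ScalableVectorType::get(Type::getInt32Ty(Ctx), 4);
  // Accepted: 128-bit fixed vector with an f64 element type.
  Type *V2F64 = FixedVectorType::get(Type::getDoubleTy(Ctx), 2);
  // Rejected: fixed vector narrower than 128 bits.
  Type *V2F32 = FixedVectorType::get(Type::getFloatTy(Ctx), 2);
  // Rejected: i16 is not in the legal element-type set (f32/f64/i32/i64).
  Type *NxV8I16 = ScalableVectorType::get(Type::getInt16Ty(Ctx), 8);
  (void)NxV4I32; (void)V2F64; (void)V2F32; (void)NxV8I16;
}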
@@ -448,11 +475,10 @@ public:
/// FP16 and BF16 operations are lowered to fptrunc(op(fpext, fpext)) if the
/// architecture features are not present.
- std::optional<InstructionCost>
- getFP16BF16PromoteCost(Type *Ty, TTI::TargetCostKind CostKind,
- TTI::OperandValueInfo Op1Info,
- TTI::OperandValueInfo Op2Info, bool IncludeTrunc,
- std::function<InstructionCost(Type *)> InstCost) const;
+ std::optional<InstructionCost> getFP16BF16PromoteCost(
+ Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info,
+ TTI::OperandValueInfo Op2Info, bool IncludeTrunc, bool CanUseSVE,
+ std::function<InstructionCost(Type *)> InstCost) const;
InstructionCost
getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
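As context for the getFP16BF16PromoteCost comment above: the promotion shape being costed is the operation widened to f32 and truncated back. A minimal IRBuilder sketch of that pattern for a half fadd follows; it illustrates the lowering shape only and is not part of this change.

// Sketch of the promotion shape: without native fp16 arithmetic, a half
// fadd is modelled as fptrunc(fadd(fpext(a), fpext(b))).
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

static Value *promotedHalfFAddExample(IRBuilder<> &B, Value *A, Value *C) {
  Type *F32 = B.getFloatTy();
  Value *WideA = B.CreateFPExt(A, F32);             // fpext half -> float
  Value *WideC = B.CreateFPExt(C, F32);             // fpext half -> float
  Value *WideAdd = B.CreateFAdd(WideA, WideC);      // do the math in float
  return B.CreateFPTrunc(WideAdd, B.getHalfTy());   // truncate back to half
}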