author     Simon Moll <simon.moll@emea.nec.com>    2020-04-15 14:00:07 +0200
committer  Simon Moll <simon.moll@emea.nec.com>    2020-04-15 14:00:07 +0200
commit     b310daea219b2fb2fe50362f7eec8c0b4ff79a29 (patch)
tree       b0e15724e1ee085a01fe7bb18018a1c988624189 /llvm
parent     edbb27ccb63402b591a459f4087434ea778c23a7 (diff)
[nfc] clang-format TargetTransformInfo.h
Diffstat (limited to 'llvm')
-rw-r--r--   llvm/include/llvm/Analysis/TargetTransformInfo.h   300
1 file changed, 140 insertions, 160 deletions
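The patch is a formatting-only (NFC) cleanup. Judging by the commit title, it was presumably produced by running clang-format in place over the header; a hypothetical invocation, assuming a checkout of the LLVM monorepo with clang-format available on the PATH, would be:

    clang-format -i llvm/include/llvm/Analysis/TargetTransformInfo.h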
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h index d70396a..ba8b775 100644 --- a/llvm/include/llvm/Analysis/TargetTransformInfo.h +++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h @@ -22,15 +22,15 @@ #define LLVM_ANALYSIS_TARGETTRANSFORMINFO_H #include "llvm/ADT/Optional.h" +#include "llvm/Analysis/AssumptionCache.h" +#include "llvm/Analysis/LoopInfo.h" +#include "llvm/Analysis/ScalarEvolution.h" +#include "llvm/IR/Dominators.h" #include "llvm/IR/Operator.h" #include "llvm/IR/PassManager.h" #include "llvm/Pass.h" #include "llvm/Support/AtomicOrdering.h" #include "llvm/Support/DataTypes.h" -#include "llvm/Analysis/LoopInfo.h" -#include "llvm/Analysis/ScalarEvolution.h" -#include "llvm/IR/Dominators.h" -#include "llvm/Analysis/AssumptionCache.h" #include <functional> namespace llvm { @@ -78,7 +78,8 @@ struct MemIntrinsicInfo { bool isUnordered() const { return (Ordering == AtomicOrdering::NotAtomic || - Ordering == AtomicOrdering::Unordered) && !IsVolatile; + Ordering == AtomicOrdering::Unordered) && + !IsVolatile; } }; @@ -165,7 +166,7 @@ public: /// Note, this method does not cache the cost calculation and it /// can be expensive in some cases. int getInstructionCost(const Instruction *I, enum TargetCostKind kind) const { - switch (kind){ + switch (kind) { case TCK_RecipThroughput: return getInstructionThroughput(I); @@ -245,8 +246,8 @@ public: /// /// Vector bonuses: We want to more aggressively inline vector-dense kernels /// and apply this bonus based on the percentage of vector instructions. A - /// bonus is applied if the vector instructions exceed 50% and half that amount - /// is applied if it exceeds 10%. Note that these bonuses are some what + /// bonus is applied if the vector instructions exceed 50% and half that + /// amount is applied if it exceeds 10%. Note that these bonuses are some what /// arbitrary and evolved over time by accident as much as because they are /// principled bonuses. /// FIXME: It would be nice to base the bonus values on something more @@ -320,9 +321,9 @@ public: /// Returns whether V is a source of divergence. /// /// This function provides the target-dependent information for - /// the target-independent LegacyDivergenceAnalysis. LegacyDivergenceAnalysis first - /// builds the dependency graph, and then runs the reachability algorithm - /// starting with the sources of divergence. + /// the target-independent LegacyDivergenceAnalysis. LegacyDivergenceAnalysis + /// first builds the dependency graph, and then runs the reachability + /// algorithm starting with the sources of divergence. bool isSourceOfDivergence(const Value *V) const; // Returns true for the target specific @@ -359,8 +360,8 @@ public: /// NewV, which has a different address space. This should happen for every /// operand index that collectFlatAddressOperands returned for the intrinsic. /// \returns true if the intrinsic /// was handled. - bool rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, - Value *OldV, Value *NewV) const; + bool rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, + Value *NewV) const; /// Test whether calls to a function lower to actual program function /// calls. @@ -492,12 +493,11 @@ public: /// Query the target whether it would be profitable to convert the given loop /// into a hardware loop. 
bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, - AssumptionCache &AC, - TargetLibraryInfo *LibInfo, + AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const; - /// Query the target whether it would be prefered to create a predicated vector - /// loop, which can avoid the need to emit a scalar epilogue loop. + /// Query the target whether it would be prefered to create a predicated + /// vector loop, which can avoid the need to emit a scalar epilogue loop. bool preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *TLI, DominatorTree *DT, @@ -703,8 +703,8 @@ public: bool isFPVectorizationPotentiallyUnsafe() const; /// Determine if the target supports unaligned memory accesses. - bool allowsMisalignedMemoryAccesses(LLVMContext &Context, - unsigned BitWidth, unsigned AddressSpace = 0, + bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, + unsigned AddressSpace = 0, unsigned Alignment = 1, bool *Fast = nullptr) const; @@ -752,18 +752,18 @@ public: /// The various kinds of shuffle patterns for vector queries. enum ShuffleKind { - SK_Broadcast, ///< Broadcast element 0 to all other elements. - SK_Reverse, ///< Reverse the order of the vector. - SK_Select, ///< Selects elements from the corresponding lane of - ///< either source operand. This is equivalent to a - ///< vector select with a constant condition operand. - SK_Transpose, ///< Transpose two vectors. - SK_InsertSubvector, ///< InsertSubvector. Index indicates start offset. - SK_ExtractSubvector,///< ExtractSubvector Index indicates start offset. - SK_PermuteTwoSrc, ///< Merge elements from two source vectors into one - ///< with any shuffle mask. - SK_PermuteSingleSrc ///< Shuffle elements of single source vector with any - ///< shuffle mask. + SK_Broadcast, ///< Broadcast element 0 to all other elements. + SK_Reverse, ///< Reverse the order of the vector. + SK_Select, ///< Selects elements from the corresponding lane of + ///< either source operand. This is equivalent to a + ///< vector select with a constant condition operand. + SK_Transpose, ///< Transpose two vectors. + SK_InsertSubvector, ///< InsertSubvector. Index indicates start offset. + SK_ExtractSubvector, ///< ExtractSubvector Index indicates start offset. + SK_PermuteTwoSrc, ///< Merge elements from two source vectors into one + ///< with any shuffle mask. + SK_PermuteSingleSrc ///< Shuffle elements of single source vector with any + ///< shuffle mask. }; /// Additional information about an operand's possible values. @@ -781,19 +781,20 @@ public: unsigned getNumberOfRegisters(unsigned ClassID) const; /// \return the target-provided register class ID for the provided type, - /// accounting for type promotion and other type-legalization techniques that the target might apply. - /// However, it specifically does not account for the scalarization or splitting of vector types. - /// Should a vector type require scalarization or splitting into multiple underlying vector registers, - /// that type should be mapped to a register class containing no registers. - /// Specifically, this is designed to provide a simple, high-level view of the register allocation - /// later performed by the backend. These register classes don't necessarily map onto the - /// register classes used by the backend. + /// accounting for type promotion and other type-legalization techniques that + /// the target might apply. 
However, it specifically does not account for the + /// scalarization or splitting of vector types. Should a vector type require + /// scalarization or splitting into multiple underlying vector registers, that + /// type should be mapped to a register class containing no registers. + /// Specifically, this is designed to provide a simple, high-level view of the + /// register allocation later performed by the backend. These register classes + /// don't necessarily map onto the register classes used by the backend. /// FIXME: It's not currently possible to determine how many registers /// are used by the provided type. unsigned getRegisterClassForType(bool Vector, Type *Ty = nullptr) const; /// \return the target-provided register class name - const char* getRegisterClassName(unsigned ClassID) const; + const char *getRegisterClassName(unsigned ClassID) const; /// \return The width of the largest scalar or vector register type. unsigned getRegisterBitWidth(bool Vector) const; @@ -825,8 +826,8 @@ public: /// The possible cache levels enum class CacheLevel { - L1D, // The L1 data cache - L2D, // The L2 data cache + L1D, // The L1 data cache + L2D, // The L2 data cache // We currently do not model L3 caches, as their sizes differ widely between // microarchitectures. Also, we currently do not have a use for L3 cache @@ -863,8 +864,7 @@ public: /// stride. unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, - unsigned NumPrefetches, - bool HasCall) const; + unsigned NumPrefetches, bool HasCall) const; /// \return The maximum number of iterations to prefetch ahead. If /// the required number of iterations is more than this number, no @@ -933,8 +933,8 @@ public: /// \returns The expected cost of compare and select instructions. If there /// is an existing instruction that holds Opcode, it may be passed in the /// 'I' parameter. - int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, - Type *CondTy = nullptr, const Instruction *I = nullptr) const; + int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy = nullptr, + const Instruction *I = nullptr) const; /// \return The expected cost of vector Insert and Extract. /// Use -1 to indicate that there is no information on the index value. @@ -1068,13 +1068,10 @@ public: /// Calculates the operand types to use when copying \p RemainingBytes of /// memory, where source and destination alignments are \p SrcAlign and /// \p DestAlign respectively. - void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut, - LLVMContext &Context, - unsigned RemainingBytes, - unsigned SrcAddrSpace, - unsigned DestAddrSpace, - unsigned SrcAlign, - unsigned DestAlign) const; + void getMemcpyLoopResidualLoweringType( + SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context, + unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace, + unsigned SrcAlign, unsigned DestAlign) const; /// \returns True if the two functions have compatible attributes for inlining /// purposes. @@ -1091,11 +1088,11 @@ public: /// The type of load/store indexing. enum MemIndexedMode { - MIM_Unindexed, ///< No indexing. - MIM_PreInc, ///< Pre-incrementing. - MIM_PreDec, ///< Pre-decrementing. - MIM_PostInc, ///< Post-incrementing. - MIM_PostDec ///< Post-decrementing. + MIM_Unindexed, ///< No indexing. + MIM_PreInc, ///< Pre-incrementing. + MIM_PreDec, ///< Pre-decrementing. + MIM_PostInc, ///< Post-incrementing. + MIM_PostDec ///< Post-decrementing. }; /// \returns True if the specified indexed load for the given type is legal. 
@@ -1159,9 +1156,9 @@ public: /// \name Vector Predication Information /// @{ - /// Whether the target supports the %evl parameter of VP intrinsic efficiently in hardware. - /// (see LLVM Language Reference - "Vector Predication Intrinsics") - /// Use of %evl is discouraged when that is not the case. + /// Whether the target supports the %evl parameter of VP intrinsic efficiently + /// in hardware. (see LLVM Language Reference - "Vector Predication + /// Intrinsics") Use of %evl is discouraged when that is not the case. bool hasActiveVectorLength() const; /// @} @@ -1204,12 +1201,11 @@ public: ArrayRef<const Value *> Arguments, const User *U) = 0; virtual int getMemcpyCost(const Instruction *I) = 0; - virtual unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI, - unsigned &JTSize, - ProfileSummaryInfo *PSI, - BlockFrequencyInfo *BFI) = 0; - virtual int - getUserCost(const User *U, ArrayRef<const Value *> Operands) = 0; + virtual unsigned + getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JTSize, + ProfileSummaryInfo *PSI, + BlockFrequencyInfo *BFI) = 0; + virtual int getUserCost(const User *U, ArrayRef<const Value *> Operands) = 0; virtual bool hasBranchDivergence() = 0; virtual bool useGPUDivergenceAnalysis() = 0; virtual bool isSourceOfDivergence(const Value *V) = 0; @@ -1217,8 +1213,8 @@ public: virtual unsigned getFlatAddressSpace() = 0; virtual bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes, Intrinsic::ID IID) const = 0; - virtual bool rewriteIntrinsicWithAddressSpace( - IntrinsicInst *II, Value *OldV, Value *NewV) const = 0; + virtual bool rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, + Value *NewV) const = 0; virtual bool isLoweredToCall(const Function *F) = 0; virtual void getUnrollingPreferences(Loop *L, ScalarEvolution &, UnrollingPreferences &UP) = 0; @@ -1226,18 +1222,15 @@ public: AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) = 0; - virtual bool preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, - ScalarEvolution &SE, - AssumptionCache &AC, - TargetLibraryInfo *TLI, - DominatorTree *DT, - const LoopAccessInfo *LAI) = 0; + virtual bool + preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, ScalarEvolution &SE, + AssumptionCache &AC, TargetLibraryInfo *TLI, + DominatorTree *DT, const LoopAccessInfo *LAI) = 0; virtual bool isLegalAddImmediate(int64_t Imm) = 0; virtual bool isLegalICmpImmediate(int64_t Imm) = 0; virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, - int64_t Scale, - unsigned AddrSpace, + int64_t Scale, unsigned AddrSpace, Instruction *I) = 0; virtual bool isLSRCostLess(TargetTransformInfo::LSRCost &C1, TargetTransformInfo::LSRCost &C2) = 0; @@ -1269,10 +1262,11 @@ public: virtual bool shouldBuildLookupTables() = 0; virtual bool shouldBuildLookupTablesForConstant(Constant *C) = 0; virtual bool useColdCCForColdCall(Function &F) = 0; + virtual unsigned getScalarizationOverhead(Type *Ty, bool Insert, + bool Extract) = 0; virtual unsigned - getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) = 0; - virtual unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args, - unsigned VF) = 0; + getOperandsScalarizationOverhead(ArrayRef<const Value *> Args, + unsigned VF) = 0; virtual bool supportsEfficientVectorElementLoadStore() = 0; virtual bool enableAggressiveInterleaving(bool LoopHasReductions) = 0; virtual MemCmpExpansionOptions @@ -1289,16 +1283,17 @@ public: virtual bool haveFastSqrt(Type *Ty) = 0; virtual 
bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) = 0; virtual int getFPOpCost(Type *Ty) = 0; - virtual int getIntImmCodeSizeCost(unsigned Opc, unsigned Idx, const APInt &Imm, - Type *Ty) = 0; + virtual int getIntImmCodeSizeCost(unsigned Opc, unsigned Idx, + const APInt &Imm, Type *Ty) = 0; virtual int getIntImmCost(const APInt &Imm, Type *Ty) = 0; virtual int getIntImmCostInst(unsigned Opc, unsigned Idx, const APInt &Imm, Type *Ty) = 0; virtual int getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm, Type *Ty) = 0; virtual unsigned getNumberOfRegisters(unsigned ClassID) const = 0; - virtual unsigned getRegisterClassForType(bool Vector, Type *Ty = nullptr) const = 0; - virtual const char* getRegisterClassName(unsigned ClassID) const = 0; + virtual unsigned getRegisterClassForType(bool Vector, + Type *Ty = nullptr) const = 0; + virtual const char *getRegisterClassName(unsigned ClassID) const = 0; virtual unsigned getRegisterBitWidth(bool Vector) const = 0; virtual unsigned getMinVectorRegisterBitWidth() = 0; virtual bool shouldMaximizeVectorBandwidth(bool OptSize) const = 0; @@ -1307,7 +1302,8 @@ public: const Instruction &I, bool &AllowPromotionWithoutCommonHeader) = 0; virtual unsigned getCacheLineSize() const = 0; virtual llvm::Optional<unsigned> getCacheSize(CacheLevel Level) const = 0; - virtual llvm::Optional<unsigned> getCacheAssociativity(CacheLevel Level) const = 0; + virtual llvm::Optional<unsigned> + getCacheAssociativity(CacheLevel Level) const = 0; /// \return How much before a load we should place the prefetch /// instruction. This is currently measured in number of @@ -1346,8 +1342,8 @@ public: virtual int getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index) = 0; virtual int getCFInstrCost(unsigned Opcode) = 0; - virtual int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, - Type *CondTy, const Instruction *I) = 0; + virtual int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, + const Instruction *I) = 0; virtual int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) = 0; virtual int getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment, @@ -1358,13 +1354,11 @@ public: virtual int getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr, bool VariableMask, unsigned Alignment, const Instruction *I = nullptr) = 0; - virtual int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, - unsigned Factor, - ArrayRef<unsigned> Indices, - unsigned Alignment, - unsigned AddressSpace, - bool UseMaskForCond = false, - bool UseMaskForGaps = false) = 0; + virtual int + getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, + ArrayRef<unsigned> Indices, unsigned Alignment, + unsigned AddressSpace, bool UseMaskForCond = false, + bool UseMaskForGaps = false) = 0; virtual int getArithmeticReductionCost(unsigned Opcode, Type *Ty, bool IsPairwiseForm) = 0; virtual int getMinMaxReductionCost(Type *Ty, Type *CondTy, @@ -1394,8 +1388,7 @@ public: unsigned DestAlign) const = 0; virtual void getMemcpyLoopResidualLoweringType( SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context, - unsigned RemainingBytes, - unsigned SrcAddrSpace, unsigned DestAddrSpace, + unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace, unsigned SrcAlign, unsigned DestAlign) const = 0; virtual bool areInlineCompatible(const Function *Caller, const Function *Callee) const = 0; @@ -1403,7 +1396,7 @@ public: areFunctionArgsABICompatible(const Function *Caller, const Function *Callee, 
SmallPtrSetImpl<Argument *> &Args) const = 0; virtual bool isIndexedLoadLegal(MemIndexedMode Mode, Type *Ty) const = 0; - virtual bool isIndexedStoreLegal(MemIndexedMode Mode,Type *Ty) const = 0; + virtual bool isIndexedStoreLegal(MemIndexedMode Mode, Type *Ty) const = 0; virtual unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const = 0; virtual bool isLegalToVectorizeLoad(LoadInst *LI) const = 0; virtual bool isLegalToVectorizeStore(StoreInst *SI) const = 0; @@ -1456,7 +1449,8 @@ public: return Impl.getInlinerVectorBonusPercent(); } int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy, - ArrayRef<Type *> ParamTys, const User *U = nullptr) override { + ArrayRef<Type *> ParamTys, + const User *U = nullptr) override { return Impl.getIntrinsicCost(IID, RetTy, ParamTys, U); } int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy, @@ -1471,7 +1465,9 @@ public: return Impl.getUserCost(U, Operands); } bool hasBranchDivergence() override { return Impl.hasBranchDivergence(); } - bool useGPUDivergenceAnalysis() override { return Impl.useGPUDivergenceAnalysis(); } + bool useGPUDivergenceAnalysis() override { + return Impl.useGPUDivergenceAnalysis(); + } bool isSourceOfDivergence(const Value *V) override { return Impl.isSourceOfDivergence(V); } @@ -1480,17 +1476,15 @@ public: return Impl.isAlwaysUniform(V); } - unsigned getFlatAddressSpace() override { - return Impl.getFlatAddressSpace(); - } + unsigned getFlatAddressSpace() override { return Impl.getFlatAddressSpace(); } bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes, Intrinsic::ID IID) const override { return Impl.collectFlatAddressOperands(OpIndexes, IID); } - bool rewriteIntrinsicWithAddressSpace( - IntrinsicInst *II, Value *OldV, Value *NewV) const override { + bool rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, + Value *NewV) const override { return Impl.rewriteIntrinsicWithAddressSpace(II, OldV, NewV); } @@ -1502,8 +1496,7 @@ public: return Impl.getUnrollingPreferences(L, SE, UP); } bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, - AssumptionCache &AC, - TargetLibraryInfo *LibInfo, + AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) override { return Impl.isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo); } @@ -1520,28 +1513,22 @@ public: return Impl.isLegalICmpImmediate(Imm); } bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, - bool HasBaseReg, int64_t Scale, - unsigned AddrSpace, + bool HasBaseReg, int64_t Scale, unsigned AddrSpace, Instruction *I) override { - return Impl.isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg, - Scale, AddrSpace, I); + return Impl.isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg, Scale, + AddrSpace, I); } bool isLSRCostLess(TargetTransformInfo::LSRCost &C1, TargetTransformInfo::LSRCost &C2) override { return Impl.isLSRCostLess(C1, C2); } - bool canMacroFuseCmp() override { - return Impl.canMacroFuseCmp(); - } - bool canSaveCmp(Loop *L, BranchInst **BI, - ScalarEvolution *SE, - LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC, - TargetLibraryInfo *LibInfo) override { + bool canMacroFuseCmp() override { return Impl.canMacroFuseCmp(); } + bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI, + DominatorTree *DT, AssumptionCache *AC, + TargetLibraryInfo *LibInfo) override { return Impl.canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo); } - bool shouldFavorPostInc() const override { - return Impl.shouldFavorPostInc(); - } + bool shouldFavorPostInc() const override { 
return Impl.shouldFavorPostInc(); } bool shouldFavorBackedgeIndex(const Loop *L) const override { return Impl.shouldFavorBackedgeIndex(L); } @@ -1581,12 +1568,10 @@ public: int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace) override { - return Impl.getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg, - Scale, AddrSpace); - } - bool LSRWithInstrQueries() override { - return Impl.LSRWithInstrQueries(); + return Impl.getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg, Scale, + AddrSpace); } + bool LSRWithInstrQueries() override { return Impl.LSRWithInstrQueries(); } bool isTruncateFree(Type *Ty1, Type *Ty2) override { return Impl.isTruncateFree(Ty1, Ty2); } @@ -1634,9 +1619,9 @@ public: bool isFPVectorizationPotentiallyUnsafe() override { return Impl.isFPVectorizationPotentiallyUnsafe(); } - bool allowsMisalignedMemoryAccesses(LLVMContext &Context, - unsigned BitWidth, unsigned AddressSpace, - unsigned Alignment, bool *Fast) override { + bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, + unsigned AddressSpace, unsigned Alignment, + bool *Fast) override { return Impl.allowsMisalignedMemoryAccesses(Context, BitWidth, AddressSpace, Alignment, Fast); } @@ -1669,10 +1654,11 @@ public: unsigned getNumberOfRegisters(unsigned ClassID) const override { return Impl.getNumberOfRegisters(ClassID); } - unsigned getRegisterClassForType(bool Vector, Type *Ty = nullptr) const override { + unsigned getRegisterClassForType(bool Vector, + Type *Ty = nullptr) const override { return Impl.getRegisterClassForType(Vector, Ty); } - const char* getRegisterClassName(unsigned ClassID) const override { + const char *getRegisterClassName(unsigned ClassID) const override { return Impl.getRegisterClassName(ClassID); } unsigned getRegisterBitWidth(bool Vector) const override { @@ -1692,13 +1678,12 @@ public: return Impl.shouldConsiderAddressTypePromotion( I, AllowPromotionWithoutCommonHeader); } - unsigned getCacheLineSize() const override { - return Impl.getCacheLineSize(); - } + unsigned getCacheLineSize() const override { return Impl.getCacheLineSize(); } llvm::Optional<unsigned> getCacheSize(CacheLevel Level) const override { return Impl.getCacheSize(Level); } - llvm::Optional<unsigned> getCacheAssociativity(CacheLevel Level) const override { + llvm::Optional<unsigned> + getCacheAssociativity(CacheLevel Level) const override { return Impl.getCacheAssociativity(Level); } @@ -1798,22 +1783,21 @@ public: bool IsPairwiseForm) override { return Impl.getArithmeticReductionCost(Opcode, Ty, IsPairwiseForm); } - int getMinMaxReductionCost(Type *Ty, Type *CondTy, - bool IsPairwiseForm, bool IsUnsigned) override { + int getMinMaxReductionCost(Type *Ty, Type *CondTy, bool IsPairwiseForm, + bool IsUnsigned) override { return Impl.getMinMaxReductionCost(Ty, CondTy, IsPairwiseForm, IsUnsigned); - } - int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy, - ArrayRef<Type *> Tys, FastMathFlags FMF, - unsigned ScalarizationCostPassed, - const Instruction *I) override { - return Impl.getIntrinsicInstrCost(ID, RetTy, Tys, FMF, - ScalarizationCostPassed, I); - } - int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy, - ArrayRef<Value *> Args, FastMathFlags FMF, - unsigned VF, const Instruction *I) override { - return Impl.getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF, I); - } + } + int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy, ArrayRef<Type *> Tys, + FastMathFlags FMF, unsigned ScalarizationCostPassed, + const 
Instruction *I) override { + return Impl.getIntrinsicInstrCost(ID, RetTy, Tys, FMF, + ScalarizationCostPassed, I); + } + int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy, + ArrayRef<Value *> Args, FastMathFlags FMF, + unsigned VF, const Instruction *I) override { + return Impl.getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF, I); + } int getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) override { return Impl.getCallInstrCost(F, RetTy, Tys); @@ -1843,17 +1827,13 @@ public: unsigned SrcAddrSpace, unsigned DestAddrSpace, unsigned SrcAlign, unsigned DestAlign) const override { - return Impl.getMemcpyLoopLoweringType(Context, Length, - SrcAddrSpace, DestAddrSpace, - SrcAlign, DestAlign); - } - void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut, - LLVMContext &Context, - unsigned RemainingBytes, - unsigned SrcAddrSpace, - unsigned DestAddrSpace, - unsigned SrcAlign, - unsigned DestAlign) const override { + return Impl.getMemcpyLoopLoweringType(Context, Length, SrcAddrSpace, + DestAddrSpace, SrcAlign, DestAlign); + } + void getMemcpyLoopResidualLoweringType( + SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context, + unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace, + unsigned SrcAlign, unsigned DestAlign) const override { Impl.getMemcpyLoopResidualLoweringType(OpsOut, Context, RemainingBytes, SrcAddrSpace, DestAddrSpace, SrcAlign, DestAlign); @@ -2022,6 +2002,6 @@ public: /// clients. ImmutablePass *createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA); -} // End llvm namespace +} // namespace llvm #endif |