Diffstat (limited to 'llvm')
57 files changed, 1784 insertions, 685 deletions
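Note on the LegalizeFloatTypes change further down: the new SoftPromoteHalfRes_FABS and SoftPromoteHalfRes_FNEG handlers skip the float round-trip and work directly on the soft-promoted i16 bits, since IEEE-754 binary16 (and bfloat16, as the AMDGPU test updates show) keeps the sign in the top bit. Below is a minimal standalone sketch of that bit trick under those assumptions; it is an illustration only, not LLVM code, and the helper names are made up for the example.

#include <cassert>
#include <cstdint>

// fabs on a binary16 value held as raw i16 bits: clear the sign bit.
static uint16_t fabs_f16_bits(uint16_t Bits) { return Bits & 0x7fff; }
// fneg on a binary16 value held as raw i16 bits: flip the sign bit.
static uint16_t fneg_f16_bits(uint16_t Bits) { return Bits ^ 0x8000; }

int main() {
  // 0x4000 is +2.0 and 0xC000 is -2.0 in IEEE-754 binary16.
  assert(fabs_f16_bits(0xC000) == 0x4000);
  assert(fneg_f16_bits(0x4000) == 0xC000);
  return 0;
}

The same top-bit reasoning is what lets the ARM and AMDGPU test diffs in this patch collapse the old convert/multiply sequences into single AND, XOR, or OR instructions on the 16-bit (or packed 32-bit) storage values.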
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h index 88691b9..73f2c55 100644 --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -847,8 +847,7 @@ public: /// This is usually true on most targets. But some targets, like Thumb1, /// have immediate shift instructions, but no immediate "and" instruction; /// this makes the fold unprofitable. - virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N, - CombineLevel Level) const { + virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N) const { return true; } diff --git a/llvm/include/llvm/IR/ConstantFPRange.h b/llvm/include/llvm/IR/ConstantFPRange.h index face5da..d47f6c0 100644 --- a/llvm/include/llvm/IR/ConstantFPRange.h +++ b/llvm/include/llvm/IR/ConstantFPRange.h @@ -216,6 +216,12 @@ public: /// Get the range without infinities. It is useful when we apply ninf flag to /// range of operands/results. LLVM_ABI ConstantFPRange getWithoutInf() const; + + /// Return a new range in the specified format with the specified rounding + /// mode. + LLVM_ABI ConstantFPRange + cast(const fltSemantics &DstSem, + APFloat::roundingMode RM = APFloat::rmNearestTiesToEven) const; }; inline raw_ostream &operator<<(raw_ostream &OS, const ConstantFPRange &CR) { diff --git a/llvm/include/llvm/Support/raw_ostream.h b/llvm/include/llvm/Support/raw_ostream.h index f87344e..70916d8 100644 --- a/llvm/include/llvm/Support/raw_ostream.h +++ b/llvm/include/llvm/Support/raw_ostream.h @@ -739,7 +739,7 @@ class LLVM_ABI raw_null_ostream : public raw_pwrite_stream { uint64_t current_pos() const override; public: - explicit raw_null_ostream() = default; + explicit raw_null_ostream() : raw_pwrite_stream(/*Unbuffered=*/true) {} ~raw_null_ostream() override; }; diff --git a/llvm/include/llvm/TargetParser/Triple.h b/llvm/include/llvm/TargetParser/Triple.h index ed2e01c..dc8cd86d 100644 --- a/llvm/include/llvm/TargetParser/Triple.h +++ b/llvm/include/llvm/TargetParser/Triple.h @@ -277,6 +277,7 @@ public: MuslF32, MuslSF, MuslX32, + MuslWALI, LLVM, MSVC, @@ -798,6 +799,12 @@ public: return getObjectFormat() == Triple::DXContainer; } + /// Tests whether the target uses WALI Wasm + bool isWALI() const { + return getArch() == Triple::wasm32 && isOSLinux() && + getEnvironment() == Triple::MuslWALI; + } + /// Tests whether the target is the PS4 platform. bool isPS4() const { return getArch() == Triple::x86_64 && @@ -840,6 +847,7 @@ public: getEnvironment() == Triple::MuslF32 || getEnvironment() == Triple::MuslSF || getEnvironment() == Triple::MuslX32 || + getEnvironment() == Triple::MuslWALI || getEnvironment() == Triple::OpenHOS || isOSLiteOS(); } diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp index 0ca55a26..54e916e 100644 --- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp +++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp @@ -118,6 +118,10 @@ static cl::opt<bool> #endif cl::desc("")); +static cl::opt<bool> PreserveBitcodeUseListOrder( + "preserve-bc-uselistorder", cl::Hidden, cl::init(true), + cl::desc("Preserve use-list order when writing LLVM bitcode.")); + namespace llvm { extern FunctionSummary::ForceSummaryHotnessType ForceSummaryEdgesCold; } @@ -217,7 +221,10 @@ public: bool ShouldPreserveUseListOrder, const ModuleSummaryIndex *Index) : BitcodeWriterBase(Stream, StrtabBuilder), M(M), - VE(M, ShouldPreserveUseListOrder), Index(Index) { + VE(M, PreserveBitcodeUseListOrder.getNumOccurrences() + ? 
PreserveBitcodeUseListOrder + : ShouldPreserveUseListOrder), + Index(Index) { // Assign ValueIds to any callee values in the index that came from // indirect call profiles and were recorded as a GUID not a Value* // (which would have been assigned an ID by the ValueEnumerator). diff --git a/llvm/lib/CodeGen/MachineSink.cpp b/llvm/lib/CodeGen/MachineSink.cpp index d5153b7..cdcb29d9 100644 --- a/llvm/lib/CodeGen/MachineSink.cpp +++ b/llvm/lib/CodeGen/MachineSink.cpp @@ -1209,7 +1209,7 @@ MachineSinking::getBBRegisterPressure(const MachineBasicBlock &MBB, MIE = MBB.instr_begin(); MII != MIE; --MII) { const MachineInstr &MI = *std::prev(MII); - if (MI.isDebugInstr() || MI.isPseudoProbe()) + if (MI.isDebugOrPseudoInstr()) continue; RegisterOperands RegOpers; RegOpers.collect(MI, *TRI, *MRI, false, false); diff --git a/llvm/lib/CodeGen/RegisterPressure.cpp b/llvm/lib/CodeGen/RegisterPressure.cpp index 5f37890..7d4674b 100644 --- a/llvm/lib/CodeGen/RegisterPressure.cpp +++ b/llvm/lib/CodeGen/RegisterPressure.cpp @@ -858,7 +858,7 @@ void RegPressureTracker::recedeSkipDebugValues() { void RegPressureTracker::recede(SmallVectorImpl<VRegMaskOrUnit> *LiveUses) { recedeSkipDebugValues(); - if (CurrPos->isDebugInstr() || CurrPos->isPseudoProbe()) { + if (CurrPos->isDebugOrPseudoInstr()) { // It's possible to only have debug_value and pseudo probe instructions and // hit the start of the block. assert(CurrPos == MBB->begin()); diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index b47274b..b23b190 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -10628,7 +10628,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) { // folding this will increase the total number of instructions. 
if (N0.getOpcode() == ISD::SRL && (N0.getOperand(1) == N1 || N0.hasOneUse()) && - TLI.shouldFoldConstantShiftPairToMask(N, Level)) { + TLI.shouldFoldConstantShiftPairToMask(N)) { if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchShiftAmount, /*AllowUndefs*/ false, /*AllowTypeMismatch*/ true)) { @@ -11207,7 +11207,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) { // fold (srl (shl x, c1), c2) -> (and (shl x, (sub c1, c2), MASK) or // (and (srl x, (sub c2, c1), MASK) if ((N0.getOperand(1) == N1 || N0->hasOneUse()) && - TLI.shouldFoldConstantShiftPairToMask(N, Level)) { + TLI.shouldFoldConstantShiftPairToMask(N)) { auto MatchShiftAmount = [OpSizeInBits](ConstantSDNode *LHS, ConstantSDNode *RHS) { const APInt &LHSC = LHS->getAPIntValue(); diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp index b5f8a61..437d0f4 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp @@ -3313,7 +3313,6 @@ void DAGTypeLegalizer::SoftPromoteHalfResult(SDNode *N, unsigned ResNo) { case ISD::FP_ROUND: R = SoftPromoteHalfRes_FP_ROUND(N); break; // Unary FP Operations - case ISD::FABS: case ISD::FACOS: case ISD::FASIN: case ISD::FATAN: @@ -3329,7 +3328,6 @@ void DAGTypeLegalizer::SoftPromoteHalfResult(SDNode *N, unsigned ResNo) { case ISD::FLOG2: case ISD::FLOG10: case ISD::FNEARBYINT: - case ISD::FNEG: case ISD::FREEZE: case ISD::FRINT: case ISD::FROUND: @@ -3341,6 +3339,12 @@ void DAGTypeLegalizer::SoftPromoteHalfResult(SDNode *N, unsigned ResNo) { case ISD::FTAN: case ISD::FTANH: case ISD::FCANONICALIZE: R = SoftPromoteHalfRes_UnaryOp(N); break; + case ISD::FABS: + R = SoftPromoteHalfRes_FABS(N); + break; + case ISD::FNEG: + R = SoftPromoteHalfRes_FNEG(N); + break; case ISD::AssertNoFPClass: R = SoftPromoteHalfRes_AssertNoFPClass(N); break; @@ -3670,6 +3674,24 @@ SDValue DAGTypeLegalizer::SoftPromoteHalfRes_UnaryOp(SDNode *N) { return DAG.getNode(GetPromotionOpcode(NVT, OVT), dl, MVT::i16, Res); } +SDValue DAGTypeLegalizer::SoftPromoteHalfRes_FABS(SDNode *N) { + SDValue Op = GetSoftPromotedHalf(N->getOperand(0)); + SDLoc dl(N); + + // Clear the sign bit. + return DAG.getNode(ISD::AND, dl, MVT::i16, Op, + DAG.getConstant(0x7fff, dl, MVT::i16)); +} + +SDValue DAGTypeLegalizer::SoftPromoteHalfRes_FNEG(SDNode *N) { + SDValue Op = GetSoftPromotedHalf(N->getOperand(0)); + SDLoc dl(N); + + // Invert the sign bit. 
+ return DAG.getNode(ISD::XOR, dl, MVT::i16, Op, + DAG.getConstant(0x8000, dl, MVT::i16)); +} + SDValue DAGTypeLegalizer::SoftPromoteHalfRes_AssertNoFPClass(SDNode *N) { return GetSoftPromotedHalf(N->getOperand(0)); } diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h index d580ce0..603dc34 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h @@ -832,6 +832,8 @@ private: SDValue SoftPromoteHalfRes_SELECT(SDNode *N); SDValue SoftPromoteHalfRes_SELECT_CC(SDNode *N); SDValue SoftPromoteHalfRes_UnaryOp(SDNode *N); + SDValue SoftPromoteHalfRes_FABS(SDNode *N); + SDValue SoftPromoteHalfRes_FNEG(SDNode *N); SDValue SoftPromoteHalfRes_AssertNoFPClass(SDNode *N); SDValue SoftPromoteHalfRes_XINT_TO_FP(SDNode *N); SDValue SoftPromoteHalfRes_UNDEF(SDNode *N); diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp index 0bc877d..2430d98 100644 --- a/llvm/lib/IR/AsmWriter.cpp +++ b/llvm/lib/IR/AsmWriter.cpp @@ -102,6 +102,10 @@ static cl::opt<bool> PrintProfData( "print-prof-data", cl::Hidden, cl::desc("Pretty print perf data (branch weights, etc) when dumping")); +static cl::opt<bool> PreserveAssemblyUseListOrder( + "preserve-ll-uselistorder", cl::Hidden, cl::init(false), + cl::desc("Preserve use-list order when writing LLVM assembly.")); + // Make virtual table appear in this compilation unit. AssemblyAnnotationWriter::~AssemblyAnnotationWriter() = default; @@ -2939,7 +2943,10 @@ AssemblyWriter::AssemblyWriter(formatted_raw_ostream &o, SlotTracker &Mac, bool IsForDebug, bool ShouldPreserveUseListOrder) : Out(o), TheModule(M), Machine(Mac), TypePrinter(M), AnnotationWriter(AAW), IsForDebug(IsForDebug), - ShouldPreserveUseListOrder(ShouldPreserveUseListOrder) { + ShouldPreserveUseListOrder( + PreserveAssemblyUseListOrder.getNumOccurrences() + ? PreserveAssemblyUseListOrder + : ShouldPreserveUseListOrder) { if (!TheModule) return; for (const GlobalObject &GO : TheModule->global_objects()) @@ -2950,7 +2957,8 @@ AssemblyWriter::AssemblyWriter(formatted_raw_ostream &o, SlotTracker &Mac, AssemblyWriter::AssemblyWriter(formatted_raw_ostream &o, SlotTracker &Mac, const ModuleSummaryIndex *Index, bool IsForDebug) : Out(o), TheIndex(Index), Machine(Mac), TypePrinter(/*Module=*/nullptr), - IsForDebug(IsForDebug), ShouldPreserveUseListOrder(false) {} + IsForDebug(IsForDebug), + ShouldPreserveUseListOrder(PreserveAssemblyUseListOrder) {} void AssemblyWriter::writeOperand(const Value *Operand, bool PrintType) { if (!Operand) { diff --git a/llvm/lib/IR/ConstantFPRange.cpp b/llvm/lib/IR/ConstantFPRange.cpp index 2477e22..070e833 100644 --- a/llvm/lib/IR/ConstantFPRange.cpp +++ b/llvm/lib/IR/ConstantFPRange.cpp @@ -326,6 +326,8 @@ std::optional<bool> ConstantFPRange::getSignBit() const { } bool ConstantFPRange::operator==(const ConstantFPRange &CR) const { + assert(&getSemantics() == &CR.getSemantics() && + "Should only use the same semantics"); if (MayBeSNaN != CR.MayBeSNaN || MayBeQNaN != CR.MayBeQNaN) return false; return Lower.bitwiseIsEqual(CR.Lower) && Upper.bitwiseIsEqual(CR.Upper); @@ -425,3 +427,20 @@ ConstantFPRange ConstantFPRange::getWithoutInf() const { return ConstantFPRange(std::move(NewLower), std::move(NewUpper), MayBeQNaN, MayBeSNaN); } + +ConstantFPRange ConstantFPRange::cast(const fltSemantics &DstSem, + APFloat::roundingMode RM) const { + bool LosesInfo; + APFloat NewLower = Lower; + APFloat NewUpper = Upper; + // For conservative, return full range if conversion is invalid. 
+ if (NewLower.convert(DstSem, RM, &LosesInfo) == APFloat::opInvalidOp || + NewLower.isNaN()) + return getFull(DstSem); + if (NewUpper.convert(DstSem, RM, &LosesInfo) == APFloat::opInvalidOp || + NewUpper.isNaN()) + return getFull(DstSem); + return ConstantFPRange(std::move(NewLower), std::move(NewUpper), + /*MayBeQNaNVal=*/MayBeQNaN || MayBeSNaN, + /*MayBeSNaNVal=*/false); +} diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index 7294f3e..fbce3b0 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -18640,7 +18640,7 @@ bool AArch64TargetLowering::isDesirableToCommuteXorWithShift( } bool AArch64TargetLowering::shouldFoldConstantShiftPairToMask( - const SDNode *N, CombineLevel Level) const { + const SDNode *N) const { assert(((N->getOpcode() == ISD::SHL && N->getOperand(0).getOpcode() == ISD::SRL) || (N->getOpcode() == ISD::SRL && diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h index e472e7d..00956fd 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -300,8 +300,7 @@ public: bool isDesirableToCommuteXorWithShift(const SDNode *N) const override; /// Return true if it is profitable to fold a pair of shifts into a mask. - bool shouldFoldConstantShiftPairToMask(const SDNode *N, - CombineLevel Level) const override; + bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override; /// Return true if it is profitable to fold a pair of shifts into a mask. bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override { diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp index 83c7def..67ea2dd 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -13816,7 +13816,7 @@ bool ARMTargetLowering::isDesirableToCommuteXorWithShift( } bool ARMTargetLowering::shouldFoldConstantShiftPairToMask( - const SDNode *N, CombineLevel Level) const { + const SDNode *N) const { assert(((N->getOpcode() == ISD::SHL && N->getOperand(0).getOpcode() == ISD::SRL) || (N->getOpcode() == ISD::SRL && @@ -13826,7 +13826,8 @@ bool ARMTargetLowering::shouldFoldConstantShiftPairToMask( if (!Subtarget->isThumb1Only()) return true; - if (Level == BeforeLegalizeTypes) + EVT VT = N->getValueType(0); + if (VT.getScalarSizeInBits() > 32) return true; return false; diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h index 26ff54c..70aa001 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.h +++ b/llvm/lib/Target/ARM/ARMISelLowering.h @@ -772,8 +772,7 @@ class VectorType; bool isDesirableToCommuteXorWithShift(const SDNode *N) const override; - bool shouldFoldConstantShiftPairToMask(const SDNode *N, - CombineLevel Level) const override; + bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override; /// Return true if it is profitable to fold a pair of shifts into a mask. 
bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override { diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp index b05de49..7f1ff45 100644 --- a/llvm/lib/Target/Mips/MipsISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp @@ -1306,7 +1306,7 @@ bool MipsTargetLowering::hasBitTest(SDValue X, SDValue Y) const { } bool MipsTargetLowering::shouldFoldConstantShiftPairToMask( - const SDNode *N, CombineLevel Level) const { + const SDNode *N) const { assert(((N->getOpcode() == ISD::SHL && N->getOperand(0).getOpcode() == ISD::SRL) || (N->getOpcode() == ISD::SRL && diff --git a/llvm/lib/Target/Mips/MipsISelLowering.h b/llvm/lib/Target/Mips/MipsISelLowering.h index c65c76c..25a0bf9 100644 --- a/llvm/lib/Target/Mips/MipsISelLowering.h +++ b/llvm/lib/Target/Mips/MipsISelLowering.h @@ -290,8 +290,7 @@ class TargetRegisterClass; bool isCheapToSpeculateCttz(Type *Ty) const override; bool isCheapToSpeculateCtlz(Type *Ty) const override; bool hasBitTest(SDValue X, SDValue Y) const override; - bool shouldFoldConstantShiftPairToMask(const SDNode *N, - CombineLevel Level) const override; + bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override; /// Return the register type for a given MVT, ensuring vectors are treated /// as a series of gpr sized integers. diff --git a/llvm/lib/Target/SPIRV/CMakeLists.txt b/llvm/lib/Target/SPIRV/CMakeLists.txt index 46afe03..eab7b21 100644 --- a/llvm/lib/Target/SPIRV/CMakeLists.txt +++ b/llvm/lib/Target/SPIRV/CMakeLists.txt @@ -36,6 +36,7 @@ add_llvm_target(SPIRVCodeGen SPIRVMetadata.cpp SPIRVModuleAnalysis.cpp SPIRVStructurizer.cpp + SPIRVCombinerHelper.cpp SPIRVPreLegalizer.cpp SPIRVPreLegalizerCombiner.cpp SPIRVPostLegalizer.cpp diff --git a/llvm/lib/Target/SPIRV/SPIRVCombine.td b/llvm/lib/Target/SPIRV/SPIRVCombine.td index 6f726e0..fde56c4 100644 --- a/llvm/lib/Target/SPIRV/SPIRVCombine.td +++ b/llvm/lib/Target/SPIRV/SPIRVCombine.td @@ -11,8 +11,8 @@ include "llvm/Target/GlobalISel/Combine.td" def vector_length_sub_to_distance_lowering : GICombineRule < (defs root:$root), (match (wip_match_opcode G_INTRINSIC):$root, - [{ return matchLengthToDistance(*${root}, MRI); }]), - (apply [{ applySPIRVDistance(*${root}, MRI, B); }]) + [{ return Helper.matchLengthToDistance(*${root}); }]), + (apply [{ Helper.applySPIRVDistance(*${root}); }]) >; def SPIRVPreLegalizerCombiner diff --git a/llvm/lib/Target/SPIRV/SPIRVCombinerHelper.cpp b/llvm/lib/Target/SPIRV/SPIRVCombinerHelper.cpp new file mode 100644 index 0000000..267794c --- /dev/null +++ b/llvm/lib/Target/SPIRV/SPIRVCombinerHelper.cpp @@ -0,0 +1,60 @@ +//===-- SPIRVCombinerHelper.cpp -------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "SPIRVCombinerHelper.h" +#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h" +#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" +#include "llvm/IR/IntrinsicsSPIRV.h" +#include "llvm/Target/TargetMachine.h" + +using namespace llvm; +using namespace MIPatternMatch; + +SPIRVCombinerHelper::SPIRVCombinerHelper( + GISelChangeObserver &Observer, MachineIRBuilder &B, bool IsPreLegalize, + GISelValueTracking *VT, MachineDominatorTree *MDT, const LegalizerInfo *LI, + const SPIRVSubtarget &STI) + : CombinerHelper(Observer, B, IsPreLegalize, VT, MDT, LI), STI(STI) {} + +/// This match is part of a combine that +/// rewrites length(X - Y) to distance(X, Y) +/// (f32 (g_intrinsic length +/// (g_fsub (vXf32 X) (vXf32 Y)))) +/// -> +/// (f32 (g_intrinsic distance +/// (vXf32 X) (vXf32 Y))) +/// +bool SPIRVCombinerHelper::matchLengthToDistance(MachineInstr &MI) const { + if (MI.getOpcode() != TargetOpcode::G_INTRINSIC || + cast<GIntrinsic>(MI).getIntrinsicID() != Intrinsic::spv_length) + return false; + + // First operand of MI is `G_INTRINSIC` so start at operand 2. + Register SubReg = MI.getOperand(2).getReg(); + MachineInstr *SubInstr = MRI.getVRegDef(SubReg); + if (SubInstr->getOpcode() != TargetOpcode::G_FSUB) + return false; + + return true; +} + +void SPIRVCombinerHelper::applySPIRVDistance(MachineInstr &MI) const { + // Extract the operands for X and Y from the match criteria. + Register SubDestReg = MI.getOperand(2).getReg(); + MachineInstr *SubInstr = MRI.getVRegDef(SubDestReg); + Register SubOperand1 = SubInstr->getOperand(1).getReg(); + Register SubOperand2 = SubInstr->getOperand(2).getReg(); + Register ResultReg = MI.getOperand(0).getReg(); + + Builder.setInstrAndDebugLoc(MI); + Builder.buildIntrinsic(Intrinsic::spv_distance, ResultReg) + .addUse(SubOperand1) + .addUse(SubOperand2); + + MI.eraseFromParent(); +} diff --git a/llvm/lib/Target/SPIRV/SPIRVCombinerHelper.h b/llvm/lib/Target/SPIRV/SPIRVCombinerHelper.h new file mode 100644 index 0000000..0b39d34 --- /dev/null +++ b/llvm/lib/Target/SPIRV/SPIRVCombinerHelper.h @@ -0,0 +1,38 @@ +//===-- SPIRVCombinerHelper.h -----------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// This contains common combine transformations that may be used in a combine +/// pass. 
+/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_SPIRV_SPIRVCOMBINERHELPER_H +#define LLVM_LIB_TARGET_SPIRV_SPIRVCOMBINERHELPER_H + +#include "SPIRVSubtarget.h" +#include "llvm/CodeGen/GlobalISel/CombinerHelper.h" + +namespace llvm { +class SPIRVCombinerHelper : public CombinerHelper { +protected: + const SPIRVSubtarget &STI; + +public: + using CombinerHelper::CombinerHelper; + SPIRVCombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B, + bool IsPreLegalize, GISelValueTracking *VT, + MachineDominatorTree *MDT, const LegalizerInfo *LI, + const SPIRVSubtarget &STI); + + bool matchLengthToDistance(MachineInstr &MI) const; + void applySPIRVDistance(MachineInstr &MI) const; +}; + +} // end namespace llvm + +#endif // LLVM_LIB_TARGET_SPIRV_SPIRVCOMBINERHELPER_H diff --git a/llvm/lib/Target/SPIRV/SPIRVPreLegalizerCombiner.cpp b/llvm/lib/Target/SPIRV/SPIRVPreLegalizerCombiner.cpp index 8356751..48f4047 100644 --- a/llvm/lib/Target/SPIRV/SPIRVPreLegalizerCombiner.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVPreLegalizerCombiner.cpp @@ -1,4 +1,3 @@ - //===-- SPIRVPreLegalizerCombiner.cpp - combine legalization ----*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. @@ -13,24 +12,17 @@ //===----------------------------------------------------------------------===// #include "SPIRV.h" -#include "SPIRVTargetMachine.h" +#include "SPIRVCombinerHelper.h" #include "llvm/CodeGen/GlobalISel/CSEInfo.h" #include "llvm/CodeGen/GlobalISel/Combiner.h" -#include "llvm/CodeGen/GlobalISel/CombinerHelper.h" #include "llvm/CodeGen/GlobalISel/CombinerInfo.h" #include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h" #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" #include "llvm/CodeGen/GlobalISel/GISelValueTracking.h" -#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h" #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" -#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" -#include "llvm/CodeGen/GlobalISel/Utils.h" #include "llvm/CodeGen/MachineDominators.h" #include "llvm/CodeGen/MachineFunctionPass.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/TargetOpcodes.h" #include "llvm/CodeGen/TargetPassConfig.h" -#include "llvm/IR/IntrinsicsSPIRV.h" #define GET_GICOMBINER_DEPS #include "SPIRVGenPreLegalizeGICombiner.inc" @@ -47,72 +39,9 @@ namespace { #include "SPIRVGenPreLegalizeGICombiner.inc" #undef GET_GICOMBINER_TYPES -/// This match is part of a combine that -/// rewrites length(X - Y) to distance(X, Y) -/// (f32 (g_intrinsic length -/// (g_fsub (vXf32 X) (vXf32 Y)))) -/// -> -/// (f32 (g_intrinsic distance -/// (vXf32 X) (vXf32 Y))) -/// -bool matchLengthToDistance(MachineInstr &MI, MachineRegisterInfo &MRI) { - if (MI.getOpcode() != TargetOpcode::G_INTRINSIC || - cast<GIntrinsic>(MI).getIntrinsicID() != Intrinsic::spv_length) - return false; - - // First operand of MI is `G_INTRINSIC` so start at operand 2. - Register SubReg = MI.getOperand(2).getReg(); - MachineInstr *SubInstr = MRI.getVRegDef(SubReg); - if (!SubInstr || SubInstr->getOpcode() != TargetOpcode::G_FSUB) - return false; - - return true; -} -void applySPIRVDistance(MachineInstr &MI, MachineRegisterInfo &MRI, - MachineIRBuilder &B) { - - // Extract the operands for X and Y from the match criteria. 
- Register SubDestReg = MI.getOperand(2).getReg(); - MachineInstr *SubInstr = MRI.getVRegDef(SubDestReg); - Register SubOperand1 = SubInstr->getOperand(1).getReg(); - Register SubOperand2 = SubInstr->getOperand(2).getReg(); - - // Remove the original `spv_length` instruction. - - Register ResultReg = MI.getOperand(0).getReg(); - DebugLoc DL = MI.getDebugLoc(); - MachineBasicBlock &MBB = *MI.getParent(); - MachineBasicBlock::iterator InsertPt = MI.getIterator(); - - // Build the `spv_distance` intrinsic. - MachineInstrBuilder NewInstr = - BuildMI(MBB, InsertPt, DL, B.getTII().get(TargetOpcode::G_INTRINSIC)); - NewInstr - .addDef(ResultReg) // Result register - .addIntrinsicID(Intrinsic::spv_distance) // Intrinsic ID - .addUse(SubOperand1) // Operand X - .addUse(SubOperand2); // Operand Y - - SPIRVGlobalRegistry *GR = - MI.getMF()->getSubtarget<SPIRVSubtarget>().getSPIRVGlobalRegistry(); - auto RemoveAllUses = [&](Register Reg) { - SmallVector<MachineInstr *, 4> UsesToErase( - llvm::make_pointer_range(MRI.use_instructions(Reg))); - - // calling eraseFromParent to early invalidates the iterator. - for (auto *MIToErase : UsesToErase) { - GR->invalidateMachineInstr(MIToErase); - MIToErase->eraseFromParent(); - } - }; - RemoveAllUses(SubDestReg); // remove all uses of FSUB Result - GR->invalidateMachineInstr(SubInstr); - SubInstr->eraseFromParent(); // remove FSUB instruction -} - class SPIRVPreLegalizerCombinerImpl : public Combiner { protected: - const CombinerHelper Helper; + const SPIRVCombinerHelper Helper; const SPIRVPreLegalizerCombinerImplRuleConfig &RuleConfig; const SPIRVSubtarget &STI; @@ -147,7 +76,7 @@ SPIRVPreLegalizerCombinerImpl::SPIRVPreLegalizerCombinerImpl( const SPIRVSubtarget &STI, MachineDominatorTree *MDT, const LegalizerInfo *LI) : Combiner(MF, CInfo, TPC, &VT, CSEInfo), - Helper(Observer, B, /*IsPreLegalize*/ true, &VT, MDT, LI), + Helper(Observer, B, /*IsPreLegalize*/ true, &VT, MDT, LI, STI), RuleConfig(RuleConfig), STI(STI), #define GET_GICOMBINER_CONSTRUCTOR_INITS #include "SPIRVGenPreLegalizeGICombiner.inc" diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 1cfcb1f..eea84a2 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -3633,7 +3633,7 @@ bool X86TargetLowering::preferScalarizeSplat(SDNode *N) const { } bool X86TargetLowering::shouldFoldConstantShiftPairToMask( - const SDNode *N, CombineLevel Level) const { + const SDNode *N) const { assert(((N->getOpcode() == ISD::SHL && N->getOperand(0).getOpcode() == ISD::SRL) || (N->getOpcode() == ISD::SRL && @@ -3648,7 +3648,7 @@ bool X86TargetLowering::shouldFoldConstantShiftPairToMask( // the fold for non-splats yet. 
return N->getOperand(1) == N->getOperand(0).getOperand(1); } - return TargetLoweringBase::shouldFoldConstantShiftPairToMask(N, Level); + return TargetLoweringBase::shouldFoldConstantShiftPairToMask(N); } bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const { diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h index b55556a..e28b9c1 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.h +++ b/llvm/lib/Target/X86/X86ISelLowering.h @@ -1244,8 +1244,7 @@ namespace llvm { getJumpConditionMergingParams(Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs) const override; - bool shouldFoldConstantShiftPairToMask(const SDNode *N, - CombineLevel Level) const override; + bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override; bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override; diff --git a/llvm/lib/TargetParser/Triple.cpp b/llvm/lib/TargetParser/Triple.cpp index ac3626d..f021094 100644 --- a/llvm/lib/TargetParser/Triple.cpp +++ b/llvm/lib/TargetParser/Triple.cpp @@ -375,6 +375,8 @@ StringRef Triple::getEnvironmentTypeName(EnvironmentType Kind) { case MuslSF: return "muslsf"; case MuslX32: return "muslx32"; + case MuslWALI: + return "muslwali"; case Simulator: return "simulator"; case Pixel: return "pixel"; case Vertex: return "vertex"; @@ -767,6 +769,7 @@ static Triple::EnvironmentType parseEnvironment(StringRef EnvironmentName) { .StartsWith("muslf32", Triple::MuslF32) .StartsWith("muslsf", Triple::MuslSF) .StartsWith("muslx32", Triple::MuslX32) + .StartsWith("muslwali", Triple::MuslWALI) .StartsWith("musl", Triple::Musl) .StartsWith("msvc", Triple::MSVC) .StartsWith("itanium", Triple::Itanium) diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp index 8c8fc69..6b67b48 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp @@ -544,8 +544,18 @@ Instruction *InstCombinerImpl::foldSelectIntoOp(SelectInst &SI, Value *TrueVal, Value *NewSel = Builder.CreateSelect(SI.getCondition(), Swapped ? C : OOp, Swapped ? OOp : C, "", &SI); - if (isa<FPMathOperator>(&SI)) - cast<Instruction>(NewSel)->setFastMathFlags(FMF); + if (isa<FPMathOperator>(&SI)) { + FastMathFlags NewSelFMF = FMF; + // We cannot propagate ninf from the original select, because OOp may be + // inf and the flag only guarantees that FalseVal (op OOp) is never + // infinity. + // Examples: -inf + +inf = NaN, -inf - -inf = NaN, 0 * inf = NaN + // Specifically, if the original select has both ninf and nnan, we can + // safely propagate the flag. + NewSelFMF.setNoInfs(TVI->hasNoInfs() || + (NewSelFMF.noInfs() && NewSelFMF.noNaNs())); + cast<Instruction>(NewSel)->setFastMathFlags(NewSelFMF); + } NewSel->takeName(TVI); BinaryOperator *BO = BinaryOperator::Create(TVI->getOpcode(), FalseVal, NewSel); diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp index 45d3d49..b9d332b 100644 --- a/llvm/lib/Transforms/Scalar/SROA.cpp +++ b/llvm/lib/Transforms/Scalar/SROA.cpp @@ -2961,6 +2961,7 @@ public: isa<FixedVectorType>(NewAI.getAllocatedType()) ? cast<FixedVectorType>(NewAI.getAllocatedType())->getElementType() : Type::getInt8Ty(NewAI.getContext()); + unsigned AllocatedEltTySize = DL.getTypeSizeInBits(AllocatedEltTy); // Helper to check if a type is // 1. A fixed vector type @@ -2991,10 +2992,17 @@ public: // Do not handle the case if // 1. 
The store does not meet the conditions in the helper function // 2. The store is volatile + // 3. The total store size is not a multiple of the allocated element + // type size if (!IsTypeValidForTreeStructuredMerge( SI->getValueOperand()->getType()) || SI->isVolatile()) return std::nullopt; + auto *VecTy = cast<FixedVectorType>(SI->getValueOperand()->getType()); + unsigned NumElts = VecTy->getNumElements(); + unsigned EltSize = DL.getTypeSizeInBits(VecTy->getElementType()); + if (NumElts * EltSize % AllocatedEltTySize != 0) + return std::nullopt; StoreInfos.emplace_back(SI, S.beginOffset(), S.endOffset(), SI->getValueOperand()); } else { diff --git a/llvm/lib/Transforms/Utils/InstructionNamer.cpp b/llvm/lib/Transforms/Utils/InstructionNamer.cpp index 3ae570c..4f1ff7b 100644 --- a/llvm/lib/Transforms/Utils/InstructionNamer.cpp +++ b/llvm/lib/Transforms/Utils/InstructionNamer.cpp @@ -20,9 +20,8 @@ using namespace llvm; -namespace { -void nameInstructions(Function &F) { - for (auto &Arg : F.args()) { +static void nameInstructions(Function &F) { + for (Argument &Arg : F.args()) { if (!Arg.hasName()) Arg.setName("arg"); } @@ -38,8 +37,6 @@ void nameInstructions(Function &F) { } } -} // namespace - PreservedAnalyses InstructionNamerPass::run(Function &F, FunctionAnalysisManager &FAM) { nameInstructions(F); diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp index cfa8d27..2388375 100644 --- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -2245,6 +2245,26 @@ public: Align Alignment, const int64_t Diff, Value *Ptr0, Value *PtrN, StridedPtrInfo &SPtrInfo) const; + /// Return true if an array of scalar loads can be replaced with a strided + /// load (with run-time stride). + /// \param PointerOps list of pointer arguments of loads. + /// \param ScalarTy type of loads. + /// \param CommonAlignment common alignement of loads as computed by + /// `computeCommonAlignment<LoadInst>`. + /// \param SortedIndicies is a list of indicies computed by this function such + /// that the sequence `PointerOps[SortedIndices[0]], + /// PointerOps[SortedIndicies[1]], ..., PointerOps[SortedIndices[n]]` is + /// ordered by the coefficient of the stride. For example, if PointerOps is + /// `%base + %stride, %base, %base + 2 * stride` the `SortedIndices` will be + /// `[1, 0, 2]`. We follow the convention that if `SortedIndices` has to be + /// `0, 1, 2, 3, ...` we return empty vector for `SortedIndicies`. + /// \param SPtrInfo If the function return `true`, it also sets all the fields + /// of `SPtrInfo` necessary to generate the strided load later. + bool analyzeRtStrideCandidate(ArrayRef<Value *> PointerOps, Type *ScalarTy, + Align CommonAlignment, + SmallVectorImpl<unsigned> &SortedIndices, + StridedPtrInfo &SPtrInfo) const; + /// Checks if the given array of loads can be represented as a vectorized, /// scatter or just simple gather. /// \param VL list of loads. 
@@ -6875,6 +6895,24 @@ bool BoUpSLP::isStridedLoad(ArrayRef<Value *> PointerOps, Type *ScalarTy, return false; } +bool BoUpSLP::analyzeRtStrideCandidate(ArrayRef<Value *> PointerOps, + Type *ScalarTy, Align CommonAlignment, + SmallVectorImpl<unsigned> &SortedIndices, + StridedPtrInfo &SPtrInfo) const { + const unsigned Sz = PointerOps.size(); + FixedVectorType *StridedLoadTy = getWidenedType(ScalarTy, Sz); + if (Sz <= MinProfitableStridedLoads || !TTI->isTypeLegal(StridedLoadTy) || + !TTI->isLegalStridedLoadStore(StridedLoadTy, CommonAlignment)) + return false; + if (const SCEV *Stride = + calculateRtStride(PointerOps, ScalarTy, *DL, *SE, SortedIndices)) { + SPtrInfo.Ty = getWidenedType(ScalarTy, PointerOps.size()); + SPtrInfo.StrideSCEV = Stride; + return true; + } + return false; +} + BoUpSLP::LoadsState BoUpSLP::canVectorizeLoads( ArrayRef<Value *> VL, const Value *VL0, SmallVectorImpl<unsigned> &Order, SmallVectorImpl<Value *> &PointerOps, StridedPtrInfo &SPtrInfo, @@ -6915,15 +6953,9 @@ BoUpSLP::LoadsState BoUpSLP::canVectorizeLoads( auto *VecTy = getWidenedType(ScalarTy, Sz); Align CommonAlignment = computeCommonAlignment<LoadInst>(VL); if (!IsSorted) { - if (Sz > MinProfitableStridedLoads && TTI->isTypeLegal(VecTy)) { - if (const SCEV *Stride = - calculateRtStride(PointerOps, ScalarTy, *DL, *SE, Order); - Stride && TTI->isLegalStridedLoadStore(VecTy, CommonAlignment)) { - SPtrInfo.Ty = getWidenedType(ScalarTy, PointerOps.size()); - SPtrInfo.StrideSCEV = Stride; - return LoadsState::StridedVectorize; - } - } + if (analyzeRtStrideCandidate(PointerOps, ScalarTy, CommonAlignment, Order, + SPtrInfo)) + return LoadsState::StridedVectorize; if (!TTI->isLegalMaskedGather(VecTy, CommonAlignment) || TTI->forceScalarizeMaskedGather(VecTy, CommonAlignment)) @@ -10632,7 +10664,9 @@ class InstructionsCompatibilityAnalysis { void findAndSetMainInstruction(ArrayRef<Value *> VL, const BoUpSLP &R) { BasicBlock *Parent = nullptr; // Checks if the instruction has supported opcode. - auto IsSupportedInstruction = [&](Instruction *I) { + auto IsSupportedInstruction = [&](Instruction *I, bool AnyUndef) { + if (AnyUndef && (I->isIntDivRem() || I->isFPDivRem() || isa<CallInst>(I))) + return false; return I && isSupportedOpcode(I->getOpcode()) && (!doesNotNeedToBeScheduled(I) || !R.isVectorized(I)); }; @@ -10640,10 +10674,13 @@ class InstructionsCompatibilityAnalysis { // will be unable to schedule anyway. 
SmallDenseSet<Value *, 8> Operands; SmallMapVector<unsigned, SmallVector<Instruction *>, 4> Candidates; + bool AnyUndef = false; for (Value *V : VL) { auto *I = dyn_cast<Instruction>(V); - if (!I) + if (!I) { + AnyUndef |= isa<UndefValue>(V); continue; + } if (!DT.isReachableFromEntry(I->getParent())) continue; if (Candidates.empty()) { @@ -10678,7 +10715,7 @@ class InstructionsCompatibilityAnalysis { if (P.second.size() < BestOpcodeNum) continue; for (Instruction *I : P.second) { - if (IsSupportedInstruction(I) && !Operands.contains(I)) { + if (IsSupportedInstruction(I, AnyUndef) && !Operands.contains(I)) { MainOp = I; BestOpcodeNum = P.second.size(); break; diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll index 4b14dc6..7ee0015f 100644 --- a/llvm/test/CodeGen/AMDGPU/bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/bf16.ll @@ -21204,18 +21204,14 @@ define bfloat @v_fabs_bf16(bfloat %a) { ; GCN: ; %bb.0: ; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0 -; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GCN-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0 -; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GCN-NEXT: v_and_b32_e32 v0, 0x7fff0000, v0 ; GCN-NEXT: s_setpc_b64 s[30:31] ; ; GFX7-LABEL: v_fabs_bf16: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 -; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX7-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0 -; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX7-NEXT: v_and_b32_e32 v0, 0x7fff0000, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: v_fabs_bf16: @@ -21440,10 +21436,7 @@ define bfloat @v_fneg_fabs_bf16(bfloat %a) { ; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0 ; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GCN-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0 -; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GCN-NEXT: v_xor_b32_e32 v0, 0x80000000, v0 -; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GCN-NEXT: v_or_b32_e32 v0, 0x80000000, v0 ; GCN-NEXT: s_setpc_b64 s[30:31] ; ; GFX7-LABEL: v_fneg_fabs_bf16: @@ -21451,10 +21444,7 @@ define bfloat @v_fneg_fabs_bf16(bfloat %a) { ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 ; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX7-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0 -; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX7-NEXT: v_xor_b32_e32 v0, 0x80000000, v0 -; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX7-NEXT: v_or_b32_e32 v0, 0x80000000, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: v_fneg_fabs_bf16: @@ -21510,23 +21500,17 @@ define amdgpu_ps i32 @s_fneg_fabs_bf16(bfloat inreg %a) { ; GCN-LABEL: s_fneg_fabs_bf16: ; GCN: ; %bb.0: ; GCN-NEXT: v_mul_f32_e64 v0, 1.0, s0 +; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; GCN-NEXT: v_or_b32_e32 v0, 0x8000, v0 ; GCN-NEXT: v_readfirstlane_b32 s0, v0 -; GCN-NEXT: s_and_b32 s0, s0, 0xffff0000 -; GCN-NEXT: s_bitset0_b32 s0, 31 -; GCN-NEXT: s_and_b32 s0, s0, 0xffff0000 -; GCN-NEXT: s_xor_b32 s0, s0, 0x80000000 -; GCN-NEXT: s_lshr_b32 s0, s0, 16 ; GCN-NEXT: ; return to shader part epilog ; ; GFX7-LABEL: s_fneg_fabs_bf16: ; GFX7: ; %bb.0: ; GFX7-NEXT: v_mul_f32_e64 v0, 1.0, s0 +; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v0, 0x8000, v0 ; GFX7-NEXT: v_readfirstlane_b32 s0, v0 -; GFX7-NEXT: s_and_b32 s0, s0, 0xffff0000 -; GFX7-NEXT: s_bitset0_b32 s0, 31 -; GFX7-NEXT: s_and_b32 s0, s0, 0xffff0000 -; GFX7-NEXT: s_xor_b32 s0, s0, 0x80000000 
-; GFX7-NEXT: s_lshr_b32 s0, s0, 16 ; GFX7-NEXT: ; return to shader part epilog ; ; GFX8-LABEL: s_fneg_fabs_bf16: diff --git a/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll b/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll index 5d184b1..c46fcde 100644 --- a/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll @@ -218,19 +218,11 @@ define amdgpu_kernel void @s_fabs_v4bf16(ptr addrspace(1) %out, <4 x bfloat> %in ; CI-NEXT: s_mov_b32 flat_scratch_lo, s13 ; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8 ; CI-NEXT: s_waitcnt lgkmcnt(0) -; CI-NEXT: s_and_b32 s4, s3, 0xffff0000 -; CI-NEXT: s_lshl_b32 s3, s3, 16 -; CI-NEXT: s_and_b32 s5, s2, 0xffff0000 -; CI-NEXT: v_mul_f32_e64 v0, 1.0, |s4| -; CI-NEXT: v_mul_f32_e64 v1, 1.0, |s3| -; CI-NEXT: v_mul_f32_e64 v2, 1.0, |s5| -; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 -; CI-NEXT: s_lshl_b32 s2, s2, 16 -; CI-NEXT: v_alignbit_b32 v1, v0, v1, 16 -; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v2 -; CI-NEXT: v_mul_f32_e64 v2, 1.0, |s2| -; CI-NEXT: v_alignbit_b32 v0, v0, v2, 16 +; CI-NEXT: s_and_b32 s3, s3, 0x7fff7fff +; CI-NEXT: s_and_b32 s2, s2, 0x7fff7fff ; CI-NEXT: v_mov_b32_e32 v3, s1 +; CI-NEXT: v_mov_b32_e32 v0, s2 +; CI-NEXT: v_mov_b32_e32 v1, s3 ; CI-NEXT: v_mov_b32_e32 v2, s0 ; CI-NEXT: flat_store_dwordx2 v[2:3], v[0:1] ; CI-NEXT: s_endpgm @@ -537,16 +529,15 @@ define amdgpu_kernel void @v_fabs_fold_self_v2bf16(ptr addrspace(1) %out, ptr ad ; CI-NEXT: v_mov_b32_e32 v0, s0 ; CI-NEXT: v_mov_b32_e32 v1, s1 ; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 -; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; CI-NEXT: v_mul_f32_e64 v4, 1.0, |v3| -; CI-NEXT: v_mul_f32_e64 v5, 1.0, |v2| -; CI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 -; CI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 -; CI-NEXT: v_mul_f32_e32 v3, v4, v3 -; CI-NEXT: v_mul_f32_e32 v2, v5, v2 -; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 -; CI-NEXT: v_alignbit_b32 v2, v3, v2, 16 +; CI-NEXT: v_and_b32_e32 v3, 0x7fff, v2 +; CI-NEXT: v_lshlrev_b32_e32 v4, 16, v2 +; CI-NEXT: v_and_b32_e32 v5, 0xffff0000, v2 +; CI-NEXT: v_and_b32_e32 v2, 0x7fff0000, v2 +; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; CI-NEXT: v_mul_f32_e32 v2, v2, v5 +; CI-NEXT: v_mul_f32_e32 v3, v3, v4 +; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; CI-NEXT: v_alignbit_b32 v2, v2, v3, 16 ; CI-NEXT: flat_store_dword v[0:1], v2 ; CI-NEXT: s_endpgm ; @@ -898,16 +889,13 @@ define amdgpu_kernel void @v_extract_fabs_fold_v2bf16(ptr addrspace(1) %in) #0 { ; CI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc ; CI-NEXT: flat_load_dword v0, v[0:1] ; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: v_lshlrev_b32_e32 v1, 16, v0 -; CI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; CI-NEXT: v_mul_f32_e64 v1, 1.0, |v1| -; CI-NEXT: v_mul_f32_e64 v0, 1.0, |v0| -; CI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; CI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; CI-NEXT: v_mul_f32_e32 v1, 4.0, v1 +; CI-NEXT: v_and_b32_e32 v1, 0x7fff, v0 +; CI-NEXT: v_and_b32_e32 v0, 0x7fff0000, v0 +; CI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; CI-NEXT: v_add_f32_e32 v0, 2.0, v0 -; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; CI-NEXT: v_mul_f32_e32 v1, 4.0, v1 ; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 ; CI-NEXT: flat_store_short v[0:1], v1 ; CI-NEXT: s_waitcnt vmcnt(0) ; CI-NEXT: flat_store_short v[0:1], v0 diff --git a/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll b/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll index 64a9727..76da0aa 100644 --- a/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll @@ -107,12 +107,10 @@ define amdgpu_kernel 
void @fneg_fabs_fmul_bf16(ptr addrspace(1) %out, bfloat %x, ; CI-NEXT: s_mov_b32 flat_scratch_lo, s13 ; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8 ; CI-NEXT: s_waitcnt lgkmcnt(0) -; CI-NEXT: s_and_b32 s3, s2, 0x7fff -; CI-NEXT: s_lshl_b32 s3, s3, 16 -; CI-NEXT: v_mul_f32_e64 v0, -1.0, s3 +; CI-NEXT: s_lshl_b32 s3, s2, 16 ; CI-NEXT: s_and_b32 s2, s2, 0xffff0000 -; CI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; CI-NEXT: v_mul_f32_e32 v0, s2, v0 +; CI-NEXT: v_mov_b32_e32 v0, s3 +; CI-NEXT: v_mul_f32_e64 v0, s2, -|v0| ; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v0 ; CI-NEXT: v_mov_b32_e32 v0, s0 ; CI-NEXT: v_mov_b32_e32 v1, s1 @@ -204,12 +202,10 @@ define amdgpu_kernel void @fneg_fabs_free_bf16(ptr addrspace(1) %out, i16 %in) { ; CI-NEXT: s_mov_b32 flat_scratch_lo, s13 ; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8 ; CI-NEXT: s_waitcnt lgkmcnt(0) -; CI-NEXT: s_and_b32 s2, s2, 0x7fff -; CI-NEXT: s_lshl_b32 s2, s2, 16 -; CI-NEXT: v_mul_f32_e64 v0, -1.0, s2 -; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v0 +; CI-NEXT: s_bitset1_b32 s2, 15 ; CI-NEXT: v_mov_b32_e32 v0, s0 ; CI-NEXT: v_mov_b32_e32 v1, s1 +; CI-NEXT: v_mov_b32_e32 v2, s2 ; CI-NEXT: flat_store_short v[0:1], v2 ; CI-NEXT: s_endpgm ; @@ -279,12 +275,10 @@ define amdgpu_kernel void @fneg_fabs_bf16(ptr addrspace(1) %out, bfloat %in) { ; CI-NEXT: s_mov_b32 flat_scratch_lo, s13 ; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8 ; CI-NEXT: s_waitcnt lgkmcnt(0) -; CI-NEXT: s_and_b32 s2, s2, 0x7fff -; CI-NEXT: s_lshl_b32 s2, s2, 16 -; CI-NEXT: v_mul_f32_e64 v0, -1.0, s2 -; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v0 +; CI-NEXT: s_bitset1_b32 s2, 15 ; CI-NEXT: v_mov_b32_e32 v0, s0 ; CI-NEXT: v_mov_b32_e32 v1, s1 +; CI-NEXT: v_mov_b32_e32 v2, s2 ; CI-NEXT: flat_store_short v[0:1], v2 ; CI-NEXT: s_endpgm ; @@ -345,43 +339,22 @@ define amdgpu_kernel void @fneg_fabs_bf16(ptr addrspace(1) %out, bfloat %in) { } define amdgpu_kernel void @v_fneg_fabs_bf16(ptr addrspace(1) %out, ptr addrspace(1) %in) { -; CI-LABEL: v_fneg_fabs_bf16: -; CI: ; %bb.0: -; CI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0 -; CI-NEXT: s_add_i32 s12, s12, s17 -; CI-NEXT: s_mov_b32 flat_scratch_lo, s13 -; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8 -; CI-NEXT: s_waitcnt lgkmcnt(0) -; CI-NEXT: v_mov_b32_e32 v0, s2 -; CI-NEXT: v_mov_b32_e32 v1, s3 -; CI-NEXT: flat_load_ushort v2, v[0:1] -; CI-NEXT: v_mov_b32_e32 v0, s0 -; CI-NEXT: v_mov_b32_e32 v1, s1 -; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; CI-NEXT: v_mul_f32_e64 v2, 1.0, |v2| -; CI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; CI-NEXT: v_xor_b32_e32 v2, 0x80000000, v2 -; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; CI-NEXT: flat_store_short v[0:1], v2 -; CI-NEXT: s_endpgm -; -; VI-LABEL: v_fneg_fabs_bf16: -; VI: ; %bb.0: -; VI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0 -; VI-NEXT: s_add_i32 s12, s12, s17 -; VI-NEXT: s_mov_b32 flat_scratch_lo, s13 -; VI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8 -; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: v_mov_b32_e32 v0, s2 -; VI-NEXT: v_mov_b32_e32 v1, s3 -; VI-NEXT: flat_load_ushort v2, v[0:1] -; VI-NEXT: v_mov_b32_e32 v0, s0 -; VI-NEXT: v_mov_b32_e32 v1, s1 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_e32 v2, 0x8000, v2 -; VI-NEXT: flat_store_short v[0:1], v2 -; VI-NEXT: s_endpgm +; CIVI-LABEL: v_fneg_fabs_bf16: +; CIVI: ; %bb.0: +; CIVI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0 +; CIVI-NEXT: s_add_i32 s12, s12, s17 +; CIVI-NEXT: s_mov_b32 flat_scratch_lo, s13 +; CIVI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8 +; CIVI-NEXT: s_waitcnt lgkmcnt(0) +; CIVI-NEXT: v_mov_b32_e32 v0, s2 +; CIVI-NEXT: 
v_mov_b32_e32 v1, s3 +; CIVI-NEXT: flat_load_ushort v2, v[0:1] +; CIVI-NEXT: v_mov_b32_e32 v0, s0 +; CIVI-NEXT: v_mov_b32_e32 v1, s1 +; CIVI-NEXT: s_waitcnt vmcnt(0) +; CIVI-NEXT: v_or_b32_e32 v2, 0x8000, v2 +; CIVI-NEXT: flat_store_short v[0:1], v2 +; CIVI-NEXT: s_endpgm ; ; GFX9-LABEL: v_fneg_fabs_bf16: ; GFX9: ; %bb.0: @@ -431,21 +404,13 @@ define amdgpu_kernel void @s_fneg_fabs_v2bf16_non_bc_src(ptr addrspace(1) %out, ; CI-NEXT: s_mov_b32 flat_scratch_lo, s13 ; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8 ; CI-NEXT: s_waitcnt lgkmcnt(0) -; CI-NEXT: s_and_b32 s3, s2, 0xffff0000 -; CI-NEXT: s_lshl_b32 s2, s2, 16 -; CI-NEXT: v_add_f32_e64 v0, s3, 2.0 -; CI-NEXT: v_add_f32_e64 v1, s2, 1.0 -; CI-NEXT: v_readfirstlane_b32 s2, v0 +; CI-NEXT: s_lshl_b32 s3, s2, 16 ; CI-NEXT: s_and_b32 s2, s2, 0xffff0000 -; CI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; CI-NEXT: s_bitset0_b32 s2, 31 -; CI-NEXT: v_and_b32_e32 v0, 0x7fffffff, v1 -; CI-NEXT: s_and_b32 s2, s2, 0xffff0000 -; CI-NEXT: s_xor_b32 s2, s2, 0x80000000 -; CI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; CI-NEXT: s_lshr_b32 s2, s2, 16 -; CI-NEXT: v_xor_b32_e32 v0, 0x80000000, v0 -; CI-NEXT: v_alignbit_b32 v2, s2, v0, 16 +; CI-NEXT: v_add_f32_e64 v1, s2, 2.0 +; CI-NEXT: v_add_f32_e64 v0, s3, 1.0 +; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; CI-NEXT: v_alignbit_b32 v0, v1, v0, 16 +; CI-NEXT: v_or_b32_e32 v2, 0x80008000, v0 ; CI-NEXT: v_mov_b32_e32 v0, s0 ; CI-NEXT: v_mov_b32_e32 v1, s1 ; CI-NEXT: flat_store_dword v[0:1], v2 @@ -566,15 +531,10 @@ define amdgpu_kernel void @s_fneg_fabs_v2bf16_bc_src(ptr addrspace(1) %out, <2 x ; CI-NEXT: s_mov_b32 flat_scratch_lo, s13 ; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8 ; CI-NEXT: s_waitcnt lgkmcnt(0) -; CI-NEXT: s_and_b32 s3, s2, 0x7fff -; CI-NEXT: s_and_b32 s2, s2, 0x7fff0000 -; CI-NEXT: v_mul_f32_e64 v0, -1.0, s2 -; CI-NEXT: s_lshl_b32 s2, s3, 16 -; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 -; CI-NEXT: v_mul_f32_e64 v1, -1.0, s2 -; CI-NEXT: v_alignbit_b32 v2, v0, v1, 16 +; CI-NEXT: s_or_b32 s2, s2, 0x80008000 ; CI-NEXT: v_mov_b32_e32 v0, s0 ; CI-NEXT: v_mov_b32_e32 v1, s1 +; CI-NEXT: v_mov_b32_e32 v2, s2 ; CI-NEXT: flat_store_dword v[0:1], v2 ; CI-NEXT: s_endpgm ; @@ -629,27 +589,11 @@ define amdgpu_kernel void @fneg_fabs_v4bf16(ptr addrspace(1) %out, <4 x bfloat> ; CI-NEXT: s_mov_b32 flat_scratch_lo, s13 ; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8 ; CI-NEXT: s_waitcnt lgkmcnt(0) -; CI-NEXT: s_lshl_b32 s4, s2, 16 -; CI-NEXT: s_and_b32 s2, s2, 0xffff0000 -; CI-NEXT: v_mul_f32_e64 v2, 1.0, |s2| -; CI-NEXT: s_and_b32 s2, s3, 0xffff0000 -; CI-NEXT: s_lshl_b32 s5, s3, 16 -; CI-NEXT: v_mul_f32_e64 v3, 1.0, |s2| -; CI-NEXT: v_mul_f32_e64 v0, 1.0, |s4| -; CI-NEXT: v_mul_f32_e64 v1, 1.0, |s5| -; CI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 -; CI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; CI-NEXT: v_xor_b32_e32 v3, 0x80000000, v3 -; CI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; CI-NEXT: v_xor_b32_e32 v2, 0x80000000, v2 -; CI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 -; CI-NEXT: v_xor_b32_e32 v1, 0x80000000, v1 -; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; CI-NEXT: v_xor_b32_e32 v0, 0x80000000, v0 -; CI-NEXT: v_alignbit_b32 v1, v3, v1, 16 -; CI-NEXT: v_alignbit_b32 v0, v2, v0, 16 +; CI-NEXT: s_or_b32 s3, s3, 0x80008000 +; CI-NEXT: s_or_b32 s2, s2, 0x80008000 ; CI-NEXT: v_mov_b32_e32 v3, s1 +; CI-NEXT: v_mov_b32_e32 v0, s2 +; CI-NEXT: v_mov_b32_e32 v1, s3 ; CI-NEXT: v_mov_b32_e32 v2, s0 ; CI-NEXT: flat_store_dwordx2 v[2:3], v[0:1] ; CI-NEXT: s_endpgm @@ -860,21 +804,20 @@ define 
amdgpu_kernel void @s_fneg_multi_use_fabs_v2bf16(ptr addrspace(1) %out0, ; CI-NEXT: s_mov_b32 flat_scratch_lo, s13 ; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8 ; CI-NEXT: s_waitcnt lgkmcnt(0) -; CI-NEXT: v_mov_b32_e32 v1, s1 -; CI-NEXT: v_mov_b32_e32 v2, s2 -; CI-NEXT: s_and_b32 s1, s4, 0x7fff -; CI-NEXT: s_and_b32 s2, s4, 0x7fff0000 -; CI-NEXT: v_mul_f32_e64 v4, -1.0, s2 -; CI-NEXT: s_lshl_b32 s1, s1, 16 ; CI-NEXT: v_mov_b32_e32 v0, s0 ; CI-NEXT: s_and_b32 s0, s4, 0x7fff7fff -; CI-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; CI-NEXT: v_mul_f32_e64 v5, -1.0, s1 -; CI-NEXT: v_alignbit_b32 v4, v4, v5, 16 -; CI-NEXT: v_mov_b32_e32 v5, s0 +; CI-NEXT: v_mov_b32_e32 v2, s2 +; CI-NEXT: s_or_b32 s2, s0, 0x8000 +; CI-NEXT: v_mov_b32_e32 v1, s1 +; CI-NEXT: s_and_b32 s1, s4, 0x7fff0000 +; CI-NEXT: s_and_b32 s2, s2, 0xffff +; CI-NEXT: s_or_b32 s1, s1, s2 +; CI-NEXT: s_bitset1_b32 s1, 31 +; CI-NEXT: v_mov_b32_e32 v4, s0 ; CI-NEXT: v_mov_b32_e32 v3, s3 -; CI-NEXT: flat_store_dword v[0:1], v5 -; CI-NEXT: flat_store_dword v[2:3], v4 +; CI-NEXT: flat_store_dword v[0:1], v4 +; CI-NEXT: v_mov_b32_e32 v0, s1 +; CI-NEXT: flat_store_dword v[2:3], v0 ; CI-NEXT: s_endpgm ; ; VI-LABEL: s_fneg_multi_use_fabs_v2bf16: @@ -1086,5 +1029,3 @@ declare <4 x bfloat> @llvm.fabs.v4bf16(<4 x bfloat>) #1 attributes #0 = { nounwind } attributes #1 = { nounwind readnone } -;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: -; CIVI: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/fneg.bf16.ll b/llvm/test/CodeGen/AMDGPU/fneg.bf16.ll index d232693..98044a7 100644 --- a/llvm/test/CodeGen/AMDGPU/fneg.bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/fneg.bf16.ll @@ -14,11 +14,10 @@ define amdgpu_kernel void @s_fneg_bf16(ptr addrspace(1) %out, bfloat %in) #0 { ; CI-NEXT: s_mov_b32 flat_scratch_lo, s13 ; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8 ; CI-NEXT: s_waitcnt lgkmcnt(0) -; CI-NEXT: s_lshl_b32 s2, s2, 16 -; CI-NEXT: v_mul_f32_e64 v0, -1.0, s2 -; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v0 +; CI-NEXT: s_xor_b32 s2, s2, 0x8000 ; CI-NEXT: v_mov_b32_e32 v0, s0 ; CI-NEXT: v_mov_b32_e32 v1, s1 +; CI-NEXT: v_mov_b32_e32 v2, s2 ; CI-NEXT: flat_store_short v[0:1], v2 ; CI-NEXT: s_endpgm ; @@ -93,9 +92,7 @@ define amdgpu_kernel void @v_fneg_bf16(ptr addrspace(1) %out, ptr addrspace(1) % ; CI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc ; CI-NEXT: flat_load_ushort v2, v[0:1] ; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; CI-NEXT: v_mul_f32_e32 v2, -1.0, v2 -; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; CI-NEXT: v_xor_b32_e32 v2, 0x8000, v2 ; CI-NEXT: flat_store_short v[0:1], v2 ; CI-NEXT: s_endpgm ; @@ -170,11 +167,10 @@ define amdgpu_kernel void @s_fneg_free_bf16(ptr addrspace(1) %out, i16 %in) #0 { ; CI-NEXT: s_mov_b32 flat_scratch_lo, s13 ; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8 ; CI-NEXT: s_waitcnt lgkmcnt(0) -; CI-NEXT: s_lshl_b32 s2, s2, 16 -; CI-NEXT: v_mul_f32_e64 v0, -1.0, s2 -; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v0 +; CI-NEXT: s_xor_b32 s2, s2, 0x8000 ; CI-NEXT: v_mov_b32_e32 v0, s0 ; CI-NEXT: v_mov_b32_e32 v1, s1 +; CI-NEXT: v_mov_b32_e32 v2, s2 ; CI-NEXT: flat_store_short v[0:1], v2 ; CI-NEXT: s_endpgm ; @@ -248,9 +244,9 @@ define amdgpu_kernel void @v_fneg_fold_bf16(ptr addrspace(1) %out, ptr addrspace ; CI-NEXT: v_mov_b32_e32 v0, s0 ; CI-NEXT: v_mov_b32_e32 v1, s1 ; CI-NEXT: s_waitcnt vmcnt(0) +; CI-NEXT: v_xor_b32_e32 v3, 0x8000, v2 ; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; CI-NEXT: v_mul_f32_e32 v3, -1.0, v2 -; CI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; CI-NEXT: 
v_lshlrev_b32_e32 v3, 16, v3 ; CI-NEXT: v_mul_f32_e32 v2, v3, v2 ; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 ; CI-NEXT: flat_store_short v[0:1], v2 @@ -365,13 +361,13 @@ define amdgpu_kernel void @s_fneg_v2bf16(ptr addrspace(1) %out, <2 x bfloat> %in ; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8 ; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: s_and_b32 s3, s2, 0xffff0000 -; CI-NEXT: s_lshl_b32 s2, s2, 16 -; CI-NEXT: v_mul_f32_e64 v0, -1.0, s3 -; CI-NEXT: v_mul_f32_e64 v1, -1.0, s2 -; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 -; CI-NEXT: v_alignbit_b32 v2, v0, v1, 16 +; CI-NEXT: s_xor_b32 s2, s2, 0x8000 +; CI-NEXT: s_and_b32 s2, s2, 0xffff +; CI-NEXT: s_or_b32 s2, s2, s3 +; CI-NEXT: s_add_i32 s2, s2, 0x80000000 ; CI-NEXT: v_mov_b32_e32 v0, s0 ; CI-NEXT: v_mov_b32_e32 v1, s1 +; CI-NEXT: v_mov_b32_e32 v2, s2 ; CI-NEXT: flat_store_dword v[0:1], v2 ; CI-NEXT: s_endpgm ; @@ -426,16 +422,16 @@ define amdgpu_kernel void @s_fneg_v2bf16_nonload(ptr addrspace(1) %out) #0 { ; CI-NEXT: ; def s2 ; CI-NEXT: ;;#ASMEND ; CI-NEXT: s_and_b32 s3, s2, 0xffff0000 -; CI-NEXT: v_mul_f32_e64 v0, -1.0, s3 -; CI-NEXT: s_lshl_b32 s2, s2, 16 -; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 -; CI-NEXT: v_mul_f32_e64 v1, -1.0, s2 -; CI-NEXT: v_alignbit_b32 v2, v0, v1, 16 +; CI-NEXT: s_xor_b32 s2, s2, 0x8000 +; CI-NEXT: s_and_b32 s2, s2, 0xffff +; CI-NEXT: s_or_b32 s2, s2, s3 +; CI-NEXT: s_add_i32 s2, s2, 0x80000000 ; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: v_mov_b32_e32 v0, s0 ; CI-NEXT: s_mov_b32 flat_scratch_lo, s13 ; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8 ; CI-NEXT: v_mov_b32_e32 v1, s1 +; CI-NEXT: v_mov_b32_e32 v2, s2 ; CI-NEXT: flat_store_dword v[0:1], v2 ; CI-NEXT: s_endpgm ; @@ -501,13 +497,11 @@ define amdgpu_kernel void @v_fneg_v2bf16(ptr addrspace(1) %out, ptr addrspace(1) ; CI-NEXT: v_add_i32_e32 v0, vcc, s0, v0 ; CI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc ; CI-NEXT: flat_load_dword v2, v[0:1] +; CI-NEXT: s_mov_b32 s0, 0xffff ; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 -; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; CI-NEXT: v_mul_f32_e32 v3, -1.0, v3 -; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 -; CI-NEXT: v_mul_f32_e32 v2, -1.0, v2 -; CI-NEXT: v_alignbit_b32 v2, v3, v2, 16 +; CI-NEXT: v_xor_b32_e32 v3, 0x8000, v2 +; CI-NEXT: v_bfi_b32 v2, s0, v3, v2 +; CI-NEXT: v_add_i32_e32 v2, vcc, 0x80000000, v2 ; CI-NEXT: flat_store_dword v[0:1], v2 ; CI-NEXT: s_endpgm ; @@ -570,13 +564,13 @@ define amdgpu_kernel void @fneg_free_v2bf16(ptr addrspace(1) %out, i32 %in) #0 { ; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8 ; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: s_and_b32 s3, s2, 0xffff0000 -; CI-NEXT: s_lshl_b32 s2, s2, 16 -; CI-NEXT: v_mul_f32_e64 v0, -1.0, s3 -; CI-NEXT: v_mul_f32_e64 v1, -1.0, s2 -; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 -; CI-NEXT: v_alignbit_b32 v2, v0, v1, 16 +; CI-NEXT: s_xor_b32 s2, s2, 0x8000 +; CI-NEXT: s_and_b32 s2, s2, 0xffff +; CI-NEXT: s_or_b32 s2, s2, s3 +; CI-NEXT: s_add_i32 s2, s2, 0x80000000 ; CI-NEXT: v_mov_b32_e32 v0, s0 ; CI-NEXT: v_mov_b32_e32 v1, s1 +; CI-NEXT: v_mov_b32_e32 v2, s2 ; CI-NEXT: flat_store_dword v[0:1], v2 ; CI-NEXT: s_endpgm ; @@ -637,16 +631,14 @@ define amdgpu_kernel void @v_fneg_fold_v2bf16(ptr addrspace(1) %out, ptr addrspa ; CI-NEXT: v_mov_b32_e32 v0, s0 ; CI-NEXT: v_mov_b32_e32 v1, s1 ; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 -; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; CI-NEXT: v_mul_f32_e32 v4, -1.0, v3 -; CI-NEXT: v_mul_f32_e32 v5, -1.0, v2 -; CI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 -; CI-NEXT: v_and_b32_e32 v5, 
0xffff0000, v5 -; CI-NEXT: v_mul_f32_e32 v3, v4, v3 -; CI-NEXT: v_mul_f32_e32 v2, v5, v2 -; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 -; CI-NEXT: v_alignbit_b32 v2, v3, v2, 16 +; CI-NEXT: v_xor_b32_e32 v3, 0x8000, v2 +; CI-NEXT: v_lshlrev_b32_e32 v4, 16, v2 +; CI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; CI-NEXT: v_mul_f32_e64 v2, -v2, v2 +; CI-NEXT: v_mul_f32_e32 v3, v3, v4 +; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; CI-NEXT: v_alignbit_b32 v2, v2, v3, 16 ; CI-NEXT: flat_store_dword v[0:1], v2 ; CI-NEXT: s_endpgm ; @@ -912,12 +904,9 @@ define amdgpu_kernel void @v_extract_fneg_no_fold_v2bf16(ptr addrspace(1) %in) # ; CI-NEXT: v_mov_b32_e32 v1, s1 ; CI-NEXT: flat_load_dword v0, v[0:1] ; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: v_and_b32_e32 v1, 0xffff0000, v0 -; CI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; CI-NEXT: v_mul_f32_e32 v1, -1.0, v1 -; CI-NEXT: v_mul_f32_e32 v0, -1.0, v0 -; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v0 +; CI-NEXT: v_xor_b32_e32 v0, 0x8000, v0 +; CI-NEXT: v_xor_b32_e32 v1, 0x8000, v1 ; CI-NEXT: flat_store_short v[0:1], v0 ; CI-NEXT: s_waitcnt vmcnt(0) ; CI-NEXT: flat_store_short v[0:1], v1 diff --git a/llvm/test/CodeGen/ARM/fp16-promote.ll b/llvm/test/CodeGen/ARM/fp16-promote.ll index 800ee87..8230e47 100644 --- a/llvm/test/CodeGen/ARM/fp16-promote.ll +++ b/llvm/test/CodeGen/ARM/fp16-promote.ll @@ -1572,26 +1572,11 @@ define void @test_fma(ptr %p, ptr %q, ptr %r) #0 { } define void @test_fabs(ptr %p) { -; CHECK-FP16-LABEL: test_fabs: -; CHECK-FP16: ldrh r1, [r0] -; CHECK-FP16-NEXT: vmov s0, r1 -; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0 -; CHECK-FP16-NEXT: vabs.f32 s0, s0 -; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0 -; CHECK-FP16-NEXT: vmov r1, s0 -; CHECK-FP16-NEXT: strh r1, [r0] -; CHECK-FP16-NEXT: bx lr -; -; CHECK-LIBCALL-LABEL: test_fabs: -; CHECK-LIBCALL: .save {r4, lr} -; CHECK-LIBCALL-NEXT: push {r4, lr} -; CHECK-LIBCALL-NEXT: mov r4, r0 -; CHECK-LIBCALL-NEXT: ldrh r0, [r0] -; CHECK-LIBCALL-NEXT: bl __aeabi_h2f -; CHECK-LIBCALL-NEXT: bic r0, r0, #-2147483648 -; CHECK-LIBCALL-NEXT: bl __aeabi_f2h -; CHECK-LIBCALL-NEXT: strh r0, [r4] -; CHECK-LIBCALL-NEXT: pop {r4, pc} +; CHECK-ALL-LABEL: test_fabs: +; CHECK-ALL: ldrh r1, [r0] +; CHECK-ALL-NEXT: bfc r1, #15, #17 +; CHECK-ALL-NEXT: strh r1, [r0] +; CHECK-ALL-NEXT: bx lr %a = load half, ptr %p, align 2 %r = call half @llvm.fabs.f16(half %a) store half %r, ptr %p @@ -2454,26 +2439,11 @@ define half @test_sitofp_i32_fadd(i32 %a, half %b) #0 { } define void @test_fneg(ptr %p1, ptr %p2) #0 { -; CHECK-FP16-LABEL: test_fneg: -; CHECK-FP16: ldrh r0, [r0] -; CHECK-FP16-NEXT: vmov s0, r0 -; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0 -; CHECK-FP16-NEXT: vneg.f32 s0, s0 -; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0 -; CHECK-FP16-NEXT: vmov r0, s0 -; CHECK-FP16-NEXT: strh r0, [r1] -; CHECK-FP16-NEXT: bx lr -; -; CHECK-LIBCALL-LABEL: test_fneg: -; CHECK-LIBCALL: .save {r4, lr} -; CHECK-LIBCALL-NEXT: push {r4, lr} -; CHECK-LIBCALL-NEXT: ldrh r0, [r0] -; CHECK-LIBCALL-NEXT: mov r4, r1 -; CHECK-LIBCALL-NEXT: bl __aeabi_h2f -; CHECK-LIBCALL-NEXT: eor r0, r0, #-2147483648 -; CHECK-LIBCALL-NEXT: bl __aeabi_f2h -; CHECK-LIBCALL-NEXT: strh r0, [r4] -; CHECK-LIBCALL-NEXT: pop {r4, pc} +; CHECK-ALL-LABEL: test_fneg: +; CHECK-ALL: ldrh r0, [r0] +; CHECK-ALL-NEXT: eor r0, r0, #32768 +; CHECK-ALL-NEXT: strh r0, [r1] +; CHECK-ALL-NEXT: bx lr %v = load half, ptr %p1, align 2 %res = fneg half %v store half %res, ptr %p2, align 2 diff --git 
a/llvm/test/CodeGen/Generic/bfloat-op.ll b/llvm/test/CodeGen/Generic/bfloat-op.ll new file mode 100644 index 0000000..d593328 --- /dev/null +++ b/llvm/test/CodeGen/Generic/bfloat-op.ll @@ -0,0 +1,104 @@ +; Same as `bfloat.ll`, but for `fneg`, `fabs`, `copysign` and `fma`. +; Can be merged back into `bfloat.ll` once they have the same platform coverage. +; Once all targets are fixed, the `CHECK-*` prefixes should all be merged into a single `CHECK` prefix and the `BAD-*` prefixes should be removed. + +; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-apple-darwin | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,CHECK-FMA %} +; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-pc-windows-msvc | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,CHECK-FMA %} +; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,CHECK-FMA %} +; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=arm64ec-pc-windows-msvc | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,CHECK-FMA %} +; RUN: %if amdgpu-registered-target %{ llc %s -o - -mtriple=amdgcn-amd-amdhsa | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,CHECK-FMA %} +; RUN: %if arc-registered-target %{ llc %s -o - -mtriple=arc-elf | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if arm-registered-target %{ llc %s -o - -mtriple=arm-unknown-linux-gnueabi | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if arm-registered-target %{ llc %s -o - -mtriple=thumbv7em-none-eabi | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if avr-registered-target %{ llc %s -o - -mtriple=avr-none | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %} +; FIXME: BPF has a compiler error +; RUN: %if csky-registered-target %{ llc %s -o - -mtriple=csky-unknown-linux-gnuabiv2 | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %} +; FIXME: hard float csky crashes +; FIXME: directx has a compiler error +; FIXME: hexagon crashes +; RUN: %if lanai-registered-target %{ llc %s -o - -mtriple=lanai-unknown-unknown | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch32-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch64-unknown-linux-gnu -mattr=+f | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if m68k-registered-target %{ llc %s -o - -mtriple=m68k-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %} +; FIXME: mips crashes +; RUN: %if msp430-registered-target %{ llc %s -o - -mtriple=msp430-none-elf | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if nvptx-registered-target %{ llc %s -o - -mtriple=nvptx64-nvidia-cuda | FileCheck %s --check-prefixes=NOCRASH %} +; FIXME: powerpc crashes +; RUN: %if riscv-registered-target %{ llc %s -o - -mtriple=riscv32-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if riscv-registered-target %{ llc %s -o - -mtriple=riscv64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %} +; FIXME: sparc crashes +; FIXME: spirv crashes +; FIXME: s390x crashes +; FIXME: ve crashes +; FIXME: wasm 
crashes +; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=i686-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if xcore-registered-target %{ llc %s -o - -mtriple=xcore-unknown-unknown | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if xtensa-registered-target %{ llc %s -o - -mtriple=xtensa-none-elf | FileCheck %s --check-prefixes=ALL,BAD-COPYSIGN,CHECK-FMA %} + +; Note that arm64ec labels are quoted, hence the `{{"?}}:`. + +; Codegen tests don't work the same for graphics targets. Add a dummy directive +; for filecheck, just make sure we don't crash. +; NOCRASH: {{.*}} + +; fneg, fabs and copysign all need to not quieten signalling NaNs, so should not call any conversion functions which do. +; These tests won't catch cases where everything is done using native instructions instead of builtins. + +define void @test_fneg(ptr %p1, ptr %p2) #0 { +; ALL-LABEL: test_fneg{{"?}}: +; ALL-NEG-NOT: __extend +; ALL-NEG-NOT: __trunc +; ALL-NEG-NOT: __gnu +; ALL-NEG-NOT: __aeabi + %v = load bfloat, ptr %p1 + %res = fneg bfloat %v + store bfloat %res, ptr %p2 + ret void +} + +define void @test_fabs(ptr %p1, ptr %p2) { +; ALL-LABEL: test_fabs{{"?}}: +; ALL-ABS-NOT: __extend +; ALL-ABS-NOT: __trunc +; ALL-ABS-NOT: __gnu +; ALL-ABS-NOT: __aeabi + %a = load bfloat, ptr %p1 + %r = call bfloat @llvm.fabs.f16(bfloat %a) + store bfloat %r, ptr %p2 + ret void +} + +define void @test_copysign(ptr %p1, ptr %p2, ptr %p3) { +; ALL-LABEL: test_copysign{{"?}}: +; CHECK-COPYSIGN-NOT: __extend +; CHECK-COPYSIGN-NOT: __trunc +; CHECK-COPYSIGN-NOT: __gnu +; CHECK-COPYSIGN-NOT: __aeabi +; BAD-COPYSIGN: __truncsfbf2 + %a = load bfloat, ptr %p1 + %b = load bfloat, ptr %p2 + %r = call bfloat @llvm.copysign.f16(bfloat %a, bfloat %b) + store bfloat %r, ptr %p3 + ret void +} + +; There is no floating-point type LLVM supports that is large enough to promote bfloat FMA to +; without causing double rounding issues. This checks for libcalls to f32/f64 fma and truncating +; f32/f64 to bf16. See https://github.com/llvm/llvm-project/issues/131531 + +define void @test_fma(ptr %p1, ptr %p2, ptr %p3, ptr %p4) { +; ALL-LABEL: test_fma{{"?}}: +; CHECK-FMA-NOT: {{\bfmaf?\b}} +; CHECK-FMA-NOT: __truncsfbf2 +; CHECK-FMA-NOT: __truncdfbf2 +; BAD-FMA: {{__truncsfbf2|\bfmaf?\b}} + %a = load bfloat, ptr %p1 + %b = load bfloat, ptr %p2 + %c = load bfloat, ptr %p3 + %r = call bfloat @llvm.fma.f16(bfloat %a, bfloat %b, bfloat %c) + store bfloat %r, ptr %p4 + ret void +} diff --git a/llvm/test/CodeGen/Generic/bfloat.ll b/llvm/test/CodeGen/Generic/bfloat.ll new file mode 100644 index 0000000..83c6711 --- /dev/null +++ b/llvm/test/CodeGen/Generic/bfloat.ll @@ -0,0 +1,75 @@ +; Simple cross-platform smoke checks for basic bf16 operations. +; +; There shouldn't be any architectures that crash when trying to use `bfloat`; +; check that here. Additionally do a small handful of smoke tests that work +; well cross-platform.
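As an aside, the bfloat-op.ll comment above says no LLVM floating-point type is wide enough to promote a bfloat FMA through without double rounding. A minimal standalone C sketch of that hazard on the f32 route follows; it is illustrative only and not part of this patch, and the round_to_bf16 helper is an invented name that only handles normal, in-range values under the default round-to-nearest-even mode.

/* Double rounding when bf16 fma is promoted to f32:
 * a = 0.4375, b = 2.3125, c = -0x1p-30 are all exactly representable in bf16.
 * The exact a*b + c is 259/256 - 2^-30, just below the midpoint of the two
 * bf16 neighbours 1.0078125 and 1.015625, so a correctly rounded bf16 fma
 * must return 1.0078125. fmaf() first rounds the result to f32, landing
 * exactly on the midpoint 259/256, and the second rounding (f32 -> bf16)
 * then resolves that tie to even, giving 1.015625 instead. */
#include <math.h>
#include <stdio.h>

/* Round a double to 8 significand bits, i.e. to the nearest bf16 value. */
static double round_to_bf16(double x) {
  int e;
  double m = frexp(x, &e);                      /* x = m * 2^e, 0.5 <= m < 1 */
  return ldexp(nearbyint(ldexp(m, 8)), e - 8);  /* ties-to-even on 8 bits   */
}

int main(void) {
  double a = 0.4375, b = 2.3125, c = -0x1p-30;
  double exact = a * b + c;                     /* exact in double (31 bits) */
  float via_f32 = fmaf((float)a, (float)b, (float)c);   /* = 1.01171875f    */
  printf("correctly rounded bf16 fma: %.9g\n", round_to_bf16(exact));   /* 1.0078125 */
  printf("fmaf then trunc to bf16:    %.9g\n", round_to_bf16(via_f32)); /* 1.015625  */
  return 0;
}

The same class of failure can be reproduced for any fixed promotion type by widening the exponent gap between the product and the addend, which is why the comment above rules out f64 as well.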
+ +; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-apple-darwin | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-pc-windows-msvc | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; FIXME: arm64ec crashes when passing/returning bfloat +; RUN: %if amdgpu-registered-target %{ llc %s -o - -mtriple=amdgcn-amd-amdhsa | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if arc-registered-target %{ llc %s -o - -mtriple=arc-elf | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if arm-registered-target %{ llc %s -o - -mtriple=arm-unknown-linux-gnueabi | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if arm-registered-target %{ llc %s -o - -mtriple=thumbv7em-none-eabi | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if avr-registered-target %{ llc %s -o - -mtriple=avr-none | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if bpf-registered-target %{ llc %s -o - -mtriple=bpfel | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if csky-registered-target %{ llc %s -o - -mtriple=csky-unknown-linux-gnuabiv2 | FileCheck %s --check-prefixes=ALL,CHECK %} +; FIXME: hard float csky crashes +; RUN: %if directx-registered-target %{ llc %s -o - -mtriple=dxil-pc-shadermodel6.3-library | FileCheck %s --check-prefixes=NOCRASH %} +; FIXME: hexagon crashes +; RUN: %if lanai-registered-target %{ llc %s -o - -mtriple=lanai-unknown-unknown | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch32-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch64-unknown-linux-gnu -mattr=+f | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if m68k-registered-target %{ llc %s -o - -mtriple=m68k-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; FIXME: mips crashes +; RUN: %if msp430-registered-target %{ llc %s -o - -mtriple=msp430-none-elf | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if nvptx-registered-target %{ llc %s -o - -mtriple=nvptx64-nvidia-cuda | FileCheck %s --check-prefixes=NOCRASH %} +; FIXME: powerpc crashes +; RUN: %if riscv-registered-target %{ llc %s -o - -mtriple=riscv32-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if riscv-registered-target %{ llc %s -o - -mtriple=riscv64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; FIXME: sparc crashes +; FIXME: spirv crashes +; FIXME: s390x crashes +; FIXME: ve crashes +; FIXME: wasm crashes +; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=i686-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD %} +; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if xcore-registered-target %{ llc %s -o - -mtriple=xcore-unknown-unknown | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if xtensa-registered-target %{ llc %s -o - -mtriple=xtensa-none-elf | FileCheck %s --check-prefixes=ALL,CHECK %} + +; Note that arm64ec labels are quoted, hence the `{{"?}}:`. 
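The codegen changes earlier in this diff (s_xor_b32 with 0x8000 on AMDGPU, eor #32768 and bfc #15 on ARM, lui a1, 8 plus xor on RISC-V) all rely on fneg, fabs and copysign being pure sign-bit operations on the 16-bit storage format, which is also why they must not be lowered through conversion libcalls that quieten signalling NaNs. A minimal C sketch of those identities follows; it is illustrative only, the helper names are invented here, and the NaN bit positions shown are for IEEE half (bf16 differs only in the exponent/mantissa split, the sign bit is bit 15 in both).

#include <stdint.h>
#include <stdio.h>

/* Sign-bit-only implementations: bit 15 is the sign for both half and bf16. */
static uint16_t fneg16(uint16_t x) { return (uint16_t)(x ^ 0x8000); }
static uint16_t fabs16(uint16_t x) { return (uint16_t)(x & 0x7FFF); }
static uint16_t copysign16(uint16_t mag, uint16_t sgn) {
  return (uint16_t)((mag & 0x7FFF) | (sgn & 0x8000));
}

int main(void) {
  /* 0x7D00 is a signalling NaN in IEEE half (quiet bit 0x0200 is clear).
   * A convert/operate/convert round trip through f32 would set the quiet
   * bit; the sign-bit operations leave the payload untouched. */
  uint16_t snan = 0x7D00;
  printf("fneg:     0x%04X\n", (unsigned)fneg16(snan));             /* 0xFD00 */
  printf("fabs:     0x%04X\n", (unsigned)fabs16(snan));             /* 0x7D00 */
  printf("copysign: 0x%04X\n", (unsigned)copysign16(snan, 0x8000)); /* 0xFD00 */
  return 0;
}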
+ +; Codegen tests don't work the same for graphics targets. Add a dummy directive +; for filecheck, just make sure we don't crash. +; NOCRASH: {{.*}} + +; All backends need to be able to bitcast without converting to another format, +; so we assert against libcalls (specifically __truncsfbf2). This won't catch hardware conversions. + +define bfloat @from_bits(i16 %bits) nounwind { +; ALL-LABEL: from_bits{{"?}}: +; ALL-NOT: __extend +; ALL-NOT: __trunc +; ALL-NOT: __gnu + %f = bitcast i16 %bits to bfloat + ret bfloat %f +} + +define i16 @to_bits(bfloat %f) nounwind { +; ALL-LABEL: to_bits{{"?}}: +; CHECK-NOT: __extend +; CHECK-NOT: __trunc +; CHECK-NOT: __gnu +; BAD: __truncsfbf2 + %bits = bitcast bfloat %f to i16 + ret i16 %bits +} + +define bfloat @check_freeze(bfloat %f) nounwind { +; ALL-LABEL: check_freeze{{"?}}: + %t0 = freeze bfloat %f + ret bfloat %t0 +} diff --git a/llvm/test/CodeGen/Generic/half-op.ll b/llvm/test/CodeGen/Generic/half-op.ll new file mode 100644 index 0000000..1037d8e --- /dev/null +++ b/llvm/test/CodeGen/Generic/half-op.ll @@ -0,0 +1,115 @@ +; Same as `half.ll`, but for `fneg`, `fabs`, `copysign` and `fma`. +; Can be merged back into `half.ll` once BPF doesn't have a compiler error. +; Once all targets are fixed, the `CHECK-*` prefixes should all be merged into a single `CHECK` prefix and the `BAD-*` prefixes should be removed. + +; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-apple-darwin | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,CHECK-FMA %} +; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-pc-windows-msvc | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,CHECK-FMA %} +; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,CHECK-FMA %} +; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=arm64ec-pc-windows-msvc | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,CHECK-FMA %} +; RUN: %if amdgpu-registered-target %{ llc %s -o - -mtriple=amdgcn-amd-amdhsa | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,CHECK-FMA %} +; RUN: %if arc-registered-target %{ llc %s -o - -mtriple=arc-elf | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,BAD-FMA %} +; RUN: %if arm-registered-target %{ llc %s -o - -mtriple=arm-unknown-linux-gnueabi | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if arm-registered-target %{ llc %s -o - -mtriple=thumbv7em-none-eabi | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if avr-registered-target %{ llc %s -o - -mtriple=avr-none | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %} +; FIXME: BPF has a compiler error +; RUN: %if csky-registered-target %{ llc %s -o - -mtriple=csky-unknown-linux-gnuabiv2 | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,BAD-FMA %} +; RUN: %if csky-registered-target %{ llc %s -o - -mtriple=csky-unknown-linux-gnuabiv2 -mcpu=ck860fv -mattr=+hard-float | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,BAD-FMA %} +; FIXME: directx has a compiler error +; RUN: %if hexagon-registered-target %{ llc %s -o - -mtriple=hexagon-unknown-linux-musl | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if lanai-registered-target %{ llc %s -o - -mtriple=lanai-unknown-unknown | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,BAD-FMA %} +; RUN: %if 
loongarch-registered-target %{ llc %s -o - -mtriple=loongarch32-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch64-unknown-linux-gnu -mattr=+f | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if m68k-registered-target %{ llc %s -o - -mtriple=m68k-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,BAD-FMA %} +; RUN: %if mips-registered-target %{ llc %s -o - -mtriple=mips-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if mips-registered-target %{ llc %s -o - -mtriple=mips64-unknown-linux-gnuabi64 | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if mips-registered-target %{ llc %s -o - -mtriple=mips64el-unknown-linux-gnuabi64 | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if mips-registered-target %{ llc %s -o - -mtriple=mipsel-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if msp430-registered-target %{ llc %s -o - -mtriple=msp430-none-elf | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,BAD-FMA %} +; RUN: %if nvptx-registered-target %{ llc %s -o - -mtriple=nvptx64-nvidia-cuda | FileCheck %s --check-prefixes=NOCRASH %} +; RUN: %if powerpc-registered-target %{ llc %s -o - -mtriple=powerpc-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,BAD-FMA %} +; RUN: %if powerpc-registered-target %{ llc %s -o - -mtriple=powerpc64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,BAD-FMA %} +; RUN: %if powerpc-registered-target %{ llc %s -o - -mtriple=powerpc64le-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,BAD-FMA %} +; RUN: %if riscv-registered-target %{ llc %s -o - -mtriple=riscv32-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if riscv-registered-target %{ llc %s -o - -mtriple=riscv64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if sparc-registered-target %{ llc %s -o - -mtriple=sparc-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if sparc-registered-target %{ llc %s -o - -mtriple=sparc64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if spirv-registered-target %{ llc %s -o - -mtriple=spirv-unknown-unknown | FileCheck %s --check-prefixes=NOCRASH %} +; RUN: %if systemz-registered-target %{ llc %s -o - -mtriple=s390x-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if ve-registered-target %{ llc %s -o - -mtriple=ve-unknown-unknown | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,BAD-FMA %} +; RUN: %if webassembly-registered-target %{ llc %s -o - -mtriple=wasm32-unknown-unknown | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,BAD-FMA %} +; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=i686-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=x86_64-pc-windows-msvc | 
FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %} +; RUN: %if xcore-registered-target %{ llc %s -o - -mtriple=xcore-unknown-unknown | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,BAD-FMA %} +; RUN: %if xtensa-registered-target %{ llc %s -o - -mtriple=xtensa-none-elf | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,CHECK-FMA %} + +; Note that arm64ec labels are quoted, hence the `{{"?}}:`. + +; Codegen tests don't work the same for graphics targets. Add a dummy directive +; for filecheck, just make sure we don't crash. +; NOCRASH: {{.*}} + +; fneg, fabs and copysign all need to not quieten signalling NaNs, so should not call any conversion functions which do. +; These tests won't catch cases where everything is done using native instructions instead of builtins. +; See https://github.com/llvm/llvm-project/issues/104915 + +define void @test_fneg(ptr %p1, ptr %p2) #0 { +; ALL-LABEL: test_fneg{{"?}}: +; CHECK-NEG-ABS-NOT: __extend +; CHECK-NEG-ABS-NOT: __trunc +; CHECK-NEG-ABS-NOT: __gnu +; CHECK-NEG-ABS-NOT: __aeabi +; BAD-NEG-ABS: {{__extendhfsf2|__gnu_h2f_ieee|__aeabi_h2f}} + %v = load half, ptr %p1 + %res = fneg half %v + store half %res, ptr %p2 + ret void +} + +define void @test_fabs(ptr %p1, ptr %p2) { +; ALL-LABEL: test_fabs{{"?}}: +; CHECK-NEG-ABS-NOT: __extend +; CHECK-NEG-ABS-NOT: __trunc +; CHECK-NEG-ABS-NOT: __gnu +; CHECK-NEG-ABS-NOT: __aeabi +; BAD-NEG-ABS: {{__extendhfsf2|__gnu_h2f_ieee|__aeabi_h2f}} + %a = load half, ptr %p1 + %r = call half @llvm.fabs.f16(half %a) + store half %r, ptr %p2 + ret void +} + +define void @test_copysign(ptr %p1, ptr %p2, ptr %p3) { +; ALL-LABEL: test_copysign{{"?}}: +; CHECK-COPYSIGN-NOT: __extend +; CHECK-COPYSIGN-NOT: __trunc +; CHECK-COPYSIGN-NOT: __gnu +; CHECK-COPYSIGN-NOT: __aeabi +; BAD-COPYSIGN: {{__extendhfsf2|__gnu_h2f_ieee}} + %a = load half, ptr %p1 + %b = load half, ptr %p2 + %r = call half @llvm.copysign.f16(half %a, half %b) + store half %r, ptr %p3 + ret void +} + +; If promoting, fma must promote at least to f64 to avoid double rounding issues. +; This checks for calls to f32 fmaf and truncating f32 to f16.
+; See https://github.com/llvm/llvm-project/issues/98389 + +define void @test_fma(ptr %p1, ptr %p2, ptr %p3, ptr %p4) { +; ALL-LABEL: test_fma{{"?}}: +; Allow fmaf16 +; CHECK-FMA-NOT: fmaf{{\b}} +; CHECK-FMA-NOT: __truncsfhf2 +; CHECK-FMA-NOT: __gnu_f2h_ieee +; CHECK-FMA-NOT: __aeabi_f2h +; BAD-FMA: {{__truncsfhf2|__gnu_f2h_ieee|__aeabi_f2h|fmaf\b}} + %a = load half, ptr %p1 + %b = load half, ptr %p2 + %c = load half, ptr %p3 + %r = call half @llvm.fma.f16(half %a, half %b, half %c) + store half %r, ptr %p4 + ret void +} diff --git a/llvm/test/CodeGen/RISCV/half-arith.ll b/llvm/test/CodeGen/RISCV/half-arith.ll index 2ebb6e9..d089e36 100644 --- a/llvm/test/CodeGen/RISCV/half-arith.ll +++ b/llvm/test/CodeGen/RISCV/half-arith.ll @@ -514,6 +514,7 @@ define i32 @fneg_h(half %a, half %b) nounwind { ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill ; RV32I-NEXT: lui a1, 16 ; RV32I-NEXT: addi s1, a1, -1 ; RV32I-NEXT: and a0, a0, s1 @@ -521,13 +522,12 @@ define i32 @fneg_h(half %a, half %b) nounwind { ; RV32I-NEXT: mv a1, a0 ; RV32I-NEXT: call __addsf3 ; RV32I-NEXT: call __truncsfhf2 +; RV32I-NEXT: lui a1, 8 +; RV32I-NEXT: xor s2, a0, a1 ; RV32I-NEXT: and a0, a0, s1 ; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lui a0, 524288 -; RV32I-NEXT: xor a0, s0, a0 -; RV32I-NEXT: call __truncsfhf2 -; RV32I-NEXT: and a0, a0, s1 +; RV32I-NEXT: and a0, s2, s1 ; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: mv a1, a0 ; RV32I-NEXT: mv a0, s0 @@ -536,6 +536,7 @@ define i32 @fneg_h(half %a, half %b) nounwind { ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s2, 0(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret ; @@ -545,6 +546,7 @@ define i32 @fneg_h(half %a, half %b) nounwind { ; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill ; RV64I-NEXT: lui a1, 16 ; RV64I-NEXT: addi s1, a1, -1 ; RV64I-NEXT: and a0, a0, s1 @@ -552,13 +554,12 @@ define i32 @fneg_h(half %a, half %b) nounwind { ; RV64I-NEXT: mv a1, a0 ; RV64I-NEXT: call __addsf3 ; RV64I-NEXT: call __truncsfhf2 +; RV64I-NEXT: lui a1, 8 +; RV64I-NEXT: xor s2, a0, a1 ; RV64I-NEXT: and a0, a0, s1 ; RV64I-NEXT: call __extendhfsf2 ; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lui a0, 524288 -; RV64I-NEXT: xor a0, s0, a0 -; RV64I-NEXT: call __truncsfhf2 -; RV64I-NEXT: and a0, a0, s1 +; RV64I-NEXT: and a0, s2, s1 ; RV64I-NEXT: call __extendhfsf2 ; RV64I-NEXT: mv a1, a0 ; RV64I-NEXT: mv a0, s0 @@ -567,6 +568,7 @@ define i32 @fneg_h(half %a, half %b) nounwind { ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 32 ; RV64I-NEXT: ret ; @@ -638,11 +640,7 @@ define half @fsgnjn_h(half %a, half %b) nounwind { ; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: call __addsf3 ; RV32I-NEXT: call __truncsfhf2 -; RV32I-NEXT: and a0, a0, s3 -; RV32I-NEXT: call __extendhfsf2 -; RV32I-NEXT: lui a1, 524288 -; RV32I-NEXT: xor a0, a0, a1 -; RV32I-NEXT: call __truncsfhf2 +; RV32I-NEXT: not a0, a0 ; RV32I-NEXT: lui a1, 1048568 ; RV32I-NEXT: slli s1, s1, 17 ; RV32I-NEXT: and 
a0, a0, a1 @@ -677,11 +675,7 @@ define half @fsgnjn_h(half %a, half %b) nounwind { ; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __addsf3 ; RV64I-NEXT: call __truncsfhf2 -; RV64I-NEXT: and a0, a0, s3 -; RV64I-NEXT: call __extendhfsf2 -; RV64I-NEXT: lui a1, 524288 -; RV64I-NEXT: xor a0, a0, a1 -; RV64I-NEXT: call __truncsfhf2 +; RV64I-NEXT: not a0, a0 ; RV64I-NEXT: lui a1, 1048568 ; RV64I-NEXT: slli s1, s1, 49 ; RV64I-NEXT: and a0, a0, a1 @@ -804,15 +798,14 @@ define half @fabs_h(half %a, half %b) nounwind { ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: call __addsf3 ; RV32I-NEXT: call __truncsfhf2 +; RV32I-NEXT: slli s0, a0, 17 +; RV32I-NEXT: srli s0, s0, 17 ; RV32I-NEXT: and a0, a0, s2 ; RV32I-NEXT: call __extendhfsf2 -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: slli a0, a0, 1 -; RV32I-NEXT: srli a0, a0, 1 -; RV32I-NEXT: call __truncsfhf2 -; RV32I-NEXT: and a0, a0, s2 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __extendhfsf2 -; RV32I-NEXT: mv a1, s0 +; RV32I-NEXT: mv a1, s1 ; RV32I-NEXT: call __addsf3 ; RV32I-NEXT: call __truncsfhf2 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -841,15 +834,14 @@ define half @fabs_h(half %a, half %b) nounwind { ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: call __addsf3 ; RV64I-NEXT: call __truncsfhf2 +; RV64I-NEXT: slli s0, a0, 49 +; RV64I-NEXT: srli s0, s0, 49 ; RV64I-NEXT: and a0, a0, s2 ; RV64I-NEXT: call __extendhfsf2 -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: slli a0, a0, 33 -; RV64I-NEXT: srli a0, a0, 33 -; RV64I-NEXT: call __truncsfhf2 -; RV64I-NEXT: and a0, a0, s2 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __extendhfsf2 -; RV64I-NEXT: mv a1, s0 +; RV64I-NEXT: mv a1, s1 ; RV64I-NEXT: call __addsf3 ; RV64I-NEXT: call __truncsfhf2 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload @@ -1217,25 +1209,21 @@ define half @fmsub_h(half %a, half %b, half %c) nounwind { ; RV32I-NEXT: mv s0, a1 ; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s3, a0, -1 -; RV32I-NEXT: and a0, a2, s3 +; RV32I-NEXT: addi s2, a0, -1 +; RV32I-NEXT: and a0, a2, s2 ; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __addsf3 ; RV32I-NEXT: call __truncsfhf2 -; RV32I-NEXT: and a0, a0, s3 -; RV32I-NEXT: call __extendhfsf2 -; RV32I-NEXT: lui a1, 524288 -; RV32I-NEXT: xor a0, a0, a1 -; RV32I-NEXT: call __truncsfhf2 -; RV32I-NEXT: mv s2, a0 -; RV32I-NEXT: and a0, s1, s3 +; RV32I-NEXT: lui a1, 8 +; RV32I-NEXT: xor s3, a0, a1 +; RV32I-NEXT: and a0, s1, s2 ; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: and a0, s0, s3 +; RV32I-NEXT: and a0, s0, s2 ; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: and a0, s2, s3 +; RV32I-NEXT: and a0, s3, s2 ; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: mv a2, a0 ; RV32I-NEXT: mv a0, s1 @@ -1261,25 +1249,21 @@ define half @fmsub_h(half %a, half %b, half %c) nounwind { ; RV64I-NEXT: mv s0, a1 ; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addi s3, a0, -1 -; RV64I-NEXT: and a0, a2, s3 +; RV64I-NEXT: addi s2, a0, -1 +; RV64I-NEXT: and a0, a2, s2 ; RV64I-NEXT: call __extendhfsf2 ; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __addsf3 ; RV64I-NEXT: call __truncsfhf2 -; RV64I-NEXT: and a0, a0, s3 -; RV64I-NEXT: call __extendhfsf2 -; RV64I-NEXT: lui a1, 524288 -; RV64I-NEXT: xor a0, a0, a1 -; RV64I-NEXT: call __truncsfhf2 -; RV64I-NEXT: mv s2, a0 -; RV64I-NEXT: and a0, s1, s3 +; RV64I-NEXT: lui a1, 8 +; RV64I-NEXT: xor s3, a0, a1 +; RV64I-NEXT: and a0, s1, s2 ; RV64I-NEXT: call __extendhfsf2 ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: 
and a0, s0, s3 +; RV64I-NEXT: and a0, s0, s2 ; RV64I-NEXT: call __extendhfsf2 ; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: and a0, s2, s3 +; RV64I-NEXT: and a0, s3, s2 ; RV64I-NEXT: call __extendhfsf2 ; RV64I-NEXT: mv a2, a0 ; RV64I-NEXT: mv a0, s1 @@ -1355,43 +1339,34 @@ define half @fnmadd_h(half %a, half %b, half %c) nounwind { ; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv s1, a2 -; RV32I-NEXT: mv s0, a1 -; RV32I-NEXT: lui s3, 16 -; RV32I-NEXT: addi s3, s3, -1 +; RV32I-NEXT: mv s0, a2 +; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: lui a1, 16 +; RV32I-NEXT: addi s3, a1, -1 ; RV32I-NEXT: and a0, a0, s3 ; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __addsf3 ; RV32I-NEXT: call __truncsfhf2 ; RV32I-NEXT: mv s2, a0 -; RV32I-NEXT: and a0, s1, s3 +; RV32I-NEXT: and a0, s0, s3 ; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __addsf3 ; RV32I-NEXT: call __truncsfhf2 -; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: and a0, s2, s3 -; RV32I-NEXT: call __extendhfsf2 -; RV32I-NEXT: lui s4, 524288 -; RV32I-NEXT: xor a0, a0, s4 -; RV32I-NEXT: call __truncsfhf2 -; RV32I-NEXT: mv s2, a0 +; RV32I-NEXT: lui a1, 8 +; RV32I-NEXT: xor s2, s2, a1 +; RV32I-NEXT: xor s4, a0, a1 ; RV32I-NEXT: and a0, s1, s3 ; RV32I-NEXT: call __extendhfsf2 -; RV32I-NEXT: xor a0, a0, s4 -; RV32I-NEXT: call __truncsfhf2 -; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: and a0, s0, s3 -; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: mv s0, a0 ; RV32I-NEXT: and a0, s2, s3 ; RV32I-NEXT: call __extendhfsf2 -; RV32I-NEXT: mv s2, a0 -; RV32I-NEXT: and a0, s1, s3 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: and a0, s4, s3 ; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: mv a2, a0 -; RV32I-NEXT: mv a0, s2 +; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s0 ; RV32I-NEXT: call fmaf ; RV32I-NEXT: call __truncsfhf2 @@ -1413,43 +1388,34 @@ define half @fnmadd_h(half %a, half %b, half %c) nounwind { ; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s4, 0(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv s1, a2 -; RV64I-NEXT: mv s0, a1 -; RV64I-NEXT: lui s3, 16 -; RV64I-NEXT: addi s3, s3, -1 +; RV64I-NEXT: mv s0, a2 +; RV64I-NEXT: mv s1, a1 +; RV64I-NEXT: lui a1, 16 +; RV64I-NEXT: addi s3, a1, -1 ; RV64I-NEXT: and a0, a0, s3 ; RV64I-NEXT: call __extendhfsf2 ; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __addsf3 ; RV64I-NEXT: call __truncsfhf2 ; RV64I-NEXT: mv s2, a0 -; RV64I-NEXT: and a0, s1, s3 +; RV64I-NEXT: and a0, s0, s3 ; RV64I-NEXT: call __extendhfsf2 ; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __addsf3 ; RV64I-NEXT: call __truncsfhf2 -; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: and a0, s2, s3 -; RV64I-NEXT: call __extendhfsf2 -; RV64I-NEXT: lui s4, 524288 -; RV64I-NEXT: xor a0, a0, s4 -; RV64I-NEXT: call __truncsfhf2 -; RV64I-NEXT: mv s2, a0 +; RV64I-NEXT: lui a1, 8 +; RV64I-NEXT: xor s2, s2, a1 +; RV64I-NEXT: xor s4, a0, a1 ; RV64I-NEXT: and a0, s1, s3 ; RV64I-NEXT: call __extendhfsf2 -; RV64I-NEXT: xor a0, a0, s4 -; RV64I-NEXT: call __truncsfhf2 -; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: and a0, s0, s3 -; RV64I-NEXT: call __extendhfsf2 ; RV64I-NEXT: mv s0, a0 ; RV64I-NEXT: and a0, s2, s3 ; RV64I-NEXT: call __extendhfsf2 -; RV64I-NEXT: mv s2, a0 -; RV64I-NEXT: and a0, s1, s3 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: and a0, s4, s3 ; RV64I-NEXT: call __extendhfsf2 ; RV64I-NEXT: mv a2, a0 -; RV64I-NEXT: mv a0, s2 +; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s0 ; 
RV64I-NEXT: call fmaf ; RV64I-NEXT: call __truncsfhf2 @@ -1535,44 +1501,35 @@ define half @fnmadd_h_2(half %a, half %b, half %c) nounwind { ; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv s1, a2 -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lui s3, 16 -; RV32I-NEXT: addi s3, s3, -1 +; RV32I-NEXT: mv s0, a2 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lui a0, 16 +; RV32I-NEXT: addi s3, a0, -1 ; RV32I-NEXT: and a0, a1, s3 ; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __addsf3 ; RV32I-NEXT: call __truncsfhf2 ; RV32I-NEXT: mv s2, a0 -; RV32I-NEXT: and a0, s1, s3 +; RV32I-NEXT: and a0, s0, s3 ; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __addsf3 ; RV32I-NEXT: call __truncsfhf2 -; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: and a0, s2, s3 -; RV32I-NEXT: call __extendhfsf2 -; RV32I-NEXT: lui s4, 524288 -; RV32I-NEXT: xor a0, a0, s4 -; RV32I-NEXT: call __truncsfhf2 -; RV32I-NEXT: mv s2, a0 +; RV32I-NEXT: lui a1, 8 +; RV32I-NEXT: xor s2, s2, a1 +; RV32I-NEXT: xor s4, a0, a1 ; RV32I-NEXT: and a0, s1, s3 ; RV32I-NEXT: call __extendhfsf2 -; RV32I-NEXT: xor a0, a0, s4 -; RV32I-NEXT: call __truncsfhf2 -; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: and a0, s0, s3 -; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: mv s0, a0 ; RV32I-NEXT: and a0, s2, s3 ; RV32I-NEXT: call __extendhfsf2 -; RV32I-NEXT: mv s2, a0 -; RV32I-NEXT: and a0, s1, s3 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: and a0, s4, s3 ; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: mv a2, a0 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s2 +; RV32I-NEXT: mv a1, s1 ; RV32I-NEXT: call fmaf ; RV32I-NEXT: call __truncsfhf2 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload @@ -1593,44 +1550,35 @@ define half @fnmadd_h_2(half %a, half %b, half %c) nounwind { ; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s4, 0(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv s1, a2 -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lui s3, 16 -; RV64I-NEXT: addi s3, s3, -1 +; RV64I-NEXT: mv s0, a2 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lui a0, 16 +; RV64I-NEXT: addi s3, a0, -1 ; RV64I-NEXT: and a0, a1, s3 ; RV64I-NEXT: call __extendhfsf2 ; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __addsf3 ; RV64I-NEXT: call __truncsfhf2 ; RV64I-NEXT: mv s2, a0 -; RV64I-NEXT: and a0, s1, s3 +; RV64I-NEXT: and a0, s0, s3 ; RV64I-NEXT: call __extendhfsf2 ; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __addsf3 ; RV64I-NEXT: call __truncsfhf2 -; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: and a0, s2, s3 -; RV64I-NEXT: call __extendhfsf2 -; RV64I-NEXT: lui s4, 524288 -; RV64I-NEXT: xor a0, a0, s4 -; RV64I-NEXT: call __truncsfhf2 -; RV64I-NEXT: mv s2, a0 +; RV64I-NEXT: lui a1, 8 +; RV64I-NEXT: xor s2, s2, a1 +; RV64I-NEXT: xor s4, a0, a1 ; RV64I-NEXT: and a0, s1, s3 ; RV64I-NEXT: call __extendhfsf2 -; RV64I-NEXT: xor a0, a0, s4 -; RV64I-NEXT: call __truncsfhf2 -; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: and a0, s0, s3 -; RV64I-NEXT: call __extendhfsf2 ; RV64I-NEXT: mv s0, a0 ; RV64I-NEXT: and a0, s2, s3 ; RV64I-NEXT: call __extendhfsf2 -; RV64I-NEXT: mv s2, a0 -; RV64I-NEXT: and a0, s1, s3 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: and a0, s4, s3 ; RV64I-NEXT: call __extendhfsf2 ; RV64I-NEXT: mv a2, a0 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 +; RV64I-NEXT: mv a1, s1 ; RV64I-NEXT: call fmaf ; RV64I-NEXT: call __truncsfhf2 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload @@ -1960,25 +1908,21 @@ define 
half @fnmsub_h(half %a, half %b, half %c) nounwind { ; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: mv s1, a1 ; RV32I-NEXT: lui a1, 16 -; RV32I-NEXT: addi s3, a1, -1 -; RV32I-NEXT: and a0, a0, s3 +; RV32I-NEXT: addi s2, a1, -1 +; RV32I-NEXT: and a0, a0, s2 ; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __addsf3 ; RV32I-NEXT: call __truncsfhf2 -; RV32I-NEXT: and a0, a0, s3 -; RV32I-NEXT: call __extendhfsf2 -; RV32I-NEXT: lui a1, 524288 -; RV32I-NEXT: xor a0, a0, a1 -; RV32I-NEXT: call __truncsfhf2 -; RV32I-NEXT: mv s2, a0 -; RV32I-NEXT: and a0, s1, s3 +; RV32I-NEXT: lui a1, 8 +; RV32I-NEXT: xor s3, a0, a1 +; RV32I-NEXT: and a0, s1, s2 ; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: and a0, s0, s3 +; RV32I-NEXT: and a0, s0, s2 ; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: and a0, s2, s3 +; RV32I-NEXT: and a0, s3, s2 ; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: mv a1, s1 ; RV32I-NEXT: mv a2, s0 @@ -2003,25 +1947,21 @@ define half @fnmsub_h(half %a, half %b, half %c) nounwind { ; RV64I-NEXT: mv s0, a2 ; RV64I-NEXT: mv s1, a1 ; RV64I-NEXT: lui a1, 16 -; RV64I-NEXT: addi s3, a1, -1 -; RV64I-NEXT: and a0, a0, s3 +; RV64I-NEXT: addi s2, a1, -1 +; RV64I-NEXT: and a0, a0, s2 ; RV64I-NEXT: call __extendhfsf2 ; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __addsf3 ; RV64I-NEXT: call __truncsfhf2 -; RV64I-NEXT: and a0, a0, s3 -; RV64I-NEXT: call __extendhfsf2 -; RV64I-NEXT: lui a1, 524288 -; RV64I-NEXT: xor a0, a0, a1 -; RV64I-NEXT: call __truncsfhf2 -; RV64I-NEXT: mv s2, a0 -; RV64I-NEXT: and a0, s1, s3 +; RV64I-NEXT: lui a1, 8 +; RV64I-NEXT: xor s3, a0, a1 +; RV64I-NEXT: and a0, s1, s2 ; RV64I-NEXT: call __extendhfsf2 ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: and a0, s0, s3 +; RV64I-NEXT: and a0, s0, s2 ; RV64I-NEXT: call __extendhfsf2 ; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: and a0, s2, s3 +; RV64I-NEXT: and a0, s3, s2 ; RV64I-NEXT: call __extendhfsf2 ; RV64I-NEXT: mv a1, s1 ; RV64I-NEXT: mv a2, s0 @@ -2096,25 +2036,21 @@ define half @fnmsub_h_2(half %a, half %b, half %c) nounwind { ; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s3, a0, -1 -; RV32I-NEXT: and a0, a1, s3 +; RV32I-NEXT: addi s2, a0, -1 +; RV32I-NEXT: and a0, a1, s2 ; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __addsf3 ; RV32I-NEXT: call __truncsfhf2 -; RV32I-NEXT: and a0, a0, s3 -; RV32I-NEXT: call __extendhfsf2 -; RV32I-NEXT: lui a1, 524288 -; RV32I-NEXT: xor a0, a0, a1 -; RV32I-NEXT: call __truncsfhf2 -; RV32I-NEXT: mv s2, a0 -; RV32I-NEXT: and a0, s1, s3 +; RV32I-NEXT: lui a1, 8 +; RV32I-NEXT: xor s3, a0, a1 +; RV32I-NEXT: and a0, s1, s2 ; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: and a0, s0, s3 +; RV32I-NEXT: and a0, s0, s2 ; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: and a0, s2, s3 +; RV32I-NEXT: and a0, s3, s2 ; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: mv a1, a0 ; RV32I-NEXT: mv a0, s1 @@ -2140,25 +2076,21 @@ define half @fnmsub_h_2(half %a, half %b, half %c) nounwind { ; RV64I-NEXT: mv s0, a2 ; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addi s3, a0, -1 -; RV64I-NEXT: and a0, a1, s3 +; RV64I-NEXT: addi s2, a0, -1 +; RV64I-NEXT: and a0, a1, s2 ; RV64I-NEXT: call __extendhfsf2 ; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __addsf3 ; RV64I-NEXT: call __truncsfhf2 -; RV64I-NEXT: and a0, a0, s3 -; RV64I-NEXT: call __extendhfsf2 -; RV64I-NEXT: lui a1, 524288 -; RV64I-NEXT: xor a0, a0, a1 -; RV64I-NEXT: call __truncsfhf2 -; 
RV64I-NEXT: mv s2, a0 -; RV64I-NEXT: and a0, s1, s3 +; RV64I-NEXT: lui a1, 8 +; RV64I-NEXT: xor s3, a0, a1 +; RV64I-NEXT: and a0, s1, s2 ; RV64I-NEXT: call __extendhfsf2 ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: and a0, s0, s3 +; RV64I-NEXT: and a0, s0, s2 ; RV64I-NEXT: call __extendhfsf2 ; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: and a0, s2, s3 +; RV64I-NEXT: and a0, s3, s2 ; RV64I-NEXT: call __extendhfsf2 ; RV64I-NEXT: mv a1, a0 ; RV64I-NEXT: mv a0, s1 @@ -2519,12 +2451,8 @@ define half @fnmadd_h_contract(half %a, half %b, half %c) nounwind { ; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: call __mulsf3 ; RV32I-NEXT: call __truncsfhf2 -; RV32I-NEXT: and a0, a0, s3 -; RV32I-NEXT: call __extendhfsf2 -; RV32I-NEXT: lui a1, 524288 -; RV32I-NEXT: xor a0, a0, a1 -; RV32I-NEXT: call __truncsfhf2 -; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lui a1, 8 +; RV32I-NEXT: xor s1, a0, a1 ; RV32I-NEXT: and a0, s0, s3 ; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: mv s0, a0 @@ -2580,12 +2508,8 @@ define half @fnmadd_h_contract(half %a, half %b, half %c) nounwind { ; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __mulsf3 ; RV64I-NEXT: call __truncsfhf2 -; RV64I-NEXT: and a0, a0, s3 -; RV64I-NEXT: call __extendhfsf2 -; RV64I-NEXT: lui a1, 524288 -; RV64I-NEXT: xor a0, a0, a1 -; RV64I-NEXT: call __truncsfhf2 -; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lui a1, 8 +; RV64I-NEXT: xor s1, a0, a1 ; RV64I-NEXT: and a0, s0, s3 ; RV64I-NEXT: call __extendhfsf2 ; RV64I-NEXT: mv s0, a0 diff --git a/llvm/test/CodeGen/Thumb2/mve-vabd.ll b/llvm/test/CodeGen/Thumb2/mve-vabd.ll index 8d52fe5..3c35a29 100644 --- a/llvm/test/CodeGen/Thumb2/mve-vabd.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vabd.ll @@ -63,34 +63,30 @@ define arm_aapcs_vfpcc void @vabd_v8f16(<8 x half> %x, <8 x half> %y, ptr %z) { ; CHECK-MVE-NEXT: .vsave {d8, d9, d10, d11, d12, d13} ; CHECK-MVE-NEXT: vpush {d8, d9, d10, d11, d12, d13} ; CHECK-MVE-NEXT: mov r4, r0 -; CHECK-MVE-NEXT: vmov.u16 r0, q1[0] +; CHECK-MVE-NEXT: vmov.u16 r0, q1[1] ; CHECK-MVE-NEXT: vmov q5, q1 ; CHECK-MVE-NEXT: vmov q4, q0 ; CHECK-MVE-NEXT: bl __aeabi_h2f ; CHECK-MVE-NEXT: mov r5, r0 -; CHECK-MVE-NEXT: vmov.u16 r0, q4[0] +; CHECK-MVE-NEXT: vmov.u16 r0, q4[1] ; CHECK-MVE-NEXT: bl __aeabi_h2f ; CHECK-MVE-NEXT: mov r1, r5 ; CHECK-MVE-NEXT: bl __aeabi_fsub ; CHECK-MVE-NEXT: bl __aeabi_f2h -; CHECK-MVE-NEXT: bl __aeabi_h2f -; CHECK-MVE-NEXT: bic r0, r0, #-2147483648 -; CHECK-MVE-NEXT: bl __aeabi_f2h ; CHECK-MVE-NEXT: mov r5, r0 -; CHECK-MVE-NEXT: vmov.u16 r0, q5[1] +; CHECK-MVE-NEXT: vmov.u16 r0, q5[0] ; CHECK-MVE-NEXT: bl __aeabi_h2f ; CHECK-MVE-NEXT: mov r6, r0 -; CHECK-MVE-NEXT: vmov.u16 r0, q4[1] +; CHECK-MVE-NEXT: vmov.u16 r0, q4[0] ; CHECK-MVE-NEXT: bl __aeabi_h2f ; CHECK-MVE-NEXT: mov r1, r6 ; CHECK-MVE-NEXT: bl __aeabi_fsub ; CHECK-MVE-NEXT: bl __aeabi_f2h -; CHECK-MVE-NEXT: vmov.16 q6[0], r5 -; CHECK-MVE-NEXT: bl __aeabi_h2f -; CHECK-MVE-NEXT: bic r0, r0, #-2147483648 -; CHECK-MVE-NEXT: bl __aeabi_f2h -; CHECK-MVE-NEXT: vmov.16 q6[1], r0 +; CHECK-MVE-NEXT: bfc r0, #15, #17 +; CHECK-MVE-NEXT: bfc r5, #15, #17 +; CHECK-MVE-NEXT: vmov.16 q6[0], r0 ; CHECK-MVE-NEXT: vmov.u16 r0, q5[2] +; CHECK-MVE-NEXT: vmov.16 q6[1], r5 ; CHECK-MVE-NEXT: bl __aeabi_h2f ; CHECK-MVE-NEXT: mov r5, r0 ; CHECK-MVE-NEXT: vmov.u16 r0, q4[2] @@ -98,9 +94,7 @@ define arm_aapcs_vfpcc void @vabd_v8f16(<8 x half> %x, <8 x half> %y, ptr %z) { ; CHECK-MVE-NEXT: mov r1, r5 ; CHECK-MVE-NEXT: bl __aeabi_fsub ; CHECK-MVE-NEXT: bl __aeabi_f2h -; CHECK-MVE-NEXT: bl __aeabi_h2f -; CHECK-MVE-NEXT: bic r0, r0, #-2147483648 -; CHECK-MVE-NEXT: bl __aeabi_f2h +; 
CHECK-MVE-NEXT: bfc r0, #15, #17 ; CHECK-MVE-NEXT: vmov.16 q6[2], r0 ; CHECK-MVE-NEXT: vmov.u16 r0, q5[3] ; CHECK-MVE-NEXT: bl __aeabi_h2f @@ -110,9 +104,7 @@ define arm_aapcs_vfpcc void @vabd_v8f16(<8 x half> %x, <8 x half> %y, ptr %z) { ; CHECK-MVE-NEXT: mov r1, r5 ; CHECK-MVE-NEXT: bl __aeabi_fsub ; CHECK-MVE-NEXT: bl __aeabi_f2h -; CHECK-MVE-NEXT: bl __aeabi_h2f -; CHECK-MVE-NEXT: bic r0, r0, #-2147483648 -; CHECK-MVE-NEXT: bl __aeabi_f2h +; CHECK-MVE-NEXT: bfc r0, #15, #17 ; CHECK-MVE-NEXT: vmov.16 q6[3], r0 ; CHECK-MVE-NEXT: vmov.u16 r0, q5[4] ; CHECK-MVE-NEXT: bl __aeabi_h2f @@ -122,9 +114,7 @@ define arm_aapcs_vfpcc void @vabd_v8f16(<8 x half> %x, <8 x half> %y, ptr %z) { ; CHECK-MVE-NEXT: mov r1, r5 ; CHECK-MVE-NEXT: bl __aeabi_fsub ; CHECK-MVE-NEXT: bl __aeabi_f2h -; CHECK-MVE-NEXT: bl __aeabi_h2f -; CHECK-MVE-NEXT: bic r0, r0, #-2147483648 -; CHECK-MVE-NEXT: bl __aeabi_f2h +; CHECK-MVE-NEXT: bfc r0, #15, #17 ; CHECK-MVE-NEXT: vmov.16 q6[4], r0 ; CHECK-MVE-NEXT: vmov.u16 r0, q5[5] ; CHECK-MVE-NEXT: bl __aeabi_h2f @@ -134,9 +124,7 @@ define arm_aapcs_vfpcc void @vabd_v8f16(<8 x half> %x, <8 x half> %y, ptr %z) { ; CHECK-MVE-NEXT: mov r1, r5 ; CHECK-MVE-NEXT: bl __aeabi_fsub ; CHECK-MVE-NEXT: bl __aeabi_f2h -; CHECK-MVE-NEXT: bl __aeabi_h2f -; CHECK-MVE-NEXT: bic r0, r0, #-2147483648 -; CHECK-MVE-NEXT: bl __aeabi_f2h +; CHECK-MVE-NEXT: bfc r0, #15, #17 ; CHECK-MVE-NEXT: vmov.16 q6[5], r0 ; CHECK-MVE-NEXT: vmov.u16 r0, q5[6] ; CHECK-MVE-NEXT: bl __aeabi_h2f @@ -146,9 +134,7 @@ define arm_aapcs_vfpcc void @vabd_v8f16(<8 x half> %x, <8 x half> %y, ptr %z) { ; CHECK-MVE-NEXT: mov r1, r5 ; CHECK-MVE-NEXT: bl __aeabi_fsub ; CHECK-MVE-NEXT: bl __aeabi_f2h -; CHECK-MVE-NEXT: bl __aeabi_h2f -; CHECK-MVE-NEXT: bic r0, r0, #-2147483648 -; CHECK-MVE-NEXT: bl __aeabi_f2h +; CHECK-MVE-NEXT: bfc r0, #15, #17 ; CHECK-MVE-NEXT: vmov.16 q6[6], r0 ; CHECK-MVE-NEXT: vmov.u16 r0, q5[7] ; CHECK-MVE-NEXT: bl __aeabi_h2f @@ -158,9 +144,7 @@ define arm_aapcs_vfpcc void @vabd_v8f16(<8 x half> %x, <8 x half> %y, ptr %z) { ; CHECK-MVE-NEXT: mov r1, r5 ; CHECK-MVE-NEXT: bl __aeabi_fsub ; CHECK-MVE-NEXT: bl __aeabi_f2h -; CHECK-MVE-NEXT: bl __aeabi_h2f -; CHECK-MVE-NEXT: bic r0, r0, #-2147483648 -; CHECK-MVE-NEXT: bl __aeabi_f2h +; CHECK-MVE-NEXT: bfc r0, #15, #17 ; CHECK-MVE-NEXT: vmov.16 q6[7], r0 ; CHECK-MVE-NEXT: vstrw.32 q6, [r4] ; CHECK-MVE-NEXT: vpop {d8, d9, d10, d11, d12, d13} diff --git a/llvm/test/CodeGen/X86/global-variable-partition-with-dap.ll b/llvm/test/CodeGen/X86/global-variable-partition-with-dap.ll new file mode 100644 index 0000000..a0c243b --- /dev/null +++ b/llvm/test/CodeGen/X86/global-variable-partition-with-dap.ll @@ -0,0 +1,43 @@ +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +;; A minimal test case. llc will crash if a global variable already has a section +;; prefix. Subsequent PRs will expand on this test case to test the hotness +;; reconciliation implementation. + +; RUN: not llc -mtriple=x86_64-unknown-linux-gnu -relocation-model=pic \ +; RUN: -partition-static-data-sections=true \ +; RUN: -data-sections=true -unique-section-names=false \ +; RUN: %s -o - 2>&1 | FileCheck %s --check-prefix=ERR + +; ERR: Global variable hot_bss already has a section prefix hot + +@hot_bss = internal global i32 0, !section_prefix !17 + +define void @hot_func() !prof !14 { + %9 = load i32, ptr @hot_bss + %11 = call i32 (...)
@func_taking_arbitrary_param(i32 %9) + ret void +} + +declare i32 @func_taking_arbitrary_param(...) + +!llvm.module.flags = !{!1} + +!1 = !{i32 1, !"ProfileSummary", !2} +!2 = !{!3, !4, !5, !6, !7, !8, !9, !10} +!3 = !{!"ProfileFormat", !"InstrProf"} +!4 = !{!"TotalCount", i64 1460183} +!5 = !{!"MaxCount", i64 849024} +!6 = !{!"MaxInternalCount", i64 32769} +!7 = !{!"MaxFunctionCount", i64 849024} +!8 = !{!"NumCounts", i64 23627} +!9 = !{!"NumFunctions", i64 3271} +!10 = !{!"DetailedSummary", !11} +!11 = !{!12, !13} +!12 = !{i32 990000, i64 166, i32 73} +!13 = !{i32 999999, i64 3, i32 1443} +!14 = !{!"function_entry_count", i64 100000} +!15 = !{!"function_entry_count", i64 1} +!16 = !{!"branch_weights", i32 1, i32 99999} +!17 = !{!"section_prefix", !"hot"} diff --git a/llvm/test/Transforms/InstCombine/fold-select-fmul-if-zero.ll b/llvm/test/Transforms/InstCombine/fold-select-fmul-if-zero.ll index ff6d9aa..1ba7005 100644 --- a/llvm/test/Transforms/InstCombine/fold-select-fmul-if-zero.ll +++ b/llvm/test/Transforms/InstCombine/fold-select-fmul-if-zero.ll @@ -481,7 +481,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_nsz_fmul(float %x, float %y) { define float @fmul_by_var_if_0_oeq_zero_f32_nsz_ninf_fmul(float %x, float %y) { ; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_nsz_ninf_fmul( ; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00 -; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00 +; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan ninf i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00 ; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[X]], [[SCALED_X]] ; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]] ; @@ -509,7 +509,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_nsz_nnan_fmul(float %x, float %y) { define float @fmul_by_var_if_0_oeq_zero_f32_nnan_ninf_fmul(float %x, float %y) { ; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_nnan_ninf_fmul( ; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00 -; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00 +; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan ninf i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00 ; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan float [[X]], [[SCALED_X]] ; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]] ; @@ -558,7 +558,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_nsz_inverted(f define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz(float %x, float %y) { ; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz( ; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00 -; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00 +; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan ninf i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00 ; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan float [[X]], [[SCALED_X]] ; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]] ; @@ -571,7 +571,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz(float %x, float % define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz_commuted(float %x, float %y) { ; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz_commuted( ; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00 -; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00 +; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan ninf i1 [[X_IS_ZERO]], float [[Y:%.*]], float 
1.000000e+00 ; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan float [[X]], [[SCALED_X]] ; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]] ; @@ -585,7 +585,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz_commuted(float %x define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_known_never_negzero(float %x, float nofpclass(nzero) %y) { ; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_known_never_negzero( ; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00 -; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00 +; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan ninf i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00 ; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan float [[X]], [[SCALED_X]] ; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]] ; @@ -598,7 +598,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_known_never_ne define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_known_never_negzero_negsub(float %x, float nofpclass(nzero nsub) %y) { ; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_known_never_negzero_negsub( ; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00 -; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00 +; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan ninf i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00 ; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan float [[X]], [[SCALED_X]] ; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]] ; @@ -705,7 +705,7 @@ define float @fmul_by_self_if_0_oeq_zero_f32(float %x) { define float @fmul_by_self_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz(float %x) { ; CHECK-LABEL: @fmul_by_self_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz( ; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00 -; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[X]], float 1.000000e+00 +; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan ninf i1 [[X_IS_ZERO]], float [[X]], float 1.000000e+00 ; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan float [[X]], [[SCALED_X]] ; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]] ; diff --git a/llvm/test/Transforms/InstCombine/select-binop-foldable-floating-point.ll b/llvm/test/Transforms/InstCombine/select-binop-foldable-floating-point.ll index 253bc9e7..c14dd46 100644 --- a/llvm/test/Transforms/InstCombine/select-binop-foldable-floating-point.ll +++ b/llvm/test/Transforms/InstCombine/select-binop-foldable-floating-point.ll @@ -23,6 +23,50 @@ define float @select_fpclass_fadd(i1 %cond, float nofpclass(nan) %A, float %B) { ret float %D } +define float @select_fpclass_fadd_ninf1(i1 %cond, float nofpclass(nan) %A, float %B) { +; CHECK-LABEL: @select_fpclass_fadd_ninf1( +; CHECK-NEXT: [[C:%.*]] = select ninf i1 [[COND:%.*]], float [[B:%.*]], float -0.000000e+00 +; CHECK-NEXT: [[D:%.*]] = fadd float [[A:%.*]], [[C]] +; CHECK-NEXT: ret float [[D]] +; + %C = fadd ninf float %A, %B + %D = select i1 %cond, float %C, float %A + ret float %D +} + +define float @select_fpclass_fadd_ninf2(i1 %cond, float nofpclass(nan) %A, float %B) { +; CHECK-LABEL: @select_fpclass_fadd_ninf2( +; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], float [[B:%.*]], float -0.000000e+00 +; CHECK-NEXT: [[D:%.*]] = fadd float [[A:%.*]], [[C]] +; CHECK-NEXT: ret float [[D]] +; + %C = fadd float %A, %B + %D = select ninf i1 %cond, float %C, float %A + ret float %D +} + +define float @select_fpclass_fadd_ninf3(i1 %cond, float 
nofpclass(nan) %A, float %B) { +; CHECK-LABEL: @select_fpclass_fadd_ninf3( +; CHECK-NEXT: [[C:%.*]] = select ninf i1 [[COND:%.*]], float [[B:%.*]], float -0.000000e+00 +; CHECK-NEXT: [[D:%.*]] = fadd ninf float [[A:%.*]], [[C]] +; CHECK-NEXT: ret float [[D]] +; + %C = fadd ninf float %A, %B + %D = select ninf i1 %cond, float %C, float %A + ret float %D +} + +define float @select_fpclass_fadd_nnan_ninf(i1 %cond, float nofpclass(nan) %A, float %B) { +; CHECK-LABEL: @select_fpclass_fadd_nnan_ninf( +; CHECK-NEXT: [[C:%.*]] = select nnan ninf i1 [[COND:%.*]], float [[B:%.*]], float -0.000000e+00 +; CHECK-NEXT: [[D:%.*]] = fadd float [[A:%.*]], [[C]] +; CHECK-NEXT: ret float [[D]] +; + %C = fadd float %A, %B + %D = select nnan ninf i1 %cond, float %C, float %A + ret float %D +} + define float @select_nnan_fadd(i1 %cond, float %A, float %B) { ; CHECK-LABEL: @select_nnan_fadd( ; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float -0.000000e+00 @@ -47,7 +91,7 @@ define float @select_nnan_fadd_swapped(i1 %cond, float %A, float %B) { define float @select_nnan_fadd_fast_math(i1 %cond, float %A, float %B) { ; CHECK-LABEL: @select_nnan_fadd_fast_math( -; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float -0.000000e+00 +; CHECK-NEXT: [[C:%.*]] = select nnan ninf i1 [[COND:%.*]], float [[B:%.*]], float -0.000000e+00 ; CHECK-NEXT: [[D:%.*]] = fadd reassoc nnan arcp contract afn float [[A:%.*]], [[C]] ; CHECK-NEXT: ret float [[D]] ; @@ -58,7 +102,7 @@ define float @select_nnan_fadd_fast_math(i1 %cond, float %A, float %B) { define float @select_nnan_fadd_swapped_fast_math(i1 %cond, float %A, float %B) { ; CHECK-LABEL: @select_nnan_fadd_swapped_fast_math( -; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float -0.000000e+00, float [[B:%.*]] +; CHECK-NEXT: [[C:%.*]] = select nnan ninf i1 [[COND:%.*]], float -0.000000e+00, float [[B:%.*]] ; CHECK-NEXT: [[D:%.*]] = fadd reassoc nnan arcp contract afn float [[A:%.*]], [[C]] ; CHECK-NEXT: ret float [[D]] ; @@ -124,7 +168,7 @@ define float @select_nnan_fmul_swapped(i1 %cond, float %A, float %B) { define float @select_nnan_fmul_fast_math(i1 %cond, float %A, float %B) { ; CHECK-LABEL: @select_nnan_fmul_fast_math( -; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00 +; CHECK-NEXT: [[C:%.*]] = select nnan ninf i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00 ; CHECK-NEXT: [[D:%.*]] = fmul reassoc nnan arcp contract afn float [[A:%.*]], [[C]] ; CHECK-NEXT: ret float [[D]] ; @@ -135,7 +179,7 @@ define float @select_nnan_fmul_fast_math(i1 %cond, float %A, float %B) { define float @select_nnan_fmul_swapped_fast_math(i1 %cond, float %A, float %B) { ; CHECK-LABEL: @select_nnan_fmul_swapped_fast_math( -; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float 1.000000e+00, float [[B:%.*]] +; CHECK-NEXT: [[C:%.*]] = select nnan ninf i1 [[COND:%.*]], float 1.000000e+00, float [[B:%.*]] ; CHECK-NEXT: [[D:%.*]] = fmul reassoc nnan arcp contract afn float [[A:%.*]], [[C]] ; CHECK-NEXT: ret float [[D]] ; @@ -144,6 +188,50 @@ define float @select_nnan_fmul_swapped_fast_math(i1 %cond, float %A, float %B) { ret float %D } +define float @select_fpclass_fmul_ninf1(i1 %cond, float nofpclass(nan) %A, float %B) { +; CHECK-LABEL: @select_fpclass_fmul_ninf1( +; CHECK-NEXT: [[C:%.*]] = select ninf i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00 +; CHECK-NEXT: [[D:%.*]] = fmul float [[A:%.*]], [[C]] +; CHECK-NEXT: ret float [[D]] +; + %C = fmul ninf float %A, %B + %D = select i1 %cond, 
float %C, float %A + ret float %D +} + +define float @select_fpclass_fmul_ninf2(i1 %cond, float nofpclass(nan) %A, float %B) { +; CHECK-LABEL: @select_fpclass_fmul_ninf2( +; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00 +; CHECK-NEXT: [[D:%.*]] = fmul float [[A:%.*]], [[C]] +; CHECK-NEXT: ret float [[D]] +; + %C = fmul float %A, %B + %D = select ninf i1 %cond, float %C, float %A + ret float %D +} + +define float @select_fpclass_fmul_ninf3(i1 %cond, float nofpclass(nan) %A, float %B) { +; CHECK-LABEL: @select_fpclass_fmul_ninf3( +; CHECK-NEXT: [[C:%.*]] = select ninf i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00 +; CHECK-NEXT: [[D:%.*]] = fmul ninf float [[A:%.*]], [[C]] +; CHECK-NEXT: ret float [[D]] +; + %C = fmul ninf float %A, %B + %D = select ninf i1 %cond, float %C, float %A + ret float %D +} + +define float @select_fpclass_fmul_nnan_ninf(i1 %cond, float nofpclass(nan) %A, float %B) { +; CHECK-LABEL: @select_fpclass_fmul_nnan_ninf( +; CHECK-NEXT: [[C:%.*]] = select nnan ninf i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00 +; CHECK-NEXT: [[D:%.*]] = fmul float [[A:%.*]], [[C]] +; CHECK-NEXT: ret float [[D]] +; + %C = fmul float %A, %B + %D = select nnan ninf i1 %cond, float %C, float %A + ret float %D +} + define float @select_nnan_fsub(i1 %cond, float %A, float %B) { ; CHECK-LABEL: @select_nnan_fsub( ; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float 0.000000e+00 @@ -168,7 +256,7 @@ define float @select_nnan_fsub_swapped(i1 %cond, float %A, float %B) { define float @select_nnan_fsub_fast_math(i1 %cond, float %A, float %B) { ; CHECK-LABEL: @select_nnan_fsub_fast_math( -; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float 0.000000e+00 +; CHECK-NEXT: [[C:%.*]] = select nnan ninf i1 [[COND:%.*]], float [[B:%.*]], float 0.000000e+00 ; CHECK-NEXT: [[D:%.*]] = fsub reassoc nnan arcp contract afn float [[A:%.*]], [[C]] ; CHECK-NEXT: ret float [[D]] ; @@ -179,7 +267,7 @@ define float @select_nnan_fsub_fast_math(i1 %cond, float %A, float %B) { define float @select_nnan_fsub_swapped_fast_math(i1 %cond, float %A, float %B) { ; CHECK-LABEL: @select_nnan_fsub_swapped_fast_math( -; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float 0.000000e+00, float [[B:%.*]] +; CHECK-NEXT: [[C:%.*]] = select nnan ninf i1 [[COND:%.*]], float 0.000000e+00, float [[B:%.*]] ; CHECK-NEXT: [[D:%.*]] = fsub reassoc nnan arcp contract afn float [[A:%.*]], [[C]] ; CHECK-NEXT: ret float [[D]] ; @@ -188,6 +276,50 @@ define float @select_nnan_fsub_swapped_fast_math(i1 %cond, float %A, float %B) { ret float %D } +define float @select_fpclass_fsub_ninf1(i1 %cond, float nofpclass(nan) %A, float %B) { +; CHECK-LABEL: @select_fpclass_fsub_ninf1( +; CHECK-NEXT: [[C:%.*]] = select ninf i1 [[COND:%.*]], float [[B:%.*]], float 0.000000e+00 +; CHECK-NEXT: [[D:%.*]] = fsub float [[A:%.*]], [[C]] +; CHECK-NEXT: ret float [[D]] +; + %C = fsub ninf float %A, %B + %D = select i1 %cond, float %C, float %A + ret float %D +} + +define float @select_fpclass_fsub_ninf2(i1 %cond, float nofpclass(nan) %A, float %B) { +; CHECK-LABEL: @select_fpclass_fsub_ninf2( +; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], float [[B:%.*]], float 0.000000e+00 +; CHECK-NEXT: [[D:%.*]] = fsub float [[A:%.*]], [[C]] +; CHECK-NEXT: ret float [[D]] +; + %C = fsub float %A, %B + %D = select ninf i1 %cond, float %C, float %A + ret float %D +} + +define float @select_fpclass_fsub_ninf3(i1 %cond, float nofpclass(nan) %A, float %B) { +; CHECK-LABEL: 
@select_fpclass_fsub_ninf3( +; CHECK-NEXT: [[C:%.*]] = select ninf i1 [[COND:%.*]], float [[B:%.*]], float 0.000000e+00 +; CHECK-NEXT: [[D:%.*]] = fsub ninf float [[A:%.*]], [[C]] +; CHECK-NEXT: ret float [[D]] +; + %C = fsub ninf float %A, %B + %D = select ninf i1 %cond, float %C, float %A + ret float %D +} + +define float @select_fpclass_fsub_nnan_ninf(i1 %cond, float nofpclass(nan) %A, float %B) { +; CHECK-LABEL: @select_fpclass_fsub_nnan_ninf( +; CHECK-NEXT: [[C:%.*]] = select nnan ninf i1 [[COND:%.*]], float [[B:%.*]], float 0.000000e+00 +; CHECK-NEXT: [[D:%.*]] = fsub float [[A:%.*]], [[C]] +; CHECK-NEXT: ret float [[D]] +; + %C = fsub float %A, %B + %D = select nnan ninf i1 %cond, float %C, float %A + ret float %D +} + define <4 x float> @select_nnan_nsz_fsub_v4f32(<4 x i1> %cond, <4 x float> %A, <4 x float> %B) { ; CHECK-LABEL: @select_nnan_nsz_fsub_v4f32( ; CHECK-NEXT: [[C:%.*]] = select nnan nsz <4 x i1> [[COND:%.*]], <4 x float> [[B:%.*]], <4 x float> zeroinitializer @@ -246,7 +378,7 @@ define float @select_nnan_fdiv_swapped(i1 %cond, float %A, float %B) { define float @select_nnan_fdiv_fast_math(i1 %cond, float %A, float %B) { ; CHECK-LABEL: @select_nnan_fdiv_fast_math( -; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00 +; CHECK-NEXT: [[C:%.*]] = select nnan ninf i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00 ; CHECK-NEXT: [[D:%.*]] = fdiv reassoc nnan arcp contract afn float [[A:%.*]], [[C]] ; CHECK-NEXT: ret float [[D]] ; @@ -257,7 +389,7 @@ define float @select_nnan_fdiv_fast_math(i1 %cond, float %A, float %B) { define float @select_nnan_fdiv_swapped_fast_math(i1 %cond, float %A, float %B) { ; CHECK-LABEL: @select_nnan_fdiv_swapped_fast_math( -; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float 1.000000e+00, float [[B:%.*]] +; CHECK-NEXT: [[C:%.*]] = select nnan ninf i1 [[COND:%.*]], float 1.000000e+00, float [[B:%.*]] ; CHECK-NEXT: [[D:%.*]] = fdiv reassoc nnan arcp contract afn float [[A:%.*]], [[C]] ; CHECK-NEXT: ret float [[D]] ; diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-selectandorcost.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-selectandorcost.ll index e154883c..9dbbf4c 100644 --- a/llvm/test/Transforms/LoopVectorize/ARM/mve-selectandorcost.ll +++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-selectandorcost.ll @@ -45,7 +45,7 @@ define float @test(ptr nocapture readonly %pA, ptr nocapture readonly %pB, i32 % ; CHECK-NEXT: [[TMP7:%.*]] = fsub fast <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD7]] ; CHECK-NEXT: [[TMP8:%.*]] = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> [[TMP7]]) ; CHECK-NEXT: [[TMP9:%.*]] = fdiv fast <4 x float> [[TMP8]], [[TMP6]] -; CHECK-NEXT: [[TMP10:%.*]] = select <4 x i1> [[TMP20]], <4 x float> [[TMP9]], <4 x float> splat (float -0.000000e+00) +; CHECK-NEXT: [[TMP10:%.*]] = select ninf <4 x i1> [[TMP20]], <4 x float> [[TMP9]], <4 x float> splat (float -0.000000e+00) ; CHECK-NEXT: [[PREDPHI]] = fadd reassoc arcp contract afn <4 x float> [[VEC_PHI]], [[TMP10]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll index e8709a5..55adda7 100644 --- a/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll +++ b/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll @@ -41,12 +41,12 @@ define nofpclass(nan inf) double @monte_simple(i32 noundef 
%nblocks, i32 noundef ; CHECK-NEXT: [[TMP9:%.*]] = fcmp fast ogt <4 x double> [[TMP7]], zeroinitializer ; CHECK-NEXT: [[TMP10:%.*]] = fmul fast <4 x double> [[TMP6]], [[TMP6]] ; CHECK-NEXT: [[TMP11:%.*]] = fmul fast <4 x double> [[TMP7]], [[TMP7]] -; CHECK-NEXT: [[TMP12:%.*]] = select <4 x i1> [[TMP8]], <4 x double> [[TMP6]], <4 x double> splat (double -0.000000e+00) -; CHECK-NEXT: [[TMP13:%.*]] = select <4 x i1> [[TMP9]], <4 x double> [[TMP7]], <4 x double> splat (double -0.000000e+00) +; CHECK-NEXT: [[TMP12:%.*]] = select ninf <4 x i1> [[TMP8]], <4 x double> [[TMP6]], <4 x double> splat (double -0.000000e+00) +; CHECK-NEXT: [[TMP13:%.*]] = select ninf <4 x i1> [[TMP9]], <4 x double> [[TMP7]], <4 x double> splat (double -0.000000e+00) ; CHECK-NEXT: [[TMP14]] = fadd reassoc arcp contract afn <4 x double> [[VEC_PHI16]], [[TMP12]] ; CHECK-NEXT: [[TMP15]] = fadd reassoc arcp contract afn <4 x double> [[VEC_PHI17]], [[TMP13]] -; CHECK-NEXT: [[TMP16:%.*]] = select <4 x i1> [[TMP8]], <4 x double> [[TMP10]], <4 x double> splat (double -0.000000e+00) -; CHECK-NEXT: [[TMP17:%.*]] = select <4 x i1> [[TMP9]], <4 x double> [[TMP11]], <4 x double> splat (double -0.000000e+00) +; CHECK-NEXT: [[TMP16:%.*]] = select ninf <4 x i1> [[TMP8]], <4 x double> [[TMP10]], <4 x double> splat (double -0.000000e+00) +; CHECK-NEXT: [[TMP17:%.*]] = select ninf <4 x i1> [[TMP9]], <4 x double> [[TMP11]], <4 x double> splat (double -0.000000e+00) ; CHECK-NEXT: [[TMP18]] = fadd reassoc arcp contract afn <4 x double> [[VEC_PHI]], [[TMP16]] ; CHECK-NEXT: [[TMP19]] = fadd reassoc arcp contract afn <4 x double> [[VEC_PHI15]], [[TMP17]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDVARS_IV1]], 8 @@ -75,9 +75,9 @@ define nofpclass(nan inf) double @monte_simple(i32 noundef %nblocks, i32 noundef ; CHECK-NEXT: [[SUB:%.*]] = fsub fast double [[MUL]], [[Z]] ; CHECK-NEXT: [[CMP1:%.*]] = fcmp fast ogt double [[SUB]], 0.000000e+00 ; CHECK-NEXT: [[MUL3:%.*]] = fmul fast double [[SUB]], [[SUB]] -; CHECK-NEXT: [[ADD8:%.*]] = select i1 [[CMP1]], double [[SUB]], double -0.000000e+00 +; CHECK-NEXT: [[ADD8:%.*]] = select ninf i1 [[CMP1]], double [[SUB]], double -0.000000e+00 ; CHECK-NEXT: [[V0_2]] = fadd reassoc arcp contract afn double [[V0_011]], [[ADD8]] -; CHECK-NEXT: [[ADD4:%.*]] = select i1 [[CMP1]], double [[MUL3]], double -0.000000e+00 +; CHECK-NEXT: [[ADD4:%.*]] = select ninf i1 [[CMP1]], double [[MUL3]], double -0.000000e+00 ; CHECK-NEXT: [[V1_2]] = fadd reassoc arcp contract afn double [[V1_012]], [[ADD4]] ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]] @@ -229,12 +229,12 @@ define nofpclass(nan inf) double @monte_exp(i32 noundef %nblocks, i32 noundef %R ; CHECK-NEXT: [[TMP13:%.*]] = fcmp fast ogt <4 x double> [[TMP11]], zeroinitializer ; CHECK-NEXT: [[TMP14:%.*]] = fmul fast <4 x double> [[TMP10]], [[TMP10]] ; CHECK-NEXT: [[TMP15:%.*]] = fmul fast <4 x double> [[TMP11]], [[TMP11]] -; CHECK-NEXT: [[TMP16:%.*]] = select <4 x i1> [[TMP12]], <4 x double> [[TMP10]], <4 x double> splat (double -0.000000e+00) -; CHECK-NEXT: [[TMP17:%.*]] = select <4 x i1> [[TMP13]], <4 x double> [[TMP11]], <4 x double> splat (double -0.000000e+00) +; CHECK-NEXT: [[TMP16:%.*]] = select ninf <4 x i1> [[TMP12]], <4 x double> [[TMP10]], <4 x double> splat (double -0.000000e+00) +; CHECK-NEXT: [[TMP17:%.*]] = select ninf <4 x i1> [[TMP13]], <4 x double> [[TMP11]], <4 x double> splat (double -0.000000e+00) ; CHECK-NEXT: [[TMP18]] = fadd reassoc arcp 
contract afn <4 x double> [[VEC_PHI32]], [[TMP16]] ; CHECK-NEXT: [[TMP19]] = fadd reassoc arcp contract afn <4 x double> [[VEC_PHI33]], [[TMP17]] -; CHECK-NEXT: [[TMP20:%.*]] = select <4 x i1> [[TMP12]], <4 x double> [[TMP14]], <4 x double> splat (double -0.000000e+00) -; CHECK-NEXT: [[TMP21:%.*]] = select <4 x i1> [[TMP13]], <4 x double> [[TMP15]], <4 x double> splat (double -0.000000e+00) +; CHECK-NEXT: [[TMP20:%.*]] = select ninf <4 x i1> [[TMP12]], <4 x double> [[TMP14]], <4 x double> splat (double -0.000000e+00) +; CHECK-NEXT: [[TMP21:%.*]] = select ninf <4 x i1> [[TMP13]], <4 x double> [[TMP15]], <4 x double> splat (double -0.000000e+00) ; CHECK-NEXT: [[TMP22]] = fadd reassoc arcp contract afn <4 x double> [[VEC_PHI]], [[TMP20]] ; CHECK-NEXT: [[TMP23]] = fadd reassoc arcp contract afn <4 x double> [[VEC_PHI31]], [[TMP21]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDVARS_IV1]], 8 @@ -263,9 +263,9 @@ define nofpclass(nan inf) double @monte_exp(i32 noundef %nblocks, i32 noundef %R ; CHECK-NEXT: [[SUB_US:%.*]] = fsub fast double [[MUL_US]], [[Z]] ; CHECK-NEXT: [[CMP4_US:%.*]] = fcmp fast ogt double [[SUB_US]], 0.000000e+00 ; CHECK-NEXT: [[ADD7_US:%.*]] = fmul fast double [[SUB_US]], [[SUB_US]] -; CHECK-NEXT: [[ADD12_US:%.*]] = select i1 [[CMP4_US]], double [[SUB_US]], double -0.000000e+00 +; CHECK-NEXT: [[ADD12_US:%.*]] = select ninf i1 [[CMP4_US]], double [[SUB_US]], double -0.000000e+00 ; CHECK-NEXT: [[V0_2_US]] = fadd reassoc arcp contract afn double [[V0_115_US]], [[ADD12_US]] -; CHECK-NEXT: [[ADD7_US1:%.*]] = select i1 [[CMP4_US]], double [[ADD7_US]], double -0.000000e+00 +; CHECK-NEXT: [[ADD7_US1:%.*]] = select ninf i1 [[CMP4_US]], double [[ADD7_US]], double -0.000000e+00 ; CHECK-NEXT: [[V1_2_US]] = fadd reassoc arcp contract afn double [[V1_116_US]], [[ADD7_US1]] ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; CHECK-NEXT: [[EXITCOND25_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]] diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/x264-satd-8x4.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/x264-satd-8x4.ll new file mode 100644 index 0000000..c1042f18 --- /dev/null +++ b/llvm/test/Transforms/SLPVectorizer/RISCV/x264-satd-8x4.ll @@ -0,0 +1,526 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -mtriple=riscv64 -mattr=+m,+v,+unaligned-vector-mem \ +; RUN: -passes=slp-vectorizer -S < %s | FileCheck %s +; Function Attrs: nounwind uwtable vscale_range(8,1024) +define i32 @x264_pixel_satd_8x4(ptr %pix1, i32 %i_pix1, ptr %pix2, i32 %i_pix2) { +; CHECK-LABEL: define i32 @x264_pixel_satd_8x4( +; CHECK-SAME: ptr [[PIX1:%.*]], i32 [[I_PIX1:%.*]], ptr [[PIX2:%.*]], i32 [[I_PIX2:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[IDX_EXT:%.*]] = sext i32 [[I_PIX1]] to i64 +; CHECK-NEXT: [[IDX_EXT63:%.*]] = sext i32 [[I_PIX2]] to i64 +; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds nuw i8, ptr [[PIX1]], i64 4 +; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds nuw i8, ptr [[PIX2]], i64 4 +; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, ptr [[PIX1]], i64 [[IDX_EXT]] +; CHECK-NEXT: [[ADD_PTR64:%.*]] = getelementptr inbounds i8, ptr [[PIX2]], i64 [[IDX_EXT63]] +; CHECK-NEXT: [[ARRAYIDX3_1:%.*]] = getelementptr inbounds nuw i8, ptr [[ADD_PTR]], i64 4 +; CHECK-NEXT: [[ARRAYIDX5_1:%.*]] = getelementptr inbounds nuw i8, ptr [[ADD_PTR64]], i64 4 +; CHECK-NEXT: [[ADD_PTR_1:%.*]] = getelementptr inbounds i8, ptr [[ADD_PTR]], i64 [[IDX_EXT]] +; 
CHECK-NEXT: [[ADD_PTR64_1:%.*]] = getelementptr inbounds i8, ptr [[ADD_PTR64]], i64 [[IDX_EXT63]] +; CHECK-NEXT: [[ARRAYIDX3_2:%.*]] = getelementptr inbounds nuw i8, ptr [[ADD_PTR_1]], i64 4 +; CHECK-NEXT: [[ARRAYIDX5_2:%.*]] = getelementptr inbounds nuw i8, ptr [[ADD_PTR64_1]], i64 4 +; CHECK-NEXT: [[ADD_PTR_2:%.*]] = getelementptr inbounds i8, ptr [[ADD_PTR_1]], i64 [[IDX_EXT]] +; CHECK-NEXT: [[ADD_PTR64_2:%.*]] = getelementptr inbounds i8, ptr [[ADD_PTR64_1]], i64 [[IDX_EXT63]] +; CHECK-NEXT: [[ARRAYIDX3_3:%.*]] = getelementptr inbounds nuw i8, ptr [[ADD_PTR_2]], i64 4 +; CHECK-NEXT: [[ARRAYIDX5_3:%.*]] = getelementptr inbounds nuw i8, ptr [[ADD_PTR64_2]], i64 4 +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i8>, ptr [[PIX1]], align 1 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[PIX2]], align 1 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr [[ARRAYIDX3]], align 1 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i8>, ptr [[ARRAYIDX5]], align 1 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i8>, ptr [[ADD_PTR]], align 1 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i8>, ptr [[ADD_PTR64]], align 1 +; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i8>, ptr [[ARRAYIDX3_1]], align 1 +; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i8>, ptr [[ARRAYIDX5_1]], align 1 +; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i8>, ptr [[ADD_PTR_1]], align 1 +; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i8>, ptr [[ADD_PTR64_1]], align 1 +; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i8>, ptr [[ARRAYIDX3_2]], align 1 +; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i8>, ptr [[ARRAYIDX5_2]], align 1 +; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i8>, ptr [[ADD_PTR_2]], align 1 +; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <4 x i8> [[TMP0]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP14:%.*]] = shufflevector <4 x i8> [[TMP4]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP15:%.*]] = shufflevector <4 x i8> [[TMP0]], <4 x i8> [[TMP4]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP16:%.*]] = shufflevector <4 x i8> [[TMP8]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP17:%.*]] = shufflevector <16 x i8> [[TMP15]], <16 x i8> [[TMP16]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP18:%.*]] = shufflevector <4 x i8> [[TMP12]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP19:%.*]] = shufflevector <16 x i8> [[TMP17]], <16 x i8> [[TMP18]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19> +; CHECK-NEXT: [[TMP20:%.*]] = zext <16 x i8> [[TMP19]] to <16 x i32> +; CHECK-NEXT: [[TMP21:%.*]] = load <4 x i8>, ptr [[ADD_PTR64_2]], align 1 +; CHECK-NEXT: [[TMP22:%.*]] = 
shufflevector <4 x i8> [[TMP1]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP23:%.*]] = shufflevector <4 x i8> [[TMP5]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP24:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> [[TMP5]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP25:%.*]] = shufflevector <4 x i8> [[TMP9]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP26:%.*]] = shufflevector <16 x i8> [[TMP24]], <16 x i8> [[TMP25]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP27:%.*]] = shufflevector <4 x i8> [[TMP21]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP28:%.*]] = shufflevector <16 x i8> [[TMP26]], <16 x i8> [[TMP27]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19> +; CHECK-NEXT: [[TMP29:%.*]] = zext <16 x i8> [[TMP28]] to <16 x i32> +; CHECK-NEXT: [[TMP30:%.*]] = sub nsw <16 x i32> [[TMP20]], [[TMP29]] +; CHECK-NEXT: [[TMP31:%.*]] = load <4 x i8>, ptr [[ARRAYIDX3_3]], align 1 +; CHECK-NEXT: [[TMP32:%.*]] = shufflevector <4 x i8> [[TMP2]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP33:%.*]] = shufflevector <4 x i8> [[TMP6]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP34:%.*]] = shufflevector <4 x i8> [[TMP2]], <4 x i8> [[TMP6]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP35:%.*]] = shufflevector <4 x i8> [[TMP10]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP36:%.*]] = shufflevector <16 x i8> [[TMP34]], <16 x i8> [[TMP35]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP37:%.*]] = shufflevector <4 x i8> [[TMP31]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP38:%.*]] = shufflevector <16 x i8> [[TMP36]], 
<16 x i8> [[TMP37]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19> +; CHECK-NEXT: [[TMP39:%.*]] = zext <16 x i8> [[TMP38]] to <16 x i32> +; CHECK-NEXT: [[TMP40:%.*]] = load <4 x i8>, ptr [[ARRAYIDX5_3]], align 1 +; CHECK-NEXT: [[TMP41:%.*]] = shufflevector <4 x i8> [[TMP3]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP42:%.*]] = shufflevector <4 x i8> [[TMP7]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP43:%.*]] = shufflevector <4 x i8> [[TMP3]], <4 x i8> [[TMP7]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP44:%.*]] = shufflevector <4 x i8> [[TMP11]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP45:%.*]] = shufflevector <16 x i8> [[TMP43]], <16 x i8> [[TMP44]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP46:%.*]] = shufflevector <4 x i8> [[TMP40]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP47:%.*]] = shufflevector <16 x i8> [[TMP45]], <16 x i8> [[TMP46]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19> +; CHECK-NEXT: [[TMP48:%.*]] = zext <16 x i8> [[TMP47]] to <16 x i32> +; CHECK-NEXT: [[TMP49:%.*]] = sub nsw <16 x i32> [[TMP39]], [[TMP48]] +; CHECK-NEXT: [[TMP50:%.*]] = shl nsw <16 x i32> [[TMP49]], splat (i32 16) +; CHECK-NEXT: [[TMP51:%.*]] = add nsw <16 x i32> [[TMP50]], [[TMP30]] +; CHECK-NEXT: [[TMP52:%.*]] = shufflevector <16 x i32> [[TMP51]], <16 x i32> poison, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14> +; CHECK-NEXT: [[TMP53:%.*]] = add nsw <16 x i32> [[TMP52]], [[TMP51]] +; CHECK-NEXT: [[TMP54:%.*]] = sub nsw <16 x i32> [[TMP52]], [[TMP51]] +; CHECK-NEXT: [[TMP55:%.*]] = shufflevector <16 x i32> [[TMP53]], <16 x i32> [[TMP54]], <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31> +; CHECK-NEXT: [[TMP56:%.*]] = shufflevector <16 x i32> [[TMP55]], <16 x i32> poison, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 6, i32 7, i32 4, i32 5, i32 10, i32 11, i32 8, i32 9, i32 14, i32 15, i32 12, i32 13> +; CHECK-NEXT: [[TMP57:%.*]] = add nsw <16 x i32> [[TMP55]], [[TMP56]] +; CHECK-NEXT: [[TMP58:%.*]] = sub nsw <16 x i32> [[TMP55]], [[TMP56]] +; CHECK-NEXT: [[TMP59:%.*]] = shufflevector <16 x i32> [[TMP57]], <16 x i32> [[TMP58]], <16 x i32> <i32 16, i32 17, i32 2, i32 3, i32 20, i32 21, i32 6, i32 7, i32 24, i32 25, i32 10, i32 11, i32 28, i32 29, i32 14, i32 15> +; CHECK-NEXT: [[TMP60:%.*]] = shufflevector 
<16 x i32> [[TMP59]], <16 x i32> poison, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11> +; CHECK-NEXT: [[TMP61:%.*]] = sub nsw <16 x i32> [[TMP59]], [[TMP60]] +; CHECK-NEXT: [[TMP62:%.*]] = add nsw <16 x i32> [[TMP59]], [[TMP60]] +; CHECK-NEXT: [[TMP63:%.*]] = shufflevector <16 x i32> [[TMP61]], <16 x i32> [[TMP62]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 28, i32 29, i32 30, i32 31> +; CHECK-NEXT: [[TMP64:%.*]] = shufflevector <16 x i32> [[TMP63]], <16 x i32> poison, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> +; CHECK-NEXT: [[TMP65:%.*]] = add nsw <16 x i32> [[TMP63]], [[TMP64]] +; CHECK-NEXT: [[TMP66:%.*]] = sub nsw <16 x i32> [[TMP63]], [[TMP64]] +; CHECK-NEXT: [[TMP67:%.*]] = shufflevector <16 x i32> [[TMP65]], <16 x i32> [[TMP66]], <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> +; CHECK-NEXT: [[TMP68:%.*]] = lshr <16 x i32> [[TMP67]], splat (i32 15) +; CHECK-NEXT: [[TMP69:%.*]] = and <16 x i32> [[TMP68]], splat (i32 65537) +; CHECK-NEXT: [[TMP70:%.*]] = mul nuw <16 x i32> [[TMP69]], splat (i32 65535) +; CHECK-NEXT: [[TMP71:%.*]] = add <16 x i32> [[TMP70]], [[TMP67]] +; CHECK-NEXT: [[TMP72:%.*]] = xor <16 x i32> [[TMP71]], [[TMP70]] +; CHECK-NEXT: [[TMP73:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP72]]) +; CHECK-NEXT: [[CONV118:%.*]] = and i32 [[TMP73]], 65535 +; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[TMP73]], 16 +; CHECK-NEXT: [[ADD119:%.*]] = add nuw nsw i32 [[CONV118]], [[SHR]] +; CHECK-NEXT: [[SHR120:%.*]] = lshr i32 [[ADD119]], 1 +; CHECK-NEXT: ret i32 [[SHR120]] +; +entry: + %idx.ext = sext i32 %i_pix1 to i64 + %idx.ext63 = sext i32 %i_pix2 to i64 + %0 = load i8, ptr %pix1, align 1 + %conv = zext i8 %0 to i32 + %1 = load i8, ptr %pix2, align 1 + %conv2 = zext i8 %1 to i32 + %sub = sub nsw i32 %conv, %conv2 + %arrayidx3 = getelementptr inbounds nuw i8, ptr %pix1, i64 4 + %2 = load i8, ptr %arrayidx3, align 1 + %conv4 = zext i8 %2 to i32 + %arrayidx5 = getelementptr inbounds nuw i8, ptr %pix2, i64 4 + %3 = load i8, ptr %arrayidx5, align 1 + %conv6 = zext i8 %3 to i32 + %sub7 = sub nsw i32 %conv4, %conv6 + %shl = shl nsw i32 %sub7, 16 + %add = add nsw i32 %shl, %sub + %arrayidx8 = getelementptr inbounds nuw i8, ptr %pix1, i64 1 + %4 = load i8, ptr %arrayidx8, align 1 + %conv9 = zext i8 %4 to i32 + %arrayidx10 = getelementptr inbounds nuw i8, ptr %pix2, i64 1 + %5 = load i8, ptr %arrayidx10, align 1 + %conv11 = zext i8 %5 to i32 + %sub12 = sub nsw i32 %conv9, %conv11 + %arrayidx13 = getelementptr inbounds nuw i8, ptr %pix1, i64 5 + %6 = load i8, ptr %arrayidx13, align 1 + %conv14 = zext i8 %6 to i32 + %arrayidx15 = getelementptr inbounds nuw i8, ptr %pix2, i64 5 + %7 = load i8, ptr %arrayidx15, align 1 + %conv16 = zext i8 %7 to i32 + %sub17 = sub nsw i32 %conv14, %conv16 + %shl18 = shl nsw i32 %sub17, 16 + %add19 = add nsw i32 %shl18, %sub12 + %arrayidx20 = getelementptr inbounds nuw i8, ptr %pix1, i64 2 + %8 = load i8, ptr %arrayidx20, align 1 + %conv21 = zext i8 %8 to i32 + %arrayidx22 = getelementptr inbounds nuw i8, ptr %pix2, i64 2 + %9 = load i8, ptr %arrayidx22, align 1 + %conv23 = zext i8 %9 to i32 + %sub24 = sub nsw i32 %conv21, %conv23 + %arrayidx25 = getelementptr inbounds nuw i8, ptr %pix1, i64 6 + %10 = load i8, ptr %arrayidx25, 
align 1 + %conv26 = zext i8 %10 to i32 + %arrayidx27 = getelementptr inbounds nuw i8, ptr %pix2, i64 6 + %11 = load i8, ptr %arrayidx27, align 1 + %conv28 = zext i8 %11 to i32 + %sub29 = sub nsw i32 %conv26, %conv28 + %shl30 = shl nsw i32 %sub29, 16 + %add31 = add nsw i32 %shl30, %sub24 + %arrayidx32 = getelementptr inbounds nuw i8, ptr %pix1, i64 3 + %12 = load i8, ptr %arrayidx32, align 1 + %conv33 = zext i8 %12 to i32 + %arrayidx34 = getelementptr inbounds nuw i8, ptr %pix2, i64 3 + %13 = load i8, ptr %arrayidx34, align 1 + %conv35 = zext i8 %13 to i32 + %sub36 = sub nsw i32 %conv33, %conv35 + %arrayidx37 = getelementptr inbounds nuw i8, ptr %pix1, i64 7 + %14 = load i8, ptr %arrayidx37, align 1 + %conv38 = zext i8 %14 to i32 + %arrayidx39 = getelementptr inbounds nuw i8, ptr %pix2, i64 7 + %15 = load i8, ptr %arrayidx39, align 1 + %conv40 = zext i8 %15 to i32 + %sub41 = sub nsw i32 %conv38, %conv40 + %shl42 = shl nsw i32 %sub41, 16 + %add43 = add nsw i32 %shl42, %sub36 + %add44 = add nsw i32 %add19, %add + %sub45 = sub nsw i32 %add, %add19 + %add46 = add nsw i32 %add43, %add31 + %sub47 = sub nsw i32 %add31, %add43 + %add48 = add nsw i32 %add46, %add44 + %sub51 = sub nsw i32 %add44, %add46 + %add55 = add nsw i32 %sub47, %sub45 + %sub59 = sub nsw i32 %sub45, %sub47 + %add.ptr = getelementptr inbounds i8, ptr %pix1, i64 %idx.ext + %add.ptr64 = getelementptr inbounds i8, ptr %pix2, i64 %idx.ext63 + %16 = load i8, ptr %add.ptr, align 1 + %conv.1 = zext i8 %16 to i32 + %17 = load i8, ptr %add.ptr64, align 1 + %conv2.1 = zext i8 %17 to i32 + %sub.1 = sub nsw i32 %conv.1, %conv2.1 + %arrayidx3.1 = getelementptr inbounds nuw i8, ptr %add.ptr, i64 4 + %18 = load i8, ptr %arrayidx3.1, align 1 + %conv4.1 = zext i8 %18 to i32 + %arrayidx5.1 = getelementptr inbounds nuw i8, ptr %add.ptr64, i64 4 + %19 = load i8, ptr %arrayidx5.1, align 1 + %conv6.1 = zext i8 %19 to i32 + %sub7.1 = sub nsw i32 %conv4.1, %conv6.1 + %shl.1 = shl nsw i32 %sub7.1, 16 + %add.1 = add nsw i32 %shl.1, %sub.1 + %arrayidx8.1 = getelementptr inbounds nuw i8, ptr %add.ptr, i64 1 + %20 = load i8, ptr %arrayidx8.1, align 1 + %conv9.1 = zext i8 %20 to i32 + %arrayidx10.1 = getelementptr inbounds nuw i8, ptr %add.ptr64, i64 1 + %21 = load i8, ptr %arrayidx10.1, align 1 + %conv11.1 = zext i8 %21 to i32 + %sub12.1 = sub nsw i32 %conv9.1, %conv11.1 + %arrayidx13.1 = getelementptr inbounds nuw i8, ptr %add.ptr, i64 5 + %22 = load i8, ptr %arrayidx13.1, align 1 + %conv14.1 = zext i8 %22 to i32 + %arrayidx15.1 = getelementptr inbounds nuw i8, ptr %add.ptr64, i64 5 + %23 = load i8, ptr %arrayidx15.1, align 1 + %conv16.1 = zext i8 %23 to i32 + %sub17.1 = sub nsw i32 %conv14.1, %conv16.1 + %shl18.1 = shl nsw i32 %sub17.1, 16 + %add19.1 = add nsw i32 %shl18.1, %sub12.1 + %arrayidx20.1 = getelementptr inbounds nuw i8, ptr %add.ptr, i64 2 + %24 = load i8, ptr %arrayidx20.1, align 1 + %conv21.1 = zext i8 %24 to i32 + %arrayidx22.1 = getelementptr inbounds nuw i8, ptr %add.ptr64, i64 2 + %25 = load i8, ptr %arrayidx22.1, align 1 + %conv23.1 = zext i8 %25 to i32 + %sub24.1 = sub nsw i32 %conv21.1, %conv23.1 + %arrayidx25.1 = getelementptr inbounds nuw i8, ptr %add.ptr, i64 6 + %26 = load i8, ptr %arrayidx25.1, align 1 + %conv26.1 = zext i8 %26 to i32 + %arrayidx27.1 = getelementptr inbounds nuw i8, ptr %add.ptr64, i64 6 + %27 = load i8, ptr %arrayidx27.1, align 1 + %conv28.1 = zext i8 %27 to i32 + %sub29.1 = sub nsw i32 %conv26.1, %conv28.1 + %shl30.1 = shl nsw i32 %sub29.1, 16 + %add31.1 = add nsw i32 %shl30.1, %sub24.1 + %arrayidx32.1 = 
getelementptr inbounds nuw i8, ptr %add.ptr, i64 3 + %28 = load i8, ptr %arrayidx32.1, align 1 + %conv33.1 = zext i8 %28 to i32 + %arrayidx34.1 = getelementptr inbounds nuw i8, ptr %add.ptr64, i64 3 + %29 = load i8, ptr %arrayidx34.1, align 1 + %conv35.1 = zext i8 %29 to i32 + %sub36.1 = sub nsw i32 %conv33.1, %conv35.1 + %arrayidx37.1 = getelementptr inbounds nuw i8, ptr %add.ptr, i64 7 + %30 = load i8, ptr %arrayidx37.1, align 1 + %conv38.1 = zext i8 %30 to i32 + %arrayidx39.1 = getelementptr inbounds nuw i8, ptr %add.ptr64, i64 7 + %31 = load i8, ptr %arrayidx39.1, align 1 + %conv40.1 = zext i8 %31 to i32 + %sub41.1 = sub nsw i32 %conv38.1, %conv40.1 + %shl42.1 = shl nsw i32 %sub41.1, 16 + %add43.1 = add nsw i32 %shl42.1, %sub36.1 + %add44.1 = add nsw i32 %add19.1, %add.1 + %sub45.1 = sub nsw i32 %add.1, %add19.1 + %add46.1 = add nsw i32 %add43.1, %add31.1 + %sub47.1 = sub nsw i32 %add31.1, %add43.1 + %add48.1 = add nsw i32 %add46.1, %add44.1 + %sub51.1 = sub nsw i32 %add44.1, %add46.1 + %add55.1 = add nsw i32 %sub47.1, %sub45.1 + %sub59.1 = sub nsw i32 %sub45.1, %sub47.1 + %add.ptr.1 = getelementptr inbounds i8, ptr %add.ptr, i64 %idx.ext + %add.ptr64.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 %idx.ext63 + %32 = load i8, ptr %add.ptr.1, align 1 + %conv.2 = zext i8 %32 to i32 + %33 = load i8, ptr %add.ptr64.1, align 1 + %conv2.2 = zext i8 %33 to i32 + %sub.2 = sub nsw i32 %conv.2, %conv2.2 + %arrayidx3.2 = getelementptr inbounds nuw i8, ptr %add.ptr.1, i64 4 + %34 = load i8, ptr %arrayidx3.2, align 1 + %conv4.2 = zext i8 %34 to i32 + %arrayidx5.2 = getelementptr inbounds nuw i8, ptr %add.ptr64.1, i64 4 + %35 = load i8, ptr %arrayidx5.2, align 1 + %conv6.2 = zext i8 %35 to i32 + %sub7.2 = sub nsw i32 %conv4.2, %conv6.2 + %shl.2 = shl nsw i32 %sub7.2, 16 + %add.2 = add nsw i32 %shl.2, %sub.2 + %arrayidx8.2 = getelementptr inbounds nuw i8, ptr %add.ptr.1, i64 1 + %36 = load i8, ptr %arrayidx8.2, align 1 + %conv9.2 = zext i8 %36 to i32 + %arrayidx10.2 = getelementptr inbounds nuw i8, ptr %add.ptr64.1, i64 1 + %37 = load i8, ptr %arrayidx10.2, align 1 + %conv11.2 = zext i8 %37 to i32 + %sub12.2 = sub nsw i32 %conv9.2, %conv11.2 + %arrayidx13.2 = getelementptr inbounds nuw i8, ptr %add.ptr.1, i64 5 + %38 = load i8, ptr %arrayidx13.2, align 1 + %conv14.2 = zext i8 %38 to i32 + %arrayidx15.2 = getelementptr inbounds nuw i8, ptr %add.ptr64.1, i64 5 + %39 = load i8, ptr %arrayidx15.2, align 1 + %conv16.2 = zext i8 %39 to i32 + %sub17.2 = sub nsw i32 %conv14.2, %conv16.2 + %shl18.2 = shl nsw i32 %sub17.2, 16 + %add19.2 = add nsw i32 %shl18.2, %sub12.2 + %arrayidx20.2 = getelementptr inbounds nuw i8, ptr %add.ptr.1, i64 2 + %40 = load i8, ptr %arrayidx20.2, align 1 + %conv21.2 = zext i8 %40 to i32 + %arrayidx22.2 = getelementptr inbounds nuw i8, ptr %add.ptr64.1, i64 2 + %41 = load i8, ptr %arrayidx22.2, align 1 + %conv23.2 = zext i8 %41 to i32 + %sub24.2 = sub nsw i32 %conv21.2, %conv23.2 + %arrayidx25.2 = getelementptr inbounds nuw i8, ptr %add.ptr.1, i64 6 + %42 = load i8, ptr %arrayidx25.2, align 1 + %conv26.2 = zext i8 %42 to i32 + %arrayidx27.2 = getelementptr inbounds nuw i8, ptr %add.ptr64.1, i64 6 + %43 = load i8, ptr %arrayidx27.2, align 1 + %conv28.2 = zext i8 %43 to i32 + %sub29.2 = sub nsw i32 %conv26.2, %conv28.2 + %shl30.2 = shl nsw i32 %sub29.2, 16 + %add31.2 = add nsw i32 %shl30.2, %sub24.2 + %arrayidx32.2 = getelementptr inbounds nuw i8, ptr %add.ptr.1, i64 3 + %44 = load i8, ptr %arrayidx32.2, align 1 + %conv33.2 = zext i8 %44 to i32 + %arrayidx34.2 = getelementptr 
inbounds nuw i8, ptr %add.ptr64.1, i64 3 + %45 = load i8, ptr %arrayidx34.2, align 1 + %conv35.2 = zext i8 %45 to i32 + %sub36.2 = sub nsw i32 %conv33.2, %conv35.2 + %arrayidx37.2 = getelementptr inbounds nuw i8, ptr %add.ptr.1, i64 7 + %46 = load i8, ptr %arrayidx37.2, align 1 + %conv38.2 = zext i8 %46 to i32 + %arrayidx39.2 = getelementptr inbounds nuw i8, ptr %add.ptr64.1, i64 7 + %47 = load i8, ptr %arrayidx39.2, align 1 + %conv40.2 = zext i8 %47 to i32 + %sub41.2 = sub nsw i32 %conv38.2, %conv40.2 + %shl42.2 = shl nsw i32 %sub41.2, 16 + %add43.2 = add nsw i32 %shl42.2, %sub36.2 + %add44.2 = add nsw i32 %add19.2, %add.2 + %sub45.2 = sub nsw i32 %add.2, %add19.2 + %add46.2 = add nsw i32 %add43.2, %add31.2 + %sub47.2 = sub nsw i32 %add31.2, %add43.2 + %add48.2 = add nsw i32 %add46.2, %add44.2 + %sub51.2 = sub nsw i32 %add44.2, %add46.2 + %add55.2 = add nsw i32 %sub47.2, %sub45.2 + %sub59.2 = sub nsw i32 %sub45.2, %sub47.2 + %add.ptr.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 %idx.ext + %add.ptr64.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 %idx.ext63 + %48 = load i8, ptr %add.ptr.2, align 1 + %conv.3 = zext i8 %48 to i32 + %49 = load i8, ptr %add.ptr64.2, align 1 + %conv2.3 = zext i8 %49 to i32 + %sub.3 = sub nsw i32 %conv.3, %conv2.3 + %arrayidx3.3 = getelementptr inbounds nuw i8, ptr %add.ptr.2, i64 4 + %50 = load i8, ptr %arrayidx3.3, align 1 + %conv4.3 = zext i8 %50 to i32 + %arrayidx5.3 = getelementptr inbounds nuw i8, ptr %add.ptr64.2, i64 4 + %51 = load i8, ptr %arrayidx5.3, align 1 + %conv6.3 = zext i8 %51 to i32 + %sub7.3 = sub nsw i32 %conv4.3, %conv6.3 + %shl.3 = shl nsw i32 %sub7.3, 16 + %add.3 = add nsw i32 %shl.3, %sub.3 + %arrayidx8.3 = getelementptr inbounds nuw i8, ptr %add.ptr.2, i64 1 + %52 = load i8, ptr %arrayidx8.3, align 1 + %conv9.3 = zext i8 %52 to i32 + %arrayidx10.3 = getelementptr inbounds nuw i8, ptr %add.ptr64.2, i64 1 + %53 = load i8, ptr %arrayidx10.3, align 1 + %conv11.3 = zext i8 %53 to i32 + %sub12.3 = sub nsw i32 %conv9.3, %conv11.3 + %arrayidx13.3 = getelementptr inbounds nuw i8, ptr %add.ptr.2, i64 5 + %54 = load i8, ptr %arrayidx13.3, align 1 + %conv14.3 = zext i8 %54 to i32 + %arrayidx15.3 = getelementptr inbounds nuw i8, ptr %add.ptr64.2, i64 5 + %55 = load i8, ptr %arrayidx15.3, align 1 + %conv16.3 = zext i8 %55 to i32 + %sub17.3 = sub nsw i32 %conv14.3, %conv16.3 + %shl18.3 = shl nsw i32 %sub17.3, 16 + %add19.3 = add nsw i32 %shl18.3, %sub12.3 + %arrayidx20.3 = getelementptr inbounds nuw i8, ptr %add.ptr.2, i64 2 + %56 = load i8, ptr %arrayidx20.3, align 1 + %conv21.3 = zext i8 %56 to i32 + %arrayidx22.3 = getelementptr inbounds nuw i8, ptr %add.ptr64.2, i64 2 + %57 = load i8, ptr %arrayidx22.3, align 1 + %conv23.3 = zext i8 %57 to i32 + %sub24.3 = sub nsw i32 %conv21.3, %conv23.3 + %arrayidx25.3 = getelementptr inbounds nuw i8, ptr %add.ptr.2, i64 6 + %58 = load i8, ptr %arrayidx25.3, align 1 + %conv26.3 = zext i8 %58 to i32 + %arrayidx27.3 = getelementptr inbounds nuw i8, ptr %add.ptr64.2, i64 6 + %59 = load i8, ptr %arrayidx27.3, align 1 + %conv28.3 = zext i8 %59 to i32 + %sub29.3 = sub nsw i32 %conv26.3, %conv28.3 + %shl30.3 = shl nsw i32 %sub29.3, 16 + %add31.3 = add nsw i32 %shl30.3, %sub24.3 + %arrayidx32.3 = getelementptr inbounds nuw i8, ptr %add.ptr.2, i64 3 + %60 = load i8, ptr %arrayidx32.3, align 1 + %conv33.3 = zext i8 %60 to i32 + %arrayidx34.3 = getelementptr inbounds nuw i8, ptr %add.ptr64.2, i64 3 + %61 = load i8, ptr %arrayidx34.3, align 1 + %conv35.3 = zext i8 %61 to i32 + %sub36.3 = sub nsw i32 %conv33.3, 
%conv35.3 + %arrayidx37.3 = getelementptr inbounds nuw i8, ptr %add.ptr.2, i64 7 + %62 = load i8, ptr %arrayidx37.3, align 1 + %conv38.3 = zext i8 %62 to i32 + %arrayidx39.3 = getelementptr inbounds nuw i8, ptr %add.ptr64.2, i64 7 + %63 = load i8, ptr %arrayidx39.3, align 1 + %conv40.3 = zext i8 %63 to i32 + %sub41.3 = sub nsw i32 %conv38.3, %conv40.3 + %shl42.3 = shl nsw i32 %sub41.3, 16 + %add43.3 = add nsw i32 %shl42.3, %sub36.3 + %add44.3 = add nsw i32 %add19.3, %add.3 + %sub45.3 = sub nsw i32 %add.3, %add19.3 + %add46.3 = add nsw i32 %add43.3, %add31.3 + %sub47.3 = sub nsw i32 %add31.3, %add43.3 + %add48.3 = add nsw i32 %add46.3, %add44.3 + %sub51.3 = sub nsw i32 %add44.3, %add46.3 + %add55.3 = add nsw i32 %sub47.3, %sub45.3 + %sub59.3 = sub nsw i32 %sub45.3, %sub47.3 + %add78 = add nsw i32 %add48.1, %add48 + %sub86 = sub nsw i32 %add48, %add48.1 + %add94 = add nsw i32 %add48.3, %add48.2 + %sub102 = sub nsw i32 %add48.2, %add48.3 + %add103 = add nsw i32 %add94, %add78 + %sub104 = sub nsw i32 %add78, %add94 + %add105 = add nsw i32 %sub102, %sub86 + %sub106 = sub nsw i32 %sub86, %sub102 + %shr.i = lshr i32 %add103, 15 + %and.i = and i32 %shr.i, 65537 + %mul.i = mul nuw i32 %and.i, 65535 + %add.i = add i32 %mul.i, %add103 + %xor.i = xor i32 %add.i, %mul.i + %shr.i169 = lshr i32 %add105, 15 + %and.i170 = and i32 %shr.i169, 65537 + %mul.i171 = mul nuw i32 %and.i170, 65535 + %add.i172 = add i32 %mul.i171, %add105 + %xor.i173 = xor i32 %add.i172, %mul.i171 + %shr.i174 = lshr i32 %sub104, 15 + %and.i175 = and i32 %shr.i174, 65537 + %mul.i176 = mul nuw i32 %and.i175, 65535 + %add.i177 = add i32 %mul.i176, %sub104 + %xor.i178 = xor i32 %add.i177, %mul.i176 + %shr.i179 = lshr i32 %sub106, 15 + %and.i180 = and i32 %shr.i179, 65537 + %mul.i181 = mul nuw i32 %and.i180, 65535 + %add.i182 = add i32 %mul.i181, %sub106 + %xor.i183 = xor i32 %add.i182, %mul.i181 + %add110 = add i32 %xor.i173, %xor.i + %add112 = add i32 %add110, %xor.i178 + %add113 = add i32 %add112, %xor.i183 + %add78.1 = add nsw i32 %add55.1, %add55 + %sub86.1 = sub nsw i32 %add55, %add55.1 + %add94.1 = add nsw i32 %add55.3, %add55.2 + %sub102.1 = sub nsw i32 %add55.2, %add55.3 + %add103.1 = add nsw i32 %add94.1, %add78.1 + %sub104.1 = sub nsw i32 %add78.1, %add94.1 + %add105.1 = add nsw i32 %sub102.1, %sub86.1 + %sub106.1 = sub nsw i32 %sub86.1, %sub102.1 + %shr.i.1 = lshr i32 %add103.1, 15 + %and.i.1 = and i32 %shr.i.1, 65537 + %mul.i.1 = mul nuw i32 %and.i.1, 65535 + %add.i.1 = add i32 %mul.i.1, %add103.1 + %xor.i.1 = xor i32 %add.i.1, %mul.i.1 + %shr.i169.1 = lshr i32 %add105.1, 15 + %and.i170.1 = and i32 %shr.i169.1, 65537 + %mul.i171.1 = mul nuw i32 %and.i170.1, 65535 + %add.i172.1 = add i32 %mul.i171.1, %add105.1 + %xor.i173.1 = xor i32 %add.i172.1, %mul.i171.1 + %shr.i174.1 = lshr i32 %sub104.1, 15 + %and.i175.1 = and i32 %shr.i174.1, 65537 + %mul.i176.1 = mul nuw i32 %and.i175.1, 65535 + %add.i177.1 = add i32 %mul.i176.1, %sub104.1 + %xor.i178.1 = xor i32 %add.i177.1, %mul.i176.1 + %shr.i179.1 = lshr i32 %sub106.1, 15 + %and.i180.1 = and i32 %shr.i179.1, 65537 + %mul.i181.1 = mul nuw i32 %and.i180.1, 65535 + %add.i182.1 = add i32 %mul.i181.1, %sub106.1 + %xor.i183.1 = xor i32 %add.i182.1, %mul.i181.1 + %add108.1 = add i32 %xor.i173.1, %add113 + %add110.1 = add i32 %add108.1, %xor.i.1 + %add112.1 = add i32 %add110.1, %xor.i178.1 + %add113.1 = add i32 %add112.1, %xor.i183.1 + %add78.2 = add nsw i32 %sub51.1, %sub51 + %sub86.2 = sub nsw i32 %sub51, %sub51.1 + %add94.2 = add nsw i32 %sub51.3, %sub51.2 + %sub102.2 = sub nsw i32 
%sub51.2, %sub51.3 + %add103.2 = add nsw i32 %add94.2, %add78.2 + %sub104.2 = sub nsw i32 %add78.2, %add94.2 + %add105.2 = add nsw i32 %sub102.2, %sub86.2 + %sub106.2 = sub nsw i32 %sub86.2, %sub102.2 + %shr.i.2 = lshr i32 %add103.2, 15 + %and.i.2 = and i32 %shr.i.2, 65537 + %mul.i.2 = mul nuw i32 %and.i.2, 65535 + %add.i.2 = add i32 %mul.i.2, %add103.2 + %xor.i.2 = xor i32 %add.i.2, %mul.i.2 + %shr.i169.2 = lshr i32 %add105.2, 15 + %and.i170.2 = and i32 %shr.i169.2, 65537 + %mul.i171.2 = mul nuw i32 %and.i170.2, 65535 + %add.i172.2 = add i32 %mul.i171.2, %add105.2 + %xor.i173.2 = xor i32 %add.i172.2, %mul.i171.2 + %shr.i174.2 = lshr i32 %sub104.2, 15 + %and.i175.2 = and i32 %shr.i174.2, 65537 + %mul.i176.2 = mul nuw i32 %and.i175.2, 65535 + %add.i177.2 = add i32 %mul.i176.2, %sub104.2 + %xor.i178.2 = xor i32 %add.i177.2, %mul.i176.2 + %shr.i179.2 = lshr i32 %sub106.2, 15 + %and.i180.2 = and i32 %shr.i179.2, 65537 + %mul.i181.2 = mul nuw i32 %and.i180.2, 65535 + %add.i182.2 = add i32 %mul.i181.2, %sub106.2 + %xor.i183.2 = xor i32 %add.i182.2, %mul.i181.2 + %add108.2 = add i32 %xor.i173.2, %add113.1 + %add110.2 = add i32 %add108.2, %xor.i.2 + %add112.2 = add i32 %add110.2, %xor.i178.2 + %add113.2 = add i32 %add112.2, %xor.i183.2 + %add78.3 = add nsw i32 %sub59.1, %sub59 + %sub86.3 = sub nsw i32 %sub59, %sub59.1 + %add94.3 = add nsw i32 %sub59.3, %sub59.2 + %sub102.3 = sub nsw i32 %sub59.2, %sub59.3 + %add103.3 = add nsw i32 %add94.3, %add78.3 + %sub104.3 = sub nsw i32 %add78.3, %add94.3 + %add105.3 = add nsw i32 %sub102.3, %sub86.3 + %sub106.3 = sub nsw i32 %sub86.3, %sub102.3 + %shr.i.3 = lshr i32 %add103.3, 15 + %and.i.3 = and i32 %shr.i.3, 65537 + %mul.i.3 = mul nuw i32 %and.i.3, 65535 + %add.i.3 = add i32 %mul.i.3, %add103.3 + %xor.i.3 = xor i32 %add.i.3, %mul.i.3 + %shr.i169.3 = lshr i32 %add105.3, 15 + %and.i170.3 = and i32 %shr.i169.3, 65537 + %mul.i171.3 = mul nuw i32 %and.i170.3, 65535 + %add.i172.3 = add i32 %mul.i171.3, %add105.3 + %xor.i173.3 = xor i32 %add.i172.3, %mul.i171.3 + %shr.i174.3 = lshr i32 %sub104.3, 15 + %and.i175.3 = and i32 %shr.i174.3, 65537 + %mul.i176.3 = mul nuw i32 %and.i175.3, 65535 + %add.i177.3 = add i32 %mul.i176.3, %sub104.3 + %xor.i178.3 = xor i32 %add.i177.3, %mul.i176.3 + %shr.i179.3 = lshr i32 %sub106.3, 15 + %and.i180.3 = and i32 %shr.i179.3, 65537 + %mul.i181.3 = mul nuw i32 %and.i180.3, 65535 + %add.i182.3 = add i32 %mul.i181.3, %sub106.3 + %xor.i183.3 = xor i32 %add.i182.3, %mul.i181.3 + %add108.3 = add i32 %xor.i173.3, %add113.2 + %add110.3 = add i32 %add108.3, %xor.i.3 + %add112.3 = add i32 %add110.3, %xor.i178.3 + %add113.3 = add i32 %add112.3, %xor.i183.3 + %conv118 = and i32 %add113.3, 65535 + %shr = lshr i32 %add113.3, 16 + %add119 = add nuw nsw i32 %conv118, %shr + %shr120 = lshr i32 %add119, 1 + ret i32 %shr120 +} diff --git a/llvm/test/Transforms/SLPVectorizer/X86/poison-within-divisions.ll b/llvm/test/Transforms/SLPVectorizer/X86/poison-within-divisions.ll new file mode 100644 index 0000000..76ef396 --- /dev/null +++ b/llvm/test/Transforms/SLPVectorizer/X86/poison-within-divisions.ll @@ -0,0 +1,98 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt --passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s + +define i32 @test(i1 %tobool2.not, i64 %conv21) { +; CHECK-LABEL: define i32 @test( +; CHECK-SAME: i1 [[TOBOOL2_NOT:%.*]], i64 [[CONV21:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[WHILE_BODY:.*]] +; CHECK: [[WHILE_BODY]]: +; 
CHECK-NEXT: [[Q24_659:%.*]] = phi i32 [ [[Q24_655:%.*]], %[[IF_END35:.*]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[L15_1:%.*]] = phi i32 [ [[L15_4:%.*]], %[[IF_END35]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: br i1 [[TOBOOL2_NOT]], label %[[IF_END4:.*]], label %[[Q:.*]] +; CHECK: [[IF_END4]]: +; CHECK-NEXT: [[TMP0:%.*]] = icmp eq i32 [[Q24_659]], 0 +; CHECK-NEXT: br label %[[AB:.*]] +; CHECK: [[AB]]: +; CHECK-NEXT: [[Q24_658:%.*]] = phi i32 [ [[Q24_660:%.*]], %[[IF_END35]] ], [ 0, %[[IF_END4]] ] +; CHECK-NEXT: [[M_1:%.*]] = phi i1 [ false, %[[IF_END35]] ], [ [[TMP0]], %[[IF_END4]] ] +; CHECK-NEXT: [[O_2:%.*]] = phi i32 [ [[O_7:%.*]], %[[IF_END35]] ], [ 0, %[[IF_END4]] ] +; CHECK-NEXT: [[Q24_2:%.*]] = phi i32 [ [[Q24_7:%.*]], %[[IF_END35]] ], [ 0, %[[IF_END4]] ] +; CHECK-NEXT: br i1 [[M_1]], label %[[AE:.*]], label %[[AC:.*]] +; CHECK: [[Q]]: +; CHECK-NEXT: [[TOBOOL16_NOT:%.*]] = icmp ne i32 [[L15_1]], 0 +; CHECK-NEXT: [[SPEC_SELECT2:%.*]] = zext i1 [[TOBOOL16_NOT]] to i32 +; CHECK-NEXT: br label %[[AE]] +; CHECK: [[AE]]: +; CHECK-NEXT: [[Q24_655]] = phi i32 [ [[Q24_658]], %[[AB]] ], [ 0, %[[Q]] ] +; CHECK-NEXT: [[M_3:%.*]] = phi i64 [ 0, %[[AB]] ], [ 1, %[[Q]] ] +; CHECK-NEXT: [[L15_4]] = phi i32 [ poison, %[[AB]] ], [ [[SPEC_SELECT2]], %[[Q]] ] +; CHECK-NEXT: [[O_4:%.*]] = phi i32 [ [[O_2]], %[[AB]] ], [ 0, %[[Q]] ] +; CHECK-NEXT: [[Q24_4:%.*]] = phi i32 [ [[Q24_2]], %[[AB]] ], [ 0, %[[Q]] ] +; CHECK-NEXT: br i1 [[TOBOOL2_NOT]], label %[[IF_END35]], label %[[IF_THEN20:.*]] +; CHECK: [[IF_THEN20]]: +; CHECK-NEXT: [[DIV22:%.*]] = udiv i64 [[M_3]], [[CONV21]] +; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[DIV22]] to i32 +; CHECK-NEXT: [[CONV23:%.*]] = sub i32 0, [[TMP1]] +; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[M_3]] to i32 +; CHECK-NEXT: [[CONV25:%.*]] = xor i32 [[TMP2]], 1 +; CHECK-NEXT: br label %[[IF_END35]] +; CHECK: [[AC]]: +; CHECK-NEXT: [[SPEC_SELECT:%.*]] = select i1 [[TOBOOL2_NOT]], i32 [[Q24_2]], i32 [[O_2]] +; CHECK-NEXT: ret i32 [[SPEC_SELECT]] +; CHECK: [[IF_END35]]: +; CHECK-NEXT: [[Q24_660]] = phi i32 [ 0, %[[AE]] ], [ [[CONV25]], %[[IF_THEN20]] ] +; CHECK-NEXT: [[O_7]] = phi i32 [ [[O_4]], %[[AE]] ], [ [[CONV23]], %[[IF_THEN20]] ] +; CHECK-NEXT: [[Q24_7]] = phi i32 [ [[Q24_4]], %[[AE]] ], [ [[CONV25]], %[[IF_THEN20]] ] +; CHECK-NEXT: br i1 [[TOBOOL2_NOT]], label %[[WHILE_BODY]], label %[[AB]] +; +entry: + br label %while.body + +while.body: + %q24.659 = phi i32 [ %q24.655, %if.end35 ], [ 0, %entry ] + %l15.1 = phi i32 [ %l15.4, %if.end35 ], [ 0, %entry ] + br i1 %tobool2.not, label %if.end4, label %q + +if.end4: + %0 = icmp eq i32 %q24.659, 0 + br label %ab + +ab: + %q24.658 = phi i32 [ %q24.660, %if.end35 ], [ 0, %if.end4 ] + %m.1 = phi i1 [ false, %if.end35 ], [ %0, %if.end4 ] + %o.2 = phi i32 [ %o.7, %if.end35 ], [ 0, %if.end4 ] + %q24.2 = phi i32 [ %q24.7, %if.end35 ], [ 0, %if.end4 ] + br i1 %m.1, label %ae, label %ac + +q: + %tobool16.not = icmp ne i32 %l15.1, 0 + %spec.select2 = zext i1 %tobool16.not to i32 + br label %ae + +ae: + %q24.655 = phi i32 [ %q24.658, %ab ], [ 0, %q ] + %m.3 = phi i64 [ 0, %ab ], [ 1, %q ] + %l15.4 = phi i32 [ poison, %ab ], [ %spec.select2, %q ] + %o.4 = phi i32 [ %o.2, %ab ], [ 0, %q ] + %q24.4 = phi i32 [ %q24.2, %ab ], [ 0, %q ] + br i1 %tobool2.not, label %if.end35, label %if.then20 + +if.then20: + %div22 = udiv i64 %m.3, %conv21 + %1 = trunc i64 %div22 to i32 + %conv23 = sub i32 0, %1 + %2 = trunc i64 %m.3 to i32 + %conv25 = xor i32 %2, 1 + br label %if.end35 + +ac: + %spec.select = select i1 %tobool2.not, i32 %q24.2, i32 %o.2 + ret i32 
%spec.select + +if.end35: + %q24.660 = phi i32 [ 0, %ae ], [ %conv25, %if.then20 ] + %o.7 = phi i32 [ %o.4, %ae ], [ %conv23, %if.then20 ] + %q24.7 = phi i32 [ %q24.4, %ae ], [ %conv25, %if.then20 ] + br i1 %tobool2.not, label %while.body, label %ab +} diff --git a/llvm/test/Transforms/SROA/vector-promotion-cannot-tree-structure-merge.ll b/llvm/test/Transforms/SROA/vector-promotion-cannot-tree-structure-merge.ll index c858d07..ead6e02 100644 --- a/llvm/test/Transforms/SROA/vector-promotion-cannot-tree-structure-merge.ll +++ b/llvm/test/Transforms/SROA/vector-promotion-cannot-tree-structure-merge.ll @@ -219,4 +219,18 @@ entry: } +define <1 x i32> @test_store_value_size_not_multiple_of_allocated_element_type_size(<1 x i16> %a, <1 x i16> %b) { +entry: + %alloca = alloca [2 x i16] + + %ptr0 = getelementptr inbounds [2 x i16], ptr %alloca, i32 0, i32 0 + store <1 x i16> %a, ptr %ptr0 + + %ptr1 = getelementptr inbounds [2 x i16], ptr %alloca, i32 0, i32 1 + store <1 x i16> %b, ptr %ptr1 + + %result = load <1 x i32>, ptr %alloca + ret <1 x i32> %result +} + declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) diff --git a/llvm/tools/bugpoint/OptimizerDriver.cpp b/llvm/tools/bugpoint/OptimizerDriver.cpp index 56a0fa4..3daacfd 100644 --- a/llvm/tools/bugpoint/OptimizerDriver.cpp +++ b/llvm/tools/bugpoint/OptimizerDriver.cpp @@ -38,11 +38,6 @@ namespace llvm { extern cl::opt<std::string> OutputPrefix; } -static cl::opt<bool> PreserveBitcodeUseListOrder( - "preserve-bc-uselistorder", - cl::desc("Preserve use-list order when writing LLVM bitcode."), - cl::init(true), cl::Hidden); - static cl::opt<std::string> OptCmd("opt-command", cl::init(""), cl::desc("Path to opt. (default: search path " @@ -51,7 +46,7 @@ static cl::opt<std::string> /// This writes the current "Program" to the named bitcode file. If an error /// occurs, true is returned. 
static bool writeProgramToFileAux(ToolOutputFile &Out, const Module &M) { - WriteBitcodeToFile(M, Out.os(), PreserveBitcodeUseListOrder); + WriteBitcodeToFile(M, Out.os(), /* ShouldPreserveUseListOrder */ true); Out.os().close(); if (!Out.os().has_error()) { Out.keep(); @@ -68,7 +63,7 @@ bool BugDriver::writeProgramToFile(const std::string &Filename, int FD, bool BugDriver::writeProgramToFile(int FD, const Module &M) const { raw_fd_ostream OS(FD, /*shouldClose*/ false); - WriteBitcodeToFile(M, OS, PreserveBitcodeUseListOrder); + WriteBitcodeToFile(M, OS, /* ShouldPreserveUseListOrder */ true); OS.flush(); if (!OS.has_error()) return false; @@ -155,7 +150,7 @@ bool BugDriver::runPasses(Module &Program, DiscardTemp Discard{*Temp}; raw_fd_ostream OS(Temp->FD, /*shouldClose*/ false); - WriteBitcodeToFile(Program, OS, PreserveBitcodeUseListOrder); + WriteBitcodeToFile(Program, OS, /* ShouldPreserveUseListOrder */ true); OS.flush(); if (OS.has_error()) { errs() << "Error writing bitcode file: " << Temp->TmpName << "\n"; diff --git a/llvm/tools/llvm-as/llvm-as.cpp b/llvm/tools/llvm-as/llvm-as.cpp index 2164867..200e6a5 100644 --- a/llvm/tools/llvm-as/llvm-as.cpp +++ b/llvm/tools/llvm-as/llvm-as.cpp @@ -57,11 +57,6 @@ static cl::opt<bool> cl::desc("Do not run verifier on input LLVM (dangerous!)"), cl::cat(AsCat)); -static cl::opt<bool> PreserveBitcodeUseListOrder( - "preserve-bc-uselistorder", - cl::desc("Preserve use-list order when writing LLVM bitcode."), - cl::init(true), cl::Hidden, cl::cat(AsCat)); - static cl::opt<std::string> ClDataLayout("data-layout", cl::desc("data layout string to use"), cl::value_desc("layout-string"), @@ -100,7 +95,7 @@ static void WriteOutputFile(const Module *M, const ModuleSummaryIndex *Index) { // any non-null Index along with it as a per-module Index. // If both are empty, this will give an empty module block, which is // the expected behavior. 
- WriteBitcodeToFile(*M, Out->os(), PreserveBitcodeUseListOrder, + WriteBitcodeToFile(*M, Out->os(), /* ShouldPreserveUseListOrder */ true, IndexToWrite, EmitModuleHash); else // Otherwise, with an empty Module but non-empty Index, we write a diff --git a/llvm/tools/llvm-dis/llvm-dis.cpp b/llvm/tools/llvm-dis/llvm-dis.cpp index 2b43d27..35c5409 100644 --- a/llvm/tools/llvm-dis/llvm-dis.cpp +++ b/llvm/tools/llvm-dis/llvm-dis.cpp @@ -80,11 +80,6 @@ static cl::opt<bool> cl::desc("Add informational comments to the .ll file"), cl::cat(DisCategory)); -static cl::opt<bool> PreserveAssemblyUseListOrder( - "preserve-ll-uselistorder", - cl::desc("Preserve use-list order when writing LLVM assembly."), - cl::init(false), cl::Hidden, cl::cat(DisCategory)); - static cl::opt<bool> MaterializeMetadata("materialize-metadata", cl::desc("Load module without materializing metadata, " @@ -255,7 +250,8 @@ int main(int argc, char **argv) { if (!DontPrint) { if (M) { M->removeDebugIntrinsicDeclarations(); - M->print(Out->os(), Annotator.get(), PreserveAssemblyUseListOrder); + M->print(Out->os(), Annotator.get(), + /* ShouldPreserveUseListOrder */ false); } if (Index) Index->print(Out->os()); diff --git a/llvm/tools/llvm-extract/llvm-extract.cpp b/llvm/tools/llvm-extract/llvm-extract.cpp index 69636ca..439a4a4 100644 --- a/llvm/tools/llvm-extract/llvm-extract.cpp +++ b/llvm/tools/llvm-extract/llvm-extract.cpp @@ -129,16 +129,6 @@ static cl::opt<bool> OutputAssembly("S", cl::desc("Write output as LLVM assembly"), cl::Hidden, cl::cat(ExtractCat)); -static cl::opt<bool> PreserveBitcodeUseListOrder( - "preserve-bc-uselistorder", - cl::desc("Preserve use-list order when writing LLVM bitcode."), - cl::init(true), cl::Hidden, cl::cat(ExtractCat)); - -static cl::opt<bool> PreserveAssemblyUseListOrder( - "preserve-ll-uselistorder", - cl::desc("Preserve use-list order when writing LLVM assembly."), - cl::init(false), cl::Hidden, cl::cat(ExtractCat)); - int main(int argc, char **argv) { InitLLVM X(argc, argv); @@ -421,9 +411,11 @@ int main(int argc, char **argv) { } if (OutputAssembly) - PM.addPass(PrintModulePass(Out.os(), "", PreserveAssemblyUseListOrder)); + PM.addPass( + PrintModulePass(Out.os(), "", /* ShouldPreserveUseListOrder */ false)); else if (Force || !CheckBitcodeOutputToConsole(Out.os())) - PM.addPass(BitcodeWriterPass(Out.os(), PreserveBitcodeUseListOrder)); + PM.addPass( + BitcodeWriterPass(Out.os(), /* ShouldPreserveUseListOrder */ true)); PM.run(*M, MAM); diff --git a/llvm/tools/llvm-link/llvm-link.cpp b/llvm/tools/llvm-link/llvm-link.cpp index 22ea54e..93b1fb6 100644 --- a/llvm/tools/llvm-link/llvm-link.cpp +++ b/llvm/tools/llvm-link/llvm-link.cpp @@ -110,16 +110,6 @@ static cl::opt<bool> SuppressWarnings("suppress-warnings", cl::desc("Suppress all linking warnings"), cl::init(false), cl::cat(LinkCategory)); -static cl::opt<bool> PreserveBitcodeUseListOrder( - "preserve-bc-uselistorder", - cl::desc("Preserve use-list order when writing LLVM bitcode."), - cl::init(true), cl::Hidden, cl::cat(LinkCategory)); - -static cl::opt<bool> PreserveAssemblyUseListOrder( - "preserve-ll-uselistorder", - cl::desc("Preserve use-list order when writing LLVM assembly."), - cl::init(false), cl::Hidden, cl::cat(LinkCategory)); - static cl::opt<bool> NoVerify("disable-verify", cl::desc("Do not run the verifier"), cl::Hidden, cl::cat(LinkCategory)); @@ -525,9 +515,10 @@ int main(int argc, char **argv) { errs() << "Writing bitcode...\n"; Composite->removeDebugIntrinsicDeclarations(); if (OutputAssembly) { - 
Composite->print(Out.os(), nullptr, PreserveAssemblyUseListOrder); + Composite->print(Out.os(), nullptr, /* ShouldPreserveUseListOrder */ false); } else if (Force || !CheckBitcodeOutputToConsole(Out.os())) { - WriteBitcodeToFile(*Composite, Out.os(), PreserveBitcodeUseListOrder); + WriteBitcodeToFile(*Composite, Out.os(), + /* ShouldPreserveUseListOrder */ true); } // Declare success. diff --git a/llvm/tools/opt/optdriver.cpp b/llvm/tools/opt/optdriver.cpp index d4fa6eb..2ac8de7 100644 --- a/llvm/tools/opt/optdriver.cpp +++ b/llvm/tools/opt/optdriver.cpp @@ -232,16 +232,6 @@ static cl::opt<std::string> ClDataLayout("data-layout", cl::value_desc("layout-string"), cl::init("")); -static cl::opt<bool> PreserveBitcodeUseListOrder( - "preserve-bc-uselistorder", - cl::desc("Preserve use-list order when writing LLVM bitcode."), - cl::init(true), cl::Hidden); - -static cl::opt<bool> PreserveAssemblyUseListOrder( - "preserve-ll-uselistorder", - cl::desc("Preserve use-list order when writing LLVM assembly."), - cl::init(false), cl::Hidden); - static cl::opt<bool> RunTwice("run-twice", cl::desc("Run all passes twice, re-using the " "same pass manager (legacy PM only)."), @@ -753,9 +743,9 @@ extern "C" int optMain( return runPassPipeline( argv[0], *M, TM.get(), &TLII, Out.get(), ThinLinkOut.get(), RemarksFile.get(), Pipeline, PluginList, PassBuilderCallbacks, - OK, VK, PreserveAssemblyUseListOrder, - PreserveBitcodeUseListOrder, EmitSummaryIndex, EmitModuleHash, - EnableDebugify, VerifyDebugInfoPreserve, + OK, VK, /* ShouldPreserveAssemblyUseListOrder */ false, + /* ShouldPreserveBitcodeUseListOrder */ true, EmitSummaryIndex, + EmitModuleHash, EnableDebugify, VerifyDebugInfoPreserve, EnableProfileVerification, UnifiedLTO) ? 0 : 1; @@ -877,9 +867,11 @@ extern "C" int optMain( OS = BOS.get(); } if (OutputAssembly) - Passes.add(createPrintModulePass(*OS, "", PreserveAssemblyUseListOrder)); + Passes.add(createPrintModulePass( + *OS, "", /* ShouldPreserveAssemblyUseListOrder */ false)); else - Passes.add(createBitcodeWriterPass(*OS, PreserveBitcodeUseListOrder)); + Passes.add(createBitcodeWriterPass( + *OS, /* ShouldPreserveBitcodeUseListOrder */ true)); } // Before executing passes, print the final values of the LLVM options. 
diff --git a/llvm/unittests/IR/ConstantFPRangeTest.cpp b/llvm/unittests/IR/ConstantFPRangeTest.cpp index 5bc516d..58a65b9 100644 --- a/llvm/unittests/IR/ConstantFPRangeTest.cpp +++ b/llvm/unittests/IR/ConstantFPRangeTest.cpp @@ -7,6 +7,7 @@ //===----------------------------------------------------------------------===// #include "llvm/IR/ConstantFPRange.h" +#include "llvm/ADT/APFloat.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Operator.h" #include "gtest/gtest.h" @@ -818,4 +819,110 @@ TEST_F(ConstantFPRangeTest, getWithout) { APFloat::getLargest(Sem, /*Negative=*/true), APFloat(3.0))); } +TEST_F(ConstantFPRangeTest, cast) { + const fltSemantics &F16Sem = APFloat::IEEEhalf(); + const fltSemantics &BF16Sem = APFloat::BFloat(); + const fltSemantics &F32Sem = APFloat::IEEEsingle(); + const fltSemantics &F8NanOnlySem = APFloat::Float8E4M3FN(); + // normal -> normal (exact) + EXPECT_EQ(ConstantFPRange::getNonNaN(APFloat(1.0), APFloat(2.0)).cast(F32Sem), + ConstantFPRange::getNonNaN(APFloat(1.0f), APFloat(2.0f))); + EXPECT_EQ( + ConstantFPRange::getNonNaN(APFloat(-2.0f), APFloat(-1.0f)).cast(Sem), + ConstantFPRange::getNonNaN(APFloat(-2.0), APFloat(-1.0))); + // normal -> normal (inexact) + EXPECT_EQ( + ConstantFPRange::getNonNaN(APFloat(3.141592653589793), + APFloat(6.283185307179586)) + .cast(F32Sem), + ConstantFPRange::getNonNaN(APFloat(3.14159274f), APFloat(6.28318548f))); + // normal -> subnormal + EXPECT_EQ(ConstantFPRange::getNonNaN(APFloat(-5e-8), APFloat(5e-8)) + .cast(F16Sem) + .classify(), + fcSubnormal | fcZero); + // normal -> zero + EXPECT_EQ(ConstantFPRange::getNonNaN( + APFloat::getSmallestNormalized(Sem, /*Negative=*/true), + APFloat::getSmallestNormalized(Sem, /*Negative=*/false)) + .cast(F32Sem) + .classify(), + fcZero); + // normal -> inf + EXPECT_EQ(ConstantFPRange::getNonNaN(APFloat(-65536.0), APFloat(65536.0)) + .cast(F16Sem), + ConstantFPRange::getNonNaN(F16Sem)); + // nan -> qnan + EXPECT_EQ( + ConstantFPRange::getNaNOnly(Sem, /*MayBeQNaN=*/true, /*MayBeSNaN=*/false) + .cast(F32Sem), + ConstantFPRange::getNaNOnly(F32Sem, /*MayBeQNaN=*/true, + /*MayBeSNaN=*/false)); + EXPECT_EQ( + ConstantFPRange::getNaNOnly(Sem, /*MayBeQNaN=*/false, /*MayBeSNaN=*/true) + .cast(F32Sem), + ConstantFPRange::getNaNOnly(F32Sem, /*MayBeQNaN=*/true, + /*MayBeSNaN=*/false)); + EXPECT_EQ( + ConstantFPRange::getNaNOnly(Sem, /*MayBeQNaN=*/true, /*MayBeSNaN=*/true) + .cast(F32Sem), + ConstantFPRange::getNaNOnly(F32Sem, /*MayBeQNaN=*/true, + /*MayBeSNaN=*/false)); + // For BF16 -> F32, signaling bit is still lost. 
+  EXPECT_EQ(ConstantFPRange::getNaNOnly(BF16Sem, /*MayBeQNaN=*/true,
+                                        /*MayBeSNaN=*/true)
+                .cast(F32Sem),
+            ConstantFPRange::getNaNOnly(F32Sem, /*MayBeQNaN=*/true,
+                                        /*MayBeSNaN=*/false));
+  // inf -> nan only (return full set for now)
+  EXPECT_EQ(ConstantFPRange::getNonNaN(APFloat::getInf(Sem, /*Negative=*/true),
+                                       APFloat::getInf(Sem, /*Negative=*/false))
+                .cast(F8NanOnlySem),
+            ConstantFPRange::getFull(F8NanOnlySem));
+  // other rounding modes
+  EXPECT_EQ(
+      ConstantFPRange::getNonNaN(APFloat::getSmallest(Sem, /*Negative=*/true),
+                                 APFloat::getSmallest(Sem, /*Negative=*/false))
+          .cast(F32Sem, APFloat::rmTowardNegative),
+      ConstantFPRange::getNonNaN(
+          APFloat::getSmallest(F32Sem, /*Negative=*/true),
+          APFloat::getZero(F32Sem, /*Negative=*/false)));
+  EXPECT_EQ(
+      ConstantFPRange::getNonNaN(APFloat::getSmallest(Sem, /*Negative=*/true),
+                                 APFloat::getSmallest(Sem, /*Negative=*/false))
+          .cast(F32Sem, APFloat::rmTowardPositive),
+      ConstantFPRange::getNonNaN(
+          APFloat::getZero(F32Sem, /*Negative=*/true),
+          APFloat::getSmallest(F32Sem, /*Negative=*/false)));
+  EXPECT_EQ(
+      ConstantFPRange::getNonNaN(
+          APFloat::getSmallestNormalized(Sem, /*Negative=*/true),
+          APFloat::getSmallestNormalized(Sem, /*Negative=*/false))
+          .cast(F32Sem, APFloat::rmTowardZero),
+      ConstantFPRange::getNonNaN(APFloat::getZero(F32Sem, /*Negative=*/true),
+                                 APFloat::getZero(F32Sem, /*Negative=*/false)));
+
+  EnumerateValuesInConstantFPRange(
+      ConstantFPRange::getFull(APFloat::Float8E4M3()),
+      [&](const APFloat &V) {
+        bool LosesInfo = false;
+
+        APFloat DoubleV = V;
+        DoubleV.convert(Sem, APFloat::rmNearestTiesToEven, &LosesInfo);
+        ConstantFPRange DoubleCR = ConstantFPRange(V).cast(Sem);
+        EXPECT_TRUE(DoubleCR.contains(DoubleV))
+            << "Casting " << V << " to double failed. " << DoubleCR
+            << " doesn't contain " << DoubleV;
+
+        auto &FP4Sem = APFloat::Float4E2M1FN();
+        APFloat FP4V = V;
+        FP4V.convert(FP4Sem, APFloat::rmNearestTiesToEven, &LosesInfo);
+        ConstantFPRange FP4CR = ConstantFPRange(V).cast(FP4Sem);
+        EXPECT_TRUE(FP4CR.contains(FP4V))
+            << "Casting " << V << " to FP4E2M1FN failed. " << FP4CR
" << FP4CR + << " doesn't contain " << FP4V; + }, + /*IgnoreNaNPayload=*/true); +} + } // anonymous namespace diff --git a/llvm/unittests/Support/raw_ostream_test.cpp b/llvm/unittests/Support/raw_ostream_test.cpp index fbeff37..8f9ed41 100644 --- a/llvm/unittests/Support/raw_ostream_test.cpp +++ b/llvm/unittests/Support/raw_ostream_test.cpp @@ -626,6 +626,11 @@ TEST(raw_ostreamTest, writeToDevNull) { EXPECT_TRUE(DevNullIsUsed); } +TEST(raw_ostreamTest, nullStreamZeroBufferSize) { + raw_ostream &NullStream = nulls(); + EXPECT_EQ(NullStream.GetBufferSize(), 0u); +} + TEST(raw_ostreamTest, writeToStdOut) { outs().flush(); testing::internal::CaptureStdout(); diff --git a/llvm/utils/profcheck-xfail.txt b/llvm/utils/profcheck-xfail.txt index f101624..092d63d 100644 --- a/llvm/utils/profcheck-xfail.txt +++ b/llvm/utils/profcheck-xfail.txt @@ -11,7 +11,6 @@ CodeGen/AArch64/llvm-masked-scatter-legal-for-sve.ll CodeGen/AArch64/selectopt-cast.ll CodeGen/AArch64/selectopt.ll CodeGen/AMDGPU/amdgpu-attributor-min-agpr-alloc.ll -CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll CodeGen/AMDGPU/amdgpu-codegenprepare-fdiv.ll CodeGen/AMDGPU/amdgpu-codegenprepare-sqrt.ll CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-indirect-access-asan.ll @@ -74,7 +73,6 @@ CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll CodeGen/Hexagon/loop-idiom/memmove-rt-check.ll CodeGen/NVPTX/lower-ctor-dtor.ll CodeGen/RISCV/zmmul.ll -CodeGen/SPIRV/hlsl-resources/UniqueImplicitBindingNumber.ll CodeGen/WebAssembly/memory-interleave.ll CodeGen/X86/masked_gather_scatter.ll CodeGen/X86/nocfivalue.ll @@ -85,7 +83,6 @@ DebugInfo/KeyInstructions/Generic/loop-unswitch.ll DebugInfo/X86/asan_debug_info.ll Instrumentation/AddressSanitizer/aarch64be.ll Instrumentation/AddressSanitizer/adaptive_global_redzones.ll -Instrumentation/AddressSanitizer/alloca-offset-lifetime.ll Instrumentation/AddressSanitizer/AMDGPU/adaptive_constant_global_redzones.ll Instrumentation/AddressSanitizer/AMDGPU/adaptive_global_redzones.ll Instrumentation/AddressSanitizer/AMDGPU/asan_do_not_instrument_lds.ll @@ -549,12 +546,6 @@ tools/UpdateTestChecks/update_test_checks/stable_ir_values_funcs.test tools/UpdateTestChecks/update_test_checks/stable_ir_values.test tools/UpdateTestChecks/update_test_checks/tbaa-semantics-checks.test tools/UpdateTestChecks/update_test_checks/various_ir_values_dbgrecords.test -Transforms/AggressiveInstCombine/lower-table-based-cttz-basics.ll -Transforms/AggressiveInstCombine/lower-table-based-cttz-dereferencing-pointer.ll -Transforms/AggressiveInstCombine/lower-table-based-cttz-non-argument-value.ll -Transforms/AggressiveInstCombine/lower-table-based-cttz-zero-element.ll -Transforms/AggressiveInstCombine/trunc_select_cmp.ll -Transforms/AggressiveInstCombine/trunc_select.ll Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll Transforms/AtomicExpand/AArch64/pcsections.ll @@ -819,7 +810,6 @@ Transforms/InstCombine/AMDGPU/addrspacecast.ll Transforms/InstCombine/and2.ll Transforms/InstCombine/and-fcmp.ll Transforms/InstCombine/and.ll -Transforms/InstCombine/and-or-icmp-nullptr.ll Transforms/InstCombine/and-or-icmps.ll Transforms/InstCombine/and-or-implied-cond-not.ll Transforms/InstCombine/apint-div1.ll @@ -1105,7 +1095,6 @@ Transforms/LoopSimplifyCFG/invalidate-scev-dispositions.ll Transforms/LoopSimplifyCFG/lcssa.ll Transforms/LoopSimplifyCFG/live_block_marking.ll Transforms/LoopSimplifyCFG/mssa_update.ll -Transforms/LoopSimplifyCFG/pr117537.ll Transforms/LoopSimplifyCFG/update_parents.ll 
 Transforms/LoopUnroll/peel-last-iteration-expansion-cost.ll
 Transforms/LoopUnroll/peel-last-iteration-with-guards.ll
@@ -1260,7 +1249,6 @@ Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
 Transforms/PhaseOrdering/AArch64/predicated-reduction.ll
 Transforms/PhaseOrdering/AArch64/quant_4x4.ll
 Transforms/PhaseOrdering/ARM/arm_mean_q7.ll
-Transforms/PhaseOrdering/lower-table-based-cttz.ll
 Transforms/PhaseOrdering/vector-select.ll
 Transforms/PhaseOrdering/X86/blendv-select.ll
 Transforms/PhaseOrdering/X86/merge-functions2.ll
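A brief usage sketch, not part of the patch, of the ConstantFPRange::cast API exercised by the new ConstantFPRangeTest case earlier in this diff. The function name castExample and the chosen constants are illustrative assumptions; only the getNonNaN / cast / contains calls mirror the test.

    #include "llvm/ADT/APFloat.h"
    #include "llvm/IR/ConstantFPRange.h"
    #include <cassert>
    using namespace llvm;

    // Hypothetical example (not from the patch): narrow a double range to
    // float, optionally with an explicit rounding mode.
    static void castExample() {
      const fltSemantics &F32Sem = APFloat::IEEEsingle();
      // [1.0, 2.0] in double converts exactly to [1.0f, 2.0f].
      ConstantFPRange CR =
          ConstantFPRange::getNonNaN(APFloat(1.0), APFloat(2.0));
      ConstantFPRange F32CR = CR.cast(F32Sem);
      assert(F32CR.contains(APFloat(1.5f)) && "1.5f lies inside the cast range");
      // A non-default rounding mode can be supplied, as in the
      // rmTowardNegative / rmTowardPositive cases of the test.
      ConstantFPRange Down = CR.cast(F32Sem, APFloat::rmTowardNegative);
      (void)Down;
    }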