-rw-r--r--  bolt/include/bolt/Core/BinaryFunction.h | 16
-rw-r--r--  bolt/lib/Core/BinaryContext.cpp | 96
-rw-r--r--  bolt/lib/Core/BinaryFunction.cpp | 7
-rw-r--r--  bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp | 6
-rw-r--r--  bolt/test/AArch64/Inputs/plt-gnu-ld.yaml | 8
-rw-r--r--  bolt/test/AArch64/invalid-code-padding.s | 34
-rw-r--r--  bolt/test/AArch64/plt-got.test | 5
-rw-r--r--  clang/include/clang/Basic/BuiltinsAMDGPU.def | 9
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 10
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp | 19
-rw-r--r--  clang/lib/Format/TokenAnnotator.cpp | 10
-rw-r--r--  clang/test/CIR/CodeGen/call-via-class-member-funcptr.cpp | 86
-rw-r--r--  clang/test/CIR/CodeGen/struct-init.cpp | 15
-rw-r--r--  clang/test/CodeGenOpenCL/amdgpu-cluster-dims.cl | 4
-rw-r--r--  clang/test/CodeGenOpenCL/amdgpu-features.cl | 4
-rw-r--r--  clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250.cl | 109
-rw-r--r--  clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl | 18
-rw-r--r--  clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250.cl | 14
-rw-r--r--  clang/unittests/Format/TokenAnnotatorTest.cpp | 5
-rw-r--r--  flang/lib/Optimizer/Builder/IntrinsicCall.cpp | 18
-rw-r--r--  flang/test/Lower/CUDA/cuda-device-proc.cuf | 9
-rw-r--r--  libcxx/include/__compare/strong_order.h | 42
-rw-r--r--  llvm/docs/MLGO.rst | 144
-rw-r--r--  llvm/include/llvm/ADT/IndexedMap.h | 13
-rw-r--r--  llvm/include/llvm/ADT/identity.h | 31
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsAMDGPU.td | 14
-rw-r--r--  llvm/include/llvm/Support/GlobPattern.h | 21
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 12
-rw-r--r--  llvm/lib/CodeGen/InlineSpiller.cpp | 3
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 23
-rw-r--r--  llvm/lib/Frontend/HLSL/CBuffer.cpp | 9
-rw-r--r--  llvm/lib/Support/GlobPattern.cpp | 67
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPU.td | 18
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp | 8
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNSubtarget.h | 6
-rw-r--r--  llvm/lib/Target/AMDGPU/VOP3Instructions.td | 8
-rw-r--r--  llvm/lib/Target/AMDGPU/VOP3PInstructions.td | 13
-rw-r--r--  llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp | 3
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td | 34
-rw-r--r--  llvm/lib/TargetParser/TargetParser.cpp | 2
-rw-r--r--  llvm/test/Analysis/BranchProbabilityInfo/pr22718.ll | 17
-rw-r--r--  llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll | 6
-rw-r--r--  llvm/test/Analysis/Delinearization/constant_functions_multi_dim.ll | 15
-rw-r--r--  llvm/test/Analysis/DependenceAnalysis/MIVCheckConst.ll | 4
-rw-r--r--  llvm/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll | 4
-rw-r--r--  llvm/test/Analysis/MemoryDependenceAnalysis/invariant.group-bug.ll | 13
-rw-r--r--  llvm/test/Analysis/MemorySSA/pr28880.ll | 5
-rw-r--r--  llvm/test/Analysis/MemorySSA/pr39197.ll | 34
-rw-r--r--  llvm/test/Analysis/MemorySSA/pr40038.ll | 11
-rw-r--r--  llvm/test/Analysis/MemorySSA/pr43569.ll | 8
-rw-r--r--  llvm/test/Analysis/ScalarEvolution/pr22674.ll | 4
-rw-r--r--  llvm/test/Analysis/ScalarEvolution/scev-canonical-mode.ll | 4
-rw-r--r--  llvm/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll | 7
-rw-r--r--  llvm/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll | 26
-rw-r--r--  llvm/test/CodeGen/AArch64/pr164181.ll | 640
-rw-r--r--  llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fdiv.f64.ll | 7
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fmad-formation-fmul-distribute-denormal-mode.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fmed3.bf16.ll | 4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fmed3.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fneg-combines.legal.f16.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fneg-combines.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/frem.ll | 11
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fsqrt.f64.ll | 3
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fsqrt.r600.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/inline-attr.ll | 16
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.add.min.max.ll | 191
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.exp2.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.log2.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/minmax.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/stackguard.ll | 14
-rw-r--r--  llvm/test/CodeGen/DirectX/CBufferAccess/unused.ll | 13
-rw-r--r--  llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll | 12
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll | 815
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll | 2336
-rw-r--r--  llvm/test/Transforms/ADCE/2016-09-06.ll | 4
-rw-r--r--  llvm/test/Transforms/ADCE/blocks-with-dead-term-nondeterministic.ll | 4
-rw-r--r--  llvm/test/Transforms/AddDiscriminators/basic.ll | 4
-rw-r--r--  llvm/test/Transforms/AddDiscriminators/call-nested.ll | 9
-rw-r--r--  llvm/test/Transforms/AddDiscriminators/call.ll | 7
-rw-r--r--  llvm/test/Transforms/AddDiscriminators/diamond.ll | 10
-rw-r--r--  llvm/test/Transforms/AddDiscriminators/first-only.ll | 4
-rw-r--r--  llvm/test/Transforms/AddDiscriminators/invoke.ll | 30
-rw-r--r--  llvm/test/Transforms/AddDiscriminators/multiple.ll | 4
-rw-r--r--  llvm/test/Transforms/AddDiscriminators/no-discriminators.ll | 7
-rw-r--r--  llvm/test/Transforms/AddDiscriminators/oneline.ll | 7
-rw-r--r--  llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll | 30
-rw-r--r--  llvm/test/Transforms/CodeGenPrepare/ARM/bitreverse-recognize.ll | 4
-rw-r--r--  llvm/test/Transforms/CodeGenPrepare/X86/bitreverse-hang.ll | 4
-rw-r--r--  llvm/test/Transforms/CodeGenPrepare/dom-tree.ll | 4
-rw-r--r--  llvm/test/Transforms/ConstantHoisting/X86/ehpad.ll | 7
-rw-r--r--  llvm/test/Transforms/Coroutines/coro-debug.ll | 37
-rw-r--r--  llvm/test/Transforms/Coroutines/coro-split-dbg.ll | 49
-rw-r--r--  llvm/test/Transforms/Util/dbg-user-of-aext.ll | 7
-rw-r--r--  llvm/test/Transforms/Util/libcalls-fast-math-inf-loop.ll | 14
-rw-r--r--  llvm/test/tools/llvm-ir2vec/embeddings-flowaware.ll | 2
-rw-r--r--  llvm/test/tools/llvm-ir2vec/embeddings-symbolic.ll | 2
-rw-r--r--  llvm/test/tools/llvm-ir2vec/embeddings-symbolic.mir | 2
-rw-r--r--  llvm/test/tools/llvm-ir2vec/error-handling.ll | 2
-rw-r--r--  llvm/test/tools/llvm-ir2vec/error-handling.mir | 8
-rw-r--r--  llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp | 47
-rw-r--r--  llvm/unittests/Support/GlobPatternTest.cpp | 66
-rw-r--r--  mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp | 7
-rw-r--r--  mlir/lib/Dialect/MemRef/Transforms/ExpandStridedMetadata.cpp | 87
-rw-r--r--  mlir/test/Dialect/MemRef/canonicalize.mlir | 126
-rw-r--r--  mlir/test/Dialect/MemRef/expand-strided-metadata.mlir | 127
-rw-r--r--  utils/bazel/llvm-project-overlay/llvm/BUILD.bazel | 7
-rw-r--r--  utils/bazel/llvm-project-overlay/mlir/BUILD.bazel | 55
108 files changed, 4665 insertions, 1330 deletions
diff --git a/bolt/include/bolt/Core/BinaryFunction.h b/bolt/include/bolt/Core/BinaryFunction.h
index 7b10b2d..118d579 100644
--- a/bolt/include/bolt/Core/BinaryFunction.h
+++ b/bolt/include/bolt/Core/BinaryFunction.h
@@ -2142,8 +2142,9 @@ public:
}
/// Detects whether \p Address is inside a data region in this function
- /// (constant islands).
- bool isInConstantIsland(uint64_t Address) const {
+ /// (constant islands), and optionally returns the island size starting
+ /// from the given \p Address.
+ bool isInConstantIsland(uint64_t Address, uint64_t *Size = nullptr) const {
if (!Islands)
return false;
@@ -2161,10 +2162,15 @@ public:
DataIter = std::prev(DataIter);
auto CodeIter = Islands->CodeOffsets.upper_bound(Offset);
- if (CodeIter == Islands->CodeOffsets.begin())
+ if (CodeIter == Islands->CodeOffsets.begin() ||
+ *std::prev(CodeIter) <= *DataIter) {
+ if (Size)
+ *Size = (CodeIter == Islands->CodeOffsets.end() ? getMaxSize()
+ : *CodeIter) -
+ Offset;
return true;
-
- return *std::prev(CodeIter) <= *DataIter;
+ }
+ return false;
}
uint16_t getConstantIslandAlignment() const;
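
To illustrate the lookup above, here is a standalone sketch of the same logic (editorial, with hypothetical stand-ins for the Islands fields; the patch itself is authoritative):

    #include <cstdint>
    #include <iterator>
    #include <set>

    struct IslandInfo {
      std::set<uint64_t> DataOffsets; // offsets where data regions begin
      std::set<uint64_t> CodeOffsets; // offsets where code resumes
    };

    // Returns true if Offset lies in a data region; when Size is non-null,
    // also reports the bytes remaining up to the next code marker (or the
    // function's maximum size if no code follows).
    bool isInIsland(const IslandInfo &I, uint64_t Offset, uint64_t MaxSize,
                    uint64_t *Size = nullptr) {
      auto DataIter = I.DataOffsets.upper_bound(Offset);
      if (DataIter == I.DataOffsets.begin())
        return false; // no data region starts at or before Offset
      DataIter = std::prev(DataIter);

      auto CodeIter = I.CodeOffsets.upper_bound(Offset);
      // Inside an island iff the most recent marker at or before Offset is a
      // data marker rather than a code marker.
      if (CodeIter == I.CodeOffsets.begin() ||
          *std::prev(CodeIter) <= *DataIter) {
        if (Size)
          *Size =
              (CodeIter == I.CodeOffsets.end() ? MaxSize : *CodeIter) - Offset;
        return true;
      }
      return false;
    }
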
diff --git a/bolt/lib/Core/BinaryContext.cpp b/bolt/lib/Core/BinaryContext.cpp
index 7dded4c..c33540a 100644
--- a/bolt/lib/Core/BinaryContext.cpp
+++ b/bolt/lib/Core/BinaryContext.cpp
@@ -77,6 +77,11 @@ cl::opt<std::string> CompDirOverride(
"location, which is used with DW_AT_dwo_name to construct a path "
"to *.dwo files."),
cl::Hidden, cl::init(""), cl::cat(BoltCategory));
+
+static cl::opt<bool>
+ FailOnInvalidPadding("fail-on-invalid-padding", cl::Hidden, cl::init(false),
+ cl::desc("treat invalid code padding as error"),
+ cl::ZeroOrMore, cl::cat(BoltCategory));
} // namespace opts
namespace llvm {
@@ -942,8 +947,7 @@ std::string BinaryContext::generateJumpTableName(const BinaryFunction &BF,
}
bool BinaryContext::hasValidCodePadding(const BinaryFunction &BF) {
- // FIXME: aarch64 support is missing.
- if (!isX86())
+ if (!isX86() && !isAArch64())
return true;
if (BF.getSize() == BF.getMaxSize())
@@ -973,14 +977,26 @@ bool BinaryContext::hasValidCodePadding(const BinaryFunction &BF) {
return Offset - StartOffset;
};
- // Skip a sequence of zero bytes.
+ // Skip a sequence of zero bytes. For AArch64, skip at most 4 bytes at a
+ // time, since the zeros that follow may belong to a constant island or a
+ // veneer.
auto skipZeros = [&]() {
const uint64_t StartOffset = Offset;
- for (; Offset < BF.getMaxSize(); ++Offset)
- if ((*FunctionData)[Offset] != 0)
+ uint64_t CurrentOffset = Offset;
+ for (; CurrentOffset < BF.getMaxSize() &&
+ (!isAArch64() || CurrentOffset < StartOffset + 4);
+ ++CurrentOffset)
+ if ((*FunctionData)[CurrentOffset] != 0)
break;
- return Offset - StartOffset;
+ uint64_t NumZeros = CurrentOffset - StartOffset;
+ if (isAArch64())
+ NumZeros &= ~((uint64_t)0x3);
+
+ if (NumZeros == 0)
+ return false;
+ Offset += NumZeros;
+ InstrAddress += NumZeros;
+ return true;
};
// Accept the whole padding area filled with breakpoints.
@@ -993,6 +1009,8 @@ bool BinaryContext::hasValidCodePadding(const BinaryFunction &BF) {
// Some functions have a jump to the next function or to the padding area
// inserted after the body.
auto isSkipJump = [&](const MCInst &Instr) {
+ if (!isX86())
+ return false;
uint64_t TargetAddress = 0;
if (MIB->isUnconditionalBranch(Instr) &&
MIB->evaluateBranch(Instr, InstrAddress, InstrSize, TargetAddress)) {
@@ -1004,34 +1022,73 @@ bool BinaryContext::hasValidCodePadding(const BinaryFunction &BF) {
return false;
};
+ // For veneers that are not already covered by binary functions, only those
+ // that handleAArch64Veneer() can recognize are checked here.
+ auto skipAArch64Veneer = [&]() {
+ if (!isAArch64() || Offset >= BF.getMaxSize())
+ return false;
+ BinaryFunction *BFVeneer = getBinaryFunctionContainingAddress(InstrAddress);
+ if (BFVeneer) {
+ // A binary function may have been created to point to this veneer.
+ Offset += BFVeneer->getSize();
+ assert(Offset <= BF.getMaxSize() &&
+ "AArch64 veneeer goes past the max size of function");
+ InstrAddress += BFVeneer->getSize();
+ return true;
+ }
+ const uint64_t AArch64VeneerSize = 12;
+ if (Offset + AArch64VeneerSize <= BF.getMaxSize() &&
+ handleAArch64Veneer(InstrAddress, /*MatchOnly*/ true)) {
+ this->errs() << "BOLT-WARNING: found unmarked AArch64 veneer at 0x"
+ << Twine::utohexstr(BF.getAddress() + Offset) << '\n';
+ Offset += AArch64VeneerSize;
+ InstrAddress += AArch64VeneerSize;
+ return true;
+ }
+ return false;
+ };
+
+ auto skipAArch64ConstantIsland = [&]() {
+ if (!isAArch64() || Offset >= BF.getMaxSize())
+ return false;
+ uint64_t Size;
+ if (BF.isInConstantIsland(InstrAddress, &Size)) {
+ Offset += Size;
+ InstrAddress += Size;
+ return true;
+ }
+ return false;
+ };
+
// Skip over nops, jumps, and zero padding. Allow interleaving (this happens).
- while (skipInstructions(isNoop) || skipInstructions(isSkipJump) ||
+ // For AArch64 also check veneers and skip constant islands.
+ while (skipAArch64Veneer() || skipAArch64ConstantIsland() ||
+ skipInstructions(isNoop) || skipInstructions(isSkipJump) ||
skipZeros())
;
if (Offset == BF.getMaxSize())
return true;
- if (opts::Verbosity >= 1) {
- this->errs() << "BOLT-WARNING: bad padding at address 0x"
- << Twine::utohexstr(BF.getAddress() + BF.getSize())
- << " starting at offset " << (Offset - BF.getSize())
- << " in function " << BF << '\n'
- << FunctionData->slice(BF.getSize(),
- BF.getMaxSize() - BF.getSize())
- << '\n';
- }
-
+ this->errs() << "BOLT-WARNING: bad padding at address 0x"
+ << Twine::utohexstr(BF.getAddress() + BF.getSize())
+ << " starting at offset " << (Offset - BF.getSize())
+ << " in function " << BF << '\n'
+ << FunctionData->slice(BF.getSize(),
+ BF.getMaxSize() - BF.getSize())
+ << '\n';
return false;
}
void BinaryContext::adjustCodePadding() {
+ uint64_t NumInvalid = 0;
for (auto &BFI : BinaryFunctions) {
BinaryFunction &BF = BFI.second;
if (!shouldEmit(BF))
continue;
if (!hasValidCodePadding(BF)) {
+ NumInvalid++;
if (HasRelocations) {
this->errs() << "BOLT-WARNING: function " << BF
<< " has invalid padding. Ignoring the function\n";
@@ -1041,6 +1098,11 @@ void BinaryContext::adjustCodePadding() {
}
}
}
+ if (NumInvalid && opts::FailOnInvalidPadding) {
+ this->errs() << "BOLT-ERROR: found " << NumInvalid
+ << " instance(s) of invalid code padding\n";
+ exit(1);
+ }
}
MCSymbol *BinaryContext::registerNameAtAddress(StringRef Name, uint64_t Address,
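
The AArch64 zero-skipper above deliberately consumes at most one 4-byte instruction slot per call and rounds the run down to a whole slot, so the enclosing while-loop gets to re-probe for constant islands and veneers at every word boundary. A minimal sketch of that rounding (editorial; Data and MaxSize are hypothetical stand-ins for the function bytes):

    #include <cstdint>

    // One probe: scan at most 4 zero bytes, then round the run down to a
    // whole AArch64 instruction slot. Returns the bytes consumed (0 or 4).
    uint64_t skipZeroSlot(const uint8_t *Data, uint64_t Offset,
                          uint64_t MaxSize) {
      uint64_t End = Offset;
      while (End < MaxSize && End < Offset + 4 && Data[End] == 0)
        ++End;
      uint64_t NumZeros = End - Offset;
      return NumZeros & ~uint64_t(3); // 1..3 zeros -> 0; 4 zeros -> 4
    }
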
diff --git a/bolt/lib/Core/BinaryFunction.cpp b/bolt/lib/Core/BinaryFunction.cpp
index 4dfd4ba..776f4ae 100644
--- a/bolt/lib/Core/BinaryFunction.cpp
+++ b/bolt/lib/Core/BinaryFunction.cpp
@@ -1427,10 +1427,9 @@ Error BinaryFunction::disassemble() {
!(BC.isAArch64() &&
BC.handleAArch64Veneer(TargetAddress, /*MatchOnly*/ true))) {
// Result of __builtin_unreachable().
- LLVM_DEBUG(dbgs() << "BOLT-DEBUG: jump past end detected at 0x"
- << Twine::utohexstr(AbsoluteInstrAddr)
- << " in function " << *this
- << " : replacing with nop.\n");
+ errs() << "BOLT-WARNING: jump past end detected at 0x"
+ << Twine::utohexstr(AbsoluteInstrAddr) << " in function "
+ << *this << " : replacing with nop.\n";
BC.MIB->createNoop(Instruction);
if (IsCondBranch) {
// Register branch offset for profile validation.
diff --git a/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp b/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp
index 6954cb2..7769162 100644
--- a/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp
+++ b/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp
@@ -157,6 +157,10 @@ public:
MCPhysReg getStackPointer() const override { return AArch64::SP; }
MCPhysReg getFramePointer() const override { return AArch64::FP; }
+ bool isBreakpoint(const MCInst &Inst) const override {
+ return Inst.getOpcode() == AArch64::BRK;
+ }
+
bool isPush(const MCInst &Inst) const override {
return isStoreToStack(Inst);
};
@@ -2153,7 +2157,7 @@ public:
--I;
Address -= 4;
- if (I->getOpcode() != AArch64::ADRP ||
+ if (I != Begin || I->getOpcode() != AArch64::ADRP ||
MCPlus::getNumPrimeOperands(*I) < 2 || !I->getOperand(0).isReg() ||
!I->getOperand(1).isImm() || I->getOperand(0).getReg() != AArch64::X16)
return 0;
diff --git a/bolt/test/AArch64/Inputs/plt-gnu-ld.yaml b/bolt/test/AArch64/Inputs/plt-gnu-ld.yaml
index 1e3353a..f6dc60d 100644
--- a/bolt/test/AArch64/Inputs/plt-gnu-ld.yaml
+++ b/bolt/test/AArch64/Inputs/plt-gnu-ld.yaml
@@ -93,7 +93,7 @@ Sections:
Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
Address: 0x400510
AddressAlign: 0x8
- Content: 1D0080D21E0080D2E50300AAE10340F9E2230091E603009100000090002015910300009063201B910400009084201D91E0FFFF97EBFFFF972F0000148000009000F047F9400000B4E2FFFF17C0035FD6800000B000000191810000B0210001913F0000EBC00000540100009021B443F9610000B4F00301AA00021FD6C0035FD6800000B000000191810000B021000191210000CB22FC7FD3410C818BFF0781EB21FC4193C00000540200009042B843F9620000B4F00302AA00021FD6C0035FD6FD7BBEA9FD030091F30B00F9930000B06002413980000035DEFFFF972000805260020139F30B40F9FD7BC2A8C0035FD6E4FFFF17FF8300D1FD7B01A9FD430091BFC31FB8E1230091E8DD9752A8D5BB72E80300B9E80B00B9E0130091FF0700B9880000B00900009029C11291092500F9082540F9820080D200013FD6E90340B9E80740B90801096BA00000540100001428008052A8C31FB814000014880000B00900009029411391092900F9082940F9E0230091E1031F2A820080D200013FD6E80B40B9A80000340100001428008052A8C31FB8050000140000009000E01D9194FFFF9701000014A0C35FB8FD7B41A9FF830091C0035FD6FD7BBCA9FD030091F35301A99400009094023891F55B02A995000090B5E23791940215CBF603002AF76303A9F70301AAF80302AA5DFFFF97FF0F94EB6001005494FE4393130080D2A37A73F8E20318AA73060091E10317AAE003162A60003FD69F0213EB21FFFF54F35341A9F55B42A9F76343A9FD7BC4A8C0035FD61F2003D5C0035FD6
+ Content: 1D0080D21E0080D2E50300AAE10340F9E2230091E603009100000090002015910300009063201B910400009084201D91E0FFFF97EBFFFF972F0000148000009000F047F9400000B4E2FFFF17C0035FD6800000B000000191810000B0210001913F0000EBC00000540100009021B443F9610000B4F00301AA00021FD6C0035FD6800000B000000191810000B021000191210000CB22FC7FD3410C818BFF0781EB21FC4193C00000540200009042B843F9620000B4F00302AA00021FD6C0035FD6FD7BBEA9FD030091F30B00F9930000B06002413980000035DEFFFF972000805260020139F30B40F9FD7BC2A8C0035FD6E4FFFF17FF8300D1FD7B01A9FD430091BFC31FB8E1230091E8DD9752A8D5BB72E80300B9E80B00B9E0130091FF0700B9880000B00900009029C11291092500F9082540F9820080D200013FD6E90340B9E80740B90801096BA00000540100001428008052A8C31FB814000014880000B00900009029411391092900F9082940F9E0230091E1031F2A820080D200013FD6E80B40B9A80000340100001428008052A8C31FB8050000140000009000E01D9194FFFF9701000014A0C35FB8FD7B41A9FF830091C0035FD6FD7BBCA9FD030091F35301A99400009094023891F55B02A995000090B5E23791940215CBF603002AF76303A9F70301AAF80302AA60003FD6FF0F94EB6001005494FE4393130080D2A37A73F8E20318AA73060091E10317AAE003162A60003FD69F0213EB21FFFF54F35341A9F55B42A9F76343A9FD7BC4A8C0035FD61F2003D5C0035FD6
- Name: .rodata
Type: SHT_PROGBITS
Flags: [ SHF_ALLOC ]
@@ -435,6 +435,12 @@ Symbols:
Binding: STB_GLOBAL
Value: 0x400604
Size: 0xC4
+ - Name: post_main
+ Type: STT_FUNC
+ Section: .text
+ Binding: STB_GLOBAL
+ Value: 0x4006C8
+ Size: 0x7C
- Name: 'printf@@GLIBC_2.17'
Type: STT_FUNC
Binding: STB_GLOBAL
diff --git a/bolt/test/AArch64/invalid-code-padding.s b/bolt/test/AArch64/invalid-code-padding.s
new file mode 100644
index 0000000..4706e60
--- /dev/null
+++ b/bolt/test/AArch64/invalid-code-padding.s
@@ -0,0 +1,34 @@
+# RUN: %clang %cflags %s -o %t.so -Wl,-q
+# RUN: not llvm-bolt %t.so -o %t.bolt --fail-on-invalid-padding \
+# RUN: 2>&1 | FileCheck %s
+# CHECK: BOLT-ERROR: found 1 instance(s) of invalid code padding
+
+ .text
+ .align 2
+ .global foo
+ .type foo, %function
+foo:
+ cmp x0, x1
+ b.eq .Ltmp1
+ adrp x1, jmptbl
+ add x1, x1, :lo12:jmptbl
+ ldrsw x2, [x1, x2, lsl #2]
+ br x2
+ b .Ltmp1
+.Ltmp2:
+ add x0, x0, x1
+ ret
+ .size foo, .-foo
+
+.Ltmp1:
+ add x0, x0, x1
+ b .Ltmp2
+
+ # Dummy relocation to force relocation mode
+ .reloc 0, R_AARCH64_NONE
+
+ .section .rodata, "a"
+ .align 2
+ .global jmptbl
+jmptbl:
+ .word .text+0x28 - .
diff --git a/bolt/test/AArch64/plt-got.test b/bolt/test/AArch64/plt-got.test
index be1c095..18b1ea4 100644
--- a/bolt/test/AArch64/plt-got.test
+++ b/bolt/test/AArch64/plt-got.test
@@ -1,7 +1,8 @@
// This test checks .plt.got handling by BOLT
RUN: yaml2obj %p/Inputs/plt-got.yaml &> %t.exe
-RUN: llvm-bolt %t.exe -o %t.bolt --print-disasm --print-only=_start/1 | \
-RUN: FileCheck %s
+RUN: llvm-bolt %t.exe -o %t.bolt --print-disasm --print-only=_start/1 \
+RUN: 2>&1 | FileCheck %s
CHECK: bl abort@PLT
+CHECK: BOLT-WARNING: found unmarked AArch64 veneer
diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.def b/clang/include/clang/Basic/BuiltinsAMDGPU.def
index 8428fa9..01d121b 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.def
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.def
@@ -830,6 +830,15 @@ TARGET_BUILTIN(__builtin_amdgcn_perm_pk16_b4_u4, "V2UiUiUiV2Ui", "nc", "tensor-c
TARGET_BUILTIN(__builtin_amdgcn_perm_pk16_b6_u4, "V3UiUiULiV2Ui", "nc", "tensor-cvt-lut-insts")
TARGET_BUILTIN(__builtin_amdgcn_perm_pk16_b8_u4, "V4UiULiULiV2Ui", "nc", "tensor-cvt-lut-insts")
+TARGET_BUILTIN(__builtin_amdgcn_add_max_i32, "iiiiIb", "nc", "add-min-max-insts")
+TARGET_BUILTIN(__builtin_amdgcn_add_max_u32, "UiUiUiUiIb", "nc", "add-min-max-insts")
+TARGET_BUILTIN(__builtin_amdgcn_add_min_i32, "iiiiIb", "nc", "add-min-max-insts")
+TARGET_BUILTIN(__builtin_amdgcn_add_min_u32, "UiUiUiUiIb", "nc", "add-min-max-insts")
+TARGET_BUILTIN(__builtin_amdgcn_pk_add_max_i16, "V2sV2sV2sV2sIb", "nc", "pk-add-min-max-insts")
+TARGET_BUILTIN(__builtin_amdgcn_pk_add_max_u16, "V2UsV2UsV2UsV2UsIb", "nc", "pk-add-min-max-insts")
+TARGET_BUILTIN(__builtin_amdgcn_pk_add_min_i16, "V2sV2sV2sV2sIb", "nc", "pk-add-min-max-insts")
+TARGET_BUILTIN(__builtin_amdgcn_pk_add_min_u16, "V2UsV2UsV2UsV2UsIb", "nc", "pk-add-min-max-insts")
+
// GFX1250 WMMA builtins
TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x4_f32, "V8fIbV2fIbV2fIsV8fIbIb", "nc", "gfx1250-insts,wavefrontsize32")
TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x32_bf16, "V8fIbV16yIbV16yIsV8fIbIb", "nc", "gfx1250-insts,wavefrontsize32")
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index cc5050a..9df88ad 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -1820,10 +1820,12 @@ CIRGenCallee CIRGenFunction::emitCallee(const clang::Expr *e) {
// Resolve direct calls.
const auto *funcDecl = cast<FunctionDecl>(declRef->getDecl());
return emitDirectCallee(funcDecl);
- } else if (isa<MemberExpr>(e)) {
- cgm.errorNYI(e->getSourceRange(),
- "emitCallee: call to member function is NYI");
- return {};
+ } else if (auto me = dyn_cast<MemberExpr>(e)) {
+ if (const auto *fd = dyn_cast<FunctionDecl>(me->getMemberDecl())) {
+ emitIgnoredExpr(me->getBase());
+ return emitDirectCallee(fd);
+ }
+ // Else fall through to the indirect reference handling below.
} else if (auto *pde = dyn_cast<CXXPseudoDestructorExpr>(e)) {
return CIRGenCallee::forPseudoDestructor(pde);
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
index 800262a..8f05014 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
@@ -179,8 +179,23 @@ bool ConstantAggregateBuilder::add(mlir::TypedAttr typedAttr, CharUnits offset,
}
// Uncommon case: constant overlaps what we've already created.
- cgm.errorNYI("overlapping constants");
- return false;
+ std::optional<size_t> firstElemToReplace = splitAt(offset);
+ if (!firstElemToReplace)
+ return false;
+
+ CharUnits cSize = getSize(typedAttr);
+ std::optional<size_t> lastElemToReplace = splitAt(offset + cSize);
+ if (!lastElemToReplace)
+ return false;
+
+ assert((firstElemToReplace == lastElemToReplace || allowOverwrite) &&
+ "unexpectedly overwriting field");
+
+ Element newElt(typedAttr, offset);
+ replace(elements, *firstElemToReplace, *lastElemToReplace, {newElt});
+ size = std::max(size, offset + cSize);
+ naturalLayout = false;
+ return true;
}
bool ConstantAggregateBuilder::addBits(llvm::APInt bits, uint64_t offsetInBits,
diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp
index 25971d2..c97a9e8 100644
--- a/clang/lib/Format/TokenAnnotator.cpp
+++ b/clang/lib/Format/TokenAnnotator.cpp
@@ -3791,18 +3791,12 @@ static bool isFunctionDeclarationName(const LangOptions &LangOpts,
if (Current.is(TT_FunctionDeclarationName))
return true;
- if (!Current.Tok.getIdentifierInfo())
+ if (Current.isNoneOf(tok::identifier, tok::kw_operator))
return false;
const auto *Prev = Current.getPreviousNonComment();
assert(Prev);
- if (Prev->is(tok::coloncolon))
- Prev = Prev->Previous;
-
- if (!Prev)
- return false;
-
const auto &Previous = *Prev;
if (const auto *PrevPrev = Previous.getPreviousNonComment();
@@ -3851,6 +3845,8 @@ static bool isFunctionDeclarationName(const LangOptions &LangOpts,
// Find parentheses of parameter list.
if (Current.is(tok::kw_operator)) {
+ if (Line.startsWith(tok::kw_friend))
+ return true;
if (Previous.Tok.getIdentifierInfo() &&
Previous.isNoneOf(tok::kw_return, tok::kw_co_return)) {
return true;
diff --git a/clang/test/CIR/CodeGen/call-via-class-member-funcptr.cpp b/clang/test/CIR/CodeGen/call-via-class-member-funcptr.cpp
new file mode 100644
index 0000000..dbde454
--- /dev/null
+++ b/clang/test/CIR/CodeGen/call-via-class-member-funcptr.cpp
@@ -0,0 +1,86 @@
+// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR
+// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefix=LLVM
+// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s --check-prefix=OGCG
+
+class A {
+public:
+ static char *b(int);
+};
+
+int h=0;
+
+class F {
+public:
+ const char *b();
+ A g;
+};
+
+const char *F::b() { return g.b(h); }
+
+void fn1() { F f1; }
+
+// CIR: cir.func {{.*}} @_ZN1F1bEv
+// CIR: %[[H_PTR:.*]] = cir.get_global @h : !cir.ptr<!s32i>
+// CIR: %[[H_VAL:.*]] = cir.load{{.*}} %[[H_PTR]] : !cir.ptr<!s32i>, !s32i
+// CIR: %[[RET:.*]] = cir.call @_ZN1A1bEi(%[[H_VAL]]) : (!s32i) -> !cir.ptr<!s8i>
+
+// LLVM: define {{.*}} ptr @_ZN1F1bEv
+// LLVM: %[[VAR_H:.*]] = load i32, ptr @h
+// LLVM: %[[RET:.*]] = call ptr @_ZN1A1bEi(i32 %[[VAR_H]])
+
+// OGCG: define {{.*}} ptr @_ZN1F1bEv
+// OGCG: %[[VAR_H:.*]] = load i32, ptr @h
+// OGCG: %[[RET:.*]] = call noundef ptr @_ZN1A1bEi(i32 noundef %[[VAR_H]])
+
+class B {
+public:
+ B();
+ int (&indirect_callee_int_ref)(int);
+};
+
+class C {
+public:
+ int call_indirect(int v) { return inner.indirect_callee_int_ref(v); };
+ B inner;
+};
+
+void fn2() { C c1; c1.call_indirect(2); }
+
+// CIR: cir.func {{.*}} @_ZN1C13call_indirectEi(%[[THIS_ARG:.*]]: !cir.ptr<!rec_C> {{.*}}, %[[V_ARG:.*]]: !s32i {{.*}}) -> !s32i
+// CIR: %[[THIS_ADDR:.*]] = cir.alloca !cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!rec_C>>, ["this", init]
+// CIR: %[[V_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["v", init]
+// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
+// CIR: cir.store %[[V_ARG]], %[[V_ADDR]]
+// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
+// CIR: %[[INNER:.*]] = cir.get_member %[[THIS]][0] {name = "inner"} : !cir.ptr<!rec_C> -> !cir.ptr<!rec_B>
+// CIR: %[[INDIRECT_CALLEE_PTR:.*]] = cir.get_member %[[INNER]][0] {name = "indirect_callee_int_ref"}
+// CIR: %[[INDIRECT_CALLEE:.*]] = cir.load %[[INDIRECT_CALLEE_PTR]]
+// CIR: %[[V:.*]] = cir.load{{.*}} %[[V_ADDR]] : !cir.ptr<!s32i>, !s32i
+// CIR: %[[RET:.*]] = cir.call %[[INDIRECT_CALLEE]](%[[V]])
+
+// LLVM: define {{.*}} i32 @_ZN1C13call_indirectEi(ptr %[[THIS_ARG:.*]], i32 %[[V_ARG:.*]])
+// LLVM: %[[THIS_ADDR:.*]] = alloca ptr
+// LLVM: %[[V_ADDR:.*]] = alloca i32
+// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// LLVM: store i32 %[[V_ARG]], ptr %[[V_ADDR]]
+// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// LLVM: %[[INNER:.*]] = getelementptr %class.C, ptr %[[THIS]], i32 0, i32 0
+// LLVM: %[[INDIRECT_CALLEE_PTR:.*]] = getelementptr %class.B, ptr %[[INNER]], i32 0, i32 0
+// LLVM: %[[INDIRECT_CALLEE:.*]] = load ptr, ptr %[[INDIRECT_CALLEE_PTR]]
+// LLVM: %[[V:.*]] = load i32, ptr %[[V_ADDR]]
+// LLVM: %[[RET:.*]] = call i32 %[[INDIRECT_CALLEE]](i32 %[[V]])
+
+// OGCG: define {{.*}} i32 @_ZN1C13call_indirectEi(ptr {{.*}} %[[THIS_ARG:.*]], i32 {{.*}} %[[V_ARG:.*]])
+// OGCG: %[[THIS_ADDR:.*]] = alloca ptr
+// OGCG: %[[V_ADDR:.*]] = alloca i32
+// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// OGCG: store i32 %[[V_ARG]], ptr %[[V_ADDR]]
+// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// OGCG: %[[INNER:.*]] = getelementptr inbounds nuw %class.C, ptr %[[THIS]], i32 0, i32 0
+// OGCG: %[[INDIRECT_CALLEE_PTR:.*]] = getelementptr inbounds nuw %class.B, ptr %[[INNER]], i32 0, i32 0
+// OGCG: %[[INDIRECT_CALLEE:.*]] = load ptr, ptr %[[INDIRECT_CALLEE_PTR]]
+// OGCG: %[[V:.*]] = load i32, ptr %[[V_ADDR]]
+// OGCG: %[[RET:.*]] = call noundef i32 %[[INDIRECT_CALLEE]](i32 noundef %[[V]])
diff --git a/clang/test/CIR/CodeGen/struct-init.cpp b/clang/test/CIR/CodeGen/struct-init.cpp
index cb50999..63e13dd 100644
--- a/clang/test/CIR/CodeGen/struct-init.cpp
+++ b/clang/test/CIR/CodeGen/struct-init.cpp
@@ -5,6 +5,21 @@
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t.ll
// RUN: FileCheck --check-prefix=OGCG --input-file=%t.ll %s
+struct BitfieldStruct {
+ unsigned int a:4;
+ unsigned int b:14;
+ unsigned int c:14;
+};
+
+BitfieldStruct overlapping_init = { 3, 2, 1 };
+
+// This is unintuitive. The bitfields are initialized via a struct of byte
+// constants occupying the same storage, with the packed value split into
+// individual bytes.
+
+// CIR: cir.global external @overlapping_init = #cir.const_record<{#cir.int<35> : !u8i, #cir.int<0> : !u8i, #cir.int<4> : !u8i, #cir.int<0> : !u8i}> : !rec_anon_struct
+// LLVM: @overlapping_init = global { i8, i8, i8, i8 } { i8 35, i8 0, i8 4, i8 0 }
+// OGCG: @overlapping_init = global { i8, i8, i8, i8 } { i8 35, i8 0, i8 4, i8 0 }
+
struct S {
int a, b, c;
};
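
For the record, the expected bytes above can be worked out by hand (this check is editorial, not part of the patch): a=3 occupies bits 0-3, b=2 bits 4-17, c=1 bits 18-31, and the 32-bit value is emitted as little-endian bytes.

    #include <cassert>
    #include <cstdint>

    int main() {
      // a:4 = 3, b:14 = 2, c:14 = 1, packed from bit 0 upward.
      uint32_t packed = 3u | (2u << 4) | (1u << 18); // 0x00040023
      uint8_t bytes[4] = {uint8_t(packed), uint8_t(packed >> 8),
                          uint8_t(packed >> 16), uint8_t(packed >> 24)};
      // Matches the emitted initializer { i8 35, i8 0, i8 4, i8 0 }.
      assert(bytes[0] == 35 && bytes[1] == 0 && bytes[2] == 4 && bytes[3] == 0);
    }
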
diff --git a/clang/test/CodeGenOpenCL/amdgpu-cluster-dims.cl b/clang/test/CodeGenOpenCL/amdgpu-cluster-dims.cl
index 8c3e5b7..14fbeb2 100644
--- a/clang/test/CodeGenOpenCL/amdgpu-cluster-dims.cl
+++ b/clang/test/CodeGenOpenCL/amdgpu-cluster-dims.cl
@@ -26,8 +26,8 @@ kernel void foo(global int *p) { *p = 1; }
// CHECK-NEXT: ret void
//
//.
-// CHECK: attributes #[[ATTR0]] = { convergent norecurse nounwind "amdgpu-cluster-dims"="0,0,0" "amdgpu-flat-work-group-size"="1,256" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="gfx1250" "target-features"="+16-bit-insts,+ashr-pk-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-fmin-fmax-global-f32,+atomic-fmin-fmax-global-f64,+atomic-global-pk-add-bf16-inst,+bf16-cvt-insts,+bf16-pk-insts,+bf16-trans-insts,+bitop3-insts,+ci-insts,+clusters,+dl-insts,+dot7-insts,+dot8-insts,+dpp,+fp8-conversion-insts,+fp8e5m3-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx1250-insts,+gfx8-insts,+gfx9-insts,+permlane16-swap,+prng-inst,+setprio-inc-wg-inst,+tanh-insts,+tensor-cvt-lut-insts,+transpose-load-f4f6-insts,+vmem-pref-insts,+wavefrontsize32" "uniform-work-group-size"="false" }
-// CHECK: attributes #[[ATTR1]] = { alwaysinline convergent norecurse nounwind "amdgpu-cluster-dims"="0,0,0" "amdgpu-flat-work-group-size"="1,256" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="gfx1250" "target-features"="+16-bit-insts,+ashr-pk-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-fmin-fmax-global-f32,+atomic-fmin-fmax-global-f64,+atomic-global-pk-add-bf16-inst,+bf16-cvt-insts,+bf16-pk-insts,+bf16-trans-insts,+bitop3-insts,+ci-insts,+clusters,+dl-insts,+dot7-insts,+dot8-insts,+dpp,+fp8-conversion-insts,+fp8e5m3-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx1250-insts,+gfx8-insts,+gfx9-insts,+permlane16-swap,+prng-inst,+setprio-inc-wg-inst,+tanh-insts,+tensor-cvt-lut-insts,+transpose-load-f4f6-insts,+vmem-pref-insts,+wavefrontsize32" }
+// CHECK: attributes #[[ATTR0]] = { convergent norecurse nounwind "amdgpu-cluster-dims"="0,0,0" "amdgpu-flat-work-group-size"="1,256" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="gfx1250" "target-features"="+16-bit-insts,+add-min-max-insts,+ashr-pk-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-fmin-fmax-global-f32,+atomic-fmin-fmax-global-f64,+atomic-global-pk-add-bf16-inst,+bf16-cvt-insts,+bf16-pk-insts,+bf16-trans-insts,+bitop3-insts,+ci-insts,+clusters,+dl-insts,+dot7-insts,+dot8-insts,+dpp,+fp8-conversion-insts,+fp8e5m3-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx1250-insts,+gfx8-insts,+gfx9-insts,+permlane16-swap,+pk-add-min-max-insts,+prng-inst,+setprio-inc-wg-inst,+tanh-insts,+tensor-cvt-lut-insts,+transpose-load-f4f6-insts,+vmem-pref-insts,+wavefrontsize32" "uniform-work-group-size"="false" }
+// CHECK: attributes #[[ATTR1]] = { alwaysinline convergent norecurse nounwind "amdgpu-cluster-dims"="0,0,0" "amdgpu-flat-work-group-size"="1,256" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="gfx1250" "target-features"="+16-bit-insts,+add-min-max-insts,+ashr-pk-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-fmin-fmax-global-f32,+atomic-fmin-fmax-global-f64,+atomic-global-pk-add-bf16-inst,+bf16-cvt-insts,+bf16-pk-insts,+bf16-trans-insts,+bitop3-insts,+ci-insts,+clusters,+dl-insts,+dot7-insts,+dot8-insts,+dpp,+fp8-conversion-insts,+fp8e5m3-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx1250-insts,+gfx8-insts,+gfx9-insts,+permlane16-swap,+pk-add-min-max-insts,+prng-inst,+setprio-inc-wg-inst,+tanh-insts,+tensor-cvt-lut-insts,+transpose-load-f4f6-insts,+vmem-pref-insts,+wavefrontsize32" }
// CHECK: attributes #[[ATTR2]] = { convergent nounwind }
//.
// CHECK: [[META0:![0-9]+]] = !{i32 1, !"amdhsa_code_object_version", i32 600}
diff --git a/clang/test/CodeGenOpenCL/amdgpu-features.cl b/clang/test/CodeGenOpenCL/amdgpu-features.cl
index 7cc83c0..9bd096f 100644
--- a/clang/test/CodeGenOpenCL/amdgpu-features.cl
+++ b/clang/test/CodeGenOpenCL/amdgpu-features.cl
@@ -109,8 +109,8 @@
// GFX1153: "target-features"="+16-bit-insts,+atomic-fadd-rtn-insts,+atomic-fmin-fmax-global-f32,+ci-insts,+dl-insts,+dot10-insts,+dot12-insts,+dot5-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize32"
// GFX1200: "target-features"="+16-bit-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-fmin-fmax-global-f32,+atomic-global-pk-add-bf16-inst,+ci-insts,+dl-insts,+dot10-insts,+dot11-insts,+dot12-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+fp8-conversion-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize32"
// GFX1201: "target-features"="+16-bit-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-fmin-fmax-global-f32,+atomic-global-pk-add-bf16-inst,+ci-insts,+dl-insts,+dot10-insts,+dot11-insts,+dot12-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+fp8-conversion-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize32"
-// GFX1250: "target-features"="+16-bit-insts,+ashr-pk-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-fmin-fmax-global-f32,+atomic-fmin-fmax-global-f64,+atomic-global-pk-add-bf16-inst,+bf16-cvt-insts,+bf16-pk-insts,+bf16-trans-insts,+bitop3-insts,+ci-insts,+clusters,+dl-insts,+dot7-insts,+dot8-insts,+dpp,+fp8-conversion-insts,+fp8e5m3-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx1250-insts,+gfx8-insts,+gfx9-insts,+permlane16-swap,+prng-inst,+setprio-inc-wg-inst,+tanh-insts,+tensor-cvt-lut-insts,+transpose-load-f4f6-insts,+vmem-pref-insts,+wavefrontsize32"
-// GFX1251: "target-features"="+16-bit-insts,+ashr-pk-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-fmin-fmax-global-f32,+atomic-fmin-fmax-global-f64,+atomic-global-pk-add-bf16-inst,+bf16-cvt-insts,+bf16-pk-insts,+bf16-trans-insts,+bitop3-insts,+ci-insts,+clusters,+dl-insts,+dot7-insts,+dot8-insts,+dpp,+fp8-conversion-insts,+fp8e5m3-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx1250-insts,+gfx8-insts,+gfx9-insts,+permlane16-swap,+prng-inst,+setprio-inc-wg-inst,+tanh-insts,+tensor-cvt-lut-insts,+transpose-load-f4f6-insts,+vmem-pref-insts,+wavefrontsize32"
+// GFX1250: "target-features"="+16-bit-insts,+add-min-max-insts,+ashr-pk-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-fmin-fmax-global-f32,+atomic-fmin-fmax-global-f64,+atomic-global-pk-add-bf16-inst,+bf16-cvt-insts,+bf16-pk-insts,+bf16-trans-insts,+bitop3-insts,+ci-insts,+clusters,+dl-insts,+dot7-insts,+dot8-insts,+dpp,+fp8-conversion-insts,+fp8e5m3-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx1250-insts,+gfx8-insts,+gfx9-insts,+permlane16-swap,+pk-add-min-max-insts,+prng-inst,+setprio-inc-wg-inst,+tanh-insts,+tensor-cvt-lut-insts,+transpose-load-f4f6-insts,+vmem-pref-insts,+wavefrontsize32"
+// GFX1251: "target-features"="+16-bit-insts,+add-min-max-insts,+ashr-pk-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-fmin-fmax-global-f32,+atomic-fmin-fmax-global-f64,+atomic-global-pk-add-bf16-inst,+bf16-cvt-insts,+bf16-pk-insts,+bf16-trans-insts,+bitop3-insts,+ci-insts,+clusters,+dl-insts,+dot7-insts,+dot8-insts,+dpp,+fp8-conversion-insts,+fp8e5m3-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx1250-insts,+gfx8-insts,+gfx9-insts,+permlane16-swap,+pk-add-min-max-insts,+prng-inst,+setprio-inc-wg-inst,+tanh-insts,+tensor-cvt-lut-insts,+transpose-load-f4f6-insts,+vmem-pref-insts,+wavefrontsize32"
// GFX1103-W64: "target-features"="+16-bit-insts,+atomic-fadd-rtn-insts,+atomic-fmin-fmax-global-f32,+ci-insts,+dl-insts,+dot10-insts,+dot12-insts,+dot5-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize64"
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250.cl
index b6b475a7..e4a5fe9 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250.cl
@@ -21,6 +21,7 @@ typedef float __attribute__((ext_vector_type(8))) float8;
typedef float __attribute__((ext_vector_type(16))) float16;
typedef float __attribute__((ext_vector_type(32))) float32;
typedef short __attribute__((ext_vector_type(2))) short2;
+typedef unsigned short __attribute__((ext_vector_type(2))) ushort2;
// CHECK-LABEL: @test_setprio_inc_wg(
// CHECK-NEXT: entry:
@@ -1718,3 +1719,111 @@ void test_cvt_f32_fp8_e5m3(global int* out, int a)
*out = __builtin_amdgcn_cvt_f32_fp8_e5m3(a, 2);
*out = __builtin_amdgcn_cvt_f32_fp8_e5m3(a, 3);
}
+
+// CHECK-LABEL: @test_add_min_max(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[C_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr
+// CHECK-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// CHECK-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
+// CHECK-NEXT: [[C_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[C_ADDR]] to ptr
+// CHECK-NEXT: store ptr addrspace(1) [[OUT:%.*]], ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[C:%.*]], ptr [[C_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[C_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.amdgcn.add.max.i32(i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], i1 false)
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[TMP3]], ptr addrspace(1) [[TMP4]], align 4
+// CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[B_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[C_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.add.max.u32(i32 [[TMP5]], i32 [[TMP6]], i32 [[TMP7]], i1 true)
+// CHECK-NEXT: [[TMP9:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[TMP8]], ptr addrspace(1) [[TMP9]], align 4
+// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[B_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[C_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.amdgcn.add.min.i32(i32 [[TMP10]], i32 [[TMP11]], i32 [[TMP12]], i1 false)
+// CHECK-NEXT: [[TMP14:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[TMP13]], ptr addrspace(1) [[TMP14]], align 4
+// CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[B_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[C_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP18:%.*]] = call i32 @llvm.amdgcn.add.min.u32(i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP17]], i1 true)
+// CHECK-NEXT: [[TMP19:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[TMP18]], ptr addrspace(1) [[TMP19]], align 4
+// CHECK-NEXT: ret void
+//
+void test_add_min_max(global int *out, int a, int b, int c)
+{
+ *out = __builtin_amdgcn_add_max_i32(a, b, c, false);
+ *out = __builtin_amdgcn_add_max_u32(a, b, c, true);
+ *out = __builtin_amdgcn_add_min_i32(a, b, c, false);
+ *out = __builtin_amdgcn_add_min_u32(a, b, c, true);
+}
+
+// CHECK-LABEL: @test_pk_add_min_max(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[UOUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca <2 x i16>, align 4, addrspace(5)
+// CHECK-NEXT: [[B_ADDR:%.*]] = alloca <2 x i16>, align 4, addrspace(5)
+// CHECK-NEXT: [[C_ADDR:%.*]] = alloca <2 x i16>, align 4, addrspace(5)
+// CHECK-NEXT: [[UA_ADDR:%.*]] = alloca <2 x i16>, align 4, addrspace(5)
+// CHECK-NEXT: [[UB_ADDR:%.*]] = alloca <2 x i16>, align 4, addrspace(5)
+// CHECK-NEXT: [[UC_ADDR:%.*]] = alloca <2 x i16>, align 4, addrspace(5)
+// CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr
+// CHECK-NEXT: [[UOUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[UOUT_ADDR]] to ptr
+// CHECK-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// CHECK-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
+// CHECK-NEXT: [[C_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[C_ADDR]] to ptr
+// CHECK-NEXT: [[UA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[UA_ADDR]] to ptr
+// CHECK-NEXT: [[UB_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[UB_ADDR]] to ptr
+// CHECK-NEXT: [[UC_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[UC_ADDR]] to ptr
+// CHECK-NEXT: store ptr addrspace(1) [[OUT:%.*]], ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store ptr addrspace(1) [[UOUT:%.*]], ptr [[UOUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store <2 x i16> [[A:%.*]], ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store <2 x i16> [[B:%.*]], ptr [[B_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store <2 x i16> [[C:%.*]], ptr [[C_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store <2 x i16> [[UA:%.*]], ptr [[UA_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store <2 x i16> [[UB:%.*]], ptr [[UB_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store <2 x i16> [[UC:%.*]], ptr [[UC_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load <2 x i16>, ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load <2 x i16>, ptr [[B_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load <2 x i16>, ptr [[C_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = call <2 x i16> @llvm.amdgcn.pk.add.max.i16(<2 x i16> [[TMP0]], <2 x i16> [[TMP1]], <2 x i16> [[TMP2]], i1 false)
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store <2 x i16> [[TMP3]], ptr addrspace(1) [[TMP4]], align 4
+// CHECK-NEXT: [[TMP5:%.*]] = load <2 x i16>, ptr [[UA_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP6:%.*]] = load <2 x i16>, ptr [[UB_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP7:%.*]] = load <2 x i16>, ptr [[UC_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP8:%.*]] = call <2 x i16> @llvm.amdgcn.pk.add.max.u16(<2 x i16> [[TMP5]], <2 x i16> [[TMP6]], <2 x i16> [[TMP7]], i1 true)
+// CHECK-NEXT: [[TMP9:%.*]] = load ptr addrspace(1), ptr [[UOUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store <2 x i16> [[TMP8]], ptr addrspace(1) [[TMP9]], align 4
+// CHECK-NEXT: [[TMP10:%.*]] = load <2 x i16>, ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP11:%.*]] = load <2 x i16>, ptr [[B_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP12:%.*]] = load <2 x i16>, ptr [[C_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP13:%.*]] = call <2 x i16> @llvm.amdgcn.pk.add.min.i16(<2 x i16> [[TMP10]], <2 x i16> [[TMP11]], <2 x i16> [[TMP12]], i1 false)
+// CHECK-NEXT: [[TMP14:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store <2 x i16> [[TMP13]], ptr addrspace(1) [[TMP14]], align 4
+// CHECK-NEXT: [[TMP15:%.*]] = load <2 x i16>, ptr [[UA_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP16:%.*]] = load <2 x i16>, ptr [[UB_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP17:%.*]] = load <2 x i16>, ptr [[UC_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP18:%.*]] = call <2 x i16> @llvm.amdgcn.pk.add.min.u16(<2 x i16> [[TMP15]], <2 x i16> [[TMP16]], <2 x i16> [[TMP17]], i1 true)
+// CHECK-NEXT: [[TMP19:%.*]] = load ptr addrspace(1), ptr [[UOUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store <2 x i16> [[TMP18]], ptr addrspace(1) [[TMP19]], align 4
+// CHECK-NEXT: ret void
+//
+void test_pk_add_min_max(global short2 *out, global ushort2 *uout, short2 a, short2 b, short2 c, ushort2 ua, ushort2 ub, ushort2 uc)
+{
+ *out = __builtin_amdgcn_pk_add_max_i16(a, b, c, false);
+ *uout = __builtin_amdgcn_pk_add_max_u16(ua, ub, uc, true);
+ *out = __builtin_amdgcn_pk_add_min_i16(a, b, c, false);
+ *uout = __builtin_amdgcn_pk_add_min_u16(ua, ub, uc, true);
+}
diff --git a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
index 3cea47b..da6a03b 100644
--- a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
+++ b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
@@ -15,6 +15,8 @@ typedef half __attribute__((ext_vector_type(32))) half32;
typedef float __attribute__((ext_vector_type(8))) float8;
typedef float __attribute__((ext_vector_type(16))) float16;
typedef float __attribute__((ext_vector_type(32))) float32;
+typedef short __attribute__((ext_vector_type(2))) short2;
+typedef unsigned short __attribute__((ext_vector_type(2))) ushort2;
typedef int v4i __attribute__((ext_vector_type(4)));
typedef int v8i __attribute__((ext_vector_type(8)));
@@ -165,3 +167,19 @@ void test_cvt_f32_fp8_e5m3(global int* out, int a)
{
*out = __builtin_amdgcn_cvt_f32_fp8_e5m3(a, a); // expected-error {{'__builtin_amdgcn_cvt_f32_fp8_e5m3' must be a constant integer}}
}
+
+void test_add_min_max(global int *out, int a, int b, int c, bool clamp)
+{
+ *out = __builtin_amdgcn_add_max_i32(a, b, c, clamp); // expected-error {{'__builtin_amdgcn_add_max_i32' must be a constant integer}}
+ *out = __builtin_amdgcn_add_max_u32(a, b, c, clamp); // expected-error {{'__builtin_amdgcn_add_max_u32' must be a constant integer}}
+ *out = __builtin_amdgcn_add_min_i32(a, b, c, clamp); // expected-error {{'__builtin_amdgcn_add_min_i32' must be a constant integer}}
+ *out = __builtin_amdgcn_add_min_u32(a, b, c, clamp); // expected-error {{'__builtin_amdgcn_add_min_u32' must be a constant integer}}
+}
+
+void test_pk_add_min_max(global short2 *out, global ushort2 *uout, short2 a, short2 b, short2 c, ushort2 ua, ushort2 ub, ushort2 uc, bool clamp)
+{
+ *out = __builtin_amdgcn_pk_add_max_i16(a, b, c, clamp); // expected-error {{'__builtin_amdgcn_pk_add_max_i16' must be a constant integer}}
+ *uout = __builtin_amdgcn_pk_add_max_u16(ua, ub, uc, clamp); // expected-error {{'__builtin_amdgcn_pk_add_max_u16' must be a constant integer}}
+ *out = __builtin_amdgcn_pk_add_min_i16(a, b, c, clamp); // expected-error {{'__builtin_amdgcn_pk_add_min_i16' must be a constant integer}}
+ *uout = __builtin_amdgcn_pk_add_min_u16(ua, ub, uc, clamp); // expected-error {{'__builtin_amdgcn_pk_add_min_u16' must be a constant integer}}
+}
diff --git a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250.cl b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250.cl
index d7045cd..e53beae 100644
--- a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250.cl
+++ b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250.cl
@@ -3,9 +3,21 @@
typedef unsigned int uint;
typedef unsigned short int ushort;
+typedef short __attribute__((ext_vector_type(2))) short2;
+typedef unsigned short __attribute__((ext_vector_type(2))) ushort2;
-void test(global uint* out, uint a, uint b, uint c) {
+void test(global int* out, global short2 *s2out, global ushort2 *us2out,
+ uint a, uint b, uint c, short2 s2a, short2 s2b, short2 s2c,
+ ushort2 us2a, ushort2 us2b, ushort2 us2c) {
__builtin_amdgcn_s_setprio_inc_wg(1); // expected-error {{'__builtin_amdgcn_s_setprio_inc_wg' needs target feature setprio-inc-wg-inst}}
*out = __builtin_amdgcn_bitop3_b32(a, b, c, 1); // expected-error {{'__builtin_amdgcn_bitop3_b32' needs target feature bitop3-insts}}
*out = __builtin_amdgcn_bitop3_b16((ushort)a, (ushort)b, (ushort)c, 1); // expected-error {{'__builtin_amdgcn_bitop3_b16' needs target feature bitop3-insts}}
+ *out = __builtin_amdgcn_add_max_i32(a, b, c, false); // expected-error {{'__builtin_amdgcn_add_max_i32' needs target feature add-min-max-insts}}
+ *out = __builtin_amdgcn_add_max_u32(a, b, c, true); // expected-error {{'__builtin_amdgcn_add_max_u32' needs target feature add-min-max-insts}}
+ *out = __builtin_amdgcn_add_min_i32(a, b, c, false); // expected-error {{'__builtin_amdgcn_add_min_i32' needs target feature add-min-max-insts}}
+ *out = __builtin_amdgcn_add_min_u32(a, b, c, true); // expected-error {{'__builtin_amdgcn_add_min_u32' needs target feature add-min-max-insts}}
+ *s2out = __builtin_amdgcn_pk_add_max_i16(s2a, s2b, s2c, false); // expected-error {{'__builtin_amdgcn_pk_add_max_i16' needs target feature pk-add-min-max-insts}}
+ *us2out = __builtin_amdgcn_pk_add_max_u16(us2a, us2b, us2c, true); // expected-error {{'__builtin_amdgcn_pk_add_max_u16' needs target feature pk-add-min-max-insts}}
+ *s2out = __builtin_amdgcn_pk_add_min_i16(s2a, s2b, s2c, false); // expected-error {{'__builtin_amdgcn_pk_add_min_i16' needs target feature pk-add-min-max-insts}}
+ *us2out = __builtin_amdgcn_pk_add_min_u16(us2a, us2b, us2c, true); // expected-error {{'__builtin_amdgcn_pk_add_min_u16' needs target feature pk-add-min-max-insts}}
}
diff --git a/clang/unittests/Format/TokenAnnotatorTest.cpp b/clang/unittests/Format/TokenAnnotatorTest.cpp
index f363738..ca99940 100644
--- a/clang/unittests/Format/TokenAnnotatorTest.cpp
+++ b/clang/unittests/Format/TokenAnnotatorTest.cpp
@@ -1129,6 +1129,11 @@ TEST_F(TokenAnnotatorTest, UnderstandsOverloadedOperators) {
ASSERT_EQ(Tokens.size(), 7u) << Tokens;
// Not TT_FunctionDeclarationName.
EXPECT_TOKEN(Tokens[3], tok::kw_operator, TT_Unknown);
+
+ Tokens = annotate("SomeAPI::operator()();");
+ ASSERT_EQ(Tokens.size(), 9u) << Tokens;
+ // Not TT_FunctionDeclarationName.
+ EXPECT_TOKEN(Tokens[2], tok::kw_operator, TT_Unknown);
}
TEST_F(TokenAnnotatorTest, OverloadedOperatorInTemplate) {
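
For reference, the two source shapes the annotator change distinguishes look like this (editorial snippets; only SomeAPI comes from the new unit test):

    // Annotated as a declaration now: 'operator' on a line starting with
    // 'friend' names the function being declared.
    friend Foo operator+(const Foo &, const Foo &);

    // Not a declaration: a qualified call to an overloaded call operator.
    SomeAPI::operator()();
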
diff --git a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
index d2a36d4..1ae8bbe 100644
--- a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
+++ b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
@@ -8974,10 +8974,12 @@ IntrinsicLibrary::genSyncThreadsAnd(mlir::Type resultType,
llvm::ArrayRef<mlir::Value> args) {
constexpr llvm::StringLiteral funcName = "llvm.nvvm.barrier0.and";
mlir::MLIRContext *context = builder.getContext();
+ mlir::Type i32 = builder.getI32Type();
mlir::FunctionType ftype =
- mlir::FunctionType::get(context, {resultType}, {args[0].getType()});
+ mlir::FunctionType::get(context, {resultType}, {i32});
auto funcOp = builder.createFunction(loc, funcName, ftype);
- return fir::CallOp::create(builder, loc, funcOp, args).getResult(0);
+ mlir::Value arg = builder.createConvert(loc, i32, args[0]);
+ return fir::CallOp::create(builder, loc, funcOp, {arg}).getResult(0);
}
// SYNCTHREADS_COUNT
@@ -8986,10 +8988,12 @@ IntrinsicLibrary::genSyncThreadsCount(mlir::Type resultType,
llvm::ArrayRef<mlir::Value> args) {
constexpr llvm::StringLiteral funcName = "llvm.nvvm.barrier0.popc";
mlir::MLIRContext *context = builder.getContext();
+ mlir::Type i32 = builder.getI32Type();
mlir::FunctionType ftype =
- mlir::FunctionType::get(context, {resultType}, {args[0].getType()});
+ mlir::FunctionType::get(context, {resultType}, {i32});
auto funcOp = builder.createFunction(loc, funcName, ftype);
- return fir::CallOp::create(builder, loc, funcOp, args).getResult(0);
+ mlir::Value arg = builder.createConvert(loc, i32, args[0]);
+ return fir::CallOp::create(builder, loc, funcOp, {arg}).getResult(0);
}
// SYNCTHREADS_OR
@@ -8998,10 +9002,12 @@ IntrinsicLibrary::genSyncThreadsOr(mlir::Type resultType,
llvm::ArrayRef<mlir::Value> args) {
constexpr llvm::StringLiteral funcName = "llvm.nvvm.barrier0.or";
mlir::MLIRContext *context = builder.getContext();
+ mlir::Type i32 = builder.getI32Type();
mlir::FunctionType ftype =
- mlir::FunctionType::get(context, {resultType}, {args[0].getType()});
+ mlir::FunctionType::get(context, {resultType}, {i32});
auto funcOp = builder.createFunction(loc, funcName, ftype);
- return fir::CallOp::create(builder, loc, funcOp, args).getResult(0);
+ mlir::Value arg = builder.createConvert(loc, i32, args[0]);
+ return fir::CallOp::create(builder, loc, funcOp, {arg}).getResult(0);
}
// SYNCWARP
diff --git a/flang/test/Lower/CUDA/cuda-device-proc.cuf b/flang/test/Lower/CUDA/cuda-device-proc.cuf
index 55bb587..7d6caf5 100644
--- a/flang/test/Lower/CUDA/cuda-device-proc.cuf
+++ b/flang/test/Lower/CUDA/cuda-device-proc.cuf
@@ -110,17 +110,20 @@ end
! CHECK: %[[A:.*]] = fir.load %{{.*}} : !fir.ref<i32>
! CHECK: %[[B:.*]] = fir.load %{{.*}} : !fir.ref<i32>
! CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[A]], %[[B]] : i32
-! CHECK: %{{.*}} = fir.call @llvm.nvvm.barrier0.and(%[[CMP]])
+! CHECK: %[[CONV:.*]] = fir.convert %[[CMP]] : (i1) -> i32
+! CHECK: %{{.*}} = fir.call @llvm.nvvm.barrier0.and(%[[CONV]])
! CHECK: %{{.*}} = fir.call @llvm.nvvm.barrier0.popc(%c1{{.*}}) fastmath<contract> : (i32) -> i32
! CHECK: %[[A:.*]] = fir.load %{{.*}} : !fir.ref<i32>
! CHECK: %[[B:.*]] = fir.load %{{.*}} : !fir.ref<i32>
! CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[A]], %[[B]] : i32
-! CHECK: %{{.*}} = fir.call @llvm.nvvm.barrier0.popc(%[[CMP]]) fastmath<contract> : (i1) -> i32
+! CHECK: %[[CONV:.*]] = fir.convert %[[CMP]] : (i1) -> i32
+! CHECK: %{{.*}} = fir.call @llvm.nvvm.barrier0.popc(%[[CONV]]) fastmath<contract> : (i32) -> i32
! CHECK: %{{.*}} = fir.call @llvm.nvvm.barrier0.or(%c1{{.*}}) fastmath<contract> : (i32) -> i32
! CHECK: %[[A:.*]] = fir.load %{{.*}} : !fir.ref<i32>
! CHECK: %[[B:.*]] = fir.load %{{.*}} : !fir.ref<i32>
! CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[A]], %[[B]] : i32
-! CHECK: %{{.*}} = fir.call @llvm.nvvm.barrier0.or(%[[CMP]]) fastmath<contract> : (i1) -> i32
+! CHECK: %[[CONV:.*]] = fir.convert %[[CMP]] : (i1) -> i32
+! CHECK: %{{.*}} = fir.call @llvm.nvvm.barrier0.or(%[[CONV]]) fastmath<contract> : (i32) -> i32
! CHECK: %{{.*}} = llvm.atomicrmw add %{{.*}}, %{{.*}} seq_cst : !llvm.ptr, i32
! CHECK: %{{.*}} = llvm.atomicrmw add %{{.*}}, %{{.*}} seq_cst : !llvm.ptr, i64
! CHECK: %{{.*}} = llvm.atomicrmw fadd %{{.*}}, %{{.*}} seq_cst : !llvm.ptr, f32
diff --git a/libcxx/include/__compare/strong_order.h b/libcxx/include/__compare/strong_order.h
index 8c363b5..ba6de44 100644
--- a/libcxx/include/__compare/strong_order.h
+++ b/libcxx/include/__compare/strong_order.h
@@ -13,7 +13,6 @@
#include <__compare/compare_three_way.h>
#include <__compare/ordering.h>
#include <__config>
-#include <__math/exponential_functions.h>
#include <__math/traits.h>
#include <__type_traits/conditional.h>
#include <__type_traits/decay.h>
@@ -53,38 +52,21 @@ struct __fn {
template <class _Tp, class _Up, class _Dp = decay_t<_Tp>>
requires is_same_v<_Dp, decay_t<_Up>> && is_floating_point_v<_Dp>
_LIBCPP_HIDE_FROM_ABI static constexpr strong_ordering __go(_Tp&& __t, _Up&& __u, __priority_tag<1>) noexcept {
- if constexpr (numeric_limits<_Dp>::is_iec559 && sizeof(_Dp) == sizeof(int32_t)) {
- int32_t __rx = std::bit_cast<int32_t>(__t);
- int32_t __ry = std::bit_cast<int32_t>(__u);
- __rx = (__rx < 0) ? (numeric_limits<int32_t>::min() - __rx - 1) : __rx;
- __ry = (__ry < 0) ? (numeric_limits<int32_t>::min() - __ry - 1) : __ry;
- return (__rx <=> __ry);
- } else if constexpr (numeric_limits<_Dp>::is_iec559 && sizeof(_Dp) == sizeof(int64_t)) {
- int64_t __rx = std::bit_cast<int64_t>(__t);
- int64_t __ry = std::bit_cast<int64_t>(__u);
- __rx = (__rx < 0) ? (numeric_limits<int64_t>::min() - __rx - 1) : __rx;
- __ry = (__ry < 0) ? (numeric_limits<int64_t>::min() - __ry - 1) : __ry;
+ if constexpr (numeric_limits<_Dp>::is_iec559 &&
+ (sizeof(_Dp) == sizeof(int32_t) || sizeof(_Dp) == sizeof(int64_t))) {
+ using _IntT = conditional_t<sizeof(_Dp) == sizeof(int32_t), int32_t, int64_t>;
+ _IntT __rx = std::bit_cast<_IntT>(__t);
+ _IntT __ry = std::bit_cast<_IntT>(__u);
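+      // The bit_cast yields a sign-magnitude integer; reflecting negative
+      // values below turns the plain integer comparison into IEEE 754
+      // totalOrder over the two values.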
+ __rx = (__rx < 0) ? (numeric_limits<_IntT>::min() - __rx - 1) : __rx;
+ __ry = (__ry < 0) ? (numeric_limits<_IntT>::min() - __ry - 1) : __ry;
return (__rx <=> __ry);
} else if (__t < __u) {
return strong_ordering::less;
} else if (__t > __u) {
return strong_ordering::greater;
} else if (__t == __u) {
- if constexpr (numeric_limits<_Dp>::radix == 2) {
- return __math::signbit(__u) <=> __math::signbit(__t);
- } else {
- // This is bullet 3 of the IEEE754 algorithm, relevant
- // only for decimal floating-point;
- // see https://stackoverflow.com/questions/69068075/
- if (__t == 0 || __math::isinf(__t)) {
- return __math::signbit(__u) <=> __math::signbit(__t);
- } else {
- int __texp, __uexp;
- (void)__math::frexp(__t, &__texp);
- (void)__math::frexp(__u, &__uexp);
- return (__t < 0) ? (__texp <=> __uexp) : (__uexp <=> __texp);
- }
- }
+ static_assert(numeric_limits<_Dp>::radix == 2, "floating point type with a radix other than 2?");
+ return __math::signbit(__u) <=> __math::signbit(__t);
} else {
// They're unordered, so one of them must be a NAN.
// The order is -QNAN, -SNAN, numbers, +SNAN, +QNAN.
@@ -93,9 +75,9 @@ struct __fn {
bool __t_is_negative = __math::signbit(__t);
bool __u_is_negative = __math::signbit(__u);
using _IntType =
- conditional_t< sizeof(__t) == sizeof(int32_t),
- int32_t,
- conditional_t< sizeof(__t) == sizeof(int64_t), int64_t, void> >;
+ conditional_t<sizeof(__t) == sizeof(int32_t),
+ int32_t,
+ conditional_t<sizeof(__t) == sizeof(int64_t), int64_t, void>>;
if constexpr (is_same_v<_IntType, void>) {
static_assert(sizeof(_Dp) == 0, "std::strong_order is unimplemented for this floating-point type");
} else if (__t_is_nan && __u_is_nan) {
diff --git a/llvm/docs/MLGO.rst b/llvm/docs/MLGO.rst
index bf3de11..2443835 100644
--- a/llvm/docs/MLGO.rst
+++ b/llvm/docs/MLGO.rst
@@ -434,8 +434,27 @@ The latter is also used in tests.
There is no C++ implementation of a log reader. We do not have a scenario
motivating one.
-IR2Vec Embeddings
-=================
+Embeddings
+==========
+
+LLVM provides embedding frameworks to generate vector representations of code
+at different abstraction levels. These embeddings capture syntactic, semantic,
+and structural properties of the code and can be used as features for machine
+learning models in various compiler optimization tasks.
+
+Two embedding frameworks are available:
+
+- **IR2Vec**: Generates embeddings for LLVM IR
+- **MIR2Vec**: Generates embeddings for Machine IR
+
+Both frameworks follow a similar architecture with vocabulary-based embedding
+generation, where a vocabulary maps code entities to n-dimensional
+floating-point vectors. These embeddings can be computed at multiple
+granularity levels
+(instruction, basic block, and function) and used for ML-guided compiler
+optimizations.
+
+IR2Vec
+------
IR2Vec is a program embedding approach designed specifically for LLVM IR. It
is implemented as a function analysis pass in LLVM. The IR2Vec embeddings
@@ -466,7 +485,7 @@ The core components are:
compute embeddings for instructions, basic blocks, and functions.
Using IR2Vec
-------------
+^^^^^^^^^^^^
.. note::
@@ -526,7 +545,7 @@ embeddings can be computed and accessed via an ``ir2vec::Embedder`` instance.
between different code snippets, or perform other analyses as needed.
Further Details
----------------
+^^^^^^^^^^^^^^^
For more detailed information about the IR2Vec algorithm, its parameters, and
advanced usage, please refer to the original paper:
@@ -538,6 +557,123 @@ triplets from LLVM IR, see :doc:`CommandGuide/llvm-ir2vec`.
The LLVM source code for ``IR2Vec`` can also be explored to understand the
implementation details.
+MIR2Vec
+-------
+
+MIR2Vec is an extension of IR2Vec designed specifically for LLVM Machine IR
+(MIR). It generates embeddings for machine-level instructions, basic blocks,
+and functions. MIR2Vec operates on the target-specific machine representation,
+capturing machine instruction semantics, including opcodes, operands, and
+register information.
+
+MIR2Vec extends the vocabulary to include:
+
+- **Machine Opcodes**: Target-specific instruction opcodes derived from the
+ TargetInstrInfo, grouped by instruction semantics.
+
+- **Common Operands**: All common operand types (excluding register operands),
+ defined by the ``MachineOperand::MachineOperandType`` enum.
+
+- **Physical Register Classes**: Register classes defined by the target,
+ specialized for physical registers.
+
+- **Virtual Register Classes**: Register classes defined by the target,
+ specialized for virtual registers.
+
+The core components are:
+
+- **Vocabulary**: A mapping from machine IR entities (opcodes, operands, register
+ classes) to their vector representations. This is managed by
+ ``MIR2VecVocabLegacyAnalysis`` for the legacy pass manager, with a
+ ``MIR2VecVocabProvider`` that can be used standalone or wrapped by pass
+  managers. The vocabulary (a ``.json`` file) contains sections for opcodes,
+  common operands, physical register classes, and virtual register classes.
+
+ .. note::
+
+      The vocabulary file must contain all of these sections to be valid.
+
+- **Embedder**: A class (``mir2vec::MIREmbedder``) that uses the vocabulary to
+ compute embeddings for machine instructions, machine basic blocks, and
+ machine functions. Currently, ``SymbolicMIREmbedder`` is the available
+ implementation.
+
+Using MIR2Vec
+^^^^^^^^^^^^^
+
+.. note::
+
+   This section describes how to use MIR2Vec within LLVM passes. The
+   :doc:`llvm-ir2vec <CommandGuide/llvm-ir2vec>` tool can be used to generate
+   MIR2Vec embeddings from Machine IR files (.mir), which is useful when
+   embeddings are needed outside of compiler passes.
+
+To generate MIR2Vec embeddings in a compiler pass, first obtain the vocabulary,
+then create an embedder instance to compute and access embeddings.
+
+1. **Get the Vocabulary**:
+ In a MachineFunctionPass, get the vocabulary from the analysis:
+
+ .. code-block:: c++
+
+ auto &VocabAnalysis = getAnalysis<MIR2VecVocabLegacyAnalysis>();
+ auto VocabOrErr = VocabAnalysis.getMIR2VecVocabulary(*MF.getFunction().getParent());
+ if (!VocabOrErr) {
+ // Handle error: vocabulary is not available or invalid
+ return;
+ }
+ const mir2vec::MIRVocabulary &Vocabulary = *VocabOrErr;
+
+ Note that ``MIR2VecVocabLegacyAnalysis`` is an immutable pass.
+
+2. **Create an Embedder Instance**:
+ With the vocabulary, create an embedder for a specific machine function:
+
+ .. code-block:: c++
+
+ // Assuming MF is a MachineFunction&
+ // For example, using MIR2VecKind::Symbolic:
+ std::unique_ptr<mir2vec::MIREmbedder> Emb =
+ mir2vec::MIREmbedder::create(MIR2VecKind::Symbolic, MF, Vocabulary);
+
+3. **Compute and Access Embeddings**:
+ Call ``getMFunctionVector()`` to get the embedding for the machine function.
+
+ .. code-block:: c++
+
+ mir2vec::Embedding FuncVector = Emb->getMFunctionVector();
+
+ Currently, ``MIREmbedder`` can generate embeddings at three levels: Machine
+ Instructions, Machine Basic Blocks, and Machine Functions. Appropriate
+ getters are provided to access the embeddings at these levels.
+
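+   As a sketch, finer-grained access might look like the following (the
+   block-level getter name is illustrative, not a confirmed API; see
+   ``llvm/include/llvm/CodeGen/MIR2Vec.h`` for the exact interface):
+
+   .. code-block:: c++
+
+      for (const MachineBasicBlock &MBB : MF) {
+        // Hypothetical per-block getter, analogous to getMFunctionVector().
+        mir2vec::Embedding BBVector = Emb->getMBBVector(MBB);
+        // ... use BBVector as a per-block feature ...
+      }
+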
+ .. note::
+
+ The validity of the ``MIREmbedder`` instance (and the embeddings it
+ generates) is tied to the machine function it is associated with. If the
+ machine function is modified, the embeddings may become stale and should
+ be recomputed accordingly.
+
+4. **Working with Embeddings**:
+   Embeddings are represented as ``std::vector<double>``. These vectors can be
+   used as features for machine learning models, to compute similarity scores
+   between different code snippets, or to perform other analyses as needed.
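+
+   As a minimal sketch (plain C++ over ``std::vector<double>``, independent
+   of any MIR2Vec API), a cosine-similarity helper could look like:
+
+   .. code-block:: c++
+
+      #include <cmath>
+      #include <numeric>
+      #include <vector>
+
+      // Cosine similarity of two equally sized embedding vectors.
+      double cosineSimilarity(const std::vector<double> &A,
+                              const std::vector<double> &B) {
+        double Dot = std::inner_product(A.begin(), A.end(), B.begin(), 0.0);
+        double NA =
+            std::sqrt(std::inner_product(A.begin(), A.end(), A.begin(), 0.0));
+        double NB =
+            std::sqrt(std::inner_product(B.begin(), B.end(), B.begin(), 0.0));
+        return (NA == 0.0 || NB == 0.0) ? 0.0 : Dot / (NA * NB);
+      }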
+
+Further Details
+^^^^^^^^^^^^^^^
+
+For more detailed information about the MIR2Vec algorithm, its parameters, and
+advanced usage, please refer to the original paper:
+`RL4ReAl: Reinforcement Learning for Register Allocation <https://doi.org/10.1145/3578360.3580273>`_.
+
+For information about using the ``llvm-ir2vec`` tool to generate embeddings
+from Machine IR, see :doc:`CommandGuide/llvm-ir2vec`.
+
+The LLVM source code for ``MIR2Vec`` can be explored to understand the
+implementation details. See ``llvm/include/llvm/CodeGen/MIR2Vec.h`` and
+``llvm/lib/CodeGen/MIR2Vec.cpp``.
+
Building with ML support
========================
diff --git a/llvm/include/llvm/ADT/IndexedMap.h b/llvm/include/llvm/ADT/IndexedMap.h
index cda0316..638fe14 100644
--- a/llvm/include/llvm/ADT/IndexedMap.h
+++ b/llvm/include/llvm/ADT/IndexedMap.h
@@ -22,12 +22,21 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/identity.h"
#include <cassert>
namespace llvm {
-template <typename T, typename ToIndexT = identity<unsigned>> class IndexedMap {
+namespace detail {
+template <class Ty> struct IdentityIndex {
+ using argument_type = Ty;
+
+ Ty &operator()(Ty &self) const { return self; }
+ const Ty &operator()(const Ty &self) const { return self; }
+};
+} // namespace detail
+
+template <typename T, typename ToIndexT = detail::IdentityIndex<unsigned>>
+class IndexedMap {
using IndexT = typename ToIndexT::argument_type;
// Prefer SmallVector with zero inline storage over std::vector. IndexedMaps
// can grow very large and SmallVector grows more efficiently as long as T
diff --git a/llvm/include/llvm/ADT/identity.h b/llvm/include/llvm/ADT/identity.h
deleted file mode 100644
index 88d033f..0000000
--- a/llvm/include/llvm/ADT/identity.h
+++ /dev/null
@@ -1,31 +0,0 @@
-//===- llvm/ADT/Identity.h - Provide std::identity from C++20 ---*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file provides an implementation of std::identity from C++20.
-//
-// No library is required when using these functions.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_ADT_IDENTITY_H
-#define LLVM_ADT_IDENTITY_H
-
-namespace llvm {
-
-// Similar to `std::identity` from C++20.
-template <class Ty> struct identity {
- using is_transparent = void;
- using argument_type = Ty;
-
- Ty &operator()(Ty &self) const { return self; }
- const Ty &operator()(const Ty &self) const { return self; }
-};
-
-} // namespace llvm
-
-#endif // LLVM_ADT_IDENTITY_H
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 9e334d4..8e35109 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -3789,6 +3789,20 @@ def int_amdgcn_perm_pk16_b8_u4 : ClangBuiltin<"__builtin_amdgcn_perm_pk16_b8_u4"
DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_i64_ty, llvm_i64_ty, llvm_v2i32_ty],
[IntrNoMem, IntrSpeculatable]>;
+class AMDGPUAddMinMax<LLVMType Ty, string Name> : ClangBuiltin<"__builtin_amdgcn_"#Name>,
+ DefaultAttrsIntrinsic<[Ty], [Ty, Ty, Ty, llvm_i1_ty /* clamp */],
+ [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]>;
+
+def int_amdgcn_add_max_i32 : AMDGPUAddMinMax<llvm_i32_ty, "add_max_i32">;
+def int_amdgcn_add_max_u32 : AMDGPUAddMinMax<llvm_i32_ty, "add_max_u32">;
+def int_amdgcn_add_min_i32 : AMDGPUAddMinMax<llvm_i32_ty, "add_min_i32">;
+def int_amdgcn_add_min_u32 : AMDGPUAddMinMax<llvm_i32_ty, "add_min_u32">;
+
+def int_amdgcn_pk_add_max_i16 : AMDGPUAddMinMax<llvm_v2i16_ty, "pk_add_max_i16">;
+def int_amdgcn_pk_add_max_u16 : AMDGPUAddMinMax<llvm_v2i16_ty, "pk_add_max_u16">;
+def int_amdgcn_pk_add_min_i16 : AMDGPUAddMinMax<llvm_v2i16_ty, "pk_add_min_i16">;
+def int_amdgcn_pk_add_min_u16 : AMDGPUAddMinMax<llvm_v2i16_ty, "pk_add_min_u16">;
+
class AMDGPUCooperativeAtomicStore<LLVMType Ty> : Intrinsic <
[],
[llvm_anyptr_ty, // pointer to store to
diff --git a/llvm/include/llvm/Support/GlobPattern.h b/llvm/include/llvm/Support/GlobPattern.h
index c1b4484..8cae6a3 100644
--- a/llvm/include/llvm/Support/GlobPattern.h
+++ b/llvm/include/llvm/Support/GlobPattern.h
@@ -63,21 +63,30 @@ public:
// Returns true for glob pattern "*". Can be used to avoid expensive
// preparation/acquisition of the input for match().
bool isTrivialMatchAll() const {
- if (!Prefix.empty())
+ if (PrefixSize)
return false;
- if (!Suffix.empty())
+ if (SuffixSize)
return false;
if (SubGlobs.size() != 1)
return false;
return SubGlobs[0].getPat() == "*";
}
- StringRef prefix() const { return Prefix; }
- StringRef suffix() const { return Suffix; }
+  // The following functions are shortcuts for faster matching. They are
+  // conservative in order to keep the implementation simple.
+
+  // Returns the plain prefix of the pattern.
+ StringRef prefix() const { return Pattern.take_front(PrefixSize); }
+  // Returns the plain suffix of the pattern.
+ StringRef suffix() const { return Pattern.take_back(SuffixSize); }
+ // Returns the longest plain substring of the pattern between prefix and
+ // suffix.
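+  // For example (illustrative): for the pattern "a*bc[x]longest*z",
+  // prefix() == "a", suffix() == "z", and longest_substr() == "longest".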
+ StringRef longest_substr() const;
private:
- StringRef Prefix;
- StringRef Suffix;
+ StringRef Pattern;
+ size_t PrefixSize = 0;
+ size_t SuffixSize = 0;
struct SubGlobPattern {
/// \param Pat the pattern to match against
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 1fe38d6..b49040b 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1862,15 +1862,19 @@ bool IRTranslator::translateVectorDeinterleave2Intrinsic(
void IRTranslator::getStackGuard(Register DstReg,
MachineIRBuilder &MIRBuilder) {
+ Value *Global = TLI->getSDagStackGuard(*MF->getFunction().getParent());
+ if (!Global) {
+ LLVMContext &Ctx = MIRBuilder.getContext();
+ Ctx.diagnose(DiagnosticInfoGeneric("unable to lower stackguard"));
+ MIRBuilder.buildUndef(DstReg);
+ return;
+ }
+
const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
MRI->setRegClass(DstReg, TRI->getPointerRegClass());
auto MIB =
MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});
- Value *Global = TLI->getSDagStackGuard(*MF->getFunction().getParent());
- if (!Global)
- return;
-
unsigned AddrSpace = Global->getType()->getPointerAddressSpace();
LLT PtrTy = LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace));
diff --git a/llvm/lib/CodeGen/InlineSpiller.cpp b/llvm/lib/CodeGen/InlineSpiller.cpp
index d6e8505..c3e0964 100644
--- a/llvm/lib/CodeGen/InlineSpiller.cpp
+++ b/llvm/lib/CodeGen/InlineSpiller.cpp
@@ -721,6 +721,9 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
// Allocate a new register for the remat.
Register NewVReg = Edit->createFrom(Original);
+  // Constrain it to the register class of VirtReg, so the new register
+  // satisfies both the rematerialized def's and the use's constraints.
+ MRI.constrainRegClass(NewVReg, MRI.getRegClass(VirtReg.reg()));
+
// Finally we can rematerialize OrigMI before MI.
SlotIndex DefIdx =
Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index dcf2df3..d57c5fb 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3131,12 +3131,16 @@ void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
if (TLI.useLoadStackGuardNode(M)) {
Guard = getLoadStackGuard(DAG, dl, Chain);
} else {
- const Value *IRGuard = TLI.getSDagStackGuard(M);
- SDValue GuardPtr = getValue(IRGuard);
-
- Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
- MachinePointerInfo(IRGuard, 0), Align,
- MachineMemOperand::MOVolatile);
+ if (const Value *IRGuard = TLI.getSDagStackGuard(M)) {
+ SDValue GuardPtr = getValue(IRGuard);
+ Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
+ MachinePointerInfo(IRGuard, 0), Align,
+ MachineMemOperand::MOVolatile);
+ } else {
+ LLVMContext &Ctx = *DAG.getContext();
+ Ctx.diagnose(DiagnosticInfoGeneric("unable to lower stackguard"));
+ Guard = DAG.getPOISON(PtrMemTy);
+ }
}
// Perform the comparison via a getsetcc.
@@ -7324,6 +7328,13 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
Res = DAG.getPtrExtOrTrunc(Res, sdl, PtrTy);
} else {
const Value *Global = TLI.getSDagStackGuard(M);
+ if (!Global) {
+ LLVMContext &Ctx = *DAG.getContext();
+ Ctx.diagnose(DiagnosticInfoGeneric("unable to lower stackguard"));
+ setValue(&I, DAG.getPOISON(PtrTy));
+ return;
+ }
+
Align Align = DAG.getDataLayout().getPrefTypeAlign(Global->getType());
Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
MachinePointerInfo(Global, 0), Align,
diff --git a/llvm/lib/Frontend/HLSL/CBuffer.cpp b/llvm/lib/Frontend/HLSL/CBuffer.cpp
index 407b6ad..1f53c87 100644
--- a/llvm/lib/Frontend/HLSL/CBuffer.cpp
+++ b/llvm/lib/Frontend/HLSL/CBuffer.cpp
@@ -43,8 +43,13 @@ std::optional<CBufferMetadata> CBufferMetadata::get(Module &M) {
for (const MDNode *MD : CBufMD->operands()) {
assert(MD->getNumOperands() && "Invalid cbuffer metadata");
- auto *Handle = cast<GlobalVariable>(
- cast<ValueAsMetadata>(MD->getOperand(0))->getValue());
+    // For an unused cbuffer, the handle may have been optimized out.
+ Metadata *OpMD = MD->getOperand(0);
+ if (!OpMD)
+ continue;
+
+ auto *Handle =
+ cast<GlobalVariable>(cast<ValueAsMetadata>(OpMD)->getValue());
CBufferMapping &Mapping = Result->Mappings.emplace_back(Handle);
for (int I = 1, E = MD->getNumOperands(); I < E; ++I) {
diff --git a/llvm/lib/Support/GlobPattern.cpp b/llvm/lib/Support/GlobPattern.cpp
index 0ecf47d..2715229 100644
--- a/llvm/lib/Support/GlobPattern.cpp
+++ b/llvm/lib/Support/GlobPattern.cpp
@@ -132,24 +132,70 @@ parseBraceExpansions(StringRef S, std::optional<size_t> MaxSubPatterns) {
return std::move(SubPatterns);
}
+static StringRef maxPlainSubstring(StringRef S) {
+ StringRef Best;
+ while (!S.empty()) {
+ size_t PrefixSize = S.find_first_of("?*[{\\");
+ if (PrefixSize == std::string::npos)
+ PrefixSize = S.size();
+
+ if (Best.size() < PrefixSize)
+ Best = S.take_front(PrefixSize);
+
+ S = S.drop_front(PrefixSize);
+
+    // S cannot be empty here: the first and last characters of the input
+    // string must be glob special characters; otherwise they would be part
+    // of the prefix or the suffix.
+ assert(!S.empty());
+
+ switch (S.front()) {
+ case '\\':
+ S = S.drop_front(2);
+ break;
+ case '[': {
+ // Drop '[' and the first character which can be ']'.
+ S = S.drop_front(2);
+ size_t EndBracket = S.find_first_of("]");
+      // Should not be possible: SubGlobPattern::create fails on an invalid
+      // pattern before we get here.
+ assert(EndBracket != std::string::npos);
+ S = S.drop_front(EndBracket + 1);
+ break;
+ }
+ case '{':
+ // TODO: implement.
+      // Fall back to whatever is best for now.
+ return Best;
+ default:
+ S = S.drop_front(1);
+ }
+ }
+
+ return Best;
+}
+
Expected<GlobPattern>
GlobPattern::create(StringRef S, std::optional<size_t> MaxSubPatterns) {
GlobPattern Pat;
+ Pat.Pattern = S;
// Store the prefix that does not contain any metacharacter.
- size_t PrefixSize = S.find_first_of("?*[{\\");
- Pat.Prefix = S.substr(0, PrefixSize);
- if (PrefixSize == std::string::npos)
+ Pat.PrefixSize = S.find_first_of("?*[{\\");
+ if (Pat.PrefixSize == std::string::npos) {
+ Pat.PrefixSize = S.size();
return Pat;
- S = S.substr(PrefixSize);
+ }
+ S = S.substr(Pat.PrefixSize);
// Just in case we stop on unmatched opening brackets.
size_t SuffixStart = S.find_last_of("?*[]{}\\");
assert(SuffixStart != std::string::npos);
if (S[SuffixStart] == '\\')
++SuffixStart;
- ++SuffixStart;
- Pat.Suffix = S.substr(SuffixStart);
+ if (SuffixStart < S.size())
+ ++SuffixStart;
+ Pat.SuffixSize = S.size() - SuffixStart;
S = S.substr(0, SuffixStart);
SmallVector<std::string, 1> SubPats;
@@ -199,10 +245,15 @@ GlobPattern::SubGlobPattern::create(StringRef S) {
return Pat;
}
+StringRef GlobPattern::longest_substr() const {
+ return maxPlainSubstring(
+ Pattern.drop_front(PrefixSize).drop_back(SuffixSize));
+}
+
bool GlobPattern::match(StringRef S) const {
- if (!S.consume_front(Prefix))
+ if (!S.consume_front(prefix()))
return false;
- if (!S.consume_back(Suffix))
+ if (!S.consume_back(suffix()))
return false;
if (SubGlobs.empty() && S.empty())
return true;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index ea32748..1c8383c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -1430,6 +1430,18 @@ def FeatureAddSubU64Insts
def FeatureMadU32Inst : SubtargetFeature<"mad-u32-inst", "HasMadU32Inst",
"true", "Has v_mad_u32 instruction">;
+def FeatureAddMinMaxInsts : SubtargetFeature<"add-min-max-insts",
+ "HasAddMinMaxInsts",
+ "true",
+ "Has v_add_{min|max}_{i|u}32 instructions"
+>;
+
+def FeaturePkAddMinMaxInsts : SubtargetFeature<"pk-add-min-max-insts",
+ "HasPkAddMinMaxInsts",
+ "true",
+ "Has v_pk_add_{min|max}_{i|u}16 instructions"
+>;
+
def FeatureMemToLDSLoad : SubtargetFeature<"vmem-to-lds-load-insts",
"HasVMemToLDSLoad",
"true",
@@ -2115,6 +2127,8 @@ def FeatureISAVersion12_50 : FeatureSet<
FeatureLshlAddU64Inst,
FeatureAddSubU64Insts,
FeatureMadU32Inst,
+ FeatureAddMinMaxInsts,
+ FeaturePkAddMinMaxInsts,
FeatureLdsBarrierArriveAtomic,
FeatureSetPrioIncWgInst,
Feature45BitNumRecordsBufferResource,
@@ -2658,11 +2672,11 @@ def HasFmaakFmamkF64Insts :
def HasAddMinMaxInsts :
Predicate<"Subtarget->hasAddMinMaxInsts()">,
- AssemblerPredicate<(any_of FeatureGFX1250Insts)>;
+ AssemblerPredicate<(any_of FeatureAddMinMaxInsts)>;
def HasPkAddMinMaxInsts :
Predicate<"Subtarget->hasPkAddMinMaxInsts()">,
- AssemblerPredicate<(any_of FeatureGFX1250Insts)>;
+ AssemblerPredicate<(any_of FeaturePkAddMinMaxInsts)>;
def HasPkMinMax3Insts :
Predicate<"Subtarget->hasPkMinMax3Insts()">,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 56807a4..54ba2f8 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -4835,6 +4835,14 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case Intrinsic::amdgcn_perm_pk16_b4_u4:
case Intrinsic::amdgcn_perm_pk16_b6_u4:
case Intrinsic::amdgcn_perm_pk16_b8_u4:
+ case Intrinsic::amdgcn_add_max_i32:
+ case Intrinsic::amdgcn_add_max_u32:
+ case Intrinsic::amdgcn_add_min_i32:
+ case Intrinsic::amdgcn_add_min_u32:
+ case Intrinsic::amdgcn_pk_add_max_i16:
+ case Intrinsic::amdgcn_pk_add_max_u16:
+ case Intrinsic::amdgcn_pk_add_min_i16:
+ case Intrinsic::amdgcn_pk_add_min_u16:
return getDefaultMappingVOP(MI);
case Intrinsic::amdgcn_log:
case Intrinsic::amdgcn_exp2:
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index a466780..ac660d5 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -277,6 +277,8 @@ protected:
bool HasLshlAddU64Inst = false;
bool HasAddSubU64Insts = false;
bool HasMadU32Inst = false;
+ bool HasAddMinMaxInsts = false;
+ bool HasPkAddMinMaxInsts = false;
bool HasPointSampleAccel = false;
bool HasLdsBarrierArriveAtomic = false;
bool HasSetPrioIncWgInst = false;
@@ -1567,10 +1569,10 @@ public:
bool hasIntMinMax64() const { return GFX1250Insts; }
// \returns true if the target has V_ADD_{MIN|MAX}_{I|U}32 instructions.
- bool hasAddMinMaxInsts() const { return GFX1250Insts; }
+ bool hasAddMinMaxInsts() const { return HasAddMinMaxInsts; }
// \returns true if the target has V_PK_ADD_{MIN|MAX}_{I|U}16 instructions.
- bool hasPkAddMinMaxInsts() const { return GFX1250Insts; }
+ bool hasPkAddMinMaxInsts() const { return HasPkAddMinMaxInsts; }
// \returns true if the target has V_PK_{MIN|MAX}3_{I|U}16 instructions.
bool hasPkMinMax3Insts() const { return GFX1250Insts; }
diff --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
index 7cce033..ee10190 100644
--- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
@@ -775,10 +775,10 @@ let SubtargetPredicate = HasMinimum3Maximum3F16, ReadsModeReg = 0 in {
} // End SubtargetPredicate = isGFX12Plus, ReadsModeReg = 0
let SubtargetPredicate = HasAddMinMaxInsts, isCommutable = 1, isReMaterializable = 1 in {
- defm V_ADD_MAX_I32 : VOP3Inst <"v_add_max_i32", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
- defm V_ADD_MAX_U32 : VOP3Inst <"v_add_max_u32", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
- defm V_ADD_MIN_I32 : VOP3Inst <"v_add_min_i32", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
- defm V_ADD_MIN_U32 : VOP3Inst <"v_add_min_u32", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
+ defm V_ADD_MAX_I32 : VOP3Inst <"v_add_max_i32", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>, int_amdgcn_add_max_i32>;
+ defm V_ADD_MAX_U32 : VOP3Inst <"v_add_max_u32", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>, int_amdgcn_add_max_u32>;
+ defm V_ADD_MIN_I32 : VOP3Inst <"v_add_min_i32", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>, int_amdgcn_add_min_i32>;
+ defm V_ADD_MIN_U32 : VOP3Inst <"v_add_min_u32", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>, int_amdgcn_add_min_u32>;
}
defm V_ADD_I16 : VOP3Inst_t16 <"v_add_i16", VOP_I16_I16_I16>;
diff --git a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
index 6500fce..c4692b7 100644
--- a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
@@ -75,7 +75,7 @@ multiclass VOP3PInst<string OpName, VOPProfile P,
SDPatternOperator node = null_frag, bit IsDOT = 0> {
def NAME : VOP3P_Pseudo<OpName, P,
!if (P.HasModifiers,
- getVOP3PModPat<P, node, IsDOT, IsDOT>.ret,
+ getVOP3PModPat<P, node, !or(P.EnableClamp, IsDOT), IsDOT>.ret,
getVOP3Pat<P, node>.ret)>;
let SubtargetPredicate = isGFX11Plus in {
if P.HasExtVOP3DPP then
@@ -434,15 +434,16 @@ defm : MadFmaMixFP16Pats_t16<fma, V_FMA_MIX_BF16_t16>;
} // End SubtargetPredicate = HasFmaMixBF16Insts
def PK_ADD_MINMAX_Profile : VOP3P_Profile<VOP_V2I16_V2I16_V2I16_V2I16, VOP3_PACKED> {
- let HasModifiers = 0;
+ let HasNeg = 0;
+ let EnableClamp = 1;
}
let isCommutable = 1, isReMaterializable = 1 in {
let SubtargetPredicate = HasPkAddMinMaxInsts in {
-defm V_PK_ADD_MAX_I16 : VOP3PInst<"v_pk_add_max_i16", PK_ADD_MINMAX_Profile>;
-defm V_PK_ADD_MAX_U16 : VOP3PInst<"v_pk_add_max_u16", PK_ADD_MINMAX_Profile>;
-defm V_PK_ADD_MIN_I16 : VOP3PInst<"v_pk_add_min_i16", PK_ADD_MINMAX_Profile>;
-defm V_PK_ADD_MIN_U16 : VOP3PInst<"v_pk_add_min_u16", PK_ADD_MINMAX_Profile>;
+defm V_PK_ADD_MAX_I16 : VOP3PInst<"v_pk_add_max_i16", PK_ADD_MINMAX_Profile, int_amdgcn_pk_add_max_i16>;
+defm V_PK_ADD_MAX_U16 : VOP3PInst<"v_pk_add_max_u16", PK_ADD_MINMAX_Profile, int_amdgcn_pk_add_max_u16>;
+defm V_PK_ADD_MIN_I16 : VOP3PInst<"v_pk_add_min_i16", PK_ADD_MINMAX_Profile, int_amdgcn_pk_add_min_i16>;
+defm V_PK_ADD_MIN_U16 : VOP3PInst<"v_pk_add_min_u16", PK_ADD_MINMAX_Profile, int_amdgcn_pk_add_min_u16>;
}
let SubtargetPredicate = HasPkMinMax3Insts in {
defm V_PK_MAX3_I16 : VOP3PInst<"v_pk_max3_i16", PK_ADD_MINMAX_Profile>;
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index f7deeaf..ca4a655 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -2614,6 +2614,9 @@ static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
if ((Result = lowerVECTOR_SHUFFLE_XVSHUF4I(DL, Mask, VT, V1, V2, DAG,
Subtarget)))
return Result;
+ // Try to widen vectors to gain more optimization opportunities.
+ if (SDValue NewShuffle = widenShuffleMask(DL, Mask, VT, V1, V2, DAG))
+ return NewShuffle;
if ((Result =
lowerVECTOR_SHUFFLE_XVPERMI(DL, Mask, VT, V1, DAG, Subtarget)))
return Result;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td
index f7d1a09..b9c5b75 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td
@@ -668,4 +668,38 @@ foreach vti = NoGroupBF16Vectors in {
def : Pat<(vti.Scalar (extractelt (vti.Vector vti.RegClass:$rs2), 0)),
(vfmv_f_s_inst vti.RegClass:$rs2, vti.Log2SEW)>;
}
+
+let Predicates = [HasStdExtZvfbfa] in {
+ foreach fvtiToFWti = AllWidenableBF16ToFloatVectors in {
+ defvar fvti = fvtiToFWti.Vti;
+ defvar fwti = fvtiToFWti.Wti;
+ def : Pat<(fwti.Vector (any_riscv_fpextend_vl
+ (fvti.Vector fvti.RegClass:$rs1),
+ (fvti.Mask VMV0:$vm),
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVFWCVT_F_F_ALT_V_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
+ (fwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
+ (fvti.Mask VMV0:$vm),
+ GPR:$vl, fvti.Log2SEW, TA_MA)>;
+
+ def : Pat<(fvti.Vector (any_riscv_fpround_vl
+ (fwti.Vector fwti.RegClass:$rs1),
+ (fwti.Mask VMV0:$vm), VLOpFrag)),
+ (!cast<Instruction>("PseudoVFNCVT_F_F_ALT_W_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
+ (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
+ (fwti.Mask VMV0:$vm),
+ // Value to indicate no rounding mode change in
+ // RISCVInsertReadWriteCSR
+ FRM_DYN,
+ GPR:$vl, fvti.Log2SEW, TA_MA)>;
+ def : Pat<(fvti.Vector (fpround (fwti.Vector fwti.RegClass:$rs1))),
+ (!cast<Instruction>("PseudoVFNCVT_F_F_ALT_W_"#fvti.LMul.MX#"_E"#fvti.SEW)
+ (fvti.Vector (IMPLICIT_DEF)),
+ fwti.RegClass:$rs1,
+ // Value to indicate no rounding mode change in
+ // RISCVInsertReadWriteCSR
+ FRM_DYN,
+ fvti.AVL, fvti.Log2SEW, TA_MA)>;
+ }
+}
} // Predicates = [HasStdExtZvfbfa]
diff --git a/llvm/lib/TargetParser/TargetParser.cpp b/llvm/lib/TargetParser/TargetParser.cpp
index 62a3c88..975a271 100644
--- a/llvm/lib/TargetParser/TargetParser.cpp
+++ b/llvm/lib/TargetParser/TargetParser.cpp
@@ -433,6 +433,8 @@ static void fillAMDGCNFeatureMap(StringRef GPU, const Triple &T,
Features["fp8e5m3-insts"] = true;
Features["permlane16-swap"] = true;
Features["ashr-pk-insts"] = true;
+ Features["add-min-max-insts"] = true;
+ Features["pk-add-min-max-insts"] = true;
Features["atomic-buffer-pk-add-bf16-inst"] = true;
Features["vmem-pref-insts"] = true;
Features["atomic-fadd-rtn-insts"] = true;
diff --git a/llvm/test/Analysis/BranchProbabilityInfo/pr22718.ll b/llvm/test/Analysis/BranchProbabilityInfo/pr22718.ll
index ed851f2..67ce44e 100644
--- a/llvm/test/Analysis/BranchProbabilityInfo/pr22718.ll
+++ b/llvm/test/Analysis/BranchProbabilityInfo/pr22718.ll
@@ -12,14 +12,14 @@
@.str = private unnamed_addr constant [17 x i8] c"x = %lu\0Ay = %lu\0A\00", align 1
; Function Attrs: inlinehint nounwind uwtable
-define i32 @main() #0 {
+define i32 @main() {
entry:
%retval = alloca i32, align 4
%i = alloca i64, align 8
store i32 0, ptr %retval
store i64 0, ptr @y, align 8
store i64 0, ptr @x, align 8
- call void @srand(i32 422304) #3
+ call void @srand(i32 422304)
store i64 0, ptr %i, align 8
br label %for.cond
@@ -29,7 +29,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.end, !prof !1
for.body: ; preds = %for.cond
- %call = call i32 @rand() #3
+ %call = call i32 @rand()
%conv = sitofp i32 %call to double
%mul = fmul double %conv, 1.000000e+02
%div = fdiv double %mul, 0x41E0000000000000
@@ -65,17 +65,12 @@ for.end: ; preds = %for.cond
}
; Function Attrs: nounwind
-declare void @srand(i32) #1
+declare void @srand(i32)
; Function Attrs: nounwind
-declare i32 @rand() #1
+declare i32 @rand()
-declare i32 @printf(ptr, ...) #2
-
-attributes #0 = { inlinehint nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { nounwind }
+declare i32 @printf(ptr, ...)
!llvm.ident = !{!0}
diff --git a/llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll b/llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll
index 245e8f7..058370c 100644
--- a/llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll
+++ b/llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll
@@ -23,10 +23,10 @@
%"class.llvm::Metadata.306.1758.9986.10470.10954.11438.11922.12406.12890.13374.13858.15310.15794.16278.17730.19182.21118.25958.26926.29346.29830.30314.30798.31282.31766.32250.32734.33702.36606.38058.41638" = type { i8, i8, i16, i32 }
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end(ptr nocapture) #0
+declare void @llvm.lifetime.end(ptr nocapture)
; Function Attrs: nounwind ssp uwtable
-define hidden void @fun(ptr %N, i1 %arg) #1 align 2 {
+define hidden void @fun(ptr %N, i1 %arg) align 2 {
; CHECK: define
entry:
%NumOperands.i = getelementptr inbounds %"class.llvm::SDNode.310.1762.9990.10474.10958.11442.11926.12410.12894.13378.13862.15314.15798.16282.17734.19186.21122.25962.26930.29350.29834.30318.30802.31286.31770.32254.32738.33706.36610.38062.41642", ptr %N, i64 0, i32 8
@@ -47,8 +47,6 @@ for.body: ; preds = %for.body, %for.body
br i1 %exitcond193, label %for.cond.cleanup, label %for.body
}
-attributes #0 = { argmemonly nounwind }
-attributes #1 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
!llvm.ident = !{!0}
diff --git a/llvm/test/Analysis/Delinearization/constant_functions_multi_dim.ll b/llvm/test/Analysis/Delinearization/constant_functions_multi_dim.ll
index 0c0fb41..891d604 100644
--- a/llvm/test/Analysis/Delinearization/constant_functions_multi_dim.ll
+++ b/llvm/test/Analysis/Delinearization/constant_functions_multi_dim.ll
@@ -4,7 +4,7 @@
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; Function Attrs: noinline nounwind uwtable
-define void @mat_mul(ptr %C, ptr %A, ptr %B, i64 %N) #0 !kernel_arg_addr_space !2 !kernel_arg_access_qual !3 !kernel_arg_type !4 !kernel_arg_base_type !4 !kernel_arg_type_qual !5 {
+define void @mat_mul(ptr %C, ptr %A, ptr %B, i64 %N) !kernel_arg_addr_space !2 !kernel_arg_access_qual !3 !kernel_arg_type !4 !kernel_arg_base_type !4 !kernel_arg_type_qual !5 {
; CHECK-LABEL: 'mat_mul'
; CHECK-NEXT: Inst: %tmp = load float, ptr %arrayidx, align 4
; CHECK-NEXT: AccessFunction: {(4 * %N * %call),+,4}<%for.inc>
@@ -22,8 +22,8 @@ entry:
br label %entry.split
entry.split: ; preds = %entry
- %call = tail call i64 @_Z13get_global_idj(i32 0) #3
- %call1 = tail call i64 @_Z13get_global_idj(i32 1) #3
+ %call = tail call i64 @_Z13get_global_idj(i32 0)
+ %call1 = tail call i64 @_Z13get_global_idj(i32 1)
%cmp1 = icmp sgt i64 %N, 0
%mul = mul nsw i64 %call, %N
br i1 %cmp1, label %for.inc.lr.ph, label %for.end
@@ -59,15 +59,10 @@ for.end: ; preds = %for.cond.for.end_cr
}
; Function Attrs: nounwind readnone
-declare i64 @_Z13get_global_idj(i32) #1
+declare i64 @_Z13get_global_idj(i32)
; Function Attrs: nounwind readnone speculatable
-declare float @llvm.fmuladd.f32(float, float, float) #2
-
-attributes #0 = { noinline nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind readnone speculatable }
-attributes #3 = { nounwind readnone }
+declare float @llvm.fmuladd.f32(float, float, float)
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
diff --git a/llvm/test/Analysis/DependenceAnalysis/MIVCheckConst.ll b/llvm/test/Analysis/DependenceAnalysis/MIVCheckConst.ll
index b498d7064..f5be89a 100644
--- a/llvm/test/Analysis/DependenceAnalysis/MIVCheckConst.ll
+++ b/llvm/test/Analysis/DependenceAnalysis/MIVCheckConst.ll
@@ -30,7 +30,7 @@ target datalayout = "e-m:e-p:32:32-i1:32-i64:64-a:0-v32:32-n16:32"
%20 = type { [768 x i32] }
%21 = type { [416 x i32] }
-define void @test(ptr %A, ptr %B, i1 %arg, i32 %n, i32 %m) #0 align 2 {
+define void @test(ptr %A, ptr %B, i1 %arg, i32 %n, i32 %m) align 2 {
; CHECK-LABEL: 'test'
; CHECK-NEXT: Src: %v1 = load i32, ptr %B, align 4 --> Dst: %v1 = load i32, ptr %B, align 4
; CHECK-NEXT: da analyze - none!
@@ -91,5 +91,3 @@ bb38:
bb40:
ret void
}
-
-attributes #0 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll b/llvm/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll
index e5d5d21e..eba017a 100644
--- a/llvm/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll
+++ b/llvm/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll
@@ -52,7 +52,7 @@ for.end:
@a = global [10004 x [10004 x i32]] zeroinitializer, align 16
; Function Attrs: nounwind uwtable
-define void @coupled_miv_type_mismatch(i32 %n) #0 {
+define void @coupled_miv_type_mismatch(i32 %n) {
; CHECK-LABEL: 'coupled_miv_type_mismatch'
; CHECK-NEXT: Src: %2 = load i32, ptr %arrayidx5, align 4 --> Dst: %2 = load i32, ptr %arrayidx5, align 4
; CHECK-NEXT: da analyze - none!
@@ -101,8 +101,6 @@ for.end13: ; preds = %for.cond
ret void
}
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.ident = !{!0}
!0 = !{!"clang version 3.7.0"}
diff --git a/llvm/test/Analysis/MemoryDependenceAnalysis/invariant.group-bug.ll b/llvm/test/Analysis/MemoryDependenceAnalysis/invariant.group-bug.ll
index c11191e..5470ef9 100644
--- a/llvm/test/Analysis/MemoryDependenceAnalysis/invariant.group-bug.ll
+++ b/llvm/test/Analysis/MemoryDependenceAnalysis/invariant.group-bug.ll
@@ -17,13 +17,13 @@ target triple = "x86_64-grtev4-linux-gnu"
%4 = type { ptr }
%5 = type { i64, [8 x i8] }
-define void @fail(ptr noalias sret(i1) %arg, ptr %arg1, ptr %arg2, ptr %arg3, i1 %arg4) local_unnamed_addr #0 {
+define void @fail(ptr noalias sret(i1) %arg, ptr %arg1, ptr %arg2, ptr %arg3, i1 %arg4) local_unnamed_addr {
; CHECK-LABEL: @fail(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I4:%.*]] = load ptr, ptr [[ARG1:%.*]], align 8, !invariant.group [[META6:![0-9]+]]
; CHECK-NEXT: [[I5:%.*]] = getelementptr inbounds ptr, ptr [[I4]], i64 6
; CHECK-NEXT: [[I6:%.*]] = load ptr, ptr [[I5]], align 8, !invariant.load [[META6]]
-; CHECK-NEXT: [[I7:%.*]] = tail call i64 [[I6]](ptr [[ARG1]]) #[[ATTR1:[0-9]+]]
+; CHECK-NEXT: [[I7:%.*]] = tail call i64 [[I6]](ptr [[ARG1]])
; CHECK-NEXT: [[I9:%.*]] = load ptr, ptr [[ARG2:%.*]], align 8
; CHECK-NEXT: store i8 0, ptr [[I9]], align 1
; CHECK-NEXT: br i1 [[ARG4:%.*]], label [[BB10:%.*]], label [[BB29:%.*]]
@@ -32,7 +32,7 @@ define void @fail(ptr noalias sret(i1) %arg, ptr %arg1, ptr %arg2, ptr %arg3, i1
; CHECK-NEXT: [[I15_PRE:%.*]] = load ptr, ptr [[I14_PHI_TRANS_INSERT]], align 8, !invariant.load [[META6]]
; CHECK-NEXT: br label [[BB12:%.*]]
; CHECK: bb12:
-; CHECK-NEXT: [[I16:%.*]] = call i64 [[I15_PRE]](ptr nonnull [[ARG1]], ptr null, i64 0) #[[ATTR1]]
+; CHECK-NEXT: [[I16:%.*]] = call i64 [[I15_PRE]](ptr nonnull [[ARG1]], ptr null, i64 0)
; CHECK-NEXT: br i1 true, label [[BB28:%.*]], label [[BB17:%.*]]
; CHECK: bb17:
; CHECK-NEXT: br i1 true, label [[BB18:%.*]], label [[BB21:%.*]]
@@ -55,7 +55,7 @@ bb:
%i4 = load ptr, ptr %arg1, align 8, !invariant.group !6
%i5 = getelementptr inbounds ptr, ptr %i4, i64 6
%i6 = load ptr, ptr %i5, align 8, !invariant.load !6
- %i7 = tail call i64 %i6(ptr %arg1) #1
+ %i7 = tail call i64 %i6(ptr %arg1)
%i9 = load ptr, ptr %arg2, align 8
store i8 0, ptr %i9, align 1
br i1 %arg4, label %bb10, label %bb29
@@ -67,7 +67,7 @@ bb12: ; preds = %bb28, %bb10
%i13 = load ptr, ptr %arg1, align 8, !invariant.group !6
%i14 = getelementptr inbounds ptr, ptr %i13, i64 22
%i15 = load ptr, ptr %i14, align 8, !invariant.load !6
- %i16 = call i64 %i15(ptr nonnull %arg1, ptr null, i64 0) #1
+ %i16 = call i64 %i15(ptr nonnull %arg1, ptr null, i64 0)
br i1 %arg4, label %bb28, label %bb17
bb17: ; preds = %bb12
@@ -110,9 +110,6 @@ bb29: ; preds = %bb28, %bb
ret void
}
-attributes #0 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="non-leaf" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+popcnt,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="non-leaf" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+popcnt,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.linker.options = !{}
!llvm.module.flags = !{!0, !1, !3, !4, !5}
diff --git a/llvm/test/Analysis/MemorySSA/pr28880.ll b/llvm/test/Analysis/MemorySSA/pr28880.ll
index 98f3261..a2690b9 100644
--- a/llvm/test/Analysis/MemorySSA/pr28880.ll
+++ b/llvm/test/Analysis/MemorySSA/pr28880.ll
@@ -8,7 +8,7 @@
@global.1 = external hidden unnamed_addr global double, align 8
; Function Attrs: nounwind ssp uwtable
-define hidden fastcc void @hoge(i1 %arg) unnamed_addr #0 {
+define hidden fastcc void @hoge(i1 %arg) unnamed_addr {
bb:
br i1 %arg, label %bb1, label %bb2
@@ -45,6 +45,3 @@ bb4: ; preds = %bb3
bb6: ; preds = %bb3
unreachable
}
-
-attributes #0 = { nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
diff --git a/llvm/test/Analysis/MemorySSA/pr39197.ll b/llvm/test/Analysis/MemorySSA/pr39197.ll
index af57b3c..6be0c58 100644
--- a/llvm/test/Analysis/MemorySSA/pr39197.ll
+++ b/llvm/test/Analysis/MemorySSA/pr39197.ll
@@ -12,13 +12,13 @@ declare void @dummy()
; CHECK-LABEL: @main()
; Function Attrs: nounwind
-define dso_local void @main() #0 {
+define dso_local void @main() {
call void @func_1()
unreachable
}
; Function Attrs: nounwind
-define dso_local void @func_1() #0 {
+define dso_local void @func_1() {
%1 = alloca ptr, align 8
%2 = call signext i32 @func_2()
%3 = icmp ne i32 %2, 0
@@ -64,45 +64,45 @@ define dso_local void @func_1() #0 {
}
; Function Attrs: nounwind
-declare dso_local signext i32 @func_2() #0
+declare dso_local signext i32 @func_2()
; Function Attrs: nounwind
-define dso_local void @safe_sub_func_uint8_t_u_u() #0 {
+define dso_local void @safe_sub_func_uint8_t_u_u() {
ret void
}
; Function Attrs: nounwind
-define dso_local void @safe_add_func_int64_t_s_s() #0 {
+define dso_local void @safe_add_func_int64_t_s_s() {
ret void
}
; Function Attrs: nounwind
-define dso_local void @safe_rshift_func_int16_t_s_u() #0 {
+define dso_local void @safe_rshift_func_int16_t_s_u() {
ret void
}
; Function Attrs: nounwind
-define dso_local void @safe_div_func_uint8_t_u_u() #0 {
+define dso_local void @safe_div_func_uint8_t_u_u() {
ret void
}
; Function Attrs: nounwind
-define dso_local void @safe_mul_func_uint16_t_u_u() #0 {
+define dso_local void @safe_mul_func_uint16_t_u_u() {
ret void
}
; Function Attrs: nounwind
-define dso_local void @safe_mul_func_int16_t_s_s() #0 {
+define dso_local void @safe_mul_func_int16_t_s_s() {
ret void
}
; Function Attrs: nounwind
-define dso_local void @safe_div_func_int32_t_s_s() #0 {
+define dso_local void @safe_div_func_int32_t_s_s() {
ret void
}
; Function Attrs: nounwind
-define dso_local signext i16 @safe_sub_func_int16_t_s_s(i16 signext) #0 {
+define dso_local signext i16 @safe_sub_func_int16_t_s_s(i16 signext) {
%2 = alloca i16, align 2
store i16 %0, ptr %2, align 2, !tbaa !1
%3 = load i16, ptr %2, align 2, !tbaa !1
@@ -113,29 +113,25 @@ define dso_local signext i16 @safe_sub_func_int16_t_s_s(i16 signext) #0 {
}
; Function Attrs: nounwind
-define dso_local void @safe_add_func_uint16_t_u_u() #0 {
+define dso_local void @safe_add_func_uint16_t_u_u() {
ret void
}
; Function Attrs: nounwind
-define dso_local void @safe_div_func_int8_t_s_s() #0 {
+define dso_local void @safe_div_func_int8_t_s_s() {
ret void
}
; Function Attrs: nounwind
-define dso_local void @safe_add_func_int16_t_s_s() #0 {
+define dso_local void @safe_add_func_int16_t_s_s() {
ret void
}
; Function Attrs: nounwind
-define dso_local void @safe_add_func_uint8_t_u_u() #0 {
+define dso_local void @safe_add_func_uint8_t_u_u() {
ret void
}
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="z13" "target-features"="+transactional-execution,+vector" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { argmemonly nounwind }
-attributes #2 = { nounwind }
-
!llvm.ident = !{!0}
!0 = !{!"clang version 8.0.0 (http://llvm.org/git/clang.git 7cda4756fc9713d98fd3513b8df172700f267bad) (http://llvm.org/git/llvm.git 199c0d32e96b646bd8cf6beeaf0f99f8a434b56a)"}
diff --git a/llvm/test/Analysis/MemorySSA/pr40038.ll b/llvm/test/Analysis/MemorySSA/pr40038.ll
index efdcbe5..39ea78b 100644
--- a/llvm/test/Analysis/MemorySSA/pr40038.ll
+++ b/llvm/test/Analysis/MemorySSA/pr40038.ll
@@ -10,21 +10,21 @@ target triple = "s390x-ibm-linux"
; Function Attrs: nounwind
; CHECK-LABEL: @main
-define dso_local void @main() #0 {
+define dso_local void @main() {
bb:
call void @func_1()
unreachable
}
; Function Attrs: nounwind
-define dso_local void @func_1() #0 {
+define dso_local void @func_1() {
bb:
call void @func_2()
unreachable
}
; Function Attrs: nounwind
-define dso_local void @func_2() #0 {
+define dso_local void @func_2() {
bb:
%tmp = alloca i32, align 4
store i32 0, ptr @g_80, align 4, !tbaa !1
@@ -68,10 +68,7 @@ bb18: ; preds = %bb12, %bb1
}
; Function Attrs: cold noreturn nounwind
-declare void @llvm.trap() #1
-
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="z13" "target-features"="+transactional-execution,+vector" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { cold noreturn nounwind }
+declare void @llvm.trap()
!llvm.ident = !{!0}
diff --git a/llvm/test/Analysis/MemorySSA/pr43569.ll b/llvm/test/Analysis/MemorySSA/pr43569.ll
index 02d074e..c81f8d4 100644
--- a/llvm/test/Analysis/MemorySSA/pr43569.ll
+++ b/llvm/test/Analysis/MemorySSA/pr43569.ll
@@ -10,7 +10,7 @@ target triple = "x86_64-unknown-linux-gnu"
; CHECK-LABEL: @c()
; Function Attrs: nounwind uwtable
-define dso_local void @c() #0 {
+define dso_local void @c() {
entry:
call void @llvm.instrprof.increment(ptr @__profn_c, i64 68269137, i32 3, i32 0)
br label %for.cond
@@ -42,8 +42,4 @@ for.end: ; preds = %for.cond1
}
; Function Attrs: nounwind
-declare void @llvm.instrprof.increment(ptr, i64, i32, i32) #1
-
-attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind }
-
+declare void @llvm.instrprof.increment(ptr, i64, i32, i32)
diff --git a/llvm/test/Analysis/ScalarEvolution/pr22674.ll b/llvm/test/Analysis/ScalarEvolution/pr22674.ll
index 95f96ca..b2f4ae6 100644
--- a/llvm/test/Analysis/ScalarEvolution/pr22674.ll
+++ b/llvm/test/Analysis/ScalarEvolution/pr22674.ll
@@ -11,7 +11,7 @@ target triple = "x86_64-pc-linux-gnux32"
%"class.llvm::AttributeImpl.2.1802.3601.5914.6685.7456.8227.9255.9769.10026.18508" = type <{ ptr, %"class.llvm::FoldingSetImpl::Node.1.1801.3600.5913.6684.7455.8226.9254.9768.10025.18505", i8, [3 x i8] }>
; Function Attrs: nounwind uwtable
-define void @_ZNK4llvm11AttrBuilder13hasAttributesENS_12AttributeSetEy(i1 %arg) #0 align 2 {
+define void @_ZNK4llvm11AttrBuilder13hasAttributesENS_12AttributeSetEy(i1 %arg) align 2 {
entry:
br i1 %arg, label %cond.false, label %_ZNK4llvm12AttributeSet11getNumSlotsEv.exit
@@ -82,8 +82,6 @@ return: ; preds = %_ZNK4llvm9Attribute
ret void
}
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
diff --git a/llvm/test/Analysis/ScalarEvolution/scev-canonical-mode.ll b/llvm/test/Analysis/ScalarEvolution/scev-canonical-mode.ll
index 3879b2e7..d9cc3e5 100644
--- a/llvm/test/Analysis/ScalarEvolution/scev-canonical-mode.ll
+++ b/llvm/test/Analysis/ScalarEvolution/scev-canonical-mode.ll
@@ -6,7 +6,7 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: norecurse nounwind uwtable
-define void @ehF(i1 %arg) #0 {
+define void @ehF(i1 %arg) {
entry:
br i1 %arg, label %if.then.i, label %hup.exit
@@ -28,5 +28,3 @@ for.body.i: ; preds = %for.body.i, %for.bo
hup.exit: ; preds = %for.body.i, %if.then.i, %entry
ret void
}
-
-attributes #0 = { norecurse nounwind uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll b/llvm/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll
index 67d81e7..7fb4231 100644
--- a/llvm/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll
+++ b/llvm/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll
@@ -13,7 +13,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
%classD = type { ptr }
; Function Attrs: ssp uwtable
-define ptr @test(ptr %this, ptr %p1) #0 align 2 {
+define ptr @test(ptr %this, ptr %p1) align 2 {
entry:
; CHECK-LABEL: @test
; CHECK: load ptr, ptr %p1, align 8, !tbaa
@@ -25,10 +25,7 @@ entry:
unreachable
}
-declare void @callee(ptr, ptr) #1
-
-attributes #0 = { ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+declare void @callee(ptr, ptr)
!llvm.ident = !{!0}
diff --git a/llvm/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll b/llvm/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll
index 942fdf5..f9a2988 100644
--- a/llvm/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll
+++ b/llvm/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll
@@ -9,7 +9,7 @@
%struct.StructC = type { i16, %struct.StructB, i32 }
%struct.StructD = type { i16, %struct.StructB, i32, i8 }
-define i32 @_Z1gPjP7StructAy(ptr %s, ptr %A, i64 %count) #0 {
+define i32 @_Z1gPjP7StructAy(ptr %s, ptr %A, i64 %count) {
entry:
; Access to ptr and &(A->f32).
; CHECK: Function
@@ -35,7 +35,7 @@ entry:
ret i32 %3
}
-define i32 @_Z2g2PjP7StructAy(ptr %s, ptr %A, i64 %count) #0 {
+define i32 @_Z2g2PjP7StructAy(ptr %s, ptr %A, i64 %count) {
entry:
; Access to ptr and &(A->f16).
; CHECK: Function
@@ -60,7 +60,7 @@ entry:
ret i32 %3
}
-define i32 @_Z2g3P7StructAP7StructBy(ptr %A, ptr %B, i64 %count) #0 {
+define i32 @_Z2g3P7StructAP7StructBy(ptr %A, ptr %B, i64 %count) {
entry:
; Access to &(A->f32) and &(B->a.f32).
; CHECK: Function
@@ -89,7 +89,7 @@ entry:
ret i32 %3
}
-define i32 @_Z2g4P7StructAP7StructBy(ptr %A, ptr %B, i64 %count) #0 {
+define i32 @_Z2g4P7StructAP7StructBy(ptr %A, ptr %B, i64 %count) {
entry:
; Access to &(A->f32) and &(B->a.f16).
; CHECK: Function
@@ -117,7 +117,7 @@ entry:
ret i32 %3
}
-define i32 @_Z2g5P7StructAP7StructBy(ptr %A, ptr %B, i64 %count) #0 {
+define i32 @_Z2g5P7StructAP7StructBy(ptr %A, ptr %B, i64 %count) {
entry:
; Access to &(A->f32) and &(B->f32).
; CHECK: Function
@@ -145,7 +145,7 @@ entry:
ret i32 %3
}
-define i32 @_Z2g6P7StructAP7StructBy(ptr %A, ptr %B, i64 %count) #0 {
+define i32 @_Z2g6P7StructAP7StructBy(ptr %A, ptr %B, i64 %count) {
entry:
; Access to &(A->f32) and &(B->a.f32_2).
; CHECK: Function
@@ -174,7 +174,7 @@ entry:
ret i32 %3
}
-define i32 @_Z2g7P7StructAP7StructSy(ptr %A, ptr %S, i64 %count) #0 {
+define i32 @_Z2g7P7StructAP7StructSy(ptr %A, ptr %S, i64 %count) {
entry:
; Access to &(A->f32) and &(S->f32).
; CHECK: Function
@@ -202,7 +202,7 @@ entry:
ret i32 %3
}
-define i32 @_Z2g8P7StructAP7StructSy(ptr %A, ptr %S, i64 %count) #0 {
+define i32 @_Z2g8P7StructAP7StructSy(ptr %A, ptr %S, i64 %count) {
entry:
; Access to &(A->f32) and &(S->f16).
; CHECK: Function
@@ -229,7 +229,7 @@ entry:
ret i32 %3
}
-define i32 @_Z2g9P7StructSP8StructS2y(ptr %S, ptr %S2, i64 %count) #0 {
+define i32 @_Z2g9P7StructSP8StructS2y(ptr %S, ptr %S2, i64 %count) {
entry:
; Access to &(S->f32) and &(S2->f32).
; CHECK: Function
@@ -257,7 +257,7 @@ entry:
ret i32 %3
}
-define i32 @_Z3g10P7StructSP8StructS2y(ptr %S, ptr %S2, i64 %count) #0 {
+define i32 @_Z3g10P7StructSP8StructS2y(ptr %S, ptr %S2, i64 %count) {
entry:
; Access to &(S->f32) and &(S2->f16).
; CHECK: Function
@@ -284,7 +284,7 @@ entry:
ret i32 %3
}
-define i32 @_Z3g11P7StructCP7StructDy(ptr %C, ptr %D, i64 %count) #0 {
+define i32 @_Z3g11P7StructCP7StructDy(ptr %C, ptr %D, i64 %count) {
entry:
; Access to &(C->b.a.f32) and &(D->b.a.f32).
; CHECK: Function
@@ -318,7 +318,7 @@ entry:
ret i32 %3
}
-define i32 @_Z3g12P7StructCP7StructDy(ptr %C, ptr %D, i64 %count) #0 {
+define i32 @_Z3g12P7StructCP7StructDy(ptr %C, ptr %D, i64 %count) {
entry:
; Access to &(b1->a.f32) and &(b2->a.f32).
; CHECK: Function
@@ -357,8 +357,6 @@ entry:
ret i32 %5
}
-attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!0 = !{!1, !1, i64 0}
!1 = !{!"any pointer", !2}
!2 = !{!"omnipotent char", !3}
diff --git a/llvm/test/CodeGen/AArch64/pr164181.ll b/llvm/test/CodeGen/AArch64/pr164181.ll
new file mode 100644
index 0000000..4ec63ec
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/pr164181.ll
@@ -0,0 +1,640 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -verify-machineinstrs < %s | FileCheck %s
+
+; This test recreates a regalloc crash reported in
+; https://github.com/llvm/llvm-project/issues/164181
+; When rematerializing an instruction, we need to constrain the newly created
+; register to both the rematerialized def's register class and the use's
+; register class. (A sketch of this constraint follows this file's diff.)
+
+target triple = "aarch64-unknown-linux-gnu"
+
+@var_32 = external global i16
+@var_35 = external global i64
+@var_39 = external global i64
+@var_46 = external global i64
+@var_50 = external global i32
+
+define void @f(i1 %var_0, i16 %var_1, i64 %var_2, i8 %var_3, i16 %var_4, i1 %var_5, i32 %var_6, i32 %var_7, i8 %var_10, i64 %var_11, i8 %var_14, i32 %var_15, i64 %var_16, ptr %arr_3, ptr %arr_4, ptr %arr_6, ptr %arr_7, ptr %arr_12, ptr %arr_13, ptr %arr_19, i64 %mul, i64 %conv35, i64 %idxprom138.us16, i8 %0, i8 %1, ptr %invariant.gep875.us) #0 {
+; CHECK-LABEL: f:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #240
+; CHECK-NEXT: str x30, [sp, #144] // 8-byte Folded Spill
+; CHECK-NEXT: stp x28, x27, [sp, #160] // 16-byte Folded Spill
+; CHECK-NEXT: stp x26, x25, [sp, #176] // 16-byte Folded Spill
+; CHECK-NEXT: stp x24, x23, [sp, #192] // 16-byte Folded Spill
+; CHECK-NEXT: stp x22, x21, [sp, #208] // 16-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #224] // 16-byte Folded Spill
+; CHECK-NEXT: str w6, [sp, #20] // 4-byte Folded Spill
+; CHECK-NEXT: str w4, [sp, #72] // 4-byte Folded Spill
+; CHECK-NEXT: str w3, [sp, #112] // 4-byte Folded Spill
+; CHECK-NEXT: str w5, [sp, #36] // 4-byte Folded Spill
+; CHECK-NEXT: tbz w5, #0, .LBB0_43
+; CHECK-NEXT: // %bb.1: // %for.body41.lr.ph
+; CHECK-NEXT: ldr x4, [sp, #312]
+; CHECK-NEXT: ldr x14, [sp, #280]
+; CHECK-NEXT: tbz w0, #0, .LBB0_42
+; CHECK-NEXT: // %bb.2: // %for.body41.us.preheader
+; CHECK-NEXT: ldrb w8, [sp, #368]
+; CHECK-NEXT: ldrb w12, [sp, #256]
+; CHECK-NEXT: ldr w26, [sp, #264]
+; CHECK-NEXT: adrp x20, :got:var_50
+; CHECK-NEXT: mov x28, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: mov w21, #36006 // =0x8ca6
+; CHECK-NEXT: ldr x11, [sp, #376]
+; CHECK-NEXT: ldrb w13, [sp, #360]
+; CHECK-NEXT: ldp x17, x16, [sp, #296]
+; CHECK-NEXT: mov w22, #1 // =0x1
+; CHECK-NEXT: add x27, x14, #120
+; CHECK-NEXT: ldr x18, [sp, #288]
+; CHECK-NEXT: ldr x7, [sp, #272]
+; CHECK-NEXT: ldr x5, [sp, #248]
+; CHECK-NEXT: mov x10, xzr
+; CHECK-NEXT: mov w23, wzr
+; CHECK-NEXT: mov w30, wzr
+; CHECK-NEXT: ldrb w19, [sp, #240]
+; CHECK-NEXT: mov w25, wzr
+; CHECK-NEXT: mov x24, xzr
+; CHECK-NEXT: str w8, [sp, #108] // 4-byte Folded Spill
+; CHECK-NEXT: mov x3, x26
+; CHECK-NEXT: ldp x9, x8, [sp, #344]
+; CHECK-NEXT: str w12, [sp, #92] // 4-byte Folded Spill
+; CHECK-NEXT: mov w12, #1 // =0x1
+; CHECK-NEXT: bic w12, w12, w0
+; CHECK-NEXT: str w12, [sp, #76] // 4-byte Folded Spill
+; CHECK-NEXT: mov w12, #48 // =0x30
+; CHECK-NEXT: str x9, [sp, #136] // 8-byte Folded Spill
+; CHECK-NEXT: ldp x9, x15, [sp, #328]
+; CHECK-NEXT: madd x8, x8, x12, x9
+; CHECK-NEXT: str x8, [sp, #64] // 8-byte Folded Spill
+; CHECK-NEXT: add x8, x26, w26, uxtw #1
+; CHECK-NEXT: ldr x20, [x20, :got_lo12:var_50]
+; CHECK-NEXT: str x26, [sp, #96] // 8-byte Folded Spill
+; CHECK-NEXT: str x14, [sp, #152] // 8-byte Folded Spill
+; CHECK-NEXT: lsl x6, x8, #3
+; CHECK-NEXT: add x8, x14, #120
+; CHECK-NEXT: str x4, [sp, #24] // 8-byte Folded Spill
+; CHECK-NEXT: str w19, [sp, #16] // 4-byte Folded Spill
+; CHECK-NEXT: str x8, [sp, #80] // 8-byte Folded Spill
+; CHECK-NEXT: b .LBB0_4
+; CHECK-NEXT: .p2align 5, , 16
+; CHECK-NEXT: .LBB0_3: // in Loop: Header=BB0_4 Depth=1
+; CHECK-NEXT: ldr w19, [sp, #16] // 4-byte Folded Reload
+; CHECK-NEXT: ldr x24, [sp, #40] // 8-byte Folded Reload
+; CHECK-NEXT: ldr x14, [sp, #152] // 8-byte Folded Reload
+; CHECK-NEXT: mov w23, #1 // =0x1
+; CHECK-NEXT: mov w30, #1 // =0x1
+; CHECK-NEXT: mov w25, w19
+; CHECK-NEXT: .LBB0_4: // %for.body41.us
+; CHECK-NEXT: // =>This Loop Header: Depth=1
+; CHECK-NEXT: // Child Loop BB0_6 Depth 2
+; CHECK-NEXT: // Child Loop BB0_8 Depth 3
+; CHECK-NEXT: // Child Loop BB0_10 Depth 4
+; CHECK-NEXT: // Child Loop BB0_11 Depth 5
+; CHECK-NEXT: // Child Loop BB0_28 Depth 5
+; CHECK-NEXT: // Child Loop BB0_39 Depth 5
+; CHECK-NEXT: ldr w8, [sp, #20] // 4-byte Folded Reload
+; CHECK-NEXT: mov x12, x24
+; CHECK-NEXT: str x24, [sp, #48] // 8-byte Folded Spill
+; CHECK-NEXT: str w8, [x14]
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: strb w19, [x14]
+; CHECK-NEXT: b .LBB0_6
+; CHECK-NEXT: .p2align 5, , 16
+; CHECK-NEXT: .LBB0_5: // %for.cond.cleanup93.us
+; CHECK-NEXT: // in Loop: Header=BB0_6 Depth=2
+; CHECK-NEXT: ldr w9, [sp, #36] // 4-byte Folded Reload
+; CHECK-NEXT: ldr x4, [sp, #24] // 8-byte Folded Reload
+; CHECK-NEXT: ldp x24, x12, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT: mov x22, xzr
+; CHECK-NEXT: mov w25, wzr
+; CHECK-NEXT: mov w8, wzr
+; CHECK-NEXT: tbz w9, #0, .LBB0_3
+; CHECK-NEXT: .LBB0_6: // %for.body67.us
+; CHECK-NEXT: // Parent Loop BB0_4 Depth=1
+; CHECK-NEXT: // => This Loop Header: Depth=2
+; CHECK-NEXT: // Child Loop BB0_8 Depth 3
+; CHECK-NEXT: // Child Loop BB0_10 Depth 4
+; CHECK-NEXT: // Child Loop BB0_11 Depth 5
+; CHECK-NEXT: // Child Loop BB0_28 Depth 5
+; CHECK-NEXT: // Child Loop BB0_39 Depth 5
+; CHECK-NEXT: str x12, [sp, #40] // 8-byte Folded Spill
+; CHECK-NEXT: cmn x24, #30
+; CHECK-NEXT: mov x12, #-30 // =0xffffffffffffffe2
+; CHECK-NEXT: add x19, x4, w8, sxtw #2
+; CHECK-NEXT: mov x9, xzr
+; CHECK-NEXT: csel x12, x24, x12, lo
+; CHECK-NEXT: mov w4, w30
+; CHECK-NEXT: str x12, [sp, #56] // 8-byte Folded Spill
+; CHECK-NEXT: b .LBB0_8
+; CHECK-NEXT: .p2align 5, , 16
+; CHECK-NEXT: .LBB0_7: // %for.cond.cleanup98.us
+; CHECK-NEXT: // in Loop: Header=BB0_8 Depth=3
+; CHECK-NEXT: ldr w4, [sp, #72] // 4-byte Folded Reload
+; CHECK-NEXT: ldr w23, [sp, #128] // 4-byte Folded Reload
+; CHECK-NEXT: mov w9, #1 // =0x1
+; CHECK-NEXT: mov x22, xzr
+; CHECK-NEXT: tbnz w0, #0, .LBB0_5
+; CHECK-NEXT: .LBB0_8: // %for.cond95.preheader.us
+; CHECK-NEXT: // Parent Loop BB0_4 Depth=1
+; CHECK-NEXT: // Parent Loop BB0_6 Depth=2
+; CHECK-NEXT: // => This Loop Header: Depth=3
+; CHECK-NEXT: // Child Loop BB0_10 Depth 4
+; CHECK-NEXT: // Child Loop BB0_11 Depth 5
+; CHECK-NEXT: // Child Loop BB0_28 Depth 5
+; CHECK-NEXT: // Child Loop BB0_39 Depth 5
+; CHECK-NEXT: ldr x8, [sp, #64] // 8-byte Folded Reload
+; CHECK-NEXT: mov w14, #1152 // =0x480
+; CHECK-NEXT: mov w24, #1 // =0x1
+; CHECK-NEXT: mov w12, wzr
+; CHECK-NEXT: str wzr, [sp, #132] // 4-byte Folded Spill
+; CHECK-NEXT: mov w30, w4
+; CHECK-NEXT: madd x8, x9, x14, x8
+; CHECK-NEXT: mov w14, #1 // =0x1
+; CHECK-NEXT: str x8, [sp, #120] // 8-byte Folded Spill
+; CHECK-NEXT: add x8, x9, x9, lsl #1
+; CHECK-NEXT: lsl x26, x8, #4
+; CHECK-NEXT: sxtb w8, w23
+; CHECK-NEXT: mov w23, w25
+; CHECK-NEXT: str w8, [sp, #116] // 4-byte Folded Spill
+; CHECK-NEXT: b .LBB0_10
+; CHECK-NEXT: .p2align 5, , 16
+; CHECK-NEXT: .LBB0_9: // %for.cond510.preheader.us
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: ldr w23, [sp, #92] // 4-byte Folded Reload
+; CHECK-NEXT: mov x22, x8
+; CHECK-NEXT: ldr x3, [sp, #96] // 8-byte Folded Reload
+; CHECK-NEXT: ldr x27, [sp, #80] // 8-byte Folded Reload
+; CHECK-NEXT: mov x28, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: mov x14, xzr
+; CHECK-NEXT: ldr w8, [sp, #76] // 4-byte Folded Reload
+; CHECK-NEXT: tbz w8, #31, .LBB0_7
+; CHECK-NEXT: .LBB0_10: // %for.body99.us
+; CHECK-NEXT: // Parent Loop BB0_4 Depth=1
+; CHECK-NEXT: // Parent Loop BB0_6 Depth=2
+; CHECK-NEXT: // Parent Loop BB0_8 Depth=3
+; CHECK-NEXT: // => This Loop Header: Depth=4
+; CHECK-NEXT: // Child Loop BB0_11 Depth 5
+; CHECK-NEXT: // Child Loop BB0_28 Depth 5
+; CHECK-NEXT: // Child Loop BB0_39 Depth 5
+; CHECK-NEXT: ldr w8, [sp, #116] // 4-byte Folded Reload
+; CHECK-NEXT: and w8, w8, w8, asr #31
+; CHECK-NEXT: str w8, [sp, #128] // 4-byte Folded Spill
+; CHECK-NEXT: .p2align 5, , 16
+; CHECK-NEXT: .LBB0_11: // %for.body113.us
+; CHECK-NEXT: // Parent Loop BB0_4 Depth=1
+; CHECK-NEXT: // Parent Loop BB0_6 Depth=2
+; CHECK-NEXT: // Parent Loop BB0_8 Depth=3
+; CHECK-NEXT: // Parent Loop BB0_10 Depth=4
+; CHECK-NEXT: // => This Inner Loop Header: Depth=5
+; CHECK-NEXT: tbnz w0, #0, .LBB0_11
+; CHECK-NEXT: // %bb.12: // %for.cond131.preheader.us
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: ldr w8, [sp, #112] // 4-byte Folded Reload
+; CHECK-NEXT: mov w4, #1 // =0x1
+; CHECK-NEXT: strb w8, [x18]
+; CHECK-NEXT: ldr x8, [sp, #120] // 8-byte Folded Reload
+; CHECK-NEXT: ldrh w8, [x8]
+; CHECK-NEXT: cbnz w4, .LBB0_14
+; CHECK-NEXT: // %bb.13: // %cond.true146.us
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: ldrsb w4, [x27, x3]
+; CHECK-NEXT: b .LBB0_15
+; CHECK-NEXT: .p2align 5, , 16
+; CHECK-NEXT: .LBB0_14: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: mov w4, wzr
+; CHECK-NEXT: .LBB0_15: // %cond.end154.us
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: mov w25, #18984 // =0x4a28
+; CHECK-NEXT: mul w8, w8, w25
+; CHECK-NEXT: and w8, w8, #0xfff8
+; CHECK-NEXT: lsl w8, w8, w4
+; CHECK-NEXT: cbz w8, .LBB0_17
+; CHECK-NEXT: // %bb.16: // %if.then.us
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: str wzr, [sp, #132] // 4-byte Folded Spill
+; CHECK-NEXT: str wzr, [x18]
+; CHECK-NEXT: .LBB0_17: // %if.end.us
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: ldr w8, [sp, #108] // 4-byte Folded Reload
+; CHECK-NEXT: mov w4, #18984 // =0x4a28
+; CHECK-NEXT: mov w25, w23
+; CHECK-NEXT: strb w8, [x18]
+; CHECK-NEXT: ldrsb w8, [x27, x3]
+; CHECK-NEXT: lsl w8, w4, w8
+; CHECK-NEXT: mov x4, #-18403 // =0xffffffffffffb81d
+; CHECK-NEXT: movk x4, #58909, lsl #16
+; CHECK-NEXT: cbz w8, .LBB0_19
+; CHECK-NEXT: // %bb.18: // %if.then.us.2
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: str wzr, [sp, #132] // 4-byte Folded Spill
+; CHECK-NEXT: strb wzr, [x18]
+; CHECK-NEXT: .LBB0_19: // %if.then.us.5
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: ldr w23, [sp, #132] // 4-byte Folded Reload
+; CHECK-NEXT: mov w8, #29625 // =0x73b9
+; CHECK-NEXT: movk w8, #21515, lsl #16
+; CHECK-NEXT: cmp w23, w8
+; CHECK-NEXT: csel w23, w23, w8, lt
+; CHECK-NEXT: str w23, [sp, #132] // 4-byte Folded Spill
+; CHECK-NEXT: tbz w0, #0, .LBB0_21
+; CHECK-NEXT: // %bb.20: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: mov w8, wzr
+; CHECK-NEXT: b .LBB0_22
+; CHECK-NEXT: .p2align 5, , 16
+; CHECK-NEXT: .LBB0_21: // %cond.true146.us.7
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: ldrsb w8, [x27, x3]
+; CHECK-NEXT: .LBB0_22: // %cond.end154.us.7
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: mov w23, #18984 // =0x4a28
+; CHECK-NEXT: mov w3, #149 // =0x95
+; CHECK-NEXT: lsl w8, w23, w8
+; CHECK-NEXT: cbz w8, .LBB0_24
+; CHECK-NEXT: // %bb.23: // %if.then.us.7
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: ldr x8, [sp, #152] // 8-byte Folded Reload
+; CHECK-NEXT: str wzr, [sp, #132] // 4-byte Folded Spill
+; CHECK-NEXT: str wzr, [x8]
+; CHECK-NEXT: .LBB0_24: // %if.end.us.7
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: mov x23, xzr
+; CHECK-NEXT: b .LBB0_28
+; CHECK-NEXT: .p2align 5, , 16
+; CHECK-NEXT: .LBB0_25: // %cond.true331.us
+; CHECK-NEXT: // in Loop: Header=BB0_28 Depth=5
+; CHECK-NEXT: ldrsb w4, [x10]
+; CHECK-NEXT: .LBB0_26: // %cond.end345.us
+; CHECK-NEXT: // in Loop: Header=BB0_28 Depth=5
+; CHECK-NEXT: strh w4, [x18]
+; CHECK-NEXT: mul x4, x22, x28
+; CHECK-NEXT: adrp x22, :got:var_46
+; CHECK-NEXT: mov x8, xzr
+; CHECK-NEXT: ldr x22, [x22, :got_lo12:var_46]
+; CHECK-NEXT: str x4, [x22]
+; CHECK-NEXT: mov x4, #-18403 // =0xffffffffffffb81d
+; CHECK-NEXT: movk x4, #58909, lsl #16
+; CHECK-NEXT: .LBB0_27: // %for.inc371.us
+; CHECK-NEXT: // in Loop: Header=BB0_28 Depth=5
+; CHECK-NEXT: mov w22, #-18978 // =0xffffb5de
+; CHECK-NEXT: orr x23, x23, #0x1
+; CHECK-NEXT: mov x24, xzr
+; CHECK-NEXT: mul w12, w12, w22
+; CHECK-NEXT: mov x22, x5
+; CHECK-NEXT: tbz w0, #0, .LBB0_36
+; CHECK-NEXT: .LBB0_28: // %for.body194.us
+; CHECK-NEXT: // Parent Loop BB0_4 Depth=1
+; CHECK-NEXT: // Parent Loop BB0_6 Depth=2
+; CHECK-NEXT: // Parent Loop BB0_8 Depth=3
+; CHECK-NEXT: // Parent Loop BB0_10 Depth=4
+; CHECK-NEXT: // => This Inner Loop Header: Depth=5
+; CHECK-NEXT: cbnz wzr, .LBB0_30
+; CHECK-NEXT: // %bb.29: // %if.then222.us
+; CHECK-NEXT: // in Loop: Header=BB0_28 Depth=5
+; CHECK-NEXT: adrp x27, :got:var_32
+; CHECK-NEXT: ldur w8, [x19, #-12]
+; CHECK-NEXT: ldr x27, [x27, :got_lo12:var_32]
+; CHECK-NEXT: strh w8, [x27]
+; CHECK-NEXT: sxtb w8, w25
+; CHECK-NEXT: bic w25, w8, w8, asr #31
+; CHECK-NEXT: b .LBB0_31
+; CHECK-NEXT: .p2align 5, , 16
+; CHECK-NEXT: .LBB0_30: // in Loop: Header=BB0_28 Depth=5
+; CHECK-NEXT: mov w25, wzr
+; CHECK-NEXT: .LBB0_31: // %if.end239.us
+; CHECK-NEXT: // in Loop: Header=BB0_28 Depth=5
+; CHECK-NEXT: strb w3, [x16]
+; CHECK-NEXT: tst w13, #0xff
+; CHECK-NEXT: b.eq .LBB0_33
+; CHECK-NEXT: // %bb.32: // %if.then254.us
+; CHECK-NEXT: // in Loop: Header=BB0_28 Depth=5
+; CHECK-NEXT: ldrh w8, [x26, x14, lsl #1]
+; CHECK-NEXT: adrp x27, :got:var_35
+; CHECK-NEXT: ldr x27, [x27, :got_lo12:var_35]
+; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: csel x8, xzr, x7, eq
+; CHECK-NEXT: str x8, [x27]
+; CHECK-NEXT: strh w1, [x17]
+; CHECK-NEXT: .LBB0_33: // %if.end282.us
+; CHECK-NEXT: // in Loop: Header=BB0_28 Depth=5
+; CHECK-NEXT: orr x27, x24, x4
+; CHECK-NEXT: adrp x8, :got:var_39
+; CHECK-NEXT: str x27, [x18]
+; CHECK-NEXT: ldr x8, [x8, :got_lo12:var_39]
+; CHECK-NEXT: str x10, [x8]
+; CHECK-NEXT: ldrb w8, [x6, x9]
+; CHECK-NEXT: str x8, [x18]
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: cbnz x2, .LBB0_27
+; CHECK-NEXT: // %bb.34: // %if.then327.us
+; CHECK-NEXT: // in Loop: Header=BB0_28 Depth=5
+; CHECK-NEXT: cbz w8, .LBB0_25
+; CHECK-NEXT: // %bb.35: // in Loop: Header=BB0_28 Depth=5
+; CHECK-NEXT: mov w4, wzr
+; CHECK-NEXT: b .LBB0_26
+; CHECK-NEXT: .p2align 5, , 16
+; CHECK-NEXT: .LBB0_36: // %for.cond376.preheader.us
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: mov w3, #1152 // =0x480
+; CHECK-NEXT: mov x22, xzr
+; CHECK-NEXT: mov w4, wzr
+; CHECK-NEXT: mov x24, x27
+; CHECK-NEXT: lsl x23, x14, #1
+; CHECK-NEXT: mov x27, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: madd x14, x14, x3, x11
+; CHECK-NEXT: mov w28, w30
+; CHECK-NEXT: mov w3, #-7680 // =0xffffe200
+; CHECK-NEXT: b .LBB0_39
+; CHECK-NEXT: .p2align 5, , 16
+; CHECK-NEXT: .LBB0_37: // %if.then466.us
+; CHECK-NEXT: // in Loop: Header=BB0_39 Depth=5
+; CHECK-NEXT: ldr x28, [sp, #152] // 8-byte Folded Reload
+; CHECK-NEXT: ldr x3, [sp, #136] // 8-byte Folded Reload
+; CHECK-NEXT: sxtb w4, w4
+; CHECK-NEXT: bic w4, w4, w4, asr #31
+; CHECK-NEXT: str x3, [x28]
+; CHECK-NEXT: mov w3, #-7680 // =0xffffe200
+; CHECK-NEXT: .LBB0_38: // %for.inc505.us
+; CHECK-NEXT: // in Loop: Header=BB0_39 Depth=5
+; CHECK-NEXT: add x22, x22, #1
+; CHECK-NEXT: add x27, x27, #1
+; CHECK-NEXT: mov w28, wzr
+; CHECK-NEXT: cmp x27, #0
+; CHECK-NEXT: b.hs .LBB0_9
+; CHECK-NEXT: .LBB0_39: // %for.body380.us
+; CHECK-NEXT: // Parent Loop BB0_4 Depth=1
+; CHECK-NEXT: // Parent Loop BB0_6 Depth=2
+; CHECK-NEXT: // Parent Loop BB0_8 Depth=3
+; CHECK-NEXT: // Parent Loop BB0_10 Depth=4
+; CHECK-NEXT: // => This Inner Loop Header: Depth=5
+; CHECK-NEXT: mov w30, w28
+; CHECK-NEXT: ldrh w28, [x23]
+; CHECK-NEXT: tst w0, #0x1
+; CHECK-NEXT: strh w28, [x11]
+; CHECK-NEXT: csel w28, w21, w3, ne
+; CHECK-NEXT: str w28, [x20]
+; CHECK-NEXT: cbz x15, .LBB0_38
+; CHECK-NEXT: // %bb.40: // %if.then436.us
+; CHECK-NEXT: // in Loop: Header=BB0_39 Depth=5
+; CHECK-NEXT: ldrh w28, [x14]
+; CHECK-NEXT: cbnz w28, .LBB0_37
+; CHECK-NEXT: // %bb.41: // in Loop: Header=BB0_39 Depth=5
+; CHECK-NEXT: mov w4, wzr
+; CHECK-NEXT: b .LBB0_38
+; CHECK-NEXT: .LBB0_42: // %for.body41
+; CHECK-NEXT: strb wzr, [x4]
+; CHECK-NEXT: strb wzr, [x14]
+; CHECK-NEXT: .LBB0_43: // %for.cond563.preheader
+; CHECK-NEXT: ldp x20, x19, [sp, #224] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x22, x21, [sp, #208] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x24, x23, [sp, #192] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x26, x25, [sp, #176] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x28, x27, [sp, #160] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #144] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #240
+; CHECK-NEXT: ret
+entry:
+ br i1 %var_5, label %for.body41.lr.ph, label %for.cond563.preheader
+
+for.body41.lr.ph: ; preds = %entry
+ %arrayidx147 = getelementptr i8, ptr %arr_3, i64 120
+ %tobool326.not = icmp eq i64 %var_2, 0
+ %not353 = xor i64 0, -1
+ %add538 = select i1 %var_0, i16 0, i16 1
+ br i1 %var_0, label %for.body41.us, label %for.body41
+
+for.body41.us: ; preds = %for.cond.cleanup93.us, %for.body41.lr.ph
+ %var_24.promoted9271009.us = phi i64 [ 0, %for.body41.lr.ph ], [ %6, %for.cond.cleanup93.us ]
+ %var_37.promoted9301008.us = phi i64 [ 1, %for.body41.lr.ph ], [ 0, %for.cond.cleanup93.us ]
+ %2 = phi i8 [ 0, %for.body41.lr.ph ], [ 1, %for.cond.cleanup93.us ]
+ %add4139751001.us = phi i16 [ 0, %for.body41.lr.ph ], [ 1, %for.cond.cleanup93.us ]
+ %3 = phi i8 [ 0, %for.body41.lr.ph ], [ %var_10, %for.cond.cleanup93.us ]
+ store i32 %var_6, ptr %arr_3, align 4
+ store i8 %var_10, ptr %arr_3, align 1
+ br label %for.body67.us
+
+for.body67.us: ; preds = %for.cond.cleanup93.us, %for.body41.us
+ %4 = phi i8 [ %3, %for.body41.us ], [ 0, %for.cond.cleanup93.us ]
+ %add413977.us = phi i16 [ %add4139751001.us, %for.body41.us ], [ %add413.us17, %for.cond.cleanup93.us ]
+ %5 = phi i8 [ %2, %for.body41.us ], [ %.sroa.speculated829.us, %for.cond.cleanup93.us ]
+ %conv64922.us = phi i32 [ 1, %for.body41.us ], [ 0, %for.cond.cleanup93.us ]
+ %6 = phi i64 [ %var_24.promoted9271009.us, %for.body41.us ], [ %.sroa.speculated832.us, %for.cond.cleanup93.us ]
+ %mul354903918.us = phi i64 [ %var_37.promoted9301008.us, %for.body41.us ], [ 0, %for.cond.cleanup93.us ]
+ %i_2.0921.us = zext i32 %var_15 to i64
+ %.sroa.speculated832.us = tail call i64 @llvm.umin.i64(i64 %var_24.promoted9271009.us, i64 -30)
+ %sext1023 = shl i64 %i_2.0921.us, 1
+ %idxprom138.us162 = ashr i64 %sext1023, 1
+ %gep889.us = getelementptr [24 x i16], ptr %arr_19, i64 %idxprom138.us16
+ %arrayidx149.us = getelementptr i8, ptr %arrayidx147, i64 %idxprom138.us162
+ %arrayidx319.us = getelementptr [24 x i8], ptr null, i64 %idxprom138.us162
+ %7 = sext i32 %conv64922.us to i64
+ %8 = getelementptr i32, ptr %arr_12, i64 %7
+ %arrayidx226.us = getelementptr i8, ptr %8, i64 -12
+ br label %for.cond95.preheader.us
+
+for.cond.cleanup93.us: ; preds = %for.cond.cleanup98.us
+ br i1 %var_5, label %for.body67.us, label %for.body41.us
+
+for.cond.cleanup98.us: ; preds = %for.cond510.preheader.us
+ br i1 %var_0, label %for.cond.cleanup93.us, label %for.cond95.preheader.us
+
+for.body99.us: ; preds = %for.cond95.preheader.us, %for.cond510.preheader.us
+ %mul287985.us = phi i16 [ 0, %for.cond95.preheader.us ], [ %mul287.us, %for.cond510.preheader.us ]
+ %9 = phi i8 [ %29, %for.cond95.preheader.us ], [ %var_14, %for.cond510.preheader.us ]
+ %add413979.us = phi i16 [ %add413978.us, %for.cond95.preheader.us ], [ %add413.us17, %for.cond510.preheader.us ]
+ %10 = phi i32 [ 0, %for.cond95.preheader.us ], [ %26, %for.cond510.preheader.us ]
+ %mul354905.us = phi i64 [ %mul354904.us, %for.cond95.preheader.us ], [ %mul354907.us, %for.cond510.preheader.us ]
+ %sub283896.us = phi i64 [ 1, %for.cond95.preheader.us ], [ %sub283.us, %for.cond510.preheader.us ]
+ %conv96880.us = phi i64 [ 1, %for.cond95.preheader.us ], [ 0, %for.cond510.preheader.us ]
+ %.sroa.speculated829.us = tail call i8 @llvm.smin.i8(i8 %30, i8 0)
+ br label %for.body113.us
+
+for.body380.us: ; preds = %for.cond376.preheader.us, %for.inc505.us
+ %indvars.iv1018 = phi i64 [ 0, %for.cond376.preheader.us ], [ %indvars.iv.next1019, %for.inc505.us ]
+ %11 = phi i8 [ 0, %for.cond376.preheader.us ], [ %13, %for.inc505.us ]
+ %add413980.us = phi i16 [ %add413979.us, %for.cond376.preheader.us ], [ 0, %for.inc505.us ]
+ %12 = load i16, ptr %arrayidx384.us, align 2
+ store i16 %12, ptr %invariant.gep875.us, align 2
+ %add413.us17 = or i16 %add413980.us, 0
+ %arrayidx416.us = getelementptr i16, ptr %arr_13, i64 %indvars.iv1018
+ %conv419.us = select i1 %var_0, i32 36006, i32 -7680
+ store i32 %conv419.us, ptr @var_50, align 4
+ %tobool435.not.us = icmp eq i64 %mul, 0
+ br i1 %tobool435.not.us, label %for.inc505.us, label %if.then436.us
+
+if.then436.us: ; preds = %for.body380.us
+ %.sroa.speculated817.us = tail call i8 @llvm.smax.i8(i8 %11, i8 0)
+ %cond464.in.us = load i16, ptr %gep876.us, align 2
+ %tobool465.not.us = icmp eq i16 %cond464.in.us, 0
+ br i1 %tobool465.not.us, label %for.inc505.us, label %if.then466.us
+
+if.then466.us: ; preds = %if.then436.us
+ store i64 %conv35, ptr %arr_3, align 8
+ br label %for.inc505.us
+
+for.inc505.us: ; preds = %if.then466.us, %if.then436.us, %for.body380.us
+ %13 = phi i8 [ %11, %for.body380.us ], [ %.sroa.speculated817.us, %if.then466.us ], [ 0, %if.then436.us ]
+ %indvars.iv.next1019 = add i64 %indvars.iv1018, 1
+ %cmp378.us = icmp ult i64 %indvars.iv1018, 0
+ br i1 %cmp378.us, label %for.body380.us, label %for.cond510.preheader.us
+
+for.body194.us: ; preds = %if.end.us.7, %for.inc371.us
+ %indvars.iv = phi i64 [ 0, %if.end.us.7 ], [ %indvars.iv.next, %for.inc371.us ]
+ %mul287986.us = phi i16 [ %mul287985.us, %if.end.us.7 ], [ %mul287.us, %for.inc371.us ]
+ %14 = phi i8 [ %9, %if.end.us.7 ], [ %16, %for.inc371.us ]
+ %mul354906.us = phi i64 [ %mul354905.us, %if.end.us.7 ], [ %var_11, %for.inc371.us ]
+ %sub283897.us = phi i64 [ %sub283896.us, %if.end.us.7 ], [ 0, %for.inc371.us ]
+ %tobool221.not.us = icmp eq i32 1, 0
+ br i1 %tobool221.not.us, label %if.end239.us, label %if.then222.us
+
+if.then222.us: ; preds = %for.body194.us
+ %15 = load i32, ptr %arrayidx226.us, align 4
+ %conv227.us = trunc i32 %15 to i16
+ store i16 %conv227.us, ptr @var_32, align 2
+ %.sroa.speculated820.us = tail call i8 @llvm.smax.i8(i8 %14, i8 0)
+ br label %if.end239.us
+
+if.end239.us: ; preds = %if.then222.us, %for.body194.us
+ %16 = phi i8 [ %.sroa.speculated820.us, %if.then222.us ], [ 0, %for.body194.us ]
+ store i8 -107, ptr %arr_7, align 1
+ %tobool253.not.us = icmp eq i8 %0, 0
+ br i1 %tobool253.not.us, label %if.end282.us, label %if.then254.us
+
+if.then254.us: ; preds = %if.end239.us
+ %17 = load i16, ptr %arrayidx259.us, align 2
+ %tobool261.not.us = icmp eq i16 %17, 0
+ %conv268.us = select i1 %tobool261.not.us, i64 0, i64 %var_16
+ store i64 %conv268.us, ptr @var_35, align 8
+ %gep867.us = getelementptr [24 x [24 x i64]], ptr null, i64 %indvars.iv
+ store i16 %var_1, ptr %arr_6, align 2
+ br label %if.end282.us
+
+if.end282.us: ; preds = %if.then254.us, %if.end239.us
+ %sub283.us = or i64 %sub283897.us, -434259939
+ store i64 %sub283.us, ptr %arr_4, align 8
+ %mul287.us = mul i16 %mul287986.us, -18978
+ store i64 0, ptr @var_39, align 8
+ %18 = load i8, ptr %arrayidx321.us, align 1
+ %conv322.us = zext i8 %18 to i64
+ store i64 %conv322.us, ptr %arr_4, align 8
+ br i1 %tobool326.not, label %if.then327.us, label %for.inc371.us
+
+if.then327.us: ; preds = %if.end282.us
+ %tobool330.not.us = icmp eq i32 0, 0
+ br i1 %tobool330.not.us, label %cond.end345.us, label %cond.true331.us
+
+cond.true331.us: ; preds = %if.then327.us
+ %19 = load i8, ptr null, align 1
+ %20 = sext i8 %19 to i16
+ br label %cond.end345.us
+
+cond.end345.us: ; preds = %cond.true331.us, %if.then327.us
+ %cond346.us = phi i16 [ %20, %cond.true331.us ], [ 0, %if.then327.us ]
+ store i16 %cond346.us, ptr %arr_4, align 2
+ %mul354.us = mul i64 %mul354906.us, %not353
+ store i64 %mul354.us, ptr @var_46, align 8
+ br label %for.inc371.us
+
+for.inc371.us: ; preds = %cond.end345.us, %if.end282.us
+ %mul354907.us = phi i64 [ 1, %if.end282.us ], [ 0, %cond.end345.us ]
+ %indvars.iv.next = or i64 %indvars.iv, 1
+ br i1 %var_0, label %for.body194.us, label %for.cond376.preheader.us
+
+cond.true146.us: ; preds = %for.cond131.preheader.us
+ %21 = load i8, ptr %arrayidx149.us, align 1
+ %conv150.us = sext i8 %21 to i32
+ br label %cond.end154.us
+
+cond.end154.us: ; preds = %for.cond131.preheader.us, %cond.true146.us
+ %cond155.us = phi i32 [ %conv150.us, %cond.true146.us ], [ 0, %for.cond131.preheader.us ]
+ %shl.us = shl i32 %div.us, %cond155.us
+ %tobool157.not.us = icmp eq i32 %shl.us, 0
+ br i1 %tobool157.not.us, label %if.end.us, label %if.then.us
+
+if.then.us: ; preds = %cond.end154.us
+ store i32 0, ptr %arr_4, align 4
+ br label %if.end.us
+
+if.end.us: ; preds = %if.then.us, %cond.end154.us
+ %22 = phi i32 [ 0, %if.then.us ], [ %10, %cond.end154.us ]
+ store i8 %1, ptr %arr_4, align 1
+ call void @llvm.assume(i1 true)
+ %23 = load i8, ptr %arrayidx149.us, align 1
+ %conv150.us.2 = sext i8 %23 to i32
+ %shl.us.2 = shl i32 18984, %conv150.us.2
+ %tobool157.not.us.2 = icmp eq i32 %shl.us.2, 0
+ br i1 %tobool157.not.us.2, label %if.then.us.5, label %if.then.us.2
+
+if.then.us.2: ; preds = %if.end.us
+ %.sroa.speculated826.us.2 = tail call i32 @llvm.smin.i32(i32 %10, i32 0)
+ store i8 0, ptr %arr_4, align 1
+ br label %if.then.us.5
+
+if.then.us.5: ; preds = %if.then.us.2, %if.end.us
+ %24 = phi i32 [ 0, %if.then.us.2 ], [ %22, %if.end.us ]
+ %.sroa.speculated826.us.5 = tail call i32 @llvm.smin.i32(i32 %24, i32 1410036665)
+ br i1 %var_0, label %cond.end154.us.7, label %cond.true146.us.7
+
+cond.true146.us.7: ; preds = %if.then.us.5
+ %25 = load i8, ptr %arrayidx149.us, align 1
+ %conv150.us.7 = sext i8 %25 to i32
+ br label %cond.end154.us.7
+
+cond.end154.us.7: ; preds = %cond.true146.us.7, %if.then.us.5
+ %cond155.us.7 = phi i32 [ %conv150.us.7, %cond.true146.us.7 ], [ 0, %if.then.us.5 ]
+ %shl.us.7 = shl i32 18984, %cond155.us.7
+ %tobool157.not.us.7 = icmp eq i32 %shl.us.7, 0
+ br i1 %tobool157.not.us.7, label %if.end.us.7, label %if.then.us.7
+
+if.then.us.7: ; preds = %cond.end154.us.7
+ store i32 0, ptr %arr_3, align 4
+ br label %if.end.us.7
+
+if.end.us.7: ; preds = %if.then.us.7, %cond.end154.us.7
+ %26 = phi i32 [ 0, %if.then.us.7 ], [ %.sroa.speculated826.us.5, %cond.end154.us.7 ]
+ %arrayidx259.us = getelementptr i16, ptr %arrayidx257.us, i64 %conv96880.us
+ br label %for.body194.us
+
+for.body113.us: ; preds = %for.body113.us, %for.body99.us
+ br i1 %var_0, label %for.body113.us, label %for.cond131.preheader.us
+
+for.cond510.preheader.us: ; preds = %for.inc505.us
+ %cmp97.us = icmp slt i16 %add538, 0
+ br i1 %cmp97.us, label %for.body99.us, label %for.cond.cleanup98.us
+
+for.cond376.preheader.us: ; preds = %for.inc371.us
+ %arrayidx384.us = getelementptr i16, ptr null, i64 %conv96880.us
+ %gep876.us = getelementptr [24 x [24 x i16]], ptr %invariant.gep875.us, i64 %conv96880.us
+ br label %for.body380.us
+
+for.cond131.preheader.us: ; preds = %for.body113.us
+ store i8 %var_3, ptr %arr_4, align 1
+ %27 = load i16, ptr %gep884.us, align 2
+ %28 = mul i16 18984, %27
+ %div.us = zext i16 %28 to i32
+ %tobool145.not.us = icmp eq i8 0, 0
+ br i1 %tobool145.not.us, label %cond.end154.us, label %cond.true146.us
+
+for.cond95.preheader.us: ; preds = %for.cond.cleanup98.us, %for.body67.us
+ %indvars.iv1021 = phi i64 [ 1, %for.cond.cleanup98.us ], [ 0, %for.body67.us ]
+ %29 = phi i8 [ %16, %for.cond.cleanup98.us ], [ %4, %for.body67.us ]
+ %add413978.us = phi i16 [ %var_4, %for.cond.cleanup98.us ], [ %add413977.us, %for.body67.us ]
+ %30 = phi i8 [ %.sroa.speculated829.us, %for.cond.cleanup98.us ], [ %5, %for.body67.us ]
+ %mul354904.us = phi i64 [ 0, %for.cond.cleanup98.us ], [ %mul354903918.us, %for.body67.us ]
+ %gep884.us = getelementptr [24 x [24 x i16]], ptr %gep889.us, i64 %indvars.iv1021
+ %arrayidx321.us = getelementptr i8, ptr %arrayidx319.us, i64 %indvars.iv1021
+ %arrayidx257.us = getelementptr [24 x i16], ptr null, i64 %indvars.iv1021
+ br label %for.body99.us
+
+for.cond563.preheader: ; preds = %for.body41, %entry
+ ret void
+
+for.body41: ; preds = %for.body41.lr.ph
+ store i8 0, ptr %arr_12, align 1
+ store i8 0, ptr %arr_3, align 1
+ br label %for.cond563.preheader
+}
+
+attributes #0 = { nounwind "frame-pointer"="non-leaf" "target-cpu"="grace" }
+attributes #1 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+attributes #2 = { nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: write) }
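
For context on the fix this test guards (presumably the InlineSpiller change in
this commit): when the spiller rematerializes a def directly at a use site, the
fresh virtual register must satisfy the register-class constraints of both the
original def and the consuming instruction. A minimal hedged C++ sketch of that
constraint step, with assumed names and structure rather than the actual
InlineSpiller code:

#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"

using namespace llvm;

// Sketch: shrink a freshly created remat register to a class acceptable to
// both the rematerialized def and the use it feeds. constrainRegClass
// returns null when no common subclass exists; in that case the
// rematerialization must be abandoned and the value reloaded instead.
static bool constrainRematReg(MachineRegisterInfo &MRI, Register NewReg,
                              const TargetRegisterClass *DefRC,
                              const TargetRegisterClass *UseRC) {
  if (!MRI.constrainRegClass(NewReg, DefRC))
    return false;
  return MRI.constrainRegClass(NewReg, UseRC) != nullptr;
}

Constraining against only one side is what let the crash above happen: the new
register could land in a class the other instruction cannot encode.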
diff --git a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
index 57a1e4c..ec92edb 100644
--- a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
+++ b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
@@ -3385,7 +3385,7 @@ declare half @llvm.canonicalize.f16(half)
declare <2 x half> @llvm.canonicalize.v2f16(<2 x half>)
attributes #0 = { nounwind "amdgpu-ieee"="false" }
-attributes #1 = { nounwind "unsafe-fp-math"="true" "no-nans-fp-math"="true" }
+attributes #1 = { nounwind "no-nans-fp-math"="true" }
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GFX11NONANS-FAKE16: {{.*}}
; GFX11NONANS-TRUE16: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/fdiv.f64.ll b/llvm/test/CodeGen/AMDGPU/fdiv.f64.ll
index acb32d4..11476a6 100644
--- a/llvm/test/CodeGen/AMDGPU/fdiv.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/fdiv.f64.ll
@@ -127,7 +127,7 @@ define amdgpu_kernel void @s_fdiv_v4f64(ptr addrspace(1) %out, <4 x double> %num
; GCN-LABEL: {{^}}div_fast_2_x_pat_f64:
; GCN: v_mul_f64 [[MUL:v\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0.5
; GCN: buffer_store_dwordx2 [[MUL]]
-define amdgpu_kernel void @div_fast_2_x_pat_f64(ptr addrspace(1) %out) #1 {
+define amdgpu_kernel void @div_fast_2_x_pat_f64(ptr addrspace(1) %out) #0 {
%x = load double, ptr addrspace(1) poison
%rcp = fdiv fast double %x, 2.0
store double %rcp, ptr addrspace(1) %out, align 4
@@ -139,7 +139,7 @@ define amdgpu_kernel void @div_fast_2_x_pat_f64(ptr addrspace(1) %out) #1 {
; GCN-DAG: v_mov_b32_e32 v[[K_HI:[0-9]+]], 0x3fb99999
; GCN: v_mul_f64 [[MUL:v\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, v[[[K_LO]]:[[K_HI]]]
; GCN: buffer_store_dwordx2 [[MUL]]
-define amdgpu_kernel void @div_fast_k_x_pat_f64(ptr addrspace(1) %out) #1 {
+define amdgpu_kernel void @div_fast_k_x_pat_f64(ptr addrspace(1) %out) #0 {
%x = load double, ptr addrspace(1) poison
%rcp = fdiv fast double %x, 10.0
store double %rcp, ptr addrspace(1) %out, align 4
@@ -151,7 +151,7 @@ define amdgpu_kernel void @div_fast_k_x_pat_f64(ptr addrspace(1) %out) #1 {
; GCN-DAG: v_mov_b32_e32 v[[K_HI:[0-9]+]], 0xbfb99999
; GCN: v_mul_f64 [[MUL:v\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, v[[[K_LO]]:[[K_HI]]]
; GCN: buffer_store_dwordx2 [[MUL]]
-define amdgpu_kernel void @div_fast_neg_k_x_pat_f64(ptr addrspace(1) %out) #1 {
+define amdgpu_kernel void @div_fast_neg_k_x_pat_f64(ptr addrspace(1) %out) #0 {
%x = load double, ptr addrspace(1) poison
%rcp = fdiv fast double %x, -10.0
store double %rcp, ptr addrspace(1) %out, align 4
@@ -159,4 +159,3 @@ define amdgpu_kernel void @div_fast_neg_k_x_pat_f64(ptr addrspace(1) %out) #1 {
}
attributes #0 = { nounwind }
-attributes #1 = { nounwind "unsafe-fp-math"="true" }
diff --git a/llvm/test/CodeGen/AMDGPU/fmad-formation-fmul-distribute-denormal-mode.ll b/llvm/test/CodeGen/AMDGPU/fmad-formation-fmul-distribute-denormal-mode.ll
index 92eb4a6..0a266bc 100644
--- a/llvm/test/CodeGen/AMDGPU/fmad-formation-fmul-distribute-denormal-mode.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmad-formation-fmul-distribute-denormal-mode.ll
@@ -284,4 +284,4 @@ define <2 x float> @unsafe_fast_fmul_fsub_ditribute_post_legalize(float %arg0, <
ret <2 x float> %tmp1
}
-attributes #0 = { "no-infs-fp-math"="true" "unsafe-fp-math"="true" }
+attributes #0 = { "no-infs-fp-math"="true" }
diff --git a/llvm/test/CodeGen/AMDGPU/fmed3.bf16.ll b/llvm/test/CodeGen/AMDGPU/fmed3.bf16.ll
index bc85dc2..3e513de 100644
--- a/llvm/test/CodeGen/AMDGPU/fmed3.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmed3.bf16.ll
@@ -219,8 +219,8 @@ define <2 x bfloat> @v_test_fmed3_r_i_i_v2bf16_minimumnum_maximumnum(<2 x bfloat
}
attributes #0 = { nounwind readnone }
-attributes #1 = { nounwind "unsafe-fp-math"="false" "no-nans-fp-math"="false" }
-attributes #2 = { nounwind "unsafe-fp-math"="false" "no-nans-fp-math"="true" }
+attributes #1 = { nounwind "no-nans-fp-math"="false" }
+attributes #2 = { nounwind "no-nans-fp-math"="true" }
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GFX11: {{.*}}
; GFX11-SDAG: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/fmed3.ll b/llvm/test/CodeGen/AMDGPU/fmed3.ll
index 3145a27..60ac0b9 100644
--- a/llvm/test/CodeGen/AMDGPU/fmed3.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmed3.ll
@@ -8905,4 +8905,4 @@ declare half @llvm.minnum.f16(half, half) #0
declare half @llvm.maxnum.f16(half, half) #0
attributes #0 = { nounwind readnone }
-attributes #2 = { nounwind "unsafe-fp-math"="false" "no-nans-fp-math"="true" }
+attributes #2 = { nounwind "no-nans-fp-math"="true" }
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.legal.f16.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.legal.f16.ll
index d8bbda1..69d1ee3f 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-combines.legal.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.legal.f16.ll
@@ -159,7 +159,7 @@ declare half @llvm.amdgcn.interp.p2.f16(float, float, i32, i32, i1, i32) #0
attributes #0 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
attributes #1 = { nounwind readnone }
-attributes #2 = { nounwind "unsafe-fp-math"="true" }
+attributes #2 = { nounwind }
attributes #3 = { nounwind "no-signed-zeros-fp-math"="true" }
attributes #4 = { nounwind "amdgpu-ieee"="false" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
index aaea4f7..b3202cb 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
@@ -8006,7 +8006,7 @@ declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #0
attributes #0 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
attributes #1 = { nounwind readnone }
-attributes #2 = { nounwind "unsafe-fp-math"="true" }
+attributes #2 = { nounwind }
attributes #3 = { nounwind "no-signed-zeros-fp-math"="true" }
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GCN-NSZ: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/frem.ll b/llvm/test/CodeGen/AMDGPU/frem.ll
index 6f91222..d8cbdb1 100644
--- a/llvm/test/CodeGen/AMDGPU/frem.ll
+++ b/llvm/test/CodeGen/AMDGPU/frem.ll
@@ -2048,7 +2048,7 @@ define amdgpu_kernel void @unsafe_frem_f16(ptr addrspace(1) %out, ptr addrspace(
; GFX1200-FAKE16-NEXT: v_fmac_f16_e32 v1, v3, v2
; GFX1200-FAKE16-NEXT: global_store_b16 v0, v1, s[0:1]
; GFX1200-FAKE16-NEXT: s_endpgm
- ptr addrspace(1) %in2) #1 {
+ ptr addrspace(1) %in2) #0 {
%gep2 = getelementptr half, ptr addrspace(1) %in2, i32 4
%r0 = load half, ptr addrspace(1) %in1, align 4
%r1 = load half, ptr addrspace(1) %gep2, align 4
@@ -3417,7 +3417,7 @@ define amdgpu_kernel void @unsafe_frem_f32(ptr addrspace(1) %out, ptr addrspace(
; GFX1200-NEXT: v_fmac_f32_e32 v1, v3, v2
; GFX1200-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX1200-NEXT: s_endpgm
- ptr addrspace(1) %in2) #1 {
+ ptr addrspace(1) %in2) #0 {
%gep2 = getelementptr float, ptr addrspace(1) %in2, i32 4
%r0 = load float, ptr addrspace(1) %in1, align 4
%r1 = load float, ptr addrspace(1) %gep2, align 4
@@ -4821,7 +4821,7 @@ define amdgpu_kernel void @unsafe_frem_f64(ptr addrspace(1) %out, ptr addrspace(
; GFX1200-NEXT: v_fma_f64 v[0:1], -v[4:5], v[2:3], v[0:1]
; GFX1200-NEXT: global_store_b64 v12, v[0:1], s[0:1]
; GFX1200-NEXT: s_endpgm
- ptr addrspace(1) %in2) #1 {
+ ptr addrspace(1) %in2) #0 {
%r0 = load double, ptr addrspace(1) %in1, align 8
%r1 = load double, ptr addrspace(1) %in2, align 8
%r2 = frem afn double %r0, %r1
@@ -18918,7 +18918,4 @@ define amdgpu_kernel void @frem_v2f64_const(ptr addrspace(1) %out) #0 {
-attributes #0 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
-attributes #1 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
-
-
+attributes #0 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
diff --git a/llvm/test/CodeGen/AMDGPU/fsqrt.f64.ll b/llvm/test/CodeGen/AMDGPU/fsqrt.f64.ll
index 1b74ddf..9b97981 100644
--- a/llvm/test/CodeGen/AMDGPU/fsqrt.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/fsqrt.f64.ll
@@ -2870,7 +2870,7 @@ define double @v_sqrt_f64__enough_unsafe_attrs(double %x) #3 {
ret double %result
}
-define double @v_sqrt_f64__unsafe_attr(double %x) #4 {
+define double @v_sqrt_f64__unsafe_attr(double %x) {
; GFX6-SDAG-LABEL: v_sqrt_f64__unsafe_attr:
; GFX6-SDAG: ; %bb.0:
; GFX6-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -3449,7 +3449,6 @@ declare i32 @llvm.amdgcn.readfirstlane(i32) #1
attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
attributes #1 = { convergent nounwind willreturn memory(none) }
attributes #3 = { "no-nans-fp-math"="true" "no-infs-fp-math"="true" }
-attributes #4 = { "unsafe-fp-math"="true" }
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GFX6: {{.*}}
; GFX8: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/fsqrt.r600.ll b/llvm/test/CodeGen/AMDGPU/fsqrt.r600.ll
index 9f19bcb..c93c077 100644
--- a/llvm/test/CodeGen/AMDGPU/fsqrt.r600.ll
+++ b/llvm/test/CodeGen/AMDGPU/fsqrt.r600.ll
@@ -239,4 +239,4 @@ declare <2 x float> @llvm.sqrt.v2f32(<2 x float> %in) #0
declare <4 x float> @llvm.sqrt.v4f32(<4 x float> %in) #0
attributes #0 = { nounwind readnone }
-attributes #1 = { nounwind "unsafe-fp-math"="true" }
+attributes #1 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/inline-attr.ll b/llvm/test/CodeGen/AMDGPU/inline-attr.ll
index 4e93eca..c33b3344 100644
--- a/llvm/test/CodeGen/AMDGPU/inline-attr.ll
+++ b/llvm/test/CodeGen/AMDGPU/inline-attr.ll
@@ -36,18 +36,18 @@ entry:
ret void
}
-attributes #0 = { nounwind "uniform-work-group-size"="false" "unsafe-fp-math"="true"}
-attributes #1 = { nounwind "less-precise-fpmad"="true" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "unsafe-fp-math"="true" }
+attributes #0 = { nounwind "uniform-work-group-size"="false"}
+attributes #1 = { nounwind "less-precise-fpmad"="true" "no-infs-fp-math"="true" "no-nans-fp-math"="true" }
;.
-; UNSAFE: attributes #[[ATTR0]] = { nounwind "uniform-work-group-size"="false" "unsafe-fp-math"="true" }
-; UNSAFE: attributes #[[ATTR1]] = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "uniform-work-group-size"="false" "unsafe-fp-math"="true" }
+; UNSAFE: attributes #[[ATTR0]] = { nounwind "uniform-work-group-size"="false" }
+; UNSAFE: attributes #[[ATTR1]] = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "uniform-work-group-size"="false" }
;.
-; NONANS: attributes #[[ATTR0]] = { nounwind "no-nans-fp-math"="true" "uniform-work-group-size"="false" "unsafe-fp-math"="true" }
-; NONANS: attributes #[[ATTR1]] = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="true" "uniform-work-group-size"="false" "unsafe-fp-math"="true" }
+; NONANS: attributes #[[ATTR0]] = { nounwind "no-nans-fp-math"="true" "uniform-work-group-size"="false" }
+; NONANS: attributes #[[ATTR1]] = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="true" "uniform-work-group-size"="false" }
;.
-; NOINFS: attributes #[[ATTR0]] = { nounwind "no-infs-fp-math"="true" "uniform-work-group-size"="false" "unsafe-fp-math"="true" }
-; NOINFS: attributes #[[ATTR1]] = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="true" "no-nans-fp-math"="false" "uniform-work-group-size"="false" "unsafe-fp-math"="true" }
+; NOINFS: attributes #[[ATTR0]] = { nounwind "no-infs-fp-math"="true" "uniform-work-group-size"="false" }
+; NOINFS: attributes #[[ATTR1]] = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="true" "no-nans-fp-math"="false" "uniform-work-group-size"="false" }
;.
; UNSAFE: [[META0]] = !{}
;.
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.add.min.max.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.add.min.max.ll
new file mode 100644
index 0000000..99421d4
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.add.min.max.ll
@@ -0,0 +1,191 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GCN,GFX1250-SDAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GCN,GFX1250-GISEL %s
+
+declare i32 @llvm.amdgcn.add.min.i32(i32, i32, i32, i1)
+declare i32 @llvm.amdgcn.add.max.i32(i32, i32, i32, i1)
+declare i32 @llvm.amdgcn.add.min.u32(i32, i32, i32, i1)
+declare i32 @llvm.amdgcn.add.max.u32(i32, i32, i32, i1)
+declare <2 x i16> @llvm.amdgcn.pk.add.min.i16(<2 x i16>, <2 x i16>, <2 x i16>, i1)
+declare <2 x i16> @llvm.amdgcn.pk.add.max.i16(<2 x i16>, <2 x i16>, <2 x i16>, i1)
+declare <2 x i16> @llvm.amdgcn.pk.add.min.u16(<2 x i16>, <2 x i16>, <2 x i16>, i1)
+declare <2 x i16> @llvm.amdgcn.pk.add.max.u16(<2 x i16>, <2 x i16>, <2 x i16>, i1)
+
+define i32 @test_add_min_i32_vvv(i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: test_add_min_i32_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_add_min_i32 v0, v0, v1, v2
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.add.min.i32(i32 %a, i32 %b, i32 %c, i1 0)
+ ret i32 %ret
+}
+
+define i32 @test_add_min_i32_ssi_clamp(i32 inreg %a, i32 inreg %b) {
+; GCN-LABEL: test_add_min_i32_ssi_clamp:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_add_min_i32 v0, s0, s1, 1 clamp
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.add.min.i32(i32 %a, i32 %b, i32 1, i1 1)
+ ret i32 %ret
+}
+
+define i32 @test_add_min_u32_vvv(i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: test_add_min_u32_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_add_min_u32 v0, v0, v1, v2
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.add.min.u32(i32 %a, i32 %b, i32 %c, i1 0)
+ ret i32 %ret
+}
+
+define i32 @test_add_min_u32_ssi_clamp(i32 inreg %a, i32 inreg %b) {
+; GCN-LABEL: test_add_min_u32_ssi_clamp:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_add_min_u32 v0, s0, s1, 1 clamp
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.add.min.u32(i32 %a, i32 %b, i32 1, i1 1)
+ ret i32 %ret
+}
+
+define i32 @test_add_max_i32_vvv(i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: test_add_max_i32_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_add_max_i32 v0, v0, v1, v2
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.add.max.i32(i32 %a, i32 %b, i32 %c, i1 0)
+ ret i32 %ret
+}
+
+define i32 @test_add_max_i32_ssi_clamp(i32 inreg %a, i32 inreg %b) {
+; GCN-LABEL: test_add_max_i32_ssi_clamp:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_add_max_i32 v0, s0, s1, 1 clamp
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.add.max.i32(i32 %a, i32 %b, i32 1, i1 1)
+ ret i32 %ret
+}
+
+define i32 @test_add_max_u32_vvv(i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: test_add_max_u32_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_add_max_u32 v0, v0, v1, v2
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.add.max.u32(i32 %a, i32 %b, i32 %c, i1 0)
+ ret i32 %ret
+}
+
+define i32 @test_add_max_u32_ssi_clamp(i32 inreg %a, i32 inreg %b) {
+; GCN-LABEL: test_add_max_u32_ssi_clamp:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_add_max_u32 v0, s0, s1, 1 clamp
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.add.max.u32(i32 %a, i32 %b, i32 1, i1 1)
+ ret i32 %ret
+}
+
+define <2 x i16> @test_add_min_i16_vvv(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c) {
+; GCN-LABEL: test_add_min_i16_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_pk_add_min_i16 v0, v0, v1, v2
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call <2 x i16> @llvm.amdgcn.pk.add.min.i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, i1 0)
+ ret <2 x i16> %ret
+}
+
+define <2 x i16> @test_add_min_i16_ssi_clamp(<2 x i16> inreg %a, <2 x i16> inreg %b) {
+; GCN-LABEL: test_add_min_i16_ssi_clamp:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_pk_add_min_i16 v0, s0, s1, 1 op_sel_hi:[1,1,0] clamp
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call <2 x i16> @llvm.amdgcn.pk.add.min.i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> <i16 1, i16 1>, i1 1)
+ ret <2 x i16> %ret
+}
+
+define <2 x i16> @test_add_min_u16_vvv(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c) {
+; GCN-LABEL: test_add_min_u16_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_pk_add_min_u16 v0, v0, v1, v2
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call <2 x i16> @llvm.amdgcn.pk.add.min.u16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, i1 0)
+ ret <2 x i16> %ret
+}
+
+define <2 x i16> @test_add_min_u16_ssi_clamp(<2 x i16> inreg %a, <2 x i16> inreg %b) {
+; GCN-LABEL: test_add_min_u16_ssi_clamp:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_pk_add_min_u16 v0, s0, s1, 1 op_sel_hi:[1,1,0] clamp
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call <2 x i16> @llvm.amdgcn.pk.add.min.u16(<2 x i16> %a, <2 x i16> %b, <2 x i16> <i16 1, i16 1>, i1 1)
+ ret <2 x i16> %ret
+}
+
+define <2 x i16> @test_add_max_i16_vvv(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c) {
+; GCN-LABEL: test_add_max_i16_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_pk_add_max_i16 v0, v0, v1, v2
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call <2 x i16> @llvm.amdgcn.pk.add.max.i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, i1 0)
+ ret <2 x i16> %ret
+}
+
+define <2 x i16> @test_add_max_i16_ssi_clamp(<2 x i16> inreg %a, <2 x i16> inreg %b) {
+; GCN-LABEL: test_add_max_i16_ssi_clamp:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_pk_add_max_i16 v0, s0, s1, 1 op_sel_hi:[1,1,0] clamp
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call <2 x i16> @llvm.amdgcn.pk.add.max.i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> <i16 1, i16 1>, i1 1)
+ ret <2 x i16> %ret
+}
+
+define <2 x i16> @test_add_max_u16_vvv(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c) {
+; GCN-LABEL: test_add_max_u16_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_pk_add_max_u16 v0, v0, v1, v2
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call <2 x i16> @llvm.amdgcn.pk.add.max.u16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, i1 0)
+ ret <2 x i16> %ret
+}
+
+define <2 x i16> @test_add_max_u16_ssi_clamp(<2 x i16> inreg %a, <2 x i16> inreg %b) {
+; GCN-LABEL: test_add_max_u16_ssi_clamp:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_pk_add_max_u16 v0, s0, s1, 1 op_sel_hi:[1,1,0] clamp
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call <2 x i16> @llvm.amdgcn.pk.add.max.u16(<2 x i16> %a, <2 x i16> %b, <2 x i16> <i16 1, i16 1>, i1 1)
+ ret <2 x i16> %ret
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX1250-GISEL: {{.*}}
+; GFX1250-SDAG: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll
index 883db20..e30a586 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll
@@ -1485,7 +1485,7 @@ define float @v_exp2_f32_fast(float %in) {
ret float %result
}
-define float @v_exp2_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
+define float @v_exp2_f32_unsafe_math_attr(float %in) {
; SI-SDAG-LABEL: v_exp2_f32_unsafe_math_attr:
; SI-SDAG: ; %bb.0:
; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log2.ll b/llvm/test/CodeGen/AMDGPU/llvm.log2.ll
index 0854134..61a777f 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.log2.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.log2.ll
@@ -1907,7 +1907,7 @@ define float @v_log2_f32_fast(float %in) {
ret float %result
}
-define float @v_log2_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
+define float @v_log2_f32_unsafe_math_attr(float %in) {
; SI-SDAG-LABEL: v_log2_f32_unsafe_math_attr:
; SI-SDAG: ; %bb.0:
; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/minmax.ll b/llvm/test/CodeGen/AMDGPU/minmax.ll
index d578d2e..60570bd 100644
--- a/llvm/test/CodeGen/AMDGPU/minmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/minmax.ll
@@ -1296,4 +1296,4 @@ declare half @llvm.minnum.f16(half, half)
declare half @llvm.maxnum.f16(half, half)
declare float @llvm.minnum.f32(float, float)
declare float @llvm.maxnum.f32(float, float)
-attributes #0 = { nounwind "unsafe-fp-math"="false" "no-nans-fp-math"="true" }
+attributes #0 = { nounwind "no-nans-fp-math"="true" }
diff --git a/llvm/test/CodeGen/AMDGPU/stackguard.ll b/llvm/test/CodeGen/AMDGPU/stackguard.ll
new file mode 100644
index 0000000..393686f
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/stackguard.ll
@@ -0,0 +1,14 @@
+; RUN: not llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -filetype=null %s 2>&1 | FileCheck %s
+; RUN: not llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -filetype=null %s 2>&1 | FileCheck %s
+
+; FIXME: To actually support stackguard, the intrinsic needs to be fixed to
+; return a pointer in any address space. (A sketch follows this file's diff.)
+
+; CHECK: error: unable to lower stackguard
+define i1 @test_stackguard(ptr %p1) {
+ %p2 = call ptr @llvm.stackguard()
+ %res = icmp ne ptr %p2, %p1
+ ret i1 %res
+}
+
+declare ptr @llvm.stackguard()
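
The test expects both instruction selectors to emit the same diagnostic. A
hedged C++ sketch of how a lowering path typically reports such an unsupported
construct (assumed for illustration, not the actual SelectionDAGBuilder or
IRTranslator change):

#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

// Sketch: raise a per-function error through the context's diagnostic
// handler; llc prints it as "error: ... unable to lower stackguard" and
// exits non-zero, which the `not llc` RUN lines above rely on.
static void reportUnableToLowerStackGuard(const Function &F) {
  F.getContext().diagnose(
      DiagnosticInfoUnsupported(F, "unable to lower stackguard"));
}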
diff --git a/llvm/test/CodeGen/DirectX/CBufferAccess/unused.ll b/llvm/test/CodeGen/DirectX/CBufferAccess/unused.ll
new file mode 100644
index 0000000..8c0d82e
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/CBufferAccess/unused.ll
@@ -0,0 +1,13 @@
+; RUN: opt -S -dxil-cbuffer-access -mtriple=dxil--shadermodel6.3-library %s | FileCheck %s
+; Check that we correctly ignore cbuffers nulled out by optimizations. (A sketch of this guard follows this file's diff.)
+
+%__cblayout_CB = type <{ float }>
+@CB.cb = local_unnamed_addr global target("dx.CBuffer", %__cblayout_CB) poison
+@x = external local_unnamed_addr addrspace(2) global float, align 4
+
+; CHECK-NOT: !hlsl.cbs =
+!hlsl.cbs = !{!0, !1, !2}
+
+!0 = !{ptr @CB.cb, ptr addrspace(2) @x}
+!1 = !{ptr @CB.cb, null}
+!2 = !{null, null}
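
The guard this test exercises (presumably part of the CBuffer.cpp change in
this commit) amounts to skipping !hlsl.cbs entries whose operands were replaced
with null when the cbuffer was optimized away. A hedged C++ sketch, with
assumed names rather than the actual pass code:

#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Sketch: visit only cbuffer mappings that still reference live globals;
// entries like !{ptr @CB.cb, null} or !{null, null} are skipped.
static void forEachLiveCBufferMapping(Module &M) {
  NamedMDNode *CBs = M.getNamedMetadata("hlsl.cbs");
  if (!CBs)
    return;
  for (const MDNode *Entry : CBs->operands()) {
    if (Entry->getNumOperands() != 2)
      continue;
    const auto *Handle =
        dyn_cast_or_null<ValueAsMetadata>(Entry->getOperand(0));
    const auto *Member =
        dyn_cast_or_null<ValueAsMetadata>(Entry->getOperand(1));
    if (!Handle || !Member)
      continue; // Nulled out by an optimization; nothing to rewrite.
    // ... rewrite accesses for this still-live cbuffer/global pair ...
  }
}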
diff --git a/llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll b/llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll
index 245f764..7149cdb 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll
@@ -32,9 +32,7 @@ define <16 x i16> @shuffle_v16i16(<16 x i16> %a) {
; CHECK: # %bb.0:
; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI2_0)
; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI2_0)
-; CHECK-NEXT: xvpermi.d $xr2, $xr0, 78
-; CHECK-NEXT: xvshuf.w $xr1, $xr2, $xr0
-; CHECK-NEXT: xvori.b $xr0, $xr1, 0
+; CHECK-NEXT: xvperm.w $xr0, $xr0, $xr1
; CHECK-NEXT: ret
%shuffle = shufflevector <16 x i16> %a, <16 x i16> poison, <16 x i32> <i32 8, i32 9, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
ret <16 x i16> %shuffle
@@ -55,9 +53,7 @@ define <16 x i16> @shuffle_v16i16_same_lane(<16 x i16> %a) {
define <8 x i32> @shuffle_v8i32(<8 x i32> %a) {
; CHECK-LABEL: shuffle_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI4_0)
-; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI4_0)
-; CHECK-NEXT: xvperm.w $xr0, $xr0, $xr1
+; CHECK-NEXT: xvpermi.d $xr0, $xr0, 226
; CHECK-NEXT: ret
%shuffle = shufflevector <8 x i32> %a, <8 x i32> poison, <8 x i32> <i32 4, i32 5, i32 0, i32 1, i32 4, i32 5, i32 6, i32 7>
ret <8 x i32> %shuffle
@@ -93,9 +89,7 @@ define <4 x i64> @shuffle_v4i64_same_lane(<4 x i64> %a) {
define <8 x float> @shuffle_v8f32(<8 x float> %a) {
; CHECK-LABEL: shuffle_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI8_0)
-; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI8_0)
-; CHECK-NEXT: xvperm.w $xr0, $xr0, $xr1
+; CHECK-NEXT: xvpermi.d $xr0, $xr0, 226
; CHECK-NEXT: ret
%shuffle = shufflevector <8 x float> %a, <8 x float> poison, <8 x i32> <i32 4, i32 5, i32 0, i32 1, i32 4, i32 5, i32 6, i32 7>
ret <8 x float> %shuffle
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll
index 061b2b0..abd00b6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll
@@ -11,33 +11,80 @@
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+zvfh,+experimental-zvfbfa,+v \
+; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
+; RUN: --check-prefixes=CHECK,ZVFBFA
define <vscale x 1 x bfloat> @vfadd_vv_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb) {
-; CHECK-LABEL: vfadd_vv_nxv1bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9
-; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v9, v10
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv1bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v9, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFH-NEXT: vfadd.vv v9, v9, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv1bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v9, v9, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv1bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v9, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%vc = fadd <vscale x 1 x bfloat> %va, %vb
ret <vscale x 1 x bfloat> %vc
}
define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16(<vscale x 1 x bfloat> %va, bfloat %b) {
-; CHECK-LABEL: vfadd_vf_nxv1bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vfadd.vf v9, v9, fa5
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv1bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v9, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFH-NEXT: vfadd.vf v9, v9, fa5
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv1bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vf v9, v9, fa5
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v9, v9, fa5
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 1 x bfloat> %head, <vscale x 1 x bfloat> poison, <vscale x 1 x i32> zeroinitializer
%vc = fadd <vscale x 1 x bfloat> %va, %splat
@@ -45,31 +92,75 @@ define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16(<vscale x 1 x bfloat> %va, bfloa
}
define <vscale x 2 x bfloat> @vfadd_vv_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb) {
-; CHECK-LABEL: vfadd_vv_nxv2bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9
-; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v9, v10
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv2bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v9, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT: vfadd.vv v9, v9, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv2bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v9, v9, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv2bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v9, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%vc = fadd <vscale x 2 x bfloat> %va, %vb
ret <vscale x 2 x bfloat> %vc
}
define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16(<vscale x 2 x bfloat> %va, bfloat %b) {
-; CHECK-LABEL: vfadd_vf_nxv2bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT: vfadd.vf v9, v9, fa5
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv2bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v9, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT: vfadd.vf v9, v9, fa5
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv2bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfadd.vf v9, v9, fa5
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv2bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v9, v9, fa5
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 2 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 2 x bfloat> %head, <vscale x 2 x bfloat> poison, <vscale x 2 x i32> zeroinitializer
%vc = fadd <vscale x 2 x bfloat> %va, %splat
@@ -77,31 +168,75 @@ define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16(<vscale x 2 x bfloat> %va, bfloa
}
define <vscale x 4 x bfloat> @vfadd_vv_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %vb) {
-; CHECK-LABEL: vfadd_vv_nxv4bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9
-; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vfadd.vv v10, v12, v10
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv4bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFH-NEXT: vfadd.vv v10, v12, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv4bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v10, v12, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv4bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v10, v12, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10
+; ZVFBFA-NEXT: ret
%vc = fadd <vscale x 4 x bfloat> %va, %vb
ret <vscale x 4 x bfloat> %vc
}
define <vscale x 4 x bfloat> @vfadd_vf_nxv4bf16(<vscale x 4 x bfloat> %va, bfloat %b) {
-; CHECK-LABEL: vfadd_vf_nxv4bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vfadd.vf v10, v10, fa5
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv4bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFH-NEXT: vfadd.vf v10, v10, fa5
+; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv4bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vf v10, v10, fa5
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv4bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v10, v10, fa5
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 4 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 4 x bfloat> %head, <vscale x 4 x bfloat> poison, <vscale x 4 x i32> zeroinitializer
%vc = fadd <vscale x 4 x bfloat> %va, %splat
@@ -109,31 +244,75 @@ define <vscale x 4 x bfloat> @vfadd_vf_nxv4bf16(<vscale x 4 x bfloat> %va, bfloa
}
define <vscale x 8 x bfloat> @vfadd_vv_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) {
-; CHECK-LABEL: vfadd_vv_nxv8bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v10
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfadd.vv v12, v16, v12
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv8bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v10
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFH-NEXT: vfadd.vv v12, v16, v12
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv8bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v12, v16, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv8bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v12, v16, v12
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12
+; ZVFBFA-NEXT: ret
%vc = fadd <vscale x 8 x bfloat> %va, %vb
ret <vscale x 8 x bfloat> %vc
}
define <vscale x 8 x bfloat> @vfadd_vf_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat %b) {
-; CHECK-LABEL: vfadd_vf_nxv8bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfadd.vf v12, v12, fa5
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv8bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFH-NEXT: vfadd.vf v12, v12, fa5
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv8bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfadd.vf v12, v12, fa5
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv8bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v12, v12, fa5
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
%vc = fadd <vscale x 8 x bfloat> %va, %splat
@@ -141,16 +320,38 @@ define <vscale x 8 x bfloat> @vfadd_vf_nxv8bf16(<vscale x 8 x bfloat> %va, bfloa
}
define <vscale x 8 x bfloat> @vfadd_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat %b) {
-; CHECK-LABEL: vfadd_fv_nxv8bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfadd.vf v12, v12, fa5
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_fv_nxv8bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFH-NEXT: vfadd.vf v12, v12, fa5
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_fv_nxv8bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfadd.vf v12, v12, fa5
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_fv_nxv8bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v12, v12, fa5
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
%vc = fadd <vscale x 8 x bfloat> %splat, %va
@@ -158,31 +359,75 @@ define <vscale x 8 x bfloat> @vfadd_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloa
}
define <vscale x 16 x bfloat> @vfadd_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %vb) {
-; CHECK-LABEL: vfadd_vv_nxv16bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v24, v16
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv16bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v24, v16
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv16bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv16bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: ret
%vc = fadd <vscale x 16 x bfloat> %va, %vb
ret <vscale x 16 x bfloat> %vc
}
define <vscale x 16 x bfloat> @vfadd_vf_nxv16bf16(<vscale x 16 x bfloat> %va, bfloat %b) {
-; CHECK-LABEL: vfadd_vf_nxv16bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vf v16, v16, fa5
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv16bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vf v16, v16, fa5
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv16bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vf v16, v16, fa5
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv16bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v16, v16, fa5
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 16 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 16 x bfloat> %head, <vscale x 16 x bfloat> poison, <vscale x 16 x i32> zeroinitializer
%vc = fadd <vscale x 16 x bfloat> %va, %splat
@@ -190,78 +435,216 @@ define <vscale x 16 x bfloat> @vfadd_vf_nxv16bf16(<vscale x 16 x bfloat> %va, bf
}
define <vscale x 32 x bfloat> @vfadd_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb) {
-; CHECK-LABEL: vfadd_vv_nxv32bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v16
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vfwcvtbf16.f.f.v v0, v8
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v20
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12
-; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v0, v0, v8
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v0
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v16, v24
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv32bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: addi sp, sp, -16
+; ZVFH-NEXT: .cfi_def_cfa_offset 16
+; ZVFH-NEXT: csrr a0, vlenb
+; ZVFH-NEXT: slli a0, a0, 3
+; ZVFH-NEXT: sub sp, sp, a0
+; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v16
+; ZVFH-NEXT: addi a0, sp, 16
+; ZVFH-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v0, v8
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v20
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12
+; ZVFH-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v0, v0, v8
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v0
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v16, v24
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v12, v16
+; ZVFH-NEXT: csrr a0, vlenb
+; ZVFH-NEXT: slli a0, a0, 3
+; ZVFH-NEXT: add sp, sp, a0
+; ZVFH-NEXT: .cfi_def_cfa sp, 16
+; ZVFH-NEXT: addi sp, sp, 16
+; ZVFH-NEXT: .cfi_def_cfa_offset 0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv32bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: sub sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v16
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v0, v8
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v20
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v0, v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v0
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v12, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN-NEXT: addi sp, sp, 16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv32bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: addi sp, sp, -16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: sub sp, sp, a0
+; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFBFA-NEXT: addi a0, sp, 16
+; ZVFBFA-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: vfwcvt.f.f.v v0, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFBFA-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v0, v0, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v0
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: add sp, sp, a0
+; ZVFBFA-NEXT: .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT: addi sp, sp, 16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT: ret
%vc = fadd <vscale x 32 x bfloat> %va, %vb
ret <vscale x 32 x bfloat> %vc
}
define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bfloat %b) {
-; CHECK-LABEL: vfadd_vf_nxv32bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: fmv.x.h a0, fa0
-; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v12
-; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
-; CHECK-NEXT: vmv.v.x v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v0, v8
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v0, v8, v0
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v0
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v24, v16
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv32bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: addi sp, sp, -16
+; ZVFH-NEXT: .cfi_def_cfa_offset 16
+; ZVFH-NEXT: csrr a0, vlenb
+; ZVFH-NEXT: slli a0, a0, 3
+; ZVFH-NEXT: sub sp, sp, a0
+; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFH-NEXT: fmv.x.h a0, fa0
+; ZVFH-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8
+; ZVFH-NEXT: addi a1, sp, 16
+; ZVFH-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v12
+; ZVFH-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; ZVFH-NEXT: vmv.v.x v8, a0
+; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v0, v8
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12
+; ZVFH-NEXT: addi a0, sp, 16
+; ZVFH-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v0, v8, v0
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v0
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v24, v16
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v12, v16
+; ZVFH-NEXT: csrr a0, vlenb
+; ZVFH-NEXT: slli a0, a0, 3
+; ZVFH-NEXT: add sp, sp, a0
+; ZVFH-NEXT: .cfi_def_cfa sp, 16
+; ZVFH-NEXT: addi sp, sp, 16
+; ZVFH-NEXT: .cfi_def_cfa_offset 0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv32bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: sub sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: fmv.x.h a0, fa0
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8
+; ZVFHMIN-NEXT: addi a1, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v12
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v8, a0
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v0, v8
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v0, v8, v0
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v0
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v12, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN-NEXT: addi sp, sp, 16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv32bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: addi sp, sp, -16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: sub sp, sp, a0
+; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFBFA-NEXT: fmv.x.h a0, fa0
+; ZVFBFA-NEXT: vsetvli a1, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFBFA-NEXT: addi a1, sp, 16
+; ZVFBFA-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v12
+; ZVFBFA-NEXT: vsetvli a1, zero, e16alt, m8, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v8, a0
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v0, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFBFA-NEXT: addi a0, sp, 16
+; ZVFBFA-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v0, v8, v0
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v0
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: add sp, sp, a0
+; ZVFBFA-NEXT: .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT: addi sp, sp, 16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 32 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 32 x bfloat> %head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer
%vc = fadd <vscale x 32 x bfloat> %va, %splat
@@ -285,6 +668,12 @@ define <vscale x 1 x half> @vfadd_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv1f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v8, v8, v9
+; ZVFBFA-NEXT: ret
%vc = fadd <vscale x 1 x half> %va, %vb
ret <vscale x 1 x half> %vc
}
@@ -306,6 +695,12 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v8, v8, fa0
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
%vc = fadd <vscale x 1 x half> %va, %splat
@@ -329,6 +724,12 @@ define <vscale x 2 x half> @vfadd_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv2f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v8, v8, v9
+; ZVFBFA-NEXT: ret
%vc = fadd <vscale x 2 x half> %va, %vb
ret <vscale x 2 x half> %vc
}
@@ -350,6 +751,12 @@ define <vscale x 2 x half> @vfadd_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv2f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v8, v8, fa0
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 2 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
%vc = fadd <vscale x 2 x half> %va, %splat
@@ -373,6 +780,12 @@ define <vscale x 4 x half> @vfadd_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv4f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v8, v8, v9
+; ZVFBFA-NEXT: ret
%vc = fadd <vscale x 4 x half> %va, %vb
ret <vscale x 4 x half> %vc
}
@@ -394,6 +807,12 @@ define <vscale x 4 x half> @vfadd_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv4f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v8, v8, fa0
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 4 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
%vc = fadd <vscale x 4 x half> %va, %splat
@@ -417,6 +836,12 @@ define <vscale x 8 x half> @vfadd_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv8f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v8, v8, v10
+; ZVFBFA-NEXT: ret
%vc = fadd <vscale x 8 x half> %va, %vb
ret <vscale x 8 x half> %vc
}
@@ -438,6 +863,12 @@ define <vscale x 8 x half> @vfadd_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv8f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v8, v8, fa0
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%vc = fadd <vscale x 8 x half> %va, %splat
@@ -461,6 +892,12 @@ define <vscale x 8 x half> @vfadd_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_fv_nxv8f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v8, v8, fa0
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%vc = fadd <vscale x 8 x half> %splat, %va
@@ -484,6 +921,12 @@ define <vscale x 16 x half> @vfadd_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv16f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v8, v8, v12
+; ZVFBFA-NEXT: ret
%vc = fadd <vscale x 16 x half> %va, %vb
ret <vscale x 16 x half> %vc
}
@@ -505,6 +948,12 @@ define <vscale x 16 x half> @vfadd_vf_nxv16f16(<vscale x 16 x half> %va, half %b
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv16f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v8, v8, fa0
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 16 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
%vc = fadd <vscale x 16 x half> %va, %splat
@@ -549,6 +998,12 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv32f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v8, v8, v16
+; ZVFBFA-NEXT: ret
%vc = fadd <vscale x 32 x half> %va, %vb
ret <vscale x 32 x half> %vc
}
@@ -596,6 +1051,12 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv32f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v8, v8, fa0
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 32 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
%vc = fadd <vscale x 32 x half> %va, %splat
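(Editor's note, not part of the commit.) The hunks above all follow one pattern: with +experimental-zvfbfa the compiler selects the alternate bf16 element type (e16alt) in vsetvli and the generic vfwcvt.f.f.v/vfncvt.f.f.w mnemonics, where the ZVFH/ZVFHMIN configurations fall back to the Zvfbfmin-style vfwcvtbf16.f.f.v/vfncvtbf16.f.f.w forms at plain e16; in both cases the fadd itself is still performed after widening to e32. A minimal standalone test exercising the same lowering would look like the sketch below — the @bf16_add function name and single RUN line are illustrative rather than taken from this commit, while the CHECK lines mirror the nxv1bf16 ZVFBFA output shown above:

; RUN: llc -mtriple=riscv64 -mattr=+zvfh,+experimental-zvfbfa,+v \
; RUN:   -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s
define <vscale x 1 x bfloat> @bf16_add(<vscale x 1 x bfloat> %x, <vscale x 1 x bfloat> %y) {
; CHECK-LABEL: bf16_add:
; CHECK:       vsetvli a0, zero, e16alt, mf4, ta, ma
; CHECK-NEXT:  vfwcvt.f.f.v v10, v9
; CHECK-NEXT:  vfwcvt.f.f.v v9, v8
; CHECK-NEXT:  vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:  vfadd.vv v9, v9, v10
; CHECK-NEXT:  vsetvli zero, zero, e16alt, mf4, ta, ma
; CHECK-NEXT:  vfncvt.f.f.w v8, v9
; CHECK-NEXT:  ret
  %r = fadd <vscale x 1 x bfloat> %x, %y
  ret <vscale x 1 x bfloat> %r
}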
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
index 32e3d6b..633a201 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
@@ -11,52 +11,125 @@
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+zvfhmin,+experimental-zvfbfa,+v \
+; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
+; RUN: --check-prefixes=CHECK,ZVFBFA
declare <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat>, <vscale x 1 x bfloat>, <vscale x 1 x i1>, i32)
define <vscale x 1 x bfloat> @vfadd_vv_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv1bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v9, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv1bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v9, v8, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFH-NEXT: vfadd.vv v9, v9, v10, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv1bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v9, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v9, v9, v10, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv1bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v9, v10, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT: ret
%v = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %b, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x bfloat> %v
}
define <vscale x 1 x bfloat> @vfadd_vv_nxv1bf16_unmasked(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv1bf16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9
-; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v9, v10
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv1bf16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v9, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFH-NEXT: vfadd.vv v9, v9, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv1bf16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v9, v9, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv1bf16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v9, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%v = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
ret <vscale x 1 x bfloat> %v
}
define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16(<vscale x 1 x bfloat> %va, bfloat %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv1bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v10, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv1bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vmv.v.x v9, a1
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFH-NEXT: vfadd.vv v9, v10, v8, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv1bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v9, a1
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v9, v10, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v10, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 1 x bfloat> %elt.head, <vscale x 1 x bfloat> poison, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb, <vscale x 1 x i1> %m, i32 %evl)
@@ -64,18 +137,44 @@ define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16(<vscale x 1 x bfloat> %va, bfloa
}
define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_commute(<vscale x 1 x bfloat> %va, bfloat %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv1bf16_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v8, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv1bf16_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vmv.v.x v9, a1
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFH-NEXT: vfadd.vv v9, v8, v10, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv1bf16_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v9, a1
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v9, v8, v10, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1bf16_commute:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v8, v10, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 1 x bfloat> %elt.head, <vscale x 1 x bfloat> poison, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> %vb, <vscale x 1 x bfloat> %va, <vscale x 1 x i1> %m, i32 %evl)
@@ -83,18 +182,44 @@ define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_commute(<vscale x 1 x bfloat> %v
}
define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_unmasked(<vscale x 1 x bfloat> %va, bfloat %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv1bf16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv1bf16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vmv.v.x v9, a1
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v9
+; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFH-NEXT: vfadd.vv v9, v10, v8
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv1bf16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v9, a1
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v9, v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1bf16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v10, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 1 x bfloat> %elt.head, <vscale x 1 x bfloat> poison, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
@@ -102,18 +227,44 @@ define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_unmasked(<vscale x 1 x bfloat> %
}
define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_unmasked_commute(<vscale x 1 x bfloat> %va, bfloat %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv1bf16_unmasked_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v8, v10
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv1bf16_unmasked_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vmv.v.x v9, a1
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v9
+; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFH-NEXT: vfadd.vv v9, v8, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv1bf16_unmasked_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v9, a1
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v9, v8, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1bf16_unmasked_commute:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v8, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 1 x bfloat> %elt.head, <vscale x 1 x bfloat> poison, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> %vb, <vscale x 1 x bfloat> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
@@ -123,48 +274,118 @@ define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_unmasked_commute(<vscale x 1 x b
declare <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2bf16(<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x i1>, i32)
define <vscale x 2 x bfloat> @vfadd_vv_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv2bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v9, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv2bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v9, v8, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT: vfadd.vv v9, v9, v10, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv2bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v9, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v9, v9, v10, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv2bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v9, v10, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT: ret
%v = call <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %b, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x bfloat> %v
}
define <vscale x 2 x bfloat> @vfadd_vv_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv2bf16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9
-; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v9, v10
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv2bf16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v9, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT: vfadd.vv v9, v9, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv2bf16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v9, v9, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv2bf16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v9, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%v = call <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x bfloat> %v
}
define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16(<vscale x 2 x bfloat> %va, bfloat %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv2bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v10, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv2bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT: vmv.v.x v9, a1
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT: vfadd.vv v9, v10, v8, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv2bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v9, a1
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v9, v10, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv2bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v10, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 2 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 2 x bfloat> %elt.head, <vscale x 2 x bfloat> poison, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, <vscale x 2 x i1> %m, i32 %evl)
@@ -172,18 +393,44 @@ define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16(<vscale x 2 x bfloat> %va, bfloa
}
define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, bfloat %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv2bf16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv2bf16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT: vmv.v.x v9, a1
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v9
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT: vfadd.vv v9, v10, v8
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv2bf16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v9, a1
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v9, v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv2bf16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v10, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 2 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 2 x bfloat> %elt.head, <vscale x 2 x bfloat> poison, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
@@ -193,48 +440,118 @@ define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16_unmasked(<vscale x 2 x bfloat> %
declare <vscale x 4 x bfloat> @llvm.vp.fadd.nxv4bf16(<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x i1>, i32)
define <vscale x 4 x bfloat> @vfadd_vv_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv4bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vfadd.vv v10, v12, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv4bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v8, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFH-NEXT: vfadd.vv v10, v12, v10, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v10, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv4bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v10, v12, v10, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv4bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v10, v12, v10, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10, v0.t
+; ZVFBFA-NEXT: ret
%v = call <vscale x 4 x bfloat> @llvm.vp.fadd.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %b, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x bfloat> %v
}
define <vscale x 4 x bfloat> @vfadd_vv_nxv4bf16_unmasked(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv4bf16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9
-; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vfadd.vv v10, v12, v10
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv4bf16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFH-NEXT: vfadd.vv v10, v12, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv4bf16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v10, v12, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv4bf16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v10, v12, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10
+; ZVFBFA-NEXT: ret
%v = call <vscale x 4 x bfloat> @llvm.vp.fadd.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
ret <vscale x 4 x bfloat> %v
}
define <vscale x 4 x bfloat> @vfadd_vf_nxv4bf16(<vscale x 4 x bfloat> %va, bfloat %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv4bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v12, a1
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vfadd.vv v10, v10, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv4bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vmv.v.x v12, a1
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v12, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFH-NEXT: vfadd.vv v10, v10, v8, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v10, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv4bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v12, a1
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v10, v10, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv4bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v12, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v10, v10, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10, v0.t
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 4 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 4 x bfloat> %elt.head, <vscale x 4 x bfloat> poison, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x bfloat> @llvm.vp.fadd.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %vb, <vscale x 4 x i1> %m, i32 %evl)
@@ -242,18 +559,44 @@ define <vscale x 4 x bfloat> @vfadd_vf_nxv4bf16(<vscale x 4 x bfloat> %va, bfloa
}
define <vscale x 4 x bfloat> @vfadd_vf_nxv4bf16_unmasked(<vscale x 4 x bfloat> %va, bfloat %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv4bf16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v12, a1
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v12
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vfadd.vv v10, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv4bf16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vmv.v.x v12, a1
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v12
+; ZVFH-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFH-NEXT: vfadd.vv v10, v10, v8
+; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv4bf16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v12, a1
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v10, v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv4bf16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v12, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v12
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v10, v10, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 4 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 4 x bfloat> %elt.head, <vscale x 4 x bfloat> poison, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x bfloat> @llvm.vp.fadd.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
@@ -263,48 +606,118 @@ define <vscale x 4 x bfloat> @vfadd_vf_nxv4bf16_unmasked(<vscale x 4 x bfloat> %
declare <vscale x 8 x bfloat> @llvm.vp.fadd.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x i1>, i32)
define <vscale x 8 x bfloat> @vfadd_vv_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv8bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v10, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfadd.vv v12, v16, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv8bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v10, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFH-NEXT: vfadd.vv v12, v16, v12, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv8bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v10, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v12, v16, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v12, v0.t
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv8bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v10, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v12, v16, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12, v0.t
+; ZVFBFA-NEXT: ret
%v = call <vscale x 8 x bfloat> @llvm.vp.fadd.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %b, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x bfloat> %v
}
define <vscale x 8 x bfloat> @vfadd_vv_nxv8bf16_unmasked(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv8bf16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v10
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfadd.vv v12, v16, v12
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv8bf16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v10
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFH-NEXT: vfadd.vv v12, v16, v12
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv8bf16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v12, v16, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv8bf16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v12, v16, v12
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12
+; ZVFBFA-NEXT: ret
%v = call <vscale x 8 x bfloat> @llvm.vp.fadd.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
ret <vscale x 8 x bfloat> %v
}
define <vscale x 8 x bfloat> @vfadd_vf_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv8bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmv.v.x v16, a1
-; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfadd.vv v12, v12, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv8bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vmv.v.x v16, a1
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v8, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v16, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFH-NEXT: vfadd.vv v12, v12, v8, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv8bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v16, a1
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v8, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v12, v12, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v12, v0.t
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv8bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v16, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v16, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v12, v12, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12, v0.t
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 8 x bfloat> %elt.head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x bfloat> @llvm.vp.fadd.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb, <vscale x 8 x i1> %m, i32 %evl)
@@ -312,18 +725,44 @@ define <vscale x 8 x bfloat> @vfadd_vf_nxv8bf16(<vscale x 8 x bfloat> %va, bfloa
}
define <vscale x 8 x bfloat> @vfadd_vf_nxv8bf16_unmasked(<vscale x 8 x bfloat> %va, bfloat %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv8bf16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmv.v.x v16, a1
-; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v16
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfadd.vv v12, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv8bf16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vmv.v.x v16, a1
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v8
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v16
+; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFH-NEXT: vfadd.vv v12, v12, v8
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv8bf16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v16, a1
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v8
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v12, v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv8bf16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v16, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v12, v12, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 8 x bfloat> %elt.head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x bfloat> @llvm.vp.fadd.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
@@ -333,48 +772,118 @@ define <vscale x 8 x bfloat> @vfadd_vf_nxv8bf16_unmasked(<vscale x 8 x bfloat> %
declare <vscale x 16 x bfloat> @llvm.vp.fadd.nxv16bf16(<vscale x 16 x bfloat>, <vscale x 16 x bfloat>, <vscale x 16 x i1>, i32)
define <vscale x 16 x bfloat> @vfadd_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv16bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v24, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv16bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv16bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv16bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; ZVFBFA-NEXT: ret
%v = call <vscale x 16 x bfloat> @llvm.vp.fadd.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %b, <vscale x 16 x i1> %m, i32 %evl)
ret <vscale x 16 x bfloat> %v
}
define <vscale x 16 x bfloat> @vfadd_vv_nxv16bf16_unmasked(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv16bf16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v24, v16
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv16bf16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v24, v16
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv16bf16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv16bf16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: ret
%v = call <vscale x 16 x bfloat> @llvm.vp.fadd.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
ret <vscale x 16 x bfloat> %v
}
define <vscale x 16 x bfloat> @vfadd_vf_nxv16bf16(<vscale x 16 x bfloat> %va, bfloat %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv16bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv.v.x v24, a1
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v16, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv16bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT: vmv.v.x v24, a1
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v24, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v16, v8, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv16bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v24, a1
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v16, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv16bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v24, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v24, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 16 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 16 x bfloat> %elt.head, <vscale x 16 x bfloat> poison, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x bfloat> @llvm.vp.fadd.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %vb, <vscale x 16 x i1> %m, i32 %evl)
@@ -382,18 +891,44 @@ define <vscale x 16 x bfloat> @vfadd_vf_nxv16bf16(<vscale x 16 x bfloat> %va, bf
}
define <vscale x 16 x bfloat> @vfadd_vf_nxv16bf16_unmasked(<vscale x 16 x bfloat> %va, bfloat %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv16bf16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv.v.x v24, a1
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v24
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v16, v8
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv16bf16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT: vmv.v.x v24, a1
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v24
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v16, v8
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv16bf16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v24, a1
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v24
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv16bf16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v24, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v24
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 16 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 16 x bfloat> %elt.head, <vscale x 16 x bfloat> poison, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x bfloat> @llvm.vp.fadd.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
@@ -403,173 +938,493 @@ define <vscale x 16 x bfloat> @vfadd_vf_nxv16bf16_unmasked(<vscale x 16 x bfloat
declare <vscale x 32 x bfloat> @llvm.vp.fadd.nxv32bf16(<vscale x 32 x bfloat>, <vscale x 32 x bfloat>, <vscale x 32 x i1>, i32)
define <vscale x 32 x bfloat> @vfadd_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv32bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vmv1r.v v7, v0
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a1, a2, 1
-; CHECK-NEXT: srli a2, a2, 2
-; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: vslidedown.vx v0, v0, a2
-; CHECK-NEXT: sltu a2, a0, a3
-; CHECK-NEXT: addi a2, a2, -1
-; CHECK-NEXT: and a2, a2, a3
-; CHECK-NEXT: addi a3, sp, 16
-; CHECK-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v16, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB22_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB22_2:
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v24, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v24, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv32bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: addi sp, sp, -16
+; ZVFH-NEXT: .cfi_def_cfa_offset 16
+; ZVFH-NEXT: csrr a1, vlenb
+; ZVFH-NEXT: slli a1, a1, 3
+; ZVFH-NEXT: sub sp, sp, a1
+; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFH-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; ZVFH-NEXT: vmv1r.v v7, v0
+; ZVFH-NEXT: csrr a2, vlenb
+; ZVFH-NEXT: slli a1, a2, 1
+; ZVFH-NEXT: srli a2, a2, 2
+; ZVFH-NEXT: sub a3, a0, a1
+; ZVFH-NEXT: vslidedown.vx v0, v0, a2
+; ZVFH-NEXT: sltu a2, a0, a3
+; ZVFH-NEXT: addi a2, a2, -1
+; ZVFH-NEXT: and a2, a2, a3
+; ZVFH-NEXT: addi a3, sp, 16
+; ZVFH-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFH-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t
+; ZVFH-NEXT: bltu a0, a1, .LBB22_2
+; ZVFH-NEXT: # %bb.1:
+; ZVFH-NEXT: mv a0, a1
+; ZVFH-NEXT: .LBB22_2:
+; ZVFH-NEXT: vmv1r.v v0, v7
+; ZVFH-NEXT: addi a1, sp, 16
+; ZVFH-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
+; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v24, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t
+; ZVFH-NEXT: csrr a0, vlenb
+; ZVFH-NEXT: slli a0, a0, 3
+; ZVFH-NEXT: add sp, sp, a0
+; ZVFH-NEXT: .cfi_def_cfa sp, 16
+; ZVFH-NEXT: addi sp, sp, 16
+; ZVFH-NEXT: .cfi_def_cfa_offset 0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv32bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 3
+; ZVFHMIN-NEXT: sub sp, sp, a1
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: slli a1, a2, 1
+; ZVFHMIN-NEXT: srli a2, a2, 2
+; ZVFHMIN-NEXT: sub a3, a0, a1
+; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
+; ZVFHMIN-NEXT: sltu a2, a0, a3
+; ZVFHMIN-NEXT: addi a2, a2, -1
+; ZVFHMIN-NEXT: and a2, a2, a3
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t
+; ZVFHMIN-NEXT: bltu a0, a1, .LBB22_2
+; ZVFHMIN-NEXT: # %bb.1:
+; ZVFHMIN-NEXT: mv a0, a1
+; ZVFHMIN-NEXT: .LBB22_2:
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: addi a1, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v24, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN-NEXT: addi sp, sp, 16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv32bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: addi sp, sp, -16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT: csrr a1, vlenb
+; ZVFBFA-NEXT: slli a1, a1, 3
+; ZVFBFA-NEXT: sub sp, sp, a1
+; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFBFA-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; ZVFBFA-NEXT: vmv1r.v v7, v0
+; ZVFBFA-NEXT: csrr a2, vlenb
+; ZVFBFA-NEXT: slli a1, a2, 1
+; ZVFBFA-NEXT: srli a2, a2, 2
+; ZVFBFA-NEXT: sub a3, a0, a1
+; ZVFBFA-NEXT: vslidedown.vx v0, v0, a2
+; ZVFBFA-NEXT: sltu a2, a0, a3
+; ZVFBFA-NEXT: addi a2, a2, -1
+; ZVFBFA-NEXT: and a2, a2, a3
+; ZVFBFA-NEXT: addi a3, sp, 16
+; ZVFBFA-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: vsetvli zero, a2, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; ZVFBFA-NEXT: bltu a0, a1, .LBB22_2
+; ZVFBFA-NEXT: # %bb.1:
+; ZVFBFA-NEXT: mv a0, a1
+; ZVFBFA-NEXT: .LBB22_2:
+; ZVFBFA-NEXT: vmv1r.v v0, v7
+; ZVFBFA-NEXT: addi a1, sp, 16
+; ZVFBFA-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v24, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: add sp, sp, a0
+; ZVFBFA-NEXT: .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT: addi sp, sp, 16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT: ret
%v = call <vscale x 32 x bfloat> @llvm.vp.fadd.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %b, <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x bfloat> %v
}
define <vscale x 32 x bfloat> @vfadd_vv_nxv32bf16_unmasked(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv32bf16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
-; CHECK-NEXT: vmset.m v24
-; CHECK-NEXT: slli a1, a2, 1
-; CHECK-NEXT: srli a2, a2, 2
-; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v24, a2
-; CHECK-NEXT: sltu a2, a0, a3
-; CHECK-NEXT: addi a2, a2, -1
-; CHECK-NEXT: and a2, a2, a3
-; CHECK-NEXT: addi a3, sp, 16
-; CHECK-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v16, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB23_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB23_2:
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v24
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v24, v16
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv32bf16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: addi sp, sp, -16
+; ZVFH-NEXT: .cfi_def_cfa_offset 16
+; ZVFH-NEXT: csrr a1, vlenb
+; ZVFH-NEXT: slli a1, a1, 3
+; ZVFH-NEXT: sub sp, sp, a1
+; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFH-NEXT: csrr a2, vlenb
+; ZVFH-NEXT: vsetvli a1, zero, e8, m4, ta, ma
+; ZVFH-NEXT: vmset.m v24
+; ZVFH-NEXT: slli a1, a2, 1
+; ZVFH-NEXT: srli a2, a2, 2
+; ZVFH-NEXT: sub a3, a0, a1
+; ZVFH-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFH-NEXT: vslidedown.vx v0, v24, a2
+; ZVFH-NEXT: sltu a2, a0, a3
+; ZVFH-NEXT: addi a2, a2, -1
+; ZVFH-NEXT: and a2, a2, a3
+; ZVFH-NEXT: addi a3, sp, 16
+; ZVFH-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFH-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t
+; ZVFH-NEXT: bltu a0, a1, .LBB23_2
+; ZVFH-NEXT: # %bb.1:
+; ZVFH-NEXT: mv a0, a1
+; ZVFH-NEXT: .LBB23_2:
+; ZVFH-NEXT: addi a1, sp, 16
+; ZVFH-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
+; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v24
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v24, v16
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16
+; ZVFH-NEXT: csrr a0, vlenb
+; ZVFH-NEXT: slli a0, a0, 3
+; ZVFH-NEXT: add sp, sp, a0
+; ZVFH-NEXT: .cfi_def_cfa sp, 16
+; ZVFH-NEXT: addi sp, sp, 16
+; ZVFH-NEXT: .cfi_def_cfa_offset 0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv32bf16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 3
+; ZVFHMIN-NEXT: sub sp, sp, a1
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT: vmset.m v24
+; ZVFHMIN-NEXT: slli a1, a2, 1
+; ZVFHMIN-NEXT: srli a2, a2, 2
+; ZVFHMIN-NEXT: sub a3, a0, a1
+; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
+; ZVFHMIN-NEXT: sltu a2, a0, a3
+; ZVFHMIN-NEXT: addi a2, a2, -1
+; ZVFHMIN-NEXT: and a2, a2, a3
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t
+; ZVFHMIN-NEXT: bltu a0, a1, .LBB23_2
+; ZVFHMIN-NEXT: # %bb.1:
+; ZVFHMIN-NEXT: mv a0, a1
+; ZVFHMIN-NEXT: .LBB23_2:
+; ZVFHMIN-NEXT: addi a1, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v24
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN-NEXT: addi sp, sp, 16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv32bf16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: addi sp, sp, -16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT: csrr a1, vlenb
+; ZVFBFA-NEXT: slli a1, a1, 3
+; ZVFBFA-NEXT: sub sp, sp, a1
+; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFBFA-NEXT: csrr a2, vlenb
+; ZVFBFA-NEXT: vsetvli a1, zero, e8, m4, ta, ma
+; ZVFBFA-NEXT: vmset.m v24
+; ZVFBFA-NEXT: slli a1, a2, 1
+; ZVFBFA-NEXT: srli a2, a2, 2
+; ZVFBFA-NEXT: sub a3, a0, a1
+; ZVFBFA-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFBFA-NEXT: vslidedown.vx v0, v24, a2
+; ZVFBFA-NEXT: sltu a2, a0, a3
+; ZVFBFA-NEXT: addi a2, a2, -1
+; ZVFBFA-NEXT: and a2, a2, a3
+; ZVFBFA-NEXT: addi a3, sp, 16
+; ZVFBFA-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: vsetvli zero, a2, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; ZVFBFA-NEXT: bltu a0, a1, .LBB23_2
+; ZVFBFA-NEXT: # %bb.1:
+; ZVFBFA-NEXT: mv a0, a1
+; ZVFBFA-NEXT: .LBB23_2:
+; ZVFBFA-NEXT: addi a1, sp, 16
+; ZVFBFA-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: add sp, sp, a0
+; ZVFBFA-NEXT: .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT: addi sp, sp, 16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT: ret
%v = call <vscale x 32 x bfloat> @llvm.vp.fadd.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
ret <vscale x 32 x bfloat> %v
}
define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bfloat %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv32bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v7, v0
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: vmv.v.x v24, a1
-; CHECK-NEXT: slli a1, a2, 1
-; CHECK-NEXT: srli a2, a2, 2
-; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v0, a2
-; CHECK-NEXT: sltu a2, a0, a3
-; CHECK-NEXT: addi a2, a2, -1
-; CHECK-NEXT: and a2, a2, a3
-; CHECK-NEXT: csrr a3, vlenb
-; CHECK-NEXT: slli a3, a3, 3
-; CHECK-NEXT: add a3, sp, a3
-; CHECK-NEXT: addi a3, a3, 16
-; CHECK-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v28, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v24, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB24_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB24_2:
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v16, v0.t
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v16, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv32bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: addi sp, sp, -16
+; ZVFH-NEXT: .cfi_def_cfa_offset 16
+; ZVFH-NEXT: csrr a1, vlenb
+; ZVFH-NEXT: slli a1, a1, 4
+; ZVFH-NEXT: sub sp, sp, a1
+; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; ZVFH-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; ZVFH-NEXT: vmv1r.v v7, v0
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: csrr a2, vlenb
+; ZVFH-NEXT: vmv.v.x v24, a1
+; ZVFH-NEXT: slli a1, a2, 1
+; ZVFH-NEXT: srli a2, a2, 2
+; ZVFH-NEXT: sub a3, a0, a1
+; ZVFH-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFH-NEXT: vslidedown.vx v0, v0, a2
+; ZVFH-NEXT: sltu a2, a0, a3
+; ZVFH-NEXT: addi a2, a2, -1
+; ZVFH-NEXT: and a2, a2, a3
+; ZVFH-NEXT: csrr a3, vlenb
+; ZVFH-NEXT: slli a3, a3, 3
+; ZVFH-NEXT: add a3, sp, a3
+; ZVFH-NEXT: addi a3, a3, 16
+; ZVFH-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill
+; ZVFH-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v28, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v12, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t
+; ZVFH-NEXT: bltu a0, a1, .LBB24_2
+; ZVFH-NEXT: # %bb.1:
+; ZVFH-NEXT: mv a0, a1
+; ZVFH-NEXT: .LBB24_2:
+; ZVFH-NEXT: vmv1r.v v0, v7
+; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t
+; ZVFH-NEXT: addi a0, sp, 16
+; ZVFH-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFH-NEXT: csrr a0, vlenb
+; ZVFH-NEXT: slli a0, a0, 3
+; ZVFH-NEXT: add a0, sp, a0
+; ZVFH-NEXT: addi a0, a0, 16
+; ZVFH-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v16, v0.t
+; ZVFH-NEXT: addi a0, sp, 16
+; ZVFH-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t
+; ZVFH-NEXT: csrr a0, vlenb
+; ZVFH-NEXT: slli a0, a0, 4
+; ZVFH-NEXT: add sp, sp, a0
+; ZVFH-NEXT: .cfi_def_cfa sp, 16
+; ZVFH-NEXT: addi sp, sp, 16
+; ZVFH-NEXT: .cfi_def_cfa_offset 0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv32bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 4
+; ZVFHMIN-NEXT: sub sp, sp, a1
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: vmv.v.x v24, a1
+; ZVFHMIN-NEXT: slli a1, a2, 1
+; ZVFHMIN-NEXT: srli a2, a2, 2
+; ZVFHMIN-NEXT: sub a3, a0, a1
+; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
+; ZVFHMIN-NEXT: sltu a2, a0, a3
+; ZVFHMIN-NEXT: addi a2, a2, -1
+; ZVFHMIN-NEXT: and a2, a2, a3
+; ZVFHMIN-NEXT: csrr a3, vlenb
+; ZVFHMIN-NEXT: slli a3, a3, 3
+; ZVFHMIN-NEXT: add a3, sp, a3
+; ZVFHMIN-NEXT: addi a3, a3, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v28, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t
+; ZVFHMIN-NEXT: bltu a0, a1, .LBB24_2
+; ZVFHMIN-NEXT: # %bb.1:
+; ZVFHMIN-NEXT: mv a0, a1
+; ZVFHMIN-NEXT: .LBB24_2:
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v16, v0.t
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN-NEXT: addi sp, sp, 16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv32bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: addi sp, sp, -16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT: csrr a1, vlenb
+; ZVFBFA-NEXT: slli a1, a1, 4
+; ZVFBFA-NEXT: sub sp, sp, a1
+; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; ZVFBFA-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; ZVFBFA-NEXT: vmv1r.v v7, v0
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: csrr a2, vlenb
+; ZVFBFA-NEXT: vmv.v.x v24, a1
+; ZVFBFA-NEXT: slli a1, a2, 1
+; ZVFBFA-NEXT: srli a2, a2, 2
+; ZVFBFA-NEXT: sub a3, a0, a1
+; ZVFBFA-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFBFA-NEXT: vslidedown.vx v0, v0, a2
+; ZVFBFA-NEXT: sltu a2, a0, a3
+; ZVFBFA-NEXT: addi a2, a2, -1
+; ZVFBFA-NEXT: and a2, a2, a3
+; ZVFBFA-NEXT: csrr a3, vlenb
+; ZVFBFA-NEXT: slli a3, a3, 3
+; ZVFBFA-NEXT: add a3, sp, a3
+; ZVFBFA-NEXT: addi a3, a3, 16
+; ZVFBFA-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: vsetvli zero, a2, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v28, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; ZVFBFA-NEXT: bltu a0, a1, .LBB24_2
+; ZVFBFA-NEXT: # %bb.1:
+; ZVFBFA-NEXT: mv a0, a1
+; ZVFBFA-NEXT: .LBB24_2:
+; ZVFBFA-NEXT: vmv1r.v v0, v7
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; ZVFBFA-NEXT: addi a0, sp, 16
+; ZVFBFA-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: add a0, sp, a0
+; ZVFBFA-NEXT: addi a0, a0, 16
+; ZVFBFA-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v16, v0.t
+; ZVFBFA-NEXT: addi a0, sp, 16
+; ZVFBFA-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 4
+; ZVFBFA-NEXT: add sp, sp, a0
+; ZVFBFA-NEXT: .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT: addi sp, sp, 16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 32 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 32 x bfloat> %elt.head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer
%v = call <vscale x 32 x bfloat> @llvm.vp.fadd.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb, <vscale x 32 x i1> %m, i32 %evl)
@@ -577,56 +1432,158 @@ define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bf
}
define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16_unmasked(<vscale x 32 x bfloat> %va, bfloat %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv32bf16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: vsetvli a3, zero, e16, m8, ta, ma
-; CHECK-NEXT: vmset.m v24
-; CHECK-NEXT: vmv.v.x v16, a1
-; CHECK-NEXT: slli a1, a2, 1
-; CHECK-NEXT: srli a2, a2, 2
-; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v24, a2
-; CHECK-NEXT: sltu a2, a0, a3
-; CHECK-NEXT: addi a2, a2, -1
-; CHECK-NEXT: and a2, a2, a3
-; CHECK-NEXT: addi a3, sp, 16
-; CHECK-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v16, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB25_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB25_2:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v0
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v16, v24
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv32bf16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: addi sp, sp, -16
+; ZVFH-NEXT: .cfi_def_cfa_offset 16
+; ZVFH-NEXT: csrr a1, vlenb
+; ZVFH-NEXT: slli a1, a1, 3
+; ZVFH-NEXT: sub sp, sp, a1
+; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: csrr a2, vlenb
+; ZVFH-NEXT: vsetvli a3, zero, e16, m8, ta, ma
+; ZVFH-NEXT: vmset.m v24
+; ZVFH-NEXT: vmv.v.x v16, a1
+; ZVFH-NEXT: slli a1, a2, 1
+; ZVFH-NEXT: srli a2, a2, 2
+; ZVFH-NEXT: sub a3, a0, a1
+; ZVFH-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFH-NEXT: vslidedown.vx v0, v24, a2
+; ZVFH-NEXT: sltu a2, a0, a3
+; ZVFH-NEXT: addi a2, a2, -1
+; ZVFH-NEXT: and a2, a2, a3
+; ZVFH-NEXT: addi a3, sp, 16
+; ZVFH-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFH-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t
+; ZVFH-NEXT: bltu a0, a1, .LBB25_2
+; ZVFH-NEXT: # %bb.1:
+; ZVFH-NEXT: mv a0, a1
+; ZVFH-NEXT: .LBB25_2:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8
+; ZVFH-NEXT: addi a0, sp, 16
+; ZVFH-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v0
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v16, v24
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16
+; ZVFH-NEXT: csrr a0, vlenb
+; ZVFH-NEXT: slli a0, a0, 3
+; ZVFH-NEXT: add sp, sp, a0
+; ZVFH-NEXT: .cfi_def_cfa sp, 16
+; ZVFH-NEXT: addi sp, sp, 16
+; ZVFH-NEXT: .cfi_def_cfa_offset 0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv32bf16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 3
+; ZVFHMIN-NEXT: sub sp, sp, a1
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vmset.m v24
+; ZVFHMIN-NEXT: vmv.v.x v16, a1
+; ZVFHMIN-NEXT: slli a1, a2, 1
+; ZVFHMIN-NEXT: srli a2, a2, 2
+; ZVFHMIN-NEXT: sub a3, a0, a1
+; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
+; ZVFHMIN-NEXT: sltu a2, a0, a3
+; ZVFHMIN-NEXT: addi a2, a2, -1
+; ZVFHMIN-NEXT: and a2, a2, a3
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t
+; ZVFHMIN-NEXT: bltu a0, a1, .LBB25_2
+; ZVFHMIN-NEXT: # %bb.1:
+; ZVFHMIN-NEXT: mv a0, a1
+; ZVFHMIN-NEXT: .LBB25_2:
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v0
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN-NEXT: addi sp, sp, 16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv32bf16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: addi sp, sp, -16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT: csrr a1, vlenb
+; ZVFBFA-NEXT: slli a1, a1, 3
+; ZVFBFA-NEXT: sub sp, sp, a1
+; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: csrr a2, vlenb
+; ZVFBFA-NEXT: vsetvli a3, zero, e16, m8, ta, ma
+; ZVFBFA-NEXT: vmset.m v24
+; ZVFBFA-NEXT: vmv.v.x v16, a1
+; ZVFBFA-NEXT: slli a1, a2, 1
+; ZVFBFA-NEXT: srli a2, a2, 2
+; ZVFBFA-NEXT: sub a3, a0, a1
+; ZVFBFA-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFBFA-NEXT: vslidedown.vx v0, v24, a2
+; ZVFBFA-NEXT: sltu a2, a0, a3
+; ZVFBFA-NEXT: addi a2, a2, -1
+; ZVFBFA-NEXT: and a2, a2, a3
+; ZVFBFA-NEXT: addi a3, sp, 16
+; ZVFBFA-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: vsetvli zero, a2, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; ZVFBFA-NEXT: bltu a0, a1, .LBB25_2
+; ZVFBFA-NEXT: # %bb.1:
+; ZVFBFA-NEXT: mv a0, a1
+; ZVFBFA-NEXT: .LBB25_2:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFBFA-NEXT: addi a0, sp, 16
+; ZVFBFA-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v0
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: add sp, sp, a0
+; ZVFBFA-NEXT: .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT: addi sp, sp, 16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 32 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 32 x bfloat> %elt.head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer
%v = call <vscale x 32 x bfloat> @llvm.vp.fadd.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
@@ -651,6 +1608,17 @@ define <vscale x 1 x half> @vfadd_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9, v0.t
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv1f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v9, v10, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT: ret
%v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x half> %v
}
@@ -672,6 +1640,17 @@ define <vscale x 1 x half> @vfadd_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv1f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v9, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
ret <vscale x 1 x half> %v
}
@@ -695,6 +1674,19 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16(<vscale x 1 x half> %va, half %b, <
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9, v0.t
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v10, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
@@ -720,6 +1712,19 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16_commute(<vscale x 1 x half> %va, ha
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9, v0.t
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1f16_commute:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v8, v10, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
@@ -745,6 +1750,19 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, h
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v10, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
@@ -770,6 +1788,19 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16_unmasked_commute(<vscale x 1 x half
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1f16_unmasked_commute:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v8, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
@@ -795,6 +1826,17 @@ define <vscale x 2 x half> @vfadd_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9, v0.t
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv2f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v9, v10, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT: ret
%v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x half> %v
}
@@ -816,6 +1858,17 @@ define <vscale x 2 x half> @vfadd_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv2f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v9, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x half> %v
}
@@ -839,6 +1892,19 @@ define <vscale x 2 x half> @vfadd_vf_nxv2f16(<vscale x 2 x half> %va, half %b, <
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9, v0.t
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv2f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v10, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
@@ -864,6 +1930,19 @@ define <vscale x 2 x half> @vfadd_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, h
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv2f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v10, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
@@ -889,6 +1968,17 @@ define <vscale x 4 x half> @vfadd_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10, v0.t
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv4f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v10, v12, v10, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10, v0.t
+; ZVFBFA-NEXT: ret
%v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x half> %v
}
@@ -910,6 +2000,17 @@ define <vscale x 4 x half> @vfadd_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv4f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v10, v12, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10
+; ZVFBFA-NEXT: ret
%v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
ret <vscale x 4 x half> %v
}
@@ -933,6 +2034,19 @@ define <vscale x 4 x half> @vfadd_vf_nxv4f16(<vscale x 4 x half> %va, half %b, <
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10, v0.t
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv4f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v12, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v10, v10, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10, v0.t
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
@@ -958,6 +2072,19 @@ define <vscale x 4 x half> @vfadd_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, h
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv4f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v12, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v12
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v10, v10, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
@@ -983,6 +2110,17 @@ define <vscale x 8 x half> @vfadd_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12, v0.t
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv8f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v10, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v12, v16, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12, v0.t
+; ZVFBFA-NEXT: ret
%v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x half> %v
}
@@ -1004,6 +2142,17 @@ define <vscale x 8 x half> @vfadd_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv8f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v12, v16, v12
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12
+; ZVFBFA-NEXT: ret
%v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
ret <vscale x 8 x half> %v
}
@@ -1027,6 +2176,19 @@ define <vscale x 8 x half> @vfadd_vf_nxv8f16(<vscale x 8 x half> %va, half %b, <
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12, v0.t
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv8f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v16, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v16, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v12, v12, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12, v0.t
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
@@ -1052,6 +2214,19 @@ define <vscale x 8 x half> @vfadd_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, h
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv8f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v16, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v12, v12, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
@@ -1077,6 +2252,17 @@ define <vscale x 16 x half> @vfadd_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16, v0.t
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv16f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; ZVFBFA-NEXT: ret
%v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
ret <vscale x 16 x half> %v
}
@@ -1098,6 +2284,17 @@ define <vscale x 16 x half> @vfadd_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv16f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: ret
%v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
ret <vscale x 16 x half> %v
}
@@ -1121,6 +2318,19 @@ define <vscale x 16 x half> @vfadd_vf_nxv16f16(<vscale x 16 x half> %va, half %b
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16, v0.t
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv16f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v24, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v24, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
@@ -1146,6 +2356,19 @@ define <vscale x 16 x half> @vfadd_vf_nxv16f16_unmasked(<vscale x 16 x half> %va
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv16f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v24, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v24
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
@@ -1209,6 +2432,55 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv32f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: addi sp, sp, -16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT: csrr a1, vlenb
+; ZVFBFA-NEXT: slli a1, a1, 3
+; ZVFBFA-NEXT: sub sp, sp, a1
+; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFBFA-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; ZVFBFA-NEXT: vmv1r.v v7, v0
+; ZVFBFA-NEXT: csrr a2, vlenb
+; ZVFBFA-NEXT: slli a1, a2, 1
+; ZVFBFA-NEXT: srli a2, a2, 2
+; ZVFBFA-NEXT: sub a3, a0, a1
+; ZVFBFA-NEXT: vslidedown.vx v0, v0, a2
+; ZVFBFA-NEXT: sltu a2, a0, a3
+; ZVFBFA-NEXT: addi a2, a2, -1
+; ZVFBFA-NEXT: and a2, a2, a3
+; ZVFBFA-NEXT: addi a3, sp, 16
+; ZVFBFA-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; ZVFBFA-NEXT: bltu a0, a1, .LBB48_2
+; ZVFBFA-NEXT: # %bb.1:
+; ZVFBFA-NEXT: mv a0, a1
+; ZVFBFA-NEXT: .LBB48_2:
+; ZVFBFA-NEXT: vmv1r.v v0, v7
+; ZVFBFA-NEXT: addi a1, sp, 16
+; ZVFBFA-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v24, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: add sp, sp, a0
+; ZVFBFA-NEXT: .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT: addi sp, sp, 16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x half> %v
}
@@ -1268,6 +2540,55 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv32f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: addi sp, sp, -16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT: csrr a1, vlenb
+; ZVFBFA-NEXT: slli a1, a1, 3
+; ZVFBFA-NEXT: sub sp, sp, a1
+; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFBFA-NEXT: csrr a2, vlenb
+; ZVFBFA-NEXT: vsetvli a1, zero, e8, m4, ta, ma
+; ZVFBFA-NEXT: vmset.m v24
+; ZVFBFA-NEXT: slli a1, a2, 1
+; ZVFBFA-NEXT: srli a2, a2, 2
+; ZVFBFA-NEXT: sub a3, a0, a1
+; ZVFBFA-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFBFA-NEXT: vslidedown.vx v0, v24, a2
+; ZVFBFA-NEXT: sltu a2, a0, a3
+; ZVFBFA-NEXT: addi a2, a2, -1
+; ZVFBFA-NEXT: and a2, a2, a3
+; ZVFBFA-NEXT: addi a3, sp, 16
+; ZVFBFA-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; ZVFBFA-NEXT: bltu a0, a1, .LBB49_2
+; ZVFBFA-NEXT: # %bb.1:
+; ZVFBFA-NEXT: mv a0, a1
+; ZVFBFA-NEXT: .LBB49_2:
+; ZVFBFA-NEXT: addi a1, sp, 16
+; ZVFBFA-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: add sp, sp, a0
+; ZVFBFA-NEXT: .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT: addi sp, sp, 16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
ret <vscale x 32 x half> %v
}
@@ -1340,6 +2661,68 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv32f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: addi sp, sp, -16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT: csrr a1, vlenb
+; ZVFBFA-NEXT: slli a1, a1, 4
+; ZVFBFA-NEXT: sub sp, sp, a1
+; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; ZVFBFA-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; ZVFBFA-NEXT: vmv1r.v v7, v0
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: csrr a2, vlenb
+; ZVFBFA-NEXT: vmv.v.x v24, a1
+; ZVFBFA-NEXT: slli a1, a2, 1
+; ZVFBFA-NEXT: srli a2, a2, 2
+; ZVFBFA-NEXT: sub a3, a0, a1
+; ZVFBFA-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFBFA-NEXT: vslidedown.vx v0, v0, a2
+; ZVFBFA-NEXT: sltu a2, a0, a3
+; ZVFBFA-NEXT: addi a2, a2, -1
+; ZVFBFA-NEXT: and a2, a2, a3
+; ZVFBFA-NEXT: csrr a3, vlenb
+; ZVFBFA-NEXT: slli a3, a3, 3
+; ZVFBFA-NEXT: add a3, sp, a3
+; ZVFBFA-NEXT: addi a3, a3, 16
+; ZVFBFA-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v28, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; ZVFBFA-NEXT: bltu a0, a1, .LBB50_2
+; ZVFBFA-NEXT: # %bb.1:
+; ZVFBFA-NEXT: mv a0, a1
+; ZVFBFA-NEXT: .LBB50_2:
+; ZVFBFA-NEXT: vmv1r.v v0, v7
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; ZVFBFA-NEXT: addi a0, sp, 16
+; ZVFBFA-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: add a0, sp, a0
+; ZVFBFA-NEXT: addi a0, a0, 16
+; ZVFBFA-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v16, v0.t
+; ZVFBFA-NEXT: addi a0, sp, 16
+; ZVFBFA-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 4
+; ZVFBFA-NEXT: add sp, sp, a0
+; ZVFBFA-NEXT: .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT: addi sp, sp, 16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
%v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
@@ -1403,6 +2786,57 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv32f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: addi sp, sp, -16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT: csrr a1, vlenb
+; ZVFBFA-NEXT: slli a1, a1, 3
+; ZVFBFA-NEXT: sub sp, sp, a1
+; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: csrr a2, vlenb
+; ZVFBFA-NEXT: vsetvli a3, zero, e16, m8, ta, ma
+; ZVFBFA-NEXT: vmset.m v24
+; ZVFBFA-NEXT: vmv.v.x v16, a1
+; ZVFBFA-NEXT: slli a1, a2, 1
+; ZVFBFA-NEXT: srli a2, a2, 2
+; ZVFBFA-NEXT: sub a3, a0, a1
+; ZVFBFA-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFBFA-NEXT: vslidedown.vx v0, v24, a2
+; ZVFBFA-NEXT: sltu a2, a0, a3
+; ZVFBFA-NEXT: addi a2, a2, -1
+; ZVFBFA-NEXT: and a2, a2, a3
+; ZVFBFA-NEXT: addi a3, sp, 16
+; ZVFBFA-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; ZVFBFA-NEXT: bltu a0, a1, .LBB51_2
+; ZVFBFA-NEXT: # %bb.1:
+; ZVFBFA-NEXT: mv a0, a1
+; ZVFBFA-NEXT: .LBB51_2:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFBFA-NEXT: addi a0, sp, 16
+; ZVFBFA-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v0
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: add sp, sp, a0
+; ZVFBFA-NEXT: .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT: addi sp, sp, 16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
%v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
diff --git a/llvm/test/Transforms/ADCE/2016-09-06.ll b/llvm/test/Transforms/ADCE/2016-09-06.ll
index 850f412..1329ac6 100644
--- a/llvm/test/Transforms/ADCE/2016-09-06.ll
+++ b/llvm/test/Transforms/ADCE/2016-09-06.ll
@@ -5,7 +5,7 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: nounwind uwtable
-define i32 @foo(i32, i32, i32) #0 {
+define i32 @foo(i32, i32, i32) {
%4 = alloca i32, align 4
%5 = alloca i32, align 4
%6 = alloca i32, align 4
@@ -48,8 +48,6 @@ B21:
ret i32 %I22
}
-attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.ident = !{!0}
!0 = !{!"clang version 4.0.0"}
diff --git a/llvm/test/Transforms/ADCE/blocks-with-dead-term-nondeterministic.ll b/llvm/test/Transforms/ADCE/blocks-with-dead-term-nondeterministic.ll
index 9708be9..5e844b4 100644
--- a/llvm/test/Transforms/ADCE/blocks-with-dead-term-nondeterministic.ll
+++ b/llvm/test/Transforms/ADCE/blocks-with-dead-term-nondeterministic.ll
@@ -5,7 +5,7 @@ target triple = "x86_64-apple-macosx10.10.0"
; CHECK: uselistorder label %bb16, { 1, 0 }
; Function Attrs: noinline nounwind ssp uwtable
-define void @ham(i1 %arg) local_unnamed_addr #0 {
+define void @ham(i1 %arg) local_unnamed_addr {
bb:
br i1 false, label %bb1, label %bb22
@@ -64,8 +64,6 @@ bb22: ; preds = %bb21, %bb
ret void
}
-attributes #0 = { noinline nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.module.flags = !{!0}
!0 = !{i32 7, !"PIC Level", i32 2}
diff --git a/llvm/test/Transforms/AddDiscriminators/basic.ll b/llvm/test/Transforms/AddDiscriminators/basic.ll
index 5186537..fc4c10a 100644
--- a/llvm/test/Transforms/AddDiscriminators/basic.ll
+++ b/llvm/test/Transforms/AddDiscriminators/basic.ll
@@ -11,7 +11,7 @@
; if (i < 10) x = i;
; }
-define void @foo(i32 %i) #0 !dbg !4 {
+define void @foo(i32 %i) !dbg !4 {
entry:
%i.addr = alloca i32, align 4
%x = alloca i32, align 4
@@ -35,8 +35,6 @@ if.end: ; preds = %if.then, %entry
; CHECK: ret void, !dbg ![[END:[0-9]+]]
}
-attributes #0 = { nounwind uwtable noinline optnone "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!7, !8}
!llvm.ident = !{!9}
diff --git a/llvm/test/Transforms/AddDiscriminators/call-nested.ll b/llvm/test/Transforms/AddDiscriminators/call-nested.ll
index 99340a5..f1373e4 100644
--- a/llvm/test/Transforms/AddDiscriminators/call-nested.ll
+++ b/llvm/test/Transforms/AddDiscriminators/call-nested.ll
@@ -9,7 +9,7 @@
; #6 }
; Function Attrs: uwtable
-define i32 @_Z3bazv() #0 !dbg !4 {
+define i32 @_Z3bazv() !dbg !4 {
%1 = call i32 @_Z3barv(), !dbg !11
; CHECK: %1 = call i32 @_Z3barv(), !dbg ![[CALL0:[0-9]+]]
%2 = call i32 @_Z3barv(), !dbg !12
@@ -19,12 +19,9 @@ define i32 @_Z3bazv() #0 !dbg !4 {
ret i32 %3, !dbg !14
}
-declare i32 @_Z3fooii(i32, i32) #1
+declare i32 @_Z3fooii(i32, i32)
-declare i32 @_Z3barv() #1
-
-attributes #0 = { uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+declare i32 @_Z3barv()
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!8, !9}
diff --git a/llvm/test/Transforms/AddDiscriminators/call.ll b/llvm/test/Transforms/AddDiscriminators/call.ll
index 93d3aa4..11b21ef 100644
--- a/llvm/test/Transforms/AddDiscriminators/call.ll
+++ b/llvm/test/Transforms/AddDiscriminators/call.ll
@@ -8,7 +8,7 @@
; #5 }
; Function Attrs: uwtable
-define void @_Z3foov() #0 !dbg !4 {
+define void @_Z3foov() !dbg !4 {
call void @_Z3barv(), !dbg !10
; CHECK: call void @_Z3barv(), !dbg ![[CALL0:[0-9]+]]
%a = alloca [100 x i8], align 16
@@ -21,13 +21,10 @@ define void @_Z3foov() #0 !dbg !4 {
ret void, !dbg !13
}
-declare void @_Z3barv() #1
+declare void @_Z3barv()
declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind argmemonly
declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind argmemonly
-attributes #0 = { uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!7, !8}
!llvm.ident = !{!9}
diff --git a/llvm/test/Transforms/AddDiscriminators/diamond.ll b/llvm/test/Transforms/AddDiscriminators/diamond.ll
index c93a57a..9edcf39 100644
--- a/llvm/test/Transforms/AddDiscriminators/diamond.ll
+++ b/llvm/test/Transforms/AddDiscriminators/diamond.ll
@@ -12,7 +12,7 @@
; bar(3): discriminator 2
; Function Attrs: uwtable
-define void @_Z3fooi(i32 %i) #0 !dbg !4 {
+define void @_Z3fooi(i32 %i) !dbg !4 {
%1 = alloca i32, align 4
store i32 %i, ptr %1, align 4
call void @llvm.dbg.declare(metadata ptr %1, metadata !11, metadata !12), !dbg !13
@@ -34,13 +34,9 @@ define void @_Z3fooi(i32 %i) #0 !dbg !4 {
}
; Function Attrs: nounwind readnone
-declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
-declare void @_Z3bari(i32) #2
-
-attributes #0 = { uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone }
-attributes #2 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
+declare void @_Z3bari(i32)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!8, !9}
diff --git a/llvm/test/Transforms/AddDiscriminators/first-only.ll b/llvm/test/Transforms/AddDiscriminators/first-only.ll
index 7ae9ed0..415e5f0 100644
--- a/llvm/test/Transforms/AddDiscriminators/first-only.ll
+++ b/llvm/test/Transforms/AddDiscriminators/first-only.ll
@@ -13,7 +13,7 @@
; }
; }
-define void @foo(i32 %i) #0 !dbg !4 {
+define void @foo(i32 %i) !dbg !4 {
entry:
%i.addr = alloca i32, align 4
%x = alloca i32, align 4
@@ -44,8 +44,6 @@ if.end: ; preds = %if.then, %entry
; CHECK: ret void, !dbg ![[END:[0-9]+]]
}
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!7, !8}
!llvm.ident = !{!9}
diff --git a/llvm/test/Transforms/AddDiscriminators/invoke.ll b/llvm/test/Transforms/AddDiscriminators/invoke.ll
index d39014d..a3989b6 100644
--- a/llvm/test/Transforms/AddDiscriminators/invoke.ll
+++ b/llvm/test/Transforms/AddDiscriminators/invoke.ll
@@ -5,14 +5,14 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.14.0"
; Function Attrs: ssp uwtable
-define void @_Z3foov() #0 personality ptr @__gxx_personality_v0 !dbg !8 {
+define void @_Z3foov() personality ptr @__gxx_personality_v0 !dbg !8 {
entry:
%exn.slot = alloca ptr
%ehselector.slot = alloca i32
; CHECK: call void @_Z12bar_noexceptv({{.*}} !dbg ![[CALL1:[0-9]+]]
- call void @_Z12bar_noexceptv() #4, !dbg !11
+ call void @_Z12bar_noexceptv(), !dbg !11
; CHECK: call void @_Z12bar_noexceptv({{.*}} !dbg ![[CALL2:[0-9]+]]
- call void @_Z12bar_noexceptv() #4, !dbg !13
+ call void @_Z12bar_noexceptv(), !dbg !13
invoke void @_Z3barv()
; CHECK: unwind label {{.*}} !dbg ![[INVOKE:[0-9]+]]
to label %invoke.cont unwind label %lpad, !dbg !14
@@ -31,8 +31,8 @@ lpad: ; preds = %entry
catch: ; preds = %lpad
%exn = load ptr, ptr %exn.slot, align 8, !dbg !15
- %3 = call ptr @__cxa_begin_catch(ptr %exn) #4, !dbg !15
- invoke void @__cxa_rethrow() #5
+ %3 = call ptr @__cxa_begin_catch(ptr %exn), !dbg !15
+ invoke void @__cxa_rethrow()
to label %unreachable unwind label %lpad1, !dbg !17
lpad1: ; preds = %catch
@@ -62,7 +62,7 @@ terminate.lpad: ; preds = %lpad1
%7 = landingpad { ptr, i32 }
catch ptr null, !dbg !20
%8 = extractvalue { ptr, i32 } %7, 0, !dbg !20
- call void @__clang_call_terminate(ptr %8) #6, !dbg !20
+ call void @__clang_call_terminate(ptr %8), !dbg !20
unreachable, !dbg !20
unreachable: ; preds = %catch
@@ -70,9 +70,9 @@ unreachable: ; preds = %catch
}
; Function Attrs: nounwind
-declare void @_Z12bar_noexceptv() #1
+declare void @_Z12bar_noexceptv()
-declare void @_Z3barv() #2
+declare void @_Z3barv()
declare i32 @__gxx_personality_v0(...)
@@ -83,22 +83,14 @@ declare void @__cxa_rethrow()
declare void @__cxa_end_catch()
; Function Attrs: noinline noreturn nounwind
-define linkonce_odr hidden void @__clang_call_terminate(ptr) #3 {
- %2 = call ptr @__cxa_begin_catch(ptr %0) #4
- call void @_ZSt9terminatev() #6
+define linkonce_odr hidden void @__clang_call_terminate(ptr) {
+ %2 = call ptr @__cxa_begin_catch(ptr %0)
+ call void @_ZSt9terminatev()
unreachable
}
declare void @_ZSt9terminatev()
-attributes #0 = { ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { noinline noreturn nounwind }
-attributes #4 = { nounwind }
-attributes #5 = { noreturn }
-attributes #6 = { noreturn nounwind }
-
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4, !5, !6}
!llvm.ident = !{!7}
diff --git a/llvm/test/Transforms/AddDiscriminators/multiple.ll b/llvm/test/Transforms/AddDiscriminators/multiple.ll
index 54c1a5d..8e8ca6a 100644
--- a/llvm/test/Transforms/AddDiscriminators/multiple.ll
+++ b/llvm/test/Transforms/AddDiscriminators/multiple.ll
@@ -10,7 +10,7 @@
; The two stores inside the if-then-else line must have different discriminator
; values.
-define void @foo(i32 %i) #0 !dbg !4 {
+define void @foo(i32 %i) !dbg !4 {
entry:
%i.addr = alloca i32, align 4
%x = alloca i32, align 4
@@ -45,8 +45,6 @@ if.end: ; preds = %if.else, %if.then
ret void, !dbg !12
}
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!7, !8}
!llvm.ident = !{!9}
diff --git a/llvm/test/Transforms/AddDiscriminators/no-discriminators.ll b/llvm/test/Transforms/AddDiscriminators/no-discriminators.ll
index c23edd6..f84579b 100644
--- a/llvm/test/Transforms/AddDiscriminators/no-discriminators.ll
+++ b/llvm/test/Transforms/AddDiscriminators/no-discriminators.ll
@@ -12,7 +12,7 @@
; altered. If they are, it means that the discriminators pass added a
; new lexical scope.
-define i32 @foo(i64 %i) #0 !dbg !4 {
+define i32 @foo(i64 %i) !dbg !4 {
entry:
%retval = alloca i32, align 4
%i.addr = alloca i64, align 8
@@ -39,10 +39,7 @@ return: ; preds = %if.else, %if.then
}
; Function Attrs: nounwind readnone
-declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
-
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone }
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
; We should be able to add discriminators even in the absence of llvm.dbg.cu.
; When using sample profiles, the front end will generate line tables but it
diff --git a/llvm/test/Transforms/AddDiscriminators/oneline.ll b/llvm/test/Transforms/AddDiscriminators/oneline.ll
index 533d547..fc1675b 100644
--- a/llvm/test/Transforms/AddDiscriminators/oneline.ll
+++ b/llvm/test/Transforms/AddDiscriminators/oneline.ll
@@ -10,7 +10,7 @@
; return 100: discriminator 4
; return 99: discriminator 6
-define i32 @_Z3fooi(i32 %i) #0 !dbg !4 {
+define i32 @_Z3fooi(i32 %i) !dbg !4 {
%1 = alloca i32, align 4
%2 = alloca i32, align 4
store i32 %i, ptr %2, align 4, !tbaa !13
@@ -49,10 +49,7 @@ define i32 @_Z3fooi(i32 %i) #0 !dbg !4 {
}
; Function Attrs: nounwind readnone
-declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
-
-attributes #0 = { nounwind uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone }
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!10, !11}
diff --git a/llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll b/llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll
index eb7d78f..4704238 100644
--- a/llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll
+++ b/llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll
@@ -1557,24 +1557,24 @@ declare dso_local void @_GLOBAL__sub_I_register_benchmark_test.cc() #0 section "
; Function Attrs: cold noreturn nounwind
declare void @llvm.trap() #20
-attributes #0 = { uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
+attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
+attributes #2 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
attributes #3 = { nounwind }
-attributes #4 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #4 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
attributes #5 = { argmemonly nounwind willreturn }
-attributes #6 = { alwaysinline uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #7 = { alwaysinline nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #8 = { nobuiltin "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #9 = { nobuiltin nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #10 = { inlinehint uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #11 = { inlinehint nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #12 = { noreturn nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #13 = { norecurse uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #6 = { alwaysinline uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
+attributes #7 = { alwaysinline nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
+attributes #8 = { nobuiltin "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
+attributes #9 = { nobuiltin nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
+attributes #10 = { inlinehint uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
+attributes #11 = { inlinehint nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
+attributes #12 = { noreturn nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
+attributes #13 = { norecurse uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
attributes #14 = { nounwind readnone willreturn }
-attributes #15 = { noreturn "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #15 = { noreturn "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
attributes #16 = { noinline noreturn nounwind }
attributes #17 = { argmemonly nounwind willreturn writeonly }
-attributes #18 = { noreturn uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #19 = { inlinehint noreturn uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #18 = { noreturn uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
+attributes #19 = { inlinehint noreturn uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
attributes #20 = { cold noreturn nounwind }
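A note on the cleanup above and in the similar test diffs below: the dropped key-value pairs such as "no-infs-fp-math"="false" and "unsafe-fp-math"="false" are string function attributes, and an explicit "false" reads back the same as no attribute at all, so deleting them from the tests is behavior-preserving. A minimal C++ sketch of that reading; hasUnsafeFPMath is a hypothetical helper written for illustration, not part of this patch:

  #include "llvm/IR/Function.h"

  // Hypothetical helper: an absent string attribute and an explicit
  // "false" value are indistinguishable to this query, which is why the
  // attribute lines above could be dropped from the tests.
  static bool hasUnsafeFPMath(const llvm::Function &F) {
    // getFnAttribute returns an empty-valued Attribute when the key is
    // missing, so getValueAsString() yields "" rather than "true".
    return F.getFnAttribute("unsafe-fp-math").getValueAsString() == "true";
  }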
diff --git a/llvm/test/Transforms/CodeGenPrepare/ARM/bitreverse-recognize.ll b/llvm/test/Transforms/CodeGenPrepare/ARM/bitreverse-recognize.ll
index d272fef..189186b 100644
--- a/llvm/test/Transforms/CodeGenPrepare/ARM/bitreverse-recognize.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/ARM/bitreverse-recognize.ll
@@ -4,7 +4,7 @@ target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "armv7--linux-gnueabihf"
; CHECK-LABEL: @f
-define i32 @f(i32 %a) #0 {
+define i32 @f(i32 %a) {
; CHECK: call i32 @llvm.bitreverse.i32
entry:
br label %for.body
@@ -25,8 +25,6 @@ for.body: ; preds = %for.body, %entry
br i1 %exitcond, label %for.cond.cleanup, label %for.body, !llvm.loop !3
}
-attributes #0 = { norecurse nounwind readnone "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a8" "target-features"="+dsp,+neon,+vfp3" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.module.flags = !{!0, !1}
!llvm.ident = !{!2}
diff --git a/llvm/test/Transforms/CodeGenPrepare/X86/bitreverse-hang.ll b/llvm/test/Transforms/CodeGenPrepare/X86/bitreverse-hang.ll
index eec0967..35115cf 100644
--- a/llvm/test/Transforms/CodeGenPrepare/X86/bitreverse-hang.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/X86/bitreverse-hang.ll
@@ -21,7 +21,7 @@
@b = common global i32 0, align 4
; CHECK: define i32 @fn1
-define i32 @fn1() #0 {
+define i32 @fn1() {
entry:
%b.promoted = load i32, ptr @b, align 4, !tbaa !2
br label %for.body
@@ -40,8 +40,6 @@ for.end: ; preds = %for.body
ret i32 undef
}
-attributes #0 = { norecurse nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
diff --git a/llvm/test/Transforms/CodeGenPrepare/dom-tree.ll b/llvm/test/Transforms/CodeGenPrepare/dom-tree.ll
index 1c990ff..14360fe 100644
--- a/llvm/test/Transforms/CodeGenPrepare/dom-tree.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/dom-tree.ll
@@ -10,7 +10,7 @@
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "armv7--linux-gnueabihf"
-define i32 @f(i32 %a) #0 {
+define i32 @f(i32 %a) {
entry:
br label %for.body
@@ -30,8 +30,6 @@ for.body:
br i1 %exitcond, label %for.cond.cleanup, label %for.body, !llvm.loop !3
}
-attributes #0 = { norecurse nounwind readnone "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a8" "target-features"="+dsp,+neon,+vfp3" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.module.flags = !{!0, !1}
!llvm.ident = !{!2}
diff --git a/llvm/test/Transforms/ConstantHoisting/X86/ehpad.ll b/llvm/test/Transforms/ConstantHoisting/X86/ehpad.ll
index 46fa066..78760db 100644
--- a/llvm/test/Transforms/ConstantHoisting/X86/ehpad.ll
+++ b/llvm/test/Transforms/ConstantHoisting/X86/ehpad.ll
@@ -20,7 +20,7 @@ target triple = "x86_64-pc-windows-msvc"
; BFIHOIST: br label %endif
; Function Attrs: norecurse
-define i32 @main(i32 %argc, ptr nocapture readnone %argv) local_unnamed_addr #0 personality ptr @__CxxFrameHandler3 {
+define i32 @main(i32 %argc, ptr nocapture readnone %argv) local_unnamed_addr personality ptr @__CxxFrameHandler3 {
%call = tail call i64 @fn(i64 0)
%call1 = tail call i64 @fn(i64 1)
%tobool = icmp eq i32 %argc, 0
@@ -62,9 +62,6 @@ endif:
ret i32 0
}
-declare i64 @fn(i64) local_unnamed_addr #1
+declare i64 @fn(i64) local_unnamed_addr
declare i32 @__CxxFrameHandler3(...)
-
-attributes #0 = { norecurse "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "stack-protector-buffer-size"="8" "target-features"="+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "stack-protector-buffer-size"="8" "target-features"="+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/Coroutines/coro-debug.ll b/llvm/test/Transforms/Coroutines/coro-debug.ll
index d1f1922..109be51f 100644
--- a/llvm/test/Transforms/Coroutines/coro-debug.ll
+++ b/llvm/test/Transforms/Coroutines/coro-debug.ll
@@ -14,7 +14,7 @@ entry:
%0 = call token @llvm.coro.id(i32 0, ptr null, ptr @flink, ptr null), !dbg !16
%1 = call i64 @llvm.coro.size.i64(), !dbg !16
%call = call ptr @malloc(i64 %1), !dbg !16
- %2 = call ptr @llvm.coro.begin(token %0, ptr %call) #7, !dbg !16
+ %2 = call ptr @llvm.coro.begin(token %0, ptr %call), !dbg !16
store ptr %2, ptr %coro_hdl, align 8, !dbg !16
%3 = call i8 @llvm.coro.suspend(token none, i1 false), !dbg !17
%conv = sext i8 %3 to i32, !dbg !17
@@ -69,7 +69,7 @@ coro_Cleanup: ; preds = %sw.epilog, %sw.bb1
br label %coro_Suspend, !dbg !24
coro_Suspend: ; preds = %coro_Cleanup, %sw.default
- call void @llvm.coro.end(ptr null, i1 false, token none) #7, !dbg !24
+ call void @llvm.coro.end(ptr null, i1 false, token none), !dbg !24
%7 = load ptr, ptr %coro_hdl, align 8, !dbg !24
store i32 0, ptr %late_local, !dbg !24
ret ptr %7, !dbg !24
@@ -82,47 +82,40 @@ ehcleanup:
}
; Function Attrs: nounwind readnone speculatable
-declare void @llvm.dbg.value(metadata, metadata, metadata) #1
+declare void @llvm.dbg.value(metadata, metadata, metadata)
; Function Attrs: nounwind readnone speculatable
-declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
; Function Attrs: argmemonly nounwind readonly
-declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr) #2
+declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr)
-declare ptr @malloc(i64) #3
+declare ptr @malloc(i64)
declare ptr @allocate()
declare void @print({ ptr, i32 })
declare void @log()
; Function Attrs: nounwind readnone
-declare i64 @llvm.coro.size.i64() #4
+declare i64 @llvm.coro.size.i64()
; Function Attrs: nounwind
-declare ptr @llvm.coro.begin(token, ptr writeonly) #5
+declare ptr @llvm.coro.begin(token, ptr writeonly)
; Function Attrs: nounwind
-declare i8 @llvm.coro.suspend(token, i1) #5
+declare i8 @llvm.coro.suspend(token, i1)
-declare void @free(ptr) #3
+declare void @free(ptr)
; Function Attrs: argmemonly nounwind readonly
-declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
+declare ptr @llvm.coro.free(token, ptr nocapture readonly)
; Function Attrs: nounwind
-declare void @llvm.coro.end(ptr, i1, token) #5
+declare void @llvm.coro.end(ptr, i1, token)
; Function Attrs: argmemonly nounwind readonly
-declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #2
-
-attributes #0 = { noinline nounwind presplitcoroutine "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone speculatable }
-attributes #2 = { argmemonly nounwind readonly }
-attributes #3 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #4 = { nounwind readnone }
-attributes #5 = { nounwind }
-attributes #6 = { alwaysinline }
-attributes #7 = { noduplicate }
+declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8)
+
+attributes #0 = { noinline nounwind presplitcoroutine }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4}
diff --git a/llvm/test/Transforms/Coroutines/coro-split-dbg.ll b/llvm/test/Transforms/Coroutines/coro-split-dbg.ll
index c53bea8..577ca9a 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-dbg.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-dbg.ll
@@ -6,9 +6,9 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: nounwind readnone
-declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
-declare void @bar(...) local_unnamed_addr #2
+declare void @bar(...) local_unnamed_addr
; Function Attrs: nounwind uwtable
define ptr @f() #3 !dbg !16 {
@@ -16,14 +16,14 @@ entry:
%0 = tail call token @llvm.coro.id(i32 0, ptr null, ptr @f, ptr null), !dbg !26
%1 = tail call i64 @llvm.coro.size.i64(), !dbg !26
%call = tail call ptr @malloc(i64 %1), !dbg !26
- %2 = tail call ptr @llvm.coro.begin(token %0, ptr %call) #9, !dbg !26
+ %2 = tail call ptr @llvm.coro.begin(token %0, ptr %call), !dbg !26
tail call void @llvm.dbg.value(metadata ptr %2, metadata !21, metadata !12), !dbg !26
br label %for.cond, !dbg !27
for.cond: ; preds = %for.cond, %entry
tail call void @llvm.dbg.value(metadata i32 undef, metadata !22, metadata !12), !dbg !28
- tail call void @llvm.dbg.value(metadata i32 undef, metadata !11, metadata !12) #7, !dbg !29
- tail call void (...) @bar() #7, !dbg !33
+ tail call void @llvm.dbg.value(metadata i32 undef, metadata !11, metadata !12), !dbg !29
+ tail call void (...) @bar(), !dbg !33
%3 = tail call token @llvm.coro.save(ptr null), !dbg !34
%4 = tail call i8 @llvm.coro.suspend(token %3, i1 false), !dbg !34
%conv = sext i8 %4 to i32, !dbg !34
@@ -38,40 +38,31 @@ coro_Cleanup: ; preds = %for.cond
br label %coro_Suspend, !dbg !36
coro_Suspend: ; preds = %for.cond, %if.then, %coro_Cleanup
- tail call void @llvm.coro.end(ptr null, i1 false, token none) #9, !dbg !38
+ tail call void @llvm.coro.end(ptr null, i1 false, token none), !dbg !38
ret ptr %2, !dbg !39
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture)
; Function Attrs: argmemonly nounwind readonly
-declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr) #5
+declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr)
; Function Attrs: nounwind
-declare noalias ptr @malloc(i64) local_unnamed_addr #6
-declare i64 @llvm.coro.size.i64() #1
-declare ptr @llvm.coro.begin(token, ptr writeonly) #7
-declare token @llvm.coro.save(ptr) #7
-declare i8 @llvm.coro.suspend(token, i1) #7
-declare void @llvm.lifetime.end.p0(ptr nocapture) #4
-declare ptr @llvm.coro.free(token, ptr nocapture readonly) #5
-declare void @free(ptr nocapture) local_unnamed_addr #6
-declare void @llvm.coro.end(ptr, i1, token) #7
-declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #5
+declare noalias ptr @malloc(i64) local_unnamed_addr
+declare i64 @llvm.coro.size.i64()
+declare ptr @llvm.coro.begin(token, ptr writeonly)
+declare token @llvm.coro.save(ptr)
+declare i8 @llvm.coro.suspend(token, i1)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
+declare ptr @llvm.coro.free(token, ptr nocapture readonly)
+declare void @free(ptr nocapture) local_unnamed_addr
+declare void @llvm.coro.end(ptr, i1, token)
+declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8)
-declare void @llvm.dbg.value(metadata, metadata, metadata) #1
+declare void @llvm.dbg.value(metadata, metadata, metadata)
-attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone }
-attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { nounwind uwtable presplitcoroutine "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #4 = { argmemonly nounwind }
-attributes #5 = { argmemonly nounwind readonly }
-attributes #6 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #7 = { nounwind }
-attributes #8 = { alwaysinline nounwind }
-attributes #9 = { noduplicate }
+attributes #3 = { nounwind uwtable presplitcoroutine }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4}
diff --git a/llvm/test/Transforms/Util/dbg-user-of-aext.ll b/llvm/test/Transforms/Util/dbg-user-of-aext.ll
index 9e7935e..b3d1b90 100644
--- a/llvm/test/Transforms/Util/dbg-user-of-aext.ll
+++ b/llvm/test/Transforms/Util/dbg-user-of-aext.ll
@@ -27,7 +27,7 @@
%struct.foo = type { i8, i64 }
; Function Attrs: noinline nounwind uwtable
-define void @_Z1fbb3foo(i1 zeroext %b, i1 zeroext %frag, i8 %g.coerce0, i64 %g.coerce1) #0 !dbg !6 {
+define void @_Z1fbb3foo(i1 zeroext %b, i1 zeroext %frag, i8 %g.coerce0, i64 %g.coerce1) !dbg !6 {
entry:
%g = alloca %struct.foo, align 8
%b.addr = alloca i8, align 1
@@ -51,10 +51,7 @@ entry:
; CHECK: ![[VAR_FRAG]] = !DILocalVariable(name: "frag"
; Function Attrs: nounwind readnone speculatable
-declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
-
-attributes #0 = { noinline nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone speculatable }
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4}
diff --git a/llvm/test/Transforms/Util/libcalls-fast-math-inf-loop.ll b/llvm/test/Transforms/Util/libcalls-fast-math-inf-loop.ll
index ad23bf7..e9f0c8c 100644
--- a/llvm/test/Transforms/Util/libcalls-fast-math-inf-loop.ll
+++ b/llvm/test/Transforms/Util/libcalls-fast-math-inf-loop.ll
@@ -19,18 +19,18 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"
; Function Attrs: nounwind
-define float @fn(float %f) #0 {
+define float @fn(float %f) {
; CHECK: define float @fn(
; CHECK: call fast float @expf(
%f.addr = alloca float, align 4
store float %f, ptr %f.addr, align 4, !tbaa !1
%1 = load float, ptr %f.addr, align 4, !tbaa !1
- %call = call fast float @expf(float %1) #3
+ %call = call fast float @expf(float %1)
ret float %call
}
; Function Attrs: inlinehint nounwind readnone
-define available_externally float @expf(float %x) #1 {
+define available_externally float @expf(float %x) {
; CHECK: define available_externally float @expf(
; CHECK: fpext float
; CHECK: call fast double @exp(
@@ -39,17 +39,13 @@ define available_externally float @expf(float %x) #1 {
store float %x, ptr %x.addr, align 4, !tbaa !1
%1 = load float, ptr %x.addr, align 4, !tbaa !1
%conv = fpext float %1 to double
- %call = call fast double @exp(double %conv) #3
+ %call = call fast double @exp(double %conv)
%conv1 = fptrunc double %call to float
ret float %conv1
}
; Function Attrs: nounwind readnone
-declare double @exp(double) #2
-
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { inlinehint nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind readnone }
+declare double @exp(double)
!llvm.ident = !{!0}
diff --git a/llvm/test/tools/llvm-ir2vec/embeddings-flowaware.ll b/llvm/test/tools/llvm-ir2vec/embeddings-flowaware.ll
index b2362f8..ade228d 100644
--- a/llvm/test/tools/llvm-ir2vec/embeddings-flowaware.ll
+++ b/llvm/test/tools/llvm-ir2vec/embeddings-flowaware.ll
@@ -49,7 +49,7 @@ entry:
; CHECK-FUNC-LEVEL-ABC: Function: abc
; CHECK-FUNC-LEVEL-NEXT-ABC: [ 3630.00 3672.00 3714.00 ]
-; CHECK-FUNC-DEF: Error: Function 'def' not found
+; CHECK-FUNC-DEF: error: Function 'def' not found
; CHECK-BB-LEVEL: Function: abc
; CHECK-BB-LEVEL-NEXT: entry: [ 3630.00 3672.00 3714.00 ]
diff --git a/llvm/test/tools/llvm-ir2vec/embeddings-symbolic.ll b/llvm/test/tools/llvm-ir2vec/embeddings-symbolic.ll
index f9aa108..9d60e12 100644
--- a/llvm/test/tools/llvm-ir2vec/embeddings-symbolic.ll
+++ b/llvm/test/tools/llvm-ir2vec/embeddings-symbolic.ll
@@ -49,7 +49,7 @@ entry:
; CHECK-FUNC-LEVEL-ABC: Function: abc
; CHECK-FUNC-LEVEL-NEXT-ABC: [ 878.00 889.00 900.00 ]
-; CHECK-FUNC-DEF: Error: Function 'def' not found
+; CHECK-FUNC-DEF: error: Function 'def' not found
; CHECK-BB-LEVEL: Function: abc
; CHECK-BB-LEVEL-NEXT: entry: [ 878.00 889.00 900.00 ]
diff --git a/llvm/test/tools/llvm-ir2vec/embeddings-symbolic.mir b/llvm/test/tools/llvm-ir2vec/embeddings-symbolic.mir
index e5f78bf..ef835fe 100644
--- a/llvm/test/tools/llvm-ir2vec/embeddings-symbolic.mir
+++ b/llvm/test/tools/llvm-ir2vec/embeddings-symbolic.mir
@@ -67,7 +67,7 @@ body: |
# CHECK-FUNC-LEVEL-ADD-NEXT: Function vector: [ 26.50 27.10 27.70 ]
# CHECK-FUNC-LEVEL-ADD-NOT: simple_function
-# CHECK-FUNC-MISSING: Error: Function 'missing_function' not found
+# CHECK-FUNC-MISSING: error: Function 'missing_function' not found
# CHECK-BB-LEVEL: MIR2Vec embeddings for machine function add_function:
# CHECK-BB-LEVEL-NEXT: Basic block vectors:
diff --git a/llvm/test/tools/llvm-ir2vec/error-handling.ll b/llvm/test/tools/llvm-ir2vec/error-handling.ll
index b944ea0..8e9e455 100644
--- a/llvm/test/tools/llvm-ir2vec/error-handling.ll
+++ b/llvm/test/tools/llvm-ir2vec/error-handling.ll
@@ -10,4 +10,4 @@ entry:
}
; CHECK-NO-VOCAB: error: IR2Vec vocabulary file path not specified; You may need to set it using --ir2vec-vocab-path
-; CHECK-FUNC-NOT-FOUND: Error: Function 'nonexistent' not found
+; CHECK-FUNC-NOT-FOUND: error: Function 'nonexistent' not found
diff --git a/llvm/test/tools/llvm-ir2vec/error-handling.mir b/llvm/test/tools/llvm-ir2vec/error-handling.mir
index 154078c..caec454c 100644
--- a/llvm/test/tools/llvm-ir2vec/error-handling.mir
+++ b/llvm/test/tools/llvm-ir2vec/error-handling.mir
@@ -31,11 +31,11 @@ body: |
$eax = COPY %0
RET 0, $eax
-# CHECK-NO-VOCAB: Error: Failed to load MIR2Vec vocabulary - MIR2Vec vocabulary file path not specified; set it using --mir2vec-vocab-path
+# CHECK-NO-VOCAB: error: Failed to load MIR2Vec vocabulary - MIR2Vec vocabulary file path not specified; set it using --mir2vec-vocab-path
-# CHECK-VOCAB-NOT-FOUND: Error: Failed to load MIR2Vec vocabulary
+# CHECK-VOCAB-NOT-FOUND: error: Failed to load MIR2Vec vocabulary
# CHECK-VOCAB-NOT-FOUND: No such file or directory
-# CHECK-INVALID-VOCAB: Error: Failed to load MIR2Vec vocabulary - Missing 'Opcodes' section in vocabulary file
+# CHECK-INVALID-VOCAB: error: Failed to load MIR2Vec vocabulary - Missing 'Opcodes' section in vocabulary file
-# CHECK-FUNC-NOT-FOUND: Error: Function 'nonexistent_function' not found
+# CHECK-FUNC-NOT-FOUND: error: Function 'nonexistent_function' not found
diff --git a/llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp b/llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp
index c41cf205..a723d37 100644
--- a/llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp
+++ b/llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp
@@ -77,6 +77,8 @@
namespace llvm {
+static const char *ToolName = "llvm-ir2vec";
+
// Common option category for options shared between IR2Vec and MIR2Vec
static cl::OptionCategory CommonCategory("Common Options",
"Options applicable to both IR2Vec "
@@ -258,7 +260,8 @@ public:
/// Generate embeddings for the entire module
void generateEmbeddings(raw_ostream &OS) const {
if (!Vocab->isValid()) {
- OS << "Error: Vocabulary is not valid. IR2VecTool not initialized.\n";
+ WithColor::error(errs(), ToolName)
+ << "Vocabulary is not valid. IR2VecTool not initialized.\n";
return;
}
@@ -277,8 +280,8 @@ public:
assert(Vocab->isValid() && "Vocabulary is not valid");
auto Emb = Embedder::create(IR2VecEmbeddingKind, F, *Vocab);
if (!Emb) {
- OS << "Error: Failed to create embedder for function " << F.getName()
- << "\n";
+ WithColor::error(errs(), ToolName)
+ << "Failed to create embedder for function " << F.getName() << "\n";
return;
}
@@ -351,13 +354,14 @@ private:
public:
explicit MIR2VecTool(MachineModuleInfo &MMI) : MMI(MMI) {}
- /// Initialize the MIR2Vec vocabulary
+ /// Initialize MIR2Vec vocabulary
bool initializeVocabulary(const Module &M) {
MIR2VecVocabProvider Provider(MMI);
auto VocabOrErr = Provider.getVocabulary(M);
if (!VocabOrErr) {
- errs() << "Error: Failed to load MIR2Vec vocabulary - "
- << toString(VocabOrErr.takeError()) << "\n";
+ WithColor::error(errs(), ToolName)
+ << "Failed to load MIR2Vec vocabulary - "
+ << toString(VocabOrErr.takeError()) << "\n";
return false;
}
Vocab = std::make_unique<MIRVocabulary>(std::move(*VocabOrErr));
@@ -367,7 +371,7 @@ public:
/// Generate embeddings for all machine functions in the module
void generateEmbeddings(const Module &M, raw_ostream &OS) const {
if (!Vocab) {
- OS << "Error: Vocabulary not initialized.\n";
+ WithColor::error(errs(), ToolName) << "Vocabulary not initialized.\n";
return;
}
@@ -377,7 +381,8 @@ public:
MachineFunction *MF = MMI.getMachineFunction(F);
if (!MF) {
- errs() << "Warning: No MachineFunction for " << F.getName() << "\n";
+ WithColor::warning(errs(), ToolName)
+ << "No MachineFunction for " << F.getName() << "\n";
continue;
}
@@ -388,13 +393,14 @@ public:
/// Generate embeddings for a specific machine function
void generateEmbeddings(MachineFunction &MF, raw_ostream &OS) const {
if (!Vocab) {
- OS << "Error: Vocabulary not initialized.\n";
+ WithColor::error(errs(), ToolName) << "Vocabulary not initialized.\n";
return;
}
auto Emb = MIREmbedder::create(MIR2VecKind::Symbolic, MF, *Vocab);
if (!Emb) {
- errs() << "Error: Failed to create embedder for " << MF.getName() << "\n";
+ WithColor::error(errs(), ToolName)
+ << "Failed to create embedder for " << MF.getName() << "\n";
return;
}
@@ -456,7 +462,8 @@ int main(int argc, char **argv) {
std::error_code EC;
raw_fd_ostream OS(OutputFilename, EC);
if (EC) {
- errs() << "Error opening output file: " << EC.message() << "\n";
+ WithColor::error(errs(), ToolName)
+ << "opening output file: " << EC.message() << "\n";
return 1;
}
@@ -472,13 +479,13 @@ int main(int argc, char **argv) {
LLVMContext Context;
std::unique_ptr<Module> M = parseIRFile(InputFilename, Err, Context);
if (!M) {
- Err.print(argv[0], errs());
+ Err.print(ToolName, errs());
return 1;
}
if (Error Err = processModule(*M, OS)) {
handleAllErrors(std::move(Err), [&](const ErrorInfoBase &EIB) {
- errs() << "Error: " << EIB.message() << "\n";
+ WithColor::error(errs(), ToolName) << EIB.message() << "\n";
});
return 1;
}
@@ -499,7 +506,7 @@ int main(int argc, char **argv) {
auto MIR = createMIRParserFromFile(InputFilename, Err, Context);
if (!MIR) {
- Err.print(argv[0], WithColor::error(errs(), argv[0]));
+ Err.print(ToolName, errs());
return 1;
}
@@ -511,7 +518,7 @@ int main(int argc, char **argv) {
TheTriple.setTriple(sys::getDefaultTargetTriple());
auto TMOrErr = codegen::createTargetMachineForTriple(TheTriple.str());
if (!TMOrErr) {
- Err.print(argv[0], WithColor::error(errs(), argv[0]));
+ Err.print(ToolName, errs());
exit(1);
}
TM = std::move(*TMOrErr);
@@ -520,14 +527,14 @@ int main(int argc, char **argv) {
std::unique_ptr<Module> M = MIR->parseIRModule(SetDataLayout);
if (!M) {
- Err.print(argv[0], WithColor::error(errs(), argv[0]));
+ Err.print(ToolName, errs());
return 1;
}
// Parse machine functions
auto MMI = std::make_unique<MachineModuleInfo>(TM.get());
if (!MMI || MIR->parseMachineFunctions(*M, *MMI)) {
- Err.print(argv[0], WithColor::error(errs(), argv[0]));
+ Err.print(ToolName, errs());
return 1;
}
@@ -547,13 +554,15 @@ int main(int argc, char **argv) {
// Process single function
Function *F = M->getFunction(FunctionName);
if (!F) {
- errs() << "Error: Function '" << FunctionName << "' not found\n";
+ WithColor::error(errs(), ToolName)
+ << "Function '" << FunctionName << "' not found\n";
return 1;
}
MachineFunction *MF = MMI->getMachineFunction(*F);
if (!MF) {
- errs() << "Error: No MachineFunction for " << FunctionName << "\n";
+ WithColor::error(errs(), ToolName)
+ << "No MachineFunction for " << FunctionName << "\n";
return 1;
}
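The switch from bare errs() << "Error: ..." to WithColor::error(errs(), ToolName) is what produces the lowercase "llvm-ir2vec: error: ..." prefix that the updated FileCheck lines above now match. A minimal sketch of the resulting output, assuming only the WithColor API used in this patch (the message strings are taken from the tests):

  #include "llvm/Support/WithColor.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;

  int main() {
    // Prints "llvm-ir2vec: error: Function 'nonexistent' not found"
    // (colorized when stderr supports it).
    WithColor::error(errs(), "llvm-ir2vec")
        << "Function 'nonexistent' not found\n";
    // Prints "llvm-ir2vec: warning: No MachineFunction for foo".
    WithColor::warning(errs(), "llvm-ir2vec")
        << "No MachineFunction for foo\n";
    return 0;
  }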
diff --git a/llvm/unittests/Support/GlobPatternTest.cpp b/llvm/unittests/Support/GlobPatternTest.cpp
index 58fd767..872a21e 100644
--- a/llvm/unittests/Support/GlobPatternTest.cpp
+++ b/llvm/unittests/Support/GlobPatternTest.cpp
@@ -329,6 +329,72 @@ TEST_F(GlobPatternTest, PrefixSuffix) {
EXPECT_EQ("cd", Pat->suffix());
}
+TEST_F(GlobPatternTest, Substr) {
+ auto Pat = GlobPattern::create("");
+ ASSERT_TRUE((bool)Pat);
+ EXPECT_EQ("", Pat->longest_substr());
+
+ Pat = GlobPattern::create("abcd");
+ ASSERT_TRUE((bool)Pat);
+ EXPECT_EQ("", Pat->longest_substr());
+
+ Pat = GlobPattern::create("a*bcd");
+ ASSERT_TRUE((bool)Pat);
+ EXPECT_EQ("", Pat->longest_substr());
+
+ Pat = GlobPattern::create("*abcd");
+ ASSERT_TRUE((bool)Pat);
+ EXPECT_EQ("", Pat->longest_substr());
+
+ Pat = GlobPattern::create("abcd*");
+ ASSERT_TRUE((bool)Pat);
+ EXPECT_EQ("", Pat->longest_substr());
+
+ Pat = GlobPattern::create("a*bc*d");
+ ASSERT_TRUE((bool)Pat);
+ EXPECT_EQ("bc", Pat->longest_substr());
+
+ Pat = GlobPattern::create("a*bc*def*g");
+ ASSERT_TRUE((bool)Pat);
+ EXPECT_EQ("def", Pat->longest_substr());
+
+ Pat = GlobPattern::create("a*bcd*ef*g");
+ ASSERT_TRUE((bool)Pat);
+ EXPECT_EQ("bcd", Pat->longest_substr());
+
+ Pat = GlobPattern::create("a*bcd*efg*h");
+ ASSERT_TRUE((bool)Pat);
+ EXPECT_EQ("bcd", Pat->longest_substr());
+
+ Pat = GlobPattern::create("a*bcd[ef]g*h");
+ ASSERT_TRUE((bool)Pat);
+ EXPECT_EQ("bcd", Pat->longest_substr());
+
+ Pat = GlobPattern::create("a*bc[d]efg*h");
+ ASSERT_TRUE((bool)Pat);
+ EXPECT_EQ("efg", Pat->longest_substr());
+
+ Pat = GlobPattern::create("a*bc[]]efg*h");
+ ASSERT_TRUE((bool)Pat);
+ EXPECT_EQ("efg", Pat->longest_substr());
+
+ Pat = GlobPattern::create("a*bcde\\fg*h");
+ ASSERT_TRUE((bool)Pat);
+ EXPECT_EQ("bcde", Pat->longest_substr());
+
+ Pat = GlobPattern::create("a*bcde\\[fg*h");
+ ASSERT_TRUE((bool)Pat);
+ EXPECT_EQ("bcde", Pat->longest_substr());
+
+ Pat = GlobPattern::create("a*bcde?fg*h");
+ ASSERT_TRUE((bool)Pat);
+ EXPECT_EQ("bcde", Pat->longest_substr());
+
+ Pat = GlobPattern::create("a*bcdef{g}*h");
+ ASSERT_TRUE((bool)Pat);
+ EXPECT_EQ("bcdef", Pat->longest_substr());
+}
+
TEST_F(GlobPatternTest, Pathological) {
std::string P, S(40, 'a');
StringRef Pieces[] = {"a*", "[ba]*", "{b*,a*}*"};
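From the expectations in the new Substr test, longest_substr() returns the longest literal run that sits strictly between '*' wildcards, with brackets, escapes, '?', and brace expansions all terminating a run. A minimal usage sketch under that reading; the pattern string is illustrative only:

  #include "llvm/Support/Error.h"
  #include "llvm/Support/GlobPattern.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;

  int main() {
    // Illustrative pattern: literal runs bounded by '*' on both sides are
    // "Target" and "AArch64"; the leading "lib" and trailing ".cpp" do not
    // qualify because they are not enclosed by wildcards.
    Expected<GlobPattern> Pat = GlobPattern::create("lib*Target*AArch64*.cpp");
    if (!Pat) {
      logAllUnhandledErrors(Pat.takeError(), errs(), "glob: ");
      return 1;
    }
    outs() << Pat->longest_substr() << "\n"; // "AArch64"
    return 0;
  }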
diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
index 94947b7..c06a48e 100644
--- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ -1437,6 +1437,13 @@ ExtractStridedMetadataOp::fold(FoldAdaptor adaptor,
atLeastOneReplacement |= replaceConstantUsesOf(
builder, getLoc(), getStrides(), getConstifiedMixedStrides());
+ // extract_strided_metadata(cast(x)) -> extract_strided_metadata(x).
+ if (auto prev = getSource().getDefiningOp<CastOp>())
+ if (isa<MemRefType>(prev.getSource().getType())) {
+ getSourceMutable().assign(prev.getSource());
+ atLeastOneReplacement = true;
+ }
+
return success(atLeastOneReplacement);
}
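The in-place source rewrite above replaces the ExtractStridedMetadataOpCastFolder pattern removed from ExpandStridedMetadata.cpp later in this patch: the constant-replacement calls just before it already propagate any static offset, sizes, and strides constified from the cast's result type, so rebinding the op to the cast's ranked source afterwards appears to lose no static information. The tests for this behavior move from expand-strided-metadata.mlir into canonicalize.mlir accordingly.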
diff --git a/mlir/lib/Dialect/MemRef/Transforms/ExpandStridedMetadata.cpp b/mlir/lib/Dialect/MemRef/Transforms/ExpandStridedMetadata.cpp
index d35566a..bd02516 100644
--- a/mlir/lib/Dialect/MemRef/Transforms/ExpandStridedMetadata.cpp
+++ b/mlir/lib/Dialect/MemRef/Transforms/ExpandStridedMetadata.cpp
@@ -1033,91 +1033,6 @@ class ExtractStridedMetadataOpReinterpretCastFolder
}
};
-/// Replace `base, offset, sizes, strides =
-/// extract_strided_metadata(
-/// cast(src) to dstTy)`
-/// With
-/// ```
-/// base, ... = extract_strided_metadata(src)
-/// offset = !dstTy.srcOffset.isDynamic()
-/// ? dstTy.srcOffset
-/// : extract_strided_metadata(src).offset
-/// sizes = for each srcSize in dstTy.srcSizes:
-/// !srcSize.isDynamic()
-/// ? srcSize
-// : extract_strided_metadata(src).sizes[i]
-/// strides = for each srcStride in dstTy.srcStrides:
-/// !srcStrides.isDynamic()
-/// ? srcStrides
-/// : extract_strided_metadata(src).strides[i]
-/// ```
-///
-/// In other words, consume the `cast` and apply its effects
-/// on the offset, sizes, and strides or compute them directly from `src`.
-class ExtractStridedMetadataOpCastFolder
- : public OpRewritePattern<memref::ExtractStridedMetadataOp> {
- using OpRewritePattern::OpRewritePattern;
-
- LogicalResult
- matchAndRewrite(memref::ExtractStridedMetadataOp extractStridedMetadataOp,
- PatternRewriter &rewriter) const override {
- Value source = extractStridedMetadataOp.getSource();
- auto castOp = source.getDefiningOp<memref::CastOp>();
- if (!castOp)
- return failure();
-
- Location loc = extractStridedMetadataOp.getLoc();
- // Check if the source is suitable for extract_strided_metadata.
- SmallVector<Type> inferredReturnTypes;
- if (failed(extractStridedMetadataOp.inferReturnTypes(
- rewriter.getContext(), loc, {castOp.getSource()},
- /*attributes=*/{}, /*properties=*/nullptr, /*regions=*/{},
- inferredReturnTypes)))
- return rewriter.notifyMatchFailure(castOp,
- "cast source's type is incompatible");
-
- auto memrefType = cast<MemRefType>(source.getType());
- unsigned rank = memrefType.getRank();
- SmallVector<OpFoldResult> results;
- results.resize_for_overwrite(rank * 2 + 2);
-
- auto newExtractStridedMetadata = memref::ExtractStridedMetadataOp::create(
- rewriter, loc, castOp.getSource());
-
- // Register the base_buffer.
- results[0] = newExtractStridedMetadata.getBaseBuffer();
-
- auto getConstantOrValue = [&rewriter](int64_t constant,
- OpFoldResult ofr) -> OpFoldResult {
- return ShapedType::isStatic(constant)
- ? OpFoldResult(rewriter.getIndexAttr(constant))
- : ofr;
- };
-
- auto [sourceStrides, sourceOffset] = memrefType.getStridesAndOffset();
- assert(sourceStrides.size() == rank && "unexpected number of strides");
-
- // Register the new offset.
- results[1] =
- getConstantOrValue(sourceOffset, newExtractStridedMetadata.getOffset());
-
- const unsigned sizeStartIdx = 2;
- const unsigned strideStartIdx = sizeStartIdx + rank;
- ArrayRef<int64_t> sourceSizes = memrefType.getShape();
-
- SmallVector<OpFoldResult> sizes = newExtractStridedMetadata.getSizes();
- SmallVector<OpFoldResult> strides = newExtractStridedMetadata.getStrides();
- for (unsigned i = 0; i < rank; ++i) {
- results[sizeStartIdx + i] = getConstantOrValue(sourceSizes[i], sizes[i]);
- results[strideStartIdx + i] =
- getConstantOrValue(sourceStrides[i], strides[i]);
- }
- rewriter.replaceOp(extractStridedMetadataOp,
- getValueOrCreateConstantIndexOp(rewriter, loc, results));
- return success();
- }
-};
-
/// Replace `base, offset, sizes, strides = extract_strided_metadata(
/// memory_space_cast(src) to dstTy)`
/// with
@@ -1209,7 +1124,6 @@ void memref::populateExpandStridedMetadataPatterns(
RewriteExtractAlignedPointerAsIndexOfViewLikeOp,
ExtractStridedMetadataOpReinterpretCastFolder,
ExtractStridedMetadataOpSubviewFolder,
- ExtractStridedMetadataOpCastFolder,
ExtractStridedMetadataOpMemorySpaceCastFolder,
ExtractStridedMetadataOpAssumeAlignmentFolder,
ExtractStridedMetadataOpExtractStridedMetadataFolder>(
@@ -1226,7 +1140,6 @@ void memref::populateResolveExtractStridedMetadataPatterns(
ExtractStridedMetadataOpSubviewFolder,
RewriteExtractAlignedPointerAsIndexOfViewLikeOp,
ExtractStridedMetadataOpReinterpretCastFolder,
- ExtractStridedMetadataOpCastFolder,
ExtractStridedMetadataOpMemorySpaceCastFolder,
ExtractStridedMetadataOpAssumeAlignmentFolder,
ExtractStridedMetadataOpExtractStridedMetadataFolder>(
diff --git a/mlir/test/Dialect/MemRef/canonicalize.mlir b/mlir/test/Dialect/MemRef/canonicalize.mlir
index 7160b52..3130902 100644
--- a/mlir/test/Dialect/MemRef/canonicalize.mlir
+++ b/mlir/test/Dialect/MemRef/canonicalize.mlir
@@ -901,6 +901,132 @@ func.func @scope_merge_without_terminator() {
// -----
+// Check that we simplify extract_strided_metadata of cast
+// when the source of the cast is compatible with what
+// `extract_strided_metadata` accepts.
+//
+// When we apply the transformation the resulting offset, sizes and strides
+// should come straight from the inputs of the cast.
+// Additionally the folder on extract_strided_metadata should propagate the
+// static information.
+//
+// CHECK-LABEL: func @extract_strided_metadata_of_cast
+// CHECK-SAME: %[[ARG:.*]]: memref<3x?xi32, strided<[4, ?], offset: ?>>)
+//
+// CHECK-DAG: %[[C3:.*]] = arith.constant 3 : index
+// CHECK-DAG: %[[C4:.*]] = arith.constant 4 : index
+// CHECK: %[[BASE:.*]], %[[DYN_OFFSET:.*]], %[[DYN_SIZES:.*]]:2, %[[DYN_STRIDES:.*]]:2 = memref.extract_strided_metadata %[[ARG]]
+//
+// CHECK: return %[[BASE]], %[[DYN_OFFSET]], %[[C3]], %[[DYN_SIZES]]#1, %[[C4]], %[[DYN_STRIDES]]#1
+func.func @extract_strided_metadata_of_cast(
+ %arg : memref<3x?xi32, strided<[4, ?], offset:?>>)
+ -> (memref<i32>, index,
+ index, index,
+ index, index) {
+
+ %cast =
+ memref.cast %arg :
+ memref<3x?xi32, strided<[4, ?], offset: ?>> to
+ memref<?x?xi32, strided<[?, ?], offset: ?>>
+
+ %base, %base_offset, %sizes:2, %strides:2 =
+ memref.extract_strided_metadata %cast:memref<?x?xi32, strided<[?, ?], offset: ?>>
+ -> memref<i32>, index,
+ index, index,
+ index, index
+
+ return %base, %base_offset,
+ %sizes#0, %sizes#1,
+ %strides#0, %strides#1 :
+ memref<i32>, index,
+ index, index,
+ index, index
+}
+
+// -----
+
+// Check that we simplify extract_strided_metadata of cast
+// when the source of the cast is compatible with what
+// `extract_strided_metadata` accepts.
+//
+// Same as extract_strided_metadata_of_cast but with constant sizes and strides
+// in the destination type.
+//
+// CHECK-LABEL: func @extract_strided_metadata_of_cast_w_csts
+// CHECK-SAME: %[[ARG:.*]]: memref<?x?xi32, strided<[?, ?], offset: ?>>)
+//
+// CHECK-DAG: %[[C4:.*]] = arith.constant 4 : index
+// CHECK-DAG: %[[C18:.*]] = arith.constant 18 : index
+// CHECK-DAG: %[[C25:.*]] = arith.constant 25 : index
+// CHECK: %[[BASE:.*]], %[[DYN_OFFSET:.*]], %[[DYN_SIZES:.*]]:2, %[[DYN_STRIDES:.*]]:2 = memref.extract_strided_metadata %[[ARG]]
+//
+// CHECK: return %[[BASE]], %[[C25]], %[[C4]], %[[DYN_SIZES]]#1, %[[DYN_STRIDES]]#0, %[[C18]]
+func.func @extract_strided_metadata_of_cast_w_csts(
+ %arg : memref<?x?xi32, strided<[?, ?], offset:?>>)
+ -> (memref<i32>, index,
+ index, index,
+ index, index) {
+
+ %cast =
+ memref.cast %arg :
+ memref<?x?xi32, strided<[?, ?], offset: ?>> to
+ memref<4x?xi32, strided<[?, 18], offset: 25>>
+
+ %base, %base_offset, %sizes:2, %strides:2 =
+ memref.extract_strided_metadata %cast:memref<4x?xi32, strided<[?, 18], offset: 25>>
+ -> memref<i32>, index,
+ index, index,
+ index, index
+
+ return %base, %base_offset,
+ %sizes#0, %sizes#1,
+ %strides#0, %strides#1 :
+ memref<i32>, index,
+ index, index,
+ index, index
+}
+
+// -----
+
+// Check that we don't simplify extract_strided_metadata of
+// cast when the source of the cast is unranked.
+// Unranked memrefs cannot feed into extract_strided_metadata operations.
+// Note: Technically we could still fold the sizes and strides.
+//
+// CHECK-LABEL: func @extract_strided_metadata_of_cast_unranked
+// CHECK-SAME: %[[ARG:.*]]: memref<*xi32>)
+//
+// CHECK: %[[CAST:.*]] = memref.cast %[[ARG]] :
+// CHECK: %[[BASE:.*]], %[[OFFSET:.*]], %[[SIZES:.*]]:2, %[[STRIDES:.*]]:2 = memref.extract_strided_metadata %[[CAST]]
+//
+// CHECK: return %[[BASE]], %[[OFFSET]], %[[SIZES]]#0, %[[SIZES]]#1, %[[STRIDES]]#0, %[[STRIDES]]#1
+func.func @extract_strided_metadata_of_cast_unranked(
+ %arg : memref<*xi32>)
+ -> (memref<i32>, index,
+ index, index,
+ index, index) {
+
+ %cast =
+ memref.cast %arg :
+ memref<*xi32> to
+ memref<?x?xi32, strided<[?, ?], offset: ?>>
+
+ %base, %base_offset, %sizes:2, %strides:2 =
+ memref.extract_strided_metadata %cast:memref<?x?xi32, strided<[?, ?], offset: ?>>
+ -> memref<i32>, index,
+ index, index,
+ index, index
+
+ return %base, %base_offset,
+ %sizes#0, %sizes#1,
+ %strides#0, %strides#1 :
+ memref<i32>, index,
+ index, index,
+ index, index
+}
+
+// -----
+
// CHECK-LABEL: func @reinterpret_noop
// CHECK-SAME: (%[[ARG:.*]]: memref<2x3x4xf32>)
// CHECK-NEXT: return %[[ARG]]
diff --git a/mlir/test/Dialect/MemRef/expand-strided-metadata.mlir b/mlir/test/Dialect/MemRef/expand-strided-metadata.mlir
index 1e6b011..18cdfb7 100644
--- a/mlir/test/Dialect/MemRef/expand-strided-metadata.mlir
+++ b/mlir/test/Dialect/MemRef/expand-strided-metadata.mlir
@@ -1378,133 +1378,6 @@ func.func @extract_strided_metadata_of_get_global_with_offset()
// -----
-// Check that we simplify extract_strided_metadata of cast
-// when the source of the cast is compatible with what
-// `extract_strided_metadata`s accept.
-//
-// When we apply the transformation the resulting offset, sizes and strides
-// should come straight from the inputs of the cast.
-// Additionally the folder on extract_strided_metadata should propagate the
-// static information.
-//
-// CHECK-LABEL: func @extract_strided_metadata_of_cast
-// CHECK-SAME: %[[ARG:.*]]: memref<3x?xi32, strided<[4, ?], offset: ?>>)
-//
-// CHECK-DAG: %[[C3:.*]] = arith.constant 3 : index
-// CHECK-DAG: %[[C4:.*]] = arith.constant 4 : index
-// CHECK: %[[BASE:.*]], %[[DYN_OFFSET:.*]], %[[DYN_SIZES:.*]]:2, %[[DYN_STRIDES:.*]]:2 = memref.extract_strided_metadata %[[ARG]]
-//
-// CHECK: return %[[BASE]], %[[DYN_OFFSET]], %[[C3]], %[[DYN_SIZES]]#1, %[[C4]], %[[DYN_STRIDES]]#1
-func.func @extract_strided_metadata_of_cast(
- %arg : memref<3x?xi32, strided<[4, ?], offset:?>>)
- -> (memref<i32>, index,
- index, index,
- index, index) {
-
- %cast =
- memref.cast %arg :
- memref<3x?xi32, strided<[4, ?], offset: ?>> to
- memref<?x?xi32, strided<[?, ?], offset: ?>>
-
- %base, %base_offset, %sizes:2, %strides:2 =
- memref.extract_strided_metadata %cast:memref<?x?xi32, strided<[?, ?], offset: ?>>
- -> memref<i32>, index,
- index, index,
- index, index
-
- return %base, %base_offset,
- %sizes#0, %sizes#1,
- %strides#0, %strides#1 :
- memref<i32>, index,
- index, index,
- index, index
-}
-
-// -----
-
-// Check that we simplify extract_strided_metadata of cast
-// when the source of the cast is compatible with what
-// `extract_strided_metadata`s accept.
-//
-// Same as extract_strided_metadata_of_cast but with constant sizes and strides
-// in the destination type.
-//
-// CHECK-LABEL: func @extract_strided_metadata_of_cast_w_csts
-// CHECK-SAME: %[[ARG:.*]]: memref<?x?xi32, strided<[?, ?], offset: ?>>)
-//
-// CHECK-DAG: %[[C4:.*]] = arith.constant 4 : index
-// CHECK-DAG: %[[C18:.*]] = arith.constant 18 : index
-// CHECK-DAG: %[[C25:.*]] = arith.constant 25 : index
-// CHECK: %[[BASE:.*]], %[[DYN_OFFSET:.*]], %[[DYN_SIZES:.*]]:2, %[[DYN_STRIDES:.*]]:2 = memref.extract_strided_metadata %[[ARG]]
-//
-// CHECK: return %[[BASE]], %[[C25]], %[[C4]], %[[DYN_SIZES]]#1, %[[DYN_STRIDES]]#0, %[[C18]]
-func.func @extract_strided_metadata_of_cast_w_csts(
- %arg : memref<?x?xi32, strided<[?, ?], offset:?>>)
- -> (memref<i32>, index,
- index, index,
- index, index) {
-
- %cast =
- memref.cast %arg :
- memref<?x?xi32, strided<[?, ?], offset: ?>> to
- memref<4x?xi32, strided<[?, 18], offset: 25>>
-
- %base, %base_offset, %sizes:2, %strides:2 =
- memref.extract_strided_metadata %cast:memref<4x?xi32, strided<[?, 18], offset: 25>>
- -> memref<i32>, index,
- index, index,
- index, index
-
- return %base, %base_offset,
- %sizes#0, %sizes#1,
- %strides#0, %strides#1 :
- memref<i32>, index,
- index, index,
- index, index
-}
-
-// -----
-
-// Check that we don't simplify extract_strided_metadata of
-// cast when the source of the cast is unranked.
-// Unranked memrefs cannot feed into extract_strided_metadata operations.
-// Note: Technically we could still fold the sizes and strides.
-//
-// CHECK-LABEL: func @extract_strided_metadata_of_cast_unranked
-// CHECK-SAME: %[[ARG:.*]]: memref<*xi32>)
-//
-// CHECK: %[[CAST:.*]] = memref.cast %[[ARG]] :
-// CHECK: %[[BASE:.*]], %[[OFFSET:.*]], %[[SIZES:.*]]:2, %[[STRIDES:.*]]:2 = memref.extract_strided_metadata %[[CAST]]
-//
-// CHECK: return %[[BASE]], %[[OFFSET]], %[[SIZES]]#0, %[[SIZES]]#1, %[[STRIDES]]#0, %[[STRIDES]]#1
-func.func @extract_strided_metadata_of_cast_unranked(
- %arg : memref<*xi32>)
- -> (memref<i32>, index,
- index, index,
- index, index) {
-
- %cast =
- memref.cast %arg :
- memref<*xi32> to
- memref<?x?xi32, strided<[?, ?], offset: ?>>
-
- %base, %base_offset, %sizes:2, %strides:2 =
- memref.extract_strided_metadata %cast:memref<?x?xi32, strided<[?, ?], offset: ?>>
- -> memref<i32>, index,
- index, index,
- index, index
-
- return %base, %base_offset,
- %sizes#0, %sizes#1,
- %strides#0, %strides#1 :
- memref<i32>, index,
- index, index,
- index, index
-}
-
-
-// -----
-
memref.global "private" @dynamicShmem : memref<0xf16,3>
// CHECK-LABEL: func @zero_sized_memred
diff --git a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
index b415a36..63d4aea 100644
--- a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
@@ -5697,10 +5697,17 @@ cc_binary(
copts = llvm_copts,
stamp = 0,
deps = [
+ ":AllTargetsAsmParsers",
+ ":AllTargetsCodeGens",
+ ":AllTargetsMCAs",
":Analysis",
+ ":CodeGen",
":Core",
":IRReader",
+ ":MC",
":Support",
+ ":Target",
+ ":TargetParser",
":config",
],
)
diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
index 7fb0c03..d528dae 100644
--- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
@@ -9397,6 +9397,7 @@ cc_library(
":MemRefTransforms",
":NVGPUTransforms",
":OpenACCTransforms",
+ ":OpenMPTransforms",
":QuantTransforms",
":SCFTransforms",
":SPIRVTransforms",
@@ -10173,11 +10174,31 @@ cc_library(
deps = [
":IR",
":OpenACCDialect",
+ ":OpenACCUtils",
":Pass",
],
)
cc_library(
+ name = "OpenACCUtils",
+ srcs = glob(
+ [
+ "lib/Dialect/OpenACC/Utils/*.cpp",
+ ],
+ ),
+ includes = ["include"],
+ deps = [
+ ":OpenACCDialect",
+ ":OpenACCOpsIncGen",
+ ":OpenACCPassIncGen",
+ ":OpenACCTypeInterfacesIncGen",
+ ":Support",
+ ":ViewLikeInterface",
+ "//llvm:Support",
+ ],
+)
+
+cc_library(
name = "OpenACCTransforms",
srcs = glob(
[
@@ -10330,6 +10351,40 @@ cc_library(
],
)
+gentbl_cc_library(
+ name = "OpenMPPassIncGen",
+ tbl_outs = {"include/mlir/Dialect/OpenMP/Transforms/Passes.h.inc": [
+ "-gen-pass-decls",
+ "-name=OpenMP",
+ ]},
+ tblgen = ":mlir-tblgen",
+ td_file = "include/mlir/Dialect/OpenMP/Transforms/Passes.td",
+ deps = [":PassBaseTdFiles"],
+)
+
+cc_library(
+ name = "OpenMPTransforms",
+ srcs = glob(
+ [
+ "lib/Dialect/OpenMP/Transforms/*.cpp",
+ ],
+ ),
+ hdrs = glob(["include/mlir/Dialect/OpenMP/Transforms/*.h"]),
+ includes = ["include"],
+ deps = [
+ ":Analysis",
+ ":FuncDialect",
+ ":IR",
+ ":LLVMDialect",
+ ":OpenMPDialect",
+ ":OpenMPPassIncGen",
+ ":Pass",
+ ":Support",
+ ":Transforms",
+ "//llvm:Support",
+ ],
+)
+
cc_library(
name = "OpenACCToSCF",
srcs = glob([